Diffstat (limited to 'storage/tokudb/PerconaFT')
-rw-r--r-- storage/tokudb/PerconaFT/.clang-format | 36
-rw-r--r-- storage/tokudb/PerconaFT/CMakeLists.txt | 106
-rw-r--r-- storage/tokudb/PerconaFT/COPYING.AGPLv3 | 661
-rw-r--r-- storage/tokudb/PerconaFT/COPYING.APACHEv2 | 174
-rw-r--r-- storage/tokudb/PerconaFT/COPYING.GPLv2 | 339
-rw-r--r-- storage/tokudb/PerconaFT/CTestConfig.cmake | 13
-rw-r--r-- storage/tokudb/PerconaFT/CTestCustom.cmake.in | 239
-rw-r--r-- storage/tokudb/PerconaFT/PATENTS | 37
-rw-r--r-- storage/tokudb/PerconaFT/README.md | 117
-rw-r--r-- storage/tokudb/PerconaFT/bash.suppressions | 6
-rw-r--r-- storage/tokudb/PerconaFT/buildbot/compile.suppressions | 4
-rw-r--r-- storage/tokudb/PerconaFT/buildheader/CMakeLists.txt | 29
-rw-r--r-- storage/tokudb/PerconaFT/buildheader/db-4.6.19.h | 2670
-rw-r--r-- storage/tokudb/PerconaFT/buildheader/make_tdb.cc | 845
-rw-r--r-- storage/tokudb/PerconaFT/cmake/merge_archives_unix.cmake.in | 96
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/FindValgrind.cmake | 18
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/TokuBuildTagDatabases.cmake | 126
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake | 137
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/TokuMergeLibs.cmake | 99
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/TokuSetupCTest.cmake | 155
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake | 191
-rw-r--r-- storage/tokudb/PerconaFT/cmake_modules/TokuThirdParty.cmake | 111
-rw-r--r-- storage/tokudb/PerconaFT/ft/CMakeLists.txt | 96
-rw-r--r-- storage/tokudb/PerconaFT/ft/bndata.cc | 675
-rw-r--r-- storage/tokudb/PerconaFT/ft/bndata.h | 333
-rw-r--r-- storage/tokudb/PerconaFT/ft/cachetable/background_job_manager.cc | 109
-rw-r--r-- storage/tokudb/PerconaFT/ft/cachetable/background_job_manager.h | 78
-rw-r--r-- storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h | 607
-rw-r--r-- storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc | 5018
-rw-r--r-- storage/tokudb/PerconaFT/ft/cachetable/cachetable.h | 588
-rw-r--r-- storage/tokudb/PerconaFT/ft/cachetable/checkpoint.cc | 333
-rw-r--r-- storage/tokudb/PerconaFT/ft/cachetable/checkpoint.h | 120
-rw-r--r-- storage/tokudb/PerconaFT/ft/comparator.h | 150
-rw-r--r-- storage/tokudb/PerconaFT/ft/cursor.cc | 456
-rw-r--r-- storage/tokudb/PerconaFT/ft/cursor.h | 186
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc | 373
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-cachetable-wrappers.h | 141
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-flusher-internal.h | 183
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-flusher.cc | 1929
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-flusher.h | 147
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-hot-flusher.cc | 362
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-internal.h | 495
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-ops.cc | 5263
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-ops.h | 295
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-recount-rows.cc | 106
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-status.cc | 503
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-status.h | 539
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-test-helpers.cc | 268
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft-verify.cc | 524
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft.cc | 1186
-rw-r--r-- storage/tokudb/PerconaFT/ft/ft.h | 195
-rw-r--r-- storage/tokudb/PerconaFT/ft/le-cursor.cc | 139
-rw-r--r-- storage/tokudb/PerconaFT/ft/le-cursor.h | 75
-rw-r--r-- storage/tokudb/PerconaFT/ft/leafentry.cc | 45
-rw-r--r-- storage/tokudb/PerconaFT/ft/leafentry.h | 236
-rw-r--r-- storage/tokudb/PerconaFT/ft/loader/callbacks.cc | 148
-rw-r--r-- storage/tokudb/PerconaFT/ft/loader/dbufio.cc | 598
-rw-r--r-- storage/tokudb/PerconaFT/ft/loader/dbufio.h | 58
-rw-r--r-- storage/tokudb/PerconaFT/ft/loader/loader-internal.h | 320
-rw-r--r-- storage/tokudb/PerconaFT/ft/loader/loader.cc | 3424
-rw-r--r-- storage/tokudb/PerconaFT/ft/loader/loader.h | 83
-rw-r--r-- storage/tokudb/PerconaFT/ft/loader/pqueue.cc | 181
-rw-r--r-- storage/tokudb/PerconaFT/ft/loader/pqueue.h | 68
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/log-internal.h | 225
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/log.h | 69
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc | 295
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/logcursor.cc | 497
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/logcursor.h | 74
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/logfilemgr.cc | 205
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/logfilemgr.h | 65
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/logformat.cc | 835
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/logger.cc | 1436
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/logger.h | 274
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/recover.cc | 1740
-rw-r--r-- storage/tokudb/PerconaFT/ft/logger/recover.h | 85
-rw-r--r-- storage/tokudb/PerconaFT/ft/msg.cc | 120
-rw-r--r-- storage/tokudb/PerconaFT/ft/msg.h | 191
-rw-r--r-- storage/tokudb/PerconaFT/ft/msg_buffer.cc | 292
-rw-r--r-- storage/tokudb/PerconaFT/ft/msg_buffer.h | 131
-rw-r--r-- storage/tokudb/PerconaFT/ft/node.cc | 2150
-rw-r--r-- storage/tokudb/PerconaFT/ft/node.h | 608
-rw-r--r-- storage/tokudb/PerconaFT/ft/pivotkeys.cc | 438
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/block_allocator.cc | 260
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/block_allocator.h | 188
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/block_table.cc | 1157
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/block_table.h | 340
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/compress.cc | 259
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/compress.h | 78
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/ft-node-deserialize.cc | 186
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc | 914
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/ft-serialize.h | 73
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/ft_layout_version.h | 80
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc | 3259
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.h | 127
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/quicklz.cc | 887
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/quicklz.h | 177
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.cc | 833
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h | 356
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/rbuf.h | 156
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/sub_block.cc | 392
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/sub_block.h | 160
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/wbuf.h | 209
-rw-r--r-- storage/tokudb/PerconaFT/ft/serialize/workset.h | 135
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt | 144
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/benchmark-test.cc | 254
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/block_allocator_test.cc | 280
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/bnc-insert-benchmark.cc | 139
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-4357.cc | 107
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-4365.cc | 135
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-5097.cc | 192
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc | 201
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-5978.cc | 235
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-all-write.cc | 96
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-pending.cc | 225
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-pinned-nodes.cc | 155
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-prefetched-nodes.cc | 158
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-test.cc | 186
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-checkpointer-class.cc | 368
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-checkpoint.cc | 140
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-checkpoint2.cc | 140
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc | 169
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc | 81
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-everything-pinned.cc | 104
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc | 102
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-same-fullhash.cc | 117
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-simple.cc | 122
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-clock-all-pinned.cc | 79
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction.cc | 143
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction2.cc | 198
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction3.cc | 225
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction4.cc | 179
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-clone-checkpoint.cc | 147
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc | 148
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-clone-partial-fetch.cc | 149
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-clone-pin-nonblocking.cc | 124
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-clone-unpin-remove.cc | 137
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-count-pinned-test.cc | 92
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-debug-test.cc | 96
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-close-test.cc | 173
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-close-test2.cc | 188
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-getandpin-test.cc | 162
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-getandpin-test2.cc | 190
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-evictor-class.cc | 272
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-fd-test.cc | 98
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-fetch-inducing-evictor.cc | 149
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-flush-during-cleaner.cc | 101
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-flush-test.cc | 116
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-getandpin-test.cc | 119
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc | 89
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-partial-fetch.cc | 225
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-pin-checkpoint.cc | 433
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc | 111
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-checkpoint-test.cc | 201
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-close-leak-test.cc | 115
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-close-test.cc | 141
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-flowcontrol-test.cc | 150
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-getandpin-test.cc | 183
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-maybegetandpin-test.cc | 108
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch2-test.cc | 118
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-put-checkpoint.cc | 562
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-put-test.cc | 88
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-reserve-filenum.cc | 112
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-rwlock-test.cc | 225
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-clone.cc | 188
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-clone2.cc | 138
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-close.cc | 330
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-maybe-get-pin.cc | 110
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-cheap.cc | 127
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-dep-nodes.cc | 198
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-nonblocking-cheap.cc | 155
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-nonblocking.cc | 145
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin.cc | 144
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-put-dep-nodes.cc | 220
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-read-pin-nonblocking.cc | 174
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-read-pin.cc | 184
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc | 100
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-simple-verify.cc | 68
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-test.cc | 547
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-test.h | 72
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-and-remove-test.cc | 160
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-remove-and-checkpoint.cc | 123
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-test.cc | 157
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/cachetable-writer-thread-limit.cc | 100
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/comparator-test.cc | 127
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/compress-test.cc | 145
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/dbufio-test-destroy.cc | 110
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/dbufio-test.cc | 126
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/dmt-test.cc | 985
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/dmt-test2.cc | 321
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/fifo-test.cc | 133
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-bfe-query.cc | 439
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc | 464
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-serialize-benchmark.cc | 444
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-serialize-sub-block-test.cc | 126
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-serialize-test.cc | 1299
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-test-cursor-2.cc | 135
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-test-cursor.cc | 914
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-test-header.cc | 95
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-test.cc | 1281
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-test0.cc | 72
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-test1.cc | 75
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-test2.cc | 85
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-test3.cc | 110
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-test4.cc | 103
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ft-test5.cc | 99
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ftloader-error-injector.h | 172
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ftloader-test-bad-generate.cc | 202
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ftloader-test-extractor-errors.cc | 263
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ftloader-test-extractor.cc | 456
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ftloader-test-merge-files-dbufio.cc | 568
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ftloader-test-open.cc | 127
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ftloader-test-vm.cc | 83
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ftloader-test-writer-errors.cc | 277
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ftloader-test-writer.cc | 289
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ftloader-test.cc | 434
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/generate-upgrade-recovery-logs.cc | 98
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/is_empty.cc | 156
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/keyrange.cc | 372
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/keytest.cc | 63
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/le-cursor-provdel.cc | 256
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/le-cursor-right.cc | 322
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/le-cursor-walk.cc | 217
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/list-test.cc | 201
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/log-test-maybe-trim.cc | 76
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/log-test.cc | 76
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/log-test2.cc | 57
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/log-test3.cc | 58
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/log-test4.cc | 77
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/log-test5.cc | 96
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/log-test6.cc | 100
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/log-test7.cc | 117
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logcursor-bad-checksum.cc | 118
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logcursor-bw.cc | 71
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logdir.cc | 72
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile-2.cc | 143
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile-3.cc | 185
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile.cc | 148
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logcursor-fw.cc | 71
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logcursor-print.cc | 62
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logcursor-timestamp.cc | 131
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logfilemgr-create-destroy.cc | 53
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/logfilemgr-print.cc | 55
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/make-tree.cc | 244
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/mempool-115.cc | 144
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/msnfilter.cc | 252
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/orthopush-flush.cc | 1295
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/pqueue-test.cc | 263
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/quicklz-test.cc | 90
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-bad-last-entry.cc | 120
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-cbegin-cend-hello.cc | 95
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-cbegin-cend.cc | 80
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-cbegin.cc | 86
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-cend-cbegin.cc | 92
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-datadir-is-file.cc | 98
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-empty.cc | 84
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-fopen-missing-file.cc | 89
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-hello.cc | 89
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-lsn-error-during-forward-scan.cc | 119
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-no-datadir.cc | 83
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-no-log.cc | 75
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-no-logdir.cc | 69
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/recovery-test5123.cc | 81
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/shortcut.cc | 92
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/subblock-test-checksum.cc | 193
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/subblock-test-compression.cc | 142
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/subblock-test-index.cc | 102
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/subblock-test-size.cc | 78
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc | 178
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc | 208
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-assert.cc | 63
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-bjm.cc | 104
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-flush.cc | 295
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-merge.cc | 372
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-rebalance.cc | 353
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-split.cc | 348
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-del-inorder.cc | 98
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-dirty-flushes-on-cleaner.cc | 310
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-dump-ft.cc | 72
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-flushes-on-cleaner.cc | 331
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-ft-overflow.cc | 85
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-ft-txns.h | 127
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-hot-with-bounds.cc | 187
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-inc-split.cc | 185
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-leafentry-child-txn.cc | 153
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-leafentry-nested.cc | 999
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-merges-on-cleaner.cc | 248
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-oldest-referenced-xid-flush.cc | 190
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-pick-child-to-flush.cc | 342
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-with-mhs.cc | 97
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc | 103
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-txn-child-manager.cc | 290
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc | 140
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test.h | 349
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test1308a.cc | 90
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test3681.cc | 131
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test3856.cc | 112
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test3884.cc | 502
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test4115.cc | 98
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test4244.cc | 123
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test_logcursor.cc | 265
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test_oexcl.cc | 51
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test_rightmost_leaf_seqinsert_heuristic.cc | 131
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test_rightmost_leaf_split_merge.cc | 161
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/test_toku_malloc_plain_free.cc | 52
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24 | bin 0 -> 131 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24 | bin 0 -> 94 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25 | bin 0 -> 131 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25 | bin 0 -> 94 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26 | bin 0 -> 131 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26 | bin 0 -> 94 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27 | bin 0 -> 131 bytes
-rwxr-xr-x storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27 | bin 0 -> 94 bytes
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28 | bin 0 -> 131 bytes
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28 | bin 0 -> 94 bytes
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29 | bin 0 -> 131 bytes
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29 | bin 0 -> 94 bytes
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/upgrade_test_simple.cc | 212
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc | 249
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc | 215
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc | 160
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc | 219
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc | 231
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc | 161
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc | 215
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/xid_lsn_independent.cc | 253
-rw-r--r-- storage/tokudb/PerconaFT/ft/tests/ybt-test.cc | 125
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/roll.cc | 692
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc | 258
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/rollback-apply.h | 47
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/rollback-ct-callbacks.cc | 257
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/rollback-ct-callbacks.h | 80
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/rollback.cc | 334
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/rollback.h | 145
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/rollback_log_node_cache.cc | 109
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/rollback_log_node_cache.h | 63
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/txn.cc | 754
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/txn.h | 362
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/txn_child_manager.cc | 143
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/txn_child_manager.h | 66
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/txn_manager.cc | 1040
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/txn_manager.h | 223
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/txn_state.h | 50
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/xids.cc | 247
-rw-r--r-- storage/tokudb/PerconaFT/ft/txn/xids.h | 116
-rw-r--r-- storage/tokudb/PerconaFT/ft/ule-internal.h | 103
-rw-r--r-- storage/tokudb/PerconaFT/ft/ule.cc | 2662
-rw-r--r-- storage/tokudb/PerconaFT/ft/ule.h | 74
-rw-r--r-- storage/tokudb/PerconaFT/ft/valgrind.suppressions | 294
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/CMakeLists.txt | 31
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/buffer.cpp | 141
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/buffer.hpp | 159
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/cursor-inl.hpp | 418
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/cursor.cpp | 136
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/cursor.hpp | 417
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/db.hpp | 370
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/db_env-inl.hpp | 75
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/db_env.cpp | 70
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/db_env.hpp | 466
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/db_txn.hpp | 127
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/exceptions.hpp | 152
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp | 97
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp | 226
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/slice.hpp | 189
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/stats.hpp | 48
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt | 47
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/tests/buffer_test.cpp | 217
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/tests/cursor_test.cpp | 178
-rw-r--r-- storage/tokudb/PerconaFT/ftcxx/tests/doubling_buffer.cpp | 118
-rw-r--r-- storage/tokudb/PerconaFT/locktree/CMakeLists.txt | 29
-rw-r--r-- storage/tokudb/PerconaFT/locktree/concurrent_tree.cc | 135
-rw-r--r-- storage/tokudb/PerconaFT/locktree/concurrent_tree.h | 165
-rw-r--r-- storage/tokudb/PerconaFT/locktree/keyrange.cc | 216
-rw-r--r-- storage/tokudb/PerconaFT/locktree/keyrange.h | 146
-rw-r--r-- storage/tokudb/PerconaFT/locktree/lock_request.cc | 525
-rw-r--r-- storage/tokudb/PerconaFT/locktree/lock_request.h | 231
-rw-r--r-- storage/tokudb/PerconaFT/locktree/locktree.cc | 787
-rw-r--r-- storage/tokudb/PerconaFT/locktree/locktree.h | 523
-rw-r--r-- storage/tokudb/PerconaFT/locktree/manager.cc | 513
-rw-r--r-- storage/tokudb/PerconaFT/locktree/range_buffer.cc | 259
-rw-r--r-- storage/tokudb/PerconaFT/locktree/range_buffer.h | 172
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/CMakeLists.txt | 15
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_create_destroy.cc | 68
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_acquire_release.cc | 120
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_insert_remove.cc | 158
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_insert_serial_large.cc | 95
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_remove_all.cc | 92
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_unit_test.h | 95
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/kill_waiter.cc | 100
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_create_set.cc | 72
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_get_set_keys.cc | 88
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_killed.cc | 124
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_not_killed.cc | 118
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_start_deadlock.cc | 120
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_start_pending.cc | 106
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_start_release_wait.cc | 91
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc | 117
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc | 133
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc | 135
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_unit_test.h | 80
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/lock_request_wait_time_callback.cc | 96
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_conflicts.cc | 126
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_create_destroy.cc | 73
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_1big7lt_1small.cc | 234
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_2big_1lt.cc | 197
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_2big_2lt.cc | 197
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_impossible.cc | 150
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_stalls.cc | 226
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_infinity.cc | 123
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_misc.cc | 105
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_overlapping_relock.cc | 164
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_simple_lock.cc | 149
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_single_txnid_optimization.cc | 130
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/locktree_unit_test.h | 105
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/manager_create_destroy.cc | 75
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/manager_locktree_map.cc | 100
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/manager_parallel_locktree_get_release.cc | 93
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/manager_params.cc | 68
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/manager_reference_release_lt.cc | 131
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/manager_status.cc | 119
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/manager_unit_test.h | 59
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/range_buffer_test.cc | 197
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/test.h | 122
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/txnid_set_test.cc | 111
-rw-r--r-- storage/tokudb/PerconaFT/locktree/tests/wfg_test.cc | 172
-rw-r--r-- storage/tokudb/PerconaFT/locktree/treenode.cc | 491
-rw-r--r-- storage/tokudb/PerconaFT/locktree/treenode.h | 245
-rw-r--r-- storage/tokudb/PerconaFT/locktree/txnid_set.cc | 116
-rw-r--r-- storage/tokudb/PerconaFT/locktree/txnid_set.h | 92
-rw-r--r-- storage/tokudb/PerconaFT/locktree/wfg.cc | 202
-rw-r--r-- storage/tokudb/PerconaFT/locktree/wfg.h | 118
-rw-r--r-- storage/tokudb/PerconaFT/portability/CMakeLists.txt | 63
-rw-r--r-- storage/tokudb/PerconaFT/portability/file.cc | 821
-rw-r--r-- storage/tokudb/PerconaFT/portability/huge_page_detection.cc | 148
-rw-r--r-- storage/tokudb/PerconaFT/portability/memory.cc | 516
-rw-r--r-- storage/tokudb/PerconaFT/portability/memory.h | 196
-rw-r--r-- storage/tokudb/PerconaFT/portability/os_malloc.cc | 294
-rw-r--r-- storage/tokudb/PerconaFT/portability/portability.cc | 477
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/CMakeLists.txt | 50
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/ensure_memcheck_fails.sh | 21
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/rwlock_condvar.h | 193
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-active-cpus.cc | 65
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-cache-line-boundary-fails.cc | 122
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-cpu-freq-openlimit17.cc | 68
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-cpu-freq.cc | 55
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-filesystem-sizes.cc | 87
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-flock.cc | 67
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-fsync-directory.cc | 77
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-fsync.cc | 271
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-gettime.cc | 56
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-gettimeofday.cc | 53
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-hugepage.cc | 46
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-max-data.cc | 76
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-memory-status.cc | 50
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-pagesize.cc | 48
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-pthread-rwlock-rdlock.cc | 60
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-pthread-rwlock-rwr.cc | 103
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-pwrite4g.cc | 81
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-snprintf.cc | 82
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-stat.cc | 88
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-toku-malloc.cc | 66
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test-xid.cc | 80
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/test.h | 61
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/try-assert-zero.cc | 54
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/try-assert0.cc | 53
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/try-leak-lost.cc | 46
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/try-leak-reachable.cc | 46
-rw-r--r-- storage/tokudb/PerconaFT/portability/tests/try-uninit.cc | 54
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_assert.cc | 194
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_assert.h | 146
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_atomic.h | 122
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_byteswap.h | 51
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_config.h.in | 104
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_crash.cc | 160
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_crash.h | 141
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_debug_sync.h | 76
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_htod.h | 114
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_htonl.h | 50
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc | 374
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_instr_mysql.h | 256
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_instrumentation.h | 387
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_list.h | 121
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_os.h | 125
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_os_types.h | 77
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_path.cc | 125
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_path.h | 72
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_portability.h | 576
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_pthread.cc | 73
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_pthread.h | 545
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_race_tools.h | 163
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_random.h | 118
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_stdint.h | 42
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_stdlib.h | 41
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_time.cc | 86
-rw-r--r-- storage/tokudb/PerconaFT/portability/toku_time.h | 147
-rw-r--r-- storage/tokudb/PerconaFT/scripts/run-all-nightly-tests.bash | 10
-rw-r--r-- storage/tokudb/PerconaFT/scripts/run-nightly-coverage-tests.bash | 42
-rw-r--r-- storage/tokudb/PerconaFT/scripts/run-nightly-drd-tests.bash | 35
-rw-r--r-- storage/tokudb/PerconaFT/scripts/run-nightly-release-tests.bash | 45
-rw-r--r-- storage/tokudb/PerconaFT/scripts/run.fractal.tree.tests.cmake | 135
-rw-r--r-- storage/tokudb/PerconaFT/scripts/run.stress-tests-forever.bash | 97
-rw-r--r-- storage/tokudb/PerconaFT/scripts/run.stress-tests.py | 800
-rw-r--r-- storage/tokudb/PerconaFT/scripts/tokugrind | 52
-rw-r--r-- storage/tokudb/PerconaFT/scripts/watch.stress-tests.bash | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/CMakeLists.txt | 56
-rw-r--r-- storage/tokudb/PerconaFT/src/errors.cc | 141
-rw-r--r-- storage/tokudb/PerconaFT/src/export.map | 99
-rw-r--r-- storage/tokudb/PerconaFT/src/indexer-internal.h | 116
-rw-r--r-- storage/tokudb/PerconaFT/src/indexer-undo-do.cc | 654
-rw-r--r-- storage/tokudb/PerconaFT/src/indexer.cc | 720
-rw-r--r-- storage/tokudb/PerconaFT/src/indexer.h | 125
-rw-r--r-- storage/tokudb/PerconaFT/src/loader.cc | 518
-rw-r--r-- storage/tokudb/PerconaFT/src/loader.h | 156
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/CMakeLists.txt | 493
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/big-nested-abort-abort.cc | 151
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/big-nested-abort-commit.cc | 149
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/big-nested-commit-abort.cc | 144
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/big-nested-commit-commit.cc | 145
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/big-shutdown.cc | 136
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/bigtxn27.cc | 172
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blackhole.cc | 129
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-first-empty.cc | 183
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-first.cc | 203
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-last.cc | 203
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-next-prev-deadlock.cc | 268
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-next-prev.cc | 274
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-prelock-range.cc | 163
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-put-timeout.cc | 194
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-put-wakeup.cc | 189
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-put.cc | 159
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-set-range-0.cc | 216
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-set-range-n.cc | 209
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-set-range-reverse-0.cc | 214
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-set.cc | 202
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/blocking-table-lock.cc | 156
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/bug1381.cc | 189
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/cachetable-race.cc | 152
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/checkpoint1.cc | 94
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/checkpoint_fairness.cc | 137
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/checkpoint_stress.cc | 380
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/checkpoint_test.h | 484
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/create-datadir.cc | 118
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/cursor-isolation.cc | 136
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/cursor-more-than-a-leaf-provdel.cc | 145
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/cursor-set-del-rmw.cc | 146
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/cursor-set-range-rmw.cc | 160
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/cursor-step-over-delete.cc | 108
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/db-put-simple-deadlock-threads.cc | 242
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/db-put-simple-deadlock.cc | 158
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/db-put-simple-lockwait.cc | 190
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/db-put-update-deadlock.cc | 240
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/dbremove-nofile-limit.cc | 125
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/del-multiple-huge-primary-row.cc | 240
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/del-multiple-srcdb.cc | 235
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/del-multiple.cc | 236
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/del-simple.cc | 152
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/directory_lock.cc | 390
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/diskfull.cc | 254
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/drd.suppressions | 107
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/dump-env.cc | 128
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/env-put-multiple.cc | 322
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/env_loader_memory.cc | 62
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/env_nproc.cc | 90
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/env_startup.cc | 197
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/filesize.cc | 267
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/get_key_after_bytes_unit.cc | 247
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/get_last_key.cc | 259
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/helgrind.suppressions | 158
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/helgrind1.cc | 65
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/helgrind2.cc | 135
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/helgrind3.cc | 135
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hot-optimize-table-tests.cc | 239
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-bw.cc | 475
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-error-callback.cc | 172
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-insert-committed-optimized.cc | 183
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-insert-committed.cc | 181
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-insert-provisional.cc | 182
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-lock-test.cc | 220
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-multiclient.cc | 478
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-nested-insert-committed.cc | 187
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-put-abort.cc | 191
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-put-commit.cc | 218
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-put-multiple.cc | 225
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-simple-abort-put.cc | 130
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-simple-abort.cc | 118
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-test.cc | 596
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/README | 77
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.d200.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.d200.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.i200.result | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.i200.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.test | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.d200.result | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.d200.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.i200.result | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.i200.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.result | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.test | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.test | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.d200.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.d200.test | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.i200.result | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.i200.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.result | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.test | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.d200.result | 7
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.d200.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.i200.result | 9
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.i200.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.result | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.test | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.result | 1
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.test | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/insert.300.result | 9
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/insert.300.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.live.result | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.live.test | 7
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.result | 1
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.1.live.result | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.1.live.test | 7
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.live.result | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.live.test | 8
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.result | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.test | 7
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.3.result | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.3.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov-2.py | 45
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.live.result | 15
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.live.test | 8
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.result | 14
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.test | 7
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.d0.i100.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.d0.i100.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.i100.i200.result | 1
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.i100.i200.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.d0.i100.result | 1
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.d0.i100.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.i100.i200.result | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.i100.i200.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d100.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d100.test | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.d201.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.d201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.i201.result | 1
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.i201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i100.result | 1
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i100.test | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.d201.result | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.d201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.i201.result | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.i201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.d201.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.d201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.i201.result | 1
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.i201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d100.result | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d100.test | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.d201.result | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.d201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.i201.result | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.i201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i100.result | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i100.test | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.d201.result | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.d201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.i201.result | 7
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.i201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.d201.result | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.d201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.i201.result | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.i201.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.d100.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.d100.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.i100.result | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.i100.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.d100.result | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.d100.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.i100.result | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.i100.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.d201.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.d201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.i201.result | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.i201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.d201.result | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.d201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.i201.result | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.i201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.d201.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.d201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.i201.result | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.i201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.d201.result | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.d201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.i201.result | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.i201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.d201.result | 7
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.d201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.i201.result | 9
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.i201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.d201.result | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.d201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.i201.result | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.i201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.commit202.i0.i200.i201.i202.result | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.commit202.i0.i200.i201.i202.test | 8
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.i0.i200.i201.result | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.i0.i200.i201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.committing201.i0.i200.i201.result | 9
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.committing201.i0.i200.i201.test | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.d201.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.d201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.i201.result | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.i201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.d201.result | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.d201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.i201.result | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.i201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.d201.result | 0
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.d201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.i201.result | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.i201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.d201.result | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.d201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.i201.result | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.i201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.d201.result | 7
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.d201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.i201.result | 9
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.i201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.d201.result | 3
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.d201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.i201.result | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.i201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i100.result | 2
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i100.test | 4
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i200.i201.result | 6
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i200.i201.test | 5
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/hotindexer-with-queries.cc | 280
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/inflate.cc | 171
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/inflate2.cc | 161
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/insert-dup-prelock.cc | 171
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/ipm.py | 61
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/isolation-read-committed.cc | 161
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/isolation.cc | 94
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/key-val.h | 245
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/keyrange-merge.cc | 234
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/keyrange.cc | 336
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/last-verify-time.cc | 150
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-cleanup-test.cc | 1067
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-close-nproc-limit.cc | 143
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-create-abort.cc | 118
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-create-close.cc | 130
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-create-commit-nproc-limit.cc | 159
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-create-nproc-limit.cc | 147
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-dup-test.cc | 452
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-no-puts.cc | 245
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-reference-test.cc | 254
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-stress-del.cc | 733
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-stress-test.cc | 697
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/loader-tpch-load.cc | 508
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/locktree_escalation_stalls.cc | 264
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/manyfiles.cc | 123
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/maxsize-for-loader.cc | 392
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/medium-nested-commit-commit.cc | 152
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/multiprocess.cc | 234
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/mvcc-create-table.cc | 88
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/mvcc-many-committed.cc | 138
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/mvcc-read-committed.cc | 96
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/openlimit17-locktree.cc | 117
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/openlimit17-metafiles.cc | 105
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/openlimit17.cc | 100
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_checkpoint_var.cc | 142
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_child_txn.cc | 90
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_cursor_nop.cc | 81
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_iibench.cc | 453
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_insert.cc | 91
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_malloc_free.cc | 81
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_nop.cc | 77
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_partitioned_counter.cc | 105
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_ptquery.cc | 102
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_ptquery2.cc | 115
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_rangequery.cc | 71
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_read_txn.cc | 84
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_read_txn_single_thread.cc | 110
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_read_write.cc | 117
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_root_txn.cc | 83
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_simple_counter.cc | 79
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_thread_counter.cc | 79
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/perf_txn_single_thread.cc | 110
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/powerfail.cc | 186
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/preload-db-nested.cc | 340
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/preload-db.cc | 246
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/prelock-read-read.cc | 112
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/prelock-read-write.cc | 106
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/prelock-write-read.cc | 106
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/prelock-write-write.cc | 106
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/print_engine_status.cc | 177
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/progress.cc | 445
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/put-del-multiple-array-indexing.cc | 371
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/queries_with_deletes.cc | 196
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/recover-2483.cc | 201
-rw-r--r-- storage/tokudb/PerconaFT/src/tests/recover-3113.cc | 178
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-5146.cc180
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fcreate-fdelete-fcreate.cc165
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fopen-abort.cc249
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fopen-commit.cc249
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-child-rollback.cc117
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-compare-db-descriptor.cc330
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-compare-db.cc306
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-del-multiple-abort.cc285
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-del-multiple-srcdb-fdelete-all.cc285
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-del-multiple.cc277
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-delboth-after-checkpoint.cc247
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-delboth-checkpoint.cc247
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor.cc185
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor10.cc201
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor11.cc191
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor12.cc191
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor2.cc188
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor3.cc188
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor4.cc187
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor5.cc187
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor6.cc187
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor7.cc199
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor8.cc201
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-descriptor9.cc199
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-fassociate.cc165
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-fclose-in-checkpoint.cc157
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-fcreate-basementnodesize.cc192
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-fcreate-fclose.cc147
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-fcreate-fdelete.cc156
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-fcreate-nodesize.cc193
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-fcreate-xabort.cc143
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-flt1.cc58
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-flt10.cc58
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-flt2.cc58
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-flt3.cc58
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-flt4.cc58
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-flt5.cc58
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-flt6.cc58
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-flt7.cc58
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-flt8.cc58
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-flt9.cc58
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-fopen-checkpoint-fclose.cc153
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-fopen-fclose-checkpoint.cc153
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-fopen-fdelete-checkpoint-fcreate.cc187
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-hotindexer-simple-abort-put.cc146
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-loader-test.cc518
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-lsn-filter-multiple.cc248
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-lsn-filter.cc190
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-missing-dbfile-2.cc186
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-missing-dbfile.cc177
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-missing-logfile.cc182
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-put-multiple-abort.cc257
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-put-multiple-fdelete-all.cc232
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-put-multiple-fdelete-some.cc251
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-put-multiple-srcdb-fdelete-all.cc233
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-put-multiple.cc277
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-rollback.cc209
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-rollinclude.cc221
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-split-checkpoint.cc198
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-straddle-txn-nested.cc172
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-straddle-txn.cc176
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-tablelock.cc239
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-test-logsuppress-put.cc290
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-test-logsuppress.cc287
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-test1.cc160
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-test2.cc180
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-test3.cc188
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-test_crash_in_flusher_thread.h140
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-test_stress1.cc151
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-test_stress2.cc84
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-test_stress3.cc180
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-test_stress_openclose.cc63
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update-multiple-abort.cc497
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update-multiple.cc507
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_aborts.cc215
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_aborts_before_checkpoint.cc215
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_aborts_before_close.cc215
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts.cc206
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts2.cc208
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts3.cc208
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts_before_checkpoint.cc206
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts_before_close.cc206
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values.cc210
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values2.cc213
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values3.cc211
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values_before_checkpoint.cc207
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values_before_close.cc207
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_changes_values.cc216
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_changes_values_before_checkpoint.cc216
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-update_changes_values_before_close.cc216
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-upgrade-db-descriptor-multihandle.cc327
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-upgrade-db-descriptor.cc330
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-x1-abort.cc304
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-x1-commit.cc307
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-x1-nested-abort.cc290
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-x1-nested-commit.cc291
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-x2-abort.cc267
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recover-x2-commit.cc267
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recovery_fileops_stress.cc587
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc652
-rw-r--r--storage/tokudb/PerconaFT/src/tests/recovery_stress.cc573
-rw-r--r--storage/tokudb/PerconaFT/src/tests/redirect.cc327
-rw-r--r--storage/tokudb/PerconaFT/src/tests/replace-into-write-lock.cc102
-rw-r--r--storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc161
-rw-r--r--storage/tokudb/PerconaFT/src/tests/root_fifo_1.cc185
-rw-r--r--storage/tokudb/PerconaFT/src/tests/root_fifo_2.cc166
-rw-r--r--storage/tokudb/PerconaFT/src/tests/root_fifo_31.cc188
-rw-r--r--storage/tokudb/PerconaFT/src/tests/root_fifo_32.cc188
-rw-r--r--storage/tokudb/PerconaFT/src/tests/root_fifo_41.cc230
-rw-r--r--storage/tokudb/PerconaFT/src/tests/rowsize.cc91
-rw-r--r--storage/tokudb/PerconaFT/src/tests/run-hotindexer-undo-do-tests.bash50
-rw-r--r--storage/tokudb/PerconaFT/src/tests/run_abortrecover_test.sh19
-rw-r--r--storage/tokudb/PerconaFT/src/tests/run_checkpoint_stress_test.sh26
-rw-r--r--storage/tokudb/PerconaFT/src/tests/run_diskfull_test.sh20
-rw-r--r--storage/tokudb/PerconaFT/src/tests/run_powerfail_test.py140
-rw-r--r--storage/tokudb/PerconaFT/src/tests/run_recover_stress_test.sh27
-rw-r--r--storage/tokudb/PerconaFT/src/tests/run_recover_test.sh29
-rw-r--r--storage/tokudb/PerconaFT/src/tests/run_recovery_fileops_unit.sh20
-rw-r--r--storage/tokudb/PerconaFT/src/tests/run_stress_test.py34
-rw-r--r--storage/tokudb/PerconaFT/src/tests/run_test_thread_stack.sh14
-rw-r--r--storage/tokudb/PerconaFT/src/tests/seqinsert.cc112
-rw-r--r--storage/tokudb/PerconaFT/src/tests/shutdown-3344.cc232
-rw-r--r--storage/tokudb/PerconaFT/src/tests/simple.cc89
-rw-r--r--storage/tokudb/PerconaFT/src/tests/stat64-create-modify-times.cc129
-rw-r--r--storage/tokudb/PerconaFT/src/tests/stat64-null-txn.cc173
-rw-r--r--storage/tokudb/PerconaFT/src/tests/stat64-root-changes.cc249
-rw-r--r--storage/tokudb/PerconaFT/src/tests/stat64.cc168
-rw-r--r--storage/tokudb/PerconaFT/src/tests/stress-gc.cc115
-rw-r--r--storage/tokudb/PerconaFT/src/tests/stress-gc2.cc81
-rw-r--r--storage/tokudb/PerconaFT/src/tests/stress-test.cc264
-rw-r--r--storage/tokudb/PerconaFT/src/tests/stress_openclose.h284
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test-5138.cc87
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test-nested-xopen-eclose.cc142
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test-prepare.cc139
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test-prepare2.cc161
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test-prepare3.cc340
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test-rollinclude.cc118
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test-xa-prepare.cc157
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test-xopen-eclose.cc139
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test.h454
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test1572.cc112
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test1753.cc90
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test1842.cc177
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test3039.cc284
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test3219.cc207
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test3522.cc178
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test3522b.cc189
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test3529.cc210
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test4573-logtrim.cc121
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test5092.cc81
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test938.cc186
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test938b.cc113
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test938c.cc120
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_1672532.cc210
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_3529_insert_2.cc219
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_3529_table_lock.cc212
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_3645.cc350
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_3755.cc156
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_4015.cc179
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_4368.cc71
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_4657.cc133
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_5015.cc99
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_5469.cc172
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_789.cc177
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_935.cc132
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_abort1.cc198
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_abort2.cc147
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_abort3.cc196
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_abort4.cc265
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_abort5.cc253
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_abort_delete_first.cc174
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_archive0.cc73
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_archive1.cc93
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_archive2.cc102
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_bad_implicit_promotion.cc138
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_blobs_leaf_split.cc141
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_bulk_fetch.cc305
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cachesize.cc114
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cmp_descriptor.cc281
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_compression_methods.cc155
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_2.cc127
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_3.cc139
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_DB_NEXT_no_dup.cc177
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_db_current.cc162
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_delete2.cc109
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_flags.cc92
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_interrupt.cc152
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_nonleaf_expand.cc142
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_null.cc210
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_stickyness.cc133
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_cursor_with_read_txn.cc125
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_already_exists.cc97
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_change_pagesize.cc104
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_change_xxx.cc150
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_close_no_open.cc65
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_current_clobbers_db.cc109
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_dbt_mem_behavior.cc192
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_delete.cc186
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_descriptor.cc336
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_env_open_close.cc59
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_env_open_nocreate.cc92
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_env_open_open_close.cc75
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_env_set_errpfx.cc74
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_env_set_lg_dir.cc80
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_env_set_tmp_dir.cc80
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_env_strdup_null.cc67
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_get_put_flags.cc172
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_named_delete_last.cc133
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_no_env.cc62
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_open_notexist_reopen.cc67
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_remove.cc79
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_remove_subdb.cc125
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc523
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_set_flags.cc84
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_subdb.cc95
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_subdb_different_flags.cc114
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_txn_locks_nonheaviside.cc612
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_txn_locks_read_uncommitted.cc241
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_db_version.cc61
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_env_close_flags.cc84
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_env_create_db_create.cc60
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_env_open_flags.cc92
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_equal_keys_with_different_bytes.cc97
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_error.cc131
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_forkjoin.cc60
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_get_max_row_size.cc74
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_get_zeroed_dbt.cc80
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_groupcommit_count.cc224
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_groupcommit_perf.cc151
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_hsoc.cc151
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_insert_cursor_delete_insert.cc113
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_insert_many_gc.cc105
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_insert_memleak.cc96
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_insert_unique.cc159
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_iterate_live_transactions.cc138
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_iterate_pending_lock_requests.cc139
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_keylen_diff.cc232
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_kv_gen.h226
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_kv_limits.cc211
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_large_update_broadcast_small_cachetable.cc191
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_lock_timeout_callback.cc141
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_locking_with_read_txn.cc90
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_locktree_close.cc115
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log0.cc62
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log1.cc113
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log10.cc147
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log1_abort.cc92
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log2.cc83
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log2_abort.cc76
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log3.cc91
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log3_abort.cc93
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log4.cc99
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log4_abort.cc104
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log5.cc118
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log5_abort.cc123
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log6.cc158
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log6_abort.cc163
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log6a_abort.cc338
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log7.cc123
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log8.cc147
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_log9.cc146
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_logflush.cc97
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_logmax.cc144
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_memcmp_magic.cc169
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_mostly_seq.cc110
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_multiple_checkpoints_block_commit.cc143
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_nested.cc185
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_nodup_set.cc208
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_query.cc433
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_rand_insert.cc133
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_read_txn_invalid_ops.cc196
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_redirect_func.cc188
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_restrict.cc305
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_reverse_compare_fun.cc179
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_set_func_malloc.cc123
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_simple_read_txn.cc97
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress0.cc187
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress1.cc146
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress2.cc140
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress3.cc143
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress4.cc139
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress5.cc113
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress6.cc171
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress7.cc109
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress_hot_indexing.cc333
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress_openclose.cc56
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_stress_with_verify.cc110
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_thread_flags.cc132
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_thread_insert.cc174
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt.cc132
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt2.cc132
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt3.cc132
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt4.cc132
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_transactional_descriptor.cc227
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_abort5.cc112
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_abort5a.cc133
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_abort6.cc162
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_abort7.cc124
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_begin_commit.cc70
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_close_before_commit.cc89
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_close_before_prepare_commit.cc92
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_cursor_last.cc249
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_nested1.cc173
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_nested2.cc245
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_nested3.cc281
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_nested4.cc367
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_nested5.cc386
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort.cc128
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort2.cc117
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort3.cc123
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort4.cc148
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_read_committed_always.cc121
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_txn_recover3.cc140
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_unused_memory_crash.cc137
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_abort_works.cc191
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_abort_works.cc182
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_calls_back.cc136
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_can_delete_elements.cc166
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_changes_values.cc154
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_indexer.cc225
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_loader.cc178
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_nested_updates.cc165
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_previously_deleted.cc196
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_stress.cc177
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_update_fun_has_choices.cc176
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_broadcast_with_empty_table.cc107
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_calls_back.cc136
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_can_delete_elements.cc168
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_changes_values.cc162
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_nested_updates.cc174
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_nonexistent_keys.cc193
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_previously_deleted.cc202
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_stress.cc187
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_txn_snapshot_works_concurrently.cc183
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_txn_snapshot_works_correctly_with_deletes.cc168
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_update_with_empty_table.cc142
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_updates_single_key.cc101
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_weakxaction.cc101
-rw-r--r--storage/tokudb/PerconaFT/src/tests/test_zero_length_keys.cc188
-rw-r--r--storage/tokudb/PerconaFT/src/tests/threaded_stress_test_helpers.h2915
-rw-r--r--storage/tokudb/PerconaFT/src/tests/time_create_db.cc122
-rw-r--r--storage/tokudb/PerconaFT/src/tests/transactional_fileops.cc468
-rw-r--r--storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc217
-rw-r--r--storage/tokudb/PerconaFT/src/tests/update-multiple-data-diagonal.cc343
-rw-r--r--storage/tokudb/PerconaFT/src/tests/update-multiple-key0.cc327
-rw-r--r--storage/tokudb/PerconaFT/src/tests/update-multiple-nochange.cc319
-rw-r--r--storage/tokudb/PerconaFT/src/tests/update-multiple-with-indexer-array.cc459
-rw-r--r--storage/tokudb/PerconaFT/src/tests/update-multiple-with-indexer.cc358
-rw-r--r--storage/tokudb/PerconaFT/src/tests/update.cc91
-rw-r--r--storage/tokudb/PerconaFT/src/tests/upgrade-test-1.cc263
-rw-r--r--storage/tokudb/PerconaFT/src/tests/upgrade-test-2.cc244
-rw-r--r--storage/tokudb/PerconaFT/src/tests/upgrade-test-3.cc260
-rw-r--r--storage/tokudb/PerconaFT/src/tests/upgrade-test-4.cc364
-rw-r--r--storage/tokudb/PerconaFT/src/tests/upgrade-test-5.cc245
-rw-r--r--storage/tokudb/PerconaFT/src/tests/upgrade-test-6.cc416
-rw-r--r--storage/tokudb/PerconaFT/src/tests/upgrade-test-7.cc144
-rw-r--r--storage/tokudb/PerconaFT/src/tests/upgrade_simple.cc160
-rw-r--r--storage/tokudb/PerconaFT/src/tests/xa-bigtxn-discard-abort.cc209
-rw-r--r--storage/tokudb/PerconaFT/src/tests/xa-bigtxn-discard-commit.cc206
-rw-r--r--storage/tokudb/PerconaFT/src/tests/xa-dirty-commit.cc141
-rw-r--r--storage/tokudb/PerconaFT/src/tests/xa-dirty-rollback.cc141
-rw-r--r--storage/tokudb/PerconaFT/src/tests/xa-txn-discard-abort.cc143
-rw-r--r--storage/tokudb/PerconaFT/src/tests/xa-txn-discard-commit.cc144
-rw-r--r--storage/tokudb/PerconaFT/src/tests/zombie_db.cc158
-rw-r--r--storage/tokudb/PerconaFT/src/toku_patent.cc66
-rw-r--r--storage/tokudb/PerconaFT/src/ydb-internal.h283
-rw-r--r--storage/tokudb/PerconaFT/src/ydb.cc3510
-rw-r--r--storage/tokudb/PerconaFT/src/ydb.h63
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_cursor.cc900
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_cursor.h61
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_db.cc1284
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_db.h137
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_env_func.cc185
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_env_func.h52
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_lib.cc57
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_load.h62
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_row_lock.cc295
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_row_lock.h61
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_txn.cc624
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_txn.h59
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_write.cc1136
-rw-r--r--storage/tokudb/PerconaFT/src/ydb_write.h104
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/AUTHORS1
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/CMakeLists.txt24
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/COPYING54
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/ChangeLog1916
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/INSTALL370
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/Makefile.am23
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/Makefile.in957
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/NEWS128
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/README135
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/aclocal.m49738
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/autogen.sh7
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.guess1530
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.h.in124
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.sub1773
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/snappy-1.1.2/configure18851
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/configure.ac133
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/depcomp688
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/format_description.txt110
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/framing_format.txt135
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/install-sh527
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/ltmain.sh9661
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/m4/gtest.m474
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/missing331
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-c.cc90
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-c.h138
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-internal.h150
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-sinksource.cc71
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-sinksource.h137
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-internal.cc42
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-internal.h491
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-public.h98
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-public.h.in98
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-test.cc606
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-test.h582
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy.cc1306
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy.h184
-rw-r--r--storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy_unittest.cc1355
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/ABOUT-NLS1101
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/AUTHORS27
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING65
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.GPLv2339
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.GPLv3674
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.LGPLv2.1504
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/ChangeLog7041
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Doxyfile.in1234
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/INSTALL339
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/INSTALL.generic302
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Makefile.am80
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Makefile.in887
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/NEWS0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/PACKAGERS279
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/README217
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/THANKS47
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/TODO56
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/aclocal.m41027
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh22
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile143
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess1530
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath614
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub1686
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp630
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh520
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh8406
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing376
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/config.h.in404
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/configure22982
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/configure.ac649
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/Makefile.am30
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/Makefile.in580
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/README17
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/crc32.c40
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/full_flush.c104
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/hex2bin.c55
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/known_sizes.c131
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/memusage.c51
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/repeat.c38
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/sync_flush.c135
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/faq.txt122
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/history.txt149
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/lzma-file-format.txt166
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xz-a4.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzdec-a4.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzdiff-a4.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzgrep-a4.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzless-a4.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzmore-a4.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xz-letter.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzdec-letter.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzdiff-letter.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzgrep-letter.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzless-letter.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzmore-letter.pdf0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xz.txt786
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzdec.txt95
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzdiff.txt36
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzgrep.txt39
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzless.txt40
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzmore.txt34
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/xz-file-format.txt1150
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/Makefile255
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/README113
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/config.h152
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash115
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/scanlzma/scanlzma.c86
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/Makefile.am32
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/Makefile.in525
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt.c1199
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt.in.h228
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt1.c173
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt_int.h133
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/acx_pthread.m4279
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/getopt.m471
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/gettext.m4419
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/iconv.m4101
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lc_cpucores.m457
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lc_physmem.m484
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-ld.m4110
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-link.m4644
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-prefix.m4185
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/libtool.m47357
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltoptions.m4368
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltsugar.m4123
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltversion.m423
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lt~obsolete.m492
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/nls.m431
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/po.m4428
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/posix-shell.m463
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/progtest.m492
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/visibility.m452
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/LINGUAS0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Makefile.in.in403
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Makevars46
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/POTFILES.in10
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Rules-quot47
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/boldquot.sed10
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/en@boldquot.header25
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/en@quot.header22
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/insert-header.sin23
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/quot.sed6
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/remove-potcdate.sin19
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/stamp-po1
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/xz.pot481
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/Makefile.am9
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/Makefile.in598
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/bswap.h54
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/common_w32res.rc53
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/cpucores.h53
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/integer.h172
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/mythread.h44
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/open_stdxxx.h51
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/physmem.h136
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/sysdefs.h171
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/Makefile.am94
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/Makefile.in1728
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/Makefile.am23
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/Makefile.in512
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma.h323
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/base.h598
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/bcj.h92
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/block.h536
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/check.h152
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/container.h406
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/delta.h79
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/filter.h362
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/index.h405
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/index_hash.h109
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/lzma.h412
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/stream_flags.h229
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/subblock.h202
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/version.h123
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/vli.h170
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/Makefile.inc51
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/check.c176
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/check.h97
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_fast.c84
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_small.c63
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table.c21
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table_be.h527
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table_le.h527
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_tablegen.c93
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_x86.S304
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_fast.c73
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_small.c55
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table.c21
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table_be.h523
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table_le.h523
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_tablegen.c94
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_x86.S287
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc_macros.h34
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/sha256.c215
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/Makefile.inc67
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_decoder.c231
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_decoder.h24
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_encoder.c159
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/auto_decoder.c188
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_buffer_decoder.c82
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_buffer_encoder.c301
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_decoder.c244
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_decoder.h24
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_encoder.c200
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_encoder.h49
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_header_decoder.c118
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_header_encoder.c134
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_util.c92
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/bsr.h62
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/common.c357
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/common.h270
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_buffer_encoder.c29
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_decoder_memusage.c26
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_encoder.c82
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_encoder_memusage.c26
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_preset.c29
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_preset.h34
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_buffer_decoder.c89
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_buffer_encoder.c56
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_common.c263
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_common.h50
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_decoder.c201
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_decoder.h25
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_encoder.c273
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_encoder.h29
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_flags_decoder.c48
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_flags_encoder.c58
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index.c778
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index.h69
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_decoder.c325
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_encoder.c260
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_encoder.h25
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_hash.c334
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_buffer_decoder.c93
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_buffer_encoder.c133
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_decoder.c447
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_decoder.h23
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_encoder.c276
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_encoder.h25
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_common.c49
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_common.h35
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_decoder.c84
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_encoder.c88
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_decoder.c88
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_encoder.c71
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_size.c32
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/Makefile.inc23
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_common.c75
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_common.h22
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_decoder.c77
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_decoder.h27
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_encoder.c108
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_encoder.h25
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_private.h39
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/liblzma.pc.in19
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/liblzma_w32res.rc12
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/Makefile.inc21
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_decoder.c301
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_decoder.h236
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder.c561
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder.h326
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder_hash.h99
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder_mf.c756
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/Makefile.inc43
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos.h145
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos_table.c521
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos_tablegen.c58
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_decoder.c307
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_decoder.h30
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_encoder.c386
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_encoder.h43
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_common.h225
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_decoder.c1059
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_decoder.h54
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder.c677
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder.h56
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_optimum_fast.c181
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_optimum_normal.c870
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_presets.c55
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_private.h150
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/Makefile.inc21
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price.h94
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price_table.c24
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price_tablegen.c89
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_common.h75
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_decoder.h181
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_encoder.h233
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/Makefile.inc47
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/arm.c71
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/armthumb.c76
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/ia64.c112
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/powerpc.c75
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_coder.c270
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_coder.h62
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_decoder.c42
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_decoder.h24
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_encoder.c40
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_encoder.h25
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_private.h78
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/sparc.c83
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/x86.c156
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/Makefile.inc20
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder.c632
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder.h24
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder_helper.c72
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder_helper.h31
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_encoder.c986
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_encoder.h23
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/Makefile.am29
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/Makefile.in659
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/lzmainfo.155
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/lzmainfo.c244
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/Makefile.am65
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/Makefile.in586
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzdiff.174
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzdiff.in172
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzgrep.194
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzgrep.in196
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzless.166
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzless.in58
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzmore.154
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzmore.in78
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/Makefile.am96
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/Makefile.in849
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/args.c532
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/args.h43
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/coder.c641
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/coder.h59
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/file_io.c721
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/file_io.h88
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/hardware.c97
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/hardware.h37
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/main.c314
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/main.h39
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/message.c1174
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/message.h134
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/options.c440
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/options.h40
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/private.h57
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/signals.c175
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/signals.h46
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/suffix.c213
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/suffix.h30
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/util.c231
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/util.h81
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/xz.11250
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/xz_w32res.rc12
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/Makefile.am61
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/Makefile.in714
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/lzmadec_w32res.rc5
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec.1168
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec.c498
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec_w32res.rc12
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/Makefile.am51
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/Makefile.in687
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/bcj_test.c67
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/compress_prepared_bcj_sparc0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/compress_prepared_bcj_x860
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/create_compress_files.c159
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/README232
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-backward_size.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-empty-truncated.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-footer_magic.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-header_magic.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-nonempty_index.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0cat-alone.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0cat-header_magic.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0catpad-empty.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0pad-empty.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-1.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-3.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-4.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-5.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-crc32.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-crc64.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-sha256.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-1.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-3.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-4.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-5.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-6.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-7.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-8.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-1.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-3.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-vli-1.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-vli-2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-compressed_data_padding.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-1.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-3.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-4.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-5.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0-empty.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0cat-empty.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0catpad-empty.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0pad-empty.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-3delta-lzma2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-1.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-3.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-crc32.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-crc64.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-none.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-sha256.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-delta-lzma2.tiff.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-1.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-3.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-4.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-sparc-lzma2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-x86-lzma2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-2-lzma2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-block_header.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-check.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-1.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-2.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-3.xz0
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_block_header.c242
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_check.c85
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh129
-rwxr-xr-xstorage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh33
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_filter_flags.c283
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_index.c534
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_stream_flags.c182
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/tests.h126
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/version.sh24
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/Makefile308
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/README155
-rw-r--r--storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/config.h167
-rw-r--r--storage/tokudb/PerconaFT/tools/CMakeLists.txt25
-rw-r--r--storage/tokudb/PerconaFT/tools/ftverify.cc452
-rw-r--r--storage/tokudb/PerconaFT/tools/pmprof31
-rw-r--r--storage/tokudb/PerconaFT/tools/tdb-recover.cc80
-rw-r--r--storage/tokudb/PerconaFT/tools/tokudb_dump.cc685
-rw-r--r--storage/tokudb/PerconaFT/tools/tokuft_logprint.cc74
-rw-r--r--storage/tokudb/PerconaFT/tools/tokuftdump.cc1246
-rw-r--r--storage/tokudb/PerconaFT/util/CMakeLists.txt34
-rw-r--r--storage/tokudb/PerconaFT/util/bytestring.h46
-rw-r--r--storage/tokudb/PerconaFT/util/constexpr.h52
-rw-r--r--storage/tokudb/PerconaFT/util/context.cc184
-rw-r--r--storage/tokudb/PerconaFT/util/context.h152
-rw-r--r--storage/tokudb/PerconaFT/util/dbt.cc291
-rw-r--r--storage/tokudb/PerconaFT/util/dbt.h101
-rw-r--r--storage/tokudb/PerconaFT/util/dmt.cc1213
-rw-r--r--storage/tokudb/PerconaFT/util/dmt.h675
-rw-r--r--storage/tokudb/PerconaFT/util/doubly_linked_list.h174
-rw-r--r--storage/tokudb/PerconaFT/util/fmutex.h146
-rw-r--r--storage/tokudb/PerconaFT/util/frwlock.cc351
-rw-r--r--storage/tokudb/PerconaFT/util/frwlock.h131
-rw-r--r--storage/tokudb/PerconaFT/util/growable_array.h138
-rw-r--r--storage/tokudb/PerconaFT/util/kibbutz.cc242
-rw-r--r--storage/tokudb/PerconaFT/util/kibbutz.h74
-rw-r--r--storage/tokudb/PerconaFT/util/memarena.cc191
-rw-r--r--storage/tokudb/PerconaFT/util/memarena.h136
-rw-r--r--storage/tokudb/PerconaFT/util/mempool.cc197
-rw-r--r--storage/tokudb/PerconaFT/util/mempool.h129
-rw-r--r--storage/tokudb/PerconaFT/util/minicron.cc201
-rw-r--r--storage/tokudb/PerconaFT/util/minicron.h74
-rw-r--r--storage/tokudb/PerconaFT/util/nb_mutex.h136
-rw-r--r--storage/tokudb/PerconaFT/util/omt.cc1388
-rw-r--r--storage/tokudb/PerconaFT/util/omt.h773
-rw-r--r--storage/tokudb/PerconaFT/util/partitioned_counter.cc417
-rw-r--r--storage/tokudb/PerconaFT/util/partitioned_counter.h149
-rw-r--r--storage/tokudb/PerconaFT/util/queue.cc182
-rw-r--r--storage/tokudb/PerconaFT/util/queue.h83
-rw-r--r--storage/tokudb/PerconaFT/util/rwlock.h348
-rw-r--r--storage/tokudb/PerconaFT/util/scoped_malloc.cc227
-rw-r--r--storage/tokudb/PerconaFT/util/scoped_malloc.h103
-rw-r--r--storage/tokudb/PerconaFT/util/sort.h208
-rw-r--r--storage/tokudb/PerconaFT/util/status.h75
-rw-r--r--storage/tokudb/PerconaFT/util/tests/CMakeLists.txt24
-rw-r--r--storage/tokudb/PerconaFT/util/tests/marked-omt-test.cc466
-rw-r--r--storage/tokudb/PerconaFT/util/tests/memarena-test.cc184
-rw-r--r--storage/tokudb/PerconaFT/util/tests/minicron-change-period-data-race.cc66
-rw-r--r--storage/tokudb/PerconaFT/util/tests/minicron-test.cc221
-rw-r--r--storage/tokudb/PerconaFT/util/tests/omt-test.cc898
-rw-r--r--storage/tokudb/PerconaFT/util/tests/omt-tmpl-test.cc162
-rw-r--r--storage/tokudb/PerconaFT/util/tests/queue-test.cc136
-rw-r--r--storage/tokudb/PerconaFT/util/tests/rwlock_condvar.h149
-rw-r--r--storage/tokudb/PerconaFT/util/tests/sm-basic.cc77
-rw-r--r--storage/tokudb/PerconaFT/util/tests/sm-crash-double-free.cc79
-rw-r--r--storage/tokudb/PerconaFT/util/tests/sort-tmpl-test.cc179
-rw-r--r--storage/tokudb/PerconaFT/util/tests/test-frwlock-fair-writers.cc90
-rw-r--r--storage/tokudb/PerconaFT/util/tests/test-kibbutz.cc91
-rw-r--r--storage/tokudb/PerconaFT/util/tests/test-kibbutz2.cc89
-rw-r--r--storage/tokudb/PerconaFT/util/tests/test-rwlock-cheapness.cc254
-rw-r--r--storage/tokudb/PerconaFT/util/tests/test-rwlock-unfair-writers.cc98
-rw-r--r--storage/tokudb/PerconaFT/util/tests/test-rwlock.cc403
-rw-r--r--storage/tokudb/PerconaFT/util/tests/test.h84
-rw-r--r--storage/tokudb/PerconaFT/util/tests/test_doubly_linked_list.cc184
-rw-r--r--storage/tokudb/PerconaFT/util/tests/test_partitioned_counter.cc416
-rw-r--r--storage/tokudb/PerconaFT/util/tests/test_partitioned_counter_5833.cc102
-rw-r--r--storage/tokudb/PerconaFT/util/tests/threadpool-nproc-limit.cc119
-rw-r--r--storage/tokudb/PerconaFT/util/tests/threadpool-test.cc170
-rw-r--r--storage/tokudb/PerconaFT/util/tests/threadpool-testrunf.cc114
-rw-r--r--storage/tokudb/PerconaFT/util/tests/x1764-test.cc139
-rw-r--r--storage/tokudb/PerconaFT/util/threadpool.cc298
-rw-r--r--storage/tokudb/PerconaFT/util/threadpool.h85
-rw-r--r--storage/tokudb/PerconaFT/util/x1764.cc244
-rw-r--r--storage/tokudb/PerconaFT/util/x1764.h70
1686 files changed, 420312 insertions, 0 deletions
diff --git a/storage/tokudb/PerconaFT/.clang-format b/storage/tokudb/PerconaFT/.clang-format
new file mode 100644
index 00000000..08881858
--- /dev/null
+++ b/storage/tokudb/PerconaFT/.clang-format
@@ -0,0 +1,36 @@
+Language: Cpp
+BasedOnStyle: Google
+
+# The following parameters are default for Google style,
+# but as they are important for our project they
+# are set explicitly here
+AlignAfterOpenBracket: Align
+BreakBeforeBinaryOperators: None
+ColumnLimit: 80
+PointerAlignment: Left
+SpaceAfterCStyleCast: false
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeParens: ControlStatements
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 2
+SpacesInAngles: false
+SpacesInContainerLiterals: true
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+UseTab: Never
+
+# Non-default parameters
+NamespaceIndentation: All
+IndentWidth: 4
+TabWidth: 4
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+BinPackParameters: false
+BinPackArguments: false
+ExperimentalAutoDetectBinPacking: false
+AllowAllParametersOfDeclarationOnNextLine: false
+#AlignConsecutiveAssignments: yes
+#AlignConsecutiveDeclarations: yes
+BreakStringLiterals: false
+ReflowComments: true
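
The settings above are easiest to read next to a formatted fragment. The snippet below is an illustrative sketch only, not part of the commit: a hypothetical helper (record_cachetable_io is an invented name) laid out roughly the way clang-format would render it under this configuration.

    // Illustrative only: approximate output of clang-format with the settings above.
    #include <stdint.h>

    namespace toku {
        // NamespaceIndentation: All -> declarations are indented inside namespaces.
        // IndentWidth: 4 / UseTab: Never -> four spaces per level, no tabs.
        // PointerAlignment: Left -> '*' binds to the type: 'uint64_t* total_out'.
        // BinPackParameters: false -> one parameter per line once the declaration
        // would exceed ColumnLimit: 80, aligned after the open bracket.
        void record_cachetable_io(uint64_t bytes_read,
                                  uint64_t bytes_written,
                                  uint64_t* total_out) {
            if (total_out != nullptr) {  // SpacesBeforeTrailingComments: 2
                *total_out = bytes_read + bytes_written;
            }
        }
    }  // namespace toku
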
diff --git a/storage/tokudb/PerconaFT/CMakeLists.txt b/storage/tokudb/PerconaFT/CMakeLists.txt
new file mode 100644
index 00000000..672e4b10
--- /dev/null
+++ b/storage/tokudb/PerconaFT/CMakeLists.txt
@@ -0,0 +1,106 @@
+if (CMAKE_PROJECT_NAME STREQUAL TokuDB)
+ cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)
+endif()
+set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake_modules")
+
+project(TokuDB)
+
+# suppress -rdynamic
+set(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
+set(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
+
+# See: https://jira.percona.com/browse/TDB-93
+MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-address-of-packed-member")
+
+# detect when we are being built as a subproject
+if (DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
+ add_definitions(-DMYSQL_TOKUDB_ENGINE=1)
+ add_definitions(-DMYSQL_VERSION_ID=${MYSQL_VERSION_ID})
+ # Extended PFS instrumentation:
+ # -DTOKU_PFS_MUTEX_EXTENDED_CACHETABLEMMUTEX=1
+ if (WITH_PERFSCHEMA_STORAGE_ENGINE)
+ add_definitions(-DTOKU_MYSQL_WITH_PFS)
+ endif ()
+ include_directories(${CMAKE_SOURCE_DIR}/include)
+ if ((CMAKE_BUILD_TYPE MATCHES "Debug") AND
+ (CMAKE_CXX_FLAGS_DEBUG MATCHES " -DENABLED_DEBUG_SYNC"))
+ include_directories(${CMAKE_SOURCE_DIR}/sql)
+ endif ()
+endif ()
+
+## Versions of gcc >= 4.9.0 require special version of 'ar' and 'ranlib' for
+## link-time optimizations to work properly.
+##
+## From https://gcc.gnu.org/gcc-4.9/changes.html:
+##
+## When using a linker plugin, compiling with the -flto option now
+## generates slim objects files (.o) which only contain intermediate
+## language representation for LTO. Use -ffat-lto-objects to create
+## files which contain additionally the object code. To generate
+## static libraries suitable for LTO processing, use gcc-ar and
+## gcc-ranlib; to list symbols from a slim object file use
+## gcc-nm. (Requires that ar, ranlib and nm have been compiled with
+## plugin support.)
+if ((CMAKE_CXX_COMPILER_ID STREQUAL GNU) AND
+ NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9.0"))
+ find_program(gcc_ar "gcc-ar")
+ if (gcc_ar)
+ set(CMAKE_AR "${gcc_ar}")
+ endif ()
+ find_program(gcc_ranlib "gcc-ranlib")
+ if (gcc_ranlib)
+ set(CMAKE_RANLIB "${gcc_ranlib}")
+ endif ()
+endif()
+
+include(TokuFeatureDetection)
+include(TokuSetupCompiler)
+#include(TokuSetupCTest)
+include(TokuThirdParty)
+
+set(TOKU_CMAKE_SCRIPT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
+include(TokuMergeLibs)
+
+## need a way to change the name of libs we build
+set(LIBTOKUPORTABILITY "tokuportability" CACHE STRING "Name of libtokuportability.so")
+set(LIBTOKUDB "tokufractaltree" CACHE STRING "Name of libtokufractaltree.so")
+
+set(INSTALL_LIBDIR "lib" CACHE STRING "where to install libs")
+
+if (USE_VALGRIND AND NOT VALGRIND_INCLUDE_DIR MATCHES NOTFOUND)
+ include_directories(
+ ${VALGRIND_INCLUDE_DIR}
+ )
+endif()
+include_directories(
+ ${CMAKE_CURRENT_SOURCE_DIR}/portability
+ ${CMAKE_CURRENT_SOURCE_DIR} ## so you can include <ft/ft-ops.h> from inside src/
+ ${CMAKE_CURRENT_BINARY_DIR} ## for logging code
+ )
+## include where config.h will be generated
+include_directories(${CMAKE_CURRENT_BINARY_DIR}/portability)
+
+## build db.h and include where it will be generated
+add_subdirectory(buildheader)
+include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/buildheader)
+
+## default includes and libraries
+include_directories(SYSTEM
+ /usr/local/include
+ ${ZLIB_INCLUDE_DIRS}
+ )
+
+## add subdirectories
+add_subdirectory(util)
+add_subdirectory(portability)
+add_subdirectory(ft)
+add_subdirectory(locktree)
+add_subdirectory(src)
+add_subdirectory(ftcxx)
+add_subdirectory(tools)
+
+INSTALL_DOCUMENTATION(README.md COPYING.AGPLv3 COPYING.GPLv2 PATENTS
+ COMPONENT Server)
+
+## build tags
+#include(TokuBuildTagDatabases)
diff --git a/storage/tokudb/PerconaFT/COPYING.AGPLv3 b/storage/tokudb/PerconaFT/COPYING.AGPLv3
new file mode 100644
index 00000000..dba13ed2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/COPYING.AGPLv3
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/storage/tokudb/PerconaFT/COPYING.APACHEv2 b/storage/tokudb/PerconaFT/COPYING.APACHEv2
new file mode 100644
index 00000000..ecbfc770
--- /dev/null
+++ b/storage/tokudb/PerconaFT/COPYING.APACHEv2
@@ -0,0 +1,174 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/storage/tokudb/PerconaFT/COPYING.GPLv2 b/storage/tokudb/PerconaFT/COPYING.GPLv2
new file mode 100644
index 00000000..6e475df5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/COPYING.GPLv2
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/storage/tokudb/PerconaFT/CTestConfig.cmake b/storage/tokudb/PerconaFT/CTestConfig.cmake
new file mode 100644
index 00000000..84b2e378
--- /dev/null
+++ b/storage/tokudb/PerconaFT/CTestConfig.cmake
@@ -0,0 +1,13 @@
+## This file should be placed in the root directory of your project.
+## Then modify the CMakeLists.txt file in the root directory of your
+## project to incorporate the testing dashboard.
+## # The following are required to use Dart and the CDash dashboard
+## ENABLE_TESTING()
+## INCLUDE(CTest)
+set(CTEST_PROJECT_NAME "tokudb")
+set(CTEST_NIGHTLY_START_TIME "23:59:00 EDT")
+
+set(CTEST_DROP_METHOD "http")
+set(CTEST_DROP_SITE "lex1:8080")
+set(CTEST_DROP_LOCATION "/CDash/submit.php?project=tokudb")
+set(CTEST_DROP_SITE_CDASH TRUE)
diff --git a/storage/tokudb/PerconaFT/CTestCustom.cmake.in b/storage/tokudb/PerconaFT/CTestCustom.cmake.in
new file mode 100644
index 00000000..54170b2b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/CTestCustom.cmake.in
@@ -0,0 +1,239 @@
+cmake_policy(SET CMP0012 NEW)
+
+## these tests shouldn't run with valgrind
+list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE
+ ft/bnc-insert-benchmark
+ ft/ft_loader-test-extractor-1
+ ft/ft_loader-test-extractor-2
+ ft/ft_loader-test-extractor-3
+ ft/upgrade_test_simple
+ portability/test-cache-line-boundary-fails
+ portability/try-leak-lost
+ portability/try-leak-reachable
+ portability/try-leak-uninit
+ util/helgrind_test_partitioned_counter
+ util/helgrind_test_partitioned_counter_5833
+ ydb/diskfull.tdb
+ ydb/drd_test_4015.tdb
+ ydb/drd_test_groupcommit_count.tdb
+ ydb/filesize.tdb
+ ydb/helgrind_helgrind1.tdb
+ ydb/helgrind_helgrind2.tdb
+ ydb/helgrind_helgrind3.tdb
+ ydb/helgrind_test_groupcommit_count.tdb
+ ydb/hot-optimize-table-tests.tdb
+ ydb/insert-dup-prelock.tdb
+ ydb/loader-cleanup-test2.tdb
+ ydb/loader-cleanup-test3.tdb
+ ydb/loader-stress-test4.tdb
+ ydb/maxsize-for-loader-B.tdb
+ ydb/openlimit17.tdb
+ ydb/openlimit17-locktree.tdb
+ ydb/preload-db-nested.tdb
+ ydb/stress-gc.tdb
+ ydb/stress-gc2.tdb
+ ydb/stress-test.tdb
+ ydb/test-5138.tdb
+ ydb/test-prepare.tdb
+ ydb/test-prepare2.tdb
+ ydb/test-prepare3.tdb
+ ydb/test-recover1.tdb
+ ydb/test-recover2.tdb
+ ydb/test-recover3.tdb
+ ydb/test-xa-prepare.tdb
+ ydb/test4573-logtrim.tdb
+ ydb/test_3645.tdb
+ ydb/test_groupcommit_perf.tdb
+ ydb/test_large_update_broadcast_small_cachetable.tdb
+ ydb/test_update_broadcast_stress.tdb
+ ydb/test_update_stress.tdb
+ ydb/upgrade-test-4.tdb
+ )
+
+if (NOT @RUN_HELGRIND_TESTS@)
+ list(APPEND CTEST_CUSTOM_TESTS_IGNORE
+ util/helgrind_test_partitioned_counter
+ util/helgrind_test_partitioned_counter_5833
+ ydb/helgrind_helgrind1.tdb
+ ydb/helgrind_helgrind2.tdb
+ ydb/helgrind_helgrind3.tdb
+ ydb/helgrind_test_groupcommit_count.tdb
+ )
+endif ()
+
+if (NOT @RUN_DRD_TESTS@)
+ list(APPEND CTEST_CUSTOM_TESTS_IGNORE
+ ydb/drd_test_groupcommit_count.tdb
+ ydb/drd_test_4015.tdb
+ )
+endif ()
+
+## OS X's pthreads prefer writers, so this test will deadlock
+if (@CMAKE_SYSTEM_NAME@ STREQUAL Darwin)
+ list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE portability/test-pthread-rwlock-rwr)
+ list(APPEND CTEST_CUSTOM_TESTS_IGNORE portability/test-pthread-rwlock-rwr)
+endif ()
+
+## tests that are supposed to crash will generate memcheck failures
+set(tests_that_should_fail
+ ft/test-assertA
+ ft/test-assertB
+ portability/try-assert-zero
+ portability/try-assert0
+ ydb/recover-missing-dbfile-2.abortrecover
+ ydb/recover-missing-dbfile.abortrecover
+ ydb/test_db_no_env.tdb
+ ydb/test_truncate_txn_abort.tdb
+ )
+list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE ${tests_that_should_fail})
+
+## don't run drd stress tests with valgrind either (because that would do valgrind twice)
+set(stress_tests
+ test_stress0.tdb
+ test_stress1.tdb
+ test_stress2.tdb
+ test_stress3.tdb
+ test_stress4.tdb
+ test_stress5.tdb
+ test_stress6.tdb
+ test_stress7.tdb
+ test_stress_hot_indexing.tdb
+ test_stress_openclose.tdb
+ test_stress_with_verify.tdb
+ )
+foreach(test ${stress_tests})
+ list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE
+ ydb/drd_tiny_${test}
+ ydb/drd_mid_${test}
+ ydb/drd_large_${test}
+ )
+ if(NOT @RUN_LONG_TESTS@)
+ list(APPEND CTEST_CUSTOM_TESTS_IGNORE
+ ydb/drd_large_${test}
+ )
+ endif()
+ if (NOT @RUN_DRD_TESTS@)
+ list(APPEND CTEST_CUSTOM_TESTS_IGNORE
+ ydb/drd_tiny_${test}
+ ydb/drd_mid_${test}
+ ydb/drd_large_${test}
+ )
+ endif ()
+endforeach(test)
+
+## upgrade stress tests are 5 minutes long; they don't need to run every time
+if(NOT @RUN_LONG_TESTS@)
+ foreach(test ${stress_tests})
+ if (NOT ${test} MATCHES test_stress_openclose)
+ foreach(oldver 4.2.0 5.0.8 5.2.7 6.0.0 6.1.0 6.5.1 6.6.3)
+ foreach(p_or_s pristine stressed)
+ if (NOT (${test} MATCHES test_stress4 AND ${p_or_s} MATCHES stressed))
+ foreach(size 2000)
+ list(APPEND CTEST_CUSTOM_TESTS_IGNORE ydb/${test}/upgrade/${oldver}/${p_or_s}/${size})
+ endforeach(size)
+ endif ()
+ endforeach(p_or_s)
+ endforeach(oldver)
+ endif ()
+ endforeach(test)
+endif()
+
+set(tdb_tests_that_should_fail "ydb/${stress_tests}")
+string(REGEX REPLACE ";" ";ydb/" stress_tests "${stress_tests}")
+
+set(recover_stress_tests
+ ydb/recover-test_stress1.abortrecover
+ ydb/recover-test_stress2.abortrecover
+ ydb/recover-test_stress3.abortrecover
+ ydb/recover-test_stress_openclose.abortrecover
+ )
+
+## we run stress tests separately, only run them if asked to
+if(NOT @RUN_STRESS_TESTS@)
+ list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE ${stress_tests} ${recover_stress_tests})
+ list(APPEND CTEST_CUSTOM_TESTS_IGNORE ${stress_tests} ${recover_stress_tests})
+endif()
+
+set(perf_tests
+ ydb/perf_checkpoint_var.tdb
+ ydb/perf_cursor_nop.tdb
+ ydb/perf_malloc_free.tdb
+ ydb/perf_nop.tdb
+ ydb/perf_ptquery.tdb
+ ydb/perf_ptquery2.tdb
+ ydb/perf_read_write.tdb
+ ydb/perf_xmalloc_free.tdb
+ )
+
+## we also don't need to run perf tests every time
+if(NOT @RUN_PERF_TESTS@)
+ list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE ${perf_tests})
+ list(APPEND CTEST_CUSTOM_TESTS_IGNORE ${perf_tests})
+endif()
+
+## don't run perf tests with valgrind (that's slow)
+file(GLOB perf_test_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/src/tests" perf_*.cc)
+string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" perf_tests "${perf_test_srcs}")
+set(tdb_tests_that_should_fail "ydb/${perf_tests}")
+string(REGEX REPLACE ";" ";ydb/" perf_tests "${perf_tests}")
+list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE ${perf_tests})
+
+## these tests fail often and aren't helpful
+set(known_failing_tests
+ ydb/diskfull.tdb
+ )
+list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE ${known_failing_tests})
+list(APPEND CTEST_CUSTOM_TESTS_IGNORE ${known_failing_tests})
+
+## these tests take a long time, only run them if asked to
+set(long_running_tests
+ ft/is_empty
+ ft/upgrade_test_simple
+ ydb/checkpoint_1.tdb
+ ydb/checkpoint_stress.tdb
+ ydb/hotindexer-with-queries.tdb
+ ydb/hot-optimize-table-tests.tdb
+ ydb/loader-cleanup-test0.tdb
+ ydb/loader-cleanup-test0z.tdb
+ ydb/loader-cleanup-test2.tdb
+ ydb/loader-cleanup-test2z.tdb
+ ydb/loader-stress-test4.tdb
+ ydb/loader-stress-test4z.tdb
+ ydb/manyfiles.tdb
+ ydb/preload-db-nested.tdb
+ ydb/recover_stress.tdb
+ ydb/root_fifo_1.tdb
+ ydb/root_fifo_2.tdb
+ ydb/root_fifo_31.tdb
+ ydb/root_fifo_32.tdb
+ ydb/stress-gc.tdb
+ ydb/stress-test.tdb
+ ydb/test3529.tdb
+ ydb/test_logmax.tdb
+ ydb/test_txn_nested2.tdb
+ ydb/test_update_broadcast_stress.tdb
+ ydb/test_update_stress.tdb
+ )
+if(NOT @RUN_LONG_TESTS@)
+ list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE ${long_running_tests})
+ list(APPEND CTEST_CUSTOM_TESTS_IGNORE ${long_running_tests})
+endif()
+
+## ignore log_print.cc in coverage report
+list(APPEND CTEST_CUSTOM_COVERAGE_EXCLUDE "log_print.cc")
+
+list(APPEND CTEST_CUSTOM_WARNING_EXCEPTION
+ # don't complain about warnings in xz source
+ "xz-4.999.9beta/src/liblzma"
+ # don't complain about clang missing warnings from xz code
+ "clang: warning: unknown warning option"
+ # don't complain about warnings in jemalloc source
+ "jemalloc/src"
+ "jemalloc/internal"
+ # don't complain about valgrind headers leaving things unused
+ "valgrind/valgrind.h"
+ "valgrind/memcheck.h"
+ # don't complain about ranlib or libtool on empty archive
+ "has no symbols"
+ "the table of contents is empty"
+ )
diff --git a/storage/tokudb/PerconaFT/PATENTS b/storage/tokudb/PerconaFT/PATENTS
new file mode 100644
index 00000000..ac724731
--- /dev/null
+++ b/storage/tokudb/PerconaFT/PATENTS
@@ -0,0 +1,37 @@
+UNIVERSITY PATENT NOTICE:
+ The technology is licensed by the Massachusetts Institute of
+ Technology, Rutgers State University of New Jersey, and the Research
+ Foundation of State University of New York at Stony Brook under
+ United States of America Serial No. 11/760379 and to the patents
+ and/or patent applications resulting from it.
+PATENT MARKING NOTICE:
+ This software is covered by US Patent No. 8,185,551.
+ This software is covered by US Patent No. 8,489,638.
+PATENT RIGHTS GRANT:
+ "THIS IMPLEMENTATION" means the copyrightable works distributed by
+ Percona as part of the Fractal Tree project.
+ "PATENT CLAIMS" means the claims of patents that are owned or
+ licensable by Percona, both currently or in the future; and that in
+ the absence of this license would be infringed by THIS
+ IMPLEMENTATION or by using or running THIS IMPLEMENTATION.
+ "PATENT CHALLENGE" shall mean a challenge to the validity,
+ patentability, enforceability and/or non-infringement of any of the
+ PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS.
+ Percona hereby grants to you, for the term and geographical scope of
+ the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free,
+ irrevocable (except as stated in this section) patent license to
+ make, have made, use, offer to sell, sell, import, transfer, and
+ otherwise run, modify, and propagate the contents of THIS
+ IMPLEMENTATION, where such license applies only to the PATENT
+ CLAIMS. This grant does not include claims that would be infringed
+ only as a consequence of further modifications of THIS
+ IMPLEMENTATION. If you or your agent or licensee institute or order
+ or agree to the institution of patent litigation against any entity
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ THIS IMPLEMENTATION constitutes direct or contributory patent
+ infringement, or inducement of patent infringement, then any rights
+ granted to you under this License shall terminate as of the date
+ such litigation is filed. If you or your agent or exclusive
+ licensee institute or order or agree to the institution of a PATENT
+ CHALLENGE, then Percona may terminate any rights granted to you
+ under this License.
diff --git a/storage/tokudb/PerconaFT/README.md b/storage/tokudb/PerconaFT/README.md
new file mode 100644
index 00000000..26333df8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/README.md
@@ -0,0 +1,117 @@
+PerconaFT
+======
+
+PerconaFT is a high-performance, transactional key-value store, used in the
+TokuDB storage engine for Percona Server and MySQL, and in TokuMX, the
+high-performance MongoDB distribution.
+
+PerconaFT is provided as a shared library with an interface similar to
+Berkeley DB.
+
+To build the full MySQL product, see the instructions for
+[Percona/percona-server][percona-server]. This document covers PerconaFT only.
+
+[percona-server]: https://github.com/Percona/percona-server
+
+
+Building
+--------
+
+PerconaFT is built using CMake >= 2.8.9. Out-of-source builds are
+recommended. You need a C++11 compiler, though only some versions
+of GCC >= 4.7 and Clang are tested. You also need zlib development
+packages (`yum install zlib-devel` or `apt-get install zlib1g-dev`).
+
+You will also need the source code for jemalloc, checked out in
+`third_party/`.
+
+```sh
+git clone git://github.com/Percona/PerconaFT.git percona-ft
+cd percona-ft
+git clone git://github.com/Percona/jemalloc.git third_party/jemalloc
+mkdir build
+cd build
+CC=gcc47 CXX=g++47 cmake \
+ -D CMAKE_BUILD_TYPE=Debug \
+ -D BUILD_TESTING=OFF \
+ -D USE_VALGRIND=OFF \
+ -D CMAKE_INSTALL_PREFIX=../prefix/ \
+ ..
+cmake --build . --target install
+```
+
+This will build `libft.so` and `libtokuportability.so` and install them,
+along with some header files and examples, to `percona-ft/prefix/`. It will
+also build jemalloc and install it alongside these libraries; you should
+link to that jemalloc if you plan to run benchmarks or use PerconaFT in
+production.
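+
+If you want to link an application against the installed libraries, a
+hypothetical link line might look like the following; the `example.cc`
+program, the `lib/` subdirectory of the prefix, and the exact set of
+libraries are assumptions, so adjust them to match what the install step
+actually produced.
+
+```sh
+# Hypothetical example: compile a program against the installed prefix,
+# linking the PerconaFT libraries and the bundled jemalloc.
+g++ -std=c++11 -I ../prefix/include example.cc \
+    -L ../prefix/lib -Wl,-rpath,../prefix/lib \
+    -lft -ltokuportability -ljemalloc -o example
+```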
+
+### Platforms
+
+PerconaFT is supported on 64-bit CentOS, Debian, and Ubuntu; it should work
+on other 64-bit Linux distributions and may work on OS X 10.8 and FreeBSD.
+PerconaFT is not supported on 32-bit systems.
+
+[Transparent hugepages][transparent-hugepages] is a feature of newer Linux
+kernels that interferes with the memory usage tracking calculations in
+PerconaFT and can lead to memory overcommit. If this feature is enabled,
+PerconaFT will refuse to start, and you should turn it off. If you want to
+run with transparent hugepages on anyway, you can set the environment
+variable `TOKU_HUGE_PAGES_OK=1`, but only do this for testing, and only
+with a small cache size.
+
+[transparent-hugepages]: https://access.redhat.com/site/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Performance_Tuning_Guide/s-memory-transhuge.html
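+
+As a rough sketch (the sysfs path below is the usual one, but it can vary
+by distribution and kernel), you can check and temporarily disable
+transparent hugepages like this:
+
+```sh
+# Show the current setting; [always] or [madvise] means the feature is on.
+cat /sys/kernel/mm/transparent_hugepage/enabled
+# Disable it until the next reboot (requires root).
+echo never | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
+```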
+
+
+Testing
+-------
+
+PerconaFT uses CTest for testing. The CDash testing dashboard is not
+currently public, but you can run the tests without submitting them.
+
+There are some large data files, not stored in the git repository, that
+will be made available soon. For now, the tests that use those files will
+not run.
+
+In the build directory from above:
+
+```sh
+cmake -D BUILD_TESTING=ON ..
+ctest -D ExperimentalStart \
+ -D ExperimentalConfigure \
+ -D ExperimentalBuild \
+ -D ExperimentalTest
+```
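+
+If you only want to run tests locally, without submitting results to a
+dashboard, plain `ctest` also works; the test name pattern below is just
+an illustration.
+
+```sh
+# Run only the ydb tests, eight at a time, with no dashboard submission.
+ctest -j8 -R ydb/
+```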
+
+
+Contributing
+------------
+
+Please report bugs in PerconaFT to the [issue tracker][jira].
+
+We have two publicly accessible mailing lists for TokuDB:
+
+ - tokudb-user@googlegroups.com is for general and support related
+ questions about the use of TokuDB.
+ - tokudb-dev@googlegroups.com is for discussion of the development of
+ TokuDB.
+
+All source code and test contributions must be provided under a [BSD 2-Clause][bsd-2] license. For any small change set, the license text may be contained within the commit comment and the pull request. For larger contributions, the license must be presented in a COPYING.<feature_name> file in the root of the PerconaFT project. Please see the [BSD 2-Clause license template][bsd-2] for the content of the license text.
+
+[jira]: https://jira.percona.com/projects/TDB
+[bsd-2]: http://opensource.org/licenses/BSD-2-Clause/
+
+
+License
+-------
+
+Portions of the PerconaFT library (the 'locktree' and 'omt') are available
+under the Apache version 2 license. PerconaFT is available under the GPL
+version 2 and the AGPL version 3.
+See [COPYING.APACHEv2][apachelicense],
+[COPYING.AGPLv3][agpllicense],
+[COPYING.GPLv2][gpllicense], and
+[PATENTS][patents].
+
+[apachelicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.APACHEv2
+[agpllicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.AGPLv3
+[gpllicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.GPLv2
+[patents]: http://github.com/Percona/PerconaFT/blob/master/PATENTS
diff --git a/storage/tokudb/PerconaFT/bash.suppressions b/storage/tokudb/PerconaFT/bash.suppressions
new file mode 100644
index 00000000..18e80e65
--- /dev/null
+++ b/storage/tokudb/PerconaFT/bash.suppressions
@@ -0,0 +1,6 @@
+{
+ bash
+ Memcheck:Leak
+ ...
+ obj:/bin/bash
+}
diff --git a/storage/tokudb/PerconaFT/buildbot/compile.suppressions b/storage/tokudb/PerconaFT/buildbot/compile.suppressions
new file mode 100644
index 00000000..39930d63
--- /dev/null
+++ b/storage/tokudb/PerconaFT/buildbot/compile.suppressions
@@ -0,0 +1,4 @@
+# Suppress some warnings we get from jemalloc and lzma, they aren't our fault.
+.*third_party/jemalloc/src/jemalloc.c : .*-Wattributes.*
+.*third_party/jemalloc/src/ctl.c : .*-Wunused-but-set-variable.*
+.*xz/src/build_lzma/src/liblzma/lz/lz_encoder.c : .*-Wunused-but-set-variable.*
diff --git a/storage/tokudb/PerconaFT/buildheader/CMakeLists.txt b/storage/tokudb/PerconaFT/buildheader/CMakeLists.txt
new file mode 100644
index 00000000..6d5cbb94
--- /dev/null
+++ b/storage/tokudb/PerconaFT/buildheader/CMakeLists.txt
@@ -0,0 +1,29 @@
+set_directory_properties(PROPERTIES INCLUDE_DIRECTORIES "")
+
+file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/runcat.sh" "#!/bin/sh
+out=$1; shift
+exec \"$@\" >$out")
+
+add_executable(make_tdb make_tdb.cc)
+set_property(TARGET make_tdb APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE)
+add_custom_command(
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/db.h"
+ COMMAND sh runcat.sh "${CMAKE_CURRENT_BINARY_DIR}/db.h" $<TARGET_FILE:make_tdb>
+ DEPENDS make_tdb)
+add_custom_target(install_tdb_h DEPENDS
+ "${CMAKE_CURRENT_BINARY_DIR}/db.h")
+
+# detect when we are being built as a subproject
+if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
+ install(
+ FILES "${CMAKE_CURRENT_BINARY_DIR}/db.h"
+ DESTINATION include
+ RENAME tokudb.h
+ COMPONENT tokukv_headers
+ )
+ install(
+ FILES "${CMAKE_CURRENT_BINARY_DIR}/db.h"
+ DESTINATION include
+ COMPONENT tokukv_headers
+ )
+endif ()
diff --git a/storage/tokudb/PerconaFT/buildheader/db-4.6.19.h b/storage/tokudb/PerconaFT/buildheader/db-4.6.19.h
new file mode 100644
index 00000000..f220e063
--- /dev/null
+++ b/storage/tokudb/PerconaFT/buildheader/db-4.6.19.h
@@ -0,0 +1,2670 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996,2007 Oracle. All rights reserved.
+ *
+ * $Id$
+ *
+ * db.h include file layout:
+ * General.
+ * Database Environment.
+ * Locking subsystem.
+ * Logging subsystem.
+ * Shared buffer cache (mpool) subsystem.
+ * Transaction subsystem.
+ * Access methods.
+ * Access method cursors.
+ * Dbm/Ndbm, Hsearch historic interfaces.
+ */
+
+#ifndef _DB_H_
+#define _DB_H_
+
+#ifndef __NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <pthread.h>
+#endif
+
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+#undef __P
+#define __P(protos) protos
+
+/*
+ * Berkeley DB version information.
+ */
+#define DB_VERSION_MAJOR 4
+#define DB_VERSION_MINOR 6
+#define DB_VERSION_PATCH 19
+#define DB_VERSION_STRING "Berkeley DB 4.6.19: (August 10, 2007)"
+
+/*
+ * !!!
+ * Berkeley DB uses specifically sized types. If they're not provided by
+ * the system, typedef them here.
+ *
+ * We protect them against multiple inclusion using __BIT_TYPES_DEFINED__,
+ * as does BIND and Kerberos, since we don't know for sure what #include
+ * files the user is using.
+ *
+ * !!!
+ * We also provide the standard u_int, u_long etc., if they're not provided
+ * by the system.
+ */
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+
+
+
+
+
+
+
+#endif
+
+
+
+
+
+
+/*
+ * Missing ANSI types.
+ *
+ * uintmax_t --
+ * Largest unsigned type, used to align structures in memory. We don't store
+ * floating point types in structures, so integral types should be sufficient
+ * (and we don't have to worry about systems that store floats in other than
+ * power-of-2 numbers of bytes). Additionally this fixes compilers that rewrite
+ * structure assignments and ANSI C memcpy calls to be in-line instructions
+ * that happen to require alignment.
+ *
+ * uintptr_t --
+ * Unsigned type that's the same size as a pointer. There are places where
+ * DB modifies pointers by discarding the bottom bits to guarantee alignment.
+ * We can't use uintmax_t, it may be larger than the pointer, and compilers
+ * get upset about that. So far we haven't run on any machine where there's
+ * no unsigned type the same size as a pointer -- here's hoping.
+ */
+
+
+
+
+
+
+
+
+
+
+/*
+ * Sequences are only available on machines with 64-bit integral types.
+ */
+typedef int64_t db_seq_t;
+
+/* Thread and process identification. */
+typedef pthread_t db_threadid_t;
+
+/* Basic types that are exported or quasi-exported. */
+typedef uint32_t db_pgno_t; /* Page number type. */
+typedef uint16_t db_indx_t; /* Page offset type. */
+#define DB_MAX_PAGES 0xffffffff /* >= # of pages in a file */
+
+typedef uint32_t db_recno_t; /* Record number type. */
+#define DB_MAX_RECORDS 0xffffffff /* >= # of records in a tree */
+
+typedef uint32_t db_timeout_t; /* Type of a timeout. */
+
+/*
+ * Region offsets are the difference between a pointer in a region and the
+ * region's base address. With private environments, both addresses are the
+ * result of calling malloc, and we can't assume anything about what malloc
+ * will return, so region offsets have to be able to hold differences between
+ * arbitrary pointers.
+ */
+typedef uintptr_t roff_t;
+
+/*
+ * Forward structure declarations, so we can declare pointers and
+ * applications can get type checking.
+ */
+struct __db; typedef struct __db DB;
+struct __db_bt_stat; typedef struct __db_bt_stat DB_BTREE_STAT;
+struct __db_cipher; typedef struct __db_cipher DB_CIPHER;
+struct __db_compact; typedef struct __db_compact DB_COMPACT;
+struct __db_dbt; typedef struct __db_dbt DBT;
+struct __db_env; typedef struct __db_env DB_ENV;
+struct __db_h_stat; typedef struct __db_h_stat DB_HASH_STAT;
+struct __db_ilock; typedef struct __db_ilock DB_LOCK_ILOCK;
+struct __db_lock_stat; typedef struct __db_lock_stat DB_LOCK_STAT;
+struct __db_lock_hstat; typedef struct __db_lock_hstat DB_LOCK_HSTAT;
+struct __db_lock_u; typedef struct __db_lock_u DB_LOCK;
+struct __db_locker; typedef struct __db_locker DB_LOCKER;
+struct __db_lockreq; typedef struct __db_lockreq DB_LOCKREQ;
+struct __db_locktab; typedef struct __db_locktab DB_LOCKTAB;
+struct __db_log; typedef struct __db_log DB_LOG;
+struct __db_log_cursor; typedef struct __db_log_cursor DB_LOGC;
+struct __db_log_stat; typedef struct __db_log_stat DB_LOG_STAT;
+struct __db_lsn; typedef struct __db_lsn DB_LSN;
+struct __db_mpool; typedef struct __db_mpool DB_MPOOL;
+struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT;
+struct __db_mpool_stat; typedef struct __db_mpool_stat DB_MPOOL_STAT;
+struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE;
+struct __db_mutex_stat; typedef struct __db_mutex_stat DB_MUTEX_STAT;
+struct __db_mutex_t; typedef struct __db_mutex_t DB_MUTEX;
+struct __db_mutexmgr; typedef struct __db_mutexmgr DB_MUTEXMGR;
+struct __db_preplist; typedef struct __db_preplist DB_PREPLIST;
+struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT;
+struct __db_rep; typedef struct __db_rep DB_REP;
+struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT;
+struct __db_repmgr_site; \
+ typedef struct __db_repmgr_site DB_REPMGR_SITE;
+struct __db_repmgr_stat; \
+ typedef struct __db_repmgr_stat DB_REPMGR_STAT;
+struct __db_seq_record; typedef struct __db_seq_record DB_SEQ_RECORD;
+struct __db_seq_stat; typedef struct __db_seq_stat DB_SEQUENCE_STAT;
+struct __db_sequence; typedef struct __db_sequence DB_SEQUENCE;
+struct __db_txn; typedef struct __db_txn DB_TXN;
+struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE;
+struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT;
+struct __db_txnmgr; typedef struct __db_txnmgr DB_TXNMGR;
+struct __dbc; typedef struct __dbc DBC;
+struct __dbc_internal; typedef struct __dbc_internal DBC_INTERNAL;
+struct __fh_t; typedef struct __fh_t DB_FH;
+struct __fname; typedef struct __fname FNAME;
+struct __key_range; typedef struct __key_range DB_KEY_RANGE;
+struct __mpoolfile; typedef struct __mpoolfile MPOOLFILE;
+
+/* Key/data structure -- a Data-Base Thang. */
+struct __db_dbt {
+ void *data; /* Key/data */
+ uint32_t size; /* key/data length */
+
+ uint32_t ulen; /* RO: length of user buffer. */
+ uint32_t dlen; /* RO: get/put record length. */
+ uint32_t doff; /* RO: get/put record offset. */
+
+ void *app_data;
+
+#define DB_DBT_APPMALLOC 0x001 /* Callback allocated memory. */
+#define DB_DBT_DUPOK 0x002 /* Insert if duplicate. */
+#define DB_DBT_ISSET 0x004 /* Lower level calls set value. */
+#define DB_DBT_MALLOC 0x008 /* Return in malloc'd memory. */
+#define DB_DBT_MULTIPLE 0x010 /* References multiple records. */
+#define DB_DBT_PARTIAL 0x020 /* Partial put/get. */
+#define DB_DBT_REALLOC 0x040 /* Return in realloc'd memory. */
+#define DB_DBT_USERCOPY 0x080 /* Use the user-supplied callback. */
+#define DB_DBT_USERMEM 0x100 /* Return in user's memory. */
+ uint32_t flags;
+};
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ */
+#define DB_CREATE 0x0000001 /* Create file as necessary. */
+#define DB_DURABLE_UNKNOWN 0x0000002 /* Durability on open (internal). */
+#define DB_FORCE 0x0000004 /* Force (anything). */
+#define DB_MULTIVERSION 0x0000008 /* Multiversion concurrency control. */
+#define DB_NOMMAP 0x0000010 /* Don't mmap underlying file. */
+#define DB_RDONLY 0x0000020 /* Read-only (O_RDONLY). */
+#define DB_RECOVER 0x0000040 /* Run normal recovery. */
+#define DB_THREAD 0x0000080 /* Applications are threaded. */
+#define DB_TRUNCATE 0x0000100 /* Discard existing DB (O_TRUNC). */
+#define DB_TXN_NOSYNC 0x0000200 /* Do not sync log on commit. */
+#define DB_TXN_NOWAIT 0x0000400 /* Do not wait for locks. */
+#define DB_TXN_NOT_DURABLE 0x0000800 /* Do not log changes. */
+#define DB_TXN_WRITE_NOSYNC 0x0001000 /* Write the log but don't sync. */
+#define DB_SPARE_FLAG 0x0002000 /* Spare. */
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ *
+ * DB_AUTO_COMMIT:
+ * DB_ENV->set_flags, DB->open
+ * (Note: until the 4.3 release, legal to DB->associate, DB->del,
+ * DB->put, DB->remove, DB->rename and DB->truncate, and others.)
+ * DB_READ_COMMITTED:
+ * DB->cursor, DB->get, DB->join, DBcursor->get, DB_ENV->txn_begin
+ * DB_READ_UNCOMMITTED:
+ * DB->cursor, DB->get, DB->join, DB->open, DBcursor->get,
+ * DB_ENV->txn_begin
+ * DB_TXN_SNAPSHOT:
+ * DB_ENV->set_flags, DB_ENV->txn_begin, DB->cursor
+ *
+ * !!!
+ * The DB_READ_COMMITTED and DB_READ_UNCOMMITTED bit masks can't be changed
+ * without also changing the masks for the flags that can be OR'd into DB
+ * access method and cursor operation values.
+ */
+#define DB_IGNORE_LEASE 0x01000000/* Ignore leases. */
+#define DB_AUTO_COMMIT 0x02000000/* Implied transaction. */
+
+#define DB_READ_COMMITTED 0x04000000/* Degree 2 isolation. */
+#define DB_DEGREE_2 0x04000000/* Historic name. */
+
+#define DB_READ_UNCOMMITTED 0x08000000/* Degree 1 isolation. */
+#define DB_DIRTY_READ 0x08000000/* Historic name. */
+
+#define DB_TXN_SNAPSHOT 0x10000000/* Snapshot isolation. */
+
+/*
+ * Flags common to db_env_create and db_create.
+ */
+#define DB_CXX_NO_EXCEPTIONS 0x0000001 /* C++: return error values. */
+
+/*
+ * Flags private to db_env_create.
+ * Shared flags up to 0x0000001 */
+#define DB_RPCCLIENT 0x0000002 /* An RPC client environment. */
+
+/*
+ * Flags private to db_create.
+ * Shared flags up to 0x0000001 */
+#define DB_XA_CREATE 0x0000002 /* Open in an XA environment. */
+
+/*
+ * Flags shared by DB_ENV->remove and DB_ENV->open.
+ * Shared flags up to 0x0002000 */
+#define DB_USE_ENVIRON 0x0004000 /* Use the environment. */
+#define DB_USE_ENVIRON_ROOT 0x0008000 /* Use the environment if root. */
+/*
+ * Flags private to DB_ENV->open.
+ */
+#define DB_INIT_CDB 0x0010000 /* Concurrent Access Methods. */
+#define DB_INIT_LOCK 0x0020000 /* Initialize locking. */
+#define DB_INIT_LOG 0x0040000 /* Initialize logging. */
+#define DB_INIT_MPOOL 0x0080000 /* Initialize mpool. */
+#define DB_INIT_REP 0x0100000 /* Initialize replication. */
+#define DB_INIT_TXN 0x0200000 /* Initialize transactions. */
+#define DB_LOCKDOWN 0x0400000 /* Lock memory into physical core. */
+#define DB_PRIVATE 0x0800000 /* DB_ENV is process local. */
+#define DB_RECOVER_FATAL 0x1000000 /* Run catastrophic recovery. */
+#define DB_REGISTER 0x2000000 /* Multi-process registry. */
+#define DB_SYSTEM_MEM 0x4000000 /* Use system-backed memory. */
+
+#define DB_JOINENV 0x0 /* Compatibility. */
+
+/*
+ * Flags private to DB->open.
+ * Shared flags up to 0x0002000 */
+#define DB_EXCL 0x0004000 /* Exclusive open (O_EXCL). */
+#define DB_FCNTL_LOCKING 0x0008000 /* UNDOC: fcntl(2) locking. */
+#define DB_NO_AUTO_COMMIT 0x0010000 /* Override env-wide AUTOCOMMIT. */
+#define DB_RDWRMASTER 0x0020000 /* UNDOC: allow subdb master open R/W */
+#define DB_WRITEOPEN 0x0040000 /* UNDOC: open with write lock. */
+
+/*
+ * Flags private to DB->associate.
+ * Shared flags up to 0x0002000 */
+#define DB_IMMUTABLE_KEY 0x0004000 /* Secondary key is immutable. */
+/* Shared flags at 0x1000000 */
+
+/*
+ * Flags private to DB_ENV->txn_begin.
+ * Shared flags up to 0x0002000 */
+#define DB_TXN_SYNC 0x0004000 /* Always sync log on commit. */
+#define DB_TXN_WAIT 0x0008000 /* Always wait for locks in this TXN. */
+
+/*
+ * Flags private to DB_ENV->txn_checkpoint.
+ * Shared flags up to 0x0002000 */
+#define DB_CKP_INTERNAL 0x0004000 /* Internally generated checkpoint. */
+
+/*
+ * Flags private to DB_ENV->set_encrypt.
+ */
+#define DB_ENCRYPT_AES 0x0000001 /* AES, assumes SHA1 checksum */
+
+/*
+ * Flags private to DB_ENV->set_flags.
+ * Shared flags up to 0x00002000 */
+#define DB_CDB_ALLDB 0x00004000/* Set CDB locking per environment. */
+#define DB_DIRECT_DB 0x00008000/* Don't buffer databases in the OS. */
+#define DB_DIRECT_LOG 0x00010000/* Don't buffer log files in the OS. */
+#define DB_DSYNC_DB 0x00020000/* Set O_DSYNC on the databases. */
+#define DB_DSYNC_LOG 0x00040000/* Set O_DSYNC on the log. */
+#define DB_LOG_AUTOREMOVE 0x00080000/* Automatically remove log files. */
+#define DB_LOG_INMEMORY 0x00100000/* Store logs in buffers in memory. */
+#define DB_NOLOCKING 0x00200000/* Set locking/mutex behavior. */
+#define DB_NOPANIC 0x00400000/* Set panic state per DB_ENV. */
+#define DB_OVERWRITE 0x00800000/* Overwrite unlinked region files. */
+#define DB_PANIC_ENVIRONMENT 0x01000000/* Set panic state per environment. */
+/* Shared flags at 0x02000000 */
+/* Shared flags at 0x04000000 */
+/* Shared flags at 0x08000000 */
+/* Shared flags at 0x10000000 */
+#define DB_REGION_INIT 0x20000000/* Page-fault regions on open. */
+#define DB_TIME_NOTGRANTED 0x40000000/* Return NOTGRANTED on timeout. */
+#define DB_YIELDCPU 0x80000000/* Yield the CPU (a lot). */
+
+/*
+ * Flags private to DB->set_feedback's callback.
+ */
+#define DB_UPGRADE 0x0000001 /* Upgrading. */
+#define DB_VERIFY 0x0000002 /* Verifying. */
+
+/*
+ * Flags private to DB->compact.
+ * Shared flags up to 0x00002000
+ */
+#define DB_FREELIST_ONLY 0x00004000 /* Just sort and truncate. */
+#define DB_FREE_SPACE 0x00008000 /* Free space. */
+#define DB_COMPACT_FLAGS \
+ (DB_FREELIST_ONLY | DB_FREE_SPACE)
+
+/*
+ * Flags private to DB_MPOOLFILE->open.
+ * Shared flags up to 0x0002000 */
+#define DB_DIRECT 0x0004000 /* Don't buffer the file in the OS. */
+#define DB_EXTENT 0x0008000 /* internal: dealing with an extent. */
+#define DB_ODDFILESIZE 0x0010000 /* Truncate file to N * pgsize. */
+
+/*
+ * Flags private to DB->set_flags.
+ * Shared flags up to 0x00002000 */
+#define DB_CHKSUM 0x00004000 /* Do checksumming */
+#define DB_DUP 0x00008000 /* Btree, Hash: duplicate keys. */
+#define DB_DUPSORT 0x00010000 /* Btree, Hash: sorted duplicate keys. */
+#define DB_ENCRYPT 0x00020000 /* Database is encrypted. */
+#define DB_INORDER 0x00040000 /* Queue: strict ordering on consume */
+#define DB_RECNUM 0x00080000 /* Btree: record numbers. */
+#define DB_RENUMBER 0x00100000 /* Recno: renumber on insert/delete. */
+#define DB_REVSPLITOFF 0x00200000 /* Btree: turn off reverse splits. */
+#define DB_SNAPSHOT 0x00400000 /* Recno: snapshot the input. */
+
+/*
+ * Flags private to the DB_ENV->stat_print, DB->stat and DB->stat_print methods.
+ */
+#define DB_FAST_STAT 0x0000001 /* Don't traverse the database. */
+#define DB_STAT_ALL 0x0000002 /* Print: Everything. */
+#define DB_STAT_CLEAR 0x0000004 /* Clear stat after returning values. */
+#define DB_STAT_LOCK_CONF 0x0000008 /* Print: Lock conflict matrix. */
+#define DB_STAT_LOCK_LOCKERS 0x0000010 /* Print: Lockers. */
+#define DB_STAT_LOCK_OBJECTS 0x0000020 /* Print: Lock objects. */
+#define DB_STAT_LOCK_PARAMS 0x0000040 /* Print: Lock parameters. */
+#define DB_STAT_MEMP_HASH 0x0000080 /* Print: Mpool hash buckets. */
+#define DB_STAT_NOERROR 0x0000100 /* Internal: continue on error. */
+#define DB_STAT_SUBSYSTEM 0x0000200 /* Print: Subsystems too. */
+
+/*
+ * Flags private to DB->join.
+ */
+#define DB_JOIN_NOSORT 0x0000001 /* Don't try to optimize join. */
+
+/*
+ * Flags private to DB->verify.
+ */
+#define DB_AGGRESSIVE 0x0000001 /* Salvage whatever could be data.*/
+#define DB_NOORDERCHK 0x0000002 /* Skip sort order/hashing check. */
+#define DB_ORDERCHKONLY 0x0000004 /* Only perform the order check. */
+#define DB_PR_PAGE 0x0000008 /* Show page contents (-da). */
+#define DB_PR_RECOVERYTEST 0x0000010 /* Recovery test (-dr). */
+#define DB_PRINTABLE 0x0000020 /* Use printable format for salvage. */
+#define DB_SALVAGE 0x0000040 /* Salvage what looks like data. */
+#define DB_UNREF 0x0000080 /* Report unreferenced pages. */
+/*
+ * !!!
+ * These must not go over 0x8000, or they will collide with the flags
+ * used by __bam_vrfy_subtree.
+ */
+
+/*
+ * Flags private to DB->rep_set_transport's send callback.
+ */
+#define DB_REP_ANYWHERE 0x0000001 /* Message can be serviced anywhere. */
+#define DB_REP_NOBUFFER 0x0000002 /* Do not buffer this message. */
+#define DB_REP_PERMANENT 0x0000004 /* Important--app. may want to flush. */
+#define DB_REP_REREQUEST 0x0000008 /* This msg already been requested. */
+
+/*******************************************************
+ * Mutexes.
+ *******************************************************/
+typedef uint32_t db_mutex_t;
+
+/*
+ * Flag arguments for DbEnv.mutex_alloc, DbEnv.is_alive and for the
+ * DB_MUTEX structure.
+ */
+#define DB_MUTEX_ALLOCATED 0x01 /* Mutex currently allocated. */
+#define DB_MUTEX_LOCKED 0x02 /* Mutex currently locked. */
+#define DB_MUTEX_LOGICAL_LOCK 0x04 /* Mutex backs a database lock. */
+#define DB_MUTEX_PROCESS_ONLY 0x08 /* Mutex private to a process. */
+#define DB_MUTEX_SELF_BLOCK 0x10 /* Must be able to block self. */
+
+struct __db_mutex_stat {
+ /* The following fields are maintained in the region's copy. */
+ uint32_t st_mutex_align; /* Mutex alignment */
+ uint32_t st_mutex_tas_spins; /* Mutex test-and-set spins */
+ uint32_t st_mutex_cnt; /* Mutex count */
+ uint32_t st_mutex_free; /* Available mutexes */
+ uint32_t st_mutex_inuse; /* Mutexes in use */
+ uint32_t st_mutex_inuse_max; /* Maximum mutexes ever in use */
+
+ /* The following fields are filled-in from other places. */
+#ifndef __TEST_DB_NO_STATISTICS
+ uint32_t st_region_wait; /* Region lock granted after wait. */
+ uint32_t st_region_nowait; /* Region lock granted without wait. */
+ roff_t st_regsize; /* Region size. */
+#endif
+};
+
+/* This is the length of the buffer passed to DB_ENV->thread_id_string() */
+#define DB_THREADID_STRLEN 128
+
+/*******************************************************
+ * Locking.
+ *******************************************************/
+#define DB_LOCKVERSION 1
+
+#define DB_FILE_ID_LEN 20 /* Unique file ID length. */
+
+/*
+ * Deadlock detector modes; used in the DB_ENV structure to configure the
+ * locking subsystem.
+ */
+#define DB_LOCK_NORUN 0
+#define DB_LOCK_DEFAULT 1 /* Default policy. */
+#define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */
+#define DB_LOCK_MAXLOCKS 3 /* Select locker with max locks. */
+#define DB_LOCK_MAXWRITE 4 /* Select locker with max writelocks. */
+#define DB_LOCK_MINLOCKS 5 /* Select locker with min locks. */
+#define DB_LOCK_MINWRITE 6 /* Select locker with min writelocks. */
+#define DB_LOCK_OLDEST 7 /* Select oldest locker. */
+#define DB_LOCK_RANDOM 8 /* Select random locker. */
+#define DB_LOCK_YOUNGEST 9 /* Select youngest locker. */
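+
+/*
+ * A minimal sketch (assuming a DB_ENV handle "dbenv"; error handling
+ * omitted): one of the policies above is installed with
+ * DB_ENV->set_lk_detect, and the lock subsystem then uses it whenever the
+ * deadlock detector runs.
+ *
+ *	(void)dbenv->set_lk_detect(dbenv, DB_LOCK_MINWRITE);
+ */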
+
+/* Flag values for lock_vec(), lock_get(). */
+#define DB_LOCK_ABORT 0x001 /* Internal: Lock during abort. */
+#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */
+#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */
+#define DB_LOCK_SET_TIMEOUT 0x008 /* Internal: set lock timeout. */
+#define DB_LOCK_SWITCH 0x010 /* Internal: switch existing lock. */
+#define DB_LOCK_UPGRADE 0x020 /* Internal: upgrade existing lock. */
+
+/* Flag values for DbEnv.set_timeout. */
+#define DB_SET_LOCK_TIMEOUT 1 /* Set lock timeout */
+#define DB_SET_TXN_NOW 2 /* Timeout lock now (internal) */
+#define DB_SET_TXN_TIMEOUT 3 /* Set transaction timeout */
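+
+/*
+ * A minimal sketch (assuming a DB_ENV handle "dbenv"; timeouts are given
+ * in microseconds and error handling is omitted): a five-second lock
+ * timeout applied environment-wide with DB_ENV->set_timeout.
+ *
+ *	(void)dbenv->set_timeout(dbenv, 5 * 1000000, DB_SET_LOCK_TIMEOUT);
+ */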
+
+/*
+ * Simple R/W lock modes and modes for multi-granularity intention locking.
+ *
+ * !!!
+ * These values are NOT random, as they are used as an index into the lock
+ * conflicts arrays, i.e., DB_LOCK_IWRITE must be == 4, and DB_LOCK_IREAD
+ * must be == 5.
+ */
+typedef enum {
+ DB_LOCK_NG=0, /* Not granted. */
+ DB_LOCK_READ=1, /* Shared/read. */
+ DB_LOCK_WRITE=2, /* Exclusive/write. */
+ DB_LOCK_WAIT=3, /* Wait for event */
+ DB_LOCK_IWRITE=4, /* Intent exclusive/write. */
+ DB_LOCK_IREAD=5, /* Intent to share/read. */
+ DB_LOCK_IWR=6, /* Intent to read and write. */
+ DB_LOCK_READ_UNCOMMITTED=7, /* Degree 1 isolation. */
+ DB_LOCK_WWRITE=8 /* Was Written. */
+} db_lockmode_t;
+
+/*
+ * Request types.
+ */
+typedef enum {
+ DB_LOCK_DUMP=0, /* Display held locks. */
+ DB_LOCK_GET=1, /* Get the lock. */
+ DB_LOCK_GET_TIMEOUT=2, /* Get lock with a timeout. */
+ DB_LOCK_INHERIT=3, /* Pass locks to parent. */
+ DB_LOCK_PUT=4, /* Release the lock. */
+ DB_LOCK_PUT_ALL=5, /* Release locker's locks. */
+ DB_LOCK_PUT_OBJ=6, /* Release locker's locks on obj. */
+ DB_LOCK_PUT_READ=7, /* Release locker's read locks. */
+ DB_LOCK_TIMEOUT=8, /* Force a txn to timeout. */
+ DB_LOCK_TRADE=9, /* Trade locker ids on a lock. */
+ DB_LOCK_UPGRADE_WRITE=10 /* Upgrade writes for dirty reads. */
+} db_lockop_t;
+
+/*
+ * Status of a lock.
+ */
+typedef enum {
+ DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */
+ DB_LSTAT_EXPIRED=2, /* Lock has expired. */
+ DB_LSTAT_FREE=3, /* Lock is unallocated. */
+ DB_LSTAT_HELD=4, /* Lock is currently held. */
+ DB_LSTAT_PENDING=5, /* Lock was waiting and has been
+ * promoted; waiting for the owner
+ * to run and upgrade it to held. */
+ DB_LSTAT_WAITING=6 /* Lock is on the wait queue. */
+}db_status_t;
+
+/* Lock statistics structure. */
+struct __db_lock_stat {
+ uint32_t st_id; /* Last allocated locker ID. */
+ uint32_t st_cur_maxid; /* Current maximum unused ID. */
+ uint32_t st_maxlocks; /* Maximum number of locks in table. */
+ uint32_t st_maxlockers; /* Maximum num of lockers in table. */
+ uint32_t st_maxobjects; /* Maximum num of objects in table. */
+ int st_nmodes; /* Number of lock modes. */
+ uint32_t st_nlockers; /* Current number of lockers. */
+#ifndef __TEST_DB_NO_STATISTICS
+ uint32_t st_nlocks; /* Current number of locks. */
+ uint32_t st_maxnlocks; /* Maximum number of locks so far. */
+ uint32_t st_maxnlockers; /* Maximum number of lockers so far. */
+ uint32_t st_nobjects; /* Current number of objects. */
+ uint32_t st_maxnobjects; /* Maximum number of objects so far. */
+ uint32_t st_nrequests; /* Number of lock gets. */
+ uint32_t st_nreleases; /* Number of lock puts. */
+ uint32_t st_nupgrade; /* Number of lock upgrades. */
+ uint32_t st_ndowngrade; /* Number of lock downgrades. */
+ uint32_t st_lock_wait; /* Lock conflicts w/ subsequent wait */
+ uint32_t st_lock_nowait; /* Lock conflicts w/o subsequent wait */
+ uint32_t st_ndeadlocks; /* Number of lock deadlocks. */
+ db_timeout_t st_locktimeout; /* Lock timeout. */
+ uint32_t st_nlocktimeouts; /* Number of lock timeouts. */
+ db_timeout_t st_txntimeout; /* Transaction timeout. */
+ uint32_t st_ntxntimeouts; /* Number of transaction timeouts. */
+ uint32_t st_objs_wait; /* Object lock granted after wait. */
+ uint32_t st_objs_nowait; /* Object lock granted without wait. */
+ uint32_t st_lockers_wait; /* Locker lock granted after wait. */
+ uint32_t st_lockers_nowait; /* Locker lock granted without wait. */
+ uint32_t st_locks_wait; /* Lock lock granted after wait. */
+ uint32_t st_locks_nowait; /* Lock lock granted without wait. */
+ uint32_t st_region_wait; /* Region lock granted after wait. */
+ uint32_t st_region_nowait; /* Region lock granted without wait. */
+ uint32_t st_hash_len; /* Max length of bucket. */
+ roff_t st_regsize; /* Region size. */
+#endif
+};
+
+struct __db_lock_hstat {
+ uint32_t st_nrequests; /* Number of lock gets. */
+ uint32_t st_nreleases; /* Number of lock puts. */
+ uint32_t st_nupgrade; /* Number of lock upgrades. */
+ uint32_t st_ndowngrade; /* Number of lock downgrades. */
+ uint32_t st_lock_wait; /* Lock conflicts w/ subsequent wait */
+ uint32_t st_lock_nowait; /* Lock conflicts w/o subsequent wait */
+ uint32_t st_nlocktimeouts; /* Number of lock timeouts. */
+ uint32_t st_ntxntimeouts; /* Number of transaction timeouts. */
+ uint32_t st_hash_len; /* Max length of bucket. */
+};
+
+/*
+ * DB_LOCK_ILOCK --
+ * Internal DB access method lock.
+ */
+struct __db_ilock {
+ db_pgno_t pgno; /* Page being locked. */
+ uint8_t fileid[DB_FILE_ID_LEN];/* File id. */
+#define DB_HANDLE_LOCK 1
+#define DB_RECORD_LOCK 2
+#define DB_PAGE_LOCK 3
+ uint32_t type; /* Type of lock. */
+};
+
+/*
+ * DB_LOCK --
+ * The structure is allocated by the caller and filled in during a
+ * lock_get request (or a lock_vec/DB_LOCK_GET).
+ */
+struct __db_lock_u {
+ roff_t off; /* Offset of the lock in the region */
+ uint32_t ndx; /* Index of the object referenced by
+ * this lock; used for locking. */
+ uint32_t gen; /* Generation number of this lock. */
+ db_lockmode_t mode; /* mode of this lock. */
+};
+
+/* Lock request structure. */
+struct __db_lockreq {
+ db_lockop_t op; /* Operation. */
+ db_lockmode_t mode; /* Requested mode. */
+ db_timeout_t timeout; /* Time to expire lock. */
+ DBT *obj; /* Object being locked. */
+ DB_LOCK lock; /* Lock returned. */
+};
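+
+/*
+ * A minimal sketch of a single-request DB_ENV->lock_vec call (assuming a
+ * DB_ENV handle "dbenv" and a DBT "obj" naming the object to lock; the
+ * locker id comes from DB_ENV->lock_id and error handling is omitted):
+ *
+ *	uint32_t locker;
+ *	DB_LOCKREQ req;
+ *
+ *	(void)dbenv->lock_id(dbenv, &locker);
+ *	memset(&req, 0, sizeof(req));
+ *	req.op = DB_LOCK_GET;
+ *	req.mode = DB_LOCK_READ;
+ *	req.obj = &obj;
+ *	(void)dbenv->lock_vec(dbenv, locker, 0, &req, 1, NULL);
+ */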
+
+/*******************************************************
+ * Logging.
+ *******************************************************/
+#define DB_LOGVERSION 13 /* Current log version. */
+#define DB_LOGOLDVER 8 /* Oldest log version supported. */
+#define DB_LOGMAGIC 0x040988
+
+/* Flag values for DB_ENV->log_archive(). */
+#define DB_ARCH_ABS 0x001 /* Absolute pathnames. */
+#define DB_ARCH_DATA 0x002 /* Data files. */
+#define DB_ARCH_LOG 0x004 /* Log files. */
+#define DB_ARCH_REMOVE 0x008 /* Remove log files. */
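+
+/*
+ * A minimal sketch (assuming a DB_ENV handle "dbenv"; error handling
+ * omitted): DB_ENV->log_archive returns a NULL-terminated, malloc'd array
+ * of log file names selected by the flags above; the caller frees the
+ * array.
+ *
+ *	char **list, **p;
+ *
+ *	if (dbenv->log_archive(dbenv, &list, DB_ARCH_ABS) == 0 &&
+ *	    list != NULL) {
+ *		for (p = list; *p != NULL; ++p)
+ *			printf("%s\n", *p);
+ *		free(list);
+ *	}
+ */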
+
+/* Flag values for DB_ENV->log_put(). */
+#define DB_FLUSH 0x001 /* Flush data to disk (public). */
+#define DB_LOG_CHKPNT 0x002 /* Flush supports a checkpoint */
+#define DB_LOG_COMMIT 0x004 /* Flush supports a commit */
+#define DB_LOG_NOCOPY 0x008 /* Don't copy data */
+#define DB_LOG_NOT_DURABLE 0x010 /* Do not log; keep in memory */
+#define DB_LOG_WRNOSYNC 0x020 /* Write, don't sync log_put */
+
+/*
+ * A DB_LSN has two parts, a fileid which identifies a specific file, and an
+ * offset within that file. The fileid is an unsigned 4-byte quantity that
+ * uniquely identifies a file within the log directory -- currently a simple
+ * counter inside the log. The offset is also an unsigned 4-byte value. The
+ * log manager guarantees the offset is never more than 4 bytes by switching
+ * to a new log file before the maximum length imposed by an unsigned 4-byte
+ * offset is reached.
+ */
+struct __db_lsn {
+ uint32_t file; /* File ID. */
+ uint32_t offset; /* File offset. */
+};
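+
+/*
+ * A minimal sketch: two DB_LSNs are ordered with the public log_compare
+ * function, which returns <0, 0 or >0 in the usual way ("a" and "b" are
+ * assumed to be valid LSNs taken from the log).
+ *
+ *	DB_LSN a, b;
+ *	...
+ *	if (log_compare(&a, &b) < 0)
+ *		... "a" was written before "b" ...
+ */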
+
+/*
+ * Application-specified log record types start at DB_user_BEGIN, and must not
+ * equal or exceed DB_debug_FLAG.
+ *
+ * DB_debug_FLAG is the high-bit of the uint32_t that specifies a log record
+ * type. If the flag is set, it's a log record that was logged for debugging
+ * purposes only, even if it reflects a database change -- the change was part
+ * of a non-durable transaction.
+ */
+#define DB_user_BEGIN 10000
+#define DB_debug_FLAG 0x80000000
+
+/*
+ * DB_LOGC --
+ * Log cursor.
+ */
+struct __db_log_cursor {
+ DB_ENV *dbenv; /* Enclosing dbenv. */
+
+ DB_FH *fhp; /* File handle. */
+ DB_LSN lsn; /* Cursor: LSN */
+ uint32_t len; /* Cursor: record length */
+ uint32_t prev; /* Cursor: previous record's offset */
+
+ DBT dbt; /* Return DBT. */
+ DB_LSN p_lsn; /* Persist LSN. */
+ uint32_t p_version; /* Persist version. */
+
+ uint8_t *bp; /* Allocated read buffer. */
+ uint32_t bp_size; /* Read buffer length in bytes. */
+ uint32_t bp_rlen; /* Read buffer valid data length. */
+ DB_LSN bp_lsn; /* Read buffer first byte LSN. */
+
+ uint32_t bp_maxrec; /* Max record length in the log file. */
+
+ /* DB_LOGC PUBLIC HANDLE LIST BEGIN */
+ int (*close) __P((DB_LOGC *, uint32_t));
+ int (*get) __P((DB_LOGC *, DB_LSN *, DBT *, uint32_t));
+ int (*version) __P((DB_LOGC *, uint32_t *, uint32_t));
+ /* DB_LOGC PUBLIC HANDLE LIST END */
+
+#define DB_LOG_DISK 0x01 /* Log record came from disk. */
+#define DB_LOG_LOCKED 0x02 /* Log region already locked */
+#define DB_LOG_SILENT_ERR 0x04 /* Turn-off error messages. */
+ uint32_t flags;
+};
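+
+/*
+ * A minimal read-only traversal sketch (assuming a DB_ENV handle "dbenv";
+ * error handling omitted): a log cursor is created with DB_ENV->log_cursor,
+ * stepped forward with DB_NEXT, and closed when done.
+ *
+ *	DB_LOGC *logc;
+ *	DB_LSN lsn;
+ *	DBT rec;
+ *
+ *	memset(&rec, 0, sizeof(rec));
+ *	(void)dbenv->log_cursor(dbenv, &logc, 0);
+ *	while (logc->get(logc, &lsn, &rec, DB_NEXT) == 0)
+ *		... process the record in "rec" found at "lsn" ...
+ *	(void)logc->close(logc, 0);
+ */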
+
+/* Log statistics structure. */
+struct __db_log_stat {
+ uint32_t st_magic; /* Log file magic number. */
+ uint32_t st_version; /* Log file version number. */
+ int st_mode; /* Log file permissions mode. */
+ uint32_t st_lg_bsize; /* Log buffer size. */
+ uint32_t st_lg_size; /* Log file size. */
+ uint32_t st_wc_bytes; /* Bytes to log since checkpoint. */
+ uint32_t st_wc_mbytes; /* Megabytes to log since checkpoint. */
+#ifndef __TEST_DB_NO_STATISTICS
+ uint32_t st_record; /* Records entered into the log. */
+ uint32_t st_w_bytes; /* Bytes to log. */
+ uint32_t st_w_mbytes; /* Megabytes to log. */
+ uint32_t st_wcount; /* Total I/O writes to the log. */
+ uint32_t st_wcount_fill; /* Overflow writes to the log. */
+ uint32_t st_rcount; /* Total I/O reads from the log. */
+ uint32_t st_scount; /* Total syncs to the log. */
+ uint32_t st_region_wait; /* Region lock granted after wait. */
+ uint32_t st_region_nowait; /* Region lock granted without wait. */
+ uint32_t st_cur_file; /* Current log file number. */
+ uint32_t st_cur_offset; /* Current log file offset. */
+ uint32_t st_disk_file; /* Known on disk log file number. */
+ uint32_t st_disk_offset; /* Known on disk log file offset. */
+ uint32_t st_maxcommitperflush; /* Max number of commits in a flush. */
+ uint32_t st_mincommitperflush; /* Min number of commits in a flush. */
+ roff_t st_regsize; /* Region size. */
+#endif
+};
+
+/*
+ * We need to record the first log record of a transaction. For
+ * user-defined logging this macro returns the place to put that
+ * information, if it is needed, in rlsnp; otherwise it leaves it unchanged.
+ * We also need to track the last record of the transaction; this macro
+ * returns the place to put that information as well.
+ */
+#define DB_SET_TXN_LSNP(txn, blsnp, llsnp) \
+ ((txn)->set_txn_lsnp(txn, blsnp, llsnp))
+
+/*******************************************************
+ * Shared buffer cache (mpool).
+ *******************************************************/
+/* Flag values for DB_MPOOLFILE->get. */
+#define DB_MPOOL_CREATE 0x001 /* Create a page. */
+#define DB_MPOOL_DIRTY 0x002 /* Get page for an update. */
+#define DB_MPOOL_EDIT 0x004 /* Modify without copying. */
+#define DB_MPOOL_FREE 0x008 /* Free page if present. */
+#define DB_MPOOL_LAST 0x010 /* Return the last page. */
+#define DB_MPOOL_NEW 0x020 /* Create a new page. */
+
+/* Undocumented flag value for DB_MPOOLFILE->close. */
+#define DB_MPOOL_DISCARD 0x001 /* Discard file. */
+
+/* Flags values for DB_MPOOLFILE->set_flags. */
+#define DB_MPOOL_NOFILE 0x001 /* Never open a backing file. */
+#define DB_MPOOL_UNLINK 0x002 /* Unlink the file on last close. */
+
+/* Priority values for DB_MPOOLFILE->{put,set_priority}. */
+typedef enum {
+ DB_PRIORITY_UNCHANGED=0,
+ DB_PRIORITY_VERY_LOW=1,
+ DB_PRIORITY_LOW=2,
+ DB_PRIORITY_DEFAULT=3,
+ DB_PRIORITY_HIGH=4,
+ DB_PRIORITY_VERY_HIGH=5
+} DB_CACHE_PRIORITY;
+
+/* Per-process DB_MPOOLFILE information. */
+struct __db_mpoolfile {
+ DB_FH *fhp; /* Underlying file handle. */
+
+ /*
+ * !!!
+ * The ref, pinref and q fields are protected by the region lock.
+ */
+ uint32_t ref; /* Reference count. */
+
+ uint32_t pinref; /* Pinned block reference count. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_mpoolfile) q;
+ */
+ struct {
+ struct __db_mpoolfile *tqe_next;
+ struct __db_mpoolfile **tqe_prev;
+ } q; /* Linked list of DB_MPOOLFILE's. */
+
+ /*
+ * !!!
+ * The rest of the fields (with the exception of the MP_FLUSH flag)
+ * are not thread-protected, even when they may be modified at any
+ * time by the application. The reason is the DB_MPOOLFILE handle
+ * is single-threaded from the viewpoint of the application, and so
+ * the only fields needing to be thread-protected are those accessed
+ * by checkpoint or sync threads when using DB_MPOOLFILE structures
+ * to flush buffers from the cache.
+ */
+ DB_ENV *dbenv; /* Overlying DB_ENV. */
+ MPOOLFILE *mfp; /* Underlying MPOOLFILE. */
+
+ uint32_t clear_len; /* Cleared length on created pages. */
+ uint8_t /* Unique file ID. */
+ fileid[DB_FILE_ID_LEN];
+ int ftype; /* File type. */
+ int32_t lsn_offset; /* LSN offset in page. */
+ uint32_t gbytes, bytes; /* Maximum file size. */
+ DBT *pgcookie; /* Byte-string passed to pgin/pgout. */
+ int32_t priority; /* Cache priority. */
+
+ void *addr; /* Address of mmap'd region. */
+ size_t len; /* Length of mmap'd region. */
+
+ uint32_t config_flags; /* Flags to DB_MPOOLFILE->set_flags. */
+
+ /* DB_MPOOLFILE PUBLIC HANDLE LIST BEGIN */
+ int (*close) __P((DB_MPOOLFILE *, uint32_t));
+ int (*get)
+ __P((DB_MPOOLFILE *, db_pgno_t *, DB_TXN *, uint32_t, void *));
+ int (*get_clear_len) __P((DB_MPOOLFILE *, uint32_t *));
+ int (*get_fileid) __P((DB_MPOOLFILE *, uint8_t *));
+ int (*get_flags) __P((DB_MPOOLFILE *, uint32_t *));
+ int (*get_ftype) __P((DB_MPOOLFILE *, int *));
+ int (*get_last_pgno) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*get_lsn_offset) __P((DB_MPOOLFILE *, int32_t *));
+ int (*get_maxsize) __P((DB_MPOOLFILE *, uint32_t *, uint32_t *));
+ int (*get_pgcookie) __P((DB_MPOOLFILE *, DBT *));
+ int (*get_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY *));
+ int (*open) __P((DB_MPOOLFILE *, const char *, uint32_t, int, size_t));
+ int (*put) __P((DB_MPOOLFILE *, void *, DB_CACHE_PRIORITY, uint32_t));
+ int (*set_clear_len) __P((DB_MPOOLFILE *, uint32_t));
+ int (*set_fileid) __P((DB_MPOOLFILE *, uint8_t *));
+ int (*set_flags) __P((DB_MPOOLFILE *, uint32_t, int));
+ int (*set_ftype) __P((DB_MPOOLFILE *, int));
+ int (*set_lsn_offset) __P((DB_MPOOLFILE *, int32_t));
+ int (*set_maxsize) __P((DB_MPOOLFILE *, uint32_t, uint32_t));
+ int (*set_pgcookie) __P((DB_MPOOLFILE *, DBT *));
+ int (*set_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY));
+ int (*sync) __P((DB_MPOOLFILE *));
+ /* DB_MPOOLFILE PUBLIC HANDLE LIST END */
+
+ /*
+ * MP_FILEID_SET, MP_OPEN_CALLED and MP_READONLY do not need to be
+ * thread protected because they are initialized before the file is
+ * linked onto the per-process lists, and never modified.
+ *
+ * MP_FLUSH is thread protected because it is potentially read/set by
+ * multiple threads of control.
+ */
+#define MP_FILEID_SET 0x001 /* Application supplied a file ID. */
+#define MP_FLUSH 0x002 /* Was opened to flush a buffer. */
+#define MP_MULTIVERSION 0x004 /* Opened for multiversion access. */
+#define MP_OPEN_CALLED 0x008 /* File opened. */
+#define MP_READONLY 0x010 /* File is readonly. */
+ uint32_t flags;
+};
+
+/* Mpool statistics structure. */
+struct __db_mpool_stat {
+ uint32_t st_gbytes; /* Total cache size: GB. */
+ uint32_t st_bytes; /* Total cache size: B. */
+ uint32_t st_ncache; /* Number of cache regions. */
+ uint32_t st_max_ncache; /* Maximum number of regions. */
+ size_t st_mmapsize; /* Maximum file size for mmap. */
+ int st_maxopenfd; /* Maximum number of open fd's. */
+ int st_maxwrite; /* Maximum buffers to write. */
+ db_timeout_t st_maxwrite_sleep; /* Sleep after writing max buffers. */
+ uint32_t st_pages; /* Total number of pages. */
+#ifndef __TEST_DB_NO_STATISTICS
+ uint32_t st_map; /* Pages from mapped files. */
+ uint32_t st_cache_hit; /* Pages found in the cache. */
+ uint32_t st_cache_miss; /* Pages not found in the cache. */
+ uint32_t st_page_create; /* Pages created in the cache. */
+ uint32_t st_page_in; /* Pages read in. */
+ uint32_t st_page_out; /* Pages written out. */
+ uint32_t st_ro_evict; /* Clean pages forced from the cache. */
+ uint32_t st_rw_evict; /* Dirty pages forced from the cache. */
+ uint32_t st_page_trickle; /* Pages written by memp_trickle. */
+ uint32_t st_page_clean; /* Clean pages. */
+ uint32_t st_page_dirty; /* Dirty pages. */
+ uint32_t st_hash_buckets; /* Number of hash buckets. */
+ uint32_t st_hash_searches; /* Total hash chain searches. */
+ uint32_t st_hash_longest; /* Longest hash chain searched. */
+ uint32_t st_hash_examined; /* Total hash entries searched. */
+ uint32_t st_hash_nowait; /* Hash lock granted with nowait. */
+ uint32_t st_hash_wait; /* Hash lock granted after wait. */
+ uint32_t st_hash_max_nowait; /* Max hash lock granted with nowait. */
+ uint32_t st_hash_max_wait; /* Max hash lock granted after wait. */
+ uint32_t st_region_nowait; /* Region lock granted with nowait. */
+ uint32_t st_region_wait; /* Region lock granted after wait. */
+ uint32_t st_mvcc_frozen; /* Buffers frozen. */
+ uint32_t st_mvcc_thawed; /* Buffers thawed. */
+ uint32_t st_mvcc_freed; /* Frozen buffers freed. */
+ uint32_t st_alloc; /* Number of page allocations. */
+ uint32_t st_alloc_buckets; /* Buckets checked during allocation. */
+ uint32_t st_alloc_max_buckets; /* Max checked during allocation. */
+ uint32_t st_alloc_pages; /* Pages checked during allocation. */
+ uint32_t st_alloc_max_pages; /* Max checked during allocation. */
+ uint32_t st_io_wait; /* Thread waited on buffer I/O. */
+ roff_t st_regsize; /* Region size. */
+#endif
+};
+
+/* Mpool file statistics structure. */
+struct __db_mpool_fstat {
+ char *file_name; /* File name. */
+ uint32_t st_pagesize; /* Page size. */
+#ifndef __TEST_DB_NO_STATISTICS
+ uint32_t st_map; /* Pages from mapped files. */
+ uint32_t st_cache_hit; /* Pages found in the cache. */
+ uint32_t st_cache_miss; /* Pages not found in the cache. */
+ uint32_t st_page_create; /* Pages created in the cache. */
+ uint32_t st_page_in; /* Pages read in. */
+ uint32_t st_page_out; /* Pages written out. */
+#endif
+};
+
+/*******************************************************
+ * Transactions and recovery.
+ *******************************************************/
+#define DB_TXNVERSION 1
+
+typedef enum {
+ DB_TXN_ABORT=0, /* Public. */
+ DB_TXN_APPLY=1, /* Public. */
+ DB_TXN_BACKWARD_ALLOC=2, /* Internal. */
+ DB_TXN_BACKWARD_ROLL=3, /* Public. */
+ DB_TXN_FORWARD_ROLL=4, /* Public. */
+ DB_TXN_OPENFILES=5, /* Internal. */
+ DB_TXN_POPENFILES=6, /* Internal. */
+ DB_TXN_PRINT=7 /* Public. */
+} db_recops;
+
+/*
+ * BACKWARD_ALLOC is used during the forward pass to pick up any aborted
+ * allocations for files that were created during the forward pass.
+ * The main difference between _ALLOC and _ROLL is that the entry for
+ * the file may not exist during the rollforward pass.
+ */
+#define DB_UNDO(op) ((op) == DB_TXN_ABORT || \
+ (op) == DB_TXN_BACKWARD_ROLL || (op) == DB_TXN_BACKWARD_ALLOC)
+#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL || (op) == DB_TXN_APPLY)
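+
+/*
+ * A minimal sketch of how an application-specific recovery function
+ * (installed with DB_ENV->set_app_dispatch) typically uses these macros;
+ * the record decoding is elided and the function name is illustrative.
+ *
+ *	int
+ *	my_dispatch(DB_ENV *dbenv, DBT *log_rec, DB_LSN *lsn, db_recops op)
+ *	{
+ *		if (DB_REDO(op))
+ *			... reapply the change described by log_rec ...
+ *		else if (DB_UNDO(op))
+ *			... roll the change back ...
+ *		return (0);
+ *	}
+ */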
+
+struct __db_txn {
+ DB_TXNMGR *mgrp; /* Pointer to transaction manager. */
+ DB_TXN *parent; /* Pointer to transaction's parent. */
+
+ uint32_t txnid; /* Unique transaction id. */
+ char *name; /* Transaction name. */
+ DB_LOCKER *locker; /* Locker for this txn. */
+
+ db_threadid_t tid; /* Thread id for use in MT XA. */
+ void *td; /* Detail structure within region. */
+ db_timeout_t lock_timeout; /* Timeout for locks for this txn. */
+ db_timeout_t expire; /* Time transaction expires. */
+ void *txn_list; /* Undo information for parent. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) links;
+ * TAILQ_ENTRY(__db_txn) xalinks;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } links; /* Links transactions off manager. */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } xalinks; /* Links active XA transactions. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__kids, __db_txn) kids;
+ */
+ struct __kids {
+ struct __db_txn *tqh_first;
+ struct __db_txn **tqh_last;
+ } kids;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__events, __txn_event) events;
+ */
+ struct {
+ struct __txn_event *tqh_first;
+ struct __txn_event **tqh_last;
+ } events; /* Links deferred events. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * STAILQ_HEAD(__logrec, __txn_logrec) logs;
+ */
+ struct {
+ struct __txn_logrec *stqh_first;
+ struct __txn_logrec **stqh_last;
+ } logs; /* Links in memory log records. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) klinks;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } klinks;
+
+ void *api_internal; /* C++ API private. */
+ void *xml_internal; /* XML API private. */
+
+ uint32_t cursors; /* Number of cursors open for txn */
+
+ /* DB_TXN PUBLIC HANDLE LIST BEGIN */
+ int (*abort) __P((DB_TXN *));
+ int (*commit) __P((DB_TXN *, uint32_t));
+ int (*discard) __P((DB_TXN *, uint32_t));
+ int (*get_name) __P((DB_TXN *, const char **));
+ uint32_t (*id) __P((DB_TXN *));
+ int (*prepare) __P((DB_TXN *, uint8_t *));
+ int (*set_name) __P((DB_TXN *, const char *));
+ int (*set_timeout) __P((DB_TXN *, db_timeout_t, uint32_t));
+ /* DB_TXN PUBLIC HANDLE LIST END */
+
+ /* DB_TXN PRIVATE HANDLE LIST BEGIN */
+ void (*set_txn_lsnp) __P((DB_TXN *txn, DB_LSN **, DB_LSN **));
+ /* DB_TXN PRIVATE HANDLE LIST END */
+
+#define TXN_CHILDCOMMIT 0x0001 /* Txn has committed. */
+#define TXN_CDSGROUP 0x0002 /* CDS group handle. */
+#define TXN_COMPENSATE 0x0004 /* Compensating transaction. */
+#define TXN_DEADLOCK 0x0008 /* Txn has deadlocked. */
+#define TXN_LOCKTIMEOUT 0x0010 /* Txn has a lock timeout. */
+#define TXN_MALLOC 0x0020 /* Structure allocated by TXN system. */
+#define TXN_NOSYNC 0x0040 /* Do not sync on prepare and commit. */
+#define TXN_NOWAIT 0x0080 /* Do not wait on locks. */
+#define TXN_PRIVATE 0x0100 /* Txn owned by cursor. */
+#define TXN_READ_COMMITTED 0x0200 /* Txn has degree 2 isolation. */
+#define TXN_READ_UNCOMMITTED 0x0400 /* Txn has degree 1 isolation. */
+#define TXN_RESTORED 0x0800 /* Txn has been restored. */
+#define TXN_SNAPSHOT 0x1000 /* Snapshot Isolation. */
+#define TXN_SYNC 0x2000 /* Write and sync on prepare/commit. */
+#define TXN_WRITE_NOSYNC 0x4000 /* Write only on prepare/commit. */
+ uint32_t flags;
+};
+
+#define TXN_SYNC_FLAGS (TXN_SYNC | TXN_NOSYNC | TXN_WRITE_NOSYNC)
+
+/*
+ * Structure used for two phase commit interface. Berkeley DB support for two
+ * phase commit is compatible with the X/Open XA interface.
+ *
+ * The XA #define XIDDATASIZE defines the size of a global transaction ID. We
+ * have our own version here (for name space reasons) which must have the same
+ * value.
+ */
+#define DB_XIDDATASIZE 128
+struct __db_preplist {
+ DB_TXN *txn;
+ uint8_t gid[DB_XIDDATASIZE];
+};
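+
+/*
+ * A minimal two-phase commit sketch (assuming an open transaction "txn", a
+ * global transaction id in "gid" and a DB_ENV handle "dbenv"; error
+ * handling omitted): the local transaction is prepared under the global id,
+ * and after a failure DB_ENV->txn_recover lists the prepared-but-unresolved
+ * transactions so the transaction manager can commit or abort each one.
+ *
+ *	uint8_t gid[DB_XIDDATASIZE];
+ *	DB_PREPLIST prep[32];
+ *	long count, i;
+ *
+ *	(void)txn->prepare(txn, gid);
+ *
+ *	(void)dbenv->txn_recover(dbenv, prep, 32, &count, DB_FIRST);
+ *	for (i = 0; i < count; i++)
+ *		(void)prep[i].txn->commit(prep[i].txn, 0);
+ */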
+
+/* Transaction statistics structure. */
+struct __db_txn_active {
+ uint32_t txnid; /* Transaction ID */
+ uint32_t parentid; /* Transaction ID of parent */
+ pid_t pid; /* Process owning txn ID */
+ db_threadid_t tid; /* Thread owning txn ID */
+
+ DB_LSN lsn; /* LSN when transaction began */
+
+ DB_LSN read_lsn; /* Read LSN for MVCC */
+ uint32_t mvcc_ref; /* MVCC reference count */
+
+#define TXN_ABORTED 1
+#define TXN_COMMITTED 2
+#define TXN_PREPARED 3
+#define TXN_RUNNING 4
+ uint32_t status; /* Status of the transaction */
+
+#define TXN_XA_ABORTED 1
+#define TXN_XA_DEADLOCKED 2
+#define TXN_XA_ENDED 3
+#define TXN_XA_PREPARED 4
+#define TXN_XA_STARTED 5
+#define TXN_XA_SUSPENDED 6
+ uint32_t xa_status; /* XA status */
+
+ uint8_t xid[DB_XIDDATASIZE]; /* Global transaction ID */
+ char name[51]; /* 50 bytes of name, nul termination */
+};
+
+struct __db_txn_stat {
+ uint32_t st_nrestores; /* number of restored transactions
+ after recovery. */
+#ifndef __TEST_DB_NO_STATISTICS
+ DB_LSN st_last_ckp; /* lsn of the last checkpoint */
+ time_t st_time_ckp; /* time of last checkpoint */
+ uint32_t st_last_txnid; /* last transaction id given out */
+ uint32_t st_maxtxns; /* maximum txns possible */
+ uint32_t st_naborts; /* number of aborted transactions */
+ uint32_t st_nbegins; /* number of begun transactions */
+ uint32_t st_ncommits; /* number of committed transactions */
+ uint32_t st_nactive; /* number of active transactions */
+ uint32_t st_nsnapshot; /* number of snapshot transactions */
+ uint32_t st_maxnactive; /* maximum active transactions */
+ uint32_t st_maxnsnapshot; /* maximum snapshot transactions */
+ DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */
+ uint32_t st_region_wait; /* Region lock granted after wait. */
+ uint32_t st_region_nowait; /* Region lock granted without wait. */
+ roff_t st_regsize; /* Region size. */
+#endif
+};
+
+/*******************************************************
+ * Replication.
+ *******************************************************/
+/* Special, out-of-band environment IDs. */
+#define DB_EID_BROADCAST -1
+#define DB_EID_INVALID -2
+
+/* rep_config flag values. */
+#define DB_REP_CONF_BULK 0x0001 /* Bulk transfer. */
+#define DB_REP_CONF_DELAYCLIENT 0x0002 /* Delay client synchronization. */
+#define DB_REP_CONF_NOAUTOINIT 0x0004 /* No automatic client init. */
+#define DB_REP_CONF_NOWAIT 0x0008 /* Don't wait, return error. */
+
+/*
+ * Operation code values for rep_start and/or repmgr_start. Just one of the
+ * following values should be passed in the flags parameter. (If we ever need
+ * additional, independent bit flags for these methods, we can start allocating
+ * them from the high-order byte of the flags word, as we currently do elsewhere
+ * for DB_AFTER through DB_WRITELOCK and DB_AUTO_COMMIT, etc.)
+ */
+#define DB_REP_CLIENT 1
+#define DB_REP_ELECTION 2
+#define DB_REP_MASTER 3
+
+#define DB_REPFLAGS_MASK 0x000000ff /* Mask for rep modes. */
+
+#define DB_REP_DEFAULT_PRIORITY 100
+
+/* Acknowledgement policies. */
+#define DB_REPMGR_ACKS_ALL 1
+#define DB_REPMGR_ACKS_ALL_PEERS 2
+#define DB_REPMGR_ACKS_NONE 3
+#define DB_REPMGR_ACKS_ONE 4
+#define DB_REPMGR_ACKS_ONE_PEER 5
+#define DB_REPMGR_ACKS_QUORUM 6
+
+/* Replication timeout configuration values. */
+#define DB_REP_ACK_TIMEOUT 1 /* RepMgr acknowledgements. */
+#define DB_REP_CHECKPOINT_DELAY 2 /* Master checkpoint delay. */
+#define DB_REP_CONNECTION_RETRY 3 /* RepMgr connections. */
+#define DB_REP_ELECTION_RETRY 4 /* RepMgr elect retries. */
+#define DB_REP_ELECTION_TIMEOUT 5 /* Rep normal elections. */
+#define DB_REP_FULL_ELECTION_TIMEOUT 6 /* Rep full elections. */
+#define DB_REP_LEASE_TIMEOUT 7 /* Master leases. */
+
+/* Event notification types. */
+#define DB_EVENT_NO_SUCH_EVENT 0 /* out-of-band sentinel value */
+#define DB_EVENT_PANIC 1
+#define DB_EVENT_REP_CLIENT 2
+#define DB_EVENT_REP_ELECTED 3
+#define DB_EVENT_REP_MASTER 4
+#define DB_EVENT_REP_NEWMASTER 5
+#define DB_EVENT_REP_PERM_FAILED 6
+#define DB_EVENT_REP_STARTUPDONE 7
+#define DB_EVENT_WRITE_FAILED 8
+
+/* Flag value for repmgr_add_remote_site. */
+#define DB_REPMGR_PEER 0x01
+
+/* Replication Manager site status. */
+struct __db_repmgr_site {
+ int eid;
+ char *host;
+ u_int port;
+
+#define DB_REPMGR_CONNECTED 0x01
+#define DB_REPMGR_DISCONNECTED 0x02
+ uint32_t status;
+};
+
+/* Replication statistics. */
+struct __db_rep_stat {
+ /* !!!
+ * Many replication statistics fields cannot be protected by a mutex
+ * without an unacceptable performance penalty, since most message
+ * processing is done without the need to hold a region-wide lock.
+ * Fields whose comments end with a '+' may be updated without holding
+ * the replication or log mutexes (as appropriate), and thus may be
+ * off somewhat (or, on unreasonable architectures under unlucky
+ * circumstances, garbaged).
+ */
+ uint32_t st_log_queued; /* Log records currently queued.+ */
+ uint32_t st_startup_complete; /* Site completed client sync-up. */
+#ifndef __TEST_DB_NO_STATISTICS
+ uint32_t st_status; /* Current replication status. */
+ DB_LSN st_next_lsn; /* Next LSN to use or expect. */
+ DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */
+ db_pgno_t st_next_pg; /* Next pg we expect. */
+ db_pgno_t st_waiting_pg; /* pg we're awaiting, if any. */
+
+ uint32_t st_dupmasters; /* # of times a duplicate master
+ condition was detected.+ */
+ int st_env_id; /* Current environment ID. */
+ int st_env_priority; /* Current environment priority. */
+ uint32_t st_bulk_fills; /* Bulk buffer fills. */
+ uint32_t st_bulk_overflows; /* Bulk buffer overflows. */
+ uint32_t st_bulk_records; /* Bulk records stored. */
+ uint32_t st_bulk_transfers; /* Transfers of bulk buffers. */
+ uint32_t st_client_rerequests; /* Number of forced rerequests. */
+ uint32_t st_client_svc_req; /* Number of client service requests
+ received by this client. */
+ uint32_t st_client_svc_miss; /* Number of client service requests
+ missing on this client. */
+ uint32_t st_gen; /* Current generation number. */
+ uint32_t st_egen; /* Current election gen number. */
+ uint32_t st_log_duplicated; /* Log records received multiply.+ */
+ uint32_t st_log_queued_max; /* Max. log records queued at once.+ */
+ uint32_t st_log_queued_total; /* Total # of log recs. ever queued.+ */
+ uint32_t st_log_records; /* Log records received and put.+ */
+ uint32_t st_log_requested; /* Log recs. missed and requested.+ */
+ int st_master; /* Env. ID of the current master. */
+ uint32_t st_master_changes; /* # of times we've switched masters. */
+ uint32_t st_msgs_badgen; /* Messages with a bad generation #.+ */
+ uint32_t st_msgs_processed; /* Messages received and processed.+ */
+ uint32_t st_msgs_recover; /* Messages ignored because this site
+ was a client in recovery.+ */
+ uint32_t st_msgs_send_failures;/* # of failed message sends.+ */
+ uint32_t st_msgs_sent; /* # of successful message sends.+ */
+ uint32_t st_newsites; /* # of NEWSITE msgs. received.+ */
+ int st_nsites; /* Current number of sites we will
+ assume during elections. */
+ uint32_t st_nthrottles; /* # of times we were throttled. */
+ uint32_t st_outdated; /* # of times we detected and returned
+ an OUTDATED condition.+ */
+ uint32_t st_pg_duplicated; /* Pages received multiply.+ */
+ uint32_t st_pg_records; /* Pages received and stored.+ */
+ uint32_t st_pg_requested; /* Pages missed and requested.+ */
+ uint32_t st_txns_applied; /* # of transactions applied.+ */
+ uint32_t st_startsync_delayed; /* # of STARTSYNC msgs delayed.+ */
+
+ /* Elections generally. */
+ uint32_t st_elections; /* # of elections held.+ */
+ uint32_t st_elections_won; /* # of elections won by this site.+ */
+
+ /* Statistics about an in-progress election. */
+ int st_election_cur_winner; /* Current front-runner. */
+ uint32_t st_election_gen; /* Election generation number. */
+ DB_LSN st_election_lsn; /* Max. LSN of current winner. */
+ int st_election_nsites; /* # of "registered voters". */
+ int st_election_nvotes; /* # of "registered voters" needed. */
+ int st_election_priority; /* Current election priority. */
+ int st_election_status; /* Current election status. */
+ uint32_t st_election_tiebreaker;/* Election tiebreaker value. */
+ int st_election_votes; /* Votes received in this round. */
+ uint32_t st_election_sec; /* Last election time seconds. */
+ uint32_t st_election_usec; /* Last election time useconds. */
+#endif
+};
+
+/* Replication Manager statistics. */
+struct __db_repmgr_stat {
+ uint32_t st_perm_failed; /* # of insufficiently ack'ed msgs. */
+ uint32_t st_msgs_queued; /* # msgs queued for network delay. */
+ uint32_t st_msgs_dropped; /* # msgs discarded due to excessive
+ queue length. */
+ uint32_t st_connection_drop; /* Existing connections dropped. */
+ uint32_t st_connect_fail; /* Failed new connection attempts. */
+};
+
+/*******************************************************
+ * Sequences.
+ *******************************************************/
+/*
+ * The storage record for a sequence.
+ */
+struct __db_seq_record {
+ uint32_t seq_version; /* Version size/number. */
+#define DB_SEQ_DEC 0x00000001 /* Decrement sequence. */
+#define DB_SEQ_INC 0x00000002 /* Increment sequence. */
+#define DB_SEQ_RANGE_SET 0x00000004 /* Range set (internal). */
+#define DB_SEQ_WRAP 0x00000008 /* Wrap sequence at min/max. */
+#define DB_SEQ_WRAPPED 0x00000010 /* Just wrapped (internal). */
+ uint32_t flags; /* Flags. */
+ db_seq_t seq_value; /* Current value. */
+ db_seq_t seq_max; /* Max permitted. */
+ db_seq_t seq_min; /* Min permitted. */
+};
+
+/*
+ * Handle for a sequence object.
+ */
+struct __db_sequence {
+ DB *seq_dbp; /* DB handle for this sequence. */
+ db_mutex_t mtx_seq; /* Mutex if sequence is threaded. */
+ DB_SEQ_RECORD *seq_rp; /* Pointer to current data. */
+ DB_SEQ_RECORD seq_record; /* Data from DB_SEQUENCE. */
+ int32_t seq_cache_size; /* Number of values cached. */
+ db_seq_t seq_last_value; /* Last value cached. */
+ DBT seq_key; /* DBT pointing to sequence key. */
+ DBT seq_data; /* DBT pointing to seq_record. */
+
+ /* API-private structure: used by C++ and Java. */
+ void *api_internal;
+
+ /* DB_SEQUENCE PUBLIC HANDLE LIST BEGIN */
+ int (*close) __P((DB_SEQUENCE *, uint32_t));
+ int (*get) __P((DB_SEQUENCE *,
+ DB_TXN *, int32_t, db_seq_t *, uint32_t));
+ int (*get_cachesize) __P((DB_SEQUENCE *, int32_t *));
+ int (*get_db) __P((DB_SEQUENCE *, DB **));
+ int (*get_flags) __P((DB_SEQUENCE *, uint32_t *));
+ int (*get_key) __P((DB_SEQUENCE *, DBT *));
+ int (*get_range) __P((DB_SEQUENCE *,
+ db_seq_t *, db_seq_t *));
+ int (*initial_value) __P((DB_SEQUENCE *, db_seq_t));
+ int (*open) __P((DB_SEQUENCE *,
+ DB_TXN *, DBT *, uint32_t));
+ int (*remove) __P((DB_SEQUENCE *, DB_TXN *, uint32_t));
+ int (*set_cachesize) __P((DB_SEQUENCE *, int32_t));
+ int (*set_flags) __P((DB_SEQUENCE *, uint32_t));
+ int (*set_range) __P((DB_SEQUENCE *, db_seq_t, db_seq_t));
+ int (*stat) __P((DB_SEQUENCE *,
+ DB_SEQUENCE_STAT **, uint32_t));
+ int (*stat_print) __P((DB_SEQUENCE *, uint32_t));
+ /* DB_SEQUENCE PUBLIC HANDLE LIST END */
+};
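+
+/*
+ * A minimal sequence usage sketch (assuming an open DB handle "dbp" and a
+ * DBT "key" naming the sequence record; error handling omitted): a handle
+ * is created with db_sequence_create, opened, and then asked for one value
+ * at a time (the third argument to get is the allocation delta).
+ *
+ *	DB_SEQUENCE *seq;
+ *	db_seq_t value;
+ *
+ *	(void)db_sequence_create(&seq, dbp, 0);
+ *	(void)seq->initial_value(seq, 1);
+ *	(void)seq->open(seq, NULL, &key, DB_CREATE);
+ *	(void)seq->get(seq, NULL, 1, &value, 0);
+ *	(void)seq->close(seq, 0);
+ */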
+
+struct __db_seq_stat {
+ uint32_t st_wait; /* Sequence lock granted after wait. */
+ uint32_t st_nowait; /* Sequence lock granted w/o wait. */
+ db_seq_t st_current; /* Current value in db. */
+ db_seq_t st_value; /* Current cached value. */
+ db_seq_t st_last_value; /* Last cached value. */
+ db_seq_t st_min; /* Minimum value. */
+ db_seq_t st_max; /* Maximum value. */
+ int32_t st_cache_size; /* Cache size. */
+ uint32_t st_flags; /* Flag value. */
+};
+
+/*******************************************************
+ * Access methods.
+ *******************************************************/
+typedef enum {
+ DB_BTREE=1,
+ DB_HASH=2,
+ DB_RECNO=3,
+ DB_QUEUE=4,
+ DB_UNKNOWN=5 /* Figure it out on open. */
+} DBTYPE;
+
+#define DB_RENAMEMAGIC 0x030800 /* File has been renamed. */
+
+#define DB_BTREEVERSION 9 /* Current btree version. */
+#define DB_BTREEOLDVER 8 /* Oldest btree version supported. */
+#define DB_BTREEMAGIC 0x053162
+
+#define DB_HASHVERSION 9 /* Current hash version. */
+#define DB_HASHOLDVER 7 /* Oldest hash version supported. */
+#define DB_HASHMAGIC 0x061561
+
+#define DB_QAMVERSION 4 /* Current queue version. */
+#define DB_QAMOLDVER 3 /* Oldest queue version supported. */
+#define DB_QAMMAGIC 0x042253
+
+#define DB_SEQUENCE_VERSION 2 /* Current sequence version. */
+#define DB_SEQUENCE_OLDVER 1 /* Oldest sequence version supported. */
+
+/*
+ * DB access method and cursor operation values. Each value is an operation
+ * code to which additional bit flags are added.
+ */
+#define DB_AFTER 1 /* Dbc.put */
+#define DB_APPEND 2 /* Db.put */
+#define DB_BEFORE 3 /* Dbc.put */
+#define DB_CONSUME 4 /* Db.get */
+#define DB_CONSUME_WAIT 5 /* Db.get */
+#define DB_CURRENT 6 /* Dbc.get, Dbc.put, DbLogc.get */
+#define DB_FIRST 7 /* Dbc.get, DbLogc->get */
+#define DB_GET_BOTH 8 /* Db.get, Dbc.get */
+#define DB_GET_BOTHC 9 /* Dbc.get (internal) */
+#define DB_GET_BOTH_RANGE 10 /* Db.get, Dbc.get */
+#define DB_GET_RECNO 11 /* Dbc.get */
+#define DB_JOIN_ITEM 12 /* Dbc.get; don't do primary lookup */
+#define DB_KEYFIRST 13 /* Dbc.put */
+#define DB_KEYLAST 14 /* Dbc.put */
+#define DB_LAST 15 /* Dbc.get, DbLogc->get */
+#define DB_NEXT 16 /* Dbc.get, DbLogc->get */
+#define DB_NEXT_DUP 17 /* Dbc.get */
+#define DB_NEXT_NODUP 18 /* Dbc.get */
+#define DB_NODUPDATA 19 /* Db.put, Dbc.put */
+#define DB_NOOVERWRITE 20 /* Db.put */
+#define DB_NOSYNC 21 /* Db.close */
+#define DB_POSITION 22 /* Dbc.dup */
+#define DB_PREV 23 /* Dbc.get, DbLogc->get */
+#define DB_PREV_DUP 24 /* Dbc.get */
+#define DB_PREV_NODUP 25 /* Dbc.get */
+#define DB_SET 26 /* Dbc.get, DbLogc->get */
+#define DB_SET_RANGE 27 /* Dbc.get */
+#define DB_SET_RECNO 28 /* Db.get, Dbc.get */
+#define DB_UPDATE_SECONDARY 29 /* Dbc.get, Dbc.del (internal) */
+#define DB_WRITECURSOR 30 /* Db.cursor */
+#define DB_WRITELOCK 31 /* Db.cursor (internal) */
+
+/* This has to change when the max opcode hits 255. */
+#define DB_OPFLAGS_MASK 0x000000ff /* Mask for operations flags. */
+
+/*
+ * Masks for flags that can be OR'd into DB access method and cursor
+ * operation values. Three top bits have already been taken:
+ *
+ * DB_AUTO_COMMIT 0x02000000
+ * DB_READ_COMMITTED 0x04000000
+ * DB_READ_UNCOMMITTED 0x08000000
+ */
+#define DB_MULTIPLE 0x10000000 /* Return multiple data values. */
+#define DB_MULTIPLE_KEY 0x20000000 /* Return multiple data/key pairs. */
+#define DB_RMW 0x40000000 /* Acquire write lock immediately. */
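+
+/*
+ * A minimal sketch (assuming an open DB handle "dbp", a transaction "txn"
+ * and initialized DBTs "key" and "data"; error handling omitted): DB_RMW is
+ * OR'd into the get operation so the write lock is taken up front, avoiding
+ * a deadlock-prone upgrade later when the pair is rewritten.
+ *
+ *	if (dbp->get(dbp, txn, &key, &data, DB_RMW) == 0) {
+ *		... modify "data" ...
+ *		(void)dbp->put(dbp, txn, &key, &data, 0);
+ *	}
+ */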
+
+/*
+ * DB (user visible) error return codes.
+ *
+ * !!!
+ * We don't want our error returns to conflict with other packages where
+ * possible, so pick a base error value that's hopefully not common. We
+ * document that we own the error name space from -30,800 to -30,999.
+ */
+/* DB (public) error return codes. */
+#define DB_BUFFER_SMALL (-30999)/* User memory too small for return. */
+#define DB_DONOTINDEX (-30998)/* "Null" return from 2ndary callbk. */
+#define DB_KEYEMPTY (-30997)/* Key/data deleted or never created. */
+#define DB_KEYEXIST (-30996)/* The key/data pair already exists. */
+#define DB_LOCK_DEADLOCK (-30995)/* Deadlock. */
+#define DB_LOCK_NOTGRANTED (-30994)/* Lock unavailable. */
+#define DB_LOG_BUFFER_FULL (-30993)/* In-memory log buffer full. */
+#define DB_NOSERVER (-30992)/* Server panic return. */
+#define DB_NOSERVER_HOME (-30991)/* Bad home sent to server. */
+#define DB_NOSERVER_ID (-30990)/* Bad ID sent to server. */
+#define DB_NOTFOUND (-30989)/* Key/data pair not found (EOF). */
+#define DB_OLD_VERSION (-30988)/* Out-of-date version. */
+#define DB_PAGE_NOTFOUND (-30987)/* Requested page not found. */
+#define DB_REP_DUPMASTER (-30986)/* There are two masters. */
+#define DB_REP_HANDLE_DEAD (-30985)/* Rolled back a commit. */
+#define DB_REP_HOLDELECTION (-30984)/* Time to hold an election. */
+#define DB_REP_IGNORE (-30983)/* This msg should be ignored.*/
+#define DB_REP_ISPERM (-30982)/* Cached not written perm written.*/
+#define DB_REP_JOIN_FAILURE (-30981)/* Unable to join replication group. */
+#define DB_REP_LEASE_EXPIRED (-30980)/* Master lease has expired. */
+#define DB_REP_LOCKOUT (-30979)/* API/Replication lockout now. */
+#define DB_REP_NEWSITE (-30978)/* New site entered system. */
+#define DB_REP_NOTPERM (-30977)/* Permanent log record not written. */
+#define DB_REP_UNAVAIL (-30976)/* Site cannot currently be reached. */
+#define DB_RUNRECOVERY (-30975)/* Panic return. */
+#define DB_SECONDARY_BAD (-30974)/* Secondary index corrupt. */
+#define DB_VERIFY_BAD (-30973)/* Verify failed; bad format. */
+#define DB_VERSION_MISMATCH (-30972)/* Environment version mismatch. */
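+
+/*
+ * A minimal sketch of handling the most common public return codes from a
+ * read (assuming an open DB handle "dbp", a transaction "txn" or NULL, and
+ * initialized DBTs; handling of other codes is omitted):
+ *
+ *	switch (dbp->get(dbp, txn, &key, &data, 0)) {
+ *	case 0:
+ *		... the key/data pair was found ...
+ *		break;
+ *	case DB_NOTFOUND:
+ *		... the key is not in the database ...
+ *		break;
+ *	case DB_LOCK_DEADLOCK:
+ *		... abort and retry the enclosing transaction ...
+ *		break;
+ *	}
+ */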
+
+/* DB (private) error return codes. */
+#define DB_ALREADY_ABORTED (-30899)
+#define DB_DELETED (-30898)/* Recovery file marked deleted. */
+#define DB_EVENT_NOT_HANDLED (-30897)/* Forward event to application. */
+#define DB_NEEDSPLIT (-30896)/* Page needs to be split. */
+#define DB_REP_BULKOVF (-30895)/* Rep bulk buffer overflow. */
+#define DB_REP_EGENCHG (-30894)/* Egen changed while in election. */
+#define DB_REP_LOGREADY (-30893)/* Rep log ready for recovery. */
+#define DB_REP_NEWMASTER (-30892)/* We have learned of a new master. */
+#define DB_REP_PAGEDONE (-30891)/* This page was already done. */
+#define DB_SURPRISE_KID (-30890)/* Child commit where parent
+ didn't know it was a parent. */
+#define DB_SWAPBYTES (-30889)/* Database needs byte swapping. */
+#define DB_TIMEOUT (-30888)/* Timed out waiting for election. */
+#define DB_TXN_CKP (-30887)/* Encountered ckp record in log. */
+#define DB_VERIFY_FATAL (-30886)/* DB->verify cannot proceed. */
+
+/* Database handle. */
+struct __db {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ uint32_t pgsize; /* Database logical page size. */
+ DB_CACHE_PRIORITY priority; /* Database priority in cache. */
+
+ /* Callbacks. */
+ int (*db_append_recno) __P((DB *, DBT *, db_recno_t));
+ void (*db_feedback) __P((DB *, int, int));
+ int (*dup_compare) __P((DB *, const DBT *, const DBT *));
+
+ void *app_private; /* Application-private handle. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ DB_ENV *dbenv; /* Backing environment. */
+
+ DBTYPE type; /* DB access method type. */
+
+ DB_MPOOLFILE *mpf; /* Backing buffer pool. */
+
+ db_mutex_t mutex; /* Synchronization for free threading */
+
+ char *fname, *dname; /* File/database passed to DB->open. */
+ uint32_t open_flags; /* Flags passed to DB->open. */
+
+ uint8_t fileid[DB_FILE_ID_LEN];/* File's unique ID for locking. */
+
+ uint32_t adj_fileid; /* File's unique ID for curs. adj. */
+
+#define DB_LOGFILEID_INVALID -1
+ FNAME *log_filename; /* File's naming info for logging. */
+
+ db_pgno_t meta_pgno; /* Meta page number */
+ DB_LOCKER *locker; /* Locker for handle locking. */
+ DB_LOCKER *cur_locker; /* Current handle lock holder. */
+ DB_TXN *cur_txn; /* Opening transaction. */
+ DB_LOCKER *associate_locker; /* Locker for DB->associate call. */
+ DB_LOCK handle_lock; /* Lock held on this handle. */
+
+ u_int cl_id; /* RPC: remote client id. */
+
+ time_t timestamp; /* Handle timestamp for replication. */
+ uint32_t fid_gen; /* Rep generation number for fids. */
+
+ /*
+ * Returned data memory for DB->get() and friends.
+ */
+ DBT my_rskey; /* Secondary key. */
+ DBT my_rkey; /* [Primary] key. */
+ DBT my_rdata; /* Data. */
+
+ /*
+ * !!!
+ * Some applications use DB but implement their own locking outside of
+ * DB. If they're using fcntl(2) locking on the underlying database
+ * file, and we open and close a file descriptor for that file, we will
+ * discard their locks. The DB_FCNTL_LOCKING flag to DB->open is an
+ * undocumented interface to support this usage which leaves any file
+ * descriptors we open until DB->close. This will only work with the
+ * DB->open interface and simple caches; e.g., creating a transaction
+ * thread may open/close file descriptors that this flag doesn't protect.
+ * Locking with fcntl(2) on a file that you don't own is a very, very
+ * unsafe thing to do. 'Nuff said.
+ */
+ DB_FH *saved_open_fhp; /* Saved file handle. */
+
+ /*
+ * Linked list of DBP's, linked from the DB_ENV, used to keep track
+ * of all open db handles for cursor adjustment.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db) dblistlinks;
+ */
+ struct {
+ struct __db *tqe_next;
+ struct __db **tqe_prev;
+ } dblistlinks;
+
+ /*
+ * Cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__cq_fq, __dbc) free_queue;
+ * TAILQ_HEAD(__cq_aq, __dbc) active_queue;
+ * TAILQ_HEAD(__cq_jq, __dbc) join_queue;
+ */
+ struct __cq_fq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } free_queue;
+ struct __cq_aq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } active_queue;
+ struct __cq_jq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } join_queue;
+
+ /*
+ * Secondary index support.
+ *
+ * Linked list of secondary indices -- set in the primary.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_HEAD(s_secondaries, __db);
+ */
+ struct {
+ struct __db *lh_first;
+ } s_secondaries;
+
+ /*
+ * List entries for secondaries, and reference count of how many
+ * threads are updating this secondary (see Dbc.put).
+ *
+ * !!!
+ * Note that these are synchronized by the primary's mutex, but
+ * filled in in the secondaries.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) s_links;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } s_links;
+ uint32_t s_refcnt;
+
+ /* Secondary callback and free functions -- set in the secondary. */
+ int (*s_callback) __P((DB *, const DBT *, const DBT *, DBT *));
+
+ /* Reference to primary -- set in the secondary. */
+ DB *s_primary;
+
+#define DB_ASSOC_IMMUTABLE_KEY 0x00000001 /* Secondary key is immutable. */
+
+ /* Flags passed to associate -- set in the secondary. */
+ uint32_t s_assoc_flags;
+
+ /* API-private structure: used by DB 1.85, C++, Java, Perl and Tcl */
+ void *api_internal;
+
+ /* Subsystem-private structure. */
+ void *bt_internal; /* Btree/Recno access method. */
+ void *h_internal; /* Hash access method. */
+ void *q_internal; /* Queue access method. */
+ void *xa_internal; /* XA. */
+
+ /* DB PUBLIC HANDLE LIST BEGIN */
+ int (*associate) __P((DB *, DB_TXN *, DB *,
+ int (*)(DB *, const DBT *, const DBT *, DBT *), uint32_t));
+ int (*close) __P((DB *, uint32_t));
+ int (*compact) __P((DB *,
+ DB_TXN *, DBT *, DBT *, DB_COMPACT *, uint32_t, DBT *));
+ int (*cursor) __P((DB *, DB_TXN *, DBC **, uint32_t));
+ int (*del) __P((DB *, DB_TXN *, DBT *, uint32_t));
+ void (*err) __P((DB *, int, const char *, ...));
+ void (*errx) __P((DB *, const char *, ...));
+ int (*exists) __P((DB *, DB_TXN *, DBT *, uint32_t));
+ int (*fd) __P((DB *, int *));
+ int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, uint32_t));
+ int (*get_bt_minkey) __P((DB *, uint32_t *));
+ int (*get_byteswapped) __P((DB *, int *));
+ int (*get_cachesize) __P((DB *, uint32_t *, uint32_t *, int *));
+ int (*get_dbname) __P((DB *, const char **, const char **));
+ int (*get_encrypt_flags) __P((DB *, uint32_t *));
+ DB_ENV *(*get_env) __P((DB *));
+ void (*get_errfile) __P((DB *, FILE **));
+ void (*get_errpfx) __P((DB *, const char **));
+ int (*get_flags) __P((DB *, uint32_t *));
+ int (*get_h_ffactor) __P((DB *, uint32_t *));
+ int (*get_h_nelem) __P((DB *, uint32_t *));
+ int (*get_lorder) __P((DB *, int *));
+ DB_MPOOLFILE *(*get_mpf) __P((DB *));
+ void (*get_msgfile) __P((DB *, FILE **));
+ int (*get_multiple) __P((DB *));
+ int (*get_open_flags) __P((DB *, uint32_t *));
+ int (*get_pagesize) __P((DB *, uint32_t *));
+ int (*get_priority) __P((DB *, DB_CACHE_PRIORITY *));
+ int (*get_q_extentsize) __P((DB *, uint32_t *));
+ int (*get_re_delim) __P((DB *, int *));
+ int (*get_re_len) __P((DB *, uint32_t *));
+ int (*get_re_pad) __P((DB *, int *));
+ int (*get_re_source) __P((DB *, const char **));
+ int (*get_transactional) __P((DB *));
+ int (*get_type) __P((DB *, DBTYPE *));
+ int (*join) __P((DB *, DBC **, DBC **, uint32_t));
+ int (*key_range)
+ __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, uint32_t));
+ int (*open) __P((DB *,
+ DB_TXN *, const char *, const char *, DBTYPE, uint32_t, int));
+ int (*pget) __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, uint32_t));
+ int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, uint32_t));
+ int (*remove) __P((DB *, const char *, const char *, uint32_t));
+ int (*rename) __P((DB *,
+ const char *, const char *, const char *, uint32_t));
+ int (*set_alloc) __P((DB *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+ int (*set_bt_compare)
+ __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_bt_minkey) __P((DB *, uint32_t));
+ int (*set_bt_prefix)
+ __P((DB *, size_t (*)(DB *, const DBT *, const DBT *)));
+ int (*set_cachesize) __P((DB *, uint32_t, uint32_t, int));
+ int (*set_dup_compare)
+ __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_encrypt) __P((DB *, const char *, uint32_t));
+ void (*set_errcall) __P((DB *,
+ void (*)(const DB_ENV *, const char *, const char *)));
+ void (*set_errfile) __P((DB *, FILE *));
+ void (*set_errpfx) __P((DB *, const char *));
+ int (*set_feedback) __P((DB *, void (*)(DB *, int, int)));
+ int (*set_flags) __P((DB *, uint32_t));
+ int (*set_h_compare)
+ __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_h_ffactor) __P((DB *, uint32_t));
+ int (*set_h_hash)
+ __P((DB *, uint32_t (*)(DB *, const void *, uint32_t)));
+ int (*set_h_nelem) __P((DB *, uint32_t));
+ int (*set_lorder) __P((DB *, int));
+ void (*set_msgcall) __P((DB *, void (*)(const DB_ENV *, const char *)));
+ void (*set_msgfile) __P((DB *, FILE *));
+ int (*set_pagesize) __P((DB *, uint32_t));
+ int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int)));
+ int (*set_priority) __P((DB *, DB_CACHE_PRIORITY));
+ int (*set_q_extentsize) __P((DB *, uint32_t));
+ int (*set_re_delim) __P((DB *, int));
+ int (*set_re_len) __P((DB *, uint32_t));
+ int (*set_re_pad) __P((DB *, int));
+ int (*set_re_source) __P((DB *, const char *));
+ int (*stat) __P((DB *, DB_TXN *, void *, uint32_t));
+ int (*stat_print) __P((DB *, uint32_t));
+ int (*sync) __P((DB *, uint32_t));
+ int (*truncate) __P((DB *, DB_TXN *, uint32_t *, uint32_t));
+ int (*upgrade) __P((DB *, const char *, uint32_t));
+ int (*verify)
+ __P((DB *, const char *, const char *, FILE *, uint32_t));
+ /* DB PUBLIC HANDLE LIST END */
+
+ /* DB PRIVATE HANDLE LIST BEGIN */
+ int (*dump) __P((DB *, const char *,
+ int (*)(void *, const void *), void *, int, int));
+ int (*db_am_remove) __P((DB *, DB_TXN *, const char *, const char *));
+ int (*db_am_rename) __P((DB *, DB_TXN *,
+ const char *, const char *, const char *));
+ /* DB PRIVATE HANDLE LIST END */
+
+ /*
+ * Never called; these are a place to save function pointers
+ * so that we can undo an associate.
+ */
+ int (*stored_get) __P((DB *, DB_TXN *, DBT *, DBT *, uint32_t));
+ int (*stored_close) __P((DB *, uint32_t));
+
+#define DB_OK_BTREE 0x01
+#define DB_OK_HASH 0x02
+#define DB_OK_QUEUE 0x04
+#define DB_OK_RECNO 0x08
+ uint32_t am_ok; /* Legal AM choices. */
+
+ /*
+	 * This field really ought to be an AM_FLAG, but we have
+	 * run out of bits.  If/when we decide to split up
+ * the flags, we can incorporate it.
+ */
+ int preserve_fid; /* Do not free fileid on close. */
+
+#define DB_AM_CHKSUM 0x00000001 /* Checksumming */
+#define DB_AM_COMPENSATE 0x00000002 /* Created by compensating txn */
+#define DB_AM_CREATED 0x00000004 /* Database was created upon open */
+#define DB_AM_CREATED_MSTR 0x00000008 /* Encompassing file was created */
+#define DB_AM_DBM_ERROR 0x00000010 /* Error in DBM/NDBM database */
+#define DB_AM_DELIMITER 0x00000020 /* Variable length delimiter set */
+#define DB_AM_DISCARD 0x00000040 /* Discard any cached pages */
+#define DB_AM_DUP 0x00000080 /* DB_DUP */
+#define DB_AM_DUPSORT 0x00000100 /* DB_DUPSORT */
+#define DB_AM_ENCRYPT 0x00000200 /* Encryption */
+#define DB_AM_FIXEDLEN 0x00000400 /* Fixed-length records */
+#define DB_AM_INMEM 0x00000800 /* In-memory; no sync on close */
+#define DB_AM_INORDER 0x00001000 /* DB_INORDER */
+#define DB_AM_IN_RENAME 0x00002000 /* File is being renamed */
+#define DB_AM_NOT_DURABLE 0x00004000 /* Do not log changes */
+#define DB_AM_OPEN_CALLED 0x00008000 /* DB->open called */
+#define DB_AM_PAD 0x00010000 /* Fixed-length record pad */
+#define DB_AM_PGDEF 0x00020000 /* Page size was defaulted */
+#define DB_AM_RDONLY 0x00040000 /* Database is readonly */
+#define DB_AM_READ_UNCOMMITTED 0x00080000 /* Support degree 1 isolation */
+#define DB_AM_RECNUM 0x00100000 /* DB_RECNUM */
+#define DB_AM_RECOVER 0x00200000 /* DB opened by recovery routine */
+#define DB_AM_RENUMBER 0x00400000 /* DB_RENUMBER */
+#define DB_AM_REVSPLITOFF 0x00800000 /* DB_REVSPLITOFF */
+#define DB_AM_SECONDARY 0x01000000 /* Database is a secondary index */
+#define DB_AM_SNAPSHOT 0x02000000 /* DB_SNAPSHOT */
+#define DB_AM_SUBDB 0x04000000 /* Subdatabases supported */
+#define DB_AM_SWAP 0x08000000 /* Pages need to be byte-swapped */
+#define DB_AM_TXN 0x10000000 /* Opened in a transaction */
+#define DB_AM_VERIFYING 0x20000000 /* DB handle is in the verifier */
+ uint32_t orig_flags; /* Flags at open, for refresh */
+ uint32_t flags;
+};
+
+/*
+ * Macros for bulk get. These are only intended for the C API.
+ * For C++, use DbMultiple*Iterator.
+ */
+#define DB_MULTIPLE_INIT(pointer, dbt) \
+ (pointer = (uint8_t *)(dbt)->data + \
+ (dbt)->ulen - sizeof(uint32_t))
+#define DB_MULTIPLE_NEXT(pointer, dbt, retdata, retdlen) \
+ do { \
+ if (*((uint32_t *)(pointer)) == (uint32_t)-1) { \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retdata = (uint8_t *) \
+ (dbt)->data + *(uint32_t *)(pointer); \
+ (pointer) = (uint32_t *)(pointer) - 1; \
+ retdlen = *(uint32_t *)(pointer); \
+ (pointer) = (uint32_t *)(pointer) - 1; \
+ if (retdlen == 0 && \
+ retdata == (uint8_t *)(dbt)->data) \
+ retdata = NULL; \
+ } while (0)
+#define DB_MULTIPLE_KEY_NEXT(pointer, dbt, retkey, retklen, retdata, retdlen) \
+ do { \
+ if (*((uint32_t *)(pointer)) == (uint32_t)-1) { \
+ retdata = NULL; \
+ retkey = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retkey = (uint8_t *) \
+ (dbt)->data + *(uint32_t *)(pointer); \
+ (pointer) = (uint32_t *)(pointer) - 1; \
+ retklen = *(uint32_t *)(pointer); \
+ (pointer) = (uint32_t *)(pointer) - 1; \
+ retdata = (uint8_t *) \
+ (dbt)->data + *(uint32_t *)(pointer); \
+ (pointer) = (uint32_t *)(pointer) - 1; \
+ retdlen = *(uint32_t *)(pointer); \
+ (pointer) = (uint32_t *)(pointer) - 1; \
+ } while (0)
+
+#define DB_MULTIPLE_RECNO_NEXT(pointer, dbt, recno, retdata, retdlen) \
+ do { \
+ if (*((uint32_t *)(pointer)) == (uint32_t)0) { \
+ recno = 0; \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ recno = *(uint32_t *)(pointer); \
+ (pointer) = (uint32_t *)(pointer) - 1; \
+ retdata = (uint8_t *) \
+ (dbt)->data + *(uint32_t *)(pointer); \
+ (pointer) = (uint32_t *)(pointer) - 1; \
+ retdlen = *(uint32_t *)(pointer); \
+ (pointer) = (uint32_t *)(pointer) - 1; \
+ } while (0)
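+
+/*
+ * Usage sketch for the bulk-get macros above (C API only).  The buffer
+ * name and size, and the DB_MULTIPLE | DB_NEXT cursor flags, are
+ * illustrative assumptions rather than requirements of this header.
+ *
+ *	DBT key, data;
+ *	void *p, *retdata;
+ *	uint32_t retdlen;
+ *	uint8_t bulk_buf[64 * 1024];		-- illustrative, caller-owned buffer
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	memset(&data, 0, sizeof(data));
+ *	data.data = bulk_buf;
+ *	data.ulen = sizeof(bulk_buf);
+ *	data.flags = DB_DBT_USERMEM;
+ *	if (dbc->get(dbc, &key, &data, DB_MULTIPLE | DB_NEXT) == 0)
+ *		for (DB_MULTIPLE_INIT(p, &data);;) {
+ *			DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
+ *			if (p == NULL)
+ *				break;
+ *			-- retdata/retdlen describe one data item
+ *			-- (retdata may be NULL for an empty item)
+ *		}
+ */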
+
+/*******************************************************
+ * Access method cursors.
+ *******************************************************/
+struct __dbc {
+ DB *dbp; /* Related DB access method. */
+ DB_TXN *txn; /* Associated transaction. */
+ DB_CACHE_PRIORITY priority; /* Priority in cache. */
+
+ /*
+ * Active/free cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__dbc) links;
+ */
+ struct {
+ DBC *tqe_next;
+ DBC **tqe_prev;
+ } links;
+
+ /*
+ * The DBT *'s below are used by the cursor routines to return
+ * data to the user when DBT flags indicate that DB should manage
+ * the returned memory. They point at a DBT containing the buffer
+ * and length that will be used, and "belonging" to the handle that
+ * should "own" this memory. This may be a "my_*" field of this
+ * cursor--the default--or it may be the corresponding field of
+ * another cursor, a DB handle, a join cursor, etc. In general, it
+ * will be whatever handle the user originally used for the current
+ * DB interface call.
+ */
+ DBT *rskey; /* Returned secondary key. */
+ DBT *rkey; /* Returned [primary] key. */
+ DBT *rdata; /* Returned data. */
+
+ DBT my_rskey; /* Space for returned secondary key. */
+ DBT my_rkey; /* Space for returned [primary] key. */
+ DBT my_rdata; /* Space for returned data. */
+
+ void *lref; /* Reference to default locker. */
+ DB_LOCKER *locker; /* Locker for this operation. */
+ DBT lock_dbt; /* DBT referencing lock. */
+ DB_LOCK_ILOCK lock; /* Object to be locked. */
+ DB_LOCK mylock; /* CDB lock held on this cursor. */
+
+ u_int cl_id; /* Remote client id. */
+
+ DBTYPE dbtype; /* Cursor type. */
+
+ DBC_INTERNAL *internal; /* Access method private. */
+
+ /* DBC PUBLIC HANDLE LIST BEGIN */
+ int (*close) __P((DBC *));
+ int (*count) __P((DBC *, db_recno_t *, uint32_t));
+ int (*del) __P((DBC *, uint32_t));
+ int (*dup) __P((DBC *, DBC **, uint32_t));
+ int (*get) __P((DBC *, DBT *, DBT *, uint32_t));
+ int (*get_priority) __P((DBC *, DB_CACHE_PRIORITY *));
+ int (*pget) __P((DBC *, DBT *, DBT *, DBT *, uint32_t));
+ int (*put) __P((DBC *, DBT *, DBT *, uint32_t));
+ int (*set_priority) __P((DBC *, DB_CACHE_PRIORITY));
+ /* DBC PUBLIC HANDLE LIST END */
+
+ /* The following are the method names deprecated in the 4.6 release. */
+ int (*c_close) __P((DBC *));
+ int (*c_count) __P((DBC *, db_recno_t *, uint32_t));
+ int (*c_del) __P((DBC *, uint32_t));
+ int (*c_dup) __P((DBC *, DBC **, uint32_t));
+ int (*c_get) __P((DBC *, DBT *, DBT *, uint32_t));
+ int (*c_pget) __P((DBC *, DBT *, DBT *, DBT *, uint32_t));
+ int (*c_put) __P((DBC *, DBT *, DBT *, uint32_t));
+
+ /* DBC PRIVATE HANDLE LIST BEGIN */
+ int (*am_bulk) __P((DBC *, DBT *, uint32_t));
+ int (*am_close) __P((DBC *, db_pgno_t, int *));
+ int (*am_del) __P((DBC *));
+ int (*am_destroy) __P((DBC *));
+ int (*am_get) __P((DBC *, DBT *, DBT *, uint32_t, db_pgno_t *));
+ int (*am_put) __P((DBC *, DBT *, DBT *, uint32_t, db_pgno_t *));
+ int (*am_writelock) __P((DBC *));
+ /* DBC PRIVATE HANDLE LIST END */
+
+/*
+ * DBC_DONTLOCK and DBC_RECOVER are used during recovery and transaction
+ * abort. If a transaction is being aborted or recovered then DBC_RECOVER
+ * will be set and locking and logging will be disabled on this cursor. If
+ * we are performing a compensating transaction (e.g. free page processing)
+ * then DBC_DONTLOCK will be set to inhibit locking, but logging will still
+ * be required. DBC_DONTLOCK is also used if the whole database is locked.
+ */
+#define DBC_ACTIVE 0x0001 /* Cursor in use. */
+#define DBC_DONTLOCK 0x0002 /* Don't lock on this cursor. */
+#define DBC_MULTIPLE 0x0004 /* Return Multiple data. */
+#define DBC_MULTIPLE_KEY 0x0008 /* Return Multiple keys and data. */
+#define DBC_OPD 0x0010 /* Cursor references off-page dups. */
+#define DBC_OWN_LID 0x0020 /* Free lock id on destroy. */
+#define DBC_READ_COMMITTED 0x0040 /* Cursor has degree 2 isolation. */
+#define DBC_READ_UNCOMMITTED 0x0080 /* Cursor has degree 1 isolation. */
+#define DBC_RECOVER 0x0100 /* Recovery cursor; don't log/lock. */
+#define DBC_RMW 0x0200 /* Acquire write flag in read op. */
+#define DBC_TRANSIENT 0x0400 /* Cursor is transient. */
+#define DBC_WRITECURSOR 0x0800 /* Cursor may be used to write (CDB). */
+#define DBC_WRITER 0x1000 /* Cursor immediately writing (CDB). */
+ uint32_t flags;
+};
+
+/* Key range statistics structure */
+struct __key_range {
+ double less;
+ double equal;
+ double greater;
+};
+
+/* Btree/Recno statistics structure. */
+struct __db_bt_stat {
+ uint32_t bt_magic; /* Magic number. */
+ uint32_t bt_version; /* Version number. */
+ uint32_t bt_metaflags; /* Metadata flags. */
+ uint32_t bt_nkeys; /* Number of unique keys. */
+ uint32_t bt_ndata; /* Number of data items. */
+ uint32_t bt_pagecnt; /* Page count. */
+ uint32_t bt_pagesize; /* Page size. */
+ uint32_t bt_minkey; /* Minkey value. */
+ uint32_t bt_re_len; /* Fixed-length record length. */
+ uint32_t bt_re_pad; /* Fixed-length record pad. */
+ uint32_t bt_levels; /* Tree levels. */
+ uint32_t bt_int_pg; /* Internal pages. */
+ uint32_t bt_leaf_pg; /* Leaf pages. */
+ uint32_t bt_dup_pg; /* Duplicate pages. */
+ uint32_t bt_over_pg; /* Overflow pages. */
+ uint32_t bt_empty_pg; /* Empty pages. */
+ uint32_t bt_free; /* Pages on the free list. */
+ uint32_t bt_int_pgfree; /* Bytes free in internal pages. */
+ uint32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */
+ uint32_t bt_dup_pgfree; /* Bytes free in duplicate pages. */
+ uint32_t bt_over_pgfree; /* Bytes free in overflow pages. */
+};
+
+struct __db_compact {
+ /* Input Parameters. */
+ uint32_t compact_fillpercent; /* Desired fillfactor: 1-100 */
+ db_timeout_t compact_timeout; /* Lock timeout. */
+ uint32_t compact_pages; /* Max pages to process. */
+ /* Output Stats. */
+ uint32_t compact_pages_free; /* Number of pages freed. */
+	uint32_t compact_pages_examine; /* Number of pages examined. */
+ uint32_t compact_levels; /* Number of levels removed. */
+ uint32_t compact_deadlock; /* Number of deadlocks. */
+ db_pgno_t compact_pages_truncated; /* Pages truncated to OS. */
+ /* Internal. */
+ db_pgno_t compact_truncate; /* Page number for truncation */
+};
+
+/* Hash statistics structure. */
+struct __db_h_stat {
+ uint32_t hash_magic; /* Magic number. */
+ uint32_t hash_version; /* Version number. */
+ uint32_t hash_metaflags; /* Metadata flags. */
+ uint32_t hash_nkeys; /* Number of unique keys. */
+ uint32_t hash_ndata; /* Number of data items. */
+ uint32_t hash_pagecnt; /* Page count. */
+ uint32_t hash_pagesize; /* Page size. */
+ uint32_t hash_ffactor; /* Fill factor specified at create. */
+ uint32_t hash_buckets; /* Number of hash buckets. */
+ uint32_t hash_free; /* Pages on the free list. */
+ uint32_t hash_bfree; /* Bytes free on bucket pages. */
+ uint32_t hash_bigpages; /* Number of big key/data pages. */
+ uint32_t hash_big_bfree; /* Bytes free on big item pages. */
+ uint32_t hash_overflows; /* Number of overflow pages. */
+ uint32_t hash_ovfl_free; /* Bytes free on ovfl pages. */
+ uint32_t hash_dup; /* Number of dup pages. */
+ uint32_t hash_dup_free; /* Bytes free on duplicate pages. */
+};
+
+/* Queue statistics structure. */
+struct __db_qam_stat {
+ uint32_t qs_magic; /* Magic number. */
+ uint32_t qs_version; /* Version number. */
+ uint32_t qs_metaflags; /* Metadata flags. */
+ uint32_t qs_nkeys; /* Number of unique keys. */
+ uint32_t qs_ndata; /* Number of data items. */
+ uint32_t qs_pagesize; /* Page size. */
+ uint32_t qs_extentsize; /* Pages per extent. */
+ uint32_t qs_pages; /* Data pages. */
+ uint32_t qs_re_len; /* Fixed-length record length. */
+ uint32_t qs_re_pad; /* Fixed-length record pad. */
+ uint32_t qs_pgfree; /* Bytes free in data pages. */
+ uint32_t qs_first_recno; /* First not deleted record. */
+ uint32_t qs_cur_recno; /* Next available record number. */
+};
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+#define DB_REGION_MAGIC 0x120897 /* Environment magic number. */
+
+/* Database Environment handle. */
+struct __db_env {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ /* Error message callback. */
+ void (*db_errcall) __P((const DB_ENV *, const char *, const char *));
+ FILE *db_errfile; /* Error message file stream. */
+ const char *db_errpfx; /* Error message prefix. */
+
+ FILE *db_msgfile; /* Statistics message file stream. */
+ /* Statistics message callback. */
+ void (*db_msgcall) __P((const DB_ENV *, const char *));
+
+ /* Other Callbacks. */
+ void (*db_feedback) __P((DB_ENV *, int, int));
+ void (*db_paniccall) __P((DB_ENV *, int));
+ void (*db_event_func) __P((DB_ENV *, uint32_t, void *));
+
+ /* App-specified alloc functions. */
+ void *(*db_malloc) __P((size_t));
+ void *(*db_realloc) __P((void *, size_t));
+ void (*db_free) __P((void *));
+
+ /* Application callback to copy data to/from a custom data source. */
+#define DB_USERCOPY_GETDATA 0x0001
+#define DB_USERCOPY_SETDATA 0x0002
+ int (*dbt_usercopy)
+ __P((DBT *, uint32_t, void *, uint32_t, uint32_t));
+
+ /*
+ * Currently, the verbose list is a bit field with room for 32
+	 * entries.  There's no reason that it needs to be limited; if
+ * there are ever more than 32 entries, convert to a bit array.
+ */
+#define DB_VERB_DEADLOCK 0x0001 /* Deadlock detection information. */
+#define DB_VERB_FILEOPS 0x0002 /* Major file operations. */
+#define DB_VERB_FILEOPS_ALL 0x0004 /* All file operations. */
+#define DB_VERB_RECOVERY 0x0008 /* Recovery information. */
+#define DB_VERB_REGISTER 0x0010 /* Information about DB_REGISTER. */
+#define DB_VERB_REPLICATION 0x0020 /* Replication information. */
+#define DB_VERB_WAITSFOR 0x0040 /* Dump waits-for table. */
+ uint32_t verbose; /* Verbose output. */
+
+ void *app_private; /* Application-private handle. */
+
+ int (*app_dispatch) /* User-specified recovery dispatch. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+
+ /* Mutexes. */
+ uint32_t mutex_align; /* Mutex alignment */
+ uint32_t mutex_cnt; /* Number of mutexes to configure */
+ uint32_t mutex_inc; /* Number of mutexes to add */
+ uint32_t mutex_tas_spins;/* Test-and-set spin count */
+
+ struct {
+ int alloc_id; /* Allocation ID argument */
+ uint32_t flags; /* Flags argument */
+ } *mutex_iq; /* Initial mutexes queue */
+ u_int mutex_iq_next; /* Count of initial mutexes */
+ u_int mutex_iq_max; /* Maximum initial mutexes */
+
+ /* Locking. */
+ uint8_t *lk_conflicts; /* Two dimensional conflict matrix. */
+ int lk_modes; /* Number of lock modes in table. */
+ uint32_t lk_max; /* Maximum number of locks. */
+ uint32_t lk_max_lockers;/* Maximum number of lockers. */
+ uint32_t lk_max_objects;/* Maximum number of locked objects. */
+ uint32_t lk_detect; /* Deadlock detect on all conflicts. */
+ db_timeout_t lk_timeout; /* Lock timeout period. */
+
+ /* Logging. */
+ uint32_t lg_bsize; /* Buffer size. */
+ uint32_t lg_size; /* Log file size. */
+ uint32_t lg_regionmax; /* Region size. */
+ int lg_filemode; /* Log file permission mode. */
+
+ /* Memory pool. */
+ u_int mp_ncache; /* Initial number of cache regions. */
+ uint32_t mp_gbytes; /* Cache size: GB. */
+ uint32_t mp_bytes; /* Cache size: bytes. */
+ uint32_t mp_max_gbytes; /* Maximum cache size: GB. */
+ uint32_t mp_max_bytes; /* Maximum cache size: bytes. */
+ size_t mp_mmapsize; /* Maximum file size for mmap. */
+ int mp_maxopenfd; /* Maximum open file descriptors. */
+ int mp_maxwrite; /* Maximum buffers to write. */
+ db_timeout_t mp_maxwrite_sleep; /* Sleep after writing max buffers. */
+
+ /* Transactions. */
+ uint32_t tx_max; /* Maximum number of transactions. */
+ time_t tx_timestamp; /* Recover to specific timestamp. */
+ db_timeout_t tx_timeout; /* Timeout for transactions. */
+
+ /* Thread tracking. */
+ uint32_t thr_nbucket; /* Number of hash buckets. */
+ uint32_t thr_max; /* Max before garbage collection. */
+ void *thr_hashtab; /* Hash table of DB_THREAD_INFO. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ db_mutex_t mtx_env; /* General DbEnv structure mutex. */
+
+ pid_t pid_cache; /* Cached process ID. */
+
+ /* User files, paths. */
+ char *db_home; /* Database home. */
+ char *db_log_dir; /* Database log file directory. */
+ char *db_tmp_dir; /* Database tmp file directory. */
+
+ char **db_data_dir; /* Database data file directories. */
+ int data_cnt; /* Database data file slots. */
+ int data_next; /* Next Database data file slot. */
+
+ int db_mode; /* Default open permissions. */
+ int dir_mode; /* Intermediate directory perms. */
+ void *env_lref; /* Locker in non-threaded handles. */
+ uint32_t open_flags; /* Flags passed to DB_ENV->open. */
+
+ void *reginfo; /* REGINFO structure reference. */
+ DB_FH *lockfhp; /* fcntl(2) locking file handle. */
+
+ DB_FH *registry; /* DB_REGISTER file handle. */
+ uint32_t registry_off; /*
+ * Offset of our slot. We can't use
+ * off_t because its size depends on
+ * build settings.
+ */
+
+ /* Return IDs. */
+ void (*thread_id) __P((DB_ENV *, pid_t *, db_threadid_t *));
+ /* Return if IDs alive. */
+ int (*is_alive)
+ __P((DB_ENV *, pid_t, db_threadid_t, uint32_t));
+ /* Format IDs into a string. */
+ char *(*thread_id_string)
+ __P((DB_ENV *, pid_t, db_threadid_t, char *));
+
+ int (**recover_dtab) /* Dispatch table for recover funcs. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t recover_dtab_size;
+ /* Slots in the dispatch table. */
+
+ void *cl_handle; /* RPC: remote client handle. */
+ u_int cl_id; /* RPC: remote client env id. */
+
+ int db_ref; /* DB reference count. */
+
+ long shm_key; /* shmget(2) key. */
+
+ /*
+ * List of open file handles for this DB_ENV. Must be protected
+ * for multi-threaded support.
+ *
+ * !!!
+ * Explicit representation of structure in queue.h.
+ * TAILQ_HEAD(__fdlist, __fh_t);
+ */
+ struct __fdlist {
+ struct __fh_t *tqh_first;
+ struct __fh_t **tqh_last;
+ } fdlist;
+
+ /*
+ * List of open DB handles for this DB_ENV, used for cursor
+ * adjustment. Must be protected for multi-threaded support.
+ *
+ * !!!
+ * Explicit representation of structure in queue.h.
+ * TAILQ_HEAD(__dblist, __db);
+ */
+ db_mutex_t mtx_dblist; /* Mutex. */
+ struct __dblist {
+ struct __db *tqh_first;
+ struct __db **tqh_last;
+ } dblist;
+
+ /*
+ * XA support.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_env) links;
+ * TAILQ_HEAD(xa_txn, __db_txn);
+ */
+ struct {
+ struct __db_env *tqe_next;
+ struct __db_env **tqe_prev;
+ } links;
+ struct __xa_txn { /* XA Active Transactions. */
+ struct __db_txn *tqh_first;
+ struct __db_txn **tqh_last;
+ } xa_txn;
+ int xa_rmid; /* XA Resource Manager ID. */
+
+ char *passwd; /* Cryptography support. */
+ size_t passwd_len;
+ void *crypto_handle; /* Primary handle. */
+ db_mutex_t mtx_mt; /* Mersenne Twister mutex. */
+ int mti; /* Mersenne Twister index. */
+ u_long *mt; /* Mersenne Twister state vector. */
+
+ /* API-private structure. */
+ void *api1_internal; /* C++, Perl API private */
+ void *api2_internal; /* Java API private */
+
+ DB_LOCKTAB *lk_handle; /* Lock handle. */
+ DB_LOG *lg_handle; /* Log handle. */
+ DB_MPOOL *mp_handle; /* Mpool handle. */
+ DB_MUTEXMGR *mutex_handle; /* Mutex handle. */
+ DB_REP *rep_handle; /* Replication handle. */
+ DB_TXNMGR *tx_handle; /* Txn handle. */
+
+ /* DB_ENV PUBLIC HANDLE LIST BEGIN */
+ int (*cdsgroup_begin) __P((DB_ENV *, DB_TXN **));
+ int (*close) __P((DB_ENV *, uint32_t));
+ int (*dbremove) __P((DB_ENV *,
+ DB_TXN *, const char *, const char *, uint32_t));
+ int (*dbrename) __P((DB_ENV *,
+ DB_TXN *, const char *, const char *, const char *, uint32_t));
+ void (*err) __P((const DB_ENV *, int, const char *, ...));
+ void (*errx) __P((const DB_ENV *, const char *, ...));
+ int (*failchk) __P((DB_ENV *, uint32_t));
+ int (*fileid_reset) __P((DB_ENV *, const char *, uint32_t));
+ int (*get_cachesize) __P((DB_ENV *, uint32_t *, uint32_t *, int *));
+ int (*get_cache_max) __P((DB_ENV *, uint32_t *, uint32_t *));
+ int (*get_data_dirs) __P((DB_ENV *, const char ***));
+ int (*get_encrypt_flags) __P((DB_ENV *, uint32_t *));
+ void (*get_errfile) __P((DB_ENV *, FILE **));
+ void (*get_errpfx) __P((DB_ENV *, const char **));
+ int (*get_flags) __P((DB_ENV *, uint32_t *));
+ int (*get_home) __P((DB_ENV *, const char **));
+ int (*get_lg_bsize) __P((DB_ENV *, uint32_t *));
+ int (*get_lg_dir) __P((DB_ENV *, const char **));
+ int (*get_lg_filemode) __P((DB_ENV *, int *));
+ int (*get_lg_max) __P((DB_ENV *, uint32_t *));
+ int (*get_lg_regionmax) __P((DB_ENV *, uint32_t *));
+ int (*get_lk_conflicts) __P((DB_ENV *, const uint8_t **, int *));
+ int (*get_lk_detect) __P((DB_ENV *, uint32_t *));
+ int (*get_lk_max_lockers) __P((DB_ENV *, uint32_t *));
+ int (*get_lk_max_locks) __P((DB_ENV *, uint32_t *));
+ int (*get_lk_max_objects) __P((DB_ENV *, uint32_t *));
+ int (*get_mp_max_openfd) __P((DB_ENV *, int *));
+ int (*get_mp_max_write) __P((DB_ENV *, int *, db_timeout_t *));
+ int (*get_mp_mmapsize) __P((DB_ENV *, size_t *));
+ void (*get_msgfile) __P((DB_ENV *, FILE **));
+ int (*get_open_flags) __P((DB_ENV *, uint32_t *));
+ int (*get_shm_key) __P((DB_ENV *, long *));
+ int (*get_thread_count) __P((DB_ENV *, uint32_t *));
+ int (*get_timeout) __P((DB_ENV *, db_timeout_t *, uint32_t));
+ int (*get_tmp_dir) __P((DB_ENV *, const char **));
+ int (*get_tx_max) __P((DB_ENV *, uint32_t *));
+ int (*get_tx_timestamp) __P((DB_ENV *, time_t *));
+ int (*get_verbose) __P((DB_ENV *, uint32_t, int *));
+ int (*is_bigendian) __P((void));
+ int (*lock_detect) __P((DB_ENV *, uint32_t, uint32_t, int *));
+ int (*lock_get) __P((DB_ENV *,
+ uint32_t, uint32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+ int (*lock_id) __P((DB_ENV *, uint32_t *));
+ int (*lock_id_free) __P((DB_ENV *, uint32_t));
+ int (*lock_put) __P((DB_ENV *, DB_LOCK *));
+ int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, uint32_t));
+ int (*lock_stat_print) __P((DB_ENV *, uint32_t));
+ int (*lock_vec) __P((DB_ENV *,
+ uint32_t, uint32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+ int (*log_archive) __P((DB_ENV *, char **[], uint32_t));
+ int (*log_cursor) __P((DB_ENV *, DB_LOGC **, uint32_t));
+ int (*log_file) __P((DB_ENV *, const DB_LSN *, char *, size_t));
+ int (*log_flush) __P((DB_ENV *, const DB_LSN *));
+ int (*log_printf) __P((DB_ENV *, DB_TXN *, const char *, ...));
+ int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, uint32_t));
+ int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, uint32_t));
+ int (*log_stat_print) __P((DB_ENV *, uint32_t));
+ int (*lsn_reset) __P((DB_ENV *, const char *, uint32_t));
+ int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, uint32_t));
+ int (*memp_register) __P((DB_ENV *, int, int (*)(DB_ENV *,
+ db_pgno_t, void *, DBT *), int (*)(DB_ENV *,
+ db_pgno_t, void *, DBT *)));
+ int (*memp_stat) __P((DB_ENV *,
+ DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, uint32_t));
+ int (*memp_stat_print) __P((DB_ENV *, uint32_t));
+ int (*memp_sync) __P((DB_ENV *, DB_LSN *));
+ int (*memp_trickle) __P((DB_ENV *, int, int *));
+ int (*mutex_alloc) __P((DB_ENV *, uint32_t, db_mutex_t *));
+ int (*mutex_free) __P((DB_ENV *, db_mutex_t));
+ int (*mutex_get_align) __P((DB_ENV *, uint32_t *));
+ int (*mutex_get_increment) __P((DB_ENV *, uint32_t *));
+ int (*mutex_get_max) __P((DB_ENV *, uint32_t *));
+ int (*mutex_get_tas_spins) __P((DB_ENV *, uint32_t *));
+ int (*mutex_lock) __P((DB_ENV *, db_mutex_t));
+ int (*mutex_set_align) __P((DB_ENV *, uint32_t));
+ int (*mutex_set_increment) __P((DB_ENV *, uint32_t));
+ int (*mutex_set_max) __P((DB_ENV *, uint32_t));
+ int (*mutex_set_tas_spins) __P((DB_ENV *, uint32_t));
+ int (*mutex_stat) __P((DB_ENV *, DB_MUTEX_STAT **, uint32_t));
+ int (*mutex_stat_print) __P((DB_ENV *, uint32_t));
+ int (*mutex_unlock) __P((DB_ENV *, db_mutex_t));
+ int (*open) __P((DB_ENV *, const char *, uint32_t, int));
+ int (*remove) __P((DB_ENV *, const char *, uint32_t));
+ int (*rep_elect) __P((DB_ENV *, int, int, uint32_t));
+ int (*rep_flush) __P((DB_ENV *));
+ int (*rep_get_config) __P((DB_ENV *, uint32_t, int *));
+ int (*rep_get_limit) __P((DB_ENV *, uint32_t *, uint32_t *));
+ int (*rep_get_nsites) __P((DB_ENV *, int *));
+ int (*rep_get_priority) __P((DB_ENV *, int *));
+ int (*rep_get_timeout) __P((DB_ENV *, int, uint32_t *));
+ int (*rep_process_message)
+ __P((DB_ENV *, DBT *, DBT *, int, DB_LSN *));
+ int (*rep_set_config) __P((DB_ENV *, uint32_t, int));
+ int (*rep_set_lease) __P((DB_ENV *, uint32_t, uint32_t));
+ int (*rep_set_limit) __P((DB_ENV *, uint32_t, uint32_t));
+ int (*rep_set_nsites) __P((DB_ENV *, int));
+ int (*rep_set_priority) __P((DB_ENV *, int));
+ int (*rep_set_timeout) __P((DB_ENV *, int, db_timeout_t));
+ int (*rep_set_transport) __P((DB_ENV *, int, int (*)(DB_ENV *,
+ const DBT *, const DBT *, const DB_LSN *, int, uint32_t)));
+ int (*rep_start) __P((DB_ENV *, DBT *, uint32_t));
+ int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, uint32_t));
+ int (*rep_stat_print) __P((DB_ENV *, uint32_t));
+ int (*rep_sync) __P((DB_ENV *, uint32_t));
+ int (*repmgr_add_remote_site) __P((DB_ENV *, const char *, u_int,
+ int *, uint32_t));
+ int (*repmgr_get_ack_policy) __P((DB_ENV *, int *));
+ int (*repmgr_set_ack_policy) __P((DB_ENV *, int));
+ int (*repmgr_set_local_site) __P((DB_ENV *, const char *, u_int,
+ uint32_t));
+ int (*repmgr_site_list) __P((DB_ENV *, u_int *,
+ DB_REPMGR_SITE **));
+ int (*repmgr_start) __P((DB_ENV *, int, uint32_t));
+ int (*repmgr_stat) __P((DB_ENV *, DB_REPMGR_STAT **, uint32_t));
+ int (*repmgr_stat_print) __P((DB_ENV *, uint32_t));
+ int (*set_alloc) __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_app_dispatch)
+ __P((DB_ENV *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+ int (*set_cachesize) __P((DB_ENV *, uint32_t, uint32_t, int));
+ int (*set_cache_max) __P((DB_ENV *, uint32_t, uint32_t));
+ int (*set_data_dir) __P((DB_ENV *, const char *));
+ int (*set_encrypt) __P((DB_ENV *, const char *, uint32_t));
+ void (*set_errcall) __P((DB_ENV *,
+ void (*)(const DB_ENV *, const char *, const char *)));
+ void (*set_errfile) __P((DB_ENV *, FILE *));
+ void (*set_errpfx) __P((DB_ENV *, const char *));
+ int (*set_event_notify)
+ __P((DB_ENV *, void (*)(DB_ENV *, uint32_t, void *)));
+ int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+ int (*set_flags) __P((DB_ENV *, uint32_t, int));
+ int (*set_intermediate_dir) __P((DB_ENV *, int, uint32_t));
+ int (*set_isalive) __P((DB_ENV *,
+ int (*)(DB_ENV *, pid_t, db_threadid_t, uint32_t)));
+ int (*set_lg_bsize) __P((DB_ENV *, uint32_t));
+ int (*set_lg_dir) __P((DB_ENV *, const char *));
+ int (*set_lg_filemode) __P((DB_ENV *, int));
+ int (*set_lg_max) __P((DB_ENV *, uint32_t));
+ int (*set_lg_regionmax) __P((DB_ENV *, uint32_t));
+ int (*set_lk_conflicts) __P((DB_ENV *, uint8_t *, int));
+ int (*set_lk_detect) __P((DB_ENV *, uint32_t));
+ int (*set_lk_max_lockers) __P((DB_ENV *, uint32_t));
+ int (*set_lk_max_locks) __P((DB_ENV *, uint32_t));
+ int (*set_lk_max_objects) __P((DB_ENV *, uint32_t));
+ int (*set_mp_max_openfd) __P((DB_ENV *, int));
+ int (*set_mp_max_write) __P((DB_ENV *, int, db_timeout_t));
+ int (*set_mp_mmapsize) __P((DB_ENV *, size_t));
+ void (*set_msgcall)
+ __P((DB_ENV *, void (*)(const DB_ENV *, const char *)));
+ void (*set_msgfile) __P((DB_ENV *, FILE *));
+ int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int)));
+ int (*set_rep_request) __P((DB_ENV *, uint32_t, uint32_t));
+ int (*set_rpc_server)
+ __P((DB_ENV *, void *, const char *, long, long, uint32_t));
+ int (*set_shm_key) __P((DB_ENV *, long));
+ int (*set_thread_count) __P((DB_ENV *, uint32_t));
+ int (*set_thread_id) __P((DB_ENV *,
+ void (*)(DB_ENV *, pid_t *, db_threadid_t *)));
+ int (*set_thread_id_string) __P((DB_ENV *,
+ char *(*)(DB_ENV *, pid_t, db_threadid_t, char *)));
+ int (*set_timeout) __P((DB_ENV *, db_timeout_t, uint32_t));
+ int (*set_tmp_dir) __P((DB_ENV *, const char *));
+ int (*set_tx_max) __P((DB_ENV *, uint32_t));
+ int (*set_tx_timestamp) __P((DB_ENV *, time_t *));
+ int (*set_verbose) __P((DB_ENV *, uint32_t, int));
+ int (*stat_print) __P((DB_ENV *, uint32_t));
+ int (*txn_begin) __P((DB_ENV *, DB_TXN *, DB_TXN **, uint32_t));
+ int (*txn_checkpoint) __P((DB_ENV *, uint32_t, uint32_t, uint32_t));
+ int (*txn_recover)
+ __P((DB_ENV *, DB_PREPLIST *, long, long *, uint32_t));
+ int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, uint32_t));
+ int (*txn_stat_print) __P((DB_ENV *, uint32_t));
+ /* DB_ENV PUBLIC HANDLE LIST END */
+
+ /* DB_ENV PRIVATE HANDLE LIST BEGIN */
+ int (*prdbt) __P((DBT *,
+ int, const char *, void *, int (*)(void *, const void *), int));
+ /* DB_ENV PRIVATE HANDLE LIST END */
+
+#define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */
+#define DB_TEST_ELECTVOTE1 2 /* after sending VOTE1 */
+#define DB_TEST_POSTDESTROY 3 /* after destroy op */
+#define DB_TEST_POSTLOG 4 /* after logging all pages */
+#define DB_TEST_POSTLOGMETA 5 /* after logging meta in btree */
+#define DB_TEST_POSTOPEN 6 /* after __os_open */
+#define DB_TEST_POSTSYNC 7 /* after syncing the log */
+#define DB_TEST_PREDESTROY 8 /* before destroy op */
+#define DB_TEST_PREOPEN 9 /* before __os_open */
+#define DB_TEST_RECYCLE 10 /* test rep and txn_recycle */
+#define DB_TEST_SUBDB_LOCKS 11 /* subdb locking tests */
+ int test_abort; /* Abort value for testing. */
+ int test_check; /* Checkpoint value for testing. */
+ int test_copy; /* Copy value for testing. */
+
+#define DB_ENV_AUTO_COMMIT 0x00000001 /* DB_AUTO_COMMIT. */
+#define DB_ENV_CDB 0x00000002 /* DB_INIT_CDB. */
+#define DB_ENV_CDB_ALLDB 0x00000004 /* CDB environment wide locking. */
+#define DB_ENV_DBLOCAL 0x00000008 /* Environment for a private DB. */
+#define DB_ENV_DIRECT_DB 0x00000010 /* DB_DIRECT_DB set. */
+#define DB_ENV_DIRECT_LOG 0x00000020 /* DB_DIRECT_LOG set. */
+#define DB_ENV_DSYNC_DB 0x00000040 /* DB_DSYNC_DB set. */
+#define DB_ENV_DSYNC_LOG 0x00000080 /* DB_DSYNC_LOG set. */
+#define DB_ENV_LOCKDOWN 0x00000100 /* DB_LOCKDOWN set. */
+#define DB_ENV_LOG_AUTOREMOVE 0x00000200 /* DB_LOG_AUTOREMOVE set. */
+#define DB_ENV_LOG_INMEMORY 0x00000400 /* DB_LOG_INMEMORY set. */
+#define DB_ENV_MULTIVERSION 0x00000800 /* DB_MULTIVERSION set. */
+#define DB_ENV_NOLOCKING 0x00001000 /* DB_NOLOCKING set. */
+#define DB_ENV_NOMMAP 0x00002000 /* DB_NOMMAP set. */
+#define DB_ENV_NOPANIC 0x00004000 /* Okay if panic set. */
+#define DB_ENV_NO_OUTPUT_SET 0x00008000 /* No output channel set. */
+#define DB_ENV_OPEN_CALLED 0x00010000 /* DB_ENV->open called. */
+#define DB_ENV_OVERWRITE 0x00020000 /* DB_OVERWRITE set. */
+#define DB_ENV_PRIVATE 0x00040000 /* DB_PRIVATE set. */
+#define DB_ENV_RECOVER_FATAL 0x00080000 /* Doing fatal recovery in env. */
+#define DB_ENV_REF_COUNTED 0x00100000 /* Region references this handle. */
+#define DB_ENV_REGION_INIT 0x00200000 /* DB_REGION_INIT set. */
+#define DB_ENV_RPCCLIENT 0x00400000 /* DB_RPCCLIENT set. */
+#define DB_ENV_RPCCLIENT_GIVEN 0x00800000 /* User-supplied RPC client struct */
+#define DB_ENV_SYSTEM_MEM 0x01000000 /* DB_SYSTEM_MEM set. */
+#define DB_ENV_THREAD 0x02000000 /* DB_THREAD set. */
+#define DB_ENV_TIME_NOTGRANTED 0x04000000 /* DB_TIME_NOTGRANTED set. */
+#define DB_ENV_TXN_NOSYNC 0x08000000 /* DB_TXN_NOSYNC set. */
+#define DB_ENV_TXN_NOWAIT 0x10000000 /* DB_TXN_NOWAIT set. */
+#define DB_ENV_TXN_SNAPSHOT 0x20000000 /* DB_TXN_SNAPSHOT set. */
+#define DB_ENV_TXN_WRITE_NOSYNC 0x40000000 /* DB_TXN_WRITE_NOSYNC set. */
+#define DB_ENV_YIELDCPU 0x80000000 /* DB_YIELDCPU set. */
+ uint32_t flags;
+};
+
+#ifndef DB_DBM_HSEARCH
+#define DB_DBM_HSEARCH 0 /* No historic interfaces by default. */
+#endif
+#if DB_DBM_HSEARCH != 0
+/*******************************************************
+ * Dbm/Ndbm historic interfaces.
+ *******************************************************/
+typedef struct __db DBM;
+
+#define DBM_INSERT 0 /* Flags to dbm_store(). */
+#define DBM_REPLACE 1
+
+/*
+ * The DB support for ndbm(3) always appends this suffix to the
+ * file name to avoid overwriting the user's original database.
+ */
+#define DBM_SUFFIX ".db"
+
+#if defined(_XPG4_2)
+typedef struct {
+ char *dptr;
+ size_t dsize;
+} datum;
+#else
+typedef struct {
+ char *dptr;
+ int dsize;
+} datum;
+#endif
+
+/*
+ * Translate NDBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ */
+#define dbm_clearerr(a) __db_ndbm_clearerr(a)
+#define dbm_close(a) __db_ndbm_close(a)
+#define dbm_delete(a, b) __db_ndbm_delete(a, b)
+#define dbm_dirfno(a) __db_ndbm_dirfno(a)
+#define dbm_error(a) __db_ndbm_error(a)
+#define dbm_fetch(a, b) __db_ndbm_fetch(a, b)
+#define dbm_firstkey(a) __db_ndbm_firstkey(a)
+#define dbm_nextkey(a) __db_ndbm_nextkey(a)
+#define dbm_open(a, b, c) __db_ndbm_open(a, b, c)
+#define dbm_pagfno(a) __db_ndbm_pagfno(a)
+#define dbm_rdonly(a) __db_ndbm_rdonly(a)
+#define dbm_store(a, b, c, d) \
+ __db_ndbm_store(a, b, c, d)
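+
+/*
+ * Usage sketch for the ndbm(3) compatibility macros above.  The file
+ * name, open mode, and key/data values are illustrative assumptions;
+ * DBM_SUFFIX (".db") is appended to the file name by the library.
+ *
+ *	DBM *dbm;
+ *	datum key, val, found;
+ *
+ *	dbm = dbm_open("example", O_CREAT | O_RDWR, 0644);
+ *	key.dptr = (char *)"k";   key.dsize = 1;
+ *	val.dptr = (char *)"v";   val.dsize = 1;
+ *	(void)dbm_store(dbm, key, val, DBM_REPLACE);	-- __db_ndbm_store
+ *	found = dbm_fetch(dbm, key);			-- __db_ndbm_fetch
+ *	dbm_close(dbm);					-- __db_ndbm_close
+ */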
+
+/*
+ * Translate DBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ *
+ * The global variables dbrdonly, dirf and pagf were not retained when 4BSD
+ * replaced the dbm interface with ndbm, and are not supported here.
+ */
+#define dbminit(a) __db_dbm_init(a)
+#define dbmclose __db_dbm_close
+#if !defined(__cplusplus)
+#define delete(a) __db_dbm_delete(a)
+#endif
+#define fetch(a) __db_dbm_fetch(a)
+#define firstkey __db_dbm_firstkey
+#define nextkey(a) __db_dbm_nextkey(a)
+#define store(a, b) __db_dbm_store(a, b)
+
+/*******************************************************
+ * Hsearch historic interface.
+ *******************************************************/
+typedef enum {
+ FIND, ENTER
+} ACTION;
+
+typedef struct entry {
+ char *key;
+ char *data;
+} ENTRY;
+
+#define hcreate(a) __db_hcreate(a)
+#define hdestroy __db_hdestroy
+#define hsearch(a, b) __db_hsearch(a, b)
+
+#endif /* DB_DBM_HSEARCH */
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+#endif /* !_DB_H_ */
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_PROT_IN_
+#define _DB_EXT_PROT_IN_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int db_create __P((DB **, DB_ENV *, uint32_t));
+char *db_strerror __P((int));
+int db_env_create __P((DB_ENV **, uint32_t));
+char *db_version __P((int *, int *, int *));
+int log_compare __P((const DB_LSN *, const DB_LSN *));
+int db_env_set_func_close __P((int (*)(int)));
+int db_env_set_func_dirfree __P((void (*)(char **, int)));
+int db_env_set_func_dirlist __P((int (*)(const char *, char ***, int *)));
+int db_env_set_func_exists __P((int (*)(const char *, int *)));
+int db_env_set_func_free __P((void (*)(void *)));
+int db_env_set_func_fsync __P((int (*)(int)));
+int db_env_set_func_ftruncate __P((int (*)(int, off_t)));
+int db_env_set_func_ioinfo __P((int (*)(const char *, int, uint32_t *, uint32_t *, uint32_t *)));
+int db_env_set_func_malloc __P((void *(*)(size_t)));
+int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **)));
+int db_env_set_func_pread __P((ssize_t (*)(int, void *, size_t, off_t)));
+int db_env_set_func_pwrite __P((ssize_t (*)(int, const void *, size_t, off_t)));
+int db_env_set_func_open __P((int (*)(const char *, int, ...)));
+int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t)));
+int db_env_set_func_realloc __P((void *(*)(void *, size_t)));
+int db_env_set_func_rename __P((int (*)(const char *, const char *)));
+int db_env_set_func_seek __P((int (*)(int, off_t, int)));
+int db_env_set_func_sleep __P((int (*)(u_long, u_long)));
+int db_env_set_func_unlink __P((int (*)(const char *)));
+int db_env_set_func_unmap __P((int (*)(void *, size_t)));
+int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t)));
+int db_env_set_func_yield __P((int (*)(void)));
+int db_sequence_create __P((DB_SEQUENCE **, DB *, uint32_t));
+#if DB_DBM_HSEARCH != 0
+int __db_ndbm_clearerr __P((DBM *));
+void __db_ndbm_close __P((DBM *));
+int __db_ndbm_delete __P((DBM *, datum));
+int __db_ndbm_dirfno __P((DBM *));
+int __db_ndbm_error __P((DBM *));
+datum __db_ndbm_fetch __P((DBM *, datum));
+datum __db_ndbm_firstkey __P((DBM *));
+datum __db_ndbm_nextkey __P((DBM *));
+DBM *__db_ndbm_open __P((const char *, int, int));
+int __db_ndbm_pagfno __P((DBM *));
+int __db_ndbm_rdonly __P((DBM *));
+int __db_ndbm_store __P((DBM *, datum, datum, int));
+int __db_dbm_close __P((void));
+int __db_dbm_delete __P((datum));
+datum __db_dbm_fetch __P((datum));
+datum __db_dbm_firstkey __P((void));
+int __db_dbm_init __P((char *));
+datum __db_dbm_nextkey __P((datum));
+int __db_dbm_store __P((datum, datum));
+#endif
+#if DB_DBM_HSEARCH != 0
+int __db_hcreate __P((size_t));
+ENTRY *__db_hsearch __P((ENTRY, ACTION));
+void __db_hdestroy __P((void));
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_EXT_PROT_IN_ */
diff --git a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
new file mode 100644
index 00000000..95d6207e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc
@@ -0,0 +1,845 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Make a db.h that will be link-time compatible with Sleepycat's Berkeley DB. */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+// Don't include toku_assert.h. Just use assert.h
+#include <assert.h>
+#include <string.h>
+#include <sys/types.h>
+
+#define VISIBLE "__attribute__((__visibility__(\"default\")))"
+
+#define FIELD_LIMIT 100
+struct fieldinfo {
+ const char *decl_format_string;
+ const char *name;
+ size_t offset;
+} fields[FIELD_LIMIT];
+static int field_counter=0;
+
+static int compare_fields (const void *av, const void *bv) {
+ const struct fieldinfo *a = (const struct fieldinfo *) av;
+ const struct fieldinfo *b = (const struct fieldinfo *) bv;
+ if (a->offset< b->offset) return -1;
+ if (a->offset==b->offset) return 0;
+ return +1;
+}
+
+#define STRUCT_SETUP(typ, fname, fstring) ({ \
+ assert(field_counter<FIELD_LIMIT); \
+ fields[field_counter].decl_format_string = fstring; \
+ fields[field_counter].name = #fname; \
+ fields[field_counter].offset = __builtin_offsetof(typ, fname); \
+ field_counter++; })
+
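+/*
+ * Print a "struct __toku_<structname> { ... };" definition for all fields
+ * recorded via STRUCT_SETUP, sorted by their byte offset within the
+ * corresponding BDB struct so the emitted field order follows the
+ * original layout.
+ */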
+static void sort_and_dump_fields (const char *structname, bool has_internal, const char *extra_decls[]) {
+ int i;
+ qsort(fields, field_counter, sizeof(fields[0]), compare_fields);
+ printf("struct __toku_%s {\n", structname);
+ if (has_internal) {
+ printf(" struct __toku_%s_internal *i;\n", structname);
+ printf("#define %s_struct_i(x) ((x)->i)\n", structname);
+ }
+ if (extra_decls) {
+ while (*extra_decls) {
+ printf(" %s;\n", *extra_decls);
+ extra_decls++;
+ }
+ }
+ for (i=0; i<field_counter; i++) {
+ printf(" ");
+ printf(fields[i].decl_format_string, fields[i].name);
+ printf(";\n");
+ }
+ printf("};\n");
+}
+
+#include "db-4.6.19.h"
+
+static void print_dbtype(void) {
+ /* DBTYPE is mentioned by db_open.html */
+ printf("typedef enum {\n");
+ printf(" DB_BTREE=%d,\n", DB_BTREE);
+ printf(" DB_UNKNOWN=%d\n", DB_UNKNOWN);
+ printf("} DBTYPE;\n");
+}
+
+
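+/*
+ * The dodefine* helpers below print "#define NAME value" lines into the
+ * generated header.  The *_track variants also record which flag bits
+ * (or enum slots) are already in use, so the *_from_track variants can
+ * hand out a fresh, unused value for PerconaFT-private names.
+ */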
+#define dodefine(name) printf("#define %s %d\n", #name, name)
+#define dodefine_track(flags, name) ({ assert((flags & name) != name); \
+ flags |= (name); \
+ printf("#define %s %d\n", #name, name); })
+#define dodefine_from_track(flags, name) ({\
+ uint32_t which; \
+ uint32_t bit; \
+ for (which = 0; which < 32; which++) { \
+ bit = 1U << which; \
+ if (!(flags & bit)) break; \
+ } \
+ assert(which < 32); \
+ printf("#define %s %u\n", #name, bit); \
+ flags |= bit; \
+ })
+
+#define dodefine_track_enum(flags, name) ({ assert(name>=0 && name<256); \
+ assert(!(flags[name])); \
+ flags[name] = 1; \
+ printf("#define %s %d\n", #name, (int)(name)); })
+#define dodefine_from_track_enum(flags, name) ({\
+ uint32_t which; \
+ /* don't use 0 */ \
+ for (which = 1; which < 256; which++) { \
+ if (!(flags[which])) break; \
+ } \
+ assert(which < 256); \
+ flags[which] = 1; \
+ printf("#define %s %u\n", #name, which); \
+ })
+
+enum {
+ TOKUDB_OUT_OF_LOCKS = -100000,
+ TOKUDB_SUCCEEDED_EARLY = -100001,
+ TOKUDB_FOUND_BUT_REJECTED = -100002,
+ TOKUDB_USER_CALLBACK_ERROR = -100003,
+ TOKUDB_DICTIONARY_TOO_OLD = -100004,
+ TOKUDB_DICTIONARY_TOO_NEW = -100005,
+ TOKUDB_DICTIONARY_NO_HEADER = -100006,
+ TOKUDB_CANCELED = -100007,
+ TOKUDB_NO_DATA = -100008,
+ TOKUDB_ACCEPT = -100009,
+ TOKUDB_MVCC_DICTIONARY_TOO_NEW = -100010,
+ TOKUDB_UPGRADE_FAILURE = -100011,
+ TOKUDB_TRY_AGAIN = -100012,
+ TOKUDB_NEEDS_REPAIR = -100013,
+ TOKUDB_CURSOR_CONTINUE = -100014,
+ TOKUDB_BAD_CHECKSUM = -100015,
+ TOKUDB_HUGE_PAGES_ENABLED = -100016,
+ TOKUDB_OUT_OF_RANGE = -100017,
+ TOKUDB_INTERRUPTED = -100018,
+ DONTUSE_I_JUST_PUT_THIS_HERE_SO_I_COULD_HAVE_A_COMMA_AFTER_EACH_ITEM
+};
+
+static void print_defines (void) {
+ dodefine(DB_VERB_DEADLOCK);
+ dodefine(DB_VERB_RECOVERY);
+ dodefine(DB_VERB_REPLICATION);
+ dodefine(DB_VERB_WAITSFOR);
+
+ dodefine(DB_ARCH_ABS);
+ dodefine(DB_ARCH_LOG);
+
+ dodefine(DB_CREATE);
+ dodefine(DB_CXX_NO_EXCEPTIONS);
+ dodefine(DB_EXCL);
+ dodefine(DB_PRIVATE);
+ dodefine(DB_RDONLY);
+ dodefine(DB_RECOVER);
+ dodefine(DB_RUNRECOVERY);
+ dodefine(DB_THREAD);
+ dodefine(DB_TXN_NOSYNC);
+
+ /* according to BDB 4.6.19, this is the next unused flag in the set of
+ * common flags plus private flags for DB->open */
+#define DB_BLACKHOLE 0x0080000
+ dodefine(DB_BLACKHOLE);
+#undef DB_BLACKHOLE
+
+ dodefine(DB_LOCK_DEFAULT);
+ dodefine(DB_LOCK_OLDEST);
+ dodefine(DB_LOCK_RANDOM);
+
+ //dodefine(DB_DUP); No longer supported #2862
+ //dodefine(DB_DUPSORT); No longer supported #2862
+
+ dodefine(DB_KEYFIRST);
+ dodefine(DB_KEYLAST);
+ {
+ static uint8_t insert_flags[256];
+ dodefine_track_enum(insert_flags, DB_NOOVERWRITE);
+ dodefine_track_enum(insert_flags, DB_NODUPDATA);
+ dodefine_from_track_enum(insert_flags, DB_NOOVERWRITE_NO_ERROR);
+ }
+ dodefine(DB_OPFLAGS_MASK);
+
+ dodefine(DB_AUTO_COMMIT);
+
+ dodefine(DB_INIT_LOCK);
+ dodefine(DB_INIT_LOG);
+ dodefine(DB_INIT_MPOOL);
+ dodefine(DB_INIT_TXN);
+
+ //dodefine(DB_KEYEMPTY); /// KEYEMPTY is no longer used. We just use DB_NOTFOUND
+ dodefine(DB_KEYEXIST);
+ dodefine(DB_LOCK_DEADLOCK);
+ dodefine(DB_LOCK_NOTGRANTED);
+ dodefine(DB_NOTFOUND);
+ dodefine(DB_SECONDARY_BAD);
+ dodefine(DB_DONOTINDEX);
+#ifdef DB_BUFFER_SMALL
+ dodefine(DB_BUFFER_SMALL);
+#endif
+ printf("#define DB_BADFORMAT -30500\n"); // private tokudb
+ printf("#define DB_DELETE_ANY %d\n", 1<<16); // private tokudb
+
+ dodefine(DB_FIRST);
+ dodefine(DB_LAST);
+ dodefine(DB_CURRENT);
+ dodefine(DB_NEXT);
+ dodefine(DB_PREV);
+ dodefine(DB_SET);
+ dodefine(DB_SET_RANGE);
+ printf("#define DB_CURRENT_BINDING 253\n"); // private tokudb
+ printf("#define DB_SET_RANGE_REVERSE 252\n"); // private tokudb
+ //printf("#define DB_GET_BOTH_RANGE_REVERSE 251\n"); // private tokudb. No longer supported #2862.
+ dodefine(DB_RMW);
+
+ printf("#define DB_LOCKING_READ 0x80000000\n");
+ printf("#define DB_IS_RESETTING_OP 0x01000000\n"); // private tokudb
+ printf("#define DB_PRELOCKED 0x00800000\n"); // private tokudb
+ printf("#define DB_PRELOCKED_WRITE 0x00400000\n"); // private tokudb
+ //printf("#define DB_PRELOCKED_FILE_READ 0x00200000\n"); // private tokudb. No longer supported in #4472
+ printf("#define DB_IS_HOT_INDEX 0x00100000\n"); // private tokudb
+ printf("#define DBC_DISABLE_PREFETCHING 0x20000000\n"); // private tokudb
+ printf("#define DB_UPDATE_CMP_DESCRIPTOR 0x40000000\n"); // private tokudb
+    printf("#define TOKUFT_DIRTY_SHUTDOWN %x\n", 1U<<31);
+
+ {
+ //dbt flags
+ uint32_t dbt_flags = 0;
+ dodefine_track(dbt_flags, DB_DBT_APPMALLOC);
+ dodefine_track(dbt_flags, DB_DBT_DUPOK);
+ dodefine_track(dbt_flags, DB_DBT_MALLOC);
+#ifdef DB_DBT_MULTIPLE
+ dodefine_track(dbt_flags, DB_DBT_MULTIPLE);
+#endif
+ dodefine_track(dbt_flags, DB_DBT_REALLOC);
+ dodefine_track(dbt_flags, DB_DBT_USERMEM);
+ }
+
+ // flags for the env->set_flags function
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3
+ dodefine(DB_LOG_AUTOREMOVE);
+#endif
+
+ {
+ //Txn begin/commit flags
+ uint32_t txn_flags = 0;
+ dodefine_track(txn_flags, DB_TXN_WRITE_NOSYNC);
+ dodefine_track(txn_flags, DB_TXN_NOWAIT);
+ dodefine_track(txn_flags, DB_TXN_SYNC);
+#ifdef DB_TXN_SNAPSHOT
+ dodefine_track(txn_flags, DB_TXN_SNAPSHOT);
+#endif
+#ifdef DB_READ_UNCOMMITTED
+ dodefine_track(txn_flags, DB_READ_UNCOMMITTED);
+#endif
+#ifdef DB_READ_COMMITTED
+ dodefine_track(txn_flags, DB_READ_COMMITTED);
+#endif
+ //Add them if they didn't exist
+#ifndef DB_TXN_SNAPSHOT
+ dodefine_from_track(txn_flags, DB_TXN_SNAPSHOT);
+#endif
+#ifndef DB_READ_UNCOMMITTED
+ dodefine_from_track(txn_flags, DB_READ_UNCOMMITTED);
+#endif
+#ifndef DB_READ_COMMITTED
+ dodefine_from_track(txn_flags, DB_READ_COMMITTED);
+#endif
+ dodefine_from_track(txn_flags, DB_INHERIT_ISOLATION);
+ dodefine_from_track(txn_flags, DB_SERIALIZABLE);
+ dodefine_from_track(txn_flags, DB_TXN_READ_ONLY);
+ dodefine_from_track(txn_flags, DB_READ_COMMITTED_ALWAYS);
+ }
+
+ /* PerconaFT specific error codes*/
+ printf("/* PerconaFT specific error codes */\n");
+ dodefine(TOKUDB_OUT_OF_LOCKS);
+ dodefine(TOKUDB_SUCCEEDED_EARLY);
+ dodefine(TOKUDB_FOUND_BUT_REJECTED);
+ dodefine(TOKUDB_USER_CALLBACK_ERROR);
+ dodefine(TOKUDB_DICTIONARY_TOO_OLD);
+ dodefine(TOKUDB_DICTIONARY_TOO_NEW);
+ dodefine(TOKUDB_DICTIONARY_NO_HEADER);
+ dodefine(TOKUDB_CANCELED);
+ dodefine(TOKUDB_NO_DATA);
+ dodefine(TOKUDB_ACCEPT);
+ dodefine(TOKUDB_MVCC_DICTIONARY_TOO_NEW);
+ dodefine(TOKUDB_UPGRADE_FAILURE);
+ dodefine(TOKUDB_TRY_AGAIN);
+ dodefine(TOKUDB_NEEDS_REPAIR);
+ dodefine(TOKUDB_CURSOR_CONTINUE);
+ dodefine(TOKUDB_BAD_CHECKSUM);
+ dodefine(TOKUDB_HUGE_PAGES_ENABLED);
+ dodefine(TOKUDB_OUT_OF_RANGE);
+ dodefine(TOKUDB_INTERRUPTED);
+
+ /* LOADER flags */
+ printf("/* LOADER flags */\n");
+ {
+ uint32_t loader_flags = 0;
+ dodefine_from_track(loader_flags, LOADER_DISALLOW_PUTS); // Loader is only used for side effects.
+ dodefine_from_track(loader_flags, LOADER_COMPRESS_INTERMEDIATES);
+ }
+}
+
+static void print_db_env_struct (void) {
+ field_counter=0;
+ STRUCT_SETUP(DB_ENV, api1_internal, "void *%s"); /* Used for C++ hacking. */
+ STRUCT_SETUP(DB_ENV, app_private, "void *%s");
+ STRUCT_SETUP(DB_ENV, close, "int (*%s) (DB_ENV *, uint32_t)");
+ STRUCT_SETUP(DB_ENV, err, "void (*%s) (const DB_ENV *, int, const char *, ...) __attribute__ (( format (printf, 3, 4) ))");
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3
+ STRUCT_SETUP(DB_ENV, get_cachesize, "int (*%s) (DB_ENV *, uint32_t *, uint32_t *, int *)");
+ STRUCT_SETUP(DB_ENV, get_flags, "int (*%s) (DB_ENV *, uint32_t *)");
+ STRUCT_SETUP(DB_ENV, get_lg_max, "int (*%s) (DB_ENV *, uint32_t*)");
+#endif
+ STRUCT_SETUP(DB_ENV, log_archive, "int (*%s) (DB_ENV *, char **[], uint32_t)");
+ STRUCT_SETUP(DB_ENV, log_flush, "int (*%s) (DB_ENV *, const DB_LSN *)");
+ STRUCT_SETUP(DB_ENV, open, "int (*%s) (DB_ENV *, const char *, uint32_t, int)");
+ STRUCT_SETUP(DB_ENV, set_cachesize, "int (*%s) (DB_ENV *, uint32_t, uint32_t, int)");
+ STRUCT_SETUP(DB_ENV, set_data_dir, "int (*%s) (DB_ENV *, const char *)");
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1
+ STRUCT_SETUP(DB_ENV, set_errcall, "void (*%s) (DB_ENV *, void (*)(const char *, char *))");
+#endif
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3
+ STRUCT_SETUP(DB_ENV, set_errcall, "void (*%s) (DB_ENV *, void (*)(const DB_ENV *, const char *, const char *))");
+#endif
+ STRUCT_SETUP(DB_ENV, set_errfile, "void (*%s) (DB_ENV *, FILE*)");
+ STRUCT_SETUP(DB_ENV, set_errpfx, "void (*%s) (DB_ENV *, const char *)");
+ STRUCT_SETUP(DB_ENV, set_flags, "int (*%s) (DB_ENV *, uint32_t, int)");
+ STRUCT_SETUP(DB_ENV, set_lg_bsize, "int (*%s) (DB_ENV *, uint32_t)");
+ STRUCT_SETUP(DB_ENV, set_lg_dir, "int (*%s) (DB_ENV *, const char *)");
+ STRUCT_SETUP(DB_ENV, set_lg_max, "int (*%s) (DB_ENV *, uint32_t)");
+ STRUCT_SETUP(DB_ENV, set_lk_detect, "int (*%s) (DB_ENV *, uint32_t)");
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR <= 4
+ STRUCT_SETUP(DB_ENV, set_lk_max, "int (*%s) (DB_ENV *, uint32_t)");
+#endif
+ //STRUCT_SETUP(DB_ENV, set_noticecall, "void (*%s) (DB_ENV *, void (*)(DB_ENV *, db_notices))");
+ STRUCT_SETUP(DB_ENV, set_tmp_dir, "int (*%s) (DB_ENV *, const char *)");
+ STRUCT_SETUP(DB_ENV, set_verbose, "int (*%s) (DB_ENV *, uint32_t, int)");
+ STRUCT_SETUP(DB_ENV, txn_checkpoint, "int (*%s) (DB_ENV *, uint32_t, uint32_t, uint32_t)");
+ STRUCT_SETUP(DB_ENV, txn_stat, "int (*%s) (DB_ENV *, DB_TXN_STAT **, uint32_t)");
+ STRUCT_SETUP(DB_ENV, txn_begin, "int (*%s) (DB_ENV *, DB_TXN *, DB_TXN **, uint32_t)");
+ STRUCT_SETUP(DB_ENV, txn_recover, "int (*%s) (DB_ENV *, DB_PREPLIST preplist[/*count*/], long count, /*out*/ long *retp, uint32_t flags)");
+ STRUCT_SETUP(DB_ENV, dbremove, "int (*%s) (DB_ENV *, DB_TXN *, const char *, const char *, uint32_t)");
+ STRUCT_SETUP(DB_ENV, dbrename, "int (*%s) (DB_ENV *, DB_TXN *, const char *, const char *, const char *, uint32_t)");
+
+ const char *extra[]={
+ "int (*checkpointing_set_period) (DB_ENV*, uint32_t) /* Change the delay between automatic checkpoints. 0 means disabled. */",
+ "int (*checkpointing_get_period) (DB_ENV*, uint32_t*) /* Retrieve the delay between automatic checkpoints. 0 means disabled. */",
+ "int (*cleaner_set_period) (DB_ENV*, uint32_t) /* Change the delay between automatic cleaner attempts. 0 means disabled. */",
+ "int (*cleaner_get_period) (DB_ENV*, uint32_t*) /* Retrieve the delay between automatic cleaner attempts. 0 means disabled. */",
+ "int (*cleaner_set_iterations) (DB_ENV*, uint32_t) /* Change the number of attempts on each cleaner invocation. 0 means disabled. */",
+ "int (*cleaner_get_iterations) (DB_ENV*, uint32_t*) /* Retrieve the number of attempts on each cleaner invocation. 0 means disabled. */",
+        "int (*evictor_set_enable_partial_eviction) (DB_ENV*, bool) /* Enables or disables partial eviction of nodes from cachetable. */",
+ "int (*evictor_get_enable_partial_eviction) (DB_ENV*, bool*) /* Retrieve the status of partial eviction of nodes from cachetable. */",
+ "int (*checkpointing_postpone) (DB_ENV*) /* Use for 'rename table' or any other operation that must be disjoint from a checkpoint */",
+ "int (*checkpointing_resume) (DB_ENV*) /* Alert tokuft that 'postpone' is no longer necessary */",
+ "int (*checkpointing_begin_atomic_operation) (DB_ENV*) /* Begin a set of operations (that must be atomic as far as checkpoints are concerned). i.e. inserting into every index in one table */",
+ "int (*checkpointing_end_atomic_operation) (DB_ENV*) /* End a set of operations (that must be atomic as far as checkpoints are concerned). */",
+ "int (*set_default_bt_compare) (DB_ENV*,int (*bt_compare) (DB *, const DBT *, const DBT *)) /* Set default (key) comparison function for all DBs in this environment. Required for RECOVERY since you cannot open the DBs manually. */",
+ "int (*get_engine_status_num_rows) (DB_ENV*, uint64_t*) /* return number of rows in engine status */",
+ "int (*get_engine_status) (DB_ENV*, TOKU_ENGINE_STATUS_ROW, uint64_t, uint64_t*, fs_redzone_state*, uint64_t*, char*, int, toku_engine_status_include_type) /* Fill in status struct and redzone state, possibly env panic string */",
+ "int (*get_engine_status_text) (DB_ENV*, char*, int) /* Fill in status text */",
+ "int (*crash) (DB_ENV*, const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/)",
+ "int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* FOR TEST ONLY: lookup existing iname */",
+ "int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t dbt_flags[/*N*/], uint32_t loader_flags)",
+ "int (*create_indexer) (DB_ENV *env, DB_TXN *txn, DB_INDEXER **idxrp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t indexer_flags)",
+ "int (*put_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,\n"
+ " const DBT *src_key, const DBT *src_val,\n"
+ " uint32_t num_dbs, DB **db_array, DBT_ARRAY *keys, DBT_ARRAY *vals, uint32_t *flags_array) /* insert into multiple DBs */",
+ "int (*set_generate_row_callback_for_put) (DB_ENV *env, generate_row_for_put_func generate_row_for_put)",
+ "int (*del_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,\n"
+ " const DBT *src_key, const DBT *src_val,\n"
+ " uint32_t num_dbs, DB **db_array, DBT_ARRAY *keys, uint32_t *flags_array) /* delete from multiple DBs */",
+ "int (*set_generate_row_callback_for_del) (DB_ENV *env, generate_row_for_del_func generate_row_for_del)",
+ "int (*update_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,\n"
+ " DBT *old_src_key, DBT *old_src_data,\n"
+ " DBT *new_src_key, DBT *new_src_data,\n"
+ " uint32_t num_dbs, DB **db_array, uint32_t *flags_array,\n"
+ " uint32_t num_keys, DBT_ARRAY *keys,\n"
+ " uint32_t num_vals, DBT_ARRAY *vals) /* update multiple DBs */",
+ "int (*get_redzone) (DB_ENV *env, int *redzone) /* get the redzone limit */",
+ "int (*set_redzone) (DB_ENV *env, int redzone) /* set the redzone limit in percent of total space */",
+ "int (*set_lk_max_memory) (DB_ENV *env, uint64_t max)",
+ "int (*get_lk_max_memory) (DB_ENV *env, uint64_t *max)",
+ "void (*set_update) (DB_ENV *env, int (*update_function)(DB *, const DBT *key, const DBT *old_val, const DBT *extra, void (*set_val)(const DBT *new_val, void *set_extra), void *set_extra))",
+ "int (*set_lock_timeout) (DB_ENV *env, uint64_t default_lock_wait_time_msec, uint64_t (*get_lock_wait_time_cb)(uint64_t default_lock_wait_time))",
+ "int (*get_lock_timeout) (DB_ENV *env, uint64_t *lock_wait_time_msec)",
+ "int (*set_lock_timeout_callback) (DB_ENV *env, lock_timeout_callback callback)",
+ "int (*set_lock_wait_callback) (DB_ENV *env, lock_wait_callback callback)",
+ "int (*txn_xa_recover) (DB_ENV*, TOKU_XA_XID list[/*count*/], long count, /*out*/ long *retp, uint32_t flags)",
+ "int (*get_txn_from_xid) (DB_ENV*, /*in*/ TOKU_XA_XID *, /*out*/ DB_TXN **)",
+ "DB* (*get_db_for_directory) (DB_ENV*)",
+ "int (*get_cursor_for_directory) (DB_ENV*, /*in*/ DB_TXN *, /*out*/ DBC **)",
+ "int (*get_cursor_for_persistent_environment)(DB_ENV*, /*in*/ DB_TXN *, /*out*/ DBC **)",
+ "void (*change_fsync_log_period) (DB_ENV*, uint32_t)",
+ "int (*iterate_live_transactions) (DB_ENV *env, iterate_transactions_callback callback, void *extra)",
+ "int (*iterate_pending_lock_requests) (DB_ENV *env, iterate_requests_callback callback, void *extra)",
+ "void (*set_loader_memory_size)(DB_ENV *env, uint64_t (*get_loader_memory_size_callback)(void))",
+ "uint64_t (*get_loader_memory_size)(DB_ENV *env)",
+ "void (*set_killed_callback)(DB_ENV *env, uint64_t default_killed_time_msec, uint64_t (*get_killed_time_callback)(uint64_t default_killed_time_msec), int (*killed_callback)(void))",
+ "void (*do_backtrace) (DB_ENV *env)",
+ "int (*set_client_pool_threads)(DB_ENV *, uint32_t)",
+ "int (*set_cachetable_pool_threads)(DB_ENV *, uint32_t)",
+ "int (*set_checkpoint_pool_threads)(DB_ENV *, uint32_t)",
+ "void (*set_check_thp)(DB_ENV *, bool new_val)",
+ "bool (*get_check_thp)(DB_ENV *)",
+ "bool (*set_dir_per_db)(DB_ENV *, bool new_val)",
+ "bool (*get_dir_per_db)(DB_ENV *)",
+ "const char *(*get_data_dir)(DB_ENV *env)",
+ "int (*dirtool_attach)(DB_ENV *, DB_TXN *, const char *, const char *)",
+ "int (*dirtool_detach)(DB_ENV *, DB_TXN *, const char *)",
+ "int (*dirtool_move)(DB_ENV *, DB_TXN *, const char *, const char *)",
+ "void (*kill_waiter)(DB_ENV *, void *extra)",
+ NULL};
+
+ sort_and_dump_fields("db_env", true, extra);
+}
+
+static void print_db_key_range_struct (void) {
+ field_counter=0;
+ STRUCT_SETUP(DB_KEY_RANGE, less, "double %s");
+ STRUCT_SETUP(DB_KEY_RANGE, equal, "double %s");
+ STRUCT_SETUP(DB_KEY_RANGE, greater, "double %s");
+ sort_and_dump_fields("db_key_range", false, NULL);
+}
+
+static void print_db_lsn_struct(void) {
+ field_counter = 0;
+ // FT-692
+ STRUCT_SETUP(DB_LSN, file, "uint32_t %s");
+ STRUCT_SETUP(DB_LSN, offset, "uint32_t %s");
+ sort_and_dump_fields("db_lsn", false, NULL);
+}
+
+static void print_dbt_struct(void) {
+ field_counter=0;
+#if 0 && DB_VERSION_MAJOR==4 && DB_VERSION_MINOR==1
+ STRUCT_SETUP(DBT, app_private, "void*%s");
+#endif
+ STRUCT_SETUP(DBT, data, "void*%s");
+ STRUCT_SETUP(DBT, flags, "uint32_t %s");
+ STRUCT_SETUP(DBT, size, "uint32_t %s");
+ STRUCT_SETUP(DBT, ulen, "uint32_t %s");
+ sort_and_dump_fields("dbt", false, NULL);
+}
+
+static void print_db_struct (void) {
+ /* Do these in alphabetical order. */
+ field_counter=0;
+ STRUCT_SETUP(DB, api_internal, "void *%s"); /* Used for C++ hacking. */
+ STRUCT_SETUP(DB, app_private, "void *%s");
+ STRUCT_SETUP(DB, close, "int (*%s) (DB*, uint32_t)");
+ STRUCT_SETUP(DB, cursor, "int (*%s) (DB *, DB_TXN *, DBC **, uint32_t)");
+ STRUCT_SETUP(DB, dbenv, "DB_ENV *%s");
+ STRUCT_SETUP(DB, del, "int (*%s) (DB *, DB_TXN *, DBT *, uint32_t)");
+ STRUCT_SETUP(DB, fd, "int (*%s) (DB *, int *)");
+ STRUCT_SETUP(DB, get, "int (*%s) (DB *, DB_TXN *, DBT *, DBT *, uint32_t)");
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3
+ STRUCT_SETUP(DB, get_flags, "int (*%s) (DB *, uint32_t *)");
+ STRUCT_SETUP(DB, get_pagesize, "int (*%s) (DB *, uint32_t *)");
+#endif
+ STRUCT_SETUP(DB, key_range, "int (*%s) (DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, uint32_t)");
+ STRUCT_SETUP(DB, open, "int (*%s) (DB *, DB_TXN *, const char *, const char *, DBTYPE, uint32_t, int)");
+ STRUCT_SETUP(DB, put, "int (*%s) (DB *, DB_TXN *, DBT *, DBT *, uint32_t)");
+ STRUCT_SETUP(DB, set_errfile, "void (*%s) (DB *, FILE*)");
+ STRUCT_SETUP(DB, set_flags, "int (*%s) (DB *, uint32_t)");
+ STRUCT_SETUP(DB, set_pagesize, "int (*%s) (DB *, uint32_t)");
+ STRUCT_SETUP(DB, stat, "int (*%s) (DB *, void *, uint32_t)");
+ STRUCT_SETUP(DB, verify, "int (*%s) (DB *, const char *, const char *, FILE *, uint32_t)");
+ const char *extra[]={
+ "int (*key_range64)(DB*, DB_TXN *, DBT *, uint64_t *less, uint64_t *equal, uint64_t *greater, int *is_exact)",
+ "int (*get_key_after_bytes)(DB *, DB_TXN *, const DBT *, uint64_t, void (*callback)(const DBT *, uint64_t, void *), void *, uint32_t); /* given start_key and skip_len, find largest end_key such that the elements in [start_key,end_key) sum to <= skip_len bytes */",
+ "int (*keys_range64)(DB*, DB_TXN *, DBT *keyleft, DBT *keyright, uint64_t *less, uint64_t *left, uint64_t *between, uint64_t *right, uint64_t *greater, bool *middle_3_exact)",
+ "int (*stat64)(DB *, DB_TXN *, DB_BTREE_STAT64 *)",
+ "int (*pre_acquire_table_lock)(DB*, DB_TXN*)",
+ "int (*pre_acquire_fileops_lock)(DB*, DB_TXN*)",
+ "const DBT* (*dbt_pos_infty)(void) /* Return the special DBT that refers to positive infinity in the lock table.*/",
+ "const DBT* (*dbt_neg_infty)(void)/* Return the special DBT that refers to negative infinity in the lock table.*/",
+ "void (*get_max_row_size) (DB*, uint32_t *max_key_size, uint32_t *max_row_size)",
+ "DESCRIPTOR descriptor /* saved row/dictionary descriptor for aiding in comparisons */",
+ "DESCRIPTOR cmp_descriptor /* saved row/dictionary descriptor for aiding in comparisons */",
+ "int (*change_descriptor) (DB*, DB_TXN*, const DBT* descriptor, uint32_t) /* change row/dictionary descriptor for a db. Available only while db is open */",
+        "int (*getf_set)(DB*, DB_TXN*, uint32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor */",
+        "int (*optimize)(DB*) /* Run garbage collection and promote all transactions older than oldest. Amortized (happens during flattening) */",
+ "int (*hot_optimize)(DB*, DBT*, DBT*, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra, uint64_t* loops_run)",
+ "int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION)",
+ "int (*change_pagesize)(DB*,uint32_t)",
+ "int (*change_readpagesize)(DB*,uint32_t)",
+ "int (*get_readpagesize)(DB*,uint32_t*)",
+ "int (*set_readpagesize)(DB*,uint32_t)",
+ "int (*change_compression_method)(DB*,TOKU_COMPRESSION_METHOD)",
+ "int (*get_compression_method)(DB*,TOKU_COMPRESSION_METHOD*)",
+ "int (*set_compression_method)(DB*,TOKU_COMPRESSION_METHOD)",
+ "int (*change_fanout)(DB *db, uint32_t fanout)",
+ "int (*get_fanout)(DB *db, uint32_t *fanout)",
+ "int (*set_fanout)(DB *db, uint32_t fanout)",
+ "int (*set_memcmp_magic)(DB *db, uint8_t magic)",
+ "int (*set_indexer)(DB*, DB_INDEXER*)",
+ "void (*get_indexer)(DB*, DB_INDEXER**)",
+ "int (*verify_with_progress)(DB *, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra, int verbose, int keep_going)",
+ "int (*update)(DB *, DB_TXN*, const DBT *key, const DBT *extra, uint32_t flags)",
+ "int (*update_broadcast)(DB *, DB_TXN*, const DBT *extra, uint32_t flags)",
+ "int (*get_fractal_tree_info64)(DB*,uint64_t*,uint64_t*,uint64_t*,uint64_t*)",
+ "int (*iterate_fractal_tree_block_map)(DB*,int(*)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*),void*)",
+ "const char *(*get_dname)(DB *db)",
+ "int (*get_last_key)(DB *db, YDB_CALLBACK_FUNCTION func, void* extra)",
+ "int (*recount_rows)(DB* db, int (*progress_callback)(uint64_t count, uint64_t deleted, void* progress_extra), void* progress_extra)",
+ NULL};
+ sort_and_dump_fields("db", true, extra);
+}
+
+static void print_db_txn_active_struct (void) {
+ field_counter=0;
+ STRUCT_SETUP(DB_TXN_ACTIVE, lsn, "DB_LSN %s");
+ STRUCT_SETUP(DB_TXN_ACTIVE, txnid, "uint32_t %s");
+ sort_and_dump_fields("db_txn_active", false, NULL);
+}
+
+static void print_db_txn_struct (void) {
+ field_counter=0;
+ STRUCT_SETUP(DB_TXN, abort, "int (*%s) (DB_TXN *)");
+ STRUCT_SETUP(DB_TXN, api_internal,"void *%s");
+ STRUCT_SETUP(DB_TXN, commit, "int (*%s) (DB_TXN*, uint32_t)");
+ STRUCT_SETUP(DB_TXN, prepare, "int (*%s) (DB_TXN*, uint8_t gid[DB_GID_SIZE], uint32_t flags)");
+ STRUCT_SETUP(DB_TXN, discard, "int (*%s) (DB_TXN*, uint32_t)");
+ STRUCT_SETUP(DB_TXN, id, "uint32_t (*%s) (DB_TXN *)");
+ STRUCT_SETUP(DB_TXN, mgrp, "DB_ENV *%s /* In PerconaFT, mgrp is a DB_ENV, not a DB_TXNMGR */");
+ STRUCT_SETUP(DB_TXN, parent, "DB_TXN *%s");
+ const char *extra[] = {
+ "int (*txn_stat)(DB_TXN *, struct txn_stat **)",
+ "int (*commit_with_progress)(DB_TXN*, uint32_t, TXN_PROGRESS_POLL_FUNCTION, void*)",
+ "int (*abort_with_progress)(DB_TXN*, TXN_PROGRESS_POLL_FUNCTION, void*)",
+ "int (*xa_prepare) (DB_TXN*, TOKU_XA_XID *, uint32_t flags)",
+ "uint64_t (*id64) (DB_TXN*)",
+ "void (*set_client_id)(DB_TXN *, uint64_t client_id, void *client_extra)",
+ "void (*get_client_id)(DB_TXN *, uint64_t *client_id, void **client_extra)",
+ "bool (*is_prepared)(DB_TXN *)",
+ "DB_TXN *(*get_child)(DB_TXN *)",
+ "uint64_t (*get_start_time)(DB_TXN *)",
+ NULL};
+ sort_and_dump_fields("db_txn", false, extra);
+}
+
+static void print_db_txn_stat_struct (void) {
+ field_counter=0;
+ STRUCT_SETUP(DB_TXN_STAT, st_nactive, "uint32_t %s");
+ STRUCT_SETUP(DB_TXN_STAT, st_txnarray, "DB_TXN_ACTIVE *%s");
+ sort_and_dump_fields("db_txn_stat", false, NULL);
+}
+
+static void print_dbc_struct (void) {
+ field_counter=0;
+ STRUCT_SETUP(DBC, c_close, "int (*%s) (DBC *)");
+ //STRUCT_SETUP(DBC, c_del, "int (*%s) (DBC *, uint32_t)"); // c_del was removed. See #4576.
+ STRUCT_SETUP(DBC, c_get, "int (*%s) (DBC *, DBT *, DBT *, uint32_t)");
+ STRUCT_SETUP(DBC, dbp, "DB *%s");
+ const char *extra[]={
+ "int (*c_getf_first)(DBC *, uint32_t, YDB_CALLBACK_FUNCTION, void *)",
+ "int (*c_getf_last)(DBC *, uint32_t, YDB_CALLBACK_FUNCTION, void *)",
+ "int (*c_getf_next)(DBC *, uint32_t, YDB_CALLBACK_FUNCTION, void *)",
+ "int (*c_getf_prev)(DBC *, uint32_t, YDB_CALLBACK_FUNCTION, void *)",
+ "int (*c_getf_current)(DBC *, uint32_t, YDB_CALLBACK_FUNCTION, void *)",
+ "int (*c_getf_set)(DBC *, uint32_t, DBT *, YDB_CALLBACK_FUNCTION, void *)",
+ "int (*c_getf_set_range)(DBC *, uint32_t, DBT *, YDB_CALLBACK_FUNCTION, void *)",
+ "int (*c_getf_set_range_reverse)(DBC *, uint32_t, DBT *, YDB_CALLBACK_FUNCTION, void *)",
+ "int (*c_getf_set_range_with_bound)(DBC *, uint32_t, DBT *k, DBT *k_bound, YDB_CALLBACK_FUNCTION, void *)",
+ "int (*c_set_bounds)(DBC*, const DBT*, const DBT*, bool pre_acquire, int out_of_range_error)",
+ "void (*c_set_check_interrupt_callback)(DBC*, bool (*)(void*, uint64_t deleted_rows), void *)",
+ "void (*c_remove_restriction)(DBC*)",
+ "void (*c_set_txn)(DBC*, DB_TXN*)",
+ "char _internal[512]",
+ NULL};
+ sort_and_dump_fields("dbc", false, extra);
+}
+
+
+int main (int argc, char *const argv[] __attribute__((__unused__))) {
+ assert(argc==1);
+
+ printf("#ifndef _DB_H\n");
+ printf("#define _DB_H\n");
+ printf("/* This code generated by make_db_h. Copyright (c) 2006, 2015, Percona and/or its affiliates. */\n");
+ printf("#ident \"Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.\"\n");
+ printf("#include <sys/types.h>\n");
+ printf("/*stdio is needed for the FILE* in db->verify*/\n");
+ printf("#include <stdio.h>\n");
+ printf("/*stdbool is needed for the bool in db_env_enable_engine_status*/\n");
+ printf("#include <stdbool.h>\n");
+ printf("#include <stdint.h>\n");
+ //printf("#include <inttypes.h>\n");
+ printf("#if defined(__cplusplus) || defined(__cilkplusplus)\nextern \"C\" {\n#endif\n");
+
+ printf("#define DB_VERSION_MAJOR %d\n", DB_VERSION_MAJOR);
+ printf("#define DB_VERSION_MINOR %d\n", DB_VERSION_MINOR);
+ printf("/* As of r40364 (post PerconaFT 5.2.7), the patch version number is 100+ the BDB header patch version number.*/\n");
+ printf("#define DB_VERSION_PATCH %d\n", 100+DB_VERSION_PATCH);
+ printf("#define DB_VERSION_STRING \"Percona: PerconaFT %d.%d.%d\"\n", DB_VERSION_MAJOR, DB_VERSION_MINOR, 100+DB_VERSION_PATCH);
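+    // e.g. when built against the BDB 4.6.19 header (DB_VERSION_PATCH == 19), this yields
+    // DB_VERSION_PATCH 119 and DB_VERSION_STRING "Percona: PerconaFT 4.6.119" (illustrative).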
+
+#ifndef DB_GID_SIZE
+#define DB_GID_SIZE DB_XIDDATASIZE
+#endif
+ dodefine(DB_GID_SIZE);
+
+ printf("typedef struct toku_xa_xid_s { /* This struct is intended to be binary compatible with the XID in the XA architecture. See source:/import/opengroup.org/C193.pdf */\n"
+ " long formatID; /* format identifier */\n"
+ " long gtrid_length; /* value from 1 through 64 */\n"
+ " long bqual_length; /* value from 1 through 64 */\n"
+ " char data[DB_GID_SIZE];\n"
+ "} TOKU_XA_XID;\n");
+
+ printf("#ifndef TOKU_OFF_T_DEFINED\n"
+ "#define TOKU_OFF_T_DEFINED\n"
+ "typedef int64_t toku_off_t;\n"
+ "#endif\n");
+
+ printf("typedef struct __toku_db_env DB_ENV;\n");
+ printf("typedef struct __toku_db_key_range DB_KEY_RANGE;\n");
+ printf("typedef struct __toku_db_lsn DB_LSN;\n");
+ printf("typedef struct __toku_db DB;\n");
+ printf("typedef struct __toku_db_txn DB_TXN;\n");
+ printf("typedef struct __toku_db_txn_active DB_TXN_ACTIVE;\n");
+ printf("typedef struct __toku_db_txn_stat DB_TXN_STAT;\n");
+ printf("typedef struct __toku_dbc DBC;\n");
+ printf("typedef struct __toku_dbt DBT;\n");
+ printf("typedef struct __toku_db_preplist { DB_TXN *txn; uint8_t gid[DB_GID_SIZE]; } DB_PREPLIST;\n");
+ printf("typedef uint32_t db_recno_t;\n");
+ printf("typedef int(*YDB_CALLBACK_FUNCTION)(DBT const*, DBT const*, void*);\n");
+
+ printf("struct simple_dbt {\n");
+ printf(" uint32_t len;\n");
+ printf(" void *data;\n");
+ printf("};\n");
+
+ //stat64
+ printf("typedef struct __toku_db_btree_stat64 {\n");
+ printf(" uint64_t bt_nkeys; /* how many unique keys (guaranteed only to be an estimate, even when flattened) */\n");
+ printf(" uint64_t bt_ndata; /* how many key-value pairs (an estimate, but exact when flattened) */\n");
+ printf(" uint64_t bt_dsize; /* how big are the keys+values (not counting the lengths) (an estimate, unless flattened) */\n");
+ printf(" uint64_t bt_fsize; /* how big is the underlying file */\n");
+ // 4018
+ printf(" uint64_t bt_create_time_sec; /* Creation time, in seconds */\n");
+ printf(" uint64_t bt_modify_time_sec; /* Time of last serialization, in seconds */\n");
+ printf(" uint64_t bt_verify_time_sec; /* Time of last verification, in seconds */\n");
+ printf("} DB_BTREE_STAT64;\n");
+
+ // compression methods
+ printf("typedef enum toku_compression_method {\n");
+ printf(" TOKU_NO_COMPRESSION = 0,\n"); // "identity" compression
+ printf(" TOKU_SNAPPY_METHOD = 7,\n"); // google snappy
+ printf(" TOKU_ZLIB_METHOD = 8,\n"); // RFC 1950 says use 8 for zlib. It reserves 15 to allow more bytes.
+    printf("    TOKU_QUICKLZ_METHOD = 9,\n"); // We use 9 for QUICKLZ (the QLZ compression level is stored in the high-order nibble). I couldn't find any standard for any other numbers, so I just use 9. -Bradley
+ printf(" TOKU_LZMA_METHOD = 10,\n"); // We use 10 for LZMA. (Note the compression level is stored in the high-order nibble).
+ printf(" TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD = 11,\n"); // We wrap a zlib without checksumming compression technique in our own checksummed metadata.
+ printf(" TOKU_DEFAULT_COMPRESSION_METHOD = 1,\n"); // default is actually quicklz
+ printf(" TOKU_FAST_COMPRESSION_METHOD = 2,\n"); // friendlier names
+ printf(" TOKU_SMALL_COMPRESSION_METHOD = 3,\n");
+ printf("} TOKU_COMPRESSION_METHOD;\n");
+
+ //bulk loader
+ printf("typedef struct __toku_loader DB_LOADER;\n");
+ printf("struct __toku_loader_internal;\n");
+ printf("struct __toku_loader {\n");
+ printf(" struct __toku_loader_internal *i;\n");
+ printf(" int (*set_error_callback)(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra), void *error_extra); /* set the error callback */\n");
+ printf(" int (*set_poll_function)(DB_LOADER *loader, int (*poll_func)(void *extra, float progress), void *poll_extra); /* set the polling function */\n");
+ printf(" int (*put)(DB_LOADER *loader, DBT *key, DBT* val); /* give a row to the loader */\n");
+ printf(" int (*close)(DB_LOADER *loader); /* finish loading, free memory */\n");
+ printf(" int (*abort)(DB_LOADER *loader); /* abort loading, free memory */\n");
+ printf("};\n");
+
+ //indexer
+ printf("typedef struct __toku_indexer DB_INDEXER;\n");
+ printf("struct __toku_indexer_internal;\n");
+ printf("struct __toku_indexer {\n");
+ printf(" struct __toku_indexer_internal *i;\n");
+ printf(" int (*set_error_callback)(DB_INDEXER *indexer, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra), void *error_extra); /* set the error callback */\n");
+ printf(" int (*set_poll_function)(DB_INDEXER *indexer, int (*poll_func)(void *extra, float progress), void *poll_extra); /* set the polling function */\n");
+ printf(" int (*build)(DB_INDEXER *indexer); /* build the indexes */\n");
+ printf(" int (*close)(DB_INDEXER *indexer); /* finish indexing, free memory */\n");
+ printf(" int (*abort)(DB_INDEXER *indexer); /* abort indexing, free memory */\n");
+ printf("};\n");
+
+ // Filesystem redzone state
+ printf("typedef enum { \n");
+ printf(" FS_GREEN = 0, // green zone (we have lots of space) \n");
+ printf(" FS_YELLOW = 1, // yellow zone (issue warning but allow operations) \n");
+ printf(" FS_RED = 2, // red zone (prevent insert operations) \n");
+ printf(" FS_BLOCKED = 3 // For reporting engine status, completely blocked \n");
+ printf("} fs_redzone_state;\n");
+
+ printf("// engine status info\n");
+ printf("// engine status is passed to handlerton as an array of TOKU_ENGINE_STATUS_ROW_S[]\n");
+
+ printf("typedef enum {\n");
+ printf(" FS_STATE = 0, // interpret as file system state (redzone) enum \n");
+ printf(" UINT64, // interpret as uint64_t \n");
+ printf(" CHARSTR, // interpret as char * \n");
+ printf(" UNIXTIME, // interpret as time_t \n");
+ printf(" TOKUTIME, // interpret as tokutime_t \n");
+ printf(" PARCOUNT, // interpret as PARTITIONED_COUNTER\n");
+ printf(" DOUBLE // interpret as double\n");
+ printf("} toku_engine_status_display_type; \n");
+
+ printf("typedef enum {\n");
+ printf(" TOKU_ENGINE_STATUS = (1ULL<<0), // Include when asking for engine status\n");
+ printf(" TOKU_GLOBAL_STATUS = (1ULL<<1), // Include when asking for information_schema.global_status\n");
+ printf("} toku_engine_status_include_type; \n");
+
+ printf("typedef struct __toku_engine_status_row {\n");
+ printf(" const char * keyname; // info schema key, should not change across revisions without good reason \n");
+ printf(" const char * columnname; // column for mysql, e.g. information_schema.global_status. TOKUDB_ will automatically be prefixed.\n");
+    printf("  const char * legend; // the text that will appear in the user interface \n");
+ printf(" toku_engine_status_display_type type; // how to interpret the value \n");
+    printf("  toku_engine_status_include_type include; // which kinds of callers should read this row?\n");
+ printf(" union { \n");
+ printf(" double dnum; \n");
+ printf(" uint64_t num; \n");
+ printf(" const char * str; \n");
+ printf(" char datebuf[26]; \n");
+ printf(" struct partitioned_counter *parcount;\n");
+ printf(" } value; \n");
+ printf("} * TOKU_ENGINE_STATUS_ROW, TOKU_ENGINE_STATUS_ROW_S; \n");
+
+ print_dbtype();
+ print_defines();
+
+ printf("typedef struct {\n");
+ printf(" uint32_t capacity;\n");
+ printf(" uint32_t size;\n");
+ printf(" DBT *dbts;\n");
+ printf("} DBT_ARRAY;\n\n");
+ printf("typedef int (*generate_row_for_put_func)(DB *dest_db, DB *src_db, DBT_ARRAY * dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val);\n");
+ printf("typedef int (*generate_row_for_del_func)(DB *dest_db, DB *src_db, DBT_ARRAY * dest_keys, const DBT *src_key, const DBT *src_val);\n");
+ printf("DBT_ARRAY * toku_dbt_array_init(DBT_ARRAY *dbts, uint32_t size) %s;\n", VISIBLE);
+ printf("void toku_dbt_array_destroy(DBT_ARRAY *dbts) %s;\n", VISIBLE);
+ printf("void toku_dbt_array_destroy_shallow(DBT_ARRAY *dbts) %s;\n", VISIBLE);
+ printf("void toku_dbt_array_resize(DBT_ARRAY *dbts, uint32_t size) %s;\n", VISIBLE);
+
+ printf("typedef void (*lock_timeout_callback)(DB *db, uint64_t requesting_txnid, const DBT *left_key, const DBT *right_key, uint64_t blocking_txnid);\n");
+ printf("typedef void (*lock_wait_callback)(void *arg, uint64_t requesting_txnid, uint64_t blocking_txnid);\n");
+ printf("typedef int (*iterate_row_locks_callback)(DB **db, DBT *left_key, DBT *right_key, void *extra);\n");
+ printf("typedef int (*iterate_transactions_callback)(DB_TXN *dbtxn, iterate_row_locks_callback cb, void *locks_extra, void *extra);\n");
+ printf("typedef int (*iterate_requests_callback)(DB *db, uint64_t requesting_txnid, const DBT *left_key, const DBT *right_key, uint64_t blocking_txnid, uint64_t start_time, void *extra);\n");
+ print_db_env_struct();
+ print_db_key_range_struct();
+ print_db_lsn_struct();
+ print_dbt_struct();
+
+ printf("typedef struct __toku_descriptor {\n");
+ printf(" DBT dbt;\n");
+ printf("} *DESCRIPTOR, DESCRIPTOR_S;\n");
+
+ //file fragmentation info
+ //a block is just a contiguous region in a file.
+ printf("//One header is included in 'data'\n");
+ printf("//One header is included in 'additional for checkpoint'\n");
+ printf("typedef struct __toku_db_fragmentation {\n");
+ printf(" uint64_t file_size_bytes; //Total file size in bytes\n");
+ printf(" uint64_t data_bytes; //Compressed User Data in bytes\n");
+ printf(" uint64_t data_blocks; //Number of blocks of compressed User Data\n");
+ printf(" uint64_t checkpoint_bytes_additional; //Additional bytes used for checkpoint system\n");
+ printf(" uint64_t checkpoint_blocks_additional; //Additional blocks used for checkpoint system \n");
+ printf(" uint64_t unused_bytes; //Unused space in file\n");
+ printf(" uint64_t unused_blocks; //Number of contiguous regions of unused space\n");
+ printf(" uint64_t largest_unused_block; //Size of largest contiguous unused space\n");
+ printf("} *TOKU_DB_FRAGMENTATION, TOKU_DB_FRAGMENTATION_S;\n");
+
+ print_db_struct();
+
+ print_db_txn_active_struct();
+
+ printf("typedef struct __toku_txn_progress {\n");
+ printf(" uint64_t entries_total;\n");
+ printf(" uint64_t entries_processed;\n");
+ printf(" uint8_t is_commit;\n");
+ printf(" uint8_t stalled_on_checkpoint;\n");
+ printf("} *TOKU_TXN_PROGRESS, TOKU_TXN_PROGRESS_S;\n");
+ printf("typedef void(*TXN_PROGRESS_POLL_FUNCTION)(TOKU_TXN_PROGRESS, void*);\n");
+ printf("struct txn_stat {\n uint64_t rollback_raw_count;\n uint64_t rollback_num_entries;\n};\n");
+
+ print_db_txn_struct();
+ print_db_txn_stat_struct();
+ print_dbc_struct();
+
+ printf("int db_env_create(DB_ENV **, uint32_t) %s;\n", VISIBLE);
+ printf("int db_create(DB **, DB_ENV *, uint32_t) %s;\n", VISIBLE);
+ printf("const char *db_strerror(int) %s;\n", VISIBLE);
+ printf("const char *db_version(int*,int *,int *) %s;\n", VISIBLE);
+ printf("int log_compare (const DB_LSN*, const DB_LSN *) %s;\n", VISIBLE);
+ printf("int toku_set_trace_file (const char *fname) %s;\n", VISIBLE);
+ printf("int toku_close_trace_file (void) %s;\n", VISIBLE);
+ printf("void db_env_set_direct_io (bool direct_io_on) %s;\n", VISIBLE);
+ printf("void db_env_set_compress_buffers_before_eviction (bool compress_buffers) %s;\n", VISIBLE);
+ printf("void db_env_set_func_fsync (int (*)(int)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_free (void (*)(void*)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_malloc (void *(*)(size_t)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_realloc (void *(*)(void*, size_t)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_pwrite (ssize_t (*)(int, const void *, size_t, toku_off_t)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_full_pwrite (ssize_t (*)(int, const void *, size_t, toku_off_t)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_write (ssize_t (*)(int, const void *, size_t)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_full_write (ssize_t (*)(int, const void *, size_t)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_fdopen (FILE* (*)(int, const char *)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_fopen (FILE* (*)(const char *, const char *)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_open (int (*)(const char *, int, int)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_fclose (int (*)(FILE*)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_pread (ssize_t (*)(int, void *, size_t, off_t)) %s;\n", VISIBLE);
+ printf("void db_env_set_func_loader_fwrite (size_t (*fwrite_fun)(const void*,size_t,size_t,FILE*)) %s;\n", VISIBLE);
+ printf("void db_env_set_checkpoint_callback (void (*)(void*), void*) %s;\n", VISIBLE);
+ printf("void db_env_set_checkpoint_callback2 (void (*)(void*), void*) %s;\n", VISIBLE);
+ printf("void db_env_set_recover_callback (void (*)(void*), void*) %s;\n", VISIBLE);
+ printf("void db_env_set_recover_callback2 (void (*)(void*), void*) %s;\n", VISIBLE);
+ printf("void db_env_set_loader_size_factor (uint32_t) %s;\n", VISIBLE);
+ printf("void db_env_set_mvcc_garbage_collection_verification(uint32_t) %s;\n", VISIBLE);
+ printf("void db_env_enable_engine_status(bool) %s;\n", VISIBLE);
+ printf("void db_env_set_flusher_thread_callback (void (*)(int, void*), void*) %s;\n", VISIBLE);
+ printf("void db_env_set_num_bucket_mutexes(uint32_t) %s;\n", VISIBLE);
+ printf("int db_env_set_toku_product_name(const char*) %s;\n", VISIBLE);
+ printf("void db_env_try_gdb_stack_trace(const char *gdb_path) %s;\n", VISIBLE);
+
+ printf("#if defined(__cplusplus) || defined(__cilkplusplus)\n}\n#endif\n");
+ printf("#endif\n");
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/cmake/merge_archives_unix.cmake.in b/storage/tokudb/PerconaFT/cmake/merge_archives_unix.cmake.in
new file mode 100644
index 00000000..ef9af445
--- /dev/null
+++ b/storage/tokudb/PerconaFT/cmake/merge_archives_unix.cmake.in
@@ -0,0 +1,96 @@
+# Copyright (c) 2009 Sun Microsystems, Inc.
+# Use is subject to license terms.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
+
+# This script merges many static libraries into
+# one big library on Unix.
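+# A usage sketch: TOKU_MERGE_STATIC_LIBS in TokuMergeLibs.cmake configures this template per
+# target and runs it roughly as
+#   cmake -D TARGET_FILE=<merged.a> -D STATIC_LIB_FILES="<liba.a;libb.a>" -P merge_archives_<target>.cmake
+# (file names above are placeholders, not real paths).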
+SET(TARGET "@TARGET@")
+SET(CMAKE_CURRENT_BINARY_DIR "@CMAKE_CURRENT_BINARY_DIR@")
+SET(CMAKE_AR "@CMAKE_AR@")
+SET(CMAKE_RANLIB "@CMAKE_RANLIB@")
+
+
+SET(TEMP_DIR ${CMAKE_CURRENT_BINARY_DIR}/merge_archives_${TARGET})
+MAKE_DIRECTORY(${TEMP_DIR})
+# Extract each archive to its own subdirectory (to avoid object filename
+# clashes). Since a lib may contain objects with the same name, we first
+# list the archive contents, then uniquify the object names as we extract
+# them.
+FOREACH(LIB ${STATIC_LIB_FILES})
+ GET_FILENAME_COMPONENT(NAME_NO_EXT ${LIB} NAME_WE)
+ SET(TEMP_SUBDIR ${TEMP_DIR}/${NAME_NO_EXT})
+ MAKE_DIRECTORY(${TEMP_SUBDIR})
+ EXECUTE_PROCESS(
+ COMMAND ${CMAKE_AR} -t ${LIB}
+ OUTPUT_VARIABLE LIB_OBJS
+ )
+ STRING(REGEX REPLACE "\n" ";" LIB_OBJ_LIST "${LIB_OBJS}")
+ STRING(REGEX REPLACE ";$" "" LIB_OBJ_LIST "${LIB_OBJ_LIST}")
+
+ LIST(LENGTH LIB_OBJ_LIST LENGTH_WITH_DUPS)
+ SET(LIB_OBJ_LIST_NO_DUPS ${LIB_OBJ_LIST})
+ IF (LENGTH_WITH_DUPS GREATER 0)
+ LIST(REMOVE_DUPLICATES LIB_OBJ_LIST_NO_DUPS)
+ ENDIF ()
+ LIST(LENGTH LIB_OBJ_LIST_NO_DUPS LENGTH_WITHOUT_DUPS)
+
+ IF(LENGTH_WITH_DUPS EQUAL LENGTH_WITHOUT_DUPS)
+    # Optimization for when the lib doesn't actually have duplicate object
+    # names: we can just extract everything.
+ EXECUTE_PROCESS(
+ COMMAND ${CMAKE_AR} -x ${LIB}
+ WORKING_DIRECTORY ${TEMP_SUBDIR}
+ )
+ ELSE()
+ LIST(SORT LIB_OBJ_LIST)
+ SET(SAME_OBJ_COUNT 1)
+ SET(LAST_OBJ_NAME)
+ FOREACH(OBJ ${LIB_OBJ_LIST})
+ IF(OBJ STREQUAL LAST_OBJ_NAME)
+ GET_FILENAME_COMPONENT(OBJ_NO_EXT ${OBJ} NAME_WE)
+ FILE(RENAME "${TEMP_SUBDIR}/${OBJ}" "${TEMP_SUBDIR}/${OBJ_NO_EXT}.${SAME_OBJ_COUNT}.o")
+ MATH(EXPR SAME_OBJ_COUNT "${SAME_OBJ_COUNT}+1")
+ ELSE()
+ SET(SAME_OBJ_COUNT 1)
+ ENDIF()
+ SET(LAST_OBJ_NAME "${OBJ}")
+ EXECUTE_PROCESS(
+ COMMAND ${CMAKE_AR} -xN ${SAME_OBJ_COUNT} ${LIB} ${OBJ}
+ WORKING_DIRECTORY ${TEMP_SUBDIR}
+ )
+ ENDFOREACH()
+ ENDIF()
+
+ FILE(GLOB_RECURSE LIB_OBJECTS "${TEMP_SUBDIR}/*.o")
+ SET(OBJECTS ${OBJECTS} ${LIB_OBJECTS})
+ENDFOREACH()
+
+# Use relative paths, makes command line shorter.
+GET_FILENAME_COMPONENT(ABS_TEMP_DIR ${TEMP_DIR} ABSOLUTE)
+FOREACH(OBJ ${OBJECTS})
+ FILE(RELATIVE_PATH OBJ ${ABS_TEMP_DIR} ${OBJ})
+ FILE(TO_NATIVE_PATH ${OBJ} OBJ)
+ SET(ALL_OBJECTS ${ALL_OBJECTS} ${OBJ})
+ENDFOREACH()
+
+FILE(TO_NATIVE_PATH ${TARGET_FILE} TARGET_FILE)
+# Now pack the objects into library with ar.
+EXECUTE_PROCESS(
+ COMMAND ${CMAKE_AR} rcs ${TARGET_FILE} ${ALL_OBJECTS}
+ WORKING_DIRECTORY ${TEMP_DIR}
+)
+
+# Cleanup
+FILE(REMOVE_RECURSE ${TEMP_DIR})
diff --git a/storage/tokudb/PerconaFT/cmake_modules/FindValgrind.cmake b/storage/tokudb/PerconaFT/cmake_modules/FindValgrind.cmake
new file mode 100644
index 00000000..73841723
--- /dev/null
+++ b/storage/tokudb/PerconaFT/cmake_modules/FindValgrind.cmake
@@ -0,0 +1,18 @@
+# Find Valgrind.
+#
+# This module defines:
+# VALGRIND_INCLUDE_DIR, where to find valgrind/memcheck.h, etc.
+# VALGRIND_PROGRAM, the valgrind executable.
+# VALGRIND_FOUND, If false, do not try to use valgrind.
+#
+# If you have valgrind installed in a non-standard place, you can define
+# VALGRIND_PREFIX to tell cmake where it is.
+
+find_path(VALGRIND_INCLUDE_DIR valgrind/memcheck.h)
+find_program(VALGRIND_PROGRAM NAMES valgrind)
+
+find_package_handle_standard_args(Valgrind DEFAULT_MSG
+ VALGRIND_INCLUDE_DIR
+ VALGRIND_PROGRAM)
+
+mark_as_advanced(VALGRIND_INCLUDE_DIR VALGRIND_PROGRAM)
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuBuildTagDatabases.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuBuildTagDatabases.cmake
new file mode 100644
index 00000000..4ee15f93
--- /dev/null
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuBuildTagDatabases.cmake
@@ -0,0 +1,126 @@
+## set up lists of sources and headers for tags
+file(GLOB_RECURSE all_srcs
+ buildheader/*.cc
+ db-benchmark-test/*.cc
+ ft/*.cc
+ include/*.cc
+ locktree/*.cc
+ portability/*.cc
+ src/*.cc
+ utils/*.cc
+ util/*.cc
+ db-benchmark-test/*.cc
+ )
+list(APPEND all_srcs
+ ${CMAKE_CURRENT_BINARY_DIR}/ft/log_code.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/ft/log_print.cc
+ )
+file(GLOB_RECURSE all_hdrs
+ buildheader/*.h
+ db-benchmark-test/*.h
+ ft/*.h
+ include/*.h
+ locktree/*.h
+ portability/*.h
+ src/*.h
+ utils/*.h
+ util/*.h
+ db-benchmark-test/*.h
+ )
+list(APPEND all_hdrs
+ ${CMAKE_CURRENT_BINARY_DIR}/portability/toku_config.h
+ ${CMAKE_CURRENT_BINARY_DIR}/buildheader/db.h
+ ${CMAKE_CURRENT_BINARY_DIR}/ft/log_header.h
+ )
+
+option(USE_ETAGS "Build the etags database." ON)
+if (USE_ETAGS)
+ find_program(ETAGS "etags")
+ if (NOT ETAGS MATCHES NOTFOUND)
+ add_custom_command(
+ OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/TAGS"
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/etags-stamp"
+ COMMAND ${ETAGS} -o TAGS ${all_srcs} ${all_hdrs}
+ COMMAND touch "${CMAKE_CURRENT_BINARY_DIR}/etags-stamp"
+ DEPENDS ${all_srcs} ${all_hdrs} install_tdb_h generate_config_h generate_log_code
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
+ add_custom_target(build_etags ALL DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/TAGS" etags-stamp)
+ endif ()
+endif ()
+
+option(USE_CTAGS "Build the ctags database." ON)
+if (USE_CTAGS AND
+ # Macs by default are not case-sensitive, so tags and TAGS clobber each other. Do etags and not ctags in that case, because Emacs is superior. :P
+ (NOT APPLE OR NOT USE_ETAGS))
+ find_program(CTAGS "ctags")
+ if (NOT CTAGS MATCHES NOTFOUND)
+ add_custom_command(
+ OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/tags"
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/ctags-stamp"
+ COMMAND ${CTAGS} -o tags ${all_srcs} ${all_hdrs}
+ COMMAND touch "${CMAKE_CURRENT_BINARY_DIR}/ctags-stamp"
+ DEPENDS ${all_srcs} ${all_hdrs} install_tdb_h generate_config_h generate_log_code
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
+ add_custom_target(build_ctags ALL DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/tags" ctags-stamp)
+ endif ()
+endif ()
+
+option(USE_CSCOPE "Build the cscope database." ON)
+if (USE_CSCOPE)
+ find_program(CSCOPE "cscope")
+ if (NOT CSCOPE MATCHES NOTFOUND)
+ file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/cscope.files" "")
+ foreach(file ${all_srcs} ${all_hdrs})
+ file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/cscope.files" "${file}\n")
+ endforeach(file)
+ add_custom_command(
+ OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/cscope.out"
+ OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/cscope.in.out"
+ OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/cscope.po.out"
+ COMMAND ${CSCOPE} -b -q -R -i"${CMAKE_CURRENT_BINARY_DIR}/cscope.files" -I"${CMAKE_CURRENT_SOURCE_DIR}" -I"${CMAKE_CURRENT_SOURCE_DIR}/include" -I"${CMAKE_CURRENT_SOURCE_DIR}/portability" -I"${CMAKE_CURRENT_SOURCE_DIR}/portability" -I"${CMAKE_CURRENT_SOURCE_DIR}/ft" -I"${CMAKE_CURRENT_SOURCE_DIR}/src" -I"${CMAKE_CURRENT_SOURCE_DIR}/locktree" -I"${CMAKE_CURRENT_SOURCE_DIR}/utils" -I"${CMAKE_CURRENT_SOURCE_DIR}/db-benchmark-test" -I"${CMAKE_CURRENT_BINARY_DIR}" -I"${CMAKE_CURRENT_BINARY_DIR}/portability" -I"${CMAKE_CURRENT_BINARY_DIR}/buildheader"
+ DEPENDS ${all_srcs} ${all_hdrs} install_tdb_h generate_config_h generate_log_code
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
+ add_custom_target(build_cscope.out ALL DEPENDS
+ "${CMAKE_CURRENT_SOURCE_DIR}/cscope.out"
+ "${CMAKE_CURRENT_SOURCE_DIR}/cscope.in.out"
+ "${CMAKE_CURRENT_SOURCE_DIR}/cscope.po.out")
+ endif ()
+endif ()
+
+option(USE_GTAGS "Build the gtags database." ON)
+if (USE_GTAGS)
+ find_program(GTAGS "gtags")
+ if (NOT GTAGS MATCHES NOTFOUND)
+ file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/gtags.files" "")
+ foreach(file ${all_srcs} ${all_hdrs})
+ file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/gtags.files" "${file}\n")
+ endforeach(file)
+ add_custom_command(
+ OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/GTAGS"
+ OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/GRTAGS"
+ OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/GPATH"
+ OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/GSYMS"
+ COMMAND ${GTAGS} -f "${CMAKE_CURRENT_BINARY_DIR}/gtags.files"
+ DEPENDS ${all_srcs} ${all_hdrs} install_tdb_h generate_config_h generate_log_code
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
+ add_custom_target(build_GTAGS ALL DEPENDS
+ "${CMAKE_CURRENT_SOURCE_DIR}/GTAGS"
+ "${CMAKE_CURRENT_SOURCE_DIR}/GRTAGS"
+ "${CMAKE_CURRENT_SOURCE_DIR}/GPATH"
+ "${CMAKE_CURRENT_SOURCE_DIR}/GSYMS")
+ endif ()
+endif ()
+
+option(USE_MKID "Build the idutils database." ON)
+if (USE_MKID)
+ find_program(MKID "mkid")
+ if (NOT MKID MATCHES NOTFOUND)
+ add_custom_command(
+ OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/ID"
+ COMMAND ${MKID} ${all_srcs} ${all_hdrs}
+ DEPENDS ${all_srcs} ${all_hdrs} install_tdb_h generate_config_h generate_log_code
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
+ add_custom_target(build_MKID ALL DEPENDS
+ "${CMAKE_CURRENT_SOURCE_DIR}/ID")
+ endif ()
+endif ()
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake
new file mode 100644
index 00000000..eac55725
--- /dev/null
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake
@@ -0,0 +1,137 @@
+## feature detection
+find_package(Threads)
+find_package(ZLIB REQUIRED)
+
+option(USE_VALGRIND "Build to run safely under valgrind (often slower)." ON)
+if(USE_VALGRIND)
+ find_package(Valgrind REQUIRED)
+endif()
+
+option(TOKU_DEBUG_PARANOID "Enable paranoid asserts." ON)
+
+include(CheckIncludeFiles)
+
+## check for some include files
+check_include_files(alloca.h HAVE_ALLOCA_H)
+check_include_files(arpa/inet.h HAVE_ARPA_INET_H)
+check_include_files(bits/functexcept.h HAVE_BITS_FUNCTEXCEPT_H)
+check_include_files(byteswap.h HAVE_BYTESWAP_H)
+check_include_files(endian.h HAVE_ENDIAN_H)
+check_include_files(fcntl.h HAVE_FCNTL_H)
+check_include_files(inttypes.h HAVE_INTTYPES_H)
+check_include_files(libkern/OSAtomic.h HAVE_LIBKERN_OSATOMIC_H)
+check_include_files(libkern/OSByteOrder.h HAVE_LIBKERN_OSBYTEORDER_H)
+check_include_files(limits.h HAVE_LIMITS_H)
+check_include_files(machine/endian.h HAVE_MACHINE_ENDIAN_H)
+check_include_files(malloc.h HAVE_MALLOC_H)
+check_include_files(malloc/malloc.h HAVE_MALLOC_MALLOC_H)
+check_include_files(malloc_np.h HAVE_MALLOC_NP_H)
+check_include_files(pthread.h HAVE_PTHREAD_H)
+check_include_files(pthread_np.h HAVE_PTHREAD_NP_H)
+check_include_files(stdint.h HAVE_STDINT_H)
+check_include_files(stdlib.h HAVE_STDLIB_H)
+check_include_files(string.h HAVE_STRING_H)
+check_include_files(syscall.h HAVE_SYSCALL_H)
+check_include_files(sys/endian.h HAVE_SYS_ENDIAN_H)
+check_include_files(sys/file.h HAVE_SYS_FILE_H)
+check_include_files(sys/malloc.h HAVE_SYS_MALLOC_H)
+check_include_files(sys/prctl.h HAVE_SYS_PRCTL_H)
+check_include_files(sys/resource.h HAVE_SYS_RESOURCE_H)
+check_include_files(sys/statvfs.h HAVE_SYS_STATVFS_H)
+check_include_files(sys/syscall.h HAVE_SYS_SYSCALL_H)
+check_include_files(sys/sysctl.h HAVE_SYS_SYSCTL_H)
+check_include_files(sys/syslimits.h HAVE_SYS_SYSLIMITS_H)
+check_include_files(sys/time.h HAVE_SYS_TIME_H)
+check_include_files(unistd.h HAVE_UNISTD_H)
+
+include(CheckSymbolExists)
+
+## check whether we can set the mmap threshold like we can in gnu libc's malloc
+check_symbol_exists(M_MMAP_THRESHOLD "malloc.h" HAVE_M_MMAP_THRESHOLD)
+## check whether we have CLOCK_REALTIME
+check_symbol_exists(CLOCK_REALTIME "time.h" HAVE_CLOCK_REALTIME)
+## check how to do direct I/O
+if (NOT CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
+ set(CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
+endif ()
+check_symbol_exists(O_DIRECT "fcntl.h" HAVE_O_DIRECT)
+check_symbol_exists(F_NOCACHE "fcntl.h" HAVE_F_NOCACHE)
+check_symbol_exists(MAP_ANONYMOUS "sys/mman.h" HAVE_MAP_ANONYMOUS)
+check_symbol_exists(PR_SET_PTRACER "sys/prctl.h" HAVE_PR_SET_PTRACER)
+check_symbol_exists(PR_SET_PTRACER_ANY "sys/prctl.h" HAVE_PR_SET_PTRACER_ANY)
+
+include(CheckFunctionExists)
+
+## check for the right way to get the actual allocation size of a pointer
+check_function_exists(malloc_size HAVE_MALLOC_SIZE)
+check_function_exists(malloc_usable_size HAVE_MALLOC_USABLE_SIZE)
+## check whether we have memalign or valloc (a weak substitute for memalign on darwin)
+check_function_exists(memalign HAVE_MEMALIGN)
+check_function_exists(valloc HAVE_VALLOC)
+## check whether we have random_r or nrand48 to use as a reentrant random function
+check_function_exists(nrand48 HAVE_NRAND48)
+check_function_exists(random_r HAVE_RANDOM_R)
+check_function_exists(mincore HAVE_MINCORE)
+
+## clear this out in case mysql modified it
+set(CMAKE_REQUIRED_LIBRARIES "")
+set(EXTRA_SYSTEM_LIBS "")
+check_function_exists(dlsym HAVE_DLSYM_WITHOUT_DL)
+if (NOT HAVE_DLSYM_WITHOUT_DL)
+ set(CMAKE_REQUIRED_LIBRARIES dl)
+ check_function_exists(dlsym HAVE_DLSYM_WITH_DL)
+ if (HAVE_DLSYM_WITH_DL)
+ list(APPEND EXTRA_SYSTEM_LIBS dl)
+ else ()
+ message(FATAL_ERROR "Cannot find dlsym(), even with -ldl.")
+ endif ()
+endif ()
+check_function_exists(backtrace HAVE_BACKTRACE_WITHOUT_EXECINFO)
+if (NOT HAVE_BACKTRACE_WITHOUT_EXECINFO)
+ set(CMAKE_REQUIRED_LIBRARIES execinfo)
+ check_function_exists(backtrace HAVE_BACKTRACE_WITH_EXECINFO)
+ if (HAVE_BACKTRACE_WITH_EXECINFO)
+ list(APPEND EXTRA_SYSTEM_LIBS execinfo)
+ else ()
+ message(WARNING "Cannot find backtrace(), even with -lexecinfo.")
+ endif ()
+endif ()
+
+if(HAVE_CLOCK_REALTIME AND (NOT APPLE))
+ list(APPEND EXTRA_SYSTEM_LIBS rt)
+else()
+ list(APPEND EXTRA_SYSTEM_LIBS System)
+endif()
+
+set(CMAKE_REQUIRED_LIBRARIES pthread)
+## check whether we can change rwlock preference
+check_function_exists(pthread_rwlockattr_setkind_np HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP)
+## check for the right way to yield using pthreads
+check_function_exists(pthread_yield HAVE_PTHREAD_YIELD)
+check_function_exists(pthread_yield_np HAVE_PTHREAD_YIELD_NP)
+## check if we have pthread_threadid_np() (i.e. osx)
+check_function_exists(pthread_threadid_np HAVE_PTHREAD_THREADID_NP)
+## check if we have pthread_getthreadid_np() (i.e. freebsd)
+check_function_exists(pthread_getthreadid_np HAVE_PTHREAD_GETTHREADID_NP)
+check_function_exists(sched_getcpu HAVE_SCHED_GETCPU)
+
+include(CheckCSourceCompiles)
+
+if (HAVE_PTHREAD_YIELD)
+ include(CheckPrototypeDefinition)
+
+ check_prototype_definition(pthread_yield "void pthread_yield(void)" "(void)0" "pthread.h" PTHREAD_YIELD_RETURNS_VOID)
+ check_c_source_compiles("#include <pthread.h>
+int main(void) {
+ int r = pthread_yield();
+ return r;
+}" PTHREAD_YIELD_RETURNS_INT)
+endif (HAVE_PTHREAD_YIELD)
+
+## check whether we have gcc-style thread-local storage using a storage class modifier
+check_c_source_compiles("#include <pthread.h>
+static __thread int tlsvar = 0;
+int main(void) { return tlsvar; }" HAVE_GNU_TLS)
+
+## set TOKUDB_REVISION
+set(CMAKE_TOKUDB_REVISION 0 CACHE INTERNAL "Revision of tokudb.")
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuMergeLibs.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuMergeLibs.cmake
new file mode 100644
index 00000000..34133396
--- /dev/null
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuMergeLibs.cmake
@@ -0,0 +1,99 @@
+# Merge static libraries into a big static lib. The resulting library
+# should not have dependencies on other static libraries.
+# We use it in MySQL to merge mysys, dbug, vio, etc. into mysqlclient.
+FUNCTION(TOKU_GET_DEPENDEND_OS_LIBS target result)
+ SET(deps ${${target}_LIB_DEPENDS})
+ FOREACH(lib ${deps})
+ IF(TARGET ${lib})
+ SET(ret ${ret} ${lib})
+ ENDIF()
+ ENDFOREACH()
+ SET(${result} ${ret} PARENT_SCOPE)
+ENDFUNCTION(TOKU_GET_DEPENDEND_OS_LIBS)
+
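+# Usage sketch (the target names here are hypothetical, not from this tree):
+#   TOKU_MERGE_STATIC_LIBS(tokufractaltree_static tokufractaltree "ft_static;util_static")
+# would produce a single archive, libtokufractaltree.a, containing the objects of both inputs.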
+MACRO(TOKU_MERGE_STATIC_LIBS TARGET OUTPUT_NAME LIBS_TO_MERGE)
+  # To produce a library we need at least one source file.
+  # It is created by ADD_CUSTOM_COMMAND below and also helps
+  # to track dependencies.
+ SET(SOURCE_FILE ${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_depends.cc)
+ ADD_LIBRARY(${TARGET} STATIC ${SOURCE_FILE})
+ SET_TARGET_PROPERTIES(${TARGET} PROPERTIES OUTPUT_NAME ${OUTPUT_NAME})
+
+ SET(OSLIBS)
+ FOREACH(LIB ${LIBS_TO_MERGE})
+ IF(TARGET ${LIB})
+ # This is a target in current project
+ # (can be a static or shared lib)
+ GET_TARGET_PROPERTY(LIB_TYPE ${LIB} TYPE)
+ IF(LIB_TYPE STREQUAL "STATIC_LIBRARY")
+ LIST(APPEND STATIC_LIBS ${LIB})
+ ADD_DEPENDENCIES(${TARGET} ${LIB})
+        # Extract dependent OS libraries
+ TOKU_GET_DEPENDEND_OS_LIBS(${LIB} LIB_OSLIBS)
+ LIST(APPEND OSLIBS ${LIB_OSLIBS})
+ ELSE()
+ # This is a shared library our static lib depends on.
+ LIST(APPEND OSLIBS ${LIB})
+ ENDIF()
+ ELSE()
+ # 3rd party library like libz.so. Make sure that everything
+ # that links to our library links to this one as well.
+ LIST(APPEND OSLIBS ${LIB})
+ ENDIF()
+ ENDFOREACH()
+ IF(OSLIBS)
+    # REMOVE_DUPLICATES destroys the order of the libs, so it is disabled:
+ # LIST(REMOVE_DUPLICATES OSLIBS)
+ TARGET_LINK_LIBRARIES(${TARGET} LINK_PUBLIC ${OSLIBS})
+ ENDIF()
+
+  # Make the generated dummy source file depend on all static input
+  # libs. If an input lib changes, the source file is touched,
+  # which causes the desired effect (relink).
+ ADD_CUSTOM_COMMAND(
+ OUTPUT ${SOURCE_FILE}
+ COMMAND ${CMAKE_COMMAND} -E touch ${SOURCE_FILE}
+ DEPENDS ${STATIC_LIBS})
+
+ IF(MSVC)
+ # To merge libs, just pass them to lib.exe command line.
+ SET(LINKER_EXTRA_FLAGS "")
+ FOREACH(LIB ${STATIC_LIBS})
+ SET(LINKER_EXTRA_FLAGS "${LINKER_EXTRA_FLAGS} $<TARGET_FILE:${LIB}>")
+ ENDFOREACH()
+ SET_TARGET_PROPERTIES(${TARGET} PROPERTIES STATIC_LIBRARY_FLAGS
+ "${LINKER_EXTRA_FLAGS}")
+ ELSE()
+ FOREACH(STATIC_LIB ${STATIC_LIBS})
+ LIST(APPEND STATIC_LIB_FILES $<TARGET_FILE:${STATIC_LIB}>)
+ ENDFOREACH()
+ IF(APPLE)
+      # Use OSX's libtool to merge archives (it handles universal
+      # binaries properly)
+ ADD_CUSTOM_COMMAND(TARGET ${TARGET} POST_BUILD
+ COMMAND rm $<TARGET_FILE:${TARGET}>
+ COMMAND /usr/bin/libtool -static -o $<TARGET_FILE:${TARGET}>
+ ${STATIC_LIB_FILES}
+ )
+ ELSE()
+      # Generic Unix, Cygwin or MinGW. In a post-build step, call a
+      # script that extracts objects from the archives with "ar x"
+      # and repacks them with "ar r".
+ SET(TARGET ${TARGET})
+ CONFIGURE_FILE(
+ ${TOKU_CMAKE_SCRIPT_DIR}/merge_archives_unix.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/merge_archives_${TARGET}.cmake
+ @ONLY
+ )
+ STRING(REGEX REPLACE ";" "\\\;" STATIC_LIB_FILES "${STATIC_LIB_FILES}")
+ ADD_CUSTOM_COMMAND(TARGET ${TARGET} POST_BUILD
+ COMMAND rm $<TARGET_FILE:${TARGET}>
+ COMMAND ${CMAKE_COMMAND}
+ -D TARGET_FILE=$<TARGET_FILE:${TARGET}>
+ -D STATIC_LIB_FILES="${STATIC_LIB_FILES}"
+ -P ${CMAKE_CURRENT_BINARY_DIR}/merge_archives_${TARGET}.cmake
+ DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/merge_archives_${TARGET}.cmake"
+ )
+ ENDIF()
+ ENDIF()
+ENDMACRO(TOKU_MERGE_STATIC_LIBS)
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCTest.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCTest.cmake
new file mode 100644
index 00000000..5b6882cc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCTest.cmake
@@ -0,0 +1,155 @@
+## some functions for getting system info so we can construct BUILDNAME
+
+## given an executable, follows symlinks and resolves paths until it runs
+## out of symlinks, then gives you the basename
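+## (for example, "/usr/bin/cc" on a hypothetical system might resolve through its symlink
+## chain to "x86_64-linux-gnu-gcc-7", and that basename is what gets returned)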
+macro(real_executable_name filename_input out)
+ set(res 0)
+ set(filename ${filename_input})
+ while(NOT(res))
+ execute_process(
+ COMMAND which ${filename}
+ RESULT_VARIABLE res
+ ERROR_QUIET
+ OUTPUT_VARIABLE full_filename
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(NOT(res))
+ execute_process(
+ COMMAND readlink ${full_filename}
+ RESULT_VARIABLE res
+ OUTPUT_VARIABLE link_target
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(NOT(res))
+ execute_process(
+ COMMAND dirname ${full_filename}
+ OUTPUT_VARIABLE filepath
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set(filename "${filepath}/${link_target}")
+ else()
+ set(filename ${full_filename})
+ endif()
+ else()
+ set(filename ${filename})
+ endif()
+ endwhile()
+ execute_process(
+ COMMAND basename ${filename}
+ OUTPUT_VARIABLE real_filename
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set(${out} ${real_filename})
+endmacro(real_executable_name)
+
+## gives you `uname ${flag}`
+macro(uname flag out)
+ execute_process(
+ COMMAND uname ${flag}
+ OUTPUT_VARIABLE ${out}
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+endmacro(uname)
+
+## gives the current username
+macro(whoami out)
+ execute_process(
+ COMMAND whoami
+ OUTPUT_VARIABLE ${out}
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+endmacro(whoami)
+
+## gives the current hostname, minus .tokutek.com if it's there
+macro(hostname out)
+ execute_process(
+ COMMAND hostname
+ OUTPUT_VARIABLE fullhostname
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ string(REGEX REPLACE "\\.tokutek\\.com$" "" ${out} "${fullhostname}")
+endmacro(hostname)
+
+## gather machine info
+uname("-m" machine_type)
+real_executable_name("${CMAKE_CXX_COMPILER}" real_cxx_compiler)
+get_filename_component(branchname "${CMAKE_CURRENT_SOURCE_DIR}" NAME)
+hostname(host)
+whoami(user)
+
+## construct SITE, seems to have to happen before include(CTest)
+set(SITE "${user}@${host}")
+if (USE_GCOV)
+ set(buildname_build_type "Coverage")
+else (USE_GCOV)
+ set(buildname_build_type "${CMAKE_BUILD_TYPE}")
+endif (USE_GCOV)
+## construct BUILDNAME, seems to have to happen before include(CTest)
+set(BUILDNAME "${branchname} ${buildname_build_type} ${CMAKE_SYSTEM} ${machine_type} ${CMAKE_CXX_COMPILER_ID} ${real_cxx_compiler} ${CMAKE_CXX_COMPILER_VERSION}" CACHE STRING "CTest build name" FORCE)
+
+include(CTest)
+
+set(TOKUDB_DATA "${TokuDB_SOURCE_DIR}/../tokudb.data" CACHE FILEPATH "Path to data files for tests")
+
+if (BUILD_TESTING OR BUILD_FT_TESTS OR BUILD_SRC_TESTS)
+ set(WARNED_ABOUT_DATA 0)
+ if (NOT EXISTS "${TOKUDB_DATA}/" AND NOT WARNED_ABOUT_DATA AND CMAKE_PROJECT_NAME STREQUAL TokuDB)
+ message(WARNING "Test data files are missing from ${TOKUDB_DATA}, which will cause some tests to fail. Please put them there or modify TOKUDB_DATA to avoid this.")
+ set(WARNED_ABOUT_DATA 1)
+ endif ()
+
+ ## set up full valgrind suppressions file (concatenate the suppressions files)
+ file(READ ft/valgrind.suppressions valgrind_suppressions)
+ file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/valgrind.suppressions" "${valgrind_suppressions}")
+ file(READ bash.suppressions bash_suppressions)
+ file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/valgrind.suppressions" "${bash_suppressions}")
+
+ include(CMakeDependentOption)
+ set(helgrind_drd_depend_conditions "")
+ ## Helgrind and DRD explicitly state that they only run with the Linux
+ ## glibc-2.3 NPTL threading implementation [1,2]. If this ever changes
+ ## we can enable helgrind and drd on other systems.
+ ## [1]: http://valgrind.org/docs/manual/hg-manual.html#hg-manual.effective-use
+ ## [2]: http://valgrind.org/docs/manual/drd-manual.html#drd-manual.limitations
+ list(APPEND helgrind_drd_depend_conditions "CMAKE_SYSTEM_NAME STREQUAL Linux")
+ ## no point doing it with gcov
+ list(APPEND helgrind_drd_depend_conditions "NOT USE_GCOV")
+ cmake_dependent_option(RUN_DRD_TESTS "Run some tests under drd." ON
+ "${helgrind_drd_depend_conditions}" OFF)
+ cmake_dependent_option(RUN_HELGRIND_TESTS "Run some tests under helgrind." ON
+ "${helgrind_drd_depend_conditions}" OFF)
+
+ macro(setup_toku_test_properties test str)
+ set_tests_properties(${test} PROPERTIES ENVIRONMENT "TOKU_TEST_FILENAME=${str}.ctest-data")
+ set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${str}.ctest-data")
+ endmacro(setup_toku_test_properties)
+
+ macro(add_toku_test_aux pfx name bin)
+ add_test(${pfx}/${name} ${bin} ${ARGN})
+ setup_toku_test_properties(${pfx}/${name} ${name})
+ endmacro(add_toku_test_aux)
+ macro(add_toku_test pfx bin)
+ add_toku_test_aux(${pfx} ${bin} ${bin} ${ARGN})
+ endmacro(add_toku_test)
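+  ## usage sketch (hypothetical test binary): add_toku_test(ft test-foo --verbose) registers
+  ## ctest test "ft/test-foo" running `test-foo --verbose`, with TOKU_TEST_FILENAME pointed
+  ## at test-foo.ctest-data by setup_toku_test_properties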
+
+ ## setup a function to write tests that will run with helgrind
+ set(CMAKE_HELGRIND_COMMAND_STRING "valgrind --quiet --tool=helgrind --error-exitcode=1 --soname-synonyms=somalloc=*tokuportability* --suppressions=${TokuDB_SOURCE_DIR}/src/tests/helgrind.suppressions --trace-children=yes --trace-children-skip=sh,*/sh,basename,*/basename,dirname,*/dirname,rm,*/rm,cp,*/cp,mv,*/mv,cat,*/cat,diff,*/diff,grep,*/grep,date,*/date,test,*/tokudb_dump* --trace-children-skip-by-arg=--only_create,--test,--no-shutdown,novalgrind")
+ function(add_helgrind_test pfx name)
+ separate_arguments(CMAKE_HELGRIND_COMMAND_STRING)
+ add_test(
+ NAME ${pfx}/${name}
+ COMMAND ${CMAKE_HELGRIND_COMMAND_STRING} ${ARGN}
+ )
+ setup_toku_test_properties(${pfx}/${name} ${name})
+ endfunction(add_helgrind_test)
+
+ ## setup a function to write tests that will run with drd
+ set(CMAKE_DRD_COMMAND_STRING "valgrind --quiet --tool=drd --error-exitcode=1 --soname-synonyms=somalloc=*tokuportability* --suppressions=${TokuDB_SOURCE_DIR}/src/tests/drd.suppressions --trace-children=yes --trace-children-skip=sh,*/sh,basename,*/basename,dirname,*/dirname,rm,*/rm,cp,*/cp,mv,*/mv,cat,*/cat,diff,*/diff,grep,*/grep,date,*/date,test,*/tokudb_dump* --trace-children-skip-by-arg=--only_create,--test,--no-shutdown,novalgrind")
+ function(add_drd_test pfx name)
+ separate_arguments(CMAKE_DRD_COMMAND_STRING)
+ add_test(
+ NAME ${pfx}/${name}
+ COMMAND ${CMAKE_DRD_COMMAND_STRING} ${ARGN}
+ )
+ setup_toku_test_properties(${pfx}/${name} ${name})
+ endfunction(add_drd_test)
+
+ option(RUN_LONG_TESTS "If set, run all tests, even the ones that take a long time to complete." OFF)
+ option(RUN_STRESS_TESTS "If set, run the stress tests." OFF)
+ option(RUN_PERF_TESTS "If set, run the perf tests." OFF)
+
+ configure_file(CTestCustom.cmake.in CTestCustom.cmake @ONLY)
+endif (BUILD_TESTING OR BUILD_FT_TESTS OR BUILD_SRC_TESTS)
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
new file mode 100644
index 00000000..c82521db
--- /dev/null
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
@@ -0,0 +1,191 @@
+function(add_c_defines)
+ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS ${ARGN})
+endfunction(add_c_defines)
+
+if (APPLE)
+ add_c_defines(DARWIN=1 _DARWIN_C_SOURCE)
+endif ()
+
+## preprocessor definitions we want everywhere
+add_c_defines(
+ _FILE_OFFSET_BITS=64
+ _LARGEFILE64_SOURCE
+ __STDC_FORMAT_MACROS
+ __STDC_LIMIT_MACROS
+ __LONG_LONG_SUPPORTED
+ )
+if (NOT CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
+ ## on FreeBSD these types of macros actually remove functionality
+ add_c_defines(
+ _DEFAULT_SOURCE
+ _XOPEN_SOURCE=600
+ )
+endif ()
+
+## add TOKU_PTHREAD_DEBUG for debug builds
+if (CMAKE_VERSION VERSION_LESS 3.0)
+ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG TOKU_PTHREAD_DEBUG=1 TOKU_DEBUG_TXN_SYNC=1)
+ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DRD TOKU_PTHREAD_DEBUG=1 TOKU_DEBUG_TXN_SYNC=1)
+ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DRD _FORTIFY_SOURCE=2)
+else ()
+ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS
+ $<$<OR:$<CONFIG:DEBUG>,$<CONFIG:DRD>>:TOKU_PTHREAD_DEBUG=1 TOKU_DEBUG_TXN_SYNC=1>
+ $<$<CONFIG:DRD>:_FORTIFY_SOURCE=2>
+ )
+endif ()
+
+## coverage
+option(USE_GCOV "Use gcov for test coverage." OFF)
+if (USE_GCOV)
+ if (NOT CMAKE_CXX_COMPILER_ID MATCHES GNU)
+ message(FATAL_ERROR "Must use the GNU compiler to compile for test coverage.")
+ endif ()
+ find_program(COVERAGE_COMMAND NAMES gcov47 gcov)
+endif (USE_GCOV)
+
+include(CheckCCompilerFlag)
+include(CheckCXXCompilerFlag)
+
+## adds a compiler flag if the compiler supports it
+macro(prepend_cflags_if_supported)
+ foreach(flag ${ARGN})
+ MY_CHECK_AND_SET_COMPILER_FLAG(${flag})
+ endforeach(flag)
+endmacro(prepend_cflags_if_supported)
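+## (e.g. prepend_cflags_if_supported(-Wextra -Wsome-new-warning) should add only the flags the
+## compiler accepts, assuming MY_CHECK_AND_SET_COMPILER_FLAG from the MySQL/MariaDB build macros
+## probes each flag before setting it)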
+
+if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
+ set (OPTIONAL_CFLAGS "${OPTIONAL_CFLAGS} -Wmissing-format-attribute")
+endif()
+
+## disable some warnings
+prepend_cflags_if_supported(
+ -Wno-missing-field-initializers
+ -Wstrict-null-sentinel
+ -Winit-self
+ -Wswitch
+ -Wtrampolines
+ -Wlogical-op
+ ${OPTIONAL_CFLAGS}
+ -Wno-error=missing-format-attribute
+ -Wno-error=address-of-array-temporary
+ -Wno-error=tautological-constant-out-of-range-compare
+ -Wno-error=maybe-uninitialized
+ -Wno-error=extern-c-compat
+ -fno-exceptions
+ -Wno-error=nonnull-compare
+ )
+
+## Clang has stricter POD checks. So, only enable this warning on our other builds (Linux + GCC)
+if (NOT CMAKE_CXX_COMPILER_ID MATCHES Clang)
+ prepend_cflags_if_supported(
+ -Wpacked
+ )
+endif ()
+
+option (PROFILING "Allow profiling and debug" ON)
+if (PROFILING)
+ prepend_cflags_if_supported(
+ -fno-omit-frame-pointer
+ )
+endif ()
+
+# new flag sets in MySQL 8.0 seem to explicitly disable this
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fexceptions")
+
+## set extra debugging flags and preprocessor definitions
+set(CMAKE_C_FLAGS_DEBUG "-g3 -O0 ${CMAKE_C_FLAGS_DEBUG}")
+set(CMAKE_CXX_FLAGS_DEBUG "-g3 -O0 ${CMAKE_CXX_FLAGS_DEBUG}")
+
+## flags to use when we want to run DRD on the resulting binaries
+## DRD needs debugging symbols.
+## -O0 makes it too slow, and -O2 inlines too much for our suppressions to work. -O1 is just right.
+set(CMAKE_C_FLAGS_DRD "-g3 -O1 ${CMAKE_C_FLAGS_DRD}")
+set(CMAKE_CXX_FLAGS_DRD "-g3 -O1 ${CMAKE_CXX_FLAGS_DRD}")
+
+## set extra release flags
+## need to set flags for RelWithDebInfo as well because we want the MySQL/MariaDB builds to use them
+if (CMAKE_CXX_COMPILER_ID STREQUAL Clang)
+ # have tried -flto and -O4, both make our statically linked executables break apple's linker
+ set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
+ set(CMAKE_C_FLAGS_RELEASE "-g -O3 ${CMAKE_C_FLAGS_RELEASE} -UNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELEASE "-g -O3 ${CMAKE_CXX_FLAGS_RELEASE} -UNDEBUG")
+else ()
+ if (APPLE)
+ set(FLTO_OPTS "-fwhole-program")
+ else ()
+ set(FLTO_OPTS "-fuse-linker-plugin")
+ endif()
+ # we overwrite this because the default passes -DNDEBUG and we don't want that
+ set(CMAKE_C_FLAGS_RELWITHDEBINFO "-flto ${FLTO_OPTS} ${CMAKE_C_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-flto ${FLTO_OPTS} ${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -g -O3 -UNDEBUG")
+ set(CMAKE_C_FLAGS_RELEASE "-g -O3 -flto ${FLTO_OPTS} ${CMAKE_C_FLAGS_RELEASE} -UNDEBUG")
+ set(CMAKE_CXX_FLAGS_RELEASE "-g -O3 -flto ${FLTO_OPTS} ${CMAKE_CXX_FLAGS_RELEASE} -UNDEBUG")
+ set(CMAKE_EXE_LINKER_FLAGS "-g ${FLTO_OPTS} ${CMAKE_EXE_LINKER_FLAGS}")
+ set(CMAKE_SHARED_LINKER_FLAGS "-g ${FLTO_OPTS} ${CMAKE_SHARED_LINKER_FLAGS}")
+endif ()
+
+## set warnings
+prepend_cflags_if_supported(
+ -Wextra
+ -Wbad-function-cast
+ -Wno-missing-noreturn
+ -Wstrict-prototypes
+ -Wmissing-prototypes
+ -Wmissing-declarations
+ -Wpointer-arith
+ #-Wshadow will fail with GCC-8
+ ${OPTIONAL_CFLAGS}
+ ## other flags to try:
+ #-Wunsafe-loop-optimizations
+ #-Wpointer-arith
+ #-Wc++-compat
+ #-Wc++11-compat
+ #-Wwrite-strings
+ #-Wzero-as-null-pointer-constant
+ #-Wlogical-op
+ #-Wvector-optimization-performance
+ )
+
+if (NOT CMAKE_CXX_COMPILER_ID STREQUAL Clang)
+ # Disabling -Wcast-align with clang. TODO: fix casting and re-enable it, someday.
+ prepend_cflags_if_supported(-Wcast-align)
+endif ()
+
+## never want these
+set(CMAKE_C_FLAGS "-Wno-error ${CMAKE_C_FLAGS}")
+set(CMAKE_CXX_FLAGS "-Wno-error ${CMAKE_CXX_FLAGS}")
+
+# pick language dialect
+set(CMAKE_C_FLAGS "-std=c99 ${CMAKE_C_FLAGS}")
+check_cxx_compiler_flag(-std=c++11 HAVE_STDCXX11)
+check_cxx_compiler_flag(-std=c++0x HAVE_STDCXX0X)
+if (HAVE_STDCXX11)
+ set(CMAKE_CXX_FLAGS "-std=c++11 ${CMAKE_CXX_FLAGS}")
+elseif (HAVE_STDCXX0X)
+ set(CMAKE_CXX_FLAGS "-std=c++0x ${CMAKE_CXX_FLAGS}")
+else ()
+ message(FATAL_ERROR "${CMAKE_CXX_COMPILER} doesn't support -std=c++11 or -std=c++0x, you need one that does.")
+endif ()
+
+function(add_space_separated_property type obj propname val)
+ get_property(oldval ${type} ${obj} PROPERTY ${propname})
+ if (oldval MATCHES NOTFOUND)
+ set_property(${type} ${obj} PROPERTY ${propname} "${val}")
+ else ()
+ set_property(${type} ${obj} PROPERTY ${propname} "${val} ${oldval}")
+ endif ()
+endfunction(add_space_separated_property)
+
+## This function makes sure that the libraries passed to it get compiled
+## with the flags gcov needs. We only add those flags to our libraries
+## because we don't really care whether our tests get covered.
+function(maybe_add_gcov_to_libraries)
+ if (USE_GCOV)
+ foreach(lib ${ARGN})
+ add_space_separated_property(TARGET ${lib} COMPILE_FLAGS --coverage)
+ add_space_separated_property(TARGET ${lib} LINK_FLAGS --coverage)
+ target_link_libraries(${lib} LINK_PRIVATE gcov)
+ endforeach(lib)
+ endif (USE_GCOV)
+endfunction(maybe_add_gcov_to_libraries)
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuThirdParty.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuThirdParty.cmake
new file mode 100644
index 00000000..b312111a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuThirdParty.cmake
@@ -0,0 +1,111 @@
+include(ExternalProject)
+
+## add lzma with an external project
+set(xz_configure_opts --with-pic --enable-static)
+if (APPLE)
+ ## lzma has some assembly that doesn't work on darwin
+ list(APPEND xz_configure_opts --disable-assembler)
+endif ()
+
+list(APPEND xz_configure_opts "CC=${CMAKE_C_COMPILER} ${CMAKE_C_COMPILER_ARG1}")
+if (CMAKE_BUILD_TYPE STREQUAL Debug OR CMAKE_BUILD_TYPE STREQUAL drd)
+ list(APPEND xz_configure_opts --enable-debug)
+endif ()
+
+set(XZ_SOURCE_DIR "${TokuDB_SOURCE_DIR}/third_party/xz-4.999.9beta" CACHE FILEPATH "Where to find sources for xz (lzma).")
+if (NOT EXISTS "${XZ_SOURCE_DIR}/configure")
+ message(FATAL_ERROR "Can't find the xz sources. Please check them out to ${XZ_SOURCE_DIR} or modify XZ_SOURCE_DIR.")
+endif ()
+
+if (CMAKE_GENERATOR STREQUAL Ninja)
+ ## ninja doesn't understand "$(MAKE)"
+ set(SUBMAKE_COMMAND make)
+else ()
+  ## use "$(MAKE)" for submakes so they can use the jobserver; this doesn't
+  ## seem to break Xcode...
+ set(SUBMAKE_COMMAND $(MAKE))
+endif ()
+
+FILE(GLOB XZ_ALL_FILES ${XZ_SOURCE_DIR}/*)
+ExternalProject_Add(build_lzma
+ PREFIX xz
+ DOWNLOAD_COMMAND
+ cp -a "${XZ_ALL_FILES}" "<SOURCE_DIR>/"
+ CONFIGURE_COMMAND
+ "<SOURCE_DIR>/configure" ${xz_configure_opts}
+ "--prefix=${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz"
+ "--libdir=${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/lib"
+ BUILD_COMMAND
+ ${SUBMAKE_COMMAND} -C src/liblzma
+ INSTALL_COMMAND
+ ${SUBMAKE_COMMAND} -C src/liblzma install
+)
+FILE(GLOB_RECURSE XZ_ALL_FILES_RECURSIVE ${XZ_SOURCE_DIR}/*)
+ExternalProject_Add_Step(build_lzma reclone_src # Names of project and custom step
+ COMMENT "(re)cloning xz source..." # Text printed when step executes
+ DEPENDERS download configure # Steps that depend on this step
+ DEPENDS ${XZ_ALL_FILES_RECURSIVE} # Files on which this step depends
+)
+
+set_source_files_properties(
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/base.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/bcj.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/block.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/check.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/container.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/delta.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/filter.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/index.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/index_hash.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/lzma.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/stream_flags.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/subblock.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/version.h"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include/lzma/vli.h"
+ PROPERTIES GENERATED TRUE)
+
+include_directories("${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/include")
+
+add_library(lzma STATIC IMPORTED)
+set_target_properties(lzma PROPERTIES IMPORTED_LOCATION
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/xz/lib/liblzma.a")
+add_dependencies(lzma build_lzma)
+
+
+## add snappy with an external project
+set(SNAPPY_SOURCE_DIR "${TokuDB_SOURCE_DIR}/third_party/snappy-1.1.2" CACHE FILEPATH "Where to find sources for snappy.")
+if (NOT EXISTS "${SNAPPY_SOURCE_DIR}/CMakeLists.txt")
+ message(FATAL_ERROR "Can't find the snappy sources. Please check them out to ${SNAPPY_SOURCE_DIR} or modify SNAPPY_SOURCE_DIR.")
+endif ()
+
+FILE(GLOB SNAPPY_ALL_FILES ${SNAPPY_SOURCE_DIR}/*)
+ExternalProject_Add(build_snappy
+ PREFIX snappy
+ DOWNLOAD_COMMAND
+ cp -a "${SNAPPY_ALL_FILES}" "<SOURCE_DIR>/"
+ CMAKE_ARGS
+ -DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>
+ -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+ -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+ -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+ -DCMAKE_AR=${CMAKE_AR}
+ -DCMAKE_NM=${CMAKE_NM}
+ -DCMAKE_RANLIB=${CMAKE_RANLIB}
+ -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
+ -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
+ ${USE_PROJECT_CMAKE_MODULE_PATH}
+)
+FILE(GLOB_RECURSE SNAPPY_ALL_FILES_RECURSIVE ${SNAPPY_SOURCE_DIR}/*)
+ExternalProject_Add_Step(build_snappy reclone_src # Names of project and custom step
+ COMMENT "(re)cloning snappy source..." # Text printed when step executes
+ DEPENDERS download configure # Steps that depend on this step
+ DEPENDS ${SNAPPY_ALL_FILES_RECURSIVE} # Files on which this step depends
+)
+
+include_directories("${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/snappy/include")
+
+add_library(snappy STATIC IMPORTED)
+set_target_properties(snappy PROPERTIES IMPORTED_LOCATION
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/snappy/lib/libsnappy.a")
+add_dependencies(snappy build_snappy)
diff --git a/storage/tokudb/PerconaFT/ft/CMakeLists.txt b/storage/tokudb/PerconaFT/ft/CMakeLists.txt
new file mode 100644
index 00000000..6696c26e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/CMakeLists.txt
@@ -0,0 +1,96 @@
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+## generate log_code.cc, log_print.cc, log_header.cc
+set_source_files_properties(
+ "${CMAKE_CURRENT_BINARY_DIR}/log_code"
+ "${CMAKE_CURRENT_BINARY_DIR}/log_print"
+ "${CMAKE_CURRENT_BINARY_DIR}/log_header.h"
+ PROPERTIES GENERATED TRUE)
+
+add_executable(logformat logger/logformat.cc)
+target_link_libraries(logformat ${LIBTOKUPORTABILITY}_static)
+if (USE_GCOV)
+ add_space_separated_property(TARGET logformat LINK_FLAGS --coverage)
+endif (USE_GCOV)
+
+add_custom_command(
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/log_code.cc"
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/log_print.cc"
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/log_header.h"
+ COMMAND $<TARGET_FILE:logformat> .
+ DEPENDS logger/logformat
+ )
+add_custom_target(
+ generate_log_code
+ DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/log_code.cc" "${CMAKE_CURRENT_BINARY_DIR}/log_print.cc" "${CMAKE_CURRENT_BINARY_DIR}/log_header.h"
+ )
+
+set(FT_SOURCES
+ bndata
+ cachetable/background_job_manager
+ cachetable/cachetable
+ cachetable/checkpoint
+ cursor
+ ft
+ ft-cachetable-wrappers
+ ft-flusher
+ ft-hot-flusher
+ ft-ops
+ ft-recount-rows
+ ft-status
+ ft-test-helpers
+ ft-verify
+ loader/callbacks
+ loader/dbufio
+ loader/loader
+ loader/pqueue
+ leafentry
+ le-cursor
+ logger/logcursor
+ logger/logfilemgr
+ logger/logger
+ logger/log_upgrade
+ logger/recover
+ msg
+ msg_buffer
+ node
+ pivotkeys
+ serialize/rbtree_mhs
+ serialize/block_allocator
+ serialize/block_table
+ serialize/compress
+ serialize/ft_node-serialize
+ serialize/ft-node-deserialize
+ serialize/ft-serialize
+ serialize/quicklz
+ serialize/sub_block
+ txn/rollback
+ txn/rollback-apply
+ txn/rollback-ct-callbacks
+ txn/rollback_log_node_cache
+ txn/roll
+ txn/txn
+ txn/txn_child_manager
+ txn/txn_manager
+ txn/xids
+ ule
+ "${CMAKE_CURRENT_BINARY_DIR}/log_code"
+ "${CMAKE_CURRENT_BINARY_DIR}/log_print"
+ )
+
+add_library(ft SHARED ${FT_SOURCES})
+add_library(ft_static STATIC ${FT_SOURCES})
+## we're going to link this into libtokudb.so so it needs to have PIC
+set_target_properties(ft_static PROPERTIES POSITION_INDEPENDENT_CODE ON)
+maybe_add_gcov_to_libraries(ft ft_static)
+
+## depend on other generated targets
+add_dependencies(ft install_tdb_h generate_log_code build_lzma build_snappy)
+add_dependencies(ft_static install_tdb_h generate_log_code build_lzma build_snappy)
+
+## link with lzma (which should be static) and link dependers with zlib
+target_link_libraries(ft LINK_PRIVATE util_static lzma snappy ${LIBTOKUPORTABILITY})
+target_link_libraries(ft LINK_PUBLIC z)
+target_link_libraries(ft_static LINK_PRIVATE lzma snappy)
+
+add_subdirectory(tests)
diff --git a/storage/tokudb/PerconaFT/ft/bndata.cc b/storage/tokudb/PerconaFT/ft/bndata.cc
new file mode 100644
index 00000000..ecacb28a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/bndata.cc
@@ -0,0 +1,675 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <ft/bndata.h>
+#include <ft/ft-internal.h>
+
+using namespace toku;
+uint32_t bn_data::klpair_disksize(const uint32_t klpair_len, const klpair_struct *klpair) const {
+ return sizeof(*klpair) + keylen_from_klpair_len(klpair_len) + leafentry_disksize(get_le_from_klpair(klpair));
+}
+
+void bn_data::init_zero() {
+ toku_mempool_zero(&m_buffer_mempool);
+ m_disksize_of_keys = 0;
+}
+
+void bn_data::initialize_empty() {
+ init_zero();
+ m_buffer.create();
+}
+
+void bn_data::add_key(uint32_t keylen) {
+ m_disksize_of_keys += sizeof(keylen) + keylen;
+}
+
+void bn_data::add_keys(uint32_t n_keys, uint32_t combined_klpair_len) {
+ invariant(n_keys * sizeof(uint32_t) <= combined_klpair_len);
+ m_disksize_of_keys += combined_klpair_len;
+}
+
+void bn_data::remove_key(uint32_t keylen) {
+ m_disksize_of_keys -= sizeof(keylen) + keylen;
+}
+
+// Deserialize from format optimized for keys being inlined.
+// Currently only supports fixed-length keys.
+void bn_data::initialize_from_separate_keys_and_vals(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version UU(),
+ uint32_t key_data_size, uint32_t val_data_size, bool all_keys_same_length,
+ uint32_t fixed_klpair_length) {
+ paranoid_invariant(version >= FT_LAYOUT_VERSION_26); // Support was added @26
+ uint32_t ndone_before = rb->ndone;
+ init_zero();
+ invariant(all_keys_same_length); // Until otherwise supported.
+ const void *keys_src;
+ rbuf_literal_bytes(rb, &keys_src, key_data_size);
+ //Generate dmt
+ this->m_buffer.create_from_sorted_memory_of_fixed_size_elements(
+ keys_src, num_entries, key_data_size, fixed_klpair_length);
+ toku_mempool_construct(&this->m_buffer_mempool, val_data_size);
+
+ const void *vals_src;
+ rbuf_literal_bytes(rb, &vals_src, val_data_size);
+
+ if (num_entries > 0) {
+ void *vals_dest = toku_mempool_malloc(&this->m_buffer_mempool, val_data_size);
+ paranoid_invariant_notnull(vals_dest);
+ memcpy(vals_dest, vals_src, val_data_size);
+ }
+
+ add_keys(num_entries, num_entries * fixed_klpair_length);
+
+ toku_note_deserialized_basement_node(all_keys_same_length);
+
+ invariant(rb->ndone - ndone_before == data_size);
+}
+
+static int
+wbufwriteleafentry(const void* key, const uint32_t keylen, const LEAFENTRY &le, const uint32_t UU(idx), struct wbuf * const wb) {
+ // need to pack the leafentry as it was in versions
+ // where the key was integrated into it (< 26)
+ uint32_t begin_spot UU() = wb->ndone;
+ uint32_t le_disk_size = leafentry_disksize(le);
+ wbuf_nocrc_uint8_t(wb, le->type);
+ wbuf_nocrc_uint32_t(wb, keylen);
+ if (le->type == LE_CLEAN) {
+ wbuf_nocrc_uint32_t(wb, le->u.clean.vallen);
+ wbuf_nocrc_literal_bytes(wb, key, keylen);
+ wbuf_nocrc_literal_bytes(wb, le->u.clean.val, le->u.clean.vallen);
+ }
+ else {
+ paranoid_invariant(le->type == LE_MVCC);
+ wbuf_nocrc_uint32_t(wb, le->u.mvcc.num_cxrs);
+ wbuf_nocrc_uint8_t(wb, le->u.mvcc.num_pxrs);
+ wbuf_nocrc_literal_bytes(wb, key, keylen);
+ wbuf_nocrc_literal_bytes(wb, le->u.mvcc.xrs, le_disk_size - (1 + 4 + 1));
+ }
+ uint32_t end_spot UU() = wb->ndone;
+ paranoid_invariant((end_spot - begin_spot) == keylen + sizeof(keylen) + le_disk_size);
+ return 0;
+}
+
+void bn_data::serialize_to_wbuf(struct wbuf *const wb) {
+ prepare_to_serialize();
+ serialize_header(wb);
+ if (m_buffer.value_length_is_fixed()) {
+ serialize_rest(wb);
+ } else {
+ //
+ // iterate over leafentries and place them into the buffer
+ //
+ iterate<struct wbuf, wbufwriteleafentry>(wb);
+ }
+}
+
+// If we have fixed-length keys, we prepare the dmt and mempool.
+// The mempool is prepared by removing any fragmented space and ordering leafentries in the same order as their keys.
+void bn_data::prepare_to_serialize(void) {
+ if (m_buffer.value_length_is_fixed()) {
+ m_buffer.prepare_for_serialize();
+ dmt_compress_kvspace(0, nullptr, true); // Gets it ready for easy serialization.
+ }
+}
+
+void bn_data::serialize_header(struct wbuf *wb) const {
+ bool fixed = m_buffer.value_length_is_fixed();
+
+ //key_data_size
+ wbuf_nocrc_uint(wb, m_disksize_of_keys);
+ //val_data_size
+ wbuf_nocrc_uint(wb, toku_mempool_get_used_size(&m_buffer_mempool));
+ //fixed_klpair_length
+ wbuf_nocrc_uint(wb, m_buffer.get_fixed_length());
+ // all_keys_same_length
+ wbuf_nocrc_uint8_t(wb, fixed);
+ // keys_vals_separate
+ wbuf_nocrc_uint8_t(wb, fixed);
+}
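As a cross-check, the header written here is three uint32 fields plus two uint8 flags, i.e. 3*4 + 2*1 = 14 bytes, which matches the HEADER_LENGTH constant declared in bndata.h and verified by deserialize_from_rbuf.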
+
+void bn_data::serialize_rest(struct wbuf *wb) const {
+ //Write keys
+ invariant(m_buffer.value_length_is_fixed()); //Assumes prepare_to_serialize was called
+ m_buffer.serialize_values(m_disksize_of_keys, wb);
+
+ //Write leafentries
+ //Just ran dmt_compress_kvspace so there is no fragmentation and also leafentries are in sorted order.
+ paranoid_invariant(toku_mempool_get_frag_size(&m_buffer_mempool) == 0);
+ uint32_t val_data_size = toku_mempool_get_used_size(&m_buffer_mempool);
+ wbuf_nocrc_literal_bytes(wb, toku_mempool_get_base(&m_buffer_mempool), val_data_size);
+}
+
+// Deserialize from rbuf
+void bn_data::deserialize_from_rbuf(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version) {
+ uint32_t key_data_size = data_size; // overallocate if < version 26 (best guess that is guaranteed not too small)
+ uint32_t val_data_size = data_size; // overallocate if < version 26 (best guess that is guaranteed not too small)
+
+ bool all_keys_same_length = false;
+ bool keys_vals_separate = false;
+ uint32_t fixed_klpair_length = 0;
+
+ // In version 25 and older there is no header. Skip reading header for old version.
+ if (version >= FT_LAYOUT_VERSION_26) {
+ uint32_t ndone_before = rb->ndone;
+ key_data_size = rbuf_int(rb);
+ val_data_size = rbuf_int(rb);
+ fixed_klpair_length = rbuf_int(rb); // 0 if !all_keys_same_length
+ all_keys_same_length = rbuf_char(rb);
+ keys_vals_separate = rbuf_char(rb);
+ invariant(all_keys_same_length == keys_vals_separate); // Until we support otherwise
+ uint32_t header_size = rb->ndone - ndone_before;
+ data_size -= header_size;
+ invariant(header_size == HEADER_LENGTH);
+ if (keys_vals_separate) {
+ invariant(fixed_klpair_length >= sizeof(klpair_struct) || num_entries == 0);
+ initialize_from_separate_keys_and_vals(num_entries, rb, data_size, version,
+ key_data_size, val_data_size, all_keys_same_length,
+ fixed_klpair_length);
+ return;
+ }
+ }
+ // Version >= 26 and version 25 deserialization are now identical except that <= 25 might allocate too much memory.
+ const void *bytes;
+ rbuf_literal_bytes(rb, &bytes, data_size);
+ const unsigned char *CAST_FROM_VOIDP(buf, bytes);
+ if (data_size == 0) {
+ invariant_zero(num_entries);
+ }
+ init_zero();
+ klpair_dmt_t::builder dmt_builder;
+ dmt_builder.create(num_entries, key_data_size);
+
+ // TODO(leif): clean this up (#149)
+ unsigned char *newmem = nullptr;
+ // add 25% extra wiggle room
+ uint32_t allocated_bytes_vals = val_data_size + (val_data_size / 4);
+ CAST_FROM_VOIDP(newmem, toku_xmalloc(allocated_bytes_vals));
+ const unsigned char* curr_src_pos = buf;
+ unsigned char* curr_dest_pos = newmem;
+ for (uint32_t i = 0; i < num_entries; i++) {
+ uint8_t curr_type = curr_src_pos[0];
+ curr_src_pos++;
+ // first thing we do is lay out the key,
+ // to do so, we must extract it from the leafentry
+ // and write it in
+ uint32_t keylen = 0;
+ const void* keyp = nullptr;
+ keylen = *(uint32_t *)curr_src_pos;
+ curr_src_pos += sizeof(uint32_t);
+ uint32_t clean_vallen = 0;
+ uint32_t num_cxrs = 0;
+ uint8_t num_pxrs = 0;
+ if (curr_type == LE_CLEAN) {
+ clean_vallen = toku_dtoh32(*(uint32_t *)curr_src_pos);
+ curr_src_pos += sizeof(clean_vallen); // val_len
+ keyp = curr_src_pos;
+ curr_src_pos += keylen;
+ }
+ else {
+ paranoid_invariant(curr_type == LE_MVCC);
+ num_cxrs = toku_htod32(*(uint32_t *)curr_src_pos);
+ curr_src_pos += sizeof(uint32_t); // num_cxrs
+ num_pxrs = curr_src_pos[0];
+ curr_src_pos += sizeof(uint8_t); //num_pxrs
+ keyp = curr_src_pos;
+ curr_src_pos += keylen;
+ }
+ uint32_t le_offset = curr_dest_pos - newmem;
+ dmt_builder.append(klpair_dmtwriter(keylen, le_offset, keyp));
+ add_key(keylen);
+
+ // now curr_dest_pos is pointing to where the leafentry should be packed
+ curr_dest_pos[0] = curr_type;
+ curr_dest_pos++;
+ if (curr_type == LE_CLEAN) {
+ *(uint32_t *)curr_dest_pos = toku_htod32(clean_vallen);
+ curr_dest_pos += sizeof(clean_vallen);
+ memcpy(curr_dest_pos, curr_src_pos, clean_vallen); // copy the val
+ curr_dest_pos += clean_vallen;
+ curr_src_pos += clean_vallen;
+ }
+ else {
+ // pack num_cxrs and num_pxrs
+ *(uint32_t *)curr_dest_pos = toku_htod32(num_cxrs);
+ curr_dest_pos += sizeof(num_cxrs);
+ *(uint8_t *)curr_dest_pos = num_pxrs;
+ curr_dest_pos += sizeof(num_pxrs);
+ // now we need to pack the rest of the data
+ uint32_t num_rest_bytes = leafentry_rest_memsize(num_pxrs, num_cxrs, const_cast<uint8_t*>(curr_src_pos));
+ memcpy(curr_dest_pos, curr_src_pos, num_rest_bytes);
+ curr_dest_pos += num_rest_bytes;
+ curr_src_pos += num_rest_bytes;
+ }
+ }
+ dmt_builder.build(&this->m_buffer);
+ toku_note_deserialized_basement_node(m_buffer.value_length_is_fixed());
+
+ uint32_t num_bytes_read = (uint32_t)(curr_src_pos - buf);
+ invariant(num_bytes_read == data_size);
+
+ uint32_t num_bytes_written = curr_dest_pos - newmem + m_disksize_of_keys;
+ invariant(num_bytes_written == data_size);
+ toku_mempool_init(&m_buffer_mempool, newmem, (size_t)(curr_dest_pos - newmem), allocated_bytes_vals);
+
+ invariant(get_disk_size() == data_size);
+ // Versions older than 26 might have allocated too much memory. Try to shrink the mempool now that we
+ // know how much memory we need.
+ if (version < FT_LAYOUT_VERSION_26) {
+ // Unnecessary after version 26
+ // Reallocate smaller mempool to save memory
+ invariant_zero(toku_mempool_get_frag_size(&m_buffer_mempool));
+ toku_mempool_realloc_larger(&m_buffer_mempool, toku_mempool_get_used_size(&m_buffer_mempool));
+ }
+}
+
+uint64_t bn_data::get_memory_size() {
+ uint64_t retval = 0;
+ //TODO: Maybe ask for memory_size instead of mempool_footprint (either this todo or the next)
+ // include fragmentation overhead but do not include space in the
+ // mempool that has not yet been allocated for leaf entries
+ size_t poolsize = toku_mempool_footprint(&m_buffer_mempool);
+ retval += poolsize;
+    // This one includes space not yet allocated for nodes (just like the old constant-key omt)
+ //TODO: Maybe ask for mempool_footprint instead of memory_size.
+ retval += m_buffer.memory_size();
+ invariant(retval >= get_disk_size());
+ return retval;
+}
+
+void bn_data::delete_leafentry (
+ uint32_t idx,
+ uint32_t keylen,
+ uint32_t old_le_size
+ )
+{
+ remove_key(keylen);
+ m_buffer.delete_at(idx);
+ toku_mempool_mfree(&m_buffer_mempool, nullptr, old_le_size);
+}
+
+/* mempool support */
+
+struct dmt_compressor_state {
+ struct mempool *new_kvspace;
+ class bn_data *bd;
+};
+
+static int move_it (const uint32_t, klpair_struct *klpair, const uint32_t idx UU(), struct dmt_compressor_state * const oc) {
+ LEAFENTRY old_le = oc->bd->get_le_from_klpair(klpair);
+ uint32_t size = leafentry_memsize(old_le);
+ void* newdata = toku_mempool_malloc(oc->new_kvspace, size);
+ paranoid_invariant_notnull(newdata); // we do this on a fresh mempool, so nothing bad should happen
+ memcpy(newdata, old_le, size);
+ klpair->le_offset = toku_mempool_get_offset_from_pointer_and_base(oc->new_kvspace, newdata);
+ return 0;
+}
+
+// Compress the mempool, growing or shrinking it if needed.
+// May (and, if force_compress, always does) have the side effect of putting the contents of the mempool in sorted order.
+void bn_data::dmt_compress_kvspace(size_t added_size, void **maybe_free, bool force_compress) {
+ uint32_t total_size_needed = toku_mempool_get_used_size(&m_buffer_mempool) + added_size;
+
+ // If there is no fragmentation, e.g. in serial inserts, we can just increase the size
+ // of the mempool and move things over with a cheap memcpy. If force_compress is true,
+ // the caller needs the side effect that all contents are put in sorted order.
+ bool do_compress = toku_mempool_get_frag_size(&m_buffer_mempool) > 0 || force_compress;
+
+ void *old_mempool_base = toku_mempool_get_base(&m_buffer_mempool);
+ struct mempool new_kvspace;
+ if (do_compress) {
+ size_t requested_size = force_compress ? total_size_needed : ((total_size_needed * 3) / 2);
+ toku_mempool_construct(&new_kvspace, requested_size);
+ struct dmt_compressor_state oc = { &new_kvspace, this };
+ m_buffer.iterate_ptr< decltype(oc), move_it >(&oc);
+ } else {
+ toku_mempool_construct(&new_kvspace, total_size_needed);
+ size_t old_offset_limit = toku_mempool_get_offset_limit(&m_buffer_mempool);
+ void *new_mempool_base = toku_mempool_malloc(&new_kvspace, old_offset_limit);
+ memcpy(new_mempool_base, old_mempool_base, old_offset_limit);
+ }
+
+ if (maybe_free) {
+ *maybe_free = old_mempool_base;
+ } else {
+ toku_free(old_mempool_base);
+ }
+ m_buffer_mempool = new_kvspace;
+}
+
+// Effect: Allocate a new object of size SIZE in MP. If MP runs out of space, allocate a new mempool and copy all the items
+//  referenced by the dmt (which refer to items in the old mempool) into the new mempool.
+// If MAYBE_FREE is nullptr then free the old mempool's space.
+// Otherwise, store the old mempool's space in maybe_free.
+LEAFENTRY bn_data::mempool_malloc_and_update_dmt(size_t size, void **maybe_free) {
+ void *v = toku_mempool_malloc(&m_buffer_mempool, size);
+ if (v == nullptr) {
+ dmt_compress_kvspace(size, maybe_free, false);
+ v = toku_mempool_malloc(&m_buffer_mempool, size);
+ paranoid_invariant_notnull(v);
+ }
+ return (LEAFENTRY)v;
+}
+
+void bn_data::get_space_for_overwrite(
+ uint32_t idx,
+ const void* keyp UU(),
+ uint32_t keylen UU(),
+ uint32_t old_keylen,
+ uint32_t old_le_size,
+ uint32_t new_size,
+ LEAFENTRY* new_le_space,
+ void **const maybe_free
+ )
+{
+ *maybe_free = nullptr;
+ LEAFENTRY new_le = mempool_malloc_and_update_dmt(new_size, maybe_free);
+ toku_mempool_mfree(&m_buffer_mempool, nullptr, old_le_size);
+ klpair_struct* klp = nullptr;
+ uint32_t klpair_len;
+ int r = m_buffer.fetch(idx, &klpair_len, &klp);
+ invariant_zero(r);
+ paranoid_invariant(klp!=nullptr);
+ // Old key length should be consistent with what is stored in the DMT
+ invariant(keylen_from_klpair_len(klpair_len) == old_keylen);
+
+ size_t new_le_offset = toku_mempool_get_offset_from_pointer_and_base(&this->m_buffer_mempool, new_le);
+ paranoid_invariant(new_le_offset <= UINT32_MAX - new_size); // Not using > 4GB
+ klp->le_offset = new_le_offset;
+
+ paranoid_invariant(new_le == get_le_from_klpair(klp));
+ *new_le_space = new_le;
+}
+
+void bn_data::get_space_for_insert(
+ uint32_t idx,
+ const void* keyp,
+ uint32_t keylen,
+ size_t size,
+ LEAFENTRY* new_le_space,
+ void **const maybe_free
+ )
+{
+ add_key(keylen);
+
+ *maybe_free = nullptr;
+ LEAFENTRY new_le = mempool_malloc_and_update_dmt(size, maybe_free);
+ size_t new_le_offset = toku_mempool_get_offset_from_pointer_and_base(&this->m_buffer_mempool, new_le);
+
+ klpair_dmtwriter kl(keylen, new_le_offset, keyp);
+ m_buffer.insert_at(kl, idx);
+
+ *new_le_space = new_le;
+}
+
+class split_klpairs_extra {
+ bn_data *const m_left_bn;
+ bn_data *const m_right_bn;
+ klpair_dmt_t::builder *const m_left_builder;
+ klpair_dmt_t::builder *const m_right_builder;
+ struct mempool *const m_left_dest_mp;
+ uint32_t m_split_at;
+
+ struct mempool *left_dest_mp(void) const { return m_left_dest_mp; }
+ struct mempool *right_dest_mp(void) const { return &m_right_bn->m_buffer_mempool; }
+
+ void copy_klpair(const uint32_t klpair_len, const klpair_struct &klpair,
+ klpair_dmt_t::builder *const builder,
+ struct mempool *const dest_mp,
+ bn_data *const bn) {
+ LEAFENTRY old_le = m_left_bn->get_le_from_klpair(&klpair);
+ size_t le_size = leafentry_memsize(old_le);
+
+ void *new_le = toku_mempool_malloc(dest_mp, le_size);
+ paranoid_invariant_notnull(new_le);
+ memcpy(new_le, old_le, le_size);
+ size_t le_offset = toku_mempool_get_offset_from_pointer_and_base(dest_mp, new_le);
+ size_t keylen = keylen_from_klpair_len(klpair_len);
+ builder->append(klpair_dmtwriter(keylen, le_offset, klpair.key));
+
+ bn->add_key(keylen);
+ }
+
+ int move_leafentry(const uint32_t klpair_len, const klpair_struct &klpair, const uint32_t idx) {
+ m_left_bn->remove_key(keylen_from_klpair_len(klpair_len));
+
+ if (idx < m_split_at) {
+ copy_klpair(klpair_len, klpair, m_left_builder, left_dest_mp(), m_left_bn);
+ } else {
+ copy_klpair(klpair_len, klpair, m_right_builder, right_dest_mp(), m_right_bn);
+ }
+ return 0;
+ }
+
+ public:
+ split_klpairs_extra(bn_data *const left_bn, bn_data *const right_bn,
+ klpair_dmt_t::builder *const left_builder,
+ klpair_dmt_t::builder *const right_builder,
+ struct mempool *const left_new_mp,
+ uint32_t split_at)
+ : m_left_bn(left_bn),
+ m_right_bn(right_bn),
+ m_left_builder(left_builder),
+ m_right_builder(right_builder),
+ m_left_dest_mp(left_new_mp),
+ m_split_at(split_at) {}
+ static int cb(const uint32_t klpair_len, const klpair_struct &klpair, const uint32_t idx, split_klpairs_extra *const thisp) {
+ return thisp->move_leafentry(klpair_len, klpair, idx);
+ }
+};
+
+void bn_data::split_klpairs(
+ bn_data* right_bd,
+ uint32_t split_at //lower bound inclusive for right_bd
+ )
+{
+    // This is called during a split, and the split algorithm should never call it
+    // when splitting on a boundary, so there must be some leafentries in the range to move.
+ paranoid_invariant(split_at < num_klpairs());
+
+ right_bd->init_zero();
+
+ size_t mpsize = toku_mempool_get_used_size(&m_buffer_mempool); // overkill, but safe
+
+ struct mempool new_left_mp;
+ toku_mempool_construct(&new_left_mp, mpsize);
+
+ struct mempool *right_mp = &right_bd->m_buffer_mempool;
+ toku_mempool_construct(right_mp, mpsize);
+
+ klpair_dmt_t::builder left_dmt_builder;
+ left_dmt_builder.create(split_at, m_disksize_of_keys); // overkill, but safe (builder will realloc at the end)
+
+ klpair_dmt_t::builder right_dmt_builder;
+ right_dmt_builder.create(num_klpairs() - split_at, m_disksize_of_keys); // overkill, but safe (builder will realloc at the end)
+
+ split_klpairs_extra extra(this, right_bd, &left_dmt_builder, &right_dmt_builder, &new_left_mp, split_at);
+
+ int r = m_buffer.iterate<split_klpairs_extra, split_klpairs_extra::cb>(&extra);
+ invariant_zero(r);
+
+ m_buffer.destroy();
+ toku_mempool_destroy(&m_buffer_mempool);
+
+ m_buffer_mempool = new_left_mp;
+
+ left_dmt_builder.build(&m_buffer);
+ right_dmt_builder.build(&right_bd->m_buffer);
+
+ // Potentially shrink memory pool for destination.
+ // We overallocated ("overkill") above
+ struct mempool *const left_mp = &m_buffer_mempool;
+ paranoid_invariant_zero(toku_mempool_get_frag_size(left_mp));
+ toku_mempool_realloc_larger(left_mp, toku_mempool_get_used_size(left_mp));
+ paranoid_invariant_zero(toku_mempool_get_frag_size(right_mp));
+ toku_mempool_realloc_larger(right_mp, toku_mempool_get_used_size(right_mp));
+}
+
+uint64_t bn_data::get_disk_size() {
+ return m_disksize_of_keys +
+ toku_mempool_get_used_size(&m_buffer_mempool);
+}
+
+struct verify_le_in_mempool_state {
+ size_t offset_limit;
+ class bn_data *bd;
+};
+
+static int verify_le_in_mempool (const uint32_t, klpair_struct *klpair, const uint32_t idx UU(), struct verify_le_in_mempool_state * const state) {
+ invariant(klpair->le_offset < state->offset_limit);
+
+ LEAFENTRY le = state->bd->get_le_from_klpair(klpair);
+ uint32_t size = leafentry_memsize(le);
+
+ size_t end_offset = klpair->le_offset+size;
+
+ invariant(end_offset <= state->offset_limit);
+ return 0;
+}
+
+//This is a debug-only (paranoid) verification.
+//Verifies the dmt is valid, and all leafentries are entirely in the mempool's memory.
+void bn_data::verify_mempool(void) {
+ //Verify the dmt itself <- paranoid and slow
+ m_buffer.verify();
+
+ verify_le_in_mempool_state state = { .offset_limit = toku_mempool_get_offset_limit(&m_buffer_mempool), .bd = this };
+    //Verify that every leafentry pointed to by the keys in the dmt is fully inside the mempool
+ m_buffer.iterate_ptr< decltype(state), verify_le_in_mempool >(&state);
+}
+
+uint32_t bn_data::num_klpairs(void) const {
+ return m_buffer.size();
+}
+
+void bn_data::destroy(void) {
+ // The buffer may have been freed already, in some cases.
+ m_buffer.destroy();
+ toku_mempool_destroy(&m_buffer_mempool);
+ m_disksize_of_keys = 0;
+}
+
+void bn_data::set_contents_as_clone_of_sorted_array(
+ uint32_t num_les,
+ const void** old_key_ptrs,
+ uint32_t* old_keylens,
+ LEAFENTRY* old_les,
+ size_t *le_sizes,
+ size_t total_key_size,
+ size_t total_le_size
+ )
+{
+ //Enforce "just created" invariant.
+ paranoid_invariant_zero(m_disksize_of_keys);
+ paranoid_invariant_zero(num_klpairs());
+ paranoid_invariant_null(toku_mempool_get_base(&m_buffer_mempool));
+ paranoid_invariant_zero(toku_mempool_get_size(&m_buffer_mempool));
+
+ toku_mempool_construct(&m_buffer_mempool, total_le_size);
+ m_buffer.destroy();
+ m_disksize_of_keys = 0;
+
+ klpair_dmt_t::builder dmt_builder;
+ dmt_builder.create(num_les, total_key_size);
+
+ for (uint32_t idx = 0; idx < num_les; idx++) {
+ void* new_le = toku_mempool_malloc(&m_buffer_mempool, le_sizes[idx]);
+ paranoid_invariant_notnull(new_le);
+ memcpy(new_le, old_les[idx], le_sizes[idx]);
+ size_t le_offset = toku_mempool_get_offset_from_pointer_and_base(&m_buffer_mempool, new_le);
+ dmt_builder.append(klpair_dmtwriter(old_keylens[idx], le_offset, old_key_ptrs[idx]));
+ add_key(old_keylens[idx]);
+ }
+ dmt_builder.build(&this->m_buffer);
+}
+
+LEAFENTRY bn_data::get_le_from_klpair(const klpair_struct *klpair) const {
+ void * ptr = toku_mempool_get_pointer_from_base_and_offset(&this->m_buffer_mempool, klpair->le_offset);
+ LEAFENTRY CAST_FROM_VOIDP(le, ptr);
+ return le;
+}
+
+
+// get info about a single leafentry by index
+int bn_data::fetch_le(uint32_t idx, LEAFENTRY *le) {
+ klpair_struct* klpair = nullptr;
+ int r = m_buffer.fetch(idx, nullptr, &klpair);
+ if (r == 0) {
+ *le = get_le_from_klpair(klpair);
+ }
+ return r;
+}
+
+int bn_data::fetch_klpair(uint32_t idx, LEAFENTRY *le, uint32_t *len, void** key) {
+ klpair_struct* klpair = nullptr;
+ uint32_t klpair_len;
+ int r = m_buffer.fetch(idx, &klpair_len, &klpair);
+ if (r == 0) {
+ *len = keylen_from_klpair_len(klpair_len);
+ *key = klpair->key;
+ *le = get_le_from_klpair(klpair);
+ }
+ return r;
+}
+
+int bn_data::fetch_klpair_disksize(uint32_t idx, size_t *size) {
+ klpair_struct* klpair = nullptr;
+ uint32_t klpair_len;
+ int r = m_buffer.fetch(idx, &klpair_len, &klpair);
+ if (r == 0) {
+ *size = klpair_disksize(klpair_len, klpair);
+ }
+ return r;
+}
+
+int bn_data::fetch_key_and_len(uint32_t idx, uint32_t *len, void** key) {
+ klpair_struct* klpair = nullptr;
+ uint32_t klpair_len;
+ int r = m_buffer.fetch(idx, &klpair_len, &klpair);
+ if (r == 0) {
+ *len = keylen_from_klpair_len(klpair_len);
+ *key = klpair->key;
+ }
+ return r;
+}
+
+void bn_data::clone(bn_data* orig_bn_data) {
+ toku_mempool_clone(&orig_bn_data->m_buffer_mempool, &m_buffer_mempool);
+ m_buffer.clone(orig_bn_data->m_buffer);
+ this->m_disksize_of_keys = orig_bn_data->m_disksize_of_keys;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/bndata.h b/storage/tokudb/PerconaFT/ft/bndata.h
new file mode 100644
index 00000000..8b349426
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/bndata.h
@@ -0,0 +1,333 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "util/dmt.h"
+#include "util/mempool.h"
+
+#include "ft/leafentry.h"
+#include "ft/serialize/wbuf.h"
+
+// Key/leafentry pair stored in a dmt. The key is inlined, the offset (in leafentry mempool) is stored for the leafentry.
+struct klpair_struct {
+ uint32_t le_offset; //Offset of leafentry (in leafentry mempool)
+    uint8_t key[0]; // key bytes; the leafentry itself lives in the mempool at le_offset
+};
+
+static constexpr uint32_t keylen_from_klpair_len(const uint32_t klpair_len) {
+ return klpair_len - __builtin_offsetof(klpair_struct, key);
+}
+
+
+static_assert(__builtin_offsetof(klpair_struct, key) == 1*sizeof(uint32_t), "klpair alignment issues");
+static_assert(__builtin_offsetof(klpair_struct, key) == sizeof(klpair_struct), "klpair size issues");
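A small illustration of the length arithmetic (an editorial sketch, not part of the original source): because key is the only variable-length member and starts right after le_offset, a klpair's stored length is simply the fixed offset of key plus the key length, so keylen_from_klpair_len inverts it exactly.

    // Hypothetical 10-byte key: klpair_len = sizeof(klpair_struct) + 10 = 4 + 10 = 14,
    // and keylen_from_klpair_len(14) recovers the original key length of 10.
    static_assert(keylen_from_klpair_len(sizeof(klpair_struct) + 10) == 10,
                  "keylen should round-trip through klpair_len");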
+
+// A wrapper for the heaviside function provided to dmt->find*.
+// Needed because the heaviside functions provided to bndata do not know about the internal types.
+// The alternative to this wrapper is to expose accessor functions and rewrite all the external heaviside functions.
+template<typename dmtcmp_t,
+ int (*h)(const DBT &, const dmtcmp_t &)>
+static int klpair_find_wrapper(const uint32_t klpair_len, const klpair_struct &klpair, const dmtcmp_t &extra) {
+ DBT kdbt;
+ kdbt.data = const_cast<void*>(reinterpret_cast<const void*>(klpair.key));
+ kdbt.size = keylen_from_klpair_len(klpair_len);
+ return h(kdbt, extra);
+}
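As a hedged illustration of how klpair_find_wrapper is meant to be used (the comparator below is a hypothetical example written for this note, not a function from this codebase; it assumes memcmp is visible through the existing includes), a heaviside function with dmtcmp_t = DBT could look like:

    // Returns <0, 0, >0 depending on whether the stored key sorts before, equal to,
    // or after the probe key; suitable for bn_data::find_zero<DBT, example_lexicographic_cmp>.
    static int example_lexicographic_cmp(const DBT &stored_key, const DBT &probe) {
        uint32_t common = stored_key.size < probe.size ? stored_key.size : probe.size;
        int c = memcmp(stored_key.data, probe.data, common);
        if (c != 0) {
            return c;
        }
        return (int)stored_key.size - (int)probe.size;
    }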
+
+template<typename inner_iterate_extra_t>
+struct klpair_iterate_extra {
+ public:
+ inner_iterate_extra_t *inner;
+ const class bn_data * bd;
+};
+
+// A wrapper for the high-order function provided to dmt->iterate*.
+// Needed because the callback functions provided to bndata do not know about the internal types.
+// The alternative to this wrapper is to expose accessor functions and rewrite all the external callback functions.
+template<typename iterate_extra_t,
+ int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t idx, iterate_extra_t *const)>
+static int klpair_iterate_wrapper(const uint32_t klpair_len, const klpair_struct &klpair, const uint32_t idx, klpair_iterate_extra<iterate_extra_t> *const extra) {
+ const void* key = &klpair.key;
+ LEAFENTRY le = extra->bd->get_le_from_klpair(&klpair);
+ return f(key, keylen_from_klpair_len(klpair_len), le, idx, extra->inner);
+}
+
+
+namespace toku {
+// dmt writer for klpair_struct
+class klpair_dmtwriter {
+ public:
+ // Return the size needed for the klpair_struct that this dmtwriter represents
+ size_t get_size(void) const {
+ return sizeof(klpair_struct) + this->keylen;
+ }
+ // Write the klpair_struct this dmtwriter represents to a destination
+ void write_to(klpair_struct *const dest) const {
+ dest->le_offset = this->le_offset;
+ memcpy(dest->key, this->keyp, this->keylen);
+ }
+
+ klpair_dmtwriter(uint32_t _keylen, uint32_t _le_offset, const void* _keyp)
+ : keylen(_keylen), le_offset(_le_offset), keyp(_keyp) {}
+ klpair_dmtwriter(const uint32_t klpair_len, klpair_struct *const src)
+ : keylen(keylen_from_klpair_len(klpair_len)), le_offset(src->le_offset), keyp(src->key) {}
+ private:
+ const uint32_t keylen;
+ const uint32_t le_offset;
+ const void* keyp;
+};
+}
+
+typedef toku::dmt<klpair_struct, klpair_struct*, toku::klpair_dmtwriter> klpair_dmt_t;
+// This class stores the data associated with a basement node
+class bn_data {
+public:
+ // Initialize an empty bn_data _without_ a dmt backing.
+ // Externally only used for deserialization.
+ void init_zero(void);
+
+ // Initialize an empty bn_data _with_ a dmt
+ void initialize_empty(void);
+
+ // Deserialize a bn_data from rbuf.
+ // This is the entry point for deserialization.
+ void deserialize_from_rbuf(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version);
+
+ // Retrieve the memory footprint of this basement node.
+    // May over- or under-count: see Percona/PerconaFT#136
+ // Also see dmt's implementation.
+ uint64_t get_memory_size(void);
+
+ // Get the serialized size of this basement node.
+ uint64_t get_disk_size(void);
+
+ // Perform (paranoid) verification that all leafentries are fully contained within the mempool
+ void verify_mempool(void);
+
+ // size() of key dmt
+ uint32_t num_klpairs(void) const;
+
+ // iterate() on key dmt (and associated leafentries)
+ template<typename iterate_extra_t,
+ int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)>
+ int iterate(iterate_extra_t *const iterate_extra) const {
+ return iterate_on_range<iterate_extra_t, f>(0, num_klpairs(), iterate_extra);
+ }
+
+ // iterate_on_range() on key dmt (and associated leafentries)
+ template<typename iterate_extra_t,
+ int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)>
+ int iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const {
+ klpair_iterate_extra<iterate_extra_t> klpair_extra = { iterate_extra, this };
+ return m_buffer.iterate_on_range< klpair_iterate_extra<iterate_extra_t>, klpair_iterate_wrapper<iterate_extra_t, f> >(left, right, &klpair_extra);
+ }
+
+ // find_zero() on key dmt
+ template<typename dmtcmp_t,
+ int (*h)(const DBT &, const dmtcmp_t &)>
+ int find_zero(const dmtcmp_t &extra, LEAFENTRY *const value, void** key, uint32_t* keylen, uint32_t *const idxp) const {
+ klpair_struct* klpair = nullptr;
+ uint32_t klpair_len;
+ int r = m_buffer.find_zero< dmtcmp_t, klpair_find_wrapper<dmtcmp_t, h> >(extra, &klpair_len, &klpair, idxp);
+ if (r == 0) {
+ if (value) {
+ *value = get_le_from_klpair(klpair);
+ }
+ if (key) {
+ paranoid_invariant_notnull(keylen);
+ *key = klpair->key;
+ *keylen = keylen_from_klpair_len(klpair_len);
+ }
+ else {
+ paranoid_invariant_null(keylen);
+ }
+ }
+ return r;
+ }
+
+ // find() on key dmt (and associated leafentries)
+ template<typename dmtcmp_t,
+ int (*h)(const DBT &, const dmtcmp_t &)>
+ int find(const dmtcmp_t &extra, int direction, LEAFENTRY *const value, void** key, uint32_t* keylen, uint32_t *const idxp) const {
+ klpair_struct* klpair = nullptr;
+ uint32_t klpair_len;
+ int r = m_buffer.find< dmtcmp_t, klpair_find_wrapper<dmtcmp_t, h> >(extra, direction, &klpair_len, &klpair, idxp);
+ if (r == 0) {
+ if (value) {
+ *value = get_le_from_klpair(klpair);
+ }
+ if (key) {
+ paranoid_invariant_notnull(keylen);
+ *key = klpair->key;
+ *keylen = keylen_from_klpair_len(klpair_len);
+ }
+ else {
+ paranoid_invariant_null(keylen);
+ }
+ }
+ return r;
+ }
+
+ // Fetch leafentry by index
+ __attribute__((__nonnull__))
+ int fetch_le(uint32_t idx, LEAFENTRY *le);
+ // Fetch (leafentry, key, keylen) by index
+ __attribute__((__nonnull__))
+ int fetch_klpair(uint32_t idx, LEAFENTRY *le, uint32_t *len, void** key);
+ // Fetch (serialized size of leafentry, key, and keylen) by index
+ __attribute__((__nonnull__))
+ int fetch_klpair_disksize(uint32_t idx, size_t *size);
+ // Fetch (key, keylen) by index
+ __attribute__((__nonnull__))
+ int fetch_key_and_len(uint32_t idx, uint32_t *len, void** key);
+
+    // Move leafentries (and associated keys/keylens) from this basement node to dest_bd.
+    // Moves indexes [first_index_for_dest, num_klpairs()) into dest_bd.
+ __attribute__((__nonnull__))
+ void split_klpairs(bn_data* dest_bd, uint32_t first_index_for_dest);
+
+ // Destroy this basement node and free memory.
+ void destroy(void);
+
+ // Uses sorted array as input for this basement node.
+ // Expects this to be a basement node just initialized with initialize_empty()
+ void set_contents_as_clone_of_sorted_array(
+ uint32_t num_les,
+ const void** old_key_ptrs,
+ uint32_t* old_keylens,
+ LEAFENTRY* old_les,
+ size_t *le_sizes,
+ size_t total_key_size,
+ size_t total_le_size
+ );
+
+ // Make this basement node a clone of orig_bn_data.
+    // orig_bn_data still owns all of its memory (dmt, mempool);
+    // this basement node gets a new dmt and mempool containing the same data.
+ void clone(bn_data* orig_bn_data);
+
+ // Delete klpair index idx with provided keylen and old leafentry with size old_le_size
+ void delete_leafentry (
+ uint32_t idx,
+ uint32_t keylen,
+ uint32_t old_le_size
+ );
+
+ // Allocates space in the mempool to store a new leafentry.
+ // This may require reorganizing the mempool and updating the dmt.
+ __attribute__((__nonnull__))
+ void get_space_for_overwrite(uint32_t idx, const void* keyp, uint32_t keylen, uint32_t old_keylen, uint32_t old_size,
+ uint32_t new_size, LEAFENTRY* new_le_space, void **const maybe_free);
+
+ // Allocates space in the mempool to store a new leafentry
+ // and inserts a new key into the dmt
+ // This may require reorganizing the mempool and updating the dmt.
+ __attribute__((__nonnull__))
+ void get_space_for_insert(uint32_t idx, const void* keyp, uint32_t keylen, size_t size, LEAFENTRY* new_le_space, void **const maybe_free);
+
+ // Gets a leafentry given a klpair from this basement node.
+ LEAFENTRY get_le_from_klpair(const klpair_struct *klpair) const;
+
+ void serialize_to_wbuf(struct wbuf *const wb);
+
+ // Prepares this basement node for serialization.
+ // Must be called before serializing this basement node.
+ // Between calling prepare_to_serialize and actually serializing, the basement node may not be modified
+ void prepare_to_serialize(void);
+
+ // Serialize the basement node header to a wbuf
+ // Requires prepare_to_serialize() to have been called first.
+ void serialize_header(struct wbuf *wb) const;
+
+ // Serialize all keys and leafentries to a wbuf
+ // Requires prepare_to_serialize() (and serialize_header()) has been called first.
+ // Currently only supported when all keys are fixed-length.
+ void serialize_rest(struct wbuf *wb) const;
+
+ static const uint32_t HEADER_LENGTH = 0
+ + sizeof(uint32_t) // key_data_size
+ + sizeof(uint32_t) // val_data_size
+ + sizeof(uint32_t) // fixed_key_length
+ + sizeof(uint8_t) // all_keys_same_length
+ + sizeof(uint8_t) // keys_vals_separate
+ + 0;
+private:
+
+    // split_klpairs_extra would ideally be a local class in split_klpairs, but
+    // the dmt template parameter for iterate needs linkage, so it has to be a
+    // separate class; we still want it to be able to call e.g. add_key, hence the friend declaration below.
+ friend class split_klpairs_extra;
+
+ // Allocates space in the mempool.
+ // If there is insufficient space, the mempool is enlarged and leafentries may be shuffled to reduce fragmentation.
+ // If shuffling happens, the offsets stored in the dmt are updated.
+ LEAFENTRY mempool_malloc_and_update_dmt(size_t size, void **maybe_free);
+
+ // Change the size of the mempool to support what is already in it, plus added_size.
+ // possibly "compress" by shuffling leafentries around to reduce fragmentation to 0.
+ // If fragmentation is already 0 and force_compress is not true, shuffling may be skipped.
+ // If shuffling happens, leafentries will be stored in the mempool in sorted order.
+ void dmt_compress_kvspace(size_t added_size, void **maybe_free, bool force_compress);
+
+ // Note that a key was added (for maintaining disk-size of this basement node)
+ void add_key(uint32_t keylen);
+
+ // Note that multiple keys were added (for maintaining disk-size of this basement node)
+ void add_keys(uint32_t n_keys, uint32_t combined_klpair_len);
+
+ // Note that a key was removed (for maintaining disk-size of this basement node)
+ void remove_key(uint32_t keylen);
+
+ klpair_dmt_t m_buffer; // pointers to individual leaf entries
+ struct mempool m_buffer_mempool; // storage for all leaf entries
+
+ friend class bndata_bugfix_test;
+
+ // Get the serialized size of a klpair.
+ // As of Jan 14, 2014, serialized size of a klpair is independent of whether this basement node has fixed-length keys.
+ uint32_t klpair_disksize(const uint32_t klpair_len, const klpair_struct *klpair) const;
+
+ // The disk/memory size of all keys. (Note that the size of memory for the leafentries is maintained by m_buffer_mempool)
+ size_t m_disksize_of_keys;
+
+    // Deserialize this basement node from rbuf.
+    // All keys come first, followed by all leafentries (both in sorted order).
+ void initialize_from_separate_keys_and_vals(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version,
+ uint32_t key_data_size, uint32_t val_data_size, bool all_keys_same_length,
+ uint32_t fixed_klpair_length);
+};
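A brief usage sketch of the iterate() interface above (editorial; le_size_sum and sum_le_sizes are hypothetical names, and it assumes the usual toku dmt convention that a nonzero callback return value stops the iteration early):

    // Callback matching the iterate() signature; sums the serialized size of every leafentry.
    struct le_size_sum {
        uint64_t total;
    };
    static int sum_le_sizes(const void *key, const uint32_t keylen, const LEAFENTRY &le,
                            const uint32_t idx, le_size_sum *const extra) {
        (void) key; (void) keylen; (void) idx;
        extra->total += leafentry_disksize(le);
        return 0;  // keep iterating
    }

    // Typical call site, given a bn_data *bn:
    //     le_size_sum s = { 0 };
    //     bn->iterate<le_size_sum, sum_le_sizes>(&s);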
diff --git a/storage/tokudb/PerconaFT/ft/cachetable/background_job_manager.cc b/storage/tokudb/PerconaFT/ft/cachetable/background_job_manager.cc
new file mode 100644
index 00000000..c109185f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/cachetable/background_job_manager.cc
@@ -0,0 +1,109 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <portability/toku_config.h>
+#include <memory.h>
+#include <toku_pthread.h>
+
+#include "cachetable/background_job_manager.h"
+
+toku_instr_key *bjm_jobs_lock_mutex_key;
+toku_instr_key *bjm_jobs_wait_key;
+
+struct background_job_manager_struct {
+ bool accepting_jobs;
+ uint32_t num_jobs;
+ toku_cond_t jobs_wait;
+ toku_mutex_t jobs_lock;
+};
+
+void bjm_init(BACKGROUND_JOB_MANAGER *pbjm) {
+ BACKGROUND_JOB_MANAGER XCALLOC(bjm);
+ toku_mutex_init(*bjm_jobs_lock_mutex_key, &bjm->jobs_lock, nullptr);
+ toku_cond_init(*bjm_jobs_wait_key, &bjm->jobs_wait, nullptr);
+ bjm->accepting_jobs = true;
+ bjm->num_jobs = 0;
+ *pbjm = bjm;
+}
+
+void bjm_destroy(BACKGROUND_JOB_MANAGER bjm) {
+ assert(bjm->num_jobs == 0);
+ toku_cond_destroy(&bjm->jobs_wait);
+ toku_mutex_destroy(&bjm->jobs_lock);
+ toku_free(bjm);
+}
+
+void bjm_reset(BACKGROUND_JOB_MANAGER bjm) {
+ toku_mutex_lock(&bjm->jobs_lock);
+ assert(bjm->num_jobs == 0);
+ bjm->accepting_jobs = true;
+ toku_mutex_unlock(&bjm->jobs_lock);
+}
+
+int bjm_add_background_job(BACKGROUND_JOB_MANAGER bjm) {
+ int ret_val;
+ toku_mutex_lock(&bjm->jobs_lock);
+ if (bjm->accepting_jobs) {
+ bjm->num_jobs++;
+ ret_val = 0;
+ }
+ else {
+ ret_val = -1;
+ }
+ toku_mutex_unlock(&bjm->jobs_lock);
+ return ret_val;
+}
+void bjm_remove_background_job(BACKGROUND_JOB_MANAGER bjm){
+ toku_mutex_lock(&bjm->jobs_lock);
+ assert(bjm->num_jobs > 0);
+ bjm->num_jobs--;
+ if (bjm->num_jobs == 0 && !bjm->accepting_jobs) {
+ toku_cond_broadcast(&bjm->jobs_wait);
+ }
+ toku_mutex_unlock(&bjm->jobs_lock);
+}
+
+void bjm_wait_for_jobs_to_finish(BACKGROUND_JOB_MANAGER bjm) {
+ toku_mutex_lock(&bjm->jobs_lock);
+ bjm->accepting_jobs = false;
+ while (bjm->num_jobs > 0) {
+ toku_cond_wait(&bjm->jobs_wait, &bjm->jobs_lock);
+ }
+ toku_mutex_unlock(&bjm->jobs_lock);
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/cachetable/background_job_manager.h b/storage/tokudb/PerconaFT/ft/cachetable/background_job_manager.h
new file mode 100644
index 00000000..ba654590
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/cachetable/background_job_manager.h
@@ -0,0 +1,78 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+//
+// The background job manager keeps track of the background jobs
+// that are currently running. We use the background job manager
+// to allow threads to perform background jobs on various pieces
+// of the system (e.g. cachefiles and cloned pairs being written out
+// for checkpoint).
+//
+
+typedef struct background_job_manager_struct *BACKGROUND_JOB_MANAGER;
+
+
+void bjm_init(BACKGROUND_JOB_MANAGER* bjm);
+void bjm_destroy(BACKGROUND_JOB_MANAGER bjm);
+
+//
+// Allows a background job manager to accept background jobs again
+//
+void bjm_reset(BACKGROUND_JOB_MANAGER bjm);
+
+//
+// add a background job. If return value is 0, then the addition of the job
+// was successful and the user may perform the background job. If return
+// value is non-zero, then adding of the background job failed and the user
+// may not perform the background job.
+//
+int bjm_add_background_job(BACKGROUND_JOB_MANAGER bjm);
+
+//
+// remove a background job
+//
+void bjm_remove_background_job(BACKGROUND_JOB_MANAGER bjm);
+
+//
+// This function waits for all current background jobs to be removed. If the user
+// calls bjm_add_background_job while this function is running, or after this function
+// has completed, bjm_add_background_job returns an error.
+//
+void bjm_wait_for_jobs_to_finish(BACKGROUND_JOB_MANAGER bjm);
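A minimal usage sketch of this interface (editorial; do_background_work is a hypothetical placeholder for whatever the job actually does):

    // Worker side: only run the job if the manager is still accepting jobs.
    static void example_run_job(BACKGROUND_JOB_MANAGER bjm) {
        if (bjm_add_background_job(bjm) != 0) {
            return;                      // the manager is draining; the job must not run
        }
        do_background_work();            // hypothetical: the actual background work
        bjm_remove_background_job(bjm);  // lets bjm_wait_for_jobs_to_finish return once the count hits 0
    }

    // Shutdown side:
    //     bjm_wait_for_jobs_to_finish(bjm);  // stop accepting new jobs, block until outstanding ones finish
    //     bjm_destroy(bjm);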
diff --git a/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h b/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h
new file mode 100644
index 00000000..05fb771d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h
@@ -0,0 +1,607 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "cachetable/background_job_manager.h"
+#include <portability/toku_random.h>
+#include <util/frwlock.h>
+#include <util/kibbutz.h>
+#include <util/nb_mutex.h>
+#include <util/partitioned_counter.h>
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// This file contains the classes and structs that make up the cachetable.
+// The structs are:
+// - cachefile
+// - ctpair
+// - pair_list
+// - cachefile_list
+// - checkpointer
+// - evictor
+// - cleaner
+//
+// The rest of this comment assumes familiarity with the locks used in these
+// classes/structs and what the locks protect. Nevertheless, here is
+// a list of the locks that we have:
+// - pair_list->list_lock
+// - pair_list->pending_lock_expensive
+// - pair_list->pending_lock_cheap
+// - cachefile_list->lock
+// - PAIR->mutex
+// - PAIR->value_rwlock
+// - PAIR->disk_nb_mutex
+//
+// Here are rules for how the locks interact:
+// - To grab any of the pair_list's locks, or the cachefile_list's lock,
+// the cachetable must be in existence
+// - To grab the PAIR mutex, we must know the PAIR will not disappear:
+// - the PAIR must be pinned (value_rwlock or disk_nb_mutex is held)
+// - OR, the pair_list's list lock is held
+// - As a result, to get rid of a PAIR from the pair_list, we must hold
+// both the pair_list's list_lock and the PAIR's mutex
+// - To grab PAIR->value_rwlock, we must hold the PAIR's mutex
+// - To grab PAIR->disk_nb_mutex, we must hold the PAIR's mutex
+// and hold PAIR->value_rwlock
+//
+// Now let's talk about ordering. Here is an order from outer to inner (top locks must be grabbed first)
+// - pair_list->pending_lock_expensive
+// - pair_list->list_lock
+// - cachefile_list->lock
+// - PAIR->mutex
+// - pair_list->pending_lock_cheap <-- after grabbing this lock,
+//       NO other locks should be grabbed.
+// - when grabbing PAIR->value_rwlock or PAIR->disk_nb_mutex,
+//   if the acquisition will not block, then it does not matter whether any other locks are held,
+// BUT if the acquisition will block, then NO other locks may be held besides
+// PAIR->mutex.
+//
+// HERE ARE TWO EXAMPLES:
+// To pin a PAIR on a client thread, the following must be done:
+// - first grab the list lock and find the PAIR
+// - with the list lock grabbed, grab PAIR->mutex
+// - with PAIR->mutex held:
+// - release list lock
+// - pin PAIR
+// - with PAIR pinned, grab pending_lock_cheap,
+// - copy and clear PAIR->checkpoint_pending,
+// - resolve checkpointing if necessary
+// - return to user.
+// The list lock may be held while pinning the PAIR if
+// the PAIR has no contention. Otherwise, we may
+// deadlock with another thread that has the PAIR pinned,
+// tries to pin some other PAIR, and in doing so grabs the list lock.
+//
+// To unpin a PAIR on a client thread:
+// - because the PAIR is pinned, we don't need the pair_list's list_lock
+// - so, simply acquire PAIR->mutex
+// - unpin the PAIR
+// - return
+//
+//////////////////////////////////////////////////////////////////////////////
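+//
+// A minimal sketch of the pin sequence described above, in terms of the
+// pair_list methods declared below (contention handling and the actual
+// checkpoint resolution are omitted; which lock is taken on value_rwlock
+// depends on the caller):
+//
+//   list->read_list_lock();
+//   PAIR p = list->find_pair(cf, key, fullhash);
+//   toku_mutex_lock(p->mutex);             // grab PAIR->mutex
+//   list->read_list_unlock();
+//   /* pin: acquire p->value_rwlock here */
+//   toku_mutex_unlock(p->mutex);
+//   list->read_pending_cheap_lock();
+//   bool pending = p->checkpoint_pending;  // copy and clear the pending bit
+//   p->checkpoint_pending = false;
+//   list->read_pending_cheap_unlock();
+//   // resolve checkpointing if 'pending' is set, then return to the caller
+//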
+class evictor;
+class pair_list;
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// Maps to a file on disk.
+//
+struct cachefile {
+ // these next two fields are protected by cachetable's list lock
+ // they are managed whenever we add or remove a pair from
+ // the cachetable. As of Riddler, this linked list is only used to
+ // make cachetable_flush_cachefile more efficient
+ PAIR cf_head; // doubly linked list that is NOT circular
+    uint32_t num_pairs; // number of pairs in the cachetable that belong to this cachefile
+
+ bool for_checkpoint; //True if part of the in-progress checkpoint
+
+ // If set and the cachefile closes, the file will be removed.
+ // Clients must not operate on the cachefile after setting this,
+ // nor attempt to open any cachefile with the same fname (dname)
+ // until this cachefile has been fully closed and unlinked.
+ bool unlink_on_close;
+ // If set then fclose will not be logged in recovery log.
+ bool skip_log_recover_on_close;
+ int fd; /* Bug: If a file is opened read-only, then it is stuck in read-only. If it is opened read-write, then subsequent writers can write to it too. */
+ CACHETABLE cachetable;
+ struct fileid fileid;
+    // the filenum is used as an identifier of the cachefile
+ // for logging and recovery
+ FILENUM filenum;
+ // number used to generate hashes for blocks in the cachefile
+ // used in toku_cachetable_hash
+ // this used to be the filenum.fileid, but now it is separate
+ uint32_t hash_id;
+ char *fname_in_env; /* Used for logging */
+
+ void *userdata;
+ void (*log_fassociate_during_checkpoint)(CACHEFILE cf, void *userdata); // When starting a checkpoint we must log all open files.
+ void (*close_userdata)(CACHEFILE cf, int fd, void *userdata, bool lsnvalid, LSN); // when closing the last reference to a cachefile, first call this function.
+ void (*free_userdata)(CACHEFILE cf, void *userdata); // when closing the last reference to a cachefile, first call this function.
+ void (*begin_checkpoint_userdata)(LSN lsn_of_checkpoint, void *userdata); // before checkpointing cachefiles call this function.
+ void (*checkpoint_userdata)(CACHEFILE cf, int fd, void *userdata); // when checkpointing a cachefile, call this function.
+ void (*end_checkpoint_userdata)(CACHEFILE cf, int fd, void *userdata); // after checkpointing cachefiles call this function.
+ void (*note_pin_by_checkpoint)(CACHEFILE cf, void *userdata); // add a reference to the userdata to prevent it from being removed from memory
+ void (*note_unpin_by_checkpoint)(CACHEFILE cf, void *userdata); // add a reference to the userdata to prevent it from being removed from memory
+ BACKGROUND_JOB_MANAGER bjm;
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// The pair represents the data stored in the cachetable.
+//
+struct ctpair {
+ // these fields are essentially constants. They do not change.
+ CACHEFILE cachefile;
+ CACHEKEY key;
+ uint32_t fullhash;
+ CACHETABLE_FLUSH_CALLBACK flush_callback;
+ CACHETABLE_PARTIAL_EVICTION_EST_CALLBACK pe_est_callback;
+ CACHETABLE_PARTIAL_EVICTION_CALLBACK pe_callback;
+ CACHETABLE_CLEANER_CALLBACK cleaner_callback;
+ CACHETABLE_CLONE_CALLBACK clone_callback;
+ CACHETABLE_CHECKPOINT_COMPLETE_CALLBACK checkpoint_complete_callback;
+ void *write_extraargs;
+
+    // access to these fields is protected by disk_nb_mutex
+ void* cloned_value_data; // cloned copy of value_data used for checkpointing
+ long cloned_value_size; // size of cloned_value_data, used for accounting of size_current
+ void* disk_data; // data used to fetch/flush value_data to and from disk.
+
+    // access to these fields is protected by value_rwlock
+ void* value_data; // data used by client threads, FTNODEs and ROLLBACK_LOG_NODEs
+ PAIR_ATTR attr;
+ enum cachetable_dirty dirty;
+
+ // protected by PAIR->mutex
+ uint32_t count; // clock count
+ uint32_t refcount; // if > 0, then this PAIR is referenced by
+ // callers to the cachetable, and therefore cannot
+ // be evicted
+ uint32_t num_waiting_on_refs; // number of threads waiting on refcount to go to zero
+ toku_cond_t refcount_wait; // cond used to wait for refcount to go to zero
+
+ // locks
+ toku::frwlock value_rwlock;
+ struct nb_mutex disk_nb_mutex; // single writer, protects disk_data, is used for writing cloned nodes for checkpoint
+ toku_mutex_t* mutex; // gotten from the pair list
+
+ // Access to checkpoint_pending is protected by two mechanisms,
+ // the value_rwlock and the pair_list's pending locks (expensive and cheap).
+    // checkpoint_pending may be true or false.
+ // Here are the rules for reading/modifying this bit.
+ // - To transition this field from false to true during begin_checkpoint,
+ // we must be holding both of the pair_list's pending locks.
+ // - To transition this field from true to false during end_checkpoint,
+ // we must be holding the value_rwlock.
+ // - For a non-checkpoint thread to read the value, we must hold both the
+ // value_rwlock and one of the pair_list's pending locks
+ // - For the checkpoint thread to read the value, we must
+ // hold the value_rwlock
+ //
+    bool checkpoint_pending; // If this is on, then we must resolve checkpointing before modifying the pair.
+
+ // these are variables that are only used to transfer information to background threads
+ // we cache them here to avoid a malloc. In the future, we should investigate if this
+ // is necessary, as having these fields here is not technically necessary
+ long size_evicting_estimate;
+ evictor* ev;
+ pair_list* list;
+
+ // A PAIR is stored in a pair_list (which happens to be PAIR->list).
+ // These variables are protected by the list lock in the pair_list
+ //
+ // clock_next,clock_prev represent a circular doubly-linked list.
+ PAIR clock_next,clock_prev; // In clock.
+ PAIR hash_chain;
+
+    // pending_next, pending_prev represent a non-circular doubly-linked list.
+ PAIR pending_next;
+ PAIR pending_prev;
+
+ // cf_next, cf_prev represent a non-circular doubly-linked list.
+    // They are the entries in the linked list of PAIRs belonging to a cachefile; they are protected
+    // by the list lock of the PAIR's pair_list. They are used to make
+ // cachetable_flush_cachefile cheaper so that we don't need
+ // to search the entire cachetable to find a particular cachefile's
+ // PAIRs
+ PAIR cf_next;
+ PAIR cf_prev;
+};
+
+//
+// This initializes the fields and members of the pair.
+//
+void pair_init(PAIR p,
+ CACHEFILE cachefile,
+ CACHEKEY key,
+ void *value,
+ PAIR_ATTR attr,
+ enum cachetable_dirty dirty,
+ uint32_t fullhash,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ evictor *ev,
+ pair_list *list);
+
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// The pair list maintains the set of PAIR's that make up
+// the cachetable.
+//
+class pair_list {
+public:
+ //
+ // the following fields are protected by the list lock
+ //
+ uint32_t m_n_in_table; // number of pairs in the hash table
+ uint32_t m_table_size; // number of buckets in the hash table
+ uint32_t m_num_locks;
+ PAIR *m_table; // hash table
+ toku_mutex_aligned_t *m_mutexes;
+ //
+ // The following fields are the heads of various linked lists.
+    // They are also protected by the list lock, but their
+    // usage is not as straightforward. For each of them,
+    // only ONE thread is allowed to iterate over them while
+    // holding a read lock on the list lock. All other threads
+    // that want to modify elements in the lists or iterate over
+    // the lists must hold the write list lock. Here is which
+    // thread may hold a read lock on the list lock while
+    // iterating over each list:
+ // - clock_head -> eviction thread (evictor)
+ // - cleaner_head -> cleaner thread (cleaner)
+ // - pending_head -> checkpoint thread (checkpointer)
+ //
+    PAIR m_clock_head; // head of the clock list; the head is the next thing to be up for decrement.
+ PAIR m_cleaner_head; // for cleaner thread. head is the next thing to look at for possible cleaning.
+ PAIR m_checkpoint_head; // for begin checkpoint to iterate over PAIRs and mark as pending_checkpoint
+ PAIR m_pending_head; // list of pairs marked with checkpoint_pending
+
+ // this field is public so we are still POD
+
+ // usage of this lock is described above
+ toku_pthread_rwlock_t m_list_lock;
+ //
+ // these locks are the "pending locks" referenced
+ // in comments about PAIR->checkpoint_pending. There
+ // are two of them, but both serve the same purpose, which
+ // is to protect the transition of a PAIR's checkpoint pending
+ // value from false to true during begin_checkpoint.
+ // We use two locks, because threads that want to read the
+ // checkpoint_pending value may hold a lock for varying periods of time.
+ // Threads running eviction may need to protect checkpoint_pending
+ // while writing a node to disk, which is an expensive operation,
+    // so they use pending_lock_expensive. Client threads that
+ // want to pin PAIRs will want to protect checkpoint_pending
+ // just long enough to read the value and wipe it out. This is
+ // a cheap operation, and as a result, uses pending_lock_cheap.
+ //
+ // By having two locks, and making begin_checkpoint first
+ // grab pending_lock_expensive and then pending_lock_cheap,
+ // we ensure that threads that want to pin nodes can grab
+ // only pending_lock_cheap, and never block behind threads
+ // holding pending_lock_expensive and writing a node out to disk
+ //
+ toku_pthread_rwlock_t m_pending_lock_expensive;
+ toku_pthread_rwlock_t m_pending_lock_cheap;
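+    //
+    // A sketch of the ordering described above, as begin_checkpoint turns on
+    // pending bits (the other locks and bookkeeping it needs are omitted):
+    //
+    //   write_pending_exp_lock();
+    //   write_pending_cheap_lock();
+    //   // ... mark the relevant PAIRs checkpoint_pending ...
+    //   write_pending_cheap_unlock();
+    //   write_pending_exp_unlock();
+    //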
+ void init();
+ void destroy();
+ void evict_completely(PAIR pair);
+ void evict_from_cachetable(PAIR pair);
+ void evict_from_cachefile(PAIR pair);
+ void add_to_cachetable_only(PAIR p);
+ void put(PAIR pair);
+ PAIR find_pair(CACHEFILE file, CACHEKEY key, uint32_t hash);
+ void pending_pairs_remove (PAIR p);
+ void verify();
+ void get_state(int *num_entries, int *hash_size);
+ void read_list_lock();
+ void read_list_unlock();
+ void write_list_lock();
+ void write_list_unlock();
+ void read_pending_exp_lock();
+ void read_pending_exp_unlock();
+ void write_pending_exp_lock();
+ void write_pending_exp_unlock();
+ void read_pending_cheap_lock();
+ void read_pending_cheap_unlock();
+ void write_pending_cheap_lock();
+ void write_pending_cheap_unlock();
+ toku_mutex_t* get_mutex_for_pair(uint32_t fullhash);
+ void pair_lock_by_fullhash(uint32_t fullhash);
+ void pair_unlock_by_fullhash(uint32_t fullhash);
+
+private:
+ void pair_remove (PAIR p);
+ void remove_from_hash_chain(PAIR p);
+ void add_to_cf_list (PAIR p);
+ void add_to_clock (PAIR p);
+ void add_to_hash_chain(PAIR p);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// Wrapper for the head of our cachefile list.
+//
+class cachefile_list {
+public:
+ void init();
+ void destroy();
+ void read_lock();
+ void read_unlock();
+ void write_lock();
+ void write_unlock();
+ int cachefile_of_iname_in_env(const char *iname_in_env, CACHEFILE *cf);
+ int cachefile_of_filenum(FILENUM filenum, CACHEFILE *cf);
+ void add_cf_unlocked(CACHEFILE newcf);
+ void add_stale_cf(CACHEFILE newcf);
+ void remove_cf(CACHEFILE cf);
+ void remove_stale_cf_unlocked(CACHEFILE cf);
+ FILENUM reserve_filenum();
+ uint32_t get_new_hash_id_unlocked();
+ CACHEFILE find_cachefile_unlocked(struct fileid* fileid);
+ CACHEFILE find_stale_cachefile_unlocked(struct fileid* fileid);
+ void verify_unused_filenum(FILENUM filenum);
+ bool evict_some_stale_pair(evictor* ev);
+ void free_stale_data(evictor* ev);
+ // access to these fields are protected by the lock
+ FILENUM m_next_filenum_to_use;
+ uint32_t m_next_hash_id_to_use;
+    toku_pthread_rwlock_t m_lock; // this field is public so we are still POD
+ toku::omt<CACHEFILE> m_active_filenum;
+ toku::omt<CACHEFILE> m_active_fileid;
+ toku::omt<CACHEFILE> m_stale_fileid;
+private:
+ CACHEFILE find_cachefile_in_list_unlocked(CACHEFILE start, struct fileid* fileid);
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// The checkpointer handles starting and finishing checkpoints of the
+// cachetable's data.
+//
+class checkpointer {
+public:
+ int init(pair_list *_pl, TOKULOGGER _logger, evictor *_ev, cachefile_list *files);
+ void destroy();
+ void set_checkpoint_period(uint32_t new_period);
+ uint32_t get_checkpoint_period();
+ int shutdown();
+ bool has_been_shutdown();
+ void begin_checkpoint();
+ void add_background_job();
+ void remove_background_job();
+ void end_checkpoint(void (*testcallback_f)(void*), void* testextra);
+ TOKULOGGER get_logger();
+ // used during begin_checkpoint
+ void increment_num_txns();
+private:
+ uint32_t m_checkpoint_num_txns; // how many transactions are in the checkpoint
+ TOKULOGGER m_logger;
+ LSN m_lsn_of_checkpoint_in_progress;
+ uint32_t m_checkpoint_num_files; // how many cachefiles are in the checkpoint
+ struct minicron m_checkpointer_cron; // the periodic checkpointing thread
+ cachefile_list *m_cf_list;
+ pair_list *m_list;
+ evictor *m_ev;
+ bool m_checkpointer_cron_init;
+ bool m_checkpointer_init;
+
+ // variable used by the checkpoint thread to know
+ // when all work induced by cloning on client threads is done
+ BACKGROUND_JOB_MANAGER m_checkpoint_clones_bjm;
+ // private methods for begin_checkpoint
+ void update_cachefiles();
+ void log_begin_checkpoint();
+ void turn_on_pending_bits();
+ // private methods for end_checkpoint
+ void fill_checkpoint_cfs(CACHEFILE* checkpoint_cfs);
+ void checkpoint_pending_pairs();
+ void checkpoint_userdata(CACHEFILE* checkpoint_cfs);
+ void log_end_checkpoint();
+ void end_checkpoint_userdata(CACHEFILE* checkpoint_cfs);
+ void remove_cachefiles(CACHEFILE* checkpoint_cfs);
+
+ // Unit test struct needs access to private members.
+ friend struct checkpointer_test;
+};
+
+//
+// This is how often we want the eviction thread
+// to run, in seconds.
+//
+const int EVICTION_PERIOD = 1;
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// The evictor handles the removal of pairs from the pair list/cachetable.
+//
+class evictor {
+public:
+ int init(long _size_limit, pair_list* _pl, cachefile_list* _cf_list, KIBBUTZ _kibbutz, uint32_t eviction_period);
+ void destroy();
+ void add_pair_attr(PAIR_ATTR attr);
+ void remove_pair_attr(PAIR_ATTR attr);
+ void change_pair_attr(PAIR_ATTR old_attr, PAIR_ATTR new_attr);
+ void add_cloned_data_size(long size);
+ void remove_cloned_data_size(long size);
+ uint64_t reserve_memory(double fraction, uint64_t upper_bound);
+ void release_reserved_memory(uint64_t reserved_memory);
+ void run_eviction_thread();
+ void do_partial_eviction(PAIR p);
+ void evict_pair(PAIR p, bool checkpoint_pending);
+ void wait_for_cache_pressure_to_subside();
+ void signal_eviction_thread();
+ void signal_eviction_thread_locked();
+ bool should_client_thread_sleep();
+ bool should_client_wake_eviction_thread();
+ // function needed for testing
+ void get_state(long *size_current_ptr, long *size_limit_ptr);
+ void fill_engine_status();
+ void set_enable_partial_eviction(bool enabled);
+ bool get_enable_partial_eviction(void) const;
+private:
+ void add_to_size_current(long size);
+ void remove_from_size_current(long size);
+ void run_eviction();
+ bool run_eviction_on_pair(PAIR p);
+ void try_evict_pair(PAIR p);
+ void decrease_size_evicting(long size_evicting_estimate);
+ bool should_sleeping_clients_wakeup();
+ bool eviction_needed();
+
+ // We have some intentional races with these variables because we're ok with reading something a little bit old.
+ // Provide some hooks for reading variables in an unsafe way so that there are function names we can stick in a valgrind suppression.
+ int64_t unsafe_read_size_current(void) const;
+ int64_t unsafe_read_size_evicting(void) const;
+
+ pair_list* m_pl;
+ cachefile_list* m_cf_list;
+ int64_t m_size_current; // the sum of the sizes of the pairs in the cachetable
+ int64_t m_size_cloned_data; // stores amount of cloned data we have, only used for engine status
+ // changes to these two values are protected
+ // by ev_thread_lock
+ int64_t m_size_reserved; // How much memory is reserved (e.g., by the loader)
+ int64_t m_size_evicting; // the sum of the sizes of the pairs being written
+
+ // these are constants
+ int64_t m_low_size_watermark; // target max size of cachetable that eviction thread aims for
+ int64_t m_low_size_hysteresis; // if cachetable grows to this size, client threads wake up eviction thread upon adding data
+ int64_t m_high_size_watermark; // if cachetable grows to this size, client threads sleep upon adding data
+ int64_t m_high_size_hysteresis; // if > cachetable size, then sleeping client threads may wake up
+
+ bool m_enable_partial_eviction; // true if partial evictions are permitted
+
+ // used to calculate random numbers
+ struct random_data m_random_data;
+ char m_random_statebuf[64];
+
+    // mutex that protects fields listed immediately below
+ toku_mutex_t m_ev_thread_lock;
+ // the eviction thread
+ toku_pthread_t m_ev_thread;
+ // condition variable that controls the sleeping period
+ // of the eviction thread
+ toku_cond_t m_ev_thread_cond;
+ // number of client threads that are currently sleeping
+ // due to an over-subscribed cachetable
+ uint32_t m_num_sleepers;
+ // states if the eviction thread should run. set to true
+ // in init, set to false during destroy
+ bool m_run_thread;
+ // bool that states if the eviction thread is currently running
+ bool m_ev_thread_is_running;
+    // period for which the eviction thread sleeps
+ uint32_t m_period_in_seconds;
+    // condition variable on which client threads wait when sleeping
+ // due to an over-subscribed cachetable
+ toku_cond_t m_flow_control_cond;
+
+ // variables for engine status
+ PARTITIONED_COUNTER m_size_nonleaf;
+ PARTITIONED_COUNTER m_size_leaf;
+ PARTITIONED_COUNTER m_size_rollback;
+ PARTITIONED_COUNTER m_size_cachepressure;
+ PARTITIONED_COUNTER m_wait_pressure_count;
+ PARTITIONED_COUNTER m_wait_pressure_time;
+ PARTITIONED_COUNTER m_long_wait_pressure_count;
+ PARTITIONED_COUNTER m_long_wait_pressure_time;
+
+ KIBBUTZ m_kibbutz;
+
+ // this variable is ONLY used for testing purposes
+ uint64_t m_num_eviction_thread_runs;
+
+ bool m_ev_thread_init;
+ bool m_evictor_init;
+
+ friend class evictor_test_helpers;
+ friend class evictor_unit_test;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// Iterates over the cleaner head in the pair list, calling the cleaner
+// callback on each node in that list.
+//
+class cleaner {
+public:
+ int init(uint32_t cleaner_iterations, pair_list* _pl, CACHETABLE _ct);
+ void destroy(void);
+ uint32_t get_iterations(void);
+ void set_iterations(uint32_t new_iterations);
+ uint32_t get_period_unlocked(void);
+ void set_period(uint32_t new_period);
+ int run_cleaner(void);
+
+private:
+ pair_list* m_pl;
+ CACHETABLE m_ct;
+ struct minicron m_cleaner_cron; // the periodic cleaner thread
+ uint32_t m_cleaner_iterations; // how many times to run the cleaner per
+ // cleaner period (minicron has a
+ // minimum period of 1s so if you want
+ // more frequent cleaner runs you must
+ // use this)
+ bool m_cleaner_cron_init;
+ bool m_cleaner_init;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// The cachetable is as close to an ENV as we get.
+//
+struct cachetable {
+ pair_list list;
+ cleaner cl;
+ evictor ev;
+ checkpointer cp;
+ cachefile_list cf_list;
+
+ KIBBUTZ client_kibbutz; // pool of worker threads and jobs to do asynchronously for the client.
+ KIBBUTZ ct_kibbutz; // pool of worker threads and jobs to do asynchronously for the cachetable
+ KIBBUTZ checkpointing_kibbutz; // small pool for checkpointing cloned pairs
+
+ char *env_dir;
+};
diff --git a/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc
new file mode 100644
index 00000000..034d5442
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc
@@ -0,0 +1,5018 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <string.h>
+#include <time.h>
+#include <stdarg.h>
+
+#include <portability/memory.h>
+#include <portability/toku_race_tools.h>
+#include <portability/toku_atomic.h>
+#include <portability/toku_pthread.h>
+#include <portability/toku_portability.h>
+#include <portability/toku_stdlib.h>
+#include <portability/toku_time.h>
+
+#include "ft/cachetable/cachetable.h"
+#include "ft/cachetable/cachetable-internal.h"
+#include "ft/cachetable/checkpoint.h"
+#include "ft/logger/log-internal.h"
+#include "util/rwlock.h"
+#include "util/scoped_malloc.h"
+#include "util/status.h"
+#include "util/context.h"
+
+toku_instr_key *cachetable_m_mutex_key;
+toku_instr_key *cachetable_ev_thread_lock_mutex_key;
+
+toku_instr_key *cachetable_m_list_lock_key;
+toku_instr_key *cachetable_m_pending_lock_expensive_key;
+toku_instr_key *cachetable_m_pending_lock_cheap_key;
+toku_instr_key *cachetable_m_lock_key;
+
+toku_instr_key *cachetable_value_key;
+toku_instr_key *cachetable_disk_nb_rwlock_key;
+
+toku_instr_key *cachetable_p_refcount_wait_key;
+toku_instr_key *cachetable_m_flow_control_cond_key;
+toku_instr_key *cachetable_m_ev_thread_cond_key;
+
+toku_instr_key *cachetable_disk_nb_mutex_key;
+toku_instr_key *log_internal_lock_mutex_key;
+toku_instr_key *eviction_thread_key;
+
+///////////////////////////////////////////////////////////////////////////////////
+// Engine status
+//
+// Status is intended for display to humans to help understand system behavior.
+// It does not need to be perfectly thread-safe.
+
+// These should be in the cachetable object, but we make them file-wide so that gdb can get them easily.
+// They were left here after the engine status cleanup (#2949), rather than moved into the status struct,
+// so they are still easily available to the debugger and to save lots of typing.
+static uint64_t cachetable_miss;
+static uint64_t cachetable_misstime; // time spent waiting for disk read
+static uint64_t cachetable_prefetches; // how many times has a block been prefetched into the cachetable?
+static uint64_t cachetable_evictions;
+static uint64_t cleaner_executions; // number of times the cleaner thread's loop has executed
+
+
+// Note, toku_cachetable_get_status() is below, after declaration of cachetable.
+
+static void * const zero_value = nullptr;
+static PAIR_ATTR const zero_attr = {
+ .size = 0,
+ .nonleaf_size = 0,
+ .leaf_size = 0,
+ .rollback_size = 0,
+ .cache_pressure_size = 0,
+ .is_valid = true
+};
+
+
+static inline void ctpair_destroy(PAIR p) {
+ p->value_rwlock.deinit();
+ paranoid_invariant(p->refcount == 0);
+ nb_mutex_destroy(&p->disk_nb_mutex);
+ toku_cond_destroy(&p->refcount_wait);
+ toku_free(p);
+}
+
+static inline void pair_lock(PAIR p) {
+ toku_mutex_lock(p->mutex);
+}
+
+static inline void pair_unlock(PAIR p) {
+ toku_mutex_unlock(p->mutex);
+}
+
+// adds a reference to the PAIR
+// on input and output, PAIR mutex is held
+static void pair_add_ref_unlocked(PAIR p) {
+ p->refcount++;
+}
+
+// releases a reference to the PAIR
+// on input and output, PAIR mutex is held
+static void pair_release_ref_unlocked(PAIR p) {
+ paranoid_invariant(p->refcount > 0);
+ p->refcount--;
+ if (p->refcount == 0 && p->num_waiting_on_refs > 0) {
+ toku_cond_broadcast(&p->refcount_wait);
+ }
+}
+
+static void pair_wait_for_ref_release_unlocked(PAIR p) {
+ p->num_waiting_on_refs++;
+ while (p->refcount > 0) {
+ toku_cond_wait(&p->refcount_wait, p->mutex);
+ }
+ p->num_waiting_on_refs--;
+}
+
+bool toku_ctpair_is_write_locked(PAIR pair) {
+ return pair->value_rwlock.writers() == 1;
+}
+
+void
+toku_cachetable_get_status(CACHETABLE ct, CACHETABLE_STATUS statp) {
+ ct_status.init();
+ CT_STATUS_VAL(CT_MISS) = cachetable_miss;
+ CT_STATUS_VAL(CT_MISSTIME) = cachetable_misstime;
+ CT_STATUS_VAL(CT_PREFETCHES) = cachetable_prefetches;
+ CT_STATUS_VAL(CT_EVICTIONS) = cachetable_evictions;
+ CT_STATUS_VAL(CT_CLEANER_EXECUTIONS) = cleaner_executions;
+ CT_STATUS_VAL(CT_CLEANER_PERIOD) = toku_get_cleaner_period_unlocked(ct);
+ CT_STATUS_VAL(CT_CLEANER_ITERATIONS) = toku_get_cleaner_iterations_unlocked(ct);
+ toku_kibbutz_get_status(ct->client_kibbutz,
+ &CT_STATUS_VAL(CT_POOL_CLIENT_NUM_THREADS),
+ &CT_STATUS_VAL(CT_POOL_CLIENT_NUM_THREADS_ACTIVE),
+ &CT_STATUS_VAL(CT_POOL_CLIENT_QUEUE_SIZE),
+ &CT_STATUS_VAL(CT_POOL_CLIENT_MAX_QUEUE_SIZE),
+ &CT_STATUS_VAL(CT_POOL_CLIENT_TOTAL_ITEMS_PROCESSED),
+ &CT_STATUS_VAL(CT_POOL_CLIENT_TOTAL_EXECUTION_TIME));
+ toku_kibbutz_get_status(ct->ct_kibbutz,
+ &CT_STATUS_VAL(CT_POOL_CACHETABLE_NUM_THREADS),
+ &CT_STATUS_VAL(CT_POOL_CACHETABLE_NUM_THREADS_ACTIVE),
+ &CT_STATUS_VAL(CT_POOL_CACHETABLE_QUEUE_SIZE),
+ &CT_STATUS_VAL(CT_POOL_CACHETABLE_MAX_QUEUE_SIZE),
+ &CT_STATUS_VAL(CT_POOL_CACHETABLE_TOTAL_ITEMS_PROCESSED),
+ &CT_STATUS_VAL(CT_POOL_CACHETABLE_TOTAL_EXECUTION_TIME));
+ toku_kibbutz_get_status(ct->checkpointing_kibbutz,
+ &CT_STATUS_VAL(CT_POOL_CHECKPOINT_NUM_THREADS),
+ &CT_STATUS_VAL(CT_POOL_CHECKPOINT_NUM_THREADS_ACTIVE),
+ &CT_STATUS_VAL(CT_POOL_CHECKPOINT_QUEUE_SIZE),
+ &CT_STATUS_VAL(CT_POOL_CHECKPOINT_MAX_QUEUE_SIZE),
+ &CT_STATUS_VAL(CT_POOL_CHECKPOINT_TOTAL_ITEMS_PROCESSED),
+ &CT_STATUS_VAL(CT_POOL_CHECKPOINT_TOTAL_EXECUTION_TIME));
+ ct->ev.fill_engine_status();
+ *statp = ct_status;
+}
+
+// FIXME global with no toku prefix
+void remove_background_job_from_cf(CACHEFILE cf)
+{
+ bjm_remove_background_job(cf->bjm);
+}
+
+// FIXME global with no toku prefix
+void cachefile_kibbutz_enq (CACHEFILE cf, void (*f)(void*), void *extra)
+// The function f must call remove_background_job_from_cf when it completes
+{
+ int r = bjm_add_background_job(cf->bjm);
+ // if client is adding a background job, then it must be done
+ // at a time when the manager is accepting background jobs, otherwise
+ // the client is screwing up
+ assert_zero(r);
+ toku_kibbutz_enq(cf->cachetable->client_kibbutz, f, extra);
+}
+
+static int
+checkpoint_thread (void *checkpointer_v)
+// Effect: If checkpoint_period>0 then periodically run a checkpoint.
+// If someone changes the checkpoint_period (calling toku_set_checkpoint_period), then the checkpoint will run sooner or later.
+// If someone sets the checkpoint_shutdown boolean, then this thread exits.
+// This thread notices those changes by waiting on a condition variable.
+{
+ CHECKPOINTER CAST_FROM_VOIDP(cp, checkpointer_v);
+ int r = toku_checkpoint(cp, cp->get_logger(), NULL, NULL, NULL, NULL, SCHEDULED_CHECKPOINT);
+ invariant_zero(r);
+ return r;
+}
+
+void toku_set_checkpoint_period (CACHETABLE ct, uint32_t new_period) {
+ ct->cp.set_checkpoint_period(new_period);
+}
+
+uint32_t toku_get_checkpoint_period_unlocked (CACHETABLE ct) {
+ return ct->cp.get_checkpoint_period();
+}
+
+void toku_set_cleaner_period (CACHETABLE ct, uint32_t new_period) {
+ if(force_recovery) {
+ return;
+ }
+ ct->cl.set_period(new_period);
+}
+
+uint32_t toku_get_cleaner_period_unlocked (CACHETABLE ct) {
+ return ct->cl.get_period_unlocked();
+}
+
+void toku_set_cleaner_iterations (CACHETABLE ct, uint32_t new_iterations) {
+ ct->cl.set_iterations(new_iterations);
+}
+
+uint32_t toku_get_cleaner_iterations (CACHETABLE ct) {
+ return ct->cl.get_iterations();
+}
+
+uint32_t toku_get_cleaner_iterations_unlocked (CACHETABLE ct) {
+ return ct->cl.get_iterations();
+}
+
+void toku_set_enable_partial_eviction (CACHETABLE ct, bool enabled) {
+ ct->ev.set_enable_partial_eviction(enabled);
+}
+
+bool toku_get_enable_partial_eviction (CACHETABLE ct) {
+ return ct->ev.get_enable_partial_eviction();
+}
+
+// reserve 25% as "unreservable". The loader cannot have it.
+#define unreservable_memory(size) ((size)/4)
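+// For example, with the default 128MB size_limit used below, 32MB is
+// unreservable and can never be handed to the loader.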
+
+int toku_cachetable_create_ex(CACHETABLE *ct_result, long size_limit,
+ unsigned long client_pool_threads,
+ unsigned long cachetable_pool_threads,
+ unsigned long checkpoint_pool_threads,
+ LSN UU(initial_lsn), TOKULOGGER logger) {
+ int result = 0;
+ int r;
+
+ if (size_limit == 0) {
+ size_limit = 128*1024*1024;
+ }
+
+ CACHETABLE XCALLOC(ct);
+ ct->list.init();
+ ct->cf_list.init();
+
+ int num_processors = toku_os_get_number_active_processors();
+ int checkpointing_nworkers = (num_processors/4) ? num_processors/4 : 1;
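+    // For example, with 8 active processors and all of the pool-size
+    // arguments left at zero, the pools created below default to 8 client
+    // threads, 16 cachetable threads, and 2 checkpointing threads.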
+ r = toku_kibbutz_create(client_pool_threads ? client_pool_threads : num_processors,
+ &ct->client_kibbutz);
+ if (r != 0) {
+ result = r;
+ goto cleanup;
+ }
+ r = toku_kibbutz_create(cachetable_pool_threads ? cachetable_pool_threads : 2*num_processors,
+ &ct->ct_kibbutz);
+ if (r != 0) {
+ result = r;
+ goto cleanup;
+ }
+ r = toku_kibbutz_create(checkpoint_pool_threads ? checkpoint_pool_threads : checkpointing_nworkers,
+ &ct->checkpointing_kibbutz);
+ if (r != 0) {
+ result = r;
+ goto cleanup;
+ }
+ // must be done after creating ct_kibbutz
+ r = ct->ev.init(size_limit, &ct->list, &ct->cf_list, ct->ct_kibbutz, EVICTION_PERIOD);
+ if (r != 0) {
+ result = r;
+ goto cleanup;
+ }
+ r = ct->cp.init(&ct->list, logger, &ct->ev, &ct->cf_list);
+ if (r != 0) {
+ result = r;
+ goto cleanup;
+ }
+ r = ct->cl.init(1, &ct->list, ct); // by default, start with one iteration
+ if (r != 0) {
+ result = r;
+ goto cleanup;
+ }
+ ct->env_dir = toku_xstrdup(".");
+cleanup:
+ if (result == 0) {
+ *ct_result = ct;
+ } else {
+ toku_cachetable_close(&ct);
+ }
+ return result;
+}
+
+// Returns a pointer to the checkpointer contained within
+// the given cachetable.
+CHECKPOINTER toku_cachetable_get_checkpointer(CACHETABLE ct) {
+ return &ct->cp;
+}
+
+uint64_t toku_cachetable_reserve_memory(CACHETABLE ct, double fraction, uint64_t upper_bound) {
+ uint64_t reserved_memory = ct->ev.reserve_memory(fraction, upper_bound);
+ return reserved_memory;
+}
+
+void toku_cachetable_release_reserved_memory(CACHETABLE ct, uint64_t reserved_memory) {
+ ct->ev.release_reserved_memory(reserved_memory);
+}
+
+void
+toku_cachetable_set_env_dir(CACHETABLE ct, const char *env_dir) {
+ toku_free(ct->env_dir);
+ ct->env_dir = toku_xstrdup(env_dir);
+}
+
+// What cachefile goes with particular iname (iname relative to env)?
+// The transaction that is adding the reference might not have a reference
+// to the ft, therefore the cachefile might be closing.
+// If closing, we want to return that it is not there, but must wait till after
+// the close has finished.
+// Once the close has finished, there must not be a cachefile with that name
+// in the cachetable.
+int toku_cachefile_of_iname_in_env (CACHETABLE ct, const char *iname_in_env, CACHEFILE *cf) {
+ return ct->cf_list.cachefile_of_iname_in_env(iname_in_env, cf);
+}
+
+// What cachefile goes with a particular filenum?
+// This function can only be called if the ft is still open, so file must
+// still be open
+int toku_cachefile_of_filenum (CACHETABLE ct, FILENUM filenum, CACHEFILE *cf) {
+ return ct->cf_list.cachefile_of_filenum(filenum, cf);
+}
+
+// TEST-ONLY function
+// If something goes wrong, close the fd. After this, the caller shouldn't close the fd, but instead should close the cachefile.
+int toku_cachetable_openfd (CACHEFILE *cfptr, CACHETABLE ct, int fd, const char *fname_in_env) {
+ FILENUM filenum = toku_cachetable_reserve_filenum(ct);
+ bool was_open;
+ return toku_cachetable_openfd_with_filenum(cfptr, ct, fd, fname_in_env, filenum, &was_open);
+}
+
+// Get a unique filenum from the cachetable
+FILENUM
+toku_cachetable_reserve_filenum(CACHETABLE ct) {
+ return ct->cf_list.reserve_filenum();
+}
+
+static void create_new_cachefile(
+ CACHETABLE ct,
+ FILENUM filenum,
+ uint32_t hash_id,
+ int fd,
+ const char *fname_in_env,
+ struct fileid fileid,
+ CACHEFILE *cfptr
+ ) {
+ // File is not open. Make a new cachefile.
+ CACHEFILE newcf = NULL;
+ XCALLOC(newcf);
+ newcf->cachetable = ct;
+ newcf->hash_id = hash_id;
+ newcf->fileid = fileid;
+
+ newcf->filenum = filenum;
+ newcf->fd = fd;
+ newcf->fname_in_env = toku_xstrdup(fname_in_env);
+ bjm_init(&newcf->bjm);
+ *cfptr = newcf;
+}
+
+int toku_cachetable_openfd_with_filenum (CACHEFILE *cfptr, CACHETABLE ct, int fd,
+ const char *fname_in_env,
+ FILENUM filenum, bool* was_open) {
+ int r;
+ CACHEFILE newcf;
+ struct fileid fileid;
+
+ assert(filenum.fileid != FILENUM_NONE.fileid);
+ r = toku_os_get_unique_file_id(fd, &fileid);
+ if (r != 0) {
+ r = get_error_errno();
+ close(fd);
+ return r;
+ }
+ ct->cf_list.write_lock();
+ CACHEFILE existing_cf = ct->cf_list.find_cachefile_unlocked(&fileid);
+ if (existing_cf) {
+ *was_open = true;
+ // Reuse an existing cachefile and close the caller's fd, whose
+ // responsibility has been passed to us.
+ r = close(fd);
+ assert(r == 0);
+ *cfptr = existing_cf;
+ r = 0;
+ goto exit;
+ }
+ *was_open = false;
+ ct->cf_list.verify_unused_filenum(filenum);
+ // now let's try to find it in the stale cachefiles
+ existing_cf = ct->cf_list.find_stale_cachefile_unlocked(&fileid);
+    // if we found a stale cachefile, revive it
+ if (existing_cf) {
+ // fix up the fields in the cachefile
+ existing_cf->filenum = filenum;
+ existing_cf->fd = fd;
+ existing_cf->fname_in_env = toku_xstrdup(fname_in_env);
+ bjm_init(&existing_cf->bjm);
+
+ // now we need to move all the PAIRs in it back into the cachetable
+ ct->list.write_list_lock();
+ for (PAIR curr_pair = existing_cf->cf_head; curr_pair; curr_pair = curr_pair->cf_next) {
+ pair_lock(curr_pair);
+ ct->list.add_to_cachetable_only(curr_pair);
+ pair_unlock(curr_pair);
+ }
+ ct->list.write_list_unlock();
+ // move the cachefile back to the list of active cachefiles
+ ct->cf_list.remove_stale_cf_unlocked(existing_cf);
+ ct->cf_list.add_cf_unlocked(existing_cf);
+ *cfptr = existing_cf;
+ r = 0;
+ goto exit;
+ }
+
+ create_new_cachefile(
+ ct,
+ filenum,
+ ct->cf_list.get_new_hash_id_unlocked(),
+ fd,
+ fname_in_env,
+ fileid,
+ &newcf
+ );
+
+ ct->cf_list.add_cf_unlocked(newcf);
+
+ *cfptr = newcf;
+ r = 0;
+ exit:
+ ct->cf_list.write_unlock();
+ return r;
+}
+
+static void cachetable_flush_cachefile (CACHETABLE, CACHEFILE cf, bool evict_completely);
+
+//TEST_ONLY_FUNCTION
+int toku_cachetable_openf (CACHEFILE *cfptr, CACHETABLE ct, const char *fname_in_env, int flags, mode_t mode) {
+ char *fname_in_cwd = toku_construct_full_name(2, ct->env_dir, fname_in_env);
+ int fd = open(fname_in_cwd, flags+O_BINARY, mode);
+ int r;
+ if (fd < 0) {
+ r = get_error_errno();
+ } else {
+ r = toku_cachetable_openfd (cfptr, ct, fd, fname_in_env);
+ }
+ toku_free(fname_in_cwd);
+ return r;
+}
+
+char *
+toku_cachefile_fname_in_env (CACHEFILE cf) {
+ if (cf) {
+ return cf->fname_in_env;
+ }
+ return nullptr;
+}
+
+void toku_cachefile_set_fname_in_env(CACHEFILE cf, char *new_fname_in_env) {
+ cf->fname_in_env = new_fname_in_env;
+}
+
+int
+toku_cachefile_get_fd (CACHEFILE cf) {
+ return cf->fd;
+}
+
+static void cachefile_destroy(CACHEFILE cf) {
+ if (cf->free_userdata) {
+ cf->free_userdata(cf, cf->userdata);
+ }
+ toku_free(cf);
+}
+
+void toku_cachefile_close(CACHEFILE *cfp, bool oplsn_valid, LSN oplsn) {
+ CACHEFILE cf = *cfp;
+ CACHETABLE ct = cf->cachetable;
+
+ bjm_wait_for_jobs_to_finish(cf->bjm);
+
+ // Clients should never attempt to close a cachefile that is being
+ // checkpointed. We notify clients this is happening in the
+ // note_pin_by_checkpoint callback.
+ assert(!cf->for_checkpoint);
+
+ // Flush the cachefile and remove all of its pairs from the cachetable,
+ // but keep the PAIRs linked in the cachefile. We will store the cachefile
+    // away in case it gets opened again immediately
+ //
+ // if we are unlinking on close, then we want to evict completely,
+ // otherwise, we will keep the PAIRs and cachefile around in case
+ // a subsequent open comes soon
+ cachetable_flush_cachefile(ct, cf, cf->unlink_on_close);
+
+ // Call the close userdata callback to notify the client this cachefile
+ // and its underlying file are going to be closed
+ if (cf->close_userdata) {
+ cf->close_userdata(cf, cf->fd, cf->userdata, oplsn_valid, oplsn);
+ }
+ // fsync and close the fd.
+ toku_file_fsync_without_accounting(cf->fd);
+ int r = close(cf->fd);
+ assert(r == 0);
+ cf->fd = -1;
+
+ // destroy the parts of the cachefile
+ // that do not persist across opens/closes
+ bjm_destroy(cf->bjm);
+ cf->bjm = NULL;
+
+ // remove the cf from the list of active cachefiles
+ ct->cf_list.remove_cf(cf);
+ cf->filenum = FILENUM_NONE;
+
+ // Unlink the file if the bit was set
+ if (cf->unlink_on_close) {
+ char *fname_in_cwd = toku_cachetable_get_fname_in_cwd(cf->cachetable, cf->fname_in_env);
+ r = unlink(fname_in_cwd);
+ assert_zero(r);
+ toku_free(fname_in_cwd);
+ }
+ toku_free(cf->fname_in_env);
+ cf->fname_in_env = NULL;
+
+ // we destroy the cf if the unlink bit was set or if no PAIRs exist
+ // if no PAIRs exist, there is no sense in keeping the cachefile around
+ bool destroy_cf = cf->unlink_on_close || (cf->cf_head == NULL);
+ if (destroy_cf) {
+ cachefile_destroy(cf);
+ }
+ else {
+ ct->cf_list.add_stale_cf(cf);
+ }
+}
+
+// This hash function comes from Jenkins: http://burtleburtle.net/bob/c/lookup3.c
+// The idea here is to mix the bits thoroughly so that we don't have to do modulo by a prime number.
+// Instead we can use a bitmask on a table of size power of two.
+// This hash function does yield improved performance on ./db-benchmark-test-tokudb and ./scanscan
+static inline uint32_t rot(uint32_t x, uint32_t k) {
+ return (x<<k) | (x>>(32-k));
+}
+static inline uint32_t final (uint32_t a, uint32_t b, uint32_t c) {
+ c ^= b; c -= rot(b,14);
+ a ^= c; a -= rot(c,11);
+ b ^= a; b -= rot(a,25);
+ c ^= b; c -= rot(b,16);
+ a ^= c; a -= rot(c,4);
+ b ^= a; b -= rot(a,14);
+ c ^= b; c -= rot(b,24);
+ return c;
+}
+
+uint32_t toku_cachetable_hash (CACHEFILE cachefile, BLOCKNUM key)
+// Effect: Return a 32-bit hash key. The hash key shall be suitable for using with bitmasking for a table of size power-of-two.
+{
+ return final(cachefile->hash_id, (uint32_t)(key.b>>32), (uint32_t)key.b);
+}
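+// For example, a hash table whose size m is a power of two can compute a
+// bucket index as (fullhash & (m - 1)) instead of taking a modulo by a prime.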
+
+#define CLOCK_SATURATION 15
+#define CLOCK_INITIAL_COUNT 3
+
+// Requires pair's mutex to be held
+static void pair_touch (PAIR p) {
+ p->count = (p->count < CLOCK_SATURATION) ? p->count+1 : CLOCK_SATURATION;
+}
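+// For example, each call moves a pair's count one step toward CLOCK_SATURATION
+// (15), where it stays; the eviction thread decrements counts as it sweeps the
+// clock list (see pair_list::m_clock_head), so recently touched pairs survive
+// longer.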
+
+// Remove a pair from the cachetable, requires write list lock to be held and p->mutex to be held
+// Effects: the pair is removed from the LRU list and from the cachetable's hash table.
+// The size of the objects in the cachetable is adjusted by the size of the pair being
+// removed.
+static void cachetable_remove_pair (pair_list* list, evictor* ev, PAIR p) {
+ list->evict_completely(p);
+ ev->remove_pair_attr(p->attr);
+}
+
+static void cachetable_free_pair(PAIR p) {
+ CACHETABLE_FLUSH_CALLBACK flush_callback = p->flush_callback;
+ CACHEKEY key = p->key;
+ void *value = p->value_data;
+ void* disk_data = p->disk_data;
+ void *write_extraargs = p->write_extraargs;
+ PAIR_ATTR old_attr = p->attr;
+
+ cachetable_evictions++;
+ PAIR_ATTR new_attr = p->attr;
+ // Note that flush_callback is called with write_me false, so the only purpose of this
+ // call is to tell the ft layer to evict the node (keep_me is false).
+ // Also, because we have already removed the PAIR from the cachetable in
+ // cachetable_remove_pair, we cannot pass in p->cachefile and p->cachefile->fd
+    // for the first two parameters, as these may be invalid (#5171), so we
+    // pass in NULL and -1 as dummy values.
+ flush_callback(NULL, -1, key, value, &disk_data, write_extraargs, old_attr, &new_attr, false, false, true, false);
+
+ ctpair_destroy(p);
+}
+
+// assumes value_rwlock and disk_nb_mutex held on entry
+// responsibility of this function is to only write a locked PAIR to disk
+// and NOTHING else. We do not manipulate the state of the PAIR
+// of the cachetable here (with the exception of ct->size_current for clones)
+// or of the cachetable here (with the exception of ct->size_current for clones)
+// No pair_list lock should be held, and the PAIR mutex should not be held
+//
+static void cachetable_only_write_locked_data(
+ evictor* ev,
+ PAIR p,
+ bool for_checkpoint,
+ PAIR_ATTR* new_attr,
+ bool is_clone
+ )
+{
+ CACHETABLE_FLUSH_CALLBACK flush_callback = p->flush_callback;
+ CACHEFILE cachefile = p->cachefile;
+ CACHEKEY key = p->key;
+ void *value = is_clone ? p->cloned_value_data : p->value_data;
+ void *disk_data = p->disk_data;
+ void *write_extraargs = p->write_extraargs;
+ PAIR_ATTR old_attr;
+ // we do this for drd. If we are a cloned pair and only
+ // have the disk_nb_mutex, it is a race to access p->attr.
+ // Luckily, old_attr here is only used for some test applications,
+ // so inaccurate non-size fields are ok.
+ if (is_clone) {
+ old_attr = make_pair_attr(p->cloned_value_size);
+ }
+ else {
+ old_attr = p->attr;
+ }
+ bool dowrite = true;
+
+ // write callback
+ flush_callback(
+ cachefile,
+ cachefile->fd,
+ key,
+ value,
+ &disk_data,
+ write_extraargs,
+ old_attr,
+ new_attr,
+ dowrite,
+        is_clone ? false : true,  // keep_me (only keep if this is not a cloned pointer)
+ for_checkpoint,
+ is_clone //is_clone
+ );
+ p->disk_data = disk_data;
+ if (is_clone) {
+ p->cloned_value_data = NULL;
+ ev->remove_cloned_data_size(p->cloned_value_size);
+ p->cloned_value_size = 0;
+ }
+}
+
+
+//
+// This function writes a PAIR's value out to disk. Currently, it is called
+// by get_and_pin functions that write a PAIR out for checkpoint, by
+// evictor threads that evict dirty PAIRS, and by the checkpoint thread
+// that needs to write out a dirty node for checkpoint.
+//
+// Requires that p->mutex NOT be held on entry, otherwise
+// calling cachetable_only_write_locked_data will be very expensive
+//
+static void cachetable_write_locked_pair(
+ evictor* ev,
+ PAIR p,
+ bool for_checkpoint
+ )
+{
+ PAIR_ATTR old_attr = p->attr;
+ PAIR_ATTR new_attr = p->attr;
+ // grabbing the disk_nb_mutex here ensures that
+    // after this point, no one is writing out a cloned value.
+    // If we grabbed the disk_nb_mutex inside the if clause instead,
+ // then we may try to evict a PAIR that is in the process
+ // of having its clone be written out
+ pair_lock(p);
+ nb_mutex_lock(&p->disk_nb_mutex, p->mutex);
+ pair_unlock(p);
+ // make sure that assumption about cloned_value_data is true
+ // if we have grabbed the disk_nb_mutex, then that means that
+ // there should be no cloned value data
+ assert(p->cloned_value_data == NULL);
+ if (p->dirty) {
+ cachetable_only_write_locked_data(ev, p, for_checkpoint, &new_attr, false);
+ //
+ // now let's update variables
+ //
+ if (new_attr.is_valid) {
+ p->attr = new_attr;
+ ev->change_pair_attr(old_attr, new_attr);
+ }
+ }
+ // the pair is no longer dirty once written
+ p->dirty = CACHETABLE_CLEAN;
+ pair_lock(p);
+ nb_mutex_unlock(&p->disk_nb_mutex);
+ pair_unlock(p);
+}
+
+// Worker thread function that writes and evicts a pair from memory to its cachefile
+static void cachetable_evicter(void* extra) {
+ PAIR p = (PAIR)extra;
+ pair_list* pl = p->list;
+ CACHEFILE cf = p->cachefile;
+ pl->read_pending_exp_lock();
+ bool for_checkpoint = p->checkpoint_pending;
+ p->checkpoint_pending = false;
+ // per the contract of evictor::evict_pair,
+ // the pair's mutex, p->mutex, must be held on entry
+ pair_lock(p);
+ p->ev->evict_pair(p, for_checkpoint);
+ pl->read_pending_exp_unlock();
+ bjm_remove_background_job(cf->bjm);
+}
+
+static void cachetable_partial_eviction(void* extra) {
+ PAIR p = (PAIR)extra;
+ CACHEFILE cf = p->cachefile;
+ p->ev->do_partial_eviction(p);
+ bjm_remove_background_job(cf->bjm);
+}
+
+void toku_cachetable_swap_pair_values(PAIR old_pair, PAIR new_pair) {
+ void* old_value = old_pair->value_data;
+ void* new_value = new_pair->value_data;
+ old_pair->value_data = new_value;
+ new_pair->value_data = old_value;
+}
+
+void toku_cachetable_maybe_flush_some(CACHETABLE ct) {
+ // TODO: <CER> Maybe move this...
+ ct->ev.signal_eviction_thread();
+}
+
+// Initializes a pair's members.
+//
+void pair_init(PAIR p,
+ CACHEFILE cachefile,
+ CACHEKEY key,
+ void *value,
+ PAIR_ATTR attr,
+ enum cachetable_dirty dirty,
+ uint32_t fullhash,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ evictor *ev,
+ pair_list *list)
+{
+ p->cachefile = cachefile;
+ p->key = key;
+ p->value_data = value;
+ p->cloned_value_data = NULL;
+ p->cloned_value_size = 0;
+ p->disk_data = NULL;
+ p->attr = attr;
+ p->dirty = dirty;
+ p->fullhash = fullhash;
+
+ p->flush_callback = write_callback.flush_callback;
+ p->pe_callback = write_callback.pe_callback;
+ p->pe_est_callback = write_callback.pe_est_callback;
+ p->cleaner_callback = write_callback.cleaner_callback;
+ p->clone_callback = write_callback.clone_callback;
+ p->checkpoint_complete_callback = write_callback.checkpoint_complete_callback;
+ p->write_extraargs = write_callback.write_extraargs;
+
+ p->count = 0; // <CER> Is zero the correct init value?
+ p->refcount = 0;
+ p->num_waiting_on_refs = 0;
+ toku_cond_init(*cachetable_p_refcount_wait_key, &p->refcount_wait, nullptr);
+ p->checkpoint_pending = false;
+
+ p->mutex = list->get_mutex_for_pair(fullhash);
+ assert(p->mutex);
+ p->value_rwlock.init(p->mutex
+#ifdef TOKU_MYSQL_WITH_PFS
+ ,
+ *cachetable_value_key
+#endif
+ );
+ nb_mutex_init(*cachetable_disk_nb_mutex_key,
+ *cachetable_disk_nb_rwlock_key,
+ &p->disk_nb_mutex);
+
+ p->size_evicting_estimate = 0; // <CER> Is zero the correct init value?
+
+ p->ev = ev;
+ p->list = list;
+
+ p->clock_next = p->clock_prev = NULL;
+ p->pending_next = p->pending_prev = NULL;
+ p->cf_next = p->cf_prev = NULL;
+ p->hash_chain = NULL;
+}
+
+// has ct locked on entry
+// This function MUST NOT release and reacquire the cachetable lock
+// Its callers (toku_cachetable_put_with_dep_pairs) depend on this behavior.
+//
+// Requires pair list's write lock to be held on entry.
+// The pair's mutex must be held as well.
+//
+//
+static PAIR cachetable_insert_at(CACHETABLE ct,
+ CACHEFILE cachefile, CACHEKEY key, void *value,
+ uint32_t fullhash,
+ PAIR_ATTR attr,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ enum cachetable_dirty dirty) {
+ PAIR MALLOC(p);
+ assert(p);
+ memset(p, 0, sizeof *p);
+ pair_init(p,
+ cachefile,
+ key,
+ value,
+ attr,
+ dirty,
+ fullhash,
+ write_callback,
+ &ct->ev,
+ &ct->list
+ );
+
+ ct->list.put(p);
+ ct->ev.add_pair_attr(attr);
+ return p;
+}
+
+// on input, the write list lock must be held AND
+// the pair's mutex must be held as well
+static void cachetable_insert_pair_at(CACHETABLE ct, PAIR p, PAIR_ATTR attr) {
+ ct->list.put(p);
+ ct->ev.add_pair_attr(attr);
+}
+
+
+// has ct locked on entry
+// This function MUST NOT release and reacquire the cachetable lock
+// Its callers (toku_cachetable_put_with_dep_pairs) depend on this behavior.
+//
+// Requires pair list's write lock to be held on entry
+//
+static void cachetable_put_internal(
+ CACHEFILE cachefile,
+ PAIR p,
+ void *value,
+ PAIR_ATTR attr,
+ CACHETABLE_PUT_CALLBACK put_callback
+ )
+{
+ CACHETABLE ct = cachefile->cachetable;
+ //
+ //
+ // TODO: (Zardosht), make code run in debug only
+ //
+ //
+ //PAIR dummy_p = ct->list.find_pair(cachefile, key, fullhash);
+ //invariant_null(dummy_p);
+ cachetable_insert_pair_at(ct, p, attr);
+ invariant_notnull(put_callback);
+ put_callback(p->key, value, p);
+}
+
+// The pair mutex (p->mutex) may or may not be held on entry;
+// holding the pair mutex on entry is not important
+// for performance or correctness.
+// Pair is pinned on entry
+static void
+clone_pair(evictor* ev, PAIR p) {
+ PAIR_ATTR old_attr = p->attr;
+ PAIR_ATTR new_attr;
+ long clone_size = 0;
+
+ // act of cloning should be fast,
+ // not sure if we have to release
+ // and regrab the cachetable lock,
+ // but doing it for now
+ p->clone_callback(
+ p->value_data,
+ &p->cloned_value_data,
+ &clone_size,
+ &new_attr,
+ true,
+ p->write_extraargs
+ );
+
+ // now we need to do the same actions we would do
+ // if the PAIR had been written to disk
+ //
+ // because we hold the value_rwlock,
+ // it doesn't matter whether we clear
+ // the pending bit before the clone
+ // or after the clone
+ p->dirty = CACHETABLE_CLEAN;
+ if (new_attr.is_valid) {
+ p->attr = new_attr;
+ ev->change_pair_attr(old_attr, new_attr);
+ }
+ p->cloned_value_size = clone_size;
+ ev->add_cloned_data_size(p->cloned_value_size);
+}
+
+static void checkpoint_cloned_pair(void* extra) {
+ PAIR p = (PAIR)extra;
+ CACHETABLE ct = p->cachefile->cachetable;
+ PAIR_ATTR new_attr;
+ // note that pending lock is not needed here because
+ // we KNOW we are in the middle of a checkpoint
+ // and that a begin_checkpoint cannot happen
+ cachetable_only_write_locked_data(
+ p->ev,
+ p,
+ true, //for_checkpoint
+ &new_attr,
+ true //is_clone
+ );
+ pair_lock(p);
+ nb_mutex_unlock(&p->disk_nb_mutex);
+ pair_unlock(p);
+ ct->cp.remove_background_job();
+}
+
+static void
+checkpoint_cloned_pair_on_writer_thread(CACHETABLE ct, PAIR p) {
+ toku_kibbutz_enq(ct->checkpointing_kibbutz, checkpoint_cloned_pair, p);
+}
+
+
+//
+// Given a PAIR p with the value_rwlock already held, do the following:
+// - If the PAIR needs to be written out to disk for checkpoint:
+// - If the PAIR is cloneable, clone the PAIR and place the work
+// of writing the PAIR on a background thread.
+// - If the PAIR is not cloneable, write the PAIR to disk for checkpoint
+// on the current thread
+//
+// On entry, pair's mutex is NOT held
+//
+static void
+write_locked_pair_for_checkpoint(CACHETABLE ct, PAIR p, bool checkpoint_pending)
+{
+ if (checkpoint_pending && p->checkpoint_complete_callback) {
+ p->checkpoint_complete_callback(p->value_data);
+ }
+ if (p->dirty && checkpoint_pending) {
+ if (p->clone_callback) {
+ pair_lock(p);
+ nb_mutex_lock(&p->disk_nb_mutex, p->mutex);
+ pair_unlock(p);
+ assert(!p->cloned_value_data);
+ clone_pair(&ct->ev, p);
+ assert(p->cloned_value_data);
+ // place it on the background thread and continue
+ // responsibility of writer thread to release disk_nb_mutex
+ ct->cp.add_background_job();
+ checkpoint_cloned_pair_on_writer_thread(ct, p);
+ }
+ else {
+ // The pair is not cloneable, just write the pair to disk
+ // we already have p->value_rwlock and we just do the write in our own thread.
+ cachetable_write_locked_pair(&ct->ev, p, true); // keeps the PAIR's write lock
+ }
+ }
+}
+
+// On entry and exit: hold the pair's mutex (p->mutex)
+// Method: take write lock
+// maybe write out the node
+// Else release write lock
+//
+static void
+write_pair_for_checkpoint_thread (evictor* ev, PAIR p)
+{
+ // Grab an exclusive lock on the pair.
+ // If we grab an expensive lock, then other threads will return
+ // TRY_AGAIN rather than waiting. In production, the only time
+ // another thread will check if grabbing a lock is expensive is when
+ // we have a clone_callback (FTNODEs), so the act of checkpointing
+ // will be cheap. Also, much of the time we'll just be clearing
+ // pending bits and that's definitely cheap. (see #5427)
+ p->value_rwlock.write_lock(false);
+ if (p->checkpoint_pending && p->checkpoint_complete_callback) {
+ p->checkpoint_complete_callback(p->value_data);
+ }
+ if (p->dirty && p->checkpoint_pending) {
+ if (p->clone_callback) {
+ nb_mutex_lock(&p->disk_nb_mutex, p->mutex);
+ assert(!p->cloned_value_data);
+ clone_pair(ev, p);
+ assert(p->cloned_value_data);
+ }
+ else {
+ // The pair is not cloneable, just write the pair to disk
+ // we already have p->value_rwlock and we just do the write in our own thread.
+ // this will grab and release disk_nb_mutex
+ pair_unlock(p);
+ cachetable_write_locked_pair(ev, p, true); // keeps the PAIR's write lock
+ pair_lock(p);
+ }
+ p->checkpoint_pending = false;
+
+ // now release value_rwlock, before we write the PAIR out
+ // so that the PAIR is available to client threads
+ p->value_rwlock.write_unlock(); // didn't call cachetable_evict_pair so we have to unlock it ourselves.
+ if (p->clone_callback) {
+ // note that pending lock is not needed here because
+ // we KNOW we are in the middle of a checkpoint
+ // and that a begin_checkpoint cannot happen
+ PAIR_ATTR attr;
+ pair_unlock(p);
+ cachetable_only_write_locked_data(
+ ev,
+ p,
+ true, //for_checkpoint
+ &attr,
+ true //is_clone
+ );
+ pair_lock(p);
+ nb_mutex_unlock(&p->disk_nb_mutex);
+ }
+ }
+ else {
+ //
+ // we may clear the pending bit here because we have
+ // both the cachetable lock and the PAIR lock.
+ // The rule, as mentioned in toku_cachetable_begin_checkpoint,
+ // is that to clear the bit, we must have both the PAIR lock
+ // and the pending lock
+ //
+ p->checkpoint_pending = false;
+ p->value_rwlock.write_unlock();
+ }
+}
+
+//
+// For each dependent PAIR passed in,
+// if its checkpoint_pending bit is set and the PAIR is dirty, write the PAIR
+// to disk.
+// We assume the PAIRs passed in have been locked by the client whose calls
+// into the cachetable eventually got us here.
+//
+static void checkpoint_dependent_pairs(
+ CACHETABLE ct,
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ PAIR* dependent_pairs,
+ bool* checkpoint_pending,
+ enum cachetable_dirty* dependent_dirty // array stating dirty/cleanness of dependent pairs
+ )
+{
+ for (uint32_t i =0; i < num_dependent_pairs; i++) {
+ PAIR curr_dep_pair = dependent_pairs[i];
+ // we need to update the dirtiness of the dependent pair,
+ // because the client may have dirtied it while holding its lock,
+ // and if the pair is pending a checkpoint, it needs to be written out
+ if (dependent_dirty[i]) curr_dep_pair->dirty = CACHETABLE_DIRTY;
+ if (checkpoint_pending[i]) {
+ write_locked_pair_for_checkpoint(ct, curr_dep_pair, checkpoint_pending[i]);
+ }
+ }
+}
+
+void toku_cachetable_put_with_dep_pairs(
+ CACHEFILE cachefile,
+ CACHETABLE_GET_KEY_AND_FULLHASH get_key_and_fullhash,
+ void *value,
+ PAIR_ATTR attr,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ void *get_key_and_fullhash_extra,
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ PAIR* dependent_pairs,
+ enum cachetable_dirty* dependent_dirty, // array stating dirty/cleanness of dependent pairs
+ CACHEKEY* key,
+ uint32_t* fullhash,
+ CACHETABLE_PUT_CALLBACK put_callback
+ )
+{
+ //
+ // need to get the key and fullhash
+ //
+ CACHETABLE ct = cachefile->cachetable;
+ if (ct->ev.should_client_thread_sleep()) {
+ ct->ev.wait_for_cache_pressure_to_subside();
+ }
+ if (ct->ev.should_client_wake_eviction_thread()) {
+ ct->ev.signal_eviction_thread();
+ }
+
+ PAIR p = NULL;
+ XMALLOC(p);
+ memset(p, 0, sizeof *p);
+
+ ct->list.write_list_lock();
+ get_key_and_fullhash(key, fullhash, get_key_and_fullhash_extra);
+ pair_init(
+ p,
+ cachefile,
+ *key,
+ value,
+ attr,
+ CACHETABLE_DIRTY,
+ *fullhash,
+ write_callback,
+ &ct->ev,
+ &ct->list
+ );
+ pair_lock(p);
+ p->value_rwlock.write_lock(true);
+ cachetable_put_internal(
+ cachefile,
+ p,
+ value,
+ attr,
+ put_callback
+ );
+ pair_unlock(p);
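+ // Capture and clear the checkpoint-pending bits of the dependent pairs under
+ // the cheap pending lock; they are written out for checkpoint below, after
+ // the list locks are released.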
+ bool checkpoint_pending[num_dependent_pairs];
+ ct->list.write_pending_cheap_lock();
+ for (uint32_t i = 0; i < num_dependent_pairs; i++) {
+ checkpoint_pending[i] = dependent_pairs[i]->checkpoint_pending;
+ dependent_pairs[i]->checkpoint_pending = false;
+ }
+ ct->list.write_pending_cheap_unlock();
+ ct->list.write_list_unlock();
+
+ //
+ // now that we have inserted the row, let's checkpoint the
+ // dependent nodes, if they need checkpointing
+ //
+ checkpoint_dependent_pairs(
+ ct,
+ num_dependent_pairs,
+ dependent_pairs,
+ checkpoint_pending,
+ dependent_dirty
+ );
+}
+
+void toku_cachetable_put(CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, void*value, PAIR_ATTR attr,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ CACHETABLE_PUT_CALLBACK put_callback
+ ) {
+ CACHETABLE ct = cachefile->cachetable;
+ if (ct->ev.should_client_thread_sleep()) {
+ ct->ev.wait_for_cache_pressure_to_subside();
+ }
+ if (ct->ev.should_client_wake_eviction_thread()) {
+ ct->ev.signal_eviction_thread();
+ }
+
+ PAIR p = NULL;
+ XMALLOC(p);
+ memset(p, 0, sizeof *p);
+
+ ct->list.write_list_lock();
+ pair_init(
+ p,
+ cachefile,
+ key,
+ value,
+ attr,
+ CACHETABLE_DIRTY,
+ fullhash,
+ write_callback,
+ &ct->ev,
+ &ct->list
+ );
+ pair_lock(p);
+ p->value_rwlock.write_lock(true);
+ cachetable_put_internal(
+ cachefile,
+ p,
+ value,
+ attr,
+ put_callback
+ );
+ pair_unlock(p);
+ ct->list.write_list_unlock();
+}
+
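+// Return the current wall-clock time in microseconds; used to accumulate
+// cachetable miss latency (cachetable_misstime).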
+static uint64_t get_tnow(void) {
+ struct timeval tv;
+ int r = gettimeofday(&tv, NULL); assert(r == 0);
+ return tv.tv_sec * 1000000ULL + tv.tv_usec;
+}
+
+//
+// On entry, no locks are held other than the PAIR's value_rwlock write lock
+// (the pair's mutex is NOT held; it is taken and released internally).
+// On exit, the write lock is released unless keep_pair_locked is true.
+//
+static void
+do_partial_fetch(
+ CACHETABLE ct,
+ CACHEFILE cachefile,
+ PAIR p,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ void *read_extraargs,
+ bool keep_pair_locked
+ )
+{
+ PAIR_ATTR old_attr = p->attr;
+ PAIR_ATTR new_attr = zero_attr;
+ // As of Dr. No, only clean PAIRs may have pieces missing,
+ // so we do a sanity check here.
+ assert(!p->dirty);
+
+ pair_lock(p);
+ invariant(p->value_rwlock.writers());
+ nb_mutex_lock(&p->disk_nb_mutex, p->mutex);
+ pair_unlock(p);
+ int r = pf_callback(p->value_data, p->disk_data, read_extraargs, cachefile->fd, &new_attr);
+ lazy_assert_zero(r);
+ p->attr = new_attr;
+ ct->ev.change_pair_attr(old_attr, new_attr);
+ pair_lock(p);
+ nb_mutex_unlock(&p->disk_nb_mutex);
+ if (!keep_pair_locked) {
+ p->value_rwlock.write_unlock();
+ }
+ pair_unlock(p);
+}
+
+void toku_cachetable_pf_pinned_pair(
+ void* value,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ void* read_extraargs,
+ CACHEFILE cf,
+ CACHEKEY key,
+ uint32_t fullhash
+ )
+{
+ PAIR_ATTR attr;
+ PAIR p = NULL;
+ CACHETABLE ct = cf->cachetable;
+ ct->list.pair_lock_by_fullhash(fullhash);
+ p = ct->list.find_pair(cf, key, fullhash);
+ assert(p != NULL);
+ assert(p->value_data == value);
+ assert(p->value_rwlock.writers());
+ nb_mutex_lock(&p->disk_nb_mutex, p->mutex);
+ pair_unlock(p);
+
+ int fd = cf->fd;
+ pf_callback(value, p->disk_data, read_extraargs, fd, &attr);
+
+ pair_lock(p);
+ nb_mutex_unlock(&p->disk_nb_mutex);
+ pair_unlock(p);
+}
+
+int toku_cachetable_get_and_pin (
+ CACHEFILE cachefile,
+ CACHEKEY key,
+ uint32_t fullhash,
+ void**value,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ CACHETABLE_FETCH_CALLBACK fetch_callback,
+ CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ bool may_modify_value,
+ void* read_extraargs // parameter for fetch_callback, pf_req_callback, and pf_callback
+ )
+{
+ pair_lock_type lock_type = may_modify_value ? PL_WRITE_EXPENSIVE : PL_READ;
+ // We have separate parameters of read_extraargs and write_extraargs because
+ // the lifetime of the two parameters are different. write_extraargs may be used
+ // long after this function call (e.g. after a flush to disk), whereas read_extraargs
+ // will not be used after this function returns. As a result, the caller may allocate
+ // read_extraargs on the stack, whereas write_extraargs must be allocated
+ // on the heap.
+ return toku_cachetable_get_and_pin_with_dep_pairs (
+ cachefile,
+ key,
+ fullhash,
+ value,
+ write_callback,
+ fetch_callback,
+ pf_req_callback,
+ pf_callback,
+ lock_type,
+ read_extraargs,
+ 0, // number of dependent pairs that we may need to checkpoint
+ NULL, // array of dependent pairs
+ NULL // array stating dirty/cleanness of dependent pairs
+ );
+}
+
+// Read a pair from a cachefile into memory using the pair's fetch callback
+// on entry, pair mutex (p->mutex) is NOT held, but pair is pinned
+static void cachetable_fetch_pair(
+ CACHETABLE ct,
+ CACHEFILE cf,
+ PAIR p,
+ CACHETABLE_FETCH_CALLBACK fetch_callback,
+ void* read_extraargs,
+ bool keep_pair_locked
+ )
+{
+ // helgrind
+ CACHEKEY key = p->key;
+ uint32_t fullhash = p->fullhash;
+
+ void *toku_value = NULL;
+ void *disk_data = NULL;
+ PAIR_ATTR attr;
+
+ // FIXME this should be enum cachetable_dirty, right?
+ int dirty = 0;
+
+ pair_lock(p);
+ nb_mutex_lock(&p->disk_nb_mutex, p->mutex);
+ pair_unlock(p);
+
+ int r;
+ r = fetch_callback(cf, p, cf->fd, key, fullhash, &toku_value, &disk_data, &attr, &dirty, read_extraargs);
+ if (dirty) {
+ p->dirty = CACHETABLE_DIRTY;
+ }
+ assert(r == 0);
+
+ p->value_data = toku_value;
+ p->disk_data = disk_data;
+ p->attr = attr;
+ ct->ev.add_pair_attr(attr);
+ pair_lock(p);
+ nb_mutex_unlock(&p->disk_nb_mutex);
+ if (!keep_pair_locked) {
+ p->value_rwlock.write_unlock();
+ }
+ pair_unlock(p);
+}
+
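+// Atomically read and clear the pair's checkpoint_pending bit under the cheap
+// pending lock. If the bit was set, the caller becomes responsible for writing
+// the pair for checkpoint (see write_locked_pair_for_checkpoint).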
+static bool get_checkpoint_pending(PAIR p, pair_list* pl) {
+ bool checkpoint_pending = false;
+ pl->read_pending_cheap_lock();
+ checkpoint_pending = p->checkpoint_pending;
+ p->checkpoint_pending = false;
+ pl->read_pending_cheap_unlock();
+ return checkpoint_pending;
+}
+
+static void checkpoint_pair_and_dependent_pairs(
+ CACHETABLE ct,
+ PAIR p,
+ bool p_is_pending_checkpoint,
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ PAIR* dependent_pairs,
+ bool* dependent_pairs_pending_checkpoint,
+ enum cachetable_dirty* dependent_dirty // array stating dirty/cleanness of dependent pairs
+ )
+{
+
+ //
+ // A checkpoint must not begin while we are checking dependent pairs or pending bits.
+ // Here is why.
+ //
+ // Now that we have all of the locks on the pairs we
+ // care about, we can take care of the necessary checkpointing.
+ // For each pair, we simply need to write the pair if it is
+ // pending a checkpoint. If no pair is pending a checkpoint,
+ // then all of this work will be done with the cachetable lock held,
+ // so we don't need to worry about a checkpoint beginning
+ // in the middle of any operation below. If some pair
+ // is pending a checkpoint, then the checkpoint thread
+ // will not complete its current checkpoint until it can
+ // successfully grab a lock on the pending pair and
+ // remove it from its list of pairs pending a checkpoint.
+ // This cannot be done until we release the lock
+ // that we have, which is not done in this function.
+ // So, the point is, it is impossible for a checkpoint
+ // to begin while we write any of these locked pairs
+ // for checkpoint, even though writing a pair releases
+ // the cachetable lock.
+ //
+ write_locked_pair_for_checkpoint(ct, p, p_is_pending_checkpoint);
+
+ checkpoint_dependent_pairs(
+ ct,
+ num_dependent_pairs,
+ dependent_pairs,
+ dependent_pairs_pending_checkpoint,
+ dependent_dirty
+ );
+}
+
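+// Release whichever value_rwlock lock (read or write) the caller holds on the pair.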
+static void unpin_pair(PAIR p, bool read_lock_grabbed) {
+ if (read_lock_grabbed) {
+ p->value_rwlock.read_unlock();
+ }
+ else {
+ p->value_rwlock.write_unlock();
+ }
+}
+
+
+// On input, the pair's mutex is held;
+// on output, the pair's mutex is not held.
+// Returns true if the caller must try again (the pair is not pinned),
+// false if we succeeded and the pair is pinned.
+static bool try_pin_pair(
+ PAIR p,
+ CACHETABLE ct,
+ CACHEFILE cachefile,
+ pair_lock_type lock_type,
+ uint32_t num_dependent_pairs,
+ PAIR* dependent_pairs,
+ enum cachetable_dirty* dependent_dirty,
+ CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ void* read_extraargs,
+ bool already_slept
+ )
+{
+ bool dep_checkpoint_pending[num_dependent_pairs];
+ bool try_again = true;
+ bool expensive = (lock_type == PL_WRITE_EXPENSIVE);
+ if (lock_type != PL_READ) {
+ p->value_rwlock.write_lock(expensive);
+ }
+ else {
+ p->value_rwlock.read_lock();
+ }
+ pair_touch(p);
+ pair_unlock(p);
+
+ bool partial_fetch_required = pf_req_callback(p->value_data,read_extraargs);
+
+ if (partial_fetch_required) {
+ toku::context pf_ctx(CTX_PARTIAL_FETCH);
+
+ if (ct->ev.should_client_thread_sleep() && !already_slept) {
+ pair_lock(p);
+ unpin_pair(p, (lock_type == PL_READ));
+ pair_unlock(p);
+ try_again = true;
+ goto exit;
+ }
+ if (ct->ev.should_client_wake_eviction_thread()) {
+ ct->ev.signal_eviction_thread();
+ }
+ //
+ // Just because the PAIR exists does not necessarily mean that all the data the
+ // caller requires is in memory. A partial fetch may be required; that is what
+ // was evaluated above. If the variable is true, a partial fetch is required, so
+ // we must grab the PAIR's write lock and then call a callback to retrieve what we need.
+ //
+ assert(partial_fetch_required);
+ // As of Dr. No, only clean PAIRs may have pieces missing,
+ // so we do a sanity check here.
+ assert(!p->dirty);
+
+ if (lock_type == PL_READ) {
+ pair_lock(p);
+ p->value_rwlock.read_unlock();
+ p->value_rwlock.write_lock(true);
+ pair_unlock(p);
+ }
+ else if (lock_type == PL_WRITE_CHEAP) {
+ pair_lock(p);
+ p->value_rwlock.write_unlock();
+ p->value_rwlock.write_lock(true);
+ pair_unlock(p);
+ }
+
+ partial_fetch_required = pf_req_callback(p->value_data,read_extraargs);
+ if (partial_fetch_required) {
+ do_partial_fetch(ct, cachefile, p, pf_callback, read_extraargs, true);
+ }
+ if (lock_type == PL_READ) {
+ //
+ // TODO: Zardosht, somehow ensure that a partial eviction cannot happen
+ // between these two calls
+ //
+ pair_lock(p);
+ p->value_rwlock.write_unlock();
+ p->value_rwlock.read_lock();
+ pair_unlock(p);
+ }
+ else if (lock_type == PL_WRITE_CHEAP) {
+ pair_lock(p);
+ p->value_rwlock.write_unlock();
+ p->value_rwlock.write_lock(false);
+ pair_unlock(p);
+ }
+ // small hack here for #5439,
+ // for queries, pf_req_callback does some work for the caller,
+ // that information may be out of date after a write_unlock
+ // followed by a relock, so we do it again.
+ bool pf_required = pf_req_callback(p->value_data,read_extraargs);
+ assert(!pf_required);
+ }
+
+ if (lock_type != PL_READ) {
+ ct->list.read_pending_cheap_lock();
+ bool p_checkpoint_pending = p->checkpoint_pending;
+ p->checkpoint_pending = false;
+ for (uint32_t i = 0; i < num_dependent_pairs; i++) {
+ dep_checkpoint_pending[i] = dependent_pairs[i]->checkpoint_pending;
+ dependent_pairs[i]->checkpoint_pending = false;
+ }
+ ct->list.read_pending_cheap_unlock();
+ checkpoint_pair_and_dependent_pairs(
+ ct,
+ p,
+ p_checkpoint_pending,
+ num_dependent_pairs,
+ dependent_pairs,
+ dep_checkpoint_pending,
+ dependent_dirty
+ );
+ }
+
+ try_again = false;
+exit:
+ return try_again;
+}
+
+int toku_cachetable_get_and_pin_with_dep_pairs (
+ CACHEFILE cachefile,
+ CACHEKEY key,
+ uint32_t fullhash,
+ void**value,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ CACHETABLE_FETCH_CALLBACK fetch_callback,
+ CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ pair_lock_type lock_type,
+ void* read_extraargs, // parameter for fetch_callback, pf_req_callback, and pf_callback
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ PAIR* dependent_pairs,
+ enum cachetable_dirty* dependent_dirty // array stating dirty/cleanness of dependent pairs
+ )
+// See cachetable/cachetable.h
+{
+ CACHETABLE ct = cachefile->cachetable;
+ bool wait = false;
+ bool already_slept = false;
+ bool dep_checkpoint_pending[num_dependent_pairs];
+
+ //
+ // If in the process of pinning the node we add data to the cachetable via a partial fetch
+ // or a full fetch, we may need to first sleep because there is too much data in the
+ // cachetable. In those cases, we set the bool wait to true and goto try_again, so that
+ // we can do our sleep and then restart the function.
+ //
+beginning:
+ if (wait) {
+ // We shouldn't be holding the read list lock while
+ // waiting for the evictor to remove pairs.
+ already_slept = true;
+ ct->ev.wait_for_cache_pressure_to_subside();
+ }
+
+ ct->list.pair_lock_by_fullhash(fullhash);
+ PAIR p = ct->list.find_pair(cachefile, key, fullhash);
+ if (p) {
+ // on entry, holds p->mutex (which is locked via pair_lock_by_fullhash)
+ // on exit, does not hold p->mutex
+ bool try_again = try_pin_pair(
+ p,
+ ct,
+ cachefile,
+ lock_type,
+ num_dependent_pairs,
+ dependent_pairs,
+ dependent_dirty,
+ pf_req_callback,
+ pf_callback,
+ read_extraargs,
+ already_slept
+ );
+ if (try_again) {
+ wait = true;
+ goto beginning;
+ }
+ else {
+ goto got_value;
+ }
+ }
+ else {
+ toku::context fetch_ctx(CTX_FULL_FETCH);
+
+ ct->list.pair_unlock_by_fullhash(fullhash);
+ // We only want to sleep once per call to get_and_pin. If we have already
+ // slept and there is still cache pressure, then we might as
+ // well just complete the call, because the sleep did not help.
+ // By sleeping only once per get_and_pin, we prevent starvation and ensure
+ // that we make progress (however slow) on each thread, which allows
+ // assumptions of the form 'x will eventually happen'.
+ // This only matters in extreme scenarios.
+ if (ct->ev.should_client_thread_sleep() && !already_slept) {
+ wait = true;
+ goto beginning;
+ }
+ if (ct->ev.should_client_wake_eviction_thread()) {
+ ct->ev.signal_eviction_thread();
+ }
+ // Since the pair was not found, we need the write list
+ // lock to add it. So, we have to release the read list lock
+ // first.
+ ct->list.write_list_lock();
+ ct->list.pair_lock_by_fullhash(fullhash);
+ p = ct->list.find_pair(cachefile, key, fullhash);
+ if (p != NULL) {
+ ct->list.write_list_unlock();
+ // on entry, holds p->mutex,
+ // on exit, does not hold p->mutex
+ bool try_again = try_pin_pair(
+ p,
+ ct,
+ cachefile,
+ lock_type,
+ num_dependent_pairs,
+ dependent_pairs,
+ dependent_dirty,
+ pf_req_callback,
+ pf_callback,
+ read_extraargs,
+ already_slept
+ );
+ if (try_again) {
+ wait = true;
+ goto beginning;
+ }
+ else {
+ goto got_value;
+ }
+ }
+ assert(p == NULL);
+
+ // Insert a PAIR into the cachetable
+ // NOTE: At this point we still have the write list lock held.
+ p = cachetable_insert_at(
+ ct,
+ cachefile,
+ key,
+ zero_value,
+ fullhash,
+ zero_attr,
+ write_callback,
+ CACHETABLE_CLEAN
+ );
+ invariant_notnull(p);
+
+ // Pin the pair.
+ p->value_rwlock.write_lock(true);
+ pair_unlock(p);
+
+
+ if (lock_type != PL_READ) {
+ ct->list.read_pending_cheap_lock();
+ invariant(!p->checkpoint_pending);
+ for (uint32_t i = 0; i < num_dependent_pairs; i++) {
+ dep_checkpoint_pending[i] = dependent_pairs[i]->checkpoint_pending;
+ dependent_pairs[i]->checkpoint_pending = false;
+ }
+ ct->list.read_pending_cheap_unlock();
+ }
+ // We should release the lock before we perform
+ // these expensive operations.
+ ct->list.write_list_unlock();
+
+ if (lock_type != PL_READ) {
+ checkpoint_dependent_pairs(
+ ct,
+ num_dependent_pairs,
+ dependent_pairs,
+ dep_checkpoint_pending,
+ dependent_dirty
+ );
+ }
+ uint64_t t0 = get_tnow();
+
+ // Retrieve the value of the PAIR from disk.
+ // The pair being fetched will be marked as pending if a checkpoint happens during the
+ // fetch because begin_checkpoint will mark as pending any pair that is locked even if it is clean.
+ cachetable_fetch_pair(ct, cachefile, p, fetch_callback, read_extraargs, true);
+ cachetable_miss++;
+ cachetable_misstime += get_tnow() - t0;
+
+ // If the lock_type requested was a PL_READ, we downgrade to PL_READ,
+ // but if the request was for a PL_WRITE_CHEAP, we don't bother
+ // downgrading, because we would have to possibly resolve the
+ // checkpointing again, and that would just make this function even
+ // messier.
+ //
+ // TODO(yoni): in case of PL_WRITE_CHEAP, write and use
+ // p->value_rwlock.write_change_status_to_not_expensive(); (Also name it better)
+ // to downgrade from an expensive write lock to a cheap one
+ if (lock_type == PL_READ) {
+ pair_lock(p);
+ p->value_rwlock.write_unlock();
+ p->value_rwlock.read_lock();
+ pair_unlock(p);
+ // small hack here for #5439,
+ // for queries, pf_req_callback does some work for the caller,
+ // that information may be out of date after a write_unlock
+ // followed by a read_lock, so we do it again.
+ bool pf_required = pf_req_callback(p->value_data,read_extraargs);
+ assert(!pf_required);
+ }
+ goto got_value;
+ }
+got_value:
+ *value = p->value_data;
+ return 0;
+}
+
+// Look up a key in the cachetable. If it is found and it is not being written, then
+// acquire the requested lock on the pair, update the LRU list, and return success.
+//
+// However, if the page is clean or has a checkpoint pending, don't return success.
+// This will minimize the number of dirty nodes.
+// Rationale: maybe_get_and_pin is used when the system has an alternative to modifying a node.
+// In the context of checkpointing, we don't want to gratuitously dirty a page, because that causes an I/O.
+// For example, if we can either modify a bit in a dirty parent or modify a bit in a clean child, we should modify
+// the dirty parent (which will have to do I/O eventually anyway) rather than incur a full block write to modify one bit.
+// Similarly, if the checkpoint is actually pending, we don't want to block on it.
+int toku_cachetable_maybe_get_and_pin (CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, pair_lock_type lock_type, void**value) {
+ CACHETABLE ct = cachefile->cachetable;
+ int r = -1;
+ ct->list.pair_lock_by_fullhash(fullhash);
+ PAIR p = ct->list.find_pair(cachefile, key, fullhash);
+ if (p) {
+ const bool lock_is_expensive = (lock_type == PL_WRITE_EXPENSIVE);
+ bool got_lock = false;
+ switch (lock_type) {
+ case PL_READ:
+ if (p->value_rwlock.try_read_lock()) {
+ got_lock = p->dirty;
+
+ if (!got_lock) {
+ p->value_rwlock.read_unlock();
+ }
+ }
+ break;
+ case PL_WRITE_CHEAP:
+ case PL_WRITE_EXPENSIVE:
+ if (p->value_rwlock.try_write_lock(lock_is_expensive)) {
+ // we got the lock fast, so continue
+ ct->list.read_pending_cheap_lock();
+
+ // if pending a checkpoint, then we don't want to return
+ // the value to the user, because we are responsible for
+ // handling the checkpointing, which we do not want to do,
+ // because it is expensive
+ got_lock = p->dirty && !p->checkpoint_pending;
+
+ ct->list.read_pending_cheap_unlock();
+ if (!got_lock) {
+ p->value_rwlock.write_unlock();
+ }
+ }
+ break;
+ }
+ if (got_lock) {
+ pair_touch(p);
+ *value = p->value_data;
+ r = 0;
+ }
+ }
+ ct->list.pair_unlock_by_fullhash(fullhash);
+ return r;
+}
+
+// Used by flusher threads to possibly pin a child on the client thread if pinning is cheap.
+// Same as toku_cachetable_maybe_get_and_pin except that we don't care whether the node is clean or dirty (return the node regardless).
+// All other conditions remain the same.
+int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, pair_lock_type lock_type, void**value) {
+ CACHETABLE ct = cachefile->cachetable;
+ int r = -1;
+ ct->list.pair_lock_by_fullhash(fullhash);
+ PAIR p = ct->list.find_pair(cachefile, key, fullhash);
+ if (p) {
+ const bool lock_is_expensive = (lock_type == PL_WRITE_EXPENSIVE);
+ bool got_lock = false;
+ switch (lock_type) {
+ case PL_READ:
+ if (p->value_rwlock.try_read_lock()) {
+ got_lock = true;
+ } else if (!p->value_rwlock.read_lock_is_expensive()) {
+ p->value_rwlock.write_lock(lock_is_expensive);
+ got_lock = true;
+ }
+ if (got_lock) {
+ pair_touch(p);
+ }
+ pair_unlock(p);
+ break;
+ case PL_WRITE_CHEAP:
+ case PL_WRITE_EXPENSIVE:
+ if (p->value_rwlock.try_write_lock(lock_is_expensive)) {
+ got_lock = true;
+ } else if (!p->value_rwlock.write_lock_is_expensive()) {
+ p->value_rwlock.write_lock(lock_is_expensive);
+ got_lock = true;
+ }
+ if (got_lock) {
+ pair_touch(p);
+ }
+ pair_unlock(p);
+ if (got_lock) {
+ bool checkpoint_pending = get_checkpoint_pending(p, &ct->list);
+ write_locked_pair_for_checkpoint(ct, p, checkpoint_pending);
+ }
+ break;
+ }
+ if (got_lock) {
+ *value = p->value_data;
+ r = 0;
+ }
+ } else {
+ ct->list.pair_unlock_by_fullhash(fullhash);
+ }
+ return r;
+}
+
+int toku_cachetable_get_attr (CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, PAIR_ATTR *attr) {
+ CACHETABLE ct = cachefile->cachetable;
+ int r;
+ ct->list.pair_lock_by_fullhash(fullhash);
+ PAIR p = ct->list.find_pair(cachefile, key, fullhash);
+ if (p) {
+ // Assumes pair lock and full hash lock are the same mutex
+ *attr = p->attr;
+ r = 0;
+ } else {
+ r = -1;
+ }
+ ct->list.pair_unlock_by_fullhash(fullhash);
+ return r;
+}
+
+//
+// internal function to unpin a PAIR.
+// As of Clayface, this may be called in two ways:
+// - with flush false
+// - with flush true
+// The first is for when this is run during run_unlockers in
+// toku_cachetable_get_and_pin_nonblocking, the second is during
+// normal operations. Only during normal operations do we want to possibly
+// induce evictions or sleep.
+//
+static int
+cachetable_unpin_internal(
+ CACHEFILE cachefile,
+ PAIR p,
+ enum cachetable_dirty dirty,
+ PAIR_ATTR attr,
+ bool flush
+ )
+{
+ invariant_notnull(p);
+
+ CACHETABLE ct = cachefile->cachetable;
+ bool added_data_to_cachetable = false;
+
+ // hack for #3969, only exists in case where we run unlockers
+ pair_lock(p);
+ PAIR_ATTR old_attr = p->attr;
+ PAIR_ATTR new_attr = attr;
+ if (dirty) {
+ p->dirty = CACHETABLE_DIRTY;
+ }
+ if (attr.is_valid) {
+ p->attr = attr;
+ }
+ bool read_lock_grabbed = p->value_rwlock.readers() != 0;
+ unpin_pair(p, read_lock_grabbed);
+ pair_unlock(p);
+
+ if (attr.is_valid) {
+ if (new_attr.size > old_attr.size) {
+ added_data_to_cachetable = true;
+ }
+ ct->ev.change_pair_attr(old_attr, new_attr);
+ }
+
+ // see comments above this function to understand this code
+ if (flush && added_data_to_cachetable) {
+ if (ct->ev.should_client_thread_sleep()) {
+ ct->ev.wait_for_cache_pressure_to_subside();
+ }
+ if (ct->ev.should_client_wake_eviction_thread()) {
+ ct->ev.signal_eviction_thread();
+ }
+ }
+ return 0;
+}
+
+int toku_cachetable_unpin(CACHEFILE cachefile, PAIR p, enum cachetable_dirty dirty, PAIR_ATTR attr) {
+ return cachetable_unpin_internal(cachefile, p, dirty, attr, true);
+}
+int toku_cachetable_unpin_ct_prelocked_no_flush(CACHEFILE cachefile, PAIR p, enum cachetable_dirty dirty, PAIR_ATTR attr) {
+ return cachetable_unpin_internal(cachefile, p, dirty, attr, false);
+}
+
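+// Run each unlocker callback in the chain exactly once, marking each entry as
+// no longer locked. Used on the nonblocking get_and_pin path to release locks
+// held by the caller before an operation that may block.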
+static void
+run_unlockers (UNLOCKERS unlockers) {
+ while (unlockers) {
+ assert(unlockers->locked);
+ unlockers->locked = false;
+ unlockers->f(unlockers->extra);
+ unlockers=unlockers->next;
+ }
+}
+
+//
+// This function tries to pin the pair without running the unlockers.
+// If it can pin the pair cheaply, it does so, and returns 0.
+// If the pin will be expensive, it runs unlockers,
+// pins the pair, then releases the pin,
+// and then returns TOKUDB_TRY_AGAIN
+//
+// on entry, pair mutex is held,
+// on exit, pair mutex is NOT held
+static int
+maybe_pin_pair(
+ PAIR p,
+ pair_lock_type lock_type,
+ UNLOCKERS unlockers
+ )
+{
+ int retval = 0;
+ bool expensive = (lock_type == PL_WRITE_EXPENSIVE);
+
+ // We are going to pin the PAIR. In each case, we check whether
+ // acquiring the pin is expensive. If so, we run the unlockers, set
+ // retval to TOKUDB_TRY_AGAIN, and pin AND then release the PAIR.
+ // If not, we pin the PAIR, keep retval at 0, and do not
+ // run the unlockers, as we intend to return the value to the user.
+ if (lock_type == PL_READ) {
+ if (p->value_rwlock.read_lock_is_expensive()) {
+ pair_add_ref_unlocked(p);
+ pair_unlock(p);
+ run_unlockers(unlockers);
+ retval = TOKUDB_TRY_AGAIN;
+ pair_lock(p);
+ pair_release_ref_unlocked(p);
+ }
+ p->value_rwlock.read_lock();
+ }
+ else if (lock_type == PL_WRITE_EXPENSIVE || lock_type == PL_WRITE_CHEAP){
+ if (p->value_rwlock.write_lock_is_expensive()) {
+ pair_add_ref_unlocked(p);
+ pair_unlock(p);
+ run_unlockers(unlockers);
+ // change expensive to false because
+ // we will unpin the pair immediately
+ // after pinning it
+ expensive = false;
+ retval = TOKUDB_TRY_AGAIN;
+ pair_lock(p);
+ pair_release_ref_unlocked(p);
+ }
+ p->value_rwlock.write_lock(expensive);
+ }
+ else {
+ abort();
+ }
+
+ if (retval == TOKUDB_TRY_AGAIN) {
+ unpin_pair(p, (lock_type == PL_READ));
+ }
+ pair_touch(p);
+ pair_unlock(p);
+ return retval;
+}
+
+int toku_cachetable_get_and_pin_nonblocking(
+ CACHEFILE cf,
+ CACHEKEY key,
+ uint32_t fullhash,
+ void**value,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ CACHETABLE_FETCH_CALLBACK fetch_callback,
+ CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ pair_lock_type lock_type,
+ void *read_extraargs,
+ UNLOCKERS unlockers
+ )
+// See cachetable/cachetable.h.
+{
+ CACHETABLE ct = cf->cachetable;
+ assert(lock_type == PL_READ ||
+ lock_type == PL_WRITE_CHEAP ||
+ lock_type == PL_WRITE_EXPENSIVE
+ );
+try_again:
+ ct->list.pair_lock_by_fullhash(fullhash);
+ PAIR p = ct->list.find_pair(cf, key, fullhash);
+ if (p == NULL) {
+ toku::context fetch_ctx(CTX_FULL_FETCH);
+
+ // Not found
+ ct->list.pair_unlock_by_fullhash(fullhash);
+ ct->list.write_list_lock();
+ ct->list.pair_lock_by_fullhash(fullhash);
+ p = ct->list.find_pair(cf, key, fullhash);
+ if (p != NULL) {
+ // We just did another search with the write list lock and
+ // found the pair. This means that in between our
+ // releasing the read list lock and grabbing the write list lock,
+ // another thread snuck in and inserted the PAIR into
+ // the cachetable. For simplicity, we just return
+ // to the top and restart the function.
+ ct->list.write_list_unlock();
+ ct->list.pair_unlock_by_fullhash(fullhash);
+ goto try_again;
+ }
+
+ p = cachetable_insert_at(
+ ct,
+ cf,
+ key,
+ zero_value,
+ fullhash,
+ zero_attr,
+ write_callback,
+ CACHETABLE_CLEAN
+ );
+ assert(p);
+ // grab expensive write lock, because we are about to do a fetch
+ // off disk
+ // No one can access this pair because
+ // we hold the write list lock and we just injected
+ // the pair into the cachetable. Therefore, this lock acquisition
+ // will not block.
+ p->value_rwlock.write_lock(true);
+ pair_unlock(p);
+ run_unlockers(unlockers); // we hold the write list_lock.
+ ct->list.write_list_unlock();
+
+ // at this point, only the pair is pinned,
+ // and no pair mutex held, and
+ // no list lock is held
+ uint64_t t0 = get_tnow();
+ cachetable_fetch_pair(ct, cf, p, fetch_callback, read_extraargs, false);
+ cachetable_miss++;
+ cachetable_misstime += get_tnow() - t0;
+
+ if (ct->ev.should_client_thread_sleep()) {
+ ct->ev.wait_for_cache_pressure_to_subside();
+ }
+ if (ct->ev.should_client_wake_eviction_thread()) {
+ ct->ev.signal_eviction_thread();
+ }
+
+ return TOKUDB_TRY_AGAIN;
+ }
+ else {
+ int r = maybe_pin_pair(p, lock_type, unlockers);
+ if (r == TOKUDB_TRY_AGAIN) {
+ return TOKUDB_TRY_AGAIN;
+ }
+ assert_zero(r);
+
+ if (lock_type != PL_READ) {
+ bool checkpoint_pending = get_checkpoint_pending(p, &ct->list);
+ write_locked_pair_for_checkpoint(ct, p, checkpoint_pending);
+ }
+
+ // At this point, we have pinned the PAIR
+ // and resolved its checkpointing. The pair's
+ // mutex is not held. The read list lock IS held. Before
+ // returning the PAIR to the user, we must
+ // still check for partial fetch
+ bool partial_fetch_required = pf_req_callback(p->value_data,read_extraargs);
+ if (partial_fetch_required) {
+ toku::context fetch_ctx(CTX_PARTIAL_FETCH);
+
+ run_unlockers(unlockers);
+
+ // we are now getting an expensive write lock, because we
+ // are doing a partial fetch. So, if we previously have
+ // either a read lock or a cheap write lock, we need to
+ // release and reacquire the correct lock type
+ if (lock_type == PL_READ) {
+ pair_lock(p);
+ p->value_rwlock.read_unlock();
+ p->value_rwlock.write_lock(true);
+ pair_unlock(p);
+ }
+ else if (lock_type == PL_WRITE_CHEAP) {
+ pair_lock(p);
+ p->value_rwlock.write_unlock();
+ p->value_rwlock.write_lock(true);
+ pair_unlock(p);
+ }
+
+ // Now wait for the I/O to occur.
+ partial_fetch_required = pf_req_callback(p->value_data,read_extraargs);
+ if (partial_fetch_required) {
+ do_partial_fetch(ct, cf, p, pf_callback, read_extraargs, false);
+ }
+ else {
+ pair_lock(p);
+ p->value_rwlock.write_unlock();
+ pair_unlock(p);
+ }
+
+ if (ct->ev.should_client_thread_sleep()) {
+ ct->ev.wait_for_cache_pressure_to_subside();
+ }
+ if (ct->ev.should_client_wake_eviction_thread()) {
+ ct->ev.signal_eviction_thread();
+ }
+
+ return TOKUDB_TRY_AGAIN;
+ }
+ else {
+ *value = p->value_data;
+ return 0;
+ }
+ }
+ // We should not get here. Above code should hit a return in all cases.
+ abort();
+}
+
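+// Work descriptors handed to the background (kibbutz) threads that perform
+// full and partial prefetches for toku_cachefile_prefetch.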
+struct cachefile_prefetch_args {
+ PAIR p;
+ CACHETABLE_FETCH_CALLBACK fetch_callback;
+ void* read_extraargs;
+};
+
+struct cachefile_partial_prefetch_args {
+ PAIR p;
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback;
+ void *read_extraargs;
+};
+
+// Worker thread function to read a pair from a cachefile to memory
+static void cachetable_reader(void* extra) {
+ struct cachefile_prefetch_args* cpargs = (struct cachefile_prefetch_args*)extra;
+ CACHEFILE cf = cpargs->p->cachefile;
+ CACHETABLE ct = cf->cachetable;
+ cachetable_fetch_pair(
+ ct,
+ cpargs->p->cachefile,
+ cpargs->p,
+ cpargs->fetch_callback,
+ cpargs->read_extraargs,
+ false
+ );
+ bjm_remove_background_job(cf->bjm);
+ toku_free(cpargs);
+}
+
+static void cachetable_partial_reader(void* extra) {
+ struct cachefile_partial_prefetch_args *cpargs = (struct cachefile_partial_prefetch_args*)extra;
+ CACHEFILE cf = cpargs->p->cachefile;
+ CACHETABLE ct = cf->cachetable;
+ do_partial_fetch(ct, cpargs->p->cachefile, cpargs->p, cpargs->pf_callback, cpargs->read_extraargs, false);
+ bjm_remove_background_job(cf->bjm);
+ toku_free(cpargs);
+}
+
+int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, uint32_t fullhash,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ CACHETABLE_FETCH_CALLBACK fetch_callback,
+ CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ void *read_extraargs,
+ bool *doing_prefetch)
+// Effect: See the documentation for this function in cachetable/cachetable.h
+{
+ int r = 0;
+ PAIR p = NULL;
+ if (doing_prefetch) {
+ *doing_prefetch = false;
+ }
+ CACHETABLE ct = cf->cachetable;
+ // if cachetable has too much data, don't bother prefetching
+ if (ct->ev.should_client_thread_sleep()) {
+ goto exit;
+ }
+ ct->list.pair_lock_by_fullhash(fullhash);
+ // lookup
+ p = ct->list.find_pair(cf, key, fullhash);
+ // if not found then create a pair and fetch it
+ if (p == NULL) {
+ cachetable_prefetches++;
+ ct->list.pair_unlock_by_fullhash(fullhash);
+ ct->list.write_list_lock();
+ ct->list.pair_lock_by_fullhash(fullhash);
+ p = ct->list.find_pair(cf, key, fullhash);
+ if (p != NULL) {
+ ct->list.write_list_unlock();
+ goto found_pair;
+ }
+
+ r = bjm_add_background_job(cf->bjm);
+ assert_zero(r);
+ p = cachetable_insert_at(
+ ct,
+ cf,
+ key,
+ zero_value,
+ fullhash,
+ zero_attr,
+ write_callback,
+ CACHETABLE_CLEAN
+ );
+ assert(p);
+ p->value_rwlock.write_lock(true);
+ pair_unlock(p);
+ ct->list.write_list_unlock();
+
+ struct cachefile_prefetch_args *MALLOC(cpargs);
+ cpargs->p = p;
+ cpargs->fetch_callback = fetch_callback;
+ cpargs->read_extraargs = read_extraargs;
+ toku_kibbutz_enq(ct->ct_kibbutz, cachetable_reader, cpargs);
+ if (doing_prefetch) {
+ *doing_prefetch = true;
+ }
+ goto exit;
+ }
+
+found_pair:
+ // at this point, p is found, pair's mutex is grabbed, and
+ // no list lock is held
+ // TODO(leif): should this also just go ahead and wait if all there
+ // are to wait for are readers?
+ if (p->value_rwlock.try_write_lock(true)) {
+ // nobody else is using the node, so we should go ahead and prefetch
+ pair_touch(p);
+ pair_unlock(p);
+ bool partial_fetch_required = pf_req_callback(p->value_data, read_extraargs);
+
+ if (partial_fetch_required) {
+ r = bjm_add_background_job(cf->bjm);
+ assert_zero(r);
+ struct cachefile_partial_prefetch_args *MALLOC(cpargs);
+ cpargs->p = p;
+ cpargs->pf_callback = pf_callback;
+ cpargs->read_extraargs = read_extraargs;
+ toku_kibbutz_enq(ct->ct_kibbutz, cachetable_partial_reader, cpargs);
+ if (doing_prefetch) {
+ *doing_prefetch = true;
+ }
+ }
+ else {
+ pair_lock(p);
+ p->value_rwlock.write_unlock();
+ pair_unlock(p);
+ }
+ }
+ else {
+ // Couldn't get the write lock cheaply
+ pair_unlock(p);
+ }
+exit:
+ return 0;
+}
+
+void toku_cachefile_verify (CACHEFILE cf) {
+ toku_cachetable_verify(cf->cachetable);
+}
+
+void toku_cachetable_verify (CACHETABLE ct) {
+ ct->list.verify();
+}
+
+
+
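+// Work item for writing out one dirty PAIR on a background thread while a
+// cachefile (or the whole cachetable) is being flushed for close.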
+struct pair_flush_for_close{
+ PAIR p;
+ BACKGROUND_JOB_MANAGER bjm;
+};
+
+static void cachetable_flush_pair_for_close(void* extra) {
+ struct pair_flush_for_close *CAST_FROM_VOIDP(args, extra);
+ PAIR p = args->p;
+ CACHEFILE cf = p->cachefile;
+ CACHETABLE ct = cf->cachetable;
+ PAIR_ATTR attr;
+ cachetable_only_write_locked_data(
+ &ct->ev,
+ p,
+ false, // not for a checkpoint; this is a close-time flush
+ &attr,
+ false // not a clone
+ );
+ p->dirty = CACHETABLE_CLEAN;
+ bjm_remove_background_job(args->bjm);
+ toku_free(args);
+}
+
+
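+// If the PAIR is dirty, enqueue it on the cachetable kibbutz so that
+// cachetable_flush_pair_for_close writes it out; clean pairs need no work here.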
+static void flush_pair_for_close_on_background_thread(
+ PAIR p,
+ BACKGROUND_JOB_MANAGER bjm,
+ CACHETABLE ct
+ )
+{
+ pair_lock(p);
+ assert(p->value_rwlock.users() == 0);
+ assert(nb_mutex_users(&p->disk_nb_mutex) == 0);
+ assert(!p->cloned_value_data);
+ if (p->dirty == CACHETABLE_DIRTY) {
+ int r = bjm_add_background_job(bjm);
+ assert_zero(r);
+ struct pair_flush_for_close *XMALLOC(args);
+ args->p = p;
+ args->bjm = bjm;
+ toku_kibbutz_enq(ct->ct_kibbutz, cachetable_flush_pair_for_close, args);
+ }
+ pair_unlock(p);
+}
+
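+// Remove a clean, unreferenced PAIR from the cachetable as part of close.
+// If 'completely' is true the PAIR is freed; otherwise it is only evicted from
+// the hashtable so the cachefile can keep it cached for a possible reopen.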
+static void remove_pair_for_close(PAIR p, CACHETABLE ct, bool completely) {
+ pair_lock(p);
+ assert(p->value_rwlock.users() == 0);
+ assert(nb_mutex_users(&p->disk_nb_mutex) == 0);
+ assert(!p->cloned_value_data);
+ assert(p->dirty == CACHETABLE_CLEAN);
+ assert(p->refcount == 0);
+ if (completely) {
+ cachetable_remove_pair(&ct->list, &ct->ev, p);
+ pair_unlock(p);
+ // TODO: Eventually, we should not hold the write list lock during free
+ cachetable_free_pair(p);
+ }
+ else {
+ // if we are not evicting completely,
+ // we only want to remove the PAIR from the cachetable,
+ // that is, remove from the hashtable and various linked
+ // list, but we will keep the PAIRS and the linked list
+ // in the cachefile intact, as they will be cached away
+ // in case an open comes soon.
+ ct->list.evict_from_cachetable(p);
+ pair_unlock(p);
+ }
+}
+
+// helper function for cachetable_flush_cachefile, which happens on a close
+// writes out the dirty pairs on background threads and returns when
+// the writing is done
+static void write_dirty_pairs_for_close(CACHETABLE ct, CACHEFILE cf) {
+ BACKGROUND_JOB_MANAGER bjm = NULL;
+ bjm_init(&bjm);
+ ct->list.write_list_lock(); // TODO: (Zardosht), verify that this lock is unnecessary to take here
+ PAIR p = NULL;
+ // write out dirty PAIRs
+ uint32_t i;
+ if (cf) {
+ for (i = 0, p = cf->cf_head;
+ i < cf->num_pairs;
+ i++, p = p->cf_next)
+ {
+ flush_pair_for_close_on_background_thread(p, bjm, ct);
+ }
+ }
+ else {
+ for (i = 0, p = ct->list.m_checkpoint_head;
+ i < ct->list.m_n_in_table;
+ i++, p = p->clock_next)
+ {
+ flush_pair_for_close_on_background_thread(p, bjm, ct);
+ }
+ }
+ ct->list.write_list_unlock();
+ bjm_wait_for_jobs_to_finish(bjm);
+ bjm_destroy(bjm);
+}
+
+static void remove_all_pairs_for_close(CACHETABLE ct, CACHEFILE cf, bool evict_completely) {
+ ct->list.write_list_lock();
+ if (cf) {
+ if (evict_completely) {
+ // if we are evicting completely, then the PAIRs will
+ // be removed from the linked list managed by the
+ // cachefile, so this while loop works
+ while (cf->num_pairs > 0) {
+ PAIR p = cf->cf_head;
+ remove_pair_for_close(p, ct, evict_completely);
+ }
+ }
+ else {
+ // on the other hand, if we are not evicting completely,
+ // then the cachefile's linked list stays intact, and we must
+ // iterate like this.
+ for (PAIR p = cf->cf_head; p; p = p->cf_next) {
+ remove_pair_for_close(p, ct, evict_completely);
+ }
+ }
+ }
+ else {
+ while (ct->list.m_n_in_table > 0) {
+ PAIR p = ct->list.m_checkpoint_head;
+ // if there is no cachefile, then we better
+ // be evicting completely because we have no
+ // cachefile to save the PAIRs to. At least,
+ // we have no guarantees that the cachefile
+ // will remain good
+ invariant(evict_completely);
+ remove_pair_for_close(p, ct, true);
+ }
+ }
+ ct->list.write_list_unlock();
+}
+
+static void verify_cachefile_flushed(CACHETABLE ct UU(), CACHEFILE cf UU()) {
+#ifdef TOKU_DEBUG_PARANOID
+ // assert here that cachefile is flushed by checking
+ // pair_list and finding no pairs belonging to this cachefile
+ // Make a list of pairs that belong to this cachefile.
+ if (cf) {
+ ct->list.write_list_lock();
+ // assert here that cachefile is flushed by checking
+ // pair_list and finding no pairs belonging to this cachefile
+ // Make a list of pairs that belong to this cachefile.
+ uint32_t i;
+ PAIR p = NULL;
+ for (i = 0, p = ct->list.m_checkpoint_head;
+ i < ct->list.m_n_in_table;
+ i++, p = p->clock_next)
+ {
+ assert(p->cachefile != cf);
+ }
+ ct->list.write_list_unlock();
+ }
+#endif
+}
+
+// Flush (write to disk) all of the pairs that belong to a cachefile (or all pairs if
+// the cachefile is NULL).
+// Must be holding cachetable lock on entry.
+//
+// This function assumes that no client thread is accessing or
+// trying to access the cachefile while this function is executing.
+// This implies no client thread will be trying to lock any nodes
+// belonging to the cachefile.
+//
+// This function also assumes that the cachefile is not in the process
+// of being used by a checkpoint. If a checkpoint is currently happening,
+// it does NOT include this cachefile.
+//
+static void cachetable_flush_cachefile(CACHETABLE ct, CACHEFILE cf, bool evict_completely) {
+ //
+ // Because work on a kibbutz is always done by the client thread,
+ // and this function assumes that no client thread is doing any work
+ // on the cachefile, we assume that no client thread will be adding jobs
+ // to this cachefile's kibbutz.
+ //
+ // The caller of this function must ensure that there are
+ // no jobs added to the kibbutz. This implies that the only work other
+ // threads may be doing is work by the writer threads.
+ //
+ // first write out dirty PAIRs
+ write_dirty_pairs_for_close(ct, cf);
+
+ // now that everything is clean, get rid of everything
+ remove_all_pairs_for_close(ct, cf, evict_completely);
+
+ verify_cachefile_flushed(ct, cf);
+}
+
+/* Requires that no locks be held that are used by the checkpoint logic */
+void
+toku_cachetable_minicron_shutdown(CACHETABLE ct) {
+ int r = ct->cp.shutdown();
+ assert(r==0);
+ ct->cl.destroy();
+}
+
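+// Called just before toku_cachetable_close; presumably enables parallel node
+// serialization so the final flush of dirty pairs at shutdown can proceed in parallel.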
+void toku_cachetable_prepare_close(CACHETABLE ct UU()) {
+ extern bool toku_serialize_in_parallel;
+ toku_unsafe_set(&toku_serialize_in_parallel, true);
+}
+
+/* Requires that it all be flushed. */
+void toku_cachetable_close (CACHETABLE *ctp) {
+ CACHETABLE ct = *ctp;
+ ct->cp.destroy();
+ ct->cl.destroy();
+ ct->cf_list.free_stale_data(&ct->ev);
+ cachetable_flush_cachefile(ct, NULL, true);
+ ct->ev.destroy();
+ ct->list.destroy();
+ ct->cf_list.destroy();
+
+ if (ct->client_kibbutz)
+ toku_kibbutz_destroy(ct->client_kibbutz);
+ if (ct->ct_kibbutz)
+ toku_kibbutz_destroy(ct->ct_kibbutz);
+ if (ct->checkpointing_kibbutz)
+ toku_kibbutz_destroy(ct->checkpointing_kibbutz);
+ toku_free(ct->env_dir);
+ toku_free(ct);
+ *ctp = 0;
+}
+
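+// Test-only helper: find the PAIR for (cachefile, key), taking the read list
+// lock if the caller does not already hold it. Asserts that the pair exists.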
+static PAIR test_get_pair(CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, bool have_ct_lock) {
+ CACHETABLE ct = cachefile->cachetable;
+
+ if (!have_ct_lock) {
+ ct->list.read_list_lock();
+ }
+
+ PAIR p = ct->list.find_pair(cachefile, key, fullhash);
+ assert(p != NULL);
+ if (!have_ct_lock) {
+ ct->list.read_list_unlock();
+ }
+ return p;
+}
+
+//test-only wrapper
+int toku_test_cachetable_unpin(CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR attr) {
+ // By default we don't have the lock
+ PAIR p = test_get_pair(cachefile, key, fullhash, false);
+ return toku_cachetable_unpin(cachefile, p, dirty, attr); // assume read lock is not grabbed, and that it is a write lock
+}
+
+//test-only wrapper
+int toku_test_cachetable_unpin_ct_prelocked_no_flush(CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR attr) {
+ // We hold the cachetable mutex.
+ PAIR p = test_get_pair(cachefile, key, fullhash, true);
+ return toku_cachetable_unpin_ct_prelocked_no_flush(cachefile, p, dirty, attr);
+}
+
+//test-only wrapper
+int toku_test_cachetable_unpin_and_remove (
+ CACHEFILE cachefile,
+ CACHEKEY key,
+ CACHETABLE_REMOVE_KEY remove_key,
+ void* remove_key_extra)
+{
+ uint32_t fullhash = toku_cachetable_hash(cachefile, key);
+ PAIR p = test_get_pair(cachefile, key, fullhash, false);
+ return toku_cachetable_unpin_and_remove(cachefile, p, remove_key, remove_key_extra);
+}
+
+int toku_cachetable_unpin_and_remove (
+ CACHEFILE cachefile,
+ PAIR p,
+ CACHETABLE_REMOVE_KEY remove_key,
+ void* remove_key_extra
+ )
+{
+ invariant_notnull(p);
+ int r = ENOENT;
+ CACHETABLE ct = cachefile->cachetable;
+
+ p->dirty = CACHETABLE_CLEAN; // clear the dirty bit. We're just supposed to remove it.
+ // grab disk_nb_mutex to ensure any background thread writing
+ // out a cloned value completes
+ pair_lock(p);
+ assert(p->value_rwlock.writers());
+ nb_mutex_lock(&p->disk_nb_mutex, p->mutex);
+ pair_unlock(p);
+ assert(p->cloned_value_data == NULL);
+
+ //
+ // take care of key removal
+ //
+ ct->list.write_list_lock();
+ ct->list.read_pending_cheap_lock();
+ bool for_checkpoint = p->checkpoint_pending;
+ // now let's wipe out the pending bit, because we are
+ // removing the PAIR
+ p->checkpoint_pending = false;
+
+ // For the PAIR not to be picked by the
+ // cleaner thread, we set cache_pressure_size to 0.
+ // (This is redundant since we hold the write_list_lock.)
+ // This should not be an issue because we call
+ // cachetable_remove_pair before
+ // releasing the cachetable lock.
+ //
+ CACHEKEY key_to_remove = p->key;
+ p->attr.cache_pressure_size = 0;
+ //
+ // callback for removing the key
+ // for FTNODEs, this leads to calling
+ // toku_free_blocknum
+ //
+ if (remove_key) {
+ remove_key(
+ &key_to_remove,
+ for_checkpoint,
+ remove_key_extra
+ );
+ }
+ ct->list.read_pending_cheap_unlock();
+
+ pair_lock(p);
+ p->value_rwlock.write_unlock();
+ nb_mutex_unlock(&p->disk_nb_mutex);
+ //
+ // As of Clayface (6.5), only these threads may be
+ // blocked waiting to lock this PAIR:
+ // - the checkpoint thread (because a checkpoint is in progress
+ // and the PAIR was in the list of pending pairs)
+ // - a client thread running get_and_pin_nonblocking, who
+ // ran unlockers, then waited on the PAIR lock.
+ // While waiting on a PAIR lock, another thread comes in,
+ // locks the PAIR, and ends up calling unpin_and_remove,
+ // all while get_and_pin_nonblocking is waiting on the PAIR lock.
+ // We did not realize this at first, which caused bug #4357
+ // The following threads CANNOT be blocked waiting on
+ // the PAIR lock:
+ // - a thread trying to run eviction via run_eviction.
+ // That cannot happen because run_eviction only
+ // attempts to lock PAIRS that are not locked, and this PAIR
+ // is locked.
+ // - cleaner thread, for the same reason as a thread running
+ // eviction
+ // - client thread doing a normal get_and_pin. The client is smart
+ // enough to not try to lock a PAIR that another client thread
+ // is trying to unpin and remove. Note that this includes work
+ // done on kibbutzes.
+ // - writer thread. Writer threads do not grab PAIR locks. They
+ // get PAIR locks transferred to them by client threads.
+ //
+
+ // first thing we do is remove the PAIR from the various
+ // cachetable data structures, so no other thread can possibly
+ // access it. We do not want to risk some other thread
+ // trying to lock this PAIR if we release the write list lock
+ // below. If some thread is already waiting on the lock,
+ // then we let that thread grab the lock and finish, but
+ // we don't want any NEW threads to try to grab the PAIR
+ // lock.
+ //
+ // Because we call cachetable_remove_pair and wait,
+ // the threads that may be waiting
+ // on this PAIR lock must be careful to do NOTHING with the PAIR
+ // As per our analysis above, we only need
+ // to make sure the checkpoint thread and get_and_pin_nonblocking do
+ // nothing, and looking at those functions, it is clear they do nothing.
+ //
+ cachetable_remove_pair(&ct->list, &ct->ev, p);
+ ct->list.write_list_unlock();
+ if (p->refcount > 0) {
+ pair_wait_for_ref_release_unlocked(p);
+ }
+ if (p->value_rwlock.users() > 0) {
+ // Need to wait for everyone else to leave
+ // This write lock will be granted only after all waiting
+ // threads are done.
+ p->value_rwlock.write_lock(true);
+ assert(p->refcount == 0);
+ assert(p->value_rwlock.users() == 1); // us
+ assert(!p->checkpoint_pending);
+ assert(p->attr.cache_pressure_size == 0);
+ p->value_rwlock.write_unlock();
+ }
+ // just a sanity check
+ assert(nb_mutex_users(&p->disk_nb_mutex) == 0);
+ assert(p->cloned_value_data == NULL);
+ //Remove pair.
+ pair_unlock(p);
+ cachetable_free_pair(p);
+ r = 0;
+ return r;
+}
+
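+// Callback for txn->open_fts.iterate: record the FILENUM of each open
+// dictionary into the array used to build the checkpoint's xstillopen record.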
+int set_filenum_in_array(const FT &ft, const uint32_t index, FILENUM *const array);
+int set_filenum_in_array(const FT &ft, const uint32_t index, FILENUM *const array) {
+ array[index] = toku_cachefile_filenum(ft->cf);
+ return 0;
+}
+
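+// Checkpoint helper: for each open transaction, log an xstillopen (live) or
+// xstillopenprepared (prepared) record containing its open dictionaries and
+// rollback state. Read-only transactions are skipped.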
+static int log_open_txn (TOKUTXN txn, void* extra) {
+ int r;
+ checkpointer* cp = (checkpointer *)extra;
+ TOKULOGGER logger = txn->logger;
+ FILENUMS open_filenums;
+ uint32_t num_filenums = txn->open_fts.size();
+ FILENUM array[num_filenums];
+ if (toku_txn_is_read_only(txn)) {
+ goto cleanup;
+ }
+ else {
+ cp->increment_num_txns();
+ }
+
+ open_filenums.num = num_filenums;
+ open_filenums.filenums = array;
+ //Fill in open_filenums
+ r = txn->open_fts.iterate<FILENUM, set_filenum_in_array>(array);
+ invariant(r==0);
+ switch (toku_txn_get_state(txn)) {
+ case TOKUTXN_LIVE:{
+ toku_log_xstillopen(logger, NULL, 0, txn,
+ toku_txn_get_txnid(txn),
+ toku_txn_get_txnid(toku_logger_txn_parent(txn)),
+ txn->roll_info.rollentry_raw_count,
+ open_filenums,
+ txn->force_fsync_on_commit,
+ txn->roll_info.num_rollback_nodes,
+ txn->roll_info.num_rollentries,
+ txn->roll_info.spilled_rollback_head,
+ txn->roll_info.spilled_rollback_tail,
+ txn->roll_info.current_rollback);
+ goto cleanup;
+ }
+ case TOKUTXN_PREPARING: {
+ TOKU_XA_XID xa_xid;
+ toku_txn_get_prepared_xa_xid(txn, &xa_xid);
+ toku_log_xstillopenprepared(logger, NULL, 0, txn,
+ toku_txn_get_txnid(txn),
+ &xa_xid,
+ txn->roll_info.rollentry_raw_count,
+ open_filenums,
+ txn->force_fsync_on_commit,
+ txn->roll_info.num_rollback_nodes,
+ txn->roll_info.num_rollentries,
+ txn->roll_info.spilled_rollback_head,
+ txn->roll_info.spilled_rollback_tail,
+ txn->roll_info.current_rollback);
+ goto cleanup;
+ }
+ case TOKUTXN_RETIRED:
+ case TOKUTXN_COMMITTING:
+ case TOKUTXN_ABORTING: {
+ assert(0);
+ }
+ }
+ // default is an error
+ assert(0);
+cleanup:
+ return 0;
+}
+
+// Requires: All three checkpoint-relevant locks must be held (see checkpoint.c).
+// Algorithm: Write a checkpoint record to the log, noting the LSN of that record.
+// Use the begin_checkpoint callback to take necessary snapshots (header, btt)
+// Mark every dirty node as "pending." ("Pending" means that the node must be
+// written to disk before it can be modified.)
+void toku_cachetable_begin_checkpoint (CHECKPOINTER cp, TOKULOGGER UU(logger)) {
+ cp->begin_checkpoint();
+}
+
+
+// This is used by the cachetable_race test.
+static volatile int toku_checkpointing_user_data_status = 0;
+static void toku_cachetable_set_checkpointing_user_data_status (int v) {
+ toku_checkpointing_user_data_status = v;
+}
+int toku_cachetable_get_checkpointing_user_data_status (void) {
+ return toku_checkpointing_user_data_status;
+}
+
+// Requires: The big checkpoint lock must be held (see checkpoint.c).
+// Algorithm: Write all pending nodes to disk
+// Use checkpoint callback to write snapshot information to disk (header, btt)
+// Use end_checkpoint callback to fsync dictionary and log, and to free unused blocks
+// Note: If testcallback is non-NULL (used for testing purposes only), it is called after writing the dictionary but before writing the log
+void toku_cachetable_end_checkpoint(CHECKPOINTER cp, TOKULOGGER UU(logger),
+ void (*testcallback_f)(void*), void* testextra) {
+ cp->end_checkpoint(testcallback_f, testextra);
+}
+
+TOKULOGGER toku_cachefile_logger (CACHEFILE cf) {
+ return cf->cachetable->cp.get_logger();
+}
+
+FILENUM toku_cachefile_filenum (CACHEFILE cf) {
+ return cf->filenum;
+}
+
+// debug functions
+
+int toku_cachetable_assert_all_unpinned (CACHETABLE ct) {
+ uint32_t i;
+ int some_pinned=0;
+ ct->list.read_list_lock();
+ for (i=0; i<ct->list.m_table_size; i++) {
+ PAIR p;
+ for (p=ct->list.m_table[i]; p; p=p->hash_chain) {
+ pair_lock(p);
+ if (p->value_rwlock.users()) {
+ //printf("%s:%d pinned: %" PRId64 " (%p)\n", __FILE__, __LINE__, p->key.b, p->value_data);
+ some_pinned=1;
+ }
+ pair_unlock(p);
+ }
+ }
+ ct->list.read_list_unlock();
+ return some_pinned;
+}
+
+int toku_cachefile_count_pinned (CACHEFILE cf, int print_them) {
+ assert(cf != NULL);
+ int n_pinned=0;
+ CACHETABLE ct = cf->cachetable;
+ ct->list.read_list_lock();
+
+ // Iterate over all the pairs to find pairs specific to the
+ // given cachefile.
+ for (uint32_t i = 0; i < ct->list.m_table_size; i++) {
+ for (PAIR p = ct->list.m_table[i]; p; p = p->hash_chain) {
+ if (p->cachefile == cf) {
+ pair_lock(p);
+ if (p->value_rwlock.users()) {
+ if (print_them) {
+ printf("%s:%d pinned: %" PRId64 " (%p)\n",
+ __FILE__,
+ __LINE__,
+ p->key.b,
+ p->value_data);
+ }
+ n_pinned++;
+ }
+ pair_unlock(p);
+ }
+ }
+ }
+
+ ct->list.read_list_unlock();
+ return n_pinned;
+}
+
+void toku_cachetable_print_state (CACHETABLE ct) {
+ uint32_t i;
+ ct->list.read_list_lock();
+ for (i=0; i<ct->list.m_table_size; i++) {
+ PAIR p = ct->list.m_table[i];
+ if (p != 0) {
+ pair_lock(p);
+ printf("t[%u]=", i);
+ for (p=ct->list.m_table[i]; p; p=p->hash_chain) {
+ printf(" {%" PRId64 ", %p, dirty=%d, pin=%d, size=%ld}", p->key.b, p->cachefile, (int) p->dirty, p->value_rwlock.users(), p->attr.size);
+ }
+ printf("\n");
+ pair_unlock(p);
+ }
+ }
+ ct->list.read_list_unlock();
+}
+
+void toku_cachetable_get_state (CACHETABLE ct, int *num_entries_ptr, int *hash_size_ptr, long *size_current_ptr, long *size_limit_ptr) {
+ ct->list.get_state(num_entries_ptr, hash_size_ptr);
+ ct->ev.get_state(size_current_ptr, size_limit_ptr);
+}
+
+int toku_cachetable_get_key_state (CACHETABLE ct, CACHEKEY key, CACHEFILE cf, void **value_ptr,
+ int *dirty_ptr, long long *pin_ptr, long *size_ptr) {
+ int r = -1;
+ uint32_t fullhash = toku_cachetable_hash(cf, key);
+ ct->list.read_list_lock();
+ PAIR p = ct->list.find_pair(cf, key, fullhash);
+ if (p) {
+ pair_lock(p);
+ if (value_ptr)
+ *value_ptr = p->value_data;
+ if (dirty_ptr)
+ *dirty_ptr = p->dirty;
+ if (pin_ptr)
+ *pin_ptr = p->value_rwlock.users();
+ if (size_ptr)
+ *size_ptr = p->attr.size;
+ r = 0;
+ pair_unlock(p);
+ }
+ ct->list.read_list_unlock();
+ return r;
+}
+
+void
+toku_cachefile_set_userdata (CACHEFILE cf,
+ void *userdata,
+ void (*log_fassociate_during_checkpoint)(CACHEFILE, void*),
+ void (*close_userdata)(CACHEFILE, int, void*, bool, LSN),
+ void (*free_userdata)(CACHEFILE, void*),
+ void (*checkpoint_userdata)(CACHEFILE, int, void*),
+ void (*begin_checkpoint_userdata)(LSN, void*),
+ void (*end_checkpoint_userdata)(CACHEFILE, int, void*),
+ void (*note_pin_by_checkpoint)(CACHEFILE, void*),
+ void (*note_unpin_by_checkpoint)(CACHEFILE, void*)) {
+ cf->userdata = userdata;
+ cf->log_fassociate_during_checkpoint = log_fassociate_during_checkpoint;
+ cf->close_userdata = close_userdata;
+ cf->free_userdata = free_userdata;
+ cf->checkpoint_userdata = checkpoint_userdata;
+ cf->begin_checkpoint_userdata = begin_checkpoint_userdata;
+ cf->end_checkpoint_userdata = end_checkpoint_userdata;
+ cf->note_pin_by_checkpoint = note_pin_by_checkpoint;
+ cf->note_unpin_by_checkpoint = note_unpin_by_checkpoint;
+}
+
+void *toku_cachefile_get_userdata(CACHEFILE cf) {
+ return cf->userdata;
+}
+
+CACHETABLE
+toku_cachefile_get_cachetable(CACHEFILE cf) {
+ return cf->cachetable;
+}
+
+CACHEFILE toku_pair_get_cachefile(PAIR pair) {
+ return pair->cachefile;
+}
+
+//Only called by ft_end_checkpoint
+//Must have access to cf->fd (must be protected)
+void toku_cachefile_fsync(CACHEFILE cf) {
+ toku_file_fsync(cf->fd);
+}
+
+// Make it so when the cachefile closes, the underlying file is unlinked
+void toku_cachefile_unlink_on_close(CACHEFILE cf) {
+ assert(!cf->unlink_on_close);
+ cf->unlink_on_close = true;
+}
+
+// is this cachefile marked as unlink on close?
+bool toku_cachefile_is_unlink_on_close(CACHEFILE cf) {
+ return cf->unlink_on_close;
+}
+
+void toku_cachefile_skip_log_recover_on_close(CACHEFILE cf) {
+ cf->skip_log_recover_on_close = true;
+}
+
+void toku_cachefile_do_log_recover_on_close(CACHEFILE cf) {
+ cf->skip_log_recover_on_close = false;
+}
+
+bool toku_cachefile_is_skip_log_recover_on_close(CACHEFILE cf) {
+ return cf->skip_log_recover_on_close;
+}
+
+uint64_t toku_cachefile_size(CACHEFILE cf) {
+ int64_t file_size;
+ int fd = toku_cachefile_get_fd(cf);
+ int r = toku_os_get_file_size(fd, &file_size);
+ assert_zero(r);
+ return file_size;
+}
+
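+// Join the given path components with '/'. An absolute component replaces the
+// result accumulated so far. The returned string is heap-allocated and must be
+// freed by the caller.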
+char *
+toku_construct_full_name(int count, ...) {
+ va_list ap;
+ char *name = NULL;
+ size_t n = 0;
+ int i;
+ va_start(ap, count);
+ for (i=0; i<count; i++) {
+ char *arg = va_arg(ap, char *);
+ if (arg) {
+ n += 1 + strlen(arg) + 1;
+ char *XMALLOC_N(n, newname);
+ if (name && !toku_os_is_absolute_name(arg))
+ snprintf(newname, n, "%s/%s", name, arg);
+ else
+ snprintf(newname, n, "%s", arg);
+ toku_free(name);
+ name = newname;
+ }
+ }
+ va_end(ap);
+
+ return name;
+}
+
+char *
+toku_cachetable_get_fname_in_cwd(CACHETABLE ct, const char * fname_in_env) {
+ return toku_construct_full_name(2, ct->env_dir, fname_in_env);
+}
+
+static long
+cleaner_thread_rate_pair(PAIR p)
+{
+ return p->attr.cache_pressure_size;
+}
+
+static int const CLEANER_N_TO_CHECK = 8;
+
+int toku_cleaner_thread_for_test (CACHETABLE ct) {
+ return ct->cl.run_cleaner();
+}
+
+int toku_cleaner_thread (void *cleaner_v) {
+ cleaner* cl = (cleaner *) cleaner_v;
+ assert(cl);
+ return cl->run_cleaner();
+}
+
+/////////////////////////////////////////////////////////////////////////
+//
+// cleaner methods
+//
+ENSURE_POD(cleaner);
+
+extern uint force_recovery;
+
+int cleaner::init(uint32_t _cleaner_iterations, pair_list* _pl, CACHETABLE _ct) {
+ // default is no cleaner, for now
+ m_cleaner_cron_init = false;
+ if (force_recovery) return 0;
+ int r = toku_minicron_setup(&m_cleaner_cron, 0, toku_cleaner_thread, this);
+ if (r == 0) {
+ m_cleaner_cron_init = true;
+ }
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&m_cleaner_iterations, sizeof m_cleaner_iterations);
+ m_cleaner_iterations = _cleaner_iterations;
+ m_pl = _pl;
+ m_ct = _ct;
+ m_cleaner_init = true;
+ return r;
+}
+
+// this function is allowed to be called multiple times
+void cleaner::destroy(void) {
+ if (!m_cleaner_init) {
+ return;
+ }
+ if (m_cleaner_cron_init && !toku_minicron_has_been_shutdown(&m_cleaner_cron)) {
+ // for test code only, production code uses toku_cachetable_minicron_shutdown()
+ int r = toku_minicron_shutdown(&m_cleaner_cron);
+ assert(r==0);
+ }
+}
+
+uint32_t cleaner::get_iterations(void) {
+ return m_cleaner_iterations;
+}
+
+void cleaner::set_iterations(uint32_t new_iterations) {
+ m_cleaner_iterations = new_iterations;
+}
+
+uint32_t cleaner::get_period_unlocked(void) {
+ return toku_minicron_get_period_in_seconds_unlocked(&m_cleaner_cron);
+}
+
+//
+// Sets how often the cleaner thread will run, in seconds
+//
+void cleaner::set_period(uint32_t new_period) {
+ toku_minicron_change_period(&m_cleaner_cron, new_period*1000);
+}
+
+// Effect: runs a cleaner.
+//
+// We look through some number of nodes, the first N we see that are
+// unlocked and not involved in a cachefile flush, pick one, and call
+// the cleaner callback. While we're picking a node, we have the
+// cachetable lock the whole time, so we don't need any extra
+// synchronization. Once we have one we want, we lock it and notify the
+// cachefile that we're doing some background work (so a flush won't
+// start). At this point, we can safely unlock the cachetable, do the
+// work (callback), and unlock/release our claim to the cachefile.
+int cleaner::run_cleaner(void) {
+ toku::context cleaner_ctx(CTX_CLEANER);
+
+ int r;
+ uint32_t num_iterations = this->get_iterations();
+ for (uint32_t i = 0; i < num_iterations; ++i) {
+ cleaner_executions++;
+ m_pl->read_list_lock();
+ PAIR best_pair = NULL;
+ int n_seen = 0;
+ long best_score = 0;
+ const PAIR first_pair = m_pl->m_cleaner_head;
+ if (first_pair == NULL) {
+ // nothing in the cachetable, just get out now
+ m_pl->read_list_unlock();
+ break;
+ }
+ // here we select a PAIR for cleaning
+ // look at some number of PAIRS, and
+ // pick what we think is the best one for cleaning
+ //***** IMPORTANT ******
+ // we MUST not pick a PAIR whose rating is 0. We have
+ // numerous assumptions in other parts of the code that
+ // this is the case:
+ // - this is how rollback nodes and leaf nodes are not selected for cleaning
+ // - this is how a thread that is calling unpin_and_remove will prevent
+ // the cleaner thread from picking its PAIR (see comments in that function)
+ do {
+ //
+ // We are already holding onto best_pair; if we run across a pair that
+ // shares the same mutex due to a collision in the hashtable, we need
+ // to be careful.
+ //
+ if (best_pair && m_pl->m_cleaner_head->mutex == best_pair->mutex) {
+ // Advance the cleaner head.
+ long score = 0;
+ // only bother with this pair if it has no current users
+ if (m_pl->m_cleaner_head->value_rwlock.users() == 0) {
+ score = cleaner_thread_rate_pair(m_pl->m_cleaner_head);
+ if (score > best_score) {
+ best_score = score;
+ best_pair = m_pl->m_cleaner_head;
+ }
+ }
+ m_pl->m_cleaner_head = m_pl->m_cleaner_head->clock_next;
+ continue;
+ }
+ pair_lock(m_pl->m_cleaner_head);
+ if (m_pl->m_cleaner_head->value_rwlock.users() > 0) {
+ pair_unlock(m_pl->m_cleaner_head);
+ }
+ else {
+ n_seen++;
+ long score = 0;
+ score = cleaner_thread_rate_pair(m_pl->m_cleaner_head);
+ if (score > best_score) {
+ best_score = score;
+ // Since we found a new best pair, we need to
+ // free the old best pair.
+ if (best_pair) {
+ pair_unlock(best_pair);
+ }
+ best_pair = m_pl->m_cleaner_head;
+ }
+ else {
+ pair_unlock(m_pl->m_cleaner_head);
+ }
+ }
+ // Advance the cleaner head.
+ m_pl->m_cleaner_head = m_pl->m_cleaner_head->clock_next;
+ } while (m_pl->m_cleaner_head != first_pair && n_seen < CLEANER_N_TO_CHECK);
+ m_pl->read_list_unlock();
+
+ //
+ // at this point, if we have found a PAIR for cleaning,
+ // that is, best_pair != NULL, we do the clean
+ //
+ // if best_pair !=NULL, then best_pair->mutex is held
+ // no list lock is held
+ //
+ if (best_pair) {
+ CACHEFILE cf = best_pair->cachefile;
+ // try to add a background job to the manager
+ // if we can't, that means the cachefile is flushing, so
+ // we simply continue the for loop and this iteration
+ // becomes a no-op
+ r = bjm_add_background_job(cf->bjm);
+ if (r) {
+ pair_unlock(best_pair);
+ continue;
+ }
+ best_pair->value_rwlock.write_lock(true);
+ pair_unlock(best_pair);
+ // verify a key assumption.
+ assert(cleaner_thread_rate_pair(best_pair) > 0);
+ // check the checkpoint_pending bit
+ m_pl->read_pending_cheap_lock();
+ bool checkpoint_pending = best_pair->checkpoint_pending;
+ best_pair->checkpoint_pending = false;
+ m_pl->read_pending_cheap_unlock();
+ if (checkpoint_pending) {
+ write_locked_pair_for_checkpoint(m_ct, best_pair, true);
+ }
+
+ bool cleaner_callback_called = false;
+
+ // it's theoretically possible that after writing a PAIR for checkpoint, the
+ // PAIR's heuristic tells us nothing needs to be done. It is not possible
+ // in Dr. Noga, but unit tests verify this behavior works properly.
+ if (cleaner_thread_rate_pair(best_pair) > 0) {
+ r = best_pair->cleaner_callback(best_pair->value_data,
+ best_pair->key,
+ best_pair->fullhash,
+ best_pair->write_extraargs);
+ assert_zero(r);
+ cleaner_callback_called = true;
+ }
+
+ // The cleaner callback must have unlocked the pair, so we
+ // don't need to unlock it if the cleaner callback is called.
+ if (!cleaner_callback_called) {
+ pair_lock(best_pair);
+ best_pair->value_rwlock.write_unlock();
+ pair_unlock(best_pair);
+ }
+ // We need to make sure the cachefile sticks around so a close
+ // can't come destroy it. That's the purpose of this
+ // "add/remove_background_job" business, which means the
+ // cachefile is still valid here, even though the cleaner
+ // callback unlocks the pair.
+ bjm_remove_background_job(cf->bjm);
+ }
+ else {
+ // If we didn't find anything this time around the cachetable,
+ // we probably won't find anything if we run around again, so
+ // just break out from the for-loop now and
+ // we'll try again when the cleaner thread runs again.
+ break;
+ }
+ }
+ return 0;
+}
+
+static_assert(std::is_pod<pair_list>::value, "pair_list isn't POD");
+
+const uint32_t INITIAL_PAIR_LIST_SIZE = 1<<20;
+uint32_t PAIR_LOCK_SIZE = 1<<20;
+
+void toku_pair_list_set_lock_size(uint32_t num_locks) {
+ PAIR_LOCK_SIZE = num_locks;
+}
+
+static void evict_pair_from_cachefile(PAIR p) {
+ CACHEFILE cf = p->cachefile;
+ if (p->cf_next) {
+ p->cf_next->cf_prev = p->cf_prev;
+ }
+ if (p->cf_prev) {
+ p->cf_prev->cf_next = p->cf_next;
+ }
+ else if (p->cachefile->cf_head == p) {
+ cf->cf_head = p->cf_next;
+ }
+ p->cf_prev = p->cf_next = NULL;
+ cf->num_pairs--;
+}
+
+// Allocates the hash table of pairs inside this pair list.
+//
+void pair_list::init() {
+ m_table_size = INITIAL_PAIR_LIST_SIZE;
+ m_num_locks = PAIR_LOCK_SIZE;
+ m_n_in_table = 0;
+ m_clock_head = NULL;
+ m_cleaner_head = NULL;
+ m_checkpoint_head = NULL;
+ m_pending_head = NULL;
+ m_table = NULL;
+
+
+ pthread_rwlockattr_t attr;
+ pthread_rwlockattr_init(&attr);
+#if defined(HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP)
+ pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+#else
+// TODO: need to figure out how to make writer-preferential rwlocks
+// happen on osx
+#endif
+ toku_pthread_rwlock_init(*cachetable_m_list_lock_key, &m_list_lock, &attr);
+ toku_pthread_rwlock_init(*cachetable_m_pending_lock_expensive_key,
+ &m_pending_lock_expensive,
+ &attr);
+ toku_pthread_rwlock_init(
+ *cachetable_m_pending_lock_cheap_key, &m_pending_lock_cheap, &attr);
+ XCALLOC_N(m_table_size, m_table);
+ XCALLOC_N(m_num_locks, m_mutexes);
+ for (uint64_t i = 0; i < m_num_locks; i++) {
+ toku_mutex_init(
+#ifdef TOKU_PFS_MUTEX_EXTENDED_CACHETABLEMMUTEX
+ *cachetable_m_mutex_key,
+#else
+ toku_uninstrumented,
+#endif
+ &m_mutexes[i].aligned_mutex,
+ nullptr);
+ }
+}
+
+// Frees the pair_list hash table. It is expected to be empty by
+// the time this is called. Asserts if there are any
+// pairs left in any of the hash table slots.
+void pair_list::destroy() {
+ // Check if any entries exist in the hash table.
+ for (uint32_t i = 0; i < m_table_size; ++i) {
+ invariant_null(m_table[i]);
+ }
+ for (uint64_t i = 0; i < m_num_locks; i++) {
+ toku_mutex_destroy(&m_mutexes[i].aligned_mutex);
+ }
+ toku_pthread_rwlock_destroy(&m_list_lock);
+ toku_pthread_rwlock_destroy(&m_pending_lock_expensive);
+ toku_pthread_rwlock_destroy(&m_pending_lock_cheap);
+ toku_free(m_table);
+ toku_free(m_mutexes);
+}
+
+// adds a PAIR to the cachetable's structures,
+// but does NOT add it to the list maintained by
+// the cachefile
+void pair_list::add_to_cachetable_only(PAIR p) {
+ // sanity check to make sure that the PAIR does not already exist
+ PAIR pp = this->find_pair(p->cachefile, p->key, p->fullhash);
+ assert(pp == NULL);
+
+ this->add_to_clock(p);
+ this->add_to_hash_chain(p);
+ m_n_in_table++;
+}
+
+// This places the given pair inside of the pair list.
+//
+// requires caller to have grabbed write lock on list.
+// requires caller to have p->mutex held as well
+//
+void pair_list::put(PAIR p) {
+ this->add_to_cachetable_only(p);
+ this->add_to_cf_list(p);
+}
+
+// This removes the given pair completely from the pair list.
+//
+// requires caller to have grabbed write lock on list, and p->mutex held
+//
+void pair_list::evict_completely(PAIR p) {
+ this->evict_from_cachetable(p);
+ this->evict_from_cachefile(p);
+}
+
+// Removes the PAIR from the cachetable's lists,
+// but does NOT impact the list maintained by the cachefile
+void pair_list::evict_from_cachetable(PAIR p) {
+ this->pair_remove(p);
+ this->pending_pairs_remove(p);
+ this->remove_from_hash_chain(p);
+
+ assert(m_n_in_table > 0);
+ m_n_in_table--;
+}
+
+// Removes the PAIR from the cachefile's list of PAIRs
+void pair_list::evict_from_cachefile(PAIR p) {
+ evict_pair_from_cachefile(p);
+}
+
+//
+// Remove pair from linked list for cleaner/clock
+//
+//
+// requires caller to have grabbed write lock on list.
+//
+void pair_list::pair_remove (PAIR p) {
+ if (p->clock_prev == p) {
+ invariant(m_clock_head == p);
+ invariant(p->clock_next == p);
+ invariant(m_cleaner_head == p);
+ invariant(m_checkpoint_head == p);
+ m_clock_head = NULL;
+ m_cleaner_head = NULL;
+ m_checkpoint_head = NULL;
+ }
+ else {
+ if (p == m_clock_head) {
+ m_clock_head = m_clock_head->clock_next;
+ }
+ if (p == m_cleaner_head) {
+ m_cleaner_head = m_cleaner_head->clock_next;
+ }
+ if (p == m_checkpoint_head) {
+ m_checkpoint_head = m_checkpoint_head->clock_next;
+ }
+ p->clock_prev->clock_next = p->clock_next;
+ p->clock_next->clock_prev = p->clock_prev;
+ }
+ p->clock_prev = p->clock_next = NULL;
+}
+
+//Remove a pair from the list of pairs that were marked with the
+//pending bit for the in-progress checkpoint.
+//
+// requires that if the caller is the checkpoint thread, then a read lock
+// is grabbed on the list. Otherwise, must have write lock on list.
+//
+void pair_list::pending_pairs_remove (PAIR p) {
+ if (p->pending_next) {
+ p->pending_next->pending_prev = p->pending_prev;
+ }
+ if (p->pending_prev) {
+ p->pending_prev->pending_next = p->pending_next;
+ }
+ else if (m_pending_head==p) {
+ m_pending_head = p->pending_next;
+ }
+ p->pending_prev = p->pending_next = NULL;
+}
+
+void pair_list::remove_from_hash_chain(PAIR p) {
+ // Remove it from the hash chain.
+ unsigned int h = p->fullhash&(m_table_size - 1);
+ paranoid_invariant(m_table[h] != NULL);
+ if (m_table[h] == p) {
+ m_table[h] = p->hash_chain;
+ }
+ else {
+ PAIR curr = m_table[h];
+ while (curr->hash_chain != p) {
+ curr = curr->hash_chain;
+ }
+ // remove p from the singly-linked list
+ curr->hash_chain = p->hash_chain;
+ }
+ p->hash_chain = NULL;
+}
+
+// Returns the pair in the pair list matching the given
+// cachefile, key, and fullhash. If no such pair is found, null is returned.
+//
+// requires caller to have grabbed either a read lock on the list or
+// bucket's mutex.
+//
+PAIR pair_list::find_pair(CACHEFILE file, CACHEKEY key, uint32_t fullhash) {
+ PAIR found_pair = nullptr;
+ for (PAIR p = m_table[fullhash&(m_table_size - 1)]; p; p = p->hash_chain) {
+ if (p->key.b == key.b && p->cachefile == file) {
+ found_pair = p;
+ break;
+ }
+ }
+ return found_pair;
+}
+
+// Add PAIR to linked list shared by cleaner thread and clock
+//
+// requires caller to have grabbed write lock on list.
+//
+void pair_list::add_to_clock (PAIR p) {
+ // requires that p is not currently in the table.
+ // inserts p into the clock list at the tail.
+
+ p->count = CLOCK_INITIAL_COUNT;
+ //assert either both head and tail are set or they are both NULL
+ // tail and head exist
+ if (m_clock_head) {
+ assert(m_cleaner_head);
+ assert(m_checkpoint_head);
+ // insert right before the head
+ p->clock_next = m_clock_head;
+ p->clock_prev = m_clock_head->clock_prev;
+
+ p->clock_prev->clock_next = p;
+ p->clock_next->clock_prev = p;
+
+ }
+ // this is the first element in the list
+ else {
+ m_clock_head = p;
+ p->clock_next = p->clock_prev = m_clock_head;
+ m_cleaner_head = p;
+ m_checkpoint_head = p;
+ }
+}
+
+// add the pair to the linked list of PAIRs belonging
+// to the same cachefile. This linked list is used
+// in cachetable_flush_cachefile.
+void pair_list::add_to_cf_list(PAIR p) {
+ CACHEFILE cf = p->cachefile;
+ if (cf->cf_head) {
+ cf->cf_head->cf_prev = p;
+ }
+ p->cf_next = cf->cf_head;
+ p->cf_prev = NULL;
+ cf->cf_head = p;
+ cf->num_pairs++;
+}
+
+// Add PAIR to the hashtable
+//
+// requires caller to have grabbed write lock on list
+// and to have grabbed the p->mutex.
+void pair_list::add_to_hash_chain(PAIR p) {
+ uint32_t h = p->fullhash & (m_table_size - 1);
+ p->hash_chain = m_table[h];
+ m_table[h] = p;
+}
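+// Note on the hashing scheme: the bucket index is always computed as
+// fullhash & (m_table_size - 1), here and in find_pair and
+// remove_from_hash_chain, which relies on m_table_size being a power of two
+// (INITIAL_PAIR_LIST_SIZE is 1<<20). The per-bucket chains are simple
+// singly-linked lists threaded through PAIR::hash_chain.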
+
+// test function
+//
+// grabs and releases write list lock
+//
+void pair_list::verify() {
+ this->write_list_lock();
+ uint32_t num_found = 0;
+
+ // First clear all the verify flags by going through the hash chains
+ {
+ uint32_t i;
+ for (i = 0; i < m_table_size; i++) {
+ PAIR p;
+ for (p = m_table[i]; p; p = p->hash_chain) {
+ num_found++;
+ }
+ }
+ }
+ assert(num_found == m_n_in_table);
+ num_found = 0;
+ // Now go through the clock chain, make sure everything in the LRU chain is hashed.
+ {
+ PAIR p;
+ bool is_first = true;
+ for (p = m_clock_head; m_clock_head != NULL && (p != m_clock_head || is_first); p=p->clock_next) {
+ is_first=false;
+ PAIR p2;
+ uint32_t fullhash = p->fullhash;
+ //assert(fullhash==toku_cachetable_hash(p->cachefile, p->key));
+ for (p2 = m_table[fullhash&(m_table_size-1)]; p2; p2=p2->hash_chain) {
+ if (p2==p) {
+ /* found it */
+ num_found++;
+ goto next;
+ }
+ }
+ fprintf(stderr, "Something in the clock chain is not hashed\n");
+ assert(0);
+ next:;
+ }
+ assert (num_found == m_n_in_table);
+ }
+ this->write_list_unlock();
+}
+
+// If the given pointers are not null, assign the number of pairs in
+// this pair list and the size of its hash table.
+//
+//
+// grabs and releases read list lock
+//
+void pair_list::get_state(int *num_entries, int *hash_size) {
+ this->read_list_lock();
+ if (num_entries) {
+ *num_entries = m_n_in_table;
+ }
+ if (hash_size) {
+ *hash_size = m_table_size;
+ }
+ this->read_list_unlock();
+}
+
+void pair_list::read_list_lock() {
+ toku_pthread_rwlock_rdlock(&m_list_lock);
+}
+
+void pair_list::read_list_unlock() {
+ toku_pthread_rwlock_rdunlock(&m_list_lock);
+}
+
+void pair_list::write_list_lock() {
+ toku_pthread_rwlock_wrlock(&m_list_lock);
+}
+
+void pair_list::write_list_unlock() {
+ toku_pthread_rwlock_wrunlock(&m_list_lock);
+}
+
+void pair_list::read_pending_exp_lock() {
+ toku_pthread_rwlock_rdlock(&m_pending_lock_expensive);
+}
+
+void pair_list::read_pending_exp_unlock() {
+ toku_pthread_rwlock_rdunlock(&m_pending_lock_expensive);
+}
+
+void pair_list::write_pending_exp_lock() {
+ toku_pthread_rwlock_wrlock(&m_pending_lock_expensive);
+}
+
+void pair_list::write_pending_exp_unlock() {
+ toku_pthread_rwlock_wrunlock(&m_pending_lock_expensive);
+}
+
+void pair_list::read_pending_cheap_lock() {
+ toku_pthread_rwlock_rdlock(&m_pending_lock_cheap);
+}
+
+void pair_list::read_pending_cheap_unlock() {
+ toku_pthread_rwlock_rdunlock(&m_pending_lock_cheap);
+}
+
+void pair_list::write_pending_cheap_lock() {
+ toku_pthread_rwlock_wrlock(&m_pending_lock_cheap);
+}
+
+void pair_list::write_pending_cheap_unlock() {
+ toku_pthread_rwlock_wrunlock(&m_pending_lock_cheap);
+}
+
+toku_mutex_t* pair_list::get_mutex_for_pair(uint32_t fullhash) {
+ return &m_mutexes[fullhash&(m_num_locks - 1)].aligned_mutex;
+}
+
+void pair_list::pair_lock_by_fullhash(uint32_t fullhash) {
+ toku_mutex_lock(&m_mutexes[fullhash&(m_num_locks - 1)].aligned_mutex);
+}
+
+void pair_list::pair_unlock_by_fullhash(uint32_t fullhash) {
+ toku_mutex_unlock(&m_mutexes[fullhash&(m_num_locks - 1)].aligned_mutex);
+}
+
+
+ENSURE_POD(evictor);
+
+//
+// This is the function that runs eviction on its own thread.
+//
+static void *eviction_thread(void *evictor_v) {
+ evictor *CAST_FROM_VOIDP(evictor, evictor_v);
+ evictor->run_eviction_thread();
+ return toku_pthread_done(evictor_v);
+}
+
+//
+// Starts the eviction thread, assigns external object references,
+// and initializes all counters and condition variables.
+//
+int evictor::init(long _size_limit, pair_list* _pl, cachefile_list* _cf_list, KIBBUTZ _kibbutz, uint32_t eviction_period) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&m_ev_thread_is_running, sizeof m_ev_thread_is_running);
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&m_size_evicting, sizeof m_size_evicting);
+
+ // set max difference to around 500MB
+ int64_t max_diff = (1 << 29);
+
+ m_low_size_watermark = _size_limit;
+ // these values are selected kind of arbitrarily right now as
+ // being a percentage more than low_size_watermark, which is provided
+ // by the caller.
+ m_low_size_hysteresis = (11 * _size_limit)/10; //10% more
+ if ((m_low_size_hysteresis - m_low_size_watermark) > max_diff) {
+ m_low_size_hysteresis = m_low_size_watermark + max_diff;
+ }
+ m_high_size_hysteresis = (5 * _size_limit)/4; // 20% more
+ if ((m_high_size_hysteresis - m_low_size_hysteresis) > max_diff) {
+ m_high_size_hysteresis = m_low_size_hysteresis + max_diff;
+ }
+ m_high_size_watermark = (3 * _size_limit)/2; // 50% more
+ if ((m_high_size_watermark - m_high_size_hysteresis) > max_diff) {
+ m_high_size_watermark = m_high_size_hysteresis + max_diff;
+ }
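+ // Worked example (illustrative numbers only): with _size_limit of 1GB,
+ // the thresholds come out roughly as
+ //   m_low_size_watermark   ~ 1.00GB
+ //   m_low_size_hysteresis  ~ 1.10GB
+ //   m_high_size_hysteresis ~ 1.25GB
+ //   m_high_size_watermark  ~ 1.50GB
+ // since none of the successive gaps exceed the 512MB max_diff clamp.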
+
+ m_enable_partial_eviction = true;
+
+ m_size_reserved = unreservable_memory(_size_limit);
+ m_size_current = 0;
+ m_size_cloned_data = 0;
+ m_size_evicting = 0;
+
+ m_size_nonleaf = create_partitioned_counter();
+ m_size_leaf = create_partitioned_counter();
+ m_size_rollback = create_partitioned_counter();
+ m_size_cachepressure = create_partitioned_counter();
+ m_wait_pressure_count = create_partitioned_counter();
+ m_wait_pressure_time = create_partitioned_counter();
+ m_long_wait_pressure_count = create_partitioned_counter();
+ m_long_wait_pressure_time = create_partitioned_counter();
+
+ m_pl = _pl;
+ m_cf_list = _cf_list;
+ m_kibbutz = _kibbutz;
+ toku_mutex_init(
+ *cachetable_ev_thread_lock_mutex_key, &m_ev_thread_lock, nullptr);
+ toku_cond_init(
+ *cachetable_m_flow_control_cond_key, &m_flow_control_cond, nullptr);
+ toku_cond_init(
+ *cachetable_m_ev_thread_cond_key, &m_ev_thread_cond, nullptr);
+ m_num_sleepers = 0;
+ m_ev_thread_is_running = false;
+ m_period_in_seconds = eviction_period;
+
+ unsigned int seed = (unsigned int) time(NULL);
+ int r = myinitstate_r(seed, m_random_statebuf, sizeof m_random_statebuf, &m_random_data);
+ assert_zero(r);
+
+ // start the background thread
+ m_run_thread = true;
+ m_num_eviction_thread_runs = 0;
+ m_ev_thread_init = false;
+ r = toku_pthread_create(
+ *eviction_thread_key, &m_ev_thread, nullptr, eviction_thread, this);
+ if (r == 0) {
+ m_ev_thread_init = true;
+ }
+ m_evictor_init = true;
+ return r;
+}
+
+//
+// This stops the eviction thread and clears the condition variable.
+//
+// NOTE: This should only be called if there are no evictions in progress.
+//
+void evictor::destroy() {
+ if (!m_evictor_init) {
+ return;
+ }
+ assert(m_size_evicting == 0);
+ //
+ // commented out of Ming, because we could not finish
+ // #5672. Once #5672 is solved, we should restore this
+ //
+ //assert(m_size_current == 0);
+
+ // Stop the eviction thread.
+ if (m_ev_thread_init) {
+ toku_mutex_lock(&m_ev_thread_lock);
+ m_run_thread = false;
+ this->signal_eviction_thread_locked();
+ toku_mutex_unlock(&m_ev_thread_lock);
+ void *ret;
+ int r = toku_pthread_join(m_ev_thread, &ret);
+ assert_zero(r);
+ assert(!m_ev_thread_is_running);
+ }
+ destroy_partitioned_counter(m_size_nonleaf);
+ m_size_nonleaf = NULL;
+ destroy_partitioned_counter(m_size_leaf);
+ m_size_leaf = NULL;
+ destroy_partitioned_counter(m_size_rollback);
+ m_size_rollback = NULL;
+ destroy_partitioned_counter(m_size_cachepressure);
+ m_size_cachepressure = NULL;
+
+ destroy_partitioned_counter(m_wait_pressure_count); m_wait_pressure_count = NULL;
+ destroy_partitioned_counter(m_wait_pressure_time); m_wait_pressure_time = NULL;
+ destroy_partitioned_counter(m_long_wait_pressure_count); m_long_wait_pressure_count = NULL;
+ destroy_partitioned_counter(m_long_wait_pressure_time); m_long_wait_pressure_time = NULL;
+
+ toku_cond_destroy(&m_flow_control_cond);
+ toku_cond_destroy(&m_ev_thread_cond);
+ toku_mutex_destroy(&m_ev_thread_lock);
+}
+
+//
+// Increases status variables and the current size variable
+// of the evictor based on the given pair attribute.
+//
+void evictor::add_pair_attr(PAIR_ATTR attr) {
+ assert(attr.is_valid);
+ add_to_size_current(attr.size);
+ increment_partitioned_counter(m_size_nonleaf, attr.nonleaf_size);
+ increment_partitioned_counter(m_size_leaf, attr.leaf_size);
+ increment_partitioned_counter(m_size_rollback, attr.rollback_size);
+ increment_partitioned_counter(m_size_cachepressure, attr.cache_pressure_size);
+}
+
+//
+// Decreases status variables and the current size variable
+// of the evictor based on the given pair attribute.
+//
+void evictor::remove_pair_attr(PAIR_ATTR attr) {
+ assert(attr.is_valid);
+ remove_from_size_current(attr.size);
+ increment_partitioned_counter(m_size_nonleaf, 0 - attr.nonleaf_size);
+ increment_partitioned_counter(m_size_leaf, 0 - attr.leaf_size);
+ increment_partitioned_counter(m_size_rollback, 0 - attr.rollback_size);
+ increment_partitioned_counter(m_size_cachepressure, 0 - attr.cache_pressure_size);
+}
+
+//
+// Updates this evictor's stats to match the "new" pair attribute given
+// while also removing the given "old" pair attribute.
+//
+void evictor::change_pair_attr(PAIR_ATTR old_attr, PAIR_ATTR new_attr) {
+ this->add_pair_attr(new_attr);
+ this->remove_pair_attr(old_attr);
+}
+
+//
+// Adds the given size to the evictor's estimation of
+// the size of the cachetable.
+//
+void evictor::add_to_size_current(long size) {
+ (void) toku_sync_fetch_and_add(&m_size_current, size);
+}
+
+//
+// Subtracts the given size from the evictor's current
+// approximation of the cachetable size.
+//
+void evictor::remove_from_size_current(long size) {
+ (void) toku_sync_fetch_and_sub(&m_size_current, size);
+}
+
+//
+// Adds the size of cloned data to necessary variables in the evictor
+//
+void evictor::add_cloned_data_size(long size) {
+ (void) toku_sync_fetch_and_add(&m_size_cloned_data, size);
+ add_to_size_current(size);
+}
+
+//
+// Removes the size of cloned data from the necessary variables in the evictor
+//
+void evictor::remove_cloned_data_size(long size) {
+ (void) toku_sync_fetch_and_sub(&m_size_cloned_data, size);
+ remove_from_size_current(size);
+}
+
+//
+// TODO: (Zardosht) comment this function
+//
+uint64_t evictor::reserve_memory(double fraction, uint64_t upper_bound) {
+ toku_mutex_lock(&m_ev_thread_lock);
+ uint64_t reserved_memory = fraction * (m_low_size_watermark - m_size_reserved);
+ if (0) { // debug
+ fprintf(stderr, "%s %" PRIu64 " %" PRIu64 "\n", __PRETTY_FUNCTION__, reserved_memory, upper_bound);
+ }
+ if (upper_bound > 0 && reserved_memory > upper_bound) {
+ reserved_memory = upper_bound;
+ }
+ m_size_reserved += reserved_memory;
+ (void) toku_sync_fetch_and_add(&m_size_current, reserved_memory);
+ this->signal_eviction_thread_locked();
+ toku_mutex_unlock(&m_ev_thread_lock);
+
+ if (this->should_client_thread_sleep()) {
+ this->wait_for_cache_pressure_to_subside();
+ }
+ return reserved_memory;
+}
+
+//
+// TODO: (Zardosht) comment this function
+//
+void evictor::release_reserved_memory(uint64_t reserved_memory){
+ (void) toku_sync_fetch_and_sub(&m_size_current, reserved_memory);
+ toku_mutex_lock(&m_ev_thread_lock);
+ m_size_reserved -= reserved_memory;
+ // signal the eviction thread in order to possibly wake up sleeping clients
+ if (m_num_sleepers > 0) {
+ this->signal_eviction_thread_locked();
+ }
+ toku_mutex_unlock(&m_ev_thread_lock);
+}
+
+//
+// This function is the eviction thread. It runs for the lifetime of
+// the evictor. Goes to sleep for period_in_seconds
+// by waiting on m_ev_thread_cond.
+//
+void evictor::run_eviction_thread(){
+ toku_mutex_lock(&m_ev_thread_lock);
+ while (m_run_thread) {
+ m_num_eviction_thread_runs++; // for test purposes only
+ m_ev_thread_is_running = true;
+ // responsibility of run_eviction to release and
+ // regrab ev_thread_lock as it sees fit
+ this->run_eviction();
+ m_ev_thread_is_running = false;
+
+ if (m_run_thread) {
+ //
+ // sleep until either we are signaled
+ // via signal_eviction_thread or
+ // m_period_in_seconds amount of time has passed
+ //
+ if (m_period_in_seconds) {
+ toku_timespec_t wakeup_time;
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ wakeup_time.tv_sec = tv.tv_sec;
+ wakeup_time.tv_nsec = tv.tv_usec * 1000LL;
+ wakeup_time.tv_sec += m_period_in_seconds;
+ toku_cond_timedwait(
+ &m_ev_thread_cond,
+ &m_ev_thread_lock,
+ &wakeup_time
+ );
+ }
+ // for test purposes, we have an option of
+ // not waiting on a period, but rather sleeping indefinitely
+ else {
+ toku_cond_wait(&m_ev_thread_cond, &m_ev_thread_lock);
+ }
+ }
+ }
+ toku_mutex_unlock(&m_ev_thread_lock);
+}
+
+//
+// runs eviction.
+// on entry, ev_thread_lock is grabbed, on exit, ev_thread_lock must still be grabbed
+// it is the responsibility of this function to release and reacquire ev_thread_lock as it sees fit.
+//
+void evictor::run_eviction(){
+ //
+ // These variables will help us detect if everything in the clock is currently being accessed.
+ // We must detect this case otherwise we will end up in an infinite loop below.
+ //
+ bool exited_early = false;
+ uint32_t num_pairs_examined_without_evicting = 0;
+
+ while (this->eviction_needed()) {
+ if (m_num_sleepers > 0 && this->should_sleeping_clients_wakeup()) {
+ toku_cond_broadcast(&m_flow_control_cond);
+ }
+ // release ev_thread_lock so that eviction may run without holding mutex
+ toku_mutex_unlock(&m_ev_thread_lock);
+
+ // first try to do an eviction from stale cachefiles
+ bool some_eviction_ran = m_cf_list->evict_some_stale_pair(this);
+ if (!some_eviction_ran) {
+ m_pl->read_list_lock();
+ PAIR curr_in_clock = m_pl->m_clock_head;
+ // if nothing to evict, we need to exit
+ if (!curr_in_clock) {
+ m_pl->read_list_unlock();
+ toku_mutex_lock(&m_ev_thread_lock);
+ exited_early = true;
+ goto exit;
+ }
+ if (num_pairs_examined_without_evicting > m_pl->m_n_in_table) {
+ // we have a cycle where everything in the clock is in use
+ // do not return an error
+ // just let memory be overfull
+ m_pl->read_list_unlock();
+ toku_mutex_lock(&m_ev_thread_lock);
+ exited_early = true;
+ goto exit;
+ }
+ bool eviction_run = run_eviction_on_pair(curr_in_clock);
+ if (eviction_run) {
+ // reset the count
+ num_pairs_examined_without_evicting = 0;
+ }
+ else {
+ num_pairs_examined_without_evicting++;
+ }
+ // at this point, either curr_in_clock is still in the list because it has not been fully evicted,
+ // in which case we need to advance m_pl->m_clock_head; or curr_in_clock has been fully evicted,
+ // in which case we must NOT move m_pl->m_clock_head, because removing curr_in_clock
+ // already adjusted m_pl->m_clock_head
+ if (m_pl->m_clock_head && (m_pl->m_clock_head == curr_in_clock)) {
+ m_pl->m_clock_head = m_pl->m_clock_head->clock_next;
+ }
+ m_pl->read_list_unlock();
+ }
+ toku_mutex_lock(&m_ev_thread_lock);
+ }
+
+exit:
+ if (m_num_sleepers > 0 && (exited_early || this->should_sleeping_clients_wakeup())) {
+ toku_cond_broadcast(&m_flow_control_cond);
+ }
+ return;
+}
+
+//
+// NOTE: Cachetable lock held on entry.
+// Runs eviction on the given PAIR. This may be a
+// partial eviction or full eviction.
+//
+// on entry, pair mutex is NOT held, but pair list's read list lock
+// IS held
+// on exit, the same conditions must apply
+//
+bool evictor::run_eviction_on_pair(PAIR curr_in_clock) {
+ uint32_t n_in_table;
+ int64_t size_current;
+ bool ret_val = false;
+ // function meant to be called on PAIR that is not being accessed right now
+ CACHEFILE cf = curr_in_clock->cachefile;
+ int r = bjm_add_background_job(cf->bjm);
+ if (r) {
+ goto exit;
+ }
+ pair_lock(curr_in_clock);
+ // these are the circumstances under which we don't run eviction on a pair:
+ // - if other users are waiting on the lock
+ // - if the PAIR is referenced by users
+ // - if the PAIR's disk_nb_mutex is in use, implying that it is
+ // undergoing a checkpoint
+ if (curr_in_clock->value_rwlock.users() ||
+ curr_in_clock->refcount > 0 ||
+ nb_mutex_users(&curr_in_clock->disk_nb_mutex))
+ {
+ pair_unlock(curr_in_clock);
+ bjm_remove_background_job(cf->bjm);
+ goto exit;
+ }
+
+ // extract and use these values so that we don't risk them changing
+ // out from underneath us in calculations below.
+ n_in_table = m_pl->m_n_in_table;
+ size_current = m_size_current;
+
+ // now that we have the pair mutex we care about, we can
+ // release the read list lock and reacquire it at the end of the function
+ m_pl->read_list_unlock();
+ ret_val = true;
+ if (curr_in_clock->count > 0) {
+ toku::context pe_ctx(CTX_PARTIAL_EVICTION);
+
+ uint32_t curr_size = curr_in_clock->attr.size;
+ // if the size of this PAIR is greater than the average size of PAIRs
+ // in the cachetable, then decrement it, otherwise, decrement
+ // probabilistically
+ if (curr_size*n_in_table >= size_current) {
+ curr_in_clock->count--;
+ } else {
+ // generate a random number between 0 and 2^16 - 1
+ assert(size_current <= (INT64_MAX / ((1<<16)-1))); // to protect against possible overflows
+ int32_t rnd = myrandom_r(&m_random_data) % (1<<16);
+ // The if-statement below will be true with probability of
+ // curr_size/(average size of PAIR in cachetable)
+ // Here is how the math is done:
+ // average_size = size_current/n_in_table
+ // curr_size/average_size = curr_size*n_in_table/size_current
+ // we evaluate whether a random number from 0 to 2^16 is less
+ // than curr_size/average_size * 2^16. So, our if-clause should be
+ // if (2^16*curr_size/average_size > rnd)
+ // this evaluates to:
+ // if (2^16*curr_size*n_in_table/size_current > rnd)
+ // by multiplying each side of the equation by size_current, we get
+ // if (2^16*curr_size*n_in_table > rnd*size_current)
+ // and dividing each side by 2^16,
+ // we get the if-clause below
+ //
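+ // Worked example (made-up numbers): with curr_size = 4MiB,
+ // n_in_table = 1024 and size_current = 8GiB, the average PAIR size
+ // is 8MiB, so the count should be decremented with probability 1/2;
+ // indeed 2^16 * 4MiB * 1024 / 8GiB = 32768, i.e. half of rnd's range.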
+ if ((((int64_t)curr_size) * n_in_table) >= (((int64_t)rnd) * size_current)>>16) {
+ curr_in_clock->count--;
+ }
+ }
+
+ if (m_enable_partial_eviction) {
+ // call the partial eviction callback
+ curr_in_clock->value_rwlock.write_lock(true);
+
+ void *value = curr_in_clock->value_data;
+ void* disk_data = curr_in_clock->disk_data;
+ void *write_extraargs = curr_in_clock->write_extraargs;
+ enum partial_eviction_cost cost;
+ long bytes_freed_estimate = 0;
+ curr_in_clock->pe_est_callback(value, disk_data,
+ &bytes_freed_estimate, &cost,
+ write_extraargs);
+ if (cost == PE_CHEAP) {
+ pair_unlock(curr_in_clock);
+ curr_in_clock->size_evicting_estimate = 0;
+ this->do_partial_eviction(curr_in_clock);
+ bjm_remove_background_job(cf->bjm);
+ } else if (cost == PE_EXPENSIVE) {
+ // only bother running an expensive partial eviction
+ // if it is expected to free space
+ if (bytes_freed_estimate > 0) {
+ pair_unlock(curr_in_clock);
+ curr_in_clock->size_evicting_estimate = bytes_freed_estimate;
+ toku_mutex_lock(&m_ev_thread_lock);
+ m_size_evicting += bytes_freed_estimate;
+ toku_mutex_unlock(&m_ev_thread_lock);
+ toku_kibbutz_enq(m_kibbutz, cachetable_partial_eviction,
+ curr_in_clock);
+ } else {
+ curr_in_clock->value_rwlock.write_unlock();
+ pair_unlock(curr_in_clock);
+ bjm_remove_background_job(cf->bjm);
+ }
+ } else {
+ assert(false);
+ }
+ } else {
+ pair_unlock(curr_in_clock);
+ bjm_remove_background_job(cf->bjm);
+ }
+ } else {
+ toku::context pe_ctx(CTX_FULL_EVICTION);
+
+ // responsibility of try_evict_pair to eventually remove background job
+ // pair's mutex is still grabbed here
+ this->try_evict_pair(curr_in_clock);
+ }
+ // regrab the read list lock, because the caller assumes
+ // that it is held. The contract requires this.
+ m_pl->read_list_lock();
+exit:
+ return ret_val;
+}
+
+struct pair_unpin_with_new_attr_extra {
+ pair_unpin_with_new_attr_extra(evictor *e, PAIR p) :
+ ev(e), pair(p) {
+ }
+ evictor *ev;
+ PAIR pair;
+};
+
+static void pair_unpin_with_new_attr(PAIR_ATTR new_attr, void *extra) {
+ struct pair_unpin_with_new_attr_extra *info =
+ reinterpret_cast<struct pair_unpin_with_new_attr_extra *>(extra);
+ PAIR p = info->pair;
+ evictor *ev = info->ev;
+
+ // change the attr in the evictor, then update the value in the pair
+ ev->change_pair_attr(p->attr, new_attr);
+ p->attr = new_attr;
+
+ // unpin
+ pair_lock(p);
+ p->value_rwlock.write_unlock();
+ pair_unlock(p);
+}
+
+//
+// on entry and exit, pair's mutex is not held
+// on exit, PAIR is unpinned
+//
+void evictor::do_partial_eviction(PAIR p) {
+ // Copy the old attr
+ PAIR_ATTR old_attr = p->attr;
+ long long size_evicting_estimate = p->size_evicting_estimate;
+
+ struct pair_unpin_with_new_attr_extra extra(this, p);
+ p->pe_callback(p->value_data, old_attr, p->write_extraargs,
+ // passed as the finalize continuation, which allows the
+ // pe_callback to unpin the node before doing expensive cleanup
+ pair_unpin_with_new_attr, &extra);
+
+ // now that the pe_callback (and its pair_unpin_with_new_attr continuation)
+ // have finished, we can safely decrease size_evicting
+ this->decrease_size_evicting(size_evicting_estimate);
+}
+
+//
+// CT lock held on entry
+// background job has been added for p->cachefile on entry
+// responsibility of this function to make sure that background job is removed
+//
+// on entry, pair's mutex is held, on exit, the pair's mutex is NOT held
+//
+void evictor::try_evict_pair(PAIR p) {
+ CACHEFILE cf = p->cachefile;
+ // evictions without a write, or unpinned pairs that are clean,
+ // can be run in the current thread
+
+ // the only caller, run_eviction_on_pair, should call this function
+ // only if no one else is trying to use it
+ assert(!p->value_rwlock.users());
+ p->value_rwlock.write_lock(true);
+ // if the PAIR is dirty, the running eviction requires writing the
+ // PAIR out. if the disk_nb_mutex is grabbed, then running
+ // eviction requires waiting for the disk_nb_mutex to become available,
+ // which may be expensive. Hence, if either is true, we
+ // do the eviction on a writer thread
+ if (!p->dirty && (nb_mutex_writers(&p->disk_nb_mutex) == 0)) {
+ p->size_evicting_estimate = 0;
+ //
+ // This method will unpin PAIR and release PAIR mutex
+ //
+ // because the PAIR is not dirty, we can safely pass
+ // false for the for_checkpoint parameter
+ this->evict_pair(p, false);
+ bjm_remove_background_job(cf->bjm);
+ }
+ else {
+ pair_unlock(p);
+ toku_mutex_lock(&m_ev_thread_lock);
+ assert(m_size_evicting >= 0);
+ p->size_evicting_estimate = p->attr.size;
+ m_size_evicting += p->size_evicting_estimate;
+ assert(m_size_evicting >= 0);
+ toku_mutex_unlock(&m_ev_thread_lock);
+ toku_kibbutz_enq(m_kibbutz, cachetable_evicter, p);
+ }
+}
+
+//
+// Requires: This thread must hold the write lock (nb_mutex) for the pair.
+// The pair's mutex (p->mutex) is also held.
+// on exit, neither is held
+//
+void evictor::evict_pair(PAIR p, bool for_checkpoint) {
+ if (p->dirty) {
+ pair_unlock(p);
+ cachetable_write_locked_pair(this, p, for_checkpoint);
+ pair_lock(p);
+ }
+ // one thing we can do here is extract the size_evicting estimate,
+ // have decrease_size_evicting take the estimate and not the pair,
+ // and do this work after we have called
+ // cachetable_maybe_remove_and_free_pair
+ this->decrease_size_evicting(p->size_evicting_estimate);
+ // if we are to remove this pair, we need the write list lock,
+ // to get it in a way that avoids deadlocks, we must first release
+ // the pair's mutex, then grab the write list lock, then regrab the
+ // pair's mutex. The pair cannot go anywhere because
+ // the pair is still pinned
+ nb_mutex_lock(&p->disk_nb_mutex, p->mutex);
+ pair_unlock(p);
+ m_pl->write_list_lock();
+ pair_lock(p);
+ p->value_rwlock.write_unlock();
+ nb_mutex_unlock(&p->disk_nb_mutex);
+ // at this point, we have the pair list's write list lock
+ // and we have the pair's mutex (p->mutex) held
+
+ // this ensures that a clone running in the background first completes
+ bool removed = false;
+ if (p->value_rwlock.users() == 0 && p->refcount == 0) {
+ // assumption is that if we are about to remove the pair
+ // that no one has grabbed the disk_nb_mutex,
+ // and that there is no cloned_value_data, because
+ // no one is writing a cloned value out.
+ assert(nb_mutex_users(&p->disk_nb_mutex) == 0);
+ assert(p->cloned_value_data == NULL);
+ cachetable_remove_pair(m_pl, this, p);
+ removed = true;
+ }
+ pair_unlock(p);
+ m_pl->write_list_unlock();
+ // do not want to hold the write list lock while freeing a pair
+ if (removed) {
+ cachetable_free_pair(p);
+ }
+}
+
+//
+// this function handles the responsibilities for writer threads when they
+// decrease size_evicting. The responsibilities are:
+// - decrease m_size_evicting in a thread safe manner
+// - in some circumstances, signal the eviction thread
+//
+void evictor::decrease_size_evicting(long size_evicting_estimate) {
+ if (size_evicting_estimate > 0) {
+ toku_mutex_lock(&m_ev_thread_lock);
+ int64_t buffer = m_high_size_hysteresis - m_low_size_watermark;
+ // if size_evicting is transitioning from greater than buffer to below buffer, and
+ // some client threads are sleeping, we need to wake up the eviction thread.
+ // Here is why. In this scenario, we are in one of two cases:
+ // - size_current - size_evicting < low_size_watermark
+ // If this is true, then size_current < high_size_hysteresis, which
+ // means we need to wake up sleeping clients
+ // - size_current - size_evicting > low_size_watermark,
+ // which means more evictions must be run.
+ // The consequences of both cases are the responsibility
+ // of the eviction thread.
+ //
+ bool need_to_signal_ev_thread =
+ (m_num_sleepers > 0) &&
+ !m_ev_thread_is_running &&
+ (m_size_evicting > buffer) &&
+ ((m_size_evicting - size_evicting_estimate) <= buffer);
+ m_size_evicting -= size_evicting_estimate;
+ assert(m_size_evicting >= 0);
+ if (need_to_signal_ev_thread) {
+ this->signal_eviction_thread_locked();
+ }
+ toku_mutex_unlock(&m_ev_thread_lock);
+ }
+}
+
+//
+// Wait for cache table space to become available
+// size_current is number of bytes currently occupied by data (referred to by pairs)
+// size_evicting is number of bytes queued up to be evicted
+//
+void evictor::wait_for_cache_pressure_to_subside() {
+ uint64_t t0 = toku_current_time_microsec();
+ toku_mutex_lock(&m_ev_thread_lock);
+ m_num_sleepers++;
+ this->signal_eviction_thread_locked();
+ toku_cond_wait(&m_flow_control_cond, &m_ev_thread_lock);
+ m_num_sleepers--;
+ toku_mutex_unlock(&m_ev_thread_lock);
+ uint64_t t1 = toku_current_time_microsec();
+ increment_partitioned_counter(m_wait_pressure_count, 1);
+ uint64_t tdelta = t1 - t0;
+ increment_partitioned_counter(m_wait_pressure_time, tdelta);
+ if (tdelta > 1000000) {
+ increment_partitioned_counter(m_long_wait_pressure_count, 1);
+ increment_partitioned_counter(m_long_wait_pressure_time, tdelta);
+ }
+}
+
+//
+// Get the status of the current estimated size of the cachetable,
+// and the evictor's set limit.
+//
+void evictor::get_state(long *size_current_ptr, long *size_limit_ptr) {
+ if (size_current_ptr) {
+ *size_current_ptr = m_size_current;
+ }
+ if (size_limit_ptr) {
+ *size_limit_ptr = m_low_size_watermark;
+ }
+}
+
+//
+// Force the eviction thread to do some work.
+//
+// This function does not require any mutex to be held.
+// As a result, scheduling is not guaranteed, but that is tolerable.
+//
+void evictor::signal_eviction_thread() {
+ toku_mutex_lock(&m_ev_thread_lock);
+ toku_cond_signal(&m_ev_thread_cond);
+ toku_mutex_unlock(&m_ev_thread_lock);
+}
+
+void evictor::signal_eviction_thread_locked() {
+ toku_cond_signal(&m_ev_thread_cond);
+}
+
+//
+// Returns true if the cachetable is so over subscribed, that a client thread should sleep
+//
+// This function may be called in a thread-unsafe manner. Locks are not
+// required to read size_current. The result is that
+// the values may be a little off, but we think that is tolerable.
+//
+bool evictor::should_client_thread_sleep(){
+ return unsafe_read_size_current() > m_high_size_watermark;
+}
+
+//
+// Returns true if a sleeping client should be woken up because
+// the cachetable is not overly subscribed
+//
+// This function may be called in a thread-unsafe manner. Locks are not
+// required to read size_current. The result is that
+// the values may be a little off, but we think that is tolerable.
+//
+bool evictor::should_sleeping_clients_wakeup() {
+ return unsafe_read_size_current() <= m_high_size_hysteresis;
+}
+
+//
+// Returns true if a client thread should try to wake up the eviction
+// thread because the client thread has noticed too much data taken
+// up in the cachetable.
+//
+// This function may be called in a thread-unsafe manner. Locks are not
+// required to read size_current or size_evicting. The result is that
+// the values may be a little off, but we think that is tolerable.
+// If the caller wants to ensure that ev_thread_is_running and size_evicting
+// are accurate, then the caller must hold ev_thread_lock before
+// calling this function.
+//
+bool evictor::should_client_wake_eviction_thread() {
+ return
+ !m_ev_thread_is_running &&
+ ((unsafe_read_size_current() - m_size_evicting) > m_low_size_hysteresis);
+}
+
+//
+// Determines if eviction is needed. If the current size of
+// the cachetable exceeds the sum of our fixed size limit and
+// the amount of data currently being evicted, then eviction is needed
+//
+bool evictor::eviction_needed() {
+ return (m_size_current - m_size_evicting) > m_low_size_watermark;
+}
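+// Recap of how the four size thresholds (set in evictor::init) are used by
+// the predicates above, from smallest to largest:
+//   m_low_size_watermark   -- eviction_needed()
+//   m_low_size_hysteresis  -- should_client_wake_eviction_thread()
+//   m_high_size_hysteresis -- should_sleeping_clients_wakeup()
+//   m_high_size_watermark  -- should_client_thread_sleep()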
+
+inline int64_t evictor::unsafe_read_size_current(void) const {
+ return m_size_current;
+}
+
+void evictor::fill_engine_status() {
+ CT_STATUS_VAL(CT_SIZE_CURRENT) = m_size_current;
+ CT_STATUS_VAL(CT_SIZE_LIMIT) = m_low_size_hysteresis;
+ CT_STATUS_VAL(CT_SIZE_WRITING) = m_size_evicting;
+ CT_STATUS_VAL(CT_SIZE_NONLEAF) = read_partitioned_counter(m_size_nonleaf);
+ CT_STATUS_VAL(CT_SIZE_LEAF) = read_partitioned_counter(m_size_leaf);
+ CT_STATUS_VAL(CT_SIZE_ROLLBACK) = read_partitioned_counter(m_size_rollback);
+ CT_STATUS_VAL(CT_SIZE_CACHEPRESSURE) = read_partitioned_counter(m_size_cachepressure);
+ CT_STATUS_VAL(CT_SIZE_CLONED) = m_size_cloned_data;
+ CT_STATUS_VAL(CT_WAIT_PRESSURE_COUNT) = read_partitioned_counter(m_wait_pressure_count);
+ CT_STATUS_VAL(CT_WAIT_PRESSURE_TIME) = read_partitioned_counter(m_wait_pressure_time);
+ CT_STATUS_VAL(CT_LONG_WAIT_PRESSURE_COUNT) = read_partitioned_counter(m_long_wait_pressure_count);
+ CT_STATUS_VAL(CT_LONG_WAIT_PRESSURE_TIME) = read_partitioned_counter(m_long_wait_pressure_time);
+}
+
+void evictor::set_enable_partial_eviction(bool enabled) {
+ m_enable_partial_eviction = enabled;
+}
+
+bool evictor::get_enable_partial_eviction(void) const {
+ return m_enable_partial_eviction;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+ENSURE_POD(checkpointer);
+
+//
+// Sets the pair list, logger, evictor, and cachefile list references in this checkpointer class; this is temporary.
+//
+int checkpointer::init(pair_list *_pl,
+ TOKULOGGER _logger,
+ evictor *_ev,
+ cachefile_list *files) {
+ m_list = _pl;
+ m_logger = _logger;
+ m_ev = _ev;
+ m_cf_list = files;
+ bjm_init(&m_checkpoint_clones_bjm);
+
+ // Default is no checkpointing.
+ m_checkpointer_cron_init = false;
+ int r = toku_minicron_setup(&m_checkpointer_cron, 0, checkpoint_thread, this);
+ if (r == 0) {
+ m_checkpointer_cron_init = true;
+ }
+ m_checkpointer_init = true;
+ return r;
+}
+
+void checkpointer::destroy() {
+ if (!m_checkpointer_init) {
+ return;
+ }
+ if (m_checkpointer_cron_init && !this->has_been_shutdown()) {
+ // for test code only, production code uses toku_cachetable_minicron_shutdown()
+ int r = this->shutdown();
+ assert(r == 0);
+ }
+ bjm_destroy(m_checkpoint_clones_bjm);
+}
+
+//
+// Sets how often the checkpoint thread will run, in seconds
+//
+void checkpointer::set_checkpoint_period(uint32_t new_period) {
+ toku_minicron_change_period(&m_checkpointer_cron, new_period*1000);
+}
+
+//
+// Returns how often the checkpoint thread will run, in seconds.
+//
+uint32_t checkpointer::get_checkpoint_period() {
+ return toku_minicron_get_period_in_seconds_unlocked(&m_checkpointer_cron);
+}
+
+//
+// Stops the checkpoint thread.
+//
+int checkpointer::shutdown() {
+ return toku_minicron_shutdown(&m_checkpointer_cron);
+}
+
+//
+// If checkpointing is running, this returns false.
+//
+bool checkpointer::has_been_shutdown() {
+ return toku_minicron_has_been_shutdown(&m_checkpointer_cron);
+}
+
+TOKULOGGER checkpointer::get_logger() {
+ return m_logger;
+}
+
+void checkpointer::increment_num_txns() {
+ m_checkpoint_num_txns++;
+}
+
+struct iterate_begin_checkpoint {
+ LSN lsn_of_checkpoint_in_progress;
+ iterate_begin_checkpoint(LSN lsn) : lsn_of_checkpoint_in_progress(lsn) { }
+ static int fn(const CACHEFILE &cf, const uint32_t UU(idx), struct iterate_begin_checkpoint *info) {
+ assert(cf->begin_checkpoint_userdata);
+ if (cf->for_checkpoint) {
+ cf->begin_checkpoint_userdata(info->lsn_of_checkpoint_in_progress, cf->userdata);
+ }
+ return 0;
+ }
+};
+
+//
+// Update the user data in any cachefiles in our checkpoint list.
+//
+void checkpointer::update_cachefiles() {
+ struct iterate_begin_checkpoint iterate(m_lsn_of_checkpoint_in_progress);
+ int r = m_cf_list->m_active_fileid.iterate<struct iterate_begin_checkpoint,
+ iterate_begin_checkpoint::fn>(&iterate);
+ assert_zero(r);
+}
+
+struct iterate_note_pin {
+ static int fn(const CACHEFILE &cf, uint32_t UU(idx), void **UU(extra)) {
+ assert(cf->note_pin_by_checkpoint);
+ cf->note_pin_by_checkpoint(cf, cf->userdata);
+ cf->for_checkpoint = true;
+ return 0;
+ }
+};
+
+//
+// Sets up and kicks off a checkpoint.
+//
+void checkpointer::begin_checkpoint() {
+ // 1. Initialize the accountability counters.
+ m_checkpoint_num_txns = 0;
+
+ // 2. Make list of cachefiles to be included in the checkpoint.
+ m_cf_list->read_lock();
+ m_cf_list->m_active_fileid.iterate<void *, iterate_note_pin::fn>(nullptr);
+ m_checkpoint_num_files = m_cf_list->m_active_fileid.size();
+ m_cf_list->read_unlock();
+
+ // 3. Create log entries for this checkpoint.
+ if (m_logger) {
+ this->log_begin_checkpoint();
+ }
+
+ bjm_reset(m_checkpoint_clones_bjm);
+
+ m_list->write_pending_exp_lock();
+ m_list->read_list_lock();
+ m_cf_list->read_lock(); // needed for update_cachefiles
+ m_list->write_pending_cheap_lock();
+ // 4. Turn on all the relevant checkpoint pending bits.
+ this->turn_on_pending_bits();
+
+ // 5. Update the user data in the cachefiles included in this checkpoint.
+ this->update_cachefiles();
+ m_list->write_pending_cheap_unlock();
+ m_cf_list->read_unlock();
+ m_list->read_list_unlock();
+ m_list->write_pending_exp_unlock();
+}
+
+struct iterate_log_fassociate {
+ static int fn(const CACHEFILE &cf, uint32_t UU(idx), void **UU(extra)) {
+ assert(cf->log_fassociate_during_checkpoint);
+ cf->log_fassociate_during_checkpoint(cf, cf->userdata);
+ return 0;
+ }
+};
+
+//
+// Assuming the logger exists, this will write out the following
+// information to the log.
+//
+// 1. Writes the BEGIN_CHECKPOINT to the log.
+// 2. Writes the list of open dictionaries to the log.
+// 3. Writes the list of open transactions to the log.
+// 4. Writes the list of dictionaries that have had rollback logs suppressed.
+//
+// NOTE: This also has the side effect of setting the LSN
+// of checkpoint in progress.
+//
+void checkpointer::log_begin_checkpoint() {
+ int r = 0;
+
+ // Write the BEGIN_CHECKPOINT to the log.
+ LSN begin_lsn={ .lsn = (uint64_t) -1 }; // we'll need to store the lsn of the checkpoint begin in all the trees that are checkpointed.
+ TXN_MANAGER mgr = toku_logger_get_txn_manager(m_logger);
+ TXNID last_xid = toku_txn_manager_get_last_xid(mgr);
+ toku_log_begin_checkpoint(m_logger, &begin_lsn, 0, 0, last_xid);
+ m_lsn_of_checkpoint_in_progress = begin_lsn;
+
+ // Log the list of open dictionaries.
+ m_cf_list->m_active_fileid.iterate<void *, iterate_log_fassociate::fn>(nullptr);
+
+ // Write open transactions to the log.
+ r = toku_txn_manager_iter_over_live_txns(
+ m_logger->txn_manager,
+ log_open_txn,
+ this
+ );
+ assert(r == 0);
+}
+
+//
+// Sets the pending bits of EVERY PAIR in the cachetable, regardless of
+// whether the PAIR is clean or not. It will be the responsibility of
+// end_checkpoint or client threads to simply clear the pending bit
+// if the PAIR is clean.
+//
+// On entry and exit, the pair list's read list lock is grabbed, and
+// both pending locks are grabbed
+//
+void checkpointer::turn_on_pending_bits() {
+ PAIR p = NULL;
+ uint32_t i;
+ for (i = 0, p = m_list->m_checkpoint_head; i < m_list->m_n_in_table; i++, p = p->clock_next) {
+ assert(!p->checkpoint_pending);
+ //Only include pairs belonging to cachefiles in the checkpoint
+ if (!p->cachefile->for_checkpoint) {
+ continue;
+ }
+ // Mark everything as pending a checkpoint
+ //
+ // The rule for the checkpoint_pending bit is as follows:
+ // - begin_checkpoint may set checkpoint_pending to true
+ // even though the pair lock on the node is not held.
+ // - any thread that wants to clear the pending bit must own
+ // the PAIR lock. Otherwise,
+ // we may end up clearing the pending bit before the
+ // current lock is ever released.
+ p->checkpoint_pending = true;
+ if (m_list->m_pending_head) {
+ m_list->m_pending_head->pending_prev = p;
+ }
+ p->pending_next = m_list->m_pending_head;
+ p->pending_prev = NULL;
+ m_list->m_pending_head = p;
+ }
+ invariant(p == m_list->m_checkpoint_head);
+}
+
+void checkpointer::add_background_job() {
+ int r = bjm_add_background_job(m_checkpoint_clones_bjm);
+ assert_zero(r);
+}
+void checkpointer::remove_background_job() {
+ bjm_remove_background_job(m_checkpoint_clones_bjm);
+}
+
+void checkpointer::end_checkpoint(void (*testcallback_f)(void*), void* testextra) {
+ toku::scoped_malloc checkpoint_cfs_buf(m_checkpoint_num_files * sizeof(CACHEFILE));
+ CACHEFILE *checkpoint_cfs = reinterpret_cast<CACHEFILE *>(checkpoint_cfs_buf.get());
+
+ this->fill_checkpoint_cfs(checkpoint_cfs);
+ this->checkpoint_pending_pairs();
+ this->checkpoint_userdata(checkpoint_cfs);
+ // For testing purposes only. Dictionary has been fsync-ed to disk but log has not yet been written.
+ if (testcallback_f) {
+ testcallback_f(testextra);
+ }
+ this->log_end_checkpoint();
+ this->end_checkpoint_userdata(checkpoint_cfs);
+
+ // Delete the list of cachefiles in the checkpoint.
+ this->remove_cachefiles(checkpoint_cfs);
+}
+
+struct iterate_checkpoint_cfs {
+ CACHEFILE *checkpoint_cfs;
+ uint32_t checkpoint_num_files;
+ uint32_t curr_index;
+ iterate_checkpoint_cfs(CACHEFILE *cfs, uint32_t num_files) :
+ checkpoint_cfs(cfs), checkpoint_num_files(num_files), curr_index(0) {
+ }
+ static int fn(const CACHEFILE &cf, uint32_t UU(idx), struct iterate_checkpoint_cfs *info) {
+ if (cf->for_checkpoint) {
+ assert(info->curr_index < info->checkpoint_num_files);
+ info->checkpoint_cfs[info->curr_index] = cf;
+ info->curr_index++;
+ }
+ return 0;
+ }
+};
+
+void checkpointer::fill_checkpoint_cfs(CACHEFILE* checkpoint_cfs) {
+ struct iterate_checkpoint_cfs iterate(checkpoint_cfs, m_checkpoint_num_files);
+
+ m_cf_list->read_lock();
+ m_cf_list->m_active_fileid.iterate<struct iterate_checkpoint_cfs, iterate_checkpoint_cfs::fn>(&iterate);
+ assert(iterate.curr_index == m_checkpoint_num_files);
+ m_cf_list->read_unlock();
+}
+
+void checkpointer::checkpoint_pending_pairs() {
+ PAIR p;
+ m_list->read_list_lock();
+ while ((p = m_list->m_pending_head)!=0) {
+ // <CER> TODO: Investigate why we move pending head outside of the pending_pairs_remove() call.
+ m_list->m_pending_head = m_list->m_pending_head->pending_next;
+ m_list->pending_pairs_remove(p);
+ // if still pending, clear the pending bit and write out the node
+ pair_lock(p);
+ m_list->read_list_unlock();
+ write_pair_for_checkpoint_thread(m_ev, p);
+ pair_unlock(p);
+ m_list->read_list_lock();
+ }
+ assert(!m_list->m_pending_head);
+ m_list->read_list_unlock();
+ bjm_wait_for_jobs_to_finish(m_checkpoint_clones_bjm);
+}
+
+void checkpointer::checkpoint_userdata(CACHEFILE* checkpoint_cfs) {
+ // have just written data blocks, so next write the translation and header for each open dictionary
+ for (uint32_t i = 0; i < m_checkpoint_num_files; i++) {
+ CACHEFILE cf = checkpoint_cfs[i];
+ assert(cf->for_checkpoint);
+ assert(cf->checkpoint_userdata);
+ toku_cachetable_set_checkpointing_user_data_status(1);
+ cf->checkpoint_userdata(cf, cf->fd, cf->userdata);
+ toku_cachetable_set_checkpointing_user_data_status(0);
+ }
+}
+
+void checkpointer::log_end_checkpoint() {
+ if (m_logger) {
+ toku_log_end_checkpoint(m_logger, NULL,
+ 1, // want the end_checkpoint to be fsync'd
+ m_lsn_of_checkpoint_in_progress,
+ 0,
+ m_checkpoint_num_files,
+ m_checkpoint_num_txns);
+ toku_logger_note_checkpoint(m_logger, m_lsn_of_checkpoint_in_progress);
+ }
+}
+
+void checkpointer::end_checkpoint_userdata(CACHEFILE* checkpoint_cfs) {
+ // everything has been written to file and fsynced
+ // ... call checkpoint-end function in block translator
+ // to free obsolete blocks on disk used by previous checkpoint
+ //cachefiles_in_checkpoint is protected by the checkpoint_safe_lock
+ for (uint32_t i = 0; i < m_checkpoint_num_files; i++) {
+ CACHEFILE cf = checkpoint_cfs[i];
+ assert(cf->for_checkpoint);
+ assert(cf->end_checkpoint_userdata);
+ cf->end_checkpoint_userdata(cf, cf->fd, cf->userdata);
+ }
+}
+
+//
+// Deletes all the cachefiles in this checkpointer's cachefile list.
+//
+void checkpointer::remove_cachefiles(CACHEFILE* checkpoint_cfs) {
+ // careful: note_unpin_by_checkpoint may destroy the cachefile, so it must be the last use of cf in each iteration
+ for (uint32_t i = 0; i < m_checkpoint_num_files; i++) {
+ CACHEFILE cf = checkpoint_cfs[i];
+ // Checking for function existing so that this function
+ // can be called from cachetable tests.
+ assert(cf->for_checkpoint);
+ cf->for_checkpoint = false;
+ assert(cf->note_unpin_by_checkpoint);
+ // Clear the bit saying this file is in the checkpoint.
+ cf->note_unpin_by_checkpoint(cf, cf->userdata);
+ }
+}
+
+
+////////////////////////////////////////////////////////
+//
+// cachefiles list
+//
+static_assert(std::is_pod<cachefile_list>::value, "cachefile_list isn't POD");
+
+void cachefile_list::init() {
+ m_next_filenum_to_use.fileid = 0;
+ m_next_hash_id_to_use = 0;
+ toku_pthread_rwlock_init(*cachetable_m_lock_key, &m_lock, nullptr);
+ m_active_filenum.create();
+ m_active_fileid.create();
+ m_stale_fileid.create();
+}
+
+void cachefile_list::destroy() {
+ m_active_filenum.destroy();
+ m_active_fileid.destroy();
+ m_stale_fileid.destroy();
+ toku_pthread_rwlock_destroy(&m_lock);
+}
+
+void cachefile_list::read_lock() {
+ toku_pthread_rwlock_rdlock(&m_lock);
+}
+
+void cachefile_list::read_unlock() {
+ toku_pthread_rwlock_rdunlock(&m_lock);
+}
+
+void cachefile_list::write_lock() {
+ toku_pthread_rwlock_wrlock(&m_lock);
+}
+
+void cachefile_list::write_unlock() {
+ toku_pthread_rwlock_wrunlock(&m_lock);
+}
+
+struct iterate_find_iname {
+ const char *iname_in_env;
+ CACHEFILE found_cf;
+ iterate_find_iname(const char *iname) : iname_in_env(iname), found_cf(nullptr) { }
+ static int fn(const CACHEFILE &cf, uint32_t UU(idx), struct iterate_find_iname *info) {
+ if (cf->fname_in_env && strcmp(cf->fname_in_env, info->iname_in_env) == 0) {
+ info->found_cf = cf;
+ return -1;
+ }
+ return 0;
+ }
+};
+
+int cachefile_list::cachefile_of_iname_in_env(const char *iname_in_env, CACHEFILE *cf) {
+ struct iterate_find_iname iterate(iname_in_env);
+
+ read_lock();
+ int r = m_active_fileid.iterate<iterate_find_iname, iterate_find_iname::fn>(&iterate);
+ if (iterate.found_cf != nullptr) {
+ assert(strcmp(iterate.found_cf->fname_in_env, iname_in_env) == 0);
+ *cf = iterate.found_cf;
+ r = 0;
+ } else {
+ r = ENOENT;
+ }
+ read_unlock();
+ return r;
+}
+
+static int cachefile_find_by_filenum(const CACHEFILE &a_cf, const FILENUM &b) {
+ const FILENUM a = a_cf->filenum;
+ if (a.fileid < b.fileid) {
+ return -1;
+ } else if (a.fileid == b.fileid) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+int cachefile_list::cachefile_of_filenum(FILENUM filenum, CACHEFILE *cf) {
+ read_lock();
+ int r = m_active_filenum.find_zero<FILENUM, cachefile_find_by_filenum>(filenum, cf, nullptr);
+ if (r == DB_NOTFOUND) {
+ r = ENOENT;
+ } else {
+ invariant_zero(r);
+ }
+ read_unlock();
+ return r;
+}
+
+static int cachefile_find_by_fileid(const CACHEFILE &a_cf, const struct fileid &b) {
+ return toku_fileid_cmp(a_cf->fileid, b);
+}
+
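+// Insert cf into both the active filenum and active fileid containers;
+// the caller is expected to already hold this list's write lock (hence the _unlocked suffix).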
+void cachefile_list::add_cf_unlocked(CACHEFILE cf) {
+ int r;
+ r = m_active_filenum.insert<FILENUM, cachefile_find_by_filenum>(cf, cf->filenum, nullptr);
+ assert_zero(r);
+ r = m_active_fileid.insert<struct fileid, cachefile_find_by_fileid>(cf, cf->fileid, nullptr);
+ assert_zero(r);
+}
+
+void cachefile_list::add_stale_cf(CACHEFILE cf) {
+ write_lock();
+ int r = m_stale_fileid.insert<struct fileid, cachefile_find_by_fileid>(cf, cf->fileid, nullptr);
+ assert_zero(r);
+ write_unlock();
+}
+
+void cachefile_list::remove_cf(CACHEFILE cf) {
+ write_lock();
+
+ uint32_t idx;
+ int r;
+ r = m_active_filenum.find_zero<FILENUM, cachefile_find_by_filenum>(cf->filenum, nullptr, &idx);
+ assert_zero(r);
+ r = m_active_filenum.delete_at(idx);
+ assert_zero(r);
+
+ r = m_active_fileid.find_zero<struct fileid, cachefile_find_by_fileid>(cf->fileid, nullptr, &idx);
+ assert_zero(r);
+ r = m_active_fileid.delete_at(idx);
+ assert_zero(r);
+
+ write_unlock();
+}
+
+void cachefile_list::remove_stale_cf_unlocked(CACHEFILE cf) {
+ uint32_t idx;
+ int r;
+ r = m_stale_fileid.find_zero<struct fileid, cachefile_find_by_fileid>(cf->fileid, nullptr, &idx);
+ assert_zero(r);
+ r = m_stale_fileid.delete_at(idx);
+ assert_zero(r);
+}
+
+FILENUM cachefile_list::reserve_filenum() {
+ // taking a write lock because we are modifying next_filenum_to_use
+ FILENUM filenum = FILENUM_NONE;
+ write_lock();
+ while (1) {
+ int r = m_active_filenum.find_zero<FILENUM, cachefile_find_by_filenum>(m_next_filenum_to_use, nullptr, nullptr);
+ if (r == 0) {
+ m_next_filenum_to_use.fileid++;
+ continue;
+ }
+ assert(r == DB_NOTFOUND);
+
+ // skip the reserved value UINT32_MAX and wrap around to zero
+ if (m_next_filenum_to_use.fileid == FILENUM_NONE.fileid) {
+ m_next_filenum_to_use.fileid = 0;
+ continue;
+ }
+
+ filenum = m_next_filenum_to_use;
+ m_next_filenum_to_use.fileid++;
+ break;
+ }
+ write_unlock();
+ return filenum;
+}
+
+uint32_t cachefile_list::get_new_hash_id_unlocked() {
+ uint32_t retval = m_next_hash_id_to_use;
+ m_next_hash_id_to_use++;
+ return retval;
+}
+
+CACHEFILE cachefile_list::find_cachefile_unlocked(struct fileid* fileid) {
+ CACHEFILE cf = nullptr;
+ int r = m_active_fileid.find_zero<struct fileid, cachefile_find_by_fileid>(*fileid, &cf, nullptr);
+ if (r == 0) {
+ assert(!cf->unlink_on_close);
+ }
+ return cf;
+}
+
+CACHEFILE cachefile_list::find_stale_cachefile_unlocked(struct fileid* fileid) {
+ CACHEFILE cf = nullptr;
+ int r = m_stale_fileid.find_zero<struct fileid, cachefile_find_by_fileid>(*fileid, &cf, nullptr);
+ if (r == 0) {
+ assert(!cf->unlink_on_close);
+ }
+ return cf;
+}
+
+void cachefile_list::verify_unused_filenum(FILENUM filenum) {
+ int r = m_active_filenum.find_zero<FILENUM, cachefile_find_by_filenum>(filenum, nullptr, nullptr);
+ assert(r == DB_NOTFOUND);
+}
+
+// returns true if some eviction ran, false otherwise
+bool cachefile_list::evict_some_stale_pair(evictor* ev) {
+ write_lock();
+ if (m_stale_fileid.size() == 0) {
+ write_unlock();
+ return false;
+ }
+
+ CACHEFILE stale_cf = nullptr;
+ int r = m_stale_fileid.fetch(0, &stale_cf);
+ assert_zero(r);
+
+ // we should not have a cf in the stale list
+ // that does not have any pairs
+ PAIR p = stale_cf->cf_head;
+ paranoid_invariant(p != NULL);
+ evict_pair_from_cachefile(p);
+
+ // now that we have evicted something,
+ // let's check if the cachefile is needed anymore
+ //
+ // it is not needed if the latest eviction caused
+ // the cf_head for that cf to become null
+ bool destroy_cf = stale_cf->cf_head == nullptr;
+ if (destroy_cf) {
+ remove_stale_cf_unlocked(stale_cf);
+ }
+
+ write_unlock();
+
+ ev->remove_pair_attr(p->attr);
+ cachetable_free_pair(p);
+ if (destroy_cf) {
+ cachefile_destroy(stale_cf);
+ }
+ return true;
+}
+
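+// Evict and free every remaining PAIR that belongs to a stale cachefile,
+// destroying each stale cachefile once its pair list becomes empty.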
+void cachefile_list::free_stale_data(evictor* ev) {
+ write_lock();
+ while (m_stale_fileid.size() != 0) {
+ CACHEFILE stale_cf = nullptr;
+ int r = m_stale_fileid.fetch(0, &stale_cf);
+ assert_zero(r);
+
+ // we should not have a cf in the stale list
+ // that does not have any pairs
+ PAIR p = stale_cf->cf_head;
+ paranoid_invariant(p != NULL);
+
+ evict_pair_from_cachefile(p);
+ ev->remove_pair_attr(p->attr);
+ cachetable_free_pair(p);
+
+ // now that we have evicted something,
+ // let's check if the cachefile is needed anymore
+ if (stale_cf->cf_head == NULL) {
+ remove_stale_cf_unlocked(stale_cf);
+ cachefile_destroy(stale_cf);
+ }
+ }
+ write_unlock();
+}
+
+void __attribute__((__constructor__)) toku_cachetable_helgrind_ignore(void);
+void
+toku_cachetable_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&cachetable_miss, sizeof cachetable_miss);
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&cachetable_misstime, sizeof cachetable_misstime);
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&cachetable_prefetches, sizeof cachetable_prefetches);
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&cachetable_evictions, sizeof cachetable_evictions);
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&cleaner_executions, sizeof cleaner_executions);
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&ct_status, sizeof ct_status);
+}
diff --git a/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h
new file mode 100644
index 00000000..c5c21b49
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h
@@ -0,0 +1,588 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <fcntl.h>
+
+#include "ft/logger/logger.h"
+#include "ft/serialize/block_table.h"
+#include "ft/txn/txn.h"
+#include "ft/ft-status.h"
+#include "util/minicron.h"
+
+// Maintain a cache mapping from cachekeys to values (void*)
+// Some of the keys can be pinned. Don't pin too many or for too long.
+// If the cachetable is too full, it will call the flush_callback() function with the key, the value, and the otherargs
+// and then remove the key-value pair from the cache.
+// The flush callback will never be called for any of the currently pinned keys.
+// Also when flushing an object, the cachetable drops all references to it,
+// so you may need to free() it.
+// Note: The cachetable should use a common pool of memory, flushing things across cachetables.
+// (The first implementation doesn't)
+// If you pin something twice, you must unpin it twice.
+// table_size is the initial size of the cache table hash table (in number of entries)
+// size limit is the upper bound of the sum of size of the entries in the cache table (total number of bytes)
+
+typedef BLOCKNUM CACHEKEY;
+
+class checkpointer;
+typedef class checkpointer *CHECKPOINTER;
+typedef struct cachetable *CACHETABLE;
+typedef struct cachefile *CACHEFILE;
+typedef struct ctpair *PAIR;
+
+// This struct holds information about values stored in the cachetable.
+// As one can tell from the names, it probably violates an
+// abstraction layer by embedding node-type-specific names.
+//
+// The purpose of having this struct is to give the cachetable a way
+// to accumulate some totals we are interested in.
+// Breaking the abstraction layer with these names was the
+// easiest way.
+//
+typedef struct pair_attr_s {
+ long size; // size PAIR's value takes in memory
+ long nonleaf_size; // size if PAIR is a nonleaf node, 0 otherwise, used only for engine status
+ long leaf_size; // size if PAIR is a leaf node, 0 otherwise, used only for engine status
+ long rollback_size; // size if PAIR is a rollback node, 0 otherwise, used only for engine status
+ long cache_pressure_size; // amount PAIR contributes to cache pressure, is sum of buffer sizes and workdone counts
+ bool is_valid;
+} PAIR_ATTR;
+
+static inline PAIR_ATTR make_pair_attr(long size) {
+ PAIR_ATTR result={
+ .size = size,
+ .nonleaf_size = 0,
+ .leaf_size = 0,
+ .rollback_size = 0,
+ .cache_pressure_size = 0,
+ .is_valid = true
+ };
+ return result;
+}
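+
+// Illustrative only (not part of the original documentation): an object that
+// occupies 4096 bytes in memory, with the per-node-type sizes left at zero,
+// could be described as
+//   PAIR_ATTR attr = make_pair_attr(4096);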
+
+void toku_set_cleaner_period (CACHETABLE ct, uint32_t new_period);
+uint32_t toku_get_cleaner_period_unlocked (CACHETABLE ct);
+void toku_set_cleaner_iterations (CACHETABLE ct, uint32_t new_iterations);
+uint32_t toku_get_cleaner_iterations (CACHETABLE ct);
+uint32_t toku_get_cleaner_iterations_unlocked (CACHETABLE ct);
+void toku_set_enable_partial_eviction (CACHETABLE ct, bool enabled);
+bool toku_get_enable_partial_eviction (CACHETABLE ct);
+
+// cachetable operations
+
+// create and initialize a cache table
+// size_limit is the upper limit on the total size of the values in the table
+// pass 0 if you want the default
+int toku_cachetable_create_ex(CACHETABLE *result, long size_limit,
+ unsigned long client_pool_threads,
+ unsigned long cachetable_pool_threads,
+ unsigned long checkpoint_pool_threads,
+ LSN initial_lsn, struct tokulogger *logger);
+
+#define toku_cachetable_create(r, s, l, o) \
+ toku_cachetable_create_ex(r, s, 0, 0, 0, l, o);
+
+// Create a new cachetable.
+// Effects: a new cachetable is created and initialized.
+// The cachetable pointer is stored into result.
+// The limit on the sum of the sizes of the memory objects is set to size_limit, in whatever
+// units make sense to the user of the cachetable.
+// Returns: If success, returns 0 and result points to the new cachetable. Otherwise,
+// returns an error number.
+
+// Returns a pointer to the checkpointer within the given cachetable.
+CHECKPOINTER toku_cachetable_get_checkpointer(CACHETABLE ct);
+
+// What is the cachefile that goes with a particular filenum?
+// During a transaction, we cannot reuse a filenum.
+int toku_cachefile_of_filenum (CACHETABLE t, FILENUM filenum, CACHEFILE *cf);
+
+// What is the cachefile that goes with a particular iname (relative to env)?
+// During a transaction, we cannot reuse an iname.
+int toku_cachefile_of_iname_in_env (CACHETABLE ct, const char *iname_in_env, CACHEFILE *cf);
+
+// Get the iname (within the cwd) associated with the cachefile
+// Return the filename
+char *toku_cachefile_fname_in_cwd (CACHEFILE cf);
+
+void toku_cachetable_begin_checkpoint (CHECKPOINTER cp, struct tokulogger *logger);
+
+void toku_cachetable_end_checkpoint(CHECKPOINTER cp, struct tokulogger *logger,
+ void (*testcallback_f)(void*), void * testextra);
+
+
+// Shuts down checkpoint thread
+// Requires no locks be held that are taken by the checkpoint function
+void toku_cachetable_minicron_shutdown(CACHETABLE ct);
+
+// Prepare to close the cachetable. This informs the cachetable that it is about to be closed
+// so that it can tune its checkpoint resource use.
+void toku_cachetable_prepare_close(CACHETABLE ct);
+
+// Close the cachetable.
+// Effects: All of the memory objects are flushed to disk, and the cachetable is destroyed.
+void toku_cachetable_close(CACHETABLE *ct);
+
+// Open a file and bind the file to a new cachefile object. (For use by test programs only.)
+int toku_cachetable_openf(CACHEFILE *,CACHETABLE, const char *fname_in_env, int flags, mode_t mode);
+
+// Bind a file to a new cachefile object.
+int toku_cachetable_openfd(CACHEFILE *,CACHETABLE, int fd,
+ const char *fname_relative_to_env);
+int toku_cachetable_openfd_with_filenum (CACHEFILE *,CACHETABLE, int fd,
+ const char *fname_in_env,
+ FILENUM filenum, bool* was_open);
+
+// reserve a unique filenum
+FILENUM toku_cachetable_reserve_filenum(CACHETABLE ct);
+
+// Effect: Reserve a fraction of the cachetable memory.
+// Returns the amount reserved.
+// To return the memory to the cachetable, call toku_cachetable_release_reserved_memory
+// Requires 0<fraction<1.
+uint64_t toku_cachetable_reserve_memory(CACHETABLE, double fraction, uint64_t upper_bound);
+void toku_cachetable_release_reserved_memory(CACHETABLE, uint64_t);
+
+// cachefile operations
+
+// Does an fsync of a cachefile.
+void toku_cachefile_fsync(CACHEFILE cf);
+
+enum partial_eviction_cost {
+ PE_CHEAP=0, // running partial eviction is cheap, and can be done on the client thread
+ PE_EXPENSIVE=1, // running partial eviction is expensive, and should not be done on the client thread
+};
+
+// cachetable pair clean or dirty WRT external memory
+enum cachetable_dirty {
+ CACHETABLE_CLEAN=0, // the cached object is clean WRT the cachefile
+ CACHETABLE_DIRTY=1, // the cached object is dirty WRT the cachefile
+};
+
+// The flush callback is called when a key value pair is being written to storage and possibly removed from the cachetable.
+// When write_me is true, the value should be written to storage.
+// When keep_me is false, the value should be freed.
+// When for_checkpoint is true, this was a 'pending' write
+// Returns: 0 if success, otherwise an error number.
+// Can access fd (fd is protected by a readlock during call)
+typedef void (*CACHETABLE_FLUSH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, void *value, void **disk_data, void *write_extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool is_clone);
+
+// The fetch callback is called when a thread is attempting to get and pin a memory
+// object and it is not in the cachetable.
+// Returns: 0 if success, otherwise an error number. The address and size of the object
+// associated with the key are returned.
+// Can access fd (fd is protected by a readlock during call)
+typedef int (*CACHETABLE_FETCH_CALLBACK)(CACHEFILE, PAIR p, int fd, CACHEKEY key, uint32_t fullhash, void **value_data, void **disk_data, PAIR_ATTR *sizep, int *dirtyp, void *read_extraargs);
+
+// The cachetable calls the partial eviction estimate callback to determine if
+// partial eviction is a cheap operation that may be performed on the client thread
+// or whether partial eviction is expensive and should be done on a background (writer) thread.
+// The callback conveys this information by setting cost to either PE_CHEAP or PE_EXPENSIVE.
+// If cost is PE_EXPENSIVE, then the callback also sets bytes_freed_estimate
+// to return an estimate of the number of bytes it will free
+// so that the cachetable can estimate how much data is being evicted on background threads.
+// If cost is PE_CHEAP, then the callback does not set bytes_freed_estimate.
+typedef void (*CACHETABLE_PARTIAL_EVICTION_EST_CALLBACK)(void *ftnode_pv, void* disk_data, long* bytes_freed_estimate, enum partial_eviction_cost *cost, void *write_extraargs);
+
+// The cachetable calls the partial eviction callback to possibly try to partially evict pieces
+// of the PAIR. The callback determines the strategy for what to evict. The callback may choose to free
+// nothing, or may choose to free as much as possible. When the partial eviction callback is finished,
+// it must call finalize with the new PAIR_ATTR and the given finalize_extra. After this point, the
+// write lock will be released on the PAIR and it is no longer safe to operate on any of the passed arguments.
+// This is useful for doing expensive cleanup work outside of the PAIR's write lock (such as destroying objects, etc)
+//
+// on entry, requires a write lock to be held on the PAIR in the cachetable while this function is called
+// on exit, the finalize continuation is called
+typedef int (*CACHETABLE_PARTIAL_EVICTION_CALLBACK)(void *ftnode_pv, PAIR_ATTR old_attr, void *write_extraargs,
+ void (*finalize)(PAIR_ATTR new_attr, void *extra), void *finalize_extra);
+
+// The cachetable calls this function to determine if get_and_pin call requires a partial fetch. If this function returns true,
+// then the cachetable will subsequently call CACHETABLE_PARTIAL_FETCH_CALLBACK to perform
+// a partial fetch. If this function returns false, then the PAIR's value is returned to the caller as is.
+//
+// An alternative to having this callback is to always call CACHETABLE_PARTIAL_FETCH_CALLBACK, and let
+// CACHETABLE_PARTIAL_FETCH_CALLBACK decide whether to do any partial fetching or not.
+// There is no particular reason why this alternative was not chosen.
+// Requires: a read lock to be held on the PAIR
+typedef bool (*CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK)(void *ftnode_pv, void *read_extraargs);
+
+// The cachetable calls the partial fetch callback when a thread needs to read or decompress a subset of a PAIR into memory.
+// An example is needing to read a basement node into memory. Another example is decompressing an internal node's
+// message buffer. The cachetable determines if a partial fetch is necessary by first calling CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK.
+// The new PAIR_ATTR of the PAIR is returned in sizep
+// Can access fd (fd is protected by a readlock during call)
+// Returns: 0 if success, otherwise an error number.
+typedef int (*CACHETABLE_PARTIAL_FETCH_CALLBACK)(void *value_data, void* disk_data, void *read_extraargs, int fd, PAIR_ATTR *sizep);
+
+// The cachetable calls the put callback during a cachetable_put command to provide the opaque PAIR.
+// The PAIR can then be used to later unpin the pair.
+// Returns: 0 if success, otherwise an error number.
+typedef void (*CACHETABLE_PUT_CALLBACK)(CACHEKEY key, void *value_data, PAIR p);
+
+// TODO(leif) XXX TODO XXX
+typedef int (*CACHETABLE_CLEANER_CALLBACK)(void *ftnode_pv, BLOCKNUM blocknum, uint32_t fullhash, void *write_extraargs);
+
+typedef void (*CACHETABLE_CLONE_CALLBACK)(void* value_data, void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool for_checkpoint, void* write_extraargs);
+
+typedef void (*CACHETABLE_CHECKPOINT_COMPLETE_CALLBACK)(void *value_data);
+
+typedef struct {
+ CACHETABLE_FLUSH_CALLBACK flush_callback;
+ CACHETABLE_PARTIAL_EVICTION_EST_CALLBACK pe_est_callback;
+ CACHETABLE_PARTIAL_EVICTION_CALLBACK pe_callback;
+ CACHETABLE_CLEANER_CALLBACK cleaner_callback;
+ CACHETABLE_CLONE_CALLBACK clone_callback;
+ CACHETABLE_CHECKPOINT_COMPLETE_CALLBACK checkpoint_complete_callback;
+ void* write_extraargs; // parameter for flush_callback, pe_est_callback, pe_callback, and cleaner_callback
+} CACHETABLE_WRITE_CALLBACK;
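+
+// A minimal illustrative way to fill in this struct; the my_* names below are
+// hypothetical callbacks supplied by the client, not part of this header:
+//   CACHETABLE_WRITE_CALLBACK wc;
+//   wc.flush_callback = my_flush;
+//   wc.pe_est_callback = my_pe_est;
+//   wc.pe_callback = my_pe;
+//   wc.cleaner_callback = my_cleaner;
+//   wc.clone_callback = my_clone;
+//   wc.checkpoint_complete_callback = my_checkpoint_complete;
+//   wc.write_extraargs = my_extra;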
+
+typedef void (*CACHETABLE_GET_KEY_AND_FULLHASH)(CACHEKEY* cachekey, uint32_t* fullhash, void* extra);
+
+typedef void (*CACHETABLE_REMOVE_KEY)(CACHEKEY* cachekey, bool for_checkpoint, void* extra);
+
+void toku_cachefile_set_userdata(CACHEFILE cf, void *userdata,
+ void (*log_fassociate_during_checkpoint)(CACHEFILE, void*),
+ void (*close_userdata)(CACHEFILE, int, void*, bool, LSN),
+ void (*free_userdata)(CACHEFILE, void*),
+ void (*checkpoint_userdata)(CACHEFILE, int, void*),
+ void (*begin_checkpoint_userdata)(LSN, void*),
+ void (*end_checkpoint_userdata)(CACHEFILE, int, void*),
+ void (*note_pin_by_checkpoint)(CACHEFILE, void*),
+ void (*note_unpin_by_checkpoint)(CACHEFILE, void*));
+// Effect: Store some cachefile-specific user data. When the last reference to a cachefile is closed, we call close_userdata().
+// Before starting a checkpoint, we call begin_checkpoint_userdata().
+// When the cachefile needs to be checkpointed, we call checkpoint_userdata().
+// If userdata is already non-NULL, then we simply overwrite it.
+
+void *toku_cachefile_get_userdata(CACHEFILE);
+// Effect: Get the user data.
+
+CACHETABLE toku_cachefile_get_cachetable(CACHEFILE cf);
+// Effect: Get the cachetable.
+
+CACHEFILE toku_pair_get_cachefile(PAIR);
+// Effect: Get the cachefile of the pair
+
+void toku_cachetable_swap_pair_values(PAIR old_pair, PAIR new_pair);
+// Effect: Swaps the value_data of old_pair and new_pair.
+// Requires: both old_pair and new_pair to be pinned with write locks.
+
+typedef enum {
+ PL_READ = 0,
+ PL_WRITE_CHEAP,
+ PL_WRITE_EXPENSIVE
+} pair_lock_type;
+
+// put something into the cachetable and checkpoint dependent pairs
+// if the checkpointing is necessary
+void toku_cachetable_put_with_dep_pairs(
+ CACHEFILE cachefile,
+ CACHETABLE_GET_KEY_AND_FULLHASH get_key_and_fullhash,
+ void *value,
+ PAIR_ATTR attr,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ void *get_key_and_fullhash_extra,
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ PAIR* dependent_pairs,
+ enum cachetable_dirty* dependent_dirty, // array stating dirty/cleanness of dependent pairs
+ CACHEKEY* key,
+ uint32_t* fullhash,
+ CACHETABLE_PUT_CALLBACK put_callback
+ );
+
+// Put a memory object into the cachetable.
+// Effects: Lookup the key in the cachetable. If the key is not in the cachetable,
+// then insert the pair and pin it. Otherwise return an error. Some of the key
+// value pairs may be evicted from the cachetable when the cachetable gets too big.
+void toku_cachetable_put(CACHEFILE cf, CACHEKEY key, uint32_t fullhash,
+ void *value, PAIR_ATTR size,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ CACHETABLE_PUT_CALLBACK put_callback
+ );
+
+// Get and pin the memory object of a PAIR, and write dependent pairs to disk
+// if the dependent pairs are pending a checkpoint.
+// Effects: If the memory object is in the cachetable, acquire a PAIR lock on it.
+// Otherwise, fetch it from storage by calling the fetch callback. If the fetch
+// succeeded, add the memory object to the cachetable with a PAIR lock on it.
+// Before returning to the user, if the PAIR object being retrieved, or any of the
+// dependent pairs passed in as parameters must be written to disk for checkpoint,
+// then the required PAIRs are written to disk for checkpoint.
+// KEY PROPERTY OF DEPENDENT PAIRS: They are already locked by the client
+// Returns: 0 if the memory object is in memory, otherwise an error number.
+int toku_cachetable_get_and_pin_with_dep_pairs (
+ CACHEFILE cachefile,
+ CACHEKEY key,
+ uint32_t fullhash,
+ void**value,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ CACHETABLE_FETCH_CALLBACK fetch_callback,
+ CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ pair_lock_type lock_type,
+ void* read_extraargs, // parameter for fetch_callback, pf_req_callback, and pf_callback
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ PAIR* dependent_pairs,
+ enum cachetable_dirty* dependent_dirty // array stating dirty/cleanness of dependent pairs
+ );
+
+// Get and pin a memory object.
+// Effects: If the memory object is in the cachetable acquire the PAIR lock on it.
+// Otherwise, fetch it from storage by calling the fetch callback. If the fetch
+// succeeded, add the memory object to the cachetable with a read lock on it.
+// Returns: 0 if the memory object is in memory, otherwise an error number.
+int toku_cachetable_get_and_pin (
+ CACHEFILE cachefile,
+ CACHEKEY key,
+ uint32_t fullhash,
+ void**value,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ CACHETABLE_FETCH_CALLBACK fetch_callback,
+ CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ bool may_modify_value,
+ void* read_extraargs // parameter for fetch_callback, pf_req_callback, and pf_callback
+ );
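+
+// Illustrative call (wc and the my_* callbacks are hypothetical client code;
+// the pinned object must later be released with one of the unpin functions):
+//   void *node = NULL;
+//   uint32_t fullhash = toku_cachetable_hash(cf, key);
+//   int r = toku_cachetable_get_and_pin(cf, key, fullhash, &node,
+//                                       wc, my_fetch, my_pf_req, my_pf,
+//                                       true /* may_modify_value */, my_extra);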
+
+// does partial fetch on a pinned pair
+void toku_cachetable_pf_pinned_pair(
+ void* value,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ void* read_extraargs,
+ CACHEFILE cf,
+ CACHEKEY key,
+ uint32_t fullhash
+ );
+
+struct unlockers {
+ bool locked;
+ void (*f)(void* extra);
+ void *extra;
+ struct unlockers *next;
+};
+typedef struct unlockers *UNLOCKERS;
+
+// Effect: If the block is in the cachetable, then return it.
+// Otherwise call the functions in unlockers, fetch the data (but don't pin it, since we'll just end up pinning it again later), and return TOKUDB_TRY_AGAIN.
+int toku_cachetable_get_and_pin_nonblocking (
+ CACHEFILE cf,
+ CACHEKEY key,
+ uint32_t fullhash,
+ void**value,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ CACHETABLE_FETCH_CALLBACK fetch_callback,
+ CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ pair_lock_type lock_type,
+ void *read_extraargs, // parameter for fetch_callback, pf_req_callback, and pf_callback
+ UNLOCKERS unlockers
+ );
+
+int toku_cachetable_maybe_get_and_pin (CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, pair_lock_type, void**);
+// Effect: Maybe get and pin a memory object.
+// This function is similar to the get_and_pin function except that it
+// will not attempt to fetch a memory object that is not in the cachetable or requires any kind of blocking to get it.
+// Returns: If the item is already in memory, then return 0 and store it in the
+// void**. If the item is not in memory, then return a nonzero error number.
+
+int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, pair_lock_type, void**);
+// Effect: Like maybe get and pin, but may pin a clean pair.
+
+int toku_cachetable_get_attr(CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, PAIR_ATTR *);
+// Effect: get the attributes for cachekey
+// Returns: 0 if success, non-zero if cachekey is not cached
+// Notes: this function exists for tests
+
+int toku_cachetable_unpin(CACHEFILE, PAIR, enum cachetable_dirty dirty, PAIR_ATTR size);
+// Effect: Unpin a memory object
+// Modifies: If the memory object is in the cachetable, then OR the dirty flag,
+// update the size, and release the read lock on the memory object.
+// Returns: 0 if success, otherwise returns an error number.
+// Requires: The ct is locked.
+
+int toku_cachetable_unpin_ct_prelocked_no_flush(CACHEFILE, PAIR, enum cachetable_dirty dirty, PAIR_ATTR size);
+// Effect: The same as toku_cachetable_unpin, except that the ct must not be locked.
+// Requires: The ct is NOT locked.
+
+int toku_cachetable_unpin_and_remove (CACHEFILE, PAIR, CACHETABLE_REMOVE_KEY, void*); /* Removing something already present is OK. */
+// Effect: Remove an object from the cachetable. Don't write it back.
+// Requires: The object must be pinned exactly once.
+
+// test-only wrapper that use CACHEKEY and fullhash
+int toku_test_cachetable_unpin(CACHEFILE, CACHEKEY, uint32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR size);
+
+// test-only wrapper that use CACHEKEY and fullhash
+int toku_test_cachetable_unpin_ct_prelocked_no_flush(CACHEFILE, CACHEKEY, uint32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR size);
+
+// test-only wrapper that use CACHEKEY
+int toku_test_cachetable_unpin_and_remove (CACHEFILE, CACHEKEY, CACHETABLE_REMOVE_KEY, void*); /* Removing something already present is OK. */
+
+int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, uint32_t fullhash,
+ CACHETABLE_WRITE_CALLBACK write_callback,
+ CACHETABLE_FETCH_CALLBACK fetch_callback,
+ CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
+ CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+ void *read_extraargs, // parameter for fetch_callback, pf_req_callback, and pf_callback
+ bool *doing_prefetch);
+// Effect: Prefetch a memory object for a given key into the cachetable
+// Precondition: The cachetable mutex is NOT held.
+// Postcondition: The cachetable mutex is NOT held.
+// Returns: 0 if success
+// Implement Note:
+// 1) The pair's rwlock is acquired (for write) (there is not a deadlock here because the rwlock is a pthread_cond_wait using the cachetable mutex).
+// Case A: Single-threaded.
+// A1) Call cachetable_fetch_pair, which
+// a) Obtains a readlock on the cachefile's fd (to prevent multiple readers at once)
+// b) Unlocks the cachetable
+// c) Does the fetch off disk.
+// d) Locks the cachetable
+// e) Unlocks the fd lock.
+// f) Unlocks the pair rwlock.
+// Case B: Multithreaded
+// a) Enqueue a cachetable_reader into the workqueue.
+// b) Unlock the cache table.
+// c) The enqueue'd job later locks the cachetable, and calls cachetable_fetch_pair (doing the steps in A1 above).
+
+int toku_cachetable_assert_all_unpinned (CACHETABLE);
+
+int toku_cachefile_count_pinned (CACHEFILE, int /*printthem*/ );
+
+// Close the cachefile.
+// Effects: All of the cached object associated with the cachefile are evicted from
+// the cachetable. The flush callback is called for each of these objects. The
+// close function does not return until all of the objects are evicted. The cachefile
+// object is freed.
+// If oplsn_valid is true then use oplsn as the LSN of the close instead of asking the logger. oplsn_valid being true is only allowed during recovery, and requires that you are removing the last reference (otherwise the lsn wouldn't make it in.)
+void toku_cachefile_close (CACHEFILE*, bool oplsn_valid, LSN oplsn);
+
+// Return on success (different from pread and pwrite)
+//int cachefile_pwrite (CACHEFILE, const void *buf, size_t count, toku_off_t offset);
+//int cachefile_pread (CACHEFILE, void *buf, size_t count, toku_off_t offset);
+
+// Get the file descriptor associated with the cachefile
+// Return the file descriptor
+// Grabs a read lock protecting the fd
+int toku_cachefile_get_fd (CACHEFILE);
+
+// Get the iname (within the environment) associated with the cachefile
+// Return the filename
+char * toku_cachefile_fname_in_env (CACHEFILE cf);
+
+void toku_cachefile_set_fname_in_env(CACHEFILE cf, char *new_fname_in_env);
+
+// Make it so when the cachefile closes, the underlying file is unlinked
+void toku_cachefile_unlink_on_close(CACHEFILE cf);
+
+// is this cachefile marked as unlink on close?
+bool toku_cachefile_is_unlink_on_close(CACHEFILE cf);
+
+void toku_cachefile_skip_log_recover_on_close(CACHEFILE cf);
+void toku_cachefile_do_log_recover_on_close(CACHEFILE cf);
+bool toku_cachefile_is_skip_log_recover_on_close(CACHEFILE cf);
+
+// Return the logger associated with the cachefile
+struct tokulogger *toku_cachefile_logger(CACHEFILE cf);
+
+// Return the filenum associated with the cachefile
+FILENUM toku_cachefile_filenum(CACHEFILE cf);
+
+// Effect: Return a 32-bit hash key. The hash key shall be suitable for using with bitmasking for a table of size power-of-two.
+uint32_t toku_cachetable_hash(CACHEFILE cf, CACHEKEY key);
+
+uint32_t toku_cachefile_fullhash_of_header(CACHEFILE cf);
+
+// debug functions
+
+// Print the contents of the cachetable. This is mainly used from gdb
+void toku_cachetable_print_state (CACHETABLE ct);
+
+// Get the state of the cachetable. This is used to verify the cachetable
+void toku_cachetable_get_state(CACHETABLE ct, int *num_entries_ptr, int *hash_size_ptr, long *size_current_ptr, long *size_limit_ptr);
+
+// Get the state of a cachetable entry by key. This is used to verify the cachetable
+int toku_cachetable_get_key_state(CACHETABLE ct, CACHEKEY key, CACHEFILE cf,
+ void **value_ptr,
+ int *dirty_ptr,
+ long long *pin_ptr,
+ long *size_ptr);
+
+// Verify the whole cachetable that the cachefile is in. Slow.
+void toku_cachefile_verify (CACHEFILE cf);
+
+// Verify the cachetable. Slow.
+void toku_cachetable_verify (CACHETABLE t);
+
+// Not for use in production, but useful for testing.
+void toku_cachetable_print_hash_histogram (void) __attribute__((__visibility__("default")));
+
+void toku_cachetable_maybe_flush_some(CACHETABLE ct);
+
+// for stat64
+uint64_t toku_cachefile_size(CACHEFILE cf);
+
+void toku_cachetable_get_status(CACHETABLE ct, CACHETABLE_STATUS s);
+
+void toku_cachetable_set_env_dir(CACHETABLE ct, const char *env_dir);
+char * toku_construct_full_name(int count, ...);
+char * toku_cachetable_get_fname_in_cwd(CACHETABLE ct, const char * fname_in_env);
+
+void cachefile_kibbutz_enq (CACHEFILE cf, void (*f)(void*), void *extra);
+// Effect: Add a job to the cachetable's collection of work to do. Note that function f must call remove_background_job_from_cf()
+
+void remove_background_job_from_cf (CACHEFILE cf);
+// Effect: When a kibbutz job or cleaner thread finishes in a cachefile,
+// the cachetable must be notified.
+
+// test-only function
+int toku_cachetable_get_checkpointing_user_data_status(void);
+
+// test-only function
+int toku_cleaner_thread_for_test(CACHETABLE ct);
+int toku_cleaner_thread(void *cleaner_v);
+
+// test function. Exported in the ydb layer and used by tests that want to run DRD
+// The default of 1M is too high for drd tests, so this is a mechanism to set a smaller number.
+void toku_pair_list_set_lock_size(uint32_t num_locks);
+
+// Used by ft-ops.cc to figure out if it has the write lock on a pair.
+// Pretty hacky and not accurate enough, should be improved at the frwlock
+// layer.
+__attribute__((const,nonnull))
+bool toku_ctpair_is_write_locked(PAIR pair);
diff --git a/storage/tokudb/PerconaFT/ft/cachetable/checkpoint.cc b/storage/tokudb/PerconaFT/ft/cachetable/checkpoint.cc
new file mode 100644
index 00000000..aad018f4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/cachetable/checkpoint.cc
@@ -0,0 +1,333 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/***********
+ * The purpose of this file is to implement the high-level logic for
+ * taking a checkpoint.
+ *
+ * There are three locks used for taking a checkpoint. They are listed below.
+ *
+ * NOTE: The reader-writer locks may be held by either multiple clients
+ * or the checkpoint function. (The checkpoint function has the role
+ * of the writer, the clients have the reader roles.)
+ *
+ * - multi_operation_lock
+ * This is a new reader-writer lock.
+ * This lock is held by the checkpoint function only for as long as is required to
+ * set all the "pending" bits and to create the checkpoint-in-progress versions
+ * of the header and translation table (btt).
+ * The following operations must take the multi_operation_lock:
+ * - any set of operations that must be atomic with respect to begin checkpoint
+ *
+ * - checkpoint_safe_lock
+ * This is a new reader-writer lock.
+ * This lock is held for the entire duration of the checkpoint.
+ * It is used to prevent more than one checkpoint from happening at a time
+ * (the checkpoint function is non-re-entrant), and to prevent certain operations
+ * that should not happen during a checkpoint.
+ * The following operations must take the checkpoint_safe lock:
+ * - delete a dictionary
+ * - rename a dictionary
+ * The application can use this lock to disable checkpointing during other sensitive
+ * operations, such as making a backup copy of the database.
+ *
+ * Once the "pending" bits are set and the snapshots are taken of the header and btt,
+ * most normal database operations are permitted to resume.
+ *
+ *
+ *
+ *****/
+
+#include <my_global.h>
+#include <time.h>
+
+#include "portability/toku_portability.h"
+#include "portability/toku_atomic.h"
+
+#include "ft/cachetable/cachetable.h"
+#include "ft/cachetable/checkpoint.h"
+#include "ft/ft.h"
+#include "ft/logger/log-internal.h"
+#include "ft/logger/recover.h"
+#include "util/frwlock.h"
+#include "util/status.h"
+
+toku_instr_key *checkpoint_safe_mutex_key;
+toku_instr_key *checkpoint_safe_rwlock_key;
+toku_instr_key *multi_operation_lock_key;
+toku_instr_key *low_priority_multi_operation_lock_key;
+
+toku_instr_key *rwlock_cond_key;
+toku_instr_key *rwlock_wait_read_key;
+toku_instr_key *rwlock_wait_write_key;
+
+void toku_checkpoint_get_status(CACHETABLE ct, CHECKPOINT_STATUS statp) {
+ cp_status.init();
+ CP_STATUS_VAL(CP_PERIOD) = toku_get_checkpoint_period_unlocked(ct);
+ *statp = cp_status;
+}
+
+static LSN last_completed_checkpoint_lsn;
+
+static toku_mutex_t checkpoint_safe_mutex;
+static toku::frwlock checkpoint_safe_lock;
+static toku_pthread_rwlock_t multi_operation_lock;
+static toku_pthread_rwlock_t low_priority_multi_operation_lock;
+
+static bool initialized = false; // sanity check
+static volatile bool locked_mo = false; // true when the multi_operation write lock is held (by checkpoint)
+static volatile bool locked_cs = false; // true when the checkpoint_safe write lock is held (by checkpoint)
+static volatile uint64_t toku_checkpoint_begin_long_threshold = 1000000; // 1 second
+static volatile uint64_t toku_checkpoint_end_long_threshold = 1000000 * 60; // 1 minute
+
+// Note: the following static functions are called from the checkpoint's internal logic only,
+// and use the "writer" calls for locking and unlocking.
+
+static void
+multi_operation_lock_init(void) {
+ pthread_rwlockattr_t attr;
+ pthread_rwlockattr_init(&attr);
+#if defined(HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP)
+ pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+#else
+// TODO: need to figure out how to make writer-preferential rwlocks
+// happen on osx
+#endif
+ toku_pthread_rwlock_init(
+ *multi_operation_lock_key, &multi_operation_lock, &attr);
+ toku_pthread_rwlock_init(*low_priority_multi_operation_lock_key,
+ &low_priority_multi_operation_lock,
+ &attr);
+ pthread_rwlockattr_destroy(&attr);
+ locked_mo = false;
+}
+
+static void
+multi_operation_lock_destroy(void) {
+ toku_pthread_rwlock_destroy(&multi_operation_lock);
+ toku_pthread_rwlock_destroy(&low_priority_multi_operation_lock);
+}
+
+static void
+multi_operation_checkpoint_lock(void) {
+ toku_pthread_rwlock_wrlock(&low_priority_multi_operation_lock);
+ toku_pthread_rwlock_wrlock(&multi_operation_lock);
+ locked_mo = true;
+}
+
+static void
+multi_operation_checkpoint_unlock(void) {
+ locked_mo = false;
+ toku_pthread_rwlock_wrunlock(&multi_operation_lock);
+ toku_pthread_rwlock_wrunlock(&low_priority_multi_operation_lock);
+}
+
+static void checkpoint_safe_lock_init(void) {
+ toku_mutex_init(
+ *checkpoint_safe_mutex_key, &checkpoint_safe_mutex, nullptr);
+ checkpoint_safe_lock.init(&checkpoint_safe_mutex
+#ifdef TOKU_MYSQL_WITH_PFS
+ ,
+ *checkpoint_safe_rwlock_key
+#endif
+ );
+ locked_cs = false;
+}
+
+static void
+checkpoint_safe_lock_destroy(void) {
+ checkpoint_safe_lock.deinit();
+ toku_mutex_destroy(&checkpoint_safe_mutex);
+}
+
+static void
+checkpoint_safe_checkpoint_lock(void) {
+ toku_mutex_lock(&checkpoint_safe_mutex);
+ checkpoint_safe_lock.write_lock(false);
+ toku_mutex_unlock(&checkpoint_safe_mutex);
+ locked_cs = true;
+}
+
+static void
+checkpoint_safe_checkpoint_unlock(void) {
+ locked_cs = false;
+ toku_mutex_lock(&checkpoint_safe_mutex);
+ checkpoint_safe_lock.write_unlock();
+ toku_mutex_unlock(&checkpoint_safe_mutex);
+}
+
+// toku_xxx_client_(un)lock() functions are only called from client code,
+// never from checkpoint code, and use the "reader" interface to the lock functions.
+
+void
+toku_multi_operation_client_lock(void) {
+ if (locked_mo)
+ (void) toku_sync_fetch_and_add(&CP_STATUS_VAL(CP_CLIENT_WAIT_ON_MO), 1);
+ toku_pthread_rwlock_rdlock(&multi_operation_lock);
+}
+
+void
+toku_multi_operation_client_unlock(void) {
+ toku_pthread_rwlock_rdunlock(&multi_operation_lock);
+}
+
+void toku_low_priority_multi_operation_client_lock(void) {
+ toku_pthread_rwlock_rdlock(&low_priority_multi_operation_lock);
+}
+
+void toku_low_priority_multi_operation_client_unlock(void) {
+ toku_pthread_rwlock_rdunlock(&low_priority_multi_operation_lock);
+}
+
+void
+toku_checkpoint_safe_client_lock(void) {
+ if (locked_cs)
+ (void) toku_sync_fetch_and_add(&CP_STATUS_VAL(CP_CLIENT_WAIT_ON_CS), 1);
+ toku_mutex_lock(&checkpoint_safe_mutex);
+ checkpoint_safe_lock.read_lock();
+ toku_mutex_unlock(&checkpoint_safe_mutex);
+ toku_multi_operation_client_lock();
+}
+
+void
+toku_checkpoint_safe_client_unlock(void) {
+ toku_mutex_lock(&checkpoint_safe_mutex);
+ checkpoint_safe_lock.read_unlock();
+ toku_mutex_unlock(&checkpoint_safe_mutex);
+ toku_multi_operation_client_unlock();
+}
+
+// Initialize the checkpoint mechanism, must be called before any client operations.
+void
+toku_checkpoint_init(void) {
+ multi_operation_lock_init();
+ checkpoint_safe_lock_init();
+ initialized = true;
+}
+
+void
+toku_checkpoint_destroy(void) {
+ multi_operation_lock_destroy();
+ checkpoint_safe_lock_destroy();
+ initialized = false;
+}
+
+#define SET_CHECKPOINT_FOOTPRINT(x) CP_STATUS_VAL(CP_FOOTPRINT) = footprint_offset + x
+
+
+// Take a checkpoint of all currently open dictionaries
+int
+toku_checkpoint(CHECKPOINTER cp, TOKULOGGER logger,
+ void (*callback_f)(void*), void * extra,
+ void (*callback2_f)(void*), void * extra2,
+ checkpoint_caller_t caller_id) {
+ int footprint_offset = (int) caller_id * 1000;
+
+ assert(initialized);
+
+ (void) toku_sync_fetch_and_add(&CP_STATUS_VAL(CP_WAITERS_NOW), 1);
+ checkpoint_safe_checkpoint_lock();
+ (void) toku_sync_fetch_and_sub(&CP_STATUS_VAL(CP_WAITERS_NOW), 1);
+
+ if (CP_STATUS_VAL(CP_WAITERS_NOW) > CP_STATUS_VAL(CP_WAITERS_MAX))
+ CP_STATUS_VAL(CP_WAITERS_MAX) = CP_STATUS_VAL(CP_WAITERS_NOW); // threadsafe, within checkpoint_safe lock
+
+ SET_CHECKPOINT_FOOTPRINT(10);
+ multi_operation_checkpoint_lock();
+ SET_CHECKPOINT_FOOTPRINT(20);
+ toku_ft_open_close_lock();
+
+ SET_CHECKPOINT_FOOTPRINT(30);
+ CP_STATUS_VAL(CP_TIME_LAST_CHECKPOINT_BEGIN) = time(NULL);
+ uint64_t t_checkpoint_begin_start = toku_current_time_microsec();
+ toku_cachetable_begin_checkpoint(cp, logger);
+ uint64_t t_checkpoint_begin_end = toku_current_time_microsec();
+
+ toku_ft_open_close_unlock();
+ multi_operation_checkpoint_unlock();
+
+ SET_CHECKPOINT_FOOTPRINT(40);
+ if (callback_f) {
+ callback_f(extra); // callback is called with checkpoint_safe_lock still held
+ }
+
+ uint64_t t_checkpoint_end_start = toku_current_time_microsec();
+ toku_cachetable_end_checkpoint(cp, logger, callback2_f, extra2);
+ uint64_t t_checkpoint_end_end = toku_current_time_microsec();
+
+ SET_CHECKPOINT_FOOTPRINT(50);
+ if (logger) {
+ last_completed_checkpoint_lsn = logger->last_completed_checkpoint_lsn;
+ toku_logger_maybe_trim_log(logger, last_completed_checkpoint_lsn);
+ CP_STATUS_VAL(CP_LAST_LSN) = last_completed_checkpoint_lsn.lsn;
+ }
+
+ SET_CHECKPOINT_FOOTPRINT(60);
+ CP_STATUS_VAL(CP_TIME_LAST_CHECKPOINT_END) = time(NULL);
+ CP_STATUS_VAL(CP_TIME_LAST_CHECKPOINT_BEGIN_COMPLETE) = CP_STATUS_VAL(CP_TIME_LAST_CHECKPOINT_BEGIN);
+ CP_STATUS_VAL(CP_CHECKPOINT_COUNT)++;
+ uint64_t duration = t_checkpoint_begin_end - t_checkpoint_begin_start;
+ CP_STATUS_VAL(CP_BEGIN_TIME) += duration;
+ if (duration >= toku_checkpoint_begin_long_threshold) {
+ CP_STATUS_VAL(CP_LONG_BEGIN_TIME) += duration;
+ CP_STATUS_VAL(CP_LONG_BEGIN_COUNT) += 1;
+ }
+ duration = t_checkpoint_end_end - t_checkpoint_end_start;
+ CP_STATUS_VAL(CP_END_TIME) += duration;
+ if (duration >= toku_checkpoint_end_long_threshold) {
+ CP_STATUS_VAL(CP_LONG_END_TIME) += duration;
+ CP_STATUS_VAL(CP_LONG_END_COUNT) += 1;
+ }
+ CP_STATUS_VAL(CP_TIME_CHECKPOINT_DURATION) += (uint64_t) ((time_t) CP_STATUS_VAL(CP_TIME_LAST_CHECKPOINT_END)) - ((time_t) CP_STATUS_VAL(CP_TIME_LAST_CHECKPOINT_BEGIN));
+ CP_STATUS_VAL(CP_TIME_CHECKPOINT_DURATION_LAST) = (uint64_t) ((time_t) CP_STATUS_VAL(CP_TIME_LAST_CHECKPOINT_END)) - ((time_t) CP_STATUS_VAL(CP_TIME_LAST_CHECKPOINT_BEGIN));
+ CP_STATUS_VAL(CP_FOOTPRINT) = 0;
+
+ checkpoint_safe_checkpoint_unlock();
+ return 0;
+}
+
+#include <toku_race_tools.h>
+void __attribute__((__constructor__)) toku_checkpoint_helgrind_ignore(void);
+void
+toku_checkpoint_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&cp_status, sizeof cp_status);
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&locked_mo, sizeof locked_mo);
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&locked_cs, sizeof locked_cs);
+}
+
+#undef SET_CHECKPOINT_FOOTPRINT
diff --git a/storage/tokudb/PerconaFT/ft/cachetable/checkpoint.h b/storage/tokudb/PerconaFT/ft/cachetable/checkpoint.h
new file mode 100644
index 00000000..1aff1738
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/cachetable/checkpoint.h
@@ -0,0 +1,120 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <stdint.h>
+
+#include "ft/cachetable/cachetable.h"
+
+//Effect: Change [end checkpoint (n) - begin checkpoint (n+1)] delay to
+// new_period seconds. 0 means disable.
+void toku_set_checkpoint_period(CACHETABLE ct, uint32_t new_period);
+
+uint32_t toku_get_checkpoint_period_unlocked(CACHETABLE ct);
+
+
+/******
+ *
+ * NOTE: checkpoint_safe_lock is highest level lock
+ * multi_operation_lock is next level lock
+ * ydb_big_lock is next level lock
+ *
+ * Locks must always be taken in this sequence (highest level first).
+ *
+ */
+
+
+/******
+ * Client code must hold the checkpoint_safe lock during the following operations:
+ * - delete a dictionary via DB->remove
+ * - delete a dictionary via DB_TXN->abort(txn) (where txn created a dictionary)
+ * - rename a dictionary //TODO: Handlerton rename needs to take this
+ * //TODO: Handlerton rename needs to be recoded for transaction recovery
+ *****/
+
+void toku_checkpoint_safe_client_lock(void);
+
+void toku_checkpoint_safe_client_unlock(void);
+
+
+
+/******
+ * These functions are called from the ydb level.
+ * Client code must hold the multi_operation lock during the following operations:
+ * - insertion into multiple indexes
+ * - replace into (simultaneous delete/insert on a single key)
+ *****/
+
+void toku_multi_operation_client_lock(void);
+void toku_low_priority_multi_operation_client_lock(void);
+
+void toku_multi_operation_client_unlock(void);
+void toku_low_priority_multi_operation_client_unlock(void);
+
+
+// Initialize the checkpoint mechanism, must be called before any client operations.
+// Must pass in function pointers to take/release ydb lock.
+void toku_checkpoint_init(void);
+
+void toku_checkpoint_destroy(void);
+
+typedef enum {SCHEDULED_CHECKPOINT = 0, // "normal" checkpoint taken on checkpoint thread
+ CLIENT_CHECKPOINT = 1, // induced by client, such as FLUSH LOGS or SAVEPOINT
+ INDEXER_CHECKPOINT = 2,
+ STARTUP_CHECKPOINT = 3,
+ UPGRADE_CHECKPOINT = 4,
+ RECOVERY_CHECKPOINT = 5,
+ SHUTDOWN_CHECKPOINT = 6} checkpoint_caller_t;
+
+// Take a checkpoint of all currently open dictionaries
+// Callbacks are called during checkpoint procedure while checkpoint_safe lock is still held.
+// Callbacks are primarily intended for use in testing.
+// caller_id identifies why the checkpoint is being taken.
+int toku_checkpoint(CHECKPOINTER cp, struct tokulogger *logger,
+ void (*callback_f)(void *extra), void *extra,
+ void (*callback2_f)(void *extra2), void *extra2,
+ checkpoint_caller_t caller_id);
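+
+// Illustrative use, assuming an open cachetable `ct` and its logger:
+//   CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+//   int r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);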
+
+/******
+ * These functions are called from the ydb level.
+ * They return status information and have no side effects.
+ * Some status information may be incorrect because no locks are taken to collect status.
+ * (If checkpoint is in progress, it may overwrite status info while it is being read.)
+ *****/
+void toku_checkpoint_get_status(CACHETABLE ct, CHECKPOINT_STATUS stat);
diff --git a/storage/tokudb/PerconaFT/ft/comparator.h b/storage/tokudb/PerconaFT/ft/comparator.h
new file mode 100644
index 00000000..dfafd715
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/comparator.h
@@ -0,0 +1,150 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+#include <string.h>
+
+#include "portability/memory.h"
+
+#include "util/dbt.h"
+
+typedef int (*ft_compare_func)(DB *db, const DBT *a, const DBT *b);
+
+int toku_keycompare(const void *key1, uint32_t key1len, const void *key2, uint32_t key2len);
+
+int toku_builtin_compare_fun (DB *, const DBT *, const DBT*) __attribute__((__visibility__("default")));
+
+namespace toku {
+
+ // a comparator object encapsulates the data necessary for
+ // comparing two keys in a fractal tree. it further understands
+ // that points may be positive or negative infinity.
+
+ class comparator {
+ void init(ft_compare_func cmp, DESCRIPTOR desc, uint8_t memcmp_magic) {
+ _cmp = cmp;
+ _fake_db->cmp_descriptor = desc;
+ _memcmp_magic = memcmp_magic;
+ }
+
+ public:
+ // This magic value is reserved to mean that the magic has not been set.
+ static const uint8_t MEMCMP_MAGIC_NONE = 0;
+
+ void create(ft_compare_func cmp, DESCRIPTOR desc, uint8_t memcmp_magic = MEMCMP_MAGIC_NONE) {
+ XCALLOC(_fake_db);
+ init(cmp, desc, memcmp_magic);
+ }
+
+ // inherit the attributes of another comparator, but keep our own
+ // copy of fake_db that is owned separately from the one given.
+ void inherit(const comparator &cmp) {
+ invariant_notnull(_fake_db);
+ invariant_notnull(cmp._cmp);
+ invariant_notnull(cmp._fake_db);
+ init(cmp._cmp, cmp._fake_db->cmp_descriptor, cmp._memcmp_magic);
+ }
+
+ // like inherit, but doesn't require that this comparator
+ // was already created
+ void create_from(const comparator &cmp) {
+ XCALLOC(_fake_db);
+ inherit(cmp);
+ }
+
+ void destroy() {
+ toku_free(_fake_db);
+ }
+
+ const DESCRIPTOR_S *get_descriptor() const {
+ return _fake_db->cmp_descriptor;
+ }
+
+ ft_compare_func get_compare_func() const {
+ return _cmp;
+ }
+
+ uint8_t get_memcmp_magic() const {
+ return _memcmp_magic;
+ }
+
+ bool valid() const {
+ return _cmp != nullptr;
+ }
+
+ inline bool dbt_has_memcmp_magic(const DBT *dbt) const {
+ return *reinterpret_cast<const char *>(dbt->data) == _memcmp_magic;
+ }
+
+ int operator()(const DBT *a, const DBT *b) const {
+ if (__builtin_expect(toku_dbt_is_infinite(a) || toku_dbt_is_infinite(b), 0)) {
+ return toku_dbt_infinite_compare(a, b);
+ } else if (_memcmp_magic != MEMCMP_MAGIC_NONE
+ // If `a' has the memcmp magic..
+ && dbt_has_memcmp_magic(a)
+ // ..then we expect `b' to also have the memcmp magic
+ && __builtin_expect(dbt_has_memcmp_magic(b), 1)) {
+ return toku_builtin_compare_fun(nullptr, a, b);
+ } else {
+ // yikes, const sadness here
+ return _cmp(const_cast<DB *>(_fake_db), a, b);
+ }
+ }
+
+ private:
+ DB *_fake_db;
+ ft_compare_func _cmp;
+ uint8_t _memcmp_magic;
+ };
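+
+    // Illustrative lifecycle (`my_cmp` and `my_desc` are hypothetical user
+    // compare-callback / descriptor values; the keys are DBTs):
+    //
+    //     toku::comparator cmp;
+    //     cmp.create(my_cmp, my_desc);    // allocates the comparator's fake DB
+    //     int c = cmp(&key_a, &key_b);    // ordering that understands +/- infinity keys
+    //     cmp.destroy();                  // frees the fake DB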
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/ft/cursor.cc b/storage/tokudb/PerconaFT/ft/cursor.cc
new file mode 100644
index 00000000..5402763f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/cursor.cc
@@ -0,0 +1,456 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "ft/ft-internal.h"
+
+#include "ft/cursor.h"
+#include "ft/leafentry.h"
+#include "ft/txn/txn.h"
+#include "util/dbt.h"
+
+int toku_ft_cursor_create(FT_HANDLE ft_handle, FT_CURSOR cursor, TOKUTXN ttxn,
+ enum cursor_read_type read_type,
+ bool disable_prefetching,
+ bool is_temporary) {
+ if (read_type == C_READ_SNAPSHOT) {
+ invariant(ttxn != NULL);
+ int accepted = toku_txn_reads_txnid(ft_handle->ft->h->root_xid_that_created, ttxn, false); // last parameter is irrelevant
+ if (accepted != TOKUDB_ACCEPT) {
+ invariant(accepted == 0);
+ return TOKUDB_MVCC_DICTIONARY_TOO_NEW;
+ }
+ }
+
+ memset(cursor, 0, sizeof(*cursor));
+ cursor->ft_handle = ft_handle;
+ cursor->ttxn = ttxn;
+ cursor->read_type = read_type;
+ cursor->disable_prefetching = disable_prefetching;
+ cursor->is_temporary = is_temporary;
+ return 0;
+}
+
+void toku_ft_cursor_destroy(FT_CURSOR cursor) {
+ toku_destroy_dbt(&cursor->key);
+ toku_destroy_dbt(&cursor->val);
+ toku_destroy_dbt(&cursor->range_lock_left_key);
+ toku_destroy_dbt(&cursor->range_lock_right_key);
+}
+
+// deprecated, should only be used by tests
+int toku_ft_cursor(FT_HANDLE ft_handle, FT_CURSOR *cursorptr, TOKUTXN ttxn,
+ bool is_snapshot_read, bool disable_prefetching) {
+ FT_CURSOR XCALLOC(cursor);
+ enum cursor_read_type read_type = is_snapshot_read ? C_READ_SNAPSHOT : C_READ_ANY;
+ int r = toku_ft_cursor_create(ft_handle, cursor, ttxn, read_type, disable_prefetching, false);
+ if (r == 0) {
+ *cursorptr = cursor;
+ } else {
+ toku_free(cursor);
+ }
+ return r;
+}
+
+// deprecated, should only be used by tests
+void toku_ft_cursor_close(FT_CURSOR cursor) {
+ toku_ft_cursor_destroy(cursor);
+ toku_free(cursor);
+}
+
+void toku_ft_cursor_remove_restriction(FT_CURSOR cursor) {
+ cursor->out_of_range_error = 0;
+ cursor->direction = 0;
+}
+
+void toku_ft_cursor_set_check_interrupt_cb(FT_CURSOR cursor, FT_CHECK_INTERRUPT_CALLBACK cb, void *extra) {
+ cursor->interrupt_cb = cb;
+ cursor->interrupt_cb_extra = extra;
+}
+
+void toku_ft_cursor_set_leaf_mode(FT_CURSOR cursor) {
+ cursor->is_leaf_mode = true;
+}
+
+int toku_ft_cursor_is_leaf_mode(FT_CURSOR cursor) {
+ return cursor->is_leaf_mode;
+}
+
+// TODO: Rename / cleanup - this has nothing to do with locking
+void toku_ft_cursor_set_range_lock(FT_CURSOR cursor,
+ const DBT *left, const DBT *right,
+ bool left_is_neg_infty, bool right_is_pos_infty,
+ int out_of_range_error) {
+ // Destroy any existing keys and then clone the given left, right keys
+ toku_destroy_dbt(&cursor->range_lock_left_key);
+ if (left_is_neg_infty) {
+ cursor->left_is_neg_infty = true;
+ } else {
+ toku_clone_dbt(&cursor->range_lock_left_key, *left);
+ }
+
+ toku_destroy_dbt(&cursor->range_lock_right_key);
+ if (right_is_pos_infty) {
+ cursor->right_is_pos_infty = true;
+ } else {
+ toku_clone_dbt(&cursor->range_lock_right_key, *right);
+ }
+
+ // TOKUDB_FOUND_BUT_REJECTED is a DB_NOTFOUND with instructions to stop looking. (Faster)
+ cursor->out_of_range_error = out_of_range_error == DB_NOTFOUND ? TOKUDB_FOUND_BUT_REJECTED : out_of_range_error;
+ cursor->direction = 0;
+}
+
+void toku_ft_cursor_set_prefetching(FT_CURSOR cursor) {
+ cursor->prefetching = true;
+}
+
+bool toku_ft_cursor_prefetching(FT_CURSOR cursor) {
+ return cursor->prefetching;
+}
+
+// Return true if the cursor is uninitialized, false otherwise.
+bool toku_ft_cursor_not_set(FT_CURSOR cursor) {
+ assert((cursor->key.data==NULL) == (cursor->val.data==NULL));
+ return (bool)(cursor->key.data == NULL);
+}
+
+struct ft_cursor_search_struct {
+ FT_GET_CALLBACK_FUNCTION getf;
+ void *getf_v;
+ FT_CURSOR cursor;
+ ft_search *search;
+};
+
+/* search for the first kv pair that matches the search object */
+static int ft_cursor_search(FT_CURSOR cursor, ft_search *search,
+ FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool can_bulk_fetch) {
+ int r = toku_ft_search(cursor->ft_handle, search, getf, getf_v, cursor, can_bulk_fetch);
+ return r;
+}
+
+static inline int compare_k_x(FT_HANDLE ft_handle, const DBT *k, const DBT *x) {
+ return ft_handle->ft->cmp(k, x);
+}
+
+int toku_ft_cursor_compare_one(const ft_search &UU(search), const DBT *UU(x)) {
+ return 1;
+}
+
+static int ft_cursor_compare_set(const ft_search &search, const DBT *x) {
+ FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context);
+ return compare_k_x(ft_handle, search.k, x) <= 0; /* return min xy: kv <= xy */
+}
+
+static int
+ft_cursor_current_getf(uint32_t keylen, const void *key,
+ uint32_t vallen, const void *val,
+ void *v, bool lock_only) {
+ struct ft_cursor_search_struct *CAST_FROM_VOIDP(bcss, v);
+ int r;
+ if (key==NULL) {
+ r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, lock_only);
+ } else {
+ FT_CURSOR cursor = bcss->cursor;
+ DBT newkey;
+ toku_fill_dbt(&newkey, key, keylen);
+ if (compare_k_x(cursor->ft_handle, &cursor->key, &newkey) != 0) {
+ r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, lock_only); // This was once DB_KEYEMPTY
+ if (r==0) r = TOKUDB_FOUND_BUT_REJECTED;
+ }
+ else
+ r = bcss->getf(keylen, key, vallen, val, bcss->getf_v, lock_only);
+ }
+ return r;
+}
+
+static int ft_cursor_compare_next(const ft_search &search, const DBT *x) {
+ FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context);
+ return compare_k_x(ft_handle, search.k, x) < 0; /* return min xy: kv < xy */
+}
+
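+// DB_CURRENT re-searches the tree for the cursor's current key and rejects the
+// result if a different key is found; DB_CURRENT_BINDING simply returns the
+// cached key/val pair.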
+int toku_ft_cursor_current(FT_CURSOR cursor, int op, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ if (toku_ft_cursor_not_set(cursor)) {
+ return EINVAL;
+ }
+ cursor->direction = 0;
+ if (op == DB_CURRENT) {
+ struct ft_cursor_search_struct bcss = {getf, getf_v, cursor, 0};
+ ft_search search;
+ ft_search_init(&search, ft_cursor_compare_set, FT_SEARCH_LEFT, &cursor->key, nullptr, cursor->ft_handle);
+ int r = toku_ft_search(cursor->ft_handle, &search, ft_cursor_current_getf, &bcss, cursor, false);
+ ft_search_finish(&search);
+ return r;
+ }
+ return getf(cursor->key.size, cursor->key.data, cursor->val.size, cursor->val.data, getf_v, false); // ft_cursor_copyout(cursor, outkey, outval);
+}
+
+int toku_ft_cursor_first(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ cursor->direction = 0;
+ ft_search search;
+ ft_search_init(&search, toku_ft_cursor_compare_one, FT_SEARCH_LEFT, nullptr, nullptr, cursor->ft_handle);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, false);
+ ft_search_finish(&search);
+ return r;
+}
+
+int toku_ft_cursor_last(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ cursor->direction = 0;
+ ft_search search;
+ ft_search_init(&search, toku_ft_cursor_compare_one, FT_SEARCH_RIGHT, nullptr, nullptr, cursor->ft_handle);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, false);
+ ft_search_finish(&search);
+ return r;
+}
+
+int toku_ft_cursor_check_restricted_range(FT_CURSOR c, const void *key, uint32_t keylen) {
+ if (c->out_of_range_error) {
+ FT ft = c->ft_handle->ft;
+ DBT found_key;
+ toku_fill_dbt(&found_key, key, keylen);
+ if ((!c->left_is_neg_infty && c->direction <= 0 && ft->cmp(&found_key, &c->range_lock_left_key) < 0) ||
+ (!c->right_is_pos_infty && c->direction >= 0 && ft->cmp(&found_key, &c->range_lock_right_key) > 0)) {
+ invariant(c->out_of_range_error);
+ return c->out_of_range_error;
+ }
+ }
+ // Reset cursor direction to mitigate risk if some query type doesn't set the direction.
+ // It is always correct to check both bounds (which happens when direction==0) but it can be slower.
+ c->direction = 0;
+ return 0;
+}
+
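+// Bulk-fetch fast path: starting from `index` in the basement node data `bd`,
+// walk in `direction` and hand each visible leafentry to `getf`, continuing
+// while getf returns TOKUDB_CURSOR_CONTINUE and stopping at the node boundary
+// or at the cursor's range restriction.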
+int toku_ft_cursor_shortcut(FT_CURSOR cursor, int direction, uint32_t index, bn_data *bd,
+ FT_GET_CALLBACK_FUNCTION getf, void *getf_v,
+ uint32_t *keylen, void **key, uint32_t *vallen, void **val) {
+ int r = 0;
+ // if we are searching towards the end, limit is last element
+ // if we are searching towards the beginning, limit is the first element
+ uint32_t limit = (direction > 0) ? (bd->num_klpairs() - 1) : 0;
+
+    // Starting from the previous entry, find the first real (non-provisionally-deleted) leafentry.
+ while (index != limit) {
+ index += direction;
+ LEAFENTRY le;
+ void* foundkey = NULL;
+ uint32_t foundkeylen = 0;
+
+ r = bd->fetch_klpair(index, &le, &foundkeylen, &foundkey);
+ invariant_zero(r);
+
+ if (toku_ft_cursor_is_leaf_mode(cursor) || !le_val_is_del(le, cursor->read_type, cursor->ttxn)) {
+ le_extract_val(
+ le,
+ toku_ft_cursor_is_leaf_mode(cursor),
+ cursor->read_type,
+ cursor->ttxn,
+ vallen,
+ val
+ );
+ *key = foundkey;
+ *keylen = foundkeylen;
+
+ cursor->direction = direction;
+ r = toku_ft_cursor_check_restricted_range(cursor, *key, *keylen);
+ if (r!=0) {
+ paranoid_invariant(r == cursor->out_of_range_error);
+ // We already got at least one entry from the bulk fetch.
+ // Return 0 (instead of out of range error).
+ r = 0;
+ break;
+ }
+ r = getf(*keylen, *key, *vallen, *val, getf_v, false);
+ if (r == TOKUDB_CURSOR_CONTINUE) {
+ continue;
+ }
+ else {
+ break;
+ }
+ }
+ }
+
+ return r;
+}
+
+int toku_ft_cursor_next(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ cursor->direction = +1;
+ ft_search search;
+ ft_search_init(&search, ft_cursor_compare_next, FT_SEARCH_LEFT, &cursor->key, nullptr, cursor->ft_handle);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, true);
+ ft_search_finish(&search);
+ if (r == 0) {
+ toku_ft_cursor_set_prefetching(cursor);
+ }
+ return r;
+}
+
+static int ft_cursor_search_eq_k_x_getf(uint32_t keylen, const void *key,
+ uint32_t vallen, const void *val,
+ void *v, bool lock_only) {
+ struct ft_cursor_search_struct *CAST_FROM_VOIDP(bcss, v);
+ int r;
+ if (key==NULL) {
+ r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, false);
+ } else {
+ FT_CURSOR cursor = bcss->cursor;
+ DBT newkey;
+ toku_fill_dbt(&newkey, key, keylen);
+ if (compare_k_x(cursor->ft_handle, bcss->search->k, &newkey) == 0) {
+ r = bcss->getf(keylen, key, vallen, val, bcss->getf_v, lock_only);
+ } else {
+ r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, lock_only);
+ if (r==0) r = TOKUDB_FOUND_BUT_REJECTED;
+ }
+ }
+ return r;
+}
+
+/* search for the kv pair that matches the search object and is equal to k */
+static int ft_cursor_search_eq_k_x(FT_CURSOR cursor, ft_search *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ struct ft_cursor_search_struct bcss = {getf, getf_v, cursor, search};
+ int r = toku_ft_search(cursor->ft_handle, search, ft_cursor_search_eq_k_x_getf, &bcss, cursor, false);
+ return r;
+}
+
+static int ft_cursor_compare_prev(const ft_search &search, const DBT *x) {
+ FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context);
+ return compare_k_x(ft_handle, search.k, x) > 0; /* return max xy: kv > xy */
+}
+
+int toku_ft_cursor_prev(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ cursor->direction = -1;
+ ft_search search;
+ ft_search_init(&search, ft_cursor_compare_prev, FT_SEARCH_RIGHT, &cursor->key, nullptr, cursor->ft_handle);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, true);
+ ft_search_finish(&search);
+ return r;
+}
+
+int toku_ft_cursor_compare_set_range(const ft_search &search, const DBT *x) {
+ FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context);
+ return compare_k_x(ft_handle, search.k, x) <= 0; /* return kv <= xy */
+}
+
+int toku_ft_cursor_set(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ cursor->direction = 0;
+ ft_search search;
+ ft_search_init(&search, toku_ft_cursor_compare_set_range, FT_SEARCH_LEFT, key, nullptr, cursor->ft_handle);
+ int r = ft_cursor_search_eq_k_x(cursor, &search, getf, getf_v);
+ ft_search_finish(&search);
+ return r;
+}
+
+int toku_ft_cursor_set_range(FT_CURSOR cursor, DBT *key, DBT *key_bound, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ cursor->direction = 0;
+ ft_search search;
+ ft_search_init(&search, toku_ft_cursor_compare_set_range, FT_SEARCH_LEFT, key, key_bound, cursor->ft_handle);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, false);
+ ft_search_finish(&search);
+ return r;
+}
+
+static int ft_cursor_compare_set_range_reverse(const ft_search &search, const DBT *x) {
+ FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context);
+ return compare_k_x(ft_handle, search.k, x) >= 0; /* return kv >= xy */
+}
+
+int toku_ft_cursor_set_range_reverse(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ cursor->direction = 0;
+ ft_search search;
+ ft_search_init(&search, ft_cursor_compare_set_range_reverse, FT_SEARCH_RIGHT, key, nullptr, cursor->ft_handle);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, false);
+ ft_search_finish(&search);
+ return r;
+}
+
+//TODO: When tests have been rewritten, get rid of this function.
+//Only used by tests.
+int toku_ft_cursor_get (FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, int get_flags) {
+ int op = get_flags & DB_OPFLAGS_MASK;
+ if (get_flags & ~DB_OPFLAGS_MASK)
+ return EINVAL;
+
+ switch (op) {
+ case DB_CURRENT:
+ case DB_CURRENT_BINDING:
+ return toku_ft_cursor_current(cursor, op, getf, getf_v);
+ case DB_FIRST:
+ return toku_ft_cursor_first(cursor, getf, getf_v);
+ case DB_LAST:
+ return toku_ft_cursor_last(cursor, getf, getf_v);
+ case DB_NEXT:
+ if (toku_ft_cursor_not_set(cursor)) {
+ return toku_ft_cursor_first(cursor, getf, getf_v);
+ } else {
+ return toku_ft_cursor_next(cursor, getf, getf_v);
+ }
+ case DB_PREV:
+ if (toku_ft_cursor_not_set(cursor)) {
+ return toku_ft_cursor_last(cursor, getf, getf_v);
+ } else {
+ return toku_ft_cursor_prev(cursor, getf, getf_v);
+ }
+ case DB_SET:
+ return toku_ft_cursor_set(cursor, key, getf, getf_v);
+ case DB_SET_RANGE:
+ return toku_ft_cursor_set_range(cursor, key, nullptr, getf, getf_v);
+    default: ; // Fall out of the switch and return EINVAL
+ }
+ return EINVAL;
+}
+
+void toku_ft_cursor_peek(FT_CURSOR cursor, const DBT **pkey, const DBT **pval) {
+ *pkey = &cursor->key;
+ *pval = &cursor->val;
+}
+
+bool toku_ft_cursor_uninitialized(FT_CURSOR c) {
+ return toku_ft_cursor_not_set(c);
+}
+
+int toku_ft_lookup(FT_HANDLE ft_handle, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ FT_CURSOR cursor;
+ int r = toku_ft_cursor(ft_handle, &cursor, NULL, false, false);
+ if (r != 0) {
+ return r;
+ }
+
+ r = toku_ft_cursor_set(cursor, k, getf, getf_v);
+
+ toku_ft_cursor_close(cursor);
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/cursor.h b/storage/tokudb/PerconaFT/ft/cursor.h
new file mode 100644
index 00000000..194cb74c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/cursor.h
@@ -0,0 +1,186 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+#include "ft/ft-internal.h"
+
+/* an ft cursor is represented as a kv pair in a tree */
+struct ft_cursor {
+ FT_HANDLE ft_handle;
+ DBT key, val; // The key-value pair that the cursor currently points to
+ DBT range_lock_left_key, range_lock_right_key;
+ bool prefetching;
+ bool left_is_neg_infty, right_is_pos_infty;
+    enum cursor_read_type read_type; // which view of the data the query reads (e.g. C_READ_ANY or C_READ_SNAPSHOT)
+ bool is_leaf_mode;
+ bool disable_prefetching;
+ bool is_temporary;
+ int out_of_range_error;
+ int direction;
+ TOKUTXN ttxn;
+ FT_CHECK_INTERRUPT_CALLBACK interrupt_cb;
+ void *interrupt_cb_extra;
+};
+typedef struct ft_cursor *FT_CURSOR;
+
+enum ft_search_direction_e {
+ FT_SEARCH_LEFT = 1, /* search left -> right, finds min xy as defined by the compare function */
+ FT_SEARCH_RIGHT = 2, /* search right -> left, finds max xy as defined by the compare function */
+};
+
+struct ft_search;
+
+/* the search compare function should return 0 for all xy < kv and 1 for all xy >= kv
+ the compare function should be a step function from 0 to 1 for a left to right search
+ and 1 to 0 for a right to left search */
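+
+/* For example, the left-to-right compare used for DB_SET_RANGE
+   (toku_ft_cursor_compare_set_range, defined in cursor.cc) is (k <= x):
+   it returns 0 for every x < k and 1 for every x >= k. */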
+
+typedef int (*ft_search_compare_func_t)(const struct ft_search &, const DBT *);
+
+/* the search object contains the compare function, search direction, and the kv pair that
+ is used in the compare function. the context is the user's private data */
+
+struct ft_search {
+ ft_search_compare_func_t compare;
+ enum ft_search_direction_e direction;
+ const DBT *k;
+ void *context;
+
+ // To fix #3522, we need to remember the pivots that we have searched unsuccessfully.
+    // For example, when searching right (left), we call search->compare() on the ith pivot key. If search->compare() returns
+ // nonzero, then we search the ith subtree. If that subsearch returns DB_NOTFOUND then maybe the key isn't present in the
+ // tree. But maybe we are doing a DB_NEXT (DB_PREV), and everything was deleted. So we remember the pivot, and later we
+ // will only search subtrees which contain keys that are bigger than (less than) the pivot.
+    // The code is a kludge (even before this fix), and interacts strangely with TOKUDB_FOUND_BUT_REJECTED (which is there
+    // because, after a failed DB_GET, we would otherwise keep searching the rest of the tree). We probably should write the various lookup
+ // codes (NEXT, PREV, CURRENT, etc) more directly, and we should probably use a binary search within a node to search the
+ // pivots so that we can support a larger fanout.
+ // These changes (3312+3522) also (probably) introduce an isolation error (#3529).
+ // We must make sure we lock the right range for proper isolation level.
+ // There's probably a bug in which the following could happen.
+ // Thread A: Searches through deleted keys A,B,D,E and finds nothing, so searches the next leaf, releasing the YDB lock.
+ // Thread B: Inserts key C, and acquires the write lock, then commits.
+    // Thread A: Resumes, searching F,G,H and returns success. Thread A then read-locks the range A-H, and doesn't notice
+ // the value C inserted by thread B. Thus a failure of serialization.
+ // See #3529.
+ // There also remains a potential thrashing problem. When we get a TOKUDB_TRY_AGAIN, we unpin everything. There's
+ // no guarantee that we will get everything pinned again. We ought to keep nodes pinned when we retry, except that on the
+ // way out with a DB_NOTFOUND we ought to unpin those nodes. See #3528.
+ DBT pivot_bound;
+ const DBT *k_bound;
+};
+
+/* initialize the search compare object */
+static inline ft_search *ft_search_init(ft_search *search, ft_search_compare_func_t compare,
+ enum ft_search_direction_e direction,
+ const DBT *k, const DBT *k_bound, void *context) {
+ search->compare = compare;
+ search->direction = direction;
+ search->k = k;
+ search->context = context;
+ toku_init_dbt(&search->pivot_bound);
+ search->k_bound = k_bound;
+ return search;
+}
+
+static inline void ft_search_finish(ft_search *search) {
+ toku_destroy_dbt(&search->pivot_bound);
+}
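+
+/* Illustrative only (mirrors toku_ft_cursor_set_range in cursor.cc): a point
+   query follows the init / search / finish pattern:
+
+       ft_search search;
+       ft_search_init(&search, toku_ft_cursor_compare_set_range,
+                      FT_SEARCH_LEFT, key, nullptr, cursor->ft_handle);
+       int r = toku_ft_search(cursor->ft_handle, &search, getf, getf_v, cursor, false);
+       ft_search_finish(&search);
+*/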
+
+
+int toku_ft_cursor_create(FT_HANDLE ft_handle, FT_CURSOR cursor, TOKUTXN txn,
+ enum cursor_read_type read_type,
+ bool disable_prefetching,
+ bool is_temporary);
+
+void toku_ft_cursor_destroy(FT_CURSOR cursor);
+
+int toku_ft_lookup(FT_HANDLE ft_h, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result));
+
+void toku_ft_cursor_set_prefetching(FT_CURSOR cursor);
+
+bool toku_ft_cursor_prefetching(FT_CURSOR cursor);
+
+bool toku_ft_cursor_not_set(FT_CURSOR cursor);
+
+void toku_ft_cursor_set_leaf_mode(FT_CURSOR cursor);
+
+void toku_ft_cursor_remove_restriction(FT_CURSOR cursor);
+
+void toku_ft_cursor_set_check_interrupt_cb(FT_CURSOR cursor, FT_CHECK_INTERRUPT_CALLBACK cb, void *extra);
+
+int toku_ft_cursor_is_leaf_mode(FT_CURSOR cursor);
+
+void toku_ft_cursor_set_range_lock(FT_CURSOR, const DBT *, const DBT *, bool, bool, int);
+
+int toku_ft_cursor_first(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result));
+
+int toku_ft_cursor_last(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result));
+
+int toku_ft_cursor_next(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result));
+
+int toku_ft_cursor_prev(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result));
+
+int toku_ft_cursor_current(FT_CURSOR cursor, int op, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result));
+
+int toku_ft_cursor_set(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result));
+
+int toku_ft_cursor_set_range(FT_CURSOR cursor, DBT *key, DBT *key_bound, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result));
+
+int toku_ft_cursor_set_range_reverse(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result));
+
+bool toku_ft_cursor_uninitialized(FT_CURSOR cursor) __attribute__ ((warn_unused_result));
+
+void toku_ft_cursor_peek(FT_CURSOR cursor, const DBT **pkey, const DBT **pval);
+
+int toku_ft_cursor_check_restricted_range(FT_CURSOR cursor, const void *key, uint32_t keylen);
+
+int toku_ft_cursor_shortcut(FT_CURSOR cursor, int direction, uint32_t index, bn_data *bd,
+ FT_GET_CALLBACK_FUNCTION getf, void *getf_v,
+ uint32_t *keylen, void **key, uint32_t *vallen, void **val);
+
+// used by get_key_after_bytes
+int toku_ft_cursor_compare_one(const ft_search &search, const DBT *x);
+int toku_ft_cursor_compare_set_range(const ft_search &search, const DBT *x);
+
+// deprecated, should only be used by tests, and eventually removed
+int toku_ft_cursor(FT_HANDLE ft_handle, FT_CURSOR *ftcursor_p, TOKUTXN txn, bool, bool) __attribute__ ((warn_unused_result));
+void toku_ft_cursor_close(FT_CURSOR cursor);
+int toku_ft_cursor_get(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, int get_flags);
+int toku_ft_cursor_delete(FT_CURSOR cursor, int flags, TOKUTXN txn);
diff --git a/storage/tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc b/storage/tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc
new file mode 100644
index 00000000..439e0688
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc
@@ -0,0 +1,373 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "ft/serialize/block_table.h"
+#include "ft/ft-cachetable-wrappers.h"
+#include "ft/ft-flusher.h"
+#include "ft/ft-internal.h"
+#include "ft/ft.h"
+#include "ft/node.h"
+
+#include <util/context.h>
+
+static void
+ftnode_get_key_and_fullhash(
+ BLOCKNUM* cachekey,
+ uint32_t* fullhash,
+ void* extra)
+{
+ FT ft = (FT) extra;
+ BLOCKNUM blocknum;
+ ft->blocktable.allocate_blocknum(&blocknum, ft);
+ *cachekey = blocknum;
+ *fullhash = toku_cachetable_hash(ft->cf, blocknum);
+}
+
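+// The dependent nodes' cachetable pairs and dirty bits are passed down to the
+// cachetable so that, if necessary, those nodes are written out for checkpoint
+// before the new pair is created.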
+void
+cachetable_put_empty_node_with_dep_nodes(
+ FT ft,
+ uint32_t num_dependent_nodes,
+ FTNODE* dependent_nodes,
+ BLOCKNUM* blocknum, //output
+ uint32_t* fullhash, //output
+ FTNODE* result)
+{
+ FTNODE XCALLOC(new_node);
+ PAIR dependent_pairs[num_dependent_nodes];
+ enum cachetable_dirty dependent_dirty_bits[num_dependent_nodes];
+ for (uint32_t i = 0; i < num_dependent_nodes; i++) {
+ dependent_pairs[i] = dependent_nodes[i]->ct_pair;
+ dependent_dirty_bits[i] = (enum cachetable_dirty) dependent_nodes[i]->dirty();
+ }
+
+ toku_cachetable_put_with_dep_pairs(
+ ft->cf,
+ ftnode_get_key_and_fullhash,
+ new_node,
+ make_pair_attr(sizeof(FTNODE)),
+ get_write_callbacks_for_node(ft),
+ ft,
+ num_dependent_nodes,
+ dependent_pairs,
+ dependent_dirty_bits,
+ blocknum,
+ fullhash,
+ toku_ftnode_save_ct_pair);
+ *result = new_node;
+}
+
+void
+create_new_ftnode_with_dep_nodes(
+ FT ft,
+ FTNODE *result,
+ int height,
+ int n_children,
+ uint32_t num_dependent_nodes,
+ FTNODE* dependent_nodes)
+{
+ uint32_t fullhash = 0;
+ BLOCKNUM blocknum;
+
+ cachetable_put_empty_node_with_dep_nodes(
+ ft,
+ num_dependent_nodes,
+ dependent_nodes,
+ &blocknum,
+ &fullhash,
+ result);
+
+ assert(ft->h->basementnodesize > 0);
+ if (height == 0) {
+ assert(n_children > 0);
+ }
+
+ toku_initialize_empty_ftnode(
+ *result,
+ blocknum,
+ height,
+ n_children,
+ ft->h->layout_version,
+ ft->h->flags);
+
+ (*result)->fullhash = fullhash;
+}
+
+void
+toku_create_new_ftnode (
+ FT_HANDLE t,
+ FTNODE *result,
+ int height,
+ int n_children)
+{
+ return create_new_ftnode_with_dep_nodes(
+ t->ft,
+ result,
+ height,
+ n_children,
+ 0,
+ NULL);
+}
+
+//
+// This function initially pins the node with a PL_READ lock. If ancestor
+// message application turns out to be needed, the node is re-pinned with a
+// PL_WRITE_CHEAP lock instead.
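+//
+// The only nonzero return value is TOKUDB_TRY_AGAIN (asserted below), meaning
+// the node could not be pinned without blocking and the caller should retry.
+//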
+int
+toku_pin_ftnode_for_query(
+ FT_HANDLE ft_handle,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ UNLOCKERS unlockers,
+ ANCESTORS ancestors,
+ const pivot_bounds &bounds,
+ ftnode_fetch_extra *bfe,
+    bool apply_ancestor_messages, // this bool is probably temporary (#3972); once we know how range query estimates work, we will revisit this
+ FTNODE *node_p,
+ bool* msgs_applied)
+{
+ void *node_v;
+ *msgs_applied = false;
+ FTNODE node = nullptr;
+ MSN max_msn_in_path = ZERO_MSN;
+ bool needs_ancestors_messages = false;
+ // this function assumes that if you want ancestor messages applied,
+ // you are doing a read for a query. This is so we can make some optimizations
+ // below.
+ if (apply_ancestor_messages) {
+ paranoid_invariant(bfe->type == ftnode_fetch_subset);
+ }
+
+ int r = toku_cachetable_get_and_pin_nonblocking(
+ ft_handle->ft->cf,
+ blocknum,
+ fullhash,
+ &node_v,
+ get_write_callbacks_for_node(ft_handle->ft),
+ toku_ftnode_fetch_callback,
+ toku_ftnode_pf_req_callback,
+ toku_ftnode_pf_callback,
+ PL_READ,
+ bfe, //read_extraargs
+ unlockers);
+ if (r != 0) {
+ assert(r == TOKUDB_TRY_AGAIN); // Any other error and we should bomb out ASAP.
+ goto exit;
+ }
+ node = static_cast<FTNODE>(node_v);
+ if (apply_ancestor_messages && node->height == 0) {
+ needs_ancestors_messages = toku_ft_leaf_needs_ancestors_messages(
+ ft_handle->ft,
+ node,
+ ancestors,
+ bounds,
+ &max_msn_in_path,
+ bfe->child_to_read
+ );
+ if (needs_ancestors_messages) {
+ toku::context apply_messages_ctx(CTX_MESSAGE_APPLICATION);
+
+ toku_unpin_ftnode_read_only(ft_handle->ft, node);
+ int rr = toku_cachetable_get_and_pin_nonblocking(
+ ft_handle->ft->cf,
+ blocknum,
+ fullhash,
+ &node_v,
+ get_write_callbacks_for_node(ft_handle->ft),
+ toku_ftnode_fetch_callback,
+ toku_ftnode_pf_req_callback,
+ toku_ftnode_pf_callback,
+ PL_WRITE_CHEAP,
+ bfe, //read_extraargs
+ unlockers);
+ if (rr != 0) {
+ assert(rr == TOKUDB_TRY_AGAIN); // Any other error and we should bomb out ASAP.
+ r = TOKUDB_TRY_AGAIN;
+ goto exit;
+ }
+ node = static_cast<FTNODE>(node_v);
+ toku_apply_ancestors_messages_to_node(
+ ft_handle,
+ node,
+ ancestors,
+ bounds,
+ msgs_applied,
+ bfe->child_to_read
+ );
+ } else {
+ // At this point, we aren't going to run
+ // toku_apply_ancestors_messages_to_node but that doesn't
+ // mean max_msn_applied shouldn't be updated if possible
+ // (this saves the CPU work involved in
+ // toku_ft_leaf_needs_ancestors_messages).
+ //
+ // We still have a read lock, so we have not resolved
+ // checkpointing. If the node is pending and dirty, we
+ // can't modify anything, including max_msn, until we
+ // resolve checkpointing. If we do, the node might get
+ // written out that way as part of a checkpoint with a
+ // root that was already written out with a smaller
+ // max_msn. During recovery, we would then inject a
+ // message based on the root's max_msn, and that message
+ // would get filtered by the leaf because it had too high
+ // a max_msn value. (see #5407)
+ //
+ // So for simplicity we only update the max_msn if the
+ // node is clean. That way, in order for the node to get
+ // written out, it would have to be dirtied. That
+ // requires a write lock, and a write lock requires you to
+ // resolve checkpointing.
+ if (!node->dirty()) {
+ toku_ft_bn_update_max_msn(node, max_msn_in_path, bfe->child_to_read);
+ }
+ }
+ }
+ *node_p = node;
+exit:
+ return r;
+}
+
+void
+toku_pin_ftnode_with_dep_nodes(
+ FT ft,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ ftnode_fetch_extra *bfe,
+ pair_lock_type lock_type,
+ uint32_t num_dependent_nodes,
+ FTNODE *dependent_nodes,
+ FTNODE *node_p,
+ bool move_messages)
+{
+ void *node_v;
+ PAIR dependent_pairs[num_dependent_nodes];
+ enum cachetable_dirty dependent_dirty_bits[num_dependent_nodes];
+ for (uint32_t i = 0; i < num_dependent_nodes; i++) {
+ dependent_pairs[i] = dependent_nodes[i]->ct_pair;
+ dependent_dirty_bits[i] = (enum cachetable_dirty) dependent_nodes[i]->dirty();
+ }
+
+ int r = toku_cachetable_get_and_pin_with_dep_pairs(
+ ft->cf,
+ blocknum,
+ fullhash,
+ &node_v,
+ get_write_callbacks_for_node(ft),
+ toku_ftnode_fetch_callback,
+ toku_ftnode_pf_req_callback,
+ toku_ftnode_pf_callback,
+ lock_type,
+ bfe,
+ num_dependent_nodes,
+ dependent_pairs,
+ dependent_dirty_bits
+ );
+ invariant_zero(r);
+ FTNODE node = (FTNODE) node_v;
+ if (lock_type != PL_READ && node->height > 0 && move_messages) {
+ toku_move_ftnode_messages_to_stale(ft, node);
+ }
+ *node_p = node;
+}
+
+void toku_pin_ftnode(FT ft,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ ftnode_fetch_extra *bfe,
+ pair_lock_type lock_type,
+ FTNODE *node_p,
+ bool move_messages) {
+ toku_pin_ftnode_with_dep_nodes(ft, blocknum, fullhash, bfe, lock_type, 0, nullptr, node_p, move_messages);
+}
+
+int toku_maybe_pin_ftnode_clean(FT ft, BLOCKNUM blocknum, uint32_t fullhash, pair_lock_type lock_type, FTNODE *nodep) {
+ void *node_v;
+ int r = toku_cachetable_maybe_get_and_pin_clean(ft->cf, blocknum, fullhash, lock_type, &node_v);
+ if (r != 0) {
+ goto cleanup;
+ }
+ CAST_FROM_VOIDP(*nodep, node_v);
+ if ((*nodep)->height > 0 && lock_type != PL_READ) {
+ toku_move_ftnode_messages_to_stale(ft, *nodep);
+ }
+cleanup:
+ return r;
+}
+
+void toku_unpin_ftnode(FT ft, FTNODE node) {
+ int r = toku_cachetable_unpin(ft->cf,
+ node->ct_pair,
+ static_cast<enum cachetable_dirty>(node->dirty()),
+ make_ftnode_pair_attr(node));
+ invariant_zero(r);
+}
+
+void
+toku_unpin_ftnode_read_only(FT ft, FTNODE node)
+{
+ int r = toku_cachetable_unpin(
+ ft->cf,
+ node->ct_pair,
+ (enum cachetable_dirty) node->dirty(),
+ make_invalid_pair_attr()
+ );
+ assert(r==0);
+}
+
+void toku_ftnode_swap_pair_values(FTNODE a, FTNODE b)
+// Effect: Swap the blocknum, fullhash, and PAIR for a and b
+// Requires: Both nodes are pinned
+{
+ BLOCKNUM tmp_blocknum = a->blocknum;
+ uint32_t tmp_fullhash = a->fullhash;
+ PAIR tmp_pair = a->ct_pair;
+
+ a->blocknum = b->blocknum;
+ a->fullhash = b->fullhash;
+ a->ct_pair = b->ct_pair;
+
+ b->blocknum = tmp_blocknum;
+ b->fullhash = tmp_fullhash;
+ b->ct_pair = tmp_pair;
+
+ // A and B swapped pair pointers, but we still have to swap
+ // the actual pair values (ie: the FTNODEs they represent)
+ // in the cachetable.
+ toku_cachetable_swap_pair_values(a->ct_pair, b->ct_pair);
+}
diff --git a/storage/tokudb/PerconaFT/ft/ft-cachetable-wrappers.h b/storage/tokudb/PerconaFT/ft/ft-cachetable-wrappers.h
new file mode 100644
index 00000000..79362453
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-cachetable-wrappers.h
@@ -0,0 +1,141 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/cachetable/cachetable.h"
+#include "ft/ft-internal.h"
+#include "ft/node.h"
+
+/**
+ * Put an empty node (that is, no fields filled) into the cachetable.
+ * In the process, write dependent nodes out for checkpoint if
+ * necessary.
+ */
+void
+cachetable_put_empty_node_with_dep_nodes(
+ FT ft,
+ uint32_t num_dependent_nodes,
+ FTNODE* dependent_nodes,
+ BLOCKNUM* name, //output
+ uint32_t* fullhash, //output
+ FTNODE* result
+ );
+
+/**
+ * Create a new ftnode with specified height and number of children.
+ * In the process, write dependent nodes out for checkpoint if
+ * necessary.
+ */
+void
+create_new_ftnode_with_dep_nodes(
+ FT ft,
+ FTNODE *result,
+ int height,
+ int n_children,
+ uint32_t num_dependent_nodes,
+ FTNODE* dependent_nodes
+ );
+
+/**
+ * Create a new ftnode with specified height
+ * and children.
+ * Used for test functions only.
+ */
+void
+toku_create_new_ftnode (
+ FT_HANDLE t,
+ FTNODE *result,
+ int height,
+ int n_children
+ );
+
+// This function returns a pinned ftnode to the caller.
+int
+toku_pin_ftnode_for_query(
+ FT_HANDLE ft_h,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ UNLOCKERS unlockers,
+ ANCESTORS ancestors,
+ const pivot_bounds &bounds,
+ ftnode_fetch_extra *bfe,
+    bool apply_ancestor_messages, // this bool is probably temporary (#3972); once we know how range query estimates work, we will revisit this
+ FTNODE *node_p,
+ bool* msgs_applied
+ );
+
+// Pins an ftnode without dependent pairs
+void toku_pin_ftnode(
+ FT ft,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ ftnode_fetch_extra *bfe,
+ pair_lock_type lock_type,
+ FTNODE *node_p,
+ bool move_messages
+ );
+
+// Pins an ftnode with dependent pairs
+// Unlike toku_pin_ftnode_for_query, this function blocks until the node is pinned.
+void toku_pin_ftnode_with_dep_nodes(
+ FT ft,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ ftnode_fetch_extra *bfe,
+ pair_lock_type lock_type,
+ uint32_t num_dependent_nodes,
+ FTNODE *dependent_nodes,
+ FTNODE *node_p,
+ bool move_messages
+ );
+
+/**
+ * This function may return a pinned ftnode to the caller, if pinning is cheap.
+ * If the node is already locked, or is pending a checkpoint, the node is not pinned and -1 is returned.
+ */
+int toku_maybe_pin_ftnode_clean(FT ft, BLOCKNUM blocknum, uint32_t fullhash, pair_lock_type lock_type, FTNODE *nodep);
+
+/**
+ * Effect: Unpin an ftnode.
+ */
+void toku_unpin_ftnode(FT ft, FTNODE node);
+void toku_unpin_ftnode_read_only(FT ft, FTNODE node);
+
+// Effect: Swaps pair values of two pinned nodes
+void toku_ftnode_swap_pair_values(FTNODE nodea, FTNODE nodeb);
diff --git a/storage/tokudb/PerconaFT/ft/ft-flusher-internal.h b/storage/tokudb/PerconaFT/ft/ft-flusher-internal.h
new file mode 100644
index 00000000..0e47cbb7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-flusher-internal.h
@@ -0,0 +1,183 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
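+// Flush-state identifiers passed to the test-only flusher thread callback
+// (see toku_flusher_thread_set_callback in ft-flusher.cc); they mark where in
+// a flush, split, or merge sequence the callback fires.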
+#define flt_flush_before_applying_inbox 1
+#define flt_flush_before_child_pin 2
+#define ft_flush_aflter_child_pin 3
+#define flt_flush_before_split 4
+#define flt_flush_during_split 5
+#define flt_flush_before_merge 6
+#define ft_flush_aflter_merge 7
+#define ft_flush_aflter_rebalance 8
+#define flt_flush_before_unpin_remove 9
+#define flt_flush_before_pin_second_node_for_merge 10
+
+typedef struct flusher_advice FLUSHER_ADVICE;
+
+/**
+ * Choose a child to flush to. Returns a childnum, or -1 if we should
+ * go no further.
+ *
+ * Flusher threads: pick the heaviest child buffer
+ * Cleaner threads: pick the heaviest child buffer
+ * Cleaner thread merging leaf nodes: follow down to a key
+ * Hot optimize table: follow down to the right of a key
+ */
+typedef int (*FA_PICK_CHILD)(FT ft, FTNODE parent, void* extra);
+
+/**
+ * Decide whether to call `toku_ft_flush_some_child` on the child if it is
+ * stable and a nonleaf node.
+ *
+ * Flusher threads: yes if child is gorged
+ * Cleaner threads: yes if child is gorged
+ * Cleaner thread merging leaf nodes: always yes
+ * Hot optimize table: always yes
+ */
+typedef bool (*FA_SHOULD_RECURSIVELY_FLUSH)(FTNODE child, void* extra);
+
+/**
+ * Called if the child needs merging. Should do something to get the
+ * child out of a fusible state. Must unpin parent and child.
+ *
+ * Flusher threads: just do the merge
+ * Cleaner threads: if nonleaf, just merge, otherwise start a "cleaner
+ * thread merge"
+ * Cleaner thread merging leaf nodes: just do the merge
+ * Hot optimize table: just do the merge
+ */
+typedef void (*FA_MAYBE_MERGE_CHILD)(struct flusher_advice *fa,
+ FT ft,
+ FTNODE parent,
+ int childnum,
+ FTNODE child,
+ void* extra);
+
+/**
+ * Cleaner threads may need to destroy basement nodes which have been
+ * brought more up to date than the height 1 node flushing to them.
+ * This function is used to determine if we need to check for basement
+ * nodes that are too up to date, and then destroy them if we find
+ * them.
+ *
+ * Flusher threads: no
+ * Cleaner threads: yes
+ * Cleaner thread merging leaf nodes: no
+ * Hot optimize table: no
+ */
+typedef bool (*FA_SHOULD_DESTROY_BN)(void* extra);
+
+/**
+ * Update `ft_flusher_status` in whatever way necessary. Called once
+ * by `toku_ft_flush_some_child` right before choosing what to do next (split,
+ * merge, recurse), with the number of nodes that were dirtied by this
+ * execution of `toku_ft_flush_some_child`.
+ */
+typedef void (*FA_UPDATE_STATUS)(FTNODE child, int dirtied, void* extra);
+
+/**
+ * Choose whether to go to the left or right child after a split. Called
+ * by `ft_split_child`. If -1 is returned, `ft_split_child` defaults to
+ * the old behavior.
+ */
+typedef int (*FA_PICK_CHILD_AFTER_SPLIT)(FT ft,
+ FTNODE node,
+ int childnuma,
+ int childnumb,
+ void* extra);
+
+/**
+ * A collection of callbacks used by the flushing machinery to make
+ * various decisions. There are implementations of each of these
+ * functions for flusher threads (flt_*), cleaner threads (ct_*), cleaner
+ * thread leaf merges (ctm_*), and hot optimize table (hot_*).
+ */
+struct flusher_advice {
+ FA_PICK_CHILD pick_child;
+ FA_SHOULD_RECURSIVELY_FLUSH should_recursively_flush;
+ FA_MAYBE_MERGE_CHILD maybe_merge_child;
+ FA_SHOULD_DESTROY_BN should_destroy_basement_nodes;
+ FA_UPDATE_STATUS update_status;
+ FA_PICK_CHILD_AFTER_SPLIT pick_child_after_split;
+ void* extra; // parameter passed into callbacks
+};
+
+void
+flusher_advice_init(
+ struct flusher_advice *fa,
+ FA_PICK_CHILD pick_child,
+ FA_SHOULD_DESTROY_BN should_destroy_basement_nodes,
+ FA_SHOULD_RECURSIVELY_FLUSH should_recursively_flush,
+ FA_MAYBE_MERGE_CHILD maybe_merge_child,
+ FA_UPDATE_STATUS update_status,
+ FA_PICK_CHILD_AFTER_SPLIT pick_child_after_split,
+ void* extra
+ );
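+
+// For example, the flusher-thread policy (flt_flusher_advice_init in
+// ft-flusher.cc) wires these callbacks up as pick_heaviest_child,
+// dont_destroy_basement_nodes, recurse_if_child_is_gorged, default_merge_child,
+// flt_update_status, and default_pick_child_after_split.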
+
+void toku_ft_flush_some_child(
+ FT ft,
+ FTNODE parent,
+ struct flusher_advice *fa
+ );
+
+bool
+always_recursively_flush(FTNODE child, void* extra);
+
+bool
+never_recursively_flush(FTNODE UU(child), void* UU(extra));
+
+bool
+dont_destroy_basement_nodes(void* extra);
+
+void
+default_merge_child(struct flusher_advice *fa,
+ FT ft,
+ FTNODE parent,
+ int childnum,
+ FTNODE child,
+ void* extra);
+
+int
+default_pick_child_after_split(FT ft,
+ FTNODE parent,
+ int childnuma,
+ int childnumb,
+ void *extra);
+
diff --git a/storage/tokudb/PerconaFT/ft/ft-flusher.cc b/storage/tokudb/PerconaFT/ft/ft-flusher.cc
new file mode 100644
index 00000000..bbb2a170
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-flusher.cc
@@ -0,0 +1,1929 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "ft/ft.h"
+#include "ft/ft-cachetable-wrappers.h"
+#include "ft/ft-internal.h"
+#include "ft/ft-flusher.h"
+#include "ft/ft-flusher-internal.h"
+#include "ft/node.h"
+#include "ft/serialize/block_table.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "portability/toku_assert.h"
+#include "portability/toku_atomic.h"
+#include "util/status.h"
+#include "util/context.h"
+
+
+void toku_ft_flusher_get_status(FT_FLUSHER_STATUS status) {
+ fl_status.init();
+ *status = fl_status;
+}
+
+//
+// For test purposes only.
+// These callbacks are never used in production code, only as a way
+// to test the system (for example, by causing crashes at predictable times).
+//
+static void (*flusher_thread_callback)(int, void*) = NULL;
+static void *flusher_thread_callback_extra = NULL;
+
+void toku_flusher_thread_set_callback(void (*callback_f)(int, void*),
+ void* extra) {
+ flusher_thread_callback = callback_f;
+ flusher_thread_callback_extra = extra;
+}
+
+static void call_flusher_thread_callback(int flt_state) {
+ if (flusher_thread_callback) {
+ flusher_thread_callback(flt_state, flusher_thread_callback_extra);
+ }
+}
+
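+// A child's "weight" is the number of message bytes buffered for it plus the
+// work already done on it (BP_WORKDONE); the child with the largest weight is
+// the one picked for flushing (see pick_heaviest_child).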
+static int
+find_heaviest_child(FTNODE node)
+{
+ int max_child = 0;
+ uint64_t max_weight = toku_bnc_nbytesinbuf(BNC(node, 0)) + BP_WORKDONE(node, 0);
+
+ invariant(node->n_children > 0);
+ for (int i = 1; i < node->n_children; i++) {
+ uint64_t bytes_in_buf = toku_bnc_nbytesinbuf(BNC(node, i));
+ uint64_t workdone = BP_WORKDONE(node, i);
+ if (workdone > 0) {
+ invariant(bytes_in_buf > 0);
+ }
+ uint64_t this_weight = bytes_in_buf + workdone;
+ if (max_weight < this_weight) {
+ max_child = i;
+ max_weight = this_weight;
+ }
+ }
+ return max_child;
+}
+
+static void
+update_flush_status(FTNODE child, int cascades) {
+ FL_STATUS_VAL(FT_FLUSHER_FLUSH_TOTAL)++;
+ if (cascades > 0) {
+ FL_STATUS_VAL(FT_FLUSHER_FLUSH_CASCADES)++;
+ switch (cascades) {
+ case 1:
+ FL_STATUS_VAL(FT_FLUSHER_FLUSH_CASCADES_1)++; break;
+ case 2:
+ FL_STATUS_VAL(FT_FLUSHER_FLUSH_CASCADES_2)++; break;
+ case 3:
+ FL_STATUS_VAL(FT_FLUSHER_FLUSH_CASCADES_3)++; break;
+ case 4:
+ FL_STATUS_VAL(FT_FLUSHER_FLUSH_CASCADES_4)++; break;
+ case 5:
+ FL_STATUS_VAL(FT_FLUSHER_FLUSH_CASCADES_5)++; break;
+ default:
+ FL_STATUS_VAL(FT_FLUSHER_FLUSH_CASCADES_GT_5)++; break;
+ }
+ }
+ bool flush_needs_io = false;
+ for (int i = 0; !flush_needs_io && i < child->n_children; ++i) {
+ if (BP_STATE(child, i) == PT_ON_DISK) {
+ flush_needs_io = true;
+ }
+ }
+ if (flush_needs_io) {
+ FL_STATUS_VAL(FT_FLUSHER_FLUSH_NEEDED_IO)++;
+ } else {
+ FL_STATUS_VAL(FT_FLUSHER_FLUSH_IN_MEMORY)++;
+ }
+}
+
+static void
+maybe_destroy_child_blbs(FTNODE node, FTNODE child, FT ft)
+{
+ // If the node is already fully in memory, as in upgrade, we don't
+ // need to destroy the basement nodes because they are all equally
+ // up to date.
+ if (child->n_children > 1 &&
+ child->height == 0 &&
+ !child->dirty()) {
+ for (int i = 0; i < child->n_children; ++i) {
+ if (BP_STATE(child, i) == PT_AVAIL &&
+ node->max_msn_applied_to_node_on_disk.msn < BLB_MAX_MSN_APPLIED(child, i).msn)
+ {
+ toku_evict_bn_from_memory(child, i, ft);
+ }
+ }
+ }
+}
+
+static void
+ft_merge_child(
+ FT ft,
+ FTNODE node,
+ int childnum_to_merge,
+ bool *did_react,
+ struct flusher_advice *fa);
+
+static int
+pick_heaviest_child(FT UU(ft),
+ FTNODE parent,
+ void* UU(extra))
+{
+ int childnum = find_heaviest_child(parent);
+ paranoid_invariant(toku_bnc_n_entries(BNC(parent, childnum))>0);
+ return childnum;
+}
+
+bool
+dont_destroy_basement_nodes(void* UU(extra))
+{
+ return false;
+}
+
+static bool
+do_destroy_basement_nodes(void* UU(extra))
+{
+ return true;
+}
+
+bool
+always_recursively_flush(FTNODE UU(child), void* UU(extra))
+{
+ return true;
+}
+
+bool
+never_recursively_flush(FTNODE UU(child), void* UU(extra))
+{
+ return false;
+}
+
+/**
+ * Flusher thread ("normal" flushing) implementation.
+ */
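+// `cascades` counts how many child flushes this pass has already performed
+// (it feeds the FT_FLUSHER_FLUSH_CASCADES_* counters); `nodesize` is the tree
+// node size used by recurse_if_child_is_gorged to decide whether to keep
+// descending.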
+struct flush_status_update_extra {
+ int cascades;
+ uint32_t nodesize;
+};
+
+static bool
+recurse_if_child_is_gorged(FTNODE child, void* extra)
+{
+ struct flush_status_update_extra *fste = (flush_status_update_extra *)extra;
+ return toku_ftnode_nonleaf_is_gorged(child, fste->nodesize);
+}
+
+int
+default_pick_child_after_split(FT UU(ft),
+ FTNODE UU(parent),
+ int UU(childnuma),
+ int UU(childnumb),
+ void* UU(extra))
+{
+ return -1;
+}
+
+void
+default_merge_child(struct flusher_advice *fa,
+ FT ft,
+ FTNODE parent,
+ int childnum,
+ FTNODE child,
+ void* UU(extra))
+{
+ //
+ // There is probably a way to pass FTNODE child
+    // into ft_merge_child, but for simplicity, for now,
+ // we are just going to unpin child and
+ // let ft_merge_child pin it again
+ //
+ toku_unpin_ftnode(ft, child);
+    //
+    // it is the responsibility of ft_merge_child to unlock parent
+ //
+ bool did_react;
+ ft_merge_child(ft, parent, childnum, &did_react, fa);
+}
+
+void
+flusher_advice_init(
+ struct flusher_advice *fa,
+ FA_PICK_CHILD pick_child,
+ FA_SHOULD_DESTROY_BN should_destroy_basement_nodes,
+ FA_SHOULD_RECURSIVELY_FLUSH should_recursively_flush,
+ FA_MAYBE_MERGE_CHILD maybe_merge_child,
+ FA_UPDATE_STATUS update_status,
+ FA_PICK_CHILD_AFTER_SPLIT pick_child_after_split,
+ void* extra
+ )
+{
+ fa->pick_child = pick_child;
+ fa->should_destroy_basement_nodes = should_destroy_basement_nodes;
+ fa->should_recursively_flush = should_recursively_flush;
+ fa->maybe_merge_child = maybe_merge_child;
+ fa->update_status = update_status;
+ fa->pick_child_after_split = pick_child_after_split;
+ fa->extra = extra;
+}
+
+static void
+flt_update_status(FTNODE child,
+ int UU(dirtied),
+ void* extra)
+{
+ struct flush_status_update_extra *fste = (struct flush_status_update_extra *) extra;
+ update_flush_status(child, fste->cascades);
+ // If `toku_ft_flush_some_child` decides to recurse after this, we'll need
+    // cascades to increase. If not, it doesn't matter.
+ fste->cascades++;
+}
+
+static void
+flt_flusher_advice_init(struct flusher_advice *fa, struct flush_status_update_extra *fste, uint32_t nodesize)
+{
+ fste->cascades = 0;
+ fste->nodesize = nodesize;
+ flusher_advice_init(fa,
+ pick_heaviest_child,
+ dont_destroy_basement_nodes,
+ recurse_if_child_is_gorged,
+ default_merge_child,
+ flt_update_status,
+ default_pick_child_after_split,
+ fste);
+}
+
+struct ctm_extra {
+ bool is_last_child;
+ DBT target_key;
+};
+
+static int
+ctm_pick_child(FT ft,
+ FTNODE parent,
+ void* extra)
+{
+ struct ctm_extra* ctme = (struct ctm_extra *) extra;
+ int childnum;
+ if (parent->height == 1 && ctme->is_last_child) {
+ childnum = parent->n_children - 1;
+ } else {
+ childnum = toku_ftnode_which_child(parent, &ctme->target_key, ft->cmp);
+ }
+ return childnum;
+}
+
+static void
+ctm_update_status(
+ FTNODE UU(child),
+ int dirtied,
+ void* UU(extra)
+ )
+{
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_NUM_DIRTIED_FOR_LEAF_MERGE) += dirtied;
+}
+
+static void
+ctm_maybe_merge_child(struct flusher_advice *fa,
+ FT ft,
+ FTNODE parent,
+ int childnum,
+ FTNODE child,
+ void *extra)
+{
+ if (child->height == 0) {
+ (void) toku_sync_fetch_and_add(&FL_STATUS_VAL(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_COMPLETED), 1);
+ }
+ default_merge_child(fa, ft, parent, childnum, child, extra);
+}
+
+static void
+ct_maybe_merge_child(struct flusher_advice *fa,
+ FT ft,
+ FTNODE parent,
+ int childnum,
+ FTNODE child,
+ void* extra)
+{
+ if (child->height > 0) {
+ default_merge_child(fa, ft, parent, childnum, child, extra);
+ }
+ else {
+ struct ctm_extra ctme;
+ paranoid_invariant(parent->n_children > 1);
+ int pivot_to_save;
+        //
+        // There are two cases. If childnum is the last child, there is no
+        // pivot at that index, so we save the pivot to its left (not the
+        // pivot of the child we descend into) and mark is_last_child.
+        // Otherwise, the pivot at childnum is sufficient for identifying
+        // the leaf to be merged.
+        //
+ if (childnum == (parent->n_children - 1)) {
+ ctme.is_last_child = true;
+ pivot_to_save = childnum - 1;
+ }
+ else {
+ ctme.is_last_child = false;
+ pivot_to_save = childnum;
+ }
+ toku_clone_dbt(&ctme.target_key, parent->pivotkeys.get_pivot(pivot_to_save));
+
+ // at this point, ctme is properly setup, now we can do the merge
+ struct flusher_advice new_fa;
+ flusher_advice_init(
+ &new_fa,
+ ctm_pick_child,
+ dont_destroy_basement_nodes,
+ always_recursively_flush,
+ ctm_maybe_merge_child,
+ ctm_update_status,
+ default_pick_child_after_split,
+ &ctme);
+
+ toku_unpin_ftnode(ft, parent);
+ toku_unpin_ftnode(ft, child);
+
+ FTNODE root_node = NULL;
+ {
+ uint32_t fullhash;
+ CACHEKEY root;
+ toku_calculate_root_offset_pointer(ft, &root, &fullhash);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ toku_pin_ftnode(ft, root, fullhash, &bfe, PL_WRITE_EXPENSIVE, &root_node, true);
+ toku_ftnode_assert_fully_in_memory(root_node);
+ }
+
+ (void) toku_sync_fetch_and_add(&FL_STATUS_VAL(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_STARTED), 1);
+ (void) toku_sync_fetch_and_add(&FL_STATUS_VAL(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_RUNNING), 1);
+
+ toku_ft_flush_some_child(ft, root_node, &new_fa);
+
+ (void) toku_sync_fetch_and_sub(&FL_STATUS_VAL(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_RUNNING), 1);
+
+ toku_destroy_dbt(&ctme.target_key);
+ }
+}
+
+static void
+ct_update_status(FTNODE child,
+ int dirtied,
+ void* extra)
+{
+ struct flush_status_update_extra* fste = (struct flush_status_update_extra *) extra;
+ update_flush_status(child, fste->cascades);
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_NODES_DIRTIED) += dirtied;
+ // Incrementing this in case `toku_ft_flush_some_child` decides to recurse.
+ fste->cascades++;
+}
+
+static void
+ct_flusher_advice_init(struct flusher_advice *fa, struct flush_status_update_extra* fste, uint32_t nodesize)
+{
+ fste->cascades = 0;
+ fste->nodesize = nodesize;
+ flusher_advice_init(fa,
+ pick_heaviest_child,
+ do_destroy_basement_nodes,
+ recurse_if_child_is_gorged,
+ ct_maybe_merge_child,
+ ct_update_status,
+ default_pick_child_after_split,
+ fste);
+}
+
+//
+// This returns true if the node MAY be reactive,
+// false if we are absolutely sure that it is NOT reactive.
+// The reason for inaccuracy is that the node may be
+// a leaf node that is not entirely in memory. If so, then
+// we cannot be sure if the node is reactive.
+//
+static bool ft_ftnode_may_be_reactive(FT ft, FTNODE node)
+{
+ if (node->height == 0) {
+ return true;
+ } else {
+ return toku_ftnode_get_nonleaf_reactivity(node, ft->h->fanout) != RE_STABLE;
+ }
+}
+
+/* NODE is a node with a child.
+ * The child at childnum was split into two nodes, childa and childb. childa is the same as the original child; childb is a new child.
+ * We must slide things around and move entries from the old table to the new tables.
+ * Requires: the CHILDNUMth buffer of node is empty.
+ * We don't push anything down to children. We split the node, and things land wherever they land.
+ * We must delete the old buffer (but the old child is already deleted.)
+ * On return, the new children and node STAY PINNED.
+ */
+static void
+handle_split_of_child(
+ FT ft,
+ FTNODE node,
+ int childnum,
+ FTNODE childa,
+ FTNODE childb,
+ DBT *splitk /* the data in the childsplitk is alloc'd and is consumed by this call. */
+ )
+{
+ paranoid_invariant(node->height>0);
+ paranoid_invariant(0 <= childnum);
+ paranoid_invariant(childnum < node->n_children);
+ toku_ftnode_assert_fully_in_memory(node);
+ toku_ftnode_assert_fully_in_memory(childa);
+ toku_ftnode_assert_fully_in_memory(childb);
+ NONLEAF_CHILDINFO old_bnc = BNC(node, childnum);
+ paranoid_invariant(toku_bnc_nbytesinbuf(old_bnc)==0);
+ WHEN_NOT_GCOV(
+ if (toku_ft_debug_mode) {
+ printf("%s:%d Child %d splitting on %s\n", __FILE__, __LINE__, childnum, (char*)splitk->data);
+ printf("%s:%d oldsplitkeys:", __FILE__, __LINE__);
+ for(int i = 0; i < node->n_children - 1; i++) printf(" %s", (char *) node->pivotkeys.get_pivot(i).data);
+ printf("\n");
+ }
+ )
+
+ node->set_dirty();
+
+ XREALLOC_N(node->n_children+1, node->bp);
+ // Slide the children over.
+ // suppose n_children is 10 and childnum is 5, meaning node's child 5 just got split
+ // this moves node->bp[6] through node->bp[9] over to
+ // node->bp[7] through node->bp[10]
+ for (int cnum=node->n_children; cnum>childnum+1; cnum--) {
+ node->bp[cnum] = node->bp[cnum-1];
+ }
+ memset(&node->bp[childnum+1],0,sizeof(node->bp[0]));
+ node->n_children++;
+
+ paranoid_invariant(BP_BLOCKNUM(node, childnum).b==childa->blocknum.b); // use the same child
+
+ // We never set the rightmost blocknum to be the root.
+ // Instead, we wait for the root to split and let promotion initialize the rightmost
+ // blocknum to be the first non-root leaf node on the right extreme to receive an insert.
+ BLOCKNUM rightmost_blocknum = toku_unsafe_fetch(&ft->rightmost_blocknum);
+ invariant(ft->h->root_blocknum.b != rightmost_blocknum.b);
+ if (childa->blocknum.b == rightmost_blocknum.b) {
+ // The rightmost leaf (a) split into (a) and (b). We want (b) to swap pair values
+ // with (a), now that it is the new rightmost leaf. This keeps the rightmost blocknum
+ // constant, the same way we keep the root blocknum constant.
+ toku_ftnode_swap_pair_values(childa, childb);
+ BP_BLOCKNUM(node, childnum) = childa->blocknum;
+ }
+
+ BP_BLOCKNUM(node, childnum+1) = childb->blocknum;
+ BP_WORKDONE(node, childnum+1) = 0;
+ BP_STATE(node,childnum+1) = PT_AVAIL;
+
+ NONLEAF_CHILDINFO new_bnc = toku_create_empty_nl();
+ for (unsigned int i = 0; i < (sizeof new_bnc->flow) / (sizeof new_bnc->flow[0]); ++i) {
+ // just split the flows in half for now, can't guess much better
+ // at the moment
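+ // (e.g. a flow value of 5 becomes 2 for the new child and 3 for the
+ // old one, so the total is preserved)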
+ new_bnc->flow[i] = old_bnc->flow[i] / 2;
+ old_bnc->flow[i] = (old_bnc->flow[i] + 1) / 2;
+ }
+ set_BNC(node, childnum+1, new_bnc);
+
+ // Insert the new split key, sliding the other keys over
+ node->pivotkeys.insert_at(splitk, childnum);
+
+ WHEN_NOT_GCOV(
+ if (toku_ft_debug_mode) {
+ printf("%s:%d splitkeys:", __FILE__, __LINE__);
+ for (int i = 0; i < node->n_children - 2; i++) printf(" %s", (char *) node->pivotkeys.get_pivot(i).data);
+ printf("\n");
+ }
+ )
+
+ /* Keep pushing to the children, but not if the children would require a pushdown */
+ toku_ftnode_assert_fully_in_memory(node);
+ toku_ftnode_assert_fully_in_memory(childa);
+ toku_ftnode_assert_fully_in_memory(childb);
+
+ VERIFY_NODE(t, node);
+ VERIFY_NODE(t, childa);
+ VERIFY_NODE(t, childb);
+}
+
+static void
+verify_all_in_mempool(FTNODE UU() node)
+{
+#ifdef TOKU_DEBUG_PARANOID
+ if (node->height==0) {
+ for (int i = 0; i < node->n_children; i++) {
+ invariant(BP_STATE(node,i) == PT_AVAIL);
+ BLB_DATA(node, i)->verify_mempool();
+ }
+ }
+#endif
+}
+
+static uint64_t
+ftleaf_disk_size(FTNODE node)
+// Effect: get the total disk size of the leafentries in a leaf node
+{
+ paranoid_invariant(node->height == 0);
+ toku_ftnode_assert_fully_in_memory(node);
+ uint64_t retval = 0;
+ for (int i = 0; i < node->n_children; i++) {
+ retval += BLB_DATA(node, i)->get_disk_size();
+ }
+ return retval;
+}
+
+static void
+ftleaf_get_split_loc(
+ FTNODE node,
+ enum split_mode split_mode,
+ int *num_left_bns, // which basement within leaf
+ int *num_left_les // which key within basement
+ )
+// Effect: Find the location within a leaf node where we want to perform a split
+// num_left_bns is how many basement nodes (OMTs) should go to the left of the split.
+// num_left_les is how many leafentries in the OMT of the last left basement node should be on the left side of the split.
+{
+ switch (split_mode) {
+ case SPLIT_LEFT_HEAVY: {
+ *num_left_bns = node->n_children;
+ *num_left_les = BLB_DATA(node, *num_left_bns - 1)->num_klpairs();
+ if (*num_left_les == 0) {
+ *num_left_bns = node->n_children - 1;
+ *num_left_les = BLB_DATA(node, *num_left_bns - 1)->num_klpairs();
+ }
+ goto exit;
+ }
+ case SPLIT_RIGHT_HEAVY: {
+ *num_left_bns = 1;
+ *num_left_les = BLB_DATA(node, 0)->num_klpairs() ? 1 : 0;
+ goto exit;
+ }
+ case SPLIT_EVENLY: {
+ paranoid_invariant(node->height == 0);
+ // TODO: (Zardosht) see if we can/should make this faster, we iterate over the rows twice
+ uint64_t sumlesizes = ftleaf_disk_size(node);
+ uint32_t size_so_far = 0;
+ for (int i = 0; i < node->n_children; i++) {
+ bn_data* bd = BLB_DATA(node, i);
+ uint32_t n_leafentries = bd->num_klpairs();
+ for (uint32_t j=0; j < n_leafentries; j++) {
+ size_t size_this_le;
+ int rr = bd->fetch_klpair_disksize(j, &size_this_le);
+ invariant_zero(rr);
+ size_so_far += size_this_le;
+ if (size_so_far >= sumlesizes/2) {
+ *num_left_bns = i + 1;
+ *num_left_les = j + 1;
+ if (*num_left_bns == node->n_children &&
+ (unsigned int) *num_left_les == n_leafentries) {
+ // correct for the case where we'd be splitting after the
+ // last element, which makes no sense
+ if (*num_left_les > 1) {
+ (*num_left_les)--;
+ } else if (*num_left_bns > 1) {
+ (*num_left_bns)--;
+ *num_left_les = BLB_DATA(node, *num_left_bns - 1)->num_klpairs();
+ } else {
+ // we are trying to split a leaf with only one
+ // leafentry in it
+ abort();
+ }
+ }
+ goto exit;
+ }
+ }
+ }
+ }
+ }
+ abort();
+exit:
+ return;
+}
+
+static void
+move_leafentries(
+ BASEMENTNODE dest_bn,
+ BASEMENTNODE src_bn,
+ uint32_t lbi, //lower bound inclusive
+ uint32_t ube //upper bound exclusive
+ )
+// Effect: move leafentries in the range [lbi, ube) from src_bn to the newly created dest_bn
+{
+ invariant(ube == src_bn->data_buffer.num_klpairs());
+ src_bn->data_buffer.split_klpairs(&dest_bn->data_buffer, lbi);
+}
+
+static void ftnode_finalize_split(FTNODE node, FTNODE B, MSN max_msn_applied_to_node) {
+// Effect: Finalizes a split by updating some bits and dirtying both nodes
+ toku_ftnode_assert_fully_in_memory(node);
+ toku_ftnode_assert_fully_in_memory(B);
+ verify_all_in_mempool(node);
+ verify_all_in_mempool(B);
+
+ node->max_msn_applied_to_node_on_disk = max_msn_applied_to_node;
+ B->max_msn_applied_to_node_on_disk = max_msn_applied_to_node;
+
+ // The new node in the split inherits the oldest known reference xid
+ B->oldest_referenced_xid_known = node->oldest_referenced_xid_known;
+
+ node->set_dirty();
+ B->set_dirty();
+}
+
+void
+ftleaf_split(
+ FT ft,
+ FTNODE node,
+ FTNODE *nodea,
+ FTNODE *nodeb,
+ DBT *splitk,
+ bool create_new_node,
+ enum split_mode split_mode,
+ uint32_t num_dependent_nodes,
+ FTNODE* dependent_nodes)
+// Effect: Split a leaf node.
+// Argument "node" is node to be split.
+// Upon return:
+// nodea and nodeb point to new nodes that result from split of "node"
+// nodea is the left node that results from the split
+// splitk is the right-most key of nodea
+{
+
+ paranoid_invariant(node->height == 0);
+ FL_STATUS_VAL(FT_FLUSHER_SPLIT_LEAF)++;
+ if (node->n_children) {
+ // First move all the accumulated stat64info deltas into the first basement.
+ // After the split, either both nodes or neither node will be included in the next checkpoint.
+ // The accumulated stats in the dictionary will be correct in either case.
+ // By moving all the deltas into one (arbitrary) basement, we avoid the need to maintain
+ // correct information for a basement that is divided between two leafnodes (i.e. when split is
+ // not on a basement boundary).
+ STAT64INFO_S delta_for_leafnode = toku_get_and_clear_basement_stats(node);
+ BASEMENTNODE bn = BLB(node,0);
+ bn->stat64_delta = delta_for_leafnode;
+ }
+
+
+ FTNODE B = nullptr;
+ uint32_t fullhash;
+ BLOCKNUM name;
+
+ if (create_new_node) {
+ // put value in cachetable and do checkpointing
+ // of dependent nodes
+ //
+ // We do this here, before evaluating the last_bn_on_left
+ // and last_le_on_left_within_bn because this operation
+ // may write to disk the dependent nodes.
+ // While doing so, we may rebalance the leaf node
+ // we are splitting, thereby invalidating the
+ // values of last_bn_on_left and last_le_on_left_within_bn.
+ // So, we must call this before evaluating
+ // those two values
+ cachetable_put_empty_node_with_dep_nodes(
+ ft,
+ num_dependent_nodes,
+ dependent_nodes,
+ &name,
+ &fullhash,
+ &B
+ );
+ // GCC 4.8 seems to get confused and think B is maybe uninitialized at link time.
+ // TODO(leif): figure out why it thinks this and actually fix it.
+ invariant_notnull(B);
+ }
+
+
+ paranoid_invariant(node->height==0);
+ toku_ftnode_assert_fully_in_memory(node);
+ verify_all_in_mempool(node);
+ MSN max_msn_applied_to_node = node->max_msn_applied_to_node_on_disk;
+
+ // variables that say where we will do the split.
+ // After the split, there will be num_left_bns basement nodes in the left node,
+ // and the last basement node in the left node will have num_left_les leafentries.
+ int num_left_bns;
+ int num_left_les;
+ ftleaf_get_split_loc(node, split_mode, &num_left_bns, &num_left_les);
+ {
+ // did we split right on the boundary between basement nodes?
+ const bool split_on_boundary = (num_left_les == 0) || (num_left_les == (int) BLB_DATA(node, num_left_bns - 1)->num_klpairs());
+ // Now we know where we are going to break it
+ // the two nodes will have a total of n_children+1 basement nodes
+ // and n_children-1 pivots
+ // the left node, node, will have last_bn_on_left+1 basement nodes
+ // the right node, B, will have n_children-last_bn_on_left basement nodes
+ // the pivots of node will be the first last_bn_on_left pivots that originally exist
+ // the pivots of B will be the last (n_children - 1 - last_bn_on_left) pivots that originally exist
+
+ // Note: The basements will not be rebalanced. Only the mempool of the basement that is split
+ // (if split_on_boundary is false) will be affected. All other mempools will remain intact. ???
+
+ //set up the basement nodes in the new node
+ int num_children_in_node = num_left_bns;
+ // In the SPLIT_RIGHT_HEAVY case, we need to add 1 back because
+ // while it's not on the boundary, we do need node->n_children
+ // children in B.
+ int num_children_in_b = node->n_children - num_left_bns + (!split_on_boundary ? 1 : 0);
+ if (num_children_in_b == 0) {
+ // for uneven split, make sure we have at least 1 bn
+ paranoid_invariant(split_mode == SPLIT_LEFT_HEAVY);
+ num_children_in_b = 1;
+ }
+ paranoid_invariant(num_children_in_node > 0);
+ if (create_new_node) {
+ toku_initialize_empty_ftnode(
+ B,
+ name,
+ 0,
+ num_children_in_b,
+ ft->h->layout_version,
+ ft->h->flags);
+ B->fullhash = fullhash;
+ }
+ else {
+ B = *nodeb;
+ REALLOC_N(num_children_in_b, B->bp);
+ B->n_children = num_children_in_b;
+ for (int i = 0; i < num_children_in_b; i++) {
+ BP_BLOCKNUM(B,i).b = 0;
+ BP_STATE(B,i) = PT_AVAIL;
+ BP_WORKDONE(B,i) = 0;
+ set_BLB(B, i, toku_create_empty_bn());
+ }
+ }
+
+ // now move all the data
+
+ int curr_src_bn_index = num_left_bns - 1;
+ int curr_dest_bn_index = 0;
+
+ // handle the move of a subset of data in last_bn_on_left from node to B
+ if (!split_on_boundary) {
+ BP_STATE(B,curr_dest_bn_index) = PT_AVAIL;
+ destroy_basement_node(BLB(B, curr_dest_bn_index)); // Destroy B's empty OMT, so I can rebuild it from an array
+ set_BNULL(B, curr_dest_bn_index);
+ set_BLB(B, curr_dest_bn_index, toku_create_empty_bn_no_buffer());
+ move_leafentries(BLB(B, curr_dest_bn_index),
+ BLB(node, curr_src_bn_index),
+ num_left_les, // first row to be moved to B
+ BLB_DATA(node, curr_src_bn_index)->num_klpairs() // number of rows in basement to be split
+ );
+ BLB_MAX_MSN_APPLIED(B, curr_dest_bn_index) = BLB_MAX_MSN_APPLIED(node, curr_src_bn_index);
+ curr_dest_bn_index++;
+ }
+ curr_src_bn_index++;
+
+ paranoid_invariant(B->n_children >= curr_dest_bn_index);
+ paranoid_invariant(node->n_children >= curr_src_bn_index);
+
+ // move the rest of the basement nodes
+ for ( ; curr_src_bn_index < node->n_children; curr_src_bn_index++, curr_dest_bn_index++) {
+ destroy_basement_node(BLB(B, curr_dest_bn_index));
+ set_BNULL(B, curr_dest_bn_index);
+ B->bp[curr_dest_bn_index] = node->bp[curr_src_bn_index];
+ }
+ if (curr_dest_bn_index < B->n_children) {
+ // B already has an empty basement node here.
+ BP_STATE(B, curr_dest_bn_index) = PT_AVAIL;
+ }
+
+ //
+ // now handle the pivots
+ //
+
+ // the child index in the original node that corresponds to the
+ // first node in the right node of the split
+ int split_idx = num_left_bns - (split_on_boundary ? 0 : 1);
+ node->pivotkeys.split_at(split_idx, &B->pivotkeys);
+ if (split_on_boundary && num_left_bns < node->n_children && splitk) {
+ toku_copyref_dbt(splitk, node->pivotkeys.get_pivot(num_left_bns - 1));
+ } else if (splitk) {
+ bn_data* bd = BLB_DATA(node, num_left_bns - 1);
+ uint32_t keylen;
+ void *key;
+ int rr = bd->fetch_key_and_len(bd->num_klpairs() - 1, &keylen, &key);
+ invariant_zero(rr);
+ toku_memdup_dbt(splitk, key, keylen);
+ }
+
+ node->n_children = num_children_in_node;
+ REALLOC_N(num_children_in_node, node->bp);
+ }
+
+ ftnode_finalize_split(node, B, max_msn_applied_to_node);
+ *nodea = node;
+ *nodeb = B;
+} // end of ftleaf_split()
+
+void
+ft_nonleaf_split(
+ FT ft,
+ FTNODE node,
+ FTNODE *nodea,
+ FTNODE *nodeb,
+ DBT *splitk,
+ uint32_t num_dependent_nodes,
+ FTNODE* dependent_nodes)
+{
+ //VERIFY_NODE(t,node);
+ FL_STATUS_VAL(FT_FLUSHER_SPLIT_NONLEAF)++;
+ toku_ftnode_assert_fully_in_memory(node);
+ int old_n_children = node->n_children;
+ int n_children_in_a = old_n_children/2;
+ int n_children_in_b = old_n_children-n_children_in_a;
+ MSN max_msn_applied_to_node = node->max_msn_applied_to_node_on_disk;
+ FTNODE B;
+ paranoid_invariant(node->height>0);
+ paranoid_invariant(node->n_children>=2); // Otherwise, how do we split? We need at least two children to split.
+ create_new_ftnode_with_dep_nodes(ft, &B, node->height, n_children_in_b, num_dependent_nodes, dependent_nodes);
+ {
+ /* The first n_children_in_a go into node a.
+ * That means that the first n_children_in_a-1 keys go into node a.
+ * The splitter key is key number n_children_in_a */
+ for (int i = n_children_in_a; i<old_n_children; i++) {
+ int targchild = i-n_children_in_a;
+ // TODO: Figure out better way to handle this
+ // the problem is that create_new_ftnode_with_dep_nodes for B creates
+ // all the data structures, whereas we really don't want it to fill
+ // in anything for the bp's.
+ // Now we have to go free what it just created so we can
+ // slide the bp over
+ destroy_nonleaf_childinfo(BNC(B, targchild));
+ // now move the bp over
+ B->bp[targchild] = node->bp[i];
+ memset(&node->bp[i], 0, sizeof(node->bp[0]));
+ }
+
+ // the split key for our parent is the rightmost pivot key in node
+ node->pivotkeys.split_at(n_children_in_a, &B->pivotkeys);
+ toku_clone_dbt(splitk, node->pivotkeys.get_pivot(n_children_in_a - 1));
+ node->pivotkeys.delete_at(n_children_in_a - 1);
+
+ node->n_children = n_children_in_a;
+ REALLOC_N(node->n_children, node->bp);
+ }
+
+ ftnode_finalize_split(node, B, max_msn_applied_to_node);
+ *nodea = node;
+ *nodeb = B;
+}
+
+//
+// responsibility of ft_split_child is to take locked FTNODEs node and child
+// and do the following:
+// - split child,
+// - fix node,
+// - release lock on node
+// - possibly flush either new children created from split, otherwise unlock children
+//
+static void
+ft_split_child(
+ FT ft,
+ FTNODE node,
+ int childnum,
+ FTNODE child,
+ enum split_mode split_mode,
+ struct flusher_advice *fa)
+{
+ paranoid_invariant(node->height>0);
+ paranoid_invariant(toku_bnc_nbytesinbuf(BNC(node, childnum))==0); // require that the buffer for this child is empty
+ FTNODE nodea, nodeb;
+ DBT splitk;
+
+ // for test
+ call_flusher_thread_callback(flt_flush_before_split);
+
+ FTNODE dep_nodes[2];
+ dep_nodes[0] = node;
+ dep_nodes[1] = child;
+ if (child->height==0) {
+ ftleaf_split(ft, child, &nodea, &nodeb, &splitk, true, split_mode, 2, dep_nodes);
+ } else {
+ ft_nonleaf_split(ft, child, &nodea, &nodeb, &splitk, 2, dep_nodes);
+ }
+ // printf("%s:%d child did split\n", __FILE__, __LINE__);
+ handle_split_of_child (ft, node, childnum, nodea, nodeb, &splitk);
+
+ // for test
+ call_flusher_thread_callback(flt_flush_during_split);
+
+ // at this point, the split is complete
+ // now we need to unlock node,
+ // and possibly continue
+ // flushing one of the children
+ int picked_child = fa->pick_child_after_split(ft, node, childnum, childnum + 1, fa->extra);
+ toku_unpin_ftnode(ft, node);
+ if (picked_child == childnum ||
+ (picked_child < 0 && nodea->height > 0 && fa->should_recursively_flush(nodea, fa->extra))) {
+ toku_unpin_ftnode(ft, nodeb);
+ toku_ft_flush_some_child(ft, nodea, fa);
+ }
+ else if (picked_child == childnum + 1 ||
+ (picked_child < 0 && nodeb->height > 0 && fa->should_recursively_flush(nodeb, fa->extra))) {
+ toku_unpin_ftnode(ft, nodea);
+ toku_ft_flush_some_child(ft, nodeb, fa);
+ }
+ else {
+ toku_unpin_ftnode(ft, nodea);
+ toku_unpin_ftnode(ft, nodeb);
+ }
+
+ toku_destroy_dbt(&splitk);
+}
+
+static void bring_node_fully_into_memory(FTNODE node, FT ft) {
+ if (!toku_ftnode_fully_in_memory(node)) {
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ toku_cachetable_pf_pinned_pair(
+ node,
+ toku_ftnode_pf_callback,
+ &bfe,
+ ft->cf,
+ node->blocknum,
+ toku_cachetable_hash(ft->cf, node->blocknum)
+ );
+ }
+}
+
+static void
+flush_this_child(
+ FT ft,
+ FTNODE node,
+ FTNODE child,
+ int childnum,
+ struct flusher_advice *fa)
+// Effect: Push everything in the CHILDNUMth buffer of node down into the child.
+{
+ update_flush_status(child, 0);
+ toku_ftnode_assert_fully_in_memory(node);
+ if (fa->should_destroy_basement_nodes(fa)) {
+ maybe_destroy_child_blbs(node, child, ft);
+ }
+ bring_node_fully_into_memory(child, ft);
+ toku_ftnode_assert_fully_in_memory(child);
+ paranoid_invariant(node->height>0);
+ paranoid_invariant(child->blocknum.b!=0);
+ // VERIFY_NODE does not work off client thread as of now
+ //VERIFY_NODE(t, child);
+ node->set_dirty();
+ child->set_dirty();
+
+ BP_WORKDONE(node, childnum) = 0; // this buffer is drained, no work has been done by its contents
+ NONLEAF_CHILDINFO bnc = BNC(node, childnum);
+ set_BNC(node, childnum, toku_create_empty_nl());
+
+ // now we have a bnc to flush to the child. pass down the parent's
+ // oldest known referenced xid as we flush down to the child.
+ toku_bnc_flush_to_child(ft, bnc, child, node->oldest_referenced_xid_known);
+ destroy_nonleaf_childinfo(bnc);
+}
+
+static void
+merge_leaf_nodes(FTNODE a, FTNODE b)
+{
+ FL_STATUS_VAL(FT_FLUSHER_MERGE_LEAF)++;
+ toku_ftnode_assert_fully_in_memory(a);
+ toku_ftnode_assert_fully_in_memory(b);
+ paranoid_invariant(a->height == 0);
+ paranoid_invariant(b->height == 0);
+ paranoid_invariant(a->n_children > 0);
+ paranoid_invariant(b->n_children > 0);
+
+ // Mark nodes as dirty before moving basements from b to a.
+ // This way, whatever deltas are accumulated in the basements are
+ // applied to the in_memory_stats in the header if they have not already
+ // been (if nodes are clean).
+ // TODO(leif): this is no longer the way in_memory_stats is
+ // maintained. verify that it's ok to move this just before the unpin
+ // and then do that.
+ a->set_dirty();
+ b->set_dirty();
+
+ bn_data* a_last_bd = BLB_DATA(a, a->n_children-1);
+ // this bool states if the last basement node in a has any items or not
+ // If it does, then it stays in the merge. If it does not, the last basement node
+ // of a gets eliminated because we do not have a pivot to store for it (because it has no elements)
+ const bool a_has_tail = a_last_bd->num_klpairs() > 0;
+
+ int num_children = a->n_children + b->n_children;
+ if (!a_has_tail) {
+ int lastchild = a->n_children - 1;
+ BASEMENTNODE bn = BLB(a, lastchild);
+
+ // verify that last basement in a is empty, then destroy mempool
+ size_t used_space = a_last_bd->get_disk_size();
+ invariant_zero(used_space);
+ destroy_basement_node(bn);
+ set_BNULL(a, lastchild);
+ num_children--;
+ if (lastchild < a->pivotkeys.num_pivots()) {
+ a->pivotkeys.delete_at(lastchild);
+ }
+ } else {
+ // fill in pivot for what used to be max of node 'a', if it is needed
+ uint32_t keylen;
+ void *key;
+ int r = a_last_bd->fetch_key_and_len(a_last_bd->num_klpairs() - 1, &keylen, &key);
+ invariant_zero(r);
+ DBT pivotkey;
+ toku_fill_dbt(&pivotkey, key, keylen);
+ a->pivotkeys.replace_at(&pivotkey, a->n_children - 1);
+ }
+
+ // realloc basement nodes in `a'
+ REALLOC_N(num_children, a->bp);
+
+ // move each basement node from b to a
+ uint32_t offset = a_has_tail ? a->n_children : a->n_children - 1;
+ for (int i = 0; i < b->n_children; i++) {
+ a->bp[i + offset] = b->bp[i];
+ memset(&b->bp[i], 0, sizeof(b->bp[0]));
+ }
+
+ // append b's pivots to a's pivots
+ a->pivotkeys.append(b->pivotkeys);
+
+ // now that all the data has been moved from b to a, we can destroy the data in b
+ a->n_children = num_children;
+ b->pivotkeys.destroy();
+ b->n_children = 0;
+}
+
+static void balance_leaf_nodes(
+ FTNODE a,
+ FTNODE b,
+ DBT *splitk)
+// Effect:
+// If b is bigger, then move stuff from b to a until b is the smaller.
+// If a is bigger, then move stuff from a to b until a is the smaller.
+{
+ FL_STATUS_VAL(FT_FLUSHER_BALANCE_LEAF)++;
+ // first merge all the data into a
+ merge_leaf_nodes(a,b);
+ // now split them
+ // because we are not creating a new node, we can pass in no dependent nodes
+ ftleaf_split(NULL, a, &a, &b, splitk, false, SPLIT_EVENLY, 0, NULL);
+}
+
+static void
+maybe_merge_pinned_leaf_nodes(
+ FTNODE a,
+ FTNODE b,
+ const DBT *parent_splitk,
+ bool *did_merge,
+ bool *did_rebalance,
+ DBT *splitk,
+ uint32_t nodesize
+ )
+// Effect: Either merge a and b into one node (merge them into a) and set *did_merge = true.
+// (We do this if the resulting node is not fissible)
+// or distribute the leafentries evenly between a and b, and set *did_rebalance = true.
+// (If a and b are already evenly distributed, we may do nothing.)
+{
+ unsigned int sizea = toku_serialize_ftnode_size(a);
+ unsigned int sizeb = toku_serialize_ftnode_size(b);
+ uint32_t num_leafentries = toku_ftnode_leaf_num_entries(a) + toku_ftnode_leaf_num_entries(b);
+ if (num_leafentries > 1 && (sizea + sizeb)*4 > (nodesize*3)) {
+ // the combined size is more than 3/4 of a node, so don't merge them.
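+ // (for illustration, assuming a hypothetical 4 MB nodesize: the pair is
+ // left unmerged whenever the two serialized nodes together exceed 3 MB;
+ // below, we rebalance only if one of them is also at or under 1 MB)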
+ *did_merge = false;
+ if (sizea*4 > nodesize && sizeb*4 > nodesize) {
+ // no need to do anything if both are more than 1/4 of a node.
+ *did_rebalance = false;
+ toku_clone_dbt(splitk, *parent_splitk);
+ return;
+ }
+ // one is less than 1/4 of a node, and together they are more than 3/4 of a node.
+ *did_rebalance = true;
+ balance_leaf_nodes(a, b, splitk);
+ } else {
+ // we are merging them.
+ *did_merge = true;
+ *did_rebalance = false;
+ toku_init_dbt(splitk);
+ merge_leaf_nodes(a, b);
+ }
+}
+
+static void
+maybe_merge_pinned_nonleaf_nodes(
+ const DBT *parent_splitk,
+ FTNODE a,
+ FTNODE b,
+ bool *did_merge,
+ bool *did_rebalance,
+ DBT *splitk)
+{
+ toku_ftnode_assert_fully_in_memory(a);
+ toku_ftnode_assert_fully_in_memory(b);
+ invariant_notnull(parent_splitk->data);
+
+ int old_n_children = a->n_children;
+ int new_n_children = old_n_children + b->n_children;
+
+ XREALLOC_N(new_n_children, a->bp);
+ memcpy(a->bp + old_n_children, b->bp, b->n_children * sizeof(b->bp[0]));
+ memset(b->bp, 0, b->n_children * sizeof(b->bp[0]));
+
+ a->pivotkeys.insert_at(parent_splitk, old_n_children - 1);
+ a->pivotkeys.append(b->pivotkeys);
+ a->n_children = new_n_children;
+ b->n_children = 0;
+
+ a->set_dirty();
+ b->set_dirty();
+
+ *did_merge = true;
+ *did_rebalance = false;
+ toku_init_dbt(splitk);
+
+ FL_STATUS_VAL(FT_FLUSHER_MERGE_NONLEAF)++;
+}
+
+static void
+maybe_merge_pinned_nodes(
+ FTNODE parent,
+ const DBT *parent_splitk,
+ FTNODE a,
+ FTNODE b,
+ bool *did_merge,
+ bool *did_rebalance,
+ DBT *splitk,
+ uint32_t nodesize
+ )
+// Effect: either merge a and b into one node (merge them into a) and set *did_merge = true.
+// (We do this if the resulting node is not fissible)
+// or distribute a and b evenly and set *did_merge = false and *did_rebalance = true
+// (If a and b are already evenly distributed, we may do nothing.)
+// If we distribute:
+// For leaf nodes, we distribute the leafentries evenly.
+// For nonleaf nodes, we distribute the children evenly. That may leave one or both of the nodes overfull, but that's OK.
+// If we distribute, we set *splitk to a malloced pivot key.
+// Parameters:
+// parent The parent of the two nodes to be merged.
+// parent_splitk The pivot key between a and b. This is either free()'d or returned in *splitk.
+// a The first node to merge.
+// b The second node to merge.
+// did_merge (OUT): Did the two nodes actually get merged?
+// did_rebalance (OUT): Did the two nodes get rebalanced instead?
+// splitk (OUT): If the two nodes did not get merged, the new pivot key between the two nodes.
+// nodesize The target node size, used to decide whether to merge or rebalance.
+{
+ MSN msn_max;
+ paranoid_invariant(a->height == b->height);
+ toku_ftnode_assert_fully_in_memory(parent);
+ toku_ftnode_assert_fully_in_memory(a);
+ toku_ftnode_assert_fully_in_memory(b);
+ parent->set_dirty(); // just to make sure
+ {
+ MSN msna = a->max_msn_applied_to_node_on_disk;
+ MSN msnb = b->max_msn_applied_to_node_on_disk;
+ msn_max = (msna.msn > msnb.msn) ? msna : msnb;
+ }
+ if (a->height == 0) {
+ maybe_merge_pinned_leaf_nodes(a, b, parent_splitk, did_merge, did_rebalance, splitk, nodesize);
+ } else {
+ maybe_merge_pinned_nonleaf_nodes(parent_splitk, a, b, did_merge, did_rebalance, splitk);
+ }
+ if (*did_merge || *did_rebalance) {
+ // accurate for leaf nodes because all msgs above have been
+ // applied, accurate for non-leaf nodes because buffer immediately
+ // above each node has been flushed
+ a->max_msn_applied_to_node_on_disk = msn_max;
+ b->max_msn_applied_to_node_on_disk = msn_max;
+ }
+}
+
+static void merge_remove_key_callback(BLOCKNUM *bp, bool for_checkpoint, void *extra) {
+ FT ft = (FT) extra;
+ ft->blocktable.free_blocknum(bp, ft, for_checkpoint);
+}
+
+//
+// Takes as input a locked node and a childnum_to_merge
+// As output, two of node's children are merged or rebalanced, and node is unlocked
+//
+static void
+ft_merge_child(
+ FT ft,
+ FTNODE node,
+ int childnum_to_merge,
+ bool *did_react,
+ struct flusher_advice *fa)
+{
+ // this function should not be called
+ // if the child is not mergable
+ paranoid_invariant(node->n_children > 1);
+ toku_ftnode_assert_fully_in_memory(node);
+
+ int childnuma,childnumb;
+ if (childnum_to_merge > 0) {
+ childnuma = childnum_to_merge-1;
+ childnumb = childnum_to_merge;
+ } else {
+ childnuma = childnum_to_merge;
+ childnumb = childnum_to_merge+1;
+ }
+ paranoid_invariant(0 <= childnuma);
+ paranoid_invariant(childnuma+1 == childnumb);
+ paranoid_invariant(childnumb < node->n_children);
+
+ paranoid_invariant(node->height>0);
+
+ // We suspect that at least one of the children is fusible, but they might not be.
+ // for test
+ call_flusher_thread_callback(flt_flush_before_merge);
+
+ FTNODE childa, childb;
+ {
+ uint32_t childfullhash = compute_child_fullhash(ft->cf, node, childnuma);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ toku_pin_ftnode_with_dep_nodes(ft, BP_BLOCKNUM(node, childnuma), childfullhash, &bfe, PL_WRITE_EXPENSIVE, 1, &node, &childa, true);
+ }
+ // for test
+ call_flusher_thread_callback(flt_flush_before_pin_second_node_for_merge);
+ {
+ FTNODE dep_nodes[2];
+ dep_nodes[0] = node;
+ dep_nodes[1] = childa;
+ uint32_t childfullhash = compute_child_fullhash(ft->cf, node, childnumb);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ toku_pin_ftnode_with_dep_nodes(ft, BP_BLOCKNUM(node, childnumb), childfullhash, &bfe, PL_WRITE_EXPENSIVE, 2, dep_nodes, &childb, true);
+ }
+
+ if (toku_bnc_n_entries(BNC(node,childnuma))>0) {
+ flush_this_child(ft, node, childa, childnuma, fa);
+ }
+ if (toku_bnc_n_entries(BNC(node,childnumb))>0) {
+ flush_this_child(ft, node, childb, childnumb, fa);
+ }
+
+ // now we have both children pinned in main memory, and cachetable locked,
+ // so no checkpoints will occur.
+
+ bool did_merge, did_rebalance;
+ {
+ DBT splitk;
+ toku_init_dbt(&splitk);
+ const DBT old_split_key = node->pivotkeys.get_pivot(childnuma);
+ maybe_merge_pinned_nodes(node, &old_split_key, childa, childb, &did_merge, &did_rebalance, &splitk, ft->h->nodesize);
+ //toku_verify_estimates(t,childa);
+ // the tree did react if a merge (did_merge) or rebalance (new split key) occurred
+ *did_react = (bool)(did_merge || did_rebalance);
+
+ if (did_merge) {
+ invariant_null(splitk.data);
+ NONLEAF_CHILDINFO remaining_bnc = BNC(node, childnuma);
+ NONLEAF_CHILDINFO merged_bnc = BNC(node, childnumb);
+ for (unsigned int i = 0; i < (sizeof remaining_bnc->flow) / (sizeof remaining_bnc->flow[0]); ++i) {
+ remaining_bnc->flow[i] += merged_bnc->flow[i];
+ }
+ destroy_nonleaf_childinfo(merged_bnc);
+ set_BNULL(node, childnumb);
+ node->n_children--;
+ memmove(&node->bp[childnumb],
+ &node->bp[childnumb+1],
+ (node->n_children-childnumb)*sizeof(node->bp[0]));
+ REALLOC_N(node->n_children, node->bp);
+ node->pivotkeys.delete_at(childnuma);
+
+ // Handle a merge of the rightmost leaf node.
+ BLOCKNUM rightmost_blocknum = toku_unsafe_fetch(&ft->rightmost_blocknum);
+ if (did_merge && childb->blocknum.b == rightmost_blocknum.b) {
+ invariant(childb->blocknum.b != ft->h->root_blocknum.b);
+ toku_ftnode_swap_pair_values(childa, childb);
+ BP_BLOCKNUM(node, childnuma) = childa->blocknum;
+ }
+
+ paranoid_invariant(BP_BLOCKNUM(node, childnuma).b == childa->blocknum.b);
+ childa->set_dirty(); // just to make sure
+ childb->set_dirty(); // just to make sure
+ } else {
+ // flow will be inaccurate for a while, oh well. the children
+ // are leaves in this case so it's not a huge deal (we're
+ // pretty far down the tree)
+
+ // If we didn't merge the nodes, then we need the correct pivot.
+ invariant_notnull(splitk.data);
+ node->pivotkeys.replace_at(&splitk, childnuma);
+ node->set_dirty();
+ }
+ toku_destroy_dbt(&splitk);
+ }
+ //
+ // now we possibly flush the children
+ //
+ if (did_merge) {
+ // for test
+ call_flusher_thread_callback(flt_flush_before_unpin_remove);
+
+ // merge_remove_key_callback will free the blocknum
+ int rrb = toku_cachetable_unpin_and_remove(
+ ft->cf,
+ childb->ct_pair,
+ merge_remove_key_callback,
+ ft
+ );
+ assert_zero(rrb);
+
+ // for test
+ call_flusher_thread_callback(ft_flush_aflter_merge);
+
+ // unlock the parent
+ paranoid_invariant(node->dirty());
+ toku_unpin_ftnode(ft, node);
+ }
+ else {
+ // for test
+ call_flusher_thread_callback(ft_flush_aflter_rebalance);
+
+ // unlock the parent
+ paranoid_invariant(node->dirty());
+ toku_unpin_ftnode(ft, node);
+ toku_unpin_ftnode(ft, childb);
+ }
+ if (childa->height > 0 && fa->should_recursively_flush(childa, fa->extra)) {
+ toku_ft_flush_some_child(ft, childa, fa);
+ }
+ else {
+ toku_unpin_ftnode(ft, childa);
+ }
+}
+
+void toku_ft_flush_some_child(FT ft, FTNODE parent, struct flusher_advice *fa)
+// Effect: This function does the following:
+// - Pick a child of parent (the heaviest child),
+// - flush from parent to child,
+// - possibly split/merge child.
+// - if child is gorged, recursively proceed with child
+// Note that parent is already locked
+// Upon exit of this function, parent is unlocked and no new
+// nodes (such as a child) remain locked
+{
+ int dirtied = 0;
+ NONLEAF_CHILDINFO bnc = NULL;
+ paranoid_invariant(parent->height>0);
+ toku_ftnode_assert_fully_in_memory(parent);
+ TXNID parent_oldest_referenced_xid_known = parent->oldest_referenced_xid_known;
+
+ // pick the child we want to flush to
+ int childnum = fa->pick_child(ft, parent, fa->extra);
+
+ // for test
+ call_flusher_thread_callback(flt_flush_before_child_pin);
+
+ // get the child into memory
+ BLOCKNUM targetchild = BP_BLOCKNUM(parent, childnum);
+ ft->blocktable.verify_blocknum_allocated(targetchild);
+ uint32_t childfullhash = compute_child_fullhash(ft->cf, parent, childnum);
+ FTNODE child;
+ ftnode_fetch_extra bfe;
+ // Note that we don't read the entire node into memory yet.
+ // The idea is to try to do the minimum work before releasing the parent lock
+ bfe.create_for_min_read(ft);
+ toku_pin_ftnode_with_dep_nodes(ft, targetchild, childfullhash, &bfe, PL_WRITE_EXPENSIVE, 1, &parent, &child, true);
+
+ // for test
+ call_flusher_thread_callback(ft_flush_aflter_child_pin);
+
+ if (fa->should_destroy_basement_nodes(fa)) {
+ maybe_destroy_child_blbs(parent, child, ft);
+ }
+
+ // Note that at this point, we don't have the entire child in memory.
+ // Let's do a quick check to see if the child may be reactive
+ // If the child cannot be reactive, then we can safely unlock
+ // the parent before finishing reading in the entire child node.
+ bool may_child_be_reactive = ft_ftnode_may_be_reactive(ft, child);
+
+ paranoid_invariant(child->blocknum.b!=0);
+
+ // only do the following work if there is a flush to perform
+ if (toku_bnc_n_entries(BNC(parent, childnum)) > 0 || parent->height == 1) {
+ if (!parent->dirty()) {
+ dirtied++;
+ parent->set_dirty();
+ }
+ // detach buffer
+ BP_WORKDONE(parent, childnum) = 0; // this buffer is drained, no work has been done by its contents
+ bnc = BNC(parent, childnum);
+ NONLEAF_CHILDINFO new_bnc = toku_create_empty_nl();
+ memcpy(new_bnc->flow, bnc->flow, sizeof bnc->flow);
+ set_BNC(parent, childnum, new_bnc);
+ }
+
+ //
+ // at this point, the buffer has been detached from the parent
+ // and a new empty buffer has been placed in its stead
+ // so, if we are absolutely sure that the child is not
+ // reactive, we can unpin the parent
+ //
+ if (!may_child_be_reactive) {
+ toku_unpin_ftnode(ft, parent);
+ parent = NULL;
+ }
+
+ //
+ // now, if necessary, read/decompress the rest of child into memory,
+ // so that we can proceed and apply the flush
+ //
+ bring_node_fully_into_memory(child, ft);
+
+ // It is possible that after reading in the entire child,
+ // we now know that the child is not reactive.
+ // If so, we can unpin the parent right now:
+ // we won't be splitting/merging the child,
+ // and we have already replaced the parent's bnc
+ // with a fresh one
+ enum reactivity child_re = toku_ftnode_get_reactivity(ft, child);
+ if (parent && child_re == RE_STABLE) {
+ toku_unpin_ftnode(ft, parent);
+ parent = NULL;
+ }
+
+ // from above, we know at this point that either the bnc
+ // is detached from the parent (which may be unpinned),
+ // and we have to apply the flush, or there was no data
+ // in the buffer to flush, and as a result, flushing is not necessary
+ // and bnc is NULL
+ if (bnc != NULL) {
+ if (!child->dirty()) {
+ dirtied++;
+ child->set_dirty();
+ }
+ // do the actual flush
+ toku_bnc_flush_to_child(
+ ft,
+ bnc,
+ child,
+ parent_oldest_referenced_xid_known
+ );
+ destroy_nonleaf_childinfo(bnc);
+ }
+
+ fa->update_status(child, dirtied, fa->extra);
+ // let's get the reactivity of the child again,
+ // it is possible that the flush got rid of some values
+ // and now the parent is no longer reactive
+ child_re = toku_ftnode_get_reactivity(ft, child);
+ // if the parent has been unpinned above, then
+ // this is our only option, even if the child is not stable
+ // if the child is not stable, we'll handle it the next
+ // time we need to flush to the child
+ if (!parent ||
+ child_re == RE_STABLE ||
+ (child_re == RE_FUSIBLE && parent->n_children == 1)
+ )
+ {
+ if (parent) {
+ toku_unpin_ftnode(ft, parent);
+ parent = NULL;
+ }
+ //
+ // it is the responsibility of toku_ft_flush_some_child to unpin child
+ //
+ if (child->height > 0 && fa->should_recursively_flush(child, fa->extra)) {
+ toku_ft_flush_some_child(ft, child, fa);
+ }
+ else {
+ toku_unpin_ftnode(ft, child);
+ }
+ }
+ else if (child_re == RE_FISSIBLE) {
+ //
+ // it is the responsibility of `ft_split_child` to unlock nodes of
+ // parent and child as it sees fit
+ //
+ paranoid_invariant(parent); // just make sure we have not accidentally unpinned parent
+ ft_split_child(ft, parent, childnum, child, SPLIT_EVENLY, fa);
+ }
+ else if (child_re == RE_FUSIBLE) {
+ //
+ // it is the responsibility of `maybe_merge_child` to unlock nodes of
+ // parent and child as it sees fit
+ //
+ paranoid_invariant(parent); // just make sure we have not accidentally unpinned parent
+ fa->maybe_merge_child(fa, ft, parent, childnum, child, fa->extra);
+ }
+ else {
+ abort();
+ }
+}
+
+void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID parent_oldest_referenced_xid_known) {
+ paranoid_invariant(bnc);
+
+ TOKULOGGER logger = toku_cachefile_logger(ft->cf);
+ TXN_MANAGER txn_manager = logger != nullptr ? toku_logger_get_txn_manager(logger) : nullptr;
+ TXNID oldest_referenced_xid_for_simple_gc = TXNID_NONE;
+
+ txn_manager_state txn_state_for_gc(txn_manager);
+ bool do_garbage_collection = child->height == 0 && txn_manager != nullptr;
+ if (do_garbage_collection) {
+ txn_state_for_gc.init();
+ oldest_referenced_xid_for_simple_gc = toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager);
+ }
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_for_simple_gc,
+ child->oldest_referenced_xid_known,
+ true);
+ struct flush_msg_fn {
+ FT ft;
+ FTNODE child;
+ NONLEAF_CHILDINFO bnc;
+ txn_gc_info *gc_info;
+
+ STAT64INFO_S stats_delta;
+ int64_t logical_rows_delta = 0;
+ size_t remaining_memsize = bnc->msg_buffer.buffer_size_in_use();
+
+ flush_msg_fn(FT t, FTNODE n, NONLEAF_CHILDINFO nl, txn_gc_info *g) :
+ ft(t), child(n), bnc(nl), gc_info(g), remaining_memsize(bnc->msg_buffer.buffer_size_in_use()) {
+ stats_delta = { 0, 0 };
+ }
+ int operator()(const ft_msg &msg, bool is_fresh) {
+ size_t flow_deltas[] = { 0, 0 };
+ size_t memsize_in_buffer = message_buffer::msg_memsize_in_buffer(msg);
+ if (remaining_memsize <= bnc->flow[0]) {
+ // this message is in the current checkpoint's worth of
+ // the end of the message buffer
+ flow_deltas[0] = memsize_in_buffer;
+ } else if (remaining_memsize <= bnc->flow[0] + bnc->flow[1]) {
+ // this message is in the last checkpoint's worth of the
+ // end of the message buffer
+ flow_deltas[1] = memsize_in_buffer;
+ }
+ toku_ftnode_put_msg(
+ ft->cmp,
+ ft->update_fun,
+ child,
+ -1,
+ msg,
+ is_fresh,
+ gc_info,
+ flow_deltas,
+ &stats_delta,
+ &logical_rows_delta);
+ remaining_memsize -= memsize_in_buffer;
+ return 0;
+ }
+ } flush_fn(ft, child, bnc, &gc_info);
+ bnc->msg_buffer.iterate(flush_fn);
+
+ child->oldest_referenced_xid_known = parent_oldest_referenced_xid_known;
+
+ invariant(flush_fn.remaining_memsize == 0);
+ if (flush_fn.stats_delta.numbytes || flush_fn.stats_delta.numrows) {
+ toku_ft_update_stats(&ft->in_memory_stats, flush_fn.stats_delta);
+ }
+ toku_ft_adjust_logical_row_count(ft, flush_fn.logical_rows_delta);
+ if (do_garbage_collection) {
+ size_t buffsize = bnc->msg_buffer.buffer_size_in_use();
+ // may be misleading if there's a broadcast message in there
+ toku_ft_status_note_msg_bytes_out(buffsize);
+ }
+}
+
+static void
+update_cleaner_status(
+ FTNODE node,
+ int childnum)
+{
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_TOTAL_NODES)++;
+ if (node->height == 1) {
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_H1_NODES)++;
+ } else {
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_HGT1_NODES)++;
+ }
+
+ unsigned int nbytesinbuf = toku_bnc_nbytesinbuf(BNC(node, childnum));
+ if (nbytesinbuf == 0) {
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_EMPTY_NODES)++;
+ } else {
+ if (nbytesinbuf > FL_STATUS_VAL(FT_FLUSHER_CLEANER_MAX_BUFFER_SIZE)) {
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_MAX_BUFFER_SIZE) = nbytesinbuf;
+ }
+ if (nbytesinbuf < FL_STATUS_VAL(FT_FLUSHER_CLEANER_MIN_BUFFER_SIZE)) {
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_MIN_BUFFER_SIZE) = nbytesinbuf;
+ }
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_TOTAL_BUFFER_SIZE) += nbytesinbuf;
+
+ uint64_t workdone = BP_WORKDONE(node, childnum);
+ if (workdone > FL_STATUS_VAL(FT_FLUSHER_CLEANER_MAX_BUFFER_WORKDONE)) {
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_MAX_BUFFER_WORKDONE) = workdone;
+ }
+ if (workdone < FL_STATUS_VAL(FT_FLUSHER_CLEANER_MIN_BUFFER_WORKDONE)) {
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_MIN_BUFFER_WORKDONE) = workdone;
+ }
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_TOTAL_BUFFER_WORKDONE) += workdone;
+ }
+}
+
+static void
+dummy_update_status(
+ FTNODE UU(child),
+ int UU(dirtied),
+ void* UU(extra)
+ )
+{
+}
+
+static int
+dummy_pick_heaviest_child(FT UU(h),
+ FTNODE UU(parent),
+ void* UU(extra))
+{
+ abort();
+ return -1;
+}
+
+void toku_ft_split_child(
+ FT ft,
+ FTNODE node,
+ int childnum,
+ FTNODE child,
+ enum split_mode split_mode
+ )
+{
+ struct flusher_advice fa;
+ flusher_advice_init(
+ &fa,
+ dummy_pick_heaviest_child,
+ dont_destroy_basement_nodes,
+ never_recursively_flush,
+ default_merge_child,
+ dummy_update_status,
+ default_pick_child_after_split,
+ NULL
+ );
+ ft_split_child(
+ ft,
+ node,
+ childnum, // childnum to split
+ child,
+ split_mode,
+ &fa
+ );
+}
+
+void toku_ft_merge_child(
+ FT ft,
+ FTNODE node,
+ int childnum
+ )
+{
+ struct flusher_advice fa;
+ flusher_advice_init(
+ &fa,
+ dummy_pick_heaviest_child,
+ dont_destroy_basement_nodes,
+ never_recursively_flush,
+ default_merge_child,
+ dummy_update_status,
+ default_pick_child_after_split,
+ NULL
+ );
+ bool did_react;
+ ft_merge_child(
+ ft,
+ node,
+ childnum, // childnum to merge
+ &did_react,
+ &fa
+ );
+}
+
+int
+toku_ftnode_cleaner_callback(
+ void *ftnode_pv,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ void *extraargs)
+{
+ FTNODE node = (FTNODE) ftnode_pv;
+ invariant(node->blocknum.b == blocknum.b);
+ invariant(node->fullhash == fullhash);
+ invariant(node->height > 0); // we should never pick a leaf node (for now at least)
+ FT ft = (FT) extraargs;
+ bring_node_fully_into_memory(node, ft);
+ int childnum = find_heaviest_child(node);
+ update_cleaner_status(node, childnum);
+
+ // Either toku_ft_flush_some_child will unlock the node, or we do it here.
+ if (toku_bnc_nbytesinbuf(BNC(node, childnum)) > 0) {
+ struct flusher_advice fa;
+ struct flush_status_update_extra fste;
+ ct_flusher_advice_init(&fa, &fste, ft->h->nodesize);
+ toku_ft_flush_some_child(ft, node, &fa);
+ } else {
+ toku_unpin_ftnode(ft, node);
+ }
+ return 0;
+}
+
+struct flusher_extra {
+ FT ft;
+ FTNODE node;
+ NONLEAF_CHILDINFO bnc;
+ TXNID parent_oldest_referenced_xid_known;
+};
+
+//
+// This is the function that gets called by a
+// background thread. Its purpose is to complete
+// a flush, and possibly do a split/merge.
+//
+static void flush_node_fun(void *fe_v)
+{
+ toku::context flush_ctx(CTX_FLUSH);
+ struct flusher_extra* fe = (struct flusher_extra *) fe_v;
+ // The node that has been placed on the background
+ // thread may not be fully in memory. Some message
+ // buffers may be compressed. Before performing
+ // any operations, we must first make sure
+ // the node is fully in memory
+ //
+ // If we have a bnc, that means fe->node is a child, and we've already
+ // destroyed its basement nodes if necessary, so we now need to either
+ // read them back in, or just do the regular partial fetch. If we
+ // don't, that means fe->node is a parent, so we need to do this anyway.
+ bring_node_fully_into_memory(fe->node,fe->ft);
+ fe->node->set_dirty();
+
+ struct flusher_advice fa;
+ struct flush_status_update_extra fste;
+ flt_flusher_advice_init(&fa, &fste, fe->ft->h->nodesize);
+
+ if (fe->bnc) {
+ // In this case, we have a bnc to flush to a node
+
+ // for test purposes
+ call_flusher_thread_callback(flt_flush_before_applying_inbox);
+
+ toku_bnc_flush_to_child(
+ fe->ft,
+ fe->bnc,
+ fe->node,
+ fe->parent_oldest_referenced_xid_known
+ );
+ destroy_nonleaf_childinfo(fe->bnc);
+
+ // after the flush has completed, now check to see if the node needs flushing
+ // If so, call toku_ft_flush_some_child on the node (because this flush intends to
+ // pass a meaningful oldest referenced xid for simple garbage collection), and it is the
+ // responsibility of the flush to unlock the node. otherwise, we unlock it here.
+ if (fe->node->height > 0 && toku_ftnode_nonleaf_is_gorged(fe->node, fe->ft->h->nodesize)) {
+ toku_ft_flush_some_child(fe->ft, fe->node, &fa);
+ }
+ else {
+ toku_unpin_ftnode(fe->ft,fe->node);
+ }
+ }
+ else {
+ // In this case, we were just passed a node with no
+ // bnc, which means we are tasked with flushing some
+ // buffer in the node.
+ // It is the responsibility of flush some child to unlock the node
+ toku_ft_flush_some_child(fe->ft, fe->node, &fa);
+ }
+ remove_background_job_from_cf(fe->ft->cf);
+ toku_free(fe);
+}
+
+static void
+place_node_and_bnc_on_background_thread(
+ FT ft,
+ FTNODE node,
+ NONLEAF_CHILDINFO bnc,
+ TXNID parent_oldest_referenced_xid_known)
+{
+ struct flusher_extra *XMALLOC(fe);
+ fe->ft = ft;
+ fe->node = node;
+ fe->bnc = bnc;
+ fe->parent_oldest_referenced_xid_known = parent_oldest_referenced_xid_known;
+ cachefile_kibbutz_enq(ft->cf, flush_node_fun, fe);
+}
+
+//
+// This takes as input a gorged, locked, non-leaf node named parent
+// and sets up a flush to be done in the background.
+// The flush is set up like this:
+// - We call maybe_get_and_pin_clean on the child we want to flush to in order to try to lock the child
+// - if we successfully pin the child, and the child does not need to be split or merged
+// then we detach the buffer, place the child and buffer onto a background thread, and
+// have the flush complete in the background, and unlock the parent. The child will be
+// unlocked on the background thread
+// - if any of the above does not happen (child cannot be locked,
+// child needs to be split/merged), then we place the parent on the background thread.
+// The parent will be unlocked on the background thread
+//
+void toku_ft_flush_node_on_background_thread(FT ft, FTNODE parent)
+{
+ toku::context flush_ctx(CTX_FLUSH);
+ TXNID parent_oldest_referenced_xid_known = parent->oldest_referenced_xid_known;
+ //
+ // first let's see if we can detach buffer on client thread
+ // and pick the child we want to flush to
+ //
+ int childnum = find_heaviest_child(parent);
+ paranoid_invariant(toku_bnc_n_entries(BNC(parent, childnum))>0);
+ //
+ // see if we can pin the child
+ //
+ FTNODE child;
+ uint32_t childfullhash = compute_child_fullhash(ft->cf, parent, childnum);
+ int r = toku_maybe_pin_ftnode_clean(ft, BP_BLOCKNUM(parent, childnum), childfullhash, PL_WRITE_EXPENSIVE, &child);
+ if (r != 0) {
+ // In this case, we could not lock the child, so just place the parent on the background thread
+ // In the callback, we will use toku_ft_flush_some_child, which checks to
+ // see if we should blow away the old basement nodes.
+ place_node_and_bnc_on_background_thread(ft, parent, NULL, parent_oldest_referenced_xid_known);
+ }
+ else {
+ //
+ // successfully locked child
+ //
+ bool may_child_be_reactive = ft_ftnode_may_be_reactive(ft, child);
+ if (!may_child_be_reactive) {
+ // We're going to unpin the parent, so before we do, we must
+ // check to see if we need to blow away the basement nodes to
+ // keep the MSN invariants intact.
+ maybe_destroy_child_blbs(parent, child, ft);
+
+ //
+ // can detach buffer and unpin root here
+ //
+ parent->set_dirty();
+ BP_WORKDONE(parent, childnum) = 0; // this buffer is drained, no work has been done by its contents
+ NONLEAF_CHILDINFO bnc = BNC(parent, childnum);
+ NONLEAF_CHILDINFO new_bnc = toku_create_empty_nl();
+ memcpy(new_bnc->flow, bnc->flow, sizeof bnc->flow);
+ set_BNC(parent, childnum, new_bnc);
+
+ //
+ // at this point, the buffer has been detached from the parent
+ // and a new empty buffer has been placed in its stead
+ // so, because we know for sure the child is not
+ // reactive, we can unpin the parent
+ //
+ place_node_and_bnc_on_background_thread(ft, child, bnc, parent_oldest_referenced_xid_known);
+ toku_unpin_ftnode(ft, parent);
+ }
+ else {
+ // because the child may be reactive, we need to
+ // put parent on background thread.
+ // As a result, we unlock the child here.
+ toku_unpin_ftnode(ft, child);
+ // Again, we'll have the parent on the background thread, so
+ // we don't need to destroy the basement nodes yet.
+ place_node_and_bnc_on_background_thread(ft, parent, NULL, parent_oldest_referenced_xid_known);
+ }
+ }
+}
+
+#include <toku_race_tools.h>
+void __attribute__((__constructor__)) toku_ft_flusher_helgrind_ignore(void);
+void
+toku_ft_flusher_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&fl_status, sizeof fl_status);
+}
diff --git a/storage/tokudb/PerconaFT/ft/ft-flusher.h b/storage/tokudb/PerconaFT/ft/ft-flusher.h
new file mode 100644
index 00000000..347bc325
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-flusher.h
@@ -0,0 +1,147 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/ft-internal.h"
+
+void toku_ft_flusher_get_status(FT_FLUSHER_STATUS);
+
+/**
+ * Only for testing, not for production.
+ *
+ * Set a callback the flusher thread will use to signal various points
+ * during its execution.
+ */
+void
+toku_flusher_thread_set_callback(
+ void (*callback_f)(int, void*),
+ void* extra
+ );
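+
+// A minimal, test-only usage sketch (hypothetical caller code, not part of
+// this API). The int passed to the callback is a flusher-state value such as
+// flt_flush_before_split (see ft-flusher-internal.h); the tally array below
+// is only an illustration.
+//
+//   static int hits[64]; // sized generously for the small set of states
+//   static void note_flusher_state(int state, void *extra) {
+//       int *tally = (int *) extra;
+//       tally[state]++; // count how often each callback point is reached
+//   }
+//   ...
+//   toku_flusher_thread_set_callback(note_flusher_state, hits);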
+
+/**
+ * Puts a workitem on the flusher thread queue, scheduling the node to be
+ * flushed by toku_ft_flush_some_child.
+ */
+void toku_ft_flush_node_on_background_thread(FT ft, FTNODE parent);
+
+enum split_mode {
+ SPLIT_EVENLY,
+ SPLIT_LEFT_HEAVY,
+ SPLIT_RIGHT_HEAVY
+};
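+// SPLIT_EVENLY is what the flusher and cleaner paths use (see ft_split_child
+// in ft-flusher.cc); the heavy variants skew the split so that nearly all of
+// the data stays on the named side (see ftleaf_get_split_loc).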
+
+
+// Given pinned node and pinned child, split child into two
+// and update node with information about its new child.
+void toku_ft_split_child(
+ FT ft,
+ FTNODE node,
+ int childnum,
+ FTNODE child,
+ enum split_mode split_mode
+ );
+
+// Given pinned node, merge childnum with a neighbor and update node with
+// information about the change
+void toku_ft_merge_child(
+ FT ft,
+ FTNODE node,
+ int childnum
+ );
+
+/**
+ * Effect: Split a leaf node.
+ * Argument "node" is node to be split.
+ * Upon return:
+ * nodea and nodeb point to new nodes that result from split of "node"
+ * nodea is the left node that results from the split
+ * splitk is the right-most key of nodea
+ */
+// TODO: Rename toku_ft_leaf_split
+void
+ftleaf_split(
+ FT ft,
+ FTNODE node,
+ FTNODE *nodea,
+ FTNODE *nodeb,
+ DBT *splitk,
+ bool create_new_node,
+ enum split_mode split_mode,
+ uint32_t num_dependent_nodes,
+ FTNODE* dependent_nodes
+ );
+
+/**
+ * Effect: node must be a non-leaf node. It is split into two nodes, and
+ * the fanout is split between them.
+ * Sets splitk->data pointer to a malloc'd value
+ * Sets nodea, and nodeb to the two new nodes.
+ * The caller must replace the old node with the two new nodes.
+ * This function will definitely reduce the number of children for the node,
+ * but it does not guarantee that the resulting nodes are smaller than nodesize.
+ */
+// TODO: Rename toku_ft_nonleaf_split
+void
+ft_nonleaf_split(
+ FT ft,
+ FTNODE node,
+ FTNODE *nodea,
+ FTNODE *nodeb,
+ DBT *splitk,
+ uint32_t num_dependent_nodes,
+ FTNODE* dependent_nodes
+ );
+
+/************************************************************************
+ * HOT optimize, should perhaps be factored out to its own header file *
+ ************************************************************************
+ */
+void toku_ft_hot_get_status(FT_HOT_STATUS);
+
+/**
+ * Takes the given FT and pushes all pending messages between left and right down to the leaf nodes.
+ * All messages between left and right (inclusive) will be pushed, as will some others
+ * that happen to share buffers with messages near the boundary.
+ * If left is NULL, messages from beginning of FT are pushed. If right is NULL, that means
+ * we go until the end of the FT.
+ */
+int
+toku_ft_hot_optimize(FT_HANDLE ft_h, DBT* left, DBT* right,
+ int (*progress_callback)(void *extra, float progress),
+ void *progress_extra, uint64_t* loops_run);
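+
+// A minimal usage sketch (hypothetical caller code): optimize an entire
+// dictionary through an already-open FT_HANDLE `h`, reporting progress.
+// Whether a nonzero return from the progress callback cancels the
+// optimization is an assumption here, not something this header promises.
+//
+//   static int report(void *extra, float progress) {
+//       (void) extra;
+//       fprintf(stderr, "hot optimize: %.0f%%\n", progress * 100.0f);
+//       return 0;
+//   }
+//   ...
+//   uint64_t loops_run = 0;
+//   int r = toku_ft_hot_optimize(h, NULL, NULL, report, NULL, &loops_run);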
diff --git a/storage/tokudb/PerconaFT/ft/ft-hot-flusher.cc b/storage/tokudb/PerconaFT/ft/ft-hot-flusher.cc
new file mode 100644
index 00000000..ffab8647
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-hot-flusher.cc
@@ -0,0 +1,362 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "ft/ft.h"
+#include "ft/ft-cachetable-wrappers.h"
+#include "ft/ft-flusher.h"
+#include "ft/ft-flusher-internal.h"
+#include "ft/ft-internal.h"
+#include "ft/node.h"
+#include "portability/toku_atomic.h"
+#include "util/context.h"
+#include "util/status.h"
+
+// Member description:
+// 1. highest_pivot_key - this is the key that corresponds to the
+// most recently flushed leaf entry.
+// 2. max_current_key - this is the pivot/key that we inherit as
+// we descend down the tree. We use this to set the highest_pivot_key.
+// 3. sub_tree_size - this is the percentage of the entire tree that our
+// current position (in a sub-tree) encompasses.
+// 4. percentage_done - this is the percentage of leaf nodes that have
+// been flushed into.
+// 5. rightmost_leaf_seen - this is a boolean we use to determine
+// whether we have flushed to every leaf node.
+struct hot_flusher_extra {
+ DBT highest_pivot_key;
+ DBT max_current_key;
+ float sub_tree_size;
+ float percentage_done;
+ bool rightmost_leaf_seen;
+};
+
+void
+toku_ft_hot_get_status(FT_HOT_STATUS s) {
+ hot_status.init();
+ *s = hot_status;
+}
+
+// Copies the max current key to the highest pivot key seen.
+static void
+hot_set_highest_key(struct hot_flusher_extra *flusher)
+{
+ // The max current key will be NULL if we are traversing in the
+ // rightmost subtree of a given parent. As such, we don't want to
+ // allocate memory for this case.
+ toku_destroy_dbt(&flusher->highest_pivot_key);
+ if (flusher->max_current_key.data != NULL) {
+ // Otherwise, let's copy all the contents from one key to the other.
+ toku_clone_dbt(&flusher->highest_pivot_key, flusher->max_current_key);
+ }
+}
+
+static void
+hot_set_start_key(struct hot_flusher_extra *flusher, const DBT* start)
+{
+ toku_destroy_dbt(&flusher->highest_pivot_key);
+ if (start != NULL) {
+ // Otherwise, let's copy all the contents from one key to the other.
+ toku_clone_dbt(&flusher->highest_pivot_key, *start);
+ }
+}
+
+static int
+hot_just_pick_child(FT ft,
+ FTNODE parent,
+ struct hot_flusher_extra *flusher)
+{
+ int childnum = 0;
+
+ // Search through the parent's pivots to see which one is greater than
+ // the highest_pivot_key seen so far.
+ if (flusher->highest_pivot_key.data == NULL)
+ {
+ // Special case of the first child of the root node,
+ // also known as NEGATIVE INFINITY.
+ childnum = 0;
+ } else {
+ // Find the pivot boundary.
+ childnum = toku_ftnode_hot_next_child(parent, &flusher->highest_pivot_key, ft->cmp);
+ }
+
+ return childnum;
+}
+
+static void
+hot_update_flusher_keys(FTNODE parent,
+ int childnum,
+ struct hot_flusher_extra *flusher)
+{
+ // Update maximum current key if the child is NOT the rightmost
+ // child node.
+ if (childnum < (parent->n_children - 1)) {
+ toku_destroy_dbt(&flusher->max_current_key);
+ toku_clone_dbt(&flusher->max_current_key, parent->pivotkeys.get_pivot(childnum));
+ }
+}
+
+// Picks which child toku_ft_flush_some_child will use for flushing and
+// recursion.
+static int
+hot_pick_child(FT ft,
+ FTNODE parent,
+ void *extra)
+{
+ struct hot_flusher_extra *flusher = (struct hot_flusher_extra *) extra;
+ int childnum = hot_just_pick_child(ft, parent, flusher);
+
+ // Now we determine the percentage of the tree flushed so far.
+
+ // Whichever subtree we choose to recurse into, it is a fraction
+ // of the current parent.
+ flusher->sub_tree_size /= parent->n_children;
+
+ // Update the percentage complete, using our new subtree size and
+ // the number of children we have already flushed.
+ flusher->percentage_done += (flusher->sub_tree_size * childnum);
+
+ hot_update_flusher_keys(parent, childnum, flusher);
+
+ return childnum;
+}
+
+// Does nothing for now.
+static void
+hot_update_status(FTNODE UU(child),
+ int UU(dirtied),
+ void *UU(extra))
+{
+ return;
+}
+
+// If we've just split a node, HOT needs another chance to decide which
+// one to flush into. This gives it a chance to do that, and update the
+// keys it maintains.
+static int
+hot_pick_child_after_split(FT ft,
+ FTNODE parent,
+ int childnuma,
+ int childnumb,
+ void *extra)
+{
+ struct hot_flusher_extra *flusher = (struct hot_flusher_extra *) extra;
+ int childnum = hot_just_pick_child(ft, parent, flusher);
+ assert(childnum == childnuma || childnum == childnumb);
+ hot_update_flusher_keys(parent, childnum, flusher);
+ if (parent->height == 1) {
+ // We don't want to recurse into a leaf node, but if we return
+ // anything valid, ft_split_child will try to go there, so we
+ // return -1 to allow ft_split_child to have its default
+ // behavior, which will be to stop recursing.
+ childnum = -1;
+ }
+ return childnum;
+}
+
+// Basic constructor/initializer for the hot flusher struct.
+static void
+hot_flusher_init(struct flusher_advice *advice,
+ struct hot_flusher_extra *flusher)
+{
+ // Initialize the highest pivot key seen to NULL. This represents
+ // NEGATIVE INFINITY and is used to cover the special case of our
+ // first traversal of the tree.
+ toku_init_dbt(&(flusher->highest_pivot_key));
+ toku_init_dbt(&(flusher->max_current_key));
+ flusher->rightmost_leaf_seen = 0;
+ flusher->sub_tree_size = 1.0;
+ flusher->percentage_done = 0.0;
+ flusher_advice_init(advice,
+ hot_pick_child,
+ dont_destroy_basement_nodes,
+ always_recursively_flush,
+ default_merge_child,
+ hot_update_status,
+ hot_pick_child_after_split,
+ flusher
+ );
+}
+
+// Erases any DBT keys we have copied from a traversal.
+static void
+hot_flusher_destroy(struct hot_flusher_extra *flusher)
+{
+ toku_destroy_dbt(&flusher->highest_pivot_key);
+ toku_destroy_dbt(&flusher->max_current_key);
+}
+
+// Entry point for Hot Optimize Table (HOT). Note, this function is
+// not recursive. It iterates over root-to-leaf paths.
+int
+toku_ft_hot_optimize(FT_HANDLE ft_handle, DBT* left, DBT* right,
+ int (*progress_callback)(void *extra, float progress),
+ void *progress_extra, uint64_t* loops_run)
+{
+ toku::context flush_ctx(CTX_FLUSH);
+
+ int r = 0;
+ struct hot_flusher_extra flusher;
+ struct flusher_advice advice;
+
+ hot_flusher_init(&advice, &flusher);
+ hot_set_start_key(&flusher, left);
+
+ uint64_t loop_count = 0;
+ MSN msn_at_start_of_hot = ZERO_MSN; // capture msn from root at
+ // start of HOT operation
+ (void) toku_sync_fetch_and_add(&HOT_STATUS_VAL(FT_HOT_NUM_STARTED), 1);
+
+ toku_ft_note_hot_begin(ft_handle);
+
+ // Higher level logic prevents a dictionary from being deleted or
+ // truncated during a hot optimize operation. Doing so would violate
+ // the hot optimize contract.
+ do {
+ FTNODE root;
+ CACHEKEY root_key;
+ uint32_t fullhash;
+
+ {
+ // Get the root node (the first parent of each successive HOT
+ // call).
+ toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
+ toku_pin_ftnode(ft_handle->ft,
+ (BLOCKNUM) root_key,
+ fullhash,
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &root,
+ true);
+ toku_ftnode_assert_fully_in_memory(root);
+ }
+
+ // Prepare HOT diagnostics.
+ if (loop_count == 0) {
+ // The first time through, capture msn from root
+ msn_at_start_of_hot = root->max_msn_applied_to_node_on_disk;
+ }
+
+ loop_count++;
+
+ if (loop_count > HOT_STATUS_VAL(FT_HOT_MAX_ROOT_FLUSH_COUNT)) {
+ HOT_STATUS_VAL(FT_HOT_MAX_ROOT_FLUSH_COUNT) = loop_count;
+ }
+
+ // Initialize the maximum current key. We need to do this for
+ // every traversal.
+ toku_destroy_dbt(&flusher.max_current_key);
+
+ flusher.sub_tree_size = 1.0;
+ flusher.percentage_done = 0.0;
+
+ // This should recurse to the bottom of the tree and then
+ // return.
+ if (root->height > 0) {
+ toku_ft_flush_some_child(ft_handle->ft, root, &advice);
+ } else {
+ // Since there are no children to flush, we should abort
+ // the HOT call.
+ flusher.rightmost_leaf_seen = 1;
+ toku_unpin_ftnode(ft_handle->ft, root);
+ }
+
+ // Set the highest pivot key seen here, since the parent may
+ // be unlocked and NULL'd later in our caller:
+ // toku_ft_flush_some_child().
+ hot_set_highest_key(&flusher);
+
+ // This is where we determine if the traversal is finished or
+ // not.
+ if (flusher.max_current_key.data == NULL) {
+ flusher.rightmost_leaf_seen = 1;
+ }
+ else if (right) {
+ // if we have flushed past the bounds set for us,
+ // set rightmost_leaf_seen so we exit
+ int cmp = ft_handle->ft->cmp(&flusher.max_current_key, right);
+ if (cmp > 0) {
+ flusher.rightmost_leaf_seen = 1;
+ }
+ }
+
+ // Update HOT's progress.
+ if (progress_callback != NULL) {
+ r = progress_callback(progress_extra, flusher.percentage_done);
+
+ // Check if the callback wants us to stop running HOT.
+ if (r != 0) {
+ flusher.rightmost_leaf_seen = 1;
+ }
+ }
+
+ // Loop until the max key has been updated to positive
+ // infinity.
+ } while (!flusher.rightmost_leaf_seen);
+ *loops_run = loop_count;
+
+ // Cleanup.
+ hot_flusher_destroy(&flusher);
+
+ // More diagnostics.
+ {
+ bool success = false;
+ if (r == 0) { success = true; }
+
+ {
+ toku_ft_note_hot_complete(ft_handle, success, msn_at_start_of_hot);
+ }
+
+ if (success) {
+ (void) toku_sync_fetch_and_add(&HOT_STATUS_VAL(FT_HOT_NUM_COMPLETED), 1);
+ } else {
+ (void) toku_sync_fetch_and_add(&HOT_STATUS_VAL(FT_HOT_NUM_ABORTED), 1);
+ }
+ }
+ return r;
+}
+
+#include <toku_race_tools.h>
+void __attribute__((__constructor__)) toku_hot_helgrind_ignore(void);
+void
+toku_hot_helgrind_ignore(void) {
+ // incremented only while lock is held, but read by engine status asynchronously.
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&hot_status, sizeof hot_status);
+}
diff --git a/storage/tokudb/PerconaFT/ft/ft-internal.h b/storage/tokudb/PerconaFT/ft/ft-internal.h
new file mode 100644
index 00000000..130d3c30
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-internal.h
@@ -0,0 +1,495 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "portability/toku_config.h"
+#include "portability/toku_list.h"
+#include "portability/toku_race_tools.h"
+
+#include "ft/cachetable/cachetable.h"
+#include "ft/comparator.h"
+#include "ft/ft.h"
+#include "ft/ft-ops.h"
+#include "ft/node.h"
+#include "ft/serialize/block_table.h"
+#include "ft/txn/rollback.h"
+#include "ft/ft-status.h"
+
+// Symbol TOKUDB_REVISION is not defined by fractal-tree makefiles, so
+// BUILD_ID of 1000 indicates development build of main, not a release build.
+#if defined(TOKUDB_REVISION)
+#define BUILD_ID TOKUDB_REVISION
+#else
+#error
+#endif
+
+struct ft_search;
+
+enum { FT_DEFAULT_FANOUT = 16 };
+enum { FT_DEFAULT_NODE_SIZE = 4 * 1024 * 1024 };
+enum { FT_DEFAULT_BASEMENT_NODE_SIZE = 128 * 1024 };
+
+// We optimize for a sequential insert pattern if 100 consecutive injections
+// happen into the rightmost leaf node due to promotion.
+enum { FT_SEQINSERT_SCORE_THRESHOLD = 100 };
+
+uint32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum);
+
+enum ft_type {
+ FT_CURRENT = 1,
+ FT_CHECKPOINT_INPROGRESS
+};
+
+extern "C" {
+extern uint force_recovery;
+}
+
+extern int writing_rollback;
+
+// The ft_header is not managed by the cachetable. Instead, it hangs off the cachefile as userdata.
+struct ft_header {
+ enum ft_type type;
+
+ int dirty_;
+
+ void set_dirty() {
+ if(force_recovery) assert(writing_rollback);
+ dirty_ = 1;
+ }
+
+ void clear_dirty() {
+ dirty_ = 0;
+ }
+
+ bool dirty() {
+ return dirty_;
+ }
+
+ // Free-running counter incremented once per checkpoint (toggling LSB).
+ // LSB indicates which header location is used on disk so this
+ // counter is effectively a boolean which alternates with each checkpoint.
+ uint64_t checkpoint_count;
+ // LSN of creation of "checkpoint-begin" record in log.
+ LSN checkpoint_lsn;
+
+ // see serialize/ft_layout_version.h. maybe don't need this if we assume
+ // it's always the current version after deserializing
+ const int layout_version;
+ // different (<) from layout_version if upgraded from a previous
+ // version (useful for debugging)
+ const int layout_version_original;
+ // build_id (svn rev number) of software that wrote this node to
+ // disk. (read from disk, overwritten when written to disk, I
+ // think).
+ const uint32_t build_id;
+ // build_id of software that created this tree
+ const uint32_t build_id_original;
+
+ // time this tree was created
+ const uint64_t time_of_creation;
+ // and the root transaction id that created it
+ TXNID root_xid_that_created;
+ // last time this header was serialized to disk (read from disk,
+ // overwritten when written to disk)
+ uint64_t time_of_last_modification;
+ // last time that this tree was verified
+ uint64_t time_of_last_verification;
+
+ // this field is essentially a const
+ BLOCKNUM root_blocknum;
+
+ const unsigned int flags;
+
+ //protected by toku_ft_lock
+ unsigned int nodesize;
+ unsigned int basementnodesize;
+ enum toku_compression_method compression_method;
+ unsigned int fanout;
+
+ // Current Minimum MSN to be used when upgrading pre-MSN FT's.
+ // This is decremented from our current MIN_MSN so as not to clash
+ // with any existing 'normal' MSN's.
+ MSN highest_unused_msn_for_upgrade;
+ // Largest MSN ever injected into the tree. Used to set the MSN for
+ // messages as they get injected.
+ MSN max_msn_in_ft;
+
+ // last time that a hot optimize operation was begun
+ uint64_t time_of_last_optimize_begin;
+ // last time that a hot optimize operation was successfully completed
+ uint64_t time_of_last_optimize_end;
+ // the number of hot optimize operations currently in progress on this tree
+ uint32_t count_of_optimize_in_progress;
+ // the number of hot optimize operations in progress on this tree at the time of the last crash (this field is in-memory only)
+ uint32_t count_of_optimize_in_progress_read_from_disk;
+ // all messages before this msn have been applied to leaf nodes
+ MSN msn_at_start_of_last_completed_optimize;
+
+ STAT64INFO_S on_disk_stats;
+
+ // This represents the balance of inserts minus deletes and should be
+ // closer to a logical representation of the number of records in an index
+ uint64_t on_disk_logical_rows;
+};
+typedef struct ft_header *FT_HEADER;
+
+// ft_header is always the current version.
+struct ft {
+ FT_HEADER h;
+ FT_HEADER checkpoint_header;
+
+ // These are (mostly) read-only.
+
+ CACHEFILE cf;
+ // unique id for dictionary
+ DICTIONARY_ID dict_id;
+
+ // protected by locktree
+ DESCRIPTOR_S descriptor;
+
+ // protected by locktree and user.
+ // User makes sure this is only changed when there is no activity on the tree
+ DESCRIPTOR_S cmp_descriptor;
+ // contains a pointer to cmp_descriptor (above) - their lifetimes are bound
+ toku::comparator cmp;
+
+ // the update function always utilizes the cmp_descriptor, not the regular one
+ ft_update_func update_fun;
+
+ // These are not read-only:
+
+ // protected by blocktable lock
+ block_table blocktable;
+
+ // protected by atomic builtins
+ STAT64INFO_S in_memory_stats;
+ uint64_t in_memory_logical_rows;
+
+ // transient, not serialized to disk. updated when we do write to
+ // disk. tells us whether we can do partial eviction (we can't if
+ // the on-disk layout version is from before basement nodes)
+ int layout_version_read_from_disk;
+
+ // Logically the reference count is zero if live_ft_handles is empty, num_txns is 0, and pinned_by_checkpoint is false.
+
+ // ft_ref_lock protects modifying live_ft_handles, num_txns, and pinned_by_checkpoint.
+ toku_mutex_t ft_ref_lock;
+ struct toku_list live_ft_handles;
+ // Number of transactions that are using this FT. you should only be able
+ // to modify this if you have a valid handle in live_ft_handles
+ uint32_t num_txns;
+ // A checkpoint is running. If true, then keep this header around for checkpoint, like a transaction
+ bool pinned_by_checkpoint;
+
+ // is this ft a blackhole? if so, all messages are dropped.
+ bool blackhole;
+
+ // The blocknum of the rightmost leaf node in the tree. Stays constant through splits
+ // and merges using pair-swapping (like the root node, see toku_ftnode_swap_pair_values())
+ //
+ // This field only transitions from RESERVED_BLOCKNUM_NULL to non-null, never back.
+ // We initialize it when promotion inserts into a non-root leaf node on the right extreme.
+ // We use the blocktable lock to protect the initialize transition, though it's not really
+ // necessary since all threads should be setting it to the same value. We maintain that invariant
+ // on first initialization, see ft_set_or_verify_rightmost_blocknum()
+ BLOCKNUM rightmost_blocknum;
+
+ // sequential access pattern heuristic
+ // - when promotion pushes a message directly into the rightmost leaf, the score goes up.
+ // - if the score is high enough, we optimistically attempt to insert directly into the rightmost leaf
+ // - if our attempt fails because the key was not in range of the rightmost leaf, we reset the score back to 0
+ uint32_t seqinsert_score;
+};
+
+// Allocate a DB struct off the stack and only set its comparison
+// descriptor. We don't bother setting any other fields because
+// the comparison function doesn't need them, and we would like to
+// reduce the CPU work done per comparison.
+#define FAKE_DB(db, desc) struct __toku_db db; do { db.cmp_descriptor = const_cast<DESCRIPTOR>(desc); } while (0)
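+// Illustrative usage sketch (cmp_fun, key_a, key_b are hypothetical names): only
+// db.cmp_descriptor is meaningful inside the comparison call.
+//   FAKE_DB(db, desc);
+//   int c = cmp_fun(&db, key_a, key_b);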
+
+struct ft_options {
+ unsigned int nodesize;
+ unsigned int basementnodesize;
+ enum toku_compression_method compression_method;
+ unsigned int fanout;
+ unsigned int flags;
+ uint8_t memcmp_magic;
+ ft_compare_func compare_fun;
+ ft_update_func update_fun;
+};
+
+struct ft_handle {
+ // The fractal tree.
+ FT ft;
+
+ on_redirect_callback redirect_callback;
+ void *redirect_callback_extra;
+ struct toku_list live_ft_handle_link;
+ bool did_set_flags;
+
+ struct ft_options options;
+};
+
+PAIR_ATTR make_ftnode_pair_attr(FTNODE node);
+PAIR_ATTR make_invalid_pair_attr(void);
+
+//
+// Field in ftnode_fetch_extra that tells the
+// partial fetch callback what piece of the node
+// is needed by the ydb
+//
+enum ftnode_fetch_type {
+ ftnode_fetch_none = 1, // no partitions needed.
+ ftnode_fetch_subset, // some subset of partitions needed
+ ftnode_fetch_prefetch, // this is part of a prefetch call
+ ftnode_fetch_all, // every partition is needed
+ ftnode_fetch_keymatch, // one child is needed if it holds both keys
+};
+
+// Info passed to cachetable fetch callbacks to say which parts of a node
+// should be fetched (perhaps a subset, perhaps the whole thing, depending
+// on operation)
+class ftnode_fetch_extra {
+public:
+ // Used when the whole node must be in memory, such as for flushes.
+ void create_for_full_read(FT ft);
+
+ // A subset of children are necessary. Used by point queries.
+ void create_for_subset_read(FT ft, ft_search *search, const DBT *left, const DBT *right,
+ bool left_is_neg_infty, bool right_is_pos_infty,
+ bool disable_prefetching, bool read_all_partitions);
+
+ // No partitions are necessary - only pivots and/or subtree estimates.
+ // Currently used for stat64.
+ void create_for_min_read(FT ft);
+
+ // Used to prefetch partitions that fall within the bounds given by the cursor.
+ void create_for_prefetch(FT ft, struct ft_cursor *cursor);
+
+ // Only a portion of the node (within a keyrange) is required.
+ // Used by keysrange when the left and right key are in the same basement node.
+ void create_for_keymatch(FT ft, const DBT *left, const DBT *right,
+ bool disable_prefetching, bool read_all_partitions);
+
+ void destroy(void);
+
+ // return: true if a specific childnum is required to be in memory
+ bool wants_child_available(int childnum) const;
+
+ // return: the childnum of the leftmost child that is required to be in memory
+ int leftmost_child_wanted(FTNODE node) const;
+
+ // return: the childnum of the rightmost child that is required to be in memory
+ int rightmost_child_wanted(FTNODE node) const;
+
+ // needed for reading a node off disk
+ FT ft;
+
+ enum ftnode_fetch_type type;
+
+ // used in the case where type == ftnode_fetch_subset
+ // parameters needed to find out which child needs to be decompressed (so it can be read)
+ ft_search *search;
+ DBT range_lock_left_key, range_lock_right_key;
+ bool left_is_neg_infty, right_is_pos_infty;
+
+ // states whether we should try to aggressively fetch basement nodes
+ // that are not specifically needed for the current query,
+ // but may be needed for other cursor operations the user is doing.
+ // For example, if we have not disabled prefetching,
+ // and the user is doing a dictionary wide scan, then
+ // even though a query may only want one basement node,
+ // we fetch all basement nodes in a leaf node.
+ bool disable_prefetching;
+
+ // this value will be set during the fetch_callback call by toku_ftnode_fetch_callback or toku_ftnode_pf_req_callback.
+ // these callbacks need to evaluate this anyway, so we cache it here so the search code does not reevaluate it
+ int child_to_read;
+
+ // when we read internal nodes, we want to read all the data off disk in one I/O
+ // then we'll treat it as normal and only decompress the needed partitions etc.
+ bool read_all_partitions;
+
+ // Accounting: How many bytes were read, and how much time did we spend doing I/O?
+ uint64_t bytes_read;
+ tokutime_t io_time;
+ tokutime_t decompress_time;
+ tokutime_t deserialize_time;
+
+private:
+ void _create_internal(FT ft_);
+};
+
+// Only exported for tests.
+// Cachetable callbacks for ftnodes.
+void toku_ftnode_clone_callback(void* value_data, void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool for_checkpoint, void* write_extraargs);
+void toku_ftnode_checkpoint_complete_callback(void *value_data);
+void toku_ftnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM blocknum, void *ftnode_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool is_clone);
+int toku_ftnode_fetch_callback (CACHEFILE cachefile, PAIR p, int fd, BLOCKNUM blocknum, uint32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int*dirty, void*extraargs);
+void toku_ftnode_pe_est_callback(void* ftnode_pv, void* disk_data, long* bytes_freed_estimate, enum partial_eviction_cost *cost, void* write_extraargs);
+int toku_ftnode_pe_callback(void *ftnode_pv, PAIR_ATTR old_attr, void *extraargs,
+ void (*finalize)(PAIR_ATTR new_attr, void *extra), void *finalize_extra);
+bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs);
+int toku_ftnode_pf_callback(void* ftnode_pv, void* UU(disk_data), void* read_extraargs, int fd, PAIR_ATTR* sizep);
+int toku_ftnode_cleaner_callback( void *ftnode_pv, BLOCKNUM blocknum, uint32_t fullhash, void *extraargs);
+
+CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_node(FT ft);
+
+// This is only exported for tests.
+// append a child node to a parent node
+void toku_ft_nonleaf_append_child(FTNODE node, FTNODE child, const DBT *pivotkey);
+
+// This is only exported for tests.
+// append a message to a nonleaf node child buffer
+void toku_ft_append_to_child_buffer(const toku::comparator &cmp, FTNODE node, int childnum, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const DBT *key, const DBT *val);
+
+STAT64INFO_S toku_get_and_clear_basement_stats(FTNODE leafnode);
+
+//#define SLOW
+#ifdef SLOW
+#define VERIFY_NODE(t,n) (toku_verify_or_set_counts(n), toku_verify_estimates(t,n))
+#else
+#define VERIFY_NODE(t,n) ((void)0)
+#endif
+
+void toku_verify_or_set_counts(FTNODE);
+
+// TODO: consider moving this to ft/pivotkeys.cc
+class pivot_bounds {
+public:
+ pivot_bounds(const DBT &lbe_dbt, const DBT &ubi_dbt);
+
+ pivot_bounds next_bounds(FTNODE node, int childnum) const;
+
+ const DBT *lbe() const;
+ const DBT *ubi() const;
+
+ static pivot_bounds infinite_bounds();
+
+private:
+ DBT _prepivotkey(FTNODE node, int childnum, const DBT &lbe_dbt) const;
+ DBT _postpivotkey(FTNODE node, int childnum, const DBT &ubi_dbt) const;
+
+ // if toku_dbt_is_empty() is true for either bound, then it represents
+ // negative or positive infinity (which are exclusive in practice)
+ const DBT _lower_bound_exclusive;
+ const DBT _upper_bound_inclusive;
+};
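+
+// Illustrative descent sketch: start from infinite bounds at the root and
+// narrow them when walking into a child (node and childnum are placeholders).
+//   pivot_bounds bounds = pivot_bounds::infinite_bounds();
+//   pivot_bounds child_bounds = bounds.next_bounds(node, childnum);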
+
+// allocate a block number
+// allocate and initialize an ftnode
+// put the ftnode into the cachetable
+void toku_create_new_ftnode(FT_HANDLE ft_handle, FTNODE *result, int height, int n_children);
+
+/* Stuff for testing */
+// toku_testsetup_initialize() must be called before any other test_setup_xxx() functions are called.
+void toku_testsetup_initialize(void);
+int toku_testsetup_leaf(FT_HANDLE ft_h, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens);
+int toku_testsetup_nonleaf (FT_HANDLE ft_h, int height, BLOCKNUM *blocknum, int n_children, BLOCKNUM *children, char **keys, int *keylens);
+int toku_testsetup_root(FT_HANDLE ft_h, BLOCKNUM);
+int toku_testsetup_get_sersize(FT_HANDLE ft_h, BLOCKNUM); // Return the size on disk.
+int toku_testsetup_insert_to_leaf (FT_HANDLE ft_h, BLOCKNUM, const char *key, int keylen, const char *val, int vallen);
+int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_h, BLOCKNUM, enum ft_msg_type, const char *key, int keylen, const char *val, int vallen);
+void toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t);
+
+void toku_ft_root_put_msg(FT ft, const ft_msg &msg, txn_gc_info *gc_info);
+
+// TODO: Rename
+void toku_get_node_for_verify(BLOCKNUM blocknum, FT_HANDLE ft_h, FTNODE* nodep);
+
+int
+toku_verify_ftnode (FT_HANDLE ft_h,
+ MSN rootmsn, MSN parentmsn_with_messages, bool messages_exist_above,
+ FTNODE node, int height,
+ const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.)
+ const DBT *greatereq_pivot, // Everything in the subtree should be <= greatereq_pivot. (greatereq_pivot==NULL if there is no greater-or-equal pivot.)
+ int (*progress_callback)(void *extra, float progress), void *progress_extra,
+ int recurse, int verbose, int keep_going_on_failure)
+ __attribute__ ((warn_unused_result));
+
+int toku_db_badformat(void) __attribute__((__warn_unused_result__));
+
+typedef enum {
+ FT_UPGRADE_FOOTPRINT = 0,
+ FT_UPGRADE_STATUS_NUM_ROWS
+} ft_upgrade_status_entry;
+
+typedef struct {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[FT_UPGRADE_STATUS_NUM_ROWS];
+} FT_UPGRADE_STATUS_S, *FT_UPGRADE_STATUS;
+
+void toku_ft_upgrade_get_status(FT_UPGRADE_STATUS);
+
+void toku_le_get_status(LE_STATUS);
+
+void toku_ft_status_update_pivot_fetch_reason(ftnode_fetch_extra *bfe);
+void toku_ft_status_update_flush_reason(FTNODE node, uint64_t uncompressed_bytes_flushed, uint64_t bytes_written, tokutime_t write_time, bool for_checkpoint);
+void toku_ft_status_update_serialize_times(FTNODE node, tokutime_t serialize_time, tokutime_t compress_time);
+void toku_ft_status_update_deserialize_times(FTNODE node, tokutime_t deserialize_time, tokutime_t decompress_time);
+void toku_ft_status_note_msn_discard(void);
+void toku_ft_status_note_update(bool broadcast);
+void toku_ft_status_note_msg_bytes_out(size_t buffsize);
+void toku_ft_status_note_ftnode(int height, bool created); // created = false means destroyed
+
+void toku_ft_get_status(FT_STATUS);
+
+void toku_flusher_thread_set_callback(void (*callback_f)(int, void*), void* extra);
+
+// For upgrade
+int toku_upgrade_subtree_estimates_to_stat64info(int fd, FT ft) __attribute__((nonnull));
+int toku_upgrade_msn_from_root_to_header(int fd, FT ft) __attribute__((nonnull));
+
+// A callback function is invoked with the key, and the data.
+// The pointers (to the bytevecs) must not be modified. The data must be copied out before the callback function returns.
+// Note: In the thread-safe version, the ftnode remains locked while the callback function runs. So return soon, and don't call the ft code from the callback function.
+// If the callback function returns a nonzero value (an error code), then that error code is returned from the get function itself.
+// The cursor object will have been updated (so that if r==0 the current value is the value being passed)
+// (If r!=0 then the cursor won't have been updated.)
+// If r!=0, it's up to the callback function to return that value of r.
+// A 'key' pointer of NULL means that element is not found (effectively infinity or
+// -infinity depending on direction)
+// When lock_only is false, the callback does optional lock tree locking and then processes the key and val.
+// When lock_only is true, the callback only does optional lock tree locking.
+typedef int (*FT_GET_CALLBACK_FUNCTION)(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only);
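+
+// Illustrative callback sketch (my_getf and my_row are hypothetical names): copy the
+// value out before returning; return 0 on success, or a nonzero error code to
+// make the get itself return that error.
+//   static int my_getf(uint32_t keylen, const void *key,
+//                      uint32_t vallen, const void *val,
+//                      void *extra, bool lock_only) {
+//       if (lock_only) return 0; // only lock tree locking was requested
+//       struct my_row *row = (struct my_row *) extra;
+//       // ... copy keylen/key and vallen/val into *row before returning ...
+//       return 0;
+//   }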
+
+typedef bool (*FT_CHECK_INTERRUPT_CALLBACK)(void *extra, uint64_t deleted_rows);
+
+struct ft_cursor;
+int toku_ft_search(FT_HANDLE ft_handle, ft_search *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, struct ft_cursor *ftcursor, bool can_bulk_fetch);
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc
new file mode 100644
index 00000000..d752f13c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc
@@ -0,0 +1,5263 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/*
+
+Managing the tree shape: How insertion, deletion, and querying work
+
+When we insert a message into the FT_HANDLE, here's what happens.
+
+to insert a message at the root
+
+ - find the root node
+ - capture the next msn of the root node and assign it to the message
+ - split the root if it needs to be split
+ - insert the message into the root buffer
+ - if the root is too full, then toku_ft_flush_some_child() of the root on a flusher thread
+
+flusher functions use an advice struct which provides some functions to
+call that tell it what to do based on the context of the flush. see ft-flusher.h
+
+to flush some child, given a parent and some advice
+ - pick the child using advice->pick_child()
+ - remove that child's buffer from the parent
+ - flush the buffer to the child
+ - if the child has stable reactivity and
+ advice->should_recursively_flush() is true, then
+ toku_ft_flush_some_child() of the child
+ - otherwise split the child if it needs to be split
+ - otherwise maybe merge the child if it needs to be merged
+
+flusher threads:
+
+ flusher threads are created on demand as the result of internal nodes
+ becoming gorged by insertions. this allows flushing to be done somewhere
+ other than the client thread. these work items are enqueued onto
+ the cachetable kibbutz and are done in a first in first out order.
+
+cleaner threads:
+
+ the cleaner thread wakes up every so often (say, 1 second) and chooses
+ a small number (say, 5) of nodes as candidates for a flush. the one
+ with the largest cache pressure is chosen to be flushed. cache pressure
+ is a function of the size of the node in the cachetable plus the work done.
+ the cleaner thread need not actually do a flush when awoken, so only
+ nodes that have sufficient cache pressure are flushed.
+
+checkpointing:
+
+ the checkpoint thread wakes up every minute to checkpoint dirty nodes
+ to disk. at the time of this writing, nodes during checkpoint are
+ locked and cannot be queried or flushed to. a design in which nodes
+ are copied before checkpoint is being considered as a way to reduce
+ the performance variability caused by a checkpoint locking too
+ many nodes and preventing other threads from traversing down the tree,
+ for a query or otherwise.
+
+To shrink a file: Let X be the size of the reachable data.
+ We define an acceptable bloat constant of C. For example we set C=2 if we are willing to allow the file to be as much as 2X in size.
+ The goal is to find the smallest amount of stuff we can move to get the file down to size CX.
+ That seems like a difficult problem, so we use the following heuristics:
+ If we can relocate the last block to a lower location, then do so immediately. (The file gets smaller right away, so even though the new location
+ may not even be in the first CX bytes, we are making the file smaller.)
+ Otherwise all of the earlier blocks are smaller than the last block (of size L). So find the smallest region that has L free bytes in it.
+ (This can be computed in one pass)
+ Move the first allocated block in that region to some location not in the interior of the region.
+ (Outside of the region is OK, and reallocating the block at the edge of the region is OK).
+ This has the effect of creating a smaller region with at least L free bytes in it.
+ Go back to the top (because by now some other block may have been allocated or freed).
+ Claim: if there are no other allocations going on concurrently, then this algorithm will shrink the file reasonably efficiently. By this I mean that
+ each block of shrinkage does the smallest amount of work possible. That doesn't mean that the work overall is minimized.
+ Note: If there are other allocations and deallocations going on concurrently, we might never get enough space to move the last block. But it takes a lot
+ of allocations and deallocations to make that happen, and it's probably reasonable for the file not to shrink in this case.
+
+To split or merge a child of a node:
+Split_or_merge (node, childnum) {
+ If the child needs to be split (it's a leaf with too much stuff or a nonleaf with too much fanout)
+ fetch the node and the child into main memory.
+ split the child, producing two nodes A and B, and also a pivot. Don't worry if the resulting child is still too big or too small. Fix it on the next pass.
+ fixup node to point at the two new children. Don't worry about the node getting too much fanout.
+ return;
+ If the child needs to be merged (it's a leaf with too little stuff (less than 1/4 full) or a nonleaf with too little fanout (less than 1/4))
+ fetch node, the child and a sibling of the child into main memory.
+ move all messages from the node to the two children (so that the message buffers are empty)
+ If the two siblings together fit into one node then
+ merge the two siblings.
+ fixup the node to point at one child
+ Otherwise
+ load balance the content of the two nodes
+ Don't worry about the resulting children having too many messages or otherwise being too big or too small. Fix it on the next pass.
+ }
+}
+
+Here's how querying works:
+
+lookups:
+ - As of Dr. No, we don't do any tree shaping on lookup.
+ - We don't promote eagerly or use aggressive promotion or passive-aggressive
+ promotion. We just push messages down according to the traditional FT_HANDLE
+ algorithm on insertions.
+ - when a node is brought into memory, we apply ancestor messages above it.
+
+basement nodes, bulk fetch, and partial fetch:
+ - leaf nodes are composed of N basement nodes, each of nominal size. when
+ a query hits a leaf node, it may require one or more basement nodes to be in memory.
+ - for point queries, we do not read the entire node into memory. instead,
+ we only read in the required basement node
+ - for range queries, cursors may return cursor continue in their callback
+ to take the shortcut path until the end of the basement node.
+ - for range queries, cursors may prelock a range of keys (with or without a txn).
+ the fractal tree will prefetch nodes aggressively until the end of the range.
+ - without a prelocked range, range queries behave like successive point queries.
+
+*/
+
+#include <my_global.h>
+#include "ft/cachetable/checkpoint.h"
+#include "ft/cursor.h"
+#include "ft/ft-cachetable-wrappers.h"
+#include "ft/ft-flusher.h"
+#include "ft/ft-internal.h"
+#include "ft/ft.h"
+#include "ft/leafentry.h"
+#include "ft/logger/log-internal.h"
+#include "ft/msg.h"
+#include "ft/node.h"
+#include "ft/serialize/block_table.h"
+#include "ft/serialize/ft-serialize.h"
+#include "ft/serialize/ft_layout_version.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "ft/serialize/sub_block.h"
+#include "ft/txn/txn_manager.h"
+#include "ft/txn/xids.h"
+#include "ft/ule.h"
+#include "src/ydb-internal.h"
+
+#include <toku_race_tools.h>
+
+#include <portability/toku_atomic.h>
+
+#include <util/context.h>
+#include <util/mempool.h>
+#include <util/status.h>
+#include <util/rwlock.h>
+#include <util/sort.h>
+#include <util/scoped_malloc.h>
+
+#include <stdint.h>
+
+#include <memory>
+/* Status is intended for display to humans to help understand system behavior.
+ * It does not need to be perfectly thread-safe.
+ */
+
+static toku_mutex_t ft_open_close_lock;
+static toku_instr_key *ft_open_close_lock_mutex_key;
+// FIXME: the instrumentation keys below are defined here even though they
+// belong to other modules, because they are registered here. If desired, they
+// can be moved to their proper modules and registration done there in a
+// one-time init function
+// locktree
+toku_instr_key *treenode_mutex_key;
+toku_instr_key *manager_mutex_key;
+toku_instr_key *manager_escalation_mutex_key;
+toku_instr_key *manager_escalator_mutex_key;
+// src
+toku_instr_key *db_txn_struct_i_txn_mutex_key;
+toku_instr_key *indexer_i_indexer_lock_mutex_key;
+toku_instr_key *indexer_i_indexer_estimate_lock_mutex_key;
+toku_instr_key *result_i_open_dbs_rwlock_key;
+// locktree
+toku_instr_key *lock_request_m_wait_cond_key;
+toku_instr_key *manager_m_escalator_done_key;
+toku_instr_key *locktree_request_info_mutex_key;
+toku_instr_key *locktree_request_info_retry_mutex_key;
+toku_instr_key *locktree_request_info_retry_cv_key;
+
+// this is a sample probe for custom instrumentation
+static toku_instr_key *fti_probe_1_key;
+
+// This is a sample probe for custom instrumentation
+toku_instr_probe *toku_instr_probe_1;
+
+void toku_ft_get_status(FT_STATUS s) {
+ ft_status.init();
+ *s = ft_status;
+
+ // Calculate compression ratios for leaf and nonleaf nodes
+ const double compressed_leaf_bytes = FT_STATUS_VAL(FT_DISK_FLUSH_LEAF_BYTES) +
+ FT_STATUS_VAL(FT_DISK_FLUSH_LEAF_BYTES_FOR_CHECKPOINT);
+ const double uncompressed_leaf_bytes = FT_STATUS_VAL(FT_DISK_FLUSH_LEAF_UNCOMPRESSED_BYTES) +
+ FT_STATUS_VAL(FT_DISK_FLUSH_LEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT);
+ const double compressed_nonleaf_bytes = FT_STATUS_VAL(FT_DISK_FLUSH_NONLEAF_BYTES) +
+ FT_STATUS_VAL(FT_DISK_FLUSH_NONLEAF_BYTES_FOR_CHECKPOINT);
+ const double uncompressed_nonleaf_bytes = FT_STATUS_VAL(FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES) +
+ FT_STATUS_VAL(FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT);
+
+ if (compressed_leaf_bytes > 0) {
+ s->status[FT_STATUS_S::FT_DISK_FLUSH_LEAF_COMPRESSION_RATIO].value.dnum
+ = uncompressed_leaf_bytes / compressed_leaf_bytes;
+ }
+ if (compressed_nonleaf_bytes > 0) {
+ s->status[FT_STATUS_S::FT_DISK_FLUSH_NONLEAF_COMPRESSION_RATIO].value.dnum
+ = uncompressed_nonleaf_bytes / compressed_nonleaf_bytes;
+ }
+ if (compressed_leaf_bytes > 0 || compressed_nonleaf_bytes > 0) {
+ s->status[FT_STATUS_S::FT_DISK_FLUSH_OVERALL_COMPRESSION_RATIO].value.dnum
+ = (uncompressed_leaf_bytes + uncompressed_nonleaf_bytes) /
+ (compressed_leaf_bytes + compressed_nonleaf_bytes);
+ }
+}
+
+void toku_note_deserialized_basement_node(bool fixed_key_size) {
+ if (fixed_key_size) {
+ FT_STATUS_INC(FT_BASEMENT_DESERIALIZE_FIXED_KEYSIZE, 1);
+ } else {
+ FT_STATUS_INC(FT_BASEMENT_DESERIALIZE_VARIABLE_KEYSIZE, 1);
+ }
+}
+
+static void ft_verify_flags(FT UU(ft), FTNODE UU(node)) {
+ paranoid_invariant(ft->h->flags == node->flags);
+}
+
+int toku_ft_debug_mode = 0;
+
+uint32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum) {
+ paranoid_invariant(node->height>0);
+ paranoid_invariant(childnum<node->n_children);
+ return toku_cachetable_hash(cf, BP_BLOCKNUM(node, childnum));
+}
+
+//
+// pivot bounds
+// TODO: move me to ft/node.cc?
+//
+
+pivot_bounds::pivot_bounds(const DBT &lbe_dbt, const DBT &ubi_dbt) :
+ _lower_bound_exclusive(lbe_dbt), _upper_bound_inclusive(ubi_dbt) {
+}
+
+pivot_bounds pivot_bounds::infinite_bounds() {
+ DBT dbt;
+ toku_init_dbt(&dbt);
+
+ // infinity is represented by an empty dbt
+ invariant(toku_dbt_is_empty(&dbt));
+ return pivot_bounds(dbt, dbt);
+}
+
+const DBT *pivot_bounds::lbe() const {
+ return &_lower_bound_exclusive;
+}
+
+const DBT *pivot_bounds::ubi() const {
+ return &_upper_bound_inclusive;
+}
+
+DBT pivot_bounds::_prepivotkey(FTNODE node, int childnum, const DBT &lbe_dbt) const {
+ if (childnum == 0) {
+ return lbe_dbt;
+ } else {
+ return node->pivotkeys.get_pivot(childnum - 1);
+ }
+}
+
+DBT pivot_bounds::_postpivotkey(FTNODE node, int childnum, const DBT &ubi_dbt) const {
+ if (childnum + 1 == node->n_children) {
+ return ubi_dbt;
+ } else {
+ return node->pivotkeys.get_pivot(childnum);
+ }
+}
+
+pivot_bounds pivot_bounds::next_bounds(FTNODE node, int childnum) const {
+ return pivot_bounds(_prepivotkey(node, childnum, _lower_bound_exclusive),
+ _postpivotkey(node, childnum, _upper_bound_inclusive));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static long get_avail_internal_node_partition_size(FTNODE node, int i) {
+ paranoid_invariant(node->height > 0);
+ return toku_bnc_memory_size(BNC(node, i));
+}
+
+static long ftnode_cachepressure_size(FTNODE node) {
+ long retval = 0;
+ bool totally_empty = true;
+ if (node->height == 0) {
+ goto exit;
+ }
+ else {
+ for (int i = 0; i < node->n_children; i++) {
+ if (BP_STATE(node,i) == PT_INVALID || BP_STATE(node,i) == PT_ON_DISK) {
+ continue;
+ }
+ else if (BP_STATE(node,i) == PT_COMPRESSED) {
+ SUB_BLOCK sb = BSB(node, i);
+ totally_empty = false;
+ retval += sb->compressed_size;
+ }
+ else if (BP_STATE(node,i) == PT_AVAIL) {
+ totally_empty = totally_empty && (toku_bnc_n_entries(BNC(node, i)) == 0);
+ retval += get_avail_internal_node_partition_size(node, i);
+ retval += BP_WORKDONE(node, i);
+ }
+ else {
+ abort();
+ }
+ }
+ }
+exit:
+ if (totally_empty) {
+ return 0;
+ }
+ return retval;
+}
+
+static long
+ftnode_memory_size (FTNODE node)
+// Effect: Estimate how much main memory a node requires.
+{
+ long retval = 0;
+ int n_children = node->n_children;
+ retval += sizeof(*node);
+ retval += (n_children)*(sizeof(node->bp[0]));
+ retval += node->pivotkeys.total_size();
+
+ // now calculate the sizes of the partitions
+ for (int i = 0; i < n_children; i++) {
+ if (BP_STATE(node,i) == PT_INVALID || BP_STATE(node,i) == PT_ON_DISK) {
+ continue;
+ }
+ else if (BP_STATE(node,i) == PT_COMPRESSED) {
+ SUB_BLOCK sb = BSB(node, i);
+ retval += sizeof(*sb);
+ retval += sb->compressed_size;
+ }
+ else if (BP_STATE(node,i) == PT_AVAIL) {
+ if (node->height > 0) {
+ retval += get_avail_internal_node_partition_size(node, i);
+ }
+ else {
+ BASEMENTNODE bn = BLB(node, i);
+ retval += sizeof(*bn);
+ retval += BLB_DATA(node, i)->get_memory_size();
+ }
+ }
+ else {
+ abort();
+ }
+ }
+ return retval;
+}
+
+PAIR_ATTR make_ftnode_pair_attr(FTNODE node) {
+ long size = ftnode_memory_size(node);
+ long cachepressure_size = ftnode_cachepressure_size(node);
+ PAIR_ATTR result={
+ .size = size,
+ .nonleaf_size = (node->height > 0) ? size : 0,
+ .leaf_size = (node->height > 0) ? 0 : size,
+ .rollback_size = 0,
+ .cache_pressure_size = cachepressure_size,
+ .is_valid = true
+ };
+ return result;
+}
+
+PAIR_ATTR make_invalid_pair_attr(void) {
+ PAIR_ATTR result={
+ .size = 0,
+ .nonleaf_size = 0,
+ .leaf_size = 0,
+ .rollback_size = 0,
+ .cache_pressure_size = 0,
+ .is_valid = false
+ };
+ return result;
+}
+
+
+// assign unique dictionary id
+static uint64_t dict_id_serial = 1;
+static DICTIONARY_ID
+next_dict_id(void) {
+ uint64_t i = toku_sync_fetch_and_add(&dict_id_serial, 1);
+ assert(i); // guarantee unique dictionary id by asserting 64-bit counter never wraps
+ DICTIONARY_ID d = {.dictid = i};
+ return d;
+}
+
+// TODO: This isn't so pretty
+void ftnode_fetch_extra::_create_internal(FT ft_) {
+ ft = ft_;
+ type = ftnode_fetch_none;
+ search = nullptr;
+
+ toku_init_dbt(&range_lock_left_key);
+ toku_init_dbt(&range_lock_right_key);
+ left_is_neg_infty = false;
+ right_is_pos_infty = false;
+
+ // -1 means 'unknown', which is the correct default state
+ child_to_read = -1;
+ disable_prefetching = false;
+ read_all_partitions = false;
+
+ bytes_read = 0;
+ io_time = 0;
+ deserialize_time = 0;
+ decompress_time = 0;
+}
+
+void ftnode_fetch_extra::create_for_full_read(FT ft_) {
+ _create_internal(ft_);
+
+ type = ftnode_fetch_all;
+}
+
+void ftnode_fetch_extra::create_for_keymatch(FT ft_, const DBT *left, const DBT *right,
+ bool disable_prefetching_, bool read_all_partitions_) {
+ _create_internal(ft_);
+ invariant(ft->h->type == FT_CURRENT);
+
+ type = ftnode_fetch_keymatch;
+ if (left != nullptr) {
+ toku_copyref_dbt(&range_lock_left_key, *left);
+ }
+ if (right != nullptr) {
+ toku_copyref_dbt(&range_lock_right_key, *right);
+ }
+ left_is_neg_infty = left == nullptr;
+ right_is_pos_infty = right == nullptr;
+ disable_prefetching = disable_prefetching_;
+ read_all_partitions = read_all_partitions_;
+}
+
+void ftnode_fetch_extra::create_for_subset_read(FT ft_, ft_search *search_,
+ const DBT *left, const DBT *right,
+ bool left_is_neg_infty_, bool right_is_pos_infty_,
+ bool disable_prefetching_, bool read_all_partitions_) {
+ _create_internal(ft_);
+ invariant(ft->h->type == FT_CURRENT);
+
+ type = ftnode_fetch_subset;
+ search = search_;
+ if (left != nullptr) {
+ toku_copyref_dbt(&range_lock_left_key, *left);
+ }
+ if (right != nullptr) {
+ toku_copyref_dbt(&range_lock_right_key, *right);
+ }
+ left_is_neg_infty = left_is_neg_infty_;
+ right_is_pos_infty = right_is_pos_infty_;
+ disable_prefetching = disable_prefetching_;
+ read_all_partitions = read_all_partitions_;
+}
+
+void ftnode_fetch_extra::create_for_min_read(FT ft_) {
+ _create_internal(ft_);
+ invariant(ft->h->type == FT_CURRENT);
+
+ type = ftnode_fetch_none;
+}
+
+void ftnode_fetch_extra::create_for_prefetch(FT ft_, struct ft_cursor *cursor) {
+ _create_internal(ft_);
+ invariant(ft->h->type == FT_CURRENT);
+
+ type = ftnode_fetch_prefetch;
+ const DBT *left = &cursor->range_lock_left_key;
+ if (left->data) {
+ toku_clone_dbt(&range_lock_left_key, *left);
+ }
+ const DBT *right = &cursor->range_lock_right_key;
+ if (right->data) {
+ toku_clone_dbt(&range_lock_right_key, *right);
+ }
+ left_is_neg_infty = cursor->left_is_neg_infty;
+ right_is_pos_infty = cursor->right_is_pos_infty;
+ disable_prefetching = cursor->disable_prefetching;
+}
+
+void ftnode_fetch_extra::destroy(void) {
+ toku_destroy_dbt(&range_lock_left_key);
+ toku_destroy_dbt(&range_lock_right_key);
+}
+
+// Requires: child_to_read to have been set
+bool ftnode_fetch_extra::wants_child_available(int childnum) const {
+ return type == ftnode_fetch_all ||
+ (child_to_read == childnum &&
+ (type == ftnode_fetch_subset || type == ftnode_fetch_keymatch));
+}
+
+int ftnode_fetch_extra::leftmost_child_wanted(FTNODE node) const {
+ paranoid_invariant(type == ftnode_fetch_subset ||
+ type == ftnode_fetch_prefetch ||
+ type == ftnode_fetch_keymatch);
+ if (left_is_neg_infty) {
+ return 0;
+ } else if (range_lock_left_key.data == nullptr) {
+ return -1;
+ } else {
+ return toku_ftnode_which_child(node, &range_lock_left_key, ft->cmp);
+ }
+}
+
+int ftnode_fetch_extra::rightmost_child_wanted(FTNODE node) const {
+ paranoid_invariant(type == ftnode_fetch_subset ||
+ type == ftnode_fetch_prefetch ||
+ type == ftnode_fetch_keymatch);
+ if (right_is_pos_infty) {
+ return node->n_children - 1;
+ } else if (range_lock_right_key.data == nullptr) {
+ return -1;
+ } else {
+ return toku_ftnode_which_child(node, &range_lock_right_key, ft->cmp);
+ }
+}
+
+static int
+ft_cursor_rightmost_child_wanted(FT_CURSOR cursor, FT_HANDLE ft_handle, FTNODE node)
+{
+ if (cursor->right_is_pos_infty) {
+ return node->n_children - 1;
+ } else if (cursor->range_lock_right_key.data == nullptr) {
+ return -1;
+ } else {
+ return toku_ftnode_which_child(node, &cursor->range_lock_right_key, ft_handle->ft->cmp);
+ }
+}
+
+STAT64INFO_S
+toku_get_and_clear_basement_stats(FTNODE leafnode) {
+ invariant(leafnode->height == 0);
+ STAT64INFO_S deltas = ZEROSTATS;
+ for (int i = 0; i < leafnode->n_children; i++) {
+ BASEMENTNODE bn = BLB(leafnode, i);
+ invariant(BP_STATE(leafnode,i) == PT_AVAIL);
+ deltas.numrows += bn->stat64_delta.numrows;
+ deltas.numbytes += bn->stat64_delta.numbytes;
+ bn->stat64_delta = ZEROSTATS;
+ }
+ return deltas;
+}
+
+void toku_ft_status_update_flush_reason(FTNODE node,
+ uint64_t uncompressed_bytes_flushed, uint64_t bytes_written,
+ tokutime_t write_time, bool for_checkpoint) {
+ if (node->height == 0) {
+ if (for_checkpoint) {
+ FT_STATUS_INC(FT_DISK_FLUSH_LEAF_FOR_CHECKPOINT, 1);
+ FT_STATUS_INC(FT_DISK_FLUSH_LEAF_BYTES_FOR_CHECKPOINT, bytes_written);
+ FT_STATUS_INC(FT_DISK_FLUSH_LEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT, uncompressed_bytes_flushed);
+ FT_STATUS_INC(FT_DISK_FLUSH_LEAF_TOKUTIME_FOR_CHECKPOINT, write_time);
+ }
+ else {
+ FT_STATUS_INC(FT_DISK_FLUSH_LEAF, 1);
+ FT_STATUS_INC(FT_DISK_FLUSH_LEAF_BYTES, bytes_written);
+ FT_STATUS_INC(FT_DISK_FLUSH_LEAF_UNCOMPRESSED_BYTES, uncompressed_bytes_flushed);
+ FT_STATUS_INC(FT_DISK_FLUSH_LEAF_TOKUTIME, write_time);
+ }
+ }
+ else {
+ if (for_checkpoint) {
+ FT_STATUS_INC(FT_DISK_FLUSH_NONLEAF_FOR_CHECKPOINT, 1);
+ FT_STATUS_INC(FT_DISK_FLUSH_NONLEAF_BYTES_FOR_CHECKPOINT, bytes_written);
+ FT_STATUS_INC(FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT, uncompressed_bytes_flushed);
+ FT_STATUS_INC(FT_DISK_FLUSH_NONLEAF_TOKUTIME_FOR_CHECKPOINT, write_time);
+ }
+ else {
+ FT_STATUS_INC(FT_DISK_FLUSH_NONLEAF, 1);
+ FT_STATUS_INC(FT_DISK_FLUSH_NONLEAF_BYTES, bytes_written);
+ FT_STATUS_INC(FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES, uncompressed_bytes_flushed);
+ FT_STATUS_INC(FT_DISK_FLUSH_NONLEAF_TOKUTIME, write_time);
+ }
+ }
+}
+
+void toku_ftnode_checkpoint_complete_callback(void *value_data) {
+ FTNODE node = static_cast<FTNODE>(value_data);
+ if (node->height > 0) {
+ for (int i = 0; i < node->n_children; ++i) {
+ if (BP_STATE(node, i) == PT_AVAIL) {
+ NONLEAF_CHILDINFO bnc = BNC(node, i);
+ bnc->flow[1] = bnc->flow[0];
+ bnc->flow[0] = 0;
+ }
+ }
+ }
+}
+
+void toku_ftnode_clone_callback(void *value_data,
+ void **cloned_value_data,
+ long *clone_size,
+ PAIR_ATTR *new_attr,
+ bool for_checkpoint,
+ void *write_extraargs) {
+ FTNODE node = static_cast<FTNODE>(value_data);
+ toku_ftnode_assert_fully_in_memory(node);
+ FT ft = static_cast<FT>(write_extraargs);
+ FTNODE XCALLOC(cloned_node);
+ if (node->height == 0) {
+ // set header stats, must be done before rebalancing
+ toku_ftnode_update_disk_stats(node, ft, for_checkpoint);
+ // rebalance the leaf node
+ toku_ftnode_leaf_rebalance(node, ft->h->basementnodesize);
+ }
+
+ cloned_node->oldest_referenced_xid_known =
+ node->oldest_referenced_xid_known;
+ cloned_node->max_msn_applied_to_node_on_disk =
+ node->max_msn_applied_to_node_on_disk;
+ cloned_node->flags = node->flags;
+ cloned_node->blocknum = node->blocknum;
+ cloned_node->layout_version = node->layout_version;
+ cloned_node->layout_version_original = node->layout_version_original;
+ cloned_node->layout_version_read_from_disk =
+ node->layout_version_read_from_disk;
+ cloned_node->build_id = node->build_id;
+ cloned_node->height = node->height;
+ cloned_node->dirty_ = node->dirty_;
+ cloned_node->fullhash = node->fullhash;
+ cloned_node->n_children = node->n_children;
+
+ XMALLOC_N(node->n_children, cloned_node->bp);
+ // clone pivots
+ cloned_node->pivotkeys.create_from_pivot_keys(node->pivotkeys);
+ if (node->height > 0) {
+ // need to move messages here so that we don't serialize stale
+ // messages to the fresh tree - ft verify code complains otherwise.
+ toku_move_ftnode_messages_to_stale(ft, node);
+ }
+ // clone partition
+ toku_ftnode_clone_partitions(node, cloned_node);
+
+ // clear dirty bit
+ node->clear_dirty();
+ cloned_node->clear_dirty();
+ node->layout_version_read_from_disk = FT_LAYOUT_VERSION;
+ // set new pair attr if necessary
+ if (node->height == 0) {
+ *new_attr = make_ftnode_pair_attr(node);
+ for (int i = 0; i < node->n_children; i++) {
+ if (BP_STATE(node, i) == PT_AVAIL) {
+ BLB_LRD(node, i) = 0;
+ BLB_LRD(cloned_node, i) = 0;
+ }
+ }
+ } else {
+ new_attr->is_valid = false;
+ }
+ *clone_size = ftnode_memory_size(cloned_node);
+ *cloned_value_data = cloned_node;
+}
+
+void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
+ int fd,
+ BLOCKNUM blocknum,
+ void *ftnode_v,
+ void **disk_data,
+ void *extraargs,
+ PAIR_ATTR size __attribute__((unused)),
+ PAIR_ATTR *new_size,
+ bool write_me,
+ bool keep_me,
+ bool for_checkpoint,
+ bool is_clone) {
+ FT ft = (FT)extraargs;
+ FTNODE ftnode = (FTNODE)ftnode_v;
+ FTNODE_DISK_DATA *ndd = (FTNODE_DISK_DATA *)disk_data;
+ assert(ftnode->blocknum.b == blocknum.b);
+ int height = ftnode->height;
+ if (write_me) {
+ toku_ftnode_assert_fully_in_memory(ftnode);
+ if (height > 0 && !is_clone) {
+ // cloned nodes already had their stale messages moved, see
+ // toku_ftnode_clone_callback()
+ toku_move_ftnode_messages_to_stale(ft, ftnode);
+ } else if (height == 0) {
+ toku_ftnode_leaf_run_gc(ft, ftnode);
+ if (!is_clone) {
+ toku_ftnode_update_disk_stats(ftnode, ft, for_checkpoint);
+ }
+ }
+ int r = toku_serialize_ftnode_to(
+ fd, ftnode->blocknum, ftnode, ndd, !is_clone, ft, for_checkpoint);
+ assert_zero(r);
+ ftnode->layout_version_read_from_disk = FT_LAYOUT_VERSION;
+ }
+ if (!keep_me) {
+ if (!is_clone) {
+ long node_size = ftnode_memory_size(ftnode);
+ if (ftnode->height == 0) {
+ FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF, 1);
+ FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF_BYTES, node_size);
+
+ // A leaf node (height == 0) is being evicted (!keep_me) and is
+ // not a checkpoint clone (!is_clone). This leaf node may have
+ // had messages applied to satisfy a query, but was never
+ // actually dirtied (!ftnode->dirty() && !write_me). Note that
+ // if write_me were set, the node would be persisted and the
+ // dirty flag cleared. This message application may have updated
+ // the tree's logical row count. Since these message applications
+ // are not persisted, we need to undo the logical row count
+ // adjustments, as they may occur again in the future if/when the
+ // node is re-read from disk for another query or change.
+ if (!ftnode->dirty() && !write_me) {
+ int64_t lrc_delta = 0;
+ for (int i = 0; i < ftnode->n_children; i++) {
+ if (BP_STATE(ftnode, i) == PT_AVAIL) {
+ lrc_delta -= BLB_LRD(ftnode, i);
+ BLB_LRD(ftnode, i) = 0;
+ }
+ }
+ toku_ft_adjust_logical_row_count(ft, lrc_delta);
+ }
+ } else {
+ FT_STATUS_INC(FT_FULL_EVICTIONS_NONLEAF, 1);
+ FT_STATUS_INC(FT_FULL_EVICTIONS_NONLEAF_BYTES, node_size);
+ }
+ toku_free(*disk_data);
+ } else {
+ if (ftnode->height == 0) {
+ // No need to adjust logical row counts when flushing a clone
+ // as they should have been zeroed out anyway when cloned.
+ // Clones are 'copies' of work already done, so doing it again
+ // (adjusting row counts) would be redundant and would lead to
+ // inaccurate counts.
+ for (int i = 0; i < ftnode->n_children; i++) {
+ if (BP_STATE(ftnode, i) == PT_AVAIL) {
+ BASEMENTNODE bn = BLB(ftnode, i);
+ toku_ft_decrease_stats(&ft->in_memory_stats,
+ bn->stat64_delta);
+ }
+ }
+ }
+ }
+ toku_ftnode_free(&ftnode);
+ } else {
+ *new_size = make_ftnode_pair_attr(ftnode);
+ }
+}
+
+void
+toku_ft_status_update_pivot_fetch_reason(ftnode_fetch_extra *bfe)
+{
+ if (bfe->type == ftnode_fetch_prefetch) {
+ FT_STATUS_INC(FT_NUM_PIVOTS_FETCHED_PREFETCH, 1);
+ FT_STATUS_INC(FT_BYTES_PIVOTS_FETCHED_PREFETCH, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_PIVOTS_FETCHED_PREFETCH, bfe->io_time);
+ } else if (bfe->type == ftnode_fetch_all) {
+ FT_STATUS_INC(FT_NUM_PIVOTS_FETCHED_WRITE, 1);
+ FT_STATUS_INC(FT_BYTES_PIVOTS_FETCHED_WRITE, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_PIVOTS_FETCHED_WRITE, bfe->io_time);
+ } else if (bfe->type == ftnode_fetch_subset || bfe->type == ftnode_fetch_keymatch) {
+ FT_STATUS_INC(FT_NUM_PIVOTS_FETCHED_QUERY, 1);
+ FT_STATUS_INC(FT_BYTES_PIVOTS_FETCHED_QUERY, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_PIVOTS_FETCHED_QUERY, bfe->io_time);
+ }
+}
+
+int toku_ftnode_fetch_callback(CACHEFILE UU(cachefile),
+ PAIR p,
+ int fd,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ void **ftnode_pv,
+ void **disk_data,
+ PAIR_ATTR *sizep,
+ int *dirtyp,
+ void *extraargs) {
+ assert(extraargs);
+ assert(*ftnode_pv == nullptr);
+ FTNODE_DISK_DATA *ndd = (FTNODE_DISK_DATA *)disk_data;
+ ftnode_fetch_extra *bfe = (ftnode_fetch_extra *)extraargs;
+ FTNODE *node = (FTNODE *)ftnode_pv;
+ // deserialize the node; we must pass the bfe in because we cannot
+ // evaluate what piece of the node is necessary until we get it at
+ // least partially into memory
+ int r =
+ toku_deserialize_ftnode_from(fd, blocknum, fullhash, node, ndd, bfe);
+ if (r != 0) {
+ if (r == TOKUDB_BAD_CHECKSUM) {
+ fprintf(
+ stderr,
+ "%s:%d:toku_ftnode_fetch_callback - "
+ "file[%s], blocknum[%lld], toku_deserialize_ftnode_from "
+ "failed with a checksum error.\n",
+ __FILE__,
+ __LINE__,
+ toku_cachefile_fname_in_env(cachefile),
+ (longlong)blocknum.b);
+ } else {
+ fprintf(
+ stderr,
+ "%s:%d:toku_ftnode_fetch_callback - "
+ "file[%s], blocknum[%lld], toku_deserialize_ftnode_from "
+ "failed with %d.\n",
+ __FILE__,
+ __LINE__,
+ toku_cachefile_fname_in_env(cachefile),
+ (longlong)blocknum.b,
+ r);
+ }
+ // make absolutely sure we crash before doing anything else.
+ abort();
+ }
+
+ if (r == 0) {
+ *sizep = make_ftnode_pair_attr(*node);
+ (*node)->ct_pair = p;
+ *dirtyp = (*node)->dirty(); // deserialize could mark the node as dirty
+ // (presumably for upgrade)
+ }
+ return r;
+}
+
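+// When true, partial eviction of a clean nonleaf partition compresses its
+// message buffer in memory (PT_COMPRESSED) instead of simply detaching it and
+// marking the partition PT_ON_DISK. See toku_ftnode_pe_est_callback() and
+// toku_ftnode_pe_callback() below.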
+static bool ft_compress_buffers_before_eviction = true;
+
+void toku_ft_set_compress_buffers_before_eviction(bool compress_buffers) {
+ ft_compress_buffers_before_eviction = compress_buffers;
+}
+
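+// Cachetable callback that estimates how many bytes a partial eviction of
+// this node would free and how costly that eviction would be. Dirty nodes,
+// leaf nodes, and nodes from layouts without basement nodes are reported as
+// cheap with nothing to free.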
+void toku_ftnode_pe_est_callback(
+ void* ftnode_pv,
+ void* disk_data,
+ long* bytes_freed_estimate,
+ enum partial_eviction_cost *cost,
+ void* UU(write_extraargs)
+ )
+{
+ paranoid_invariant(ftnode_pv != NULL);
+ long bytes_to_free = 0;
+ FTNODE node = static_cast<FTNODE>(ftnode_pv);
+ if (node->dirty() || node->height == 0 ||
+ node->layout_version_read_from_disk < FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES) {
+ *bytes_freed_estimate = 0;
+ *cost = PE_CHEAP;
+ goto exit;
+ }
+
+ //
+ // we are dealing with a clean internal node
+ //
+ *cost = PE_EXPENSIVE;
+ // now let's get an estimate for how much data we can free up:
+ // we estimate the compressed size of the data to be the size of
+ // the compressed data on disk
+ for (int i = 0; i < node->n_children; i++) {
+ if (BP_STATE(node,i) == PT_AVAIL && BP_SHOULD_EVICT(node,i)) {
+ // calculate how much data would be freed if
+ // we evict this partition, and add it to
+ // bytes_to_free
+
+ if (ft_compress_buffers_before_eviction) {
+ // first get an estimate for how much space will be taken
+ // after compression: it is simply the size of the compressed
+ // data on disk plus the size of the struct that holds it
+ FTNODE_DISK_DATA ndd = (FTNODE_DISK_DATA) disk_data;
+ uint32_t compressed_data_size = BP_SIZE(ndd, i);
+ compressed_data_size += sizeof(struct sub_block);
+
+ // now get the space currently taken
+ uint32_t decompressed_data_size = get_avail_internal_node_partition_size(node,i);
+ bytes_to_free += (decompressed_data_size - compressed_data_size);
+ } else {
+ bytes_to_free += get_avail_internal_node_partition_size(node, i);
+ }
+ }
+ }
+
+ *bytes_freed_estimate = bytes_to_free;
+exit:
+ return;
+}
+
+// replace the child buffer with a compressed version of itself.
+static void compress_internal_node_partition(FTNODE node, int i, enum toku_compression_method compression_method) {
+ // if we should evict, compress the
+ // message buffer into a sub_block
+ assert(BP_STATE(node, i) == PT_AVAIL);
+ assert(node->height > 0);
+ SUB_BLOCK XMALLOC(sb);
+ sub_block_init(sb);
+ toku_create_compressed_partition_from_available(node, i, compression_method, sb);
+
+ // now set the state to compressed
+ set_BSB(node, i, sb);
+ BP_STATE(node,i) = PT_COMPRESSED;
+}
+
+// callback for partially evicting a node
+int toku_ftnode_pe_callback(void *ftnode_pv,
+ PAIR_ATTR old_attr,
+ void *write_extraargs,
+ void (*finalize)(PAIR_ATTR new_attr, void *extra),
+ void *finalize_extra) {
+ FTNODE node = (FTNODE)ftnode_pv;
+ FT ft = (FT)write_extraargs;
+ int num_partial_evictions = 0;
+
+ // Hold things we intend to destroy here.
+ // They will be taken care of after finalize().
+ int num_basements_to_destroy = 0;
+ int num_buffers_to_destroy = 0;
+ int num_pointers_to_free = 0;
+ BASEMENTNODE basements_to_destroy[node->n_children];
+ NONLEAF_CHILDINFO buffers_to_destroy[node->n_children];
+ void *pointers_to_free[node->n_children * 2];
+
+ // Don't partially evict dirty nodes
+ if (node->dirty()) {
+ goto exit;
+ }
+ // Don't partially evict nodes whose partitions can't be read back
+ // from disk individually
+ if (node->layout_version_read_from_disk <
+ FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES) {
+ goto exit;
+ }
+ //
+ // partial eviction for nonleaf nodes
+ //
+ if (node->height > 0) {
+ for (int i = 0; i < node->n_children; i++) {
+ if (BP_STATE(node, i) == PT_AVAIL) {
+ if (BP_SHOULD_EVICT(node, i)) {
+ NONLEAF_CHILDINFO bnc = BNC(node, i);
+ if (ft_compress_buffers_before_eviction &&
+ // We may not serialize and compress a partition in
+ // memory if its in-memory layout version is different
+ // from what's on disk (and therefore requires upgrade).
+ //
+ // Auto-upgrade code assumes that if a node's layout
+ // version read from disk is not current, it MUST
+ // require upgrade.
+ // Breaking this rule would cause upgrade code to
+ // upgrade this partition again after we serialize it as
+ // the current version, which is bad.
+ node->layout_version ==
+ node->layout_version_read_from_disk) {
+ toku_ft_bnc_move_messages_to_stale(ft, bnc);
+ compress_internal_node_partition(
+ node,
+ i,
+ // Always compress with quicklz
+ TOKU_QUICKLZ_METHOD);
+ } else {
+ // We're not compressing buffers before eviction. Simply
+ // detach the buffer and set the child's state to
+ // on-disk.
+ set_BNULL(node, i);
+ BP_STATE(node, i) = PT_ON_DISK;
+ }
+ buffers_to_destroy[num_buffers_to_destroy++] = bnc;
+ num_partial_evictions++;
+ } else {
+ BP_SWEEP_CLOCK(node, i);
+ }
+ } else {
+ continue;
+ }
+ }
+ } else {
+ //
+ // partial eviction strategy for basement nodes:
+ // if the bn is compressed, evict it
+ // else: check if it requires eviction, if it does, evict it, if not,
+ // sweep the clock count
+ //
+ for (int i = 0; i < node->n_children; i++) {
+ // Get rid of compressed stuff no matter what.
+ if (BP_STATE(node, i) == PT_COMPRESSED) {
+ SUB_BLOCK sb = BSB(node, i);
+ pointers_to_free[num_pointers_to_free++] = sb->compressed_ptr;
+ pointers_to_free[num_pointers_to_free++] = sb;
+ set_BNULL(node, i);
+ BP_STATE(node, i) = PT_ON_DISK;
+ num_partial_evictions++;
+ } else if (BP_STATE(node, i) == PT_AVAIL) {
+ if (BP_SHOULD_EVICT(node, i)) {
+ BASEMENTNODE bn = BLB(node, i);
+ basements_to_destroy[num_basements_to_destroy++] = bn;
+ toku_ft_decrease_stats(&ft->in_memory_stats,
+ bn->stat64_delta);
+ // A basement node is being partially evicted.
+ // This basement node may have had messages applied to it to
+ // satisfy a query, but was never actually dirtied.
+ // This message application may have updated the tree's
+ // logical row count. Since these message applications are
+ // not being persisted, we need to undo the logical row count
+ // adjustments, as they may occur again in the future if/when
+ // the node is re-read from disk for another query or change.
+ toku_ft_adjust_logical_row_count(ft,
+ -bn->logical_rows_delta);
+ set_BNULL(node, i);
+ BP_STATE(node, i) = PT_ON_DISK;
+ num_partial_evictions++;
+ } else {
+ BP_SWEEP_CLOCK(node, i);
+ }
+ } else if (BP_STATE(node, i) == PT_ON_DISK) {
+ continue;
+ } else {
+ abort();
+ }
+ }
+ }
+
+exit:
+ // call the finalize callback with a new pair attr
+ int height = node->height;
+ PAIR_ATTR new_attr = make_ftnode_pair_attr(node);
+ finalize(new_attr, finalize_extra);
+
+ // destroy everything now that we've called finalize();
+ // by contract, it's now safe to do expensive work.
+ for (int i = 0; i < num_basements_to_destroy; i++) {
+ destroy_basement_node(basements_to_destroy[i]);
+ }
+ for (int i = 0; i < num_buffers_to_destroy; i++) {
+ destroy_nonleaf_childinfo(buffers_to_destroy[i]);
+ }
+ for (int i = 0; i < num_pointers_to_free; i++) {
+ toku_free(pointers_to_free[i]);
+ }
+ // stats
+ if (num_partial_evictions > 0) {
+ if (height == 0) {
+ long delta = old_attr.leaf_size - new_attr.leaf_size;
+ FT_STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF, num_partial_evictions);
+ FT_STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF_BYTES, delta);
+ } else {
+ long delta = old_attr.nonleaf_size - new_attr.nonleaf_size;
+ FT_STATUS_INC(FT_PARTIAL_EVICTIONS_NONLEAF, num_partial_evictions);
+ FT_STATUS_INC(FT_PARTIAL_EVICTIONS_NONLEAF_BYTES, delta);
+ }
+ }
+ return 0;
+}
+
+// We touch the clock while holding a read lock.
+// DRD reports a race but we want to ignore it.
+// Using a valgrind suppressions file is better than the DRD_IGNORE_VAR macro because it's more targeted.
+// We need a function so that the drd suppression has something to reference;
+// see src/tests/drd.suppressions (unsafe_touch_clock)
+static void unsafe_touch_clock(FTNODE node, int i) {
+ toku_unsafe_set(&node->bp[i].clock_count, static_cast<unsigned char>(1));
+}
+
+// Callback that states whether a partial fetch of the node is necessary
+// Currently, this function is responsible for the following things:
+// - reporting to the cachetable whether a partial fetch is required (as required by the contract of the callback)
+// - a couple of things that are NOT required by the callback, but that we do for efficiency and simplicity:
+// - for queries, setting the value of bfe->child_to_read so that the caller can proceed with the query
+// without having to evaluate toku_ft_search_which_child again. This makes the in-memory query faster.
+// - touching the necessary partition's clock. We do it here so that it is done in one central place
+// rather than by each of the various callers.
+//
+bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
+ // placeholder for now
+ bool retval = false;
+ FTNODE node = (FTNODE) ftnode_pv;
+ ftnode_fetch_extra *bfe = (ftnode_fetch_extra *) read_extraargs;
+ //
+ // The three types of fetches that the ft layer may request are:
+ // - ftnode_fetch_none: no partitions are necessary (example use: stat64)
+ // - ftnode_fetch_subset: some subset is necessary (example use: toku_ft_search)
+ // - ftnode_fetch_all: entire node is necessary (example use: flush, split, merge)
+ // The code below checks if the necessary partitions are already in memory;
+ // if they are, it returns false, and if not, it returns true
+ //
+ if (bfe->type == ftnode_fetch_none) {
+ retval = false;
+ }
+ else if (bfe->type == ftnode_fetch_all) {
+ retval = false;
+ for (int i = 0; i < node->n_children; i++) {
+ unsafe_touch_clock(node,i);
+ // if we find a partition that is not available,
+ // then a partial fetch is required because
+ // the entire node must be made available
+ if (BP_STATE(node,i) != PT_AVAIL) {
+ retval = true;
+ }
+ }
+ }
+ else if (bfe->type == ftnode_fetch_subset) {
+ // we do not take prefetching into account yet.
+ // as of now, if we need a subset, the only thing
+ // we can possibly require is a single basement node.
+ // we find out which basement node the query cares about
+ // and check if it is available
+ paranoid_invariant(bfe->search);
+ bfe->child_to_read = toku_ft_search_which_child(
+ bfe->ft->cmp,
+ node,
+ bfe->search
+ );
+ unsafe_touch_clock(node,bfe->child_to_read);
+ // if the child we want to read is not available, retval must be true
+ retval = (BP_STATE(node, bfe->child_to_read) != PT_AVAIL);
+ }
+ else if (bfe->type == ftnode_fetch_prefetch) {
+ // makes no sense to have prefetching disabled
+ // and still call this function
+ paranoid_invariant(!bfe->disable_prefetching);
+ int lc = bfe->leftmost_child_wanted(node);
+ int rc = bfe->rightmost_child_wanted(node);
+ for (int i = lc; i <= rc; ++i) {
+ if (BP_STATE(node, i) != PT_AVAIL) {
+ retval = true;
+ }
+ }
+ } else if (bfe->type == ftnode_fetch_keymatch) {
+ // we do not take prefetching into account yet.
+ // as of now, if we need a subset, the only thing
+ // we can possibly require is a single basement node.
+ // we find out which basement node the query cares about
+ // and check if it is available
+ if (node->height == 0) {
+ int left_child = bfe->leftmost_child_wanted(node);
+ int right_child = bfe->rightmost_child_wanted(node);
+ if (left_child == right_child) {
+ bfe->child_to_read = left_child;
+ unsafe_touch_clock(node,bfe->child_to_read);
+ // if the child we want to read is not available, retval must be true
+ retval = (BP_STATE(node, bfe->child_to_read) != PT_AVAIL);
+ }
+ }
+ } else {
+ // we have a bug. The type should be known
+ abort();
+ }
+ return retval;
+}
+
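+// Bump the status counters that record why a partition was partially fetched,
+// keyed on whether it was decompressed from memory or read from disk, whether
+// the node is a leaf, and the reason for the fetch (prefetch, fetch-all, or a
+// query hitting the target or a neighboring child).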
+static void
+ft_status_update_partial_fetch_reason(
+ ftnode_fetch_extra *bfe,
+ int childnum,
+ enum pt_state state,
+ bool is_leaf
+ )
+{
+ invariant(state == PT_COMPRESSED || state == PT_ON_DISK);
+ if (is_leaf) {
+ if (bfe->type == ftnode_fetch_prefetch) {
+ if (state == PT_COMPRESSED) {
+ FT_STATUS_INC(FT_NUM_BASEMENTS_DECOMPRESSED_PREFETCH, 1);
+ } else {
+ FT_STATUS_INC(FT_NUM_BASEMENTS_FETCHED_PREFETCH, 1);
+ FT_STATUS_INC(FT_BYTES_BASEMENTS_FETCHED_PREFETCH, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_BASEMENTS_FETCHED_PREFETCH, bfe->io_time);
+ }
+ } else if (bfe->type == ftnode_fetch_all) {
+ if (state == PT_COMPRESSED) {
+ FT_STATUS_INC(FT_NUM_BASEMENTS_DECOMPRESSED_WRITE, 1);
+ } else {
+ FT_STATUS_INC(FT_NUM_BASEMENTS_FETCHED_WRITE, 1);
+ FT_STATUS_INC(FT_BYTES_BASEMENTS_FETCHED_WRITE, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_BASEMENTS_FETCHED_WRITE, bfe->io_time);
+ }
+ } else if (childnum == bfe->child_to_read) {
+ if (state == PT_COMPRESSED) {
+ FT_STATUS_INC(FT_NUM_BASEMENTS_DECOMPRESSED_NORMAL, 1);
+ } else {
+ FT_STATUS_INC(FT_NUM_BASEMENTS_FETCHED_NORMAL, 1);
+ FT_STATUS_INC(FT_BYTES_BASEMENTS_FETCHED_NORMAL, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_BASEMENTS_FETCHED_NORMAL, bfe->io_time);
+ }
+ } else {
+ if (state == PT_COMPRESSED) {
+ FT_STATUS_INC(FT_NUM_BASEMENTS_DECOMPRESSED_AGGRESSIVE, 1);
+ } else {
+ FT_STATUS_INC(FT_NUM_BASEMENTS_FETCHED_AGGRESSIVE, 1);
+ FT_STATUS_INC(FT_BYTES_BASEMENTS_FETCHED_AGGRESSIVE, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_BASEMENTS_FETCHED_AGGRESSIVE, bfe->io_time);
+ }
+ }
+ }
+ else {
+ if (bfe->type == ftnode_fetch_prefetch) {
+ if (state == PT_COMPRESSED) {
+ FT_STATUS_INC(FT_NUM_MSG_BUFFER_DECOMPRESSED_PREFETCH, 1);
+ } else {
+ FT_STATUS_INC(FT_NUM_MSG_BUFFER_FETCHED_PREFETCH, 1);
+ FT_STATUS_INC(FT_BYTES_MSG_BUFFER_FETCHED_PREFETCH, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_MSG_BUFFER_FETCHED_PREFETCH, bfe->io_time);
+ }
+ } else if (bfe->type == ftnode_fetch_all) {
+ if (state == PT_COMPRESSED) {
+ FT_STATUS_INC(FT_NUM_MSG_BUFFER_DECOMPRESSED_WRITE, 1);
+ } else {
+ FT_STATUS_INC(FT_NUM_MSG_BUFFER_FETCHED_WRITE, 1);
+ FT_STATUS_INC(FT_BYTES_MSG_BUFFER_FETCHED_WRITE, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_MSG_BUFFER_FETCHED_WRITE, bfe->io_time);
+ }
+ } else if (childnum == bfe->child_to_read) {
+ if (state == PT_COMPRESSED) {
+ FT_STATUS_INC(FT_NUM_MSG_BUFFER_DECOMPRESSED_NORMAL, 1);
+ } else {
+ FT_STATUS_INC(FT_NUM_MSG_BUFFER_FETCHED_NORMAL, 1);
+ FT_STATUS_INC(FT_BYTES_MSG_BUFFER_FETCHED_NORMAL, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_MSG_BUFFER_FETCHED_NORMAL, bfe->io_time);
+ }
+ } else {
+ if (state == PT_COMPRESSED) {
+ FT_STATUS_INC(FT_NUM_MSG_BUFFER_DECOMPRESSED_AGGRESSIVE, 1);
+ } else {
+ FT_STATUS_INC(FT_NUM_MSG_BUFFER_FETCHED_AGGRESSIVE, 1);
+ FT_STATUS_INC(FT_BYTES_MSG_BUFFER_FETCHED_AGGRESSIVE, bfe->bytes_read);
+ FT_STATUS_INC(FT_TOKUTIME_MSG_BUFFER_FETCHED_AGGRESSIVE, bfe->io_time);
+ }
+ }
+ }
+}
+
+void toku_ft_status_update_serialize_times(FTNODE node, tokutime_t serialize_time, tokutime_t compress_time) {
+ if (node->height == 0) {
+ FT_STATUS_INC(FT_LEAF_SERIALIZE_TOKUTIME, serialize_time);
+ FT_STATUS_INC(FT_LEAF_COMPRESS_TOKUTIME, compress_time);
+ } else {
+ FT_STATUS_INC(FT_NONLEAF_SERIALIZE_TOKUTIME, serialize_time);
+ FT_STATUS_INC(FT_NONLEAF_COMPRESS_TOKUTIME, compress_time);
+ }
+}
+
+void toku_ft_status_update_deserialize_times(FTNODE node, tokutime_t deserialize_time, tokutime_t decompress_time) {
+ if (node->height == 0) {
+ FT_STATUS_INC(FT_LEAF_DESERIALIZE_TOKUTIME, deserialize_time);
+ FT_STATUS_INC(FT_LEAF_DECOMPRESS_TOKUTIME, decompress_time);
+ } else {
+ FT_STATUS_INC(FT_NONLEAF_DESERIALIZE_TOKUTIME, deserialize_time);
+ FT_STATUS_INC(FT_NONLEAF_DECOMPRESS_TOKUTIME, decompress_time);
+ }
+}
+
+void toku_ft_status_note_msn_discard(void) {
+ FT_STATUS_INC(FT_MSN_DISCARDS, 1);
+}
+
+void toku_ft_status_note_update(bool broadcast) {
+ if (broadcast) {
+ FT_STATUS_INC(FT_UPDATES_BROADCAST, 1);
+ } else {
+ FT_STATUS_INC(FT_UPDATES, 1);
+ }
+}
+
+void toku_ft_status_note_msg_bytes_out(size_t buffsize) {
+ FT_STATUS_INC(FT_MSG_BYTES_OUT, buffsize);
+ FT_STATUS_INC(FT_MSG_BYTES_CURR, -buffsize);
+}
+void toku_ft_status_note_ftnode(int height, bool created) {
+ if (created) {
+ if (height == 0) {
+ FT_STATUS_INC(FT_CREATE_LEAF, 1);
+ } else {
+ FT_STATUS_INC(FT_CREATE_NONLEAF, 1);
+ }
+ } else {
+ // created = false means destroyed
+ }
+}
+
+// callback for partially reading a node
+// we could have just used toku_ftnode_fetch_callback, but wanted to separate the two cases into separate functions
+int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraargs, int fd, PAIR_ATTR* sizep) {
+ int r = 0;
+ FTNODE node = (FTNODE) ftnode_pv;
+ FTNODE_DISK_DATA ndd = (FTNODE_DISK_DATA) disk_data;
+ ftnode_fetch_extra *bfe = (ftnode_fetch_extra *) read_extraargs;
+ // there must be a reason this is being called. If we get a garbage type or the type is ftnode_fetch_none,
+ // then something went wrong
+ assert((bfe->type == ftnode_fetch_subset) || (bfe->type == ftnode_fetch_all) || (bfe->type == ftnode_fetch_prefetch) || (bfe->type == ftnode_fetch_keymatch));
+ // determine the range to prefetch
+ int lc, rc;
+ if (!bfe->disable_prefetching &&
+ (bfe->type == ftnode_fetch_subset || bfe->type == ftnode_fetch_prefetch)
+ )
+ {
+ lc = bfe->leftmost_child_wanted(node);
+ rc = bfe->rightmost_child_wanted(node);
+ } else {
+ lc = -1;
+ rc = -1;
+ }
+ for (int i = 0; i < node->n_children; i++) {
+ if (BP_STATE(node,i) == PT_AVAIL) {
+ continue;
+ }
+ if ((lc <= i && i <= rc) || bfe->wants_child_available(i)) {
+ enum pt_state state = BP_STATE(node, i);
+ if (state == PT_COMPRESSED) {
+ r = toku_deserialize_bp_from_compressed(node, i, bfe);
+ } else {
+ invariant(state == PT_ON_DISK);
+ r = toku_deserialize_bp_from_disk(node, ndd, i, fd, bfe);
+ }
+ ft_status_update_partial_fetch_reason(bfe, i, state, (node->height == 0));
+ }
+
+ if (r != 0) {
+ if (r == TOKUDB_BAD_CHECKSUM) {
+ fprintf(stderr,
+ "Checksum failure while reading node partition in file %s.\n",
+ toku_cachefile_fname_in_env(bfe->ft->cf));
+ } else {
+ fprintf(stderr,
+ "Error while reading node partition %d\n",
+ get_maybe_error_errno());
+ }
+ abort();
+ }
+ }
+
+ *sizep = make_ftnode_pair_attr(node);
+
+ return 0;
+}
+
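+// Heaviside function for searching a basement node's data buffer: compares a
+// candidate key from the buffer against the target key carried in 'be' using
+// the tree's comparator (see its use with find_zero() further below).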
+int toku_msg_leafval_heaviside(DBT const &kdbt, const struct toku_msg_leafval_heaviside_extra &be) {
+ return be.cmp(&kdbt, be.key);
+}
+
+static void
+ft_init_new_root(FT ft, FTNODE oldroot, FTNODE *newrootp)
+// Effect: Create a new root node whose two children are the split of oldroot.
+// oldroot is unpinned in the process.
+// Leave the new root pinned.
+{
+ FTNODE newroot;
+
+ BLOCKNUM old_blocknum = oldroot->blocknum;
+ uint32_t old_fullhash = oldroot->fullhash;
+
+ int new_height = oldroot->height+1;
+ uint32_t new_fullhash;
+ BLOCKNUM new_blocknum;
+
+ cachetable_put_empty_node_with_dep_nodes(
+ ft,
+ 1,
+ &oldroot,
+ &new_blocknum,
+ &new_fullhash,
+ &newroot
+ );
+
+ assert(newroot);
+ assert(new_height > 0);
+ toku_initialize_empty_ftnode (
+ newroot,
+ new_blocknum,
+ new_height,
+ 1,
+ ft->h->layout_version,
+ ft->h->flags
+ );
+ newroot->fullhash = new_fullhash;
+ MSN msna = oldroot->max_msn_applied_to_node_on_disk;
+ newroot->max_msn_applied_to_node_on_disk = msna;
+ BP_STATE(newroot,0) = PT_AVAIL;
+ newroot->set_dirty();
+
+ // Set the first child to have the new blocknum,
+ // and then swap newroot with oldroot. The new root
+ // will inherit the hash/blocknum/pair from oldroot,
+ // keeping the root blocknum constant.
+ BP_BLOCKNUM(newroot, 0) = new_blocknum;
+ toku_ftnode_swap_pair_values(newroot, oldroot);
+
+ toku_ft_split_child(
+ ft,
+ newroot,
+ 0, // childnum to split
+ oldroot,
+ SPLIT_EVENLY
+ );
+
+ // ft_split_child released locks on newroot
+ // and oldroot, so now we repin and
+ // return to caller
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ toku_pin_ftnode(
+ ft,
+ old_blocknum,
+ old_fullhash,
+ &bfe,
+ PL_WRITE_EXPENSIVE, // may_modify_node
+ newrootp,
+ true
+ );
+}
+
+static void inject_message_in_locked_node(
+ FT ft,
+ FTNODE node,
+ int childnum,
+ const ft_msg &msg,
+ size_t flow_deltas[],
+ txn_gc_info *gc_info
+ )
+{
+ // No guarantee that we're the writer, but oh well.
+ // TODO(leif): Implement "do I have the lock or is it someone else?"
+ // check in frwlock. Should be possible with TOKU_PTHREAD_DEBUG, nop
+ // otherwise.
+ invariant(toku_ctpair_is_write_locked(node->ct_pair));
+ toku_ftnode_assert_fully_in_memory(node);
+
+ // Take the newer of the two oldest referenced xid values from the node and gc_info.
+ // The gc_info usually has a newer value, because we got it at the top of this call
+ // stack from the txn manager. But sometimes the node has a newer value, if some
+ // other thread sees a newer value and writes to this node before we got the lock.
+ if (gc_info->oldest_referenced_xid_for_implicit_promotion > node->oldest_referenced_xid_known) {
+ node->oldest_referenced_xid_known = gc_info->oldest_referenced_xid_for_implicit_promotion;
+ } else if (gc_info->oldest_referenced_xid_for_implicit_promotion < node->oldest_referenced_xid_known) {
+ gc_info->oldest_referenced_xid_for_implicit_promotion = node->oldest_referenced_xid_known;
+ }
+
+ // Get the MSN from the header. Now that we have a write lock on the
+ // node we're injecting into, we know no other thread will get an MSN
+ // after us and get that message into our subtree before us.
+ MSN msg_msn = { .msn = toku_sync_add_and_fetch(&ft->h->max_msn_in_ft.msn, 1) };
+ ft_msg msg_with_msn(msg.kdbt(), msg.vdbt(), msg.type(), msg_msn, msg.xids());
+ paranoid_invariant(msg_with_msn.msn().msn > node->max_msn_applied_to_node_on_disk.msn);
+
+ STAT64INFO_S stats_delta = { 0,0 };
+ int64_t logical_rows_delta = 0;
+ toku_ftnode_put_msg(
+ ft->cmp,
+ ft->update_fun,
+ node,
+ childnum,
+ msg_with_msn,
+ true,
+ gc_info,
+ flow_deltas,
+ &stats_delta,
+ &logical_rows_delta);
+ if (stats_delta.numbytes || stats_delta.numrows) {
+ toku_ft_update_stats(&ft->in_memory_stats, stats_delta);
+ }
+ toku_ft_adjust_logical_row_count(ft, logical_rows_delta);
+ //
+ // the assumption is that toku_ftnode_put_msg will
+ // mark the node as dirty;
+ // we enforce that invariant here.
+ //
+ paranoid_invariant(node->dirty() != 0);
+
+ // update some status variables
+ if (node->height != 0) {
+ size_t msgsize = msg.total_size();
+ FT_STATUS_INC(FT_MSG_BYTES_IN, msgsize);
+ FT_STATUS_INC(FT_MSG_BYTES_CURR, msgsize);
+ FT_STATUS_INC(FT_MSG_NUM, 1);
+ if (ft_msg_type_applies_all(msg.type())) {
+ FT_STATUS_INC(FT_MSG_NUM_BROADCAST, 1);
+ }
+ }
+
+ // verify that msn of latest message was captured in root node
+ paranoid_invariant(msg_with_msn.msn().msn == node->max_msn_applied_to_node_on_disk.msn);
+
+ if (node->blocknum.b == ft->rightmost_blocknum.b) {
+ if (toku_unsafe_fetch(&ft->seqinsert_score) < FT_SEQINSERT_SCORE_THRESHOLD) {
+ // we promoted to the rightmost leaf node and the seqinsert score has not yet saturated.
+ toku_sync_fetch_and_add(&ft->seqinsert_score, 1);
+ }
+ } else if (toku_unsafe_fetch(&ft->seqinsert_score) != 0) {
+ // we promoted to something other than the rightmost leaf node and the score should reset
+ toku_unsafe_set(&ft->seqinsert_score, static_cast<uint32_t>(0));
+ }
+
+ // if we call toku_ft_flush_some_child, then that function unpins the root
+ // otherwise, we unpin ourselves
+ if (node->height > 0 && toku_ftnode_nonleaf_is_gorged(node, ft->h->nodesize)) {
+ toku_ft_flush_node_on_background_thread(ft, node);
+ }
+ else {
+ toku_unpin_ftnode(ft, node);
+ }
+}
+
+// seqinsert_loc is a bitmask.
+// The root counts as being both on the "left extreme" and on the "right extreme".
+// Therefore, at the root, you're at LEFT_EXTREME | RIGHT_EXTREME.
+typedef char seqinsert_loc;
+static const seqinsert_loc NEITHER_EXTREME = 0;
+static const seqinsert_loc LEFT_EXTREME = 1;
+static const seqinsert_loc RIGHT_EXTREME = 2;
+
+static bool process_maybe_reactive_child(FT ft, FTNODE parent, FTNODE child, int childnum, seqinsert_loc loc)
+// Effect:
+// If child needs to be split or merged, do that.
+// parent and child will be unlocked if this happens
+// Requires: parent and child are read locked
+// Returns:
+// true if relocking is needed
+// false otherwise
+{
+ enum reactivity re = toku_ftnode_get_reactivity(ft, child);
+ enum reactivity newre;
+ BLOCKNUM child_blocknum;
+ uint32_t child_fullhash;
+ switch (re) {
+ case RE_STABLE:
+ return false;
+ case RE_FISSIBLE:
+ {
+ // We only have a read lock on the parent. We need to drop both locks, and get write locks.
+ BLOCKNUM parent_blocknum = parent->blocknum;
+ uint32_t parent_fullhash = toku_cachetable_hash(ft->cf, parent_blocknum);
+ int parent_height = parent->height;
+ int parent_n_children = parent->n_children;
+ toku_unpin_ftnode_read_only(ft, child);
+ toku_unpin_ftnode_read_only(ft, parent);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ FTNODE newparent, newchild;
+ toku_pin_ftnode(ft, parent_blocknum, parent_fullhash, &bfe, PL_WRITE_CHEAP, &newparent, true);
+ if (newparent->height != parent_height || newparent->n_children != parent_n_children ||
+ childnum >= newparent->n_children || toku_bnc_n_entries(BNC(newparent, childnum))) {
+ // If the height changed or childnum is now off the end, something clearly got split or merged out from under us.
+ // If something got injected in this node, then it got split or merged and we shouldn't be splitting it.
+ // But we already unpinned the child so we need to have the caller re-try the pins.
+ toku_unpin_ftnode_read_only(ft, newparent);
+ return true;
+ }
+ // It's ok to reuse the same childnum because if we get something
+ // else we need to split, well, that's crazy, but let's go ahead
+ // and split it.
+ child_blocknum = BP_BLOCKNUM(newparent, childnum);
+ child_fullhash = compute_child_fullhash(ft->cf, newparent, childnum);
+ toku_pin_ftnode_with_dep_nodes(ft, child_blocknum, child_fullhash, &bfe, PL_WRITE_CHEAP, 1, &newparent, &newchild, true);
+ newre = toku_ftnode_get_reactivity(ft, newchild);
+ if (newre == RE_FISSIBLE) {
+ enum split_mode split_mode;
+ if (newparent->height == 1 && (loc & LEFT_EXTREME) && childnum == 0) {
+ split_mode = SPLIT_RIGHT_HEAVY;
+ } else if (newparent->height == 1 && (loc & RIGHT_EXTREME) && childnum == newparent->n_children - 1) {
+ split_mode = SPLIT_LEFT_HEAVY;
+ } else {
+ split_mode = SPLIT_EVENLY;
+ }
+ toku_ft_split_child(ft, newparent, childnum, newchild, split_mode);
+ } else {
+ // some other thread already got it, just unpin and tell the
+ // caller to retry
+ toku_unpin_ftnode_read_only(ft, newchild);
+ toku_unpin_ftnode_read_only(ft, newparent);
+ }
+ return true;
+ }
+ case RE_FUSIBLE:
+ {
+ if (parent->height == 1) {
+ // prevent re-merging of recently unevenly-split nodes
+ if (((loc & LEFT_EXTREME) && childnum <= 1) ||
+ ((loc & RIGHT_EXTREME) && childnum >= parent->n_children - 2)) {
+ return false;
+ }
+ }
+
+ int parent_height = parent->height;
+ BLOCKNUM parent_blocknum = parent->blocknum;
+ uint32_t parent_fullhash = toku_cachetable_hash(ft->cf, parent_blocknum);
+ toku_unpin_ftnode_read_only(ft, child);
+ toku_unpin_ftnode_read_only(ft, parent);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ FTNODE newparent, newchild;
+ toku_pin_ftnode(ft, parent_blocknum, parent_fullhash, &bfe, PL_WRITE_CHEAP, &newparent, true);
+ if (newparent->height != parent_height || childnum >= newparent->n_children) {
+ // looks like this is the root and it got merged, let's just start over (like in the split case above)
+ toku_unpin_ftnode_read_only(ft, newparent);
+ return true;
+ }
+ child_blocknum = BP_BLOCKNUM(newparent, childnum);
+ child_fullhash = compute_child_fullhash(ft->cf, newparent, childnum);
+ toku_pin_ftnode_with_dep_nodes(ft, child_blocknum, child_fullhash, &bfe, PL_READ, 1, &newparent, &newchild, true);
+ newre = toku_ftnode_get_reactivity(ft, newchild);
+ if (newre == RE_FUSIBLE && newparent->n_children >= 2) {
+ toku_unpin_ftnode_read_only(ft, newchild);
+ toku_ft_merge_child(ft, newparent, childnum);
+ } else {
+ // Could be a weird case where newparent has only one
+ // child. In this case, we want to inject here but we've
+ // already unpinned the caller's copy of parent so we have
+ // to ask them to re-pin, or they could (very rarely)
+ // dereference memory in a freed node. TODO: we could
+ // give them back the copy of the parent we pinned.
+ //
+ // Otherwise, some other thread already got it, just unpin
+ // and tell the caller to retry
+ toku_unpin_ftnode_read_only(ft, newchild);
+ toku_unpin_ftnode_read_only(ft, newparent);
+ }
+ return true;
+ }
+ }
+ abort();
+}
+
+static void inject_message_at_this_blocknum(FT ft, CACHEKEY cachekey, uint32_t fullhash, const ft_msg &msg, size_t flow_deltas[], txn_gc_info *gc_info)
+// Effect:
+// Inject message into the node at this blocknum (cachekey).
+// Gets a write lock on the node for you.
+{
+ toku::context inject_ctx(CTX_MESSAGE_INJECTION);
+ FTNODE node;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ toku_pin_ftnode(ft, cachekey, fullhash, &bfe, PL_WRITE_CHEAP, &node, true);
+ toku_ftnode_assert_fully_in_memory(node);
+ paranoid_invariant(node->fullhash==fullhash);
+ ft_verify_flags(ft, node);
+ inject_message_in_locked_node(ft, node, -1, msg, flow_deltas, gc_info);
+}
+
+__attribute__((const))
+static inline bool should_inject_in_node(seqinsert_loc loc, int height, int depth)
+// We should inject directly in a node if:
+// - it's a leaf, or
+// - it's a height 1 node not at either extreme, or
+// - it's a depth 2 node not at either extreme
+{
+ return (height == 0 || (loc == NEITHER_EXTREME && (height <= 1 || depth >= 2)));
+}
+
+static void ft_verify_or_set_rightmost_blocknum(FT ft, BLOCKNUM b)
+// Given: 'b', the _definitive_ and constant rightmost blocknum of 'ft'
+{
+ if (toku_unsafe_fetch(&ft->rightmost_blocknum.b) == RESERVED_BLOCKNUM_NULL) {
+ toku_ft_lock(ft);
+ if (ft->rightmost_blocknum.b == RESERVED_BLOCKNUM_NULL) {
+ toku_unsafe_set(&ft->rightmost_blocknum, b);
+ }
+ toku_ft_unlock(ft);
+ }
+ // The rightmost blocknum only transitions from RESERVED_BLOCKNUM_NULL to non-null.
+ // If it's already set, verify that the stored value is consistent with 'b'
+ invariant(toku_unsafe_fetch(&ft->rightmost_blocknum.b) == b.b);
+}
+
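+// Heuristic ("the birdie") that says whether enough message flow has recently
+// passed through this buffer to justify promoting messages into the child
+// below it: true when either flow window reaches 1/8 of the nodesize.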
+bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) {
+ static const double factor = 0.125;
+ const uint64_t flow_threshold = ft->h->nodesize * factor;
+ return bnc->flow[0] >= flow_threshold || bnc->flow[1] >= flow_threshold;
+}
+
+static void push_something_in_subtree(
+ FT ft,
+ FTNODE subtree_root,
+ int target_childnum,
+ const ft_msg &msg,
+ size_t flow_deltas[],
+ txn_gc_info *gc_info,
+ int depth,
+ seqinsert_loc loc,
+ bool just_did_split_or_merge
+ )
+// Effects:
+// Assign message an MSN from ft->h.
+// Put message in the subtree rooted at node. Due to promotion the message may not be injected directly in this node.
+// Unlock node or schedule it to be unlocked (after a background flush).
+// Either way, the caller is not responsible for unlocking node.
+// Requires:
+// subtree_root is read locked and fully in memory.
+// Notes:
+// In Ming, the basic rules of promotion are as follows:
+// Don't promote broadcast messages.
+// Don't promote past non-empty buffers.
+// Otherwise, promote at most to height 1 or depth 2 (whichever is highest), as far as the birdie asks you to promote.
+// We don't promote to leaves because injecting into leaves is expensive, mostly because of #5605 and some of #5552.
+// We don't promote past depth 2 because we found that gives us enough parallelism without costing us too much pinning work.
+//
+// This is true with the following caveats:
+// We always promote all the way to the leaves on the rightmost and leftmost edges of the tree, for sequential insertions.
+// (That means we can promote past depth 2 near the edges of the tree.)
+//
+// When the birdie is still saying we should promote, we use get_and_pin so that we wait to get the node.
+// If the birdie doesn't say to promote, we try maybe_get_and_pin. If we get the node cheaply, and it's dirty, we promote anyway.
+{
+ toku_ftnode_assert_fully_in_memory(subtree_root);
+ if (should_inject_in_node(loc, subtree_root->height, depth)) {
+ switch (depth) {
+ case 0:
+ FT_STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_0, 1); break;
+ case 1:
+ FT_STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_1, 1); break;
+ case 2:
+ FT_STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_2, 1); break;
+ case 3:
+ FT_STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_3, 1); break;
+ default:
+ FT_STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_GT3, 1); break;
+ }
+ // If the target node is a non-root leaf node on the right extreme,
+ // set the rightmost blocknum. We know there are no messages above us
+ // because promotion would not choose to inject directly into this leaf
+ // otherwise. We explicitly skip the root node because then we don't have
+ // to worry about changing the rightmost blocknum when the root splits.
+ if (subtree_root->height == 0 && loc == RIGHT_EXTREME && subtree_root->blocknum.b != ft->h->root_blocknum.b) {
+ ft_verify_or_set_rightmost_blocknum(ft, subtree_root->blocknum);
+ }
+ inject_message_in_locked_node(ft, subtree_root, target_childnum, msg, flow_deltas, gc_info);
+ } else {
+ int r;
+ int childnum;
+ NONLEAF_CHILDINFO bnc;
+
+ // toku_ft_root_put_msg should not have called us otherwise.
+ paranoid_invariant(ft_msg_type_applies_once(msg.type()));
+
+ childnum = (target_childnum >= 0 ? target_childnum
+ : toku_ftnode_which_child(subtree_root, msg.kdbt(), ft->cmp));
+ bnc = BNC(subtree_root, childnum);
+
+ if (toku_bnc_n_entries(bnc) > 0) {
+ // The buffer is non-empty, give up on promoting.
+ FT_STATUS_INC(FT_PRO_NUM_STOP_NONEMPTY_BUF, 1);
+ goto relock_and_push_here;
+ }
+
+ seqinsert_loc next_loc;
+ if ((loc & LEFT_EXTREME) && childnum == 0) {
+ next_loc = LEFT_EXTREME;
+ } else if ((loc & RIGHT_EXTREME) && childnum == subtree_root->n_children - 1) {
+ next_loc = RIGHT_EXTREME;
+ } else {
+ next_loc = NEITHER_EXTREME;
+ }
+
+ if (next_loc == NEITHER_EXTREME && subtree_root->height <= 1) {
+ // Never promote to leaf nodes except on the edges
+ FT_STATUS_INC(FT_PRO_NUM_STOP_H1, 1);
+ goto relock_and_push_here;
+ }
+
+ {
+ const BLOCKNUM child_blocknum = BP_BLOCKNUM(subtree_root, childnum);
+ ft->blocktable.verify_blocknum_allocated(child_blocknum);
+ const uint32_t child_fullhash = toku_cachetable_hash(ft->cf, child_blocknum);
+
+ FTNODE child;
+ {
+ const int child_height = subtree_root->height - 1;
+ const int child_depth = depth + 1;
+ // If we're locking a leaf, or a height 1 node or depth 2
+ // node in the middle, we know we won't promote further
+ // than that, so just get a write lock now.
+ const pair_lock_type lock_type = (should_inject_in_node(next_loc, child_height, child_depth)
+ ? PL_WRITE_CHEAP
+ : PL_READ);
+ if (next_loc != NEITHER_EXTREME || (toku_bnc_should_promote(ft, bnc) && depth <= 1)) {
+ // If we're on either extreme, or the birdie wants to
+ // promote and we're in the top two levels of the
+ // tree, don't stop just because someone else has the
+ // node locked.
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ if (lock_type == PL_WRITE_CHEAP) {
+ // We intend to take the write lock for message injection
+ toku::context inject_ctx(CTX_MESSAGE_INJECTION);
+ toku_pin_ftnode(ft, child_blocknum, child_fullhash, &bfe, lock_type, &child, true);
+ } else {
+ // We're going to keep promoting
+ toku::context promo_ctx(CTX_PROMO);
+ toku_pin_ftnode(ft, child_blocknum, child_fullhash, &bfe, lock_type, &child, true);
+ }
+ } else {
+ r = toku_maybe_pin_ftnode_clean(ft, child_blocknum, child_fullhash, lock_type, &child);
+ if (r != 0) {
+ // We couldn't get the child cheaply, so give up on promoting.
+ FT_STATUS_INC(FT_PRO_NUM_STOP_LOCK_CHILD, 1);
+ goto relock_and_push_here;
+ }
+ if (toku_ftnode_fully_in_memory(child)) {
+ // toku_pin_ftnode... touches the clock but toku_maybe_pin_ftnode... doesn't.
+ // This prevents partial eviction.
+ for (int i = 0; i < child->n_children; ++i) {
+ BP_TOUCH_CLOCK(child, i);
+ }
+ } else {
+ // We got the child, but it's not fully in memory. Give up on promoting.
+ FT_STATUS_INC(FT_PRO_NUM_STOP_CHILD_INMEM, 1);
+ goto unlock_child_and_push_here;
+ }
+ }
+ }
+ paranoid_invariant_notnull(child);
+
+ if (!just_did_split_or_merge) {
+ BLOCKNUM subtree_root_blocknum = subtree_root->blocknum;
+ uint32_t subtree_root_fullhash = toku_cachetable_hash(ft->cf, subtree_root_blocknum);
+ const bool did_split_or_merge = process_maybe_reactive_child(ft, subtree_root, child, childnum, loc);
+ if (did_split_or_merge) {
+ // Need to re-pin this node and try at this level again.
+ FTNODE newparent;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft); // should be fully in memory, we just split it
+ toku_pin_ftnode(ft, subtree_root_blocknum, subtree_root_fullhash, &bfe, PL_READ, &newparent, true);
+ push_something_in_subtree(ft, newparent, -1, msg, flow_deltas, gc_info, depth, loc, true);
+ return;
+ }
+ }
+
+ if (next_loc != NEITHER_EXTREME || child->dirty() || toku_bnc_should_promote(ft, bnc)) {
+ push_something_in_subtree(ft, child, -1, msg, flow_deltas, gc_info, depth + 1, next_loc, false);
+ toku_sync_fetch_and_add(&bnc->flow[0], flow_deltas[0]);
+ // The recursive call unpinned the child, but
+ // we're responsible for unpinning subtree_root.
+ toku_unpin_ftnode_read_only(ft, subtree_root);
+ return;
+ }
+
+ FT_STATUS_INC(FT_PRO_NUM_DIDNT_WANT_PROMOTE, 1);
+ unlock_child_and_push_here:
+ // We locked the child, but we decided not to promote.
+ // Unlock the child, and fall through to the next case.
+ toku_unpin_ftnode_read_only(ft, child);
+ }
+ relock_and_push_here:
+ // Give up on promoting.
+ // We have subtree_root read-locked and we don't have a child locked.
+ // Drop the read lock, grab a write lock, and inject here.
+ {
+ // Right now we have a read lock on subtree_root, but we want
+ // to inject into it so we get a write lock instead.
+ BLOCKNUM subtree_root_blocknum = subtree_root->blocknum;
+ uint32_t subtree_root_fullhash = toku_cachetable_hash(ft->cf, subtree_root_blocknum);
+ toku_unpin_ftnode_read_only(ft, subtree_root);
+ switch (depth) {
+ case 0:
+ FT_STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_0, 1); break;
+ case 1:
+ FT_STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_1, 1); break;
+ case 2:
+ FT_STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_2, 1); break;
+ case 3:
+ FT_STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_3, 1); break;
+ default:
+ FT_STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_GT3, 1); break;
+ }
+ inject_message_at_this_blocknum(ft, subtree_root_blocknum, subtree_root_fullhash, msg, flow_deltas, gc_info);
+ }
+ }
+}
+
+void toku_ft_root_put_msg(
+ FT ft,
+ const ft_msg &msg,
+ txn_gc_info *gc_info
+ )
+// Effect:
+// - assign msn to message and update msn in the header
+// - push the message into the ft
+
+// As of Clayface, the root blocknum is a constant, so preventing a
+// race between message injection and the split of a root is the job
+// of the cachetable's locking rules.
+//
+// We also hold the MO lock for a number of reasons, but an important
+// one is to make sure that a begin_checkpoint may not start while
+// this code is executing. A begin_checkpoint does (at least) two things
+// that can interfere with the operations here:
+// - Copies the header to a checkpoint header. Because we may change
+// the max_msn_in_ft below, we don't want the header to be copied in
+// the middle of these operations.
+// - Takes note of the log's LSN. Because this put operation has
+// already been logged, this message injection must be included
+// in any checkpoint that contains this put's logentry.
+// Holding the mo lock throughout this function ensures that fact.
+{
+ toku::context promo_ctx(CTX_PROMO);
+
+ // blackhole fractal trees drop all messages, so do nothing.
+ if (ft->blackhole) {
+ return;
+ }
+
+ FTNODE node;
+
+ uint32_t fullhash;
+ CACHEKEY root_key;
+ toku_calculate_root_offset_pointer(ft, &root_key, &fullhash);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+
+ size_t flow_deltas[] = { message_buffer::msg_memsize_in_buffer(msg), 0 };
+
+ pair_lock_type lock_type;
+ lock_type = PL_READ; // try first for a read lock
+ // If we need to split the root, we'll have to change from a read lock
+ // to a write lock and check again. We change the variable lock_type
+ // and jump back to here.
+ change_lock_type:
+ // get the root node
+ toku_pin_ftnode(ft, root_key, fullhash, &bfe, lock_type, &node, true);
+ toku_ftnode_assert_fully_in_memory(node);
+ paranoid_invariant(node->fullhash==fullhash);
+ ft_verify_flags(ft, node);
+
+ // First handle a reactive root.
+ // This relocking-for-split algorithm will cause every message
+ // injection thread to change lock type back and forth, when only one
+ // of them needs to in order to handle the split. That's not great,
+ // but root splits are incredibly rare.
+ enum reactivity re = toku_ftnode_get_reactivity(ft, node);
+ switch (re) {
+ case RE_STABLE:
+ case RE_FUSIBLE: // cannot merge anything at the root
+ if (lock_type != PL_READ) {
+ // We thought we needed to split, but someone else got to
+ // it before us. Downgrade to a read lock.
+ toku_unpin_ftnode_read_only(ft, node);
+ lock_type = PL_READ;
+ goto change_lock_type;
+ }
+ break;
+ case RE_FISSIBLE:
+ if (lock_type == PL_READ) {
+ // Here, we only have a read lock on the root. In order
+ // to split it, we need a write lock, but in the course of
+ // gaining the write lock, someone else may have gotten in
+ // before us and split it. So we upgrade to a write lock
+ // and check again.
+ toku_unpin_ftnode_read_only(ft, node);
+ lock_type = PL_WRITE_CHEAP;
+ goto change_lock_type;
+ } else {
+ // We have a write lock, now we can split.
+ ft_init_new_root(ft, node, &node);
+ // Then downgrade back to a read lock, and we can finally
+ // do the injection.
+ toku_unpin_ftnode(ft, node);
+ lock_type = PL_READ;
+ FT_STATUS_INC(FT_PRO_NUM_ROOT_SPLIT, 1);
+ goto change_lock_type;
+ }
+ break;
+ }
+ // If we get to here, we have a read lock and the root doesn't
+ // need to be split. It's safe to inject the message.
+ paranoid_invariant(lock_type == PL_READ);
+ // We cannot assert that we have the read lock because frwlock asserts
+ // that its mutex is locked when we check if there are any readers.
+ // That wouldn't give us a strong guarantee that we have the read lock
+ // anyway.
+
+ // Now, either inject here or promote. We decide based on a heuristic:
+ if (node->height == 0 || !ft_msg_type_applies_once(msg.type())) {
+ // If the root's a leaf or we're injecting a broadcast, drop the read lock and inject here.
+ toku_unpin_ftnode_read_only(ft, node);
+ FT_STATUS_INC(FT_PRO_NUM_ROOT_H0_INJECT, 1);
+ inject_message_at_this_blocknum(ft, root_key, fullhash, msg, flow_deltas, gc_info);
+ } else if (node->height > 1) {
+ // If the root's above height 1, we are definitely eligible for promotion.
+ push_something_in_subtree(ft, node, -1, msg, flow_deltas, gc_info, 0, LEFT_EXTREME | RIGHT_EXTREME, false);
+ } else {
+ // The root's height 1. We may be eligible for promotion here.
+ // On the extremes, we want to promote, in the middle, we don't.
+ int childnum = toku_ftnode_which_child(node, msg.kdbt(), ft->cmp);
+ if (childnum == 0 || childnum == node->n_children - 1) {
+ // On the extremes, promote. We know which childnum we're going to, so pass that down too.
+ push_something_in_subtree(ft, node, childnum, msg, flow_deltas, gc_info, 0, LEFT_EXTREME | RIGHT_EXTREME, false);
+ } else {
+ // At height 1 in the middle, don't promote, drop the read lock and inject here.
+ toku_unpin_ftnode_read_only(ft, node);
+ FT_STATUS_INC(FT_PRO_NUM_ROOT_H1_INJECT, 1);
+ inject_message_at_this_blocknum(ft, root_key, fullhash, msg, flow_deltas, gc_info);
+ }
+ }
+}
+
+// TODO: Remove me, I'm boring.
+static int ft_compare_keys(FT ft, const DBT *a, const DBT *b)
+// Effect: Compare two keys using the given fractal tree's comparator/descriptor
+{
+ return ft->cmp(a, b);
+}
+
+static LEAFENTRY bn_get_le_and_key(BASEMENTNODE bn, int idx, DBT *key)
+// Effect: Gets the i'th leafentry from the given basement node and
+// fill its key in *key
+// Requires: The i'th leafentry exists.
+{
+ LEAFENTRY le;
+ uint32_t le_len;
+ void *le_key;
+ int r = bn->data_buffer.fetch_klpair(idx, &le, &le_len, &le_key);
+ invariant_zero(r);
+ toku_fill_dbt(key, le_key, le_len);
+ return le;
+}
+
+static LEAFENTRY ft_leaf_leftmost_le_and_key(FTNODE leaf, DBT *leftmost_key)
+// Effect: If a leftmost key exists in the given leaf, toku_fill_dbt()
+// the key into *leftmost_key
+// Requires: Leaf is fully in memory and pinned for read or write.
+// Return: leafentry if it exists, nullptr otherwise
+{
+ for (int i = 0; i < leaf->n_children; i++) {
+ BASEMENTNODE bn = BLB(leaf, i);
+ if (bn->data_buffer.num_klpairs() > 0) {
+ // Get the first (leftmost) leafentry and its key
+ return bn_get_le_and_key(bn, 0, leftmost_key);
+ }
+ }
+ return nullptr;
+}
+
+static LEAFENTRY ft_leaf_rightmost_le_and_key(FTNODE leaf, DBT *rightmost_key)
+// Effect: If a rightmost key exists in the given leaf, toku_fill_dbt()
+// the key into *rightmost_key
+// Requires: Leaf is fully in memory and pinned for read or write.
+// Return: leafentry if it exists, nullptr otherwise
+{
+ for (int i = leaf->n_children - 1; i >= 0; i--) {
+ BASEMENTNODE bn = BLB(leaf, i);
+ size_t num_les = bn->data_buffer.num_klpairs();
+ if (num_les > 0) {
+ // Get the last (rightmost) leafentry and its key
+ return bn_get_le_and_key(bn, num_les - 1, rightmost_key);
+ }
+ }
+ return nullptr;
+}
+
+static int ft_leaf_get_relative_key_pos(FT ft, FTNODE leaf, const DBT *key, bool *nondeleted_key_found, int *target_childnum)
+// Effect: Determines what the relative position of the given key is with
+// respect to a leaf node, and if it exists.
+// Requires: Leaf is fully in memory and pinned for read or write.
+// Requires: target_childnum is non-null
+// Return: < 0 if key is less than the leftmost key in the leaf OR the relative position is unknown, for any reason.
+// 0 if key is in the bounds [leftmost_key, rightmost_key] for this leaf or the leaf is empty
+// > 0 if key is greater than the rightmost key in the leaf
+// *nondeleted_key_found is set (if non-null) if the target key was found and is not deleted, unmodified otherwise
+// *target_childnum is set to the child that (does or would) contain the key, if calculated, unmodified otherwise
+{
+ DBT rightmost_key;
+ LEAFENTRY rightmost_le = ft_leaf_rightmost_le_and_key(leaf, &rightmost_key);
+ if (rightmost_le == nullptr) {
+ // If we can't get a rightmost key then the leaf is empty.
+ // In such a case, we don't have any information about what keys would be in this leaf.
+ // We have to assume the leaf node that would contain this key is to the left.
+ return -1;
+ }
+ // We have a rightmost leafentry, so it must exist in some child node
+ invariant(leaf->n_children > 0);
+
+ int relative_pos = 0;
+ int c = ft_compare_keys(ft, key, &rightmost_key);
+ if (c > 0) {
+ relative_pos = 1;
+ *target_childnum = leaf->n_children - 1;
+ } else if (c == 0) {
+ if (nondeleted_key_found != nullptr && !le_latest_is_del(rightmost_le)) {
+ *nondeleted_key_found = true;
+ }
+ relative_pos = 0;
+ *target_childnum = leaf->n_children - 1;
+ } else {
+ // The key is less than the rightmost. It may still be in bounds if it's >= the leftmost.
+ DBT leftmost_key;
+ LEAFENTRY leftmost_le = ft_leaf_leftmost_le_and_key(leaf, &leftmost_key);
+ invariant_notnull(leftmost_le); // Must exist because a rightmost exists
+ c = ft_compare_keys(ft, key, &leftmost_key);
+ if (c > 0) {
+ if (nondeleted_key_found != nullptr) {
+ // The caller wants to know if a nondeleted key can be found.
+ LEAFENTRY target_le;
+ int childnum = toku_ftnode_which_child(leaf, key, ft->cmp);
+ BASEMENTNODE bn = BLB(leaf, childnum);
+ struct toku_msg_leafval_heaviside_extra extra(ft->cmp, key);
+ int r = bn->data_buffer.find_zero<decltype(extra), toku_msg_leafval_heaviside>(
+ extra,
+ &target_le,
+ nullptr, nullptr, nullptr
+ );
+ *target_childnum = childnum;
+ if (r == 0 && !le_latest_is_del(target_le)) {
+ *nondeleted_key_found = true;
+ }
+ }
+ relative_pos = 0;
+ } else if (c == 0) {
+ if (nondeleted_key_found != nullptr && !le_latest_is_del(leftmost_le)) {
+ *nondeleted_key_found = true;
+ }
+ relative_pos = 0;
+ *target_childnum = 0;
+ } else {
+ relative_pos = -1;
+ }
+ }
+
+ return relative_pos;
+}
+
+static void ft_insert_directly_into_leaf(FT ft, FTNODE leaf, int target_childnum, DBT *key, DBT *val,
+ XIDS message_xids, enum ft_msg_type type, txn_gc_info *gc_info);
+static int getf_nothing(uint32_t, const void *, uint32_t, const void *, void *, bool);
+
+static int ft_maybe_insert_into_rightmost_leaf(FT ft, DBT *key, DBT *val, XIDS message_xids, enum ft_msg_type type,
+ txn_gc_info *gc_info, bool unique)
+// Effect: Pins the rightmost leaf node and attempts to do an insert.
+// There are three reasons why we may not succeed.
+// - The rightmost leaf is too full and needs a split.
+// - The key to insert is not within the provable bounds of this leaf node.
+// - The key is within bounds, but it already exists.
+// Return: 0 if this function did insert, DB_KEYEXIST if a unique key constraint exists and
+// some nondeleted leafentry with the same key exists
+// < 0 if this function did not insert, for a reason other than DB_KEYEXIST.
+// Note: Treat this function as a possible, but not necessary, optimization for insert.
+// Rationale: We want O(1) insertions down the rightmost path of the tree.
+{
+ int r = -1;
+
+ uint32_t rightmost_fullhash;
+ BLOCKNUM rightmost_blocknum;
+ FTNODE rightmost_leaf = nullptr;
+
+ // Don't do the optimization if our heuristic suggests that the
+ // insertion pattern is not sequential.
+ if (toku_unsafe_fetch(&ft->seqinsert_score) < FT_SEQINSERT_SCORE_THRESHOLD) {
+ goto cleanup;
+ }
+
+ // We know the seqinsert score is high enough that we should
+ // attempt to directly insert into the rightmost leaf. Because
+ // the score is non-zero, the rightmost blocknum must have been
+ // set. See inject_message_in_locked_node(), which only increases
+ // the score if the target node blocknum == rightmost_blocknum
+ rightmost_blocknum = ft->rightmost_blocknum;
+ invariant(rightmost_blocknum.b != RESERVED_BLOCKNUM_NULL);
+
+ // Pin the rightmost leaf with a write lock.
+ rightmost_fullhash = toku_cachetable_hash(ft->cf, rightmost_blocknum);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ toku_pin_ftnode(ft, rightmost_blocknum, rightmost_fullhash, &bfe, PL_WRITE_CHEAP, &rightmost_leaf, true);
+
+ // The rightmost blocknum never changes once it is initialized to something
+ // other than null. Verify that the pinned node has the correct blocknum.
+ invariant(rightmost_leaf->blocknum.b == rightmost_blocknum.b);
+
+ // If the rightmost leaf is reactive, bail out and let the normal promotion pass
+ // take care of it. This also ensures that if any of our ancestors are reactive,
+ // they'll be taken care of too.
+ if (toku_ftnode_get_leaf_reactivity(rightmost_leaf, ft->h->nodesize) != RE_STABLE) {
+ FT_STATUS_INC(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE, 1);
+ goto cleanup;
+ }
+
+ // The groundwork has been laid for an insertion directly into the rightmost
+ // leaf node. We know that it is pinned for write, fully in memory, has
+ // no messages above it, and is not reactive.
+ //
+ // Now, two more things must be true for this insertion to actually happen:
+ // 1. The key to insert is within the bounds of this leafnode, or to the right.
+ // 2. If there is a uniqueness constraint, it passes.
+ bool nondeleted_key_found;
+ int relative_pos;
+ int target_childnum;
+
+ nondeleted_key_found = false;
+ target_childnum = -1;
+ relative_pos = ft_leaf_get_relative_key_pos(ft, rightmost_leaf, key,
+ unique ? &nondeleted_key_found : nullptr,
+ &target_childnum);
+ if (relative_pos >= 0) {
+ FT_STATUS_INC(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS, 1);
+ if (unique && nondeleted_key_found) {
+ r = DB_KEYEXIST;
+ } else {
+ ft_insert_directly_into_leaf(ft, rightmost_leaf, target_childnum,
+ key, val, message_xids, type, gc_info);
+ r = 0;
+ }
+ } else {
+ FT_STATUS_INC(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS, 1);
+ r = -1;
+ }
+
+cleanup:
+ // If we did the insert, the rightmost leaf was unpinned for us.
+ if (r != 0 && rightmost_leaf != nullptr) {
+ toku_unpin_ftnode(ft, rightmost_leaf);
+ }
+
+ return r;
+}
+
+static void ft_txn_log_insert(FT ft, DBT *key, DBT *val, TOKUTXN txn, bool do_logging, enum ft_msg_type type);
+
+int toku_ft_insert_unique(FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool do_logging) {
+// Effect: Insert a unique key-val pair into the fractal tree.
+// Return: 0 on success, DB_KEYEXIST if the overwrite constraint failed
+ XIDS message_xids = txn != nullptr ? toku_txn_get_xids(txn) : toku_xids_get_root_xids();
+
+ TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h);
+ txn_manager_state txn_state_for_gc(txn_manager);
+
+ TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h);
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_estimate,
+ // no messages above us, we can implicitly promote uxrs based on this xid
+ oldest_referenced_xid_estimate,
+ true);
+ int r = ft_maybe_insert_into_rightmost_leaf(ft_h->ft, key, val, message_xids, FT_INSERT, &gc_info, true);
+ if (r != 0 && r != DB_KEYEXIST) {
+ // Default to a regular unique check + insert algorithm if we couldn't
+ // do it based on the rightmost leaf alone.
+ int lookup_r = toku_ft_lookup(ft_h, key, getf_nothing, nullptr);
+ if (lookup_r == DB_NOTFOUND) {
+ toku_ft_send_insert(ft_h, key, val, message_xids, FT_INSERT, &gc_info);
+ r = 0;
+ } else {
+ r = DB_KEYEXIST;
+ }
+ }
+
+ if (r == 0) {
+ ft_txn_log_insert(ft_h->ft, key, val, txn, do_logging, FT_INSERT);
+ toku_ft_adjust_logical_row_count(ft_h->ft, 1);
+ }
+ return r;
+}
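+
+// A minimal usage sketch of toku_ft_insert_unique (illustrative only; `ft_h`,
+// `txn`, and the key/value buffers are assumed to exist, and toku_fill_dbt is
+// assumed to be a helper that points a DBT at an existing buffer):
+//
+//   DBT kdbt, vdbt;
+//   toku_fill_dbt(&kdbt, key_buf, key_len);
+//   toku_fill_dbt(&vdbt, val_buf, val_len);
+//   int r = toku_ft_insert_unique(ft_h, &kdbt, &vdbt, txn, true /* do_logging */);
+//   if (r == DB_KEYEXIST) {
+//       // a non-deleted value already exists for this key; report a duplicate
+//   } else {
+//       invariant_zero(r);
+//   }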
+
+// Effect: Insert the key-val pair into an ft.
+void toku_ft_insert (FT_HANDLE ft_handle, DBT *key, DBT *val, TOKUTXN txn) {
+ toku_ft_maybe_insert(ft_handle, key, val, txn, false, ZERO_LSN, true, FT_INSERT);
+}
+
+void toku_ft_load_recovery(TOKUTXN txn, FILENUM old_filenum, char const * new_iname, int do_fsync, int do_log, LSN *load_lsn) {
+ paranoid_invariant(txn);
+ toku_txn_force_fsync_on_commit(txn); //If the txn commits, the commit MUST be in the log
+ //before the (old) file is actually unlinked
+ TOKULOGGER logger = toku_txn_logger(txn);
+
+ BYTESTRING new_iname_bs = {.len=(uint32_t) strlen(new_iname), .data=(char*)new_iname};
+ toku_logger_save_rollback_load(txn, old_filenum, &new_iname_bs);
+ if (do_log && logger) {
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ toku_log_load(logger, load_lsn, do_fsync, txn, xid, old_filenum, new_iname_bs);
+ }
+}
+
+// 2954
+// this function handles the tasks needed to make a hot index operation recoverable
+// - write to rollback log
+// - write to recovery log
+void toku_ft_hot_index_recovery(TOKUTXN txn, FILENUMS filenums, int do_fsync, int do_log, LSN *hot_index_lsn)
+{
+ paranoid_invariant(txn);
+ TOKULOGGER logger = toku_txn_logger(txn);
+
+ // write to the rollback log
+ toku_logger_save_rollback_hot_index(txn, &filenums);
+ if (do_log && logger) {
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ // write to the recovery log
+ toku_log_hot_index(logger, hot_index_lsn, do_fsync, txn, xid, filenums);
+ }
+}
+
+// Effect: Optimize the ft.
+void toku_ft_optimize (FT_HANDLE ft_h) {
+ TOKULOGGER logger = toku_cachefile_logger(ft_h->ft->cf);
+ if (logger) {
+ TXNID oldest = toku_txn_manager_get_oldest_living_xid(logger->txn_manager);
+
+ XIDS root_xids = toku_xids_get_root_xids();
+ XIDS message_xids;
+ if (oldest == TXNID_NONE_LIVING) {
+ message_xids = root_xids;
+ }
+ else {
+ int r = toku_xids_create_child(root_xids, &message_xids, oldest);
+ invariant(r == 0);
+ }
+
+ DBT key;
+ DBT val;
+ toku_init_dbt(&key);
+ toku_init_dbt(&val);
+ ft_msg msg(&key, &val, FT_OPTIMIZE, ZERO_MSN, message_xids);
+
+ TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h);
+ txn_manager_state txn_state_for_gc(txn_manager);
+
+ TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h);
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_estimate,
+ // no messages above us, we can implicitly promote uxrs based on this xid
+ oldest_referenced_xid_estimate,
+ true);
+ toku_ft_root_put_msg(ft_h->ft, msg, &gc_info);
+ toku_xids_destroy(&message_xids);
+ }
+}
+
+void toku_ft_load(FT_HANDLE ft_handle, TOKUTXN txn, char const * new_iname, int do_fsync, LSN *load_lsn) {
+ FILENUM old_filenum = toku_cachefile_filenum(ft_handle->ft->cf);
+ int do_log = 1;
+ toku_ft_load_recovery(txn, old_filenum, new_iname, do_fsync, do_log, load_lsn);
+}
+
+// ft actions for logging hot index filenums
+void toku_ft_hot_index(FT_HANDLE ft_handle __attribute__ ((unused)), TOKUTXN txn, FILENUMS filenums, int do_fsync, LSN *lsn) {
+ int do_log = 1;
+ toku_ft_hot_index_recovery(txn, filenums, do_fsync, do_log, lsn);
+}
+
+void
+toku_ft_log_put (TOKUTXN txn, FT_HANDLE ft_handle, const DBT *key, const DBT *val) {
+ TOKULOGGER logger = toku_txn_logger(txn);
+ if (logger) {
+ BYTESTRING keybs = {.len=key->size, .data=(char *) key->data};
+ BYTESTRING valbs = {.len=val->size, .data=(char *) val->data};
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ toku_log_enq_insert(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft_handle->ft->cf), xid, keybs, valbs);
+ }
+}
+
+void
+toku_ft_log_put_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *fts, uint32_t num_fts, const DBT *key, const DBT *val) {
+ assert(txn);
+ assert(num_fts > 0);
+ TOKULOGGER logger = toku_txn_logger(txn);
+ if (logger) {
+ FILENUM fnums[num_fts];
+ uint32_t i;
+ for (i = 0; i < num_fts; i++) {
+ fnums[i] = toku_cachefile_filenum(fts[i]->ft->cf);
+ }
+ FILENUMS filenums = {.num = num_fts, .filenums = fnums};
+ BYTESTRING keybs = {.len=key->size, .data=(char *) key->data};
+ BYTESTRING valbs = {.len=val->size, .data=(char *) val->data};
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ FILENUM src_filenum = src_ft ? toku_cachefile_filenum(src_ft->ft->cf) : FILENUM_NONE;
+ toku_log_enq_insert_multiple(logger, (LSN*)0, 0, txn, src_filenum, filenums, xid, keybs, valbs);
+ }
+}
+
+TXN_MANAGER toku_ft_get_txn_manager(FT_HANDLE ft_h) {
+ TOKULOGGER logger = toku_cachefile_logger(ft_h->ft->cf);
+ return logger != nullptr ? toku_logger_get_txn_manager(logger) : nullptr;
+}
+
+TXNID toku_ft_get_oldest_referenced_xid_estimate(FT_HANDLE ft_h) {
+ TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h);
+ return txn_manager != nullptr ? toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager) : TXNID_NONE;
+}
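+
+// The write paths in this file (insert, update, delete) all build a txn_gc_info
+// the same way before sending a message toward a leaf. A minimal sketch of that
+// recurring pattern, using only the helpers defined above:
+//
+//   TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h);
+//   txn_manager_state txn_state_for_gc(txn_manager);
+//   TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h);
+//   // no messages above the injection point, so the same xid bounds both estimates
+//   txn_gc_info gc_info(&txn_state_for_gc,
+//                       oldest_referenced_xid_estimate,
+//                       oldest_referenced_xid_estimate,
+//                       txn != nullptr ? !txn->for_recovery : false);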
+
+static void ft_txn_log_insert(FT ft, DBT *key, DBT *val, TOKUTXN txn, bool do_logging, enum ft_msg_type type) {
+ paranoid_invariant(type == FT_INSERT || type == FT_INSERT_NO_OVERWRITE);
+
+ //By default use committed messages
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ if (txn) {
+ BYTESTRING keybs = {key->size, (char *) key->data};
+ toku_logger_save_rollback_cmdinsert(txn, toku_cachefile_filenum(ft->cf), &keybs);
+ toku_txn_maybe_note_ft(txn, ft);
+ }
+ TOKULOGGER logger = toku_txn_logger(txn);
+ if (do_logging && logger) {
+ BYTESTRING keybs = {.len=key->size, .data=(char *) key->data};
+ BYTESTRING valbs = {.len=val->size, .data=(char *) val->data};
+ if (type == FT_INSERT) {
+ toku_log_enq_insert(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft->cf), xid, keybs, valbs);
+ }
+ else {
+ toku_log_enq_insert_no_overwrite(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft->cf), xid, keybs, valbs);
+ }
+ }
+}
+
+void toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, enum ft_msg_type type) {
+ ft_txn_log_insert(ft_h->ft, key, val, txn, do_logging, type);
+
+ LSN treelsn;
+ if (oplsn_valid && oplsn.lsn <= (treelsn = toku_ft_checkpoint_lsn(ft_h->ft)).lsn) {
+ // do nothing
+ } else {
+ XIDS message_xids = txn ? toku_txn_get_xids(txn) : toku_xids_get_root_xids();
+
+ TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h);
+ txn_manager_state txn_state_for_gc(txn_manager);
+
+ TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h);
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_estimate,
+ // no messages above us, we can implicitly promote uxrs based on this xid
+ oldest_referenced_xid_estimate,
+ txn != nullptr ? !txn->for_recovery : false);
+ int r = ft_maybe_insert_into_rightmost_leaf(ft_h->ft, key, val, message_xids, FT_INSERT, &gc_info, false);
+ if (r != 0) {
+ toku_ft_send_insert(ft_h, key, val, message_xids, type, &gc_info);
+ }
+ toku_ft_adjust_logical_row_count(ft_h->ft, 1);
+ }
+}
+
+static void ft_insert_directly_into_leaf(FT ft, FTNODE leaf, int target_childnum, DBT *key, DBT *val,
+ XIDS message_xids, enum ft_msg_type type, txn_gc_info *gc_info)
+// Effect: Insert directly into a leaf node of a fractal tree. Does not do any logging.
+// Requires: Leaf is fully in memory and pinned for write.
+// Requires: If this insertion were to happen through the root node, the promotion
+// algorithm would have selected the given leaf node as the point of injection.
+// That means this function relies on the current implementation of promotion.
+{
+ ft_msg msg(key, val, type, ZERO_MSN, message_xids);
+ size_t flow_deltas[] = { 0, 0 };
+ inject_message_in_locked_node(ft, leaf, target_childnum, msg, flow_deltas, gc_info);
+}
+
+static void
+ft_send_update_msg(FT_HANDLE ft_h, const ft_msg &msg, TOKUTXN txn) {
+ TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h);
+ txn_manager_state txn_state_for_gc(txn_manager);
+
+ TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h);
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_estimate,
+ // no messages above us, we can implicitly promote uxrs based on this xid
+ oldest_referenced_xid_estimate,
+ txn != nullptr ? !txn->for_recovery : false);
+ toku_ft_root_put_msg(ft_h->ft, msg, &gc_info);
+}
+
+void toku_ft_maybe_update(FT_HANDLE ft_h,
+ const DBT *key,
+ const DBT *update_function_extra,
+ TOKUTXN txn,
+ bool oplsn_valid,
+ LSN oplsn,
+ bool do_logging) {
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ if (txn) {
+ BYTESTRING keybs = {key->size, (char *)key->data};
+ toku_logger_save_rollback_cmdupdate(
+ txn, toku_cachefile_filenum(ft_h->ft->cf), &keybs);
+ toku_txn_maybe_note_ft(txn, ft_h->ft);
+ }
+
+ TOKULOGGER logger;
+ logger = toku_txn_logger(txn);
+ if (do_logging && logger) {
+ BYTESTRING keybs = {.len = key->size, .data = (char *)key->data};
+ BYTESTRING extrabs = {.len = update_function_extra->size,
+ .data = (char *)update_function_extra->data};
+ toku_log_enq_update(logger,
+ NULL,
+ 0,
+ txn,
+ toku_cachefile_filenum(ft_h->ft->cf),
+ xid,
+ keybs,
+ extrabs);
+ }
+
+ LSN treelsn;
+ if (oplsn_valid &&
+ oplsn.lsn <= (treelsn = toku_ft_checkpoint_lsn(ft_h->ft)).lsn) {
+ // do nothing
+ } else {
+ XIDS message_xids =
+ txn ? toku_txn_get_xids(txn) : toku_xids_get_root_xids();
+ ft_msg msg(
+ key, update_function_extra, FT_UPDATE, ZERO_MSN, message_xids);
+ ft_send_update_msg(ft_h, msg, txn);
+ }
+ // updates get converted to insert messages, which should do a -1 on the
+ // logical row count when the messages are permanently applied
+ toku_ft_adjust_logical_row_count(ft_h->ft, 1);
+}
+
+void toku_ft_maybe_update_broadcast(FT_HANDLE ft_h, const DBT *update_function_extra,
+ TOKUTXN txn, bool oplsn_valid, LSN oplsn,
+ bool do_logging, bool is_resetting_op) {
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ uint8_t resetting = is_resetting_op ? 1 : 0;
+ if (txn) {
+ toku_logger_save_rollback_cmdupdatebroadcast(txn, toku_cachefile_filenum(ft_h->ft->cf), resetting);
+ toku_txn_maybe_note_ft(txn, ft_h->ft);
+ }
+
+ TOKULOGGER logger;
+ logger = toku_txn_logger(txn);
+ if (do_logging && logger) {
+ BYTESTRING extrabs = {.len=update_function_extra->size,
+ .data = (char *) update_function_extra->data};
+ toku_log_enq_updatebroadcast(logger, NULL, 0, txn,
+ toku_cachefile_filenum(ft_h->ft->cf),
+ xid, extrabs, resetting);
+ }
+
+ //TODO(yoni): remove treelsn here and similar calls (no longer being used)
+ LSN treelsn;
+ if (oplsn_valid &&
+ oplsn.lsn <= (treelsn = toku_ft_checkpoint_lsn(ft_h->ft)).lsn) {
+
+ } else {
+ DBT empty_dbt;
+ XIDS message_xids = txn ? toku_txn_get_xids(txn) : toku_xids_get_root_xids();
+ ft_msg msg(toku_init_dbt(&empty_dbt), update_function_extra, FT_UPDATE_BROADCAST_ALL, ZERO_MSN, message_xids);
+ ft_send_update_msg(ft_h, msg, txn);
+ }
+}
+
+void toku_ft_send_insert(FT_HANDLE ft_handle, DBT *key, DBT *val, XIDS xids, enum ft_msg_type type, txn_gc_info *gc_info) {
+ ft_msg msg(key, val, type, ZERO_MSN, xids);
+ toku_ft_root_put_msg(ft_handle->ft, msg, gc_info);
+}
+
+void toku_ft_send_commit_any(FT_HANDLE ft_handle, DBT *key, XIDS xids, txn_gc_info *gc_info) {
+ DBT val;
+ ft_msg msg(key, toku_init_dbt(&val), FT_COMMIT_ANY, ZERO_MSN, xids);
+ toku_ft_root_put_msg(ft_handle->ft, msg, gc_info);
+}
+
+void toku_ft_delete(FT_HANDLE ft_handle, DBT *key, TOKUTXN txn) {
+ toku_ft_maybe_delete(ft_handle, key, txn, false, ZERO_LSN, true);
+}
+
+void
+toku_ft_log_del(TOKUTXN txn, FT_HANDLE ft_handle, const DBT *key) {
+ TOKULOGGER logger = toku_txn_logger(txn);
+ if (logger) {
+ BYTESTRING keybs = {.len=key->size, .data=(char *) key->data};
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ toku_log_enq_delete_any(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft_handle->ft->cf), xid, keybs);
+ }
+}
+
+void
+toku_ft_log_del_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *fts, uint32_t num_fts, const DBT *key, const DBT *val) {
+ assert(txn);
+ assert(num_fts > 0);
+ TOKULOGGER logger = toku_txn_logger(txn);
+ if (logger) {
+ FILENUM fnums[num_fts];
+ uint32_t i;
+ for (i = 0; i < num_fts; i++) {
+ fnums[i] = toku_cachefile_filenum(fts[i]->ft->cf);
+ }
+ FILENUMS filenums = {.num = num_fts, .filenums = fnums};
+ BYTESTRING keybs = {.len=key->size, .data=(char *) key->data};
+ BYTESTRING valbs = {.len=val->size, .data=(char *) val->data};
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ FILENUM src_filenum = src_ft ? toku_cachefile_filenum(src_ft->ft->cf) : FILENUM_NONE;
+ toku_log_enq_delete_multiple(logger, (LSN*)0, 0, txn, src_filenum, filenums, xid, keybs, valbs);
+ }
+}
+
+void toku_ft_maybe_delete(FT_HANDLE ft_h, DBT *key, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging) {
+ XIDS message_xids = toku_xids_get_root_xids(); //By default use committed messages
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ if (txn) {
+ BYTESTRING keybs = {key->size, (char *) key->data};
+ toku_logger_save_rollback_cmddelete(txn, toku_cachefile_filenum(ft_h->ft->cf), &keybs);
+ toku_txn_maybe_note_ft(txn, ft_h->ft);
+ message_xids = toku_txn_get_xids(txn);
+ }
+ TOKULOGGER logger = toku_txn_logger(txn);
+ if (do_logging && logger) {
+ BYTESTRING keybs = {.len=key->size, .data=(char *) key->data};
+ toku_log_enq_delete_any(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft_h->ft->cf), xid, keybs);
+ }
+
+ LSN treelsn;
+ if (oplsn_valid && oplsn.lsn <= (treelsn = toku_ft_checkpoint_lsn(ft_h->ft)).lsn) {
+ // do nothing
+ } else {
+ TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h);
+ txn_manager_state txn_state_for_gc(txn_manager);
+
+ TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h);
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_estimate,
+ // no messages above us, we can implicitly promote uxrs based on this xid
+ oldest_referenced_xid_estimate,
+ txn != nullptr ? !txn->for_recovery : false);
+ toku_ft_send_delete(ft_h, key, message_xids, &gc_info);
+ toku_ft_adjust_logical_row_count(ft_h->ft, -1);
+ }
+}
+
+void toku_ft_send_delete(FT_HANDLE ft_handle, DBT *key, XIDS xids, txn_gc_info *gc_info) {
+ DBT val; toku_init_dbt(&val);
+ ft_msg msg(key, toku_init_dbt(&val), FT_DELETE_ANY, ZERO_MSN, xids);
+ toku_ft_root_put_msg(ft_handle->ft, msg, gc_info);
+}
+
+/* ******************** open,close and create ********************** */
+
+// Test only function (not used in running system). This one has no env
+int toku_open_ft_handle (const char *fname, int is_create, FT_HANDLE *ft_handle_p, int nodesize,
+ int basementnodesize,
+ enum toku_compression_method compression_method,
+ CACHETABLE cachetable, TOKUTXN txn,
+ int (*compare_fun)(DB *, const DBT*,const DBT*)) {
+ FT_HANDLE ft_handle;
+ const int only_create = 0;
+
+ toku_ft_handle_create(&ft_handle);
+ toku_ft_handle_set_nodesize(ft_handle, nodesize);
+ toku_ft_handle_set_basementnodesize(ft_handle, basementnodesize);
+ toku_ft_handle_set_compression_method(ft_handle, compression_method);
+ toku_ft_handle_set_fanout(ft_handle, 16);
+ toku_ft_set_bt_compare(ft_handle, compare_fun);
+
+ int r = toku_ft_handle_open(ft_handle, fname, is_create, only_create, cachetable, txn);
+ if (r != 0) {
+ return r;
+ }
+
+ *ft_handle_p = ft_handle;
+ return r;
+}
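+
+// Illustrative sketch of how a test might drive the helper above (the cachetable
+// `ct`, transaction `txn`, file name, and size choices are placeholders):
+//
+//   FT_HANDLE h = nullptr;
+//   int r = toku_open_ft_handle("test.ft", 1 /* is_create */, &h,
+//                               4 << 20 /* nodesize */, 128 << 10 /* basementnodesize */,
+//                               TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn,
+//                               toku_builtin_compare_fun);
+//   invariant_zero(r);
+//   // ... insert, search, etc. ...
+//   toku_ft_handle_close(h);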
+
+static bool use_direct_io = true;
+
+void toku_ft_set_direct_io (bool direct_io_on) {
+ use_direct_io = direct_io_on;
+}
+
+static inline int ft_open_maybe_direct(const char *filename,
+ int oflag,
+ int mode) {
+ if (use_direct_io) {
+ return toku_os_open_direct(
+ filename, oflag, mode, *tokudb_file_data_key);
+ } else {
+ return toku_os_open(filename, oflag, mode, *tokudb_file_data_key);
+ }
+}
+
+static const mode_t file_mode = S_IRUSR+S_IWUSR+S_IRGRP+S_IWGRP+S_IROTH+S_IWOTH;
+
+inline bool toku_file_is_root(const char *path, const char *last_slash) {
+ return last_slash == path;
+}
+
+static std::unique_ptr<char[], decltype(&toku_free)> toku_file_get_parent_dir(
+ const char *path) {
+ std::unique_ptr<char[], decltype(&toku_free)> result(nullptr, &toku_free);
+
+ bool has_trailing_slash = false;
+
+ /* Find the offset of the last slash */
+ const char *last_slash = strrchr(path, OS_PATH_SEPARATOR);
+
+ if (!last_slash) {
+ /* No slash in the path, return NULL */
+ return result;
+ }
+
+ /* Ok, there is a slash. Is there anything after it? */
+ if (static_cast<size_t>(last_slash - path + 1) == strlen(path)) {
+ has_trailing_slash = true;
+ }
+
+    /* Reduce repetitive slashes. */
+ while (last_slash > path && last_slash[-1] == OS_PATH_SEPARATOR) {
+ last_slash--;
+ }
+
+ /* Check for the root of a drive. */
+ if (toku_file_is_root(path, last_slash)) {
+ return result;
+ }
+
+ /* If a trailing slash prevented the first strrchr() from trimming
+ the last component of the path, trim that component now. */
+ if (has_trailing_slash) {
+ /* Back up to the previous slash. */
+ last_slash--;
+ while (last_slash > path && last_slash[0] != OS_PATH_SEPARATOR) {
+ last_slash--;
+ }
+
+        /* Reduce repetitive slashes. */
+ while (last_slash > path && last_slash[-1] == OS_PATH_SEPARATOR) {
+ last_slash--;
+ }
+ }
+
+ /* Check for the root of a drive. */
+ if (toku_file_is_root(path, last_slash)) {
+ return result;
+ }
+
+ result.reset(toku_strndup(path, last_slash - path));
+ return result;
+}
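+
+// Expected behavior of the parent-directory helper above, assuming
+// OS_PATH_SEPARATOR is '/' (paths are illustrative):
+//
+//   toku_file_get_parent_dir("/data/env/dir/file.ft")  -> "/data/env/dir"
+//   toku_file_get_parent_dir("/data/env/dir/")         -> "/data/env"   (trailing slash trimmed)
+//   toku_file_get_parent_dir("file.ft")                -> empty result  (no slash in the path)
+//   toku_file_get_parent_dir("/file.ft")               -> empty result  (parent would be the root)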
+
+bool toku_create_subdirs_if_needed(const char *path) {
+ static const mode_t dir_mode = S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP |
+ S_IWGRP | S_IXGRP | S_IROTH | S_IXOTH;
+
+ toku_struct_stat stat;
+ bool subdir_exists = true;
+ auto subdir = toku_file_get_parent_dir(path);
+
+ if (!subdir.get())
+ return true;
+
+ if (toku_stat(subdir.get(), &stat, toku_uninstrumented) == -1) {
+ if (ENOENT == get_error_errno())
+ subdir_exists = false;
+ else
+ return false;
+ }
+
+ if (subdir_exists) {
+ if (!S_ISDIR(stat.st_mode))
+ return false;
+ return true;
+ }
+
+ if (!toku_create_subdirs_if_needed(subdir.get()))
+ return false;
+
+ if (toku_os_mkdir(subdir.get(), dir_mode))
+ return false;
+
+ return true;
+}
+
+// open a file for use by the ft
+// Requires: File does not exist.
+static int ft_create_file(FT_HANDLE UU(ft_handle), const char *fname, int *fdp) {
+ int r;
+ int fd;
+ int er;
+ if (!toku_create_subdirs_if_needed(fname))
+ return get_error_errno();
+ fd = ft_open_maybe_direct(fname, O_RDWR | O_BINARY, file_mode);
+ assert(fd==-1);
+ if ((er = get_maybe_error_errno()) != ENOENT) {
+ return er;
+ }
+ fd = ft_open_maybe_direct(fname, O_RDWR | O_CREAT | O_BINARY, file_mode);
+ if (fd==-1) {
+ r = get_error_errno();
+ return r;
+ }
+
+ r = toku_fsync_directory(fname);
+ if (r == 0) {
+ *fdp = fd;
+ } else {
+ int rr = close(fd);
+ assert_zero(rr);
+ }
+ return r;
+}
+
+// open a file for use by the ft. if the file does not exist, error
+static int ft_open_file(const char *fname, int *fdp, bool rw) {
+ int fd;
+ fd = ft_open_maybe_direct(fname, (rw ? O_RDWR : O_RDONLY) | O_BINARY, file_mode);
+ if (fd==-1) {
+ return get_error_errno();
+ }
+ *fdp = fd;
+ return 0;
+}
+
+void
+toku_ft_handle_set_compression_method(FT_HANDLE t, enum toku_compression_method method)
+{
+ if (t->ft) {
+ toku_ft_set_compression_method(t->ft, method);
+ }
+ else {
+ t->options.compression_method = method;
+ }
+}
+
+void
+toku_ft_handle_get_compression_method(FT_HANDLE t, enum toku_compression_method *methodp)
+{
+ if (t->ft) {
+ toku_ft_get_compression_method(t->ft, methodp);
+ }
+ else {
+ *methodp = t->options.compression_method;
+ }
+}
+
+void
+toku_ft_handle_set_fanout(FT_HANDLE ft_handle, unsigned int fanout)
+{
+ if (ft_handle->ft) {
+ toku_ft_set_fanout(ft_handle->ft, fanout);
+ }
+ else {
+ ft_handle->options.fanout = fanout;
+ }
+}
+
+void
+toku_ft_handle_get_fanout(FT_HANDLE ft_handle, unsigned int *fanout)
+{
+ if (ft_handle->ft) {
+ toku_ft_get_fanout(ft_handle->ft, fanout);
+ }
+ else {
+ *fanout = ft_handle->options.fanout;
+ }
+}
+
+// The memcmp magic byte may be set on a per fractal tree basis to communicate
+// that if two keys begin with this byte, they may be compared with the builtin
+// key comparison function. This greatly optimizes certain in-memory workloads,
+// such as lookups by OID primary key in TokuMX.
+int toku_ft_handle_set_memcmp_magic(FT_HANDLE ft_handle, uint8_t magic) {
+ if (magic == comparator::MEMCMP_MAGIC_NONE) {
+ return EINVAL;
+ }
+ if (ft_handle->ft != nullptr) {
+ // if the handle is already open, then we cannot set the memcmp magic
+ // (because it may or may not have been set by someone else already)
+ return EINVAL;
+ }
+ ft_handle->options.memcmp_magic = magic;
+ return 0;
+}
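+
+// Illustrative sketch of the memcmp-magic fast path described above (the magic
+// value and key bytes shown are hypothetical):
+//
+//   int r = toku_ft_handle_set_memcmp_magic(ft_handle, 0x01);  // before the handle is opened
+//   invariant_zero(r);
+//   // With the magic set, keys such as { 0x01, 0x00, 0x05 } and { 0x01, 0x00, 0x09 }
+//   // both start with the magic byte, so they may be ordered by plain byte-wise
+//   // comparison instead of calling the user-supplied comparator.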
+
+static int
+verify_builtin_comparisons_consistent(FT_HANDLE t, uint32_t flags) {
+ if ((flags & TOKU_DB_KEYCMP_BUILTIN) && (t->options.compare_fun != toku_builtin_compare_fun)) {
+ return EINVAL;
+ }
+ return 0;
+}
+
+//
+// See comments in toku_db_change_descriptor to understand invariants
+// in the system when this function is called
+//
+void toku_ft_change_descriptor(
+ FT_HANDLE ft_h,
+ const DBT* old_descriptor,
+ const DBT* new_descriptor,
+ bool do_log,
+ TOKUTXN txn,
+ bool update_cmp_descriptor
+ )
+{
+ DESCRIPTOR_S new_d;
+
+ // if running with txns, save to rollback + write to recovery log
+ if (txn) {
+ // put information into rollback file
+ BYTESTRING old_desc_bs = { old_descriptor->size, (char *) old_descriptor->data };
+ BYTESTRING new_desc_bs = { new_descriptor->size, (char *) new_descriptor->data };
+ toku_logger_save_rollback_change_fdescriptor(
+ txn,
+ toku_cachefile_filenum(ft_h->ft->cf),
+ &old_desc_bs
+ );
+ toku_txn_maybe_note_ft(txn, ft_h->ft);
+
+ if (do_log) {
+ TOKULOGGER logger = toku_txn_logger(txn);
+ TXNID_PAIR xid = toku_txn_get_txnid(txn);
+ toku_log_change_fdescriptor(
+ logger, NULL, 0,
+ txn,
+ toku_cachefile_filenum(ft_h->ft->cf),
+ xid,
+ old_desc_bs,
+ new_desc_bs,
+ update_cmp_descriptor
+ );
+ }
+ }
+
+ // write new_descriptor to header
+ new_d.dbt = *new_descriptor;
+ toku_ft_update_descriptor(ft_h->ft, &new_d);
+ // very infrequent operation, worth precise threadsafe count
+ FT_STATUS_INC(FT_DESCRIPTOR_SET, 1);
+
+ if (update_cmp_descriptor) {
+ toku_ft_update_cmp_descriptor(ft_h->ft);
+ }
+}
+
+static void
+toku_ft_handle_inherit_options(FT_HANDLE t, FT ft) {
+ struct ft_options options = {
+ .nodesize = ft->h->nodesize,
+ .basementnodesize = ft->h->basementnodesize,
+ .compression_method = ft->h->compression_method,
+ .fanout = ft->h->fanout,
+ .flags = ft->h->flags,
+ .memcmp_magic = ft->cmp.get_memcmp_magic(),
+ .compare_fun = ft->cmp.get_compare_func(),
+ .update_fun = ft->update_fun
+ };
+ t->options = options;
+ t->did_set_flags = true;
+}
+
+// This is the actual open, used for various purposes, such as normal use, recovery, and redirect.
+// fname_in_env is the iname, relative to the env_dir (data_dir is already in iname as prefix).
+// The checkpointed version (checkpoint_lsn) of the dictionary must be no later than max_acceptable_lsn.
+// Requires: The multi-operation client lock must be held to prevent a checkpoint from occurring.
+static int
+ft_handle_open(FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn, FILENUM use_filenum, DICTIONARY_ID use_dictionary_id, LSN max_acceptable_lsn, bool open_rw = true) {
+ int r;
+ bool txn_created = false;
+ char *fname_in_cwd = NULL;
+ CACHEFILE cf = NULL;
+ FT ft = NULL;
+ bool did_create = false;
+ bool was_already_open = false;
+
+ toku_ft_open_close_lock();
+
+ if (ft_h->did_set_flags) {
+ r = verify_builtin_comparisons_consistent(ft_h, ft_h->options.flags);
+ if (r!=0) { goto exit; }
+ }
+
+ assert(is_create || !only_create);
+ FILENUM reserved_filenum;
+ reserved_filenum = use_filenum;
+ fname_in_cwd = toku_cachetable_get_fname_in_cwd(cachetable, fname_in_env);
+ {
+ int fd = -1;
+ r = ft_open_file(fname_in_cwd, &fd, open_rw);
+ if (reserved_filenum.fileid == FILENUM_NONE.fileid) {
+ reserved_filenum = toku_cachetable_reserve_filenum(cachetable);
+ }
+ if (r==ENOENT && is_create) {
+ did_create = true;
+ if (txn) {
+ BYTESTRING bs = { .len=(uint32_t) strlen(fname_in_env), .data = (char*)fname_in_env };
+ toku_logger_save_rollback_fcreate(txn, reserved_filenum, &bs); // bs is a copy of the fname relative to the environment
+ }
+ txn_created = (bool)(txn!=NULL);
+ toku_logger_log_fcreate(txn, fname_in_env, reserved_filenum, file_mode, ft_h->options.flags, ft_h->options.nodesize, ft_h->options.basementnodesize, ft_h->options.compression_method);
+ r = ft_create_file(ft_h, fname_in_cwd, &fd);
+ if (r) { goto exit; }
+ }
+ if (r) { goto exit; }
+ r=toku_cachetable_openfd_with_filenum(&cf, cachetable, fd, fname_in_env, reserved_filenum, &was_already_open);
+ if (r) { goto exit; }
+ }
+ assert(ft_h->options.nodesize>0);
+ if (is_create) {
+ r = toku_read_ft_and_store_in_cachefile(ft_h, cf, max_acceptable_lsn, &ft);
+ if (r==TOKUDB_DICTIONARY_NO_HEADER) {
+ toku_ft_create(&ft, &ft_h->options, cf, txn);
+ }
+ else if (r!=0) {
+ goto exit;
+ }
+ else if (only_create) {
+ assert_zero(r);
+ r = EEXIST;
+ goto exit;
+ }
+        // if we get here, then is_create was true but only_create was false,
+        // so it is ok for toku_read_ft_and_store_in_cachefile to have
+        // successfully read an existing header.
+ } else {
+ r = toku_read_ft_and_store_in_cachefile(ft_h, cf, max_acceptable_lsn, &ft);
+ if (r) { goto exit; }
+ }
+ if (!ft_h->did_set_flags) {
+ r = verify_builtin_comparisons_consistent(ft_h, ft_h->options.flags);
+ if (r) { goto exit; }
+ } else if (ft_h->options.flags != ft->h->flags) { /* if flags have been set then flags must match */
+ r = EINVAL;
+ goto exit;
+ }
+
+ // Ensure that the memcmp magic bits are consistent, if set.
+ if (ft->cmp.get_memcmp_magic() != toku::comparator::MEMCMP_MAGIC_NONE &&
+ ft_h->options.memcmp_magic != toku::comparator::MEMCMP_MAGIC_NONE &&
+ ft_h->options.memcmp_magic != ft->cmp.get_memcmp_magic()) {
+ r = EINVAL;
+ goto exit;
+ }
+ toku_ft_handle_inherit_options(ft_h, ft);
+
+ if (!was_already_open) {
+ if (!did_create) { //Only log the fopen that OPENs the file. If it was already open, don't log.
+ toku_logger_log_fopen(txn, fname_in_env, toku_cachefile_filenum(cf), ft_h->options.flags);
+ }
+ }
+ int use_reserved_dict_id;
+ use_reserved_dict_id = use_dictionary_id.dictid != DICTIONARY_ID_NONE.dictid;
+ if (!was_already_open) {
+ DICTIONARY_ID dict_id;
+ if (use_reserved_dict_id) {
+ dict_id = use_dictionary_id;
+ }
+ else {
+ dict_id = next_dict_id();
+ }
+ ft->dict_id = dict_id;
+ }
+ else {
+ // dict_id is already in header
+ if (use_reserved_dict_id) {
+ assert(ft->dict_id.dictid == use_dictionary_id.dictid);
+ }
+ }
+ assert(ft);
+ assert(ft->dict_id.dictid != DICTIONARY_ID_NONE.dictid);
+ assert(ft->dict_id.dictid < dict_id_serial);
+
+    // Important: after this point, where we associate the header with the
+    // ft_handle, the function is not allowed to fail. The error-handling
+    // code below the "exit" label depends on this.
+ toku_ft_note_ft_handle_open(ft, ft_h);
+ if (txn_created) {
+ assert(txn);
+ toku_txn_maybe_note_ft(txn, ft);
+ }
+
+ // Opening an ft may restore to previous checkpoint.
+ // Truncate if necessary.
+ {
+ int fd = toku_cachefile_get_fd (ft->cf);
+ ft->blocktable.maybe_truncate_file_on_open(fd);
+ }
+
+ r = 0;
+exit:
+ if (fname_in_cwd) {
+ toku_free(fname_in_cwd);
+ }
+ if (r != 0 && cf) {
+ if (ft) {
+ // we only call toku_ft_note_ft_handle_open
+ // when the function succeeds, so if we are here,
+ // then that means we have a reference to the header
+ // but we have not linked it to this ft. So,
+ // we can simply try to remove the header.
+ // We don't need to unlink this ft from the header
+ toku_ft_grab_reflock(ft);
+ bool needed = toku_ft_needed_unlocked(ft);
+ toku_ft_release_reflock(ft);
+ if (!needed) {
+ // close immediately.
+ toku_ft_evict_from_memory(ft, false, ZERO_LSN);
+ }
+ }
+ else {
+ toku_cachefile_close(&cf, false, ZERO_LSN);
+ }
+ }
+ toku_ft_open_close_unlock();
+ return r;
+}
+
+// Open an ft for the purpose of recovery, which requires that the ft be open to a pre-determined FILENUM
+// and may require a specific checkpointed version of the file.
+// (dict_id is assigned by the ft_handle_open() function.)
+int
+toku_ft_handle_open_recovery(FT_HANDLE t, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn, FILENUM use_filenum, LSN max_acceptable_lsn) {
+ int r;
+ assert(use_filenum.fileid != FILENUM_NONE.fileid);
+ r = ft_handle_open(t, fname_in_env, is_create, only_create, cachetable,
+ txn, use_filenum, DICTIONARY_ID_NONE, max_acceptable_lsn);
+ return r;
+}
+
+// Open an ft in normal use. The FILENUM and dict_id are assigned by the ft_handle_open() function.
+// Requires: The multi-operation client lock must be held to prevent a checkpoint from occurring.
+int
+toku_ft_handle_open(FT_HANDLE t, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn, bool open_rw) {
+ int r;
+ r = ft_handle_open(t, fname_in_env, is_create, only_create, cachetable, txn, FILENUM_NONE, DICTIONARY_ID_NONE, MAX_LSN, open_rw);
+ return r;
+}
+
+// clone an ft handle. the cloned handle has a new dict_id but refers to the same fractal tree
+int
+toku_ft_handle_clone(FT_HANDLE *cloned_ft_handle, FT_HANDLE ft_handle, TOKUTXN txn, bool open_rw) {
+ FT_HANDLE result_ft_handle;
+ toku_ft_handle_create(&result_ft_handle);
+
+ // we're cloning, so the handle better have an open ft and open cf
+ invariant(ft_handle->ft);
+ invariant(ft_handle->ft->cf);
+
+ // inherit the options of the ft whose handle is being cloned.
+ toku_ft_handle_inherit_options(result_ft_handle, ft_handle->ft);
+
+ // we can clone the handle by creating a new handle with the same fname
+ CACHEFILE cf = ft_handle->ft->cf;
+ CACHETABLE ct = toku_cachefile_get_cachetable(cf);
+ const char *fname_in_env = toku_cachefile_fname_in_env(cf);
+ int r = toku_ft_handle_open(result_ft_handle, fname_in_env, false, false, ct, txn, open_rw);
+ if (r != 0) {
+ toku_ft_handle_close(result_ft_handle);
+ result_ft_handle = NULL;
+ }
+ *cloned_ft_handle = result_ft_handle;
+ return r;
+}
+
+// Open an ft in normal use. The FILENUM and dict_id are assigned by the ft_handle_open() function.
+int
+toku_ft_handle_open_with_dict_id(
+ FT_HANDLE t,
+ const char *fname_in_env,
+ int is_create,
+ int only_create,
+ CACHETABLE cachetable,
+ TOKUTXN txn,
+ DICTIONARY_ID use_dictionary_id
+ )
+{
+ int r;
+ r = ft_handle_open(
+ t,
+ fname_in_env,
+ is_create,
+ only_create,
+ cachetable,
+ txn,
+ FILENUM_NONE,
+ use_dictionary_id,
+ MAX_LSN
+ );
+ return r;
+}
+
+DICTIONARY_ID
+toku_ft_get_dictionary_id(FT_HANDLE ft_handle) {
+ FT ft = ft_handle->ft;
+ return ft->dict_id;
+}
+
+void toku_ft_set_flags(FT_HANDLE ft_handle, unsigned int flags) {
+ ft_handle->did_set_flags = true;
+ ft_handle->options.flags = flags;
+}
+
+void toku_ft_get_flags(FT_HANDLE ft_handle, unsigned int *flags) {
+ *flags = ft_handle->options.flags;
+}
+
+void toku_ft_get_maximum_advised_key_value_lengths (unsigned int *max_key_len, unsigned int *max_val_len)
+// return the maximum advisable key value lengths. The ft doesn't enforce these.
+{
+ *max_key_len = 32*1024;
+ *max_val_len = 32*1024*1024;
+}
+
+
+void toku_ft_handle_set_nodesize(FT_HANDLE ft_handle, unsigned int nodesize) {
+ if (ft_handle->ft) {
+ toku_ft_set_nodesize(ft_handle->ft, nodesize);
+ }
+ else {
+ ft_handle->options.nodesize = nodesize;
+ }
+}
+
+void toku_ft_handle_get_nodesize(FT_HANDLE ft_handle, unsigned int *nodesize) {
+ if (ft_handle->ft) {
+ toku_ft_get_nodesize(ft_handle->ft, nodesize);
+ }
+ else {
+ *nodesize = ft_handle->options.nodesize;
+ }
+}
+
+void toku_ft_handle_set_basementnodesize(FT_HANDLE ft_handle, unsigned int basementnodesize) {
+ if (ft_handle->ft) {
+ toku_ft_set_basementnodesize(ft_handle->ft, basementnodesize);
+ }
+ else {
+ ft_handle->options.basementnodesize = basementnodesize;
+ }
+}
+
+void toku_ft_handle_get_basementnodesize(FT_HANDLE ft_handle, unsigned int *basementnodesize) {
+ if (ft_handle->ft) {
+ toku_ft_get_basementnodesize(ft_handle->ft, basementnodesize);
+ }
+ else {
+ *basementnodesize = ft_handle->options.basementnodesize;
+ }
+}
+
+void toku_ft_set_bt_compare(FT_HANDLE ft_handle, int (*bt_compare)(DB*, const DBT*, const DBT*)) {
+ ft_handle->options.compare_fun = bt_compare;
+}
+
+void toku_ft_set_redirect_callback(FT_HANDLE ft_handle, on_redirect_callback redir_cb, void* extra) {
+ ft_handle->redirect_callback = redir_cb;
+ ft_handle->redirect_callback_extra = extra;
+}
+
+void toku_ft_set_update(FT_HANDLE ft_handle, ft_update_func update_fun) {
+ ft_handle->options.update_fun = update_fun;
+}
+
+const toku::comparator &toku_ft_get_comparator(FT_HANDLE ft_handle) {
+ invariant_notnull(ft_handle->ft);
+ return ft_handle->ft->cmp;
+}
+
+static void
+ft_remove_handle_ref_callback(FT UU(ft), void *extra) {
+ FT_HANDLE CAST_FROM_VOIDP(handle, extra);
+ toku_list_remove(&handle->live_ft_handle_link);
+}
+
+static void ft_handle_close(FT_HANDLE ft_handle, bool oplsn_valid, LSN oplsn) {
+ FT ft = ft_handle->ft;
+ // There are error paths in the ft_handle_open that end with ft_handle->ft == nullptr.
+ if (ft != nullptr) {
+ toku_ft_remove_reference(ft, oplsn_valid, oplsn, ft_remove_handle_ref_callback, ft_handle);
+ }
+ toku_free(ft_handle);
+}
+
+// close an ft handle during normal operation. the underlying ft may or may not close,
+// depending if there are still references. an lsn for this close will come from the logger.
+void toku_ft_handle_close(FT_HANDLE ft_handle) {
+ ft_handle_close(ft_handle, false, ZERO_LSN);
+}
+
+// close an ft handle during recovery. the underlying ft must close, and will use the given lsn.
+void toku_ft_handle_close_recovery(FT_HANDLE ft_handle, LSN oplsn) {
+ // the ft must exist if closing during recovery. error paths during
+ // open for recovery should close handles using toku_ft_handle_close()
+ invariant_notnull(ft_handle->ft);
+ ft_handle_close(ft_handle, true, oplsn);
+}
+
+// TODO: remove this, callers should instead just use toku_ft_handle_close()
+int toku_close_ft_handle_nolsn(FT_HANDLE ft_handle, char **UU(error_string)) {
+ toku_ft_handle_close(ft_handle);
+ return 0;
+}
+
+void toku_ft_handle_create(FT_HANDLE *ft_handle_ptr) {
+ FT_HANDLE XMALLOC(ft_handle);
+ memset(ft_handle, 0, sizeof *ft_handle);
+ toku_list_init(&ft_handle->live_ft_handle_link);
+ ft_handle->options.flags = 0;
+ ft_handle->did_set_flags = false;
+ ft_handle->options.nodesize = FT_DEFAULT_NODE_SIZE;
+ ft_handle->options.basementnodesize = FT_DEFAULT_BASEMENT_NODE_SIZE;
+ ft_handle->options.compression_method = TOKU_DEFAULT_COMPRESSION_METHOD;
+ ft_handle->options.fanout = FT_DEFAULT_FANOUT;
+ ft_handle->options.compare_fun = toku_builtin_compare_fun;
+ ft_handle->options.update_fun = NULL;
+ *ft_handle_ptr = ft_handle;
+}
+
+/******************************* search ***************************************/
+
+// Return true if this key is within the search bound. If there is no search bound then the tree search continues.
+static bool search_continue(ft_search *search, void *key, uint32_t key_len) {
+ bool result = true;
+ if (search->direction == FT_SEARCH_LEFT && search->k_bound) {
+ FT_HANDLE CAST_FROM_VOIDP(ft_handle, search->context);
+ DBT this_key = { .data = key, .size = key_len };
+ // search continues if this key <= key bound
+ result = (ft_handle->ft->cmp(&this_key, search->k_bound) <= 0);
+ }
+ return result;
+}
+
+static int heaviside_from_search_t(const DBT &kdbt, ft_search &search) {
+ int cmp = search.compare(search,
+ search.k ? &kdbt : 0);
+ // The search->compare function returns only 0 or 1
+ switch (search.direction) {
+ case FT_SEARCH_LEFT: return cmp==0 ? -1 : +1;
+ case FT_SEARCH_RIGHT: return cmp==0 ? +1 : -1; // Because the comparison runs backwards for right searches.
+ }
+ abort(); return 0;
+}
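+
+// A concrete example of the mapping above, for a left-to-right search whose
+// compare function returns 1 for keys >= the target (values are illustrative):
+//
+//   target = 50
+//   key 30: search.compare(...) == 0  ->  heaviside returns -1
+//   key 50: search.compare(...) == 1  ->  heaviside returns +1
+//   key 70: search.compare(...) == 1  ->  heaviside returns +1
+//
+// so a find() with direction +1 locates the leftmost key whose heaviside value
+// is positive, i.e. the first key >= the target.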
+
+// This is a bottom layer of the search functions.
+static int
+ft_search_basement_node(
+ BASEMENTNODE bn,
+ ft_search *search,
+ FT_GET_CALLBACK_FUNCTION getf,
+ void *getf_v,
+ bool *doprefetch,
+ FT_CURSOR ftcursor,
+ bool can_bulk_fetch
+ )
+{
+ // Now we have to convert from ft_search to the heaviside function with a direction. What a pain...
+
+ int direction;
+ switch (search->direction) {
+ case FT_SEARCH_LEFT: direction = +1; goto ok;
+ case FT_SEARCH_RIGHT: direction = -1; goto ok;
+ }
+ return EINVAL; // This return and the goto are a hack to get both compile-time and run-time checking on enum
+ok: ;
+ uint32_t idx = 0;
+ LEAFENTRY le;
+ uint32_t keylen;
+ void *key;
+ int r = bn->data_buffer.find<decltype(*search), heaviside_from_search_t>(
+ *search,
+ direction,
+ &le,
+ &key,
+ &keylen,
+ &idx
+ );
+ if (r!=0) return r;
+
+ if (toku_ft_cursor_is_leaf_mode(ftcursor))
+ goto got_a_good_value; // leaf mode cursors see all leaf entries
+ if (le_val_is_del(le, ftcursor->read_type, ftcursor->ttxn)) {
+ // Provisionally deleted stuff is gone.
+ // So we need to scan in the direction to see if we can find something.
+        // Every 64 deleted leaf entries, check whether the leaf's key is still within the search bounds.
+ for (uint64_t n_deleted = 1; ; n_deleted++) {
+ switch (search->direction) {
+ case FT_SEARCH_LEFT:
+ idx++;
+ if (idx >= bn->data_buffer.num_klpairs() || ((n_deleted % 64) == 0 && !search_continue(search, key, keylen))) {
+ FT_STATUS_INC(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, n_deleted);
+ if (ftcursor->interrupt_cb && ftcursor->interrupt_cb(ftcursor->interrupt_cb_extra, n_deleted)) {
+ return TOKUDB_INTERRUPTED;
+ }
+ return DB_NOTFOUND;
+ }
+ break;
+ case FT_SEARCH_RIGHT:
+ if (idx == 0) {
+ FT_STATUS_INC(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, n_deleted);
+ if (ftcursor->interrupt_cb && ftcursor->interrupt_cb(ftcursor->interrupt_cb_extra, n_deleted)) {
+ return TOKUDB_INTERRUPTED;
+ }
+ return DB_NOTFOUND;
+ }
+ idx--;
+ break;
+ default:
+ abort();
+ }
+ r = bn->data_buffer.fetch_klpair(idx, &le, &keylen, &key);
+ assert_zero(r); // we just validated the index
+ if (!le_val_is_del(le, ftcursor->read_type, ftcursor->ttxn)) {
+ FT_STATUS_INC(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, n_deleted);
+ if (ftcursor->interrupt_cb)
+ ftcursor->interrupt_cb(ftcursor->interrupt_cb_extra, n_deleted);
+ goto got_a_good_value;
+ }
+ }
+ }
+got_a_good_value:
+ {
+ uint32_t vallen;
+ void *val;
+
+ le_extract_val(le, toku_ft_cursor_is_leaf_mode(ftcursor),
+ ftcursor->read_type, ftcursor->ttxn,
+ &vallen, &val);
+ r = toku_ft_cursor_check_restricted_range(ftcursor, key, keylen);
+ if (r == 0) {
+ r = getf(keylen, key, vallen, val, getf_v, false);
+ }
+ if (r == 0 || r == TOKUDB_CURSOR_CONTINUE) {
+ //
+ // IMPORTANT: bulk fetch CANNOT go past the current basement node,
+ // because there is no guarantee that messages have been applied
+ // to other basement nodes, as part of #5770
+ //
+ if (r == TOKUDB_CURSOR_CONTINUE && can_bulk_fetch) {
+ r = toku_ft_cursor_shortcut(ftcursor, direction, idx, &bn->data_buffer,
+ getf, getf_v, &keylen, &key, &vallen, &val);
+ }
+
+ toku_destroy_dbt(&ftcursor->key);
+ toku_destroy_dbt(&ftcursor->val);
+ if (!ftcursor->is_temporary) {
+ toku_memdup_dbt(&ftcursor->key, key, keylen);
+ toku_memdup_dbt(&ftcursor->val, val, vallen);
+ }
+ // The search was successful. Prefetching can continue.
+ *doprefetch = true;
+ }
+ }
+ if (r == TOKUDB_CURSOR_CONTINUE) r = 0;
+ return r;
+}
+
+static int
+ft_search_node (
+ FT_HANDLE ft_handle,
+ FTNODE node,
+ ft_search *search,
+ int child_to_search,
+ FT_GET_CALLBACK_FUNCTION getf,
+ void *getf_v,
+ bool *doprefetch,
+ FT_CURSOR ftcursor,
+ UNLOCKERS unlockers,
+ ANCESTORS,
+ const pivot_bounds &bounds,
+ bool can_bulk_fetch
+ );
+
+static int
+ftnode_fetch_callback_and_free_bfe(CACHEFILE cf, PAIR p, int fd, BLOCKNUM blocknum, uint32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int *dirtyp, void *extraargs)
+{
+ int r = toku_ftnode_fetch_callback(cf, p, fd, blocknum, fullhash, ftnode_pv, disk_data, sizep, dirtyp, extraargs);
+ ftnode_fetch_extra *CAST_FROM_VOIDP(bfe, extraargs);
+ bfe->destroy();
+ toku_free(bfe);
+ return r;
+}
+
+static int
+ftnode_pf_callback_and_free_bfe(void *ftnode_pv, void* disk_data, void *read_extraargs, int fd, PAIR_ATTR *sizep)
+{
+ int r = toku_ftnode_pf_callback(ftnode_pv, disk_data, read_extraargs, fd, sizep);
+ ftnode_fetch_extra *CAST_FROM_VOIDP(bfe, read_extraargs);
+ bfe->destroy();
+ toku_free(bfe);
+ return r;
+}
+
+CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_node(FT ft) {
+ CACHETABLE_WRITE_CALLBACK wc;
+ wc.flush_callback = toku_ftnode_flush_callback;
+ wc.pe_est_callback = toku_ftnode_pe_est_callback;
+ wc.pe_callback = toku_ftnode_pe_callback;
+ wc.cleaner_callback = toku_ftnode_cleaner_callback;
+ wc.clone_callback = toku_ftnode_clone_callback;
+ wc.checkpoint_complete_callback = toku_ftnode_checkpoint_complete_callback;
+ wc.write_extraargs = ft;
+ return wc;
+}
+
+static void
+ft_node_maybe_prefetch(FT_HANDLE ft_handle, FTNODE node, int childnum, FT_CURSOR ftcursor, bool *doprefetch) {
+ // the number of nodes to prefetch
+ const int num_nodes_to_prefetch = 1;
+
+ // if we want to prefetch in the tree
+ // then prefetch the next children if there are any
+ if (*doprefetch && toku_ft_cursor_prefetching(ftcursor) && !ftcursor->disable_prefetching) {
+ int rc = ft_cursor_rightmost_child_wanted(ftcursor, ft_handle, node);
+ for (int i = childnum + 1; (i <= childnum + num_nodes_to_prefetch) && (i <= rc); i++) {
+ BLOCKNUM nextchildblocknum = BP_BLOCKNUM(node, i);
+ uint32_t nextfullhash = compute_child_fullhash(ft_handle->ft->cf, node, i);
+ ftnode_fetch_extra *XCALLOC(bfe);
+ bfe->create_for_prefetch(ft_handle->ft, ftcursor);
+ bool doing_prefetch = false;
+ toku_cachefile_prefetch(
+ ft_handle->ft->cf,
+ nextchildblocknum,
+ nextfullhash,
+ get_write_callbacks_for_node(ft_handle->ft),
+ ftnode_fetch_callback_and_free_bfe,
+ toku_ftnode_pf_req_callback,
+ ftnode_pf_callback_and_free_bfe,
+ bfe,
+ &doing_prefetch
+ );
+ if (!doing_prefetch) {
+ bfe->destroy();
+ toku_free(bfe);
+ }
+ *doprefetch = false;
+ }
+ }
+}
+
+struct unlock_ftnode_extra {
+ FT_HANDLE ft_handle;
+ FTNODE node;
+ bool msgs_applied;
+};
+
+// When this is called, the cachetable lock is held
+static void
+unlock_ftnode_fun (void *v) {
+ struct unlock_ftnode_extra *x = NULL;
+ CAST_FROM_VOIDP(x, v);
+ FT_HANDLE ft_handle = x->ft_handle;
+ FTNODE node = x->node;
+ // CT lock is held
+ int r = toku_cachetable_unpin_ct_prelocked_no_flush(
+ ft_handle->ft->cf,
+ node->ct_pair,
+ (enum cachetable_dirty) node->dirty(),
+ x->msgs_applied ? make_ftnode_pair_attr(node) : make_invalid_pair_attr()
+ );
+ assert_zero(r);
+}
+
+/* search in a node's child */
+static int
+ft_search_child(FT_HANDLE ft_handle, FTNODE node, int childnum, ft_search *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool *doprefetch, FT_CURSOR ftcursor, UNLOCKERS unlockers,
+ ANCESTORS ancestors, const pivot_bounds &bounds, bool can_bulk_fetch)
+// Effect: Search in a node's child. Searches are read-only now (at least as far as the hardcopy is concerned).
+{
+ struct ancestors next_ancestors = {node, childnum, ancestors};
+
+ BLOCKNUM childblocknum = BP_BLOCKNUM(node,childnum);
+ uint32_t fullhash = compute_child_fullhash(ft_handle->ft->cf, node, childnum);
+ FTNODE childnode = nullptr;
+
+ // If the current node's height is greater than 1, then its child is an internal node.
+ // Therefore, to warm the cache better (#5798), we want to read all the partitions off disk in one shot.
+ bool read_all_partitions = node->height > 1;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_subset_read(
+ ft_handle->ft,
+ search,
+ &ftcursor->range_lock_left_key,
+ &ftcursor->range_lock_right_key,
+ ftcursor->left_is_neg_infty,
+ ftcursor->right_is_pos_infty,
+ ftcursor->disable_prefetching,
+ read_all_partitions
+ );
+ bool msgs_applied = false;
+ {
+ int rr = toku_pin_ftnode_for_query(ft_handle, childblocknum, fullhash,
+ unlockers,
+ &next_ancestors, bounds,
+ &bfe,
+ true,
+ &childnode,
+ &msgs_applied);
+ if (rr==TOKUDB_TRY_AGAIN) {
+ return rr;
+ }
+ invariant_zero(rr);
+ }
+
+ struct unlock_ftnode_extra unlock_extra = { ft_handle, childnode, msgs_applied };
+ struct unlockers next_unlockers = { true, unlock_ftnode_fun, (void *) &unlock_extra, unlockers };
+ int r = ft_search_node(ft_handle, childnode, search, bfe.child_to_read, getf, getf_v, doprefetch, ftcursor, &next_unlockers, &next_ancestors, bounds, can_bulk_fetch);
+ if (r!=TOKUDB_TRY_AGAIN) {
+ // maybe prefetch the next child
+ if (r == 0 && node->height == 1) {
+ ft_node_maybe_prefetch(ft_handle, node, childnum, ftcursor, doprefetch);
+ }
+
+ assert(next_unlockers.locked);
+ if (msgs_applied) {
+ toku_unpin_ftnode(ft_handle->ft, childnode);
+ }
+ else {
+ toku_unpin_ftnode_read_only(ft_handle->ft, childnode);
+ }
+ } else {
+ // try again.
+
+ // there are two cases where we get TOKUDB_TRY_AGAIN
+ // case 1 is when some later call to toku_pin_ftnode returned
+ // that value and unpinned all the nodes anyway. case 2
+ // is when ft_search_node had to stop its search because
+ // some piece of a node that it needed was not in memory. In this case,
+ // the node was not unpinned, so we unpin it here
+ if (next_unlockers.locked) {
+ if (msgs_applied) {
+ toku_unpin_ftnode(ft_handle->ft, childnode);
+ }
+ else {
+ toku_unpin_ftnode_read_only(ft_handle->ft, childnode);
+ }
+ }
+ }
+
+ return r;
+}
+
+static inline int
+search_which_child_cmp_with_bound(const toku::comparator &cmp, FTNODE node, int childnum,
+ ft_search *search, DBT *dbt) {
+ return cmp(toku_copyref_dbt(dbt, node->pivotkeys.get_pivot(childnum)), &search->pivot_bound);
+}
+
+int
+toku_ft_search_which_child(const toku::comparator &cmp, FTNODE node, ft_search *search) {
+ if (node->n_children <= 1) return 0;
+
+ DBT pivotkey;
+ toku_init_dbt(&pivotkey);
+ int lo = 0;
+ int hi = node->n_children - 1;
+ int mi;
+ while (lo < hi) {
+ mi = (lo + hi) / 2;
+ node->pivotkeys.fill_pivot(mi, &pivotkey);
+        // search->compare is really strange, and only works well with a
+        // linear search; it makes binary search a pita.
+ //
+ // if you are searching left to right, it returns
+ // "0" for pivots that are < the target, and
+ // "1" for pivots that are >= the target
+ // if you are searching right to left, it's the opposite.
+ //
+ // so if we're searching from the left and search->compare says
+ // "1", we want to go left from here, if it says "0" we want to go
+ // right. searching from the right does the opposite.
+ bool c = search->compare(*search, &pivotkey);
+ if (((search->direction == FT_SEARCH_LEFT) && c) ||
+ ((search->direction == FT_SEARCH_RIGHT) && !c)) {
+ hi = mi;
+ } else {
+ assert(((search->direction == FT_SEARCH_LEFT) && !c) ||
+ ((search->direction == FT_SEARCH_RIGHT) && c));
+ lo = mi + 1;
+ }
+ }
+ // ready to return something, if the pivot is bounded, we have to move
+ // over a bit to get away from what we've already searched
+ if (search->pivot_bound.data != nullptr) {
+ if (search->direction == FT_SEARCH_LEFT) {
+ while (lo < node->n_children - 1 &&
+ search_which_child_cmp_with_bound(cmp, node, lo, search, &pivotkey) <= 0) {
+ // searching left to right, if the comparison says the
+ // current pivot (lo) is left of or equal to our bound,
+ // don't search that child again
+ lo++;
+ }
+ } else {
+ while (lo > 0 &&
+ search_which_child_cmp_with_bound(cmp, node, lo - 1, search, &pivotkey) >= 0) {
+ // searching right to left, same argument as just above
+ // (but we had to pass lo - 1 because the pivot between lo
+ // and the thing just less than it is at that position in
+ // the pivot keys array)
+ lo--;
+ }
+ }
+ }
+ return lo;
+}
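+
+// A worked example of the binary search above (pivot values are illustrative):
+// a node with pivots [10, 20, 30] has four children. For a left-to-right search
+// targeting 25, search->compare returns 0 for pivots 10 and 20 (they are < 25)
+// and 1 for pivot 30 (>= 25), so the loop converges on lo == 2 and the search
+// descends into the child covering keys in (20, 30].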
+
+static void
+maybe_search_save_bound(
+ FTNODE node,
+ int child_searched,
+ ft_search *search)
+{
+ int p = (search->direction == FT_SEARCH_LEFT) ? child_searched : child_searched - 1;
+ if (p >= 0 && p < node->n_children-1) {
+ toku_destroy_dbt(&search->pivot_bound);
+ toku_clone_dbt(&search->pivot_bound, node->pivotkeys.get_pivot(p));
+ }
+}
+
+// Returns true if there are still children left to search in this node within the search bound (if any).
+static bool search_try_again(FTNODE node, int child_to_search, ft_search *search) {
+ bool try_again = false;
+ if (search->direction == FT_SEARCH_LEFT) {
+ if (child_to_search < node->n_children-1) {
+ try_again = true;
+ // if there is a search bound and the bound is within the search pivot then continue the search
+ if (search->k_bound) {
+ FT_HANDLE CAST_FROM_VOIDP(ft_handle, search->context);
+ try_again = (ft_handle->ft->cmp(search->k_bound, &search->pivot_bound) > 0);
+ }
+ }
+ } else if (search->direction == FT_SEARCH_RIGHT) {
+ if (child_to_search > 0)
+ try_again = true;
+ }
+ return try_again;
+}
+
+static int
+ft_search_node(
+ FT_HANDLE ft_handle,
+ FTNODE node,
+ ft_search *search,
+ int child_to_search,
+ FT_GET_CALLBACK_FUNCTION getf,
+ void *getf_v,
+ bool *doprefetch,
+ FT_CURSOR ftcursor,
+ UNLOCKERS unlockers,
+ ANCESTORS ancestors,
+ const pivot_bounds &bounds,
+ bool can_bulk_fetch
+ )
+{
+ int r = 0;
+ // assert that we got a valid child_to_search
+ invariant(child_to_search >= 0);
+ invariant(child_to_search < node->n_children);
+ //
+ // At this point, we must have the necessary partition available to continue the search
+ //
+ assert(BP_STATE(node,child_to_search) == PT_AVAIL);
+ const pivot_bounds next_bounds = bounds.next_bounds(node, child_to_search);
+ if (node->height > 0) {
+ r = ft_search_child(
+ ft_handle,
+ node,
+ child_to_search,
+ search,
+ getf,
+ getf_v,
+ doprefetch,
+ ftcursor,
+ unlockers,
+ ancestors,
+ next_bounds,
+ can_bulk_fetch
+ );
+ }
+ else {
+ r = ft_search_basement_node(
+ BLB(node, child_to_search),
+ search,
+ getf,
+ getf_v,
+ doprefetch,
+ ftcursor,
+ can_bulk_fetch
+ );
+ }
+ if (r == 0) {
+ return r; //Success
+ }
+
+ if (r != DB_NOTFOUND) {
+ return r; //Error (or message to quit early, such as TOKUDB_FOUND_BUT_REJECTED or TOKUDB_TRY_AGAIN)
+ }
+ // not really necessary, just put this here so that reading the
+ // code becomes simpler. The point is at this point in the code,
+ // we know that we got DB_NOTFOUND and we have to continue
+ assert(r == DB_NOTFOUND);
+ // we have a new pivotkey
+ if (node->height == 0) {
+ // when we run off the end of a basement, try to lock the range up to the pivot. solves #3529
+ const DBT *pivot = search->direction == FT_SEARCH_LEFT ? next_bounds.ubi() : // left -> right
+ next_bounds.lbe(); // right -> left
+ if (pivot != nullptr) {
+ int rr = getf(pivot->size, pivot->data, 0, nullptr, getf_v, true);
+ if (rr != 0) {
+ return rr; // lock was not granted
+ }
+ }
+ }
+
+ // If we got a DB_NOTFOUND then we have to search the next record. Possibly everything present is not visible.
+ // This way of doing DB_NOTFOUND is a kludge, and ought to be simplified. Something like this is needed for DB_NEXT, but
+ // for point queries, it's overkill. If we got a DB_NOTFOUND on a point query then we should just stop looking.
+ // When releasing locks on I/O we must not search the same subtree again, or we won't be guaranteed to make forward progress.
+ // If we got a DB_NOTFOUND, then the pivot is too small if searching from left to right (too large if searching from right to left).
+ // So save the pivot key in the search object.
+ maybe_search_save_bound(node, child_to_search, search);
+
+ // as part of #5770, if we can continue searching,
+ // we MUST return TOKUDB_TRY_AGAIN,
+ // because there is no guarantee that messages have been applied
+ // on any other path.
+ if (search_try_again(node, child_to_search, search)) {
+ r = TOKUDB_TRY_AGAIN;
+ }
+
+ return r;
+}
+
+int toku_ft_search(FT_HANDLE ft_handle, ft_search *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, FT_CURSOR ftcursor, bool can_bulk_fetch)
+// Effect: Perform a search. Associate cursor with a leaf if possible.
+// All searches are performed through this function.
+{
+ int r;
+ uint trycount = 0; // How many tries did it take to get the result?
+ FT ft = ft_handle->ft;
+
+ toku::context search_ctx(CTX_SEARCH);
+
+try_again:
+
+ trycount++;
+
+ //
+ // Here is how searches work
+ // At a high level, we descend down the tree, using the search parameter
+ // to guide us towards where to look. But the search parameter is not
+ // used here to determine which child of a node to read (regardless
+ // of whether that child is another node or a basement node)
+ // The search parameter is used while we are pinning the node into
+ // memory, because that is when the system needs to ensure that
+ // the appropriate partition of the child we are using is in memory.
+    // So, here are the steps for a search (and this applies to this function
+    //  as well as ft_search_child):
+ // - Take the search parameter, and create a ftnode_fetch_extra, that will be used by toku_pin_ftnode
+ // - Call toku_pin_ftnode with the bfe as the extra for the fetch callback (in case the node is not at all in memory)
+    //  and the partial fetch callback (in case the node is perhaps partially in memory) to fetch the node
+ // - This eventually calls either toku_ftnode_fetch_callback or toku_ftnode_pf_req_callback depending on whether the node is in
+ // memory at all or not.
+ // - Within these functions, the "ft_search search" parameter is used to evaluate which child the search is interested in.
+ // If the node is not in memory at all, toku_ftnode_fetch_callback will read the node and decompress only the partition for the
+ // relevant child, be it a message buffer or basement node. If the node is in memory, then toku_ftnode_pf_req_callback
+ // will tell the cachetable that a partial fetch is required if and only if the relevant child is not in memory. If the relevant child
+ // is not in memory, then toku_ftnode_pf_callback is called to fetch the partition.
+ // - These functions set bfe->child_to_read so that the search code does not need to reevaluate it.
+ // - Just to reiterate, all of the last item happens within toku_ftnode_pin(_holding_lock)
+ // - At this point, toku_ftnode_pin_holding_lock has returned, with bfe.child_to_read set,
+ // - ft_search_node is called, assuming that the node and its relevant partition are in memory.
+ //
+ ftnode_fetch_extra bfe;
+ bfe.create_for_subset_read(
+ ft,
+ search,
+ &ftcursor->range_lock_left_key,
+ &ftcursor->range_lock_right_key,
+ ftcursor->left_is_neg_infty,
+ ftcursor->right_is_pos_infty,
+ ftcursor->disable_prefetching,
+        true // We may as well always read the whole root into memory; if it's a leaf node, it's a tiny tree anyway.
+ );
+ FTNODE node = NULL;
+ {
+ uint32_t fullhash;
+ CACHEKEY root_key;
+ toku_calculate_root_offset_pointer(ft, &root_key, &fullhash);
+ toku_pin_ftnode(
+ ft,
+ root_key,
+ fullhash,
+ &bfe,
+ PL_READ, // may_modify_node set to false, because root cannot change during search
+ &node,
+ true
+ );
+ }
+
+ uint tree_height = node->height + 1; // How high is the tree? This is the height of the root node plus one (leaf is at height 0).
+
+
+ struct unlock_ftnode_extra unlock_extra = {ft_handle,node,false};
+ struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL};
+
+ {
+ bool doprefetch = false;
+ //static int counter = 0; counter++;
+ r = ft_search_node(ft_handle, node, search, bfe.child_to_read, getf, getf_v, &doprefetch, ftcursor, &unlockers, (ANCESTORS)NULL, pivot_bounds::infinite_bounds(), can_bulk_fetch);
+ if (r==TOKUDB_TRY_AGAIN) {
+ // there are two cases where we get TOKUDB_TRY_AGAIN
+ // case 1 is when some later call to toku_pin_ftnode returned
+ // that value and unpinned all the nodes anyway. case 2
+ // is when ft_search_node had to stop its search because
+ // some piece of a node that it needed was not in memory.
+ // In this case, the node was not unpinned, so we unpin it here
+ if (unlockers.locked) {
+ toku_unpin_ftnode_read_only(ft_handle->ft, node);
+ }
+ goto try_again;
+ } else {
+ assert(unlockers.locked);
+ }
+ }
+
+ assert(unlockers.locked);
+ toku_unpin_ftnode_read_only(ft_handle->ft, node);
+
+
+ //Heaviside function (+direction) queries define only a lower or upper
+ //bound. Some queries require both an upper and lower bound.
+ //They do this by wrapping the FT_GET_CALLBACK_FUNCTION with another
+ //test that checks for the other bound. If the other bound fails,
+ //it returns TOKUDB_FOUND_BUT_REJECTED which means not found, but
+ //stop searching immediately, as opposed to DB_NOTFOUND
+ //which can mean not found, but keep looking in another leaf.
+ if (r==TOKUDB_FOUND_BUT_REJECTED) r = DB_NOTFOUND;
+ else if (r==DB_NOTFOUND) {
+ //We truly did not find an answer to the query.
+ //Therefore, the FT_GET_CALLBACK_FUNCTION has NOT been called.
+ //The contract specifies that the callback function must be called
+ //for 'r= (0|DB_NOTFOUND|TOKUDB_FOUND_BUT_REJECTED)'
+ //TODO: #1378 This is not the ultimate location of this call to the
+ //callback. It is surely wrong for node-level locking, and probably
+ //wrong for the STRADDLE callback for heaviside function(two sets of key/vals)
+ int r2 = getf(0,NULL, 0,NULL, getf_v, false);
+ if (r2!=0) r = r2;
+ }
+ { // accounting (to detect and measure thrashing)
+ uint retrycount = trycount - 1; // how many retries were needed?
+ if (retrycount) {
+ FT_STATUS_INC(FT_TOTAL_RETRIES, retrycount);
+ }
+ if (retrycount > tree_height) { // if at least one node was read from disk more than once
+ FT_STATUS_INC(FT_SEARCH_TRIES_GT_HEIGHT, 1);
+ if (retrycount > (tree_height+3))
+ FT_STATUS_INC(FT_SEARCH_TRIES_GT_HEIGHTPLUS3, 1);
+ }
+ }
+ return r;
+}
+
+/* ********************************* delete **************************************/
+static int
+getf_nothing (uint32_t UU(keylen), const void *UU(key), uint32_t UU(vallen), const void *UU(val), void *UU(pair_v), bool UU(lock_only)) {
+ return 0;
+}
+
+int toku_ft_cursor_delete(FT_CURSOR cursor, int flags, TOKUTXN txn) {
+ int r;
+
+ int unchecked_flags = flags;
+ bool error_if_missing = (bool) !(flags&DB_DELETE_ANY);
+ unchecked_flags &= ~DB_DELETE_ANY;
+ if (unchecked_flags!=0) r = EINVAL;
+ else if (toku_ft_cursor_not_set(cursor)) r = EINVAL;
+ else {
+ r = 0;
+ if (error_if_missing) {
+ r = toku_ft_cursor_current(cursor, DB_CURRENT, getf_nothing, NULL);
+ }
+ if (r == 0) {
+ toku_ft_delete(cursor->ft_handle, &cursor->key, txn);
+ }
+ }
+ return r;
+}
+
+/* ********************* keyrange ************************ */
+
+struct keyrange_compare_s {
+ FT ft;
+ const DBT *key;
+};
+
+// TODO: Remove me, I'm boring
+static int keyrange_compare(DBT const &kdbt,
+ const struct keyrange_compare_s &s) {
+ return s.ft->cmp(&kdbt, s.key);
+}
+
+static void keysrange_in_leaf_partition(FT_HANDLE ft_handle,
+ FTNODE node,
+ DBT *key_left,
+ DBT *key_right,
+ int left_child_number,
+ int right_child_number,
+ uint64_t estimated_num_rows,
+ uint64_t *less,
+ uint64_t *equal_left,
+ uint64_t *middle,
+ uint64_t *equal_right,
+ uint64_t *greater,
+ bool *single_basement_node)
+// If the partition is in main memory then compute an exact count; otherwise estimate.
+// Treat key_left == NULL as negative infinity
+// Treat key_right == NULL as positive infinity
+{
+ paranoid_invariant(node->height == 0); // we are in a leaf
+ paranoid_invariant(!(key_left == NULL && key_right != NULL));
+ paranoid_invariant(left_child_number <= right_child_number);
+ bool single_basement = left_child_number == right_child_number;
+ paranoid_invariant(!single_basement ||
+ (BP_STATE(node, left_child_number) == PT_AVAIL));
+ if (BP_STATE(node, left_child_number) == PT_AVAIL) {
+ int r;
+        // The partition is in main memory, so get an exact count.
+ struct keyrange_compare_s s_left = {ft_handle->ft, key_left};
+ BASEMENTNODE bn = BLB(node, left_child_number);
+ uint32_t idx_left = 0;
+ // if key_left is NULL then set r==-1 and idx==0.
+ r = key_left
+ ? bn->data_buffer.find_zero<decltype(s_left), keyrange_compare>(
+ s_left, nullptr, nullptr, nullptr, &idx_left)
+ : -1;
+ *less = idx_left;
+ *equal_left = (r == 0) ? 1 : 0;
+
+ uint32_t size = bn->data_buffer.num_klpairs();
+ uint32_t idx_right = size;
+ r = -1;
+ if (single_basement && key_right) {
+ struct keyrange_compare_s s_right = {ft_handle->ft, key_right};
+ r = bn->data_buffer.find_zero<decltype(s_right), keyrange_compare>(
+ s_right, nullptr, nullptr, nullptr, &idx_right);
+ }
+ *middle = idx_right - idx_left - *equal_left;
+ *equal_right = (r == 0) ? 1 : 0;
+ *greater = size - idx_right - *equal_right;
+ } else {
+ paranoid_invariant(!single_basement);
+ uint32_t idx_left = estimated_num_rows / 2;
+ if (!key_left) {
+ // Both nullptr, assume key_left belongs before leftmost entry,
+ // key_right belongs after rightmost entry
+ idx_left = 0;
+ paranoid_invariant(!key_right);
+ }
+        // Assume idx_left and idx_right point to where key_left and key_right
+        // would belong (but are not actually present).
+ *less = idx_left;
+ *equal_left = 0;
+ *middle = estimated_num_rows - idx_left;
+ *equal_right = 0;
+ *greater = 0;
+ }
+ *single_basement_node = single_basement;
+}
+
+static int toku_ft_keysrange_internal(
+ FT_HANDLE ft_handle,
+ FTNODE node,
+ DBT *key_left,
+ DBT *key_right,
+ bool may_find_right,
+ uint64_t *less,
+ uint64_t *equal_left,
+ uint64_t *middle,
+ uint64_t *equal_right,
+ uint64_t *greater,
+ bool *single_basement_node,
+ uint64_t estimated_num_rows,
+ ftnode_fetch_extra *min_bfe, // set up to read a minimal read.
+ ftnode_fetch_extra
+ *match_bfe, // set up to read a basement node iff both keys in it
+ struct unlockers *unlockers,
+ ANCESTORS ancestors,
+ const pivot_bounds &bounds)
+// Implementation note: Assign values to less, equal, and greater, and then on
+// the way out (returning up the stack) we add more values in.
+{
+ int r = 0;
+ // if KEY is NULL then use the leftmost key.
+ int left_child_number =
+ key_left ? toku_ftnode_which_child(node, key_left, ft_handle->ft->cmp)
+ : 0;
+ int right_child_number =
+ node->n_children; // Sentinel that does not equal left_child_number.
+ if (may_find_right) {
+ right_child_number =
+ key_right
+ ? toku_ftnode_which_child(node, key_right, ft_handle->ft->cmp)
+ : node->n_children - 1;
+ }
+
+ uint64_t rows_per_child = estimated_num_rows / node->n_children;
+ if (node->height == 0) {
+ keysrange_in_leaf_partition(ft_handle,
+ node,
+ key_left,
+ key_right,
+ left_child_number,
+ right_child_number,
+ rows_per_child,
+ less,
+ equal_left,
+ middle,
+ equal_right,
+ greater,
+ single_basement_node);
+
+ *less += rows_per_child * left_child_number;
+ if (*single_basement_node) {
+ *greater +=
+ rows_per_child * (node->n_children - left_child_number - 1);
+ } else {
+ *middle +=
+ rows_per_child * (node->n_children - left_child_number - 1);
+ }
+ } else {
+ // do the child.
+ struct ancestors next_ancestors = {node, left_child_number, ancestors};
+ BLOCKNUM childblocknum = BP_BLOCKNUM(node, left_child_number);
+ uint32_t fullhash =
+ compute_child_fullhash(ft_handle->ft->cf, node, left_child_number);
+ FTNODE childnode;
+ bool msgs_applied = false;
+ bool child_may_find_right =
+ may_find_right && left_child_number == right_child_number;
+ r = toku_pin_ftnode_for_query(
+ ft_handle,
+ childblocknum,
+ fullhash,
+ unlockers,
+ &next_ancestors,
+ bounds,
+ child_may_find_right ? match_bfe : min_bfe,
+ false,
+ &childnode,
+ &msgs_applied);
+ paranoid_invariant(!msgs_applied);
+ if (r != TOKUDB_TRY_AGAIN) {
+ assert_zero(r);
+
+ struct unlock_ftnode_extra unlock_extra = {
+ ft_handle, childnode, false};
+ struct unlockers next_unlockers = {
+ true, unlock_ftnode_fun, (void *)&unlock_extra, unlockers};
+ const pivot_bounds next_bounds =
+ bounds.next_bounds(node, left_child_number);
+
+ r = toku_ft_keysrange_internal(ft_handle,
+ childnode,
+ key_left,
+ key_right,
+ child_may_find_right,
+ less,
+ equal_left,
+ middle,
+ equal_right,
+ greater,
+ single_basement_node,
+ rows_per_child,
+ min_bfe,
+ match_bfe,
+ &next_unlockers,
+ &next_ancestors,
+ next_bounds);
+ if (r != TOKUDB_TRY_AGAIN) {
+ assert_zero(r);
+
+ *less += rows_per_child * left_child_number;
+ if (*single_basement_node) {
+ *greater += rows_per_child *
+ (node->n_children - left_child_number - 1);
+ } else {
+ *middle += rows_per_child *
+ (node->n_children - left_child_number - 1);
+ }
+
+ assert(unlockers->locked);
+ toku_unpin_ftnode_read_only(ft_handle->ft, childnode);
+ }
+ }
+ }
+ return r;
+}
+
+void toku_ft_keysrange(FT_HANDLE ft_handle,
+ DBT *key_left,
+ DBT *key_right,
+ uint64_t *less_p,
+ uint64_t *equal_left_p,
+ uint64_t *middle_p,
+ uint64_t *equal_right_p,
+ uint64_t *greater_p,
+ bool *middle_3_exact_p)
+// Effect: Return an estimate of the number of keys to the left, the number
+// equal (to left key), number between keys, number equal to right key, and the
+// number to the right of both keys.
+// The values are an estimate.
+// If you perform a keyrange on two keys that are in the same basement,
+// equal_left, middle, and equal_right will be exact.
+// (An illustrative usage sketch follows this function's definition.)
+// 4184: What to do with a NULL key?
+// key_left==NULL is treated as -infinity
+// key_right==NULL is treated as +infinity
+// If KEY is NULL then the system picks an arbitrary key and returns it.
+// key_right can be non-null only if key_left is non-null;
+{
+ if (!key_left && key_right) {
+ // Simplify internals by only supporting key_right != null when key_left
+ // != null
+ // If key_right != null and key_left == null, then swap them and fix up
+ // numbers.
+ uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0,
+ greater = 0;
+ toku_ft_keysrange(ft_handle,
+ key_right,
+ nullptr,
+ &less,
+ &equal_left,
+ &middle,
+ &equal_right,
+ &greater,
+ middle_3_exact_p);
+ *less_p = 0;
+ *equal_left_p = 0;
+ *middle_p = less;
+ *equal_right_p = equal_left;
+ *greater_p = middle;
+ invariant_zero(equal_right);
+ invariant_zero(greater);
+ return;
+ }
+ paranoid_invariant(!(!key_left && key_right));
+ ftnode_fetch_extra min_bfe;
+ ftnode_fetch_extra match_bfe;
+ min_bfe.create_for_min_read(
+ ft_handle->ft); // read pivot keys but not message buffers
+ match_bfe.create_for_keymatch(
+ ft_handle->ft,
+ key_left,
+ key_right,
+ false,
+ false); // read basement node only if both keys in it.
+try_again: {
+ uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0;
+ bool single_basement_node = false;
+ FTNODE node = NULL;
+ {
+ uint32_t fullhash;
+ CACHEKEY root_key;
+ toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
+ toku_pin_ftnode(
+ ft_handle->ft,
+ root_key,
+ fullhash,
+ &match_bfe,
+ PL_READ, // may_modify_node, cannot change root during keyrange
+ &node,
+ true);
+ }
+
+ struct unlock_ftnode_extra unlock_extra = {ft_handle, node, false};
+ struct unlockers unlockers = {
+ true, unlock_ftnode_fun, (void *)&unlock_extra, (UNLOCKERS)NULL};
+
+ {
+ int r;
+ int64_t numrows = ft_handle->ft->in_memory_logical_rows;
+ if (numrows < 0)
+ numrows = 0; // prevent appearance of a negative number
+ r = toku_ft_keysrange_internal(ft_handle,
+ node,
+ key_left,
+ key_right,
+ true,
+ &less,
+ &equal_left,
+ &middle,
+ &equal_right,
+ &greater,
+ &single_basement_node,
+ numrows,
+ &min_bfe,
+ &match_bfe,
+ &unlockers,
+ (ANCESTORS)NULL,
+ pivot_bounds::infinite_bounds());
+ assert(r == 0 || r == TOKUDB_TRY_AGAIN);
+ if (r == TOKUDB_TRY_AGAIN) {
+ assert(!unlockers.locked);
+ goto try_again;
+ }
+ // May need to do a second query.
+ if (!single_basement_node && key_right != nullptr) {
+ // "greater" is stored in "middle"
+ invariant_zero(equal_right);
+ invariant_zero(greater);
+ uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0,
+ greater2 = 0;
+ bool ignore;
+ r = toku_ft_keysrange_internal(ft_handle,
+ node,
+ key_right,
+ nullptr,
+ false,
+ &less2,
+ &equal_left2,
+ &middle2,
+ &equal_right2,
+ &greater2,
+ &ignore,
+ numrows,
+ &min_bfe,
+ &match_bfe,
+ &unlockers,
+ (ANCESTORS) nullptr,
+ pivot_bounds::infinite_bounds());
+ assert(r == 0 || r == TOKUDB_TRY_AGAIN);
+ if (r == TOKUDB_TRY_AGAIN) {
+ assert(!unlockers.locked);
+ goto try_again;
+ }
+ invariant_zero(equal_right2);
+ invariant_zero(greater2);
+ // Update numbers.
+ // less is already correct.
+ // equal_left is already correct.
+
+ // "middle" currently holds everything greater than left_key in
+ // first query
+ // 'middle2' currently holds everything greater than right_key in
+ // second query
+ // 'equal_left2' is how many match right_key
+
+ // Prevent underflow.
+ if (middle >= equal_left2 + middle2) {
+ middle -= equal_left2 + middle2;
+ } else {
+ middle = 0;
+ }
+ equal_right = equal_left2;
+ greater = middle2;
+ }
+ }
+ assert(unlockers.locked);
+ toku_unpin_ftnode_read_only(ft_handle->ft, node);
+ if (!key_right) {
+ paranoid_invariant_zero(equal_right);
+ paranoid_invariant_zero(greater);
+ }
+ if (!key_left) {
+ paranoid_invariant_zero(less);
+ paranoid_invariant_zero(equal_left);
+ }
+ *less_p = less;
+ *equal_left_p = equal_left;
+ *middle_p = middle;
+ *equal_right_p = equal_right;
+ *greater_p = greater;
+ *middle_3_exact_p = single_basement_node;
+}
+}
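+
+// Illustrative usage sketch (hypothetical caller; the handle `h` and the
+// caller-filled DBTs `lo` and `hi` are assumptions for illustration only):
+//
+//     uint64_t less, eq_lo, middle, eq_hi, greater;
+//     bool middle_exact;
+//     toku_ft_keysrange(h, &lo, &hi,
+//                       &less, &eq_lo, &middle, &eq_hi, &greater,
+//                       &middle_exact);
+//     // less + eq_lo + middle + eq_hi + greater approximates the total row
+//     // count; middle_exact is true only when both keys landed in the same
+//     // basement node, in which case the three middle values are exact.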
+
+struct get_key_after_bytes_iterate_extra {
+ uint64_t skip_len;
+ uint64_t *skipped;
+ void (*callback)(const DBT *, uint64_t, void *);
+ void *cb_extra;
+};
+
+static int get_key_after_bytes_iterate(const void* key, const uint32_t keylen, const LEAFENTRY & le, const uint32_t UU(idx), struct get_key_after_bytes_iterate_extra * const e) {
+ // only checking the latest val, mvcc will make this inaccurate
+ uint64_t pairlen = keylen + le_latest_vallen(le);
+ if (*e->skipped + pairlen > e->skip_len) {
+ // found our key!
+ DBT end_key;
+ toku_fill_dbt(&end_key, key, keylen);
+ e->callback(&end_key, *e->skipped, e->cb_extra);
+ return 1;
+ } else {
+ *e->skipped += pairlen;
+ return 0;
+ }
+}
+
+static int get_key_after_bytes_in_basementnode(FT ft, BASEMENTNODE bn, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *, uint64_t, void *), void *cb_extra, uint64_t *skipped) {
+ int r;
+ uint32_t idx_left = 0;
+ if (start_key != nullptr) {
+ struct keyrange_compare_s cmp = {ft, start_key};
+ r = bn->data_buffer.find_zero<decltype(cmp), keyrange_compare>(cmp, nullptr, nullptr, nullptr, &idx_left);
+ assert(r == 0 || r == DB_NOTFOUND);
+ }
+ struct get_key_after_bytes_iterate_extra iter_extra = {skip_len, skipped, callback, cb_extra};
+ r = bn->data_buffer.iterate_on_range<get_key_after_bytes_iterate_extra, get_key_after_bytes_iterate>(idx_left, bn->data_buffer.num_klpairs(), &iter_extra);
+
+ // Invert the sense of r == 0 (meaning the iterate finished, which means we didn't find what we wanted)
+ if (r == 1) {
+ r = 0;
+ } else {
+ r = DB_NOTFOUND;
+ }
+ return r;
+}
+
+static int get_key_after_bytes_in_subtree(FT_HANDLE ft_h, FT ft, FTNODE node, UNLOCKERS unlockers, ANCESTORS ancestors, const pivot_bounds &bounds, ftnode_fetch_extra *bfe, ft_search *search, uint64_t subtree_bytes, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *, uint64_t, void *), void *cb_extra, uint64_t *skipped);
+
+static int get_key_after_bytes_in_child(FT_HANDLE ft_h, FT ft, FTNODE node, UNLOCKERS unlockers, ANCESTORS ancestors, const pivot_bounds &bounds, ftnode_fetch_extra *bfe, ft_search *search, int childnum, uint64_t subtree_bytes, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *, uint64_t, void *), void *cb_extra, uint64_t *skipped) {
+ int r;
+ struct ancestors next_ancestors = {node, childnum, ancestors};
+ BLOCKNUM childblocknum = BP_BLOCKNUM(node, childnum);
+ uint32_t fullhash = compute_child_fullhash(ft->cf, node, childnum);
+ FTNODE child;
+ bool msgs_applied = false;
+ r = toku_pin_ftnode_for_query(ft_h, childblocknum, fullhash, unlockers, &next_ancestors, bounds, bfe, false, &child, &msgs_applied);
+ paranoid_invariant(!msgs_applied);
+ if (r == TOKUDB_TRY_AGAIN) {
+ return r;
+ }
+ assert_zero(r);
+ struct unlock_ftnode_extra unlock_extra = {ft_h, child, false};
+ struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void *) &unlock_extra, unlockers};
+ const pivot_bounds next_bounds = bounds.next_bounds(node, childnum);
+ return get_key_after_bytes_in_subtree(ft_h, ft, child, &next_unlockers, &next_ancestors, next_bounds, bfe, search, subtree_bytes, start_key, skip_len, callback, cb_extra, skipped);
+}
+
+static int get_key_after_bytes_in_subtree(FT_HANDLE ft_h, FT ft, FTNODE node, UNLOCKERS unlockers, ANCESTORS ancestors, const pivot_bounds &bounds, ftnode_fetch_extra *bfe, ft_search *search, uint64_t subtree_bytes, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *, uint64_t, void *), void *cb_extra, uint64_t *skipped) {
+ int r;
+ int childnum = toku_ft_search_which_child(ft->cmp, node, search);
+ const uint64_t child_subtree_bytes = subtree_bytes / node->n_children;
+ if (node->height == 0) {
+ r = DB_NOTFOUND;
+ for (int i = childnum; r == DB_NOTFOUND && i < node->n_children; ++i) {
+ // The theory here is that a leaf node could only be very
+ // unbalanced if it's dirty, which means all its basements are
+ // available. So if a basement node is available, we should
+ // check it as carefully as possible, but if it's compressed
+ // or on disk, then it should be fairly well balanced so we
+ // can trust the fanout calculation.
+ if (BP_STATE(node, i) == PT_AVAIL) {
+ r = get_key_after_bytes_in_basementnode(ft, BLB(node, i), (i == childnum) ? start_key : nullptr, skip_len, callback, cb_extra, skipped);
+ } else {
+ *skipped += child_subtree_bytes;
+ if (*skipped >= skip_len && i < node->n_children - 1) {
+ DBT pivot;
+ callback(node->pivotkeys.fill_pivot(i, &pivot), *skipped, cb_extra);
+ r = 0;
+ }
+ // Otherwise, r is still DB_NOTFOUND. If this is the last
+ // basement node, we'll return DB_NOTFOUND and that's ok.
+ // Some ancestor in the call stack will check the next
+ // node over and that will call the callback, or if no
+ // such node exists, we're at the max key and we should
+ // return DB_NOTFOUND up to the top.
+ }
+ }
+ } else {
+ r = get_key_after_bytes_in_child(ft_h, ft, node, unlockers, ancestors, bounds, bfe, search, childnum, child_subtree_bytes, start_key, skip_len, callback, cb_extra, skipped);
+ for (int i = childnum + 1; r == DB_NOTFOUND && i < node->n_children; ++i) {
+ if (*skipped + child_subtree_bytes < skip_len) {
+ *skipped += child_subtree_bytes;
+ } else {
+ r = get_key_after_bytes_in_child(ft_h, ft, node, unlockers, ancestors, bounds, bfe, search, i, child_subtree_bytes, nullptr, skip_len, callback, cb_extra, skipped);
+ }
+ }
+ }
+
+ if (r != TOKUDB_TRY_AGAIN) {
+ assert(unlockers->locked);
+ toku_unpin_ftnode_read_only(ft, node);
+ unlockers->locked = false;
+ }
+ return r;
+}
+
+int toku_ft_get_key_after_bytes(FT_HANDLE ft_h, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *end_key, uint64_t actually_skipped, void *extra), void *cb_extra)
+// Effect:
+// Call callback with end_key set to the largest key such that the sum of the sizes of the key/val pairs in the range [start_key, end_key) is <= skip_len.
+// Call callback with actually_skipped set to the sum of the sizes of the key/val pairs in the range [start_key, end_key).
+// Notes:
+// start_key == nullptr is interpreted as negative infinity.
+// end_key == nullptr is interpreted as positive infinity.
+// Only the latest val is counted toward the size, in the case of MVCC data.
+// Implementation:
+//   This is an estimated calculation. We assume for a node that each of its subtrees has equal size. If the tree is a single basement node, then we will be accurate, but otherwise we could be quite off.
+// Returns:
+// 0 on success
+// an error code otherwise
+{
+ FT ft = ft_h->ft;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft);
+ while (true) {
+ FTNODE root;
+ {
+ uint32_t fullhash;
+ CACHEKEY root_key;
+ toku_calculate_root_offset_pointer(ft, &root_key, &fullhash);
+ toku_pin_ftnode(ft, root_key, fullhash, &bfe, PL_READ, &root, true);
+ }
+ struct unlock_ftnode_extra unlock_extra = {ft_h, root, false};
+ struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS) nullptr};
+ ft_search search;
+ ft_search_init(&search, (start_key == nullptr ? toku_ft_cursor_compare_one : toku_ft_cursor_compare_set_range), FT_SEARCH_LEFT, start_key, nullptr, ft_h);
+
+ int r;
+        // We can't do this because of #5768: there may be dictionaries in the wild that have negative stats. This won't affect mongo, so it's ok:
+ //paranoid_invariant(ft->in_memory_stats.numbytes >= 0);
+ int64_t numbytes = ft->in_memory_stats.numbytes;
+ if (numbytes < 0) {
+ numbytes = 0;
+ }
+ uint64_t skipped = 0;
+ r = get_key_after_bytes_in_subtree(ft_h, ft, root, &unlockers, nullptr, pivot_bounds::infinite_bounds(), &bfe, &search, (uint64_t) numbytes, start_key, skip_len, callback, cb_extra, &skipped);
+ assert(!unlockers.locked);
+ if (r != TOKUDB_TRY_AGAIN) {
+ if (r == DB_NOTFOUND) {
+ callback(nullptr, skipped, cb_extra);
+ r = 0;
+ }
+ return r;
+ }
+ }
+}
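+
+// Illustrative usage sketch (hypothetical caller; `h`, `start`, `max_bytes`,
+// and the callback below are assumptions chosen only to show the callback
+// protocol described above):
+//
+//     static void found_end(const DBT *end_key, uint64_t actually_skipped, void *extra) {
+//         // end_key == nullptr means everything from start_key onward fit in skip_len.
+//         (void) extra;
+//     }
+//     ...
+//     int r = toku_ft_get_key_after_bytes(h, &start, max_bytes, found_end, nullptr);
+//     // r == 0 on success; the callback has then been invoked exactly once.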
+
+//Test-only wrapper for the old one-key range function
+void toku_ft_keyrange(FT_HANDLE ft_handle, DBT *key, uint64_t *less, uint64_t *equal, uint64_t *greater) {
+ uint64_t zero_equal_right, zero_greater;
+ bool ignore;
+ toku_ft_keysrange(ft_handle, key, nullptr, less, equal, greater, &zero_equal_right, &zero_greater, &ignore);
+ invariant_zero(zero_equal_right);
+ invariant_zero(zero_greater);
+}
+
+void toku_ft_handle_stat64 (FT_HANDLE ft_handle, TOKUTXN UU(txn), struct ftstat64_s *s) {
+ toku_ft_stat64(ft_handle->ft, s);
+}
+
+void toku_ft_handle_get_fractal_tree_info64(FT_HANDLE ft_h, struct ftinfo64 *s) {
+ toku_ft_get_fractal_tree_info64(ft_h->ft, s);
+}
+
+int toku_ft_handle_iterate_fractal_tree_block_map(FT_HANDLE ft_h, int (*iter)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*), void *iter_extra) {
+ return toku_ft_iterate_fractal_tree_block_map(ft_h->ft, iter, iter_extra);
+}
+
+/* ********************* debugging dump ************************ */
+static int
+toku_dump_ftnode (FILE *file, FT_HANDLE ft_handle, BLOCKNUM blocknum, int depth, const DBT *lorange, const DBT *hirange) {
+ int result=0;
+ FTNODE node;
+ toku_get_node_for_verify(blocknum, ft_handle, &node);
+ result=toku_verify_ftnode(ft_handle, ft_handle->ft->h->max_msn_in_ft, ft_handle->ft->h->max_msn_in_ft, false, node, -1, lorange, hirange, NULL, NULL, 0, 1, 0);
+ uint32_t fullhash = toku_cachetable_hash(ft_handle->ft->cf, blocknum);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
+ toku_pin_ftnode(
+ ft_handle->ft,
+ blocknum,
+ fullhash,
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->fullhash==fullhash);
+ fprintf(file, "%*sNode=%p\n", depth, "", node);
+
+ fprintf(file, "%*sNode %" PRId64 " height=%d n_children=%d keyrange=%s %s\n",
+ depth, "", blocknum.b, node->height, node->n_children, (char*)(lorange ? lorange->data : 0), (char*)(hirange ? hirange->data : 0));
+ {
+ int i;
+ for (i=0; i+1< node->n_children; i++) {
+ fprintf(file, "%*spivotkey %d =", depth+1, "", i);
+ toku_print_BYTESTRING(file, node->pivotkeys.get_pivot(i).size, (char *) node->pivotkeys.get_pivot(i).data);
+ fprintf(file, "\n");
+ }
+ for (i=0; i< node->n_children; i++) {
+ if (node->height > 0) {
+ NONLEAF_CHILDINFO bnc = BNC(node, i);
+ fprintf(file, "%*schild %d buffered (%d entries):", depth+1, "", i, toku_bnc_n_entries(bnc));
+ struct print_msg_fn {
+ FILE *file;
+ int depth;
+ print_msg_fn(FILE *f, int d) : file(f), depth(d) { }
+ int operator()(const ft_msg &msg, bool UU(is_fresh)) {
+ fprintf(file, "%*s xid=%" PRIu64 " %u (type=%d) msn=0x%" PRIu64 "\n",
+ depth+2, "",
+ toku_xids_get_innermost_xid(msg.xids()),
+ static_cast<unsigned>(toku_dtoh32(*(int*)msg.kdbt()->data)),
+ msg.type(), msg.msn().msn);
+ return 0;
+ }
+ } print_fn(file, depth);
+ bnc->msg_buffer.iterate(print_fn);
+ }
+ else {
+ int size = BLB_DATA(node, i)->num_klpairs();
+ if (0)
+ for (int j=0; j<size; j++) {
+ LEAFENTRY le;
+ void* keyp = NULL;
+ uint32_t keylen = 0;
+ int r = BLB_DATA(node,i)->fetch_klpair(j, &le, &keylen, &keyp);
+ assert_zero(r);
+ fprintf(file, " [%d]=", j);
+ print_klpair(file, keyp, keylen, le);
+ fprintf(file, "\n");
+ }
+ fprintf(file, "\n");
+ }
+ }
+ if (node->height > 0) {
+ for (i=0; i<node->n_children; i++) {
+ fprintf(file, "%*schild %d\n", depth, "", i);
+ if (i>0) {
+ char *CAST_FROM_VOIDP(key, node->pivotkeys.get_pivot(i - 1).data);
+ fprintf(file, "%*spivot %d len=%u %u\n", depth+1, "", i-1, node->pivotkeys.get_pivot(i - 1).size, (unsigned)toku_dtoh32(*(int*)key));
+ }
+ DBT x, y;
+ toku_dump_ftnode(file, ft_handle, BP_BLOCKNUM(node, i), depth+4,
+ (i==0) ? lorange : node->pivotkeys.fill_pivot(i - 1, &x),
+ (i==node->n_children-1) ? hirange : node->pivotkeys.fill_pivot(i, &y));
+ }
+ }
+ }
+ toku_unpin_ftnode(ft_handle->ft, node);
+ return result;
+}
+
+int toku_dump_ft(FILE *f, FT_HANDLE ft_handle) {
+ FT ft = ft_handle->ft;
+ invariant_notnull(ft);
+ ft->blocktable.dump_translation_table(f);
+
+ uint32_t fullhash = 0;
+ CACHEKEY root_key;
+ toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
+ return toku_dump_ftnode(f, ft_handle, root_key, 0, 0, 0);
+}
+
+
+static void toku_pfs_keys_init(const char *toku_instr_group_name) {
+ kibbutz_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name, "kibbutz_mutex");
+ minicron_p_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "minicron_p_mutex");
+ queue_result_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "queue_result_mutex");
+ tpool_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "tpool_lock_mutex");
+ workset_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "workset_lock_mutex");
+ bjm_jobs_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "bjm_jobs_lock_mutex");
+ log_internal_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "log_internal_lock_mutex");
+ cachetable_ev_thread_lock_mutex_key =
+ new toku_instr_key(toku_instr_object_type::mutex,
+ toku_instr_group_name,
+ "cachetable_ev_thread_lock_mutex");
+ cachetable_disk_nb_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "cachetable_disk_nb_mutex");
+ safe_file_size_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "safe_file_size_lock_mutex");
+ cachetable_m_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "cachetable_m_mutex_key");
+ checkpoint_safe_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "checkpoint_safe_mutex");
+ ft_ref_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "ft_ref_lock_mutex");
+ ft_open_close_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "ft_open_close_lock_mutex");
+ loader_error_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "loader_error_mutex");
+ bfs_mutex_key =
+ new toku_instr_key(toku_instr_object_type::mutex, toku_instr_group_name,
+ "bfs_mutex");
+ loader_bl_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "loader_bl_mutex");
+ loader_fi_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "loader_fi_lock_mutex");
+ loader_out_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "loader_out_mutex");
+ result_output_condition_lock_mutex_key =
+ new toku_instr_key(toku_instr_object_type::mutex,
+ toku_instr_group_name,
+ "result_output_condition_lock_mutex");
+ block_table_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "block_table_mutex");
+ rollback_log_node_cache_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "rollback_log_node_cache_mutex");
+ txn_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name, "txn_lock_mutex");
+ txn_state_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "txn_state_lock_mutex");
+ txn_child_manager_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "txn_child_manager_mutex");
+ txn_manager_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "txn_manager_lock_mutex");
+ treenode_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name, "treenode_mutex");
+ locktree_request_info_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "locktree_request_info_mutex");
+ locktree_request_info_retry_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "locktree_request_info_retry_mutex_key");
+ manager_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name, "manager_mutex");
+ manager_escalation_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "manager_escalation_mutex");
+ db_txn_struct_i_txn_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "db_txn_struct_i_txn_mutex");
+ manager_escalator_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "manager_escalator_mutex");
+ indexer_i_indexer_lock_mutex_key = new toku_instr_key(
+ toku_instr_object_type::mutex, toku_instr_group_name,
+ "indexer_i_indexer_lock_mutex");
+ indexer_i_indexer_estimate_lock_mutex_key =
+ new toku_instr_key(toku_instr_object_type::mutex,
+ toku_instr_group_name,
+ "indexer_i_indexer_estimate_lock_mutex");
+
+ tokudb_file_data_key = new toku_instr_key(
+ toku_instr_object_type::file, toku_instr_group_name, "tokudb_data_file");
+ tokudb_file_load_key = new toku_instr_key(
+ toku_instr_object_type::file, toku_instr_group_name, "tokudb_load_file");
+ tokudb_file_tmp_key = new toku_instr_key(
+ toku_instr_object_type::file, toku_instr_group_name, "tokudb_tmp_file");
+ tokudb_file_log_key = new toku_instr_key(
+ toku_instr_object_type::file, toku_instr_group_name, "tokudb_log_file");
+
+ fti_probe_1_key =
+ new toku_instr_key(toku_instr_object_type::mutex, toku_instr_group_name,
+ "fti_probe_1");
+
+ extractor_thread_key = new toku_instr_key(
+ toku_instr_object_type::thread, toku_instr_group_name,
+ "extractor_thread");
+ fractal_thread_key = new toku_instr_key(
+ toku_instr_object_type::thread, toku_instr_group_name, "fractal_thread");
+ io_thread_key =
+ new toku_instr_key(toku_instr_object_type::thread, toku_instr_group_name,
+ "io_thread");
+ eviction_thread_key = new toku_instr_key(
+ toku_instr_object_type::thread, toku_instr_group_name,
+ "eviction_thread");
+ kibbutz_thread_key = new toku_instr_key(
+ toku_instr_object_type::thread, toku_instr_group_name, "kibbutz_thread");
+ minicron_thread_key = new toku_instr_key(
+ toku_instr_object_type::thread, toku_instr_group_name,
+ "minicron_thread");
+ tp_internal_thread_key = new toku_instr_key(
+ toku_instr_object_type::thread, toku_instr_group_name,
+ "tp_internal_thread");
+
+ result_state_cond_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "result_state_cond");
+ bjm_jobs_wait_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name, "bjm_jobs_wait");
+ cachetable_p_refcount_wait_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "cachetable_p_refcount_wait");
+ cachetable_m_flow_control_cond_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "cachetable_m_flow_control_cond");
+ cachetable_m_ev_thread_cond_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "cachetable_m_ev_thread_cond");
+ bfs_cond_key =
+ new toku_instr_key(toku_instr_object_type::cond, toku_instr_group_name,
+ "bfs_cond");
+ result_output_condition_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "result_output_condition");
+ manager_m_escalator_done_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "manager_m_escalator_done");
+ lock_request_m_wait_cond_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "lock_request_m_wait_cond");
+ queue_result_cond_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "queue_result_cond");
+ ws_worker_wait_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name, "ws_worker_wait");
+ rwlock_wait_read_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name, "rwlock_wait_read");
+ rwlock_wait_write_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "rwlock_wait_write");
+ rwlock_cond_key =
+ new toku_instr_key(toku_instr_object_type::cond, toku_instr_group_name,
+ "rwlock_cond");
+ tp_thread_wait_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name, "tp_thread_wait");
+ tp_pool_wait_free_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "tp_pool_wait_free");
+ frwlock_m_wait_read_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "frwlock_m_wait_read");
+ kibbutz_k_cond_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name, "kibbutz_k_cond");
+ minicron_p_condvar_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "minicron_p_condvar");
+ locktree_request_info_retry_cv_key = new toku_instr_key(
+ toku_instr_object_type::cond, toku_instr_group_name,
+ "locktree_request_info_retry_cv_key");
+
+ multi_operation_lock_key = new toku_instr_key(
+ toku_instr_object_type::rwlock, toku_instr_group_name,
+ "multi_operation_lock");
+ low_priority_multi_operation_lock_key =
+ new toku_instr_key(toku_instr_object_type::rwlock,
+ toku_instr_group_name,
+ "low_priority_multi_operation_lock");
+ cachetable_m_list_lock_key = new toku_instr_key(
+ toku_instr_object_type::rwlock, toku_instr_group_name,
+ "cachetable_m_list_lock");
+ cachetable_m_pending_lock_expensive_key =
+ new toku_instr_key(toku_instr_object_type::rwlock,
+ toku_instr_group_name,
+ "cachetable_m_pending_lock_expensive");
+ cachetable_m_pending_lock_cheap_key =
+ new toku_instr_key(toku_instr_object_type::rwlock,
+ toku_instr_group_name,
+ "cachetable_m_pending_lock_cheap");
+ cachetable_m_lock_key = new toku_instr_key(
+ toku_instr_object_type::rwlock, toku_instr_group_name,
+ "cachetable_m_lock");
+ result_i_open_dbs_rwlock_key = new toku_instr_key(
+ toku_instr_object_type::rwlock, toku_instr_group_name,
+ "result_i_open_dbs_rwlock");
+ checkpoint_safe_rwlock_key = new toku_instr_key(
+ toku_instr_object_type::rwlock, toku_instr_group_name,
+ "checkpoint_safe_rwlock");
+ cachetable_value_key = new toku_instr_key(
+ toku_instr_object_type::rwlock, toku_instr_group_name,
+ "cachetable_value");
+ safe_file_size_lock_rwlock_key = new toku_instr_key(
+ toku_instr_object_type::rwlock, toku_instr_group_name,
+ "safe_file_size_lock_rwlock");
+ cachetable_disk_nb_rwlock_key = new toku_instr_key(
+ toku_instr_object_type::rwlock, toku_instr_group_name,
+ "cachetable_disk_nb_rwlock");
+
+ toku_instr_probe_1 = new toku_instr_probe(*fti_probe_1_key);
+}
+
+static void toku_pfs_keys_destroy(void) {
+ delete kibbutz_mutex_key;
+ delete minicron_p_mutex_key;
+ delete queue_result_mutex_key;
+ delete tpool_lock_mutex_key;
+ delete workset_lock_mutex_key;
+ delete bjm_jobs_lock_mutex_key;
+ delete log_internal_lock_mutex_key;
+ delete cachetable_ev_thread_lock_mutex_key;
+ delete cachetable_disk_nb_mutex_key;
+ delete safe_file_size_lock_mutex_key;
+ delete cachetable_m_mutex_key;
+ delete checkpoint_safe_mutex_key;
+ delete ft_ref_lock_mutex_key;
+ delete ft_open_close_lock_mutex_key;
+ delete loader_error_mutex_key;
+ delete bfs_mutex_key;
+ delete loader_bl_mutex_key;
+ delete loader_fi_lock_mutex_key;
+ delete loader_out_mutex_key;
+ delete result_output_condition_lock_mutex_key;
+ delete block_table_mutex_key;
+ delete rollback_log_node_cache_mutex_key;
+ delete txn_lock_mutex_key;
+ delete txn_state_lock_mutex_key;
+ delete txn_child_manager_mutex_key;
+ delete txn_manager_lock_mutex_key;
+ delete treenode_mutex_key;
+ delete locktree_request_info_mutex_key;
+ delete locktree_request_info_retry_mutex_key;
+ delete manager_mutex_key;
+ delete manager_escalation_mutex_key;
+ delete db_txn_struct_i_txn_mutex_key;
+ delete manager_escalator_mutex_key;
+ delete indexer_i_indexer_lock_mutex_key;
+ delete indexer_i_indexer_estimate_lock_mutex_key;
+
+ delete tokudb_file_data_key;
+ delete tokudb_file_load_key;
+ delete tokudb_file_tmp_key;
+ delete tokudb_file_log_key;
+
+ delete fti_probe_1_key;
+
+ delete extractor_thread_key;
+ delete fractal_thread_key;
+ delete io_thread_key;
+ delete eviction_thread_key;
+ delete kibbutz_thread_key;
+ delete minicron_thread_key;
+ delete tp_internal_thread_key;
+
+ delete result_state_cond_key;
+ delete bjm_jobs_wait_key;
+ delete cachetable_p_refcount_wait_key;
+ delete cachetable_m_flow_control_cond_key;
+ delete cachetable_m_ev_thread_cond_key;
+ delete bfs_cond_key;
+ delete result_output_condition_key;
+ delete manager_m_escalator_done_key;
+ delete lock_request_m_wait_cond_key;
+ delete queue_result_cond_key;
+ delete ws_worker_wait_key;
+ delete rwlock_wait_read_key;
+ delete rwlock_wait_write_key;
+ delete rwlock_cond_key;
+ delete tp_thread_wait_key;
+ delete tp_pool_wait_free_key;
+ delete frwlock_m_wait_read_key;
+ delete kibbutz_k_cond_key;
+ delete minicron_p_condvar_key;
+ delete locktree_request_info_retry_cv_key;
+
+ delete multi_operation_lock_key;
+ delete low_priority_multi_operation_lock_key;
+ delete cachetable_m_list_lock_key;
+ delete cachetable_m_pending_lock_expensive_key;
+ delete cachetable_m_pending_lock_cheap_key;
+ delete cachetable_m_lock_key;
+ delete result_i_open_dbs_rwlock_key;
+ delete checkpoint_safe_rwlock_key;
+ delete cachetable_value_key;
+ delete safe_file_size_lock_rwlock_key;
+
+ delete cachetable_disk_nb_rwlock_key;
+ delete toku_instr_probe_1;
+}
+
+int toku_ft_layer_init(void) {
+ static bool ft_layer_init_started = false;
+
+ if(ft_layer_init_started) {
+ return 0;
+ }
+
+ ft_layer_init_started = true;
+
+ int r = 0;
+
+ // Portability must be initialized first
+ r = toku_portability_init();
+ assert(r==0);
+ if (r) {
+ goto exit;
+ }
+
+ toku_pfs_keys_init("fti");
+
+ r = db_env_set_toku_product_name("tokudb");
+ assert(r==0);
+ if (r) {
+ goto exit;
+ }
+
+ partitioned_counters_init();
+ toku_status_init();
+ toku_context_status_init();
+ toku_checkpoint_init();
+ toku_ft_serialize_layer_init();
+ toku_mutex_init(
+ *ft_open_close_lock_mutex_key, &ft_open_close_lock, nullptr);
+ toku_scoped_malloc_init();
+exit:
+ return r;
+}
+
+void toku_ft_layer_destroy(void) {
+ static bool ft_layer_destroy_started = false;
+
+ if(ft_layer_destroy_started) {
+ return;
+ }
+
+ ft_layer_destroy_started = true;
+
+ toku_mutex_destroy(&ft_open_close_lock);
+ toku_ft_serialize_layer_destroy();
+ toku_checkpoint_destroy();
+ toku_context_status_destroy();
+ toku_status_destroy();
+ partitioned_counters_destroy();
+ toku_scoped_malloc_destroy();
+ toku_pfs_keys_destroy();
+
+ // Portability must be cleaned up last
+ toku_portability_destroy();
+}
+
+// This lock serializes all opens and closes because the cachetable requires that clients not open or close the same cachefile in parallel. We made
+// the lock coarser than required by not allowing any cachefiles to be opened or closed in parallel.
+void toku_ft_open_close_lock(void) {
+ toku_mutex_lock(&ft_open_close_lock);
+}
+
+void toku_ft_open_close_unlock(void) {
+ toku_mutex_unlock(&ft_open_close_lock);
+}
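+
+// Illustrative sketch (hypothetical caller): code that opens or closes a
+// cachefile is expected to bracket that work with this pair, e.g.
+//
+//     toku_ft_open_close_lock();
+//     // ... open or close the cachefile ...
+//     toku_ft_open_close_unlock();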
+
+// Prepare to remove a dictionary from the database when this transaction is committed:
+// - mark transaction as NEED fsync on commit
+// - make entry in rollback log
+// - make fdelete entry in recovery log
+//
+// Effect: when the txn commits, the ft's cachefile will be marked as unlink
+// on close. see toku_commit_fdelete and how unlink on close works
+// in toku_cachefile_close();
+// Requires: serialized with begin checkpoint
+// this does not need to take the open close lock because
+// 1.) the ft/cf cannot go away because we have a live handle.
+// 2.) we're not setting the unlink on close bit _here_. that
+// happens on txn commit (as the name suggests).
+// 3.) we're already holding the multi operation lock to
+// synchronize with begin checkpoint.
+// Contract: the iname of the ft should never be reused.
+void toku_ft_unlink_on_commit(FT_HANDLE handle, TOKUTXN txn) {
+ assert(txn);
+
+ CACHEFILE cf = handle->ft->cf;
+ FT CAST_FROM_VOIDP(ft, toku_cachefile_get_userdata(cf));
+
+ toku_txn_maybe_note_ft(txn, ft);
+
+ // If the txn commits, the commit MUST be in the log before the file is actually unlinked
+ toku_txn_force_fsync_on_commit(txn);
+ // make entry in rollback log
+ FILENUM filenum = toku_cachefile_filenum(cf);
+ toku_logger_save_rollback_fdelete(txn, filenum);
+ // make entry in recovery log
+ toku_logger_log_fdelete(txn, filenum);
+}
+
+// Non-transactional version of fdelete
+//
+// Effect: The ft file is unlinked when the handle closes and its ft is not
+// pinned by checkpoint. see toku_remove_ft_ref() and how unlink on
+// close works in toku_cachefile_close();
+// Requires: serialized with begin checkpoint
+void toku_ft_unlink(FT_HANDLE handle) {
+ CACHEFILE cf;
+ cf = handle->ft->cf;
+ toku_cachefile_unlink_on_close(cf);
+}
+
+int toku_ft_rename_iname(DB_TXN *txn,
+ const char *data_dir,
+ const char *old_iname,
+ const char *new_iname,
+ CACHETABLE ct) {
+ int r = 0;
+
+ std::unique_ptr<char[], decltype(&toku_free)> new_iname_full(nullptr,
+ &toku_free);
+ std::unique_ptr<char[], decltype(&toku_free)> old_iname_full(nullptr,
+ &toku_free);
+
+ new_iname_full.reset(toku_construct_full_name(2, data_dir, new_iname));
+ old_iname_full.reset(toku_construct_full_name(2, data_dir, old_iname));
+
+ if (txn) {
+ BYTESTRING bs_old_name = {static_cast<uint32_t>(strlen(old_iname) + 1),
+ const_cast<char *>(old_iname)};
+ BYTESTRING bs_new_name = {static_cast<uint32_t>(strlen(new_iname) + 1),
+ const_cast<char *>(new_iname)};
+ FILENUM filenum = FILENUM_NONE;
+ {
+ CACHEFILE cf;
+ r = toku_cachefile_of_iname_in_env(ct, old_iname, &cf);
+ if (r != ENOENT) {
+ char *old_fname_in_cf = toku_cachefile_fname_in_env(cf);
+ toku_cachefile_set_fname_in_env(cf, toku_xstrdup(new_iname));
+ toku_free(old_fname_in_cf);
+ filenum = toku_cachefile_filenum(cf);
+ }
+ }
+ toku_logger_save_rollback_frename(
+ db_txn_struct_i(txn)->tokutxn, &bs_old_name, &bs_new_name);
+ toku_log_frename(db_txn_struct_i(txn)->tokutxn->logger,
+ (LSN *)0,
+ 0,
+ toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn),
+ bs_old_name,
+ filenum,
+ bs_new_name);
+ }
+
+ if (!toku_create_subdirs_if_needed(new_iname_full.get()))
+ return get_error_errno();
+ r = toku_os_rename(old_iname_full.get(), new_iname_full.get());
+ if (r != 0)
+ return r;
+ r = toku_fsync_directory(new_iname_full.get());
+ return r;
+}
+
+int toku_ft_get_fragmentation(FT_HANDLE ft_handle, TOKU_DB_FRAGMENTATION report) {
+ int fd = toku_cachefile_get_fd(ft_handle->ft->cf);
+ toku_ft_lock(ft_handle->ft);
+
+ int64_t file_size;
+ int r = toku_os_get_file_size(fd, &file_size);
+ if (r == 0) {
+ report->file_size_bytes = file_size;
+ ft_handle->ft->blocktable.get_fragmentation_unlocked(report);
+ }
+ toku_ft_unlock(ft_handle->ft);
+ return r;
+}
+
+static bool is_empty_fast_iter (FT_HANDLE ft_handle, FTNODE node) {
+ if (node->height > 0) {
+ for (int childnum=0; childnum<node->n_children; childnum++) {
+ if (toku_bnc_nbytesinbuf(BNC(node, childnum)) != 0) {
+                return false; // it's not empty if there are bytes in buffers
+ }
+ FTNODE childnode;
+ {
+ BLOCKNUM childblocknum = BP_BLOCKNUM(node,childnum);
+ uint32_t fullhash = compute_child_fullhash(ft_handle->ft->cf, node, childnum);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
+ // don't need to pass in dependent nodes as we are not
+ // modifying nodes we are pinning
+ toku_pin_ftnode(
+ ft_handle->ft,
+ childblocknum,
+ fullhash,
+ &bfe,
+ PL_READ, // may_modify_node set to false, as nodes not modified
+ &childnode,
+ true
+ );
+ }
+            bool child_is_empty = is_empty_fast_iter(ft_handle, childnode);
+            toku_unpin_ftnode(ft_handle->ft, childnode);
+            if (!child_is_empty) return false;
+ }
+        return true;
+ } else {
+ // leaf: If the dmt is empty, we are happy.
+ for (int i = 0; i < node->n_children; i++) {
+ if (BLB_DATA(node, i)->num_klpairs()) {
+ return false;
+ }
+ }
+ return true;
+ }
+}
+
+bool toku_ft_is_empty_fast (FT_HANDLE ft_handle)
+// A fast check to see if the tree is empty. If there are any messages or leafentries, we consider the tree to be nonempty. It's possible that those
+// messages and leafentries would all optimize away and that the tree is empty, but we'll say it is nonempty.
+{
+ uint32_t fullhash;
+ FTNODE node;
+ {
+ CACHEKEY root_key;
+ toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
+ toku_pin_ftnode(
+ ft_handle->ft,
+ root_key,
+ fullhash,
+ &bfe,
+ PL_READ, // may_modify_node set to false, node does not change
+ &node,
+ true
+ );
+ }
+ bool r = is_empty_fast_iter(ft_handle, node);
+ toku_unpin_ftnode(ft_handle->ft, node);
+ return r;
+}
+
+// test-only
+int toku_ft_strerror_r(int error, char *buf, size_t buflen)
+{
+ if (error>=0) {
+ return (long) strerror_r(error, buf, buflen);
+ } else {
+ switch (error) {
+ case DB_KEYEXIST:
+ snprintf(buf, buflen, "Key exists");
+ return 0;
+ case TOKUDB_CANCELED:
+ snprintf(buf, buflen, "User canceled operation");
+ return 0;
+ default:
+ snprintf(buf, buflen, "Unknown error %d", error);
+ return EINVAL;
+ }
+ }
+}
+
+int toku_keycompare(const void *key1, uint32_t key1len, const void *key2, uint32_t key2len) {
+ int comparelen = key1len < key2len ? key1len : key2len;
+ int c = memcmp(key1, key2, comparelen);
+ if (__builtin_expect(c != 0, 1)) {
+ return c;
+ } else {
+ if (key1len < key2len) {
+ return -1;
+ } else if (key1len > key2len) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+}
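+
+// Illustrative examples of the ordering implemented above (memcmp over the
+// common prefix; on a tie, the shorter key sorts first):
+//
+//     toku_keycompare("a",  1, "b",  1) < 0   // differing byte decides
+//     toku_keycompare("ab", 2, "a",  1) > 0   // a key sorts after its own prefix
+//     toku_keycompare("ab", 2, "ab", 2) == 0  // identical keys compare equal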
+
+int toku_builtin_compare_fun(DB *db __attribute__((__unused__)), const DBT *a, const DBT*b) {
+ return toku_keycompare(a->data, a->size, b->data, b->size);
+}
+
+#include <toku_race_tools.h>
+void __attribute__((__constructor__)) toku_ft_helgrind_ignore(void);
+void
+toku_ft_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&ft_status, sizeof ft_status);
+}
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.h b/storage/tokudb/PerconaFT/ft/ft-ops.h
new file mode 100644
index 00000000..7b6d0634
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.h
@@ -0,0 +1,295 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// This must be first to make the 64-bit file mode work right in Linux
+#define _FILE_OFFSET_BITS 64
+
+#include <db.h>
+
+#include "ft/cachetable/cachetable.h"
+#include "ft/comparator.h"
+#include "ft/msg.h"
+#include "util/dbt.h"
+
+#define OS_PATH_SEPARATOR '/'
+
+typedef struct ft_handle *FT_HANDLE;
+
+int toku_open_ft_handle (const char *fname, int is_create, FT_HANDLE *, int nodesize, int basementnodesize, enum toku_compression_method compression_method, CACHETABLE, TOKUTXN, int(*)(DB *,const DBT*,const DBT*)) __attribute__ ((warn_unused_result));
+
+// Effect: changes the descriptor for the ft of the given handle.
+// Requires:
+// - cannot change the descriptor for the same ft in two threads in parallel.
+// - can only update the cmp descriptor immediately after opening the FIRST ft handle for this ft and before
+//     ANY operations. To update the cmp descriptor after any operations have already happened, all handles
+//     and transactions must be closed and reopened before the change; then you can update the cmp descriptor.
+void toku_ft_change_descriptor(FT_HANDLE t, const DBT* old_descriptor, const DBT* new_descriptor, bool do_log, TOKUTXN txn, bool update_cmp_descriptor);
+uint32_t toku_serialize_descriptor_size(DESCRIPTOR desc);
+
+void toku_ft_handle_create(FT_HANDLE *ft);
+void toku_ft_set_flags(FT_HANDLE, unsigned int flags);
+void toku_ft_get_flags(FT_HANDLE, unsigned int *flags);
+void toku_ft_handle_set_nodesize(FT_HANDLE, unsigned int nodesize);
+void toku_ft_handle_get_nodesize(FT_HANDLE, unsigned int *nodesize);
+void toku_ft_get_maximum_advised_key_value_lengths(unsigned int *klimit, unsigned int *vlimit);
+void toku_ft_handle_set_basementnodesize(FT_HANDLE, unsigned int basementnodesize);
+void toku_ft_handle_get_basementnodesize(FT_HANDLE, unsigned int *basementnodesize);
+void toku_ft_handle_set_compression_method(FT_HANDLE, enum toku_compression_method);
+void toku_ft_handle_get_compression_method(FT_HANDLE, enum toku_compression_method *);
+void toku_ft_handle_set_fanout(FT_HANDLE, unsigned int fanout);
+void toku_ft_handle_get_fanout(FT_HANDLE, unsigned int *fanout);
+int toku_ft_handle_set_memcmp_magic(FT_HANDLE, uint8_t magic);
+
+void toku_ft_set_bt_compare(FT_HANDLE ft_handle, ft_compare_func cmp_func);
+const toku::comparator &toku_ft_get_comparator(FT_HANDLE ft_handle);
+
+typedef void (*on_redirect_callback)(FT_HANDLE ft_handle, void *extra);
+void toku_ft_set_redirect_callback(FT_HANDLE ft_handle, on_redirect_callback cb, void *extra);
+
+// How updates (update/insert/deletes) work:
+// There are two flavors of upsertdels: singleton and broadcast.
+// When a singleton upsertdel message arrives it contains a key and an extra DBT.
+//
+// At the YDB layer, the function looks like
+//
+// int (*update_function)(DB*, DB_TXN*, const DBT *key, const DBT *old_val, const DBT *extra,
+// void (*set_val)(const DBT *new_val, void *set_extra), void *set_extra);
+//
+// And there are two DB functions
+//
+// int DB->update(DB *, DB_TXN *, const DBT *key, const DBT *extra);
+// Effect:
+// If there is a key-value pair visible to the txn with value old_val then the system calls
+// update_function(DB, key, old_val, extra, set_val, set_extra)
+// where set_val and set_extra are a function and a void* provided by the system.
+//    The update_function can do one of three things:
+// a) call set_val(new_val, set_extra)
+// which has the effect of doing DB->put(db, txn, key, new_val, 0)
+// overwriting the old value.
+// b) Return DB_DELETE (a new return code)
+// c) Return 0 (success) without calling set_val, which leaves the old value unchanged.
+// If there is no such key-value pair visible to the txn, then the system calls
+// update_function(DB, key, NULL, extra, set_val, set_extra)
+// and the update_function can do one of the same three things.
+// Implementation notes: Update acquires a write lock (just as DB->put
+// does). This function works by sending a UPDATE message containing
+// the key and extra.
+//
+// int DB->update_broadcast(DB *, DB_TXN*, const DBT *extra);
+// Effect: This has the same effect as building a cursor that walks
+// through the DB, calling DB->update() on every key that the cursor
+// finds.
+// Implementation note: Acquires a write lock on the entire database.
+//     This function works by sending a BROADCAST-UPDATE message containing
+//     the key and the extra.
+// (An illustrative update_function sketch follows the declarations below.)
+typedef int (*ft_update_func)(DB *db, const DBT *key, const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val, void *set_extra),
+ void *set_extra);
+void toku_ft_set_update(FT_HANDLE ft_h, ft_update_func update_fun);
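+
+// Illustrative update_function sketch (hypothetical; the "count stored as a
+// native uint32_t" semantics are an assumption chosen only to show the
+// set_val protocol described above):
+//
+//     static int add_one_update(DB *db, const DBT *key, const DBT *old_val,
+//                               const DBT *extra,
+//                               void (*set_val)(const DBT *new_val, void *set_extra),
+//                               void *set_extra) {
+//         (void) db; (void) key; (void) extra;
+//         uint32_t v = 0;
+//         if (old_val && old_val->size == sizeof v) memcpy(&v, old_val->data, sizeof v);
+//         v++;                                   // a missing key behaves like v == 0
+//         DBT new_val; memset(&new_val, 0, sizeof new_val);
+//         new_val.data = &v; new_val.size = sizeof v;
+//         set_val(&new_val, set_extra);          // has the effect of DB->put on this key
+//         return 0;
+//     }
+//     ...
+//     toku_ft_set_update(ft_h, add_one_update);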
+
+int toku_ft_handle_open(FT_HANDLE, const char *fname_in_env,
+ int is_create, int only_create, CACHETABLE ct, TOKUTXN txn, bool open_rw=true) __attribute__ ((warn_unused_result));
+int toku_ft_handle_open_recovery(FT_HANDLE, const char *fname_in_env, int is_create, int only_create, CACHETABLE ct, TOKUTXN txn,
+ FILENUM use_filenum, LSN max_acceptable_lsn) __attribute__ ((warn_unused_result));
+
+// clone an ft handle. the cloned handle has a new dict_id but refers to the same fractal tree
+int toku_ft_handle_clone(FT_HANDLE *cloned_ft_handle, FT_HANDLE ft_handle, TOKUTXN txn, bool open_rw=true);
+
+// close an ft handle during normal operation. the underlying ft may or may not close,
+// depending on whether there are still references. an lsn for this close will come from the logger.
+void toku_ft_handle_close(FT_HANDLE ft_handle);
+// close an ft handle during recovery. the underlying ft must close, and will use the given lsn.
+void toku_ft_handle_close_recovery(FT_HANDLE ft_handle, LSN oplsn);
+
+// At the ydb layer, a DICTIONARY_ID uniquely identifies an open dictionary.
+// With the introduction of the loader (ticket 2216), it is possible for the file that holds
+// an open dictionary to change, so these are now separate and independent unique identifiers (see FILENUM)
+struct DICTIONARY_ID {
+ uint64_t dictid;
+};
+static const DICTIONARY_ID DICTIONARY_ID_NONE = { .dictid = 0 };
+
+int
+toku_ft_handle_open_with_dict_id(
+ FT_HANDLE ft_h,
+ const char *fname_in_env,
+ int is_create,
+ int only_create,
+ CACHETABLE cachetable,
+ TOKUTXN txn,
+ DICTIONARY_ID use_dictionary_id
+ ) __attribute__ ((warn_unused_result));
+
+// Effect: Insert a key and data pair into an ft
+void toku_ft_insert (FT_HANDLE ft_h, DBT *k, DBT *v, TOKUTXN txn);
+
+// Returns: 0 if the key was inserted, DB_KEYEXIST if the key already exists
+int toku_ft_insert_unique(FT_HANDLE ft, DBT *k, DBT *v, TOKUTXN txn, bool do_logging);
+
+// Effect: Optimize the ft
+void toku_ft_optimize (FT_HANDLE ft_h);
+
+// Effect: Insert a key and data pair into an ft if the oplsn is newer than the ft's lsn. This function is called during recovery.
+void toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *k, DBT *v, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, enum ft_msg_type type);
+
+// Effect: Send an update message into an ft. This function is called
+// during recovery.
+void toku_ft_maybe_update(FT_HANDLE ft_h, const DBT *key, const DBT *update_function_extra, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging);
+
+// Effect: Send a broadcasting update message into an ft. This function
+// is called during recovery.
+void toku_ft_maybe_update_broadcast(FT_HANDLE ft_h, const DBT *update_function_extra, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, bool is_resetting_op);
+
+void toku_ft_load_recovery(TOKUTXN txn, FILENUM old_filenum, char const * new_iname, int do_fsync, int do_log, LSN *load_lsn);
+void toku_ft_load(FT_HANDLE ft_h, TOKUTXN txn, char const * new_iname, int do_fsync, LSN *get_lsn);
+void toku_ft_hot_index_recovery(TOKUTXN txn, FILENUMS filenums, int do_fsync, int do_log, LSN *hot_index_lsn);
+void toku_ft_hot_index(FT_HANDLE ft_h, TOKUTXN txn, FILENUMS filenums, int do_fsync, LSN *lsn);
+
+void toku_ft_log_put_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *fts, uint32_t num_fts, const DBT *key, const DBT *val);
+void toku_ft_log_put (TOKUTXN txn, FT_HANDLE ft_h, const DBT *key, const DBT *val);
+void toku_ft_log_del_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *fts, uint32_t num_fts, const DBT *key, const DBT *val);
+void toku_ft_log_del (TOKUTXN txn, FT_HANDLE ft_h, const DBT *key);
+
+// Effect: Delete a key from an ft
+void toku_ft_delete (FT_HANDLE ft_h, DBT *k, TOKUTXN txn);
+
+// Effect: Delete a key from an ft if the oplsn is newer than the ft lsn. This function is called during recovery.
+void toku_ft_maybe_delete (FT_HANDLE ft_h, DBT *k, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging);
+
+TXNID toku_ft_get_oldest_referenced_xid_estimate(FT_HANDLE ft_h);
+struct txn_manager *toku_ft_get_txn_manager(FT_HANDLE ft_h);
+
+struct txn_gc_info;
+void toku_ft_send_insert(FT_HANDLE ft_h, DBT *key, DBT *val, XIDS xids, enum ft_msg_type type, txn_gc_info *gc_info);
+void toku_ft_send_delete(FT_HANDLE ft_h, DBT *key, XIDS xids, txn_gc_info *gc_info);
+void toku_ft_send_commit_any(FT_HANDLE ft_h, DBT *key, XIDS xids, txn_gc_info *gc_info);
+
+int toku_close_ft_handle_nolsn (FT_HANDLE, char **error_string) __attribute__ ((warn_unused_result));
+
+int toku_dump_ft (FILE *,FT_HANDLE ft_h) __attribute__ ((warn_unused_result));
+
+extern int toku_ft_debug_mode;
+int toku_verify_ft (FT_HANDLE ft_h) __attribute__ ((warn_unused_result));
+int toku_verify_ft_with_progress (FT_HANDLE ft_h, int (*progress_callback)(void *extra, float progress), void *extra, int verbose, int keep_going) __attribute__ ((warn_unused_result));
+
+int toku_ft_recount_rows(
+ FT_HANDLE ft,
+ int (*progress_callback)(
+ uint64_t count,
+ uint64_t deleted,
+ void* progress_extra),
+ void* progress_extra);
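+// Callback sketch (illustrative; the name is hypothetical). Judging by ft-recount-rows.cc below,
+// a nonzero return from the callback cancels the recount and the new row count is not stored.
+//
+//     static int recount_progress(uint64_t count, uint64_t deleted, void *extra) {
+//         (void) count; (void) deleted; (void) extra;
+//         return 0;  // 0 == keep going
+//     }
+//     // int r = toku_ft_recount_rows(ft, recount_progress, nullptr);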
+
+
+DICTIONARY_ID toku_ft_get_dictionary_id(FT_HANDLE);
+
+enum ft_flags {
+ //TOKU_DB_DUP = (1<<0), //Obsolete #2862
+ //TOKU_DB_DUPSORT = (1<<1), //Obsolete #2862
+ TOKU_DB_KEYCMP_BUILTIN = (1<<2),
+ TOKU_DB_VALCMP_BUILTIN_13 = (1<<3),
+};
+
+void toku_ft_keyrange(FT_HANDLE ft_h, DBT *key, uint64_t *less, uint64_t *equal, uint64_t *greater);
+void toku_ft_keysrange(FT_HANDLE ft_h, DBT* key_left, DBT* key_right, uint64_t *less_p, uint64_t* equal_left_p, uint64_t* middle_p, uint64_t* equal_right_p, uint64_t* greater_p, bool* middle_3_exact_p);
+
+int toku_ft_get_key_after_bytes(FT_HANDLE ft_h, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *end_key, uint64_t actually_skipped, void *extra), void *cb_extra);
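+// Callback sketch (illustrative; the name is hypothetical): presumably receives the key found
+// roughly skip_len bytes past start_key, plus the number of bytes actually skipped.
+//
+//     static void after_bytes_cb(const DBT *end_key, uint64_t actually_skipped, void *extra) {
+//         (void) end_key; (void) actually_skipped; (void) extra;  // e.g. copy end_key out via extra
+//     }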
+
+struct ftstat64_s {
+ uint64_t nkeys; /* estimate how many unique keys (even when flattened this may be an estimate) */
+ uint64_t ndata; /* estimate the number of pairs (exact when flattened and committed) */
+ uint64_t dsize; /* estimate the sum of the sizes of the pairs (exact when flattened and committed) */
+ uint64_t fsize; /* the size of the underlying file */
+ uint64_t ffree; /* Number of free bytes in the underlying file */
+ uint64_t create_time_sec; /* creation time in seconds. */
+ uint64_t modify_time_sec; /* time of last serialization, in seconds. */
+ uint64_t verify_time_sec; /* time of last verification, in seconds */
+};
+
+void toku_ft_handle_stat64 (FT_HANDLE, TOKUTXN, struct ftstat64_s *stat);
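+// Usage sketch (illustrative, assuming an open FT_HANDLE `ft` and a TOKUTXN `txn`):
+//
+//     struct ftstat64_s s;
+//     toku_ft_handle_stat64(ft, txn, &s);
+//     // s.nkeys, s.ndata and s.dsize are estimates until the tree is flattened and committed.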
+
+struct ftinfo64 {
+ uint64_t num_blocks_allocated; // number of blocks in the blocktable
+ uint64_t num_blocks_in_use; // number of blocks in use by most recent checkpoint
+ uint64_t size_allocated; // sum of sizes of blocks in blocktable
+ uint64_t size_in_use; // sum of sizes of blocks in use by most recent checkpoint
+};
+
+void toku_ft_handle_get_fractal_tree_info64(FT_HANDLE, struct ftinfo64 *);
+
+int toku_ft_handle_iterate_fractal_tree_block_map(FT_HANDLE, int (*)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*), void *);
+
+int toku_ft_layer_init(void) __attribute__ ((warn_unused_result));
+void toku_ft_open_close_lock(void);
+void toku_ft_open_close_unlock(void);
+void toku_ft_layer_destroy(void);
+void toku_ft_serialize_layer_init(void);
+void toku_ft_serialize_layer_destroy(void);
+
+void toku_maybe_truncate_file (int fd, uint64_t size_used, uint64_t expected_size, uint64_t *new_size);
+// Effect: truncate file if overallocated by at least 32MiB
+
+void toku_maybe_preallocate_in_file (int fd, int64_t size, int64_t expected_size, int64_t *new_size);
+// Effect: make the file bigger by either doubling it or growing it by 16MiB, whichever is less, until it is at least `size` bytes.
+
+int toku_ft_get_fragmentation(FT_HANDLE ft_h, TOKU_DB_FRAGMENTATION report) __attribute__ ((warn_unused_result));
+
+bool toku_ft_is_empty_fast (FT_HANDLE ft_h) __attribute__ ((warn_unused_result));
+// Effect: Return true if there are no messages or leaf entries in the tree, in which case the tree is empty. If there are messages or leaf entries, we say it is not empty,
+// even though optimizing the tree might reveal that it is in fact empty.
+
+int toku_ft_strerror_r(int error, char *buf, size_t buflen);
+// Effect: Like the XSI-compliant strerror_r, extended to db_strerror().
+// If error>=0 then the result is to do strerror_r(error, buf, buflen), that is, fill buf with a descriptive error message.
+// If error<0 then return a PerconaFT-specific error code. For unknown cases, we return -1 and set errno=EINVAL, even for cases that *should* be known. (Not all DB errors are known by this function, which is a bug.)
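+// Usage sketch (illustrative): formatting an error code `r` returned by an ft call into a
+// caller-provided buffer, in the style of strerror_r:
+//
+//     char msg[256];
+//     toku_ft_strerror_r(r, msg, sizeof msg);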
+
+extern bool garbage_collection_debug;
+
+// This is a poor place to put global options like these.
+void toku_ft_set_direct_io(bool direct_io_on);
+void toku_ft_set_compress_buffers_before_eviction(bool compress_buffers);
+
+void toku_note_deserialized_basement_node(bool fixed_key_size);
+
+// Creates all directories for the path if necessary.
+// Returns true if all directories were created successfully or
+// already exist, false otherwise.
+bool toku_create_subdirs_if_needed(const char* path);
diff --git a/storage/tokudb/PerconaFT/ft/ft-recount-rows.cc b/storage/tokudb/PerconaFT/ft/ft-recount-rows.cc
new file mode 100644
index 00000000..3b5501b6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-recount-rows.cc
@@ -0,0 +1,106 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/serialize/block_table.h"
+#include "ft/ft.h"
+#include "ft/ft-internal.h"
+#include "ft/cursor.h"
+
+struct recount_rows_extra_t {
+ int (*_progress_callback)(
+ uint64_t count,
+ uint64_t deleted,
+ void* progress_extra);
+ void* _progress_extra;
+ uint64_t _keys;
+ bool _cancelled;
+};
+
+static int recount_rows_found(
+ uint32_t UU(keylen),
+ const void* key,
+ uint32_t UU(vallen),
+ const void* UU(val),
+ void* extra,
+ bool UU(lock_only)) {
+
+ recount_rows_extra_t* rre = (recount_rows_extra_t*)extra;
+
+ if (FT_LIKELY(key != nullptr)) {
+ rre->_keys++;
+ }
+ return rre->_cancelled
+ = rre->_progress_callback(rre->_keys, 0, rre->_progress_extra);
+}
+static bool recount_rows_interrupt(void* extra, uint64_t deleted_rows) {
+ recount_rows_extra_t* rre = (recount_rows_extra_t*)extra;
+
+ return rre->_cancelled =
+ rre->_progress_callback(rre->_keys, deleted_rows, rre->_progress_extra);
+}
+int toku_ft_recount_rows(FT_HANDLE ft,
+ int (*progress_callback)(uint64_t count,
+ uint64_t deleted,
+ void* progress_extra),
+ void* progress_extra) {
+ int ret = 0;
+ recount_rows_extra_t rre = {progress_callback, progress_extra, 0, false};
+
+ ft_cursor c;
+ ret = toku_ft_cursor_create(ft, &c, nullptr, C_READ_ANY, false, false);
+ if (ret)
+ return ret;
+
+ toku_ft_cursor_set_check_interrupt_cb(&c, recount_rows_interrupt, &rre);
+
+ ret = toku_ft_cursor_first(&c, recount_rows_found, &rre);
+ while (FT_LIKELY(ret == 0)) {
+ ret = toku_ft_cursor_next(&c, recount_rows_found, &rre);
+ }
+
+ toku_ft_cursor_destroy(&c);
+
+ if (rre._cancelled == false) {
+ // update ft count
+ toku_unsafe_set(&ft->ft->in_memory_logical_rows, rre._keys);
+ ft->ft->h->set_dirty();
+ ret = 0;
+ }
+
+ return ret;
+}
diff --git a/storage/tokudb/PerconaFT/ft/ft-status.cc b/storage/tokudb/PerconaFT/ft/ft-status.cc
new file mode 100644
index 00000000..9b45ba21
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-status.cc
@@ -0,0 +1,503 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "ft/ft.h"
+#include "ft/ft-status.h"
+
+#include <toku_race_tools.h>
+
+LE_STATUS_S le_status;
+void LE_STATUS_S::init() {
+ if (m_initialized) return;
+#define LE_STATUS_INIT(k,c,t,l) TOKUFT_STATUS_INIT((*this), k, c, t, "le: " l, TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS)
+ LE_STATUS_INIT(LE_MAX_COMMITTED_XR, LEAF_ENTRY_MAX_COMMITTED_XR, UINT64, "max committed xr");
+ LE_STATUS_INIT(LE_MAX_PROVISIONAL_XR, LEAF_ENTRY_MAX_PROVISIONAL_XR, UINT64, "max provisional xr");
+ LE_STATUS_INIT(LE_EXPANDED, LEAF_ENTRY_EXPANDED, UINT64, "expanded");
+ LE_STATUS_INIT(LE_MAX_MEMSIZE, LEAF_ENTRY_MAX_MEMSIZE, UINT64, "max memsize");
+ LE_STATUS_INIT(LE_APPLY_GC_BYTES_IN, LEAF_ENTRY_APPLY_GC_BYTES_IN, PARCOUNT, "size of leafentries before garbage collection (during message application)");
+ LE_STATUS_INIT(LE_APPLY_GC_BYTES_OUT, LEAF_ENTRY_APPLY_GC_BYTES_OUT, PARCOUNT, "size of leafentries after garbage collection (during message application)");
+ LE_STATUS_INIT(LE_NORMAL_GC_BYTES_IN, LEAF_ENTRY_NORMAL_GC_BYTES_IN, PARCOUNT, "size of leafentries before garbage collection (outside message application)");
+ LE_STATUS_INIT(LE_NORMAL_GC_BYTES_OUT, LEAF_ENTRY_NORMAL_GC_BYTES_OUT, PARCOUNT, "size of leafentries after garbage collection (outside message application)");
+ m_initialized = true;
+#undef LE_STATUS_INIT
+}
+void LE_STATUS_S::destroy() {
+ if (!m_initialized) return;
+ for (int i = 0; i < LE_STATUS_NUM_ROWS; ++i) {
+ if (status[i].type == PARCOUNT) {
+ destroy_partitioned_counter(status[i].value.parcount);
+ }
+ }
+}
+
+
+
+CHECKPOINT_STATUS_S cp_status;
+void CHECKPOINT_STATUS_S::init(void) {
+ if (m_initialized) return;
+#define CP_STATUS_INIT(k,c,t,l) TOKUFT_STATUS_INIT((*this), k, c, t, "checkpoint: " l, TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS)
+ CP_STATUS_INIT(CP_PERIOD, CHECKPOINT_PERIOD, UINT64, "period");
+ CP_STATUS_INIT(CP_FOOTPRINT, CHECKPOINT_FOOTPRINT, UINT64, "footprint");
+ CP_STATUS_INIT(CP_TIME_LAST_CHECKPOINT_BEGIN, CHECKPOINT_LAST_BEGAN, UNIXTIME, "last checkpoint began");
+ CP_STATUS_INIT(CP_TIME_LAST_CHECKPOINT_BEGIN_COMPLETE, CHECKPOINT_LAST_COMPLETE_BEGAN, UNIXTIME, "last complete checkpoint began");
+ CP_STATUS_INIT(CP_TIME_LAST_CHECKPOINT_END, CHECKPOINT_LAST_COMPLETE_ENDED, UNIXTIME, "last complete checkpoint ended");
+ CP_STATUS_INIT(CP_TIME_CHECKPOINT_DURATION, CHECKPOINT_DURATION, UINT64, "time spent during checkpoint (begin and end phases)");
+ CP_STATUS_INIT(CP_TIME_CHECKPOINT_DURATION_LAST, CHECKPOINT_DURATION_LAST, UINT64, "time spent during last checkpoint (begin and end phases)");
+ CP_STATUS_INIT(CP_LAST_LSN, CHECKPOINT_LAST_LSN, UINT64, "last complete checkpoint LSN");
+ CP_STATUS_INIT(CP_CHECKPOINT_COUNT, CHECKPOINT_TAKEN, UINT64, "checkpoints taken ");
+ CP_STATUS_INIT(CP_CHECKPOINT_COUNT_FAIL, CHECKPOINT_FAILED, UINT64, "checkpoints failed");
+ CP_STATUS_INIT(CP_WAITERS_NOW, CHECKPOINT_WAITERS_NOW, UINT64, "waiters now");
+ CP_STATUS_INIT(CP_WAITERS_MAX, CHECKPOINT_WAITERS_MAX, UINT64, "waiters max");
+ CP_STATUS_INIT(CP_CLIENT_WAIT_ON_MO, CHECKPOINT_CLIENT_WAIT_ON_MO, UINT64, "non-checkpoint client wait on mo lock");
+ CP_STATUS_INIT(CP_CLIENT_WAIT_ON_CS, CHECKPOINT_CLIENT_WAIT_ON_CS, UINT64, "non-checkpoint client wait on cs lock");
+ CP_STATUS_INIT(CP_BEGIN_TIME, CHECKPOINT_BEGIN_TIME, UINT64, "checkpoint begin time");
+ CP_STATUS_INIT(CP_LONG_BEGIN_COUNT, CHECKPOINT_LONG_BEGIN_COUNT, UINT64, "long checkpoint begin count");
+ CP_STATUS_INIT(CP_LONG_BEGIN_TIME, CHECKPOINT_LONG_BEGIN_TIME, UINT64, "long checkpoint begin time");
+ CP_STATUS_INIT(CP_END_TIME, CHECKPOINT_END_TIME, UINT64, "checkpoint end time");
+ CP_STATUS_INIT(CP_LONG_END_COUNT, CHECKPOINT_LONG_END_COUNT, UINT64, "long checkpoint end count");
+ CP_STATUS_INIT(CP_LONG_END_TIME, CHECKPOINT_LONG_END_TIME, UINT64, "long checkpoint end time");
+
+ m_initialized = true;
+#undef CP_STATUS_INIT
+}
+void CHECKPOINT_STATUS_S::destroy() {
+ if (!m_initialized) return;
+ for (int i = 0; i < CP_STATUS_NUM_ROWS; ++i) {
+ if (status[i].type == PARCOUNT) {
+ destroy_partitioned_counter(status[i].value.parcount);
+ }
+ }
+}
+
+CACHETABLE_STATUS_S ct_status;
+void CACHETABLE_STATUS_S::init() {
+ if (m_initialized) return;
+#define CT_STATUS_INIT(k,c,t,l) TOKUFT_STATUS_INIT((*this), k, c, t, "cachetable: " l, TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS)
+ CT_STATUS_INIT(CT_MISS, CACHETABLE_MISS, UINT64, "miss");
+ CT_STATUS_INIT(CT_MISSTIME, CACHETABLE_MISS_TIME, UINT64, "miss time");
+ CT_STATUS_INIT(CT_PREFETCHES, CACHETABLE_PREFETCHES, UINT64, "prefetches");
+ CT_STATUS_INIT(CT_SIZE_CURRENT, CACHETABLE_SIZE_CURRENT, UINT64, "size current");
+ CT_STATUS_INIT(CT_SIZE_LIMIT, CACHETABLE_SIZE_LIMIT, UINT64, "size limit");
+ CT_STATUS_INIT(CT_SIZE_WRITING, CACHETABLE_SIZE_WRITING, UINT64, "size writing");
+ CT_STATUS_INIT(CT_SIZE_NONLEAF, CACHETABLE_SIZE_NONLEAF, UINT64, "size nonleaf");
+ CT_STATUS_INIT(CT_SIZE_LEAF, CACHETABLE_SIZE_LEAF, UINT64, "size leaf");
+ CT_STATUS_INIT(CT_SIZE_ROLLBACK, CACHETABLE_SIZE_ROLLBACK, UINT64, "size rollback");
+ CT_STATUS_INIT(CT_SIZE_CACHEPRESSURE, CACHETABLE_SIZE_CACHEPRESSURE, UINT64, "size cachepressure");
+ CT_STATUS_INIT(CT_SIZE_CLONED, CACHETABLE_SIZE_CLONED, UINT64, "size currently cloned data for checkpoint");
+ CT_STATUS_INIT(CT_EVICTIONS, CACHETABLE_EVICTIONS, UINT64, "evictions");
+ CT_STATUS_INIT(CT_CLEANER_EXECUTIONS, CACHETABLE_CLEANER_EXECUTIONS, UINT64, "cleaner executions");
+ CT_STATUS_INIT(CT_CLEANER_PERIOD, CACHETABLE_CLEANER_PERIOD, UINT64, "cleaner period");
+ CT_STATUS_INIT(CT_CLEANER_ITERATIONS, CACHETABLE_CLEANER_ITERATIONS, UINT64, "cleaner iterations");
+ CT_STATUS_INIT(CT_WAIT_PRESSURE_COUNT, CACHETABLE_WAIT_PRESSURE_COUNT, UINT64, "number of waits on cache pressure");
+ CT_STATUS_INIT(CT_WAIT_PRESSURE_TIME, CACHETABLE_WAIT_PRESSURE_TIME, UINT64, "time waiting on cache pressure");
+ CT_STATUS_INIT(CT_LONG_WAIT_PRESSURE_COUNT, CACHETABLE_LONG_WAIT_PRESSURE_COUNT, UINT64, "number of long waits on cache pressure");
+ CT_STATUS_INIT(CT_LONG_WAIT_PRESSURE_TIME, CACHETABLE_LONG_WAIT_PRESSURE_TIME, UINT64, "long time waiting on cache pressure");
+
+ CT_STATUS_INIT(CT_POOL_CLIENT_NUM_THREADS, CACHETABLE_POOL_CLIENT_NUM_THREADS, UINT64, "client pool: number of threads in pool");
+ CT_STATUS_INIT(CT_POOL_CLIENT_NUM_THREADS_ACTIVE, CACHETABLE_POOL_CLIENT_NUM_THREADS_ACTIVE, UINT64, "client pool: number of currently active threads in pool");
+ CT_STATUS_INIT(CT_POOL_CLIENT_QUEUE_SIZE, CACHETABLE_POOL_CLIENT_QUEUE_SIZE, UINT64, "client pool: number of currently queued work items");
+ CT_STATUS_INIT(CT_POOL_CLIENT_MAX_QUEUE_SIZE, CACHETABLE_POOL_CLIENT_MAX_QUEUE_SIZE, UINT64, "client pool: largest number of queued work items");
+ CT_STATUS_INIT(CT_POOL_CLIENT_TOTAL_ITEMS_PROCESSED, CACHETABLE_POOL_CLIENT_TOTAL_ITEMS_PROCESSED, UINT64, "client pool: total number of work items processed");
+ CT_STATUS_INIT(CT_POOL_CLIENT_TOTAL_EXECUTION_TIME, CACHETABLE_POOL_CLIENT_TOTAL_EXECUTION_TIME, UINT64, "client pool: total execution time of processing work items");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_NUM_THREADS, CACHETABLE_POOL_CACHETABLE_NUM_THREADS, UINT64, "cachetable pool: number of threads in pool");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_NUM_THREADS_ACTIVE, CACHETABLE_POOL_CACHETABLE_NUM_THREADS_ACTIVE, UINT64, "cachetable pool: number of currently active threads in pool");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_QUEUE_SIZE, CACHETABLE_POOL_CACHETABLE_QUEUE_SIZE, UINT64, "cachetable pool: number of currently queued work items");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_MAX_QUEUE_SIZE, CACHETABLE_POOL_CACHETABLE_MAX_QUEUE_SIZE, UINT64, "cachetable pool: largest number of queued work items");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_TOTAL_ITEMS_PROCESSED, CACHETABLE_POOL_CACHETABLE_TOTAL_ITEMS_PROCESSED, UINT64, "cachetable pool: total number of work items processed");
+ CT_STATUS_INIT(CT_POOL_CACHETABLE_TOTAL_EXECUTION_TIME, CACHETABLE_POOL_CACHETABLE_TOTAL_EXECUTION_TIME, UINT64, "cachetable pool: total execution time of processing work items");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_NUM_THREADS, CACHETABLE_POOL_CHECKPOINT_NUM_THREADS, UINT64, "checkpoint pool: number of threads in pool");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_NUM_THREADS_ACTIVE, CACHETABLE_POOL_CHECKPOINT_NUM_THREADS_ACTIVE, UINT64, "checkpoint pool: number of currently active threads in pool");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_QUEUE_SIZE, CACHETABLE_POOL_CHECKPOINT_QUEUE_SIZE, UINT64, "checkpoint pool: number of currently queued work items");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_MAX_QUEUE_SIZE, CACHETABLE_POOL_CHECKPOINT_MAX_QUEUE_SIZE, UINT64, "checkpoint pool: largest number of queued work items");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_TOTAL_ITEMS_PROCESSED, CACHETABLE_POOL_CHECKPOINT_TOTAL_ITEMS_PROCESSED, UINT64, "checkpoint pool: total number of work items processed");
+ CT_STATUS_INIT(CT_POOL_CHECKPOINT_TOTAL_EXECUTION_TIME, CACHETABLE_POOL_CHECKPOINT_TOTAL_EXECUTION_TIME, UINT64, "checkpoint pool: total execution time of processing work items");
+
+ m_initialized = true;
+#undef CT_STATUS_INIT
+}
+void CACHETABLE_STATUS_S::destroy() {
+ if (!m_initialized) return;
+ for (int i = 0; i < CT_STATUS_NUM_ROWS; ++i) {
+ if (status[i].type == PARCOUNT) {
+ destroy_partitioned_counter(status[i].value.parcount);
+ }
+ }
+}
+
+
+
+LTM_STATUS_S ltm_status;
+void LTM_STATUS_S::init() {
+ if (m_initialized) return;
+#define LTM_STATUS_INIT(k,c,t,l) TOKUFT_STATUS_INIT((*this), k, c, t, "locktree: " l, TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS)
+ LTM_STATUS_INIT(LTM_SIZE_CURRENT, LOCKTREE_MEMORY_SIZE, UINT64, "memory size");
+ LTM_STATUS_INIT(LTM_SIZE_LIMIT, LOCKTREE_MEMORY_SIZE_LIMIT, UINT64, "memory size limit");
+ LTM_STATUS_INIT(LTM_ESCALATION_COUNT, LOCKTREE_ESCALATION_NUM, UINT64, "number of times lock escalation ran");
+ LTM_STATUS_INIT(LTM_ESCALATION_TIME, LOCKTREE_ESCALATION_SECONDS, TOKUTIME, "time spent running escalation (seconds)");
+ LTM_STATUS_INIT(LTM_ESCALATION_LATEST_RESULT, LOCKTREE_LATEST_POST_ESCALATION_MEMORY_SIZE, UINT64, "latest post-escalation memory size");
+ LTM_STATUS_INIT(LTM_NUM_LOCKTREES, LOCKTREE_OPEN_CURRENT, UINT64, "number of locktrees open now");
+ LTM_STATUS_INIT(LTM_LOCK_REQUESTS_PENDING, LOCKTREE_PENDING_LOCK_REQUESTS, UINT64, "number of pending lock requests");
+ LTM_STATUS_INIT(LTM_STO_NUM_ELIGIBLE, LOCKTREE_STO_ELIGIBLE_NUM, UINT64, "number of locktrees eligible for the STO");
+ LTM_STATUS_INIT(LTM_STO_END_EARLY_COUNT, LOCKTREE_STO_ENDED_NUM, UINT64, "number of times a locktree ended the STO early");
+ LTM_STATUS_INIT(LTM_STO_END_EARLY_TIME, LOCKTREE_STO_ENDED_SECONDS, TOKUTIME, "time spent ending the STO early (seconds)");
+ LTM_STATUS_INIT(LTM_WAIT_COUNT, LOCKTREE_WAIT_COUNT, UINT64, "number of wait locks");
+ LTM_STATUS_INIT(LTM_WAIT_TIME, LOCKTREE_WAIT_TIME, UINT64, "time waiting for locks");
+ LTM_STATUS_INIT(LTM_LONG_WAIT_COUNT, LOCKTREE_LONG_WAIT_COUNT, UINT64, "number of long wait locks");
+ LTM_STATUS_INIT(LTM_LONG_WAIT_TIME, LOCKTREE_LONG_WAIT_TIME, UINT64, "long time waiting for locks");
+ LTM_STATUS_INIT(LTM_TIMEOUT_COUNT, LOCKTREE_TIMEOUT_COUNT, UINT64, "number of lock timeouts");
+ LTM_STATUS_INIT(LTM_WAIT_ESCALATION_COUNT, LOCKTREE_WAIT_ESCALATION_COUNT, UINT64, "number of waits on lock escalation");
+ LTM_STATUS_INIT(LTM_WAIT_ESCALATION_TIME, LOCKTREE_WAIT_ESCALATION_TIME, UINT64, "time waiting on lock escalation");
+ LTM_STATUS_INIT(LTM_LONG_WAIT_ESCALATION_COUNT, LOCKTREE_LONG_WAIT_ESCALATION_COUNT, UINT64, "number of long waits on lock escalation");
+ LTM_STATUS_INIT(LTM_LONG_WAIT_ESCALATION_TIME, LOCKTREE_LONG_WAIT_ESCALATION_TIME, UINT64, "long time waiting on lock escalation");
+
+ m_initialized = true;
+#undef LTM_STATUS_INIT
+}
+void LTM_STATUS_S::destroy() {
+ if (!m_initialized) return;
+ for (int i = 0; i < LTM_STATUS_NUM_ROWS; ++i) {
+ if (status[i].type == PARCOUNT) {
+ destroy_partitioned_counter(status[i].value.parcount);
+ }
+ }
+}
+
+
+
+FT_STATUS_S ft_status;
+void FT_STATUS_S::init() {
+ if (m_initialized) return;
+#define FT_STATUS_INIT(k,c,t,l) TOKUFT_STATUS_INIT((*this), k, c, t, "ft: " l, TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS)
+ FT_STATUS_INIT(FT_UPDATES, DICTIONARY_UPDATES, PARCOUNT, "dictionary updates");
+ FT_STATUS_INIT(FT_UPDATES_BROADCAST, DICTIONARY_BROADCAST_UPDATES, PARCOUNT, "dictionary broadcast updates");
+ FT_STATUS_INIT(FT_DESCRIPTOR_SET, DESCRIPTOR_SET, PARCOUNT, "descriptor set");
+ FT_STATUS_INIT(FT_MSN_DISCARDS, MESSAGES_IGNORED_BY_LEAF_DUE_TO_MSN, PARCOUNT, "messages ignored by leaf due to msn");
+ FT_STATUS_INIT(FT_TOTAL_RETRIES, TOTAL_SEARCH_RETRIES, PARCOUNT, "total search retries due to TRY_AGAIN");
+ FT_STATUS_INIT(FT_SEARCH_TRIES_GT_HEIGHT, SEARCH_TRIES_GT_HEIGHT, PARCOUNT, "searches requiring more tries than the height of the tree");
+ FT_STATUS_INIT(FT_SEARCH_TRIES_GT_HEIGHTPLUS3, SEARCH_TRIES_GT_HEIGHTPLUS3, PARCOUNT, "searches requiring more tries than the height of the tree plus three");
+ FT_STATUS_INIT(FT_CREATE_LEAF, LEAF_NODES_CREATED, PARCOUNT, "leaf nodes created");
+ FT_STATUS_INIT(FT_CREATE_NONLEAF, NONLEAF_NODES_CREATED, PARCOUNT, "nonleaf nodes created");
+ FT_STATUS_INIT(FT_DESTROY_LEAF, LEAF_NODES_DESTROYED, PARCOUNT, "leaf nodes destroyed");
+ FT_STATUS_INIT(FT_DESTROY_NONLEAF, NONLEAF_NODES_DESTROYED, PARCOUNT, "nonleaf nodes destroyed");
+ FT_STATUS_INIT(FT_MSG_BYTES_IN, MESSAGES_INJECTED_AT_ROOT_BYTES, PARCOUNT, "bytes of messages injected at root (all trees)");
+ FT_STATUS_INIT(FT_MSG_BYTES_OUT, MESSAGES_FLUSHED_FROM_H1_TO_LEAVES_BYTES, PARCOUNT, "bytes of messages flushed from h1 nodes to leaves");
+ FT_STATUS_INIT(FT_MSG_BYTES_CURR, MESSAGES_IN_TREES_ESTIMATE_BYTES, PARCOUNT, "bytes of messages currently in trees (estimate)");
+ FT_STATUS_INIT(FT_MSG_NUM, MESSAGES_INJECTED_AT_ROOT, PARCOUNT, "messages injected at root");
+ FT_STATUS_INIT(FT_MSG_NUM_BROADCAST, BROADCASE_MESSAGES_INJECTED_AT_ROOT, PARCOUNT, "broadcast messages injected at root");
+
+ FT_STATUS_INIT(FT_NUM_BASEMENTS_DECOMPRESSED_NORMAL, BASEMENTS_DECOMPRESSED_TARGET_QUERY, PARCOUNT, "basements decompressed as a target of a query");
+ FT_STATUS_INIT(FT_NUM_BASEMENTS_DECOMPRESSED_AGGRESSIVE, BASEMENTS_DECOMPRESSED_PRELOCKED_RANGE, PARCOUNT, "basements decompressed for prelocked range");
+ FT_STATUS_INIT(FT_NUM_BASEMENTS_DECOMPRESSED_PREFETCH, BASEMENTS_DECOMPRESSED_PREFETCH, PARCOUNT, "basements decompressed for prefetch");
+ FT_STATUS_INIT(FT_NUM_BASEMENTS_DECOMPRESSED_WRITE, BASEMENTS_DECOMPRESSED_FOR_WRITE, PARCOUNT, "basements decompressed for write");
+ FT_STATUS_INIT(FT_NUM_MSG_BUFFER_DECOMPRESSED_NORMAL, BUFFERS_DECOMPRESSED_TARGET_QUERY, PARCOUNT, "buffers decompressed as a target of a query");
+ FT_STATUS_INIT(FT_NUM_MSG_BUFFER_DECOMPRESSED_AGGRESSIVE, BUFFERS_DECOMPRESSED_PRELOCKED_RANGE, PARCOUNT, "buffers decompressed for prelocked range");
+ FT_STATUS_INIT(FT_NUM_MSG_BUFFER_DECOMPRESSED_PREFETCH, BUFFERS_DECOMPRESSED_PREFETCH, PARCOUNT, "buffers decompressed for prefetch");
+ FT_STATUS_INIT(FT_NUM_MSG_BUFFER_DECOMPRESSED_WRITE, BUFFERS_DECOMPRESSED_FOR_WRITE, PARCOUNT, "buffers decompressed for write");
+
+ // Eviction statistics:
+ FT_STATUS_INIT(FT_FULL_EVICTIONS_LEAF, LEAF_NODE_FULL_EVICTIONS, PARCOUNT, "leaf node full evictions");
+ FT_STATUS_INIT(FT_FULL_EVICTIONS_LEAF_BYTES, LEAF_NODE_FULL_EVICTIONS_BYTES, PARCOUNT, "leaf node full evictions (bytes)");
+ FT_STATUS_INIT(FT_FULL_EVICTIONS_NONLEAF, NONLEAF_NODE_FULL_EVICTIONS, PARCOUNT, "nonleaf node full evictions");
+ FT_STATUS_INIT(FT_FULL_EVICTIONS_NONLEAF_BYTES, NONLEAF_NODE_FULL_EVICTIONS_BYTES, PARCOUNT, "nonleaf node full evictions (bytes)");
+ FT_STATUS_INIT(FT_PARTIAL_EVICTIONS_LEAF, LEAF_NODE_PARTIAL_EVICTIONS, PARCOUNT, "leaf node partial evictions");
+ FT_STATUS_INIT(FT_PARTIAL_EVICTIONS_LEAF_BYTES, LEAF_NODE_PARTIAL_EVICTIONS_BYTES, PARCOUNT, "leaf node partial evictions (bytes)");
+ FT_STATUS_INIT(FT_PARTIAL_EVICTIONS_NONLEAF, NONLEAF_NODE_PARTIAL_EVICTIONS, PARCOUNT, "nonleaf node partial evictions");
+ FT_STATUS_INIT(FT_PARTIAL_EVICTIONS_NONLEAF_BYTES, NONLEAF_NODE_PARTIAL_EVICTIONS_BYTES, PARCOUNT, "nonleaf node partial evictions (bytes)");
+
+ // Disk read statistics:
+ //
+ // Pivots: For queries, prefetching, or writing.
+ FT_STATUS_INIT(FT_NUM_PIVOTS_FETCHED_QUERY, PIVOTS_FETCHED_FOR_QUERY, PARCOUNT, "pivots fetched for query");
+ FT_STATUS_INIT(FT_BYTES_PIVOTS_FETCHED_QUERY, PIVOTS_FETCHED_FOR_QUERY_BYTES, PARCOUNT, "pivots fetched for query (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_PIVOTS_FETCHED_QUERY, PIVOTS_FETCHED_FOR_QUERY_SECONDS, TOKUTIME, "pivots fetched for query (seconds)");
+ FT_STATUS_INIT(FT_NUM_PIVOTS_FETCHED_PREFETCH, PIVOTS_FETCHED_FOR_PREFETCH, PARCOUNT, "pivots fetched for prefetch");
+ FT_STATUS_INIT(FT_BYTES_PIVOTS_FETCHED_PREFETCH, PIVOTS_FETCHED_FOR_PREFETCH_BYTES, PARCOUNT, "pivots fetched for prefetch (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_PIVOTS_FETCHED_PREFETCH, PIVOTS_FETCHED_FOR_PREFETCH_SECONDS, TOKUTIME, "pivots fetched for prefetch (seconds)");
+ FT_STATUS_INIT(FT_NUM_PIVOTS_FETCHED_WRITE, PIVOTS_FETCHED_FOR_WRITE, PARCOUNT, "pivots fetched for write");
+ FT_STATUS_INIT(FT_BYTES_PIVOTS_FETCHED_WRITE, PIVOTS_FETCHED_FOR_WRITE_BYTES, PARCOUNT, "pivots fetched for write (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_PIVOTS_FETCHED_WRITE, PIVOTS_FETCHED_FOR_WRITE_SECONDS, TOKUTIME, "pivots fetched for write (seconds)");
+ // Basements: For queries, aggressive fetching in prelocked range, prefetching, or writing.
+ FT_STATUS_INIT(FT_NUM_BASEMENTS_FETCHED_NORMAL, BASEMENTS_FETCHED_TARGET_QUERY, PARCOUNT, "basements fetched as a target of a query");
+ FT_STATUS_INIT(FT_BYTES_BASEMENTS_FETCHED_NORMAL, BASEMENTS_FETCHED_TARGET_QUERY_BYTES, PARCOUNT, "basements fetched as a target of a query (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_BASEMENTS_FETCHED_NORMAL, BASEMENTS_FETCHED_TARGET_QUERY_SECONDS, TOKUTIME, "basements fetched as a target of a query (seconds)");
+ FT_STATUS_INIT(FT_NUM_BASEMENTS_FETCHED_AGGRESSIVE, BASEMENTS_FETCHED_PRELOCKED_RANGE, PARCOUNT, "basements fetched for prelocked range");
+ FT_STATUS_INIT(FT_BYTES_BASEMENTS_FETCHED_AGGRESSIVE, BASEMENTS_FETCHED_PRELOCKED_RANGE_BYTES, PARCOUNT, "basements fetched for prelocked range (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_BASEMENTS_FETCHED_AGGRESSIVE, BASEMENTS_FETCHED_PRELOCKED_RANGE_SECONDS, TOKUTIME, "basements fetched for prelocked range (seconds)");
+ FT_STATUS_INIT(FT_NUM_BASEMENTS_FETCHED_PREFETCH, BASEMENTS_FETCHED_PREFETCH, PARCOUNT, "basements fetched for prefetch");
+ FT_STATUS_INIT(FT_BYTES_BASEMENTS_FETCHED_PREFETCH, BASEMENTS_FETCHED_PREFETCH_BYTES, PARCOUNT, "basements fetched for prefetch (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_BASEMENTS_FETCHED_PREFETCH, BASEMENTS_FETCHED_PREFETCH_SECONDS, TOKUTIME, "basements fetched for prefetch (seconds)");
+ FT_STATUS_INIT(FT_NUM_BASEMENTS_FETCHED_WRITE, BASEMENTS_FETCHED_FOR_WRITE, PARCOUNT, "basements fetched for write");
+ FT_STATUS_INIT(FT_BYTES_BASEMENTS_FETCHED_WRITE, BASEMENTS_FETCHED_FOR_WRITE_BYTES, PARCOUNT, "basements fetched for write (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_BASEMENTS_FETCHED_WRITE, BASEMENTS_FETCHED_FOR_WRITE_SECONDS, TOKUTIME, "basements fetched for write (seconds)");
+ // Buffers: For queries, aggressive fetching in prelocked range, prefetching, or writing.
+ FT_STATUS_INIT(FT_NUM_MSG_BUFFER_FETCHED_NORMAL, BUFFERS_FETCHED_TARGET_QUERY, PARCOUNT, "buffers fetched as a target of a query");
+ FT_STATUS_INIT(FT_BYTES_MSG_BUFFER_FETCHED_NORMAL, BUFFERS_FETCHED_TARGET_QUERY_BYTES, PARCOUNT, "buffers fetched as a target of a query (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_MSG_BUFFER_FETCHED_NORMAL, BUFFERS_FETCHED_TARGET_QUERY_SECONDS, TOKUTIME, "buffers fetched as a target of a query (seconds)");
+ FT_STATUS_INIT(FT_NUM_MSG_BUFFER_FETCHED_AGGRESSIVE, BUFFERS_FETCHED_PRELOCKED_RANGE, PARCOUNT, "buffers fetched for prelocked range");
+ FT_STATUS_INIT(FT_BYTES_MSG_BUFFER_FETCHED_AGGRESSIVE, BUFFERS_FETCHED_PRELOCKED_RANGE_BYTES, PARCOUNT, "buffers fetched for prelocked range (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_MSG_BUFFER_FETCHED_AGGRESSIVE, BUFFERS_FETCHED_PRELOCKED_RANGE_SECONDS, TOKUTIME, "buffers fetched for prelocked range (seconds)");
+ FT_STATUS_INIT(FT_NUM_MSG_BUFFER_FETCHED_PREFETCH, BUFFERS_FETCHED_PREFETCH, PARCOUNT, "buffers fetched for prefetch");
+ FT_STATUS_INIT(FT_BYTES_MSG_BUFFER_FETCHED_PREFETCH, BUFFERS_FETCHED_PREFETCH_BYTES, PARCOUNT, "buffers fetched for prefetch (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_MSG_BUFFER_FETCHED_PREFETCH, BUFFERS_FETCHED_PREFETCH_SECONDS, TOKUTIME, "buffers fetched for prefetch (seconds)");
+ FT_STATUS_INIT(FT_NUM_MSG_BUFFER_FETCHED_WRITE, BUFFERS_FETCHED_FOR_WRITE, PARCOUNT, "buffers fetched for write");
+ FT_STATUS_INIT(FT_BYTES_MSG_BUFFER_FETCHED_WRITE, BUFFERS_FETCHED_FOR_WRITE_BYTES, PARCOUNT, "buffers fetched for write (bytes)");
+ FT_STATUS_INIT(FT_TOKUTIME_MSG_BUFFER_FETCHED_WRITE, BUFFERS_FETCHED_FOR_WRITE_SECONDS, TOKUTIME, "buffers fetched for write (seconds)");
+
+ // Disk write statistics.
+ //
+ // Leaf/Nonleaf: Not for checkpoint
+ FT_STATUS_INIT(FT_DISK_FLUSH_LEAF, LEAF_NODES_FLUSHED_NOT_CHECKPOINT, PARCOUNT, "leaf nodes flushed to disk (not for checkpoint)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_LEAF_BYTES, LEAF_NODES_FLUSHED_NOT_CHECKPOINT_BYTES, PARCOUNT, "leaf nodes flushed to disk (not for checkpoint) (bytes)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_LEAF_UNCOMPRESSED_BYTES, LEAF_NODES_FLUSHED_NOT_CHECKPOINT_UNCOMPRESSED_BYTES, PARCOUNT, "leaf nodes flushed to disk (not for checkpoint) (uncompressed bytes)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_LEAF_TOKUTIME, LEAF_NODES_FLUSHED_NOT_CHECKPOINT_SECONDS, TOKUTIME, "leaf nodes flushed to disk (not for checkpoint) (seconds)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_NONLEAF, NONLEAF_NODES_FLUSHED_TO_DISK_NOT_CHECKPOINT, PARCOUNT, "nonleaf nodes flushed to disk (not for checkpoint)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_NONLEAF_BYTES, NONLEAF_NODES_FLUSHED_TO_DISK_NOT_CHECKPOINT_BYTES, PARCOUNT, "nonleaf nodes flushed to disk (not for checkpoint) (bytes)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES, NONLEAF_NODES_FLUSHED_TO_DISK_NOT_CHECKPOINT_UNCOMPRESSED_BYTES, PARCOUNT, "nonleaf nodes flushed to disk (not for checkpoint) (uncompressed bytes)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_NONLEAF_TOKUTIME, NONLEAF_NODES_FLUSHED_TO_DISK_NOT_CHECKPOINT_SECONDS, TOKUTIME, "nonleaf nodes flushed to disk (not for checkpoint) (seconds)");
+ // Leaf/Nonleaf: For checkpoint
+ FT_STATUS_INIT(FT_DISK_FLUSH_LEAF_FOR_CHECKPOINT, LEAF_NODES_FLUSHED_CHECKPOINT, PARCOUNT, "leaf nodes flushed to disk (for checkpoint)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_LEAF_BYTES_FOR_CHECKPOINT, LEAF_NODES_FLUSHED_CHECKPOINT_BYTES, PARCOUNT, "leaf nodes flushed to disk (for checkpoint) (bytes)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_LEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT, LEAF_NODES_FLUSHED_CHECKPOINT_UNCOMPRESSED_BYTES, PARCOUNT, "leaf nodes flushed to disk (for checkpoint) (uncompressed bytes)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_LEAF_TOKUTIME_FOR_CHECKPOINT, LEAF_NODES_FLUSHED_CHECKPOINT_SECONDS, TOKUTIME, "leaf nodes flushed to disk (for checkpoint) (seconds)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_NONLEAF_FOR_CHECKPOINT, NONLEAF_NODES_FLUSHED_TO_DISK_CHECKPOINT, PARCOUNT, "nonleaf nodes flushed to disk (for checkpoint)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_NONLEAF_BYTES_FOR_CHECKPOINT, NONLEAF_NODES_FLUSHED_TO_DISK_CHECKPOINT_BYTES, PARCOUNT, "nonleaf nodes flushed to disk (for checkpoint) (bytes)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT, NONLEAF_NODES_FLUSHED_TO_DISK_CHECKPOINT_UNCOMPRESSED_BYTES, PARCOUNT, "nonleaf nodes flushed to disk (for checkpoint) (uncompressed bytes)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_NONLEAF_TOKUTIME_FOR_CHECKPOINT, NONLEAF_NODES_FLUSHED_TO_DISK_CHECKPOINT_SECONDS, TOKUTIME, "nonleaf nodes flushed to disk (for checkpoint) (seconds)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_LEAF_COMPRESSION_RATIO, LEAF_NODE_COMPRESSION_RATIO, DOUBLE, "uncompressed / compressed bytes written (leaf)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_NONLEAF_COMPRESSION_RATIO, NONLEAF_NODE_COMPRESSION_RATIO, DOUBLE, "uncompressed / compressed bytes written (nonleaf)");
+ FT_STATUS_INIT(FT_DISK_FLUSH_OVERALL_COMPRESSION_RATIO, OVERALL_NODE_COMPRESSION_RATIO, DOUBLE, "uncompressed / compressed bytes written (overall)");
+
+ // CPU time statistics for [de]serialization and [de]compression.
+ FT_STATUS_INIT(FT_LEAF_COMPRESS_TOKUTIME, LEAF_COMPRESSION_TO_MEMORY_SECONDS, TOKUTIME, "leaf compression to memory (seconds)");
+ FT_STATUS_INIT(FT_LEAF_SERIALIZE_TOKUTIME, LEAF_SERIALIZATION_TO_MEMORY_SECONDS, TOKUTIME, "leaf serialization to memory (seconds)");
+ FT_STATUS_INIT(FT_LEAF_DECOMPRESS_TOKUTIME, LEAF_DECOMPRESSION_TO_MEMORY_SECONDS, TOKUTIME, "leaf decompression to memory (seconds)");
+ FT_STATUS_INIT(FT_LEAF_DESERIALIZE_TOKUTIME, LEAF_DESERIALIZATION_TO_MEMORY_SECONDS, TOKUTIME, "leaf deserialization to memory (seconds)");
+ FT_STATUS_INIT(FT_NONLEAF_COMPRESS_TOKUTIME, NONLEAF_COMPRESSION_TO_MEMORY_SECONDS, TOKUTIME, "nonleaf compression to memory (seconds)");
+ FT_STATUS_INIT(FT_NONLEAF_SERIALIZE_TOKUTIME, NONLEAF_SERIALIZATION_TO_MEMORY_SECONDS, TOKUTIME, "nonleaf serialization to memory (seconds)");
+ FT_STATUS_INIT(FT_NONLEAF_DECOMPRESS_TOKUTIME, NONLEAF_DECOMPRESSION_TO_MEMORY_SECONDS, TOKUTIME, "nonleaf decompression to memory (seconds)");
+ FT_STATUS_INIT(FT_NONLEAF_DESERIALIZE_TOKUTIME, NONLEAF_DESERIALIZATION_TO_MEMORY_SECONDS, TOKUTIME, "nonleaf deserialization to memory (seconds)");
+
+ // Promotion statistics.
+ FT_STATUS_INIT(FT_PRO_NUM_ROOT_SPLIT, PROMOTION_ROOTS_SPLIT, PARCOUNT, "promotion: roots split");
+ FT_STATUS_INIT(FT_PRO_NUM_ROOT_H0_INJECT, PROMOTION_LEAF_ROOTS_INJECTED_INTO, PARCOUNT, "promotion: leaf roots injected into");
+ FT_STATUS_INIT(FT_PRO_NUM_ROOT_H1_INJECT, PROMOTION_H1_ROOTS_INJECTED_INTO, PARCOUNT, "promotion: h1 roots injected into");
+ FT_STATUS_INIT(FT_PRO_NUM_INJECT_DEPTH_0, PROMOTION_INJECTIONS_AT_DEPTH_0, PARCOUNT, "promotion: injections at depth 0");
+ FT_STATUS_INIT(FT_PRO_NUM_INJECT_DEPTH_1, PROMOTION_INJECTIONS_AT_DEPTH_1, PARCOUNT, "promotion: injections at depth 1");
+ FT_STATUS_INIT(FT_PRO_NUM_INJECT_DEPTH_2, PROMOTION_INJECTIONS_AT_DEPTH_2, PARCOUNT, "promotion: injections at depth 2");
+ FT_STATUS_INIT(FT_PRO_NUM_INJECT_DEPTH_3, PROMOTION_INJECTIONS_AT_DEPTH_3, PARCOUNT, "promotion: injections at depth 3");
+ FT_STATUS_INIT(FT_PRO_NUM_INJECT_DEPTH_GT3, PROMOTION_INJECTIONS_LOWER_THAN_DEPTH_3, PARCOUNT, "promotion: injections lower than depth 3");
+ FT_STATUS_INIT(FT_PRO_NUM_STOP_NONEMPTY_BUF, PROMOTION_STOPPED_NONEMPTY_BUFFER, PARCOUNT, "promotion: stopped because of a nonempty buffer");
+ FT_STATUS_INIT(FT_PRO_NUM_STOP_H1, PROMOTION_STOPPED_AT_HEIGHT_1, PARCOUNT, "promotion: stopped at height 1");
+ FT_STATUS_INIT(FT_PRO_NUM_STOP_LOCK_CHILD, PROMOTION_STOPPED_CHILD_LOCKED_OR_NOT_IN_MEMORY, PARCOUNT, "promotion: stopped because the child was locked or not at all in memory");
+ FT_STATUS_INIT(FT_PRO_NUM_STOP_CHILD_INMEM, PROMOTION_STOPPED_CHILD_NOT_FULLY_IN_MEMORY, PARCOUNT, "promotion: stopped because the child was not fully in memory");
+ FT_STATUS_INIT(FT_PRO_NUM_DIDNT_WANT_PROMOTE, PROMOTION_STOPPED_AFTER_LOCKING_CHILD, PARCOUNT, "promotion: stopped anyway, after locking the child");
+ FT_STATUS_INIT(FT_BASEMENT_DESERIALIZE_FIXED_KEYSIZE, BASEMENT_DESERIALIZATION_FIXED_KEY, PARCOUNT, "basement nodes deserialized with fixed-keysize");
+ FT_STATUS_INIT(FT_BASEMENT_DESERIALIZE_VARIABLE_KEYSIZE, BASEMENT_DESERIALIZATION_VARIABLE_KEY, PARCOUNT, "basement nodes deserialized with variable-keysize");
+ FT_STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS, PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS, PARCOUNT, "promotion: succeeded in using the rightmost leaf shortcut");
+ FT_STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS, PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS, PARCOUNT, "promotion: tried the rightmost leaf shortcut but failed (out-of-bounds)");
+ FT_STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE, RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE, PARCOUNT, "promotion: tried the rightmost leaf shortcut but failed (child reactive)");
+
+ FT_STATUS_INIT(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, CURSOR_SKIP_DELETED_LEAF_ENTRY, PARCOUNT, "cursor skipped deleted leaf entries");
+
+ m_initialized = true;
+#undef FT_STATUS_INIT
+}
+void FT_STATUS_S::destroy() {
+ if (!m_initialized) return;
+ for (int i = 0; i < FT_STATUS_NUM_ROWS; ++i) {
+ if (status[i].type == PARCOUNT) {
+ destroy_partitioned_counter(status[i].value.parcount);
+ }
+ }
+}
+
+
+
+FT_FLUSHER_STATUS_S fl_status;
+void FT_FLUSHER_STATUS_S::init() {
+ if (m_initialized) return;
+#define FL_STATUS_INIT(k,c,t,l) TOKUFT_STATUS_INIT((*this), k, c, t, "ft flusher: " l, TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS)
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_TOTAL_NODES, FLUSHER_CLEANER_TOTAL_NODES, UINT64, "total nodes potentially flushed by cleaner thread");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_H1_NODES, FLUSHER_CLEANER_H1_NODES, UINT64, "height-one nodes flushed by cleaner thread");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_HGT1_NODES, FLUSHER_CLEANER_HGT1_NODES, UINT64, "height-greater-than-one nodes flushed by cleaner thread");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_EMPTY_NODES, FLUSHER_CLEANER_EMPTY_NODES, UINT64, "nodes cleaned which had empty buffers");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_NODES_DIRTIED, FLUSHER_CLEANER_NODES_DIRTIED, UINT64, "nodes dirtied by cleaner thread");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_MAX_BUFFER_SIZE, FLUSHER_CLEANER_MAX_BUFFER_SIZE, UINT64, "max bytes in a buffer flushed by cleaner thread");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_MIN_BUFFER_SIZE, FLUSHER_CLEANER_MIN_BUFFER_SIZE, UINT64, "min bytes in a buffer flushed by cleaner thread");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_TOTAL_BUFFER_SIZE, FLUSHER_CLEANER_TOTAL_BUFFER_SIZE, UINT64, "total bytes in buffers flushed by cleaner thread");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_MAX_BUFFER_WORKDONE, FLUSHER_CLEANER_MAX_BUFFER_WORKDONE, UINT64, "max workdone in a buffer flushed by cleaner thread");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_MIN_BUFFER_WORKDONE, FLUSHER_CLEANER_MIN_BUFFER_WORKDONE, UINT64, "min workdone in a buffer flushed by cleaner thread");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_TOTAL_BUFFER_WORKDONE, FLUSHER_CLEANER_TOTAL_BUFFER_WORKDONE, UINT64, "total workdone in buffers flushed by cleaner thread");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_STARTED, FLUSHER_CLEANER_NUM_LEAF_MERGES_STARTED, UINT64, "times cleaner thread tries to merge a leaf");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_RUNNING, FLUSHER_CLEANER_NUM_LEAF_MERGES_RUNNING, UINT64, "cleaner thread leaf merges in progress");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_COMPLETED, FLUSHER_CLEANER_NUM_LEAF_MERGES_COMPLETED, UINT64, "cleaner thread leaf merges successful");
+ FL_STATUS_INIT(FT_FLUSHER_CLEANER_NUM_DIRTIED_FOR_LEAF_MERGE, FLUSHER_CLEANER_NUM_DIRTIED_FOR_LEAF_MERGE, UINT64, "nodes dirtied by cleaner thread leaf merges");
+ FL_STATUS_INIT(FT_FLUSHER_FLUSH_TOTAL, FLUSHER_FLUSH_TOTAL, UINT64, "total number of flushes done by flusher threads or cleaner threads");
+ FL_STATUS_INIT(FT_FLUSHER_FLUSH_IN_MEMORY, FLUSHER_FLUSH_IN_MEMORY, UINT64, "number of in memory flushes");
+ FL_STATUS_INIT(FT_FLUSHER_FLUSH_NEEDED_IO, FLUSHER_FLUSH_NEEDED_IO, UINT64, "number of flushes that read something off disk");
+ FL_STATUS_INIT(FT_FLUSHER_FLUSH_CASCADES, FLUSHER_FLUSH_CASCADES, UINT64, "number of flushes that triggered another flush in child");
+ FL_STATUS_INIT(FT_FLUSHER_FLUSH_CASCADES_1, FLUSHER_FLUSH_CASCADES_1, UINT64, "number of flushes that triggered 1 cascading flush");
+ FL_STATUS_INIT(FT_FLUSHER_FLUSH_CASCADES_2, FLUSHER_FLUSH_CASCADES_2, UINT64, "number of flushes that triggered 2 cascading flushes");
+ FL_STATUS_INIT(FT_FLUSHER_FLUSH_CASCADES_3, FLUSHER_FLUSH_CASCADES_3, UINT64, "number of flushes that triggered 3 cascading flushes");
+ FL_STATUS_INIT(FT_FLUSHER_FLUSH_CASCADES_4, FLUSHER_FLUSH_CASCADES_4, UINT64, "number of flushes that triggered 4 cascading flushes");
+ FL_STATUS_INIT(FT_FLUSHER_FLUSH_CASCADES_5, FLUSHER_FLUSH_CASCADES_5, UINT64, "number of flushes that triggered 5 cascading flushes");
+ FL_STATUS_INIT(FT_FLUSHER_FLUSH_CASCADES_GT_5, FLUSHER_FLUSH_CASCADES_GT_5, UINT64, "number of flushes that triggered over 5 cascading flushes");
+ FL_STATUS_INIT(FT_FLUSHER_SPLIT_LEAF, FLUSHER_SPLIT_LEAF, UINT64, "leaf node splits");
+ FL_STATUS_INIT(FT_FLUSHER_SPLIT_NONLEAF, FLUSHER_SPLIT_NONLEAF, UINT64, "nonleaf node splits");
+ FL_STATUS_INIT(FT_FLUSHER_MERGE_LEAF, FLUSHER_MERGE_LEAF, UINT64, "leaf node merges");
+ FL_STATUS_INIT(FT_FLUSHER_MERGE_NONLEAF, FLUSHER_MERGE_NONLEAF, UINT64, "nonleaf node merges");
+ FL_STATUS_INIT(FT_FLUSHER_BALANCE_LEAF, FLUSHER_BALANCE_LEAF, UINT64, "leaf node balances");
+
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_MIN_BUFFER_SIZE) = UINT64_MAX;
+ FL_STATUS_VAL(FT_FLUSHER_CLEANER_MIN_BUFFER_WORKDONE) = UINT64_MAX;
+
+ m_initialized = true;
+#undef FL_STATUS_INIT
+}
+void FT_FLUSHER_STATUS_S::destroy() {
+ if (!m_initialized) return;
+ for (int i = 0; i < FT_FLUSHER_STATUS_NUM_ROWS; ++i) {
+ if (status[i].type == PARCOUNT) {
+ destroy_partitioned_counter(status[i].value.parcount);
+ }
+ }
+}
+
+
+
+FT_HOT_STATUS_S hot_status;
+void FT_HOT_STATUS_S::init() {
+ if (m_initialized) return;
+#define HOT_STATUS_INIT(k,c,t,l) TOKUFT_STATUS_INIT((*this), k, c, t, "hot: " l, TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS)
+ HOT_STATUS_INIT(FT_HOT_NUM_STARTED, HOT_NUM_STARTED, UINT64, "operations ever started");
+ HOT_STATUS_INIT(FT_HOT_NUM_COMPLETED, HOT_NUM_COMPLETED, UINT64, "operations successfully completed");
+ HOT_STATUS_INIT(FT_HOT_NUM_ABORTED, HOT_NUM_ABORTED, UINT64, "operations aborted");
+ HOT_STATUS_INIT(FT_HOT_MAX_ROOT_FLUSH_COUNT, HOT_MAX_ROOT_FLUSH_COUNT, UINT64, "max number of flushes from root ever required to optimize a tree");
+
+ m_initialized = true;
+#undef HOT_STATUS_INIT
+}
+void FT_HOT_STATUS_S::destroy() {
+ if (!m_initialized) return;
+ for (int i = 0; i < FT_HOT_STATUS_NUM_ROWS; ++i) {
+ if (status[i].type == PARCOUNT) {
+ destroy_partitioned_counter(status[i].value.parcount);
+ }
+ }
+}
+
+
+
+TXN_STATUS_S txn_status;
+void TXN_STATUS_S::init() {
+ if (m_initialized) return;
+#define TXN_STATUS_INIT(k,c,t,l) TOKUFT_STATUS_INIT((*this), k, c, t, "txn: " l, TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS)
+ TXN_STATUS_INIT(TXN_BEGIN, TXN_BEGIN, PARCOUNT, "begin");
+ TXN_STATUS_INIT(TXN_READ_BEGIN, TXN_BEGIN_READ_ONLY, PARCOUNT, "begin read only");
+ TXN_STATUS_INIT(TXN_COMMIT, TXN_COMMITS, PARCOUNT, "successful commits");
+ TXN_STATUS_INIT(TXN_ABORT, TXN_ABORTS, PARCOUNT, "aborts");
+ m_initialized = true;
+#undef TXN_STATUS_INIT
+}
+void TXN_STATUS_S::destroy() {
+ if (!m_initialized) return;
+ for (int i = 0; i < TXN_STATUS_NUM_ROWS; ++i) {
+ if (status[i].type == PARCOUNT) {
+ destroy_partitioned_counter(status[i].value.parcount);
+ }
+ }
+}
+
+
+LOGGER_STATUS_S log_status;
+void LOGGER_STATUS_S::init() {
+ if (m_initialized) return;
+#define LOG_STATUS_INIT(k,c,t,l) TOKUFT_STATUS_INIT((*this), k, c, t, "logger: " l, TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS)
+ LOG_STATUS_INIT(LOGGER_NEXT_LSN, LOGGER_NEXT_LSN, UINT64, "next LSN");
+ LOG_STATUS_INIT(LOGGER_NUM_WRITES, LOGGER_WRITES, UINT64, "writes");
+ LOG_STATUS_INIT(LOGGER_BYTES_WRITTEN, LOGGER_WRITES_BYTES, UINT64, "writes (bytes)");
+ LOG_STATUS_INIT(LOGGER_UNCOMPRESSED_BYTES_WRITTEN, LOGGER_WRITES_UNCOMPRESSED_BYTES, UINT64, "writes (uncompressed bytes)");
+ LOG_STATUS_INIT(LOGGER_TOKUTIME_WRITES, LOGGER_WRITES_SECONDS, TOKUTIME, "writes (seconds)");
+ LOG_STATUS_INIT(LOGGER_WAIT_BUF_LONG, LOGGER_WAIT_LONG, UINT64, "number of long logger write operations");
+ m_initialized = true;
+#undef LOG_STATUS_INIT
+}
+void LOGGER_STATUS_S::destroy() {
+ if (!m_initialized) return;
+ for (int i = 0; i < LOGGER_STATUS_NUM_ROWS; ++i) {
+ if (status[i].type == PARCOUNT) {
+ destroy_partitioned_counter(status[i].value.parcount);
+ }
+ }
+}
+
+void toku_status_init(void) {
+ le_status.init();
+ cp_status.init();
+ ltm_status.init();
+ ft_status.init();
+ fl_status.init();
+ hot_status.init();
+ txn_status.init();
+ log_status.init();
+}
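+// Tears down the status objects above in the reverse of the order in which
+// toku_status_init() initialized them.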
+void toku_status_destroy(void) {
+ log_status.destroy();
+ txn_status.destroy();
+ hot_status.destroy();
+ fl_status.destroy();
+ ft_status.destroy();
+ ltm_status.destroy();
+ cp_status.destroy();
+ le_status.destroy();
+}
diff --git a/storage/tokudb/PerconaFT/ft/ft-status.h b/storage/tokudb/PerconaFT/ft/ft-status.h
new file mode 100644
index 00000000..4775487c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-status.h
@@ -0,0 +1,539 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+#include "portability/toku_config.h"
+#include "portability/toku_list.h"
+#include "portability/toku_race_tools.h"
+
+#include "util/status.h"
+
+//
+// Leaf Entry statistics
+//
+class LE_STATUS_S {
+public:
+ enum {
+ LE_MAX_COMMITTED_XR = 0,
+ LE_MAX_PROVISIONAL_XR,
+ LE_EXPANDED,
+ LE_MAX_MEMSIZE,
+ LE_APPLY_GC_BYTES_IN,
+ LE_APPLY_GC_BYTES_OUT,
+ LE_NORMAL_GC_BYTES_IN,
+ LE_NORMAL_GC_BYTES_OUT,
+ LE_STATUS_NUM_ROWS
+ };
+
+ void init();
+ void destroy();
+
+ TOKU_ENGINE_STATUS_ROW_S status[LE_STATUS_NUM_ROWS];
+
+private:
+ bool m_initialized;
+};
+typedef LE_STATUS_S* LE_STATUS;
+extern LE_STATUS_S le_status;
+
+// executed too often to be worth making threadsafe
+#define LE_STATUS_VAL(x) le_status.status[LE_STATUS_S::x].value.num
+#define LE_STATUS_INC(x, d) \
+ do { \
+ if (le_status.status[LE_STATUS_S::x].type == PARCOUNT) { \
+ increment_partitioned_counter(le_status.status[LE_STATUS_S::x].value.parcount, d); \
+ } else { \
+ toku_sync_fetch_and_add(&le_status.status[LE_STATUS_S::x].value.num, d); \
+ } \
+ } while (0)
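+// Example (illustrative): bump the "expanded" leaf-entry counter by one.
+//
+//     LE_STATUS_INC(LE_EXPANDED, 1);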
+
+
+
+//
+// Checkpoint statistics
+//
+class CHECKPOINT_STATUS_S {
+public:
+ enum {
+ CP_PERIOD,
+ CP_FOOTPRINT,
+ CP_TIME_LAST_CHECKPOINT_BEGIN,
+ CP_TIME_LAST_CHECKPOINT_BEGIN_COMPLETE,
+ CP_TIME_LAST_CHECKPOINT_END,
+ CP_TIME_CHECKPOINT_DURATION,
+ CP_TIME_CHECKPOINT_DURATION_LAST,
+ CP_LAST_LSN,
+ CP_CHECKPOINT_COUNT,
+ CP_CHECKPOINT_COUNT_FAIL,
+ CP_WAITERS_NOW, // how many threads are currently waiting for the checkpoint_safe lock to perform a checkpoint
+ CP_WAITERS_MAX, // max threads ever simultaneously waiting for the checkpoint_safe lock to perform a checkpoint
+ CP_CLIENT_WAIT_ON_MO, // how many times a client thread waited to take the multi_operation lock, not for checkpoint
+ CP_CLIENT_WAIT_ON_CS, // how many times a client thread waited for the checkpoint_safe lock, not for checkpoint
+ CP_BEGIN_TIME,
+ CP_LONG_BEGIN_TIME,
+ CP_LONG_BEGIN_COUNT,
+ CP_END_TIME,
+ CP_LONG_END_TIME,
+ CP_LONG_END_COUNT,
+ CP_STATUS_NUM_ROWS // number of rows in this status array. must be last.
+ };
+
+ void init();
+ void destroy();
+
+ TOKU_ENGINE_STATUS_ROW_S status[CP_STATUS_NUM_ROWS];
+
+private:
+ bool m_initialized;
+};
+typedef CHECKPOINT_STATUS_S* CHECKPOINT_STATUS;
+extern CHECKPOINT_STATUS_S cp_status;
+
+#define CP_STATUS_VAL(x) cp_status.status[CHECKPOINT_STATUS_S::x].value.num
+
+
+
+//
+// Cachetable statistics
+//
+class CACHETABLE_STATUS_S {
+public:
+ enum {
+ CT_MISS = 0,
+ CT_MISSTIME, // how many usec spent waiting for disk read because of cache miss
+ CT_PREFETCHES, // how many times has a block been prefetched into the cachetable?
+ CT_SIZE_CURRENT, // the sum of the sizes of the nodes represented in the cachetable
+ CT_SIZE_LIMIT, // the limit to the sum of the node sizes
+ CT_SIZE_WRITING, // the sum of the sizes of the nodes being written
+ CT_SIZE_NONLEAF, // number of bytes in cachetable belonging to nonleaf nodes
+ CT_SIZE_LEAF, // number of bytes in cachetable belonging to leaf nodes
+ CT_SIZE_ROLLBACK, // number of bytes in cachetable belonging to rollback nodes
+ CT_SIZE_CACHEPRESSURE, // number of bytes causing cache pressure (sum of buffers and workdone counters)
+ CT_SIZE_CLONED, // number of bytes of cloned data in the system
+ CT_EVICTIONS,
+ CT_CLEANER_EXECUTIONS, // number of times the cleaner thread's loop has executed
+ CT_CLEANER_PERIOD,
+ CT_CLEANER_ITERATIONS, // number of times the cleaner thread runs the cleaner per period
+ CT_WAIT_PRESSURE_COUNT,
+ CT_WAIT_PRESSURE_TIME,
+ CT_LONG_WAIT_PRESSURE_COUNT,
+ CT_LONG_WAIT_PRESSURE_TIME,
+
+ CT_POOL_CLIENT_NUM_THREADS,
+ CT_POOL_CLIENT_NUM_THREADS_ACTIVE,
+ CT_POOL_CLIENT_QUEUE_SIZE,
+ CT_POOL_CLIENT_MAX_QUEUE_SIZE,
+ CT_POOL_CLIENT_TOTAL_ITEMS_PROCESSED,
+ CT_POOL_CLIENT_TOTAL_EXECUTION_TIME,
+ CT_POOL_CACHETABLE_NUM_THREADS,
+ CT_POOL_CACHETABLE_NUM_THREADS_ACTIVE,
+ CT_POOL_CACHETABLE_QUEUE_SIZE,
+ CT_POOL_CACHETABLE_MAX_QUEUE_SIZE,
+ CT_POOL_CACHETABLE_TOTAL_ITEMS_PROCESSED,
+ CT_POOL_CACHETABLE_TOTAL_EXECUTION_TIME,
+ CT_POOL_CHECKPOINT_NUM_THREADS,
+ CT_POOL_CHECKPOINT_NUM_THREADS_ACTIVE,
+ CT_POOL_CHECKPOINT_QUEUE_SIZE,
+ CT_POOL_CHECKPOINT_MAX_QUEUE_SIZE,
+ CT_POOL_CHECKPOINT_TOTAL_ITEMS_PROCESSED,
+ CT_POOL_CHECKPOINT_TOTAL_EXECUTION_TIME,
+
+ CT_STATUS_NUM_ROWS
+ };
+
+ void init();
+ void destroy();
+
+ TOKU_ENGINE_STATUS_ROW_S status[CT_STATUS_NUM_ROWS];
+
+private:
+ bool m_initialized;
+};
+typedef CACHETABLE_STATUS_S* CACHETABLE_STATUS;
+extern CACHETABLE_STATUS_S ct_status;
+
+#define CT_STATUS_VAL(x) ct_status.status[CACHETABLE_STATUS_S::x].value.num
+
+
+
+//
+// Lock Tree Manager statistics
+//
+class LTM_STATUS_S {
+public:
+ enum {
+ LTM_SIZE_CURRENT = 0,
+ LTM_SIZE_LIMIT,
+ LTM_ESCALATION_COUNT,
+ LTM_ESCALATION_TIME,
+ LTM_ESCALATION_LATEST_RESULT,
+ LTM_NUM_LOCKTREES,
+ LTM_LOCK_REQUESTS_PENDING,
+ LTM_STO_NUM_ELIGIBLE,
+ LTM_STO_END_EARLY_COUNT,
+ LTM_STO_END_EARLY_TIME,
+ LTM_WAIT_COUNT,
+ LTM_WAIT_TIME,
+ LTM_LONG_WAIT_COUNT,
+ LTM_LONG_WAIT_TIME,
+ LTM_TIMEOUT_COUNT,
+ LTM_WAIT_ESCALATION_COUNT,
+ LTM_WAIT_ESCALATION_TIME,
+ LTM_LONG_WAIT_ESCALATION_COUNT,
+ LTM_LONG_WAIT_ESCALATION_TIME,
+ LTM_STATUS_NUM_ROWS // must be last
+ };
+
+ void init(void);
+ void destroy(void);
+
+ TOKU_ENGINE_STATUS_ROW_S status[LTM_STATUS_NUM_ROWS];
+
+private:
+ bool m_initialized;
+};
+typedef LTM_STATUS_S* LTM_STATUS;
+extern LTM_STATUS_S ltm_status;
+
+#define LTM_STATUS_VAL(x) ltm_status.status[LTM_STATUS_S::x].value.num
+
+
+//
+// Fractal Tree statistics
+//
+class FT_STATUS_S {
+public:
+ enum {
+ FT_UPDATES = 0,
+ FT_UPDATES_BROADCAST,
+ FT_DESCRIPTOR_SET,
+ FT_MSN_DISCARDS, // how many messages were ignored by leaf because of msn
+ FT_TOTAL_RETRIES, // total number of search retries due to TRY_AGAIN
+ FT_SEARCH_TRIES_GT_HEIGHT, // number of searches that required more tries than the height of the tree
+ FT_SEARCH_TRIES_GT_HEIGHTPLUS3, // number of searches that required more tries than the height of the tree plus three
+ FT_DISK_FLUSH_LEAF, // number of leaf nodes flushed to disk, not for checkpoint
+ FT_DISK_FLUSH_LEAF_BYTES, // number of leaf nodes flushed to disk, not for checkpoint
+ FT_DISK_FLUSH_LEAF_UNCOMPRESSED_BYTES, // number of leaf nodes flushed to disk, not for checkpoint
+ FT_DISK_FLUSH_LEAF_TOKUTIME, // number of leaf nodes flushed to disk, not for checkpoint
+ FT_DISK_FLUSH_NONLEAF, // number of nonleaf nodes flushed to disk, not for checkpoint
+ FT_DISK_FLUSH_NONLEAF_BYTES, // number of nonleaf nodes flushed to disk, not for checkpoint
+ FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES, // number of nonleaf nodes flushed to disk, not for checkpoint
+ FT_DISK_FLUSH_NONLEAF_TOKUTIME, // number of nonleaf nodes flushed to disk, not for checkpoint
+ FT_DISK_FLUSH_LEAF_FOR_CHECKPOINT, // number of leaf nodes flushed to disk for checkpoint
+ FT_DISK_FLUSH_LEAF_BYTES_FOR_CHECKPOINT, // number of leaf nodes flushed to disk for checkpoint
+ FT_DISK_FLUSH_LEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT,// number of leaf nodes flushed to disk for checkpoint
+ FT_DISK_FLUSH_LEAF_TOKUTIME_FOR_CHECKPOINT,// number of leaf nodes flushed to disk for checkpoint
+ FT_DISK_FLUSH_NONLEAF_FOR_CHECKPOINT, // number of nonleaf nodes flushed to disk for checkpoint
+ FT_DISK_FLUSH_NONLEAF_BYTES_FOR_CHECKPOINT,// number of nonleaf nodes flushed to disk for checkpoint
+ FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT,// number of nonleaf nodes flushed to disk for checkpoint
+ FT_DISK_FLUSH_NONLEAF_TOKUTIME_FOR_CHECKPOINT,// number of nonleaf nodes flushed to disk for checkpoint
+ FT_DISK_FLUSH_LEAF_COMPRESSION_RATIO, // effective compression ratio for leaf bytes flushed to disk
+ FT_DISK_FLUSH_NONLEAF_COMPRESSION_RATIO, // effective compression ratio for nonleaf bytes flushed to disk
+ FT_DISK_FLUSH_OVERALL_COMPRESSION_RATIO, // effective compression ratio for all bytes flushed to disk
+ FT_PARTIAL_EVICTIONS_NONLEAF, // number of nonleaf node partial evictions
+ FT_PARTIAL_EVICTIONS_NONLEAF_BYTES, // number of nonleaf node partial evictions (bytes)
+ FT_PARTIAL_EVICTIONS_LEAF, // number of leaf node partial evictions
+ FT_PARTIAL_EVICTIONS_LEAF_BYTES, // number of leaf node partial evictions (bytes)
+ FT_FULL_EVICTIONS_LEAF, // number of full cachetable evictions on leaf nodes
+ FT_FULL_EVICTIONS_LEAF_BYTES, // number of full cachetable evictions on leaf nodes (bytes)
+ FT_FULL_EVICTIONS_NONLEAF, // number of full cachetable evictions on nonleaf nodes
+ FT_FULL_EVICTIONS_NONLEAF_BYTES, // number of full cachetable evictions on nonleaf nodes (bytes)
+ FT_CREATE_LEAF, // number of leaf nodes created
+ FT_CREATE_NONLEAF, // number of nonleaf nodes created
+ FT_DESTROY_LEAF, // number of leaf nodes destroyed
+ FT_DESTROY_NONLEAF, // number of nonleaf nodes destroyed
+ FT_MSG_BYTES_IN, // how many bytes of messages injected at root (for all trees)
+ FT_MSG_BYTES_OUT, // how many bytes of messages flushed from h1 nodes to leaves
+ FT_MSG_BYTES_CURR, // how many bytes of messages currently in trees (estimate)
+ FT_MSG_NUM, // how many messages injected at root
+ FT_MSG_NUM_BROADCAST, // how many broadcast messages injected at root
+ FT_NUM_BASEMENTS_DECOMPRESSED_NORMAL, // how many basement nodes were decompressed because they were the target of a query
+ FT_NUM_BASEMENTS_DECOMPRESSED_AGGRESSIVE, // ... because they were between lc and rc
+ FT_NUM_BASEMENTS_DECOMPRESSED_PREFETCH,
+ FT_NUM_BASEMENTS_DECOMPRESSED_WRITE,
+ FT_NUM_MSG_BUFFER_DECOMPRESSED_NORMAL, // how many msg buffers were decompressed because they were the target of a query
+ FT_NUM_MSG_BUFFER_DECOMPRESSED_AGGRESSIVE, // ... because they were between lc and rc
+ FT_NUM_MSG_BUFFER_DECOMPRESSED_PREFETCH,
+ FT_NUM_MSG_BUFFER_DECOMPRESSED_WRITE,
+ FT_NUM_PIVOTS_FETCHED_QUERY, // how many pivots were fetched for a query
+ FT_BYTES_PIVOTS_FETCHED_QUERY, // bytes of pivots fetched for a query
+ FT_TOKUTIME_PIVOTS_FETCHED_QUERY, // time (tokutime) spent fetching pivots for a query
+ FT_NUM_PIVOTS_FETCHED_PREFETCH, // ... for a prefetch
+ FT_BYTES_PIVOTS_FETCHED_PREFETCH, // ... for a prefetch
+ FT_TOKUTIME_PIVOTS_FETCHED_PREFETCH, // ... for a prefetch
+ FT_NUM_PIVOTS_FETCHED_WRITE, // ... for a write
+ FT_BYTES_PIVOTS_FETCHED_WRITE, // ... for a write
+ FT_TOKUTIME_PIVOTS_FETCHED_WRITE, // ... for a write
+ FT_NUM_BASEMENTS_FETCHED_NORMAL, // how many basement nodes were fetched because they were the target of a query
+ FT_BYTES_BASEMENTS_FETCHED_NORMAL, // bytes of basement nodes fetched because they were the target of a query
+ FT_TOKUTIME_BASEMENTS_FETCHED_NORMAL, // time (tokutime) spent fetching basement nodes that were the target of a query
+ FT_NUM_BASEMENTS_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
+ FT_BYTES_BASEMENTS_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
+ FT_TOKUTIME_BASEMENTS_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
+ FT_NUM_BASEMENTS_FETCHED_PREFETCH,
+ FT_BYTES_BASEMENTS_FETCHED_PREFETCH,
+ FT_TOKUTIME_BASEMENTS_FETCHED_PREFETCH,
+ FT_NUM_BASEMENTS_FETCHED_WRITE,
+ FT_BYTES_BASEMENTS_FETCHED_WRITE,
+ FT_TOKUTIME_BASEMENTS_FETCHED_WRITE,
+ FT_NUM_MSG_BUFFER_FETCHED_NORMAL, // how many msg buffers were fetched because they were the target of a query
+ FT_BYTES_MSG_BUFFER_FETCHED_NORMAL, // bytes of msg buffers fetched because they were the target of a query
+ FT_TOKUTIME_MSG_BUFFER_FETCHED_NORMAL, // time (tokutime) spent fetching msg buffers that were the target of a query
+ FT_NUM_MSG_BUFFER_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
+ FT_BYTES_MSG_BUFFER_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
+ FT_TOKUTIME_MSG_BUFFER_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
+ FT_NUM_MSG_BUFFER_FETCHED_PREFETCH,
+ FT_BYTES_MSG_BUFFER_FETCHED_PREFETCH,
+ FT_TOKUTIME_MSG_BUFFER_FETCHED_PREFETCH,
+ FT_NUM_MSG_BUFFER_FETCHED_WRITE,
+ FT_BYTES_MSG_BUFFER_FETCHED_WRITE,
+ FT_TOKUTIME_MSG_BUFFER_FETCHED_WRITE,
+ FT_LEAF_COMPRESS_TOKUTIME, // seconds spent compressing leaf nodes to memory
+ FT_LEAF_SERIALIZE_TOKUTIME, // seconds spent serializing leaf node to memory
+ FT_LEAF_DECOMPRESS_TOKUTIME, // seconds spent decompressing leaf nodes to memory
+ FT_LEAF_DESERIALIZE_TOKUTIME, // seconds spent deserializing leaf nodes to memory
+ FT_NONLEAF_COMPRESS_TOKUTIME, // seconds spent compressing nonleaf nodes to memory
+ FT_NONLEAF_SERIALIZE_TOKUTIME, // seconds spent serializing nonleaf nodes to memory
+ FT_NONLEAF_DECOMPRESS_TOKUTIME, // seconds spent decompressing nonleaf nodes to memory
+ FT_NONLEAF_DESERIALIZE_TOKUTIME, // seconds spent deserializing nonleaf nodes to memory
+ FT_PRO_NUM_ROOT_SPLIT,
+ FT_PRO_NUM_ROOT_H0_INJECT,
+ FT_PRO_NUM_ROOT_H1_INJECT,
+ FT_PRO_NUM_INJECT_DEPTH_0,
+ FT_PRO_NUM_INJECT_DEPTH_1,
+ FT_PRO_NUM_INJECT_DEPTH_2,
+ FT_PRO_NUM_INJECT_DEPTH_3,
+ FT_PRO_NUM_INJECT_DEPTH_GT3,
+ FT_PRO_NUM_STOP_NONEMPTY_BUF,
+ FT_PRO_NUM_STOP_H1,
+ FT_PRO_NUM_STOP_LOCK_CHILD,
+ FT_PRO_NUM_STOP_CHILD_INMEM,
+ FT_PRO_NUM_DIDNT_WANT_PROMOTE,
+ FT_BASEMENT_DESERIALIZE_FIXED_KEYSIZE, // how many basement nodes were deserialized with a fixed keysize
+ FT_BASEMENT_DESERIALIZE_VARIABLE_KEYSIZE, // how many basement nodes were deserialized with a variable keysize
+ FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS,
+ FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS,
+ FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE,
+ FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, // how many deleted leaf entries were skipped by a cursor
+ FT_STATUS_NUM_ROWS
+ };
+
+ void init(void);
+ void destroy(void);
+
+ TOKU_ENGINE_STATUS_ROW_S status[FT_STATUS_NUM_ROWS];
+
+private:
+ bool m_initialized;
+};
+typedef FT_STATUS_S* FT_STATUS;
+extern FT_STATUS_S ft_status;
+
+#define FT_STATUS_VAL(x) \
+ (ft_status.status[FT_STATUS_S::x].type == PARCOUNT ? \
+ read_partitioned_counter(ft_status.status[FT_STATUS_S::x].value.parcount) : \
+ ft_status.status[FT_STATUS_S::x].value.num)
+
+#define FT_STATUS_INC(x, d) \
+ do { \
+ if (ft_status.status[FT_STATUS_S::x].type == PARCOUNT) { \
+ increment_partitioned_counter(ft_status.status[FT_STATUS_S::x].value.parcount, d); \
+ } else { \
+ toku_sync_fetch_and_add(&ft_status.status[FT_STATUS_S::x].value.num, d); \
+ } \
+ } while (0)
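+
+// Illustrative usage sketch (not part of the original header): call sites would typically
+// bump a row with FT_STATUS_INC and read it back with FT_STATUS_VAL; both macros handle
+// plain and partitioned (PARCOUNT) rows. The row FT_MSG_NUM and the increment of 1 below
+// are chosen only as an example.
+//
+//     FT_STATUS_INC(FT_MSG_NUM, 1);                  // count one message injected at the root
+//     uint64_t injected = FT_STATUS_VAL(FT_MSG_NUM); // aggregated, PARCOUNT-aware read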
+
+
+
+//
+// Flusher statistics
+//
+class FT_FLUSHER_STATUS_S {
+public:
+ enum {
+ FT_FLUSHER_CLEANER_TOTAL_NODES = 0, // total number of nodes whose buffers are potentially flushed by cleaner thread
+ FT_FLUSHER_CLEANER_H1_NODES, // number of nodes of height one whose message buffers are flushed by cleaner thread
+ FT_FLUSHER_CLEANER_HGT1_NODES, // number of nodes of height > 1 whose message buffers are flushed by cleaner thread
+ FT_FLUSHER_CLEANER_EMPTY_NODES, // number of nodes that are selected by cleaner, but whose buffers are empty
+ FT_FLUSHER_CLEANER_NODES_DIRTIED, // number of nodes that are made dirty by the cleaner thread
+ FT_FLUSHER_CLEANER_MAX_BUFFER_SIZE, // max number of bytes in message buffer flushed by cleaner thread
+ FT_FLUSHER_CLEANER_MIN_BUFFER_SIZE,
+ FT_FLUSHER_CLEANER_TOTAL_BUFFER_SIZE,
+ FT_FLUSHER_CLEANER_MAX_BUFFER_WORKDONE, // max workdone value of any message buffer flushed by cleaner thread
+ FT_FLUSHER_CLEANER_MIN_BUFFER_WORKDONE,
+ FT_FLUSHER_CLEANER_TOTAL_BUFFER_WORKDONE,
+ FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_STARTED, // number of times cleaner thread tries to merge a leaf
+ FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_RUNNING, // number of cleaner thread leaf merges in progress
+ FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_COMPLETED, // number of times cleaner thread successfully merges a leaf
+ FT_FLUSHER_CLEANER_NUM_DIRTIED_FOR_LEAF_MERGE, // nodes dirtied by the "flush from root" process to merge a leaf node
+ FT_FLUSHER_FLUSH_TOTAL, // total number of flushes done by flusher threads or cleaner threads
+ FT_FLUSHER_FLUSH_IN_MEMORY, // number of in memory flushes
+ FT_FLUSHER_FLUSH_NEEDED_IO, // number of flushes that had to read a child (or part) off disk
+ FT_FLUSHER_FLUSH_CASCADES, // number of flushes that triggered another flush in the child
+ FT_FLUSHER_FLUSH_CASCADES_1, // number of flushes that triggered 1 cascading flush
+ FT_FLUSHER_FLUSH_CASCADES_2, // number of flushes that triggered 2 cascading flushes
+ FT_FLUSHER_FLUSH_CASCADES_3, // number of flushes that triggered 3 cascading flushes
+ FT_FLUSHER_FLUSH_CASCADES_4, // number of flushes that triggered 4 cascading flushes
+ FT_FLUSHER_FLUSH_CASCADES_5, // number of flushes that triggered 5 cascading flushes
+ FT_FLUSHER_FLUSH_CASCADES_GT_5, // number of flushes that triggered more than 5 cascading flushes
+ FT_FLUSHER_SPLIT_LEAF, // number of leaf nodes split
+ FT_FLUSHER_SPLIT_NONLEAF, // number of nonleaf nodes split
+ FT_FLUSHER_MERGE_LEAF, // number of times leaf nodes are merged
+ FT_FLUSHER_MERGE_NONLEAF, // number of times nonleaf nodes are merged
+ FT_FLUSHER_BALANCE_LEAF, // number of times a leaf node is balanced
+ FT_FLUSHER_STATUS_NUM_ROWS
+ };
+
+ void init(void);
+ void destroy(void);
+
+ TOKU_ENGINE_STATUS_ROW_S status[FT_FLUSHER_STATUS_NUM_ROWS];
+
+private:
+ bool m_initialized;
+};
+typedef FT_FLUSHER_STATUS_S* FT_FLUSHER_STATUS;
+extern FT_FLUSHER_STATUS_S fl_status;
+
+#define FL_STATUS_VAL(x) fl_status.status[FT_FLUSHER_STATUS_S::x].value.num
+
+
+
+//
+// Hot Flusher
+//
+class FT_HOT_STATUS_S {
+public:
+ enum {
+ FT_HOT_NUM_STARTED = 0, // number of HOT operations that have begun
+ FT_HOT_NUM_COMPLETED, // number of HOT operations that have successfully completed
+ FT_HOT_NUM_ABORTED, // number of HOT operations that have been aborted
+ FT_HOT_MAX_ROOT_FLUSH_COUNT, // max number of flushes from root ever required to optimize a tree
+ FT_HOT_STATUS_NUM_ROWS
+ };
+
+ void init(void);
+ void destroy(void);
+
+ TOKU_ENGINE_STATUS_ROW_S status[FT_HOT_STATUS_NUM_ROWS];
+
+private:
+ bool m_initialized;
+};
+typedef FT_HOT_STATUS_S* FT_HOT_STATUS;
+extern FT_HOT_STATUS_S hot_status;
+
+#define HOT_STATUS_VAL(x) hot_status.status[FT_HOT_STATUS_S::x].value.num
+
+
+
+//
+// Transaction statistics
+//
+class TXN_STATUS_S {
+public:
+ enum {
+ TXN_BEGIN, // total number of transactions begun (does not include recovered txns)
+ TXN_READ_BEGIN, // total number of read only transactions begun (does not include recovered txns)
+ TXN_COMMIT, // successful commits
+ TXN_ABORT,
+ TXN_STATUS_NUM_ROWS
+ };
+
+ void init(void);
+ void destroy(void);
+
+ TOKU_ENGINE_STATUS_ROW_S status[TXN_STATUS_NUM_ROWS];
+
+private:
+ bool m_initialized;
+};
+typedef TXN_STATUS_S* TXN_STATUS;
+extern TXN_STATUS_S txn_status;
+
+#define TXN_STATUS_INC(x, d) increment_partitioned_counter(txn_status.status[TXN_STATUS_S::x].value.parcount, d)
+
+
+
+//
+// Logger statistics
+//
+class LOGGER_STATUS_S {
+public:
+ enum {
+ LOGGER_NEXT_LSN = 0,
+ LOGGER_NUM_WRITES,
+ LOGGER_BYTES_WRITTEN,
+ LOGGER_UNCOMPRESSED_BYTES_WRITTEN,
+ LOGGER_TOKUTIME_WRITES,
+ LOGGER_WAIT_BUF_LONG,
+ LOGGER_STATUS_NUM_ROWS
+ };
+
+ void init(void);
+ void destroy(void);
+
+ TOKU_ENGINE_STATUS_ROW_S status[LOGGER_STATUS_NUM_ROWS];
+
+private:
+ bool m_initialized;
+};
+typedef LOGGER_STATUS_S* LOGGER_STATUS;
+extern LOGGER_STATUS_S log_status;
+
+#define LOG_STATUS_VAL(x) log_status.status[LOGGER_STATUS_S::x].value.num
+
+void toku_status_init(void);
+void toku_status_destroy(void);
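+
+// Illustrative only (not part of the original header): a plausible lifecycle for these
+// status containers, assuming toku_status_init()/toku_status_destroy() initialize and
+// tear down the per-class status objects declared above.
+//
+//     toku_status_init();                                    // set up all status rows
+//     uint64_t lock_waits = LTM_STATUS_VAL(LTM_WAIT_COUNT);  // read one lock tree row
+//     toku_status_destroy();                                 // tear the rows back down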
diff --git a/storage/tokudb/PerconaFT/ft/ft-test-helpers.cc b/storage/tokudb/PerconaFT/ft/ft-test-helpers.cc
new file mode 100644
index 00000000..ad1dda01
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-test-helpers.cc
@@ -0,0 +1,268 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "ft/ft.h"
+#include "ft/ft-cachetable-wrappers.h"
+#include "ft/ft-internal.h"
+#include "ft/ft-flusher.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "ft/node.h"
+#include "ft/ule.h"
+
+// dummymsn needed to simulate msn because messages are injected at a lower level than toku_ft_root_put_msg()
+#define MIN_DUMMYMSN ((MSN) {(uint64_t)1 << 62})
+static MSN dummymsn;
+static int testsetup_initialized = 0;
+
+
+// Must be called before any other test_setup_xxx() functions are called.
+void
+toku_testsetup_initialize(void) {
+ if (testsetup_initialized == 0) {
+ testsetup_initialized = 1;
+ dummymsn = MIN_DUMMYMSN;
+ }
+}
+
+static MSN
+next_dummymsn(void) {
+ ++(dummymsn.msn);
+ return dummymsn;
+}
+
+
+bool ignore_if_was_already_open;
+int toku_testsetup_leaf(FT_HANDLE ft_handle, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens) {
+ FTNODE node;
+ assert(testsetup_initialized);
+ toku_create_new_ftnode(ft_handle, &node, 0, n_children);
+ for (int i = 0; i < n_children; i++) {
+ BP_STATE(node, i) = PT_AVAIL;
+ }
+
+ DBT *XMALLOC_N(n_children - 1, pivotkeys);
+ for (int i = 0; i + 1 < n_children; i++) {
+ toku_memdup_dbt(&pivotkeys[i], keys[i], keylens[i]);
+ }
+ node->pivotkeys.create_from_dbts(pivotkeys, n_children - 1);
+ for (int i = 0; i + 1 < n_children; i++) {
+ toku_destroy_dbt(&pivotkeys[i]);
+ }
+ toku_free(pivotkeys);
+
+ *blocknum = node->blocknum;
+ toku_unpin_ftnode(ft_handle->ft, node);
+ return 0;
+}
+
+// Don't bother to clean up carefully if something goes wrong. (E.g., it's OK to have malloced stuff that hasn't been freed.)
+int toku_testsetup_nonleaf (FT_HANDLE ft_handle, int height, BLOCKNUM *blocknum, int n_children, BLOCKNUM *children, char **keys, int *keylens) {
+ FTNODE node;
+ assert(testsetup_initialized);
+ toku_create_new_ftnode(ft_handle, &node, height, n_children);
+ for (int i = 0; i < n_children; i++) {
+ BP_BLOCKNUM(node, i) = children[i];
+ BP_STATE(node,i) = PT_AVAIL;
+ }
+ DBT *XMALLOC_N(n_children - 1, pivotkeys);
+ for (int i = 0; i + 1 < n_children; i++) {
+ toku_memdup_dbt(&pivotkeys[i], keys[i], keylens[i]);
+ }
+ node->pivotkeys.create_from_dbts(pivotkeys, n_children - 1);
+ for (int i = 0; i + 1 < n_children; i++) {
+ toku_destroy_dbt(&pivotkeys[i]);
+ }
+ toku_free(pivotkeys);
+
+ *blocknum = node->blocknum;
+ toku_unpin_ftnode(ft_handle->ft, node);
+ return 0;
+}
+
+int toku_testsetup_root(FT_HANDLE ft_handle, BLOCKNUM blocknum) {
+ assert(testsetup_initialized);
+ ft_handle->ft->h->root_blocknum = blocknum;
+ return 0;
+}
+
+int toku_testsetup_get_sersize(FT_HANDLE ft_handle, BLOCKNUM diskoff) // Return the size on disk
+{
+ assert(testsetup_initialized);
+ void *node_v;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
+ int r = toku_cachetable_get_and_pin(
+ ft_handle->ft->cf, diskoff,
+ toku_cachetable_hash(ft_handle->ft->cf, diskoff),
+ &node_v,
+ get_write_callbacks_for_node(ft_handle->ft),
+ toku_ftnode_fetch_callback,
+ toku_ftnode_pf_req_callback,
+ toku_ftnode_pf_callback,
+ true,
+ &bfe
+ );
+ assert(r==0);
+ FTNODE CAST_FROM_VOIDP(node, node_v);
+ int size = toku_serialize_ftnode_size(node);
+ toku_unpin_ftnode(ft_handle->ft, node);
+ return size;
+}
+
+int toku_testsetup_insert_to_leaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, const char *key, int keylen, const char *val, int vallen) {
+ void *node_v;
+ int r;
+
+ assert(testsetup_initialized);
+
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
+ r = toku_cachetable_get_and_pin(
+ ft_handle->ft->cf,
+ blocknum,
+ toku_cachetable_hash(ft_handle->ft->cf, blocknum),
+ &node_v,
+ get_write_callbacks_for_node(ft_handle->ft),
+ toku_ftnode_fetch_callback,
+ toku_ftnode_pf_req_callback,
+ toku_ftnode_pf_callback,
+ true,
+ &bfe
+ );
+ if (r!=0) return r;
+ FTNODE CAST_FROM_VOIDP(node, node_v);
+ toku_verify_or_set_counts(node);
+ assert(node->height==0);
+
+ DBT kdbt, vdbt;
+ ft_msg msg(
+ toku_fill_dbt(&kdbt, key, keylen),
+ toku_fill_dbt(&vdbt, val, vallen),
+ FT_INSERT,
+ next_dummymsn(),
+ toku_xids_get_root_xids());
+
+ static size_t zero_flow_deltas[] = { 0, 0 };
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, true);
+ toku_ftnode_put_msg(
+ ft_handle->ft->cmp,
+ ft_handle->ft->update_fun,
+ node,
+ -1,
+ msg,
+ true,
+ &gc_info,
+ zero_flow_deltas,
+ NULL,
+ NULL);
+
+ toku_verify_or_set_counts(node);
+
+ toku_unpin_ftnode(ft_handle->ft, node);
+ return 0;
+}
+
+static int
+testhelper_string_key_cmp(DB *UU(e), const DBT *a, const DBT *b)
+{
+ char *CAST_FROM_VOIDP(s, a->data), *CAST_FROM_VOIDP(t, b->data);
+ return strcmp(s, t);
+}
+
+
+void
+toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t)
+{
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
+ toku_pin_ftnode(
+ t->ft,
+ b,
+ toku_cachetable_hash(t->ft->cf, b),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ node,
+ true
+ );
+}
+
+int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, enum ft_msg_type msgtype, const char *key, int keylen, const char *val, int vallen) {
+ void *node_v;
+ int r;
+
+ assert(testsetup_initialized);
+
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
+ r = toku_cachetable_get_and_pin(
+ ft_handle->ft->cf,
+ blocknum,
+ toku_cachetable_hash(ft_handle->ft->cf, blocknum),
+ &node_v,
+ get_write_callbacks_for_node(ft_handle->ft),
+ toku_ftnode_fetch_callback,
+ toku_ftnode_pf_req_callback,
+ toku_ftnode_pf_callback,
+ true,
+ &bfe
+ );
+ if (r!=0) return r;
+ FTNODE CAST_FROM_VOIDP(node, node_v);
+ assert(node->height>0);
+
+ DBT k;
+ int childnum = toku_ftnode_which_child(node, toku_fill_dbt(&k, key, keylen), ft_handle->ft->cmp);
+
+ XIDS xids_0 = toku_xids_get_root_xids();
+ MSN msn = next_dummymsn();
+ toku::comparator cmp;
+ cmp.create(testhelper_string_key_cmp, nullptr);
+ toku_bnc_insert_msg(BNC(node, childnum), key, keylen, val, vallen, msgtype, msn, xids_0, true, cmp);
+ cmp.destroy();
+ // Hack to get the test working. The problem is that this test
+ // is directly queueing something in a FIFO instead of
+ // using ft APIs.
+ node->max_msn_applied_to_node_on_disk = msn;
+ node->set_dirty();
+ // Also hack max_msn_in_ft
+ ft_handle->ft->h->max_msn_in_ft = msn;
+
+ toku_unpin_ftnode(ft_handle->ft, node);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/ft-verify.cc b/storage/tokudb/PerconaFT/ft/ft-verify.cc
new file mode 100644
index 00000000..4f6e07e6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft-verify.cc
@@ -0,0 +1,524 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Verify an FT. */
+/* Check:
+ * The tree is of uniform depth (and the height is correct at every node)
+ *   For each pivot key: the max of the keys to the left is <= the pivot key < the min of the keys to the right.
+ * For each leaf node: All the keys are in strictly increasing order.
+ *   For each nonleaf node: All the messages have keys that are between the associated pivot keys (left_pivot_key < message key <= right_pivot_key)
+ */
+
+#include <my_global.h>
+#include "ft/serialize/block_table.h"
+#include "ft/ft.h"
+#include "ft/ft-cachetable-wrappers.h"
+#include "ft/ft-internal.h"
+#include "ft/node.h"
+
+static int
+compare_pairs (FT_HANDLE ft_handle, const DBT *a, const DBT *b) {
+ return ft_handle->ft->cmp(a, b);
+}
+
+static int
+compare_pair_to_key (FT_HANDLE ft_handle, const DBT *a, const void *key, uint32_t keylen) {
+ DBT y;
+ return ft_handle->ft->cmp(a, toku_fill_dbt(&y, key, keylen));
+}
+
+static int
+verify_msg_in_child_buffer(FT_HANDLE ft_handle, enum ft_msg_type type, MSN msn, const void *key, uint32_t keylen, const void *UU(data), uint32_t UU(datalen), XIDS UU(xids), const DBT *lesser_pivot, const DBT *greatereq_pivot)
+ __attribute__((warn_unused_result));
+
+UU()
+static int
+verify_msg_in_child_buffer(FT_HANDLE ft_handle, enum ft_msg_type type, MSN msn, const void *key, uint32_t keylen, const void *UU(data), uint32_t UU(datalen), XIDS UU(xids), const DBT *lesser_pivot, const DBT *greatereq_pivot) {
+ int result = 0;
+ if (msn.msn == ZERO_MSN.msn)
+ result = EINVAL;
+ switch (type) {
+ default:
+ break;
+ case FT_INSERT:
+ case FT_INSERT_NO_OVERWRITE:
+ case FT_DELETE_ANY:
+ case FT_ABORT_ANY:
+ case FT_COMMIT_ANY:
+ // verify key in bounds
+ if (lesser_pivot) {
+ int compare = compare_pair_to_key(ft_handle, lesser_pivot, key, keylen);
+ if (compare >= 0)
+ result = EINVAL;
+ }
+ if (result == 0 && greatereq_pivot) {
+ int compare = compare_pair_to_key(ft_handle, greatereq_pivot, key, keylen);
+ if (compare < 0)
+ result = EINVAL;
+ }
+ break;
+ }
+ return result;
+}
+
+static DBT
+get_ith_key_dbt (BASEMENTNODE bn, int i) {
+ DBT kdbt;
+ int r = bn->data_buffer.fetch_key_and_len(i, &kdbt.size, &kdbt.data);
+ invariant_zero(r); // this is a bad failure if it happens.
+ return kdbt;
+}
+
+#define VERIFY_ASSERTION(predicate, i, string) ({ \
+ if(!(predicate)) { \
+ fprintf(stderr, "%s:%d: Looking at child %d of block %" PRId64 ": %s\n", __FILE__, __LINE__, i, blocknum.b, string); \
+ result = TOKUDB_NEEDS_REPAIR; \
+ if (!keep_going_on_failure) goto done; \
+ }})
+
+#define VERIFY_ASSERTION_BASEMENT(predicate, bn, entry, string) ({ \
+ if(!(predicate)) { \
+ fprintf(stderr, "%s:%d: Looking at block %" PRId64 " bn %d entry %d: %s\n", __FILE__, __LINE__, blocknum.b, bn, entry, string); \
+ result = TOKUDB_NEEDS_REPAIR; \
+ if (!keep_going_on_failure) goto done; \
+ }})
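+
+// Note: both macros rely on the enclosing scope to provide `blocknum`, `result`,
+// `keep_going_on_failure`, and a `done:` label, as the callers below do.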
+
+struct count_msgs_extra {
+ int count;
+ MSN msn;
+ message_buffer *msg_buffer;
+};
+
+// template-only function, but must be extern
+int count_msgs(const int32_t &offset, const uint32_t UU(idx), struct count_msgs_extra *const e)
+ __attribute__((nonnull(3)));
+int count_msgs(const int32_t &offset, const uint32_t UU(idx), struct count_msgs_extra *const e)
+{
+ MSN msn;
+ e->msg_buffer->get_message_key_msn(offset, nullptr, &msn);
+ if (msn.msn == e->msn.msn) {
+ e->count++;
+ }
+ return 0;
+}
+
+struct verify_message_tree_extra {
+ message_buffer *msg_buffer;
+ bool broadcast;
+ bool is_fresh;
+ int i;
+ int verbose;
+ BLOCKNUM blocknum;
+ int keep_going_on_failure;
+ bool messages_have_been_moved;
+};
+
+int verify_message_tree(const int32_t &offset, const uint32_t UU(idx), struct verify_message_tree_extra *const e) __attribute__((nonnull(3)));
+int verify_message_tree(const int32_t &offset, const uint32_t UU(idx), struct verify_message_tree_extra *const e)
+{
+ BLOCKNUM blocknum = e->blocknum;
+ int keep_going_on_failure = e->keep_going_on_failure;
+ int result = 0;
+ DBT k, v;
+ ft_msg msg = e->msg_buffer->get_message(offset, &k, &v);
+ bool is_fresh = e->msg_buffer->get_freshness(offset);
+ if (e->broadcast) {
+ VERIFY_ASSERTION(ft_msg_type_applies_all((enum ft_msg_type) msg.type()) || ft_msg_type_does_nothing((enum ft_msg_type) msg.type()),
+ e->i, "message found in broadcast list that is not a broadcast");
+ } else {
+ VERIFY_ASSERTION(ft_msg_type_applies_once((enum ft_msg_type) msg.type()),
+ e->i, "message found in fresh or stale message tree that does not apply once");
+ if (e->is_fresh) {
+ if (e->messages_have_been_moved) {
+ VERIFY_ASSERTION(is_fresh,
+ e->i, "message found in fresh message tree that is not fresh");
+ }
+ } else {
+ VERIFY_ASSERTION(!is_fresh,
+ e->i, "message found in stale message tree that is fresh");
+ }
+ }
+done:
+ return result;
+}
+
+int error_on_iter(const int32_t &UU(offset), const uint32_t UU(idx), void *UU(e));
+int error_on_iter(const int32_t &UU(offset), const uint32_t UU(idx), void *UU(e)) {
+ return TOKUDB_NEEDS_REPAIR;
+}
+
+int verify_marked_messages(const int32_t &offset, const uint32_t UU(idx), struct verify_message_tree_extra *const e) __attribute__((nonnull(3)));
+int verify_marked_messages(const int32_t &offset, const uint32_t UU(idx), struct verify_message_tree_extra *const e)
+{
+ BLOCKNUM blocknum = e->blocknum;
+ int keep_going_on_failure = e->keep_going_on_failure;
+ int result = 0;
+ bool is_fresh = e->msg_buffer->get_freshness(offset);
+ VERIFY_ASSERTION(!is_fresh, e->i, "marked message found in the fresh message tree that is fresh");
+ done:
+ return result;
+}
+
+template<typename verify_omt_t>
+static int
+verify_sorted_by_key_msn(FT_HANDLE ft_handle, message_buffer *msg_buffer, const verify_omt_t &mt) {
+ int result = 0;
+ size_t last_offset = 0;
+ for (uint32_t i = 0; i < mt.size(); i++) {
+ int32_t offset;
+ int r = mt.fetch(i, &offset);
+ assert_zero(r);
+ if (i > 0) {
+ struct toku_msg_buffer_key_msn_cmp_extra extra(ft_handle->ft->cmp, msg_buffer);
+ if (toku_msg_buffer_key_msn_cmp(extra, last_offset, offset) >= 0) {
+ result = TOKUDB_NEEDS_REPAIR;
+ break;
+ }
+ }
+ last_offset = offset;
+ }
+ return result;
+}
+
+template<typename count_omt_t>
+static int
+count_eq_key_msn(FT_HANDLE ft_handle, message_buffer *msg_buffer, const count_omt_t &mt, const DBT *key, MSN msn) {
+ struct toku_msg_buffer_key_msn_heaviside_extra extra(ft_handle->ft->cmp, msg_buffer, key, msn);
+ int r = mt.template find_zero<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(extra, nullptr, nullptr);
+ int count;
+ if (r == 0) {
+ count = 1;
+ } else {
+ assert(r == DB_NOTFOUND);
+ count = 0;
+ }
+ return count;
+}
+
+void
+toku_get_node_for_verify(
+ BLOCKNUM blocknum,
+ FT_HANDLE ft_handle,
+ FTNODE* nodep
+ )
+{
+ uint32_t fullhash = toku_cachetable_hash(ft_handle->ft->cf, blocknum);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
+ toku_pin_ftnode(
+ ft_handle->ft,
+ blocknum,
+ fullhash,
+ &bfe,
+ PL_WRITE_EXPENSIVE, // may_modify_node
+ nodep,
+ false
+ );
+}
+
+struct verify_msg_fn {
+ FT_HANDLE ft_handle;
+ NONLEAF_CHILDINFO bnc;
+ const DBT *curr_less_pivot;
+ const DBT *curr_geq_pivot;
+ BLOCKNUM blocknum;
+ MSN this_msn;
+ int verbose;
+ int keep_going_on_failure;
+ bool messages_have_been_moved;
+
+ MSN last_msn;
+ int msg_i;
+ int result = 0; // needed by VERIFY_ASSERTION
+
+ verify_msg_fn(FT_HANDLE handle, NONLEAF_CHILDINFO nl, const DBT *less, const DBT *geq,
+ BLOCKNUM b, MSN tmsn, int v, int k, bool m) :
+ ft_handle(handle), bnc(nl), curr_less_pivot(less), curr_geq_pivot(geq),
+ blocknum(b), this_msn(tmsn), verbose(v), keep_going_on_failure(k), messages_have_been_moved(m), last_msn(ZERO_MSN), msg_i(0) {
+ }
+
+ int operator()(const ft_msg &msg, bool is_fresh) {
+ enum ft_msg_type type = (enum ft_msg_type) msg.type();
+ MSN msn = msg.msn();
+ XIDS xid = msg.xids();
+ const void *key = msg.kdbt()->data;
+ const void *data = msg.vdbt()->data;
+ uint32_t keylen = msg.kdbt()->size;
+ uint32_t datalen = msg.vdbt()->size;
+
+ int r = verify_msg_in_child_buffer(ft_handle, type, msn, key, keylen, data, datalen, xid,
+ curr_less_pivot,
+ curr_geq_pivot);
+ VERIFY_ASSERTION(r == 0, msg_i, "A message in the buffer is out of place");
+ VERIFY_ASSERTION((msn.msn > last_msn.msn), msg_i, "msn per msg must be monotonically increasing toward newer messages in buffer");
+ VERIFY_ASSERTION((msn.msn <= this_msn.msn), msg_i, "all messages must have msn within limit of this node's max_msn_applied_to_node_in_memory");
+ if (ft_msg_type_applies_once(type)) {
+ int count;
+ DBT keydbt;
+ toku_fill_dbt(&keydbt, key, keylen);
+ int total_count = 0;
+ count = count_eq_key_msn(ft_handle, &bnc->msg_buffer, bnc->fresh_message_tree, toku_fill_dbt(&keydbt, key, keylen), msn);
+ total_count += count;
+ if (is_fresh) {
+ VERIFY_ASSERTION(count == 1, msg_i, "a fresh message was not found in the fresh message tree");
+ } else if (messages_have_been_moved) {
+ VERIFY_ASSERTION(count == 0, msg_i, "a stale message was found in the fresh message tree");
+ }
+ VERIFY_ASSERTION(count <= 1, msg_i, "a message was found multiple times in the fresh message tree");
+ count = count_eq_key_msn(ft_handle, &bnc->msg_buffer, bnc->stale_message_tree, &keydbt, msn);
+
+ total_count += count;
+ if (is_fresh) {
+ VERIFY_ASSERTION(count == 0, msg_i, "a fresh message was found in the stale message tree");
+ } else if (messages_have_been_moved) {
+ VERIFY_ASSERTION(count == 1, msg_i, "a stale message was not found in the stale message tree");
+ }
+ VERIFY_ASSERTION(count <= 1, msg_i, "a message was found multiple times in the stale message tree");
+
+ VERIFY_ASSERTION(total_count <= 1, msg_i, "a message was found in both message trees (or more than once in a single tree)");
+ VERIFY_ASSERTION(total_count >= 1, msg_i, "a message was not found in either message tree");
+ } else {
+ VERIFY_ASSERTION(ft_msg_type_applies_all(type) || ft_msg_type_does_nothing(type), msg_i, "a message was found that does not apply either to all or to only one key");
+ struct count_msgs_extra extra = { .count = 0, .msn = msn, .msg_buffer = &bnc->msg_buffer };
+ bnc->broadcast_list.iterate<struct count_msgs_extra, count_msgs>(&extra);
+ VERIFY_ASSERTION(extra.count == 1, msg_i, "a broadcast message was not found in the broadcast list");
+ }
+ last_msn = msn;
+ msg_i++;
+done:
+ return result;
+ }
+};
+
+static int
+toku_verify_ftnode_internal(FT_HANDLE ft_handle,
+ MSN rootmsn, MSN parentmsn_with_messages, bool messages_exist_above,
+ FTNODE node, int height,
+ const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.)
+ const DBT *greatereq_pivot, // Everything in the subtree should be <= greatereq_pivot. (greatereq_pivot==NULL if there is no greater-or-equal pivot.)
+ int verbose, int keep_going_on_failure, bool messages_have_been_moved)
+{
+ int result=0;
+ MSN this_msn;
+ BLOCKNUM blocknum = node->blocknum;
+
+ //printf("%s:%d pin %p\n", __FILE__, __LINE__, node_v);
+ toku_ftnode_assert_fully_in_memory(node);
+ this_msn = node->max_msn_applied_to_node_on_disk;
+
+ if (height >= 0) {
+ invariant(height == node->height); // this is a bad failure if wrong
+ }
+ if (node->height > 0 && messages_exist_above) {
+ VERIFY_ASSERTION((parentmsn_with_messages.msn >= this_msn.msn), 0, "node msn must be descending down tree, newest messages at top");
+ }
+ // Verify that all the pivot keys are in order.
+ for (int i = 0; i < node->n_children-2; i++) {
+ DBT x, y;
+ int compare = compare_pairs(ft_handle, node->pivotkeys.fill_pivot(i, &x), node->pivotkeys.fill_pivot(i + 1, &y));
+ VERIFY_ASSERTION(compare < 0, i, "Value is >= the next value");
+ }
+ // Verify that all the pivot keys are lesser_pivot < pivot <= greatereq_pivot
+ for (int i = 0; i < node->n_children-1; i++) {
+ DBT x;
+ if (lesser_pivot) {
+ int compare = compare_pairs(ft_handle, lesser_pivot, node->pivotkeys.fill_pivot(i, &x));
+ VERIFY_ASSERTION(compare < 0, i, "Pivot is >= the lower-bound pivot");
+ }
+ if (greatereq_pivot) {
+ int compare = compare_pairs(ft_handle, greatereq_pivot, node->pivotkeys.fill_pivot(i, &x));
+ VERIFY_ASSERTION(compare >= 0, i, "Pivot is < the upper-bound pivot");
+ }
+ }
+
+ for (int i = 0; i < node->n_children; i++) {
+ DBT x, y;
+ const DBT *curr_less_pivot = (i==0) ? lesser_pivot : node->pivotkeys.fill_pivot(i - 1, &x);
+ const DBT *curr_geq_pivot = (i==node->n_children-1) ? greatereq_pivot : node->pivotkeys.fill_pivot(i, &y);
+ if (node->height > 0) {
+ NONLEAF_CHILDINFO bnc = BNC(node, i);
+ // Verify that messages in the buffers are in the right place.
+ VERIFY_ASSERTION(verify_sorted_by_key_msn(ft_handle, &bnc->msg_buffer, bnc->fresh_message_tree) == 0, i, "fresh_message_tree");
+ VERIFY_ASSERTION(verify_sorted_by_key_msn(ft_handle, &bnc->msg_buffer, bnc->stale_message_tree) == 0, i, "stale_message_tree");
+
+ verify_msg_fn verify_msg(ft_handle, bnc, curr_less_pivot, curr_geq_pivot,
+ blocknum, this_msn, verbose, keep_going_on_failure, messages_have_been_moved);
+ int r = bnc->msg_buffer.iterate(verify_msg);
+ if (r != 0) { result = r; goto done; }
+
+ struct verify_message_tree_extra extra = { .msg_buffer = &bnc->msg_buffer, .broadcast = false, .is_fresh = true, .i = i, .verbose = verbose, .blocknum = node->blocknum, .keep_going_on_failure = keep_going_on_failure, .messages_have_been_moved = messages_have_been_moved };
+ r = bnc->fresh_message_tree.iterate<struct verify_message_tree_extra, verify_message_tree>(&extra);
+ if (r != 0) { result = r; goto done; }
+ extra.is_fresh = false;
+ r = bnc->stale_message_tree.iterate<struct verify_message_tree_extra, verify_message_tree>(&extra);
+ if (r != 0) { result = r; goto done; }
+
+ bnc->fresh_message_tree.verify_marks_consistent();
+ if (messages_have_been_moved) {
+ VERIFY_ASSERTION(!bnc->fresh_message_tree.has_marks(), i, "fresh message tree still has marks after moving messages");
+ r = bnc->fresh_message_tree.iterate_over_marked<void, error_on_iter>(nullptr);
+ if (r != 0) { result = r; goto done; }
+ }
+ else {
+ r = bnc->fresh_message_tree.iterate_over_marked<struct verify_message_tree_extra, verify_marked_messages>(&extra);
+ if (r != 0) { result = r; goto done; }
+ }
+
+ extra.broadcast = true;
+ r = bnc->broadcast_list.iterate<struct verify_message_tree_extra, verify_message_tree>(&extra);
+ if (r != 0) { result = r; goto done; }
+ }
+ else {
+ BASEMENTNODE bn = BLB(node, i);
+ for (uint32_t j = 0; j < bn->data_buffer.num_klpairs(); j++) {
+ VERIFY_ASSERTION((rootmsn.msn >= this_msn.msn), 0, "leaf may have latest msn, but cannot be greater than root msn");
+ DBT kdbt = get_ith_key_dbt(bn, j);
+ if (curr_less_pivot) {
+ int compare = compare_pairs(ft_handle, curr_less_pivot, &kdbt);
+ VERIFY_ASSERTION_BASEMENT(compare < 0, i, j, "The leafentry is >= the lower-bound pivot");
+ }
+ if (curr_geq_pivot) {
+ int compare = compare_pairs(ft_handle, curr_geq_pivot, &kdbt);
+ VERIFY_ASSERTION_BASEMENT(compare >= 0, i, j, "The leafentry is < the upper-bound pivot");
+ }
+ if (0 < j) {
+ DBT prev_key_dbt = get_ith_key_dbt(bn, j-1);
+ int compare = compare_pairs(ft_handle, &prev_key_dbt, &kdbt);
+ VERIFY_ASSERTION_BASEMENT(compare < 0, i, j, "Adjacent leafentries are out of order");
+ }
+ }
+ }
+ }
+
+done:
+ return result;
+}
+
+
+// input is a pinned node, on exit, node is unpinned
+int
+toku_verify_ftnode (FT_HANDLE ft_handle,
+ MSN rootmsn, MSN parentmsn_with_messages, bool messages_exist_above,
+ FTNODE node, int height,
+ const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.)
+ const DBT *greatereq_pivot, // Everything in the subtree should be <= greatereq_pivot. (greatereq_pivot==NULL if there is no greater-or-equal pivot.)
+ int (*progress_callback)(void *extra, float progress), void *progress_extra,
+ int recurse, int verbose, int keep_going_on_failure)
+{
+ MSN this_msn;
+
+ //printf("%s:%d pin %p\n", __FILE__, __LINE__, node_v);
+ toku_ftnode_assert_fully_in_memory(node);
+ this_msn = node->max_msn_applied_to_node_on_disk;
+
+ int result = 0;
+ int result2 = 0;
+ if (node->height > 0) {
+ // Otherwise we'll just do the next call
+
+ result = toku_verify_ftnode_internal(
+ ft_handle, rootmsn, parentmsn_with_messages, messages_exist_above, node, height, lesser_pivot, greatereq_pivot,
+ verbose, keep_going_on_failure, false);
+ if (result != 0 && (!keep_going_on_failure || result != TOKUDB_NEEDS_REPAIR)) goto done;
+ }
+ if (node->height > 0) {
+ toku_move_ftnode_messages_to_stale(ft_handle->ft, node);
+ }
+ result2 = toku_verify_ftnode_internal(
+ ft_handle, rootmsn, parentmsn_with_messages, messages_exist_above, node, height, lesser_pivot, greatereq_pivot,
+ verbose, keep_going_on_failure, true);
+ if (result == 0) {
+ result = result2;
+ if (result != 0 && (!keep_going_on_failure || result != TOKUDB_NEEDS_REPAIR)) goto done;
+ }
+
+ // Verify that the subtrees have the right properties.
+ if (recurse && node->height > 0) {
+ for (int i = 0; i < node->n_children; i++) {
+ FTNODE child_node;
+ toku_get_node_for_verify(BP_BLOCKNUM(node, i), ft_handle, &child_node);
+ DBT x, y;
+ int r = toku_verify_ftnode(ft_handle, rootmsn,
+ (toku_bnc_n_entries(BNC(node, i)) > 0
+ ? this_msn
+ : parentmsn_with_messages),
+ messages_exist_above || toku_bnc_n_entries(BNC(node, i)) > 0,
+ child_node, node->height-1,
+ (i==0) ? lesser_pivot : node->pivotkeys.fill_pivot(i - 1, &x),
+ (i==node->n_children-1) ? greatereq_pivot : node->pivotkeys.fill_pivot(i, &y),
+ progress_callback, progress_extra,
+ recurse, verbose, keep_going_on_failure);
+ if (r) {
+ result = r;
+ if (!keep_going_on_failure || result != TOKUDB_NEEDS_REPAIR) goto done;
+ }
+ }
+ }
+done:
+ toku_unpin_ftnode(ft_handle->ft, node);
+
+ if (result == 0 && progress_callback)
+ result = progress_callback(progress_extra, 0.0);
+
+ return result;
+}
+
+int
+toku_verify_ft_with_progress (FT_HANDLE ft_handle, int (*progress_callback)(void *extra, float progress), void *progress_extra, int verbose, int keep_on_going) {
+ assert(ft_handle->ft);
+ FTNODE root_node = NULL;
+ {
+ uint32_t root_hash;
+ CACHEKEY root_key;
+ toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &root_hash);
+ toku_get_node_for_verify(root_key, ft_handle, &root_node);
+ }
+ int r = toku_verify_ftnode(ft_handle, ft_handle->ft->h->max_msn_in_ft, ft_handle->ft->h->max_msn_in_ft, false, root_node, -1, NULL, NULL, progress_callback, progress_extra, 1, verbose, keep_on_going);
+ if (r == 0) {
+ toku_ft_lock(ft_handle->ft);
+ ft_handle->ft->h->time_of_last_verification = time(NULL);
+ ft_handle->ft->h->set_dirty();
+ toku_ft_unlock(ft_handle->ft);
+ }
+ return r;
+}
+
+int
+toku_verify_ft (FT_HANDLE ft_handle) {
+ return toku_verify_ft_with_progress(ft_handle, NULL, NULL, 0, 0);
+}
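+
+// Illustrative only (not part of the original file): a caller that wants progress
+// reporting could supply a callback to toku_verify_ft_with_progress(); the handle
+// name `h` and the callback below are assumptions made for this sketch. A nonzero
+// return from the callback is propagated as the verify result.
+//
+//     static int report_progress(void *extra, float progress) {
+//         (void) extra;
+//         fprintf(stderr, "ft verify progress: %f\n", progress);
+//         return 0;
+//     }
+//
+//     int r = toku_verify_ft_with_progress(h, report_progress, nullptr,
+//                                          1 /* verbose */, 1 /* keep going on failure */);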
diff --git a/storage/tokudb/PerconaFT/ft/ft.cc b/storage/tokudb/PerconaFT/ft/ft.cc
new file mode 100644
index 00000000..1106abfb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft.cc
@@ -0,0 +1,1186 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "ft/serialize/block_table.h"
+#include "ft/ft.h"
+#include "ft/ft-cachetable-wrappers.h"
+#include "ft/ft-internal.h"
+#include "ft/logger/log-internal.h"
+#include "ft/log_header.h"
+#include "ft/node.h"
+#include "ft/serialize/ft-serialize.h"
+#include "ft/serialize/ft_node-serialize.h"
+
+#include <memory.h>
+#include <toku_assert.h>
+#include <portability/toku_atomic.h>
+
+toku_instr_key *ft_ref_lock_mutex_key;
+
+void toku_reset_root_xid_that_created(FT ft, TXNID new_root_xid_that_created) {
+ // Reset the root_xid_that_created field to the given value.
+ // This redefines which xid created the dictionary.
+
+ // hold lock around setting and clearing of dirty bit
+ // (see cooperative use of dirty bit in ft_begin_checkpoint())
+ toku_ft_lock(ft);
+ ft->h->root_xid_that_created = new_root_xid_that_created;
+ ft->h->set_dirty();
+ toku_ft_unlock(ft);
+}
+
+static void
+ft_destroy(FT ft) {
+ //header and checkpoint_header have same Blocktable pointer
+ //cannot destroy since it is still in use by CURRENT
+ assert(ft->h->type == FT_CURRENT);
+ ft->blocktable.destroy();
+ ft->cmp.destroy();
+ toku_destroy_dbt(&ft->descriptor.dbt);
+ toku_destroy_dbt(&ft->cmp_descriptor.dbt);
+ toku_ft_destroy_reflock(ft);
+ toku_free(ft->h);
+}
+
+// Make a copy of the header for the purpose of a checkpoint
+// Not reentrant for a single FT.
+// See ft_checkpoint for explanation of why
+// FT lock must be held.
+static void
+ft_copy_for_checkpoint_unlocked(FT ft, LSN checkpoint_lsn) {
+ assert(ft->h->type == FT_CURRENT);
+ assert(ft->checkpoint_header == NULL);
+
+ FT_HEADER XMEMDUP(ch, ft->h);
+ ch->type = FT_CHECKPOINT_INPROGRESS; //Different type
+ //printf("checkpoint_lsn=%" PRIu64 "\n", checkpoint_lsn.lsn);
+ ch->checkpoint_lsn = checkpoint_lsn;
+
+ //ch->blocktable is SHARED between the two headers
+ ft->checkpoint_header = ch;
+}
+
+void
+toku_ft_free (FT ft) {
+ ft_destroy(ft);
+ toku_free(ft);
+}
+
+void toku_ft_init_reflock(FT ft) {
+ toku_mutex_init(*ft_ref_lock_mutex_key, &ft->ft_ref_lock, nullptr);
+}
+
+void toku_ft_destroy_reflock(FT ft) { toku_mutex_destroy(&ft->ft_ref_lock); }
+
+void
+toku_ft_grab_reflock(FT ft) {
+ toku_mutex_lock(&ft->ft_ref_lock);
+}
+
+void
+toku_ft_release_reflock(FT ft) {
+ toku_mutex_unlock(&ft->ft_ref_lock);
+}
+
+/////////////////////////////////////////////////////////////////////////
+// Start of Functions that are callbacks to the cachefile
+//
+
+// maps to cf->log_fassociate_during_checkpoint
+static void
+ft_log_fassociate_during_checkpoint (CACHEFILE cf, void *header_v) {
+ FT ft = (FT) header_v;
+ char* fname_in_env = toku_cachefile_fname_in_env(cf);
+ BYTESTRING bs = { .len = (uint32_t) strlen(fname_in_env), // don't include the NUL
+ .data = fname_in_env };
+ TOKULOGGER logger = toku_cachefile_logger(cf);
+ FILENUM filenum = toku_cachefile_filenum(cf);
+ bool unlink_on_close = toku_cachefile_is_unlink_on_close(cf);
+ toku_log_fassociate(logger, NULL, 0, filenum, ft->h->flags, bs, unlink_on_close);
+}
+
+// Maps to cf->begin_checkpoint_userdata
+// Create checkpoint-in-progress versions of header and translation (btt)
+// Has access to fd (it is protected).
+//
+// Not reentrant for a single FT (see ft_checkpoint)
+static void ft_begin_checkpoint (LSN checkpoint_lsn, void *header_v) {
+ FT ft = (FT) header_v;
+ // hold lock around copying and clearing of dirty bit
+ toku_ft_lock (ft);
+ assert(ft->h->type == FT_CURRENT);
+ assert(ft->checkpoint_header == NULL);
+ ft_copy_for_checkpoint_unlocked(ft, checkpoint_lsn);
+ ft->h->clear_dirty(); // this is the only place this bit is cleared (in currentheader)
+ ft->blocktable.note_start_checkpoint_unlocked();
+ toku_ft_unlock (ft);
+}
+
+// #4922: Hack to remove data corruption race condition.
+// Reading (and upgrading) a node up to version 19 causes this.
+// We COULD skip this if we know that no nodes remained (as of last checkpoint)
+// that are below version 19.
+// If there are no nodes < version 19 this is harmless (field is unused).
+// If there are, this will make certain the value is at least as low as necessary,
+// and not much lower. (Too low is good, too high can cause data corruption).
+// TODO(yoni): If we ever stop supporting upgrades of nodes < version 19 we can delete this.
+// TODO(yoni): If we know no nodes are left to upgrade, we can skip this. (Probably not worth doing).
+static void
+ft_hack_highest_unused_msn_for_upgrade_for_checkpoint(FT ft) {
+ if (ft->h->layout_version_original < FT_LAYOUT_VERSION_19) {
+ ft->checkpoint_header->highest_unused_msn_for_upgrade = ft->h->highest_unused_msn_for_upgrade;
+ }
+}
+
+// maps to cf->checkpoint_userdata
+// Write checkpoint-in-progress versions of header and translation to disk (really to OS internal buffer).
+// Copy current header's version of checkpoint_staging stat64info to checkpoint header.
+// Must have access to fd (protected).
+// Requires: all pending bits are clear. This implies that no thread will modify the checkpoint_staging
+// version of the stat64info.
+//
+// No locks are taken for checkpoint_count/lsn because this is single threaded. Can be called by:
+// - ft_close
+// - end_checkpoint
+// checkpoints hold references to FTs and so they cannot be closed during a checkpoint.
+// ft_close is not reentrant for a single FT
+// end_checkpoint is not reentrant period
+static void ft_checkpoint (CACHEFILE cf, int fd, void *header_v) {
+ FT ft = (FT) header_v;
+ FT_HEADER ch = ft->checkpoint_header;
+ assert(ch);
+ assert(ch->type == FT_CHECKPOINT_INPROGRESS);
+ if (ch->dirty()) { // this is the only place this bit is tested (in checkpoint_header)
+ TOKULOGGER logger = toku_cachefile_logger(cf);
+ if (logger) {
+ toku_logger_fsync_if_lsn_not_fsynced(logger, ch->checkpoint_lsn);
+ }
+ uint64_t now = (uint64_t) time(NULL);
+ ft->h->time_of_last_modification = now;
+ ch->time_of_last_modification = now;
+ ch->checkpoint_count++;
+ ft_hack_highest_unused_msn_for_upgrade_for_checkpoint(ft);
+ ch->on_disk_logical_rows =
+ ft->h->on_disk_logical_rows = ft->in_memory_logical_rows;
+
+ // write translation and header to disk (or at least to OS internal buffer)
+ toku_serialize_ft_to(fd, ch, &ft->blocktable, ft->cf);
+ ch->clear_dirty(); // this is the only place this bit is cleared (in checkpoint_header)
+
+ // fsync the cachefile
+ toku_cachefile_fsync(cf);
+ ft->h->checkpoint_count++; // checkpoint succeeded, next checkpoint will save to alternate header location
+ ft->h->checkpoint_lsn = ch->checkpoint_lsn; //Header updated.
+ } else {
+ ft->blocktable.note_skipped_checkpoint();
+ }
+}
+
+// maps to cf->end_checkpoint_userdata
+// free unused disk space
+// (i.e. tell BlockAllocator to liberate blocks used by previous checkpoint).
+// Must have access to fd (protected)
+static void ft_end_checkpoint(CACHEFILE UU(cf), int fd, void *header_v) {
+ FT ft = (FT) header_v;
+ assert(ft->h->type == FT_CURRENT);
+ ft->blocktable.note_end_checkpoint(fd);
+ toku_free(ft->checkpoint_header);
+ ft->checkpoint_header = nullptr;
+}
+
+// maps to cf->close_userdata
+// Has access to fd (it is protected).
+static void ft_close(CACHEFILE cachefile, int fd, void *header_v, bool oplsn_valid, LSN oplsn) {
+ FT ft = (FT) header_v;
+ assert(ft->h->type == FT_CURRENT);
+ // We already have exclusive access to this field, so skip the locking.
+ // This should never fail.
+ invariant(!toku_ft_needed_unlocked(ft));
+ assert(ft->cf == cachefile);
+ TOKULOGGER logger = toku_cachefile_logger(cachefile);
+ LSN lsn = ZERO_LSN;
+ //Get LSN
+ if (oplsn_valid) {
+ //Use recovery-specified lsn
+ lsn = oplsn;
+ //Recovery cannot reduce lsn of a header.
+ if (lsn.lsn < ft->h->checkpoint_lsn.lsn) {
+ lsn = ft->h->checkpoint_lsn;
+ }
+ }
+ else {
+ //Get LSN from logger
+ lsn = ZERO_LSN; // if there is no logger, we use zero for the lsn
+ if (logger) {
+ char* fname_in_env = toku_cachefile_fname_in_env(cachefile);
+ assert(fname_in_env);
+ BYTESTRING bs = {.len=(uint32_t) strlen(fname_in_env), .data=fname_in_env};
+ if (!toku_cachefile_is_skip_log_recover_on_close(cachefile)) {
+ toku_log_fclose(
+ logger,
+ &lsn,
+ ft->h->dirty(),
+ bs,
+ toku_cachefile_filenum(cachefile)); // flush the log on
+ // close (if new header
+ // is being written),
+ // otherwise it might
+ // not make it out.
+ toku_cachefile_do_log_recover_on_close(cachefile);
+ }
+ }
+ }
+ if (ft->h->dirty()) { // this is the only place this bit is tested (in currentheader)
+ bool do_checkpoint = true;
+ if (logger && logger->rollback_cachefile == cachefile) {
+ do_checkpoint = false;
+ }
+ if (do_checkpoint) {
+ ft_begin_checkpoint(lsn, header_v);
+ ft_checkpoint(cachefile, fd, ft);
+ ft_end_checkpoint(cachefile, fd, header_v);
+ assert(!ft->h->dirty()); // dirty bit should be cleared by begin_checkpoint and never set again (because we're closing the dictionary)
+ }
+ }
+}
+
+// maps to cf->free_userdata
+static void ft_free(CACHEFILE cachefile UU(), void *header_v) {
+ FT ft = (FT) header_v;
+ toku_ft_free(ft);
+}
+
+// maps to cf->note_pin_by_checkpoint
+//Must be protected by ydb lock.
+//Is only called by checkpoint begin, which holds it
+static void ft_note_pin_by_checkpoint (CACHEFILE UU(cachefile), void *header_v) {
+ // Note: open_close lock is held by checkpoint begin
+ FT ft = (FT) header_v;
+ toku_ft_grab_reflock(ft);
+ assert(!ft->pinned_by_checkpoint);
+ assert(toku_ft_needed_unlocked(ft));
+ ft->pinned_by_checkpoint = true;
+ toku_ft_release_reflock(ft);
+}
+
+// Requires: the reflock is held.
+static void unpin_by_checkpoint_callback(FT ft, void *extra) {
+ invariant(extra == NULL);
+ invariant(ft->pinned_by_checkpoint);
+ ft->pinned_by_checkpoint = false;
+}
+
+// maps to cf->note_unpin_by_checkpoint
+//Must be protected by ydb lock.
+//Called by end_checkpoint, which grabs ydb lock around note_unpin
+static void ft_note_unpin_by_checkpoint (CACHEFILE UU(cachefile), void *header_v) {
+ FT ft = (FT) header_v;
+ toku_ft_remove_reference(ft, false, ZERO_LSN, unpin_by_checkpoint_callback, NULL);
+}
+
+//
+// End of Functions that are callbacks to the cachefile
+/////////////////////////////////////////////////////////////////////////
+
+static void setup_initial_ft_root_node(FT ft, BLOCKNUM blocknum) {
+ FTNODE XCALLOC(node);
+ toku_initialize_empty_ftnode(node, blocknum, 0, 1, ft->h->layout_version, ft->h->flags);
+ BP_STATE(node,0) = PT_AVAIL;
+
+ uint32_t fullhash = toku_cachetable_hash(ft->cf, blocknum);
+ node->fullhash = fullhash;
+ toku_cachetable_put(ft->cf, blocknum, fullhash,
+ node, make_ftnode_pair_attr(node),
+ get_write_callbacks_for_node(ft),
+ toku_ftnode_save_ct_pair);
+ toku_unpin_ftnode(ft, node);
+}
+
+static void ft_init(FT ft, FT_OPTIONS options, CACHEFILE cf) {
+ // fake, prevent unnecessary upgrade logic
+ ft->layout_version_read_from_disk = FT_LAYOUT_VERSION;
+ ft->checkpoint_header = NULL;
+
+ toku_list_init(&ft->live_ft_handles);
+
+ // intuitively, the comparator points to the FT's cmp descriptor
+ ft->cmp.create(options->compare_fun, &ft->cmp_descriptor, options->memcmp_magic);
+ ft->update_fun = options->update_fun;
+
+ if (ft->cf != NULL) {
+ assert(ft->cf == cf);
+ }
+ ft->cf = cf;
+ ft->in_memory_stats = ZEROSTATS;
+
+ setup_initial_ft_root_node(ft, ft->h->root_blocknum);
+ toku_cachefile_set_userdata(ft->cf,
+ ft,
+ ft_log_fassociate_during_checkpoint,
+ ft_close,
+ ft_free,
+ ft_checkpoint,
+ ft_begin_checkpoint,
+ ft_end_checkpoint,
+ ft_note_pin_by_checkpoint,
+ ft_note_unpin_by_checkpoint);
+
+ ft->blocktable.verify_no_free_blocknums();
+}
+
+
+static FT_HEADER
+ft_header_create(FT_OPTIONS options, BLOCKNUM root_blocknum, TXNID root_xid_that_created)
+{
+ uint64_t now = (uint64_t) time(NULL);
+ struct ft_header h = {
+ .type = FT_CURRENT,
+ .dirty_ = 0,
+ .checkpoint_count = 0,
+ .checkpoint_lsn = ZERO_LSN,
+ .layout_version = FT_LAYOUT_VERSION,
+ .layout_version_original = FT_LAYOUT_VERSION,
+ .build_id = BUILD_ID,
+ .build_id_original = BUILD_ID,
+ .time_of_creation = now,
+ .root_xid_that_created = root_xid_that_created,
+ .time_of_last_modification = now,
+ .time_of_last_verification = 0,
+ .root_blocknum = root_blocknum,
+ .flags = options->flags,
+ .nodesize = options->nodesize,
+ .basementnodesize = options->basementnodesize,
+ .compression_method = options->compression_method,
+ .fanout = options->fanout,
+ .highest_unused_msn_for_upgrade = { .msn = (MIN_MSN.msn - 1) },
+ .max_msn_in_ft = ZERO_MSN,
+ .time_of_last_optimize_begin = 0,
+ .time_of_last_optimize_end = 0,
+ .count_of_optimize_in_progress = 0,
+ .count_of_optimize_in_progress_read_from_disk = 0,
+ .msn_at_start_of_last_completed_optimize = ZERO_MSN,
+ .on_disk_stats = ZEROSTATS,
+ .on_disk_logical_rows = 0
+ };
+ return (FT_HEADER) toku_xmemdup(&h, sizeof h);
+}
+
+// allocate and initialize a fractal tree.
+void toku_ft_create(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn) {
+ invariant(ftp);
+
+ FT XCALLOC(ft);
+ ft->h = ft_header_create(options, make_blocknum(0), (txn ? txn->txnid.parent_id64: TXNID_NONE));
+
+ toku_ft_init_reflock(ft);
+
+ // Assign blocknum for root block, also dirty the header
+ ft->blocktable.create();
+ ft->blocktable.allocate_blocknum(&ft->h->root_blocknum, ft);
+
+ ft_init(ft, options, cf);
+
+ *ftp = ft;
+}
+
+// TODO: (Zardosht) get rid of ft parameter
+int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN max_acceptable_lsn, FT *header)
+// If the cachefile already has the header, then just get it.
+// If the cachefile has not been initialized, then don't modify anything.
+// max_acceptable_lsn is the latest acceptable checkpointed version of the file.
+{
+ FT ft = nullptr;
+ if ((ft = (FT) toku_cachefile_get_userdata(cf)) != nullptr) {
+ *header = ft;
+ assert(ft_handle->options.update_fun == ft->update_fun);
+ return 0;
+ }
+
+ int fd = toku_cachefile_get_fd(cf);
+ const char *fn = toku_cachefile_fname_in_env(cf);
+ int r = toku_deserialize_ft_from(fd, fn, max_acceptable_lsn, &ft);
+ if (r == TOKUDB_BAD_CHECKSUM) {
+ fprintf(stderr, "Checksum failure while reading header in file %s.\n", toku_cachefile_fname_in_env(cf));
+ assert(false); // make absolutely sure we crash before doing anything else
+ } else if (r != 0) {
+ return r;
+ }
+
+ invariant_notnull(ft);
+ // intuitively, the comparator points to the FT's cmp descriptor
+ ft->cmp.create(ft_handle->options.compare_fun, &ft->cmp_descriptor, ft_handle->options.memcmp_magic);
+ ft->update_fun = ft_handle->options.update_fun;
+ ft->cf = cf;
+ toku_cachefile_set_userdata(cf,
+ reinterpret_cast<void *>(ft),
+ ft_log_fassociate_during_checkpoint,
+ ft_close,
+ ft_free,
+ ft_checkpoint,
+ ft_begin_checkpoint,
+ ft_end_checkpoint,
+ ft_note_pin_by_checkpoint,
+ ft_note_unpin_by_checkpoint);
+ *header = ft;
+ return 0;
+}
+
+void
+toku_ft_note_ft_handle_open(FT ft, FT_HANDLE live) {
+ toku_ft_grab_reflock(ft);
+ live->ft = ft;
+ toku_list_push(&ft->live_ft_handles, &live->live_ft_handle_link);
+ toku_ft_release_reflock(ft);
+}
+
+// the reference count for a ft is the number of txn's that
+// touched it plus the number of open handles plus one if
+// pinned by a checkpoint.
+static int
+ft_get_reference_count(FT ft) {
+ uint32_t pinned_by_checkpoint = ft->pinned_by_checkpoint ? 1 : 0;
+ int num_handles = toku_list_num_elements_est(&ft->live_ft_handles);
+ return pinned_by_checkpoint + ft->num_txns + num_handles;
+}
+
+// a ft is needed in memory iff its reference count is non-zero
+bool
+toku_ft_needed_unlocked(FT ft) {
+ return ft_get_reference_count(ft) != 0;
+}
+
+// get the reference count and return true if it was 1
+bool
+toku_ft_has_one_reference_unlocked(FT ft) {
+ return ft_get_reference_count(ft) == 1;
+}
+
+// evict a ft from memory by closing its cachefile. any future work
+// will have to read in the ft in a new cachefile and new FT object.
+void toku_ft_evict_from_memory(FT ft, bool oplsn_valid, LSN oplsn) {
+ assert(ft->cf);
+ toku_cachefile_close(&ft->cf, oplsn_valid, oplsn);
+}
+
+// Verifies there exists exactly one ft handle and returns it.
+FT_HANDLE toku_ft_get_only_existing_ft_handle(FT ft) {
+ FT_HANDLE ft_handle_ret = NULL;
+ toku_ft_grab_reflock(ft);
+ assert(toku_list_num_elements_est(&ft->live_ft_handles) == 1);
+ ft_handle_ret = toku_list_struct(toku_list_head(&ft->live_ft_handles), struct ft_handle, live_ft_handle_link);
+ toku_ft_release_reflock(ft);
+ return ft_handle_ret;
+}
+
+// Purpose: set fields in ft_header to capture accountability info for start of HOT optimize.
+// Note: HOT accountability variables in header are modified only while holding header lock.
+// (Header lock is really needed for touching the dirty bit, but it's useful and
+// convenient here for keeping the HOT variables threadsafe.)
+void
+toku_ft_note_hot_begin(FT_HANDLE ft_handle) {
+ FT ft = ft_handle->ft;
+ time_t now = time(NULL);
+
+ // hold lock around setting and clearing of dirty bit
+ // (see cooperative use of dirty bit in ft_begin_checkpoint())
+ toku_ft_lock(ft);
+ ft->h->time_of_last_optimize_begin = now;
+ ft->h->count_of_optimize_in_progress++;
+ ft->h->set_dirty();
+ toku_ft_unlock(ft);
+}
+
+
+// Purpose: set fields in ft_header to capture accountability info for end of HOT optimize.
+// Note: See note for toku_ft_note_hot_begin().
+void
+toku_ft_note_hot_complete(FT_HANDLE ft_handle, bool success, MSN msn_at_start_of_hot) {
+ FT ft = ft_handle->ft;
+ time_t now = time(NULL);
+
+ toku_ft_lock(ft);
+ ft->h->count_of_optimize_in_progress--;
+ if (success) {
+ ft->h->time_of_last_optimize_end = now;
+ ft->h->msn_at_start_of_last_completed_optimize = msn_at_start_of_hot;
+ // If we just successfully completed an optimization and no other thread is performing
+ // an optimization, then the number of optimizations in progress is zero.
+ // If there was a crash during a HOT optimization, this is how count_of_optimize_in_progress
+ // would be reset to zero on the disk after recovery from that crash.
+ if (ft->h->count_of_optimize_in_progress == ft->h->count_of_optimize_in_progress_read_from_disk)
+ ft->h->count_of_optimize_in_progress = 0;
+ }
+ ft->h->set_dirty();
+ toku_ft_unlock(ft);
+}
+
+
+void
+toku_ft_init(FT ft,
+ BLOCKNUM root_blocknum_on_disk,
+ LSN checkpoint_lsn,
+ TXNID root_xid_that_created,
+ uint32_t target_nodesize,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method compression_method,
+ uint32_t fanout
+ )
+{
+ memset(ft, 0, sizeof *ft);
+ struct ft_options options = {
+ .nodesize = target_nodesize,
+ .basementnodesize = target_basementnodesize,
+ .compression_method = compression_method,
+ .fanout = fanout,
+ .flags = 0,
+ .memcmp_magic = 0,
+ .compare_fun = NULL,
+ .update_fun = NULL
+ };
+ ft->h = ft_header_create(&options, root_blocknum_on_disk, root_xid_that_created);
+ ft->h->checkpoint_count = 1;
+ ft->h->checkpoint_lsn = checkpoint_lsn;
+}
+
+// Open an ft for use by redirect. The new ft must have the same dict_id as the old_ft passed in. (FILENUM is assigned by the ft_handle_open() function.)
+static int
+ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTXN txn, FT old_ft) {
+ FT_HANDLE ft_handle;
+ assert(old_ft->dict_id.dictid != DICTIONARY_ID_NONE.dictid);
+ toku_ft_handle_create(&ft_handle);
+ toku_ft_set_bt_compare(ft_handle, old_ft->cmp.get_compare_func());
+ toku_ft_set_update(ft_handle, old_ft->update_fun);
+ toku_ft_handle_set_nodesize(ft_handle, old_ft->h->nodesize);
+ toku_ft_handle_set_basementnodesize(ft_handle, old_ft->h->basementnodesize);
+ toku_ft_handle_set_compression_method(ft_handle, old_ft->h->compression_method);
+ toku_ft_handle_set_fanout(ft_handle, old_ft->h->fanout);
+ CACHETABLE ct = toku_cachefile_get_cachetable(old_ft->cf);
+ int r = toku_ft_handle_open_with_dict_id(ft_handle, fname_in_env, 0, 0, ct, txn, old_ft->dict_id);
+ if (r != 0) {
+ goto cleanup;
+ }
+ assert(ft_handle->ft->dict_id.dictid == old_ft->dict_id.dictid);
+ *new_ftp = ft_handle;
+
+ cleanup:
+ if (r != 0) {
+ toku_ft_handle_close(ft_handle);
+ }
+ return r;
+}
+
+// This function performs most of the work to redirect a dictionary to a different file.
+// It is called for redirect and to abort a redirect. (This function is almost its own inverse.)
+static int
+dictionary_redirect_internal(const char *dst_fname_in_env, FT src_ft, TOKUTXN txn, FT *dst_ftp) {
+ int r;
+
+ FILENUM src_filenum = toku_cachefile_filenum(src_ft->cf);
+ FILENUM dst_filenum = FILENUM_NONE;
+
+ FT dst_ft = NULL;
+ struct toku_list *list;
+ // open a dummy ft based off of
+ // dst_fname_in_env to get the header,
+ // then change every live ft handle so its
+ // header points to dst_ft instead of src_ft
+ FT_HANDLE tmp_dst_ft = NULL;
+ r = ft_handle_open_for_redirect(&tmp_dst_ft, dst_fname_in_env, txn, src_ft);
+ if (r != 0) {
+ goto cleanup;
+ }
+ dst_ft = tmp_dst_ft->ft;
+
+ // some sanity checks on dst_filenum
+ dst_filenum = toku_cachefile_filenum(dst_ft->cf);
+ assert(dst_filenum.fileid!=FILENUM_NONE.fileid);
+ assert(dst_filenum.fileid!=src_filenum.fileid); //Cannot be same file.
+
+ // for each live ft_handle, ft_handle->ft is currently src_ft
+ // we want to change it to dummy_dst
+ toku_ft_grab_reflock(src_ft);
+ while (!toku_list_empty(&src_ft->live_ft_handles)) {
+ list = src_ft->live_ft_handles.next;
+ FT_HANDLE src_handle = NULL;
+ src_handle = toku_list_struct(list, struct ft_handle, live_ft_handle_link);
+
+ toku_list_remove(&src_handle->live_ft_handle_link);
+
+ toku_ft_note_ft_handle_open(dst_ft, src_handle);
+ if (src_handle->redirect_callback) {
+ src_handle->redirect_callback(src_handle, src_handle->redirect_callback_extra);
+ }
+ }
+ assert(dst_ft);
+ // making sure that we are not leaking src_ft
+ assert(toku_ft_needed_unlocked(src_ft));
+ toku_ft_release_reflock(src_ft);
+
+ toku_ft_handle_close(tmp_dst_ft);
+
+ *dst_ftp = dst_ft;
+cleanup:
+ return r;
+}
+
+
+
+//This is the 'abort redirect' function. The redirect of old_ft to new_ft was done
+//and now must be undone, so here we redirect new_ft back to old_ft.
+int
+toku_dictionary_redirect_abort(FT old_ft, FT new_ft, TOKUTXN txn) {
+ char *old_fname_in_env = toku_cachefile_fname_in_env(old_ft->cf);
+ int r;
+ {
+ FILENUM old_filenum = toku_cachefile_filenum(old_ft->cf);
+ FILENUM new_filenum = toku_cachefile_filenum(new_ft->cf);
+ assert(old_filenum.fileid!=new_filenum.fileid); //Cannot be same file.
+
+ //No living fts in old header.
+ toku_ft_grab_reflock(old_ft);
+ assert(toku_list_empty(&old_ft->live_ft_handles));
+ toku_ft_release_reflock(old_ft);
+ }
+
+ FT dst_ft;
+ // redirect back from new_ft to old_ft
+ r = dictionary_redirect_internal(old_fname_in_env, new_ft, txn, &dst_ft);
+ if (r == 0) {
+ assert(dst_ft == old_ft);
+ }
+ return r;
+}
+
+/****
+ * on redirect or abort:
+ * if redirect txn_note_doing_work(txn)
+ * if redirect connect src ft to txn (txn modified this ft)
+ * for each src ft
+ * open ft to dst file (create new ft struct)
+ * if redirect connect dst ft to txn
+ * redirect db to new ft
+ * redirect cursors to new ft
+ * close all src fts
+ * if redirect make rollback log entry
+ *
+ * on commit:
+ * nothing to do
+ *
+ *****/
+
+int
+toku_dictionary_redirect (const char *dst_fname_in_env, FT_HANDLE old_ft_h, TOKUTXN txn) {
+// Input args:
+// new file name for dictionary (relative to env)
+// old_ft_h is a live ft of open handle ({DB, FT_HANDLE} pair) that currently refers to old dictionary file.
+// (old_ft_h may be one of many handles to the dictionary.)
+// txn that created the loader
+// Requires:
+// multi operation lock is held.
+// The ft is open. (which implies there can be no zombies.)
+// The new file must be a valid dictionary.
+// The block size and flags in the new file must match the existing FT.
+// The new file must already have its descriptor in it (and it must match the existing descriptor).
+// Effect:
+// Open new FTs (and related header and cachefile) to the new dictionary file with a new FILENUM.
+// Redirect all DBs that point to fts that point to the old file to point to fts that point to the new file.
+// Copy the dictionary id (dict_id) from the header of the original file to the header of the new file.
+// Create a rollback log entry.
+// The original FT, header, cachefile and file remain unchanged. They will be cleaned up on commit.
+// If the txn aborts, then this operation will be undone
+ int r;
+
+ FT old_ft = old_ft_h->ft;
+
+ // dst file should not be open. (implies that dst and src are different because src must be open.)
+ {
+ CACHETABLE ct = toku_cachefile_get_cachetable(old_ft->cf);
+ CACHEFILE cf;
+ r = toku_cachefile_of_iname_in_env(ct, dst_fname_in_env, &cf);
+ if (r==0) {
+ r = EINVAL;
+ goto cleanup;
+ }
+ assert(r==ENOENT);
+ r = 0;
+ }
+
+ if (txn) {
+ toku_txn_maybe_note_ft(txn, old_ft); // mark old ft as touched by this txn
+ }
+
+ FT new_ft;
+ r = dictionary_redirect_internal(dst_fname_in_env, old_ft, txn, &new_ft);
+ if (r != 0) {
+ goto cleanup;
+ }
+
+ // make rollback log entry
+ if (txn) {
+ toku_txn_maybe_note_ft(txn, new_ft); // mark new ft as touched by this txn
+
+ // There is no recovery log entry for redirect,
+ // and rollback log entries are not allowed for read-only transactions.
+ // Normally the recovery log entry would ensure the begin was logged.
+ if (!txn->begin_was_logged) {
+ toku_maybe_log_begin_txn_for_write_operation(txn);
+ }
+ FILENUM old_filenum = toku_cachefile_filenum(old_ft->cf);
+ FILENUM new_filenum = toku_cachefile_filenum(new_ft->cf);
+ toku_logger_save_rollback_dictionary_redirect(txn, old_filenum, new_filenum);
+ }
+
+cleanup:
+ return r;
+}
+
+// Insert reference to transaction into ft
+void
+toku_ft_add_txn_ref(FT ft) {
+ toku_ft_grab_reflock(ft);
+ ++ft->num_txns;
+ toku_ft_release_reflock(ft);
+}
+
+static void
+remove_txn_ref_callback(FT ft, void *UU(context)) {
+ invariant(ft->num_txns > 0);
+ --ft->num_txns;
+}
+
+void
+toku_ft_remove_txn_ref(FT ft) {
+ toku_ft_remove_reference(ft, false, ZERO_LSN, remove_txn_ref_callback, NULL);
+}
+
+void toku_calculate_root_offset_pointer (
+ FT ft,
+ CACHEKEY* root_key,
+ uint32_t *roothash
+ )
+{
+ *roothash = toku_cachetable_hash(ft->cf, ft->h->root_blocknum);
+ *root_key = ft->h->root_blocknum;
+}
+
+void toku_ft_set_new_root_blocknum(
+ FT ft,
+ CACHEKEY new_root_key
+ )
+{
+ ft->h->root_blocknum = new_root_key;
+}
+
+LSN toku_ft_checkpoint_lsn(FT ft) {
+ return ft->h->checkpoint_lsn;
+}
+
+void
+toku_ft_stat64 (FT ft, struct ftstat64_s *s) {
+ s->fsize = toku_cachefile_size(ft->cf);
+ // just use the in memory stats from the header
+ // prevent appearance of negative numbers for numrows, numbytes
+ // if the logical count was never properly re-counted on an upgrade,
+ // return the existing physical count instead.
+ int64_t n;
+ if (ft->in_memory_logical_rows == (uint64_t)-1) {
+ n = ft->in_memory_stats.numrows;
+ } else {
+ n = ft->in_memory_logical_rows;
+ }
+ if (n < 0) {
+ n = 0;
+ }
+ s->nkeys = s->ndata = n;
+ n = ft->in_memory_stats.numbytes;
+ if (n < 0) {
+ n = 0;
+ }
+ s->dsize = n;
+ s->create_time_sec = ft->h->time_of_creation;
+ s->modify_time_sec = ft->h->time_of_last_modification;
+ s->verify_time_sec = ft->h->time_of_last_verification;
+}
+
+void toku_ft_get_fractal_tree_info64(FT ft, struct ftinfo64 *info) {
+ ft->blocktable.get_info64(info);
+}
+
+int toku_ft_iterate_fractal_tree_block_map(FT ft, int (*iter)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*), void *iter_extra) {
+ uint64_t this_checkpoint_count = ft->h->checkpoint_count;
+ return ft->blocktable.iterate_translation_tables(this_checkpoint_count, iter, iter_extra);
+}
+
+void
+toku_ft_update_descriptor(FT ft, DESCRIPTOR desc)
+// Effect: Changes the descriptor in a tree (log the change, make sure it makes it to disk eventually).
+// requires: the ft is fully user-opened with a valid cachefile.
+// descriptor updates cannot happen in parallel for an FT
+// (ydb layer uses a row lock to enforce this)
+{
+ assert(ft->cf);
+ int fd = toku_cachefile_get_fd(ft->cf);
+ toku_ft_update_descriptor_with_fd(ft, desc, fd);
+}
+
+// update the descriptor for an ft and serialize it using
+// the given descriptor instead of reading the descriptor
+// from the ft's cachefile. we do this so the serialize code can
+// update a descriptor before the ft is fully opened and has
+// a valid cachefile.
+void
+toku_ft_update_descriptor_with_fd(FT ft, DESCRIPTOR desc, int fd) {
+ // the checksum is four bytes, so that's where the magic number comes from
+ // make space for the new descriptor and write it out to disk
+ DISKOFF offset, size;
+ size = toku_serialize_descriptor_size(desc) + 4;
+ ft->blocktable.realloc_descriptor_on_disk(size, &offset, ft, fd);
+ toku_serialize_descriptor_contents_to_fd(fd, desc, offset);
+
+ // cleanup the old descriptor and set the in-memory descriptor to the new one
+ toku_destroy_dbt(&ft->descriptor.dbt);
+ toku_clone_dbt(&ft->descriptor.dbt, desc->dbt);
+}
+
+void toku_ft_update_cmp_descriptor(FT ft) {
+ // cleanup the old cmp descriptor and clone it as the in-memory descriptor
+ toku_destroy_dbt(&ft->cmp_descriptor.dbt);
+ toku_clone_dbt(&ft->cmp_descriptor.dbt, ft->descriptor.dbt);
+}
+
+DESCRIPTOR toku_ft_get_descriptor(FT_HANDLE ft_handle) {
+ return &ft_handle->ft->descriptor;
+}
+
+DESCRIPTOR toku_ft_get_cmp_descriptor(FT_HANDLE ft_handle) {
+ return &ft_handle->ft->cmp_descriptor;
+}
+
+void toku_ft_update_stats(STAT64INFO headerstats, STAT64INFO_S delta) {
+ (void) toku_sync_fetch_and_add(&(headerstats->numrows), delta.numrows);
+ (void) toku_sync_fetch_and_add(&(headerstats->numbytes), delta.numbytes);
+}
+
+void toku_ft_decrease_stats(STAT64INFO headerstats, STAT64INFO_S delta) {
+ (void) toku_sync_fetch_and_sub(&(headerstats->numrows), delta.numrows);
+ (void) toku_sync_fetch_and_sub(&(headerstats->numbytes), delta.numbytes);
+}
+
+void toku_ft_adjust_logical_row_count(FT ft, int64_t delta) {
+ // In order to make sure that the correct count is returned from
+ // toku_ft_stat64, the ft->(in_memory|on_disk)_logical_rows _MUST_NOT_ be
+ // modified from anywhere else from here with the exceptions of
+ // serializing in a header, initializing a new header and analyzing
+ // an index for a logical_row count.
+ // The gist is that on an index upgrade, all logical_rows values
+ // in the ft header are set to -1 until an analyze can reset it to an
+ // accurate value. Until then, the physical count from in_memory_stats
+ // must be returned in toku_ft_stat64.
+ if (delta != 0 && ft->in_memory_logical_rows != (uint64_t)-1) {
+ toku_sync_fetch_and_add(&(ft->in_memory_logical_rows), delta);
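+ // if the adjusted count lands exactly on the (uint64_t)-1 sentinel,
+ // nudge it by one so it is not mistaken for the "never analyzed"
+ // marker described above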
+ if (ft->in_memory_logical_rows == (uint64_t)-1) {
+ toku_sync_fetch_and_add(&(ft->in_memory_logical_rows), 1);
+ }
+ }
+}
+
+void toku_ft_remove_reference(
+ FT ft,
+ bool oplsn_valid,
+ LSN oplsn,
+ remove_ft_ref_callback remove_ref,
+ void *extra) {
+
+ toku_ft_grab_reflock(ft);
+ if (toku_ft_has_one_reference_unlocked(ft)) {
+ toku_ft_release_reflock(ft);
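+ // drop the reflock before taking the open/close lock and re-acquiring the
+ // reflock (presumably to respect lock ordering); whether the ft is still
+ // needed is re-checked below before deciding to evict it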
+
+ toku_ft_open_close_lock();
+ toku_ft_grab_reflock(ft);
+
+ remove_ref(ft, extra);
+ bool needed = toku_ft_needed_unlocked(ft);
+ toku_ft_release_reflock(ft);
+
+ // if we're running during recovery, we must close the underlying ft.
+ // we know we're running in recovery if we were passed a valid lsn.
+ if (oplsn_valid) {
+ assert(!needed);
+ }
+ if (!needed) {
+ // close header
+ toku_ft_evict_from_memory(ft, oplsn_valid, oplsn);
+ }
+
+ toku_ft_open_close_unlock();
+ }
+ else {
+ remove_ref(ft, extra);
+ toku_ft_release_reflock(ft);
+ }
+}
+
+void toku_ft_set_nodesize(FT ft, unsigned int nodesize) {
+ toku_ft_lock(ft);
+ ft->h->nodesize = nodesize;
+ ft->h->set_dirty();
+ toku_ft_unlock(ft);
+}
+
+void toku_ft_get_nodesize(FT ft, unsigned int *nodesize) {
+ toku_ft_lock(ft);
+ *nodesize = ft->h->nodesize;
+ toku_ft_unlock(ft);
+}
+
+void toku_ft_set_basementnodesize(FT ft, unsigned int basementnodesize) {
+ toku_ft_lock(ft);
+ ft->h->basementnodesize = basementnodesize;
+ ft->h->set_dirty();
+ toku_ft_unlock(ft);
+}
+
+void toku_ft_get_basementnodesize(FT ft, unsigned int *basementnodesize) {
+ toku_ft_lock(ft);
+ *basementnodesize = ft->h->basementnodesize;
+ toku_ft_unlock(ft);
+}
+
+void toku_ft_set_compression_method(FT ft, enum toku_compression_method method) {
+ toku_ft_lock(ft);
+ ft->h->compression_method = method;
+ ft->h->set_dirty();
+ toku_ft_unlock(ft);
+}
+
+void toku_ft_get_compression_method(FT ft, enum toku_compression_method *methodp) {
+ toku_ft_lock(ft);
+ *methodp = ft->h->compression_method;
+ toku_ft_unlock(ft);
+}
+
+void toku_ft_set_fanout(FT ft, unsigned int fanout) {
+ toku_ft_lock(ft);
+ ft->h->fanout = fanout;
+ ft->h->set_dirty();
+ toku_ft_unlock(ft);
+}
+
+void toku_ft_get_fanout(FT ft, unsigned int *fanout) {
+ toku_ft_lock(ft);
+ *fanout = ft->h->fanout;
+ toku_ft_unlock(ft);
+}
+
+// mark the ft as a blackhole. any message injections will be a no op.
+void toku_ft_set_blackhole(FT_HANDLE ft_handle) {
+ ft_handle->ft->blackhole = true;
+}
+
+struct garbage_helper_extra {
+ FT ft;
+ size_t total_space;
+ size_t used_space;
+};
+
+static int
+garbage_leafentry_helper(const void* key UU(), const uint32_t keylen, const LEAFENTRY & le, uint32_t UU(idx), struct garbage_helper_extra * const info) {
+ //TODO #warning need to reanalyze for split
+ info->total_space += leafentry_disksize(le) + keylen + sizeof(keylen);
+ if (!le_latest_is_del(le)) {
+ info->used_space += LE_CLEAN_MEMSIZE(le_latest_vallen(le)) + keylen + sizeof(keylen);
+ }
+ return 0;
+}
+
+static int
+garbage_helper(BLOCKNUM blocknum, int64_t UU(size), int64_t UU(address), void *extra) {
+ struct garbage_helper_extra *CAST_FROM_VOIDP(info, extra);
+ FTNODE node;
+ FTNODE_DISK_DATA ndd;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(info->ft);
+ int fd = toku_cachefile_get_fd(info->ft->cf);
+ int r = toku_deserialize_ftnode_from(fd, blocknum, 0, &node, &ndd, &bfe);
+ if (r != 0) {
+ goto no_node;
+ }
+ if (node->height > 0) {
+ goto exit;
+ }
+ for (int i = 0; i < node->n_children; ++i) {
+ bn_data* bd = BLB_DATA(node, i);
+ r = bd->iterate<struct garbage_helper_extra, garbage_leafentry_helper>(info);
+ if (r != 0) {
+ goto exit;
+ }
+ }
+ {
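+ // the garbage percentage is the fraction of allocated leaf space that is
+ // MVCC garbage, i.e. space not needed to hold the latest live values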
+ float a = info->used_space, b=info->total_space;
+ float percentage = (1 - (a / b)) * 100;
+ printf("LeafNode# %d has %d BasementNodes and %2.1f%% of the allocated space is garbage\n", (int)blocknum.b, node->n_children, percentage);
+ }
+exit:
+ toku_ftnode_free(&node);
+ toku_free(ndd);
+no_node:
+ return r;
+}
+
+void toku_ft_get_garbage(FT ft, uint64_t *total_space, uint64_t *used_space) {
+// Effect: Iterates the FT's blocktable and calculates the total and used space for leaf blocks.
+// Note: It is ok to call this function concurrently with reads/writes to the table since
+// the blocktable lock is held, which means no new allocations or file writes can occur.
+ invariant_notnull(total_space);
+ invariant_notnull(used_space);
+ struct garbage_helper_extra info = {
+ .ft = ft,
+ .total_space = 0,
+ .used_space = 0
+ };
+ ft->blocktable.iterate(block_table::TRANSLATION_CHECKPOINTED, garbage_helper, &info, true, true);
+ *total_space = info.total_space;
+ *used_space = info.used_space;
+}
+
+
+#if !defined(TOKUDB_REVISION)
+#error
+#endif
+
+#define xstr(X) str(X)
+#define str(X) #X
+#define static_version_string xstr(DB_VERSION_MAJOR) "." \
+ xstr(DB_VERSION_MINOR) "." \
+ xstr(DB_VERSION_PATCH) " build " \
+ xstr(TOKUDB_REVISION)
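+// The two-level str/xstr expansion stringifies the values of DB_VERSION_MAJOR,
+// DB_VERSION_MINOR, DB_VERSION_PATCH and TOKUDB_REVISION rather than the macro
+// names themselves.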
+struct toku_product_name_strings_struct toku_product_name_strings;
+
+char toku_product_name[TOKU_MAX_PRODUCT_NAME_LENGTH];
+void tokuft_update_product_name_strings(void) {
+ // DO ALL STRINGS HERE.. maybe have a separate FT layer version as well
+ {
+ int n = snprintf(toku_product_name_strings.db_version,
+ sizeof(toku_product_name_strings.db_version),
+ "%s %s", toku_product_name, static_version_string);
+ assert(n >= 0);
+ assert((unsigned)n < sizeof(toku_product_name_strings.db_version));
+ }
+ {
+ int n = snprintf(toku_product_name_strings.fileopsdirectory,
+ sizeof(toku_product_name_strings.fileopsdirectory),
+ "%s.directory", toku_product_name);
+ assert(n >= 0);
+ assert((unsigned)n < sizeof(toku_product_name_strings.fileopsdirectory));
+ }
+ {
+ int n = snprintf(toku_product_name_strings.environmentdictionary,
+ sizeof(toku_product_name_strings.environmentdictionary),
+ "%s.environment", toku_product_name);
+ assert(n >= 0);
+ assert((unsigned)n < sizeof(toku_product_name_strings.environmentdictionary));
+ }
+ {
+ int n = snprintf(toku_product_name_strings.rollback_cachefile,
+ sizeof(toku_product_name_strings.rollback_cachefile),
+ "%s.rollback", toku_product_name);
+ assert(n >= 0);
+ assert((unsigned)n < sizeof(toku_product_name_strings.rollback_cachefile));
+ }
+ {
+ int n = snprintf(toku_product_name_strings.single_process_lock,
+ sizeof(toku_product_name_strings.single_process_lock),
+ "__%s_lock_dont_delete_me", toku_product_name);
+ assert(n >= 0);
+ assert((unsigned)n < sizeof(toku_product_name_strings.single_process_lock));
+ }
+}
+#undef xstr
+#undef str
+
+int
+toku_single_process_lock(const char *lock_dir, const char *which, int *lockfd) {
+ if (!lock_dir)
+ return ENOENT;
+ int namelen=strlen(lock_dir)+strlen(which);
+ char lockfname[namelen+sizeof("/_") + strlen(toku_product_name_strings.single_process_lock)];
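+ // sizeof("/_") accounts for the '/', the '_', and the terminating NUL;
+ // the assert below checks that the formatted name fills the buffer exactly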
+
+ int l = snprintf(lockfname, sizeof(lockfname), "%s/%s_%s",
+ lock_dir, toku_product_name_strings.single_process_lock, which);
+ assert(l+1 == (signed)(sizeof(lockfname)));
+ *lockfd = toku_os_lock_file(lockfname);
+ if (*lockfd < 0) {
+ int e = get_error_errno();
+ fprintf(stderr, "Couldn't start tokuft because some other tokuft process is using the same directory [%s] for [%s]\n", lock_dir, which);
+ return e;
+ }
+ return 0;
+}
+
+int
+toku_single_process_unlock(int *lockfd) {
+ int fd = *lockfd;
+ *lockfd = -1;
+ if (fd>=0) {
+ int r = toku_os_unlock_file(fd);
+ if (r != 0)
+ return get_error_errno();
+ }
+ return 0;
+}
+
+int tokuft_num_envs = 0;
+int
+db_env_set_toku_product_name(const char *name) {
+ if (tokuft_num_envs > 0) {
+ return EINVAL;
+ }
+ if (!name || strlen(name) < 1) {
+ return EINVAL;
+ }
+ if (strlen(name) >= sizeof(toku_product_name)) {
+ return ENAMETOOLONG;
+ }
+ if (strncmp(toku_product_name, name, sizeof(toku_product_name))) {
+ strcpy(toku_product_name, name);
+ tokuft_update_product_name_strings();
+ }
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/ft.h b/storage/tokudb/PerconaFT/ft/ft.h
new file mode 100644
index 00000000..5c6caead
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ft.h
@@ -0,0 +1,195 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+#include "ft/cachetable/cachetable.h"
+#include "ft/ft-ops.h"
+#include "ft/logger/log.h"
+#include "util/dbt.h"
+#ifndef TOKU_MYSQL_WITH_PFS
+#include <my_global.h>
+#endif
+
+typedef struct ft *FT;
+typedef struct ft_options *FT_OPTIONS;
+
+// unlink a ft from the filesystem with or without a txn.
+// if with a txn, then the unlink happens on commit.
+void toku_ft_unlink(FT_HANDLE handle);
+void toku_ft_unlink_on_commit(FT_HANDLE handle, TOKUTXN txn);
+
+int toku_ft_rename_iname(DB_TXN *txn,
+ const char *data_dir,
+ const char *old_iname,
+ const char *new_iname,
+ CACHETABLE ct);
+
+void toku_ft_init_reflock(FT ft);
+void toku_ft_destroy_reflock(FT ft);
+void toku_ft_grab_reflock(FT ft);
+void toku_ft_release_reflock(FT ft);
+
+void toku_ft_lock(struct ft *ft);
+void toku_ft_unlock(struct ft *ft);
+
+void toku_ft_create(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn);
+void toku_ft_free (FT ft);
+
+int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_h, CACHEFILE cf, LSN max_acceptable_lsn, FT *header);
+void toku_ft_note_ft_handle_open(FT ft, FT_HANDLE live);
+
+bool toku_ft_needed_unlocked(FT ft);
+bool toku_ft_has_one_reference_unlocked(FT ft);
+
+// evict a ft from memory by closing its cachefile. any future work
+// will have to read in the ft in a new cachefile and new FT object.
+void toku_ft_evict_from_memory(FT ft, bool oplsn_valid, LSN oplsn);
+
+FT_HANDLE toku_ft_get_only_existing_ft_handle(FT ft);
+
+void toku_ft_note_hot_begin(FT_HANDLE ft_h);
+void toku_ft_note_hot_complete(FT_HANDLE ft_h, bool success, MSN msn_at_start_of_hot);
+
+void
+toku_ft_init(
+ FT ft,
+ BLOCKNUM root_blocknum_on_disk,
+ LSN checkpoint_lsn,
+ TXNID root_xid_that_created,
+ uint32_t target_nodesize,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method compression_method,
+ uint32_t fanout
+ );
+
+int toku_dictionary_redirect_abort(FT old_h, FT new_h, TOKUTXN txn) __attribute__ ((warn_unused_result));
+int toku_dictionary_redirect (const char *dst_fname_in_env, FT_HANDLE old_ft, TOKUTXN txn);
+void toku_reset_root_xid_that_created(FT ft, TXNID new_root_xid_that_created);
+// Reset the root_xid_that_created field to the given value.
+// This redefines which xid created the dictionary.
+
+void toku_ft_add_txn_ref(FT ft);
+void toku_ft_remove_txn_ref(FT ft);
+
+void toku_calculate_root_offset_pointer (FT ft, CACHEKEY* root_key, uint32_t *roothash);
+void toku_ft_set_new_root_blocknum(FT ft, CACHEKEY new_root_key);
+LSN toku_ft_checkpoint_lsn(FT ft) __attribute__ ((warn_unused_result));
+void toku_ft_stat64 (FT ft, struct ftstat64_s *s);
+void toku_ft_get_fractal_tree_info64 (FT ft, struct ftinfo64 *s);
+int toku_ft_iterate_fractal_tree_block_map(FT ft, int (*iter)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*), void *iter_extra);
+
+// unconditionally set the descriptor for an open FT. can't do this when
+// any operation has already occurred on the ft.
+// see toku_ft_change_descriptor(), which is the transactional version
+// used by the ydb layer. it better describes the client contract.
+void toku_ft_update_descriptor(FT ft, DESCRIPTOR desc);
+// use this version if the FT is not fully user-opened with a valid cachefile.
+// this is a clean hack to get deserialization code to update a descriptor
+// while the FT and cf are in the process of opening, for upgrade purposes
+void toku_ft_update_descriptor_with_fd(FT ft, DESCRIPTOR desc, int fd);
+void toku_ft_update_cmp_descriptor(FT ft);
+
+// get the descriptor for a ft. safe to read as long as clients honor the
+// strict contract put forth by toku_ft_update_descriptor/toku_ft_change_descriptor
+// essentially, there should never be a reader while there is a writer, enforced
+// by the client, not the FT.
+DESCRIPTOR toku_ft_get_descriptor(FT_HANDLE ft_handle);
+DESCRIPTOR toku_ft_get_cmp_descriptor(FT_HANDLE ft_handle);
+
+typedef struct {
+ // delta versions in basements could be negative
+ // These represent the physical leaf entries and do not account
+ // for pending deletes or other in-flight messages that have not been
+ // applied to a leaf entry.
+ int64_t numrows;
+ int64_t numbytes;
+} STAT64INFO_S, *STAT64INFO;
+static const STAT64INFO_S ZEROSTATS = { .numrows = 0, .numbytes = 0 };
+
+void toku_ft_update_stats(STAT64INFO headerstats, STAT64INFO_S delta);
+void toku_ft_decrease_stats(STAT64INFO headerstats, STAT64INFO_S delta);
+void toku_ft_adjust_logical_row_count(FT ft, int64_t delta);
+
+typedef void (*remove_ft_ref_callback)(FT ft, void *extra);
+void toku_ft_remove_reference(FT ft,
+ bool oplsn_valid, LSN oplsn,
+ remove_ft_ref_callback remove_ref, void *extra);
+
+void toku_ft_set_nodesize(FT ft, unsigned int nodesize);
+void toku_ft_get_nodesize(FT ft, unsigned int *nodesize);
+void toku_ft_set_basementnodesize(FT ft, unsigned int basementnodesize);
+void toku_ft_get_basementnodesize(FT ft, unsigned int *basementnodesize);
+void toku_ft_set_compression_method(FT ft, enum toku_compression_method method);
+void toku_ft_get_compression_method(FT ft, enum toku_compression_method *methodp);
+void toku_ft_set_fanout(FT ft, unsigned int fanout);
+void toku_ft_get_fanout(FT ft, unsigned int *fanout);
+
+// mark the ft as a blackhole. any message injections will be a no op.
+void toku_ft_set_blackhole(FT_HANDLE ft_handle);
+
+// Effect: Calculates the total space and used space for a FT's leaf data.
+// The difference between the two is MVCC garbage.
+void toku_ft_get_garbage(FT ft, uint64_t *total_space, uint64_t *used_space);
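+// Illustrative use (a sketch, not part of any contract): report the garbage fraction.
+//   uint64_t total, used;
+//   toku_ft_get_garbage(ft, &total, &used);
+//   double garbage_pct = total ? 100.0 * (double)(total - used) / (double)total : 0.0;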
+
+// TODO: Should be in portability
+int get_num_cores(void);
+
+// TODO: Use the cachetable's worker pool instead of something managed by the FT...
+struct toku_thread_pool *get_ft_pool(void);
+
+// TODO: Should be in portability
+int toku_single_process_lock(const char *lock_dir, const char *which, int *lockfd);
+int toku_single_process_unlock(int *lockfd);
+
+void tokuft_update_product_name_strings(void);
+#define TOKU_MAX_PRODUCT_NAME_LENGTH (256)
+extern char toku_product_name[TOKU_MAX_PRODUCT_NAME_LENGTH];
+
+struct toku_product_name_strings_struct {
+ char db_version[sizeof(toku_product_name) + sizeof("1.2.3 build ") + 256 + 1];
+ char environmentdictionary[sizeof(toku_product_name) + sizeof(".environment") + 1];
+ char fileopsdirectory[sizeof(toku_product_name) + sizeof(".directory") + 1];
+ char single_process_lock[sizeof(toku_product_name) + sizeof("___lock_dont_delete_me") + 1];
+ char rollback_cachefile[sizeof(toku_product_name) + sizeof(".rollback") + 1];
+};
+
+extern struct toku_product_name_strings_struct toku_product_name_strings;
+extern int tokuft_num_envs;
diff --git a/storage/tokudb/PerconaFT/ft/le-cursor.cc b/storage/tokudb/PerconaFT/ft/le-cursor.cc
new file mode 100644
index 00000000..b90d48dc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/le-cursor.cc
@@ -0,0 +1,139 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "ft/ft.h"
+#include "ft/ft-internal.h"
+#include "ft/le-cursor.h"
+#include "ft/cursor.h"
+
+// A LE_CURSOR is a special purpose FT_CURSOR that:
+// - enables prefetching
+// - does not perform snapshot reads. it reads everything, including uncommitted.
+//
+// A LE_CURSOR is good for scanning a FT from beginning to end. Useful for hot indexing.
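+//
+// Note: despite the "next" naming below, the cursor walks the tree in reverse
+// (toku_le_cursor_next positions with DB_PREV), starting at +infinity and
+// finishing at -infinity.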
+
+struct le_cursor {
+ FT_CURSOR ft_cursor;
+ bool neg_infinity; // true when the le cursor is positioned at -infinity (set when _next returns DB_NOTFOUND)
+ bool pos_infinity; // true when the le cursor is positioned at +infinity (initial setting)
+};
+
+int
+toku_le_cursor_create(LE_CURSOR *le_cursor_result, FT_HANDLE ft_handle, TOKUTXN txn) {
+ int result = 0;
+ LE_CURSOR MALLOC(le_cursor);
+ if (le_cursor == NULL) {
+ result = get_error_errno();
+ }
+ else {
+ result = toku_ft_cursor(ft_handle, &le_cursor->ft_cursor, txn, false, false);
+ if (result == 0) {
+ // TODO move the leaf mode to the ft cursor constructor
+ toku_ft_cursor_set_leaf_mode(le_cursor->ft_cursor);
+ le_cursor->neg_infinity = false;
+ le_cursor->pos_infinity = true;
+ }
+ }
+
+ if (result == 0) {
+ *le_cursor_result = le_cursor;
+ } else {
+ toku_free(le_cursor);
+ }
+
+ return result;
+}
+
+void toku_le_cursor_close(LE_CURSOR le_cursor) {
+ toku_ft_cursor_close(le_cursor->ft_cursor);
+ toku_free(le_cursor);
+}
+
+// Move to the next leaf entry under the LE_CURSOR
+// Success: returns zero, calls the getf callback with the getf_v parameter
+// Failure: returns a non-zero error number
+int
+toku_le_cursor_next(LE_CURSOR le_cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
+ int result;
+ if (le_cursor->neg_infinity) {
+ result = DB_NOTFOUND;
+ } else {
+ le_cursor->pos_infinity = false;
+ // TODO replace this with a non deprecated function. Which?
+ result = toku_ft_cursor_get(le_cursor->ft_cursor, NULL, getf, getf_v, DB_PREV);
+ if (result == DB_NOTFOUND) {
+ le_cursor->neg_infinity = true;
+ }
+ }
+ return result;
+}
+
+bool
+toku_le_cursor_is_key_greater_or_equal(LE_CURSOR le_cursor, const DBT *key) {
+ bool result;
+ if (le_cursor->neg_infinity) {
+ result = true; // all keys are greater than -infinity
+ } else if (le_cursor->pos_infinity) {
+ result = false; // all keys are less than +infinity
+ } else {
+ FT ft = le_cursor->ft_cursor->ft_handle->ft;
+ // get the current position from the cursor and compare it to the given key.
+ int r = ft->cmp(&le_cursor->ft_cursor->key, key);
+ if (r <= 0) {
+ result = true; // key is at or to the right of the cursor key
+ } else {
+ result = false; // key is to the left of the cursor key
+ }
+ }
+ return result;
+}
+
+void
+toku_le_cursor_update_estimate(LE_CURSOR le_cursor, DBT* estimate) {
+ // don't handle these edge cases, not worth it.
+ // estimate stays same
+ if (le_cursor->pos_infinity || le_cursor->neg_infinity) {
+ return;
+ }
+ DBT *cursor_key = &le_cursor->ft_cursor->key;
+ estimate->data = toku_xrealloc(estimate->data, cursor_key->size);
+ memcpy(estimate->data, cursor_key->data, cursor_key->size);
+ estimate->size = cursor_key->size;
+ estimate->flags = DB_DBT_REALLOC;
+}
diff --git a/storage/tokudb/PerconaFT/ft/le-cursor.h b/storage/tokudb/PerconaFT/ft/le-cursor.h
new file mode 100644
index 00000000..ffd891e4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/le-cursor.h
@@ -0,0 +1,75 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/ft-internal.h"
+
+// A leaf entry cursor (LE_CURSOR) is a special type of FT_CURSOR that visits all of the leaf entries in a tree
+// and returns the leaf entry to the caller. It maintains a copy of the key that it was last positioned over to
+// speed up key comparisons with a given key. For example, hot indexing can use the
+// toku_le_cursor_is_key_greater_or_equal function to determine where a given key sits relative to the LE_CURSOR position.
+
+// When the _next and _is_key_greater_or_equal functions are run on multiple threads, they must be protected by a lock.
+// This lock is assumed to exist outside of the LE_CURSOR.
+
+typedef struct le_cursor *LE_CURSOR;
+
+// Create a leaf cursor for a tree (ft_h) within a transaction (txn)
+// Success: returns 0, stores the LE_CURSOR in the le_cursor_result
+// Failure: returns a non-zero error number
+int toku_le_cursor_create(LE_CURSOR *le_cursor_result, FT_HANDLE ft_h, TOKUTXN txn);
+
+// Close and free the LE_CURSOR
+void toku_le_cursor_close(LE_CURSOR le_cursor);
+
+// Move to the next leaf entry under the LE_CURSOR
+// Success: returns zero, calls the getf callback with the getf_v parameter
+// Failure: returns a non-zero error number
+int toku_le_cursor_next(LE_CURSOR le_cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v);
+
+// Return true if the key is at or to the right of the LE_CURSOR position. that is, current cursor key <= given key
+// Otherwise returns false, meaning the key is to the left of the LE_CURSOR position. that is, current cursor key > given key
+// The LE_CURSOR position is initialized to +infinity, so any key comparison with +infinity returns false.
+// When the cursor runs off the left edge of the tree, the LE_CURSOR position is set to -infinity. Any key comparison with -infinity
+// returns true.
+bool toku_le_cursor_is_key_greater_or_equal(LE_CURSOR le_cursor, const DBT *key);
+
+// Extracts the position of le_cursor into estimate. It is the caller's responsibility to handle
+// thread safety; the caller (the indexer) does so by ensuring the indexer lock is held.
+void toku_le_cursor_update_estimate(LE_CURSOR le_cursor, DBT* estimate);
diff --git a/storage/tokudb/PerconaFT/ft/leafentry.cc b/storage/tokudb/PerconaFT/ft/leafentry.cc
new file mode 100644
index 00000000..fcb9a344
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/leafentry.cc
@@ -0,0 +1,45 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "serialize/wbuf.h"
+#include "leafentry.h"
+
+void wbuf_nocrc_LEAFENTRY(struct wbuf *w, LEAFENTRY le) {
+ wbuf_nocrc_literal_bytes(w, le, leafentry_disksize(le));
+}
diff --git a/storage/tokudb/PerconaFT/ft/leafentry.h b/storage/tokudb/PerconaFT/ft/leafentry.h
new file mode 100644
index 00000000..7274a148
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/leafentry.h
@@ -0,0 +1,236 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_portability.h>
+
+#include <util/mempool.h>
+#include <util/omt.h>
+
+#include "ft/txn/txn_manager.h"
+#include "ft/serialize/rbuf.h"
+#include "ft/msg.h"
+
+/*
+ Memory format of packed leaf entry
+ CONSTANTS:
+ num_uxrs
+ keylen
+ Run-time-constants
+ voffset of val/vallen??? (for le_any_val) This must be small if it is interpreted as voffset = realoffset_of_val - keylen
+ GOOD performance optimization.
+ ALSO good for simplicity (no having to scan packed version)
+ key[]
+ variable length
+
+
+ Memory format of packed dup leaf entry
+ CONSTANTS:
+ num_uxrs
+ keylen
+ vallen
+ Run-time-constants
+ key[]
+ val[]
+*/
+
+enum cursor_read_type {
+ C_READ_ANY = 0,
+ C_READ_SNAPSHOT = 1,
+ C_READ_COMMITTED = 2
+};
+
+//
+// enum of possible values for LEAFENTRY->type field
+// LE_CLEAN means that there is a single committed value in a format that saves disk space
+// LE_MVCC means that there may be multiple committed values or there are provisional values
+//
+enum { LE_CLEAN = 0, LE_MVCC = 1 };
+
+// This is an on-disk format. static_asserts verify everything is packed and aligned correctly.
+struct leafentry {
+ struct leafentry_clean {
+ uint32_t vallen;
+ uint8_t val[0]; //actual val
+ }; // For the case where LEAFENTRY->type is LE_CLEAN
+ static_assert(4 == sizeof(leafentry::leafentry_clean), "leafentry_clean size is wrong");
+ static_assert(4 == __builtin_offsetof(leafentry::leafentry_clean, val), "val is in the wrong place");
+ struct __attribute__ ((__packed__)) leafentry_mvcc {
+ uint32_t num_cxrs; // number of committed transaction records
+ uint8_t num_pxrs; // number of provisional transaction records
+ uint8_t xrs[0]; //then TXNIDs of XRs relevant for reads:
+ // if provisional XRs exist, store OUTERMOST TXNID
+ // store committed TXNIDs, from most recently committed to least recently committed (newest first)
+ //then lengths of XRs relevant for reads (length is at most 1<<31, MSB is 1 for insert, 0 for delete):
+ // if provisional XRs exist (num_pxrs>0), store length and insert/delete flag associated with INNERMOST TXNID
+ // store length and insert/delete flag associated with each committed TXNID, in same order as above (newest first)
+ //then data of XRs relevant for reads
+ // if provisional XRs exist (num_pxrs>0), store data associated with INNERMOST provisional TXNID
+ // store data associated with committed TXNIDs (all committed data, newest committed values first)
+ //if provisional XRs still exist (that is, num_pxrs > 1, so INNERMOST provisional TXNID != OUTERMOST provisional TXNID):
+ // for OUTERMOST provisional XR:
+ // 1 byte: store type (insert/delete/placeholder)
+ // 4 bytes: length (if type is INSERT, no length stored if placeholder or delete)
+ // data
+ // for rest of provisional stack (if num_pxrs > 2), from second-outermost to second-innermost (outermost is stored above, innermost is stored separately):
+ // 8 bytes: TXNID
+ // 1 byte: store type (insert/delete/placeholder)
+ // 4 bytes: length (if type is INSERT)
+ // data
+ // for INNERMOST provisional XR:
+ // 8 bytes: TXNID
+ // (innermost data and length with insert/delete flag are stored above, cannot be a placeholder)
+ }; // For the case where LEAFENTRY->type is LE_MVCC
+ static_assert(5 == sizeof(leafentry::leafentry_mvcc), "leafentry_mvcc size is wrong");
+ static_assert(5 == __builtin_offsetof(leafentry::leafentry_mvcc, xrs), "xrs is in the wrong place");
+
+ uint8_t type; // type is LE_CLEAN or LE_MVCC
+ //uint32_t keylen;
+ union __attribute__ ((__packed__)) {
+ struct leafentry_clean clean;
+ struct leafentry_mvcc mvcc;
+ } u;
+};
+static_assert(6 == sizeof(leafentry), "leafentry size is wrong");
+static_assert(1 == __builtin_offsetof(leafentry, u), "union is in the wrong place");
+
+#define LE_CLEAN_MEMSIZE(_vallen) \
+ (sizeof(((LEAFENTRY)NULL)->type) /* type */ \
+ +sizeof(((LEAFENTRY)NULL)->u.clean.vallen) /* vallen */ \
+ +(_vallen)) /* actual val */
+
+#define LE_MVCC_COMMITTED_HEADER_MEMSIZE \
+ (sizeof(((LEAFENTRY)NULL)->type) /* type */ \
+ +sizeof(((LEAFENTRY)NULL)->u.mvcc.num_cxrs) /* committed */ \
+ +sizeof(((LEAFENTRY)NULL)->u.mvcc.num_pxrs) /* provisional */ \
+ +sizeof(TXNID) /* transaction */ \
+ +sizeof(uint32_t) /* length+bit */ \
+ +sizeof(uint32_t)) /* length+bit */
+
+#define LE_MVCC_COMMITTED_MEMSIZE(_vallen) \
+ (LE_MVCC_COMMITTED_HEADER_MEMSIZE \
+ +(_vallen)) /* actual val */
+
+
+typedef struct leafentry *LEAFENTRY;
+typedef struct leafentry_13 *LEAFENTRY_13;
+
+//
+// TODO: consistency among names is very poor.
+//
+
+// TODO: rename this helper function for deserialization
+size_t leafentry_rest_memsize(uint32_t num_puxrs, uint32_t num_cuxrs, uint8_t* start);
+size_t leafentry_memsize (LEAFENTRY le); // the size of a leafentry in memory.
+size_t leafentry_disksize (LEAFENTRY le); // this is the same as logsizeof_LEAFENTRY. The size of a leafentry on disk.
+void wbuf_nocrc_LEAFENTRY(struct wbuf *w, LEAFENTRY le);
+int print_klpair (FILE *outf, const void* key, uint32_t keylen, LEAFENTRY v); // Print a leafentry out in human-readable form.
+
+int le_latest_is_del(LEAFENTRY le); // Return true if it is a provisional delete.
+int le_val_is_del(LEAFENTRY le, enum cursor_read_type read_type, TOKUTXN txn); // Returns true if the value that is to be read is empty
+bool le_is_clean(LEAFENTRY le); // Return true if the leafentry is LE_CLEAN (a single committed value, no transaction records)
+bool le_has_xids(LEAFENTRY le, XIDS xids); // Return true if the transaction represented by xids is still provisional in this leafentry (le's xid stack is a superset of or equal to xids)
+void* le_latest_val (LEAFENTRY le); // Return the latest val (return NULL for provisional deletes)
+uint32_t le_latest_vallen (LEAFENTRY le); // Return the latest vallen. Returns 0 for provisional deletes.
+void* le_latest_val_and_len (LEAFENTRY le, uint32_t *len);
+
+uint64_t le_outermost_uncommitted_xid (LEAFENTRY le);
+
+//Callback contract:
+// Function checks to see if id is accepted by context.
+// Returns:
+// 0: context ignores this entry, id.
+// TOKUDB_ACCEPT: context accepts id
+// any other r (r != 0 && r != TOKUDB_ACCEPT): quit early and return r, because something unexpected went wrong (error case)
+typedef int(*LE_ITERATE_CALLBACK)(TXNID id, TOKUTXN context, bool is_provisional);
+
+int le_iterate_val(
+ LEAFENTRY le,
+ LE_ITERATE_CALLBACK f,
+ void** valpp,
+ uint32_t* vallenp,
+ TOKUTXN context);
+
+void le_extract_val(
+ LEAFENTRY le,
+ // should we return the entire leafentry as the val?
+ bool is_leaf_mode,
+ enum cursor_read_type read_type,
+ TOKUTXN ttxn,
+ uint32_t* vallen,
+ void** val);
+
+size_t leafentry_disksize_13(LEAFENTRY_13 le);
+
+int toku_le_upgrade_13_14(
+ // NULL if there was no stored data.
+ LEAFENTRY_13 old_leafentry,
+ void** keyp,
+ uint32_t* keylen,
+ size_t* new_leafentry_memorysize,
+ LEAFENTRY *new_leafentry_p);
+
+class bn_data;
+
+int64_t toku_le_apply_msg(
+ const ft_msg &msg,
+ // NULL if there was no stored data.
+ LEAFENTRY old_leafentry,
+ // bn_data storing leafentry, if NULL, means there is no bn_data
+ bn_data* data_buffer,
+ // index in data_buffer where leafentry is stored (and should be replaced)
+ uint32_t idx,
+ uint32_t old_keylen,
+ txn_gc_info* gc_info,
+ LEAFENTRY *new_leafentry_p,
+ int64_t* numbytes_delta_p);
+
+bool toku_le_worth_running_garbage_collection(
+ LEAFENTRY le,
+ txn_gc_info* gc_info);
+
+void toku_le_garbage_collect(
+ LEAFENTRY old_leaf_entry,
+ bn_data* data_buffer,
+ uint32_t idx,
+ void* keyp,
+ uint32_t keylen,
+ txn_gc_info* gc_info,
+ LEAFENTRY* new_leaf_entry,
+ int64_t* numbytes_delta_p);
diff --git a/storage/tokudb/PerconaFT/ft/loader/callbacks.cc b/storage/tokudb/PerconaFT/ft/loader/callbacks.cc
new file mode 100644
index 00000000..ac69fb7e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/loader/callbacks.cc
@@ -0,0 +1,148 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <toku_assert.h>
+#include <toku_pthread.h>
+#include <errno.h>
+#include <string.h>
+
+#include "loader/loader-internal.h"
+#include "util/dbt.h"
+
+toku_instr_key *loader_error_mutex_key;
+
+static void error_callback_lock(ft_loader_error_callback loader_error) {
+ toku_mutex_lock(&loader_error->mutex);
+}
+
+static void error_callback_unlock(ft_loader_error_callback loader_error) {
+ toku_mutex_unlock(&loader_error->mutex);
+}
+
+void ft_loader_init_error_callback(ft_loader_error_callback loader_error) {
+ memset(loader_error, 0, sizeof *loader_error);
+ toku_init_dbt(&loader_error->key);
+ toku_init_dbt(&loader_error->val);
+ toku_mutex_init(*loader_error_mutex_key, &loader_error->mutex, nullptr);
+}
+
+void ft_loader_destroy_error_callback(ft_loader_error_callback loader_error) {
+ toku_mutex_destroy(&loader_error->mutex);
+ toku_destroy_dbt(&loader_error->key);
+ toku_destroy_dbt(&loader_error->val);
+ memset(loader_error, 0, sizeof *loader_error);
+}
+
+int ft_loader_get_error(ft_loader_error_callback loader_error) {
+ error_callback_lock(loader_error);
+ int r = loader_error->error;
+ error_callback_unlock(loader_error);
+ return r;
+}
+
+void ft_loader_set_error_function(ft_loader_error_callback loader_error, ft_loader_error_func error_function, void *error_extra) {
+ loader_error->error_callback = error_function;
+ loader_error->extra = error_extra;
+}
+
+int ft_loader_set_error(ft_loader_error_callback loader_error, int error, DB *db, int which_db, DBT *key, DBT *val) {
+ int r;
+ error_callback_lock(loader_error);
+ if (loader_error->error) { // there can be only one
+ r = EEXIST;
+ } else {
+ r = 0;
+ loader_error->error = error; // set the error
+ loader_error->db = db;
+ loader_error->which_db = which_db;
+ if (key != nullptr) {
+ toku_clone_dbt(&loader_error->key, *key);
+ }
+ if (val != nullptr) {
+ toku_clone_dbt(&loader_error->val, *val);
+ }
+ }
+ error_callback_unlock(loader_error);
+ return r;
+}
+
+int ft_loader_call_error_function(ft_loader_error_callback loader_error) {
+ int r;
+ error_callback_lock(loader_error);
+ r = loader_error->error;
+ if (r && loader_error->error_callback && !loader_error->did_callback) {
+ loader_error->did_callback = true;
+ loader_error->error_callback(loader_error->db,
+ loader_error->which_db,
+ loader_error->error,
+ &loader_error->key,
+ &loader_error->val,
+ loader_error->extra);
+ }
+ error_callback_unlock(loader_error);
+ return r;
+}
+
+int ft_loader_set_error_and_callback(ft_loader_error_callback loader_error, int error, DB *db, int which_db, DBT *key, DBT *val) {
+ int r = ft_loader_set_error(loader_error, error, db, which_db, key, val);
+ if (r == 0)
+ r = ft_loader_call_error_function(loader_error);
+ return r;
+}
+
+int ft_loader_init_poll_callback(ft_loader_poll_callback p) {
+ memset(p, 0, sizeof *p);
+ return 0;
+}
+
+void ft_loader_destroy_poll_callback(ft_loader_poll_callback p) {
+ memset(p, 0, sizeof *p);
+}
+
+void ft_loader_set_poll_function(ft_loader_poll_callback p, ft_loader_poll_func poll_function, void *poll_extra) {
+ p->poll_function = poll_function;
+ p->poll_extra = poll_extra;
+}
+
+int ft_loader_call_poll_function(ft_loader_poll_callback p, float progress) {
+ int r = 0;
+ if (p->poll_function)
+ r = p->poll_function(p->poll_extra, progress);
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/loader/dbufio.cc b/storage/tokudb/PerconaFT/ft/loader/dbufio.cc
new file mode 100644
index 00000000..90f76cec
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/loader/dbufio.cc
@@ -0,0 +1,598 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "portability/toku_assert.h"
+#include "portability/memory.h"
+
+#include "ft/ft-internal.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "loader/dbufio.h"
+#include "loader/loader-internal.h"
+
+toku_instr_key *bfs_mutex_key;
+toku_instr_key *bfs_cond_key;
+toku_instr_key *io_thread_key;
+
+struct dbufio_file {
+ // i/o thread owns these
+ int fd;
+
+ // consumers own these
+ size_t offset_in_buf;
+ toku_off_t offset_in_uncompressed_file;
+
+ // need the mutex to modify these
+ struct dbufio_file *next;
+ bool second_buf_ready; // if true, the i/o thread is not touching anything.
+
+ // consumers own [0], i/o thread owns [1], they are swapped by the consumer only when the condition mutex is held and second_buf_ready is true.
+ char *buf[2];
+ size_t n_in_buf[2];
+ int error_code[2]; // includes errno or eof. [0] is the error code associated with buf[0], [1] is the code for buf[1]
+
+ bool io_done;
+};
+
+
+/* A dbufio_fileset */
+struct dbufio_fileset {
+ // The mutex/condition variables protect
+ // the singly-linked list of files that need I/O (head/tail in the fileset, and next in each file)
+ // in each file:
+ // the second_buf_ready boolean (which says the second buffer is full of data).
+ // the swapping of the buf[], n_in_buf[], and error_code[] values.
+ toku_mutex_t mutex;
+ toku_cond_t cond;
+ int N; // How many files. This is constant once established.
+ int n_not_done; // how many of the files require more I/O? Owned by the i/o thread.
+ struct dbufio_file *files; // an array of length N.
+ struct dbufio_file *head, *tail; // must have the mutex to fiddle with these.
+ size_t bufsize; // the bufsize is the constant (the same for all buffers).
+
+ bool panic;
+ bool compressed;
+ int panic_errno;
+ toku_pthread_t iothread;
+};
+
+
+static void enq (DBUFIO_FILESET bfs, struct dbufio_file *f) {
+ if (bfs->tail==NULL) {
+ bfs->head = f;
+ } else {
+ bfs->tail->next = f;
+ }
+ bfs->tail = f;
+ f->next = NULL;
+}
+
+static void panic (DBUFIO_FILESET bfs, int r) {
+ if (bfs->panic) return;
+ bfs->panic_errno = r; // Don't really care about a race on this variable... Writes to it are atomic, so at least one good panic reason will be stored.
+ bfs->panic = true;
+ return;
+}
+
+static bool paniced (DBUFIO_FILESET bfs) {
+ return bfs->panic;
+}
+
+static ssize_t dbf_read_some_compressed(struct dbufio_file *dbf, char *buf, size_t bufsize) {
+ ssize_t ret;
+ invariant(bufsize >= MAX_UNCOMPRESSED_BUF);
+ unsigned char *raw_block = NULL;
+
+ // deserialize the sub block header
+
+ // total_size
+ // num_sub_blocks
+ // compressed_size,uncompressed_size,xsum (repeated num_sub_blocks times)
+ ssize_t readcode;
+ const uint32_t header_size = sizeof(uint32_t);
+ char header[header_size];
+
+ readcode = toku_os_read(dbf->fd, &header, header_size);
+ if (readcode < 0) {
+ ret = -1;
+ goto exit;
+ }
+ if (readcode == 0) {
+ ret = 0;
+ goto exit;
+ }
+ if (readcode < (ssize_t) header_size) {
+ errno = TOKUDB_NO_DATA;
+ ret = -1;
+ goto exit;
+ }
+ uint32_t total_size;
+ {
+ uint32_t *p = (uint32_t *) &header[0];
+ total_size = toku_dtoh32(p[0]);
+ }
+ if (total_size == 0 || total_size > (1<<30)) {
+ errno = toku_db_badformat();
+ ret = -1;
+ goto exit;
+ }
+
+ //Cannot use XMALLOC
+ MALLOC_N(total_size, raw_block);
+ if (raw_block == nullptr) {
+ errno = ENOMEM;
+ ret = -1;
+ goto exit;
+ }
+ readcode = toku_os_read(dbf->fd, raw_block, total_size);
+ if (readcode < 0) {
+ ret = -1;
+ goto exit;
+ }
+ if (readcode < (ssize_t) total_size) {
+ errno = TOKUDB_NO_DATA;
+ ret = -1;
+ goto exit;
+ }
+
+ struct sub_block sub_block[max_sub_blocks];
+ uint32_t *sub_block_header;
+ sub_block_header = (uint32_t *) &raw_block[0];
+ int32_t n_sub_blocks;
+ n_sub_blocks = toku_dtoh32(sub_block_header[0]);
+ sub_block_header++;
+ size_t size_subblock_header;
+ size_subblock_header = sub_block_header_size(n_sub_blocks);
+ if (n_sub_blocks == 0 || n_sub_blocks > max_sub_blocks || size_subblock_header > total_size) {
+ errno = toku_db_badformat();
+ ret = -1;
+ goto exit;
+ }
+ for (int i = 0; i < n_sub_blocks; i++) {
+ sub_block_init(&sub_block[i]);
+ sub_block[i].compressed_size = toku_dtoh32(sub_block_header[0]);
+ sub_block[i].uncompressed_size = toku_dtoh32(sub_block_header[1]);
+ sub_block[i].xsum = toku_dtoh32(sub_block_header[2]);
+ sub_block_header += 3;
+ }
+
+ // verify sub block sizes
+ size_t total_compressed_size;
+ total_compressed_size = 0;
+ for (int i = 0; i < n_sub_blocks; i++) {
+ uint32_t compressed_size = sub_block[i].compressed_size;
+ if (compressed_size<=0 || compressed_size>(1<<30)) {
+ errno = toku_db_badformat();
+ ret = -1;
+ goto exit;
+ }
+
+ uint32_t uncompressed_size = sub_block[i].uncompressed_size;
+ if (uncompressed_size<=0 || uncompressed_size>(1<<30)) {
+ errno = toku_db_badformat();
+ ret = -1;
+ goto exit;
+ }
+ total_compressed_size += compressed_size;
+ }
+ if (total_size != total_compressed_size + size_subblock_header) {
+ errno = toku_db_badformat();
+ ret = -1;
+ goto exit;
+ }
+
+ // sum up the uncompressed size of the sub blocks
+ size_t uncompressed_size;
+ uncompressed_size = get_sum_uncompressed_size(n_sub_blocks, sub_block);
+ if (uncompressed_size > bufsize || uncompressed_size > MAX_UNCOMPRESSED_BUF) {
+ errno = toku_db_badformat();
+ ret = -1;
+ goto exit;
+ }
+
+ unsigned char *uncompressed_data;
+ uncompressed_data = (unsigned char *)buf;
+
+ // point at the start of the compressed data (past the node header, the sub block header, and the header checksum)
+ unsigned char *compressed_data;
+ compressed_data = raw_block + size_subblock_header;
+
+ // decompress all the compressed sub blocks into the uncompressed buffer
+ {
+ int r;
+ r = decompress_all_sub_blocks(n_sub_blocks, sub_block, compressed_data, uncompressed_data, get_num_cores(), get_ft_pool());
+ if (r != 0) {
+ fprintf(stderr, "%s:%d loader failed %d at %p size %" PRIu32"\n", __FUNCTION__, __LINE__, r, raw_block, total_size);
+ dump_bad_block(raw_block, total_size);
+ errno = r;
+ ret = -1;
+ goto exit;
+ }
+ }
+ ret = uncompressed_size;
+exit:
+ if (raw_block) {
+ toku_free(raw_block);
+ }
+ return ret;
+}
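+
+// Sketch of the on-disk framing that dbf_read_some_compressed() above expects
+// (reconstructed from the parsing code; field names are descriptive only):
+//
+//   uint32_t total_size;     // bytes that follow, not counting this field
+//   uint32_t n_sub_blocks;   // 1 .. max_sub_blocks
+//   struct { uint32_t compressed_size, uncompressed_size, xsum; } hdr[n_sub_blocks];
+//   ...compressed payloads, concatenated, one per sub block...
+//
+// total_size must equal the sub-block header (the n_sub_blocks count plus one
+// triple per sub block) plus the sum of the compressed payload sizes, and the
+// summed uncompressed sizes may not exceed MAX_UNCOMPRESSED_BUF.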
+
+static ssize_t dbf_read_compressed(struct dbufio_file *dbf, char *buf, size_t bufsize) {
+ invariant(bufsize >= MAX_UNCOMPRESSED_BUF);
+ size_t count = 0;
+
+ while (count + MAX_UNCOMPRESSED_BUF <= bufsize) {
+ ssize_t readcode = dbf_read_some_compressed(dbf, buf + count, bufsize - count);
+ if (readcode < 0) {
+ return readcode;
+ }
+ count += readcode;
+ if (readcode == 0) {
+ break;
+ }
+ }
+ return count;
+}
+
+static void* io_thread (void *v)
+// The I/O thread does all the asynchronous I/O.
+{
+ DBUFIO_FILESET bfs = (DBUFIO_FILESET)v;
+ toku_mutex_lock(&bfs->mutex);
+ //printf("%s:%d Locked\n", __FILE__, __LINE__);
+ while (1) {
+ if (paniced(bfs)) {
+ toku_mutex_unlock(&bfs->mutex); // ignore any error
+ toku_instr_delete_current_thread();
+ return toku_pthread_done(nullptr);
+ }
+ // printf("n_not_done=%d\n", bfs->n_not_done);
+ if (bfs->n_not_done == 0) {
+            // all done (meaning we stored EOF (or another error) in
+            // error_code[0] for the file).
+ // printf("unlocked\n");
+ toku_mutex_unlock(&bfs->mutex);
+ toku_instr_delete_current_thread();
+ return toku_pthread_done(nullptr);
+ }
+
+ struct dbufio_file *dbf = bfs->head;
+ if (dbf == NULL) {
+ // No I/O needs to be done yet.
+ // Wait until something happens that will wake us up.
+ toku_cond_wait(&bfs->cond, &bfs->mutex);
+ if (paniced(bfs)) {
+ toku_mutex_unlock(&bfs->mutex); // ignore any error
+ toku_instr_delete_current_thread();
+ return toku_pthread_done(nullptr);
+ }
+ // Have the lock so go around.
+ } else {
+ // Some I/O needs to be done.
+ // printf("%s:%d Need I/O\n", __FILE__, __LINE__);
+ assert(dbf->second_buf_ready == false);
+ assert(!dbf->io_done);
+ bfs->head = dbf->next;
+ if (bfs->head == NULL)
+ bfs->tail = NULL;
+
+ // Unlock the mutex now that we have ownership of dbf to allow
+ // consumers to get the mutex and perform swaps. They won't swap
+ // this buffer because second_buf_ready is false.
+ toku_mutex_unlock(&bfs->mutex);
+ //printf("%s:%d Doing read fd=%d\n", __FILE__, __LINE__, dbf->fd);
+ {
+ ssize_t readcode;
+ if (bfs->compressed) {
+ readcode = dbf_read_compressed(dbf, dbf->buf[1], bfs->bufsize);
+ }
+ else {
+ readcode = toku_os_read(dbf->fd, dbf->buf[1], bfs->bufsize);
+ }
+ //printf("%s:%d readcode=%ld\n", __FILE__, __LINE__, readcode);
+ if (readcode==-1) {
+ // a real error. Save the real error.
+ int the_errno = get_error_errno();
+ fprintf(stderr, "%s:%d dbf=%p fd=%d errno=%d\n", __FILE__, __LINE__, dbf, dbf->fd, the_errno);
+ dbf->error_code[1] = the_errno;
+ dbf->n_in_buf[1] = 0;
+ } else if (readcode==0) {
+ // End of file. Save it.
+ dbf->error_code[1] = EOF;
+ dbf->n_in_buf[1] = 0;
+ dbf->io_done = true;
+
+ } else {
+ dbf->error_code[1] = 0;
+ dbf->n_in_buf[1] = readcode;
+ }
+
+ //printf("%s:%d locking mutex again=%ld\n", __FILE__, __LINE__, readcode);
+ {
+ toku_mutex_lock(&bfs->mutex);
+ if (paniced(bfs)) {
+ toku_mutex_unlock(&bfs->mutex); // ignore any error
+ toku_instr_delete_current_thread();
+ return toku_pthread_done(nullptr);
+ }
+ }
+ // Now that we have the mutex, we can decrement n_not_done (if
+ // applicable) and set second_buf_ready
+ if (readcode<=0) {
+ bfs->n_not_done--;
+ }
+ //printf("%s:%d n_not_done=%d\n", __FILE__, __LINE__, bfs->n_not_done);
+ dbf->second_buf_ready = true;
+ toku_cond_broadcast(&bfs->cond);
+ //printf("%s:%d did broadcast=%d\n", __FILE__, __LINE__, bfs->n_not_done);
+ // Still have the lock so go around the loop
+ }
+ }
+ }
+}
+
+int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t bufsize, bool compressed) {
+ //printf("%s:%d here\n", __FILE__, __LINE__);
+ int result = 0;
+ DBUFIO_FILESET CALLOC(bfs);
+ if (bfs==0) { result = get_error_errno(); }
+
+ bfs->compressed = compressed;
+
+ bool mutex_inited = false, cond_inited = false;
+ if (result==0) {
+ CALLOC_N(N, bfs->files);
+ if (bfs->files==NULL) { result = get_error_errno(); }
+ else {
+ for (int i=0; i<N; i++) {
+ bfs->files[i].buf[0] = bfs->files[i].buf[1] = NULL;
+ }
+ }
+ }
+ // printf("%s:%d here\n", __FILE__, __LINE__);
+ if (result == 0) {
+ toku_mutex_init(*bfs_mutex_key, &bfs->mutex, nullptr);
+ mutex_inited = true;
+ }
+ if (result == 0) {
+ toku_cond_init(*bfs_cond_key, &bfs->cond, nullptr);
+ cond_inited = true;
+ }
+ if (result == 0) {
+ bfs->N = N;
+ bfs->n_not_done = N;
+ bfs->head = bfs->tail = NULL;
+ for (int i = 0; i < N; i++) {
+ bfs->files[i].fd = fds[i];
+ bfs->files[i].offset_in_buf = 0;
+ bfs->files[i].offset_in_uncompressed_file = 0;
+ bfs->files[i].next = NULL;
+ bfs->files[i].second_buf_ready = false;
+ for (int j = 0; j < 2; j++) {
+ if (result == 0) {
+ MALLOC_N(bufsize, bfs->files[i].buf[j]);
+ if (bfs->files[i].buf[j] == NULL) {
+ result = get_error_errno();
+ }
+ }
+ bfs->files[i].n_in_buf[j] = 0;
+ bfs->files[i].error_code[j] = 0;
+ }
+ bfs->files[i].io_done = false;
+ ssize_t r;
+ if (bfs->compressed) {
+ r = dbf_read_compressed(&bfs->files[i], bfs->files[i].buf[0], bufsize);
+ } else {
+ r = toku_os_read(bfs->files[i].fd, bfs->files[i].buf[0], bufsize);
+ }
+ {
+ if (r<0) {
+ result=get_error_errno();
+ break;
+ } else if (r==0) {
+ // it's EOF
+ bfs->files[i].io_done = true;
+ bfs->n_not_done--;
+ bfs->files[i].error_code[0] = EOF;
+ } else {
+ bfs->files[i].n_in_buf[0] = r;
+ //printf("%s:%d enq [%d]\n", __FILE__, __LINE__, i);
+ enq(bfs, &bfs->files[i]);
+ }
+ }
+ }
+ bfs->bufsize = bufsize;
+ bfs->panic = false;
+ bfs->panic_errno = 0;
+ }
+ // printf("Creating IO thread\n");
+ if (result == 0) {
+ result = toku_pthread_create(*io_thread_key,
+ &bfs->iothread,
+ nullptr,
+ io_thread,
+ static_cast<void *>(bfs));
+ }
+ if (result == 0) {
+ *bfsp = bfs;
+ return 0;
+ }
+ // Now undo everything.
+    // If we got here, there is no thread (either result was nonzero before the
+    // thread was created, or else the thread creation itself failed).
+ if (bfs) {
+ if (bfs->files) {
+ // the files were allocated, so we have to free all the bufs.
+ for (int i=0; i<N; i++) {
+ for (int j=0; j<2; j++) {
+ if (bfs->files[i].buf[j])
+ toku_free(bfs->files[i].buf[j]);
+ bfs->files[i].buf[j]=NULL;
+ }
+ }
+ toku_free(bfs->files);
+ bfs->files=NULL;
+ }
+ if (cond_inited) {
+ toku_cond_destroy(&bfs->cond); // don't check error status
+ }
+ if (mutex_inited) {
+ toku_mutex_destroy(&bfs->mutex); // don't check error status
+ }
+ toku_free(bfs);
+ }
+ return result;
+}
+
+int panic_dbufio_fileset(DBUFIO_FILESET bfs, int error) {
+ toku_mutex_lock(&bfs->mutex);
+ panic(bfs, error);
+ toku_cond_broadcast(&bfs->cond);
+ toku_mutex_unlock(&bfs->mutex);
+ return 0;
+}
+
+int destroy_dbufio_fileset (DBUFIO_FILESET bfs) {
+ int result = 0;
+ {
+ void *retval;
+ int r = toku_pthread_join(bfs->iothread, &retval);
+ assert(r==0);
+ assert(retval==NULL);
+ }
+ {
+ toku_mutex_destroy(&bfs->mutex);
+ }
+ {
+ toku_cond_destroy(&bfs->cond);
+ }
+ if (bfs->files) {
+ for (int i=0; i<bfs->N; i++) {
+ for (int j=0; j<2; j++) {
+ //printf("%s:%d free([%d][%d]=%p\n", __FILE__, __LINE__, i,j, bfs->files[i].buf[j]);
+ toku_free(bfs->files[i].buf[j]);
+ }
+ }
+ toku_free(bfs->files);
+ }
+ toku_free(bfs);
+ return result;
+}
+
+int dbufio_fileset_read (DBUFIO_FILESET bfs, int filenum, void *buf_v, size_t count, size_t *n_read) {
+ char *buf = (char*)buf_v;
+ struct dbufio_file *dbf = &bfs->files[filenum];
+ if (dbf->error_code[0]!=0) return dbf->error_code[0];
+ if (dbf->offset_in_buf + count <= dbf->n_in_buf[0]) {
+ // Enough data is present to do it all now
+ memcpy(buf, dbf->buf[0]+dbf->offset_in_buf, count);
+ dbf->offset_in_buf += count;
+ dbf->offset_in_uncompressed_file += count;
+ *n_read = count;
+ return 0;
+ } else if (dbf->n_in_buf[0] > dbf->offset_in_buf) {
+ // There is something in buf[0]
+ size_t this_count = dbf->n_in_buf[0]-dbf->offset_in_buf;
+ assert(dbf->offset_in_buf + this_count <= bfs->bufsize);
+ memcpy(buf, dbf->buf[0]+dbf->offset_in_buf, this_count);
+ dbf->offset_in_buf += this_count;
+ dbf->offset_in_uncompressed_file += this_count;
+ size_t sub_n_read;
+ int r = dbufio_fileset_read(bfs, filenum, buf+this_count, count-this_count, &sub_n_read);
+ if (r==0) {
+ *n_read = this_count + sub_n_read;
+ return 0;
+ } else {
+ // The error code will have been saved. We got some data so return that
+ *n_read = this_count;
+ return 0;
+ }
+ } else {
+ // There is nothing in buf[0]. So we need to swap buffers
+ toku_mutex_lock(&bfs->mutex);
+ while (1) {
+ if (dbf->second_buf_ready) {
+ dbf->n_in_buf[0] = dbf->n_in_buf[1];
+ {
+ char *tmp = dbf->buf[0];
+ dbf->buf[0] = dbf->buf[1];
+ dbf->buf[1] = tmp;
+ }
+ dbf->error_code[0] = dbf->error_code[1];
+ dbf->second_buf_ready = false;
+ dbf->offset_in_buf = 0;
+ if (!dbf->io_done) {
+ // Don't enqueue it if the I/O is all done.
+ //printf("%s:%d enq [%ld]\n", __FILE__, __LINE__, dbf-&bfs->files[0]);
+ enq(bfs, dbf);
+ }
+ toku_cond_broadcast(&bfs->cond);
+ toku_mutex_unlock(&bfs->mutex);
+ if (dbf->error_code[0]==0) {
+ assert(dbf->n_in_buf[0]>0);
+ return dbufio_fileset_read(bfs, filenum, buf_v, count, n_read);
+ } else {
+ *n_read = 0;
+ return dbf->error_code[0];
+ }
+ } else {
+ toku_cond_wait(&bfs->cond, &bfs->mutex);
+ }
+ }
+ assert(0); // cannot get here.
+ }
+}
+
+void
+dbufio_print(DBUFIO_FILESET bfs) {
+ fprintf(stderr, "%s:%d bfs=%p", __FILE__, __LINE__, bfs);
+ if (bfs->panic)
+ fprintf(stderr, " panic=%d", bfs->panic_errno);
+ fprintf(stderr, " N=%d %d %" PRIuMAX, bfs->N, bfs->n_not_done, (uintmax_t) bfs->bufsize);
+ for (int i = 0; i < bfs->N; i++) {
+ struct dbufio_file *dbf = &bfs->files[i];
+ if (dbf->error_code[0] || dbf->error_code[1])
+ fprintf(stderr, " %d=[%d,%d]", i, dbf->error_code[0], dbf->error_code[1]);
+ }
+ fprintf(stderr, "\n");
+
+}
diff --git a/storage/tokudb/PerconaFT/ft/loader/dbufio.h b/storage/tokudb/PerconaFT/ft/loader/dbufio.h
new file mode 100644
index 00000000..0191f535
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/loader/dbufio.h
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_portability.h>
+#include <toku_pthread.h>
+
+/* Maintain a set of files for reading, with double buffering for the reads. */
+
+/* A DBUFIO_FILESET is a set of files. The files are indexed from 0 to N-1, where N is specified when the set is created (and the files are also provided when the set is created). */
+/* An implementation would typically use a separate thread or asynchronous I/O to fetch ahead data for each file. The system will typically fill two buffers of size M for each file. One buffer is being read out of using dbuf_read(), and the other buffer is either empty (waiting on the asynchronous I/O to start), being filled in by the asynchronous I/O mechanism, or is waiting for the caller to read data from it. */
+typedef struct dbufio_fileset *DBUFIO_FILESET;
+
+int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t bufsize, bool compressed);
+
+int destroy_dbufio_fileset(DBUFIO_FILESET);
+
+int dbufio_fileset_read (DBUFIO_FILESET bfs, int filenum, void *buf_v, size_t count, size_t *n_read);
+
+int panic_dbufio_fileset(DBUFIO_FILESET, int error);
+
+void dbufio_print(DBUFIO_FILESET);
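+
+/* A minimal usage sketch (illustrative only, not part of the original header);
+ * fd_a and fd_b stand for file descriptors the caller already opened:
+ *
+ *   int fds[2] = { fd_a, fd_b };
+ *   DBUFIO_FILESET bfs;
+ *   int r = create_dbufio_fileset(&bfs, 2, fds, 1 << 20, false);
+ *   if (r == 0) {
+ *       char buf[4096];
+ *       size_t n_read;
+ *       r = dbufio_fileset_read(bfs, 0, buf, sizeof buf, &n_read);
+ *       // r == 0 while data is available (n_read may be short); a nonzero
+ *       // return is the saved per-file code, either EOF or an errno value.
+ *       destroy_dbufio_fileset(bfs);
+ *   }
+ */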
diff --git a/storage/tokudb/PerconaFT/ft/loader/loader-internal.h b/storage/tokudb/PerconaFT/ft/loader/loader-internal.h
new file mode 100644
index 00000000..6f7b0147
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/loader/loader-internal.h
@@ -0,0 +1,320 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+#include "portability/toku_pthread.h"
+
+#include "loader/dbufio.h"
+#include "loader/loader.h"
+#include "util/queue.h"
+
+enum {
+ EXTRACTOR_QUEUE_DEPTH = 2,
+ FILE_BUFFER_SIZE = 1<<24,
+ MIN_ROWSET_MEMORY = 1<<23,
+ MIN_MERGE_FANIN = 2,
+ FRACTAL_WRITER_QUEUE_DEPTH = 3,
+ FRACTAL_WRITER_ROWSETS = FRACTAL_WRITER_QUEUE_DEPTH + 2,
+ DBUFIO_DEPTH = 2,
+ TARGET_MERGE_BUF_SIZE = 1<<24, // we'd like the merge buffer to be this big.
+ MIN_MERGE_BUF_SIZE = 1<<20, // always use at least this much
+ MAX_UNCOMPRESSED_BUF = MIN_MERGE_BUF_SIZE
+};
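+
+// For reference (arithmetic only, not new tuning): FILE_BUFFER_SIZE and
+// TARGET_MERGE_BUF_SIZE are 1<<24 = 16 MiB, MIN_ROWSET_MEMORY is 1<<23 = 8 MiB,
+// MIN_MERGE_BUF_SIZE = MAX_UNCOMPRESSED_BUF is 1<<20 = 1 MiB, and
+// FRACTAL_WRITER_ROWSETS = 3 + 2 = 5.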
+
+/* These functions are exported to allow the tests to compile. */
+
+/* These structures maintain a collection of all the open temporary files used by the loader. */
+struct file_info {
+ bool is_open;
+ bool is_extant; // if true, the file must be unlinked.
+ char *fname;
+ TOKU_FILE *file;
+ uint64_t n_rows; // how many rows were written into that file
+ size_t buffer_size;
+ void *buffer;
+};
+struct file_infos {
+ int n_files;
+ int n_files_limit;
+ struct file_info *file_infos;
+ int n_files_open, n_files_extant;
+ toku_mutex_t lock; // must protect this data structure because current activity performs a REALLOC(fi->file_infos).
+};
+typedef struct fidx { int idx; } FIDX;
+static const FIDX FIDX_NULL __attribute__((__unused__)) = {-1};
+static int fidx_is_null(const FIDX f) __attribute__((__unused__));
+static int fidx_is_null(const FIDX f) { return f.idx == -1; }
+TOKU_FILE *toku_bl_fidx2file(FTLOADER bl, FIDX i);
+
+int ft_loader_open_temp_file(FTLOADER bl, FIDX *file_idx);
+
+/* These data structures are used for manipulating a collection of rows in main memory. */
+struct row {
+ size_t off; // the offset in the data array.
+ int klen,vlen;
+};
+struct rowset {
+ uint64_t memory_budget;
+ size_t n_rows, n_rows_limit;
+ struct row *rows;
+ size_t n_bytes, n_bytes_limit;
+ char *data;
+};
+
+int init_rowset (struct rowset *rows, uint64_t memory_budget);
+void destroy_rowset(struct rowset *rows);
+int add_row(struct rowset *rows, DBT *key, DBT *val);
+
+int loader_write_row(DBT *key,
+ DBT *val,
+ FIDX data,
+ TOKU_FILE *,
+ uint64_t *dataoff,
+ struct wbuf *wb,
+ FTLOADER bl);
+int loader_read_row(TOKU_FILE *f, DBT *key, DBT *val);
+
+struct merge_fileset {
+    bool have_sorted_output; // Is there a previous key?
+    FIDX sorted_output; // this points to one of the data_fidxs. If have_sorted_output is true, this is the file containing sorted data. It's still open.
+    DBT prev_key; // If it's here, it's the last key output into the merge fileset.
+
+ int n_temp_files, n_temp_files_limit;
+ FIDX *data_fidxs;
+};
+
+void init_merge_fileset (struct merge_fileset *fs);
+void destroy_merge_fileset (struct merge_fileset *fs);
+
+struct poll_callback_s {
+ ft_loader_poll_func poll_function;
+ void *poll_extra;
+};
+typedef struct poll_callback_s *ft_loader_poll_callback;
+
+int ft_loader_init_poll_callback(ft_loader_poll_callback);
+
+void ft_loader_destroy_poll_callback(ft_loader_poll_callback);
+
+void ft_loader_set_poll_function(ft_loader_poll_callback, ft_loader_poll_func poll_function, void *poll_extra);
+
+int ft_loader_call_poll_function(ft_loader_poll_callback, float progress);
+
+struct error_callback_s {
+ int error;
+ ft_loader_error_func error_callback;
+ void *extra;
+ DB *db;
+ int which_db;
+ DBT key;
+ DBT val;
+ bool did_callback;
+ toku_mutex_t mutex;
+};
+typedef struct error_callback_s *ft_loader_error_callback;
+
+void ft_loader_init_error_callback(ft_loader_error_callback);
+
+void ft_loader_destroy_error_callback(ft_loader_error_callback);
+
+int ft_loader_get_error(ft_loader_error_callback);
+
+void ft_loader_set_error_function(ft_loader_error_callback, ft_loader_error_func error_function, void *extra);
+
+int ft_loader_set_error(ft_loader_error_callback, int error, DB *db, int which_db, DBT *key, DBT *val);
+
+int ft_loader_call_error_function(ft_loader_error_callback);
+
+int ft_loader_set_error_and_callback(ft_loader_error_callback, int error, DB *db, int which_db, DBT *key, DBT *val);
+
+struct ft_loader_s {
+ // These two are set in the close function, and used while running close
+ struct error_callback_s error_callback;
+ struct poll_callback_s poll_callback;
+
+ generate_row_for_put_func generate_row_for_put;
+ ft_compare_func *bt_compare_funs;
+
+ DB *src_db;
+ int N;
+ DB **dbs; // N of these
+ DESCRIPTOR *descriptors; // N of these.
+ TXNID *root_xids_that_created; // N of these.
+ const char **new_fnames_in_env; // N of these. The file names that the final data will be written to (relative to env).
+
+ uint64_t *extracted_datasizes; // N of these.
+
+ struct rowset primary_rowset; // the primary rows that have been put, but the secondary rows haven't been generated.
+ struct rowset primary_rowset_temp; // the primary rows that are being worked on by the extractor_thread.
+
+    QUEUE primary_rowset_queue; // main thread enqueues rowsets in this queue (in maybe 64MB chunks). The extractor thread removes them, sorts them, and writes them to a file.
+ toku_pthread_t extractor_thread; // the thread that takes primary rowset and does extraction and the first level sort and write to file.
+ bool extractor_live;
+
+ DBT *last_key; // for each rowset, remember the most recently output key. The system may choose not to keep this up-to-date when a rowset is unsorted. These keys are malloced and ulen maintains the size of the malloced block.
+
+ struct rowset *rows; // secondary rows that have been put, but haven't been sorted and written to a file.
+ uint64_t n_rows; // how many rows have been put?
+ struct merge_fileset *fs;
+
+ const char *temp_file_template;
+
+ CACHETABLE cachetable;
+ bool did_reserve_memory;
+ bool compress_intermediates;
+ bool allow_puts;
+ uint64_t reserved_memory; // how much memory are we allowed to use?
+
+ /* To make it easier to recover from errors, we don't use TOKU_FILE*,
+ * instead we use an index into the file_infos. */
+ struct file_infos file_infos;
+
+#define PROGRESS_MAX (1 << 16)
+ int progress; // Progress runs from 0 to PROGRESS_MAX. When we call the poll function we convert to a float from 0.0 to 1.0
+ // We use an integer so that we can add to the progress using a fetch-and-add instruction.
+
+ int progress_callback_result; // initially zero, if any call to the poll function callback returns nonzero, we save the result here (and don't call the poll callback function again).
+
+ LSN load_lsn; //LSN of the fsynced 'load' log entry. Write this LSN (as checkpoint_lsn) in ft headers made by this loader.
+ TXNID load_root_xid; //(Root) transaction that performed the load.
+
+ QUEUE *fractal_queues; // an array of work queues, one for each secondary index.
+ toku_pthread_t *fractal_threads;
+ bool *fractal_threads_live; // an array of bools indicating that fractal_threads[i] is a live thread. (There is no NULL for a pthread_t, so we have to maintain this separately).
+
+ unsigned fractal_workers; // number of fractal tree writer threads
+
+ toku_mutex_t mutex;
+ bool mutex_init;
+};
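+
+// Sketch of how the progress fields above are meant to be consumed (illustrative,
+// pieced together from the comments on struct ft_loader_s): workers add their share
+// to bl->progress atomically, and the polling path converts the integer into the
+// 0.0..1.0 float the callback expects, e.g.
+//
+//   float fraction = (float)bl->progress / (float)PROGRESS_MAX;
+//   int r = ft_loader_call_poll_function(&bl->poll_callback, fraction);
+//   if (r != 0 && bl->progress_callback_result == 0)
+//       bl->progress_callback_result = r;  // remember the first nonzero result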
+
+// Set the number of rows in the loader. Used for test.
+void toku_ft_loader_set_n_rows(FTLOADER bl, uint64_t n_rows);
+
+// Get the number of rows in the loader. Used for test.
+uint64_t toku_ft_loader_get_n_rows(FTLOADER bl);
+
+// The data passed into a fractal_thread via pthread_create.
+struct fractal_thread_args {
+ FTLOADER bl;
+ const DESCRIPTOR descriptor;
+ int fd; // write the ft into fd.
+ int progress_allocation;
+ QUEUE q;
+ uint64_t total_disksize_estimate;
+ int errno_result; // the final result.
+ int which_db;
+ uint32_t target_nodesize;
+ uint32_t target_basementnodesize;
+ enum toku_compression_method target_compression_method;
+ uint32_t target_fanout;
+};
+
+void toku_ft_loader_set_n_rows(FTLOADER bl, uint64_t n_rows);
+uint64_t toku_ft_loader_get_n_rows(FTLOADER bl);
+
+int merge_row_arrays_base (struct row dest[/*an+bn*/], struct row a[/*an*/], int an, struct row b[/*bn*/], int bn,
+ int which_db, DB *dest_db, ft_compare_func,
+ FTLOADER,
+ struct rowset *);
+
+int merge_files (struct merge_fileset *fs, FTLOADER bl, int which_db, DB *dest_db, ft_compare_func, int progress_allocation, QUEUE);
+
+int sort_and_write_rows (struct rowset rows, struct merge_fileset *fs, FTLOADER bl, int which_db, DB *dest_db, ft_compare_func);
+
+int mergesort_row_array (struct row rows[/*n*/], int n, int which_db, DB *dest_db, ft_compare_func, FTLOADER, struct rowset *);
+
+//int write_file_to_dbfile (int outfile, FIDX infile, FTLOADER bl, const DESCRIPTOR descriptor, int progress_allocation);
+int toku_merge_some_files_using_dbufio (const bool to_q, FIDX dest_data, QUEUE q, int n_sources, DBUFIO_FILESET bfs, FIDX srcs_fidxs[/*n_sources*/], FTLOADER bl, int which_db, DB *dest_db, ft_compare_func compare, int progress_allocation);
+
+int ft_loader_sort_and_write_rows (struct rowset *rows, struct merge_fileset *fs, FTLOADER bl, int which_db, DB *dest_db, ft_compare_func);
+
+// This is probably only for testing.
+int toku_loader_write_ft_from_q_in_C (FTLOADER bl,
+ const DESCRIPTOR descriptor,
+ int fd, // write to here
+ int progress_allocation,
+ QUEUE q,
+ uint64_t total_disksize_estimate,
+ int which_db,
+ uint32_t target_nodesize,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method target_compression_method,
+ uint32_t fanout);
+
+int ft_loader_mergesort_row_array (struct row rows[/*n*/], int n, int which_db, DB *dest_db, ft_compare_func, FTLOADER, struct rowset *);
+
+int ft_loader_write_file_to_dbfile (int outfile, FIDX infile, FTLOADER bl, const DESCRIPTOR descriptor, int progress_allocation);
+
+int ft_loader_init_file_infos (struct file_infos *fi);
+void ft_loader_fi_destroy (struct file_infos *fi, bool is_error);
+int ft_loader_fi_close (struct file_infos *fi, FIDX idx, bool require_open);
+int ft_loader_fi_close_all (struct file_infos *fi);
+int ft_loader_fi_reopen (struct file_infos *fi, FIDX idx, const char *mode);
+int ft_loader_fi_unlink (struct file_infos *fi, FIDX idx);
+
+int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
+ CACHETABLE cachetable,
+ generate_row_for_put_func g,
+ DB *src_db,
+ int N, FT_HANDLE ft_hs[/*N*/], DB* dbs[/*N*/],
+ const char *new_fnames_in_env[/*N*/],
+ ft_compare_func bt_compare_functions[/*N*/],
+ const char *temp_file_template,
+ LSN load_lsn,
+ TOKUTXN txn,
+ bool reserve_memory,
+ uint64_t reserve_memory_size,
+ bool compress_intermediates,
+ bool allow_puts);
+
+void toku_ft_loader_internal_destroy (FTLOADER bl, bool is_error);
+
+// For test purposes only. (In production, the rowset size is determined by negotiation with the cachetable for some memory. See #2613.)
+uint64_t toku_ft_loader_get_rowset_budget_for_testing (void);
+
+int toku_ft_loader_finish_extractor(FTLOADER bl);
+
+int toku_ft_loader_get_error(FTLOADER bl, int *loader_errno);
+
+void ft_loader_lock_init(FTLOADER bl);
+void ft_loader_lock_destroy(FTLOADER bl);
+void ft_loader_set_fractal_workers_count_from_c(FTLOADER bl);
diff --git a/storage/tokudb/PerconaFT/ft/loader/loader.cc b/storage/tokudb/PerconaFT/ft/loader/loader.cc
new file mode 100644
index 00000000..3ff237f0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/loader/loader.cc
@@ -0,0 +1,3424 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <toku_portability.h>
+
+#include <arpa/inet.h>
+
+#include <stdio.h>
+#include <memory.h>
+#include <errno.h>
+#include <toku_assert.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "ft/ft.h"
+#include "ft/ft-internal.h"
+#include "ft/leafentry.h"
+#include "ft/loader/loader-internal.h"
+#include "ft/loader/pqueue.h"
+#include "ft/loader/dbufio.h"
+#include "ft/logger/log-internal.h"
+#include "ft/node.h"
+#include "ft/serialize/block_table.h"
+#include "ft/serialize/ft-serialize.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "ft/serialize/sub_block.h"
+
+#include "util/x1764.h"
+
+toku_instr_key *loader_bl_mutex_key;
+toku_instr_key *loader_fi_lock_mutex_key;
+toku_instr_key *loader_out_mutex_key;
+
+toku_instr_key *extractor_thread_key;
+toku_instr_key *fractal_thread_key;
+
+toku_instr_key *tokudb_file_tmp_key;
+toku_instr_key *tokudb_file_load_key;
+
+// 1024 is the right size_factor for production.
+// Different values for these sizes may be used for testing.
+static uint32_t size_factor = 1024;
+static uint32_t default_loader_nodesize = FT_DEFAULT_NODE_SIZE;
+static uint32_t default_loader_basementnodesize = FT_DEFAULT_BASEMENT_NODE_SIZE;
+
+void
+toku_ft_loader_set_size_factor(uint32_t factor) {
+// For test purposes only
+ size_factor = factor;
+ default_loader_nodesize = (size_factor==1) ? (1<<15) : FT_DEFAULT_NODE_SIZE;
+}
+
+uint64_t
+toku_ft_loader_get_rowset_budget_for_testing (void)
+// For test purposes only. In production, the rowset size is determined by negotiation with the cachetable for some memory. (See #2613).
+{
+ return 16ULL*size_factor*1024ULL;
+}
+
+void ft_loader_lock_init(FTLOADER bl) {
+ invariant(!bl->mutex_init);
+ toku_mutex_init(*loader_bl_mutex_key, &bl->mutex, nullptr);
+ bl->mutex_init = true;
+}
+
+void ft_loader_lock_destroy(FTLOADER bl) {
+ if (bl->mutex_init) {
+ toku_mutex_destroy(&bl->mutex);
+ bl->mutex_init = false;
+ }
+}
+
+static void ft_loader_lock(FTLOADER bl) {
+ invariant(bl->mutex_init);
+ toku_mutex_lock(&bl->mutex);
+}
+
+static void ft_loader_unlock(FTLOADER bl) {
+ invariant(bl->mutex_init);
+ toku_mutex_unlock(&bl->mutex);
+}
+
+static int add_big_buffer(struct file_info *file) {
+ int result = 0;
+ bool newbuffer = false;
+ if (file->buffer == NULL) {
+ file->buffer = toku_malloc(file->buffer_size);
+ if (file->buffer == NULL)
+ result = get_error_errno();
+ else
+ newbuffer = true;
+ }
+ if (result == 0) {
+ int r = setvbuf(file->file->file,
+ static_cast<char *>(file->buffer),
+ _IOFBF,
+ file->buffer_size);
+ if (r != 0) {
+ result = get_error_errno();
+ if (newbuffer) {
+ toku_free(file->buffer);
+ file->buffer = NULL;
+ }
+ }
+ }
+ return result;
+}
+
+static void cleanup_big_buffer(struct file_info *file) {
+ if (file->buffer) {
+ toku_free(file->buffer);
+ file->buffer = NULL;
+ }
+}
+
+int ft_loader_init_file_infos(struct file_infos *fi) {
+ int result = 0;
+ toku_mutex_init(*loader_fi_lock_mutex_key, &fi->lock, nullptr);
+ fi->n_files = 0;
+ fi->n_files_limit = 1;
+ fi->n_files_open = 0;
+ fi->n_files_extant = 0;
+ MALLOC_N(fi->n_files_limit, fi->file_infos);
+ if (fi->file_infos == NULL)
+ result = get_error_errno();
+ return result;
+}
+
+void ft_loader_fi_destroy (struct file_infos *fi, bool is_error)
+// Effect: Free the resources in the fi.
+// If is_error then we close and unlink all the temp files.
+// If !is_error then requires that all the temp files have been closed and destroyed
+// No error codes are returned. If anything goes wrong with closing and unlinking then it's only in an is_error case, so we don't care.
+{
+ if (fi->file_infos == NULL) {
+ // ft_loader_init_file_infos guarantees this isn't null, so if it is, we know it hasn't been inited yet and we don't need to destroy it.
+ return;
+ }
+ toku_mutex_destroy(&fi->lock);
+ if (!is_error) {
+ invariant(fi->n_files_open==0);
+ invariant(fi->n_files_extant==0);
+ }
+ for (int i=0; i<fi->n_files; i++) {
+ if (fi->file_infos[i].is_open) {
+ invariant(is_error);
+ toku_os_fclose(fi->file_infos[i].file); // don't check for errors, since we are in an error case.
+ }
+ if (fi->file_infos[i].is_extant) {
+ invariant(is_error);
+ unlink(fi->file_infos[i].fname);
+ toku_free(fi->file_infos[i].fname);
+ }
+ cleanup_big_buffer(&fi->file_infos[i]);
+ }
+ toku_free(fi->file_infos);
+ fi->n_files=0;
+ fi->n_files_limit=0;
+ fi->file_infos = NULL;
+}
+
+static int open_file_add(struct file_infos *fi,
+ TOKU_FILE *file,
+ char *fname,
+ /* out */ FIDX *idx) {
+ int result = 0;
+ toku_mutex_lock(&fi->lock);
+ if (fi->n_files >= fi->n_files_limit) {
+ fi->n_files_limit *=2;
+ XREALLOC_N(fi->n_files_limit, fi->file_infos);
+ }
+ invariant(fi->n_files < fi->n_files_limit);
+ fi->file_infos[fi->n_files].is_open = true;
+ fi->file_infos[fi->n_files].is_extant = true;
+ fi->file_infos[fi->n_files].fname = fname;
+ fi->file_infos[fi->n_files].file = file;
+ fi->file_infos[fi->n_files].n_rows = 0;
+ fi->file_infos[fi->n_files].buffer_size = FILE_BUFFER_SIZE;
+ fi->file_infos[fi->n_files].buffer = NULL;
+ result = add_big_buffer(&fi->file_infos[fi->n_files]);
+ if (result == 0) {
+ idx->idx = fi->n_files;
+ fi->n_files++;
+ fi->n_files_extant++;
+ fi->n_files_open++;
+ }
+ toku_mutex_unlock(&fi->lock);
+ return result;
+}
+
+int ft_loader_fi_reopen (struct file_infos *fi, FIDX idx, const char *mode) {
+ int result = 0;
+ toku_mutex_lock(&fi->lock);
+ int i = idx.idx;
+ invariant(i >= 0 && i < fi->n_files);
+ invariant(!fi->file_infos[i].is_open);
+ invariant(fi->file_infos[i].is_extant);
+ fi->file_infos[i].file =
+ toku_os_fopen(fi->file_infos[i].fname, mode, *tokudb_file_load_key);
+ if (fi->file_infos[i].file == NULL) {
+ result = get_error_errno();
+ } else {
+ fi->file_infos[i].is_open = true;
+ // No longer need the big buffer for reopened files. Don't allocate the space, we need it elsewhere.
+ //add_big_buffer(&fi->file_infos[i]);
+ fi->n_files_open++;
+ }
+ toku_mutex_unlock(&fi->lock);
+ return result;
+}
+
+int ft_loader_fi_close (struct file_infos *fi, FIDX idx, bool require_open)
+{
+ int result = 0;
+ toku_mutex_lock(&fi->lock);
+ invariant(idx.idx >=0 && idx.idx < fi->n_files);
+ if (fi->file_infos[idx.idx].is_open) {
+ invariant(fi->n_files_open>0); // loader-cleanup-test failure
+ fi->n_files_open--;
+ fi->file_infos[idx.idx].is_open = false;
+ int r = toku_os_fclose(fi->file_infos[idx.idx].file);
+ if (r)
+ result = get_error_errno();
+ cleanup_big_buffer(&fi->file_infos[idx.idx]);
+ } else if (require_open)
+ result = EINVAL;
+ toku_mutex_unlock(&fi->lock);
+ return result;
+}
+
+int ft_loader_fi_unlink (struct file_infos *fi, FIDX idx) {
+ int result = 0;
+ toku_mutex_lock(&fi->lock);
+ int id = idx.idx;
+ invariant(id >=0 && id < fi->n_files);
+ if (fi->file_infos[id].is_extant) { // must still exist
+ invariant(fi->n_files_extant>0);
+ fi->n_files_extant--;
+ invariant(!fi->file_infos[id].is_open); // must be closed before we unlink
+ fi->file_infos[id].is_extant = false;
+ int r = unlink(fi->file_infos[id].fname);
+ if (r != 0)
+ result = get_error_errno();
+ toku_free(fi->file_infos[id].fname);
+ fi->file_infos[id].fname = NULL;
+ } else
+ result = EINVAL;
+ toku_mutex_unlock(&fi->lock);
+ return result;
+}
+
+int
+ft_loader_fi_close_all(struct file_infos *fi) {
+ int rval = 0;
+ for (int i = 0; i < fi->n_files; i++) {
+ int r;
+ FIDX idx = { i };
+ r = ft_loader_fi_close(fi, idx, false); // ignore files that are already closed
+ if (rval == 0 && r)
+ rval = r; // capture first error
+ }
+ return rval;
+}
+
+int ft_loader_open_temp_file (FTLOADER bl, FIDX *file_idx)
+/* Effect: Open a temporary file in read-write mode. Save enough information to close and delete the file later.
+ * Return value: 0 on success, an error number otherwise.
+ * On error, *file_idx will be unmodified.
+ * The open file will be saved in bl->file_infos so that even if errors happen we can free them all.
+ */
+{
+ int result = 0;
+ if (result) // debug hack
+ return result;
+ TOKU_FILE *f = NULL;
+ int fd = -1;
+ char *fname = toku_strdup(bl->temp_file_template);
+ if (fname == NULL)
+ result = get_error_errno();
+ else {
+ fd = mkstemp(fname);
+ if (fd < 0) {
+ result = get_error_errno();
+ } else {
+ f = toku_os_fdopen(fd, "r+", fname, *tokudb_file_tmp_key);
+ if (f->file == nullptr)
+ result = get_error_errno();
+ else
+ result = open_file_add(&bl->file_infos, f, fname, file_idx);
+ }
+ }
+ if (result != 0) {
+ if (fd >= 0) {
+ toku_os_close(fd);
+ unlink(fname);
+ }
+ if (f != NULL)
+ toku_os_fclose(f); // don't check for error because we're already in an error case
+ if (fname != NULL)
+ toku_free(fname);
+ }
+ return result;
+}
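+
+// Note (illustrative): the name above goes straight to mkstemp(), so the caller's
+// temp_file_template must end in "XXXXXX", e.g. something of the form
+// "<env dir>/loader-temp-XXXXXX"; the exact template is chosen by the caller.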
+
+void toku_ft_loader_internal_destroy(FTLOADER bl, bool is_error) {
+ ft_loader_lock_destroy(bl);
+
+ // These frees rely on the fact that if you free a NULL pointer then nothing bad happens.
+ toku_free(bl->dbs);
+ toku_free(bl->descriptors);
+ toku_free(bl->root_xids_that_created);
+ if (bl->new_fnames_in_env) {
+ for (int i = 0; i < bl->N; i++)
+ toku_free((char*)bl->new_fnames_in_env[i]);
+ toku_free(bl->new_fnames_in_env);
+ }
+ toku_free(bl->extracted_datasizes);
+ toku_free(bl->bt_compare_funs);
+ toku_free((char*)bl->temp_file_template);
+ ft_loader_fi_destroy(&bl->file_infos, is_error);
+
+ for (int i = 0; i < bl->N; i++)
+ destroy_rowset(&bl->rows[i]);
+ toku_free(bl->rows);
+
+ for (int i = 0; i < bl->N; i++)
+ destroy_merge_fileset(&bl->fs[i]);
+ toku_free(bl->fs);
+
+ if (bl->last_key) {
+ for (int i=0; i < bl->N; i++) {
+ toku_free(bl->last_key[i].data);
+ }
+ toku_free(bl->last_key);
+ bl->last_key = NULL;
+ }
+
+ destroy_rowset(&bl->primary_rowset);
+ if (bl->primary_rowset_queue) {
+ toku_queue_destroy(bl->primary_rowset_queue);
+ bl->primary_rowset_queue = nullptr;
+ }
+
+ for (int i=0; i<bl->N; i++) {
+ if ( bl->fractal_queues ) {
+ invariant(bl->fractal_queues[i]==NULL);
+ }
+ }
+ toku_free(bl->fractal_threads);
+ toku_free(bl->fractal_queues);
+ toku_free(bl->fractal_threads_live);
+
+ if (bl->did_reserve_memory) {
+ invariant(bl->cachetable);
+ toku_cachetable_release_reserved_memory(bl->cachetable, bl->reserved_memory);
+ }
+
+ ft_loader_destroy_error_callback(&bl->error_callback);
+ ft_loader_destroy_poll_callback(&bl->poll_callback);
+
+ //printf("Progress=%d/%d\n", bl->progress, PROGRESS_MAX);
+
+ toku_free(bl);
+}
+
+static void *extractor_thread (void*);
+
+#define MAX(a,b) (((a)<(b)) ? (b) : (a))
+
+static uint64_t memory_per_rowset_during_extract (FTLOADER bl)
+// Return how much memory can be allocated for each rowset.
+{
+ if (size_factor==1) {
+ return 16*1024;
+ } else {
+ // There is a primary rowset being maintained by the foreground thread.
+ // There could be two more in the queue.
+ // There is one rowset for each index (bl->N) being filled in.
+ // Later we may have sort_and_write operations spawning in parallel, and will need to account for that.
+ int n_copies = (1 // primary rowset
+ +EXTRACTOR_QUEUE_DEPTH // the number of primaries in the queue
+ +bl->N // the N rowsets being constructed by the extractor thread.
+ +bl->N // the N sort buffers
+ +1 // Give the extractor thread one more so that it can have temporary space for sorting. This is overkill.
+ );
+ int64_t extra_reserved_memory = bl->N * FILE_BUFFER_SIZE; // for each index we are writing to a file at any given time.
+ int64_t tentative_rowset_size = ((int64_t)(bl->reserved_memory - extra_reserved_memory))/(n_copies);
+ return MAX(tentative_rowset_size, (int64_t)MIN_ROWSET_MEMORY);
+ }
+}
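+
+// Worked example (not in the original): with the 512MB no-cachetable default and
+// N = 2 indexes, extra_reserved_memory = 2 * 16MB = 32MB and n_copies =
+// 1 + 2 + 2 + 2 + 1 = 8, so each rowset gets about (512MB - 32MB) / 8 = 60MB,
+// comfortably above the 8MB MIN_ROWSET_MEMORY floor.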
+
+static unsigned ft_loader_get_fractal_workers_count(FTLOADER bl) {
+ unsigned w = 0;
+ while (1) {
+ ft_loader_lock(bl);
+ w = bl->fractal_workers;
+ ft_loader_unlock(bl);
+ if (w != 0)
+ break;
+ toku_pthread_yield(); // maybe use a cond var instead
+ }
+ return w;
+}
+
+static void ft_loader_set_fractal_workers_count(FTLOADER bl) {
+ ft_loader_lock(bl);
+ if (bl->fractal_workers == 0)
+ bl->fractal_workers = 1;
+ ft_loader_unlock(bl);
+}
+
+// To compute a merge, we have a certain amount of memory to work with.
+// We perform only one fanin at a time.
+// If the fanout is F then we are using
+// F merges. Each merge uses
+// DBUFIO_DEPTH buffers for double buffering. Each buffer is of size at least MERGE_BUF_SIZE
+// so the memory is
+// F*MERGE_BUF_SIZE*DBUFIO_DEPTH storage.
+// We use some additional space to buffer the outputs.
+// That's FILE_BUFFER_SIZE if we are writing to a merge file.
+// And we have FRACTAL_WRITER_ROWSETS*MERGE_BUF_SIZE per queue
+// And if we are doing a fractal, each worker could have a fractal tree that it's working on.
+//
+// DBUFIO_DEPTH*F*MERGE_BUF_SIZE + FRACTAL_WRITER_ROWSETS*MERGE_BUF_SIZE + WORKERS*NODESIZE*2 <= RESERVED_MEMORY
+
+static int64_t memory_avail_during_merge(FTLOADER bl, bool is_fractal_node) {
+ // avail memory = reserved memory - WORKERS*NODESIZE*2 for the last merge stage only
+ int64_t avail_memory = bl->reserved_memory;
+ if (is_fractal_node) {
+ // reserve space for the fractal writer thread buffers
+ avail_memory -= (int64_t)ft_loader_get_fractal_workers_count(bl) * (int64_t)default_loader_nodesize * 2; // compressed and uncompressed buffers
+ }
+ return avail_memory;
+}
+
+static int merge_fanin (FTLOADER bl, bool is_fractal_node) {
+ // return number of temp files to read in this pass
+ int64_t memory_avail = memory_avail_during_merge(bl, is_fractal_node);
+ int64_t nbuffers = memory_avail / (int64_t)TARGET_MERGE_BUF_SIZE;
+ if (is_fractal_node)
+ nbuffers -= FRACTAL_WRITER_ROWSETS;
+ return MAX(nbuffers / (int64_t)DBUFIO_DEPTH, (int)MIN_MERGE_FANIN);
+}
+
+static uint64_t memory_per_rowset_during_merge (FTLOADER bl, int merge_factor, bool is_fractal_node // if it is being sent to a q
+ ) {
+ int64_t memory_avail = memory_avail_during_merge(bl, is_fractal_node);
+ int64_t nbuffers = DBUFIO_DEPTH * merge_factor;
+ if (is_fractal_node)
+ nbuffers += FRACTAL_WRITER_ROWSETS;
+ return MAX(memory_avail / nbuffers, (int64_t)MIN_MERGE_BUF_SIZE);
+}
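+
+// Worked example (not in the original), ignoring the fractal-writer adjustment:
+// with 512MB reserved, merge_fanin() sees 512MB / 16MB = 32 target buffers and
+// returns 32 / DBUFIO_DEPTH = 16, and memory_per_rowset_during_merge() with
+// merge_factor = 16 then hands each of the 2 * 16 = 32 double buffers
+// 512MB / 32 = 16MB, which is above the 1MB MIN_MERGE_BUF_SIZE floor.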
+
+int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
+ CACHETABLE cachetable,
+ generate_row_for_put_func g,
+ DB *src_db,
+ int N, FT_HANDLE fts[/*N*/], DB* dbs[/*N*/],
+ const char *new_fnames_in_env[/*N*/],
+ ft_compare_func bt_compare_functions[/*N*/],
+ const char *temp_file_template,
+ LSN load_lsn,
+ TOKUTXN txn,
+ bool reserve_memory,
+ uint64_t reserve_memory_size,
+ bool compress_intermediates,
+ bool allow_puts)
+// Effect: Allocate and initialize a FTLOADER, but do not create the extractor thread.
+{
+ FTLOADER CALLOC(bl); // initialized to all zeros (hence CALLOC)
+ if (!bl) return get_error_errno();
+
+ bl->generate_row_for_put = g;
+ bl->cachetable = cachetable;
+ if (reserve_memory && bl->cachetable) {
+ bl->did_reserve_memory = true;
+ bl->reserved_memory = toku_cachetable_reserve_memory(bl->cachetable, 2.0/3.0, reserve_memory_size); // allocate 2/3 of the unreserved part (which is 3/4 of the memory to start with).
+ }
+ else {
+ bl->did_reserve_memory = false;
+ bl->reserved_memory = 512*1024*1024; // if no cache table use 512MB.
+ }
+ bl->compress_intermediates = compress_intermediates;
+ bl->allow_puts = allow_puts;
+ bl->src_db = src_db;
+ bl->N = N;
+ bl->load_lsn = load_lsn;
+ if (txn) {
+ bl->load_root_xid = txn->txnid.parent_id64;
+ }
+ else {
+ bl->load_root_xid = TXNID_NONE;
+ }
+
+ ft_loader_init_error_callback(&bl->error_callback);
+ ft_loader_init_poll_callback(&bl->poll_callback);
+
+#define MY_CALLOC_N(n,v) CALLOC_N(n,v); if (!v) { int r = get_error_errno(); toku_ft_loader_internal_destroy(bl, true); return r; }
+#define SET_TO_MY_STRDUP(lval, s) do { char *v = toku_strdup(s); if (!v) { int r = get_error_errno(); toku_ft_loader_internal_destroy(bl, true); return r; } lval = v; } while (0)
+
+ MY_CALLOC_N(N, bl->root_xids_that_created);
+ for (int i=0; i<N; i++) if (fts[i]) bl->root_xids_that_created[i]=fts[i]->ft->h->root_xid_that_created;
+ MY_CALLOC_N(N, bl->dbs);
+ for (int i=0; i<N; i++) if (fts[i]) bl->dbs[i]=dbs[i];
+ MY_CALLOC_N(N, bl->descriptors);
+ for (int i=0; i<N; i++) if (fts[i]) bl->descriptors[i]=&fts[i]->ft->descriptor;
+ MY_CALLOC_N(N, bl->new_fnames_in_env);
+ for (int i=0; i<N; i++) SET_TO_MY_STRDUP(bl->new_fnames_in_env[i], new_fnames_in_env[i]);
+ MY_CALLOC_N(N, bl->extracted_datasizes); // the calloc_n zeroed everything, which is what we want
+ MY_CALLOC_N(N, bl->bt_compare_funs);
+ for (int i=0; i<N; i++) bl->bt_compare_funs[i] = bt_compare_functions[i];
+
+ MY_CALLOC_N(N, bl->fractal_queues);
+ for (int i=0; i<N; i++) bl->fractal_queues[i]=NULL;
+ MY_CALLOC_N(N, bl->fractal_threads);
+ MY_CALLOC_N(N, bl->fractal_threads_live);
+ for (int i=0; i<N; i++) bl->fractal_threads_live[i] = false;
+
+ {
+ int r = ft_loader_init_file_infos(&bl->file_infos);
+ if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; }
+ }
+
+ SET_TO_MY_STRDUP(bl->temp_file_template, temp_file_template);
+
+ bl->n_rows = 0;
+ bl->progress = 0;
+ bl->progress_callback_result = 0;
+
+ MY_CALLOC_N(N, bl->rows);
+ MY_CALLOC_N(N, bl->fs);
+ MY_CALLOC_N(N, bl->last_key);
+ for(int i=0;i<N;i++) {
+ {
+ int r = init_rowset(&bl->rows[i], memory_per_rowset_during_extract(bl));
+ if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; }
+ }
+ init_merge_fileset(&bl->fs[i]);
+ bl->last_key[i].flags = DB_DBT_REALLOC; // don't really need this, but it's nice to maintain it. We use ulen to keep track of the realloced space.
+ }
+
+ {
+ int r = init_rowset(&bl->primary_rowset, memory_per_rowset_during_extract(bl));
+ if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; }
+ }
+ { int r = toku_queue_create(&bl->primary_rowset_queue, EXTRACTOR_QUEUE_DEPTH);
+ if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; }
+ }
+ {
+ ft_loader_lock_init(bl);
+ }
+
+ *blp = bl;
+
+ return 0;
+}
+
+int toku_ft_loader_open (FTLOADER *blp, /* out */
+ CACHETABLE cachetable,
+ generate_row_for_put_func g,
+ DB *src_db,
+ int N, FT_HANDLE fts[/*N*/], DB* dbs[/*N*/],
+ const char *new_fnames_in_env[/*N*/],
+ ft_compare_func bt_compare_functions[/*N*/],
+ const char *temp_file_template,
+ LSN load_lsn,
+ TOKUTXN txn,
+ bool reserve_memory,
+ uint64_t reserve_memory_size,
+ bool compress_intermediates,
+ bool allow_puts) {
+// Effect: called by DB_ENV->create_loader to create an ft loader.
+// Arguments:
+// blp Return a ft loader ("bulk loader") here.
+// g The function for generating a row
+// src_db The source database. Needed by g. May be NULL if that's ok with g.
+// N The number of dbs to create.
+//   dbs         An array of open databases. Used by g. The data will be put in these databases.
+// new_fnames The file names (these strings are owned by the caller: we make a copy for our own purposes).
+// temp_file_template A template suitable for mkstemp()
+// reserve_memory Cause the loader to reserve memory for its use from the cache table.
+// compress_intermediates Cause the loader to compress intermediate loader files.
+// allow_puts Prepare the loader for rows to insert. When puts are disabled, the loader does not run the
+// extractor or the fractal tree writer threads.
+// Return value: 0 on success, an error number otherwise.
+ int result = 0;
+ {
+ int r = toku_ft_loader_internal_init(blp, cachetable, g, src_db,
+ N, fts, dbs,
+ new_fnames_in_env,
+ bt_compare_functions,
+ temp_file_template,
+ load_lsn,
+ txn,
+ reserve_memory,
+ reserve_memory_size,
+ compress_intermediates,
+ allow_puts);
+ if (r!=0) result = r;
+ }
+ if (result == 0 && allow_puts) {
+ FTLOADER bl = *blp;
+ int r = toku_pthread_create(*extractor_thread_key,
+ &bl->extractor_thread,
+ nullptr,
+ extractor_thread,
+ static_cast<void *>(bl));
+ if (r == 0) {
+ bl->extractor_live = true;
+ } else {
+ result = r;
+ (void) toku_ft_loader_internal_destroy(bl, true);
+ }
+ }
+ return result;
+}
+
+static void ft_loader_set_panic(FTLOADER bl, int error, bool callback, int which_db, DBT *key, DBT *val) {
+ DB *db = nullptr;
+ if (bl && bl->dbs && which_db >= 0 && which_db < bl->N) {
+ db = bl->dbs[which_db];
+ }
+ int r = ft_loader_set_error(&bl->error_callback, error, db, which_db, key, val);
+ if (r == 0 && callback)
+ ft_loader_call_error_function(&bl->error_callback);
+}
+
+// One of the tests uses this.
+TOKU_FILE *toku_bl_fidx2file(FTLOADER bl, FIDX i) {
+ toku_mutex_lock(&bl->file_infos.lock);
+ invariant(i.idx >= 0 && i.idx < bl->file_infos.n_files);
+ invariant(bl->file_infos.file_infos[i.idx].is_open);
+ TOKU_FILE *result = bl->file_infos.file_infos[i.idx].file;
+ toku_mutex_unlock(&bl->file_infos.lock);
+ return result;
+}
+
+static int bl_finish_compressed_write(TOKU_FILE *stream, struct wbuf *wb) {
+ int r = 0;
+ char *compressed_buf = NULL;
+ const size_t data_size = wb->ndone;
+ invariant(data_size > 0);
+ invariant(data_size <= MAX_UNCOMPRESSED_BUF);
+
+ int n_sub_blocks = 0;
+ int sub_block_size = 0;
+
+ r = choose_sub_block_size(wb->ndone, max_sub_blocks, &sub_block_size, &n_sub_blocks);
+ invariant(r==0);
+ invariant(0 < n_sub_blocks && n_sub_blocks <= max_sub_blocks);
+ invariant(sub_block_size > 0);
+
+ struct sub_block sub_block[max_sub_blocks];
+ // set the initial sub block size for all of the sub blocks
+ for (int i = 0; i < n_sub_blocks; i++) {
+ sub_block_init(&sub_block[i]);
+ }
+ set_all_sub_block_sizes(data_size, sub_block_size, n_sub_blocks, sub_block);
+
+ size_t compressed_len = get_sum_compressed_size_bound(n_sub_blocks, sub_block, TOKU_DEFAULT_COMPRESSION_METHOD);
+ const size_t sub_block_header_len = sub_block_header_size(n_sub_blocks);
+ const size_t other_overhead = sizeof(uint32_t); //total_size
+ const size_t header_len = sub_block_header_len + other_overhead;
+ MALLOC_N(header_len + compressed_len, compressed_buf);
+ if (compressed_buf == nullptr) {
+ return ENOMEM;
+ }
+
+ // compress all of the sub blocks
+ char *uncompressed_ptr = (char*)wb->buf;
+ char *compressed_ptr = compressed_buf + header_len;
+ compressed_len = compress_all_sub_blocks(n_sub_blocks, sub_block, uncompressed_ptr, compressed_ptr,
+ get_num_cores(), get_ft_pool(), TOKU_DEFAULT_COMPRESSION_METHOD);
+
+ //total_size does NOT include itself
+ uint32_t total_size = compressed_len + sub_block_header_len;
+ // serialize the sub block header
+ uint32_t *ptr = (uint32_t *)(compressed_buf);
+ *ptr++ = toku_htod32(total_size);
+ *ptr++ = toku_htod32(n_sub_blocks);
+ for (int i=0; i<n_sub_blocks; i++) {
+ ptr[0] = toku_htod32(sub_block[i].compressed_size);
+ ptr[1] = toku_htod32(sub_block[i].uncompressed_size);
+ ptr[2] = toku_htod32(sub_block[i].xsum);
+ ptr += 3;
+ }
+ // Mark as written
+ wb->ndone = 0;
+
+ size_t size_to_write = total_size + 4; // Includes writing total_size
+
+ r = toku_os_fwrite(compressed_buf, 1, size_to_write, stream);
+
+ if (compressed_buf) {
+ toku_free(compressed_buf);
+ }
+ return r;
+}
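+
+// For reference, the bytes that bl_finish_compressed_write emits for one compressed chunk are laid out as:
+//   uint32_t total_size                 (bytes that follow; excludes this field)
+//   uint32_t n_sub_blocks
+//   n_sub_blocks x { uint32_t compressed_size, uint32_t uncompressed_size, uint32_t xsum }
+//   the concatenated compressed sub block payloads
+// bl_compressed_write below fills a wbuf and flushes one such chunk whenever the buffer
+// reaches MAX_UNCOMPRESSED_BUF bytes.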
+
+static int bl_compressed_write(void *ptr,
+ size_t nbytes,
+ TOKU_FILE *stream,
+ struct wbuf *wb) {
+ invariant(wb->size <= MAX_UNCOMPRESSED_BUF);
+ size_t bytes_left = nbytes;
+ char *buf = (char *)ptr;
+
+ while (bytes_left > 0) {
+ size_t bytes_to_copy = bytes_left;
+ if (wb->ndone + bytes_to_copy > wb->size) {
+ bytes_to_copy = wb->size - wb->ndone;
+ }
+ wbuf_nocrc_literal_bytes(wb, buf, bytes_to_copy);
+ if (wb->ndone == wb->size) {
+ //Compress, write to disk, and empty out wb
+ int r = bl_finish_compressed_write(stream, wb);
+ if (r != 0) {
+ errno = r;
+ return -1;
+ }
+ wb->ndone = 0;
+ }
+ bytes_left -= bytes_to_copy;
+ buf += bytes_to_copy;
+ }
+ return 0;
+}
+
+static int bl_fwrite(void *ptr,
+ size_t size,
+ size_t nmemb,
+ TOKU_FILE *stream,
+ struct wbuf *wb,
+ FTLOADER bl)
+/* Effect: this is a wrapper for fwrite that returns 0 on success, otherwise
+ * returns an error number.
+ * Arguments:
+ * ptr the data to be written.
+ * size the amount of data to be written.
+ * nmemb the number of units of size to be written.
+ * stream write the data here.
+ * wb where to write uncompressed data (if we're compressing) or ignore if
+ * NULL
+ * bl passed so we can panic the ft_loader if something goes wrong
+ * (recording the error number).
+ * Return value: 0 on success, an error number otherwise.
+ */
+{
+ if (!bl->compress_intermediates || !wb) {
+ return toku_os_fwrite(ptr, size, nmemb, stream);
+ } else {
+ size_t num_bytes = size * nmemb;
+ int r = bl_compressed_write(ptr, num_bytes, stream, wb);
+ if (r != 0) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+static int bl_fread(void *ptr, size_t size, size_t nmemb, TOKU_FILE *stream)
+/* Effect: this is a wrapper for fread that returns 0 on success, otherwise
+ * returns an error number.
+ * Arguments:
+ * ptr read data into here.
+ * size size of data element to be read.
+ * nmemb number of data elements to be read.
+ * stream where to read the data from.
+ * Return value: 0 on success, an error number otherwise.
+ */
+{
+ return toku_os_fread(ptr, size, nmemb, stream);
+}
+
+static int bl_write_dbt(DBT *dbt,
+ TOKU_FILE *datafile,
+ uint64_t *dataoff,
+ struct wbuf *wb,
+ FTLOADER bl) {
+ int r;
+ int dlen = dbt->size;
+ if ((r=bl_fwrite(&dlen, sizeof(dlen), 1, datafile, wb, bl))) return r;
+ if ((r=bl_fwrite(dbt->data, 1, dlen, datafile, wb, bl))) return r;
+ if (dataoff)
+ *dataoff += dlen + sizeof(dlen);
+ return 0;
+}
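+
+// For reference, bl_write_dbt serializes a DBT as a native-order int length followed by the
+// raw bytes, so one row written by loader_write_row is simply
+//   <int klen> <key bytes> <int vlen> <val bytes>
+// bl_read_dbt and bl_read_dbt_from_dbufio read the same format back.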
+
+static int bl_read_dbt(/*in*/ DBT *dbt, TOKU_FILE *stream) {
+ int len;
+ {
+ int r;
+ if ((r = bl_fread(&len, sizeof(len), 1, stream))) return r;
+ invariant(len>=0);
+ }
+ if ((int)dbt->ulen<len) { dbt->ulen=len; dbt->data=toku_xrealloc(dbt->data, len); }
+ {
+ int r;
+ if ((r = bl_fread(dbt->data, 1, len, stream))) return r;
+ }
+ dbt->size = len;
+ return 0;
+}
+
+static int bl_read_dbt_from_dbufio (/*in*/DBT *dbt, DBUFIO_FILESET bfs, int filenum)
+{
+ int result = 0;
+ uint32_t len;
+ {
+ size_t n_read;
+ int r = dbufio_fileset_read(bfs, filenum, &len, sizeof(len), &n_read);
+ if (r!=0) {
+ result = r;
+ } else if (n_read<sizeof(len)) {
+ result = TOKUDB_NO_DATA; // must have run out of data prematurely. This is not EOF, it's a real error.
+ }
+ }
+ if (result==0) {
+ if (dbt->ulen<len) {
+ void * data = toku_realloc(dbt->data, len);
+ if (data==NULL) {
+ result = get_error_errno();
+ } else {
+ dbt->ulen=len;
+ dbt->data=data;
+ }
+ }
+ }
+ if (result==0) {
+ size_t n_read;
+ int r = dbufio_fileset_read(bfs, filenum, dbt->data, len, &n_read);
+ if (r!=0) {
+ result = r;
+ } else if (n_read<len) {
+ result = TOKUDB_NO_DATA; // must have run out of data prematurely. This is not EOF, it's a real error.
+ } else {
+ dbt->size = len;
+ }
+ }
+ return result;
+}
+
+int loader_write_row(DBT *key,
+ DBT *val,
+ FIDX data,
+ TOKU_FILE *dataf,
+ uint64_t *dataoff,
+ struct wbuf *wb,
+ FTLOADER bl)
+/* Effect: Given a key and a val (both DBTs), write them to a file. Increment
+ * *dataoff so that it's up to date.
+ * Arguments:
+ * key, val write these.
+ * data, dataf the file to write them to (fidx and its open stream).
+ * dataoff a pointer to a counter that keeps track of the amount of data
+ * written so far.
+ * wb a pointer (possibly NULL) to buffer uncompressed output
+ * bl the ft_loader (passed so we can panic if needed).
+ * Return value: 0 on success, an error number otherwise.
+ */
+{
+ //int klen = key->size;
+ //int vlen = val->size;
+ int r;
+ // we have a chance to handle the errors because when we close we can delete all the files.
+ if ((r=bl_write_dbt(key, dataf, dataoff, wb, bl))) return r;
+ if ((r=bl_write_dbt(val, dataf, dataoff, wb, bl))) return r;
+ toku_mutex_lock(&bl->file_infos.lock);
+ bl->file_infos.file_infos[data.idx].n_rows++;
+ toku_mutex_unlock(&bl->file_infos.lock);
+ return 0;
+}
+
+int loader_read_row(TOKU_FILE *f, DBT *key, DBT *val)
+/* Effect: Read a key value pair from a file. The DBTs must have DB_DBT_REALLOC
+ * set.
+ * Arguments:
+ * f where to read it from.
+ * key, val read it into these.
+ * Return value: 0 on success, an error number otherwise.
+ * Requires: The DBTs must have DB_DBT_REALLOC
+ */
+{
+ {
+ int r = bl_read_dbt(key, f);
+ if (r!=0) return r;
+ }
+ {
+ int r = bl_read_dbt(val, f);
+ if (r!=0) return r;
+ }
+ return 0;
+}
+
+static int loader_read_row_from_dbufio (DBUFIO_FILESET bfs, int filenum, DBT *key, DBT *val)
+/* Effect: Read a key value pair from a file. The DBTs must have DB_DBT_REALLOC set.
+ * Arguments:
+ * bfs, filenum where to read it from.
+ * key, val read it into these.
+ * Return value: 0 on success, an error number otherwise.
+ * Requires: The DBTs must have DB_DBT_REALLOC
+ */
+{
+ {
+ int r = bl_read_dbt_from_dbufio(key, bfs, filenum);
+ if (r!=0) return r;
+ }
+ {
+ int r = bl_read_dbt_from_dbufio(val, bfs, filenum);
+ if (r!=0) return r;
+ }
+ return 0;
+}
+
+
+int init_rowset (struct rowset *rows, uint64_t memory_budget)
+/* Effect: Initialize a collection of rows to be empty. */
+{
+ int result = 0;
+
+ rows->memory_budget = memory_budget;
+
+ rows->rows = NULL;
+ rows->data = NULL;
+
+ rows->n_rows = 0;
+ rows->n_rows_limit = 100;
+ MALLOC_N(rows->n_rows_limit, rows->rows);
+ if (rows->rows == NULL)
+ result = get_error_errno();
+ rows->n_bytes = 0;
+ rows->n_bytes_limit = (size_factor==1) ? 1024*size_factor*16 : memory_budget;
+ //printf("%s:%d n_bytes_limit=%ld (size_factor based limit=%d)\n", __FILE__, __LINE__, rows->n_bytes_limit, 1024*size_factor*16);
+ rows->data = (char *) toku_malloc(rows->n_bytes_limit);
+ if (rows->rows==NULL || rows->data==NULL) {
+ if (result == 0)
+ result = get_error_errno();
+ toku_free(rows->rows);
+ toku_free(rows->data);
+ rows->rows = NULL;
+ rows->data = NULL;
+ }
+ return result;
+}
+
+static void zero_rowset (struct rowset *rows) {
+ memset(rows, 0, sizeof(*rows));
+}
+
+void destroy_rowset (struct rowset *rows) {
+ if ( rows ) {
+ toku_free(rows->data);
+ toku_free(rows->rows);
+ zero_rowset(rows);
+ }
+}
+
+static int row_wont_fit (struct rowset *rows, size_t size)
+/* Effect: Return nonzero if adding a row of size SIZE would be too big (bigger than the buffer limit) */
+{
+ // Account for the memory used by the data and also the row structures.
+ size_t memory_in_use = (rows->n_rows*sizeof(struct row)
+ + rows->n_bytes);
+ return (rows->memory_budget < memory_in_use + size);
+}
+
+int add_row (struct rowset *rows, DBT *key, DBT *val)
+/* Effect: add a row to a collection. */
+{
+ int result = 0;
+ if (rows->n_rows >= rows->n_rows_limit) {
+ struct row *old_rows = rows->rows;
+ size_t old_n_rows_limit = rows->n_rows_limit;
+ rows->n_rows_limit *= 2;
+ REALLOC_N(rows->n_rows_limit, rows->rows);
+ if (rows->rows == NULL) {
+ result = get_error_errno();
+ rows->rows = old_rows;
+ rows->n_rows_limit = old_n_rows_limit;
+ return result;
+ }
+ }
+ size_t off = rows->n_bytes;
+ size_t next_off = off + key->size + val->size;
+
+ struct row newrow;
+ memset(&newrow, 0, sizeof newrow); newrow.off = off; newrow.klen = key->size; newrow.vlen = val->size;
+
+ rows->rows[rows->n_rows++] = newrow;
+ if (next_off > rows->n_bytes_limit) {
+ size_t old_n_bytes_limit = rows->n_bytes_limit;
+ while (next_off > rows->n_bytes_limit) {
+ rows->n_bytes_limit = rows->n_bytes_limit*2;
+ }
+ invariant(next_off <= rows->n_bytes_limit);
+ char *old_data = rows->data;
+ REALLOC_N(rows->n_bytes_limit, rows->data);
+ if (rows->data == NULL) {
+ result = get_error_errno();
+ rows->data = old_data;
+ rows->n_bytes_limit = old_n_bytes_limit;
+ return result;
+ }
+ }
+ memcpy(rows->data+off, key->data, key->size);
+ memcpy(rows->data+off+key->size, val->data, val->size);
+ rows->n_bytes = next_off;
+ return result;
+}
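+
+// For reference, a rowset keeps two parallel allocations: rows[] holds fixed-size
+// descriptors {off, klen, vlen} and data[] holds the packed key/value bytes, so row i
+// lives at data[off .. off+klen+vlen).  row_wont_fit charges both the descriptors
+// (n_rows * sizeof(struct row)) and the packed bytes against memory_budget.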
+
+static int process_primary_rows (FTLOADER bl, struct rowset *primary_rowset);
+
+static int finish_primary_rows_internal (FTLOADER bl)
+// now we have been asked to finish up.
+// Be sure to destroy the rowsets.
+{
+ int *MALLOC_N(bl->N, ra);
+ if (ra==NULL) return get_error_errno();
+
+ for (int i = 0; i < bl->N; i++) {
+ //printf("%s:%d extractor finishing index %d with %ld rows\n", __FILE__, __LINE__, i, rows->n_rows);
+ ra[i] = sort_and_write_rows(bl->rows[i], &(bl->fs[i]), bl, i, bl->dbs[i], bl->bt_compare_funs[i]);
+ zero_rowset(&bl->rows[i]);
+ }
+
+ // accept any of the error codes (in this case, the last one).
+ int r = 0;
+ for (int i = 0; i < bl->N; i++)
+ if (ra[i] != 0)
+ r = ra[i];
+
+ toku_free(ra);
+ return r;
+}
+
+static int finish_primary_rows (FTLOADER bl) {
+ return finish_primary_rows_internal (bl);
+}
+
+static void* extractor_thread (void *blv) {
+ FTLOADER bl = (FTLOADER)blv;
+ int r = 0;
+ while (1) {
+ void *item = nullptr;
+ {
+ int rq = toku_queue_deq(bl->primary_rowset_queue, &item, NULL, NULL);
+ if (rq==EOF) break;
+ invariant(rq==0); // other errors are arbitrarily bad.
+ }
+ struct rowset *primary_rowset = (struct rowset *)item;
+
+ //printf("%s:%d extractor got %ld rows\n", __FILE__, __LINE__, primary_rowset.n_rows);
+
+ // Now we have some rows to output
+ {
+ r = process_primary_rows(bl, primary_rowset);
+ if (r)
+ ft_loader_set_panic(bl, r, false, 0, nullptr, nullptr);
+ }
+ }
+
+ //printf("%s:%d extractor finishing\n", __FILE__, __LINE__);
+ if (r == 0) {
+ r = finish_primary_rows(bl);
+ if (r)
+ ft_loader_set_panic(bl, r, false, 0, nullptr, nullptr);
+ }
+ toku_instr_delete_current_thread();
+ return nullptr;
+}
+
+static void enqueue_for_extraction(FTLOADER bl) {
+ //printf("%s:%d enqueing %ld items\n", __FILE__, __LINE__, bl->primary_rowset.n_rows);
+ struct rowset *XMALLOC(enqueue_me);
+ *enqueue_me = bl->primary_rowset;
+ zero_rowset(&bl->primary_rowset);
+ int r = toku_queue_enq(bl->primary_rowset_queue, (void*)enqueue_me, 1, NULL);
+ resource_assert_zero(r);
+}
+
+static int loader_do_put(FTLOADER bl,
+ DBT *pkey,
+ DBT *pval)
+{
+ int result;
+ result = add_row(&bl->primary_rowset, pkey, pval);
+ if (result == 0 && row_wont_fit(&bl->primary_rowset, 0)) {
+ // queue the rows for further processing by the extractor thread.
+ //printf("%s:%d please extract %ld\n", __FILE__, __LINE__, bl->primary_rowset.n_rows);
+ enqueue_for_extraction(bl);
+ {
+ int r = init_rowset(&bl->primary_rowset, memory_per_rowset_during_extract(bl));
+ // bl->primary_rowset will get destroyed by toku_ft_loader_abort
+ if (r != 0)
+ result = r;
+ }
+ }
+ return result;
+}
+
+static int
+finish_extractor (FTLOADER bl) {
+ //printf("%s:%d now finishing extraction\n", __FILE__, __LINE__);
+
+ int rval;
+
+ if (bl->primary_rowset.n_rows>0) {
+ enqueue_for_extraction(bl);
+ } else {
+ destroy_rowset(&bl->primary_rowset);
+ }
+ //printf("%s:%d please finish extraction\n", __FILE__, __LINE__);
+ {
+ int r = toku_queue_eof(bl->primary_rowset_queue);
+ invariant(r==0);
+ }
+ //printf("%s:%d joining\n", __FILE__, __LINE__);
+ {
+ void *toku_pthread_retval;
+ int r = toku_pthread_join(bl->extractor_thread, &toku_pthread_retval);
+ resource_assert_zero(r);
+ invariant(toku_pthread_retval == NULL);
+ bl->extractor_live = false;
+ }
+ {
+ int r = toku_queue_destroy(bl->primary_rowset_queue);
+ invariant(r==0);
+ bl->primary_rowset_queue = nullptr;
+ }
+
+ rval = ft_loader_fi_close_all(&bl->file_infos);
+
+ //printf("%s:%d joined\n", __FILE__, __LINE__);
+ return rval;
+}
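+
+// Puts and extraction form a small producer/consumer pipeline: loader_do_put appends each
+// row to bl->primary_rowset, and once that rowset exceeds its memory budget it is handed
+// to bl->primary_rowset_queue by enqueue_for_extraction.  The extractor thread dequeues
+// rowsets and runs process_primary_rows on each; finish_extractor enqueues the final
+// partial rowset, marks the queue EOF, joins the thread, and closes the temp files.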
+
+static const DBT zero_dbt = {0,0,0,0};
+
+static DBT make_dbt (void *data, uint32_t size) {
+ DBT result = zero_dbt;
+ result.data = data;
+ result.size = size;
+ return result;
+}
+
+#define inc_error_count() error_count++
+
+static TXNID leafentry_xid(FTLOADER bl, int which_db) {
+ TXNID le_xid = TXNID_NONE;
+ if (bl->root_xids_that_created && bl->load_root_xid != bl->root_xids_that_created[which_db])
+ le_xid = bl->load_root_xid;
+ return le_xid;
+}
+
+size_t ft_loader_leafentry_size(size_t key_size, size_t val_size, TXNID xid) {
+ size_t s = 0;
+ if (xid == TXNID_NONE)
+ s = LE_CLEAN_MEMSIZE(val_size) + key_size + sizeof(uint32_t);
+ else
+ s = LE_MVCC_COMMITTED_MEMSIZE(val_size) + key_size + sizeof(uint32_t);
+ return s;
+}
+
+static int process_primary_rows_internal (FTLOADER bl, struct rowset *primary_rowset)
+// process the rows in primary_rowset, and then destroy the rowset.
+{
+ int error_count = 0;
+ int *XMALLOC_N(bl->N, error_codes);
+
+ // If we parallelize the first for loop, dest_keys/dest_vals init&cleanup need to move inside
+ DBT_ARRAY dest_keys;
+ DBT_ARRAY dest_vals;
+ toku_dbt_array_init(&dest_keys, 1);
+ toku_dbt_array_init(&dest_vals, 1);
+
+ for (int i = 0; i < bl->N; i++) {
+ unsigned int klimit,vlimit; // maximum row sizes.
+ toku_ft_get_maximum_advised_key_value_lengths(&klimit, &vlimit);
+
+ error_codes[i] = 0;
+ struct rowset *rows = &(bl->rows[i]);
+ struct merge_fileset *fs = &(bl->fs[i]);
+ ft_compare_func compare = bl->bt_compare_funs[i];
+
+ // Don't parallelize this loop, or we have to lock access to add_row() which would be a lot of overhead.
+ // Also this way we can reuse the DB_DBT_REALLOC'd values inside dest_keys/dest_vals without a race.
+ for (size_t prownum=0; prownum<primary_rowset->n_rows; prownum++) {
+ if (error_count) break;
+
+ struct row *prow = &primary_rowset->rows[prownum];
+ DBT pkey = zero_dbt;
+ DBT pval = zero_dbt;
+ pkey.data = primary_rowset->data + prow->off;
+ pkey.size = prow->klen;
+ pval.data = primary_rowset->data + prow->off + prow->klen;
+ pval.size = prow->vlen;
+
+
+ DBT_ARRAY key_array;
+ DBT_ARRAY val_array;
+ if (bl->dbs[i] != bl->src_db) {
+ int r = bl->generate_row_for_put(bl->dbs[i], bl->src_db, &dest_keys, &dest_vals, &pkey, &pval);
+ if (r != 0) {
+ error_codes[i] = r;
+ inc_error_count();
+ break;
+ }
+ paranoid_invariant(dest_keys.size <= dest_keys.capacity);
+ paranoid_invariant(dest_vals.size <= dest_vals.capacity);
+ paranoid_invariant(dest_keys.size == dest_vals.size);
+
+ key_array = dest_keys;
+ val_array = dest_vals;
+ } else {
+ key_array.size = key_array.capacity = 1;
+ key_array.dbts = &pkey;
+
+ val_array.size = val_array.capacity = 1;
+ val_array.dbts = &pval;
+ }
+ for (uint32_t row = 0; row < key_array.size; row++) {
+ DBT *dest_key = &key_array.dbts[row];
+ DBT *dest_val = &val_array.dbts[row];
+ if (dest_key->size > klimit) {
+ error_codes[i] = EINVAL;
+ fprintf(stderr, "Key too big (keysize=%d bytes, limit=%d bytes)\n", dest_key->size, klimit);
+ inc_error_count();
+ break;
+ }
+ if (dest_val->size > vlimit) {
+ error_codes[i] = EINVAL;
+ fprintf(stderr, "Row too big (rowsize=%d bytes, limit=%d bytes)\n", dest_val->size, vlimit);
+ inc_error_count();
+ break;
+ }
+
+ bl->extracted_datasizes[i] += ft_loader_leafentry_size(dest_key->size, dest_val->size, leafentry_xid(bl, i));
+
+ if (row_wont_fit(rows, dest_key->size + dest_val->size)) {
+ //printf("%s:%d rows.n_rows=%ld rows.n_bytes=%ld\n", __FILE__, __LINE__, rows->n_rows, rows->n_bytes);
+ int r = sort_and_write_rows(*rows, fs, bl, i, bl->dbs[i], compare); // cannot spawn this because of the race on rows. If we were to create a new rows, and if sort_and_write_rows were to destroy the rows it is passed, we could spawn it, however.
+ // If we do spawn this, then we must account for the additional storage in the memory_per_rowset() function.
+ init_rowset(rows, memory_per_rowset_during_extract(bl)); // we passed the contents of rows to sort_and_write_rows.
+ if (r != 0) {
+ error_codes[i] = r;
+ inc_error_count();
+ break;
+ }
+ }
+ int r = add_row(rows, dest_key, dest_val);
+ if (r != 0) {
+ error_codes[i] = r;
+ inc_error_count();
+ break;
+ }
+ }
+ }
+ }
+ toku_dbt_array_destroy(&dest_keys);
+ toku_dbt_array_destroy(&dest_vals);
+
+ destroy_rowset(primary_rowset);
+ toku_free(primary_rowset);
+ int r = 0;
+ if (error_count > 0) {
+ for (int i=0; i<bl->N; i++) {
+ if (error_codes[i]) {
+ r = error_codes[i];
+ ft_loader_set_panic(bl, r, false, i, nullptr, nullptr);
+ }
+ }
+ invariant(r); // found the error
+ }
+ toku_free(error_codes);
+ return r;
+}
+
+static int process_primary_rows (FTLOADER bl, struct rowset *primary_rowset) {
+ int r = process_primary_rows_internal (bl, primary_rowset);
+ return r;
+}
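+
+// In summary: for each destination database i, process_primary_rows maps every primary
+// (key, val) pair through generate_row_for_put (unless dbs[i] is the source db, in which
+// case the pair is used unchanged), checks the advised key/value size limits, and
+// accumulates the result in bl->rows[i]; whenever that rowset would overflow its memory
+// budget it is sorted and written to a temp file by sort_and_write_rows.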
+
+int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val)
+/* Effect: Put a key-value pair into the ft loader. Called by DB_LOADER->put().
+ * Return value: 0 on success, an error number otherwise.
+ */
+{
+ if (!bl->allow_puts || ft_loader_get_error(&bl->error_callback))
+ return EINVAL; // previous panic
+ bl->n_rows++;
+ return loader_do_put(bl, key, val);
+}
+
+void toku_ft_loader_set_n_rows(FTLOADER bl, uint64_t n_rows) {
+ bl->n_rows = n_rows;
+}
+
+uint64_t toku_ft_loader_get_n_rows(FTLOADER bl) {
+ return bl->n_rows;
+}
+
+int merge_row_arrays_base (struct row dest[/*an+bn*/], struct row a[/*an*/], int an, struct row b[/*bn*/], int bn,
+ int which_db, DB *dest_db, ft_compare_func compare,
+
+ FTLOADER bl,
+ struct rowset *rowset)
+/* Effect: Given two arrays of rows, a and b, merge them using the comparison function, and write them into dest.
+ * This function is suitable for use in a mergesort.
+ * If a pair of duplicate keys is ever noticed, then call the error_callback function (if it exists), and return DB_KEYEXIST.
+ * Arguments:
+ * dest write the rows here
+ * a,b the rows being merged
+ * an,bn the length of a and b respectively.
+ * dest_db We need the dest_db to run the comparison function.
+ * compare We need the compare function for the dest_db.
+ */
+{
+ while (an>0 && bn>0) {
+ DBT akey; memset(&akey, 0, sizeof akey); akey.data=rowset->data+a->off; akey.size=a->klen;
+ DBT bkey; memset(&bkey, 0, sizeof bkey); bkey.data=rowset->data+b->off; bkey.size=b->klen;
+
+ int compare_result = compare(dest_db, &akey, &bkey);
+ if (compare_result==0) {
+ if (bl->error_callback.error_callback) {
+ DBT aval; memset(&aval, 0, sizeof aval); aval.data=rowset->data + a->off + a->klen; aval.size = a->vlen;
+ ft_loader_set_error(&bl->error_callback, DB_KEYEXIST, dest_db, which_db, &akey, &aval);
+ }
+ return DB_KEYEXIST;
+ } else if (compare_result<0) {
+ // a is smaller
+ *dest = *a;
+ dest++; a++; an--;
+ } else {
+ *dest = *b;
+ dest++; b++; bn--;
+ }
+ }
+ while (an>0) {
+ *dest = *a;
+ dest++; a++; an--;
+ }
+ while (bn>0) {
+ *dest = *b;
+ dest++; b++; bn--;
+ }
+ return 0;
+}
+
+static int binary_search (int *location,
+ const DBT *key,
+ struct row a[/*an*/], int an,
+ int abefore,
+ int which_db, DB *dest_db, ft_compare_func compare,
+ FTLOADER bl,
+ struct rowset *rowset)
+// Given a sorted array of rows a, and a dbt key, find the first row in a that is > key.
+// If no such row exists, then consider the result to be equal to an.
+// On success store abefore+the index into *location
+// Return 0 on success.
+// Return DB_KEYEXIST if we find a row that is equal to key.
+{
+ if (an==0) {
+ *location = abefore;
+ return 0;
+ } else {
+ int a2 = an/2;
+ DBT akey = make_dbt(rowset->data+a[a2].off, a[a2].klen);
+ int compare_result = compare(dest_db, key, &akey);
+ if (compare_result==0) {
+ if (bl->error_callback.error_callback) {
+ DBT aval = make_dbt(rowset->data + a[a2].off + a[a2].klen, a[a2].vlen);
+ ft_loader_set_error(&bl->error_callback, DB_KEYEXIST, dest_db, which_db, &akey, &aval);
+ }
+ return DB_KEYEXIST;
+ } else if (compare_result<0) {
+ // key is before a2
+ if (an==1) {
+ *location = abefore;
+ return 0;
+ } else {
+ return binary_search(location, key,
+ a, a2,
+ abefore,
+ which_db, dest_db, compare, bl, rowset);
+ }
+ } else {
+ // key is after a2
+ if (an==1) {
+ *location = abefore + 1;
+ return 0;
+ } else {
+ return binary_search(location, key,
+ a+a2, an-a2,
+ abefore+a2,
+ which_db, dest_db, compare, bl, rowset);
+ }
+ }
+ }
+}
+
+
+#define SWAP(typ,x,y) { typ tmp = x; x=y; y=tmp; }
+
+static int merge_row_arrays (struct row dest[/*an+bn*/], struct row a[/*an*/], int an, struct row b[/*bn*/], int bn,
+ int which_db, DB *dest_db, ft_compare_func compare,
+ FTLOADER bl,
+ struct rowset *rowset)
+/* Effect: Given two sorted arrays of rows, a and b, merge them using the comparison function, and write them into dest.
+ * Arguments:
+ * dest write the rows here
+ * a,b the rows being merged
+ * an,bn the length of a and b respectively.
+ * dest_db We need the dest_db to run the comparison function.
+ * compare We need the compare function for the dest_db.
+ */
+{
+ if (an + bn < 10000) {
+ return merge_row_arrays_base(dest, a, an, b, bn, which_db, dest_db, compare, bl, rowset);
+ }
+ if (an < bn) {
+ SWAP(struct row *,a, b)
+ SWAP(int ,an,bn)
+ }
+ // an >= bn
+ int a2 = an/2;
+ DBT akey = make_dbt(rowset->data+a[a2].off, a[a2].klen);
+ int b2 = 0; // initialize to zero so we can add the answer in.
+ {
+ int r = binary_search(&b2, &akey, b, bn, 0, which_db, dest_db, compare, bl, rowset);
+ if (r!=0) return r; // for example if we found a duplicate, called the error_callback, and now we return an error code.
+ }
+ int ra, rb;
+ ra = merge_row_arrays(dest, a, a2, b, b2, which_db, dest_db, compare, bl, rowset);
+ rb = merge_row_arrays(dest+a2+b2, a+a2, an-a2, b+b2, bn-b2, which_db, dest_db, compare, bl, rowset);
+ if (ra!=0) return ra;
+ else return rb;
+}
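+
+// For large inputs merge_row_arrays splits the larger array a at its midpoint a2, binary
+// searches b for that split key to find b2, and then merges a[0..a2) with b[0..b2) and
+// a[a2..an) with b[b2..bn) independently; merges of fewer than 10000 rows fall back to
+// the sequential merge_row_arrays_base.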
+
+int mergesort_row_array (struct row rows[/*n*/], int n, int which_db, DB *dest_db, ft_compare_func compare, FTLOADER bl, struct rowset *rowset)
+/* Sort an array of rows (using mergesort).
+ * Arguments:
+ * rows sort this array of rows.
+ * n the length of the array.
+ * dest_db used by the comparison function.
+ * compare the compare function
+ */
+{
+ if (n<=1) return 0; // base case is sorted
+ int mid = n/2;
+ int r1, r2;
+ r1 = mergesort_row_array (rows, mid, which_db, dest_db, compare, bl, rowset);
+
+ // Don't spawn this one explicitly
+ r2 = mergesort_row_array (rows+mid, n-mid, which_db, dest_db, compare, bl, rowset);
+
+ if (r1!=0) return r1;
+ if (r2!=0) return r2;
+
+ struct row *MALLOC_N(n, tmp);
+ if (tmp == NULL) return get_error_errno();
+ {
+ int r = merge_row_arrays(tmp, rows, mid, rows+mid, n-mid, which_db, dest_db, compare, bl, rowset);
+ if (r!=0) {
+ toku_free(tmp);
+ return r;
+ }
+ }
+ memcpy(rows, tmp, sizeof(*tmp)*n);
+ toku_free(tmp);
+ return 0;
+}
+
+// C function for testing mergesort_row_array
+int ft_loader_mergesort_row_array (struct row rows[/*n*/], int n, int which_db, DB *dest_db, ft_compare_func compare, FTLOADER bl, struct rowset *rowset) {
+ return mergesort_row_array (rows, n, which_db, dest_db, compare, bl, rowset);
+}
+
+static int sort_rows (struct rowset *rows, int which_db, DB *dest_db, ft_compare_func compare,
+ FTLOADER bl)
+/* Effect: Sort a collection of rows.
+ * If any duplicates are found, then call the error_callback function and return non zero.
+ * Otherwise return 0.
+ * Arguments:
+ * rows the rowset to sort. */
+{
+ return mergesort_row_array(rows->rows, rows->n_rows, which_db, dest_db, compare, bl, rows);
+}
+
+/* filesets Maintain a collection of files. Typically these files are each individually sorted, and we will merge them.
+ * These files have two parts, one is for the data rows, and the other is a collection of offsets so we can more easily parallelize the manipulation (e.g., by allowing us to find the offset of the ith row quickly). */
+
+void init_merge_fileset (struct merge_fileset *fs)
+/* Effect: Initialize a fileset */
+{
+ fs->have_sorted_output = false;
+ fs->sorted_output = FIDX_NULL;
+ fs->prev_key = zero_dbt;
+ fs->prev_key.flags = DB_DBT_REALLOC;
+
+ fs->n_temp_files = 0;
+ fs->n_temp_files_limit = 0;
+ fs->data_fidxs = NULL;
+}
+
+void destroy_merge_fileset (struct merge_fileset *fs)
+/* Effect: Destroy a fileset. */
+{
+ if ( fs ) {
+ toku_destroy_dbt(&fs->prev_key);
+ fs->n_temp_files = 0;
+ fs->n_temp_files_limit = 0;
+ toku_free(fs->data_fidxs);
+ fs->data_fidxs = NULL;
+ }
+}
+
+
+static int extend_fileset (FTLOADER bl, struct merge_fileset *fs, FIDX*ffile)
+/* Effect: Add a file for sorted data to the fileset.
+ * Arguments:
+ * bl the ft_loader (needed to panic if anything goes wrong, and also to get the temp_file_template).
+ * fs the fileset
+ * ffile the data file (which will be open)
+ */
+{
+ FIDX sfile;
+ int r;
+ r = ft_loader_open_temp_file(bl, &sfile); if (r!=0) return r;
+
+ if (fs->n_temp_files+1 > fs->n_temp_files_limit) {
+ fs->n_temp_files_limit = (fs->n_temp_files+1)*2;
+ XREALLOC_N(fs->n_temp_files_limit, fs->data_fidxs);
+ }
+ fs->data_fidxs[fs->n_temp_files] = sfile;
+ fs->n_temp_files++;
+
+ *ffile = sfile;
+ return 0;
+}
+
+// RFP maybe this should be buried in the ft_loader struct
+static toku_mutex_t update_progress_lock = TOKU_MUTEX_INITIALIZER;
+
+static int update_progress (int N,
+ FTLOADER bl,
+ const char *UU(message))
+{
+ // Must protect the increment and the call to the poll_function.
+ toku_mutex_lock(&update_progress_lock);
+ bl->progress+=N;
+
+ int result;
+ if (bl->progress_callback_result == 0) {
+ //printf(" %20s: %d ", message, bl->progress);
+ result = ft_loader_call_poll_function(&bl->poll_callback, (float)bl->progress/(float)PROGRESS_MAX);
+ if (result!=0) {
+ bl->progress_callback_result = result;
+ }
+ } else {
+ result = bl->progress_callback_result;
+ }
+ toku_mutex_unlock(&update_progress_lock);
+ return result;
+}
+
+
+static int write_rowset_to_file (FTLOADER bl, FIDX sfile, const struct rowset rows) {
+ int r = 0;
+ // Allocate a buffer if we're compressing intermediates.
+ char *uncompressed_buffer = nullptr;
+ if (bl->compress_intermediates) {
+ MALLOC_N(MAX_UNCOMPRESSED_BUF, uncompressed_buffer);
+ if (uncompressed_buffer == nullptr) {
+ return ENOMEM;
+ }
+ }
+ struct wbuf wb;
+ wbuf_init(&wb, uncompressed_buffer, MAX_UNCOMPRESSED_BUF);
+
+ TOKU_FILE *sstream = toku_bl_fidx2file(bl, sfile);
+ for (size_t i = 0; i < rows.n_rows; i++) {
+ DBT skey = make_dbt(rows.data + rows.rows[i].off, rows.rows[i].klen);
+ DBT sval = make_dbt(rows.data + rows.rows[i].off + rows.rows[i].klen,
+ rows.rows[i].vlen);
+
+ uint64_t soffset=0; // don't really need this.
+ r = loader_write_row(&skey, &sval, sfile, sstream, &soffset, &wb, bl);
+ if (r != 0) {
+ goto exit;
+ }
+ }
+
+ if (bl->compress_intermediates && wb.ndone > 0) {
+ r = bl_finish_compressed_write(sstream, &wb);
+ if (r != 0) {
+ goto exit;
+ }
+ }
+ r = 0;
+exit:
+ if (uncompressed_buffer) {
+ toku_free(uncompressed_buffer);
+ }
+ return r;
+}
+
+
+int sort_and_write_rows (struct rowset rows, struct merge_fileset *fs, FTLOADER bl, int which_db, DB *dest_db, ft_compare_func compare)
+/* Effect: Given a rowset, sort it and write it to a temporary file.
+ * Note: The loader maintains for each index the most recently written-to file, as well as the DBT for the last key written into that file.
+ * If this rowset is sorted and all greater than that dbt, then we append to the file (skipping the sort, and reducing the number of temporary files).
+ * Arguments:
+ * rows the rowset
+ * fs the fileset into which the sorted data will be added
+ * bl the ft_loader
+ * dest_db the DB, needed for the comparison function.
+ * compare The comparison function.
+ * Returns 0 on success, otherwise an error number.
+ * Destroy the rowset after finishing it.
+ * Note: There is no sense in trying to calculate progress by this function since it's done concurrently with the loader->put operation.
+ * Note first time called: invariant: fs->have_sorted_output == false
+ */
+{
+ //printf(" sort_and_write use %d progress=%d fin at %d\n", progress_allocation, bl->progress, bl->progress+progress_allocation);
+
+ // TODO: erase the files, and deal with all the cleanup on error paths
+ //printf("%s:%d sort_rows n_rows=%ld\n", __FILE__, __LINE__, rows->n_rows);
+ //bl_time_t before_sort = bl_time_now();
+
+ int result;
+ if (rows.n_rows == 0) {
+ result = 0;
+ } else {
+ result = sort_rows(&rows, which_db, dest_db, compare, bl);
+
+ //bl_time_t after_sort = bl_time_now();
+
+ if (result == 0) {
+ DBT min_rowset_key = make_dbt(rows.data+rows.rows[0].off, rows.rows[0].klen);
+ if (fs->have_sorted_output && compare(dest_db, &fs->prev_key, &min_rowset_key) < 0) {
+ // write everything to the same output if the max key in the temp file (prev_key) is < min of the sorted rowset
+ result = write_rowset_to_file(bl, fs->sorted_output, rows);
+ if (result == 0) {
+ // set the max key in the temp file to the max key in the sorted rowset
+ result = toku_dbt_set(rows.rows[rows.n_rows-1].klen, rows.data + rows.rows[rows.n_rows-1].off, &fs->prev_key, NULL);
+ }
+ } else {
+ // write the sorted rowset into a new temp file
+ if (fs->have_sorted_output) {
+ fs->have_sorted_output = false;
+ result = ft_loader_fi_close(&bl->file_infos, fs->sorted_output, true);
+ }
+ if (result == 0) {
+ FIDX sfile = FIDX_NULL;
+ result = extend_fileset(bl, fs, &sfile);
+ if (result == 0) {
+ result = write_rowset_to_file(bl, sfile, rows);
+ if (result == 0) {
+ fs->have_sorted_output = true; fs->sorted_output = sfile;
+ // set the max key in the temp file to the max key in the sorted rowset
+ result = toku_dbt_set(rows.rows[rows.n_rows-1].klen, rows.data + rows.rows[rows.n_rows-1].off, &fs->prev_key, NULL);
+ }
+ }
+ }
+ // Note: if result == 0 then invariant fs->have_sorted_output == true
+ }
+ }
+ }
+
+ destroy_rowset(&rows);
+
+ //bl_time_t after_write = bl_time_now();
+
+ return result;
+}
+
+// C function for testing sort_and_write_rows
+int ft_loader_sort_and_write_rows (struct rowset *rows, struct merge_fileset *fs, FTLOADER bl, int which_db, DB *dest_db, ft_compare_func compare) {
+ return sort_and_write_rows (*rows, fs, bl, which_db, dest_db, compare);
+}
+
+int toku_merge_some_files_using_dbufio(const bool to_q,
+ FIDX dest_data,
+ QUEUE q,
+ int n_sources,
+ DBUFIO_FILESET bfs,
+ FIDX srcs_fidxs[/*n_sources*/],
+ FTLOADER bl,
+ int which_db,
+ DB *dest_db,
+ ft_compare_func compare,
+ int progress_allocation)
+/* Effect: Given an array of FILE*'s, each containing sorted rows, merge the data and
+ * write it to an output. All the files remain open after the merge.
+ * This merge is performed in one pass, so don't pass too many files in. If
+ * you need a tree of merges do it elsewhere.
+ * If TO_Q is true then we write rowsets into queue Q. Otherwise we write
+ * into dest_data.
+ * Modifies: May modify the arrays of files (but if modified, it must be a
+ * permutation so the caller can use that array to close everything.)
+ * Requires: The number of sources is at least one, and each of the input files
+ * must have at least one row in it.
+ * Arguments:
+ * to_q boolean indicating that output is a queue (true) or a file
+ * (false)
+ * dest_data where to write the sorted data
+ * q where to write the sorted data
+ * n_sources how many source files.
+ * srcs_fidxs the array of source data files.
+ * bl the ft_loader.
+ * dest_db the destination DB (used in the comparison function).
+ * Return value: 0 on success, otherwise an error number.
+ * The fidxs are not closed by this function.
+ */
+{
+ int result = 0;
+
+ TOKU_FILE *dest_stream = to_q ? nullptr : toku_bl_fidx2file(bl, dest_data);
+
+ // printf(" merge_some_files progress=%d fin at %d\n", bl->progress,
+ // bl->progress+progress_allocation);
+ DBT keys[n_sources];
+ DBT vals[n_sources];
+ uint64_t dataoff[n_sources];
+ DBT zero = zero_dbt; zero.flags=DB_DBT_REALLOC;
+
+ for (int i=0; i<n_sources; i++) {
+ keys[i] = vals[i] = zero; // fill these all in with zero so we can delete stuff more reliably.
+ }
+
+ pqueue_t *pq = NULL;
+ pqueue_node_t *MALLOC_N(n_sources, pq_nodes); // freed in cleanup
+ if (pq_nodes == NULL) { result = get_error_errno(); }
+
+ if (result==0) {
+ int r = pqueue_init(&pq, n_sources, which_db, dest_db, compare, &bl->error_callback);
+ if (r!=0) result = r;
+ }
+
+ uint64_t n_rows = 0;
+ if (result==0) {
+ // load pqueue with first value from each source
+ for (int i=0; i<n_sources; i++) {
+ int r = loader_read_row_from_dbufio(bfs, i, &keys[i], &vals[i]);
+ if (r==EOF) continue; // if the file is empty, don't initialize the pqueue.
+ if (r!=0) {
+ result = r;
+ break;
+ }
+
+ pq_nodes[i].key = &keys[i];
+ pq_nodes[i].val = &vals[i];
+ pq_nodes[i].i = i;
+ r = pqueue_insert(pq, &pq_nodes[i]);
+ if (r!=0) {
+ result = r;
+ // path tested by loader-dup-test5.tdbrun
+ // printf("%s:%d returning\n", __FILE__, __LINE__);
+ break;
+ }
+
+ dataoff[i] = 0;
+ toku_mutex_lock(&bl->file_infos.lock);
+ n_rows += bl->file_infos.file_infos[srcs_fidxs[i].idx].n_rows;
+ toku_mutex_unlock(&bl->file_infos.lock);
+ }
+ }
+ uint64_t n_rows_done = 0;
+
+ struct rowset *output_rowset = NULL;
+ if (result==0 && to_q) {
+ XMALLOC(output_rowset); // freed in cleanup
+ int r = init_rowset(output_rowset, memory_per_rowset_during_merge(bl, n_sources, to_q));
+ if (r!=0) result = r;
+ }
+
+ // Allocate a buffer if we're compressing intermediates.
+ char *uncompressed_buffer = nullptr;
+ struct wbuf wb;
+ if (bl->compress_intermediates && !to_q) {
+ MALLOC_N(MAX_UNCOMPRESSED_BUF, uncompressed_buffer);
+ if (uncompressed_buffer == nullptr) {
+ result = ENOMEM;
+ }
+ }
+ wbuf_init(&wb, uncompressed_buffer, MAX_UNCOMPRESSED_BUF);
+
+ //printf(" n_rows=%ld\n", n_rows);
+ while (result==0 && pqueue_size(pq)>0) {
+ int mini;
+ {
+ // get the minimum
+ pqueue_node_t *node;
+ int r = pqueue_pop(pq, &node);
+ if (r!=0) {
+ result = r;
+ invariant(0);
+ break;
+ }
+ mini = node->i;
+ }
+ if (to_q) {
+ if (row_wont_fit(output_rowset, keys[mini].size + vals[mini].size)) {
+ {
+ int r = toku_queue_enq(q, (void*)output_rowset, 1, NULL);
+ if (r!=0) {
+ result = r;
+ break;
+ }
+ }
+ XMALLOC(output_rowset); // freed in cleanup
+ {
+ int r = init_rowset(output_rowset, memory_per_rowset_during_merge(bl, n_sources, to_q));
+ if (r!=0) {
+ result = r;
+ break;
+ }
+ }
+ }
+ {
+ int r = add_row(output_rowset, &keys[mini], &vals[mini]);
+ if (r!=0) {
+ result = r;
+ break;
+ }
+ }
+ } else {
+ // write it to the dest file
+ int r = loader_write_row(&keys[mini], &vals[mini], dest_data, dest_stream, &dataoff[mini], &wb, bl);
+ if (r!=0) {
+ result = r;
+ break;
+ }
+ }
+
+ {
+ // read next row from file that just sourced min value
+ int r = loader_read_row_from_dbufio(bfs, mini, &keys[mini], &vals[mini]);
+ if (r!=0) {
+ if (r==EOF) {
+ // on feof, queue size permanently smaller
+ toku_free(keys[mini].data); keys[mini].data = NULL;
+ toku_free(vals[mini].data); vals[mini].data = NULL;
+ } else {
+ fprintf(stderr, "%s:%d r=%d errno=%d bfs=%p mini=%d\n", __FILE__, __LINE__, r, get_maybe_error_errno(), bfs, mini);
+ dbufio_print(bfs);
+ result = r;
+ break;
+ }
+ } else {
+ // insert value into queue (re-populate queue)
+ pq_nodes[mini].key = &keys[mini];
+ r = pqueue_insert(pq, &pq_nodes[mini]);
+ if (r!=0) {
+ // Note: This error path tested by loader-dup-test1.tdbrun (and by loader-dup-test4)
+ result = r;
+ // printf("%s:%d returning\n", __FILE__, __LINE__);
+ break;
+ }
+ }
+ }
+
+ n_rows_done++;
+ const uint64_t rows_per_report = size_factor*1024;
+ if (n_rows_done%rows_per_report==0) {
+ // need to update the progress.
+ double fraction_of_remaining_we_just_did = (double)rows_per_report / (double)(n_rows - n_rows_done + rows_per_report);
+ invariant(0<= fraction_of_remaining_we_just_did && fraction_of_remaining_we_just_did<=1);
+ int progress_just_done = fraction_of_remaining_we_just_did * progress_allocation;
+ progress_allocation -= progress_just_done;
+ // ignore the result from update_progress here, we'll call update_progress again below, which will give us the nonzero result.
+ int r = update_progress(progress_just_done, bl, "in file merge");
+ if (0) printf("%s:%d Progress=%d\n", __FILE__, __LINE__, r);
+ }
+ }
+ if (result == 0 && uncompressed_buffer != nullptr && wb.ndone > 0) {
+ result = bl_finish_compressed_write(dest_stream, &wb);
+ }
+
+ if (result==0 && to_q) {
+ int r = toku_queue_enq(q, (void*)output_rowset, 1, NULL);
+ if (r!=0)
+ result = r;
+ else
+ output_rowset = NULL;
+ }
+
+ // cleanup
+ if (uncompressed_buffer) {
+ toku_free(uncompressed_buffer);
+ }
+ for (int i=0; i<n_sources; i++) {
+ toku_free(keys[i].data); keys[i].data = NULL;
+ toku_free(vals[i].data); vals[i].data = NULL;
+ }
+ if (output_rowset) {
+ destroy_rowset(output_rowset);
+ toku_free(output_rowset);
+ }
+ if (pq) { pqueue_free(pq); pq=NULL; }
+ toku_free(pq_nodes);
+ {
+ int r = update_progress(progress_allocation, bl, "end of merge_some_files");
+ //printf("%s:%d Progress=%d\n", __FILE__, __LINE__, r);
+ if (r!=0 && result==0) result = r;
+ }
+ return result;
+}
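+
+// The merge above is a k-way merge driven by a priority queue: one row from each source is
+// loaded into pq, the minimum is repeatedly popped and either appended to an output rowset
+// (when to_q) or written to dest_data, and the source that supplied it is read again to
+// refill the queue, until every source reaches EOF.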
+
+static int merge_some_files (const bool to_q, FIDX dest_data, QUEUE q, int n_sources, FIDX srcs_fidxs[/*n_sources*/], FTLOADER bl, int which_db, DB *dest_db, ft_compare_func compare, int progress_allocation)
+{
+ int result = 0;
+ DBUFIO_FILESET bfs = NULL;
+ int *MALLOC_N(n_sources, fds);
+ if (fds == NULL)
+ result = get_error_errno();
+ if (result == 0) {
+ for (int i = 0; i < n_sources; i++) {
+ int r = fileno(
+ toku_bl_fidx2file(bl, srcs_fidxs[i])->file); // we rely on the
+ // fact that when
+ // the files are
+ // closed, the fd
+ // is also closed.
+ if (r == -1) {
+ result = get_error_errno();
+ break;
+ }
+ fds[i] = r;
+ }
+ }
+ if (result==0) {
+ int r = create_dbufio_fileset(&bfs, n_sources, fds,
+ memory_per_rowset_during_merge(bl, n_sources, to_q), bl->compress_intermediates);
+ if (r!=0) { result = r; }
+ }
+
+ if (result==0) {
+ int r = toku_merge_some_files_using_dbufio (to_q, dest_data, q, n_sources, bfs, srcs_fidxs, bl, which_db, dest_db, compare, progress_allocation);
+ if (r!=0) { result = r; }
+ }
+
+ if (bfs!=NULL) {
+ if (result != 0)
+ (void) panic_dbufio_fileset(bfs, result);
+ int r = destroy_dbufio_fileset(bfs);
+ if (r!=0 && result==0) result=r;
+ bfs = NULL;
+ }
+ if (fds!=NULL) {
+ toku_free(fds);
+ fds = NULL;
+ }
+ return result;
+}
+
+static int int_min (int a, int b)
+{
+ if (a<b) return a;
+ else return b;
+}
+
+static int n_passes (int N, int B) {
+ int result = 0;
+ while (N>1) {
+ N = (N+B-1)/B;
+ result++;
+ }
+ return result;
+}
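+
+// n_passes counts how many rounds of B-way merging reduce N files down to one.  For
+// example, n_passes(100, 16) == 2: 100 files merge down to 7 intermediate files and then
+// to 1.  merge_files below uses this to apportion the progress allocation across passes.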
+
+int merge_files (struct merge_fileset *fs,
+ FTLOADER bl,
+ // These are needed for the comparison function and error callback.
+ int which_db, DB *dest_db, ft_compare_func compare,
+ int progress_allocation,
+ // Write rowsets into this queue.
+ QUEUE output_q
+ )
+/* Effect: Given a fileset, merge all the files writing all the answers into a queue.
+ * All the files in fs, and any temporary files will be closed and unlinked (and the fileset will be empty)
+ * Return value: 0 on success, otherwise an error number.
+ * On error *fs will contain no open files. All the files (including any temporary files) will be closed and unlinked.
+ * (however the fs will still need to be deallocated.)
+ */
+{
+ //printf(" merge_files %d files\n", fs->n_temp_files);
+ //printf(" merge_files use %d progress=%d fin at %d\n", progress_allocation, bl->progress, bl->progress+progress_allocation);
+ const int final_mergelimit = (size_factor == 1) ? 4 : merge_fanin(bl, true); // try for a merge to the leaf level
+ const int earlier_mergelimit = (size_factor == 1) ? 4 : merge_fanin(bl, false); // try for a merge at nonleaf.
+ int n_passes_left = (fs->n_temp_files<=final_mergelimit)
+ ? 1
+ : 1+n_passes((fs->n_temp_files+final_mergelimit-1)/final_mergelimit, earlier_mergelimit);
+ // printf("%d files, %d on last pass, %d on earlier passes, %d passes\n", fs->n_temp_files, final_mergelimit, earlier_mergelimit, n_passes_left);
+ int result = 0;
+ while (fs->n_temp_files > 0) {
+ int progress_allocation_for_this_pass = progress_allocation/n_passes_left;
+ progress_allocation -= progress_allocation_for_this_pass;
+ //printf("%s:%d n_passes_left=%d progress_allocation_for_this_pass=%d\n", __FILE__, __LINE__, n_passes_left, progress_allocation_for_this_pass);
+
+ invariant(fs->n_temp_files>0);
+ struct merge_fileset next_file_set;
+ bool to_queue = (bool)(fs->n_temp_files <= final_mergelimit);
+ init_merge_fileset(&next_file_set);
+ while (fs->n_temp_files>0) {
+ // grab some files and merge them.
+ int n_to_merge = int_min(to_queue?final_mergelimit:earlier_mergelimit, fs->n_temp_files);
+
+ // We are about to do n_to_merge/n_temp_files of the remaining for this pass.
+ int progress_allocation_for_this_subpass = progress_allocation_for_this_pass * (double)n_to_merge / (double)fs->n_temp_files;
+ // printf("%s:%d progress_allocation_for_this_subpass=%d n_temp_files=%d b=%llu\n", __FILE__, __LINE__, progress_allocation_for_this_subpass, fs->n_temp_files, (long long unsigned) memory_per_rowset_during_merge(bl, n_to_merge, to_queue));
+ progress_allocation_for_this_pass -= progress_allocation_for_this_subpass;
+
+ //printf("%s:%d merging\n", __FILE__, __LINE__);
+ FIDX merged_data = FIDX_NULL;
+
+ FIDX *XMALLOC_N(n_to_merge, data_fidxs);
+ for (int i=0; i<n_to_merge; i++) {
+ data_fidxs[i] = FIDX_NULL;
+ }
+ for (int i=0; i<n_to_merge; i++) {
+ int idx = fs->n_temp_files -1 -i;
+ FIDX fidx = fs->data_fidxs[idx];
+ result = ft_loader_fi_reopen(&bl->file_infos, fidx, "r");
+ if (result) break;
+ data_fidxs[i] = fidx;
+ }
+ if (result==0 && !to_queue) {
+ result = extend_fileset(bl, &next_file_set, &merged_data);
+ }
+
+ if (result==0) {
+ result = merge_some_files(to_queue, merged_data, output_q, n_to_merge, data_fidxs, bl, which_db, dest_db, compare, progress_allocation_for_this_subpass);
+ // if result!=0, fall through
+ if (result==0) {
+ /*nothing*/;// this is gratuitous, but we need something to give code coverage tools to help us know that it's important to distinguish between result==0 and result!=0
+ }
+ }
+
+ //printf("%s:%d merged\n", __FILE__, __LINE__);
+ for (int i=0; i<n_to_merge; i++) {
+ if (!fidx_is_null(data_fidxs[i])) {
+ {
+ int r = ft_loader_fi_close(&bl->file_infos, data_fidxs[i], true);
+ if (r!=0 && result==0) result = r;
+ }
+ {
+ int r = ft_loader_fi_unlink(&bl->file_infos, data_fidxs[i]);
+ if (r!=0 && result==0) result = r;
+ }
+ data_fidxs[i] = FIDX_NULL;
+ }
+ }
+
+ fs->n_temp_files -= n_to_merge;
+ if (!to_queue && !fidx_is_null(merged_data)) {
+ int r = ft_loader_fi_close(&bl->file_infos, merged_data, true);
+ if (r!=0 && result==0) result = r;
+ }
+ toku_free(data_fidxs);
+
+ if (result!=0) break;
+ }
+
+ destroy_merge_fileset(fs);
+ *fs = next_file_set;
+
+ // Update the progress
+ n_passes_left--;
+
+ if (result==0) { invariant(progress_allocation_for_this_pass==0); }
+
+ if (result!=0) break;
+ }
+ if (result) ft_loader_set_panic(bl, result, true, which_db, nullptr, nullptr);
+
+ {
+ int r = toku_queue_eof(output_q);
+ if (r!=0 && result==0) result = r;
+ }
+ // It's conceivable that the progress_allocation could be nonzero (for example if bl->N==0)
+ {
+ int r = update_progress(progress_allocation, bl, "did merge_files");
+ if (r!=0 && result==0) result = r;
+ }
+ return result;
+}
+
+struct subtree_info {
+ int64_t block;
+};
+
+struct subtrees_info {
+ int64_t next_free_block;
+ int64_t n_subtrees; // was n_blocks
+ int64_t n_subtrees_limit;
+ struct subtree_info *subtrees;
+};
+
+static void subtrees_info_init(struct subtrees_info *p) {
+ p->next_free_block = p->n_subtrees = p->n_subtrees_limit = 0;
+ p->subtrees = NULL;
+}
+
+static void subtrees_info_destroy(struct subtrees_info *p) {
+ toku_free(p->subtrees);
+ p->subtrees = NULL;
+}
+
+static void allocate_node (struct subtrees_info *sts, int64_t b) {
+ if (sts->n_subtrees >= sts->n_subtrees_limit) {
+ sts->n_subtrees_limit *= 2;
+ XREALLOC_N(sts->n_subtrees_limit, sts->subtrees);
+ }
+ sts->subtrees[sts->n_subtrees].block = b;
+ sts->n_subtrees++;
+}
+
+// dbuf will always contain a 512-byte aligned buffer, but the length might not be a multiple of 512 bytes. If that's what you want, then pad it.
+struct dbuf {
+ unsigned char *buf;
+ int buflen;
+ int off;
+ int error;
+};
+
+struct leaf_buf {
+ BLOCKNUM blocknum;
+ TXNID xid;
+ uint64_t nkeys, ndata, dsize;
+ FTNODE node;
+ XIDS xids;
+ uint64_t off;
+};
+
+struct translation {
+ int64_t off, size;
+};
+
+struct dbout {
+ int fd;
+ toku_off_t current_off;
+
+ int64_t n_translations;
+ int64_t n_translations_limit;
+ struct translation *translation;
+ toku_mutex_t mutex;
+ FT ft;
+};
+
+static inline void dbout_init(struct dbout *out, FT ft) {
+ out->fd = -1;
+ out->current_off = 0;
+ out->n_translations = out->n_translations_limit = 0;
+ out->translation = NULL;
+ toku_mutex_init(*loader_out_mutex_key, &out->mutex, nullptr);
+ out->ft = ft;
+}
+
+static inline void dbout_destroy(struct dbout *out) {
+ if (out->fd >= 0) {
+ toku_os_close(out->fd);
+ out->fd = -1;
+ }
+ toku_free(out->translation);
+ out->translation = NULL;
+ toku_mutex_destroy(&out->mutex);
+}
+
+static inline void dbout_lock(struct dbout *out) {
+ toku_mutex_lock(&out->mutex);
+}
+
+static inline void dbout_unlock(struct dbout *out) {
+ toku_mutex_unlock(&out->mutex);
+}
+
+static void seek_align_locked(struct dbout *out) {
+ toku_off_t old_current_off = out->current_off;
+ int alignment = 4096;
+ out->current_off += alignment-1;
+ out->current_off &= ~(alignment-1);
+ toku_off_t r = lseek(out->fd, out->current_off, SEEK_SET);
+ invariant(r==out->current_off);
+ invariant(out->current_off >= old_current_off);
+ invariant(out->current_off < old_current_off+alignment);
+ invariant(out->current_off % alignment == 0);
+}
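+
+// seek_align_locked rounds current_off up to the next 4096-byte boundary (e.g. 8193
+// becomes 12288, while 8192 is left unchanged) and seeks out->fd there.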
+
+static void seek_align(struct dbout *out) {
+ dbout_lock(out);
+ seek_align_locked(out);
+ dbout_unlock(out);
+}
+
+static void dbuf_init (struct dbuf *dbuf) {
+ dbuf->buf = 0;
+ dbuf->buflen = 0;
+ dbuf->off = 0;
+ dbuf->error = 0;
+}
+
+static void dbuf_destroy (struct dbuf *dbuf) {
+ toku_free(dbuf->buf); dbuf->buf = NULL;
+}
+
+static int allocate_block (struct dbout *out, int64_t *ret_block_number)
+// Return the new block number
+{
+ int result = 0;
+ dbout_lock(out);
+ int64_t block_number = out->n_translations;
+ if (block_number >= out->n_translations_limit) {
+ int64_t old_n_translations_limit = out->n_translations_limit;
+ struct translation *old_translation = out->translation;
+ if (out->n_translations_limit==0) {
+ out->n_translations_limit = 1;
+ } else {
+ out->n_translations_limit *= 2;
+ }
+ REALLOC_N(out->n_translations_limit, out->translation);
+ if (out->translation == NULL) {
+ result = get_error_errno();
+ invariant(result);
+ out->n_translations_limit = old_n_translations_limit;
+ out->translation = old_translation;
+ goto cleanup;
+ }
+ }
+ out->n_translations++;
+ *ret_block_number = block_number;
+cleanup:
+ dbout_unlock(out);
+ return result;
+}
+
+static void putbuf_bytes (struct dbuf *dbuf, const void *bytes, int nbytes) {
+ if (!dbuf->error && dbuf->off + nbytes > dbuf->buflen) {
+ unsigned char *oldbuf = dbuf->buf;
+ int oldbuflen = dbuf->buflen;
+ dbuf->buflen += dbuf->off + nbytes;
+ dbuf->buflen *= 2;
+ REALLOC_N_ALIGNED(512, dbuf->buflen, dbuf->buf);
+ if (dbuf->buf == NULL) {
+ dbuf->error = get_error_errno();
+ dbuf->buf = oldbuf;
+ dbuf->buflen = oldbuflen;
+ }
+ }
+ if (!dbuf->error) {
+ memcpy(dbuf->buf + dbuf->off, bytes, nbytes);
+ dbuf->off += nbytes;
+ }
+}
+
+static void putbuf_int32 (struct dbuf *dbuf, int v) {
+ putbuf_bytes(dbuf, &v, 4);
+}
+
+static void putbuf_int64 (struct dbuf *dbuf, long long v) {
+ putbuf_int32(dbuf, v>>32);
+ putbuf_int32(dbuf, v&0xFFFFFFFF);
+}
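+
+// putbuf_int64 stores the high 32 bits first and the low 32 bits second, each through
+// putbuf_int32, which copies its 4 bytes in native byte order via putbuf_bytes.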
+
+static struct leaf_buf *start_leaf (struct dbout *out, const DESCRIPTOR UU(desc), int64_t lblocknum, TXNID xid, uint32_t UU(target_nodesize)) {
+ invariant(lblocknum < out->n_translations_limit);
+
+ struct leaf_buf *XMALLOC(lbuf);
+ lbuf->blocknum.b = lblocknum;
+ lbuf->xid = xid;
+ lbuf->nkeys = lbuf->ndata = lbuf->dsize = 0;
+ lbuf->off = 0;
+
+ lbuf->xids = toku_xids_get_root_xids();
+ if (xid != TXNID_NONE) {
+ XIDS new_xids = NULL;
+ int r = toku_xids_create_child(lbuf->xids, &new_xids, xid);
+ assert(r == 0 && new_xids);
+ toku_xids_destroy(&lbuf->xids);
+ lbuf->xids = new_xids;
+ }
+
+ FTNODE XMALLOC(node);
+ toku_initialize_empty_ftnode(node, lbuf->blocknum, 0 /*height*/, 1 /*basement nodes*/, FT_LAYOUT_VERSION, 0);
+ BP_STATE(node, 0) = PT_AVAIL;
+ lbuf->node = node;
+
+ return lbuf;
+}
+
+static void finish_leafnode(
+ struct dbout* out,
+ struct leaf_buf* lbuf,
+ int progress_allocation,
+ FTLOADER bl,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method target_compression_method);
+
+static int write_nonleaves(
+ FTLOADER bl,
+ FIDX pivots_fidx,
+ struct dbout* out,
+ struct subtrees_info* sts,
+ const DESCRIPTOR descriptor,
+ uint32_t target_nodesize,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method target_compression_method);
+
+static void add_pair_to_leafnode(
+ struct leaf_buf* lbuf,
+ unsigned char* key,
+ int keylen,
+ unsigned char* val,
+ int vallen,
+ int this_leafentry_size,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta);
+
+static int write_translation_table(
+ struct dbout* out,
+ long long* off_of_translation_p);
+
+static int write_header(
+ struct dbout* out,
+ long long translation_location_on_disk,
+ long long translation_size_on_disk);
+
+static void drain_writer_q(QUEUE q) {
+ void *item;
+ while (1) {
+ int r = toku_queue_deq(q, &item, NULL, NULL);
+ if (r == EOF)
+ break;
+ invariant(r == 0);
+ struct rowset *rowset = (struct rowset *) item;
+ destroy_rowset(rowset);
+ toku_free(rowset);
+ }
+}
+
+static void cleanup_maxkey(DBT *maxkey) {
+ if (maxkey->flags == DB_DBT_REALLOC) {
+ toku_free(maxkey->data);
+ maxkey->data = NULL;
+ maxkey->flags = 0;
+ }
+}
+
+static void update_maxkey(DBT *maxkey, DBT *key) {
+ cleanup_maxkey(maxkey);
+ *maxkey = *key;
+}
+
+static int copy_maxkey(DBT *maxkey) {
+ DBT newkey;
+ toku_init_dbt_flags(&newkey, DB_DBT_REALLOC);
+ int r = toku_dbt_set(maxkey->size, maxkey->data, &newkey, NULL);
+ if (r == 0)
+ update_maxkey(maxkey, &newkey);
+ return r;
+}
+
+static int toku_loader_write_ft_from_q (FTLOADER bl,
+ const DESCRIPTOR descriptor,
+ int fd, // write to here
+ int progress_allocation,
+ QUEUE q,
+ uint64_t total_disksize_estimate,
+ int which_db,
+ uint32_t target_nodesize,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method target_compression_method,
+ uint32_t target_fanout)
+// Effect: Consume a sequence of rowsets from a queue, creating a fractal tree. Closes fd.
+{
+ // set the number of fractal tree writer threads so that we can partition memory in the merger
+ ft_loader_set_fractal_workers_count(bl);
+
+ int result = 0;
+ int r;
+
+ // The pivots file will contain all the pivot strings (in the form <size(32bits)> <data>)
+ // The pivots_fname is the name of the pivots file.
+ // Note that the pivots file will have one extra pivot in it (the last key in the dictionary) which will not appear in the tree.
+ int64_t n_pivots=0; // number of pivots in pivots_file
+ FIDX pivots_file; // the file
+
+ r = ft_loader_open_temp_file (bl, &pivots_file);
+ if (r) {
+ result = r;
+ drain_writer_q(q);
+ r = toku_os_close(fd);
+ assert_zero(r);
+ return result;
+ }
+ TOKU_FILE *pivots_stream = toku_bl_fidx2file(bl, pivots_file);
+
+ TXNID root_xid_that_created = TXNID_NONE;
+ if (bl->root_xids_that_created)
+ root_xid_that_created = bl->root_xids_that_created[which_db];
+
+ // TODO: (Zardosht/Yoni/Leif), do this code properly
+ struct ft ft;
+ toku_ft_init(&ft, (BLOCKNUM){0}, bl->load_lsn, root_xid_that_created, target_nodesize, target_basementnodesize, target_compression_method, target_fanout);
+
+ struct dbout out;
+ ZERO_STRUCT(out);
+ dbout_init(&out, &ft);
+ out.fd = fd;
+ out.current_off = 8192; // leave 8K reserved at beginning
+ out.n_translations = 3; // 3 translations reserved at the beginning
+ out.n_translations_limit = 4;
+ MALLOC_N(out.n_translations_limit, out.translation);
+ if (out.translation == NULL) {
+ result = get_error_errno();
+ dbout_destroy(&out);
+ drain_writer_q(q);
+ toku_free(ft.h);
+ return result;
+ }
+
+ // The blocks_array will contain all the block numbers that correspond to the pivots. Generally there should be one more block than pivot.
+ struct subtrees_info sts;
+ subtrees_info_init(&sts);
+ sts.next_free_block = 3;
+ sts.n_subtrees = 0;
+ sts.n_subtrees_limit = 1;
+ MALLOC_N(sts.n_subtrees_limit, sts.subtrees);
+ if (sts.subtrees == NULL) {
+ result = get_error_errno();
+ subtrees_info_destroy(&sts);
+ dbout_destroy(&out);
+ drain_writer_q(q);
+ toku_free(ft.h);
+ return result;
+ }
+
+ out.translation[0].off = -2LL; out.translation[0].size = 0; // block 0 is NULL
+ invariant(1==RESERVED_BLOCKNUM_TRANSLATION);
+ invariant(2==RESERVED_BLOCKNUM_DESCRIPTOR);
+ out.translation[1].off = -1; // block 1 is the block translation, filled in later
+ out.translation[2].off = -1; // block 2 is the descriptor
+ seek_align(&out);
+ int64_t lblock = 0; // make gcc --happy
+ result = allocate_block(&out, &lblock);
+ invariant(result == 0); // can not fail since translations reserved above
+
+ TXNID le_xid = leafentry_xid(bl, which_db);
+ struct leaf_buf *lbuf = start_leaf(&out, descriptor, lblock, le_xid, target_nodesize);
+ uint64_t n_rows_remaining = bl->n_rows;
+ uint64_t old_n_rows_remaining = bl->n_rows;
+
+ uint64_t used_estimate = 0; // how much diskspace have we used up?
+
+ DBT maxkey = make_dbt(0, 0); // keep track of the max key of the current node
+
+ STAT64INFO_S deltas = ZEROSTATS;
+ // This is just a placeholder and is not used by the loader; the real/accurate
+ // stats come out of 'deltas'. Because this loader does not push messages down
+ // through the top of a fractal tree (where the logical row count is normally
+ // maintained) but creates leaf entries directly, it must perform the logical
+ // row counting on its own.
+ int64_t logical_rows_delta = 0;
+ while (result == 0) {
+ void *item;
+ {
+ int rr = toku_queue_deq(q, &item, NULL, NULL);
+ if (rr == EOF) break;
+ if (rr != 0) {
+ ft_loader_set_panic(bl, rr, true, which_db, nullptr, nullptr);
+ break;
+ }
+ }
+ struct rowset *output_rowset = (struct rowset *)item;
+
+ for (unsigned int i = 0; i < output_rowset->n_rows; i++) {
+ DBT key = make_dbt(output_rowset->data+output_rowset->rows[i].off, output_rowset->rows[i].klen);
+ DBT val = make_dbt(output_rowset->data+output_rowset->rows[i].off + output_rowset->rows[i].klen, output_rowset->rows[i].vlen);
+
+ size_t this_leafentry_size = ft_loader_leafentry_size(key.size, val.size, le_xid);
+
+ used_estimate += this_leafentry_size;
+
+ // Spawn off a node if
+ // a) there is at least one row in it, and
+ // b) this item would make the nodesize too big, or
+ // c) the remaining amount won't fit in the current node and the current node's data is more than the remaining amount
+ uint64_t remaining_amount = total_disksize_estimate - used_estimate;
+ uint64_t used_here = lbuf->off + 1000; // leave 1000 for various overheads.
+ uint64_t target_size = (target_nodesize*7L)/8; // use only 7/8 of the node.
+ uint64_t used_here_with_next_key = used_here + this_leafentry_size;
+ if (lbuf->nkeys > 0 &&
+ ((used_here_with_next_key >= target_size) || (used_here + remaining_amount >= target_size && lbuf->off > remaining_amount))) {
+
+ int progress_this_node = progress_allocation * (double)(old_n_rows_remaining - n_rows_remaining)/(double)old_n_rows_remaining;
+ progress_allocation -= progress_this_node;
+ old_n_rows_remaining = n_rows_remaining;
+
+ allocate_node(&sts, lblock);
+
+ n_pivots++;
+
+ invariant(maxkey.data != NULL);
+ if ((r = bl_write_dbt(&maxkey, pivots_stream, NULL, nullptr, bl))) {
+ ft_loader_set_panic(bl, r, true, which_db, nullptr, nullptr);
+ if (result == 0) result = r;
+ break;
+ }
+
+ finish_leafnode(&out, lbuf, progress_this_node, bl, target_basementnodesize, target_compression_method);
+ lbuf = NULL;
+
+ r = allocate_block(&out, &lblock);
+ if (r != 0) {
+ ft_loader_set_panic(bl, r, true, which_db, nullptr, nullptr);
+ if (result == 0) result = r;
+ break;
+ }
+ lbuf = start_leaf(&out, descriptor, lblock, le_xid, target_nodesize);
+ }
+
+ add_pair_to_leafnode(
+ lbuf,
+ (unsigned char*)key.data,
+ key.size,
+ (unsigned char*)val.data,
+ val.size,
+ this_leafentry_size,
+ &deltas,
+ &logical_rows_delta);
+ n_rows_remaining--;
+
+ update_maxkey(&maxkey, &key); // set the new maxkey to the current key
+ }
+
+ r = copy_maxkey(&maxkey); // make a copy of maxkey before the rowset is destroyed
+ if (result == 0)
+ result = r;
+ destroy_rowset(output_rowset);
+ toku_free(output_rowset);
+
+ if (result == 0)
+ result = ft_loader_get_error(&bl->error_callback); // check if an error was posted and terminate this quickly
+ }
+
+ if (deltas.numrows || deltas.numbytes) {
+ toku_ft_update_stats(&ft.in_memory_stats, deltas);
+ }
+
+ // As noted above, the loader directly creates a tree structure without
+    // going through the higher level ft API and thus bypasses the logical row
+ // counting performed at that level. So, we must manually update the logical
+ // row count with the info we have from the physical delta that comes out of
+ // add_pair_to_leafnode.
+ toku_ft_adjust_logical_row_count(&ft, deltas.numrows);
+
+ cleanup_maxkey(&maxkey);
+
+ if (lbuf) {
+ allocate_node(&sts, lblock);
+ {
+ int p = progress_allocation/2;
+ finish_leafnode(&out, lbuf, p, bl, target_basementnodesize, target_compression_method);
+ progress_allocation -= p;
+ }
+ }
+
+
+ if (result == 0) {
+ result = ft_loader_get_error(&bl->error_callback); // if there were any prior errors then exit
+ }
+
+ if (result != 0) goto error;
+
+    // We haven't panicked, so the sum should add up.
+ invariant(used_estimate == total_disksize_estimate);
+
+ n_pivots++;
+
+ {
+ DBT key = make_dbt(0,0); // must write an extra DBT into the pivots file.
+ r = bl_write_dbt(&key, pivots_stream, NULL, nullptr, bl);
+ if (r) {
+ result = r; goto error;
+ }
+ }
+
+ r = write_nonleaves(bl, pivots_file, &out, &sts, descriptor, target_nodesize, target_basementnodesize, target_compression_method);
+ if (r) {
+ result = r; goto error;
+ }
+
+ {
+ invariant(sts.n_subtrees==1);
+ out.ft->h->root_blocknum = make_blocknum(sts.subtrees[0].block);
+ toku_free(sts.subtrees); sts.subtrees = NULL;
+
+ // write the descriptor
+ {
+ seek_align(&out);
+ invariant(out.n_translations >= RESERVED_BLOCKNUM_DESCRIPTOR);
+ invariant(out.translation[RESERVED_BLOCKNUM_DESCRIPTOR].off == -1);
+ out.translation[RESERVED_BLOCKNUM_DESCRIPTOR].off = out.current_off;
+ size_t desc_size = 4+toku_serialize_descriptor_size(descriptor);
+ invariant(desc_size>0);
+ out.translation[RESERVED_BLOCKNUM_DESCRIPTOR].size = desc_size;
+ struct wbuf wbuf;
+ char *XMALLOC_N(desc_size, buf);
+ wbuf_init(&wbuf, buf, desc_size);
+ toku_serialize_descriptor_contents_to_wbuf(&wbuf, descriptor);
+ uint32_t checksum = toku_x1764_finish(&wbuf.checksum);
+ wbuf_int(&wbuf, checksum);
+ invariant(wbuf.ndone==desc_size);
+ r = toku_os_write(out.fd, wbuf.buf, wbuf.ndone);
+ out.current_off += desc_size;
+ toku_free(buf); // wbuf_destroy
+ if (r) {
+ result = r; goto error;
+ }
+ }
+
+ long long off_of_translation;
+ r = write_translation_table(&out, &off_of_translation);
+ if (r) {
+ result = r; goto error;
+ }
+
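+        // Note: (n_translations+1)*16 + 4 equals n_translations*16 + 20, i.e. the
+        // translation table size computed in write_translation_table (8-byte record
+        // count + 8-byte "linked list" field + 16 bytes per entry + 4-byte checksum).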
+ r = write_header(&out, off_of_translation, (out.n_translations+1)*16+4);
+ if (r) {
+ result = r; goto error;
+ }
+
+ r = update_progress(progress_allocation, bl, "wrote tdb file");
+ if (r) {
+ result = r; goto error;
+ }
+ }
+
+ r = fsync(out.fd);
+ if (r) {
+ result = get_error_errno(); goto error;
+ }
+
+    // Do we need to pay attention to user_said_stop? Or should the producer at the other end of the queue pay attention and send an EOF?
+
+ error:
+ {
+ int rr = toku_os_close(fd);
+ if (rr)
+ result = get_error_errno();
+ }
+ out.fd = -1;
+
+ subtrees_info_destroy(&sts);
+ dbout_destroy(&out);
+ drain_writer_q(q);
+ toku_free(ft.h);
+
+ return result;
+}
+
+int toku_loader_write_ft_from_q_in_C (FTLOADER bl,
+ const DESCRIPTOR descriptor,
+ int fd, // write to here
+ int progress_allocation,
+ QUEUE q,
+ uint64_t total_disksize_estimate,
+ int which_db,
+ uint32_t target_nodesize,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method target_compression_method,
+ uint32_t target_fanout)
+// This is probably only for testing.
+{
+ target_nodesize = target_nodesize == 0 ? default_loader_nodesize : target_nodesize;
+ target_basementnodesize = target_basementnodesize == 0 ? default_loader_basementnodesize : target_basementnodesize;
+ return toku_loader_write_ft_from_q (bl, descriptor, fd, progress_allocation, q, total_disksize_estimate, which_db, target_nodesize, target_basementnodesize, target_compression_method, target_fanout);
+}
+
+
+static void* fractal_thread (void *ftav) {
+ struct fractal_thread_args *fta = (struct fractal_thread_args *)ftav;
+ int r = toku_loader_write_ft_from_q(fta->bl,
+ fta->descriptor,
+ fta->fd,
+ fta->progress_allocation,
+ fta->q,
+ fta->total_disksize_estimate,
+ fta->which_db,
+ fta->target_nodesize,
+ fta->target_basementnodesize,
+ fta->target_compression_method,
+ fta->target_fanout);
+ fta->errno_result = r;
+ toku_instr_delete_current_thread();
+ return toku_pthread_done(nullptr);
+}
+
+static int loader_do_i(FTLOADER bl,
+ int which_db,
+ DB *dest_db,
+ ft_compare_func compare,
+ const DESCRIPTOR descriptor,
+ const char *new_fname,
+ int progress_allocation // how much progress do I need
+ // to add into bl->progress by
+ // the end..
+ )
+/* Effect: Handle the file creating for one particular DB in the bulk loader. */
+/* Requires: The data is fully extracted, so we can do merges out of files and
+ write the ft file. */
+{
+ //printf("doing i use %d progress=%d fin at %d\n", progress_allocation, bl->progress, bl->progress+progress_allocation);
+ struct merge_fileset *fs = &(bl->fs[which_db]);
+ struct rowset *rows = &(bl->rows[which_db]);
+ invariant(rows->data==NULL); // the rows should be all cleaned up already
+
+ int r = toku_queue_create(&bl->fractal_queues[which_db], FRACTAL_WRITER_QUEUE_DEPTH);
+ if (r) goto error;
+
+ {
+ mode_t mode = S_IRUSR + S_IWUSR + S_IRGRP + S_IWGRP;
+ int fd = toku_os_open(new_fname,
+ O_RDWR | O_CREAT | O_BINARY,
+ mode,
+ *tokudb_file_load_key); // #2621
+ if (fd < 0) {
+ r = get_error_errno();
+ goto error;
+ }
+
+ uint32_t target_nodesize, target_basementnodesize, target_fanout;
+ enum toku_compression_method target_compression_method;
+ r = dest_db->get_pagesize(dest_db, &target_nodesize);
+ invariant_zero(r);
+ r = dest_db->get_readpagesize(dest_db, &target_basementnodesize);
+ invariant_zero(r);
+ r = dest_db->get_compression_method(dest_db, &target_compression_method);
+ invariant_zero(r);
+ r = dest_db->get_fanout(dest_db, &target_fanout);
+ invariant_zero(r);
+
+ if (bl->allow_puts) {
+ // a better allocation would be to figure out roughly how many merge passes we'll need.
+ int allocation_for_merge = (2*progress_allocation)/3;
+ progress_allocation -= allocation_for_merge;
+
+ // This structure must stay live until the join below.
+ struct fractal_thread_args fta = {bl,
+ descriptor,
+ fd,
+ progress_allocation,
+ bl->fractal_queues[which_db],
+ bl->extracted_datasizes[which_db],
+ 0,
+ which_db,
+ target_nodesize,
+ target_basementnodesize,
+ target_compression_method,
+ target_fanout};
+
+ r = toku_pthread_create(*fractal_thread_key,
+ bl->fractal_threads + which_db,
+ nullptr,
+ fractal_thread,
+ static_cast<void *>(&fta));
+ if (r) {
+ int r2 __attribute__((__unused__)) =
+ toku_queue_destroy(bl->fractal_queues[which_db]);
+ // ignore r2, since we already have an error
+ bl->fractal_queues[which_db] = nullptr;
+ goto error;
+ }
+ invariant(bl->fractal_threads_live[which_db]==false);
+ bl->fractal_threads_live[which_db] = true;
+
+ r = merge_files(fs, bl, which_db, dest_db, compare, allocation_for_merge, bl->fractal_queues[which_db]);
+
+ {
+ void *toku_pthread_retval;
+ int r2 = toku_pthread_join(bl->fractal_threads[which_db], &toku_pthread_retval);
+ invariant(fta.bl==bl); // this is a gratuitous assertion to make sure that the fta struct is still live here. A previous bug put that struct into a C block statement.
+ resource_assert_zero(r2);
+ invariant(toku_pthread_retval==NULL);
+ invariant(bl->fractal_threads_live[which_db]);
+ bl->fractal_threads_live[which_db] = false;
+ if (r == 0) r = fta.errno_result;
+ }
+ } else {
+ toku_queue_eof(bl->fractal_queues[which_db]);
+ r = toku_loader_write_ft_from_q(bl, descriptor, fd, progress_allocation,
+ bl->fractal_queues[which_db], bl->extracted_datasizes[which_db], which_db,
+ target_nodesize, target_basementnodesize, target_compression_method, target_fanout);
+ }
+ }
+
+ error: // this is the cleanup code. Even if r==0 (no error) we fall through to here.
+ if (bl->fractal_queues[which_db]) {
+ int r2 = toku_queue_destroy(bl->fractal_queues[which_db]);
+ invariant(r2==0);
+ bl->fractal_queues[which_db] = nullptr;
+ }
+
+ // if we get here we need to free up the merge_fileset and the rowset, as well as the keys
+ toku_free(rows->data); rows->data = NULL;
+ toku_free(rows->rows); rows->rows = NULL;
+ toku_free(fs->data_fidxs); fs->data_fidxs = NULL;
+ return r;
+}
+
+static int toku_ft_loader_close_internal (FTLOADER bl)
+/* Effect: Close the bulk loader.
+ * Return all the file descriptors in the array fds. */
+{
+ int result = 0;
+ if (bl->N == 0)
+ result = update_progress(PROGRESS_MAX, bl, "done");
+ else {
+ int remaining_progress = PROGRESS_MAX;
+ for (int i = 0; i < bl->N; i++) {
+ // Take the unallocated progress and divide it among the unfinished jobs.
+ // This calculation allocates all of the PROGRESS_MAX bits of progress to some job.
+ int allocate_here = remaining_progress/(bl->N - i);
+ remaining_progress -= allocate_here;
+ char *fname_in_cwd = toku_cachetable_get_fname_in_cwd(bl->cachetable, bl->new_fnames_in_env[i]);
+ result = loader_do_i(bl, i, bl->dbs[i], bl->bt_compare_funs[i], bl->descriptors[i], fname_in_cwd, allocate_here);
+ toku_free(fname_in_cwd);
+ if (result != 0)
+ goto error;
+ invariant(0 <= bl->progress && bl->progress <= PROGRESS_MAX);
+ }
+ if (result==0) invariant(remaining_progress==0);
+
+ // fsync the directory containing the new tokudb files.
+ char *fname0 = toku_cachetable_get_fname_in_cwd(bl->cachetable, bl->new_fnames_in_env[0]);
+ int r = toku_fsync_directory(fname0);
+ toku_free(fname0);
+ if (r != 0) {
+ result = r; goto error;
+ }
+ }
+ invariant(bl->file_infos.n_files_open == 0);
+ invariant(bl->file_infos.n_files_extant == 0);
+ invariant(bl->progress == PROGRESS_MAX);
+ error:
+ toku_ft_loader_internal_destroy(bl, (bool)(result!=0));
+ return result;
+}
+
+int toku_ft_loader_close (FTLOADER bl,
+ ft_loader_error_func error_function, void *error_extra,
+ ft_loader_poll_func poll_function, void *poll_extra
+ )
+{
+ int result = 0;
+
+ int r;
+
+ //printf("Closing\n");
+
+ ft_loader_set_error_function(&bl->error_callback, error_function, error_extra);
+
+ ft_loader_set_poll_function(&bl->poll_callback, poll_function, poll_extra);
+
+ if (bl->extractor_live) {
+ r = finish_extractor(bl);
+ if (r)
+ result = r;
+ invariant(!bl->extractor_live);
+ } else {
+ r = finish_primary_rows(bl);
+ if (r)
+ result = r;
+ }
+
+ // check for an error during extraction
+ if (result == 0) {
+ r = ft_loader_call_error_function(&bl->error_callback);
+ if (r)
+ result = r;
+ }
+
+ if (result == 0) {
+ r = toku_ft_loader_close_internal(bl);
+ if (r && result == 0)
+ result = r;
+ } else
+ toku_ft_loader_internal_destroy(bl, true);
+
+ return result;
+}
+
+int toku_ft_loader_finish_extractor(FTLOADER bl) {
+ int result = 0;
+ if (bl->extractor_live) {
+ int r = finish_extractor(bl);
+ if (r)
+ result = r;
+ invariant(!bl->extractor_live);
+ } else
+ result = EINVAL;
+ return result;
+}
+
+int toku_ft_loader_abort(FTLOADER bl, bool is_error)
+/* Effect : Abort the bulk loader, free ft_loader resources */
+{
+ int result = 0;
+
+ // cleanup the extractor thread
+ if (bl->extractor_live) {
+ int r = finish_extractor(bl);
+ if (r)
+ result = r;
+ invariant(!bl->extractor_live);
+ }
+
+ for (int i = 0; i < bl->N; i++)
+ invariant(!bl->fractal_threads_live[i]);
+
+ toku_ft_loader_internal_destroy(bl, is_error);
+ return result;
+}
+
+int toku_ft_loader_get_error(FTLOADER bl, int *error) {
+ *error = ft_loader_get_error(&bl->error_callback);
+ return 0;
+}
+
+static void add_pair_to_leafnode(
+ struct leaf_buf* lbuf,
+ unsigned char* key,
+ int keylen,
+ unsigned char* val,
+ int vallen,
+ int this_leafentry_size,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
+
+ lbuf->nkeys++;
+ lbuf->ndata++;
+ lbuf->dsize += keylen + vallen;
+ lbuf->off += this_leafentry_size;
+
+ // append this key val pair to the leafnode
+ // #3588 TODO just make a clean ule and append it to the omt
+ // #3588 TODO can do the rebalancing here and avoid a lot of work later
+ FTNODE leafnode = lbuf->node;
+ uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs();
+ DBT kdbt, vdbt;
+ ft_msg msg(
+ toku_fill_dbt(&kdbt, key, keylen),
+ toku_fill_dbt(&vdbt, val, vallen),
+ FT_INSERT,
+ ZERO_MSN,
+ lbuf->xids);
+ uint64_t workdone = 0;
+ // there's no mvcc garbage in a bulk-loaded FT, so there's no need to pass useful gc info
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, true);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ &workdone,
+ stats_to_update,
+ logical_rows_delta);
+}
+
+static int write_literal(struct dbout *out, void*data, size_t len) {
+ invariant(out->current_off%4096==0);
+ int result = toku_os_write(out->fd, data, len);
+ if (result == 0)
+ out->current_off+=len;
+ return result;
+}
+
+static void finish_leafnode(
+ struct dbout* out,
+ struct leaf_buf* lbuf,
+ int progress_allocation,
+ FTLOADER bl,
+ uint32_t target_basementnodesize,
+ enum toku_compression_method target_compression_method) {
+
+ int result = 0;
+
+ // serialize leaf to buffer
+ size_t serialized_leaf_size = 0;
+ size_t uncompressed_serialized_leaf_size = 0;
+ char *serialized_leaf = NULL;
+ FTNODE_DISK_DATA ndd = NULL;
+ result = toku_serialize_ftnode_to_memory(
+ lbuf->node,
+ &ndd,
+ target_basementnodesize,
+ target_compression_method,
+ true,
+ true,
+ &serialized_leaf_size,
+ &uncompressed_serialized_leaf_size,
+ &serialized_leaf);
+
+ // write it out
+ if (result == 0) {
+ dbout_lock(out);
+ long long off_of_leaf = out->current_off;
+ result = write_literal(out, serialized_leaf, serialized_leaf_size);
+ if (result == 0) {
+ out->translation[lbuf->blocknum.b].off = off_of_leaf;
+ out->translation[lbuf->blocknum.b].size = serialized_leaf_size;
+ seek_align_locked(out);
+ }
+ dbout_unlock(out);
+ }
+
+ // free the node
+ if (serialized_leaf) {
+ toku_free(ndd);
+ toku_free(serialized_leaf);
+ }
+ toku_ftnode_free(&lbuf->node);
+ toku_xids_destroy(&lbuf->xids);
+ toku_free(lbuf);
+
+ //printf("Nodewrite %d (%.1f%%):", progress_allocation, 100.0*progress_allocation/PROGRESS_MAX);
+ if (result == 0)
+ result = update_progress(progress_allocation, bl, "wrote node");
+
+ if (result)
+ ft_loader_set_panic(bl, result, true, 0, nullptr, nullptr);
+}
+
+static int write_translation_table (struct dbout *out, long long *off_of_translation_p) {
+ seek_align(out);
+ struct dbuf ttable;
+ dbuf_init(&ttable);
+ long long off_of_translation = out->current_off;
+ long long bt_size_on_disk = out->n_translations * 16 + 20;
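+    // Size sketch: 8 bytes (record count) + 8 bytes (the -1 "linked list" field)
+    // + 16 bytes (off, size) per translation + 4 bytes of checksum
+    // = n_translations*16 + 20.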
+ putbuf_int64(&ttable, out->n_translations); // number of records
+ putbuf_int64(&ttable, -1LL); // the linked list
+ out->translation[1].off = off_of_translation;
+ out->translation[1].size = bt_size_on_disk;
+ for (int i=0; i<out->n_translations; i++) {
+ putbuf_int64(&ttable, out->translation[i].off);
+ putbuf_int64(&ttable, out->translation[i].size);
+ }
+ unsigned int checksum = toku_x1764_memory(ttable.buf, ttable.off);
+ putbuf_int32(&ttable, checksum);
+    // pad with zeros up to a multiple of 512 bytes
+ long long encoded_length = ttable.off;
+ {
+ int nbytes_to_add = roundup_to_multiple(512, ttable.off) - encoded_length;
+ char zeros[nbytes_to_add];
+ for (int i=0; i<nbytes_to_add; i++) zeros[i]=0;
+ putbuf_bytes(&ttable, zeros, nbytes_to_add);
+ }
+ int result = ttable.error;
+ if (result == 0) {
+ invariant(bt_size_on_disk==encoded_length);
+ result = toku_os_pwrite(out->fd, ttable.buf, ttable.off, off_of_translation);
+ }
+ dbuf_destroy(&ttable);
+ *off_of_translation_p = off_of_translation;
+ return result;
+}
+
+static int write_header(
+ struct dbout* out,
+ long long translation_location_on_disk,
+ long long translation_size_on_disk) {
+
+ int result = 0;
+ size_t size = toku_serialize_ft_size(out->ft->h);
+ size_t alloced_size = roundup_to_multiple(512, size);
+ struct wbuf wbuf;
+ char *MALLOC_N_ALIGNED(512, alloced_size, buf);
+ if (buf == NULL) {
+ result = get_error_errno();
+ } else {
+ wbuf_init(&wbuf, buf, size);
+ out->ft->h->on_disk_stats = out->ft->in_memory_stats;
+ out->ft->h->on_disk_logical_rows = out->ft->in_memory_logical_rows;
+ toku_serialize_ft_to_wbuf(&wbuf, out->ft->h, translation_location_on_disk, translation_size_on_disk);
+ for (size_t i=size; i<alloced_size; i++) buf[i]=0; // initialize all those unused spots to zero
+ if (wbuf.ndone != size)
+ result = EINVAL;
+ else {
+ assert(wbuf.ndone <= alloced_size);
+ result = toku_os_pwrite(out->fd, wbuf.buf, alloced_size, 0);
+ }
+ toku_free(buf);
+ }
+ return result;
+}
+
+static int read_some_pivots (FIDX pivots_file, int n_to_read, FTLOADER bl,
+ /*out*/ DBT pivots[/*n_to_read*/])
+// pivots is an array to be filled in. The pivots array is uninitialized.
+{
+ for (int i = 0; i < n_to_read; i++)
+ pivots[i] = zero_dbt;
+
+ TOKU_FILE *pivots_stream = toku_bl_fidx2file(bl, pivots_file);
+
+ int result = 0;
+ for (int i = 0; i < n_to_read; i++) {
+ int r = bl_read_dbt(&pivots[i], pivots_stream);
+ if (r != 0) {
+ result = r;
+ break;
+ }
+ }
+ return result;
+}
+
+static void delete_pivots(DBT pivots[], int n) {
+ for (int i = 0; i < n; i++)
+ toku_free(pivots[i].data);
+ toku_free(pivots);
+}
+
+static int setup_nonleaf_block (int n_children,
+ struct subtrees_info *subtrees, FIDX pivots_file, int64_t first_child_offset_in_subtrees,
+ struct subtrees_info *next_subtrees, FIDX next_pivots_file,
+ struct dbout *out, FTLOADER bl,
+ /*out*/int64_t *blocknum,
+ /*out*/struct subtree_info **subtrees_info_p,
+ /*out*/DBT **pivots_p)
+// Do the serial part of setting up a non-leaf block.
+// Read the pivots out of the file and store them in a newly allocated array of DBTs (returned in *pivots_p). There are (n_children-1) useful pivots among these.
+// Copy the final pivot into the next_pivots file instead of returning it.
+// Copy the subtree_info entries from the subtrees structure and store them in a newly allocated array of subtree_infos (returned in *subtrees_info_p). There are n_children of these.
+// Allocate a block number and return it in *blocknum.
+// Store the blocknum in the next_subtrees structure, so it can be combined with the pivots at the next level of the tree.
+// Update n_blocks_used and n_translations.
+// This code cannot be called in parallel because of all the race conditions.
+// The actual creation of the node can be done in parallel after this work is done.
+{
+ //printf("Nonleaf has children :"); for(int i=0; i<n_children; i++) printf(" %ld", subtrees->subtrees[i].block); printf("\n");
+
+ int result = 0;
+
+ DBT *MALLOC_N(n_children, pivots);
+ if (pivots == NULL) {
+ result = get_error_errno();
+ }
+
+ if (result == 0) {
+ int r = read_some_pivots(pivots_file, n_children, bl, pivots);
+ if (r)
+ result = r;
+ }
+
+ if (result == 0) {
+ TOKU_FILE *next_pivots_stream = toku_bl_fidx2file(bl, next_pivots_file);
+ int r = bl_write_dbt(
+ &pivots[n_children - 1], next_pivots_stream, NULL, nullptr, bl);
+ if (r)
+ result = r;
+ }
+
+ if (result == 0) {
+ // The last pivot was written to the next_pivots file, so we free it now instead of returning it.
+ toku_free(pivots[n_children-1].data);
+ pivots[n_children-1] = zero_dbt;
+
+ struct subtree_info *XMALLOC_N(n_children, subtrees_array);
+ for (int i = 0; i < n_children; i++) {
+ int64_t from_blocknum = first_child_offset_in_subtrees + i;
+ subtrees_array[i] = subtrees->subtrees[from_blocknum];
+ }
+
+ int r = allocate_block(out, blocknum);
+ if (r) {
+ toku_free(subtrees_array);
+ result = r;
+ } else {
+ allocate_node(next_subtrees, *blocknum);
+
+ *pivots_p = pivots;
+ *subtrees_info_p = subtrees_array;
+ }
+ }
+
+ if (result != 0) {
+ if (pivots) {
+ delete_pivots(pivots, n_children); pivots = NULL;
+ }
+ }
+
+ return result;
+}
+
+static void write_nonleaf_node (FTLOADER bl, struct dbout *out, int64_t blocknum_of_new_node, int n_children,
+                              DBT *pivots, /* must free this array, as well as the things it points to */
+ struct subtree_info *subtree_info, int height, const DESCRIPTOR UU(desc), uint32_t UU(target_nodesize), uint32_t target_basementnodesize, enum toku_compression_method target_compression_method)
+{
+ //Nodes do not currently touch descriptors
+ invariant(height > 0);
+
+ int result = 0;
+
+ FTNODE XMALLOC(node);
+ toku_initialize_empty_ftnode(node, make_blocknum(blocknum_of_new_node), height, n_children,
+ FT_LAYOUT_VERSION, 0);
+ node->pivotkeys.create_from_dbts(pivots, n_children - 1);
+ assert(node->bp);
+ for (int i=0; i<n_children; i++) {
+ BP_BLOCKNUM(node,i) = make_blocknum(subtree_info[i].block);
+ BP_STATE(node,i) = PT_AVAIL;
+ }
+
+ FTNODE_DISK_DATA ndd = NULL;
+ if (result == 0) {
+ size_t n_bytes;
+ size_t n_uncompressed_bytes;
+ char *bytes;
+ int r;
+ r = toku_serialize_ftnode_to_memory(node, &ndd, target_basementnodesize, target_compression_method, true, true, &n_bytes, &n_uncompressed_bytes, &bytes);
+ if (r) {
+ result = r;
+ } else {
+ dbout_lock(out);
+ out->translation[blocknum_of_new_node].off = out->current_off;
+ out->translation[blocknum_of_new_node].size = n_bytes;
+ //fprintf(stderr, "Wrote internal node at %ld (%ld bytes)\n", out->current_off, n_bytes);
+ //for (uint32_t i=0; i<n_bytes; i++) { unsigned char b = bytes[i]; printf("%d:%02x (%d) ('%c')\n", i, b, b, (b>=' ' && b<128) ? b : '*'); }
+ r = write_literal(out, bytes, n_bytes);
+ if (r)
+ result = r;
+ else
+ seek_align_locked(out);
+ dbout_unlock(out);
+ toku_free(bytes);
+ }
+ }
+
+ for (int i=0; i<n_children-1; i++) {
+ toku_free(pivots[i].data);
+ }
+ for (int i=0; i<n_children; i++) {
+ destroy_nonleaf_childinfo(BNC(node,i));
+ }
+ toku_free(pivots);
+ // TODO: Should be using toku_destroy_ftnode_internals, which should be renamed to toku_ftnode_destroy
+ toku_free(node->bp);
+ node->pivotkeys.destroy();
+ toku_free(node);
+ toku_free(ndd);
+ toku_free(subtree_info);
+
+ if (result != 0)
+ ft_loader_set_panic(bl, result, true, 0, nullptr, nullptr);
+}
+
+static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, struct subtrees_info *sts, const DESCRIPTOR descriptor, uint32_t target_nodesize, uint32_t target_basementnodesize, enum toku_compression_method target_compression_method) {
+ int result = 0;
+ int height = 1;
+
+    // Watch out for the case where we saved the last pivot but didn't write any more nodes out.
+    // The trick is not to look at n_pivots, but at the number of subtrees (sts->n_subtrees).
+ while (sts->n_subtrees > 1) {
+ // If there is more than one block in blocks, then we must build another level of the tree.
+
+        // We need to create a pivots file for the pivots of the next level,
+        // and a blocks_array.
+        // So, for example:
+        // 1) we grab 16 pivots and 16 blocks,
+        // 2) we put 15 of the pivots and all 16 blocks into a non-leaf node, and
+        // 3) we put the 16th pivot into the next pivots file.
+ {
+ int r =
+ fseek(toku_bl_fidx2file(bl, pivots_fidx)->file, 0, SEEK_SET);
+ if (r != 0) {
+ return get_error_errno();
+ }
+ }
+
+ FIDX next_pivots_file;
+ {
+ int r = ft_loader_open_temp_file (bl, &next_pivots_file);
+ if (r != 0) { result = r; break; }
+ }
+
+ struct subtrees_info next_sts;
+ subtrees_info_init(&next_sts);
+ next_sts.n_subtrees = 0;
+ next_sts.n_subtrees_limit = 1;
+ XMALLOC_N(next_sts.n_subtrees_limit, next_sts.subtrees);
+
+ const int n_per_block = 15;
+ int64_t n_subtrees_used = 0;
+ while (sts->n_subtrees - n_subtrees_used >= n_per_block*2) {
+ // grab the first N_PER_BLOCK and build a node.
+ DBT *pivots;
+ int64_t blocknum_of_new_node = 0;
+ struct subtree_info *subtree_info;
+ int r = setup_nonleaf_block (n_per_block,
+ sts, pivots_fidx, n_subtrees_used,
+ &next_sts, next_pivots_file,
+ out, bl,
+ &blocknum_of_new_node, &subtree_info, &pivots);
+ if (r) {
+ result = r;
+ break;
+ } else {
+ write_nonleaf_node(bl, out, blocknum_of_new_node, n_per_block, pivots, subtree_info, height, descriptor, target_nodesize, target_basementnodesize, target_compression_method); // frees all the data structures that go into making the node.
+ n_subtrees_used += n_per_block;
+ }
+ }
+
+ int64_t n_blocks_left = sts->n_subtrees - n_subtrees_used;
+ if (result == 0) {
+            // Now we have one or two blocks at the end to handle.
+ invariant(n_blocks_left>=2);
+ if (n_blocks_left > n_per_block) {
+ // Write half the remaining blocks
+ int64_t n_first = n_blocks_left/2;
+ DBT *pivots;
+ int64_t blocknum_of_new_node;
+ struct subtree_info *subtree_info;
+ int r = setup_nonleaf_block(n_first,
+ sts, pivots_fidx, n_subtrees_used,
+ &next_sts, next_pivots_file,
+ out, bl,
+ &blocknum_of_new_node, &subtree_info, &pivots);
+ if (r) {
+ result = r;
+ } else {
+ write_nonleaf_node(bl, out, blocknum_of_new_node, n_first, pivots, subtree_info, height, descriptor, target_nodesize, target_basementnodesize, target_compression_method);
+ n_blocks_left -= n_first;
+ n_subtrees_used += n_first;
+ }
+ }
+ }
+ if (result == 0) {
+ // Write the last block.
+ DBT *pivots;
+ int64_t blocknum_of_new_node;
+ struct subtree_info *subtree_info;
+ int r = setup_nonleaf_block(n_blocks_left,
+ sts, pivots_fidx, n_subtrees_used,
+ &next_sts, next_pivots_file,
+ out, bl,
+ &blocknum_of_new_node, &subtree_info, &pivots);
+ if (r) {
+ result = r;
+ } else {
+ write_nonleaf_node(bl, out, blocknum_of_new_node, n_blocks_left, pivots, subtree_info, height, descriptor, target_nodesize, target_basementnodesize, target_compression_method);
+ n_subtrees_used += n_blocks_left;
+ }
+ }
+ if (result == 0)
+ invariant(n_subtrees_used == sts->n_subtrees);
+
+
+ if (result == 0) // pick up write_nonleaf_node errors
+ result = ft_loader_get_error(&bl->error_callback);
+
+ // Now set things up for the next iteration.
+ int r = ft_loader_fi_close(&bl->file_infos, pivots_fidx, true); if (r != 0 && result == 0) result = r;
+ r = ft_loader_fi_unlink(&bl->file_infos, pivots_fidx); if (r != 0 && result == 0) result = r;
+ pivots_fidx = next_pivots_file;
+ toku_free(sts->subtrees); sts->subtrees = NULL;
+ *sts = next_sts;
+ height++;
+
+ if (result)
+ break;
+ }
+ { int r = ft_loader_fi_close (&bl->file_infos, pivots_fidx, true); if (r != 0 && result == 0) result = r; }
+ { int r = ft_loader_fi_unlink(&bl->file_infos, pivots_fidx); if (r != 0 && result == 0) result = r; }
+ return result;
+}
+
+void ft_loader_set_fractal_workers_count_from_c(FTLOADER bl) {
+ ft_loader_set_fractal_workers_count (bl);
+}
+
+
diff --git a/storage/tokudb/PerconaFT/ft/loader/loader.h b/storage/tokudb/PerconaFT/ft/loader/loader.h
new file mode 100644
index 00000000..cea2e8df
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/loader/loader.h
@@ -0,0 +1,83 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "toku_portability.h"
+#include "ft/txn/txn.h"
+#include "ft/cachetable/cachetable.h"
+#include "ft/comparator.h"
+#include "ft/ft-ops.h"
+
+// The loader callbacks are C functions and need to be defined as such
+
+typedef void (*ft_loader_error_func)(DB *, int which_db, int err, DBT *key, DBT *val, void *extra);
+
+typedef int (*ft_loader_poll_func)(void *extra, float progress);
+
+typedef struct ft_loader_s *FTLOADER;
+
+int toku_ft_loader_open (FTLOADER *bl,
+ CACHETABLE cachetable,
+ generate_row_for_put_func g,
+ DB *src_db,
+ int N,
+ FT_HANDLE ft_hs[/*N*/], DB* dbs[/*N*/],
+ const char * new_fnames_in_env[/*N*/],
+ ft_compare_func bt_compare_functions[/*N*/],
+ const char *temp_file_template,
+ LSN load_lsn,
+ TOKUTXN txn,
+ bool reserve_memory,
+ uint64_t reserve_memory_size,
+ bool compress_intermediates,
+ bool allow_puts);
+
+int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val);
+
+int toku_ft_loader_close (FTLOADER bl,
+ ft_loader_error_func error_callback, void *error_callback_extra,
+ ft_loader_poll_func poll_callback, void *poll_callback_extra);
+
+int toku_ft_loader_abort(FTLOADER bl,
+ bool is_error);
+
+// For test purposes only
+void toku_ft_loader_set_size_factor(uint32_t factor);
+
+size_t ft_loader_leafentry_size(size_t key_size, size_t val_size, TXNID xid);
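+
+// Expected call pattern (a sketch, not a contract): toku_ft_loader_open(), then
+// any number of toku_ft_loader_put() calls, then toku_ft_loader_close() to build
+// the dictionaries, or toku_ft_loader_abort() to discard the work.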
diff --git a/storage/tokudb/PerconaFT/ft/loader/pqueue.cc b/storage/tokudb/PerconaFT/ft/loader/pqueue.cc
new file mode 100644
index 00000000..950ab259
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/loader/pqueue.cc
@@ -0,0 +1,181 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <toku_portability.h>
+#include "toku_os.h"
+#include "ft-internal.h"
+#include "loader/loader-internal.h"
+#include "loader/pqueue.h"
+
+#define pqueue_left(i) ((i) << 1)
+#define pqueue_right(i) (((i) << 1) + 1)
+#define pqueue_parent(i) ((i) >> 1)
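+// 1-based heap indexing (element 0 is unused): for i == 3, for example, the left
+// child is at index 6, the right child at 7, and the parent at 1.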
+
+int pqueue_init(pqueue_t **result, size_t n, int which_db, DB *db, ft_compare_func compare, struct error_callback_s *err_callback)
+{
+ pqueue_t *MALLOC(q);
+ if (!q) {
+ return get_error_errno();
+ }
+
+ /* Need to allocate n+1 elements since element 0 isn't used. */
+ MALLOC_N(n + 1, q->d);
+ if (!q->d) {
+ int r = get_error_errno();
+ toku_free(q);
+ return r;
+ }
+ q->size = 1;
+ q->avail = q->step = (n+1); /* see comment above about n+1 */
+
+ q->which_db = which_db;
+ q->db = db;
+ q->compare = compare;
+ q->dup_error = 0;
+
+ q->error_callback = err_callback;
+
+ *result = q;
+ return 0;
+}
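+
+// Rough usage sketch (how the merge step is expected to drive this queue):
+// pqueue_init() once, pqueue_insert() one node per input run, then repeatedly
+// pqueue_pop() the smallest key and refill from whichever input the popped node
+// came from (presumably what the pqueue_node_t::i field identifies); finish with
+// pqueue_free(). Both insert and pop return DB_KEYEXIST once a duplicate key has
+// been detected.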
+
+void pqueue_free(pqueue_t *q)
+{
+ toku_free(q->d);
+ toku_free(q);
+}
+
+
+size_t pqueue_size(pqueue_t *q)
+{
+ /* queue element 0 exists but doesn't count since it isn't used. */
+ return (q->size - 1);
+}
+
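+// Returns nonzero when next_key compares >= curr_key; as a side effect it flags a
+// duplicate-key error (reported via the error callback as DB_KEYEXIST) when the
+// two keys compare equal.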
+static int pqueue_compare(pqueue_t *q, DBT *next_key, DBT *next_val, DBT *curr_key)
+{
+ int r = q->compare(q->db, next_key, curr_key);
+ if ( r == 0 ) { // duplicate key : next_key == curr_key
+ q->dup_error = 1;
+ if (q->error_callback)
+ ft_loader_set_error_and_callback(q->error_callback, DB_KEYEXIST, q->db, q->which_db, next_key, next_val);
+ }
+ return ( r > -1 );
+}
+
+static void pqueue_bubble_up(pqueue_t *q, size_t i)
+{
+ size_t parent_node;
+ pqueue_node_t *moving_node = q->d[i];
+ DBT *moving_key = moving_node->key;
+
+ for (parent_node = pqueue_parent(i);
+ ((i > 1) && pqueue_compare(q, q->d[parent_node]->key, q->d[parent_node]->val, moving_key));
+ i = parent_node, parent_node = pqueue_parent(i))
+ {
+ q->d[i] = q->d[parent_node];
+ }
+
+ q->d[i] = moving_node;
+}
+
+
+static size_t pqueue_maxchild(pqueue_t *q, size_t i)
+{
+ size_t child_node = pqueue_left(i);
+
+ if (child_node >= q->size)
+ return 0;
+
+ if ((child_node+1) < q->size &&
+ pqueue_compare(q, q->d[child_node]->key, q->d[child_node]->val, q->d[child_node+1]->key))
+ child_node++; /* use right child instead of left */
+
+ return child_node;
+}
+
+
+static void pqueue_percolate_down(pqueue_t *q, size_t i)
+{
+ size_t child_node;
+ pqueue_node_t *moving_node = q->d[i];
+ DBT *moving_key = moving_node->key;
+ DBT *moving_val = moving_node->val;
+
+ while ((child_node = pqueue_maxchild(q, i)) &&
+ pqueue_compare(q, moving_key, moving_val, q->d[child_node]->key))
+ {
+ q->d[i] = q->d[child_node];
+ i = child_node;
+ }
+
+ q->d[i] = moving_node;
+}
+
+
+int pqueue_insert(pqueue_t *q, pqueue_node_t *d)
+{
+ size_t i;
+
+ if (!q) return 1;
+ if (q->size >= q->avail) return 1;
+
+ /* insert item */
+ i = q->size++;
+ q->d[i] = d;
+ pqueue_bubble_up(q, i);
+
+ if ( q->dup_error ) return DB_KEYEXIST;
+ return 0;
+}
+
+int pqueue_pop(pqueue_t *q, pqueue_node_t **d)
+{
+ if (!q || q->size == 1) {
+ *d = NULL;
+ return 0;
+ }
+
+ *d = q->d[1];
+ q->d[1] = q->d[--q->size];
+ pqueue_percolate_down(q, 1);
+
+ if ( q->dup_error ) return DB_KEYEXIST;
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/loader/pqueue.h b/storage/tokudb/PerconaFT/ft/loader/pqueue.h
new file mode 100644
index 00000000..0b37e24f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/loader/pqueue.h
@@ -0,0 +1,68 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+typedef struct ft_pqueue_node_t
+{
+ DBT *key;
+ DBT *val;
+ int i;
+} pqueue_node_t;
+
+typedef struct ft_pqueue_t
+{
+ size_t size;
+ size_t avail;
+ size_t step;
+
+ int which_db;
+ DB *db; // needed for compare function
+ ft_compare_func compare;
+ pqueue_node_t **d;
+ int dup_error;
+
+ struct error_callback_s *error_callback;
+
+} pqueue_t;
+
+int pqueue_init(pqueue_t **result, size_t n, int which_db, DB *db, ft_compare_func compare, struct error_callback_s *err_callback);
+void pqueue_free(pqueue_t *q);
+size_t pqueue_size(pqueue_t *q);
+int pqueue_insert(pqueue_t *q, pqueue_node_t *d);
+int pqueue_pop(pqueue_t *q, pqueue_node_t **d);
diff --git a/storage/tokudb/PerconaFT/ft/logger/log-internal.h b/storage/tokudb/PerconaFT/ft/logger/log-internal.h
new file mode 100644
index 00000000..be19e134
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/log-internal.h
@@ -0,0 +1,225 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <dirent.h>
+
+#include "portability/toku_list.h"
+#include "portability/toku_pthread.h"
+#include "ft/ft-internal.h"
+#include "ft/logger/log.h"
+#include "ft/logger/logfilemgr.h"
+#include "ft/txn/txn.h"
+#include "ft/txn/txn_manager.h"
+#include "ft/txn/rollback_log_node_cache.h"
+
+#include "util/memarena.h"
+#include "util/omt.h"
+
+using namespace toku;
+// Locking for the logger
+// For most purposes we use the big ydb lock.
+// To log: grab the buf lock
+// If the buf would overflow, then grab the file lock, swap file&buf, release buf lock, write the file, write the entry, release the file lock
+// else append to buf & release lock
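+//
+// A pseudocode sketch of the protocol described above (not the literal code):
+//   ml_lock(&logger->input_lock);
+//   if (the entry would overflow inbuf) {
+//       acquire output permission; swap inbuf and outbuf; ml_unlock(input);
+//       write outbuf to the log file; append the entry; release output permission;
+//   } else {
+//       append the entry to inbuf; ml_unlock(&logger->input_lock);
+//   }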
+
+#define LOGGER_MIN_BUF_SIZE (1<<24)
+
+// TODO: Remove mylock, it has no value
+struct mylock {
+ toku_mutex_t lock;
+};
+
+static inline void ml_init(struct mylock *l) {
+ toku_mutex_init(*log_internal_lock_mutex_key, &l->lock, nullptr);
+}
+// TODO: source location info might have to be pulled up one caller
+// to be useful
+static inline void ml_lock(struct mylock *l) { toku_mutex_lock(&l->lock); }
+static inline void ml_unlock(struct mylock *l) {
+ toku_mutex_unlock(&l->lock);
+}
+static inline void ml_destroy(struct mylock *l) {
+ toku_mutex_destroy(&l->lock);
+}
+
+struct logbuf {
+ int n_in_buf;
+ int buf_size;
+ char *buf;
+ LSN max_lsn_in_buf;
+};
+
+struct tokulogger {
+ struct mylock input_lock;
+
+ toku_mutex_t output_condition_lock; // if you need both this lock and input_lock, acquire the output_lock first, then input_lock. More typical is to get the output_is_available condition to be false, and then acquire the input_lock.
+ toku_cond_t output_condition; //
+ bool output_is_available; // this is part of the predicate for the output condition. It's true if no thread is modifying the output (either doing an fsync or otherwise fiddling with the output).
+
+ bool is_open;
+ bool write_log_files;
+ bool trim_log_files; // for test purposes
+ char *directory; // file system directory
+ DIR *dir; // descriptor for directory
+ int fd;
+ CACHETABLE ct;
+ int lg_max; // The size of the single file in the log. Default is 100MB.
+
+ // To access these, you must have the input lock
+ LSN lsn; // the next available lsn
+ struct logbuf inbuf; // data being accumulated for the write
+
+ // To access these, you must have the output condition lock.
+ LSN written_lsn; // the last lsn written
+ LSN fsynced_lsn; // What is the LSN of the highest fsynced log entry (accessed only while holding the output lock, and updated only when the output lock and output permission are held)
+ LSN last_completed_checkpoint_lsn; // What is the LSN of the most recent completed checkpoint.
+ long long next_log_file_number;
+ struct logbuf outbuf; // data being written to the file
+ int n_in_file; // The amount of data in the current file
+
+ // To access the logfilemgr you must have the output condition lock.
+ TOKULOGFILEMGR logfilemgr;
+
+ uint32_t write_block_size; // How big should the blocks be written to various logs?
+
+ uint64_t num_writes_to_disk; // how many times did we write to disk?
+ uint64_t bytes_written_to_disk; // how many bytes have been written to disk?
+ tokutime_t time_spent_writing_to_disk; // how much tokutime did we spend writing to disk?
+ uint64_t num_wait_buf_long; // how many times we waited >= 100ms for the in buf
+
+ CACHEFILE rollback_cachefile;
+ rollback_log_node_cache rollback_cache;
+ TXN_MANAGER txn_manager;
+};
+
+int toku_logger_find_next_unused_log_file(const char *directory, long long *result);
+int toku_logger_find_logfiles (const char *directory, char ***resultp, int *n_logfiles);
+void toku_logger_free_logfiles (char **logfiles, int n_logfiles);
+
+static inline int
+txn_has_current_rollback_log(TOKUTXN txn) {
+ return txn->roll_info.current_rollback.b != ROLLBACK_NONE.b;
+}
+
+static inline int
+txn_has_spilled_rollback_logs(TOKUTXN txn) {
+ return txn->roll_info.spilled_rollback_tail.b != ROLLBACK_NONE.b;
+}
+
+struct txninfo {
+ uint64_t rollentry_raw_count; // the total count of every byte in the transaction and all its children.
+ uint32_t num_fts;
+ FT *open_fts;
+ bool force_fsync_on_commit; //This transaction NEEDS an fsync once (if) it commits. (commit means root txn)
+ uint64_t num_rollback_nodes;
+ uint64_t num_rollentries;
+ BLOCKNUM spilled_rollback_head;
+ BLOCKNUM spilled_rollback_tail;
+ BLOCKNUM current_rollback;
+};
+
+static inline int toku_logsizeof_uint8_t (uint32_t v __attribute__((__unused__))) {
+ return 1;
+}
+
+static inline int toku_logsizeof_uint32_t (uint32_t v __attribute__((__unused__))) {
+ return 4;
+}
+
+static inline int toku_logsizeof_uint64_t (uint32_t v __attribute__((__unused__))) {
+ return 8;
+}
+
+static inline int toku_logsizeof_bool (uint32_t v __attribute__((__unused__))) {
+ return 1;
+}
+
+static inline int toku_logsizeof_FILENUM (FILENUM v __attribute__((__unused__))) {
+ return 4;
+}
+
+static inline int toku_logsizeof_DISKOFF (DISKOFF v __attribute__((__unused__))) {
+ return 8;
+}
+static inline int toku_logsizeof_BLOCKNUM (BLOCKNUM v __attribute__((__unused__))) {
+ return 8;
+}
+
+static inline int toku_logsizeof_LSN (LSN lsn __attribute__((__unused__))) {
+ return 8;
+}
+
+static inline int toku_logsizeof_TXNID (TXNID txnid __attribute__((__unused__))) {
+ return 8;
+}
+
+static inline int toku_logsizeof_TXNID_PAIR (TXNID_PAIR txnid __attribute__((__unused__))) {
+ return 16;
+}
+
+static inline int toku_logsizeof_XIDP (XIDP xid) {
+ assert(0<=xid->gtrid_length && xid->gtrid_length<=64);
+ assert(0<=xid->bqual_length && xid->bqual_length<=64);
+ return xid->gtrid_length
+ + xid->bqual_length
+ + 4 // formatID
+ + 1 // gtrid_length
+ + 1; // bqual_length
+}
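+// (toku_logsizeof_XIDP example: gtrid_length 10 and bqual_length 6 serialize to
+// 10 + 6 + 4 + 1 + 1 = 22 bytes.)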
+
+static inline int toku_logsizeof_FILENUMS (FILENUMS fs) {
+ static const FILENUM f = {0}; //fs could have .num==0 and then we cannot dereference
+ return 4 + fs.num * toku_logsizeof_FILENUM(f);
+}
+
+static inline int toku_logsizeof_BYTESTRING (BYTESTRING bs) {
+ return 4+bs.len;
+}
+
+static inline char *fixup_fname(BYTESTRING *f) {
+ assert(f->len>0);
+ char *fname = (char*)toku_xmalloc(f->len+1);
+ memcpy(fname, f->data, f->len);
+ fname[f->len]=0;
+ return fname;
+}
diff --git a/storage/tokudb/PerconaFT/ft/logger/log.h b/storage/tokudb/PerconaFT/ft/logger/log.h
new file mode 100644
index 00000000..2c30247b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/log.h
@@ -0,0 +1,69 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+#include <errno.h>
+
+#include "portability/memory.h"
+#include "portability/toku_portability.h"
+
+#include "ft/logger/recover.h"
+#include "ft/txn/rollback.h"
+#include "ft/txn/txn.h"
+#include "util/bytestring.h"
+
+struct roll_entry;
+
+static inline void toku_free_TXNID(TXNID txnid __attribute__((__unused__))) {}
+static inline void toku_free_TXNID_PAIR(TXNID_PAIR txnid __attribute__((__unused__))) {}
+
+static inline void toku_free_LSN(LSN lsn __attribute__((__unused__))) {}
+static inline void toku_free_uint64_t(uint64_t u __attribute__((__unused__))) {}
+static inline void toku_free_uint32_t(uint32_t u __attribute__((__unused__))) {}
+static inline void toku_free_uint8_t(uint8_t u __attribute__((__unused__))) {}
+static inline void toku_free_FILENUM(FILENUM u __attribute__((__unused__))) {}
+static inline void toku_free_BLOCKNUM(BLOCKNUM u __attribute__((__unused__))) {}
+static inline void toku_free_bool(bool u __attribute__((__unused__))) {}
+static inline void toku_free_XIDP(XIDP xidp) { toku_free(xidp); }
+static inline void toku_free_BYTESTRING(BYTESTRING val) { toku_free(val.data); }
+static inline void toku_free_FILENUMS(FILENUMS val) { toku_free(val.filenums); }
+
+int toku_maybe_upgrade_log (const char *env_dir, const char *log_dir, LSN * lsn_of_clean_shutdown, bool * upgrade_in_progress);
+uint64_t toku_log_upgrade_get_footprint(void);
diff --git a/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc b/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc
new file mode 100644
index 00000000..3da97063
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/log_upgrade.cc
@@ -0,0 +1,295 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <ft/log_header.h>
+
+#include "log-internal.h"
+#include "logger/logcursor.h"
+#include "cachetable/checkpoint.h"
+
+static uint64_t footprint = 0; // for debug and accountability
+
+uint64_t
+toku_log_upgrade_get_footprint(void) {
+ return footprint;
+}
+
+// Footprint concept here is that each function increments a different decimal digit.
+// The cumulative total shows the path taken for the upgrade.
+// Each function must have a single return for this to work.
+#define FOOTPRINT(x) function_footprint=(x*footprint_increment)
+#define FOOTPRINTSETUP(increment) uint64_t function_footprint = 0; uint64_t footprint_increment=increment;
+#define FOOTPRINTCAPTURE footprint+=function_footprint;
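+// Example: a function that does FOOTPRINTSETUP(100) and then FOOTPRINT(2) sets its
+// function_footprint to 200; FOOTPRINTCAPTURE adds that 200 into the global
+// footprint, so each nesting level reports through a different decimal digit.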
+
+
+// return 0 if clean shutdown, TOKUDB_UPGRADE_FAILURE if not clean shutdown
+static int
+verify_clean_shutdown_of_log_version_current(const char *log_dir, LSN * last_lsn, TXNID *last_xid) {
+ int rval = TOKUDB_UPGRADE_FAILURE;
+ TOKULOGCURSOR cursor = NULL;
+ int r;
+ FOOTPRINTSETUP(100);
+
+ FOOTPRINT(1);
+
+ r = toku_logcursor_create(&cursor, log_dir);
+ assert(r == 0);
+ struct log_entry *le = NULL;
+ r = toku_logcursor_last(cursor, &le);
+ if (r == 0) {
+ FOOTPRINT(2);
+ if (le->cmd==LT_shutdown) {
+ LSN lsn = le->u.shutdown.lsn;
+ if (last_lsn) {
+ *last_lsn = lsn;
+ }
+ if (last_xid) {
+ *last_xid = le->u.shutdown.last_xid;
+ }
+ rval = 0;
+ }
+ }
+ r = toku_logcursor_destroy(&cursor);
+ assert(r == 0);
+ FOOTPRINTCAPTURE;
+ return rval;
+}
+
+
+// return 0 if clean shutdown, TOKUDB_UPGRADE_FAILURE if not clean shutdown
+static int
+verify_clean_shutdown_of_log_version_old(const char *log_dir, LSN * last_lsn, TXNID *last_xid, uint32_t version) {
+ int rval = TOKUDB_UPGRADE_FAILURE;
+ int r;
+ FOOTPRINTSETUP(10);
+
+ FOOTPRINT(1);
+
+ int n_logfiles;
+ char **logfiles;
+ r = toku_logger_find_logfiles(log_dir, &logfiles, &n_logfiles);
+ if (r!=0) return r;
+
+ char *basename;
+ TOKULOGCURSOR cursor;
+ struct log_entry *entry;
+ // Only look at newest log
+ // basename points to first char after last / in file pathname
+ basename = strrchr(logfiles[n_logfiles-1], '/') + 1;
+ uint32_t version_name;
+ long long index = -1;
+ r = sscanf(basename, "log%lld.tokulog%u", &index, &version_name);
+ assert(r==2); // found index and version
+ invariant(version_name == version);
+ assert(version>=TOKU_LOG_MIN_SUPPORTED_VERSION);
+ assert(version< TOKU_LOG_VERSION); //Must be old
+ // find last LSN
+ r = toku_logcursor_create_for_file(&cursor, log_dir, basename);
+ if (r != 0) {
+ goto cleanup_no_logcursor;
+ }
+ r = toku_logcursor_last(cursor, &entry);
+ if (r != 0) {
+ goto cleanup;
+ }
+ FOOTPRINT(2);
+ //TODO: Remove this special case once FT_LAYOUT_VERSION_19 (and older) are not supported.
+ if (version <= FT_LAYOUT_VERSION_19) {
+ if (entry->cmd==LT_shutdown_up_to_19) {
+ LSN lsn = entry->u.shutdown_up_to_19.lsn;
+ if (last_lsn) {
+ *last_lsn = lsn;
+ }
+ if (last_xid) {
+ // Use lsn as last_xid.
+ *last_xid = lsn.lsn;
+ }
+ rval = 0;
+ }
+ }
+ else if (entry->cmd==LT_shutdown) {
+ LSN lsn = entry->u.shutdown.lsn;
+ if (last_lsn) {
+ *last_lsn = lsn;
+ }
+ if (last_xid) {
+ *last_xid = entry->u.shutdown.last_xid;
+ }
+ rval = 0;
+ }
+cleanup:
+ r = toku_logcursor_destroy(&cursor);
+ assert(r == 0);
+cleanup_no_logcursor:
+ toku_logger_free_logfiles(logfiles, n_logfiles);
+ FOOTPRINTCAPTURE;
+ return rval;
+}
+
+
+static int
+verify_clean_shutdown_of_log_version(const char *log_dir, uint32_t version, LSN *last_lsn, TXNID *last_xid) {
+ // return 0 if clean shutdown, TOKUDB_UPGRADE_FAILURE if not clean shutdown
+ int r = 0;
+ FOOTPRINTSETUP(1000);
+
+ if (version < TOKU_LOG_VERSION) {
+ FOOTPRINT(1);
+ r = verify_clean_shutdown_of_log_version_old(log_dir, last_lsn, last_xid, version);
+ }
+ else {
+ FOOTPRINT(2);
+ assert(version == TOKU_LOG_VERSION);
+ r = verify_clean_shutdown_of_log_version_current(log_dir, last_lsn, last_xid);
+ }
+ FOOTPRINTCAPTURE;
+ return r;
+}
+
+
+// Actually create a log file of the current version, making the environment be of the current version.
+// TODO: can't fail
+static int
+upgrade_log(const char *env_dir, const char *log_dir, LSN last_lsn, TXNID last_xid) { // the real deal
+ int r;
+ FOOTPRINTSETUP(10000);
+
+ LSN initial_lsn = last_lsn;
+ initial_lsn.lsn++;
+ CACHETABLE ct;
+ TOKULOGGER logger;
+
+ FOOTPRINT(1);
+
+ { //Create temporary environment
+ toku_cachetable_create(&ct, 1<<25, initial_lsn, NULL);
+ toku_cachetable_set_env_dir(ct, env_dir);
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+ toku_logger_set_cachetable(logger, ct);
+ r = toku_logger_open_with_last_xid(log_dir, logger, last_xid);
+ assert(r==0);
+ }
+ { //Checkpoint
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, UPGRADE_CHECKPOINT); //fsyncs log dir
+ assert(r == 0);
+ }
+ { //Close cachetable and logger
+ toku_logger_shutdown(logger);
+ toku_cachetable_close(&ct);
+ r = toku_logger_close(&logger);
+ assert(r==0);
+ }
+ {
+ r = verify_clean_shutdown_of_log_version(log_dir, TOKU_LOG_VERSION, NULL, NULL);
+ assert(r==0);
+ }
+ FOOTPRINTCAPTURE;
+ return 0;
+}
+
+// If log on disk is old (environment is old) and clean shutdown, then create log of current version,
+// which will make the environment of the current version (and delete the old logs).
+int
+toku_maybe_upgrade_log(const char *env_dir, const char *log_dir, LSN * lsn_of_clean_shutdown, bool * upgrade_in_progress) {
+ int r;
+ int lockfd = -1;
+ FOOTPRINTSETUP(100000);
+
+ footprint = 0;
+ *upgrade_in_progress = false; // set true only if all criteria are met and we're actually doing an upgrade
+
+ FOOTPRINT(1);
+ r = toku_recover_lock(log_dir, &lockfd);
+ if (r != 0) {
+ goto cleanup_no_lock;
+ }
+ FOOTPRINT(2);
+ assert(log_dir);
+ assert(env_dir);
+
+ uint32_t version_of_logs_on_disk;
+ bool found_any_logs;
+ r = toku_get_version_of_logs_on_disk(log_dir, &found_any_logs, &version_of_logs_on_disk);
+ if (r != 0) {
+ goto cleanup;
+ }
+ FOOTPRINT(3);
+ if (!found_any_logs)
+ r = 0; //No logs means no logs to upgrade.
+ else if (version_of_logs_on_disk > TOKU_LOG_VERSION)
+ r = TOKUDB_DICTIONARY_TOO_NEW;
+ else if (version_of_logs_on_disk < TOKU_LOG_MIN_SUPPORTED_VERSION)
+ r = TOKUDB_DICTIONARY_TOO_OLD;
+ else if (version_of_logs_on_disk == TOKU_LOG_VERSION)
+ r = 0; //Logs are up to date
+ else {
+ FOOTPRINT(4);
+ LSN last_lsn = ZERO_LSN;
+ TXNID last_xid = TXNID_NONE;
+ r = verify_clean_shutdown_of_log_version(log_dir, version_of_logs_on_disk, &last_lsn, &last_xid);
+ if (r != 0) {
+ if (version_of_logs_on_disk >= TOKU_LOG_VERSION_25 &&
+ version_of_logs_on_disk <= TOKU_LOG_VERSION_29 &&
+ TOKU_LOG_VERSION_29 == TOKU_LOG_VERSION) {
+ r = 0; // can do recovery on dirty shutdown
+ } else {
+ fprintf(stderr, "Cannot upgrade PerconaFT version %d database.", version_of_logs_on_disk);
+ fprintf(stderr, " Previous improper shutdown detected.\n");
+ }
+ goto cleanup;
+ }
+ FOOTPRINT(5);
+ *lsn_of_clean_shutdown = last_lsn;
+ *upgrade_in_progress = true;
+ r = upgrade_log(env_dir, log_dir, last_lsn, last_xid);
+ }
+cleanup:
+ {
+ //Clean up
+ int rc;
+ rc = toku_recover_unlock(lockfd);
+ if (r==0) r = rc;
+ }
+cleanup_no_lock:
+ FOOTPRINTCAPTURE;
+ return r;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/logger/logcursor.cc b/storage/tokudb/PerconaFT/ft/logger/logcursor.cc
new file mode 100644
index 00000000..07f57220
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/logcursor.cc
@@ -0,0 +1,497 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "log-internal.h"
+#include "logger/logcursor.h"
+#include <limits.h>
+#include <unistd.h>
+
+enum lc_direction { LC_FORWARD, LC_BACKWARD, LC_FIRST, LC_LAST };
+
+struct toku_logcursor {
+ char *logdir; // absolute directory name
+ char **logfiles;
+ int n_logfiles;
+ int cur_logfiles_index;
+ FILE *cur_fp;
+ size_t buffer_size;
+ void *buffer;
+ bool is_open;
+ struct log_entry entry;
+ bool entry_valid;
+ LSN cur_lsn;
+ enum lc_direction last_direction;
+};
+
+#define LC_LSN_ERROR (DB_RUNRECOVERY)
+
+void toku_logcursor_print(TOKULOGCURSOR lc) {
+ printf("lc = %p\n", lc);
+ printf(" logdir = %s\n", lc->logdir);
+ printf(" logfiles = %p\n", lc->logfiles);
+ for (int lf=0;lf<lc->n_logfiles;lf++) {
+ printf(" logfile[%d] = %p (%s)\n", lf, lc->logfiles[lf], lc->logfiles[lf]);
+ }
+ printf(" n_logfiles = %d\n", lc->n_logfiles);
+ printf(" cur_logfiles_index = %d\n", lc->cur_logfiles_index);
+ printf(" cur_fp = %p\n", lc->cur_fp);
+ printf(" cur_lsn = %" PRIu64 "\n", lc->cur_lsn.lsn);
+ printf(" last_direction = %d\n", (int) lc->last_direction);
+}
+
+static int lc_close_cur_logfile(TOKULOGCURSOR lc) {
+ int r=0;
+ if ( lc->is_open ) {
+ r = fclose(lc->cur_fp);
+ assert(0==r);
+ lc->is_open = false;
+ }
+ return 0;
+}
+
+static toku_off_t lc_file_len(const char *name) {
+ toku_struct_stat buf;
+ int r = toku_stat(name, &buf, *tokudb_file_data_key);
+ assert(r == 0);
+ return buf.st_size;
+}
+
+// Cat the file and throw away the contents. This brings the file into the file system cache
+// and makes subsequent accesses to it fast. The intention is to speed up backward scans of the
+// file.
+static void lc_catfile(const char *fname, void *buffer, size_t buffer_size) {
+ int fd = open(fname, O_RDONLY);
+ if (fd >= 0) {
+ while (1) {
+ ssize_t r = read(fd, buffer, buffer_size);
+ if ((int)r <= 0)
+ break;
+ }
+ close(fd);
+ }
+}
+
+static int lc_open_logfile(TOKULOGCURSOR lc, int index) {
+ int r=0;
+ assert( !lc->is_open );
+ if( index == -1 || index >= lc->n_logfiles) return DB_NOTFOUND;
+ lc_catfile(lc->logfiles[index], lc->buffer, lc->buffer_size);
+ lc->cur_fp = fopen(lc->logfiles[index], "rb");
+ if ( lc->cur_fp == NULL )
+ return DB_NOTFOUND;
+ r = setvbuf(lc->cur_fp, (char *) lc->buffer, _IOFBF, lc->buffer_size);
+ assert(r == 0);
+ // position fp past header, ignore 0 length file (t:2384)
+ unsigned int version=0;
+ if ( lc_file_len(lc->logfiles[index]) >= 12 ) {
+ r = toku_read_logmagic(lc->cur_fp, &version);
+ if (r!=0)
+ return DB_BADFORMAT;
+ if (version < TOKU_LOG_MIN_SUPPORTED_VERSION || version > TOKU_LOG_VERSION)
+ return DB_BADFORMAT;
+ }
+ // mark as open
+ lc->is_open = true;
+ return r;
+}
+
+static int lc_check_lsn(TOKULOGCURSOR lc, int dir) {
+ int r=0;
+ LSN lsn = toku_log_entry_get_lsn(&(lc->entry));
+ if (((dir == LC_FORWARD) && ( lsn.lsn != lc->cur_lsn.lsn + 1 )) ||
+ ((dir == LC_BACKWARD) && ( lsn.lsn != lc->cur_lsn.lsn - 1 ))) {
+// int index = lc->cur_logfiles_index;
+// fprintf(stderr, "Bad LSN: %d %s direction = %d, lsn.lsn = %" PRIu64 ", cur_lsn.lsn=%" PRIu64 "\n",
+// index, lc->logfiles[index], dir, lsn.lsn, lc->cur_lsn.lsn);
+ if (tokuft_recovery_trace)
+ printf("DB_RUNRECOVERY: %s:%d r=%d\n", __FUNCTION__, __LINE__, 0);
+ return LC_LSN_ERROR;
+ }
+ lc->cur_lsn.lsn = lsn.lsn;
+ return r;
+}
+
+// toku_logcursor_create()
+// - returns a pointer to a logcursor
+
+static int lc_create(TOKULOGCURSOR *lc, const char *log_dir) {
+
+ // malloc a cursor
+ TOKULOGCURSOR cursor = (TOKULOGCURSOR) toku_xmalloc(sizeof(struct toku_logcursor));
+ // find logfiles in logdir
+ cursor->is_open = false;
+ cursor->cur_logfiles_index = 0;
+ cursor->entry_valid = false;
+ cursor->buffer_size = 1<<20; // use a 1MB stream buffer (setvbuf)
+    cursor->buffer = toku_malloc(cursor->buffer_size); // it does not matter if this allocation fails
+ // cursor->logdir must be an absolute path
+ if (toku_os_is_absolute_name(log_dir)) {
+ cursor->logdir = (char *) toku_xmalloc(strlen(log_dir)+1);
+ sprintf(cursor->logdir, "%s", log_dir);
+ } else {
+ char cwdbuf[PATH_MAX];
+ char *cwd = getcwd(cwdbuf, PATH_MAX);
+ assert(cwd);
+ cursor->logdir = (char *) toku_xmalloc(strlen(cwd)+strlen(log_dir)+2);
+ sprintf(cursor->logdir, "%s/%s", cwd, log_dir);
+ }
+ cursor->logfiles = NULL;
+ cursor->n_logfiles = 0;
+ cursor->cur_fp = NULL;
+ cursor->cur_lsn.lsn=0;
+ cursor->last_direction=LC_FIRST;
+
+ *lc = cursor;
+ return 0;
+}
+
+static int lc_fix_bad_logfile(TOKULOGCURSOR lc);
+
+int toku_logcursor_create(TOKULOGCURSOR *lc, const char *log_dir) {
+ TOKULOGCURSOR cursor;
+ int r = lc_create(&cursor, log_dir);
+ if ( r!=0 )
+ return r;
+
+ r = toku_logger_find_logfiles(cursor->logdir, &(cursor->logfiles), &(cursor->n_logfiles));
+ if (r!=0) {
+ toku_logcursor_destroy(&cursor);
+ } else {
+ *lc = cursor;
+ }
+ return r;
+}
+
+int toku_logcursor_create_for_file(TOKULOGCURSOR *lc, const char *log_dir, const char *log_file) {
+ int r = lc_create(lc, log_dir);
+ if ( r!=0 )
+ return r;
+
+ TOKULOGCURSOR cursor = *lc;
+ int fullnamelen = strlen(cursor->logdir) + strlen(log_file) + 3;
+ char *XMALLOC_N(fullnamelen, log_file_fullname);
+ sprintf(log_file_fullname, "%s/%s", cursor->logdir, log_file);
+
+ cursor->n_logfiles=1;
+
+ char **XMALLOC(logfiles);
+ cursor->logfiles = logfiles;
+ cursor->logfiles[0] = log_file_fullname;
+ *lc = cursor;
+ return 0;
+}
+
+int toku_logcursor_destroy(TOKULOGCURSOR *lc) {
+ int r=0;
+ if ( *lc ) {
+ if ( (*lc)->entry_valid ) {
+ toku_log_free_log_entry_resources(&((*lc)->entry));
+ (*lc)->entry_valid = false;
+ }
+ r = lc_close_cur_logfile(*lc);
+ toku_logger_free_logfiles((*lc)->logfiles, (*lc)->n_logfiles);
+ if ( (*lc)->logdir ) toku_free((*lc)->logdir);
+ if ( (*lc)->buffer ) toku_free((*lc)->buffer);
+ toku_free(*lc);
+ *lc = NULL;
+ }
+ return r;
+}
+
+static int lc_log_read(TOKULOGCURSOR lc)
+{
+ int r = toku_log_fread(lc->cur_fp, &(lc->entry));
+ while ( r == EOF ) {
+ // move to next file
+ r = lc_close_cur_logfile(lc);
+ if (r!=0) return r;
+ if ( lc->cur_logfiles_index == lc->n_logfiles-1) return DB_NOTFOUND;
+ lc->cur_logfiles_index++;
+ r = lc_open_logfile(lc, lc->cur_logfiles_index);
+ if (r!=0) return r;
+ r = toku_log_fread(lc->cur_fp, &(lc->entry));
+ }
+ if (r!=0) {
+ toku_log_free_log_entry_resources(&(lc->entry));
+ time_t tnow = time(NULL);
+ if (r==DB_BADFORMAT) {
+ fprintf(stderr, "%.24s PerconaFT bad log format in %s\n", ctime(&tnow), lc->logfiles[lc->cur_logfiles_index]);
+ }
+ else {
+ fprintf(stderr, "%.24s PerconaFT unexpected log format error '%s' in %s\n", ctime(&tnow), strerror(r), lc->logfiles[lc->cur_logfiles_index]);
+ }
+ }
+ return r;
+}
+
+static int lc_log_read_backward(TOKULOGCURSOR lc)
+{
+ int r = toku_log_fread_backward(lc->cur_fp, &(lc->entry));
+ while ( -1 == r) { // if within header length of top of file
+ // move to previous file
+ r = lc_close_cur_logfile(lc);
+ if (r!=0)
+ return r;
+ if ( lc->cur_logfiles_index == 0 )
+ return DB_NOTFOUND;
+ lc->cur_logfiles_index--;
+ r = lc_open_logfile(lc, lc->cur_logfiles_index);
+ if (r!=0)
+ return r;
+ // seek to end
+ r = fseek(lc->cur_fp, 0, SEEK_END);
+ assert(0==r);
+ r = toku_log_fread_backward(lc->cur_fp, &(lc->entry));
+ }
+ if (r!=0) {
+ toku_log_free_log_entry_resources(&(lc->entry));
+ time_t tnow = time(NULL);
+ if (r==DB_BADFORMAT) {
+ fprintf(stderr, "%.24s PerconaFT bad log format in %s\n", ctime(&tnow), lc->logfiles[lc->cur_logfiles_index]);
+ }
+ else {
+            fprintf(stderr, "%.24s PerconaFT unexpected log format error '%s' in %s\n", ctime(&tnow), strerror(r), lc->logfiles[lc->cur_logfiles_index]);
+ }
+ }
+ return r;
+}
+
+int toku_logcursor_next(TOKULOGCURSOR lc, struct log_entry **le) {
+ int r=0;
+ if ( lc->entry_valid ) {
+ toku_log_free_log_entry_resources(&(lc->entry));
+ lc->entry_valid = false;
+ if (lc->last_direction == LC_BACKWARD) {
+ struct log_entry junk;
+ r = toku_log_fread(lc->cur_fp, &junk);
+ assert(r == 0);
+ toku_log_free_log_entry_resources(&junk);
+ }
+ } else {
+ r = toku_logcursor_first(lc, le);
+ return r;
+ }
+ // read the entry
+ r = lc_log_read(lc);
+ if (r!=0) return r;
+ r = lc_check_lsn(lc, LC_FORWARD);
+ if (r!=0) return r;
+ lc->last_direction = LC_FORWARD;
+ lc->entry_valid = true;
+ *le = &(lc->entry);
+ return r;
+}
+
+int toku_logcursor_prev(TOKULOGCURSOR lc, struct log_entry **le) {
+ int r=0;
+ if ( lc->entry_valid ) {
+ toku_log_free_log_entry_resources(&(lc->entry));
+ lc->entry_valid = false;
+ if (lc->last_direction == LC_FORWARD) {
+ struct log_entry junk;
+ r = toku_log_fread_backward(lc->cur_fp, &junk);
+ assert(r == 0);
+ toku_log_free_log_entry_resources(&junk);
+ }
+ } else {
+ r = toku_logcursor_last(lc, le);
+ return r;
+ }
+ // read the entry
+ r = lc_log_read_backward(lc);
+ if (r!=0) return r;
+ r = lc_check_lsn(lc, LC_BACKWARD);
+ if (r!=0) return r;
+ lc->last_direction = LC_BACKWARD;
+ lc->entry_valid = true;
+ *le = &(lc->entry);
+ return r;
+}
+
+int toku_logcursor_first(TOKULOGCURSOR lc, struct log_entry **le) {
+ int r=0;
+ if ( lc->entry_valid ) {
+ toku_log_free_log_entry_resources(&(lc->entry));
+ lc->entry_valid = false;
+ }
+ // close any but the first log file
+ if ( lc->cur_logfiles_index != 0 ) {
+ lc_close_cur_logfile(lc);
+ }
+ // open first log file if needed
+ if ( !lc->is_open ) {
+ r = lc_open_logfile(lc, 0);
+ if (r!=0)
+ return r;
+ lc->cur_logfiles_index = 0;
+ }
+ // read the entry
+ r = lc_log_read(lc);
+ if (r!=0) return r;
+
+ r = lc_check_lsn(lc, LC_FIRST);
+ if (r!=0) return r;
+ lc->last_direction = LC_FIRST;
+ lc->entry_valid = true;
+ *le = &(lc->entry);
+ return r;
+}
+
+// get the last entry in the logfile specified by the logcursor
+int toku_logcursor_last(TOKULOGCURSOR lc, struct log_entry **le) {
+ int r=0;
+ if ( lc->entry_valid ) {
+ toku_log_free_log_entry_resources(&(lc->entry));
+ lc->entry_valid = false;
+ }
+ // close any but last log file
+ if ( lc->cur_logfiles_index != lc->n_logfiles-1 ) {
+ lc_close_cur_logfile(lc);
+ }
+ // open last log file if needed
+ if ( !lc->is_open ) {
+ r = lc_open_logfile(lc, lc->n_logfiles-1);
+ if (r!=0)
+ return r;
+ lc->cur_logfiles_index = lc->n_logfiles-1;
+ }
+ while (1) {
+ // seek to end
+ r = fseek(lc->cur_fp, 0, SEEK_END); assert(r==0);
+ // read backward
+ r = toku_log_fread_backward(lc->cur_fp, &(lc->entry));
+ if (r==0) // got a good entry
+ break;
+ if (r>0) {
+ toku_log_free_log_entry_resources(&(lc->entry));
+ // got an error,
+ // probably a corrupted last log entry due to a crash
+ // try scanning forward from the beginning to find the last good entry
+ time_t tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery repairing log\n", ctime(&tnow));
+ r = lc_fix_bad_logfile(lc);
+ if ( r != 0 ) {
+ fprintf(stderr, "%.24s PerconaFT recovery repair unsuccessful\n", ctime(&tnow));
+ return DB_BADFORMAT;
+ }
+ // try reading again
+ r = toku_log_fread_backward(lc->cur_fp, &(lc->entry));
+ if (r==0) // got a good entry
+ break;
+ }
+ // move to previous file
+ r = lc_close_cur_logfile(lc);
+ if (r!=0)
+ return r;
+ if ( lc->cur_logfiles_index == 0 )
+ return DB_NOTFOUND;
+ lc->cur_logfiles_index--;
+ r = lc_open_logfile(lc, lc->cur_logfiles_index);
+ if (r!=0)
+ return r;
+ }
+ r = lc_check_lsn(lc, LC_LAST);
+ if (r!=0)
+ return r;
+ lc->last_direction = LC_LAST;
+ lc->entry_valid = true;
+ *le = &(lc->entry);
+ return r;
+}
+
+// return 0 if log exists, ENOENT if no log
+int
+toku_logcursor_log_exists(const TOKULOGCURSOR lc) {
+ int r;
+
+ if (lc->n_logfiles)
+ r = 0;
+ else
+ r = ENOENT;
+
+ return r;
+}
+
+// fix a logfile with a bad last entry
+// - return with fp pointing to end-of-file so that toku_logcursor_last can be retried
+static int lc_fix_bad_logfile(TOKULOGCURSOR lc) {
+ struct log_entry le;
+ unsigned int version=0;
+ int r = 0;
+
+ r = fseek(lc->cur_fp, 0, SEEK_SET);
+ if ( r!=0 )
+ return r;
+ r = toku_read_logmagic(lc->cur_fp, &version);
+ if ( r!=0 )
+ return r;
+ if (version != TOKU_LOG_VERSION)
+ return -1;
+
+ toku_off_t last_good_pos;
+ last_good_pos = ftello(lc->cur_fp);
+ while (1) {
+ // initialize le
+ // - reading incomplete entries can result in fields that cannot be freed
+ memset(&le, 0, sizeof(le));
+ r = toku_log_fread(lc->cur_fp, &le);
+ toku_log_free_log_entry_resources(&le);
+ if ( r!=0 )
+ break;
+ last_good_pos = ftello(lc->cur_fp);
+ }
+ // now have position of last good entry
+ // 1) close the file
+ // 2) truncate the file to remove the error
+ // 3) reopen the file
+ // 4) set the pos to last
+ r = lc_close_cur_logfile(lc);
+ if ( r!=0 )
+ return r;
+ r = truncate(lc->logfiles[lc->n_logfiles - 1], last_good_pos);
+ if ( r!=0 )
+ return r;
+ r = lc_open_logfile(lc, lc->n_logfiles-1);
+ if ( r!=0 )
+ return r;
+ r = fseek(lc->cur_fp, 0, SEEK_END);
+ if ( r!=0 )
+ return r;
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/logger/logcursor.h b/storage/tokudb/PerconaFT/ft/logger/logcursor.h
new file mode 100644
index 00000000..59b7de93
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/logcursor.h
@@ -0,0 +1,74 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <ft/log_header.h>
+
+struct toku_logcursor;
+typedef struct toku_logcursor *TOKULOGCURSOR;
+
+// All routines return 0 on success
+
+// toku_logcursor_create()
+// - creates a logcursor (lc)
+// - following toku_logcursor_create()
+// if toku_logcursor_next() is called, it returns the first entry in the log
+// if toku_logcursor_prev() is called, it returns the last entry in the log
+int toku_logcursor_create(TOKULOGCURSOR *lc, const char *log_dir);
+// toku_logcursor_create_for_file()
+//  - creates a logcursor (lc) that only knows about the file log_file
+int toku_logcursor_create_for_file(TOKULOGCURSOR *lc, const char *log_dir, const char *log_file);
+// toku_logcursor_destroy()
+// - frees all resources associated with the logcursor, including the log_entry
+// associated with the latest cursor action
+int toku_logcursor_destroy(TOKULOGCURSOR *lc);
+
+// toku_logcursor_[next,prev,first,last] take care of malloc'ing and free'ing log_entry structs.
+//  - routines NULL out the **le pointers on entry, then set the **le pointers to
+//    the malloc'ed entries when successful.
+int toku_logcursor_next(TOKULOGCURSOR lc, struct log_entry **le);
+int toku_logcursor_prev(TOKULOGCURSOR lc, struct log_entry **le);
+
+int toku_logcursor_first(const TOKULOGCURSOR lc, struct log_entry **le);
+int toku_logcursor_last(const TOKULOGCURSOR lc, struct log_entry **le);
+
+// return 0 if log exists, ENOENT if no log
+int toku_logcursor_log_exists(const TOKULOGCURSOR lc);
+
+void toku_logcursor_print(TOKULOGCURSOR lc);
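+
+// Usage sketch (illustrative, not part of the original interface; the log directory
+// path below is a placeholder): walk the whole log from oldest to newest entry.
+//
+//     TOKULOGCURSOR lc;
+//     struct log_entry *le;
+//     int r = toku_logcursor_create(&lc, "/path/to/logdir");
+//     if (r == 0) {
+//         while ((r = toku_logcursor_next(lc, &le)) == 0) {
+//             // le is owned by the cursor and remains valid until the next cursor call
+//         }
+//         // r == DB_NOTFOUND indicates the end of the log was reached
+//         toku_logcursor_destroy(&lc);   // also frees the last entry returned
+//     }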
diff --git a/storage/tokudb/PerconaFT/ft/logger/logfilemgr.cc b/storage/tokudb/PerconaFT/ft/logger/logfilemgr.cc
new file mode 100644
index 00000000..e9028f49
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/logfilemgr.cc
@@ -0,0 +1,205 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "logger/log-internal.h"
+#include "logger/logcursor.h"
+#include "logger/logfilemgr.h"
+
+// for now, implemented as a singly-linked list
+// first = oldest (delete from beginning)
+// last = newest (add to end)
+
+struct lfm_entry {
+ TOKULOGFILEINFO lf_info;
+ struct lfm_entry *next;
+};
+
+struct toku_logfilemgr {
+ struct lfm_entry *first;
+ struct lfm_entry *last;
+ int n_entries;
+};
+
+int toku_logfilemgr_create(TOKULOGFILEMGR *lfm) {
+ // malloc a logfilemgr
+ TOKULOGFILEMGR XMALLOC(mgr);
+ mgr->first = NULL;
+ mgr->last = NULL;
+ mgr->n_entries = 0;
+ *lfm = mgr;
+ return 0;
+}
+
+int toku_logfilemgr_destroy(TOKULOGFILEMGR *lfm) {
+ int r=0;
+ if ( *lfm != NULL ) { // be tolerant of being passed a NULL
+ TOKULOGFILEMGR mgr = *lfm;
+ while ( mgr->n_entries > 0 ) {
+ toku_logfilemgr_delete_oldest_logfile_info(mgr);
+ }
+ toku_free(*lfm);
+ *lfm = NULL;
+ }
+ return r;
+}
+
+int toku_logfilemgr_init(TOKULOGFILEMGR lfm, const char *log_dir, TXNID *last_xid_if_clean_shutdown) {
+ invariant_notnull(lfm);
+ invariant_notnull(last_xid_if_clean_shutdown);
+
+ int r;
+ int n_logfiles;
+ char **logfiles;
+ r = toku_logger_find_logfiles(log_dir, &logfiles, &n_logfiles);
+ if (r!=0)
+ return r;
+
+ TOKULOGCURSOR cursor;
+ struct log_entry *entry;
+ TOKULOGFILEINFO lf_info;
+ long long index = -1;
+ char *basename;
+ LSN tmp_lsn = {0};
+ TXNID last_xid = TXNID_NONE;
+ for(int i=0;i<n_logfiles;i++){
+ XMALLOC(lf_info);
+ // find the index
+ // basename is the filename of the i-th logfile
+ basename = strrchr(logfiles[i], '/') + 1;
+ int version;
+ r = sscanf(basename, "log%lld.tokulog%d", &index, &version);
+ assert(r==2); // found index and version
+ assert(version>=TOKU_LOG_MIN_SUPPORTED_VERSION);
+ assert(version<=TOKU_LOG_VERSION);
+ lf_info->index = index;
+ lf_info->version = version;
+ // find last LSN in logfile
+ r = toku_logcursor_create_for_file(&cursor, log_dir, basename);
+ if (r!=0) {
+ return r;
+ }
+ r = toku_logcursor_last(cursor, &entry); // set "entry" to last log entry in logfile
+ if (r == 0) {
+ lf_info->maxlsn = toku_log_entry_get_lsn(entry);
+
+ invariant(lf_info->maxlsn.lsn >= tmp_lsn.lsn);
+ tmp_lsn = lf_info->maxlsn;
+ if (entry->cmd == LT_shutdown) {
+ last_xid = entry->u.shutdown.last_xid;
+ } else {
+ last_xid = TXNID_NONE;
+ }
+ }
+ else {
+ lf_info->maxlsn = tmp_lsn; // handle empty logfile (no LSN in file) case
+ }
+
+ // add to logfilemgr
+ toku_logfilemgr_add_logfile_info(lfm, lf_info);
+ toku_logcursor_destroy(&cursor);
+ }
+ toku_logger_free_logfiles(logfiles, n_logfiles);
+ *last_xid_if_clean_shutdown = last_xid;
+ return 0;
+}
+
+int toku_logfilemgr_num_logfiles(TOKULOGFILEMGR lfm) {
+ assert(lfm);
+ return lfm->n_entries;
+}
+
+int toku_logfilemgr_add_logfile_info(TOKULOGFILEMGR lfm, TOKULOGFILEINFO lf_info) {
+ assert(lfm);
+ struct lfm_entry *XMALLOC(entry);
+ entry->lf_info = lf_info;
+ entry->next = NULL;
+ if ( lfm->n_entries != 0 )
+ lfm->last->next = entry;
+ lfm->last = entry;
+ lfm->n_entries++;
+ if (lfm->n_entries == 1 ) {
+ lfm->first = lfm->last;
+ }
+ return 0;
+}
+
+TOKULOGFILEINFO toku_logfilemgr_get_oldest_logfile_info(TOKULOGFILEMGR lfm) {
+ assert(lfm);
+ return lfm->first->lf_info;
+}
+
+void toku_logfilemgr_delete_oldest_logfile_info(TOKULOGFILEMGR lfm) {
+ assert(lfm);
+ if ( lfm->n_entries > 0 ) {
+ struct lfm_entry *entry = lfm->first;
+ toku_free(entry->lf_info);
+ lfm->first = entry->next;
+ toku_free(entry);
+ lfm->n_entries--;
+ if ( lfm->n_entries == 0 ) {
+ lfm->last = lfm->first = NULL;
+ }
+ }
+}
+
+LSN toku_logfilemgr_get_last_lsn(TOKULOGFILEMGR lfm) {
+ assert(lfm);
+ if ( lfm->n_entries == 0 ) {
+ LSN lsn;
+ lsn.lsn = 0;
+ return lsn;
+ }
+ return lfm->last->lf_info->maxlsn;
+}
+
+void toku_logfilemgr_update_last_lsn(TOKULOGFILEMGR lfm, LSN lsn) {
+ assert(lfm);
+ assert(lfm->last!=NULL);
+ lfm->last->lf_info->maxlsn = lsn;
+}
+
+void toku_logfilemgr_print(TOKULOGFILEMGR lfm) {
+ assert(lfm);
+ printf("toku_logfilemgr_print [%p] : %d entries \n", lfm, lfm->n_entries);
+ struct lfm_entry *entry = lfm->first;
+ for (int i=0;i<lfm->n_entries;i++) {
+ printf(" entry %d : index = %" PRId64 ", maxlsn = %" PRIu64 "\n", i, entry->lf_info->index, entry->lf_info->maxlsn.lsn);
+ entry = entry->next;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/ft/logger/logfilemgr.h b/storage/tokudb/PerconaFT/ft/logger/logfilemgr.h
new file mode 100644
index 00000000..846da513
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/logfilemgr.h
@@ -0,0 +1,65 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <ft/log_header.h>
+
+// this is the basic information we need to keep per logfile
+struct toku_logfile_info {
+ int64_t index;
+ LSN maxlsn;
+ uint32_t version;
+};
+typedef struct toku_logfile_info *TOKULOGFILEINFO;
+
+struct toku_logfilemgr;
+typedef struct toku_logfilemgr *TOKULOGFILEMGR;
+
+int toku_logfilemgr_create(TOKULOGFILEMGR *lfm);
+int toku_logfilemgr_destroy(TOKULOGFILEMGR *lfm);
+
+int toku_logfilemgr_init(TOKULOGFILEMGR lfm, const char *log_dir, TXNID *last_xid_if_clean_shutdown);
+int toku_logfilemgr_num_logfiles(TOKULOGFILEMGR lfm);
+int toku_logfilemgr_add_logfile_info(TOKULOGFILEMGR lfm, TOKULOGFILEINFO lf_info);
+TOKULOGFILEINFO toku_logfilemgr_get_oldest_logfile_info(TOKULOGFILEMGR lfm);
+void toku_logfilemgr_delete_oldest_logfile_info(TOKULOGFILEMGR lfm);
+LSN toku_logfilemgr_get_last_lsn(TOKULOGFILEMGR lfm);
+void toku_logfilemgr_update_last_lsn(TOKULOGFILEMGR lfm, LSN lsn);
+
+void toku_logfilemgr_print(TOKULOGFILEMGR lfm);
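+
+// Usage sketch (illustrative, not part of the original interface; the log directory
+// path below is a placeholder): build the per-logfile list and read the newest LSN.
+//
+//     TOKULOGFILEMGR lfm;
+//     TXNID last_xid = TXNID_NONE;
+//     if (toku_logfilemgr_create(&lfm) == 0) {
+//         if (toku_logfilemgr_init(lfm, "/path/to/logdir", &last_xid) == 0) {
+//             LSN last = toku_logfilemgr_get_last_lsn(lfm);   // lsn == 0 when no entries exist
+//             (void) last;
+//         }
+//         toku_logfilemgr_destroy(&lfm);
+//     }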
diff --git a/storage/tokudb/PerconaFT/ft/logger/logformat.cc b/storage/tokudb/PerconaFT/ft/logger/logformat.cc
new file mode 100644
index 00000000..49b61138
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/logformat.cc
@@ -0,0 +1,835 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* This file defines the logformat in an executable fashion.
+ * This code is used to generate
+ * The code that writes into the log.
+ * The code that reads the log and prints it to stdout (the log_print utility)
+ * The code that reads the log for recovery.
+ * The struct definitions.
+ * The LaTeX documentation.
+ */
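+
+/* For example (illustrative only; see generate_log_struct() and the "xbegin" entry in
+ * logtypes[] below), the generated header contains one struct per log record type,
+ * roughly of the form:
+ *
+ *     struct logtype_xbegin {
+ *         LSN              lsn;
+ *         TXNID_PAIR       xid;
+ *         TXNID_PAIR       parentxid;
+ *         uint32_t         crc;
+ *         uint32_t         len;
+ *     };
+ */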
+#include <ctype.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <toku_portability.h>
+#include <toku_assert.h>
+
+
+typedef struct field {
+ const char *type;
+ const char *name;
+ const char *format; // optional format string
+} F;
+
+#define NULLFIELD {0,0,0}
+#define FA (F[])
+
+enum log_begin_action {
+ IGNORE_LOG_BEGIN,
+ SHOULD_LOG_BEGIN,
+ ASSERT_BEGIN_WAS_LOGGED,
+ LOG_BEGIN_ACTION_NA = IGNORE_LOG_BEGIN
+};
+
+struct logtype {
+ const char *name;
+ unsigned int command_and_flags;
+ struct field *fields;
+ enum log_begin_action log_begin_action;
+};
+
+// In the fields, don't mention the command, the LSN, the CRC or the trailing LEN.
+
+const struct logtype rollbacks[] = {
+ //TODO: #2037 Add dname
+ {"fdelete", 'U', FA{{"FILENUM", "filenum", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ //TODO: #2037 Add dname
+ {"fcreate", 'F', FA{{"FILENUM", "filenum", 0},
+ {"BYTESTRING", "iname", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ //rename file
+ {"frename", 'n', FA{{"BYTESTRING", "old_iname", 0},
+ {"BYTESTRING", "new_iname", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ // cmdinsert is used to insert a key-value pair into a DB. For rollback we don't need the data.
+ {"cmdinsert", 'i', FA{
+ {"FILENUM", "filenum", 0},
+ {"BYTESTRING", "key", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ {"cmddelete", 'd', FA{
+ {"FILENUM", "filenum", 0},
+ {"BYTESTRING", "key", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ {"rollinclude", 'r', FA{{"TXNID_PAIR", "xid", 0},
+ {"uint64_t", "num_nodes", 0},
+ {"BLOCKNUM", "spilled_head", 0},
+ {"BLOCKNUM", "spilled_tail", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ {"load", 'l', FA{{"FILENUM", "old_filenum", 0},
+ {"BYTESTRING", "new_iname", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ // #2954
+ {"hot_index", 'h', FA{{"FILENUMS", "hot_index_filenums", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ {"dictionary_redirect", 'R', FA{{"FILENUM", "old_filenum", 0},
+ {"FILENUM", "new_filenum", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ {"cmdupdate", 'u', FA{{"FILENUM", "filenum", 0},
+ {"BYTESTRING", "key", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ {"cmdupdatebroadcast", 'B', FA{{"FILENUM", "filenum", 0},
+ {"bool", "is_resetting_op", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ {"change_fdescriptor", 'D', FA{{"FILENUM", "filenum", 0},
+ {"BYTESTRING", "old_descriptor", 0},
+ NULLFIELD}, LOG_BEGIN_ACTION_NA},
+ {0,0,FA{NULLFIELD}, LOG_BEGIN_ACTION_NA}
+};
+
+const struct logtype logtypes[] = {
+ // Records produced by checkpoints
+#if 0 // no longer used, but reserve the type
+ {"local_txn_checkpoint", 'c', FA{{"TXNID", "xid", 0}, NULLFIELD}},
+#endif
+ {"begin_checkpoint", 'x', FA{{"uint64_t", "timestamp", 0}, {"TXNID", "last_xid", 0}, NULLFIELD}, IGNORE_LOG_BEGIN},
+ {"end_checkpoint", 'X', FA{{"LSN", "lsn_begin_checkpoint", 0},
+ {"uint64_t", "timestamp", 0},
+ {"uint32_t", "num_fassociate_entries", 0}, // how many files were checkpointed
+ {"uint32_t", "num_xstillopen_entries", 0}, // how many txns were checkpointed
+ NULLFIELD}, IGNORE_LOG_BEGIN},
+ //TODO: #2037 Add dname
+ {"fassociate", 'f', FA{{"FILENUM", "filenum", 0},
+ {"uint32_t", "treeflags", 0},
+ {"BYTESTRING", "iname", 0}, // pathname of file
+ {"uint8_t", "unlink_on_close", 0},
+ NULLFIELD}, IGNORE_LOG_BEGIN},
+ //We do not use a txninfo struct since recovery log has
+ //FILENUMS and TOKUTXN has FTs (for open_fts)
+ {"xstillopen", 's', FA{{"TXNID_PAIR", "xid", 0},
+ {"TXNID_PAIR", "parentxid", 0},
+ {"uint64_t", "rollentry_raw_count", 0},
+ {"FILENUMS", "open_filenums", 0},
+ {"uint8_t", "force_fsync_on_commit", 0},
+ {"uint64_t", "num_rollback_nodes", 0},
+ {"uint64_t", "num_rollentries", 0},
+ {"BLOCKNUM", "spilled_rollback_head", 0},
+ {"BLOCKNUM", "spilled_rollback_tail", 0},
+ {"BLOCKNUM", "current_rollback", 0},
+ NULLFIELD}, ASSERT_BEGIN_WAS_LOGGED}, // record all transactions
+ // prepared txns need a gid
+ {"xstillopenprepared", 'p', FA{{"TXNID_PAIR", "xid", 0},
+ {"XIDP", "xa_xid", 0}, // prepared transactions need a gid, and have no parentxid.
+ {"uint64_t", "rollentry_raw_count", 0},
+ {"FILENUMS", "open_filenums", 0},
+ {"uint8_t", "force_fsync_on_commit", 0},
+ {"uint64_t", "num_rollback_nodes", 0},
+ {"uint64_t", "num_rollentries", 0},
+ {"BLOCKNUM", "spilled_rollback_head", 0},
+ {"BLOCKNUM", "spilled_rollback_tail", 0},
+ {"BLOCKNUM", "current_rollback", 0},
+ NULLFIELD}, ASSERT_BEGIN_WAS_LOGGED}, // record all transactions
+ // Records produced by transactions
+ {"xbegin", 'b', FA{{"TXNID_PAIR", "xid", 0},{"TXNID_PAIR", "parentxid", 0},NULLFIELD}, IGNORE_LOG_BEGIN},
+ {"xcommit",'C', FA{{"TXNID_PAIR", "xid", 0},NULLFIELD}, ASSERT_BEGIN_WAS_LOGGED},
+ {"xprepare",'P', FA{{"TXNID_PAIR", "xid", 0}, {"XIDP", "xa_xid", 0}, NULLFIELD}, ASSERT_BEGIN_WAS_LOGGED},
+ {"xabort", 'q', FA{{"TXNID_PAIR", "xid", 0},NULLFIELD}, ASSERT_BEGIN_WAS_LOGGED},
+ //TODO: #2037 Add dname
+ {"fcreate", 'F', FA{{"TXNID_PAIR", "xid", 0},
+ {"FILENUM", "filenum", 0},
+ {"BYTESTRING", "iname", 0},
+ {"uint32_t", "mode", "0%o"},
+ {"uint32_t", "treeflags", 0},
+ {"uint32_t", "nodesize", 0},
+ {"uint32_t", "basementnodesize", 0},
+ {"uint32_t", "compression_method", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ //TODO: #2037 Add dname
+ {"fopen", 'O', FA{{"BYTESTRING", "iname", 0},
+ {"FILENUM", "filenum", 0},
+ {"uint32_t", "treeflags", 0},
+ NULLFIELD}, IGNORE_LOG_BEGIN},
+ //TODO: #2037 Add dname
+ {"fclose", 'e', FA{{"BYTESTRING", "iname", 0},
+ {"FILENUM", "filenum", 0},
+ NULLFIELD}, IGNORE_LOG_BEGIN},
+ //TODO: #2037 Add dname
+ {"fdelete", 'U', FA{{"TXNID_PAIR", "xid", 0},
+ {"FILENUM", "filenum", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ {"frename", 'n', FA{{"TXNID_PAIR", "xid", 0},
+ {"BYTESTRING", "old_iname", 0},
+ {"FILENUM", "old_filenum", 0},
+ {"BYTESTRING", "new_iname", 0},
+ NULLFIELD}, IGNORE_LOG_BEGIN},
+ {"enq_insert", 'I', FA{{"FILENUM", "filenum", 0},
+ {"TXNID_PAIR", "xid", 0},
+ {"BYTESTRING", "key", 0},
+ {"BYTESTRING", "value", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ {"enq_insert_no_overwrite", 'i', FA{{"FILENUM", "filenum", 0},
+ {"TXNID_PAIR", "xid", 0},
+ {"BYTESTRING", "key", 0},
+ {"BYTESTRING", "value", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ {"enq_delete_any", 'E', FA{{"FILENUM", "filenum", 0},
+ {"TXNID_PAIR", "xid", 0},
+ {"BYTESTRING", "key", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ {"enq_insert_multiple", 'm', FA{{"FILENUM", "src_filenum", 0},
+ {"FILENUMS", "dest_filenums", 0},
+ {"TXNID_PAIR", "xid", 0},
+ {"BYTESTRING", "src_key", 0},
+ {"BYTESTRING", "src_val", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ {"enq_delete_multiple", 'M', FA{{"FILENUM", "src_filenum", 0},
+ {"FILENUMS", "dest_filenums", 0},
+ {"TXNID_PAIR", "xid", 0},
+ {"BYTESTRING", "src_key", 0},
+ {"BYTESTRING", "src_val", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ {"comment", 'T', FA{{"uint64_t", "timestamp", 0},
+ {"BYTESTRING", "comment", 0},
+ NULLFIELD}, IGNORE_LOG_BEGIN},
+ // Note: shutdown_up_to_19 log entry is NOT ALLOWED TO BE CHANGED.
+ // Do not change the letter ('Q'), do not add fields,
+ // do not remove fields.
+ // TODO: Kill this logentry entirely once we no longer support version 19.
+ {"shutdown_up_to_19", 'Q', FA{{"uint64_t", "timestamp", 0},
+ NULLFIELD}, IGNORE_LOG_BEGIN},
+ // Note: Shutdown log entry is NOT ALLOWED TO BE CHANGED.
+ // Do not change the letter ('0'), do not add fields,
+ // do not remove fields.
+ // You CAN leave this alone and add a new one, but then you have
+ // to deal with the upgrade mechanism again.
+ // This is how we detect clean shutdowns from OLDER VERSIONS.
+ // This log entry must always be readable for future versions.
+ // If you DO change it, you need to write a separate log upgrade mechanism.
+ {"shutdown", '0', FA{{"uint64_t", "timestamp", 0},
+ {"TXNID", "last_xid", 0},
+ NULLFIELD}, IGNORE_LOG_BEGIN},
+ {"load", 'l', FA{{"TXNID_PAIR", "xid", 0},
+ {"FILENUM", "old_filenum", 0},
+ {"BYTESTRING", "new_iname", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ // #2954
+ {"hot_index", 'h', FA{{"TXNID_PAIR", "xid", 0},
+ {"FILENUMS", "hot_index_filenums", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ {"enq_update", 'u', FA{{"FILENUM", "filenum", 0},
+ {"TXNID_PAIR", "xid", 0},
+ {"BYTESTRING", "key", 0},
+ {"BYTESTRING", "extra", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ {"enq_updatebroadcast", 'B', FA{{"FILENUM", "filenum", 0},
+ {"TXNID_PAIR", "xid", 0},
+ {"BYTESTRING", "extra", 0},
+ {"bool", "is_resetting_op", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ {"change_fdescriptor", 'D', FA{{"FILENUM", "filenum", 0},
+ {"TXNID_PAIR", "xid", 0},
+ {"BYTESTRING", "old_descriptor", 0},
+ {"BYTESTRING", "new_descriptor", 0},
+ {"bool", "update_cmp_descriptor", 0},
+ NULLFIELD}, SHOULD_LOG_BEGIN},
+ {0,0,FA{NULLFIELD}, (enum log_begin_action) 0}
+};
+
+
+#define DO_STRUCTS(lt, array, body) do { \
+ const struct logtype *lt; \
+ for (lt=&array[0]; lt->name; lt++) { \
+ body; \
+ } } while (0)
+
+#define DO_ROLLBACKS(lt, body) DO_STRUCTS(lt, rollbacks, body)
+
+#define DO_LOGTYPES(lt, body) DO_STRUCTS(lt, logtypes, body)
+
+#define DO_LOGTYPES_AND_ROLLBACKS(lt, body) (DO_ROLLBACKS(lt,body), DO_LOGTYPES(lt, body))
+
+#define DO_FIELDS(fld, lt, body) do { \
+ struct field *fld; \
+ for (fld=lt->fields; fld->type; fld++) { \
+ body; \
+ } } while (0)
+
+
+static void __attribute__((format (printf, 3, 4))) fprintf2 (FILE *f1, FILE *f2, const char *format, ...) {
+ va_list ap;
+ int r;
+ va_start(ap, format);
+ r=vfprintf(f1, format, ap); assert(r>=0);
+ va_end(ap);
+ va_start(ap, format);
+ r=vfprintf(f2, format, ap); assert(r>=0);
+ va_end(ap);
+}
+
+FILE *hf=0, *cf=0, *pf=0;
+
+static void
+generate_enum_internal (const char *enum_name, const char *enum_prefix, const struct logtype *lts) {
+ char used_cmds[256];
+ int count=0;
+ memset(used_cmds, 0, 256);
+ fprintf(hf, "enum %s {", enum_name);
+ DO_STRUCTS(lt, lts,
+ {
+ unsigned char cmd = (unsigned char)(lt->command_and_flags&0xff);
+ if (count!=0) fprintf(hf, ",");
+ count++;
+ fprintf(hf, "\n");
+ fprintf(hf," %s_%-16s = '%c'", enum_prefix, lt->name, cmd);
+ if (used_cmds[cmd]!=0) { fprintf(stderr, "%s:%d: error: Command %d (%c) was used twice (second time for %s)\n", __FILE__, __LINE__, cmd, cmd, lt->name); abort(); }
+ used_cmds[cmd]=1;
+ });
+ fprintf(hf, "\n};\n\n");
+
+}
+
+static void
+generate_enum (void) {
+ generate_enum_internal("lt_cmd", "LT", logtypes);
+ generate_enum_internal("rt_cmd", "RT", rollbacks);
+}
+
+static void
+generate_log_struct (void) {
+ DO_LOGTYPES(lt,
+ { fprintf(hf, "struct logtype_%s {\n", lt->name);
+ fprintf(hf, " %-16s lsn;\n", "LSN");
+ DO_FIELDS(field_type, lt,
+ fprintf(hf, " %-16s %s;\n", field_type->type, field_type->name));
+ fprintf(hf, " %-16s crc;\n", "uint32_t");
+ fprintf(hf, " %-16s len;\n", "uint32_t");
+ fprintf(hf, "};\n");
+ //fprintf(hf, "void toku_recover_%s (LSN lsn", lt->name);
+ //DO_FIELDS(field_type, lt, fprintf(hf, ", %s %s", field_type->type, field_type->name));
+ //fprintf(hf, ");\n");
+ });
+ DO_ROLLBACKS(lt,
+ { fprintf(hf, "struct rolltype_%s {\n", lt->name);
+ DO_FIELDS(field_type, lt,
+ fprintf(hf, " %-16s %s;\n", field_type->type, field_type->name));
+ fprintf(hf, "};\n");
+ fprintf(hf, "int toku_rollback_%s (", lt->name);
+ DO_FIELDS(field_type, lt, fprintf(hf, "%s %s,", field_type->type, field_type->name));
+ fprintf(hf, "TOKUTXN txn, LSN oplsn);\n");
+ fprintf(hf, "int toku_commit_%s (", lt->name);
+ DO_FIELDS(field_type, lt, fprintf(hf, "%s %s,", field_type->type, field_type->name));
+ fprintf(hf, "TOKUTXN txn, LSN oplsn);\n");
+ });
+ fprintf(hf, "struct log_entry {\n");
+ fprintf(hf, " enum lt_cmd cmd;\n");
+ fprintf(hf, " union {\n");
+ DO_LOGTYPES(lt, fprintf(hf," struct logtype_%s %s;\n", lt->name, lt->name));
+ fprintf(hf, " } u;\n");
+ fprintf(hf, "};\n");
+
+ fprintf(hf, "struct roll_entry {\n");
+ fprintf(hf, " enum rt_cmd cmd;\n");
+ fprintf(hf, " struct roll_entry *prev; /* for in-memory list of log entries. Threads from newest to oldest. */\n");
+ fprintf(hf, " union {\n");
+ DO_ROLLBACKS(lt, fprintf(hf," struct rolltype_%s %s;\n", lt->name, lt->name));
+ fprintf(hf, " } u;\n");
+ fprintf(hf, "};\n");
+
+}
+
+static void
+generate_dispatch (void) {
+ fprintf(hf, "#define rolltype_dispatch(s, funprefix) ({ switch((s)->cmd) {\\\n");
+ DO_ROLLBACKS(lt, fprintf(hf, " case RT_%s: funprefix ## %s (&(s)->u.%s); break;\\\n", lt->name, lt->name, lt->name));
+ fprintf(hf, " }})\n");
+
+ fprintf(hf, "#define logtype_dispatch_assign(s, funprefix, var, ...) do { switch((s)->cmd) {\\\n");
+ DO_LOGTYPES(lt, fprintf(hf, " case LT_%s: var = funprefix ## %s (&(s)->u.%s, __VA_ARGS__); break;\\\n", lt->name, lt->name, lt->name));
+ fprintf(hf, " }} while (0)\n");
+
+ fprintf(hf, "#define rolltype_dispatch_assign(s, funprefix, var, ...) do { \\\n");
+ fprintf(hf, " switch((s)->cmd) {\\\n");
+ DO_ROLLBACKS(lt, {
+ fprintf(hf, " case RT_%s: var = funprefix ## %s (", lt->name, lt->name);
+ int fieldcount=0;
+ DO_FIELDS(field_type, lt, {
+ if (fieldcount>0) fprintf(hf, ",");
+ fprintf(hf, "(s)->u.%s.%s", lt->name, field_type->name);
+ fieldcount++;
+ });
+ fprintf(hf, ", __VA_ARGS__); break;\\\n");
+ });
+ fprintf(hf, " default: assert(0);} } while (0)\n");
+
+ fprintf(hf, "#define logtype_dispatch_args(s, funprefix, ...) do { switch((s)->cmd) {\\\n");
+ DO_LOGTYPES(lt,
+ {
+ fprintf(hf, " case LT_%s: funprefix ## %s ((s)->u.%s.lsn", lt->name, lt->name, lt->name);
+ DO_FIELDS(field_type, lt, fprintf(hf, ",(s)->u.%s.%s", lt->name, field_type->name));
+ fprintf(hf, ", __VA_ARGS__); break;\\\n");
+ });
+ fprintf(hf, " }} while (0)\n");
+}
+
+static void
+generate_get_timestamp(void) {
+ fprintf(cf, "static uint64_t toku_get_timestamp(void) {\n");
+ fprintf(cf, " struct timeval tv; int r = gettimeofday(&tv, NULL);\n");
+ fprintf(cf, " assert(r==0);\n");
+ fprintf(cf, " return tv.tv_sec * 1000000ULL + tv.tv_usec;\n");
+ fprintf(cf, "}\n");
+}
+
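+// generate_log_writer() emits one toku_log_<name>() function per entry in logtypes[].
+// For example (illustrative only), the "comment" entry produces a declaration roughly like:
+//
+//     void toku_log_comment (TOKULOGGER logger, LSN *lsnp, int do_fsync,
+//                            uint64_t timestamp, BYTESTRING comment);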
+static void
+generate_log_writer (void) {
+ generate_get_timestamp();
+ DO_LOGTYPES(lt, {
+ //TODO(yoni): The overhead variables are NOT correct for BYTESTRING, FILENUMS (or any other variable length type)
+ // We should switch to something like using toku_logsizeof_*.
+ fprintf(hf, "static const size_t toku_log_%s_overhead = (+4+1+8", lt->name);
+ DO_FIELDS(field_type, lt, fprintf(hf, "+sizeof(%s)", field_type->type));
+ fprintf(hf, "+8);\n");
+ fprintf2(cf, hf, "void toku_log_%s (TOKULOGGER logger, LSN *lsnp, int do_fsync", lt->name);
+ switch (lt->log_begin_action) {
+ case SHOULD_LOG_BEGIN:
+ case ASSERT_BEGIN_WAS_LOGGED: {
+ fprintf2(cf, hf, ", TOKUTXN txn");
+ break;
+ }
+ case IGNORE_LOG_BEGIN: break;
+ }
+ DO_FIELDS(field_type, lt, fprintf2(cf, hf, ", %s %s", field_type->type, field_type->name));
+ fprintf(hf, ");\n");
+ fprintf(cf, ") {\n");
+ fprintf(cf, " if (logger == NULL) {\n");
+ fprintf(cf, " return;\n");
+ fprintf(cf, " }\n");
+ switch (lt->log_begin_action) {
+ case SHOULD_LOG_BEGIN: {
+ fprintf(cf, " //txn can be NULL during tests\n");
+ fprintf(cf, " //never null when not checkpoint.\n");
+ fprintf(cf, " if (txn && !txn->begin_was_logged) {\n");
+ fprintf(cf, " invariant(!txn_declared_read_only(txn));\n");
+ fprintf(cf, " toku_maybe_log_begin_txn_for_write_operation(txn);\n");
+ fprintf(cf, " }\n");
+ break;
+ }
+ case ASSERT_BEGIN_WAS_LOGGED: {
+ fprintf(cf, " //txn can be NULL during tests\n");
+ fprintf(cf, " invariant(!txn || txn->begin_was_logged);\n");
+ fprintf(cf, " invariant(!txn || !txn_declared_read_only(txn));\n");
+ break;
+ }
+ case IGNORE_LOG_BEGIN: break;
+ }
+ fprintf(cf, " if (!logger->write_log_files) {\n");
+ fprintf(cf, " ml_lock(&logger->input_lock);\n");
+ fprintf(cf, " logger->lsn.lsn++;\n");
+ fprintf(cf, " if (lsnp) *lsnp=logger->lsn;\n");
+ fprintf(cf, " ml_unlock(&logger->input_lock);\n");
+ fprintf(cf, " return;\n");
+ fprintf(cf, " }\n");
+ fprintf(cf, " const unsigned int buflen= (+4 // len at the beginning\n");
+ fprintf(cf, " +1 // log command\n");
+ fprintf(cf, " +8 // lsn\n");
+ DO_FIELDS(field_type, lt,
+ fprintf(cf, " +toku_logsizeof_%s(%s)\n", field_type->type, field_type->name));
+ fprintf(cf, " +8 // crc + len\n");
+ fprintf(cf, " );\n");
+ fprintf(cf, " struct wbuf wbuf;\n");
+ fprintf(cf, " ml_lock(&logger->input_lock);\n");
+ fprintf(cf, " toku_logger_make_space_in_inbuf(logger, buflen);\n");
+ fprintf(cf, " wbuf_nocrc_init(&wbuf, logger->inbuf.buf+logger->inbuf.n_in_buf, buflen);\n");
+ fprintf(cf, " wbuf_nocrc_int(&wbuf, buflen);\n");
+ fprintf(cf, " wbuf_nocrc_char(&wbuf, '%c');\n", (char)(0xff&lt->command_and_flags));
+ fprintf(cf, " logger->lsn.lsn++;\n");
+ fprintf(cf, " logger->inbuf.max_lsn_in_buf = logger->lsn;\n");
+ fprintf(cf, " wbuf_nocrc_LSN(&wbuf, logger->lsn);\n");
+ fprintf(cf, " if (lsnp) *lsnp=logger->lsn;\n");
+ DO_FIELDS(field_type, lt,
+ if (strcmp(field_type->name, "timestamp") == 0)
+ fprintf(cf, " if (timestamp == 0) timestamp = toku_get_timestamp();\n");
+ fprintf(cf, " wbuf_nocrc_%s(&wbuf, %s);\n", field_type->type, field_type->name));
+ fprintf(cf, " wbuf_nocrc_int(&wbuf, toku_x1764_memory(wbuf.buf, wbuf.ndone));\n");
+ fprintf(cf, " wbuf_nocrc_int(&wbuf, buflen);\n");
+ fprintf(cf, " assert(wbuf.ndone==buflen);\n");
+ fprintf(cf, " logger->inbuf.n_in_buf += buflen;\n");
+ fprintf(cf, " toku_logger_maybe_fsync(logger, logger->lsn, do_fsync, true);\n");
+ fprintf(cf, "}\n\n");
+ });
+}
+
+static void
+generate_log_reader (void) {
+ DO_LOGTYPES(lt, {
+ fprintf(cf, "static int toku_log_fread_%s (FILE *infile, uint32_t len1, struct logtype_%s *data, struct x1764 *checksum)", lt->name, lt->name);
+ fprintf(cf, " {\n");
+ fprintf(cf, " int r=0;\n");
+ fprintf(cf, " uint32_t actual_len=5; // 1 for the command, 4 for the first len.\n");
+ fprintf(cf, " r=toku_fread_%-16s(infile, &data->%-16s, checksum, &actual_len); if (r!=0) return r;\n", "LSN", "lsn");
+ DO_FIELDS(field_type, lt,
+ fprintf(cf, " r=toku_fread_%-16s(infile, &data->%-16s, checksum, &actual_len); if (r!=0) return r;\n", field_type->type, field_type->name));
+ fprintf(cf, " uint32_t checksum_in_file, len_in_file;\n");
+ fprintf(cf, " r=toku_fread_uint32_t_nocrclen(infile, &checksum_in_file); actual_len+=4; if (r!=0) return r;\n");
+ fprintf(cf, " r=toku_fread_uint32_t_nocrclen(infile, &len_in_file); actual_len+=4; if (r!=0) return r;\n");
+ fprintf(cf, " if (checksum_in_file!=toku_x1764_finish(checksum) || len_in_file!=actual_len || len1 != len_in_file) return DB_BADFORMAT;\n");
+ fprintf(cf, " return 0;\n");
+ fprintf(cf, "}\n\n");
+ });
+ fprintf2(cf, hf, "int toku_log_fread (FILE *infile, struct log_entry *le)");
+ fprintf(hf, ";\n");
+ fprintf(cf, " {\n");
+ fprintf(cf, " uint32_t len1; int r;\n");
+ fprintf(cf, " uint32_t ignorelen=0;\n");
+ fprintf(cf, " struct x1764 checksum;\n");
+ fprintf(cf, " toku_x1764_init(&checksum);\n");
+ fprintf(cf, " r = toku_fread_uint32_t(infile, &len1, &checksum, &ignorelen); if (r!=0) return r;\n");
+ fprintf(cf, " int cmd=fgetc(infile);\n");
+ fprintf(cf, " if (cmd==EOF) return EOF;\n");
+ fprintf(cf, " char cmdchar = (char)cmd;\n");
+ fprintf(cf, " toku_x1764_add(&checksum, &cmdchar, 1);\n");
+ fprintf(cf, " le->cmd=(enum lt_cmd)cmd;\n");
+ fprintf(cf, " switch ((enum lt_cmd)cmd) {\n");
+ DO_LOGTYPES(lt, {
+ fprintf(cf, " case LT_%s:\n", lt->name);
+ fprintf(cf, " return toku_log_fread_%s (infile, len1, &le->u.%s, &checksum);\n", lt->name, lt->name);
+ });
+ fprintf(cf, " };\n");
+ fprintf(cf, " return DB_BADFORMAT;\n"); // Should read past the record using the len field.
+ fprintf(cf, "}\n\n");
+ //fprintf2(cf, hf, "// Return 0 if there is something to read, return -1 if nothing to read, abort if an error.\n");
+ fprintf2(cf, hf, "// Return 0 if there is something to read, -1 if nothing to read, >0 on error\n");
+ fprintf2(cf, hf, "int toku_log_fread_backward (FILE *infile, struct log_entry *le)");
+ fprintf(hf, ";\n");
+ fprintf(cf, "{\n");
+ fprintf(cf, " memset(le, 0, sizeof(*le));\n");
+ fprintf(cf, " long pos = ftell(infile);\n");
+ fprintf(cf, " if (pos<=12) return -1;\n");
+ fprintf(cf, " int r = fseek(infile, -4, SEEK_CUR); \n");// assert(r==0);\n");
+ fprintf(cf, " if (r!=0) return get_error_errno();\n");
+ fprintf(cf, " uint32_t len;\n");
+ fprintf(cf, " r = toku_fread_uint32_t_nocrclen(infile, &len); \n");// assert(r==0);\n");
+ fprintf(cf, " if (r!=0) return 1;\n");
+ fprintf(cf, " r = fseek(infile, -(int)len, SEEK_CUR) ; \n");// assert(r==0);\n");
+ fprintf(cf, " if (r!=0) return get_error_errno();\n");
+ fprintf(cf, " r = toku_log_fread(infile, le); \n");// assert(r==0);\n");
+ fprintf(cf, " if (r!=0) return 1;\n");
+ fprintf(cf, " long afterpos = ftell(infile);\n");
+ fprintf(cf, " if (afterpos != pos) return 1;\n");
+ fprintf(cf, " r = fseek(infile, -(int)len, SEEK_CUR); \n");// assert(r==0);\n");
+ fprintf(cf, " if (r!=0) return get_error_errno();\n");
+ fprintf(cf, " return 0;\n");
+ fprintf(cf, "}\n\n");
+
+ DO_LOGTYPES(lt, ({
+ fprintf(cf, "static void toku_log_free_log_entry_%s_resources (struct logtype_%s *data", lt->name, lt->name);
+ if (!lt->fields->type) fprintf(cf, " __attribute__((__unused__))");
+ fprintf(cf, ") {\n");
+ DO_FIELDS(field_type, lt,
+ fprintf(cf, " toku_free_%s(data->%s);\n", field_type->type, field_type->name);
+ );
+ fprintf(cf, "}\n\n");
+ }));
+ fprintf2(cf, hf, "void toku_log_free_log_entry_resources (struct log_entry *le)");
+ fprintf(hf, ";\n");
+ fprintf(cf, " {\n");
+ fprintf(cf, " switch ((enum lt_cmd)le->cmd) {\n");
+ DO_LOGTYPES(lt, {
+ fprintf(cf, " case LT_%s:\n", lt->name);
+ fprintf(cf, " return toku_log_free_log_entry_%s_resources (&(le->u.%s));\n", lt->name, lt->name);
+ });
+ fprintf(cf, " };\n");
+ fprintf(cf, " return;\n");
+ fprintf(cf, "}\n\n");
+}
+
+static void
+generate_logprint (void) {
+ unsigned maxnamelen=0;
+ fprintf2(pf, hf, "int toku_logprint_one_record(FILE *outf, FILE *f)");
+ fprintf(hf, ";\n");
+ fprintf(pf, " {\n");
+ fprintf(pf, " int cmd, r;\n");
+ fprintf(pf, " uint32_t len1, crc_in_file;\n");
+ fprintf(pf, " uint32_t ignorelen=0;\n");
+ fprintf(pf, " struct x1764 checksum;\n");
+ fprintf(pf, " toku_x1764_init(&checksum);\n");
+ fprintf(pf, " r=toku_fread_uint32_t(f, &len1, &checksum, &ignorelen);\n");
+ fprintf(pf, " if (r==EOF) return EOF;\n");
+ fprintf(pf, " cmd=fgetc(f);\n");
+ fprintf(pf, " if (cmd==EOF) return DB_BADFORMAT;\n");
+ fprintf(pf, " uint32_t len_in_file, len=1+4; // cmd + len1\n");
+ fprintf(pf, " char charcmd = (char)cmd;\n");
+ fprintf(pf, " toku_x1764_add(&checksum, &charcmd, 1);\n");
+ fprintf(pf, " switch ((enum lt_cmd)cmd) {\n");
+ DO_LOGTYPES(lt, { if (strlen(lt->name)>maxnamelen) maxnamelen=strlen(lt->name); });
+ DO_LOGTYPES(lt, {
+ unsigned char cmd = (unsigned char)(0xff&lt->command_and_flags);
+ fprintf(pf, " case LT_%s: \n", lt->name);
+ // We aren't using the log reader here because we want better diagnostics as soon as things go wrong.
+ fprintf(pf, " fprintf(outf, \"%%-%us \", \"%s\");\n", maxnamelen, lt->name);
+ if (isprint(cmd)) fprintf(pf," fprintf(outf, \" '%c':\");\n", cmd);
+ else fprintf(pf," fprintf(outf, \"0%03o:\");\n", cmd);
+ fprintf(pf, " r = toku_logprint_%-16s(outf, f, \"lsn\", &checksum, &len, 0); if (r!=0) return r;\n", "LSN");
+ DO_FIELDS(field_type, lt, {
+ fprintf(pf, " r = toku_logprint_%-16s(outf, f, \"%s\", &checksum, &len,", field_type->type, field_type->name);
+ if (field_type->format) fprintf(pf, "\"%s\"", field_type->format);
+ else fprintf(pf, "0");
+ fprintf(pf, "); if (r!=0) return r;\n");
+ });
+ fprintf(pf, " {\n");
+ fprintf(pf, " uint32_t actual_murmur = toku_x1764_finish(&checksum);\n");
+ fprintf(pf, " r = toku_fread_uint32_t_nocrclen (f, &crc_in_file); len+=4; if (r!=0) return r;\n");
+ fprintf(pf, " fprintf(outf, \" crc=%%08x\", crc_in_file);\n");
+ fprintf(pf, " if (crc_in_file!=actual_murmur) fprintf(outf, \" checksum=%%08x\", actual_murmur);\n");
+ fprintf(pf, " r = toku_fread_uint32_t_nocrclen (f, &len_in_file); len+=4; if (r!=0) return r;\n");
+ fprintf(pf, " fprintf(outf, \" len=%%u\", len_in_file);\n");
+ fprintf(pf, " if (len_in_file!=len) fprintf(outf, \" actual_len=%%u\", len);\n");
+ fprintf(pf, " if (len_in_file!=len || crc_in_file!=actual_murmur) return DB_BADFORMAT;\n");
+ fprintf(pf, " };\n");
+ fprintf(pf, " fprintf(outf, \"\\n\");\n");
+ fprintf(pf, " return 0;\n\n");
+ });
+ fprintf(pf, " }\n");
+ fprintf(pf, " fprintf(outf, \"Unknown command %%d ('%%c')\", cmd, cmd);\n");
+ fprintf(pf, " return DB_BADFORMAT;\n");
+ fprintf(pf, "}\n\n");
+}
+
+static void
+generate_rollbacks (void) {
+ DO_ROLLBACKS(lt, {
+ fprintf2(cf, hf, "void toku_logger_save_rollback_%s (TOKUTXN txn", lt->name);
+ DO_FIELDS(field_type, lt, {
+ if ( strcmp(field_type->type, "BYTESTRING") == 0 ) {
+ fprintf2(cf, hf, ", BYTESTRING *%s_ptr", field_type->name);
+ }
+ else if ( strcmp(field_type->type, "FILENUMS") == 0 ) {
+ fprintf2(cf, hf, ", FILENUMS *%s_ptr", field_type->name);
+ }
+ else {
+ fprintf2(cf, hf, ", %s %s", field_type->type, field_type->name);
+ }
+ });
+
+ fprintf(hf, ");\n");
+ fprintf(cf, ") {\n");
+ fprintf(cf, " toku_txn_lock(txn);\n");
+ fprintf(cf, " ROLLBACK_LOG_NODE log;\n");
+ fprintf(cf, " toku_get_and_pin_rollback_log_for_new_entry(txn, &log);\n");
+ // 'memdup' all BYTESTRINGS here
+ DO_FIELDS(field_type, lt, {
+ if ( strcmp(field_type->type, "BYTESTRING") == 0 ) {
+ fprintf(cf, " BYTESTRING %s = {\n"
+ " .len = %s_ptr->len,\n"
+ " .data = cast_to_typeof(%s.data) toku_memdup_in_rollback(log, %s_ptr->data, %s_ptr->len)\n"
+ " };\n",
+ field_type->name, field_type->name, field_type->name, field_type->name, field_type->name);
+ }
+ if ( strcmp(field_type->type, "FILENUMS") == 0 ) {
+ fprintf(cf, " FILENUMS %s = {\n"
+ " .num = %s_ptr->num,\n"
+ " .filenums = cast_to_typeof(%s.filenums) toku_memdup_in_rollback(log, %s_ptr->filenums, %s_ptr->num * (sizeof (FILENUM)))\n"
+ " };\n",
+ field_type->name, field_type->name, field_type->name, field_type->name, field_type->name);
+ }
+ });
+ {
+ int count=0;
+ fprintf(cf, " uint32_t rollback_fsize = toku_logger_rollback_fsize_%s(", lt->name);
+ DO_FIELDS(field_type, lt, fprintf(cf, "%s%s", (count++>0)?", ":"", field_type->name));
+ fprintf(cf, ");\n");
+ }
+ fprintf(cf, " struct roll_entry *v;\n");
+ fprintf(cf, " size_t mem_needed = sizeof(v->u.%s) + __builtin_offsetof(struct roll_entry, u.%s);\n", lt->name, lt->name);
+ fprintf(cf, " CAST_FROM_VOIDP(v, toku_malloc_in_rollback(log, mem_needed));\n");
+ fprintf(cf, " assert(v);\n");
+ fprintf(cf, " v->cmd = (enum rt_cmd)%u;\n", lt->command_and_flags&0xff);
+ DO_FIELDS(field_type, lt, fprintf(cf, " v->u.%s.%s = %s;\n", lt->name, field_type->name, field_type->name));
+ fprintf(cf, " v->prev = log->newest_logentry;\n");
+ fprintf(cf, " if (log->oldest_logentry==NULL) log->oldest_logentry=v;\n");
+ fprintf(cf, " log->newest_logentry = v;\n");
+ fprintf(cf, " log->rollentry_resident_bytecount += rollback_fsize;\n");
+ fprintf(cf, " txn->roll_info.rollentry_raw_count += rollback_fsize;\n");
+ fprintf(cf, " txn->roll_info.num_rollentries++;\n");
+ fprintf(cf, " log->dirty = true;\n");
+ fprintf(cf, " // spill and unpin assert success internally\n");
+ fprintf(cf, " toku_maybe_spill_rollbacks(txn, log);\n");
+ fprintf(cf, " toku_rollback_log_unpin(txn, log);\n");
+ fprintf(cf, " toku_txn_unlock(txn);\n");
+ fprintf(cf, "}\n");
+ });
+
+ DO_ROLLBACKS(lt, {
+ fprintf2(cf, hf, "void toku_logger_rollback_wbuf_nocrc_write_%s (struct wbuf *wbuf", lt->name);
+ DO_FIELDS(field_type, lt, fprintf2(cf, hf, ", %s %s", field_type->type, field_type->name));
+ fprintf2(cf, hf, ")");
+ fprintf(hf, ";\n");
+ fprintf(cf, " {\n");
+
+ {
+ int count=0;
+ fprintf(cf, " uint32_t rollback_fsize = toku_logger_rollback_fsize_%s(", lt->name);
+ DO_FIELDS(field_type, lt, fprintf(cf, "%s%s", (count++>0)?", ":"", field_type->name));
+ fprintf(cf, ");\n");
+ fprintf(cf, " wbuf_nocrc_int(wbuf, rollback_fsize);\n");
+ }
+ fprintf(cf, " wbuf_nocrc_char(wbuf, '%c');\n", (char)(0xff&lt->command_and_flags));
+ DO_FIELDS(field_type, lt, fprintf(cf, " wbuf_nocrc_%s(wbuf, %s);\n", field_type->type, field_type->name));
+ fprintf(cf, "}\n");
+ });
+ fprintf2(cf, hf, "void toku_logger_rollback_wbuf_nocrc_write (struct wbuf *wbuf, struct roll_entry *r)");
+ fprintf(hf, ";\n");
+ fprintf(cf, " {\n switch (r->cmd) {\n");
+ DO_ROLLBACKS(lt, {
+ fprintf(cf, " case RT_%s: toku_logger_rollback_wbuf_nocrc_write_%s(wbuf", lt->name, lt->name);
+ DO_FIELDS(field_type, lt, fprintf(cf, ", r->u.%s.%s", lt->name, field_type->name));
+ fprintf(cf, "); return;\n");
+ });
+ fprintf(cf, " }\n assert(0);\n");
+ fprintf(cf, "}\n");
+ DO_ROLLBACKS(lt, {
+ fprintf2(cf, hf, "uint32_t toku_logger_rollback_fsize_%s (", lt->name);
+ int count=0;
+ DO_FIELDS(field_type, lt, fprintf2(cf, hf, "%s%s %s", (count++>0)?", ":"", field_type->type, field_type->name));
+ fprintf(hf, ");\n");
+ fprintf(cf, ") {\n");
+ fprintf(cf, " return 1 /* the cmd*/\n");
+ fprintf(cf, " + 4 /* the int at the end saying the size */");
+ DO_FIELDS(field_type, lt,
+ fprintf(cf, "\n + toku_logsizeof_%s(%s)", field_type->type, field_type->name));
+ fprintf(cf, ";\n}\n");
+ });
+ fprintf2(cf, hf, "uint32_t toku_logger_rollback_fsize(struct roll_entry *item)");
+ fprintf(hf, ";\n");
+ fprintf(cf, "{\n switch(item->cmd) {\n");
+ DO_ROLLBACKS(lt, {
+ fprintf(cf, " case RT_%s: return toku_logger_rollback_fsize_%s(", lt->name, lt->name);
+ int count=0;
+ DO_FIELDS(field_type, lt, fprintf(cf, "%sitem->u.%s.%s", (count++>0)?", ":"", lt->name, field_type->name));
+ fprintf(cf, ");\n");
+ });
+ fprintf(cf, " }\n assert(0);\n return 0;\n");
+ fprintf(cf, "}\n");
+
+ fprintf2(cf, hf, "int toku_parse_rollback(unsigned char *buf, uint32_t n_bytes, struct roll_entry **itemp, memarena *ma)");
+ fprintf(hf, ";\n");
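+ // The generated parser reads the one-byte cmd, allocates just enough of struct
+ // roll_entry from the memarena for that entry's union member, fills each field with
+ // rbuf_ma_<type>(), and returns EINVAL for an unknown cmd.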
+ fprintf(cf, " {\n assert(n_bytes>0);\n struct roll_entry *item;\n enum rt_cmd cmd = (enum rt_cmd)(buf[0]);\n size_t mem_needed;\n");
+ fprintf(cf, " struct rbuf rc = {buf, n_bytes, 1};\n");
+ fprintf(cf, " switch(cmd) {\n");
+ DO_ROLLBACKS(lt, {
+ fprintf(cf, " case RT_%s:\n", lt->name);
+ fprintf(cf, " mem_needed = sizeof(item->u.%s) + __builtin_offsetof(struct roll_entry, u.%s);\n", lt->name, lt->name);
+ fprintf(cf, " CAST_FROM_VOIDP(item, ma->malloc_from_arena(mem_needed));\n");
+ fprintf(cf, " item->cmd = cmd;\n");
+ DO_FIELDS(field_type, lt, fprintf(cf, " rbuf_ma_%s(&rc, ma, &item->u.%s.%s);\n", field_type->type, lt->name, field_type->name));
+ fprintf(cf, " *itemp = item;\n");
+ fprintf(cf, " return 0;\n");
+ });
+ fprintf(cf, " }\n return EINVAL;\n}\n");
+}
+
+static void
+generate_log_entry_functions(void) {
+ fprintf(hf, "LSN toku_log_entry_get_lsn(struct log_entry *);\n");
+ fprintf(cf, "LSN toku_log_entry_get_lsn(struct log_entry *le) {\n");
+ fprintf(cf, " return le->u.begin_checkpoint.lsn;\n");
+ fprintf(cf, "}\n");
+}
+
+const char codefile[] = "log_code.cc";
+const char printfile[] = "log_print.cc";
+const char headerfile[] = "log_header.h";
+int main (int argc, const char *const argv[]) {
+ assert(argc==2); // the single argument is the directory into which to put things
+ const char *dir = argv[1];
+ size_t codepathlen = sizeof(codefile) + strlen(dir) + 4;
+ size_t printpathlen = sizeof(printfile) + strlen(dir) + 4;
+ size_t headerpathlen = sizeof(headerfile) + strlen(dir) + 4;
+ char codepath[codepathlen];
+ char printpath[printpathlen];
+ char headerpath[headerpathlen];
+ { int r = snprintf(codepath, codepathlen, "%s/%s", argv[1], codefile); assert(r<(int)codepathlen); }
+ { int r = snprintf(printpath, printpathlen, "%s/%s", argv[1], printfile); assert(r<(int)printpathlen); }
+ { int r = snprintf(headerpath, headerpathlen, "%s/%s", argv[1], headerfile); assert(r<(int)headerpathlen); }
+ chmod(codepath, S_IRUSR|S_IWUSR);
+ chmod(headerpath, S_IRUSR|S_IWUSR);
+ unlink(codepath);
+ unlink(headerpath);
+ cf = fopen(codepath, "w");
+ if (cf==0) { int r = get_error_errno(); printf("fopen of %s failed because of errno=%d (%s)\n", codepath, r, strerror(r)); } // sometimes this is failing, so let's make a better diagnostic
+ assert(cf!=0);
+ hf = fopen(headerpath, "w"); assert(hf!=0);
+ pf = fopen(printpath, "w"); assert(pf!=0);
+ fprintf2(cf, hf, "/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */\n");
+ fprintf2(cf, hf, "// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:\n");
+ fprintf(hf, "#pragma once\n");
+ fprintf2(cf, hf, "/* Do not edit this file. This code generated by logformat.c. Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. */\n");
+ fprintf2(cf, hf, "#ident \"Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.\"\n");
+ fprintf2(cf, pf, "#include <stdint.h>\n");
+ fprintf2(cf, pf, "#include <sys/time.h>\n");
+ fprintf2(cf, pf, "#include <ft/logger/log-internal.h>\n");
+ fprintf(hf, "#include <ft/ft-internal.h>\n");
+ fprintf(hf, "#include <util/bytestring.h>\n");
+ fprintf(hf, "#include <util/memarena.h>\n");
+ generate_enum();
+ generate_log_struct();
+ generate_dispatch();
+ generate_log_writer();
+ generate_log_reader();
+ generate_rollbacks();
+ generate_log_entry_functions();
+ generate_logprint();
+ {
+ int r=fclose(hf); assert(r==0);
+ r=fclose(cf); assert(r==0);
+ r=fclose(pf); assert(r==0);
+ // Make it tougher to modify by mistake
+ chmod(codepath, S_IRUSR|S_IRGRP|S_IROTH);
+ chmod(headerpath, S_IRUSR|S_IRGRP|S_IROTH);
+ }
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/logger/logger.cc b/storage/tokudb/PerconaFT/ft/logger/logger.cc
new file mode 100644
index 00000000..3965714e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/logger.cc
@@ -0,0 +1,1436 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <memory.h>
+#include <ctype.h>
+#include <limits.h>
+#include <unistd.h>
+
+#include "ft/serialize/block_table.h"
+#include "ft/ft.h"
+#include "ft/logger/log-internal.h"
+#include "ft/txn/txn_manager.h"
+#include "ft/txn/rollback_log_node_cache.h"
+
+#include "util/status.h"
+
+int writing_rollback = 0;
+extern "C" {
+ uint force_recovery = 0;
+}
+
+static const int log_format_version = TOKU_LOG_VERSION;
+
+toku_instr_key *result_output_condition_lock_mutex_key;
+toku_instr_key *result_output_condition_key;
+toku_instr_key *tokudb_file_log_key;
+
+static int open_logfile(TOKULOGGER logger);
+static void logger_write_buffer(TOKULOGGER logger, LSN *fsynced_lsn);
+static void delete_logfile(TOKULOGGER logger,
+ long long index,
+ uint32_t version);
+static void grab_output(TOKULOGGER logger, LSN *fsynced_lsn);
+static void release_output(TOKULOGGER logger, LSN fsynced_lsn);
+
+static void toku_print_bytes (FILE *outf, uint32_t len, char *data) {
+ fprintf(outf, "\"");
+ uint32_t i;
+ for (i=0; i<len; i++) {
+ switch (data[i]) {
+ case '"': fprintf(outf, "\\\""); break;
+ case '\\': fprintf(outf, "\\\\"); break;
+ case '\n': fprintf(outf, "\\n"); break;
+ default:
+ if (isprint(data[i])) fprintf(outf, "%c", data[i]);
+ else fprintf(outf, "\\%03o", (unsigned char)(data[i]));
+ }
+ }
+ fprintf(outf, "\"");
+}
+
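+// Parse a log file name of the form "log<number>.tokulog<version>"; e.g.
+// "log000000000123.tokulog25" yields number 123 and version 25. Names without a
+// version suffix ("log000000000123.tokulog") are treated as TOKU_LOG_VERSION_1.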
+static bool is_a_logfile_any_version (const char *name, uint64_t *number_result, uint32_t *version_of_log) {
+ bool rval = true;
+ uint64_t result;
+ int n;
+ int r;
+ uint32_t version;
+ r = sscanf(name, "log%" SCNu64 ".tokulog%" SCNu32 "%n", &result, &version, &n);
+ if (r!=2 || name[n]!='\0' || version <= TOKU_LOG_VERSION_1) {
+ //Version 1 does NOT append 'version' to end of '.tokulog'
+ version = TOKU_LOG_VERSION_1;
+ r = sscanf(name, "log%" SCNu64 ".tokulog%n", &result, &n);
+ if (r!=1 || name[n]!='\0') {
+ rval = false;
+ }
+ }
+ if (rval) {
+ *number_result = result;
+ *version_of_log = version;
+ }
+
+ return rval;
+}
+
+// added for #2424, improved for #2521
+static bool is_a_logfile (const char *name, long long *number_result) {
+ bool rval;
+ uint64_t result;
+ uint32_t version;
+ rval = is_a_logfile_any_version(name, &result, &version);
+ if (rval && version != TOKU_LOG_VERSION)
+ rval = false;
+ if (rval)
+ *number_result = result;
+ return rval;
+}
+
+
+// TODO: can't fail
+int toku_logger_create (TOKULOGGER *resultp) {
+ TOKULOGGER CALLOC(result);
+ if (result==0) return get_error_errno();
+ result->is_open=false;
+ result->write_log_files = true;
+ result->trim_log_files = true;
+ result->directory=0;
+ // fd is uninitialized on purpose
+ // ct is uninitialized on purpose
+ result->lg_max = 100<<20; // 100MB default
+ // lsn is uninitialized
+ result->inbuf = (struct logbuf) {0, LOGGER_MIN_BUF_SIZE, (char *) toku_xmalloc(LOGGER_MIN_BUF_SIZE), ZERO_LSN};
+ result->outbuf = (struct logbuf) {0, LOGGER_MIN_BUF_SIZE, (char *) toku_xmalloc(LOGGER_MIN_BUF_SIZE), ZERO_LSN};
+ // written_lsn is uninitialized
+ // fsynced_lsn is uninitialized
+ result->last_completed_checkpoint_lsn = ZERO_LSN;
+ // next_log_file_number is uninitialized
+ // n_in_file is uninitialized
+ result->write_block_size = FT_DEFAULT_NODE_SIZE; // default logging size is the same as the default ft block size
+ toku_logfilemgr_create(&result->logfilemgr);
+ *resultp = result;
+ ml_init(&result->input_lock);
+ toku_mutex_init(*result_output_condition_lock_mutex_key,
+ &result->output_condition_lock,
+ nullptr);
+ toku_cond_init(
+ *result_output_condition_key, &result->output_condition, nullptr);
+ result->rollback_cachefile = NULL;
+ result->output_is_available = true;
+ toku_txn_manager_init(&result->txn_manager);
+ return 0;
+}
+
+static void fsync_logdir(TOKULOGGER logger) {
+ toku_fsync_dirfd_without_accounting(logger->dir);
+}
+
+static int open_logdir(TOKULOGGER logger, const char *directory) {
+ if (toku_os_is_absolute_name(directory)) {
+ logger->directory = toku_strdup(directory);
+ } else {
+ char cwdbuf[PATH_MAX];
+ char *cwd = getcwd(cwdbuf, PATH_MAX);
+ if (cwd == NULL)
+ return -1;
+ char *MALLOC_N(strlen(cwd) + strlen(directory) + 2, new_log_dir);
+ if (new_log_dir == NULL) {
+ return -2;
+ }
+ sprintf(new_log_dir, "%s/%s", cwd, directory);
+ logger->directory = new_log_dir;
+ }
+ if (logger->directory==0) return get_error_errno();
+
+ logger->dir = opendir(logger->directory);
+ if ( logger->dir == NULL ) return -1;
+ return 0;
+}
+
+static int close_logdir(TOKULOGGER logger) {
+ return closedir(logger->dir);
+}
+
+int
+toku_logger_open_with_last_xid(const char *directory, TOKULOGGER logger, TXNID last_xid) {
+ if (logger->is_open) return EINVAL;
+
+ int r;
+ TXNID last_xid_if_clean_shutdown = TXNID_NONE;
+ r = toku_logfilemgr_init(logger->logfilemgr, directory, &last_xid_if_clean_shutdown);
+ if ( r!=0 )
+ return r;
+ logger->lsn = toku_logfilemgr_get_last_lsn(logger->logfilemgr);
+ logger->written_lsn = logger->lsn;
+ logger->fsynced_lsn = logger->lsn;
+ logger->inbuf.max_lsn_in_buf = logger->lsn;
+ logger->outbuf.max_lsn_in_buf = logger->lsn;
+
+ // open directory, save pointer for fsyncing t:2445
+ r = open_logdir(logger, directory);
+ if (r!=0) return r;
+
+ long long nexti;
+ r = toku_logger_find_next_unused_log_file(logger->directory, &nexti);
+ if (r!=0) return r;
+
+ logger->next_log_file_number = nexti;
+ r = open_logfile(logger);
+ if (r!=0) return r;
+ if (last_xid == TXNID_NONE) {
+ last_xid = last_xid_if_clean_shutdown;
+ }
+ toku_txn_manager_set_last_xid_from_logger(logger->txn_manager, last_xid);
+
+ logger->is_open = true;
+ return 0;
+}
+
+int toku_logger_open (const char *directory, TOKULOGGER logger) {
+ return toku_logger_open_with_last_xid(directory, logger, TXNID_NONE);
+}
+
+bool toku_logger_rollback_is_open (TOKULOGGER logger) {
+ return logger->rollback_cachefile != NULL;
+}
+
+#define MAX_CACHED_ROLLBACK_NODES 4096
+
+void toku_logger_initialize_rollback_cache(TOKULOGGER logger, FT ft) {
+ ft->blocktable.free_unused_blocknums(ft->h->root_blocknum);
+ logger->rollback_cache.init(MAX_CACHED_ROLLBACK_NODES);
+}
+
+int toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, bool create) {
+ writing_rollback++;
+ assert(logger->is_open);
+ assert(!logger->rollback_cachefile);
+
+ FT_HANDLE ft_handle = nullptr; // Note, there is no DB associated with this FT.
+ toku_ft_handle_create(&ft_handle);
+ int r = toku_ft_handle_open(ft_handle, toku_product_name_strings.rollback_cachefile, create, create, cachetable, nullptr);
+ if (r == 0) {
+ FT ft = ft_handle->ft;
+ logger->rollback_cachefile = ft->cf;
+ toku_logger_initialize_rollback_cache(logger, ft_handle->ft);
+
+ // Verify it is empty
+ // Must have no data blocks (rollback logs or otherwise).
+ ft->blocktable.verify_no_data_blocks_except_root(ft->h->root_blocknum);
+ bool is_empty = toku_ft_is_empty_fast(ft_handle);
+ assert(is_empty);
+ } else {
+ toku_ft_handle_close(ft_handle);
+ }
+ writing_rollback--;
+ return r;
+}
+
+
+// Requires: Rollback cachefile can only be closed immediately after a checkpoint,
+// so it will always be clean (!h->dirty) when about to be closed.
+// Rollback log can only be closed when there are no open transactions,
+// so it will always be empty (no data blocks) when about to be closed.
+void toku_logger_close_rollback_check_empty(TOKULOGGER logger, bool clean_shutdown) {
+ CACHEFILE cf = logger->rollback_cachefile; // stored in logger at rollback cachefile open
+ if (cf) {
+ FT_HANDLE ft_to_close;
+ { //Find "ft_to_close"
+ logger->rollback_cache.destroy();
+ FT CAST_FROM_VOIDP(ft, toku_cachefile_get_userdata(cf));
+ if (clean_shutdown) {
+ //Verify it is safe to close it.
+ assert(!ft->h->dirty()); //Must not be dirty.
+ ft->blocktable.free_unused_blocknums(ft->h->root_blocknum);
+ // Must have no data blocks (rollback logs or otherwise).
+ ft->blocktable.verify_no_data_blocks_except_root(ft->h->root_blocknum);
+ assert(!ft->h->dirty());
+ } else {
+ ft->h->clear_dirty();
+ }
+ ft_to_close = toku_ft_get_only_existing_ft_handle(ft);
+ if (clean_shutdown) {
+ bool is_empty;
+ is_empty = toku_ft_is_empty_fast(ft_to_close);
+ assert(is_empty);
+ assert(!ft->h->dirty()); // it should not have been dirtied by the toku_ft_is_empty test.
+ }
+ }
+
+ toku_ft_handle_close(ft_to_close);
+ //Set as dealt with already.
+ logger->rollback_cachefile = NULL;
+ }
+}
+
+void toku_logger_close_rollback(TOKULOGGER logger) {
+ toku_logger_close_rollback_check_empty(logger, true);
+}
+
+// No locks held on entry
+// No locks held on exit.
+// No locks are needed, since you cannot legally close the log concurrently with doing anything else.
+// TODO: can't fail
+int toku_logger_close(TOKULOGGER *loggerp) {
+ int r;
+ TOKULOGGER logger = *loggerp;
+ if (!logger->is_open) {
+ goto is_closed;
+ }
+ ml_lock(&logger->input_lock);
+ LSN fsynced_lsn;
+ grab_output(logger, &fsynced_lsn);
+ logger_write_buffer(logger, &fsynced_lsn);
+ if (logger->fd!=-1) {
+ if (logger->write_log_files) {
+ toku_file_fsync_without_accounting(logger->fd);
+ }
+ r = toku_os_close(logger->fd);
+ assert(r == 0);
+ }
+ r = close_logdir(logger);
+ assert(r == 0);
+ logger->fd=-1;
+ release_output(logger, fsynced_lsn);
+
+is_closed:
+ toku_free(logger->inbuf.buf);
+ toku_free(logger->outbuf.buf);
+ // before destroying locks they must be left in the unlocked state.
+ ml_destroy(&logger->input_lock);
+ toku_mutex_destroy(&logger->output_condition_lock);
+ toku_cond_destroy(&logger->output_condition);
+ toku_txn_manager_destroy(logger->txn_manager);
+ if (logger->directory) toku_free(logger->directory);
+ toku_logfilemgr_destroy(&logger->logfilemgr);
+ toku_free(logger);
+ *loggerp=0;
+ return 0;
+}
+
+void toku_logger_shutdown(TOKULOGGER logger) {
+ if (logger->is_open) {
+ TXN_MANAGER mgr = logger->txn_manager;
+ if (toku_txn_manager_num_live_root_txns(mgr) == 0) {
+ TXNID last_xid = toku_txn_manager_get_last_xid(mgr);
+ toku_log_shutdown(logger, NULL, true, 0, last_xid);
+ }
+ }
+}
+
+static int close_and_open_logfile (TOKULOGGER logger, LSN *fsynced_lsn)
+// Effect: close the current file, and open the next one.
+// Entry: This thread has permission to modify the output.
+// Exit: This thread has permission to modify the output.
+{
+ int r;
+ if (logger->write_log_files) {
+ toku_file_fsync_without_accounting(logger->fd);
+ *fsynced_lsn = logger->written_lsn;
+ toku_logfilemgr_update_last_lsn(logger->logfilemgr,
+ logger->written_lsn); // fixes t:2294
+ }
+ r = toku_os_close(logger->fd);
+
+ if (r != 0)
+ return get_error_errno();
+ return open_logfile(logger);
+}
+
+static int
+max_int (int a, int b)
+{
+ if (a>b) return a;
+ return b;
+}
+
+// ***********************************************************
+// output mutex/condition manipulation routines
+// ***********************************************************
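+//
+// Typical sequence (see toku_logger_maybe_fsync below): a thread acquires output
+// permission with grab_output() or
+// wait_till_output_already_written_or_output_buffer_available(), swaps the in/out
+// buffers while holding the input lock, writes the outbuf to the log file, and then
+// calls release_output() with the highest LSN known to be fsynced.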
+
+static void
+wait_till_output_available (TOKULOGGER logger)
+// Effect: Wait until output becomes available.
+// Implementation hint: Use a pthread_cond_wait.
+// Entry: Holds the output_condition_lock (but not the inlock)
+// Exit: Holds the output_condition_lock, and logger->output_is_available is true
+//
+{
+ tokutime_t t0 = toku_time_now();
+ while (!logger->output_is_available) {
+ toku_cond_wait(&logger->output_condition, &logger->output_condition_lock);
+ }
+ if (tokutime_to_seconds(toku_time_now() - t0) >= 0.100) {
+ logger->num_wait_buf_long++;
+ }
+}
+
+static void
+grab_output(TOKULOGGER logger, LSN *fsynced_lsn)
+// Effect: Wait until output becomes available and get permission to modify output.
+// Entry: Holds no lock (including not holding the input lock, since we never hold both at once).
+// Exit: Hold permission to modify output (but none of the locks).
+{
+ toku_mutex_lock(&logger->output_condition_lock);
+ wait_till_output_available(logger);
+ logger->output_is_available = false;
+ if (fsynced_lsn) {
+ *fsynced_lsn = logger->fsynced_lsn;
+ }
+ toku_mutex_unlock(&logger->output_condition_lock);
+}
+
+static bool
+wait_till_output_already_written_or_output_buffer_available (TOKULOGGER logger, LSN lsn, LSN *fsynced_lsn)
+// Effect: Wait until either the output is available or the lsn has been written.
+// Return true iff the lsn has been written.
+// If returning true, then on exit we don't hold output permission.
+// If returning false, then on exit we do hold output permission.
+// Entry: Hold no locks.
+// Exit: Hold the output permission if returns false.
+{
+ bool result;
+ toku_mutex_lock(&logger->output_condition_lock);
+ while (1) {
+ if (logger->fsynced_lsn.lsn >= lsn.lsn) { // we can look at the fsynced lsn since we have the lock.
+ result = true;
+ break;
+ }
+ if (logger->output_is_available) {
+ logger->output_is_available = false;
+ result = false;
+ break;
+ }
+ // otherwise wait for a good time to look again.
+ toku_cond_wait(&logger->output_condition, &logger->output_condition_lock);
+ }
+ *fsynced_lsn = logger->fsynced_lsn;
+ toku_mutex_unlock(&logger->output_condition_lock);
+ return result;
+}
+
+static void
+release_output (TOKULOGGER logger, LSN fsynced_lsn)
+// Effect: Release output permission.
+// Entry: Holds output permissions, but no locks.
+// Exit: Holds neither locks nor output permission.
+{
+ toku_mutex_lock(&logger->output_condition_lock);
+ logger->output_is_available = true;
+ if (logger->fsynced_lsn.lsn < fsynced_lsn.lsn) {
+ logger->fsynced_lsn = fsynced_lsn;
+ }
+ toku_cond_broadcast(&logger->output_condition);
+ toku_mutex_unlock(&logger->output_condition_lock);
+}
+
+static void
+swap_inbuf_outbuf (TOKULOGGER logger)
+// Effect: Swap the inbuf and outbuf
+// Entry and exit: Hold the input lock and permission to modify output.
+{
+ struct logbuf tmp = logger->inbuf;
+ logger->inbuf = logger->outbuf;
+ logger->outbuf = tmp;
+ assert(logger->inbuf.n_in_buf == 0);
+}
+
+static void
+write_outbuf_to_logfile (TOKULOGGER logger, LSN *fsynced_lsn)
+// Effect: Write the contents of outbuf to the logfile. Don't necessarily fsync (but it might, in which case fsynced_lsn is updated).
+// If the logfile gets too big, open the next one (that's the case where an fsync might happen).
+// Entry and exit: Holds permission to modify output (and doesn't let it go, so it's ok to also hold the inlock).
+{
+ if (logger->outbuf.n_in_buf>0) {
+ // Write the outbuf to disk, take accounting measurements
+ tokutime_t io_t0 = toku_time_now();
+ toku_os_full_write(logger->fd, logger->outbuf.buf, logger->outbuf.n_in_buf);
+ tokutime_t io_t1 = toku_time_now();
+ logger->num_writes_to_disk++;
+ logger->bytes_written_to_disk += logger->outbuf.n_in_buf;
+ logger->time_spent_writing_to_disk += (io_t1 - io_t0);
+
+ assert(logger->outbuf.max_lsn_in_buf.lsn > logger->written_lsn.lsn); // since there is something in the buffer, its LSN must be bigger than what's previously written.
+ logger->written_lsn = logger->outbuf.max_lsn_in_buf;
+ logger->n_in_file += logger->outbuf.n_in_buf;
+ logger->outbuf.n_in_buf = 0;
+ }
+ // If the file got too big, then open a new file.
+ if (logger->n_in_file > logger->lg_max) {
+ int r = close_and_open_logfile(logger, fsynced_lsn);
+ assert_zero(r);
+ }
+}
+
+void
+toku_logger_make_space_in_inbuf (TOKULOGGER logger, int n_bytes_needed)
+// Entry: Holds the inlock
+// Exit: Holds the inlock
+// Effect: Upon exit, the inlock is held and there are at least n_bytes_needed in the buffer.
+// May release the inlock (and then reacquire it), so this is not atomic.
+// May obtain the output lock and output permission (but if it does so, it will have released the inlock, since we don't hold both locks at once).
+// (But may hold output permission and inlock at the same time.)
+// Implementation hint: Makes space in the inbuf, possibly by writing the inbuf to disk or increasing the size of the inbuf. There might not be an fsync.
+// Arguments: logger: the logger (side effects)
+// n_bytes_needed: how many bytes to make space for.
+{
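+ // Fast path: the inbuf is always at least LOGGER_MIN_BUF_SIZE bytes, so if the
+ // request fits within that bound it already fits in the current buffer.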
+ if (logger->inbuf.n_in_buf + n_bytes_needed <= LOGGER_MIN_BUF_SIZE) {
+ return;
+ }
+ ml_unlock(&logger->input_lock);
+ LSN fsynced_lsn;
+ grab_output(logger, &fsynced_lsn);
+
+ ml_lock(&logger->input_lock);
+ // Some other thread may have written the log out while we didn't have the lock. If we have space now, then be happy.
+ if (logger->inbuf.n_in_buf + n_bytes_needed <= LOGGER_MIN_BUF_SIZE) {
+ release_output(logger, fsynced_lsn);
+ return;
+ }
+ if (logger->inbuf.n_in_buf > 0) {
+ // There isn't enough space, and there is something in the buffer, so write the inbuf.
+ swap_inbuf_outbuf(logger);
+
+ // Don't release the inlock in this case, because we don't want to get starved.
+ write_outbuf_to_logfile(logger, &fsynced_lsn);
+ }
+ // the inbuf is empty. Make it big enough (just in case it is somehow smaller than a single log entry).
+ if (n_bytes_needed > logger->inbuf.buf_size) {
+ assert(n_bytes_needed < (1<<30)); // it seems unlikely to work if a logentry gets that big.
+ int new_size = max_int(logger->inbuf.buf_size * 2, n_bytes_needed); // make it at least twice as big, and big enough for n_bytes
+ assert(new_size < (1<<30));
+ XREALLOC_N(new_size, logger->inbuf.buf);
+ logger->inbuf.buf_size = new_size;
+ }
+ release_output(logger, fsynced_lsn);
+}
+
+void toku_logger_fsync(TOKULOGGER logger)
+// Effect: This is the exported fsync used by ydb.c for env_log_flush. Group commit doesn't have to work.
+// Entry: Holds no locks
+// Exit: Holds no locks
+// Implementation note: Acquire the output condition lock, then the output permission, then release the output condition lock, then get the input lock.
+// Then release everything. Hold the input lock while reading the current max lsn in buf to make drd happy that there is no data race.
+{
+ ml_lock(&logger->input_lock);
+ const LSN max_lsn_in_buf = logger->inbuf.max_lsn_in_buf;
+ ml_unlock(&logger->input_lock);
+
+ toku_logger_maybe_fsync(logger, max_lsn_in_buf, true, false);
+}
+
+void toku_logger_fsync_if_lsn_not_fsynced (TOKULOGGER logger, LSN lsn) {
+ if (logger->write_log_files) {
+ toku_logger_maybe_fsync(logger, lsn, true, false);
+ }
+}
+
+int toku_logger_is_open(TOKULOGGER logger) {
+ if (logger==0) return 0;
+ return logger->is_open;
+}
+
+void toku_logger_set_cachetable (TOKULOGGER logger, CACHETABLE ct) {
+ logger->ct = ct;
+}
+
+int toku_logger_set_lg_max(TOKULOGGER logger, uint32_t lg_max) {
+ if (logger==0) return EINVAL; // no logger
+ if (logger->is_open) return EINVAL;
+ if (lg_max>(1<<30)) return EINVAL; // too big
+ logger->lg_max = lg_max;
+ return 0;
+}
+int toku_logger_get_lg_max(TOKULOGGER logger, uint32_t *lg_maxp) {
+ if (logger==0) return EINVAL; // no logger
+ *lg_maxp = logger->lg_max;
+ return 0;
+}
+
+int toku_logger_set_lg_bsize(TOKULOGGER logger, uint32_t bsize) {
+ if (logger==0) return EINVAL; // no logger
+ if (logger->is_open) return EINVAL;
+ if (bsize<=0 || bsize>(1<<30)) return EINVAL;
+ logger->write_block_size = bsize;
+ return 0;
+}
+
+int toku_logger_find_next_unused_log_file(const char *directory, long long *result)
+// This is called during logger initialization, and no locks are required.
+{
+ DIR *d=opendir(directory);
+ long long maxf=-1; *result = maxf;
+ struct dirent *de;
+ if (d==0) return get_error_errno();
+ while ((de=readdir(d))) {
+ if (de==0) return get_error_errno();
+ long long thisl = -1;
+ if ( is_a_logfile(de->d_name, &thisl) ) {
+ if ((long long)thisl > maxf) maxf = thisl;
+ }
+ }
+ *result=maxf+1;
+ int r = closedir(d);
+ return r;
+}
+
+// TODO: Put this in portability layer when ready
+// in: file pathname that may have a dirname prefix
+// return: file leaf name
+static char * fileleafname(char *pathname) {
+ const char delimiter = '/';
+ char *leafname = strrchr(pathname, delimiter);
+ if (leafname)
+ leafname++;
+ else
+ leafname = pathname;
+ return leafname;
+}
+
+static int logfilenamecompare (const void *ap, const void *bp) {
+ char *a=*(char**)ap;
+ char *a_leafname = fileleafname(a);
+ char *b=*(char**)bp;
+ char * b_leafname = fileleafname(b);
+ int rval;
+ bool valid;
+ uint64_t num_a = 0; // placate compiler
+ uint64_t num_b = 0;
+ uint32_t ver_a = 0;
+ uint32_t ver_b = 0;
+ valid = is_a_logfile_any_version(a_leafname, &num_a, &ver_a);
+ invariant(valid);
+ valid = is_a_logfile_any_version(b_leafname, &num_b, &ver_b);
+ invariant(valid);
+ if (ver_a < ver_b) rval = -1;
+ else if (ver_a > ver_b) rval = +1;
+ else if (num_a < num_b) rval = -1;
+ else if (num_a > num_b) rval = +1;
+ else rval = 0;
+ return rval;
+}
+
+// Return the log files in sorted order
+// Return a null-terminated array of strings, and also return the number of strings in the array.
+// Requires: Race conditions must be dealt with by caller. Either call during initialization or grab the output permission.
+int toku_logger_find_logfiles (const char *directory, char ***resultp, int *n_logfiles)
+{
+ int result_limit=2;
+ int n_results=0;
+ char **MALLOC_N(result_limit, result);
+ assert(result!= NULL);
+ struct dirent *de;
+ DIR *d=opendir(directory);
+ if (d==0) {
+ int er = get_error_errno();
+ toku_free(result);
+ return er;
+ }
+ int dirnamelen = strlen(directory);
+ while ((de=readdir(d))) {
+ uint64_t thisl;
+ uint32_t version_ignore;
+ if ( !(is_a_logfile_any_version(de->d_name, &thisl, &version_ignore)) ) continue; //#2424: Skip over files that don't match the exact logfile template
+ if (n_results+1>=result_limit) {
+ result_limit*=2;
+ XREALLOC_N(result_limit, result);
+ }
+ int fnamelen = dirnamelen + strlen(de->d_name) + 2; // One for the slash and one for the trailing NUL.
+ char *XMALLOC_N(fnamelen, fname);
+ snprintf(fname, fnamelen, "%s/%s", directory, de->d_name);
+ result[n_results++] = fname;
+ }
+ // Return them in increasing order.
+ qsort(result, n_results, sizeof(result[0]), logfilenamecompare);
+ *resultp = result;
+ *n_logfiles = n_results;
+ result[n_results]=0; // make a trailing null
+ return d ? closedir(d) : 0;
+}
+
+void toku_logger_free_logfiles(char **logfiles, int n_logfiles) {
+ for (int i = 0; i < n_logfiles; i++)
+ toku_free(logfiles[i]);
+ toku_free(logfiles);
+}
+
+static int open_logfile (TOKULOGGER logger)
+// Entry and Exit: This thread has permission to modify the output.
+{
+ int fnamelen = strlen(logger->directory)+50;
+ char fname[fnamelen];
+ snprintf(fname,
+ fnamelen,
+ "%s/log%012lld.tokulog%d",
+ logger->directory,
+ logger->next_log_file_number,
+ TOKU_LOG_VERSION);
+ long long index = logger->next_log_file_number;
+ if (logger->write_log_files) {
+ logger->fd =
+ toku_os_open(fname,
+ O_CREAT + O_WRONLY + O_TRUNC + O_EXCL + O_BINARY,
+ S_IRUSR + S_IWUSR,
+ *tokudb_file_log_key);
+ if (logger->fd == -1) {
+ return get_error_errno();
+ }
+ fsync_logdir(logger);
+ logger->next_log_file_number++;
+ } else {
+ logger->fd = toku_os_open(
+ DEV_NULL_FILE, O_WRONLY + O_BINARY, S_IWUSR, *tokudb_file_log_key);
+ if (logger->fd == -1) {
+ return get_error_errno();
+ }
+ }
+ toku_os_full_write(logger->fd, "tokulogg", 8);
+ int version_l = toku_htonl(log_format_version); //version MUST be in network byte order regardless of disk order
+ toku_os_full_write(logger->fd, &version_l, 4);
+ if ( logger->write_log_files ) {
+ TOKULOGFILEINFO XMALLOC(lf_info);
+ lf_info->index = index;
+ lf_info->maxlsn = logger->written_lsn;
+ lf_info->version = TOKU_LOG_VERSION;
+ toku_logfilemgr_add_logfile_info(logger->logfilemgr, lf_info);
+ }
+ logger->fsynced_lsn = logger->written_lsn;
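+ // The new file already contains 12 bytes: the 8-byte "tokulogg" magic plus the 4-byte version.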
+ logger->n_in_file = 12;
+ return 0;
+}
+
+static void delete_logfile(TOKULOGGER logger, long long index, uint32_t version)
+// Entry and Exit: This thread has permission to modify the output.
+{
+ int fnamelen = strlen(logger->directory)+50;
+ char fname[fnamelen];
+ snprintf(fname, fnamelen, "%s/log%012lld.tokulog%d", logger->directory, index, version);
+ int r = remove(fname);
+ invariant_zero(r);
+}
+
+void toku_logger_maybe_trim_log(TOKULOGGER logger, LSN trim_lsn)
+// On entry and exit: No logger locks held.
+// Acquires and releases output permission.
+{
+ LSN fsynced_lsn;
+ grab_output(logger, &fsynced_lsn);
+ TOKULOGFILEMGR lfm = logger->logfilemgr;
+ int n_logfiles = toku_logfilemgr_num_logfiles(lfm);
+
+ TOKULOGFILEINFO lf_info = NULL;
+
+ if ( logger->write_log_files && logger->trim_log_files) {
+ while ( n_logfiles > 1 ) { // don't delete current logfile
+ uint32_t log_version;
+ lf_info = toku_logfilemgr_get_oldest_logfile_info(lfm);
+ log_version = lf_info->version;
+ if ( lf_info->maxlsn.lsn >= trim_lsn.lsn ) {
+ // file contains an open LSN, can't delete this or any newer log files
+ break;
+ }
+ // need to save a copy - toku_logfilemgr_delete_oldest_logfile_info frees the lf_info
+ long index = lf_info->index;
+ toku_logfilemgr_delete_oldest_logfile_info(lfm);
+ n_logfiles--;
+ delete_logfile(logger, index, log_version);
+ }
+ }
+ release_output(logger, fsynced_lsn);
+}
+
+void toku_logger_write_log_files (TOKULOGGER logger, bool write_log_files)
+// Called only during initialization (or just after recovery), so no locks are needed.
+{
+ logger->write_log_files = write_log_files;
+}
+
+void toku_logger_trim_log_files (TOKULOGGER logger, bool trim_log_files)
+// Called only during initialization, so no locks are needed.
+{
+ logger->trim_log_files = trim_log_files;
+}
+
+bool toku_logger_txns_exist(TOKULOGGER logger)
+// Called during close of environment to ensure that transactions don't exist
+{
+ return toku_txn_manager_txns_exist(logger->txn_manager);
+}
+
+
+void toku_logger_maybe_fsync(TOKULOGGER logger, LSN lsn, int do_fsync, bool holds_input_lock)
+// Effect: If do_fsync is nonzero, then make sure that the log is flushed and synced at least up to lsn.
+// Entry: Holds input lock iff 'holds_input_lock'. The log entry has already been written to the input buffer.
+// Exit: Holds no locks.
+// The input lock may be released and then reacquired. Thus this function does not run atomically with respect to other threads.
+{
+ if (holds_input_lock) {
+ ml_unlock(&logger->input_lock);
+ }
+ if (do_fsync) {
+ // reacquire the locks (acquire output permission first)
+ LSN fsynced_lsn;
+ bool already_done = wait_till_output_already_written_or_output_buffer_available(logger, lsn, &fsynced_lsn);
+ if (already_done) {
+ return;
+ }
+
+ // otherwise we now own the output permission, and our lsn hasn't been written out yet.
+
+ ml_lock(&logger->input_lock);
+
+ swap_inbuf_outbuf(logger);
+
+ ml_unlock(&logger->input_lock); // release the input lock now, so other threads can fill the inbuf. (Thus enabling group commit.)
+
+ write_outbuf_to_logfile(logger, &fsynced_lsn);
+ if (fsynced_lsn.lsn < lsn.lsn) {
+ // it may have gotten fsynced by the write_outbuf_to_logfile.
+ toku_file_fsync_without_accounting(logger->fd);
+ assert(fsynced_lsn.lsn <= logger->written_lsn.lsn);
+ fsynced_lsn = logger->written_lsn;
+ }
+ // the last lsn is only accessed while holding output permission or else when the log file is old.
+ if (logger->write_log_files) {
+ toku_logfilemgr_update_last_lsn(logger->logfilemgr, logger->written_lsn);
+ }
+ release_output(logger, fsynced_lsn);
+ }
+}
+
+static void
+logger_write_buffer(TOKULOGGER logger, LSN *fsynced_lsn)
+// Entry: Holds the input lock and permission to modify output.
+// Exit: Holds only the permission to modify output.
+// Effect: Write the buffers to the output, and fsync if the logger is writing log files.
+// Note: Only called during single-threaded activity (toku_logger_close and toku_logger_restart), so locks aren't really needed.
+{
+ swap_inbuf_outbuf(logger);
+ ml_unlock(&logger->input_lock);
+ write_outbuf_to_logfile(logger, fsynced_lsn);
+ if (logger->write_log_files) {
+ toku_file_fsync_without_accounting(logger->fd);
+ toku_logfilemgr_update_last_lsn(logger->logfilemgr, logger->written_lsn); // t:2294
+ }
+}
+
+int toku_logger_restart(TOKULOGGER logger, LSN lastlsn)
+// Entry and exit: Holds no locks (this is called only during single-threaded activity, such as initial start).
+{
+ int r;
+
+ // flush out the log buffer
+ LSN fsynced_lsn;
+ grab_output(logger, &fsynced_lsn);
+ ml_lock(&logger->input_lock);
+ logger_write_buffer(logger, &fsynced_lsn);
+
+ // close the log file
+ if (logger->write_log_files) { // fsyncs don't work to /dev/null
+ toku_file_fsync_without_accounting(logger->fd);
+ }
+ r = toku_os_close(logger->fd);
+ assert(r == 0);
+ logger->fd = -1;
+
+ // reset the LSN's to the lastlsn when the logger was opened
+ logger->lsn = logger->written_lsn = logger->fsynced_lsn = lastlsn;
+ logger->write_log_files = true;
+ logger->trim_log_files = true;
+
+ // open a new log file
+ r = open_logfile(logger);
+ release_output(logger, fsynced_lsn);
+ return r;
+}
+
+// fname is the iname
+void toku_logger_log_fcreate (TOKUTXN txn, const char *fname, FILENUM filenum, uint32_t mode,
+ uint32_t treeflags, uint32_t nodesize, uint32_t basementnodesize,
+ enum toku_compression_method compression_method) {
+ if (txn) {
+ BYTESTRING bs_fname = { .len = (uint32_t) strlen(fname), .data = (char *) fname };
+ // fsync log on fcreate
+ toku_log_fcreate (txn->logger, (LSN*)0, 1, txn, toku_txn_get_txnid(txn), filenum,
+ bs_fname, mode, treeflags, nodesize, basementnodesize, compression_method);
+ }
+}
+
+
+// We only do fdelete on open ft's, so we pass the filenum here
+void toku_logger_log_fdelete (TOKUTXN txn, FILENUM filenum) {
+ if (txn) {
+ //No fsync.
+ toku_log_fdelete (txn->logger, (LSN*)0, 0, txn, toku_txn_get_txnid(txn), filenum);
+ }
+}
+
+
+
+/* fopen isn't really an action. It's just for bookkeeping. We need to know the filename that goes with a filenum. */
+void toku_logger_log_fopen (TOKUTXN txn, const char * fname, FILENUM filenum, uint32_t treeflags) {
+ if (txn) {
+ BYTESTRING bs;
+ bs.len = strlen(fname);
+ bs.data = (char*)fname;
+ toku_log_fopen (txn->logger, (LSN*)0, 0, bs, filenum, treeflags);
+ }
+}
+
+static int toku_fread_uint8_t_nocrclen (FILE *f, uint8_t *v) {
+ int vi=fgetc(f);
+ if (vi==EOF) return -1;
+ uint8_t vc=(uint8_t)vi;
+ *v = vc;
+ return 0;
+}
+
+int toku_fread_uint8_t (FILE *f, uint8_t *v, struct x1764 *mm, uint32_t *len) {
+ int vi=fgetc(f);
+ if (vi==EOF) return -1;
+ uint8_t vc=(uint8_t)vi;
+ toku_x1764_add(mm, &vc, 1);
+ (*len)++;
+ *v = vc;
+ return 0;
+}
+
+int toku_fread_uint32_t_nocrclen (FILE *f, uint32_t *v) {
+ uint32_t result;
+ uint8_t *cp = (uint8_t*)&result;
+ int r;
+ r = toku_fread_uint8_t_nocrclen (f, cp+0); if (r!=0) return r;
+ r = toku_fread_uint8_t_nocrclen (f, cp+1); if (r!=0) return r;
+ r = toku_fread_uint8_t_nocrclen (f, cp+2); if (r!=0) return r;
+ r = toku_fread_uint8_t_nocrclen (f, cp+3); if (r!=0) return r;
+ *v = toku_dtoh32(result);
+
+ return 0;
+}
+int toku_fread_uint32_t (FILE *f, uint32_t *v, struct x1764 *checksum, uint32_t *len) {
+ uint32_t result;
+ uint8_t *cp = (uint8_t*)&result;
+ int r;
+ r = toku_fread_uint8_t (f, cp+0, checksum, len); if(r!=0) return r;
+ r = toku_fread_uint8_t (f, cp+1, checksum, len); if(r!=0) return r;
+ r = toku_fread_uint8_t (f, cp+2, checksum, len); if(r!=0) return r;
+ r = toku_fread_uint8_t (f, cp+3, checksum, len); if(r!=0) return r;
+ *v = toku_dtoh32(result);
+ return 0;
+}
+
+int toku_fread_uint64_t (FILE *f, uint64_t *v, struct x1764 *checksum, uint32_t *len) {
+ uint32_t v1,v2;
+ int r;
+ r=toku_fread_uint32_t(f, &v1, checksum, len); if (r!=0) return r;
+ r=toku_fread_uint32_t(f, &v2, checksum, len); if (r!=0) return r;
+ *v = (((uint64_t)v1)<<32 ) | ((uint64_t)v2);
+ return 0;
+}
+
+int toku_fread_bool (FILE *f, bool *v, struct x1764 *mm, uint32_t *len) {
+ uint8_t iv;
+ int r = toku_fread_uint8_t(f, &iv, mm, len);
+ if (r == 0) {
+ *v = (iv!=0);
+ }
+ return r;
+}
+
+int toku_fread_LSN (FILE *f, LSN *lsn, struct x1764 *checksum, uint32_t *len) {
+ return toku_fread_uint64_t (f, &lsn->lsn, checksum, len);
+}
+
+int toku_fread_BLOCKNUM (FILE *f, BLOCKNUM *b, struct x1764 *checksum, uint32_t *len) {
+ return toku_fread_uint64_t (f, (uint64_t*)&b->b, checksum, len);
+}
+
+int toku_fread_FILENUM (FILE *f, FILENUM *filenum, struct x1764 *checksum, uint32_t *len) {
+ return toku_fread_uint32_t (f, &filenum->fileid, checksum, len);
+}
+
+int toku_fread_TXNID (FILE *f, TXNID *txnid, struct x1764 *checksum, uint32_t *len) {
+ return toku_fread_uint64_t (f, txnid, checksum, len);
+}
+
+int toku_fread_TXNID_PAIR (FILE *f, TXNID_PAIR *txnid, struct x1764 *checksum, uint32_t *len) {
+ TXNID parent;
+ TXNID child;
+ int r;
+ r = toku_fread_TXNID(f, &parent, checksum, len); if (r != 0) { return r; }
+ r = toku_fread_TXNID(f, &child, checksum, len); if (r != 0) { return r; }
+ txnid->parent_id64 = parent;
+ txnid->child_id64 = child;
+ return 0;
+}
+
+
+int toku_fread_XIDP (FILE *f, XIDP *xidp, struct x1764 *checksum, uint32_t *len) {
+ // These reads are verbose because XA defined the fields as "long", but we use 4 bytes, 1 byte and 1 byte respectively.
+ TOKU_XA_XID *XMALLOC(xid);
+ {
+ uint32_t formatID;
+ int r = toku_fread_uint32_t(f, &formatID, checksum, len);
+ if (r!=0) return r;
+ xid->formatID = formatID;
+ }
+ {
+ uint8_t gtrid_length;
+ int r = toku_fread_uint8_t (f, &gtrid_length, checksum, len);
+ if (r!=0) return r;
+ xid->gtrid_length = gtrid_length;
+ }
+ {
+ uint8_t bqual_length;
+ int r = toku_fread_uint8_t (f, &bqual_length, checksum, len);
+ if (r!=0) return r;
+ xid->bqual_length = bqual_length;
+ }
+ for (int i=0; i< xid->gtrid_length + xid->bqual_length; i++) {
+ uint8_t byte;
+ int r = toku_fread_uint8_t(f, &byte, checksum, len);
+ if (r!=0) return r;
+ xid->data[i] = byte;
+ }
+ *xidp = xid;
+ return 0;
+}
+
+// fills in the bs with malloced data.
+int toku_fread_BYTESTRING (FILE *f, BYTESTRING *bs, struct x1764 *checksum, uint32_t *len) {
+ int r=toku_fread_uint32_t(f, (uint32_t*)&bs->len, checksum, len);
+ if (r!=0) return r;
+ XMALLOC_N(bs->len, bs->data);
+ uint32_t i;
+ for (i=0; i<bs->len; i++) {
+ r=toku_fread_uint8_t(f, (uint8_t*)&bs->data[i], checksum, len);
+ if (r!=0) {
+ toku_free(bs->data);
+ bs->data=0;
+ return r;
+ }
+ }
+ return 0;
+}
+
+// fills in the fs with malloced data.
+int toku_fread_FILENUMS (FILE *f, FILENUMS *fs, struct x1764 *checksum, uint32_t *len) {
+ int r=toku_fread_uint32_t(f, (uint32_t*)&fs->num, checksum, len);
+ if (r!=0) return r;
+ XMALLOC_N(fs->num, fs->filenums);
+ uint32_t i;
+ for (i=0; i<fs->num; i++) {
+ r=toku_fread_FILENUM (f, &fs->filenums[i], checksum, len);
+ if (r!=0) {
+ toku_free(fs->filenums);
+ fs->filenums=0;
+ return r;
+ }
+ }
+ return 0;
+}
+
+int toku_logprint_LSN (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
+ LSN v;
+ int r = toku_fread_LSN(inf, &v, checksum, len);
+ if (r!=0) return r;
+ fprintf(outf, " %s=%" PRIu64, fieldname, v.lsn);
+ return 0;
+}
+
+int toku_logprint_TXNID (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
+ TXNID v;
+ int r = toku_fread_TXNID(inf, &v, checksum, len);
+ if (r!=0) return r;
+ fprintf(outf, " %s=%" PRIu64, fieldname, v);
+ return 0;
+}
+
+int toku_logprint_TXNID_PAIR (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
+ TXNID_PAIR v;
+ int r = toku_fread_TXNID_PAIR(inf, &v, checksum, len);
+ if (r!=0) return r;
+ fprintf(outf, " %s=%" PRIu64 ",%" PRIu64, fieldname, v.parent_id64, v.child_id64);
+ return 0;
+}
+
+int toku_logprint_XIDP (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
+ XIDP vp;
+ int r = toku_fread_XIDP(inf, &vp, checksum, len);
+ if (r!=0) return r;
+ fprintf(outf, " %s={formatID=0x%lx gtrid_length=%ld bqual_length=%ld data=", fieldname, vp->formatID, vp->gtrid_length, vp->bqual_length);
+ toku_print_bytes(outf, vp->gtrid_length + vp->bqual_length, vp->data);
+ fprintf(outf, "}");
+ toku_free(vp);
+ return 0;
+}
+
+int toku_logprint_uint8_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format) {
+ uint8_t v;
+ int r = toku_fread_uint8_t(inf, &v, checksum, len);
+ if (r!=0) return r;
+ fprintf(outf, " %s=%d", fieldname, v);
+ if (format) fprintf(outf, format, v);
+ else if (v=='\'') fprintf(outf, "('\'')");
+ else if (isprint(v)) fprintf(outf, "('%c')", v);
+ else {}/*nothing*/
+ return 0;
+}
+
+int toku_logprint_uint32_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format) {
+ uint32_t v;
+ int r = toku_fread_uint32_t(inf, &v, checksum, len);
+ if (r!=0) return r;
+ fprintf(outf, " %s=", fieldname);
+ fprintf(outf, format ? format : "%d", v);
+ return 0;
+}
+
+int toku_logprint_uint64_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format) {
+ uint64_t v;
+ int r = toku_fread_uint64_t(inf, &v, checksum, len);
+ if (r!=0) return r;
+ fprintf(outf, " %s=", fieldname);
+ fprintf(outf, format ? format : "%" PRId64, v);
+ return 0;
+}
+
+int toku_logprint_bool (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
+ bool v;
+ int r = toku_fread_bool(inf, &v, checksum, len);
+ if (r!=0) return r;
+ fprintf(outf, " %s=%s", fieldname, v ? "true" : "false");
+ return 0;
+
+}
+
+void toku_print_BYTESTRING (FILE *outf, uint32_t len, char *data) {
+ fprintf(outf, "{len=%u data=", len);
+ toku_print_bytes(outf, len, data);
+ fprintf(outf, "}");
+
+}
+
+int toku_logprint_BYTESTRING (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
+ BYTESTRING bs;
+ int r = toku_fread_BYTESTRING(inf, &bs, checksum, len);
+ if (r!=0) return r;
+ fprintf(outf, " %s=", fieldname);
+ toku_print_BYTESTRING(outf, bs.len, bs.data);
+ toku_free(bs.data);
+ return 0;
+}
+
+int toku_logprint_BLOCKNUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format) {
+ return toku_logprint_uint64_t(outf, inf, fieldname, checksum, len, format);
+
+}
+
+int toku_logprint_FILENUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format) {
+ return toku_logprint_uint32_t(outf, inf, fieldname, checksum, len, format);
+
+}
+
+static void
+toku_print_FILENUMS (FILE *outf, uint32_t num, FILENUM *filenums) {
+ fprintf(outf, "{num=%u filenums=\"", num);
+ uint32_t i;
+ for (i=0; i<num; i++) {
+ if (i>0)
+ fprintf(outf, ",");
+ fprintf(outf, "0x%" PRIx32, filenums[i].fileid);
+ }
+ fprintf(outf, "\"}");
+
+}
+
+int toku_logprint_FILENUMS (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
+ FILENUMS bs;
+ int r = toku_fread_FILENUMS(inf, &bs, checksum, len);
+ if (r!=0) return r;
+ fprintf(outf, " %s=", fieldname);
+ toku_print_FILENUMS(outf, bs.num, bs.filenums);
+ toku_free(bs.filenums);
+ return 0;
+}
+
+int toku_read_and_print_logmagic (FILE *f, uint32_t *versionp) {
+ {
+ char magic[8];
+ int r=fread(magic, 1, 8, f);
+ if (r!=8) {
+ return DB_BADFORMAT;
+ }
+ if (memcmp(magic, "tokulogg", 8)!=0) {
+ return DB_BADFORMAT;
+ }
+ }
+ {
+ int version;
+ int r=fread(&version, 1, 4, f);
+ if (r!=4) {
+ return DB_BADFORMAT;
+ }
+ printf("tokulog v.%u\n", toku_ntohl(version));
+ //version MUST be in network order regardless of disk order
+ *versionp=toku_ntohl(version);
+ }
+ return 0;
+}
+
+int toku_read_logmagic (FILE *f, uint32_t *versionp) {
+ {
+ char magic[8];
+ int r=fread(magic, 1, 8, f);
+ if (r!=8) {
+ return DB_BADFORMAT;
+ }
+ if (memcmp(magic, "tokulogg", 8)!=0) {
+ return DB_BADFORMAT;
+ }
+ }
+ {
+ int version;
+ int r=fread(&version, 1, 4, f);
+ if (r!=4) {
+ return DB_BADFORMAT;
+ }
+ *versionp=toku_ntohl(version);
+ }
+ return 0;
+}
+
+TXNID_PAIR toku_txn_get_txnid (TOKUTXN txn) {
+ TXNID_PAIR tp = { .parent_id64 = TXNID_NONE, .child_id64 = TXNID_NONE};
+ if (txn==0) return tp;
+ else return txn->txnid;
+}
+
+LSN toku_logger_last_lsn(TOKULOGGER logger) {
+ return logger->lsn;
+}
+
+TOKULOGGER toku_txn_logger (TOKUTXN txn) {
+ return txn ? txn->logger : 0;
+}
+
+void toku_txnid2txn(TOKULOGGER logger, TXNID_PAIR txnid, TOKUTXN *result) {
+ TOKUTXN root_txn = NULL;
+ toku_txn_manager_suspend(logger->txn_manager);
+ toku_txn_manager_id2txn_unlocked(logger->txn_manager, txnid, &root_txn);
+ if (root_txn == NULL || root_txn->txnid.child_id64 == txnid.child_id64) {
+ *result = root_txn;
+ }
+ else if (root_txn != NULL) {
+ root_txn->child_manager->suspend();
+ root_txn->child_manager->find_tokutxn_by_xid_unlocked(txnid, result);
+ root_txn->child_manager->resume();
+ }
+ toku_txn_manager_resume(logger->txn_manager);
+}
+
+// Find the earliest LSN in a log. No locks are needed.
+static int peek_at_log(TOKULOGGER logger, char *filename, LSN *first_lsn) {
+ int fd = toku_os_open(
+ filename, O_RDONLY + O_BINARY, S_IRUSR, *tokudb_file_log_key);
+ if (fd < 0) {
+ int er = get_error_errno();
+ if (logger->write_log_files)
+ printf("couldn't open: %s\n", strerror(er));
+ return er;
+ }
+ enum { SKIP = 12+1+4 }; // read the 12 byte header, the first message, and the first len
+ unsigned char header[SKIP+8];
+ int r = read(fd, header, SKIP+8);
+ if (r!=SKIP+8) return 0; // cannot determine that it's archivable, so we'll assume no. If a later log is archivable then this one will be too.
+
+ uint64_t lsn;
+ {
+ struct rbuf rb;
+ rb.buf = header+SKIP;
+ rb.size = 8;
+ rb.ndone = 0;
+ lsn = rbuf_ulonglong(&rb);
+ }
+
+ r = toku_os_close(fd);
+
+ if (r != 0) {
+ return 0;
+ }
+
+ first_lsn->lsn = lsn;
+ return 0;
+}
+
+// Return a malloc'd array of malloc'd strings which are the filenames that can be archived.
+// Output permission is obtained briefly so we can get a list of the log files without conflicting.
+int toku_logger_log_archive (TOKULOGGER logger, char ***logs_p, int flags) {
+ if (flags!=0) return EINVAL; // don't know what to do.
+ int all_n_logs;
+ int i;
+ char **all_logs;
+ int n_logfiles;
+ LSN fsynced_lsn;
+ grab_output(logger, &fsynced_lsn);
+ int r = toku_logger_find_logfiles (logger->directory, &all_logs, &n_logfiles);
+ release_output(logger, fsynced_lsn);
+ if (r!=0) return r;
+
+ for (i=0; all_logs[i]; i++);
+ all_n_logs=i;
+ // get them into increasing order
+ qsort(all_logs, all_n_logs, sizeof(all_logs[0]), logfilenamecompare);
+
+ LSN save_lsn = logger->last_completed_checkpoint_lsn;
+
+ // Now starting at the last one, look for archivable ones.
+ // Count the total number of bytes, because we have to return a single big array. (That's the BDB interface. Bleah...)
+ LSN earliest_lsn_in_logfile={(unsigned long long)(-1LL)};
+ r = peek_at_log(logger, all_logs[all_n_logs-1], &earliest_lsn_in_logfile); // try to find the lsn that's in the most recent log
+ if (earliest_lsn_in_logfile.lsn <= save_lsn.lsn) {
+ i=all_n_logs-1;
+ } else {
+ for (i=all_n_logs-2; i>=0; i--) { // start at all_n_logs-2 because we never archive the most recent log
+ r = peek_at_log(logger, all_logs[i], &earliest_lsn_in_logfile);
+ if (r!=0) continue; // In case of error, just keep going
+
+ if (earliest_lsn_in_logfile.lsn <= save_lsn.lsn) {
+ break;
+ }
+ }
+ }
+
+ // all log files up to, but not including, i can be archived.
+ int n_to_archive=i;
+ int count_bytes=0;
+ for (i=0; i<n_to_archive; i++) {
+ count_bytes+=1+strlen(all_logs[i]);
+ }
+ char **result;
+ if (i==0) {
+ result=0;
+ } else {
+ CAST_FROM_VOIDP(result, toku_xmalloc((1+n_to_archive)*sizeof(*result) + count_bytes));
+ char *base = (char*)(result+1+n_to_archive);
+ for (i=0; i<n_to_archive; i++) {
+ int len=1+strlen(all_logs[i]);
+ result[i]=base;
+ memcpy(base, all_logs[i], len);
+ base+=len;
+ }
+ result[n_to_archive]=0;
+ }
+ for (i=0; all_logs[i]; i++) {
+ toku_free(all_logs[i]);
+ }
+ toku_free(all_logs);
+ *logs_p = result;
+ return 0;
+}
+
+
+TOKUTXN toku_logger_txn_parent (TOKUTXN txn) {
+ return txn->parent;
+}
+
+void toku_logger_note_checkpoint(TOKULOGGER logger, LSN lsn) {
+ logger->last_completed_checkpoint_lsn = lsn;
+}
+
+void
+toku_logger_get_status(TOKULOGGER logger, LOGGER_STATUS statp) {
+ log_status.init();
+ if (logger) {
+ LOG_STATUS_VAL(LOGGER_NEXT_LSN) = logger->lsn.lsn;
+ LOG_STATUS_VAL(LOGGER_NUM_WRITES) = logger->num_writes_to_disk;
+ LOG_STATUS_VAL(LOGGER_BYTES_WRITTEN) = logger->bytes_written_to_disk;
+ // No compression on logfiles so the uncompressed size is just number of bytes written
+ LOG_STATUS_VAL(LOGGER_UNCOMPRESSED_BYTES_WRITTEN) = logger->bytes_written_to_disk;
+ LOG_STATUS_VAL(LOGGER_TOKUTIME_WRITES) = logger->time_spent_writing_to_disk;
+ LOG_STATUS_VAL(LOGGER_WAIT_BUF_LONG) = logger->num_wait_buf_long;
+ }
+ *statp = log_status;
+}
+
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////
+// Used for upgrade:
+// if any valid log files exist in log_dir, then
+// set *found_any_logs to true and set *version_found to the version number of the latest log
+int
+toku_get_version_of_logs_on_disk(const char *log_dir, bool *found_any_logs, uint32_t *version_found) {
+ bool found = false;
+ uint32_t highest_version = 0;
+ int r = 0;
+
+ struct dirent *de;
+ DIR *d=opendir(log_dir);
+ if (d==NULL) {
+ r = get_error_errno();
+ }
+ else {
+ // Examine every file in the directory and find highest version
+ while ((de=readdir(d))) {
+ uint32_t this_log_version;
+ uint64_t this_log_number;
+ bool is_log = is_a_logfile_any_version(de->d_name, &this_log_number, &this_log_version);
+ if (is_log) {
+ if (!found) { // first log file found
+ found = true;
+ highest_version = this_log_version;
+ }
+ else
+ highest_version = highest_version > this_log_version ? highest_version : this_log_version;
+ }
+ }
+ int r2 = closedir(d);
+ if (r==0) r = r2;
+ }
+ if (r==0) {
+ *found_any_logs = found;
+ if (found)
+ *version_found = highest_version;
+ }
+ return r;
+}
+
+TXN_MANAGER toku_logger_get_txn_manager(TOKULOGGER logger) {
+ return logger->txn_manager;
+}
diff --git a/storage/tokudb/PerconaFT/ft/logger/logger.h b/storage/tokudb/PerconaFT/ft/logger/logger.h
new file mode 100644
index 00000000..d9595d71
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/logger.h
@@ -0,0 +1,274 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/serialize/block_table.h"
+#include "ft/serialize/ft_layout_version.h"
+#include "ft/txn/txn.h"
+
+typedef struct tokulogger *TOKULOGGER;
+
+enum {
+ TOKU_LOG_VERSION_1 = 1,
+ TOKU_LOG_VERSION_2 = 2,
+ //After 2 we linked the log version to the FT_LAYOUT VERSION.
+ //So it went from 2 to 13 (3-12 do not exist)
+ TOKU_LOG_VERSION_24 = 24,
+ TOKU_LOG_VERSION_25 = 25, // change rollinclude rollback log entry
+ TOKU_LOG_VERSION_26 = 26, // no change from 25
+ TOKU_LOG_VERSION_27 = 27, // no change from 26
+ TOKU_LOG_VERSION_28 = 28, // no change from 27
+ TOKU_LOG_VERSION_29 = 29, // no change from 28
+ TOKU_LOG_VERSION = FT_LAYOUT_VERSION,
+ TOKU_LOG_MIN_SUPPORTED_VERSION = FT_LAYOUT_MIN_SUPPORTED_VERSION,
+};
+
+int toku_logger_create (TOKULOGGER *resultp);
+int toku_logger_open (const char *directory, TOKULOGGER logger);
+int toku_logger_open_with_last_xid(const char *directory, TOKULOGGER logger, TXNID last_xid);
+void toku_logger_shutdown(TOKULOGGER logger);
+int toku_logger_close(TOKULOGGER *loggerp);
+void toku_logger_initialize_rollback_cache(TOKULOGGER logger, struct ft *ft);
+int toku_logger_open_rollback(TOKULOGGER logger, struct cachetable *ct, bool create);
+void toku_logger_close_rollback(TOKULOGGER logger);
+void toku_logger_close_rollback_check_empty(TOKULOGGER logger, bool clean_shutdown);
+bool toku_logger_rollback_is_open (TOKULOGGER); // return true iff the rollback is open.
+
+void toku_logger_fsync (TOKULOGGER logger);
+void toku_logger_fsync_if_lsn_not_fsynced(TOKULOGGER logger, LSN lsn);
+int toku_logger_is_open(TOKULOGGER logger);
+void toku_logger_set_cachetable (TOKULOGGER logger, struct cachetable *ct);
+int toku_logger_set_lg_max(TOKULOGGER logger, uint32_t lg_max);
+int toku_logger_get_lg_max(TOKULOGGER logger, uint32_t *lg_maxp);
+int toku_logger_set_lg_bsize(TOKULOGGER logger, uint32_t bsize);
+
+void toku_logger_write_log_files (TOKULOGGER logger, bool write_log_files);
+void toku_logger_trim_log_files(TOKULOGGER logger, bool trim_log_files);
+bool toku_logger_txns_exist(TOKULOGGER logger);
+
+// Restart the logger. This function is used by recovery to really start
+// logging.
+// Effects: Flush the current log buffer, reset the logger's lastlsn, and
+// open a new log file.
+// Returns: 0 on success.
+int toku_logger_restart(TOKULOGGER logger, LSN lastlsn);
+
+// Maybe trim the log entries from the log that are older than the given LSN
+// Effect: find all of the log files whose largest LSN is smaller than the
+// given LSN and delete them.
+void toku_logger_maybe_trim_log(TOKULOGGER logger, LSN oldest_open_lsn);
+
+// At the ft layer, a FILENUM uniquely identifies an open file.
+struct FILENUM {
+ uint32_t fileid;
+};
+static const FILENUM FILENUM_NONE = { .fileid = UINT32_MAX };
+
+struct FILENUMS {
+ uint32_t num;
+ FILENUM *filenums;
+};
+
+void toku_logger_log_fcreate(TOKUTXN txn, const char *fname, FILENUM filenum, uint32_t mode, uint32_t flags, uint32_t nodesize, uint32_t basementnodesize, enum toku_compression_method compression_method);
+void toku_logger_log_fdelete(TOKUTXN txn, FILENUM filenum);
+void toku_logger_log_fopen(TOKUTXN txn, const char * fname, FILENUM filenum, uint32_t treeflags);
+
+// the log generation code requires a typedef if we want to pass by pointer
+typedef TOKU_XA_XID *XIDP;
+
+int toku_fread_uint8_t (FILE *f, uint8_t *v, struct x1764 *mm, uint32_t *len);
+int toku_fread_uint32_t_nocrclen (FILE *f, uint32_t *v);
+int toku_fread_uint32_t (FILE *f, uint32_t *v, struct x1764 *checksum, uint32_t *len);
+int toku_fread_uint64_t (FILE *f, uint64_t *v, struct x1764 *checksum, uint32_t *len);
+int toku_fread_bool (FILE *f, bool *v, struct x1764 *checksum, uint32_t *len);
+int toku_fread_LSN (FILE *f, LSN *lsn, struct x1764 *checksum, uint32_t *len);
+int toku_fread_BLOCKNUM (FILE *f, BLOCKNUM *lsn, struct x1764 *checksum, uint32_t *len);
+int toku_fread_FILENUM (FILE *f, FILENUM *filenum, struct x1764 *checksum, uint32_t *len);
+int toku_fread_TXNID (FILE *f, TXNID *txnid, struct x1764 *checksum, uint32_t *len);
+int toku_fread_TXNID_PAIR (FILE *f, TXNID_PAIR *txnid, struct x1764 *checksum, uint32_t *len);
+int toku_fread_XIDP (FILE *f, XIDP *xidp, struct x1764 *checksum, uint32_t *len);
+int toku_fread_BYTESTRING (FILE *f, BYTESTRING *bs, struct x1764 *checksum, uint32_t *len);
+int toku_fread_FILENUMS (FILE *f, FILENUMS *fs, struct x1764 *checksum, uint32_t *len);
+
+int toku_logprint_LSN (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+int toku_logprint_TXNID (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+int toku_logprint_TXNID_PAIR (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+int toku_logprint_XIDP (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+int toku_logprint_uint8_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_logprint_uint32_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_logprint_BLOCKNUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_logprint_uint64_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_logprint_bool (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+void toku_print_BYTESTRING (FILE *outf, uint32_t len, char *data);
+int toku_logprint_BYTESTRING (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+int toku_logprint_FILENUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_logprint_FILENUMS (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_read_and_print_logmagic (FILE *f, uint32_t *versionp);
+int toku_read_logmagic (FILE *f, uint32_t *versionp);
+
+TXNID_PAIR toku_txn_get_txnid (TOKUTXN txn);
+LSN toku_logger_last_lsn(TOKULOGGER logger);
+TOKULOGGER toku_txn_logger (TOKUTXN txn);
+
+void toku_txnid2txn (TOKULOGGER logger, TXNID_PAIR txnid, TOKUTXN *result);
+
+int toku_logger_log_archive (TOKULOGGER logger, char ***logs_p, int flags);
+
+TOKUTXN toku_logger_txn_parent (TOKUTXN txn);
+void toku_logger_note_checkpoint(TOKULOGGER logger, LSN lsn);
+
+void toku_logger_make_space_in_inbuf (TOKULOGGER logger, int n_bytes_needed);
+
+int toku_logger_write_inbuf (TOKULOGGER logger);
+// Effect: Write the buffered data (from the inbuf) to a file. No fsync, however.
+// As a side effect, the inbuf will be made empty.
+// Return 0 on success, otherwise return an error number.
+// Requires: The inbuf lock is currently held, and the outbuf lock is not held.
+// Upon return, the inbuf lock will be held, and the outbuf lock is not held.
+// The caller must not have modified the logger yet: the inbuf lock should have been acquired only to discover that the buffer would overflow if more data were appended.
+// The inbuf lock is released temporarily inside this function, so the operations before and after this call are not atomic.
+// Rationale: When the buffer becomes nearly full, call this function so that more data can be put in.
+// Implementation note: Since the outbuf lock must be acquired first, we release the inbuf lock and then grab both locks in the right order.
+
+void toku_logger_maybe_fsync (TOKULOGGER logger, LSN lsn, int do_fsync, bool holds_input_lock);
+// Effect: If fsync is nonzero, then make sure that the log is flushed and synced at least up to lsn.
+// Entry: Holds input lock iff 'holds_input_lock'.
+// Exit: Holds no locks.
+
+// Discussion: How does the logger work:
+// The logger has two buffers: an inbuf and an outbuf.
+// There are two locks, called the inlock, and the outlock. To write, both locks must be held, and the outlock is acquired first.
+// Roughly speaking, the inbuf is used to accumulate logged data, and the outbuf is used to write to disk.
+// When something is to be logged we do the following:
+// acquire the inlock.
+// Make sure there is space in the inbuf for the logentry. (We know the size of the logentry in advance):
+// if the inbuf doesn't have enough space then
+// release the inlock
+// acquire the outlock
+// acquire the inlock
+// it's possible that some other thread made space.
+// if there still isn't space
+// swap the inbuf and the outbuf
+// release the inlock
+// write the outbuf
+// acquire the inlock
+// release the outlock
+// if the inbuf is still too small, then increase the size of the inbuf
+// Increment the LSN and fill the inbuf.
+// If fsync is required then
+// release the inlock
+// acquire the outlock
+// acquire the inlock
+// if the LSN has been flushed and fsynced (if so we are done. Some other thread did the flush.)
+// release the locks
+// if the LSN has been flushed but not fsynced up to the LSN:
+// release the inlock
+// fsync
+// release the outlock
+// otherwise:
+// swap the outbuf and the inbuf
+// release the inlock
+// write the outbuf
+// fsync
+// release the outlock
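+
+// Illustrative sketch (names are placeholders; the real implementation lives
+// in logger.cc): the buffer-swap path described above, condensed. 'inlock',
+// 'outlock', 'inbuf' and 'outbuf' stand in for the logger's actual fields.
+//
+//   // caller holds inlock and has found that the inbuf is too small
+//   release(inlock);
+//   acquire(outlock);            // the outlock is always taken first
+//   acquire(inlock);
+//   if (still_not_enough_space(inbuf)) {   // another thread may have made space
+//       swap(inbuf, outbuf);     // the old inbuf becomes the buffer to write
+//       release(inlock);
+//       write_to_logfile(outbuf);
+//       acquire(inlock);
+//   }
+//   release(outlock);
+//   // caller again holds only the inlock and can retry the append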
+
+void toku_logger_get_status(TOKULOGGER logger, LOGGER_STATUS s);
+
+int toku_get_version_of_logs_on_disk(const char *log_dir, bool *found_any_logs, uint32_t *version_found);
+
+struct txn_manager *toku_logger_get_txn_manager(TOKULOGGER logger);
+
+// For serialize / deserialize
+
+#include "ft/serialize/wbuf.h"
+
+static inline void wbuf_nocrc_FILENUM(struct wbuf *wb, FILENUM fileid) {
+ wbuf_nocrc_uint(wb, fileid.fileid);
+}
+
+static inline void wbuf_FILENUM(struct wbuf *wb, FILENUM fileid) {
+ wbuf_uint(wb, fileid.fileid);
+}
+
+static inline void wbuf_nocrc_FILENUMS(struct wbuf *wb, FILENUMS v) {
+ wbuf_nocrc_uint(wb, v.num);
+ for (uint32_t i = 0; i < v.num; i++) {
+ wbuf_nocrc_FILENUM(wb, v.filenums[i]);
+ }
+}
+
+static inline void wbuf_FILENUMS(struct wbuf *wb, FILENUMS v) {
+ wbuf_uint(wb, v.num);
+ for (uint32_t i = 0; i < v.num; i++) {
+ wbuf_FILENUM(wb, v.filenums[i]);
+ }
+}
+
+static inline void wbuf_nocrc_XIDP (struct wbuf *w, TOKU_XA_XID *xid) {
+ wbuf_nocrc_uint32_t(w, xid->formatID);
+ wbuf_nocrc_uint8_t(w, xid->gtrid_length);
+ wbuf_nocrc_uint8_t(w, xid->bqual_length);
+ wbuf_nocrc_literal_bytes(w, xid->data, xid->gtrid_length+xid->bqual_length);
+}
+
+#include "ft/serialize/rbuf.h"
+
+static inline void rbuf_FILENUM(struct rbuf *rb, FILENUM *filenum) {
+ filenum->fileid = rbuf_int(rb);
+}
+static inline void rbuf_ma_FILENUM(struct rbuf *rb, memarena *UU(ma), FILENUM *filenum) {
+ rbuf_FILENUM(rb, filenum);
+}
+
+static inline void rbuf_FILENUMS(struct rbuf *rb, FILENUMS *filenums) {
+ filenums->num = rbuf_int(rb);
+ XMALLOC_N(filenums->num, filenums->filenums);
+ for (uint32_t i = 0; i < filenums->num; i++) {
+ rbuf_FILENUM(rb, &(filenums->filenums[i]));
+ }
+}
+
+static inline void rbuf_ma_FILENUMS(struct rbuf *rb, memarena *ma, FILENUMS *filenums) {
+ rbuf_ma_uint32_t(rb, ma, &(filenums->num));
+ filenums->filenums = (FILENUM *) ma->malloc_from_arena(filenums->num * sizeof(FILENUM));
+ assert(filenums->filenums != NULL);
+ for (uint32_t i = 0; i < filenums->num; i++) {
+ rbuf_ma_FILENUM(rb, ma, &(filenums->filenums[i]));
+ }
+}
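+
+// Illustrative sketch (not part of the build): these helpers are symmetric,
+// so a FILENUMS written with wbuf_FILENUMS() can be read back with
+// rbuf_FILENUMS(). Note that rbuf_FILENUMS() allocates the filenums array
+// with XMALLOC_N, so the caller must eventually free it. Assuming a wbuf 'wb'
+// and an rbuf 'rb' set up over the same bytes:
+//
+//   FILENUM ids[2] = { { .fileid = 1 }, { .fileid = 2 } };
+//   FILENUMS out = { .num = 2, .filenums = ids };
+//   wbuf_FILENUMS(&wb, out);      // serialize: count followed by each fileid
+//   ...
+//   FILENUMS in;
+//   rbuf_FILENUMS(&rb, &in);      // deserialize into a freshly allocated array
+//   toku_free(in.filenums);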
diff --git a/storage/tokudb/PerconaFT/ft/logger/recover.cc b/storage/tokudb/PerconaFT/ft/logger/recover.cc
new file mode 100644
index 00000000..9a9a1214
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/recover.cc
@@ -0,0 +1,1740 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <memory>
+#include "ft/cachetable/cachetable.h"
+#include "ft/cachetable/checkpoint.h"
+#include "ft/ft.h"
+#include "ft/log_header.h"
+#include "ft/logger/log-internal.h"
+#include "ft/logger/logcursor.h"
+#include "ft/txn/txn_manager.h"
+#include "util/omt.h"
+
+int tokuft_recovery_trace = 0; // turn on recovery tracing, default off.
+
+//#define DO_VERIFY_COUNTS
+#ifdef DO_VERIFY_COUNTS
+#define VERIFY_COUNTS(n) toku_verify_or_set_counts(n, false)
+#else
+#define VERIFY_COUNTS(n) ((void)0)
+#endif
+
+// time in seconds between recovery progress reports
+#define TOKUFT_RECOVERY_PROGRESS_TIME 15
+time_t tokuft_recovery_progress_time = TOKUFT_RECOVERY_PROGRESS_TIME;
+
+enum ss {
+ BACKWARD_NEWER_CHECKPOINT_END = 1,
+ BACKWARD_BETWEEN_CHECKPOINT_BEGIN_END,
+ FORWARD_BETWEEN_CHECKPOINT_BEGIN_END,
+ FORWARD_NEWER_CHECKPOINT_END,
+};
+
+struct scan_state {
+ enum ss ss;
+ LSN checkpoint_begin_lsn;
+ LSN checkpoint_end_lsn;
+ uint64_t checkpoint_end_timestamp;
+ uint64_t checkpoint_begin_timestamp;
+ uint32_t checkpoint_num_fassociate;
+ uint32_t checkpoint_num_xstillopen;
+ TXNID last_xid;
+};
+
+static const char *scan_state_strings[] = {
+ "?", "bw_newer", "bw_between", "fw_between", "fw_newer",
+};
+
+static void scan_state_init(struct scan_state *ss) {
+ ss->ss = BACKWARD_NEWER_CHECKPOINT_END;
+ ss->checkpoint_begin_lsn = ZERO_LSN;
+ ss->checkpoint_end_lsn = ZERO_LSN;
+ ss->checkpoint_num_fassociate = 0;
+ ss->checkpoint_num_xstillopen = 0;
+ ss->last_xid = 0;
+}
+
+static const char *scan_state_string(struct scan_state *ss) {
+ assert(BACKWARD_NEWER_CHECKPOINT_END <= ss->ss && ss->ss <= FORWARD_NEWER_CHECKPOINT_END);
+ return scan_state_strings[ss->ss];
+}
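+
+// Illustrative summary (derived from the handlers below): the scan state
+// advances through the states in this order during recovery:
+//
+//   BACKWARD_NEWER_CHECKPOINT_END              // scanning backward from the end of the log
+//     -> BACKWARD_BETWEEN_CHECKPOINT_BEGIN_END // on seeing the last end_checkpoint entry
+//     -> FORWARD_BETWEEN_CHECKPOINT_BEGIN_END  // on reaching its begin_checkpoint (the turnaround)
+//     -> FORWARD_NEWER_CHECKPOINT_END          // on replaying that end_checkpoint going forward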
+
+// File map tuple
+struct file_map_tuple {
+ FILENUM filenum;
+ FT_HANDLE ft_handle; // NULL ft_handle means it's a rollback file.
+ char *iname;
+ struct __toku_db fake_db;
+};
+
+static void file_map_tuple_init(struct file_map_tuple *tuple, FILENUM filenum, FT_HANDLE ft_handle, char *iname) {
+ tuple->filenum = filenum;
+ tuple->ft_handle = ft_handle;
+ tuple->iname = iname;
+ // use a fake DB for comparisons, using the ft's cmp descriptor
+ memset(&tuple->fake_db, 0, sizeof(tuple->fake_db));
+ tuple->fake_db.cmp_descriptor = &tuple->ft_handle->ft->cmp_descriptor;
+ tuple->fake_db.descriptor = &tuple->ft_handle->ft->descriptor;
+}
+
+static void file_map_tuple_destroy(struct file_map_tuple *tuple) {
+ if (tuple->iname) {
+ toku_free(tuple->iname);
+ tuple->iname = NULL;
+ }
+}
+
+// Map filenum to ft_handle
+struct file_map {
+ toku::omt<struct file_map_tuple *> *filenums;
+};
+
+// The recovery environment
+struct recover_env {
+ DB_ENV *env;
+ prepared_txn_callback_t prepared_txn_callback; // at the end of recovery, all the prepared txns are passed back to the ydb layer to make them into valid transactions.
+ keep_cachetable_callback_t keep_cachetable_callback; // after recovery, store the cachetable into the environment.
+ CACHETABLE ct;
+ TOKULOGGER logger;
+ CHECKPOINTER cp;
+ ft_compare_func bt_compare;
+ ft_update_func update_function;
+ generate_row_for_put_func generate_row_for_put;
+ generate_row_for_del_func generate_row_for_del;
+ DBT_ARRAY dest_keys;
+ DBT_ARRAY dest_vals;
+ struct scan_state ss;
+ struct file_map fmap;
+ bool goforward;
+    bool destroy_logger_at_end; // If true, destroy the logger when we are done. If false, set the logger into write-files mode when we are done with recovery.
+};
+typedef struct recover_env *RECOVER_ENV;
+
+
+static void file_map_init(struct file_map *fmap) {
+ XMALLOC(fmap->filenums);
+ fmap->filenums->create();
+}
+
+static void file_map_destroy(struct file_map *fmap) {
+ fmap->filenums->destroy();
+ toku_free(fmap->filenums);
+ fmap->filenums = nullptr;
+}
+
+static uint32_t file_map_get_num_dictionaries(struct file_map *fmap) {
+ return fmap->filenums->size();
+}
+
+static void file_map_close_dictionaries(struct file_map *fmap, LSN oplsn) {
+ int r;
+
+ while (1) {
+ uint32_t n = fmap->filenums->size();
+ if (n == 0) {
+ break;
+ }
+ struct file_map_tuple *tuple;
+ r = fmap->filenums->fetch(n - 1, &tuple);
+ assert(r == 0);
+ r = fmap->filenums->delete_at(n - 1);
+ assert(r == 0);
+ assert(tuple->ft_handle);
+ // Logging is on again, but we must pass the right LSN into close.
+ if (tuple->ft_handle) { // it's a DB, not a rollback file
+ toku_ft_handle_close_recovery(tuple->ft_handle, oplsn);
+ }
+ file_map_tuple_destroy(tuple);
+ toku_free(tuple);
+ }
+}
+
+static int file_map_h(struct file_map_tuple *const &a, const FILENUM &b) {
+ if (a->filenum.fileid < b.fileid) {
+ return -1;
+ } else if (a->filenum.fileid > b.fileid) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static int file_map_insert (struct file_map *fmap, FILENUM fnum, FT_HANDLE ft_handle, char *iname) {
+ struct file_map_tuple *XMALLOC(tuple);
+ file_map_tuple_init(tuple, fnum, ft_handle, iname);
+ int r = fmap->filenums->insert<FILENUM, file_map_h>(tuple, fnum, nullptr);
+ return r;
+}
+
+static void file_map_remove(struct file_map *fmap, FILENUM fnum) {
+ uint32_t idx;
+ struct file_map_tuple *tuple;
+ int r = fmap->filenums->find_zero<FILENUM, file_map_h>(fnum, &tuple, &idx);
+ if (r == 0) {
+ r = fmap->filenums->delete_at(idx);
+ file_map_tuple_destroy(tuple);
+ toku_free(tuple);
+ }
+}
+
+// Look up file info: given FILENUM, return file_map_tuple (or DB_NOTFOUND)
+static int file_map_find(struct file_map *fmap, FILENUM fnum, struct file_map_tuple **file_map_tuple) {
+ uint32_t idx;
+ struct file_map_tuple *tuple;
+ int r = fmap->filenums->find_zero<FILENUM, file_map_h>(fnum, &tuple, &idx);
+ if (r == 0) {
+ assert(tuple->filenum.fileid == fnum.fileid);
+ *file_map_tuple = tuple;
+ } else {
+ assert(r == DB_NOTFOUND);
+ }
+ return r;
+}
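+
+// Illustrative sketch (not part of the build): the typical replay pattern
+// against the file map, as used by the log-entry handlers below. 'l' stands
+// for the decoded log entry being replayed.
+//
+//   struct file_map_tuple *tuple = NULL;
+//   int r = file_map_find(&renv->fmap, l->filenum, &tuple);
+//   if (r == 0) {
+//       // the dictionary is open: apply the logged operation to tuple->ft_handle
+//   } else {
+//       assert(r == DB_NOTFOUND);  // never opened or already deleted; nothing to do
+//   }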
+
+static int recover_env_init (RECOVER_ENV renv,
+ const char *env_dir,
+ DB_ENV *env,
+ prepared_txn_callback_t prepared_txn_callback,
+ keep_cachetable_callback_t keep_cachetable_callback,
+ TOKULOGGER logger,
+ ft_compare_func bt_compare,
+ ft_update_func update_function,
+ generate_row_for_put_func generate_row_for_put,
+ generate_row_for_del_func generate_row_for_del,
+ size_t cachetable_size) {
+ int r = 0;
+
+ // If we are passed a logger use it, otherwise create one.
+ renv->destroy_logger_at_end = logger==NULL;
+ if (logger) {
+ renv->logger = logger;
+ } else {
+ r = toku_logger_create(&renv->logger);
+ assert(r == 0);
+ }
+ toku_logger_write_log_files(renv->logger, false);
+ toku_cachetable_create(&renv->ct, cachetable_size ? cachetable_size : 1<<25, (LSN){0}, renv->logger);
+ toku_cachetable_set_env_dir(renv->ct, env_dir);
+ if (keep_cachetable_callback) keep_cachetable_callback(env, renv->ct);
+ toku_logger_set_cachetable(renv->logger, renv->ct);
+ renv->env = env;
+ renv->prepared_txn_callback = prepared_txn_callback;
+ renv->keep_cachetable_callback = keep_cachetable_callback;
+ renv->bt_compare = bt_compare;
+ renv->update_function = update_function;
+ renv->generate_row_for_put = generate_row_for_put;
+ renv->generate_row_for_del = generate_row_for_del;
+ file_map_init(&renv->fmap);
+ renv->goforward = false;
+ renv->cp = toku_cachetable_get_checkpointer(renv->ct);
+ toku_dbt_array_init(&renv->dest_keys, 1);
+ toku_dbt_array_init(&renv->dest_vals, 1);
+ if (tokuft_recovery_trace)
+ fprintf(stderr, "%s:%d\n", __FUNCTION__, __LINE__);
+ return r;
+}
+
+static void recover_env_cleanup (RECOVER_ENV renv) {
+ invariant_zero(renv->fmap.filenums->size());
+ file_map_destroy(&renv->fmap);
+
+ if (renv->destroy_logger_at_end) {
+ toku_logger_close_rollback(renv->logger);
+ int r = toku_logger_close(&renv->logger);
+ assert(r == 0);
+ } else {
+ toku_logger_write_log_files(renv->logger, true);
+ }
+
+ if (renv->keep_cachetable_callback) {
+ renv->ct = NULL;
+ } else {
+ toku_cachetable_close(&renv->ct);
+ }
+ toku_dbt_array_destroy(&renv->dest_keys);
+ toku_dbt_array_destroy(&renv->dest_vals);
+
+ if (tokuft_recovery_trace)
+ fprintf(stderr, "%s:%d\n", __FUNCTION__, __LINE__);
+}
+
+static const char *recover_state(RECOVER_ENV renv) {
+ return scan_state_string(&renv->ss);
+}
+
+// Open the file if it is not already open. If it is already open, then do nothing.
+static int internal_recover_fopen_or_fcreate (RECOVER_ENV renv, bool must_create, int UU(mode), BYTESTRING *bs_iname, FILENUM filenum, uint32_t treeflags,
+ TOKUTXN txn, uint32_t nodesize, uint32_t basementnodesize, enum toku_compression_method compression_method, LSN max_acceptable_lsn) {
+ int r = 0;
+ FT_HANDLE ft_handle = NULL;
+ char *iname = fixup_fname(bs_iname);
+
+ toku_ft_handle_create(&ft_handle);
+ toku_ft_set_flags(ft_handle, treeflags);
+
+ if (nodesize != 0) {
+ toku_ft_handle_set_nodesize(ft_handle, nodesize);
+ }
+
+ if (basementnodesize != 0) {
+ toku_ft_handle_set_basementnodesize(ft_handle, basementnodesize);
+ }
+
+ if (compression_method != TOKU_DEFAULT_COMPRESSION_METHOD) {
+ toku_ft_handle_set_compression_method(ft_handle, compression_method);
+ }
+
+ // set the key compare functions
+ if (!(treeflags & TOKU_DB_KEYCMP_BUILTIN) && renv->bt_compare) {
+ toku_ft_set_bt_compare(ft_handle, renv->bt_compare);
+ }
+
+ if (renv->update_function) {
+ toku_ft_set_update(ft_handle, renv->update_function);
+ }
+
+ // TODO mode (FUTURE FEATURE)
+ //mode = mode;
+
+ r = toku_ft_handle_open_recovery(ft_handle, iname, must_create, must_create, renv->ct, txn, filenum, max_acceptable_lsn);
+ if (r != 0) {
+ //Note: If ft_handle_open fails, then close_ft will NOT write a header to disk.
+ //No need to provide lsn, so use the regular toku_ft_handle_close function
+ toku_ft_handle_close(ft_handle);
+ toku_free(iname);
+ if (r == ENOENT) //Not an error to simply be missing.
+ r = 0;
+ return r;
+ }
+
+ file_map_insert(&renv->fmap, filenum, ft_handle, iname);
+ return 0;
+}
+
+static int toku_recover_begin_checkpoint (struct logtype_begin_checkpoint *l, RECOVER_ENV renv) {
+ int r;
+ TXN_MANAGER mgr = toku_logger_get_txn_manager(renv->logger);
+ switch (renv->ss.ss) {
+ case FORWARD_BETWEEN_CHECKPOINT_BEGIN_END:
+ assert(l->lsn.lsn == renv->ss.checkpoint_begin_lsn.lsn);
+ invariant(renv->ss.last_xid == TXNID_NONE);
+ renv->ss.last_xid = l->last_xid;
+ toku_txn_manager_set_last_xid_from_recovered_checkpoint(mgr, l->last_xid);
+
+ r = 0;
+ break;
+ case FORWARD_NEWER_CHECKPOINT_END:
+ assert(l->lsn.lsn > renv->ss.checkpoint_end_lsn.lsn);
+ // Verify last_xid is no older than the previous begin
+ invariant(l->last_xid >= renv->ss.last_xid);
+ // Verify last_xid is no older than the newest txn
+ invariant(l->last_xid >= toku_txn_manager_get_last_xid(mgr));
+
+ r = 0; // ignore it (log only has a begin checkpoint)
+ break;
+ default:
+ fprintf(stderr, "PerconaFT recovery %s: %d Unknown checkpoint state %d\n", __FILE__, __LINE__, (int)renv->ss.ss);
+ abort();
+ break;
+ }
+ return r;
+}
+
+static int toku_recover_backward_begin_checkpoint (struct logtype_begin_checkpoint *l, RECOVER_ENV renv) {
+ int r;
+ time_t tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery bw_begin_checkpoint at %" PRIu64 " timestamp %" PRIu64 " (%s)\n", ctime(&tnow), l->lsn.lsn, l->timestamp, recover_state(renv));
+ switch (renv->ss.ss) {
+ case BACKWARD_NEWER_CHECKPOINT_END:
+ // incomplete checkpoint, nothing to do
+ r = 0;
+ break;
+ case BACKWARD_BETWEEN_CHECKPOINT_BEGIN_END:
+ assert(l->lsn.lsn == renv->ss.checkpoint_begin_lsn.lsn);
+ renv->ss.ss = FORWARD_BETWEEN_CHECKPOINT_BEGIN_END;
+ renv->ss.checkpoint_begin_timestamp = l->timestamp;
+ renv->goforward = true;
+ tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery turning around at begin checkpoint %" PRIu64 " time %" PRIu64 "\n",
+ ctime(&tnow), l->lsn.lsn,
+ renv->ss.checkpoint_end_timestamp - renv->ss.checkpoint_begin_timestamp);
+ r = 0;
+ break;
+ default:
+ fprintf(stderr, "PerconaFT recovery %s: %d Unknown checkpoint state %d\n", __FILE__, __LINE__, (int)renv->ss.ss);
+ abort();
+ break;
+ }
+ return r;
+}
+
+static int toku_recover_end_checkpoint (struct logtype_end_checkpoint *l, RECOVER_ENV renv) {
+ int r;
+ switch (renv->ss.ss) {
+ case FORWARD_BETWEEN_CHECKPOINT_BEGIN_END:
+ assert(l->lsn_begin_checkpoint.lsn == renv->ss.checkpoint_begin_lsn.lsn);
+ assert(l->lsn.lsn == renv->ss.checkpoint_end_lsn.lsn);
+ assert(l->num_fassociate_entries == renv->ss.checkpoint_num_fassociate);
+ assert(l->num_xstillopen_entries == renv->ss.checkpoint_num_xstillopen);
+ renv->ss.ss = FORWARD_NEWER_CHECKPOINT_END;
+ r = 0;
+ break;
+ case FORWARD_NEWER_CHECKPOINT_END:
+ assert(0);
+ return 0;
+ default:
+ assert(0);
+ return 0;
+ }
+ return r;
+}
+
+static int toku_recover_backward_end_checkpoint (struct logtype_end_checkpoint *l, RECOVER_ENV renv) {
+ time_t tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery bw_end_checkpoint at %" PRIu64 " timestamp %" PRIu64 " xid %" PRIu64 " (%s)\n", ctime(&tnow), l->lsn.lsn, l->timestamp, l->lsn_begin_checkpoint.lsn, recover_state(renv));
+ switch (renv->ss.ss) {
+ case BACKWARD_NEWER_CHECKPOINT_END:
+ renv->ss.ss = BACKWARD_BETWEEN_CHECKPOINT_BEGIN_END;
+ renv->ss.checkpoint_begin_lsn.lsn = l->lsn_begin_checkpoint.lsn;
+ renv->ss.checkpoint_end_lsn.lsn = l->lsn.lsn;
+ renv->ss.checkpoint_end_timestamp = l->timestamp;
+ return 0;
+ case BACKWARD_BETWEEN_CHECKPOINT_BEGIN_END:
+ fprintf(stderr, "PerconaFT recovery %s:%d Should not see two end_checkpoint log entries without an intervening begin_checkpoint\n", __FILE__, __LINE__);
+ abort();
+ default:
+ break;
+ }
+ fprintf(stderr, "PerconaFT recovery %s: %d Unknown checkpoint state %d\n", __FILE__, __LINE__, (int)renv->ss.ss);
+ abort();
+}
+
+static int toku_recover_fassociate (struct logtype_fassociate *l, RECOVER_ENV renv) {
+ struct file_map_tuple *tuple = NULL;
+ int r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ char *fname = fixup_fname(&l->iname);
+ switch (renv->ss.ss) {
+ case FORWARD_BETWEEN_CHECKPOINT_BEGIN_END:
+ renv->ss.checkpoint_num_fassociate++;
+ assert(r==DB_NOTFOUND); //Not open
+ // Open it if it exists.
+ // If rollback file, specify which checkpointed version of file we need (not just the latest)
+ // because we cannot use a rollback log that is later than the last complete checkpoint. See #3113.
+ {
+ bool rollback_file = (0==strcmp(fname, toku_product_name_strings.rollback_cachefile));
+ LSN max_acceptable_lsn = MAX_LSN;
+ if (rollback_file) {
+ max_acceptable_lsn = renv->ss.checkpoint_begin_lsn;
+ FT_HANDLE t;
+ toku_ft_handle_create(&t);
+ r = toku_ft_handle_open_recovery(t, toku_product_name_strings.rollback_cachefile, false, false, renv->ct, (TOKUTXN)NULL, l->filenum, max_acceptable_lsn);
+ renv->logger->rollback_cachefile = t->ft->cf;
+ toku_logger_initialize_rollback_cache(renv->logger, t->ft);
+ } else {
+ r = internal_recover_fopen_or_fcreate(renv, false, 0, &l->iname, l->filenum, l->treeflags, NULL, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, max_acceptable_lsn);
+ assert(r==0);
+ }
+ }
+ // try to open the file again and if we get it, restore
+ // the unlink on close bit.
+ int ret;
+ ret = file_map_find(&renv->fmap, l->filenum, &tuple);
+ if (ret == 0 && l->unlink_on_close) {
+ toku_cachefile_unlink_on_close(tuple->ft_handle->ft->cf);
+ }
+ break;
+ case FORWARD_NEWER_CHECKPOINT_END:
+ if (r == 0) { //IF it is open
+ // assert that the filenum maps to the correct iname
+ assert(strcmp(fname, tuple->iname) == 0);
+ }
+ r = 0;
+ break;
+ default:
+ assert(0);
+ return 0;
+ }
+ toku_free(fname);
+
+ return r;
+}
+
+static int toku_recover_backward_fassociate (struct logtype_fassociate *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int
+recover_transaction(TOKUTXN *txnp, TXNID_PAIR xid, TXNID_PAIR parentxid, TOKULOGGER logger) {
+ int r;
+
+ // lookup the parent
+ TOKUTXN parent = NULL;
+ if (!txn_pair_is_none(parentxid)) {
+ toku_txnid2txn(logger, parentxid, &parent);
+ assert(parent!=NULL);
+ }
+ else {
+ invariant(xid.child_id64 == TXNID_NONE);
+ }
+
+ // create a transaction and bind it to the transaction id
+ TOKUTXN txn = NULL;
+ {
+ //Verify it does not yet exist.
+ toku_txnid2txn(logger, xid, &txn);
+ assert(txn==NULL);
+ }
+ r = toku_txn_begin_with_xid(
+ parent,
+ &txn,
+ logger,
+ xid,
+ TXN_SNAPSHOT_NONE,
+ NULL,
+ true, // for_recovery
+ false // read_only
+ );
+ assert(r == 0);
+ // We only know about it because it was logged. Restore the log bit.
+ // Logging is 'off' but it will still set the bit.
+ toku_maybe_log_begin_txn_for_write_operation(txn);
+ if (txnp) *txnp = txn;
+ return 0;
+}
+
+static int recover_xstillopen_internal (TOKUTXN *txnp,
+ LSN UU(lsn),
+ TXNID_PAIR xid,
+ TXNID_PAIR parentxid,
+ uint64_t rollentry_raw_count,
+ FILENUMS open_filenums,
+ bool force_fsync_on_commit,
+ uint64_t num_rollback_nodes,
+ uint64_t num_rollentries,
+ BLOCKNUM spilled_rollback_head,
+ BLOCKNUM spilled_rollback_tail,
+ BLOCKNUM current_rollback,
+ uint32_t UU(crc),
+ uint32_t UU(len),
+ RECOVER_ENV renv) {
+ int r;
+ *txnp = NULL;
+ switch (renv->ss.ss) {
+ case FORWARD_BETWEEN_CHECKPOINT_BEGIN_END: {
+ renv->ss.checkpoint_num_xstillopen++;
+ invariant(renv->ss.last_xid != TXNID_NONE);
+ invariant(xid.parent_id64 <= renv->ss.last_xid);
+ TOKUTXN txn = NULL;
+ { //Create the transaction.
+ r = recover_transaction(&txn, xid, parentxid, renv->logger);
+ assert(r==0);
+ assert(txn!=NULL);
+ *txnp = txn;
+ }
+ { //Recover rest of transaction.
+#define COPY_TO_INFO(field) .field = field
+ struct txninfo info = {
+ COPY_TO_INFO(rollentry_raw_count),
+ .num_fts = 0, //Set afterwards
+ .open_fts = NULL, //Set afterwards
+ COPY_TO_INFO(force_fsync_on_commit),
+ COPY_TO_INFO(num_rollback_nodes),
+ COPY_TO_INFO(num_rollentries),
+ COPY_TO_INFO(spilled_rollback_head),
+ COPY_TO_INFO(spilled_rollback_tail),
+ COPY_TO_INFO(current_rollback)
+ };
+#undef COPY_TO_INFO
+ //Generate open_fts
+ FT array[open_filenums.num]; //Allocate maximum possible requirement
+ info.open_fts = array;
+ uint32_t i;
+ for (i = 0; i < open_filenums.num; i++) {
+ //open_filenums.filenums[]
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, open_filenums.filenums[i], &tuple);
+ if (r==0) {
+ info.open_fts[info.num_fts++] = tuple->ft_handle->ft;
+ }
+ else {
+ assert(r==DB_NOTFOUND);
+ }
+ }
+ r = toku_txn_load_txninfo(txn, &info);
+ assert(r==0);
+ }
+ break;
+ }
+ case FORWARD_NEWER_CHECKPOINT_END: {
+ // assert that the transaction exists
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, xid, &txn);
+ r = 0;
+ *txnp = txn;
+ break;
+ }
+ default:
+ assert(0);
+ return 0;
+ }
+ return r;
+}
+
+static int toku_recover_xstillopen (struct logtype_xstillopen *l, RECOVER_ENV renv) {
+ TOKUTXN txn;
+ return recover_xstillopen_internal (&txn,
+ l->lsn,
+ l->xid,
+ l->parentxid,
+ l->rollentry_raw_count,
+ l->open_filenums,
+ l->force_fsync_on_commit,
+ l->num_rollback_nodes,
+ l->num_rollentries,
+ l->spilled_rollback_head,
+ l->spilled_rollback_tail,
+ l->current_rollback,
+ l->crc,
+ l->len,
+ renv);
+}
+
+static int toku_recover_xstillopenprepared (struct logtype_xstillopenprepared *l, RECOVER_ENV renv) {
+ TOKUTXN txn;
+ int r = recover_xstillopen_internal (&txn,
+ l->lsn,
+ l->xid,
+ TXNID_PAIR_NONE,
+ l->rollentry_raw_count,
+ l->open_filenums,
+ l->force_fsync_on_commit,
+ l->num_rollback_nodes,
+ l->num_rollentries,
+ l->spilled_rollback_head,
+ l->spilled_rollback_tail,
+ l->current_rollback,
+ l->crc,
+ l->len,
+ renv);
+ if (r != 0) {
+ goto exit;
+ }
+ switch (renv->ss.ss) {
+ case FORWARD_BETWEEN_CHECKPOINT_BEGIN_END: {
+ toku_txn_prepare_txn(txn, l->xa_xid, 0);
+ break;
+ }
+ case FORWARD_NEWER_CHECKPOINT_END: {
+ assert(txn->state == TOKUTXN_PREPARING);
+ break;
+ }
+ default: {
+ assert(0);
+ }
+ }
+exit:
+ return r;
+}
+
+static int toku_recover_backward_xstillopen (struct logtype_xstillopen *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+static int toku_recover_backward_xstillopenprepared (struct logtype_xstillopenprepared *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_xbegin (struct logtype_xbegin *l, RECOVER_ENV renv) {
+ int r;
+ r = recover_transaction(NULL, l->xid, l->parentxid, renv->logger);
+ return r;
+}
+
+static int toku_recover_backward_xbegin (struct logtype_xbegin *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+struct toku_txn_progress_extra {
+ time_t tlast;
+ LSN lsn;
+ const char *type;
+ TXNID_PAIR xid;
+ uint64_t last_total;
+};
+
+static void toku_recover_txn_progress(TOKU_TXN_PROGRESS txn_progress, void *extra) {
+ toku_txn_progress_extra *txn_progress_extra = static_cast<toku_txn_progress_extra *>(extra);
+ if (txn_progress_extra->last_total == 0)
+ txn_progress_extra->last_total = txn_progress->entries_total;
+ else
+ assert(txn_progress_extra->last_total == txn_progress->entries_total);
+ time_t tnow = time(NULL);
+ if (tnow - txn_progress_extra->tlast >= tokuft_recovery_progress_time) {
+ txn_progress_extra->tlast = tnow;
+ fprintf(stderr, "%.24s PerconaFT ", ctime(&tnow));
+ if (txn_progress_extra->lsn.lsn != 0)
+ fprintf(stderr, "lsn %" PRIu64 " ", txn_progress_extra->lsn.lsn);
+ fprintf(stderr, "%s xid %" PRIu64 ":%" PRIu64 " ",
+ txn_progress_extra->type, txn_progress_extra->xid.parent_id64, txn_progress_extra->xid.child_id64);
+ fprintf(stderr, "%" PRIu64 "/%" PRIu64 " ",
+ txn_progress->entries_processed, txn_progress->entries_total);
+ if (txn_progress->entries_total > 0)
+ fprintf(stderr, "%.0f%% ", ((double) txn_progress->entries_processed / (double) txn_progress->entries_total) * 100.0);
+ fprintf(stderr, "\n");
+ }
+}
+
+static int toku_recover_xcommit (struct logtype_xcommit *l, RECOVER_ENV renv) {
+ // find the transaction by transaction id
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn!=NULL);
+
+ // commit the transaction
+ toku_txn_progress_extra extra = { time(NULL), l->lsn, "commit", l->xid, 0 };
+ int r = toku_txn_commit_with_lsn(txn, true, l->lsn, toku_recover_txn_progress, &extra);
+ assert(r == 0);
+
+ // close the transaction
+ toku_txn_close_txn(txn);
+
+ return 0;
+}
+
+static int toku_recover_backward_xcommit (struct logtype_xcommit *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_xprepare (struct logtype_xprepare *l, RECOVER_ENV renv) {
+ // find the transaction by transaction id
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn!=NULL);
+
+ // Save the transaction
+ toku_txn_prepare_txn(txn, l->xa_xid, 0);
+
+ return 0;
+}
+
+static int toku_recover_backward_xprepare (struct logtype_xprepare *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+
+
+static int toku_recover_xabort (struct logtype_xabort *l, RECOVER_ENV renv) {
+ int r;
+
+ // find the transaction by transaction id
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn!=NULL);
+
+ // abort the transaction
+ toku_txn_progress_extra extra = { time(NULL), l->lsn, "abort", l->xid, 0 };
+ r = toku_txn_abort_with_lsn(txn, l->lsn, toku_recover_txn_progress, &extra);
+ assert(r == 0);
+
+ // close the transaction
+ toku_txn_close_txn(txn);
+
+ return 0;
+}
+
+static int toku_recover_backward_xabort (struct logtype_xabort *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+// fcreate is like fopen except that the file must be created.
+static int toku_recover_fcreate (struct logtype_fcreate *l, RECOVER_ENV renv) {
+ int r;
+
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+
+ // assert that filenum is closed
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ assert(r==DB_NOTFOUND);
+
+ assert(txn!=NULL);
+
+ //unlink if it exists (recreate from scratch).
+ char *iname = fixup_fname(&l->iname);
+ char *iname_in_cwd = toku_cachetable_get_fname_in_cwd(renv->ct, iname);
+ r = unlink(iname_in_cwd);
+ if (r != 0) {
+ int er = get_error_errno();
+ if (er != ENOENT) {
+ fprintf(stderr, "PerconaFT recovery %s:%d unlink %s %d\n", __FUNCTION__, __LINE__, iname, er);
+ toku_free(iname);
+ return r;
+ }
+ }
+ assert(0!=strcmp(iname, toku_product_name_strings.rollback_cachefile)); //Creation of rollback cachefile never gets logged.
+ toku_free(iname_in_cwd);
+ toku_free(iname);
+
+ bool must_create = true;
+ r = internal_recover_fopen_or_fcreate(renv, must_create, l->mode, &l->iname, l->filenum, l->treeflags, txn, l->nodesize, l->basementnodesize, (enum toku_compression_method) l->compression_method, MAX_LSN);
+ return r;
+}
+
+static int toku_recover_backward_fcreate (struct logtype_fcreate *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+
+
+static int toku_recover_fopen (struct logtype_fopen *l, RECOVER_ENV renv) {
+ int r;
+
+ // assert that filenum is closed
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ assert(r==DB_NOTFOUND);
+
+ bool must_create = false;
+ TOKUTXN txn = NULL;
+ char *fname = fixup_fname(&l->iname);
+
+ assert(0!=strcmp(fname, toku_product_name_strings.rollback_cachefile)); //Rollback cachefile can be opened only via fassociate.
+ r = internal_recover_fopen_or_fcreate(renv, must_create, 0, &l->iname, l->filenum, l->treeflags, txn, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, MAX_LSN);
+
+ toku_free(fname);
+ return r;
+}
+
+static int toku_recover_backward_fopen (struct logtype_fopen *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_change_fdescriptor (struct logtype_change_fdescriptor *l, RECOVER_ENV renv) {
+ int r;
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ if (r==0) {
+ TOKUTXN txn = NULL;
+ //Maybe do the descriptor (lsn filter)
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ DBT old_descriptor, new_descriptor;
+ toku_fill_dbt(
+ &old_descriptor,
+ l->old_descriptor.data,
+ l->old_descriptor.len
+ );
+ toku_fill_dbt(
+ &new_descriptor,
+ l->new_descriptor.data,
+ l->new_descriptor.len
+ );
+ toku_ft_change_descriptor(
+ tuple->ft_handle,
+ &old_descriptor,
+ &new_descriptor,
+ false,
+ txn,
+ l->update_cmp_descriptor
+ );
+ }
+ return 0;
+}
+
+static int toku_recover_backward_change_fdescriptor (struct logtype_change_fdescriptor *UU(l), RECOVER_ENV UU(renv)) {
+ return 0;
+}
+
+
+// if file referred to in l is open, close it
+static int toku_recover_fclose (struct logtype_fclose *l, RECOVER_ENV renv) {
+ struct file_map_tuple *tuple = NULL;
+ int r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ if (r == 0) { // if file is open
+ char *iname = fixup_fname(&l->iname);
+ assert(strcmp(tuple->iname, iname) == 0); // verify that file_map has same iname as log entry
+
+ if (0!=strcmp(iname, toku_product_name_strings.rollback_cachefile)) {
+ //Rollback cachefile is closed manually at end of recovery, not here
+ toku_ft_handle_close_recovery(tuple->ft_handle, l->lsn);
+ }
+ file_map_remove(&renv->fmap, l->filenum);
+ toku_free(iname);
+ }
+ return 0;
+}
+
+static int toku_recover_backward_fclose (struct logtype_fclose *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+// fdelete is a transactional file delete.
+static int toku_recover_fdelete (struct logtype_fdelete *l, RECOVER_ENV renv) {
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn != NULL);
+
+ // if the forward scan in recovery found this file and opened it, we
+ // need to mark the txn to remove the ft on commit. if the file was
+ // not found and not opened, we don't need to do anything - the ft
+ // is already gone, so we're happy.
+ struct file_map_tuple *tuple;
+ int r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ if (r == 0) {
+ toku_ft_unlink_on_commit(tuple->ft_handle, txn);
+ }
+ return 0;
+}
+
+static int toku_recover_backward_fdelete (struct logtype_fdelete *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_frename(struct logtype_frename *l, RECOVER_ENV renv) {
+ assert(renv);
+ assert(renv->env);
+
+ toku_struct_stat stat;
+ const char *data_dir = renv->env->get_data_dir(renv->env);
+ bool old_exist = true;
+ bool new_exist = true;
+
+ assert(data_dir);
+
+ struct file_map_tuple *tuple;
+
+ std::unique_ptr<char[], decltype(&toku_free)> old_iname_full(
+ toku_construct_full_name(2, data_dir, l->old_iname.data), &toku_free);
+ std::unique_ptr<char[], decltype(&toku_free)> new_iname_full(
+ toku_construct_full_name(2, data_dir, l->new_iname.data), &toku_free);
+
+ if (toku_stat(old_iname_full.get(), &stat, toku_uninstrumented) == -1) {
+ if (ENOENT == errno)
+ old_exist = false;
+ else
+ return 1;
+ }
+
+ if (toku_stat(new_iname_full.get(), &stat, toku_uninstrumented) == -1) {
+ if (ENOENT == errno)
+ new_exist = false;
+ else
+ return 1;
+ }
+
+ // Both old and new files can exist if:
+ // - rename() is not completed
+ // - fcreate was replayed during recovery
+    // The 'stale cachefiles' container cachefile_list::m_stale_fileid holds
+    // cachefiles that are closed but not yet evicted; it is keyed by the
+    // fs-dependent file id, i.e. the (device id, inode number) pair. Because
+    // the new file is not supposed to have been created yet during the
+    // recovery process, that container can only hold the cachefile of the
+    // old file. To preserve the old file's id and keep its entry in the
+    // stale-cachefile container, the new file is removed and the old file is
+    // renamed over it.
+ if (old_exist && new_exist &&
+ (toku_os_delete(new_iname_full.get()) == -1 ||
+ toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 ||
+ toku_fsync_directory(old_iname_full.get()) == -1 ||
+ toku_fsync_directory(new_iname_full.get()) == -1))
+ return 1;
+
+ if (old_exist && !new_exist &&
+ (!toku_create_subdirs_if_needed(new_iname_full.get()) ||
+ toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 ||
+ toku_fsync_directory(old_iname_full.get()) == -1 ||
+ toku_fsync_directory(new_iname_full.get()) == -1))
+ return 1;
+
+ if (file_map_find(&renv->fmap, l->old_filenum, &tuple) != DB_NOTFOUND) {
+ if (tuple->iname)
+ toku_free(tuple->iname);
+ tuple->iname = toku_xstrdup(l->new_iname.data);
+ }
+
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+
+ if (txn)
+ toku_logger_save_rollback_frename(txn, &l->old_iname, &l->new_iname);
+
+ return 0;
+}
+
+static int toku_recover_backward_frename(struct logtype_frename *UU(l),
+ RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_enq_insert (struct logtype_enq_insert *l, RECOVER_ENV renv) {
+ int r;
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn!=NULL);
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ if (r==0) {
+ //Maybe do the insertion if we found the cachefile.
+ DBT keydbt, valdbt;
+ toku_fill_dbt(&keydbt, l->key.data, l->key.len);
+ toku_fill_dbt(&valdbt, l->value.data, l->value.len);
+ toku_ft_maybe_insert(tuple->ft_handle, &keydbt, &valdbt, txn, true, l->lsn, false, FT_INSERT);
+ toku_txn_maybe_note_ft(txn, tuple->ft_handle->ft);
+ }
+ return 0;
+}
+
+static int toku_recover_backward_enq_insert (struct logtype_enq_insert *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_enq_insert_no_overwrite (struct logtype_enq_insert_no_overwrite *l, RECOVER_ENV renv) {
+ int r;
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn!=NULL);
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ if (r==0) {
+ //Maybe do the insertion if we found the cachefile.
+ DBT keydbt, valdbt;
+ toku_fill_dbt(&keydbt, l->key.data, l->key.len);
+ toku_fill_dbt(&valdbt, l->value.data, l->value.len);
+ toku_ft_maybe_insert(tuple->ft_handle, &keydbt, &valdbt, txn, true, l->lsn, false, FT_INSERT_NO_OVERWRITE);
+ }
+ return 0;
+}
+
+static int toku_recover_backward_enq_insert_no_overwrite (struct logtype_enq_insert_no_overwrite *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_enq_delete_any (struct logtype_enq_delete_any *l, RECOVER_ENV renv) {
+ int r;
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn!=NULL);
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ if (r==0) {
+ //Maybe do the deletion if we found the cachefile.
+ DBT keydbt;
+ toku_fill_dbt(&keydbt, l->key.data, l->key.len);
+ toku_ft_maybe_delete(tuple->ft_handle, &keydbt, txn, true, l->lsn, false);
+ }
+ return 0;
+}
+
+static int toku_recover_backward_enq_delete_any (struct logtype_enq_delete_any *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_enq_insert_multiple (struct logtype_enq_insert_multiple *l, RECOVER_ENV renv) {
+ int r;
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn!=NULL);
+ DB *src_db = NULL;
+ bool do_inserts = true;
+ {
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->src_filenum, &tuple);
+ if (l->src_filenum.fileid == FILENUM_NONE.fileid)
+ assert(r==DB_NOTFOUND);
+ else {
+ if (r == 0)
+ src_db = &tuple->fake_db;
+ else
+ do_inserts = false; // src file was probably deleted, #3129
+ }
+ }
+
+ if (do_inserts) {
+ DBT src_key, src_val;
+
+ toku_fill_dbt(&src_key, l->src_key.data, l->src_key.len);
+ toku_fill_dbt(&src_val, l->src_val.data, l->src_val.len);
+
+ for (uint32_t file = 0; file < l->dest_filenums.num; file++) {
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->dest_filenums.filenums[file], &tuple);
+ if (r==0) {
+ // We found the cachefile. (maybe) Do the insert.
+ DB *db = &tuple->fake_db;
+
+ DBT_ARRAY key_array;
+ DBT_ARRAY val_array;
+ if (db != src_db) {
+ r = renv->generate_row_for_put(db, src_db, &renv->dest_keys, &renv->dest_vals, &src_key, &src_val);
+ assert(r==0);
+ invariant(renv->dest_keys.size <= renv->dest_keys.capacity);
+ invariant(renv->dest_vals.size <= renv->dest_vals.capacity);
+ invariant(renv->dest_keys.size == renv->dest_vals.size);
+ key_array = renv->dest_keys;
+ val_array = renv->dest_vals;
+ } else {
+ key_array.size = key_array.capacity = 1;
+ key_array.dbts = &src_key;
+
+ val_array.size = val_array.capacity = 1;
+ val_array.dbts = &src_val;
+ }
+ for (uint32_t i = 0; i < key_array.size; i++) {
+ toku_ft_maybe_insert(tuple->ft_handle, &key_array.dbts[i], &val_array.dbts[i], txn, true, l->lsn, false, FT_INSERT);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int toku_recover_backward_enq_insert_multiple (struct logtype_enq_insert_multiple *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_enq_delete_multiple (struct logtype_enq_delete_multiple *l, RECOVER_ENV renv) {
+ int r;
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn!=NULL);
+ DB *src_db = NULL;
+ bool do_deletes = true;
+ {
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->src_filenum, &tuple);
+ if (l->src_filenum.fileid == FILENUM_NONE.fileid)
+ assert(r==DB_NOTFOUND);
+ else {
+ if (r == 0) {
+ src_db = &tuple->fake_db;
+ } else {
+ do_deletes = false; // src file was probably deleted, #3129
+ }
+ }
+ }
+
+ if (do_deletes) {
+ DBT src_key, src_val;
+ toku_fill_dbt(&src_key, l->src_key.data, l->src_key.len);
+ toku_fill_dbt(&src_val, l->src_val.data, l->src_val.len);
+
+ for (uint32_t file = 0; file < l->dest_filenums.num; file++) {
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->dest_filenums.filenums[file], &tuple);
+ if (r==0) {
+ // We found the cachefile. (maybe) Do the delete.
+ DB *db = &tuple->fake_db;
+
+ DBT_ARRAY key_array;
+ if (db != src_db) {
+ r = renv->generate_row_for_del(db, src_db, &renv->dest_keys, &src_key, &src_val);
+ assert(r==0);
+ invariant(renv->dest_keys.size <= renv->dest_keys.capacity);
+ key_array = renv->dest_keys;
+ } else {
+ key_array.size = key_array.capacity = 1;
+ key_array.dbts = &src_key;
+ }
+ for (uint32_t i = 0; i < key_array.size; i++) {
+ toku_ft_maybe_delete(tuple->ft_handle, &key_array.dbts[i], txn, true, l->lsn, false);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int toku_recover_backward_enq_delete_multiple (struct logtype_enq_delete_multiple *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_enq_update(struct logtype_enq_update *l, RECOVER_ENV renv) {
+ int r;
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn != NULL);
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ if (r == 0) {
+ // Maybe do the update if we found the cachefile.
+ DBT key, extra;
+ toku_fill_dbt(&key, l->key.data, l->key.len);
+ toku_fill_dbt(&extra, l->extra.data, l->extra.len);
+ toku_ft_maybe_update(tuple->ft_handle, &key, &extra, txn, true, l->lsn, false);
+ }
+ return 0;
+}
+
+static int toku_recover_enq_updatebroadcast(struct logtype_enq_updatebroadcast *l, RECOVER_ENV renv) {
+ int r;
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn != NULL);
+ struct file_map_tuple *tuple = NULL;
+ r = file_map_find(&renv->fmap, l->filenum, &tuple);
+ if (r == 0) {
+ // Maybe do the update broadcast if we found the cachefile.
+ DBT extra;
+ toku_fill_dbt(&extra, l->extra.data, l->extra.len);
+ toku_ft_maybe_update_broadcast(tuple->ft_handle, &extra, txn, true,
+ l->lsn, false, l->is_resetting_op);
+ }
+ return 0;
+}
+
+static int toku_recover_backward_enq_update(struct logtype_enq_update *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_backward_enq_updatebroadcast(struct logtype_enq_updatebroadcast *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_comment (struct logtype_comment *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_backward_comment (struct logtype_comment *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_shutdown_up_to_19 (struct logtype_shutdown_up_to_19 *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_backward_shutdown_up_to_19 (struct logtype_shutdown_up_to_19 *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_shutdown (struct logtype_shutdown *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_backward_shutdown (struct logtype_shutdown *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+static int toku_recover_load(struct logtype_load *UU(l), RECOVER_ENV UU(renv)) {
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn!=NULL);
+ char *new_iname = fixup_fname(&l->new_iname);
+
+ toku_ft_load_recovery(txn, l->old_filenum, new_iname, 0, 0, (LSN*)NULL);
+
+ toku_free(new_iname);
+ return 0;
+}
+
+static int toku_recover_backward_load(struct logtype_load *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+// #2954
+static int toku_recover_hot_index(struct logtype_hot_index *UU(l), RECOVER_ENV UU(renv)) {
+ TOKUTXN txn = NULL;
+ toku_txnid2txn(renv->logger, l->xid, &txn);
+ assert(txn!=NULL);
+ // just make an entry in the rollback log
+ // - set do_log = 0 -> don't write to recovery log
+ toku_ft_hot_index_recovery(txn, l->hot_index_filenums, 0, 0, (LSN*)NULL);
+ return 0;
+}
+
+// #2954
+static int toku_recover_backward_hot_index(struct logtype_hot_index *UU(l), RECOVER_ENV UU(renv)) {
+ // nothing
+ return 0;
+}
+
+// Effects: If there are no log files, or if there is a clean "shutdown" at
+// the end of the log, then we don't need recovery to run.
+// Returns: true if we need recovery, otherwise false.
+int tokuft_needs_recovery(const char *log_dir, bool ignore_log_empty) {
+ int needs_recovery;
+ int r;
+ TOKULOGCURSOR logcursor = NULL;
+
+ r = toku_logcursor_create(&logcursor, log_dir);
+ if (r != 0) {
+ needs_recovery = true; goto exit;
+ }
+
+ struct log_entry *le;
+ le = NULL;
+ r = toku_logcursor_last(logcursor, &le);
+ if (r == 0) {
+ needs_recovery = le->cmd != LT_shutdown;
+ }
+ else {
+ needs_recovery = !(r == DB_NOTFOUND && ignore_log_empty);
+ }
+ exit:
+ if (logcursor) {
+ r = toku_logcursor_destroy(&logcursor);
+ assert(r == 0);
+ }
+ return needs_recovery;
+}
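+
+// Illustrative sketch (the caller shown is hypothetical): how an
+// environment-open path might consult tokuft_needs_recovery() before deciding
+// whether to run recovery.
+//
+//   if (tokuft_needs_recovery(log_dir, false /*ignore_log_empty*/)) {
+//       // run full recovery before opening the environment for normal use
+//   }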
+
+static uint32_t recover_get_num_live_txns(RECOVER_ENV renv) {
+ return toku_txn_manager_num_live_root_txns(renv->logger->txn_manager);
+}
+
+static int is_txn_unprepared(TOKUTXN txn, void* extra) {
+ TOKUTXN* ptxn = (TOKUTXN *)extra;
+ if (txn->state != TOKUTXN_PREPARING) {
+ *ptxn = txn;
+ return -1; // return -1 to get iterator to return
+ }
+ return 0;
+}
+
+static int find_an_unprepared_txn (RECOVER_ENV renv, TOKUTXN *txnp) {
+ TOKUTXN txn = nullptr;
+ int r = toku_txn_manager_iter_over_live_root_txns(
+ renv->logger->txn_manager,
+ is_txn_unprepared,
+ &txn
+ );
+ assert(r == 0 || r == -1);
+ if (txn != nullptr) {
+ *txnp = txn;
+ return 0;
+ }
+ return DB_NOTFOUND;
+}
+
+static int call_prepare_txn_callback_iter(TOKUTXN txn, void* extra) {
+ RECOVER_ENV* renv = (RECOVER_ENV *)extra;
+ invariant(txn->state == TOKUTXN_PREPARING);
+ invariant(txn->child == NULL);
+ (*renv)->prepared_txn_callback((*renv)->env, txn);
+ return 0;
+}
+
+static void recover_abort_live_txn(TOKUTXN txn) {
+ fprintf(stderr, "%s %" PRIu64 "\n", __FUNCTION__, txn->txnid.parent_id64);
+ // recursively abort all children first
+ if (txn->child != NULL) {
+ recover_abort_live_txn(txn->child);
+ }
+ // sanity check that the recursive call successfully NULLs out txn->child
+ invariant(txn->child == NULL);
+ // abort the transaction
+ toku_txn_progress_extra extra = { time(NULL), ZERO_LSN, "abort live", txn->txnid, 0 };
+ int r = toku_txn_abort_txn(txn, toku_recover_txn_progress, &extra);
+ assert(r == 0);
+
+ // close the transaction
+ toku_txn_close_txn(txn);
+}
+
+// abort all of the remaining live transactions in descending transaction id order
+static void recover_abort_all_live_txns(RECOVER_ENV renv) {
+ while (1) {
+ TOKUTXN txn;
+ int r = find_an_unprepared_txn(renv, &txn);
+ if (r==0) {
+ recover_abort_live_txn(txn);
+ } else if (r==DB_NOTFOUND) {
+ break;
+ } else {
+ abort();
+ }
+ }
+
+ // Now we have only prepared txns. These prepared txns don't have full DB_TXNs in them, so we need to make some.
+ int r = toku_txn_manager_iter_over_live_root_txns(
+ renv->logger->txn_manager,
+ call_prepare_txn_callback_iter,
+ &renv
+ );
+ assert_zero(r);
+}
+
+static void recover_trace_le(const char *f, int l, int r, struct log_entry *le) {
+ if (le) {
+ LSN thislsn = toku_log_entry_get_lsn(le);
+ fprintf(stderr, "%s:%d r=%d cmd=%c lsn=%" PRIu64 "\n", f, l, r, le->cmd, thislsn.lsn);
+ } else
+ fprintf(stderr, "%s:%d r=%d cmd=?\n", f, l, r);
+}
+
+// For test purposes only.
+static void (*recover_callback_fx)(void*) = NULL;
+static void * recover_callback_args = NULL;
+static void (*recover_callback2_fx)(void*) = NULL;
+static void * recover_callback2_args = NULL;
+
+
+static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_dir) {
+ int r;
+ int rr = 0;
+ TOKULOGCURSOR logcursor = NULL;
+ struct log_entry *le = NULL;
+
+ time_t tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery starting in env %s\n", ctime(&tnow), env_dir);
+
+ char org_wd[1000];
+ {
+ char *wd=getcwd(org_wd, sizeof(org_wd));
+ assert(wd!=0);
+ }
+
+ r = toku_logger_open(log_dir, renv->logger);
+ assert(r == 0);
+
+ // grab the last LSN so that it can be restored when the log is restarted
+ LSN lastlsn = toku_logger_last_lsn(renv->logger);
+ LSN thislsn;
+
+ // there must be at least one log entry
+ r = toku_logcursor_create(&logcursor, log_dir);
+ assert(r == 0);
+
+ r = toku_logcursor_last(logcursor, &le);
+ if (r != 0) {
+ if (tokuft_recovery_trace)
+ fprintf(stderr, "RUNRECOVERY: %s:%d r=%d\n", __FUNCTION__, __LINE__, r);
+ rr = DB_RUNRECOVERY; goto errorexit;
+ }
+
+ r = toku_logcursor_destroy(&logcursor);
+ assert(r == 0);
+
+ r = toku_logcursor_create(&logcursor, log_dir);
+ assert(r == 0);
+
+ {
+ toku_struct_stat buf;
+ if (toku_stat(env_dir, &buf, toku_uninstrumented)) {
+ rr = get_error_errno();
+ fprintf(stderr,
+ "%.24s PerconaFT recovery error: directory does not exist: "
+ "%s\n",
+ ctime(&tnow),
+ env_dir);
+ goto errorexit;
+ } else if (!S_ISDIR(buf.st_mode)) {
+ fprintf(stderr, "%.24s PerconaFT recovery error: this file is supposed to be a directory, but is not: %s\n", ctime(&tnow), env_dir);
+ rr = ENOTDIR; goto errorexit;
+ }
+ }
+ // scan backwards
+ scan_state_init(&renv->ss);
+ tnow = time(NULL);
+ time_t tlast;
+ tlast = tnow;
+ fprintf(stderr, "%.24s PerconaFT recovery scanning backward from %" PRIu64 "\n", ctime(&tnow), lastlsn.lsn);
+ for (unsigned i=0; 1; i++) {
+
+ // get the previous log entry (first time gets the last one)
+ le = NULL;
+ r = toku_logcursor_prev(logcursor, &le);
+ if (tokuft_recovery_trace)
+ recover_trace_le(__FUNCTION__, __LINE__, r, le);
+ if (r != 0) {
+ if (r == DB_NOTFOUND)
+ break;
+ rr = DB_RUNRECOVERY;
+ goto errorexit;
+ }
+
+ // trace progress
+ if ((i % 1000) == 0) {
+ tnow = time(NULL);
+ if (tnow - tlast >= tokuft_recovery_progress_time) {
+ thislsn = toku_log_entry_get_lsn(le);
+ fprintf(stderr, "%.24s PerconaFT recovery scanning backward from %" PRIu64 " at %" PRIu64 " (%s)\n",
+ ctime(&tnow), lastlsn.lsn, thislsn.lsn, recover_state(renv));
+ tlast = tnow;
+ }
+ }
+
+ // dispatch the log entry handler
+ assert(renv->ss.ss == BACKWARD_BETWEEN_CHECKPOINT_BEGIN_END ||
+ renv->ss.ss == BACKWARD_NEWER_CHECKPOINT_END);
+ logtype_dispatch_assign(le, toku_recover_backward_, r, renv);
+ if (tokuft_recovery_trace)
+ recover_trace_le(__FUNCTION__, __LINE__, r, le);
+ if (r != 0) {
+ if (tokuft_recovery_trace)
+ fprintf(stderr, "DB_RUNRECOVERY: %s:%d r=%d\n", __FUNCTION__, __LINE__, r);
+ rr = DB_RUNRECOVERY;
+ goto errorexit;
+ }
+ if (renv->goforward)
+ break;
+ }
+
+ // run first callback
+ if (recover_callback_fx)
+ recover_callback_fx(recover_callback_args);
+
+ // scan forwards
+ assert(le);
+ thislsn = toku_log_entry_get_lsn(le);
+ tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery starts scanning forward to %" PRIu64 " from %" PRIu64 " left %" PRIu64 " (%s)\n",
+ ctime(&tnow), lastlsn.lsn, thislsn.lsn, lastlsn.lsn - thislsn.lsn, recover_state(renv));
+
+ for (unsigned i=0; 1; i++) {
+
+ // trace progress
+ if ((i % 1000) == 0) {
+ tnow = time(NULL);
+ if (tnow - tlast >= tokuft_recovery_progress_time) {
+ thislsn = toku_log_entry_get_lsn(le);
+ fprintf(stderr, "%.24s PerconaFT recovery scanning forward to %" PRIu64 " at %" PRIu64 " left %" PRIu64 " (%s)\n",
+ ctime(&tnow), lastlsn.lsn, thislsn.lsn, lastlsn.lsn - thislsn.lsn, recover_state(renv));
+ tlast = tnow;
+ }
+ }
+
+ // dispatch the log entry handler (the first iteration calls the forward handler for the log entry at the turnaround)
+ assert(renv->ss.ss == FORWARD_BETWEEN_CHECKPOINT_BEGIN_END ||
+ renv->ss.ss == FORWARD_NEWER_CHECKPOINT_END);
+ logtype_dispatch_assign(le, toku_recover_, r, renv);
+ if (tokuft_recovery_trace)
+ recover_trace_le(__FUNCTION__, __LINE__, r, le);
+ if (r != 0) {
+ if (tokuft_recovery_trace)
+ fprintf(stderr, "DB_RUNRECOVERY: %s:%d r=%d\n", __FUNCTION__, __LINE__, r);
+ rr = DB_RUNRECOVERY;
+ goto errorexit;
+ }
+
+ // get the next log entry
+ le = NULL;
+ r = toku_logcursor_next(logcursor, &le);
+ if (tokuft_recovery_trace)
+ recover_trace_le(__FUNCTION__, __LINE__, r, le);
+ if (r != 0) {
+ if (r == DB_NOTFOUND)
+ break;
+ rr = DB_RUNRECOVERY;
+ goto errorexit;
+ }
+ }
+
+ // verify the final recovery state
+ assert(renv->ss.ss == FORWARD_NEWER_CHECKPOINT_END);
+
+ r = toku_logcursor_destroy(&logcursor);
+ assert(r == 0);
+
+ // run second callback
+ if (recover_callback2_fx)
+ recover_callback2_fx(recover_callback2_args);
+
+ // restart logging
+ toku_logger_restart(renv->logger, lastlsn);
+
+ // abort the live transactions
+ {
+ uint32_t n = recover_get_num_live_txns(renv);
+ if (n > 0) {
+ tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery has %" PRIu32 " live transaction%s\n", ctime(&tnow), n, n > 1 ? "s" : "");
+ }
+ }
+ recover_abort_all_live_txns(renv);
+ {
+ uint32_t n = recover_get_num_live_txns(renv);
+ if (n > 0) {
+ tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery has %" PRIu32 " prepared transaction%s\n", ctime(&tnow), n, n > 1 ? "s" : "");
+ }
+ }
+
+ // close the open dictionaries
+ uint32_t n;
+ n = file_map_get_num_dictionaries(&renv->fmap);
+ if (n > 0) {
+ tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery closing %" PRIu32 " dictionar%s\n", ctime(&tnow), n, n > 1 ? "ies" : "y");
+ }
+ file_map_close_dictionaries(&renv->fmap, lastlsn);
+
+ {
+ // write a recovery log entry
+ BYTESTRING recover_comment = { static_cast<uint32_t>(strlen("recover")), (char *) "recover" };
+ toku_log_comment(renv->logger, NULL, true, 0, recover_comment);
+ }
+
+ // checkpoint
+ tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery making a checkpoint\n", ctime(&tnow));
+ r = toku_checkpoint(renv->cp, renv->logger, NULL, NULL, NULL, NULL, RECOVERY_CHECKPOINT);
+ assert(r == 0);
+ tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery done\n", ctime(&tnow));
+
+ return 0;
+
+ errorexit:
+ tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT recovery failed %d\n", ctime(&tnow), rr);
+
+ if (logcursor) {
+ r = toku_logcursor_destroy(&logcursor);
+ assert(r == 0);
+ }
+
+ return rr;
+}
+
+int
+toku_recover_lock(const char *lock_dir, int *lockfd) {
+ int e = toku_single_process_lock(lock_dir, "recovery", lockfd);
+ if (e != 0 && e != ENOENT) {
+ fprintf(stderr, "Couldn't run recovery because some other process holds the recovery lock\n");
+ }
+ return e;
+}
+
+int
+toku_recover_unlock(int lockfd) {
+ int lockfd_copy = lockfd;
+ return toku_single_process_unlock(&lockfd_copy);
+}
+
+int tokuft_recover(DB_ENV *env,
+ prepared_txn_callback_t prepared_txn_callback,
+ keep_cachetable_callback_t keep_cachetable_callback,
+ TOKULOGGER logger,
+ const char *env_dir, const char *log_dir,
+ ft_compare_func bt_compare,
+ ft_update_func update_function,
+ generate_row_for_put_func generate_row_for_put,
+ generate_row_for_del_func generate_row_for_del,
+ size_t cachetable_size) {
+ int r;
+ int lockfd = -1;
+
+ r = toku_recover_lock(log_dir, &lockfd);
+ if (r != 0)
+ return r;
+
+ int rr = 0;
+ if (tokuft_needs_recovery(log_dir, false)) {
+ struct recover_env renv;
+ r = recover_env_init(&renv,
+ env_dir,
+ env,
+ prepared_txn_callback,
+ keep_cachetable_callback,
+ logger,
+ bt_compare,
+ update_function,
+ generate_row_for_put,
+ generate_row_for_del,
+ cachetable_size);
+ assert(r == 0);
+
+ rr = do_recovery(&renv, env_dir, log_dir);
+
+ recover_env_cleanup(&renv);
+ }
+
+ r = toku_recover_unlock(lockfd);
+ if (r != 0)
+ return r;
+
+ return rr;
+}
+
+// Return 0 if recovery log exists, ENOENT if log is missing
+int
+tokuft_recover_log_exists(const char * log_dir) {
+ int r;
+ TOKULOGCURSOR logcursor;
+
+ r = toku_logcursor_create(&logcursor, log_dir);
+ if (r == 0) {
+ int rclose;
+ r = toku_logcursor_log_exists(logcursor); // return ENOENT if no log
+ rclose = toku_logcursor_destroy(&logcursor);
+ assert(rclose == 0);
+ }
+ else
+ r = ENOENT;
+
+ return r;
+}
+
+void toku_recover_set_callback (void (*callback_fx)(void*), void* callback_args) {
+ recover_callback_fx = callback_fx;
+ recover_callback_args = callback_args;
+}
+
+void toku_recover_set_callback2 (void (*callback_fx)(void*), void* callback_args) {
+ recover_callback2_fx = callback_fx;
+ recover_callback2_args = callback_args;
+}
diff --git a/storage/tokudb/PerconaFT/ft/logger/recover.h b/storage/tokudb/PerconaFT/ft/logger/recover.h
new file mode 100644
index 00000000..bdd44d56
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/logger/recover.h
@@ -0,0 +1,85 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+#include <errno.h>
+
+#include "portability/memory.h"
+#include "portability/toku_portability.h"
+
+#include "ft/comparator.h"
+#include "ft/ft-ops.h"
+#include "util/x1764.h"
+
+typedef void (*prepared_txn_callback_t)(DB_ENV *env, struct tokutxn *txn);
+typedef void (*keep_cachetable_callback_t)(DB_ENV *env, struct cachetable *ct);
+
+// Run tokuft recovery from the log
+// Returns 0 if success
+int tokuft_recover(DB_ENV *env,
+ prepared_txn_callback_t prepared_txn_callback,
+ keep_cachetable_callback_t keep_cachetable_callback,
+ struct tokulogger *logger,
+ const char *env_dir,
+ const char *log_dir,
+ ft_compare_func bt_compare,
+ ft_update_func update_function,
+ generate_row_for_put_func generate_row_for_put,
+ generate_row_for_del_func generate_row_for_del,
+ size_t cachetable_size);
+
+// Effect: Check the tokuft logs to determine whether or not we need to run recovery.
+// If the log is empty or if there is a clean shutdown at the end of the log, then we
+// don't need to run recovery.
+// Returns: true if we need recovery, otherwise false.
+int tokuft_needs_recovery(const char *logdir, bool ignore_empty_log);
+
+// Return 0 if recovery log exists, ENOENT if log is missing
+int tokuft_recover_log_exists(const char * log_dir);
+
+// For test only - set callbacks for recovery testing
+void toku_recover_set_callback (void (*)(void*), void*);
+void toku_recover_set_callback2 (void (*)(void*), void*);
+
+extern int tokuft_recovery_trace;
+
+int toku_recover_lock (const char *lock_dir, int *lockfd);
+
+int toku_recover_unlock(int lockfd);
diff --git a/storage/tokudb/PerconaFT/ft/msg.cc b/storage/tokudb/PerconaFT/ft/msg.cc
new file mode 100644
index 00000000..b53b946b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/msg.cc
@@ -0,0 +1,120 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "portability/toku_portability.h"
+
+#include "ft/msg.h"
+#include "ft/txn/xids.h"
+#include "util/dbt.h"
+
+ft_msg::ft_msg(const DBT *key, const DBT *val, enum ft_msg_type t, MSN m, XIDS x) :
+ _key(key ? *key : toku_empty_dbt()),
+ _val(val ? *val : toku_empty_dbt()),
+ _type(t), _msn(m), _xids(x) {
+}
+
+ft_msg ft_msg::deserialize_from_rbuf(struct rbuf *rb, XIDS *x, bool *is_fresh) {
+ const void *keyp, *valp;
+ uint32_t keylen, vallen;
+ enum ft_msg_type t = (enum ft_msg_type) rbuf_char(rb);
+ *is_fresh = rbuf_char(rb);
+ MSN m = rbuf_MSN(rb);
+ toku_xids_create_from_buffer(rb, x);
+ rbuf_bytes(rb, &keyp, &keylen);
+ rbuf_bytes(rb, &valp, &vallen);
+
+ DBT k, v;
+ return ft_msg(toku_fill_dbt(&k, keyp, keylen), toku_fill_dbt(&v, valp, vallen), t, m, *x);
+}
+
+ft_msg ft_msg::deserialize_from_rbuf_v13(struct rbuf *rb, MSN m, XIDS *x) {
+ const void *keyp, *valp;
+ uint32_t keylen, vallen;
+ enum ft_msg_type t = (enum ft_msg_type) rbuf_char(rb);
+ toku_xids_create_from_buffer(rb, x);
+ rbuf_bytes(rb, &keyp, &keylen);
+ rbuf_bytes(rb, &valp, &vallen);
+
+ DBT k, v;
+ return ft_msg(toku_fill_dbt(&k, keyp, keylen), toku_fill_dbt(&v, valp, vallen), t, m, *x);
+}
+
+const DBT *ft_msg::kdbt() const {
+ return &_key;
+}
+
+const DBT *ft_msg::vdbt() const {
+ return &_val;
+}
+
+enum ft_msg_type ft_msg::type() const {
+ return _type;
+}
+
+MSN ft_msg::msn() const {
+ return _msn;
+}
+
+XIDS ft_msg::xids() const {
+ return _xids;
+}
+
+size_t ft_msg::total_size() const {
+ // Must store two 4-byte lengths
+ static const size_t key_val_overhead = 8;
+
+ // 1 byte type, 1 byte freshness, then 8 byte MSN
+ static const size_t msg_overhead = 2 + sizeof(MSN);
+
+ static const size_t total_overhead = key_val_overhead + msg_overhead;
+
+ const size_t keyval_size = _key.size + _val.size;
+ const size_t xids_size = toku_xids_get_serialize_size(xids());
+ return total_overhead + keyval_size + xids_size;
+}
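+// A worked example of the arithmetic above: for a message with a 10-byte key,
+// a 20-byte value, and XIDS that serialize to S bytes, total_size() returns
+// 8 (two lengths) + 2 (type + freshness) + 8 (MSN) + 10 + 20 + S = 48 + S.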
+
+void ft_msg::serialize_to_wbuf(struct wbuf *wb, bool is_fresh) const {
+ wbuf_nocrc_char(wb, (unsigned char) _type);
+ wbuf_nocrc_char(wb, (unsigned char) is_fresh);
+ wbuf_MSN(wb, _msn);
+ wbuf_nocrc_xids(wb, _xids);
+ wbuf_nocrc_bytes(wb, _key.data, _key.size);
+ wbuf_nocrc_bytes(wb, _val.data, _val.size);
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/msg.h b/storage/tokudb/PerconaFT/ft/msg.h
new file mode 100644
index 00000000..94c72305
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/msg.h
@@ -0,0 +1,191 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The purpose of this file is to provide access to the ft_msg,
+ * which is the ephemeral version of the messages that lives in
+ * a message buffer.
+ */
+#pragma once
+
+#include <db.h>
+
+#include "portability/toku_assert.h"
+#include "portability/toku_stdint.h"
+
+#include "ft/txn/xids.h"
+
+// Message Sequence Number (MSN)
+typedef struct __toku_msn { uint64_t msn; } MSN;
+
+// dummy used for message construction, to be filled in when msg is applied to tree
+static const MSN ZERO_MSN = { .msn = 0 };
+
+// first 2^62 values reserved for messages created before Dr. No (for upgrade)
+static const MSN MIN_MSN = { .msn = 1ULL << 62 };
+static const MSN MAX_MSN = { .msn = UINT64_MAX };
+
+/* tree command types */
+enum ft_msg_type {
+ FT_NONE = 0,
+ FT_INSERT = 1,
+ FT_DELETE_ANY = 2, // Delete any matching key. This used to be called FT_DELETE.
+ //FT_DELETE_BOTH = 3,
+ FT_ABORT_ANY = 4, // Abort any commands on any matching key.
+ //FT_ABORT_BOTH = 5, // Abort commands that match both the key and the value
+ FT_COMMIT_ANY = 6,
+ //FT_COMMIT_BOTH = 7,
+ FT_COMMIT_BROADCAST_ALL = 8, // Broadcast to all leafentries, (commit all transactions).
+ FT_COMMIT_BROADCAST_TXN = 9, // Broadcast to all leafentries, (commit specific transaction).
+ FT_ABORT_BROADCAST_TXN = 10, // Broadcast to all leafentries, (abort specific transaction).
+ FT_INSERT_NO_OVERWRITE = 11,
+ FT_OPTIMIZE = 12, // Broadcast
+ FT_OPTIMIZE_FOR_UPGRADE = 13, // same as FT_OPTIMIZE, but record version number in leafnode
+ FT_UPDATE = 14,
+ FT_UPDATE_BROADCAST_ALL = 15
+};
+
+static inline bool
+ft_msg_type_applies_once(enum ft_msg_type type)
+{
+ bool ret_val;
+ switch (type) {
+ case FT_INSERT_NO_OVERWRITE:
+ case FT_INSERT:
+ case FT_DELETE_ANY:
+ case FT_ABORT_ANY:
+ case FT_COMMIT_ANY:
+ case FT_UPDATE:
+ ret_val = true;
+ break;
+ case FT_COMMIT_BROADCAST_ALL:
+ case FT_COMMIT_BROADCAST_TXN:
+ case FT_ABORT_BROADCAST_TXN:
+ case FT_OPTIMIZE:
+ case FT_OPTIMIZE_FOR_UPGRADE:
+ case FT_UPDATE_BROADCAST_ALL:
+ case FT_NONE:
+ ret_val = false;
+ break;
+ default:
+ assert(false);
+ }
+ return ret_val;
+}
+
+static inline bool
+ft_msg_type_applies_all(enum ft_msg_type type)
+{
+ bool ret_val;
+ switch (type) {
+ case FT_NONE:
+ case FT_INSERT_NO_OVERWRITE:
+ case FT_INSERT:
+ case FT_DELETE_ANY:
+ case FT_ABORT_ANY:
+ case FT_COMMIT_ANY:
+ case FT_UPDATE:
+ ret_val = false;
+ break;
+ case FT_COMMIT_BROADCAST_ALL:
+ case FT_COMMIT_BROADCAST_TXN:
+ case FT_ABORT_BROADCAST_TXN:
+ case FT_OPTIMIZE:
+ case FT_OPTIMIZE_FOR_UPGRADE:
+ case FT_UPDATE_BROADCAST_ALL:
+ ret_val = true;
+ break;
+ default:
+ assert(false);
+ }
+ return ret_val;
+}
+
+static inline bool
+ft_msg_type_does_nothing(enum ft_msg_type type)
+{
+ return (type == FT_NONE);
+}
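+// Taken together, the three predicates above partition the message types:
+// every type except FT_NONE is either an applies-once (per-key) message or an
+// applies-all (broadcast-style) message, and FT_NONE alone does nothing.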
+
+class ft_msg {
+public:
+ ft_msg(const DBT *key, const DBT *val, enum ft_msg_type t, MSN m, XIDS x);
+
+ enum ft_msg_type type() const;
+
+ MSN msn() const;
+
+ XIDS xids() const;
+
+ const DBT *kdbt() const;
+
+ const DBT *vdbt() const;
+
+ size_t total_size() const;
+
+ void serialize_to_wbuf(struct wbuf *wb, bool is_fresh) const;
+
+ // deserialization goes through a static factory function so the ft msg
+ // API stays completely const and there's no default constructor
+ static ft_msg deserialize_from_rbuf(struct rbuf *rb, XIDS *xids, bool *is_fresh);
+
+ // Version 13/14 messages did not have an msn - so `m' is the MSN
+ // that will be assigned to the message that gets deserialized.
+ static ft_msg deserialize_from_rbuf_v13(struct rbuf *rb, MSN m, XIDS *xids);
+
+private:
+ const DBT _key;
+ const DBT _val;
+ enum ft_msg_type _type;
+ MSN _msn;
+ XIDS _xids;
+};
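+
+// A minimal construction sketch (illustrative only; `x` stands in for a valid
+// XIDS obtained elsewhere and is not defined here):
+//
+//     DBT k, v;
+//     ft_msg msg(toku_fill_dbt(&k, "key", 3), toku_fill_dbt(&v, "val", 3),
+//                FT_INSERT, ZERO_MSN, x);
+//     // msg.total_size() now covers the 3+3 key/value bytes plus the fixed
+//     // overhead and the serialized size of x.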
+
+// For serialize / deserialize
+
+#include "ft/serialize/wbuf.h"
+
+static inline void wbuf_MSN(struct wbuf *wb, MSN msn) {
+ wbuf_ulonglong(wb, msn.msn);
+}
+
+#include "ft/serialize/rbuf.h"
+
+static inline MSN rbuf_MSN(struct rbuf *rb) {
+ MSN msn = { .msn = rbuf_ulonglong(rb) };
+ return msn;
+}
diff --git a/storage/tokudb/PerconaFT/ft/msg_buffer.cc b/storage/tokudb/PerconaFT/ft/msg_buffer.cc
new file mode 100644
index 00000000..65e9f5e7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/msg_buffer.cc
@@ -0,0 +1,292 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/msg_buffer.h"
+#include "util/dbt.h"
+
+void message_buffer::create() {
+ _num_entries = 0;
+ _memory = nullptr;
+ _memory_usable = 0;
+ _memory_size = 0;
+ _memory_used = 0;
+}
+
+void message_buffer::clone(message_buffer *src) {
+ _num_entries = src->_num_entries;
+ _memory_used = src->_memory_used;
+ _memory_size = src->_memory_size;
+ XMALLOC_N(_memory_size, _memory);
+ memcpy(_memory, src->_memory, _memory_size);
+ _memory_usable = toku_malloc_usable_size(_memory);
+}
+
+void message_buffer::destroy() {
+ if (_memory != nullptr) {
+ toku_free(_memory);
+ _memory_usable = 0;
+ }
+}
+
+void message_buffer::deserialize_from_rbuf(struct rbuf *rb,
+ int32_t **fresh_offsets, int32_t *nfresh,
+ int32_t **stale_offsets, int32_t *nstale,
+ int32_t **broadcast_offsets, int32_t *nbroadcast) {
+ // read the number of messages in this buffer
+ int n_in_this_buffer = rbuf_int(rb);
+ if (fresh_offsets != nullptr) {
+ XMALLOC_N(n_in_this_buffer, *fresh_offsets);
+ }
+ if (stale_offsets != nullptr) {
+ XMALLOC_N(n_in_this_buffer, *stale_offsets);
+ }
+ if (broadcast_offsets != nullptr) {
+ XMALLOC_N(n_in_this_buffer, *broadcast_offsets);
+ }
+
+ _resize(rb->size + 64); // rb->size is a good hint for how big the buffer will be
+
+ // deserialize each message individually, noting whether it was fresh
+ // and putting its buffer offset in the appropriate offsets array
+ for (int i = 0; i < n_in_this_buffer; i++) {
+ XIDS xids;
+ bool is_fresh;
+ const ft_msg msg = ft_msg::deserialize_from_rbuf(rb, &xids, &is_fresh);
+
+ int32_t *dest;
+ if (ft_msg_type_applies_once(msg.type())) {
+ if (is_fresh) {
+ dest = fresh_offsets ? *fresh_offsets + (*nfresh)++ : nullptr;
+ } else {
+ dest = stale_offsets ? *stale_offsets + (*nstale)++ : nullptr;
+ }
+ } else {
+ invariant(ft_msg_type_applies_all(msg.type()) || ft_msg_type_does_nothing(msg.type()));
+ dest = broadcast_offsets ? *broadcast_offsets + (*nbroadcast)++ : nullptr;
+ }
+
+ enqueue(msg, is_fresh, dest);
+ toku_xids_destroy(&xids);
+ }
+
+ invariant(_num_entries == n_in_this_buffer);
+}
+
+MSN message_buffer::deserialize_from_rbuf_v13(struct rbuf *rb,
+ MSN *highest_unused_msn_for_upgrade,
+ int32_t **fresh_offsets, int32_t *nfresh,
+ int32_t **broadcast_offsets, int32_t *nbroadcast) {
+ // read the number of messages in this buffer
+ int n_in_this_buffer = rbuf_int(rb);
+ if (fresh_offsets != nullptr) {
+ XMALLOC_N(n_in_this_buffer, *fresh_offsets);
+ }
+ if (broadcast_offsets != nullptr) {
+ XMALLOC_N(n_in_this_buffer, *broadcast_offsets);
+ }
+
+ // Atomically decrement the header's MSN count by the number
+ // of messages in the buffer.
+ MSN highest_msn_in_this_buffer = {
+ .msn = toku_sync_sub_and_fetch(&highest_unused_msn_for_upgrade->msn, n_in_this_buffer)
+ };
+
+ // Create the message buffers from the deserialized buffer.
+ for (int i = 0; i < n_in_this_buffer; i++) {
+ XIDS xids;
+ // There were no stale messages at this version, so call it fresh.
+ const bool is_fresh = true;
+
+ // Increment our MSN; the last message should have the
+ // newest/highest MSN. See above for a full explanation.
+ highest_msn_in_this_buffer.msn++;
+ const ft_msg msg = ft_msg::deserialize_from_rbuf_v13(rb, highest_msn_in_this_buffer, &xids);
+
+ int32_t *dest;
+ if (ft_msg_type_applies_once(msg.type())) {
+ dest = fresh_offsets ? *fresh_offsets + (*nfresh)++ : nullptr;
+ } else {
+ invariant(ft_msg_type_applies_all(msg.type()) || ft_msg_type_does_nothing(msg.type()));
+ dest = broadcast_offsets ? *broadcast_offsets + (*nbroadcast)++ : nullptr;
+ }
+
+ enqueue(msg, is_fresh, dest);
+ toku_xids_destroy(&xids);
+ }
+
+ return highest_msn_in_this_buffer;
+}
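+// A worked example of the MSN assignment above: if *highest_unused_msn_for_upgrade
+// is 1000 and the buffer holds 3 messages, the atomic subtraction leaves the shared
+// counter at 997, the loop assigns MSNs 998, 999, and 1000 to the messages in order,
+// and 1000 (the highest) is returned.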
+
+void message_buffer::_resize(size_t new_size) {
+ XREALLOC_N(new_size, _memory);
+ _memory_size = new_size;
+ _memory_usable = toku_malloc_usable_size(_memory);
+}
+
+static int next_power_of_two (int n) {
+ int r = 4096;
+ while (r < n) {
+ r*=2;
+ assert(r>0);
+ }
+ return r;
+}
+
+struct message_buffer::buffer_entry *message_buffer::get_buffer_entry(int32_t offset) const {
+ return (struct buffer_entry *) (_memory + offset);
+}
+
+void message_buffer::enqueue(const ft_msg &msg, bool is_fresh, int32_t *offset) {
+ int need_space_here = msg_memsize_in_buffer(msg);
+ int need_space_total = _memory_used + need_space_here;
+ if (_memory == nullptr || need_space_total > _memory_size) {
+ // resize the buffer to the next power of 2 greater than the needed space
+ int next_2 = next_power_of_two(need_space_total);
+ _resize(next_2);
+ }
+ uint32_t keylen = msg.kdbt()->size;
+ uint32_t datalen = msg.vdbt()->size;
+ struct buffer_entry *entry = get_buffer_entry(_memory_used);
+ entry->type = (unsigned char) msg.type();
+ entry->msn = msg.msn();
+ toku_xids_cpy(&entry->xids_s, msg.xids());
+ entry->is_fresh = is_fresh;
+ unsigned char *e_key = toku_xids_get_end_of_array(&entry->xids_s);
+ entry->keylen = keylen;
+ memcpy(e_key, msg.kdbt()->data, keylen);
+ entry->vallen = datalen;
+ memcpy(e_key + keylen, msg.vdbt()->data, datalen);
+ if (offset) {
+ *offset = _memory_used;
+ }
+ _num_entries++;
+ _memory_used += need_space_here;
+}
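+// Layout sketch of the entry just written by enqueue(): the packed buffer_entry
+// header (keylen, vallen, type, is_fresh, msn, xids_s) sits at the entry's offset
+// in _memory, and the key bytes followed by the value bytes are copied directly
+// after the (variable-length) xids_s field.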
+
+void message_buffer::set_freshness(int32_t offset, bool is_fresh) {
+ struct buffer_entry *entry = get_buffer_entry(offset);
+ entry->is_fresh = is_fresh;
+}
+
+bool message_buffer::get_freshness(int32_t offset) const {
+ struct buffer_entry *entry = get_buffer_entry(offset);
+ return entry->is_fresh;
+}
+
+ft_msg message_buffer::get_message(int32_t offset, DBT *keydbt, DBT *valdbt) const {
+ struct buffer_entry *entry = get_buffer_entry(offset);
+ uint32_t keylen = entry->keylen;
+ uint32_t vallen = entry->vallen;
+ enum ft_msg_type type = (enum ft_msg_type) entry->type;
+ MSN msn = entry->msn;
+ const XIDS xids = (XIDS) &entry->xids_s;
+ const void *key = toku_xids_get_end_of_array(xids);
+ const void *val = (uint8_t *) key + entry->keylen;
+ return ft_msg(toku_fill_dbt(keydbt, key, keylen), toku_fill_dbt(valdbt, val, vallen), type, msn, xids);
+}
+
+void message_buffer::get_message_key_msn(int32_t offset, DBT *key, MSN *msn) const {
+ struct buffer_entry *entry = get_buffer_entry(offset);
+ if (key != nullptr) {
+ toku_fill_dbt(key, toku_xids_get_end_of_array((XIDS) &entry->xids_s), entry->keylen);
+ }
+ if (msn != nullptr) {
+ *msn = entry->msn;
+ }
+}
+
+int message_buffer::num_entries() const {
+ return _num_entries;
+}
+
+size_t message_buffer::buffer_size_in_use() const {
+ return _memory_used;
+}
+
+size_t message_buffer::memory_size_in_use() const {
+ return sizeof(*this) + _memory_used;
+}
+
+size_t message_buffer::memory_footprint() const {
+#ifdef TOKU_DEBUG_PARANOID
+ // Enable this code if you want to verify that the new way of computing
+ // the memory footprint is the same as the old.
+ // It slows the code down by perhaps 10%.
+ assert(_memory_usable == toku_malloc_usable_size(_memory));
+ size_t fp = toku_memory_footprint(_memory, _memory_used);
+ size_t fpg = toku_memory_footprint_given_usable_size(_memory_used, _memory_usable);
+ if (fp != fpg) printf("ptr=%p mu=%ld fp=%ld fpg=%ld\n", _memory, _memory_usable, fp, fpg);
+ assert(fp == fpg);
+#endif // TOKU_DEBUG_PARANOID
+ return sizeof(*this) + toku_memory_footprint_given_usable_size(_memory_used, _memory_usable);
+}
+
+bool message_buffer::equals(message_buffer *other) const {
+ return (_memory_used == other->_memory_used &&
+ memcmp(_memory, other->_memory, _memory_used) == 0);
+}
+
+void message_buffer::serialize_to_wbuf(struct wbuf *wb) const {
+ wbuf_nocrc_int(wb, _num_entries);
+ struct msg_serialize_fn {
+ struct wbuf *wb;
+ msg_serialize_fn(struct wbuf *w) : wb(w) { }
+ int operator()(const ft_msg &msg, bool is_fresh) {
+ msg.serialize_to_wbuf(wb, is_fresh);
+ return 0;
+ }
+ } serialize_fn(wb);
+ iterate(serialize_fn);
+}
+
+size_t message_buffer::msg_memsize_in_buffer(const ft_msg &msg) {
+ const uint32_t keylen = msg.kdbt()->size;
+ const uint32_t datalen = msg.vdbt()->size;
+ const size_t xidslen = toku_xids_get_size(msg.xids());
+ return sizeof(struct buffer_entry) + keylen + datalen + xidslen - sizeof(XIDS_S);
+}
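+// Note on the formula above: xidslen is the full size of the message's XIDS,
+// while sizeof(struct buffer_entry) already includes sizeof(XIDS_S) for the
+// embedded xids_s field, so sizeof(XIDS_S) is subtracted once to avoid counting
+// the fixed xids header twice.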
diff --git a/storage/tokudb/PerconaFT/ft/msg_buffer.h b/storage/tokudb/PerconaFT/ft/msg_buffer.h
new file mode 100644
index 00000000..a68b7129
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/msg_buffer.h
@@ -0,0 +1,131 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/msg.h"
+#include "ft/txn/xids.h"
+#include "util/dbt.h"
+
+class message_buffer {
+public:
+ void create();
+
+ void clone(message_buffer *src);
+
+ void destroy();
+
+ // effect: deserializes a message buffer from the given rbuf
+ // returns: *fresh_offsets (etc) malloc'd to be num_entries large and
+ // populated with *nfresh (etc) offsets in the message buffer
+ // requires: if fresh_offsets (etc) != nullptr, then nfresh != nullptr
+ void deserialize_from_rbuf(struct rbuf *rb,
+ int32_t **fresh_offsets, int32_t *nfresh,
+ int32_t **stale_offsets, int32_t *nstale,
+ int32_t **broadcast_offsets, int32_t *nbroadcast);
+
+ // effect: deserializes a message buffer whose messages are at version 13/14
+ // returns: similar to deserialize_from_rbuf(), except there are no stale messages
+ // and each message is assigned a sequential value from *highest_unused_msn_for_upgrade,
+ // which is decremented as needed using toku_sync_sub_and_fetch()
+ // returns: the highest MSN assigned to any message in this buffer
+ // requires: similar to deserialize_from_rbuf(), and highest_unused_msn_for_upgrade != nullptr
+ MSN deserialize_from_rbuf_v13(struct rbuf *rb,
+ MSN *highest_unused_msn_for_upgrade,
+ int32_t **fresh_offsets, int32_t *nfresh,
+ int32_t **broadcast_offsets, int32_t *nbroadcast);
+
+ void enqueue(const ft_msg &msg, bool is_fresh, int32_t *offset);
+
+ void set_freshness(int32_t offset, bool is_fresh);
+
+ bool get_freshness(int32_t offset) const;
+
+ ft_msg get_message(int32_t offset, DBT *keydbt, DBT *valdbt) const;
+
+ void get_message_key_msn(int32_t offset, DBT *key, MSN *msn) const;
+
+ int num_entries() const;
+
+ size_t buffer_size_in_use() const;
+
+ size_t memory_size_in_use() const;
+
+ size_t memory_footprint() const;
+
+ template <typename F>
+ int iterate(F &fn) const {
+ for (int32_t offset = 0; offset < _memory_used; ) {
+ DBT k, v;
+ const ft_msg msg = get_message(offset, &k, &v);
+ bool is_fresh = get_freshness(offset);
+ int r = fn(msg, is_fresh);
+ if (r != 0) {
+ return r;
+ }
+ offset += msg_memsize_in_buffer(msg);
+ }
+ return 0;
+ }
+
+ bool equals(message_buffer *other) const;
+
+ void serialize_to_wbuf(struct wbuf *wb) const;
+
+ static size_t msg_memsize_in_buffer(const ft_msg &msg);
+
+private:
+ void _resize(size_t new_size);
+
+ // If this isn't packed, the compiler aligns the xids array and we waste a lot of space
+ struct __attribute__((__packed__)) buffer_entry {
+ unsigned int keylen;
+ unsigned int vallen;
+ unsigned char type;
+ bool is_fresh;
+ MSN msn;
+ XIDS_S xids_s;
+ };
+
+ struct buffer_entry *get_buffer_entry(int32_t offset) const;
+
+ int _num_entries;
+ char *_memory; // An array of bytes into which buffer entries are embedded.
+ int _memory_size; // How big is _memory
+ int _memory_used; // How many bytes are in use?
+ size_t _memory_usable; // a cached result of toku_malloc_usable_size(_memory).
+};
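+
+// Usage note for iterate() above (a sketch of the contract, mirrored by
+// serialize_to_wbuf() in msg_buffer.cc): the functor must expose
+// `int operator()(const ft_msg &msg, bool is_fresh)`; it is called once per
+// entry in offset order, and a non-zero return stops the walk and is
+// propagated back to the caller.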
diff --git a/storage/tokudb/PerconaFT/ft/node.cc b/storage/tokudb/PerconaFT/ft/node.cc
new file mode 100644
index 00000000..88f46c78
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/node.cc
@@ -0,0 +1,2150 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "ft/ft.h"
+#include "ft/ft-internal.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "ft/node.h"
+#include "ft/serialize/rbuf.h"
+#include "ft/serialize/wbuf.h"
+#include "util/scoped_malloc.h"
+#include "util/sort.h"
+
+// Effect: Fill in N as an empty ftnode.
+// TODO: Rename toku_ftnode_create
+void toku_initialize_empty_ftnode(FTNODE n, BLOCKNUM blocknum, int height, int num_children, int layout_version, unsigned int flags) {
+ paranoid_invariant(layout_version != 0);
+ paranoid_invariant(height >= 0);
+
+ n->max_msn_applied_to_node_on_disk = ZERO_MSN; // correct value for root node, harmless for others
+ n->flags = flags;
+ n->blocknum = blocknum;
+ n->layout_version = layout_version;
+ n->layout_version_original = layout_version;
+ n->layout_version_read_from_disk = layout_version;
+ n->height = height;
+ n->pivotkeys.create_empty();
+ n->bp = 0;
+ n->n_children = num_children;
+ n->oldest_referenced_xid_known = TXNID_NONE;
+
+ if (num_children > 0) {
+ XMALLOC_N(num_children, n->bp);
+ for (int i = 0; i < num_children; i++) {
+ BP_BLOCKNUM(n,i).b=0;
+ BP_STATE(n,i) = PT_INVALID;
+ BP_WORKDONE(n,i) = 0;
+ BP_INIT_TOUCHED_CLOCK(n, i);
+ set_BNULL(n,i);
+ if (height > 0) {
+ set_BNC(n, i, toku_create_empty_nl());
+ } else {
+ set_BLB(n, i, toku_create_empty_bn());
+ }
+ }
+ }
+ n->set_dirty(); // special case exception, it's okay to mark as dirty because the basements are empty
+
+ toku_ft_status_note_ftnode(height, true);
+}
+
+// destroys the internals of the ftnode, but it does not free the values
+// that are stored
+// this is common functionality for toku_ftnode_free and rebalance_ftnode_leaf
+// MUST NOT do anything besides free the structures that have been allocated
+void toku_destroy_ftnode_internals(FTNODE node) {
+ node->pivotkeys.destroy();
+ for (int i = 0; i < node->n_children; i++) {
+ if (BP_STATE(node,i) == PT_AVAIL) {
+ if (node->height > 0) {
+ destroy_nonleaf_childinfo(BNC(node,i));
+ } else {
+ paranoid_invariant(BLB_LRD(node, i) == 0);
+ destroy_basement_node(BLB(node, i));
+ }
+ } else if (BP_STATE(node,i) == PT_COMPRESSED) {
+ SUB_BLOCK sb = BSB(node,i);
+ toku_free(sb->compressed_ptr);
+ toku_free(sb);
+ } else {
+ paranoid_invariant(is_BNULL(node, i));
+ }
+ set_BNULL(node, i);
+ }
+ toku_free(node->bp);
+ node->bp = NULL;
+}
+
+/* Frees a node, including all the stuff in the hash table. */
+void toku_ftnode_free(FTNODE *nodep) {
+ FTNODE node = *nodep;
+ toku_ft_status_note_ftnode(node->height, false);
+ toku_destroy_ftnode_internals(node);
+ toku_free(node);
+ *nodep = nullptr;
+}
+
+void toku_ftnode_update_disk_stats(FTNODE ftnode, FT ft, bool for_checkpoint) {
+ STAT64INFO_S deltas = ZEROSTATS;
+ // capture deltas before rebalancing basements for serialization
+ deltas = toku_get_and_clear_basement_stats(ftnode);
+ // locking not necessary here with respect to checkpointing
+ // in Clayface (because of the pending lock and cachetable lock
+ // in toku_cachetable_begin_checkpoint)
+ // essentially, if we are dealing with a for_checkpoint
+ // parameter in a function that is called by the flush_callback,
+ // then the cachetable needs to ensure that this is called in a safe
+ // manner that does not interfere with the beginning
+ // of a checkpoint, which it does with the cachetable lock
+ // and pending lock
+ toku_ft_update_stats(&ft->h->on_disk_stats, deltas);
+ if (for_checkpoint) {
+ toku_ft_update_stats(&ft->checkpoint_header->on_disk_stats, deltas);
+ }
+}
+
+void toku_ftnode_clone_partitions(FTNODE node, FTNODE cloned_node) {
+ for (int i = 0; i < node->n_children; i++) {
+ BP_BLOCKNUM(cloned_node,i) = BP_BLOCKNUM(node,i);
+ paranoid_invariant(BP_STATE(node,i) == PT_AVAIL);
+ BP_STATE(cloned_node,i) = PT_AVAIL;
+ BP_WORKDONE(cloned_node, i) = BP_WORKDONE(node, i);
+ if (node->height == 0) {
+ set_BLB(cloned_node, i, toku_clone_bn(BLB(node,i)));
+ } else {
+ set_BNC(cloned_node, i, toku_clone_nl(BNC(node,i)));
+ }
+ }
+}
+
+void toku_evict_bn_from_memory(FTNODE node, int childnum, FT ft) {
+ // free the basement node
+ assert(!node->dirty());
+ BASEMENTNODE bn = BLB(node, childnum);
+ toku_ft_decrease_stats(&ft->in_memory_stats, bn->stat64_delta);
+ toku_ft_adjust_logical_row_count(ft, -BLB_LRD(node, childnum));
+ BLB_LRD(node, childnum) = 0;
+ destroy_basement_node(bn);
+ set_BNULL(node, childnum);
+ BP_STATE(node, childnum) = PT_ON_DISK;
+}
+
+BASEMENTNODE toku_detach_bn(FTNODE node, int childnum) {
+ assert(BP_STATE(node, childnum) == PT_AVAIL);
+ BASEMENTNODE bn = BLB(node, childnum);
+ set_BNULL(node, childnum);
+ BP_STATE(node, childnum) = PT_ON_DISK;
+ return bn;
+}
+
+//
+// Orthopush
+//
+
+struct store_msg_buffer_offset_extra {
+ int32_t *offsets;
+ int i;
+};
+
+int store_msg_buffer_offset(const int32_t &offset, const uint32_t UU(idx), struct store_msg_buffer_offset_extra *const extra) __attribute__((nonnull(3)));
+int store_msg_buffer_offset(const int32_t &offset, const uint32_t UU(idx), struct store_msg_buffer_offset_extra *const extra)
+{
+ extra->offsets[extra->i] = offset;
+ extra->i++;
+ return 0;
+}
+
+/**
+ * Given pointers to offsets within a message buffer where we can find messages,
+ * figure out the MSN of each message, and compare those MSNs. Returns 1,
+ * 0, or -1 if a is larger than, equal to, or smaller than b.
+ */
+int msg_buffer_offset_msn_cmp(message_buffer &msg_buffer, const int32_t &ao, const int32_t &bo);
+int msg_buffer_offset_msn_cmp(message_buffer &msg_buffer, const int32_t &ao, const int32_t &bo)
+{
+ MSN amsn, bmsn;
+ msg_buffer.get_message_key_msn(ao, nullptr, &amsn);
+ msg_buffer.get_message_key_msn(bo, nullptr, &bmsn);
+ if (amsn.msn > bmsn.msn) {
+ return +1;
+ }
+ if (amsn.msn < bmsn.msn) {
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Given a message buffer and an offset, apply the message with
+ * toku_ft_bn_apply_msg, or discard it,
+ * based on its MSN and the MSN of the basement node.
+ */
+static void do_bn_apply_msg(
+ FT_HANDLE ft_handle,
+ BASEMENTNODE bn,
+ message_buffer* msg_buffer,
+ int32_t offset,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
+
+ DBT k, v;
+ ft_msg msg = msg_buffer->get_message(offset, &k, &v);
+
+ // The messages are being iterated over in (key,msn) order or just in
+ // msn order, so all the messages for one key, from one buffer, are in
+ // ascending msn order. So it's ok that we don't update the basement
+ // node's msn until the end.
+ if (msg.msn().msn > bn->max_msn_applied.msn) {
+ toku_ft_bn_apply_msg(
+ ft_handle->ft->cmp,
+ ft_handle->ft->update_fun,
+ bn,
+ msg,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
+ } else {
+ toku_ft_status_note_msn_discard();
+ }
+
+ // We must always mark message as stale since it has been marked
+ // (using omt::iterate_and_mark_range)
+ // It is possible to call do_bn_apply_msg even when it won't apply the
+ // message because the node containing it could have been evicted and
+ // brought back in.
+ msg_buffer->set_freshness(offset, false);
+}
+
+
+struct iterate_do_bn_apply_msg_extra {
+ FT_HANDLE t;
+ BASEMENTNODE bn;
+ NONLEAF_CHILDINFO bnc;
+ txn_gc_info *gc_info;
+ uint64_t *workdone;
+ STAT64INFO stats_to_update;
+ int64_t *logical_rows_delta;
+};
+
+int iterate_do_bn_apply_msg(
+ const int32_t &offset,
+ const uint32_t UU(idx),
+ struct iterate_do_bn_apply_msg_extra* const e)
+ __attribute__((nonnull(3)));
+
+int iterate_do_bn_apply_msg(
+ const int32_t &offset,
+ const uint32_t UU(idx),
+ struct iterate_do_bn_apply_msg_extra* const e)
+{
+ do_bn_apply_msg(
+ e->t,
+ e->bn,
+ &e->bnc->msg_buffer,
+ offset,
+ e->gc_info,
+ e->workdone,
+ e->stats_to_update,
+ e->logical_rows_delta);
+ return 0;
+}
+
+/**
+ * Given the bounds of the basement node to which we will apply messages,
+ * find the indexes within message_tree which contain the range of
+ * relevant messages.
+ *
+ * The message tree contains offsets into the buffer, where messages are
+ * found. The pivot_bounds are the lower bound exclusive and upper bound
+ * inclusive, because they come from pivot keys in the tree. We want OMT
+ * indices, which must have the lower bound be inclusive and the upper
+ * bound exclusive. We will get these by telling omt::find to look
+ * for something strictly bigger than each of our pivot bounds.
+ *
+ * Outputs the OMT indices in lbi (lower bound inclusive) and ube (upper
+ * bound exclusive).
+ */
+template<typename find_bounds_omt_t>
+static void
+find_bounds_within_message_tree(
+ const toku::comparator &cmp,
+ const find_bounds_omt_t &message_tree, /// tree holding message buffer offsets, in which we want to look for indices
+ message_buffer *msg_buffer, /// message buffer in which messages are found
+ const pivot_bounds &bounds, /// key bounds within the basement node we're applying messages to
+ uint32_t *lbi, /// (output) "lower bound inclusive" (index into message_tree)
+ uint32_t *ube /// (output) "upper bound exclusive" (index into message_tree)
+ )
+{
+ int r = 0;
+
+ if (!toku_dbt_is_empty(bounds.lbe())) {
+ // By setting msn to MAX_MSN and by using direction of +1, we will
+ // get the first message greater than (in (key, msn) order) any
+ // message (with any msn) with the key lower_bound_exclusive.
+ // This will be a message we want to try applying, so it is the
+ // "lower bound inclusive" within the message_tree.
+ struct toku_msg_buffer_key_msn_heaviside_extra lbi_extra(cmp, msg_buffer, bounds.lbe(), MAX_MSN);
+ int32_t found_lb;
+ r = message_tree.template find<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(lbi_extra, +1, &found_lb, lbi);
+ if (r == DB_NOTFOUND) {
+ // There is no relevant data (the lower bound is bigger than
+ // any message in this tree), so we have no range and we're
+ // done.
+ *lbi = 0;
+ *ube = 0;
+ return;
+ }
+ if (!toku_dbt_is_empty(bounds.ubi())) {
+ // Check if what we found for lbi is greater than the upper
+ // bound inclusive that we have. If so, there are no relevant
+ // messages between these bounds.
+ const DBT *ubi = bounds.ubi();
+ const int32_t offset = found_lb;
+ DBT found_lbidbt;
+ msg_buffer->get_message_key_msn(offset, &found_lbidbt, nullptr);
+ int c = cmp(&found_lbidbt, ubi);
+ // These DBTs really are both inclusive bounds, so we need
+ // strict inequality in order to determine that there's
+ // nothing between them. If they're equal, then we actually
+ // need to apply the message pointed to by lbi, and also
+ // anything with the same key but a bigger msn.
+ if (c > 0) {
+ *lbi = 0;
+ *ube = 0;
+ return;
+ }
+ }
+ } else {
+ // No lower bound given, it's negative infinity, so we start at
+ // the first message in the OMT.
+ *lbi = 0;
+ }
+ if (!toku_dbt_is_empty(bounds.ubi())) {
+ // Again, we use an msn of MAX_MSN and a direction of +1 to get
+ // the first thing bigger than the upper_bound_inclusive key.
+ // This is therefore the smallest thing we don't want to apply,
+ // and omt::iterate_on_range will not examine it.
+ struct toku_msg_buffer_key_msn_heaviside_extra ube_extra(cmp, msg_buffer, bounds.ubi(), MAX_MSN);
+ r = message_tree.template find<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(ube_extra, +1, nullptr, ube);
+ if (r == DB_NOTFOUND) {
+ // Couldn't find anything in the buffer bigger than our key,
+ // so we need to look at everything up to the end of
+ // message_tree.
+ *ube = message_tree.size();
+ }
+ } else {
+ // No upper bound given, it's positive infinity, so we need to go
+ // through the end of the OMT.
+ *ube = message_tree.size();
+ }
+}
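+// A small worked example of the translation above (hypothetical keys): with
+// pivot bounds lbe = "b" (exclusive) and ubi = "m" (inclusive), and messages
+// keyed "a", "c", "f", "m", "z" in the tree, the (MAX_MSN, +1) searches set
+// *lbi to the index of "c" (first key strictly greater than "b") and *ube to
+// the index of "z" (first key strictly greater than "m"), so a subsequent
+// iterate_on_range(lbi, ube) visits "c", "f", and "m".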
+
+// For each message in the ancestor's buffer (determined by childnum) that
+// is key-wise between lower_bound_exclusive and upper_bound_inclusive,
+// apply the message to the basement node. We treat the bounds as minus
+// or plus infinity respectively if they are NULL. Do not mark the node
+// as dirty (preserve previous state of 'dirty' bit).
+static void bnc_apply_messages_to_basement_node(
+ FT_HANDLE t, // used for comparison function
+ BASEMENTNODE bn, // where to apply messages
+ FTNODE ancestor, // the ancestor node where we can find messages to apply
+ int childnum, // which child buffer of ancestor contains messages we want
+ const pivot_bounds &
+ bounds, // contains pivot key bounds of this basement node
+ txn_gc_info *gc_info,
+ bool *msgs_applied) {
+ int r;
+ NONLEAF_CHILDINFO bnc = BNC(ancestor, childnum);
+
+ // Determine the offsets in the message trees between which we need to
+ // apply messages from this buffer
+ STAT64INFO_S stats_delta = {0, 0};
+ uint64_t workdone_this_ancestor = 0;
+ int64_t logical_rows_delta = 0;
+
+ uint32_t stale_lbi, stale_ube;
+ if (!bn->stale_ancestor_messages_applied) {
+ find_bounds_within_message_tree(t->ft->cmp,
+ bnc->stale_message_tree,
+ &bnc->msg_buffer,
+ bounds,
+ &stale_lbi,
+ &stale_ube);
+ } else {
+ stale_lbi = 0;
+ stale_ube = 0;
+ }
+ uint32_t fresh_lbi, fresh_ube;
+ find_bounds_within_message_tree(t->ft->cmp,
+ bnc->fresh_message_tree,
+ &bnc->msg_buffer,
+ bounds,
+ &fresh_lbi,
+ &fresh_ube);
+
+ // We now know where all the messages we must apply are, so one of the
+ // following 3 cases will do the application, depending on which of
+ // the lists contains relevant messages:
+ //
+ // 1. broadcast messages and anything else, or a mix of fresh and stale
+ // 2. only fresh messages
+ // 3. only stale messages
+ if (bnc->broadcast_list.size() > 0 ||
+ (stale_lbi != stale_ube && fresh_lbi != fresh_ube)) {
+ // We have messages in multiple trees, so we grab all
+ // the relevant messages' offsets and sort them by MSN, then apply
+ // them in MSN order.
+ const int buffer_size =
+ ((stale_ube - stale_lbi) + (fresh_ube - fresh_lbi) +
+ bnc->broadcast_list.size());
+ toku::scoped_malloc offsets_buf(buffer_size * sizeof(int32_t));
+ int32_t *offsets = reinterpret_cast<int32_t *>(offsets_buf.get());
+ struct store_msg_buffer_offset_extra sfo_extra = {.offsets = offsets,
+ .i = 0};
+
+ // Populate offsets array with offsets to stale messages
+ r = bnc->stale_message_tree
+ .iterate_on_range<struct store_msg_buffer_offset_extra,
+ store_msg_buffer_offset>(
+ stale_lbi, stale_ube, &sfo_extra);
+ assert_zero(r);
+
+ // Then store fresh offsets, and mark them to be moved to stale later.
+ r = bnc->fresh_message_tree
+ .iterate_and_mark_range<struct store_msg_buffer_offset_extra,
+ store_msg_buffer_offset>(
+ fresh_lbi, fresh_ube, &sfo_extra);
+ assert_zero(r);
+
+ // Store offsets of all broadcast messages.
+ r = bnc->broadcast_list.iterate<struct store_msg_buffer_offset_extra,
+ store_msg_buffer_offset>(&sfo_extra);
+ assert_zero(r);
+ invariant(sfo_extra.i == buffer_size);
+
+ // Sort by MSN.
+ toku::sort<int32_t, message_buffer, msg_buffer_offset_msn_cmp>::
+ mergesort_r(offsets, buffer_size, bnc->msg_buffer);
+
+ // Apply the messages in MSN order.
+ for (int i = 0; i < buffer_size; ++i) {
+ *msgs_applied = true;
+ do_bn_apply_msg(t,
+ bn,
+ &bnc->msg_buffer,
+ offsets[i],
+ gc_info,
+ &workdone_this_ancestor,
+ &stats_delta,
+ &logical_rows_delta);
+ }
+ } else if (stale_lbi == stale_ube) {
+ // No stale messages to apply, we just apply fresh messages, and mark
+ // them to be moved to stale later.
+ struct iterate_do_bn_apply_msg_extra iter_extra = {
+ .t = t,
+ .bn = bn,
+ .bnc = bnc,
+ .gc_info = gc_info,
+ .workdone = &workdone_this_ancestor,
+ .stats_to_update = &stats_delta,
+ .logical_rows_delta = &logical_rows_delta};
+ if (fresh_ube - fresh_lbi > 0)
+ *msgs_applied = true;
+ r = bnc->fresh_message_tree
+ .iterate_and_mark_range<struct iterate_do_bn_apply_msg_extra,
+ iterate_do_bn_apply_msg>(
+ fresh_lbi, fresh_ube, &iter_extra);
+ assert_zero(r);
+ } else {
+ invariant(fresh_lbi == fresh_ube);
+ // No fresh messages to apply, we just apply stale messages.
+
+ if (stale_ube - stale_lbi > 0)
+ *msgs_applied = true;
+ struct iterate_do_bn_apply_msg_extra iter_extra = {
+ .t = t,
+ .bn = bn,
+ .bnc = bnc,
+ .gc_info = gc_info,
+ .workdone = &workdone_this_ancestor,
+ .stats_to_update = &stats_delta,
+ .logical_rows_delta = &logical_rows_delta};
+
+ r = bnc->stale_message_tree
+ .iterate_on_range<struct iterate_do_bn_apply_msg_extra,
+ iterate_do_bn_apply_msg>(
+ stale_lbi, stale_ube, &iter_extra);
+ assert_zero(r);
+ }
+ //
+ // update stats
+ //
+ if (workdone_this_ancestor > 0) {
+ (void)toku_sync_fetch_and_add(&BP_WORKDONE(ancestor, childnum),
+ workdone_this_ancestor);
+ }
+ if (stats_delta.numbytes || stats_delta.numrows) {
+ toku_ft_update_stats(&t->ft->in_memory_stats, stats_delta);
+ }
+ toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
+ bn->logical_rows_delta += logical_rows_delta;
+}
+
+static void
+apply_ancestors_messages_to_bn(
+ FT_HANDLE t,
+ FTNODE node,
+ int childnum,
+ ANCESTORS ancestors,
+ const pivot_bounds &bounds,
+ txn_gc_info *gc_info,
+ bool* msgs_applied
+ )
+{
+ BASEMENTNODE curr_bn = BLB(node, childnum);
+ const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum);
+ for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) {
+ if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > curr_bn->max_msn_applied.msn) {
+ paranoid_invariant(BP_STATE(curr_ancestors->node, curr_ancestors->childnum) == PT_AVAIL);
+ bnc_apply_messages_to_basement_node(
+ t,
+ curr_bn,
+ curr_ancestors->node,
+ curr_ancestors->childnum,
+ curr_bounds,
+ gc_info,
+ msgs_applied
+ );
+ // We don't want to check this ancestor node again if the
+ // next time we query it, the msn hasn't changed.
+ curr_bn->max_msn_applied = curr_ancestors->node->max_msn_applied_to_node_on_disk;
+ }
+ }
+ // At this point, we know all the stale messages above this
+ // basement node have been applied, and any new messages will be
+ // fresh, so we don't need to look at stale messages for this
+ // basement node, unless it gets evicted (and this field becomes
+ // false when it's read in again).
+ curr_bn->stale_ancestor_messages_applied = true;
+}
+
+void
+toku_apply_ancestors_messages_to_node (
+ FT_HANDLE t,
+ FTNODE node,
+ ANCESTORS ancestors,
+ const pivot_bounds &bounds,
+ bool* msgs_applied,
+ int child_to_read
+ )
+// Effect:
+// Bring a leaf node up-to-date according to all the messages in the ancestors.
+// If the leaf node is already up-to-date then do nothing.
+// If the leaf node is not already up-to-date, then record the work done
+// for that leaf in each ancestor.
+// Requires:
+// This is being called when pinning a leaf node for the query path.
+// The entire root-to-leaf path is pinned and appears in the ancestors list.
+{
+ VERIFY_NODE(t, node);
+ paranoid_invariant(node->height == 0);
+
+ TXN_MANAGER txn_manager = toku_ft_get_txn_manager(t);
+ txn_manager_state txn_state_for_gc(txn_manager);
+
+ TXNID oldest_referenced_xid_for_simple_gc = toku_ft_get_oldest_referenced_xid_estimate(t);
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_for_simple_gc,
+ node->oldest_referenced_xid_known,
+ true);
+ if (!node->dirty() && child_to_read >= 0) {
+ paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL);
+ apply_ancestors_messages_to_bn(
+ t,
+ node,
+ child_to_read,
+ ancestors,
+ bounds,
+ &gc_info,
+ msgs_applied
+ );
+ }
+ else {
+ // we know we are a leaf node
+ // An important invariant:
+ // We MUST bring every available basement node for a dirty node up to date.
+ // flushing on the cleaner thread depends on this. This invariant
+ // allows the cleaner thread to just pick an internal node and flush it
+ // as opposed to being forced to start from the root.
+ for (int i = 0; i < node->n_children; i++) {
+ if (BP_STATE(node, i) != PT_AVAIL) { continue; }
+ apply_ancestors_messages_to_bn(
+ t,
+ node,
+ i,
+ ancestors,
+ bounds,
+ &gc_info,
+ msgs_applied
+ );
+ }
+ }
+ VERIFY_NODE(t, node);
+}
+
+static bool bn_needs_ancestors_messages(
+ FT ft,
+ FTNODE node,
+ int childnum,
+ const pivot_bounds &bounds,
+ ANCESTORS ancestors,
+ MSN* max_msn_applied
+ )
+{
+ BASEMENTNODE bn = BLB(node, childnum);
+ const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum);
+ bool needs_ancestors_messages = false;
+ for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) {
+ if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > bn->max_msn_applied.msn) {
+ paranoid_invariant(BP_STATE(curr_ancestors->node, curr_ancestors->childnum) == PT_AVAIL);
+ NONLEAF_CHILDINFO bnc = BNC(curr_ancestors->node, curr_ancestors->childnum);
+ if (bnc->broadcast_list.size() > 0) {
+ needs_ancestors_messages = true;
+ goto cleanup;
+ }
+ if (!bn->stale_ancestor_messages_applied) {
+ uint32_t stale_lbi, stale_ube;
+ find_bounds_within_message_tree(ft->cmp,
+ bnc->stale_message_tree,
+ &bnc->msg_buffer,
+ curr_bounds,
+ &stale_lbi,
+ &stale_ube);
+ if (stale_lbi < stale_ube) {
+ needs_ancestors_messages = true;
+ goto cleanup;
+ }
+ }
+ uint32_t fresh_lbi, fresh_ube;
+ find_bounds_within_message_tree(ft->cmp,
+ bnc->fresh_message_tree,
+ &bnc->msg_buffer,
+ curr_bounds,
+ &fresh_lbi,
+ &fresh_ube);
+ if (fresh_lbi < fresh_ube) {
+ needs_ancestors_messages = true;
+ goto cleanup;
+ }
+ if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > max_msn_applied->msn) {
+ max_msn_applied->msn = curr_ancestors->node->max_msn_applied_to_node_on_disk.msn;
+ }
+ }
+ }
+cleanup:
+ return needs_ancestors_messages;
+}
+
+bool toku_ft_leaf_needs_ancestors_messages(
+ FT ft,
+ FTNODE node,
+ ANCESTORS ancestors,
+ const pivot_bounds &bounds,
+ MSN *const max_msn_in_path,
+ int child_to_read
+ )
+// Effect: Determine whether there are messages in a node's ancestors
+// which must be applied to it. These messages are in the correct
+// keyrange for any available basement nodes, and are in nodes with the
+// correct max_msn_applied_to_node_on_disk.
+// Notes:
+// This is an approximate query.
+// Output:
+// max_msn_in_path: max of "max_msn_applied_to_node_on_disk" over
+// ancestors. This is used later to update basement nodes'
+// max_msn_applied values in case we don't do the full algorithm.
+// Returns:
+// true if there may be some such messages
+// false only if there are definitely no such messages
+// Rationale:
+// When we pin a node with a read lock, we want to quickly determine if
+// we should exchange it for a write lock in preparation for applying
+// messages. If there are no messages, we don't need the write lock.
+{
+ paranoid_invariant(node->height == 0);
+ bool needs_ancestors_messages = false;
+ // child_to_read may be -1 in test cases
+ if (!node->dirty() && child_to_read >= 0) {
+ paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL);
+ needs_ancestors_messages = bn_needs_ancestors_messages(
+ ft,
+ node,
+ child_to_read,
+ bounds,
+ ancestors,
+ max_msn_in_path
+ );
+ }
+ else {
+ for (int i = 0; i < node->n_children; ++i) {
+ if (BP_STATE(node, i) != PT_AVAIL) { continue; }
+ needs_ancestors_messages = bn_needs_ancestors_messages(
+ ft,
+ node,
+ i,
+ bounds,
+ ancestors,
+ max_msn_in_path
+ );
+ if (needs_ancestors_messages) {
+ goto cleanup;
+ }
+ }
+ }
+cleanup:
+ return needs_ancestors_messages;
+}
+
+void toku_ft_bn_update_max_msn(FTNODE node, MSN max_msn_applied, int child_to_read) {
+ invariant(node->height == 0);
+ if (!node->dirty() && child_to_read >= 0) {
+ paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL);
+ BASEMENTNODE bn = BLB(node, child_to_read);
+ if (max_msn_applied.msn > bn->max_msn_applied.msn) {
+ // see comment below
+ (void) toku_sync_val_compare_and_swap(&bn->max_msn_applied.msn, bn->max_msn_applied.msn, max_msn_applied.msn);
+ }
+ }
+ else {
+ for (int i = 0; i < node->n_children; ++i) {
+ if (BP_STATE(node, i) != PT_AVAIL) { continue; }
+ BASEMENTNODE bn = BLB(node, i);
+ if (max_msn_applied.msn > bn->max_msn_applied.msn) {
+ // This function runs in a shared access context, so to silence tools
+ // like DRD, we use a CAS and ignore the result.
+ // Any threads trying to update these basement nodes should be
+ // updating them to the same thing (since they all have a read lock on
+ // the same root-to-leaf path) so this is safe.
+ (void) toku_sync_val_compare_and_swap(&bn->max_msn_applied.msn, bn->max_msn_applied.msn, max_msn_applied.msn);
+ }
+ }
+ }
+}
+
+struct copy_to_stale_extra {
+ FT ft;
+ NONLEAF_CHILDINFO bnc;
+};
+
+int copy_to_stale(const int32_t &offset, const uint32_t UU(idx), struct copy_to_stale_extra *const extra) __attribute__((nonnull(3)));
+int copy_to_stale(const int32_t &offset, const uint32_t UU(idx), struct copy_to_stale_extra *const extra)
+{
+ MSN msn;
+ DBT key;
+ extra->bnc->msg_buffer.get_message_key_msn(offset, &key, &msn);
+ struct toku_msg_buffer_key_msn_heaviside_extra heaviside_extra(extra->ft->cmp, &extra->bnc->msg_buffer, &key, msn);
+ int r = extra->bnc->stale_message_tree.insert<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(offset, heaviside_extra, nullptr);
+ invariant_zero(r);
+ return 0;
+}
+
+void toku_ft_bnc_move_messages_to_stale(FT ft, NONLEAF_CHILDINFO bnc) {
+ struct copy_to_stale_extra cts_extra = { .ft = ft, .bnc = bnc };
+ int r = bnc->fresh_message_tree.iterate_over_marked<struct copy_to_stale_extra, copy_to_stale>(&cts_extra);
+ invariant_zero(r);
+ bnc->fresh_message_tree.delete_all_marked();
+}
+
+void toku_move_ftnode_messages_to_stale(FT ft, FTNODE node) {
+ invariant(node->height > 0);
+ for (int i = 0; i < node->n_children; ++i) {
+ if (BP_STATE(node, i) != PT_AVAIL) {
+ continue;
+ }
+ NONLEAF_CHILDINFO bnc = BNC(node, i);
+ // We can't delete things out of the fresh tree inside the above
+ // procedures because we're still looking at the fresh tree. Instead
+ // we have to move messages after we're done looking at it.
+ toku_ft_bnc_move_messages_to_stale(ft, bnc);
+ }
+}
+
+//
+// Balance
+// Availability
+// Size
+//
+
+struct rebalance_array_info {
+ uint32_t offset;
+ LEAFENTRY *le_array;
+ uint32_t *key_sizes_array;
+ const void **key_ptr_array;
+ static int fn(const void* key, const uint32_t keylen, const LEAFENTRY &le,
+ const uint32_t idx, struct rebalance_array_info *const ai) {
+ ai->le_array[idx+ai->offset] = le;
+ ai->key_sizes_array[idx+ai->offset] = keylen;
+ ai->key_ptr_array[idx+ai->offset] = key;
+ return 0;
+ }
+};
+
+// There must still be at least one child
+// Requires that all messages in buffers above have been applied.
+// Because all messages above have been applied, setting msn of all new basements
+// to max msn of existing basements is correct. (There cannot be any messages in
+// buffers above that still need to be applied.)
+void toku_ftnode_leaf_rebalance(FTNODE node, unsigned int basementnodesize) {
+
+ assert(node->height == 0);
+ assert(node->dirty());
+
+ uint32_t num_orig_basements = node->n_children;
+ // Count number of leaf entries in this leaf (num_le).
+ uint32_t num_le = 0;
+ for (uint32_t i = 0; i < num_orig_basements; i++) {
+ num_le += BLB_DATA(node, i)->num_klpairs();
+ }
+
+ uint32_t num_alloc = num_le ? num_le : 1; // simplify logic below by always having at least one entry per array
+
+ // Create an array of OMTVALUE's that store all the pointers to all the data.
+ // Each element in leafpointers is a pointer to a leaf.
+ toku::scoped_malloc leafpointers_buf(sizeof(LEAFENTRY) * num_alloc);
+ LEAFENTRY *leafpointers = reinterpret_cast<LEAFENTRY *>(leafpointers_buf.get());
+ leafpointers[0] = NULL;
+
+ toku::scoped_malloc key_pointers_buf(sizeof(void *) * num_alloc);
+ const void **key_pointers = reinterpret_cast<const void **>(key_pointers_buf.get());
+ key_pointers[0] = NULL;
+
+ toku::scoped_malloc key_sizes_buf(sizeof(uint32_t) * num_alloc);
+ uint32_t *key_sizes = reinterpret_cast<uint32_t *>(key_sizes_buf.get());
+
+ // Capture pointers to old mempools' buffers (so they can be destroyed)
+ toku::scoped_malloc old_bns_buf(sizeof(BASEMENTNODE) * num_orig_basements);
+ BASEMENTNODE *old_bns = reinterpret_cast<BASEMENTNODE *>(old_bns_buf.get());
+ old_bns[0] = NULL;
+
+ uint32_t curr_le = 0;
+ for (uint32_t i = 0; i < num_orig_basements; i++) {
+ bn_data* bd = BLB_DATA(node, i);
+ struct rebalance_array_info ai {.offset = curr_le, .le_array = leafpointers, .key_sizes_array = key_sizes, .key_ptr_array = key_pointers };
+ bd->iterate<rebalance_array_info, rebalance_array_info::fn>(&ai);
+ curr_le += bd->num_klpairs();
+ }
+
+ // Create an array that will store indexes of new pivots.
+ // Each element in new_pivots is the index of a pivot key.
+ // (Allocating num_le of them is overkill, but num_le is an upper bound.)
+ toku::scoped_malloc new_pivots_buf(sizeof(uint32_t) * num_alloc);
+ uint32_t *new_pivots = reinterpret_cast<uint32_t *>(new_pivots_buf.get());
+ new_pivots[0] = 0;
+
+ // Each element in le_sizes is the size of the leafentry pointed to by leafpointers.
+ toku::scoped_malloc le_sizes_buf(sizeof(size_t) * num_alloc);
+ size_t *le_sizes = reinterpret_cast<size_t *>(le_sizes_buf.get());
+ le_sizes[0] = 0;
+
+ // Create an array that will store the size of each basement.
+ // This is the sum of the leaf sizes of all the leaves in that basement.
+ // We don't know how many basements there will be, so we use num_le as the upper bound.
+
+ // Sum of all le sizes in a single basement
+ toku::scoped_calloc bn_le_sizes_buf(sizeof(size_t) * num_alloc);
+ size_t *bn_le_sizes = reinterpret_cast<size_t *>(bn_le_sizes_buf.get());
+
+ // Sum of all key sizes in a single basement
+ toku::scoped_calloc bn_key_sizes_buf(sizeof(size_t) * num_alloc);
+ size_t *bn_key_sizes = reinterpret_cast<size_t *>(bn_key_sizes_buf.get());
+
+ // TODO 4050: All these arrays should be combined into a single array of some bn_info struct (pivot, msize, num_les).
+ // Each entry is the number of leafentries in this basement. (Again, num_le is an overly generous upper bound.)
+ toku::scoped_malloc num_les_this_bn_buf(sizeof(uint32_t) * num_alloc);
+ uint32_t *num_les_this_bn = reinterpret_cast<uint32_t *>(num_les_this_bn_buf.get());
+ num_les_this_bn[0] = 0;
+
+ // Figure out the new pivots.
+ // We need the index of each pivot, and for each basement we need
+ // the number of leaves and the sum of the sizes of the leaves (memory requirement for basement).
+ uint32_t curr_pivot = 0;
+ uint32_t num_le_in_curr_bn = 0;
+ uint32_t bn_size_so_far = 0;
+ for (uint32_t i = 0; i < num_le; i++) {
+ uint32_t curr_le_size = leafentry_disksize((LEAFENTRY) leafpointers[i]);
+ le_sizes[i] = curr_le_size;
+ if ((bn_size_so_far + curr_le_size + sizeof(uint32_t) + key_sizes[i] > basementnodesize) && (num_le_in_curr_bn != 0)) {
+ // cap off the current basement node to end with the element before i
+ new_pivots[curr_pivot] = i-1;
+ curr_pivot++;
+ num_le_in_curr_bn = 0;
+ bn_size_so_far = 0;
+ }
+ num_le_in_curr_bn++;
+ num_les_this_bn[curr_pivot] = num_le_in_curr_bn;
+ bn_le_sizes[curr_pivot] += curr_le_size;
+ bn_key_sizes[curr_pivot] += sizeof(uint32_t) + key_sizes[i]; // uint32_t le_offset
+ bn_size_so_far += curr_le_size + sizeof(uint32_t) + key_sizes[i];
+ }
+ // curr_pivot is now the total number of pivot keys in the leaf node
+ int num_pivots = curr_pivot;
+ int num_children = num_pivots + 1;
+
+ // now we need to fill in the new basement nodes and pivots
+
+ // TODO: (Zardosht) this is an ugly thing right now
+ // Need to figure out how to properly deal with seqinsert.
+ // I am not happy with how this is being
+ // handled with basement nodes
+ uint32_t tmp_seqinsert = BLB_SEQINSERT(node, num_orig_basements - 1);
+
+ // choose the max msn applied to any basement as the max msn applied to all new basements
+ MSN max_msn = ZERO_MSN;
+ for (uint32_t i = 0; i < num_orig_basements; i++) {
+ MSN curr_msn = BLB_MAX_MSN_APPLIED(node,i);
+ max_msn = (curr_msn.msn > max_msn.msn) ? curr_msn : max_msn;
+ }
+ // detach the basement nodes from the node; we keep references to them below
+ for (uint32_t i = 0; i < num_orig_basements; i++) {
+ // save a reference to the old basement nodes
+ // we will need them to ensure that the memory
+ // stays intact
+ old_bns[i] = toku_detach_bn(node, i);
+ }
+ // Now destroy the old basements, but do not destroy leaves
+ toku_destroy_ftnode_internals(node);
+
+ // now reallocate pieces and start filling them in
+ invariant(num_children > 0);
+
+ node->n_children = num_children;
+ XCALLOC_N(num_children, node->bp); // allocate pointers to basements (bp)
+ for (int i = 0; i < num_children; i++) {
+ set_BLB(node, i, toku_create_empty_bn()); // allocate empty basements and set bp pointers
+ }
+
+ // now we start to fill in the data
+
+ // first the pivots
+ toku::scoped_malloc pivotkeys_buf(num_pivots * sizeof(DBT));
+ DBT *pivotkeys = reinterpret_cast<DBT *>(pivotkeys_buf.get());
+ for (int i = 0; i < num_pivots; i++) {
+ uint32_t size = key_sizes[new_pivots[i]];
+ const void *key = key_pointers[new_pivots[i]];
+ toku_fill_dbt(&pivotkeys[i], key, size);
+ }
+ node->pivotkeys.create_from_dbts(pivotkeys, num_pivots);
+
+ uint32_t baseindex_this_bn = 0;
+ // now the basement nodes
+ for (int i = 0; i < num_children; i++) {
+ // put back seqinsert
+ BLB_SEQINSERT(node, i) = tmp_seqinsert;
+
+ // create start (inclusive) and end (exclusive) boundaries for data of basement node
+ uint32_t curr_start = (i==0) ? 0 : new_pivots[i-1]+1; // index of first leaf in basement
+ uint32_t curr_end = (i==num_pivots) ? num_le : new_pivots[i]+1; // index of first leaf in next basement
+ uint32_t num_in_bn = curr_end - curr_start; // number of leaves in this basement
+
+ // create indexes for new basement
+ invariant(baseindex_this_bn == curr_start);
+ uint32_t num_les_to_copy = num_les_this_bn[i];
+ invariant(num_les_to_copy == num_in_bn);
+
+ bn_data* bd = BLB_DATA(node, i);
+ bd->set_contents_as_clone_of_sorted_array(
+ num_les_to_copy,
+ &key_pointers[baseindex_this_bn],
+ &key_sizes[baseindex_this_bn],
+ &leafpointers[baseindex_this_bn],
+ &le_sizes[baseindex_this_bn],
+ bn_key_sizes[i], // Total key sizes
+ bn_le_sizes[i] // total le sizes
+ );
+
+ BP_STATE(node,i) = PT_AVAIL;
+ BP_TOUCH_CLOCK(node,i);
+ BLB_MAX_MSN_APPLIED(node,i) = max_msn;
+ baseindex_this_bn += num_les_to_copy; // set to index of next bn
+ }
+ node->max_msn_applied_to_node_on_disk = max_msn;
+
+ // destroy buffers of old mempools
+ for (uint32_t i = 0; i < num_orig_basements; i++) {
+ destroy_basement_node(old_bns[i]);
+ }
+}
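+
+// Illustrative example (numbers are hypothetical, not from this change): with
+// num_le = 10 and new_pivots = {3, 7}, we get num_pivots = 2 and num_children = 3.
+// Basement 0 receives leafentries [0..3], basement 1 receives [4..7], and
+// basement 2 receives [8..9]; the two pivot keys are the keys of leafentries 3
+// and 7, so each basement's keys are <= its pivot, matching the pivot key
+// convention documented in node.h.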
+
+bool toku_ftnode_fully_in_memory(FTNODE node) {
+ for (int i = 0; i < node->n_children; i++) {
+ if (BP_STATE(node,i) != PT_AVAIL) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void toku_ftnode_assert_fully_in_memory(FTNODE UU(node)) {
+ paranoid_invariant(toku_ftnode_fully_in_memory(node));
+}
+
+uint32_t toku_ftnode_leaf_num_entries(FTNODE node) {
+ toku_ftnode_assert_fully_in_memory(node);
+ uint32_t num_entries = 0;
+ for (int i = 0; i < node->n_children; i++) {
+ num_entries += BLB_DATA(node, i)->num_klpairs();
+ }
+ return num_entries;
+}
+
+enum reactivity toku_ftnode_get_leaf_reactivity(FTNODE node, uint32_t nodesize) {
+ enum reactivity re = RE_STABLE;
+ toku_ftnode_assert_fully_in_memory(node);
+ paranoid_invariant(node->height==0);
+ unsigned int size = toku_serialize_ftnode_size(node);
+ if (size > nodesize && toku_ftnode_leaf_num_entries(node) > 1) {
+ re = RE_FISSIBLE;
+ } else if ((size*4) < nodesize && !BLB_SEQINSERT(node, node->n_children-1)) {
+ re = RE_FUSIBLE;
+ }
+ return re;
+}
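+
+// Illustrative example (hypothetical sizes): with nodesize = 4MB, a leaf that
+// serializes to 5MB and holds more than one entry is RE_FISSIBLE; a leaf that
+// serializes to 512KB (512KB * 4 < 4MB) whose last basement is not in a
+// sequential-insert run is RE_FUSIBLE; anything else is RE_STABLE.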
+
+enum reactivity toku_ftnode_get_nonleaf_reactivity(FTNODE node, unsigned int fanout) {
+ paranoid_invariant(node->height > 0);
+ int n_children = node->n_children;
+ if (n_children > (int) fanout) {
+ return RE_FISSIBLE;
+ }
+ if (n_children * 4 < (int) fanout) {
+ return RE_FUSIBLE;
+ }
+ return RE_STABLE;
+}
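+
+// Illustrative example (hypothetical fanout): with fanout = 16, a node with 17
+// children is RE_FISSIBLE (17 > 16), a node with 3 children is RE_FUSIBLE
+// (3 * 4 = 12 < 16), and anything in between is RE_STABLE.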
+
+enum reactivity toku_ftnode_get_reactivity(FT ft, FTNODE node) {
+ toku_ftnode_assert_fully_in_memory(node);
+ if (node->height == 0) {
+ return toku_ftnode_get_leaf_reactivity(node, ft->h->nodesize);
+ } else {
+ return toku_ftnode_get_nonleaf_reactivity(node, ft->h->fanout);
+ }
+}
+
+unsigned int toku_bnc_nbytesinbuf(NONLEAF_CHILDINFO bnc) {
+ return bnc->msg_buffer.buffer_size_in_use();
+}
+
+// Return true if the size of the buffers plus the amount of work done is large enough.
+// Return false if there is nothing to be flushed (the buffers are empty).
+bool toku_ftnode_nonleaf_is_gorged(FTNODE node, uint32_t nodesize) {
+ uint64_t size = toku_serialize_ftnode_size(node);
+
+ bool buffers_are_empty = true;
+ toku_ftnode_assert_fully_in_memory(node);
+ //
+ // the nonleaf node is gorged if the following holds true:
+ // - the buffers are non-empty
+ // - the total workdone by the buffers PLUS the size of the buffers
+ // is greater than nodesize (which as of Maxwell should be
+ // 4MB)
+ //
+ paranoid_invariant(node->height > 0);
+ for (int child = 0; child < node->n_children; ++child) {
+ size += BP_WORKDONE(node, child);
+ }
+ for (int child = 0; child < node->n_children; ++child) {
+ if (toku_bnc_nbytesinbuf(BNC(node, child)) > 0) {
+ buffers_are_empty = false;
+ break;
+ }
+ }
+ return ((size > nodesize)
+ &&
+ (!buffers_are_empty));
+}
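+
+// Illustrative example (hypothetical sizes): with nodesize = 4MB, a node that
+// serializes to 3MB and whose children have accumulated 2MB of workdone is
+// gorged (3MB + 2MB > 4MB) as long as at least one child buffer is non-empty;
+// a node whose buffers are all empty is never gorged, regardless of size.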
+
+int toku_bnc_n_entries(NONLEAF_CHILDINFO bnc) {
+ return bnc->msg_buffer.num_entries();
+}
+
+// how much memory does this child buffer consume?
+long toku_bnc_memory_size(NONLEAF_CHILDINFO bnc) {
+ return (sizeof(*bnc) +
+ bnc->msg_buffer.memory_footprint() +
+ bnc->fresh_message_tree.memory_size() +
+ bnc->stale_message_tree.memory_size() +
+ bnc->broadcast_list.memory_size());
+}
+
+// how much memory in this child buffer holds useful data?
+// originally created solely for use by test program(s).
+long toku_bnc_memory_used(NONLEAF_CHILDINFO bnc) {
+ return (sizeof(*bnc) +
+ bnc->msg_buffer.memory_size_in_use() +
+ bnc->fresh_message_tree.memory_size() +
+ bnc->stale_message_tree.memory_size() +
+ bnc->broadcast_list.memory_size());
+}
+
+//
+// Garbage collection
+// Message injection
+// Message application
+//
+
+// Used only by test programs: append a child node to a parent node
+void toku_ft_nonleaf_append_child(FTNODE node, FTNODE child, const DBT *pivotkey) {
+ int childnum = node->n_children;
+ node->n_children++;
+ REALLOC_N(node->n_children, node->bp);
+ BP_BLOCKNUM(node,childnum) = child->blocknum;
+ BP_STATE(node,childnum) = PT_AVAIL;
+ BP_WORKDONE(node, childnum) = 0;
+ set_BNC(node, childnum, toku_create_empty_nl());
+ if (pivotkey) {
+ invariant(childnum > 0);
+ node->pivotkeys.insert_at(pivotkey, childnum - 1);
+ }
+ node->set_dirty();
+}
+
+void
+toku_ft_bn_apply_msg_once (
+ BASEMENTNODE bn,
+ const ft_msg &msg,
+ uint32_t idx,
+ uint32_t le_keylen,
+ LEAFENTRY le,
+ txn_gc_info *gc_info,
+ uint64_t *workdone,
+ STAT64INFO stats_to_update,
+ int64_t *logical_rows_delta
+ )
+// Effect: Apply msg to leafentry (msn is ignored)
+// Calculate work done by message on leafentry and add it to caller's workdone counter.
+// idx is the location where it goes
+// le is old leafentry
+{
+ size_t newsize=0, oldsize=0, workdone_this_le=0;
+ LEAFENTRY new_le=0;
+ // how many bytes of user data (not including overhead) were added or
+ // deleted from this row
+ int64_t numbytes_delta = 0;
+ // will be +1 or -1 or 0 (if row was added or deleted or not)
+ int64_t numrows_delta = 0;
+ // will be +1, -1 or 0 if a message that was accounted for logically has
+ // changed in meaning such as an insert changed to an update or a delete
+ // changed to a noop
+ int64_t logical_rows_delta_le = 0;
+ uint32_t key_storage_size = msg.kdbt()->size + sizeof(uint32_t);
+ if (le) {
+ oldsize = leafentry_memsize(le) + key_storage_size;
+ }
+
+ // toku_le_apply_msg() may call bn_data::mempool_malloc_and_update_dmt()
+ // to allocate more space. That means le is guaranteed to not cause a
+ // sigsegv but it may point to a mempool that is no longer in use.
+ // We'll have to release the old mempool later.
+ logical_rows_delta_le = toku_le_apply_msg(
+ msg,
+ le,
+ &bn->data_buffer,
+ idx,
+ le_keylen,
+ gc_info,
+ &new_le,
+ &numbytes_delta);
+
+ // at this point, we cannot trust cmd->u.id.key to be valid.
+ // The dmt may have realloced its mempool and freed the one containing key.
+
+ newsize = new_le ? (leafentry_memsize(new_le) + key_storage_size) : 0;
+ if (le && new_le) {
+ workdone_this_le = (oldsize > newsize ? oldsize : newsize); // work done is max of le size before and after message application
+
+ } else { // we did not just replace a row, so ...
+ if (le) {
+ // ... we just deleted a row ...
+ workdone_this_le = oldsize;
+ numrows_delta = -1;
+ }
+ if (new_le) {
+ // ... or we just added a row
+ workdone_this_le = newsize;
+ numrows_delta = 1;
+ }
+ }
+ if (FT_LIKELY(workdone != NULL)) { // test programs may call with NULL
+ *workdone += workdone_this_le;
+ }
+
+ if (FT_LIKELY(logical_rows_delta != NULL)) {
+ *logical_rows_delta += logical_rows_delta_le;
+ }
+ // now update stat64 statistics
+ bn->stat64_delta.numrows += numrows_delta;
+ bn->stat64_delta.numbytes += numbytes_delta;
+ // the only reason stats_to_update may be null is for tests
+ if (FT_LIKELY(stats_to_update != NULL)) {
+ stats_to_update->numrows += numrows_delta;
+ stats_to_update->numbytes += numbytes_delta;
+ }
+}
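+
+// Illustrative accounting example (hypothetical sizes): replacing a 100-byte
+// leafentry with a 120-byte one counts 120 bytes of workdone (the max of old
+// and new); deleting it counts 100 bytes with numrows_delta = -1; inserting a
+// new 120-byte entry counts 120 bytes with numrows_delta = +1.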
+
+static const uint32_t setval_tag = 0xee0ccb99; // this was gotten by doing "cat /dev/random|head -c4|od -x" to get a random number. We want to make sure that the user actually passes us the setval_extra_s that we passed in.
+struct setval_extra_s {
+ uint32_t tag;
+ bool did_set_val;
+ // any error code that setval_fun wants to return goes here.
+ int setval_r;
+ // need arguments for toku_ft_bn_apply_msg_once
+ BASEMENTNODE bn;
+ // captured from original message, not currently used
+ MSN msn;
+ XIDS xids;
+ const DBT* key;
+ uint32_t idx;
+ uint32_t le_keylen;
+ LEAFENTRY le;
+ txn_gc_info* gc_info;
+ uint64_t* workdone; // set by toku_ft_bn_apply_msg_once()
+ STAT64INFO stats_to_update;
+ int64_t* logical_rows_delta;
+};
+
+/*
+ * If new_val == NULL, we send a delete message instead of an insert.
+ * This happens here instead of in do_delete() for consistency.
+ * setval_fun() is called from handlerton, passing in svextra_v
+ * from setval_extra_s input arg to ft->update_fun().
+ */
+static void setval_fun (const DBT *new_val, void *svextra_v) {
+ struct setval_extra_s *CAST_FROM_VOIDP(svextra, svextra_v);
+ paranoid_invariant(svextra->tag==setval_tag);
+ paranoid_invariant(!svextra->did_set_val);
+ svextra->did_set_val = true;
+
+ {
+ // can't leave scope until toku_ft_bn_apply_msg_once if
+ // this is a delete
+ DBT val;
+ ft_msg msg(
+ svextra->key,
+ new_val ? new_val : toku_init_dbt(&val),
+ new_val ? FT_INSERT : FT_DELETE_ANY,
+ svextra->msn,
+ svextra->xids);
+ toku_ft_bn_apply_msg_once(
+ svextra->bn,
+ msg,
+ svextra->idx,
+ svextra->le_keylen,
+ svextra->le,
+ svextra->gc_info,
+ svextra->workdone,
+ svextra->stats_to_update,
+ svextra->logical_rows_delta);
+ svextra->setval_r = 0;
+ }
+}
+
+// We are already past the msn filter (in toku_ft_bn_apply_msg(), which calls
+// do_update()), so capturing the msn in the setval_extra_s is not strictly
+// required. The alternative would be to put a dummy msn in the messages
+// created by setval_fun(), but preserving the original msn seems cleaner and
+// it preserves accountability at a lower layer.
+static int do_update(
+ ft_update_func update_fun,
+ const DESCRIPTOR_S* desc,
+ BASEMENTNODE bn,
+ const ft_msg &msg,
+ uint32_t idx,
+ LEAFENTRY le,
+ void* keydata,
+ uint32_t keylen,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
+
+ LEAFENTRY le_for_update;
+ DBT key;
+ const DBT *keyp;
+ const DBT *update_function_extra;
+ DBT vdbt;
+ const DBT *vdbtp;
+
+ // the location of data depends on whether this is a regular or
+ // broadcast update
+ if (msg.type() == FT_UPDATE) {
+ // key is passed in with command (should be same as from le)
+ // update function extra is passed in with command
+ keyp = msg.kdbt();
+ update_function_extra = msg.vdbt();
+ } else {
+ invariant(msg.type() == FT_UPDATE_BROADCAST_ALL);
+ // key is not passed in with broadcast, it comes from le
+ // update function extra is passed in with command
+ paranoid_invariant(le); // for broadcast updates, we just hit all leafentries
+ // so this cannot be null
+ paranoid_invariant(keydata);
+ paranoid_invariant(keylen);
+ paranoid_invariant(msg.kdbt()->size == 0);
+ keyp = toku_fill_dbt(&key, keydata, keylen);
+ update_function_extra = msg.vdbt();
+ }
+ toku_ft_status_note_update(msg.type() == FT_UPDATE_BROADCAST_ALL);
+
+ if (le && !le_latest_is_del(le)) {
+ // if the latest val exists, use it, and we'll use the leafentry later
+ uint32_t vallen;
+ void *valp = le_latest_val_and_len(le, &vallen);
+ vdbtp = toku_fill_dbt(&vdbt, valp, vallen);
+ } else {
+ // otherwise, the val and leafentry are both going to be null
+ vdbtp = NULL;
+ }
+ le_for_update = le;
+
+ struct setval_extra_s setval_extra = {
+ setval_tag,
+ false,
+ 0,
+ bn,
+ msg.msn(),
+ msg.xids(),
+ keyp,
+ idx,
+ keylen,
+ le_for_update,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta
+ };
+ // call handlerton's ft->update_fun(), which passes setval_extra
+ // to setval_fun()
+ FAKE_DB(db, desc);
+ int r = update_fun(
+ &db,
+ keyp,
+ vdbtp,
+ update_function_extra,
+ setval_fun,
+ &setval_extra);
+
+ if (r == 0) { r = setval_extra.setval_r; }
+ return r;
+}
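+
+/* Illustrative sketch of a hypothetical update callback (the name and body are
+ * assumptions for illustration only, not part of this change): it reads the old
+ * value, computes the new one, and calls set_val() at most once; passing NULL to
+ * set_val() instead turns the update into a delete of the row (see setval_fun above).
+ *
+ * static int add_one_update_fun(DB *db, const DBT *key, const DBT *old_val,
+ * const DBT *extra, void (*set_val)(const DBT *, void *),
+ * void *set_extra) {
+ * uint64_t v = old_val ? *reinterpret_cast<const uint64_t *>(old_val->data) : 0;
+ * v += 1; // the "update" in this sketch is a simple increment
+ * DBT new_val;
+ * set_val(toku_fill_dbt(&new_val, &v, sizeof v), set_extra);
+ * return 0;
+ * }
+ */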
+
+// Should be renamed as something like "apply_msg_to_basement()."
+void toku_ft_bn_apply_msg(
+ const toku::comparator& cmp,
+ ft_update_func update_fun,
+ BASEMENTNODE bn,
+ const ft_msg& msg,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
+// Effect:
+// Put a msg into a leaf.
+// Calculate work done by message on leafnode and add it to caller's
+// workdone counter.
+// The leaf could end up "too big" or "too small". The caller must fix that up.
+ LEAFENTRY storeddata;
+ void* key = NULL;
+ uint32_t keylen = 0;
+
+ uint32_t num_klpairs;
+ int r;
+ struct toku_msg_leafval_heaviside_extra be(cmp, msg.kdbt());
+
+ unsigned int doing_seqinsert = bn->seqinsert;
+ bn->seqinsert = 0;
+
+ switch (msg.type()) {
+ case FT_INSERT_NO_OVERWRITE:
+ case FT_INSERT: {
+ uint32_t idx;
+ if (doing_seqinsert) {
+ idx = bn->data_buffer.num_klpairs();
+ DBT kdbt;
+ r = bn->data_buffer.fetch_key_and_len(idx-1, &kdbt.size, &kdbt.data);
+ if (r != 0) goto fz;
+ int c = toku_msg_leafval_heaviside(kdbt, be);
+ if (c >= 0) goto fz;
+ r = DB_NOTFOUND;
+ } else {
+ fz:
+ r = bn->data_buffer.find_zero<decltype(be), toku_msg_leafval_heaviside>(
+ be,
+ &storeddata,
+ &key,
+ &keylen,
+ &idx
+ );
+ }
+ if (r==DB_NOTFOUND) {
+ storeddata = 0;
+ } else {
+ assert_zero(r);
+ }
+ toku_ft_bn_apply_msg_once(
+ bn,
+ msg,
+ idx,
+ keylen,
+ storeddata,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
+
+ // if the insertion point is within a window of the right edge of
+ // the leaf then it is sequential
+ // window = min(32, number of leaf entries/16)
+ {
+ uint32_t s = bn->data_buffer.num_klpairs();
+ uint32_t w = s / 16;
+ if (w == 0) w = 1;
+ if (w > 32) w = 32;
+
+ // within the window?
+ if (s - idx <= w)
+ bn->seqinsert = doing_seqinsert + 1;
+ }
+ break;
+ }
+ case FT_DELETE_ANY:
+ case FT_ABORT_ANY:
+ case FT_COMMIT_ANY: {
+ uint32_t idx;
+ // Apply to all the matches
+
+ r = bn->data_buffer.find_zero<decltype(be), toku_msg_leafval_heaviside>(
+ be,
+ &storeddata,
+ &key,
+ &keylen,
+ &idx);
+ if (r == DB_NOTFOUND) break;
+ assert_zero(r);
+ toku_ft_bn_apply_msg_once(
+ bn,
+ msg,
+ idx,
+ keylen,
+ storeddata,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
+ break;
+ }
+ case FT_OPTIMIZE_FOR_UPGRADE:
+ // fall through so that optimize_for_upgrade performs the rest of the optimize logic
+ case FT_COMMIT_BROADCAST_ALL:
+ case FT_OPTIMIZE:
+ // Apply to all leafentries
+ num_klpairs = bn->data_buffer.num_klpairs();
+ for (uint32_t idx = 0; idx < num_klpairs; ) {
+ void* curr_keyp = NULL;
+ uint32_t curr_keylen = 0;
+ r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_keyp);
+ assert_zero(r);
+ int deleted = 0;
+ if (!le_is_clean(storeddata)) { //If already clean, nothing to do.
+ // message application code needs a key in order to determine
+ // how much work was done by this message. since this is a
+ // broadcast message, we have to create a new message whose
+ // key is the current le's key.
+ DBT curr_keydbt;
+ ft_msg curr_msg(
+ toku_fill_dbt(&curr_keydbt, curr_keyp, curr_keylen),
+ msg.vdbt(),
+ msg.type(),
+ msg.msn(),
+ msg.xids());
+ toku_ft_bn_apply_msg_once(
+ bn,
+ curr_msg,
+ idx,
+ curr_keylen,
+ storeddata,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
+ // at this point, we cannot trust msg.kdbt to be valid.
+ uint32_t new_dmt_size = bn->data_buffer.num_klpairs();
+ if (new_dmt_size != num_klpairs) {
+ paranoid_invariant(new_dmt_size + 1 == num_klpairs);
+ //Item was deleted.
+ deleted = 1;
+ }
+ }
+ if (deleted)
+ num_klpairs--;
+ else
+ idx++;
+ }
+ paranoid_invariant(bn->data_buffer.num_klpairs() == num_klpairs);
+
+ break;
+ case FT_COMMIT_BROADCAST_TXN:
+ case FT_ABORT_BROADCAST_TXN:
+ // Apply to all leafentries if txn is represented
+ num_klpairs = bn->data_buffer.num_klpairs();
+ for (uint32_t idx = 0; idx < num_klpairs; ) {
+ void* curr_keyp = NULL;
+ uint32_t curr_keylen = 0;
+ r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_keyp);
+ assert_zero(r);
+ int deleted = 0;
+ if (le_has_xids(storeddata, msg.xids())) {
+ // message application code needs a key in order to determine
+ // how much work was done by this message. since this is a
+ // broadcast message, we have to create a new message whose key
+ // is the current le's key.
+ DBT curr_keydbt;
+ ft_msg curr_msg(
+ toku_fill_dbt(&curr_keydbt, curr_keyp, curr_keylen),
+ msg.vdbt(),
+ msg.type(),
+ msg.msn(),
+ msg.xids());
+ toku_ft_bn_apply_msg_once(
+ bn,
+ curr_msg,
+ idx,
+ curr_keylen,
+ storeddata,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
+ uint32_t new_dmt_size = bn->data_buffer.num_klpairs();
+ if (new_dmt_size != num_klpairs) {
+ paranoid_invariant(new_dmt_size + 1 == num_klpairs);
+ //Item was deleted.
+ deleted = 1;
+ }
+ }
+ if (deleted)
+ num_klpairs--;
+ else
+ idx++;
+ }
+ paranoid_invariant(bn->data_buffer.num_klpairs() == num_klpairs);
+
+ break;
+ case FT_UPDATE: {
+ uint32_t idx;
+ r = bn->data_buffer.find_zero<decltype(be), toku_msg_leafval_heaviside>(
+ be,
+ &storeddata,
+ &key,
+ &keylen,
+ &idx
+ );
+ if (r==DB_NOTFOUND) {
+ {
+ //Point to msg's copy of the key so we don't worry about le being freed
+ //TODO: 46 MAYBE Get rid of this when le_apply message memory is better handled
+ key = msg.kdbt()->data;
+ keylen = msg.kdbt()->size;
+ }
+ r = do_update(
+ update_fun,
+ cmp.get_descriptor(),
+ bn,
+ msg,
+ idx,
+ NULL,
+ NULL,
+ 0,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
+ } else if (r==0) {
+ r = do_update(
+ update_fun,
+ cmp.get_descriptor(),
+ bn,
+ msg,
+ idx,
+ storeddata,
+ key,
+ keylen,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
+ } // otherwise, a worse error, just return it
+ break;
+ }
+ case FT_UPDATE_BROADCAST_ALL: {
+ // apply to all leafentries.
+ uint32_t idx = 0;
+ uint32_t num_leafentries_before;
+ // This is used to keep the logical row count from changing when this
+ // message is applied: otherwise the delta would come back as a negative
+ // number equal to the number of leaf entries visited and drive the ft
+ // header's row count toward 0. This message does not change the number
+ // of rows, so we accumulate the delta into this throwaway value instead.
+ int64_t temp_logical_rows_delta = 0;
+ while (idx < (num_leafentries_before = bn->data_buffer.num_klpairs())) {
+ void* curr_key = nullptr;
+ uint32_t curr_keylen = 0;
+ r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_key);
+ assert_zero(r);
+
+ //TODO: 46 replace this with something better than cloning key
+ // TODO: (Zardosht) This may be unnecessary now, due to how the key
+ // is handled in the bndata. Investigate and determine
+ char clone_mem[curr_keylen]; // only lasts one loop, alloca would overflow (end of function)
+ memcpy((void*)clone_mem, curr_key, curr_keylen);
+ curr_key = (void*)clone_mem;
+
+ // This is broken below. Have a compilation error checked
+ // in as a reminder
+ r = do_update(
+ update_fun,
+ cmp.get_descriptor(),
+ bn,
+ msg,
+ idx,
+ storeddata,
+ curr_key,
+ curr_keylen,
+ gc_info,
+ workdone,
+ stats_to_update,
+ &temp_logical_rows_delta);
+ assert_zero(r);
+
+ if (num_leafentries_before == bn->data_buffer.num_klpairs()) {
+ // we didn't delete something, so increment the index.
+ idx++;
+ }
+ }
+ break;
+ }
+ case FT_NONE: break; // don't do anything
+ }
+
+ return;
+}
+
+static inline int
+key_msn_cmp(const DBT *a, const DBT *b, const MSN amsn, const MSN bmsn, const toku::comparator &cmp) {
+ int r = cmp(a, b);
+ if (r == 0) {
+ if (amsn.msn > bmsn.msn) {
+ r = +1;
+ } else if (amsn.msn < bmsn.msn) {
+ r = -1;
+ } else {
+ r = 0;
+ }
+ }
+ return r;
+}
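+
+// Example: two messages with equal keys are ordered by MSN, so (key "a", msn 5)
+// sorts before (key "a", msn 9). This keeps multiple messages for the same key
+// in injection order within a message tree.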
+
+int toku_msg_buffer_key_msn_heaviside(const int32_t &offset, const struct toku_msg_buffer_key_msn_heaviside_extra &extra) {
+ MSN query_msn;
+ DBT query_key;
+ extra.msg_buffer->get_message_key_msn(offset, &query_key, &query_msn);
+ return key_msn_cmp(&query_key, extra.key, query_msn, extra.msn, extra.cmp);
+}
+
+int toku_msg_buffer_key_msn_cmp(const struct toku_msg_buffer_key_msn_cmp_extra &extra, const int32_t &ao, const int32_t &bo) {
+ MSN amsn, bmsn;
+ DBT akey, bkey;
+ extra.msg_buffer->get_message_key_msn(ao, &akey, &amsn);
+ extra.msg_buffer->get_message_key_msn(bo, &bkey, &bmsn);
+ return key_msn_cmp(&akey, &bkey, amsn, bmsn, extra.cmp);
+}
+
+// Effect: Enqueue the message represented by the parameters into the
+// bnc's buffer, and put it in either the fresh or stale message tree,
+// or the broadcast list.
+static void bnc_insert_msg(NONLEAF_CHILDINFO bnc, const ft_msg &msg, bool is_fresh, const toku::comparator &cmp) {
+ int r = 0;
+ int32_t offset;
+ bnc->msg_buffer.enqueue(msg, is_fresh, &offset);
+ enum ft_msg_type type = msg.type();
+ if (ft_msg_type_applies_once(type)) {
+ DBT key;
+ toku_fill_dbt(&key, msg.kdbt()->data, msg.kdbt()->size);
+ struct toku_msg_buffer_key_msn_heaviside_extra extra(cmp, &bnc->msg_buffer, &key, msg.msn());
+ if (is_fresh) {
+ r = bnc->fresh_message_tree.insert<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(offset, extra, nullptr);
+ assert_zero(r);
+ } else {
+ r = bnc->stale_message_tree.insert<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(offset, extra, nullptr);
+ assert_zero(r);
+ }
+ } else {
+ invariant(ft_msg_type_applies_all(type) || ft_msg_type_does_nothing(type));
+ const uint32_t idx = bnc->broadcast_list.size();
+ r = bnc->broadcast_list.insert_at(offset, idx);
+ assert_zero(r);
+ }
+}
+
+// This is only exported for tests.
+void toku_bnc_insert_msg(NONLEAF_CHILDINFO bnc, const void *key, uint32_t keylen, const void *data, uint32_t datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const toku::comparator &cmp)
+{
+ DBT k, v;
+ ft_msg msg(toku_fill_dbt(&k, key, keylen), toku_fill_dbt(&v, data, datalen), type, msn, xids);
+ bnc_insert_msg(bnc, msg, is_fresh, cmp);
+}
+
+// append a msg to a nonleaf node's child buffer
+static void ft_append_msg_to_child_buffer(const toku::comparator &cmp, FTNODE node,
+ int childnum, const ft_msg &msg, bool is_fresh) {
+ paranoid_invariant(BP_STATE(node,childnum) == PT_AVAIL);
+ bnc_insert_msg(BNC(node, childnum), msg, is_fresh, cmp);
+ node->set_dirty();
+}
+
+// This is only exported for tests.
+void toku_ft_append_to_child_buffer(const toku::comparator &cmp, FTNODE node, int childnum, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const DBT *key, const DBT *val) {
+ ft_msg msg(key, val, type, msn, xids);
+ ft_append_msg_to_child_buffer(cmp, node, childnum, msg, is_fresh);
+}
+
+static void ft_nonleaf_msg_once_to_child(const toku::comparator &cmp, FTNODE node, int target_childnum, const ft_msg &msg, bool is_fresh, size_t flow_deltas[])
+// Previously we had passive-aggressive promotion, but that causes a lot of I/O at checkpoint time. So now we are just putting it in the buffer here.
+// Also we don't worry about the node getting overfull here. It's the caller's problem.
+{
+ unsigned int childnum = (target_childnum >= 0
+ ? target_childnum
+ : toku_ftnode_which_child(node, msg.kdbt(), cmp));
+ ft_append_msg_to_child_buffer(cmp, node, childnum, msg, is_fresh);
+ NONLEAF_CHILDINFO bnc = BNC(node, childnum);
+ bnc->flow[0] += flow_deltas[0];
+ bnc->flow[1] += flow_deltas[1];
+}
+
+// TODO: Remove me, I'm boring.
+static int ft_compare_pivot(const toku::comparator &cmp, const DBT *key, const DBT *pivot) {
+ return cmp(key, pivot);
+}
+
+/* Find the leftmost child that may contain the key.
+ * If the key exists it will be in the child whose number
+ * is the return value of this function.
+ */
+int toku_ftnode_which_child(FTNODE node, const DBT *k, const toku::comparator &cmp) {
+ // a funny case of no pivots
+ if (node->n_children <= 1) return 0;
+
+ DBT pivot;
+
+ // check the last key to optimize seq insertions
+ int n = node->n_children-1;
+ int c = ft_compare_pivot(cmp, k, node->pivotkeys.fill_pivot(n - 1, &pivot));
+ if (c > 0) return n;
+
+ // binary search the pivots
+ int lo = 0;
+ int hi = n-1; // skip the last one, we checked it above
+ int mi;
+ while (lo < hi) {
+ mi = (lo + hi) / 2;
+ c = ft_compare_pivot(cmp, k, node->pivotkeys.fill_pivot(mi, &pivot));
+ if (c > 0) {
+ lo = mi+1;
+ continue;
+ }
+ if (c < 0) {
+ hi = mi;
+ continue;
+ }
+ return mi;
+ }
+ return lo;
+}
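+
+// Illustrative example (hypothetical pivots): with pivot keys {10, 20} and three
+// children, a search key of 5 or 10 routes to child 0 (keys <= 10), 15 or 20
+// routes to child 1 (keys > 10 and <= 20), and 25 routes to child 2 (keys > 20).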
+
+// Used for HOT.
+int toku_ftnode_hot_next_child(FTNODE node, const DBT *k, const toku::comparator &cmp) {
+ DBT pivot;
+ int low = 0;
+ int hi = node->n_children - 1;
+ int mi;
+ while (low < hi) {
+ mi = (low + hi) / 2;
+ int r = ft_compare_pivot(cmp, k, node->pivotkeys.fill_pivot(mi, &pivot));
+ if (r > 0) {
+ low = mi + 1;
+ } else if (r < 0) {
+ hi = mi;
+ } else {
+ // if they were exactly equal, then we want the sub-tree under
+ // the next pivot.
+ return mi + 1;
+ }
+ }
+ invariant(low == hi);
+ return low;
+}
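+
+// Illustrative example (hypothetical pivots): with pivot keys {10, 20}, a key of
+// 10 returns child 1 rather than child 0, because HOT continues with the subtree
+// under the next pivot once a pivot key has been reached exactly.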
+
+void toku_ftnode_save_ct_pair(CACHEKEY UU(key), void *value_data, PAIR p) {
+ FTNODE CAST_FROM_VOIDP(node, value_data);
+ node->ct_pair = p;
+}
+
+static void
+ft_nonleaf_msg_all(const toku::comparator &cmp, FTNODE node, const ft_msg &msg, bool is_fresh, size_t flow_deltas[])
+// Effect: Put the message into a nonleaf node. We put it into all children, possibly causing the children to become reactive.
+// We don't do the splitting and merging. That's up to the caller after doing all the puts it wants to do.
+// The re_array[i] gets set to the reactivity of any modified child i. (And there may be several such children.)
+{
+ for (int i = 0; i < node->n_children; i++) {
+ ft_nonleaf_msg_once_to_child(cmp, node, i, msg, is_fresh, flow_deltas);
+ }
+}
+
+static void
+ft_nonleaf_put_msg(const toku::comparator &cmp, FTNODE node, int target_childnum, const ft_msg &msg, bool is_fresh, size_t flow_deltas[])
+// Effect: Put the message into a nonleaf node. We may put it into a child, possibly causing the child to become reactive.
+// We don't do the splitting and merging. That's up to the caller after doing all the puts it wants to do.
+// The re_array[i] gets set to the reactivity of any modified child i. (And there may be several such children.)
+//
+{
+
+ //
+ // see comments in toku_ft_leaf_apply_msg
+ // to understand why we handle setting
+ // node->max_msn_applied_to_node_on_disk here,
+ // and don't do it in toku_ftnode_put_msg
+ //
+ MSN msg_msn = msg.msn();
+ invariant(msg_msn.msn > node->max_msn_applied_to_node_on_disk.msn);
+ node->max_msn_applied_to_node_on_disk = msg_msn;
+
+ if (ft_msg_type_applies_once(msg.type())) {
+ ft_nonleaf_msg_once_to_child(cmp, node, target_childnum, msg, is_fresh, flow_deltas);
+ } else if (ft_msg_type_applies_all(msg.type())) {
+ ft_nonleaf_msg_all(cmp, node, msg, is_fresh, flow_deltas);
+ } else {
+ paranoid_invariant(ft_msg_type_does_nothing(msg.type()));
+ }
+}
+
+// Garbage collect one leaf entry.
+static void
+ft_basement_node_gc_once(BASEMENTNODE bn,
+ uint32_t index,
+ void* keyp,
+ uint32_t keylen,
+ LEAFENTRY leaf_entry,
+ txn_gc_info *gc_info,
+ STAT64INFO_S * delta)
+{
+ paranoid_invariant(leaf_entry);
+
+ // Don't run garbage collection on non-mvcc leaf entries.
+ if (leaf_entry->type != LE_MVCC) {
+ goto exit;
+ }
+
+ // Don't run garbage collection if this leafentry decides it's not worth it.
+ if (!toku_le_worth_running_garbage_collection(leaf_entry, gc_info)) {
+ goto exit;
+ }
+
+ LEAFENTRY new_leaf_entry;
+ new_leaf_entry = NULL;
+
+ // The mempool doesn't free itself. When it allocates new memory,
+ // this pointer will be set to the older memory that must now be
+ // freed.
+ void * maybe_free;
+ maybe_free = NULL;
+
+ // These will represent the number of bytes and rows changed as
+ // part of the garbage collection.
+ int64_t numbytes_delta;
+ int64_t numrows_delta;
+ toku_le_garbage_collect(leaf_entry,
+ &bn->data_buffer,
+ index,
+ keyp,
+ keylen,
+ gc_info,
+ &new_leaf_entry,
+ &numbytes_delta);
+
+ numrows_delta = 0;
+ if (new_leaf_entry) {
+ numrows_delta = 0;
+ } else {
+ numrows_delta = -1;
+ }
+
+ // If we created a new mempool buffer we must free the
+ // old/original buffer.
+ if (maybe_free) {
+ toku_free(maybe_free);
+ }
+
+ // Update stats.
+ bn->stat64_delta.numrows += numrows_delta;
+ bn->stat64_delta.numbytes += numbytes_delta;
+ delta->numrows += numrows_delta;
+ delta->numbytes += numbytes_delta;
+
+exit:
+ return;
+}
+
+// Garbage collect all leaf entries for a given basement node.
+static void
+basement_node_gc_all_les(BASEMENTNODE bn,
+ txn_gc_info *gc_info,
+ STAT64INFO_S * delta)
+{
+ int r = 0;
+ uint32_t index = 0;
+ uint32_t num_leafentries_before;
+ while (index < (num_leafentries_before = bn->data_buffer.num_klpairs())) {
+ void* keyp = NULL;
+ uint32_t keylen = 0;
+ LEAFENTRY leaf_entry;
+ r = bn->data_buffer.fetch_klpair(index, &leaf_entry, &keylen, &keyp);
+ assert_zero(r);
+ ft_basement_node_gc_once(
+ bn,
+ index,
+ keyp,
+ keylen,
+ leaf_entry,
+ gc_info,
+ delta
+ );
+ // Check if the leaf entry was deleted or not.
+ if (num_leafentries_before == bn->data_buffer.num_klpairs()) {
+ ++index;
+ }
+ }
+}
+
+// Garbage collect all leaf entries in all basement nodes.
+static void
+ft_leaf_gc_all_les(FT ft, FTNODE node, txn_gc_info *gc_info)
+{
+ toku_ftnode_assert_fully_in_memory(node);
+ paranoid_invariant_zero(node->height);
+ // Loop through each leaf entry, garbage collecting as we go.
+ for (int i = 0; i < node->n_children; ++i) {
+ // Perform the garbage collection.
+ BASEMENTNODE bn = BLB(node, i);
+ STAT64INFO_S delta;
+ delta.numrows = 0;
+ delta.numbytes = 0;
+ basement_node_gc_all_les(bn, gc_info, &delta);
+ toku_ft_update_stats(&ft->in_memory_stats, delta);
+ }
+}
+
+void toku_ftnode_leaf_run_gc(FT ft, FTNODE node) {
+ TOKULOGGER logger = toku_cachefile_logger(ft->cf);
+ if (logger) {
+ TXN_MANAGER txn_manager = toku_logger_get_txn_manager(logger);
+ txn_manager_state txn_state_for_gc(txn_manager);
+ txn_state_for_gc.init();
+ TXNID oldest_referenced_xid_for_simple_gc = toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager);
+
+ // Perform full garbage collection.
+ //
+ // - txn_state_for_gc
+ // a fresh snapshot of the transaction system.
+ // - oldest_referenced_xid_for_simple_gc
+ // the oldest xid in any live list as of right now - suitable for simple gc
+ // - node->oldest_referenced_xid_known
+ // the last known oldest referenced xid for this node and any unapplied messages.
+ // it is a lower bound on the actual oldest referenced xid - but because there
+ // may be abort messages above us, we need to be careful to only use this value
+ // for implicit promotion (as opposed to the oldest referenced xid for simple gc)
+ //
+ // The node has its own oldest referenced xid because it must be careful not to implicitly promote
+ // provisional entries for transactions that are no longer live, but may have abort messages
+ // somewhere above us in the tree.
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_for_simple_gc,
+ node->oldest_referenced_xid_known,
+ true);
+ ft_leaf_gc_all_les(ft, node, &gc_info);
+ }
+}
+
+void toku_ftnode_put_msg(
+ const toku::comparator &cmp,
+ ft_update_func update_fun,
+ FTNODE node,
+ int target_childnum,
+ const ft_msg &msg,
+ bool is_fresh,
+ txn_gc_info* gc_info,
+ size_t flow_deltas[],
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
+// Effect: Push message into the subtree rooted at NODE.
+// If NODE is a leaf, then
+// put message into leaf, applying it to the leafentries
+// If NODE is a nonleaf, then push the message into the message buffer(s) of the relevant child(ren).
+// The node may become overfull. That's not our problem.
+ toku_ftnode_assert_fully_in_memory(node);
+ //
+ // see comments in toku_ft_leaf_apply_msg
+ // to understand why we don't handle setting
+ // node->max_msn_applied_to_node_on_disk here,
+ // and instead defer to these functions
+ //
+ if (node->height==0) {
+ toku_ft_leaf_apply_msg(
+ cmp,
+ update_fun,
+ node,
+ target_childnum, msg,
+ gc_info,
+ nullptr,
+ stats_to_update,
+ logical_rows_delta);
+ } else {
+ ft_nonleaf_put_msg(
+ cmp,
+ node,
+ target_childnum,
+ msg,
+ is_fresh,
+ flow_deltas);
+ }
+}
+
+// Effect: applies the message to the leaf if the appropriate basement node is
+// in memory. This function is called during message injection and/or
+// flushing, so the entire node MUST be in memory.
+void toku_ft_leaf_apply_msg(
+ const toku::comparator& cmp,
+ ft_update_func update_fun,
+ FTNODE node,
+ int target_childnum, // which child to inject to, or -1 if unknown
+ const ft_msg& msg,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta) {
+
+ VERIFY_NODE(t, node);
+ toku_ftnode_assert_fully_in_memory(node);
+
+ //
+ // Because toku_ft_leaf_apply_msg is called with the intent of permanently
+ // applying a message to a leaf node (meaning the message is permanently applied
+ // and will be purged from the system after this call, as opposed to
+ // toku_apply_ancestors_messages_to_node, which applies a message
+ // for a query, but the message may still reside in the system and
+ // be reapplied later), we mark the node as dirty and
+ // take the opportunity to update node->max_msn_applied_to_node_on_disk.
+ //
+ node->set_dirty();
+
+ //
+ // we cannot blindly update node->max_msn_applied_to_node_on_disk,
+ // we must check to see if the msn is greater than the one already stored,
+ // because the message may have already been applied earlier (via
+ // toku_apply_ancestors_messages_to_node) to answer a query
+ //
+ // This is why we handle node->max_msn_applied_to_node_on_disk both here
+ // and in ft_nonleaf_put_msg, as opposed to in one location, toku_ftnode_put_msg.
+ //
+ MSN msg_msn = msg.msn();
+ if (msg_msn.msn > node->max_msn_applied_to_node_on_disk.msn) {
+ node->max_msn_applied_to_node_on_disk = msg_msn;
+ }
+
+ if (ft_msg_type_applies_once(msg.type())) {
+ unsigned int childnum = (target_childnum >= 0
+ ? target_childnum
+ : toku_ftnode_which_child(node, msg.kdbt(), cmp));
+ BASEMENTNODE bn = BLB(node, childnum);
+ if (msg.msn().msn > bn->max_msn_applied.msn) {
+ bn->max_msn_applied = msg.msn();
+ toku_ft_bn_apply_msg(
+ cmp,
+ update_fun,
+ bn,
+ msg,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
+ } else {
+ toku_ft_status_note_msn_discard();
+ }
+ } else if (ft_msg_type_applies_all(msg.type())) {
+ for (int childnum=0; childnum<node->n_children; childnum++) {
+ if (msg.msn().msn > BLB(node, childnum)->max_msn_applied.msn) {
+ BLB(node, childnum)->max_msn_applied = msg.msn();
+ toku_ft_bn_apply_msg(
+ cmp,
+ update_fun,
+ BLB(node, childnum),
+ msg,
+ gc_info,
+ workdone,
+ stats_to_update,
+ logical_rows_delta);
+ } else {
+ toku_ft_status_note_msn_discard();
+ }
+ }
+ } else if (!ft_msg_type_does_nothing(msg.type())) {
+ invariant(ft_msg_type_does_nothing(msg.type()));
+ }
+ VERIFY_NODE(t, node);
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/node.h b/storage/tokudb/PerconaFT/ft/node.h
new file mode 100644
index 00000000..61093f3e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/node.h
@@ -0,0 +1,608 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/bndata.h"
+#include "ft/comparator.h"
+#include "ft/ft.h"
+#include "ft/msg_buffer.h"
+
+/* Pivot keys.
+ * Child 0's keys are <= pivotkeys[0].
+ * Child 1's keys are <= pivotkeys[1].
+ * Child 1's keys are > pivotkeys[0].
+ * etc
+ */
+class ftnode_pivot_keys {
+public:
+ // effect: create an empty set of pivot keys
+ void create_empty();
+
+ // effect: create pivot keys by copying the given DBT array
+ void create_from_dbts(const DBT *keys, int n);
+
+ // effect: create pivot keys as a clone of an existing set of pivotkeys
+ void create_from_pivot_keys(const ftnode_pivot_keys &pivotkeys);
+
+ void destroy();
+
+ // effect: deserialize pivot keys previously serialized by serialize_to_wbuf()
+ void deserialize_from_rbuf(struct rbuf *rb, int n);
+
+ // returns: unowned DBT representing the i'th pivot key
+ DBT get_pivot(int i) const;
+
+ // effect: fills a DBT with the i'th pivot key
+ // returns: the given dbt
+ DBT *fill_pivot(int i, DBT *dbt) const;
+
+ // effect: insert a pivot into the i'th position, shifting others to the right
+ void insert_at(const DBT *key, int i);
+
+ // effect: append pivotkeys to the end of our own pivot keys
+ void append(const ftnode_pivot_keys &pivotkeys);
+
+ // effect: replace the pivot at the i'th position
+ void replace_at(const DBT *key, int i);
+
+ // effect: removes the i'th pivot key, shifting others to the left
+ void delete_at(int i);
+
+ // effect: split the pivot keys, removing all pivots at position greater
+ // than or equal to `i' and storing them in *other
+ // requires: *other is empty (size == 0)
+ void split_at(int i, ftnode_pivot_keys *other);
+
+ // effect: serialize pivot keys to a wbuf
+ // requires: wbuf has at least ftnode_pivot_keys::total_size() bytes available
+ void serialize_to_wbuf(struct wbuf *wb) const;
+
+ int num_pivots() const;
+
+ // return: the total size of this data structure
+ size_t total_size() const;
+
+ // return: the sum of the keys sizes of each pivot (for serialization)
+ size_t serialized_size() const;
+
+private:
+ inline size_t _align4(size_t x) const {
+ return roundup_to_multiple(4, x);
+ }
+
+ // effect: create pivot keys, in fixed key format, by copying the given key array
+ void _create_from_fixed_keys(const char *fixedkeys, size_t fixed_keylen, int n);
+
+ char *_fixed_key(int i) const {
+ return &_fixed_keys[i * _fixed_keylen_aligned];
+ }
+
+ bool _fixed_format() const {
+ return _fixed_keys != nullptr;
+ }
+
+ void sanity_check() const;
+
+ void _insert_at_dbt(const DBT *key, int i);
+ void _append_dbt(const ftnode_pivot_keys &pivotkeys);
+ void _replace_at_dbt(const DBT *key, int i);
+ void _delete_at_dbt(int i);
+ void _split_at_dbt(int i, ftnode_pivot_keys *other);
+
+ void _insert_at_fixed(const DBT *key, int i);
+ void _append_fixed(const ftnode_pivot_keys &pivotkeys);
+ void _replace_at_fixed(const DBT *key, int i);
+ void _delete_at_fixed(int i);
+ void _split_at_fixed(int i, ftnode_pivot_keys *other);
+
+ // adds/destroys keys at a certain index (in dbt format),
+ // maintaining _total_size, but not _num_pivots
+ void _add_key_dbt(const DBT *key, int i);
+ void _destroy_key_dbt(int i);
+
+ // conversions to and from packed key array format
+ void _convert_to_dbt_format();
+ void _convert_to_fixed_format();
+
+ // If every key is _fixed_keylen long, then _fixed_key is a
+ // packed array of keys..
+ char *_fixed_keys;
+ // The actual length of the fixed key
+ size_t _fixed_keylen;
+ // The aligned length that we use for fixed key storage
+ size_t _fixed_keylen_aligned;
+
+ // ..otherwise _fixed_keys is null and we store an array of dbts,
+ // each representing a key. this is simpler but less cache-efficient.
+ DBT *_dbt_keys;
+
+ int _num_pivots;
+ size_t _total_size;
+};
+
+extern int writing_rollback;
+
+extern "C" {
+extern uint force_recovery;
+}
+
+// TODO: class me up
+struct ftnode {
+ // max_msn_applied that will be written to disk
+ MSN max_msn_applied_to_node_on_disk;
+ unsigned int flags;
+ // Which block number is this node?
+ BLOCKNUM blocknum;
+ // What version of the data structure?
+ int layout_version;
+ // different (<) from layout_version if upgraded from a previous version
+ // (useful for debugging)
+ int layout_version_original;
+ // transient, not serialized to disk, (useful for debugging)
+ int layout_version_read_from_disk;
+ // build_id (svn rev number) of software that wrote this node to disk
+ uint32_t build_id;
+ // height is always >= 0. 0 for leaf, >0 for nonleaf.
+ int height;
+ int dirty_;
+ uint32_t fullhash;
+
+ void set_dirty() {
+ if(force_recovery) assert(writing_rollback);
+ dirty_ = 1;
+ }
+
+ void clear_dirty() {
+ dirty_ = 0;
+ }
+
+ bool dirty() {
+ return dirty_;
+ }
+
+ // for internal nodes, if n_children==fanout+1 then the tree needs to be
+ // rebalanced. for leaf nodes, represents number of basement nodes
+ int n_children;
+ ftnode_pivot_keys pivotkeys;
+
+ // What's the oldest referenced xid that this node knows about? The real
+ // oldest referenced xid might be younger, but this is our best estimate.
+ // We use it as a heuristic to transition provisional mvcc entries from
+ // provisional to committed (from implicitly committed to really committed).
+ //
+ // A better heuristic would be the oldest live txnid, but we use this since
+ // it still works well most of the time, and it's readily available on the
+ // inject code path.
+ TXNID oldest_referenced_xid_known;
+
+ // array of size n_children, consisting of ftnode partitions.
+ // each one is associated with a child.
+ // for internal nodes, the ith partition corresponds to the ith message buffer.
+ // for leaf nodes, the ith partition corresponds to the ith basement node.
+ struct ftnode_partition *bp;
+ struct ctpair *ct_pair;
+};
+typedef struct ftnode *FTNODE;
+
+// data of an available partition of a leaf ftnode
+struct ftnode_leaf_basement_node {
+ bn_data data_buffer;
+ unsigned int seqinsert; // number of sequential inserts to this leaf
+ MSN max_msn_applied; // max message sequence number applied
+ bool stale_ancestor_messages_applied;
+ // current count of rows added or removed as a result of message application
+ // to this basement node, gets reset when node is undirtied.
+ // Used to back out the tree-scoped logical row count (LRC) if the node is evicted but not persisted
+ int64_t logical_rows_delta;
+ STAT64INFO_S stat64_delta; // change in stat64 counters since basement was last written to disk
+};
+typedef struct ftnode_leaf_basement_node *BASEMENTNODE;
+
+enum pt_state { // declare this to be packed so that when used below it will only take 1 byte.
+ PT_INVALID = 0,
+ PT_ON_DISK = 1,
+ PT_COMPRESSED = 2,
+ PT_AVAIL = 3};
+
+enum ftnode_child_tag {
+ BCT_INVALID = 0,
+ BCT_NULL,
+ BCT_SUBBLOCK,
+ BCT_LEAF,
+ BCT_NONLEAF
+};
+
+typedef toku::omt<int32_t> off_omt_t;
+typedef toku::omt<int32_t, int32_t, true> marked_off_omt_t;
+
+// data of an available partition of a nonleaf ftnode
+struct ftnode_nonleaf_childinfo {
+ message_buffer msg_buffer;
+ off_omt_t broadcast_list;
+ marked_off_omt_t fresh_message_tree;
+ off_omt_t stale_message_tree;
+ uint64_t flow[2]; // current and last checkpoint
+};
+typedef struct ftnode_nonleaf_childinfo *NONLEAF_CHILDINFO;
+
+typedef struct ftnode_child_pointer {
+ union {
+ struct sub_block *subblock;
+ struct ftnode_nonleaf_childinfo *nonleaf;
+ struct ftnode_leaf_basement_node *leaf;
+ } u;
+ enum ftnode_child_tag tag;
+} FTNODE_CHILD_POINTER;
+
+struct ftnode_disk_data {
+ //
+    // Stores the offset of the partition on disk relative to the ftnode, and its length; both are needed to read a partition off of disk.
+    // The values are only meaningful if the node is clean. If the node is dirty, they are meaningless.
+    // START is the distance from the end of the compressed node_info data to the beginning of the compressed partition.
+    // SIZE is the size of the compressed partition.
+    // Rationale: We cannot store the size from the beginning of the node since we don't know how big the header will be.
+    // However, later, when we are doing aligned writes, we won't be able to store the size from the end since we want things to align.
+ uint32_t start;
+ uint32_t size;
+};
+typedef struct ftnode_disk_data *FTNODE_DISK_DATA;
+
+// TODO: Turn these into functions instead of macros
+#define BP_START(node_dd,i) ((node_dd)[i].start)
+#define BP_SIZE(node_dd,i) ((node_dd)[i].size)
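A minimal sketch of the inline replacements the TODO above suggests (hypothetical names, not part of the current API); real functions would give type checking that the macros cannot:

static inline uint32_t bp_start(FTNODE_DISK_DATA node_dd, int i) {
    // same lookup as BP_START, with a real signature for type checking
    return node_dd[i].start;
}

static inline uint32_t bp_size(FTNODE_DISK_DATA node_dd, int i) {
    // same lookup as BP_SIZE
    return node_dd[i].size;
}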
+
+// a ftnode partition, associated with a child of a node
+struct ftnode_partition {
+ // the following three variables are used for nonleaf nodes
+ // for leaf nodes, they are meaningless
+ BLOCKNUM blocknum; // blocknum of child
+
+ // How many bytes worth of work was performed by messages in each buffer.
+ uint64_t workdone;
+
+ //
+ // pointer to the partition. Depending on the state, they may be different things
+ // if state == PT_INVALID, then the node was just initialized and ptr == NULL
+ // if state == PT_ON_DISK, then ptr == NULL
+ // if state == PT_COMPRESSED, then ptr points to a struct sub_block*
+ // if state == PT_AVAIL, then ptr is:
+ // a struct ftnode_nonleaf_childinfo for internal nodes,
+ // a struct ftnode_leaf_basement_node for leaf nodes
+ //
+ struct ftnode_child_pointer ptr;
+ //
+ // at any time, the partitions may be in one of the following three states (stored in pt_state):
+ // PT_INVALID - means that the partition was just initialized
+ // PT_ON_DISK - means that the partition is not in memory and needs to be read from disk. To use, must read off disk and decompress
+ // PT_COMPRESSED - means that the partition is compressed in memory. To use, must decompress
+ // PT_AVAIL - means the partition is decompressed and in memory
+ //
+ enum pt_state state; // make this an enum to make debugging easier.
+
+    // clock count used by pe_callback to determine if a node should be evicted or not
+ // for now, saturating the count at 1
+ uint8_t clock_count;
+};
+
+//
+// TODO: Fix all these names
+// Organize declarations
+// Fix widespread parameter ordering inconsistencies
+//
+BASEMENTNODE toku_create_empty_bn(void);
+BASEMENTNODE toku_create_empty_bn_no_buffer(void); // create a basement node with a null buffer.
+NONLEAF_CHILDINFO toku_clone_nl(NONLEAF_CHILDINFO orig_childinfo);
+BASEMENTNODE toku_clone_bn(BASEMENTNODE orig_bn);
+NONLEAF_CHILDINFO toku_create_empty_nl(void);
+void destroy_basement_node (BASEMENTNODE bn);
+void destroy_nonleaf_childinfo (NONLEAF_CHILDINFO nl);
+void toku_destroy_ftnode_internals(FTNODE node);
+void toku_ftnode_free (FTNODE *node);
+bool toku_ftnode_fully_in_memory(FTNODE node);
+void toku_ftnode_assert_fully_in_memory(FTNODE node);
+void toku_evict_bn_from_memory(FTNODE node, int childnum, FT ft);
+BASEMENTNODE toku_detach_bn(FTNODE node, int childnum);
+void toku_ftnode_update_disk_stats(FTNODE ftnode, FT ft, bool for_checkpoint);
+void toku_ftnode_clone_partitions(FTNODE node, FTNODE cloned_node);
+
+void toku_initialize_empty_ftnode(FTNODE node, BLOCKNUM blocknum, int height, int num_children,
+ int layout_version, unsigned int flags);
+
+int toku_ftnode_which_child(FTNODE node, const DBT *k, const toku::comparator &cmp);
+void toku_ftnode_save_ct_pair(CACHEKEY key, void *value_data, PAIR p);
+
+//
+// TODO: put the heaviside functions into their respective 'struct .*extra;' namespaces
+//
+struct toku_msg_buffer_key_msn_heaviside_extra {
+ const toku::comparator &cmp;
+ message_buffer *msg_buffer;
+ const DBT *key;
+ MSN msn;
+ toku_msg_buffer_key_msn_heaviside_extra(const toku::comparator &c, message_buffer *mb, const DBT *k, MSN m) :
+ cmp(c), msg_buffer(mb), key(k), msn(m) {
+ }
+};
+int toku_msg_buffer_key_msn_heaviside(const int32_t &v, const struct toku_msg_buffer_key_msn_heaviside_extra &extra);
+
+struct toku_msg_buffer_key_msn_cmp_extra {
+ const toku::comparator &cmp;
+ message_buffer *msg_buffer;
+ toku_msg_buffer_key_msn_cmp_extra(const toku::comparator &c, message_buffer *mb) :
+ cmp(c), msg_buffer(mb) {
+ }
+};
+int toku_msg_buffer_key_msn_cmp(const struct toku_msg_buffer_key_msn_cmp_extra &extrap, const int &a, const int &b);
+
+struct toku_msg_leafval_heaviside_extra {
+ const toku::comparator &cmp;
+ DBT const *const key;
+ toku_msg_leafval_heaviside_extra(const toku::comparator &c, const DBT *k) :
+ cmp(c), key(k) {
+ }
+};
+int toku_msg_leafval_heaviside(DBT const &kdbt, const struct toku_msg_leafval_heaviside_extra &be);
+
+unsigned int toku_bnc_nbytesinbuf(NONLEAF_CHILDINFO bnc);
+int toku_bnc_n_entries(NONLEAF_CHILDINFO bnc);
+long toku_bnc_memory_size(NONLEAF_CHILDINFO bnc);
+long toku_bnc_memory_used(NONLEAF_CHILDINFO bnc);
+void toku_bnc_insert_msg(NONLEAF_CHILDINFO bnc, const void *key, uint32_t keylen, const void *data, uint32_t datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const toku::comparator &cmp);
+void toku_bnc_empty(NONLEAF_CHILDINFO bnc);
+void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID parent_oldest_referenced_xid_known);
+bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) __attribute__((const, nonnull));
+
+bool toku_ftnode_nonleaf_is_gorged(FTNODE node, uint32_t nodesize);
+uint32_t toku_ftnode_leaf_num_entries(FTNODE node);
+void toku_ftnode_leaf_rebalance(FTNODE node, unsigned int basementnodesize);
+
+void toku_ftnode_leaf_run_gc(FT ft, FTNODE node);
+
+enum reactivity {
+ RE_STABLE,
+ RE_FUSIBLE,
+ RE_FISSIBLE
+};
+
+enum reactivity toku_ftnode_get_reactivity(FT ft, FTNODE node);
+enum reactivity toku_ftnode_get_nonleaf_reactivity(FTNODE node, unsigned int fanout);
+enum reactivity toku_ftnode_get_leaf_reactivity(FTNODE node, uint32_t nodesize);
+
+inline const char* toku_ftnode_get_cachefile_fname_in_env(FTNODE node) {
+ if (node->ct_pair) {
+ CACHEFILE cf = toku_pair_get_cachefile(node->ct_pair);
+ if (cf) {
+ return toku_cachefile_fname_in_env(cf);
+ }
+ }
+ return nullptr;
+}
+
+/**
+ * Finds the next child for HOT to flush to, given that everything up to
+ * and including k has been flattened.
+ *
+ * If k falls between pivots in node, then we return the childnum where k
+ * lies.
+ *
+ * If k is equal to some pivot, then we return the next (to the right)
+ * childnum.
+ */
+int toku_ftnode_hot_next_child(
+ FTNODE node,
+ const DBT* k,
+ const toku::comparator &cmp);
+
+void toku_ftnode_put_msg(
+ const toku::comparator& cmp,
+ ft_update_func update_fun,
+ FTNODE node,
+ int target_childnum,
+ const ft_msg& msg,
+ bool is_fresh,
+ txn_gc_info* gc_info,
+ size_t flow_deltas[],
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta);
+
+void toku_ft_bn_apply_msg_once(
+ BASEMENTNODE bn,
+ const ft_msg& msg,
+ uint32_t idx,
+ uint32_t le_keylen,
+ LEAFENTRY le,
+ txn_gc_info* gc_info,
+ uint64_t* workdonep,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta);
+
+void toku_ft_bn_apply_msg(
+ const toku::comparator& cmp,
+ ft_update_func update_fun,
+ BASEMENTNODE bn,
+ const ft_msg& msg,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta);
+
+void toku_ft_leaf_apply_msg(
+ const toku::comparator& cmp,
+ ft_update_func update_fun,
+ FTNODE node,
+ int target_childnum,
+ const ft_msg& msg,
+ txn_gc_info* gc_info,
+ uint64_t* workdone,
+ STAT64INFO stats_to_update,
+ int64_t* logical_rows_delta);
+
+//
+// Message management for orthopush
+//
+
+struct ancestors {
+ // This is the root node if next is NULL (since the root has no ancestors)
+ FTNODE node;
+    // Which buffer holds messages destined for the node whose ancestors this list represents.
+ int childnum;
+ struct ancestors *next;
+};
+typedef struct ancestors *ANCESTORS;
+
+void toku_ft_bnc_move_messages_to_stale(FT ft, NONLEAF_CHILDINFO bnc);
+
+void toku_move_ftnode_messages_to_stale(FT ft, FTNODE node);
+
+// TODO: Should ft_handle just be FT?
+class pivot_bounds;
+void toku_apply_ancestors_messages_to_node(FT_HANDLE t, FTNODE node, ANCESTORS ancestors,
+ const pivot_bounds &bounds,
+ bool *msgs_applied, int child_to_read);
+
+bool toku_ft_leaf_needs_ancestors_messages(FT ft, FTNODE node, ANCESTORS ancestors,
+ const pivot_bounds &bounds,
+ MSN *const max_msn_in_path, int child_to_read);
+
+void toku_ft_bn_update_max_msn(FTNODE node, MSN max_msn_applied, int child_to_read);
+
+struct ft_search;
+int toku_ft_search_which_child(const toku::comparator &cmp, FTNODE node, ft_search *search);
+
+//
+// internal node inline functions
+// TODO: Turn the macros into real functions
+//
+
+static inline void set_BNULL(FTNODE node, int i) {
+ paranoid_invariant(i >= 0);
+ paranoid_invariant(i < node->n_children);
+ node->bp[i].ptr.tag = BCT_NULL;
+}
+
+static inline bool is_BNULL (FTNODE node, int i) {
+ paranoid_invariant(i >= 0);
+ paranoid_invariant(i < node->n_children);
+ return node->bp[i].ptr.tag == BCT_NULL;
+}
+
+static inline NONLEAF_CHILDINFO BNC(FTNODE node, int i) {
+ paranoid_invariant(i >= 0);
+ paranoid_invariant(i < node->n_children);
+ FTNODE_CHILD_POINTER p = node->bp[i].ptr;
+ paranoid_invariant(p.tag==BCT_NONLEAF);
+ return p.u.nonleaf;
+}
+
+static inline void set_BNC(FTNODE node, int i, NONLEAF_CHILDINFO nl) {
+ paranoid_invariant(i >= 0);
+ paranoid_invariant(i < node->n_children);
+ FTNODE_CHILD_POINTER *p = &node->bp[i].ptr;
+ p->tag = BCT_NONLEAF;
+ p->u.nonleaf = nl;
+}
+
+static inline BASEMENTNODE BLB(FTNODE node, int i) {
+ paranoid_invariant(i >= 0);
+ // The optimizer really doesn't like it when we compare
+ // i to n_children as signed integers. So we assert that
+ // n_children is in fact positive before doing a comparison
+ // on the values forcibly cast to unsigned ints.
+ paranoid_invariant(node->n_children > 0);
+ paranoid_invariant((unsigned) i < (unsigned) node->n_children);
+ FTNODE_CHILD_POINTER p = node->bp[i].ptr;
+ paranoid_invariant(p.tag==BCT_LEAF);
+ return p.u.leaf;
+}
+
+static inline void set_BLB(FTNODE node, int i, BASEMENTNODE bn) {
+ paranoid_invariant(i >= 0);
+ paranoid_invariant(i < node->n_children);
+ FTNODE_CHILD_POINTER *p = &node->bp[i].ptr;
+ p->tag = BCT_LEAF;
+ p->u.leaf = bn;
+}
+
+static inline struct sub_block *BSB(FTNODE node, int i) {
+ paranoid_invariant(i >= 0);
+ paranoid_invariant(i < node->n_children);
+ FTNODE_CHILD_POINTER p = node->bp[i].ptr;
+ paranoid_invariant(p.tag==BCT_SUBBLOCK);
+ return p.u.subblock;
+}
+
+static inline void set_BSB(FTNODE node, int i, struct sub_block *sb) {
+ paranoid_invariant(i >= 0);
+ paranoid_invariant(i < node->n_children);
+ FTNODE_CHILD_POINTER *p = &node->bp[i].ptr;
+ p->tag = BCT_SUBBLOCK;
+ p->u.subblock = sb;
+}
+
+// ftnode partition macros
+// BP stands for ftnode_partition
+#define BP_BLOCKNUM(node,i) ((node)->bp[i].blocknum)
+#define BP_STATE(node,i) ((node)->bp[i].state)
+#define BP_WORKDONE(node, i)((node)->bp[i].workdone)
+
+//
+// macros for managing a node's clock
+// Should be managed by ft-ops.c, NOT by serialize/deserialize
+//
+
+//
+// BP_TOUCH_CLOCK uses a compare and swap because multiple threads
+// that have a read lock on an internal node may try to touch the clock
+// simultaneously
+//
+#define BP_TOUCH_CLOCK(node, i) ((node)->bp[i].clock_count = 1)
+#define BP_SWEEP_CLOCK(node, i) ((node)->bp[i].clock_count = 0)
+#define BP_SHOULD_EVICT(node, i) ((node)->bp[i].clock_count == 0)
+// not crazy about having these two here, one is for the case where we create new
+// nodes, such as in splits and creating new roots, and the other is for when
+// we are deserializing a node and not all bp's are touched
+#define BP_INIT_TOUCHED_CLOCK(node, i) ((node)->bp[i].clock_count = 1)
+#define BP_INIT_UNTOUCHED_CLOCK(node, i) ((node)->bp[i].clock_count = 0)
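To illustrate how these clock macros are meant to interact (a sketch only; the actual eviction decisions are made by the partial-eviction callback, and the hypothetical sweep below is not code from the tree):

// Hypothetical second-chance sweep over one node's partitions.
static void sweep_node_partitions(FTNODE node) {
    for (int i = 0; i < node->n_children; i++) {
        if (!BP_SHOULD_EVICT(node, i)) {
            // The partition was touched since the last sweep; clear its clock
            // so it must be touched again (BP_TOUCH_CLOCK) to survive the next
            // sweep. Partitions whose clock is already 0 are eviction candidates.
            BP_SWEEP_CLOCK(node, i);
        }
    }
}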
+
+// ftnode leaf basementnode macros,
+#define BLB_MAX_MSN_APPLIED(node,i) (BLB(node,i)->max_msn_applied)
+#define BLB_MAX_DSN_APPLIED(node,i) (BLB(node,i)->max_dsn_applied)
+#define BLB_DATA(node,i) (&(BLB(node,i)->data_buffer))
+#define BLB_NBYTESINDATA(node,i) (BLB_DATA(node,i)->get_disk_size())
+#define BLB_SEQINSERT(node,i) (BLB(node,i)->seqinsert)
+#define BLB_LRD(node, i) (BLB(node,i)->logical_rows_delta)
diff --git a/storage/tokudb/PerconaFT/ft/pivotkeys.cc b/storage/tokudb/PerconaFT/ft/pivotkeys.cc
new file mode 100644
index 00000000..b941ac62
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/pivotkeys.cc
@@ -0,0 +1,438 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <string>
+
+#include "portability/memory.h"
+
+#include "ft/node.h"
+#include "ft/serialize/rbuf.h"
+#include "ft/serialize/wbuf.h"
+
+void ftnode_pivot_keys::create_empty() {
+ _num_pivots = 0;
+ _total_size = 0;
+ _fixed_keys = nullptr;
+ _fixed_keylen = 0;
+ _fixed_keylen_aligned = 0;
+ _dbt_keys = nullptr;
+}
+
+void ftnode_pivot_keys::create_from_dbts(const DBT *keys, int n) {
+ create_empty();
+ _num_pivots = n;
+
+ // see if every key has the same length
+ bool keys_same_size = true;
+ for (int i = 1; i < _num_pivots; i++) {
+ if (keys[i].size != keys[i - 1].size) {
+ keys_same_size = false;
+ break;
+ }
+ }
+
+ if (keys_same_size && _num_pivots > 0) {
+ // if so, store pivots in a tightly packed array of fixed length keys
+ _fixed_keylen = keys[0].size;
+ _fixed_keylen_aligned = _align4(_fixed_keylen);
+ _total_size = _fixed_keylen_aligned * _num_pivots;
+ XMALLOC_N_ALIGNED(64, _total_size, _fixed_keys);
+ for (int i = 0; i < _num_pivots; i++) {
+ invariant(keys[i].size == _fixed_keylen);
+ memcpy(_fixed_key(i), keys[i].data, _fixed_keylen);
+ }
+ } else {
+ // otherwise we'll just store the pivots in an array of dbts
+ XMALLOC_N_ALIGNED(64, _num_pivots, _dbt_keys);
+ for (int i = 0; i < _num_pivots; i++) {
+ size_t size = keys[i].size;
+ toku_memdup_dbt(&_dbt_keys[i], keys[i].data, size);
+ _total_size += size;
+ }
+ }
+
+ sanity_check();
+}
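As a rough illustration of the fixed-format layout chosen above (the 4-byte alignment is an assumption drawn from the sanity_check() invariants at the end of this file; align4 below is a hypothetical standalone stand-in for _align4):

// Hypothetical helper mirroring what _align4 is assumed to do: round a key
// length up to the next multiple of 4.
static inline size_t align4(size_t keylen) { return (keylen + 3) & ~(size_t)3; }

// Example with 10-byte keys: align4(10) == 12, so pivot i begins at byte
// i * 12 of _fixed_keys, and _total_size for 5 pivots is 5 * 12 == 60 bytes.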
+
+void ftnode_pivot_keys::_create_from_fixed_keys(const char *fixedkeys, size_t fixed_keylen, int n) {
+ create_empty();
+ _num_pivots = n;
+ _fixed_keylen = fixed_keylen;
+ _fixed_keylen_aligned = _align4(fixed_keylen);
+ _total_size = _fixed_keylen_aligned * _num_pivots;
+ XMEMDUP_N(_fixed_keys, fixedkeys, _total_size);
+}
+
+// effect: create pivot keys as a clone of an existing set of pivotkeys
+void ftnode_pivot_keys::create_from_pivot_keys(const ftnode_pivot_keys &pivotkeys) {
+ if (pivotkeys._fixed_format()) {
+ _create_from_fixed_keys(pivotkeys._fixed_keys, pivotkeys._fixed_keylen, pivotkeys._num_pivots);
+ } else {
+ create_from_dbts(pivotkeys._dbt_keys, pivotkeys._num_pivots);
+ }
+
+ sanity_check();
+}
+
+void ftnode_pivot_keys::destroy() {
+ if (_dbt_keys != nullptr) {
+ for (int i = 0; i < _num_pivots; i++) {
+ toku_destroy_dbt(&_dbt_keys[i]);
+ }
+ toku_free(_dbt_keys);
+ _dbt_keys = nullptr;
+ }
+ if (_fixed_keys != nullptr) {
+ toku_free(_fixed_keys);
+ _fixed_keys = nullptr;
+ }
+ _fixed_keylen = 0;
+ _fixed_keylen_aligned = 0;
+ _num_pivots = 0;
+ _total_size = 0;
+}
+
+void ftnode_pivot_keys::_convert_to_fixed_format() {
+ invariant(!_fixed_format());
+
+ // convert to a tightly packed array of fixed length keys
+ _fixed_keylen = _dbt_keys[0].size;
+ _fixed_keylen_aligned = _align4(_fixed_keylen);
+ _total_size = _fixed_keylen_aligned * _num_pivots;
+ XMALLOC_N_ALIGNED(64, _total_size, _fixed_keys);
+ for (int i = 0; i < _num_pivots; i++) {
+ invariant(_dbt_keys[i].size == _fixed_keylen);
+ memcpy(_fixed_key(i), _dbt_keys[i].data, _fixed_keylen);
+ }
+
+ // destroy the dbt array format
+ for (int i = 0; i < _num_pivots; i++) {
+ toku_destroy_dbt(&_dbt_keys[i]);
+ }
+ toku_free(_dbt_keys);
+ _dbt_keys = nullptr;
+
+ invariant(_fixed_format());
+ sanity_check();
+}
+
+void ftnode_pivot_keys::_convert_to_dbt_format() {
+ invariant(_fixed_format());
+
+    // convert to an array of dbts
+ REALLOC_N_ALIGNED(64, _num_pivots, _dbt_keys);
+ for (int i = 0; i < _num_pivots; i++) {
+ toku_memdup_dbt(&_dbt_keys[i], _fixed_key(i), _fixed_keylen);
+ }
+    // pivot sizes are not padded for alignment in the dbt format
+ _total_size = _num_pivots * _fixed_keylen;
+
+ // destroy the fixed key format
+ toku_free(_fixed_keys);
+ _fixed_keys = nullptr;
+ _fixed_keylen = 0;
+ _fixed_keylen_aligned = 0;
+
+ invariant(!_fixed_format());
+ sanity_check();
+}
+
+void ftnode_pivot_keys::deserialize_from_rbuf(struct rbuf *rb, int n) {
+ _num_pivots = n;
+ _total_size = 0;
+ _fixed_keys = nullptr;
+ _fixed_keylen = 0;
+ _dbt_keys = nullptr;
+
+ XMALLOC_N_ALIGNED(64, _num_pivots, _dbt_keys);
+ bool keys_same_size = true;
+ for (int i = 0; i < _num_pivots; i++) {
+ const void *pivotkeyptr;
+ uint32_t size;
+ rbuf_bytes(rb, &pivotkeyptr, &size);
+ toku_memdup_dbt(&_dbt_keys[i], pivotkeyptr, size);
+ _total_size += size;
+ if (i > 0 && keys_same_size && _dbt_keys[i].size != _dbt_keys[i - 1].size) {
+ // not all keys are the same size, we'll stick to the dbt array format
+ keys_same_size = false;
+ }
+ }
+
+ if (keys_same_size && _num_pivots > 0) {
+ _convert_to_fixed_format();
+ }
+
+ sanity_check();
+}
+
+DBT ftnode_pivot_keys::get_pivot(int i) const {
+ paranoid_invariant(i < _num_pivots);
+ if (_fixed_format()) {
+ paranoid_invariant(i * _fixed_keylen_aligned < _total_size);
+ DBT dbt;
+ toku_fill_dbt(&dbt, _fixed_key(i), _fixed_keylen);
+ return dbt;
+ } else {
+ return _dbt_keys[i];
+ }
+}
+
+DBT *ftnode_pivot_keys::fill_pivot(int i, DBT *dbt) const {
+ paranoid_invariant(i < _num_pivots);
+ if (_fixed_format()) {
+ toku_fill_dbt(dbt, _fixed_key(i), _fixed_keylen);
+ } else {
+ toku_copyref_dbt(dbt, _dbt_keys[i]);
+ }
+ return dbt;
+}
+
+void ftnode_pivot_keys::_add_key_dbt(const DBT *key, int i) {
+ toku_clone_dbt(&_dbt_keys[i], *key);
+ _total_size += _dbt_keys[i].size;
+}
+
+void ftnode_pivot_keys::_destroy_key_dbt(int i) {
+ invariant(_total_size >= _dbt_keys[i].size);
+ _total_size -= _dbt_keys[i].size;
+ toku_destroy_dbt(&_dbt_keys[i]);
+}
+
+void ftnode_pivot_keys::_insert_at_dbt(const DBT *key, int i) {
+ // make space for a new pivot, slide existing keys to the right
+ REALLOC_N_ALIGNED(64, _num_pivots + 1, _dbt_keys);
+ memmove(&_dbt_keys[i + 1], &_dbt_keys[i], (_num_pivots - i) * sizeof(DBT));
+ _add_key_dbt(key, i);
+}
+
+void ftnode_pivot_keys::_insert_at_fixed(const DBT *key, int i) {
+ REALLOC_N_ALIGNED(64, (_num_pivots + 1) * _fixed_keylen_aligned, _fixed_keys);
+ // TODO: This is not going to be valgrind-safe, because we do not initialize the space
+ // between _fixed_keylen and _fixed_keylen_aligned (but we probably should)
+ memmove(_fixed_key(i + 1), _fixed_key(i), (_num_pivots - i) * _fixed_keylen_aligned);
+ memcpy(_fixed_key(i), key->data, _fixed_keylen);
+ _total_size += _fixed_keylen_aligned;
+}
+
+void ftnode_pivot_keys::insert_at(const DBT *key, int i) {
+ invariant(i <= _num_pivots); // it's ok to insert at the end, so we check <= n
+
+ // if the new key doesn't have the same size, we can't be in fixed format
+ if (_fixed_format() && key->size != _fixed_keylen) {
+ _convert_to_dbt_format();
+ }
+
+ if (_fixed_format()) {
+ _insert_at_fixed(key, i);
+ } else {
+ _insert_at_dbt(key, i);
+ }
+ _num_pivots++;
+
+ invariant(total_size() > 0);
+}
+
+void ftnode_pivot_keys::_append_dbt(const ftnode_pivot_keys &pivotkeys) {
+ REALLOC_N_ALIGNED(64, _num_pivots + pivotkeys._num_pivots, _dbt_keys);
+ bool other_fixed = pivotkeys._fixed_format();
+ for (int i = 0; i < pivotkeys._num_pivots; i++) {
+ size_t size = other_fixed ? pivotkeys._fixed_keylen :
+ pivotkeys._dbt_keys[i].size;
+ toku_memdup_dbt(&_dbt_keys[_num_pivots + i],
+ other_fixed ? pivotkeys._fixed_key(i) :
+ pivotkeys._dbt_keys[i].data,
+ size);
+ _total_size += size;
+ }
+}
+
+void ftnode_pivot_keys::_append_fixed(const ftnode_pivot_keys &pivotkeys) {
+ if (pivotkeys._fixed_format() && pivotkeys._fixed_keylen == _fixed_keylen) {
+ // other pivotkeys have the same fixed keylen
+ REALLOC_N_ALIGNED(64, (_num_pivots + pivotkeys._num_pivots) * _fixed_keylen_aligned, _fixed_keys);
+ memcpy(_fixed_key(_num_pivots), pivotkeys._fixed_keys, pivotkeys._total_size);
+ _total_size += pivotkeys._total_size;
+ } else {
+        // must convert to dbt format, the other pivotkeys have keys of a different length
+ _convert_to_dbt_format();
+ _append_dbt(pivotkeys);
+ }
+}
+
+void ftnode_pivot_keys::append(const ftnode_pivot_keys &pivotkeys) {
+ if (_fixed_format()) {
+ _append_fixed(pivotkeys);
+ } else {
+ _append_dbt(pivotkeys);
+ }
+ _num_pivots += pivotkeys._num_pivots;
+
+ sanity_check();
+}
+
+void ftnode_pivot_keys::_replace_at_dbt(const DBT *key, int i) {
+ _destroy_key_dbt(i);
+ _add_key_dbt(key, i);
+}
+
+void ftnode_pivot_keys::_replace_at_fixed(const DBT *key, int i) {
+ if (key->size == _fixed_keylen) {
+ memcpy(_fixed_key(i), key->data, _fixed_keylen);
+ } else {
+ // must convert to dbt format, replacement key has different length
+ _convert_to_dbt_format();
+ _replace_at_dbt(key, i);
+ }
+}
+
+void ftnode_pivot_keys::replace_at(const DBT *key, int i) {
+ if (i < _num_pivots) {
+ if (_fixed_format()) {
+ _replace_at_fixed(key, i);
+ } else {
+ _replace_at_dbt(key, i);
+ }
+ } else {
+ invariant(i == _num_pivots); // appending to the end is ok
+ insert_at(key, i);
+ }
+ invariant(total_size() > 0);
+}
+
+void ftnode_pivot_keys::_delete_at_fixed(int i) {
+ memmove(_fixed_key(i), _fixed_key(i + 1), (_num_pivots - 1 - i) * _fixed_keylen_aligned);
+ _total_size -= _fixed_keylen_aligned;
+}
+
+void ftnode_pivot_keys::_delete_at_dbt(int i) {
+ // slide over existing keys, then shrink down to size
+ _destroy_key_dbt(i);
+ memmove(&_dbt_keys[i], &_dbt_keys[i + 1], (_num_pivots - 1 - i) * sizeof(DBT));
+ REALLOC_N_ALIGNED(64, _num_pivots - 1, _dbt_keys);
+}
+
+void ftnode_pivot_keys::delete_at(int i) {
+ invariant(i < _num_pivots);
+
+ if (_fixed_format()) {
+ _delete_at_fixed(i);
+ } else {
+ _delete_at_dbt(i);
+ }
+
+ _num_pivots--;
+}
+
+void ftnode_pivot_keys::_split_at_fixed(int i, ftnode_pivot_keys *other) {
+ // recreate the other set of pivots from index >= i
+ other->_create_from_fixed_keys(_fixed_key(i), _fixed_keylen, _num_pivots - i);
+
+ // shrink down to size
+ _total_size = i * _fixed_keylen_aligned;
+ REALLOC_N_ALIGNED(64, _total_size, _fixed_keys);
+}
+
+void ftnode_pivot_keys::_split_at_dbt(int i, ftnode_pivot_keys *other) {
+ // recreate the other set of pivots from index >= i
+ other->create_from_dbts(&_dbt_keys[i], _num_pivots - i);
+
+ // destroy everything greater, shrink down to size
+ for (int k = i; k < _num_pivots; k++) {
+ _destroy_key_dbt(k);
+ }
+ REALLOC_N_ALIGNED(64, i, _dbt_keys);
+}
+
+void ftnode_pivot_keys::split_at(int i, ftnode_pivot_keys *other) {
+ if (i < _num_pivots) {
+ if (_fixed_format()) {
+ _split_at_fixed(i, other);
+ } else {
+ _split_at_dbt(i, other);
+ }
+ _num_pivots = i;
+ }
+
+ sanity_check();
+}
+
+void ftnode_pivot_keys::serialize_to_wbuf(struct wbuf *wb) const {
+ bool fixed = _fixed_format();
+ size_t written = 0;
+ for (int i = 0; i < _num_pivots; i++) {
+ size_t size = fixed ? _fixed_keylen : _dbt_keys[i].size;
+ invariant(size);
+ wbuf_nocrc_bytes(wb, fixed ? _fixed_key(i) : _dbt_keys[i].data, size);
+ written += size;
+ }
+ invariant(written == serialized_size());
+}
+
+int ftnode_pivot_keys::num_pivots() const {
+ // if we have fixed size keys, the number of pivots should be consistent
+ paranoid_invariant(_fixed_keys == nullptr || (_total_size == _fixed_keylen_aligned * _num_pivots));
+ return _num_pivots;
+}
+
+size_t ftnode_pivot_keys::total_size() const {
+ // if we have fixed size keys, the total size should be consistent
+ paranoid_invariant(_fixed_keys == nullptr || (_total_size == _fixed_keylen_aligned * _num_pivots));
+ return _total_size;
+}
+
+size_t ftnode_pivot_keys::serialized_size() const {
+ // we only return the size that will be used when serialized, so we calculate based
+ // on the fixed keylen and not the aligned keylen.
+ return _fixed_format() ? _num_pivots * _fixed_keylen : _total_size;
+}
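Continuing the hypothetical 10-byte-key example from above, the in-memory and on-disk sizes diverge only in fixed format:

// Worked example (illustrative numbers only): 5 pivots of 10 bytes each.
//   in memory:  total_size()      == 5 * _align4(10) == 5 * 12 == 60 bytes
//   on disk:    serialized_size() == 5 * 10          == 50 bytes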
+
+void ftnode_pivot_keys::sanity_check() const {
+ if (_fixed_format()) {
+ invariant(_dbt_keys == nullptr);
+ invariant(_fixed_keylen_aligned == _align4(_fixed_keylen));
+ invariant(_num_pivots * _fixed_keylen <= _total_size);
+ invariant(_num_pivots * _fixed_keylen_aligned == _total_size);
+ } else {
+ invariant(_num_pivots == 0 || _dbt_keys != nullptr);
+ size_t size = 0;
+ for (int i = 0; i < _num_pivots; i++) {
+ size += _dbt_keys[i].size;
+ }
+ invariant(size == _total_size);
+ }
+}
diff --git a/storage/tokudb/PerconaFT/ft/serialize/block_allocator.cc b/storage/tokudb/PerconaFT/ft/serialize/block_allocator.cc
new file mode 100644
index 00000000..e64139f0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/block_allocator.cc
@@ -0,0 +1,260 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <algorithm>
+
+#include <string.h>
+
+#include "toku_portability.h"
+#include "portability/memory.h"
+#include "portability/toku_assert.h"
+#include "portability/toku_stdint.h"
+#include "portability/toku_stdlib.h"
+
+#include "ft/serialize/block_allocator.h"
+#include "ft/serialize/rbtree_mhs.h"
+
+#if defined(TOKU_DEBUG_PARANOID) && TOKU_DEBUG_PARANOID
+#define VALIDATE() Validate()
+#else
+#define VALIDATE()
+#endif
+
+void BlockAllocator::CreateInternal(uint64_t reserve_at_beginning,
+ uint64_t alignment) {
+ // the alignment must be at least 512 and aligned with 512 to work with
+ // direct I/O
+ invariant(alignment >= 512 && (alignment % 512) == 0);
+
+ _reserve_at_beginning = reserve_at_beginning;
+ _alignment = alignment;
+ _n_blocks = 0;
+ _n_bytes_in_use = reserve_at_beginning;
+ _tree = new MhsRbTree::Tree(alignment);
+}
+
+void BlockAllocator::Create(uint64_t reserve_at_beginning, uint64_t alignment) {
+ CreateInternal(reserve_at_beginning, alignment);
+ _tree->Insert({reserve_at_beginning, MAX_BYTE});
+ VALIDATE();
+}
+
+void BlockAllocator::Destroy() {
+ delete _tree;
+}
+
+void BlockAllocator::CreateFromBlockPairs(uint64_t reserve_at_beginning,
+ uint64_t alignment,
+ struct BlockPair *translation_pairs,
+ uint64_t n_blocks) {
+ CreateInternal(reserve_at_beginning, alignment);
+ _n_blocks = n_blocks;
+
+ struct BlockPair *XMALLOC_N(n_blocks, pairs);
+ memcpy(pairs, translation_pairs, n_blocks * sizeof(struct BlockPair));
+ std::sort(pairs, pairs + n_blocks);
+
+ if (pairs[0]._offset > reserve_at_beginning) {
+ _tree->Insert(
+ {reserve_at_beginning, pairs[0]._offset - reserve_at_beginning});
+ }
+ for (uint64_t i = 0; i < _n_blocks; i++) {
+ // Allocator does not support size 0 blocks. See
+ // block_allocator_free_block.
+ invariant(pairs[i]._size > 0);
+ invariant(pairs[i]._offset >= _reserve_at_beginning);
+ invariant(pairs[i]._offset % _alignment == 0);
+
+ _n_bytes_in_use += pairs[i]._size;
+
+ MhsRbTree::OUUInt64 free_size(MAX_BYTE);
+ MhsRbTree::OUUInt64 free_offset(pairs[i]._offset + pairs[i]._size);
+ if (i < n_blocks - 1) {
+ MhsRbTree::OUUInt64 next_offset(pairs[i + 1]._offset);
+ invariant(next_offset >= free_offset);
+ free_size = next_offset - free_offset;
+ if (free_size == 0)
+ continue;
+ }
+ _tree->Insert({free_offset, free_size});
+ }
+ toku_free(pairs);
+ VALIDATE();
+}
+
+// Effect: align a value by rounding up.
+static inline uint64_t Align(uint64_t value, uint64_t ba_alignment) {
+ return ((value + ba_alignment - 1) / ba_alignment) * ba_alignment;
+}
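For instance (illustrative values only), with the 512-byte alignment required for direct I/O:

//   Align(1,   512) == 512
//   Align(512, 512) == 512
//   Align(513, 512) == 1024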
+
+// Effect: Allocate a block. The resulting block must be aligned on the
+// ba->alignment (which to make direct_io happy must be a positive multiple of
+// 512).
+void BlockAllocator::AllocBlock(uint64_t size,
+ uint64_t *offset) {
+ // Allocator does not support size 0 blocks. See block_allocator_free_block.
+ invariant(size > 0);
+
+ _n_bytes_in_use += size;
+ *offset = _tree->Remove(size);
+
+ _n_blocks++;
+ VALIDATE();
+}
+
+// To support 0-sized blocks, we need to include size as an input to this
+// function.
+// All 0-sized blocks at the same offset can be considered identical, but
+// a 0-sized block can share offset with a non-zero sized block.
+// The non-zero sized block is not exchangeable with a zero sized block (or vice
+// versa), so inserting 0-sized blocks can cause corruption here.
+void BlockAllocator::FreeBlock(uint64_t offset, uint64_t size) {
+ VALIDATE();
+ _n_bytes_in_use -= size;
+ _tree->Insert({offset, size});
+ _n_blocks--;
+ VALIDATE();
+}
+
+uint64_t BlockAllocator::AllocatedLimit() const {
+ MhsRbTree::Node *max_node = _tree->MaxNode();
+ return rbn_offset(max_node).ToInt();
+}
+
+// Effect: Consider the blocks in sorted order. The reserved block at the
+// beginning is number 0. The next one is number 1 and so forth.
+// Return the offset and size of the block with that number.
+// Return 0 if there is a block with that number; return nonzero if b is too big.
+int BlockAllocator::NthBlockInLayoutOrder(uint64_t b,
+ uint64_t *offset,
+ uint64_t *size) {
+ MhsRbTree::Node *x, *y;
+ if (b == 0) {
+ *offset = 0;
+ *size = _reserve_at_beginning;
+ return 0;
+ } else if (b > _n_blocks) {
+ return -1;
+ } else {
+ x = _tree->MinNode();
+ for (uint64_t i = 1; i <= b; i++) {
+ y = x;
+ x = _tree->Successor(x);
+ }
+ *size = (rbn_offset(x) - (rbn_offset(y) + rbn_size(y))).ToInt();
+ *offset = (rbn_offset(y) + rbn_size(y)).ToInt();
+ return 0;
+ }
+}
+
+struct VisUnusedExtra {
+ TOKU_DB_FRAGMENTATION _report;
+ uint64_t _align;
+};
+
+static void VisUnusedCollector(void *extra,
+ MhsRbTree::Node *node,
+ uint64_t UU(depth)) {
+ struct VisUnusedExtra *v_e = (struct VisUnusedExtra *)extra;
+ TOKU_DB_FRAGMENTATION report = v_e->_report;
+ uint64_t alignm = v_e->_align;
+
+ MhsRbTree::OUUInt64 offset = rbn_offset(node);
+ MhsRbTree::OUUInt64 size = rbn_size(node);
+ MhsRbTree::OUUInt64 answer_offset(Align(offset.ToInt(), alignm));
+ uint64_t free_space = (offset + size - answer_offset).ToInt();
+ if (free_space > 0) {
+ report->unused_bytes += free_space;
+ report->unused_blocks++;
+ if (free_space > report->largest_unused_block) {
+ report->largest_unused_block = free_space;
+ }
+ }
+}
+// Requires: report->file_size_bytes is filled in
+// Requires: report->data_bytes is filled in
+// Requires: report->checkpoint_bytes_additional is filled in
+void BlockAllocator::UnusedStatistics(TOKU_DB_FRAGMENTATION report) {
+ invariant(_n_bytes_in_use ==
+ report->data_bytes + report->checkpoint_bytes_additional);
+
+ report->unused_bytes = 0;
+ report->unused_blocks = 0;
+ report->largest_unused_block = 0;
+ struct VisUnusedExtra extra = {report, _alignment};
+ _tree->InOrderVisitor(VisUnusedCollector, &extra);
+}
+
+void BlockAllocator::Statistics(TOKU_DB_FRAGMENTATION report) {
+ report->data_bytes = _n_bytes_in_use;
+ report->data_blocks = _n_blocks;
+ report->file_size_bytes = 0;
+ report->checkpoint_bytes_additional = 0;
+ UnusedStatistics(report);
+}
+
+struct ValidateExtra {
+ uint64_t _bytes;
+ MhsRbTree::Node *_pre_node;
+};
+static void VisUsedBlocksInOrder(void *extra,
+ MhsRbTree::Node *cur_node,
+ uint64_t UU(depth)) {
+ struct ValidateExtra *v_e = (struct ValidateExtra *)extra;
+ MhsRbTree::Node *pre_node = v_e->_pre_node;
+ // verify no overlaps
+ if (pre_node) {
+ invariant(rbn_size(pre_node) > 0);
+ invariant(rbn_offset(cur_node) >
+ rbn_offset(pre_node) + rbn_size(pre_node));
+ MhsRbTree::OUUInt64 used_space =
+ rbn_offset(cur_node) - (rbn_offset(pre_node) + rbn_size(pre_node));
+ v_e->_bytes += used_space.ToInt();
+ } else {
+ v_e->_bytes += rbn_offset(cur_node).ToInt();
+ }
+ v_e->_pre_node = cur_node;
+}
+
+void BlockAllocator::Validate() const {
+ _tree->ValidateBalance();
+ _tree->ValidateMhs();
+ struct ValidateExtra extra = {0, nullptr};
+ _tree->InOrderVisitor(VisUsedBlocksInOrder, &extra);
+ invariant(extra._bytes == _n_bytes_in_use);
+}
diff --git a/storage/tokudb/PerconaFT/ft/serialize/block_allocator.h b/storage/tokudb/PerconaFT/ft/serialize/block_allocator.h
new file mode 100644
index 00000000..648ea9a9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/block_allocator.h
@@ -0,0 +1,188 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+#include "portability/toku_pthread.h"
+#include "portability/toku_stdint.h"
+#include "portability/toku_stdlib.h"
+#include "ft/serialize/rbtree_mhs.h"
+
+// Block allocator.
+//
+// A block allocator manages the allocation of variable-sized blocks.
+// The translation of block numbers to addresses is handled elsewhere.
+// The allocation of block numbers is handled elsewhere.
+//
+// When creating a block allocator we also specify a certain-sized
+// block at the beginning that is preallocated (and cannot be allocated or
+// freed)
+//
+// We can allocate blocks of a particular size at an address chosen by the allocator.
+// We can free blocks.
+// We can determine the size of a block.
+#define MAX_BYTE 0xffffffffffffffff
+class BlockAllocator {
+ public:
+ static const size_t BLOCK_ALLOCATOR_ALIGNMENT = 4096;
+
+ // How much must be reserved at the beginning for the block?
+ // The actual header is 8+4+4+8+8_4+8+ the length of the db names + 1
+ // pointer for each root.
+ // So 4096 should be enough.
+ static const size_t BLOCK_ALLOCATOR_HEADER_RESERVE = 4096;
+
+ static_assert(BLOCK_ALLOCATOR_HEADER_RESERVE % BLOCK_ALLOCATOR_ALIGNMENT ==
+ 0,
+ "block allocator header must have proper alignment");
+
+ static const size_t BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE =
+ BLOCK_ALLOCATOR_HEADER_RESERVE * 2;
+
+ struct BlockPair {
+ uint64_t _offset;
+ uint64_t _size;
+ BlockPair(uint64_t o, uint64_t s) : _offset(o), _size(s) {}
+ int operator<(const struct BlockPair &rhs) const {
+ return _offset < rhs._offset;
+ }
+ int operator<(const uint64_t &o) const { return _offset < o; }
+ };
+
+ // Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING
+ // bytes are not put into a block.
+ // The default allocation strategy is first fit
+ // (BA_STRATEGY_FIRST_FIT)
+    //  All blocks start on a multiple of ALIGNMENT.
+ // Aborts if we run out of memory.
+ // Parameters
+ // reserve_at_beginning (IN) Size of reserved block at beginning.
+ // This size does not have to be aligned.
+ // alignment (IN) Block alignment.
+ void Create(uint64_t reserve_at_beginning, uint64_t alignment);
+
+ // Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING
+ // bytes are not put into a block.
+ // The allocator is initialized to contain `n_blocks' of BlockPairs,
+ // taken from `pairs'
+    //  All blocks start on a multiple of ALIGNMENT.
+ // Aborts if we run out of memory.
+ // Parameters
+ // pairs, unowned array of pairs to copy
+ // n_blocks, Size of pairs array
+ // reserve_at_beginning (IN) Size of reserved block at beginning.
+ // This size does not have to be aligned.
+ // alignment (IN) Block alignment.
+ void CreateFromBlockPairs(uint64_t reserve_at_beginning,
+ uint64_t alignment,
+ struct BlockPair *pairs,
+ uint64_t n_blocks);
+
+ // Effect: Destroy this block allocator
+ void Destroy();
+
+ // Effect: Allocate a block of the specified size at an address chosen by
+ // the allocator.
+ // Aborts if anything goes wrong.
+ // The block address will be a multiple of the alignment.
+ // Parameters:
+ // size (IN): The size of the block. (The size does not have to be
+ // aligned.)
+ // offset (OUT): The location of the block.
+ void AllocBlock(uint64_t size, uint64_t *offset);
+
+ // Effect: Free the block at offset.
+ // Requires: There must be a block currently allocated at that offset.
+ // Parameters:
+ // offset (IN): The offset of the block.
+ void FreeBlock(uint64_t offset, uint64_t size);
+
+ // Effect: Check to see if the block allocator is OK. This may take a long
+ // time.
+ // Usage Hints: Probably only use this for unit tests.
+ // TODO: Private?
+ void Validate() const;
+
+ // Effect: Return the unallocated block address of "infinite" size.
+ // That is, return the smallest address that is above all the allocated
+ // blocks.
+ uint64_t AllocatedLimit() const;
+
+ // Effect: Consider the blocks in sorted order. The reserved block at the
+ // beginning is number 0. The next one is number 1 and so forth.
+ // Return the offset and size of the block with that number.
+    //  Return 0 if there is a block with that number; return nonzero if b is too big.
+ // Rationale: This is probably useful only for tests.
+ int NthBlockInLayoutOrder(uint64_t b, uint64_t *offset, uint64_t *size);
+
+ // Effect: Fill in report to indicate how the file is used.
+ // Requires:
+ // report->file_size_bytes is filled in
+ // report->data_bytes is filled in
+ // report->checkpoint_bytes_additional is filled in
+ void UnusedStatistics(TOKU_DB_FRAGMENTATION report);
+
+ // Effect: Fill in report->data_bytes with the number of bytes in use
+ // Fill in report->data_blocks with the number of BlockPairs in use
+    //  Fill in unused statistics using this->UnusedStatistics()
+ // Requires:
+ // report->file_size is ignored on return
+ // report->checkpoint_bytes_additional is ignored on return
+ void Statistics(TOKU_DB_FRAGMENTATION report);
+
+ virtual ~BlockAllocator(){};
+
+ private:
+ void CreateInternal(uint64_t reserve_at_beginning, uint64_t alignment);
+
+ // How much to reserve at the beginning
+ uint64_t _reserve_at_beginning;
+ // Block alignment
+ uint64_t _alignment;
+ // How many blocks
+ uint64_t _n_blocks;
+ uint64_t _n_bytes_in_use;
+
+ // These blocks are sorted by address.
+ MhsRbTree::Tree *_tree;
+};
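A minimal usage sketch of this class, mirroring how block_table.cc below drives it (an illustration only, not code from the tree):

// Hypothetical standalone example.
static void block_allocator_example(void) {
    BlockAllocator ba;
    ba.Create(BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE,
              BlockAllocator::BLOCK_ALLOCATOR_ALIGNMENT);

    uint64_t offset;
    ba.AllocBlock(1000, &offset);   // offset comes back aligned
    ba.FreeBlock(offset, 1000);     // caller must remember the block's size

    ba.Destroy();
}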
diff --git a/storage/tokudb/PerconaFT/ft/serialize/block_table.cc b/storage/tokudb/PerconaFT/ft/serialize/block_table.cc
new file mode 100644
index 00000000..e3606c11
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/block_table.cc
@@ -0,0 +1,1157 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include "portability/memory.h"
+#include "portability/toku_assert.h"
+#include "portability/toku_portability.h"
+#include "portability/toku_pthread.h"
+
+// ugly but pragmatic, need access to dirty bits while holding translation lock
+// TODO: Refactor this (possibly with FT-301)
+#include "ft/ft-internal.h"
+
+// TODO: reorganize this dependency (FT-303)
+#include "ft/ft-ops.h" // for toku_maybe_truncate_file
+#include "ft/serialize/block_table.h"
+#include "ft/serialize/rbuf.h"
+#include "ft/serialize/wbuf.h"
+#include "ft/serialize/block_allocator.h"
+#include "util/nb_mutex.h"
+#include "util/scoped_malloc.h"
+
+
+toku_instr_key *block_table_mutex_key;
+toku_instr_key *safe_file_size_lock_mutex_key;
+toku_instr_key *safe_file_size_lock_rwlock_key;
+
+// indicates the end of a freelist
+static const BLOCKNUM freelist_null = {-1};
+
+// value of block_translation_pair.size if blocknum is unused
+static const DISKOFF size_is_free = (DISKOFF)-1;
+
+// value of block_translation_pair.u.diskoff if blocknum is used but does not
+// yet have a diskblock
+static const DISKOFF diskoff_unused = (DISKOFF)-2;
+
+void block_table::_mutex_lock() { toku_mutex_lock(&_mutex); }
+
+void block_table::_mutex_unlock() { toku_mutex_unlock(&_mutex); }
+
+// TODO: Move lock to FT
+void toku_ft_lock(FT ft) {
+ block_table *bt = &ft->blocktable;
+ bt->_mutex_lock();
+}
+
+// TODO: Move lock to FT
+void toku_ft_unlock(FT ft) {
+ block_table *bt = &ft->blocktable;
+ toku_mutex_assert_locked(&bt->_mutex);
+ bt->_mutex_unlock();
+}
+
+// There are two headers: the reserve must fit them both and be suitably
+// aligned.
+static_assert(BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE %
+ BlockAllocator::BLOCK_ALLOCATOR_ALIGNMENT ==
+ 0,
+ "Block allocator's header reserve must be suitibly aligned");
+static_assert(
+ BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE * 2 ==
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE,
+ "Block allocator's total header reserve must exactly fit two headers");
+
+// does NOT initialize the block allocator: the caller is responsible
+void block_table::_create_internal() {
+ memset(&_current, 0, sizeof(struct translation));
+ memset(&_inprogress, 0, sizeof(struct translation));
+ memset(&_checkpointed, 0, sizeof(struct translation));
+ memset(&_mutex, 0, sizeof(_mutex));
+ _bt_block_allocator = new BlockAllocator();
+ toku_mutex_init(*block_table_mutex_key, &_mutex, nullptr);
+ nb_mutex_init(*safe_file_size_lock_mutex_key,
+ *safe_file_size_lock_rwlock_key,
+ &_safe_file_size_lock);
+}
+
+// Fill in the checkpointed translation from the buffer, and copy checkpointed
+// to current.
+// The one read from disk is the last known checkpointed one, so we keep it in
+// place and then set current (which is never stored on disk) for current use.
+// The translation_buffer holds only the translation; we create the rest of the
+// block_table.
+int block_table::create_from_buffer(
+ int fd,
+ DISKOFF location_on_disk, // Location of translation_buffer
+ DISKOFF size_on_disk,
+ unsigned char *translation_buffer) {
+ // Does not initialize the block allocator
+ _create_internal();
+
+ // Deserialize the translation and copy it to current
+ int r = _translation_deserialize_from_buffer(
+ &_checkpointed, location_on_disk, size_on_disk, translation_buffer);
+ if (r != 0) {
+ return r;
+ }
+ _copy_translation(&_current, &_checkpointed, TRANSLATION_CURRENT);
+
+ // Determine the file size
+ int64_t file_size = 0;
+ r = toku_os_get_file_size(fd, &file_size);
+ lazy_assert_zero(r);
+ invariant(file_size >= 0);
+ _safe_file_size = file_size;
+
+ // Gather the non-empty translations and use them to create the block
+ // allocator
+ toku::scoped_malloc pairs_buf(_checkpointed.smallest_never_used_blocknum.b *
+ sizeof(struct BlockAllocator::BlockPair));
+ struct BlockAllocator::BlockPair *CAST_FROM_VOIDP(pairs, pairs_buf.get());
+ uint64_t n_pairs = 0;
+ for (int64_t i = 0; i < _checkpointed.smallest_never_used_blocknum.b; i++) {
+ struct block_translation_pair pair = _checkpointed.block_translation[i];
+ if (pair.size > 0) {
+ invariant(pair.u.diskoff != diskoff_unused);
+ pairs[n_pairs++] =
+ BlockAllocator::BlockPair(pair.u.diskoff, pair.size);
+ }
+ }
+
+ _bt_block_allocator->CreateFromBlockPairs(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE,
+ BlockAllocator::BLOCK_ALLOCATOR_ALIGNMENT,
+ pairs,
+ n_pairs);
+
+ return 0;
+}
+
+void block_table::create() {
+ // Does not initialize the block allocator
+ _create_internal();
+
+ _checkpointed.type = TRANSLATION_CHECKPOINTED;
+ _checkpointed.smallest_never_used_blocknum =
+ make_blocknum(RESERVED_BLOCKNUMS);
+ _checkpointed.length_of_array =
+ _checkpointed.smallest_never_used_blocknum.b;
+ _checkpointed.blocknum_freelist_head = freelist_null;
+ XMALLOC_N(_checkpointed.length_of_array, _checkpointed.block_translation);
+ for (int64_t i = 0; i < _checkpointed.length_of_array; i++) {
+ _checkpointed.block_translation[i].size = 0;
+ _checkpointed.block_translation[i].u.diskoff = diskoff_unused;
+ }
+
+ // we just created a default checkpointed, now copy it to current.
+ _copy_translation(&_current, &_checkpointed, TRANSLATION_CURRENT);
+
+ // Create an empty block allocator.
+ _bt_block_allocator->Create(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE,
+ BlockAllocator::BLOCK_ALLOCATOR_ALIGNMENT);
+}
+
+// TODO: Refactor with FT-303
+static void ft_set_dirty(FT ft, bool for_checkpoint) {
+ invariant(ft->h->type == FT_CURRENT);
+ if (for_checkpoint) {
+ invariant(ft->checkpoint_header->type == FT_CHECKPOINT_INPROGRESS);
+ ft->checkpoint_header->set_dirty();
+ } else {
+ ft->h->set_dirty();
+ }
+}
+
+void block_table::_maybe_truncate_file(int fd, uint64_t size_needed_before) {
+ toku_mutex_assert_locked(&_mutex);
+ uint64_t new_size_needed = _bt_block_allocator->AllocatedLimit();
+ // Save a call to toku_os_get_file_size (kernel call) if unlikely to be
+ // useful.
+ if (new_size_needed < size_needed_before &&
+ new_size_needed < _safe_file_size) {
+ nb_mutex_lock(&_safe_file_size_lock, &_mutex);
+
+ // Must hold _safe_file_size_lock to change _safe_file_size.
+ if (new_size_needed < _safe_file_size) {
+ int64_t safe_file_size_before = _safe_file_size;
+ // Not safe to use the 'to-be-truncated' portion until truncate is
+ // done.
+ _safe_file_size = new_size_needed;
+ _mutex_unlock();
+
+ uint64_t size_after;
+ toku_maybe_truncate_file(
+ fd, new_size_needed, safe_file_size_before, &size_after);
+ _mutex_lock();
+
+ _safe_file_size = size_after;
+ }
+ nb_mutex_unlock(&_safe_file_size_lock);
+ }
+}
+
+void block_table::maybe_truncate_file_on_open(int fd) {
+ _mutex_lock();
+ _maybe_truncate_file(fd, _safe_file_size);
+ _mutex_unlock();
+}
+
+void block_table::_copy_translation(struct translation *dst,
+ struct translation *src,
+ enum translation_type newtype) {
+ // We intend to malloc a fresh block, so the incoming translation should be
+ // empty
+ invariant_null(dst->block_translation);
+
+ invariant(src->length_of_array >= src->smallest_never_used_blocknum.b);
+ invariant(newtype == TRANSLATION_DEBUG ||
+ (src->type == TRANSLATION_CURRENT &&
+ newtype == TRANSLATION_INPROGRESS) ||
+ (src->type == TRANSLATION_CHECKPOINTED &&
+ newtype == TRANSLATION_CURRENT));
+ dst->type = newtype;
+ dst->smallest_never_used_blocknum = src->smallest_never_used_blocknum;
+ dst->blocknum_freelist_head = src->blocknum_freelist_head;
+
+ // destination btt is of fixed size. Allocate + memcpy the exact length
+ // necessary.
+ dst->length_of_array = dst->smallest_never_used_blocknum.b;
+ XMALLOC_N(dst->length_of_array, dst->block_translation);
+ memcpy(dst->block_translation,
+ src->block_translation,
+ dst->length_of_array * sizeof(*dst->block_translation));
+
+ // New version of btt is not yet stored on disk.
+ dst->block_translation[RESERVED_BLOCKNUM_TRANSLATION].size = 0;
+ dst->block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff =
+ diskoff_unused;
+}
+
+int64_t block_table::get_blocks_in_use_unlocked() {
+ BLOCKNUM b;
+ struct translation *t = &_current;
+ int64_t num_blocks = 0;
+ {
+        // Reserved blocknums do not get upgraded; they are part of the header.
+ for (b.b = RESERVED_BLOCKNUMS; b.b < t->smallest_never_used_blocknum.b;
+ b.b++) {
+ if (t->block_translation[b.b].size != size_is_free) {
+ num_blocks++;
+ }
+ }
+ }
+ return num_blocks;
+}
+
+void block_table::_maybe_optimize_translation(struct translation *t) {
+    // Reduce 'smallest_never_used_blocknum.b' (completely free those blocknums
+    // instead of just keeping them on a free list).
+    // Doing so requires us to regenerate the free list.
+    // This is O(n) work, so do it only if you're already doing O(n) work anyway.
+
+ BLOCKNUM b;
+ paranoid_invariant(t->smallest_never_used_blocknum.b >= RESERVED_BLOCKNUMS);
+ // Calculate how large the free suffix is.
+ int64_t freed;
+ {
+ for (b.b = t->smallest_never_used_blocknum.b; b.b > RESERVED_BLOCKNUMS;
+ b.b--) {
+ if (t->block_translation[b.b - 1].size != size_is_free) {
+ break;
+ }
+ }
+ freed = t->smallest_never_used_blocknum.b - b.b;
+ }
+ if (freed > 0) {
+ t->smallest_never_used_blocknum.b = b.b;
+ if (t->length_of_array / 4 > t->smallest_never_used_blocknum.b) {
+ // We're using more memory than necessary to represent this now.
+ // Reduce.
+ uint64_t new_length = t->smallest_never_used_blocknum.b * 2;
+ XREALLOC_N(new_length, t->block_translation);
+ t->length_of_array = new_length;
+ // No need to zero anything out.
+ }
+
+ // Regenerate free list.
+ t->blocknum_freelist_head.b = freelist_null.b;
+ for (b.b = RESERVED_BLOCKNUMS; b.b < t->smallest_never_used_blocknum.b;
+ b.b++) {
+ if (t->block_translation[b.b].size == size_is_free) {
+ t->block_translation[b.b].u.next_free_blocknum =
+ t->blocknum_freelist_head;
+ t->blocknum_freelist_head = b;
+ }
+ }
+ }
+}
+
+// block table must be locked by caller of this function
+void block_table::note_start_checkpoint_unlocked() {
+ toku_mutex_assert_locked(&_mutex);
+
+ // We're going to do O(n) work to copy the translation, so we
+ // can afford to do O(n) work by optimizing the translation
+ _maybe_optimize_translation(&_current);
+
+ // Copy current translation to inprogress translation.
+ _copy_translation(&_inprogress, &_current, TRANSLATION_INPROGRESS);
+
+ _checkpoint_skipped = false;
+}
+
+void block_table::note_skipped_checkpoint() {
+    // Purpose: alert the block translation that the checkpoint was skipped,
+    // e.g. for a non-dirty header
+ _mutex_lock();
+ paranoid_invariant_notnull(_inprogress.block_translation);
+ _checkpoint_skipped = true;
+ _mutex_unlock();
+}
+
+// Purpose: free any disk space used by previous checkpoint that isn't in use by
+// either
+// - current state
+// - in-progress checkpoint
+// capture inprogress as new checkpointed.
+// For each entry in checkpointBTT
+// if offset does not match offset in inprogress
+// assert offset does not match offset in current
+// free (offset,len) from checkpoint
+// move inprogress to checkpoint (resetting type)
+// inprogress = NULL
+void block_table::note_end_checkpoint(int fd) {
+ // Free unused blocks
+ _mutex_lock();
+ uint64_t allocated_limit_at_start = _bt_block_allocator->AllocatedLimit();
+ paranoid_invariant_notnull(_inprogress.block_translation);
+ if (_checkpoint_skipped) {
+ toku_free(_inprogress.block_translation);
+ memset(&_inprogress, 0, sizeof(_inprogress));
+ goto end;
+ }
+
+ // Make certain inprogress was allocated space on disk
+ invariant(
+ _inprogress.block_translation[RESERVED_BLOCKNUM_TRANSLATION].size > 0);
+ invariant(
+ _inprogress.block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff >
+ 0);
+
+ {
+ struct translation *t = &_checkpointed;
+ for (int64_t i = 0; i < t->length_of_array; i++) {
+ struct block_translation_pair *pair = &t->block_translation[i];
+ if (pair->size > 0 &&
+ !_translation_prevents_freeing(
+ &_inprogress, make_blocknum(i), pair)) {
+ invariant(!_translation_prevents_freeing(
+ &_current, make_blocknum(i), pair));
+ _bt_block_allocator->FreeBlock(pair->u.diskoff, pair->size);
+ }
+ }
+ toku_free(_checkpointed.block_translation);
+ _checkpointed = _inprogress;
+ _checkpointed.type = TRANSLATION_CHECKPOINTED;
+ memset(&_inprogress, 0, sizeof(_inprogress));
+ _maybe_truncate_file(fd, allocated_limit_at_start);
+ }
+end:
+ _mutex_unlock();
+}
+
+bool block_table::_is_valid_blocknum(struct translation *t, BLOCKNUM b) {
+ invariant(t->length_of_array >= t->smallest_never_used_blocknum.b);
+ return b.b >= 0 && b.b < t->smallest_never_used_blocknum.b;
+}
+
+void block_table::_verify_valid_blocknum(struct translation *UU(t),
+ BLOCKNUM UU(b)) {
+ invariant(_is_valid_blocknum(t, b));
+}
+
+bool block_table::_is_valid_freeable_blocknum(struct translation *t,
+ BLOCKNUM b) {
+ invariant(t->length_of_array >= t->smallest_never_used_blocknum.b);
+ return b.b >= RESERVED_BLOCKNUMS && b.b < t->smallest_never_used_blocknum.b;
+}
+
+// should be freeable
+void block_table::_verify_valid_freeable_blocknum(struct translation *UU(t),
+ BLOCKNUM UU(b)) {
+ invariant(_is_valid_freeable_blocknum(t, b));
+}
+
+// Also used only in ft-serialize-test.
+void block_table::block_free(uint64_t offset, uint64_t size) {
+ _mutex_lock();
+ _bt_block_allocator->FreeBlock(offset, size);
+ _mutex_unlock();
+}
+
+int64_t block_table::_calculate_size_on_disk(struct translation *t) {
+ return 8 + // smallest_never_used_blocknum
+ 8 + // blocknum_freelist_head
+ t->smallest_never_used_blocknum.b * 16 + // Array
+ 4; // 4 for checksum
+}
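+
+// Worked example (illustrative): with smallest_never_used_blocknum == 3 (only
+// the reserved blocknums), the serialized btt occupies 8 + 8 + 3 * 16 + 4 = 68
+// bytes; serialize_translation_to_wbuf then pads the write buffer up to the
+// next multiple of 512.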
+
+// We cannot free the disk space allocated to this blocknum if it is still in
+// use by the given translation table.
+bool block_table::_translation_prevents_freeing(
+ struct translation *t,
+ BLOCKNUM b,
+ struct block_translation_pair *old_pair) {
+ return t->block_translation && b.b < t->smallest_never_used_blocknum.b &&
+ old_pair->u.diskoff == t->block_translation[b.b].u.diskoff;
+}
+
+void block_table::_realloc_on_disk_internal(BLOCKNUM b,
+ DISKOFF size,
+ DISKOFF *offset,
+ FT ft,
+ bool for_checkpoint) {
+ toku_mutex_assert_locked(&_mutex);
+ ft_set_dirty(ft, for_checkpoint);
+
+ struct translation *t = &_current;
+ struct block_translation_pair old_pair = t->block_translation[b.b];
+ // Free the old block if it is not still in use by the checkpoint in
+ // progress or the previous checkpoint
+ bool cannot_free =
+ (!for_checkpoint &&
+ _translation_prevents_freeing(&_inprogress, b, &old_pair)) ||
+ _translation_prevents_freeing(&_checkpointed, b, &old_pair);
+ if (!cannot_free && old_pair.u.diskoff != diskoff_unused) {
+ _bt_block_allocator->FreeBlock(old_pair.u.diskoff, old_pair.size);
+ }
+
+ uint64_t allocator_offset = diskoff_unused;
+ t->block_translation[b.b].size = size;
+ if (size > 0) {
+        // Allocate a new block only if the size is greater than 0;
+        // if the size is 0, the offset remains diskoff_unused.
+ _bt_block_allocator->AllocBlock(size, &allocator_offset);
+ }
+ t->block_translation[b.b].u.diskoff = allocator_offset;
+ *offset = allocator_offset;
+
+ // Update inprogress btt if appropriate (if called because Pending bit is
+ // set).
+ if (for_checkpoint) {
+ paranoid_invariant(b.b < _inprogress.length_of_array);
+ _inprogress.block_translation[b.b] = t->block_translation[b.b];
+ }
+}
+
+void block_table::_ensure_safe_write_unlocked(int fd,
+ DISKOFF block_size,
+ DISKOFF block_offset) {
+ // Requires: holding _mutex
+ uint64_t size_needed = block_size + block_offset;
+ if (size_needed > _safe_file_size) {
+ // Must hold _safe_file_size_lock to change _safe_file_size.
+ nb_mutex_lock(&_safe_file_size_lock, &_mutex);
+ if (size_needed > _safe_file_size) {
+ _mutex_unlock();
+
+ int64_t size_after;
+ toku_maybe_preallocate_in_file(
+ fd, size_needed, _safe_file_size, &size_after);
+
+ _mutex_lock();
+ _safe_file_size = size_after;
+ }
+ nb_mutex_unlock(&_safe_file_size_lock);
+ }
+}
+
+void block_table::realloc_on_disk(BLOCKNUM b,
+ DISKOFF size,
+ DISKOFF *offset,
+ FT ft,
+ int fd,
+ bool for_checkpoint) {
+ _mutex_lock();
+ struct translation *t = &_current;
+ _verify_valid_freeable_blocknum(t, b);
+ _realloc_on_disk_internal(b, size, offset, ft, for_checkpoint);
+
+ _ensure_safe_write_unlocked(fd, size, *offset);
+ _mutex_unlock();
+}
+
+bool block_table::_pair_is_unallocated(struct block_translation_pair *pair) {
+ return pair->size == 0 && pair->u.diskoff == diskoff_unused;
+}
+
+// Effect: figure out where to put the inprogress btt on disk, and allocate
+// space for it there.
+//   The space must be 512-byte aligned (both the starting address and the
+//   size).
+//   As a result, the allocated space may be a little bigger (up to the next
+//   512-byte boundary) than the actual btt.
+void block_table::_alloc_inprogress_translation_on_disk_unlocked() {
+ toku_mutex_assert_locked(&_mutex);
+
+ struct translation *t = &_inprogress;
+ paranoid_invariant_notnull(t->block_translation);
+ BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_TRANSLATION);
+ // Each inprogress is allocated only once
+ paranoid_invariant(_pair_is_unallocated(&t->block_translation[b.b]));
+
+ // Allocate a new block
+ int64_t size = _calculate_size_on_disk(t);
+ uint64_t offset;
+ _bt_block_allocator->AllocBlock(size, &offset);
+ t->block_translation[b.b].u.diskoff = offset;
+ t->block_translation[b.b].size = size;
+}
+
+// Effect: Serializes the blocktable to a wbuf (which starts uninitialized).
+//   A clean shutdown runs checkpoint start so that current and inprogress are
+//   copies.
+//   The resulting wbuf buffer is guaranteed to be 512-byte aligned and its
+//   total length is a multiple of 512 (we pad with zeros at the end if
+//   needed).
+//   The returned address is guaranteed to be 512-byte aligned, but the
+//   returned size is not; it *is* guaranteed that we can read up to the next
+//   512-byte boundary, however.
+void block_table::serialize_translation_to_wbuf(int fd,
+ struct wbuf *w,
+ int64_t *address,
+ int64_t *size) {
+ _mutex_lock();
+ struct translation *t = &_inprogress;
+
+ BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_TRANSLATION);
+ _alloc_inprogress_translation_on_disk_unlocked(); // The allocated block
+ // must be 512-byte
+ // aligned to make
+ // O_DIRECT happy.
+ uint64_t size_translation = _calculate_size_on_disk(t);
+ uint64_t size_aligned = roundup_to_multiple(512, size_translation);
+ invariant((int64_t)size_translation == t->block_translation[b.b].size);
+ {
+ // Init wbuf
+ if (0)
+ printf(
+ "%s:%d writing translation table of size_translation %" PRIu64
+ " at %" PRId64 "\n",
+ __FILE__,
+ __LINE__,
+ size_translation,
+ t->block_translation[b.b].u.diskoff);
+ char *XMALLOC_N_ALIGNED(512, size_aligned, buf);
+ for (uint64_t i = size_translation; i < size_aligned; i++)
+ buf[i] = 0; // fill in the end of the buffer with zeros.
+ wbuf_init(w, buf, size_aligned);
+ }
+ wbuf_BLOCKNUM(w, t->smallest_never_used_blocknum);
+ wbuf_BLOCKNUM(w, t->blocknum_freelist_head);
+ int64_t i;
+ for (i = 0; i < t->smallest_never_used_blocknum.b; i++) {
+ if (0)
+ printf("%s:%d %" PRId64 ",%" PRId64 "\n",
+ __FILE__,
+ __LINE__,
+ t->block_translation[i].u.diskoff,
+ t->block_translation[i].size);
+ wbuf_DISKOFF(w, t->block_translation[i].u.diskoff);
+ wbuf_DISKOFF(w, t->block_translation[i].size);
+ }
+ uint32_t checksum = toku_x1764_finish(&w->checksum);
+ wbuf_int(w, checksum);
+ *address = t->block_translation[b.b].u.diskoff;
+ *size = size_translation;
+ invariant((*address) % 512 == 0);
+
+ _ensure_safe_write_unlocked(fd, size_aligned, *address);
+ _mutex_unlock();
+}
+
+// Purpose: get the disk address (offset and size) of a block, given its
+// blocknum. (Perhaps rename; blockid?)
+void block_table::_translate_blocknum_to_offset_size_unlocked(BLOCKNUM b,
+ DISKOFF *offset,
+ DISKOFF *size) {
+ struct translation *t = &_current;
+ _verify_valid_blocknum(t, b);
+ if (offset) {
+ *offset = t->block_translation[b.b].u.diskoff;
+ }
+ if (size) {
+ *size = t->block_translation[b.b].size;
+ }
+}
+
+// Purpose: get the disk address (offset and size) of a block, given its
+// blocknum. (Perhaps rename; blockid?)
+void block_table::translate_blocknum_to_offset_size(BLOCKNUM b,
+ DISKOFF *offset,
+ DISKOFF *size) {
+ _mutex_lock();
+ _translate_blocknum_to_offset_size_unlocked(b, offset, size);
+ _mutex_unlock();
+}
+
+// Only called when allocating a blocknum.
+// Effect: expand the array to maintain the size invariant,
+// given that one more never-used blocknum will soon be used.
+void block_table::_maybe_expand_translation(struct translation *t) {
+ if (t->length_of_array <= t->smallest_never_used_blocknum.b) {
+ // expansion is necessary
+ uint64_t new_length = t->smallest_never_used_blocknum.b * 2;
+ XREALLOC_N(new_length, t->block_translation);
+ uint64_t i;
+ for (i = t->length_of_array; i < new_length; i++) {
+ t->block_translation[i].u.next_free_blocknum = freelist_null;
+ t->block_translation[i].size = size_is_free;
+ }
+ t->length_of_array = new_length;
+ }
+}
+
+void block_table::_allocate_blocknum_unlocked(BLOCKNUM *res, FT ft) {
+ toku_mutex_assert_locked(&_mutex);
+ BLOCKNUM result;
+ struct translation *t = &_current;
+ if (t->blocknum_freelist_head.b == freelist_null.b) {
+        // No previously used blocknums are available,
+        // so use a never-used blocknum.
+        // Ensure a never-used blocknum is available.
+        _maybe_expand_translation(t);
+ result = t->smallest_never_used_blocknum;
+ t->smallest_never_used_blocknum.b++;
+ } else { // reuse a previously used blocknum
+ result = t->blocknum_freelist_head;
+ BLOCKNUM next = t->block_translation[result.b].u.next_free_blocknum;
+ t->blocknum_freelist_head = next;
+ }
+ // Verify the blocknum is free
+ paranoid_invariant(t->block_translation[result.b].size == size_is_free);
+ // blocknum is not free anymore
+ t->block_translation[result.b].u.diskoff = diskoff_unused;
+ t->block_translation[result.b].size = 0;
+ _verify_valid_freeable_blocknum(t, result);
+ *res = result;
+ ft_set_dirty(ft, false);
+}
+
+void block_table::allocate_blocknum(BLOCKNUM *res, FT ft) {
+ _mutex_lock();
+ _allocate_blocknum_unlocked(res, ft);
+ _mutex_unlock();
+}
+
+void block_table::_free_blocknum_in_translation(struct translation *t,
+ BLOCKNUM b) {
+ _verify_valid_freeable_blocknum(t, b);
+ paranoid_invariant(t->block_translation[b.b].size != size_is_free);
+
+ t->block_translation[b.b].size = size_is_free;
+ t->block_translation[b.b].u.next_free_blocknum = t->blocknum_freelist_head;
+ t->blocknum_freelist_head = b;
+}
+
+// Effect: Free a blocknum.
+// If the blocknum holds the only reference to a block on disk, free that block
+void block_table::_free_blocknum_unlocked(BLOCKNUM *bp,
+ FT ft,
+ bool for_checkpoint) {
+ toku_mutex_assert_locked(&_mutex);
+ BLOCKNUM b = *bp;
+ bp->b = 0; // Remove caller's reference.
+
+ struct block_translation_pair old_pair = _current.block_translation[b.b];
+
+ _free_blocknum_in_translation(&_current, b);
+ if (for_checkpoint) {
+ paranoid_invariant(ft->checkpoint_header->type ==
+ FT_CHECKPOINT_INPROGRESS);
+ _free_blocknum_in_translation(&_inprogress, b);
+ }
+
+ // If the size is 0, no disk block has ever been assigned to this blocknum.
+ if (old_pair.size > 0) {
+ // Free the old block if it is not still in use by the checkpoint in
+ // progress or the previous checkpoint
+ bool cannot_free =
+ _translation_prevents_freeing(&_inprogress, b, &old_pair) ||
+ _translation_prevents_freeing(&_checkpointed, b, &old_pair);
+ if (!cannot_free) {
+ _bt_block_allocator->FreeBlock(old_pair.u.diskoff, old_pair.size);
+ }
+ } else {
+ paranoid_invariant(old_pair.size == 0);
+ paranoid_invariant(old_pair.u.diskoff == diskoff_unused);
+ }
+ ft_set_dirty(ft, for_checkpoint);
+}
+
+void block_table::free_blocknum(BLOCKNUM *bp, FT ft, bool for_checkpoint) {
+ _mutex_lock();
+ _free_blocknum_unlocked(bp, ft, for_checkpoint);
+ _mutex_unlock();
+}
+
+// Verify there are no free blocknums (the freelist is empty).
+void block_table::verify_no_free_blocknums() {
+ invariant(_current.blocknum_freelist_head.b == freelist_null.b);
+}
+
+// Frees blocknums that have a size of 0 and unused diskoff
+// Currently used for eliminating unused cached rollback log nodes
+void block_table::free_unused_blocknums(BLOCKNUM root) {
+ _mutex_lock();
+ int64_t smallest = _current.smallest_never_used_blocknum.b;
+ for (int64_t i = RESERVED_BLOCKNUMS; i < smallest; i++) {
+ if (i == root.b) {
+ continue;
+ }
+ BLOCKNUM b = make_blocknum(i);
+ if (_current.block_translation[b.b].size == 0) {
+ invariant(_current.block_translation[b.b].u.diskoff ==
+ diskoff_unused);
+ _free_blocknum_in_translation(&_current, b);
+ }
+ }
+ _mutex_unlock();
+}
+
+bool block_table::_no_data_blocks_except_root(BLOCKNUM root) {
+ bool ok = true;
+ _mutex_lock();
+ int64_t smallest = _current.smallest_never_used_blocknum.b;
+ if (root.b < RESERVED_BLOCKNUMS) {
+ ok = false;
+ goto cleanup;
+ }
+ for (int64_t i = RESERVED_BLOCKNUMS; i < smallest; i++) {
+ if (i == root.b) {
+ continue;
+ }
+ BLOCKNUM b = make_blocknum(i);
+ if (_current.block_translation[b.b].size != size_is_free) {
+ ok = false;
+ goto cleanup;
+ }
+ }
+cleanup:
+ _mutex_unlock();
+ return ok;
+}
+
+// Verify there are no data blocks except root.
+// TODO(leif): This actually takes a lock, but I don't want to fix all the
+// callers right now.
+void block_table::verify_no_data_blocks_except_root(BLOCKNUM UU(root)) {
+ paranoid_invariant(_no_data_blocks_except_root(root));
+}
+
+bool block_table::_blocknum_allocated(BLOCKNUM b) {
+ _mutex_lock();
+ struct translation *t = &_current;
+ _verify_valid_blocknum(t, b);
+ bool ok = t->block_translation[b.b].size != size_is_free;
+ _mutex_unlock();
+ return ok;
+}
+
+// Verify a blocknum is currently allocated.
+void block_table::verify_blocknum_allocated(BLOCKNUM UU(b)) {
+ paranoid_invariant(_blocknum_allocated(b));
+}
+
+// Only used by the translation table dump functions (debug info)
+void block_table::_dump_translation_internal(FILE *f, struct translation *t) {
+ if (t->block_translation) {
+ BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_TRANSLATION);
+ fprintf(f, " length_of_array[%" PRId64 "]", t->length_of_array);
+ fprintf(f,
+ " smallest_never_used_blocknum[%" PRId64 "]",
+ t->smallest_never_used_blocknum.b);
+ fprintf(f,
+ " blocknum_free_list_head[%" PRId64 "]",
+ t->blocknum_freelist_head.b);
+ fprintf(
+ f, " size_on_disk[%" PRId64 "]", t->block_translation[b.b].size);
+ fprintf(f,
+ " location_on_disk[%" PRId64 "]\n",
+ t->block_translation[b.b].u.diskoff);
+ int64_t i;
+ for (i = 0; i < t->length_of_array; i++) {
+ fprintf(f,
+ " %" PRId64 ": %" PRId64 " %" PRId64 "\n",
+ i,
+ t->block_translation[i].u.diskoff,
+ t->block_translation[i].size);
+ }
+ fprintf(f, "\n");
+ } else {
+ fprintf(f, " does not exist\n");
+ }
+}
+
+// Only used by toku_ft_dump, which is only for debugging purposes.
+// "pretty" just means we use tabs so we can parse the output more easily later.
+void block_table::dump_translation_table_pretty(FILE *f) {
+ _mutex_lock();
+ struct translation *t = &_checkpointed;
+ invariant(t->block_translation != nullptr);
+ for (int64_t i = 0; i < t->length_of_array; ++i) {
+ fprintf(f,
+ "%" PRId64 "\t%" PRId64 "\t%" PRId64 "\n",
+ i,
+ t->block_translation[i].u.diskoff,
+ t->block_translation[i].size);
+ }
+ _mutex_unlock();
+}
+
+// Only used by toku_ft_dump which is only for debugging purposes
+void block_table::dump_translation_table(FILE *f) {
+ _mutex_lock();
+ fprintf(f, "Current block translation:");
+ _dump_translation_internal(f, &_current);
+ fprintf(f, "Checkpoint in progress block translation:");
+ _dump_translation_internal(f, &_inprogress);
+ fprintf(f, "Checkpointed block translation:");
+ _dump_translation_internal(f, &_checkpointed);
+ _mutex_unlock();
+}
+
+// Only used by ftdump
+void block_table::blocknum_dump_translation(BLOCKNUM b) {
+ _mutex_lock();
+
+ struct translation *t = &_current;
+ if (b.b < t->length_of_array) {
+ struct block_translation_pair *bx = &t->block_translation[b.b];
+ printf("%" PRId64 ": %" PRId64 " %" PRId64 "\n",
+ b.b,
+ bx->u.diskoff,
+ bx->size);
+ }
+ _mutex_unlock();
+}
+
+// Must not call this function when anything else is using the blocktable.
+// No one may use the blocktable afterwards.
+void block_table::destroy(void) {
+ // TODO: translation.destroy();
+ toku_free(_current.block_translation);
+ toku_free(_inprogress.block_translation);
+ toku_free(_checkpointed.block_translation);
+
+ _bt_block_allocator->Destroy();
+ delete _bt_block_allocator;
+ toku_mutex_destroy(&_mutex);
+ nb_mutex_destroy(&_safe_file_size_lock);
+}
+
+int block_table::_translation_deserialize_from_buffer(
+ struct translation *t,
+ DISKOFF location_on_disk,
+ uint64_t size_on_disk,
+    // in: buffer with serialized translation
+ unsigned char *translation_buffer) {
+ int r = 0;
+ invariant(location_on_disk != 0);
+ t->type = TRANSLATION_CHECKPOINTED;
+
+ // check the checksum
+ uint32_t x1764 = toku_x1764_memory(translation_buffer, size_on_disk - 4);
+ uint64_t offset = size_on_disk - 4;
+ uint32_t stored_x1764 = toku_dtoh32(*(int *)(translation_buffer + offset));
+ if (x1764 != stored_x1764) {
+ fprintf(stderr,
+ "Translation table checksum failure: calc=0x%08x read=0x%08x\n",
+ x1764,
+ stored_x1764);
+ r = TOKUDB_BAD_CHECKSUM;
+ goto exit;
+ }
+
+ struct rbuf rb;
+ rb.buf = translation_buffer;
+ rb.ndone = 0;
+ rb.size = size_on_disk - 4; // 4==checksum
+
+ t->smallest_never_used_blocknum = rbuf_blocknum(&rb);
+ t->length_of_array = t->smallest_never_used_blocknum.b;
+ invariant(t->smallest_never_used_blocknum.b >= RESERVED_BLOCKNUMS);
+ t->blocknum_freelist_head = rbuf_blocknum(&rb);
+ XMALLOC_N(t->length_of_array, t->block_translation);
+ for (int64_t i = 0; i < t->length_of_array; i++) {
+ t->block_translation[i].u.diskoff = rbuf_DISKOFF(&rb);
+ t->block_translation[i].size = rbuf_DISKOFF(&rb);
+ }
+ invariant(_calculate_size_on_disk(t) == (int64_t)size_on_disk);
+ invariant(t->block_translation[RESERVED_BLOCKNUM_TRANSLATION].size ==
+ (int64_t)size_on_disk);
+ invariant(t->block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff ==
+ location_on_disk);
+
+exit:
+ return r;
+}
+
+int block_table::iterate(enum translation_type type,
+ BLOCKTABLE_CALLBACK f,
+ void *extra,
+ bool data_only,
+ bool used_only) {
+ struct translation *src;
+
+ int r = 0;
+ switch (type) {
+ case TRANSLATION_CURRENT:
+ src = &_current;
+ break;
+ case TRANSLATION_INPROGRESS:
+ src = &_inprogress;
+ break;
+ case TRANSLATION_CHECKPOINTED:
+ src = &_checkpointed;
+ break;
+ default:
+ r = EINVAL;
+ }
+
+ struct translation fakecurrent;
+ memset(&fakecurrent, 0, sizeof(struct translation));
+
+ struct translation *t = &fakecurrent;
+ if (r == 0) {
+ _mutex_lock();
+ _copy_translation(t, src, TRANSLATION_DEBUG);
+ t->block_translation[RESERVED_BLOCKNUM_TRANSLATION] =
+ src->block_translation[RESERVED_BLOCKNUM_TRANSLATION];
+ _mutex_unlock();
+ int64_t i;
+ for (i = 0; i < t->smallest_never_used_blocknum.b; i++) {
+ struct block_translation_pair pair = t->block_translation[i];
+ if (data_only && i < RESERVED_BLOCKNUMS)
+ continue;
+ if (used_only && pair.size <= 0)
+ continue;
+ r = f(make_blocknum(i), pair.size, pair.u.diskoff, extra);
+ if (r != 0)
+ break;
+ }
+ toku_free(t->block_translation);
+ }
+ return r;
+}
+
+typedef struct {
+ int64_t used_space;
+ int64_t total_space;
+} frag_extra;
+
+static int frag_helper(BLOCKNUM UU(b),
+ int64_t size,
+ int64_t address,
+ void *extra) {
+ frag_extra *info = (frag_extra *)extra;
+
+ if (size + address > info->total_space)
+ info->total_space = size + address;
+ info->used_space += size;
+ return 0;
+}
+
+void block_table::internal_fragmentation(int64_t *total_sizep,
+ int64_t *used_sizep) {
+ frag_extra info = {0, 0};
+ int r = iterate(TRANSLATION_CHECKPOINTED, frag_helper, &info, false, true);
+ invariant_zero(r);
+
+ if (total_sizep)
+ *total_sizep = info.total_space;
+ if (used_sizep)
+ *used_sizep = info.used_space;
+}
+
+void block_table::_realloc_descriptor_on_disk_unlocked(DISKOFF size,
+ DISKOFF *offset,
+ FT ft) {
+ toku_mutex_assert_locked(&_mutex);
+ BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_DESCRIPTOR);
+ _realloc_on_disk_internal(b, size, offset, ft, false);
+}
+
+void block_table::realloc_descriptor_on_disk(DISKOFF size,
+ DISKOFF *offset,
+ FT ft,
+ int fd) {
+ _mutex_lock();
+ _realloc_descriptor_on_disk_unlocked(size, offset, ft);
+ _ensure_safe_write_unlocked(fd, size, *offset);
+ _mutex_unlock();
+}
+
+void block_table::get_descriptor_offset_size(DISKOFF *offset, DISKOFF *size) {
+ _mutex_lock();
+ BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_DESCRIPTOR);
+ _translate_blocknum_to_offset_size_unlocked(b, offset, size);
+ _mutex_unlock();
+}
+
+void block_table::get_fragmentation_unlocked(TOKU_DB_FRAGMENTATION report) {
+ // Requires: blocktable lock is held.
+ // Requires: report->file_size_bytes is already filled in.
+
+ // Count the headers.
+ report->data_bytes = BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
+ report->data_blocks = 1;
+ report->checkpoint_bytes_additional =
+ BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
+ report->checkpoint_blocks_additional = 1;
+
+ struct translation *current = &_current;
+ for (int64_t i = 0; i < current->length_of_array; i++) {
+ struct block_translation_pair *pair = &current->block_translation[i];
+ if (pair->size > 0) {
+ report->data_bytes += pair->size;
+ report->data_blocks++;
+ }
+ }
+
+ struct translation *checkpointed = &_checkpointed;
+ for (int64_t i = 0; i < checkpointed->length_of_array; i++) {
+ struct block_translation_pair *pair =
+ &checkpointed->block_translation[i];
+ if (pair->size > 0 &&
+ !(i < current->length_of_array &&
+ current->block_translation[i].size > 0 &&
+ current->block_translation[i].u.diskoff == pair->u.diskoff)) {
+ report->checkpoint_bytes_additional += pair->size;
+ report->checkpoint_blocks_additional++;
+ }
+ }
+
+ struct translation *inprogress = &_inprogress;
+ for (int64_t i = 0; i < inprogress->length_of_array; i++) {
+ struct block_translation_pair *pair = &inprogress->block_translation[i];
+ if (pair->size > 0 &&
+ !(i < current->length_of_array &&
+ current->block_translation[i].size > 0 &&
+ current->block_translation[i].u.diskoff == pair->u.diskoff) &&
+ !(i < checkpointed->length_of_array &&
+ checkpointed->block_translation[i].size > 0 &&
+ checkpointed->block_translation[i].u.diskoff ==
+ pair->u.diskoff)) {
+ report->checkpoint_bytes_additional += pair->size;
+ report->checkpoint_blocks_additional++;
+ }
+ }
+
+ _bt_block_allocator->UnusedStatistics(report);
+}
+
+void block_table::get_info64(struct ftinfo64 *s) {
+ _mutex_lock();
+
+ struct translation *current = &_current;
+ s->num_blocks_allocated = current->length_of_array;
+ s->num_blocks_in_use = 0;
+ s->size_allocated = 0;
+ s->size_in_use = 0;
+
+ for (int64_t i = 0; i < current->length_of_array; ++i) {
+ struct block_translation_pair *block = &current->block_translation[i];
+ if (block->size != size_is_free) {
+ ++s->num_blocks_in_use;
+ s->size_in_use += block->size;
+ if (block->u.diskoff != diskoff_unused) {
+ uint64_t limit = block->u.diskoff + block->size;
+ if (limit > s->size_allocated) {
+ s->size_allocated = limit;
+ }
+ }
+ }
+ }
+
+ _mutex_unlock();
+}
+
+int block_table::iterate_translation_tables(
+ uint64_t checkpoint_count,
+ int (*iter)(uint64_t checkpoint_count,
+ int64_t total_num_rows,
+ int64_t blocknum,
+ int64_t diskoff,
+ int64_t size,
+ void *extra),
+ void *iter_extra) {
+ int error = 0;
+ _mutex_lock();
+
+ int64_t total_num_rows =
+ _current.length_of_array + _checkpointed.length_of_array;
+ for (int64_t i = 0; error == 0 && i < _current.length_of_array; ++i) {
+ struct block_translation_pair *block = &_current.block_translation[i];
+ error = iter(checkpoint_count,
+ total_num_rows,
+ i,
+ block->u.diskoff,
+ block->size,
+ iter_extra);
+ }
+ for (int64_t i = 0; error == 0 && i < _checkpointed.length_of_array; ++i) {
+ struct block_translation_pair *block =
+ &_checkpointed.block_translation[i];
+ error = iter(checkpoint_count - 1,
+ total_num_rows,
+ i,
+ block->u.diskoff,
+ block->size,
+ iter_extra);
+ }
+
+ _mutex_unlock();
+ return error;
+}
diff --git a/storage/tokudb/PerconaFT/ft/serialize/block_table.h b/storage/tokudb/PerconaFT/ft/serialize/block_table.h
new file mode 100644
index 00000000..dd732d4f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/block_table.h
@@ -0,0 +1,340 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+#include "portability/toku_stdint.h"
+#include "portability/toku_pthread.h"
+
+#include "ft/serialize/block_allocator.h"
+#include "util/nb_mutex.h"
+
+struct ft;
+
+typedef struct blocknum_s { int64_t b; } BLOCKNUM;
+
+// Offset on disk. -1 is the 'null' pointer.
+typedef int64_t DISKOFF;
+
+// Unmovable reserved blocknums come first, then reallocable ones.
+// We reserve one blocknum for the translation table itself.
+enum {
+ RESERVED_BLOCKNUM_NULL = 0,
+ RESERVED_BLOCKNUM_TRANSLATION = 1,
+ RESERVED_BLOCKNUM_DESCRIPTOR = 2,
+ RESERVED_BLOCKNUMS
+};
+
+typedef int (*BLOCKTABLE_CALLBACK)(BLOCKNUM b,
+ int64_t size,
+ int64_t address,
+ void *extra);
+
+static inline BLOCKNUM make_blocknum(int64_t b) {
+ BLOCKNUM result = {.b = b};
+ return result;
+}
+static const BLOCKNUM ROLLBACK_NONE = {.b = 0};
+
+/**
+ *  There are three copies of the translation table (btt) in the block table:
+ *
+ *    checkpointed     Is initialized by deserializing from disk,
+ *                     and is the only version ever read from disk.
+ *                     When read from disk it is copied to current.
+ *                     It is immutable. It can be replaced by an inprogress btt.
+ *
+ *    inprogress       Is only filled by copying from current,
+ *                     and is the only version ever serialized to disk.
+ *                     (It is serialized to disk on checkpoint and clean
+ *                     shutdown.)
+ *                     At end of checkpoint it replaces 'checkpointed'.
+ *                     During a checkpoint, any 'pending' dirty writes will
+ *                     update inprogress.
+ *
+ *    current          Is initialized by copying from checkpointed,
+ *                     is the only version ever modified while the database is
+ *                     in use,
+ *                     and is the only version ever copied to inprogress.
+ *                     It is never stored on disk.
+ */
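+//
+// An illustrative checkpoint lifecycle (a sketch, not part of the build);
+// `bt`, `ft`, `fd`, `wb`, `address` and `size` are assumed to be provided by
+// the caller:
+//
+//     toku_ft_lock(ft);                       // takes the block table lock
+//     bt.note_start_checkpoint_unlocked();    // copy current -> inprogress
+//     toku_ft_unlock(ft);
+//     // ... pending blocks are rewritten with
+//     // bt.realloc_on_disk(b, size, &offset, ft, fd, true),
+//     // which also updates the inprogress translation ...
+//     bt.serialize_translation_to_wbuf(fd, &wb, &address, &size);
+//     bt.note_end_checkpoint(fd);             // inprogress -> checkpointed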
+class block_table {
+ public:
+ enum translation_type {
+ TRANSLATION_NONE = 0,
+ TRANSLATION_CURRENT,
+ TRANSLATION_INPROGRESS,
+ TRANSLATION_CHECKPOINTED,
+ TRANSLATION_DEBUG
+ };
+
+ void create();
+
+ int create_from_buffer(int fd,
+ DISKOFF location_on_disk,
+ DISKOFF size_on_disk,
+ unsigned char *translation_buffer);
+
+ void destroy();
+
+ // Checkpointing
+ void note_start_checkpoint_unlocked();
+ void note_end_checkpoint(int fd);
+ void note_skipped_checkpoint();
+ void maybe_truncate_file_on_open(int fd);
+
+ // Blocknums
+ void allocate_blocknum(BLOCKNUM *res, struct ft *ft);
+ void realloc_on_disk(BLOCKNUM b,
+ DISKOFF size,
+ DISKOFF *offset,
+ struct ft *ft,
+ int fd,
+ bool for_checkpoint);
+ void free_blocknum(BLOCKNUM *b, struct ft *ft, bool for_checkpoint);
+ void translate_blocknum_to_offset_size(BLOCKNUM b,
+ DISKOFF *offset,
+ DISKOFF *size);
+ void free_unused_blocknums(BLOCKNUM root);
+ void realloc_descriptor_on_disk(DISKOFF size,
+ DISKOFF *offset,
+ struct ft *ft,
+ int fd);
+ void get_descriptor_offset_size(DISKOFF *offset, DISKOFF *size);
+
+    // External verification
+ void verify_blocknum_allocated(BLOCKNUM b);
+ void verify_no_data_blocks_except_root(BLOCKNUM root);
+ void verify_no_free_blocknums();
+
+ // Serialization
+ void serialize_translation_to_wbuf(int fd,
+ struct wbuf *w,
+ int64_t *address,
+ int64_t *size);
+
+ // DEBUG ONLY (ftdump included), tests included
+ void blocknum_dump_translation(BLOCKNUM b);
+ void dump_translation_table_pretty(FILE *f);
+ void dump_translation_table(FILE *f);
+ void block_free(uint64_t offset, uint64_t size);
+
+ int iterate(enum translation_type type,
+ BLOCKTABLE_CALLBACK f,
+ void *extra,
+ bool data_only,
+ bool used_only);
+ void internal_fragmentation(int64_t *total_sizep, int64_t *used_sizep);
+
+ // Requires: blocktable lock is held.
+ // Requires: report->file_size_bytes is already filled in.
+ void get_fragmentation_unlocked(TOKU_DB_FRAGMENTATION report);
+
+ int64_t get_blocks_in_use_unlocked();
+
+ void get_info64(struct ftinfo64 *);
+
+ int iterate_translation_tables(
+ uint64_t,
+ int (*)(uint64_t, int64_t, int64_t, int64_t, int64_t, void *),
+ void *);
+
+ private:
+ struct block_translation_pair {
+ // If in the freelist, use next_free_blocknum, otherwise diskoff.
+ union {
+ DISKOFF diskoff;
+ BLOCKNUM next_free_blocknum;
+ } u;
+
+ // Set to 0xFFFFFFFFFFFFFFFF for free
+ DISKOFF size;
+ };
+
+ // This is the BTT (block translation table)
+ // When the translation (btt) is stored on disk:
+ // In Header:
+ // size_on_disk
+ // location_on_disk
+ // In block translation table (in order):
+ // smallest_never_used_blocknum
+ // blocknum_freelist_head
+ // array
+ // a checksum
+ struct translation {
+ enum translation_type type;
+
+        // Number of elements in the array (block_translation). Always >=
+        // smallest_never_used_blocknum.
+ int64_t length_of_array;
+ BLOCKNUM smallest_never_used_blocknum;
+
+ // Next (previously used) unused blocknum (free list)
+ BLOCKNUM blocknum_freelist_head;
+ struct block_translation_pair *block_translation;
+
+ // size_on_disk is stored in
+ // block_translation[RESERVED_BLOCKNUM_TRANSLATION].size
+        // location_on_disk is stored in
+ // block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff
+ };
+
+ void _create_internal();
+ int _translation_deserialize_from_buffer(
+ struct translation *t, // destination into which to deserialize
+ DISKOFF location_on_disk, // location of translation_buffer
+ uint64_t size_on_disk,
+ unsigned char *
+ translation_buffer); // buffer with serialized translation
+
+ void _copy_translation(struct translation *dst,
+ struct translation *src,
+ enum translation_type newtype);
+ void _maybe_optimize_translation(struct translation *t);
+ void _maybe_expand_translation(struct translation *t);
+ bool _translation_prevents_freeing(struct translation *t,
+ BLOCKNUM b,
+ struct block_translation_pair *old_pair);
+ void _free_blocknum_in_translation(struct translation *t, BLOCKNUM b);
+ int64_t _calculate_size_on_disk(struct translation *t);
+ bool _pair_is_unallocated(struct block_translation_pair *pair);
+ void _alloc_inprogress_translation_on_disk_unlocked();
+ void _dump_translation_internal(FILE *f, struct translation *t);
+
+ // Blocknum management
+ void _allocate_blocknum_unlocked(BLOCKNUM *res, struct ft *ft);
+ void _free_blocknum_unlocked(BLOCKNUM *bp,
+ struct ft *ft,
+ bool for_checkpoint);
+ void _realloc_descriptor_on_disk_unlocked(DISKOFF size,
+ DISKOFF *offset,
+ struct ft *ft);
+ void _realloc_on_disk_internal(BLOCKNUM b,
+ DISKOFF size,
+ DISKOFF *offset,
+ struct ft *ft,
+ bool for_checkpoint);
+ void _translate_blocknum_to_offset_size_unlocked(BLOCKNUM b,
+ DISKOFF *offset,
+ DISKOFF *size);
+
+ // File management
+ void _maybe_truncate_file(int fd, uint64_t size_needed_before);
+ void _ensure_safe_write_unlocked(int fd,
+ DISKOFF block_size,
+ DISKOFF block_offset);
+
+ // Verification
+ bool _is_valid_blocknum(struct translation *t, BLOCKNUM b);
+ void _verify_valid_blocknum(struct translation *t, BLOCKNUM b);
+ bool _is_valid_freeable_blocknum(struct translation *t, BLOCKNUM b);
+ void _verify_valid_freeable_blocknum(struct translation *t, BLOCKNUM b);
+ bool _no_data_blocks_except_root(BLOCKNUM root);
+ bool _blocknum_allocated(BLOCKNUM b);
+
+ // Locking
+ //
+ // TODO: Move the lock to the FT
+ void _mutex_lock();
+ void _mutex_unlock();
+
+ // The current translation is the one used by client threads.
+ // It is not represented on disk.
+ struct translation _current;
+
+ // The translation used by the checkpoint currently in progress.
+ // If the checkpoint thread allocates a block, it must also update the
+ // current translation.
+ struct translation _inprogress;
+
+ // The translation for the data that shall remain inviolate on disk until
+ // the next checkpoint finishes,
+ // after which any blocks used only in this translation can be freed.
+ struct translation _checkpointed;
+
+ // The in-memory data structure for block allocation.
+ // There is no on-disk data structure for block allocation.
+    // Note: This is *allocation* not *translation* - the block allocator is
+    // unaware of which blocks are used for which translation, but simply
+    // allocates and deallocates blocks.
+ BlockAllocator *_bt_block_allocator;
+ toku_mutex_t _mutex;
+ struct nb_mutex _safe_file_size_lock;
+ bool _checkpoint_skipped;
+ uint64_t _safe_file_size;
+
+ // Because the lock is in a weird place right now
+ friend void toku_ft_lock(struct ft *ft);
+ friend void toku_ft_unlock(struct ft *ft);
+};
+
+// For serialize / deserialize
+
+#include "ft/serialize/wbuf.h"
+
+static inline void wbuf_BLOCKNUM(struct wbuf *w, BLOCKNUM b) {
+ wbuf_ulonglong(w, b.b);
+}
+
+static inline void wbuf_nocrc_BLOCKNUM(struct wbuf *w, BLOCKNUM b) {
+ wbuf_nocrc_ulonglong(w, b.b);
+}
+
+static inline void wbuf_DISKOFF(struct wbuf *wb, DISKOFF off) {
+ wbuf_ulonglong(wb, (uint64_t)off);
+}
+
+#include "ft/serialize/rbuf.h"
+
+static inline DISKOFF rbuf_DISKOFF(struct rbuf *rb) {
+ return rbuf_ulonglong(rb);
+}
+
+static inline BLOCKNUM rbuf_blocknum(struct rbuf *rb) {
+ BLOCKNUM result = make_blocknum(rbuf_longlong(rb));
+ return result;
+}
+
+static inline void rbuf_ma_BLOCKNUM(struct rbuf *rb,
+ memarena *UU(ma),
+ BLOCKNUM *blocknum) {
+ *blocknum = rbuf_blocknum(rb);
+}
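+
+// Illustrative note: a BLOCKNUM written with wbuf_BLOCKNUM() is read back with
+// rbuf_blocknum(), and a DISKOFF written with wbuf_DISKOFF() is read back with
+// rbuf_DISKOFF(); both are stored on disk as 64-bit integers.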
diff --git a/storage/tokudb/PerconaFT/ft/serialize/compress.cc b/storage/tokudb/PerconaFT/ft/serialize/compress.cc
new file mode 100644
index 00000000..584faa5c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/compress.cc
@@ -0,0 +1,259 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <toku_portability.h>
+#include <util/scoped_malloc.h>
+
+#include <zlib.h>
+#include <lzma.h>
+#include <snappy.h>
+
+#include "compress.h"
+#include "memory.h"
+#include "quicklz.h"
+#include "toku_assert.h"
+
+static inline enum toku_compression_method
+normalize_compression_method(enum toku_compression_method method)
+// Effect: resolve "friendly" names like "fast" and "small" into their real values.
+{
+ switch (method) {
+ case TOKU_DEFAULT_COMPRESSION_METHOD:
+ case TOKU_FAST_COMPRESSION_METHOD:
+ return TOKU_QUICKLZ_METHOD;
+ case TOKU_SMALL_COMPRESSION_METHOD:
+ return TOKU_LZMA_METHOD;
+ default:
+ return method; // everything else is fine
+ }
+}
+
+size_t toku_compress_bound (enum toku_compression_method a, size_t size)
+// See compress.h for the specification of this function.
+{
+ a = normalize_compression_method(a);
+ switch (a) {
+ case TOKU_NO_COMPRESSION:
+ return size + 1;
+ case TOKU_LZMA_METHOD:
+        return 1+lzma_stream_buffer_bound(size); // We need one extra for the rfc1950-style header byte (bits 0-3 are TOKU_LZMA_METHOD (1), bits 4-7 are the compression level)
+ case TOKU_QUICKLZ_METHOD:
+ return size+400 + 1; // quicklz manual says 400 bytes is enough. We need one more byte for the rfc1950-style header byte. bits 0-3 are 9, bits 4-7 are the QLZ_COMPRESSION_LEVEL.
+ case TOKU_ZLIB_METHOD:
+ return compressBound (size);
+ case TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD:
+        return 2+deflateBound(nullptr, size); // We need one extra for the rfc1950-style header byte, and one extra to store windowBits (a bit overcautious about future upgrades, maybe).
+ case TOKU_SNAPPY_METHOD:
+ return (1 + snappy::MaxCompressedLength(size));
+ default:
+ break;
+ }
+    // fall through for bad enum (thus the compiler can warn us if we didn't use all the enums)
+ assert(0); return 0;
+}
+
+void toku_compress (enum toku_compression_method a,
+ // the following types and naming conventions come from zlib.h
+ Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen)
+// See compress.h for the specification of this function.
+{
+ static const int zlib_compression_level = 5;
+ static const int zlib_without_checksum_windowbits = -15;
+
+ a = normalize_compression_method(a);
+ switch (a) {
+ case TOKU_NO_COMPRESSION:
+ dest[0] = TOKU_NO_COMPRESSION;
+ memcpy(dest + 1, source, sourceLen);
+ *destLen = sourceLen + 1;
+ return;
+ case TOKU_ZLIB_METHOD: {
+ int r = compress2(dest, destLen, source, sourceLen, zlib_compression_level);
+ assert(r == Z_OK);
+ assert((dest[0]&0xF) == TOKU_ZLIB_METHOD);
+ return;
+ }
+ case TOKU_QUICKLZ_METHOD: {
+ if (sourceLen==0) {
+ // quicklz requires at least one byte, so we handle this ourselves
+ assert(1 <= *destLen);
+ *destLen = 1;
+ } else {
+ toku::scoped_calloc qsc_buf(sizeof(qlz_state_compress));
+ qlz_state_compress *qsc = reinterpret_cast<qlz_state_compress *>(qsc_buf.get());
+ size_t actual_destlen = qlz_compress(source, (char*)(dest+1), sourceLen, qsc);
+ assert(actual_destlen + 1 <= *destLen);
+ // add one for the rfc1950-style header byte.
+ *destLen = actual_destlen + 1;
+ }
+ // Fill in that first byte
+ dest[0] = TOKU_QUICKLZ_METHOD + (QLZ_COMPRESSION_LEVEL << 4);
+ return;
+ }
+ case TOKU_LZMA_METHOD: {
+ const int lzma_compression_level = 2;
+ if (sourceLen==0) {
+ // lzma version 4.999 requires at least one byte, so we'll do it ourselves.
+ assert(1<=*destLen);
+ *destLen = 1;
+ } else {
+ size_t out_pos = 1;
+ lzma_ret r = lzma_easy_buffer_encode(lzma_compression_level,
+ LZMA_CHECK_NONE, NULL,
+ source, sourceLen,
+ dest, &out_pos, *destLen);
+ assert(out_pos < *destLen);
+ if (r != LZMA_OK) {
+ fprintf(stderr, "lzma_easy_buffer_encode() returned %d\n", (int) r);
+ }
+ assert(r==LZMA_OK);
+ *destLen = out_pos;
+ }
+ dest[0] = TOKU_LZMA_METHOD + (lzma_compression_level << 4);
+ return;
+ }
+ case TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD: {
+ z_stream strm;
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ strm.next_in = const_cast<Bytef *>(source);
+ strm.avail_in = sourceLen;
+ int r = deflateInit2(&strm, zlib_compression_level, Z_DEFLATED,
+ zlib_without_checksum_windowbits, 8, Z_DEFAULT_STRATEGY);
+ lazy_assert(r == Z_OK);
+ strm.next_out = dest + 2;
+ strm.avail_out = *destLen - 2;
+ r = deflate(&strm, Z_FINISH);
+ lazy_assert(r == Z_STREAM_END);
+ r = deflateEnd(&strm);
+ lazy_assert(r == Z_OK);
+ *destLen = strm.total_out + 2;
+ dest[0] = TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD + (zlib_compression_level << 4);
+ dest[1] = zlib_without_checksum_windowbits;
+ return;
+ }
+ case TOKU_SNAPPY_METHOD: {
+ size_t tmp_dest= *destLen;
+ snappy::RawCompress((char*)source, sourceLen, (char*)dest + 1,
+ &tmp_dest);
+ *destLen= tmp_dest + 1;
+ dest[0] = TOKU_SNAPPY_METHOD;
+ return;
+ }
+ default:
+ break;
+ }
+ // default fall through to error.
+ assert(0);
+}
+
+void toku_decompress (Bytef *dest, uLongf destLen,
+ const Bytef *source, uLongf sourceLen)
+// See compress.h for the specification of this function.
+{
+ assert(sourceLen>=1); // need at least one byte for the RFC header.
+ switch (source[0] & 0xF) {
+ case TOKU_NO_COMPRESSION:
+ memcpy(dest, source + 1, sourceLen - 1);
+ return;
+ case TOKU_ZLIB_METHOD: {
+ uLongf actual_destlen = destLen;
+ int r = uncompress(dest, &actual_destlen, source, sourceLen);
+ assert(r == Z_OK);
+ assert(actual_destlen == destLen);
+ return;
+ }
+ case TOKU_QUICKLZ_METHOD:
+ if (sourceLen>1) {
+ toku::scoped_calloc state_buf(sizeof(qlz_state_decompress));
+ qlz_state_decompress *qsd = reinterpret_cast<qlz_state_decompress *>(state_buf.get());
+ uLongf actual_destlen = qlz_decompress((char*)source+1, dest, qsd);
+ assert(actual_destlen == destLen);
+ } else {
+ // length 1 means there is no data, so do nothing.
+ assert(destLen==0);
+ }
+ return;
+ case TOKU_LZMA_METHOD: {
+ if (sourceLen>1) {
+ uint64_t memlimit = UINT64_MAX;
+ size_t out_pos = 0;
+ size_t in_pos = 1;
+ lzma_ret r = lzma_stream_buffer_decode(&memlimit, // memlimit, use UINT64_MAX to disable this check
+ 0, // flags
+ NULL, // allocator
+ source, &in_pos, sourceLen,
+ dest, &out_pos, destLen);
+ assert(r==LZMA_OK);
+ assert(out_pos == destLen);
+ } else {
+ // length 1 means there is no data, so do nothing.
+ assert(destLen==0);
+ }
+ return;
+ }
+ case TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD: {
+ z_stream strm;
+ strm.next_in = const_cast<Bytef *>(source + 2);
+ strm.avail_in = sourceLen - 2;
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ int8_t windowBits = source[1];
+ int r = inflateInit2(&strm, windowBits);
+ lazy_assert(r == Z_OK);
+ strm.next_out = dest;
+ strm.avail_out = destLen;
+ r = inflate(&strm, Z_FINISH);
+ lazy_assert(r == Z_STREAM_END);
+ r = inflateEnd(&strm);
+ lazy_assert(r == Z_OK);
+ return;
+ }
+ case TOKU_SNAPPY_METHOD: {
+ bool r = snappy::RawUncompress((char*)source + 1, sourceLen - 1, (char*)dest);
+ assert(r);
+ return;
+ }
+ }
+ // default fall through to error.
+ assert(0);
+}
diff --git a/storage/tokudb/PerconaFT/ft/serialize/compress.h b/storage/tokudb/PerconaFT/ft/serialize/compress.h
new file mode 100644
index 00000000..74307985
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/compress.h
@@ -0,0 +1,78 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <zlib.h>
+#include <db.h>
+
+// The following provides an abstraction over the compression libraries we use (zlib, quicklz, lzma, and snappy).
+// We offer the compression methods ZLIB, QUICKLZ, LZMA, and SNAPPY, as well as a "no compression" option. These options are declared in make_tdb.cc.
+// The resulting byte string includes enough information for us to decompress it. That is, we can tell whether it's z-compressed, qz-compressed, xz-compressed, or snappy-compressed.
+
+size_t toku_compress_bound (enum toku_compression_method a, size_t size);
+// Effect: Return the number of bytes needed to compress a buffer of size SIZE using compression method A.
+// Typically, the result is a little bit larger than SIZE, since some data cannot be compressed.
+// Usage note: It may help to know roughly how much space is involved.
+// zlib's bound is something like size + (size>>12) + (size>>14) + (size>>25) + 13.
+// quicklz's bound is something like size+400.
+
+void toku_compress (enum toku_compression_method a,
+ // the following types and naming conventions come from zlib.h
+ Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen);
+// Effect: Using compression method A, compress SOURCE into DEST. The number of bytes to compress is passed in SOURCELEN.
+// On input: *destLen is the size of the buffer.
+// On output: *destLen is the size of the actual compressed data.
+// Usage note: sourceLen may be zero (unlike for quicklz, which requires sourceLen>0).
+// Requires: The buffer must be big enough to hold the compressed data. (That is *destLen >= compressBound(a, sourceLen))
+// Requires: sourceLen < 2^32.
+// Usage note: Although we *try* to assert if the DESTLEN isn't big enough, it's possible that it's too late by then (in the case of quicklz, which offers
+// no way to avoid a buffer overrun.) So we require that DESTLEN is big enough.
+// Rationale: zlib's argument order is DEST then SOURCE with the size of the buffer passed in *destLen, and the size of the result returned in *destLen.
+// quicklz's argument order is SOURCE then DEST with the size returned (and it has no way to verify that an overrun didn't happen).
+// We use zlib's calling conventions partly because it is safer, and partly because it is more established.
+// We also use zlib's ugly camel case convention for destLen and sourceLen.
+// Unlike zlib, we return no error codes. Instead, we require that the data be OK and the size of the buffers is OK, and assert if there's a problem.
+
+void toku_decompress (Bytef *dest, uLongf destLen,
+ const Bytef *source, uLongf sourceLen);
+// Effect: Decompress source (length sourceLen) into dest (length destLen)
+// This function can decompress data compressed by toku_compress() with any of the supported methods (the header byte tells us which one was used).
+// Requires: destLen is equal to the actual decompressed size of the data.
+// Requires: The source must have been properly compressed.
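+
+// An illustrative round trip (a sketch, not part of the build); `src`, `slen`
+// and `out` are assumed to be provided by the caller:
+//     enum toku_compression_method m = TOKU_ZLIB_METHOD;
+//     uLongf dlen = toku_compress_bound(m, slen);
+//     Bytef *dst = (Bytef *) toku_xmalloc(dlen);  // any allocator will do
+//     toku_compress(m, dst, &dlen, src, slen);    // dlen becomes the compressed size
+//     toku_decompress(out, slen, dst, dlen);      // out must be exactly slen bytes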
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft-node-deserialize.cc b/storage/tokudb/PerconaFT/ft/serialize/ft-node-deserialize.cc
new file mode 100644
index 00000000..de58fb42
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft-node-deserialize.cc
@@ -0,0 +1,186 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/node.h"
+#include "ft/ft-internal.h"
+#include "ft/serialize/ft_node-serialize.h"
+
+/*
+ * ft-node-deserialize.cc -
+ *      This file contains functions used by deserialization
+ *      code paths in and out of the engine. The functions can,
+ *      essentially, be broken up into two types. Some of these
+ *      functions return error codes based on expected values inside
+ *      the fractal tree node; others merely read specific
+ *      quantities of bytes out of the buffer. It is expected
+ *      that these will be called in the correct order by users
+ *      of these functions/this API.
+ *
+ */
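+
+// An illustrative call order (a sketch, not enforced anywhere); `node`,
+// `blocknum`, `rb` and `ndd` are assumed to be provided by the caller:
+//     initialize_ftnode(node, blocknum);
+//     read_and_check_magic(rb);
+//     read_and_check_version(node, rb);
+//     read_node_info(node, rb, node->layout_version_read_from_disk);
+//     allocate_and_read_partition_offsets(node, rb, &ndd);
+//     check_node_info_checksum(rb);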
+
+// Sets initial values for the given fractal tree node to be
+// deserialized
+void
+initialize_ftnode(FTNODE node, BLOCKNUM blocknum)
+{
+ node->fullhash = 0xDEADBEEF; // <CER> Is this 'spoof' ok?
+ node->blocknum = blocknum;
+ node->clear_dirty();
+ node->bp = NULL;
+ // <CER> Can we use this initialization as a correctness assert in
+ // a later function?
+ node->layout_version_read_from_disk = 0;
+}
+
+/************************
+ * TODO: In other deserialization code, we check the rb size member. We
+ * verify that it is greater than or equal to 24. Ignoring this magic
+ * number for a moment, should we put this check in its own function? *
+*************************/
+
+
+// Read and check the 'magic' bytes on disk. Returns an error if
+// the magic does not match.
+int
+read_and_check_magic(struct rbuf *rb)
+{
+ int r = 0;
+ const void *magic;
+ rbuf_literal_bytes(rb, &magic, 8);
+ if (memcmp(magic, "tokuleaf", 8)!=0 &&
+ memcmp(magic, "tokunode", 8)!=0) {
+ r = DB_BADFORMAT; // TODO: Return more meaningful error.
+ }
+
+ return r;
+}
+
+// Reads the version number from the given buffer
+// and returns an error if the version is too old.
+int
+read_and_check_version(FTNODE node, struct rbuf *rb)
+{
+ int r = 0;
+ int version = rbuf_int(rb);
+ node->layout_version_read_from_disk = version;
+ if (version < FT_LAYOUT_MIN_SUPPORTED_VERSION) {
+ r = 1; // TODO: Better error reporting.
+ }
+
+ return r;
+}
+
+// Reads the basic version, build, and child info from
+// the given buffer.
+void
+read_node_info(FTNODE node, struct rbuf *rb, int version)
+{
+ node->layout_version = version;
+ node->layout_version_original = rbuf_int(rb);
+ node->build_id = rbuf_int(rb);
+ node->n_children = rbuf_int(rb);
+}
+
+// Allocates the partitions based on the given node's number
+// of children. It then reads, out of the given buffer,
+// the start and size of each child partition.
+// TODO: Should these be two separate functions?
+void
+allocate_and_read_partition_offsets(FTNODE node, struct rbuf *rb, FTNODE_DISK_DATA *ndd)
+{
+ XMALLOC_N(node->n_children, node->bp);
+ // TODO: Fix this to use xmalloc_n
+ XMALLOC_N(node->n_children, *ndd);
+ // Read the partition locations.
+ for (int i = 0; i < node->n_children; i++) {
+ BP_START(*ndd, i) = rbuf_int(rb);
+ BP_SIZE (*ndd, i) = rbuf_int(rb);
+ }
+}
+
+// Compares the checksum stored in the given buffer with the
+// checksum computed over the buffer itself. If these are NOT
+// equal, this function returns an appropriate error code.
+int
+check_node_info_checksum(struct rbuf *rb)
+{
+ int r = 0;
+ // Verify checksum of header stored.
+ uint32_t checksum = toku_x1764_memory(rb->buf, rb->ndone);
+ uint32_t stored_checksum = rbuf_int(rb);
+
+ if (stored_checksum != checksum) {
+ // TODO: dump_bad_block(rb->buf, rb->size);
+ r = TOKUDB_BAD_CHECKSUM;
+ }
+
+ return r;
+}
+
+// Reads node info from older (13 and 14) fractal tree nodes
+// out of the given buffer.
+void
+read_legacy_node_info(FTNODE node, struct rbuf *rb, int version)
+{
+ (void)rbuf_int(rb); // 1. nodesize
+ node->flags = rbuf_int(rb); // 2. flags
+ node->height = rbuf_int(rb); // 3. height
+
+    // If the version is less than 14, there are two extra ints here
+    // that we need to read and ignore.
+ if (version == FT_LAYOUT_VERSION_13) {
+ (void) rbuf_int(rb); // 4. rand4
+ (void) rbuf_int(rb); // 5. local
+ }
+}
+
+// Assuming the given buffer is in the correct position,
+// this checks to see if the stored checksum matches the
+// checksum of the entire buffer.
+int
+check_legacy_end_checksum(struct rbuf *rb)
+{
+ int r = 0;
+ uint32_t expected_xsum = rbuf_int(rb);
+ uint32_t actual_xsum = toku_x1764_memory(rb->buf, rb->size - 4);
+ if (expected_xsum != actual_xsum) {
+ r = TOKUDB_BAD_CHECKSUM;
+ }
+
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc b/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc
new file mode 100644
index 00000000..0813855b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc
@@ -0,0 +1,914 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/ft.h"
+#include "ft/ft-internal.h"
+#include "ft/msg.h"
+#include "ft/serialize/block_allocator.h"
+#include "ft/serialize/block_table.h"
+#include "ft/serialize/compress.h"
+#include "ft/serialize/ft-serialize.h"
+
+// not version-sensitive because we only serialize a descriptor using the current layout_version
+uint32_t
+toku_serialize_descriptor_size(DESCRIPTOR desc) {
+ //Checksum NOT included in this. Checksum only exists in header's version.
+ uint32_t size = 4; // four bytes for size of descriptor
+ size += desc->dbt.size;
+ return size;
+}
+
+static uint32_t
+deserialize_descriptor_size(DESCRIPTOR desc, int layout_version) {
+ //Checksum NOT included in this. Checksum only exists in header's version.
+ uint32_t size = 4; // four bytes for size of descriptor
+ if (layout_version == FT_LAYOUT_VERSION_13)
+ size += 4; // for version 13, include four bytes of "version"
+ size += desc->dbt.size;
+ return size;
+}
+
+void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, DESCRIPTOR desc) {
+ wbuf_bytes(wb, desc->dbt.data, desc->dbt.size);
+}
+
+//Descriptor is written to disk during toku_ft_handle_open iff we have a new (or changed)
+//descriptor.
+//Descriptors are NOT written during the header checkpoint process.
+void
+toku_serialize_descriptor_contents_to_fd(int fd, DESCRIPTOR desc, DISKOFF offset) {
+ // make the checksum
+ int64_t size = toku_serialize_descriptor_size(desc)+4; //4 for checksum
+ int64_t size_aligned = roundup_to_multiple(512, size);
+ struct wbuf w;
+ char *XMALLOC_N_ALIGNED(512, size_aligned, aligned_buf);
+ for (int64_t i=size; i<size_aligned; i++) aligned_buf[i] = 0;
+ wbuf_init(&w, aligned_buf, size);
+ toku_serialize_descriptor_contents_to_wbuf(&w, desc);
+ {
+ //Add checksum
+ uint32_t checksum = toku_x1764_finish(&w.checksum);
+ wbuf_int(&w, checksum);
+ }
+ lazy_assert(w.ndone==w.size);
+ {
+ // Actually write the descriptor
+ toku_os_full_pwrite(fd, w.buf, size_aligned, offset);
+ }
+ toku_free(w.buf);
+}
+
+static void
+deserialize_descriptor_from_rbuf(struct rbuf *rb, DESCRIPTOR desc, int layout_version) {
+ if (layout_version <= FT_LAYOUT_VERSION_13) {
+ // in older versions of tokuft, the descriptor had a 4 byte
+ // version, which we skip over
+ (void) rbuf_int(rb);
+ }
+
+ uint32_t size;
+ const void *data;
+ rbuf_bytes(rb, &data, &size);
+ toku_memdup_dbt(&desc->dbt, data, size);
+}
+
+static int
+deserialize_descriptor_from(int fd, block_table *bt, DESCRIPTOR desc, int layout_version) {
+ int r = 0;
+ DISKOFF offset;
+ DISKOFF size;
+ unsigned char *dbuf = nullptr;
+ bt->get_descriptor_offset_size(&offset, &size);
+ memset(desc, 0, sizeof(*desc));
+ if (size > 0) {
+ lazy_assert(size>=4); //4 for checksum
+ {
+ ssize_t size_to_malloc = roundup_to_multiple(512, size);
+ XMALLOC_N_ALIGNED(512, size_to_malloc, dbuf);
+ {
+
+ ssize_t sz_read = toku_os_pread(fd, dbuf, size_to_malloc, offset);
+ lazy_assert(sz_read==size_to_malloc);
+ }
+ {
+ // check the checksum
+ uint32_t x1764 = toku_x1764_memory(dbuf, size-4);
+ //printf("%s:%d read from %ld (x1764 offset=%ld) size=%ld\n", __FILE__, __LINE__, block_translation_address_on_disk, offset, block_translation_size_on_disk);
+ uint32_t stored_x1764 = toku_dtoh32(*(int*)(dbuf + size-4));
+ if (x1764 != stored_x1764) {
+ fprintf(stderr, "Descriptor checksum failure: calc=0x%08x read=0x%08x\n", x1764, stored_x1764);
+ r = TOKUDB_BAD_CHECKSUM;
+ toku_free(dbuf);
+ goto exit;
+ }
+ }
+
+ struct rbuf rb = { .buf = dbuf, .size = (unsigned int) size, .ndone = 0 };
+ deserialize_descriptor_from_rbuf(&rb, desc, layout_version);
+ lazy_assert(deserialize_descriptor_size(desc, layout_version) + 4 == size);
+ toku_free(dbuf);
+ }
+ }
+exit:
+ return r;
+}
+
+int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
+// Effect: Deserialize the ft header.
+// We deserialize ft_header only once and then share everything with all the FTs.
+{
+ int r;
+ FT ft = NULL;
+ paranoid_invariant(version >= FT_LAYOUT_MIN_SUPPORTED_VERSION);
+ paranoid_invariant(version <= FT_LAYOUT_VERSION);
+ // We already know:
+ // we have an rbuf representing the header.
+ // The checksum has been validated
+
+ //Verification of initial elements.
+ //Check magic number
+ const void *magic;
+ rbuf_literal_bytes(rb, &magic, 8);
+ lazy_assert(memcmp(magic,"tokudata",8)==0);
+
+ XCALLOC(ft);
+ ft->checkpoint_header = NULL;
+ toku_list_init(&ft->live_ft_handles);
+
+ //version MUST be in network order on disk regardless of disk order
+ ft->layout_version_read_from_disk = rbuf_network_int(rb);
+ invariant(ft->layout_version_read_from_disk >= FT_LAYOUT_MIN_SUPPORTED_VERSION);
+ invariant(ft->layout_version_read_from_disk <= FT_LAYOUT_VERSION);
+
+ //build_id MUST be in network order on disk regardless of disk order
+ uint32_t build_id;
+ build_id = rbuf_network_int(rb);
+
+ //Size MUST be in network order regardless of disk order.
+ uint32_t size;
+ size = rbuf_network_int(rb);
+ lazy_assert(size == rb->size);
+
+ const void *tmp_byte_order_check;
+ lazy_assert((sizeof tmp_byte_order_check) >= 8);
+ rbuf_literal_bytes(rb, &tmp_byte_order_check, 8); //Must not translate byte order
+ int64_t byte_order_stored;
+ byte_order_stored = *(int64_t*)tmp_byte_order_check;
+ lazy_assert(byte_order_stored == toku_byte_order_host);
+
+ uint64_t checkpoint_count;
+ checkpoint_count = rbuf_ulonglong(rb);
+ LSN checkpoint_lsn;
+ checkpoint_lsn = rbuf_LSN(rb);
+ unsigned nodesize;
+ nodesize = rbuf_int(rb);
+ DISKOFF translation_address_on_disk;
+ translation_address_on_disk = rbuf_DISKOFF(rb);
+ DISKOFF translation_size_on_disk;
+ translation_size_on_disk = rbuf_DISKOFF(rb);
+ lazy_assert(translation_address_on_disk > 0);
+ lazy_assert(translation_size_on_disk > 0);
+
+ // initialize the tree lock
+ toku_ft_init_reflock(ft);
+
+ //Load translation table
+ {
+ size_t size_to_read = roundup_to_multiple(512, translation_size_on_disk);
+ unsigned char *XMALLOC_N_ALIGNED(512, size_to_read, tbuf);
+ {
+ // This cast is messed up in 32-bits if the block translation
+ // table is ever more than 4GB. But in that case, the
+ // translation table itself won't fit in main memory.
+ ssize_t readsz = toku_os_pread(fd, tbuf, size_to_read,
+ translation_address_on_disk);
+ invariant(readsz >= translation_size_on_disk);
+ invariant(readsz <= (ssize_t)size_to_read);
+ }
+ // Create table and read in data.
+ r = ft->blocktable.create_from_buffer(fd,
+ translation_address_on_disk,
+ translation_size_on_disk,
+ tbuf);
+ toku_free(tbuf);
+ if (r != 0) {
+ goto exit;
+ }
+ }
+
+ BLOCKNUM root_blocknum;
+ root_blocknum = rbuf_blocknum(rb);
+ unsigned flags;
+ flags = rbuf_int(rb);
+ if (ft->layout_version_read_from_disk <= FT_LAYOUT_VERSION_13) {
+ // deprecate 'TOKU_DB_VALCMP_BUILTIN'. just remove the flag
+ flags &= ~TOKU_DB_VALCMP_BUILTIN_13;
+ }
+ int layout_version_original;
+ layout_version_original = rbuf_int(rb);
+ uint32_t build_id_original;
+ build_id_original = rbuf_int(rb);
+ uint64_t time_of_creation;
+ time_of_creation = rbuf_ulonglong(rb);
+ uint64_t time_of_last_modification;
+ time_of_last_modification = rbuf_ulonglong(rb);
+
+ if (ft->layout_version_read_from_disk <= FT_LAYOUT_VERSION_18) {
+ // 17 was the last version with these fields; we no longer store
+ // them, so read and discard them
+ (void) rbuf_ulonglong(rb); // num_blocks_to_upgrade_13
+ if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_15) {
+ (void) rbuf_ulonglong(rb); // num_blocks_to_upgrade_14
+ }
+ }
+
+ // fake creation during the last checkpoint
+ TXNID root_xid_that_created;
+ root_xid_that_created = checkpoint_lsn.lsn;
+ if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_14) {
+ rbuf_TXNID(rb, &root_xid_that_created);
+ }
+
+ // TODO(leif): get this to default to what's specified, not the
+ // hard-coded default
+ unsigned basementnodesize;
+ basementnodesize = FT_DEFAULT_BASEMENT_NODE_SIZE;
+ uint64_t time_of_last_verification;
+ time_of_last_verification = 0;
+ if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_15) {
+ basementnodesize = rbuf_int(rb);
+ time_of_last_verification = rbuf_ulonglong(rb);
+ }
+
+ STAT64INFO_S on_disk_stats;
+ on_disk_stats = ZEROSTATS;
+ uint64_t time_of_last_optimize_begin;
+ time_of_last_optimize_begin = 0;
+ uint64_t time_of_last_optimize_end;
+ time_of_last_optimize_end = 0;
+ uint32_t count_of_optimize_in_progress;
+ count_of_optimize_in_progress = 0;
+ MSN msn_at_start_of_last_completed_optimize;
+ msn_at_start_of_last_completed_optimize = ZERO_MSN;
+ if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_18) {
+ on_disk_stats.numrows = rbuf_ulonglong(rb);
+ on_disk_stats.numbytes = rbuf_ulonglong(rb);
+ ft->in_memory_stats = on_disk_stats;
+ time_of_last_optimize_begin = rbuf_ulonglong(rb);
+ time_of_last_optimize_end = rbuf_ulonglong(rb);
+ count_of_optimize_in_progress = rbuf_int(rb);
+ msn_at_start_of_last_completed_optimize = rbuf_MSN(rb);
+ }
+
+ enum toku_compression_method compression_method;
+ MSN highest_unused_msn_for_upgrade;
+ highest_unused_msn_for_upgrade.msn = (MIN_MSN.msn - 1);
+ if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_19) {
+ unsigned char method = rbuf_char(rb);
+ compression_method = (enum toku_compression_method) method;
+ highest_unused_msn_for_upgrade = rbuf_MSN(rb);
+ } else {
+ // we hard coded zlib before 5.2, then quicklz in 5.2
+ if (ft->layout_version_read_from_disk < FT_LAYOUT_VERSION_18) {
+ compression_method = TOKU_ZLIB_METHOD;
+ } else {
+ compression_method = TOKU_QUICKLZ_METHOD;
+ }
+ }
+
+ MSN max_msn_in_ft;
+ max_msn_in_ft = ZERO_MSN; // We'll upgrade it from the root node later if necessary
+ if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_21) {
+ max_msn_in_ft = rbuf_MSN(rb);
+ }
+
+ unsigned fanout;
+ fanout = FT_DEFAULT_FANOUT;
+ if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_28) {
+ fanout = rbuf_int(rb);
+ }
+
+ uint64_t on_disk_logical_rows;
+ on_disk_logical_rows = (uint64_t)-1;
+ if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_29) {
+ on_disk_logical_rows = rbuf_ulonglong(rb);
+ }
+ ft->in_memory_logical_rows = on_disk_logical_rows;
+
+ (void) rbuf_int(rb); //Read in checksum and ignore (already verified).
+ if (rb->ndone != rb->size) {
+ fprintf(stderr, "Header size did not match contents.\n");
+ r = EINVAL;
+ goto exit;
+ }
+
+ {
+ struct ft_header h = {
+ .type = FT_CURRENT,
+ .dirty_ = 0,
+ .checkpoint_count = checkpoint_count,
+ .checkpoint_lsn = checkpoint_lsn,
+ .layout_version = FT_LAYOUT_VERSION,
+ .layout_version_original = layout_version_original,
+ .build_id = build_id,
+ .build_id_original = build_id_original,
+ .time_of_creation = time_of_creation,
+ .root_xid_that_created = root_xid_that_created,
+ .time_of_last_modification = time_of_last_modification,
+ .time_of_last_verification = time_of_last_verification,
+ .root_blocknum = root_blocknum,
+ .flags = flags,
+ .nodesize = nodesize,
+ .basementnodesize = basementnodesize,
+ .compression_method = compression_method,
+ .fanout = fanout,
+ .highest_unused_msn_for_upgrade = highest_unused_msn_for_upgrade,
+ .max_msn_in_ft = max_msn_in_ft,
+ .time_of_last_optimize_begin = time_of_last_optimize_begin,
+ .time_of_last_optimize_end = time_of_last_optimize_end,
+ .count_of_optimize_in_progress = count_of_optimize_in_progress,
+ .count_of_optimize_in_progress_read_from_disk = count_of_optimize_in_progress,
+ .msn_at_start_of_last_completed_optimize = msn_at_start_of_last_completed_optimize,
+ .on_disk_stats = on_disk_stats,
+ .on_disk_logical_rows = on_disk_logical_rows
+ };
+ XMEMDUP(ft->h, &h);
+ }
+
+ if (ft->layout_version_read_from_disk < FT_LAYOUT_VERSION_18) {
+ // This needs ft->h to be non-null, so we have to do it after we
+ // read everything else.
+ r = toku_upgrade_subtree_estimates_to_stat64info(fd, ft);
+ if (r != 0) {
+ goto exit;
+ }
+ }
+ if (ft->layout_version_read_from_disk < FT_LAYOUT_VERSION_21) {
+ r = toku_upgrade_msn_from_root_to_header(fd, ft);
+ if (r != 0) {
+ goto exit;
+ }
+ }
+
+ invariant((uint32_t) ft->layout_version_read_from_disk == version);
+ r = deserialize_descriptor_from(fd, &ft->blocktable, &ft->descriptor, version);
+ if (r != 0) {
+ goto exit;
+ }
+
+ // initialize for svn #4541
+ toku_clone_dbt(&ft->cmp_descriptor.dbt, ft->descriptor.dbt);
+
+ // Version 13 descriptors had an extra 4 bytes that we don't read
+ // anymore. Since the header is going to think it's the current
+ // version if it gets written out, we need to write the descriptor in
+ // the new format (without those bytes) before that happens.
+ if (version <= FT_LAYOUT_VERSION_13) {
+ toku_ft_update_descriptor_with_fd(ft, &ft->cmp_descriptor, fd);
+ }
+ r = 0;
+exit:
+ if (r != 0 && ft != NULL) {
+ toku_free(ft);
+ ft = NULL;
+ }
+ *ftp = ft;
+ return r;
+}
+
+static size_t serialize_ft_min_size(uint32_t version) {
+ size_t size = 0;
+
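+ // Each case falls through to the next older version, accumulating the bytes
+ // that version added to (or removed from) the serialized header, so the sum
+ // is the minimum on-disk header size for 'version'.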
+ switch (version) {
+ case FT_LAYOUT_VERSION_29:
+ size += sizeof(uint64_t); // logrows in ft
+ // fallthrough
+ case FT_LAYOUT_VERSION_28:
+ size += sizeof(uint32_t); // fanout in ft
+ // fallthrough
+ case FT_LAYOUT_VERSION_27:
+ case FT_LAYOUT_VERSION_26:
+ case FT_LAYOUT_VERSION_25:
+ case FT_LAYOUT_VERSION_24:
+ case FT_LAYOUT_VERSION_23:
+ case FT_LAYOUT_VERSION_22:
+ case FT_LAYOUT_VERSION_21:
+ size += sizeof(MSN); // max_msn_in_ft
+ // fallthrough
+ case FT_LAYOUT_VERSION_20:
+ case FT_LAYOUT_VERSION_19:
+ size += 1; // compression method
+ size += sizeof(MSN); // highest_unused_msn_for_upgrade
+ // fallthrough
+ case FT_LAYOUT_VERSION_18:
+ size += sizeof(uint64_t); // time_of_last_optimize_begin
+ size += sizeof(uint64_t); // time_of_last_optimize_end
+ size += sizeof(uint32_t); // count_of_optimize_in_progress
+ size += sizeof(MSN); // msn_at_start_of_last_completed_optimize
+ size -= 8; // removed num_blocks_to_upgrade_14
+ size -= 8; // removed num_blocks_to_upgrade_13
+ // fallthrough
+ case FT_LAYOUT_VERSION_17:
+ size += 16;
+ invariant(sizeof(STAT64INFO_S) == 16);
+ // fallthrough
+ case FT_LAYOUT_VERSION_16:
+ case FT_LAYOUT_VERSION_15:
+ size += 4; // basement node size
+ size += 8; // num_blocks_to_upgrade_14 (previously
+ // num_blocks_to_upgrade, now one int each for upgrade
+ // from 13, 14)
+ size += 8; // time of last verification
+ // fallthrough
+ case FT_LAYOUT_VERSION_14:
+ size += 8; // TXNID that created
+ // fallthrough
+ case FT_LAYOUT_VERSION_13:
+ size += (4 // build_id
+ +
+ 4 // build_id_original
+ +
+ 8 // time_of_creation
+ +
+ 8 // time_of_last_modification
+ );
+ // fallthrough
+ case FT_LAYOUT_VERSION_12:
+ size += (+8 // "tokudata"
+ +
+ 4 // version
+ +
+ 4 // original_version
+ +
+ 4 // size
+ +
+ 8 // byte order verification
+ +
+ 8 // checkpoint_count
+ +
+ 8 // checkpoint_lsn
+ +
+ 4 // tree's nodesize
+ +
+ 8 // translation_size_on_disk
+ +
+ 8 // translation_address_on_disk
+ +
+ 4 // checksum
+ +
+ 8 // Number of blocks in old version.
+ +
+ 8 // diskoff
+ +
+ 4 // flags
+ );
+ break;
+ default:
+ abort();
+ }
+
+ lazy_assert(size <= BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE);
+ return size;
+}
+
+int deserialize_ft_from_fd_into_rbuf(int fd,
+ toku_off_t offset_of_header,
+ struct rbuf *rb,
+ uint64_t *checkpoint_count,
+ LSN *checkpoint_lsn,
+ uint32_t *version_p)
+// Effect: Read and parse the header of a fractal tree
+//
+// Simply reading the raw bytes of the header into an rbuf is insensitive
+// to disk format version. If that ever changes, then modify this.
+//
+// TOKUDB_DICTIONARY_NO_HEADER means we can overwrite everything in the
+// file AND the header is useless
+{
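+ // Two-phase read: first read a 512-byte-aligned prefix just large enough to
+ // learn the header's total size, then re-read the full header into rb below.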
+ int r = 0;
+ const int64_t prefix_size = 8 + // magic ("tokudata")
+ 4 + // version
+ 4 + // build_id
+ 4; // size
+ const int64_t read_size = roundup_to_multiple(512, prefix_size);
+ unsigned char *XMALLOC_N_ALIGNED(512, read_size, prefix);
+ rb->buf = NULL;
+ int64_t n = toku_os_pread(fd, prefix, read_size, offset_of_header);
+ if (n != read_size) {
+ if (n == 0) {
+ r = TOKUDB_DICTIONARY_NO_HEADER;
+ } else if (n < 0) {
+ r = get_error_errno();
+ } else {
+ r = EINVAL;
+ }
+ toku_free(prefix);
+ goto exit;
+ }
+
+ rbuf_init(rb, prefix, prefix_size);
+
+ // Check magic number
+ const void *magic;
+ rbuf_literal_bytes(rb, &magic, 8);
+ if (memcmp(magic, "tokudata", 8) != 0) {
+ if ((*(uint64_t *)magic) == 0) {
+ r = TOKUDB_DICTIONARY_NO_HEADER;
+ } else {
+ r = EINVAL; // Not a tokudb file! Do not use.
+ }
+ goto exit;
+ }
+
+ // Version MUST be in network order regardless of disk order.
+ uint32_t version;
+ version = rbuf_network_int(rb);
+ *version_p = version;
+ if (version < FT_LAYOUT_MIN_SUPPORTED_VERSION) {
+ r = TOKUDB_DICTIONARY_TOO_OLD; // Cannot use
+ goto exit;
+ } else if (version > FT_LAYOUT_VERSION) {
+ r = TOKUDB_DICTIONARY_TOO_NEW; // Cannot use
+ goto exit;
+ }
+
+ // build_id MUST be in network order regardless of disk order.
+ uint32_t build_id __attribute__((__unused__));
+ build_id = rbuf_network_int(rb);
+ int64_t min_header_size;
+ min_header_size = serialize_ft_min_size(version);
+
+ // Size MUST be in network order regardless of disk order.
+ uint32_t size;
+ size = rbuf_network_int(rb);
+ // If too big, it is corrupt. We would probably notice during checksum
+ // but may have to do a multi-gigabyte malloc+read to find out.
+ // If it's too small, reading the rbuf would crash, so verify.
+ if (size > BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE ||
+ size < min_header_size) {
+ r = TOKUDB_DICTIONARY_NO_HEADER;
+ goto exit;
+ }
+
+ lazy_assert(rb->ndone == prefix_size);
+ rb->size = size;
+ {
+ toku_free(rb->buf);
+ uint32_t size_to_read = roundup_to_multiple(512, size);
+ XMALLOC_N_ALIGNED(512, size_to_read, rb->buf);
+
+ invariant(offset_of_header % 512 == 0);
+ n = toku_os_pread(fd, rb->buf, size_to_read, offset_of_header);
+ if (n != size_to_read) {
+ if (n < 0) {
+ r = get_error_errno();
+ } else {
+ r = EINVAL; // Header might be useless (wrong size) or could be
+ // a disk read error.
+ }
+ goto exit;
+ }
+ }
+ // It's version 14 or later. Magic looks OK.
+ // We have an rbuf that represents the header.
+ // Size is within acceptable bounds.
+
+ // Verify checksum (FT_LAYOUT_VERSION_13 or later, when checksum function
+ // changed)
+ uint32_t calculated_x1764;
+ calculated_x1764 = toku_x1764_memory(rb->buf, rb->size - 4);
+ uint32_t stored_x1764;
+ stored_x1764 = toku_dtoh32(*(int *)(rb->buf + rb->size - 4));
+ if (calculated_x1764 != stored_x1764) {
+ r = TOKUDB_BAD_CHECKSUM; // Header useless
+ fprintf(stderr,
+ "Header checksum failure: calc=0x%08x read=0x%08x\n",
+ calculated_x1764,
+ stored_x1764);
+ goto exit;
+ }
+
+ // Verify byte order
+ const void *tmp_byte_order_check;
+ lazy_assert((sizeof toku_byte_order_host) == 8);
+ rbuf_literal_bytes(
+ rb, &tmp_byte_order_check, 8); // Must not translate byte order
+ int64_t byte_order_stored;
+ byte_order_stored = *(int64_t *)tmp_byte_order_check;
+ if (byte_order_stored != toku_byte_order_host) {
+ r = TOKUDB_DICTIONARY_NO_HEADER; // Cannot use dictionary
+ goto exit;
+ }
+
+ // Load checkpoint count
+ *checkpoint_count = rbuf_ulonglong(rb);
+ *checkpoint_lsn = rbuf_LSN(rb);
+ // Restart at beginning during regular deserialization
+ rb->ndone = 0;
+
+exit:
+ if (r != 0 && rb->buf != NULL) {
+ toku_free(rb->buf);
+ rb->buf = NULL;
+ }
+ return r;
+}
+
+// Read ft from file into struct. Read both headers and use one.
+// We want the latest acceptable header whose checkpoint_lsn is no later
+// than max_acceptable_lsn.
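+// Checkpoints alternate between two fixed header locations (see
+// toku_serialize_ft_to), so we read both candidates and keep the newest
+// acceptable one.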
+#define dump_state_of_toku_deserialize_ft_from() \
+ fprintf(stderr, \
+ "%s:%d toku_deserialize_ft_from: " \
+ "filename[%s] " \
+ "r[%d] max_acceptable_lsn[%llu]" \
+ "r0[%d] checkpoint_lsn_0[%llu] checkpoint_count_0[%llu] " \
+ "r1[%d] checkpoint_lsn_1[%llu] checkpoint_count_1[%llu]\n", \
+ __FILE__, \
+ __LINE__, \
+ fn, \
+ r, \
+ (ulonglong)max_acceptable_lsn.lsn, \
+ r0, \
+ (ulonglong)checkpoint_lsn_0.lsn, \
+ (ulonglong)checkpoint_count_0, \
+ r1, \
+ (ulonglong)checkpoint_lsn_1.lsn, \
+ (ulonglong)checkpoint_count_1);
+
+int toku_deserialize_ft_from(int fd,
+ const char *fn,
+ LSN max_acceptable_lsn,
+ FT *ft) {
+ struct rbuf rb_0;
+ struct rbuf rb_1;
+ uint64_t checkpoint_count_0 = 0;
+ uint64_t checkpoint_count_1 = 0;
+ LSN checkpoint_lsn_0;
+ LSN checkpoint_lsn_1;
+ uint32_t version_0 = 0, version_1 = 0, version = 0;
+ bool h0_acceptable = false;
+ bool h1_acceptable = false;
+ struct rbuf *rb = NULL;
+ int r0, r1, r = 0;
+
+ toku_off_t header_0_off = 0;
+ r0 = deserialize_ft_from_fd_into_rbuf(fd,
+ header_0_off,
+ &rb_0,
+ &checkpoint_count_0,
+ &checkpoint_lsn_0,
+ &version_0);
+ if (r0 == 0 && checkpoint_lsn_0.lsn <= max_acceptable_lsn.lsn) {
+ h0_acceptable = true;
+ }
+
+ toku_off_t header_1_off = BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
+ r1 = deserialize_ft_from_fd_into_rbuf(fd,
+ header_1_off,
+ &rb_1,
+ &checkpoint_count_1,
+ &checkpoint_lsn_1,
+ &version_1);
+ if (r1 == 0 && checkpoint_lsn_1.lsn <= max_acceptable_lsn.lsn) {
+ h1_acceptable = true;
+ }
+
+ // if either header is too new, the dictionary is unreadable
+ if (r0 == TOKUDB_DICTIONARY_TOO_NEW || r1 == TOKUDB_DICTIONARY_TOO_NEW ||
+ !(h0_acceptable || h1_acceptable)) {
+ // We were unable to read either header or at least one is too
+ // new. Certain errors are higher priority than others. Order of
+ // these if/else if is important.
+ if (r0 == TOKUDB_DICTIONARY_TOO_NEW ||
+ r1 == TOKUDB_DICTIONARY_TOO_NEW) {
+ r = TOKUDB_DICTIONARY_TOO_NEW;
+ } else if (r0 == TOKUDB_DICTIONARY_TOO_OLD ||
+ r1 == TOKUDB_DICTIONARY_TOO_OLD) {
+ r = TOKUDB_DICTIONARY_TOO_OLD;
+ } else if (r0 == TOKUDB_BAD_CHECKSUM && r1 == TOKUDB_BAD_CHECKSUM) {
+ fprintf(stderr, "Both header checksums failed.\n");
+ r = TOKUDB_BAD_CHECKSUM;
+ } else if (r0 == TOKUDB_DICTIONARY_NO_HEADER ||
+ r1 == TOKUDB_DICTIONARY_NO_HEADER) {
+ r = TOKUDB_DICTIONARY_NO_HEADER;
+ } else {
+ r = r0 ? r0 : r1; // Arbitrarily report the error from the
+ // first header, unless it's readable
+ }
+
+ if (r != TOKUDB_DICTIONARY_NO_HEADER) {
+ dump_state_of_toku_deserialize_ft_from();
+ }
+
+ // it should not be possible for both headers to be later than the
+ // max_acceptable_lsn
+ invariant(
+ !((r0 == 0 && checkpoint_lsn_0.lsn > max_acceptable_lsn.lsn) &&
+ (r1 == 0 && checkpoint_lsn_1.lsn > max_acceptable_lsn.lsn)));
+ invariant(r != 0);
+ goto exit;
+ }
+
+ if (h0_acceptable && h1_acceptable) {
+ if (checkpoint_count_0 > checkpoint_count_1) {
+ if (!(checkpoint_count_0 == checkpoint_count_1 + 1) ||
+ !(version_0 >= version_1)) {
+ dump_state_of_toku_deserialize_ft_from();
+ }
+ invariant(checkpoint_count_0 == checkpoint_count_1 + 1);
+ invariant(version_0 >= version_1);
+ rb = &rb_0;
+ version = version_0;
+ } else {
+ if (!(checkpoint_count_1 == checkpoint_count_0 + 1) ||
+ !(version_1 >= version_0)) {
+ dump_state_of_toku_deserialize_ft_from();
+ }
+ invariant(checkpoint_count_1 == checkpoint_count_0 + 1);
+ invariant(version_1 >= version_0);
+ rb = &rb_1;
+ version = version_1;
+ }
+ } else if (h0_acceptable) {
+ if (r1 == TOKUDB_BAD_CHECKSUM) {
+ // print something reassuring
+ fprintf(
+ stderr,
+ "Header 2 checksum failed, but header 1 ok. Proceeding.\n");
+ dump_state_of_toku_deserialize_ft_from();
+ }
+ rb = &rb_0;
+ version = version_0;
+ } else if (h1_acceptable) {
+ if (r0 == TOKUDB_BAD_CHECKSUM) {
+ // print something reassuring
+ fprintf(
+ stderr,
+ "Header 1 checksum failed, but header 2 ok. Proceeding.\n");
+ dump_state_of_toku_deserialize_ft_from();
+ }
+ rb = &rb_1;
+ version = version_1;
+ }
+
+ if (!rb) {
+ dump_state_of_toku_deserialize_ft_from();
+ }
+ paranoid_invariant(rb);
+ r = deserialize_ft_versioned(fd, rb, ft, version);
+
+exit:
+ if (rb_0.buf) {
+ toku_free(rb_0.buf);
+ }
+ if (rb_1.buf) {
+ toku_free(rb_1.buf);
+ }
+ return r;
+}
+
+size_t toku_serialize_ft_size(FT_HEADER h) {
+ size_t size = serialize_ft_min_size(h->layout_version);
+ // There is no dynamic data.
+ lazy_assert(size <= BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE);
+ return size;
+}
+
+void toku_serialize_ft_to_wbuf (
+ struct wbuf *wbuf,
+ FT_HEADER h,
+ DISKOFF translation_location_on_disk,
+ DISKOFF translation_size_on_disk
+ )
+{
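+ // Note: the field order written here must match the reads in
+ // deserialize_ft_versioned (current-layout-version fields only).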
+ wbuf_literal_bytes(wbuf, "tokudata", 8);
+ wbuf_network_int (wbuf, h->layout_version); //MUST be in network order regardless of disk order
+ wbuf_network_int (wbuf, BUILD_ID); //MUST be in network order regardless of disk order
+ wbuf_network_int (wbuf, wbuf->size); //MUST be in network order regardless of disk order
+ wbuf_literal_bytes(wbuf, &toku_byte_order_host, 8); //Must not translate byte order
+ wbuf_ulonglong(wbuf, h->checkpoint_count);
+ wbuf_LSN (wbuf, h->checkpoint_lsn);
+ wbuf_int (wbuf, h->nodesize);
+
+ wbuf_DISKOFF(wbuf, translation_location_on_disk);
+ wbuf_DISKOFF(wbuf, translation_size_on_disk);
+ wbuf_BLOCKNUM(wbuf, h->root_blocknum);
+ wbuf_int(wbuf, h->flags);
+ wbuf_int(wbuf, h->layout_version_original);
+ wbuf_int(wbuf, h->build_id_original);
+ wbuf_ulonglong(wbuf, h->time_of_creation);
+ wbuf_ulonglong(wbuf, h->time_of_last_modification);
+ wbuf_TXNID(wbuf, h->root_xid_that_created);
+ wbuf_int(wbuf, h->basementnodesize);
+ wbuf_ulonglong(wbuf, h->time_of_last_verification);
+ wbuf_ulonglong(wbuf, h->on_disk_stats.numrows);
+ wbuf_ulonglong(wbuf, h->on_disk_stats.numbytes);
+ wbuf_ulonglong(wbuf, h->time_of_last_optimize_begin);
+ wbuf_ulonglong(wbuf, h->time_of_last_optimize_end);
+ wbuf_int(wbuf, h->count_of_optimize_in_progress);
+ wbuf_MSN(wbuf, h->msn_at_start_of_last_completed_optimize);
+ wbuf_char(wbuf, (unsigned char) h->compression_method);
+ wbuf_MSN(wbuf, h->highest_unused_msn_for_upgrade);
+ wbuf_MSN(wbuf, h->max_msn_in_ft);
+ wbuf_int(wbuf, h->fanout);
+ wbuf_ulonglong(wbuf, h->on_disk_logical_rows);
+ uint32_t checksum = toku_x1764_finish(&wbuf->checksum);
+ wbuf_int(wbuf, checksum);
+ lazy_assert(wbuf->ndone == wbuf->size);
+}
+
+void toku_serialize_ft_to(int fd, FT_HEADER h, block_table *bt, CACHEFILE cf) {
+ lazy_assert(h->type == FT_CHECKPOINT_INPROGRESS);
+ struct wbuf w_translation;
+ int64_t size_translation;
+ int64_t address_translation;
+
+ // Must serialize translation first, to get address,size for header.
+ bt->serialize_translation_to_wbuf(
+ fd, &w_translation, &address_translation, &size_translation);
+ invariant(size_translation == w_translation.ndone);
+
+ // the number of bytes available in the buffer is 0 mod 512, and those last
+ // bytes are all initialized.
+ invariant(w_translation.size % 512 == 0);
+
+ struct wbuf w_main;
+ size_t size_main = toku_serialize_ft_size(h);
+ size_t size_main_aligned = roundup_to_multiple(512, size_main);
+ invariant(size_main_aligned <
+ BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE);
+ char *XMALLOC_N_ALIGNED(512, size_main_aligned, mainbuf);
+ for (size_t i = size_main; i < size_main_aligned; i++)
+ mainbuf[i] = 0; // initialize the end of the buffer with zeros
+ wbuf_init(&w_main, mainbuf, size_main);
+ toku_serialize_ft_to_wbuf(
+ &w_main, h, address_translation, size_translation);
+ lazy_assert(w_main.ndone == size_main);
+
+ // Actually write translation table
+ // This write reads only initialized bytes at the end of the buffer, since
+ // w_translation.buf is padded with zeros to a 512-byte boundary.
+ toku_os_full_pwrite(fd,
+ w_translation.buf,
+ roundup_to_multiple(512, size_translation),
+ address_translation);
+
+ // Everything but the header MUST be on disk before header starts.
+ // Otherwise we will think the header is good and some blocks might not
+ // yet be on disk.
+ // If the header has a cachefile we need to do cachefile fsync (to
+ // prevent crash if we redirected to dev null)
+ // If there is no cachefile we still need to do an fsync.
+ if (cf) {
+ toku_cachefile_fsync(cf);
+ } else {
+ toku_file_fsync(fd);
+ }
+
+ // Alternate writing header to two locations:
+ // Beginning (0) or BLOCK_ALLOCATOR_HEADER_RESERVE
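+ // (odd checkpoint counts land at offset 0, even counts at the reserve offset)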
+ toku_off_t main_offset;
+ main_offset = (h->checkpoint_count & 0x1)
+ ? 0
+ : BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
+ toku_os_full_pwrite(fd, w_main.buf, size_main_aligned, main_offset);
+ toku_free(w_main.buf);
+ toku_free(w_translation.buf);
+}
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.h b/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.h
new file mode 100644
index 00000000..144e1885
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.h
@@ -0,0 +1,73 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/ft.h"
+#include "ft/serialize/block_table.h"
+
+size_t toku_serialize_ft_size(struct ft_header *h);
+void toku_serialize_ft_to(int fd,
+ struct ft_header *h,
+ block_table *bt,
+ CACHEFILE cf);
+void toku_serialize_ft_to_wbuf(struct wbuf *wbuf,
+ struct ft_header *h,
+ DISKOFF translation_location_on_disk,
+ DISKOFF translation_size_on_disk);
+void toku_serialize_descriptor_contents_to_fd(int fd,
+ DESCRIPTOR desc,
+ DISKOFF offset);
+void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb,
+ DESCRIPTOR desc);
+int toku_deserialize_ft_from(int fd,
+ const char *fn,
+ LSN max_acceptable_lsn,
+ FT *ft);
+
+// TODO rename
+int deserialize_ft_from_fd_into_rbuf(int fd,
+ toku_off_t offset_of_header,
+ struct rbuf *rb,
+ uint64_t *checkpoint_count,
+ LSN *checkpoint_lsn,
+ uint32_t *version_p);
+
+// used by verify
+// TODO rename
+int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version);
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft_layout_version.h b/storage/tokudb/PerconaFT/ft/serialize/ft_layout_version.h
new file mode 100644
index 00000000..9407a568
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft_layout_version.h
@@ -0,0 +1,80 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+//Must be defined before other recursive headers could include logger/recover.h
+enum ft_layout_version_e {
+ FT_LAYOUT_VERSION_5 = 5,
+ FT_LAYOUT_VERSION_6 = 6, // Diff from 5 to 6: Add leafentry_estimate
+ FT_LAYOUT_VERSION_7 = 7, // Diff from 6 to 7: Add exact-bit to leafentry_estimate #818, add magic to header #22, add per-subdatase flags #333
+ FT_LAYOUT_VERSION_8 = 8, // Diff from 7 to 8: Use murmur instead of crc32. We are going to make a simplification and stop supporting version 7 and before. Current As of Beta 1.0.6
+ FT_LAYOUT_VERSION_9 = 9, // Diff from 8 to 9: Variable-sized blocks and compression.
+ FT_LAYOUT_VERSION_10 = 10, // Diff from 9 to 10: Variable number of compressed sub-blocks per block, disk byte order == intel byte order, Subtree estimates instead of just leafentry estimates, translation table, dictionary descriptors, checksum in header, subdb support removed from ft layer
+ FT_LAYOUT_VERSION_11 = 11, // Diff from 10 to 11: Nested transaction leafentries (completely redesigned). FT_CMDs on disk now support XIDS (multiple txnids) instead of exactly one.
+ FT_LAYOUT_VERSION_12 = 12, // Diff from 11 to 12: Added FT_CMD 'FT_INSERT_NO_OVERWRITE', compressed block format, num old blocks
+ FT_LAYOUT_VERSION_13 = 13, // Diff from 12 to 13: Fixed loader pivot bug, added build_id to every node, timestamps to ft
+ FT_LAYOUT_VERSION_14 = 14, // Diff from 13 to 14: Added MVCC; deprecated TOKU_DB_VALCMP_BUILTIN(_13); Remove fingerprints; Support QUICKLZ; add end-to-end checksum on uncompressed data.
+ FT_LAYOUT_VERSION_15 = 15, // Diff from 14 to 15: basement nodes, last verification time
+ FT_LAYOUT_VERSION_16 = 16, // Dr. No: No subtree estimates, partition layout information represented more transparently.
+ // ALERT ALERT ALERT: version 16 never released to customers, internal and beta use only
+ FT_LAYOUT_VERSION_17 = 17, // Dr. No: Add STAT64INFO_S to ft header
+ FT_LAYOUT_VERSION_18 = 18, // Dr. No: Add HOT info to ft header
+ FT_LAYOUT_VERSION_19 = 19, // Doofenshmirtz: Add compression method, highest_unused_msn_for_upgrade
+ FT_LAYOUT_VERSION_20 = 20, // Deadshot: Add compression method to log_fcreate,
+ // mgr_last_xid after begin checkpoint,
+ // last_xid to shutdown
+ FT_LAYOUT_VERSION_21 = 21, // Ming: Add max_msn_in_ft to header,
+ // Removed log suppression logentry
+ FT_LAYOUT_VERSION_22 = 22, // Ming: Add oldest known referenced xid to each ftnode, for better garbage collection
+ FT_LAYOUT_VERSION_23 = 23, // Ming: Fix upgrade path #5902
+ FT_LAYOUT_VERSION_24 = 24, // Riddler: change logentries that log transactions to store TXNID_PAIRs instead of TXNIDs
+ FT_LAYOUT_VERSION_25 = 25, // SecretSquirrel: ROLLBACK_LOG_NODES (on disk and in memory) now just use blocknum (instead of blocknum + hash) to point to other log nodes. same for xstillopen log entry
+ FT_LAYOUT_VERSION_26 = 26, // Hojo: basements store key/vals separately on disk for fixed klpair length BNs
+ FT_LAYOUT_VERSION_27 = 27, // serialize message trees with nonleaf buffers to avoid key, msn sort on deserialize
+ FT_LAYOUT_VERSION_28 = 28, // Add fanout to ft_header
+ FT_LAYOUT_VERSION_29 = 29, // Add logrows to ft_header
+ FT_NEXT_VERSION, // the version after the current version
+ FT_LAYOUT_VERSION = FT_NEXT_VERSION-1, // A hack so I don't have to change this line.
+ FT_LAYOUT_MIN_SUPPORTED_VERSION = FT_LAYOUT_VERSION_13, // Minimum version supported
+
+ // Define these symbolically so the knowledge of exactly which layout version got rid of fingerprints isn't spread all over the code.
+ FT_LAST_LAYOUT_VERSION_WITH_FINGERPRINT = FT_LAYOUT_VERSION_13,
+ FT_FIRST_LAYOUT_VERSION_WITH_END_TO_END_CHECKSUM = FT_LAYOUT_VERSION_14,
+ FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES = FT_LAYOUT_VERSION_15,
+};
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
new file mode 100644
index 00000000..e6648b76
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
@@ -0,0 +1,3259 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "portability/toku_atomic.h"
+
+#include "ft/cachetable/cachetable.h"
+#include "ft/ft.h"
+#include "ft/ft-internal.h"
+#include "ft/node.h"
+#include "ft/logger/log-internal.h"
+#include "ft/txn/rollback.h"
+#include "ft/serialize/block_allocator.h"
+#include "ft/serialize/block_table.h"
+#include "ft/serialize/compress.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "ft/serialize/sub_block.h"
+#include "util/sort.h"
+#include "util/threadpool.h"
+#include "util/status.h"
+#include "util/scoped_malloc.h"
+
+static FT_UPGRADE_STATUS_S ft_upgrade_status;
+
+#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ft_upgrade_status, k, c, t, "ft upgrade: " l, inc)
+
+static void
+status_init(void)
+{
+ // Note, this function initializes the keyname, type, and legend fields.
+ // Value fields are initialized to zero by compiler.
+ STATUS_INIT(FT_UPGRADE_FOOTPRINT, nullptr, UINT64, "footprint", TOKU_ENGINE_STATUS);
+ ft_upgrade_status.initialized = true;
+}
+#undef STATUS_INIT
+
+#define UPGRADE_STATUS_VALUE(x) ft_upgrade_status.status[x].value.num
+
+void
+toku_ft_upgrade_get_status(FT_UPGRADE_STATUS s) {
+ if (!ft_upgrade_status.initialized) {
+ status_init();
+ }
+ UPGRADE_STATUS_VALUE(FT_UPGRADE_FOOTPRINT) = toku_log_upgrade_get_footprint();
+ *s = ft_upgrade_status;
+}
+
+static int num_cores = 0; // cache the number of cores for the parallelization
+static struct toku_thread_pool *ft_pool = NULL;
+bool toku_serialize_in_parallel;
+
+int get_num_cores(void) {
+ return num_cores;
+}
+
+struct toku_thread_pool *get_ft_pool(void) {
+ return ft_pool;
+}
+
+void toku_serialize_set_parallel(bool in_parallel) {
+ toku_unsafe_set(&toku_serialize_in_parallel, in_parallel);
+}
+
+void toku_ft_serialize_layer_init(void) {
+ num_cores = toku_os_get_number_active_processors();
+ int r = toku_thread_pool_create(&ft_pool, num_cores);
+ lazy_assert_zero(r);
+ toku_serialize_in_parallel = false;
+}
+
+void toku_ft_serialize_layer_destroy(void) {
+ toku_thread_pool_destroy(&ft_pool);
+}
+
+enum { FILE_CHANGE_INCREMENT = (16 << 20) };
+
+static inline uint64_t
+alignup64(uint64_t a, uint64_t b) {
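+ // round a up to the nearest multiple of b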
+ return ((a+b-1)/b)*b;
+}
+
+// safe_file_size_lock must be held.
+void
+toku_maybe_truncate_file (int fd, uint64_t size_used, uint64_t expected_size, uint64_t *new_sizep)
+// Effect: If file size >= SIZE+32MiB, reduce file size.
+// (32 instead of 16.. hysteresis).
+// Stores the resulting file size in *new_sizep.
+{
+ int64_t file_size;
+ {
+ int r = toku_os_get_file_size(fd, &file_size);
+ lazy_assert_zero(r);
+ invariant(file_size >= 0);
+ }
+ invariant(expected_size == (uint64_t)file_size);
+ // If file space is overallocated by at least 32M
+ if ((uint64_t)file_size >= size_used + (2*FILE_CHANGE_INCREMENT)) {
+ toku_off_t new_size = alignup64(size_used, (2*FILE_CHANGE_INCREMENT)); //Truncate to new size_used.
+ invariant(new_size < file_size);
+ invariant(new_size >= 0);
+ int r = ftruncate(fd, new_size);
+ lazy_assert_zero(r);
+ *new_sizep = new_size;
+ }
+ else {
+ *new_sizep = file_size;
+ }
+ return;
+}
+
+static int64_t
+min64(int64_t a, int64_t b) {
+ if (a<b) return a;
+ return b;
+}
+
+void
+toku_maybe_preallocate_in_file (int fd, int64_t size, int64_t expected_size, int64_t *new_size)
+// Effect: make the file bigger by either doubling it or growing by 16MiB, whichever is less, until it is at least 'size'.
+// Stores the resulting file size in *new_size.
+{
+ int64_t file_size = 0;
+ //TODO(yoni): Allow variable stripe_width (perhaps from ft) for larger raids
+ const uint64_t stripe_width = 4096;
+ {
+ int r = toku_os_get_file_size(fd, &file_size);
+ if (r != 0) { // debug #2463
+ int the_errno = get_maybe_error_errno();
+ fprintf(stderr, "%s:%d fd=%d size=%" PRIu64 " r=%d errno=%d\n", __FUNCTION__, __LINE__, fd, size, r, the_errno); fflush(stderr);
+ }
+ lazy_assert_zero(r);
+ }
+ invariant(file_size >= 0);
+ invariant(expected_size == file_size);
+ // We want to double the size of the file, or add 16MiB, whichever is less.
+ // We emulate calling this function repeatedly until it satisfies the request.
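+ // For example (illustrative only): a 10MiB file asked to grow to at least
+ // 100MiB accumulates to_write as 10 -> 26 -> 42 -> 58 -> 74 -> 90 MiB
+ // (double once, then 16MiB steps), and the zeros are written in one pwrite.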
+ int64_t to_write = 0;
+ if (file_size == 0) {
+ // Prevent infinite loop by starting with stripe_width as a base case.
+ to_write = stripe_width;
+ }
+ while (file_size + to_write < size) {
+ to_write += alignup64(min64(file_size + to_write, FILE_CHANGE_INCREMENT), stripe_width);
+ }
+ if (to_write > 0) {
+ assert(to_write%512==0);
+ toku::scoped_malloc_aligned wbuf_aligned(to_write, 512);
+ char *wbuf = reinterpret_cast<char *>(wbuf_aligned.get());
+ memset(wbuf, 0, to_write);
+ toku_off_t start_write = alignup64(file_size, stripe_width);
+ invariant(start_write >= file_size);
+ toku_os_full_pwrite(fd, wbuf, to_write, start_write);
+ *new_size = start_write + to_write;
+ }
+ else {
+ *new_size = file_size;
+ }
+}
+
+// Don't include the sub_block header
+// Overhead calculated in same order fields are written to wbuf
+enum {
+ node_header_overhead = (8+ // magic "tokunode" or "tokuleaf" or "tokuroll"
+ 4+ // layout_version
+ 4+ // layout_version_original
+ 4), // build_id
+};
+
+// uncompressed header offsets
+enum {
+ uncompressed_magic_offset = 0,
+ uncompressed_version_offset = 8,
+};
+
+static uint32_t
+serialize_node_header_size(FTNODE node) {
+ uint32_t retval = 0;
+ retval += 8; // magic
+ retval += sizeof(node->layout_version);
+ retval += sizeof(node->layout_version_original);
+ retval += 4; // BUILD_ID
+ retval += 4; // n_children
+ retval += node->n_children*8; // encode start offset and length of each partition
+ retval += 4; // checksum
+ return retval;
+}
+
+static void
+serialize_node_header(FTNODE node, FTNODE_DISK_DATA ndd, struct wbuf *wbuf) {
+ if (node->height == 0)
+ wbuf_nocrc_literal_bytes(wbuf, "tokuleaf", 8);
+ else
+ wbuf_nocrc_literal_bytes(wbuf, "tokunode", 8);
+ paranoid_invariant(node->layout_version == FT_LAYOUT_VERSION);
+ wbuf_nocrc_int(wbuf, node->layout_version);
+ wbuf_nocrc_int(wbuf, node->layout_version_original);
+ wbuf_nocrc_uint(wbuf, BUILD_ID);
+ wbuf_nocrc_int (wbuf, node->n_children);
+ for (int i=0; i<node->n_children; i++) {
+ assert(BP_SIZE(ndd,i)>0);
+ wbuf_nocrc_int(wbuf, BP_START(ndd, i)); // save the beginning of the partition
+ wbuf_nocrc_int(wbuf, BP_SIZE (ndd, i)); // and the size
+ }
+ // checksum the header
+ uint32_t end_to_end_checksum = toku_x1764_memory(wbuf->buf, wbuf_get_woffset(wbuf));
+ wbuf_nocrc_int(wbuf, end_to_end_checksum);
+ invariant(wbuf->ndone == wbuf->size);
+}
+
+static uint32_t
+serialize_ftnode_partition_size (FTNODE node, int i)
+{
+ uint32_t result = 0;
+ paranoid_invariant(node->bp[i].state == PT_AVAIL);
+ result++; // Byte that states what the partition is
+ if (node->height > 0) {
+ NONLEAF_CHILDINFO bnc = BNC(node, i);
+ // number of messages (4 bytes) plus size of the buffer
+ result += (4 + toku_bnc_nbytesinbuf(bnc));
+ // number of offsets (4 bytes) plus an array of 4 byte offsets, for each message tree
+ result += (4 + (4 * bnc->fresh_message_tree.size()));
+ result += (4 + (4 * bnc->stale_message_tree.size()));
+ result += (4 + (4 * bnc->broadcast_list.size()));
+ }
+ else {
+ result += 4 + bn_data::HEADER_LENGTH; // n_entries in buffer table + basement header
+ result += BLB_NBYTESINDATA(node, i);
+ }
+ result += 4; // checksum
+ return result;
+}
+
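+// One-byte tags written at the start of each serialized partition: basement
+// nodes (leaf) use FTNODE_PARTITION_DMT_LEAVES, message buffers (nonleaf) use
+// FTNODE_PARTITION_MSG_BUFFER.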
+#define FTNODE_PARTITION_DMT_LEAVES 0xaa
+#define FTNODE_PARTITION_MSG_BUFFER 0xbb
+
+UU() static int
+assert_fresh(const int32_t &offset, const uint32_t UU(idx), message_buffer *const msg_buffer) {
+ bool is_fresh = msg_buffer->get_freshness(offset);
+ assert(is_fresh);
+ return 0;
+}
+
+UU() static int
+assert_stale(const int32_t &offset, const uint32_t UU(idx), message_buffer *const msg_buffer) {
+ bool is_fresh = msg_buffer->get_freshness(offset);
+ assert(!is_fresh);
+ return 0;
+}
+
+static void bnc_verify_message_trees(NONLEAF_CHILDINFO UU(bnc)) {
+#ifdef TOKU_DEBUG_PARANOID
+ bnc->fresh_message_tree.iterate<message_buffer, assert_fresh>(&bnc->msg_buffer);
+ bnc->stale_message_tree.iterate<message_buffer, assert_stale>(&bnc->msg_buffer);
+#endif
+}
+
+static int
+wbuf_write_offset(const int32_t &offset, const uint32_t UU(idx), struct wbuf *const wb) {
+ wbuf_nocrc_int(wb, offset);
+ return 0;
+}
+
+static void serialize_child_buffer(NONLEAF_CHILDINFO bnc, struct wbuf *wb) {
+ unsigned char ch = FTNODE_PARTITION_MSG_BUFFER;
+ wbuf_nocrc_char(wb, ch);
+
+ // serialize the message buffer
+ bnc->msg_buffer.serialize_to_wbuf(wb);
+
+ // serialize the message trees (num entries, offsets array):
+ // first, verify their contents are consistent with the message buffer
+ bnc_verify_message_trees(bnc);
+
+ // fresh
+ wbuf_nocrc_int(wb, bnc->fresh_message_tree.size());
+ bnc->fresh_message_tree.iterate<struct wbuf, wbuf_write_offset>(wb);
+
+ // stale
+ wbuf_nocrc_int(wb, bnc->stale_message_tree.size());
+ bnc->stale_message_tree.iterate<struct wbuf, wbuf_write_offset>(wb);
+
+ // broadcast
+ wbuf_nocrc_int(wb, bnc->broadcast_list.size());
+ bnc->broadcast_list.iterate<struct wbuf, wbuf_write_offset>(wb);
+}
+
+//
+// Serialize the i'th partition of node into sb
+// For leaf nodes, this would be the i'th basement node
+// For internal nodes, this would be the i'th message buffer (child buffer)
+//
+static void
+serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) {
+ // Caller should have allocated memory.
+ invariant_notnull(sb->uncompressed_ptr);
+ invariant(sb->uncompressed_size > 0);
+ paranoid_invariant(sb->uncompressed_size == serialize_ftnode_partition_size(node, i));
+
+ //
+ // Now put the data into sb->uncompressed_ptr
+ //
+ struct wbuf wb;
+ wbuf_init(&wb, sb->uncompressed_ptr, sb->uncompressed_size);
+ if (node->height > 0) {
+ // TODO: (Zardosht) possibly exit early if there are no messages
+ serialize_child_buffer(BNC(node, i), &wb);
+ }
+ else {
+ unsigned char ch = FTNODE_PARTITION_DMT_LEAVES;
+ bn_data* bd = BLB_DATA(node, i);
+
+ wbuf_nocrc_char(&wb, ch);
+ wbuf_nocrc_uint(&wb, bd->num_klpairs());
+
+ bd->serialize_to_wbuf(&wb);
+ }
+ uint32_t end_to_end_checksum = toku_x1764_memory(sb->uncompressed_ptr, wbuf_get_woffset(&wb));
+ wbuf_nocrc_int(&wb, end_to_end_checksum);
+ invariant(wb.ndone == wb.size);
+ invariant(sb->uncompressed_size==wb.ndone);
+}
+
+//
+// Takes the data in sb->uncompressed_ptr, and compresses it
+// into a newly allocated buffer sb->compressed_ptr
+//
+static void
+compress_ftnode_sub_block(struct sub_block *sb, enum toku_compression_method method) {
+ invariant(sb->compressed_ptr != nullptr);
+ invariant(sb->compressed_size_bound > 0);
+ paranoid_invariant(sb->compressed_size_bound == toku_compress_bound(method, sb->uncompressed_size));
+
+ //
+ // This probably seems a bit complicated. Here is what is going on.
+ // In PerconaFT 5.0, sub_blocks were compressed and the compressed data
+ // was checksummed. The checksum did NOT include the size of the compressed data
+ // and the size of the uncompressed data. The fields of sub_block only reference the
+ // compressed data, and it is the responsibility of the user of the sub_block
+ // to write the length
+ //
+ // For Dr. No, we want the checksum to also include the size of the compressed data, and the
+ // size of the decompressed data, because this data
+ // may be read off of disk alone, so it must be verifiable alone.
+ //
+ // So, we pass in a buffer to compress_nocrc_sub_block that starts 8 bytes after the beginning
+ // of sb->compressed_ptr, so we have space to put in the sizes, and then run the checksum.
+ //
+ sb->compressed_size = compress_nocrc_sub_block(
+ sb,
+ (char *)sb->compressed_ptr + 8,
+ sb->compressed_size_bound,
+ method
+ );
+
+ uint32_t* extra = (uint32_t *)(sb->compressed_ptr);
+ // store the compressed and uncompressed size at the beginning
+ extra[0] = toku_htod32(sb->compressed_size);
+ extra[1] = toku_htod32(sb->uncompressed_size);
+ // now checksum the entire thing
+ sb->compressed_size += 8; // now add the eight bytes that we saved for the sizes
+ sb->xsum = toku_x1764_memory(sb->compressed_ptr,sb->compressed_size);
+
+ //
+ // This is the end result for Dr. No and forward. For ftnodes, sb->compressed_ptr contains
+ // two integers at the beginning, the size and uncompressed size, and then the compressed
+ // data. sb->xsum contains the checksum of this entire thing.
+ //
+ // In PerconaFT 5.0, sb->compressed_ptr only contained the compressed data, sb->xsum
+ // checksummed only the compressed data, and the checksumming of the sizes were not
+ // done here.
+ //
+}
+
+//
+// Returns the size needed to serialize the ftnode info
+// Does not include header information that is common with rollback logs
+// such as the magic, layout_version, and build_id
+// Includes only node specific info such as pivot information, n_children, and so on
+//
+static uint32_t
+serialize_ftnode_info_size(FTNODE node)
+{
+ uint32_t retval = 0;
+ retval += 8; // max_msn_applied_to_node_on_disk
+ retval += 4; // nodesize
+ retval += 4; // flags
+ retval += 4; // height;
+ retval += 8; // oldest_referenced_xid_known
+ retval += node->pivotkeys.serialized_size();
+ retval += (node->n_children-1)*4; // encode length of each pivot
+ if (node->height > 0) {
+ retval += node->n_children*8; // child blocknum's
+ }
+ retval += 4; // checksum
+ return retval;
+}
+
+static void serialize_ftnode_info(FTNODE node, SUB_BLOCK sb) {
+ // Memory must have been allocated by our caller.
+ invariant(sb->uncompressed_size > 0);
+ invariant_notnull(sb->uncompressed_ptr);
+ paranoid_invariant(sb->uncompressed_size == serialize_ftnode_info_size(node));
+
+ struct wbuf wb;
+ wbuf_init(&wb, sb->uncompressed_ptr, sb->uncompressed_size);
+
+ wbuf_MSN(&wb, node->max_msn_applied_to_node_on_disk);
+ wbuf_nocrc_uint(&wb, 0); // write a dummy value for where node->nodesize used to be
+ wbuf_nocrc_uint(&wb, node->flags);
+ wbuf_nocrc_int (&wb, node->height);
+ wbuf_TXNID(&wb, node->oldest_referenced_xid_known);
+ node->pivotkeys.serialize_to_wbuf(&wb);
+
+ // child blocks, only for internal nodes
+ if (node->height > 0) {
+ for (int i = 0; i < node->n_children; i++) {
+ wbuf_nocrc_BLOCKNUM(&wb, BP_BLOCKNUM(node,i));
+ }
+ }
+
+ uint32_t end_to_end_checksum = toku_x1764_memory(sb->uncompressed_ptr, wbuf_get_woffset(&wb));
+ wbuf_nocrc_int(&wb, end_to_end_checksum);
+ invariant(wb.ndone == wb.size);
+ invariant(sb->uncompressed_size==wb.ndone);
+}
+
+// This is the size of the uncompressed data, not including the compression headers
+unsigned int
+toku_serialize_ftnode_size (FTNODE node) {
+ unsigned int result = 0;
+ //
+ // As of now, this seems to be called if and only if the entire node is supposed
+ // to be in memory, so we will assert it.
+ //
+ toku_ftnode_assert_fully_in_memory(node);
+ result += serialize_node_header_size(node);
+ result += serialize_ftnode_info_size(node);
+ for (int i = 0; i < node->n_children; i++) {
+ result += serialize_ftnode_partition_size(node,i);
+ }
+ return result;
+}
+
+struct serialize_times {
+ tokutime_t serialize_time;
+ tokutime_t compress_time;
+};
+
+static void
+serialize_and_compress_partition(FTNODE node,
+ int childnum,
+ enum toku_compression_method compression_method,
+ SUB_BLOCK sb,
+ struct serialize_times *st)
+{
+ // serialize, compress, update status
+ tokutime_t t0 = toku_time_now();
+ serialize_ftnode_partition(node, childnum, sb);
+ tokutime_t t1 = toku_time_now();
+ compress_ftnode_sub_block(sb, compression_method);
+ tokutime_t t2 = toku_time_now();
+
+ st->serialize_time += t1 - t0;
+ st->compress_time += t2 - t1;
+}
+
+void
+toku_create_compressed_partition_from_available(
+ FTNODE node,
+ int childnum,
+ enum toku_compression_method compression_method,
+ SUB_BLOCK sb
+ )
+{
+ tokutime_t t0 = toku_time_now();
+
+ // serialize
+ sb->uncompressed_size = serialize_ftnode_partition_size(node, childnum);
+ toku::scoped_malloc uncompressed_buf(sb->uncompressed_size);
+ sb->uncompressed_ptr = uncompressed_buf.get();
+ serialize_ftnode_partition(node, childnum, sb);
+
+ tokutime_t t1 = toku_time_now();
+
+ // compress. no need to pad with extra bytes for sizes/xsum - we're not storing them
+ set_compressed_size_bound(sb, compression_method);
+ sb->compressed_ptr = toku_xmalloc(sb->compressed_size_bound);
+ sb->compressed_size = compress_nocrc_sub_block(
+ sb,
+ sb->compressed_ptr,
+ sb->compressed_size_bound,
+ compression_method
+ );
+ sb->uncompressed_ptr = NULL;
+
+ tokutime_t t2 = toku_time_now();
+
+ toku_ft_status_update_serialize_times(node, t1 - t0, t2 - t1);
+}
+
+static void
+serialize_and_compress_serially(FTNODE node,
+ int npartitions,
+ enum toku_compression_method compression_method,
+ struct sub_block sb[],
+ struct serialize_times *st) {
+ for (int i = 0; i < npartitions; i++) {
+ serialize_and_compress_partition(node, i, compression_method, &sb[i], st);
+ }
+}
+
+struct serialize_compress_work {
+ struct work base;
+ FTNODE node;
+ int i;
+ enum toku_compression_method compression_method;
+ struct sub_block *sb;
+ struct serialize_times st;
+};
+
+static void *
+serialize_and_compress_worker(void *arg) {
+ struct workset *ws = (struct workset *) arg;
+ while (1) {
+ struct serialize_compress_work *w = (struct serialize_compress_work *) workset_get(ws);
+ if (w == NULL)
+ break;
+ int i = w->i;
+ serialize_and_compress_partition(w->node, i, w->compression_method, &w->sb[i], &w->st);
+ }
+ workset_release_ref(ws);
+ return arg;
+}
+
+static void
+serialize_and_compress_in_parallel(FTNODE node,
+ int npartitions,
+ enum toku_compression_method compression_method,
+ struct sub_block sb[],
+ struct serialize_times *st) {
+ if (npartitions == 1) {
+ serialize_and_compress_partition(node, 0, compression_method, &sb[0], st);
+ } else {
+ int T = num_cores;
+ if (T > npartitions)
+ T = npartitions;
+ if (T > 0)
+ T = T - 1;
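+ // one fewer pool thread because the calling thread also runs
+ // serialize_and_compress_worker below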
+ struct workset ws;
+ ZERO_STRUCT(ws);
+ workset_init(&ws);
+ struct serialize_compress_work work[npartitions];
+ workset_lock(&ws);
+ for (int i = 0; i < npartitions; i++) {
+ work[i] = (struct serialize_compress_work) { .base = {{NULL, NULL}},
+ .node = node,
+ .i = i,
+ .compression_method = compression_method,
+ .sb = sb,
+ .st = { .serialize_time = 0, .compress_time = 0} };
+ workset_put_locked(&ws, &work[i].base);
+ }
+ workset_unlock(&ws);
+ toku_thread_pool_run(ft_pool, 0, &T, serialize_and_compress_worker, &ws);
+ workset_add_ref(&ws, T);
+ serialize_and_compress_worker(&ws);
+ workset_join(&ws);
+ workset_destroy(&ws);
+
+ // gather up the statistics from each thread's work item
+ for (int i = 0; i < npartitions; i++) {
+ st->serialize_time += work[i].st.serialize_time;
+ st->compress_time += work[i].st.compress_time;
+ }
+ }
+}
+
+static void
+serialize_and_compress_sb_node_info(FTNODE node, struct sub_block *sb,
+ enum toku_compression_method compression_method, struct serialize_times *st) {
+ // serialize, compress, update serialize times.
+ tokutime_t t0 = toku_time_now();
+ serialize_ftnode_info(node, sb);
+ tokutime_t t1 = toku_time_now();
+ compress_ftnode_sub_block(sb, compression_method);
+ tokutime_t t2 = toku_time_now();
+
+ st->serialize_time += t1 - t0;
+ st->compress_time += t2 - t1;
+}
+
+int toku_serialize_ftnode_to_memory(FTNODE node,
+ FTNODE_DISK_DATA* ndd,
+ unsigned int basementnodesize,
+ enum toku_compression_method compression_method,
+ bool do_rebalancing,
+ bool in_parallel, // true for the loader, false for toku_ftnode_flush_callback
+ /*out*/ size_t *n_bytes_to_write,
+ /*out*/ size_t *n_uncompressed_bytes,
+ /*out*/ char **bytes_to_write)
+// Effect: Writes out each child to a separate malloc'd buffer, then compresses
+// all of them, and writes the uncompressed header, to bytes_to_write,
+// which is malloc'd.
+//
+// The resulting buffer is guaranteed to be 512-byte aligned and the total length is a multiple of 512 (so we pad with zeros at the end if needed).
+// 512-byte padding is for O_DIRECT to work.
+{
+ toku_ftnode_assert_fully_in_memory(node);
+
+ if (do_rebalancing && node->height == 0) {
+ toku_ftnode_leaf_rebalance(node, basementnodesize);
+ }
+ const int npartitions = node->n_children;
+
+ // Each partition represents a compressed sub block
+ // For internal nodes, a sub block is a message buffer
+ // For leaf nodes, a sub block is a basement node
+ toku::scoped_calloc sb_buf(sizeof(struct sub_block) * npartitions);
+ struct sub_block *sb = reinterpret_cast<struct sub_block *>(sb_buf.get());
+ XREALLOC_N(npartitions, *ndd);
+
+ //
+ // First, let's serialize and compress the individual sub blocks
+ //
+
+ // determine how large our serialization and compression buffers need to be.
+ size_t serialize_buf_size = 0, compression_buf_size = 0;
+ for (int i = 0; i < node->n_children; i++) {
+ sb[i].uncompressed_size = serialize_ftnode_partition_size(node, i);
+ sb[i].compressed_size_bound = toku_compress_bound(compression_method, sb[i].uncompressed_size);
+ serialize_buf_size += sb[i].uncompressed_size;
+ compression_buf_size += sb[i].compressed_size_bound + 8; // add 8 extra bytes, 4 for compressed size, 4 for decompressed size
+ }
+
+ // give each sub block a base pointer to enough buffer space for serialization and compression
+ toku::scoped_malloc serialize_buf(serialize_buf_size);
+ toku::scoped_malloc compression_buf(compression_buf_size);
+ for (size_t i = 0, uncompressed_offset = 0, compressed_offset = 0; i < (size_t) node->n_children; i++) {
+ sb[i].uncompressed_ptr = reinterpret_cast<char *>(serialize_buf.get()) + uncompressed_offset;
+ sb[i].compressed_ptr = reinterpret_cast<char *>(compression_buf.get()) + compressed_offset;
+ uncompressed_offset += sb[i].uncompressed_size;
+ compressed_offset += sb[i].compressed_size_bound + 8; // add 8 extra bytes, 4 for compressed size, 4 for decompressed size
+ invariant(uncompressed_offset <= serialize_buf_size);
+ invariant(compressed_offset <= compression_buf_size);
+ }
+
+ // do the actual serialization now that we have buffer space
+ struct serialize_times st = { 0, 0 };
+ if (in_parallel) {
+ serialize_and_compress_in_parallel(node, npartitions, compression_method, sb, &st);
+ } else {
+ serialize_and_compress_serially(node, npartitions, compression_method, sb, &st);
+ }
+
+ //
+ // Now let's create a sub-block that has the common node information.
+ // This does NOT include the header.
+ //
+
+ // determine how large our serialization and compression buffers need to be
+ struct sub_block sb_node_info;
+ sub_block_init(&sb_node_info);
+ size_t sb_node_info_uncompressed_size = serialize_ftnode_info_size(node);
+ size_t sb_node_info_compressed_size_bound = toku_compress_bound(compression_method, sb_node_info_uncompressed_size);
+ toku::scoped_malloc sb_node_info_uncompressed_buf(sb_node_info_uncompressed_size);
+ toku::scoped_malloc sb_node_info_compressed_buf(sb_node_info_compressed_size_bound + 8); // add 8 extra bytes, 4 for compressed size, 4 for decompressed size
+ sb_node_info.uncompressed_size = sb_node_info_uncompressed_size;
+ sb_node_info.uncompressed_ptr = sb_node_info_uncompressed_buf.get();
+ sb_node_info.compressed_size_bound = sb_node_info_compressed_size_bound;
+ sb_node_info.compressed_ptr = sb_node_info_compressed_buf.get();
+
+ // do the actual serialization now that we have buffer space
+ serialize_and_compress_sb_node_info(node, &sb_node_info, compression_method, &st);
+
+ //
+ // At this point, we have compressed each of our pieces into individual sub_blocks,
+ // we can put the header and all the subblocks into a single buffer and return it.
+ //
+
+ // update the serialize times, ignore the header for simplicity. we captured all
+ // of the partitions' serialize times so that's probably good enough.
+ toku_ft_status_update_serialize_times(node, st.serialize_time, st.compress_time);
+
+ // The total size of the node is:
+ // size of header + disk size of the n+1 sub_block's created above
+ uint32_t total_node_size = (serialize_node_header_size(node) // uncompressed header
+ + sb_node_info.compressed_size // compressed nodeinfo (without its checksum)
+ + 4); // nodeinfo's checksum
+ uint32_t total_uncompressed_size = (serialize_node_header_size(node) // uncompressed header
+ + sb_node_info.uncompressed_size // uncompressed nodeinfo (without its checksum)
+ + 4); // nodeinfo's checksum
+ // store the BP_SIZEs and BP_STARTs
+ for (int i = 0; i < node->n_children; i++) {
+ uint32_t len = sb[i].compressed_size + 4; // data and checksum
+ BP_SIZE (*ndd,i) = len;
+ BP_START(*ndd,i) = total_node_size;
+ total_node_size += sb[i].compressed_size + 4;
+ total_uncompressed_size += sb[i].uncompressed_size + 4;
+ }
+
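+ // The on-disk layout assembled below is:
+ //   [uncompressed header][compressed nodeinfo][nodeinfo checksum]
+ //   [compressed partition 0][checksum] ... [compressed partition n-1][checksum]
+ //   [zero padding up to a multiple of 512 bytes]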
+ // now create the final serialized node
+ uint32_t total_buffer_size = roundup_to_multiple(512, total_node_size); // round the buffer size up to a multiple of 512 bytes
+ char *XMALLOC_N_ALIGNED(512, total_buffer_size, data);
+ char *curr_ptr = data;
+
+ // write the header
+ struct wbuf wb;
+ wbuf_init(&wb, curr_ptr, serialize_node_header_size(node));
+ serialize_node_header(node, *ndd, &wb);
+ assert(wb.ndone == wb.size);
+ curr_ptr += serialize_node_header_size(node);
+
+ // now write sb_node_info
+ memcpy(curr_ptr, sb_node_info.compressed_ptr, sb_node_info.compressed_size);
+ curr_ptr += sb_node_info.compressed_size;
+ // write the checksum
+ *(uint32_t *)curr_ptr = toku_htod32(sb_node_info.xsum);
+ curr_ptr += sizeof(sb_node_info.xsum);
+
+ for (int i = 0; i < npartitions; i++) {
+ memcpy(curr_ptr, sb[i].compressed_ptr, sb[i].compressed_size);
+ curr_ptr += sb[i].compressed_size;
+ // write the checksum
+ *(uint32_t *)curr_ptr = toku_htod32(sb[i].xsum);
+ curr_ptr += sizeof(sb[i].xsum);
+ }
+ // Zero the rest of the buffer
+ memset(data + total_node_size, 0, total_buffer_size - total_node_size);
+
+ assert((uint32_t) (curr_ptr - data) == total_node_size);
+ *bytes_to_write = data;
+ *n_bytes_to_write = total_buffer_size;
+ *n_uncompressed_bytes = total_uncompressed_size;
+
+ invariant(*n_bytes_to_write % 512 == 0);
+ invariant(reinterpret_cast<unsigned long long>(*bytes_to_write) % 512 == 0);
+ return 0;
+}
+
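+// Serialize and compress `node' into memory (see
+// toku_serialize_ftnode_to_memory), allocate a block for it on disk, and
+// write it out with a single aligned pwrite. The node is marked clean
+// once it has been written.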
+int toku_serialize_ftnode_to(int fd,
+ BLOCKNUM blocknum,
+ FTNODE node,
+ FTNODE_DISK_DATA *ndd,
+ bool do_rebalancing,
+ FT ft,
+ bool for_checkpoint) {
+ size_t n_to_write;
+ size_t n_uncompressed_bytes;
+ char *compressed_buf = nullptr;
+
+ // toku_serialize_ftnode_to is only called from
+ // toku_ftnode_flush_callback. When we write nodes to disk that way, we
+ // assume it is being done on a non-critical background thread (probably
+ // for checkpointing), which should not hog CPU, so in_parallel has
+ // historically been false here.
+ //
+ // The value is now read from toku_serialize_in_parallel below, so it can
+ // be changed without revisiting this call site.
+ //
+ // Alternatively, we could have made in_parallel a parameter of
+ // toku_serialize_ftnode_to, but instead we did this.
+ int r = toku_serialize_ftnode_to_memory(
+ node,
+ ndd,
+ ft->h->basementnodesize,
+ ft->h->compression_method,
+ do_rebalancing,
+ toku_unsafe_fetch(&toku_serialize_in_parallel),
+ &n_to_write,
+ &n_uncompressed_bytes,
+ &compressed_buf);
+ if (r != 0) {
+ return r;
+ }
+
+ // If the node has never been written, then write the whole buffer,
+ // including the zeros
+ invariant(blocknum.b >= 0);
+ DISKOFF offset;
+
+ // Dirties the ft
+ ft->blocktable.realloc_on_disk(
+ blocknum, n_to_write, &offset, ft, fd, for_checkpoint);
+
+ tokutime_t t0 = toku_time_now();
+ toku_os_full_pwrite(fd, compressed_buf, n_to_write, offset);
+ tokutime_t t1 = toku_time_now();
+
+ tokutime_t io_time = t1 - t0;
+ toku_ft_status_update_flush_reason(
+ node, n_uncompressed_bytes, n_to_write, io_time, for_checkpoint);
+
+ toku_free(compressed_buf);
+ node->clear_dirty(); // See #1957. Must set the node to be clean after
+ // serializing it so that it doesn't get written again on
+ // the next checkpoint or eviction.
+ if (node->height == 0) {
+ for (int i = 0; i < node->n_children; i++) {
+ if (BP_STATE(node, i) == PT_AVAIL) {
+ BLB_LRD(node, i) = 0;
+ }
+ }
+ }
+ return 0;
+}
+
+static void
+sort_and_steal_offset_arrays(NONLEAF_CHILDINFO bnc,
+ const toku::comparator &cmp,
+ int32_t **fresh_offsets, int32_t nfresh,
+ int32_t **stale_offsets, int32_t nstale,
+ int32_t **broadcast_offsets, int32_t nbroadcast) {
+ // We always have fresh / broadcast offsets (even if they are empty)
+ // but we may not have stale offsets, in the case of v13 upgrade.
+ invariant(fresh_offsets != nullptr);
+ invariant(broadcast_offsets != nullptr);
+ invariant(cmp.valid());
+
+ typedef toku::sort<int32_t, const struct toku_msg_buffer_key_msn_cmp_extra, toku_msg_buffer_key_msn_cmp> msn_sort;
+
+ const int32_t n_in_this_buffer = nfresh + nstale + nbroadcast;
+ struct toku_msg_buffer_key_msn_cmp_extra extra(cmp, &bnc->msg_buffer);
+ msn_sort::mergesort_r(*fresh_offsets, nfresh, extra);
+ bnc->fresh_message_tree.destroy();
+ bnc->fresh_message_tree.create_steal_sorted_array(fresh_offsets, nfresh, n_in_this_buffer);
+ if (stale_offsets) {
+ msn_sort::mergesort_r(*stale_offsets, nstale, extra);
+ bnc->stale_message_tree.destroy();
+ bnc->stale_message_tree.create_steal_sorted_array(stale_offsets, nstale, n_in_this_buffer);
+ }
+ bnc->broadcast_list.destroy();
+ bnc->broadcast_list.create_steal_sorted_array(broadcast_offsets, nbroadcast, n_in_this_buffer);
+}
+
+static MSN
+deserialize_child_buffer_v13(FT ft, NONLEAF_CHILDINFO bnc, struct rbuf *rb) {
+ // We skip 'stale' offsets for upgraded nodes.
+ int32_t nfresh = 0, nbroadcast = 0;
+ int32_t *fresh_offsets = nullptr, *broadcast_offsets = nullptr;
+
+ // Only sort buffers if we have a valid comparison function. In certain scenarios,
+ // like deserialize_ft_versioned() or tokuftdump, we'll need to deserialize ftnodes
+ // for simple inspection and don't actually require that the message buffers are
+ // properly sorted. This is very ugly, but correct.
+ const bool sort = ft->cmp.valid();
+
+ MSN highest_msn_in_this_buffer =
+ bnc->msg_buffer.deserialize_from_rbuf_v13(rb, &ft->h->highest_unused_msn_for_upgrade,
+ sort ? &fresh_offsets : nullptr, &nfresh,
+ sort ? &broadcast_offsets : nullptr, &nbroadcast);
+
+ if (sort) {
+ sort_and_steal_offset_arrays(bnc, ft->cmp,
+ &fresh_offsets, nfresh,
+ nullptr, 0, // no stale offsets
+ &broadcast_offsets, nbroadcast);
+ }
+
+ return highest_msn_in_this_buffer;
+}
+
+static void
+deserialize_child_buffer_v26(NONLEAF_CHILDINFO bnc, struct rbuf *rb, const toku::comparator &cmp) {
+ int32_t nfresh = 0, nstale = 0, nbroadcast = 0;
+ int32_t *fresh_offsets, *stale_offsets, *broadcast_offsets;
+
+ // Only sort buffers if we have a valid comparison function. In certain scenarios,
+ // like deserialize_ft_versioned() or tokuftdump, we'll need to deserialize ftnodes
+ // for simple inspection and don't actually require that the message buffers are
+ // properly sorted. This is very ugly, but correct.
+ const bool sort = cmp.valid();
+
+ // read in the message buffer
+ bnc->msg_buffer.deserialize_from_rbuf(rb,
+ sort ? &fresh_offsets : nullptr, &nfresh,
+ sort ? &stale_offsets : nullptr, &nstale,
+ sort ? &broadcast_offsets : nullptr, &nbroadcast);
+
+ if (sort) {
+ sort_and_steal_offset_arrays(bnc, cmp,
+ &fresh_offsets, nfresh,
+ &stale_offsets, nstale,
+ &broadcast_offsets, nbroadcast);
+ }
+}
+
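+// For layout versions > 26, the fresh/stale/broadcast offset arrays are
+// stored on disk already sorted, so we read them verbatim and steal them
+// directly into the message trees.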
+static void
+deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rb) {
+ // read in the message buffer
+ bnc->msg_buffer.deserialize_from_rbuf(rb,
+ nullptr, nullptr, // fresh_offsets, nfresh,
+ nullptr, nullptr, // stale_offsets, nstale,
+ nullptr, nullptr); // broadcast_offsets, nbroadcast
+
+ // read in each message tree (fresh, stale, broadcast)
+ int32_t nfresh = rbuf_int(rb);
+ int32_t *XMALLOC_N(nfresh, fresh_offsets);
+ for (int i = 0; i < nfresh; i++) {
+ fresh_offsets[i] = rbuf_int(rb);
+ }
+
+ int32_t nstale = rbuf_int(rb);
+ int32_t *XMALLOC_N(nstale, stale_offsets);
+ for (int i = 0; i < nstale; i++) {
+ stale_offsets[i] = rbuf_int(rb);
+ }
+
+ int32_t nbroadcast = rbuf_int(rb);
+ int32_t *XMALLOC_N(nbroadcast, broadcast_offsets);
+ for (int i = 0; i < nbroadcast; i++) {
+ broadcast_offsets[i] = rbuf_int(rb);
+ }
+
+ // build OMTs out of each offset array
+ bnc->fresh_message_tree.destroy();
+ bnc->fresh_message_tree.create_steal_sorted_array(&fresh_offsets, nfresh, nfresh);
+ bnc->stale_message_tree.destroy();
+ bnc->stale_message_tree.create_steal_sorted_array(&stale_offsets, nstale, nstale);
+ bnc->broadcast_list.destroy();
+ bnc->broadcast_list.create_steal_sorted_array(&broadcast_offsets, nbroadcast, nbroadcast);
+}
+
+// dump a buffer to stderr
+// no locking around this for now
+void
+dump_bad_block(unsigned char *vp, uint64_t size) {
+ const uint64_t linesize = 64;
+ uint64_t n = size / linesize;
+ for (uint64_t i = 0; i < n; i++) {
+ fprintf(stderr, "%p: ", vp);
+ for (uint64_t j = 0; j < linesize; j++) {
+ unsigned char c = vp[j];
+ fprintf(stderr, "%2.2X", c);
+ }
+ fprintf(stderr, "\n");
+ vp += linesize;
+ }
+ size = size % linesize;
+ for (uint64_t i=0; i<size; i++) {
+ if ((i % linesize) == 0)
+ fprintf(stderr, "%p: ", vp+i);
+ fprintf(stderr, "%2.2X", vp[i]);
+ if (((i+1) % linesize) == 0)
+ fprintf(stderr, "\n");
+ }
+ fprintf(stderr, "\n");
+}
+
+////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////
+
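+// Constructors, clones and destructors for the in-memory partition types:
+// basement nodes (leaf partitions) and nonleaf childinfo (message buffers).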
+BASEMENTNODE toku_create_empty_bn(void) {
+ BASEMENTNODE bn = toku_create_empty_bn_no_buffer();
+ bn->data_buffer.initialize_empty();
+ return bn;
+}
+
+BASEMENTNODE toku_clone_bn(BASEMENTNODE orig_bn) {
+ BASEMENTNODE bn = toku_create_empty_bn_no_buffer();
+ bn->max_msn_applied = orig_bn->max_msn_applied;
+ bn->seqinsert = orig_bn->seqinsert;
+ bn->stale_ancestor_messages_applied = orig_bn->stale_ancestor_messages_applied;
+ bn->stat64_delta = orig_bn->stat64_delta;
+ bn->logical_rows_delta = orig_bn->logical_rows_delta;
+ bn->data_buffer.clone(&orig_bn->data_buffer);
+ return bn;
+}
+
+BASEMENTNODE toku_create_empty_bn_no_buffer(void) {
+ BASEMENTNODE XMALLOC(bn);
+ bn->max_msn_applied.msn = 0;
+ bn->seqinsert = 0;
+ bn->stale_ancestor_messages_applied = false;
+ bn->stat64_delta = ZEROSTATS;
+ bn->logical_rows_delta = 0;
+ bn->data_buffer.init_zero();
+ return bn;
+}
+
+NONLEAF_CHILDINFO toku_create_empty_nl(void) {
+ NONLEAF_CHILDINFO XMALLOC(cn);
+ cn->msg_buffer.create();
+ cn->fresh_message_tree.create_no_array();
+ cn->stale_message_tree.create_no_array();
+ cn->broadcast_list.create_no_array();
+ memset(cn->flow, 0, sizeof cn->flow);
+ return cn;
+}
+
+// must clone the OMTs, since we serialize them along with the message buffer
+NONLEAF_CHILDINFO toku_clone_nl(NONLEAF_CHILDINFO orig_childinfo) {
+ NONLEAF_CHILDINFO XMALLOC(cn);
+ cn->msg_buffer.clone(&orig_childinfo->msg_buffer);
+ cn->fresh_message_tree.create_no_array();
+ cn->fresh_message_tree.clone(orig_childinfo->fresh_message_tree);
+ cn->stale_message_tree.create_no_array();
+ cn->stale_message_tree.clone(orig_childinfo->stale_message_tree);
+ cn->broadcast_list.create_no_array();
+ cn->broadcast_list.clone(orig_childinfo->broadcast_list);
+ memset(cn->flow, 0, sizeof cn->flow);
+ return cn;
+}
+
+void destroy_basement_node (BASEMENTNODE bn)
+{
+ bn->data_buffer.destroy();
+ toku_free(bn);
+}
+
+void destroy_nonleaf_childinfo (NONLEAF_CHILDINFO nl)
+{
+ nl->msg_buffer.destroy();
+ nl->fresh_message_tree.destroy();
+ nl->stale_message_tree.destroy();
+ nl->broadcast_list.destroy();
+ toku_free(nl);
+}
+
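+// Read the entire on-disk block for blocknum into a freshly allocated,
+// 512-byte-aligned buffer and initialize *rb to point at it. The caller
+// owns (and must eventually free) rb->buf.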
+void read_block_from_fd_into_rbuf(
+ int fd,
+ BLOCKNUM blocknum,
+ FT ft,
+ struct rbuf *rb
+ )
+{
+ // get the file offset and block size for the block
+ DISKOFF offset, size;
+ ft->blocktable.translate_blocknum_to_offset_size(blocknum, &offset, &size);
+ DISKOFF size_aligned = roundup_to_multiple(512, size);
+ uint8_t *XMALLOC_N_ALIGNED(512, size_aligned, raw_block);
+ rbuf_init(rb, raw_block, size);
+ // read the block
+ ssize_t rlen = toku_os_pread(fd, raw_block, size_aligned, offset);
+ assert((DISKOFF)rlen >= size);
+ assert((DISKOFF)rlen <= size_aligned);
+}
+
+static const int read_header_heuristic_max = 32*1024;
+
+#ifndef MIN
+#define MIN(a,b) (((a)>(b)) ? (b) : (a))
+#endif
+
+// Effect: If the header part of the node is small enough, then read it into the rbuf. The rbuf will be allocated to be big enough in any case.
+static void read_ftnode_header_from_fd_into_rbuf_if_small_enough(int fd, BLOCKNUM blocknum,
+ FT ft, struct rbuf *rb,
+ ftnode_fetch_extra *bfe) {
+ DISKOFF offset, size;
+ ft->blocktable.translate_blocknum_to_offset_size(blocknum, &offset, &size);
+ DISKOFF read_size = roundup_to_multiple(512, MIN(read_header_heuristic_max, size));
+ uint8_t *XMALLOC_N_ALIGNED(512, roundup_to_multiple(512, size), raw_block);
+ rbuf_init(rb, raw_block, read_size);
+
+ // read the block
+ tokutime_t t0 = toku_time_now();
+ ssize_t rlen = toku_os_pread(fd, raw_block, read_size, offset);
+ tokutime_t t1 = toku_time_now();
+
+ assert(rlen >= 0);
+ rbuf_init(rb, raw_block, rlen);
+
+ bfe->bytes_read = rlen;
+ bfe->io_time = t1 - t0;
+ toku_ft_status_update_pivot_fetch_reason(bfe);
+}
+
+//
+// read the compressed partition into the sub_block,
+// validate the checksum of the compressed data
+//
+int
+read_compressed_sub_block(struct rbuf *rb, struct sub_block *sb)
+{
+ int r = 0;
+ sb->compressed_size = rbuf_int(rb);
+ sb->uncompressed_size = rbuf_int(rb);
+ const void **cp = (const void **) &sb->compressed_ptr;
+ rbuf_literal_bytes(rb, cp, sb->compressed_size);
+ sb->xsum = rbuf_int(rb);
+ // let's check the checksum
+ uint32_t actual_xsum = toku_x1764_memory((char *)sb->compressed_ptr-8, 8+sb->compressed_size);
+ if (sb->xsum != actual_xsum) {
+ r = TOKUDB_BAD_CHECKSUM;
+ }
+ return r;
+}
+
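+// read the compressed sub block and verify its checksum, then decompress
+// it into a newly allocated sb->uncompressed_ptr
+// (see just_decompress_sub_block)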
+static int
+read_and_decompress_sub_block(struct rbuf *rb, struct sub_block *sb)
+{
+ int r = 0;
+ r = read_compressed_sub_block(rb, sb);
+ if (r != 0) {
+ goto exit;
+ }
+
+ just_decompress_sub_block(sb);
+exit:
+ return r;
+}
+
+// Allocates space for the sub-block and decompresses the data from
+// the supplied compressed pointer.
+void
+just_decompress_sub_block(struct sub_block *sb)
+{
+ // <CER> TODO: Add assert that the subblock was read in.
+ sb->uncompressed_ptr = toku_xmalloc(sb->uncompressed_size);
+
+ toku_decompress(
+ (Bytef *) sb->uncompressed_ptr,
+ sb->uncompressed_size,
+ (Bytef *) sb->compressed_ptr,
+ sb->compressed_size
+ );
+}
+
+// verify the checksum
+int verify_ftnode_sub_block(struct sub_block *sb,
+ const char *fname,
+ BLOCKNUM blocknum) {
+ int r = 0;
+ // first verify the checksum
+ uint32_t data_size = sb->uncompressed_size - 4; // checksum is 4 bytes at end
+ uint32_t stored_xsum = toku_dtoh32(*((uint32_t *)((char *)sb->uncompressed_ptr + data_size)));
+ uint32_t actual_xsum = toku_x1764_memory(sb->uncompressed_ptr, data_size);
+ if (stored_xsum != actual_xsum) {
+ fprintf(
+ stderr,
+ "%s:%d:verify_ftnode_sub_block - "
+ "file[%s], blocknum[%lld], stored_xsum[%u] != actual_xsum[%u]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ stored_xsum,
+ actual_xsum);
+ dump_bad_block((Bytef *) sb->uncompressed_ptr, sb->uncompressed_size);
+ r = TOKUDB_BAD_CHECKSUM;
+ }
+ return r;
+}
+
+// This function deserializes the data stored by serialize_ftnode_info
+static int deserialize_ftnode_info(struct sub_block *sb, FTNODE node) {
+
+ // sb_node_info->uncompressed_ptr stores the serialized node information
+ // this function puts that information into node
+
+ // first verify the checksum
+ int r = 0;
+ const char *fname = toku_ftnode_get_cachefile_fname_in_env(node);
+ r = verify_ftnode_sub_block(sb, fname, node->blocknum);
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_info - "
+ "file[%s], blocknum[%lld], verify_ftnode_sub_block failed with %d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)node->blocknum.b,
+ r);
+ dump_bad_block(static_cast<unsigned char *>(sb->uncompressed_ptr),
+ sb->uncompressed_size);
+ goto exit;
+ }
+
+ uint32_t data_size;
+ data_size = sb->uncompressed_size - 4; // checksum is 4 bytes at end
+
+ // now with the data verified, we can read the information into the node
+ struct rbuf rb;
+ rbuf_init(&rb, (unsigned char *) sb->uncompressed_ptr, data_size);
+
+ node->max_msn_applied_to_node_on_disk = rbuf_MSN(&rb);
+ (void)rbuf_int(&rb);
+ node->flags = rbuf_int(&rb);
+ node->height = rbuf_int(&rb);
+ if (node->layout_version_read_from_disk < FT_LAYOUT_VERSION_19) {
+ (void) rbuf_int(&rb); // optimized_for_upgrade
+ }
+ if (node->layout_version_read_from_disk >= FT_LAYOUT_VERSION_22) {
+ rbuf_TXNID(&rb, &node->oldest_referenced_xid_known);
+ }
+
+ // now create the basement nodes or childinfos, depending on whether this is a
+ // leaf node or internal node
+ // now the subtree_estimates
+
+ // n_children is now in the header, and the allocation of node->bp is done in deserialize_ftnode_from_rbuf.
+
+ // now the pivots
+ if (node->n_children > 1) {
+ node->pivotkeys.deserialize_from_rbuf(&rb, node->n_children - 1);
+ } else {
+ node->pivotkeys.create_empty();
+ }
+
+ // if this is an internal node, unpack the block nums, and fill in necessary fields
+ // of childinfo
+ if (node->height > 0) {
+ for (int i = 0; i < node->n_children; i++) {
+ BP_BLOCKNUM(node,i) = rbuf_blocknum(&rb);
+ BP_WORKDONE(node, i) = 0;
+ }
+ }
+
+ // make sure that all the data was read
+ if (data_size != rb.ndone) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_info - "
+ "file[%s], blocknum[%lld], data_size[%d] != rb.ndone[%d]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)node->blocknum.b,
+ data_size,
+ rb.ndone);
+ dump_bad_block(rb.buf, rb.size);
+ abort();
+ }
+exit:
+ return r;
+}
+
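+// Create an empty in-memory partition: a basement node for leaf nodes,
+// a nonleaf childinfo (message buffer) for internal nodes.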
+static void
+setup_available_ftnode_partition(FTNODE node, int i) {
+ if (node->height == 0) {
+ set_BLB(node, i, toku_create_empty_bn());
+ BLB_MAX_MSN_APPLIED(node,i) = node->max_msn_applied_to_node_on_disk;
+ }
+ else {
+ set_BNC(node, i, toku_create_empty_nl());
+ }
+}
+
+// Assign the child_to_read member of the bfe from the given ftnode
+// that has been brought into memory.
+static void
+update_bfe_using_ftnode(FTNODE node, ftnode_fetch_extra *bfe)
+{
+ if (bfe->type == ftnode_fetch_subset && bfe->search != NULL) {
+ // we do not take into account prefetching yet
+ // as of now, if we need a subset, the only thing
+ // we can possibly require is a single basement node
+ // we find out what basement node the query cares about
+ // and check if it is available
+ bfe->child_to_read = toku_ft_search_which_child(
+ bfe->ft->cmp,
+ node,
+ bfe->search
+ );
+ } else if (bfe->type == ftnode_fetch_keymatch) {
+ // we do not take into account prefetching yet
+ // as of now, if we need a subset, the only thing
+ // we can possibly require is a single basement node
+ // we find out what basement node the query cares about
+ // and check if it is available
+ if (node->height == 0) {
+ int left_child = bfe->leftmost_child_wanted(node);
+ int right_child = bfe->rightmost_child_wanted(node);
+ if (left_child == right_child) {
+ bfe->child_to_read = left_child;
+ }
+ }
+ }
+}
+
+// Using the search parameters in the bfe, this function will
+// initialize all of the given ftnode's partitions.
+static void
+setup_partitions_using_bfe(FTNODE node,
+ ftnode_fetch_extra *bfe,
+ bool data_in_memory)
+{
+ // Leftmost and Rightmost Child bounds.
+ int lc, rc;
+ if (bfe->type == ftnode_fetch_subset || bfe->type == ftnode_fetch_prefetch) {
+ lc = bfe->leftmost_child_wanted(node);
+ rc = bfe->rightmost_child_wanted(node);
+ } else {
+ lc = -1;
+ rc = -1;
+ }
+
+ //
+ // setup memory needed for the node
+ //
+ //printf("node height %d, blocknum %" PRId64 ", type %d lc %d rc %d\n", node->height, node->blocknum.b, bfe->type, lc, rc);
+ for (int i = 0; i < node->n_children; i++) {
+ BP_INIT_UNTOUCHED_CLOCK(node,i);
+ if (data_in_memory) {
+ BP_STATE(node, i) = ((bfe->wants_child_available(i) || (lc <= i && i <= rc))
+ ? PT_AVAIL : PT_COMPRESSED);
+ } else {
+ BP_STATE(node, i) = PT_ON_DISK;
+ }
+ BP_WORKDONE(node,i) = 0;
+
+ switch (BP_STATE(node,i)) {
+ case PT_AVAIL:
+ setup_available_ftnode_partition(node, i);
+ BP_TOUCH_CLOCK(node,i);
+ break;
+ case PT_COMPRESSED:
+ set_BSB(node, i, sub_block_creat());
+ break;
+ case PT_ON_DISK:
+ set_BNULL(node, i);
+ break;
+ case PT_INVALID:
+ abort();
+ }
+ }
+}
+
+static void setup_ftnode_partitions(FTNODE node, ftnode_fetch_extra *bfe, bool data_in_memory)
+// Effect: Used when reading a ftnode into main memory, this sets up the partitions.
+// We set bfe->child_to_read as well as the BP_STATE and the data pointers (e.g., with set_BSB or set_BNULL or other set_ operations).
+// Arguments: Node: the node to set up.
+// bfe: Describes the key range needed.
+// data_in_memory: true if we have all the data (in which case we set the BP_STATE to be either PT_AVAIL or PT_COMPRESSED depending on the bfe).
+// false if we don't have the partitions in main memory (in which case we set the state to PT_ON_DISK).
+{
+ // Set bfe->child_to_read.
+ update_bfe_using_ftnode(node, bfe);
+
+ // Setup the partitions.
+ setup_partitions_using_bfe(node, bfe, data_in_memory);
+}
+
+/* deserialize the partition from the sub-block's uncompressed buffer
+ * and destroy the uncompressed buffer
+ */
+static int deserialize_ftnode_partition(
+ struct sub_block *sb,
+ FTNODE node,
+ int childnum, // which partition to deserialize
+ const toku::comparator &cmp) {
+
+ int r = 0;
+ const char *fname = toku_ftnode_get_cachefile_fname_in_env(node);
+ r = verify_ftnode_sub_block(sb, fname, node->blocknum);
+ if (r != 0) {
+ fprintf(stderr,
+ "%s:%d:deserialize_ftnode_partition - "
+ "file[%s], blocknum[%lld], "
+ "verify_ftnode_sub_block failed with %d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)node->blocknum.b,
+ r);
+ goto exit;
+ }
+ uint32_t data_size;
+ data_size = sb->uncompressed_size - 4; // checksum is 4 bytes at end
+
+ // now with the data verified, we can read the information into the node
+ struct rbuf rb;
+ rbuf_init(&rb, (unsigned char *) sb->uncompressed_ptr, data_size);
+ unsigned char ch;
+ ch = rbuf_char(&rb);
+
+ if (node->height > 0) {
+ if (ch != FTNODE_PARTITION_MSG_BUFFER) {
+ fprintf(stderr,
+ "%s:%d:deserialize_ftnode_partition - "
+ "file[%s], blocknum[%lld], ch[%d] != "
+ "FTNODE_PARTITION_MSG_BUFFER[%d]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)node->blocknum.b,
+ ch,
+ FTNODE_PARTITION_MSG_BUFFER);
+ dump_bad_block(rb.buf, rb.size);
+ assert(ch == FTNODE_PARTITION_MSG_BUFFER);
+ }
+ NONLEAF_CHILDINFO bnc = BNC(node, childnum);
+ if (node->layout_version_read_from_disk <= FT_LAYOUT_VERSION_26) {
+ // Layout version <= 26 did not serialize sorted message trees to disk.
+ deserialize_child_buffer_v26(bnc, &rb, cmp);
+ } else {
+ deserialize_child_buffer(bnc, &rb);
+ }
+ BP_WORKDONE(node, childnum) = 0;
+ } else {
+ if (ch != FTNODE_PARTITION_DMT_LEAVES) {
+ fprintf(stderr,
+ "%s:%d:deserialize_ftnode_partition - "
+ "file[%s], blocknum[%lld], ch[%d] != "
+ "FTNODE_PARTITION_DMT_LEAVES[%d]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)node->blocknum.b,
+ ch,
+ FTNODE_PARTITION_DMT_LEAVES);
+ dump_bad_block(rb.buf, rb.size);
+ assert(ch == FTNODE_PARTITION_DMT_LEAVES);
+ }
+
+ BLB_SEQINSERT(node, childnum) = 0;
+ uint32_t num_entries = rbuf_int(&rb);
+ // we are now at the first byte of first leafentry
+ data_size -= rb.ndone; // remaining bytes of leafentry data
+
+ BASEMENTNODE bn = BLB(node, childnum);
+ bn->data_buffer.deserialize_from_rbuf(
+ num_entries, &rb, data_size, node->layout_version_read_from_disk);
+ }
+ if (rb.ndone != rb.size) {
+ fprintf(stderr,
+ "%s:%d:deserialize_ftnode_partition - "
+ "file[%s], blocknum[%lld], rb.ndone[%d] != rb.size[%d]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)node->blocknum.b,
+ rb.ndone,
+ rb.size);
+ dump_bad_block(rb.buf, rb.size);
+ assert(rb.ndone == rb.size);
+ }
+
+exit:
+ return r;
+}
+
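+// Read, checksum-verify and decompress one partition's sub block, then
+// deserialize it into the node's partition `child'. The uncompressed
+// buffer is freed before returning; on success, the time spent
+// decompressing is returned through *decompress_time.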
+static int decompress_and_deserialize_worker(struct rbuf curr_rbuf,
+ struct sub_block curr_sb,
+ FTNODE node,
+ int child,
+ const toku::comparator &cmp,
+ tokutime_t *decompress_time) {
+ int r = 0;
+ tokutime_t t0 = toku_time_now();
+ r = read_and_decompress_sub_block(&curr_rbuf, &curr_sb);
+ if (r != 0) {
+ const char *fname = toku_ftnode_get_cachefile_fname_in_env(node);
+ fprintf(stderr,
+ "%s:%d:decompress_and_deserialize_worker - "
+ "file[%s], blocknum[%lld], read_and_decompress_sub_block failed "
+ "with %d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)node->blocknum.b,
+ r);
+ dump_bad_block(curr_rbuf.buf, curr_rbuf.size);
+ goto exit;
+ }
+ *decompress_time = toku_time_now() - t0;
+ // at this point, sb->uncompressed_ptr stores the serialized node partition
+ r = deserialize_ftnode_partition(&curr_sb, node, child, cmp);
+ if (r != 0) {
+ const char *fname = toku_ftnode_get_cachefile_fname_in_env(node);
+ fprintf(stderr,
+ "%s:%d:decompress_and_deserialize_worker - "
+ "file[%s], blocknum[%lld], deserialize_ftnode_partition failed "
+ "with %d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)node->blocknum.b,
+ r);
+ dump_bad_block(curr_rbuf.buf, curr_rbuf.size);
+ goto exit;
+ }
+
+exit:
+ toku_free(curr_sb.uncompressed_ptr);
+ return r;
+}
+
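+// Read and checksum-verify one partition's compressed sub block, then copy
+// the still-compressed bytes into the node's BSB so the partition can stay
+// in the PT_COMPRESSED state.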
+static int check_and_copy_compressed_sub_block_worker(struct rbuf curr_rbuf,
+ struct sub_block curr_sb,
+ FTNODE node,
+ int child) {
+ int r = 0;
+ r = read_compressed_sub_block(&curr_rbuf, &curr_sb);
+ if (r != 0) {
+ goto exit;
+ }
+
+ SUB_BLOCK bp_sb;
+ bp_sb = BSB(node, child);
+ bp_sb->compressed_size = curr_sb.compressed_size;
+ bp_sb->uncompressed_size = curr_sb.uncompressed_size;
+ bp_sb->compressed_ptr = toku_xmalloc(bp_sb->compressed_size);
+ memcpy(
+ bp_sb->compressed_ptr, curr_sb.compressed_ptr, bp_sb->compressed_size);
+exit:
+ return r;
+}
+
+static FTNODE alloc_ftnode_for_deserialize(uint32_t fullhash, BLOCKNUM blocknum) {
+// Effect: Allocate an FTNODE and fill in the values that are not read from disk.
+ FTNODE XMALLOC(node);
+ node->fullhash = fullhash;
+ node->blocknum = blocknum;
+ node->clear_dirty();
+ node->oldest_referenced_xid_known = TXNID_NONE;
+ node->bp = nullptr;
+ node->ct_pair = nullptr;
+ return node;
+}
+
+static int deserialize_ftnode_header_from_rbuf_if_small_enough(
+ FTNODE *ftnode,
+ FTNODE_DISK_DATA *ndd,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ ftnode_fetch_extra *bfe,
+ struct rbuf *rb,
+ int fd)
+// If we have enough information in the rbuf to construct a header, then do so.
+// Also fetch in the basement node if needed.
+// Return 0 if it worked. If something goes wrong (including that we are
+// looking at some old data format that doesn't have partitions) then return
+// nonzero.
+{
+ int r = 0;
+
+ tokutime_t t0, t1;
+ tokutime_t decompress_time = 0;
+ tokutime_t deserialize_time = 0;
+ // we must get the name from bfe and not through
+ // toku_ftnode_get_cachefile_fname_in_env as the node is not set up yet
+ const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
+
+ t0 = toku_time_now();
+
+ FTNODE node = alloc_ftnode_for_deserialize(fullhash, blocknum);
+
+ if (rb->size < 24) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
+ "file[%s], blocknum[%lld], rb->size[%u] < 24\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ rb->size);
+ dump_bad_block(rb->buf, rb->size);
+ // TODO: What error do we return here?
+ // Does it even matter?
+ r = toku_db_badformat();
+ goto cleanup;
+ }
+
+ const void *magic;
+ rbuf_literal_bytes(rb, &magic, 8);
+ if (memcmp(magic, "tokuleaf", 8) != 0 &&
+ memcmp(magic, "tokunode", 8) != 0) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
+ "file[%s], blocknum[%lld], unrecognized magic number "
+ "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ static_cast<const uint8_t*>(magic)[0],
+ static_cast<const uint8_t*>(magic)[1],
+ static_cast<const uint8_t*>(magic)[2],
+ static_cast<const uint8_t*>(magic)[3],
+ static_cast<const uint8_t*>(magic)[4],
+ static_cast<const uint8_t*>(magic)[5],
+ static_cast<const uint8_t*>(magic)[6],
+ static_cast<const uint8_t*>(magic)[7]);
+ dump_bad_block(rb->buf, rb->size);
+ r = toku_db_badformat();
+ goto cleanup;
+ }
+
+ node->layout_version_read_from_disk = rbuf_int(rb);
+ if (node->layout_version_read_from_disk <
+ FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
+ "file[%s], blocknum[%lld], node->layout_version_read_from_disk[%d] "
+ "< FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES[%d]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ node->layout_version_read_from_disk,
+ FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES);
+ dump_bad_block(rb->buf, rb->size);
+ // This code path doesn't have to worry about upgrade.
+ r = toku_db_badformat();
+ goto cleanup;
+ }
+
+ // If we get here, we know the node is at least
+ // FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES. We haven't changed
+ // the serialization format since then (this comment is correct as of
+ // version 20, which is Deadshot) so we can go ahead and say the
+ // layout version is current (it will be as soon as we finish
+ // deserializing).
+ // TODO(leif): remove node->layout_version (#5174)
+ node->layout_version = FT_LAYOUT_VERSION;
+
+ node->layout_version_original = rbuf_int(rb);
+ node->build_id = rbuf_int(rb);
+ node->n_children = rbuf_int(rb);
+ // We are guaranteed to have been able to read up to here. If n_children
+ // is too big, we may have a problem, so check that we won't overflow
+ // while reading the partition locations.
+ unsigned int nhsize;
+ // we can do this because n_children is filled in.
+ nhsize = serialize_node_header_size(node);
+ unsigned int needed_size;
+ // we need 12 more so that we can read the compressed block size information
+ // that follows for the nodeinfo.
+ needed_size = nhsize + 12;
+ if (needed_size > rb->size) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
+ "file[%s], blocknum[%lld], needed_size[%d] > rb->size[%d]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ needed_size,
+ rb->size);
+ dump_bad_block(rb->buf, rb->size);
+ r = toku_db_badformat();
+ goto cleanup;
+ }
+
+ XMALLOC_N(node->n_children, node->bp);
+ XMALLOC_N(node->n_children, *ndd);
+ // read the partition locations
+ for (int i=0; i<node->n_children; i++) {
+ BP_START(*ndd,i) = rbuf_int(rb);
+ BP_SIZE (*ndd,i) = rbuf_int(rb);
+ }
+
+ uint32_t checksum;
+ checksum = toku_x1764_memory(rb->buf, rb->ndone);
+ uint32_t stored_checksum;
+ stored_checksum = rbuf_int(rb);
+ if (stored_checksum != checksum) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
+ "file[%s], blocknum[%lld], stored_checksum[%d] != checksum[%d]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ stored_checksum,
+ checksum);
+ dump_bad_block(rb->buf, rb->size);
+ r = TOKUDB_BAD_CHECKSUM;
+ goto cleanup;
+ }
+
+ // Now we want to read the pivot information.
+ struct sub_block sb_node_info;
+ sub_block_init(&sb_node_info);
+ // we'll be able to read these because we checked the size earlier.
+ sb_node_info.compressed_size = rbuf_int(rb);
+ sb_node_info.uncompressed_size = rbuf_int(rb);
+ if (rb->size - rb->ndone < sb_node_info.compressed_size + 8) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
+ "file[%s], blocknum[%lld], rb->size[%d] - rb->ndone[%d] < "
+ "sb_node_info.compressed_size[%d] + 8\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ rb->size,
+ rb->ndone,
+ sb_node_info.compressed_size);
+ dump_bad_block(rb->buf, rb->size);
+ r = toku_db_badformat();
+ goto cleanup;
+ }
+
+ // Finish reading the compressed sub_block
+ const void **cp;
+ cp = (const void **) &sb_node_info.compressed_ptr;
+ rbuf_literal_bytes(rb, cp, sb_node_info.compressed_size);
+ sb_node_info.xsum = rbuf_int(rb);
+ // let's check the checksum
+ uint32_t actual_xsum;
+ actual_xsum = toku_x1764_memory((char *)sb_node_info.compressed_ptr - 8,
+ 8 + sb_node_info.compressed_size);
+ if (sb_node_info.xsum != actual_xsum) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
+ "file[%s], blocknum[%lld], sb_node_info.xsum[%d] != actual_xsum[%d]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ sb_node_info.xsum,
+ actual_xsum);
+ dump_bad_block(rb->buf, rb->size);
+ r = TOKUDB_BAD_CHECKSUM;
+ goto cleanup;
+ }
+
+ // Now decompress the subblock
+ {
+ toku::scoped_malloc sb_node_info_buf(sb_node_info.uncompressed_size);
+ sb_node_info.uncompressed_ptr = sb_node_info_buf.get();
+ tokutime_t decompress_t0 = toku_time_now();
+ toku_decompress((Bytef *)sb_node_info.uncompressed_ptr,
+ sb_node_info.uncompressed_size,
+ (Bytef *)sb_node_info.compressed_ptr,
+ sb_node_info.compressed_size);
+ tokutime_t decompress_t1 = toku_time_now();
+ decompress_time = decompress_t1 - decompress_t0;
+
+ // at this point sb->uncompressed_ptr stores the serialized node info.
+ r = deserialize_ftnode_info(&sb_node_info, node);
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
+ "file[%s], blocknum[%lld], deserialize_ftnode_info failed with "
+ "%d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ r);
+ dump_bad_block(
+ static_cast<unsigned char *>(sb_node_info.uncompressed_ptr),
+ sb_node_info.uncompressed_size);
+ dump_bad_block(rb->buf, rb->size);
+ goto cleanup;
+ }
+ }
+
+ // Now we have the ftnode_info. We have a bunch more stuff in the
+ // rbuf, so we might be able to store the compressed data for some
+ // objects.
+ // We can proceed to deserialize the individual subblocks.
+
+ // setup the memory of the partitions
+ // for partitions being decompressed, create either message buffer or basement node
+ // for partitions staying compressed, create sub_block
+ setup_ftnode_partitions(node, bfe, false);
+
+ // We must capture deserialize and decompression time before
+ // the pf_callback, otherwise we would double-count.
+ t1 = toku_time_now();
+ deserialize_time = (t1 - t0) - decompress_time;
+
+ // do partial fetch if necessary
+ if (bfe->type != ftnode_fetch_none) {
+ PAIR_ATTR attr;
+ r = toku_ftnode_pf_callback(node, *ndd, bfe, fd, &attr);
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
+ "file[%s], blocknum[%lld], toku_ftnode_pf_callback failed with "
+ "%d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ r);
+ dump_bad_block(rb->buf, rb->size);
+ goto cleanup;
+ }
+ }
+
+ // handle clock
+ for (int i = 0; i < node->n_children; i++) {
+ if (bfe->wants_child_available(i)) {
+ paranoid_invariant(BP_STATE(node,i) == PT_AVAIL);
+ BP_TOUCH_CLOCK(node,i);
+ }
+ }
+ *ftnode = node;
+ r = 0;
+
+cleanup:
+ if (r == 0) {
+ bfe->deserialize_time += deserialize_time;
+ bfe->decompress_time += decompress_time;
+ toku_ft_status_update_deserialize_times(node, deserialize_time, decompress_time);
+ }
+ if (r != 0) {
+ if (node) {
+ toku_free(*ndd);
+ toku_free(node->bp);
+ toku_free(node);
+ }
+ }
+ return r;
+}
+
+// This function takes a deserialized version 13 or 14 buffer and
+// constructs the associated internal, non-leaf ftnode object. It
+// also creates MSN's for older messages created in older versions
+// that did not generate MSN's for messages. These new MSN's are
+// generated from the root downwards, counting backwards from MIN_MSN
+// and persisted in the ft header.
+static int deserialize_and_upgrade_internal_node(FTNODE node,
+ struct rbuf *rb,
+ ftnode_fetch_extra *bfe,
+ STAT64INFO info) {
+ int version = node->layout_version_read_from_disk;
+
+ if (version == FT_LAST_LAYOUT_VERSION_WITH_FINGERPRINT) {
+ (void) rbuf_int(rb); // 10. fingerprint
+ }
+
+ node->n_children = rbuf_int(rb); // 11. n_children
+
+ // Sub-tree estimates...
+ for (int i = 0; i < node->n_children; ++i) {
+ if (version == FT_LAST_LAYOUT_VERSION_WITH_FINGERPRINT) {
+ (void) rbuf_int(rb); // 12. fingerprint
+ }
+ uint64_t nkeys = rbuf_ulonglong(rb); // 13. nkeys
+ uint64_t ndata = rbuf_ulonglong(rb); // 14. ndata
+ uint64_t dsize = rbuf_ulonglong(rb); // 15. dsize
+ (void) rbuf_char(rb); // 16. exact (char)
+ invariant(nkeys == ndata);
+ if (info) {
+ // info is non-null if we're trying to upgrade old subtree
+ // estimates to stat64info
+ info->numrows += nkeys;
+ info->numbytes += dsize;
+ }
+ }
+
+ // Pivot keys
+ node->pivotkeys.deserialize_from_rbuf(rb, node->n_children - 1);
+
+ // Create space for the child node buffers (a.k.a. partitions).
+ XMALLOC_N(node->n_children, node->bp);
+
+ // Set the child blocknums.
+ for (int i = 0; i < node->n_children; ++i) {
+ BP_BLOCKNUM(node, i) = rbuf_blocknum(rb); // 18. blocknums
+ BP_WORKDONE(node, i) = 0;
+ }
+
+ // Read in the child buffer maps.
+ for (int i = 0; i < node->n_children; ++i) {
+ // The following fields were previously used by the `sub_block_map'
+ // They include:
+ // - 4 byte index
+ (void) rbuf_int(rb);
+ // - 4 byte offset
+ (void) rbuf_int(rb);
+ // - 4 byte size
+ (void) rbuf_int(rb);
+ }
+
+ // We need to set up this node's partitions, but we can't call the
+ // existing function (setup_ftnode_partitions()) because there are
+ // existing optimizations that would prevent us from bringing all
+ // of this node's partitions into memory. Instead, we use the
+ // existing bfe and node to set the bfe's child_to_read member.
+ // Then we create a temporary bfe that wants all the partitions to make
+ // sure we properly initialize them before filling them
+ // in from our soon-to-be-upgraded node.
+ update_bfe_using_ftnode(node, bfe);
+ ftnode_fetch_extra temp_bfe;
+ temp_bfe.create_for_full_read(nullptr);
+ setup_partitions_using_bfe(node, &temp_bfe, true);
+
+ // Cache the highest MSN generated for the message buffers. This
+ // will be set in the ftnode.
+ //
+ // The way we choose MSNs for upgraded messages is delicate. The
+ // field `highest_unused_msn_for_upgrade' in the header is always an
+ // MSN that no message has yet. So when we have N messages that need
+ // MSNs, we decrement it by N, and then use it and the N-1 MSNs less
+ // than it, but we do not use the value we decremented it to.
+ //
+ // In the code below, we initialize `lowest' with the value of
+ // `highest_unused_msn_for_upgrade' after it is decremented, so we
+ // need to be sure to increment it once before we enqueue our first
+ // message.
+ MSN highest_msn;
+ highest_msn.msn = 0;
+
+ // Deserialize de-compressed buffers.
+ for (int i = 0; i < node->n_children; ++i) {
+ NONLEAF_CHILDINFO bnc = BNC(node, i);
+ MSN highest_msn_in_this_buffer = deserialize_child_buffer_v13(bfe->ft, bnc, rb);
+ if (highest_msn.msn == 0) {
+ highest_msn.msn = highest_msn_in_this_buffer.msn;
+ }
+ }
+
+ // Assign the highest msn from our upgrade message buffers
+ node->max_msn_applied_to_node_on_disk = highest_msn;
+ // Since we assigned MSNs to this node's messages, we need to dirty it.
+ node->set_dirty();
+
+ // Must compute the checksum now (rather than at the end, while we
+ // still have the pointer to the buffer).
+ if (version >= FT_FIRST_LAYOUT_VERSION_WITH_END_TO_END_CHECKSUM) {
+ uint32_t expected_xsum = toku_dtoh32(*(uint32_t*)(rb->buf+rb->size-4)); // 27. checksum
+ uint32_t actual_xsum = toku_x1764_memory(rb->buf, rb->size-4);
+ if (expected_xsum != actual_xsum) {
+ fprintf(stderr, "%s:%d: Bad checksum: expected = %" PRIx32 ", actual= %" PRIx32 "\n",
+ __FUNCTION__,
+ __LINE__,
+ expected_xsum,
+ actual_xsum);
+ fprintf(stderr,
+ "Checksum failure while reading node in file %s.\n",
+ toku_cachefile_fname_in_env(bfe->ft->cf));
+ fflush(stderr);
+ return toku_db_badformat();
+ }
+ }
+
+ return 0;
+}
+
+// This function takes a deserialized version 13 or 14 buffer and
+// constructs the associated leaf ftnode object.
+static int
+deserialize_and_upgrade_leaf_node(FTNODE node,
+ struct rbuf *rb,
+ ftnode_fetch_extra *bfe,
+ STAT64INFO info)
+{
+ int r = 0;
+ int version = node->layout_version_read_from_disk;
+
+ // This is a leaf node, so the offsets in the buffer will be
+ // different from the internal node offsets above.
+ uint64_t nkeys = rbuf_ulonglong(rb); // 10. nkeys
+ uint64_t ndata = rbuf_ulonglong(rb); // 11. ndata
+ uint64_t dsize = rbuf_ulonglong(rb); // 12. dsize
+ invariant(nkeys == ndata);
+ if (info) {
+ // info is non-null if we're trying to upgrade old subtree
+ // estimates to stat64info
+ info->numrows += nkeys;
+ info->numbytes += dsize;
+ }
+
+ // This is the optimized for upgrade field.
+ if (version == FT_LAYOUT_VERSION_14) {
+ (void) rbuf_int(rb); // 13. optimized
+ }
+
+ // npartitions - the number of partitions in this leaf node. There
+ // should only be 1 (ONE) partition, so there shouldn't be any pivot
+ // key stored; we simply assert that the value is indeed 1.
+ int npartitions = rbuf_int(rb); // 14. npartitions
+ assert(npartitions == 1);
+
+ // Set number of children to 1, since we will only have one
+ // basement node.
+ node->n_children = 1;
+ XMALLOC_N(node->n_children, node->bp);
+ node->pivotkeys.create_empty();
+
+ // Create one basement node to contain all the leaf entries by
+ // setting up the single partition and updating the bfe.
+ update_bfe_using_ftnode(node, bfe);
+ ftnode_fetch_extra temp_bfe;
+ temp_bfe.create_for_full_read(bfe->ft);
+ setup_partitions_using_bfe(node, &temp_bfe, true);
+
+ // 11. Deserialize the partition maps, though they are not used in the
+ // newer versions of ftnodes.
+ for (int i = 0; i < node->n_children; ++i) {
+ // The following fields were previously used by the `sub_block_map'
+ // They include:
+ // - 4 byte index
+ (void) rbuf_int(rb);
+ // - 4 byte offset
+ (void) rbuf_int(rb);
+ // - 4 byte size
+ (void) rbuf_int(rb);
+ }
+
+ // Copy all of the leaf entries into the single basement node.
+
+ // The number of leaf entries in buffer.
+ int n_in_buf = rbuf_int(rb); // 15. # of leaves
+ BLB_SEQINSERT(node,0) = 0;
+ BASEMENTNODE bn = BLB(node, 0);
+
+ // Read the leaf entries from the buffer, advancing the buffer
+ // as we go.
+ bool has_end_to_end_checksum = (version >= FT_FIRST_LAYOUT_VERSION_WITH_END_TO_END_CHECKSUM);
+ if (version <= FT_LAYOUT_VERSION_13) {
+ // Create our mempool.
+ // Loop through
+ for (int i = 0; i < n_in_buf; ++i) {
+ LEAFENTRY_13 le = reinterpret_cast<LEAFENTRY_13>(&rb->buf[rb->ndone]);
+ uint32_t disksize = leafentry_disksize_13(le);
+ rb->ndone += disksize; // 16. leaf entry (13)
+ invariant(rb->ndone<=rb->size);
+ LEAFENTRY new_le;
+ size_t new_le_size;
+ void* key = NULL;
+ uint32_t keylen = 0;
+ r = toku_le_upgrade_13_14(le,
+ &key,
+ &keylen,
+ &new_le_size,
+ &new_le);
+ assert_zero(r);
+ // Copy the pointer value straight into the OMT
+ LEAFENTRY new_le_in_bn = nullptr;
+ void *maybe_free;
+ bn->data_buffer.get_space_for_insert(
+ i,
+ key,
+ keylen,
+ new_le_size,
+ &new_le_in_bn,
+ &maybe_free
+ );
+ if (maybe_free) {
+ toku_free(maybe_free);
+ }
+ memcpy(new_le_in_bn, new_le, new_le_size);
+ toku_free(new_le);
+ }
+ } else {
+ uint32_t data_size = rb->size - rb->ndone;
+ if (has_end_to_end_checksum) {
+ data_size -= sizeof(uint32_t);
+ }
+ bn->data_buffer.deserialize_from_rbuf(n_in_buf, rb, data_size, node->layout_version_read_from_disk);
+ }
+
+ // Whatever this is must be less than the MSNs of every message above
+ // it, so it's ok to take it here.
+ bn->max_msn_applied = bfe->ft->h->highest_unused_msn_for_upgrade;
+ bn->stale_ancestor_messages_applied = false;
+ node->max_msn_applied_to_node_on_disk = bn->max_msn_applied;
+
+ // Checksum (end to end) is only on version 14
+ if (has_end_to_end_checksum) {
+ uint32_t expected_xsum = rbuf_int(rb); // 17. checksum
+ uint32_t actual_xsum = toku_x1764_memory(rb->buf, rb->size - 4);
+ if (expected_xsum != actual_xsum) {
+ fprintf(stderr, "%s:%d: Bad checksum: expected = %" PRIx32 ", actual= %" PRIx32 "\n",
+ __FUNCTION__,
+ __LINE__,
+ expected_xsum,
+ actual_xsum);
+ fprintf(stderr,
+ "Checksum failure while reading node in file %s.\n",
+ toku_cachefile_fname_in_env(bfe->ft->cf));
+ fflush(stderr);
+ return toku_db_badformat();
+ }
+ }
+
+ // We should have read the whole block by this point.
+ if (rb->ndone != rb->size) {
+ // TODO: Error handling.
+ return 1;
+ }
+
+ return r;
+}
+
+static int read_and_decompress_block_from_fd_into_rbuf(
+ int fd,
+ BLOCKNUM blocknum,
+ DISKOFF offset,
+ DISKOFF size,
+ FT ft,
+ struct rbuf *rb,
+ /* out */ int *layout_version_p);
+
+// This function upgrades a version 14 or 13 ftnode to the current
+// version. NOTE: This code assumes the first field of the rbuf has
+// already been read from the buffer (namely the layout_version of the
+// ftnode.)
+static int deserialize_and_upgrade_ftnode(FTNODE node,
+ FTNODE_DISK_DATA *ndd,
+ BLOCKNUM blocknum,
+ ftnode_fetch_extra *bfe,
+ STAT64INFO info,
+ int fd) {
+ int r = 0;
+ int version;
+
+ // I. First we need to de-compress the entire node, only then can
+ // we read the different sub-sections.
+ // get the file offset and block size for the block
+ DISKOFF offset, size;
+ bfe->ft->blocktable.translate_blocknum_to_offset_size(blocknum, &offset, &size);
+
+ struct rbuf rb;
+ r = read_and_decompress_block_from_fd_into_rbuf(fd,
+ blocknum,
+ offset,
+ size,
+ bfe->ft,
+ &rb,
+ &version);
+ if (r != 0) {
+ const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
+ fprintf(stderr,
+ "%s:%d:deserialize_and_upgrade_ftnode - "
+ "file[%s], blocknum[%lld], "
+ "read_and_decompress_block_from_fd_into_rbuf failed with %d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ r);
+ goto exit;
+ }
+
+ // Re-read the magic field from the previous call, since we are
+ // restarting with a fresh rbuf.
+ {
+ const void *magic;
+ rbuf_literal_bytes(&rb, &magic, 8); // 1. magic
+ }
+
+ // II. Start reading ftnode fields out of the decompressed buffer.
+
+ // Copy over old version info.
+ node->layout_version_read_from_disk = rbuf_int(&rb); // 2. layout version
+ version = node->layout_version_read_from_disk;
+ if (version > FT_LAYOUT_VERSION_14) {
+ const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
+ fprintf(stderr,
+ "%s:%d:deserialize_and_upgrade_ftnode - "
+ "file[%s], blocknum[%lld], version[%d] > "
+ "FT_LAYOUT_VERSION_14[%d]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ version,
+ FT_LAYOUT_VERSION_14);
+ dump_bad_block(rb.buf, rb.size);
+ goto exit;
+ }
+ assert(version <= FT_LAYOUT_VERSION_14);
+ // Upgrade the current version number to the current version.
+ node->layout_version = FT_LAYOUT_VERSION;
+
+ node->layout_version_original = rbuf_int(&rb); // 3. original layout
+ node->build_id = rbuf_int(&rb); // 4. build id
+
+ // The remaining offsets into the rbuf do not map to the current
+ // version, so we need to fill in the blanks and ignore older
+ // fields.
+ (void)rbuf_int(&rb); // 5. nodesize
+ node->flags = rbuf_int(&rb); // 6. flags
+ node->height = rbuf_int(&rb); // 7. height
+
+ // If the version is less than 14, there are two extra ints here.
+ // we would need to ignore them if they are there.
+ // These are the 'fingerprints'.
+ if (version == FT_LAYOUT_VERSION_13) {
+ (void) rbuf_int(&rb); // 8. rand4
+ (void) rbuf_int(&rb); // 9. local
+ }
+
+ // The next offsets are dependent on whether this is a leaf node
+ // or not.
+
+ // III. Read in Leaf and Internal Node specific data.
+
+ // Check height to determine whether this is a leaf node or not.
+ if (node->height > 0) {
+ r = deserialize_and_upgrade_internal_node(node, &rb, bfe, info);
+ } else {
+ r = deserialize_and_upgrade_leaf_node(node, &rb, bfe, info);
+ }
+
+ XMALLOC_N(node->n_children, *ndd);
+ // Initialize the partition locations to zero, because version 14
+ // and below have no notion of partitions on disk.
+ for (int i=0; i<node->n_children; i++) {
+ BP_START(*ndd,i) = 0;
+ BP_SIZE (*ndd,i) = 0;
+ }
+
+ toku_free(rb.buf);
+exit:
+ return r;
+}
+
+// Effect: deserializes a ftnode that is in rb (with pointer of rb just past the
+// magic) into a FTNODE.
+static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
+ FTNODE_DISK_DATA *ndd,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ ftnode_fetch_extra *bfe,
+ STAT64INFO info,
+ struct rbuf *rb,
+ int fd) {
+ int r = 0;
+ struct sub_block sb_node_info;
+
+ tokutime_t t0, t1;
+ tokutime_t decompress_time = 0;
+ tokutime_t deserialize_time = 0;
+ const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
+
+ t0 = toku_time_now();
+
+ FTNODE node = alloc_ftnode_for_deserialize(fullhash, blocknum);
+
+ // now start reading from rbuf
+ // first thing we do is read the header information
+ const void *magic;
+ rbuf_literal_bytes(rb, &magic, 8);
+ if (memcmp(magic, "tokuleaf", 8) != 0 &&
+ memcmp(magic, "tokunode", 8) != 0) {
+ fprintf(stderr,
+ "%s:%d:deserialize_ftnode_from_rbuf - "
+ "file[%s], blocknum[%lld], unrecognized magic number "
+ "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ static_cast<const uint8_t *>(magic)[0],
+ static_cast<const uint8_t *>(magic)[1],
+ static_cast<const uint8_t *>(magic)[2],
+ static_cast<const uint8_t *>(magic)[3],
+ static_cast<const uint8_t *>(magic)[4],
+ static_cast<const uint8_t *>(magic)[5],
+ static_cast<const uint8_t *>(magic)[6],
+ static_cast<const uint8_t *>(magic)[7]);
+ dump_bad_block(rb->buf, rb->size);
+
+ r = toku_db_badformat();
+ goto cleanup;
+ }
+
+ node->layout_version_read_from_disk = rbuf_int(rb);
+ lazy_assert(node->layout_version_read_from_disk >= FT_LAYOUT_MIN_SUPPORTED_VERSION);
+
+ // Check if we are reading in an older node version.
+ if (node->layout_version_read_from_disk <= FT_LAYOUT_VERSION_14) {
+ int version = node->layout_version_read_from_disk;
+ // Perform the upgrade.
+ r = deserialize_and_upgrade_ftnode(node, ndd, blocknum, bfe, info, fd);
+ if (r != 0) {
+ fprintf(stderr,
+ "%s:%d:deserialize_ftnode_from_rbuf - "
+ "file[%s], blocknum[%lld], deserialize_and_upgrade_ftnode "
+ "failed with %d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ r);
+ dump_bad_block(rb->buf, rb->size);
+ goto cleanup;
+ }
+
+ if (version <= FT_LAYOUT_VERSION_13) {
+ // deprecate 'TOKU_DB_VALCMP_BUILTIN'. just remove the flag
+ node->flags &= ~TOKU_DB_VALCMP_BUILTIN_13;
+ }
+
+ // If everything is ok, just re-assign the ftnode and return.
+ *ftnode = node;
+ r = 0;
+ goto cleanup;
+ }
+
+ // Upgrade versions after 14 to current. This upgrade is trivial, it
+ // removes the optimized for upgrade field, which has already been
+ // removed in the deserialization code (see
+ // deserialize_ftnode_info()).
+ node->layout_version = FT_LAYOUT_VERSION;
+ node->layout_version_original = rbuf_int(rb);
+ node->build_id = rbuf_int(rb);
+ node->n_children = rbuf_int(rb);
+ XMALLOC_N(node->n_children, node->bp);
+ XMALLOC_N(node->n_children, *ndd);
+ // read the partition locations
+ for (int i=0; i<node->n_children; i++) {
+ BP_START(*ndd,i) = rbuf_int(rb);
+ BP_SIZE (*ndd,i) = rbuf_int(rb);
+ }
+ // verify checksum of header stored
+ uint32_t checksum;
+ checksum = toku_x1764_memory(rb->buf, rb->ndone);
+ uint32_t stored_checksum;
+ stored_checksum = rbuf_int(rb);
+ if (stored_checksum != checksum) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_from_rbuf - "
+ "file[%s], blocknum[%lld], stored_checksum[%d] != checksum[%d]\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ stored_checksum,
+ checksum);
+ dump_bad_block(rb->buf, rb->size);
+ invariant(stored_checksum == checksum);
+ }
+
+ // now we read and decompress the pivot and child information
+ sub_block_init(&sb_node_info);
+ {
+ tokutime_t sb_decompress_t0 = toku_time_now();
+ r = read_and_decompress_sub_block(rb, &sb_node_info);
+ tokutime_t sb_decompress_t1 = toku_time_now();
+ decompress_time += sb_decompress_t1 - sb_decompress_t0;
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_from_rbuf - "
+ "file[%s], blocknum[%lld], read_and_decompress_sub_block failed "
+ "with %d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ r);
+ dump_bad_block(
+ static_cast<unsigned char *>(sb_node_info.uncompressed_ptr),
+ sb_node_info.uncompressed_size);
+ dump_bad_block(rb->buf, rb->size);
+ goto cleanup;
+ }
+ }
+
+ // at this point, sb->uncompressed_ptr stores the serialized node info
+ r = deserialize_ftnode_info(&sb_node_info, node);
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_from_rbuf - "
+ "file[%s], blocknum[%lld], deserialize_ftnode_info failed with "
+ "%d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ r);
+ dump_bad_block(rb->buf, rb->size);
+ goto cleanup;
+ }
+ toku_free(sb_node_info.uncompressed_ptr);
+
+ // now that the node info has been deserialized, we can proceed to
+ // deserialize the individual sub blocks
+
+ // setup the memory of the partitions
+ // for partitions being decompressed, create either message buffer or
+ // basement node
+ // for partitions staying compressed, create sub_block
+ setup_ftnode_partitions(node, bfe, true);
+
+    // This loop is parallelizable, since we don't have a dependency on the
+    // work done so far.
+ for (int i = 0; i < node->n_children; i++) {
+ uint32_t curr_offset = BP_START(*ndd, i);
+ uint32_t curr_size = BP_SIZE(*ndd, i);
+ // the compressed, serialized partitions start at where rb is currently
+ // pointing, which would be rb->buf + rb->ndone
+        // we need to initialize curr_rbuf to point to this place
+ struct rbuf curr_rbuf = {.buf = nullptr, .size = 0, .ndone = 0};
+ rbuf_init(&curr_rbuf, rb->buf + curr_offset, curr_size);
+
+ //
+ // now we are at the point where we have:
+ // - read the entire compressed node off of disk,
+ // - decompressed the pivot and offset information,
+ // - have arrived at the individual partitions.
+ //
+        // Based on the information in bfe, we want to decompress a subset
+        // of the compressed partitions (possibly none, possibly all).
+        // The partitions we want available are decompressed here; the rest
+        // are simply copied into the node in compressed form, and the state
+        // of each such partition is set to PT_COMPRESSED.
+ //
+
+ struct sub_block curr_sb;
+ sub_block_init(&curr_sb);
+
+ // curr_rbuf is passed by value to decompress_and_deserialize_worker,
+ // so there's no ugly race condition.
+ // This would be more obvious if curr_rbuf were an array.
+
+ // deserialize_ftnode_info figures out what the state
+ // should be and sets up the memory so that we are ready to use it
+
+ switch (BP_STATE(node, i)) {
+ case PT_AVAIL: {
+ // case where we read and decompress the partition
+ tokutime_t partition_decompress_time;
+ r = decompress_and_deserialize_worker(
+ curr_rbuf,
+ curr_sb,
+ node,
+ i,
+ bfe->ft->cmp,
+ &partition_decompress_time);
+ decompress_time += partition_decompress_time;
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_from_rbuf - "
+ "file[%s], blocknum[%lld], childnum[%d], "
+ "decompress_and_deserialize_worker failed with %d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ i,
+ r);
+ dump_bad_block(rb->buf, rb->size);
+ goto cleanup;
+ }
+ break;
+ }
+ case PT_COMPRESSED:
+ // case where we leave the partition in the compressed state
+ r = check_and_copy_compressed_sub_block_worker(curr_rbuf, curr_sb, node, i);
+ if (r != 0) {
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_from_rbuf - "
+ "file[%s], blocknum[%lld], childnum[%d], "
+ "check_and_copy_compressed_sub_block_worker failed with "
+ "%d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ i,
+ r);
+ dump_bad_block(rb->buf, rb->size);
+ goto cleanup;
+ }
+ break;
+ case PT_INVALID: // this is really bad
+ case PT_ON_DISK: // it's supposed to be in memory.
+ abort();
+ }
+ }
+ *ftnode = node;
+ r = 0;
+
+cleanup:
+ if (r == 0) {
+ t1 = toku_time_now();
+ deserialize_time = (t1 - t0) - decompress_time;
+ bfe->deserialize_time += deserialize_time;
+ bfe->decompress_time += decompress_time;
+ toku_ft_status_update_deserialize_times(node, deserialize_time, decompress_time);
+ }
+ if (r != 0) {
+ // NOTE: Right now, callers higher in the stack will assert on
+ // failure, so this is OK for production. However, if we
+ // create tools that use this function to search for errors in
+ // the FT, then we will leak memory.
+ if (node) {
+ toku_free(node);
+ }
+ }
+ return r;
+}
+
+int
+toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, ftnode_fetch_extra *bfe) {
+ int r = 0;
+ assert(BP_STATE(node,childnum) == PT_ON_DISK);
+ assert(node->bp[childnum].ptr.tag == BCT_NULL);
+
+ //
+ // setup the partition
+ //
+ setup_available_ftnode_partition(node, childnum);
+ BP_STATE(node,childnum) = PT_AVAIL;
+
+ //
+ // read off disk and make available in memory
+ //
+ // get the file offset and block size for the block
+ DISKOFF node_offset, total_node_disk_size;
+ bfe->ft->blocktable.translate_blocknum_to_offset_size(node->blocknum, &node_offset, &total_node_disk_size);
+
+ uint32_t curr_offset = BP_START(ndd, childnum);
+ uint32_t curr_size = BP_SIZE (ndd, childnum);
+
+ struct rbuf rb;
+ rbuf_init(&rb, nullptr, 0);
+
+ uint32_t pad_at_beginning = (node_offset+curr_offset)%512;
+ uint32_t padded_size = roundup_to_multiple(512, pad_at_beginning + curr_size);
+
+ toku::scoped_malloc_aligned raw_block_buf(padded_size, 512);
+ uint8_t *raw_block = reinterpret_cast<uint8_t *>(raw_block_buf.get());
+ rbuf_init(&rb, pad_at_beginning+raw_block, curr_size);
+ tokutime_t t0 = toku_time_now();
+
+ // read the block
+ assert(0==((unsigned long long)raw_block)%512); // for O_DIRECT
+ assert(0==(padded_size)%512);
+ assert(0==(node_offset+curr_offset-pad_at_beginning)%512);
+ ssize_t rlen = toku_os_pread(fd, raw_block, padded_size, node_offset+curr_offset-pad_at_beginning);
+ assert((DISKOFF)rlen >= pad_at_beginning + curr_size); // we read in at least enough to get what we wanted
+ assert((DISKOFF)rlen <= padded_size); // we didn't read in too much.
+
+ tokutime_t t1 = toku_time_now();
+
+ // read sub block
+ struct sub_block curr_sb;
+ sub_block_init(&curr_sb);
+ r = read_compressed_sub_block(&rb, &curr_sb);
+ if (r != 0) {
+ return r;
+ }
+ invariant(curr_sb.compressed_ptr != NULL);
+
+ // decompress
+ toku::scoped_malloc uncompressed_buf(curr_sb.uncompressed_size);
+ curr_sb.uncompressed_ptr = uncompressed_buf.get();
+ toku_decompress((Bytef *) curr_sb.uncompressed_ptr, curr_sb.uncompressed_size,
+ (Bytef *) curr_sb.compressed_ptr, curr_sb.compressed_size);
+
+ // deserialize
+ tokutime_t t2 = toku_time_now();
+
+ r = deserialize_ftnode_partition(&curr_sb, node, childnum, bfe->ft->cmp);
+
+ tokutime_t t3 = toku_time_now();
+
+ // capture stats
+ tokutime_t io_time = t1 - t0;
+ tokutime_t decompress_time = t2 - t1;
+ tokutime_t deserialize_time = t3 - t2;
+ bfe->deserialize_time += deserialize_time;
+ bfe->decompress_time += decompress_time;
+ toku_ft_status_update_deserialize_times(node, deserialize_time, decompress_time);
+
+ bfe->bytes_read = rlen;
+ bfe->io_time = io_time;
+
+ return r;
+}
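+
+// Worked example of the O_DIRECT alignment math above (a sketch with
+// hypothetical numbers, not values from any real file): suppose
+// node_offset + curr_offset = 13100 and curr_size = 2000. Then
+//   pad_at_beginning = 13100 % 512 = 300
+//   padded_size      = roundup_to_multiple(512, 300 + 2000) = 2560
+// and the pread starts at 13100 - 300 = 12800, which is 512-aligned, so
+// both the file offset and the read length satisfy the O_DIRECT asserts.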
+
+// Take an ftnode partition that is in the compressed state and make it available.
+int toku_deserialize_bp_from_compressed(FTNODE node,
+ int childnum,
+ ftnode_fetch_extra *bfe) {
+
+ int r = 0;
+ assert(BP_STATE(node, childnum) == PT_COMPRESSED);
+ SUB_BLOCK curr_sb = BSB(node, childnum);
+
+ toku::scoped_malloc uncompressed_buf(curr_sb->uncompressed_size);
+ assert(curr_sb->uncompressed_ptr == NULL);
+ curr_sb->uncompressed_ptr = uncompressed_buf.get();
+
+ setup_available_ftnode_partition(node, childnum);
+ BP_STATE(node,childnum) = PT_AVAIL;
+
+ // decompress the sub_block
+ tokutime_t t0 = toku_time_now();
+
+ toku_decompress((Bytef *)curr_sb->uncompressed_ptr,
+ curr_sb->uncompressed_size,
+ (Bytef *)curr_sb->compressed_ptr,
+ curr_sb->compressed_size);
+
+ tokutime_t t1 = toku_time_now();
+
+ r = deserialize_ftnode_partition(curr_sb, node, childnum, bfe->ft->cmp);
+ if (r != 0) {
+ const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
+ fprintf(stderr,
+ "%s:%d:toku_deserialize_bp_from_compressed - "
+ "file[%s], blocknum[%lld], "
+ "deserialize_ftnode_partition failed with %d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)node->blocknum.b,
+ r);
+ dump_bad_block(static_cast<unsigned char *>(curr_sb->compressed_ptr),
+ curr_sb->compressed_size);
+ dump_bad_block(static_cast<unsigned char *>(curr_sb->uncompressed_ptr),
+ curr_sb->uncompressed_size);
+ }
+
+ tokutime_t t2 = toku_time_now();
+
+ tokutime_t decompress_time = t1 - t0;
+ tokutime_t deserialize_time = t2 - t1;
+ bfe->deserialize_time += deserialize_time;
+ bfe->decompress_time += decompress_time;
+ toku_ft_status_update_deserialize_times(node, deserialize_time, decompress_time);
+
+ toku_free(curr_sb->compressed_ptr);
+ toku_free(curr_sb);
+ return r;
+}
+
+static int deserialize_ftnode_from_fd(int fd,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ FTNODE *ftnode,
+ FTNODE_DISK_DATA *ndd,
+ ftnode_fetch_extra *bfe,
+ STAT64INFO info) {
+ struct rbuf rb = RBUF_INITIALIZER;
+
+ tokutime_t t0 = toku_time_now();
+ read_block_from_fd_into_rbuf(fd, blocknum, bfe->ft, &rb);
+ tokutime_t t1 = toku_time_now();
+
+ // Decompress and deserialize the ftnode. Time statistics
+ // are taken inside this function.
+ int r = deserialize_ftnode_from_rbuf(
+ ftnode, ndd, blocknum, fullhash, bfe, info, &rb, fd);
+ if (r != 0) {
+ const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
+ fprintf(
+ stderr,
+ "%s:%d:deserialize_ftnode_from_fd - "
+ "file[%s], blocknum[%lld], deserialize_ftnode_from_rbuf failed with "
+ "%d\n",
+ __FILE__,
+ __LINE__,
+ fname ? fname : "unknown",
+ (longlong)blocknum.b,
+ r);
+ dump_bad_block(rb.buf, rb.size);
+ }
+
+ bfe->bytes_read = rb.size;
+ bfe->io_time = t1 - t0;
+ toku_free(rb.buf);
+ return r;
+}
+
+// Effect: Read a node in. If possible, read just the header.
+// Perform version upgrade if necessary.
+int toku_deserialize_ftnode_from(int fd,
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ FTNODE *ftnode,
+ FTNODE_DISK_DATA *ndd,
+ ftnode_fetch_extra *bfe) {
+ int r = 0;
+ struct rbuf rb = RBUF_INITIALIZER;
+
+ // each function below takes the appropriate io/decompression/deserialize
+ // statistics
+
+ if (!bfe->read_all_partitions) {
+ read_ftnode_header_from_fd_into_rbuf_if_small_enough(
+ fd, blocknum, bfe->ft, &rb, bfe);
+ r = deserialize_ftnode_header_from_rbuf_if_small_enough(
+ ftnode, ndd, blocknum, fullhash, bfe, &rb, fd);
+ } else {
+ // force us to do it the old way
+ r = -1;
+ }
+ if (r != 0) {
+ // Something went wrong, go back to doing it the old way.
+ r = deserialize_ftnode_from_fd(
+ fd, blocknum, fullhash, ftnode, ndd, bfe, nullptr);
+ }
+
+ toku_free(rb.buf);
+ return r;
+}
+
+void
+toku_verify_or_set_counts(FTNODE UU(node)) {
+}
+
+int
+toku_db_badformat(void) {
+ return DB_BADFORMAT;
+}
+
+static size_t
+serialize_rollback_log_size(ROLLBACK_LOG_NODE log) {
+ size_t size = node_header_overhead //8 "tokuroll", 4 version, 4 version_original, 4 build_id
+ +16 //TXNID_PAIR
+ +8 //sequence
+ +8 //blocknum
+ +8 //previous (blocknum)
+ +8 //resident_bytecount
+ +8 //memarena size
+ +log->rollentry_resident_bytecount;
+ return size;
+}
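+
+// For concreteness (a sketch, assuming node_header_overhead is the
+// 8 + 4 + 4 + 4 = 20 bytes listed in the comment above): the fixed part of a
+// rollback log node is 20 + 16 + 8 + 8 + 8 + 8 + 8 = 76 bytes, so
+// serialize_rollback_log_size(log) returns
+// 76 + log->rollentry_resident_bytecount.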
+
+static void
+serialize_rollback_log_node_to_buf(ROLLBACK_LOG_NODE log, char *buf, size_t calculated_size, int UU(n_sub_blocks), struct sub_block UU(sub_block[])) {
+ struct wbuf wb;
+ wbuf_init(&wb, buf, calculated_size);
+ { //Serialize rollback log to local wbuf
+ wbuf_nocrc_literal_bytes(&wb, "tokuroll", 8);
+ lazy_assert(log->layout_version == FT_LAYOUT_VERSION);
+ wbuf_nocrc_int(&wb, log->layout_version);
+ wbuf_nocrc_int(&wb, log->layout_version_original);
+ wbuf_nocrc_uint(&wb, BUILD_ID);
+ wbuf_nocrc_TXNID_PAIR(&wb, log->txnid);
+ wbuf_nocrc_ulonglong(&wb, log->sequence);
+ wbuf_nocrc_BLOCKNUM(&wb, log->blocknum);
+ wbuf_nocrc_BLOCKNUM(&wb, log->previous);
+ wbuf_nocrc_ulonglong(&wb, log->rollentry_resident_bytecount);
+ //Write down memarena size needed to restore
+ wbuf_nocrc_ulonglong(&wb, log->rollentry_arena.total_size_in_use());
+
+ {
+ //Store rollback logs
+ struct roll_entry *item;
+ size_t done_before = wb.ndone;
+ for (item = log->newest_logentry; item; item = item->prev) {
+ toku_logger_rollback_wbuf_nocrc_write(&wb, item);
+ }
+ lazy_assert(done_before + log->rollentry_resident_bytecount == wb.ndone);
+ }
+ }
+ lazy_assert(wb.ndone == wb.size);
+ lazy_assert(calculated_size==wb.ndone);
+}
+
+static void
+serialize_uncompressed_block_to_memory(char * uncompressed_buf,
+ int n_sub_blocks,
+ struct sub_block sub_block[/*n_sub_blocks*/],
+ enum toku_compression_method method,
+ /*out*/ size_t *n_bytes_to_write,
+ /*out*/ char **bytes_to_write)
+// Guarantees that the malloc'd BYTES_TO_WRITE is 512-byte aligned (so that O_DIRECT will work)
+{
+    // allocate space for the compressed copy of uncompressed_buf
+ size_t compressed_len = get_sum_compressed_size_bound(n_sub_blocks, sub_block, method);
+ size_t sub_block_header_len = sub_block_header_size(n_sub_blocks);
+ size_t header_len = node_header_overhead + sub_block_header_len + sizeof (uint32_t); // node + sub_block + checksum
+ char *XMALLOC_N_ALIGNED(512, roundup_to_multiple(512, header_len + compressed_len), compressed_buf);
+
+ // copy the header
+ memcpy(compressed_buf, uncompressed_buf, node_header_overhead);
+ if (0) printf("First 4 bytes before compressing data are %02x%02x%02x%02x\n",
+ uncompressed_buf[node_header_overhead], uncompressed_buf[node_header_overhead+1],
+ uncompressed_buf[node_header_overhead+2], uncompressed_buf[node_header_overhead+3]);
+
+ // compress all of the sub blocks
+ char *uncompressed_ptr = uncompressed_buf + node_header_overhead;
+ char *compressed_ptr = compressed_buf + header_len;
+ compressed_len = compress_all_sub_blocks(n_sub_blocks, sub_block, uncompressed_ptr, compressed_ptr, num_cores, ft_pool, method);
+
+ //if (0) printf("Block %" PRId64 " Size before compressing %u, after compression %" PRIu64 "\n", blocknum.b, calculated_size-node_header_overhead, (uint64_t) compressed_len);
+
+ // serialize the sub block header
+ uint32_t *ptr = (uint32_t *)(compressed_buf + node_header_overhead);
+ *ptr++ = toku_htod32(n_sub_blocks);
+ for (int i=0; i<n_sub_blocks; i++) {
+ ptr[0] = toku_htod32(sub_block[i].compressed_size);
+ ptr[1] = toku_htod32(sub_block[i].uncompressed_size);
+ ptr[2] = toku_htod32(sub_block[i].xsum);
+ ptr += 3;
+ }
+
+ // compute the header checksum and serialize it
+ uint32_t header_length = (char *)ptr - (char *)compressed_buf;
+ uint32_t xsum = toku_x1764_memory(compressed_buf, header_length);
+ *ptr = toku_htod32(xsum);
+
+ uint32_t padded_len = roundup_to_multiple(512, header_len + compressed_len);
+ // Zero out padding.
+ for (uint32_t i = header_len+compressed_len; i < padded_len; i++) {
+ compressed_buf[i] = 0;
+ }
+ *n_bytes_to_write = padded_len;
+ *bytes_to_write = compressed_buf;
+}
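+
+// The on-disk image produced by the function above is, in order (a summary
+// of the code, not an independent format spec):
+//   [node header]                               node_header_overhead bytes
+//   [n_sub_blocks]                              4 bytes
+//   [compressed_size, uncompressed_size, xsum]  12 bytes per sub block
+//   [header checksum]                           4 bytes
+//   [compressed sub block data]                 compressed_len bytes
+//   [zero padding]                              up to the next 512-byte boundary
+// decompress_from_raw_block_into_rbuf() below walks the same layout in reverse.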
+
+void
+toku_serialize_rollback_log_to_memory_uncompressed(ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized) {
+ // get the size of the serialized node
+ size_t calculated_size = serialize_rollback_log_size(log);
+
+ serialized->len = calculated_size;
+ serialized->n_sub_blocks = 0;
+ // choose sub block parameters
+ int sub_block_size = 0;
+ size_t data_size = calculated_size - node_header_overhead;
+ choose_sub_block_size(data_size, max_sub_blocks, &sub_block_size, &serialized->n_sub_blocks);
+ lazy_assert(0 < serialized->n_sub_blocks && serialized->n_sub_blocks <= max_sub_blocks);
+ lazy_assert(sub_block_size > 0);
+
+ // set the initial sub block size for all of the sub blocks
+ for (int i = 0; i < serialized->n_sub_blocks; i++)
+ sub_block_init(&serialized->sub_block[i]);
+ set_all_sub_block_sizes(data_size, sub_block_size, serialized->n_sub_blocks, serialized->sub_block);
+
+ // allocate space for the serialized node
+ XMALLOC_N(calculated_size, serialized->data);
+ // serialize the node into buf
+ serialize_rollback_log_node_to_buf(log, serialized->data, calculated_size, serialized->n_sub_blocks, serialized->sub_block);
+ serialized->blocknum = log->blocknum;
+}
+
+int toku_serialize_rollback_log_to(int fd,
+ ROLLBACK_LOG_NODE log,
+ SERIALIZED_ROLLBACK_LOG_NODE serialized_log,
+ bool is_serialized,
+ FT ft,
+ bool for_checkpoint) {
+ size_t n_to_write;
+ char *compressed_buf;
+ struct serialized_rollback_log_node serialized_local;
+
+ if (is_serialized) {
+ invariant_null(log);
+ } else {
+ invariant_null(serialized_log);
+ serialized_log = &serialized_local;
+ toku_serialize_rollback_log_to_memory_uncompressed(log, serialized_log);
+ }
+
+ BLOCKNUM blocknum = serialized_log->blocknum;
+ invariant(blocknum.b >= 0);
+
+ // Compress and malloc buffer to write
+ serialize_uncompressed_block_to_memory(serialized_log->data,
+ serialized_log->n_sub_blocks,
+ serialized_log->sub_block,
+ ft->h->compression_method,
+ &n_to_write,
+ &compressed_buf);
+
+ // Dirties the ft
+ DISKOFF offset;
+ ft->blocktable.realloc_on_disk(
+ blocknum, n_to_write, &offset, ft, fd, for_checkpoint);
+
+ toku_os_full_pwrite(fd, compressed_buf, n_to_write, offset);
+ toku_free(compressed_buf);
+ if (!is_serialized) {
+ toku_static_serialized_rollback_log_destroy(&serialized_local);
+ log->dirty = false; // See #1957. Must set the node to be clean after
+ // serializing it so that it doesn't get written again
+ // on the next checkpoint or eviction.
+ }
+ return 0;
+}
+
+static int
+deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, ROLLBACK_LOG_NODE *log_p, struct rbuf *rb) {
+ ROLLBACK_LOG_NODE MALLOC(result);
+ int r;
+ if (result==NULL) {
+ r=get_error_errno();
+ if (0) { died0: toku_free(result); }
+ return r;
+ }
+
+ const void *magic;
+ rbuf_literal_bytes(rb, &magic, 8);
+ lazy_assert(!memcmp(magic, "tokuroll", 8));
+
+ result->layout_version = rbuf_int(rb);
+ lazy_assert((FT_LAYOUT_VERSION_25 <= result->layout_version && result->layout_version <= FT_LAYOUT_VERSION_27) ||
+ (result->layout_version == FT_LAYOUT_VERSION));
+ result->layout_version_original = rbuf_int(rb);
+ result->layout_version_read_from_disk = result->layout_version;
+ result->build_id = rbuf_int(rb);
+ result->dirty = false;
+ //TODO: Maybe add descriptor (or just descriptor version) here eventually?
+ //TODO: This is hard.. everything is shared in a single dictionary.
+ rbuf_TXNID_PAIR(rb, &result->txnid);
+ result->sequence = rbuf_ulonglong(rb);
+ result->blocknum = rbuf_blocknum(rb);
+ if (result->blocknum.b != blocknum.b) {
+ r = toku_db_badformat();
+ goto died0;
+ }
+ result->previous = rbuf_blocknum(rb);
+ result->rollentry_resident_bytecount = rbuf_ulonglong(rb);
+
+ size_t arena_initial_size = rbuf_ulonglong(rb);
+ result->rollentry_arena.create(arena_initial_size);
+ if (0) { died1: result->rollentry_arena.destroy(); goto died0; }
+
+ //Load rollback entries
+ lazy_assert(rb->size > 4);
+ //Start with empty list
+ result->oldest_logentry = result->newest_logentry = NULL;
+ while (rb->ndone < rb->size) {
+ struct roll_entry *item;
+ uint32_t rollback_fsize = rbuf_int(rb); //Already read 4. Rest is 4 smaller
+ const void *item_vec;
+ rbuf_literal_bytes(rb, &item_vec, rollback_fsize-4);
+ unsigned char* item_buf = (unsigned char*)item_vec;
+ r = toku_parse_rollback(item_buf, rollback_fsize-4, &item, &result->rollentry_arena);
+ if (r!=0) {
+ r = toku_db_badformat();
+ goto died1;
+ }
+ //Add to head of list
+ if (result->oldest_logentry) {
+ result->oldest_logentry->prev = item;
+ result->oldest_logentry = item;
+ item->prev = NULL;
+ }
+ else {
+ result->oldest_logentry = result->newest_logentry = item;
+ item->prev = NULL;
+ }
+ }
+
+ toku_free(rb->buf);
+ rb->buf = NULL;
+ *log_p = result;
+ return 0;
+}
+
+static int
+deserialize_rollback_log_from_rbuf_versioned (uint32_t version, BLOCKNUM blocknum,
+ ROLLBACK_LOG_NODE *log,
+ struct rbuf *rb) {
+ int r = 0;
+ ROLLBACK_LOG_NODE rollback_log_node = NULL;
+ invariant((FT_LAYOUT_VERSION_25 <= version && version <= FT_LAYOUT_VERSION_27) || version == FT_LAYOUT_VERSION);
+ r = deserialize_rollback_log_from_rbuf(blocknum, &rollback_log_node, rb);
+ if (r==0) {
+ *log = rollback_log_node;
+ }
+ return r;
+}
+
+int
+decompress_from_raw_block_into_rbuf(uint8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum) {
+ int r = 0;
+ // get the number of compressed sub blocks
+ int n_sub_blocks;
+ n_sub_blocks = toku_dtoh32(*(uint32_t*)(&raw_block[node_header_overhead]));
+
+ // verify the number of sub blocks
+ invariant(0 <= n_sub_blocks);
+ invariant(n_sub_blocks <= max_sub_blocks);
+
+ { // verify the header checksum
+ uint32_t header_length = node_header_overhead + sub_block_header_size(n_sub_blocks);
+ invariant(header_length <= raw_block_size);
+ uint32_t xsum = toku_x1764_memory(raw_block, header_length);
+ uint32_t stored_xsum = toku_dtoh32(*(uint32_t *)(raw_block + header_length));
+ if (xsum != stored_xsum) {
+ r = TOKUDB_BAD_CHECKSUM;
+ }
+ }
+
+ // deserialize the sub block header
+ struct sub_block sub_block[n_sub_blocks];
+ uint32_t *sub_block_header = (uint32_t *) &raw_block[node_header_overhead+4];
+ for (int i = 0; i < n_sub_blocks; i++) {
+ sub_block_init(&sub_block[i]);
+ sub_block[i].compressed_size = toku_dtoh32(sub_block_header[0]);
+ sub_block[i].uncompressed_size = toku_dtoh32(sub_block_header[1]);
+ sub_block[i].xsum = toku_dtoh32(sub_block_header[2]);
+ sub_block_header += 3;
+ }
+
+    // This check needs to be here, rather than where r is set above, to
+    // keep the compiler happy.
+ if (r == TOKUDB_BAD_CHECKSUM) {
+ goto exit;
+ }
+
+ // verify sub block sizes
+ for (int i = 0; i < n_sub_blocks; i++) {
+ uint32_t compressed_size = sub_block[i].compressed_size;
+ if (compressed_size<=0 || compressed_size>(1<<30)) {
+ r = toku_db_badformat();
+ goto exit;
+ }
+
+ uint32_t uncompressed_size = sub_block[i].uncompressed_size;
+ if (0) printf("Block %" PRId64 " Compressed size = %u, uncompressed size=%u\n", blocknum.b, compressed_size, uncompressed_size);
+ if (uncompressed_size<=0 || uncompressed_size>(1<<30)) {
+ r = toku_db_badformat();
+ goto exit;
+ }
+ }
+
+ // sum up the uncompressed size of the sub blocks
+ size_t uncompressed_size;
+ uncompressed_size = get_sum_uncompressed_size(n_sub_blocks, sub_block);
+
+ // allocate the uncompressed buffer
+ size_t size;
+ size = node_header_overhead + uncompressed_size;
+ unsigned char *buf;
+ XMALLOC_N(size, buf);
+ rbuf_init(rb, buf, size);
+
+ // copy the uncompressed node header to the uncompressed buffer
+ memcpy(rb->buf, raw_block, node_header_overhead);
+
+ // point at the start of the compressed data (past the node header, the sub block header, and the header checksum)
+ unsigned char *compressed_data;
+ compressed_data = raw_block + node_header_overhead + sub_block_header_size(n_sub_blocks) + sizeof (uint32_t);
+
+ // point at the start of the uncompressed data
+ unsigned char *uncompressed_data;
+ uncompressed_data = rb->buf + node_header_overhead;
+
+ // decompress all the compressed sub blocks into the uncompressed buffer
+ r = decompress_all_sub_blocks(n_sub_blocks, sub_block, compressed_data, uncompressed_data, num_cores, ft_pool);
+ if (r != 0) {
+ fprintf(stderr, "%s:%d block %" PRId64 " failed %d at %p size %zu\n", __FUNCTION__, __LINE__, blocknum.b, r, raw_block, raw_block_size);
+ dump_bad_block(raw_block, raw_block_size);
+ goto exit;
+ }
+
+ rb->ndone=0;
+exit:
+ return r;
+}
+
+static int decompress_from_raw_block_into_rbuf_versioned(uint32_t version, uint8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum) {
+ // This function exists solely to accommodate future changes in compression.
+ int r = 0;
+ if ((version == FT_LAYOUT_VERSION_13 || version == FT_LAYOUT_VERSION_14) ||
+ (FT_LAYOUT_VERSION_25 <= version && version <= FT_LAYOUT_VERSION_27) ||
+ version == FT_LAYOUT_VERSION) {
+ r = decompress_from_raw_block_into_rbuf(raw_block, raw_block_size, rb, blocknum);
+ } else {
+ abort();
+ }
+ return r;
+}
+
+static int read_and_decompress_block_from_fd_into_rbuf(
+ int fd,
+ BLOCKNUM blocknum,
+ DISKOFF offset,
+ DISKOFF size,
+ FT ft,
+ struct rbuf *rb,
+ /* out */ int *layout_version_p) {
+ int r = 0;
+ if (0) printf("Deserializing Block %" PRId64 "\n", blocknum.b);
+
+ DISKOFF size_aligned = roundup_to_multiple(512, size);
+ uint8_t *XMALLOC_N_ALIGNED(512, size_aligned, raw_block);
+ {
+ // read the (partially compressed) block
+ ssize_t rlen = toku_os_pread(fd, raw_block, size_aligned, offset);
+ lazy_assert((DISKOFF)rlen >= size);
+ lazy_assert((DISKOFF)rlen <= size_aligned);
+ }
+ // get the layout_version
+ int layout_version;
+ {
+ uint8_t *magic = raw_block + uncompressed_magic_offset;
+ if (memcmp(magic, "tokuleaf", 8)!=0 &&
+ memcmp(magic, "tokunode", 8)!=0 &&
+ memcmp(magic, "tokuroll", 8)!=0) {
+ r = toku_db_badformat();
+ goto cleanup;
+ }
+ uint8_t *version = raw_block + uncompressed_version_offset;
+ layout_version = toku_dtoh32(*(uint32_t*)version);
+ if (layout_version < FT_LAYOUT_MIN_SUPPORTED_VERSION || layout_version > FT_LAYOUT_VERSION) {
+ r = toku_db_badformat();
+ goto cleanup;
+ }
+ }
+
+ r = decompress_from_raw_block_into_rbuf_versioned(layout_version, raw_block, size, rb, blocknum);
+ if (r != 0) {
+        // We either failed the checksum, or there is a bad format in
+        // the buffer.
+ if (r == TOKUDB_BAD_CHECKSUM) {
+ fprintf(stderr,
+ "Checksum failure while reading raw block in file %s.\n",
+ toku_cachefile_fname_in_env(ft->cf));
+ abort();
+ } else {
+ r = toku_db_badformat();
+ goto cleanup;
+ }
+ }
+
+ *layout_version_p = layout_version;
+cleanup:
+ if (r!=0) {
+ if (rb->buf) toku_free(rb->buf);
+ rb->buf = NULL;
+ }
+ if (raw_block) {
+ toku_free(raw_block);
+ }
+ return r;
+}
+
+// Read rollback log node from file into struct.
+// Perform version upgrade if necessary.
+int toku_deserialize_rollback_log_from(int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT ft) {
+ int layout_version = 0;
+ int r;
+
+ struct rbuf rb;
+ rbuf_init(&rb, nullptr, 0);
+
+ // get the file offset and block size for the block
+ DISKOFF offset, size;
+ ft->blocktable.translate_blocknum_to_offset_size(blocknum, &offset, &size);
+
+ // if the size is 0, then the blocknum is unused
+ if (size == 0) {
+ // blocknum is unused, just create an empty one and get out
+ ROLLBACK_LOG_NODE XMALLOC(log);
+ rollback_empty_log_init(log);
+ log->blocknum.b = blocknum.b;
+ r = 0;
+ *logp = log;
+ goto cleanup;
+ }
+
+ r = read_and_decompress_block_from_fd_into_rbuf(fd, blocknum, offset, size, ft, &rb, &layout_version);
+ if (r!=0) goto cleanup;
+
+ {
+ uint8_t *magic = rb.buf + uncompressed_magic_offset;
+ if (memcmp(magic, "tokuroll", 8)!=0) {
+ r = toku_db_badformat();
+ goto cleanup;
+ }
+ }
+
+ r = deserialize_rollback_log_from_rbuf_versioned(layout_version, blocknum, logp, &rb);
+
+cleanup:
+ if (rb.buf) {
+ toku_free(rb.buf);
+ }
+ return r;
+}
+
+int
+toku_upgrade_subtree_estimates_to_stat64info(int fd, FT ft)
+{
+ int r = 0;
+ // 15 was the last version with subtree estimates
+ invariant(ft->layout_version_read_from_disk <= FT_LAYOUT_VERSION_15);
+
+ FTNODE unused_node = NULL;
+ FTNODE_DISK_DATA unused_ndd = NULL;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft);
+ r = deserialize_ftnode_from_fd(fd, ft->h->root_blocknum, 0, &unused_node, &unused_ndd,
+ &bfe, &ft->h->on_disk_stats);
+ ft->in_memory_stats = ft->h->on_disk_stats;
+
+ if (unused_node) {
+ toku_ftnode_free(&unused_node);
+ }
+ if (unused_ndd) {
+ toku_free(unused_ndd);
+ }
+ return r;
+}
+
+int
+toku_upgrade_msn_from_root_to_header(int fd, FT ft)
+{
+ int r;
+ // 21 was the first version with max_msn_in_ft in the header
+ invariant(ft->layout_version_read_from_disk <= FT_LAYOUT_VERSION_20);
+
+ FTNODE node;
+ FTNODE_DISK_DATA ndd;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft);
+ r = deserialize_ftnode_from_fd(fd, ft->h->root_blocknum, 0, &node, &ndd, &bfe, nullptr);
+ if (r != 0) {
+ goto exit;
+ }
+
+ ft->h->max_msn_in_ft = node->max_msn_applied_to_node_on_disk;
+ toku_ftnode_free(&node);
+ toku_free(ndd);
+ exit:
+ return r;
+}
+
+#undef UPGRADE_STATUS_VALUE
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.h b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.h
new file mode 100644
index 00000000..67813965
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.h
@@ -0,0 +1,127 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/ft.h"
+#include "ft/node.h"
+#include "ft/serialize/sub_block.h"
+#include "ft/serialize/rbuf.h"
+#include "ft/serialize/wbuf.h"
+#include "ft/serialize/block_table.h"
+
+unsigned int toku_serialize_ftnode_size(FTNODE node);
+int toku_serialize_ftnode_to_memory(
+ FTNODE node,
+ FTNODE_DISK_DATA *ndd,
+ unsigned int basementnodesize,
+ enum toku_compression_method compression_method,
+ bool do_rebalancing,
+ bool in_parallel,
+ size_t *n_bytes_to_write,
+ size_t *n_uncompressed_bytes,
+ char **bytes_to_write);
+int toku_serialize_ftnode_to(int fd,
+ BLOCKNUM,
+ FTNODE node,
+ FTNODE_DISK_DATA *ndd,
+ bool do_rebalancing,
+ FT ft,
+ bool for_checkpoint);
+int toku_serialize_rollback_log_to(int fd,
+ ROLLBACK_LOG_NODE log,
+ SERIALIZED_ROLLBACK_LOG_NODE serialized_log,
+ bool is_serialized,
+ FT ft,
+ bool for_checkpoint);
+void toku_serialize_rollback_log_to_memory_uncompressed(
+ ROLLBACK_LOG_NODE log,
+ SERIALIZED_ROLLBACK_LOG_NODE serialized);
+
+int toku_deserialize_rollback_log_from(int fd,
+ BLOCKNUM blocknum,
+ ROLLBACK_LOG_NODE *logp,
+ FT ft);
+int toku_deserialize_bp_from_disk(FTNODE node,
+ FTNODE_DISK_DATA ndd,
+ int childnum,
+ int fd,
+ ftnode_fetch_extra *bfe);
+int toku_deserialize_bp_from_compressed(FTNODE node,
+ int childnum,
+ ftnode_fetch_extra *bfe);
+int toku_deserialize_ftnode_from(int fd,
+ BLOCKNUM off,
+ uint32_t fullhash,
+ FTNODE *node,
+ FTNODE_DISK_DATA *ndd,
+ ftnode_fetch_extra *bfe);
+
+void toku_serialize_set_parallel(bool);
+
+// used by nonleaf node partial eviction
+void toku_create_compressed_partition_from_available(FTNODE node, int childnum,
+ enum toku_compression_method compression_method, SUB_BLOCK sb);
+
+// <CER> For verifying old, non-upgraded nodes (versions 13 and 14).
+int decompress_from_raw_block_into_rbuf(uint8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum);
+
+// used by verify
+int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version);
+void read_block_from_fd_into_rbuf(int fd,
+ BLOCKNUM blocknum,
+ FT ft,
+ struct rbuf *rb);
+int read_compressed_sub_block(struct rbuf *rb, struct sub_block *sb);
+int verify_ftnode_sub_block(struct sub_block *sb,
+ const char *fname,
+ BLOCKNUM blocknum);
+void just_decompress_sub_block(struct sub_block *sb);
+
+// used by ft-node-deserialize.cc
+void initialize_ftnode(FTNODE node, BLOCKNUM blocknum);
+int read_and_check_magic(struct rbuf *rb);
+int read_and_check_version(FTNODE node, struct rbuf *rb);
+void read_node_info(FTNODE node, struct rbuf *rb, int version);
+void allocate_and_read_partition_offsets(FTNODE node, struct rbuf *rb, FTNODE_DISK_DATA *ndd);
+int check_node_info_checksum(struct rbuf *rb);
+void read_legacy_node_info(FTNODE node, struct rbuf *rb, int version);
+int check_legacy_end_checksum(struct rbuf *rb);
+
+// exported so the loader can dump bad blocks
+void dump_bad_block(unsigned char *vp, uint64_t size);
diff --git a/storage/tokudb/PerconaFT/ft/serialize/quicklz.cc b/storage/tokudb/PerconaFT/ft/serialize/quicklz.cc
new file mode 100644
index 00000000..44f084f3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/quicklz.cc
@@ -0,0 +1,887 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Fast data compression library
+// Copyright (C) 2006-2011 Lasse Mikkel Reinhold
+// lar@quicklz.com
+//
+// QuickLZ can be used for free under the GPL 1, 2 or 3 license (where anything
+// released into public must be open source) or under a commercial license if such
+// has been acquired (see http://www.quicklz.com/order.html). The commercial license
+// does not cover derived or ported versions created by third parties under GPL.
+
+// 1.5.0 final
+
+#include "quicklz.h"
+
+#if QLZ_VERSION_MAJOR != 1 || QLZ_VERSION_MINOR != 5 || QLZ_VERSION_REVISION != 0
+ #error quicklz.c and quicklz.h have different versions
+#endif
+
+#if (defined(__X86__) || defined(__i386__) || defined(i386) || defined(_M_IX86) || defined(__386__) || defined(__x86_64__) || defined(_M_X64))
+ #define X86X64
+#endif
+
+#define MINOFFSET 2
+#define UNCONDITIONAL_MATCHLEN 6
+#define UNCOMPRESSED_END 4
+#define CWORD_LEN 4
+
+#if QLZ_COMPRESSION_LEVEL == 1 && defined QLZ_PTR_64 && QLZ_STREAMING_BUFFER == 0
+ #define OFFSET_BASE source
+ #define CAST (ui32)(size_t)
+#else
+ #define OFFSET_BASE 0
+ #define CAST
+#endif
+
+int qlz_get_setting(int setting)
+{
+ switch (setting)
+ {
+ case 0: return QLZ_COMPRESSION_LEVEL;
+ case 1: return sizeof(qlz_state_compress);
+ case 2: return sizeof(qlz_state_decompress);
+ case 3: return QLZ_STREAMING_BUFFER;
+#ifdef QLZ_MEMORY_SAFE
+ case 6: return 1;
+#else
+ case 6: return 0;
+#endif
+ case 7: return QLZ_VERSION_MAJOR;
+ case 8: return QLZ_VERSION_MINOR;
+ case 9: return QLZ_VERSION_REVISION;
+ }
+ return -1;
+}
+
+#if QLZ_COMPRESSION_LEVEL == 1
+static int same(const unsigned char *src, size_t n)
+{
+ while(n > 0 && *(src + n) == *src)
+ n--;
+ return n == 0 ? 1 : 0;
+}
+#endif
+
+static void reset_table_compress(qlz_state_compress *state)
+{
+ int i;
+ for(i = 0; i < QLZ_HASH_VALUES; i++)
+ {
+#if QLZ_COMPRESSION_LEVEL == 1
+ state->hash[i].offset = 0;
+#else
+ state->hash_counter[i] = 0;
+ state->hash[i].offset[0] = 0;
+#endif
+ }
+}
+
+static void reset_table_decompress(qlz_state_decompress *state)
+{
+ (void)state;
+#if QLZ_COMPRESSION_LEVEL == 2
+ for(int i = 0; i < QLZ_HASH_VALUES; i++)
+ {
+ state->hash_counter[i] = 0;
+ }
+#endif
+}
+
+static __inline ui32 hash_func(ui32 i)
+{
+#if QLZ_COMPRESSION_LEVEL == 2
+ return ((i >> 9) ^ (i >> 13) ^ i) & (QLZ_HASH_VALUES - 1);
+#else
+ return ((i >> 12) ^ i) & (QLZ_HASH_VALUES - 1);
+#endif
+}
+
+static __inline ui32 fast_read(void const *src, ui32 bytes)
+{
+#ifndef X86X64
+ unsigned char *p = (unsigned char*)src;
+ switch (bytes)
+ {
+ case 4:
+ return(*p | *(p + 1) << 8 | *(p + 2) << 16 | *(p + 3) << 24);
+ case 3:
+ return(*p | *(p + 1) << 8 | *(p + 2) << 16);
+ case 2:
+ return(*p | *(p + 1) << 8);
+ case 1:
+ return(*p);
+ }
+ return 0;
+#else
+ if (bytes >= 1 && bytes <= 4)
+ return *((ui32*)src);
+ else
+ return 0;
+#endif
+}
+
+static __inline ui32 hashat(const unsigned char *src)
+{
+ ui32 fetch, hash;
+ fetch = fast_read(src, 3);
+ hash = hash_func(fetch);
+ return hash;
+}
+
+static __inline void fast_write(ui32 f, void *dst, size_t bytes)
+{
+#ifndef X86X64
+ unsigned char *p = (unsigned char*)dst;
+
+ switch (bytes)
+ {
+ case 4:
+ *p = (unsigned char)f;
+ *(p + 1) = (unsigned char)(f >> 8);
+ *(p + 2) = (unsigned char)(f >> 16);
+ *(p + 3) = (unsigned char)(f >> 24);
+ return;
+ case 3:
+ *p = (unsigned char)f;
+ *(p + 1) = (unsigned char)(f >> 8);
+ *(p + 2) = (unsigned char)(f >> 16);
+ return;
+ case 2:
+ *p = (unsigned char)f;
+ *(p + 1) = (unsigned char)(f >> 8);
+ return;
+ case 1:
+ *p = (unsigned char)f;
+ return;
+ }
+#else
+ switch (bytes)
+ {
+ case 4:
+ *((ui32*)dst) = f;
+ return;
+ case 3:
+ *((ui32*)dst) = f;
+ return;
+ case 2:
+ *((ui16 *)dst) = (ui16)f;
+ return;
+ case 1:
+ *((unsigned char*)dst) = (unsigned char)f;
+ return;
+ }
+#endif
+}
+
+
+size_t qlz_size_decompressed(const char *source)
+{
+ ui32 n, r;
+ n = (((*source) & 2) == 2) ? 4 : 1;
+ r = fast_read(source + 1 + n, n);
+ r = r & (0xffffffff >> ((4 - n)*8));
+ return r;
+}
+
+size_t qlz_size_compressed(const char *source)
+{
+ ui32 n, r;
+ n = (((*source) & 2) == 2) ? 4 : 1;
+ r = fast_read(source + 1, n);
+ r = r & (0xffffffff >> ((4 - n)*8));
+ return r;
+}
+
+static
+size_t qlz_size_header(const char *source)
+{
+ size_t n = 2*((((*source) & 2) == 2) ? 4 : 1) + 1;
+ return n;
+}
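+
+// Example of the header size logic (a sketch with a hypothetical header byte):
+// if (*source & 2) == 2 the long header form is used, so n = 4 and
+// qlz_size_header() returns 2*4 + 1 = 9 bytes (1 flag byte, 4 bytes of
+// compressed size, 4 bytes of decompressed size); otherwise n = 1 and the
+// header is 2*1 + 1 = 3 bytes.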
+
+
+static __inline void memcpy_up(unsigned char *dst, const unsigned char *src, ui32 n)
+{
+    // Caution if modifying memcpy_up! Overlap of dst and src requires special handling.
+#ifndef X86X64
+ unsigned char *end = dst + n;
+ while(dst < end)
+ {
+ *dst = *src;
+ dst++;
+ src++;
+ }
+#else
+ ui32 f = 0;
+ do
+ {
+ *(ui32 *)(dst + f) = *(ui32 *)(src + f);
+ f += MINOFFSET + 1;
+ }
+ while (f < n);
+#endif
+}
+
+__attribute__((unused))
+static __inline void update_hash(qlz_state_decompress *state, const unsigned char *s)
+{
+#if QLZ_COMPRESSION_LEVEL == 1
+ ui32 hash;
+ hash = hashat(s);
+ state->hash[hash].offset = s;
+ state->hash_counter[hash] = 1;
+#elif QLZ_COMPRESSION_LEVEL == 2
+ ui32 hash;
+ unsigned char c;
+ hash = hashat(s);
+ c = state->hash_counter[hash];
+ state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = s;
+ c++;
+ state->hash_counter[hash] = c;
+#endif
+ (void)state;
+ (void)s;
+}
+
+#if QLZ_COMPRESSION_LEVEL <= 2
+static void update_hash_upto(qlz_state_decompress *state, unsigned char **lh, const unsigned char *max)
+{
+ while(*lh < max)
+ {
+ (*lh)++;
+ update_hash(state, *lh);
+ }
+}
+#endif
+
+static size_t qlz_compress_core(const unsigned char *source, unsigned char *destination, size_t size, qlz_state_compress *state)
+{
+ const unsigned char *last_byte = source + size - 1;
+ const unsigned char *src = source;
+ unsigned char *cword_ptr = destination;
+ unsigned char *dst = destination + CWORD_LEN;
+ ui32 cword_val = 1U << 31;
+ const unsigned char *last_matchstart = last_byte - UNCONDITIONAL_MATCHLEN - UNCOMPRESSED_END;
+ ui32 fetch = 0;
+ unsigned int lits = 0;
+
+ (void) lits;
+
+ if(src <= last_matchstart)
+ fetch = fast_read(src, 3);
+
+ while(src <= last_matchstart)
+ {
+ if ((cword_val & 1) == 1)
+ {
+ // store uncompressed if compression ratio is too low
+ if (src > source + (size >> 1) && dst - destination > src - source - ((src - source) >> 5))
+ return 0;
+
+ fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN);
+
+ cword_ptr = dst;
+ dst += CWORD_LEN;
+ cword_val = 1U << 31;
+ fetch = fast_read(src, 3);
+ }
+#if QLZ_COMPRESSION_LEVEL == 1
+ {
+ const unsigned char *o;
+ ui32 hash, cached;
+
+ hash = hash_func(fetch);
+ cached = fetch ^ state->hash[hash].cache;
+ state->hash[hash].cache = fetch;
+
+ o = state->hash[hash].offset + OFFSET_BASE;
+ state->hash[hash].offset = CAST(src - OFFSET_BASE);
+
+#ifdef X86X64
+ if ((cached & 0xffffff) == 0 && o != OFFSET_BASE && (src - o > MINOFFSET || (src == o + 1 && lits >= 3 && src > source + 3 && same(src - 3, 6))))
+ {
+ if(cached != 0)
+ {
+#else
+ if (cached == 0 && o != OFFSET_BASE && (src - o > MINOFFSET || (src == o + 1 && lits >= 3 && src > source + 3 && same(src - 3, 6))))
+ {
+ if (*(o + 3) != *(src + 3))
+ {
+#endif
+ hash <<= 4;
+ cword_val = (cword_val >> 1) | (1U << 31);
+ fast_write((3 - 2) | hash, dst, 2);
+ src += 3;
+ dst += 2;
+ }
+ else
+ {
+ const unsigned char *old_src = src;
+ size_t matchlen;
+ hash <<= 4;
+
+ cword_val = (cword_val >> 1) | (1U << 31);
+ src += 4;
+
+ if(*(o + (src - old_src)) == *src)
+ {
+ src++;
+ if(*(o + (src - old_src)) == *src)
+ {
+ size_t q = last_byte - UNCOMPRESSED_END - (src - 5) + 1;
+ size_t remaining = q > 255 ? 255 : q;
+ src++;
+ while(*(o + (src - old_src)) == *src && (size_t)(src - old_src) < remaining)
+ src++;
+ }
+ }
+
+ matchlen = src - old_src;
+ if (matchlen < 18)
+ {
+ fast_write((ui32)(matchlen - 2) | hash, dst, 2);
+ dst += 2;
+ }
+ else
+ {
+ fast_write((ui32)(matchlen << 16) | hash, dst, 3);
+ dst += 3;
+ }
+ }
+ fetch = fast_read(src, 3);
+ lits = 0;
+ }
+ else
+ {
+ lits++;
+ *dst = *src;
+ src++;
+ dst++;
+ cword_val = (cword_val >> 1);
+#ifdef X86X64
+ fetch = fast_read(src, 3);
+#else
+ fetch = (fetch >> 8 & 0xffff) | (*(src + 2) << 16);
+#endif
+ }
+ }
+#elif QLZ_COMPRESSION_LEVEL >= 2
+ {
+ const unsigned char *o, *offset2;
+ ui32 hash, matchlen, k, m, best_k = 0;
+ unsigned char c;
+ size_t remaining = (last_byte - UNCOMPRESSED_END - src + 1) > 255 ? 255 : (last_byte - UNCOMPRESSED_END - src + 1);
+ (void)best_k;
+
+
+ //hash = hashat(src);
+ fetch = fast_read(src, 3);
+ hash = hash_func(fetch);
+
+ c = state->hash_counter[hash];
+
+ offset2 = state->hash[hash].offset[0];
+ if(offset2 < src - MINOFFSET && c > 0 && ((fast_read(offset2, 3) ^ fetch) & 0xffffff) == 0)
+ {
+ matchlen = 3;
+ if(*(offset2 + matchlen) == *(src + matchlen))
+ {
+ matchlen = 4;
+ while(*(offset2 + matchlen) == *(src + matchlen) && matchlen < remaining)
+ matchlen++;
+ }
+ }
+ else
+ matchlen = 0;
+ for(k = 1; k < QLZ_POINTERS && c > k; k++)
+ {
+ o = state->hash[hash].offset[k];
+#if QLZ_COMPRESSION_LEVEL == 3
+ if(((fast_read(o, 3) ^ fetch) & 0xffffff) == 0 && o < src - MINOFFSET)
+#elif QLZ_COMPRESSION_LEVEL == 2
+ if(*(src + matchlen) == *(o + matchlen) && ((fast_read(o, 3) ^ fetch) & 0xffffff) == 0 && o < src - MINOFFSET)
+#endif
+ {
+ m = 3;
+ while(*(o + m) == *(src + m) && m < remaining)
+ m++;
+#if QLZ_COMPRESSION_LEVEL == 3
+ if ((m > matchlen) || (m == matchlen && o > offset2))
+#elif QLZ_COMPRESSION_LEVEL == 2
+ if (m > matchlen)
+#endif
+ {
+ offset2 = o;
+ matchlen = m;
+ best_k = k;
+ }
+ }
+ }
+ o = offset2;
+ state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src;
+ c++;
+ state->hash_counter[hash] = c;
+
+#if QLZ_COMPRESSION_LEVEL == 3
+ if(matchlen > 2 && src - o < 131071)
+ {
+ ui32 u;
+ size_t offset = src - o;
+
+ for(u = 1; u < matchlen; u++)
+ {
+ hash = hashat(src + u);
+ c = state->hash_counter[hash]++;
+ state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src + u;
+ }
+
+ cword_val = (cword_val >> 1) | (1U << 31);
+ src += matchlen;
+
+ if(matchlen == 3 && offset <= 63)
+ {
+ *dst = (unsigned char)(offset << 2);
+ dst++;
+ }
+ else if (matchlen == 3 && offset <= 16383)
+ {
+ ui32 f = (ui32)((offset << 2) | 1);
+ fast_write(f, dst, 2);
+ dst += 2;
+ }
+ else if (matchlen <= 18 && offset <= 1023)
+ {
+ ui32 f = ((matchlen - 3) << 2) | ((ui32)offset << 6) | 2;
+ fast_write(f, dst, 2);
+ dst += 2;
+ }
+
+ else if(matchlen <= 33)
+ {
+ ui32 f = ((matchlen - 2) << 2) | ((ui32)offset << 7) | 3;
+ fast_write(f, dst, 3);
+ dst += 3;
+ }
+ else
+ {
+ ui32 f = ((matchlen - 3) << 7) | ((ui32)offset << 15) | 3;
+ fast_write(f, dst, 4);
+ dst += 4;
+ }
+ }
+ else
+ {
+ *dst = *src;
+ src++;
+ dst++;
+ cword_val = (cword_val >> 1);
+ }
+#elif QLZ_COMPRESSION_LEVEL == 2
+
+ if(matchlen > 2)
+ {
+ cword_val = (cword_val >> 1) | (1U << 31);
+ src += matchlen;
+
+ if (matchlen < 10)
+ {
+ ui32 f = best_k | ((matchlen - 2) << 2) | (hash << 5);
+ fast_write(f, dst, 2);
+ dst += 2;
+ }
+ else
+ {
+ ui32 f = best_k | (matchlen << 16) | (hash << 5);
+ fast_write(f, dst, 3);
+ dst += 3;
+ }
+ }
+ else
+ {
+ *dst = *src;
+ src++;
+ dst++;
+ cword_val = (cword_val >> 1);
+ }
+#endif
+ }
+#endif
+ }
+ while (src <= last_byte)
+ {
+ if ((cword_val & 1) == 1)
+ {
+ fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN);
+ cword_ptr = dst;
+ dst += CWORD_LEN;
+ cword_val = 1U << 31;
+ }
+#if QLZ_COMPRESSION_LEVEL < 3
+ if (src <= last_byte - 3)
+ {
+#if QLZ_COMPRESSION_LEVEL == 1
+ ui32 hash, fetchv;
+ fetchv = fast_read(src, 3);
+ hash = hash_func(fetch);
+ state->hash[hash].offset = CAST(src - OFFSET_BASE);
+ state->hash[hash].cache = fetchv;
+#elif QLZ_COMPRESSION_LEVEL == 2
+ ui32 hash;
+ unsigned char c;
+ hash = hashat(src);
+ c = state->hash_counter[hash];
+ state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src;
+ c++;
+ state->hash_counter[hash] = c;
+#endif
+ }
+#endif
+ *dst = *src;
+ src++;
+ dst++;
+ cword_val = (cword_val >> 1);
+ }
+
+ while((cword_val & 1) != 1)
+ cword_val = (cword_val >> 1);
+
+ fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN);
+
+    // the minimum size must be 9 bytes so that the qlz_size functions can always read a full 9-byte header
+ return dst - destination < 9 ? 9 : dst - destination;
+}
+
+static size_t qlz_decompress_core(const unsigned char *source, unsigned char *destination, size_t size, qlz_state_decompress *state, const unsigned char *history)
+{
+ const unsigned char *src = source + qlz_size_header((const char *)source);
+ unsigned char *dst = destination;
+ const unsigned char *last_destination_byte = destination + size - 1;
+ ui32 cword_val = 1;
+ const unsigned char *last_matchstart = last_destination_byte - UNCONDITIONAL_MATCHLEN - UNCOMPRESSED_END;
+ unsigned char *last_hashed = destination - 1;
+ const unsigned char *last_source_byte = source + qlz_size_compressed((const char *)source) - 1;
+ static const ui32 bitlut[16] = {4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0};
+
+ (void) last_source_byte;
+ (void) last_hashed;
+ (void) state;
+ (void) history;
+
+ for(;;)
+ {
+ ui32 fetch;
+
+ if (cword_val == 1)
+ {
+#ifdef QLZ_MEMORY_SAFE
+ if(src + CWORD_LEN - 1 > last_source_byte)
+ return 0;
+#endif
+ cword_val = fast_read(src, CWORD_LEN);
+ src += CWORD_LEN;
+ }
+
+#ifdef QLZ_MEMORY_SAFE
+ if(src + 4 - 1 > last_source_byte)
+ return 0;
+#endif
+
+ fetch = fast_read(src, 4);
+
+ if ((cword_val & 1) == 1)
+ {
+ ui32 matchlen;
+ const unsigned char *offset2;
+
+#if QLZ_COMPRESSION_LEVEL == 1
+ ui32 hash;
+ cword_val = cword_val >> 1;
+ hash = (fetch >> 4) & 0xfff;
+ offset2 = (const unsigned char *)(size_t)state->hash[hash].offset;
+
+ if((fetch & 0xf) != 0)
+ {
+ matchlen = (fetch & 0xf) + 2;
+ src += 2;
+ }
+ else
+ {
+ matchlen = *(src + 2);
+ src += 3;
+ }
+
+#elif QLZ_COMPRESSION_LEVEL == 2
+ ui32 hash;
+ unsigned char c;
+ cword_val = cword_val >> 1;
+ hash = (fetch >> 5) & 0x7ff;
+ c = (unsigned char)(fetch & 0x3);
+ offset2 = state->hash[hash].offset[c];
+
+ if((fetch & (28)) != 0)
+ {
+ matchlen = ((fetch >> 2) & 0x7) + 2;
+ src += 2;
+ }
+ else
+ {
+ matchlen = *(src + 2);
+ src += 3;
+ }
+
+#elif QLZ_COMPRESSION_LEVEL == 3
+ ui32 offset;
+ cword_val = cword_val >> 1;
+ if ((fetch & 3) == 0)
+ {
+ offset = (fetch & 0xff) >> 2;
+ matchlen = 3;
+ src++;
+ }
+ else if ((fetch & 2) == 0)
+ {
+ offset = (fetch & 0xffff) >> 2;
+ matchlen = 3;
+ src += 2;
+ }
+ else if ((fetch & 1) == 0)
+ {
+ offset = (fetch & 0xffff) >> 6;
+ matchlen = ((fetch >> 2) & 15) + 3;
+ src += 2;
+ }
+ else if ((fetch & 127) != 3)
+ {
+ offset = (fetch >> 7) & 0x1ffff;
+ matchlen = ((fetch >> 2) & 0x1f) + 2;
+ src += 3;
+ }
+ else
+ {
+ offset = (fetch >> 15);
+ matchlen = ((fetch >> 7) & 255) + 3;
+ src += 4;
+ }
+
+ offset2 = dst - offset;
+#endif
+
+#ifdef QLZ_MEMORY_SAFE
+ if(offset2 < history || offset2 > dst - MINOFFSET - 1)
+ return 0;
+
+ if(matchlen > (ui32)(last_destination_byte - dst - UNCOMPRESSED_END + 1))
+ return 0;
+#endif
+
+ memcpy_up(dst, offset2, matchlen);
+ dst += matchlen;
+
+#if QLZ_COMPRESSION_LEVEL <= 2
+ update_hash_upto(state, &last_hashed, dst - matchlen);
+ last_hashed = dst - 1;
+#endif
+ }
+ else
+ {
+ if (dst < last_matchstart)
+ {
+ unsigned int n = bitlut[cword_val & 0xf];
+#ifdef X86X64
+ *(ui32 *)dst = *(ui32 *)src;
+#else
+ memcpy_up(dst, src, 4);
+#endif
+ cword_val = cword_val >> n;
+ dst += n;
+ src += n;
+#if QLZ_COMPRESSION_LEVEL <= 2
+ update_hash_upto(state, &last_hashed, dst - 3);
+#endif
+ }
+ else
+ {
+ while(dst <= last_destination_byte)
+ {
+ if (cword_val == 1)
+ {
+ src += CWORD_LEN;
+ cword_val = 1U << 31;
+ }
+#ifdef QLZ_MEMORY_SAFE
+ if(src >= last_source_byte + 1)
+ return 0;
+#endif
+ *dst = *src;
+ dst++;
+ src++;
+ cword_val = cword_val >> 1;
+ }
+
+#if QLZ_COMPRESSION_LEVEL <= 2
+ update_hash_upto(state, &last_hashed, last_destination_byte - 3); // todo, use constant
+#endif
+ return size;
+ }
+
+ }
+ }
+}
+
+size_t qlz_compress(const void *source, char *destination, size_t size, qlz_state_compress *state)
+{
+ size_t r;
+ ui32 compressed;
+ size_t base;
+
+ if(size == 0 || size > 0xffffffff - 400)
+ return 0;
+
+ if(size < 216)
+ base = 3;
+ else
+ base = 9;
+
+#if QLZ_STREAMING_BUFFER > 0
+ if (state->stream_counter + size - 1 >= QLZ_STREAMING_BUFFER)
+#endif
+ {
+ reset_table_compress(state);
+ r = base + qlz_compress_core((const unsigned char *)source, (unsigned char*)destination + base, size, state);
+#if QLZ_STREAMING_BUFFER > 0
+ reset_table_compress(state);
+#endif
+ if(r == base)
+ {
+ memcpy(destination + base, source, size);
+ r = size + base;
+ compressed = 0;
+ }
+ else
+ {
+ compressed = 1;
+ }
+ state->stream_counter = 0;
+ }
+#if QLZ_STREAMING_BUFFER > 0
+ else
+ {
+ unsigned char *src = state->stream_buffer + state->stream_counter;
+
+ memcpy(src, source, size);
+ r = base + qlz_compress_core(src, (unsigned char*)destination + base, size, state);
+
+ if(r == base)
+ {
+ memcpy(destination + base, src, size);
+ r = size + base;
+ compressed = 0;
+ reset_table_compress(state);
+ }
+ else
+ {
+ compressed = 1;
+ }
+ state->stream_counter += size;
+ }
+#endif
+ if(base == 3)
+ {
+ *destination = (unsigned char)(0 | compressed);
+ *(destination + 1) = (unsigned char)r;
+ *(destination + 2) = (unsigned char)size;
+ }
+ else
+ {
+ *destination = (unsigned char)(2 | compressed);
+ fast_write((ui32)r, destination + 1, 4);
+ fast_write((ui32)size, destination + 5, 4);
+ }
+
+ *destination |= (QLZ_COMPRESSION_LEVEL << 2);
+ *destination |= (1 << 6);
+ *destination |= ((QLZ_STREAMING_BUFFER == 0 ? 0 : (QLZ_STREAMING_BUFFER == 100000 ? 1 : (QLZ_STREAMING_BUFFER == 1000000 ? 2 : 3))) << 4);
+
+// 76543210
+// 01SSLLHC
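+// For example (a sketch assuming the defaults in quicklz.h: compression
+// level 3, QLZ_STREAMING_BUFFER == 0, and a source of at least 216 bytes
+// that actually compressed): C = 1, H = 1, LL = 3, SS = 0, and the fixed
+// bit 6 give a first destination byte of
+//   (2 | 1) | (3 << 2) | (1 << 6) | (0 << 4) = 0x4f.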
+
+ return r;
+}
+
+size_t qlz_decompress(const char *source, void *destination, qlz_state_decompress *state)
+{
+ size_t dsiz = qlz_size_decompressed(source);
+
+#if QLZ_STREAMING_BUFFER > 0
+ if (state->stream_counter + qlz_size_decompressed(source) - 1 >= QLZ_STREAMING_BUFFER)
+#endif
+ {
+ if((*source & 1) == 1)
+ {
+ reset_table_decompress(state);
+ dsiz = qlz_decompress_core((const unsigned char *)source, (unsigned char *)destination, dsiz, state, (const unsigned char *)destination);
+ }
+ else
+ {
+ memcpy(destination, source + qlz_size_header(source), dsiz);
+ }
+ state->stream_counter = 0;
+ reset_table_decompress(state);
+ }
+#if QLZ_STREAMING_BUFFER > 0
+ else
+ {
+ unsigned char *dst = state->stream_buffer + state->stream_counter;
+ if((*source & 1) == 1)
+ {
+ dsiz = qlz_decompress_core((const unsigned char *)source, dst, dsiz, state, (const unsigned char *)state->stream_buffer);
+ }
+ else
+ {
+ memcpy(dst, source + qlz_size_header(source), dsiz);
+ reset_table_decompress(state);
+ }
+ memcpy(destination, dst, dsiz);
+ state->stream_counter += dsiz;
+ }
+#endif
+ return dsiz;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/serialize/quicklz.h b/storage/tokudb/PerconaFT/ft/serialize/quicklz.h
new file mode 100644
index 00000000..b9ce2f99
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/quicklz.h
@@ -0,0 +1,177 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// Fast data compression library
+// Copyright (C) 2006-2011 Lasse Mikkel Reinhold
+// lar@quicklz.com
+//
+// QuickLZ can be used for free under the GPL 1, 2 or 3 license (where anything
+// released into public must be open source) or under a commercial license if such
+// has been acquired (see http://www.quicklz.com/order.html). The commercial license
+// does not cover derived or ported versions created by third parties under GPL.
+
+// You can edit the following user settings. Data must be decompressed with the
+// same settings of QLZ_COMPRESSION_LEVEL and QLZ_STREAMING_BUFFER as it was
+// compressed (see manual). If QLZ_STREAMING_BUFFER > 0, scratch buffers must be
+// initially zeroed out (see manual). The first #ifndef makes it possible to
+// define the settings from the outside, e.g. on the compiler command line.
+
+// 1.5.0 final
+
+#ifndef QLZ_COMPRESSION_LEVEL
+ //#define QLZ_COMPRESSION_LEVEL 1
+ //#define QLZ_COMPRESSION_LEVEL 2
+ #define QLZ_COMPRESSION_LEVEL 3
+
+ #define QLZ_STREAMING_BUFFER 0
+ //#define QLZ_STREAMING_BUFFER 100000
+ //#define QLZ_STREAMING_BUFFER 1000000
+
+ //#define QLZ_MEMORY_SAFE
+#endif
+
+#define QLZ_VERSION_MAJOR 1
+#define QLZ_VERSION_MINOR 5
+#define QLZ_VERSION_REVISION 0
+
+// Using size_t, memset() and memcpy()
+#include <string.h>
+
+// Verify compression level
+#if QLZ_COMPRESSION_LEVEL != 1 && QLZ_COMPRESSION_LEVEL != 2 && QLZ_COMPRESSION_LEVEL != 3
+#error QLZ_COMPRESSION_LEVEL must be 1, 2 or 3
+#endif
+
+typedef unsigned int ui32;
+typedef unsigned short int ui16;
+
+// Decrease QLZ_POINTERS for level 3 to increase compression speed. Do not touch any other values!
+#if QLZ_COMPRESSION_LEVEL == 1
+#define QLZ_POINTERS 1
+#define QLZ_HASH_VALUES 4096
+#elif QLZ_COMPRESSION_LEVEL == 2
+#define QLZ_POINTERS 4
+#define QLZ_HASH_VALUES 2048
+#elif QLZ_COMPRESSION_LEVEL == 3
+#define QLZ_POINTERS 16
+#define QLZ_HASH_VALUES 4096
+#endif
+
+// Detect if pointer size is 64-bit. It's not fatal if some 64-bit target is not detected because this is only for adding an optional 64-bit optimization.
+#if defined _LP64 || defined __LP64__ || defined __64BIT__ || _ADDR64 || defined _WIN64 || defined __arch64__ || __WORDSIZE == 64 || (defined __sparc && defined __sparcv9) || defined __x86_64 || defined __amd64 || defined __x86_64__ || defined _M_X64 || defined _M_IA64 || defined __ia64 || defined __IA64__
+ #define QLZ_PTR_64
+#endif
+
+// hash entry
+typedef struct
+{
+#if QLZ_COMPRESSION_LEVEL == 1
+ ui32 cache;
+#if defined QLZ_PTR_64 && QLZ_STREAMING_BUFFER == 0
+ unsigned int offset;
+#else
+ const unsigned char *offset;
+#endif
+#else
+ const unsigned char *offset[QLZ_POINTERS];
+#endif
+
+} qlz_hash_compress;
+
+typedef struct
+{
+#if QLZ_COMPRESSION_LEVEL == 1
+ const unsigned char *offset;
+#else
+ const unsigned char *offset[QLZ_POINTERS];
+#endif
+} qlz_hash_decompress;
+
+
+// states
+typedef struct
+{
+ #if QLZ_STREAMING_BUFFER > 0
+ unsigned char stream_buffer[QLZ_STREAMING_BUFFER];
+ #endif
+ size_t stream_counter;
+ qlz_hash_compress hash[QLZ_HASH_VALUES];
+ unsigned char hash_counter[QLZ_HASH_VALUES];
+} qlz_state_compress;
+
+
+#if QLZ_COMPRESSION_LEVEL == 1 || QLZ_COMPRESSION_LEVEL == 2
+ typedef struct
+ {
+#if QLZ_STREAMING_BUFFER > 0
+ unsigned char stream_buffer[QLZ_STREAMING_BUFFER];
+#endif
+ qlz_hash_decompress hash[QLZ_HASH_VALUES];
+ unsigned char hash_counter[QLZ_HASH_VALUES];
+ size_t stream_counter;
+ } qlz_state_decompress;
+#elif QLZ_COMPRESSION_LEVEL == 3
+ typedef struct
+ {
+#if QLZ_STREAMING_BUFFER > 0
+ unsigned char stream_buffer[QLZ_STREAMING_BUFFER];
+#endif
+#if QLZ_COMPRESSION_LEVEL <= 2
+ qlz_hash_decompress hash[QLZ_HASH_VALUES];
+#endif
+ size_t stream_counter;
+ } qlz_state_decompress;
+#endif
+
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+// Public functions of QuickLZ
+size_t qlz_size_decompressed(const char *source);
+size_t qlz_size_compressed(const char *source);
+size_t qlz_compress(const void *source, char *destination, size_t size, qlz_state_compress *state);
+size_t qlz_decompress(const char *source, void *destination, qlz_state_decompress *state);
+int qlz_get_setting(int setting);
+
+#if defined (__cplusplus)
+}
+#endif
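+
+// Illustrative usage sketch (not part of the original header; the buffer names
+// and sizes are placeholders). Per the notes above, the destination buffer for
+// compression needs headroom beyond the source size (the QuickLZ manual
+// suggests size + 400 bytes) and the scratch states should start zeroed, which
+// calloc() takes care of:
+//
+//     qlz_state_compress *cs =
+//         (qlz_state_compress *) calloc(1, sizeof(qlz_state_compress));
+//     qlz_state_decompress *ds =
+//         (qlz_state_decompress *) calloc(1, sizeof(qlz_state_decompress));
+//     char *packed = (char *) malloc(src_len + 400);
+//     size_t packed_len = qlz_compress(src, packed, src_len, cs);
+//     size_t unpacked_len = qlz_decompress(packed, out, ds);  // == src_len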
diff --git a/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.cc b/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.cc
new file mode 100644
index 00000000..922850fb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.cc
@@ -0,0 +1,833 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/serialize/rbtree_mhs.h"
+#include "portability/toku_assert.h"
+#include "portability/toku_portability.h"
+#include <algorithm>
+
+namespace MhsRbTree {
+
+ Tree::Tree() : _root(NULL), _align(1) {}
+
+ Tree::Tree(uint64_t align) : _root(NULL), _align(align) {}
+
+ Tree::~Tree() { Destroy(); }
+
+ void Tree::PreOrder(Node *tree) const {
+ if (tree != NULL) {
+ fprintf(stderr, "%" PRIu64 " ", rbn_offset(tree).ToInt());
+ PreOrder(tree->_left);
+ PreOrder(tree->_right);
+ }
+ }
+
+ void Tree::PreOrder() { PreOrder(_root); }
+
+ void Tree::InOrder(Node *tree) const {
+ if (tree != NULL) {
+ InOrder(tree->_left);
+ fprintf(stderr, "%" PRIu64 " ", rbn_offset(tree).ToInt());
+ InOrder(tree->_right);
+ }
+ }
+
+    // Only the in-order visitor is needed for now. -Jun
+ void Tree::InOrderVisitor(Node *tree,
+ void (*f)(void *, Node *, uint64_t),
+ void *extra,
+ uint64_t depth) {
+ if (tree != NULL) {
+ InOrderVisitor(tree->_left, f, extra, depth + 1);
+ f(extra, tree, depth);
+ InOrderVisitor(tree->_right, f, extra, depth + 1);
+ }
+ }
+
+ void Tree::InOrderVisitor(void (*f)(void *, Node *, uint64_t),
+ void *extra) {
+ InOrderVisitor(_root, f, extra, 0);
+ }
+
+ void Tree::InOrder() { InOrder(_root); }
+
+ void Tree::PostOrder(Node *tree) const {
+ if (tree != NULL) {
+ PostOrder(tree->_left);
+ PostOrder(tree->_right);
+ fprintf(stderr, "%" PRIu64 " ", rbn_offset(tree).ToInt());
+ }
+ }
+
+ void Tree::PostOrder() { PostOrder(_root); }
+
+ Node *Tree::SearchByOffset(uint64_t offset) {
+ Node *x = _root;
+ while ((x != NULL) && (rbn_offset(x).ToInt() != offset)) {
+ if (offset < rbn_offset(x).ToInt())
+ x = x->_left;
+ else
+ x = x->_right;
+ }
+
+ return x;
+ }
+
+ // mostly for testing
+ Node *Tree::SearchFirstFitBySize(uint64_t size) {
+ if (EffectiveSize(_root) < size && rbn_left_mhs(_root) < size &&
+ rbn_right_mhs(_root) < size) {
+ return nullptr;
+ } else {
+ return SearchFirstFitBySizeHelper(_root, size);
+ }
+ }
+
+ Node *Tree::SearchFirstFitBySizeHelper(Node *x, uint64_t size) {
+ if (EffectiveSize(x) >= size) {
+ // only possible to go left
+ if (rbn_left_mhs(x) >= size)
+ return SearchFirstFitBySizeHelper(x->_left, size);
+ else
+ return x;
+ }
+ if (rbn_left_mhs(x) >= size)
+ return SearchFirstFitBySizeHelper(x->_left, size);
+
+ if (rbn_right_mhs(x) >= size)
+ return SearchFirstFitBySizeHelper(x->_right, size);
+
+ // this is an invalid state
+ Dump();
+ ValidateBalance();
+ ValidateMhs();
+ invariant(0);
+ return NULL;
+ }
+
+ Node *Tree::MinNode(Node *tree) {
+ if (tree == NULL)
+ return NULL;
+
+ while (tree->_left != NULL)
+ tree = tree->_left;
+ return tree;
+ }
+
+ Node *Tree::MinNode() { return MinNode(_root); }
+
+ Node *Tree::MaxNode(Node *tree) {
+ if (tree == NULL)
+ return NULL;
+
+ while (tree->_right != NULL)
+ tree = tree->_right;
+ return tree;
+ }
+
+ Node *Tree::MaxNode() { return MaxNode(_root); }
+
+ Node *Tree::SuccessorHelper(Node *y, Node *x) {
+ while ((y != NULL) && (x == y->_right)) {
+ x = y;
+ y = y->_parent;
+ }
+ return y;
+ }
+ Node *Tree::Successor(Node *x) {
+ if (x->_right != NULL)
+ return MinNode(x->_right);
+
+ Node *y = x->_parent;
+ return SuccessorHelper(y, x);
+ }
+
+ Node *Tree::PredecessorHelper(Node *y, Node *x) {
+ while ((y != NULL) && (x == y->_left)) {
+ x = y;
+ y = y->_parent;
+ }
+
+ return y;
+ }
+ Node *Tree::Predecessor(Node *x) {
+ if (x->_left != NULL)
+ return MaxNode(x->_left);
+
+ Node *y = x->_parent;
+        return PredecessorHelper(y, x);
+ }
+
+ /*
+ * px px
+ * / /
+ * x y
+ * / \ --(left rotation)--> / \ #
+ * lx y x ry
+ * / \ / \
+ * ly ry lx ly
+ * max_hole_size updates are pretty local
+ */
+
+ void Tree::LeftRotate(Node *&root, Node *x) {
+ Node *y = x->_right;
+
+ x->_right = y->_left;
+ rbn_right_mhs(x) = rbn_left_mhs(y);
+
+ if (y->_left != NULL)
+ y->_left->_parent = x;
+
+ y->_parent = x->_parent;
+
+ if (x->_parent == NULL) {
+ root = y;
+ } else {
+ if (x->_parent->_left == x) {
+ x->_parent->_left = y;
+ } else {
+ x->_parent->_right = y;
+ }
+ }
+ y->_left = x;
+ rbn_left_mhs(y) = mhs_of_subtree(x);
+
+ x->_parent = y;
+ }
+
+ /* py py
+ * / /
+ * y x
+ * / \ --(right rotate)--> / \ #
+ * x ry lx y
+ * / \ / \ #
+ * lx rx rx ry
+ *
+ */
+
+ void Tree::RightRotate(Node *&root, Node *y) {
+ Node *x = y->_left;
+
+ y->_left = x->_right;
+ rbn_left_mhs(y) = rbn_right_mhs(x);
+
+ if (x->_right != NULL)
+ x->_right->_parent = y;
+
+ x->_parent = y->_parent;
+
+ if (y->_parent == NULL) {
+ root = x;
+ } else {
+ if (y == y->_parent->_right)
+ y->_parent->_right = x;
+ else
+ y->_parent->_left = x;
+ }
+
+ x->_right = y;
+ rbn_right_mhs(x) = mhs_of_subtree(y);
+ y->_parent = x;
+ }
+
+    // Walk from this node up towards the root to update the mhs info.
+    // Whenever the left/right mhs or the size changes we recalculate.
+    // Prerequisite: the mhs values of the node's children are up to date.
+ void Tree::RecalculateMhs(Node *node) {
+ uint64_t *p_node_mhs = 0;
+ Node *parent = node->_parent;
+
+ if (!parent)
+ return;
+
+ uint64_t max_mhs = mhs_of_subtree(node);
+ if (node == parent->_left) {
+ p_node_mhs = &rbn_left_mhs(parent);
+ } else if (node == parent->_right) {
+ p_node_mhs = &rbn_right_mhs(parent);
+ } else {
+ return;
+ }
+ if (*p_node_mhs != max_mhs) {
+ *p_node_mhs = max_mhs;
+ RecalculateMhs(parent);
+ }
+ }
+
+ void Tree::IsNewNodeMergable(Node *pred,
+ Node *succ,
+ Node::BlockPair pair,
+ bool *left_merge,
+ bool *right_merge) {
+ if (pred) {
+ OUUInt64 end_of_pred = rbn_size(pred) + rbn_offset(pred);
+ if (end_of_pred < pair._offset)
+ *left_merge = false;
+ else {
+ invariant(end_of_pred == pair._offset);
+ *left_merge = true;
+ }
+ }
+ if (succ) {
+ OUUInt64 begin_of_succ = rbn_offset(succ);
+ OUUInt64 end_of_node = pair._offset + pair._size;
+ if (end_of_node < begin_of_succ) {
+ *right_merge = false;
+ } else {
+ invariant(end_of_node == begin_of_succ);
+ *right_merge = true;
+ }
+ }
+ }
+
+ void Tree::AbsorbNewNode(Node *pred,
+ Node *succ,
+ Node::BlockPair pair,
+ bool left_merge,
+ bool right_merge,
+ bool is_right_child) {
+ invariant(left_merge || right_merge);
+ if (left_merge && right_merge) {
+ // merge to the succ
+ if (!is_right_child) {
+ rbn_size(succ) += pair._size;
+ rbn_offset(succ) = pair._offset;
+ // merge to the pred
+ rbn_size(pred) += rbn_size(succ);
+                // to keep the invariant of the tree -- no overlapping holes
+ rbn_offset(succ) += rbn_size(succ);
+ rbn_size(succ) = 0;
+ RecalculateMhs(succ);
+ RecalculateMhs(pred);
+ // pred dominates succ. this is going to
+ // update the pred labels separately.
+ // remove succ
+ RawRemove(_root, succ);
+ } else {
+ rbn_size(pred) += pair._size;
+ rbn_offset(succ) = rbn_offset(pred);
+ rbn_size(succ) += rbn_size(pred);
+ rbn_offset(pred) += rbn_size(pred);
+ rbn_size(pred) = 0;
+ RecalculateMhs(pred);
+ RecalculateMhs(succ);
+ // now remove pred
+ RawRemove(_root, pred);
+ }
+ } else if (left_merge) {
+ rbn_size(pred) += pair._size;
+ RecalculateMhs(pred);
+ } else if (right_merge) {
+ rbn_offset(succ) -= pair._size;
+ rbn_size(succ) += pair._size;
+ RecalculateMhs(succ);
+ }
+ }
+    // This is the most tedious part, but not complicated:
+    // 1. find where to insert the pair
+    // 2. check whether pred and succ can merge with the pair; if so, merge
+    //    with them (either pred or succ may end up being removed)
+    // 3. if only left-mergeable or right-mergeable, just merge
+    // 4. non-mergeable case: insert the node and run the fixup
+
+ int Tree::Insert(Node *&root, Node::BlockPair pair) {
+ Node *x = _root;
+ Node *y = NULL;
+ bool left_merge = false;
+ bool right_merge = false;
+ Node *node = NULL;
+
+ while (x != NULL) {
+ y = x;
+ if (pair._offset < rbn_key(x))
+ x = x->_left;
+ else
+ x = x->_right;
+ }
+
+        // we found where to insert; now find the pred and succ to check for
+        // possible merges.
+        // node->parent = y;
+ Node *pred, *succ;
+ if (y != NULL) {
+ if (pair._offset < rbn_key(y)) {
+ // as the left child
+ pred = PredecessorHelper(y->_parent, y);
+ succ = y;
+ IsNewNodeMergable(pred, succ, pair, &left_merge, &right_merge);
+ if (left_merge || right_merge) {
+ AbsorbNewNode(
+ pred, succ, pair, left_merge, right_merge, false);
+ } else {
+ // construct the node
+ Node::Pair mhsp {0, 0};
+ node =
+ new Node(EColor::BLACK, pair, mhsp, nullptr, nullptr, nullptr);
+ if (!node)
+ return -1;
+ y->_left = node;
+ node->_parent = y;
+ RecalculateMhs(node);
+ }
+
+ } else {
+ // as the right child
+ pred = y;
+ succ = SuccessorHelper(y->_parent, y);
+ IsNewNodeMergable(pred, succ, pair, &left_merge, &right_merge);
+ if (left_merge || right_merge) {
+ AbsorbNewNode(
+ pred, succ, pair, left_merge, right_merge, true);
+ } else {
+ // construct the node
+ Node::Pair mhsp {0, 0};
+ node =
+ new Node(EColor::BLACK, pair, mhsp, nullptr, nullptr, nullptr);
+ if (!node)
+ return -1;
+ y->_right = node;
+ node->_parent = y;
+ RecalculateMhs(node);
+ }
+ }
+ } else {
+ Node::Pair mhsp {0, 0};
+ node = new Node(EColor::BLACK, pair, mhsp, nullptr, nullptr, nullptr);
+ if (!node)
+ return -1;
+ root = node;
+ }
+ if (!left_merge && !right_merge) {
+ invariant_notnull(node);
+ node->_color = EColor::RED;
+ return InsertFixup(root, node);
+ }
+ return 0;
+ }
+
+ int Tree::InsertFixup(Node *&root, Node *node) {
+ Node *parent, *gparent;
+ while ((parent = rbn_parent(node)) && rbn_is_red(parent)) {
+ gparent = rbn_parent(parent);
+ if (parent == gparent->_left) {
+ {
+ Node *uncle = gparent->_right;
+ if (uncle && rbn_is_red(uncle)) {
+ rbn_set_black(uncle);
+ rbn_set_black(parent);
+ rbn_set_red(gparent);
+ node = gparent;
+ continue;
+ }
+ }
+
+ if (parent->_right == node) {
+ Node *tmp;
+ LeftRotate(root, parent);
+ tmp = parent;
+ parent = node;
+ node = tmp;
+ }
+
+ rbn_set_black(parent);
+ rbn_set_red(gparent);
+ RightRotate(root, gparent);
+ } else {
+ {
+ Node *uncle = gparent->_left;
+ if (uncle && rbn_is_red(uncle)) {
+ rbn_set_black(uncle);
+ rbn_set_black(parent);
+ rbn_set_red(gparent);
+ node = gparent;
+ continue;
+ }
+ }
+
+ if (parent->_left == node) {
+ Node *tmp;
+ RightRotate(root, parent);
+ tmp = parent;
+ parent = node;
+ node = tmp;
+ }
+ rbn_set_black(parent);
+ rbn_set_red(gparent);
+ LeftRotate(root, gparent);
+ }
+ }
+ rbn_set_black(root);
+ return 0;
+ }
+
+ int Tree::Insert(Node::BlockPair pair) { return Insert(_root, pair); }
+
+ uint64_t Tree::Remove(size_t size) {
+ Node *node = SearchFirstFitBySize(size);
+ return Remove(_root, node, size);
+ }
+
+ void Tree::RawRemove(Node *&root, Node *node) {
+ Node *child, *parent;
+ EColor color;
+
+ if ((node->_left != NULL) && (node->_right != NULL)) {
+ Node *replace = node;
+ replace = replace->_right;
+ while (replace->_left != NULL)
+ replace = replace->_left;
+
+ if (rbn_parent(node)) {
+ if (rbn_parent(node)->_left == node)
+ rbn_parent(node)->_left = replace;
+ else
+ rbn_parent(node)->_right = replace;
+ } else {
+ root = replace;
+ }
+ child = replace->_right;
+ parent = rbn_parent(replace);
+ color = rbn_color(replace);
+
+ if (parent == node) {
+ parent = replace;
+ } else {
+ if (child)
+ rbn_parent(child) = parent;
+
+ parent->_left = child;
+ rbn_left_mhs(parent) = rbn_right_mhs(replace);
+ RecalculateMhs(parent);
+ replace->_right = node->_right;
+ rbn_set_parent(node->_right, replace);
+ rbn_right_mhs(replace) = rbn_right_mhs(node);
+ }
+
+ replace->_parent = node->_parent;
+ replace->_color = node->_color;
+ replace->_left = node->_left;
+ rbn_left_mhs(replace) = rbn_left_mhs(node);
+ node->_left->_parent = replace;
+ RecalculateMhs(replace);
+ if (color == EColor::BLACK)
+ RawRemoveFixup(root, child, parent);
+ delete node;
+ return;
+ }
+
+ if (node->_left != NULL)
+ child = node->_left;
+ else
+ child = node->_right;
+
+ parent = node->_parent;
+ color = node->_color;
+
+ if (child)
+ child->_parent = parent;
+
+ if (parent) {
+ if (parent->_left == node) {
+ parent->_left = child;
+ rbn_left_mhs(parent) = child ? mhs_of_subtree(child) : 0;
+ } else {
+ parent->_right = child;
+ rbn_right_mhs(parent) = child ? mhs_of_subtree(child) : 0;
+ }
+ RecalculateMhs(parent);
+ } else
+ root = child;
+ if (color == EColor::BLACK)
+ RawRemoveFixup(root, child, parent);
+ delete node;
+ }
+
+ void Tree::RawRemove(uint64_t offset) {
+ Node *node = SearchByOffset(offset);
+ RawRemove(_root, node);
+ }
+ static inline uint64_t align(uint64_t value, uint64_t ba_alignment) {
+ return ((value + ba_alignment - 1) / ba_alignment) * ba_alignment;
+ }
+ uint64_t Tree::Remove(Node *&root, Node *node, size_t size) {
+ OUUInt64 n_offset = rbn_offset(node);
+ OUUInt64 n_size = rbn_size(node);
+ OUUInt64 answer_offset(align(rbn_offset(node).ToInt(), _align));
+
+ invariant((answer_offset + size) <= (n_offset + n_size));
+ if (answer_offset == n_offset) {
+ rbn_offset(node) += size;
+ rbn_size(node) -= size;
+ RecalculateMhs(node);
+ if (rbn_size(node) == 0) {
+ RawRemove(root, node);
+ }
+
+ } else {
+ if (answer_offset + size == n_offset + n_size) {
+ rbn_size(node) -= size;
+ RecalculateMhs(node);
+ } else {
+ // well, cut in the middle...
+ rbn_size(node) = answer_offset - n_offset;
+ RecalculateMhs(node);
+ Insert(_root,
+ {(answer_offset + size),
+ (n_offset + n_size) - (answer_offset + size)});
+ }
+ }
+ return answer_offset.ToInt();
+ }
+
+ void Tree::RawRemoveFixup(Node *&root, Node *node, Node *parent) {
+ Node *other;
+ while ((!node || rbn_is_black(node)) && node != root) {
+ if (parent->_left == node) {
+ other = parent->_right;
+ if (rbn_is_red(other)) {
+                    // Case 1: the brother of X, w, is red
+ rbn_set_black(other);
+ rbn_set_red(parent);
+ LeftRotate(root, parent);
+ other = parent->_right;
+ }
+ if ((!other->_left || rbn_is_black(other->_left)) &&
+ (!other->_right || rbn_is_black(other->_right))) {
+ // Case 2: w is black and both of w's children are black
+ rbn_set_red(other);
+ node = parent;
+ parent = rbn_parent(node);
+ } else {
+ if (!other->_right || rbn_is_black(other->_right)) {
+                        // Case 3: w is black, w's left child is red and
+                        // w's right child is black
+ rbn_set_black(other->_left);
+ rbn_set_red(other);
+ RightRotate(root, other);
+ other = parent->_right;
+ }
+                    // Case 4: w is black and w's right child is red,
+                    // regardless of the left child's color
+ rbn_set_color(other, rbn_color(parent));
+ rbn_set_black(parent);
+ rbn_set_black(other->_right);
+ LeftRotate(root, parent);
+ node = root;
+ break;
+ }
+ } else {
+ other = parent->_left;
+ if (rbn_is_red(other)) {
+ // Case 1: w is red
+ rbn_set_black(other);
+ rbn_set_red(parent);
+ RightRotate(root, parent);
+ other = parent->_left;
+ }
+ if ((!other->_left || rbn_is_black(other->_left)) &&
+ (!other->_right || rbn_is_black(other->_right))) {
+ // Case 2: w is black and both children are black
+ rbn_set_red(other);
+ node = parent;
+ parent = rbn_parent(node);
+ } else {
+ if (!other->_left || rbn_is_black(other->_left)) {
+                        // Case 3: w is black, w's right child is red and
+                        // w's left child is black
+ rbn_set_black(other->_right);
+ rbn_set_red(other);
+ LeftRotate(root, other);
+ other = parent->_left;
+ }
+                    // Case 4: w is black and w's left child is red,
+                    // regardless of the right child's color
+ rbn_set_color(other, rbn_color(parent));
+ rbn_set_black(parent);
+ rbn_set_black(other->_left);
+ RightRotate(root, parent);
+ node = root;
+ break;
+ }
+ }
+ }
+ if (node)
+ rbn_set_black(node);
+ }
+
+ void Tree::Destroy(Node *&tree) {
+ if (tree == NULL)
+ return;
+
+ if (tree->_left != NULL)
+ Destroy(tree->_left);
+ if (tree->_right != NULL)
+ Destroy(tree->_right);
+
+ delete tree;
+ tree = NULL;
+ }
+
+ void Tree::Destroy() { Destroy(_root); }
+
+ void Tree::Dump(Node *tree, Node::BlockPair pair, EDirection dir) {
+ if (tree != NULL) {
+ if (dir == EDirection::NONE)
+ fprintf(stderr,
+ "(%" PRIu64 ",%" PRIu64 ", mhs:(%" PRIu64 ",%" PRIu64
+ "))(B) is root\n",
+ rbn_offset(tree).ToInt(),
+ rbn_size(tree).ToInt(),
+ rbn_left_mhs(tree),
+ rbn_right_mhs(tree));
+ else
+ fprintf(stderr,
+ "(%" PRIu64 ",%" PRIu64 ",mhs:(%" PRIu64 ",%" PRIu64
+ "))(%c) is %" PRIu64 "'s %s\n",
+ rbn_offset(tree).ToInt(),
+ rbn_size(tree).ToInt(),
+ rbn_left_mhs(tree),
+ rbn_right_mhs(tree),
+ rbn_is_red(tree) ? 'R' : 'B',
+ pair._offset.ToInt(),
+ dir == EDirection::RIGHT ? "right child" : "left child");
+
+ Dump(tree->_left, tree->_hole, EDirection::LEFT);
+ Dump(tree->_right, tree->_hole, EDirection::RIGHT);
+ }
+ }
+
+ uint64_t Tree::EffectiveSize(Node *node) {
+ OUUInt64 offset = rbn_offset(node);
+ OUUInt64 size = rbn_size(node);
+ OUUInt64 end = offset + size;
+ OUUInt64 aligned_offset(align(offset.ToInt(), _align));
+ if (aligned_offset > end) {
+ return 0;
+ }
+ return (end - aligned_offset).ToInt();
+ }
+
+ void Tree::Dump() {
+ if (_root != NULL)
+            Dump(_root, _root->_hole, EDirection::NONE);
+ }
+
+ static void vis_bal_f(void *extra, Node *node, uint64_t depth) {
+ uint64_t **p = (uint64_t **)extra;
+ uint64_t min = *p[0];
+ uint64_t max = *p[1];
+ if (node->_left) {
+ Node *left = node->_left;
+ invariant(node == left->_parent);
+ }
+
+ if (node->_right) {
+ Node *right = node->_right;
+ invariant(node == right->_parent);
+ }
+
+ if (!node->_left || !node->_right) {
+ if (min > depth) {
+ *p[0] = depth;
+ } else if (max < depth) {
+ *p[1] = depth;
+ }
+ }
+ }
+
+ void Tree::ValidateBalance() {
+ uint64_t min_depth = 0xffffffffffffffff;
+ uint64_t max_depth = 0;
+ if (!_root) {
+ return;
+ }
+ uint64_t *p[2] = {&min_depth, &max_depth};
+ InOrderVisitor(vis_bal_f, (void *)p);
+ invariant((min_depth + 1) * 2 >= max_depth + 1);
+ }
+
+ static void vis_cmp_f(void *extra, Node *node, uint64_t UU(depth)) {
+ Node::BlockPair **p = (Node::BlockPair **)extra;
+
+ invariant_notnull(*p);
+ invariant((*p)->_offset == node->_hole._offset);
+
+ *p = *p + 1;
+ }
+
+ // validate the input pairs matches with sorted pairs
+ void Tree::ValidateInOrder(Node::BlockPair *pairs) {
+ InOrderVisitor(vis_cmp_f, &pairs);
+ }
+
+ uint64_t Tree::ValidateMhs(Node *node) {
+ if (!node)
+ return 0;
+ else {
+ uint64_t mhs_left = ValidateMhs(node->_left);
+ uint64_t mhs_right = ValidateMhs(node->_right);
+ if (mhs_left != rbn_left_mhs(node)) {
+ printf("assert failure: mhs_left = %" PRIu64 "\n", mhs_left);
+                Dump(node, node->_hole, EDirection::NONE);
+ }
+ invariant(mhs_left == rbn_left_mhs(node));
+
+ if (mhs_right != rbn_right_mhs(node)) {
+ printf("assert failure: mhs_right = %" PRIu64 "\n", mhs_right);
+                Dump(node, node->_hole, EDirection::NONE);
+ }
+ invariant(mhs_right == rbn_right_mhs(node));
+ return std::max(EffectiveSize(node), std::max(mhs_left, mhs_right));
+ }
+ }
+
+ void Tree::ValidateMhs() {
+ if (!_root)
+ return;
+ uint64_t mhs_left = ValidateMhs(_root->_left);
+ uint64_t mhs_right = ValidateMhs(_root->_right);
+ invariant(mhs_left == rbn_left_mhs(_root));
+ invariant(mhs_right == rbn_right_mhs(_root));
+ }
+
+} // namespace MhsRbTree
diff --git a/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h b/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h
new file mode 100644
index 00000000..31ffd7e1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h
@@ -0,0 +1,356 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+#include "portability/toku_pthread.h"
+#include "portability/toku_stdint.h"
+#include "portability/toku_stdlib.h"
+
+// RBTree(Red-black tree) with max hole sizes for subtrees.
+
+// This is a tentative data structure to improve the block allocation time
+// complexity from linear time to log time. Please note that this DS only
+// supports first-fit for now. It would actually be easier to do best-fit
+// (just sort by size).
+
+// The RBTree is a classic data structure with O(log(n)) insertion, deletion
+// and search; its efficiency has been proven over many years.
+
+// A *hole* is the representation of an available BlockPair for allocation,
+// defined as (start_address, size) or (offset, size) interchangeably.
+
+// Each node has a *label* holding the pair of max hole sizes for its
+// subtrees.
+
+// We are implementing an RBTree with max hole sizes for subtrees: a red-black
+// tree that is sorted by start_address but also labeled with the max hole
+// sizes of the subtrees.
+
+// [(6,3)] -> [(offset, size)], the hole
+// [{2,5}] -> [{mhs_of_left, mhs_of_right}], the label
+/* / \ */
+// [(0, 1)] [(10, 5)]
+// [{0, 2}] [{0, 0}]
+/* \ */
+// [(3, 2)]
+// [{0, 0}]
+// request of allocation size=2 goes from root to [(3,2)].
+
+// The above example shows a simplified RBTree_max_holes.
+// It is easy to see that the search time is O(log(n)), as we can make a
+// decision at each descent until we get to the target.
+
+// The only question is whether we can keep the maintenance cost low -- and I
+// think that is not a problem, because an insertion/deletion only updates the
+// max_hole_sizes of the nodes along the path from the root to the node being
+// deleted/inserted. The path can be cached and the search is O(log(n)) anyway.
+
+// Unlike a typical rbtree, this Tree has to handle inserts and deletes with
+// more care: an allocation that triggers a delete might leave some unused
+// space, for which we can simply update the start_addr and size without
+// worrying about overlaps. A free might mean not only an insertion but also
+// *merging* with the adjacent holes.
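+
+// A minimal usage sketch (illustrative only; the offsets, sizes and the
+// 4096-byte alignment below are made up, not taken from the block allocator):
+//
+//     MhsRbTree::Tree tree(4096);          // offsets handed out 4K-aligned
+//     tree.Insert({0, 1 << 20});           // one free 1MB hole at offset 0
+//     uint64_t off = tree.Remove(8192);    // first-fit allocation of 8K
+//     tree.Insert({off, 8192});            // freeing merges back into the hole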
+
+namespace MhsRbTree {
+
+#define offset_t uint64_t
+ enum class EColor { RED, BLACK };
+ enum class EDirection { NONE = 0, LEFT, RIGHT };
+
+    // I am a bit tired of fixing overflow/underflow, so this is a quickly
+    // crafted int class that has an infinity-like max value and prevents
+    // overflow and underflow. If you have a file offset larger than
+    // MHS_MAX_VAL, it is not a problem here. :-/ - JYM
+ class OUUInt64 {
+ public:
+ static const uint64_t MHS_MAX_VAL = 0xffffffffffffffff;
+ OUUInt64() : _value(0) {}
+ OUUInt64(uint64_t s) : _value(s) {}
+ OUUInt64(const OUUInt64& o) : _value(o._value) {}
+ bool operator<(const OUUInt64 &r) const {
+ invariant(!(_value == MHS_MAX_VAL && r.ToInt() == MHS_MAX_VAL));
+ return _value < r.ToInt();
+ }
+ bool operator>(const OUUInt64 &r) const {
+ invariant(!(_value == MHS_MAX_VAL && r.ToInt() == MHS_MAX_VAL));
+ return _value > r.ToInt();
+ }
+ bool operator<=(const OUUInt64 &r) const {
+ invariant(!(_value == MHS_MAX_VAL && r.ToInt() == MHS_MAX_VAL));
+ return _value <= r.ToInt();
+ }
+ bool operator>=(const OUUInt64 &r) const {
+ invariant(!(_value == MHS_MAX_VAL && r.ToInt() == MHS_MAX_VAL));
+ return _value >= r.ToInt();
+ }
+ OUUInt64 operator+(const OUUInt64 &r) const {
+ if (_value == MHS_MAX_VAL || r.ToInt() == MHS_MAX_VAL) {
+ OUUInt64 tmp(MHS_MAX_VAL);
+ return tmp;
+ } else {
+ // detecting overflow
+ invariant((MHS_MAX_VAL - _value) >= r.ToInt());
+ uint64_t plus = _value + r.ToInt();
+ OUUInt64 tmp(plus);
+ return tmp;
+ }
+ }
+ OUUInt64 operator-(const OUUInt64 &r) const {
+ invariant(r.ToInt() != MHS_MAX_VAL);
+ if (_value == MHS_MAX_VAL) {
+ return *this;
+ } else {
+ invariant(_value >= r.ToInt());
+ uint64_t minus = _value - r.ToInt();
+ OUUInt64 tmp(minus);
+ return tmp;
+ }
+ }
+ OUUInt64 operator-=(const OUUInt64 &r) {
+ if (_value != MHS_MAX_VAL) {
+ invariant(r.ToInt() != MHS_MAX_VAL);
+ invariant(_value >= r.ToInt());
+ _value -= r.ToInt();
+ }
+ return *this;
+ }
+ OUUInt64 operator+=(const OUUInt64 &r) {
+ if (_value != MHS_MAX_VAL) {
+ if (r.ToInt() == MHS_MAX_VAL) {
+ _value = MHS_MAX_VAL;
+ } else {
+ invariant((MHS_MAX_VAL - _value) >= r.ToInt());
+ this->_value += r.ToInt();
+ }
+ }
+ return *this;
+ }
+ bool operator==(const OUUInt64 &r) const {
+ return _value == r.ToInt();
+ }
+ bool operator!=(const OUUInt64 &r) const {
+ return _value != r.ToInt();
+ }
+ OUUInt64 operator=(const OUUInt64 &r) {
+ _value = r.ToInt();
+ return *this;
+ }
+ uint64_t ToInt() const { return _value; }
+
+ private:
+ uint64_t _value;
+ };
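+
+    // Behavior sketch (illustrative, not from the original source): OUUInt64
+    // saturates at MHS_MAX_VAL instead of wrapping around, e.g.
+    //     OUUInt64 inf(OUUInt64::MHS_MAX_VAL);
+    //     OUUInt64 x(42);
+    //     invariant(inf + x == inf);         // addition saturates at "infinity"
+    //     invariant(inf - x == inf);         // "infinity" minus a finite value stays there
+    //     invariant(x + x == OUUInt64(84));  // ordinary arithmetic below the cap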
+
+ class Node {
+ public:
+ class BlockPair {
+ public:
+ OUUInt64 _offset;
+ OUUInt64 _size;
+
+ BlockPair() : _offset(0), _size(0) {}
+ BlockPair(uint64_t o, uint64_t s) : _offset(o), _size(s) {}
+ BlockPair(OUUInt64 o, OUUInt64 s) : _offset(o), _size(s) {}
+ BlockPair(const BlockPair &o)
+ : _offset(o._offset), _size(o._size) {}
+ BlockPair& operator=(const BlockPair&) = default;
+
+ int operator<(const BlockPair &rhs) const {
+ return _offset < rhs._offset;
+ }
+ int operator<(const uint64_t &o) const { return _offset < o; }
+ };
+
+ struct Pair {
+ uint64_t _left;
+ uint64_t _right;
+ Pair(uint64_t l, uint64_t r) : _left(l), _right(r) {}
+ };
+
+ EColor _color;
+ BlockPair _hole;
+ Pair _label;
+ Node *_left;
+ Node *_right;
+ Node *_parent;
+
+ Node(EColor c,
+ Node::BlockPair h,
+ Pair lb,
+ Node *l,
+ Node *r,
+ Node *p)
+ : _color(c),
+ _hole(h),
+ _label(lb),
+ _left(l),
+ _right(r),
+ _parent(p) {}
+ };
+
+ class Tree {
+ private:
+ Node *_root;
+ uint64_t _align;
+
+ public:
+ Tree();
+ Tree(uint64_t);
+ ~Tree();
+
+ void PreOrder();
+ void InOrder();
+ void PostOrder();
+ // immutable operations
+ Node *SearchByOffset(uint64_t addr);
+ Node *SearchFirstFitBySize(uint64_t size);
+
+ Node *MinNode();
+ Node *MaxNode();
+
+ Node *Successor(Node *);
+ Node *Predecessor(Node *);
+
+ // mapped from tree_allocator::free_block
+ int Insert(Node::BlockPair pair);
+ // mapped from tree_allocator::alloc_block
+ uint64_t Remove(size_t size);
+ // mapped from tree_allocator::alloc_block_after
+
+ void RawRemove(uint64_t offset);
+ void Destroy();
+ // print the tree
+ void Dump();
+ // validation
+ // balance
+ void ValidateBalance();
+ void ValidateInOrder(Node::BlockPair *);
+ void InOrderVisitor(void (*f)(void *, Node *, uint64_t), void *);
+ void ValidateMhs();
+
+ private:
+ void PreOrder(Node *node) const;
+ void InOrder(Node *node) const;
+ void PostOrder(Node *node) const;
+ Node *SearchByOffset(Node *node, offset_t addr) const;
+ Node *SearchFirstFitBySize(Node *node, size_t size) const;
+
+ Node *MinNode(Node *node);
+ Node *MaxNode(Node *node);
+
+ // rotations to fix up. we will have to update the labels too.
+ void LeftRotate(Node *&root, Node *x);
+ void RightRotate(Node *&root, Node *y);
+
+ int Insert(Node *&root, Node::BlockPair pair);
+ int InsertFixup(Node *&root, Node *node);
+
+ void RawRemove(Node *&root, Node *node);
+ uint64_t Remove(Node *&root, Node *node, size_t size);
+ void RawRemoveFixup(Node *&root, Node *node, Node *parent);
+
+ void Destroy(Node *&tree);
+ void Dump(Node *tree, Node::BlockPair pair, EDirection dir);
+ void RecalculateMhs(Node *node);
+ void IsNewNodeMergable(Node *, Node *, Node::BlockPair, bool *, bool *);
+ void AbsorbNewNode(Node *, Node *, Node::BlockPair, bool, bool, bool);
+ Node *SearchFirstFitBySizeHelper(Node *x, uint64_t size);
+
+ Node *SuccessorHelper(Node *y, Node *x);
+
+ Node *PredecessorHelper(Node *y, Node *x);
+
+ void InOrderVisitor(Node *,
+ void (*f)(void *, Node *, uint64_t),
+ void *,
+ uint64_t);
+ uint64_t ValidateMhs(Node *);
+
+ uint64_t EffectiveSize(Node *);
+// mixed with some macros.....
+#define rbn_parent(r) ((r)->_parent)
+#define rbn_color(r) ((r)->_color)
+#define rbn_is_red(r) ((r)->_color == EColor::RED)
+#define rbn_is_black(r) ((r)->_color == EColor::BLACK)
+#define rbn_set_black(r) \
+ do { \
+ (r)->_color = EColor::BLACK; \
+ } while (0)
+#define rbn_set_red(r) \
+ do { \
+ (r)->_color = EColor::RED; \
+ } while (0)
+#define rbn_set_parent(r, p) \
+ do { \
+ (r)->_parent = (p); \
+ } while (0)
+#define rbn_set_color(r, c) \
+ do { \
+ (r)->_color = (c); \
+ } while (0)
+#define rbn_set_offset(r, c)      \
+    do {                          \
+        (r)->_hole._offset = (c); \
+    } while (0)
+#define rbn_set_size(r, c) \
+ do { \
+ (r)->_hole._size = (c); \
+ } while (0)
+#define rbn_set_left_mhs(r, c) \
+ do { \
+ (r)->_label._left = (c); \
+ } while (0)
+#define rbn_set_right_mhs(r, c) \
+ do { \
+ (r)->_label._right = (c); \
+ } while (0)
+#define rbn_size(r) ((r)->_hole._size)
+#define rbn_offset(r) ((r)->_hole._offset)
+#define rbn_key(r) ((r)->_hole._offset)
+#define rbn_left_mhs(r) ((r)->_label._left)
+#define rbn_right_mhs(r) ((r)->_label._right)
+#define mhs_of_subtree(y) \
+ (std::max(std::max(rbn_left_mhs(y), rbn_right_mhs(y)), EffectiveSize(y)))
+ };
+
+} // namespace MhsRbTree
diff --git a/storage/tokudb/PerconaFT/ft/serialize/rbuf.h b/storage/tokudb/PerconaFT/ft/serialize/rbuf.h
new file mode 100644
index 00000000..c14dedbf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/rbuf.h
@@ -0,0 +1,156 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <string.h>
+
+#include "portability/memory.h"
+#include "portability/toku_assert.h"
+#include "portability/toku_htonl.h"
+#include "portability/toku_portability.h"
+#include "util/memarena.h"
+
+struct rbuf {
+ unsigned char *buf;
+ unsigned int size;
+ unsigned int ndone;
+};
+#define RBUF_INITIALIZER ((struct rbuf){.buf = NULL, .size=0, .ndone=0})
+
+static inline void rbuf_init(struct rbuf *r, unsigned char *buf, unsigned int size) {
+ r->buf = buf;
+ r->size = size;
+ r->ndone = 0;
+}
+
+static inline unsigned int rbuf_get_roffset(struct rbuf *r) {
+ return r->ndone;
+}
+
+static inline unsigned char rbuf_char (struct rbuf *r) {
+ assert(r->ndone<r->size);
+ return r->buf[r->ndone++];
+}
+
+static inline void rbuf_ma_uint8_t (struct rbuf *r, memarena *ma __attribute__((__unused__)), uint8_t *num) {
+ *num = rbuf_char(r);
+}
+
+static inline void rbuf_ma_bool (struct rbuf *r, memarena *ma __attribute__((__unused__)), bool *b) {
+ uint8_t n = rbuf_char(r);
+ *b = (n!=0);
+}
+
+//Read an int that MUST be in network order regardless of disk order
+static unsigned int rbuf_network_int (struct rbuf *r) __attribute__((__unused__));
+static unsigned int rbuf_network_int (struct rbuf *r) {
+ assert(r->ndone+4 <= r->size);
+ uint32_t result = toku_ntohl(*(uint32_t*)(r->buf+r->ndone)); // This only works on machines where unaligned loads are OK.
+ r->ndone+=4;
+ return result;
+}
+
+static unsigned int rbuf_int (struct rbuf *r) {
+#if 1
+ assert(r->ndone+4 <= r->size);
+ uint32_t result = toku_dtoh32(*(uint32_t*)(r->buf+r->ndone)); // This only works on machines where unaligned loads are OK.
+ r->ndone+=4;
+ return result;
+#else
+ unsigned char c0 = rbuf_char(r);
+ unsigned char c1 = rbuf_char(r);
+ unsigned char c2 = rbuf_char(r);
+ unsigned char c3 = rbuf_char(r);
+ return ((c0<<24)|
+ (c1<<16)|
+ (c2<<8)|
+ (c3<<0));
+#endif
+}
+
+static inline void rbuf_literal_bytes (struct rbuf *r, const void **bytes, unsigned int n_bytes) {
+ *bytes = &r->buf[r->ndone];
+ r->ndone+=n_bytes;
+ assert(r->ndone<=r->size);
+}
+
+/* Return a pointer into the middle of the buffer. */
+static inline void rbuf_bytes (struct rbuf *r, const void **bytes, unsigned int *n_bytes)
+{
+ *n_bytes = rbuf_int(r);
+ rbuf_literal_bytes(r, bytes, *n_bytes);
+}
+
+static inline unsigned long long rbuf_ulonglong (struct rbuf *r) {
+ unsigned i0 = rbuf_int(r);
+ unsigned i1 = rbuf_int(r);
+ return ((unsigned long long)(i0)<<32) | ((unsigned long long)(i1));
+}
+
+static inline signed long long rbuf_longlong (struct rbuf *r) {
+ return (signed long long)rbuf_ulonglong(r);
+}
+
+static inline void rbuf_ma_uint32_t (struct rbuf *r, memarena *ma __attribute__((__unused__)), uint32_t *num) {
+ *num = rbuf_int(r);
+}
+
+static inline void rbuf_ma_uint64_t (struct rbuf *r, memarena *ma __attribute__((__unused__)), uint64_t *num) {
+ *num = rbuf_ulonglong(r);
+}
+
+// Don't try to use the same space, malloc it
+static inline void rbuf_BYTESTRING (struct rbuf *r, BYTESTRING *bs) {
+ bs->len = rbuf_int(r);
+ uint32_t newndone = r->ndone + bs->len;
+ assert(newndone <= r->size);
+ bs->data = (char *) toku_memdup(&r->buf[r->ndone], (size_t)bs->len);
+ assert(bs->data);
+ r->ndone = newndone;
+}
+
+static inline void rbuf_ma_BYTESTRING (struct rbuf *r, memarena *ma, BYTESTRING *bs) {
+ bs->len = rbuf_int(r);
+ uint32_t newndone = r->ndone + bs->len;
+ assert(newndone <= r->size);
+ bs->data = (char *) ma->malloc_from_arena(bs->len);
+ assert(bs->data);
+ memcpy(bs->data, &r->buf[r->ndone], bs->len);
+ r->ndone = newndone;
+}
diff --git a/storage/tokudb/PerconaFT/ft/serialize/sub_block.cc b/storage/tokudb/PerconaFT/ft/serialize/sub_block.cc
new file mode 100644
index 00000000..6dc1f828
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/sub_block.cc
@@ -0,0 +1,392 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <zlib.h>
+
+#include "portability/memory.h"
+#include "portability/toku_assert.h"
+#include "portability/toku_portability.h"
+
+#include "ft/serialize/compress.h"
+#include "ft/serialize/sub_block.h"
+#include "ft/serialize/quicklz.h"
+#include "util/threadpool.h"
+#include "util/x1764.h"
+
+toku_instr_key *workset_lock_mutex_key;
+toku_instr_key *ws_worker_wait_key;
+
+SUB_BLOCK sub_block_creat(void) {
+ SUB_BLOCK XMALLOC(sb);
+ sub_block_init(sb);
+ return sb;
+}
+void sub_block_init(SUB_BLOCK sub_block) {
+ sub_block->uncompressed_ptr = 0;
+ sub_block->uncompressed_size = 0;
+
+ sub_block->compressed_ptr = 0;
+ sub_block->compressed_size_bound = 0;
+ sub_block->compressed_size = 0;
+
+ sub_block->xsum = 0;
+}
+
+// get the size of the compression header
+size_t
+sub_block_header_size(int n_sub_blocks) {
+ return sizeof (uint32_t) + n_sub_blocks * sizeof (struct stored_sub_block);
+}
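+// e.g. with 4 sub blocks this is 4 + 4 * 12 = 52 bytes, assuming the usual
+// 12-byte layout of struct stored_sub_block (three uint32_t fields, no padding).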
+
+void
+set_compressed_size_bound(struct sub_block *se, enum toku_compression_method method) {
+ se->compressed_size_bound = toku_compress_bound(method, se->uncompressed_size);
+}
+
+// get the sum of the sub block compressed sizes
+size_t
+get_sum_compressed_size_bound(int n_sub_blocks, struct sub_block sub_block[], enum toku_compression_method method) {
+ size_t compressed_size_bound = 0;
+ for (int i = 0; i < n_sub_blocks; i++) {
+ sub_block[i].compressed_size_bound = toku_compress_bound(method, sub_block[i].uncompressed_size);
+ compressed_size_bound += sub_block[i].compressed_size_bound;
+ }
+ return compressed_size_bound;
+}
+
+// get the sum of the sub block uncompressed sizes
+size_t
+get_sum_uncompressed_size(int n_sub_blocks, struct sub_block sub_block[]) {
+ size_t uncompressed_size = 0;
+ for (int i = 0; i < n_sub_blocks; i++)
+ uncompressed_size += sub_block[i].uncompressed_size;
+ return uncompressed_size;
+}
+
+// round a up to the next multiple of b
+static inline int
+alignup32(int a, int b) {
+ return ((a+b-1) / b) * b;
+}
+
+// Choose n_sub_blocks and sub_block_size such that the product is >= total_size
+// and the sub_block_size is at least the target_sub_block_size.
+int
+choose_sub_block_size(int total_size, int n_sub_blocks_limit, int *sub_block_size_ret, int *n_sub_blocks_ret) {
+ if (total_size < 0 || n_sub_blocks_limit < 1)
+ return EINVAL;
+
+ const int alignment = 32;
+
+ int n_sub_blocks, sub_block_size;
+ n_sub_blocks = total_size / target_sub_block_size;
+ if (n_sub_blocks <= 1) {
+ if (total_size > 0 && n_sub_blocks_limit > 0)
+ n_sub_blocks = 1;
+ sub_block_size = total_size;
+ } else {
+ if (n_sub_blocks > n_sub_blocks_limit) // limit the number of sub-blocks
+ n_sub_blocks = n_sub_blocks_limit;
+ sub_block_size = alignup32(total_size / n_sub_blocks, alignment);
+ while (sub_block_size * n_sub_blocks < total_size) // round up the sub-block size until big enough
+ sub_block_size += alignment;
+ }
+
+ *sub_block_size_ret = sub_block_size;
+ *n_sub_blocks_ret = n_sub_blocks;
+
+ return 0;
+}
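+
+// Worked example (illustrative numbers only): with total_size = 4,000,000,
+// n_sub_blocks_limit = 8 and target_sub_block_size = 512KB (524,288), the code
+// above yields n_sub_blocks = 4,000,000 / 524,288 = 7 and
+// sub_block_size = alignup32(4,000,000 / 7, 32) = 571,456, so
+// 7 * 571,456 = 4,000,192 >= 4,000,000 as required.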
+
+// Choose the right size of basement nodes. For now, just align up to
+// 256k blocks and hope it compresses well enough.
+int
+choose_basement_node_size(int total_size, int *sub_block_size_ret, int *n_sub_blocks_ret) {
+ if (total_size < 0)
+ return EINVAL;
+
+ *n_sub_blocks_ret = (total_size + max_basement_node_uncompressed_size - 1) / max_basement_node_uncompressed_size;
+ *sub_block_size_ret = max_basement_node_uncompressed_size;
+
+ return 0;
+}
+
+void
+set_all_sub_block_sizes(int total_size, int sub_block_size, int n_sub_blocks, struct sub_block sub_block[]) {
+ int size_left = total_size;
+ int i;
+ for (i = 0; i < n_sub_blocks-1; i++) {
+ sub_block[i].uncompressed_size = sub_block_size;
+ size_left -= sub_block_size;
+ }
+ if (i == 0 || size_left > 0)
+ sub_block[i].uncompressed_size = size_left;
+}
+
+// find the index of the first sub block that contains offset
+// Returns the sub block index, else returns -1
+int
+get_sub_block_index(int n_sub_blocks, struct sub_block sub_block[], size_t offset) {
+ size_t start_offset = 0;
+ for (int i = 0; i < n_sub_blocks; i++) {
+ size_t size = sub_block[i].uncompressed_size;
+ if (offset < start_offset + size)
+ return i;
+ start_offset += size;
+ }
+ return -1;
+}
+
+#include "workset.h"
+
+void
+compress_work_init(struct compress_work *w, enum toku_compression_method method, struct sub_block *sub_block) {
+ w->method = method;
+ w->sub_block = sub_block;
+}
+
+//
+// takes the uncompressed contents of sub_block
+// and compresses them into sb_compressed_ptr
+// cs_bound is the compressed size bound
+// Returns the size of the compressed data
+//
+uint32_t
+compress_nocrc_sub_block(
+ struct sub_block *sub_block,
+ void* sb_compressed_ptr,
+ uint32_t cs_bound,
+ enum toku_compression_method method
+ )
+{
+ // compress it
+ Bytef *uncompressed_ptr = (Bytef *) sub_block->uncompressed_ptr;
+ Bytef *compressed_ptr = (Bytef *) sb_compressed_ptr;
+ uLongf uncompressed_len = sub_block->uncompressed_size;
+ uLongf real_compressed_len = cs_bound;
+ toku_compress(method,
+ compressed_ptr, &real_compressed_len,
+ uncompressed_ptr, uncompressed_len);
+ return real_compressed_len;
+}
+
+void
+compress_sub_block(struct sub_block *sub_block, enum toku_compression_method method) {
+ sub_block->compressed_size = compress_nocrc_sub_block(
+ sub_block,
+ sub_block->compressed_ptr,
+ sub_block->compressed_size_bound,
+ method
+ );
+ // checksum it
+ sub_block->xsum = toku_x1764_memory(sub_block->compressed_ptr, sub_block->compressed_size);
+}
+
+void *
+compress_worker(void *arg) {
+ struct workset *ws = (struct workset *) arg;
+ while (1) {
+ struct compress_work *w = (struct compress_work *) workset_get(ws);
+ if (w == NULL)
+ break;
+ compress_sub_block(w->sub_block, w->method);
+ }
+ workset_release_ref(ws);
+ return arg;
+}
+
+size_t
+compress_all_sub_blocks(int n_sub_blocks, struct sub_block sub_block[], char *uncompressed_ptr, char *compressed_ptr, int num_cores, struct toku_thread_pool *pool, enum toku_compression_method method) {
+ char *compressed_base_ptr = compressed_ptr;
+ size_t compressed_len;
+
+ // This is a complex way to write a parallel loop. Cilk would be better.
+
+ if (n_sub_blocks == 1) {
+ // single sub-block
+ sub_block[0].uncompressed_ptr = uncompressed_ptr;
+ sub_block[0].compressed_ptr = compressed_ptr;
+ compress_sub_block(&sub_block[0], method);
+ compressed_len = sub_block[0].compressed_size;
+ } else {
+ // multiple sub-blocks
+ int T = num_cores; // T = min(num_cores, n_sub_blocks) - 1
+ if (T > n_sub_blocks)
+ T = n_sub_blocks;
+ if (T > 0)
+ T = T - 1; // threads in addition to the running thread
+
+ struct workset ws;
+ ZERO_STRUCT(ws);
+ workset_init(&ws);
+
+ struct compress_work work[n_sub_blocks];
+ workset_lock(&ws);
+ for (int i = 0; i < n_sub_blocks; i++) {
+ sub_block[i].uncompressed_ptr = uncompressed_ptr;
+ sub_block[i].compressed_ptr = compressed_ptr;
+ compress_work_init(&work[i], method, &sub_block[i]);
+ workset_put_locked(&ws, &work[i].base);
+ uncompressed_ptr += sub_block[i].uncompressed_size;
+ compressed_ptr += sub_block[i].compressed_size_bound;
+ }
+ workset_unlock(&ws);
+
+ // compress the sub-blocks
+ if (0) printf("%s:%d T=%d N=%d\n", __FUNCTION__, __LINE__, T, n_sub_blocks);
+ toku_thread_pool_run(pool, 0, &T, compress_worker, &ws);
+ workset_add_ref(&ws, T);
+ compress_worker(&ws);
+
+ // wait for all of the work to complete
+ workset_join(&ws);
+ workset_destroy(&ws);
+
+ // squeeze out the holes not used by the compress bound
+ compressed_ptr = compressed_base_ptr + sub_block[0].compressed_size;
+ for (int i = 1; i < n_sub_blocks; i++) {
+ memmove(compressed_ptr, sub_block[i].compressed_ptr, sub_block[i].compressed_size);
+ compressed_ptr += sub_block[i].compressed_size;
+ }
+
+ compressed_len = compressed_ptr - compressed_base_ptr;
+ }
+ return compressed_len;
+}
+
+// initialize the decompression work
+void
+decompress_work_init(struct decompress_work *dw,
+ void *compress_ptr, uint32_t compress_size,
+ void *uncompress_ptr, uint32_t uncompress_size,
+ uint32_t xsum) {
+ dw->compress_ptr = compress_ptr;
+ dw->compress_size = compress_size;
+ dw->uncompress_ptr = uncompress_ptr;
+ dw->uncompress_size = uncompress_size;
+ dw->xsum = xsum;
+ dw->error = 0;
+}
+
+int verbose_decompress_sub_block = 1;
+
+// decompress one block
+int
+decompress_sub_block(void *compress_ptr, uint32_t compress_size, void *uncompress_ptr, uint32_t uncompress_size, uint32_t expected_xsum) {
+ int result = 0;
+
+ // verify checksum
+ uint32_t xsum = toku_x1764_memory(compress_ptr, compress_size);
+ if (xsum != expected_xsum) {
+ if (verbose_decompress_sub_block) fprintf(stderr, "%s:%d xsum %u expected %u\n", __FUNCTION__, __LINE__, xsum, expected_xsum);
+ result = EINVAL;
+ } else {
+ // decompress
+ toku_decompress((Bytef *) uncompress_ptr, uncompress_size, (Bytef *) compress_ptr, compress_size);
+ }
+ return result;
+}
+
+// decompress blocks until there is no more work to do
+void *
+decompress_worker(void *arg) {
+ struct workset *ws = (struct workset *) arg;
+ while (1) {
+ struct decompress_work *dw = (struct decompress_work *) workset_get(ws);
+ if (dw == NULL)
+ break;
+ dw->error = decompress_sub_block(dw->compress_ptr, dw->compress_size, dw->uncompress_ptr, dw->uncompress_size, dw->xsum);
+ }
+ workset_release_ref(ws);
+ return arg;
+}
+
+int
+decompress_all_sub_blocks(int n_sub_blocks, struct sub_block sub_block[], unsigned char *compressed_data, unsigned char *uncompressed_data, int num_cores, struct toku_thread_pool *pool) {
+ int r;
+
+ if (n_sub_blocks == 1) {
+ r = decompress_sub_block(compressed_data, sub_block[0].compressed_size, uncompressed_data, sub_block[0].uncompressed_size, sub_block[0].xsum);
+ } else {
+ // compute the number of additional threads needed for decompressing this node
+ int T = num_cores; // T = min(#cores, #blocks) - 1
+ if (T > n_sub_blocks)
+ T = n_sub_blocks;
+ if (T > 0)
+ T = T - 1; // threads in addition to the running thread
+
+ // init the decompression work set
+ struct workset ws;
+ ZERO_STRUCT(ws);
+ workset_init(&ws);
+
+ // initialize the decompression work and add to the work set
+ struct decompress_work decompress_work[n_sub_blocks];
+ workset_lock(&ws);
+ for (int i = 0; i < n_sub_blocks; i++) {
+ decompress_work_init(&decompress_work[i], compressed_data, sub_block[i].compressed_size, uncompressed_data, sub_block[i].uncompressed_size, sub_block[i].xsum);
+ workset_put_locked(&ws, &decompress_work[i].base);
+
+ uncompressed_data += sub_block[i].uncompressed_size;
+ compressed_data += sub_block[i].compressed_size;
+ }
+ workset_unlock(&ws);
+
+ // decompress the sub-blocks
+ if (0) printf("%s:%d Cores=%d Blocks=%d T=%d\n", __FUNCTION__, __LINE__, num_cores, n_sub_blocks, T);
+ toku_thread_pool_run(pool, 0, &T, decompress_worker, &ws);
+ workset_add_ref(&ws, T);
+ decompress_worker(&ws);
+
+ // cleanup
+ workset_join(&ws);
+ workset_destroy(&ws);
+
+ r = 0;
+ for (int i = 0; i < n_sub_blocks; i++) {
+ r = decompress_work[i].error;
+ if (r != 0)
+ break;
+ }
+ }
+
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/serialize/sub_block.h b/storage/tokudb/PerconaFT/ft/serialize/sub_block.h
new file mode 100644
index 00000000..2ae8a2a4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/sub_block.h
@@ -0,0 +1,160 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/serialize/compress.h"
+
+// TODO: Clean this abstraction up
+static const int max_sub_blocks = 8;
+static const int target_sub_block_size = 512 * 1024;
+static const int max_basement_nodes = 32;
+static const int max_basement_node_uncompressed_size = 256 * 1024;
+static const int max_basement_node_compressed_size = 64 * 1024;
+
+struct sub_block {
+ void *uncompressed_ptr;
+ uint32_t uncompressed_size;
+
+ void *compressed_ptr;
+ uint32_t compressed_size; // real compressed size
+ uint32_t compressed_size_bound; // estimated compressed size
+
+ uint32_t xsum; // sub block checksum
+};
+typedef struct sub_block *SUB_BLOCK;
+
+struct stored_sub_block {
+ uint32_t uncompressed_size;
+ uint32_t compressed_size;
+ uint32_t xsum;
+};
+
+void sub_block_init(SUB_BLOCK);
+SUB_BLOCK sub_block_creat(void);
+
+// get the size of the compression header
+size_t
+sub_block_header_size(int n_sub_blocks);
+
+void
+set_compressed_size_bound(struct sub_block *se, enum toku_compression_method method);
+
+// get the sum of the sub block compressed bound sizes
+size_t
+get_sum_compressed_size_bound(int n_sub_blocks, struct sub_block sub_block[], enum toku_compression_method method);
+
+// get the sum of the sub block uncompressed sizes
+size_t
+get_sum_uncompressed_size(int n_sub_blocks, struct sub_block sub_block[]);
+
+// Choose n_sub_blocks and sub_block_size such that the product is >= total_size
+// and the sub_block_size is at least the target_sub_block_size.
+int
+choose_sub_block_size(int total_size, int n_sub_blocks_limit, int *sub_block_size_ret, int *n_sub_blocks_ret);
+
+int
+choose_basement_node_size(int total_size, int *sub_block_size_ret, int *n_sub_blocks_ret);
+
+void
+set_all_sub_block_sizes(int total_size, int sub_block_size, int n_sub_blocks, struct sub_block sub_block[]);
+
+// find the index of the first sub block that contains the offset
+// Returns the index if found, else returns -1
+int
+get_sub_block_index(int n_sub_blocks, struct sub_block sub_block[], size_t offset);
+
+#include "workset.h"
+
+struct compress_work {
+ struct work base;
+ enum toku_compression_method method;
+ struct sub_block *sub_block;
+};
+
+void
+compress_work_init(struct compress_work *w, enum toku_compression_method method, struct sub_block *sub_block);
+
+uint32_t
+compress_nocrc_sub_block(
+ struct sub_block *sub_block,
+ void* sb_compressed_ptr,
+ uint32_t cs_bound,
+ enum toku_compression_method method
+ );
+
+void
+compress_sub_block(struct sub_block *sub_block, enum toku_compression_method method);
+
+void *
+compress_worker(void *arg);
+
+size_t
+compress_all_sub_blocks(int n_sub_blocks, struct sub_block sub_block[], char *uncompressed_ptr, char *compressed_ptr, int num_cores, struct toku_thread_pool *pool, enum toku_compression_method method);
+
+struct decompress_work {
+ struct work base;
+ void *compress_ptr;
+ void *uncompress_ptr;
+ uint32_t compress_size;
+ uint32_t uncompress_size;
+ uint32_t xsum;
+ int error;
+};
+
+// initialize the decompression work
+void
+decompress_work_init(struct decompress_work *dw,
+ void *compress_ptr, uint32_t compress_size,
+ void *uncompress_ptr, uint32_t uncompress_size,
+ uint32_t xsum);
+
+// decompress one block
+int
+decompress_sub_block(void *compress_ptr, uint32_t compress_size, void *uncompress_ptr, uint32_t uncompress_size, uint32_t expected_xsum);
+
+// decompress blocks until there is no more work to do
+void *
+decompress_worker(void *arg);
+
+// decompress all sub blocks from the compressed_data buffer to the uncompressed_data buffer
+// Returns 0 if success, otherwise an error
+int
+decompress_all_sub_blocks(int n_sub_blocks, struct sub_block sub_block[], unsigned char *compressed_data, unsigned char *uncompressed_data, int num_cores, struct toku_thread_pool *pool);
+
+extern int verbose_decompress_sub_block;
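+
+// A rough single-sub-block round trip sketched from the declarations above.
+// Buffer ownership is simplified, and it is assumed (not stated in this header)
+// that compress_sub_block fills in compressed_size and xsum, so treat this as
+// an illustration rather than a reference:
+//
+//   struct sub_block sb;
+//   sub_block_init(&sb);
+//   sb.uncompressed_ptr = src;            // src, src_size, dst supplied by the caller
+//   sb.uncompressed_size = src_size;
+//   set_compressed_size_bound(&sb, TOKU_ZLIB_METHOD);
+//   sb.compressed_ptr = toku_xmalloc(sb.compressed_size_bound);
+//   compress_sub_block(&sb, TOKU_ZLIB_METHOD);
+//   int r = decompress_sub_block(sb.compressed_ptr, sb.compressed_size,
+//                                dst, sb.uncompressed_size, sb.xsum);
+//   // r == 0 when the data and checksum round-trip cleanly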
diff --git a/storage/tokudb/PerconaFT/ft/serialize/wbuf.h b/storage/tokudb/PerconaFT/ft/serialize/wbuf.h
new file mode 100644
index 00000000..062294e2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/wbuf.h
@@ -0,0 +1,209 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <memory.h>
+#include <string.h>
+
+#include "portability/toku_htonl.h"
+
+#include "util/bytestring.h"
+#include "util/x1764.h"
+
+/* When serializing a value, write it into a buffer. */
+/* This code requires that the buffer be big enough to hold whatever you put into it. */
+/* This abstraction doesn't do a good job of hiding its internals.
+ * Why? The performance of this code is important, and we want to inline stuff */
+//Why is size here an int instead of DISKOFF like in the initializer?
+struct wbuf {
+ unsigned char *buf;
+ unsigned int size;
+ unsigned int ndone;
+ struct x1764 checksum; // The checksum state
+};
+
+static inline void wbuf_nocrc_init (struct wbuf *w, void *buf, unsigned int size) {
+ w->buf = (unsigned char *) buf;
+ w->size = size;
+ w->ndone = 0;
+}
+
+static inline void wbuf_init (struct wbuf *w, void *buf, unsigned int size) {
+ wbuf_nocrc_init(w, buf, size);
+ toku_x1764_init(&w->checksum);
+}
+
+static inline size_t wbuf_get_woffset(struct wbuf *w) {
+ return w->ndone;
+}
+
+/* Write a character. */
+static inline void wbuf_nocrc_char (struct wbuf *w, unsigned char ch) {
+ assert(w->ndone<w->size);
+ w->buf[w->ndone++]=ch;
+}
+
+/* Write a uint8_t (a single byte). */
+static inline void wbuf_nocrc_uint8_t (struct wbuf *w, uint8_t ch) {
+ assert(w->ndone<w->size);
+ w->buf[w->ndone++]=ch;
+}
+
+static inline void wbuf_char (struct wbuf *w, unsigned char ch) {
+ wbuf_nocrc_char (w, ch);
+ toku_x1764_add(&w->checksum, &w->buf[w->ndone-1], 1);
+}
+
+//Write an int that MUST be in network order regardless of disk order
+static void wbuf_network_int (struct wbuf *w, int32_t i) __attribute__((__unused__));
+static void wbuf_network_int (struct wbuf *w, int32_t i) {
+ assert(w->ndone + 4 <= w->size);
+ *(uint32_t*)(&w->buf[w->ndone]) = toku_htonl(i);
+ toku_x1764_add(&w->checksum, &w->buf[w->ndone], 4);
+ w->ndone += 4;
+}
+
+static inline void wbuf_nocrc_int (struct wbuf *w, int32_t i) {
+#if 0
+ wbuf_nocrc_char(w, i>>24);
+ wbuf_nocrc_char(w, i>>16);
+ wbuf_nocrc_char(w, i>>8);
+ wbuf_nocrc_char(w, i>>0);
+#else
+ assert(w->ndone + 4 <= w->size);
+ #if 0
+ w->buf[w->ndone+0] = i>>24;
+ w->buf[w->ndone+1] = i>>16;
+ w->buf[w->ndone+2] = i>>8;
+ w->buf[w->ndone+3] = i>>0;
+ #else
+ *(uint32_t*)(&w->buf[w->ndone]) = toku_htod32(i);
+ #endif
+ w->ndone += 4;
+#endif
+}
+
+static inline void wbuf_int (struct wbuf *w, int32_t i) {
+ wbuf_nocrc_int(w, i);
+ toku_x1764_add(&w->checksum, &w->buf[w->ndone-4], 4);
+}
+
+static inline void wbuf_nocrc_uint (struct wbuf *w, uint32_t i) {
+ wbuf_nocrc_int(w, (int32_t)i);
+}
+
+static inline void wbuf_uint (struct wbuf *w, uint32_t i) {
+ wbuf_int(w, (int32_t)i);
+}
+
+static inline uint8_t* wbuf_nocrc_reserve_literal_bytes(struct wbuf *w, uint32_t nbytes) {
+ assert(w->ndone + nbytes <= w->size);
+ uint8_t * dest = w->buf + w->ndone;
+ w->ndone += nbytes;
+ return dest;
+}
+
+static inline void wbuf_nocrc_literal_bytes(struct wbuf *w, const void *bytes_bv, uint32_t nbytes) {
+ const unsigned char *bytes = (const unsigned char *) bytes_bv;
+#if 0
+ { int i; for (i=0; i<nbytes; i++) wbuf_nocrc_char(w, bytes[i]); }
+#else
+ assert(w->ndone + nbytes <= w->size);
+ memcpy(w->buf + w->ndone, bytes, (size_t)nbytes);
+ w->ndone += nbytes;
+#endif
+}
+
+static inline void wbuf_literal_bytes(struct wbuf *w, const void *bytes_bv, uint32_t nbytes) {
+ wbuf_nocrc_literal_bytes(w, bytes_bv, nbytes);
+ toku_x1764_add(&w->checksum, &w->buf[w->ndone-nbytes], nbytes);
+}
+
+static void wbuf_nocrc_bytes (struct wbuf *w, const void *bytes_bv, uint32_t nbytes) {
+ wbuf_nocrc_uint(w, nbytes);
+ wbuf_nocrc_literal_bytes(w, bytes_bv, nbytes);
+}
+
+static void wbuf_bytes (struct wbuf *w, const void *bytes_bv, uint32_t nbytes) {
+ wbuf_uint(w, nbytes);
+ wbuf_literal_bytes(w, bytes_bv, nbytes);
+}
+
+static void wbuf_nocrc_ulonglong (struct wbuf *w, uint64_t ull) {
+ wbuf_nocrc_uint(w, (uint32_t)(ull>>32));
+ wbuf_nocrc_uint(w, (uint32_t)(ull&0xFFFFFFFF));
+}
+
+static void wbuf_ulonglong (struct wbuf *w, uint64_t ull) {
+ wbuf_uint(w, (uint32_t)(ull>>32));
+ wbuf_uint(w, (uint32_t)(ull&0xFFFFFFFF));
+}
+
+static inline void wbuf_nocrc_uint64_t(struct wbuf *w, uint64_t ull) {
+ wbuf_nocrc_ulonglong(w, ull);
+}
+
+
+static inline void wbuf_uint64_t(struct wbuf *w, uint64_t ull) {
+ wbuf_ulonglong(w, ull);
+}
+
+static inline void wbuf_nocrc_bool (struct wbuf *w, bool b) {
+ wbuf_nocrc_uint8_t(w, (uint8_t)(b ? 1 : 0));
+}
+
+static inline void wbuf_nocrc_BYTESTRING (struct wbuf *w, BYTESTRING v) {
+ wbuf_nocrc_bytes(w, v.data, v.len);
+}
+
+static inline void wbuf_BYTESTRING (struct wbuf *w, BYTESTRING v) {
+ wbuf_bytes(w, v.data, v.len);
+}
+
+static inline void wbuf_uint8_t (struct wbuf *w, uint8_t v) {
+ wbuf_char(w, v);
+}
+
+static inline void wbuf_nocrc_uint32_t (struct wbuf *w, uint32_t v) {
+ wbuf_nocrc_uint(w, v);
+}
+
+static inline void wbuf_uint32_t (struct wbuf *w, uint32_t v) {
+ wbuf_uint(w, v);
+}
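+
+// A short usage sketch of the pattern described at the top of this header.
+// The caller must size the buffer for everything it will write; the
+// toku_x1764_finish() call is assumed to come from util/x1764.h:
+//
+//   unsigned char space[64];
+//   struct wbuf w;
+//   wbuf_init(&w, space, sizeof space);  // checksummed variant
+//   wbuf_uint(&w, 42);                   // 4 bytes in disk byte order
+//   wbuf_bytes(&w, "key", 3);            // 4-byte length prefix + 3 literal bytes
+//   uint32_t xsum = toku_x1764_finish(&w.checksum);
+//   // w.ndone == 11: the number of bytes written so far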
diff --git a/storage/tokudb/PerconaFT/ft/serialize/workset.h b/storage/tokudb/PerconaFT/ft/serialize/workset.h
new file mode 100644
index 00000000..295eb73c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/serialize/workset.h
@@ -0,0 +1,135 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_list.h>
+#include <toku_pthread.h>
+
+extern toku_instr_key *ws_worker_wait_key;
+
+// The work struct is the base class for work to be done by some threads
+struct work {
+ struct toku_list next;
+};
+
+// The workset struct contains the set of work to be done by some threads
+struct workset {
+ toku_mutex_t lock;
+ struct toku_list worklist; // a list of work
+ int refs; // number of workers that have a reference on the workset
+ toku_cond_t worker_wait; // a condition variable used to wait for all of the workers to release their references on the workset
+};
+
+static inline void workset_init(struct workset *ws) {
+ toku_mutex_init(*workset_lock_mutex_key, &ws->lock, nullptr);
+ toku_list_init(&ws->worklist);
+ ws->refs = 1; // the calling thread gets a reference
+ toku_cond_init(*ws_worker_wait_key, &ws->worker_wait, nullptr);
+}
+
+static inline void workset_destroy(struct workset *ws) {
+ invariant(toku_list_empty(&ws->worklist));
+ toku_cond_destroy(&ws->worker_wait);
+ toku_mutex_destroy(&ws->lock);
+}
+
+static inline void
+workset_lock(struct workset *ws) {
+ toku_mutex_lock(&ws->lock);
+}
+
+static inline void
+workset_unlock(struct workset *ws) {
+ toku_mutex_unlock(&ws->lock);
+}
+
+// Put work in the workset. Assume the workset is already locked.
+static inline void
+workset_put_locked(struct workset *ws, struct work *w) {
+ toku_list_push(&ws->worklist, &w->next);
+}
+
+// Put work in the workset
+static inline void
+workset_put(struct workset *ws, struct work *w) {
+ workset_lock(ws);
+ workset_put_locked(ws, w);
+ workset_unlock(ws);
+}
+
+// Get work from the workset
+static inline struct work *
+workset_get(struct workset *ws) {
+ workset_lock(ws);
+ struct work *w = NULL;
+ if (!toku_list_empty(&ws->worklist)) {
+ struct toku_list *l = toku_list_pop_head(&ws->worklist);
+ w = toku_list_struct(l, struct work, next);
+ }
+ workset_unlock(ws);
+ return w;
+}
+
+// Add references to the workset
+static inline void
+workset_add_ref(struct workset *ws, int refs) {
+ workset_lock(ws);
+ ws->refs += refs;
+ workset_unlock(ws);
+}
+
+// Release a reference on the workset
+static inline void
+workset_release_ref(struct workset *ws) {
+ workset_lock(ws);
+ if (--ws->refs == 0) {
+ toku_cond_broadcast(&ws->worker_wait);
+ }
+ workset_unlock(ws);
+}
+
+// Wait until all of the worker threads have released their reference on the workset
+static inline void
+workset_join(struct workset *ws) {
+ workset_lock(ws);
+ while (ws->refs != 0) {
+ toku_cond_wait(&ws->worker_wait, &ws->lock);
+ }
+ workset_unlock(ws);
+}
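+
+// A minimal usage sketch of the reference-counting protocol above;
+// process_work() stands in for a caller-defined handler and is not part of
+// this API:
+//
+//   static void *worker(void *arg) {
+//       struct workset *ws = (struct workset *) arg;
+//       struct work *w;
+//       while ((w = workset_get(ws)) != nullptr)
+//           process_work(w);
+//       workset_release_ref(ws);  // wakes workset_join once the last ref drops
+//       return arg;
+//   }
+//
+//   // Producer side: workset_init(), workset_put() the items, workset_add_ref()
+//   // once per worker thread, then release the producer's own reference and
+//   // call workset_join() to wait for the workers.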
diff --git a/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt
new file mode 100644
index 00000000..270ec976
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/CMakeLists.txt
@@ -0,0 +1,144 @@
+set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE DONT_DEPRECATE_ERRNO)
+
+if(BUILD_TESTING OR BUILD_FT_TESTS)
+ function(add_ft_test bin)
+ add_toku_test(ft ${bin} ${ARGN})
+ endfunction(add_ft_test)
+ function(add_ft_test_aux name bin)
+ add_toku_test_aux(ft ${name} ${bin} ${ARGN})
+ endfunction(add_ft_test_aux)
+
+ ## get a list of the sources in this directory
+ file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc)
+ set(tests)
+ foreach(src ${srcs})
+ get_filename_component(base ${src} NAME_WE)
+ list(APPEND tests ${base})
+ endforeach(src)
+
+ ## this macro will remove the test from the list of source files so it
+ ## doesn't end up getting the default test rule applied to it
+ macro(declare_custom_tests)
+ foreach(test ${ARGN})
+ list(REMOVE_ITEM tests ${test})
+ endforeach(test)
+ endmacro(declare_custom_tests)
+
+ declare_custom_tests(logcursor-fw logcursor-bw)
+ add_test(ft/logcursor-fw echo "logcursor-fw must be run manually (needs logs to iterate over).")
+ add_test(ft/logcursor-bw echo "logcursor-bw must be run manually (needs logs to iterate over).")
+
+ foreach(test ${tests})
+ add_executable(${test} ${test}.cc)
+ target_link_libraries(${test} ft ${LIBTOKUPORTABILITY})
+ set_target_properties(${test} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+ add_space_separated_property(TARGET ${test} COMPILE_FLAGS -fvisibility=hidden)
+ endforeach(test)
+
+ ## declare some tests that should be run with specific options
+
+ declare_custom_tests(test-assert)
+ add_ft_test_aux(test-assertA test-assert)
+ add_ft_test_aux(test-assertB test-assert notok)
+ set_tests_properties(ft/test-assertA ft/test-assertB PROPERTIES WILL_FAIL TRUE)
+ add_ft_test_aux(test-assertC test-assert ok)
+ setup_toku_test_properties(ft/test-assertC test-assertC)
+
+ declare_custom_tests(benchmark-test)
+ add_ft_test(benchmark-test -q 1)
+ add_ft_test_aux(benchmark-test_256 benchmark-test --valsize 256 --periter 131072 --verify --verify_period 4096 1)
+
+ declare_custom_tests(ftloader-test-merge-files-dbufio)
+ add_ft_test(ftloader-test-merge-files-dbufio -r 8000 -s)
+
+ declare_custom_tests(ftloader-test-extractor)
+ add_ft_test_aux(ftloader-test-extractor-1 ftloader-test-extractor -s -r 1000 --rowsets 1000 --asc)
+ add_ft_test_aux(ftloader-test-extractor-2 ftloader-test-extractor -s -r 1000 --rowsets 1000 --dsc)
+ add_ft_test_aux(ftloader-test-extractor-3 ftloader-test-extractor -s -r 1000 --rowsets 1000 --random)
+ add_ft_test_aux(ftloader-test-extractor-1a ftloader-test-extractor -s -r 1000 --rowsets 1000 --asc)
+ add_ft_test_aux(ftloader-test-extractor-2a ftloader-test-extractor -s -r 1000 --rowsets 1000 --dsc)
+ add_ft_test_aux(ftloader-test-extractor-3a ftloader-test-extractor -s -r 1000 --rowsets 1000 --random)
+ add_ft_test_aux(ftloader-test-extractor-4a ftloader-test-extractor -s -r 1000 --rowsets 3 --asc)
+ add_ft_test_aux(ftloader-test-extractor-5a ftloader-test-extractor -s -r 1000 --rowsets 3 --asc --asc-poison)
+
+ declare_custom_tests(ftloader-test-extractor-errors)
+ add_ft_test_aux(ftloader-test-extractor-errors-1 ftloader-test-extractor-errors -w -m -u -r 1)
+ add_ft_test_aux(ftloader-test-extractor-errors-2 ftloader-test-extractor-errors -m -r 10000)
+
+ declare_custom_tests(ftloader-test-writer)
+ add_ft_test_aux(ftloader-test-writer-1 ftloader-test-writer -r 1 -s)
+ add_ft_test_aux(ftloader-test-writer-1000 ftloader-test-writer -r 1000 -s)
+ add_ft_test_aux(ftloader-test-writer-100000 ftloader-test-writer -r 100000 -s)
+ add_ft_test_aux(ftloader-test-writer-1000000 ftloader-test-writer -r 1000000 -s)
+ add_ft_test_aux(ftloader-test-writer-1-x ftloader-test-writer -r 1 -s -x 42)
+ add_ft_test_aux(ftloader-test-writer-1000-x ftloader-test-writer -r 1000 -s -x 42)
+ add_ft_test_aux(ftloader-test-writer-100000-x ftloader-test-writer -r 100000 -s -x 42)
+ add_ft_test_aux(ftloader-test-writer-1000000-x ftloader-test-writer -r 1000000 -s -x 42)
+
+ declare_custom_tests(ftloader-test-writer-errors)
+ add_ft_test_aux(ftloader-test-writer-errors-1 ftloader-test-writer-errors -w -m -u -r 100000)
+ add_ft_test_aux(ftloader-test-writer-errors-2 ftloader-test-writer-errors -s -w -m -u -r 10000)
+ add_ft_test_aux(ftloader-test-writer-errors-3 ftloader-test-writer-errors -s -r 20000 --malloc_limit 0 --realloc_errors)
+ add_ft_test_aux(ftloader-test-writer-errors-4 ftloader-test-writer-errors -s -m --malloc_limit 0 -r 10000)
+
+ declare_custom_tests(ft-serialize-benchmark)
+ add_ft_test(ft-serialize-benchmark 92 200000)
+ declare_custom_tests(bnc-insert-benchmark)
+ add_ft_test(bnc-insert-benchmark 100 4096000 1000)
+
+ declare_custom_tests(cachetable-5097)
+ add_ft_test_aux(cachetable-5097-enabled cachetable-5097 enable_pe)
+ add_ft_test_aux(cachetable-5097-disabled cachetable-5097 disable_pe)
+
+ add_ft_test_aux(ftdump-test-generate ft-test)
+ add_test(NAME ft/ftdump-test
+ COMMAND $<TARGET_FILE:tokuftdump> "ftdump-test-generate.ctest-data"
+ )
+ set_tests_properties(ft/ftdump-test PROPERTIES
+ DEPENDS ft/ftdump-test-generate
+ REQUIRED_FILES "ftdump-test-generate.ctest-data"
+ )
+
+ ## keyrange has some inequality assumptions that were broken by
+ ## promotion; they seem benign but are complicated, so for now we skip
+ ## this test. When we get some time, we should fix it and re-enable it.
+ list(REMOVE_ITEM tests keyrange)
+
+ foreach(test ${tests})
+ add_ft_test(${test})
+ endforeach(test)
+
+ set_property(TEST ft/upgrade_test_simple APPEND PROPERTY ENVIRONMENT "TOKUDB_DATA=${TOKUDB_DATA}")
+
+ declare_custom_tests(test-upgrade-recovery-logs)
+ file(GLOB upgrade_tests "${TOKUDB_DATA}/upgrade-recovery-logs-??-clean")
+ file(GLOB upgrade_tests "${CMAKE_CURRENT_SOURCE_DIR}/upgrade.data/upgrade-recovery-logs-??-clean")
+ foreach(test ${upgrade_tests})
+ get_filename_component(test_basename "${test}" NAME)
+ add_ft_test_aux(test-${test_basename} test-upgrade-recovery-logs ${test})
+ endforeach(test)
+ file(GLOB upgrade_tests "${TOKUDB_DATA}/upgrade-recovery-logs-??-dirty")
+ file(GLOB upgrade_tests "${CMAKE_CURRENT_SOURCE_DIR}/upgrade.data/upgrade-recovery-logs-??-dirty")
+ foreach(test ${upgrade_tests})
+ get_filename_component(test_basename "${test}" NAME)
+ add_ft_test_aux(test-${test_basename} test-upgrade-recovery-logs ${test})
+ endforeach(test)
+
+ ## give some tests that would normally time out 1 hour to complete
+ set(long_tests
+ ft/ftloader-test-extractor-3a
+ ft/log-test7
+ ft/recovery-bad-last-entry
+ ft/subblock-test-compression
+ ft/upgrade_test_simple
+ )
+ set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600)
+ ## some take even longer, with valgrind
+ set(extra_long_tests
+ ft/benchmark-test
+ ft/benchmark-test_256
+ ft/is_empty
+ ft/subblock-test-checksum
+ )
+ set_tests_properties(${extra_long_tests} PROPERTIES TIMEOUT 7200)
+endif(BUILD_TESTING OR BUILD_FT_TESTS)
diff --git a/storage/tokudb/PerconaFT/ft/tests/benchmark-test.cc b/storage/tokudb/PerconaFT/ft/tests/benchmark-test.cc
new file mode 100644
index 00000000..3fba467d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/benchmark-test.cc
@@ -0,0 +1,254 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Insert a bunch of stuff */
+#include <toku_time.h>
+
+static const char *fname ="sinsert.ft";
+
+enum { SERIAL_SPACING = 1<<6 };
+int64_t ITEMS_TO_INSERT_PER_ITERATION = 1<<20;
+int64_t BOUND_INCREASE_PER_ITERATION = SERIAL_SPACING*ITEMS_TO_INSERT_PER_ITERATION;
+
+enum { NODE_SIZE = 1<<20 };
+enum { BASEMENT_NODE_SIZE = 128 * 1024 };
+
+static int nodesize = NODE_SIZE;
+static int basementnodesize = BASEMENT_NODE_SIZE;
+static enum toku_compression_method compression_method = TOKU_DEFAULT_COMPRESSION_METHOD;
+static int keysize = sizeof (long long);
+static int valsize = sizeof (long long);
+static int do_verify = 0; /* Do a slow verify every verify_period inserts. */
+static int verify_period = 256; /* how many inserts between verifies. */
+
+static int do_serial = 1;
+static int do_random = 1;
+
+static CACHETABLE ct;
+static FT_HANDLE t;
+
+static void setup (void) {
+ int r;
+ unlink(fname);
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &t, nodesize, basementnodesize, compression_method, ct, nullptr, toku_builtin_compare_fun); assert(r==0);
+}
+
+static void toku_shutdown (void) {
+ int r;
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+static void long_long_to_array (unsigned char *a, unsigned long long l) {
+ int i;
+ for (i=0; i<8; i++)
+ a[i] = (l>>(56-8*i))&0xff;
+}
+
+static void insert (long long v) {
+ unsigned char kc[keysize], vc[valsize];
+ DBT kt, vt;
+ memset(kc, 0, sizeof kc);
+ long_long_to_array(kc, v);
+ memset(vc, 0, sizeof vc);
+ long_long_to_array(vc, v);
+ toku_ft_insert(t, toku_fill_dbt(&kt, kc, keysize), toku_fill_dbt(&vt, vc, valsize), 0);
+ if (do_verify) {
+ static int inserts_since_last_verify = 0;
+ inserts_since_last_verify++;
+ if (inserts_since_last_verify % verify_period == 0) {
+ toku_cachetable_verify(ct);
+ }
+ }
+}
+
+static void serial_insert_from (long long from) {
+ long long i;
+ for (i=0; i<ITEMS_TO_INSERT_PER_ITERATION; i++) {
+ insert((from+i)*SERIAL_SPACING);
+ }
+}
+
+static long long llrandom (void) {
+ return (((long long)(random()))<<32) + random();
+}
+
+static void random_insert_below (long long below) {
+ long long i;
+ assert(0 < below);
+ for (i=0; i<ITEMS_TO_INSERT_PER_ITERATION; i++) {
+ insert(llrandom()%below);
+ }
+}
+
+static void biginsert (long long n_elements, struct timeval *starttime) {
+ long long i;
+ struct timeval t1,t2;
+ int iteration;
+ for (i=0, iteration=0; i<n_elements; i+=ITEMS_TO_INSERT_PER_ITERATION, iteration++) {
+ gettimeofday(&t1,0);
+ if (do_serial)
+ serial_insert_from(i);
+ gettimeofday(&t2,0);
+ if (verbose && do_serial) {
+ printf("serial %9.6fs %8.0f/s ", toku_tdiff(&t2, &t1), ITEMS_TO_INSERT_PER_ITERATION/toku_tdiff(&t2, &t1));
+ fflush(stdout);
+ }
+ gettimeofday(&t1,0);
+ if (do_random)
+ random_insert_below((i+ITEMS_TO_INSERT_PER_ITERATION)*SERIAL_SPACING);
+ gettimeofday(&t2,0);
+ if (verbose && do_random) {
+ printf("random %9.6fs %8.0f/s ", toku_tdiff(&t2, &t1), ITEMS_TO_INSERT_PER_ITERATION/toku_tdiff(&t2, &t1));
+ fflush(stdout);
+ }
+ if (verbose && (do_serial || do_random)) {
+ double f = 0;
+ if (do_serial) f += 1.0;
+ if (do_random) f += 1.0;
+ printf("cumulative %9.6fs %8.0f/s\n", toku_tdiff(&t2, starttime), (ITEMS_TO_INSERT_PER_ITERATION*f/toku_tdiff(&t2, starttime))*(iteration+1));
+ fflush(stdout);
+ }
+ }
+}
+
+static void usage(void) {
+ printf("benchmark-test [OPTIONS] [ITERATIONS]\n");
+ printf("[-v]\n");
+ printf("[-q]\n");
+ printf("[--nodesize NODESIZE]\n");
+ printf("[--keysize KEYSIZE]\n");
+ printf("[--valsize VALSIZE]\n");
+ printf("[--periter PERITER]\n");
+ printf("[--noserial]\n");
+ printf("[--norandom]\n");
+ printf("[--verify]\n");
+ printf("[--verify_period PERIOD]\n");
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ verbose=1; //Default
+ /* parse parameters */
+ int i;
+ for (i=1; i<argc; i++) {
+ const char *arg = argv[i];
+ if (arg[0] != '-')
+ break;
+ if (strcmp(arg, "--nodesize") == 0) {
+ if (i+1 < argc) {
+ i++;
+ nodesize = atoi(argv[i]);
+ }
+ } else if (strcmp(arg, "--keysize") == 0) {
+ if (i+1 < argc) {
+ i++;
+ keysize = atoi(argv[i]);
+ }
+ } else if (strcmp(arg, "--periter") == 0) {
+ if (i+1 < argc) {
+ i++;
+ ITEMS_TO_INSERT_PER_ITERATION = atoi(argv[i]);
+ }
+ } else if (strcmp(arg, "--valsize") == 0) {
+ if (i+1 < argc) {
+ i++;
+ valsize = atoi(argv[i]);
+ }
+ } else if (strcmp(arg, "--verify")==0) {
+ do_verify = 1;
+ } else if (strcmp(arg, "--verify_period")==0) {
+ if (i+1 < argc) {
+ i++;
+ verify_period = atoi(argv[i]);
+ }
+ } else if (strcmp(arg, "--noserial") == 0) {
+ do_serial = 0;
+ } else if (strcmp(arg, "--norandom") == 0) {
+ do_random = 0;
+ } else if (strcmp(arg, "-v")==0) {
+ verbose++;
+ } else if (strcmp(arg, "-q")==0) {
+ verbose = 0;
+ } else {
+ usage();
+ return 1;
+ }
+ }
+ fname = TOKU_TEST_FILENAME;
+
+ struct timeval t1,t2,t3;
+ long long total_n_items;
+ if (i < argc) {
+ char *end;
+ errno=0;
+ total_n_items = ITEMS_TO_INSERT_PER_ITERATION * (long long) strtol(argv[i], &end, 10);
+ assert(errno==0);
+ assert(*end==0);
+ assert(end!=argv[i]);
+ } else {
+ total_n_items = 1LL<<22; // 1LL<<16
+ }
+
+ if (verbose) {
+ printf("nodesize=%d\n", nodesize);
+ printf("keysize=%d\n", keysize);
+ printf("valsize=%d\n", valsize);
+ printf("Serial and random insertions of %" PRId64 " per batch\n", ITEMS_TO_INSERT_PER_ITERATION);
+ fflush(stdout);
+ }
+ setup();
+ gettimeofday(&t1,0);
+ biginsert(total_n_items, &t1);
+ gettimeofday(&t2,0);
+ toku_shutdown();
+ gettimeofday(&t3,0);
+ if (verbose) {
+ int f = 0;
+ if (do_serial) f += 1;
+ if (do_random) f += 1;
+ printf("Shutdown %9.6fs\n", toku_tdiff(&t3, &t2));
+ printf("Total time %9.6fs for %lld insertions = %8.0f/s\n", toku_tdiff(&t3, &t1), f*total_n_items, f*total_n_items/toku_tdiff(&t3, &t1));
+ fflush(stdout);
+ }
+ unlink(fname);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/block_allocator_test.cc b/storage/tokudb/PerconaFT/ft/tests/block_allocator_test.cc
new file mode 100644
index 00000000..3eff52b9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/block_allocator_test.cc
@@ -0,0 +1,280 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static void ba_alloc(BlockAllocator *ba, uint64_t size, uint64_t *answer) {
+ ba->Validate();
+ uint64_t actual_answer;
+ ba->AllocBlock(512 * size, &actual_answer);
+ ba->Validate();
+
+ invariant(actual_answer % 512 == 0);
+ *answer = actual_answer / 512;
+}
+
+static void ba_free(BlockAllocator *ba, uint64_t offset, uint64_t size) {
+ ba->Validate();
+ ba->FreeBlock(offset * 512, 512 * size);
+ ba->Validate();
+}
+
+static void ba_check_l(BlockAllocator *ba,
+ uint64_t blocknum_in_layout_order,
+ uint64_t expected_offset,
+ uint64_t expected_size) {
+ uint64_t actual_offset, actual_size;
+ int r = ba->NthBlockInLayoutOrder(
+ blocknum_in_layout_order, &actual_offset, &actual_size);
+ invariant(r == 0);
+ invariant(expected_offset * 512 == actual_offset);
+ invariant(expected_size * 512 == actual_size);
+}
+
+static void ba_check_none(BlockAllocator *ba,
+ uint64_t blocknum_in_layout_order) {
+ uint64_t actual_offset, actual_size;
+ int r = ba->NthBlockInLayoutOrder(
+ blocknum_in_layout_order, &actual_offset, &actual_size);
+ invariant(r == -1);
+}
+
+// Simple block allocator test
+static void test_ba0() {
+ BlockAllocator allocator;
+ BlockAllocator *ba = &allocator;
+ ba->Create(100 * 512, 1 * 512);
+ invariant(ba->AllocatedLimit() == 100 * 512);
+
+ uint64_t b2, b3, b4, b5, b6, b7;
+ ba_alloc(ba, 100, &b2);
+ ba_alloc(ba, 100, &b3);
+ ba_alloc(ba, 100, &b4);
+ ba_alloc(ba, 100, &b5);
+ ba_alloc(ba, 100, &b6);
+ ba_alloc(ba, 100, &b7);
+ ba_free(ba, b2, 100);
+ ba_alloc(ba, 100, &b2);
+ ba_free(ba, b4, 100);
+ ba_free(ba, b6, 100);
+ uint64_t b8, b9;
+ ba_alloc(ba, 100, &b4);
+ ba_free(ba, b2, 100);
+ ba_alloc(ba, 100, &b6);
+ ba_alloc(ba, 100, &b8);
+ ba_alloc(ba, 100, &b9);
+ ba_free(ba, b6, 100);
+ ba_free(ba, b7, 100);
+ ba_free(ba, b8, 100);
+ ba_alloc(ba, 100, &b6);
+ ba_alloc(ba, 100, &b7);
+ ba_free(ba, b4, 100);
+ ba_alloc(ba, 100, &b4);
+
+ ba->Destroy();
+}
+
+// Allocate and free blocks manually to get coverage of all the code in the block allocator.
+static void test_ba1(int n_initial) {
+ BlockAllocator allocator;
+ BlockAllocator *ba = &allocator;
+ ba->Create(0 * 512, 1 * 512);
+
+ int n_blocks = 0;
+ uint64_t blocks[1000];
+ for (int i = 0; i < 1000; i++) {
+ if (i < n_initial || random() % 2 == 0) {
+ if (n_blocks < 1000) {
+ ba_alloc(ba, 1, &blocks[n_blocks]);
+ // printf("A[%d]=%ld\n", n_blocks, blocks[n_blocks]);
+ n_blocks++;
+ }
+ } else {
+ if (n_blocks > 0) {
+ int blocknum = random() % n_blocks;
+ // printf("F[%d]=%ld\n", blocknum, blocks[blocknum]);
+ ba_free(ba, blocks[blocknum], 1);
+ blocks[blocknum] = blocks[n_blocks - 1];
+ n_blocks--;
+ }
+ }
+ }
+
+ ba->Destroy();
+}
+
+// Check to see if it is first fit or best fit.
+static void test_ba2(void) {
+ BlockAllocator allocator;
+ BlockAllocator *ba = &allocator;
+ uint64_t b[6];
+ enum { BSIZE = 1024 };
+ ba->Create(100 * 512, BSIZE * 512);
+ invariant(ba->AllocatedLimit() == 100 * 512);
+
+ ba_check_l(ba, 0, 0, 100);
+ ba_check_none(ba, 1);
+
+ ba_alloc(ba, 100, &b[0]);
+ ba_check_l(ba, 0, 0, 100);
+ ba_check_l(ba, 1, BSIZE, 100);
+ ba_check_none(ba, 2);
+
+ ba_alloc(ba, BSIZE + 100, &b[1]);
+ ba_check_l(ba, 0, 0, 100);
+ ba_check_l(ba, 1, BSIZE, 100);
+ ba_check_l(ba, 2, 2 * BSIZE, BSIZE + 100);
+ ba_check_none(ba, 3);
+
+ ba_alloc(ba, 100, &b[2]);
+ ba_check_l(ba, 0, 0, 100);
+ ba_check_l(ba, 1, BSIZE, 100);
+ ba_check_l(ba, 2, 2 * BSIZE, BSIZE + 100);
+ ba_check_l(ba, 3, 4 * BSIZE, 100);
+ ba_check_none(ba, 4);
+
+ ba_alloc(ba, 100, &b[3]);
+ ba_alloc(ba, 100, &b[4]);
+ ba_alloc(ba, 100, &b[5]);
+ ba_check_l(ba, 0, 0, 100);
+ ba_check_l(ba, 1, BSIZE, 100);
+ ba_check_l(ba, 2, 2 * BSIZE, BSIZE + 100);
+ ba_check_l(ba, 3, 4 * BSIZE, 100);
+ ba_check_l(ba, 4, 5 * BSIZE, 100);
+ ba_check_l(ba, 5, 6 * BSIZE, 100);
+ ba_check_l(ba, 6, 7 * BSIZE, 100);
+ ba_check_none(ba, 7);
+
+ ba_free(ba, 4 * BSIZE, 100);
+ ba_check_l(ba, 0, 0, 100);
+ ba_check_l(ba, 1, BSIZE, 100);
+ ba_check_l(ba, 2, 2 * BSIZE, BSIZE + 100);
+ ba_check_l(ba, 3, 5 * BSIZE, 100);
+ ba_check_l(ba, 4, 6 * BSIZE, 100);
+ ba_check_l(ba, 5, 7 * BSIZE, 100);
+ ba_check_none(ba, 6);
+
+ uint64_t b2;
+ ba_alloc(ba, 100, &b2);
+ invariant(b2 == 4 * BSIZE);
+ ba_check_l(ba, 0, 0, 100);
+ ba_check_l(ba, 1, BSIZE, 100);
+ ba_check_l(ba, 2, 2 * BSIZE, BSIZE + 100);
+ ba_check_l(ba, 3, 4 * BSIZE, 100);
+ ba_check_l(ba, 4, 5 * BSIZE, 100);
+ ba_check_l(ba, 5, 6 * BSIZE, 100);
+ ba_check_l(ba, 6, 7 * BSIZE, 100);
+ ba_check_none(ba, 7);
+
+ ba_free(ba, BSIZE, 100);
+ ba_free(ba, 5 * BSIZE, 100);
+ ba_check_l(ba, 0, 0, 100);
+ ba_check_l(ba, 1, 2 * BSIZE, BSIZE + 100);
+ ba_check_l(ba, 2, 4 * BSIZE, 100);
+ ba_check_l(ba, 3, 6 * BSIZE, 100);
+ ba_check_l(ba, 4, 7 * BSIZE, 100);
+ ba_check_none(ba, 5);
+
+ // This alloc will allocate the first block after the reserve space in the
+ // case of first fit.
+ uint64_t b3;
+ ba_alloc(ba, 100, &b3);
+ invariant(b3 == BSIZE); // First fit.
+ // if (b3==5*BSIZE) then it is next fit.
+
+ // Now 5*BSIZE is free
+ uint64_t b5;
+ ba_alloc(ba, 100, &b5);
+ invariant(b5 == 5 * BSIZE);
+ ba_check_l(ba, 0, 0, 100);
+ ba_check_l(ba, 1, BSIZE, 100);
+ ba_check_l(ba, 2, 2 * BSIZE, BSIZE + 100);
+ ba_check_l(ba, 3, 4 * BSIZE, 100);
+ ba_check_l(ba, 4, 5 * BSIZE, 100);
+ ba_check_l(ba, 5, 6 * BSIZE, 100);
+ ba_check_l(ba, 6, 7 * BSIZE, 100);
+ ba_check_none(ba, 7);
+
+ // Now all blocks are busy
+ uint64_t b6, b7, b8;
+ ba_alloc(ba, 100, &b6);
+ ba_alloc(ba, 100, &b7);
+ ba_alloc(ba, 100, &b8);
+ invariant(b6 == 8 * BSIZE);
+ invariant(b7 == 9 * BSIZE);
+ invariant(b8 == 10 * BSIZE);
+ ba_check_l(ba, 0, 0, 100);
+ ba_check_l(ba, 1, BSIZE, 100);
+ ba_check_l(ba, 2, 2 * BSIZE, BSIZE + 100);
+ ba_check_l(ba, 3, 4 * BSIZE, 100);
+ ba_check_l(ba, 4, 5 * BSIZE, 100);
+ ba_check_l(ba, 5, 6 * BSIZE, 100);
+ ba_check_l(ba, 6, 7 * BSIZE, 100);
+ ba_check_l(ba, 7, 8 * BSIZE, 100);
+ ba_check_l(ba, 8, 9 * BSIZE, 100);
+ ba_check_l(ba, 9, 10 * BSIZE, 100);
+ ba_check_none(ba, 10);
+
+ ba_free(ba, 9 * BSIZE, 100);
+ ba_free(ba, 7 * BSIZE, 100);
+ uint64_t b9;
+ ba_alloc(ba, 100, &b9);
+ invariant(b9 == 7 * BSIZE);
+
+ ba_free(ba, 5 * BSIZE, 100);
+ ba_free(ba, 2 * BSIZE, BSIZE + 100);
+ uint64_t b10, b11;
+ ba_alloc(ba, 100, &b10);
+ invariant(b10 == 2 * BSIZE);
+ ba_alloc(ba, 100, &b11);
+ invariant(b11 == 3 * BSIZE);
+ ba_alloc(ba, 100, &b11);
+ invariant(b11 == 5 * BSIZE);
+
+ ba->Destroy();
+}
+
+int test_main(int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ test_ba0();
+ test_ba1(0);
+ test_ba1(10);
+ test_ba1(20);
+ test_ba2();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/bnc-insert-benchmark.cc b/storage/tokudb/PerconaFT/ft/tests/bnc-insert-benchmark.cc
new file mode 100644
index 00000000..04b3743d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/bnc-insert-benchmark.cc
@@ -0,0 +1,139 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include "test.h"
+
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+const double USECS_PER_SEC = 1000000.0;
+
+static int
+long_key_cmp(DB *UU(e), const DBT *a, const DBT *b)
+{
+ const long *CAST_FROM_VOIDP(x, a->data);
+ const long *CAST_FROM_VOIDP(y, b->data);
+ return (*x > *y) - (*x < *y);
+}
+
+static void
+run_test(unsigned long eltsize, unsigned long nodesize, unsigned long repeat)
+{
+ int cur = 0;
+ const int n = 1024;
+ long keys[n];
+ char *vals[n];
+ for (int i = 0; i < n; ++i) {
+ keys[i] = rand();
+ XMALLOC_N(eltsize - (sizeof keys[i]), vals[i]);
+ unsigned int j = 0;
+ char *val = vals[i];
+ for (; j < eltsize - (sizeof keys[i]) - sizeof(int); j += sizeof(int)) {
+ int *p = cast_to_typeof(p) &val[j];
+ *p = rand();
+ }
+ for (; j < eltsize - (sizeof keys[i]); ++j) {
+ char *p = &val[j];
+ *p = (rand() & 0xff);
+ }
+ }
+ XIDS xids_0 = toku_xids_get_root_xids();
+ XIDS xids_123;
+ int r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123);
+ CKERR(r);
+
+ NONLEAF_CHILDINFO bnc;
+ long long unsigned nbytesinserted = 0;
+ struct timeval t[2];
+ gettimeofday(&t[0], NULL);
+
+ toku::comparator cmp;
+ cmp.create(long_key_cmp, nullptr);
+
+ for (unsigned int i = 0; i < repeat; ++i) {
+ bnc = toku_create_empty_nl();
+ for (; toku_bnc_nbytesinbuf(bnc) <= nodesize; ++cur) {
+ toku_bnc_insert_msg(bnc,
+ &keys[cur % n], sizeof keys[cur % n],
+ vals[cur % n], eltsize - (sizeof keys[cur % n]),
+ FT_NONE, next_dummymsn(), xids_123, true,
+ cmp); assert_zero(r);
+ }
+ nbytesinserted += toku_bnc_nbytesinbuf(bnc);
+ destroy_nonleaf_childinfo(bnc);
+ }
+
+ for (int i = 0; i < n; ++i) {
+ toku_free(vals[i]);
+ vals[i] = nullptr;
+ }
+
+ toku_xids_destroy(&xids_123);
+
+ gettimeofday(&t[1], NULL);
+ double dt;
+ dt = (t[1].tv_sec - t[0].tv_sec) + ((t[1].tv_usec - t[0].tv_usec) / USECS_PER_SEC);
+ double mbrate = ((double) nbytesinserted / (1 << 20)) / dt;
+ long long unsigned eltrate = (long) (cur / dt);
+ printf("%0.03lf MB/sec\n", mbrate);
+ printf("%llu elts/sec\n", eltrate);
+
+ cmp.destroy();
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ unsigned long eltsize, nodesize, repeat;
+
+ initialize_dummymsn();
+ if (argc != 4) {
+ fprintf(stderr, "Usage: %s <eltsize> <nodesize> <repeat>\n", argv[0]);
+ return 2;
+ }
+ eltsize = strtoul(argv[1], NULL, 0);
+ nodesize = strtoul(argv[2], NULL, 0);
+ repeat = strtoul(argv[3], NULL, 0);
+
+ run_test(eltsize, nodesize, repeat);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-4357.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-4357.cc
new file mode 100644
index 00000000..dd76b7fb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-4357.cc
@@ -0,0 +1,107 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+CACHEFILE f1;
+
+static void *pin_nonblocking(void *arg) {
+ void* v1;
+ int r = toku_cachetable_get_and_pin_nonblocking(
+ f1,
+ make_blocknum(1),
+ toku_cachetable_hash(f1, make_blocknum(1)),
+ &v1,
+ def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_EXPENSIVE,
+ NULL,
+ NULL
+ );
+ assert(r==TOKUDB_TRY_AGAIN);
+ return arg;
+}
+
+
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ r = toku_cachetable_get_and_pin(f1,
+ make_blocknum(1),
+ toku_cachetable_hash(f1, make_blocknum(1)),
+ &v1,
+ def_write_callback(NULL),
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ NULL);
+ toku_pthread_t pin_nonblocking_tid;
+ r = toku_pthread_create(toku_uninstrumented,
+ &pin_nonblocking_tid,
+ nullptr,
+ pin_nonblocking,
+ nullptr);
+ assert_zero(r);
+ // sleep 3 seconds
+ usleep(3 * 1024 * 1024);
+ r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(1), NULL, NULL);
+ assert_zero(r);
+
+ void *ret;
+ r = toku_pthread_join(pin_nonblocking_tid, &ret);
+ assert_zero(r);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-4365.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-4365.cc
new file mode 100644
index 00000000..75b6eb3f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-4365.cc
@@ -0,0 +1,135 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+CACHEFILE f1;
+
+static void *pin_nonblocking(void *arg) {
+ void* v1;
+ int r = toku_cachetable_get_and_pin_nonblocking(
+ f1,
+ make_blocknum(1),
+ toku_cachetable_hash(f1, make_blocknum(1)),
+ &v1,
+ def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_EXPENSIVE,
+ NULL,
+ NULL
+ );
+ assert(r==TOKUDB_TRY_AGAIN);
+ return arg;
+}
+
+static void *put_same_key(void *arg) {
+ toku_cachetable_put(
+ f1,
+ make_blocknum(1),
+ toku_cachetable_hash(f1,make_blocknum(1)),
+ NULL,
+ make_pair_attr(4),
+ def_write_callback(NULL),
+ put_callback_nop
+ );
+ return arg;
+}
+
+toku_pthread_t put_tid;
+
+static void test_remove_key(CACHEKEY *UU(cachekey),
+ bool UU(for_checkpoint),
+ void *UU(extra)) {
+ int r = toku_pthread_create(
+ toku_uninstrumented, &put_tid, nullptr, put_same_key, nullptr);
+ assert_zero(r);
+}
+
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ r = toku_cachetable_get_and_pin(f1,
+ make_blocknum(1),
+ toku_cachetable_hash(f1, make_blocknum(1)),
+ &v1,
+ def_write_callback(nullptr),
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ nullptr);
+ toku_pthread_t pin_nonblocking_tid;
+ r = toku_pthread_create(toku_uninstrumented,
+ &pin_nonblocking_tid,
+ nullptr,
+ pin_nonblocking,
+ nullptr);
+ assert_zero(r);
+ // sleep 3 seconds
+ usleep(3 * 1024 * 1024);
+ r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(1), test_remove_key, NULL);
+ assert_zero(r);
+
+ void *ret;
+ r = toku_pthread_join(pin_nonblocking_tid, &ret);
+ assert_zero(r);
+ r = toku_pthread_join(put_tid, &ret);
+ assert_zero(r);
+
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_CLEAN, make_pair_attr(2));
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ for (int i = 0; i < 20; i++) {
+ cachetable_test();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-5097.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-5097.cc
new file mode 100644
index 00000000..b9c299eb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-5097.cc
@@ -0,0 +1,192 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+CACHEFILE f1;
+CACHEFILE f2;
+
+bool check_flush;
+bool dirty_flush_called;
+bool check_pe_callback;
+bool pe_callback_called;
+bool enable_partial_eviction;
+
+CACHETABLE ct;
+
+static int
+pe_callback (
+ void *ftnode_pv __attribute__((__unused__)),
+ PAIR_ATTR bytes_to_free __attribute__((__unused__)),
+ void* extraargs __attribute__((__unused__)),
+ void (*finalize)(PAIR_ATTR bytes_freed, void *extra),
+ void *finalize_extra
+ )
+{
+ if (check_pe_callback) {
+ pe_callback_called = true;
+ }
+ usleep(4*1024*1024);
+ finalize(make_pair_attr(1), finalize_extra);
+ return 0;
+}
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void **dd __attribute__((__unused__)),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ if (check_flush && w) {
+ dirty_flush_called = true;
+ }
+}
+
+static void *f2_pin(void *arg) {
+ int r;
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ //
+ // These booleans for pe_callback just ensure that the test is working as
+ // we expect it to. We expect the get_and_pin to cause a partial eviction
+ // of f1's PAIR, reducing its size from 8 to 1, and we expect that to be
+ // enough so that the unpin does not invoke a partial eviction. This is
+ // just to ensure that the bug is being exercised.
+ //
+ check_pe_callback = true;
+ r = toku_cachetable_get_and_pin(f2, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert(r == 0);
+ ct->ev.signal_eviction_thread();
+ usleep(1*1024*1024);
+ if (enable_partial_eviction)
+ assert(pe_callback_called);
+ else
+ assert(!pe_callback_called);
+ pe_callback_called = false;
+ r = toku_test_cachetable_unpin(f2, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ check_pe_callback = false;
+ assert(!pe_callback_called);
+ assert(r == 0);
+
+ return arg;
+}
+
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ check_flush = false;
+ dirty_flush_called = false;
+
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ evictor_test_helpers::disable_ev_thread(&ct->ev); // disable eviction thread
+
+ toku_set_enable_partial_eviction(ct, enable_partial_eviction);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU);
+ assert_zero(r);
+ char fname1[TOKU_PATH_MAX + 1];
+ unlink(toku_path_join(fname1, 2, TOKU_TEST_FILENAME, "test1.dat"));
+ char fname2[TOKU_PATH_MAX + 1];
+ unlink(toku_path_join(fname2, 2, TOKU_TEST_FILENAME, "test2.dat"));
+
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(r == 0);
+ r = toku_cachetable_openf(&f2, ct, fname2, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.pe_callback = pe_callback;
+ wc.flush_callback = flush;
+ // pin and unpin a node 20 times, just to get clock count up
+ for (int i = 0; i < 20; i++) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert(r == 0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert(r == 0);
+ }
+
+ // at this point, we have a dirty PAIR in the cachetable associated with
+ // cachefile f1
+ // launch a thread that will put another PAIR in the cachetable, and get
+ // partial eviction started
+ toku_pthread_t tid;
+ r = toku_pthread_create(
+ toku_uninstrumented, &tid, nullptr, f2_pin, nullptr);
+ assert_zero(r);
+
+ usleep(2 * 1024 * 1024);
+ check_flush = true;
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ if (enable_partial_eviction)
+ assert(dirty_flush_called);
+ else
+ assert(!dirty_flush_called);
+ check_flush = false;
+
+ void *ret;
+ r = toku_pthread_join(tid, &ret);
+ assert_zero(r);
+
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f2, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ if (argc!=2) { printf("argcount should be 2.\n"); exit(1); }
+ if (strcmp(argv[1], "enable_pe") == 0) {
+ enable_partial_eviction = true;
+ } else {
+ enable_partial_eviction = false;
+ }
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc
new file mode 100644
index 00000000..183c2c8b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc
@@ -0,0 +1,201 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+
+//
+// This test verifies that if a node is pinned by a thread
+// doing get_and_pin_nonblocking while another thread is trying
+// to unpin_and_remove it, nothing bad happens.
+//
+
+CACHEFILE f1;
+PAIR p1;
+PAIR p2;
+
+
+static int
+fetch_one(CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void **dd __attribute__((__unused__)),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *value = NULL;
+ *sizep = make_pair_attr(8);
+ assert(k.b == 1);
+ p1 = p;
+ return 0;
+}
+
+static int
+fetch_two (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void **dd __attribute__((__unused__)),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *value = NULL;
+ *sizep = make_pair_attr(8);
+ assert(k.b == 2);
+ p2 = p;
+ return 0;
+}
+
+toku_pthread_t unpin_and_remove_tid;
+
+static void *unpin_and_remove_one(void *UU(arg)) {
+ int r = toku_cachetable_unpin_and_remove(
+ f1,
+ p1,
+ NULL,
+ NULL
+ );
+ assert_zero(r);
+ return arg;
+}
+
+static void
+unpin_two (void* UU(v)) {
+ int r = toku_cachetable_unpin_ct_prelocked_no_flush(
+ f1,
+ p2,
+ CACHETABLE_DIRTY,
+ make_pair_attr(8)
+ );
+ assert_zero(r);
+
+ // at this point, we have p1 pinned, want to start a thread to do an
+ // unpin_and_remove
+ // on p1
+ r = toku_pthread_create(toku_uninstrumented,
+ &unpin_and_remove_tid,
+ nullptr,
+ unpin_and_remove_one,
+ nullptr);
+ assert_zero(r);
+ // sleep to give a chance for the unpin_and_remove to get going
+ usleep(512*1024);
+}
+
+static void *repin_one(void *UU(arg)) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ struct unlockers unlockers = {true, unpin_two, NULL, NULL};
+ void* v1;
+ int r = toku_cachetable_get_and_pin_nonblocking(
+ f1,
+ make_blocknum(1),
+ 1,
+ &v1,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ PL_WRITE_EXPENSIVE,
+ NULL,
+ &unlockers
+ );
+ assert(r == TOKUDB_TRY_AGAIN);
+ return arg;
+}
+
+
+
+static void
+cachetable_test (void) {
+ const int test_limit = 1000;
+ int r;
+ toku_pair_list_set_lock_size(2); // set two bucket mutexes
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+
+    // bring pairs 1 and 2 into memory; they stay pinned here and are released
+    // via the unlockers callback and the unpin_and_remove thread
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch_one, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v1, wc, fetch_two, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+
+ toku_pthread_t tid1;
+ r = toku_pthread_create(
+ toku_uninstrumented, &tid1, nullptr, repin_one, nullptr);
+ assert_zero(r);
+
+ void *ret;
+ r = toku_pthread_join(tid1, &ret);
+ assert_zero(r);
+ r = toku_pthread_join(unpin_and_remove_tid, &ret);
+ assert_zero(r);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+    // the test ought to run a bunch of times in the hope of hitting the bug
+ uint32_t num_test_runs = 1;
+ for (uint32_t i = 0; i < num_test_runs; i++) {
+ if (verbose) {
+ printf("starting test run %" PRIu32 " \n", i);
+ }
+ cachetable_test();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-5978.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-5978.cc
new file mode 100644
index 00000000..c8a6f366
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-5978.cc
@@ -0,0 +1,235 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+
+//
+// This test verifies that the behavior that originally caused
+// #5978 is fixed. Here is what we do. We have four pairs with
+// blocknums and fullhashes of 1,2,3,4. The cachetable has only
+// two bucket mutexes, so 1 and 3 share a pair mutex, as do 2 and 4.
+// We pin all four with expensive write locks. Then, on background threads,
+// we call get_and_pin_nonblocking on 1, where the unlockers callback unpins 3, and
+// we call get_and_pin_nonblocking on 2, where the unlockers callback unpins 4. Run this
+// enough times and we should see a deadlock before the fix, and no deadlock
+// after the fix.
+//
+
+CACHEFILE f1;
+PAIR p3;
+PAIR p4;
+
+
+static int
+fetch_three (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void **dd __attribute__((__unused__)),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *value = NULL;
+ *sizep = make_pair_attr(8);
+ assert(k.b == 3);
+ p3 = p;
+ return 0;
+}
+
+static int
+fetch_four (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void **dd __attribute__((__unused__)),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *value = NULL;
+ *sizep = make_pair_attr(8);
+ assert(k.b == 4);
+ p4 = p;
+ return 0;
+}
+
+
+
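+// unlockers callbacks: unpin_four releases the pair for blocknum 3 and unpin_three
+// releases the pair for blocknum 4; each shares a bucket mutex with the blocknum
+// being re-pinned by the corresponding background thread below.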
+static void
+unpin_four (void* UU(v)) {
+ int r = toku_cachetable_unpin_ct_prelocked_no_flush(
+ f1,
+ p3,
+ CACHETABLE_DIRTY,
+ make_pair_attr(8)
+ );
+ assert_zero(r);
+}
+
+static void
+unpin_three (void* UU(v)) {
+ int r = toku_cachetable_unpin_ct_prelocked_no_flush(
+ f1,
+ p4,
+ CACHETABLE_DIRTY,
+ make_pair_attr(8)
+ );
+ assert_zero(r);
+}
+
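+// try a nonblocking write pin of blocknum 1, which the main thread still holds;
+// the unlockers release blocknum 3 and the call must return TOKUDB_TRY_AGAIN.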
+static void *repin_one(void *UU(arg)) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ struct unlockers unlockers = {true, unpin_four, NULL, NULL};
+ void* v1;
+ int r = toku_cachetable_get_and_pin_nonblocking(
+ f1,
+ make_blocknum(1),
+ 1,
+ &v1,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ PL_WRITE_EXPENSIVE,
+ NULL,
+ &unlockers
+ );
+ assert(r == TOKUDB_TRY_AGAIN);
+ return arg;
+}
+
+
+static void *repin_two(void *UU(arg)) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ struct unlockers unlockers = {true, unpin_three, NULL, NULL};
+ void* v1;
+ int r = toku_cachetable_get_and_pin_nonblocking(
+ f1,
+ make_blocknum(2),
+ 2,
+ &v1,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ PL_WRITE_EXPENSIVE,
+ NULL,
+ &unlockers
+ );
+ assert(r == TOKUDB_TRY_AGAIN);
+ return arg;
+}
+
+
+static void
+cachetable_test (void) {
+ const int test_limit = 1000;
+ int r;
+ toku_pair_list_set_lock_size(2); // set two bucket mutexes
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+
+ // bring pairs 1 and 2 into memory, then unpin
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+
+
+ // now pin pairs 3 and 4
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v1, wc, fetch_three, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v1, wc, fetch_four, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+
+ toku_pthread_t tid1;
+ toku_pthread_t tid2;
+ r = toku_pthread_create(
+ toku_uninstrumented, &tid1, nullptr, repin_one, nullptr);
+ assert_zero(r);
+ r = toku_pthread_create(
+ toku_uninstrumented, &tid2, nullptr, repin_two, nullptr);
+ assert_zero(r);
+
+ // unpin 1 and 2 so tid1 and tid2 can make progress
+ usleep(512*1024);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+
+
+ void *ret;
+ r = toku_pthread_join(tid1, &ret);
+ assert_zero(r);
+ r = toku_pthread_join(tid2, &ret);
+ assert_zero(r);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+    // the test ought to run a bunch of times in the hope of hitting the bug
+ uint32_t num_test_runs = 30;
+ for (uint32_t i = 0; i < num_test_runs; i++) {
+ if (verbose) {
+ printf("starting test run %" PRIu32 " \n", i);
+ }
+ cachetable_test();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-all-write.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-all-write.cc
new file mode 100644
index 00000000..efc844d5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-all-write.cc
@@ -0,0 +1,96 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
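+// flush callback: when write_me is set, sleep for several seconds so that writing
+// out the dirty pair is slow.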
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ /* Do nothing */
+ if (verbose) { printf("FLUSH: %d write_me %d\n", (int)k.b, w); }
+ if (w) {
+ usleep (5*1024*1024);
+ }
+}
+
+
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ void* v2;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+
+
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));
+ //r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, 8);
+
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-pending.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-pending.cc
new file mode 100644
index 00000000..024e2f5d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-pending.cc
@@ -0,0 +1,225 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Make sure that the pending stuff gets checkpointed, but subsequent changes don't, even with concurrent updates.
+#include "test.h"
+#include <stdio.h>
+#include <unistd.h>
+#include "cachetable-test.h"
+#include "cachetable/checkpoint.h"
+#include <portability/toku_atomic.h>
+
+static int N; // how many items in the table
+static CACHEFILE cf;
+static CACHETABLE ct;
+int *values;
+
+static const int item_size = sizeof(int);
+
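+// n_flush/n_write_me/n_keep_me are bumped atomically by the flush callback; the
+// test compares them against N after each checkpoint.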
+static volatile int n_flush, n_write_me, n_keep_me, n_fetch;
+
+static void
+sleep_random (void)
+{
+ toku_timespec_t req = {.tv_sec = 0,
+ .tv_nsec = random()%1000000}; //Max just under 1ms
+ nanosleep(&req, NULL);
+}
+
+int expect_value = 42; // initially 42, later 43
+
+static void
+flush (
+ CACHEFILE UU(thiscf),
+ int UU(fd),
+ CACHEKEY UU(key),
+ void *value,
+ void** UU(dd),
+ void *UU(extraargs),
+ PAIR_ATTR size,
+ PAIR_ATTR* UU(new_size),
+ bool write_me,
+ bool keep_me,
+ bool UU(for_checkpoint),
+ bool UU(is_clone)
+ )
+{
+ // printf("f");
+ assert(size.size== item_size);
+ int *CAST_FROM_VOIDP(v, value);
+ if (*v!=expect_value) printf("got %d expect %d\n", *v, expect_value);
+ assert(*v==expect_value);
+ (void)toku_sync_fetch_and_add(&n_flush, 1);
+ if (write_me) (void)toku_sync_fetch_and_add(&n_write_me, 1);
+ if (keep_me) (void)toku_sync_fetch_and_add(&n_keep_me, 1);
+ sleep_random();
+}
+
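+// update thread: wait for the checkpoint's first flush, then re-pin every item,
+// bump its value from 42 to 43, and unpin it dirty, racing with the checkpoint.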
+static void*
+do_update (void *UU(ignore))
+{
+    while (n_flush==0); // wait until the checkpoint's first flush has run
+ int i;
+ for (i=0; i<N; i++) {
+ CACHEKEY key = make_blocknum(i);
+ uint32_t hi = toku_cachetable_hash(cf, key);
+ void *vv;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ int r = toku_cachetable_get_and_pin(cf, key, hi, &vv, wc, fetch_die, def_pf_req_callback, def_pf_callback, true, 0);
+ //printf("g");
+ assert(r==0);
+ PAIR_ATTR attr;
+ r = toku_cachetable_get_attr(cf, key, hi, &attr);
+ assert(r==0);
+ assert(attr.size==sizeof(int));
+ int *CAST_FROM_VOIDP(v, vv);
+ assert(*v==42);
+ *v = 43;
+ //printf("[%d]43\n", i);
+ r = toku_test_cachetable_unpin(cf, key, hi, CACHETABLE_DIRTY, make_pair_attr(item_size));
+ sleep_random();
+ }
+ return 0;
+}
+
+static void*
+do_checkpoint (void *UU(v))
+{
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ int r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(r == 0);
+ return 0;
+}
+
+// put n items into the cachetable, mark them dirty, and then concurrently
+// do a checkpoint (in which the callback functions are slow) while
+// replacing the n items with new values;
+// make sure that the stuff that was checkpointed includes only the old versions,
+// then do another checkpoint and make sure the new items are written
+
+static void checkpoint_pending(void) {
+ if (verbose) { printf("%s:%d n=%d\n", __FUNCTION__, __LINE__, N); fflush(stdout); }
+ const int test_limit = N;
+ int r;
+ toku_cachetable_create(&ct, test_limit*sizeof(int), ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ r = unlink(fname1); if (r!=0) CKERR2(get_error_errno(), ENOENT);
+ r = toku_cachetable_openf(&cf, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(cf);
+
+ // Insert items into the cachetable. All dirty.
+ int i;
+ for (i=0; i<N; i++) {
+ CACHEKEY key = make_blocknum(i);
+ uint32_t hi = toku_cachetable_hash(cf, key);
+ values[i] = 42;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ toku_cachetable_put(cf, key, hi, &values[i], make_pair_attr(sizeof(int)), wc, put_callback_nop);
+ assert(r == 0);
+
+ r = toku_test_cachetable_unpin(cf, key, hi, CACHETABLE_DIRTY, make_pair_attr(item_size));
+ assert(r == 0);
+ }
+
+ // the checkpoint should cause n writes, but since n <= the cachetable size,
+ // all items should be kept in the cachetable
+ n_flush = n_write_me = n_keep_me = n_fetch = 0;
+ expect_value = 42;
+ // printf("E42\n");
+ toku_pthread_t checkpoint_thread, update_thread;
+ r = toku_pthread_create(toku_uninstrumented,
+ &checkpoint_thread,
+ nullptr,
+ do_checkpoint,
+ nullptr);
+ assert(r == 0);
+ r = toku_pthread_create(
+ toku_uninstrumented, &update_thread, nullptr, do_update, nullptr);
+ assert(r == 0);
+ r = toku_pthread_join(checkpoint_thread, 0);
+ assert(r == 0);
+ r = toku_pthread_join(update_thread, 0);
+ assert(r == 0);
+
+ assert(n_flush == N && n_write_me == N && n_keep_me == N);
+
+ // after the checkpoint, all of the items should be 43
+ //printf("E43\n");
+ n_flush = n_write_me = n_keep_me = n_fetch = 0; expect_value = 43;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(r == 0);
+ assert(n_flush == N && n_write_me == N && n_keep_me == N);
+
+    // a subsequent checkpoint should cause no flushes or writes, since all of the items are clean
+ n_flush = n_write_me = n_keep_me = n_fetch = 0;
+
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(r == 0);
+ assert(n_flush == 0 && n_write_me == 0 && n_keep_me == 0);
+
+ toku_cachefile_close(&cf, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ {
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ srandom(tv.tv_sec * 1000000 + tv.tv_usec);
+ }
+ {
+ int i;
+ for (i=1; i<argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ }
+ }
+ for (N=1; N<=128; N*=2) {
+ int myvalues[N];
+ values = myvalues;
+ checkpoint_pending();
+ //printf("\n");
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-pinned-nodes.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-pinned-nodes.cc
new file mode 100644
index 00000000..0846974d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-pinned-nodes.cc
@@ -0,0 +1,155 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+uint64_t clean_val = 0;
+uint64_t dirty_val = 0;
+
+bool check_me;
+bool flush_called;
+
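+// flush callback: once check_me is set (during end_checkpoint), assert that the
+// only write is the for-checkpoint write of the dirty pair (dirty_val).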
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ /* Do nothing */
+ if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
+ //usleep (5*1024*1024);
+ // if the checkpoint is pending, assert that it is of what we made dirty
+ if (check_me) {
+ flush_called = true;
+ assert(c);
+ assert(e == &dirty_val);
+ assert(v == &dirty_val);
+ assert(keep);
+ assert(w);
+ }
+}
+
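+// fetch callback: hand back &dirty_val when extraargs is set, otherwise &clean_val,
+// with a dummy 8-byte attr.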
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ if (extraargs) {
+ *value = &dirty_val;
+ }
+ else {
+ *value = &clean_val;
+ }
+ *sizep = make_pair_attr(8);
+ return 0;
+}
+
+static void
+cachetable_test (void) {
+ const int test_limit = 20;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ void* v2;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(&dirty_val);
+ wc.flush_callback = flush;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &dirty_val);
+ wc.write_extraargs = NULL;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+
+ //
+    // Here is the test: we have two pairs, v1 is dirty, v2 is clean, but both are currently pinned.
+    // Then we begin a checkpoint, which should theoretically mark both as pending, but
+    // flush will be called only for v1, because v1 is dirty.
+ //
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+
+
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));
+
+ check_me = true;
+ flush_called = false;
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert(r==0);
+ assert(flush_called);
+ check_me = false;
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-prefetched-nodes.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-prefetched-nodes.cc
new file mode 100644
index 00000000..6155d237
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-prefetched-nodes.cc
@@ -0,0 +1,158 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+uint64_t clean_val = 0;
+uint64_t dirty_val = 0;
+
+bool check_me;
+bool flush_called;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ /* Do nothing */
+ if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
+ //usleep (5*1024*1024);
+ // if the checkpoint is pending, assert that it is of what we made dirty
+ if (check_me) {
+ flush_called = true;
+ assert(c);
+ assert(e == &dirty_val);
+ assert(v == &dirty_val);
+ assert(keep);
+ assert(w);
+ }
+}
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ if (extraargs) {
+ *value = &dirty_val;
+ *dirtyp = true;
+ }
+ else {
+ *value = &clean_val;
+ *dirtyp = false;
+ }
+ *sizep = make_pair_attr(8);
+ return 0;
+}
+
+static void
+cachetable_test (void) {
+ const int test_limit = 20;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
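+    // prefetch both pairs instead of pinning them; the fetch callback marks pair 1
+    // dirty via *dirtyp so only it has to be written by the checkpoint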
+ bool doing_prefetch = false;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(&dirty_val);
+ wc.flush_callback = flush;
+ r = toku_cachefile_prefetch(f1, make_blocknum(1), 1, wc, fetch, def_pf_req_callback, def_pf_callback, &dirty_val, &doing_prefetch);
+ assert(doing_prefetch);
+ doing_prefetch = false;
+ wc.write_extraargs = NULL;
+ r = toku_cachefile_prefetch(f1, make_blocknum(2), 2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL, &doing_prefetch);
+ assert(doing_prefetch);
+
+ //
+    // Here is the test: we have two pairs, v1 is dirty, v2 is clean, and both were just
+    // brought into the cachetable via prefetch. Then we begin a checkpoint, which should
+    // theoretically mark both as pending, but flush will be called only for v1, because v1 is dirty.
+ //
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+
+
+ check_me = true;
+ flush_called = false;
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert(r==0);
+ assert(flush_called);
+ check_me = false;
+
+
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-test.cc
new file mode 100644
index 00000000..b848b8cc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpoint-test.cc
@@ -0,0 +1,186 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+#include <stdio.h>
+#include <unistd.h>
+
+
+#include "cachetable/checkpoint.h"
+
+static const int item_size = 1;
+
+static int n_flush, n_write_me, n_keep_me, n_fetch;
+
+static void flush(
+ CACHEFILE UU(cf),
+ int UU(fd),
+ CACHEKEY UU(key),
+ void *UU(value),
+ void** UU(dd),
+ void *UU(extraargs),
+ PAIR_ATTR size,
+ PAIR_ATTR* UU(new_size),
+ bool write_me,
+ bool keep_me,
+ bool UU(for_checkpoint),
+ bool UU(is_clone)
+ )
+{
+ //cf = cf; key = key; value = value; extraargs = extraargs;
+ // assert(key == make_blocknum((long)value));
+ assert(size.size == item_size);
+ n_flush++;
+ if (write_me) n_write_me++;
+ if (keep_me) n_keep_me++;
+}
+
+static int callback_was_called = 0;
+static int callback2_was_called = 0;
+
+static void checkpoint_callback(void * extra) {
+ int * x = (int*) extra;
+ (*x)++;
+ if (verbose) printf("checkpoint_callback called %d (should be 1-16)\n", *x);
+}
+
+static void checkpoint_callback2(void * extra) {
+ int * x = (int*) extra;
+ (*x)++;
+ if (verbose) printf("checkpoint_callback2 called %d (should be 1-16)\n", *x);
+}
+
+// put n items into the cachetable, maybe mark them dirty, do a checkpoint, and
+// verify that all of the items have been written and are clean.
+
+static void cachetable_checkpoint_test(int n, enum cachetable_dirty dirty) {
+ if (verbose) printf("%s:%d n=%d dirty=%d\n", __FUNCTION__, __LINE__, n, (int) dirty);
+ const int test_limit = n;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ // insert items into the cachetable. all should be dirty
+ int i;
+ for (i=0; i<n; i++) {
+ CACHEKEY key = make_blocknum(i);
+ uint32_t hi = toku_cachetable_hash(f1, key);
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ toku_cachetable_put(f1, key, hi, (void *)(long)i, make_pair_attr(1), wc, put_callback_nop);
+
+ r = toku_test_cachetable_unpin(f1, key, hi, dirty, make_pair_attr(item_size));
+ assert(r == 0);
+
+ void *v;
+ int its_dirty;
+ long long its_pin;
+ long its_size;
+ r = toku_cachetable_get_key_state(ct, key, f1, &v, &its_dirty, &its_pin, &its_size);
+ if (r != 0)
+ continue;
+ assert(its_dirty == CACHETABLE_DIRTY);
+ assert(its_pin == 0);
+ assert(its_size == item_size);
+ }
+
+ // the checkpoint should cause n writes, but since n <= the cachetable size,
+ // all items should be kept in the cachetable
+ n_flush = n_write_me = n_keep_me = n_fetch = 0;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, NULL, checkpoint_callback, &callback_was_called, checkpoint_callback2, &callback2_was_called, CLIENT_CHECKPOINT);
+ assert(r == 0);
+ assert(callback_was_called != 0);
+ assert(callback2_was_called != 0);
+ assert(n_flush == n && n_write_me == n && n_keep_me == n);
+
+ // after the checkpoint, all of the items should be clean
+ for (i=0; i<n; i++) {
+ CACHEKEY key = make_blocknum(i);
+ uint32_t hi = toku_cachetable_hash(f1, key);
+ void *v;
+ r = toku_cachetable_maybe_get_and_pin(f1, key, hi, PL_WRITE_EXPENSIVE, &v);
+ if (r != 0)
+ continue;
+ r = toku_test_cachetable_unpin(f1, key, hi, CACHETABLE_CLEAN, make_pair_attr(item_size));
+ assert(r == 0);
+
+ int its_dirty;
+ long long its_pin;
+ long its_size;
+ r = toku_cachetable_get_key_state(ct, key, f1, &v, &its_dirty, &its_pin, &its_size);
+ if (r != 0)
+ continue;
+ assert(its_dirty == CACHETABLE_CLEAN);
+ assert(its_pin == 0);
+ assert(its_size == item_size);
+ }
+
+    // a subsequent checkpoint should cause no flushes or writes, since all of the items are clean
+ n_flush = n_write_me = n_keep_me = n_fetch = 0;
+
+
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(r == 0);
+ assert(n_flush == 0 && n_write_me == 0 && n_keep_me == 0);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ int i;
+ for (i=1; i<argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ }
+ for (i=0; i<8; i++) {
+ cachetable_checkpoint_test(i, CACHETABLE_CLEAN);
+ cachetable_checkpoint_test(i, CACHETABLE_DIRTY);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpointer-class.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpointer-class.cc
new file mode 100644
index 00000000..c3838a60
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-checkpointer-class.cc
@@ -0,0 +1,368 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable/cachetable-internal.h"
+#include "cachetable-test.h"
+
+//
+// Wrapper for the checkpointer and necessary
+// data to run the tests.
+//
+struct checkpointer_test {
+ checkpointer m_cp;
+ pair_list m_pl;
+
+ // Tests
+ void test_begin_checkpoint();
+ void test_pending_bits();
+ void test_end_checkpoint();
+
+ // Test Helper
+ void add_pairs(struct cachefile *cf,
+ ctpair pairs[],
+ uint32_t count,
+ uint32_t k);
+};
+
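+// helper: zero a stack-allocated cachefile, install dummy callbacks, and give it a
+// distinct fileid/filenum so it can be added to the checkpointer's cachefile list.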
+static void init_cachefile(CACHEFILE cf, int which_cf, bool for_checkpoint) {
+ memset(cf, 0, sizeof(*cf));
+ create_dummy_functions(cf);
+ cf->fileid = { 0, (unsigned) which_cf };
+ cf->filenum = { (unsigned) which_cf };
+ cf->for_checkpoint = for_checkpoint;
+}
+
+//------------------------------------------------------------------------------
+// test_begin_checkpoint() -
+//
+// Description:
+//
+void checkpointer_test::test_begin_checkpoint() {
+ cachefile_list cfl;
+ ZERO_STRUCT(cfl);
+ cfl.init();
+
+ cachetable ctbl;
+ ZERO_STRUCT(ctbl);
+ ctbl.list.init();
+
+ ZERO_STRUCT(m_cp);
+ m_cp.init(&ctbl.list, NULL, &ctbl.ev, &cfl);
+
+ // 1. Call checkpoint with NO cachefiles.
+ m_cp.begin_checkpoint();
+
+ // 2. Call checkpoint with ONE cachefile.
+ //cachefile cf;
+ struct cachefile cf;
+ init_cachefile(&cf, 0, false);
+ m_cp.m_cf_list->add_cf_unlocked(&cf);
+
+ m_cp.begin_checkpoint();
+ assert(m_cp.m_checkpoint_num_files == 1);
+ assert(cf.for_checkpoint == true);
+ m_cp.m_cf_list->remove_cf(&cf);
+
+ // 3. Call checkpoint with MANY cachefiles.
+ const uint32_t count = 3;
+ struct cachefile cfs[count];
+ for (uint32_t i = 0; i < count; ++i) {
+ init_cachefile(&cfs[i], i, false);
+ create_dummy_functions(&cfs[i]);
+ m_cp.m_cf_list->add_cf_unlocked(&cfs[i]);
+ }
+
+ m_cp.begin_checkpoint();
+ assert(m_cp.m_checkpoint_num_files == count);
+ for (uint32_t i = 0; i < count; ++i) {
+ assert(cfs[i].for_checkpoint == true);
+ cfl.remove_cf(&cfs[i]);
+ }
+ ctbl.list.destroy();
+ m_cp.destroy();
+ cfl.destroy();
+}
+
+//------------------------------------------------------------------------------
+// test_pending_bits() -
+//
+// Description:
+//
+void checkpointer_test::test_pending_bits() {
+ cachefile_list cfl;
+ ZERO_STRUCT(cfl);
+ cfl.init();
+
+ cachetable ctbl;
+ ZERO_STRUCT(ctbl);
+ ctbl.list.init();
+
+ ZERO_STRUCT(m_cp);
+ m_cp.init(&ctbl.list, NULL, &ctbl.ev, &cfl);
+
+ //
+ // 1. Empty hash chain.
+ //
+ m_cp.turn_on_pending_bits();
+
+ //
+ // 2. One entry in pair chain
+ //
+ struct cachefile cf;
+ cf.cachetable = &ctbl;
+ init_cachefile(&cf, 0, true);
+ m_cp.m_cf_list->add_cf_unlocked(&cf);
+ create_dummy_functions(&cf);
+
+ CACHEKEY k;
+ k.b = 0;
+ uint32_t hash = toku_cachetable_hash(&cf, k);
+
+ ctpair p;
+ CACHETABLE_WRITE_CALLBACK cb;
+
+ pair_attr_s attr;
+ attr.size = 0;
+ attr.nonleaf_size = 0;
+ attr.leaf_size = 0;
+ attr.rollback_size = 0;
+ attr.cache_pressure_size = 0;
+ attr.is_valid = true;
+
+ ZERO_STRUCT(p);
+ pair_init(&p,
+ &cf,
+ k,
+ NULL,
+ attr,
+ CACHETABLE_CLEAN,
+ hash,
+ cb,
+ NULL,
+ &ctbl.list);
+
+ m_cp.m_list->put(&p);
+
+ m_cp.turn_on_pending_bits();
+ assert(p.checkpoint_pending);
+ m_cp.m_list->evict_completely(&p);
+
+ //
+ // 3. Many hash chain entries.
+ //
+ const uint32_t count = 3;
+ ctpair pairs[count];
+ ZERO_ARRAY(pairs);
+ add_pairs(&cf, pairs, count, 0);
+
+ m_cp.turn_on_pending_bits();
+
+ for (uint32_t i = 0; i < count; ++i) {
+ assert(pairs[i].checkpoint_pending);
+ }
+ for (uint32_t i = 0; i < count; ++i) {
+ CACHEKEY key;
+ key.b = i;
+ uint32_t full_hash = toku_cachetable_hash(&cf, key);
+ PAIR pp = m_cp.m_list->find_pair(&cf, key, full_hash);
+ assert(pp);
+ m_cp.m_list->evict_completely(pp);
+ }
+
+ ctbl.list.destroy();
+ m_cp.destroy();
+ cfl.remove_cf(&cf);
+ cfl.destroy();
+}
+
+//------------------------------------------------------------------------------
+// add_pairs() -
+//
+// Description: Adds data (pairs) to the list referenced in the checkpointer.
+//
+void checkpointer_test::add_pairs(struct cachefile *cf,
+ ctpair pairs[],
+ uint32_t count,
+ uint32_t k)
+{
+ pair_attr_s attr;
+ attr.size = 0;
+ attr.nonleaf_size = 0;
+ attr.leaf_size = 0;
+ attr.rollback_size = 0;
+ attr.cache_pressure_size = 0;
+ attr.is_valid = true;
+ CACHETABLE_WRITE_CALLBACK cb;
+ ZERO_STRUCT(cb); // All nullptr
+
+ for (uint32_t i = k; i < count + k; ++i) {
+ CACHEKEY key;
+ key.b = i;
+ uint32_t full_hash = toku_cachetable_hash(cf, key);
+ pair_init(&(pairs[i]),
+ cf,
+ key,
+ nullptr,
+ attr,
+ CACHETABLE_CLEAN,
+ full_hash,
+ cb,
+ nullptr,
+ m_cp.m_list);
+
+ m_cp.m_list->put(&pairs[i]);
+ }
+}
+
+//------------------------------------------------------------------------------
+// get_number_pending_pairs() -
+//
+// Description: Helper function that iterates over pending list, and returns
+// the number of pairs discovered.
+//
+static uint32_t get_number_pending_pairs(pair_list *list)
+{
+ PAIR p;
+ uint32_t count = 0;
+ PAIR head = list->m_pending_head;
+ while((p = list->m_pending_head) != 0)
+ {
+ list->m_pending_head = list->m_pending_head->pending_next;
+ count++;
+ }
+
+ list->m_pending_head = head;
+ return count;
+}
+
+//------------------------------------------------------------------------------
+// test_end_checkpoint() -
+//
+// Description: Adds pairs to the list, before and after a checkpoint.
+//
+void checkpointer_test::test_end_checkpoint() {
+ // 1. Init test.
+ cachetable ctbl;
+ ZERO_STRUCT(ctbl);
+ ctbl.list.init();
+
+ cachefile_list cfl;
+ ZERO_STRUCT(cfl);
+ cfl.init();
+
+ struct cachefile cf;
+ init_cachefile(&cf, 0, true);
+
+ ZERO_STRUCT(m_cp);
+ m_cp.init(&ctbl.list, NULL, &ctbl.ev, &cfl);
+ m_cp.m_cf_list->add_cf_unlocked(&cf);
+
+ // 2. Add data before running checkpoint.
+ const uint32_t count = 6;
+ ctpair pairs[count];
+ ZERO_ARRAY(pairs);
+ add_pairs(&cf, pairs, count / 2, 0);
+ assert(m_cp.m_list->m_n_in_table == count / 2);
+
+ // 3. Call begin checkpoint.
+ m_cp.begin_checkpoint();
+ assert(m_cp.m_checkpoint_num_files == 1);
+ for (uint32_t i = 0; i < count / 2; ++i)
+ {
+ assert(pairs[i].checkpoint_pending);
+ }
+
+ // 4. Add new data between starting and stopping checkpoint.
+ add_pairs(&cf, pairs, count / 2, count / 2);
+ assert(m_cp.m_list->m_n_in_table == count);
+    for (uint32_t i = count / 2; i < count; ++i)
+ {
+ assert(!pairs[i].checkpoint_pending);
+ }
+
+ uint32_t pending_pairs = 0;
+ pending_pairs = get_number_pending_pairs(m_cp.m_list);
+ assert(pending_pairs == count / 2);
+
+ // 5. Call end checkpoint
+ m_cp.end_checkpoint(NULL, NULL);
+
+ pending_pairs = get_number_pending_pairs(m_cp.m_list);
+ assert(pending_pairs == 0);
+
+ // Verify that none of the pairs are pending a checkpoint.
+ for (uint32_t i = 0; i < count; ++i)
+ {
+ assert(!pairs[i].checkpoint_pending);
+ }
+
+ // 6. Cleanup
+ for (uint32_t i = 0; i < count; ++i) {
+ CACHEKEY key;
+ key.b = i;
+ uint32_t full_hash = toku_cachetable_hash(&cf, key);
+ PAIR pp = m_cp.m_list->find_pair(&cf, key, full_hash);
+ assert(pp);
+ m_cp.m_list->evict_completely(pp);
+ }
+ cfl.remove_cf(&cf);
+ m_cp.destroy();
+ ctbl.list.destroy();
+ cfl.destroy();
+}
+
+
+//------------------------------------------------------------------------------
+// test_main() -
+//
+// Description:
+//
+int
+test_main(int argc, const char *argv[]) {
+ int r = 0;
+ default_parse_args(argc, argv);
+ checkpointer_test cp_test;
+
+ // Run the tests.
+ cp_test.test_begin_checkpoint();
+ cp_test.test_pending_bits();
+ cp_test.test_end_checkpoint();
+
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-checkpoint.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-checkpoint.cc
new file mode 100644
index 00000000..5afc1230
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-checkpoint.cc
@@ -0,0 +1,140 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+CACHEFILE f1;
+
+bool flush_called;
+
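+// flush callback: keep reporting cache pressure via *new_size; the only write we
+// expect is the checkpoint's write of the dirty pair, which sets flush_called.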
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ /* Do nothing */
+ if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
+ //usleep (5*1024*1024);
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 8;
+ *new_size = attr;
+ if (w) {
+ assert(!flush_called);
+ assert(c);
+ flush_called = true;
+ }
+}
+
+bool cleaner_called;
+
+static int
+cleaner_callback(
+ void* UU(ftnode_pv),
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ void* UU(extraargs)
+ )
+{
+ assert(blocknum.b == 1);
+ assert(fullhash == 1);
+ assert(!cleaner_called);
+ assert(flush_called);
+ cleaner_called = true;
+ int r = toku_test_cachetable_unpin(f1, blocknum, fullhash, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+ return 0;
+}
+
+
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.cleaner_callback = cleaner_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 8;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr);
+
+ cleaner_called = false;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ assert_zero(r);
+ toku_cleaner_thread_for_test(ct);
+ assert(cleaner_called);
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert(r==0);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-checkpoint2.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-checkpoint2.cc
new file mode 100644
index 00000000..16e6102a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-checkpoint2.cc
@@ -0,0 +1,140 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+CACHEFILE f1;
+
+bool flush_called;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ /* Do nothing */
+ if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
+ //usleep (5*1024*1024);
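+    // unlike cachetable-cleaner-checkpoint.cc, report zero cache pressure after this
+    // flush, so the cleaner thread should find nothing to clean (see the assert on
+    // cleaner_called in cachetable_test)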
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 0;
+ *new_size = attr;
+ if (w) {
+ assert(!flush_called);
+ assert(c);
+ flush_called = true;
+ }
+}
+
+bool cleaner_called;
+
+static int
+cleaner_callback(
+ void* UU(ftnode_pv),
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ void* UU(extraargs)
+ )
+{
+ assert(blocknum.b == 1);
+ assert(fullhash == 1);
+ assert(!cleaner_called);
+ assert(flush_called);
+ cleaner_called = true;
+ int r = toku_test_cachetable_unpin(f1, blocknum, fullhash, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+ return 0;
+}
+
+
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.cleaner_callback = cleaner_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 8;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr);
+
+ cleaner_called = false;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ assert_zero(r);
+ toku_cleaner_thread_for_test(ct);
+ assert(!cleaner_called);
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert(r==0);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc
new file mode 100644
index 00000000..c3125d0c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc
@@ -0,0 +1,169 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+//
+// This test verifies that the cachetable's size status counters (nonleaf, leaf,
+// rollback, cache pressure) track the accumulated PAIR_ATTRs of the pairs it holds,
+// both while pairs are being added and after some of them are evicted.
+//
+
+toku_mutex_t attr_mutex;
+
+// used to access engine status variables
+#define STATUS_VALUE(x) ct_test_status.status[CACHETABLE_STATUS_S::x].value.num
+
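+// a fixed set of PAIR_ATTRs; the test pins one pair per entry and expects the
+// cachetable's size counters to equal the running sums of these attrs.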
+const PAIR_ATTR attrs[] = {
+ { .size = 20, .nonleaf_size = 13, .leaf_size = 900, .rollback_size = 123, .cache_pressure_size = 403, .is_valid = true },
+ { .size = 21, .nonleaf_size = 16, .leaf_size = 910, .rollback_size = 113, .cache_pressure_size = 401, .is_valid = true },
+ { .size = 22, .nonleaf_size = 17, .leaf_size = 940, .rollback_size = 133, .cache_pressure_size = 402, .is_valid = true },
+ { .size = 23, .nonleaf_size = 18, .leaf_size = 931, .rollback_size = 153, .cache_pressure_size = 404, .is_valid = true },
+ { .size = 25, .nonleaf_size = 19, .leaf_size = 903, .rollback_size = 173, .cache_pressure_size = 413, .is_valid = true },
+ { .size = 26, .nonleaf_size = 10, .leaf_size = 903, .rollback_size = 193, .cache_pressure_size = 423, .is_valid = true },
+ { .size = 20, .nonleaf_size = 11, .leaf_size = 902, .rollback_size = 103, .cache_pressure_size = 433, .is_valid = true },
+ { .size = 29, .nonleaf_size = 12, .leaf_size = 909, .rollback_size = 113, .cache_pressure_size = 443, .is_valid = true }
+};
+const int n_pairs = (sizeof attrs) / (sizeof attrs[0]);
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ PAIR_ATTR *CAST_FROM_VOIDP(expect, e);
+ if (!keep) {
+ toku_mutex_lock(&attr_mutex); // purpose is to make this function single-threaded
+ expect->size -= s.size;
+ expect->nonleaf_size -= s.nonleaf_size;
+ expect->leaf_size -= s.leaf_size;
+ expect->rollback_size -= s.rollback_size;
+ expect->cache_pressure_size -= s.cache_pressure_size;
+ toku_mutex_unlock(&attr_mutex);
+ }
+}
+
+static void
+run_test (void) {
+ const int test_limit = 1000;
+ int r;
+ CACHETABLE ct;
+ toku_mutex_init(toku_uninstrumented, &attr_mutex, nullptr);
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ CACHETABLE_STATUS_S ct_test_status;
+ toku_cachetable_get_status(ct, &ct_test_status);
+ assert(STATUS_VALUE(CT_SIZE_NONLEAF) == 0);
+ assert(STATUS_VALUE(CT_SIZE_LEAF) == 0);
+ assert(STATUS_VALUE(CT_SIZE_ROLLBACK) == 0);
+ assert(STATUS_VALUE(CT_SIZE_CACHEPRESSURE) == 0);
+
+ void* vs[n_pairs];
+ PAIR_ATTR expect = { .size = 0, .nonleaf_size = 0, .leaf_size = 0, .rollback_size = 0, .cache_pressure_size = 0 };
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.write_extraargs = &expect;
+ for (int i = 0; i < n_pairs; ++i) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i],
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ &expect);
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i+1), i+1, CACHETABLE_DIRTY, attrs[i]);
+ assert_zero(r);
+ expect.size += attrs[i].size;
+ expect.nonleaf_size += attrs[i].nonleaf_size;
+ expect.leaf_size += attrs[i].leaf_size;
+ expect.rollback_size += attrs[i].rollback_size;
+ expect.cache_pressure_size += attrs[i].cache_pressure_size;
+ }
+
+ toku_cachetable_get_status(ct, &ct_test_status);
+ assert(STATUS_VALUE(CT_SIZE_NONLEAF ) == (uint64_t) expect.nonleaf_size);
+ assert(STATUS_VALUE(CT_SIZE_LEAF ) == (uint64_t) expect.leaf_size);
+ assert(STATUS_VALUE(CT_SIZE_ROLLBACK ) == (uint64_t) expect.rollback_size);
+ assert(STATUS_VALUE(CT_SIZE_CACHEPRESSURE) == (uint64_t) expect.cache_pressure_size);
+
+ void *big_v;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(n_pairs + 1), n_pairs + 1, &big_v,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ &expect);
+ toku_test_cachetable_unpin(f1, make_blocknum(n_pairs + 1), n_pairs + 1, CACHETABLE_CLEAN,
+ make_pair_attr(test_limit - expect.size + 20));
+
+ usleep(2*1024*1024);
+
+ toku_cachetable_get_status(ct, &ct_test_status);
+ assert(STATUS_VALUE(CT_SIZE_NONLEAF ) == (uint64_t) expect.nonleaf_size);
+ assert(STATUS_VALUE(CT_SIZE_LEAF ) == (uint64_t) expect.leaf_size);
+ assert(STATUS_VALUE(CT_SIZE_ROLLBACK ) == (uint64_t) expect.rollback_size);
+ assert(STATUS_VALUE(CT_SIZE_CACHEPRESSURE) == (uint64_t) expect.cache_pressure_size);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+#undef STATUS_VALUE
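The test above accumulates an `expect` total as each pair is unpinned dirty and relies on its flush callback to subtract the attributes of evicted pairs, so the CT_SIZE_* status values must equal the running sum both before and after eviction. A minimal standalone sketch of that bookkeeping, using plain C++ types for illustration rather than the PerconaFT ones:

    #include <cassert>
    #include <cstdint>

    struct pair_attr { int64_t size, nonleaf, leaf, rollback, cache_pressure; };

    struct status_totals {
        int64_t nonleaf = 0, leaf = 0, rollback = 0, cache_pressure = 0;
        void add(const pair_attr &a) {        // mirrors the unpin-dirty path in the test
            nonleaf += a.nonleaf; leaf += a.leaf;
            rollback += a.rollback; cache_pressure += a.cache_pressure;
        }
        void remove(const pair_attr &a) {     // mirrors the test's flush callback on eviction
            nonleaf -= a.nonleaf; leaf -= a.leaf;
            rollback -= a.rollback; cache_pressure -= a.cache_pressure;
        }
    };

    int main() {
        status_totals st;
        pair_attr a{20, 13, 900, 123, 403}, b{21, 16, 910, 113, 401};
        st.add(a); st.add(b);
        assert(st.leaf == 1810 && st.nonleaf == 29);
        st.remove(a);                         // the first pair gets evicted
        assert(st.leaf == 910 && st.nonleaf == 16);
        return 0;
    }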
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc
new file mode 100644
index 00000000..21f6f06d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc
@@ -0,0 +1,81 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+//
+// simple tests for cleaner thread with an empty cachetable
+//
+
+static void
+cachetable_test (void) {
+ const int test_limit = 1000;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ toku_set_cleaner_period(ct, 1);
+
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ usleep(4000000);
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert(r==0);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-everything-pinned.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-everything-pinned.cc
new file mode 100644
index 00000000..e643f739
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-everything-pinned.cc
@@ -0,0 +1,104 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+//
+// This test verifies that the cleaner thread doesn't call the callback if
+// everything is pinned.
+//
+
+static int
+everything_pinned_cleaner_callback(
+ void* UU(ftnode_pv),
+ BLOCKNUM UU(blocknum),
+ uint32_t UU(fullhash),
+ void* UU(extraargs)
+ )
+{
+ assert(false); // everything is pinned so this should never be called
+ return 0;
+}
+
+static void
+run_test (void) {
+ const int test_limit = 1000;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ toku_set_cleaner_period(ct, 1);
+
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ toku_set_cleaner_period(ct, 1);
+ assert(r==0);
+
+ void* vs[8];
+ for (int i = 0; i < 8; ++i) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.cleaner_callback = everything_pinned_cleaner_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i],
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ NULL);
+ assert_zero(r);
+ }
+
+ usleep(4000000);
+
+ for (int i = 0; i < 8; ++i) {
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i+1), i+1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+ }
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
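A pinned pair is never handed to the cleaner, which is why the callback above can simply assert(false). A minimal standalone sketch of a cleaner pass that skips pinned (or pressure-free) pairs, assuming a simplified pair representation rather than PerconaFT's:

    #include <cassert>
    #include <vector>

    struct pair_state { bool pinned; long cache_pressure; bool cleaned; };

    // One cleaner pass: pinned pairs and pairs without cache pressure are skipped.
    static void cleaner_pass(std::vector<pair_state> &pairs) {
        for (pair_state &p : pairs) {
            if (p.pinned || p.cache_pressure == 0) continue;
            p.cleaned = true;                 // stands in for invoking cleaner_callback
        }
    }

    int main() {
        std::vector<pair_state> pairs(8, pair_state{true, 100, false});
        cleaner_pass(pairs);
        for (const pair_state &p : pairs)
            assert(!p.cleaned);               // all pairs pinned => callback never ran
        return 0;
    }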
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc
new file mode 100644
index 00000000..1dd2a8e0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc
@@ -0,0 +1,102 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+//
+// This test verifies that the cleaner thread doesn't call the callback if
+// nothing needs flushing.
+//
+
+static UU() int
+everything_pinned_cleaner_callback(
+ void* UU(ftnode_pv),
+ BLOCKNUM UU(blocknum),
+ uint32_t UU(fullhash),
+ void* UU(extraargs)
+ )
+{
+    assert(false); // every pair was unpinned with zero cache pressure, so this should never be called
+ return 0;
+}
+
+static void
+run_test (void) {
+ const int test_limit = 1000;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ toku_set_cleaner_period(ct, 1);
+
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* vs[8];
+ for (int i = 0; i < 8; ++i) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.cleaner_callback = everything_pinned_cleaner_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i],
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ NULL);
+ assert_zero(r);
+ // set cachepressure_size to 0
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 0;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i+1), i+1, CACHETABLE_CLEAN, attr);
+ assert_zero(r);
+ }
+
+ usleep(4000000);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-same-fullhash.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-same-fullhash.cc
new file mode 100644
index 00000000..af519d83
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-same-fullhash.cc
@@ -0,0 +1,117 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+//
+// This test verifies that the cleaner thread behaves correctly when two pairs
+// have the same fullhash and therefore share the same bucket mutex.
+//
+
+CACHEFILE f1;
+bool my_cleaner_callback_called;
+
+static int
+my_cleaner_callback(
+ void* UU(ftnode_pv),
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ void* UU(extraargs)
+ )
+{
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 0;
+ int r = toku_test_cachetable_unpin(f1, blocknum, fullhash, CACHETABLE_CLEAN, attr);
+ my_cleaner_callback_called = true;
+ return r;
+}
+
+// point of this test is to have two pairs that have the same fullhash,
+// and therefore, the same bucket mutex
+static void
+run_test (void) {
+ const int test_limit = 1000;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ my_cleaner_callback_called = false;
+
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* vs[5];
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.cleaner_callback = my_cleaner_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &vs[0],
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ NULL);
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 100;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, attr);
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 1, &vs[1],
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ NULL);
+ attr = make_pair_attr(8);
+ attr.cache_pressure_size = 50;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 1, CACHETABLE_CLEAN, attr);
+
+ toku_cleaner_thread_for_test(ct);
+
+ assert(my_cleaner_callback_called);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
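Both pairs above are created with fullhash 1 even though their block numbers differ, so they hash to the same bucket and must share that bucket's mutex. A tiny sketch of the idea, assuming a hypothetical modulo bucket layout (PerconaFT's actual table layout may differ):

    #include <cassert>
    #include <cstdint>

    // Hypothetical bucket function: any layout keyed on fullhash puts
    // equal fullhashes in the same bucket.
    static uint32_t bucket_of(uint32_t fullhash, uint32_t n_buckets) {
        return fullhash % n_buckets;
    }

    int main() {
        const uint32_t n_buckets = 1024;
        const uint32_t fullhash_block1 = 1;   // block 1, pinned with fullhash 1
        const uint32_t fullhash_block2 = 1;   // block 2, also pinned with fullhash 1
        assert(bucket_of(fullhash_block1, n_buckets) ==
               bucket_of(fullhash_block2, n_buckets));
        return 0;
    }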
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-simple.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-simple.cc
new file mode 100644
index 00000000..363b2d30
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-cleaner-thread-simple.cc
@@ -0,0 +1,122 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+//
+// This test verifies that the cleaner thread calls the cleaner callback on
+// the one pair that has nonzero cache pressure (block 100).
+//
+
+CACHEFILE f1;
+bool my_cleaner_callback_called;
+
+static int
+my_cleaner_callback(
+ void* UU(ftnode_pv),
+ BLOCKNUM UU(blocknum),
+ uint32_t UU(fullhash),
+ void* UU(extraargs)
+ )
+{
+    assert(blocknum.b == 100); // block 100 is the only pair with cache pressure
+ assert(fullhash == 100);
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 100;
+ int r = toku_test_cachetable_unpin(f1, make_blocknum(100), 100, CACHETABLE_CLEAN, attr);
+ my_cleaner_callback_called = true;
+ return r;
+}
+
+static void
+run_test (void) {
+ const int test_limit = 1000;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ toku_set_cleaner_period(ct, 1);
+ my_cleaner_callback_called = false;
+
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* vs[5];
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.cleaner_callback = my_cleaner_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(100), 100, &vs[4],
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ NULL);
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 100;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(100), 100, CACHETABLE_CLEAN, attr);
+
+ for (int i = 0; i < 4; ++i) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(i+1), i+1, &vs[i],
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ NULL);
+ assert_zero(r);
+ // set cachepressure_size to 0
+ attr = make_pair_attr(8);
+ attr.cache_pressure_size = 0;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i+1), i+1, CACHETABLE_CLEAN, attr);
+ assert_zero(r);
+ }
+
+ usleep(4000000);
+ assert(my_cleaner_callback_called);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-all-pinned.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-all-pinned.cc
new file mode 100644
index 00000000..03af7b21
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-all-pinned.cc
@@ -0,0 +1,79 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+static void
+cachetable_test (void) {
+ int num_entries = 100;
+ int test_limit = 6;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ // test that putting something too big in the cachetable works fine
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ toku_cachetable_put(f1, make_blocknum(num_entries+1), num_entries+1, NULL, make_pair_attr(test_limit*2), wc, put_callback_nop);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(num_entries+1), num_entries+1, CACHETABLE_DIRTY, make_pair_attr(test_limit*2));
+ assert(r==0);
+
+
+ for (int64_t i = 0; i < num_entries; i++) {
+ toku_cachetable_put(f1, make_blocknum(i), i, NULL, make_pair_attr(1), wc, put_callback_nop);
+ assert(toku_cachefile_count_pinned(f1, 0) == (i+1));
+ }
+ for (int64_t i = 0; i < num_entries; i++) {
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), i, CACHETABLE_DIRTY, make_pair_attr(1));
+ }
+
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction.cc
new file mode 100644
index 00000000..855a7154
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction.cc
@@ -0,0 +1,143 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+int num_entries;
+bool flush_may_occur;
+int expected_flushed_key;
+bool check_flush;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+    /* verify the expected eviction order only when flush checking is enabled */
+ if (check_flush && !keep) {
+ if (verbose) { printf("FLUSH: %d write_me %d expected %d\n", (int)k.b, w, expected_flushed_key); }
+ assert(flush_may_occur);
+ assert(!w);
+ assert(expected_flushed_key == (int)k.b);
+ expected_flushed_key--;
+ }
+}
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *value = NULL;
+ *sizep = make_pair_attr(1);
+ return 0;
+}
+
+static void
+cachetable_test (void) {
+ const int test_limit = 4;
+ num_entries = 0;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ void* v2;
+ flush_may_occur = false;
+ check_flush = true;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ for (int i = 0; i < 100000; i++) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1));
+ }
+ for (int i = 0; i < 8; i++) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(1));
+ }
+ for (int i = 0; i < 4; i++) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(1));
+ }
+ for (int i = 0; i < 2; i++) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(1));
+ }
+ flush_may_occur = true;
+ expected_flushed_key = 4;
+ toku_cachetable_put(f1, make_blocknum(5), 5, NULL, make_pair_attr(1), wc, put_callback_nop);
+ ct->ev.signal_eviction_thread();
+ usleep(1*1024*1024);
+
+ flush_may_occur = true;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(5), 5, CACHETABLE_CLEAN, make_pair_attr(2));
+ ct->ev.signal_eviction_thread();
+ usleep(1*1024*1024);
+
+ check_flush = false;
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
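The access pattern above (block 1 touched 100000 times, then 8, 4, and 2 times for blocks 2-4) sets up a clock-style eviction order: when block 5 is put and the evictor runs, block 4 should be flushed first, which is what expected_flushed_key checks. A standalone sketch of second-chance (clock) eviction under that assumption; it is an illustration of the policy, not PerconaFT's evictor:

    #include <cassert>
    #include <vector>

    struct entry { int key; int count; };     // count approximates how often a pair was used

    // Sweep the clock hand until an entry with no chances left is found.
    static int pick_victim(std::vector<entry> &ring, size_t &hand) {
        for (;;) {
            entry &e = ring[hand];
            hand = (hand + 1) % ring.size();
            if (e.count == 0) return e.key;   // out of second chances: evict this one
            e.count--;                        // otherwise age it and move on
        }
    }

    int main() {
        // Blocks 1..4, favored in decreasing order, as in the test's access pattern.
        std::vector<entry> ring = {{1, 4}, {2, 3}, {3, 2}, {4, 1}};
        size_t hand = 0;
        assert(pick_victim(ring, hand) == 4); // the least-used block goes first
        return 0;
    }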
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction2.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction2.cc
new file mode 100644
index 00000000..89b1fba5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction2.cc
@@ -0,0 +1,198 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+bool flush_may_occur;
+long expected_bytes_to_free;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v,
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep,
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ assert(flush_may_occur);
+ if (!keep) {
+ int* CAST_FROM_VOIDP(foo, v);
+ assert(*foo == 3);
+ toku_free(v);
+ }
+}
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ int* XMALLOC(foo);
+ *value = foo;
+ *sizep = make_pair_attr(4);
+ *foo = 4;
+ return 0;
+}
+
+static void
+other_flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+}
+
+static int
+pe_callback (
+ void *ftnode_pv,
+ PAIR_ATTR UU(bytes_to_free),
+ void* extraargs __attribute__((__unused__)),
+ void (*finalize)(PAIR_ATTR bytes_freed, void *extra),
+ void *finalize_extra
+ )
+{
+ expected_bytes_to_free--;
+ int* CAST_FROM_VOIDP(foo, ftnode_pv);
+ int blah = *foo;
+ *foo = blah-1;
+ finalize(make_pair_attr(bytes_to_free.size-1), finalize_extra);
+ return 0;
+}
+
+static int
+other_pe_callback (
+ void *ftnode_pv __attribute__((__unused__)),
+ PAIR_ATTR bytes_to_free __attribute__((__unused__)),
+ void* extraargs __attribute__((__unused__)),
+ void (*finalize)(PAIR_ATTR bytes_freed, void *extra),
+ void *finalize_extra
+ )
+{
+ finalize(bytes_to_free, finalize_extra);
+ return 0;
+}
+
+static void
+cachetable_test (void) {
+ const int test_limit = 16;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ void* v2;
+ flush_may_occur = false;
+ for (int i = 0; i < 100000; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_callback = pe_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(4));
+ }
+ for (int i = 0; i < 8; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_callback = pe_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(4));
+ }
+ for (int i = 0; i < 4; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_callback = pe_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(4));
+ }
+ for (int i = 0; i < 2; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_callback = pe_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(4));
+ }
+ flush_may_occur = false;
+ expected_bytes_to_free = 4;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = other_flush;
+ wc.pe_callback = other_pe_callback;
+ toku_cachetable_put(f1, make_blocknum(5), 5, NULL, make_pair_attr(4), wc, put_callback_nop);
+ ct->ev.signal_eviction_thread();
+ usleep(1*1024*1024);
+ flush_may_occur = true;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(5), 5, CACHETABLE_CLEAN, make_pair_attr(4));
+ ct->ev.signal_eviction_thread();
+ usleep(1*1024*1024);
+ assert(expected_bytes_to_free == 0);
+
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
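Here eviction goes through pe_callback first: each call sheds a byte from the in-memory value and reports it via finalize, and expected_bytes_to_free counts how many partial evictions ran before the pressure was gone. A minimal sketch of that partial-eviction idea (a simplified flow, not the PerconaFT API):

    #include <cassert>

    struct pair_mem { long bytes; };

    // One partial-eviction step: shed a single byte, as the test's pe_callback does.
    static long partial_evict(pair_mem &p) {
        if (p.bytes == 0) return 0;
        p.bytes -= 1;
        return 1;
    }

    int main() {
        pair_mem p{4};
        int calls = 0;
        while (p.bytes > 0) {                 // the evictor keeps asking while pressure remains
            partial_evict(p);
            calls++;
        }
        assert(calls == 4 && p.bytes == 0);   // matches expected_bytes_to_free counting down to 0
        return 0;
    }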
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction3.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction3.cc
new file mode 100644
index 00000000..a6c8d2fd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction3.cc
@@ -0,0 +1,225 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+bool flush_may_occur;
+long expected_bytes_to_free;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void* UU(v),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep,
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ assert(flush_may_occur);
+ if (!keep) {
+ //int* foo = v;
+ //assert(*foo == 3);
+ toku_free(v);
+ }
+}
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ int* XMALLOC(foo);
+ *value = foo;
+ *sizep = make_pair_attr(4);
+ *foo = 4;
+ return 0;
+}
+
+static void
+other_flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+}
+
+static void
+pe_est_callback(
+ void* UU(ftnode_pv),
+ void* UU(dd),
+ long* bytes_freed_estimate,
+ enum partial_eviction_cost *cost,
+ void* UU(write_extraargs)
+ )
+{
+ *bytes_freed_estimate = 1000;
+ *cost = PE_EXPENSIVE;
+}
+
+static int
+pe_callback (
+ void *ftnode_pv,
+ PAIR_ATTR UU(bytes_to_free),
+ void* extraargs __attribute__((__unused__)),
+ void (*finalize)(PAIR_ATTR bytes_freed, void *extra),
+ void *finalize_extra
+ )
+{
+ usleep(1*1024*1024);
+ if (verbose) printf("calling pe_callback\n");
+ expected_bytes_to_free--;
+ int* CAST_FROM_VOIDP(foo, ftnode_pv);
+ int blah = *foo;
+ *foo = blah-1;
+ finalize(make_pair_attr(bytes_to_free.size-1), finalize_extra);
+ return 0;
+}
+
+static int
+other_pe_callback (
+ void *ftnode_pv __attribute__((__unused__)),
+ PAIR_ATTR bytes_to_free __attribute__((__unused__)),
+ void* extraargs __attribute__((__unused__)),
+ void (*finalize)(PAIR_ATTR bytes_freed, void *extra),
+ void *finalize_extra
+ )
+{
+ finalize(bytes_to_free, finalize_extra);
+ return 0;
+}
+
+static void
+cachetable_test (void) {
+ const int test_limit = 20;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ evictor_test_helpers::set_hysteresis_limits(&ct->ev, test_limit, 100*test_limit);
+ evictor_test_helpers::disable_ev_thread(&ct->ev);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ void* v2;
+ flush_may_occur = false;
+ for (int i = 0; i < 100000; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_est_callback = pe_est_callback;
+ wc.pe_callback = pe_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(4));
+ }
+ for (int i = 0; i < 8; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_est_callback = pe_est_callback;
+ wc.pe_callback = pe_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(4));
+ }
+ for (int i = 0; i < 4; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_est_callback = pe_est_callback;
+ wc.pe_callback = pe_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(4));
+ }
+ for (int i = 0; i < 2; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_est_callback = pe_est_callback;
+ wc.pe_callback = pe_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(4));
+ }
+ flush_may_occur = false;
+ expected_bytes_to_free = 4;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = other_flush;
+ wc.pe_est_callback = pe_est_callback;
+ wc.pe_callback = other_pe_callback;
+ toku_cachetable_put(f1, make_blocknum(5), 5, NULL, make_pair_attr(4), wc, put_callback_nop);
+ flush_may_occur = true;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(5), 5, CACHETABLE_CLEAN, make_pair_attr(8));
+ ct->ev.signal_eviction_thread();
+
+    // we are testing that having a wildly different estimate than
+    // what actually gets freed is ok:
+    // the callbacks estimate that 1000 bytes get freed,
+    // whereas in reality only 1 byte will be freed.
+    // we verify that only 1 byte gets freed (which leaves the cachetable
+    // oversubscribed)
+ usleep(3*1024*1024);
+ assert(expected_bytes_to_free == 3);
+
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction4.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction4.cc
new file mode 100644
index 00000000..8537a5a0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-clock-eviction4.cc
@@ -0,0 +1,179 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+int num_entries;
+bool flush_may_occur;
+int expected_flushed_key;
+bool check_flush;
+
+
+//
+// This test verifies that if partial eviction is expensive and
+// does not estimate the number of freed bytes to be greater than 0,
+// then partial eviction is not called and normal eviction
+// is used. The verification is done via an assert(false) in
+// pe_callback.
+//
+
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+    /* verify the expected eviction order only when flush checking is enabled */
+ if (check_flush && !keep) {
+ if (verbose) { printf("FLUSH: %d write_me %d\n", (int)k.b, w); }
+ assert(flush_may_occur);
+ assert(!w);
+ assert(expected_flushed_key == (int)k.b);
+ expected_flushed_key--;
+ }
+}
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *value = NULL;
+ *sizep = make_pair_attr(1);
+ return 0;
+}
+
+static void
+pe_est_callback(
+ void* UU(ftnode_pv),
+ void* UU(dd),
+ long* bytes_freed_estimate,
+ enum partial_eviction_cost *cost,
+ void* UU(write_extraargs)
+ )
+{
+ *bytes_freed_estimate = 0;
+ *cost = PE_EXPENSIVE;
+}
+
+static int
+pe_callback (
+ void *ftnode_pv __attribute__((__unused__)),
+ PAIR_ATTR bytes_to_free __attribute__((__unused__)),
+ void* extraargs __attribute__((__unused__)),
+ void (*finalize)(PAIR_ATTR bytes_freed, void *extra),
+ void *finalize_extra
+ )
+{
+ assert(false);
+ finalize(bytes_to_free, finalize_extra);
+ return 0;
+}
+
+
+static void
+cachetable_test (void) {
+ const int test_limit = 4;
+ num_entries = 0;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ void* v2;
+ flush_may_occur = false;
+ check_flush = true;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_est_callback = pe_est_callback;
+ wc.pe_callback = pe_callback;
+ for (int i = 0; i < 100000; i++) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1));
+ }
+ for (int i = 0; i < 8; i++) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(1));
+ }
+ for (int i = 0; i < 4; i++) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(1));
+ }
+ for (int i = 0; i < 2; i++) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(1));
+ }
+ flush_may_occur = true;
+ expected_flushed_key = 4;
+ toku_cachetable_put(f1, make_blocknum(5), 5, NULL, make_pair_attr(4), wc, put_callback_nop);
+ flush_may_occur = true;
+ expected_flushed_key = 5;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(5), 5, CACHETABLE_CLEAN, make_pair_attr(4));
+
+ check_flush = false;
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-checkpoint.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-checkpoint.cc
new file mode 100644
index 00000000..e9571dfd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-checkpoint.cc
@@ -0,0 +1,147 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+static void
+clone_callback(void* UU(value_data), void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
+{
+ *cloned_value_data = (void *)1;
+ *clone_size = 8;
+ new_attr->is_valid = false;
+}
+
+bool clone_flush_started;
+bool clone_flush_completed;
+CACHETABLE ct;
+
+static void
+flush (
+ CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool is_clone
+ )
+{
+ if (is_clone) {
+ clone_flush_started = true;
+ usleep(4*1024*1024);
+ clone_flush_completed = true;
+ }
+}
+
+static void *run_end_checkpoint(void *arg) {
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ return arg;
+}
+
+//
+// this test verifies that a PAIR that undergoes a checkpoint on the checkpoint thread is still pinnable while being written out
+//
+static void
+cachetable_test (void) {
+ const int test_limit = 200;
+ int r;
+ ct = NULL;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.clone_callback = clone_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+
+ clone_flush_started = false;
+ clone_flush_completed = false;
+ toku_pthread_t checkpoint_tid;
+ r = toku_pthread_create(toku_uninstrumented,
+ &checkpoint_tid,
+ nullptr,
+ run_end_checkpoint,
+ nullptr);
+ assert_zero(r);
+
+ usleep(1 * 1024 * 1024);
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ assert(clone_flush_started && !clone_flush_completed);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+
+ void *ret;
+ r = toku_pthread_join(checkpoint_tid, &ret);
+ assert_zero(r);
+ assert(clone_flush_started && clone_flush_completed);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
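The test relies on the clone taken at checkpoint begin being the thing that gets written out, so the original pair can be re-pinned (and even re-dirtied) while the clone flush is still in flight. A standalone sketch of that property, simplified with a background thread standing in for the checkpoint thread (not PerconaFT's clone machinery):

    #include <atomic>
    #include <cassert>
    #include <thread>

    struct node { int value; };

    int main() {
        node original{42};
        node clone = original;                // snapshot taken when the checkpoint begins
        std::atomic<bool> clone_written{false};

        std::thread checkpoint_thread([&] {
            // ... the clone, not the original, is what gets written out ...
            clone_written = true;
        });

        original.value = 43;                  // the original stays pinnable and mutable meanwhile
        checkpoint_thread.join();
        assert(clone_written && clone.value == 42 && original.value == 43);
        return 0;
    }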
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc
new file mode 100644
index 00000000..42faa8ad
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc
@@ -0,0 +1,148 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+bool flush_completed;
+bool pf_called;
+
+static void
+clone_callback(void* UU(value_data), void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
+{
+ *cloned_value_data = (void *)1;
+ *clone_size = 8;
+ new_attr->is_valid = false;
+}
+
+static void
+flush (
+ CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ )
+{
+ if (is_clone) {
+ usleep(2*1024*1024);
+ flush_completed = true;
+ }
+}
+
+static int true_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
+ assert(flush_completed);
+ pf_called = true;
+ *sizep = make_pair_attr(9);
+ return 0;
+}
+
+
+// this test verifies that a partial fetch will wait for a cloned pair to complete
+// writing to disk
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.clone_callback = clone_callback;
+ wc.flush_callback = flush;
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+
+ flush_completed = false;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ assert_zero(r);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+
+ pf_called = false;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ assert(!pf_called);
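+ // force a partial fetch on the already-pinned pair; true_pf_callback asserts that the clone flush finished first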
+ toku_cachetable_pf_pinned_pair(v1, true_pf_callback, NULL, f1, make_blocknum(1), 1);
+ assert(pf_called);
+
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+ assert(pf_called);
+
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert_zero(r);
+
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-partial-fetch.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-partial-fetch.cc
new file mode 100644
index 00000000..912cd0df
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-partial-fetch.cc
@@ -0,0 +1,149 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+bool flush_completed;
+bool pf_called;
+
+static void
+clone_callback(void* UU(value_data), void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
+{
+ *cloned_value_data = (void *)1;
+ *clone_size = 8;
+ new_attr->is_valid = false;
+}
+
+static void
+flush (
+ CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ )
+{
+ if (is_clone) {
+ usleep(2*1024*1024);
+ flush_completed = true;
+ }
+}
+
+static bool true_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ if (pf_called) return false;
+ return true;
+}
+
+static int true_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
+ assert(flush_completed);
+ pf_called = true;
+ *sizep = make_pair_attr(9);
+ return 0;
+}
+
+
+// this test verifies that a partial fetch will wait for a cloned pair to complete
+// writing to disk
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.clone_callback = clone_callback;
+ wc.flush_callback = flush;
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+
+ flush_completed = false;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ assert_zero(r);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+
+ pf_called = false;
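+ // this pin requests a partial fetch (true_pf_req_callback), which must wait for the cloned pair's flush to finish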
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, true_pf_req_callback, true_pf_callback, true, NULL);
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+ assert(pf_called);
+
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert_zero(r);
+
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-pin-nonblocking.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-pin-nonblocking.cc
new file mode 100644
index 00000000..81c6cecc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-pin-nonblocking.cc
@@ -0,0 +1,124 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+
+static void
+clone_callback(void* UU(value_data), void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
+{
+ *cloned_value_data = (void *)1;
+ *clone_size = 8;
+ new_attr->is_valid = false;
+}
+
+static void
+flush (
+ CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ )
+{
+}
+
+
+// this test verifies that nonblocking pins of a pair belonging to a pending checkpoint,
+// both for read and for write, succeed (r == 0) rather than returning TOKUDB_TRY_AGAIN
+static void
+cachetable_test (enum cachetable_dirty dirty, bool cloneable) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.clone_callback = cloneable ? clone_callback : NULL;
+ wc.flush_callback = flush;
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, dirty, make_pair_attr(8));
+
+ // test that a read pin (PL_READ, one that will not modify the value) does not stall behind the checkpoint
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_READ, NULL, NULL);
+ assert(r == 0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r == 0);
+
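+ // a nonblocking write pin is also expected to succeed here; r == 0 is asserted for all four variants run by test_main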
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r == 0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test(CACHETABLE_DIRTY, true);
+ cachetable_test(CACHETABLE_DIRTY, false);
+ cachetable_test(CACHETABLE_CLEAN, true);
+ cachetable_test(CACHETABLE_CLEAN, false);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-unpin-remove.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-unpin-remove.cc
new file mode 100644
index 00000000..625718f9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-clone-unpin-remove.cc
@@ -0,0 +1,137 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+bool flush_completed;
+bool evict_called;
+
+static void
+clone_callback(void* UU(value_data), void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
+{
+ *cloned_value_data = (void *)1;
+ *clone_size = 8;
+ new_attr->is_valid = false;
+}
+
+static void
+flush (
+ CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ )
+{
+ if (is_clone) {
+ usleep(2*1024*1024);
+ flush_completed = true;
+ }
+ else if (!keep && !is_clone) {
+ assert(flush_completed);
+ evict_called = true;
+ }
+}
+
+
+
+// this test verifies that unpin_and_remove on a pair whose clone is still being
+// written to disk waits for the clone's flush to complete before evicting the pair
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.clone_callback = clone_callback;
+ wc.flush_callback = flush;
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+
+ flush_completed = false;
+ evict_called = false;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ assert_zero(r);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
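+ // remove the pair while its clone is still being written out; the eviction branch of flush() asserts the clone flush completed first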
+ r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(1), NULL, NULL);
+ assert_zero(r);
+
+
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert_zero(r);
+
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-count-pinned-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-count-pinned-test.cc
new file mode 100644
index 00000000..1d023b71
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-count-pinned-test.cc
@@ -0,0 +1,92 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+static void
+cachetable_count_pinned_test (int n) {
+ const int test_limit = 2*n;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ int i;
+ for (i=1; i<=n; i++) {
+ uint32_t hi;
+ hi = toku_cachetable_hash(f1, make_blocknum(i));
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ toku_cachetable_put(f1, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(1), wc, put_callback_nop);
+ assert(toku_cachefile_count_pinned(f1, 0) == i);
+
+ void *v;
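+ // maybe_get_and_pin should fail here because the pair is still pinned from the put above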
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
+
+ assert(r == -1);
+ assert(toku_cachefile_count_pinned(f1, 0) == i);
+
+ //r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, 1);
+ //assert(r == 0);
+ assert(toku_cachefile_count_pinned(f1, 0) == i);
+ }
+ for (i=n; i>0; i--) {
+ uint32_t hi;
+ hi = toku_cachetable_hash(f1, make_blocknum(i));
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ if (i-1) assert(toku_cachetable_assert_all_unpinned(ct));
+ assert(toku_cachefile_count_pinned(f1, 0) == i-1);
+ }
+ assert(toku_cachetable_assert_all_unpinned(ct) == 0);
+ assert(toku_cachefile_count_pinned(f1, 1) == 0);
+ toku_cachetable_verify(ct);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_count_pinned_test(8);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-debug-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-debug-test.cc
new file mode 100644
index 00000000..a66ef9eb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-debug-test.cc
@@ -0,0 +1,96 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static void
+cachetable_debug_test (int n) {
+ const int test_limit = n;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ int num_entries, hash_size; long size_current, size_limit;
+ toku_cachetable_get_state(ct, &num_entries, &hash_size, &size_current, &size_limit);
+ assert(num_entries == 0);
+ assert(size_current == 0);
+ assert(size_limit == n);
+ // printf("%d %d %ld %ld\n", num_entries, hash_size, size_current, size_limit);
+
+ int i;
+ for (i=1; i<=n; i++) {
+ const int item_size = 1;
+ uint32_t hi;
+ hi = toku_cachetable_hash(f1, make_blocknum(i));
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ toku_cachetable_put(f1, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(item_size), wc, put_callback_nop);
+
+ void *v; int dirty; long long pinned; long pair_size;
+ r = toku_cachetable_get_key_state(ct, make_blocknum(i), f1, &v, &dirty, &pinned, &pair_size);
+ assert(r == 0);
+ assert(v == (void *)(long)i);
+ assert(dirty == CACHETABLE_DIRTY);
+ assert(pinned == 1);
+ assert(pair_size == item_size);
+
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+
+ toku_cachetable_get_state(ct, &num_entries, &hash_size, &size_current, &size_limit);
+ assert(num_entries == i);
+ assert(size_current == i);
+ assert(size_limit == n);
+
+ if (verbose) toku_cachetable_print_state(ct);
+ }
+ toku_cachetable_verify(ct);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_debug_test(8);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-close-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-close-test.cc
new file mode 100644
index 00000000..787353e6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-close-test.cc
@@ -0,0 +1,173 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that closing the cachetable with an eviction (write-out of a block) in progress works
+
+#include "test.h"
+
+bool check_flush;
+bool expect_full_flush;
+bool expect_pe;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ assert(expect_full_flush);
+ sleep(2);
+}
+
+static int fetch_calls = 0;
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp __attribute__((__unused__)),
+ void *extraargs __attribute__((__unused__))
+ ) {
+
+ fetch_calls++;
+
+ *value = 0;
+ *sizep = make_pair_attr(8);
+ *dirtyp = 0;
+
+ return 0;
+}
+
+static void
+pe_est_callback(
+ void* UU(ftnode_pv),
+ void* UU(dd),
+ long* bytes_freed_estimate,
+ enum partial_eviction_cost *cost,
+ void* UU(write_extraargs)
+ )
+{
+ *bytes_freed_estimate = 0;
+ *cost = PE_EXPENSIVE;
+}
+
+static void cachetable_eviction_full_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ CACHEKEY key = make_blocknum(0);
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+
+ void* value1;
+ void* value2;
+ //
+ // let's pin a node multiple times
+ // and really bring up its clock count
+ //
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_est_callback = pe_est_callback;
+ for (int i = 0; i < 20; i++) {
+ r = toku_cachetable_get_and_pin(
+ f1,
+ key,
+ fullhash,
+ &value1,
+ wc,
+ fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_DIRTY, make_pair_attr(1));
+ assert(r == 0);
+ }
+ expect_full_flush = true;
+ // now pin a different block, causing an eviction
+ wc.flush_callback = def_flush;
+ wc.pe_est_callback = pe_est_callback;
+ r = toku_cachetable_get_and_pin(
+ f1,
+ make_blocknum(1),
+ 1,
+ &value2,
+ wc,
+ fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ toku_cachetable_verify(ct);
+
+ // close with the eviction in progress. the close should block until
+ // all of the reads and writes are complete.
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_eviction_full_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-close-test2.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-close-test2.cc
new file mode 100644
index 00000000..6fb5311c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-close-test2.cc
@@ -0,0 +1,188 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that closing the cachetable with an eviction in progress works
+
+#include "test.h"
+
+bool check_flush;
+bool expect_full_flush;
+bool expect_pe;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ assert(expect_full_flush);
+}
+
+static int fetch_calls = 0;
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp __attribute__((__unused__)),
+ void *extraargs __attribute__((__unused__))
+ ) {
+
+ fetch_calls++;
+
+ *value = 0;
+ *sizep = make_pair_attr(8);
+ *dirtyp = 0;
+
+ return 0;
+}
+
+static void
+pe_est_callback(
+ void* UU(ftnode_pv),
+ void* UU(dd),
+ long* bytes_freed_estimate,
+ enum partial_eviction_cost *cost,
+ void* UU(write_extraargs)
+ )
+{
+ *bytes_freed_estimate = 7;
+ *cost = PE_EXPENSIVE;
+}
+
+static int
+pe_callback (
+ void *ftnode_pv __attribute__((__unused__)),
+ PAIR_ATTR bytes_to_free __attribute__((__unused__)),
+ void* extraargs __attribute__((__unused__)),
+ void (*finalize)(PAIR_ATTR bytes_freed, void *extra),
+ void *finalize_extra
+ )
+{
+ sleep(2);
+ finalize(bytes_to_free, finalize_extra);
+ return 0;
+}
+
+static void cachetable_eviction_full_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ CACHEKEY key = make_blocknum(0);
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+
+ void* value1;
+ void* value2;
+ //
+ // let's pin a node multiple times
+ // and really bring up its clock count
+ //
+ for (int i = 0; i < 20; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.pe_est_callback = pe_est_callback;
+ wc.pe_callback = pe_callback;
+ r = toku_cachetable_get_and_pin(
+ f1,
+ key,
+ fullhash,
+ &value1,
+ wc,
+ fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert(r == 0);
+ }
+ expect_full_flush = true;
+ // now pin a different block, causing an eviction
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.pe_est_callback = pe_est_callback;
+ wc.pe_callback = pe_callback;
+ r = toku_cachetable_get_and_pin(
+ f1,
+ make_blocknum(1),
+ 1,
+ &value2,
+ wc,
+ fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ toku_cachetable_verify(ct);
+
+ // close with the eviction in progress. the close should block until
+ // all of the reads and writes are complete.
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_eviction_full_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-getandpin-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-getandpin-test.cc
new file mode 100644
index 00000000..61ba1e65
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-getandpin-test.cc
@@ -0,0 +1,162 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* verify that get_and_pin waits while the block is being written out for eviction */
+
+#include "test.h"
+
+bool do_sleep;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ if (do_sleep) {
+ sleep(3);
+ }
+}
+
+static uint64_t tdelta_usec(struct timeval *tend, struct timeval *tstart) {
+ uint64_t t = tend->tv_sec * 1000000 + tend->tv_usec;
+ t -= tstart->tv_sec * 1000000 + tstart->tv_usec;
+ return t;
+}
+
+static void cachetable_predef_fetch_maybegetandpin_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ evictor_test_helpers::disable_ev_thread(&ct->ev);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ CACHEKEY key = make_blocknum(0);
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+
+ // let's get and pin this node a bunch of times to drive up the clock count
+ for (int i = 0; i < 20; i++) {
+ void* value;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ r = toku_cachetable_get_and_pin(
+ f1,
+ key,
+ fullhash,
+ &value,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_DIRTY, make_pair_attr(8));
+ }
+
+ struct timeval tstart;
+ gettimeofday(&tstart, NULL);
+
+ // fetch another block, causing an eviction of the first block we made above
+ do_sleep = true;
+ void* value2;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ r = toku_cachetable_get_and_pin(
+ f1,
+ make_blocknum(1),
+ 1,
+ &value2,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
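+ // the evictor's periodic wakeups were disabled above, so signal it by hand and give it ~1 second to start flushing the first block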
+ ct->ev.signal_eviction_thread();
+ usleep(1*1024*1024);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ toku_cachetable_verify(ct);
+
+ void *v = 0;
+ // now verify that the block we are trying to evict is gone
+ wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ r = toku_cachetable_get_and_pin_nonblocking(f1, key, fullhash, &v, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r == TOKUDB_TRY_AGAIN);
+ r = toku_cachetable_get_and_pin(f1, key, fullhash, &v, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert(r == 0 && v == 0);
+ PAIR_ATTR attr;
+ r = toku_cachetable_get_attr(f1, key, fullhash, &attr);
+ assert(r == 0 && attr.size == 8);
+ do_sleep = false;
+
+ struct timeval tend;
+ gettimeofday(&tend, NULL);
+
+ assert(tdelta_usec(&tend, &tstart) >= 2000000);
+ if (verbose) printf("time %" PRIu64 " \n", tdelta_usec(&tend, &tstart));
+ toku_cachetable_verify(ct);
+
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ toku_cachetable_verify(ct);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_predef_fetch_maybegetandpin_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-getandpin-test2.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-getandpin-test2.cc
new file mode 100644
index 00000000..f6dd04f3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-eviction-getandpin-test2.cc
@@ -0,0 +1,190 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* verify that get_and_pin waits while a partial eviction of the block is pending */
+
+#include "test.h"
+
+
+static void
+pe_est_callback(
+ void* UU(ftnode_pv),
+ void* UU(dd),
+ long* bytes_freed_estimate,
+ enum partial_eviction_cost *cost,
+ void* UU(write_extraargs)
+ )
+{
+ *bytes_freed_estimate = 7;
+ *cost = PE_EXPENSIVE;
+}
+
+static int
+pe_callback (
+ void *ftnode_pv __attribute__((__unused__)),
+ PAIR_ATTR bytes_to_free __attribute__((__unused__)),
+ void* extraargs __attribute__((__unused__)),
+ void (*finalize)(PAIR_ATTR new_attr, void *extra),
+ void *finalize_extra
+ )
+{
+ sleep(3);
+ finalize(make_pair_attr(bytes_to_free.size - 7), finalize_extra);
+ return 0;
+}
+
+static uint64_t tdelta_usec(struct timeval *tend, struct timeval *tstart) {
+ uint64_t t = tend->tv_sec * 1000000 + tend->tv_usec;
+ t -= tstart->tv_sec * 1000000 + tstart->tv_usec;
+ return t;
+}
+
+static void cachetable_prefetch_maybegetandpin_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ evictor_test_helpers::disable_ev_thread(&ct->ev);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ CACHEKEY key = make_blocknum(0);
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+
+ // let's get and pin this node a bunch of times to drive up the clock count
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.pe_est_callback = pe_est_callback;
+ wc.pe_callback = pe_callback;
+ for (int i = 0; i < 20; i++) {
+ void* value;
+ r = toku_cachetable_get_and_pin(
+ f1,
+ key,
+ fullhash,
+ &value,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_DIRTY, make_pair_attr(8));
+ }
+
+ struct timeval tstart;
+ gettimeofday(&tstart, NULL);
+
+ // fetch another block, causing an eviction of the first block we made above
+ void* value2;
+ r = toku_cachetable_get_and_pin(
+ f1,
+ make_blocknum(1),
+ 1,
+ &value2,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
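+ // signal the evictor by hand (its periodic wakeups are disabled) and give it ~1 second to start the partial eviction, which sleeps in pe_callback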
+ ct->ev.signal_eviction_thread();
+ usleep(1*1024*1024);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+
+
+ toku_cachetable_verify(ct);
+
+ void *v = 0;
+ // now verify that the block we are trying to evict may be pinned
+ r = toku_cachetable_get_and_pin_nonblocking(
+ f1,
+ key,
+ fullhash,
+ &v,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ PL_WRITE_EXPENSIVE,
+ NULL,
+ NULL
+ );
+ assert(r==TOKUDB_TRY_AGAIN);
+ r = toku_cachetable_get_and_pin(
+ f1,
+ key,
+ fullhash,
+ &v,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ NULL
+ );
+ assert(r == 0 && v == 0);
+ PAIR_ATTR attr;
+ r = toku_cachetable_get_attr(f1, key, fullhash, &attr);
+ assert(r == 0 && attr.size == 1);
+
+ struct timeval tend;
+ gettimeofday(&tend, NULL);
+
+ assert(tdelta_usec(&tend, &tstart) >= 2000000);
+ if (verbose) printf("time %" PRIu64 " \n", tdelta_usec(&tend, &tstart));
+ toku_cachetable_verify(ct);
+
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ toku_cachetable_verify(ct);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_prefetch_maybegetandpin_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-evictor-class.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-evictor-class.cc
new file mode 100644
index 00000000..ab7de3dd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-evictor-class.cc
@@ -0,0 +1,272 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable/cachetable-internal.h"
+
+class evictor_unit_test {
+public:
+ evictor m_ev;
+ pair_list m_pl;
+ cachefile_list m_cf_list;
+ KIBBUTZ m_kb;
+ void init();
+ void destroy();
+ void run_test();
+ void verify_ev_init(long limit);
+ void verify_ev_destroy();
+ void verify_ev_counts();
+ void verify_ev_m_size_reserved();
+ void verify_ev_handling_cache_pressure();
+
+ // function to disable the eviction thread from waking up every second
+ void disable_ev_thread();
+};
+
+// initialize this class to run tests
+void evictor_unit_test::init() {
+ ZERO_STRUCT(m_pl);
+ ZERO_STRUCT(m_cf_list);
+ m_pl.init();
+ m_cf_list.init();
+ m_kb = NULL;
+ int r = toku_kibbutz_create(1, &m_kb);
+ assert(r == 0);
+}
+
+// destroy class after tests have run
+void evictor_unit_test::destroy() {
+ m_pl.destroy();
+ m_cf_list.destroy();
+ toku_kibbutz_destroy(m_kb);
+}
+
+// test that verifies evictor.init properly worked
+void evictor_unit_test::verify_ev_init(long limit) {
+ assert(m_ev.m_kibbutz == m_kb);
+ assert(m_ev.m_pl == &m_pl);
+ assert(m_ev.m_cf_list == &m_cf_list);
+ assert(m_ev.m_low_size_watermark == limit);
+ assert(m_ev.m_num_sleepers == 0);
+ assert(m_ev.m_run_thread == true);
+ assert(m_ev.m_size_current == 0);
+ assert(read_partitioned_counter(m_ev.m_size_leaf) == 0);
+ assert(read_partitioned_counter(m_ev.m_size_nonleaf) == 0);
+ assert(read_partitioned_counter(m_ev.m_size_rollback) == 0);
+ assert(read_partitioned_counter(m_ev.m_size_cachepressure) == 0);
+ assert(m_ev.m_size_evicting == 0);
+ // this comes from definition of unreservable_memory in cachetable.cc
+ assert(m_ev.m_size_reserved == (limit/4));
+}
+
+// test that verifies evictor.destroy properly worked
+void evictor_unit_test::verify_ev_destroy() {
+ assert(m_ev.m_num_sleepers == 0);
+ assert(m_ev.m_run_thread == false);
+}
+
+void evictor_unit_test::disable_ev_thread() {
+ toku_mutex_lock(&m_ev.m_ev_thread_lock);
+ m_ev.m_period_in_seconds = 0;
+ // signal eviction thread so that it wakes up
+ // and then sleeps indefinitely
+ m_ev.signal_eviction_thread();
+ toku_mutex_unlock(&m_ev.m_ev_thread_lock);
+ // sleep for one second to ensure eviction thread picks up new period
+ usleep(1*1024*1024);
+}
+
+// test that verifies that counts, such as m_size_current
+// are accurately maintained
+void evictor_unit_test::verify_ev_counts() {
+ long limit = 10;
+ long expected_m_size_reserved = limit/4;
+ ZERO_STRUCT(m_ev);
+ m_ev.init(limit, &m_pl, &m_cf_list, m_kb, 0);
+ this->verify_ev_init(limit);
+
+ m_ev.add_to_size_current(1);
+ assert(m_ev.m_size_current == 1);
+ assert(m_ev.m_size_reserved == expected_m_size_reserved);
+ assert(read_partitioned_counter(m_ev.m_size_leaf) == 0);
+ assert(read_partitioned_counter(m_ev.m_size_nonleaf) == 0);
+ assert(read_partitioned_counter(m_ev.m_size_rollback) == 0);
+ assert(read_partitioned_counter(m_ev.m_size_cachepressure) == 0);
+ assert(m_ev.m_size_evicting == 0);
+
+ m_ev.add_to_size_current(3);
+ assert(m_ev.m_size_current == 4);
+
+ m_ev.remove_from_size_current(4);
+ assert(m_ev.m_size_current == 0);
+ assert(m_ev.m_size_reserved == expected_m_size_reserved);
+
+ PAIR_ATTR attr = {
+ .size = 1,
+ .nonleaf_size = 2,
+ .leaf_size = 3,
+ .rollback_size = 4,
+ .cache_pressure_size = 5,
+ .is_valid = true
+ };
+
+ m_ev.add_pair_attr(attr);
+ assert(m_ev.m_size_current == 1);
+ assert(read_partitioned_counter(m_ev.m_size_nonleaf) == 2);
+ assert(read_partitioned_counter(m_ev.m_size_leaf) == 3);
+ assert(read_partitioned_counter(m_ev.m_size_rollback) == 4);
+ assert(read_partitioned_counter(m_ev.m_size_cachepressure) == 5);
+ m_ev.remove_pair_attr(attr);
+ assert(m_ev.m_size_current == 0);
+ assert(read_partitioned_counter(m_ev.m_size_leaf) == 0);
+ assert(read_partitioned_counter(m_ev.m_size_nonleaf) == 0);
+ assert(read_partitioned_counter(m_ev.m_size_rollback) == 0);
+ assert(read_partitioned_counter(m_ev.m_size_cachepressure) == 0);
+
+ PAIR_ATTR other_attr = {
+ .size = 2,
+ .nonleaf_size = 3,
+ .leaf_size = 4,
+ .rollback_size = 5,
+ .cache_pressure_size = 6,
+ .is_valid = true
+ };
+ m_ev.change_pair_attr(attr, other_attr);
+ assert(m_ev.m_size_current == 1);
+ assert(read_partitioned_counter(m_ev.m_size_leaf) == 1);
+ assert(read_partitioned_counter(m_ev.m_size_nonleaf) == 1);
+ assert(read_partitioned_counter(m_ev.m_size_rollback) == 1);
+ assert(read_partitioned_counter(m_ev.m_size_cachepressure) == 1);
+
+ m_ev.m_size_current = 0;
+ m_ev.destroy();
+ this->verify_ev_destroy();
+}
+
+// test to verify the functionality surrounding m_size_reserved
+void evictor_unit_test::verify_ev_m_size_reserved() {
+ long limit = 400;
+ long expected_m_size_reserved = 100; //limit/4
+ ZERO_STRUCT(m_ev);
+ m_ev.init(limit, &m_pl, &m_cf_list, m_kb, 0);
+ this->verify_ev_init(limit);
+ assert(m_ev.m_size_reserved == expected_m_size_reserved);
+ m_ev.m_num_eviction_thread_runs = 0;
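+ // reserve half of the unreserved memory: (400 - 100) * 0.5 = 150, on top of the original 100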
+ m_ev.reserve_memory(0.5, 0);
+ assert(m_ev.m_size_reserved == 100+150); //100 original, 150 from last call
+ assert(m_ev.m_size_current == 150);
+ assert(m_ev.m_size_evicting == 0);
+ usleep(1*1024*1024); // sleep to give eviction thread a chance to wake up
+ assert(m_ev.m_num_eviction_thread_runs > 0);
+
+ m_ev.m_size_current = 0;
+ m_ev.destroy();
+ this->verify_ev_destroy();
+}
+
+// test to verify functionality of handling cache pressure,
+// ensures that wait_for_cache_pressure_to_subside works correctly,
+// that decrease_m_size_evicting works correctly, and the logic for when to wake
+// threads up works correctly
+void evictor_unit_test::verify_ev_handling_cache_pressure() {
+ long limit = 400;
+ ZERO_STRUCT(m_ev);
+ m_ev.init(limit, &m_pl, &m_cf_list, m_kb, 0);
+ this->verify_ev_init(limit);
+ m_ev.m_low_size_watermark = 400;
+ m_ev.m_low_size_hysteresis = 400;
+ m_ev.m_high_size_hysteresis = 500;
+ m_ev.m_high_size_watermark = 500;
+ m_ev.m_size_current = 500;
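+ // size_current now sits at the high watermark, so the evictor considers itself under cache pressure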
+
+ m_ev.m_num_eviction_thread_runs = 0;
+
+ // test that waiting for cache pressure wakes eviction thread
+ assert(m_ev.m_num_sleepers == 0);
+ m_ev.wait_for_cache_pressure_to_subside();
+ assert(m_ev.m_num_eviction_thread_runs == 1);
+ assert(m_ev.m_num_sleepers == 0);
+
+ m_ev.m_num_eviction_thread_runs = 0;
+ m_ev.m_size_evicting = 101;
+ m_ev.decrease_size_evicting(101);
+ usleep(1*1024*1024);
+ // should not have been signaled because we have no sleepers
+ assert(m_ev.m_num_eviction_thread_runs == 0);
+
+ m_ev.m_num_eviction_thread_runs = 0;
+ m_ev.m_size_evicting = 101;
+ m_ev.m_num_sleepers = 1;
+ m_ev.decrease_size_evicting(2);
+ usleep(1*1024*1024);
+ // should have been signaled because we have sleepers
+ assert(m_ev.m_num_eviction_thread_runs == 1);
+ assert(m_ev.m_num_sleepers == 1); // make sure fake sleeper did not go away
+
+ m_ev.m_num_eviction_thread_runs = 0;
+ m_ev.m_size_evicting = 102;
+ m_ev.m_num_sleepers = 1;
+ m_ev.decrease_size_evicting(1);
+ usleep(1*1024*1024);
+ // should not have been signaled because we did not go to less than 100
+ assert(m_ev.m_num_eviction_thread_runs == 0);
+ assert(m_ev.m_num_sleepers == 1); // make sure fake sleeper did not go away
+
+ m_ev.m_size_evicting = 0;
+ m_ev.m_num_sleepers = 0;
+ m_ev.m_size_current = 0;
+ m_ev.destroy();
+ this->verify_ev_destroy();
+}
+
+void evictor_unit_test::run_test() {
+ this->verify_ev_counts();
+ this->verify_ev_m_size_reserved();
+ this->verify_ev_handling_cache_pressure();
+ return;
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ evictor_unit_test ev_test;
+ ev_test.init();
+ ev_test.run_test();
+ ev_test.destroy();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-fd-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-fd-test.cc
new file mode 100644
index 00000000..d4e956ec
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-fd-test.cc
@@ -0,0 +1,98 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_os.h"
+
+
+static void
+cachetable_fd_test (void) {
+ const int test_limit = 1;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU);
+ assert_zero(r);
+ char fname1[TOKU_PATH_MAX+1];
+ unlink(toku_path_join(fname1, 2, TOKU_TEST_FILENAME, "test1.dat"));
+ CACHEFILE cf;
+ r = toku_cachetable_openf(&cf, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ int fd1 = toku_cachefile_get_fd(cf); assert(fd1 >= 0);
+
+ // test set to good fd succeeds
+ char fname2[TOKU_PATH_MAX+1];
+ unlink(toku_path_join(fname2, 2, TOKU_TEST_FILENAME, "test2.dat"));
+ int fd2 = open(fname2, O_RDWR | O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(fd2 >= 0 && fd1 != fd2);
+ struct fileid id;
+ r = toku_os_get_unique_file_id(fd2, &id);
+ assert(r == 0);
+ close(fd2);
+
+ // test set to bogus fd fails
+ int fd3 = open(DEV_NULL_FILE, O_RDWR); assert(fd3 >= 0);
+ r = close(fd3);
+ assert(r == 0);
+ r = toku_os_get_unique_file_id(fd3, &id);
+ assert(r < 0);
+
+ // test the filenum functions
+ FILENUM fn = toku_cachefile_filenum(cf);
+ CACHEFILE newcf = 0;
+ r = toku_cachefile_of_filenum(ct, fn, &newcf);
+ assert(r == 0 && cf == newcf);
+
+ // test a bogus filenum
+ fn.fileid++;
+ r = toku_cachefile_of_filenum(ct, fn, &newcf);
+ assert(r == ENOENT);
+
+ toku_cachefile_close(&cf, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ toku_os_initialize_settings(verbose);
+
+ cachetable_fd_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-fetch-inducing-evictor.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-fetch-inducing-evictor.cc
new file mode 100644
index 00000000..ff72e660
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-fetch-inducing-evictor.cc
@@ -0,0 +1,149 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+bool pf_called;
+
+enum pin_evictor_test_type {
+ pin_in_memory,
+ pin_fetch,
+ pin_partial_fetch
+};
+
+static bool pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return true;
+}
+
+static int pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
+ *sizep = make_pair_attr(8);
+ return 0;
+}
+
+
+static void
+cachetable_test (enum pin_evictor_test_type test_type, bool nonblocking) {
+ const int test_limit = 7;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ evictor_test_helpers::set_hysteresis_limits(&ct->ev, test_limit, test_limit);
+ evictor_test_helpers::disable_ev_thread(&ct->ev);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+
+ // at this point, we should have 8 bytes of data in a cachetable whose limit is 7.
+ // adding more data via get_and_pin or get_and_pin_nonblocking should induce eviction
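+ // (the evictor thread is disabled above, so the test detects eviction
+ // pressure by comparing get_num_eviction_runs before and after each pin,
+ // rather than by watching the cachetable actually shrink)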
+ uint64_t old_num_ev_runs = 0;
+ uint64_t new_num_ev_runs = 0;
+ if (test_type == pin_in_memory) {
+ old_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ if (nonblocking) {
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert_zero(r);
+ }
+ else {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ }
+ new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ assert(new_num_ev_runs == old_num_ev_runs);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+ }
+ else if (test_type == pin_fetch) {
+ old_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ if (nonblocking) {
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(2), 2, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r == TOKUDB_TRY_AGAIN);
+ new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ assert(new_num_ev_runs > old_num_ev_runs);
+ }
+ else {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert_zero(r);
+ new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ assert(new_num_ev_runs > old_num_ev_runs);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+ }
+ }
+ else if (test_type == pin_partial_fetch) {
+ old_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ if (nonblocking) {
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, pf_req_callback, pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r == TOKUDB_TRY_AGAIN);
+ new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ assert(new_num_ev_runs > old_num_ev_runs);
+ }
+ else {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, pf_req_callback, pf_callback, true, NULL);
+ assert_zero(r);
+ new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ assert(new_num_ev_runs > old_num_ev_runs);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert_zero(r);
+ }
+ }
+ else {
+ assert(false);
+ }
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test(pin_in_memory, true);
+ cachetable_test(pin_fetch, true);
+ cachetable_test(pin_partial_fetch, true);
+ cachetable_test(pin_in_memory, false);
+ cachetable_test(pin_fetch, false);
+ cachetable_test(pin_partial_fetch, false);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-flush-during-cleaner.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-flush-during-cleaner.cc
new file mode 100644
index 00000000..db8c5cc9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-flush-during-cleaner.cc
@@ -0,0 +1,101 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+CACHEFILE f1;
+
+bool should_close;
+
+static int
+cleaner_callback(
+ void* UU(ftnode_pv),
+ BLOCKNUM blocknum,
+ uint32_t fullhash,
+ void* UU(extraargs)
+ )
+{
+ should_close = true;
+ sleep(2);
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 8;
+ int r = toku_test_cachetable_unpin(f1, blocknum, fullhash, CACHETABLE_CLEAN, attr);
+ assert(r==0);
+ return 0;
+}
+
+static void
+cachetable_test (void) {
+ should_close = false;
+ const int test_limit = 400;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ toku_set_cleaner_period(ct, 1);
+
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ for (int i = 0; i < 10; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.cleaner_callback = cleaner_callback;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(i), i, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ PAIR_ATTR attr = make_pair_attr(8);
+ attr.cache_pressure_size = 8;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), i, CACHETABLE_DIRTY, attr);
+ }
+ while (!should_close) {
+ usleep(1024);
+ }
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+
+
+ toku_cachetable_verify(ct);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-flush-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-flush-test.cc
new file mode 100644
index 00000000..608d3013
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-flush-test.cc
@@ -0,0 +1,116 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static void
+test_cachetable_def_flush (int n) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ const int test_limit = 2*n;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU);
+ assert_zero(r);
+ char fname1[TOKU_PATH_MAX+1];
+ unlink(toku_path_join(fname1, 2, TOKU_TEST_FILENAME, "test1.dat"));
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ char fname2[TOKU_PATH_MAX+1];
+ unlink(toku_path_join(fname2, 2, TOKU_TEST_FILENAME, "test2.dat"));
+ CACHEFILE f2;
+ r = toku_cachetable_openf(&f2, ct, fname2, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ // insert keys 0..n-1
+ int i;
+ for (i=0; i<n; i++) {
+ uint32_t hi;
+ hi = toku_cachetable_hash(f1, make_blocknum(i));
+ toku_cachetable_put(f1, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(1), wc, put_callback_nop);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ hi = toku_cachetable_hash(f2, make_blocknum(i));
+ toku_cachetable_put(f2, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(1), wc, put_callback_nop);
+ r = toku_test_cachetable_unpin(f2, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ }
+ toku_cachetable_verify(ct);
+
+ // verify the keys exist
+ for (i=0; i<n; i++) {
+ uint32_t hi;
+ void *v;
+ hi = toku_cachetable_hash(f1, make_blocknum(i));
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
+ assert(r == 0 && v == (void *)(long)i);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ hi = toku_cachetable_hash(f2, make_blocknum(i));
+ r = toku_cachetable_maybe_get_and_pin(f2, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
+ assert(r == 0 && v == (void *)(long)i);
+ r = toku_test_cachetable_unpin(f2, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ }
+
+ // def_flush
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachefile_verify(f2);
+
+ // verify keys exist in f2
+ for (i=0; i<n; i++) {
+ uint32_t hi;
+ void *v;
+ hi = toku_cachetable_hash(f2, make_blocknum(i));
+ r = toku_cachetable_maybe_get_and_pin(f2, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
+ assert(r == 0);
+ r = toku_test_cachetable_unpin(f2, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ }
+
+ toku_cachefile_close(&f2, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_cachetable_def_flush(8);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-getandpin-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-getandpin-test.cc
new file mode 100644
index 00000000..c5391722
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-getandpin-test.cc
@@ -0,0 +1,119 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static void
+flush (CACHEFILE cf __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY key __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *extraargs __attribute__((__unused__)),
+ PAIR_ATTR size __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool write_me __attribute__((__unused__)),
+ bool keep_me __attribute__((__unused__)),
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ assert((long) key.b == size.size);
+ if (!keep_me) toku_free(v);
+}
+
+static int
+fetch (
+ CACHEFILE UU(cf),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY key,
+ uint32_t UU(hash),
+ void **vptr,
+ void** UU(dd),
+ PAIR_ATTR *sizep,
+ int *dirtyp,
+ void *UU(extra)
+ )
+{
+ *sizep = make_pair_attr((long) key.b);
+ *vptr = toku_malloc(sizep->size);
+ *dirtyp = 0;
+ return 0;
+}
+
+static void
+cachetable_getandpin_test (int n) {
+ const int test_limit = 1024*1024;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ int i;
+
+ // test get_and_pin size
+ for (i=1; i<=n; i++) {
+ uint32_t hi;
+ hi = toku_cachetable_hash(f1, make_blocknum(i));
+ void *v;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(i), hi, &v, wc, fetch, def_pf_req_callback, def_pf_callback, true, 0);
+ assert(r == 0);
+ PAIR_ATTR attr;
+ r = toku_cachetable_get_attr(f1, make_blocknum(i), hi, &attr);
+ assert(r == 0 && attr.size == i);
+
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(i));
+ assert(r == 0);
+ }
+ toku_cachetable_verify(ct);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_getandpin_test(8);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc
new file mode 100644
index 00000000..df4137e9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc
@@ -0,0 +1,89 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+bool foo;
+
+//
+// This test verifies that flushing a cachefile will wait on kibbutzes to finish
+//
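+// A kibbutz here is the cachetable's background work pool; enqueueing the job
+// below presumably registers a background job on the cachefile (hence the
+// remove_background_job_from_cf at the end of the work function), so
+// toku_cachefile_close should block until that job completes. The assert(foo)
+// after the close can only pass if the close actually waited.
+//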
+static void kibbutz_work(void *fe_v)
+{
+ CACHEFILE CAST_FROM_VOIDP(f1, fe_v);
+ sleep(2);
+ // note that we make the size 16 to induce an eviction.
+ // once evictions are moved to their own thread, we will need
+ // to modify this test
+ int r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(16));
+ assert(r==0);
+ foo = true;
+ remove_background_job_from_cf(f1);
+}
+
+
+static void
+run_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ foo = false;
+ cachefile_kibbutz_enq(f1, kibbutz_work, f1);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ assert(foo);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-partial-fetch.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-partial-fetch.cc
new file mode 100644
index 00000000..42c60f1f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-partial-fetch.cc
@@ -0,0 +1,225 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+//
+// This file contains some basic tests for partial fetch, ensuring that
+// it works correctly
+//
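+// The contract exercised below: a pf_req callback reports whether a pinned
+// value still needs a partial fetch, and only when it returns true is the
+// matching pf callback invoked to perform the fetch and report the new
+// PAIR_ATTR. The err_* callbacks assert(false) so the test fails loudly if a
+// callback fires when it should not.
+//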
+
+uint32_t fetch_val = 0;
+bool pf_req_called;
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *value = &fetch_val;
+ *sizep = make_pair_attr(sizeof(fetch_val));
+ return 0;
+}
+
+static int
+err_fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ assert(false);
+ *dirtyp = 0;
+ return 0;
+}
+
+static bool pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return false;
+}
+
+static bool true_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ if (pf_req_called) return false;
+ return true;
+}
+
+static int err_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
+ assert(false);
+ return 0; // gcov
+}
+
+static int pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
+ assert(false);
+ return 0; // gcov
+}
+
+static int true_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* read_extraargs, int UU(fd), PAIR_ATTR* sizep) {
+ pf_req_called = true;
+ *sizep = make_pair_attr(sizeof(fetch_val)+1);
+ assert(read_extraargs == &fetch_val);
+ return 0;
+}
+
+
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ bool doing_prefetch = false;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, pf_req_callback, pf_callback, true, NULL);
+ assert(&fetch_val == v1);
+ //
+ // verify that a prefetch of this node will fail
+ //
+ r = toku_cachefile_prefetch(
+ f1,
+ make_blocknum(1),
+ 1,
+ wc,
+ fetch,
+ pf_req_callback,
+ pf_callback,
+ NULL,
+ &doing_prefetch
+ );
+ assert(r == 0);
+ // make sure the prefetch did not happen, because we have already pinned the node
+ assert(!doing_prefetch);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ //
+ // now get and pin the node again, and make sure that neither partial fetch nor fetch is called
+ //
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ //
+ // now make sure that if we say a partial fetch is required, we get a partial fetch
+ // and that read_extraargs is properly passed down
+ //
+ pf_req_called = false;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, err_fetch, true_pf_req_callback, true_pf_callback, true, &fetch_val);
+ assert(pf_req_called);
+ PAIR_ATTR attr;
+ r = toku_cachetable_get_attr(f1, make_blocknum(1), 1, &attr);
+ assert(r == 0);
+ assert(attr.size == sizeof(fetch_val)+1);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+
+ // close and reopen cachefile so we can do some simple prefetch tests
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ //
+ // verify that a prefetch of the node will succeed
+ //
+ r = toku_cachefile_prefetch(
+ f1,
+ make_blocknum(1),
+ 1,
+ wc,
+ fetch,
+ pf_req_callback,
+ pf_callback,
+ NULL,
+ &doing_prefetch
+ );
+ assert(r == 0);
+ // make sure the prefetch does happen, because the node is not yet in the newly opened cachetable
+ assert(doing_prefetch);
+ //
+ // now verify we can pin it, and NO fetch callback should get called
+ //
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
+ assert(&fetch_val == v1);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+
+ //
+ // now verify a prefetch that requires a partial fetch works, and that we can then pin the node
+ //
+ pf_req_called = false;
+ r = toku_cachefile_prefetch(
+ f1,
+ make_blocknum(1),
+ 1,
+ wc,
+ fetch,
+ true_pf_req_callback,
+ true_pf_callback,
+ &fetch_val,
+ &doing_prefetch
+ );
+ assert(doing_prefetch);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
+ assert(&fetch_val == v1);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-pin-checkpoint.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-pin-checkpoint.cc
new file mode 100644
index 00000000..9632b199
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-pin-checkpoint.cc
@@ -0,0 +1,433 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+//
+// This test ensures that get_and_pin with dependent nodes works
+// as intended with checkpoints, by having multiple threads change
+// values of elements in data, and ensuring that checkpoints always get
+// snapshots in which the sum of all the elements in data is 0.
+//
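+// The invariant maintained by the mover threads: each transfer increments one
+// element and decrements another while the later pin names the earlier pair
+// as a dependent pair, so any consistent snapshot of the array sums to zero.
+// A checkpoint that captured a torn update would make the checkpointed_data
+// sum nonzero and trip the asserts in checkpoints() and sum_vals().
+//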
+
+// The arrays
+
+#define NUM_ELEMENTS 100
+#define NUM_MOVER_THREADS 4
+
+int64_t data[NUM_ELEMENTS];
+int64_t checkpointed_data[NUM_ELEMENTS];
+PAIR data_pair[NUM_ELEMENTS];
+
+uint32_t time_of_test;
+bool run_test;
+
+static void
+clone_callback(
+ void* value_data,
+ void** cloned_value_data,
+ long* clone_size,
+ PAIR_ATTR* new_attr,
+ bool UU(for_checkpoint),
+ void* UU(write_extraargs)
+ )
+{
+ new_attr->is_valid = false;
+ int64_t* XMALLOC(data_val);
+ *data_val = *(int64_t *)value_data;
+ *cloned_value_data = data_val;
+ *clone_size = 8;
+}
+
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool write_me,
+ bool keep_me,
+ bool checkpoint_me,
+ bool UU(is_clone)
+ ) {
+ /* simulate a disk write: store the value into data (and into checkpointed_data when written for a checkpoint) */
+ int64_t val_to_write = *(int64_t *)v;
+ size_t data_index = (size_t)k.b;
+ assert(val_to_write != INT64_MAX);
+ if (write_me) {
+ usleep(10);
+ data[data_index] = val_to_write;
+ if (checkpoint_me) checkpointed_data[data_index] = val_to_write;
+ }
+ if (!keep_me) {
+ toku_free(v);
+ }
+}
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR p,
+ int UU(fd),
+ CACHEKEY k,
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value,
+ void** UU(dd),
+ PAIR_ATTR *sizep,
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ size_t data_index = (size_t)k.b;
+ assert(data[data_index] != INT64_MAX);
+
+ int64_t* XMALLOC(data_val);
+ usleep(10);
+ *data_val = data[data_index];
+ data_pair[data_index] = p;
+ *value = data_val;
+ *sizep = make_pair_attr(8);
+ return 0;
+}
+
+static void *test_time(void *arg) {
+ //
+ // if time_of_test is set to 0, run indefinitely
+ //
+ if (time_of_test != 0) {
+ usleep(time_of_test*1000*1000);
+ if (verbose) printf("should now end test\n");
+ run_test = false;
+ }
+ if (verbose) printf("should be ending test now\n");
+ return arg;
+}
+
+CACHETABLE ct;
+CACHEFILE f1;
+
+static void *move_numbers(void *arg) {
+ while (run_test) {
+ int rand_key1 = 0;
+ int rand_key2 = 0;
+ int less;
+ int greater;
+ int r;
+ while (rand_key1 == rand_key2) {
+ rand_key1 = random() % NUM_ELEMENTS;
+ rand_key2 = random() % NUM_ELEMENTS;
+ less = (rand_key1 < rand_key2) ? rand_key1 : rand_key2;
+ greater = (rand_key1 > rand_key2) ? rand_key1 : rand_key2;
+ }
+ assert(less < greater);
+
+ /*
+ while (rand_key1 == rand_key2) {
+ rand_key1 = random() % (NUM_ELEMENTS/2);
+ rand_key2 = (NUM_ELEMENTS-1) - rand_key1;
+ less = (rand_key1 < rand_key2) ? rand_key1 : rand_key2;
+ greater = (rand_key1 > rand_key2) ? rand_key1 : rand_key2;
+ }
+ assert(less < greater);
+ */
+
+ void* v1;
+ CACHEKEY less_key;
+ less_key.b = less;
+ uint32_t less_fullhash = less;
+ enum cachetable_dirty less_dirty = CACHETABLE_DIRTY;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.clone_callback = clone_callback;
+ r = toku_cachetable_get_and_pin_with_dep_pairs(
+ f1,
+ less_key,
+ less,
+ &v1,
+ wc, fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_CHEAP,
+ NULL,
+ 0, //num_dependent_pairs
+ NULL,
+ NULL
+ );
+ assert(r==0);
+ int64_t* first_val = (int64_t *)v1;
+
+ CACHEKEY greater_key;
+ greater_key.b = greater;
+ uint32_t greater_fullhash = greater;
+ enum cachetable_dirty greater_dirty = CACHETABLE_DIRTY;
+ PAIR dep_pair = data_pair[less];
+ r = toku_cachetable_get_and_pin_with_dep_pairs(
+ f1,
+ make_blocknum(greater),
+ greater,
+ &v1,
+ wc, fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_CHEAP,
+ NULL,
+ 1, //num_dependent_pairs
+ &dep_pair,
+ &less_dirty
+ );
+ assert(r==0);
+
+ int64_t* second_val = (int64_t *)v1;
+ assert(second_val != first_val); // sanity check that we are messing with different vals
+ assert(*first_val != INT64_MAX);
+ assert(*second_val != INT64_MAX);
+ usleep(10);
+ (*first_val)++;
+ (*second_val)--;
+ r = toku_test_cachetable_unpin(f1, less_key, less_fullhash, less_dirty, make_pair_attr(8));
+
+ int third = 0;
+ int num_possible_values = (NUM_ELEMENTS-1) - greater;
+ if (num_possible_values > 0) {
+ third = (random() % (num_possible_values)) + greater + 1;
+ CACHEKEY third_key;
+ third_key.b = third;
+ dep_pair = data_pair[greater];
+ uint32_t third_fullhash = third;
+ enum cachetable_dirty third_dirty = CACHETABLE_DIRTY;
+ r = toku_cachetable_get_and_pin_with_dep_pairs(
+ f1,
+ make_blocknum(third),
+ third,
+ &v1,
+ wc, fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_CHEAP,
+ NULL,
+ 1, //num_dependent_pairs
+ &dep_pair,
+ &greater_dirty
+ );
+ assert(r==0);
+
+ int64_t* third_val = (int64_t *)v1;
+ assert(second_val != third_val); // sanity check that we are messing with different vals
+ usleep(10);
+ (*second_val)++;
+ (*third_val)--;
+ r = toku_test_cachetable_unpin(f1, third_key, third_fullhash, third_dirty, make_pair_attr(8));
+ }
+ r = toku_test_cachetable_unpin(f1, greater_key, greater_fullhash, greater_dirty, make_pair_attr(8));
+ }
+ return arg;
+}
+
+static void *read_random_numbers(void *arg) {
+ while(run_test) {
+ int rand_key1 = random() % NUM_ELEMENTS;
+ void* v1;
+ int r1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.clone_callback = clone_callback;
+ r1 = toku_cachetable_get_and_pin_nonblocking(
+ f1,
+ make_blocknum(rand_key1),
+ rand_key1,
+ &v1,
+ wc, fetch, def_pf_req_callback, def_pf_callback,
+ PL_READ,
+ NULL,
+ NULL
+ );
+ if (r1 == 0) {
+ r1 = toku_test_cachetable_unpin(f1, make_blocknum(rand_key1), rand_key1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r1 == 0);
+ }
+ }
+ if (verbose) printf("leaving\n");
+ return arg;
+}
+
+static int num_checkpoints = 0;
+static void *checkpoints(void *arg) {
+ // first verify that checkpointed_data is consistent (it must sum to zero)
+ while(run_test) {
+ int64_t sum = 0;
+ for (int i = 0; i < NUM_ELEMENTS; i++) {
+ sum += checkpointed_data[i];
+ }
+ assert (sum==0);
+
+ //
+ // now run a checkpoint
+ //
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert (sum==0);
+ for (int i = 0; i < NUM_ELEMENTS; i++) {
+ sum += checkpointed_data[i];
+ }
+ assert (sum==0);
+ usleep(10*1024);
+ num_checkpoints++;
+ }
+ return arg;
+}
+
+static void
+test_begin_checkpoint (
+ LSN UU(checkpoint_lsn),
+ void* UU(header_v))
+{
+ memcpy(checkpointed_data, data, sizeof(int64_t)*NUM_ELEMENTS);
+}
+
+static void sum_vals(void) {
+ int64_t sum = 0;
+ for (int i = 0; i < NUM_ELEMENTS; i++) {
+ //printf("actual: i %d val %" PRId64 " \n", i, data[i]);
+ sum += data[i];
+ }
+ if (verbose) printf("actual sum %" PRId64 " \n", sum);
+ assert(sum == 0);
+ sum = 0;
+ for (int i = 0; i < NUM_ELEMENTS; i++) {
+ //printf("checkpointed: i %d val %" PRId64 " \n", i, checkpointed_data[i]);
+ sum += checkpointed_data[i];
+ }
+ if (verbose) printf("checkpointed sum %" PRId64 " \n", sum);
+ assert(sum == 0);
+}
+
+static void
+cachetable_test (void) {
+ const int test_limit = NUM_ELEMENTS;
+
+ //
+ // let's set up the data
+ //
+ for (int64_t i = 0; i < NUM_ELEMENTS; i++) {
+ data[i] = 0;
+ checkpointed_data[i] = 0;
+ }
+ time_of_test = 30;
+
+ int r;
+
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ toku_cachefile_set_userdata(
+ f1,
+ NULL,
+ &dummy_log_fassociate,
+ &dummy_close_usr,
+ &dummy_free_usr,
+ &dummy_chckpnt_usr,
+ &test_begin_checkpoint,
+ &dummy_end,
+ &dummy_note_pin,
+ &dummy_note_unpin
+ );
+
+ toku_pthread_t time_tid;
+ toku_pthread_t checkpoint_tid;
+ toku_pthread_t move_tid[NUM_MOVER_THREADS];
+ toku_pthread_t read_random_tid[NUM_MOVER_THREADS];
+ run_test = true;
+
+ for (int i = 0; i < NUM_MOVER_THREADS; i++) {
+ r = toku_pthread_create(toku_uninstrumented,
+ &read_random_tid[i],
+ nullptr,
+ read_random_numbers,
+ nullptr);
+ assert_zero(r);
+ }
+ for (int i = 0; i < NUM_MOVER_THREADS; i++) {
+ r = toku_pthread_create(toku_uninstrumented,
+ &move_tid[i],
+ nullptr,
+ move_numbers,
+ nullptr);
+ assert_zero(r);
+ }
+ r = toku_pthread_create(
+ toku_uninstrumented, &checkpoint_tid, nullptr, checkpoints, nullptr);
+ assert_zero(r);
+ r = toku_pthread_create(
+ toku_uninstrumented, &time_tid, nullptr, test_time, nullptr);
+ assert_zero(r);
+
+ void *ret;
+ r = toku_pthread_join(time_tid, &ret);
+ assert_zero(r);
+ r = toku_pthread_join(checkpoint_tid, &ret);
+ assert_zero(r);
+ for (int i = 0; i < NUM_MOVER_THREADS; i++) {
+ r = toku_pthread_join(move_tid[i], &ret);
+ assert_zero(r);
+ }
+ for (int i = 0; i < NUM_MOVER_THREADS; i++) {
+ r = toku_pthread_join(read_random_tid[i], &ret);
+ assert_zero(r);
+ }
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+ sum_vals();
+ if (verbose) printf("num_checkpoints %d\n", num_checkpoints);
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc
new file mode 100644
index 00000000..2d358110
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc
@@ -0,0 +1,111 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+CACHETABLE ct;
+
+CACHEFILE f1;
+
+static void
+run_test (void) {
+ const int test_limit = 20;
+ int r;
+ ct = NULL;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ f1 = NULL;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ void* v2;
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
+
+ for (int i = 0; i < 20; i++) {
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
+ }
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ // begin_checkpoint above marked block 1 as pending a checkpoint; since it is
+ // unpinned clean here, the get_and_pin_nonblocking below should succeed
+ // rather than return TOKUDB_TRY_AGAIN
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
+
+ r = toku_cachetable_get_and_pin_nonblocking(
+ f1,
+ make_blocknum(1),
+ 1,
+ &v1,
+ def_write_callback(NULL),
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ PL_WRITE_EXPENSIVE,
+ NULL,
+ NULL
+ );
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
+
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert(r==0);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-checkpoint-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-checkpoint-test.cc
new file mode 100644
index 00000000..13c4f2ef
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-checkpoint-test.cc
@@ -0,0 +1,201 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that a cachetable checkpoint works while prefetched blocks are active.
+// the blocks still in the reading state should be ignored.
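+// (the prefetch of block n+1 below is still inside its fetch callback -- which
+// sleeps for 10 seconds -- when the checkpoint runs, so that pair stays in the
+// reading state; the flush/write counters should therefore only account for
+// the n fully inserted items)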
+
+#include "test.h"
+#include <stdio.h>
+#include <unistd.h>
+#include "cachetable-test.h"
+
+#include "cachetable/checkpoint.h"
+
+const int item_size = 1;
+
+int n_flush, n_write_me, n_keep_me, n_fetch;
+
+static void flush(
+ CACHEFILE UU(cf),
+ int UU(fd),
+ CACHEKEY UU(key),
+ void *UU(value),
+ void** UU(dd),
+ void *UU(extraargs),
+ PAIR_ATTR size,
+ PAIR_ATTR* UU(new_size),
+ bool write_me,
+ bool keep_me,
+ bool UU(for_checkpoint),
+ bool UU(is_clone)
+ )
+{
+ // assert(key == make_blocknum((long)value));
+ assert(size.size == item_size);
+ n_flush++;
+ if (write_me) n_write_me++;
+ if (keep_me) n_keep_me++;
+}
+
+static int fetch(
+ CACHEFILE UU(cf),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY UU(key),
+ uint32_t UU(fullhash),
+ void **UU(value),
+ void** UU(dd),
+ PAIR_ATTR *UU(sizep),
+ int *dirtyp,
+ void *UU(extraargs)
+ )
+{
+ n_fetch++;
+ sleep(10);
+ *value = 0;
+ *sizep = make_pair_attr(item_size);
+ *dirtyp = 0;
+ return 0;
+}
+
+// put n items into the cachetable, maybe mark them dirty, do a checkpoint, and
+// verify that all of the items have been written and are clean.
+static void cachetable_prefetch_checkpoint_test(int n, enum cachetable_dirty dirty) {
+ if (verbose) printf("%s:%d n=%d dirty=%d\n", __FUNCTION__, __LINE__, n, (int) dirty);
+ const int test_limit = n;
+ int r;
+ CACHETABLE ct;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ // disable the eviction thread. this test was written assuming
+ // evictions happen on the client thread, which is no longer true.
+ evictor_test_helpers::disable_ev_thread(&ct->ev);
+
+ // prefetch block n+1. this will take 10 seconds.
+ {
+ CACHEKEY key = make_blocknum(n+1);
+ uint32_t fullhash = toku_cachetable_hash(f1, key);
+ r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
+ toku_cachetable_verify(ct);
+ }
+
+ // insert items into the cachetable. all should be dirty
+ int i;
+ for (i=0; i<n; i++) {
+ CACHEKEY key = make_blocknum(i);
+ uint32_t hi = toku_cachetable_hash(f1, key);
+ toku_cachetable_put(f1, key, hi, (void *)(long)i, make_pair_attr(1), wc, put_callback_nop);
+
+ r = toku_test_cachetable_unpin(f1, key, hi, dirty, make_pair_attr(item_size));
+ assert(r == 0);
+
+ void *v;
+ int its_dirty;
+ long long its_pin;
+ long its_size;
+ r = toku_cachetable_get_key_state(ct, key, f1, &v, &its_dirty, &its_pin, &its_size);
+ if (r != 0)
+ continue;
+ assert(its_dirty == CACHETABLE_DIRTY);
+ assert(its_pin == 0);
+ assert(its_size == item_size);
+ }
+
+ // the checkpoint should cause n writes, but since n <= the cachetable size,
+ // all items should be kept in the cachetable
+ n_flush = n_write_me = n_keep_me = n_fetch = 0;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(r == 0);
+ assert(n_flush == n && n_write_me == n && n_keep_me == n);
+
+ // after the checkpoint, all of the items should be clean
+ for (i=0; i<n; i++) {
+ CACHEKEY key = make_blocknum(i);
+ uint32_t hi = toku_cachetable_hash(f1, key);
+ void *v;
+ r = toku_cachetable_maybe_get_and_pin(f1, key, hi, PL_WRITE_EXPENSIVE, &v);
+ if (r != 0)
+ continue;
+ r = toku_test_cachetable_unpin(f1, key, hi, CACHETABLE_CLEAN, make_pair_attr(item_size));
+ assert(r == 0);
+
+ int its_dirty;
+ long long its_pin;
+ long its_size;
+ r = toku_cachetable_get_key_state(ct, key, f1, &v, &its_dirty, &its_pin, &its_size);
+ if (r != 0)
+ continue;
+ assert(its_dirty == CACHETABLE_CLEAN);
+ assert(its_pin == 0);
+ assert(its_size == item_size);
+ }
+
+ // a subsequent checkpoint should cause no flushes or writes, since all of the items are clean
+ n_flush = n_write_me = n_keep_me = n_fetch = 0;
+
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(r == 0);
+ assert(n_flush == 0 && n_write_me == 0 && n_keep_me == 0);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ int i;
+ for (i=1; i<argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ }
+ for (i=0; i<8; i++) {
+ cachetable_prefetch_checkpoint_test(i, CACHETABLE_CLEAN);
+ cachetable_prefetch_checkpoint_test(i, CACHETABLE_DIRTY);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-close-leak-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-close-leak-test.cc
new file mode 100644
index 00000000..5e136619
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-close-leak-test.cc
@@ -0,0 +1,115 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that closing the cachetable with an in-progress prefetch works
+
+#include "test.h"
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ assert(w == false && v != NULL);
+ toku_free(v);
+}
+
+static int fetch_calls = 0;
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp __attribute__((__unused__)),
+ void *extraargs __attribute__((__unused__))
+ ) {
+
+ fetch_calls++;
+ sleep(10);
+
+ *value = toku_malloc(1);
+ *sizep = make_pair_attr(1);
+ *dirtyp = 0;
+
+ return 0;
+}
+
+static void cachetable_prefetch_close_leak_test (void) {
+ const int test_limit = 1;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ // prefetch block 0. this will take 10 seconds.
+ CACHEKEY key = make_blocknum(0);
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
+ toku_cachetable_verify(ct);
+
+ // close with the prefetch in progress. the close should block until
+ // all of the reads and writes are complete.
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_prefetch_close_leak_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-close-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-close-test.cc
new file mode 100644
index 00000000..bcc6556b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-close-test.cc
@@ -0,0 +1,141 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that closing the cachetable with prefetches in progress works
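+//
+// the fetch callback sleeps for 2 seconds, so the prefetch issued below is
+// still in flight when the cachefile is closed. the test runs twice: once
+// with the block already cached (the partial-fetch case) and once with a
+// plain prefetch of an uncached block.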
+
+#include "test.h"
+
+bool expect_pf;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ assert(w == false);
+}
+
+static int fetch_calls = 0;
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp __attribute__((__unused__)),
+ void *extraargs __attribute__((__unused__))
+ ) {
+
+ fetch_calls++;
+ sleep(2);
+
+ *value = 0;
+ *sizep = make_pair_attr(1);
+ *dirtyp = 0;
+
+ return 0;
+}
+
+static void cachetable_prefetch_full_test (bool partial_fetch) {
+ const int test_limit = 2;
+ expect_pf = false;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ // prefetch block 0. this will take 2 seconds.
+ CACHEKEY key = make_blocknum(0);
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+
+ // if we want to do a test of partial fetch,
+ // we first put the key into the cachefile so that
+ // the subsequent prefetch does a partial fetch
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ if (partial_fetch) {
+ expect_pf = true;
+ void* value;
+ r = toku_cachetable_get_and_pin(
+ f1,
+ key,
+ fullhash,
+ &value,
+ wc,
+ fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_CLEAN, make_pair_attr(1));
+ }
+
+ r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
+ toku_cachetable_verify(ct);
+
+ // close with the prefetch in progress. the close should block until
+ // all of the reads and writes are complete.
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_prefetch_full_test(true);
+ cachetable_prefetch_full_test(false);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-flowcontrol-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-flowcontrol-test.cc
new file mode 100644
index 00000000..baa45408
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-flowcontrol-test.cc
@@ -0,0 +1,150 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that prefetching multiple blocks runs into the cachetable size limit
+// and that flushes eventually happen.
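+//
+// the evictor's hysteresis limits are pinned at the cachetable size limit and
+// its background thread is disabled (see evictor_test_helpers below), so this
+// test controls when evictions can be triggered.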
+
+
+#include "test.h"
+#include "cachetable/cachetable-internal.h"
+
+static int flush_calls = 0;
+static int flush_evict_calls = 0;
+static int evicted_keys = 0;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k,
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w,
+ bool keep,
+ bool f_ckpt __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ assert(w == false);
+ sleep(1);
+ flush_calls++;
+ if (keep == false) {
+ flush_evict_calls++;
+ if (verbose) printf("%s:%d flush %" PRId64 "\n", __FUNCTION__, __LINE__, k.b);
+ evicted_keys |= 1 << k.b;
+ }
+}
+
+static int fetch_calls = 0;
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k,
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value,
+ void** UU(dd),
+ PAIR_ATTR *sizep,
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+
+ fetch_calls++;
+ if (verbose) printf("%s:%d %" PRId64 "\n", __FUNCTION__, __LINE__, k.b);
+
+ *value = 0;
+ *sizep = make_pair_attr(1);
+ *dirtyp = 0;
+
+ return 0;
+}
+
+// Note: cachetable_size_limit must be a power of 2
+static void cachetable_prefetch_flowcontrol_test (int cachetable_size_limit) {
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, cachetable_size_limit, ZERO_LSN, nullptr);
+ evictor_test_helpers::set_hysteresis_limits(&ct->ev, cachetable_size_limit, cachetable_size_limit);
+ evictor_test_helpers::disable_ev_thread(&ct->ev);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ int i;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+
+ // prefetch keys 0 .. N. they should all fit in the cachetable and each prefetch should start
+ for (i=0; i<cachetable_size_limit+1; i++) {
+ CACHEKEY key = make_blocknum(i);
+ uint32_t fullhash = toku_cachetable_hash(f1, key);
+ bool doing_prefetch = false;
+ r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, &doing_prefetch);
+ assert(doing_prefetch);
+ toku_cachetable_verify(ct);
+ }
+
+ // wait for all of the blocks to be fetched
+ sleep(3);
+
+ // prefetch keys N .. 2*N-1. 0 .. N-1 should be evicted.
+ for (i=i+1; i<2*cachetable_size_limit; i++) {
+ CACHEKEY key = make_blocknum(i);
+ uint32_t fullhash = toku_cachetable_hash(f1, key);
+ bool doing_prefetch = false;
+ r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, &doing_prefetch);
+ assert(!doing_prefetch);
+ toku_cachetable_verify(ct);
+ }
+
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ if (verbose) printf("%s:%d 0x%x 0x%x\n", __FUNCTION__, __LINE__,
+ evicted_keys, (1 << (2*cachetable_size_limit))-1);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_prefetch_flowcontrol_test(8);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-getandpin-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-getandpin-test.cc
new file mode 100644
index 00000000..5769b5c8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-getandpin-test.cc
@@ -0,0 +1,183 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* verify that get_and_pin waits while a prefetch block is pending */
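+//
+// the fetch and partial-fetch callbacks sleep for 2 seconds, so while the
+// prefetch is pending a nonblocking get_and_pin must return TOKUDB_TRY_AGAIN
+// and a blocking get_and_pin must wait; the elapsed-time check (>= ~1.9s)
+// verifies that the blocking pin really waited for the prefetch to finish.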
+
+#include "test.h"
+
+bool do_pf;
+bool expect_pf;
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ assert(w == false);
+}
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp __attribute__((__unused__)),
+ void *extraargs __attribute__((__unused__))
+ ) {
+
+ if(!expect_pf) {
+ sleep(2);
+ }
+ *value = 0;
+ *sizep = make_pair_attr(2);
+ *dirtyp = 0;
+
+ return 0;
+}
+
+static bool pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ if (do_pf) {
+ assert(expect_pf);
+ return true;
+ }
+ else {
+ return false;
+ }
+}
+
+static int pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
+ assert(expect_pf);
+ sleep(2);
+ *sizep = make_pair_attr(2);
+ return 0;
+}
+
+static uint64_t tdelta_usec(struct timeval *tend, struct timeval *tstart) {
+ uint64_t t = tend->tv_sec * 1000000 + tend->tv_usec;
+ t -= tstart->tv_sec * 1000000 + tstart->tv_usec;
+ return t;
+}
+
+static void cachetable_prefetch_maybegetandpin_test (bool do_partial_fetch) {
+ const int test_limit = 2;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ expect_pf = false;
+ do_pf = false;
+ CACHEKEY key = make_blocknum(0);
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ if (do_partial_fetch) {
+ expect_pf = true;
+ void* value;
+ r = toku_cachetable_get_and_pin(
+ f1,
+ key,
+ fullhash,
+ &value,
+ wc,
+ fetch,
+ pf_req_callback,
+ pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_CLEAN, make_pair_attr(1));
+ }
+
+ struct timeval tstart;
+ gettimeofday(&tstart, NULL);
+
+ // prefetch block 0. this will take 2 seconds.
+ do_pf = true;
+ r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, pf_req_callback, pf_callback, 0, NULL);
+ toku_cachetable_verify(ct);
+
+ // verify that get_and_pin waits while the prefetch is in progress
+ void *v = 0;
+ do_pf = false;
+ r = toku_cachetable_get_and_pin_nonblocking(f1, key, fullhash, &v, wc, fetch, pf_req_callback, pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r==TOKUDB_TRY_AGAIN);
+ r = toku_cachetable_get_and_pin(f1, key, fullhash, &v, wc, fetch, pf_req_callback, pf_callback, true, NULL);
+ assert(r == 0 && v == 0);
+ PAIR_ATTR attr;
+ r = toku_cachetable_get_attr(f1, key, fullhash, &attr);
+ assert(r == 0 && attr.size == 2);
+
+ struct timeval tend;
+ gettimeofday(&tend, NULL);
+
+ assert(tdelta_usec(&tend, &tstart) >= 1900000);
+
+ toku_cachetable_verify(ct);
+
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ toku_cachetable_verify(ct);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_prefetch_maybegetandpin_test(true);
+ cachetable_prefetch_maybegetandpin_test(false);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-maybegetandpin-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-maybegetandpin-test.cc
new file mode 100644
index 00000000..41e326d7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch-maybegetandpin-test.cc
@@ -0,0 +1,108 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* verify that maybe_get_and_pin returns an error while a prefetch block is pending */
+
+#include "test.h"
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp __attribute__((__unused__)),
+ void *extraargs __attribute__((__unused__))
+ ) {
+
+ sleep(10);
+
+ *value = 0;
+ *sizep = make_pair_attr(1);
+ *dirtyp = 1;
+
+ return 0;
+}
+
+
+static void cachetable_prefetch_maybegetandpin_test (void) {
+ const int test_limit = 1;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ // prefetch block 0. this will take 10 seconds.
+ CACHEKEY key = make_blocknum(0);
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
+ toku_cachetable_verify(ct);
+
+ // verify that maybe_get_and_pin returns an error while the prefetch is in progress
+ int i;
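+ // spin, yielding the processor, until maybe_get_and_pin succeeds. i counts
+ // the attempts; the assert below (i > 1) checks that at least the first
+ // attempt failed while the prefetch was still pending.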
+ for (i=1; i>=0; i++) {
+ void *v;
+ r = toku_cachetable_maybe_get_and_pin(f1, key, fullhash, PL_WRITE_EXPENSIVE, &v);
+ if (r == 0) break;
+ toku_pthread_yield();
+ }
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, i);
+ assert(i>1);
+ toku_cachetable_verify(ct);
+
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ toku_cachetable_verify(ct);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_prefetch_maybegetandpin_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch2-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch2-test.cc
new file mode 100644
index 00000000..8e9f58d6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-prefetch2-test.cc
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that prefetching the same block multiple times only
+// fetches the block once.
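+//
+// the fetch callback sleeps for 10 seconds, so the second prefetch below is
+// issued while the first fetch is still pending; fetch_calls must stay at 1.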
+
+#include "test.h"
+
+static int fetch_calls = 0;
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp __attribute__((__unused__)),
+ void *extraargs __attribute__((__unused__))
+ ) {
+
+ fetch_calls++;
+ sleep(10);
+
+ *value = 0;
+ *sizep = make_pair_attr(1);
+ *dirtyp = 1;
+
+ return 0;
+}
+
+static void cachetable_prefetch_maybegetandpin_test (void) {
+ const int test_limit = 1;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ // prefetch block 0. this will take 10 seconds.
+ CACHEKEY key = make_blocknum(0);
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
+ toku_cachetable_verify(ct);
+
+ // prefetch again. this should do nothing.
+ r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
+ toku_cachetable_verify(ct);
+
+ // verify that maybe_get_and_pin returns an error while the prefetch is in progress
+ int i;
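+ // spin, yielding the processor, until maybe_get_and_pin succeeds. i counts
+ // the attempts; the assert below (i > 1) checks that at least the first
+ // attempt failed while the prefetch was still pending.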
+ for (i=1; i>=0; i++) {
+ void *v;
+ r = toku_cachetable_maybe_get_and_pin(f1, key, fullhash, PL_WRITE_EXPENSIVE, &v);
+ if (r == 0) break;
+ toku_pthread_yield();
+ }
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, i);
+ assert(i>1);
+ toku_cachetable_verify(ct);
+
+ // there should only be 1 fetch callback
+ assert(fetch_calls == 1);
+
+ r = toku_test_cachetable_unpin(f1, key, fullhash, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ toku_cachetable_verify(ct);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_prefetch_maybegetandpin_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-put-checkpoint.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-put-checkpoint.cc
new file mode 100644
index 00000000..a159d448
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-put-checkpoint.cc
@@ -0,0 +1,562 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+//
+// This test ensures that get_and_pin with dependent nodes works
+// as intended with checkpoints: multiple threads keep changing the
+// values of the elements in data, and every checkpoint must capture a
+// snapshot in which the elements of data sum to 0.
+//
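+// The mover threads pin a parent and one of its children together, shift
+// value between them, and recurse toward a leaf; the dependent-pair
+// arguments and the clone callback are what let a concurrent checkpoint
+// capture a consistent (zero-sum) snapshot while the movers keep running.
+//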
+
+// The arrays
+
+// must be power of 2 minus 1
+#define NUM_ELEMENTS 127
+// must be (NUM_ELEMENTS +1)/2 - 1
+#define NUM_INTERNAL 63
+#define NUM_MOVER_THREADS 4
+
+int64_t data[NUM_ELEMENTS];
+int64_t checkpointed_data[NUM_ELEMENTS];
+PAIR data_pair[NUM_ELEMENTS];
+
+uint32_t time_of_test;
+bool run_test;
+
+static void
+put_callback_pair(
+ CACHEKEY key,
+ void *UU(v),
+ PAIR p)
+{
+ int64_t data_index = key.b;
+ data_pair[data_index] = p;
+}
+
+static void
+clone_callback(
+ void* value_data,
+ void** cloned_value_data,
+ long* clone_size,
+ PAIR_ATTR* new_attr,
+ bool UU(for_checkpoint),
+ void* UU(write_extraargs)
+ )
+{
+ new_attr->is_valid = false;
+ int64_t* XMALLOC(data_val);
+ *data_val = *(int64_t *)value_data;
+ *cloned_value_data = data_val;
+ *new_attr = make_pair_attr(8);
+ *clone_size = 8;
+}
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size,
+ bool write_me,
+ bool keep_me,
+ bool checkpoint_me,
+ bool UU(is_clone)
+ ) {
+ int64_t val_to_write = *(int64_t *)v;
+ size_t data_index = (size_t)k.b;
+ if (write_me) {
+ usleep(10);
+ *new_size = make_pair_attr(8);
+ data[data_index] = val_to_write;
+ if (checkpoint_me) checkpointed_data[data_index] = val_to_write;
+ }
+ if (!keep_me) {
+ toku_free(v);
+ }
+}
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR p,
+ int UU(fd),
+ CACHEKEY k,
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value,
+ void** UU(dd),
+ PAIR_ATTR *sizep,
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ size_t data_index = (size_t)k.b;
+ // assert that data_index is valid
+ // if it is INT64_MAX, then that means
+ // the block is not supposed to be in the cachetable
+ assert(data[data_index] != INT64_MAX);
+
+ int64_t* XMALLOC(data_val);
+ usleep(10);
+ *data_val = data[data_index];
+ data_pair[data_index] = p;
+ *value = data_val;
+ *sizep = make_pair_attr(8);
+ return 0;
+}
+
+static void *test_time(void *arg) {
+ //
+ // if time_of_test is set to 0, run indefinitely
+ //
+ if (time_of_test != 0) {
+ usleep(time_of_test*1000*1000);
+ if (verbose) printf("should now end test\n");
+ run_test = false;
+ }
+ if (verbose) printf("should be ending test now\n");
+ return arg;
+}
+
+CACHETABLE ct;
+CACHEFILE f1;
+
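+// pin a randomly chosen child of `parent` (passing the parent as a dependent
+// pair), add 1 to the parent's value and subtract 1 from the child's, then
+// recurse on the child until a leaf element is reached.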
+static void move_number_to_child(
+ int parent,
+ int64_t* parent_val,
+ enum cachetable_dirty parent_dirty
+ )
+{
+ int child = 0;
+ int r;
+ child = ((random() % 2) == 0) ? (2*parent + 1) : (2*parent + 2);
+
+ void* v1;
+ CACHEKEY parent_key;
+ parent_key.b = parent;
+ uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
+
+
+ CACHEKEY child_key;
+ child_key.b = child;
+ uint32_t child_fullhash = toku_cachetable_hash(f1, child_key);
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.clone_callback = clone_callback;
+ PAIR dep_pair = data_pair[parent];
+ r = toku_cachetable_get_and_pin_with_dep_pairs(
+ f1,
+ child_key,
+ child_fullhash,
+ &v1,
+ wc, fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_CHEAP,
+ NULL,
+ 1, //num_dependent_pairs
+ &dep_pair,
+ &parent_dirty
+ );
+ assert(r==0);
+
+ int64_t* child_val = (int64_t *)v1;
+ assert(child_val != parent_val); // sanity check that we are messing with different vals
+ assert(*parent_val != INT64_MAX);
+ assert(*child_val != INT64_MAX);
+ usleep(10);
+ (*parent_val)++;
+ (*child_val)--;
+ r = toku_test_cachetable_unpin(f1, parent_key, parent_fullhash, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+ if (child < NUM_INTERNAL) {
+ move_number_to_child(child, child_val, CACHETABLE_DIRTY);
+ }
+ else {
+ r = toku_test_cachetable_unpin(f1, child_key, child_fullhash, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+ }
+}
+
+static void *move_numbers(void *arg) {
+ while (run_test) {
+ int parent = 0;
+ int r;
+ void* v1;
+ CACHEKEY parent_key;
+ parent_key.b = parent;
+ uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.clone_callback = clone_callback;
+ r = toku_cachetable_get_and_pin_with_dep_pairs(
+ f1,
+ parent_key,
+ parent_fullhash,
+ &v1,
+ wc, fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_CHEAP,
+ NULL,
+ 0, //num_dependent_pairs
+ NULL,
+ NULL
+ );
+ assert(r==0);
+ int64_t* parent_val = (int64_t *)v1;
+ move_number_to_child(parent, parent_val, CACHETABLE_CLEAN);
+ }
+ return arg;
+}
+
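+// unpin_and_remove callback: mark the removed block's slot as absent
+// (INT64_MAX), and do the same in the checkpointed copy when the removal is
+// done on behalf of a checkpoint.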
+static void remove_data(CACHEKEY* cachekey, bool for_checkpoint, void* UU(extra)) {
+ assert(cachekey->b < NUM_ELEMENTS);
+ data[cachekey->b] = INT64_MAX;
+ if (for_checkpoint) {
+ checkpointed_data[cachekey->b] = INT64_MAX;
+ }
+}
+
+
+static void get_data(CACHEKEY* cachekey, uint32_t* fullhash, void* extra) {
+ int* CAST_FROM_VOIDP(key, extra);
+ cachekey->b = *key;
+ *fullhash = toku_cachetable_hash(f1, *cachekey);
+ data[*key] = INT64_MAX - 1;
+}
+
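+// "merge" one child of `parent` into its sibling (summing their values and
+// removing the emptied block with unpin_and_remove), then "split" by
+// re-creating that block with toku_cachetable_put_with_dep_pairs and moving
+// 5000 back into it; recurse on the surviving child until a leaf is reached.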
+static void merge_and_split_child(
+ int parent,
+ int64_t* parent_val,
+ enum cachetable_dirty parent_dirty
+ )
+{
+ int child = 0;
+ int other_child = 0;
+ int r;
+ bool even = (random() % 2) == 0;
+ child = (even) ? (2*parent + 1) : (2*parent + 2);
+ other_child = (!even) ? (2*parent + 1) : (2*parent + 2);
+ assert(child != other_child);
+
+ void* v1;
+
+ CACHEKEY parent_key;
+ parent_key.b = parent;
+ uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
+
+ CACHEKEY child_key;
+ child_key.b = child;
+ uint32_t child_fullhash = toku_cachetable_hash(f1, child_key);
+ enum cachetable_dirty child_dirty = CACHETABLE_CLEAN;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.clone_callback = clone_callback;
+ PAIR dep_pair = data_pair[parent];
+ r = toku_cachetable_get_and_pin_with_dep_pairs(
+ f1,
+ child_key,
+ child_fullhash,
+ &v1,
+ wc, fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_CHEAP,
+ NULL,
+ 1, //num_dependent_pairs
+ &dep_pair,
+ &parent_dirty
+ );
+ assert(r==0);
+ int64_t* child_val = (int64_t *)v1;
+
+ CACHEKEY other_child_key;
+ other_child_key.b = other_child;
+ uint32_t other_child_fullhash = toku_cachetable_hash(f1, other_child_key);
+ enum cachetable_dirty dirties[2];
+ dirties[0] = parent_dirty;
+ dirties[1] = child_dirty;
+ PAIR dep_pairs[2];
+ dep_pairs[0] = data_pair[parent];
+ dep_pairs[1] = data_pair[child];
+
+ r = toku_cachetable_get_and_pin_with_dep_pairs(
+ f1,
+ other_child_key,
+ other_child_fullhash,
+ &v1,
+ wc, fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_CHEAP,
+ NULL,
+ 2, //num_dependent_pairs
+ dep_pairs,
+ dirties
+ );
+ assert(r==0);
+ int64_t* other_child_val = (int64_t *)v1;
+ assert(*parent_val != INT64_MAX);
+ assert(*child_val != INT64_MAX);
+ assert(*other_child_val != INT64_MAX);
+
+ // let's get rid of other_child_val with a merge
+ *child_val += *other_child_val;
+ *other_child_val = INT64_MAX;
+ toku_test_cachetable_unpin_and_remove(f1, other_child_key, remove_data, NULL);
+ dirties[1] = CACHETABLE_DIRTY;
+ child_dirty = CACHETABLE_DIRTY;
+
+ // now do a split
+ CACHEKEY new_key;
+ uint32_t new_fullhash;
+ int64_t* XMALLOC(data_val);
+ toku_cachetable_put_with_dep_pairs(
+ f1,
+ get_data,
+ data_val,
+ make_pair_attr(8),
+ wc,
+ &other_child,
+ 2, // number of dependent pairs that we may need to checkpoint
+ dep_pairs,
+ dirties,
+ &new_key,
+ &new_fullhash,
+ put_callback_pair
+ );
+ assert(new_key.b == other_child);
+ assert(new_fullhash == other_child_fullhash);
+ *data_val = 5000;
+ *child_val -= 5000;
+
+ r = toku_test_cachetable_unpin(f1, parent_key, parent_fullhash, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, other_child_key, other_child_fullhash, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+ if (child < NUM_INTERNAL) {
+ merge_and_split_child(child, child_val, CACHETABLE_DIRTY);
+ }
+ else {
+ r = toku_test_cachetable_unpin(f1, child_key, child_fullhash, CACHETABLE_DIRTY, make_pair_attr(8));
+ assert_zero(r);
+ }
+}
+
+static void *merge_and_split(void *arg) {
+ while (run_test) {
+ int parent = 0;
+ int r;
+ void* v1;
+ CACHEKEY parent_key;
+ parent_key.b = parent;
+ uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ wc.clone_callback = clone_callback;
+ r = toku_cachetable_get_and_pin_with_dep_pairs(
+ f1,
+ parent_key,
+ parent_fullhash,
+ &v1,
+ wc, fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_CHEAP,
+ NULL,
+ 0, //num_dependent_pairs
+ NULL,
+ NULL
+ );
+ assert(r==0);
+ int64_t* parent_val = (int64_t *)v1;
+ merge_and_split_child(parent, parent_val, CACHETABLE_CLEAN);
+ }
+ return arg;
+}
+
+static int num_checkpoints = 0;
+static void *checkpoints(void *arg) {
+ // first verify that checkpointed_data is correct;
+ while(run_test) {
+ int64_t sum = 0;
+ for (int i = 0; i < NUM_ELEMENTS; i++) {
+ if (checkpointed_data[i] != INT64_MAX) {
+ sum += checkpointed_data[i];
+ }
+ }
+ assert (sum==0);
+
+ //
+ // now run a checkpoint
+ //
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ assert (sum==0);
+ for (int i = 0; i < NUM_ELEMENTS; i++) {
+ if (checkpointed_data[i] != INT64_MAX) {
+ sum += checkpointed_data[i];
+ }
+ }
+ assert (sum==0);
+ num_checkpoints++;
+ }
+ return arg;
+}
+
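+// begin-checkpoint callback (registered through toku_cachefile_set_userdata
+// below): snapshot the current on-disk values so the checkpoint thread can
+// verify the zero-sum invariant against exactly what this checkpoint sees.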
+static void
+test_begin_checkpoint (
+ LSN UU(checkpoint_lsn),
+ void* UU(header_v))
+{
+ memcpy(checkpointed_data, data, sizeof(int64_t)*NUM_ELEMENTS);
+}
+
+static void sum_vals(void) {
+ int64_t sum = 0;
+ for (int i = 0; i < NUM_ELEMENTS; i++) {
+ //printf("actual: i %d val %" PRId64 " \n", i, data[i]);
+ if (data[i] != INT64_MAX) {
+ sum += data[i];
+ }
+ }
+ if (verbose) printf("actual sum %" PRId64 " \n", sum);
+ assert(sum == 0);
+ sum = 0;
+ for (int i = 0; i < NUM_ELEMENTS; i++) {
+ //printf("checkpointed: i %d val %" PRId64 " \n", i, checkpointed_data[i]);
+ if (checkpointed_data[i] != INT64_MAX) {
+ sum += checkpointed_data[i];
+ }
+ }
+ if (verbose) printf("checkpointed sum %" PRId64 " \n", sum);
+ assert(sum == 0);
+}
+
+static void
+cachetable_test (void) {
+ const int test_limit = NUM_ELEMENTS;
+
+ //
+ // let's set up the data
+ //
+ for (int64_t i = 0; i < NUM_ELEMENTS; i++) {
+ data[i] = 0;
+ checkpointed_data[i] = 0;
+ }
+ time_of_test = 60;
+
+ int r;
+
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ toku_cachefile_set_userdata(
+ f1,
+ NULL,
+ &dummy_log_fassociate,
+ &dummy_close_usr,
+ &dummy_free_usr,
+ &dummy_chckpnt_usr,
+ test_begin_checkpoint, // called in begin_checkpoint
+ &dummy_end,
+ &dummy_note_pin,
+ &dummy_note_unpin
+ );
+
+ toku_pthread_t time_tid;
+ toku_pthread_t checkpoint_tid;
+ toku_pthread_t move_tid[NUM_MOVER_THREADS];
+ toku_pthread_t merge_and_split_tid[NUM_MOVER_THREADS];
+ run_test = true;
+
+ for (int i = 0; i < NUM_MOVER_THREADS; i++) {
+ r = toku_pthread_create(toku_uninstrumented,
+ &move_tid[i],
+ nullptr,
+ move_numbers,
+ nullptr);
+ assert_zero(r);
+ }
+ for (int i = 0; i < NUM_MOVER_THREADS; i++) {
+ r = toku_pthread_create(toku_uninstrumented,
+ &merge_and_split_tid[i],
+ nullptr,
+ merge_and_split,
+ nullptr);
+ assert_zero(r);
+ }
+ r = toku_pthread_create(
+ toku_uninstrumented, &checkpoint_tid, nullptr, checkpoints, nullptr);
+ assert_zero(r);
+ r = toku_pthread_create(
+ toku_uninstrumented, &time_tid, nullptr, test_time, nullptr);
+ assert_zero(r);
+
+ void *ret;
+ r = toku_pthread_join(time_tid, &ret);
+ assert_zero(r);
+ r = toku_pthread_join(checkpoint_tid, &ret);
+ assert_zero(r);
+ for (int i = 0; i < NUM_MOVER_THREADS; i++) {
+ r = toku_pthread_join(merge_and_split_tid[i], &ret);
+ assert_zero(r);
+ }
+ for (int i = 0; i < NUM_MOVER_THREADS; i++) {
+ r = toku_pthread_join(move_tid[i], &ret);
+ assert_zero(r);
+ }
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+ sum_vals();
+ if (verbose) printf("num_checkpoints %d\n", num_checkpoints);
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-put-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-put-test.cc
new file mode 100644
index 00000000..ba930d5b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-put-test.cc
@@ -0,0 +1,88 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
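+// put n pairs into the cachetable. a put leaves the pair pinned, so
+// maybe_get_and_pin on it must fail (returns -1) and the pinned count grows
+// with each put; unpinning everything brings the count back to zero.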
+static void
+cachetable_put_test (int n) {
+ const int test_limit = 2*n;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ int i;
+ for (i=1; i<=n; i++) {
+ uint32_t hi;
+ hi = toku_cachetable_hash(f1, make_blocknum(i));
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ toku_cachetable_put(f1, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(1), wc, put_callback_nop);
+ assert(toku_cachefile_count_pinned(f1, 0) == i);
+
+ void *v;
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
+ assert(r == -1);
+ assert(toku_cachefile_count_pinned(f1, 0) == i);
+
+ //r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, 1);
+ //assert(r == 0);
+ assert(toku_cachefile_count_pinned(f1, 0) == i);
+ }
+ for (i=n; i>0; i--) {
+ uint32_t hi;
+ hi = toku_cachetable_hash(f1, make_blocknum(i));
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ assert(toku_cachefile_count_pinned(f1, 0) == i-1);
+ }
+ assert(toku_cachefile_count_pinned(f1, 1) == 0);
+ toku_cachetable_verify(ct);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_put_test(8);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-reserve-filenum.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-reserve-filenum.cc
new file mode 100644
index 00000000..e25ead06
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-reserve-filenum.cc
@@ -0,0 +1,112 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that cachefile_list::reserve_filenum() hands out filenums in order,
+// skips the reserved value UINT32_MAX, and wraps around correctly
+
+#include "test.h"
+
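+// the test drives cachefile_list directly, seeding m_next_filenum_to_use so
+// that the wrap-around path can be exercised without actually reserving
+// billions of filenums.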
+struct reserve_filenum_test {
+ void test_reserve_filenum();
+ void test_reserve_filenum_active();
+};
+
+void reserve_filenum_test::test_reserve_filenum() {
+ cachefile_list cfl;
+ cfl.init();
+
+ // seed m_next_filenum_to_use.fileid near the top of the filenum space
+ cfl.m_next_filenum_to_use.fileid = (UINT32_MAX -2);
+
+ FILENUM fn1 = cfl.reserve_filenum();
+ assert(fn1.fileid == (UINT32_MAX - 2));
+
+ FILENUM fn2 = cfl.reserve_filenum();
+ assert(fn2.fileid == (UINT32_MAX - 1));
+
+ // skip the reserved value UINT32_MAX and wrap around
+ FILENUM fn3 = cfl.reserve_filenum();
+ assert(fn3.fileid == 0U);
+
+ FILENUM fn4 = cfl.reserve_filenum();
+ assert(fn4.fileid == 1U);
+
+ cfl.destroy();
+}
+
+void reserve_filenum_test::test_reserve_filenum_active() {
+ cachefile_list cfl;
+ cfl.init();
+
+ // start the filenum space at UINT32_MAX - 1
+ cfl.m_next_filenum_to_use.fileid = (UINT32_MAX -1);
+
+ // reserve filenum UINT32_MAX-1
+ FILENUM fn1 = cfl.reserve_filenum();
+ assert(fn1.fileid == (UINT32_MAX - 1));
+ cachefile cf1 = {};
+ cf1.filenum = fn1;
+ cf1.fileid = {0, 1};
+ cfl.add_cf_unlocked(&cf1);
+
+ // reset next filenum so that we test skipping UINT32_MAX
+ cfl.m_next_filenum_to_use.fileid = (UINT32_MAX -1);
+
+ // reserve filenum 0
+ FILENUM fn2 = cfl.reserve_filenum();
+ assert(fn2.fileid == 0);
+
+ cachefile cf2 = {};
+ cf2.filenum = fn2;
+ cf2.fileid = {0, 2};
+ cfl.add_cf_unlocked(&cf2);
+
+ cfl.destroy();
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ int r = 0;
+ default_parse_args(argc, argv);
+ reserve_filenum_test fn_test;
+
+ // Run the tests.
+ fn_test.test_reserve_filenum();
+ fn_test.test_reserve_filenum_active();
+
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-rwlock-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-rwlock-test.cc
new file mode 100644
index 00000000..6d8bc280
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-rwlock-test.cc
@@ -0,0 +1,225 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// test create and destroy
+
+static void test_create_destroy(void) {
+ struct st_rwlock the_rwlock, *rwlock = &the_rwlock;
+
+ rwlock_init(toku_uninstrumented, rwlock);
+ rwlock_destroy(rwlock);
+}
+
+// test read lock and unlock with no writers
+
+static void test_simple_read_lock(int n) {
+ struct st_rwlock the_rwlock, *rwlock = &the_rwlock;
+
+ rwlock_init(toku_uninstrumented, rwlock);
+ assert(rwlock_readers(rwlock) == 0);
+ int i;
+ for (i = 1; i <= n; i++) {
+ rwlock_read_lock(rwlock, 0);
+ assert(rwlock_readers(rwlock) == i);
+ assert(rwlock_users(rwlock) == i);
+ }
+ for (i=n-1; i>=0; i--) {
+ rwlock_read_unlock(rwlock);
+ assert(rwlock_readers(rwlock) == i);
+ assert(rwlock_users(rwlock) == i);
+ }
+ rwlock_destroy(rwlock);
+}
+
+// test write lock and unlock with no readers
+
+static void test_simple_write_lock(void) {
+ struct st_rwlock the_rwlock, *rwlock = &the_rwlock;
+
+ rwlock_init(toku_uninstrumented, rwlock);
+ assert(rwlock_users(rwlock) == 0);
+ rwlock_write_lock(rwlock, 0);
+ assert(rwlock_writers(rwlock) == 1);
+ assert(rwlock_users(rwlock) == 1);
+ rwlock_write_unlock(rwlock);
+ assert(rwlock_users(rwlock) == 0);
+ rwlock_destroy(rwlock);
+}
+
+struct rw_event {
+ int e;
+ struct st_rwlock the_rwlock;
+ toku_mutex_t mutex;
+};
+
+static void rw_event_init(struct rw_event *rwe) {
+ rwe->e = 0;
+ rwlock_init(toku_uninstrumented, &rwe->the_rwlock);
+ toku_mutex_init(toku_uninstrumented, &rwe->mutex, nullptr);
+}
+
+static void rw_event_destroy(struct rw_event *rwe) {
+ rwlock_destroy(&rwe->the_rwlock);
+ toku_mutex_destroy(&rwe->mutex);
+}
+
+static void *
+test_writer_priority_thread (void *arg) {
+ struct rw_event *CAST_FROM_VOIDP(rwe, arg);
+
+ toku_mutex_lock(&rwe->mutex);
+ rwlock_write_lock(&rwe->the_rwlock, &rwe->mutex);
+ rwe->e++; assert(rwe->e == 3);
+ toku_mutex_unlock(&rwe->mutex);
+ sleep(1);
+ toku_mutex_lock(&rwe->mutex);
+ rwe->e++; assert(rwe->e == 4);
+ rwlock_write_unlock(&rwe->the_rwlock);
+ toku_mutex_unlock(&rwe->mutex);
+
+ return arg;
+}
+
+// test writer priority over new readers
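+// the shared counter rwe->e pins down the interleaving: the main thread holds
+// a read lock (e becomes 1), bumps the counter to 2 while the writer thread is
+// still blocked, the writer then gets and releases the write lock (3, 4), and
+// only after that can the main thread's second read lock proceed (5).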
+
+static void
+test_writer_priority (void) {
+ struct rw_event rw_event, *rwe = &rw_event;
+ ZERO_STRUCT(rw_event);
+ int r;
+
+ rw_event_init(rwe);
+ toku_mutex_lock(&rwe->mutex);
+ rwlock_read_lock(&rwe->the_rwlock, &rwe->mutex);
+ sleep(1);
+ rwe->e++; assert(rwe->e == 1);
+ toku_mutex_unlock(&rwe->mutex);
+
+ toku_pthread_t tid;
+ r = toku_pthread_create(
+ toku_uninstrumented, &tid, 0, test_writer_priority_thread, rwe);
+ sleep(1);
+ toku_mutex_lock(&rwe->mutex);
+ rwe->e++;
+ assert(rwe->e == 2);
+ toku_mutex_unlock(&rwe->mutex);
+
+ sleep(1);
+ toku_mutex_lock(&rwe->mutex);
+ rwlock_read_unlock(&rwe->the_rwlock);
+ toku_mutex_unlock(&rwe->mutex);
+ sleep(1);
+ toku_mutex_lock(&rwe->mutex);
+ rwlock_read_lock(&rwe->the_rwlock, &rwe->mutex);
+ rwe->e++; assert(rwe->e == 5);
+ toku_mutex_unlock(&rwe->mutex);
+ sleep(1);
+ toku_mutex_lock(&rwe->mutex);
+ rwlock_read_unlock(&rwe->the_rwlock);
+ toku_mutex_unlock(&rwe->mutex);
+
+ void *ret;
+ r = toku_pthread_join(tid, &ret); assert(r == 0);
+
+ rw_event_destroy(rwe);
+}
+
+// test single writer
+
+static void *
+test_single_writer_thread (void *arg) {
+ struct rw_event *CAST_FROM_VOIDP(rwe, arg);
+
+ toku_mutex_lock(&rwe->mutex);
+ rwlock_write_lock(&rwe->the_rwlock, &rwe->mutex);
+ rwe->e++; assert(rwe->e == 3);
+ assert(rwlock_writers(&rwe->the_rwlock) == 1);
+ rwlock_write_unlock(&rwe->the_rwlock);
+ toku_mutex_unlock(&rwe->mutex);
+
+ return arg;
+}
+
+static void
+test_single_writer (void) {
+ struct rw_event rw_event, *rwe = &rw_event;
+ ZERO_STRUCT(rw_event);
+ int r;
+
+ rw_event_init(rwe);
+ assert(rwlock_writers(&rwe->the_rwlock) == 0);
+ toku_mutex_lock(&rwe->mutex);
+ rwlock_write_lock(&rwe->the_rwlock, &rwe->mutex);
+ assert(rwlock_writers(&rwe->the_rwlock) == 1);
+ sleep(1);
+ rwe->e++; assert(rwe->e == 1);
+ toku_mutex_unlock(&rwe->mutex);
+
+ toku_pthread_t tid;
+ r = toku_pthread_create(
+ toku_uninstrumented, &tid, 0, test_single_writer_thread, rwe);
+ sleep(1);
+ toku_mutex_lock(&rwe->mutex);
+ rwe->e++;
+ assert(rwe->e == 2);
+ assert(rwlock_writers(&rwe->the_rwlock) == 1);
+ assert(rwlock_users(&rwe->the_rwlock) == 2);
+ rwlock_write_unlock(&rwe->the_rwlock);
+ toku_mutex_unlock(&rwe->mutex);
+
+ void *ret;
+ r = toku_pthread_join(tid, &ret); assert(r == 0);
+
+ assert(rwlock_writers(&rwe->the_rwlock) == 0);
+ rw_event_destroy(rwe);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_create_destroy();
+ test_simple_read_lock(0);
+ test_simple_read_lock(42);
+ test_simple_write_lock();
+ test_writer_priority();
+ test_single_writer();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-clone.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-clone.cc
new file mode 100644
index 00000000..c51096b3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-clone.cc
@@ -0,0 +1,188 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+bool clone_called;
+bool check_flush;
+bool flush_expected;
+bool flush_called;
+
+static void
+clone_callback(void* UU(value_data), void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
+{
+ *cloned_value_data = (void *)1;
+ new_attr->is_valid = false;
+ clone_called = true;
+ *clone_size = 8;
+}
+
+static void
+flush (
+ CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ )
+{
+ if (w) usleep(5*1024*1024);
+ if (w && check_flush) {
+ assert(flush_expected);
+ if (clone_called) assert(is_clone);
+ }
+ flush_called = true;
+ if (is_clone) assert(!keep);
+}
+
+static uint64_t tdelta_usec(struct timeval *tend, struct timeval *tstart) {
+ uint64_t t = tend->tv_sec * 1000000 + tend->tv_usec;
+ t -= tstart->tv_sec * 1000000 + tstart->tv_usec;
+ return t;
+}
+
+
+//
+// test the following things for simple cloning:
+// - if the pending pair is clean, nothing gets written
+// - if the pending pair is dirty and cloneable, then the pair is written
+// in the background and get_and_pin returns immediately
+// - if the pending pair is dirty and not cloneable, then get_and_pin
+// blocks until the pair is written out
+//
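+// the flush callback sleeps roughly 5 seconds on a real write, so the
+// elapsed-time checks below can tell a background (cloned) write apart from
+// a write that blocks the pin.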
+static void
+test_clean (enum cachetable_dirty dirty, bool cloneable) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.clone_callback = cloneable ? clone_callback : NULL;
+ wc.flush_callback = flush;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, dirty, make_pair_attr(8));
+
+ check_flush = true;
+ clone_called = false;
+ flush_expected = (dirty == CACHETABLE_DIRTY) ? true : false;
+ flush_called = false;
+ // begin a checkpoint. if the pair is clean, the clone callback
+ // should not be called
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ assert_zero(r);
+ struct timeval tstart;
+ struct timeval tend;
+ gettimeofday(&tstart, NULL);
+
+ // test that a pin that passes false for may_modify_value does not stall behind the checkpoint
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, false, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ gettimeofday(&tend, NULL);
+ assert(tdelta_usec(&tend, &tstart) <= 2000000);
+ assert(!clone_called);
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ gettimeofday(&tend, NULL);
+
+    // the flush callback sleeps roughly 5 seconds for a write, so if this
+    // pin completes in under 2 seconds we know the clone let the write
+    // happen in the background
+ if (cloneable || !dirty ) {
+ assert(tdelta_usec(&tend, &tstart) <= 2000000);
+ }
+ else {
+ assert(tdelta_usec(&tend, &tstart) >= 2000000);
+ }
+
+
+ if (dirty == CACHETABLE_DIRTY && cloneable) {
+ assert(clone_called);
+ }
+ else {
+ assert(!clone_called);
+ }
+
+ // at this point, there should be no more dirty writes
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ gettimeofday(&tend, NULL);
+ if (cloneable || !dirty ) {
+ assert(tdelta_usec(&tend, &tstart) <= 2000000);
+ }
+ else {
+ assert(tdelta_usec(&tend, &tstart) >= 2000000);
+ }
+
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+
+ check_flush = false;
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_clean(CACHETABLE_CLEAN, true);
+ test_clean(CACHETABLE_DIRTY, true);
+ test_clean(CACHETABLE_CLEAN, false);
+ test_clean(CACHETABLE_DIRTY, false);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-clone2.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-clone2.cc
new file mode 100644
index 00000000..341bbe92
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-clone2.cc
@@ -0,0 +1,138 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+bool clone_called;
+bool check_flush;
+bool flush_expected;
+bool flush_called;
+
+static void
+clone_callback(void* UU(value_data), void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
+{
+ *cloned_value_data = (void *)1;
+ new_attr->is_valid = false;
+ clone_called = true;
+ *clone_size = 8;
+}
+
+static void
+flush (
+ CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ )
+{
+ if (w && check_flush) {
+ assert(flush_expected);
+ flush_called = true;
+ }
+}
+
+//
+// test the following things for simple cloning:
+// - verify that after the checkpoint ends, the PAIR is left properly
+//   dirty or clean based on the second unpin
+//
+static void
+test_clean (enum cachetable_dirty dirty, bool cloneable) {
+ const int test_limit = 200;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+ check_flush = false;
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.clone_callback = cloneable ? clone_callback : NULL;
+ wc.flush_callback = flush;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+
+    // begin a checkpoint; the pair was unpinned dirty above, so it is now
+    // pending a checkpoint write
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ assert_zero(r);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+
+ // at this point, there should be no more dirty writes
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, dirty, make_pair_attr(8));
+ usleep(2*1024*1024);
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+
+ check_flush = true;
+ flush_expected = (dirty == CACHETABLE_DIRTY) ? true : false;
+ flush_called = false;
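+    // the flush we are checking for happens when the cachefile is closed below:
+    // if the second unpin left the PAIR dirty, the close should write it out and
+    // the flush callback records that it ran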
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+ if (flush_expected) assert(flush_called);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_clean(CACHETABLE_CLEAN, true);
+ test_clean(CACHETABLE_DIRTY, true);
+ test_clean(CACHETABLE_CLEAN, false);
+ test_clean(CACHETABLE_DIRTY, false);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-close.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-close.cc
new file mode 100644
index 00000000..a13e6d26
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-close.cc
@@ -0,0 +1,330 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+bool close_called;
+bool free_called;
+
+static void close_usr(CACHEFILE UU(cf), int UU(i), void* UU(p), bool UU(b), LSN UU(lsn)) {
+ close_called = true;
+}
+static void free_usr(CACHEFILE UU(cf), void* UU(p)) {
+ free_called = true;
+}
+
+static void set_cf_userdata(CACHEFILE f1) {
+ toku_cachefile_set_userdata(
+ f1,
+ NULL,
+ &dummy_log_fassociate,
+ &close_usr,
+ &free_usr,
+ &dummy_chckpnt_usr,
+ &dummy_begin,
+ &dummy_end,
+ &dummy_note_pin,
+ &dummy_note_unpin
+ );
+}
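+// close_usr and free_usr are the userdata hooks registered above; the test uses
+// them to tell whether a cachefile close merely cached the cachefile (close hook
+// only) or freed it entirely (free hook as well)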
+
+bool keep_me;
+bool write_me;
+bool flush_called;
+static UU() void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void **dd __attribute__((__unused__)),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ )
+{
+ flush_called = true;
+ if (!keep) keep_me = keep;
+ if (w) write_me = w;
+}
+
+
+static void
+simple_test(bool unlink_on_close) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ set_cf_userdata(f1);
+
+    // test that if we just open a cachefile and then close it (with no pairs
+    // active), it does not get cached
+ close_called = false;
+ free_called = false;
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ assert(close_called);
+ assert(free_called);
+
+ // now reopen the cachefile
+ f1 = NULL;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ set_cf_userdata(f1);
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_DIRTY, make_pair_attr(8));
+ toku_cachetable_verify(ct);
+ if (unlink_on_close) {
+ toku_cachefile_unlink_on_close(f1);
+ }
+ close_called = false;
+ free_called = false;
+ keep_me = true;
+ write_me = false;
+ flush_called = false;
+    // because there is one pair in the cachetable for this cf, close should
+    // cache the cf rather than free it (unless we unlink on close).
+    // also, make sure the dirty pair was written, and that the PAIR was NOT
+    // freed unless unlink_on_close was set
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ CACHETABLE_STATUS_S stats;
+ toku_cachetable_get_status(ct, &stats);
+ assert(flush_called);
+ assert(close_called);
+ assert(write_me);
+ if (unlink_on_close) {
+ assert(free_called);
+ assert(!keep_me);
+ // pair should NOT still be accounted for
+ assert(stats.status[CACHETABLE_STATUS_S::CT_SIZE_CURRENT].value.num == 0);
+ }
+ else {
+ assert(keep_me);
+ assert(!free_called);
+ // pair should still be accounted for
+ assert(stats.status[CACHETABLE_STATUS_S::CT_SIZE_CURRENT].value.num == 8);
+ }
+ toku_cachetable_close(&ct);
+ if (!unlink_on_close) {
+ assert(free_called);
+ assert(!keep_me);
+ }
+}
+
+// test to verify that a PAIR stays in cache
+// after the cachefile undergoes a close and reopen
+static void test_pair_stays_in_cache(enum cachetable_dirty dirty) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), dirty, make_pair_attr(8));
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ // now reopen the cachefile
+ f1 = NULL;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+    // do a maybe_get_and_pin_clean and verify that it succeeds, proving that
+    // the PAIR stayed cached and could be retrieved
+ r = toku_cachetable_maybe_get_and_pin_clean(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), PL_WRITE_EXPENSIVE, &v1);
+ assert(r == 0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_DIRTY, make_pair_attr(8));
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_multiple_cachefiles(bool use_same_hash) {
+ for (int iter = 0; iter < 3; iter++) {
+ const int test_limit = 1000;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+
+ char fname1[strlen(TOKU_TEST_FILENAME) + sizeof("_1")];
+ strcpy(fname1, TOKU_TEST_FILENAME);
+ strcat(fname1, "_1");
+ char fname2[strlen(TOKU_TEST_FILENAME) + sizeof("_2")];
+ strcpy(fname2, TOKU_TEST_FILENAME);
+ strcat(fname2, "_2");
+ char fname3[strlen(TOKU_TEST_FILENAME) + sizeof("_3")];
+ strcpy(fname3, TOKU_TEST_FILENAME);
+ strcat(fname3, "_3");
+
+ unlink(fname1);
+ unlink(fname2);
+ unlink(fname3);
+ CACHEFILE f1;
+ CACHEFILE f2;
+ CACHEFILE f3;
+
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ r = toku_cachetable_openf(&f2, ct, fname2, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ r = toku_cachetable_openf(&f3, ct, fname3, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ void* v2;
+ void* v3;
+
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ for (int j = 0; j < 3; j++) {
+ uint32_t hash = use_same_hash ? 1 : toku_cachetable_hash(f1, make_blocknum(j));
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(j), hash, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(j), hash, CACHETABLE_CLEAN, make_pair_attr(8));
+ }
+
+ for (int j = 0; j < 3; j++) {
+ uint32_t hash = use_same_hash ? 1 : toku_cachetable_hash(f2, make_blocknum(j));
+ r = toku_cachetable_get_and_pin(f2, make_blocknum(j), hash, &v2, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f2, make_blocknum(j), hash, CACHETABLE_CLEAN, make_pair_attr(8));
+ }
+
+ for (int j = 0; j < 3; j++) {
+ uint32_t hash = use_same_hash ? 1 : toku_cachetable_hash(f3, make_blocknum(j));
+ r = toku_cachetable_get_and_pin(f3, make_blocknum(j), hash, &v3, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f3, make_blocknum(j), hash, CACHETABLE_CLEAN, make_pair_attr(8));
+ }
+
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachefile_close(&f2, false, ZERO_LSN);
+ toku_cachefile_close(&f3, false, ZERO_LSN);
+
+ char* fname_to_open = NULL;
+ if (iter == 0) {
+ fname_to_open = fname1;
+ }
+ else if (iter == 1) {
+ fname_to_open = fname2;
+ }
+ else if (iter == 2) {
+ fname_to_open = fname3;
+ }
+
+ // now reopen the cachefile
+ f1 = NULL;
+ r = toku_cachetable_openf(&f1, ct, fname_to_open, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+        // do a maybe_get_and_pin_clean and verify that it succeeds, proving
+        // that the PAIR stayed cached and could be retrieved
+ for (int j = 0; j < 3; j++) {
+ uint32_t hash = use_same_hash ? 1 : toku_cachetable_hash(f1, make_blocknum(j));
+ r = toku_cachetable_maybe_get_and_pin_clean(f1, make_blocknum(j), hash, PL_WRITE_EXPENSIVE, &v1);
+ assert(r == 0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(j), hash, CACHETABLE_CLEAN, make_pair_attr(8));
+ }
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+
+ toku_cachetable_close(&ct);
+ }
+}
+
+// test that the evictor works properly with closed cachefiles
+static void test_evictor(void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+
+ char fname1[strlen(TOKU_TEST_FILENAME) + sizeof("_1")];
+ strcpy(fname1, TOKU_TEST_FILENAME);
+ strcat(fname1, "_1");
+ char fname2[strlen(TOKU_TEST_FILENAME) + sizeof("_2")];
+ strcpy(fname2, TOKU_TEST_FILENAME);
+ strcat(fname2, "_2");
+
+ unlink(fname1);
+ unlink(fname2);
+ CACHEFILE f1;
+ CACHEFILE f2;
+
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ set_cf_userdata(f1);
+ r = toku_cachetable_openf(&f2, ct, fname2, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_CLEAN, make_pair_attr(8));
+ close_called = false;
+ free_called = false;
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ assert(close_called);
+ assert(!free_called);
+
+    // at this point, f1, along with one PAIR, should be stale in the cachetable
+ // now let's pin another node, and ensure that it causes an eviction and free of f1
+ r = toku_cachetable_get_and_pin(f2, make_blocknum(1), toku_cachetable_hash(f2, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f2, make_blocknum(1), toku_cachetable_hash(f2, make_blocknum(1)), CACHETABLE_CLEAN, make_pair_attr(8));
+ // now sleep for 2 seconds, and check to see if f1 has been closed
+ sleep(2);
+ assert(free_called);
+
+ toku_cachefile_close(&f2, false, ZERO_LSN);
+
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_evictor();
+ test_multiple_cachefiles(false);
+ test_multiple_cachefiles(true);
+ simple_test(false);
+ simple_test(true);
+ test_pair_stays_in_cache(CACHETABLE_DIRTY);
+ test_pair_stays_in_cache(CACHETABLE_CLEAN);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-maybe-get-pin.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-maybe-get-pin.cc
new file mode 100644
index 00000000..1b6ef3ec
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-maybe-get-pin.cc
@@ -0,0 +1,110 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+//
+// simple tests for maybe_get_and_pin(_clean)
+//
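+// as exercised below, maybe_get_and_pin appears to succeed only for a PAIR that
+// is already in memory, dirty, not otherwise pinned, and not pending a checkpoint;
+// maybe_get_and_pin_clean also accepts clean PAIRs
+//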
+
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+
+ void* v1;
+ // nothing in cachetable, so this should fail
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
+ assert(r==-1);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+
+ // maybe_get_and_pin_clean should succeed, maybe_get_and_pin should fail
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
+ assert(r==-1);
+ r = toku_cachetable_maybe_get_and_pin_clean(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
+ assert(r == 0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+    // now that the PAIR was unpinned dirty, maybe_get_and_pin should succeed
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
+ assert(r==0);
+ // now these calls should fail because the node is already pinned, and therefore in use
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
+ assert(r==-1);
+ r = toku_cachetable_maybe_get_and_pin_clean(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
+ assert(r==-1);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+
+ // sanity check, this should still succeed, because the PAIR is dirty
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ // now these should fail, because the node should be pending a checkpoint
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
+ assert(r==-1);
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
+ assert(r==-1);
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-cheap.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-cheap.cc
new file mode 100644
index 00000000..d79d1fb1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-cheap.cc
@@ -0,0 +1,127 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+bool pf_called;
+static bool true_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ if (pf_called) return false;
+ return true;
+}
+
+static int true_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
+ *sizep = make_pair_attr(9);
+ pf_called = true;
+ return 0;
+}
+
+static void kibbutz_work(void *fe_v)
+{
+ CACHEFILE CAST_FROM_VOIDP(f1, fe_v);
+ sleep(2);
+ int r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+ remove_background_job_from_cf(f1);
+}
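+// the kibbutz appears to be the cachefile's background work queue; the job above
+// holds the pin for ~2 seconds to create contention, then unpins the PAIR and
+// removes itself from the cachefile's background job count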
+
+static void
+unlock_dummy (void* UU(v)) {
+}
+
+static void reset_unlockers(UNLOCKERS unlockers) {
+ unlockers->locked = true;
+}
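+// unlockers protocol, as these tests use it: when get_and_pin_nonblocking cannot
+// take the pin, it presumably runs the unlockers callback and clears `locked`
+// before returning TOKUDB_TRY_AGAIN, so asserting on unlockers.locked tells the
+// test which path was taken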
+
+static void
+run_test (pair_lock_type lock_type) {
+ const int test_limit = 12;
+ struct unlockers unlockers = {true, unlock_dummy, NULL, NULL};
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ r = toku_cachetable_get_and_pin_with_dep_pairs(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, lock_type, NULL, 0, NULL, NULL);
+ cachefile_kibbutz_enq(f1, kibbutz_work, f1);
+ reset_unlockers(&unlockers);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, &unlockers);
+    // to fix #5393, the behavior on a full fetch changed: if we requested
+    // PL_WRITE_CHEAP but had to take PL_WRITE_EXPENSIVE to do the full fetch,
+    // the pair keeps PL_WRITE_EXPENSIVE, because downgrading it back was too
+    // big a pain.
+ if (lock_type == PL_WRITE_EXPENSIVE || lock_type == PL_WRITE_CHEAP) {
+ assert(r == TOKUDB_TRY_AGAIN); assert(!unlockers.locked);
+ }
+ else {
+ assert(r == 0); assert(unlockers.locked);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
+ }
+
+ // now do the same test with a partial fetch required
+ pf_called = false;
+ r = toku_cachetable_get_and_pin_with_dep_pairs(f1, make_blocknum(1), 1, &v1, wc, def_fetch, true_pf_req_callback, true_pf_callback, lock_type, NULL, 0, NULL, NULL);
+ assert(pf_called);
+ cachefile_kibbutz_enq(f1, kibbutz_work, f1);
+ reset_unlockers(&unlockers);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, &unlockers);
+ if (lock_type == PL_WRITE_EXPENSIVE) {
+ assert(r == TOKUDB_TRY_AGAIN); assert(!unlockers.locked);
+ }
+ else {
+ assert(r == 0); assert(unlockers.locked);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
+ }
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test(PL_READ);
+ run_test(PL_WRITE_CHEAP);
+ run_test(PL_WRITE_EXPENSIVE);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-dep-nodes.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-dep-nodes.cc
new file mode 100644
index 00000000..f8219a0a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-dep-nodes.cc
@@ -0,0 +1,198 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+bool v1_written;
+uint64_t val1;
+bool v2_written;
+uint64_t val2;
+uint64_t val3;
+bool check_me;
+
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ /* Do nothing */
+ if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
+ //usleep (5*1024*1024);
+ if(check_me) {
+ assert(c);
+ assert(keep);
+ assert(w);
+ if (v == &val1) {
+ v1_written = true;
+ }
+ else if (v == &val2) {
+ v2_written = true;
+ }
+ else {
+ assert(false);
+ }
+ }
+}
+
+PAIR* dest_pair;
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR p,
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *value = extraargs;
+ *sizep = make_pair_attr(8);
+ *dest_pair = p;
+ return 0;
+}
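+// fetch exports the just-created PAIR handle through dest_pair so the test can
+// later pass the two fetched PAIRs as dependent pairs to
+// toku_cachetable_get_and_pin_with_dep_pairs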
+
+static void
+cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ void* v2;
+ void* v3;
+ PAIR dependent_pairs[2];
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(&val1);
+ wc.flush_callback = flush;
+ wc.write_extraargs = &val1;
+ dest_pair = &dependent_pairs[0];
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val1);
+ dest_pair = &dependent_pairs[1];
+ wc.write_extraargs = &val2;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val2);
+
+ // now we set the dirty state of these two.
+ enum cachetable_dirty cd[2];
+ cd[0] = write_first ? CACHETABLE_DIRTY : CACHETABLE_CLEAN;
+ cd[1] = write_second ? CACHETABLE_DIRTY : CACHETABLE_CLEAN;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ if (start_checkpoint) {
+ //
+ // should mark the v1 and v2 as pending
+ //
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ }
+ //
+ // This call should cause a flush for both
+ //
+ check_me = true;
+ v1_written = false;
+ v2_written = false;
+ wc.write_extraargs = &val3;
+ r = toku_cachetable_get_and_pin_with_dep_pairs(
+ f1,
+ make_blocknum(3),
+ 3,
+ &v3,
+ wc, fetch, def_pf_req_callback, def_pf_callback,
+ PL_WRITE_EXPENSIVE,
+ &val3,
+ 2, //num_dependent_pairs
+ dependent_pairs,
+ cd
+ );
+ if (start_checkpoint) {
+ assert(v1_written == write_first);
+ assert(v2_written == write_second);
+ }
+ else {
+ assert(!v1_written);
+ assert(!v2_written);
+ }
+ check_me = false;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));
+ r = toku_test_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(8));
+
+ if (start_checkpoint) {
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ }
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test(false,false,true);
+ cachetable_test(false,true,true);
+ cachetable_test(true,false,true);
+ cachetable_test(true,true,true);
+ cachetable_test(false,false,false);
+ cachetable_test(false,true,false);
+ cachetable_test(true,false,false);
+ cachetable_test(true,true,false);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-nonblocking-cheap.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-nonblocking-cheap.cc
new file mode 100644
index 00000000..9e3213a8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-nonblocking-cheap.cc
@@ -0,0 +1,155 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ if (w) {
+ assert(c);
+ assert(keep);
+ }
+}
+
+static void kibbutz_work(void *fe_v)
+{
+ CACHEFILE CAST_FROM_VOIDP(f1, fe_v);
+ sleep(2);
+ int r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+ remove_background_job_from_cf(f1);
+}
+
+static void
+unlock_dummy (void* UU(v)) {
+}
+
+static void reset_unlockers(UNLOCKERS unlockers) {
+ unlockers->locked = true;
+}
+
+static void
+run_case_that_should_succeed(CACHEFILE f1, pair_lock_type first_lock, pair_lock_type second_lock) {
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ struct unlockers unlockers = {true, unlock_dummy, NULL, NULL};
+ int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, first_lock, NULL, NULL);
+ assert(r==0);
+ cachefile_kibbutz_enq(f1, kibbutz_work, f1);
+ reset_unlockers(&unlockers);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, second_lock, NULL, &unlockers);
+ assert(r==0); assert(unlockers.locked);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
+}
+
+static void
+run_case_that_should_fail(CACHEFILE f1, pair_lock_type first_lock, pair_lock_type second_lock) {
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ struct unlockers unlockers = {true, unlock_dummy, NULL, NULL};
+ int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, first_lock, NULL, NULL);
+ assert(r==0);
+ cachefile_kibbutz_enq(f1, kibbutz_work, f1);
+ reset_unlockers(&unlockers);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, second_lock, NULL, &unlockers);
+ assert(r == TOKUDB_TRY_AGAIN); assert(!unlockers.locked);
+}
+
+
+static void
+run_test (void) {
+    // sometimes the cachetable evictor runs during the test, causing pair locking
+    // contention that results in a TOKUDB_TRY_AGAIN error the test does not expect,
+    // so it fails. set the cachetable size limit high enough that the evictor is
+    // not triggered during the test.
+ const int test_limit = 100;
+
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ //
+ // test that if we are getting a PAIR for the first time that TOKUDB_TRY_AGAIN is returned
+ // because the PAIR was not in the cachetable.
+ //
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r==TOKUDB_TRY_AGAIN);
+
+
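+    // expected lock compatibility, per the cases below: while the kibbutz job still
+    // holds a PL_READ or PL_WRITE_CHEAP pin, a second nonblocking pin goes through,
+    // but a held PL_WRITE_EXPENSIVE pin forces TOKUDB_TRY_AGAIN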
+ run_case_that_should_succeed(f1, PL_READ, PL_WRITE_CHEAP);
+ run_case_that_should_succeed(f1, PL_READ, PL_WRITE_EXPENSIVE);
+
+ run_case_that_should_succeed(f1, PL_WRITE_CHEAP, PL_READ);
+ run_case_that_should_succeed(f1, PL_WRITE_CHEAP, PL_WRITE_CHEAP);
+ run_case_that_should_succeed(f1, PL_WRITE_CHEAP, PL_WRITE_EXPENSIVE);
+
+ run_case_that_should_fail(f1, PL_WRITE_EXPENSIVE, PL_READ);
+ run_case_that_should_fail(f1, PL_WRITE_EXPENSIVE, PL_WRITE_CHEAP);
+ run_case_that_should_fail(f1, PL_WRITE_EXPENSIVE, PL_WRITE_EXPENSIVE);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-nonblocking.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-nonblocking.cc
new file mode 100644
index 00000000..6a09b538
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin-nonblocking.cc
@@ -0,0 +1,145 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+bool foo;
+
+//
+// This test verifies that get_and_pin_nonblocking works and returns TOKUDB_TRY_AGAIN when the PAIR is in use.
+//
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ /* Do nothing */
+ if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
+    // this should not be flushed until the bottom of the test, which
+    // verifies that this gets called if the PAIR is pending a checkpoint
+ if (w) {
+ assert(c);
+ assert(keep);
+ }
+ //usleep (5*1024*1024);
+}
+
+static bool true_def_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return true;
+}
+static int true_def_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
+ *sizep = make_pair_attr(8);
+ return 0;
+}
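+// these callbacks force the partial-fetch path: true_def_pf_req_callback always
+// claims a partial fetch is required, so a nonblocking pin on a resident PAIR is
+// expected to return TOKUDB_TRY_AGAIN rather than block doing the fetch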
+
+
+static void kibbutz_work(void *fe_v)
+{
+ CACHEFILE CAST_FROM_VOIDP(f1, fe_v);
+ sleep(2);
+ foo = true;
+ int r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+ remove_background_job_from_cf(f1);
+}
+
+
+static void
+run_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ //
+ // test that if we are getting a PAIR for the first time that TOKUDB_TRY_AGAIN is returned
+ // because the PAIR was not in the cachetable.
+ //
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r==TOKUDB_TRY_AGAIN);
+ // now it should succeed
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r==0);
+ foo = false;
+ cachefile_kibbutz_enq(f1, kibbutz_work, f1);
+    // because the node is in use, this should return TOKUDB_TRY_AGAIN
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r==TOKUDB_TRY_AGAIN);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert(foo);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
+
+ // now make sure we get TOKUDB_TRY_AGAIN when a partial fetch is involved
+ // first make sure value is there
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
+ // now make sure that we get TOKUDB_TRY_AGAIN for the partial fetch
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, def_fetch, true_def_pf_req_callback, true_def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
+ assert(r==TOKUDB_TRY_AGAIN);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin.cc
new file mode 100644
index 00000000..6750fdd3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-pin.cc
@@ -0,0 +1,144 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+bool foo;
+bool check_me;
+bool flush_called;
+
+//
+// This test verifies that get_and_pin takes a write lock on a PAIR.
+//
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ /* Do nothing */
+ if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
+ //usleep (5*1024*1024);
+ if (check_me) {
+ flush_called = true;
+ assert(c);
+ assert(keep);
+ assert(w);
+ }
+}
+
+static void kibbutz_work(void *fe_v)
+{
+ CACHEFILE CAST_FROM_VOIDP(f1, fe_v);
+ sleep(2);
+ foo = true;
+ int r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+ remove_background_job_from_cf(f1);
+}
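+// the kibbutz job above sleeps ~2 seconds before unpinning blocknum 1, which was
+// pinned by run_test; since get_and_pin takes a write lock, the second get_and_pin
+// in run_test must wait for that unpin, which is why `foo` is already true when it
+// returns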
+
+static void
+run_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ foo = false;
+ cachefile_kibbutz_enq(f1, kibbutz_work, f1);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert(foo);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+
+    // now let's do a simple checkpoint test
+ // first dirty the PAIR
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+
+ // now this should mark the pair for checkpoint
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+
+ //
+    // now we pin the pair again, and verify in the flush callback that the pair is being checkpointed
+ //
+ check_me = true;
+ flush_called = false;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ assert(flush_called);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+
+
+ check_me = false;
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-put-dep-nodes.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-put-dep-nodes.cc
new file mode 100644
index 00000000..892c15a1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-put-dep-nodes.cc
@@ -0,0 +1,220 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+bool v1_written;
+uint64_t val1;
+bool v2_written;
+uint64_t val2;
+uint64_t val3;
+bool check_me;
+PAIR* dest_pair;
+
+static void
+put_callback_pair(
+ CACHEKEY UU(key),
+ void *UU(v),
+ PAIR p)
+{
+ *dest_pair = p;
+}
+
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ /* Do nothing */
+ if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
+ //usleep (5*1024*1024);
+ if(check_me) {
+ assert(c);
+ assert(keep);
+ assert(w);
+ if (v == &val1) {
+ v1_written = true;
+ }
+ else if (v == &val2) {
+ v2_written = true;
+ }
+ else {
+ assert(false);
+ }
+ }
+}
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *value = extraargs;
+ *sizep = make_pair_attr(8);
+ *dest_pair = p;
+ return 0;
+}
+
+static void get_key_and_fullhash(CACHEKEY* cachekey, uint32_t* fullhash, void* extra) {
+ assert(extra == NULL);
+ cachekey->b = 3;
+ *fullhash = 3;
+}
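+// toku_cachetable_put_with_dep_pairs asks this callback for the new PAIR's key and
+// fullhash (fixed at 3/3 here) and presumably leaves the new PAIR pinned, handing
+// it back through put_callback_pair; the test unpins blocknum 3 later to release it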
+
+
+static void
+cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+    create_dummy_functions(f1);
+
+ void* v1;
+ void* v2;
+ PAIR dependent_pairs[2];
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ dest_pair = &dependent_pairs[0];
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val1);
+ assert(r==0);
+ dest_pair = &dependent_pairs[1];
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val2);
+ assert(r==0);
+
+ // now we set the dirty state of these two.
+ enum cachetable_dirty cd[2];
+ cd[0] = write_first ? CACHETABLE_DIRTY : CACHETABLE_CLEAN;
+ cd[1] = write_second ? CACHETABLE_DIRTY : CACHETABLE_CLEAN;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ if (start_checkpoint) {
+ //
+ // should mark the v1 and v2 as pending
+ //
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ }
+ //
+ // This call should cause a flush for both
+ //
+ check_me = true;
+ v1_written = false;
+ v2_written = false;
+
+ CACHEKEY put_key;
+ uint32_t put_fullhash;
+ PAIR dummy_pair;
+ dest_pair = &dummy_pair;
+ toku_cachetable_put_with_dep_pairs(
+ f1,
+ get_key_and_fullhash,
+ &val3,
+ make_pair_attr(8),
+ wc,
+ NULL,
+ 2, //num_dependent_pairs
+ dependent_pairs,
+ cd,
+ &put_key,
+ &put_fullhash,
+ put_callback_pair
+ );
+ assert(put_key.b == 3);
+ assert(put_fullhash == 3);
+
+ if (start_checkpoint) {
+ assert(v1_written == write_first);
+ assert(v2_written == write_second);
+ }
+
+ check_me = false;
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ r = toku_test_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));
+ r = toku_test_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(8));
+
+ if (start_checkpoint) {
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ }
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test(false,false,true);
+ cachetable_test(false,true,true);
+ cachetable_test(true,false,true);
+ cachetable_test(true,true,true);
+ cachetable_test(false,false,false);
+ cachetable_test(false,true,false);
+ cachetable_test(true,false,false);
+ cachetable_test(true,true,false);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-read-pin-nonblocking.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-read-pin-nonblocking.cc
new file mode 100644
index 00000000..fdca6ef2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-read-pin-nonblocking.cc
@@ -0,0 +1,174 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+bool pf_called;
+bool fetch_called;
+CACHEFILE f1;
+
+static int
+sleep_fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void **dd __attribute__((__unused__)),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ sleep(2);
+ *dirtyp = 0;
+ *value = NULL;
+ *sizep = make_pair_attr(8);
+ fetch_called = true;
+ return 0;
+}
+
+static bool sleep_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return true;
+}
+
+static int sleep_pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
+ sleep(2);
+ *sizep = make_pair_attr(8);
+ pf_called = true;
+ return 0;
+}
+
+static void *run_expensive_pf(void *arg) {
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, PL_READ, NULL, NULL);
+ assert(r == TOKUDB_TRY_AGAIN);
+ assert(pf_called);
+ return arg;
+}
+
+static void *run_expensive_fetch(void *arg) {
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ int r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, PL_READ, NULL, NULL);
+ assert(fetch_called);
+ assert(r == TOKUDB_TRY_AGAIN);
+ return arg;
+}
+
+
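+// While a slow fetch or partial fetch for the pair is in flight, a concurrent
+// nonblocking pin must return TOKUDB_TRY_AGAIN (after doing the expensive
+// work) rather than leave the pair pinned; a blocking get_and_pin on the other
+// thread waits for that work to finish and then succeeds.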
+static void
+run_test (void) {
+ const int test_limit = 20;
+ int r;
+ void *ret;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+
+ toku_pthread_t fetch_tid;
+ fetch_called = false;
+ r = toku_pthread_create(
+ toku_uninstrumented, &fetch_tid, nullptr, run_expensive_fetch, nullptr);
+ sleep(1);
+ r = toku_cachetable_get_and_pin(f1,
+ make_blocknum(1),
+ 1,
+ &v1,
+ wc,
+ sleep_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ false,
+ NULL);
+ assert_zero(r);
+ assert(fetch_called);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+ r = toku_pthread_join(fetch_tid, &ret);
+ assert_zero(r);
+
+ // pin twice with PL_READ (nonblocking); both pins should succeed without TOKUDB_TRY_AGAIN
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, PL_READ, NULL, NULL);
+ assert_zero(r);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, PL_READ, NULL, NULL);
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+
+ toku_pthread_t pf_tid;
+ pf_called = false;
+ r = toku_pthread_create(
+ toku_uninstrumented, &pf_tid, nullptr, run_expensive_pf, nullptr);
+ sleep(1);
+ r = toku_cachetable_get_and_pin(f1,
+ make_blocknum(1),
+ 1,
+ &v1,
+ wc,
+ sleep_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ false,
+ NULL);
+ assert_zero(r);
+ assert(pf_called);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+
+ r = toku_pthread_join(pf_tid, &ret);
+ assert_zero(r);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-read-pin.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-read-pin.cc
new file mode 100644
index 00000000..1a8f3813
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-read-pin.cc
@@ -0,0 +1,184 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+bool pf_called;
+bool fetch_called;
+CACHEFILE f1;
+
+static int
+sleep_fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void **dd __attribute__((__unused__)),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ sleep(2);
+ *dirtyp = 0;
+ *value = NULL;
+ *sizep = make_pair_attr(8);
+ fetch_called = true;
+ return 0;
+}
+
+static bool sleep_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ if (pf_called || fetch_called) return false;
+ return true;
+}
+
+static int sleep_pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
+ sleep(2);
+ *sizep = make_pair_attr(8);
+ pf_called = true;
+ return 0;
+}
+
+static void *run_expensive_pf(void *arg) {
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ pf_called = false;
+ fetch_called = false;
+ int r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, false, NULL);
+ assert_zero(r);
+ assert(pf_called);
+ return arg;
+}
+
+static void *run_expensive_fetch(void *arg) {
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ pf_called = false;
+ fetch_called = false;
+ int r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, sleep_pf_req_callback, sleep_pf_callback, false, NULL);
+ assert_zero(r);
+ assert(fetch_called);
+ return arg;
+}
+
+
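+// Two threads pin the same pair while a slow fetch (and later a slow partial
+// fetch) is in flight: the second blocking pin must wait for the expensive
+// work started by the first thread and then succeed without redoing it.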
+static void
+run_test (void) {
+ const int test_limit = 12;
+ int r;
+ void *ret;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+
+ toku_pthread_t fetch_tid;
+ fetch_called = false;
+ r = toku_pthread_create(
+ toku_uninstrumented, &fetch_tid, nullptr, run_expensive_fetch, nullptr);
+ sleep(1);
+ r = toku_cachetable_get_and_pin(f1,
+ make_blocknum(1),
+ 1,
+ &v1,
+ wc,
+ sleep_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ false,
+ NULL);
+ assert_zero(r);
+ assert(fetch_called);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+ r = toku_pthread_join(fetch_tid, &ret);
+ assert_zero(r);
+
+ // call with may_modify_node = false twice, make sure we can get it
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, false, NULL);
+ assert_zero(r);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, sleep_fetch, def_pf_req_callback, def_pf_callback, false, NULL);
+ assert_zero(r);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+
+ toku_pthread_t pf_tid;
+ pf_called = false;
+ r = toku_pthread_create(
+ toku_uninstrumented, &pf_tid, nullptr, run_expensive_pf, nullptr);
+ sleep(1);
+ r = toku_cachetable_get_and_pin(f1,
+ make_blocknum(1),
+ 1,
+ &v1,
+ wc,
+ sleep_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ false,
+ NULL);
+ assert_zero(r);
+ assert(pf_called);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
+ assert(r==0);
+
+ r = toku_pthread_join(pf_tid, &ret);
+ assert_zero(r);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc
new file mode 100644
index 00000000..6b0efdb0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc
@@ -0,0 +1,100 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+static void remove_key_expect_checkpoint(
+ CACHEKEY* UU(cachekey),
+ bool for_checkpoint,
+ void* UU(extra)
+ )
+{
+ assert(for_checkpoint);
+}
+
+static void remove_key_expect_no_checkpoint(
+ CACHEKEY* UU(cachekey),
+ bool for_checkpoint,
+ void* UU(extra)
+ )
+{
+ assert(!for_checkpoint);
+}
+
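+// Verifies the for_checkpoint flag passed to the unpin_and_remove callback:
+// it must be true when the pair is removed while a checkpoint has it pending,
+// and false when no checkpoint is in progress.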
+static void
+cachetable_test (void) {
+ const int test_limit = 120;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(1), remove_key_expect_checkpoint, NULL);
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(1), remove_key_expect_no_checkpoint, NULL);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-verify.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-verify.cc
new file mode 100644
index 00000000..67ff4b96
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-verify.cc
@@ -0,0 +1,68 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
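+// Smoke test: pin a single pair, unpin it dirty, then verify and close the cachetable.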
+static void
+cachetable_test (void) {
+ const int test_limit = 12;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ void* v1;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-test.cc
new file mode 100644
index 00000000..a2b04d57
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-test.cc
@@ -0,0 +1,547 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// this mutex is used by some of the tests to serialize access to some
+// global data, especially between the test thread and the cachetable
+// writeback threads
+
+toku_mutex_t test_mutex;
+
+static inline void test_mutex_init(void) {
+ toku_mutex_init(toku_uninstrumented, &test_mutex, nullptr);
+}
+
+static inline void test_mutex_destroy(void) { toku_mutex_destroy(&test_mutex); }
+
+static inline void test_mutex_lock(void) {
+ toku_mutex_lock(&test_mutex);
+}
+
+static inline void test_mutex_unlock(void) {
+ toku_mutex_unlock(&test_mutex);
+}
+
+// verify that cachetable creation and close works
+
+static void
+test_cachetable_create(void) {
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ toku_cachetable_close(&ct);
+}
+
+static const int test_object_size = 1;
+
+struct item {
+ CACHEKEY key;
+ const char *something;
+};
+
+static CACHEFILE expect_f;
+
+static void maybe_flush(CACHETABLE t) {
+ toku_cachetable_maybe_flush_some(t);
+}
+
+
+static void flush_n (CACHEFILE f __attribute__((__unused__)), int UU(fd), CACHEKEY key __attribute__((__unused__)),
+ void *value,
+ void** UU(dd),
+ void *extra __attribute__((__unused__)),
+ PAIR_ATTR size __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool write_me __attribute__((__unused__)), bool keep_me __attribute__((__unused__)),
+ bool for_checkpoint __attribute__ ((__unused__)),
+ bool UU(is_clone)
+ ) {
+ int *CAST_FROM_VOIDP(v, value);
+ assert(*v==0);
+}
+static int fetch_n (CACHEFILE f __attribute__((__unused__)), PAIR UU(p), int UU(fd), CACHEKEY key __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void**value,
+ void** UU(dd),
+PAIR_ATTR *sizep __attribute__((__unused__)),
+ int * dirtyp, void*extraargs) {
+ assert((long)extraargs==42);
+ *value=0;
+ *dirtyp = 0;
+ *sizep = make_pair_attr(0);
+ return 0;
+}
+
+
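+// Puts pair 1, then re-pins it with get_and_pin and maybe_get_and_pin,
+// checking each time that the cached value pointer (&i0) comes back unchanged,
+// before adding a second pair and closing.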
+static void test_nested_pin (void) {
+ void *f2=(void*)42;
+ CACHETABLE t;
+ CACHEFILE f;
+ int i0, i1;
+ int r;
+ void *vv,*vv2;
+ const char *fname = TOKU_TEST_FILENAME;
+ if (verbose) printf("creating cachetable\n");
+ toku_cachetable_create(&t, 1, ZERO_LSN, nullptr);
+ toku_os_recursive_delete(fname);
+ r = toku_cachetable_openf(&f, t, fname, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(r==0);
+ expect_f = f;
+
+ i0=0; i1=0;
+ uint32_t f1hash = toku_cachetable_hash(f, make_blocknum(1));
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(f2);
+ wc.flush_callback = flush_n;
+ toku_cachetable_put(f, make_blocknum(1), f1hash, &i0, make_pair_attr(1), wc, put_callback_nop);
+ r = toku_test_cachetable_unpin(f, make_blocknum(1), f1hash, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
+ r = toku_cachetable_get_and_pin(f, make_blocknum(1), f1hash, &vv, wc, fetch_n, def_pf_req_callback, def_pf_callback, true, f2);
+ assert(r==0);
+ assert(vv==&i0);
+ assert(i0==0);
+ r = toku_test_cachetable_unpin(f, make_blocknum(1), f1hash, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
+ assert(r==0);
+ r = toku_cachetable_maybe_get_and_pin(f, make_blocknum(1), f1hash, PL_WRITE_EXPENSIVE, &vv2);
+ assert(r==0);
+ assert(vv2==vv);
+ r = toku_test_cachetable_unpin(f, make_blocknum(1), f1hash, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
+ assert(r==0);
+ uint32_t f2hash = toku_cachetable_hash(f, make_blocknum(2));
+ toku_cachetable_put(f, make_blocknum(2), f2hash, &i1, make_pair_attr(test_object_size), wc, put_callback_nop);
+ r = toku_test_cachetable_unpin(f, make_blocknum(2), f2hash, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
+ assert(r==0);
+ toku_cachefile_close(&f, false, ZERO_LSN);
+ toku_cachetable_close(&t);
+}
+
+
+static void null_flush (CACHEFILE cf __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *extra __attribute__((__unused__)),
+ PAIR_ATTR size __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool write_me __attribute__((__unused__)),
+ bool keep_me __attribute__((__unused__)),
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+}
+
+static int add123_fetch (CACHEFILE cf, PAIR UU(p), int UU(fd), CACHEKEY key, uint32_t fullhash, void **value,
+ void** UU(dd),
+PAIR_ATTR *sizep __attribute__((__unused__)), int * dirtyp, void*extraargs) {
+ assert(fullhash==toku_cachetable_hash(cf,key));
+ assert((long)extraargs==123);
+ *value = (void*)((unsigned long)key.b+123L);
+ *dirtyp = 0;
+ *sizep = make_pair_attr(0);
+ return 0;
+}
+
+static int add222_fetch (CACHEFILE cf, PAIR UU(p), int UU(fd), CACHEKEY key, uint32_t fullhash, void **value,
+ void** UU(dd),
+PAIR_ATTR *sizep __attribute__((__unused__)), int * dirtyp, void*extraargs) {
+ assert(fullhash==toku_cachetable_hash(cf,key));
+ assert((long)extraargs==222);
+ *value = (void*)((unsigned long)key.b+222L);
+ *dirtyp = 0;
+ *sizep = make_pair_attr(0);
+ return 0;
+}
+
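+// Opening the same underlying file twice (via a hard link) must return the
+// same CACHEFILE handle, while a different file gets its own handle; a pair
+// put through one handle is visible through the other.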
+static void test_multi_filehandles (void) {
+ CACHETABLE t;
+ CACHEFILE f1,f2,f3;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU);
+ assert_zero(r);
+ char fname1[TOKU_PATH_MAX+1];
+ char fname2[TOKU_PATH_MAX+1];
+ char fname3[TOKU_PATH_MAX+1];
+ toku_path_join(fname1, 2, TOKU_TEST_FILENAME, "test1_ct.dat");
+ toku_path_join(fname2, 2, TOKU_TEST_FILENAME, "test2_ct.dat");
+ toku_path_join(fname3, 2, TOKU_TEST_FILENAME, "test3_ct.dat");
+ void *v;
+ unlink(fname1);
+ unlink(fname2);
+
+ toku_cachetable_create(&t, 4, ZERO_LSN, nullptr);
+ r = toku_cachetable_openf(&f1, t, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r==0);
+ r = link(fname1, fname2); assert(r==0);
+ r = toku_cachetable_openf(&f2, t, fname2, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r==0);
+ r = toku_cachetable_openf(&f3, t, fname3, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r==0);
+
+ assert(f1==f2);
+ assert(f1!=f3);
+
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback((void*)123);
+ wc.flush_callback = null_flush;
+ toku_cachetable_put(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), (void*)124, make_pair_attr(test_object_size), wc, put_callback_nop);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_DIRTY, make_pair_attr(0)); assert(r==0);
+ r = toku_cachetable_get_and_pin(f2, make_blocknum(1), toku_cachetable_hash(f2, make_blocknum(1)), &v, wc, add123_fetch, def_pf_req_callback, def_pf_callback, true, (void*)123); assert(r==0);
+ assert((unsigned long)v==124);
+ r = toku_cachetable_get_and_pin(f2, make_blocknum(2), toku_cachetable_hash(f2, make_blocknum(2)), &v, wc, add123_fetch, def_pf_req_callback, def_pf_callback, true, (void*)123); assert(r==0);
+ assert((unsigned long)v==125);
+ wc.write_extraargs = (void*)222;
+ r = toku_cachetable_get_and_pin(f3, make_blocknum(2), toku_cachetable_hash(f3, make_blocknum(2)), &v, wc, add222_fetch, def_pf_req_callback, def_pf_callback, true, (void*)222); assert(r==0);
+ assert((unsigned long)v==224);
+
+ // f1 and f2 are the same cachefile handle, so it is closed exactly once (via f2)
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_CLEAN, make_pair_attr(0)); assert(r==0);
+ r = toku_test_cachetable_unpin(f2, make_blocknum(2), toku_cachetable_hash(f2, make_blocknum(2)), CACHETABLE_CLEAN, make_pair_attr(0)); assert(r==0);
+ toku_cachefile_close(&f2, false, ZERO_LSN);
+
+ r = toku_test_cachetable_unpin(f3, make_blocknum(2), toku_cachetable_hash(f3, make_blocknum(2)), CACHETABLE_CLEAN, make_pair_attr(0)); assert(r==0);
+ toku_cachefile_close(&f3, false, ZERO_LSN);
+
+ toku_cachetable_close(&t);
+}
+
+static void test_dirty_flush(CACHEFILE f,
+ int UU(fd),
+ CACHEKEY key,
+ void *value,
+ void** UU(dd),
+ void *extra __attribute__((__unused__)),
+ PAIR_ATTR size,
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool do_write,
+ bool keep,
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ if (verbose) printf("test_dirty_flush %p %" PRId64 " %p %ld %u %u\n", f, key.b, value, size.size, (unsigned)do_write, (unsigned)keep);
+}
+
+static int test_dirty_fetch(CACHEFILE f, PAIR UU(p), int UU(fd), CACHEKEY key, uint32_t fullhash, void **value_ptr,
+ void** UU(dd),
+PAIR_ATTR *size_ptr, int * dirtyp, void *arg) {
+ *value_ptr = arg;
+ *dirtyp = 0;
+ *size_ptr = make_pair_attr(0);
+ assert(fullhash==toku_cachetable_hash(f,key));
+ if (verbose) printf("test_dirty_fetch %p %" PRId64 " %p %ld %p\n", f, key.b, *value_ptr, size_ptr->size, arg);
+ return 0;
+}
+
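+// Tracks the dirty and pinned state reported by toku_cachetable_get_key_state:
+// a put creates a dirty pair, a clean unpin does not clear the dirty bit, a
+// freshly fetched pair starts clean, and a dirty unpin marks it dirty.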
+static void test_dirty(void) {
+ if (verbose) printf("test_dirty\n");
+
+ CACHETABLE t;
+ CACHEFILE f;
+ CACHEKEY key; void *value;
+ int dirty; long long pinned; long entry_size;
+ int r;
+
+ toku_cachetable_create(&t, 4, ZERO_LSN, nullptr);
+
+ const char *fname = TOKU_TEST_FILENAME;
+ toku_os_recursive_delete(fname);
+ r = toku_cachetable_openf(&f, t, fname, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(r == 0);
+
+ key = make_blocknum(1); value = (void*)1;
+ uint32_t hkey = toku_cachetable_hash(f, key);
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = test_dirty_flush;
+ toku_cachetable_put(f, key, hkey, value, make_pair_attr(test_object_size), wc, put_callback_nop);
+
+ // cachetable_print_state(t);
+ r = toku_cachetable_get_key_state(t, key, f, &value, &dirty, &pinned, &entry_size);
+ assert(r == 0);
+ assert(dirty == 1);
+ assert(pinned == 1);
+
+ r = toku_test_cachetable_unpin(f, key, hkey, CACHETABLE_CLEAN, make_pair_attr(0));
+ assert(r == 0);
+ r = toku_cachetable_get_key_state(t, key, f, &value, &dirty, &pinned, &entry_size);
+ assert(r == 0);
+ assert(dirty == 1);
+ assert(pinned == 0);
+
+ r = toku_cachetable_get_and_pin(f, key, hkey, &value, wc,
+ test_dirty_fetch, def_pf_req_callback, def_pf_callback, true, 0);
+ assert(r == 0);
+
+ // cachetable_print_state(t);
+ r = toku_cachetable_get_key_state(t, key, f, &value, &dirty, &pinned, &entry_size);
+ assert(r == 0);
+ assert(dirty == 1);
+ assert(pinned == 1);
+
+ r = toku_test_cachetable_unpin(f, key, hkey, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
+ assert(r == 0);
+
+ // cachetable_print_state(t);
+ r = toku_cachetable_get_key_state(t, key, f, &value, &dirty, &pinned, &entry_size);
+ assert(r == 0);
+ assert(dirty == 1);
+ assert(pinned == 0);
+
+ key = make_blocknum(2);
+ hkey = toku_cachetable_hash(f, key);
+ r = toku_cachetable_get_and_pin(f, key, hkey,
+ &value, wc,
+ test_dirty_fetch, def_pf_req_callback, def_pf_callback, true, 0);
+ assert(r == 0);
+
+ // cachetable_print_state(t);
+ r = toku_cachetable_get_key_state(t, key, f, &value, &dirty, &pinned, &entry_size);
+ assert(r == 0);
+ assert(dirty == 0);
+ assert(pinned == 1);
+
+ r = toku_test_cachetable_unpin(f, key, hkey, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
+ assert(r == 0);
+
+ // cachetable_print_state(t);
+ r = toku_cachetable_get_key_state(t, key, f, &value, &dirty, &pinned, &entry_size);
+ assert(r == 0);
+ assert(dirty == 0);
+ assert(pinned == 0);
+
+ r = toku_cachetable_get_and_pin(f, key, hkey,
+ &value, wc,
+ test_dirty_fetch, def_pf_req_callback, def_pf_callback, true, 0);
+ assert(r == 0);
+
+ // cachetable_print_state(t);
+ r = toku_cachetable_get_key_state(t, key, f, &value, &dirty, &pinned, &entry_size);
+ assert(r == 0);
+ assert(dirty == 0);
+ assert(pinned == 1);
+
+ r = toku_test_cachetable_unpin(f, key, hkey, CACHETABLE_DIRTY, make_pair_attr(test_object_size));
+ assert(r == 0);
+
+ // cachetable_print_state(t);
+ r = toku_cachetable_get_key_state(t, key, f, &value, &dirty, &pinned, &entry_size);
+ assert(r == 0);
+ assert(dirty == 1);
+ assert(pinned == 0);
+
+ toku_cachefile_close(&f, false, ZERO_LSN);
+
+ toku_cachetable_close(&t);
+}
+
+static int test_size_debug;
+static CACHEKEY test_size_flush_key;
+
+static void test_size_flush_callback(CACHEFILE f,
+ int UU(fd),
+ CACHEKEY key,
+ void *value,
+ void** UU(dd),
+ void *extra __attribute__((__unused__)),
+ PAIR_ATTR size,
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool do_write,
+ bool keep,
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ if (test_size_debug && verbose) printf("test_size_flush %p %" PRId64 " %p %ld %u %u\n", f, key.b, value, size.size, (unsigned)do_write, (unsigned)keep);
+ if (keep) {
+ if (do_write) {
+ test_mutex_lock();
+ test_size_flush_key = key;
+ test_mutex_unlock();
+ }
+ } else {
+ assert(!do_write);
+ }
+}
+
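+// Verifies that unpinning with a larger PAIR_ATTR resizes the cached entry:
+// a later toku_cachetable_get_attr must report the new size.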
+static void test_size_resize(void) {
+ if (verbose) printf("test_size_resize\n");
+
+ CACHETABLE t;
+ CACHEFILE f;
+ int r;
+
+ int n = 3;
+ long size = 1;
+
+ toku_cachetable_create(&t, n*size, ZERO_LSN, nullptr);
+
+ const char *fname = TOKU_TEST_FILENAME;
+ unlink(fname);
+ r = toku_cachetable_openf(&f, t, fname, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(r == 0);
+
+ CACHEKEY key = make_blocknum(42);
+ void *value = (void *) -42;
+
+ uint32_t hkey = toku_cachetable_hash(f, key);
+
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = test_size_flush_callback;
+ toku_cachetable_put(f, key, hkey, value, make_pair_attr(size), wc, put_callback_nop);
+
+ void *entry_value; int dirty; long long pinned; long entry_size;
+ r = toku_cachetable_get_key_state(t, key, f, &entry_value, &dirty, &pinned, &entry_size);
+ assert(r == 0);
+ assert(dirty == 1);
+ assert(pinned == 1);
+ assert(entry_value == value);
+ assert(entry_size == size);
+
+ long long new_size = 2*size;
+ r = toku_test_cachetable_unpin(f, key, hkey, CACHETABLE_CLEAN, make_pair_attr(new_size));
+ assert(r == 0);
+
+ void *current_value;
+ r = toku_cachetable_get_and_pin(f, key, hkey, &current_value, wc, 0, def_pf_req_callback, def_pf_callback, true, 0);
+ assert(r == 0);
+ assert(current_value == value);
+ PAIR_ATTR attr;
+ r = toku_cachetable_get_attr(f, key, hkey, &attr);
+ assert(r == 0);
+ assert(attr.size == new_size);
+
+ r = toku_test_cachetable_unpin(f, key, hkey, CACHETABLE_CLEAN, make_pair_attr(new_size));
+ assert(r == 0);
+
+ toku_cachefile_close(&f, false, ZERO_LSN);
+ toku_cachetable_close(&t);
+}
+
+static int min2(int a, int b) { return a < b ? a : b; }
+
+__attribute__((unused))
+static void test_size_flush(void) {
+ if (verbose) printf("test_size_flush\n");
+
+ CACHETABLE t;
+ CACHEFILE f;
+ int r;
+
+ const int n = 8;
+ long long size = 1*1024*1024;
+ toku_cachetable_create(&t, n*size, ZERO_LSN, nullptr);
+
+ const char *fname = TOKU_TEST_FILENAME;
+ unlink(fname);
+ r = toku_cachetable_openf(&f, t, fname, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(r == 0);
+
+ /* put 2*n keys into the table, ensure flushes occur in key order */
+ test_mutex_lock();
+ test_size_flush_key = make_blocknum(-1);
+ test_mutex_unlock();
+
+ int i;
+ CACHEKEY expect_flush_key = make_blocknum(0);
+ for (i=0; i<2*n; i++) {
+ CACHEKEY key = make_blocknum(i);
+ void *value = (void *)(long)-i;
+ // printf("test_size put %lld %p %lld\n", key, value, size);
+ uint32_t hkey = toku_cachetable_hash(f, key);
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = test_size_flush_callback;
+ toku_cachetable_put(f, key, hkey, value, make_pair_attr(size), wc, put_callback_nop);
+
+ int n_entries, hash_size; long size_current, size_limit;
+ toku_cachetable_get_state(t, &n_entries, &hash_size, &size_current, &size_limit);
+ while (n_entries != min2(i+1, n)) {
+ toku_pthread_yield(); maybe_flush(t);
+ toku_cachetable_get_state(t, &n_entries, 0, 0, 0);
+ }
+ assert(n_entries == min2(i+1, n));
+
+ void *entry_value; int dirty; long long pinned; long entry_size;
+ r = toku_cachetable_get_key_state(t, key, f, &entry_value, &dirty, &pinned, &entry_size);
+ assert(r == 0);
+ assert(dirty == 1);
+ assert(pinned == 1);
+ assert(entry_value == value);
+ assert(entry_size == size);
+
+ test_mutex_lock();
+ if (test_size_flush_key.b != -1) {
+ assert(test_size_flush_key.b == expect_flush_key.b);
+ assert(expect_flush_key.b == i-n);
+ expect_flush_key.b += 1;
+ }
+ test_mutex_unlock();
+
+ r = toku_test_cachetable_unpin(f, key, hkey, CACHETABLE_CLEAN, make_pair_attr(size));
+ assert(r == 0);
+ }
+
+ toku_cachefile_close(&f, false, ZERO_LSN);
+ toku_cachetable_close(&t);
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ // parse args
+ int i;
+ for (i=1; i<argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ if (verbose > 0) verbose--;
+ continue;
+ }
+ }
+
+ test_mutex_init();
+
+ // run tests
+ test_multi_filehandles();
+ test_cachetable_create();
+ for (i=0; i<1; i++) {
+ test_nested_pin();
+ test_multi_filehandles ();
+ test_dirty();
+ test_size_resize();
+ //test_size_flush();
+ }
+
+ test_mutex_destroy();
+
+ if (verbose) printf("ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-test.h b/storage/tokudb/PerconaFT/ft/tests/cachetable-test.h
new file mode 100644
index 00000000..acb859de
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-test.h
@@ -0,0 +1,72 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "cachetable/cachetable-internal.h"
+
+//
+// Dummy callbacks for checkpointing
+//
+static void dummy_log_fassociate(CACHEFILE UU(cf), void* UU(p)) { }
+static void dummy_close_usr(CACHEFILE UU(cf), int UU(i), void* UU(p), bool UU(b), LSN UU(lsn)) { }
+static void dummy_free_usr(CACHEFILE UU(cf), void* UU(p)) { }
+static void dummy_chckpnt_usr(CACHEFILE UU(cf), int UU(i), void* UU(p)) { }
+static void dummy_begin(LSN UU(lsn), void* UU(p)) { }
+static void dummy_end(CACHEFILE UU(cf), int UU(i), void* UU(p)) { }
+static void dummy_note_pin(CACHEFILE UU(cf), void* UU(p)) { }
+static void dummy_note_unpin(CACHEFILE UU(cf), void* UU(p)) { }
+
+//
+// Helper function to set dummy functions in the given cachefile.
+//
+static UU() void
+create_dummy_functions(CACHEFILE cf)
+{
+ void *ud = NULL;
+ toku_cachefile_set_userdata(cf,
+ ud,
+ &dummy_log_fassociate,
+ &dummy_close_usr,
+ &dummy_free_usr,
+ &dummy_chckpnt_usr,
+ &dummy_begin,
+ &dummy_end,
+ &dummy_note_pin,
+ &dummy_note_unpin);
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-and-remove-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-and-remove-test.cc
new file mode 100644
index 00000000..5290db26
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-and-remove-test.cc
@@ -0,0 +1,160 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static int
+fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void** UU(dd),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp __attribute__((__unused__)),
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *sizep = make_pair_attr(0);
+ return 0;
+}
+
+// test simple unpin and remove
+static void
+cachetable_unpin_and_remove_test (int n) {
+ if (verbose) printf("%s %d\n", __FUNCTION__, n);
+ const int table_limit = 2*n;
+ int r;
+ int i;
+
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, table_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, 0777); assert(r == 0);
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+
+ // generate some random keys
+ CACHEKEY keys[n]; int nkeys = n;
+ for (i=0; i<n; i++) {
+ keys[i].b = random();
+ }
+
+ // put the keys into the cachetable
+ for (i=0; i<n; i++) {
+ uint32_t hi = toku_cachetable_hash(f1, make_blocknum(keys[i].b));
+ toku_cachetable_put(f1, make_blocknum(keys[i].b), hi, (void *)(long) keys[i].b, make_pair_attr(1),wc, put_callback_nop);
+ }
+
+ // unpin and remove
+ CACHEKEY testkeys[n];
+ for (i=0; i<n; i++) testkeys[i] = keys[i];
+ while (nkeys > 0) {
+ i = random() % nkeys;
+ uint32_t hi = toku_cachetable_hash(f1, make_blocknum(testkeys[i].b));
+ r = toku_test_cachetable_unpin_and_remove(f1, testkeys[i], NULL, NULL);
+ assert(r == 0);
+
+ toku_cachefile_verify(f1);
+
+ // verify that k is removed
+ void *v;
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(testkeys[i].b), hi, PL_WRITE_EXPENSIVE, &v);
+ assert(r != 0);
+
+ testkeys[i] = testkeys[nkeys-1]; nkeys -= 1;
+ }
+
+ // verify that the cachetable is empty
+ int nentries;
+ toku_cachetable_get_state(ct, &nentries, NULL, NULL, NULL);
+ assert(nentries == 0);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+// test remove when the pair is being written
+static void
+cachetable_put_evict_remove_test (int n) {
+ if (verbose) printf("%s %d\n", __FUNCTION__, n);
+ const int table_limit = n-1;
+ int r;
+ int i;
+
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, table_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, 0777); assert(r == 0);
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+
+ uint32_t hi[n];
+ for (i=0; i<n; i++)
+ hi[i] = toku_cachetable_hash(f1, make_blocknum(i));
+
+ // put 0, 1, 2, ... should evict 0
+ for (i=0; i<n; i++) {
+ toku_cachetable_put(f1, make_blocknum(i), hi[i], (void *)(long)i, make_pair_attr(1), wc, put_callback_nop);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi[i], CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ }
+
+ // get 0
+ void *v;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(0), hi[0], &v, wc, fetch, def_pf_req_callback, def_pf_callback, true, 0);
+ assert(r == 0);
+
+ // remove 0
+ r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(0), NULL, NULL);
+ assert(r == 0);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_unpin_and_remove_test(8);
+ cachetable_put_evict_remove_test(4);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-remove-and-checkpoint.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-remove-and-checkpoint.cc
new file mode 100644
index 00000000..9fe43672
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-remove-and-checkpoint.cc
@@ -0,0 +1,123 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable-test.h"
+
+CACHETABLE ct;
+
+//
+// This test exposed a bug (#3970) caught only by Valgrind:
+// freed memory was being accessed by toku_test_cachetable_unpin_and_remove.
+//
+static void *run_end_chkpt(void *arg) {
+ assert(arg == NULL);
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_end_checkpoint(
+ cp,
+ NULL,
+ NULL,
+ NULL
+ );
+ return arg;
+}
+
+static void
+run_test (void) {
+ const int test_limit = 12;
+ int r;
+ ct = NULL;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ create_dummy_functions(f1);
+
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ void* v1;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ toku_test_cachetable_unpin(
+ f1,
+ make_blocknum(1),
+ toku_cachetable_hash(f1, make_blocknum(1)),
+ CACHETABLE_DIRTY,
+ make_pair_attr(8)
+ );
+
+ // now this should mark the pair for checkpoint
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ toku_cachetable_begin_checkpoint(cp, NULL);
+ r = toku_cachetable_get_and_pin(f1,
+ make_blocknum(1),
+ toku_cachetable_hash(f1, make_blocknum(1)),
+ &v1,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ NULL);
+
+ toku_pthread_t mytid;
+ r = toku_pthread_create(
+ toku_uninstrumented, &mytid, nullptr, run_end_chkpt, nullptr);
+ assert(r == 0);
+
+ // give checkpoint thread a chance to start waiting on lock
+ sleep(1);
+ r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(1), NULL, NULL);
+ assert(r==0);
+
+ void* ret;
+ r = toku_pthread_join(mytid, &ret);
+ assert(r==0);
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+
+
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-test.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-test.cc
new file mode 100644
index 00000000..e41181d9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-unpin-test.cc
@@ -0,0 +1,157 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
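+// Puts n pinned pairs and checks toku_cachefile_count_pinned as each is put
+// and later unpinned; maybe_get_and_pin must fail on a pair that is still pinned.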
+static void
+cachetable_unpin_test (int n) {
+ const int test_limit = 2*n;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ int i;
+ for (i=1; i<=n; i++) {
+ uint32_t hi;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ hi = toku_cachetable_hash(f1, make_blocknum(i));
+ toku_cachetable_put(f1, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(1), wc, put_callback_nop);
+ assert(toku_cachefile_count_pinned(f1, 0) == i);
+
+ void *v;
+ r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
+ assert(r == -1);
+ assert(toku_cachefile_count_pinned(f1, 0) == i);
+
+ //r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, 1);
+ //assert(r == 0);
+ assert(toku_cachefile_count_pinned(f1, 0) == i);
+ }
+ for (i=n; i>0; i--) {
+ uint32_t hi;
+ hi = toku_cachetable_hash(f1, make_blocknum(i));
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
+ assert(r == 0);
+ assert(toku_cachefile_count_pinned(f1, 0) == i-1);
+ }
+ assert(toku_cachefile_count_pinned(f1, 1) == 0);
+ toku_cachetable_verify(ct);
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+enum unpin_evictor_test_type {
+ unpin_increase,
+ unpin_decrease,
+ unpin_invalid_attr
+};
+
+static void
+unpin_and_evictor_test(enum unpin_evictor_test_type test_type) {
+ int r;
+ CACHETABLE ct;
+ int test_limit = 4;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+ evictor_test_helpers::set_hysteresis_limits(&ct->ev, test_limit, test_limit);
+ evictor_test_helpers::disable_ev_thread(&ct->ev);
+
+ void* value2;
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ // this should put a pair of size 8 into the cachetable
+ r = toku_cachetable_get_and_pin(
+ f1,
+ make_blocknum(1),
+ 1,
+ &value2,
+ wc,
+ def_fetch,
+ def_pf_req_callback,
+ def_pf_callback,
+ true,
+ 0
+ );
+ assert(r==0);
+ //
+ // now we unpin:
+ // if we increase the size, the evictor should run (the eviction-run count goes up)
+ // if we don't increase the size, the evictor should not run
+ // if we pass in an invalid pair_attr, the evictor should not run
+ //
+ uint64_t old_num_ev_runs = 0;
+ uint64_t new_num_ev_runs = 0;
+ if (test_type == unpin_increase) {
+ old_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(9));
+ new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ assert(new_num_ev_runs > old_num_ev_runs);
+ }
+ else if (test_type == unpin_decrease || test_type == unpin_invalid_attr) {
+ old_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
+ new_num_ev_runs = evictor_test_helpers::get_num_eviction_runs(&ct->ev);
+ assert(new_num_ev_runs == old_num_ev_runs);
+ }
+ else {
+ assert(false);
+ }
+
+ toku_cachetable_verify(ct);
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_unpin_test(8);
+ unpin_and_evictor_test(unpin_increase);
+ unpin_and_evictor_test(unpin_decrease);
+ unpin_and_evictor_test(unpin_invalid_attr);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-writer-thread-limit.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-writer-thread-limit.cc
new file mode 100644
index 00000000..cb60ca9d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-writer-thread-limit.cc
@@ -0,0 +1,100 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <portability/toku_atomic.h>
+
+
+static int total_size;
+static int test_limit;
+
+
+static void
+flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void** UU(dd),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+ if (w) {
+ int curr_size = toku_sync_fetch_and_sub(&total_size, 1);
+ assert(curr_size <= 200);
+ usleep(500*1000);
+ }
+}
+
+
+static void
+cachetable_test (void) {
+ total_size = 0;
+ int num_entries = 100;
+ test_limit = 6;
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr);
+ const char *fname1 = TOKU_TEST_FILENAME;
+ unlink(fname1);
+ CACHEFILE f1;
+ r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
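+ // Each put below bumps total_size and the deliberately slow flush callback decrements
+ // it as dirty pairs get written back. The assert inside the loop checks that the client
+ // cannot run arbitrarily far ahead of the writers: the number of outstanding dirty
+ // pairs stays bounded, presumably by the cachetable's writer-thread limit (hence the
+ // test name).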
+ for (int64_t i = 0; i < num_entries; i++) {
+ CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
+ wc.flush_callback = flush;
+ toku_cachetable_put(f1, make_blocknum(i), i, NULL, make_pair_attr(1), wc, put_callback_nop);
+ int curr_size = toku_sync_fetch_and_add(&total_size, 1);
+ assert(curr_size <= test_limit + test_limit/2+1);
+ r = toku_test_cachetable_unpin(f1, make_blocknum(i), i, CACHETABLE_DIRTY, make_pair_attr(4));
+ }
+
+ toku_cachefile_close(&f1, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ cachetable_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/comparator-test.cc b/storage/tokudb/PerconaFT/ft/tests/comparator-test.cc
new file mode 100644
index 00000000..7ca2d0e4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/comparator-test.cc
@@ -0,0 +1,127 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdlib.h>
+#include <ft/comparator.h>
+
+static int MAGIC = 49;
+static DBT dbt_a;
+static DBT dbt_b;
+static DESCRIPTOR expected_desc;
+
+static int magic_compare(DB *db, const DBT *a, const DBT *b) {
+ invariant(db && a && b);
+ invariant(db->cmp_descriptor == expected_desc);
+ invariant(a == &dbt_a);
+ invariant(b == &dbt_b);
+ return MAGIC;
+}
+
+static void test_desc(void) {
+ int c;
+ toku::comparator cmp;
+ DESCRIPTOR_S d1, d2;
+
+ // create with d1, make sure it gets used
+ cmp.create(magic_compare, &d1);
+ expected_desc = &d1;
+ c = cmp(&dbt_a, &dbt_b);
+ invariant(c == MAGIC);
+
+ // set desc to d2, make sure it gets used
+ toku::comparator cmp2;
+ cmp2.create(magic_compare, &d2);
+ cmp.inherit(cmp2);
+ expected_desc = &d2;
+ c = cmp(&dbt_a, &dbt_b);
+ invariant(c == MAGIC);
+ cmp2.destroy();
+
+ // go back to using d1, but using the create_from API
+ toku::comparator cmp3, cmp4;
+ cmp3.create(magic_compare, &d1); // cmp3 has d1
+ cmp4.create_from(cmp3); // cmp4 should get d1 from cmp3
+ expected_desc = &d1;
+ c = cmp3(&dbt_a, &dbt_b);
+ invariant(c == MAGIC);
+ c = cmp4(&dbt_a, &dbt_b);
+ invariant(c == MAGIC);
+ cmp3.destroy();
+ cmp4.destroy();
+
+ cmp.destroy();
+}
+
+static int dont_compare_me_bro(DB *db, const DBT *a, const DBT *b) {
+ abort();
+ return db && a && b;
+}
+
+static void test_infinity(void) {
+ int c;
+ toku::comparator cmp;
+ cmp.create(dont_compare_me_bro, nullptr);
+
+ // make sure infinity-valued end points compare as expected
+ // to an arbitrary (uninitialized!) dbt. the comparison function
+ // should never be called and thus the dbt never actually read.
+ DBT arbitrary_dbt;
+
+ c = cmp(&arbitrary_dbt, toku_dbt_positive_infinity());
+ invariant(c < 0);
+ c = cmp(toku_dbt_negative_infinity(), &arbitrary_dbt);
+ invariant(c < 0);
+
+ c = cmp(toku_dbt_positive_infinity(), &arbitrary_dbt);
+ invariant(c > 0);
+ c = cmp(&arbitrary_dbt, toku_dbt_negative_infinity());
+ invariant(c > 0);
+
+ c = cmp(toku_dbt_negative_infinity(), toku_dbt_negative_infinity());
+ invariant(c == 0);
+ c = cmp(toku_dbt_positive_infinity(), toku_dbt_positive_infinity());
+ invariant(c == 0);
+
+ cmp.destroy();
+}
+
+int main(void) {
+ test_desc();
+ test_infinity();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/compress-test.cc b/storage/tokudb/PerconaFT/ft/tests/compress-test.cc
new file mode 100644
index 00000000..f5ed3181
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/compress-test.cc
@@ -0,0 +1,145 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test the zlib (with and without checksum), quicklz, lzma, and snappy compression
+// methods, timing each and reporting its compression ratio. (The "toku" compression
+// method is a composite of quicklz and zlib.)
+
+#include <sys/time.h>
+#include "test.h"
+#include "serialize/compress.h"
+
+static float tdiff (struct timeval *start, struct timeval *end) {
+ return (end->tv_sec-start->tv_sec) + 1e-6*(end->tv_usec - start->tv_usec);
+}
+
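+// Round-trip helper: compress buf (length i) with method m into a buffer sized by
+// toku_compress_bound, decompress it again, and check the output matches the input byte
+// for byte. Returns the actual compressed length so callers can accumulate ratios.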
+static uLongf test_compress_buf_method (unsigned char *buf, int i, enum toku_compression_method m) {
+ int bound = toku_compress_bound(m, i);
+ unsigned char *MALLOC_N(bound, cb);
+ uLongf actual_clen = bound;
+ toku_compress(m, cb, &actual_clen, buf, i);
+ unsigned char *MALLOC_N(i, ubuf);
+ toku_decompress(ubuf, i, cb, actual_clen);
+ assert(0==memcmp(ubuf, buf, i));
+ toku_free(ubuf);
+ toku_free(cb);
+ return actual_clen;
+}
+
+static void test_compress_i (int i, enum toku_compression_method m, uLongf *compress_size, uLongf *uncompress_size) {
+ unsigned char *MALLOC_N(i, b);
+ for (int j=0; j<i; j++) b[j] = random()%256;
+ *compress_size += test_compress_buf_method (b, i, m);
+ *uncompress_size += i;
+
+ for (int j=0; j<i; j++) b[j] = 0;
+ *compress_size += test_compress_buf_method (b, i, m);
+ *uncompress_size += i;
+
+ for (int j=0; j<i; j++) b[j] = 0xFF;
+ *compress_size += test_compress_buf_method(b, i, m);
+ *uncompress_size += i;
+
+ toku_free(b);
+}
+
+static void test_compress (enum toku_compression_method m, uLongf *compress_size, uLongf *uncompress_size) {
+ // unlike quicklz, we can handle length 0.
+ for (int i=0; i<100; i++) {
+ test_compress_i(i, m, compress_size, uncompress_size);
+ }
+ test_compress_i(1024, m, compress_size, uncompress_size);
+ test_compress_i(1024*1024*4, m, compress_size, uncompress_size);
+ test_compress_i(1024*1024*4 - 123, m, compress_size, uncompress_size); // just some random lengths
+}
+
+static void test_compress_methods () {
+ struct timeval start, end;
+ uLongf compress_size = 0;
+ uLongf uncompress_size = 0;
+
+ gettimeofday(&start, NULL);
+ test_compress(TOKU_ZLIB_METHOD, &compress_size, &uncompress_size);
+ gettimeofday(&end, NULL);
+ printf("TOKU_ZLIB_METHOD Time=%.6fs , Ratio=%.2f[%d/%d]\n",
+ tdiff(&start, &end),
+ (float)compress_size / (float)uncompress_size, (int)compress_size, (int)uncompress_size);
+
+ compress_size = 0;
+ uncompress_size = 0;
+ gettimeofday(&start, NULL);
+ test_compress(TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD, &compress_size, &uncompress_size);
+ gettimeofday(&end, NULL);
+ printf("TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD Time=%.6fs, Ratio=%.2f[%d/%d]\n",
+ tdiff(&start, &end),
+ (float)compress_size / (float)uncompress_size, (int)compress_size, (int)uncompress_size);
+
+ compress_size = 0;
+ uncompress_size = 0;
+ gettimeofday(&start, NULL);
+ test_compress(TOKU_QUICKLZ_METHOD, &compress_size, &uncompress_size);
+ gettimeofday(&end, NULL);
+ printf("TOKU_QUICKLZ_METHOD Time=%.6fs, Ratio=%.2f[%d/%d]\n",
+ tdiff(&start, &end),
+ (float)compress_size / (float)uncompress_size, (int)compress_size, (int)uncompress_size);
+
+ compress_size = 0;
+ uncompress_size = 0;
+ gettimeofday(&start, NULL);
+ test_compress(TOKU_LZMA_METHOD, &compress_size, &uncompress_size);
+ gettimeofday(&end, NULL);
+ printf("TOKU_LZMA_METHOD Time=%.6fs, Ratio=%.2f[%d/%d]\n",
+ tdiff(&start, &end),
+ (float)compress_size / (float)uncompress_size, (int)compress_size, (int)uncompress_size);
+
+ compress_size = 0;
+ uncompress_size = 0;
+ gettimeofday(&start, NULL);
+ test_compress(TOKU_SNAPPY_METHOD, &compress_size, &uncompress_size);
+ gettimeofday(&end, NULL);
+ printf("TOKU_SNAPPY_METHOD Time=%.6fs, Ratio=%.2f[%d/%d]\n",
+ tdiff(&start, &end),
+ (float)compress_size / (float)uncompress_size, (int)compress_size, (int)uncompress_size);
+}
+
+int test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ test_compress_methods();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/dbufio-test-destroy.cc b/storage/tokudb/PerconaFT/ft/tests/dbufio-test-destroy.cc
new file mode 100644
index 00000000..31f9ae5b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/dbufio-test-destroy.cc
@@ -0,0 +1,110 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "loader/dbufio.h"
+#include <stdio.h>
+#include <fcntl.h>
+#include <toku_assert.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+
+enum { N = 5 };
+enum { M = 10 };
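+// Create N files of chars_per_file bytes each, wrap them in a dbufio fileset, panic the
+// fileset with EIO, and then destroy it without ever reading from it. As far as this
+// test goes, the point is that destroy_dbufio_fileset should succeed cleanly after a
+// panic.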
+static void test1 (size_t chars_per_file, size_t UU(bytes_per_read)) {
+ int fds[N];
+ char fnames[N][100];
+ size_t n_read[N];
+ int UU(n_live)=N;
+ for (int i=0; i<N; i++) {
+ snprintf(fnames[i], 100, "dbufio-test-destroy-file%d.data", i);
+ unlink(fnames[i]);
+ fds[i] = open(fnames[i], O_CREAT|O_RDWR, S_IRWXU);
+ //printf("fds[%d]=%d is %s\n", i, fds[i], fnames[i]);
+ assert(fds[i]>=0);
+ n_read[i]=0;
+ for (size_t j=0; j<chars_per_file; j++) {
+ unsigned char c = (i+j)%256;
+ int r = toku_os_write(fds[i], &c, 1);
+ if (r!=0) {
+ int er = get_maybe_error_errno();
+ printf("fds[%d]=%d r=%d errno=%d (%s)\n", i, fds[i], r, er, strerror(er));
+ }
+ assert(r==0);
+ }
+ {
+ int r = lseek(fds[i], 0, SEEK_SET);
+ assert(r==0);
+ }
+
+ }
+ DBUFIO_FILESET bfs;
+ {
+ int r = create_dbufio_fileset(&bfs, N, fds, M, false);
+ assert(r==0);
+ }
+
+ { int r = panic_dbufio_fileset(bfs, EIO); assert(r == 0); }
+
+ {
+ int r = destroy_dbufio_fileset(bfs);
+ assert(r==0);
+ }
+ for (int i=0; i<N; i++) {
+ {
+ int r = unlink(fnames[i]);
+ assert(r==0);
+ }
+ {
+ int r = close(fds[i]);
+ assert(r==0);
+ }
+ assert(n_read[i]==0);
+ }
+}
+
+
+
+int main (int argc __attribute__((__unused__)), char *argv[]__attribute__((__unused__))) {
+// test1(0, 1);
+// test1(1, 1);
+// test1(15, 1);
+// test1(100, 1);
+ test1(30, 3); // 3 and M are relatively prime. But 3 divides the file size.
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/dbufio-test.cc b/storage/tokudb/PerconaFT/ft/tests/dbufio-test.cc
new file mode 100644
index 00000000..54872766
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/dbufio-test.cc
@@ -0,0 +1,126 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "loader/dbufio.h"
+#include <stdio.h>
+#include <fcntl.h>
+#include <toku_assert.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+
+enum { N = 5 };
+enum { M = 10 };
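+// Write N files of chars_per_file bytes each, then read them back through a dbufio
+// fileset in random interleaved order, bytes_per_read bytes at a time, until every file
+// hits EOF. Verifies that each file yields exactly chars_per_file bytes.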
+static void test1 (size_t chars_per_file, size_t bytes_per_read) {
+ int fds[N];
+ char fnames[N][100];
+ size_t n_read[N];
+ int still_live[N];
+ int n_live=N;
+ for (int i=0; i<N; i++) {
+ snprintf(fnames[i], 100, "dbufio-test-file%d.data", i);
+ unlink(fnames[i]);
+ fds[i] = open(fnames[i], O_CREAT|O_RDWR, S_IRWXU);
+ //printf("fds[%d]=%d is %s\n", i, fds[i], fnames[i]);
+ assert(fds[i]>=0);
+ n_read[i]=0;
+ still_live[i]=i;
+ for (size_t j=0; j<chars_per_file; j++) {
+ unsigned char c = (i+j)%256;
+ int r = toku_os_write(fds[i], &c, 1);
+ if (r!=0) printf("fds[%d]=%d r=%d errno=%d (%s)\n", i, fds[i], r, errno, strerror(errno));
+ assert(r==0);
+ }
+ {
+ int r = lseek(fds[i], 0, SEEK_SET);
+ assert(r==0);
+ }
+
+ }
+ DBUFIO_FILESET bfs;
+ {
+ int r = create_dbufio_fileset(&bfs, N, fds, M, false);
+ assert(r==0);
+ }
+ while (n_live>0) {
+ int indirectnum = random()%n_live;
+ int filenum = still_live[indirectnum];
+ char buf[bytes_per_read];
+ size_t n_read_here=0;
+ int r = dbufio_fileset_read(bfs, filenum, buf, bytes_per_read, &n_read_here);
+ //printf("read(%d) -> %d (%ld) (old n_read=%ld)\n", filenum, r, n_read_here, n_read[filenum]);
+ if (r==0) {
+ // did read something
+ assert(n_read_here==bytes_per_read);
+ n_read[filenum]+=n_read_here;
+ //printf(" new n_read=%ld\n", n_read[filenum]);
+ assert(n_read[filenum]<=chars_per_file);
+ } else {
+ assert(r==EOF);
+ assert(n_read[filenum]==chars_per_file);
+ still_live[indirectnum] = still_live[n_live-1];
+ n_live--;
+ }
+ }
+ {
+ int r = destroy_dbufio_fileset(bfs);
+ assert(r==0);
+ }
+ for (int i=0; i<N; i++) {
+ {
+ int r = unlink(fnames[i]);
+ assert(r==0);
+ }
+ {
+ int r = close(fds[i]);
+ assert(r==0);
+ }
+ assert(n_read[i]==chars_per_file);
+ }
+}
+
+
+
+int main (int argc __attribute__((__unused__)), char *argv[]__attribute__((__unused__))) {
+// test1(0, 1);
+// test1(1, 1);
+// test1(15, 1);
+// test1(100, 1);
+ test1(30, 3); // 3 and M are relatively prime. But 3 divides the file size.
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/dmt-test.cc b/storage/tokudb/PerconaFT/ft/tests/dmt-test.cc
new file mode 100644
index 00000000..7e31ec2f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/dmt-test.cc
@@ -0,0 +1,985 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <util/dmt.h>
+
+typedef void *DMTVALUE;
+
+class dmtvalue_writer {
+public:
+ size_t get_size(void) const {
+ return sizeof(DMTVALUE);
+ }
+ void write_to(DMTVALUE *const dest) const {
+ *dest = value;
+ }
+
+ dmtvalue_writer(DMTVALUE _value)
+ : value(_value) {
+ }
+ dmtvalue_writer(const uint32_t size UU(), DMTVALUE *const src)
+ : value(*src) {
+ paranoid_invariant(size == sizeof(DMTVALUE));
+ }
+private:
+ const DMTVALUE value;
+};
+
+typedef toku::dmt<DMTVALUE, DMTVALUE, dmtvalue_writer> *DMT;
+
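+// The wrappers below expose the templated toku::dmt through an omt-style interface
+// (insert_at / insert / find / iterate over opaque DMTVALUE pointers), so the body of
+// this test, which appears to be adapted from the omt tests, needs few changes.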
+static int dmt_insert_at(DMT dmt, DMTVALUE value, uint32_t index) {
+ dmtvalue_writer functor(value);
+ return dmt->insert_at(functor, index);
+}
+
+static DMT dmt_create_from_sorted_array(DMTVALUE *values, uint32_t numvalues) {
+ DMT XMALLOC(dmt);
+ dmt->create();
+ for (uint32_t i = 0; i < numvalues; i++) {
+ dmt_insert_at(dmt, values[i], i);
+ }
+ return dmt;
+}
+
+struct heftor {
+ int (*h)(DMTVALUE, void *v);
+ void *v;
+};
+
+int call_heftor(const uint32_t size, const DMTVALUE &v, const heftor &htor);
+int call_heftor(const uint32_t size, const DMTVALUE &v, const heftor &htor) {
+ invariant(size == sizeof(DMTVALUE));
+ return htor.h(const_cast<DMTVALUE>(v), htor.v);
+}
+
+static int dmt_insert(DMT dmt, DMTVALUE value, int(*h)(DMTVALUE, void*v), void *v, uint32_t *index) {
+ struct heftor htor = { .h = h, .v = v };
+ dmtvalue_writer functor(value);
+ return dmt->insert<heftor, call_heftor>(functor, htor, index);
+}
+
+static int dmt_find_zero(DMT V, int (*h)(DMTVALUE, void*extra), void*extra, DMTVALUE *value, uint32_t *index) {
+ struct heftor htor = { .h = h, .v = extra };
+ uint32_t ignore;
+ return V->find_zero<heftor, call_heftor>(htor, &ignore, value, index);
+}
+
+static int dmt_find(DMT V, int (*h)(DMTVALUE, void*extra), void*extra, int direction, DMTVALUE *value, uint32_t *index) {
+ struct heftor htor = { .h = h, .v = extra };
+ uint32_t ignore;
+ return V->find<heftor, call_heftor>(htor, direction, &ignore, value, index);
+}
+
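+// toku::dmt does not appear to expose an omt-style split, so this helper emulates
+// split_at the slow way: copy the elements at or after index into a new dmt with
+// fetch + insert_at, then delete them from the source back to front.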
+static int dmt_split_at(DMT dmt, DMT *newdmtp, uint32_t index) {
+ if (index > dmt->size()) { return EINVAL; }
+ DMT XMALLOC(newdmt);
+ newdmt->create();
+ int r;
+
+ for (uint32_t i = index; i < dmt->size(); i++) {
+ DMTVALUE v;
+ r = dmt->fetch(i, nullptr, &v);
+ invariant_zero(r);
+ r = dmt_insert_at(newdmt, v, i-index);
+ invariant_zero(r);
+ }
+ if (dmt->size() > 0) {
+ for (uint32_t i = dmt->size(); i > index; i--) {
+ r = dmt->delete_at(i - 1);
+ invariant_zero(r);
+ }
+ }
+ r = 0;
+
+ if (r != 0) {
+ toku_free(newdmt);
+ } else {
+ *newdmtp = newdmt;
+ }
+ return r;
+}
+
+static void
+parse_args (int argc, const char *argv[]) {
+ const char *argv0=argv[0];
+ while (argc>1) {
+ int resultcode=0;
+ if (strcmp(argv[1], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[1], "-q")==0) {
+ verbose = 0;
+ } else if (strcmp(argv[1], "-h")==0) {
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-h]\n", argv0);
+ exit(resultcode);
+ } else {
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+/* End ".h like" stuff. */
+
+struct value {
+ uint32_t number;
+};
+#define V(x) ((struct value *)(x))
+
+enum rand_type {
+ TEST_RANDOM,
+ TEST_SORTED,
+ TEST_IDENTITY
+};
+enum close_when_done {
+ CLOSE_WHEN_DONE,
+ KEEP_WHEN_DONE
+};
+enum create_type {
+ BATCH_INSERT,
+ INSERT_AT,
+ INSERT_AT_ALMOST_RANDOM,
+};
+
+/* Globals */
+DMT global_dmt;
+DMTVALUE* values = nullptr;
+struct value* nums = nullptr;
+uint32_t length;
+
+static void
+cleanup_globals (void) {
+ assert(values);
+ toku_free(values);
+ values = nullptr;
+ assert(nums);
+ toku_free(nums);
+ nums = nullptr;
+}
+
+const unsigned int random_seed = 0xFEADACBA;
+
+static void
+init_init_values (unsigned int seed, uint32_t num_elements) {
+ srandom(seed);
+
+ cleanup_globals();
+
+ MALLOC_N(num_elements, values);
+ assert(values);
+ MALLOC_N(num_elements, nums);
+ assert(nums);
+ length = num_elements;
+}
+
+static void
+init_identity_values (unsigned int seed, uint32_t num_elements) {
+ uint32_t i;
+
+ init_init_values(seed, num_elements);
+
+ for (i = 0; i < length; i++) {
+ nums[i].number = i;
+ values[i] = (DMTVALUE)&nums[i];
+ }
+}
+
+static void
+init_distinct_sorted_values (unsigned int seed, uint32_t num_elements) {
+ uint32_t i;
+
+ init_init_values(seed, num_elements);
+
+ uint32_t number = 0;
+
+ for (i = 0; i < length; i++) {
+ number += (uint32_t)(random() % 32) + 1;
+ nums[i].number = number;
+ values[i] = (DMTVALUE)&nums[i];
+ }
+}
+
+static void
+init_distinct_random_values (unsigned int seed, uint32_t num_elements) {
+ init_distinct_sorted_values(seed, num_elements);
+
+ uint32_t i;
+ uint32_t choice;
+ uint32_t choices;
+ struct value temp;
+ for (i = 0; i < length - 1; i++) {
+ choices = length - i;
+ choice = random() % choices;
+ if (choice != i) {
+ temp = nums[i];
+ nums[i] = nums[choice];
+ nums[choice] = temp;
+ }
+ }
+}
+
+static void
+init_globals (void) {
+ MALLOC_N(1, values);
+ assert(values);
+ MALLOC_N(1, nums);
+ assert(nums);
+ length = 1;
+}
+
+static void
+test_close (enum close_when_done do_close) {
+ if (do_close == KEEP_WHEN_DONE) return;
+ assert(do_close == CLOSE_WHEN_DONE);
+ global_dmt->destroy();
+ toku_free(global_dmt);
+ global_dmt = nullptr;
+}
+
+static void
+test_create (enum close_when_done do_close) {
+ XMALLOC(global_dmt);
+ global_dmt->create();
+ test_close(do_close);
+}
+
+static void
+test_create_size (enum close_when_done do_close) {
+ test_create(KEEP_WHEN_DONE);
+ assert(global_dmt->size() == 0);
+ test_close(do_close);
+}
+
+static void
+test_create_insert_at_almost_random (enum close_when_done do_close) {
+ uint32_t i;
+ int r;
+ uint32_t size = 0;
+
+ test_create(KEEP_WHEN_DONE);
+ r = dmt_insert_at(global_dmt, values[0], global_dmt->size()+1);
+ CKERR2(r, EINVAL);
+ r = dmt_insert_at(global_dmt, values[0], global_dmt->size()+2);
+ CKERR2(r, EINVAL);
+ for (i = 0; i < length/2; i++) {
+ assert(size==global_dmt->size());
+ r = dmt_insert_at(global_dmt, values[i], i);
+ CKERR(r);
+ assert(++size==global_dmt->size());
+ r = dmt_insert_at(global_dmt, values[length-1-i], i+1);
+ CKERR(r);
+ assert(++size==global_dmt->size());
+ }
+ r = dmt_insert_at(global_dmt, values[0], global_dmt->size()+1);
+ CKERR2(r, EINVAL);
+ r = dmt_insert_at(global_dmt, values[0], global_dmt->size()+2);
+ CKERR2(r, EINVAL);
+ assert(size==global_dmt->size());
+ test_close(do_close);
+}
+
+static void
+test_create_insert_at_sequential (enum close_when_done do_close) {
+ uint32_t i;
+ int r;
+ uint32_t size = 0;
+
+ test_create(KEEP_WHEN_DONE);
+ r = dmt_insert_at(global_dmt, values[0], global_dmt->size()+1);
+ CKERR2(r, EINVAL);
+ r = dmt_insert_at(global_dmt, values[0], global_dmt->size()+2);
+ CKERR2(r, EINVAL);
+ for (i = 0; i < length; i++) {
+ assert(size==global_dmt->size());
+ r = dmt_insert_at(global_dmt, values[i], i);
+ CKERR(r);
+ assert(++size==global_dmt->size());
+ }
+ r = dmt_insert_at(global_dmt, values[0], global_dmt->size()+1);
+ CKERR2(r, EINVAL);
+ r = dmt_insert_at(global_dmt, values[0], global_dmt->size()+2);
+ CKERR2(r, EINVAL);
+ assert(size==global_dmt->size());
+ test_close(do_close);
+}
+
+static void
+test_create_from_sorted_array (enum create_type create_choice, enum close_when_done do_close) {
+ global_dmt = nullptr;
+
+ if (create_choice == BATCH_INSERT) {
+ global_dmt = dmt_create_from_sorted_array(values, length);
+ }
+ else if (create_choice == INSERT_AT) {
+ test_create_insert_at_sequential(KEEP_WHEN_DONE);
+ }
+ else if (create_choice == INSERT_AT_ALMOST_RANDOM) {
+ test_create_insert_at_almost_random(KEEP_WHEN_DONE);
+ }
+ else {
+ assert(false);
+ }
+
+ assert(global_dmt!=nullptr);
+ test_close(do_close);
+}
+
+static void
+test_create_from_sorted_array_size (enum create_type create_choice, enum close_when_done do_close) {
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+ assert(global_dmt->size()==length);
+ test_close(do_close);
+}
+
+static void
+test_fetch_verify (DMT dmtree, DMTVALUE* val, uint32_t len ) {
+ uint32_t i;
+ int r;
+ DMTVALUE v = (DMTVALUE)&i;
+ DMTVALUE oldv = v;
+
+ assert(len == dmtree->size());
+ for (i = 0; i < len; i++) {
+ assert(oldv!=val[i]);
+ v = nullptr;
+ r = dmtree->fetch(i, nullptr, &v);
+ CKERR(r);
+ assert(v != nullptr);
+ assert(v != oldv);
+ assert(v == val[i]);
+ assert(V(v)->number == V(val[i])->number);
+ v = oldv;
+ }
+
+ for (i = len; i < len*2; i++) {
+ v = oldv;
+ r = dmtree->fetch(i, nullptr, &v);
+ CKERR2(r, EINVAL);
+ assert(v == oldv);
+ }
+
+}
+
+static void
+test_create_fetch_verify (enum create_type create_choice, enum close_when_done do_close) {
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+ test_fetch_verify(global_dmt, values, length);
+ test_close(do_close);
+}
+
+static int iterate_helper_error_return = 1;
+
+static int
+iterate_helper (DMTVALUE v, uint32_t idx, void* extra) {
+ if (extra == nullptr) return iterate_helper_error_return;
+ DMTVALUE* vals = (DMTVALUE *)extra;
+ assert(v != nullptr);
+ assert(v == vals[idx]);
+ assert(V(v)->number == V(vals[idx])->number);
+ return 0;
+}
+struct functor {
+ int (*f)(DMTVALUE, uint32_t, void *);
+ void *v;
+};
+
+int call_functor(const uint32_t size, const DMTVALUE &v, uint32_t idx, functor *const ftor);
+int call_functor(const uint32_t size, const DMTVALUE &v, uint32_t idx, functor *const ftor) {
+ invariant(size == sizeof(DMTVALUE));
+ return ftor->f(const_cast<DMTVALUE>(v), idx, ftor->v);
+}
+
+static int dmt_iterate(DMT dmt, int (*f)(DMTVALUE, uint32_t, void*), void*v) {
+ struct functor ftor = { .f = f, .v = v };
+ return dmt->iterate<functor, call_functor>(&ftor);
+}
+
+static void
+test_iterate_verify (DMT dmtree, DMTVALUE* vals, uint32_t len) {
+ int r;
+ iterate_helper_error_return = 0;
+ r = dmt_iterate(dmtree, iterate_helper, (void*)vals);
+ CKERR(r);
+ iterate_helper_error_return = 0xFEEDABBA;
+ r = dmt_iterate(dmtree, iterate_helper, nullptr);
+ if (!len) {
+ CKERR2(r, 0);
+ }
+ else {
+ CKERR2(r, iterate_helper_error_return);
+ }
+}
+
+static void
+test_create_iterate_verify (enum create_type create_choice, enum close_when_done do_close) {
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+ test_iterate_verify(global_dmt, values, length);
+ test_close(do_close);
+}
+
+
+static void
+permute_array (uint32_t* arr, uint32_t len) {
+ //
+ // create a permutation of 0...size-1
+ //
+ uint32_t i = 0;
+ for (i = 0; i < len; i++) {
+ arr[i] = i;
+ }
+ for (i = 0; i < len - 1; i++) {
+ uint32_t choices = len - i;
+ uint32_t choice = random() % choices;
+ if (choice != i) {
+ uint32_t temp = arr[i];
+ arr[i] = arr[choice];
+ arr[choice] = temp;
+ }
+ }
+}
+
+static int
+dmt_set_at (DMT dmt, DMTVALUE value, uint32_t index) {
+ int r = dmt->delete_at(index);
+ if (r!=0) return r;
+ return dmt_insert_at(dmt, value, index);
+}
+
+static void
+test_create_set_at (enum create_type create_choice, enum close_when_done do_close) {
+ uint32_t i = 0;
+
+ struct value* old_nums = nullptr;
+ MALLOC_N(length, old_nums);
+ assert(nums);
+
+ uint32_t* perm = nullptr;
+ MALLOC_N(length, perm);
+ assert(perm);
+
+ DMTVALUE* old_values = nullptr;
+ MALLOC_N(length, old_values);
+ assert(old_values);
+
+ permute_array(perm, length);
+
+ //
+ // These are going to be the new values
+ //
+ for (i = 0; i < length; i++) {
+ old_nums[i] = nums[i];
+ old_values[i] = &old_nums[i];
+ values[i] = &old_nums[i];
+ }
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+ int r;
+ r = dmt_set_at (global_dmt, values[0], length);
+ CKERR2(r,EINVAL);
+ r = dmt_set_at (global_dmt, values[0], length+1);
+ CKERR2(r,EINVAL);
+ for (i = 0; i < length; i++) {
+ uint32_t choice = perm[i];
+ values[choice] = &nums[choice];
+ nums[choice].number = (uint32_t)random();
+ r = dmt_set_at (global_dmt, values[choice], choice);
+ CKERR(r);
+ test_iterate_verify(global_dmt, values, length);
+ test_fetch_verify(global_dmt, values, length);
+ }
+ r = dmt_set_at (global_dmt, values[0], length);
+ CKERR2(r,EINVAL);
+ r = dmt_set_at (global_dmt, values[0], length+1);
+ CKERR2(r,EINVAL);
+
+ toku_free(perm);
+ toku_free(old_values);
+ toku_free(old_nums);
+
+ test_close(do_close);
+}
+
+static int
+insert_helper (DMTVALUE value, void* extra_insert) {
+ DMTVALUE to_insert = (DMTVALUE)extra_insert;
+ assert(to_insert);
+
+ if (V(value)->number < V(to_insert)->number) return -1;
+ if (V(value)->number > V(to_insert)->number) return +1;
+ return 0;
+}
+
+static void
+test_create_insert (enum close_when_done do_close) {
+ uint32_t i = 0;
+
+ uint32_t* perm = nullptr;
+ MALLOC_N(length, perm);
+ assert(perm);
+
+ permute_array(perm, length);
+
+ test_create(KEEP_WHEN_DONE);
+ int r;
+ uint32_t size = length;
+ length = 0;
+ while (length < size) {
+ uint32_t choice = perm[length];
+ DMTVALUE to_insert = &nums[choice];
+ uint32_t idx = UINT32_MAX;
+
+ assert(length==global_dmt->size());
+ r = dmt_insert(global_dmt, to_insert, insert_helper, to_insert, &idx);
+ CKERR(r);
+ assert(idx <= length);
+ if (idx > 0) {
+ assert(V(to_insert)->number > V(values[idx-1])->number);
+ }
+ if (idx < length) {
+ assert(V(to_insert)->number < V(values[idx])->number);
+ }
+ length++;
+ assert(length==global_dmt->size());
+ /* Make room */
+ for (i = length-1; i > idx; i--) {
+ values[i] = values[i-1];
+ }
+ values[idx] = to_insert;
+ test_fetch_verify(global_dmt, values, length);
+ test_iterate_verify(global_dmt, values, length);
+
+ idx = UINT32_MAX;
+ r = dmt_insert(global_dmt, to_insert, insert_helper, to_insert, &idx);
+ CKERR2(r, DB_KEYEXIST);
+ assert(idx < length);
+ assert(V(values[idx])->number == V(to_insert)->number);
+ assert(length==global_dmt->size());
+
+ test_iterate_verify(global_dmt, values, length);
+ test_fetch_verify(global_dmt, values, length);
+ }
+
+ toku_free(perm);
+
+ test_close(do_close);
+}
+
+static void
+test_create_delete_at (enum create_type create_choice, enum close_when_done do_close) {
+ uint32_t i = 0;
+ int r = ENOSYS;
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+
+ assert(length == global_dmt->size());
+ r = global_dmt->delete_at(length);
+ CKERR2(r,EINVAL);
+ assert(length == global_dmt->size());
+ r = global_dmt->delete_at(length+1);
+ CKERR2(r,EINVAL);
+ while (length > 0) {
+ assert(length == global_dmt->size());
+ uint32_t index_to_delete = random()%length;
+ r = global_dmt->delete_at(index_to_delete);
+ CKERR(r);
+ for (i = index_to_delete+1; i < length; i++) {
+ values[i-1] = values[i];
+ }
+ length--;
+ test_fetch_verify(global_dmt, values, length);
+ test_iterate_verify(global_dmt, values, length);
+ }
+ assert(length == 0);
+ assert(length == global_dmt->size());
+ r = global_dmt->delete_at(length);
+ CKERR2(r, EINVAL);
+ assert(length == global_dmt->size());
+ r = global_dmt->delete_at(length+1);
+ CKERR2(r, EINVAL);
+ test_close(do_close);
+}
+
+static int dmt_merge(DMT leftdmt, DMT rightdmt, DMT *newdmtp) {
+ DMT XMALLOC(newdmt);
+ newdmt->create();
+ int r;
+ for (uint32_t i = 0; i < leftdmt->size(); i++) {
+ DMTVALUE v;
+ r = leftdmt->fetch(i, nullptr, &v);
+ invariant_zero(r);
+ r = newdmt->insert_at(v, i);
+ invariant_zero(r);
+ }
+ uint32_t offset = leftdmt->size();
+ for (uint32_t i = 0; i < rightdmt->size(); i++) {
+ DMTVALUE v;
+ r = rightdmt->fetch(i, nullptr, &v);
+ invariant_zero(r);
+ r = newdmt->insert_at(v, i+offset);
+ invariant_zero(r);
+ }
+ leftdmt->destroy();
+ rightdmt->destroy();
+ toku_free(leftdmt);
+ toku_free(rightdmt);
+ *newdmtp = newdmt;
+ return 0;
+}
+
+static void
+test_split_merge (enum create_type create_choice, enum close_when_done do_close) {
+ int r = ENOSYS;
+ uint32_t i = 0;
+ DMT left_split = nullptr;
+ DMT right_split = nullptr;
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+
+ for (i = 0; i <= length; i++) {
+ r = dmt_split_at(global_dmt, &right_split, length+1);
+ CKERR2(r,EINVAL);
+ r = dmt_split_at(global_dmt, &right_split, length+2);
+ CKERR2(r,EINVAL);
+
+ //
+ // test successful split
+ //
+ r = dmt_split_at(global_dmt, &right_split, i);
+ CKERR(r);
+ left_split = global_dmt;
+ global_dmt = nullptr;
+ assert(left_split->size() == i);
+ assert(right_split->size() == length - i);
+ test_fetch_verify(left_split, values, i);
+ test_iterate_verify(left_split, values, i);
+ test_fetch_verify(right_split, &values[i], length - i);
+ test_iterate_verify(right_split, &values[i], length - i);
+ //
+ // verify that new global_dmt's cannot do bad splits
+ //
+ r = dmt_split_at(left_split, &global_dmt, i+1);
+ CKERR2(r,EINVAL);
+ assert(left_split->size() == i);
+ assert(right_split->size() == length - i);
+ r = dmt_split_at(left_split, &global_dmt, i+2);
+ CKERR2(r,EINVAL);
+ assert(left_split->size() == i);
+ assert(right_split->size() == length - i);
+ r = dmt_split_at(right_split, &global_dmt, length - i + 1);
+ CKERR2(r,EINVAL);
+ assert(left_split->size() == i);
+ assert(right_split->size() == length - i);
+ r = dmt_split_at(right_split, &global_dmt, length - i + 1);
+ CKERR2(r,EINVAL);
+ assert(left_split->size() == i);
+ assert(right_split->size() == length - i);
+
+ //
+ // test merge
+ //
+ r = dmt_merge(left_split,right_split,&global_dmt);
+ CKERR(r);
+ left_split = nullptr;
+ right_split = nullptr;
+ assert(global_dmt->size() == length);
+ test_fetch_verify(global_dmt, values, length);
+ test_iterate_verify(global_dmt, values, length);
+ }
+ test_close(do_close);
+}
+
+
+static void
+init_values (enum rand_type rand_choice) {
+ const uint32_t test_size = 100;
+ if (rand_choice == TEST_RANDOM) {
+ init_distinct_random_values(random_seed, test_size);
+ }
+ else if (rand_choice == TEST_SORTED) {
+ init_distinct_sorted_values(random_seed, test_size);
+ }
+ else if (rand_choice == TEST_IDENTITY) {
+ init_identity_values( random_seed, test_size);
+ }
+ else assert(false);
+}
+
+static void
+test_create_array (enum create_type create_choice, enum rand_type rand_choice) {
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_from_sorted_array( create_choice, CLOSE_WHEN_DONE);
+ test_create_from_sorted_array_size(create_choice, CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_fetch_verify( create_choice, CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_iterate_verify( create_choice, CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_set_at( create_choice, CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_delete_at( create_choice, CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_insert( CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_split_merge( create_choice, CLOSE_WHEN_DONE);
+}
+
+typedef struct {
+ uint32_t first_zero;
+ uint32_t first_pos;
+} h_extra;
+
+
+static int
+test_heaviside (DMTVALUE v_dmt, void* x) {
+ DMTVALUE v = (DMTVALUE) v_dmt;
+ h_extra* extra = (h_extra*)x;
+ assert(v && x);
+ assert(extra->first_zero <= extra->first_pos);
+
+ uint32_t value = V(v)->number;
+ if (value < extra->first_zero) return -1;
+ if (value < extra->first_pos) return 0;
+ return 1;
+}
+
+static void
+heavy_extra (h_extra* extra, uint32_t first_zero, uint32_t first_pos) {
+ extra->first_zero = first_zero;
+ extra->first_pos = first_pos;
+}
+
+static void
+test_find_dir (int dir, void* extra, int (*h)(DMTVALUE, void*),
+ int r_expect, bool idx_will_change, uint32_t idx_expect,
+ uint32_t number_expect, bool UU(cursor_valid)) {
+ uint32_t idx = UINT32_MAX;
+ uint32_t old_idx = idx;
+ DMTVALUE dmt_val;
+ int r;
+
+ dmt_val = nullptr;
+
+ /* Verify we can pass nullptr value. */
+ dmt_val = nullptr;
+ idx = old_idx;
+ if (dir == 0) {
+ r = dmt_find_zero(global_dmt, h, extra, nullptr, &idx);
+ }
+ else {
+ r = dmt_find( global_dmt, h, extra, dir, nullptr, &idx);
+ }
+ CKERR2(r, r_expect);
+ if (idx_will_change) {
+ assert(idx == idx_expect);
+ }
+ else {
+ assert(idx == old_idx);
+ }
+ assert(dmt_val == nullptr);
+
+ /* Verify we can pass nullptr idx. */
+ dmt_val = nullptr;
+ idx = old_idx;
+ if (dir == 0) {
+ r = dmt_find_zero(global_dmt, h, extra, &dmt_val, 0);
+ }
+ else {
+ r = dmt_find( global_dmt, h, extra, dir, &dmt_val, 0);
+ }
+ CKERR2(r, r_expect);
+ assert(idx == old_idx);
+ if (r == DB_NOTFOUND) {
+ assert(dmt_val == nullptr);
+ }
+ else {
+ assert(V(dmt_val)->number == number_expect);
+ }
+
+ /* Verify we can pass nullptr both. */
+ dmt_val = nullptr;
+ idx = old_idx;
+ if (dir == 0) {
+ r = dmt_find_zero(global_dmt, h, extra, nullptr, 0);
+ }
+ else {
+ r = dmt_find( global_dmt, h, extra, dir, nullptr, 0);
+ }
+ CKERR2(r, r_expect);
+ assert(idx == old_idx);
+ assert(dmt_val == nullptr);
+}
+
+static void
+test_find (enum create_type create_choice, enum close_when_done do_close) {
+ h_extra extra;
+ init_identity_values(random_seed, 100);
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+
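+// Each ASCII picture below shows the sign the heaviside function returns across the
+// array (-, 0, +); the letter underneath marks the index the test expects to find:
+// A = rightmost negative (dir -1), B = leftmost positive (dir +1), C = leftmost zero
+// (dir 0, i.e. find_zero).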
+/*
+ -...-
+ A
+*/
+ heavy_extra(&extra, length, length);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, length-1, length-1, true);
+ test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, true, length, length, false);
+
+
+/*
+ +...+
+ B
+*/
+ heavy_extra(&extra, 0, 0);
+ test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, 0, 0, true);
+ test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, true, 0, 0, false);
+
+/*
+ 0...0
+ C
+*/
+ heavy_extra(&extra, 0, length);
+ test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(0, &extra, test_heaviside, 0, true, 0, 0, true);
+
+/*
+ -...-0...0
+ AC
+*/
+ heavy_extra(&extra, length/2, length);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, length/2-1, length/2-1, true);
+ test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(0, &extra, test_heaviside, 0, true, length/2, length/2, true);
+
+/*
+ 0...0+...+
+ C B
+*/
+ heavy_extra(&extra, 0, length/2);
+ test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, length/2, length/2, true);
+ test_find_dir(0, &extra, test_heaviside, 0, true, 0, 0, true);
+
+/*
+ -...-+...+
+ AB
+*/
+ heavy_extra(&extra, length/2, length/2);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, length/2-1, length/2-1, true);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, length/2, length/2, true);
+ test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, true, length/2, length/2, false);
+
+/*
+ -...-0...0+...+
+ AC B
+*/
+ heavy_extra(&extra, length/3, 2*length/3);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, length/3-1, length/3-1, true);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, 2*length/3, 2*length/3, true);
+ test_find_dir(0, &extra, test_heaviside, 0, true, length/3, length/3, true);
+
+ /* Cleanup */
+ test_close(do_close);
+}
+
+static void
+runtests_create_choice (enum create_type create_choice) {
+ test_create_array(create_choice, TEST_SORTED);
+ test_create_array(create_choice, TEST_RANDOM);
+ test_create_array(create_choice, TEST_IDENTITY);
+ test_find( create_choice, CLOSE_WHEN_DONE);
+}
+
+static void
+test_clone(uint32_t nelts)
+// Test that each clone operation gives the right data back. If nelts is
+// zero, also tests that you still get a valid DMT back and that the way
+// to deallocate it still works.
+{
+ DMT src = nullptr, dest = nullptr;
+ int r = 0;
+
+ XMALLOC(src);
+ src->create();
+ for (long i = 0; i < nelts; ++i) {
+ r = dmt_insert_at(src, (DMTVALUE) i, i);
+ assert_zero(r);
+ }
+
+ XMALLOC(dest);
+ dest->clone(*src);
+ assert(dest->size() == nelts);
+ for (long i = 0; i < nelts; ++i) {
+ DMTVALUE v;
+ long l;
+ r = dest->fetch(i, nullptr, &v);
+ assert_zero(r);
+ l = (long) v;
+ assert(l == i);
+ }
+ dest->destroy();
+ toku_free(dest);
+ src->destroy();
+ toku_free(src);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ parse_args(argc, argv);
+ init_globals();
+ test_create( CLOSE_WHEN_DONE);
+ test_create_size( CLOSE_WHEN_DONE);
+ runtests_create_choice(BATCH_INSERT);
+ runtests_create_choice(INSERT_AT);
+ runtests_create_choice(INSERT_AT_ALMOST_RANDOM);
+ test_clone(0);
+ test_clone(1);
+ test_clone(1000);
+ test_clone(10000);
+ cleanup_globals();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/dmt-test2.cc b/storage/tokudb/PerconaFT/ft/tests/dmt-test2.cc
new file mode 100644
index 00000000..f2463c55
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/dmt-test2.cc
@@ -0,0 +1,321 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <util/dmt.h>
+
+static void
+parse_args (int argc, const char *argv[]) {
+ const char *argv0=argv[0];
+ while (argc>1) {
+ int resultcode=0;
+ if (strcmp(argv[1], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[1], "-q")==0) {
+ verbose = 0;
+ } else if (strcmp(argv[1], "-h")==0) {
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-h]\n", argv0);
+ exit(resultcode);
+ } else {
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+/* End ".h like" stuff. */
+
+struct value {
+ uint32_t number;
+};
+#define V(x) ((struct value *)(x))
+
+
+
+const uint32_t MAXNUM = 1024;
+const uint32_t MAXLEN = 32;
+char data[MAXNUM][MAXLEN];
+
+struct val_type {
+ char c[MAXLEN];
+};
+
+namespace toku {
+class vwriter {
+ public:
+ size_t get_size(void) const {
+ size_t len = strlen(v.c);
+ invariant(len < sizeof(val_type));
+ return len + 1;
+ }
+ void write_to(val_type *const dest) const {
+ strcpy(dest->c, v.c);
+ }
+
+ vwriter(const char* c) {
+ invariant(strlen(c) < sizeof(val_type));
+ strcpy(v.c, c);
+ }
+
+ vwriter(const uint32_t klpair_len, val_type *const src) {
+ invariant(strlen(src->c) < sizeof(val_type));
+ strcpy(v.c, src->c);
+ invariant(klpair_len == get_size());
+ }
+ private:
+ val_type v;
+};
+}
+
+/* Globals */
+typedef toku::dmt<val_type, val_type*, toku::vwriter> vdmt;
+
+const unsigned int random_seed = 0xFEADACBA;
+
+///////////////
+
+
+static void fail_one_verify(uint32_t len, uint32_t num, vdmt *v) {
+ val_type* fetched_data;
+ int count = 0;
+ v->verify();
+ for (uint32_t i = 0; i < num; i++) {
+ uint32_t fetched_len;
+ int r = v->fetch(i-count, &fetched_len, &fetched_data);
+ if (r != 0 || fetched_len != len || strcmp(fetched_data->c, data[i])) {
+ count++;
+ continue;
+ }
+ }
+ invariant(count == 1);
+}
+
+static void verify(uint32_t len, uint32_t num, vdmt *v) {
+ v->verify();
+ val_type* fetched_data;
+ for (uint32_t i = 0; i < num; i++) {
+ uint32_t fetched_len;
+ int r = v->fetch(i, &fetched_len, &fetched_data);
+ CKERR(r);
+ invariant(fetched_len == len);
+ invariant(!strcmp(fetched_data->c, data[i]));
+ }
+}
+
+
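+// Build a dmt through the builder where every value has the same serialized length and
+// check that both the builder and the resulting dmt report a fixed value length. Then,
+// for each position: clone the dmt, delete that one value (fail_one_verify expects
+// exactly one mismatch), re-insert it, and verify the whole dmt again.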
+static void test_builder_fixed(uint32_t len, uint32_t num) {
+ srandom(random_seed);
+ assert(len > 1);
+ assert(len <= MAXLEN);
+ assert(num <= MAXNUM);
+ for (uint32_t i = 0; i < num; i++) {
+ for (uint32_t j = 0; j < len-1; j++) {
+ data[i][j] = random() % 255 + 1; // never 0, so it is not mistaken for a NUL terminator
+ }
+ data[i][len-1] = '\0'; // NUL-terminate
+ }
+
+ vdmt::builder builder;
+ builder.create(num, num * len);
+
+ for (uint32_t i = 0; i < num; i++) {
+ vwriter vfun(data[i]);
+ builder.append(vfun);
+ }
+ invariant(builder.value_length_is_fixed());
+ vdmt v;
+ builder.build(&v);
+ invariant(v.value_length_is_fixed());
+ invariant(v.get_fixed_length() == len || num == 0);
+
+ invariant(v.size() == num);
+
+ verify(len, num, &v);
+
+ for (uint32_t change = 0; change < num; change++) {
+ vdmt v2;
+ v2.clone(v);
+ v2.delete_at(change);
+ fail_one_verify(len, num, &v2);
+
+ vwriter vfun(data[change]);
+ v2.insert_at(vfun, change);
+ verify(len, num, &v2);
+ v2.destroy();
+ }
+
+ v.destroy();
+}
+
+static void test_builder_variable(uint32_t len, uint32_t len2, uint32_t num) {
+ srandom(random_seed);
+ assert(len > 1);
+ assert(len <= MAXLEN);
+ assert(num <= MAXNUM);
+ assert(num > 3);
+ uint32_t which2 = random() % num;
+ for (uint32_t i = 0; i < num; i++) {
+ uint32_t thislen = i == which2 ? len2 : len;
+ for (uint32_t j = 0; j < thislen-1; j++) {
+ data[i][j] = random() % 255 + 1; // never 0, so it is not mistaken for a NUL terminator
+ }
+ data[i][thislen-1] = '\0'; // NUL-terminate
+ }
+
+ vdmt::builder builder;
+ builder.create(num, (num-1) * len + len2);
+
+ for (uint32_t i = 0; i < num; i++) {
+ vwriter vfun(data[i]);
+ builder.append(vfun);
+ }
+ invariant(!builder.value_length_is_fixed());
+ vdmt v;
+ builder.build(&v);
+ invariant(!v.value_length_is_fixed());
+
+ invariant(v.size() == num);
+
+ val_type* fetched_data;
+ for (uint32_t i = 0; i < num; i++) {
+ uint32_t fetched_len;
+ int r = v.fetch(i, &fetched_len, &fetched_data);
+ CKERR(r);
+ if (i == which2) {
+ invariant(fetched_len == len2);
+ invariant(!strcmp(fetched_data->c, data[i]));
+ } else {
+ invariant(fetched_len == len);
+ invariant(!strcmp(fetched_data->c, data[i]));
+ }
+ }
+
+ v.destroy();
+}
+
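+// Build a dmt directly from a flat array of fixed-size values with
+// create_from_sorted_memory_of_fixed_size_elements, then serialize it back out and check
+// that the bytes round-trip exactly. When there is more than one element, also delete one
+// in the middle and check the re-serialized form matches the correspondingly shifted
+// flat array.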
+static void test_create_from_sorted_memory_of_fixed_sized_elements_and_serialize(uint32_t len, uint32_t num) {
+ srandom(random_seed);
+ assert(len <= MAXLEN);
+ assert(num <= MAXNUM);
+ for (uint32_t i = 0; i < num; i++) {
+ for (uint32_t j = 0; j < len-1; j++) {
+ data[i][j] = random() % 255 + 1; // never 0, so it is not mistaken for a NUL terminator
+ }
+ data[i][len-1] = '\0'; // NUL-terminate
+ }
+
+ char *flat = (char*)toku_xmalloc(len * num);
+ char *p = flat;
+ for (uint32_t i = 0; i < num; i++) {
+ memcpy(p, data[i], len);
+ p += len;
+ }
+ vdmt v;
+
+ v.create_from_sorted_memory_of_fixed_size_elements(flat, num, len*num, len);
+ invariant(v.value_length_is_fixed());
+ invariant(v.get_fixed_length() == len);
+
+ invariant(v.size() == num);
+
+ val_type* fetched_data;
+ for (uint32_t i = 0; i < num; i++) {
+ uint32_t fetched_len;
+ int r = v.fetch(i, &fetched_len, &fetched_data);
+ CKERR(r);
+ invariant(fetched_len == len);
+ invariant(!strcmp(fetched_data->c, data[i]));
+ }
+
+ char *serialized_flat = (char*)toku_xmalloc(len*num);
+ struct wbuf wb;
+ wbuf_nocrc_init(&wb, serialized_flat, len*num);
+ v.prepare_for_serialize();
+ v.serialize_values(len*num, &wb);
+ invariant(!memcmp(serialized_flat, flat, len*num));
+
+
+ if (num > 1) {
+ // Currently, converting to a dtree treats the entire thing as NOT fixed length;
+ // there is room for an additional performance optimization here.
+ uint32_t which = (random() % (num-1)) + 1; // Not last, not first
+ invariant(which > 0 && which < num-1);
+ v.delete_at(which);
+
+ memmove(flat + which*len, flat+(which+1)*len, (num-which-1) * len);
+ v.prepare_for_serialize();
+ wbuf_nocrc_init(&wb, serialized_flat, len*(num-1));
+ v.serialize_values(len*(num-1), &wb);
+ invariant(!memcmp(serialized_flat, flat, len*(num-1)));
+ }
+
+ toku_free(flat);
+ toku_free(serialized_flat);
+
+ v.destroy();
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ parse_args(argc, argv);
+ // Do test with size divisible by 4 and not
+ test_builder_fixed(4, 0);
+ test_builder_fixed(5, 0);
+ test_builder_fixed(4, 1);
+ test_builder_fixed(5, 1);
+ test_builder_fixed(4, 100);
+ test_builder_fixed(5, 100);
+ // Do test with zero, one, or both sizes divisible
+ test_builder_variable(4, 8, 100);
+ test_builder_variable(4, 5, 100);
+ test_builder_variable(5, 8, 100);
+ test_builder_variable(5, 10, 100);
+
+ test_create_from_sorted_memory_of_fixed_sized_elements_and_serialize(4, 0);
+ test_create_from_sorted_memory_of_fixed_sized_elements_and_serialize(5, 0);
+ test_create_from_sorted_memory_of_fixed_sized_elements_and_serialize(4, 1);
+ test_create_from_sorted_memory_of_fixed_sized_elements_and_serialize(5, 1);
+ test_create_from_sorted_memory_of_fixed_sized_elements_and_serialize(4, 100);
+ test_create_from_sorted_memory_of_fixed_sized_elements_and_serialize(5, 100);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/fifo-test.cc b/storage/tokudb/PerconaFT/ft/tests/fifo-test.cc
new file mode 100644
index 00000000..7a63969b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/fifo-test.cc
@@ -0,0 +1,133 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static void
+test_create (void) {
+ message_buffer msg_buffer;
+ msg_buffer.create();
+ msg_buffer.destroy();
+}
+
+static char *buildkey(size_t len) {
+ char *XMALLOC_N(len, k);
+ memset(k, 0, len);
+ return k;
+}
+
+static char *buildval(size_t len) {
+ char *XMALLOC_N(len, v);
+ memset(v, ~len, len);
+ return v;
+}
+
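+// test_enqueue: push n messages into a message_buffer, then iterate over it
+// and check that every message comes back intact. Keys and values are
+// deterministic functions of the index (lengths i+1 and i+2, fixed fill
+// bytes), so the iterate callback can rebuild the expected payload from its
+// counter. The message type is the index cast to ft_msg_type; it is stored in
+// a single byte, which is why the callback compares it modulo 256.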
+static void
+test_enqueue(int n) {
+ MSN startmsn = ZERO_MSN;
+
+ message_buffer msg_buffer;
+ msg_buffer.create();
+
+ for (int i=0; i<n; i++) {
+ int thekeylen = i + 1;
+ int thevallen = i + 2;
+ char *thekey = buildkey(thekeylen);
+ char *theval = buildval(thevallen);
+ XIDS xids;
+ if (i == 0) {
+ xids = toku_xids_get_root_xids();
+ } else {
+ int r = toku_xids_create_child(toku_xids_get_root_xids(), &xids, (TXNID)i);
+ assert_zero(r);
+ }
+ MSN msn = next_dummymsn();
+ if (startmsn.msn == ZERO_MSN.msn)
+ startmsn = msn;
+ enum ft_msg_type type = (enum ft_msg_type) i;
+ DBT k, v;
+ ft_msg msg(toku_fill_dbt(&k, thekey, thekeylen), toku_fill_dbt(&v, theval, thevallen), type, msn, xids);
+ msg_buffer.enqueue(msg, true, nullptr);
+ toku_xids_destroy(&xids);
+ toku_free(thekey);
+ toku_free(theval);
+ }
+
+ struct checkit_fn {
+ MSN startmsn;
+ int verbose;
+ int i;
+ checkit_fn(MSN smsn, bool v)
+ : startmsn(smsn), verbose(v), i(0) {
+ }
+ int operator()(const ft_msg &msg, bool UU(is_fresh)) {
+ int thekeylen = i + 1;
+ int thevallen = i + 2;
+ char *thekey = buildkey(thekeylen);
+ char *theval = buildval(thevallen);
+
+ MSN msn = msg.msn();
+ enum ft_msg_type type = msg.type();
+ if (verbose) printf("checkit %d %d %" PRIu64 "\n", i, type, msn.msn);
+ assert(msn.msn == startmsn.msn + i);
+ assert((int) msg.kdbt()->size == thekeylen); assert(memcmp(msg.kdbt()->data, thekey, msg.kdbt()->size) == 0);
+ assert((int) msg.vdbt()->size == thevallen); assert(memcmp(msg.vdbt()->data, theval, msg.vdbt()->size) == 0);
+ assert(i % 256 == (int)type);
+ assert((TXNID)i == toku_xids_get_innermost_xid(msg.xids()));
+ i += 1;
+ toku_free(thekey);
+ toku_free(theval);
+ return 0;
+ }
+ } checkit(startmsn, verbose);
+ msg_buffer.iterate(checkit);
+ assert(checkit.i == n);
+
+ msg_buffer.destroy();
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ initialize_dummymsn();
+ test_create();
+ test_enqueue(4);
+ test_enqueue(512);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-bfe-query.cc b/storage/tokudb/PerconaFT/ft/tests/ft-bfe-query.cc
new file mode 100644
index 00000000..1d6bc2fb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-bfe-query.cc
@@ -0,0 +1,439 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static int int64_key_cmp(DB *db UU(), const DBT *a, const DBT *b) {
+ int64_t x = *(int64_t *)a->data;
+ int64_t y = *(int64_t *)b->data;
+
+ if (x < y)
+ return -1;
+ if (x > y)
+ return 1;
+ return 0;
+}
+
+static void test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
+ int r;
+ FT_CURSOR XMALLOC(cursor);
+ FTNODE dn = NULL;
+ PAIR_ATTR attr;
+
+ // first test that prefetching everything should work
+ memset(&cursor->range_lock_left_key, 0, sizeof(DBT));
+ memset(&cursor->range_lock_right_key, 0, sizeof(DBT));
+ cursor->left_is_neg_infty = true;
+ cursor->right_is_pos_infty = true;
+ cursor->disable_prefetching = false;
+
+ ftnode_fetch_extra bfe;
+
+ // quick test to see that we have the right behavior when we set
+ // disable_prefetching to true
+ cursor->disable_prefetching = true;
+ bfe.create_for_prefetch(ft_h, cursor);
+ FTNODE_DISK_DATA ndd = NULL;
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd, &bfe);
+ invariant(r == 0);
+ invariant(dn->n_children == 3);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ r = toku_ftnode_pf_callback(dn, ndd, &bfe, fd, &attr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ bfe.destroy();
+ toku_ftnode_free(&dn);
+ toku_free(ndd);
+
+ // now enable prefetching again
+ cursor->disable_prefetching = false;
+
+ bfe.create_for_prefetch(ft_h, cursor);
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd, &bfe);
+ invariant(r == 0);
+ invariant(dn->n_children == 3);
+ invariant(BP_STATE(dn, 0) == PT_AVAIL);
+ invariant(BP_STATE(dn, 1) == PT_AVAIL);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_COMPRESSED);
+ invariant(BP_STATE(dn, 1) == PT_COMPRESSED);
+ invariant(BP_STATE(dn, 2) == PT_COMPRESSED);
+ r = toku_ftnode_pf_callback(dn, ndd, &bfe, fd, &attr);
+ invariant(BP_STATE(dn, 0) == PT_AVAIL);
+ invariant(BP_STATE(dn, 1) == PT_AVAIL);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ bfe.destroy();
+ toku_ftnode_free(&dn);
+ toku_free(ndd);
+
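+ // restrict the range: a left bound of 150 falls inside the middle child
+ // (the pivots are 100 and 200), so prefetch should only bring in children 1 and 2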
+ uint64_t left_key = 150;
+ toku_fill_dbt(&cursor->range_lock_left_key, &left_key, sizeof(uint64_t));
+ cursor->left_is_neg_infty = false;
+ bfe.create_for_prefetch(ft_h, cursor);
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd, &bfe);
+ invariant(r == 0);
+ invariant(dn->n_children == 3);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_AVAIL);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_COMPRESSED);
+ invariant(BP_STATE(dn, 2) == PT_COMPRESSED);
+ r = toku_ftnode_pf_callback(dn, ndd, &bfe, fd, &attr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_AVAIL);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ bfe.destroy();
+ toku_ftnode_free(&dn);
+ toku_free(ndd);
+
+ uint64_t right_key = 151;
+ toku_fill_dbt(&cursor->range_lock_right_key, &right_key, sizeof(uint64_t));
+ cursor->right_is_pos_infty = false;
+ bfe.create_for_prefetch(ft_h, cursor);
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd, &bfe);
+ invariant(r == 0);
+ invariant(dn->n_children == 3);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_AVAIL);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_COMPRESSED);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ r = toku_ftnode_pf_callback(dn, ndd, &bfe, fd, &attr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_AVAIL);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ bfe.destroy();
+ toku_ftnode_free(&dn);
+ toku_free(ndd);
+
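+ // the cursor's bound DBTs still point at left_key/right_key, so updating the
+ // locals moves both bounds past the last pivot; only the rightmost child
+ // should be prefetched now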
+ left_key = 100000;
+ right_key = 100000;
+ bfe.create_for_prefetch(ft_h, cursor);
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd, &bfe);
+ invariant(r == 0);
+ invariant(dn->n_children == 3);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_COMPRESSED);
+ r = toku_ftnode_pf_callback(dn, ndd, &bfe, fd, &attr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ bfe.destroy();
+ toku_free(ndd);
+ toku_ftnode_free(&dn);
+
+ left_key = 100;
+ right_key = 100;
+ bfe.create_for_prefetch(ft_h, cursor);
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd, &bfe);
+ invariant(r == 0);
+ invariant(dn->n_children == 3);
+ invariant(BP_STATE(dn, 0) == PT_AVAIL);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_COMPRESSED);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ r = toku_ftnode_pf_callback(dn, ndd, &bfe, fd, &attr);
+ invariant(BP_STATE(dn, 0) == PT_AVAIL);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ bfe.destroy();
+ toku_ftnode_free(&dn);
+ toku_free(ndd);
+
+ toku_free(cursor);
+}
+
+static void test_subset_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
+ int r;
+ FT_CURSOR XMALLOC(cursor);
+ FTNODE dn = NULL;
+ FTNODE_DISK_DATA ndd = NULL;
+ PAIR_ATTR attr;
+
+ // first test that prefetching everything should work
+ memset(&cursor->range_lock_left_key, 0, sizeof(DBT));
+ memset(&cursor->range_lock_right_key, 0, sizeof(DBT));
+ cursor->left_is_neg_infty = true;
+ cursor->right_is_pos_infty = true;
+
+ uint64_t left_key = 150;
+ uint64_t right_key = 151;
+ DBT left, right;
+ toku_fill_dbt(&left, &left_key, sizeof(left_key));
+ toku_fill_dbt(&right, &right_key, sizeof(right_key));
+
+ ftnode_fetch_extra bfe;
+ bfe.create_for_subset_read(
+ ft_h, NULL, &left, &right, false, false, false, false);
+
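+ // the bounds [150, 151] fall inside the middle child; child_to_read then
+ // forces which partition the query targets. With prefetching disabled only
+ // that child is materialized; with it enabled, the partition covering the
+ // bounds is read in as well (checked below).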
+ // fake the childnum to read
+ // set disable_prefetching ON
+ bfe.child_to_read = 2;
+ bfe.disable_prefetching = true;
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd, &bfe);
+ invariant(r == 0);
+ invariant(dn->n_children == 3);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ // need to call this twice because the subset read above touched the clock
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_COMPRESSED);
+ r = toku_ftnode_pf_callback(dn, ndd, &bfe, fd, &attr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ toku_ftnode_free(&dn);
+ toku_free(ndd);
+
+ // fake the childnum to read
+ bfe.child_to_read = 2;
+ bfe.disable_prefetching = false;
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd, &bfe);
+ invariant(r == 0);
+ invariant(dn->n_children == 3);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_AVAIL);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ // need to call this twice because the subset read above touched the clock
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_COMPRESSED);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_COMPRESSED);
+ invariant(BP_STATE(dn, 2) == PT_COMPRESSED);
+ r = toku_ftnode_pf_callback(dn, ndd, &bfe, fd, &attr);
+ invariant(BP_STATE(dn, 0) == PT_ON_DISK);
+ invariant(BP_STATE(dn, 1) == PT_AVAIL);
+ invariant(BP_STATE(dn, 2) == PT_AVAIL);
+ toku_ftnode_free(&dn);
+ toku_free(ndd);
+
+ // fake the childnum to read
+ bfe.child_to_read = 0;
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd, &bfe);
+ invariant(r == 0);
+ invariant(dn->n_children == 3);
+ invariant(BP_STATE(dn, 0) == PT_AVAIL);
+ invariant(BP_STATE(dn, 1) == PT_AVAIL);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ // need to call this twice because the subset read above touched the clock
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_AVAIL);
+ invariant(BP_STATE(dn, 1) == PT_COMPRESSED);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ toku_ftnode_pe_callback(
+ dn, make_pair_attr(0xffffffff), ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(dn, 0) == PT_COMPRESSED);
+ invariant(BP_STATE(dn, 1) == PT_COMPRESSED);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ r = toku_ftnode_pf_callback(dn, ndd, &bfe, fd, &attr);
+ invariant(BP_STATE(dn, 0) == PT_AVAIL);
+ invariant(BP_STATE(dn, 1) == PT_AVAIL);
+ invariant(BP_STATE(dn, 2) == PT_ON_DISK);
+ toku_ftnode_free(&dn);
+ toku_free(ndd);
+
+ toku_free(cursor);
+}
+
+static void test_prefetching(void) {
+ // struct ft_handle source_ft;
+ struct ftnode sn;
+
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ int r;
+
+ // source_ft.fd=fd;
+ sn.max_msn_applied_to_node_on_disk.msn = 0;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 1;
+ sn.n_children = 3;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+
+ uint64_t key1 = 100;
+ uint64_t key2 = 200;
+
+ MALLOC_N(sn.n_children, sn.bp);
+ DBT pivotkeys[2];
+ toku_fill_dbt(&pivotkeys[0], &key1, sizeof(key1));
+ toku_fill_dbt(&pivotkeys[1], &key2, sizeof(key2));
+ sn.pivotkeys.create_from_dbts(pivotkeys, 2);
+ BP_BLOCKNUM(&sn, 0).b = 30;
+ BP_BLOCKNUM(&sn, 1).b = 35;
+ BP_BLOCKNUM(&sn, 2).b = 40;
+ BP_STATE(&sn, 0) = PT_AVAIL;
+ BP_STATE(&sn, 1) = PT_AVAIL;
+ BP_STATE(&sn, 2) = PT_AVAIL;
+ set_BNC(&sn, 0, toku_create_empty_nl());
+ set_BNC(&sn, 1, toku_create_empty_nl());
+ set_BNC(&sn, 2, toku_create_empty_nl());
+ // Create XIDS
+ XIDS xids_0 = toku_xids_get_root_xids();
+ XIDS xids_123;
+ XIDS xids_234;
+ r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123);
+ CKERR(r);
+ r = toku_xids_create_child(xids_123, &xids_234, (TXNID)234);
+ CKERR(r);
+
+ // data in the buffers does not matter in this test
+ // Cleanup:
+ toku_xids_destroy(&xids_0);
+ toku_xids_destroy(&xids_123);
+ toku_xids_destroy(&xids_234);
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft_h->cmp.create(int64_key_cmp, nullptr);
+ ft->ft = ft_h;
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+ FTNODE_DISK_DATA ndd = NULL;
+ r = toku_serialize_ftnode_to(
+ fd, make_blocknum(20), &sn, &ndd, true, ft->ft, false);
+ invariant(r == 0);
+
+ test_prefetch_read(fd, ft, ft_h);
+ test_subset_read(fd, ft, ft_h);
+
+ toku_destroy_ftnode_internals(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ ft_h->cmp.destroy();
+ toku_free(ft_h->h);
+ toku_free(ft_h);
+ toku_free(ft);
+ toku_free(ndd);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+int test_main(int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ test_prefetching();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc b/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc
new file mode 100644
index 00000000..1a708b8e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc
@@ -0,0 +1,464 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include "ft/cursor.h"
+
+enum ftnode_verify_type { read_all = 1, read_compressed, read_none };
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+
+static int string_key_cmp(DB *UU(e), const DBT *a, const DBT *b) {
+ char *CAST_FROM_VOIDP(s, a->data);
+ char *CAST_FROM_VOIDP(t, b->data);
+ return strcmp(s, t);
+}
+
+static void le_add_to_bn(bn_data *bn,
+ uint32_t idx,
+ const char *key,
+ int keylen,
+ const char *val,
+ int vallen) {
+ LEAFENTRY r = NULL;
+ uint32_t size_needed = LE_CLEAN_MEMSIZE(vallen);
+ void *maybe_free = nullptr;
+ bn->get_space_for_insert(idx, key, keylen, size_needed, &r, &maybe_free);
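+ // get_space_for_insert() may have swapped in a new buffer; the old one is
+ // returned through maybe_free and must be released by the caller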
+ if (maybe_free) {
+ toku_free(maybe_free);
+ }
+ resource_assert(r);
+ r->type = LE_CLEAN;
+ r->u.clean.vallen = vallen;
+ memcpy(r->u.clean.val, val, vallen);
+}
+
+static void le_malloc(bn_data *bn,
+ uint32_t idx,
+ const char *key,
+ const char *val) {
+ int keylen = strlen(key) + 1;
+ int vallen = strlen(val) + 1;
+ le_add_to_bn(bn, idx, key, keylen, val, vallen);
+}
+
+static void test1(int fd, FT ft_h, FTNODE *dn) {
+ int r;
+ ftnode_fetch_extra bfe_all;
+ bfe_all.create_for_full_read(ft_h);
+ FTNODE_DISK_DATA ndd = NULL;
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, dn, &ndd, &bfe_all);
+ bool is_leaf = ((*dn)->height == 0);
+ invariant(r == 0);
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ invariant(BP_STATE(*dn, i) == PT_AVAIL);
+ }
+ // should sweep and NOT get rid of anything
+ PAIR_ATTR attr;
+ memset(&attr, 0, sizeof(attr));
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ invariant(BP_STATE(*dn, i) == PT_AVAIL);
+ }
+ // should sweep and compress everything
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ if (!is_leaf) {
+ invariant(BP_STATE(*dn, i) == PT_COMPRESSED);
+ } else {
+ invariant(BP_STATE(*dn, i) == PT_ON_DISK);
+ }
+ }
+ PAIR_ATTR size;
+ bool req = toku_ftnode_pf_req_callback(*dn, &bfe_all);
+ invariant(req);
+ toku_ftnode_pf_callback(*dn, ndd, &bfe_all, fd, &size);
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ invariant(BP_STATE(*dn, i) == PT_AVAIL);
+ }
+ // should sweep and compress everything
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ if (!is_leaf) {
+ invariant(BP_STATE(*dn, i) == PT_COMPRESSED);
+ } else {
+ invariant(BP_STATE(*dn, i) == PT_ON_DISK);
+ }
+ }
+
+ req = toku_ftnode_pf_req_callback(*dn, &bfe_all);
+ invariant(req);
+ toku_ftnode_pf_callback(*dn, ndd, &bfe_all, fd, &size);
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ invariant(BP_STATE(*dn, i) == PT_AVAIL);
+ }
+ (*dn)->set_dirty();
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ invariant(BP_STATE(*dn, i) == PT_AVAIL);
+ }
+ toku_free(ndd);
+ toku_ftnode_free(dn);
+}
+
+static int search_cmp(const struct ft_search &UU(so), const DBT *UU(key)) {
+ return 0;
+}
+
+static void test2(int fd, FT ft_h, FTNODE *dn) {
+ DBT left, right;
+ DB dummy_db;
+ memset(&dummy_db, 0, sizeof(dummy_db));
+ memset(&left, 0, sizeof(left));
+ memset(&right, 0, sizeof(right));
+ ft_search search;
+
+ ftnode_fetch_extra bfe_subset;
+ bfe_subset.create_for_subset_read(
+ ft_h,
+ ft_search_init(
+ &search, search_cmp, FT_SEARCH_LEFT, nullptr, nullptr, nullptr),
+ &left,
+ &right,
+ true,
+ true,
+ false,
+ false);
+
+ FTNODE_DISK_DATA ndd = NULL;
+ int r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, dn, &ndd, &bfe_subset);
+ invariant(r == 0);
+ bool is_leaf = ((*dn)->height == 0);
+ // at this point, although both partitions are available, only the
+ // second basement node should have had its clock
+ // touched
+ invariant(BP_STATE(*dn, 0) == PT_AVAIL);
+ invariant(BP_STATE(*dn, 1) == PT_AVAIL);
+ invariant(BP_SHOULD_EVICT(*dn, 0));
+ invariant(!BP_SHOULD_EVICT(*dn, 1));
+ PAIR_ATTR attr;
+ memset(&attr, 0, sizeof(attr));
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(*dn, 0) == ((is_leaf) ? PT_ON_DISK : PT_COMPRESSED));
+ invariant(BP_STATE(*dn, 1) == PT_AVAIL);
+ invariant(BP_SHOULD_EVICT(*dn, 1));
+ toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
+ invariant(BP_STATE(*dn, 1) == ((is_leaf) ? PT_ON_DISK : PT_COMPRESSED));
+
+ bool req = toku_ftnode_pf_req_callback(*dn, &bfe_subset);
+ invariant(req);
+ toku_ftnode_pf_callback(*dn, ndd, &bfe_subset, fd, &attr);
+ invariant(BP_STATE(*dn, 0) == PT_AVAIL);
+ invariant(BP_STATE(*dn, 1) == PT_AVAIL);
+ invariant(BP_SHOULD_EVICT(*dn, 0));
+ invariant(!BP_SHOULD_EVICT(*dn, 1));
+
+ toku_free(ndd);
+ toku_ftnode_free(dn);
+}
+
+static void test3_leaf(int fd, FT ft_h, FTNODE *dn) {
+ DBT left, right;
+ DB dummy_db;
+ memset(&dummy_db, 0, sizeof(dummy_db));
+ memset(&left, 0, sizeof(left));
+ memset(&right, 0, sizeof(right));
+
+ ftnode_fetch_extra bfe_min;
+ bfe_min.create_for_min_read(ft_h);
+
+ FTNODE_DISK_DATA ndd = NULL;
+ int r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, dn, &ndd, &bfe_min);
+ invariant(r == 0);
+ //
+ // make sure we have a leaf
+ //
+ invariant((*dn)->height == 0);
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ invariant(BP_STATE(*dn, i) == PT_ON_DISK);
+ }
+ toku_ftnode_free(dn);
+ toku_free(ndd);
+}
+
+static void test_serialize_nonleaf(void) {
+ // struct ft_handle source_ft;
+ struct ftnode sn, *dn;
+
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ int r;
+
+ // source_ft.fd=fd;
+ sn.max_msn_applied_to_node_on_disk.msn = 0;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 1;
+ sn.n_children = 2;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+ MALLOC_N(2, sn.bp);
+ DBT pivotkey;
+ sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "hello", 6), 1);
+ BP_BLOCKNUM(&sn, 0).b = 30;
+ BP_BLOCKNUM(&sn, 1).b = 35;
+ BP_STATE(&sn, 0) = PT_AVAIL;
+ BP_STATE(&sn, 1) = PT_AVAIL;
+ set_BNC(&sn, 0, toku_create_empty_nl());
+ set_BNC(&sn, 1, toku_create_empty_nl());
+ // Create XIDS
+ XIDS xids_0 = toku_xids_get_root_xids();
+ XIDS xids_123;
+ XIDS xids_234;
+ r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123);
+ CKERR(r);
+ r = toku_xids_create_child(xids_123, &xids_234, (TXNID)234);
+ CKERR(r);
+
+ toku::comparator cmp;
+ cmp.create(string_key_cmp, nullptr);
+
+ toku_bnc_insert_msg(BNC(&sn, 0),
+ "a",
+ 2,
+ "aval",
+ 5,
+ FT_NONE,
+ next_dummymsn(),
+ xids_0,
+ true,
+ cmp);
+ toku_bnc_insert_msg(BNC(&sn, 0),
+ "b",
+ 2,
+ "bval",
+ 5,
+ FT_NONE,
+ next_dummymsn(),
+ xids_123,
+ false,
+ cmp);
+ toku_bnc_insert_msg(BNC(&sn, 1),
+ "x",
+ 2,
+ "xval",
+ 5,
+ FT_NONE,
+ next_dummymsn(),
+ xids_234,
+ true,
+ cmp);
+
+ // Cleanup:
+ toku_xids_destroy(&xids_0);
+ toku_xids_destroy(&xids_123);
+ toku_xids_destroy(&xids_234);
+ cmp.destroy();
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft_h->cmp.create(string_key_cmp, nullptr);
+ ft->ft = ft_h;
+
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+ FTNODE_DISK_DATA ndd = NULL;
+ r = toku_serialize_ftnode_to(
+ fd, make_blocknum(20), &sn, &ndd, true, ft->ft, false);
+ invariant(r == 0);
+
+ test1(fd, ft_h, &dn);
+ test2(fd, ft_h, &dn);
+
+ toku_destroy_ftnode_internals(&sn);
+ toku_free(ndd);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ toku_free(ft_h->h);
+ ft_h->cmp.destroy();
+ toku_free(ft_h);
+ toku_free(ft);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+static void test_serialize_leaf(void) {
+ // struct ft_handle source_ft;
+ struct ftnode sn, *dn;
+
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ int r;
+
+ sn.max_msn_applied_to_node_on_disk.msn = 0;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 0;
+ sn.n_children = 2;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+ MALLOC_N(sn.n_children, sn.bp);
+ DBT pivotkey;
+ sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "b", 2), 1);
+ BP_STATE(&sn, 0) = PT_AVAIL;
+ BP_STATE(&sn, 1) = PT_AVAIL;
+ set_BLB(&sn, 0, toku_create_empty_bn());
+ set_BLB(&sn, 1, toku_create_empty_bn());
+ le_malloc(BLB_DATA(&sn, 0), 0, "a", "aval");
+ le_malloc(BLB_DATA(&sn, 0), 1, "b", "bval");
+ le_malloc(BLB_DATA(&sn, 1), 0, "x", "xval");
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft->ft = ft_h;
+
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+ FTNODE_DISK_DATA ndd = NULL;
+ r = toku_serialize_ftnode_to(
+ fd, make_blocknum(20), &sn, &ndd, true, ft->ft, false);
+ invariant(r == 0);
+
+ test1(fd, ft_h, &dn);
+ test3_leaf(fd, ft_h, &dn);
+
+ toku_destroy_ftnode_internals(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ toku_free(ft_h->h);
+ toku_free(ft_h);
+ toku_free(ft);
+ toku_free(ndd);
+ r = close(fd);
+ invariant(r != -1);
+}
+
+int test_main(int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ initialize_dummymsn();
+ test_serialize_nonleaf();
+ test_serialize_leaf();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-serialize-benchmark.cc b/storage/tokudb/PerconaFT/ft/tests/ft-serialize-benchmark.cc
new file mode 100644
index 00000000..bd5df786
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-serialize-benchmark.cc
@@ -0,0 +1,444 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include "test.h"
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+const double USECS_PER_SEC = 1000000.0;
+
+static void le_add_to_bn(bn_data *bn,
+ uint32_t idx,
+ char *key,
+ int keylen,
+ char *val,
+ int vallen) {
+ LEAFENTRY r = NULL;
+ uint32_t size_needed = LE_CLEAN_MEMSIZE(vallen);
+ void *maybe_free = nullptr;
+ bn->get_space_for_insert(idx, key, keylen, size_needed, &r, &maybe_free);
+ if (maybe_free) {
+ toku_free(maybe_free);
+ }
+ resource_assert(r);
+ r->type = LE_CLEAN;
+ r->u.clean.vallen = vallen;
+ memcpy(r->u.clean.val, val, vallen);
+}
+
+static int long_key_cmp(DB *UU(e), const DBT *a, const DBT *b) {
+ const long *CAST_FROM_VOIDP(x, a->data);
+ const long *CAST_FROM_VOIDP(y, b->data);
+ return (*x > *y) - (*x < *y);
+}
+
+static void test_serialize_leaf(int valsize,
+ int nelts,
+ double entropy,
+ int ser_runs,
+ int deser_runs) {
+ // struct ft_handle source_ft;
+ struct ftnode *sn, *dn;
+
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ int r;
+
+ XCALLOC(sn);
+
+ sn->max_msn_applied_to_node_on_disk.msn = 0;
+ sn->flags = 0x11223344;
+ sn->blocknum.b = 20;
+ sn->layout_version = FT_LAYOUT_VERSION;
+ sn->layout_version_original = FT_LAYOUT_VERSION;
+ sn->height = 0;
+ sn->n_children = 8;
+ sn->set_dirty();
+ sn->oldest_referenced_xid_known = TXNID_NONE;
+ MALLOC_N(sn->n_children, sn->bp);
+ sn->pivotkeys.create_empty();
+ for (int i = 0; i < sn->n_children; ++i) {
+ BP_STATE(sn, i) = PT_AVAIL;
+ set_BLB(sn, i, toku_create_empty_bn());
+ }
+ int nperbn = nelts / sn->n_children;
+ for (int ck = 0; ck < sn->n_children; ++ck) {
+ long k;
+ for (long i = 0; i < nperbn; ++i) {
+ k = ck * nperbn + i;
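+ // fill roughly the first valsize*entropy bytes with random data and zero
+ // the rest, so `entropy` controls how compressible the values are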
+ char buf[valsize];
+ int c;
+ for (c = 0; c < valsize * entropy;) {
+ int *p = (int *)&buf[c];
+ *p = rand();
+ c += sizeof(*p);
+ }
+ memset(&buf[c], 0, valsize - c);
+ le_add_to_bn(
+ BLB_DATA(sn, ck), i, (char *)&k, sizeof k, buf, sizeof buf);
+ }
+ if (ck < 7) {
+ DBT pivotkey;
+ sn->pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)),
+ ck);
+ }
+ }
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft_h->cmp.create(long_key_cmp, nullptr);
+ ft->ft = ft_h;
+
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+
+ struct timeval total_start;
+ struct timeval total_end;
+ total_start.tv_sec = total_start.tv_usec = 0;
+ total_end.tv_sec = total_end.tv_usec = 0;
+ struct timeval t[2];
+ FTNODE_DISK_DATA ndd = NULL;
+ for (int i = 0; i < ser_runs; i++) {
+ gettimeofday(&t[0], NULL);
+ ndd = NULL;
+ sn->set_dirty();
+ r = toku_serialize_ftnode_to(
+ fd, make_blocknum(20), sn, &ndd, true, ft->ft, false);
+ invariant(r == 0);
+ gettimeofday(&t[1], NULL);
+ total_start.tv_sec += t[0].tv_sec;
+ total_start.tv_usec += t[0].tv_usec;
+ total_end.tv_sec += t[1].tv_sec;
+ total_end.tv_usec += t[1].tv_usec;
+ toku_free(ndd);
+ }
+ double dt;
+ dt = (total_end.tv_sec - total_start.tv_sec) +
+ ((total_end.tv_usec - total_start.tv_usec) / USECS_PER_SEC);
+ dt *= 1000;
+ dt /= ser_runs;
+ printf(
+ "serialize leaf(ms): %0.05lf (average of %d runs)\n", dt, ser_runs);
+
+ // reset
+ total_start.tv_sec = total_start.tv_usec = 0;
+ total_end.tv_sec = total_end.tv_usec = 0;
+
+ ftnode_fetch_extra bfe;
+ for (int i = 0; i < deser_runs; i++) {
+ bfe.create_for_full_read(ft_h);
+ gettimeofday(&t[0], NULL);
+ FTNODE_DISK_DATA ndd2 = NULL;
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd2, &bfe);
+ invariant(r == 0);
+ gettimeofday(&t[1], NULL);
+
+ total_start.tv_sec += t[0].tv_sec;
+ total_start.tv_usec += t[0].tv_usec;
+ total_end.tv_sec += t[1].tv_sec;
+ total_end.tv_usec += t[1].tv_usec;
+
+ toku_ftnode_free(&dn);
+ toku_free(ndd2);
+ }
+ dt = (total_end.tv_sec - total_start.tv_sec) +
+ ((total_end.tv_usec - total_start.tv_usec) / USECS_PER_SEC);
+ dt *= 1000;
+ dt /= deser_runs;
+ printf(
+ "deserialize leaf(ms): %0.05lf (average of %d runs)\n", dt, deser_runs);
+ printf(
+ "io time(ms) %lf decompress time(ms) %lf deserialize time(ms) %lf "
+ "(average of %d runs)\n",
+ tokutime_to_seconds(bfe.io_time) * 1000,
+ tokutime_to_seconds(bfe.decompress_time) * 1000,
+ tokutime_to_seconds(bfe.deserialize_time) * 1000,
+ deser_runs);
+
+ toku_ftnode_free(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ ft_h->cmp.destroy();
+ toku_free(ft_h->h);
+ toku_free(ft_h);
+ toku_free(ft);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+static void test_serialize_nonleaf(int valsize,
+ int nelts,
+ double entropy,
+ int ser_runs,
+ int deser_runs) {
+ // struct ft_handle source_ft;
+ struct ftnode sn, *dn;
+
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ int r;
+
+ // source_ft.fd=fd;
+ sn.max_msn_applied_to_node_on_disk.msn = 0;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 1;
+ sn.n_children = 8;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+ MALLOC_N(sn.n_children, sn.bp);
+ sn.pivotkeys.create_empty();
+ for (int i = 0; i < sn.n_children; ++i) {
+ BP_BLOCKNUM(&sn, i).b = 30 + (i * 5);
+ BP_STATE(&sn, i) = PT_AVAIL;
+ set_BNC(&sn, i, toku_create_empty_nl());
+ }
+ // Create XIDS
+ XIDS xids_0 = toku_xids_get_root_xids();
+ XIDS xids_123;
+ r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123);
+ CKERR(r);
+ toku::comparator cmp;
+ cmp.create(long_key_cmp, nullptr);
+ int nperchild = nelts / 8;
+ for (int ck = 0; ck < sn.n_children; ++ck) {
+ long k;
+ NONLEAF_CHILDINFO bnc = BNC(&sn, ck);
+ for (long i = 0; i < nperchild; ++i) {
+ k = ck * nperchild + i;
+ char buf[valsize];
+ int c;
+ for (c = 0; c < valsize * entropy;) {
+ int *p = (int *)&buf[c];
+ *p = rand();
+ c += sizeof(*p);
+ }
+ memset(&buf[c], 0, valsize - c);
+
+ toku_bnc_insert_msg(bnc,
+ &k,
+ sizeof k,
+ buf,
+ valsize,
+ FT_NONE,
+ next_dummymsn(),
+ xids_123,
+ true,
+ cmp);
+ }
+ if (ck < 7) {
+ DBT pivotkey;
+ sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), ck);
+ }
+ }
+
+ // Cleanup:
+ toku_xids_destroy(&xids_0);
+ toku_xids_destroy(&xids_123);
+ cmp.destroy();
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft_h->cmp.create(long_key_cmp, nullptr);
+ ft->ft = ft_h;
+
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+
+ struct timeval t[2];
+ gettimeofday(&t[0], NULL);
+ FTNODE_DISK_DATA ndd = NULL;
+ r = toku_serialize_ftnode_to(
+ fd, make_blocknum(20), &sn, &ndd, true, ft->ft, false);
+ invariant(r == 0);
+ gettimeofday(&t[1], NULL);
+ double dt;
+ dt = (t[1].tv_sec - t[0].tv_sec) +
+ ((t[1].tv_usec - t[0].tv_usec) / USECS_PER_SEC);
+ dt *= 1000;
+ printf(
+ "serialize nonleaf(ms): %0.05lf (IGNORED RUNS=%d)\n", dt, ser_runs);
+
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_h);
+ gettimeofday(&t[0], NULL);
+ FTNODE_DISK_DATA ndd2 = NULL;
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, &dn, &ndd2, &bfe);
+ invariant(r == 0);
+ gettimeofday(&t[1], NULL);
+ dt = (t[1].tv_sec - t[0].tv_sec) +
+ ((t[1].tv_usec - t[0].tv_usec) / USECS_PER_SEC);
+ dt *= 1000;
+ printf(
+ "deserialize nonleaf(ms): %0.05lf (IGNORED RUNS=%d)\n", dt, deser_runs);
+ printf(
+ "io time(ms) %lf decompress time(ms) %lf deserialize time(ms) %lf "
+ "(IGNORED RUNS=%d)\n",
+ tokutime_to_seconds(bfe.io_time) * 1000,
+ tokutime_to_seconds(bfe.decompress_time) * 1000,
+ tokutime_to_seconds(bfe.deserialize_time) * 1000,
+ deser_runs);
+
+ toku_ftnode_free(&dn);
+ toku_destroy_ftnode_internals(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ toku_free(ft_h->h);
+ ft_h->cmp.destroy();
+ toku_free(ft_h);
+ toku_free(ft);
+ toku_free(ndd);
+ toku_free(ndd2);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+int test_main(int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ const int DEFAULT_RUNS = 5;
+ long valsize, nelts, ser_runs = DEFAULT_RUNS, deser_runs = DEFAULT_RUNS;
+ double entropy = 0.3;
+
+ if (argc != 3 && argc != 5) {
+ fprintf(stderr,
+ "Usage: %s <valsize> <nelts> [<serialize_runs> "
+ "<deserialize_runs>]\n",
+ argv[0]);
+ fprintf(stderr, "Default (and min) runs is %d\n", DEFAULT_RUNS);
+ return 2;
+ }
+ valsize = strtol(argv[1], NULL, 0);
+ nelts = strtol(argv[2], NULL, 0);
+ if (argc == 5) {
+ ser_runs = strtol(argv[3], NULL, 0);
+ deser_runs = strtol(argv[4], NULL, 0);
+ }
+
+ if (ser_runs <= 0) {
+ ser_runs = DEFAULT_RUNS;
+ }
+ if (deser_runs <= 0) {
+ deser_runs = DEFAULT_RUNS;
+ }
+
+ initialize_dummymsn();
+ test_serialize_leaf(valsize, nelts, entropy, ser_runs, deser_runs);
+ test_serialize_nonleaf(valsize, nelts, entropy, ser_runs, deser_runs);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-serialize-sub-block-test.cc b/storage/tokudb/PerconaFT/ft/tests/ft-serialize-sub-block-test.cc
new file mode 100644
index 00000000..6adef261
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-serialize-sub-block-test.cc
@@ -0,0 +1,126 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+// create a ft and put n rows into it
+// write the ft to the file
+// verify the rows in the ft
+static void test_sub_block(int n) {
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
+
+ const char *fname = TOKU_TEST_FILENAME;
+ const int nodesize = 4*1024*1024;
+ const int basementnodesize = 128*1024;
+ const enum toku_compression_method compression_method = TOKU_DEFAULT_COMPRESSION_METHOD;
+
+ TOKUTXN const null_txn = 0;
+
+ int error;
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ int i;
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ error = toku_open_ft_handle(fname, true, &ft, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun);
+ assert(error == 0);
+
+ // insert keys 0, 1, 2, .. (n-1)
+ for (i=0; i<n; i++) {
+ int k = toku_htonl(i);
+ int v = i;
+ DBT key, val;
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(ft, &key, &val, 0);
+ assert(error == 0);
+ }
+
+ // write to the file
+ error = toku_close_ft_handle_nolsn(ft, 0);
+ assert(error == 0);
+
+ // verify the ft by walking a cursor through the rows
+ error = toku_open_ft_handle(fname, false, &ft, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun);
+ assert(error == 0);
+
+ FT_CURSOR cursor;
+ error = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(error == 0);
+
+ for (i=0; ; i++) {
+ int k = htonl(i);
+ int v = i;
+ struct check_pair pair = {sizeof k, &k, sizeof v, &v, 0};
+ error = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ if (error != 0) {
+ assert(pair.call_count==0);
+ break;
+ }
+ assert(pair.call_count==1);
+ }
+ assert(i == n);
+
+ toku_ft_cursor_close(cursor);
+
+ error = toku_close_ft_handle_nolsn(ft, 0);
+ assert(error == 0);
+
+ toku_cachetable_close(&ct);
+}
+
+int test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ const int meg = 1024*1024;
+ const int row = 32;
+ const int rowspermeg = meg/row;
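+ // rows are roughly 32 bytes each, so rowspermeg rows is about a megabyte of
+ // leaf data; multiples of that should be large enough to exercise
+ // serialization across multiple compression sub blocks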
+
+ test_sub_block(1);
+ test_sub_block(rowspermeg-1);
+ int i;
+ for (i=1; i<8; i++)
+ test_sub_block(rowspermeg*i);
+
+ if (verbose) printf("test ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-serialize-test.cc b/storage/tokudb/PerconaFT/ft/tests/ft-serialize-test.cc
new file mode 100644
index 00000000..4fca8efa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-serialize-test.cc
@@ -0,0 +1,1299 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "bndata.h"
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+
+static size_t le_add_to_bn(bn_data *bn,
+ uint32_t idx,
+ const char *key,
+ int keysize,
+ const char *val,
+ int valsize) {
+ LEAFENTRY r = NULL;
+ uint32_t size_needed = LE_CLEAN_MEMSIZE(valsize);
+ void *maybe_free = nullptr;
+ bn->get_space_for_insert(idx, key, keysize, size_needed, &r, &maybe_free);
+ if (maybe_free) {
+ toku_free(maybe_free);
+ }
+ resource_assert(r);
+ r->type = LE_CLEAN;
+ r->u.clean.vallen = valsize;
+ memcpy(r->u.clean.val, val, valsize);
+ return size_needed + keysize + sizeof(uint32_t);
+}
+
+class test_key_le_pair {
+ public:
+ uint32_t keylen;
+ char *keyp;
+ LEAFENTRY le;
+
+ test_key_le_pair() : keylen(), keyp(), le() {}
+ void init(const char *_keyp, const char *_val) {
+ init(_keyp, strlen(_keyp) + 1, _val, strlen(_val) + 1);
+ }
+ void init(const char *_keyp,
+ uint32_t _keylen,
+ const char *_val,
+ uint32_t _vallen) {
+ keylen = _keylen;
+
+ CAST_FROM_VOIDP(le, toku_malloc(LE_CLEAN_MEMSIZE(_vallen)));
+ le->type = LE_CLEAN;
+ le->u.clean.vallen = _vallen;
+ memcpy(le->u.clean.val, _val, _vallen);
+
+ CAST_FROM_VOIDP(keyp, toku_xmemdup(_keyp, keylen));
+ }
+ ~test_key_le_pair() {
+ toku_free(le);
+ toku_free(keyp);
+ }
+};
+
+enum ftnode_verify_type { read_all = 1, read_compressed, read_none };
+
+static int string_key_cmp(DB *UU(e), const DBT *a, const DBT *b) {
+ char *CAST_FROM_VOIDP(s, a->data);
+ char *CAST_FROM_VOIDP(t, b->data);
+ return strcmp(s, t);
+}
+
+static void setup_dn(enum ftnode_verify_type bft,
+ int fd,
+ FT ft_h,
+ FTNODE *dn,
+ FTNODE_DISK_DATA *ndd) {
+ int r;
+ if (bft == read_all) {
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_h);
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, dn, ndd, &bfe);
+ invariant(r == 0);
+ } else if (bft == read_compressed || bft == read_none) {
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft_h);
+ r = toku_deserialize_ftnode_from(
+ fd, make_blocknum(20), 0 /*pass zero for hash*/, dn, ndd, &bfe);
+ invariant(r == 0);
+ // invariant all bp's are compressed or on disk.
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ invariant(BP_STATE(*dn, i) == PT_COMPRESSED ||
+ BP_STATE(*dn, i) == PT_ON_DISK);
+ }
+ // if read_none, get rid of the compressed bp's
+ if (bft == read_none) {
+ if ((*dn)->height == 0) {
+ toku_ftnode_pe_callback(*dn,
+ make_pair_attr(0xffffffff),
+ ft_h,
+ def_pe_finalize_impl,
+ nullptr);
+ // invariant all bp's are on disk
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ if ((*dn)->height == 0) {
+ invariant(BP_STATE(*dn, i) == PT_ON_DISK);
+ invariant(is_BNULL(*dn, i));
+ } else {
+ invariant(BP_STATE(*dn, i) == PT_COMPRESSED);
+ }
+ }
+ } else {
+ // first decompress everything, and make sure
+ // that it is available
+ // then run partial eviction to get it compressed
+ PAIR_ATTR attr;
+ bfe.create_for_full_read(ft_h);
+ invariant(toku_ftnode_pf_req_callback(*dn, &bfe));
+ r = toku_ftnode_pf_callback(*dn, *ndd, &bfe, fd, &attr);
+ invariant(r == 0);
+ // invariant all bp's are available
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ invariant(BP_STATE(*dn, i) == PT_AVAIL);
+ }
+ toku_ftnode_pe_callback(*dn,
+ make_pair_attr(0xffffffff),
+ ft_h,
+ def_pe_finalize_impl,
+ nullptr);
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ // invariant all bp's are still available, because we touched
+ // the clock
+ invariant(BP_STATE(*dn, i) == PT_AVAIL);
+ // now invariant all should be evicted
+ invariant(BP_SHOULD_EVICT(*dn, i));
+ }
+ toku_ftnode_pe_callback(*dn,
+ make_pair_attr(0xffffffff),
+ ft_h,
+ def_pe_finalize_impl,
+ nullptr);
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ invariant(BP_STATE(*dn, i) == PT_COMPRESSED);
+ }
+ }
+ }
+ // now decompress them
+ bfe.create_for_full_read(ft_h);
+ invariant(toku_ftnode_pf_req_callback(*dn, &bfe));
+ PAIR_ATTR attr;
+ r = toku_ftnode_pf_callback(*dn, *ndd, &bfe, fd, &attr);
+ invariant(r == 0);
+ // invariant all bp's are available
+ for (int i = 0; i < (*dn)->n_children; i++) {
+ invariant(BP_STATE(*dn, i) == PT_AVAIL);
+ }
+ // continue on with test
+ } else {
+ // if we get here, this is a test bug, NOT a bug in development code
+ invariant(false);
+ }
+}
+
+static void write_sn_to_disk(int fd,
+ FT_HANDLE ft,
+ FTNODE sn,
+ FTNODE_DISK_DATA *src_ndd,
+ bool do_clone) {
+ int r;
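+ // when do_clone is set, serialize from a cloned copy of the node (the same
+ // path the cachetable takes when checkpointing a dirty node); otherwise
+ // serialize the node in place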
+ if (do_clone) {
+ void *cloned_node_v = NULL;
+ PAIR_ATTR attr;
+ long clone_size;
+ toku_ftnode_clone_callback(
+ sn, &cloned_node_v, &clone_size, &attr, false, ft->ft);
+ FTNODE CAST_FROM_VOIDP(cloned_node, cloned_node_v);
+ r = toku_serialize_ftnode_to(
+ fd, make_blocknum(20), cloned_node, src_ndd, false, ft->ft, false);
+ invariant(r == 0);
+ toku_ftnode_free(&cloned_node);
+ } else {
+ r = toku_serialize_ftnode_to(
+ fd, make_blocknum(20), sn, src_ndd, true, ft->ft, false);
+ invariant(r == 0);
+ }
+}
+
+static void test_serialize_leaf_check_msn(enum ftnode_verify_type bft,
+ bool do_clone) {
+ // struct ft_handle source_ft;
+ struct ftnode sn, *dn;
+
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ int r;
+
+#define PRESERIALIZE_MSN_ON_DISK ((MSN){MIN_MSN.msn + 42})
+#define POSTSERIALIZE_MSN_ON_DISK ((MSN){MIN_MSN.msn + 84})
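+ // the node is written out with a stale node-level MSN; basement node 1
+ // carries the larger POSTSERIALIZE msn, and the test checks below that
+ // deserialization recovers that larger value for the whole node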
+
+ sn.max_msn_applied_to_node_on_disk = PRESERIALIZE_MSN_ON_DISK;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 0;
+ sn.n_children = 2;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+ MALLOC_N(sn.n_children, sn.bp);
+ DBT pivotkey;
+ sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "b", 2), 1);
+ BP_STATE(&sn, 0) = PT_AVAIL;
+ BP_STATE(&sn, 1) = PT_AVAIL;
+ set_BLB(&sn, 0, toku_create_empty_bn());
+ set_BLB(&sn, 1, toku_create_empty_bn());
+ le_add_to_bn(BLB_DATA(&sn, 0), 0, "a", 2, "aval", 5);
+ le_add_to_bn(BLB_DATA(&sn, 0), 1, "b", 2, "bval", 5);
+ le_add_to_bn(BLB_DATA(&sn, 1), 0, "x", 2, "xval", 5);
+ BLB_MAX_MSN_APPLIED(&sn, 0) = ((MSN){MIN_MSN.msn + 73});
+ BLB_MAX_MSN_APPLIED(&sn, 1) = POSTSERIALIZE_MSN_ON_DISK;
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft->ft = ft_h;
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+ FTNODE_DISK_DATA src_ndd = NULL;
+ FTNODE_DISK_DATA dest_ndd = NULL;
+
+ write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
+
+ setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
+
+ invariant(dn->blocknum.b == 20);
+
+ invariant(dn->layout_version == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_original == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_read_from_disk == FT_LAYOUT_VERSION);
+ invariant(dn->height == 0);
+ invariant(dn->n_children >= 1);
+ invariant(dn->max_msn_applied_to_node_on_disk.msn ==
+ POSTSERIALIZE_MSN_ON_DISK.msn);
+ {
+ // Man, this is way too ugly. This entire test suite needs to be
+ // refactored.
+ // Create a dummy mempool and put the leaves there. Ugh.
+ test_key_le_pair elts[3];
+ elts[0].init("a", "aval");
+ elts[1].init("b", "bval");
+ elts[2].init("x", "xval");
+ const uint32_t npartitions = dn->n_children;
+ uint32_t last_i = 0;
+ for (uint32_t bn = 0; bn < npartitions; ++bn) {
+ invariant(BLB_MAX_MSN_APPLIED(dn, bn).msn ==
+ POSTSERIALIZE_MSN_ON_DISK.msn);
+ invariant(dest_ndd[bn].start > 0);
+ invariant(dest_ndd[bn].size > 0);
+ if (bn > 0) {
+ invariant(dest_ndd[bn].start >=
+ dest_ndd[bn - 1].start + dest_ndd[bn - 1].size);
+ }
+ for (uint32_t i = 0; i < BLB_DATA(dn, bn)->num_klpairs(); i++) {
+ LEAFENTRY curr_le;
+ uint32_t curr_keylen;
+ void *curr_key;
+ BLB_DATA(dn, bn)
+ ->fetch_klpair(i, &curr_le, &curr_keylen, &curr_key);
+ invariant(leafentry_memsize(curr_le) ==
+ leafentry_memsize(elts[last_i].le));
+ invariant(memcmp(curr_le,
+ elts[last_i].le,
+ leafentry_memsize(curr_le)) == 0);
+ if (bn < npartitions - 1) {
+ invariant(strcmp((char *)dn->pivotkeys.get_pivot(bn).data,
+ elts[last_i].keyp) <= 0);
+ }
+ // TODO for later, get a key comparison here as well
+ last_i++;
+ }
+ }
+ invariant(last_i == 3);
+ }
+
+ toku_ftnode_free(&dn);
+ toku_destroy_ftnode_internals(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ toku_free(ft_h->h);
+ toku_free(ft_h);
+ toku_free(ft);
+ toku_free(src_ndd);
+ toku_free(dest_ndd);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+static void test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft,
+ bool do_clone) {
+ int r;
+ struct ftnode sn, *dn;
+ const int keylens = 256 * 1024, vallens = 0;
+ const uint32_t nrows = 8;
+ // invariant(val_size > BN_MAX_SIZE); // BN_MAX_SIZE isn't visible
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ sn.max_msn_applied_to_node_on_disk.msn = 0;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 0;
+ sn.n_children = nrows;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+
+ MALLOC_N(sn.n_children, sn.bp);
+ sn.pivotkeys.create_empty();
+ for (int i = 0; i < sn.n_children; ++i) {
+ BP_STATE(&sn, i) = PT_AVAIL;
+ set_BLB(&sn, i, toku_create_empty_bn());
+ }
+ for (uint32_t i = 0; i < nrows; ++i) { // one basement per row
+ char key[keylens], val[vallens];
+ key[keylens - 1] = '\0';
+ char c = 'a' + i;
+ memset(key, c, keylens - 1);
+ le_add_to_bn(BLB_DATA(&sn, i),
+ 0,
+ (char *)&key,
+ sizeof(key),
+ (char *)&val,
+ sizeof(val));
+ if (i < nrows - 1) {
+ uint32_t keylen;
+ void *curr_key;
+ BLB_DATA(&sn, i)->fetch_key_and_len(0, &keylen, &curr_key);
+ DBT pivotkey;
+ sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, curr_key, keylen),
+ i);
+ }
+ }
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft->ft = ft_h;
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+ FTNODE_DISK_DATA src_ndd = NULL;
+ FTNODE_DISK_DATA dest_ndd = NULL;
+
+ write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
+
+ setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
+
+ invariant(dn->blocknum.b == 20);
+
+ invariant(dn->layout_version == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_original == FT_LAYOUT_VERSION);
+ {
+ // Man, this is way too ugly. This entire test suite needs to be
+ // refactored.
+ // Create a dummy mempool and put the leaves there. Ugh.
+ test_key_le_pair *les = new test_key_le_pair[nrows];
+ {
+ char key[keylens], val[vallens];
+ key[keylens - 1] = '\0';
+ for (uint32_t i = 0; i < nrows; ++i) {
+ char c = 'a' + i;
+ memset(key, c, keylens - 1);
+ les[i].init(
+ (char *)&key, sizeof(key), (char *)&val, sizeof(val));
+ }
+ }
+ const uint32_t npartitions = dn->n_children;
+ uint32_t last_i = 0;
+ for (uint32_t bn = 0; bn < npartitions; ++bn) {
+ invariant(dest_ndd[bn].start > 0);
+ invariant(dest_ndd[bn].size > 0);
+ if (bn > 0) {
+ invariant(dest_ndd[bn].start >=
+ dest_ndd[bn - 1].start + dest_ndd[bn - 1].size);
+ }
+ invariant(BLB_DATA(dn, bn)->num_klpairs() > 0);
+ for (uint32_t i = 0; i < BLB_DATA(dn, bn)->num_klpairs(); i++) {
+ LEAFENTRY curr_le;
+ uint32_t curr_keylen;
+ void *curr_key;
+ BLB_DATA(dn, bn)
+ ->fetch_klpair(i, &curr_le, &curr_keylen, &curr_key);
+ invariant(leafentry_memsize(curr_le) ==
+ leafentry_memsize(les[last_i].le));
+ invariant(memcmp(curr_le,
+ les[last_i].le,
+ leafentry_memsize(curr_le)) == 0);
+ if (bn < npartitions - 1) {
+ invariant(strcmp((char *)dn->pivotkeys.get_pivot(bn).data,
+ les[last_i].keyp) <= 0);
+ }
+ // TODO for later, get a key comparison here as well
+ last_i++;
+ }
+ }
+ invariant(last_i == nrows);
+ delete[] les;
+ }
+
+ toku_ftnode_free(&dn);
+ toku_destroy_ftnode_internals(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ toku_free(ft_h->h);
+ toku_free(ft_h);
+ toku_free(ft);
+ toku_free(src_ndd);
+ toku_free(dest_ndd);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+static void test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft,
+ bool do_clone) {
+ int r;
+ struct ftnode sn, *dn;
+ const uint32_t nrows = 196 * 1024;
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ sn.max_msn_applied_to_node_on_disk.msn = 0;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 0;
+ sn.n_children = 1;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+
+ XMALLOC_N(sn.n_children, sn.bp);
+ sn.pivotkeys.create_empty();
+ for (int i = 0; i < sn.n_children; ++i) {
+ BP_STATE(&sn, i) = PT_AVAIL;
+ set_BLB(&sn, i, toku_create_empty_bn());
+ }
+ size_t total_size = 0;
+ for (uint32_t i = 0; i < nrows; ++i) {
+ uint32_t key = i;
+ uint32_t val = i;
+ total_size += le_add_to_bn(BLB_DATA(&sn, 0),
+ i,
+ (char *)&key,
+ sizeof(key),
+ (char *)&val,
+ sizeof(val));
+ }
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft->ft = ft_h;
+
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+
+ FTNODE_DISK_DATA src_ndd = NULL;
+ FTNODE_DISK_DATA dest_ndd = NULL;
+ write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
+
+ setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
+
+ invariant(dn->blocknum.b == 20);
+
+ invariant(dn->layout_version == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_original == FT_LAYOUT_VERSION);
+ {
+ // Man, this is way too ugly. This entire test suite needs to be
+ // refactored.
+ // Create a dummy mempool and put the leaves there. Ugh.
+ test_key_le_pair *les = new test_key_le_pair[nrows];
+ {
+ int key = 0, val = 0;
+ for (uint32_t i = 0; i < nrows; ++i, key++, val++) {
+ les[i].init(
+ (char *)&key, sizeof(key), (char *)&val, sizeof(val));
+ }
+ }
+ const uint32_t npartitions = dn->n_children;
+ uint32_t last_i = 0;
+ for (uint32_t bn = 0; bn < npartitions; ++bn) {
+ invariant(dest_ndd[bn].start > 0);
+ invariant(dest_ndd[bn].size > 0);
+ if (bn > 0) {
+ invariant(dest_ndd[bn].start >=
+ dest_ndd[bn - 1].start + dest_ndd[bn - 1].size);
+ }
+ invariant(BLB_DATA(dn, bn)->num_klpairs() > 0);
+ for (uint32_t i = 0; i < BLB_DATA(dn, bn)->num_klpairs(); i++) {
+ LEAFENTRY curr_le;
+ uint32_t curr_keylen;
+ void *curr_key;
+ BLB_DATA(dn, bn)
+ ->fetch_klpair(i, &curr_le, &curr_keylen, &curr_key);
+ invariant(leafentry_memsize(curr_le) ==
+ leafentry_memsize(les[last_i].le));
+ invariant(memcmp(curr_le,
+ les[last_i].le,
+ leafentry_memsize(curr_le)) == 0);
+ if (bn < npartitions - 1) {
+ uint32_t *CAST_FROM_VOIDP(pivot,
+ dn->pivotkeys.get_pivot(bn).data);
+ void *tmp = les[last_i].keyp;
+ uint32_t *CAST_FROM_VOIDP(item, tmp);
+ invariant(*pivot >= *item);
+ }
+ // TODO for later, get a key comparison here as well
+ last_i++;
+ }
+ // don't check soft_copy_is_up_to_date or seqinsert
+ invariant(BLB_DATA(dn, bn)->get_disk_size() <
+ 128 * 1024); // BN_MAX_SIZE, apt to change
+ }
+ invariant(last_i == nrows);
+ delete[] les;
+ }
+
+ toku_ftnode_free(&dn);
+ toku_destroy_ftnode_internals(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ toku_free(ft_h->h);
+ toku_free(ft_h);
+ toku_free(ft);
+ toku_free(src_ndd);
+ toku_free(dest_ndd);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+static void test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft,
+ bool do_clone) {
+ int r;
+ struct ftnode sn, *dn;
+ const uint32_t nrows = 7;
+ const size_t key_size = 8;
+ const size_t val_size = 512 * 1024;
+ // invariant(val_size > BN_MAX_SIZE); // BN_MAX_SIZE isn't visible
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ sn.max_msn_applied_to_node_on_disk.msn = 0;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 0;
+ sn.n_children = 1;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+
+ MALLOC_N(sn.n_children, sn.bp);
+ sn.pivotkeys.create_empty();
+ for (int i = 0; i < sn.n_children; ++i) {
+ BP_STATE(&sn, i) = PT_AVAIL;
+ set_BLB(&sn, i, toku_create_empty_bn());
+ }
+ for (uint32_t i = 0; i < nrows; ++i) {
+ char key[key_size], val[val_size];
+ key[key_size - 1] = '\0';
+ val[val_size - 1] = '\0';
+ char c = 'a' + i;
+ memset(key, c, key_size - 1);
+ memset(val, c, val_size - 1);
+ le_add_to_bn(BLB_DATA(&sn, 0), i, key, 8, val, val_size);
+ }
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft->ft = ft_h;
+
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+
+ FTNODE_DISK_DATA src_ndd = NULL;
+ FTNODE_DISK_DATA dest_ndd = NULL;
+ write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
+
+ setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
+
+ invariant(dn->blocknum.b == 20);
+
+ invariant(dn->layout_version == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_original == FT_LAYOUT_VERSION);
+ {
+ // Man, this is way too ugly. This entire test suite needs to be
+ // refactored.
+ // Create a dummy mempool and put the leaves there. Ugh.
+ test_key_le_pair *les = new test_key_le_pair[nrows];
+ {
+ char key[key_size], val[val_size];
+ key[key_size - 1] = '\0';
+ val[val_size - 1] = '\0';
+ for (uint32_t i = 0; i < nrows; ++i) {
+ char c = 'a' + i;
+ memset(key, c, key_size - 1);
+ memset(val, c, val_size - 1);
+ les[i].init(key, key_size, val, val_size);
+ }
+ }
+ const uint32_t npartitions = dn->n_children;
+ invariant(npartitions == nrows);
+ uint32_t last_i = 0;
+ for (uint32_t bn = 0; bn < npartitions; ++bn) {
+ invariant(dest_ndd[bn].start > 0);
+ invariant(dest_ndd[bn].size > 0);
+ if (bn > 0) {
+ invariant(dest_ndd[bn].start >=
+ dest_ndd[bn - 1].start + dest_ndd[bn - 1].size);
+ }
+ invariant(BLB_DATA(dn, bn)->num_klpairs() > 0);
+ for (uint32_t i = 0; i < BLB_DATA(dn, bn)->num_klpairs(); i++) {
+ LEAFENTRY curr_le;
+ uint32_t curr_keylen;
+ void *curr_key;
+ BLB_DATA(dn, bn)
+ ->fetch_klpair(i, &curr_le, &curr_keylen, &curr_key);
+ invariant(leafentry_memsize(curr_le) ==
+ leafentry_memsize(les[last_i].le));
+ invariant(memcmp(curr_le,
+ les[last_i].le,
+ leafentry_memsize(curr_le)) == 0);
+ if (bn < npartitions - 1) {
+ invariant(strcmp((char *)dn->pivotkeys.get_pivot(bn).data,
+ (char *)(les[last_i].keyp)) <= 0);
+ }
+ // TODO for later, get a key comparison here as well
+ last_i++;
+ }
+ // don't check soft_copy_is_up_to_date or seqinsert
+ }
+ invariant(last_i == 7);
+ delete[] les;
+ }
+
+ toku_ftnode_free(&dn);
+ toku_destroy_ftnode_internals(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ toku_free(ft_h->h);
+ toku_free(ft_h);
+ toku_free(ft);
+ toku_free(src_ndd);
+ toku_free(dest_ndd);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+static void test_serialize_leaf_with_empty_basement_nodes(
+ enum ftnode_verify_type bft,
+ bool do_clone) {
+ struct ftnode sn, *dn;
+
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ int r;
+
+ sn.max_msn_applied_to_node_on_disk.msn = 0;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 0;
+ sn.n_children = 7;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+ MALLOC_N(sn.n_children, sn.bp);
+ DBT pivotkeys[6];
+ toku_fill_dbt(&pivotkeys[0], "A", 2);
+ toku_fill_dbt(&pivotkeys[1], "a", 2);
+ toku_fill_dbt(&pivotkeys[2], "a", 2);
+ toku_fill_dbt(&pivotkeys[3], "b", 2);
+ toku_fill_dbt(&pivotkeys[4], "b", 2);
+ toku_fill_dbt(&pivotkeys[5], "x", 2);
+ sn.pivotkeys.create_from_dbts(pivotkeys, 6);
+ for (int i = 0; i < sn.n_children; ++i) {
+ BP_STATE(&sn, i) = PT_AVAIL;
+ set_BLB(&sn, i, toku_create_empty_bn());
+ BLB_SEQINSERT(&sn, i) = 0;
+ }
+ le_add_to_bn(BLB_DATA(&sn, 1), 0, "a", 2, "aval", 5);
+ le_add_to_bn(BLB_DATA(&sn, 3), 0, "b", 2, "bval", 5);
+ le_add_to_bn(BLB_DATA(&sn, 5), 0, "x", 2, "xval", 5);
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft->ft = ft_h;
+
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+ FTNODE_DISK_DATA src_ndd = NULL;
+ FTNODE_DISK_DATA dest_ndd = NULL;
+ write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
+
+ setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
+
+ invariant(dn->blocknum.b == 20);
+
+ invariant(dn->layout_version == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_original == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_read_from_disk == FT_LAYOUT_VERSION);
+ invariant(dn->height == 0);
+ invariant(dn->n_children > 0);
+ {
+ test_key_le_pair elts[3];
+
+ // Man, this is way too ugly. This entire test suite needs to be
+ // refactored.
+ // Create a dummy mempool and put the leaves there. Ugh.
+ elts[0].init("a", "aval");
+ elts[1].init("b", "bval");
+ elts[2].init("x", "xval");
+ const uint32_t npartitions = dn->n_children;
+ uint32_t last_i = 0;
+ for (uint32_t bn = 0; bn < npartitions; ++bn) {
+ invariant(dest_ndd[bn].start > 0);
+ invariant(dest_ndd[bn].size > 0);
+ if (bn > 0) {
+ invariant(dest_ndd[bn].start >=
+ dest_ndd[bn - 1].start + dest_ndd[bn - 1].size);
+ }
+ for (uint32_t i = 0; i < BLB_DATA(dn, bn)->num_klpairs(); i++) {
+ LEAFENTRY curr_le;
+ uint32_t curr_keylen;
+ void *curr_key;
+ BLB_DATA(dn, bn)
+ ->fetch_klpair(i, &curr_le, &curr_keylen, &curr_key);
+ invariant(leafentry_memsize(curr_le) ==
+ leafentry_memsize(elts[last_i].le));
+ invariant(memcmp(curr_le,
+ elts[last_i].le,
+ leafentry_memsize(curr_le)) == 0);
+ if (bn < npartitions - 1) {
+ invariant(strcmp((char *)dn->pivotkeys.get_pivot(bn).data,
+ (char *)(elts[last_i].keyp)) <= 0);
+ }
+ // TODO for later, get a key comparison here as well
+ last_i++;
+ }
+ }
+ invariant(last_i == 3);
+ }
+
+ toku_ftnode_free(&dn);
+ toku_destroy_ftnode_internals(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ toku_free(ft_h->h);
+ toku_free(ft_h);
+ toku_free(ft);
+ toku_free(src_ndd);
+ toku_free(dest_ndd);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+static void test_serialize_leaf_with_multiple_empty_basement_nodes(
+ enum ftnode_verify_type bft,
+ bool do_clone) {
+ struct ftnode sn, *dn;
+
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ int r;
+
+ sn.max_msn_applied_to_node_on_disk.msn = 0;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 0;
+ sn.n_children = 4;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+ MALLOC_N(sn.n_children, sn.bp);
+ DBT pivotkeys[3];
+ toku_fill_dbt(&pivotkeys[0], "A", 2);
+ toku_fill_dbt(&pivotkeys[1], "A", 2);
+ toku_fill_dbt(&pivotkeys[2], "A", 2);
+ sn.pivotkeys.create_from_dbts(pivotkeys, 3);
+ for (int i = 0; i < sn.n_children; ++i) {
+ BP_STATE(&sn, i) = PT_AVAIL;
+ set_BLB(&sn, i, toku_create_empty_bn());
+ }
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft->ft = ft_h;
+
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+
+ FTNODE_DISK_DATA src_ndd = NULL;
+ FTNODE_DISK_DATA dest_ndd = NULL;
+ write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
+
+ setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
+
+ invariant(dn->blocknum.b == 20);
+
+ invariant(dn->layout_version == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_original == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_read_from_disk == FT_LAYOUT_VERSION);
+ invariant(dn->height == 0);
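+ // The four all-empty basement nodes written above should come back merged
+ // into a single, still-empty partition.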
+ invariant(dn->n_children == 1);
+ {
+ const uint32_t npartitions = dn->n_children;
+ for (uint32_t i = 0; i < npartitions; ++i) {
+ invariant(dest_ndd[i].start > 0);
+ invariant(dest_ndd[i].size > 0);
+ if (i > 0) {
+ invariant(dest_ndd[i].start >=
+ dest_ndd[i - 1].start + dest_ndd[i - 1].size);
+ }
+ invariant(BLB_DATA(dn, i)->num_klpairs() == 0);
+ }
+ }
+
+ toku_ftnode_free(&dn);
+ toku_destroy_ftnode_internals(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ toku_free(ft_h->h);
+ toku_free(ft_h);
+ toku_free(ft);
+ toku_free(src_ndd);
+ toku_free(dest_ndd);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+static void test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
+ // struct ft_handle source_ft;
+ struct ftnode sn, *dn;
+
+ int fd = open(TOKU_TEST_FILENAME,
+ O_RDWR | O_CREAT | O_BINARY,
+ S_IRWXU | S_IRWXG | S_IRWXO);
+ invariant(fd >= 0);
+
+ int r;
+
+ // source_ft.fd=fd;
+ sn.max_msn_applied_to_node_on_disk.msn = 0;
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 1;
+ sn.n_children = 2;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+ MALLOC_N(2, sn.bp);
+ DBT pivotkey;
+ sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "hello", 6), 1);
+ BP_BLOCKNUM(&sn, 0).b = 30;
+ BP_BLOCKNUM(&sn, 1).b = 35;
+ BP_STATE(&sn, 0) = PT_AVAIL;
+ BP_STATE(&sn, 1) = PT_AVAIL;
+ set_BNC(&sn, 0, toku_create_empty_nl());
+ set_BNC(&sn, 1, toku_create_empty_nl());
+ // Create XIDS
+ XIDS xids_0 = toku_xids_get_root_xids();
+ XIDS xids_123;
+ XIDS xids_234;
+ r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123);
+ CKERR(r);
+ r = toku_xids_create_child(xids_123, &xids_234, (TXNID)234);
+ CKERR(r);
+
+ toku::comparator cmp;
+ cmp.create(string_key_cmp, nullptr);
+
+ toku_bnc_insert_msg(BNC(&sn, 0),
+ "a",
+ 2,
+ "aval",
+ 5,
+ FT_NONE,
+ next_dummymsn(),
+ xids_0,
+ true,
+ cmp);
+ toku_bnc_insert_msg(BNC(&sn, 0),
+ "b",
+ 2,
+ "bval",
+ 5,
+ FT_NONE,
+ next_dummymsn(),
+ xids_123,
+ false,
+ cmp);
+ toku_bnc_insert_msg(BNC(&sn, 1),
+ "x",
+ 2,
+ "xval",
+ 5,
+ FT_NONE,
+ next_dummymsn(),
+ xids_234,
+ true,
+ cmp);
+
+ // Cleanup:
+ toku_xids_destroy(&xids_0);
+ toku_xids_destroy(&xids_123);
+ toku_xids_destroy(&xids_234);
+ cmp.destroy();
+
+ FT_HANDLE XMALLOC(ft);
+ FT XCALLOC(ft_h);
+ toku_ft_init(ft_h,
+ make_blocknum(0),
+ ZERO_LSN,
+ TXNID_NONE,
+ 4 * 1024 * 1024,
+ 128 * 1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ 16);
+ ft_h->cmp.create(string_key_cmp, nullptr);
+ ft->ft = ft_h;
+
+ ft_h->blocktable.create();
+ {
+ int r_truncate = ftruncate(fd, 0);
+ CKERR(r_truncate);
+ }
+ // Want to use block #20
+ BLOCKNUM b = make_blocknum(0);
+ while (b.b < 20) {
+ ft_h->blocktable.allocate_blocknum(&b, ft_h);
+ }
+ invariant(b.b == 20);
+
+ {
+ DISKOFF offset;
+ DISKOFF size;
+ ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+
+ ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size);
+ invariant(offset ==
+ (DISKOFF)BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
+ invariant(size == 100);
+ }
+ FTNODE_DISK_DATA src_ndd = NULL;
+ FTNODE_DISK_DATA dest_ndd = NULL;
+ write_sn_to_disk(fd, ft, &sn, &src_ndd, do_clone);
+
+ setup_dn(bft, fd, ft_h, &dn, &dest_ndd);
+
+ invariant(dn->blocknum.b == 20);
+
+ invariant(dn->layout_version == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_original == FT_LAYOUT_VERSION);
+ invariant(dn->layout_version_read_from_disk == FT_LAYOUT_VERSION);
+ invariant(dn->height == 1);
+ invariant(dn->n_children == 2);
+ invariant(strcmp((char *)dn->pivotkeys.get_pivot(0).data, "hello") == 0);
+ invariant(dn->pivotkeys.get_pivot(0).size == 6);
+ invariant(BP_BLOCKNUM(dn, 0).b == 30);
+ invariant(BP_BLOCKNUM(dn, 1).b == 35);
+
+ message_buffer *src_msg_buffer1 = &BNC(&sn, 0)->msg_buffer;
+ message_buffer *src_msg_buffer2 = &BNC(&sn, 1)->msg_buffer;
+ message_buffer *dest_msg_buffer1 = &BNC(dn, 0)->msg_buffer;
+ message_buffer *dest_msg_buffer2 = &BNC(dn, 1)->msg_buffer;
+
+ invariant(src_msg_buffer1->equals(dest_msg_buffer1));
+ invariant(src_msg_buffer2->equals(dest_msg_buffer2));
+
+ toku_ftnode_free(&dn);
+ toku_destroy_ftnode_internals(&sn);
+
+ ft_h->blocktable.block_free(
+ BlockAllocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, 100);
+ ft_h->blocktable.destroy();
+ ft_h->cmp.destroy();
+ toku_free(ft_h->h);
+ toku_free(ft_h);
+ toku_free(ft);
+ toku_free(src_ndd);
+ toku_free(dest_ndd);
+
+ r = close(fd);
+ invariant(r != -1);
+}
+
+int test_main(int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ initialize_dummymsn();
+
+ test_serialize_nonleaf(read_none, false);
+ test_serialize_nonleaf(read_all, false);
+ test_serialize_nonleaf(read_compressed, false);
+ test_serialize_nonleaf(read_none, true);
+ test_serialize_nonleaf(read_all, true);
+ test_serialize_nonleaf(read_compressed, true);
+
+ test_serialize_leaf_check_msn(read_none, false);
+ test_serialize_leaf_check_msn(read_all, false);
+ test_serialize_leaf_check_msn(read_compressed, false);
+ test_serialize_leaf_check_msn(read_none, true);
+ test_serialize_leaf_check_msn(read_all, true);
+ test_serialize_leaf_check_msn(read_compressed, true);
+
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_none, false);
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_all, false);
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_compressed,
+ false);
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_none, true);
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_all, true);
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_compressed,
+ true);
+
+ test_serialize_leaf_with_empty_basement_nodes(read_none, false);
+ test_serialize_leaf_with_empty_basement_nodes(read_all, false);
+ test_serialize_leaf_with_empty_basement_nodes(read_compressed, false);
+ test_serialize_leaf_with_empty_basement_nodes(read_none, true);
+ test_serialize_leaf_with_empty_basement_nodes(read_all, true);
+ test_serialize_leaf_with_empty_basement_nodes(read_compressed, true);
+
+ test_serialize_leaf_with_large_rows(read_none, false);
+ test_serialize_leaf_with_large_rows(read_all, false);
+ test_serialize_leaf_with_large_rows(read_compressed, false);
+ test_serialize_leaf_with_large_rows(read_none, true);
+ test_serialize_leaf_with_large_rows(read_all, true);
+ test_serialize_leaf_with_large_rows(read_compressed, true);
+
+ test_serialize_leaf_with_large_pivots(read_none, false);
+ test_serialize_leaf_with_large_pivots(read_all, false);
+ test_serialize_leaf_with_large_pivots(read_compressed, false);
+ test_serialize_leaf_with_large_pivots(read_none, true);
+ test_serialize_leaf_with_large_pivots(read_all, true);
+ test_serialize_leaf_with_large_pivots(read_compressed, true);
+
+ test_serialize_leaf_with_many_rows(read_none, false);
+ test_serialize_leaf_with_many_rows(read_all, false);
+ test_serialize_leaf_with_many_rows(read_compressed, false);
+ test_serialize_leaf_with_many_rows(read_none, true);
+ test_serialize_leaf_with_many_rows(read_all, true);
+ test_serialize_leaf_with_many_rows(read_compressed, true);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test-cursor-2.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test-cursor-2.cc
new file mode 100644
index 00000000..2e104250
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test-cursor-2.cc
@@ -0,0 +1,135 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static const char *fname = TOKU_TEST_FILENAME;
+
+static TOKUTXN const null_txn = 0;
+
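+// Callback for toku_ft_cursor_get: duplicate the returned value into *(void **)v
+// so the caller can later compare the values seen by different cursors.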
+static int
+save_data (uint32_t UU(keylen), const void *UU(key), uint32_t vallen, const void *val, void *v, bool lock_only) {
+ if (lock_only) return 0;
+ assert(key!=NULL);
+ void **CAST_FROM_VOIDP(vp, v);
+ *vp = toku_memdup(val, vallen);
+ return 0;
+}
+
+
+// Verify that different cursors return different data items when a DBT is initialized to all zeros (no flags)
+// Note: The ft test used to implement DBTs with per-cursor allocated space, but there isn't any such thing anymore,
+// so this test is a little bit obsolete.
+static void test_multiple_ft_cursor_dbts(int n) {
+ if (verbose) printf("test_multiple_ft_cursors:%d\n", n);
+
+ int r;
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ FT_CURSOR cursors[n];
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ int i;
+ for (i=0; i<n; i++) {
+ DBT kbt,vbt;
+ char key[10],val[10];
+ snprintf(key, sizeof key, "k%04d", i);
+ snprintf(val, sizeof val, "v%04d", i);
+ toku_ft_insert(ft,
+ toku_fill_dbt(&kbt, key, 1+strlen(key)),
+ toku_fill_dbt(&vbt, val, 1+strlen(val)),
+ 0);
+ }
+
+ for (i=0; i<n; i++) {
+ r = toku_ft_cursor(ft, &cursors[i], NULL, false, false);
+ assert(r == 0);
+ }
+
+ void *ptrs[n];
+ for (i=0; i<n; i++) {
+ DBT kbt;
+ char key[10];
+ snprintf(key, sizeof key, "k%04d", i);
+ r = toku_ft_cursor_get(cursors[i],
+ toku_fill_dbt(&kbt, key, 1+strlen(key)),
+ save_data,
+ &ptrs[i],
+ DB_SET);
+ assert(r == 0);
+ }
+
+ for (i=0; i<n; i++) {
+ int j;
+ for (j=i+1; j<n; j++) {
+ assert(strcmp((char*)ptrs[i],(char*)ptrs[j])!=0);
+ }
+ }
+
+ for (i=0; i<n; i++) {
+ toku_ft_cursor_close(cursors[i]);
+ toku_free(ptrs[i]);
+ }
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_ft_cursor(void) {
+ test_multiple_ft_cursor_dbts(1);
+ test_multiple_ft_cursor_dbts(2);
+ test_multiple_ft_cursor_dbts(3);
+}
+
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_ft_cursor();
+ if (verbose) printf("test ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test-cursor.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test-cursor.cc
new file mode 100644
index 00000000..db3426c9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test-cursor.cc
@@ -0,0 +1,914 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static const char *fname = TOKU_TEST_FILENAME;
+
+static TOKUTXN const null_txn = 0;
+
+static int test_cursor_debug = 0;
+
+static int test_ft_cursor_keycompare(DB *desc __attribute__((unused)), const DBT *a, const DBT *b) {
+ return toku_keycompare(a->data, a->size, b->data, b->size);
+}
+
+static void assert_cursor_notfound(FT_HANDLE ft, int position) {
+ FT_CURSOR cursor=0;
+ int r;
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(r==0);
+
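+ // check_pair (from test.h) appears to take {keylen, key, vallen, val, call_count};
+ // an all-zero pair plus the DB_NOTFOUND check below means the callback must never fire.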
+ struct check_pair pair = {0,0,0,0,0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, position);
+ assert(r == DB_NOTFOUND);
+ assert(pair.call_count==0);
+
+ toku_ft_cursor_close(cursor);
+}
+
+static void assert_cursor_value(FT_HANDLE ft, int position, long long value) {
+ FT_CURSOR cursor=0;
+ int r;
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(r==0);
+
+ if (test_cursor_debug && verbose) printf("key: ");
+ struct check_pair pair = {len_ignore, 0, sizeof(value), &value, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, position);
+ assert(r == 0);
+ assert(pair.call_count==1);
+
+ toku_ft_cursor_close(cursor);
+}
+
+static void assert_cursor_first_last(FT_HANDLE ft, long long firstv, long long lastv) {
+ FT_CURSOR cursor=0;
+ int r;
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(r==0);
+
+ if (test_cursor_debug && verbose) printf("first key: ");
+ {
+ struct check_pair pair = {len_ignore, 0, sizeof(firstv), &firstv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_FIRST);
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+
+ if (test_cursor_debug && verbose) printf("last key:");
+ {
+ struct check_pair pair = {len_ignore, 0, sizeof(lastv), &lastv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_LAST);
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+ if (test_cursor_debug && verbose) printf("\n");
+
+ toku_ft_cursor_close(cursor);
+}
+
+static void test_ft_cursor_first(int n) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ int r;
+ int i;
+
+ if (verbose) printf("test_ft_cursor_first:%d\n", n);
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ /* insert a bunch of kv pairs */
+ for (i=0; i<n; i++) {
+ char key[8]; long long v;
+ DBT kbt, vbt;
+
+ snprintf(key, sizeof key, "%4.4d", i);
+ toku_fill_dbt(&kbt, key, strlen(key)+1);
+ v = i;
+ toku_fill_dbt(&vbt, &v, sizeof v);
+ toku_ft_insert(ft, &kbt, &vbt, 0);
+ }
+
+ if (n == 0)
+ assert_cursor_notfound(ft, DB_FIRST);
+ else
+ assert_cursor_value(ft, DB_FIRST, 0);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_ft_cursor_last(int n) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ int r;
+ int i;
+
+ if (verbose) printf("test_ft_cursor_last:%d\n", n);
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ /* insert keys 0, 1, .. (n-1) */
+ for (i=0; i<n; i++) {
+ char key[8]; long long v;
+ DBT kbt, vbt;
+
+ snprintf(key, sizeof key, "%4.4d", i);
+ toku_fill_dbt(&kbt, key, strlen(key)+1);
+ v = i;
+ toku_fill_dbt(&vbt, &v, sizeof v);
+ toku_ft_insert(ft, &kbt, &vbt, 0);
+ }
+
+ if (n == 0)
+ assert_cursor_notfound(ft, DB_LAST);
+ else
+ assert_cursor_value(ft, DB_LAST, n-1);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_ft_cursor_first_last(int n) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ int r;
+ int i;
+
+ if (verbose) printf("test_ft_cursor_first_last:%d\n", n);
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ /* insert a bunch of kv pairs */
+ for (i=0; i<n; i++) {
+ char key[8]; long long v;
+ DBT kbt, vbt;
+
+ snprintf(key, sizeof key, "%4.4d", i);
+ toku_fill_dbt(&kbt, key, strlen(key)+1);
+ v = i;
+ toku_fill_dbt(&vbt, &v, sizeof v);
+
+ toku_ft_insert(ft, &kbt, &vbt, 0);
+ }
+
+ if (n == 0) {
+ assert_cursor_notfound(ft, DB_FIRST);
+ assert_cursor_notfound(ft, DB_LAST);
+ } else
+ assert_cursor_first_last(ft, 0, n-1);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+
+}
+
+static void test_ft_cursor_rfirst(int n) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ int r;
+ int i;
+
+ if (verbose) printf("test_ft_cursor_rfirst:%d\n", n);
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ /* insert keys n-1, n-2, ... , 0 */
+ for (i=n-1; i>=0; i--) {
+ char key[8]; long long v;
+ DBT kbt, vbt;
+
+
+ snprintf(key, sizeof key, "%4.4d", i);
+ toku_fill_dbt(&kbt, key, strlen(key)+1);
+ v = i;
+ toku_fill_dbt(&vbt, &v, sizeof v);
+ toku_ft_insert(ft, &kbt, &vbt, 0);
+ }
+
+ if (n == 0)
+ assert_cursor_notfound(ft, DB_FIRST);
+ else
+ assert_cursor_value(ft, DB_FIRST, 0);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void assert_cursor_walk(FT_HANDLE ft, int n) {
+ FT_CURSOR cursor=0;
+ int i;
+ int r;
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(r==0);
+
+ if (test_cursor_debug && verbose) printf("key: ");
+ for (i=0; ; i++) {
+ long long v = i;
+ struct check_pair pair = {len_ignore, 0, sizeof(v), &v, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ if (r != 0) {
+ assert(pair.call_count==0);
+ break;
+ }
+ assert(pair.call_count==1);
+ }
+ if (test_cursor_debug && verbose) printf("\n");
+ assert(i == n);
+
+ toku_ft_cursor_close(cursor);
+}
+
+static void test_ft_cursor_walk(int n) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ int r;
+ int i;
+
+ if (verbose) printf("test_ft_cursor_walk:%d\n", n);
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ /* insert a bunch of kv pairs */
+ for (i=0; i<n; i++) {
+ char key[8]; long long v;
+ DBT kbt, vbt;
+
+ snprintf(key, sizeof key, "%4.4d", i);
+ toku_fill_dbt(&kbt, key, strlen(key)+1);
+ v = i;
+ toku_fill_dbt(&vbt, &v, sizeof v);
+ toku_ft_insert(ft, &kbt, &vbt, 0);
+ }
+
+ /* walk the tree */
+ assert_cursor_walk(ft, n);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+
+}
+
+static void assert_cursor_rwalk(FT_HANDLE ft, int n) {
+ FT_CURSOR cursor=0;
+ int i;
+ int r;
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(r==0);
+
+ if (test_cursor_debug && verbose) printf("key: ");
+ for (i=n-1; ; i--) {
+ long long v = i;
+ struct check_pair pair = {len_ignore, 0, sizeof v, &v, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_PREV);
+ if (r != 0) {
+ assert(pair.call_count==0);
+ break;
+ }
+ assert(pair.call_count==1);
+ }
+ if (test_cursor_debug && verbose) printf("\n");
+ assert(i == -1);
+
+ toku_ft_cursor_close(cursor);
+}
+
+static void test_ft_cursor_rwalk(int n) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ int r;
+ int i;
+
+ if (verbose) printf("test_ft_cursor_rwalk:%d\n", n);
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ /* insert a bunch of kv pairs */
+ for (i=0; i<n; i++) {
+ int k; long long v;
+ DBT kbt, vbt;
+
+ k = toku_htonl(i);
+ toku_fill_dbt(&kbt, &k, sizeof k);
+ v = i;
+ toku_fill_dbt(&vbt, &v, sizeof v);
+ toku_ft_insert(ft, &kbt, &vbt, 0);
+ }
+
+ /* walk the tree */
+ assert_cursor_rwalk(ft, n);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+
+}
+
+static int
+ascending_key_string_checkf (uint32_t keylen, const void *key, uint32_t UU(vallen), const void *UU(val), void *v, bool lock_only)
+// the keys are strings. Verify that the keylen matches the key and that the keys are ascending. Use (char**)v to hold a
+// malloc'd previous string.
+{
+ if (lock_only) return 0;
+ if (key!=NULL) {
+ assert(keylen == 1+strlen((char*)key));
+ char **CAST_FROM_VOIDP(prevkeyp, v);
+ char *prevkey = *prevkeyp;
+ if (prevkey!=0) {
+ assert(strcmp(prevkey, (char*)key)<0);
+ toku_free(prevkey);
+ }
+ *prevkeyp = toku_strdup((char*) key);
+ }
+ return 0;
+}
+
+// The keys are strings (null terminated)
+static void assert_cursor_walk_inorder(FT_HANDLE ft, int n) {
+ FT_CURSOR cursor=0;
+ int i;
+ int r;
+ char *prevkey = 0;
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(r==0);
+
+ if (test_cursor_debug && verbose) printf("key: ");
+ for (i=0; ; i++) {
+ r = toku_ft_cursor_get(cursor, NULL, ascending_key_string_checkf, &prevkey, DB_NEXT);
+ if (r != 0) {
+ break;
+ }
+ assert(prevkey!=0);
+ }
+ if (prevkey) toku_free(prevkey);
+ if (test_cursor_debug && verbose) printf("\n");
+ assert(i == n);
+
+ toku_ft_cursor_close(cursor);
+}
+
+static void test_ft_cursor_rand(int n) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ int r;
+ int i;
+
+ if (verbose) printf("test_ft_cursor_rand:%d\n", n);
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ /* insert a bunch of kv pairs */
+ for (i=0; i<n; i++) {
+ char key[8]; long long v;
+ DBT kbt, vbt;
+
+ for (;;) {
+ v = ((long long) random() << 32) + random();
+ snprintf(key, sizeof key, "%lld", v);
+ toku_fill_dbt(&kbt, key, strlen(key)+1);
+ v = i;
+ toku_fill_dbt(&vbt, &v, sizeof v);
+ struct check_pair pair = {kbt.size, key, len_ignore, 0, 0};
+ r = toku_ft_lookup(ft, &kbt, lookup_checkf, &pair);
+ if (r == 0) {
+ assert(pair.call_count==1);
+ if (verbose) printf("dup");
+ continue;
+ }
+ assert(pair.call_count==0);
+ toku_ft_insert(ft, &kbt, &vbt, 0);
+ break;
+ }
+ }
+
+ /* walk the tree */
+ assert_cursor_walk_inorder(ft, n);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_ft_cursor_split(int n) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ FT_CURSOR cursor=0;
+ int r;
+ int keyseqnum;
+ int i;
+
+ if (verbose) printf("test_ft_cursor_split:%d\n", n);
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ /* insert a bunch of kv pairs */
+ for (keyseqnum=0; keyseqnum < n/2; keyseqnum++) {
+ DBT kbt, vbt;
+ char key[8]; long long v;
+
+ snprintf(key, sizeof key, "%4.4d", keyseqnum);
+ toku_fill_dbt(&kbt, key, strlen(key)+1);
+ v = keyseqnum;
+ toku_fill_dbt(&vbt, &v, sizeof v);
+ toku_ft_insert(ft, &kbt, &vbt, 0);
+ }
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(r==0);
+
+ if (test_cursor_debug && verbose) printf("key: ");
+ for (i=0; i<n/2; i++) {
+ struct check_pair pair = {len_ignore, 0, len_ignore, 0, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ assert(r==0);
+ assert(pair.call_count==1);
+ }
+ if (test_cursor_debug && verbose) printf("\n");
+
+ for (; keyseqnum<n; keyseqnum++) {
+ DBT kbt,vbt;
+ char key[8]; long long v;
+
+ snprintf(key, sizeof key, "%4.4d", keyseqnum);
+ toku_fill_dbt(&kbt, key, strlen(key)+1);
+ v = keyseqnum;
+ toku_fill_dbt(&vbt, &v, sizeof v);
+ toku_ft_insert(ft, &kbt, &vbt, 0);
+ }
+
+ if (test_cursor_debug && verbose) printf("key: ");
+ // Just loop through the cursor
+ for (;;) {
+ struct check_pair pair = {len_ignore, 0, len_ignore, 0, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ if (r != 0) {
+ assert(pair.call_count==0);
+ break;
+ }
+ assert(pair.call_count==1);
+ }
+ if (test_cursor_debug && verbose) printf("\n");
+
+ toku_ft_cursor_close(cursor);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_multiple_ft_cursors(int n) {
+ if (verbose) printf("test_multiple_ft_cursors:%d\n", n);
+
+ int r;
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ FT_CURSOR cursors[n];
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ int i;
+ for (i=0; i<n; i++) {
+ r = toku_ft_cursor(ft, &cursors[i], NULL, false, false);
+ assert(r == 0);
+ }
+
+ for (i=0; i<n; i++) {
+ toku_ft_cursor_close(cursors[i]);
+ }
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
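+// Smallest r such that 16^r >= n; test_multiple_ft_cursor_walk uses this below,
+// presumably as a tree-height estimate, when sizing its cachetable.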
+static int log16(int n) {
+ int r = 0;
+ int b = 1;
+ while (b < n) {
+ b *= 16;
+ r += 1;
+ }
+ return r;
+}
+
+static void test_multiple_ft_cursor_walk(int n) {
+ if (verbose) printf("test_multiple_ft_cursor_walk:%d\n", n);
+
+ int r;
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ const int cursor_gap = 1000;
+ const int ncursors = n/cursor_gap;
+ FT_CURSOR cursors[ncursors];
+
+ unlink(fname);
+
+ int nodesize = 1<<12;
+ int h = log16(n);
+ int cachesize = 2 * h * ncursors * nodesize;
+ toku_cachetable_create(&ct, cachesize, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ int c;
+ /* create the cursors */
+ for (c=0; c<ncursors; c++) {
+ r = toku_ft_cursor(ft, &cursors[c], NULL, false, false);
+ assert(r == 0);
+ }
+
+
+ /* insert keys 0, 1, 2, ... n-1 */
+ int i;
+ for (i=0; i<n; i++) {
+ {
+ int k = toku_htonl(i);
+ int v = i;
+ DBT key, val;
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_fill_dbt(&val, &v, sizeof v);
+
+ toku_ft_insert(ft, &key, &val, 0);
+ }
+
+ /* point cursor i / cursor_gap to the current last key i */
+ if ((i % cursor_gap) == 0) {
+ c = i / cursor_gap;
+ struct check_pair pair = {len_ignore, 0, len_ignore, 0, 0};
+ r = toku_ft_cursor_get(cursors[c], NULL, lookup_checkf, &pair, DB_LAST);
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+ }
+
+ /* walk the cursors by cursor_gap */
+ for (i=0; i<cursor_gap; i++) {
+ for (c=0; c<ncursors; c++) {
+ int vv = c*cursor_gap + i + 1;
+ struct check_pair pair = {len_ignore, 0, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursors[c], NULL, lookup_checkf, &pair, DB_NEXT);
+ if (r == DB_NOTFOUND) {
+ /* we already consumed 1 previously */
+ assert(pair.call_count==0);
+ assert(i == cursor_gap-1);
+ } else {
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+ }
+ }
+
+ for (i=0; i<ncursors; i++) {
+ toku_ft_cursor_close(cursors[i]);
+ }
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_ft_cursor_set(int n, int cursor_op) {
+ if (verbose) printf("test_ft_cursor_set:%d %d\n", n, cursor_op);
+
+ int r;
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ FT_CURSOR cursor=0;
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ int i;
+
+ /* insert keys 0, 10, 20 .. 10*(n-1) */
+ for (i=0; i<n; i++) {
+ int k = toku_htonl(10*i);
+ int v = 10*i;
+ DBT key,val;
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(ft, &key, &val, 0);
+ }
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(r==0);
+
+ /* set cursor to random keys in set { 0, 10, 20, .. 10*(n-1) } */
+ for (i=0; i<n; i++) {
+ int vv;
+
+ int v = 10*(random() % n);
+ int k = toku_htonl(v);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ struct check_pair pair = {sizeof k, 0, sizeof vv, &v, 0};
+ if (cursor_op == DB_SET) pair.key = &k; // if it is a set operation, make sure that the result we get is the right one.
+ r = toku_ft_cursor_get(cursor, &key, lookup_checkf, &pair, cursor_op);
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+
+ /* try to set cursor to keys not in the tree, all should fail */
+ for (i=0; i<10*n; i++) {
+ if (i % 10 == 0)
+ continue;
+ int k = toku_htonl(i);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ struct check_pair pair = {0, 0, 0, 0, 0};
+ r = toku_ft_cursor_get(cursor, &key, lookup_checkf, &pair, DB_SET);
+ CKERR2(r,DB_NOTFOUND);
+ assert(pair.call_count==0);
+ assert(key.data == &k); // make sure that no side effect happened on key
+ assert((unsigned int)k==toku_htonl(i));
+ }
+
+ toku_ft_cursor_close(cursor);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_ft_cursor_set_range(int n) {
+ if (verbose) printf("test_ft_cursor_set_range:%d\n", n);
+
+ int r;
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ FT_CURSOR cursor=0;
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(r==0);
+
+ int i;
+
+ /* insert keys 0, 10, 20 .. 10*(n-1) */
+ int max_key = 10*(n-1);
+ for (i=0; i<n; i++) {
+ int k = toku_htonl(10*i);
+ int v = 10*i;
+ DBT key, val;
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(ft, &key, &val, 0);
+ }
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(r==0);
+
+ /* pick random keys v in 0 <= v < 10*n; the cursor should point
+ to the smallest key in the tree that is >= v */
+ for (i=0; i<n; i++) {
+
+ int v = random() % (10*n);
+ int k = toku_htonl(v);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ int vv = ((v+9)/10)*10;
+ struct check_pair pair = {sizeof k, 0, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, &key, lookup_checkf, &pair, DB_SET_RANGE);
+ if (v > max_key) {
+ /* there is no smallest key if v > the max key */
+ assert(r == DB_NOTFOUND);
+ assert(pair.call_count==0);
+ } else {
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+ }
+
+ toku_ft_cursor_close(cursor);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_ft_cursor_delete(int n) {
+ if (verbose) printf("test_ft_cursor_delete:%d\n", n);
+
+ int error;
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ FT_CURSOR cursor=0;
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(error == 0);
+
+ error = toku_ft_cursor(ft, &cursor, NULL, false, false);
+ assert(error == 0);
+
+ DBT key, val;
+ int k, v;
+
+ int i;
+ /* insert keys 0, 1, 2, .. (n-1) */
+ for (i=0; i<n; i++) {
+ k = toku_htonl(i);
+ v = i;
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(ft, &key, &val, 0);
+ }
+
+ /* walk the tree and delete under the cursor */
+ for (;;) {
+ struct check_pair pair = {len_ignore, 0, len_ignore, 0, 0};
+ error = toku_ft_cursor_get(cursor, &key, lookup_checkf, &pair, DB_NEXT);
+ if (error == DB_NOTFOUND) {
+ assert(pair.call_count==0);
+ break;
+ }
+ assert(error == 0);
+ assert(pair.call_count==1);
+
+ error = toku_ft_cursor_delete(cursor, 0, null_txn);
+ assert(error == 0);
+ }
+
+ error = toku_ft_cursor_delete(cursor, 0, null_txn);
+ assert(error != 0);
+
+ toku_ft_cursor_close(cursor);
+
+ error = toku_close_ft_handle_nolsn(ft, 0);
+ assert(error == 0);
+
+ toku_cachetable_close(&ct);
+}
+
+static int test_ft_cursor_inc = 1000;
+static int test_ft_cursor_limit = 10000;
+
+static void test_ft_cursor(void) {
+ int n;
+
+ test_multiple_ft_cursors(1);
+ test_multiple_ft_cursors(2);
+ test_multiple_ft_cursors(3);
+
+ for (n=0; n<test_ft_cursor_limit; n += test_ft_cursor_inc) {
+ test_ft_cursor_first(n);
+ }
+ for (n=0; n<test_ft_cursor_limit; n += test_ft_cursor_inc) {
+ test_ft_cursor_rfirst(n);
+ }
+ for (n=0; n<test_ft_cursor_limit; n += test_ft_cursor_inc) {
+ test_ft_cursor_walk(n);
+ }
+ for (n=0; n<test_ft_cursor_limit; n += test_ft_cursor_inc) {
+ test_ft_cursor_last(n);
+ }
+ for (n=0; n<test_ft_cursor_limit; n += test_ft_cursor_inc) {
+ test_ft_cursor_first_last(n);
+ }
+ for (n=0; n<test_ft_cursor_limit; n += test_ft_cursor_inc) {
+ test_ft_cursor_split(n);
+ }
+ for (n=0; n<test_ft_cursor_limit; n += test_ft_cursor_inc) {
+ test_ft_cursor_rand(n);
+ }
+ for (n=0; n<test_ft_cursor_limit; n += test_ft_cursor_inc) {
+ test_ft_cursor_rwalk(n);
+ }
+
+ test_ft_cursor_set(1000, DB_SET);
+ test_ft_cursor_set(10000, DB_SET);
+ test_ft_cursor_set(1000, DB_SET_RANGE);
+ test_ft_cursor_set_range(1000);
+ test_ft_cursor_set_range(10000);
+
+
+ test_ft_cursor_delete(1000);
+ test_multiple_ft_cursor_walk(10000);
+ test_multiple_ft_cursor_walk(100000);
+}
+
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_ft_cursor();
+ if (verbose) printf("test ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test-header.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test-header.cc
new file mode 100644
index 00000000..c668b941
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test-header.cc
@@ -0,0 +1,95 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// The purpose of this test is to verify that certain information in the
+// ft_header is properly serialized and deserialized.
+
+
+static TOKUTXN const null_txn = 0;
+
+static void test_header (void) {
+ FT_HANDLE t;
+ int r;
+ CACHETABLE ct;
+ const char *fname = TOKU_TEST_FILENAME;
+
+ // First create dictionary
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+ // now insert some info into the header
+ FT ft = t->ft;
+ ft->h->set_dirty();
+ // cast away const because we actually want to fiddle with the header
+ // in this test
+ *((int *) &ft->h->layout_version_original) = 13;
+ ft->layout_version_read_from_disk = 14;
+ *((uint32_t *) &ft->h->build_id_original) = 1234;
+ ft->in_memory_stats = (STAT64INFO_S) {10, 11};
+ ft->h->on_disk_stats = (STAT64INFO_S) {20, 21};
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ // Now read dictionary back into memory and examine some header fields
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 0, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ ft = t->ft;
+ STAT64INFO_S expected_stats = {20, 21}; // on checkpoint, on_disk_stats copied to ft->checkpoint_header->on_disk_stats
+ assert(ft->h->layout_version == FT_LAYOUT_VERSION);
+ assert(ft->h->layout_version_original == 13);
+ assert(ft->layout_version_read_from_disk == FT_LAYOUT_VERSION);
+ assert(ft->h->build_id_original == 1234);
+ assert(ft->in_memory_stats.numrows == expected_stats.numrows);
+ assert(ft->h->on_disk_stats.numbytes == expected_stats.numbytes);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_header();
+ test_header(); /* Make sure it works twice. Redundant, but it's a very cheap test. */
+ if (verbose) printf("test_header ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test.cc
new file mode 100644
index 00000000..706bd94f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test.cc
@@ -0,0 +1,1281 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static TOKUTXN const null_txn = 0;
+
+static const char *fname = TOKU_TEST_FILENAME;
+
+static void test_dump_empty_db (void) {
+ FT_HANDLE t;
+ CACHETABLE ct;
+ int r;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+ if (verbose) { r=toku_dump_ft(stdout, t); assert(r==0); }
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+}
+
+/* Test running multiple trees in different files */
+static void test_multiple_files_of_size (int size) {
+ char n0[TOKU_PATH_MAX+1];
+ toku_path_join(n0, 2, TOKU_TEST_FILENAME, "test0.dat");
+ char n1[TOKU_PATH_MAX+1];
+ toku_path_join(n1, 2, TOKU_TEST_FILENAME, "test1.dat");
+ CACHETABLE ct;
+ FT_HANDLE t0,t1;
+ int r,i;
+ if (verbose) printf("test_multiple_files_of_size(%d)\n", size);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(n0, 1, &t0, size, size / 4, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+ r = toku_open_ft_handle(n1, 1, &t1, size, size / 4, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+ for (i=0; i<10000; i++) {
+ char key[100],val[100];
+ DBT k,v;
+ snprintf(key, 100, "key%d", i);
+ snprintf(val, 100, "val%d", i);
+ toku_ft_insert(t0, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), null_txn);
+ snprintf(val, 100, "Val%d", i);
+ toku_ft_insert(t1, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), null_txn);
+ }
+ //toku_verify_ft(t0);
+ //dump_ft(t0);
+ //dump_ft(t1);
+ r = toku_verify_ft(t0); assert(r==0);
+ r = toku_verify_ft(t1); assert(r==0);
+
+ r = toku_close_ft_handle_nolsn(t0, 0); assert(r==0);
+ r = toku_close_ft_handle_nolsn(t1, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+
+ /* Now see if the data is all there. */
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(n0, 0, &t0, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ if (verbose) printf("%s:%d r=%d\n", __FILE__, __LINE__,r);
+ assert(r==0);
+ r = toku_open_ft_handle(n1, 0, &t1, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ for (i=0; i<10000; i++) {
+ char key[100],val[100];
+ snprintf(key, 100, "key%d", i);
+ snprintf(val, 100, "val%d", i);
+ ft_lookup_and_check_nodup(t0, key, val);
+ snprintf(val, 100, "Val%d", i);
+ ft_lookup_and_check_nodup(t1, key, val);
+ }
+
+ r = toku_close_ft_handle_nolsn(t0, 0); assert(r==0);
+ r = toku_close_ft_handle_nolsn(t1, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+}
+
+static void test_multiple_files (void) {
+ test_multiple_files_of_size (1<<12);
+ test_multiple_files_of_size (1<<20);
+}
+
+/* Test to see that a single db can be opened many times. */
+static void test_multiple_ft_handles_one_db_one_file (void) {
+ enum { MANYN = 2 };
+ int i, r;
+ CACHETABLE ct;
+ FT_HANDLE trees[MANYN];
+ if (verbose) printf("test_multiple_ft_handles_one_db_one_file:");
+
+ unlink(fname);
+ toku_cachetable_create(&ct, 32, ZERO_LSN, nullptr);
+ for (i=0; i<MANYN; i++) {
+ r = toku_open_ft_handle(fname, (i==0), &trees[i], 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+ }
+ for (i=0; i<MANYN; i++) {
+ char k[20], v[20];
+ DBT kb, vb;
+ snprintf(k, 20, "key%d", i);
+ snprintf(v, 20, "val%d", i);
+ toku_ft_insert(trees[i], toku_fill_dbt(&kb, k, strlen(k)+1), toku_fill_dbt(&vb, v, strlen(v)+1), null_txn);
+ }
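+    /* all of the handles refer to the same dictionary, so lookups through trees[0]
+       must see the keys that were inserted through each of the other handles */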
+ for (i=0; i<MANYN; i++) {
+ char k[20],vexpect[20];
+ snprintf(k, 20, "key%d", i);
+ snprintf(vexpect, 20, "val%d", i);
+ ft_lookup_and_check_nodup(trees[0], k, vexpect);
+ }
+ for (i=0; i<MANYN; i++) {
+ r=toku_close_ft_handle_nolsn(trees[i], 0); assert(r==0);
+ }
+ toku_cachetable_close(&ct);
+
+ if (verbose) printf(" ok\n");
+}
+
+
+/* Check to see if data can be read that was written. */
+static void test_read_what_was_written (void) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ int r;
+ const int NVALS=10000;
+
+ if (verbose) {
+ printf("test_read_what_was_written(): "); fflush(stdout);
+ }
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ /* Now see if we can read an empty tree in. */
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 0, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ /* See if we can put something in it. */
+ {
+ DBT k,v;
+ toku_ft_insert(ft, toku_fill_dbt(&k, "hello", 6), toku_fill_dbt(&v, "there", 6), null_txn);
+ }
+
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ /* Now see if we can read it in and get the value. */
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 0, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ ft_lookup_and_check_nodup(ft, "hello", "there");
+
+ assert(toku_verify_ft(ft)==0);
+
+ /* Now put a bunch (NVALS) of things in. */
+ {
+ int i;
+ for (i=0; i<NVALS; i++) {
+ char key[100],val[100];
+ DBT k,v;
+ snprintf(key, 100, "key%d", i);
+ snprintf(val, 100, "val%d", i);
+ if (i<600) {
+		int verify_result=toku_verify_ft(ft);
+ assert(verify_result==0);
+ }
+ toku_ft_insert(ft, toku_fill_dbt(&k, key, strlen(key)+1), toku_fill_dbt(&v, val, strlen(val)+1), null_txn);
+ if (i<600) {
+ int verify_result=toku_verify_ft(ft);
+ if (verify_result) {
+ r = toku_dump_ft(stdout, ft);
+ assert(r==0);
+ assert(0);
+ }
+ {
+ int j;
+ for (j=0; j<=i; j++) {
+ char expectedval[100];
+ snprintf(key, 100, "key%d", j);
+ snprintf(expectedval, 100, "val%d", j);
+ ft_lookup_and_check_nodup(ft, key, expectedval);
+ }
+ }
+ }
+ }
+ }
+ if (verbose) printf("Now read them out\n");
+
+ r = toku_verify_ft(ft);
+ assert(r==0);
+ //dump_ft(ft);
+
+ /* See if we can read them all out again. */
+ {
+ int i;
+ for (i=0; i<NVALS; i++) {
+ char key[100],expectedval[100];
+ snprintf(key, 100, "key%d", i);
+ snprintf(expectedval, 100, "val%d", i);
+ ft_lookup_and_check_nodup(ft, key, expectedval);
+ }
+ }
+
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
+ if (verbose) printf("%s:%d About to close %p\n", __FILE__, __LINE__, ct);
+ toku_cachetable_close(&ct);
+
+
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 0, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ ft_lookup_and_check_nodup(ft, "hello", "there");
+ {
+ int i;
+ for (i=0; i<NVALS; i++) {
+ char key[100],expectedval[100];
+ snprintf(key, 100, "key%d", i);
+ snprintf(expectedval, 100, "val%d", i);
+ ft_lookup_and_check_nodup(ft, key, expectedval);
+ }
+ }
+
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+
+
+
+ if (verbose) printf(" ok\n");
+}
+
+/* Test c_get(DB_LAST) on an empty tree */
+static void test_cursor_last_empty(void) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ FT_CURSOR cursor=0;
+ int r;
+ if (verbose) printf("%s", __FUNCTION__);
+ unlink(fname);
+
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
+ {
+ struct check_pair pair = {0,0,0,0,0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_LAST);
+ assert(pair.call_count==0);
+ assert(r==DB_NOTFOUND);
+ }
+ {
+ struct check_pair pair = {0,0,0,0,0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_FIRST);
+ assert(pair.call_count==0);
+ assert(r==DB_NOTFOUND);
+ }
+ toku_ft_cursor_close(cursor);
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+ toku_cachetable_close(&ct);
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+
+}
+
+static void test_cursor_next (void) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ FT_CURSOR cursor=0;
+ int r;
+ DBT kbt, vbt;
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+ toku_ft_insert(ft, toku_fill_dbt(&kbt, "hello", 6), toku_fill_dbt(&vbt, "there", 6), null_txn);
+ toku_ft_insert(ft, toku_fill_dbt(&kbt, "byebye", 7), toku_fill_dbt(&vbt, "byenow", 7), null_txn);
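+    // "byebye" sorts before "hello" under the builtin memcmp-style comparator, so the
+    // first DB_NEXT below is expected to return the byebye/byenow pair.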
+ if (verbose) printf("%s:%d calling toku_ft_cursor(...)\n", __FILE__, __LINE__);
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
+ toku_init_dbt(&kbt);
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+ toku_init_dbt(&vbt);
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+
+ if (verbose) printf("%s:%d calling toku_ft_cursor_get(...)\n", __FILE__, __LINE__);
+ {
+ struct check_pair pair = {7, "byebye", 7, "byenow", 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ if (verbose) printf("%s:%d called toku_ft_cursor_get(...)\n", __FILE__, __LINE__);
+ assert(r==0);
+ assert(pair.call_count==1);
+ }
+
+ {
+ struct check_pair pair = {6, "hello", 6, "there", 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ assert(r==0);
+ assert(pair.call_count==1);
+ }
+ {
+ struct check_pair pair = {0, 0, 0, 0, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ assert(r==DB_NOTFOUND);
+ assert(pair.call_count==0);
+ }
+
+ toku_ft_cursor_close(cursor);
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+ toku_cachetable_close(&ct);
+ //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
+
+
+}
+
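+// A deliberately "wrong" comparator: it compares the keys byte by byte starting from
+// the last byte, so fixed-size big-endian keys sort as if they were little-endian.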
+static int wrong_compare_fun(DB* UU(desc), const DBT *a, const DBT *b) {
+ unsigned int i;
+ unsigned char *CAST_FROM_VOIDP(ad, a->data);
+ unsigned char *CAST_FROM_VOIDP(bd, b->data);
+ unsigned int siz=a->size;
+ assert(a->size==b->size);
+ //assert(db==&nonce_db); // make sure the db was passed down correctly
+ for (i=0; i<siz; i++) {
+ if (ad[siz-1-i]<bd[siz-1-i]) return -1;
+ if (ad[siz-1-i]>bd[siz-1-i]) return +1;
+ }
+ return 0;
+
+}
+
+static void test_wrongendian_compare (int wrong_p, unsigned int N) {
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ int r;
+ unsigned int i;
+
+ unlink(fname);
+
+
+ {
+ char a[4]={0,1,0,0};
+ char b[4]={1,0,0,0};
+ DBT at, bt;
+ assert(wrong_compare_fun(NULL, toku_fill_dbt(&at, a, 4), toku_fill_dbt(&bt, b, 4))>0);
+ assert(wrong_compare_fun(NULL, toku_fill_dbt(&at, b, 4), toku_fill_dbt(&bt, a, 4))<0);
+ }
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ //printf("%s:%d WRONG=%d\n", __FILE__, __LINE__, wrong_p);
+
+ if (0) { // ???? Why is this commented out?
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<20, 1<<17, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, wrong_p ? wrong_compare_fun : toku_builtin_compare_fun); assert(r==0);
+ for (i=1; i<257; i+=255) {
+ unsigned char a[4],b[4];
+ b[3] = a[0] = (unsigned char)(i&255);
+ b[2] = a[1] = (unsigned char)((i>>8)&255);
+ b[1] = a[2] = (unsigned char)((i>>16)&255);
+ b[0] = a[3] = (unsigned char)((i>>24)&255);
+ DBT kbt;
+ toku_fill_dbt(&kbt, a, sizeof a);
+ DBT vbt;
+ toku_fill_dbt(&vbt, b, sizeof b);
+ if (verbose)
+ printf("%s:%d insert: %02x%02x%02x%02x -> %02x%02x%02x%02x\n", __FILE__, __LINE__,
+ ((char*)kbt.data)[0], ((char*)kbt.data)[1], ((char*)kbt.data)[2], ((char*)kbt.data)[3],
+ ((char*)vbt.data)[0], ((char*)vbt.data)[1], ((char*)vbt.data)[2], ((char*)vbt.data)[3]);
+ toku_ft_insert(ft, &kbt, &vbt, null_txn);
+ }
+ {
+ FT_CURSOR cursor=0;
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
+
+ for (i=0; i<2; i++) {
+ unsigned char a[4],b[4];
+ struct check_pair pair = {4, &a, 4, &b, 0};
+ b[3] = a[0] = (unsigned char)(i&255);
+ b[2] = a[1] = (unsigned char)((i>>8)&255);
+ b[1] = a[2] = (unsigned char)((i>>16)&255);
+ b[0] = a[3] = (unsigned char)((i>>24)&255);
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ assert(r==0);
+ assert(pair.call_count==1);
+ }
+
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ }
+ }
+
+ {
+ toku_cachetable_verify(ct);
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<20, 1<<17, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, wrong_p ? wrong_compare_fun : toku_builtin_compare_fun); assert(r==0);
+ toku_cachetable_verify(ct);
+
+ for (i=0; i<N; i++) {
+ unsigned char a[4],b[4];
+ b[3] = a[0] = (unsigned char)(i&255);
+ b[2] = a[1] = (unsigned char)((i>>8)&255);
+ b[1] = a[2] = (unsigned char)((i>>16)&255);
+ b[0] = a[3] = (unsigned char)((i>>24)&255);
+ DBT kbt;
+ toku_fill_dbt(&kbt, a, sizeof a);
+ DBT vbt;
+ toku_fill_dbt(&vbt, b, sizeof b);
+ if (0) printf("%s:%d insert: %02x%02x%02x%02x -> %02x%02x%02x%02x\n", __FILE__, __LINE__,
+ ((unsigned char*)kbt.data)[0], ((unsigned char*)kbt.data)[1], ((unsigned char*)kbt.data)[2], ((unsigned char*)kbt.data)[3],
+ ((unsigned char*)vbt.data)[0], ((unsigned char*)vbt.data)[1], ((unsigned char*)vbt.data)[2], ((unsigned char*)vbt.data)[3]);
+ toku_ft_insert(ft, &kbt, &vbt, null_txn);
+ toku_cachetable_verify(ct);
+ }
+ FT_CURSOR cursor=0;
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
+
+ for (i=0; i<N; i++) {
+ unsigned char a[4],b[4];
+ struct check_pair pair = {4, &a, 4, &b, 0};
+ b[3] = a[0] = (unsigned char)(i&255);
+ b[2] = a[1] = (unsigned char)((i>>8)&255);
+ b[1] = a[2] = (unsigned char)((i>>16)&255);
+ b[0] = a[3] = (unsigned char)((i>>24)&255);
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ assert(r==0);
+ assert(pair.call_count==1);
+ toku_cachetable_verify(ct);
+ }
+ toku_ft_cursor_close(cursor);
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r==0);
+ }
+ toku_cachetable_close(&ct);
+
+}
+
+static int test_ft_cursor_keycompare(DB *desc __attribute__((unused)), const DBT *a, const DBT *b) {
+ return toku_keycompare(a->data, a->size, b->data, b->size);
+}
+
+static void test_large_kv(int bsize, int ksize, int vsize) {
+ FT_HANDLE t;
+ int r;
+ CACHETABLE ct;
+
+ if (verbose) printf("test_large_kv: %d %d %d\n", bsize, ksize, vsize);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, bsize, bsize / 4, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ DBT key, val;
+ char *k, *v;
+ XCALLOC_N(ksize, k);
+ XCALLOC_N(vsize, v);
+ toku_fill_dbt(&key, k, ksize);
+ toku_fill_dbt(&val, v, vsize);
+
+ toku_ft_insert(t, &key, &val, 0);
+
+ toku_free(k);
+ toku_free(v);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+/*
+ * test the key and value limits
+ * the current implementation crashes when kvsize == bsize/2 rather than failing gracefully
+ */
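+/* with bsize = 1024 the loop below tries kvsize = 4, 8, ..., 256, stopping just short
+   of the kvsize == bsize/2 case that is known to crash */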
+static void test_ft_limits(void) {
+ int bsize = 1024;
+ int kvsize = 4;
+ while (kvsize < bsize/2) {
+ test_large_kv(bsize, kvsize, kvsize);
+ kvsize *= 2;
+ }
+}
+
+/*
+ * verify that a delete on an empty tree fails
+ */
+static void test_ft_delete_empty(void) {
+ if (verbose) printf("test_ft_delete_empty\n");
+
+ FT_HANDLE t;
+ int r;
+ CACHETABLE ct;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, 4096, 1024, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ DBT key;
+ int k = toku_htonl(1);
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_ft_delete(t, &key, null_txn);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+/*
+ * insert n keys, delete all n keys, verify that lookups for all the keys fail,
+ * verify that a cursor walk of the tree finds nothing
+ */
+static void test_ft_delete_present(int n) {
+ if (verbose) printf("test_ft_delete_present:%d\n", n);
+
+ FT_HANDLE t;
+ int r;
+ CACHETABLE ct;
+ int i;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, 4096, 1024, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ /* insert 0 .. n-1 */
+ for (i=0; i<n; i++) {
+ int k = toku_htonl(i);
+ int v = i;
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ DBT val;
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(t, &key, &val, 0);
+ }
+
+ /* delete 0 .. n-1 */
+ for (i=0; i<n; i++) {
+ int k = toku_htonl(i);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_ft_delete(t, &key, null_txn);
+ }
+
+ /* lookups should all fail */
+ for (i=0; i<n; i++) {
+ int k = toku_htonl(i);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ struct check_pair pair = {0, 0, 0, 0, 0};
+ r = toku_ft_lookup(t, &key, lookup_checkf, &pair);
+ assert(r == DB_NOTFOUND);
+ assert(pair.call_count==0);
+ }
+
+ /* cursor should not find anything */
+ FT_CURSOR cursor=0;
+
+ r = toku_ft_cursor(t, &cursor, NULL, false, false);
+ assert(r == 0);
+
+ {
+ struct check_pair pair = {0,0,0,0,0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_FIRST);
+ assert(r != 0);
+ assert(pair.call_count==0);
+ }
+
+ toku_ft_cursor_close(cursor);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+static void test_ft_delete_not_present(int n) {
+ if (verbose) printf("test_ft_delete_not_present:%d\n", n);
+
+ FT_HANDLE t;
+ int r;
+ CACHETABLE ct;
+ int i;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, 4096, 1024, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ DBT key, val;
+ int k, v;
+
+ /* insert 0 .. n-1 */
+ for (i=0; i<n; i++) {
+ k = toku_htonl(i); v = i;
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(t, &key, &val, 0);
+ }
+
+ /* delete 0 .. n-1 */
+ for (i=0; i<n; i++) {
+ k = toku_htonl(i);
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_ft_delete(t, &key, null_txn);
+ }
+
+ /* try to delete key n+1 not in the tree */
+ k = toku_htonl(n+1);
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_ft_delete(t, &key, null_txn);
+    /* the delete may be buffered or may be applied directly to a leaf node;
+       either way toku_ft_delete() returns void, so there is no status to check */
+    if (verbose) printf("toku_ft_delete k=%d\n", k);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+static void test_ft_delete_cursor_first(int n) {
+ if (verbose) printf("test_ft_delete_cursor_first:%d\n", n);
+
+ FT_HANDLE t;
+ int r;
+ CACHETABLE ct;
+ int i;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, 4096, 1024, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ /* insert 0 .. n-1 */
+ for (i=0; i<n; i++) {
+ int k = toku_htonl(i);
+ int v = i;
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ DBT val;
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(t, &key, &val, 0);
+ }
+
+ /* lookups 0 .. n-1 should succeed */
+ for (i=0; i<n; i++) {
+ int k = toku_htonl(i);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ int k2 = k;
+ int v = i;
+ struct check_pair pair = {sizeof k, &k2, sizeof v, &v, 0};
+ r = toku_ft_lookup(t, &key, lookup_checkf, &pair);
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+
+ /* delete 0 .. n-2 */
+ for (i=0; i<n-1; i++) {
+ {
+ int k = toku_htonl(i);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_ft_delete(t, &key, null_txn);
+ }
+
+ {
+ int k = toku_htonl(i);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ struct check_pair pair = {0,0,0,0,0};
+ r = toku_ft_lookup(t, &key, lookup_checkf, &pair);
+ assert(r == DB_NOTFOUND);
+ assert(pair.call_count==0);
+ }
+ }
+
+ /* lookup of 0 .. n-2 should all fail */
+ for (i=0; i<n-1; i++) {
+ int k = toku_htonl(i);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ struct check_pair pair = {0,0,0,0,0};
+ r = toku_ft_lookup(t, &key, lookup_checkf, &pair);
+ assert(r == DB_NOTFOUND);
+ assert(pair.call_count==0);
+ }
+
+ /* cursor should find the last key: n-1 */
+ FT_CURSOR cursor=0;
+
+ r = toku_ft_cursor(t, &cursor, NULL, false, false);
+ assert(r == 0);
+
+ {
+ int kv = toku_htonl(n-1);
+ int vv = n-1;
+ struct check_pair pair = {sizeof kv, &kv, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_FIRST);
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+
+ toku_ft_cursor_close(cursor);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+/* test for bug: insert message in a nonleaf node, delete removes the
+ insert message, but lookup finds the insert message
+
+ build a 2 level tree, and expect the last insertion to be
+ buffered. then delete and lookup. */
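+/* (with the 4096-byte nodesize used below, n=512 four-byte keys should be enough to
+   leave the last insert buffered in a nonleaf node when the delete and lookup run) */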
+
+static void test_insert_delete_lookup(int n) {
+ if (verbose) printf("test_insert_delete_lookup:%d\n", n);
+
+ FT_HANDLE t;
+ int r;
+ CACHETABLE ct;
+ int i;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, 4096, 1024, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ /* insert 0 .. n-1 */
+ for (i=0; i<n; i++) {
+ int k = toku_htonl(i);
+ int v = i;
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ DBT val;
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(t, &key, &val, 0);
+ }
+
+ if (n > 0) {
+ {
+ int k = toku_htonl(n-1);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_ft_delete(t, &key, null_txn);
+ }
+ {
+ int k = toku_htonl(n-1);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ struct check_pair pair = {0,0,0,0,0};
+ r = toku_ft_lookup(t, &key, lookup_checkf, &pair);
+ assert(r == DB_NOTFOUND);
+ assert(pair.call_count==0);
+ }
+ }
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+/* insert <0,0>, <0,1>, .. <0,n>
+ delete_both <0,i> for all even i
+ verify <0,i> exists for all odd i */
+
+
+static void test_ft_delete(void) {
+ test_ft_delete_empty();
+ test_ft_delete_present(1);
+ test_ft_delete_present(100);
+ test_ft_delete_present(500);
+ test_ft_delete_not_present(1);
+ test_ft_delete_not_present(100);
+ test_ft_delete_not_present(500);
+ test_ft_delete_cursor_first(1);
+ test_ft_delete_cursor_first(100);
+ test_ft_delete_cursor_first(500);
+ test_ft_delete_cursor_first(10000);
+ test_insert_delete_lookup(2);
+ test_insert_delete_lookup(512);
+}
+
+static void test_new_ft_cursor_create_close (void) {
+ int r;
+ FT_HANDLE ft=0;
+ int n = 8;
+ FT_CURSOR cursors[n];
+
+ toku_ft_handle_create(&ft);
+
+ int i;
+ for (i=0; i<n; i++) {
+ r = toku_ft_cursor(ft, &cursors[i], NULL, false, false); assert(r == 0);
+ }
+
+ for (i=0; i<n; i++) {
+ toku_ft_cursor_close(cursors[i]);
+ }
+
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r == 0);
+}
+
+static void test_new_ft_cursor_first(int n) {
+ if (verbose) printf("test_ft_cursor_first:%d\n", n);
+
+ FT_HANDLE t=0;
+ int r;
+ CACHETABLE ct;
+ int i;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ toku_ft_handle_create(&t);
+ toku_ft_handle_set_nodesize(t, 4096);
+ r = toku_ft_handle_open(t, fname, 1, 1, ct, null_txn); assert(r==0);
+
+ DBT key, val;
+ int k, v;
+
+ for (i=0; i<n; i++) {
+ k = toku_htonl(i); v = toku_htonl(i);
+ toku_ft_insert(t, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
+ }
+
+ FT_CURSOR cursor=0;
+
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
+
+ toku_init_dbt(&key); key.flags = DB_DBT_REALLOC;
+ toku_init_dbt(&val); val.flags = DB_DBT_REALLOC;
+
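+    // Each pass reads the smallest remaining pair with DB_FIRST and then deletes it
+    // through the cursor, so the loop should visit all n pairs in ascending order.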
+ for (i=0; ; i++) {
+ int kv = toku_htonl(i);
+ int vv = toku_htonl(i);
+ struct check_pair pair = {sizeof kv, &kv, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_FIRST);
+ if (r != 0) {
+ assert(pair.call_count==0);
+ break;
+ }
+ assert(pair.call_count==1);
+
+ r = toku_ft_cursor_delete(cursor, 0, null_txn); assert(r == 0);
+ }
+ assert(i == n);
+
+ if (key.data) toku_free(key.data);
+ if (val.data) toku_free(val.data);
+
+ toku_ft_cursor_close(cursor);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+static void test_new_ft_cursor_last(int n) {
+ if (verbose) printf("test_ft_cursor_last:%d\n", n);
+
+ FT_HANDLE t=0;
+ int r;
+ CACHETABLE ct;
+ int i;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ toku_ft_handle_create(&t);
+ toku_ft_handle_set_nodesize(t, 4096);
+ r = toku_ft_handle_open(t, fname, 1, 1, ct, null_txn); assert(r==0);
+
+ DBT key, val;
+ int k, v;
+
+ for (i=0; i<n; i++) {
+ k = toku_htonl(i); v = toku_htonl(i);
+ toku_ft_insert(t, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
+ }
+
+ FT_CURSOR cursor=0;
+
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
+
+ toku_init_dbt(&key); key.flags = DB_DBT_REALLOC;
+ toku_init_dbt(&val); val.flags = DB_DBT_REALLOC;
+
+ for (i=n-1; ; i--) {
+ int kk = toku_htonl(i);
+ int vv = toku_htonl(i);
+ struct check_pair pair = {sizeof kk, &kk, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_LAST);
+ if (r != 0) {
+ assert(pair.call_count==0);
+ break;
+ }
+ assert(pair.call_count==1);
+
+ //if (n==512 && i<=360) { printf("i=%d\n", i); toku_dump_ft(stdout, t); }
+ r = toku_ft_cursor_delete(cursor, 0, null_txn); assert(r == 0);
+ }
+ assert(i == -1);
+
+ if (key.data) toku_free(key.data);
+ if (val.data) toku_free(val.data);
+
+ toku_ft_cursor_close(cursor);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+static void test_new_ft_cursor_next(int n) {
+ if (verbose) printf("test_ft_cursor_next:%d\n", n);
+
+ FT_HANDLE t=0;
+ int r;
+ CACHETABLE ct;
+ int i;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ toku_ft_handle_create(&t);
+ toku_ft_handle_set_nodesize(t, 4096);
+ r = toku_ft_handle_open(t, fname, 1, 1, ct, null_txn); assert(r==0);
+
+ for (i=0; i<n; i++) {
+ DBT key, val;
+ int k = toku_htonl(i);
+ int v = toku_htonl(i);
+ toku_ft_insert(t, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
+ }
+
+ FT_CURSOR cursor=0;
+
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
+
+ for (i=0; ; i++) {
+ int kk = toku_htonl(i);
+ int vv = toku_htonl(i);
+ struct check_pair pair = {sizeof kk, &kk, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ if (r != 0) {
+ assert(pair.call_count ==0);
+ break;
+ }
+ assert(pair.call_count==1);
+ }
+ assert(i == n);
+
+ toku_ft_cursor_close(cursor);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+static void test_new_ft_cursor_prev(int n) {
+ if (verbose) printf("test_ft_cursor_prev:%d\n", n);
+
+ FT_HANDLE t=0;
+ int r;
+ CACHETABLE ct;
+ int i;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ toku_ft_handle_create(&t);
+ toku_ft_handle_set_nodesize(t, 4096);
+ r = toku_ft_handle_open(t, fname, 1, 1, ct, null_txn); assert(r==0);
+
+ for (i=0; i<n; i++) {
+ DBT key, val;
+ int k = toku_htonl(i);
+ int v = toku_htonl(i);
+ toku_ft_insert(t, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
+ }
+
+ FT_CURSOR cursor=0;
+
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
+
+ for (i=n-1; ; i--) {
+ int kk = toku_htonl(i);
+ int vv = toku_htonl(i);
+ struct check_pair pair = {sizeof kk, &kk, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_PREV);
+ if (r != 0) {
+ assert(pair.call_count==0);
+ break;
+ }
+ assert(pair.call_count==1);
+ }
+ assert(i == -1);
+
+ toku_ft_cursor_close(cursor);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+static void test_new_ft_cursor_current(int n) {
+ if (verbose) printf("test_ft_cursor_current:%d\n", n);
+
+ FT_HANDLE t=0;
+ int r;
+ CACHETABLE ct;
+ int i;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ toku_ft_handle_create(&t);
+ toku_ft_handle_set_nodesize(t, 4096);
+ r = toku_ft_handle_open(t, fname, 1, 1, ct, null_txn); assert(r==0);
+
+ for (i=0; i<n; i++) {
+ int k = toku_htonl(i);
+ int v = toku_htonl(i);
+ DBT key, val;
+ toku_ft_insert(t, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
+ }
+
+ FT_CURSOR cursor=0;
+
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
+
+ for (i=0; ; i++) {
+ {
+ int kk = toku_htonl(i);
+ int vv = toku_htonl(i);
+ struct check_pair pair = {sizeof kk, &kk, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_FIRST);
+ if (r != 0) {
+ assert(pair.call_count==0);
+ break;
+ }
+ assert(pair.call_count==1);
+ }
+ {
+ int kk = toku_htonl(i);
+ int vv = toku_htonl(i);
+ struct check_pair pair = {sizeof kk, &kk, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_CURRENT);
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+
+ {
+ int kk = toku_htonl(i);
+ int vv = toku_htonl(i);
+ struct check_pair pair = {sizeof kk, &kk, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_CURRENT_BINDING);
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+
+ r = toku_ft_cursor_delete(cursor, 0, null_txn); assert(r == 0);
+
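+        // After the cursor delete, DB_CURRENT on the deleted pair is expected to return
+        // DB_NOTFOUND, while DB_CURRENT_BINDING below still yields the key/value the
+        // cursor was positioned on.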
+ {
+ static int count=0;
+ count++;
+ struct check_pair pair = {0,0,0,0,0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_CURRENT);
+ CKERR2(r,DB_NOTFOUND); // previous DB_KEYEMPTY
+ assert(pair.call_count==0);
+ }
+
+ {
+ int kk = toku_htonl(i);
+ int vv = toku_htonl(i);
+ struct check_pair pair = {sizeof kk, &kk, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_CURRENT_BINDING);
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+ }
+ assert(i == n);
+
+ toku_ft_cursor_close(cursor);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+static void test_new_ft_cursor_set_range(int n) {
+ if (verbose) printf("test_ft_cursor_set_range:%d\n", n);
+
+ int r;
+ CACHETABLE ct;
+ FT_HANDLE ft=0;
+ FT_CURSOR cursor=0;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ toku_ft_handle_create(&ft);
+ toku_ft_handle_set_nodesize(ft, 4096);
+ r = toku_ft_handle_open(ft, fname, 1, 1, ct, null_txn); assert(r==0);
+
+ int i;
+
+ /* insert keys 0, 10, 20 .. 10*(n-1) */
+ int max_key = 10*(n-1);
+ for (i=0; i<n; i++) {
+ DBT key, val;
+ int k = toku_htonl(10*i);
+ int v = 10*i;
+ toku_ft_insert(ft, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
+ }
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
+
+ /* pick random keys v in 0 <= v < 10*n, the cursor should point
+ to the smallest key in the tree that is >= v */
+ for (i=0; i<n; i++) {
+
+ int v = random() % (10*n);
+ int k = toku_htonl(v);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+
+ int vv = (((v+9)/10)*10); // This is the value we should actually find.
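+        // e.g. v=23 rounds up to 30 and v=30 maps to itself; only multiples of 10 are in the tree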
+
+ struct check_pair pair = {sizeof k, NULL, // NULL data means don't check it
+ sizeof vv, &vv,
+ 0};
+ r = toku_ft_cursor_get(cursor, &key, lookup_checkf, &pair, DB_SET_RANGE);
+ if (v > max_key) {
+ /* there is no smallest key if v > the max key */
+ assert(r == DB_NOTFOUND);
+ assert(pair.call_count==0);
+ } else {
+ assert(r == 0);
+ assert(pair.call_count==1);
+ }
+ }
+
+ toku_ft_cursor_close(cursor);
+
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_new_ft_cursor_set(int n, int cursor_op, DB *db) {
+ if (verbose) printf("test_ft_cursor_set:%d %d %p\n", n, cursor_op, db);
+
+ int r;
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ FT_CURSOR cursor=0;
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0);
+
+ int i;
+
+ /* insert keys 0, 10, 20 .. 10*(n-1) */
+ for (i=0; i<n; i++) {
+ DBT key, val;
+ int k = toku_htonl(10*i);
+ int v = 10*i;
+ toku_ft_insert(ft, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
+ }
+
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
+
+ /* set cursor to random keys in set { 0, 10, 20, .. 10*(n-1) } */
+ for (i=0; i<n; i++) {
+
+ int v = 10*(random() % n);
+ int k = toku_htonl(v);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ struct check_pair pair = {sizeof k, &k, sizeof v, &v, 0};
+ r = toku_ft_cursor_get(cursor, &key, lookup_checkf, &pair, cursor_op);
+ assert(r == 0);
+ assert(pair.call_count==1);
+ if (cursor_op == DB_SET) assert(key.data == &k);
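+        // DB_SET is expected to leave the caller's key DBT untouched, so key.data must
+        // still point at the local buffer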
+ }
+
+ /* try to set cursor to keys not in the tree, all should fail */
+ for (i=0; i<10*n; i++) {
+ if (i % 10 == 0)
+ continue;
+ int k = toku_htonl(i);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ struct check_pair pair = {0,0,0,0,0};
+ r = toku_ft_cursor_get(cursor, &key, lookup_checkf, &pair, DB_SET);
+ assert(r == DB_NOTFOUND);
+ assert(pair.call_count==0);
+ assert(key.data == &k);
+ }
+
+ toku_ft_cursor_close(cursor);
+
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void test_new_ft_cursors(void) {
+ test_new_ft_cursor_create_close();
+ test_new_ft_cursor_first(8);
+ test_new_ft_cursor_last(8);
+ test_new_ft_cursor_last(512);
+ test_new_ft_cursor_next(8);
+ test_new_ft_cursor_prev(8);
+ test_new_ft_cursor_current(8);
+ test_new_ft_cursor_next(512);
+ test_new_ft_cursor_set_range(512);
+ test_new_ft_cursor_set(512, DB_SET, 0);
+}
+
+static void ft_blackbox_test (void) {
+
+ test_wrongendian_compare(0, 2);
+ test_wrongendian_compare(1, 2);
+ test_wrongendian_compare(1, 257);
+ test_wrongendian_compare(1, 1000);
+ test_new_ft_cursors();
+
+ test_read_what_was_written(); if (verbose) printf("did read_what_was_written\n");
+ test_cursor_next();
+ test_cursor_last_empty();
+ test_multiple_ft_handles_one_db_one_file();
+ test_dump_empty_db();
+
+
+ if (verbose) printf("test_multiple_files\n");
+ test_multiple_files();
+
+ test_ft_limits();
+
+ test_ft_delete();
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ ft_blackbox_test();
+ if (verbose) printf("test ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test0.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test0.cc
new file mode 100644
index 00000000..f7a26eda
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test0.cc
@@ -0,0 +1,72 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static TOKUTXN const null_txn = 0;
+
+static void test0 (void) {
+ FT_HANDLE t;
+ int r;
+ CACHETABLE ct;
+ const char *fname = TOKU_TEST_FILENAME;
+ if (verbose) printf("%s:%d test0\n", __FILE__, __LINE__);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ if (verbose) printf("%s:%d test0\n", __FILE__, __LINE__);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+ //printf("%s:%d test0\n", __FILE__, __LINE__);
+ //printf("%s:%d n_items_malloced=%lld\n", __FILE__, __LINE__, n_items_malloced);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ //printf("%s:%d n_items_malloced=%lld\n", __FILE__, __LINE__, n_items_malloced);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+ if (verbose) printf("test0 A\n");
+ test0();
+ if (verbose) printf("test0 B\n");
+ test0(); /* Make sure it works twice. */
+
+ if (verbose) printf("test0 ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test1.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test1.cc
new file mode 100644
index 00000000..d3fddb83
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test1.cc
@@ -0,0 +1,75 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static TOKUTXN const null_txn = 0;
+
+static void test1 (void) {
+ FT_HANDLE t;
+ int r;
+ CACHETABLE ct;
+ const char *fname = TOKU_TEST_FILENAME;
+ DBT k,v;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+ toku_ft_insert(t, toku_fill_dbt(&k, "hello", 6), toku_fill_dbt(&v, "there", 6), null_txn);
+ assert(r==0);
+ {
+ struct check_pair pair = {6, "hello", 6, "there", 0};
+ r = toku_ft_lookup(t, toku_fill_dbt(&k, "hello", 6), lookup_checkf, &pair);
+ assert(r==0);
+ assert(pair.call_count==1);
+ }
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ if (verbose) printf("test1 ok\n");
+}
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+ if (verbose) printf("test1\n");
+ test1();
+
+ if (verbose) printf("test1 ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test2.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test2.cc
new file mode 100644
index 00000000..46476691
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test2.cc
@@ -0,0 +1,85 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static TOKUTXN const null_txn = 0;
+
+static void test2 (int limit) {
+ FT_HANDLE t;
+ int r;
+ int i;
+ CACHETABLE ct;
+ const char *fname = TOKU_TEST_FILENAME;
+ if (verbose) printf("%s:%d checking\n", __FILE__, __LINE__);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ if (verbose) printf("%s:%d did setup\n", __FILE__, __LINE__);
+ assert(r==0);
+ for (i=0; i<limit; i++) { // 4096
+ DBT k,v;
+ char key[100],val[100];
+ snprintf(key,100,"hello%d",i);
+ snprintf(val,100,"there%d",i);
+ toku_ft_insert(t, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), null_txn);
+ assert(r==0);
+ r = toku_verify_ft(t); assert(r==0);
+ //printf("%s:%d did insert %d\n", __FILE__, __LINE__, i);
+ }
+ if (verbose) printf("%s:%d inserted\n", __FILE__, __LINE__);
+ r = toku_verify_ft(t); assert(r==0);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ if (verbose) printf("test2 ok\n");
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+ if (verbose) printf("test2 faster\n");
+ test2(2);
+ test2(27);
+ test2(212);
+ test2(4096);
+
+    if (verbose) printf("test2 ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test3.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test3.cc
new file mode 100644
index 00000000..68d93448
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test3.cc
@@ -0,0 +1,110 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <toku_time.h>
+
+static const char *fname = TOKU_TEST_FILENAME;
+
+static const enum toku_compression_method compression_method = TOKU_DEFAULT_COMPRESSION_METHOD;
+
+static TOKUTXN const null_txn = 0;
+
+static void test3 (int nodesize, int basementnodesize, int count) {
+ FT_HANDLE t;
+ int r;
+ struct timeval t0,t1;
+ int i;
+ CACHETABLE ct;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ gettimeofday(&t0, 0);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+ for (i=0; i<count; i++) {
+ char key[100],val[100];
+ DBT k,v;
+ snprintf(key,100,"hello%d",i);
+ snprintf(val,100,"there%d",i);
+ toku_ft_insert(t, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), null_txn);
+ }
+ r = toku_verify_ft(t); assert(r==0);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ gettimeofday(&t1, 0);
+ {
+ double diff = toku_tdiff(&t1, &t0);
+ if (verbose) printf("serial insertions: blocksize=%d %d insertions in %.3f seconds, %.2f insertions/second\n", nodesize, count, diff, count/diff);
+ }
+}
+
+static void ft_blackbox_test (void) {
+ if (verbose) printf("test3 slow\n");
+
+ test3(2048, 512, 1<<15);
+ if (verbose) printf("test3 fast\n");
+
+ //if (verbose) toku_pma_show_stats();
+
+ test3(1<<15, 1<<12, 1024);
+ if (verbose) printf("test3 fast\n");
+
+ test3(1<<18, 1<<15, 1<<20);
+
+
+// test3(1<<19, 1<<16, 1<<20);
+
+// test3(1<<20, 1<<17, 1<<20);
+
+// test3(1<<20, 1<<17, 1<<21);
+
+// test3(1<<20, 1<<17, 1<<22);
+
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ ft_blackbox_test();
+
+ if (verbose) printf("test ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test4.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test4.cc
new file mode 100644
index 00000000..9191427d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test4.cc
@@ -0,0 +1,103 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <toku_time.h>
+
+static const char *fname = TOKU_TEST_FILENAME;
+
+static TOKUTXN const null_txn = 0;
+
+static void test4 (int nodesize, int count) {
+ FT_HANDLE t;
+ int r;
+ struct timeval t0,t1;
+ int i;
+ CACHETABLE ct;
+ gettimeofday(&t0, 0);
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &t, nodesize, nodesize / 8, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+ for (i=0; i<count; i++) {
+ char key[100],val[100];
+ int rv = random();
+ DBT k,v;
+ snprintf(key,100,"hello%d",rv);
+ snprintf(val,100,"there%d",i);
+ toku_ft_insert(t, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), null_txn);
+ }
+ r = toku_verify_ft(t); assert(r==0);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ gettimeofday(&t1, 0);
+ {
+ double diff = toku_tdiff(&t1, &t0);
+ if (verbose) printf("random insertions: blocksize=%d %d insertions in %.3f seconds, %.2f insertions/second\n", nodesize, count, diff, count/diff);
+ }
+}
+
+static void ft_blackbox_test (void) {
+ test4(2048, 1<<14);
+
+ if (0) {
+
+ if (verbose) printf("test4 slow\n");
+ test4(2048, 1<<15);
+
+ //if (verbose) toku_pma_show_stats();
+
+ test4(1<<15, 1024);
+
+ test4(1<<18, 1<<20);
+
+ // Once upon a time srandom(8) caused this test to fail.
+ srandom(8); test4(2048, 1<<15);
+ }
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ ft_blackbox_test();
+
+ if (verbose) printf("test ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test5.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test5.cc
new file mode 100644
index 00000000..74919e3c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test5.cc
@@ -0,0 +1,99 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static TOKUTXN const null_txn = 0;
+
+static void test5 (void) {
+ int r;
+ FT_HANDLE t;
+ int limit=100000;
+ int *values;
+ int i;
+ CACHETABLE ct;
+ const char *fname = TOKU_TEST_FILENAME;
+
+ MALLOC_N(limit,values);
+ for (i=0; i<limit; i++) values[i]=-1;
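+    // values[] is a shadow copy of the dictionary: values[rk] holds the last value
+    // written for key rk, and -1 means the key was never written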
+ unlink(fname);
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &t, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+ for (i=0; i<limit/2; i++) {
+ char key[100],val[100];
+ int rk = random()%limit;
+ int rv = random();
+ if (i%1000==0 && verbose) { printf("w"); fflush(stdout); }
+ values[rk] = rv;
+ snprintf(key, 100, "key%d", rk);
+ snprintf(val, 100, "val%d", rv);
+ DBT k,v;
+ toku_ft_insert(t, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), null_txn);
+ }
+ if (verbose) printf("\n");
+ for (i=0; i<limit/2; i++) {
+ int rk = random()%limit;
+ if (values[rk]>=0) {
+ char key[100], valexpected[100];
+ DBT k;
+ if (i%1000==0 && verbose) { printf("r"); fflush(stdout); }
+ snprintf(key, 100, "key%d", rk);
+ snprintf(valexpected, 100, "val%d", values[rk]);
+ struct check_pair pair = {(uint32_t) (1+strlen(key)), key, (uint32_t) (1+strlen(valexpected)), valexpected, 0};
+ r = toku_ft_lookup(t, toku_fill_dbt(&k, key, 1+strlen(key)), lookup_checkf, &pair);
+ assert(r==0);
+ assert(pair.call_count==1);
+ }
+ }
+ if (verbose) printf("\n");
+ toku_free(values);
+ r = toku_verify_ft(t); assert(r==0);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ test5();
+
+ if (verbose) printf("test ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ftloader-error-injector.h b/storage/tokudb/PerconaFT/ft/tests/ftloader-error-injector.h
new file mode 100644
index 00000000..a1d14abb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ftloader-error-injector.h
@@ -0,0 +1,172 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <portability/toku_atomic.h>
+
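+// Shared error-injection machinery for the ft loader tests.  Each hook below
+// (malloc/realloc, write/pwrite/fwrite, and the loader poll callback) bumps a
+// global event counter; when its do_*_errors flag is set and the counter
+// reaches event_count_trigger, the hook returns a synthetic failure (ENOMEM,
+// ENOSPC, or a nonzero poll result).  A test typically installs the hooks and
+// then sweeps the trigger over every injection point, e.g.:
+//     toku_set_func_malloc(my_malloc);
+//     toku_set_func_write(bad_write);
+//     event_count_trigger = i;    // fail at the i-th injection point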
+static toku_mutex_t event_mutex = TOKU_MUTEX_INITIALIZER;
+static void lock_events(void) {
+ toku_mutex_lock(&event_mutex);
+}
+static void unlock_events(void) {
+ toku_mutex_unlock(&event_mutex);
+}
+static int event_count, event_count_trigger;
+
+__attribute__((__unused__))
+static void reset_event_counts(void) {
+ lock_events();
+ event_count = event_count_trigger = 0;
+ unlock_events();
+}
+
+__attribute__((__unused__))
+static void event_hit(void) {
+}
+
+__attribute__((__unused__))
+static int event_add_and_fetch(void) {
+ lock_events();
+ int r = ++event_count;
+ unlock_events();
+ return r;
+}
+
+static int do_user_errors = 0;
+
+__attribute__((__unused__))
+static int loader_poll_callback(void *UU(extra), float UU(progress)) {
+ int r;
+ if (do_user_errors && event_count_trigger == event_add_and_fetch()) {
+ event_hit();
+ r = 1;
+ } else {
+ r = 0;
+ }
+ return r;
+}
+
+static int do_write_errors = 0;
+
+__attribute__((__unused__))
+static size_t bad_fwrite (const void *ptr, size_t size, size_t nmemb, FILE *stream) {
+ size_t r;
+ if (do_write_errors && event_count_trigger == event_add_and_fetch()) {
+ event_hit();
+ errno = ENOSPC;
+ r = (size_t) -1;
+ } else {
+ r = fwrite(ptr, size, nmemb, stream);
+ if (r!=nmemb) {
+ errno = ferror(stream);
+ }
+ }
+ return r;
+}
+
+__attribute__((__unused__))
+static ssize_t bad_write(int fd, const void * bp, size_t len) {
+ ssize_t r;
+ if (do_write_errors && event_count_trigger == event_add_and_fetch()) {
+ event_hit();
+ errno = ENOSPC;
+ r = -1;
+ } else {
+ r = write(fd, bp, len);
+ }
+ return r;
+}
+
+__attribute__((__unused__))
+static ssize_t bad_pwrite(int fd, const void * bp, size_t len, toku_off_t off) {
+ ssize_t r;
+ if (do_write_errors && event_count_trigger == event_add_and_fetch()) {
+ event_hit();
+ errno = ENOSPC;
+ r = -1;
+ } else {
+ r = pwrite(fd, bp, len, off);
+ }
+ return r;
+}
+
+static int do_malloc_errors = 0;
+static int my_malloc_count = 0, my_big_malloc_count = 0;
+static int my_realloc_count = 0, my_big_realloc_count = 0;
+static size_t my_big_malloc_limit = 64*1024;
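+// Only allocations of at least my_big_malloc_limit bytes are candidates for an
+// injected ENOMEM failure, so small bookkeeping allocations never fail spuriously.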
+
+__attribute__((__unused__))
+static void reset_my_malloc_counts(void) {
+ my_malloc_count = my_big_malloc_count = 0;
+ my_realloc_count = my_big_realloc_count = 0;
+}
+
+__attribute__((__unused__))
+static void *my_malloc(size_t n) {
+ (void) toku_sync_fetch_and_add(&my_malloc_count, 1); // my_malloc_count++;
+ if (n >= my_big_malloc_limit) {
+ (void) toku_sync_fetch_and_add(&my_big_malloc_count, 1); // my_big_malloc_count++;
+ if (do_malloc_errors) {
+ if (event_add_and_fetch() == event_count_trigger) {
+ event_hit();
+ errno = ENOMEM;
+ return NULL;
+ }
+ }
+ }
+ return malloc(n);
+}
+
+static int do_realloc_errors = 0;
+
+__attribute__((__unused__))
+static void *my_realloc(void *p, size_t n) {
+ (void) toku_sync_fetch_and_add(&my_realloc_count, 1); // my_realloc_count++;
+ if (n >= my_big_malloc_limit) {
+ (void) toku_sync_fetch_and_add(&my_big_realloc_count, 1); // my_big_realloc_count++;
+ if (do_realloc_errors) {
+ if (event_add_and_fetch() == event_count_trigger) {
+ event_hit();
+ errno = ENOMEM;
+ return NULL;
+ }
+ }
+ }
+ return realloc(p, n);
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ftloader-test-bad-generate.cc b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-bad-generate.cc
new file mode 100644
index 00000000..c2837124
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-bad-generate.cc
@@ -0,0 +1,202 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// The purpose of this test is to force errors to be returned from the generate function and verify that the loader reports them.
+
+#define DONT_DEPRECATE_MALLOC
+#define DONT_DEPRECATE_WRITES
+#include "test.h"
+#include "loader/loader.h"
+#include "loader/loader-internal.h"
+#include "ftloader-error-injector.h"
+#include "memory.h"
+#include <portability/toku_path.h>
+
+
+static int generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ if (verbose) printf("%s %p %p %p %p %p %p\n", __FUNCTION__, dest_db, src_db, dest_keys, dest_vals, src_key, src_val);
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ assert(dest_db == NULL); assert(src_db == NULL);
+
+ int result;
+ if (event_count_trigger == event_add_and_fetch()) {
+ event_hit();
+ result = EINVAL;
+ } else {
+ copy_dbt(dest_key, src_key);
+ copy_dbt(dest_val, src_val);
+ result = 0;
+ }
+
+ if (verbose) printf("%s %d\n", __FUNCTION__, result);
+ return result;
+}
+
+static int qsort_compare_ints (const void *a, const void *b) {
+ int avalue = *(int*)a;
+ int bvalue = *(int*)b;
+ if (avalue<bvalue) return -1;
+ if (avalue>bvalue) return +1;
+ return 0;
+}
+
+static int compare_int(DB *desc, const DBT *akey, const DBT *bkey) {
+ assert(desc == NULL);
+ assert(akey->size == sizeof (int));
+ assert(bkey->size == sizeof (int));
+ return qsort_compare_ints(akey->data, bkey->data);
+}
+
+static void populate_rowset(struct rowset *rowset, int seq, int nrows) {
+ for (int i = 0; i < nrows; i++) {
+ int k = seq * nrows + i;
+ int v = seq * nrows + i;
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ DBT val;
+ toku_fill_dbt(&val, &v, sizeof v);
+ add_row(rowset, &key, &val);
+ }
+}
+
+static void test_extractor(int nrows, int nrowsets, bool expect_fail) {
+ if (verbose) printf("%s %d %d\n", __FUNCTION__, nrows, nrowsets);
+
+ int r;
+
+ // open the ft_loader. this runs the extractor.
+ const int N = 1;
+ FT_HANDLE fts[N];
+ DB* dbs[N];
+ const char *fnames[N];
+ ft_compare_func compares[N];
+ for (int i = 0; i < N; i++) {
+ fts[i] = NULL;
+ dbs[i] = NULL;
+ fnames[i] = "";
+ compares[i] = compare_int;
+ }
+
+ FTLOADER loader;
+ r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false, true);
+ assert(r == 0);
+
+ struct rowset *rowset[nrowsets];
+ for (int i = 0 ; i < nrowsets; i++) {
+ rowset[i] = (struct rowset *) toku_malloc(sizeof (struct rowset));
+ assert(rowset[i]);
+ init_rowset(rowset[i], toku_ft_loader_get_rowset_budget_for_testing());
+ populate_rowset(rowset[i], i, nrows);
+ }
+
+ // feed rowsets to the extractor
+ for (int i = 0; i < nrowsets; i++) {
+ r = toku_queue_enq(loader->primary_rowset_queue, rowset[i], 1, NULL);
+ assert(r == 0);
+ }
+
+ r = toku_ft_loader_finish_extractor(loader);
+ assert(r == 0);
+
+ int loader_error;
+ r = toku_ft_loader_get_error(loader, &loader_error);
+ assert(r == 0);
+
+ assert(expect_fail ? loader_error != 0 : loader_error == 0);
+
+ // abort the ft_loader. this ends the test
+ r = toku_ft_loader_abort(loader, true);
+ assert(r == 0);
+}
+
+static int nrows = 1;
+static int nrowsets = 2;
+
+static int usage(const char *progname) {
+    fprintf(stderr, "Usage:\n %s [-h] [-v] [-q] [-s] [-r %d] [--nrowsets %d]\n", progname, nrows, nrowsets);
+ return 1;
+}
+
+int test_main (int argc, const char *argv[]) {
+ const char *progname=argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-h")==0) {
+ return usage(progname);
+ } else if (strcmp(argv[0],"-v")==0) {
+ verbose=1;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else if (strcmp(argv[0],"-r") == 0 && argc >= 1) {
+ argc--; argv++;
+ nrows = atoi(argv[0]);
+ } else if (strcmp(argv[0],"--nrowsets") == 0 && argc >= 1) {
+ argc--; argv++;
+ nrowsets = atoi(argv[0]);
+ } else if (strcmp(argv[0],"-s") == 0) {
+ toku_ft_loader_set_size_factor(1);
+ } else if (argc!=1) {
+ return usage(progname);
+ }
+ else {
+ break;
+ }
+ argc--; argv++;
+ }
+
+    // calibrate: run once with no trigger to count how many injection points a clean run hits
+ test_extractor(nrows, nrowsets, false);
+
+ // run tests
+ int event_limit = event_count;
+ if (verbose) printf("event_limit=%d\n", event_limit);
+
+ for (int i = 1; i <= event_limit; i++) {
+ reset_event_counts();
+ event_count_trigger = i;
+ test_extractor(nrows, nrowsets, true);
+ }
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/ftloader-test-extractor-errors.cc b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-extractor-errors.cc
new file mode 100644
index 00000000..4bff52ce
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-extractor-errors.cc
@@ -0,0 +1,263 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// The purpose of this test is to test the error recovery of the extractor. We inject errors into the extractor and
+// verify that the extractor error state is set.
+
+#define DONT_DEPRECATE_MALLOC
+#define DONT_DEPRECATE_WRITES
+#include "test.h"
+#include "loader/loader.h"
+#include "loader/loader-internal.h"
+#include "ftloader-error-injector.h"
+#include "memory.h"
+#include <portability/toku_path.h>
+
+
+static int generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ (void) dest_db; (void) src_db; (void) src_key; (void) src_val;
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+
+ copy_dbt(&dest_keys->dbts[0], src_key);
+ copy_dbt(&dest_vals->dbts[0], src_val);
+
+ return 0;
+}
+
+static int qsort_compare_ints (const void *a, const void *b) {
+ int avalue = *(int*)a;
+ int bvalue = *(int*)b;
+ if (avalue<bvalue) return -1;
+ if (avalue>bvalue) return +1;
+ return 0;
+}
+
+static int compare_int(DB *desc, const DBT *akey, const DBT *bkey) {
+ assert(desc == NULL);
+ assert(akey->size == sizeof (int));
+ assert(bkey->size == sizeof (int));
+ return qsort_compare_ints(akey->data, bkey->data);
+}
+
+static void populate_rowset(struct rowset *rowset, int seq, int nrows, int keys[]) {
+ for (int i = 0; i < nrows; i++) {
+ int k = keys[i];
+ int v = seq * nrows + i;
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ DBT val;
+ toku_fill_dbt(&val, &v, sizeof v);
+ add_row(rowset, &key, &val);
+ }
+}
+
+static void shuffle(int a[], int n) {
+ for (int i = 0; i < n; i++) {
+ int r = random() % n;
+ int t = a[i]; a[i] = a[r]; a[r] = t;
+ }
+}
+
+static int ascending_keys = 0;
+static int descending_keys = 0;
+static int random_keys = 0;
+
+static void test_extractor(int nrows, int nrowsets, bool expect_fail, const char *testdir) {
+ if (verbose) printf("%s %d %d %s\n", __FUNCTION__, nrows, nrowsets, testdir);
+
+ int r;
+
+ int nkeys = nrows * nrowsets;
+ int *XMALLOC_N(nkeys, keys);
+ for (int i = 0; i < nkeys; i++)
+ keys[i] = ascending_keys ? i : nkeys - i;
+ if (random_keys)
+ shuffle(keys, nkeys);
+
+ // open the ft_loader. this runs the extractor.
+ const int N = 1;
+ FT_HANDLE fts[N];
+ DB* dbs[N];
+ const char *fnames[N];
+ ft_compare_func compares[N];
+ for (int i = 0; i < N; i++) {
+ fts[i] = NULL;
+ dbs[i] = NULL;
+ fnames[i] = "";
+ compares[i] = compare_int;
+ }
+
+ char temp[strlen(testdir) + 1 + strlen("tempXXXXXX") + 1];
+ sprintf(temp, "%s/%s", testdir, "tempXXXXXX");
+
+ FTLOADER loader;
+ r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false, true);
+ assert(r == 0);
+
+ struct rowset *rowset[nrowsets];
+ for (int i = 0 ; i < nrowsets; i++) {
+ rowset[i] = (struct rowset *) toku_malloc(sizeof (struct rowset));
+ assert(rowset[i]);
+ init_rowset(rowset[i], toku_ft_loader_get_rowset_budget_for_testing());
+ populate_rowset(rowset[i], i, nrows, &keys[i*nrows]);
+ }
+
+ // setup error injection
+ toku_set_func_malloc(my_malloc);
+ toku_set_func_realloc(my_realloc);
+ toku_set_func_fwrite(bad_fwrite);
+ toku_set_func_write(bad_write);
+ toku_set_func_pwrite(bad_pwrite);
+ ft_loader_set_poll_function(&loader->poll_callback, loader_poll_callback, NULL);
+
+ // feed rowsets to the extractor
+ for (int i = 0; i < nrowsets; i++) {
+ r = toku_queue_enq(loader->primary_rowset_queue, rowset[i], 1, NULL);
+ assert(r == 0);
+ }
+
+ r = toku_ft_loader_finish_extractor(loader);
+ assert(r == 0);
+
+ toku_set_func_malloc(NULL);
+ toku_set_func_realloc(NULL);
+ toku_set_func_fwrite(nullptr);
+ toku_set_func_write(NULL);
+ toku_set_func_pwrite(NULL);
+
+ int error;
+ r = toku_ft_loader_get_error(loader, &error);
+ assert(r == 0);
+ assert(expect_fail ? error != 0 : error == 0);
+
+ // verify the temp files
+
+ // abort the ft_loader. this ends the test
+ r = toku_ft_loader_abort(loader, true);
+ assert(r == 0);
+
+ toku_free(keys);
+}
+static int nrows = 1;
+static int nrowsets = 2;
+
+static int usage(const char *progname) {
+ fprintf(stderr, "Usage: %s [options] directory\n", progname);
+ fprintf(stderr, "[-v] turn on verbose\n");
+ fprintf(stderr, "[-q] turn off verbose\n");
+ fprintf(stderr, "[-r %d] set the number of rows\n", nrows);
+ fprintf(stderr, "[--rowsets %d] set the number of rowsets\n", nrowsets);
+ fprintf(stderr, "[-s] set the small loader size factor\n");
+ fprintf(stderr, "[-m] inject big malloc and realloc errors\n");
+ fprintf(stderr, "[--malloc_limit %u] set the threshold for failing malloc and realloc\n", (unsigned) my_big_malloc_limit);
+ fprintf(stderr, "[-w] inject write errors\n");
+ fprintf(stderr, "[-u] inject user errors\n");
+ return 1;
+}
+
+int test_main (int argc, const char *argv[]) {
+ const char *progname=argv[0];
+ int max_error_limit = -1;
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-h")==0) {
+ return usage(progname);
+ } else if (strcmp(argv[0],"-v")==0) {
+ verbose=1;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else if (strcmp(argv[0],"-r") == 0 && argc >= 1) {
+ argc--; argv++;
+ nrows = atoi(argv[0]);
+ } else if (strcmp(argv[0],"--rowsets") == 0 && argc >= 1) {
+ argc--; argv++;
+ nrowsets = atoi(argv[0]);
+ } else if (strcmp(argv[0],"-s") == 0) {
+ toku_ft_loader_set_size_factor(1);
+ } else if (strcmp(argv[0],"-w") == 0) {
+ do_write_errors = 1;
+ } else if (strcmp(argv[0],"-m") == 0) {
+ do_malloc_errors = 1;
+ } else if (strcmp(argv[0],"-u") == 0) {
+ do_user_errors = 1;
+ } else if (strcmp(argv[0],"--malloc_limit") == 0 && argc > 1) {
+ argc--; argv++;
+ my_big_malloc_limit = atoi(argv[0]);
+ } else if (strcmp(argv[0],"--max_error_limit") == 0 && argc >= 1) {
+ argc--; argv++;
+ max_error_limit = atoi(argv[0]);
+ } else if (strcmp(argv[0],"--asc") == 0) {
+ ascending_keys = 1;
+ } else if (strcmp(argv[0],"--dsc") == 0) {
+ descending_keys = 1;
+ } else if (strcmp(argv[0],"--random") == 0) {
+ random_keys = 1;
+ } else if (argc!=1) {
+ return usage(progname);
+ }
+ else {
+ break;
+ }
+ argc--; argv++;
+ }
+
+ const char *testdir = TOKU_TEST_FILENAME;
+
+ if (ascending_keys + descending_keys + random_keys == 0)
+ ascending_keys = 1;
+
+    // calibrate: run once with no trigger to count how many injection points a clean run hits
+ test_extractor(nrows, nrowsets, false, testdir);
+
+ // run tests
+ int error_limit = event_count;
+ if (verbose) printf("error_limit=%d\n", error_limit);
+
+ if (max_error_limit != -1 && error_limit > max_error_limit)
+ error_limit = max_error_limit;
+ for (int i = 1; i <= error_limit; i++) {
+ reset_event_counts();
+ reset_my_malloc_counts();
+ event_count_trigger = i;
+ test_extractor(nrows, nrowsets, true, testdir);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ftloader-test-extractor.cc b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-extractor.cc
new file mode 100644
index 00000000..20f05607
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-extractor.cc
@@ -0,0 +1,456 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// The purpose of this test is to test the extractor component of the ft loader. We insert rowsets into the extractor queue and verify temp files
+// after the extractor is finished.
+
+#define DONT_DEPRECATE_MALLOC
+#define DONT_DEPRECATE_WRITES
+#include "test.h"
+#include "loader/loader.h"
+#include "loader/loader-internal.h"
+#include "memory.h"
+#include <portability/toku_path.h>
+
+
+static int qsort_compare_ints (const void *a, const void *b) {
+ int avalue = *(int*)a;
+ int bvalue = *(int*)b;
+ if (avalue<bvalue) return -1;
+ if (avalue>bvalue) return +1;
+ return 0;
+}
+
+static int compare_int(DB *desc, const DBT *akey, const DBT *bkey) {
+ assert(desc == NULL);
+ assert(akey->size == sizeof (int));
+ assert(bkey->size == sizeof (int));
+ return qsort_compare_ints(akey->data, bkey->data);
+}
+
+static char **get_temp_files(const char *testdir) {
+ int ntemp = 0;
+ int maxtemp = 32;
+ char **XMALLOC_N(maxtemp, tempfiles);
+
+ DIR *d = opendir(testdir);
+ if (d) {
+ struct dirent *de;
+ while ((de = readdir(d)) != NULL) {
+ if (strncmp(de->d_name, "temp", 4) == 0) {
+ if (ntemp >= maxtemp-1) {
+ maxtemp = 2*maxtemp;
+ XREALLOC_N(2*maxtemp, tempfiles);
+ }
+ tempfiles[ntemp++] = toku_strdup(de->d_name);
+ }
+ }
+ closedir(d);
+ }
+ tempfiles[ntemp] = NULL;
+ return tempfiles;
+}
+
+static void free_temp_files(char **tempfiles) {
+ for (int i = 0; tempfiles[i] != NULL; i++)
+ toku_free(tempfiles[i]);
+ toku_free(tempfiles);
+}
+
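+// Rows in a temp file are stored as an int key length, the key bytes, an int
+// value length, then the value bytes; read_row and write_row implement that layout.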
+static int read_row(FILE *f, DBT *key, DBT *val) {
+ size_t r;
+ int len;
+ r = fread(&len, sizeof len, 1, f);
+ if (r != 1)
+ return EOF;
+ assert(key->flags == DB_DBT_REALLOC);
+ key->data = toku_realloc(key->data, len); key->size = len;
+ r = fread(key->data, len, 1, f);
+ if (r != 1)
+ return EOF;
+ r = fread(&len, sizeof len, 1, f);
+ if (r != 1)
+ return EOF;
+ assert(val->flags == DB_DBT_REALLOC);
+ val->data = toku_realloc(val->data, len); val->size = len;
+ r = fread(val->data, len, 1, f);
+ if (r != 1)
+ return EOF;
+ return 0;
+}
+
+static void write_row(FILE *f, DBT *key, DBT *val) {
+ size_t r;
+ int len = key->size;
+ r = fwrite(&len, sizeof len, 1, f);
+ assert(r == 1);
+ r = fwrite(key->data, len, 1, f);
+ assert(r == 1);
+ len = val->size;
+ r = fwrite(&len, sizeof len, 1, f);
+ assert(r == 1);
+ r = fwrite(val->data, len, 1, f);
+ assert(r == 1);
+}
+
+static void read_tempfile(const char *testdir, const char *tempfile, int **tempkeys, int *ntempkeys) {
+ int maxkeys = 32;
+ int nkeys = 0;
+ int *XCALLOC_N(maxkeys, keys);
+
+ char fname[strlen(testdir) + 1 + strlen(tempfile) + 1];
+ sprintf(fname, "%s/%s", testdir, tempfile);
+ FILE *f = fopen(fname, "r");
+ if (f) {
+ DBT key;
+ toku_init_dbt_flags(&key, DB_DBT_REALLOC);
+ DBT val;
+ toku_init_dbt_flags(&val, DB_DBT_REALLOC);
+ while (read_row(f, &key, &val) == 0) {
+ if (nkeys >= maxkeys) {
+ maxkeys *= 2;
+ XREALLOC_N(maxkeys, keys);
+ }
+ assert(key.size == sizeof (int));
+ memcpy(&keys[nkeys], key.data, key.size);
+ nkeys++;
+ }
+ toku_free(key.data);
+ toku_free(val.data);
+ fclose(f);
+ }
+
+ *tempkeys = keys;
+ *ntempkeys = nkeys;
+}
+
+static void verify_sorted(int a[], int n) {
+ for (int i = 1; i < n; i++)
+ assert(a[i-1] <= a[i]);
+}
+
+struct merge_file {
+ FILE *f;
+ DBT key, val;
+ bool row_valid;
+};
+
+static DBT zero_dbt;
+
+static void merge_file_init(struct merge_file *mf) {
+ mf->f = NULL;
+ mf->key = zero_dbt; mf->key.flags = DB_DBT_REALLOC;
+ mf->val = zero_dbt; mf->val.flags = DB_DBT_REALLOC;
+ mf->row_valid = false;
+}
+
+static void merge_file_destroy(struct merge_file *mf) {
+ if (mf->f) {
+ fclose(mf->f);
+ mf->f = NULL;
+ }
+ toku_free(mf->key.data);
+ toku_free(mf->val.data);
+}
+
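+// merge() does an n-way merge of the sorted temp files into testdir/result by
+// repeatedly picking the open file whose current row has the smallest key
+// (keys are expected to be unique, hence the assert on the comparison).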
+static char *merge(char **tempfiles, int ntempfiles, const char *testdir) {
+ char fname[strlen(testdir) + 1 + strlen("result") + 1];
+ sprintf(fname, "%s/%s", testdir, "result");
+ FILE *mergef = fopen(fname, "w"); assert(mergef != NULL);
+
+ struct merge_file f[ntempfiles];
+ for (int i = 0; i < ntempfiles; i++) {
+ merge_file_init(&f[i]);
+ char tname[strlen(testdir) + 1 + strlen(tempfiles[i]) + 1];
+ sprintf(tname, "%s/%s", testdir, tempfiles[i]);
+ f[i].f = fopen(tname, "r");
+ if (f[i].f == NULL) {
+ int error = errno;
+ fprintf(stderr, "%s:%d errno=%d %s\n", __FILE__, __LINE__, error, strerror(error));
+ if (error == EMFILE)
+ fprintf(stderr, "may need to increase the nofile ulimit\n");
+ }
+ assert(f[i].f != NULL);
+ if (read_row(f[i].f, &f[i].key, &f[i].val) == 0)
+ f[i].row_valid = true;
+ }
+
+ while (1) {
+ // get min
+ int mini = -1;
+ for (int i = 0; i < ntempfiles; i++) {
+ if (f[i].row_valid) {
+ if (mini == -1) {
+ mini = i;
+ } else {
+ int r = compare_int(NULL, &f[mini].key, &f[i].key);
+ assert(r != 0);
+ if (r > 0)
+ mini = i;
+ }
+ }
+ }
+ if (mini == -1)
+ break;
+
+ // write min
+ write_row(mergef, &f[mini].key, &f[mini].val);
+
+ // refresh mini
+ if (read_row(f[mini].f, &f[mini].key, &f[mini].val) != 0)
+ f[mini].row_valid = false;
+ }
+
+ for (int i = 0; i < ntempfiles; i++) {
+ merge_file_destroy(&f[i]);
+ }
+
+ fclose(mergef);
+ return toku_strdup("result");
+}
+
+static void verify(int inkey[], int nkeys, const char *testdir) {
+ // find the temp files
+ char **tempfiles = get_temp_files(testdir);
+ int ntempfiles = 0;
+ for (int i = 0; tempfiles[i] != NULL; i++) {
+ if (verbose) printf("%s\n", tempfiles[i]);
+ ntempfiles++;
+ }
+
+ // verify each is sorted
+ for (int i = 0; i < ntempfiles; i++) {
+ int *tempkeys; int ntempkeys;
+ read_tempfile(testdir, tempfiles[i], &tempkeys, &ntempkeys);
+ verify_sorted(tempkeys, ntempkeys);
+ toku_free(tempkeys);
+ }
+
+ // merge
+ char *result_file = merge(tempfiles, ntempfiles, testdir);
+ assert(result_file);
+
+ int *result_keys; int n_result_keys;
+ read_tempfile(testdir, result_file, &result_keys, &n_result_keys);
+ toku_free(result_file);
+
+ // compare
+ assert(nkeys == n_result_keys);
+ for (int i = 0; i < nkeys; i++)
+ assert(inkey[i] == result_keys[i]);
+
+ toku_free(result_keys);
+ free_temp_files(tempfiles);
+}
+
+static int generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ assert(dest_db == NULL); assert(src_db == NULL);
+
+ copy_dbt(dest_key, src_key);
+ copy_dbt(dest_val, src_val);
+
+ return 0;
+}
+
+static void populate_rowset(struct rowset *rowset, int seq, int nrows, int keys[]) {
+ for (int i = 0; i < nrows; i++) {
+ int k = keys[i];
+ int v = seq * nrows + i;
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ DBT val;
+ toku_fill_dbt(&val, &v, sizeof v);
+ add_row(rowset, &key, &val);
+ }
+}
+
+static void shuffle(int a[], int n) {
+ for (int i = 0; i < n; i++) {
+ int r = random() % n;
+ int t = a[i]; a[i] = a[r]; a[r] = t;
+ }
+}
+
+static int ascending_keys = 0;
+static int ascending_keys_poison = 0;
+static int descending_keys = 0;
+static int random_keys = 0;
+
+static void test_extractor(int nrows, int nrowsets, const char *testdir) {
+ if (verbose) printf("%s %d %d %s\n", __FUNCTION__, nrows, nrowsets, testdir);
+
+ int r;
+
+ int nkeys = nrows * nrowsets;
+ int *XCALLOC_N(nkeys, keys);
+ for (int i = 0; i < nkeys; i++)
+ keys[i] = ascending_keys ? 2*i : nkeys - i;
+ if (ascending_keys_poison) {
+ if (verbose)
+ printf("poison %d %d %d\n", nrows*(nrowsets-1), keys[nrows*(nrowsets-1)], keys[nrows-1] -1);
+ keys[nrows*(nrowsets-1)] = keys[nrows-1] - 1;
+ }
+ if (random_keys)
+ shuffle(keys, nkeys);
+
+ // open the ft_loader. this runs the extractor.
+ const int N = 1;
+ FT_HANDLE fts[N];
+ DB* dbs[N];
+ const char *fnames[N];
+ ft_compare_func compares[N];
+ for (int i = 0; i < N; i++) {
+ fts[i] = NULL;
+ dbs[i] = NULL;
+ fnames[i] = "";
+ compares[i] = compare_int;
+ }
+
+ char temp[strlen(testdir) + 1 + strlen("tempXXXXXX") + 1];
+ sprintf(temp, "%s/%s", testdir, "tempXXXXXX");
+
+ FTLOADER loader;
+ r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, temp, ZERO_LSN, nullptr, true, 0, false, true);
+ assert(r == 0);
+
+ struct rowset *rowset[nrowsets];
+ for (int i = 0 ; i < nrowsets; i++) {
+ rowset[i] = (struct rowset *) toku_malloc(sizeof (struct rowset));
+ assert(rowset[i]);
+ init_rowset(rowset[i], toku_ft_loader_get_rowset_budget_for_testing());
+ populate_rowset(rowset[i], i, nrows, &keys[i*nrows]);
+ }
+
+ // feed rowsets to the extractor
+ for (int i = 0; i < nrowsets; i++) {
+ r = toku_queue_enq(loader->primary_rowset_queue, rowset[i], 1, NULL);
+ assert(r == 0);
+ }
+ r = toku_ft_loader_finish_extractor(loader);
+ assert(r == 0);
+
+ int error;
+ r = toku_ft_loader_get_error(loader, &error);
+ assert(r == 0);
+ assert(error == 0);
+
+ // sort the input keys
+ qsort(keys, nkeys, sizeof (int), qsort_compare_ints);
+
+ // verify the temp files
+ verify(keys, nkeys, testdir);
+
+ // abort the ft_loader. this ends the test
+ r = toku_ft_loader_abort(loader, true);
+ assert(r == 0);
+
+ toku_free(keys);
+}
+
+static int nrows = 1;
+static int nrowsets = 2;
+
+static int usage(const char *progname) {
+ fprintf(stderr, "Usage: %s [options] directory\n", progname);
+ fprintf(stderr, "[-v] turn on verbose\n");
+ fprintf(stderr, "[-q] turn off verbose\n");
+ fprintf(stderr, "[-r %d] set the number of rows\n", nrows);
+ fprintf(stderr, "[--rowsets %d] set the number of rowsets\n", nrowsets);
+ fprintf(stderr, "[-s] set the small loader size factor\n");
+ fprintf(stderr, "[--asc] [--dsc] [--random]\n");
+ return 1;
+}
+
+int test_main (int argc, const char *argv[]) {
+ const char *progname=argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-h")==0) {
+ return usage(progname);
+ } else if (strcmp(argv[0],"-v")==0) {
+ verbose=1;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else if (strcmp(argv[0],"-r") == 0 && argc >= 1) {
+ argc--; argv++;
+ nrows = atoi(argv[0]);
+ } else if (strcmp(argv[0],"--rowsets") == 0 && argc >= 1) {
+ argc--; argv++;
+ nrowsets = atoi(argv[0]);
+ } else if (strcmp(argv[0],"-s") == 0) {
+ toku_ft_loader_set_size_factor(1);
+ } else if (strcmp(argv[0],"--asc") == 0) {
+ ascending_keys = 1;
+ } else if (strcmp(argv[0],"--dsc") == 0) {
+ descending_keys = 1;
+ } else if (strcmp(argv[0],"--random") == 0) {
+ random_keys = 1;
+ } else if (strcmp(argv[0], "--asc-poison") == 0) {
+ ascending_keys = 1;
+ ascending_keys_poison = 1;
+ } else if (argc!=1) {
+ return usage(progname);
+ }
+ else {
+ break;
+ }
+ argc--; argv++;
+ }
+
+ const char *testdir = TOKU_TEST_FILENAME;
+ char unlink_all[strlen(testdir)+20];
+ snprintf(unlink_all, strlen(testdir)+20, "rm -rf %s", testdir);
+ int r;
+ r = system(unlink_all); CKERR(r);
+ r = toku_os_mkdir(testdir, 0755); CKERR(r);
+
+ if (ascending_keys + descending_keys + random_keys == 0)
+ ascending_keys = 1;
+
+ // run test
+ test_extractor(nrows, nrowsets, testdir);
+
+ r = system(unlink_all); CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ftloader-test-merge-files-dbufio.cc b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-merge-files-dbufio.cc
new file mode 100644
index 00000000..0b121316
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-merge-files-dbufio.cc
@@ -0,0 +1,568 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the loader merge-files path (toku_merge_some_files_using_dbufio) under injected errors
+
+#define DONT_DEPRECATE_WRITES
+#define DONT_DEPRECATE_MALLOC
+
+#include "test.h"
+#include "loader/loader-internal.h"
+#include <portability/toku_path.h>
+
+static int event_count, event_count_trigger;
+
+static void my_assert_hook (void) {
+ fprintf(stderr, "event_count=%d\n", event_count);
+}
+
+static void reset_event_counts(void) {
+ event_count = event_count_trigger = 0;
+}
+
+static void event_hit(void) {
+}
+
+static int loader_poll_callback(void *UU(extra), float UU(progress)) {
+ int r;
+ event_count++;
+ if (event_count_trigger == event_count) {
+ event_hit();
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ r = TOKUDB_CANCELED;
+ } else {
+ r = 0;
+ }
+ return r;
+}
+
+static size_t bad_fwrite (const void *ptr, size_t size, size_t nmemb, FILE *stream) {
+ event_count++;
+ size_t r;
+ if (event_count_trigger == event_count) {
+ event_hit();
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ errno = ENOSPC;
+ r = (size_t) -1;
+ } else {
+ r = fwrite(ptr, size, nmemb, stream);
+ if (r!=nmemb) {
+ errno = ferror(stream);
+ }
+ }
+ return r;
+}
+
+static ssize_t bad_write(int fd, const void * bp, size_t len) {
+ ssize_t r;
+ event_count++;
+ if (event_count_trigger == event_count) {
+ event_hit();
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ errno = ENOSPC;
+ r = -1;
+ } else {
+ r = write(fd, bp, len);
+ }
+ return r;
+}
+
+static ssize_t bad_pwrite(int fd, const void * bp, size_t len, toku_off_t off) {
+ ssize_t r;
+ event_count++;
+ if (event_count_trigger == event_count) {
+ event_hit();
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ errno = ENOSPC;
+ r = -1;
+ } else {
+ r = pwrite(fd, bp, len, off);
+ }
+ return r;
+}
+
+static FILE *
+bad_fdopen(int fd, const char * mode) {
+ FILE * rval;
+ event_count++;
+ if (event_count_trigger == event_count) {
+ event_hit();
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ errno = EINVAL;
+ rval = NULL;
+ } else {
+ rval = fdopen(fd, mode);
+ }
+ return rval;
+}
+
+static FILE *
+bad_fopen(const char *filename, const char *mode) {
+ FILE * rval;
+ event_count++;
+ if (event_count_trigger == event_count) {
+ event_hit();
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ errno = EINVAL;
+ rval = NULL;
+ } else {
+ rval = fopen(filename, mode);
+ }
+ return rval;
+}
+
+
+static int
+bad_open(const char *path, int oflag, int mode) {
+ int rval;
+ event_count++;
+ if (event_count_trigger == event_count) {
+ event_hit();
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ errno = EINVAL;
+ rval = -1;
+ } else {
+ rval = open(path, oflag, mode);
+ }
+ return rval;
+}
+
+
+
+static int
+bad_fclose(FILE * stream) {
+ int rval;
+ event_count++;
+ // Must close the stream even in the "error case" because otherwise there is no way to get the memory back.
+ rval = fclose(stream);
+ if (rval==0) {
+ if (event_count_trigger == event_count) {
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ errno = ENOSPC;
+ rval = -1;
+ }
+ }
+ return rval;
+}
+
+int bad_read_errno = 0;
+
+static ssize_t
+bad_read(int fd, void *buf, size_t count) {
+ ssize_t rval;
+ event_count++;
+ if (event_count_trigger == event_count) {
+ event_hit();
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ errno = bad_read_errno;
+ rval = -1;
+ } else
+ rval = read(fd, buf, count);
+ return rval;
+}
+
+static int my_malloc_event = 1;
+static int my_malloc_count = 0, my_big_malloc_count = 0;
+static int my_realloc_count = 0, my_big_realloc_count = 0;
+
+static void reset_my_malloc_counts(void) {
+ my_malloc_count = my_big_malloc_count = 0;
+ my_realloc_count = my_big_realloc_count = 0;
+}
+
+size_t min_malloc_error_size = 0;
+
+static void *my_malloc(size_t n) {
+ my_malloc_count++;
+ if (n >= min_malloc_error_size) {
+ my_big_malloc_count++;
+ if (my_malloc_event) {
+ event_count++;
+ if (event_count == event_count_trigger) {
+ event_hit();
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ errno = ENOMEM;
+ return NULL;
+ }
+ }
+ }
+ return os_malloc(n);
+}
+
+static int do_realloc_errors = 1;
+
+static void *my_realloc(void *p, size_t n) {
+ my_realloc_count++;
+ if (n >= min_malloc_error_size) {
+ my_big_realloc_count++;
+ if (do_realloc_errors) {
+ event_count++;
+ if (event_count == event_count_trigger) {
+ event_hit();
+ if (verbose) printf("%s %d\n", __FUNCTION__, event_count);
+ errno = ENOMEM;
+ return NULL;
+ }
+ }
+ }
+ return os_realloc(p, n);
+}
+
+
+static int qsort_compare_ints (const void *a, const void *b) {
+ int avalue = *(int*)a;
+ int bvalue = *(int*)b;
+ if (avalue<bvalue) return -1;
+ if (avalue>bvalue) return +1;
+ return 0;
+
+}
+
+static int compare_ints (DB* UU(desc), const DBT *akey, const DBT *bkey) {
+ assert(akey->size==sizeof(int));
+ assert(bkey->size==sizeof(int));
+ return qsort_compare_ints(akey->data, bkey->data);
+}
+
+static char *errorstr_static (int err) {
+ static char errorstr[100];
+ toku_ft_strerror_r(err, errorstr, sizeof(errorstr));
+ return errorstr;
+}
+
+
+static void err_cb(DB *db UU(), int dbn, int err, DBT *key UU(), DBT *val UU(), void *extra UU()) {
+ fprintf(stderr, "error in test dbn=%d err=%d (%s)\n", dbn, err, errorstr_static(err));
+ abort();
+}
+
+enum { N_SOURCES = 2, N_DEST_DBS=1 };
+
+int N_RECORDS = 10;
+
+static char *make_fname(const char *directory, const char *fname, int idx) {
+ int len = strlen(directory)+strlen(fname)+20;
+ char *XMALLOC_N(len, result);
+ int r = snprintf(result, len, "%s/%s%d", directory, fname, idx);
+ assert(r<len);
+ return result; // don't care that it's a little too long.
+}
+
+
+struct consumer_thunk {
+ QUEUE q;
+ int64_t n_read;
+};
+
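+// The consumer thread drains the rowsets that the merge enqueues and tallies the
+// rows it sees; when no error is injected, the main thread later checks that all
+// N_RECORDS rows arrived.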
+static void *consumer_thread (void *ctv) {
+ struct consumer_thunk *cthunk = (struct consumer_thunk *)ctv;
+ while (1) {
+ void *item;
+ int r = toku_queue_deq(cthunk->q, &item, NULL, NULL);
+ if (r==EOF) return NULL;
+ assert(r==0);
+ struct rowset *rowset = (struct rowset *)item;
+ cthunk->n_read += rowset->n_rows;
+ destroy_rowset(rowset);
+ toku_free(rowset);
+ }
+}
+
+
+static void test (const char *directory, bool is_error) {
+
+ int *XMALLOC_N(N_SOURCES, fds);
+
+ char **XMALLOC_N(N_SOURCES, fnames);
+ int *XMALLOC_N(N_SOURCES, n_records_in_fd);
+ for (int i=0; i<N_SOURCES; i++) {
+ fnames[i] = make_fname(directory, "temp", i);
+ fds[i] = open(fnames[i], O_CREAT|O_RDWR, S_IRWXU);
+ assert(fds[i]>=0);
+ n_records_in_fd[i] = 0;
+ }
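+    // Scatter N_RECORDS records across the source files; each record is written as
+    // a 4-byte length, the 4-byte key, another 4-byte length, and the 4-byte value
+    // (key == value == i).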
+ for (int i=0; i<N_RECORDS; i++) {
+ int size=4;
+ int fdi = random()%N_SOURCES;
+ int fd = fds[fdi];
+ { int r = write(fd, &size, 4); assert(r==4); }
+ { int r = write(fd, &i, 4); assert(r==4); }
+ { int r = write(fd, &size, 4); assert(r==4); }
+ { int r = write(fd, &i, 4); assert(r==4); }
+ n_records_in_fd[fdi]++;
+ }
+ for (int i=0; i<N_SOURCES; i++) {
+ toku_off_t r = lseek(fds[i], 0, SEEK_SET);
+ assert(r==0);
+ }
+
+ FTLOADER bl;
+ FT_HANDLE *XCALLOC_N(N_DEST_DBS, fts);
+ DB* *XCALLOC_N(N_DEST_DBS, dbs);
+ const char **XMALLOC_N(N_DEST_DBS, new_fnames_in_env);
+ for (int i=0; i<N_DEST_DBS; i++) {
+ char s[100];
+ snprintf(s, sizeof(s), "db%d.db", i);
+ new_fnames_in_env[i] = toku_strdup(s);
+ assert(new_fnames_in_env[i]);
+ }
+ ft_compare_func *XMALLOC_N(N_DEST_DBS, bt_compare_functions);
+ bt_compare_functions[0] = compare_ints;
+ CACHETABLE ct;
+ enum {CACHETABLE_SIZE = 64*1024};
+ {
+ toku_cachetable_create(&ct, CACHETABLE_SIZE, (LSN){1}, NULL);
+ }
+ LSN *XMALLOC(lsnp);
+ {
+ int r = toku_ft_loader_internal_init (&bl,
+ ct,
+ (generate_row_for_put_func)NULL,
+ (DB*)NULL,
+ N_DEST_DBS, fts, dbs,
+ new_fnames_in_env,
+ bt_compare_functions,
+ "tempxxxxxx",
+ *lsnp,
+ nullptr, true, 0, false, true);
+ assert(r==0);
+ }
+
+ ft_loader_init_error_callback(&bl->error_callback);
+ ft_loader_set_error_function(&bl->error_callback, err_cb, NULL);
+ ft_loader_init_poll_callback(&bl->poll_callback);
+ ft_loader_set_poll_function(&bl->poll_callback, loader_poll_callback, NULL);
+ ft_loader_set_fractal_workers_count_from_c(bl);
+
+ QUEUE q;
+ { int r = toku_queue_create(&q, 1000); assert(r==0); }
+ DBUFIO_FILESET bfs;
+ const int MERGE_BUF_SIZE = 100000; // bigger than 64K so that we will trigger malloc issues.
+ { int r = create_dbufio_fileset(&bfs, N_SOURCES, fds, MERGE_BUF_SIZE, false); assert(r==0); }
+ FIDX *XMALLOC_N(N_SOURCES, src_fidxs);
+ assert(bl->file_infos.n_files==0);
+ bl->file_infos.n_files = N_SOURCES;
+ bl->file_infos.n_files_limit = N_SOURCES;
+ bl->file_infos.n_files_open = 0;
+ bl->file_infos.n_files_extant = 0;
+ XREALLOC_N(bl->file_infos.n_files_limit, bl->file_infos.file_infos);
+ for (int i=0; i<N_SOURCES; i++) {
+        // All we really need is the number of records in the file. The rest of the file_info is unused by the dbufio code.
+ bl->file_infos.file_infos[i].n_rows = n_records_in_fd[i];
+ // However we need these for the destroy method to work right.
+ bl->file_infos.file_infos[i].is_extant = false;
+ bl->file_infos.file_infos[i].is_open = false;
+ bl->file_infos.file_infos[i].buffer = NULL;
+ src_fidxs[i].idx = i;
+ }
+ toku_pthread_t consumer;
+ struct consumer_thunk cthunk = {q, 0};
+ {
+ int r = toku_pthread_create(toku_uninstrumented,
+ &consumer,
+ nullptr,
+ consumer_thread,
+ static_cast<void *>(&cthunk));
+ assert(r == 0);
+ }
+
+ toku_set_func_malloc_only(my_malloc);
+ toku_set_func_realloc_only(my_realloc);
+ toku_set_func_fwrite(bad_fwrite);
+ toku_set_func_write(bad_write);
+ toku_set_func_pwrite(bad_pwrite);
+ toku_set_func_fdopen(bad_fdopen);
+ toku_set_func_fopen(bad_fopen);
+ toku_set_func_open(bad_open);
+ toku_set_func_fclose(bad_fclose);
+ if (bad_read_errno) toku_set_func_read(bad_read);
+
+ int result = 0;
+ {
+ int r = toku_merge_some_files_using_dbufio(true, FIDX_NULL, q, N_SOURCES, bfs, src_fidxs, bl, 0, (DB*)NULL, compare_ints, 10000);
+ if (is_error && r!=0) {
+ result = r;
+ } else {
+ if (r!=0) printf("%s:%d r=%d (%s)\n", __FILE__, __LINE__, r, errorstr_static(r));
+ assert(r==0);
+ }
+ if (r)
+ panic_dbufio_fileset(bfs, r);
+ }
+ {
+ int r = toku_queue_eof(q);
+ assert(r==0);
+ }
+
+ toku_set_func_malloc(NULL);
+ toku_set_func_realloc(NULL);
+ toku_set_func_fwrite(nullptr);
+ toku_set_func_write(NULL);
+ toku_set_func_pwrite(NULL);
+ toku_set_func_fdopen(NULL);
+ toku_set_func_fopen(NULL);
+ toku_set_func_open(NULL);
+ toku_set_func_fclose(NULL);
+ toku_set_func_read(NULL);
+ do_assert_hook = my_assert_hook;
+
+ {
+ void *vresult;
+ int r = toku_pthread_join(consumer, &vresult);
+ assert(r==0);
+ assert(vresult==NULL);
+ //printf("n_read = %ld, N_SOURCES=%d N_RECORDS=%d\n", cthunk.n_read, N_SOURCES, N_RECORDS);
+ if (result==0) {
+ assert(cthunk.n_read == N_RECORDS);
+ }
+ }
+ {
+ int r = toku_queue_destroy(q);
+ assert(r==0);
+ }
+ toku_ft_loader_internal_destroy(bl, false);
+ {
+ toku_cachetable_close(&ct);
+ }
+ for (int i=0; i<N_DEST_DBS; i++) {
+ toku_free((void*)new_fnames_in_env[i]);
+ }
+ for (int i=0; i<N_SOURCES; i++) {
+ toku_free(fnames[i]);
+ }
+ destroy_dbufio_fileset(bfs);
+ toku_free(fnames);
+ toku_free(fds);
+ toku_free(fts);
+ toku_free(dbs);
+ toku_free(new_fnames_in_env);
+ toku_free(bt_compare_functions);
+ toku_free(lsnp);
+ toku_free(src_fidxs);
+ toku_free(n_records_in_fd);
+}
+
+
+static int usage(const char *progname, int n) {
+ fprintf(stderr, "Usage:\n %s [-v] [-q] [-r %d] [-s] [-m] [-tend NEVENTS] directory\n", progname, n);
+ fprintf(stderr, "[-v] turn on verbose\n");
+ fprintf(stderr, "[-q] turn off verbose\n");
+ fprintf(stderr, "[-r %d] set the number of rows\n", n);
+ fprintf(stderr, "[-s] set the small loader size factor\n");
+ fprintf(stderr, "[-m] inject big malloc failures\n");
+ fprintf(stderr, "[-tend NEVENTS] stop testing after N events\n");
+ fprintf(stderr, "[-bad_read_errno ERRNO]\n");
+ return 1;
+}
+
+int test_main (int argc, const char *argv[]) {
+ int tstart = 0;
+ int tend = -1;
+ const char *progname=argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-h")==0) {
+ return usage(progname, N_RECORDS);
+ } else if (strcmp(argv[0],"-v")==0) {
+ verbose=1;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else if (strcmp(argv[0],"-r") == 0) {
+ argc--; argv++;
+ N_RECORDS = atoi(argv[0]);
+ } else if (strcmp(argv[0],"-s") == 0) {
+ toku_ft_loader_set_size_factor(1);
+ } else if (strcmp(argv[0],"-m") == 0) {
+ my_malloc_event = 1;
+ } else if (strcmp(argv[0],"-tend") == 0 && argc > 1) {
+ argc--; argv++;
+ tend = atoi(argv[0]);
+ } else if (strcmp(argv[0],"-tstart") == 0 && argc > 1) {
+ argc--; argv++;
+ tstart = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-bad_read_errno") == 0 && argc > 1) {
+ argc--; argv++;
+ bad_read_errno = atoi(argv[0]);
+ } else if (argc!=1) {
+ return usage(progname, N_RECORDS);
+ }
+ else {
+ break;
+ }
+ argc--; argv++;
+ }
+ const char* directory = TOKU_TEST_FILENAME;
+ char unlink_all[strlen(directory)+20];
+ snprintf(unlink_all, strlen(directory)+20, "rm -rf %s", directory);
+
+ int templen = strlen(directory)+15;
+ char tf_template[templen];
+ int tlen = snprintf(tf_template, templen, "%s/tempXXXXXX", directory);
+ assert (tlen>0 && tlen<templen);
+
+ char output_name[templen];
+ int olen = snprintf(output_name, templen, "%s/test.tokudb", directory);
+ assert (olen>0 && olen<templen);
+
+    // calibrate: run once with no trigger to count the total number of injection points
+ int r;
+ r = system(unlink_all); CKERR(r);
+ r = toku_os_mkdir(directory, 0755); CKERR(r);
+ test(directory, false);
+
+ if (verbose) printf("my_malloc_count=%d big_count=%d\n", my_malloc_count, my_big_malloc_count);
+
+ {
+ int event_limit = event_count;
+ if (tend>=0 && tend<event_limit) event_limit=tend;
+ if (verbose) printf("event_limit=%d\n", event_limit);
+
+ for (int i = tstart+1; i <= event_limit; i++) {
+ reset_event_counts();
+ reset_my_malloc_counts();
+ event_count_trigger = i;
+ r = system(unlink_all); CKERR(r);
+ r = toku_os_mkdir(directory, 0755); CKERR(r);
+ if (verbose) printf("event=%d\n", i);
+ test(directory, true);
+ }
+ r = system(unlink_all); CKERR(r);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ftloader-test-open.cc b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-open.cc
new file mode 100644
index 00000000..eb2a42de
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-open.cc
@@ -0,0 +1,127 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// The purpose of this test is to find memory leaks in the ft_loader_open function. Right now, it finds leaks in some very simple
+// cases.
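+// It does this by forcing the (i+1)-th allocation to fail and retrying toku_ft_loader_open
+// with increasing i until the open finally succeeds, so each early allocation-failure path
+// in the open code is exercised.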
+
+#define DONT_DEPRECATE_MALLOC
+#include "test.h"
+#include "loader/loader.h"
+#include "loader/loader-internal.h"
+#include "memory.h"
+#include <portability/toku_path.h>
+
+
+static int my_malloc_count = 0;
+static int my_malloc_trigger = 0;
+
+static void set_my_malloc_trigger(int n) {
+ my_malloc_count = 0;
+ my_malloc_trigger = n;
+}
+
+static void *my_malloc(size_t n) {
+ my_malloc_count++;
+ if (my_malloc_count == my_malloc_trigger) {
+ errno = ENOSPC;
+ return NULL;
+ } else
+ return os_malloc(n);
+}
+
+static int my_compare(DB *UU(desc), const DBT *UU(akey), const DBT *UU(bkey)) {
+ return EINVAL;
+}
+
+static void test_loader_open(int ndbs) {
+ int r;
+ FTLOADER loader;
+
+ // open the ft_loader. this runs the extractor.
+ FT_HANDLE fts[ndbs];
+ DB* dbs[ndbs];
+ const char *fnames[ndbs];
+ ft_compare_func compares[ndbs];
+ for (int i = 0; i < ndbs; i++) {
+ fts[i] = NULL;
+ dbs[i] = NULL;
+ fnames[i] = "";
+ compares[i] = my_compare;
+ }
+
+ toku_set_func_malloc(my_malloc);
+
+ int i;
+ for (i = 0; ; i++) {
+ set_my_malloc_trigger(i+1);
+
+ r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, fts, dbs, fnames, compares, "", ZERO_LSN, nullptr, true, 0, false, true);
+ if (r == 0)
+ break;
+ }
+
+ if (verbose) printf("i=%d\n", i);
+
+ r = toku_ft_loader_abort(loader, true);
+ assert(r == 0);
+}
+
+int test_main (int argc, const char *argv[]) {
+ const char *progname=argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ verbose=1;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else if (argc!=1) {
+ fprintf(stderr, "Usage:\n %s [-v] [-q]\n", progname);
+ exit(1);
+ }
+ else {
+ break;
+ }
+ argc--; argv++;
+ }
+
+ test_loader_open(0);
+ test_loader_open(1);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/ftloader-test-vm.cc b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-vm.cc
new file mode 100644
index 00000000..7ac1f59d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-vm.cc
@@ -0,0 +1,83 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "cachetable/cachetable.h"
+#include <inttypes.h>
+
+/* Test for #2755. The ft_loader is using too much VM. */
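+// Each toku_cachetable_reserve_memory(ct, 0.5, 0) call should hand out at most about
+// half of the memory that is still reservable, so the successive reservations satisfy
+// r0 >= r1 >= r2 and each is bounded by roughly half of what remained before the call.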
+static void test_cachetable_reservation (long size) {
+ CACHETABLE ct;
+ {
+ toku_cachetable_create(&ct, size, ZERO_LSN, NULL);
+ }
+ {
+ uint64_t r0 = toku_cachetable_reserve_memory(ct, 0.5, 0);
+ uint64_t r0_bound = size/2 + size/16;
+ uint64_t r1 = toku_cachetable_reserve_memory(ct, 0.5, 0);
+ uint64_t r1_bound = r0_bound/2;
+ uint64_t r2 = toku_cachetable_reserve_memory(ct, 0.5, 0);
+ uint64_t r2_bound = r1_bound/2;
+ if (verbose) printf("%10ld: r0=%10" PRIu64 " r1=%10" PRIu64 " r2=%10" PRIu64 "\n", size, r0, r1, r2);
+ assert(r0 <= r0_bound);
+ assert(r1 <= r1_bound);
+ assert(r2 <= r2_bound);
+ assert(r1 <= r0);
+ assert(r2 <= r1);
+
+ long unreservable_part = size * 0.25;
+ assert(r0 <= (size - unreservable_part)*0.5);
+ assert(r1 <= (size - unreservable_part - r0)*0.5);
+ assert(r2 <= (size - unreservable_part - r0 -1)*0.5);
+ toku_cachetable_release_reserved_memory(ct, r0);
+ toku_cachetable_release_reserved_memory(ct, r1);
+ toku_cachetable_release_reserved_memory(ct, r2);
+ }
+ {
+ toku_cachetable_close(&ct);
+ }
+
+}
+
+int test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ test_cachetable_reservation(1L<<28);
+ test_cachetable_reservation(1LL<<33);
+ test_cachetable_reservation(3L<<28);
+ test_cachetable_reservation((3L<<28) - 107);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ftloader-test-writer-errors.cc b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-writer-errors.cc
new file mode 100644
index 00000000..e4423319
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-writer-errors.cc
@@ -0,0 +1,277 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the loader write dbfile function
+
+#define DONT_DEPRECATE_WRITES
+#define DONT_DEPRECATE_MALLOC
+
+#include "test.h"
+#include "loader/loader-internal.h"
+#include "ftloader-error-injector.h"
+#include <portability/toku_path.h>
+
+
+static int qsort_compare_ints (const void *a, const void *b) {
+ int avalue = *(int*)a;
+ int bvalue = *(int*)b;
+ if (avalue<bvalue) return -1;
+ if (avalue>bvalue) return +1;
+ return 0;
+
+}
+
+static int compare_ints (DB *UU(desc), const DBT *akey, const DBT *bkey) {
+ assert(akey->size==sizeof(int));
+ assert(bkey->size==sizeof(int));
+ return qsort_compare_ints(akey->data, bkey->data);
+}
+
+static void err_cb(DB *db UU(), int dbn UU(), int err UU(), DBT *key UU(), DBT *val UU(), void *extra UU()) {
+ fprintf(stderr, "error in test\n");
+ abort();
+}
+
+static int write_dbfile (char *tf_template, int n, char *output_name, bool expect_error, int testno) {
+ if (verbose) printf("test start %d %d testno=%d\n", n, expect_error, testno);
+
+ int result = 0;
+ DB *dest_db = NULL;
+ struct ft_loader_s bl;
+ ZERO_STRUCT(bl);
+ bl.temp_file_template = tf_template;
+ bl.reserved_memory = 512*1024*1024;
+ int r = ft_loader_init_file_infos(&bl.file_infos); CKERR(r);
+ ft_loader_lock_init(&bl);
+ ft_loader_set_fractal_workers_count_from_c(&bl);
+
+ struct merge_fileset fs;
+ init_merge_fileset(&fs);
+
+ // put rows in the row set
+ struct rowset aset;
+ uint64_t size_est = 0;
+ init_rowset(&aset, toku_ft_loader_get_rowset_budget_for_testing());
+ for (int i=0; i<n; i++) {
+ DBT key;
+ toku_fill_dbt(&key, &i, sizeof i);
+ DBT val;
+ toku_fill_dbt(&val, &i, sizeof i);
+ add_row(&aset, &key, &val);
+ size_est += ft_loader_leafentry_size(key.size, val.size, TXNID_NONE);
+ }
+
+ toku_ft_loader_set_n_rows(&bl, n);
+
+ ft_loader_init_error_callback(&bl.error_callback);
+ ft_loader_set_error_function(&bl.error_callback, err_cb, NULL);
+ ft_loader_init_poll_callback(&bl.poll_callback);
+ r = ft_loader_sort_and_write_rows(&aset, &fs, &bl, 0, dest_db, compare_ints); CKERR(r);
+
+ ft_loader_fi_close_all(&bl.file_infos);
+
+ QUEUE q;
+ r = toku_queue_create(&q, 0xFFFFFFFF); // infinite queue.
+ assert(r==0);
+ r = merge_files(&fs, &bl, 0, dest_db, compare_ints, 0, q); CKERR(r);
+ assert(fs.n_temp_files==0);
+
+ QUEUE q2;
+ r = toku_queue_create(&q2, 0xFFFFFFFF); // infinite queue.
+ assert(r==0);
+
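+ // Drain the merged rowsets from q, check that the keys come out as 0..n-1 in
+ // order, and forward each rowset to q2 for the dbfile writer.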
+ size_t num_found = 0;
+ size_t found_size_est = 0;
+ while (1) {
+ void *v;
+ r = toku_queue_deq(q, &v, NULL, NULL);
+ if (r==EOF) break;
+ struct rowset *rs = (struct rowset *)v;
+ if (verbose) printf("v=%p\n", v);
+
+ for (size_t i=0; i<rs->n_rows; i++) {
+ struct row *row = &rs->rows[i];
+ assert(row->klen==sizeof(int));
+ assert(row->vlen==sizeof(int));
+ assert((int)(num_found+i)==*(int*)(rs->data+row->off));
+ found_size_est += ft_loader_leafentry_size(row->klen, row->vlen, TXNID_NONE);
+ }
+
+ num_found += rs->n_rows;
+
+ r = toku_queue_enq(q2, v, 0, NULL);
+ assert(r==0);
+ }
+ assert((int)num_found == n);
+ if (!expect_error) assert(found_size_est == size_est);
+
+ r = toku_queue_eof(q2);
+ assert(r==0);
+
+ r = toku_queue_destroy(q);
+ assert(r==0);
+
+ DESCRIPTOR_S desc;
+ toku_fill_dbt(&desc.dbt, "abcd", 4);
+
+ int fd = open(output_name, O_RDWR | O_CREAT | O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(fd>=0);
+
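+ // Swap in the injecting malloc/realloc and failing write functions from the
+ // shared ftloader-error-injector.h helpers so the dbfile write below can fail
+ // once event_count_trigger events have occurred (see test_main).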
+ toku_set_func_malloc_only(my_malloc);
+ toku_set_func_realloc_only(my_realloc);
+ toku_set_func_fwrite(bad_fwrite);
+ toku_set_func_write(bad_write);
+ toku_set_func_pwrite(bad_pwrite);
+ ft_loader_set_error_function(&bl.error_callback, NULL, NULL);
+ ft_loader_set_poll_function(&bl.poll_callback, loader_poll_callback, NULL);
+
+ result = toku_loader_write_ft_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16);
+
+ toku_set_func_malloc_only(NULL);
+ toku_set_func_realloc_only(NULL);
+ toku_set_func_fwrite(nullptr);
+ toku_set_func_write(NULL);
+ toku_set_func_pwrite(NULL);
+
+ ft_loader_destroy_error_callback(&bl.error_callback);
+ ft_loader_destroy_poll_callback(&bl.poll_callback);
+ ft_loader_lock_destroy(&bl);
+
+ r = toku_queue_destroy(q2);
+ assert(r==0);
+
+ destroy_merge_fileset(&fs);
+ ft_loader_fi_destroy(&bl.file_infos, expect_error);
+
+ return result;
+}
+
+static int usage(const char *progname, int n) {
+ fprintf(stderr, "Usage: %s [options] directory\n", progname);
+ fprintf(stderr, "[-v] turn on verbose\n");
+ fprintf(stderr, "[-q] turn off verbose\n");
+ fprintf(stderr, "[-r %d] set the number of rows\n", n);
+ fprintf(stderr, "[-s] set the small loader size factor\n");
+ fprintf(stderr, "[-m] inject big malloc and realloc errors\n");
+ fprintf(stderr, "[--malloc_limit %u] set the threshold for failing malloc and realloc\n", (unsigned) my_big_malloc_limit);
+ fprintf(stderr, "[--realloc_errors] inject realloc errors\n");
+ fprintf(stderr, "[-w] inject write errors\n");
+ fprintf(stderr, "[-u] inject user errors\n");
+ return 1;
+}
+
+int test_main (int argc, const char *argv[]) {
+ const char *progname=argv[0];
+ int n = 1;
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-h")==0) {
+ return usage(progname, n);
+ } else if (strcmp(argv[0],"-v")==0) {
+ verbose=1;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else if (strcmp(argv[0],"-r") == 0) {
+ argc--; argv++;
+ n = atoi(argv[0]);
+ } else if (strcmp(argv[0],"-s") == 0) {
+ toku_ft_loader_set_size_factor(1);
+ } else if (strcmp(argv[0],"-w") == 0) {
+ do_write_errors = 1;
+ } else if (strcmp(argv[0],"-m") == 0) {
+ do_malloc_errors = 1;
+ do_realloc_errors = 1;
+ } else if (strcmp(argv[0],"-u") == 0) {
+ do_user_errors = 1;
+ } else if (strcmp(argv[0],"--realloc_errors") == 0) {
+ do_realloc_errors = 1;
+ } else if (strcmp(argv[0],"--malloc_limit") == 0 && argc > 1) {
+ argc--; argv++;
+ my_big_malloc_limit = atoi(argv[0]);
+ } else if (argc!=1) {
+ return usage(progname, n);
+ }
+ else {
+ break;
+ }
+ argc--; argv++;
+ }
+ const char* directory = TOKU_TEST_FILENAME;
+ char unlink_all[strlen(directory)+20];
+ snprintf(unlink_all, strlen(directory)+20, "rm -rf %s", directory);
+
+ int templen = strlen(directory)+15;
+ char tf_template[templen];
+ int tlen = snprintf(tf_template, templen, "%s/tempXXXXXX", directory);
+ assert (tlen>0 && tlen<templen);
+
+ char output_name[templen];
+ int olen = snprintf(output_name, templen, "%s/test.tokudb", directory);
+ assert (olen>0 && olen<templen);
+
+ // calibrate: run once with no injected failures to count the injectable events
+ int r;
+ r = system(unlink_all); CKERR(r);
+ r = toku_os_mkdir(directory, 0755); CKERR(r);
+ r = write_dbfile(tf_template, n, output_name, false, 0); CKERR(r);
+
+ if (verbose) printf("my_malloc_count=%d big_count=%d\n", my_malloc_count, my_big_malloc_count);
+ if (verbose) printf("my_realloc_count=%d big_count=%d\n", my_realloc_count, my_big_realloc_count);
+
+ int event_limit = event_count;
+ if (verbose) printf("event_limit=%d\n", event_limit);
+
+ // we computed an upper bound on the number of events. since the loader continues to malloc after a
+ // malloc failure, the actual number of events that can induce a failed load is less than the upper
+ // bound.
+ for (int i = 1; i <= event_limit; i++) {
+ reset_event_counts();
+ reset_my_malloc_counts();
+ event_count_trigger = i;
+ r = system(unlink_all); CKERR(r);
+ r = toku_os_mkdir(directory, 0755); CKERR(r);
+ r = write_dbfile(tf_template, n, output_name, true, i);
+ if (verbose) printf("event_count=%d\n", event_count);
+ if (r == 0)
+ break;
+ }
+
+ r = system(unlink_all); CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ftloader-test-writer.cc b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-writer.cc
new file mode 100644
index 00000000..0638d1a2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ftloader-test-writer.cc
@@ -0,0 +1,289 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the loader write dbfile function
+
+
+#include "test.h"
+#include "loader/loader-internal.h"
+#include <inttypes.h>
+#include <portability/toku_path.h>
+
+
+static void traceit(const char *s) {
+ time_t t = time(NULL);
+ printf("%.24s %s\n", ctime(&t), s);
+ fflush(stdout);
+}
+
+static int qsort_compare_ints (const void *a, const void *b) {
+ int avalue = *(int*)a;
+ int bvalue = *(int*)b;
+ if (avalue<bvalue) return -1;
+ if (avalue>bvalue) return +1;
+ return 0;
+}
+
+static int compare_ints (DB *UU(desc), const DBT *akey, const DBT *bkey) {
+ assert(akey->size==sizeof(int));
+ assert(bkey->size==sizeof(int));
+ return qsort_compare_ints(akey->data, bkey->data);
+}
+
+static void err_cb(DB *db UU(), int dbn UU(), int err UU(), DBT *key UU(), DBT *val UU(), void *extra UU()) {
+ fprintf(stderr, "error in test\n");
+ abort();
+}
+
+static void verify_dbfile(int n, const char *name) {
+ if (verbose) traceit("verify");
+
+ int r;
+
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ TOKUTXN const null_txn = NULL;
+ FT_HANDLE t = NULL;
+ toku_ft_handle_create(&t);
+ toku_ft_set_bt_compare(t, compare_ints);
+ r = toku_ft_handle_open(t, name, 0, 0, ct, null_txn); assert(r==0);
+
+ if (verbose) traceit("Verifying ft internals");
+ r = toku_verify_ft(t);
+ if (verbose) traceit("Verified ft internals");
+
+ FT_CURSOR cursor = NULL;
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
+
+ size_t userdata = 0;
+ int i;
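+ // cursor through the file and check that row i has key i and value i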
+ for (i=0; ; i++) {
+ int kk = i;
+ int vv = i;
+ struct check_pair pair = {sizeof kk, &kk, sizeof vv, &vv, 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ if (r != 0) {
+ assert(pair.call_count ==0);
+ break;
+ }
+ assert(pair.call_count==1);
+ userdata += pair.keylen + pair.vallen;
+ }
+
+ assert(i == n);
+
+ toku_ft_cursor_close(cursor);
+
+ struct ftstat64_s s;
+ toku_ft_handle_stat64(t, NULL, &s);
+ assert(s.nkeys == (uint64_t)n && s.ndata == (uint64_t)n && s.dsize == userdata);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+ if (verbose) traceit("verify done");
+}
+
+static void test_write_dbfile (char *tf_template, int n, char *output_name, TXNID xid) {
+ if (verbose) traceit("test start");
+
+ DB *dest_db = NULL;
+ struct ft_loader_s bl;
+ ZERO_STRUCT(bl);
+ bl.temp_file_template = tf_template;
+ bl.reserved_memory = 512*1024*1024;
+ bl.load_root_xid = xid;
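+ // When a nonzero xid is given, record a single creating root xid so the leaf
+ // entries written below account for transaction ids (see the
+ // ft_loader_leafentry_size calls).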
+ if (xid) {
+ XCALLOC_N(1, bl.root_xids_that_created);
+ bl.root_xids_that_created[0] = 0;
+ }
+ int r = ft_loader_init_file_infos(&bl.file_infos); CKERR(r);
+ ft_loader_lock_init(&bl);
+ ft_loader_set_fractal_workers_count_from_c(&bl);
+
+ struct merge_fileset fs;
+ init_merge_fileset(&fs);
+
+ // put rows in the row set
+ struct rowset aset;
+ uint64_t size_est = 0;
+ init_rowset(&aset, toku_ft_loader_get_rowset_budget_for_testing());
+ for (int i=0; i<n; i++) {
+ DBT key;
+ toku_fill_dbt(&key, &i, sizeof i);
+ DBT val;
+ toku_fill_dbt(&val, &i, sizeof i);
+ add_row(&aset, &key, &val);
+ size_est += ft_loader_leafentry_size(key.size, val.size, xid);
+ }
+
+ toku_ft_loader_set_n_rows(&bl, n);
+
+ ft_loader_init_error_callback(&bl.error_callback);
+ ft_loader_set_error_function(&bl.error_callback, err_cb, NULL);
+ r = ft_loader_sort_and_write_rows(&aset, &fs, &bl, 0, dest_db, compare_ints); CKERR(r);
+ // destroy_rowset(&aset);
+
+ ft_loader_fi_close_all(&bl.file_infos);
+
+ QUEUE q;
+ r = toku_queue_create(&q, 0xFFFFFFFF); // infinite queue.
+ assert(r==0);
+ r = merge_files(&fs, &bl, 0, dest_db, compare_ints, 0, q); CKERR(r);
+ assert(fs.n_temp_files==0);
+
+ QUEUE q2;
+ r = toku_queue_create(&q2, 0xFFFFFFFF); // infinite queue.
+ assert(r==0);
+
+ size_t num_found = 0;
+ size_t found_size_est = 0;
+ while (1) {
+ void *v;
+ r = toku_queue_deq(q, &v, NULL, NULL);
+ if (r==EOF) break;
+ struct rowset *rs = (struct rowset *)v;
+ if (verbose) printf("v=%p\n", v);
+
+ for (size_t i=0; i<rs->n_rows; i++) {
+ struct row *row = &rs->rows[i];
+ assert(row->klen==sizeof(int));
+ assert(row->vlen==sizeof(int));
+ assert((int)(num_found+i)==*(int*)(rs->data+row->off));
+ found_size_est += ft_loader_leafentry_size(row->klen, row->vlen, xid);
+ }
+
+ num_found += rs->n_rows;
+
+ r = toku_queue_enq(q2, v, 0, NULL);
+ assert(r==0);
+ }
+ assert((int)num_found == n);
+ assert(found_size_est == size_est);
+
+ r = toku_queue_eof(q2);
+ assert(r==0);
+
+ r = toku_queue_destroy(q);
+ assert(r==0);
+
+ DESCRIPTOR_S desc;
+ toku_fill_dbt(&desc.dbt, "abcd", 4);
+
+ int fd = open(output_name, O_RDWR | O_CREAT | O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(fd>=0);
+
+ if (verbose) traceit("write to file");
+ r = toku_loader_write_ft_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16);
+ assert(r==0);
+
+ r = toku_queue_destroy(q2);
+ assert_zero(r);
+
+ destroy_merge_fileset(&fs);
+ ft_loader_fi_destroy(&bl.file_infos, false);
+
+ // walk a cursor through the dbfile and verify the rows
+ verify_dbfile(n, output_name);
+
+ ft_loader_destroy_error_callback(&bl.error_callback);
+ ft_loader_lock_destroy(&bl);
+
+ toku_free(bl.root_xids_that_created);
+}
+
+static int nrows = 1;
+static TXNID xid = 0;
+
+static int usage(const char *progname) {
+ fprintf(stderr, "Usage:\n %s [-h] [-v] [-q] [-r %d] [-x %" PRIu64 "] [-s] directory\n", progname, nrows, xid);
+ return 1;
+}
+
+int test_main (int argc, const char *argv[]) {
+ const char *progname=argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-h") == 0 || strcmp(argv[0], "--help") == 0) {
+ return usage(progname);
+ } else if (strcmp(argv[0], "-v") == 0 || strcmp(argv[0], "--verbose") == 0) {
+ verbose=1;
+ } else if (strcmp(argv[0], "-q") == 0) {
+ verbose=0;
+ } else if (strcmp(argv[0], "-r") == 0) {
+ argc--; argv++;
+ nrows = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-x") == 0) {
+ argc--; argv++;
+ xid = atol(argv[0]);
+ } else if (strcmp(argv[0], "-s") == 0) {
+ toku_ft_loader_set_size_factor(1);
+ } else if (argv[0][0] == '-' || argc != 1) {
+ return usage(progname);
+ } else {
+ break;
+ }
+ argc--; argv++;
+ }
+ const char* directory = TOKU_TEST_FILENAME;
+ char unlink_all[strlen(directory)+20];
+ snprintf(unlink_all, strlen(directory)+20, "rm -rf %s", directory);
+ int r;
+ r = system(unlink_all);
+ CKERR(r);
+ r = toku_os_mkdir(directory, 0755);
+ CKERR(r);
+
+ int templen = strlen(directory)+15;
+ char tf_template[templen];
+ int tlen = snprintf(tf_template, templen, "%s/tempXXXXXX", directory);
+ assert (tlen>0 && tlen<templen);
+
+ char output_name[templen];
+ int olen = snprintf(output_name, templen, "%s/test.tokudb", directory);
+ assert (olen>0 && olen<templen);
+
+ test_write_dbfile(tf_template, nrows, output_name, xid);
+
+#if 0
+ r = system(unlink_all);
+ CKERR(r);
+#endif
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/ftloader-test.cc b/storage/tokudb/PerconaFT/ft/tests/ftloader-test.cc
new file mode 100644
index 00000000..4a3282e1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ftloader-test.cc
@@ -0,0 +1,434 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <toku_assert.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include "loader/loader-internal.h"
+#include "memory.h"
+#include <portability/toku_path.h>
+
+
+static int qsort_compare_ints (const void *a, const void *b) {
+ int avalue = *(int*)a;
+ int bvalue = *(int*)b;
+ if (avalue<bvalue) return -1;
+ if (avalue>bvalue) return +1;
+ return 0;
+
+}
+
+static int compare_ints (DB* UU(desc), const DBT *akey, const DBT *bkey) {
+ assert(akey->size==sizeof(int));
+ assert(bkey->size==sizeof(int));
+ return qsort_compare_ints(akey->data, bkey->data);
+}
+
+static void err_cb(DB *db UU(), int dbn UU(), int err UU(), DBT *key UU(), DBT *val UU(), void *extra UU()) {
+ fprintf(stderr, "error in test\n");
+ abort();
+}
+
+bool founddup;
+static void expect_dups_cb(DB *db UU(), int dbn UU(), int err UU(), DBT *key UU(), DBT *val UU(), void *extra UU()) {
+ founddup=true;
+}
+
+static void test_merge_internal (int a[], int na, int b[], int nb, bool dups) {
+ int *MALLOC_N(na+nb, ab); // the combined array a and b
+ for (int i=0; i<na; i++) {
+ ab[i]=a[i];
+ }
+ for (int i=0; i<nb; i++) {
+ ab[na+i] = b[i];
+ }
+ struct row *MALLOC_N(na, ar);
+ struct row *MALLOC_N(nb, br);
+ for (int i=0; i<na; i++) {
+ ar[i].off = i*sizeof(a[0]);
+ ar[i].klen = sizeof(a[i]);
+ ar[i].vlen = 0;
+ }
+ for (int i=0; i<nb; i++) {
+ br[i].off = (na+i)*sizeof(b[0]);
+ br[i].klen = sizeof(b[i]);
+ br[i].vlen = 0;
+ }
+ struct row *MALLOC_N(na+nb, cr);
+ DB *dest_db = NULL;
+ struct ft_loader_s bl;
+ ft_loader_init_error_callback(&bl.error_callback);
+ ft_loader_set_error_function(&bl.error_callback, dups ? expect_dups_cb : err_cb, NULL);
+ struct rowset rs = { .memory_budget = 0, .n_rows = 0, .n_rows_limit = 0, .rows = NULL, .n_bytes = 0, .n_bytes_limit = 0,
+ .data=(char*)ab};
+ merge_row_arrays_base(cr, ar, na, br, nb, 0, dest_db, compare_ints, &bl, &rs);
+ ft_loader_call_error_function(&bl.error_callback);
+ if (dups) {
+ assert(founddup);
+ } else {
+ // verify the merge
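+ // walk the merged rows: each one must be the smaller of the next unconsumed
+ // elements of a and b (only checked while both inputs still have elements)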
+ int i=0;
+ int j=0;
+ for (int k=0; k<na+nb; k++) {
+ int voff = cr[k].off;
+ int vc = *(int*)(((char*)ab)+voff);
+ if (i<na && j<nb) {
+ if (vc==a[i]) {
+ assert(a[i]<=b[j]);
+ i++;
+ } else if (vc==b[j]) {
+ assert(a[i]>b[j]);
+ j++;
+ } else {
+ assert(0);
+ }
+ }
+ }
+ }
+ toku_free(cr);
+ toku_free(ar);
+ toku_free(br);
+ toku_free(ab);
+ ft_loader_destroy_error_callback(&bl.error_callback);
+}
+
+/* Test the basic merger. */
+static void test_merge (void) {
+ {
+ int avals[]={1,2,3,4,5};
+ int *bvals = NULL;
+ test_merge_internal(avals, 5, bvals, 0, false);
+ test_merge_internal(bvals, 0, avals, 5, false);
+ }
+ {
+ int avals[]={1,3,5,7};
+ int bvals[]={2,4};
+ test_merge_internal(avals, 4, bvals, 2, false);
+ test_merge_internal(bvals, 2, avals, 4, false);
+ }
+ {
+ int avals[]={1,2,3,5,6,7};
+ int bvals[]={2,4,5,6,8};
+ test_merge_internal(avals, 6, bvals, 5, true);
+ test_merge_internal(bvals, 5, avals, 6, true);
+ }
+}
+
+static void test_internal_mergesort_row_array (int a[], int n) {
+ struct row *MALLOC_N(n, ar);
+ for (int i=0; i<n; i++) {
+ ar[i].off = i*sizeof(a[0]);
+ ar[i].klen = sizeof(a[i]);
+ ar[i].vlen = 0;
+ }
+ struct rowset rs = { .memory_budget = 0, .n_rows = 0, .n_rows_limit = 0, .rows = NULL, .n_bytes = 0, .n_bytes_limit = 0,
+ .data=(char*)a};
+ ft_loader_mergesort_row_array (ar, n, 0, NULL, compare_ints, NULL, &rs);
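+ // Independently sort a plain copy with qsort and check that the row offsets
+ // now visit exactly that sorted sequence.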
+ int *MALLOC_N(n, tmp);
+ for (int i=0; i<n; i++) {
+ tmp[i]=a[i];
+ }
+ qsort(tmp, n, sizeof(a[0]), qsort_compare_ints);
+ for (int i=0; i<n; i++) {
+ int voff = ar[i].off;
+ int v = *(int*)(((char*)a)+voff);
+ assert(tmp[i]==v);
+ }
+ toku_free(ar);
+ toku_free(tmp);
+}
+
+static void test_mergesort_row_array (void) {
+ {
+ int avals[]={5,2,1,7};
+ for (int i=0; i<=4; i++)
+ test_internal_mergesort_row_array(avals, i);
+ }
+ const int MAX_LEN = 100;
+ enum { MAX_VAL = 1000 };
+ for (int i=0; i<MAX_LEN; i++) {
+ bool used[MAX_VAL];
+ for (int j=0; j<MAX_VAL; j++) used[j]=false;
+ int len=1+random()%MAX_LEN;
+ int avals[len];
+ for (int j=0; j<len; j++) {
+ int v;
+ do {
+ v = random()%MAX_VAL;
+ } while (used[v]);
+ avals[j] = v;
+ used[v] = true;
+ }
+ test_internal_mergesort_row_array(avals, len);
+ }
+}
+
+static void test_read_write_rows (char *tf_template) {
+ struct ft_loader_s bl;
+ ZERO_STRUCT(bl);
+ bl.temp_file_template = tf_template;
+ int r = ft_loader_init_file_infos(&bl.file_infos);
+ CKERR(r);
+ FIDX file;
+ r = ft_loader_open_temp_file(&bl, &file);
+ CKERR(r);
+
+ uint64_t dataoff=0;
+
+ const char *keystrings[] = {"abc", "b", "cefgh"};
+ const char *valstrings[] = {"defg", "", "xyz"};
+ uint64_t actual_size=0;
+ for (int i=0; i<3; i++) {
+ DBT key;
+ toku_fill_dbt(&key, keystrings[i], strlen(keystrings[i]));
+ DBT val;
+ toku_fill_dbt(&val, valstrings[i], strlen(valstrings[i]));
+ r = loader_write_row(&key, &val, file, toku_bl_fidx2file(&bl, file), &dataoff, nullptr, &bl);
+ CKERR(r);
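+ // loader_write_row advances dataoff by the key and value bytes plus 8 bytes
+ // of per-row overhead (presumably two 4-byte length fields), hence the +8.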
+ actual_size+=key.size + val.size + 8;
+ }
+ if (actual_size != dataoff) fprintf(stderr, "actual_size=%" PRIu64 ", dataoff=%" PRIu64 "\n", actual_size, dataoff);
+ assert(actual_size == dataoff);
+
+ r = ft_loader_fi_close(&bl.file_infos, file, true);
+ CKERR(r);
+
+ r = ft_loader_fi_reopen(&bl.file_infos, file, "r");
+ CKERR(r);
+
+ {
+ int n_read=0;
+ DBT key, val;
+ toku_init_dbt(&key);
+ toku_init_dbt(&val);
+ while (0==loader_read_row(toku_bl_fidx2file(&bl, file), &key, &val)) {
+ assert(strlen(keystrings[n_read])==key.size);
+ assert(strlen(valstrings[n_read])==val.size);
+ assert(0==memcmp(keystrings[n_read], key.data, key.size));
+ assert(0==memcmp(valstrings[n_read], val.data, val.size));
+ assert(key.size<=key.ulen);
+ assert(val.size<=val.ulen);
+ n_read++;
+ }
+ assert(n_read==3);
+ toku_free(key.data);
+ toku_free(val.data);
+ }
+ r = ft_loader_fi_close(&bl.file_infos, file, true);
+ CKERR(r);
+
+ r = ft_loader_fi_unlink(&bl.file_infos, file);
+ CKERR(r);
+
+ assert(bl.file_infos.n_files_open==0);
+ assert(bl.file_infos.n_files_extant==0);
+
+ ft_loader_fi_destroy(&bl.file_infos, false);
+}
+
+static void fill_rowset (struct rowset *rows,
+ int keys[],
+ const char *vals[],
+ int n,
+ uint64_t *size_est) {
+ init_rowset(rows, toku_ft_loader_get_rowset_budget_for_testing());
+ for (int i=0; i<n; i++) {
+ DBT key;
+ toku_fill_dbt(&key, &keys[i], sizeof keys[i]);
+ DBT val;
+ toku_fill_dbt(&val, vals[i], strlen(vals[i]));
+ add_row(rows, &key, &val);
+ *size_est += ft_loader_leafentry_size(key.size, val.size, TXNID_NONE);
+ }
+}
+
+static void verify_dbfile(int n, int sorted_keys[], const char *sorted_vals[], const char *name) {
+ int r;
+
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ TOKUTXN const null_txn = NULL;
+ FT_HANDLE t = NULL;
+ toku_ft_handle_create(&t);
+ toku_ft_set_bt_compare(t, compare_ints);
+ r = toku_ft_handle_open(t, name, 0, 0, ct, null_txn); assert(r==0);
+
+ FT_CURSOR cursor = NULL;
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
+
+ size_t userdata = 0;
+ int i;
+ for (i=0; i<n; i++) {
+ struct check_pair pair = {sizeof sorted_keys[i], &sorted_keys[i], (uint32_t) strlen(sorted_vals[i]), sorted_vals[i], 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ if (r != 0) {
+ assert(pair.call_count ==0);
+ break;
+ }
+ assert(pair.call_count==1);
+ userdata += pair.keylen + pair.vallen;
+ }
+
+ struct check_pair pair; memset(&pair, 0, sizeof pair);
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT);
+ assert(r != 0);
+
+ toku_ft_cursor_close(cursor);
+
+ struct ftstat64_s s;
+ toku_ft_handle_stat64(t, NULL, &s);
+ assert(s.nkeys == (uint64_t) n && s.ndata == (uint64_t) n && s.dsize == userdata);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+static void test_merge_files (const char *tf_template, const char *output_name) {
+ DB *dest_db = NULL;
+ struct ft_loader_s bl;
+ ZERO_STRUCT(bl);
+ bl.temp_file_template = tf_template;
+ bl.reserved_memory = 512*1024*1024;
+ int r = ft_loader_init_file_infos(&bl.file_infos); CKERR(r);
+ ft_loader_lock_init(&bl);
+ ft_loader_init_error_callback(&bl.error_callback);
+ ft_loader_set_fractal_workers_count_from_c(&bl);
+
+ struct merge_fileset fs;
+ init_merge_fileset(&fs);
+
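+ // Two disjoint rowsets whose keys interleave; after sorting and merging, the
+ // output file should contain keys 0..9 with values "0","a".."i" (verified below).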
+ int a_keys[] = { 1, 3, 5, 7, 8, 9};
+ int b_keys[] = { 0, 2, 4, 6 };
+ const char *a_vals[] = {"a", "c", "e", "g", "h", "i"};
+ const char *b_vals[] = {"0", "b", "d", "f"};
+ int sorted_keys[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+ const char *sorted_vals[] = { "0", "a", "b", "c", "d", "e", "f", "g", "h", "i" };
+ struct rowset aset, bset;
+ uint64_t size_est = 0;
+ fill_rowset(&aset, a_keys, a_vals, 6, &size_est);
+ fill_rowset(&bset, b_keys, b_vals, 4, &size_est);
+
+ toku_ft_loader_set_n_rows(&bl, 6+4);
+
+ ft_loader_set_error_function(&bl.error_callback, err_cb, NULL);
+ r = ft_loader_sort_and_write_rows(&aset, &fs, &bl, 0, dest_db, compare_ints); CKERR(r);
+ r = ft_loader_sort_and_write_rows(&bset, &fs, &bl, 0, dest_db, compare_ints); CKERR(r);
+ assert(fs.n_temp_files==2 && fs.n_temp_files_limit >= fs.n_temp_files);
+ // destroy_rowset(&aset);
+ // destroy_rowset(&bset);
+ for (int i=0; i<2; i++) assert(fs.data_fidxs[i].idx != -1);
+
+ ft_loader_fi_close_all(&bl.file_infos);
+
+ QUEUE q;
+ r = toku_queue_create(&q, 0xFFFFFFFF); // infinite queue.
+ assert(r==0);
+
+ r = merge_files(&fs, &bl, 0, dest_db, compare_ints, 0, q); CKERR(r);
+
+ assert(fs.n_temp_files==0);
+
+ DESCRIPTOR_S desc;
+ toku_fill_dbt(&desc.dbt, "abcd", 4);
+
+ int fd = open(output_name, O_RDWR | O_CREAT | O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(fd>=0);
+
+ r = toku_loader_write_ft_from_q_in_C(&bl, &desc, fd, 1000, q, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16);
+ assert(r==0);
+
+ destroy_merge_fileset(&fs);
+ ft_loader_fi_destroy(&bl.file_infos, false);
+ ft_loader_destroy_error_callback(&bl.error_callback);
+ ft_loader_lock_destroy(&bl);
+
+ // verify the dbfile
+ verify_dbfile(10, sorted_keys, sorted_vals, output_name);
+
+ r = toku_queue_destroy(q);
+ assert(r==0);
+}
+
+/* Test to see if we can open temporary files. */
+int test_main (int argc, const char *argv[]) {
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ verbose=1;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ }
+ else {
+ break;
+ }
+ argc--; argv++;
+ }
+ const char* directory = TOKU_TEST_FILENAME;
+ int r = toku_os_mkdir(directory, 0755);
+ if (r!=0) CKERR2(errno, EEXIST);
+
+ int templen = strlen(directory)+15;
+ char tf_template[templen];
+ {
+ int n = snprintf(tf_template, templen, "%s/tempXXXXXX", directory);
+ assert (n>0 && n<templen);
+ }
+ char output_name[templen];
+ {
+ int n = snprintf(output_name, templen, "%s/data.tokudb", directory);
+ assert (n>0 && n<templen);
+ }
+ test_read_write_rows(tf_template);
+ test_merge();
+ test_mergesort_row_array();
+ test_merge_files(tf_template, output_name);
+
+ {
+ char deletecmd[templen];
+ int n = snprintf(deletecmd, templen, "rm -rf %s", directory);
+ assert(n>0 && n<templen);
+ r = system(deletecmd);
+ CKERR(r);
+ }
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/generate-upgrade-recovery-logs.cc b/storage/tokudb/PerconaFT/ft/tests/generate-upgrade-recovery-logs.cc
new file mode 100644
index 00000000..f8903f01
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/generate-upgrade-recovery-logs.cc
@@ -0,0 +1,98 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Generate a recovery log with a checkpoint and an optional shutdown log entry.
+// These logs will be used later to test recovery.
+
+#include "test.h"
+
+static void generate_recovery_log(const char *testdir, bool do_shutdown) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(testdir);
+ r = toku_os_mkdir(testdir, S_IRWXU);
+ CKERR(r);
+
+ // open the log
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger);
+ CKERR(r);
+ r = toku_logger_open(testdir, logger);
+ CKERR(r);
+
+ // log checkpoint
+ LSN beginlsn;
+ toku_log_begin_checkpoint(logger, &beginlsn, false, 0, 0);
+ toku_log_end_checkpoint(logger, nullptr, false, beginlsn, 0, 0, 0);
+
+ // log shutdown
+ if (do_shutdown) {
+ toku_log_shutdown(logger, nullptr, true, 0, 0);
+ }
+
+ r = toku_logger_close(&logger);
+ CKERR(r);
+}
+
+int test_main(int argc, const char *argv[]) {
+ bool do_shutdown = true;
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--clean") == 0) {
+ do_shutdown = true;
+ continue;
+ }
+ if (strcmp(argv[i], "--dirty") == 0) {
+ do_shutdown = false;
+ continue;
+ }
+ }
+ char testdir[256];
+ sprintf(testdir, "upgrade-recovery-logs-%d-%s", TOKU_LOG_VERSION, do_shutdown ? "clean" : "dirty");
+ generate_recovery_log(testdir, do_shutdown);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/is_empty.cc b/storage/tokudb/PerconaFT/ft/tests/is_empty.cc
new file mode 100644
index 00000000..d0499b95
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/is_empty.cc
@@ -0,0 +1,156 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include "toku_os.h"
+#include "cachetable/checkpoint.h"
+
+
+#define FILENAME "test0.ft"
+
+static void test_it (int N) {
+ FT_HANDLE ft;
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); CKERR(r);
+
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); CKERR(r);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); CKERR(r);
+
+
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, logger);
+ toku_cachetable_set_env_dir(ct, TOKU_TEST_FILENAME);
+
+ toku_logger_set_cachetable(logger, ct);
+
+ r = toku_logger_open_rollback(logger, ct, true); CKERR(r);
+
+ TOKUTXN txn;
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
+
+ r = toku_open_ft_handle(FILENAME, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
+
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
+ toku_txn_close_txn(txn);
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
+ r = toku_close_ft_handle_nolsn(ft, NULL); CKERR(r);
+
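+ // Insert N rows with random keys, one per transaction, checkpointing and closing
+ // the handle after each insert; the second loop deletes them the same way.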
+ unsigned int rands[N];
+ for (int i=0; i<N; i++) {
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
+ r = toku_open_ft_handle(FILENAME, 0, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
+ toku_txn_close_txn(txn);
+
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
+ char key[100],val[300];
+ DBT k, v;
+ rands[i] = random();
+ snprintf(key, sizeof(key), "key%x.%x", rands[i], i);
+ memset(val, 'v', sizeof(val));
+ val[sizeof(val)-1]=0;
+ toku_ft_insert(ft, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), txn);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
+ toku_txn_close_txn(txn);
+
+
+ r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
+ r = toku_close_ft_handle_nolsn(ft, NULL); CKERR(r);
+
+ if (verbose) printf("i=%d\n", i);
+ }
+ for (int i=0; i<N; i++) {
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
+ r = toku_open_ft_handle(FILENAME, 0, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
+ toku_txn_close_txn(txn);
+
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
+ char key[100];
+ DBT k;
+ snprintf(key, sizeof(key), "key%x.%x", rands[i], i);
+ toku_ft_delete(ft, toku_fill_dbt(&k, key, 1+strlen(key)), txn);
+
+ if (0) {
+ bool is_empty;
+ is_empty = toku_ft_is_empty_fast(ft);
+ assert(!is_empty);
+ }
+
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
+ toku_txn_close_txn(txn);
+
+ r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
+ r = toku_close_ft_handle_nolsn(ft, NULL); CKERR(r);
+
+ if (verbose) printf("d=%d\n", i);
+ }
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
+ r = toku_open_ft_handle(FILENAME, 0, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
+ toku_txn_close_txn(txn);
+
+ if (0) {
+ bool is_empty;
+ is_empty = toku_ft_is_empty_fast(ft);
+ assert(is_empty);
+ }
+
+ r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
+ r = toku_close_ft_handle_nolsn(ft, NULL); CKERR(r);
+
+ r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
+ toku_logger_close_rollback(logger);
+ r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
+ toku_cachetable_close(&ct);
+ r = toku_logger_close(&logger); assert(r==0);
+
+}
+
+
+int test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ for (int i=1; i<=64; i++) {
+ test_it(i);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/keyrange.cc b/storage/tokudb/PerconaFT/ft/tests/keyrange.cc
new file mode 100644
index 00000000..1c568290
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/keyrange.cc
@@ -0,0 +1,372 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test keyrange
+
+
+#include "test.h"
+
+#include <unistd.h>
+
+static TOKUTXN const null_txn = 0;
+
+static const char *fname = TOKU_TEST_FILENAME;
+static CACHETABLE ct;
+static FT_HANDLE t;
+
+static void close_ft_and_ct (void) {
+ int r;
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+static void open_ft_and_ct (bool unlink_old) {
+ int r;
+ if (unlink_old) unlink(fname);
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &t, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+}
+
+static void close_and_reopen (void) {
+ close_ft_and_ct();
+ open_ft_and_ct(false);
+}
+
+static void reload (uint64_t limit) {
+ // look up keys 1, 3, 5, ... so the leaf partitions are pulled back into memory
+ for (uint64_t i=0; i<limit; i++) {
+ char key[100],val[100];
+ snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
+ snprintf(val, 100, "%08llu", (unsigned long long)2*i+1);
+ ft_lookup_and_check_nodup(t, key, val);
+ }
+}
+
+enum memory_state {
+ LEAVE_IN_MEMORY, // leave the state in main memory
+ CLOSE_AND_RELOAD, // close the fts and reload them into main memory (that will cause >1 partition in many leaves)
+ CLOSE_AND_REOPEN_LEAVE_ON_DISK // close the fts, reopen them, but leave the state on disk.
+};
+
+static void maybe_reopen (enum memory_state ms, uint64_t limit) {
+ switch (ms) {
+ case CLOSE_AND_RELOAD:
+ close_and_reopen();
+ reload(limit);
+ return;
+ case CLOSE_AND_REOPEN_LEAVE_ON_DISK:
+ close_and_reopen();
+ return;
+ case LEAVE_IN_MEMORY:
+ return;
+ }
+ assert(0);
+}
+
+static void verify_keysrange(enum memory_state UU(ms), uint64_t limit,
+ uint64_t intkey1,
+ uint64_t intkey2,
+ uint64_t less,
+ uint64_t equal1,
+ uint64_t middle,
+ uint64_t equal2,
+ uint64_t greater,
+ bool middle3exact) {
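+ // Compute the exact counts for the key set 1,3,5,...,2*limit-1 and require the
+ // keysrange estimates to be within roughly 10% of them (the 'fiddle' slack below).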
+ uint64_t max_item = limit * 2 - 1;
+ uint64_t perfect_total = limit;
+ uint64_t perfect_less = intkey1 / 2;
+ uint64_t perfect_equal1 = intkey1 % 2 == 1;
+ uint64_t perfect_equal2 = intkey2 % 2 == 1 && intkey2 <= max_item;
+ uint64_t perfect_greater = intkey2 >= max_item ? 0 : (max_item + 1 - intkey2) / 2;
+ uint64_t perfect_middle = perfect_total - perfect_less - perfect_equal1 - perfect_equal2 - perfect_greater;
+
+ uint64_t total = less + equal1 + middle + equal2 + greater;
+ assert(total > 0);
+ assert(total < 2 * perfect_total);
+ assert(total > perfect_total / 2);
+
+ assert(equal1 == perfect_equal1 || (equal1 == 0 && !middle3exact));
+ assert(equal2 == perfect_equal2 || (equal2 == 0 && !middle3exact));
+
+ // As of 2013-02-25 this is accurate with fiddle ~= total/50.
+ // Set to 1/10th to prevent flakiness.
+ uint64_t fiddle = perfect_total / 10;
+ assert(less + fiddle > perfect_less);
+ assert(less < perfect_less + fiddle);
+
+ assert(middle + fiddle > perfect_middle);
+ assert(middle < perfect_middle + fiddle);
+
+ assert(greater + fiddle > perfect_greater);
+ assert(greater < perfect_greater + fiddle);
+
+ if (middle3exact) {
+ assert(middle == perfect_middle);
+ }
+}
+
+
+static void test_keyrange (enum memory_state ms, uint64_t limit) {
+ open_ft_and_ct(true);
+
+ // insert keys 1, 3, 5, ...
+ for (uint64_t i=0; i<limit; i++) {
+ char key[100],val[100];
+ snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
+ snprintf(val, 100, "%08llu", (unsigned long long)2*i+1);
+ DBT k,v;
+ toku_ft_insert(t, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v,val, 1+strlen(val)), null_txn);
+ }
+
+ {
+ struct ftstat64_s s;
+ toku_ft_handle_stat64(t, null_txn, &s);
+
+ assert(0 < s.nkeys && s.nkeys <= limit);
+ assert(0 < s.dsize && s.dsize <= limit * (9 + 9)); // keylen = 9, vallen = 9
+ }
+
+ maybe_reopen(ms, limit);
+
+ {
+ uint64_t prev_less = 0, prev_greater = 1LL << 60;
+ uint64_t count_less_adjacent = 0, count_greater_adjacent = 0; // count how often 'less' grows by exactly 1 and 'greater' shrinks by exactly 1 between consecutive lookups.
+ uint64_t equal_count = 0;
+
+ // lookup keys 1, 3, 5, ...
+ for (uint64_t i=0; i<limit; i++) {
+ char key[100];
+ snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
+ DBT k;
+ uint64_t less,equal,greater;
+ toku_ft_keyrange(t, toku_fill_dbt(&k, key, 1+strlen(key)), &less, &equal, &greater);
+ if (verbose > 1)
+ printf("Pkey %llu/%llu %llu %llu %llu\n", (unsigned long long)2*i+1, (unsigned long long)2*limit, (unsigned long long)less, (unsigned long long)equal, (unsigned long long)greater);
+
+ assert(0 < less + equal + greater);
+ assert(less + equal + greater <= 2 * limit);
+ assert(equal == 0 || equal == 1);
+
+ // The counts are only estimates and do not even change monotonically, but
+ // with all the leaves in main memory every key we look up should be found.
+ if (ms!=CLOSE_AND_REOPEN_LEAVE_ON_DISK) {
+ if (equal==1) equal_count++;
+#if 0
+ // The first few items are exact for less.
+ if (i<70) {
+ assert(less==i);
+ }
+ // The last few items are exact for greater.
+ if (limit-i<70) {
+ assert(greater<=limit-i-1);
+ }
+#endif
+ } else {
+ // After reopen none of the basement nodes are in memory, so exact matches
+ // (equal == 1) should not be reported, except possibly for keys that land in
+ // the last basement node of the tree. Without computing how many keys that
+ // is, we only require that the first half of the keys report equal == 0.
+ assert(i > limit / 2 || equal == 0);
+#if 0
+ if (i<10) {
+ assert(less==0);
+ }
+ if (limit-i<10) {
+ assert(greater==0);
+ }
+#endif
+ }
+ // Count the number of times that prev_less is 1 less than less.
+ if (prev_less+1 == less) {
+ count_less_adjacent++;
+ }
+ if (prev_greater-1 == greater) {
+ count_greater_adjacent++;
+ }
+ // This is the best we can check, since keyrange only returns estimates. At least
+ // in the current implementation, for this test (which has small rows) the
+ // estimate grows monotonically as the leaf grows.
+ prev_less = less;
+ prev_greater = greater;
+ }
+ if (ms!=CLOSE_AND_REOPEN_LEAVE_ON_DISK) {
+ // If we were doing the in-memory case then most keys are adjacent.
+ assert(count_less_adjacent >= 0.9 * limit); // we expect at least 90% to be right.
+ assert(count_greater_adjacent >= 0.9 * limit); // we expect at least 90% to be right.
+ assert(equal_count >= 0.9 * limit);
+ }
+ }
+
+ maybe_reopen(ms, limit);
+
+ // lookup keys 0, 2, 4, ... not in the tree
+ for (uint64_t i=0; i<1+limit; i++) {
+ char key[100];
+ snprintf(key, 100, "%08llu", (unsigned long long)2*i);
+ DBT k;
+ uint64_t less,equal,greater;
+ toku_ft_keyrange(t, toku_fill_dbt(&k, key, 1+strlen(key)), &less, &equal, &greater);
+ if (verbose > 1)
+ printf("Akey %llu/%llu %llu %llu %llu\n", (unsigned long long)2*i, (unsigned long long)2*limit, (unsigned long long)less, (unsigned long long)equal, (unsigned long long)greater);
+
+ assert(0 < less + equal + greater);
+ assert(less + equal + greater <= 2 * limit);
+ assert(equal == 0);
+#if 0
+ // The first few items are exact (looking a key that's not there)
+ if (ms!=CLOSE_AND_REOPEN_LEAVE_ON_DISK) {
+ if (i<70) {
+ assert(less==i);
+ }
+ // The last few items are exact (looking up a key that's not there)
+ if (limit-i<70) {
+ assert(greater<=limit-i);
+ }
+ } else {
+ if (i<10) {
+ assert(less==0);
+ }
+ if (limit-i<10) {
+ assert(greater==0);
+ }
+ }
+#endif
+ }
+
+ maybe_reopen(ms, limit);
+
+ {
+ uint64_t totalqueries = 0;
+ uint64_t num_middle3_exact = 0;
+ for (uint64_t i=0; i < 2*limit; i++) {
+ char key[100];
+ char keyplus4[100];
+ char keyplus5[100];
+ uint64_t intkey = i;
+
+ snprintf(key, 100, "%08" PRIu64 "", intkey);
+ snprintf(keyplus4, 100, "%08" PRIu64 "", intkey+4);
+ snprintf(keyplus5, 100, "%08" PRIu64 "", intkey+5);
+
+ DBT k;
+ DBT k2;
+ DBT k3;
+ toku_fill_dbt(&k, key, 1+strlen(key));
+ toku_fill_dbt(&k2, keyplus4, 1+strlen(keyplus4));
+ toku_fill_dbt(&k3, keyplus5, 1+strlen(keyplus5));
+ uint64_t less,equal1,middle,equal2,greater;
+ bool middle3exact;
+ toku_ft_keysrange(t, &k, &k2, &less, &equal1, &middle, &equal2, &greater, &middle3exact);
+ if (ms == CLOSE_AND_REOPEN_LEAVE_ON_DISK) {
+ //TODO(yoni): when reading basement nodes is implemented, get rid of this hack
+ middle3exact = false;
+ }
+ totalqueries++;
+ num_middle3_exact += middle3exact;
+ if (verbose > 1) {
+ printf("Rkey2 %" PRIu64 "/%" PRIu64
+ " %" PRIu64
+ " %" PRIu64
+ " %" PRIu64
+ " %" PRIu64
+ " %" PRIu64
+ " %s\n",
+ intkey, 2*limit, less, equal1, middle, equal2, greater, middle3exact ? "true" : "false");
+ }
+ verify_keysrange(ms, limit, intkey, intkey+4,
+ less, equal1, middle, equal2, greater, middle3exact);
+
+ toku_ft_keysrange(t, &k, &k3, &less, &equal1, &middle, &equal2, &greater, &middle3exact);
+ if (ms == CLOSE_AND_REOPEN_LEAVE_ON_DISK) {
+ //TODO(yoni): when reading basement nodes is implemented, get rid of this hack
+ middle3exact = false;
+ }
+ totalqueries++;
+ num_middle3_exact += middle3exact;
+ if (verbose > 1) {
+ printf("Rkey3 %" PRIu64 "/%" PRIu64
+ " %" PRIu64
+ " %" PRIu64
+ " %" PRIu64
+ " %" PRIu64
+ " %" PRIu64
+ " %s\n",
+ intkey, 2*limit, less, equal1, middle, equal2, greater, middle3exact ? "true" : "false");
+ }
+ verify_keysrange(ms, limit, intkey, intkey+5,
+ less, equal1, middle, equal2, greater, middle3exact);
+ }
+ assert(num_middle3_exact <= totalqueries);
+ if (ms == CLOSE_AND_REOPEN_LEAVE_ON_DISK) {
+ //TODO(yoni): when reading basement nodes is implemented, get rid of this hack
+ assert(num_middle3_exact == 0);
+ } else {
+ // About 85% of the time, the key for an int (and +4 or +5) is in the
+ // same basement node. Check >= 70% so this isn't very flaky.
+ assert(num_middle3_exact > totalqueries * 7 / 10);
+ }
+ }
+
+ close_ft_and_ct();
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ uint64_t limit = 30000;
+
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0) verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "-n") == 0 && i+1 < argc) {
+ limit = atoll(argv[++i]);
+ continue;
+ }
+ }
+
+ test_keyrange(LEAVE_IN_MEMORY, limit);
+ test_keyrange(CLOSE_AND_REOPEN_LEAVE_ON_DISK, limit);
+ test_keyrange(CLOSE_AND_RELOAD, limit);
+
+ if (verbose) printf("test ok\n");
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/keytest.cc b/storage/tokudb/PerconaFT/ft/tests/keytest.cc
new file mode 100644
index 00000000..19ee092d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/keytest.cc
@@ -0,0 +1,63 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "ft.h"
+
+static void
+test_keycompare (void) {
+ assert(toku_keycompare("a",1, "a",1)==0);
+ assert(toku_keycompare("aa",2, "a",1)>0);
+ assert(toku_keycompare("a",1, "aa",2)<0);
+ assert(toku_keycompare("b",1, "aa",2)>0);
+ assert(toku_keycompare("aa",2, "b",1)<0);
+ assert(toku_keycompare("aaaba",5, "aaaba",5)==0);
+ assert(toku_keycompare("aaaba",5, "aaaaa",5)>0);
+ assert(toku_keycompare("aaaaa",5, "aaaba",5)<0);
+ assert(toku_keycompare("aaaaa",3, "aaaba",3)==0);
+ assert(toku_keycompare("\000\000\000\a", 4, "\000\000\000\004", 4)>0);
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ test_keycompare();
+ if (verbose) printf("test ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/le-cursor-provdel.cc b/storage/tokudb/PerconaFT/ft/tests/le-cursor-provdel.cc
new file mode 100644
index 00000000..61df30eb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/le-cursor-provdel.cc
@@ -0,0 +1,256 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the LE_CURSOR next function with provisionally deleted rows
+
+
+#include "cachetable/checkpoint.h"
+#include "le-cursor.h"
+#include "test.h"
+
+
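+// LE_CURSOR callback: copy each visited key into the DBT passed as the extra argument.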
+static int
+get_next_callback(uint32_t keylen, const void *key, uint32_t vallen UU(), const void *val UU(), void *extra, bool lock_only) {
+ DBT *CAST_FROM_VOIDP(key_dbt, extra);
+ if (!lock_only) {
+ toku_dbt_set(keylen, key, key_dbt, NULL);
+ }
+ return 0;
+}
+
+static int
+le_cursor_get_next(LE_CURSOR cursor, DBT *key) {
+ int r = toku_le_cursor_next(cursor, get_next_callback, key);
+ return r;
+}
+
+static int test_ft_cursor_keycompare(DB *desc __attribute__((unused)), const DBT *a, const DBT *b) {
+ return toku_keycompare(a->data, a->size, b->data, b->size);
+}
+
+// create a tree and populate it with n rows
+static void
+create_populate_tree(const char *logdir, const char *fname, int n) {
+ if (verbose) fprintf(stderr, "%s %s %s %d\n", __FUNCTION__, logdir, fname, n);
+ int error;
+
+ TOKULOGGER logger = NULL;
+ error = toku_logger_create(&logger);
+ assert(error == 0);
+ error = toku_logger_open(logdir, logger);
+ assert(error == 0);
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, logger);
+ toku_logger_set_cachetable(logger, ct);
+ error = toku_logger_open_rollback(logger, ct, true);
+ assert(error == 0);
+
+ TOKUTXN txn = NULL;
+ error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ assert(error == 0);
+
+ FT_HANDLE ft = NULL;
+ error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare);
+ assert(error == 0);
+
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
+ assert(error == 0);
+ toku_txn_close_txn(txn);
+
+ txn = NULL;
+ error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ assert(error == 0);
+
+ // insert keys 0, 1, 2, .. (n-1)
+ for (int i = 0; i < n; i++) {
+ int k = toku_htonl(i);
+ int v = i;
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ DBT val;
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(ft, &key, &val, txn);
+ assert(error == 0);
+ }
+
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
+ assert(error == 0);
+ toku_txn_close_txn(txn);
+
+ error = toku_close_ft_handle_nolsn(ft, NULL);
+ assert(error == 0);
+
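+ // two checkpoints are taken below: presumably the first makes the new tree
+ // durable, and the second, taken after the rollback log is closed, records a
+ // clean state so later tests can reopen these files without recovery.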
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ error = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(error == 0);
+
+ toku_logger_close_rollback(logger);
+
+ error = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(error == 0);
+
+ toku_logger_shutdown(logger);
+
+ error = toku_logger_close(&logger);
+ assert(error == 0);
+
+ toku_cachetable_close(&ct);
+}
+
+// provisionally delete all of the even keys
+// the LE_CURSOR should see all of the leaf entries
+static void
+test_provdel(const char *logdir, const char *fname, int n) {
+ if (verbose) fprintf(stderr, "%s %s %s %d\n", __FUNCTION__, logdir, fname, n);
+ int error;
+
+ TOKULOGGER logger = NULL;
+ error = toku_logger_create(&logger);
+ assert(error == 0);
+ error = toku_logger_open(logdir, logger);
+ assert(error == 0);
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, logger);
+ toku_logger_set_cachetable(logger, ct);
+ error = toku_logger_open_rollback(logger, ct, false);
+ assert(error == 0);
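+ // the 'false' argument above presumably reopens the rollback log created by
+ // create_populate_tree rather than creating a fresh one.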
+
+ TOKUTXN txn = NULL;
+ error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ assert(error == 0);
+
+ FT_HANDLE ft = NULL;
+ error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare);
+ assert(error == 0);
+
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
+ assert(error == 0);
+ toku_txn_close_txn(txn);
+
+ txn = NULL;
+ error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ assert(error == 0);
+
+ // del keys 0, 2, 4, ...
+ for (int i = 0; i < n; i += 2) {
+ int k = toku_htonl(i);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ toku_ft_delete(ft, &key, txn);
+ assert(error == 0);
+ }
+
+ TOKUTXN cursortxn = NULL;
+ error = toku_txn_begin_txn(NULL, NULL, &cursortxn, logger, TXN_SNAPSHOT_NONE, false);
+ assert(error == 0);
+
+ LE_CURSOR cursor = NULL;
+ error = toku_le_cursor_create(&cursor, ft, cursortxn);
+ assert(error == 0);
+
+ DBT key;
+ toku_init_dbt(&key); key.flags = DB_DBT_REALLOC;
+ DBT val;
+ toku_init_dbt(&val); val.flags = DB_DBT_REALLOC;
+
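+ // the LE_CURSOR walks from the right end of the tree toward the left, so the
+ // loop below expects the keys in descending order (n-1 down to 0) and expects
+ // to see all n of them even though half are provisionally deleted.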
+ int i;
+ for (i=0; ; i++) {
+ error = le_cursor_get_next(cursor, &key);
+ if (error != 0)
+ break;
+
+ assert(key.size == sizeof (int));
+ int ii = *(int *)key.data;
+ assert((int) toku_htonl(n-i-1) == ii);
+ }
+ assert(i == n);
+
+ toku_destroy_dbt(&key);
+ toku_destroy_dbt(&val);
+
+ toku_le_cursor_close(cursor);
+
+ error = toku_txn_commit_txn(cursortxn, true, NULL, NULL);
+ assert(error == 0);
+ toku_txn_close_txn(cursortxn);
+
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
+ assert(error == 0);
+ toku_txn_close_txn(txn);
+
+ error = toku_close_ft_handle_nolsn(ft, NULL);
+ assert(error == 0);
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ error = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(error == 0);
+
+ toku_logger_close_rollback(logger);
+ error = toku_logger_close(&logger);
+ assert(error == 0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void
+init_logdir(const char *logdir) {
+ int error;
+
+ toku_os_recursive_delete(logdir);
+ error = toku_os_mkdir(logdir, 0777);
+ assert(error == 0);
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU);
+ assert_zero(r);
+
+ char logdir[TOKU_PATH_MAX+1];
+ toku_path_join(logdir, 2, TOKU_TEST_FILENAME, "logdir");
+ init_logdir(logdir);
+ int error = chdir(logdir);
+ assert(error == 0);
+
+ const int n = 10;
+ create_populate_tree(".", "ftfile", n);
+ test_provdel(".", "ftfile", n);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/le-cursor-right.cc b/storage/tokudb/PerconaFT/ft/tests/le-cursor-right.cc
new file mode 100644
index 00000000..0e6df3a6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/le-cursor-right.cc
@@ -0,0 +1,322 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the LE_CURSOR toku_le_cursor_is_key_greater_or_equal function
+// - LE_CURSOR at neg infinity
+// - LE_CURSOR at pos infinity
+// - LE_CURSOR somewhere else
+
+
+#include "cachetable/checkpoint.h"
+#include "le-cursor.h"
+#include "test.h"
+
+static TOKUTXN const null_txn = 0;
+
+static int
+get_next_callback(uint32_t keylen, const void *key, uint32_t vallen UU(), const void *val UU(), void *extra, bool lock_only) {
+ DBT *CAST_FROM_VOIDP(key_dbt, extra);
+ if (!lock_only) {
+ toku_dbt_set(keylen, key, key_dbt, NULL);
+ }
+ return 0;
+}
+
+static int
+le_cursor_get_next(LE_CURSOR cursor, DBT *val) {
+ int r = toku_le_cursor_next(cursor, get_next_callback, val);
+ return r;
+}
+
+static int
+test_keycompare(DB* UU(desc), const DBT *a, const DBT *b) {
+ return toku_keycompare(a->data, a->size, b->data, b->size);
+}
+
+// create a tree and populate it with n rows
+static void
+create_populate_tree(const char *logdir, const char *fname, int n) {
+ if (verbose) fprintf(stderr, "%s %s %s %d\n", __FUNCTION__, logdir, fname, n);
+ int error;
+
+ TOKULOGGER logger = NULL;
+ error = toku_logger_create(&logger);
+ assert(error == 0);
+ error = toku_logger_open(logdir, logger);
+ assert(error == 0);
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, logger);
+ toku_logger_set_cachetable(logger, ct);
+ error = toku_logger_open_rollback(logger, ct, true);
+ assert(error == 0);
+
+ TOKUTXN txn = NULL;
+ error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ assert(error == 0);
+
+ FT_HANDLE ft = NULL;
+ error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_keycompare);
+ assert(error == 0);
+
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
+ assert(error == 0);
+ toku_txn_close_txn(txn);
+
+ txn = NULL;
+ error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ assert(error == 0);
+
+ // insert keys 0, 1, 2, .. (n-1)
+ for (int i = 0; i < n; i++) {
+ int k = toku_htonl(i);
+ int v = i;
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ DBT val;
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(ft, &key, &val, txn);
+ }
+
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
+ assert(error == 0);
+ toku_txn_close_txn(txn);
+
+ error = toku_close_ft_handle_nolsn(ft, NULL);
+ assert(error == 0);
+
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ error = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(error == 0);
+ toku_logger_close_rollback(logger);
+ assert(error == 0);
+
+ error = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(error == 0);
+
+ toku_logger_shutdown(logger);
+
+ error = toku_logger_close(&logger);
+ assert(error == 0);
+
+ toku_cachetable_close(&ct);
+}
+
+// test toku_le_cursor_is_key_greater_or_equal when the LE_CURSOR is positioned at +infinity
+static void
+test_pos_infinity(const char *fname, int n) {
+ if (verbose) fprintf(stderr, "%s %s %d\n", __FUNCTION__, fname, n);
+ int error;
+
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ FT_HANDLE ft = NULL;
+ error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare);
+ assert(error == 0);
+
+ // create the cursor; a freshly created LE_CURSOR is positioned at +infinity
+ LE_CURSOR cursor = NULL;
+ error = toku_le_cursor_create(&cursor, ft, NULL);
+ assert(error == 0);
+
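+ // toku_le_cursor_is_key_greater_or_equal(cursor, key) appears to report whether
+ // key is at or to the right of the cursor's current position; with the cursor
+ // still at +infinity, every probe below is expected to return false.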
+ for (int i = 0; i < 2*n; i++) {
+ int k = toku_htonl(i);
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ int right = toku_le_cursor_is_key_greater_or_equal(cursor, &key);
+ assert(right == false);
+ }
+
+ toku_le_cursor_close(cursor);
+
+ error = toku_close_ft_handle_nolsn(ft, 0);
+ assert(error == 0);
+
+ toku_cachetable_close(&ct);
+}
+
+// test toku_le_cursor_is_key_greater_or_equal when the LE_CURSOR is positioned at -infinity
+static void
+test_neg_infinity(const char *fname, int n) {
+ if (verbose) fprintf(stderr, "%s %s %d\n", __FUNCTION__, fname, n);
+ int error;
+
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ FT_HANDLE ft = NULL;
+ error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare);
+ assert(error == 0);
+
+ // create the LE_CURSOR at +infinity; the walk below moves it past the leftmost entry toward -infinity
+ LE_CURSOR cursor = NULL;
+ error = toku_le_cursor_create(&cursor, ft, NULL);
+ assert(error == 0);
+
+ DBT key;
+ toku_init_dbt(&key); key.flags = DB_DBT_REALLOC;
+ DBT val;
+ toku_init_dbt(&val); val.flags = DB_DBT_REALLOC;
+
+ int i;
+ for (i = n-1; ; i--) {
+ error = le_cursor_get_next(cursor, &key);
+ if (error != 0)
+ break;
+
+ assert(key.size == sizeof (int));
+ int ii = *(int *)key.data;
+ assert((int) toku_htonl(i) == ii);
+ }
+ assert(i == -1);
+
+ toku_destroy_dbt(&key);
+ toku_destroy_dbt(&val);
+
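+ // the cursor has now been walked past the leftmost entry (effectively -infinity),
+ // so every probe below is expected to return true.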
+ for (i = 0; i < 2*n; i++) {
+ int k = toku_htonl(i);
+ DBT key2;
+ toku_fill_dbt(&key2, &k, sizeof k);
+ int right = toku_le_cursor_is_key_greater_or_equal(cursor, &key2);
+ assert(right == true);
+ }
+
+ toku_le_cursor_close(cursor);
+
+ error = toku_close_ft_handle_nolsn(ft, 0);
+ assert(error == 0);
+
+ toku_cachetable_close(&ct);
+}
+
+// test toku_le_cursor_is_key_greater_or_equal when the LE_CURSOR is positioned in between -infinity and +infinity
+static void
+test_between(const char *fname, int n) {
+ if (verbose) fprintf(stderr, "%s %s %d\n", __FUNCTION__, fname, n);
+ int error;
+
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ FT_HANDLE ft = NULL;
+ error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare);
+ assert(error == 0);
+
+ // position the LE_CURSOR at +infinity
+ LE_CURSOR cursor = NULL;
+ error = toku_le_cursor_create(&cursor, ft, NULL);
+ assert(error == 0);
+
+ DBT key;
+ toku_init_dbt(&key); key.flags = DB_DBT_REALLOC;
+ DBT val;
+ toku_init_dbt(&val); val.flags = DB_DBT_REALLOC;
+
+ int i;
+ for (i = 0; ; i++) {
+ // move the LE_CURSOR forward
+ error = le_cursor_get_next(cursor, &key);
+ if (error != 0)
+ break;
+
+ assert(key.size == sizeof (int));
+ int ii = *(int *)key.data;
+ assert((int) toku_htonl(n-i-1) == ii);
+
+ // keys already visited by the cursor (j = 0 .. i) should compare as passed
+ for (int j = 0; j <= i; j++) {
+ int k = toku_htonl(n-j-1);
+ DBT key2;
+ toku_fill_dbt(&key2, &k, sizeof k);
+ int right = toku_le_cursor_is_key_greater_or_equal(cursor, &key2);
+ assert(right == true);
+ }
+
+ // keys not yet visited (j = i+1 .. n-1) should not
+ for (int j = i+1; j < n; j++) {
+ int k = toku_htonl(n-j-1);
+ DBT key2;
+ toku_fill_dbt(&key2, &k, sizeof k);
+ int right = toku_le_cursor_is_key_greater_or_equal(cursor, &key2);
+ assert(right == false);
+ }
+
+ }
+ assert(i == n);
+
+ toku_destroy_dbt(&key);
+ toku_destroy_dbt(&val);
+
+ toku_le_cursor_close(cursor);
+
+ error = toku_close_ft_handle_nolsn(ft, 0);
+ assert(error == 0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void
+init_logdir(const char *logdir) {
+ int error;
+
+ toku_os_recursive_delete(logdir);
+ error = toku_os_mkdir(logdir, 0777);
+ assert(error == 0);
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU);
+ assert_zero(r);
+
+ char logdir[TOKU_PATH_MAX+1];
+ toku_path_join(logdir, 2, TOKU_TEST_FILENAME, "logdir");
+ init_logdir(logdir);
+ int error = chdir(logdir);
+ assert(error == 0);
+
+ const int n = 10;
+ create_populate_tree(".", "ftfile", n);
+ test_pos_infinity("ftfile", n);
+ test_neg_infinity("ftfile", n);
+ test_between("ftfile", n);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/le-cursor-walk.cc b/storage/tokudb/PerconaFT/ft/tests/le-cursor-walk.cc
new file mode 100644
index 00000000..7af13ad3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/le-cursor-walk.cc
@@ -0,0 +1,217 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the LE_CURSOR next function
+
+
+#include "cachetable/checkpoint.h"
+#include "le-cursor.h"
+#include "test.h"
+#include <unistd.h>
+
+static TOKUTXN const null_txn = 0;
+
+static int
+get_next_callback(uint32_t keylen, const void *key, uint32_t vallen UU(), const void *val UU(), void *extra, bool lock_only) {
+ DBT *CAST_FROM_VOIDP(key_dbt, extra);
+ if (!lock_only) {
+ toku_dbt_set(keylen, key, key_dbt, NULL);
+ }
+ return 0;
+}
+
+static int
+le_cursor_get_next(LE_CURSOR cursor, DBT *val) {
+ int r = toku_le_cursor_next(cursor, get_next_callback, val);
+ return r;
+}
+
+static int test_ft_cursor_keycompare(DB *db __attribute__((unused)), const DBT *a, const DBT *b) {
+ return toku_keycompare(a->data, a->size, b->data, b->size);
+}
+
+// create a tree and populate it with n rows
+static void
+create_populate_tree(const char *logdir, const char *fname, int n) {
+ if (verbose) fprintf(stderr, "%s %s %s %d\n", __FUNCTION__, logdir, fname, n);
+ int error;
+
+ TOKULOGGER logger = NULL;
+ error = toku_logger_create(&logger);
+ assert(error == 0);
+ error = toku_logger_open(logdir, logger);
+ assert(error == 0);
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, logger);
+ toku_logger_set_cachetable(logger, ct);
+ error = toku_logger_open_rollback(logger, ct, true);
+ assert(error == 0);
+
+ TOKUTXN txn = NULL;
+ error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ assert(error == 0);
+
+ FT_HANDLE ft = NULL;
+ error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare);
+ assert(error == 0);
+
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
+ assert(error == 0);
+ toku_txn_close_txn(txn);
+
+ txn = NULL;
+ error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ assert(error == 0);
+
+ // insert keys 0, 1, 2, .. (n-1)
+ for (int i = 0; i < n; i++) {
+ int k = toku_htonl(i);
+ int v = i;
+ DBT key;
+ toku_fill_dbt(&key, &k, sizeof k);
+ DBT val;
+ toku_fill_dbt(&val, &v, sizeof v);
+ toku_ft_insert(ft, &key, &val, txn);
+ }
+
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
+ assert(error == 0);
+ toku_txn_close_txn(txn);
+
+ error = toku_close_ft_handle_nolsn(ft, NULL);
+ assert(error == 0);
+
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ error = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(error == 0);
+ toku_logger_close_rollback(logger);
+ assert(error == 0);
+ error = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert(error == 0);
+
+ toku_logger_shutdown(logger);
+ error = toku_logger_close(&logger);
+ assert(error == 0);
+
+ toku_cachetable_close(&ct);
+}
+
+// retrieve all of the leaf entries in the tree and verify the key associated with each one.
+// there should be n leaf entries in the tree.
+static void
+walk_tree(const char *fname, int n) {
+ if (verbose) fprintf(stderr, "%s %s %d\n", __FUNCTION__, fname, n);
+ int error;
+
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ FT_HANDLE ft = NULL;
+ error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
+ assert(error == 0);
+
+ LE_CURSOR cursor = NULL;
+ error = toku_le_cursor_create(&cursor, ft, NULL);
+ assert(error == 0);
+
+ DBT key;
+ toku_init_dbt(&key); key.flags = DB_DBT_REALLOC;
+ DBT val;
+ toku_init_dbt(&val); val.flags = DB_DBT_REALLOC;
+
+ int i;
+ for (i=0; ; i++) {
+ error = le_cursor_get_next(cursor, &key);
+ if (error != 0)
+ break;
+
+ assert(key.size == sizeof (int));
+ int ii = *(int *)key.data;
+ assert((int) toku_htonl(n-i-1) == ii);
+ }
+ assert(i == n);
+
+ toku_destroy_dbt(&key);
+ toku_destroy_dbt(&val);
+
+ toku_le_cursor_close(cursor);
+
+ error = toku_close_ft_handle_nolsn(ft, 0);
+ assert(error == 0);
+
+ toku_cachetable_close(&ct);
+}
+
+static void
+init_logdir(const char *logdir) {
+ toku_os_recursive_delete(logdir);
+ int error = toku_os_mkdir(logdir, 0777);
+ assert(error == 0);
+}
+
+static void
+run_test(const char *logdir, const char *ftfile, int n) {
+ char lastdir[TOKU_PATH_MAX+1];
+ char *last = getcwd(lastdir, TOKU_PATH_MAX);
+ assert(last != nullptr);
+ init_logdir(logdir);
+ int error = chdir(logdir);
+ assert(error == 0);
+
+ create_populate_tree(".", ftfile, n);
+ walk_tree(ftfile, n);
+
+ error = chdir(last);
+ assert(error == 0);
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU);
+ assert_zero(r);
+
+ char logdir[TOKU_PATH_MAX+1];
+ toku_path_join(logdir, 2, TOKU_TEST_FILENAME, "logdir");
+
+ run_test(logdir, "ftfile", 0);
+ run_test(logdir, "ftfile", 1000);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/list-test.cc b/storage/tokudb/PerconaFT/ft/tests/list-test.cc
new file mode 100644
index 00000000..e9dbc7f6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/list-test.cc
@@ -0,0 +1,201 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "toku_list.h"
+
+
+#include "test.h"
+#include <memory.h>
+#include <stdio.h>
+#include <stdlib.h>
+
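+// toku_list appears to be an intrusive doubly-linked list: a toku_list node is
+// embedded in each element, and toku_list_struct(list, type, member) recovers the
+// enclosing struct from the embedded link, much like the kernel's container_of.
+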
+struct testlist {
+ struct toku_list next;
+ int tag;
+};
+
+static void testlist_init (struct testlist *tl, int tag) {
+ tl->tag = tag;
+}
+
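+// toku_list_push appends at the tail and toku_list_pop removes from the tail,
+// so this checks that elements come back in LIFO order.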
+static void test_push_pop (int n) {
+ int i;
+ struct toku_list head;
+
+ toku_list_init(&head);
+ for (i=0; i<n; i++) {
+ struct testlist *tl = (struct testlist *) toku_malloc(sizeof *tl);
+ assert(tl);
+ testlist_init(tl, i);
+ toku_list_push(&head, &tl->next);
+ assert(!toku_list_empty(&head));
+ }
+ for (i=n-1; i>=0; i--) {
+ struct toku_list *list;
+ struct testlist *tl;
+
+ list = toku_list_head(&head);
+ tl = toku_list_struct(list, struct testlist, next);
+ assert(tl->tag == 0);
+ list = toku_list_tail(&head);
+ tl = toku_list_struct(list, struct testlist, next);
+ assert(tl->tag == i);
+ list = toku_list_pop(&head);
+ tl = toku_list_struct(list, struct testlist, next);
+ assert(tl->tag == i);
+ toku_free(tl);
+ }
+ assert(toku_list_empty(&head));
+}
+
+static void test_push_pop_head (int n) {
+ int i;
+ struct toku_list head;
+
+ toku_list_init(&head);
+ for (i=0; i<n; i++) {
+ struct testlist *tl = (struct testlist *) toku_malloc(sizeof *tl);
+ assert(tl);
+ testlist_init(tl, i);
+ toku_list_push(&head, &tl->next);
+ assert(!toku_list_empty(&head));
+ }
+ for (i=0; i<n; i++) {
+ struct toku_list *list;
+ struct testlist *tl;
+
+ list = toku_list_head(&head);
+ tl = toku_list_struct(list, struct testlist, next);
+ assert(tl->tag == i);
+ list = toku_list_tail(&head);
+ tl = toku_list_struct(list, struct testlist, next);
+ assert(tl->tag == n-1);
+
+ list = toku_list_pop_head(&head);
+ tl = toku_list_struct(list, struct testlist, next);
+ assert(tl->tag == i);
+ toku_free(tl);
+ }
+ assert(toku_list_empty(&head));
+}
+
+static void test_push_head_pop (int n) {
+ int i;
+ struct toku_list head;
+
+ toku_list_init(&head);
+ for (i=0; i<n; i++) {
+ struct testlist *tl = (struct testlist *) toku_malloc(sizeof *tl);
+ assert(tl);
+ testlist_init(tl, i);
+ toku_list_push_head(&head, &tl->next);
+ assert(!toku_list_empty(&head));
+ }
+ for (i=0; i<n; i++) {
+ struct toku_list *list;
+ struct testlist *tl;
+
+ list = toku_list_head(&head);
+ tl = toku_list_struct(list, struct testlist, next);
+ assert(tl->tag == n-1);
+ list = toku_list_tail(&head);
+ tl = toku_list_struct(list, struct testlist, next);
+ assert(tl->tag == i);
+
+ list = toku_list_pop(&head);
+ tl = toku_list_struct(list, struct testlist, next);
+ assert(tl->tag == i);
+ toku_free(tl);
+ }
+ assert(toku_list_empty(&head));
+}
+
+#if 0
+// can't move an empty list
+static void test_move_empty (void) {
+ struct toku_list h1, h2;
+
+ toku_list_init(&h1);
+ toku_list_init(&h2);
+ toku_list_move(&h1, &h2);
+ assert(toku_list_empty(&h2));
+ assert(toku_list_empty(&h1));
+}
+#endif
+
+static void test_move (int n) {
+ struct toku_list h1, h2;
+ int i;
+
+ toku_list_init(&h1);
+ toku_list_init(&h2);
+ for (i=0; i<n; i++) {
+ struct testlist *tl = (struct testlist *) toku_malloc(sizeof *tl);
+ assert(tl);
+ testlist_init(tl, i);
+ toku_list_push(&h2, &tl->next);
+ }
+ toku_list_move(&h1, &h2);
+ assert(!toku_list_empty(&h1));
+ assert(toku_list_empty(&h2));
+ i = 0;
+ while (!toku_list_empty(&h1)) {
+ struct toku_list *list = toku_list_pop_head(&h1);
+ struct testlist *tl = toku_list_struct(list, struct testlist, next);
+ assert(tl->tag == i);
+ toku_free(tl);
+ i += 1;
+ }
+ assert(i == n);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_push_pop(0);
+ test_push_pop(8);
+ test_push_pop_head(0);
+ test_push_pop_head(8);
+ test_push_head_pop(8);
+ test_move(1);
+ test_move(8);
+ // test_move_empty();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/log-test-maybe-trim.cc b/storage/tokudb/PerconaFT/ft/tests/log-test-maybe-trim.cc
new file mode 100644
index 00000000..21a1650b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/log-test-maybe-trim.cc
@@ -0,0 +1,76 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the log file trimmer does not delete the log file containing the
+// begin checkpoint when the checkpoint log entries span multiple log files.
+
+#include "logger/logcursor.h"
+#include "test.h"
+
+int
+test_main (int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_set_lg_max(logger, 32); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ BYTESTRING hello = (BYTESTRING) { 5, (char *) "hello"};
+ LSN comment_lsn;
+ toku_log_comment(logger, &comment_lsn, true, 0, hello);
+ LSN begin_lsn;
+ toku_log_begin_checkpoint(logger, &begin_lsn, true, 0, 0);
+ LSN end_lsn;
+ toku_log_end_checkpoint(logger, &end_lsn, true, begin_lsn, 0, 0, 0);
+ toku_logger_maybe_trim_log(logger, begin_lsn);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // verify all log entries prior to the begin checkpoint are trimmed
+ TOKULOGCURSOR lc = NULL;
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME); assert(r == 0);
+ struct log_entry *le = NULL;
+ r = toku_logcursor_first(lc, &le); assert(r == 0);
+ assert(le->cmd == LT_begin_checkpoint);
+ r = toku_logcursor_destroy(&lc); assert(r == 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/log-test.cc b/storage/tokudb/PerconaFT/ft/tests/log-test.cc
new file mode 100644
index 00000000..19e2277f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/log-test.cc
@@ -0,0 +1,76 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+int
+test_main (int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ int r;
+ long long lognum;
+ char logname[TOKU_PATH_MAX+1];
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ r = toku_logger_find_next_unused_log_file(TOKU_TEST_FILENAME,&lognum);
+ assert(r==0 && lognum==0LL);
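+ // the next unused log number appears to be one past the largest number embedded
+ // in an existing log file name, regardless of gaps; the files created below
+ // exercise that with both small and very large numbers.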
+
+ mode_t mode = S_IRWXU + S_IRWXG + S_IRWXO;
+ sprintf(logname, "%s/log01.tokulog%d", TOKU_TEST_FILENAME, TOKU_LOG_VERSION);
+ r = open(logname, O_WRONLY + O_CREAT + O_BINARY, mode); assert(r>=0);
+ r = close(r); assert(r==0);
+
+ r = toku_logger_find_next_unused_log_file(TOKU_TEST_FILENAME,&lognum);
+ assert(r==0 && lognum==2LL);
+
+ sprintf(logname, "%s/log123456789012345.tokulog%d", TOKU_TEST_FILENAME, TOKU_LOG_VERSION);
+ r = open(logname, O_WRONLY + O_CREAT + O_BINARY, mode); assert(r>=0);
+ r = close(r); assert(r==0);
+ r = toku_logger_find_next_unused_log_file(TOKU_TEST_FILENAME,&lognum);
+ assert(r==0 && lognum==123456789012346LL);
+
+ sprintf(logname, "%s/log3.tokulog%d", TOKU_TEST_FILENAME, TOKU_LOG_VERSION);
+ r = open(logname, O_WRONLY + O_CREAT + O_BINARY, mode); assert(r>=0);
+ r = close(r); assert(r==0);
+ r = toku_logger_find_next_unused_log_file(TOKU_TEST_FILENAME,&lognum);
+ assert(r==0 && lognum==123456789012346LL);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/log-test2.cc b/storage/tokudb/PerconaFT/ft/tests/log-test2.cc
new file mode 100644
index 00000000..b70e973a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/log-test2.cc
@@ -0,0 +1,57 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+// create and close, making sure that everything is deallocated properly.
+
+int
+test_main (int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/log-test3.cc b/storage/tokudb/PerconaFT/ft/tests/log-test3.cc
new file mode 100644
index 00000000..ea685b17
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/log-test3.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// create and close, making sure that everything is deallocated properly.
+
+int
+test_main (int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/log-test4.cc b/storage/tokudb/PerconaFT/ft/tests/log-test4.cc
new file mode 100644
index 00000000..019852bb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/log-test4.cc
@@ -0,0 +1,77 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// append a small raw entry to the logger's input buffer and verify the size of the resulting log file.
+
+int
+test_main (int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ int r;
+ char logname[TOKU_PATH_MAX+1];
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+
+ {
+ ml_lock(&logger->input_lock);
+ toku_logger_make_space_in_inbuf(logger, 5);
+ memcpy(logger->inbuf.buf+logger->inbuf.n_in_buf, "a1234", 5);
+ logger->inbuf.n_in_buf+=5;
+ logger->lsn.lsn++;
+ logger->inbuf.max_lsn_in_buf = logger->lsn;
+ ml_unlock(&logger->input_lock);
+ }
+
+ r = toku_logger_close(&logger); assert(r == 0);
+ {
+ toku_struct_stat statbuf;
+ sprintf(logname,
+ "%s/log000000000000.tokulog%d",
+ TOKU_TEST_FILENAME,
+ TOKU_LOG_VERSION);
+ r = toku_stat(logname, &statbuf, toku_uninstrumented);
+ assert(r == 0);
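+ // 12 bytes presumably account for the log file header, plus the 5 payload
+ // bytes appended to the inbuf above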
+ assert(statbuf.st_size == 12 + 5);
+ }
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/log-test5.cc b/storage/tokudb/PerconaFT/ft/tests/log-test5.cc
new file mode 100644
index 00000000..fed9467a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/log-test5.cc
@@ -0,0 +1,96 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// write many small entries with a small lg_max, fsyncing each one, and verify that no rolled log file grows much beyond that limit.
+
+#define LSIZE 100
+
+int
+test_main (int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+ r = toku_logger_set_lg_max(logger, LSIZE);
+ {
+ uint32_t n;
+ r = toku_logger_get_lg_max(logger, &n);
+ assert(n==LSIZE);
+ }
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+ int i;
+ for (i=0; i<1000; i++) {
+ ml_lock(&logger->input_lock);
+
+ int ilen=3+random()%5;
+ toku_logger_make_space_in_inbuf(logger, ilen+1);
+ snprintf(logger->inbuf.buf+logger->inbuf.n_in_buf, ilen+1, "a%0*d ", (int)ilen, i);
+ logger->inbuf.n_in_buf+=(ilen+1);
+ logger->lsn.lsn++;
+ logger->inbuf.max_lsn_in_buf = logger->lsn;
+ ml_unlock(&logger->input_lock);
+ toku_logger_fsync(logger);
+ }
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+
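+ // with lg_max set to LSIZE the logger should roll to a new log file as entries
+ // accumulate; check that every log file left in the directory stays within a
+ // small slop of that limit.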
+ {
+ DIR *dir=opendir(TOKU_TEST_FILENAME);
+ assert(dir);
+ struct dirent *dirent;
+ while ((dirent=readdir(dir))) {
+ if (strncmp(dirent->d_name, "log", 3)!=0) continue;
+ char fname[TOKU_PATH_MAX + 1];
+ toku_path_join(fname, 2, TOKU_TEST_FILENAME, dirent->d_name);
+ toku_struct_stat statbuf;
+ r = toku_stat(fname, &statbuf, toku_uninstrumented);
+ assert(r == 0);
+ assert(statbuf.st_size <= LSIZE + 10);
+ }
+ r = closedir(dir);
+ assert(r==0);
+ }
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/log-test6.cc b/storage/tokudb/PerconaFT/ft/tests/log-test6.cc
new file mode 100644
index 00000000..0e8b9456
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/log-test6.cc
@@ -0,0 +1,100 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// write entries sized to just fit within lg_max and verify the first log file's size after the logger is closed.
+
+#define LSIZE 100
+
+int
+test_main (int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+ r = toku_logger_set_lg_max(logger, LSIZE);
+ {
+ uint32_t n;
+ r = toku_logger_get_lg_max(logger, &n);
+ assert(n==LSIZE);
+ }
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ {
+ ml_lock(&logger->input_lock);
+ int lsize=LSIZE-12-2;
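+ // LSIZE-12-2 presumably leaves room for the 12-byte log file header and the
+ // 2-byte entry appended next, keeping everything written here within lg_max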
+ toku_logger_make_space_in_inbuf(logger, lsize);
+ snprintf(logger->inbuf.buf+logger->inbuf.n_in_buf, lsize, "a%*d", lsize-1, 0);
+ logger->inbuf.n_in_buf += lsize;
+ logger->lsn.lsn++;
+ logger->inbuf.max_lsn_in_buf = logger->lsn;
+ ml_unlock(&logger->input_lock);
+ }
+
+ {
+ ml_lock(&logger->input_lock);
+ toku_logger_make_space_in_inbuf(logger, 2);
+ memcpy(logger->inbuf.buf+logger->inbuf.n_in_buf, "b1", 2);
+ logger->inbuf.n_in_buf += 2;
+ logger->lsn.lsn++;
+ logger->inbuf.max_lsn_in_buf = logger->lsn;
+ ml_unlock(&logger->input_lock);
+ }
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+
+ {
+ char logname[PATH_MAX];
+ toku_struct_stat statbuf;
+ sprintf(logname,
+ "%s/log000000000000.tokulog%d",
+ TOKU_TEST_FILENAME,
+ TOKU_LOG_VERSION);
+ r = toku_stat(logname, &statbuf, toku_uninstrumented);
+ assert(r == 0);
+ assert(statbuf.st_size <= LSIZE);
+ }
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/log-test7.cc b/storage/tokudb/PerconaFT/ft/tests/log-test7.cc
new file mode 100644
index 00000000..234db2e6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/log-test7.cc
@@ -0,0 +1,117 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// exercise several loggers at once, each with its own log directory, through repeated setup/write/teardown cycles.
+
+#define LSIZE 100
+#define NUM_LOGGERS 10
+TOKULOGGER logger[NUM_LOGGERS];
+
+static void setup_logger(int which) {
+ char logname[10];
+ snprintf(logname, sizeof(logname), "log%d", which);
+ char dnamewhich[TOKU_PATH_MAX+1];
+ int r;
+ toku_path_join(dnamewhich, 2, TOKU_TEST_FILENAME, logname);
+ r = toku_os_mkdir(dnamewhich, S_IRWXU);
+ if (r!=0) {
+ int er = get_error_errno();
+ printf("file %s error (%d) %s\n", dnamewhich, er, strerror(er));
+ assert(r==0);
+ }
+ r = toku_logger_create(&logger[which]);
+ assert(r == 0);
+ r = toku_logger_set_lg_max(logger[which], LSIZE);
+ {
+ uint32_t n;
+ r = toku_logger_get_lg_max(logger[which], &n);
+ assert(n==LSIZE);
+ }
+ r = toku_logger_open(dnamewhich, logger[which]);
+ assert(r == 0);
+}
+
+static void play_with_logger(int which) {
+ {
+ ml_lock(&logger[which]->input_lock);
+ int lsize=LSIZE-12-2;
+ toku_logger_make_space_in_inbuf(logger[which], lsize);
+ snprintf(logger[which]->inbuf.buf+logger[which]->inbuf.n_in_buf, lsize, "a%*d", lsize-1, 0);
+ logger[which]->inbuf.n_in_buf += lsize;
+ logger[which]->lsn.lsn++;
+ logger[which]->inbuf.max_lsn_in_buf = logger[which]->lsn;
+ ml_unlock(&logger[which]->input_lock);
+ }
+
+ {
+ ml_lock(&logger[which]->input_lock);
+ toku_logger_make_space_in_inbuf(logger[which], 2);
+ memcpy(logger[which]->inbuf.buf+logger[which]->inbuf.n_in_buf, "b1", 2);
+ logger[which]->inbuf.n_in_buf += 2;
+ logger[which]->lsn.lsn++;
+ logger[which]->inbuf.max_lsn_in_buf = logger[which]->lsn;
+ ml_unlock(&logger[which]->input_lock);
+ }
+}
+
+static void tear_down_logger(int which) {
+ int r;
+ r = toku_logger_close(&logger[which]);
+ assert(r == 0);
+}
+
+int
+test_main (int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ int i;
+ int loop;
+ const int numloops = 100;
+ for (loop = 0; loop < numloops; loop++) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU);
+ assert_zero(r);
+ for (i = 0; i < NUM_LOGGERS; i++) setup_logger(i);
+ for (i = 0; i < NUM_LOGGERS; i++) play_with_logger(i);
+ for (i = 0; i < NUM_LOGGERS; i++) tear_down_logger(i);
+ }
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logcursor-bad-checksum.cc b/storage/tokudb/PerconaFT/ft/tests/logcursor-bad-checksum.cc
new file mode 100644
index 00000000..ab0ed0ea
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logcursor-bad-checksum.cc
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "logger/logcursor.h"
+#include "test.h"
+
+// log a comment entry, corrupt its checksum on disk, and verify that both
+// forward and backward log cursor walks report an error
+
+static void corrupt_the_checksum(void) {
+ // change the LSN in the first log entry of log 0. this will cause a checksum error.
+ char logname[TOKU_PATH_MAX+1];
+ int r;
+ sprintf(logname, "%s/log000000000000.tokulog%d", TOKU_TEST_FILENAME, TOKU_LOG_VERSION);
+ FILE *f = fopen(logname, "r+b"); assert(f);
+ r = fseek(f, 025, SEEK_SET); assert(r == 0);
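+ // 025 is octal (byte offset 21), which presumably lands inside the first
+ // entry's LSN field; overwriting one byte makes the stored checksum stale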
+ char c = 100;
+ size_t n = fwrite(&c, sizeof c, 1, f); assert(n == sizeof c);
+ r = fclose(f); assert(r == 0);
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ TOKULOGGER logger;
+ LSN lsn = ZERO_LSN;
+
+ // log a comment entry
+
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ BYTESTRING bs0 = { .len = 5, .data = (char *) "hello" };
+ toku_log_comment(logger, &lsn, 0, 0, bs0);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+
+ // change the LSN and corrupt the checksum
+ corrupt_the_checksum();
+
+ if (!verbose) {
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul >= 0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r == fileno(stderr));
+ r = close(devnul); assert(r == 0);
+ }
+
+ // walk forwards
+ TOKULOGCURSOR lc = NULL;
+ struct log_entry *le;
+
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ // walk backwards
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logcursor-bw.cc b/storage/tokudb/PerconaFT/ft/tests/logcursor-bw.cc
new file mode 100644
index 00000000..9a354d6d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logcursor-bw.cc
@@ -0,0 +1,71 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+
+// walk backward through the log files found in the current directory
+
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ int r;
+
+ // verify the log backwards
+ TOKULOGCURSOR lc = NULL;
+ r = toku_logcursor_create(&lc, ".");
+ assert(r == 0 && lc != NULL);
+
+ int n = 0;
+ while (1) {
+ struct log_entry *le = NULL;
+ r = toku_logcursor_prev(lc, &le);
+ if (r != 0)
+ break;
+ n++;
+ }
+
+ printf("n=%d\n", n);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logdir.cc b/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logdir.cc
new file mode 100644
index 00000000..c9d9c897
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logdir.cc
@@ -0,0 +1,72 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "logger/logcursor.h"
+#include "test.h"
+
+// a logcursor in an empty directory should not find any log entries
+
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+
+ // verify the log is empty
+
+ TOKULOGCURSOR lc = NULL;
+ struct log_entry *le;
+
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile-2.cc b/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile-2.cc
new file mode 100644
index 00000000..fcbf8b50
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile-2.cc
@@ -0,0 +1,143 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "logger/logcursor.h"
+#include "test.h"
+
+const int N = 2;
+
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ TOKULOGGER logger;
+ LSN lsn = ZERO_LSN;
+
+ int helloseq = 0;
+
+ // create N empty log files
+ for (int i=0; i<N; i++) {
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ }
+
+ // create N log files with a hello message
+ for (int i=0; i<N; i++) {
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ char str[32];
+ sprintf(str, "hello%d", helloseq++);
+ BYTESTRING bs0 = { .len = (uint32_t) strlen(str), .data = str };
+ toku_log_comment(logger, &lsn, 0, 0, bs0);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ }
+
+ // create N empty log files
+ for (int i=0; i<N; i++) {
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ }
+
+ // verify the log forwards
+ TOKULOGCURSOR lc = NULL;
+ struct log_entry *le;
+
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ helloseq = 0;
+ for (int i=0; i<N; i++) {
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r == 0 && le->cmd == LT_comment);
+ char expect[32];
+ sprintf(expect, "hello%d", helloseq++);
+ assert(le->u.comment.comment.len == strlen(expect) && memcmp(le->u.comment.comment.data, expect, le->u.comment.comment.len) == 0);
+ }
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ // verify the log backwards
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ helloseq = N;
+ for (int i=0; i<N; i++) {
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r == 0 && le->cmd == LT_comment);
+ char expect[32];
+ sprintf(expect, "hello%d", --helloseq);
+ assert(le->u.comment.comment.len == strlen(expect) && memcmp(le->u.comment.comment.data, expect, le->u.comment.comment.len) == 0);
+ }
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile-3.cc b/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile-3.cc
new file mode 100644
index 00000000..e54ea495
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile-3.cc
@@ -0,0 +1,185 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "logger/logcursor.h"
+#include "test.h"
+
+const int N = 2;
+
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ TOKULOGGER logger;
+ LSN lsn = ZERO_LSN;
+
+ int helloseq = 0;
+
+ // create N log files with a hello message
+ for (int i=0; i<N; i++) {
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ char str[32];
+ sprintf(str, "hello%d", helloseq++);
+ BYTESTRING bs0 = { .len = (uint32_t) strlen(str), .data = str };
+ toku_log_comment(logger, &lsn, 0, 0, bs0);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ }
+
+ // create N empty log files
+ for (int i=0; i<N; i++) {
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ }
+
+ // CREATE AN EMPTY FILE (tests [t:2384])
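+ // (a zero-length .tokulog file; the cursor walk and the trim check below must handle it, see [t:2384])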
+ {
+ long long nexti;
+ r = toku_logger_find_next_unused_log_file(TOKU_TEST_FILENAME, &nexti);
+ assert(r == 0);
+ char mt_fname[TOKU_PATH_MAX+1];
+ snprintf(mt_fname, TOKU_PATH_MAX, "%s/log%012lld.tokulog%d", TOKU_TEST_FILENAME, nexti, TOKU_LOG_VERSION);
+ int mt_fd = open(mt_fname, O_CREAT+O_WRONLY+O_TRUNC+O_EXCL+O_BINARY, S_IRWXU);
+ assert(mt_fd != -1);
+ r = close(mt_fd); assert(r == 0);
+ }
+
+ // create N log files with a hello message
+ for (int i=0; i<N; i++) {
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ char str[32];
+ sprintf(str, "hello%d", helloseq++);
+ BYTESTRING bs0 = { .len = (uint32_t) strlen(str), .data = str };
+ toku_log_comment(logger, &lsn, 0, 0, bs0);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ }
+
+ // CREATE AN EMPTY FILE (tests [t:2384])
+ {
+ long long nexti;
+ r = toku_logger_find_next_unused_log_file(TOKU_TEST_FILENAME, &nexti);
+ assert(r == 0);
+ char mt_fname[TOKU_PATH_MAX+1];
+ snprintf(mt_fname, TOKU_PATH_MAX, "%s/log%012lld.tokulog%d", TOKU_TEST_FILENAME, nexti, TOKU_LOG_VERSION);
+ int mt_fd = open(mt_fname, O_CREAT+O_WRONLY+O_TRUNC+O_EXCL+O_BINARY, S_IRWXU);
+ assert(mt_fd != -1);
+ r = close(mt_fd); assert(r == 0);
+ }
+
+ // verify the log forwards
+ TOKULOGCURSOR lc = NULL;
+ struct log_entry *le;
+
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ helloseq = 0;
+ for (int i=0; i<2*N; i++) {
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r == 0 && le->cmd == LT_comment);
+ char expect[32];
+ sprintf(expect, "hello%d", helloseq++);
+ assert(le->u.comment.comment.len == strlen(expect) && memcmp(le->u.comment.comment.data, expect, le->u.comment.comment.len) == 0);
+ }
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ // verify the log backwards
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ helloseq = 2*N;
+ for (int i=0; i<2*N; i++) {
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r == 0 && le->cmd == LT_comment);
+ char expect[32];
+ sprintf(expect, "hello%d", --helloseq);
+ assert(le->u.comment.comment.len == strlen(expect) && memcmp(le->u.comment.comment.data, expect, le->u.comment.comment.len) == 0);
+ }
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ // VERIFY TRIM WORKS WITH ZERO LENGTH FILE [t:2384]
+ {
+ LSN trim_lsn;
+ trim_lsn.lsn = (2*N)-1;
+ r = toku_logger_create(&logger); assert(r==0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r==0);
+
+ toku_logger_maybe_trim_log(logger, trim_lsn);
+ assert( toku_logfilemgr_num_logfiles(logger->logfilemgr) == 4 ); // the untrimmed logs, the trailing zero-length file, plus the newly opened log
+
+ r = toku_logger_close(&logger);
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile.cc b/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile.cc
new file mode 100644
index 00000000..55e5a209
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logcursor-empty-logfile.cc
@@ -0,0 +1,148 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "logger/logcursor.h"
+#include "test.h"
+
+const int N = 2;
+
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ TOKULOGGER logger;
+ LSN lsn = ZERO_LSN;
+
+ int helloseq = 0;
+
+ // create N log files with a hello message
+ for (int i=0; i<N; i++) {
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ char str[32];
+ sprintf(str, "hello%d", helloseq++);
+ BYTESTRING bs0 = { .len = (uint32_t) strlen(str), .data = str };
+ toku_log_comment(logger, &lsn, 0, 0, bs0);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ }
+
+ // create N empty log files
+ for (int i=0; i<N; i++) {
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ }
+
+ // create N log files with a hello message
+ for (int i=0; i<N; i++) {
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ char str[32];
+ sprintf(str, "hello%d", helloseq++);
+ BYTESTRING bs0 = { .len = (uint32_t) strlen(str), .data = str };
+ toku_log_comment(logger, &lsn, 0, 0, bs0);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+ }
+
+ // verify the log forwards
+ TOKULOGCURSOR lc = NULL;
+ struct log_entry *le;
+
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ helloseq = 0;
+ for (int i=0; i<2*N; i++) {
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r == 0 && le->cmd == LT_comment);
+ char expect[32];
+ sprintf(expect, "hello%d", helloseq++);
+ assert(le->u.comment.comment.len == strlen(expect) && memcmp(le->u.comment.comment.data, expect, le->u.comment.comment.len) == 0);
+ }
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ // verify the log backwards
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ helloseq = 2*N;
+ for (int i=0; i<2*N; i++) {
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r == 0 && le->cmd == LT_comment);
+ char expect[32];
+ sprintf(expect, "hello%d", --helloseq);
+ assert(le->u.comment.comment.len == strlen(expect) && memcmp(le->u.comment.comment.data, expect, le->u.comment.comment.len) == 0);
+ }
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logcursor-fw.cc b/storage/tokudb/PerconaFT/ft/tests/logcursor-fw.cc
new file mode 100644
index 00000000..9eaefe4f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logcursor-fw.cc
@@ -0,0 +1,71 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+
+// walk forward through the log files found in the current directory
+
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ int r;
+
+ // verify the log forwards
+ TOKULOGCURSOR lc = NULL;
+ r = toku_logcursor_create(&lc, ".");
+ assert(r == 0 && lc != NULL);
+
+ int n = 0;
+ while (1) {
+ struct log_entry *le = NULL;
+ r = toku_logcursor_next(lc, &le);
+ if (r != 0)
+ break;
+ n++;
+ }
+
+ printf("n=%d\n", n);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logcursor-print.cc b/storage/tokudb/PerconaFT/ft/tests/logcursor-print.cc
new file mode 100644
index 00000000..e68d2e86
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logcursor-print.cc
@@ -0,0 +1,62 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "logger/logcursor.h"
+
+int test_main(int argc, const char *argv[]) {
+ int r;
+
+ default_parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU);
+ assert(r == 0);
+
+ TOKULOGCURSOR lc;
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0);
+
+ toku_logcursor_print(lc);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logcursor-timestamp.cc b/storage/tokudb/PerconaFT/ft/tests/logcursor-timestamp.cc
new file mode 100644
index 00000000..98c44962
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logcursor-timestamp.cc
@@ -0,0 +1,131 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "logger/logcursor.h"
+#include "test.h"
+
+static uint64_t now(void) {
+ struct timeval tv;
+ int r = gettimeofday(&tv, NULL);
+ assert(r == 0);
+ return tv.tv_sec * 1000000ULL + tv.tv_usec;
+}
+
+// log a couple of timestamp entries and verify the log by walking
+// a cursor through the log entries
+
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r==0);
+ TOKULOGGER logger;
+ LSN lsn = ZERO_LSN;
+
+ // log a couple of timestamp log entries
+
+ r = toku_logger_create(&logger);
+ assert(r == 0);
+
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger);
+ assert(r == 0);
+
+ BYTESTRING bs0 = { .len = 5, .data = (char *) "hello" };
+ toku_log_comment(logger, &lsn, 0, now(), bs0);
+
+ sleep(11);
+
+ BYTESTRING bs1 = { .len = 5, .data = (char *) "world" };
+ toku_log_comment(logger, &lsn, 0, now(), bs1);
+
+ r = toku_logger_close(&logger);
+ assert(r == 0);
+
+ // verify the log forwards
+ TOKULOGCURSOR lc = NULL;
+ struct log_entry *le;
+
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r == 0 && le->cmd == LT_comment);
+ assert(le->u.comment.comment.len == 5 && memcmp(le->u.comment.comment.data, "hello", 5) == 0);
+ uint64_t t = le->u.comment.timestamp;
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r == 0 && le->cmd == LT_comment);
+ assert(le->u.comment.comment.len == 5 && memcmp(le->u.comment.comment.data, "world", 5) == 0);
+ if (verbose)
+ printf("%" PRIu64 "\n", le->u.comment.timestamp - t);
+ assert(le->u.comment.timestamp - t >= 10*1000000);
+
+ r = toku_logcursor_next(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ // verify the log backwards
+ r = toku_logcursor_create(&lc, TOKU_TEST_FILENAME);
+ assert(r == 0 && lc != NULL);
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r == 0 && le->cmd == LT_comment);
+ assert(le->u.comment.comment.len == 5 && memcmp(le->u.comment.comment.data, "world", 5) == 0);
+ t = le->u.comment.timestamp;
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r == 0 && le->cmd == LT_comment);
+ assert(le->u.comment.comment.len == 5 && memcmp(le->u.comment.comment.data, "hello", 5) == 0);
+ if (verbose)
+ printf("%" PRIu64 "\n", t - le->u.comment.timestamp);
+ assert(t - le->u.comment.timestamp >= 10*1000000);
+
+ r = toku_logcursor_prev(lc, &le);
+ assert(r != 0);
+
+ r = toku_logcursor_destroy(&lc);
+ assert(r == 0 && lc == NULL);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logfilemgr-create-destroy.cc b/storage/tokudb/PerconaFT/ft/tests/logfilemgr-create-destroy.cc
new file mode 100644
index 00000000..df854692
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logfilemgr-create-destroy.cc
@@ -0,0 +1,53 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/tests/test.h"
+#include "ft/logger/logfilemgr.h"
+
+int test_main(int argc __attribute__((unused)), const char *argv[] __attribute__((unused))) {
+ int r;
+
+ TOKULOGFILEMGR lfm = NULL;
+ r = toku_logfilemgr_create(&lfm);
+ assert(r == 0);
+
+ r = toku_logfilemgr_destroy(&lfm);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/logfilemgr-print.cc b/storage/tokudb/PerconaFT/ft/tests/logfilemgr-print.cc
new file mode 100644
index 00000000..e22350dc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/logfilemgr-print.cc
@@ -0,0 +1,55 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/tests/test.h"
+#include "ft/logger/logfilemgr.h"
+
+int test_main(int argc __attribute__((unused)), const char *argv[] __attribute__((unused))) {
+ int r;
+
+ TOKULOGFILEMGR lfm = NULL;
+ r = toku_logfilemgr_create(&lfm);
+ assert(r == 0);
+
+ toku_logfilemgr_print(lfm);
+
+ r = toku_logfilemgr_destroy(&lfm);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/make-tree.cc b/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
new file mode 100644
index 00000000..fe950b60
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
@@ -0,0 +1,244 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// generate fractal trees with a given height, fanout, and number of leaf elements per leaf.
+// jam the child buffers with inserts.
+// this code can be used as a template to build broken trees
+//
+// To correctly set msn per node:
+// - set in each non-leaf when message is injected into node (see insert_into_child_buffer())
+// - set in each leaf node (see append_leaf())
+// - set in root node (see test_make_tree())
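+//
+// A usage sketch (flag names taken from test_main() below; the test binary name
+// depends on how make-tree.cc is built):
+//   ./make-tree --height 2 --fanout 4 --nperleaf 16 --verify 1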
+
+
+
+#include <ft-cachetable-wrappers.h>
+#include "test.h"
+
+static FTNODE
+make_node(FT_HANDLE ft, int height) {
+ FTNODE node = NULL;
+ int n_children = (height == 0) ? 1 : 0;
+ toku_create_new_ftnode(ft, &node, height, n_children);
+ if (n_children) BP_STATE(node,0) = PT_AVAIL;
+ return node;
+}
+
+static void
+append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) {
+ assert(leafnode->height == 0);
+
+ DBT thekey; toku_fill_dbt(&thekey, key, keylen);
+ DBT theval; toku_fill_dbt(&theval, val, vallen);
+
+ // get an index that we can use to create a new leaf entry
+ uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs();
+
+ MSN msn = next_dummymsn();
+
+ // apply an insert to the leaf node
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
+ ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ leafnode->max_msn_applied_to_node_on_disk = msn;
+
+ // don't forget to dirty the node
+ leafnode->set_dirty();
+}
+
+static void
+populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
+ for (int i = 0; i < n; i++) {
+ int k = htonl(seq + i);
+ int v = seq + i;
+ append_leaf(leafnode, &k, sizeof k, &v, sizeof v);
+ }
+ *minkey = htonl(seq);
+ *maxkey = htonl(seq + n - 1);
+}
+
+static void
+insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, int maxkey) {
+ for (unsigned int val = htonl(minkey); val <= htonl(maxkey); val++) {
+ MSN msn = next_dummymsn();
+ unsigned int key = htonl(val);
+ DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key);
+ DBT theval; toku_fill_dbt(&theval, &val, sizeof val);
+ toku_ft_append_to_child_buffer(ft->ft->cmp, node, childnum, FT_INSERT, msn, toku_xids_get_root_xids(), true, &thekey, &theval);
+ node->max_msn_applied_to_node_on_disk = msn;
+ }
+}
+
+static FTNODE
+make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
+ FTNODE node;
+ if (height == 0) {
+ node = make_node(ft, 0);
+ populate_leaf(node, *seq, nperleaf, minkey, maxkey);
+ *seq += nperleaf;
+ } else {
+ node = make_node(ft, height);
+ int minkeys[fanout], maxkeys[fanout];
+ for (int childnum = 0; childnum < fanout; childnum++) {
+ FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
+ if (childnum == 0) {
+ toku_ft_nonleaf_append_child(node, child, NULL);
+ } else {
+ int k = maxkeys[childnum-1]; // use the max of the left tree
+ DBT pivotkey;
+ toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
+ }
+ toku_unpin_ftnode(ft->ft, child);
+ insert_into_child_buffer(ft, node, childnum, minkeys[childnum], maxkeys[childnum]);
+ }
+ *minkey = minkeys[0];
+ *maxkey = maxkeys[0];
+ for (int i = 1; i < fanout; i++) {
+ if (memcmp(minkey, &minkeys[i], sizeof minkeys[i]) > 0)
+ *minkey = minkeys[i];
+ if (memcmp(maxkey, &maxkeys[i], sizeof maxkeys[i]) < 0)
+ *maxkey = maxkeys[i];
+ }
+ }
+ return node;
+}
+
+static UU() void
+deleted_row(UU() DB *db, UU() DBT *key, UU() DBT *val) {
+}
+
+static void
+test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
+ int r;
+
+ // cleanup
+ const char *fname = TOKU_TEST_FILENAME;
+ r = unlink(fname);
+ if (r != 0) {
+ assert(r == -1);
+ assert(get_error_errno() == ENOENT);
+ }
+
+ // create a cachetable
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ // create the ft
+ TOKUTXN null_txn = NULL;
+ FT_HANDLE ft = NULL;
+ r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r == 0);
+
+ // make a tree
+ int seq = 0, minkey, maxkey;
+ FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
+
+ // set the new root to point to the new tree
+ toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum);
+
+ ft->ft->h->max_msn_in_ft = last_dummymsn(); // capture msn of last message injected into tree
+
+ // unpin the new root
+ toku_unpin_ftnode(ft->ft, newroot);
+
+ if (do_verify) {
+ r = toku_verify_ft(ft);
+ assert(r == 0);
+ }
+
+ // flush to the file system
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+
+ // shutdown the cachetable
+ toku_cachetable_close(&ct);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ int height = 1;
+ int fanout = 2;
+ int nperleaf = 8;
+ int do_verify = 1;
+ initialize_dummymsn();
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--height") == 0 && i+1 < argc) {
+ height = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--fanout") == 0 && i+1 < argc) {
+ fanout = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nperleaf") == 0 && i+1 < argc) {
+ nperleaf = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--verify") == 0 && i+1 < argc) {
+ do_verify = atoi(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+ test_make_tree(height, fanout, nperleaf, do_verify);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/mempool-115.cc b/storage/tokudb/PerconaFT/ft/tests/mempool-115.cc
new file mode 100644
index 00000000..bf9a1aa1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/mempool-115.cc
@@ -0,0 +1,144 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "bndata.h"
+
+static void
+le_add_to_bn(bn_data* bn, uint32_t idx, const char *key, int keysize, const char *val, int valsize)
+{
+ LEAFENTRY r = NULL;
+ uint32_t size_needed = LE_CLEAN_MEMSIZE(valsize);
+ void *maybe_free = nullptr;
+ bn->get_space_for_insert(
+ idx,
+ key,
+ keysize,
+ size_needed,
+ &r,
+ &maybe_free
+ );
+ if (maybe_free) {
+ toku_free(maybe_free);
+ }
+ resource_assert(r);
+ r->type = LE_CLEAN;
+ r->u.clean.vallen = valsize;
+ memcpy(r->u.clean.val, val, valsize);
+}
+
+static void
+le_overwrite(bn_data* bn, uint32_t idx, const char *key, int keysize, const char *val, int valsize) {
+ LEAFENTRY r = NULL;
+ uint32_t size_needed = LE_CLEAN_MEMSIZE(valsize);
+ void *maybe_free = nullptr;
+ bn->get_space_for_overwrite(
+ idx,
+ key,
+ keysize,
+ keysize, // old_keylen
+ size_needed, // old_le_size
+ size_needed,
+ &r,
+ &maybe_free
+ );
+ if (maybe_free) {
+ toku_free(maybe_free);
+ }
+ resource_assert(r);
+ r->type = LE_CLEAN;
+ r->u.clean.vallen = valsize;
+ memcpy(r->u.clean.val, val, valsize);
+}
+
+
+class bndata_bugfix_test {
+public:
+ void
+ run_test(void) {
+ // struct ft_handle source_ft;
+ struct ftnode sn;
+
+ // just copy this code from a previous test
+ // don't care what it does, just want to get a node up and running
+ sn.flags = 0x11223344;
+ sn.blocknum.b = 20;
+ sn.layout_version = FT_LAYOUT_VERSION;
+ sn.layout_version_original = FT_LAYOUT_VERSION;
+ sn.height = 0;
+ sn.n_children = 2;
+ sn.set_dirty();
+ sn.oldest_referenced_xid_known = TXNID_NONE;
+ MALLOC_N(sn.n_children, sn.bp);
+ DBT pivotkey;
+ sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "b", 2), 1);
+ BP_STATE(&sn,0) = PT_AVAIL;
+ BP_STATE(&sn,1) = PT_AVAIL;
+ set_BLB(&sn, 0, toku_create_empty_bn());
+ set_BLB(&sn, 1, toku_create_empty_bn());
+ le_add_to_bn(BLB_DATA(&sn, 0), 0, "a", 2, "aval", 5);
+ le_add_to_bn(BLB_DATA(&sn, 0), 1, "b", 2, "bval", 5);
+ le_add_to_bn(BLB_DATA(&sn, 1), 0, "x", 2, "xval", 5);
+
+ // now this is the test: keep getting space for overwrite on the same key
+ // over and over; if the freed space is not reclaimed, the bug shows up
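+ // (each overwrite frees the previous entry's space in the mempool; if that space is never reused the mempool grows without bound and the size check below fails)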
+ bn_data* bnd = BLB_DATA(&sn, 0);
+ size_t old_size = bnd->m_buffer_mempool.size;
+ if (verbose) printf("frag size: %zu\n", bnd->m_buffer_mempool.frag_size);
+ if (verbose) printf("size: %zu\n", bnd->m_buffer_mempool.size);
+ for (uint32_t i = 0; i < 1000000; i++) {
+ le_overwrite(bnd, 0, "a", 2, "aval", 5);
+ }
+ if (verbose) printf("frag size: %zu\n", bnd->m_buffer_mempool.frag_size);
+ if (verbose) printf("size: %zu\n", bnd->m_buffer_mempool.size);
+ size_t new_size = bnd->m_buffer_mempool.size;
+ // just a crude test to make sure we did not grow unbounded.
+ // if this assert ever fails, revisit the code and see what is going
+ // on. It may be that some algorithm has changed.
+ assert(new_size < 5*old_size);
+
+ toku_destroy_ftnode_internals(&sn);
+ }
+};
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ bndata_bugfix_test t;
+ t.run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc b/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
new file mode 100644
index 00000000..6d13eabf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
@@ -0,0 +1,252 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Verify that a message with an old msn is ignored
+// by toku_apply_msg_to_leaf()
+//
+// method:
+// - inject valid message, verify that new value is in row
+// - inject message with same msn and new value, verify that original value is still in key (verify msg.msn == node.max_msn is rejected)
+// - inject valid message with new value2, verify that row has new value2
+// - inject message with old msn, verify that row still has value2 (verify msg.msn < node.max_msn is rejected)
+
+
+// TODO:
+// - verify that no work is done by messages that should be ignored (via workdone arg to ft_leaf_put_msg())
+// - maybe get counter of messages ignored for old msn (once the counter is implemented in ft-ops.c)
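+//
+// The guard being exercised is essentially (a sketch, not the literal code):
+//   if (msg.msn <= node.max_msn_applied_to_node_on_disk) { /* ignore the message */ }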
+
+#include "ft-internal.h"
+#include <ft-cachetable-wrappers.h>
+
+#include "test.h"
+
+static FTNODE
+make_node(FT_HANDLE ft, int height) {
+ FTNODE node = NULL;
+ int n_children = (height == 0) ? 1 : 0;
+ toku_create_new_ftnode(ft, &node, height, n_children);
+ if (n_children) BP_STATE(node,0) = PT_AVAIL;
+ return node;
+}
+
+static void
+append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val, uint32_t vallen) {
+ assert(leafnode->height == 0);
+
+ DBT thekey; toku_fill_dbt(&thekey, key, keylen);
+ DBT theval; toku_fill_dbt(&theval, val, vallen);
+ DBT badval; toku_fill_dbt(&badval, (char*)val+1, vallen);
+ DBT val2; toku_fill_dbt(&val2, (char*)val+2, vallen);
+
+ struct check_pair pair = {keylen, key, vallen, val, 0};
+ struct check_pair pair2 = {keylen, key, vallen, (char*)val+2, 0};
+
+ // apply an insert to the leaf node
+ MSN msn = next_dummymsn();
+ ft->ft->h->max_msn_in_ft = msn;
+ ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
+
+ toku_ft_leaf_apply_msg(
+ ft->ft->cmp,
+ ft->ft->update_fun,
+ leafnode,
+ -1,
+ msg,
+ &gc_info,
+ nullptr,
+ nullptr,
+ nullptr);
+ {
+ int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair);
+ assert(r==0);
+ assert(pair.call_count==1);
+ }
+
+ ft_msg badmsg(&thekey, &badval, FT_INSERT, msn, toku_xids_get_root_xids());
+ toku_ft_leaf_apply_msg(
+ ft->ft->cmp,
+ ft->ft->update_fun,
+ leafnode,
+ -1,
+ badmsg,
+ &gc_info,
+ nullptr,
+ nullptr,
+ nullptr);
+
+ // message should be rejected for duplicate msn, row should still have original val
+ {
+ int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair);
+ assert(r==0);
+ assert(pair.call_count==2);
+ }
+
+ // now verify that message with proper msn gets through
+ msn = next_dummymsn();
+ ft->ft->h->max_msn_in_ft = msn;
+ ft_msg msg2(&thekey, &val2, FT_INSERT, msn, toku_xids_get_root_xids());
+ toku_ft_leaf_apply_msg(
+ ft->ft->cmp,
+ ft->ft->update_fun,
+ leafnode,
+ -1,
+ msg2,
+ &gc_info,
+ nullptr,
+ nullptr,
+ nullptr);
+
+ // message should be accepted, val should have new value
+ {
+ int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair2);
+ assert(r==0);
+ assert(pair2.call_count==1);
+ }
+
+ // now verify that message with lesser (older) msn is rejected
+ msn.msn = msn.msn - 10;
+ ft_msg msg3(&thekey, &badval, FT_INSERT, msn, toku_xids_get_root_xids());
+ toku_ft_leaf_apply_msg(
+ ft->ft->cmp,
+ ft->ft->update_fun,
+ leafnode,
+ -1,
+ msg3,
+ &gc_info,
+ nullptr,
+ nullptr,
+ nullptr);
+
+ // message should be rejected, val should still have value in pair2
+ {
+ int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair2);
+ assert(r==0);
+ assert(pair2.call_count==2);
+ }
+
+ // don't forget to dirty the node
+ leafnode->set_dirty();
+}
+
+static void
+populate_leaf(FT_HANDLE ft, FTNODE leafnode, int k, int v) {
+ char vbuf[32]; // store v in a buffer large enough to dereference unaligned int's
+ memset(vbuf, 0, sizeof vbuf);
+ memcpy(vbuf, &v, sizeof v);
+ append_leaf(ft, leafnode, &k, sizeof k, vbuf, sizeof v);
+}
+
+static void
+test_msnfilter(int do_verify) {
+ int r;
+
+ // cleanup
+ const char *fname = TOKU_TEST_FILENAME;
+ r = unlink(fname);
+ if (r != 0) {
+ assert(r == -1);
+ assert(get_error_errno() == ENOENT);
+ }
+
+ // create a cachetable
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ // create the ft
+ TOKUTXN null_txn = NULL;
+ FT_HANDLE ft = NULL;
+ r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r == 0);
+
+ FTNODE newroot = make_node(ft, 0);
+
+ // set the new root to point to the new tree
+ toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum);
+
+ // KLUDGE: Unpin the new root so toku_ft_lookup() can pin it. (Pin lock is no longer a recursive
+ // mutex.) Just leaving it unpinned for this test program works because it is the only
+ // node in the cachetable and won't be evicted. The right solution would be to lock the
+ // node and unlock it again before and after each message injection, but that requires more
+ // work than it's worth (setting up dummy callbacks, etc.)
+ //
+ toku_unpin_ftnode(ft->ft, newroot);
+
+ populate_leaf(ft, newroot, htonl(2), 1);
+
+ if (do_verify) {
+ r = toku_verify_ft(ft);
+ assert(r == 0);
+ }
+
+ // flush to the file system
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+
+ // shutdown the cachetable
+ toku_cachetable_close(&ct);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ int do_verify = 1;
+ initialize_dummymsn();
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--verify") == 0 && i+1 < argc) {
+ do_verify = atoi(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+ test_msnfilter(do_verify);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/orthopush-flush.cc b/storage/tokudb/PerconaFT/ft/tests/orthopush-flush.cc
new file mode 100644
index 00000000..393fb88a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/orthopush-flush.cc
@@ -0,0 +1,1295 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include "ule.h"
+
+static TOKUTXN const null_txn = 0;
+static const char *fname = TOKU_TEST_FILENAME;
+static txn_gc_info non_mvcc_gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
+static toku::comparator dummy_cmp;
+
+// generate size random bytes into dest
+static void
+rand_bytes(void *dest, int size)
+{
+ long *l;
+ for (CAST_FROM_VOIDP(l, dest); (unsigned int) size >= (sizeof *l); ++l, size -= (sizeof *l)) {
+ *l = random();
+ }
+ for (char *c = (char *) l; size > 0; ++c, --size) {
+ *c = random() & 0xff;
+ }
+}
+
+// generate size random bytes into dest, with a lot less entropy (every
+// group of sizeof(long) bytes is filled with the same byte)
+static void
+rand_bytes_limited(void *dest, int size)
+{
+ long *l;
+ for (CAST_FROM_VOIDP(l, dest); (size_t) size >= (sizeof *l); ++l, size -= (sizeof *l)) {
+ char c = random() & 0xff;
+ for (char *p = (char *) l; (size_t) (p - (char *) l) < (sizeof *l); ++p) {
+ *p = c;
+ }
+ }
+ char c = random() & 0xff;
+ for (char *p = (char *) l; size > 0; ++p, --size) {
+ *p = c;
+ }
+}
+
+// generate a random message with xids and a key starting with pfx, insert
+// it in bnc, and save it in output params save and is_fresh_out
+static void
+insert_random_message(NONLEAF_CHILDINFO bnc, ft_msg **save, bool *is_fresh_out, XIDS xids, int pfx)
+{
+ int keylen = (random() % 128) + 16;
+ int vallen = (random() % 128) + 16;
+ void *key = toku_xmalloc(keylen + (sizeof pfx));
+ void *val = toku_xmalloc(vallen);
+ *(int *) key = pfx;
+ rand_bytes((char *) key + (sizeof pfx), keylen);
+ rand_bytes(val, vallen);
+ MSN msn = next_dummymsn();
+ bool is_fresh = (random() & 0x100) == 0;
+
+ DBT keydbt, valdbt;
+ toku_fill_dbt(&keydbt, key, keylen + (sizeof pfx));
+ toku_fill_dbt(&valdbt, val, vallen);
+ *save = new ft_msg(&keydbt, &valdbt, FT_INSERT, msn, xids);
+ *is_fresh_out = is_fresh;
+
+ toku_bnc_insert_msg(bnc, key, keylen + (sizeof pfx), val, vallen,
+ FT_INSERT, msn, xids, is_fresh,
+ dummy_cmp);
+}
+
+// generate a random message with xids and a key starting with pfx, insert
+// it into blb, and save it in output param save
+static void
+insert_random_message_to_bn(
+ FT_HANDLE t,
+ BASEMENTNODE blb,
+ void** keyp,
+ uint32_t* keylenp,
+ LEAFENTRY *save,
+ XIDS xids,
+ int pfx
+ )
+{
+ int keylen = (random() % 16) + 16;
+ int vallen = (random() % 128) + 16;
+ uint32_t *pfxp;
+ char key[(sizeof *pfxp) + keylen];
+ char val[vallen];
+ pfxp = (uint32_t *) &key[0];
+ *pfxp = pfx;
+ char *randkeyp = &key[sizeof *pfxp];
+ rand_bytes_limited(randkeyp, keylen);
+ rand_bytes(val, vallen);
+ MSN msn = next_dummymsn();
+
+ DBT keydbt_s, *keydbt, valdbt_s, *valdbt;
+ keydbt = &keydbt_s;
+ valdbt = &valdbt_s;
+ toku_fill_dbt(keydbt, key, (sizeof *pfxp) + keylen);
+ toku_fill_dbt(valdbt, val, vallen);
+ *keylenp = keydbt->size;
+ *keyp = toku_xmemdup(keydbt->data, keydbt->size);
+ ft_msg msg(keydbt, valdbt, FT_INSERT, msn, xids);
+ int64_t numbytes;
+ toku_le_apply_msg(
+ msg,
+ NULL,
+ NULL,
+ 0,
+ keydbt->size,
+ &non_mvcc_gc_info,
+ save,
+ &numbytes);
+ toku_ft_bn_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ blb,
+ msg,
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
+ if (msn.msn > blb->max_msn_applied.msn) {
+ blb->max_msn_applied = msn;
+ }
+}
+
+// generate a random message with xids and a key starting with pfx, insert
+// it into blb1 and also into blb2, and save it in output param save
+//
+// used for making two leaf nodes the same in order to compare the result
+// of 'maybe_apply' and a normal buffer flush
+static void
+insert_same_message_to_bns(
+ FT_HANDLE t,
+ BASEMENTNODE blb1,
+ BASEMENTNODE blb2,
+ void** keyp,
+ uint32_t* keylenp,
+ LEAFENTRY *save,
+ XIDS xids,
+ int pfx
+ )
+{
+ int keylen = (random() % 16) + 16;
+ int vallen = (random() % 128) + 16;
+ uint32_t *pfxp;
+ char key[(sizeof *pfxp) + keylen];
+ char val[vallen];
+ pfxp = (uint32_t *) &key[0];
+ *pfxp = pfx;
+ char *randkeyp = &key[sizeof *pfxp];
+ rand_bytes_limited(randkeyp, keylen);
+ rand_bytes(val, vallen);
+ MSN msn = next_dummymsn();
+
+ DBT keydbt_s, *keydbt, valdbt_s, *valdbt;
+ keydbt = &keydbt_s;
+ valdbt = &valdbt_s;
+ toku_fill_dbt(keydbt, key, (sizeof *pfxp) + keylen);
+ toku_fill_dbt(valdbt, val, vallen);
+ *keylenp = keydbt->size;
+ *keyp = toku_xmemdup(keydbt->data, keydbt->size);
+ ft_msg msg(keydbt, valdbt, FT_INSERT, msn, xids);
+ int64_t numbytes;
+ toku_le_apply_msg(
+ msg,
+ NULL,
+ NULL,
+ 0,
+ keydbt->size,
+ &non_mvcc_gc_info,
+ save,
+ &numbytes);
+ toku_ft_bn_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ blb1,
+ msg,
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
+ if (msn.msn > blb1->max_msn_applied.msn) {
+ blb1->max_msn_applied = msn;
+ }
+ toku_ft_bn_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ blb2,
+ msg,
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
+ if (msn.msn > blb2->max_msn_applied.msn) {
+ blb2->max_msn_applied = msn;
+ }
+}
+
+struct orthopush_flush_update_fun_extra {
+ DBT new_val;
+ int *num_applications;
+};
+
+static int
+orthopush_flush_update_fun(DB * UU(db), const DBT *UU(key), const DBT *UU(old_val), const DBT *extra,
+ void (*set_val)(const DBT *new_val, void *set_extra), void *set_extra) {
+ struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(e, extra->data);
+ (*e->num_applications)++;
+ set_val(&e->new_val, set_extra);
+ return 0;
+}
+
+// generate a random update message with xids and a key starting with pfx,
+// insert it into bnc, save it in output param save, and keep max_msn
+// updated with the largest msn generated so far
+//
+// the update message will overwrite the value with something generated
+// here, and add one to the int pointed to by applied
+static void
+insert_random_update_message(NONLEAF_CHILDINFO bnc, ft_msg **save, bool is_fresh, XIDS xids, int pfx, int *applied, MSN *max_msn)
+{
+ int keylen = (random() % 16) + 16;
+ int vallen = (random() % 16) + 16;
+ void *key = toku_xmalloc(keylen + (sizeof pfx));
+ struct orthopush_flush_update_fun_extra *XMALLOC(update_extra);
+ *(int *) key = pfx;
+ rand_bytes_limited((char *) key + (sizeof pfx), keylen);
+ toku_fill_dbt(&update_extra->new_val, toku_xmalloc(vallen), vallen);
+ rand_bytes(update_extra->new_val.data, vallen);
+ update_extra->num_applications = applied;
+ MSN msn = next_dummymsn();
+
+ DBT keydbt, valdbt;
+ toku_fill_dbt(&keydbt, key, keylen + (sizeof pfx));
+ toku_fill_dbt(&valdbt, update_extra, sizeof *update_extra);
+ *save = new ft_msg(&keydbt, &valdbt, FT_UPDATE, msn, xids);
+
+ toku_bnc_insert_msg(bnc, key, keylen + (sizeof pfx),
+ update_extra, sizeof *update_extra,
+ FT_UPDATE, msn, xids, is_fresh,
+ dummy_cmp);
+ if (msn.msn > max_msn->msn) {
+ *max_msn = msn;
+ }
+}
+
+// flush from one internal node to another, where both only have one
+// buffer
+static void
+flush_to_internal(FT_HANDLE t) {
+ int r;
+
+ ft_msg **MALLOC_N(4096,parent_messages); // 128k / 32 = 4096
+ ft_msg **MALLOC_N(4096,child_messages);
+ bool *MALLOC_N(4096,parent_messages_is_fresh);
+ bool *MALLOC_N(4096,child_messages_is_fresh);
+ memset(parent_messages_is_fresh, 0, 4096*(sizeof parent_messages_is_fresh[0]));
+ memset(child_messages_is_fresh, 0, 4096*(sizeof child_messages_is_fresh[0]));
+
+ XIDS xids_0 = toku_xids_get_root_xids();
+ XIDS xids_123, xids_234;
+ r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123);
+ CKERR(r);
+ r = toku_xids_create_child(xids_0, &xids_234, (TXNID)234);
+ CKERR(r);
+
+ NONLEAF_CHILDINFO child_bnc = toku_create_empty_nl();
+ int i;
+ for (i = 0; toku_bnc_memory_used(child_bnc) < 128*1024; ++i) {
+ insert_random_message(child_bnc, &child_messages[i], &child_messages_is_fresh[i], xids_123, 0);
+ }
+ int num_child_messages = i;
+
+ NONLEAF_CHILDINFO parent_bnc = toku_create_empty_nl();
+ for (i = 0; toku_bnc_memory_used(parent_bnc) < 128*1024; ++i) {
+ insert_random_message(parent_bnc, &parent_messages[i], &parent_messages_is_fresh[i], xids_234, 0);
+ }
+ int num_parent_messages = i;
+
+ FTNODE XMALLOC(child);
+ BLOCKNUM blocknum = { 42 };
+ toku_initialize_empty_ftnode(child, blocknum, 1, 1, FT_LAYOUT_VERSION, 0);
+ destroy_nonleaf_childinfo(BNC(child, 0));
+ set_BNC(child, 0, child_bnc);
+ BP_STATE(child, 0) = PT_AVAIL;
+
+ toku_bnc_flush_to_child(t->ft, parent_bnc, child, TXNID_NONE);
+
+ int parent_messages_present[num_parent_messages];
+ int child_messages_present[num_child_messages];
+ memset(parent_messages_present, 0, sizeof parent_messages_present);
+ memset(child_messages_present, 0, sizeof child_messages_present);
+
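+    // functor passed to msg_buffer.iterate(): every message now in the
+    // child's buffer must match exactly one of the original parent or
+    // child messages (same value, type, xids and freshness), and each
+    // original message may only be matched once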
+ struct checkit_fn {
+ int num_parent_messages;
+ ft_msg **parent_messages;
+ int *parent_messages_present;
+ bool *parent_messages_is_fresh;
+ int num_child_messages;
+ ft_msg **child_messages;
+ int *child_messages_present;
+ bool *child_messages_is_fresh;
+ checkit_fn(int np, ft_msg **pm, int *npp, bool *pmf, int nc, ft_msg **cm, int *ncp, bool *cmf) :
+ num_parent_messages(np), parent_messages(pm), parent_messages_present(npp), parent_messages_is_fresh(pmf),
+ num_child_messages(nc), child_messages(cm), child_messages_present(ncp), child_messages_is_fresh(cmf) {
+ }
+ int operator()(const ft_msg &msg, bool is_fresh) {
+ DBT keydbt;
+ DBT valdbt;
+ toku_fill_dbt(&keydbt, msg.kdbt()->data, msg.kdbt()->size);
+ toku_fill_dbt(&valdbt, msg.vdbt()->data, msg.vdbt()->size);
+ int found = 0;
+ MSN msn = msg.msn();
+ enum ft_msg_type type = msg.type();
+ XIDS xids = msg.xids();
+ for (int k = 0; k < num_parent_messages; ++k) {
+ if (dummy_cmp(&keydbt, parent_messages[k]->kdbt()) == 0 &&
+ msn.msn == parent_messages[k]->msn().msn) {
+ assert(parent_messages_present[k] == 0);
+ assert(found == 0);
+ assert(dummy_cmp(&valdbt, parent_messages[k]->vdbt()) == 0);
+ assert(type == parent_messages[k]->type());
+ assert(toku_xids_get_innermost_xid(xids) == toku_xids_get_innermost_xid(parent_messages[k]->xids()));
+ assert(parent_messages_is_fresh[k] == is_fresh);
+ parent_messages_present[k]++;
+ found++;
+ }
+ }
+ for (int k = 0; k < num_child_messages; ++k) {
+ if (dummy_cmp(&keydbt, child_messages[k]->kdbt()) == 0 &&
+ msn.msn == child_messages[k]->msn().msn) {
+ assert(child_messages_present[k] == 0);
+ assert(found == 0);
+ assert(dummy_cmp(&valdbt, child_messages[k]->vdbt()) == 0);
+ assert(type == child_messages[k]->type());
+ assert(toku_xids_get_innermost_xid(xids) == toku_xids_get_innermost_xid(child_messages[k]->xids()));
+ assert(child_messages_is_fresh[k] == is_fresh);
+ child_messages_present[k]++;
+ found++;
+ }
+ }
+ assert(found == 1);
+ return 0;
+ }
+ } checkit(num_parent_messages, parent_messages, parent_messages_present, parent_messages_is_fresh,
+ num_child_messages, child_messages, child_messages_present, child_messages_is_fresh);
+ child_bnc->msg_buffer.iterate(checkit);
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ assert(parent_messages_present[i] == 1);
+ }
+ for (i = 0; i < num_child_messages; ++i) {
+ assert(child_messages_present[i] == 1);
+ }
+
+ toku_xids_destroy(&xids_0);
+ toku_xids_destroy(&xids_123);
+ toku_xids_destroy(&xids_234);
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ toku_free(parent_messages[i]->kdbt()->data);
+ toku_free(parent_messages[i]->vdbt()->data);
+ delete parent_messages[i];
+ }
+ for (i = 0; i < num_child_messages; ++i) {
+ toku_free(child_messages[i]->kdbt()->data);
+ toku_free(child_messages[i]->vdbt()->data);
+ delete child_messages[i];
+ }
+ destroy_nonleaf_childinfo(parent_bnc);
+ toku_ftnode_free(&child);
+ toku_free(parent_messages);
+ toku_free(child_messages);
+ toku_free(parent_messages_is_fresh);
+ toku_free(child_messages_is_fresh);
+}
+
+// flush from one internal node to another, where the child has 8 buffers
+static void
+flush_to_internal_multiple(FT_HANDLE t) {
+ int r;
+
+ ft_msg **MALLOC_N(4096,parent_messages); // 128k / 32 = 4096
+ ft_msg **MALLOC_N(4096,child_messages);
+ bool *MALLOC_N(4096,parent_messages_is_fresh);
+ bool *MALLOC_N(4096,child_messages_is_fresh);
+ memset(parent_messages_is_fresh, 0, 4096*(sizeof parent_messages_is_fresh[0]));
+ memset(child_messages_is_fresh, 0, 4096*(sizeof child_messages_is_fresh[0]));
+
+ XIDS xids_0 = toku_xids_get_root_xids();
+ XIDS xids_123, xids_234;
+ r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123);
+ CKERR(r);
+ r = toku_xids_create_child(xids_0, &xids_234, (TXNID)234);
+ CKERR(r);
+
+ NONLEAF_CHILDINFO child_bncs[8];
+ ft_msg *childkeys[7];
+ int i;
+ for (i = 0; i < 8; ++i) {
+ child_bncs[i] = toku_create_empty_nl();
+ if (i < 7) {
+ childkeys[i] = NULL;
+ }
+ }
+ int total_size = 0;
+ for (i = 0; total_size < 128*1024; ++i) {
+ total_size -= toku_bnc_memory_used(child_bncs[i%8]);
+ insert_random_message(child_bncs[i%8], &child_messages[i], &child_messages_is_fresh[i], xids_123, i%8);
+ total_size += toku_bnc_memory_used(child_bncs[i%8]);
+ if (i % 8 < 7) {
+ if (childkeys[i%8] == NULL || dummy_cmp(child_messages[i]->kdbt(), childkeys[i%8]->kdbt()) > 0) {
+ childkeys[i%8] = child_messages[i];
+ }
+ }
+ }
+ int num_child_messages = i;
+
+ NONLEAF_CHILDINFO parent_bnc = toku_create_empty_nl();
+ for (i = 0; toku_bnc_memory_used(parent_bnc) < 128*1024; ++i) {
+ insert_random_message(parent_bnc, &parent_messages[i], &parent_messages_is_fresh[i], xids_234, 0);
+ }
+ int num_parent_messages = i;
+
+ FTNODE XMALLOC(child);
+ BLOCKNUM blocknum = { 42 };
+ toku_initialize_empty_ftnode(child, blocknum, 1, 8, FT_LAYOUT_VERSION, 0);
+ for (i = 0; i < 8; ++i) {
+ destroy_nonleaf_childinfo(BNC(child, i));
+ set_BNC(child, i, child_bncs[i]);
+ BP_STATE(child, i) = PT_AVAIL;
+ if (i < 7) {
+ child->pivotkeys.insert_at(childkeys[i]->kdbt(), i);
+ }
+ }
+
+ toku_bnc_flush_to_child(t->ft, parent_bnc, child, TXNID_NONE);
+
+ int total_messages = 0;
+ for (i = 0; i < 8; ++i) {
+ total_messages += toku_bnc_n_entries(BNC(child, i));
+ }
+ assert(total_messages == num_parent_messages + num_child_messages);
+ int parent_messages_present[num_parent_messages];
+ int child_messages_present[num_child_messages];
+ memset(parent_messages_present, 0, sizeof parent_messages_present);
+ memset(child_messages_present, 0, sizeof child_messages_present);
+
+ for (int j = 0; j < 8; ++j) {
+ struct checkit_fn {
+ int num_parent_messages;
+ ft_msg **parent_messages;
+ int *parent_messages_present;
+ bool *parent_messages_is_fresh;
+ int num_child_messages;
+ ft_msg **child_messages;
+ int *child_messages_present;
+ bool *child_messages_is_fresh;
+ checkit_fn(int np, ft_msg **pm, int *npp, bool *pmf, int nc, ft_msg **cm, int *ncp, bool *cmf) :
+ num_parent_messages(np), parent_messages(pm), parent_messages_present(npp), parent_messages_is_fresh(pmf),
+ num_child_messages(nc), child_messages(cm), child_messages_present(ncp), child_messages_is_fresh(cmf) {
+ }
+ int operator()(const ft_msg &msg, bool is_fresh) {
+ DBT keydbt;
+ DBT valdbt;
+ toku_fill_dbt(&keydbt, msg.kdbt()->data, msg.kdbt()->size);
+ toku_fill_dbt(&valdbt, msg.vdbt()->data, msg.vdbt()->size);
+ int found = 0;
+ MSN msn = msg.msn();
+ enum ft_msg_type type = msg.type();
+ XIDS xids = msg.xids();
+ for (int _i = 0; _i < num_parent_messages; ++_i) {
+ if (dummy_cmp(&keydbt, parent_messages[_i]->kdbt()) == 0 &&
+ msn.msn == parent_messages[_i]->msn().msn) {
+ assert(parent_messages_present[_i] == 0);
+ assert(found == 0);
+ assert(dummy_cmp(&valdbt, parent_messages[_i]->vdbt()) == 0);
+ assert(type == parent_messages[_i]->type());
+ assert(toku_xids_get_innermost_xid(xids) == toku_xids_get_innermost_xid(parent_messages[_i]->xids()));
+ assert(parent_messages_is_fresh[_i] == is_fresh);
+ parent_messages_present[_i]++;
+ found++;
+ }
+ }
+ for (int _i = 0; _i < num_child_messages; ++_i) {
+ if (dummy_cmp(&keydbt, child_messages[_i]->kdbt()) == 0 &&
+ msn.msn == child_messages[_i]->msn().msn) {
+ assert(child_messages_present[_i] == 0);
+ assert(found == 0);
+ assert(dummy_cmp(&valdbt, child_messages[_i]->vdbt()) == 0);
+ assert(type == child_messages[_i]->type());
+ assert(toku_xids_get_innermost_xid(xids) == toku_xids_get_innermost_xid(child_messages[_i]->xids()));
+ assert(child_messages_is_fresh[_i] == is_fresh);
+ child_messages_present[_i]++;
+ found++;
+ }
+ }
+ assert(found == 1);
+ return 0;
+ }
+ } checkit(num_parent_messages, parent_messages, parent_messages_present, parent_messages_is_fresh,
+ num_child_messages, child_messages, child_messages_present, child_messages_is_fresh);
+ child_bncs[j]->msg_buffer.iterate(checkit);
+ }
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ assert(parent_messages_present[i] == 1);
+ }
+ for (i = 0; i < num_child_messages; ++i) {
+ assert(child_messages_present[i] == 1);
+ }
+
+ toku_xids_destroy(&xids_0);
+ toku_xids_destroy(&xids_123);
+ toku_xids_destroy(&xids_234);
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ toku_free(parent_messages[i]->kdbt()->data);
+ toku_free(parent_messages[i]->vdbt()->data);
+ delete parent_messages[i];
+ }
+ for (i = 0; i < num_child_messages; ++i) {
+ toku_free(child_messages[i]->kdbt()->data);
+ toku_free(child_messages[i]->vdbt()->data);
+ delete child_messages[i];
+ }
+ destroy_nonleaf_childinfo(parent_bnc);
+ toku_ftnode_free(&child);
+ toku_free(parent_messages);
+ toku_free(child_messages);
+ toku_free(parent_messages_is_fresh);
+ toku_free(child_messages_is_fresh);
+}
+
+// flush from one internal node to a leaf node, which has 8 basement
+// nodes
+//
+// if make_leaf_up_to_date is true, then apply the messages that are stale
+// in the parent to the leaf before doing the flush, otherwise assume the
+// leaf was just read off disk
+//
+// if use_flush is true, use a buffer flush, otherwise, use maybe_apply
+static void
+flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
+ int r;
+
+ ft_msg **MALLOC_N(4096,parent_messages); // 128k / 32 = 4096
+ LEAFENTRY* child_messages = NULL;
+ XMALLOC_N(4096,child_messages);
+ void** key_pointers = NULL;
+ XMALLOC_N(4096, key_pointers);
+ uint32_t* keylens = NULL;
+ XMALLOC_N(4096, keylens);
+ bool *MALLOC_N(4096,parent_messages_is_fresh);
+ memset(parent_messages_is_fresh, 0, 4096*(sizeof parent_messages_is_fresh[0]));
+ int *MALLOC_N(4096,parent_messages_applied);
+ memset(parent_messages_applied, 0, 4096*(sizeof parent_messages_applied[0]));
+
+ XIDS xids_0 = toku_xids_get_root_xids();
+ XIDS xids_123, xids_234;
+ r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123);
+ CKERR(r);
+ r = toku_xids_create_child(xids_0, &xids_234, (TXNID)234);
+ CKERR(r);
+
+ BASEMENTNODE child_blbs[8];
+ DBT childkeys[7];
+ int i;
+ for (i = 0; i < 8; ++i) {
+ child_blbs[i] = toku_create_empty_bn();
+ if (i < 7) {
+ toku_init_dbt(&childkeys[i]);
+ }
+ }
+
+ FTNODE child = NULL;
+ XMALLOC(child);
+ BLOCKNUM blocknum = { 42 };
+ toku_initialize_empty_ftnode(child, blocknum, 0, 8, FT_LAYOUT_VERSION, 0);
+ for (i = 0; i < 8; ++i) {
+ destroy_basement_node(BLB(child, i));
+ set_BLB(child, i, child_blbs[i]);
+ BP_STATE(child, i) = PT_AVAIL;
+ }
+
+ int total_size = 0;
+ for (i = 0; total_size < 128*1024; ++i) {
+ total_size -= child_blbs[i%8]->data_buffer.get_memory_size();
+ insert_random_message_to_bn(t, child_blbs[i%8], &key_pointers[i], &keylens[i], &child_messages[i], xids_123, i%8);
+ total_size += child_blbs[i%8]->data_buffer.get_memory_size();
+ if (i % 8 < 7) {
+ DBT keydbt;
+ if (childkeys[i%8].size == 0 || dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) > 0) {
+ toku_fill_dbt(&childkeys[i%8], key_pointers[i], keylens[i]);
+ }
+ }
+ }
+ int num_child_messages = i;
+
+ for (i = 0; i < num_child_messages; ++i) {
+ DBT keydbt;
+ if (i % 8 < 7) {
+ assert(dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) <= 0);
+ }
+ }
+
+ {
+ int num_stale = random() % 2000;
+ memset(&parent_messages_is_fresh[num_stale], true, (4096 - num_stale) * (sizeof parent_messages_is_fresh[0]));
+ }
+ NONLEAF_CHILDINFO parent_bnc = toku_create_empty_nl();
+ MSN max_parent_msn = MIN_MSN;
+ for (i = 0; toku_bnc_memory_used(parent_bnc) < 128*1024; ++i) {
+ insert_random_update_message(parent_bnc, &parent_messages[i], parent_messages_is_fresh[i], xids_234, i%8, &parent_messages_applied[i], &max_parent_msn);
+ }
+ int num_parent_messages = i;
+
+ for (i = 0; i < 7; ++i) {
+ child->pivotkeys.insert_at(&childkeys[i], i);
+ }
+
+ if (make_leaf_up_to_date) {
+ for (i = 0; i < num_parent_messages; ++i) {
+ if (!parent_messages_is_fresh[i]) {
+ toku_ft_leaf_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ child,
+ -1,
+ *parent_messages[i],
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
+ }
+ }
+ for (i = 0; i < 8; ++i) {
+ BLB(child, i)->stale_ancestor_messages_applied = true;
+ }
+ } else {
+ for (i = 0; i < 8; ++i) {
+ BLB(child, i)->stale_ancestor_messages_applied = false;
+ }
+ }
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ if (make_leaf_up_to_date && !parent_messages_is_fresh[i]) {
+ assert(parent_messages_applied[i] == 1);
+ } else {
+ assert(parent_messages_applied[i] == 0);
+ }
+ }
+
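+    // either really flush the parent buffer into the leaf, or wrap the
+    // buffer in a dummy one-child parent node and push the messages down
+    // through toku_apply_ancestors_messages_to_node (the maybe_apply path)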
+ if (use_flush) {
+ toku_bnc_flush_to_child(t->ft, parent_bnc, child, TXNID_NONE);
+ destroy_nonleaf_childinfo(parent_bnc);
+ } else {
+ FTNODE XMALLOC(parentnode);
+ BLOCKNUM parentblocknum = { 17 };
+ toku_initialize_empty_ftnode(parentnode, parentblocknum, 1, 1, FT_LAYOUT_VERSION, 0);
+ destroy_nonleaf_childinfo(BNC(parentnode, 0));
+ set_BNC(parentnode, 0, parent_bnc);
+ BP_STATE(parentnode, 0) = PT_AVAIL;
+ parentnode->max_msn_applied_to_node_on_disk = max_parent_msn;
+ struct ancestors ancestors = { .node = parentnode, .childnum = 0, .next = NULL };
+ bool msgs_applied;
+ toku_apply_ancestors_messages_to_node(t, child, &ancestors, pivot_bounds::infinite_bounds(), &msgs_applied, -1);
+
+ struct checkit_fn {
+ int operator()(const ft_msg &UU(msg), bool is_fresh) {
+ assert(!is_fresh);
+ return 0;
+ }
+ } checkit;
+ parent_bnc->msg_buffer.iterate(checkit);
+ invariant(parent_bnc->fresh_message_tree.size() + parent_bnc->stale_message_tree.size()
+ == (uint32_t) num_parent_messages);
+
+ toku_ftnode_free(&parentnode);
+ }
+
+ int total_messages = 0;
+ for (i = 0; i < 8; ++i) {
+ total_messages += BLB_DATA(child, i)->num_klpairs();
+ }
+ assert(total_messages <= num_parent_messages + num_child_messages);
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ assert(parent_messages_applied[i] == 1);
+ }
+
+ int parent_messages_present[num_parent_messages];
+ int child_messages_present[num_child_messages];
+ memset(parent_messages_present, 0, sizeof parent_messages_present);
+ memset(child_messages_present, 0, sizeof child_messages_present);
+ for (int j = 0; j < 8; ++j) {
+ uint32_t len = BLB_DATA(child, j)->num_klpairs();
+ for (uint32_t idx = 0; idx < len; ++idx) {
+ LEAFENTRY le;
+ DBT keydbt, valdbt;
+ {
+ uint32_t keylen, vallen;
+ void *keyp = NULL;
+ void *valp = NULL;
+ r = BLB_DATA(child, j)->fetch_klpair(idx, &le, &keylen, &keyp);
+ assert_zero(r);
+ valp = le_latest_val_and_len(le, &vallen);
+ toku_fill_dbt(&keydbt, keyp, keylen);
+ toku_fill_dbt(&valdbt, valp, vallen);
+ }
+ int found = 0;
+ for (i = num_parent_messages - 1; i >= 0; --i) {
+ if (dummy_cmp(&keydbt, parent_messages[i]->kdbt()) == 0) {
+ if (found == 0) {
+ struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(e, parent_messages[i]->vdbt()->data);
+ assert(dummy_cmp(&valdbt, &e->new_val) == 0);
+ found++;
+ }
+ assert(parent_messages_present[i] == 0);
+ parent_messages_present[i]++;
+ }
+ }
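+            // walk backwards through the child messages that went to
+            // basement node j: start at the largest index with i%8 == j
+            // (possibly one stride past the end, hence the bounds check)
+            // and step down by 8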
+ for (i = j + (~7 & (num_child_messages - 1)); i >= 0; i -= 8) {
+ if (i >= num_child_messages) { continue; }
+ DBT childkeydbt, childvaldbt;
+ {
+ uint32_t vallen;
+ void *valp = le_latest_val_and_len(child_messages[i], &vallen);
+ toku_fill_dbt(&childkeydbt, key_pointers[i], keylens[i]);
+ toku_fill_dbt(&childvaldbt, valp, vallen);
+ }
+ if (dummy_cmp(&keydbt, &childkeydbt) == 0) {
+ if (found == 0) {
+ assert(dummy_cmp(&valdbt, &childvaldbt) == 0);
+ found++;
+ }
+ assert(child_messages_present[i] == 0);
+ child_messages_present[i]++;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ assert(parent_messages_present[i] == 1);
+ }
+ for (i = 0; i < num_child_messages; ++i) {
+ assert(child_messages_present[i] == 1);
+ }
+
+ toku_xids_destroy(&xids_0);
+ toku_xids_destroy(&xids_123);
+ toku_xids_destroy(&xids_234);
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ toku_free(parent_messages[i]->kdbt()->data);
+ struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(extra, parent_messages[i]->vdbt()->data);
+ toku_free(extra->new_val.data);
+ toku_free(parent_messages[i]->vdbt()->data);
+ delete parent_messages[i];
+ }
+ for (i = 0; i < num_child_messages; ++i) {
+ toku_free(child_messages[i]);
+ toku_free(key_pointers[i]);
+ }
+ toku_ftnode_free(&child);
+ toku_free(parent_messages);
+ toku_free(key_pointers);
+ toku_free(keylens);
+ toku_free(child_messages);
+ toku_free(parent_messages_is_fresh);
+ toku_free(parent_messages_applied);
+}
+
+// flush from one internal node to a leaf node, which has 8 basement
+// nodes, but only using maybe_apply, and with actual pivot bounds
+//
+// if make_leaf_up_to_date is true, then apply the messages that are stale
+// in the parent to the leaf before doing the flush, otherwise assume the
+// leaf was just read off disk
+static void
+flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) {
+ int r;
+
+ ft_msg **MALLOC_N(4096,parent_messages); // 128k / 32 = 4k
+ LEAFENTRY* child_messages = NULL;
+ XMALLOC_N(4096,child_messages);
+ void** key_pointers = NULL;
+ XMALLOC_N(4096, key_pointers);
+ uint32_t* keylens = NULL;
+ XMALLOC_N(4096, keylens);
+ bool *MALLOC_N(4096,parent_messages_is_fresh);
+ memset(parent_messages_is_fresh, 0, 4096*(sizeof parent_messages_is_fresh[0]));
+ int *MALLOC_N(4096,parent_messages_applied);
+ memset(parent_messages_applied, 0, 4096*(sizeof parent_messages_applied[0]));
+
+ XIDS xids_0 = toku_xids_get_root_xids();
+ XIDS xids_123, xids_234;
+ r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123);
+ CKERR(r);
+ r = toku_xids_create_child(xids_0, &xids_234, (TXNID)234);
+ CKERR(r);
+
+ BASEMENTNODE child_blbs[8];
+ DBT childkeys[8];
+ int i;
+ for (i = 0; i < 8; ++i) {
+ child_blbs[i] = toku_create_empty_bn();
+ toku_init_dbt(&childkeys[i]);
+ }
+
+ FTNODE XMALLOC(child);
+ BLOCKNUM blocknum = { 42 };
+ toku_initialize_empty_ftnode(child, blocknum, 0, 8, FT_LAYOUT_VERSION, 0);
+ for (i = 0; i < 8; ++i) {
+ destroy_basement_node(BLB(child, i));
+ set_BLB(child, i, child_blbs[i]);
+ BP_STATE(child, i) = PT_AVAIL;
+ }
+
+ int total_size = 0;
+ for (i = 0; total_size < 128*1024; ++i) {
+ total_size -= child_blbs[i%8]->data_buffer.get_memory_size();
+ insert_random_message_to_bn(t, child_blbs[i%8], &key_pointers[i], &keylens[i], &child_messages[i], xids_123, i%8);
+ total_size += child_blbs[i%8]->data_buffer.get_memory_size();
+ DBT keydbt;
+ if (childkeys[i%8].size == 0 || dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) > 0) {
+ toku_fill_dbt(&childkeys[i%8], key_pointers[i], keylens[i]);
+ }
+ }
+ int num_child_messages = i;
+
+ for (i = 0; i < num_child_messages; ++i) {
+ DBT keydbt;
+ assert(dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) <= 0);
+ }
+
+ {
+ int num_stale = random() % 2000;
+ memset(&parent_messages_is_fresh[num_stale], true, (4096 - num_stale) * (sizeof parent_messages_is_fresh[0]));
+ }
+ NONLEAF_CHILDINFO parent_bnc = toku_create_empty_nl();
+ MSN max_parent_msn = MIN_MSN;
+ for (i = 0; toku_bnc_memory_used(parent_bnc) < 128*1024; ++i) {
+ insert_random_update_message(parent_bnc, &parent_messages[i], parent_messages_is_fresh[i], xids_234, i%8, &parent_messages_applied[i], &max_parent_msn);
+ }
+ int num_parent_messages = i;
+
+ for (i = 0; i < 7; ++i) {
+ child->pivotkeys.insert_at(&childkeys[i], i);
+ }
+
+ if (make_leaf_up_to_date) {
+ for (i = 0; i < num_parent_messages; ++i) {
+ if (dummy_cmp(parent_messages[i]->kdbt(), &childkeys[7]) <= 0 &&
+ !parent_messages_is_fresh[i]) {
+ toku_ft_leaf_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ child,
+ -1,
+ *parent_messages[i],
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
+ }
+ }
+ for (i = 0; i < 8; ++i) {
+ BLB(child, i)->stale_ancestor_messages_applied = true;
+ }
+ } else {
+ for (i = 0; i < 8; ++i) {
+ BLB(child, i)->stale_ancestor_messages_applied = false;
+ }
+ }
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ if (make_leaf_up_to_date &&
+ dummy_cmp(parent_messages[i]->kdbt(), &childkeys[7]) <= 0 &&
+ !parent_messages_is_fresh[i]) {
+ assert(parent_messages_applied[i] == 1);
+ } else {
+ assert(parent_messages_applied[i] == 0);
+ }
+ }
+
+ FTNODE XMALLOC(parentnode);
+ BLOCKNUM parentblocknum = { 17 };
+ toku_initialize_empty_ftnode(parentnode, parentblocknum, 1, 1, FT_LAYOUT_VERSION, 0);
+ destroy_nonleaf_childinfo(BNC(parentnode, 0));
+ set_BNC(parentnode, 0, parent_bnc);
+ BP_STATE(parentnode, 0) = PT_AVAIL;
+ parentnode->max_msn_applied_to_node_on_disk = max_parent_msn;
+ struct ancestors ancestors = { .node = parentnode, .childnum = 0, .next = NULL };
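+    // apply only messages up to the largest key in the last basement node:
+    // lbe is left empty (no lower bound) and ubi, a copy of childkeys[7],
+    // serves as the upper bound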
+ DBT lbe, ubi;
+ toku_init_dbt(&lbe);
+ toku_clone_dbt(&ubi, childkeys[7]);
+ const pivot_bounds bounds(lbe, ubi);
+ bool msgs_applied;
+ toku_apply_ancestors_messages_to_node(t, child, &ancestors, bounds, &msgs_applied, -1);
+
+ struct checkit_fn {
+ DBT *childkeys;
+ int num_parent_messages;
+ ft_msg **parent_messages;
+ bool *parent_messages_is_fresh;
+ checkit_fn(DBT *ck, int np, ft_msg **pm, bool *pmf) :
+ childkeys(ck), num_parent_messages(np), parent_messages(pm), parent_messages_is_fresh(pmf) {
+ }
+ int operator()(const ft_msg &msg, bool is_fresh) {
+ DBT keydbt;
+ toku_fill_dbt(&keydbt, msg.kdbt()->data, msg.kdbt()->size);
+ MSN msn = msg.msn();
+ if (dummy_cmp(&keydbt, &childkeys[7]) > 0) {
+ for (int _i = 0; _i < num_parent_messages; ++_i) {
+ if (dummy_cmp(&keydbt, parent_messages[_i]->kdbt()) == 0 &&
+ msn.msn == parent_messages[_i]->msn().msn) {
+ assert(is_fresh == parent_messages_is_fresh[_i]);
+ break;
+ }
+ }
+ } else {
+ assert(!is_fresh);
+ }
+ return 0;
+ }
+ } checkit(childkeys, num_parent_messages, parent_messages, parent_messages_is_fresh);
+ parent_bnc->msg_buffer.iterate(checkit);
+
+ toku_ftnode_free(&parentnode);
+
+ int total_messages = 0;
+ for (i = 0; i < 8; ++i) {
+ total_messages += BLB_DATA(child, i)->num_klpairs();
+ }
+ assert(total_messages <= num_parent_messages + num_child_messages);
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ if (dummy_cmp(parent_messages[i]->kdbt(), &childkeys[7]) <= 0) {
+ assert(parent_messages_applied[i] == 1);
+ } else {
+ assert(parent_messages_applied[i] == 0);
+ }
+ }
+
+ toku_xids_destroy(&xids_0);
+ toku_xids_destroy(&xids_123);
+ toku_xids_destroy(&xids_234);
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ toku_free(parent_messages[i]->kdbt()->data);
+ struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(extra, parent_messages[i]->vdbt()->data);
+ toku_free(extra->new_val.data);
+ toku_free(parent_messages[i]->vdbt()->data);
+ delete parent_messages[i];
+ }
+ for (i = 0; i < num_child_messages; ++i) {
+ toku_free(child_messages[i]);
+ toku_free(key_pointers[i]);
+ }
+ toku_free(ubi.data);
+ toku_ftnode_free(&child);
+ toku_free(parent_messages);
+ toku_free(key_pointers);
+ toku_free(keylens);
+ toku_free(child_messages);
+ toku_free(parent_messages_is_fresh);
+ toku_free(parent_messages_applied);
+}
+
+// create identical leaf nodes, buffer flush to one and maybe_apply to
+// the other, then compare the results; they should be the same.
+//
+// if make_leaf_up_to_date is true, then apply the messages that are stale
+// in the parent to the leaf before doing the flush, otherwise assume the
+// leaf was just read off disk
+static void
+compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
+ int r;
+
+ ft_msg **MALLOC_N(4096,parent_messages); // 128k / 32 = 4k
+ LEAFENTRY* child_messages = NULL;
+ XMALLOC_N(4096,child_messages);
+ void** key_pointers = NULL;
+ XMALLOC_N(4096, key_pointers);
+ uint32_t* keylens = NULL;
+ XMALLOC_N(4096, keylens);
+ bool *MALLOC_N(4096,parent_messages_is_fresh);
+ memset(parent_messages_is_fresh, 0, 4096*(sizeof parent_messages_is_fresh[0]));
+ int *MALLOC_N(4096,parent_messages_applied);
+ memset(parent_messages_applied, 0, 4096*(sizeof parent_messages_applied[0]));
+
+ XIDS xids_0 = toku_xids_get_root_xids();
+ XIDS xids_123, xids_234;
+ r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123);
+ CKERR(r);
+ r = toku_xids_create_child(xids_0, &xids_234, (TXNID)234);
+ CKERR(r);
+
+ BASEMENTNODE child1_blbs[8], child2_blbs[8];
+ DBT child1keys[7], child2keys[7];
+ int i;
+ for (i = 0; i < 8; ++i) {
+ child1_blbs[i] = toku_create_empty_bn();
+ child2_blbs[i] = toku_create_empty_bn();
+ if (i < 7) {
+ toku_init_dbt(&child1keys[i]);
+ toku_init_dbt(&child2keys[i]);
+ }
+ }
+
+ FTNODE XMALLOC(child1), XMALLOC(child2);
+ BLOCKNUM blocknum = { 42 };
+ toku_initialize_empty_ftnode(child1, blocknum, 0, 8, FT_LAYOUT_VERSION, 0);
+ toku_initialize_empty_ftnode(child2, blocknum, 0, 8, FT_LAYOUT_VERSION, 0);
+ for (i = 0; i < 8; ++i) {
+ destroy_basement_node(BLB(child1, i));
+ set_BLB(child1, i, child1_blbs[i]);
+ BP_STATE(child1, i) = PT_AVAIL;
+ destroy_basement_node(BLB(child2, i));
+ set_BLB(child2, i, child2_blbs[i]);
+ BP_STATE(child2, i) = PT_AVAIL;
+ }
+
+ int total_size = 0;
+ for (i = 0; total_size < 128*1024; ++i) {
+ total_size -= child1_blbs[i%8]->data_buffer.get_memory_size();
+ insert_same_message_to_bns(t, child1_blbs[i%8], child2_blbs[i%8], &key_pointers[i], &keylens[i], &child_messages[i], xids_123, i%8);
+ total_size += child1_blbs[i%8]->data_buffer.get_memory_size();
+ if (i % 8 < 7) {
+ DBT keydbt;
+ if (child1keys[i%8].size == 0 || dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &child1keys[i%8]) > 0) {
+ toku_fill_dbt(&child1keys[i%8], key_pointers[i], keylens[i]);
+ toku_fill_dbt(&child2keys[i%8], key_pointers[i], keylens[i]);
+ }
+ }
+ }
+ int num_child_messages = i;
+
+ for (i = 0; i < num_child_messages; ++i) {
+ DBT keydbt;
+ if (i % 8 < 7) {
+ assert(dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &child1keys[i%8]) <= 0);
+ assert(dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &child2keys[i%8]) <= 0);
+ }
+ }
+
+ {
+ int num_stale = random() % 2000;
+ memset(&parent_messages_is_fresh[num_stale], true, (4096 - num_stale) * (sizeof parent_messages_is_fresh[0]));
+ }
+ NONLEAF_CHILDINFO parent_bnc = toku_create_empty_nl();
+ MSN max_parent_msn = MIN_MSN;
+ for (i = 0; toku_bnc_memory_used(parent_bnc) < 128*1024; ++i) {
+ insert_random_update_message(parent_bnc, &parent_messages[i], parent_messages_is_fresh[i], xids_234, i%8, &parent_messages_applied[i], &max_parent_msn);
+ }
+ int num_parent_messages = i;
+
+ for (i = 0; i < 7; ++i) {
+ child1->pivotkeys.insert_at(&child1keys[i], i);
+ child2->pivotkeys.insert_at(&child2keys[i], i);
+ }
+
+ if (make_leaf_up_to_date) {
+ for (i = 0; i < num_parent_messages; ++i) {
+ if (!parent_messages_is_fresh[i]) {
+ toku_ft_leaf_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ child1,
+ -1,
+ *parent_messages[i],
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
+ toku_ft_leaf_apply_msg(
+ t->ft->cmp,
+ t->ft->update_fun,
+ child2,
+ -1,
+ *parent_messages[i],
+ &non_mvcc_gc_info,
+ NULL,
+ NULL,
+ NULL);
+ }
+ }
+ for (i = 0; i < 8; ++i) {
+ BLB(child1, i)->stale_ancestor_messages_applied = true;
+ BLB(child2, i)->stale_ancestor_messages_applied = true;
+ }
+ } else {
+ for (i = 0; i < 8; ++i) {
+ BLB(child1, i)->stale_ancestor_messages_applied = false;
+ BLB(child2, i)->stale_ancestor_messages_applied = false;
+ }
+ }
+
+ toku_bnc_flush_to_child(t->ft, parent_bnc, child1, TXNID_NONE);
+
+ FTNODE XMALLOC(parentnode);
+ BLOCKNUM parentblocknum = { 17 };
+ toku_initialize_empty_ftnode(parentnode, parentblocknum, 1, 1, FT_LAYOUT_VERSION, 0);
+ destroy_nonleaf_childinfo(BNC(parentnode, 0));
+ set_BNC(parentnode, 0, parent_bnc);
+ BP_STATE(parentnode, 0) = PT_AVAIL;
+ parentnode->max_msn_applied_to_node_on_disk = max_parent_msn;
+ struct ancestors ancestors = { .node = parentnode, .childnum = 0, .next = NULL };
+ bool msgs_applied;
+ toku_apply_ancestors_messages_to_node(t, child2, &ancestors, pivot_bounds::infinite_bounds(), &msgs_applied, -1);
+
+ struct checkit_fn {
+ int operator()(const ft_msg &UU(msg), bool is_fresh) {
+ assert(!is_fresh);
+ return 0;
+ }
+ } checkit;
+ parent_bnc->msg_buffer.iterate(checkit);
+ invariant(parent_bnc->fresh_message_tree.size() + parent_bnc->stale_message_tree.size()
+ == (uint32_t) num_parent_messages);
+
+ toku_ftnode_free(&parentnode);
+
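+    // child1 received a real buffer flush and child2 went through
+    // maybe_apply; their basement nodes must now hold identical
+    // key/value pairs in the same order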
+ for (int j = 0; j < 8; ++j) {
+ bn_data* first = BLB_DATA(child1, j);
+ bn_data* second = BLB_DATA(child2, j);
+ uint32_t len = first->num_klpairs();
+ assert(len == second->num_klpairs());
+ for (uint32_t idx = 0; idx < len; ++idx) {
+ LEAFENTRY le1, le2;
+ DBT key1dbt, val1dbt, key2dbt, val2dbt;
+ {
+ uint32_t keylen, vallen;
+ void *keyp = NULL;
+ r = first->fetch_klpair(idx, &le1, &keylen, &keyp);
+ assert_zero(r);
+ void *valp = le_latest_val_and_len(le1, &vallen);
+ toku_fill_dbt(&key1dbt, keyp, keylen);
+ toku_fill_dbt(&val1dbt, valp, vallen);
+ }
+ {
+ uint32_t keylen, vallen;
+ void *keyp = NULL;
+ r = second->fetch_klpair(idx, &le2, &keylen, &keyp);
+ assert_zero(r);
+ void *valp = le_latest_val_and_len(le2, &vallen);
+ toku_fill_dbt(&key2dbt, keyp, keylen);
+ toku_fill_dbt(&val2dbt, valp, vallen);
+ }
+ assert(dummy_cmp(&key1dbt, &key2dbt) == 0);
+ assert(dummy_cmp(&val1dbt, &val2dbt) == 0);
+ }
+ }
+
+ toku_xids_destroy(&xids_0);
+ toku_xids_destroy(&xids_123);
+ toku_xids_destroy(&xids_234);
+
+ for (i = 0; i < num_parent_messages; ++i) {
+ toku_free(parent_messages[i]->kdbt()->data);
+ struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(extra, parent_messages[i]->vdbt()->data);
+ toku_free(extra->new_val.data);
+ toku_free(parent_messages[i]->vdbt()->data);
+ delete parent_messages[i];
+ }
+ for (i = 0; i < num_child_messages; ++i) {
+ toku_free(key_pointers[i]);
+ toku_free(child_messages[i]);
+ }
+ toku_ftnode_free(&child1);
+ toku_ftnode_free(&child2);
+ toku_free(parent_messages);
+ toku_free(key_pointers);
+ toku_free(keylens);
+ toku_free(child_messages);
+ toku_free(parent_messages_is_fresh);
+ toku_free(parent_messages_applied);
+}
+
+static void
+parse_args(int argc, const char *argv[]) {
+ const char *progname=argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ verbose=1;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else {
+ fprintf(stderr, "Usage:\n %s [-v] [-q]\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+}
+
+static int cmp_fn(DB *db __attribute__((unused)),
+ const DBT *a, const DBT *b) {
+ int c;
+ if (a->size > b->size) {
+ c = memcmp(a->data, b->data, b->size);
+ } else if (a->size < b->size) {
+ c = memcmp(a->data, b->data, a->size);
+ } else {
+ return memcmp(a->data, b->data, a->size);
+ }
+ if (c == 0) {
+ c = a->size - b->size;
+ }
+ return c;
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ parse_args(argc, argv);
+
+ dummy_cmp.create(cmp_fn, nullptr);
+
+ initialize_dummymsn();
+ int r;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ FT_HANDLE t;
+ r = toku_open_ft_handle(fname, 1, &t, 128*1024, 4096, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+ toku_ft_set_update(t, orthopush_flush_update_fun);
+ // HACK
+ t->ft->update_fun = orthopush_flush_update_fun;
+
+ for (int i = 0; i < 10; ++i) {
+ flush_to_internal(t);
+ }
+ for (int i = 0; i < 10; ++i) {
+ flush_to_internal_multiple(t);
+ }
+ for (int i = 0; i < 3; ++i) {
+ flush_to_leaf(t, false, false);
+ flush_to_leaf(t, false, true);
+ flush_to_leaf(t, true, false);
+ flush_to_leaf(t, true, true);
+ }
+ for (int i = 0; i < 10; ++i) {
+ flush_to_leaf_with_keyrange(t, false);
+ flush_to_leaf_with_keyrange(t, true);
+ compare_apply_and_flush(t, false);
+ compare_apply_and_flush(t, true);
+ }
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ dummy_cmp.destroy();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/pqueue-test.cc b/storage/tokudb/PerconaFT/ft/tests/pqueue-test.cc
new file mode 100644
index 00000000..aeb5a897
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/pqueue-test.cc
@@ -0,0 +1,263 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include "loader/loader-internal.h"
+#include "loader/pqueue.h"
+
+int found_dup = -1;
+
+// simple compare func
+static int test_compare(DB * UU(db), const DBT *dbta, const DBT *dbtb)
+{
+ int a = *((int*)dbta->data);
+ int b = *((int*)dbtb->data);
+ if ( a<b ) return -1;
+ if ( a>b ) return 1;
+ return 0;
+}
+
+static inline DBT *dbt_init(DBT *dbt, void *data, uint32_t size) {
+ memset(dbt, 0, sizeof *dbt);
+ dbt->data = data;
+ dbt->size = size;
+ return dbt;
+}
+
+static void err_cb(DB *db, int which_db, int err, DBT *key, DBT *val, void *extra) {
+ (void) db; (void) which_db; (void) err; (void) extra;
+ (void) val;
+ found_dup = *(int *)key->data;
+ if (verbose) printf("err_cb : key <%d> val <%d>\n", *(int *)key->data, *(int *)val->data);
+}
+
+static int run_test(void)
+{
+ const int n_sources=10;
+ pqueue_t *pq;
+ pqueue_node_t *pq_nodes = (pqueue_node_t *) toku_malloc( n_sources * sizeof(pqueue_node_t));
+ pqueue_node_t *node;
+ DB *dest_db = NULL;
+ ft_compare_func compare = test_compare;
+ int r;
+ struct error_callback_s error_callback;
+ ft_loader_init_error_callback(&error_callback);
+ ft_loader_set_error_function(&error_callback, err_cb, NULL);
+
+ r = pqueue_init(&pq, n_sources, 0, dest_db, compare, &error_callback);
+ if (r) return r;
+
+ DBT keys[n_sources];
+ DBT vals[n_sources];
+ DBT zero;
+ toku_init_dbt_flags(&zero, DB_DBT_REALLOC);
+ int key_data[10] = {0, 4, 8, 9, 5, 1, 2, 6, 7, 3};
+
+ for (int i=0;i<n_sources; i++) {
+ if (verbose) printf("%d ", key_data[i]);
+ keys[i] = zero;
+ vals[i] = zero;
+ dbt_init(&keys[i], &key_data[i], sizeof(int));
+ }
+ if (verbose) printf("\n");
+
+ // test 1 : fill it up, then empty it out
+ for (int i=0; i<n_sources; i++) {
+ pq_nodes[i].key = &keys[i];
+ pq_nodes[i].val = &vals[i];
+ pq_nodes[i].i = i;
+ pqueue_insert(pq, &pq_nodes[i]);
+ }
+
+ for (int i=0; i<n_sources; i++) {
+ r = pqueue_pop(pq, &node); assert(r==0);
+ if (verbose) printf("%d : %d\n", i, *(int*)(node->key->data));
+ if ( *(int*)(node->key->data) != i ) {
+ if (verbose)
+ printf("FAIL\n");
+ return -1;
+ }
+ }
+ pqueue_free(pq);
+ if (verbose) printf("test1 : PASS\n");
+
+ // test 2 : fill it, then empty and reload, then empty
+ {
+ r = pqueue_init(&pq, n_sources, 0, dest_db, compare, &error_callback);
+ if (r) return r;
+ }
+
+ DBT more_keys[20];
+ DBT more_vals[20];
+ int more_key_data[20] = {0, 4, 8, 9, 5, 1, 2, 6, 7, 3, 10, 11, 14, 13, 12, 17, 19, 15, 18, 16};
+ for (int i=0; i<20; i++) {
+ more_keys[i] = zero;
+ more_vals[i] = zero;
+ dbt_init(&more_keys[i], &more_key_data[i], sizeof(int));
+ }
+
+ for (int i=0; i<10; i++) {
+ pq_nodes[i].key = &more_keys[i];
+ pq_nodes[i].val = &more_vals[i];
+ pq_nodes[i].i = i;
+ pqueue_insert(pq, &pq_nodes[i]);
+ }
+
+ for (int i=0; i<5; i++) {
+ r = pqueue_pop(pq, &node); assert(r==0);
+ if ( *(int *)node->key->data != i ) { printf("FAIL\n"); return -1; }
+ if (verbose) printf("%d : %d\n", i, *(int*)node->key->data);
+ }
+
+ int n;
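+    // pop the next ten in order, refilling each freed slot (node->i) with
+    // one of the remaining keys so the queue stays full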
+ for (int i=5; i<15; i++) {
+ r = pqueue_pop(pq, &node); assert(r==0);
+ if ( *(int *)node->key->data != i ) { printf("FAIL\n"); return -1; }
+ if (verbose) printf("%d : %d\n", i, *(int*)node->key->data);
+ n = node->i;
+ pq_nodes[n].key = &more_keys[i+5];
+ pq_nodes[n].val = &more_vals[i+5];
+ pqueue_insert(pq, &pq_nodes[n]);
+ }
+
+ for (int i=15; i<20; i++) {
+ r = pqueue_pop(pq, &node); assert(r==0);
+ if ( *(int*)node->key->data != i ) { printf("FAIL\n"); return -1; }
+ if (verbose) printf("%d : %d\n", i, *(int*)node->key->data);
+ }
+ if (verbose) printf("test2 : PASS\n");
+ pqueue_free(pq);
+
+ // test 3 : put in a dup
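+    // the duplicate key (6) should be reported through err_cb, which
+    // records it in found_dup; both the insert and pop loops bail out to
+    // found_duplicate6 as soon as a call returns nonzero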
+ {
+ r = pqueue_init(&pq, 10, 0, dest_db, compare, &error_callback);
+ if (r) return r;
+ }
+
+ DBT keys3[10];
+ DBT vals3[10];
+ int key_data3[10] = {0, 1, 2, 3, 4, 5, 6, 6, 8, 9}; // dup is 6
+ int val_data3[10];
+
+ for (int i=0; i<10; i++) {
+ keys3[i] = zero;
+ vals3[i] = zero;
+ val_data3[i] = i;
+ dbt_init(&keys3[i], &key_data3[i], sizeof(int));
+ dbt_init(&vals3[i], &val_data3[i], sizeof(int));
+ }
+ int ii;
+ for (ii=0; ii<10; ii++) {
+ pq_nodes[ii].key = &keys3[ii];
+ pq_nodes[ii].val = &vals3[ii];
+ pq_nodes[ii].i = ii;
+ r = pqueue_insert(pq, &pq_nodes[ii]);
+ if ( r != 0 ) goto found_duplicate6;
+ }
+ for (ii=0; ii<10; ii++) {
+ r = pqueue_pop(pq, &node);
+// if (verbose) printf("%d : %d\n", ii, *(int*)node->key->data);
+ if ( r != 0 ) goto found_duplicate6;
+ }
+found_duplicate6:
+// if (verbose) printf("%d : %d\n", ii, *(int*)node->key->data);
+ if ( found_dup != 6 ) { printf("FAIL\n"); return -1; }
+ if (verbose) printf("test3 : PASS\n");
+ pqueue_free(pq);
+ ft_loader_destroy_error_callback(&error_callback);
+
+ // test 4 - find duplicate when inserting
+ ft_loader_init_error_callback(&error_callback);
+ ft_loader_set_error_function(&error_callback, err_cb, NULL);
+ r = pqueue_init(&pq, 10, 0, dest_db, compare, &error_callback); if (r) return r;
+
+ found_dup = -1;
+ DBT keys4[10];
+ DBT vals4[10];
+ int key_data4[10] = {0, 0, 2, 3, 4, 5, 6, 7, 8, 9}; // dup is 0
+ int val_data4[10];
+
+ for (int i=0; i<10; i++) {
+ keys4[i] = zero;
+ vals4[i] = zero;
+ val_data4[i] = i;
+ dbt_init(&keys4[i], &key_data4[i], sizeof(int));
+ dbt_init(&vals4[i], &val_data4[i], sizeof(int));
+ }
+
+ for (ii=0; ii<10; ii++) {
+ pq_nodes[ii].key = &keys4[ii];
+ pq_nodes[ii].val = &vals4[ii];
+ pq_nodes[ii].i = ii;
+ r = pqueue_insert(pq, &pq_nodes[ii]);
+ if ( r != 0 ) {
+// if (verbose) printf("%d : %d\n", ii, *(int*)pq_nodes[ii].key->data);
+ goto found_duplicate0;
+ }
+ }
+ for (ii=0; ii<10; ii++) {
+ r = pqueue_pop(pq, &node);
+// if (verbose) printf("%d : %d\n", ii, *(int*)node->key->data);
+ if ( r != 0 ) goto found_duplicate0;
+ }
+found_duplicate0:
+ if ( found_dup != 0 ) { printf("FAIL - found_dup : %d\n", found_dup); return -1; }
+ if (verbose) printf("test4 : PASS\n");
+ if (verbose) printf("PASS\n");
+ pqueue_free(pq);
+ toku_free(pq_nodes);
+ ft_loader_destroy_error_callback(&error_callback);
+
+ return 0;
+}
+
+
+
+int
+test_main (int argc, const char *argv[]) {
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ }
+ argc--;
+ argv++;
+ }
+ return run_test();
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/quicklz-test.cc b/storage/tokudb/PerconaFT/ft/tests/quicklz-test.cc
new file mode 100644
index 00000000..9d420fb3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/quicklz-test.cc
@@ -0,0 +1,90 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test quicklz directly.
+// Compare with compress-test, which exercises the toku compression layer (a composite of quicklz and zlib).
+
+#include "test.h"
+#include "serialize/quicklz.h"
+
+static void test_qlz_random_i (int i) {
+ if (verbose) printf("i=%d\n", i);
+
+ qlz_state_compress *MALLOC(compress_state);
+ qlz_state_decompress *MALLOC(decompress_state);
+
+ char *MALLOC_N(i, m);
+ char *MALLOC_N(i, m2);
+ for (int j=0; j<i; j++) {
+ m[j] = (random()%256)-128;
+ }
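+    // quicklz may expand incompressible input by up to 400 bytes, so
+    // size + 400 bounds the compressed output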
+ int csize_bound = i+400;
+ char *MALLOC_N(csize_bound, c);
+ memset(compress_state, 0, sizeof(*compress_state));
+ memset(decompress_state, 0, sizeof(*decompress_state));
+ int s = qlz_compress(m, c, i, compress_state);
+ assert(s <= csize_bound);
+ int r = qlz_decompress(c, m2, decompress_state);
+ assert(r==i);
+ assert(memcmp(m, m2, i)==0);
+
+ toku_free(m);
+ toku_free(c);
+ toku_free(m2);
+ toku_free(compress_state);
+ toku_free(decompress_state);
+}
+
+static void test_qlz_random (void) {
+ // quicklz cannot handle i==0.
+ for (int i=1; i<100; i++) {
+ test_qlz_random_i(i);
+ }
+ for (int i=64; i<=1024*1024*8; i*=4) {
+ test_qlz_random_i(i);
+ test_qlz_random_i(i+random()%i);
+ }
+}
+
+int test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ test_qlz_random();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-bad-last-entry.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-bad-last-entry.cc
new file mode 100644
index 00000000..a3c934d1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-bad-last-entry.cc
@@ -0,0 +1,120 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that recovery of a log of comments still succeeds when the last
+// log entry is progressively truncated
+
+#include "test.h"
+
+
+static int
+run_test(void) {
+ // leave this many bytes in file
+ const int magic_begin_end_checkpoint_sz = 8 // "tokulogg" magic 8 byte header
+ +4 // version
+ +toku_log_begin_checkpoint_overhead
+ +toku_log_end_checkpoint_overhead;
+
+ int r;
+ int trim = 1;
+ toku_struct_stat st;
+
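+    // repeatedly rebuild the log, chop `trim` bytes off its tail, and check
+    // that recovery still succeeds; stop once trimming would cut into the
+    // leading magic header and the first begin/end checkpoint pair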
+ while ( 1 ) {
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // create the log
+ TOKULOGGER logger;
+ BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
+ BYTESTRING world = { (uint32_t) strlen("world"), (char *) "world" };
+ BYTESTRING there = { (uint32_t) strlen("there"), (char *) "there" };
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ LSN beginlsn;
+ // all logs must contain a valid checkpoint
+ toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0);
+ toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0);
+ toku_log_comment(logger, NULL, true, 0, hello);
+ toku_log_comment(logger, NULL, true, 0, world);
+ toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0);
+ toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0);
+ toku_log_comment(logger, NULL, true, 0, hello);
+ toku_log_comment(logger, NULL, true, 0, there);
+ toku_logger_close(&logger);
+
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul>=0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r==fileno(stderr));
+ r = close(devnul); assert(r==0);
+
+ char fname[TOKU_PATH_MAX + 1];
+ sprintf(fname,
+ "%s/%s%d",
+ TOKU_TEST_FILENAME,
+ "log000000000000.tokulog",
+ TOKU_LOG_VERSION);
+
+ r = toku_stat(fname, &st, toku_uninstrumented);
+ assert(r == 0);
+ if (st.st_size - trim > magic_begin_end_checkpoint_sz) {
+ r = truncate(fname, st.st_size - trim);
+ CKERR(r);
+ }
+ else
+ break;
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger,
+ TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0);
+ assert(r == 0);
+
+ trim += 1;
+ }
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-cbegin-cend-hello.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-cbegin-cend-hello.cc
new file mode 100644
index 00000000..28f0b5b6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-cbegin-cend-hello.cc
@@ -0,0 +1,95 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test recovery of a clean shutdown
+
+#include "test.h"
+
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // create the log
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+
+ // add begin checkpoint, end checkpoint
+ LSN beginlsn;
+ toku_log_begin_checkpoint(logger, &beginlsn, false, 0, 0);
+ toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // add hello
+ for (int i=0; i<2; i++) {
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
+ toku_log_comment(logger, NULL, true, 0, hello);
+ r = toku_logger_close(&logger); assert(r == 0);
+ }
+
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul>=0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r==fileno(stderr));
+ r = close(devnul); assert(r==0);
+
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0);
+ assert(r == 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-cbegin-cend.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-cbegin-cend.cc
new file mode 100644
index 00000000..af3406e5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-cbegin-cend.cc
@@ -0,0 +1,80 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
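+// test recovery when the begin checkpoint and end checkpoint records land in separate log files
+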
+#include "test.h"
+
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // put begin and end checkpoint into separate log files
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ LSN beginlsn;
+ toku_log_begin_checkpoint(logger, &beginlsn, false, 0, 0);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0);
+ assert(r == 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-cbegin.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-cbegin.cc
new file mode 100644
index 00000000..2c56ada7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-cbegin.cc
@@ -0,0 +1,86 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
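+// test recovery of a log that ends with a begin checkpoint that has no matching end checkpoint
+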
+#include "test.h"
+
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // create the log
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ LSN beginlsn;
+ toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0);
+ toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0);
+ toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // run recovery
+
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul>=0);
+ r = toku_dup2(devnul, fileno(stderr));
+ assert(r==fileno(stderr));
+ r = close(devnul);
+ assert(r==0);
+
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0);
+ assert(r == 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-cend-cbegin.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-cend-cbegin.cc
new file mode 100644
index 00000000..817ae970
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-cend-cbegin.cc
@@ -0,0 +1,92 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// run recovery on a log with an incomplete checkpoint
+
+#include "test.h"
+
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // create the log
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ LSN firstbegin = ZERO_LSN;
+ toku_log_begin_checkpoint(logger, &firstbegin, true, 0, 0);
+ assert(firstbegin.lsn != ZERO_LSN.lsn);
+ toku_log_end_checkpoint(logger, NULL, false, firstbegin, 0, 0, 0);
+ toku_log_begin_checkpoint(logger, NULL, true, 0, 0);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ if (!verbose) {
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul >= 0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r == fileno(stderr));
+ r = close(devnul); assert(r == 0);
+ }
+
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME,
+ toku_builtin_compare_fun,
+ NULL, NULL, NULL,
+ 0);
+ assert(r == 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-datadir-is-file.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-datadir-is-file.cc
new file mode 100644
index 00000000..f7835351
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-datadir-is-file.cc
@@ -0,0 +1,98 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test recovery when the data directory path refers to a regular file, not a directory
+
+#include "test.h"
+
+static int
+run_test(void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU);
+ assert_zero(r);
+
+ char testdir[TOKU_PATH_MAX+1];
+ char testfile[TOKU_PATH_MAX+1];
+ toku_path_join(testdir, 2, TOKU_TEST_FILENAME, "dir");
+ toku_path_join(testfile, 2, TOKU_TEST_FILENAME, "file");
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+ r = toku_os_mkdir(testdir, S_IRWXU); assert(r == 0);
+
+ // create the log
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(testdir, logger); assert(r == 0);
+ BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
+ toku_log_comment(logger, NULL, true, 0, hello);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul>=0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r==fileno(stderr));
+ r = close(devnul); assert(r==0);
+
+    // make the data directory path refer to a regular file
+    {
+        char buf[TOKU_PATH_MAX+sizeof("touch ")];
+        strcpy(buf, "touch ");
+        strncat(buf, testfile, TOKU_PATH_MAX);
+        r = system(buf); CKERR(r);
+    }
+
+    // run recovery; it should fail because the data dir is a file
+    r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger,
+ testfile, testdir, 0, 0, 0, NULL, 0);
+ assert(r != 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-empty.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-empty.cc
new file mode 100644
index 00000000..374085f5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-empty.cc
@@ -0,0 +1,84 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test recovery from a log that exists but has no log entries
+
+#include "test.h"
+
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // create the log
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ if (!verbose) {
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul >= 0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r == fileno(stderr));
+ r = close(devnul); assert(r == 0);
+ }
+
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0);
+ assert(r == DB_RUNRECOVERY);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-fopen-missing-file.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-fopen-missing-file.cc
new file mode 100644
index 00000000..88f67d73
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-fopen-missing-file.cc
@@ -0,0 +1,89 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test recovery with a fopen that references a missing file
+
+#include "test.h"
+
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // create the log
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ LSN beginlsn;
+ toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0);
+ toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0);
+
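+    // log an fopen for a dictionary file that was never created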
+ BYTESTRING iname = { (uint32_t) strlen("missing_tokuft_file"), (char *) "missing_tokuft_file" };
+ FILENUM filenum = {42};
+ uint32_t treeflags = 0;
+ toku_log_fopen(logger, NULL, true, iname, filenum, treeflags);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul>=0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r==fileno(stderr));
+ r = close(devnul); assert(r==0);
+
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0);
+ assert(r == 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-hello.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-hello.cc
new file mode 100644
index 00000000..29cda9b4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-hello.cc
@@ -0,0 +1,89 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test recovery of "hello" comments
+
+#include "test.h"
+
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // create the log
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
+ toku_log_comment(logger, NULL, true, 0, hello);
+ LSN beginlsn;
+ toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0);
+ toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0);
+ toku_log_comment(logger, NULL, true, 0, hello);
+ BYTESTRING there = { (uint32_t) strlen("there"), (char *) "there" };
+ toku_log_comment(logger, NULL, true, 0, there);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul>=0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r==fileno(stderr));
+ r = close(devnul); assert(r==0);
+
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0);
+ assert(r == 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-lsn-error-during-forward-scan.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-lsn-error-during-forward-scan.cc
new file mode 100644
index 00000000..8e579bf0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-lsn-error-during-forward-scan.cc
@@ -0,0 +1,119 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// force a bad LSN during the forward scan. recovery should fail.
+
+#include "test.h"
+
+
+static void recover_callback_at_turnaround(void *UU(arg)) {
+ // change the LSN in the first log entry of log 2. this will cause an LSN error during the forward scan.
+ int r;
+ char logname[TOKU_PATH_MAX+1];
+ sprintf(logname, "%s/log000000000002.tokulog%d", TOKU_TEST_FILENAME, TOKU_LOG_VERSION);
+ FILE *f = fopen(logname, "r+b"); assert(f);
+ r = fseek(f, 025, SEEK_SET); assert(r == 0);
+ char c = 100;
+    size_t n = fwrite(&c, sizeof c, 1, f); assert(n == 1);
+ r = fclose(f); assert(r == 0);
+}
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // log 1 has the checkpoint
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+
+ LSN beginlsn;
+ toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0);
+ toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0);
+
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // log 2 has hello
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+
+ BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
+ toku_log_comment(logger, NULL, true, 0, hello);
+
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // log 3 has there
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+
+ BYTESTRING there = { (uint32_t) strlen("there"), (char *) "there" };
+ toku_log_comment(logger, NULL, true, 0, there);
+
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul>=0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r==fileno(stderr));
+ r = close(devnul); assert(r==0);
+
+    // corrupt the LSN in log 2 at the turnaround to force an LSN error during the forward scan
+ toku_recover_set_callback(recover_callback_at_turnaround, NULL);
+
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0);
+ assert(r != 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-no-datadir.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-no-datadir.cc
new file mode 100644
index 00000000..6c34138f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-no-datadir.cc
@@ -0,0 +1,83 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test recovery with no data directory
+
+#include "test.h"
+
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // create the log
+ TOKULOGGER logger;
+ r = toku_logger_create(&logger); assert(r == 0);
+ r = toku_logger_open(TOKU_TEST_FILENAME, logger); assert(r == 0);
+ BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
+ toku_log_comment(logger, NULL, true, 0, hello);
+ r = toku_logger_close(&logger); assert(r == 0);
+
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul>=0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r==fileno(stderr));
+ r = close(devnul); assert(r==0);
+
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, "/junk", TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0);
+ assert(r != 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-no-log.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-no-log.cc
new file mode 100644
index 00000000..75fa06ad
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-no-log.cc
@@ -0,0 +1,75 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test recovery with no log
+
+#include "test.h"
+
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul>=0);
+ r = toku_dup2(devnul, fileno(stderr)); assert(r==fileno(stderr));
+ r = close(devnul); assert(r==0);
+
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0);
+ assert(r != 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-no-logdir.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-no-logdir.cc
new file mode 100644
index 00000000..5e809797
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-no-logdir.cc
@@ -0,0 +1,69 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test recovery with a NULL log directory
+
+#include "test.h"
+
+
+static int
+run_test(void) {
+ int r;
+
+ // setup the test dir
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0);
+
+ // run recovery
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, NULL, NULL, 0, 0, 0, NULL, 0);
+ assert(r != 0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
+
+int
+test_main(int UU(argc), const char *UU(argv[])) {
+ int r;
+ r = run_test();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-test5123.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-test5123.cc
new file mode 100644
index 00000000..02dc63fc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-test5123.cc
@@ -0,0 +1,81 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
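+// test that recovery copes with a "crash" that leaves live transactions in the log
+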
+#include "test.h"
+
+#include "toku_os.h"
+#include "cachetable/checkpoint.h"
+
+#include "test-ft-txns.h"
+
+static void test_5123(void) {
+ TOKULOGGER logger;
+ CACHETABLE ct;
+ test_setup(TOKU_TEST_FILENAME, &logger, &ct);
+
+ int r;
+ TXNID_PAIR one = { (TXNID)1, TXNID_NONE};
+ TXNID_PAIR two = { (TXNID)2, TXNID_NONE};
+ TXNID_PAIR three = { (TXNID)3, TXNID_NONE};
+
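+    // begin three transactions out of order, then commit only the second one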
+ toku_log_xbegin(logger, NULL, false, one, TXNID_PAIR_NONE);
+ toku_log_xbegin(logger, NULL, false, three, TXNID_PAIR_NONE);
+ toku_log_xbegin(logger, NULL, false, two, TXNID_PAIR_NONE);
+
+ toku_log_xcommit(logger, NULL, false, NULL, two);
+
+ toku_logger_close_rollback(logger);
+
+ toku_cachetable_close(&ct);
+ // "Crash"
+ r = toku_logger_close(&logger);
+ CKERR(r);
+ ct = NULL;
+ logger = NULL;
+
+ // "Recover"
+ test_setup_and_recover(TOKU_TEST_FILENAME, &logger, &ct);
+
+ shutdown_after_recovery(&logger, &ct);
+}
+
+int test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_5123();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/shortcut.cc b/storage/tokudb/PerconaFT/ft/tests/shortcut.cc
new file mode 100644
index 00000000..322052e8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/shortcut.cc
@@ -0,0 +1,92 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
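+// test that a cursor scan (DB_NEXT) keeps returning the right rows after an insert invalidates the cursor's cached position
+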
+#include "test.h"
+static const char *fname = TOKU_TEST_FILENAME;
+
+static TOKUTXN const null_txn = 0;
+CACHETABLE ct;
+FT_HANDLE ft;
+FT_CURSOR cursor;
+
+static int test_ft_cursor_keycompare(DB *db __attribute__((unused)), const DBT *a, const DBT *b) {
+ return toku_keycompare(a->data, a->size, b->data, b->size);
+}
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ int r;
+
+ unlink(fname);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0);
+ r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0);
+
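+    // insert 1000 ascending keys "0000" .. "0999"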
+ int i;
+ for (i=0; i<1000; i++) {
+ char string[100];
+ snprintf(string, sizeof(string), "%04d", i);
+ DBT key,val;
+ toku_ft_insert(ft, toku_fill_dbt(&key, string, 5), toku_fill_dbt(&val, string, 5), 0);
+ }
+
+ {
+ struct check_pair pair = {5, "0000", 5, "0000", 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT); assert(r==0); assert(pair.call_count==1);
+ }
+ {
+ struct check_pair pair = {5, "0001", 5, "0001", 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT); assert(r==0); assert(pair.call_count==1);
+ }
+
+ // This will invalidate due to the root counter bumping, but the OMT itself will still be valid.
+ {
+ DBT key, val;
+ toku_ft_insert(ft, toku_fill_dbt(&key, "d", 2), toku_fill_dbt(&val, "w", 2), 0);
+ }
+
+ {
+ struct check_pair pair = {5, "0002", 5, "0002", 0};
+ r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT); assert(r==0); assert(pair.call_count==1);
+ }
+
+ toku_ft_cursor_close(cursor);
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/subblock-test-checksum.cc b/storage/tokudb/PerconaFT/ft/tests/subblock-test-checksum.cc
new file mode 100644
index 00000000..3248b40d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/subblock-test-checksum.cc
@@ -0,0 +1,193 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that corrupt checksums are detected
+
+#include "test.h"
+
+#include "serialize/compress.h"
+#include "serialize/sub_block.h"
+
+#include <toku_portability.h>
+#include <util/threadpool.h>
+
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+static uint8_t
+get_uint8_at_offset(void *vp, size_t offset) {
+ uint8_t *ip = (uint8_t *) vp;
+ return ip[offset];
+}
+
+static void
+set_uint8_at_offset(void *vp, size_t offset, uint8_t newv) {
+ uint8_t *ip = (uint8_t *) vp;
+ ip[offset] = newv;
+}
+
+static void
+test_sub_block_checksum(void *buf, int total_size, int my_max_sub_blocks, int n_cores, struct toku_thread_pool *pool, enum toku_compression_method method) {
+ if (verbose)
+ printf("%s:%d %d %d\n", __FUNCTION__, __LINE__, total_size, my_max_sub_blocks);
+
+ int r;
+
+ int sub_block_size, n_sub_blocks;
+ r = choose_sub_block_size(total_size, my_max_sub_blocks, &sub_block_size, &n_sub_blocks);
+ assert(r == 0);
+ if (verbose)
+ printf("%s:%d %d %d\n", __FUNCTION__, __LINE__, sub_block_size, n_sub_blocks);
+
+ struct sub_block sub_blocks[n_sub_blocks];
+ set_all_sub_block_sizes(total_size, sub_block_size, n_sub_blocks, sub_blocks);
+
+ size_t cbuf_size_bound = get_sum_compressed_size_bound(n_sub_blocks, sub_blocks, method);
+ void *cbuf = toku_malloc(cbuf_size_bound);
+ assert(cbuf);
+
+ size_t cbuf_size = compress_all_sub_blocks(n_sub_blocks, sub_blocks, (char*)buf, (char*)cbuf, n_cores, pool, method);
+ assert(cbuf_size <= cbuf_size_bound);
+
+ void *ubuf = toku_malloc(total_size);
+ assert(ubuf);
+
+ for (int xidx = 0; xidx < n_sub_blocks; xidx++) {
+ // corrupt a checksum
+ sub_blocks[xidx].xsum += 1;
+
+ r = decompress_all_sub_blocks(n_sub_blocks, sub_blocks, (unsigned char*)cbuf, (unsigned char*)ubuf, n_cores, pool);
+ assert(r != 0);
+
+ // reset the checksums
+ sub_blocks[xidx].xsum -= 1;
+
+ r = decompress_all_sub_blocks(n_sub_blocks, sub_blocks, (unsigned char*)cbuf, (unsigned char*)ubuf, n_cores, pool);
+ assert(r == 0);
+ assert(memcmp(buf, ubuf, total_size) == 0);
+
+ // corrupt the data
+ size_t offset = random() % cbuf_size;
+ unsigned char c = get_uint8_at_offset(cbuf, offset);
+ set_uint8_at_offset(cbuf, offset, c+1);
+
+ r = decompress_all_sub_blocks(n_sub_blocks, sub_blocks, (unsigned char*)cbuf, (unsigned char*)ubuf, n_cores, pool);
+ assert(r != 0);
+
+ // reset the data
+ set_uint8_at_offset(cbuf, offset, c);
+
+ r = decompress_all_sub_blocks(n_sub_blocks, sub_blocks, (unsigned char*)cbuf, (unsigned char*)ubuf, n_cores, pool);
+
+ assert(r == 0);
+ assert(memcmp(buf, ubuf, total_size) == 0);
+ }
+ toku_free(ubuf);
+ toku_free(cbuf);
+}
+
+static void
+set_random(void *buf, int total_size) {
+ char *bp = (char *) buf;
+ for (int i = 0; i < total_size; i++)
+ bp[i] = random();
+}
+
+static void
+run_test(int total_size, int n_cores, struct toku_thread_pool *pool, enum toku_compression_method method) {
+ void *buf = toku_malloc(total_size);
+ assert(buf);
+
+ for (int my_max_sub_blocks = 1; my_max_sub_blocks <= max_sub_blocks; my_max_sub_blocks++) {
+ memset(buf, 0, total_size);
+ test_sub_block_checksum(buf, total_size, my_max_sub_blocks, n_cores, pool, method);
+
+ set_random(buf, total_size);
+ test_sub_block_checksum(buf, total_size, my_max_sub_blocks, n_cores, pool, method);
+ }
+
+ toku_free(buf);
+}
+int
+test_main (int argc, const char *argv[]) {
+ int n_cores = 1;
+ int e = 1;
+
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0 || strcmp(arg, "--verbose") == 0) {
+ verbose++;
+ verbose_decompress_sub_block = 1;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose_decompress_sub_block = 0;
+ continue;
+ }
+ if (strcmp(arg, "-n") == 0) {
+ if (i+1 < argc) {
+ n_cores = atoi(argv[++i]);
+ continue;
+ }
+ }
+ if (strcmp(arg, "-e") == 0) {
+ if (i+1 < argc) {
+ e = atoi(argv[++i]);
+ continue;
+ }
+ }
+ }
+
+ struct toku_thread_pool *pool = NULL;
+ int r = toku_thread_pool_create(&pool, 8); assert(r == 0);
+
+ for (int total_size = 256*1024; total_size <= 4*1024*1024; total_size *= 2) {
+ for (int size = total_size - e; size <= total_size + e; size++) {
+ run_test(size, n_cores, pool, TOKU_NO_COMPRESSION);
+ run_test(size, n_cores, pool, TOKU_ZLIB_METHOD);
+ run_test(size, n_cores, pool, TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD);
+ run_test(size, n_cores, pool, TOKU_QUICKLZ_METHOD);
+ run_test(size, n_cores, pool, TOKU_LZMA_METHOD);
+ }
+ }
+
+ toku_thread_pool_destroy(&pool);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/subblock-test-compression.cc b/storage/tokudb/PerconaFT/ft/tests/subblock-test-compression.cc
new file mode 100644
index 00000000..30295204
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/subblock-test-compression.cc
@@ -0,0 +1,142 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test sub block compression and decompression
+
+#include <toku_portability.h>
+#include "test.h"
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+#include "serialize/sub_block.h"
+
+static void
+test_sub_block_compression(void *buf, int total_size, int my_max_sub_blocks, int n_cores, enum toku_compression_method method) {
+ if (verbose)
+ printf("%s:%d %d %d\n", __FUNCTION__, __LINE__, total_size, my_max_sub_blocks);
+
+ int r;
+
+ int sub_block_size, n_sub_blocks;
+ r = choose_sub_block_size(total_size, my_max_sub_blocks, &sub_block_size, &n_sub_blocks);
+ assert(r == 0);
+ if (verbose)
+ printf("%s:%d %d %d\n", __FUNCTION__, __LINE__, sub_block_size, n_sub_blocks);
+
+ struct sub_block sub_blocks[n_sub_blocks];
+ set_all_sub_block_sizes(total_size, sub_block_size, n_sub_blocks, sub_blocks);
+
+ size_t cbuf_size_bound = get_sum_compressed_size_bound(n_sub_blocks, sub_blocks, method);
+ void *cbuf = toku_malloc(cbuf_size_bound);
+ assert(cbuf);
+
+ size_t cbuf_size = compress_all_sub_blocks(n_sub_blocks, sub_blocks, (char*)buf, (char*)cbuf, n_cores, NULL, method);
+ assert(cbuf_size <= cbuf_size_bound);
+
+ void *ubuf = toku_malloc(total_size);
+ assert(ubuf);
+
+ r = decompress_all_sub_blocks(n_sub_blocks, sub_blocks, (unsigned char*)cbuf, (unsigned char*)ubuf, n_cores, NULL);
+ assert(r == 0);
+
+ assert(memcmp(buf, ubuf, total_size) == 0);
+
+ toku_free(ubuf);
+ toku_free(cbuf);
+}
+
+static void
+set_random(void *buf, int total_size) {
+ char *bp = (char *) buf;
+ for (int i = 0; i < total_size; i++)
+ bp[i] = random();
+}
+
+static void
+run_test(int total_size, int n_cores, enum toku_compression_method method) {
+ void *buf = toku_malloc(total_size);
+ assert(buf);
+
+ for (int my_max_sub_blocks = 1; my_max_sub_blocks <= max_sub_blocks; my_max_sub_blocks++) {
+ memset(buf, 0, total_size);
+ test_sub_block_compression(buf, total_size, my_max_sub_blocks, n_cores, method);
+
+ set_random(buf, total_size);
+ test_sub_block_compression(buf, total_size, my_max_sub_blocks, n_cores, method);
+ }
+
+ toku_free(buf);
+}
+int
+test_main (int argc, const char *argv[]) {
+ int n_cores = 1;
+ int e = 1;
+
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0 || strcmp(arg, "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-n") == 0) {
+ if (i+1 < argc) {
+ n_cores = atoi(argv[++i]);
+ continue;
+ }
+ }
+ if (strcmp(arg, "-e") == 0) {
+ if (i+1 < argc) {
+ e = atoi(argv[++i]);
+ continue;
+ }
+ }
+ }
+
+ for (int total_size = 256*1024; total_size <= 4*1024*1024; total_size *= 2) {
+ for (int size = total_size - e; size <= total_size + e; size++) {
+ run_test(size, n_cores, TOKU_NO_COMPRESSION);
+ run_test(size, n_cores, TOKU_ZLIB_METHOD);
+ run_test(size, n_cores, TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD);
+ run_test(size, n_cores, TOKU_QUICKLZ_METHOD);
+ run_test(size, n_cores, TOKU_LZMA_METHOD);
+ }
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/subblock-test-index.cc b/storage/tokudb/PerconaFT/ft/tests/subblock-test-index.cc
new file mode 100644
index 00000000..4b7d23ea
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/subblock-test-index.cc
@@ -0,0 +1,102 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the sub block index function
+
+#include <toku_portability.h>
+#include "test.h"
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+#include "serialize/sub_block.h"
+
+static void
+test_sub_block_index(void) {
+ if (verbose)
+ printf("%s:%d\n", __FUNCTION__, __LINE__);
+
+ const int n_sub_blocks = max_sub_blocks;
+ struct sub_block sub_block[n_sub_blocks];
+
+ size_t max_offset = 0;
+ for (int i = 0 ; i < n_sub_blocks; i++) {
+ size_t size = i+1;
+ sub_block_init(&sub_block[i]);
+ sub_block[i].uncompressed_size = size;
+ max_offset += size;
+ }
+
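+    // build a reference map from each byte offset to the sub block that should contain it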
+ int offset_to_sub_block[max_offset];
+ for (int i = 0; i < (int) max_offset; i++)
+ offset_to_sub_block[i] = -1;
+
+ size_t start_offset = 0;
+ for (int i = 0; i < n_sub_blocks; i++) {
+ size_t size = sub_block[i].uncompressed_size;
+ for (int j = 0; j < (int) (start_offset + size); j++) {
+ if (offset_to_sub_block[j] == -1)
+ offset_to_sub_block[j] = i;
+ }
+ start_offset += size;
+ }
+
+ int r;
+ for (size_t offset = 0; offset < max_offset; offset++) {
+ r = get_sub_block_index(n_sub_blocks, sub_block, offset);
+ if (verbose)
+ printf("%s:%d %u %d\n", __FUNCTION__, __LINE__, (unsigned int) offset, r);
+ assert(0 <= r && r < n_sub_blocks);
+ assert(r == offset_to_sub_block[offset]);
+ }
+
+ r = get_sub_block_index(n_sub_blocks, sub_block, max_offset);
+ assert(r == -1);
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ int i;
+ for (i=1; i<argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0)
+ verbose++;
+ }
+ test_sub_block_index();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/subblock-test-size.cc b/storage/tokudb/PerconaFT/ft/tests/subblock-test-size.cc
new file mode 100644
index 00000000..03db4ebc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/subblock-test-size.cc
@@ -0,0 +1,78 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the choose sub block size function
+
+#include <toku_portability.h>
+#include "test.h"
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+#include "serialize/sub_block.h"
+
+static void
+test_sub_block_size(int total_size) {
+ if (verbose)
+ printf("%s:%d %d\n", __FUNCTION__, __LINE__, total_size);
+ int r;
+ int sub_block_size, n_sub_blocks;
+ r = choose_sub_block_size(total_size, 0, &sub_block_size, &n_sub_blocks);
+ assert(r == EINVAL);
+ for (int i = 1; i < max_sub_blocks; i++) {
+ r = choose_sub_block_size(total_size, i, &sub_block_size, &n_sub_blocks);
+ assert(r == 0);
+ assert(0 <= n_sub_blocks && n_sub_blocks <= i);
+ assert(total_size <= n_sub_blocks * sub_block_size);
+ }
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ int i;
+ for (i=1; i<argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0)
+ verbose++;
+ }
+ test_sub_block_size(0);
+ for (int total_size = 1; total_size <= 4*1024*1024; total_size *= 2) {
+ test_sub_block_size(total_size);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc b/storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc
new file mode 100644
index 00000000..fc7d5cc4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc
@@ -0,0 +1,178 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident \
+ "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that the logical row count is restored
+   correctly when a leaf node is partially evicted. */
+
+#include "test.h"
+
+#include "cachetable/checkpoint.h"
+#include "ft-flusher-internal.h"
+#include "ft-flusher.h"
+#include <ft-cachetable-wrappers.h>
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE = NODESIZE - 100, TOKU_PSIZE = 20 };
+
+CACHETABLE ct;
+FT_HANDLE ft;
+const char *fname = TOKU_TEST_FILENAME;
+
+static int update_func(DB *UU(db), const DBT *key, const DBT *old_val,
+ const DBT *UU(extra),
+ void (*set_val)(const DBT *new_val, void *set_extra),
+ void *set_extra) {
+ DBT new_val;
+ assert(old_val->size > 0);
+ if (verbose) {
+ printf("applying update to %s\n", (char *)key->data);
+ }
+ toku_init_dbt(&new_val);
+ set_val(&new_val, set_extra);
+ return 0;
+}
+
+static void doit() {
+ BLOCKNUM node_leaf;
+ BLOCKNUM node_root;
+ BLOCKNUM node_internal;
+ int r;
+
+ toku_cachetable_create(&ct, 500 * 1024 * 1024, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE / 2,
+ TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn,
+ toku_builtin_compare_fun);
+ assert(r == 0);
+
+ ft->options.update_fun = update_func;
+ ft->ft->update_fun = update_func;
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+ char *pivots[1];
+ pivots[0] = toku_strdup("kkkkk");
+ int pivot_len = 6;
+ r = toku_testsetup_leaf(ft, &node_leaf, 2, pivots, &pivot_len);
+ assert(r == 0);
+
+ toku_free(pivots[0]);
+
+ r = toku_testsetup_nonleaf(ft, 1, &node_internal, 1, &node_leaf, 0, 0);
+ assert(r == 0);
+
+ r = toku_testsetup_nonleaf(ft, 2, &node_root, 1, &node_internal, 0, 0);
+ assert(r == 0);
+
+ r = toku_testsetup_root(ft, node_root);
+ assert(r == 0);
+
+ r = toku_testsetup_insert_to_leaf(ft, node_leaf,
+ "a", // key
+ 2, // keylen
+ "aa", 3);
+ assert(r == 0);
+
+ r = toku_testsetup_insert_to_leaf(ft, node_leaf,
+ "z", // key
+ 2, // keylen
+ "zz", 3);
+ assert(r == 0);
+ char filler[400];
+ memset(filler, 0, sizeof(filler));
+ // now we insert filler data so that the rebalance
+ // keeps it at two nodes
+ r = toku_testsetup_insert_to_leaf(ft, node_leaf,
+ "b", // key
+ 2, // keylen
+ filler, sizeof(filler));
+ assert(r == 0);
+ r = toku_testsetup_insert_to_leaf(ft, node_leaf,
+ "y", // key
+ 2, // keylen
+ filler, sizeof(filler));
+ assert(r == 0);
+
+ r = toku_testsetup_insert_to_nonleaf(ft, node_internal, FT_INSERT,
+ "a", // key
+ 2, // keylen
+ "yy", 3);
+ assert(r == 0);
+
+ r = toku_testsetup_insert_to_nonleaf(ft, node_root, FT_INSERT,
+ "a", // key
+ 2, // keylen
+ "zz", 3);
+ assert(r == 0);
+
+    // at this point the logical row count will be 6. It has to be set up
+    // manually because the test inserts beneath the ft_send_msg interface
+ ft->ft->in_memory_logical_rows = 6;
+ // now run a checkpoint to get everything clean
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+    // now do a lookup on one of the keys; this should bring the leaf node up
+    // to date
+ DBT k;
+ struct check_pair pair = {2, "a", 3, "zz", 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
+ assert(r == 0);
+ assert(ft->ft->in_memory_logical_rows == 4);
+ FTNODE node;
+    // now pin the leaf node and exercise partial eviction on it
+ toku_pin_node_with_min_bfe(&node, node_leaf, ft);
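+    // run the partial-eviction callback repeatedly; the logical row count is
+    // expected to return to the manually set value of 6 (asserted below)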
+ for (int i = 0; i < 20; i++) {
+ toku_ftnode_pe_callback(node, make_pair_attr(0xffffffff), ft->ft,
+ def_pe_finalize_impl, nullptr);
+ }
+ toku_unpin_ftnode(ft->ft, node);
+ assert(ft->ft->in_memory_logical_rows == 6);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+ toku_cachetable_close(&ct);
+}
+
+int test_main(int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ doit();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc b/storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc
new file mode 100644
index 00000000..9371a3a0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc
@@ -0,0 +1,208 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident \
+ "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that the logical row count stays correct
+   when flushing a child forces the leaf node to be evicted. */
+
+#include "test.h"
+
+#include "cachetable/checkpoint.h"
+#include "ft-flusher-internal.h"
+#include "ft-flusher.h"
+#include <ft-cachetable-wrappers.h>
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE = NODESIZE - 100, TOKU_PSIZE = 20 };
+
+CACHETABLE ct;
+FT_HANDLE ft;
+const char *fname = TOKU_TEST_FILENAME;
+
+static int update_func(DB *UU(db), const DBT *key, const DBT *old_val,
+ const DBT *UU(extra),
+ void (*set_val)(const DBT *new_val, void *set_extra),
+ void *set_extra) {
+ DBT new_val;
+ assert(old_val->size > 0);
+ if (verbose) {
+ printf("applying update to %s\n", (char *)key->data);
+ }
+ toku_init_dbt(&new_val);
+ set_val(&new_val, set_extra);
+ return 0;
+}
+
+// callback functions for toku_ft_flush_some_child
+static bool destroy_bn(void *UU(extra)) { return true; }
+
+static bool recursively_flush_should_not_happen(FTNODE UU(child),
+ void *UU(extra)) {
+ assert(false);
+}
+
+static int child_to_flush(FT UU(h), FTNODE parent, void *UU(extra)) {
+ assert(parent->height == 1);
+ assert(parent->n_children == 1);
+ return 0;
+}
+
+static void dummy_update_status(FTNODE UU(child), int UU(dirtied),
+ void *UU(extra)) {}
+
+static void doit() {
+ BLOCKNUM node_leaf;
+ BLOCKNUM node_root;
+ BLOCKNUM node_internal;
+ int r;
+
+ toku_cachetable_create(&ct, 500 * 1024 * 1024, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE / 2,
+ TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn,
+ toku_builtin_compare_fun);
+ assert(r == 0);
+
+ ft->options.update_fun = update_func;
+ ft->ft->update_fun = update_func;
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+ char *pivots[1];
+ pivots[0] = toku_strdup("kkkkk");
+ int pivot_len = 6;
+ r = toku_testsetup_leaf(ft, &node_leaf, 2, pivots, &pivot_len);
+ assert(r == 0);
+
+ toku_free(pivots[0]);
+
+ r = toku_testsetup_nonleaf(ft, 1, &node_internal, 1, &node_leaf, 0, 0);
+ assert(r == 0);
+
+ r = toku_testsetup_nonleaf(ft, 2, &node_root, 1, &node_internal, 0, 0);
+ assert(r == 0);
+
+ r = toku_testsetup_root(ft, node_root);
+ assert(r == 0);
+
+ r = toku_testsetup_insert_to_leaf(ft, node_leaf,
+ "a", // key
+ 2, // keylen
+ "aa", 3);
+ assert(r == 0);
+
+ r = toku_testsetup_insert_to_leaf(ft, node_leaf,
+ "z", // key
+ 2, // keylen
+ "zz", 3);
+ assert(r == 0);
+ char filler[400];
+ memset(filler, 0, sizeof(filler));
+ // now we insert filler data so that the rebalance
+ // keeps it at two nodes
+ r = toku_testsetup_insert_to_leaf(ft, node_leaf,
+ "b", // key
+ 2, // keylen
+ filler, sizeof(filler));
+ assert(r == 0);
+ r = toku_testsetup_insert_to_leaf(ft, node_leaf,
+ "y", // key
+ 2, // keylen
+ filler, sizeof(filler));
+ assert(r == 0);
+
+ r = toku_testsetup_insert_to_nonleaf(ft, node_internal, FT_INSERT,
+ "a", // key
+ 2, // keylen
+ "yy", 3);
+ assert(r == 0);
+
+ r = toku_testsetup_insert_to_nonleaf(ft, node_root, FT_INSERT,
+ "a", // key
+ 2, // keylen
+ "zz", 3);
+ assert(r == 0);
+
+    // at this point the logical row count will be 6. It has to be set up
+    // manually because the test inserts beneath the ft_send_msg interface
+ ft->ft->in_memory_logical_rows = 6;
+ // now run a checkpoint to get everything clean
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+    // now do a lookup on one of the keys; this should bring the leaf node up
+    // to date
+ DBT k;
+ struct check_pair pair = {2, "a", 3, "zz", 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
+ assert(r == 0);
+ assert(ft->ft->in_memory_logical_rows == 4);
+
+    // now pin the internal node so we can flush its child; the flush will
+    // force the leaf to be evicted
+ FTNODE node = NULL;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft->ft);
+ toku_pin_ftnode_with_dep_nodes(
+ ft->ft, node_internal, toku_cachetable_hash(ft->ft->cf, node_internal),
+ &bfe, PL_WRITE_EXPENSIVE, 0, NULL, &node, true);
+ assert(node->height == 1);
+ assert(node->n_children == 1);
+
+ struct flusher_advice fa;
+ flusher_advice_init(&fa, child_to_flush, destroy_bn,
+ recursively_flush_should_not_happen, default_merge_child,
+ dummy_update_status, default_pick_child_after_split,
+ NULL);
+
+    // do the flush, which forces an eviction of the leaf; the eviction brings
+    // the logical row count back to 6 just before the flush
+ toku_ft_flush_some_child(ft->ft, node, &fa);
+
+ assert(ft->ft->in_memory_logical_rows == 5);
+
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+ toku_cachetable_close(&ct);
+}
+
+int test_main(int argc __attribute__((__unused__)),
+ const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ doit();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-assert.cc b/storage/tokudb/PerconaFT/ft/tests/test-assert.cc
new file mode 100644
index 00000000..698cf7d3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-assert.cc
@@ -0,0 +1,63 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <signal.h>
+
+
+static __attribute__((__noreturn__)) void catch_abort (int sig __attribute__((__unused__))) {
+ exit(1);
+}
+
+static bool foo (void) {
+ return true;
+}
+
+
+int
+test_main (int argc, const char *argv[]) {
+ signal (SIGABRT, catch_abort);
+ if (argc!=2) { printf("argcount should be 2.\n"); exit(1); }
+ const char *str=argv[1];
+ assert(strcmp(str,"ok")==0);
+ assert(foo());
+ assert(0x8000000000000000ULL);
+ assert(0x4000000000000000ULL);
+ assert(argv[1]);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-bjm.cc b/storage/tokudb/PerconaFT/ft/tests/test-bjm.cc
new file mode 100644
index 00000000..6afe5b9f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-bjm.cc
@@ -0,0 +1,104 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "cachetable/background_job_manager.h"
+
+#include "test.h"
+
+BACKGROUND_JOB_MANAGER bjm;
+
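+// thread routine: blocks until all outstanding background jobs have been removed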
+static void *finish_bjm(void *arg) {
+ bjm_wait_for_jobs_to_finish(bjm);
+ return arg;
+}
+
+
+static void bjm_test(void) {
+ int r = 0;
+ bjm = NULL;
+ bjm_init(&bjm);
+ // test simple add/remove of background job works
+ r = bjm_add_background_job(bjm);
+ assert_zero(r);
+ bjm_remove_background_job(bjm);
+ bjm_wait_for_jobs_to_finish(bjm);
+ // assert that you cannot add a background job
+ // without resetting bjm after waiting
+ // for finish
+ r = bjm_add_background_job(bjm);
+ assert(r != 0);
+ // test that after a reset, we can resume adding background jobs
+ bjm_reset(bjm);
+ r = bjm_add_background_job(bjm);
+ assert_zero(r);
+ bjm_remove_background_job(bjm);
+ bjm_wait_for_jobs_to_finish(bjm);
+
+ bjm_reset(bjm);
+ r = bjm_add_background_job(bjm);
+ assert_zero(r);
+ toku_pthread_t tid;
+ r = toku_pthread_create(toku_uninstrumented,
+ &tid,
+ nullptr,
+ finish_bjm,
+ nullptr);
+ assert_zero(r);
+ usleep(2 * 1024 * 1024);
+ // should return non-zero because tid is waiting
+ // for background jobs to finish
+ r = bjm_add_background_job(bjm);
+ assert(r != 0);
+ bjm_remove_background_job(bjm);
+ void *ret;
+ r = toku_pthread_join(tid, &ret);
+ assert_zero(r);
+
+ bjm_destroy(bjm);
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ bjm_test();
+ if (verbose) printf("test ok\n");
+ return 0;
+}
+
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-flush.cc b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-flush.cc
new file mode 100644
index 00000000..5c73d281
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-flush.cc
@@ -0,0 +1,295 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that taking a checkpoint while a node flush
+   is in progress leaves the checkpointed dictionary in a consistent state. */
+
+
+#include "test.h"
+
+#include <ft-cachetable-wrappers.h>
+#include "ft-flusher.h"
+#include "ft-flusher-internal.h"
+#include "cachetable/checkpoint.h"
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE t;
+
+bool checkpoint_called;
+bool checkpoint_callback_called;
+toku_pthread_t checkpoint_tid;
+
+
+// callback functions for toku_ft_flush_some_child
+static bool
+dont_destroy_bn(void* UU(extra))
+{
+ return false;
+}
+static void merge_should_not_happen(struct flusher_advice* UU(fa),
+ FT UU(h),
+ FTNODE UU(parent),
+ int UU(childnum),
+ FTNODE UU(child),
+ void* UU(extra))
+{
+ assert(false);
+}
+
+static bool recursively_flush_should_not_happen(FTNODE UU(child), void* UU(extra)) {
+ assert(false);
+}
+
+static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
+ assert(parent->height == 1);
+ assert(parent->n_children == 1);
+ return 0;
+}
+
+static void dummy_update_status(FTNODE UU(child), int UU(dirtied), void* UU(extra)) {
+}
+
+
+static void checkpoint_callback(void* UU(extra)) {
+ usleep(1*1024*1024);
+ checkpoint_callback_called = true;
+}
+
+
+static void *do_checkpoint(void *arg) {
+    // take a checkpoint; checkpoint_callback is invoked while it runs
+ if (verbose) printf("starting a checkpoint\n");
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ int r = toku_checkpoint(cp, NULL, checkpoint_callback, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+ if (verbose) printf("completed a checkpoint\n");
+ return arg;
+}
+
+
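+// flusher-thread callback: once the flusher reaches the chosen state, kick off
+// a checkpoint on another thread and wait for it to get underway, so the flush
+// and the checkpoint race with each other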
+static void flusher_callback(int state, void* extra) {
+ bool after_child_pin = *(bool *)extra;
+ if (verbose) {
+ printf("state %d\n", state);
+ }
+ if ((state == flt_flush_before_child_pin && !after_child_pin) ||
+ (state == ft_flush_aflter_child_pin && after_child_pin)) {
+ checkpoint_called = true;
+ int r = toku_pthread_create(toku_uninstrumented,
+ &checkpoint_tid,
+ nullptr,
+ do_checkpoint,
+ nullptr);
+ assert_zero(r);
+ while (!checkpoint_callback_called) {
+ usleep(1 * 1024 * 1024);
+ }
+ }
+}
+
+static void
+doit (bool after_child_pin) {
+ BLOCKNUM node_leaf, node_root;
+
+ int r;
+ checkpoint_called = false;
+ checkpoint_callback_called = false;
+
+ toku_flusher_thread_set_callback(flusher_callback, &after_child_pin);
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink("foo1.ft_handle");
+ r = toku_open_ft_handle("foo1.ft_handle", 1, &t, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ r = toku_testsetup_leaf(t, &node_leaf, 1, NULL, NULL);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(t, 1, &node_root, 1, &node_leaf, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_root(t, node_root);
+ assert(r==0);
+
+
+ r = toku_testsetup_insert_to_nonleaf(
+ t,
+ node_root,
+ FT_INSERT,
+ "a",
+ 2,
+ NULL,
+ 0
+ );
+
+    // at this point, we have inserted a message into
+    // the root and we wish to flush it; the leaf
+    // should be empty
+
+ struct flusher_advice fa;
+ flusher_advice_init(
+ &fa,
+ child_to_flush,
+ dont_destroy_bn,
+ recursively_flush_should_not_happen,
+ merge_should_not_happen,
+ dummy_update_status,
+ default_pick_child_after_split,
+ NULL
+ );
+
+ FTNODE node = NULL;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
+ toku_pin_ftnode(
+ t->ft,
+ node_root,
+ toku_cachetable_hash(t->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 1);
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) > 0);
+
+ // do the flush
+ toku_ft_flush_some_child(t->ft, node, &fa);
+ assert(checkpoint_callback_called);
+
+ // now let's pin the root again and make sure it is flushed
+ toku_pin_ftnode(
+ t->ft,
+ node_root,
+ toku_cachetable_hash(t->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 1);
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
+ toku_unpin_ftnode(t->ft, node);
+
+ void *ret;
+ r = toku_pthread_join(checkpoint_tid, &ret);
+ assert_zero(r);
+
+ //
+ // now the dictionary has been checkpointed
+ // copy the file to something with a new name,
+ // open it, and verify that the state of what is
+ // checkpointed is what we expect
+ //
+
+ r = system("cp foo1.ft_handle bar1.ft_handle ");
+ assert_zero(r);
+
+ FT_HANDLE c_ft;
+ r = toku_open_ft_handle("bar1.ft_handle", 0, &c_ft, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ //
+ // now pin the root, verify that we have a message in there, and that it is clean
+ //
+ bfe.create_for_full_read(c_ft->ft);
+ toku_pin_ftnode(
+ c_ft->ft,
+ node_root,
+ toku_cachetable_hash(c_ft->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(!node->dirty());
+ assert(node->n_children == 1);
+ if (after_child_pin) {
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
+ }
+ else {
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) > 0);
+ }
+ toku_unpin_ftnode(c_ft->ft, node);
+
+ toku_pin_ftnode(
+ c_ft->ft,
+ node_leaf,
+ toku_cachetable_hash(c_ft->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 0);
+ assert(!node->dirty());
+ assert(node->n_children == 1);
+ if (after_child_pin) {
+ assert(BLB_NBYTESINDATA(node,0) > 0);
+ }
+ else {
+ assert(BLB_NBYTESINDATA(node,0) == 0);
+ }
+ toku_unpin_ftnode(c_ft->ft, node);
+
+ struct check_pair pair1 = {2, "a", 0, NULL, 0};
+ DBT k;
+ r = toku_ft_lookup(c_ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
+ assert(r==0);
+
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ r = toku_close_ft_handle_nolsn(c_ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ doit(false);
+ doit(true);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-merge.cc b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-merge.cc
new file mode 100644
index 00000000..cab37027
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-merge.cc
@@ -0,0 +1,372 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that taking a checkpoint while a leaf merge
+   is in progress leaves the checkpointed dictionary in a consistent state. */
+
+
+#include "test.h"
+
+#include <ft-cachetable-wrappers.h>
+#include "ft-flusher.h"
+#include "ft-flusher-internal.h"
+#include "cachetable/checkpoint.h"
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE t;
+
+bool checkpoint_called;
+bool checkpoint_callback_called;
+toku_pthread_t checkpoint_tid;
+
+
+// callback functions for toku_ft_flush_some_child
+static bool
+dont_destroy_bn(void* UU(extra))
+{
+ return false;
+}
+
+static bool recursively_flush_should_not_happen(FTNODE UU(child), void* UU(extra)) {
+ assert(false);
+}
+
+static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
+ assert(parent->height == 1);
+ assert(parent->n_children == 2);
+ return 0;
+}
+
+static void dummy_update_status(FTNODE UU(child), int UU(dirtied), void* UU(extra)) {
+}
+
+
+static void checkpoint_callback(void* UU(extra)) {
+ usleep(1*1024*1024);
+ checkpoint_callback_called = true;
+}
+
+
+static void *do_checkpoint(void *arg) {
+    // take a checkpoint; checkpoint_callback is invoked while it runs
+ if (verbose) printf("starting a checkpoint\n");
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ int r = toku_checkpoint(cp, NULL, checkpoint_callback, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+ if (verbose) printf("completed a checkpoint\n");
+ return arg;
+}
+
+
+static void flusher_callback(int state, void* extra) {
+ int desired_state = *(int *)extra;
+ if (verbose) {
+ printf("state %d\n", state);
+ }
+ if (state == desired_state) {
+ checkpoint_called = true;
+ int r = toku_pthread_create(toku_uninstrumented,
+ &checkpoint_tid,
+ nullptr,
+ do_checkpoint,
+ nullptr);
+ assert_zero(r);
+ while (!checkpoint_callback_called) {
+ usleep(1 * 1024 * 1024);
+ }
+ }
+}
+
+static void
+doit (int state) {
+ BLOCKNUM node_root;
+ BLOCKNUM node_leaves[2];
+
+ int r;
+ checkpoint_called = false;
+ checkpoint_callback_called = false;
+
+ toku_flusher_thread_set_callback(flusher_callback, &state);
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink("foo2.ft_handle");
+ unlink("bar2.ft_handle");
+ // note the basement node size is 5 times the node size
+ // this is done to avoid rebalancing when writing a leaf
+ // node to disk
+ r = toku_open_ft_handle("foo2.ft_handle", 1, &t, NODESIZE, 5*NODESIZE, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ r = toku_testsetup_leaf(t, &node_leaves[0], 1, NULL, NULL);
+ assert(r==0);
+
+ r = toku_testsetup_leaf(t, &node_leaves[1], 1, NULL, NULL);
+ assert(r==0);
+
+ char* pivots[1];
+ pivots[0] = toku_strdup("kkkkk");
+ int pivot_len = 6;
+
+ r = toku_testsetup_nonleaf(t, 1, &node_root, 2, node_leaves, pivots, &pivot_len);
+ assert(r==0);
+
+ r = toku_testsetup_root(t, node_root);
+ assert(r==0);
+
+ r = toku_testsetup_insert_to_leaf(
+ t,
+ node_leaves[0],
+ "a",
+ 2,
+ NULL,
+ 0
+ );
+ assert_zero(r);
+ r = toku_testsetup_insert_to_leaf(
+ t,
+ node_leaves[1],
+ "z",
+ 2,
+ NULL,
+ 0
+ );
+ assert_zero(r);
+
+
+ // at this point, we have inserted two leafentries,
+ // one in each leaf node. A flush should invoke a merge
+ struct flusher_advice fa;
+ flusher_advice_init(
+ &fa,
+ child_to_flush,
+ dont_destroy_bn,
+ recursively_flush_should_not_happen,
+ default_merge_child,
+ dummy_update_status,
+ default_pick_child_after_split,
+ NULL
+ );
+
+ // hack to get merge going
+ FTNODE node = NULL;
+ toku_pin_node_with_min_bfe(&node, node_leaves[0], t);
+ BLB_SEQINSERT(node, node->n_children-1) = false;
+ toku_unpin_ftnode(t->ft, node);
+ toku_pin_node_with_min_bfe(&node, node_leaves[1], t);
+ BLB_SEQINSERT(node, node->n_children-1) = false;
+ toku_unpin_ftnode(t->ft, node);
+
+
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
+ toku_pin_ftnode_with_dep_nodes(
+ t->ft,
+ node_root,
+ toku_cachetable_hash(t->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ 0,
+ NULL,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 2);
+
+ // do the flush
+ toku_ft_flush_some_child(t->ft, node, &fa);
+ assert(checkpoint_callback_called);
+
+    // now let's pin the root again and make sure it has merged
+ toku_pin_ftnode_with_dep_nodes(
+ t->ft,
+ node_root,
+ toku_cachetable_hash(t->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ 0,
+ NULL,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 1);
+ toku_unpin_ftnode(t->ft, node);
+
+ void *ret;
+ r = toku_pthread_join(checkpoint_tid, &ret);
+ assert_zero(r);
+
+ //
+ // now the dictionary has been checkpointed
+ // copy the file to something with a new name,
+ // open it, and verify that the state of what is
+ // checkpointed is what we expect
+ //
+
+ r = system("cp foo2.ft_handle bar2.ft_handle ");
+ assert_zero(r);
+
+ FT_HANDLE c_ft;
+ // note the basement node size is 5 times the node size
+ // this is done to avoid rebalancing when writing a leaf
+ // node to disk
+ r = toku_open_ft_handle("bar2.ft_handle", 0, &c_ft, NODESIZE, 5*NODESIZE, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ //
+ // now pin the root, verify that the state is what we expect
+ //
+ bfe.create_for_full_read(c_ft->ft);
+ toku_pin_ftnode_with_dep_nodes(
+ c_ft->ft,
+ node_root,
+ toku_cachetable_hash(c_ft->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ 0,
+ NULL,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(!node->dirty());
+ BLOCKNUM left_child, right_child;
+ // cases where we expect the checkpoint to contain the merge
+ if (state == ft_flush_aflter_merge || state == flt_flush_before_unpin_remove) {
+ assert(node->n_children == 1);
+ left_child = BP_BLOCKNUM(node,0);
+ }
+ else if (state == flt_flush_before_merge || state == flt_flush_before_pin_second_node_for_merge) {
+ assert(node->n_children == 2);
+ left_child = BP_BLOCKNUM(node,0);
+ right_child = BP_BLOCKNUM(node,1);
+ }
+ else {
+ assert(false);
+ }
+ toku_unpin_ftnode(c_ft->ft, node);
+
+ // now let's verify the leaves are what we expect
+ if (state == flt_flush_before_merge || state == flt_flush_before_pin_second_node_for_merge) {
+ toku_pin_ftnode_with_dep_nodes(
+ c_ft->ft,
+ left_child,
+ toku_cachetable_hash(c_ft->ft->cf, left_child),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ 0,
+ NULL,
+ &node,
+ true
+ );
+ assert(node->height == 0);
+ assert(!node->dirty());
+ assert(node->n_children == 1);
+ assert(BLB_DATA(node, 0)->num_klpairs() == 1);
+ toku_unpin_ftnode(c_ft->ft, node);
+
+ toku_pin_ftnode_with_dep_nodes(
+ c_ft->ft,
+ right_child,
+ toku_cachetable_hash(c_ft->ft->cf, right_child),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ 0,
+ NULL,
+ &node,
+ true
+ );
+ assert(node->height == 0);
+ assert(!node->dirty());
+ assert(node->n_children == 1);
+ assert(BLB_DATA(node, 0)->num_klpairs() == 1);
+ toku_unpin_ftnode(c_ft->ft, node);
+ }
+ else if (state == ft_flush_aflter_merge || state == flt_flush_before_unpin_remove) {
+ toku_pin_ftnode_with_dep_nodes(
+ c_ft->ft,
+ left_child,
+ toku_cachetable_hash(c_ft->ft->cf, left_child),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ 0,
+ NULL,
+ &node,
+ true
+ );
+ assert(node->height == 0);
+ assert(!node->dirty());
+ assert(node->n_children == 1);
+ assert(BLB_DATA(node, 0)->num_klpairs() == 2);
+ toku_unpin_ftnode(c_ft->ft, node);
+ }
+ else {
+ assert(false);
+ }
+
+
+ DBT k;
+ struct check_pair pair1 = {2, "a", 0, NULL, 0};
+ r = toku_ft_lookup(c_ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
+ assert(r==0);
+ struct check_pair pair2 = {2, "z", 0, NULL, 0};
+ r = toku_ft_lookup(c_ft, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2);
+ assert(r==0);
+
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ r = toku_close_ft_handle_nolsn(c_ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+ toku_free(pivots[0]);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ doit(flt_flush_before_merge);
+ doit(flt_flush_before_pin_second_node_for_merge);
+ doit(flt_flush_before_unpin_remove);
+ doit(ft_flush_aflter_merge);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-rebalance.cc b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-rebalance.cc
new file mode 100644
index 00000000..87f66512
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-rebalance.cc
@@ -0,0 +1,353 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that taking a checkpoint while a leaf
+   rebalance is in progress leaves the checkpointed dictionary in a consistent
+   state. */
+
+
+#include "test.h"
+
+#include <ft-cachetable-wrappers.h>
+#include "ft-flusher.h"
+#include "ft-flusher-internal.h"
+#include "cachetable/checkpoint.h"
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE t;
+
+bool checkpoint_called;
+bool checkpoint_callback_called;
+toku_pthread_t checkpoint_tid;
+
+
+// callback functions for toku_ft_flush_some_child
+static bool
+dont_destroy_bn(void* UU(extra))
+{
+ return false;
+}
+
+static bool recursively_flush_should_not_happen(FTNODE UU(child), void* UU(extra)) {
+ assert(false);
+}
+
+static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
+ assert(parent->height == 1);
+ assert(parent->n_children == 2);
+ return 0;
+}
+
+static void dummy_update_status(FTNODE UU(child), int UU(dirtied), void* UU(extra)) {
+}
+
+
+static void checkpoint_callback(void* UU(extra)) {
+ usleep(1*1024*1024);
+ checkpoint_callback_called = true;
+}
+
+
+static void *do_checkpoint(void *arg) {
+    // take a checkpoint; checkpoint_callback is invoked while it runs
+ if (verbose) printf("starting a checkpoint\n");
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ int r = toku_checkpoint(cp, NULL, checkpoint_callback, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+ if (verbose) printf("completed a checkpoint\n");
+ return arg;
+}
+
+
+static void flusher_callback(int state, void* extra) {
+ int desired_state = *(int *)extra;
+ if (verbose) {
+ printf("state %d\n", state);
+ }
+ if (state == desired_state) {
+ checkpoint_called = true;
+ int r = toku_pthread_create(toku_uninstrumented,
+ &checkpoint_tid,
+ nullptr,
+ do_checkpoint,
+ nullptr);
+ assert_zero(r);
+ while (!checkpoint_callback_called) {
+ usleep(1 * 1024 * 1024);
+ }
+ }
+}
+
+static void
+doit (int state) {
+ BLOCKNUM node_root;
+ BLOCKNUM node_leaves[2];
+
+ int r;
+ checkpoint_called = false;
+ checkpoint_callback_called = false;
+
+ toku_flusher_thread_set_callback(flusher_callback, &state);
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink("foo3.ft_handle");
+ unlink("bar3.ft_handle");
+ // note the basement node size is 5 times the node size
+ // this is done to avoid rebalancing when writing a leaf
+ // node to disk
+ r = toku_open_ft_handle("foo3.ft_handle", 1, &t, NODESIZE, 5*NODESIZE, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ r = toku_testsetup_leaf(t, &node_leaves[0], 1, NULL, NULL);
+ assert(r==0);
+
+ r = toku_testsetup_leaf(t, &node_leaves[1], 1, NULL, NULL);
+ assert(r==0);
+
+ char* pivots[1];
+ pivots[0] = toku_strdup("kkkkk");
+ int pivot_len = 6;
+
+ r = toku_testsetup_nonleaf(t, 1, &node_root, 2, node_leaves, pivots, &pivot_len);
+ assert(r==0);
+
+ r = toku_testsetup_root(t, node_root);
+ assert(r==0);
+
+ char dummy_val[NODESIZE/2-50];
+ memset(dummy_val, 0, sizeof(dummy_val));
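+    // the values below are large enough that the two leaves cannot be merged
+    // into one; the flush rebalances the entries between them instead
+    // (verified after the flush)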
+ r = toku_testsetup_insert_to_leaf(
+ t,
+ node_leaves[0],
+ "a",
+ 2,
+ NULL,
+ 0
+ );
+ assert_zero(r);
+ r = toku_testsetup_insert_to_leaf(
+ t,
+ node_leaves[1],
+ "x",
+ 2,
+ dummy_val,
+ sizeof(dummy_val)
+ );
+ assert_zero(r);
+ r = toku_testsetup_insert_to_leaf(
+ t,
+ node_leaves[1],
+ "y",
+ 2,
+ dummy_val,
+ sizeof(dummy_val)
+ );
+ assert_zero(r);
+ r = toku_testsetup_insert_to_leaf(
+ t,
+ node_leaves[1],
+ "z",
+ 2,
+ NULL,
+ 0
+ );
+ assert_zero(r);
+
+
+    // at this point, we have inserted four leafentries across the two leaf
+    // nodes. A flush should attempt a merge, which ends up rebalancing them
+ struct flusher_advice fa;
+ flusher_advice_init(
+ &fa,
+ child_to_flush,
+ dont_destroy_bn,
+ recursively_flush_should_not_happen,
+ default_merge_child,
+ dummy_update_status,
+ default_pick_child_after_split,
+ NULL
+ );
+
+ // hack to get merge going
+ FTNODE node = NULL;
+ toku_pin_node_with_min_bfe(&node, node_leaves[0], t);
+ BLB_SEQINSERT(node, node->n_children-1) = false;
+ toku_unpin_ftnode(t->ft, node);
+ toku_pin_node_with_min_bfe(&node, node_leaves[1], t);
+ BLB_SEQINSERT(node, node->n_children-1) = false;
+ toku_unpin_ftnode(t->ft, node);
+
+
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
+ toku_pin_ftnode(
+ t->ft,
+ node_root,
+ toku_cachetable_hash(t->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 2);
+
+ // do the flush
+ toku_ft_flush_some_child(t->ft, node, &fa);
+ assert(checkpoint_callback_called);
+
+    // now let's pin the root again and make sure it has rebalanced
+ toku_pin_ftnode(
+ t->ft,
+ node_root,
+ toku_cachetable_hash(t->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 2);
+ toku_unpin_ftnode(t->ft, node);
+
+ void *ret;
+ r = toku_pthread_join(checkpoint_tid, &ret);
+ assert_zero(r);
+
+ //
+ // now the dictionary has been checkpointed
+ // copy the file to something with a new name,
+ // open it, and verify that the state of what is
+ // checkpointed is what we expect
+ //
+
+ r = system("cp foo3.ft_handle bar3.ft_handle ");
+ assert_zero(r);
+
+ FT_HANDLE c_ft;
+ // note the basement node size is 5 times the node size
+ // this is done to avoid rebalancing when writing a leaf
+ // node to disk
+ r = toku_open_ft_handle("bar3.ft_handle", 0, &c_ft, NODESIZE, 5*NODESIZE, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ //
+ // now pin the root, verify that the state is what we expect
+ //
+ bfe.create_for_full_read(c_ft->ft);
+ toku_pin_ftnode(
+ c_ft->ft,
+ node_root,
+ toku_cachetable_hash(c_ft->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(!node->dirty());
+ BLOCKNUM left_child, right_child;
+
+ assert(node->n_children == 2);
+ left_child = BP_BLOCKNUM(node,0);
+ right_child = BP_BLOCKNUM(node,1);
+
+ toku_unpin_ftnode(c_ft->ft, node);
+
+ // now let's verify the leaves are what we expect
+ toku_pin_ftnode(
+ c_ft->ft,
+ left_child,
+ toku_cachetable_hash(c_ft->ft->cf, left_child),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 0);
+ assert(!node->dirty());
+ assert(node->n_children == 1);
+ assert(BLB_DATA(node, 0)->num_klpairs() == 2);
+ toku_unpin_ftnode(c_ft->ft, node);
+
+ toku_pin_ftnode(
+ c_ft->ft,
+ right_child,
+ toku_cachetable_hash(c_ft->ft->cf, right_child),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 0);
+ assert(!node->dirty());
+ assert(node->n_children == 1);
+ assert(BLB_DATA(node, 0)->num_klpairs() == 2);
+ toku_unpin_ftnode(c_ft->ft, node);
+
+ DBT k;
+ struct check_pair pair1 = {2, "a", 0, NULL, 0};
+ r = toku_ft_lookup(c_ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
+ assert(r==0);
+ struct check_pair pair2 = {2, "x", sizeof(dummy_val), dummy_val, 0};
+ r = toku_ft_lookup(c_ft, toku_fill_dbt(&k, "x", 2), lookup_checkf, &pair2);
+ assert(r==0);
+ struct check_pair pair3 = {2, "y", sizeof(dummy_val), dummy_val, 0};
+ r = toku_ft_lookup(c_ft, toku_fill_dbt(&k, "y", 2), lookup_checkf, &pair3);
+ assert(r==0);
+ struct check_pair pair4 = {2, "z", 0, NULL, 0};
+ r = toku_ft_lookup(c_ft, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair4);
+ assert(r==0);
+
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ r = toku_close_ft_handle_nolsn(c_ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+ toku_free(pivots[0]);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ doit(ft_flush_aflter_rebalance);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-split.cc b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-split.cc
new file mode 100644
index 00000000..d5f7fe50
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-split.cc
@@ -0,0 +1,348 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that taking a checkpoint while a leaf split
+   is in progress leaves the checkpointed dictionary in a consistent state. */
+
+
+#include "test.h"
+
+#include <ft-cachetable-wrappers.h>
+#include "ft-flusher.h"
+#include "ft-flusher-internal.h"
+#include "cachetable/checkpoint.h"
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE t;
+
+bool checkpoint_called;
+bool checkpoint_callback_called;
+toku_pthread_t checkpoint_tid;
+
+
+// callback functions for toku_ft_flush_some_child
+static bool
+dont_destroy_bn(void* UU(extra))
+{
+ return false;
+}
+static void merge_should_not_happen(struct flusher_advice* UU(fa),
+ FT UU(h),
+ FTNODE UU(parent),
+ int UU(childnum),
+ FTNODE UU(child),
+ void* UU(extra))
+{
+ assert(false);
+}
+
+static bool recursively_flush_should_not_happen(FTNODE UU(child), void* UU(extra)) {
+ assert(false);
+}
+
+static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
+ assert(parent->height == 1);
+ assert(parent->n_children == 1);
+ return 0;
+}
+
+static void dummy_update_status(FTNODE UU(child), int UU(dirtied), void* UU(extra)) {
+}
+
+
+static void checkpoint_callback(void* UU(extra)) {
+ usleep(1*1024*1024);
+ checkpoint_callback_called = true;
+}
+
+
+static void *do_checkpoint(void *arg) {
+    // take a checkpoint; checkpoint_callback is invoked while it runs
+ if (verbose) printf("starting a checkpoint\n");
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ int r = toku_checkpoint(cp, NULL, checkpoint_callback, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+ if (verbose) printf("completed a checkpoint\n");
+ return arg;
+}
+
+
+static void flusher_callback(int state, void* extra) {
+ bool after_split = *(bool *)extra;
+ if (verbose) {
+ printf("state %d\n", state);
+ }
+ if ((state == flt_flush_before_split && !after_split) ||
+ (state == flt_flush_during_split && after_split)) {
+ checkpoint_called = true;
+ int r = toku_pthread_create(toku_uninstrumented,
+ &checkpoint_tid,
+ nullptr,
+ do_checkpoint,
+ nullptr);
+ assert_zero(r);
+ while (!checkpoint_callback_called) {
+ usleep(1 * 1024 * 1024);
+ }
+ }
+}
+
+static void
+doit (bool after_split) {
+ BLOCKNUM node_leaf, node_root;
+
+ int r;
+ checkpoint_called = false;
+ checkpoint_callback_called = false;
+
+ toku_flusher_thread_set_callback(flusher_callback, &after_split);
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink("foo4.ft_handle");
+ unlink("bar4.ft_handle");
+ // note the basement node size is 5 times the node size
+ // this is done to avoid rebalancing when writing a leaf
+ // node to disk
+ r = toku_open_ft_handle("foo4.ft_handle", 1, &t, NODESIZE, 5*NODESIZE, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ r = toku_testsetup_leaf(t, &node_leaf, 1, NULL, NULL);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(t, 1, &node_root, 1, &node_leaf, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_root(t, node_root);
+ assert(r==0);
+
+ char dummy_val[NODESIZE-50];
+ memset(dummy_val, 0, sizeof(dummy_val));
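+    // each value below is nearly a full node, so inserting two of them makes
+    // the leaf large enough to split during the flush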
+ r = toku_testsetup_insert_to_leaf(
+ t,
+ node_leaf,
+ "a",
+ 2,
+ dummy_val,
+ sizeof(dummy_val)
+ );
+ assert_zero(r);
+ r = toku_testsetup_insert_to_leaf(
+ t,
+ node_leaf,
+ "z",
+ 2,
+ dummy_val,
+ sizeof(dummy_val)
+ );
+ assert_zero(r);
+
+
+    // at this point, we have inserted two leafentries into
+    // the leaf; they should be big enough that a split
+    // will happen
+ struct flusher_advice fa;
+ flusher_advice_init(
+ &fa,
+ child_to_flush,
+ dont_destroy_bn,
+ recursively_flush_should_not_happen,
+ merge_should_not_happen,
+ dummy_update_status,
+ default_pick_child_after_split,
+ NULL
+ );
+
+ FTNODE node = NULL;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
+ toku_pin_ftnode(
+ t->ft,
+ node_root,
+ toku_cachetable_hash(t->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 1);
+
+ // do the flush
+ toku_ft_flush_some_child(t->ft, node, &fa);
+ assert(checkpoint_callback_called);
+
+    // now let's pin the root again and make sure it has split
+ toku_pin_ftnode(
+ t->ft,
+ node_root,
+ toku_cachetable_hash(t->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 2);
+ toku_unpin_ftnode(t->ft, node);
+
+ void *ret;
+ r = toku_pthread_join(checkpoint_tid, &ret);
+ assert_zero(r);
+
+ //
+ // now the dictionary has been checkpointed
+ // copy the file to something with a new name,
+ // open it, and verify that the state of what is
+ // checkpointed is what we expect
+ //
+
+ r = system("cp foo4.ft_handle bar4.ft_handle ");
+ assert_zero(r);
+
+ FT_HANDLE c_ft;
+ // note the basement node size is 5 times the node size
+ // this is done to avoid rebalancing when writing a leaf
+ // node to disk
+ r = toku_open_ft_handle("bar4.ft_handle", 0, &c_ft, NODESIZE, 5*NODESIZE, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ //
+ // now pin the root, verify that we have a message in there, and that it is clean
+ //
+ bfe.create_for_full_read(c_ft->ft);
+ toku_pin_ftnode(
+ c_ft->ft,
+ node_root,
+ toku_cachetable_hash(c_ft->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(!node->dirty());
+ BLOCKNUM left_child, right_child;
+ if (after_split) {
+ assert(node->n_children == 2);
+ left_child = BP_BLOCKNUM(node,0);
+ assert(left_child.b == node_leaf.b);
+ right_child = BP_BLOCKNUM(node,1);
+ }
+ else {
+ assert(node->n_children == 1);
+ left_child = BP_BLOCKNUM(node,0);
+ assert(left_child.b == node_leaf.b);
+ }
+ toku_unpin_ftnode(c_ft->ft, node);
+
+ // now let's verify the leaves are what we expect
+ if (after_split) {
+ toku_pin_ftnode(
+ c_ft->ft,
+ left_child,
+ toku_cachetable_hash(c_ft->ft->cf, left_child),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 0);
+ assert(!node->dirty());
+ assert(node->n_children == 1);
+ assert(BLB_DATA(node, 0)->num_klpairs() == 1);
+ toku_unpin_ftnode(c_ft->ft, node);
+
+ toku_pin_ftnode(
+ c_ft->ft,
+ right_child,
+ toku_cachetable_hash(c_ft->ft->cf, right_child),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 0);
+ assert(!node->dirty());
+ assert(node->n_children == 1);
+ assert(BLB_DATA(node, 0)->num_klpairs() == 1);
+ toku_unpin_ftnode(c_ft->ft, node);
+ }
+ else {
+ toku_pin_ftnode(
+ c_ft->ft,
+ left_child,
+ toku_cachetable_hash(c_ft->ft->cf, left_child),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 0);
+ assert(!node->dirty());
+ assert(node->n_children == 1);
+ assert(BLB_DATA(node, 0)->num_klpairs() == 2);
+ toku_unpin_ftnode(c_ft->ft, node);
+ }
+
+
+ DBT k;
+ struct check_pair pair1 = {2, "a", sizeof(dummy_val), dummy_val, 0};
+ r = toku_ft_lookup(c_ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
+ assert(r==0);
+ struct check_pair pair2 = {2, "z", sizeof(dummy_val), dummy_val, 0};
+ r = toku_ft_lookup(c_ft, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2);
+ assert(r==0);
+
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ r = toku_close_ft_handle_nolsn(c_ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ doit(false);
+ doit(true);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-del-inorder.cc b/storage/tokudb/PerconaFT/ft/tests/test-del-inorder.cc
new file mode 100644
index 00000000..2cf8480b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-del-inorder.cc
@@ -0,0 +1,98 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that inserts stay behind deletes. */
+
+
+#include "test.h"
+
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE t;
+const char *fname = TOKU_TEST_FILENAME;
+
+static void
+doit (void) {
+ BLOCKNUM nodea,nodeb;
+
+ int r;
+
+ toku_cachetable_create(&ct, 16*1024, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, NODESIZE, NODESIZE, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ r = toku_testsetup_leaf(t, &nodea, 1, NULL, NULL);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(t, 1, &nodeb, 1, &nodea, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_insert_to_nonleaf(t, nodeb, FT_DELETE_ANY, "hello", 6, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_root(t, nodeb);
+ assert(r==0);
+
+ DBT k,v;
+ toku_ft_insert(t,
+ toku_fill_dbt(&k, "hello", 6),
+ toku_fill_dbt(&v, "there", 6),
+ null_txn);
+
+ memset(&v, 0, sizeof(v));
+ struct check_pair pair = {6, "hello", 6, "there", 0};
+ r = toku_ft_lookup(t, &k, lookup_checkf, &pair);
+ assert(r==0);
+ assert(pair.call_count == 1);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ doit();
+ return 0;
+}
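The lookup-verification idiom above recurs in every test in this patch: a check_pair from test.h carries the expected key/value plus a call counter, and lookup_checkf bumps the counter as it checks the pair. A minimal sketch of the pattern, not part of the patch itself, assuming the same test.h harness and an open handle t as in doit() above:

    // Sketch only: expect key "hello" (length 6, NUL included) to map to "there".
    // Field order mirrors the usage above: {keylen, key, vallen, val, call_count}.
    DBT k;
    struct check_pair expected = {6, "hello", 6, "there", 0};
    int r = toku_ft_lookup(t, toku_fill_dbt(&k, "hello", 6), lookup_checkf, &expected);
    assert(r == 0);
    assert(expected.call_count == 1);  // lookup_checkf was invoked exactly once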
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-dirty-flushes-on-cleaner.cc b/storage/tokudb/PerconaFT/ft/tests/test-dirty-flushes-on-cleaner.cc
new file mode 100644
index 00000000..e1937538
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-dirty-flushes-on-cleaner.cc
@@ -0,0 +1,310 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that the cleaner thread can flush a dirty internal node's buffer (without merging), and that lookups still see the right values afterwards. */
+
+
+#include "test.h"
+
+#include <ft-cachetable-wrappers.h>
+#include "ft-flusher.h"
+#include "cachetable/checkpoint.h"
+
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE ft;
+const char *fname = TOKU_TEST_FILENAME;
+
+static int update_func(
+ DB* UU(db),
+ const DBT* key,
+ const DBT* old_val,
+ const DBT* UU(extra),
+ void (*set_val)(const DBT *new_val, void *set_extra),
+ void *set_extra)
+{
+ DBT new_val;
+ assert(old_val->size > 0);
+ if (verbose) {
+ printf("applying update to %s\n", (char *)key->data);
+ }
+ toku_init_dbt(&new_val);
+ set_val(&new_val, set_extra);
+ return 0;
+}
+
+
+static void
+doit (void) {
+ BLOCKNUM node_leaf;
+ BLOCKNUM node_internal, node_root;
+
+ int r;
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ ft->options.update_fun = update_func;
+ ft->ft->update_fun = update_func;
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ char* pivots[1];
+ pivots[0] = toku_strdup("kkkkk");
+ int pivot_len = 6;
+
+ r = toku_testsetup_leaf(ft, &node_leaf, 2, pivots, &pivot_len);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(ft, 1, &node_internal, 1, &node_leaf, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(ft, 2, &node_root, 1, &node_internal, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_root(ft, node_root);
+ assert(r==0);
+
+ //
+ // at this point we have created a tree with a root, an internal node,
+ // and a leaf node with two basement nodes, the pivot being "kkkkk"
+ //
+
+ // now we insert a row into each basement node of the leaf
+ r = toku_testsetup_insert_to_leaf (
+ ft,
+ node_leaf,
+ "a", // key
+ 2, // keylen
+ "aa",
+ 3
+ );
+ assert(r==0);
+ r = toku_testsetup_insert_to_leaf (
+ ft,
+ node_leaf,
+ "z", // key
+ 2, // keylen
+ "zz",
+ 3
+ );
+ assert(r==0);
+ char filler[400];
+ memset(filler, 0, sizeof(filler));
+ // now we insert filler data so that the rebalance
+ // keeps it at two nodes
+ r = toku_testsetup_insert_to_leaf (
+ ft,
+ node_leaf,
+ "b", // key
+ 2, // keylen
+ filler,
+ sizeof(filler)
+ );
+ assert(r==0);
+ r = toku_testsetup_insert_to_leaf (
+ ft,
+ node_leaf,
+ "y", // key
+ 2, // keylen
+ filler,
+ sizeof(filler)
+ );
+ assert(r==0);
+
+ //
+ // now insert a bunch of dummy delete messages
+ // into the internal node, to get its cachepressure size up
+ //
+ for (int i = 0; i < 100000; i++) {
+ r = toku_testsetup_insert_to_nonleaf (
+ ft,
+ node_internal,
+ FT_DELETE_ANY,
+ "jj", // this key does not exist, so its message application should be a no-op
+ 3,
+ NULL,
+ 0
+ );
+ assert(r==0);
+ }
+
+ //
+ // now insert a broadcast message into the root
+ //
+ r = toku_testsetup_insert_to_nonleaf (
+ ft,
+ node_root,
+ FT_UPDATE_BROADCAST_ALL,
+ NULL,
+ 0,
+ NULL,
+ 0
+ );
+ assert(r==0);
+
+ // now lock and release the leaf node to make sure it is what we expect it to be.
+ FTNODE node = NULL;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft->ft);
+ toku_pin_ftnode_with_dep_nodes(
+ ft->ft,
+ node_leaf,
+ toku_cachetable_hash(ft->ft->cf, node_leaf),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ 0,
+ NULL,
+ &node,
+ true
+ );
+ assert(node->dirty());
+ assert(node->n_children == 2);
+ assert(BP_STATE(node,0) == PT_AVAIL);
+ assert(BP_STATE(node,1) == PT_AVAIL);
+ toku_unpin_ftnode(ft->ft, node);
+
+ // now do a lookup on one of the keys, this should bring a leaf node up to date
+ DBT k;
+ struct check_pair pair = {2, "a", 0, NULL, 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
+ assert(r==0);
+
+ //
+ // pin the leaf one more time
+ // and make sure that both basement
+ // nodes are still in memory (the node
+ // is dirty, so nothing has been evicted)
+ //
+ bfe.create_for_min_read(ft->ft);
+ toku_pin_ftnode_with_dep_nodes(
+ ft->ft,
+ node_leaf,
+ toku_cachetable_hash(ft->ft->cf, node_leaf),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ 0,
+ NULL,
+ &node,
+ true
+ );
+ assert(node->dirty());
+ assert(node->n_children == 2);
+ assert(BP_STATE(node,0) == PT_AVAIL);
+ assert(BP_STATE(node,1) == PT_AVAIL);
+ toku_unpin_ftnode(ft->ft, node);
+
+ //
+ // now let us induce a clean on the internal node
+ //
+ bfe.create_for_min_read(ft->ft);
+ toku_pin_ftnode_with_dep_nodes(
+ ft->ft,
+ node_internal,
+ toku_cachetable_hash(ft->ft->cf, node_internal),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ 0,
+ NULL,
+ &node,
+ true
+ );
+ assert(node->dirty());
+
+ // we expect that this flushes its buffer, that
+ // a merge is not done, and that the lookup
+ // of values "a" and "z" still works
+ r = toku_ftnode_cleaner_callback(
+ node,
+ node_internal,
+ toku_cachetable_hash(ft->ft->cf, node_internal),
+ ft->ft
+ );
+
+ // verify that node_internal's buffer is empty
+ bfe.create_for_min_read(ft->ft);
+ toku_pin_ftnode_with_dep_nodes(
+ ft->ft,
+ node_internal,
+ toku_cachetable_hash(ft->ft->cf, node_internal),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ 0,
+ NULL,
+ &node,
+ true
+ );
+ // check that buffers are empty
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
+ toku_unpin_ftnode(ft->ft, node);
+
+ //
+ // now run a checkpoint to get everything clean,
+ // and to get the rebalancing to happen
+ //
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+
+ // check that lookups on the two keys are still good
+ struct check_pair pair1 = {2, "a", 0, NULL, 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
+ assert(r==0);
+ struct check_pair pair2 = {2, "z", 0, NULL, 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2);
+ assert(r==0);
+
+
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ toku_free(pivots[0]);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ doit();
+ return 0;
+}
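The test above repeats the same pin / inspect / unpin sequence four times. A condensed sketch of that idiom follows; the helper name with_pinned_node is introduced here only for illustration (it is not part of the patch) and it reuses only calls that already appear above:

    // Sketch only: pin a node with a minimal-read fetch extra, run a
    // caller-supplied check on it, then unpin it again.
    static void with_pinned_node(FT_HANDLE h, BLOCKNUM b, void (*check)(FTNODE)) {
        FTNODE node = NULL;
        ftnode_fetch_extra bfe;
        bfe.create_for_min_read(h->ft);
        toku_pin_ftnode_with_dep_nodes(
            h->ft, b, toku_cachetable_hash(h->ft->cf, b),
            &bfe, PL_WRITE_EXPENSIVE, 0, NULL, &node, true);
        check(node);
        toku_unpin_ftnode(h->ft, node);
    }

With such a helper, each inspection block above reduces to one call plus its asserts.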
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-dump-ft.cc b/storage/tokudb/PerconaFT/ft/tests/test-dump-ft.cc
new file mode 100644
index 00000000..fee2ff52
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-dump-ft.cc
@@ -0,0 +1,72 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test the toku_dump_ft() call that is available in the debugger.
+#include <stdio.h>
+
+#include "test.h"
+
+static TOKUTXN const null_txn = 0;
+
+int
+test_main(int argc, const char *argv[]) {
+ default_parse_args (argc, argv);
+ const char *n = TOKU_TEST_FILENAME;
+ int r;
+ FT_HANDLE t;
+ CACHETABLE ct;
+ FILE *f = fopen("test-dump-ft.out", "w");
+ unlink(n);
+ assert(f);
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(n, 1, &t, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+ int i;
+ for (i=0; i<10000; i++) {
+ char key[100],val[100];
+ DBT k,v;
+ snprintf(key, 100, "key%d", i);
+ snprintf(val, 100, "val%d", i);
+ toku_ft_insert(t, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), null_txn);
+ }
+ r = toku_dump_ft(f, t); assert(r==0);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+ fclose(f);
+ toku_os_recursive_delete("test-dump-ft.out");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-flushes-on-cleaner.cc b/storage/tokudb/PerconaFT/ft/tests/test-flushes-on-cleaner.cc
new file mode 100644
index 00000000..f9d4d164
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-flushes-on-cleaner.cc
@@ -0,0 +1,331 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that the cleaner thread flushes an internal node's buffer after a checkpoint, and that lookups still see the right values whether or not both basement nodes are kept in memory. */
+
+
+#include "test.h"
+
+#include <ft-cachetable-wrappers.h>
+#include "ft-flusher.h"
+#include "cachetable/checkpoint.h"
+
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE ft;
+const char *fname = TOKU_TEST_FILENAME;
+
+static int update_func(
+ DB* UU(db),
+ const DBT* key,
+ const DBT* old_val,
+ const DBT* UU(extra),
+ void (*set_val)(const DBT *new_val, void *set_extra),
+ void *set_extra)
+{
+ DBT new_val;
+ assert(old_val->size > 0);
+ if (verbose) {
+ printf("applying update to %s\n", (char *)key->data);
+ }
+ toku_init_dbt(&new_val);
+ set_val(&new_val, set_extra);
+ return 0;
+}
+
+
+static void
+doit (bool keep_other_bn_in_memory) {
+ BLOCKNUM node_leaf;
+ BLOCKNUM node_internal, node_root;
+
+ int r;
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ ft->options.update_fun = update_func;
+ ft->ft->update_fun = update_func;
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ char* pivots[1];
+ pivots[0] = toku_strdup("kkkkk");
+ int pivot_len = 6;
+
+ r = toku_testsetup_leaf(ft, &node_leaf, 2, pivots, &pivot_len);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(ft, 1, &node_internal, 1, &node_leaf, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(ft, 2, &node_root, 1, &node_internal, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_root(ft, node_root);
+ assert(r==0);
+
+ //
+ // at this point we have created a tree with a root, an internal node,
+ // and a leaf node with two basement nodes, the pivot being "kkkkk"
+ //
+
+ // now we insert a row into each basement node of the leaf
+ r = toku_testsetup_insert_to_leaf (
+ ft,
+ node_leaf,
+ "a", // key
+ 2, // keylen
+ "aa",
+ 3
+ );
+ assert(r==0);
+ r = toku_testsetup_insert_to_leaf (
+ ft,
+ node_leaf,
+ "z", // key
+ 2, // keylen
+ "zz",
+ 3
+ );
+ assert(r==0);
+ char filler[400];
+ memset(filler, 0, sizeof(filler));
+ // now we insert filler data so that the rebalance
+ // keeps it at two nodes
+ r = toku_testsetup_insert_to_leaf (
+ ft,
+ node_leaf,
+ "b", // key
+ 2, // keylen
+ filler,
+ sizeof(filler)
+ );
+ assert(r==0);
+ r = toku_testsetup_insert_to_leaf (
+ ft,
+ node_leaf,
+ "y", // key
+ 2, // keylen
+ filler,
+ sizeof(filler)
+ );
+ assert(r==0);
+
+ //
+ // now insert a bunch of dummy delete messages
+ // into the internal node, to get its cachepressure size up
+ //
+ for (int i = 0; i < 100000; i++) {
+ r = toku_testsetup_insert_to_nonleaf (
+ ft,
+ node_internal,
+ FT_DELETE_ANY,
+ "jj", // this key does not exist, so its message application should be a no-op
+ 3,
+ NULL,
+ 0
+ );
+ assert(r==0);
+ }
+
+ //
+ // now insert a broadcast message into the root
+ //
+ r = toku_testsetup_insert_to_nonleaf (
+ ft,
+ node_root,
+ FT_UPDATE_BROADCAST_ALL,
+ NULL,
+ 0,
+ NULL,
+ 0
+ );
+ assert(r==0);
+
+ //
+ // now run a checkpoint to get everything clean
+ //
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+ // now lock and release the leaf node to make sure it is what we expect it to be.
+ FTNODE node = NULL;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft->ft);
+ toku_pin_ftnode(
+ ft->ft,
+ node_leaf,
+ toku_cachetable_hash(ft->ft->cf, node_leaf),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(!node->dirty());
+ assert(node->n_children == 2);
+ // a hack to get the basement nodes evicted
+ for (int i = 0; i < 20; i++) {
+ toku_ftnode_pe_callback(node, make_pair_attr(0xffffffff), ft->ft, def_pe_finalize_impl, nullptr);
+ }
+ // this ensures that when we do the lookups below,
+ // the data is read off disk
+ assert(BP_STATE(node,0) == PT_ON_DISK);
+ assert(BP_STATE(node,1) == PT_ON_DISK);
+ toku_unpin_ftnode(ft->ft, node);
+
+ // now do a lookup on one of the keys, this should bring a leaf node up to date
+ DBT k;
+ struct check_pair pair = {2, "a", 0, NULL, 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
+ assert(r==0);
+
+ if (keep_other_bn_in_memory) {
+ //
+ // pin the leaf one more time
+ // and make sure that
+ // both basement nodes are in memory,
+ // but only one should have the broadcast
+ // message applied.
+ //
+ bfe.create_for_full_read(ft->ft);
+ }
+ else {
+ //
+ // pin the leaf one more time
+ // and make sure that one basement
+ // node is in memory and another is
+ // on disk
+ //
+ bfe.create_for_min_read(ft->ft);
+ }
+ toku_pin_ftnode(
+ ft->ft,
+ node_leaf,
+ toku_cachetable_hash(ft->ft->cf, node_leaf),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(!node->dirty());
+ assert(node->n_children == 2);
+ assert(BP_STATE(node,0) == PT_AVAIL);
+ if (keep_other_bn_in_memory) {
+ assert(BP_STATE(node,1) == PT_AVAIL);
+ }
+ else {
+ assert(BP_STATE(node,1) == PT_ON_DISK);
+ }
+ toku_unpin_ftnode(ft->ft, node);
+
+ //
+ // now let us induce a clean on the internal node
+ //
+ bfe.create_for_min_read(ft->ft);
+ toku_pin_ftnode(
+ ft->ft,
+ node_internal,
+ toku_cachetable_hash(ft->ft->cf, node_internal),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(!node->dirty());
+
+ // we expect that this flushes its buffer, that
+ // a merge is not done, and that the lookup
+ // of values "a" and "z" still works
+ r = toku_ftnode_cleaner_callback(
+ node,
+ node_internal,
+ toku_cachetable_hash(ft->ft->cf, node_internal),
+ ft->ft
+ );
+
+ // verify that node_internal's buffer is empty
+ bfe.create_for_min_read(ft->ft);
+ toku_pin_ftnode(
+ ft->ft,
+ node_internal,
+ toku_cachetable_hash(ft->ft->cf, node_internal),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ // check that buffers are empty
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
+ toku_unpin_ftnode(ft->ft, node);
+
+ //
+ // now run a checkpoint to get everything clean,
+ // and to get the rebalancing to happen
+ //
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+
+ // check that lookups on the two keys are still good
+ struct check_pair pair1 = {2, "a", 0, NULL, 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
+ assert(r==0);
+ struct check_pair pair2 = {2, "z", 0, NULL, 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2);
+ assert(r==0);
+
+
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ toku_free(pivots[0]);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ doit(false);
+ doit(true);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-ft-overflow.cc b/storage/tokudb/PerconaFT/ft/tests/test-ft-overflow.cc
new file mode 100644
index 00000000..724a44c6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-ft-overflow.cc
@@ -0,0 +1,85 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Test an overflow condition on the leaf. See #632. */
+
+
+#include "test.h"
+
+
+static const char *fname = TOKU_TEST_FILENAME;
+
+static TOKUTXN const null_txn = 0;
+
+static void
+test_overflow (void) {
+ FT_HANDLE t;
+ CACHETABLE ct;
+ uint32_t nodesize = 1<<20;
+ int r;
+ unlink(fname);
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &t, nodesize, nodesize / 8, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ DBT k,v;
+ uint32_t vsize = nodesize/8;
+ char buf[vsize];
+ memset(buf, 'a', vsize);
+ int i;
+ for (i=0; i<8; i++) {
+ char key[]={(char)('a'+i), 0};
+ toku_ft_insert(t, toku_fill_dbt(&k, key, 2), toku_fill_dbt(&v,buf,sizeof(buf)), null_txn);
+ }
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ int i;
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (0 == strcmp(arg, "-v") || 0 == strcmp(arg, "--verbose"))
+ verbose = 1;
+ else if (0 == strcmp(arg, "-q") || 0 == strcmp(arg, "--quiet"))
+ verbose = 0;
+ }
+ test_overflow();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-ft-txns.h b/storage/tokudb/PerconaFT/ft/tests/test-ft-txns.h
new file mode 100644
index 00000000..e62b1ca3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-ft-txns.h
@@ -0,0 +1,127 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+static inline void
+test_setup(const char *envdir, TOKULOGGER *loggerp, CACHETABLE *ctp) {
+ *loggerp = NULL;
+ *ctp = NULL;
+ int r;
+ toku_os_recursive_delete(envdir);
+ r = toku_os_mkdir(envdir, S_IRWXU);
+ CKERR(r);
+
+ r = toku_logger_create(loggerp);
+ CKERR(r);
+ TOKULOGGER logger = *loggerp;
+
+ r = toku_logger_open(envdir, logger);
+ CKERR(r);
+
+ toku_cachetable_create(ctp, 0, ZERO_LSN, logger);
+ CACHETABLE ct = *ctp;
+ toku_cachetable_set_env_dir(ct, envdir);
+
+ toku_logger_set_cachetable(logger, ct);
+
+ r = toku_logger_open_rollback(logger, ct, true);
+ CKERR(r);
+
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(*ctp);
+ r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, STARTUP_CHECKPOINT);
+ CKERR(r);
+}
+
+static inline void
+xid_lsn_keep_cachetable_callback (DB_ENV *env, CACHETABLE cachetable) {
+ CACHETABLE *CAST_FROM_VOIDP(ctp, (void *) env);
+ *ctp = cachetable;
+}
+
+static inline void test_setup_and_recover(const char *envdir, TOKULOGGER *loggerp, CACHETABLE *ctp) {
+ int r;
+ TOKULOGGER logger = NULL;
+ CACHETABLE ct = NULL;
+ r = toku_logger_create(&logger);
+ CKERR(r);
+
+ DB_ENV *CAST_FROM_VOIDP(ctv, (void *) &ct); // Use intermediate to avoid compiler warning.
+ r = tokuft_recover(ctv,
+ NULL_prepared_txn_callback,
+ xid_lsn_keep_cachetable_callback,
+ logger,
+ envdir, envdir, 0, 0, 0, NULL, 0);
+ CKERR(r);
+ if (!toku_logger_is_open(logger)) {
+ //Did not need recovery.
+ invariant(ct==NULL);
+ r = toku_logger_open(envdir, logger);
+ CKERR(r);
+ toku_cachetable_create(&ct, 0, ZERO_LSN, logger);
+ toku_logger_set_cachetable(logger, ct);
+ }
+ *ctp = ct;
+ *loggerp = logger;
+}
+
+static inline void clean_shutdown(TOKULOGGER *loggerp, CACHETABLE *ctp) {
+ int r;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(*ctp);
+ r = toku_checkpoint(cp, *loggerp, NULL, NULL, NULL, NULL, SHUTDOWN_CHECKPOINT);
+ CKERR(r);
+
+ toku_logger_close_rollback(*loggerp);
+
+ r = toku_checkpoint(cp, *loggerp, NULL, NULL, NULL, NULL, SHUTDOWN_CHECKPOINT);
+ CKERR(r);
+
+ toku_logger_shutdown(*loggerp);
+
+ toku_cachetable_close(ctp);
+
+ r = toku_logger_close(loggerp);
+ CKERR(r);
+}
+
+static inline void shutdown_after_recovery(TOKULOGGER *loggerp, CACHETABLE *ctp) {
+ toku_logger_close_rollback(*loggerp);
+ toku_cachetable_close(ctp);
+ int r = toku_logger_close(loggerp);
+ CKERR(r);
+}
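The four helpers in this header bracket a logger/cachetable test. A sketch of the intended call sequence, not part of the patch; the directory name dir.ft-txn-test is illustrative only:

    TOKULOGGER logger;
    CACHETABLE ct;
    test_setup("dir.ft-txn-test", &logger, &ct);               // fresh env dir, logger, cachetable, rollback log
    // ... perform logged operations against ct and logger ...
    clean_shutdown(&logger, &ct);                              // shutdown checkpoints, then close everything
    test_setup_and_recover("dir.ft-txn-test", &logger, &ct);   // reopen, replaying the log if recovery is needed
    shutdown_after_recovery(&logger, &ct);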
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-hot-with-bounds.cc b/storage/tokudb/PerconaFT/ft/tests/test-hot-with-bounds.cc
new file mode 100644
index 00000000..b0247db5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-hot-with-bounds.cc
@@ -0,0 +1,187 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that a hot optimize restricted to a key range only flushes the buffers whose keys fall inside that range. */
+
+
+#include "test.h"
+
+#include <ft-cachetable-wrappers.h>
+#include "ft-flusher.h"
+#include "ft-flusher-internal.h"
+#include "cachetable/checkpoint.h"
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+static void
+doit (void) {
+ BLOCKNUM node_leaf[3];
+ BLOCKNUM node_root;
+
+ CACHETABLE ct;
+ FT_HANDLE t;
+
+ int r;
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink(TOKU_TEST_FILENAME);
+ r = toku_open_ft_handle(TOKU_TEST_FILENAME, 1, &t, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ r = toku_testsetup_leaf(t, &node_leaf[0], 1, NULL, NULL);
+ assert(r==0);
+ r = toku_testsetup_leaf(t, &node_leaf[1], 1, NULL, NULL);
+ assert(r==0);
+ r = toku_testsetup_leaf(t, &node_leaf[2], 1, NULL, NULL);
+ assert(r==0);
+
+ int keylens[2];
+ keylens[0] = 2;
+ keylens[1] = 2;
+ char first[2];
+ first[0] = 'f';
+ first[1] = 0;
+ char second[2];
+ second[0] = 'p';
+ second[1] = 0;
+
+ char* keys[2];
+ keys[0] = first;
+ keys[1] = second;
+ r = toku_testsetup_nonleaf(t, 1, &node_root, 3, node_leaf, keys, keylens);
+ assert(r==0);
+
+ r = toku_testsetup_root(t, node_root);
+ assert(r==0);
+
+
+ r = toku_testsetup_insert_to_nonleaf(
+ t,
+ node_root,
+ FT_INSERT,
+ "a",
+ 2,
+ NULL,
+ 0
+ );
+ r = toku_testsetup_insert_to_nonleaf(
+ t,
+ node_root,
+ FT_INSERT,
+ "m",
+ 2,
+ NULL,
+ 0
+ );
+
+ r = toku_testsetup_insert_to_nonleaf(
+ t,
+ node_root,
+ FT_INSERT,
+ "z",
+ 2,
+ NULL,
+ 0
+ );
+
+
+ // at this point, we have inserted three messages into
+ // the root, one in each buffer, let's verify this.
+
+ FTNODE node = NULL;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
+ toku_pin_ftnode(
+ t->ft,
+ node_root,
+ toku_cachetable_hash(t->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 3);
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) > 0);
+ assert(toku_bnc_nbytesinbuf(BNC(node, 1)) > 0);
+ assert(toku_bnc_nbytesinbuf(BNC(node, 2)) > 0);
+ toku_unpin_ftnode(t->ft, node);
+
+ // now let's run a hot optimize, that should only flush the middle buffer
+ DBT left;
+ toku_fill_dbt(&left, "g", 2);
+ DBT right;
+ toku_fill_dbt(&right, "n", 2);
+ uint64_t loops_run = 0;
+ r = toku_ft_hot_optimize(t, &left, &right, NULL, NULL, &loops_run);
+ assert(r==0);
+
+ // at this point, we should have flushed
+ // only the middle buffer; let's verify this.
+ node = NULL;
+ bfe.create_for_min_read(t->ft);
+ toku_pin_ftnode(
+ t->ft,
+ node_root,
+ toku_cachetable_hash(t->ft->cf, node_root),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 3);
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) > 0);
+ assert(toku_bnc_nbytesinbuf(BNC(node, 1)) == 0);
+ assert(toku_bnc_nbytesinbuf(BNC(node, 2)) > 0);
+ toku_unpin_ftnode(t->ft, node);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ doit();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-inc-split.cc b/storage/tokudb/PerconaFT/ft/tests/test-inc-split.cc
new file mode 100644
index 00000000..28a4443a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-inc-split.cc
@@ -0,0 +1,185 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: Make sure that when we aggressively promote,
+ * we don't get a fencepost error on the size. (#399, I think)
+
+ *
+ * For various values of I do the following:
+ *
+ * Make a tree of height 3 (that is, the root is of height 2)
+ * use small nodes (say 4KB)
+ * you have this tree:
+ * A
+ * B
+ * C0 C1 C2 .. C15
+ * A has only one child. B has as many children as it can get.
+ * Fill the C nodes (the leaves) all almost full.
+ * Fill B's buffer up with a big message X for C15, and a slightly smaller message Y for C1.
+ * Put into A's buffer a little message Z aimed at C0.
+ * Now we insert a message of size I aimed at C0. I and Z together are too big to fit in A.
+ * First: X will be pushed into C15, resulting in this split
+ * A
+ * B0
+ * C0 C1 ... C8
+ * B1
+ * C9 C10 ... C15 C16
+ * At this point C0 through C14 are full, Y is in B0's buffer, and A's buffer contains I and Z.
+ * So we try to push Z if it fits. Which it does.
+ * So then we try to push I if it fits. If we calculated wrong, everything breaks now.
+ *
+ */
+
+#include "test.h"
+
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE t;
+const char *fname = TOKU_TEST_FILENAME;
+
+static void
+doit (int ksize __attribute__((__unused__))) {
+ BLOCKNUM cnodes[16], bnode, anode;
+
+ char *keys[16-1];
+ int keylens[16-1];
+ int i;
+ int r;
+
+ toku_cachetable_create(&ct, 16*1024, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, NODESIZE, NODESIZE, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ for (i=0; i<16; i++) {
+ r=toku_testsetup_leaf(t, &cnodes[i], 1, NULL, NULL);
+ assert(r==0);
+ char key[KSIZE+10];
+ int keylen = 1+snprintf(key, KSIZE, "%08d%0*d", i*10000+1, KSIZE-9, 0);
+ char val[1];
+ char vallen=0;
+ r=toku_testsetup_insert_to_leaf(t, cnodes[i], key, keylen, val, vallen);
+ assert(r==0);
+ }
+
+ // Now we have a bunch of leaves, all of which are within 100 bytes of full.
+ for (i=0; i+1<16; i++) {
+ char key[TOKU_PSIZE];
+ keylens[i]=1+snprintf(key, TOKU_PSIZE, "%08d", (i+1)*10000);
+ keys[i]=toku_strdup(key);
+ }
+
+ r = toku_testsetup_nonleaf(t, 1, &bnode, 16, cnodes, keys, keylens);
+ assert(r==0);
+
+ for (i=0; i+1<16; i++) {
+ toku_free(keys[i]);
+ }
+
+ {
+ const int magic_size = (NODESIZE-toku_testsetup_get_sersize(t, bnode))/2-25;
+ //printf("magic_size=%d\n", magic_size);
+ char key [KSIZE];
+ int keylen = 1+snprintf(key, KSIZE, "%08d%0*d", 150002, magic_size, 0);
+ char val[1];
+ char vallen=0;
+ r=toku_testsetup_insert_to_nonleaf(t, bnode, FT_INSERT, key, keylen, val, vallen);
+
+ keylen = 1+snprintf(key, KSIZE, "%08d%0*d", 2, magic_size-1, 0);
+ r=toku_testsetup_insert_to_nonleaf(t, bnode, FT_INSERT, key, keylen, val, vallen);
+ }
+ //printf("%lld sersize=%d\n", bnode, toku_testsetup_get_sersize(t, bnode));
+ // Now we have an internal node which has full children and the buffers are nearly full
+
+ r = toku_testsetup_nonleaf(t, 2, &anode, 1, &bnode, 0, 0);
+ assert(r==0);
+ {
+ char key[20];
+ int keylen = 1+snprintf(key, 20, "%08d", 3);
+ char val[1];
+ char vallen=0;
+ r=toku_testsetup_insert_to_nonleaf(t, anode, FT_INSERT, key, keylen, val, vallen);
+ }
+ if (0)
+ {
+ const int magic_size = 1; //NODESIZE-toku_testsetup_get_sersize(t, anode)-100;
+ DBT k,v;
+ char key[20];
+ char data[magic_size];
+ int keylen=1+snprintf(key, sizeof(key), "%08d", 4);
+ int vallen=magic_size;
+ snprintf(data, magic_size, "%*s", magic_size-1, " ");
+ toku_ft_insert(t,
+ toku_fill_dbt(&k, key, keylen),
+ toku_fill_dbt(&v, data, vallen),
+ null_txn);
+ }
+
+ r = toku_testsetup_root(t, anode);
+ assert(r==0);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ //printf("ksize=%d, unused\n", ksize);
+
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ doit(53);
+#if 0
+ //Skip remaining tests.
+{
+ int i;
+
+ for (i=1; i<NODESIZE/2; i++) {
+ printf("extrasize=%d\n", i);
+ doit(i);
+ }
+}
+#endif
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-leafentry-child-txn.cc b/storage/tokudb/PerconaFT/ft/tests/test-leafentry-child-txn.cc
new file mode 100644
index 00000000..2c5b9dad
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-leafentry-child-txn.cc
@@ -0,0 +1,153 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <string.h>
+
+#include "test.h"
+
+#include "ft/ule.h"
+#include "ft/ule-internal.h"
+
+static void init_empty_ule(ULE ule) {
+ ule->num_cuxrs = 0;
+ ule->num_puxrs = 0;
+ ule->uxrs = ule->uxrs_static;
+}
+
+static void add_committed_entry(ULE ule, DBT *val, TXNID xid) {
+ uint32_t index = ule->num_cuxrs;
+ ule->num_cuxrs++;
+ ule->uxrs[index].type = XR_INSERT;
+ ule->uxrs[index].vallen = val->size;
+ ule->uxrs[index].valp = val->data;
+ ule->uxrs[index].xid = xid;
+}
+
+//Test how commit, abort, and insert messages apply to a committed
+//leafentry whose innermost committed TXNID matches the message's innermost (child) xid.
+static void
+run_test(void) {
+ ULE_S ule_initial;
+ ULE ule = &ule_initial;
+ ule_initial.uxrs = ule_initial.uxrs_static;
+ int r;
+ DBT key;
+ DBT val;
+ uint64_t key_data = 1;
+ uint64_t val_data_one = 1;
+ uint64_t val_data_two = 2;
+ uint64_t val_data_three = 3;
+ uint32_t keysize = 8;
+ uint32_t valsize = 8;
+
+ toku_fill_dbt(&key, &key_data, keysize);
+ toku_fill_dbt(&val, &val_data_one, valsize);
+
+ // test case where we apply a message and the innermost child_id
+ // is the same as the innermost committed TXNID
+ XIDS root_xids = toku_xids_get_root_xids();
+ TXNID root_txnid = 1000;
+ TXNID child_id = 10;
+ XIDS msg_xids_1;
+ XIDS msg_xids_2;
+ r = toku_xids_create_child(root_xids, &msg_xids_1, root_txnid);
+ assert(r==0);
+ r = toku_xids_create_child(msg_xids_1, &msg_xids_2, child_id);
+ assert(r==0);
+
+ init_empty_ule(&ule_initial);
+ add_committed_entry(&ule_initial, &val, 0);
+ val.data = &val_data_two;
+ // make the TXNID match the child id of xids
+ add_committed_entry(&ule_initial, &val, 10);
+
+ // now do the application of xids to the ule
+ // do a commit
+ {
+ ft_msg msg(&key, &val, FT_COMMIT_ANY, ZERO_MSN, msg_xids_2);
+ test_msg_modify_ule(&ule_initial, msg);
+ assert(ule->num_cuxrs == 2);
+ assert(ule->uxrs[0].xid == TXNID_NONE);
+ assert(ule->uxrs[1].xid == 10);
+ assert(ule->uxrs[0].valp == &val_data_one);
+ assert(ule->uxrs[1].valp == &val_data_two);
+ }
+
+ // do an abort
+ {
+ ft_msg msg(&key, &val, FT_ABORT_ANY, ZERO_MSN, msg_xids_2);
+ test_msg_modify_ule(&ule_initial, msg);
+ assert(ule->num_cuxrs == 2);
+ assert(ule->uxrs[0].xid == TXNID_NONE);
+ assert(ule->uxrs[1].xid == 10);
+ assert(ule->uxrs[0].valp == &val_data_one);
+ assert(ule->uxrs[1].valp == &val_data_two);
+ }
+
+ // do an insert
+ val.data = &val_data_three;
+ {
+ ft_msg msg(&key, &val, FT_INSERT, ZERO_MSN, msg_xids_2);
+ test_msg_modify_ule(&ule_initial, msg);
+ // now that message applied, verify that things are good
+ assert(ule->num_cuxrs == 2);
+ assert(ule->num_puxrs == 2);
+ assert(ule->uxrs[0].xid == TXNID_NONE);
+ assert(ule->uxrs[1].xid == 10);
+ assert(ule->uxrs[2].xid == 1000);
+ assert(ule->uxrs[3].xid == 10);
+ assert(ule->uxrs[0].valp == &val_data_one);
+ assert(ule->uxrs[1].valp == &val_data_two);
+ assert(ule->uxrs[2].type == XR_PLACEHOLDER);
+ assert(ule->uxrs[3].valp == &val_data_three);
+ }
+
+ toku_xids_destroy(&msg_xids_2);
+ toku_xids_destroy(&msg_xids_1);
+ toku_xids_destroy(&root_xids);
+
+}
+
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ run_test();
+ return 0;
+}
+
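The nested-transaction stack built in run_test() above is what produces the committed/provisional split that the asserts check. A standalone sketch of just the XIDS construction, not part of the patch; it mirrors the [1000, 10] stack used above and reuses only calls that appear in this patch:

    // Sketch only: build a two-level transaction stack (root txn 1000,
    // child txn 10) and read it back.
    XIDS root = toku_xids_get_root_xids();
    XIDS outer = NULL, inner = NULL;
    int r = toku_xids_create_child(root, &outer, 1000);
    assert(r == 0);
    r = toku_xids_create_child(outer, &inner, 10);
    assert(r == 0);
    assert(toku_xids_get_num_xids(inner) == 2);        // two live xids on the stack
    assert(toku_xids_get_innermost_xid(inner) == 10);  // the child transaction
    toku_xids_destroy(&inner);
    toku_xids_destroy(&outer);
    toku_xids_destroy(&root);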
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-leafentry-nested.cc b/storage/tokudb/PerconaFT/ft/tests/test-leafentry-nested.cc
new file mode 100644
index 00000000..f2004964
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-leafentry-nested.cc
@@ -0,0 +1,999 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <string.h>
+
+#include "test.h"
+
+#include "ft/ule.h"
+#include "ft/ule-internal.h"
+
+enum {MAX_SIZE = 256};
+static XIDS nested_xids[MAX_TRANSACTION_RECORDS];
+
+static void
+verify_ule_equal(ULE a, ULE b) {
+ assert(a->num_cuxrs > 0);
+ assert(a->num_puxrs < MAX_TRANSACTION_RECORDS);
+ assert(a->num_cuxrs == b->num_cuxrs);
+ assert(a->num_puxrs == b->num_puxrs);
+ uint32_t i;
+ for (i = 0; i < (a->num_cuxrs + a->num_puxrs); i++) {
+ assert(a->uxrs[i].type == b->uxrs[i].type);
+ assert(a->uxrs[i].xid == b->uxrs[i].xid);
+ if (a->uxrs[i].type == XR_INSERT) {
+ assert(a->uxrs[i].vallen == b->uxrs[i].vallen);
+ assert(memcmp(a->uxrs[i].valp, b->uxrs[i].valp, a->uxrs[i].vallen) == 0);
+ }
+ }
+}
+
+static void
+verify_le_equal(LEAFENTRY a, LEAFENTRY b) {
+ if (a==NULL) assert(b==NULL);
+ else {
+ assert(b!=NULL);
+
+ size_t size = leafentry_memsize(a);
+ assert(size==leafentry_memsize(b));
+
+ assert(memcmp(a, b, size) == 0);
+
+ ULE_S ule_a;
+ ULE_S ule_b;
+
+ le_unpack(&ule_a, a);
+ le_unpack(&ule_b, b);
+ verify_ule_equal(&ule_a, &ule_b);
+ ule_cleanup(&ule_a);
+ ule_cleanup(&ule_b);
+ }
+}
+
+static void
+fillrandom(uint8_t buf[MAX_SIZE], uint32_t length) {
+ assert(length < MAX_SIZE);
+ uint32_t i;
+ for (i = 0; i < length; i++) {
+ buf[i] = random() & 0xFF;
+ }
+}
+
+static void
+test_le_offset_is(LEAFENTRY le, void *field, size_t expected_offset) {
+ size_t le_address = (size_t) le;
+ size_t field_address = (size_t) field;
+ assert(field_address >= le_address);
+ size_t actual_offset = field_address - le_address;
+ assert(actual_offset == expected_offset);
+}
+
+//Fixed offsets in a packed leafentry.
+enum {
+ LE_OFFSET_NUM = 0,
+ LE_OFFSET_VARIABLE = 1+LE_OFFSET_NUM
+};
+
+static void
+test_le_fixed_offsets (void) {
+ LEAFENTRY XMALLOC(le);
+ test_le_offset_is(le, &le->type, LE_OFFSET_NUM);
+ toku_free(le);
+}
+
+//Fixed offsets in a leafentry with no uncommitted transaction records.
+//(Note, there is no type required.)
+enum {
+ LE_COMMITTED_OFFSET_VALLEN = LE_OFFSET_VARIABLE,
+ LE_COMMITTED_OFFSET_VAL = 4 + LE_COMMITTED_OFFSET_VALLEN
+};
+
+static void
+test_le_committed_offsets (void) {
+ LEAFENTRY XMALLOC(le);
+ test_le_offset_is(le, &le->u.clean.vallen, LE_COMMITTED_OFFSET_VALLEN);
+ test_le_offset_is(le, &le->u.clean.val, LE_COMMITTED_OFFSET_VAL);
+ toku_free(le);
+}
+
+//Fixed offsets in a leafentry with uncommitted transaction records.
+enum {
+ LE_MVCC_OFFSET_NUM_CUXRS = LE_OFFSET_VARIABLE, //Number of committed transaction records (uint32_t)
+ LE_MVCC_OFFSET_NUM_PUXRS = 4+LE_MVCC_OFFSET_NUM_CUXRS, //Number of provisional (uncommitted) transaction records (uint8_t)
+ LE_MVCC_OFFSET_XRS = 1+LE_MVCC_OFFSET_NUM_PUXRS
+};
+
+static void
+test_le_provisional_offsets (void) {
+ LEAFENTRY XMALLOC(le);
+ test_le_offset_is(le, &le->u.mvcc.num_cxrs, LE_MVCC_OFFSET_NUM_CUXRS);
+ test_le_offset_is(le, &le->u.mvcc.num_pxrs, LE_MVCC_OFFSET_NUM_PUXRS);
+ test_le_offset_is(le, &le->u.mvcc.xrs, LE_MVCC_OFFSET_XRS);
+ toku_free(le);
+}
+
+//We use a packed struct to represent a leafentry.
+//We want to make sure the compiler correctly represents the offsets.
+//This test verifies all offsets in a packed leafentry correspond to the required memory format.
+static void
+test_le_offsets (void) {
+ test_le_fixed_offsets();
+ test_le_committed_offsets();
+ test_le_provisional_offsets();
+}
+
+static void
+test_ule_packs_to_nothing (ULE ule) {
+ LEAFENTRY le;
+ int r = le_pack(ule, NULL, 0, NULL, 0, 0, 0, &le, nullptr);
+ assert(r==0);
+ assert(le==NULL);
+}
+
+//A leafentry must contain at least one 'insert' (all deletes means the leafentry
+//should not exist).
+//Verify that 'le_pack' of any set of all deletes ends up not creating a leafentry.
+static void
+test_le_empty_packs_to_nothing (void) {
+ ULE_S ule;
+ ule.uxrs = ule.uxrs_static;
+
+ //Set up defaults.
+ int committed;
+ for (committed = 1; committed < MAX_TRANSACTION_RECORDS; committed++) {
+ int32_t num_xrs;
+
+ for (num_xrs = committed; num_xrs < MAX_TRANSACTION_RECORDS; num_xrs++) {
+ ule.num_cuxrs = committed;
+ ule.num_puxrs = num_xrs - committed;
+ if (num_xrs == 1) {
+ ule.uxrs[num_xrs-1].xid = TXNID_NONE;
+ }
+ else {
+ ule.uxrs[num_xrs-1].xid = ule.uxrs[num_xrs-2].xid + (random() % 32 + 1); //Arbitrary number, xids must be strictly increasing
+ }
+ ule.uxrs[num_xrs-1].type = XR_DELETE;
+ test_ule_packs_to_nothing(&ule);
+ if (num_xrs > 2 && num_xrs > committed && num_xrs % 4) {
+ //Set some of them to placeholders instead of deletes
+ ule.uxrs[num_xrs-2].type = XR_PLACEHOLDER;
+ }
+ test_ule_packs_to_nothing(&ule);
+ }
+ }
+
+}
+
+static void
+le_verify_accessors(LEAFENTRY le, ULE ule, size_t pre_calculated_memsize) {
+ assert(le);
+ assert(ule->num_cuxrs > 0);
+ assert(ule->num_puxrs <= MAX_TRANSACTION_RECORDS);
+ assert(ule->uxrs[ule->num_cuxrs + ule->num_puxrs-1].type != XR_PLACEHOLDER);
+ //Extract expected values from ULE
+ size_t memsize = le_memsize_from_ule(ule);
+ size_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
+
+ void *latest_val = ule->uxrs[num_uxrs -1].type == XR_DELETE ? NULL : ule->uxrs[num_uxrs -1].valp;
+ uint32_t latest_vallen = ule->uxrs[num_uxrs -1].type == XR_DELETE ? 0 : ule->uxrs[num_uxrs -1].vallen;
+ {
+ int i;
+ for (i = num_uxrs - 1; i >= 0; i--) {
+ if (ule->uxrs[i].type == XR_INSERT) {
+ goto found_insert;
+ }
+ }
+ assert(false);
+ }
+found_insert:;
+ TXNID outermost_uncommitted_xid = ule->num_puxrs == 0 ? TXNID_NONE : ule->uxrs[ule->num_cuxrs].xid;
+ int is_provdel = ule->uxrs[num_uxrs-1].type == XR_DELETE;
+
+ assert(le!=NULL);
+ //Verify all accessors
+ assert(memsize == pre_calculated_memsize);
+ assert(memsize == leafentry_memsize(le));
+ {
+ uint32_t test_vallen;
+ void* test_valp = le_latest_val_and_len(le, &test_vallen);
+ if (latest_val != NULL) assert(test_valp != latest_val);
+ assert(test_vallen == latest_vallen);
+ assert(memcmp(test_valp, latest_val, test_vallen) == 0);
+ assert(le_latest_val(le) == test_valp);
+ assert(le_latest_vallen(le) == test_vallen);
+ }
+ {
+ assert(le_outermost_uncommitted_xid(le) == outermost_uncommitted_xid);
+ }
+ {
+ assert((le_latest_is_del(le)==0) == (is_provdel==0));
+ }
+}
+
+
+
+static void
+test_le_pack_committed (void) {
+ ULE_S ule;
+ ule.uxrs = ule.uxrs_static;
+
+ uint8_t val[MAX_SIZE];
+ uint32_t valsize;
+ for (valsize = 0; valsize < MAX_SIZE; valsize += (random() % MAX_SIZE) + 1) {
+ fillrandom(val, valsize);
+
+ ule.num_cuxrs = 1;
+ ule.num_puxrs = 0;
+ ule.uxrs[0].type = XR_INSERT;
+ ule.uxrs[0].xid = 0;
+ ule.uxrs[0].valp = val;
+ ule.uxrs[0].vallen = valsize;
+
+ size_t memsize;
+ LEAFENTRY le;
+ int r = le_pack(&ule, nullptr, 0, nullptr, 0, 0, 0, &le, nullptr);
+ assert(r==0);
+ assert(le!=NULL);
+ memsize = le_memsize_from_ule(&ule);
+ le_verify_accessors(le, &ule, memsize);
+ ULE_S tmp_ule;
+ le_unpack(&tmp_ule, le);
+ verify_ule_equal(&ule, &tmp_ule);
+ LEAFENTRY tmp_le;
+ size_t tmp_memsize;
+ r = le_pack(&tmp_ule, nullptr, 0, nullptr, 0, 0, 0, &tmp_le, nullptr);
+ tmp_memsize = le_memsize_from_ule(&tmp_ule);
+ assert(r==0);
+ assert(tmp_memsize == memsize);
+ assert(memcmp(le, tmp_le, memsize) == 0);
+ le_verify_accessors(tmp_le, &tmp_ule, tmp_memsize);
+
+ toku_free(tmp_le);
+ toku_free(le);
+ ule_cleanup(&tmp_ule);
+ }
+}
+
+static void
+test_le_pack_uncommitted (uint8_t committed_type, uint8_t prov_type, int num_placeholders) {
+ ULE_S ule;
+ ule.uxrs = ule.uxrs_static;
+ assert(num_placeholders >= 0);
+
+ uint8_t cval[MAX_SIZE];
+ uint8_t pval[MAX_SIZE];
+ uint32_t cvalsize;
+ uint32_t pvalsize;
+ for (cvalsize = 0; cvalsize < MAX_SIZE; cvalsize += (random() % MAX_SIZE) + 1) {
+ pvalsize = (cvalsize + random()) % MAX_SIZE;
+ if (committed_type == XR_INSERT)
+ fillrandom(cval, cvalsize);
+ if (prov_type == XR_INSERT)
+ fillrandom(pval, pvalsize);
+ ule.uxrs[0].type = committed_type;
+ ule.uxrs[0].xid = TXNID_NONE;
+ ule.uxrs[0].vallen = cvalsize;
+ ule.uxrs[0].valp = cval;
+ ule.num_cuxrs = 1;
+ ule.num_puxrs = 1 + num_placeholders;
+
+ uint32_t idx;
+ for (idx = 1; idx <= (uint32_t)num_placeholders; idx++) {
+ ule.uxrs[idx].type = XR_PLACEHOLDER;
+ ule.uxrs[idx].xid = ule.uxrs[idx-1].xid + (random() % 32 + 1); //Arbitrary number, xids must be strictly increasing
+ }
+ ule.uxrs[idx].xid = ule.uxrs[idx-1].xid + (random() % 32 + 1); //Arbitrary number, xids must be strictly increasing
+ ule.uxrs[idx].type = prov_type;
+ ule.uxrs[idx].vallen = pvalsize;
+ ule.uxrs[idx].valp = pval;
+
+ size_t memsize;
+ LEAFENTRY le;
+ int r = le_pack(&ule, nullptr, 0, nullptr, 0, 0, 0, &le, nullptr);
+ assert(r==0);
+ assert(le!=NULL);
+ memsize = le_memsize_from_ule(&ule);
+ le_verify_accessors(le, &ule, memsize);
+ ULE_S tmp_ule;
+ le_unpack(&tmp_ule, le);
+ verify_ule_equal(&ule, &tmp_ule);
+ LEAFENTRY tmp_le;
+ size_t tmp_memsize;
+ r = le_pack(&tmp_ule, nullptr, 0, nullptr, 0, 0, 0, &tmp_le, nullptr);
+ tmp_memsize = le_memsize_from_ule(&tmp_ule);
+ assert(r==0);
+ assert(tmp_memsize == memsize);
+ assert(memcmp(le, tmp_le, memsize) == 0);
+ le_verify_accessors(tmp_le, &tmp_ule, tmp_memsize);
+
+ toku_free(tmp_le);
+ toku_free(le);
+ ule_cleanup(&tmp_ule);
+ }
+}
+
+static void
+test_le_pack_provpair (int num_placeholders) {
+ test_le_pack_uncommitted(XR_DELETE, XR_INSERT, num_placeholders);
+}
+
+static void
+test_le_pack_provdel (int num_placeholders) {
+ test_le_pack_uncommitted(XR_INSERT, XR_DELETE, num_placeholders);
+}
+
+static void
+test_le_pack_both (int num_placeholders) {
+ test_le_pack_uncommitted(XR_INSERT, XR_INSERT, num_placeholders);
+}
+
+//Test of PACK
+// Committed leafentry
+// delete -> nothing (le_empty_packs_to_nothing)
+// insert
+// make key/val have diff lengths/content
+// Uncommitted
+// committed delete
+// followed by placeholder*, delete (le_empty_packs_to_nothing)
+// followed by placeholder*, insert
+// committed insert
+// followed by placeholder*, delete
+// followed by placeholder*, insert
+//
+// placeholder* is 0,1, or 2 placeholders
+static void
+test_le_pack (void) {
+ test_le_empty_packs_to_nothing();
+ test_le_pack_committed();
+ int i;
+ for (i = 0; i < 3; i++) {
+ test_le_pack_provpair(i);
+ test_le_pack_provdel(i);
+ test_le_pack_both(i);
+ }
+}
+
+static void
+test_le_apply(ULE ule_initial, const ft_msg &msg, ULE ule_expected) {
+ int r;
+ LEAFENTRY le_initial;
+ LEAFENTRY le_expected;
+ LEAFENTRY le_result;
+
+ r = le_pack(ule_initial, nullptr, 0, nullptr, 0, 0, 0, &le_initial, nullptr);
+ CKERR(r);
+
+ size_t result_memsize = 0;
+ int64_t ignoreme;
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, true);
+ toku_le_apply_msg(msg,
+ le_initial,
+ nullptr,
+ 0,
+ 0,
+ &gc_info,
+ &le_result,
+ &ignoreme);
+ if (le_result) {
+ result_memsize = leafentry_memsize(le_result);
+ le_verify_accessors(le_result, ule_expected, result_memsize);
+ }
+
+ size_t expected_memsize = 0;
+ r = le_pack(ule_expected, nullptr, 0, nullptr, 0, 0, 0, &le_expected, nullptr);
+ CKERR(r);
+ if (le_expected) {
+ expected_memsize = leafentry_memsize(le_expected);
+ }
+
+
+ verify_le_equal(le_result, le_expected);
+ if (le_result && le_expected) {
+ assert(result_memsize == expected_memsize);
+ }
+ if (le_initial) toku_free(le_initial);
+ if (le_result) toku_free(le_result);
+ if (le_expected) toku_free(le_expected);
+}
+
+static const ULE_S ule_committed_delete = {
+ .num_puxrs = 0,
+ .num_cuxrs = 1,
+ .uxrs_static = {{
+ .type = XR_DELETE,
+ .vallen = 0,
+ .valp = NULL,
+ .xid = 0
+ }},
+ .uxrs = (UXR_S *)ule_committed_delete.uxrs_static
+};
+
+static uint32_t
+next_nesting_level(uint32_t current) {
+ uint32_t rval = current + 1;
+
+ if (current > 3 && current < MAX_TRANSACTION_RECORDS - 1) {
+ rval = current + random() % 100;
+ if (rval >= MAX_TRANSACTION_RECORDS)
+ rval = MAX_TRANSACTION_RECORDS - 1;
+ }
+ return rval;
+}
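+// A sketch of the levels this visits: 0, 1, 2, 3, 4, then random jumps of at
+// most 99, capped at MAX_TRANSACTION_RECORDS - 1, so deep nesting is sampled
+// without iterating over every possible level.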
+
+static void
+generate_committed_for(ULE ule, DBT *val) {
+ ule->num_cuxrs = 1;
+ ule->num_puxrs = 0;
+ ule->uxrs = ule->uxrs_static;
+ ule->uxrs[0].type = XR_INSERT;
+ ule->uxrs[0].vallen = val->size;
+ ule->uxrs[0].valp = val->data;
+ ule->uxrs[0].xid = 0;
+}
+
+static void
+generate_provpair_for(ULE ule, const ft_msg &msg) {
+ uint32_t level;
+ XIDS xids = msg.xids();
+ ule->uxrs = ule->uxrs_static;
+
+ ule->num_cuxrs = 1;
+ ule->num_puxrs = toku_xids_get_num_xids(xids);
+ uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
+ ule->uxrs[0].type = XR_DELETE;
+ ule->uxrs[0].vallen = 0;
+ ule->uxrs[0].valp = NULL;
+ ule->uxrs[0].xid = TXNID_NONE;
+ for (level = 1; level < num_uxrs - 1; level++) {
+ ule->uxrs[level].type = XR_PLACEHOLDER;
+ ule->uxrs[level].vallen = 0;
+ ule->uxrs[level].valp = NULL;
+ ule->uxrs[level].xid = toku_xids_get_xid(xids, level-1);
+ }
+ ule->uxrs[num_uxrs - 1].type = XR_INSERT;
+ ule->uxrs[num_uxrs - 1].vallen = msg.vdbt()->size;
+ ule->uxrs[num_uxrs - 1].valp = msg.vdbt()->data;
+ ule->uxrs[num_uxrs - 1].xid = toku_xids_get_innermost_xid(xids);
+}
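+// For example (a sketch), with two nested xids the generated stack is:
+// uxrs[0]: committed XR_DELETE (xid TXNID_NONE)
+// uxrs[1]: provisional XR_PLACEHOLDER (outer xid)
+// uxrs[2]: provisional XR_INSERT of the message's value (innermost xid)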
+
+//Test all the different things that can happen to a
+//non-existent leafentry (logical equivalent of a committed delete).
+static void
+test_le_empty_apply(void) {
+ ULE_S ule_initial = ule_committed_delete;
+
+ DBT key;
+ DBT val;
+ uint8_t keybuf[MAX_SIZE];
+ uint8_t valbuf[MAX_SIZE];
+ uint32_t keysize;
+ uint32_t valsize;
+ uint32_t nesting_level;
+ for (keysize = 0; keysize < MAX_SIZE; keysize += (random() % MAX_SIZE) + 1) {
+ for (valsize = 0; valsize < MAX_SIZE; valsize += (random() % MAX_SIZE) + 1) {
+ for (nesting_level = 0;
+ nesting_level < MAX_TRANSACTION_RECORDS;
+ nesting_level = next_nesting_level(nesting_level)) {
+ XIDS msg_xids = nested_xids[nesting_level];
+ fillrandom(keybuf, keysize);
+ fillrandom(valbuf, valsize);
+ toku_fill_dbt(&key, keybuf, keysize);
+ toku_fill_dbt(&val, valbuf, valsize);
+
+ //COMMIT/ABORT is illegal with TXNID 0
+ if (nesting_level > 0) {
+ //Abort/commit of an empty le is an empty le
+ ULE_S ule_expected = ule_committed_delete;
+
+ {
+ ft_msg msg(&key, &val, FT_COMMIT_ANY, ZERO_MSN, msg_xids);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ {
+ ft_msg msg(&key, &val, FT_COMMIT_BROADCAST_TXN, ZERO_MSN, msg_xids);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ {
+ ft_msg msg(&key, &val, FT_ABORT_ANY, ZERO_MSN, msg_xids);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ {
+ ft_msg msg(&key, &val, FT_ABORT_BROADCAST_TXN, ZERO_MSN, msg_xids);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ }
+ {
+ //delete of an empty le is an empty le
+ ULE_S ule_expected = ule_committed_delete;
+
+ ft_msg msg(&key, &val, FT_DELETE_ANY, ZERO_MSN, msg_xids);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ {
+ ft_msg msg(&key, &val, FT_INSERT, ZERO_MSN, msg_xids);
+ ULE_S ule_expected;
+ generate_provpair_for(&ule_expected, msg);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ {
+ ft_msg msg(&key, &val, FT_INSERT_NO_OVERWRITE, ZERO_MSN, msg_xids);
+ ULE_S ule_expected;
+ generate_provpair_for(&ule_expected, msg);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ }
+ }
+ }
+}
+
+static void
+generate_provdel_for(ULE ule, const ft_msg &msg) {
+ uint32_t level;
+ XIDS xids = msg.xids();
+
+ ule->num_cuxrs = 1;
+ ule->num_puxrs = toku_xids_get_num_xids(xids);
+ uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
+ ule->uxrs[0].type = XR_INSERT;
+ ule->uxrs[0].vallen = msg.vdbt()->size;
+ ule->uxrs[0].valp = msg.vdbt()->data;
+ ule->uxrs[0].xid = TXNID_NONE;
+ for (level = ule->num_cuxrs; level < ule->num_cuxrs + ule->num_puxrs - 1; level++) {
+ ule->uxrs[level].type = XR_PLACEHOLDER;
+ ule->uxrs[level].vallen = 0;
+ ule->uxrs[level].valp = NULL;
+ ule->uxrs[level].xid = toku_xids_get_xid(xids, level-1);
+ }
+ ule->uxrs[num_uxrs - 1].type = XR_DELETE;
+ ule->uxrs[num_uxrs - 1].vallen = 0;
+ ule->uxrs[num_uxrs - 1].valp = NULL;
+ ule->uxrs[num_uxrs - 1].xid = toku_xids_get_innermost_xid(xids);
+}
+
+static void
+generate_both_for(ULE ule, DBT *oldval, const ft_msg &msg) {
+ uint32_t level;
+ XIDS xids = msg.xids();
+
+ ule->num_cuxrs = 1;
+ ule->num_puxrs = toku_xids_get_num_xids(xids);
+ uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
+ ule->uxrs[0].type = XR_INSERT;
+ ule->uxrs[0].vallen = oldval->size;
+ ule->uxrs[0].valp = oldval->data;
+ ule->uxrs[0].xid = TXNID_NONE;
+ for (level = ule->num_cuxrs; level < ule->num_cuxrs + ule->num_puxrs - 1; level++) {
+ ule->uxrs[level].type = XR_PLACEHOLDER;
+ ule->uxrs[level].vallen = 0;
+ ule->uxrs[level].valp = NULL;
+ ule->uxrs[level].xid = toku_xids_get_xid(xids, level-1);
+ }
+ ule->uxrs[num_uxrs - 1].type = XR_INSERT;
+ ule->uxrs[num_uxrs - 1].vallen = msg.vdbt()->size;
+ ule->uxrs[num_uxrs - 1].valp = msg.vdbt()->data;
+ ule->uxrs[num_uxrs - 1].xid = toku_xids_get_innermost_xid(xids);
+}
+
+//Test all the different things that can happen to a
+//committed leafentry (logical equivalent of a committed insert).
+static void
+test_le_committed_apply(void) {
+ ULE_S ule_initial;
+ ule_initial.uxrs = ule_initial.uxrs_static;
+
+ DBT key;
+ DBT val;
+ uint8_t valbuf[MAX_SIZE];
+ uint32_t valsize;
+ uint32_t nesting_level;
+ for (valsize = 0; valsize < MAX_SIZE; valsize += (random() % MAX_SIZE) + 1) {
+ for (nesting_level = 0;
+ nesting_level < MAX_TRANSACTION_RECORDS;
+ nesting_level = next_nesting_level(nesting_level)) {
+ XIDS msg_xids = nested_xids[nesting_level];
+ fillrandom(valbuf, valsize);
+ toku_fill_dbt(&val, valbuf, valsize);
+
+ //Generate initial ule
+ generate_committed_for(&ule_initial, &val);
+
+
+ //COMMIT/ABORT is illegal with TXNID 0
+ if (nesting_level > 0) {
+ //Commit/abort will not change a committed le
+ ULE_S ule_expected = ule_initial;
+ {
+ ft_msg msg(&key, &val, FT_COMMIT_ANY, ZERO_MSN, msg_xids);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ {
+ ft_msg msg(&key, &val, FT_COMMIT_BROADCAST_TXN, ZERO_MSN, msg_xids);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ {
+ ft_msg msg(&key, &val, FT_ABORT_ANY, ZERO_MSN, msg_xids);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ {
+ ft_msg msg(&key, &val, FT_ABORT_BROADCAST_TXN, ZERO_MSN, msg_xids);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ }
+
+ {
+ ft_msg msg(&key, &val, FT_DELETE_ANY, ZERO_MSN, msg_xids);
+ ULE_S ule_expected;
+ ule_expected.uxrs = ule_expected.uxrs_static;
+ generate_provdel_for(&ule_expected, msg);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+
+ {
+ uint8_t valbuf2[MAX_SIZE];
+ uint32_t valsize2 = random() % MAX_SIZE;
+ fillrandom(valbuf2, valsize2);
+ DBT val2;
+ toku_fill_dbt(&val2, valbuf2, valsize2);
+ ft_msg msg(&key, &val2, FT_INSERT, ZERO_MSN, msg_xids);
+ ULE_S ule_expected;
+ ule_expected.uxrs = ule_expected.uxrs_static;
+ generate_both_for(&ule_expected, &val, msg);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ {
+ //INSERT_NO_OVERWRITE will not change a committed insert
+ ULE_S ule_expected = ule_initial;
+ uint8_t valbuf2[MAX_SIZE];
+ uint32_t valsize2 = random() % MAX_SIZE;
+ fillrandom(valbuf2, valsize2);
+ DBT val2;
+ toku_fill_dbt(&val2, valbuf2, valsize2);
+ ft_msg msg(&key, &val2, FT_INSERT_NO_OVERWRITE, ZERO_MSN, msg_xids);
+ test_le_apply(&ule_initial, msg, &ule_expected);
+ }
+ }
+ }
+}
+
+static void
+test_le_apply_messages(void) {
+ test_le_empty_apply();
+ test_le_committed_apply();
+}
+
+static bool ule_worth_running_garbage_collection(ULE ule, TXNID oldest_referenced_xid_known) {
+ LEAFENTRY le;
+ int r = le_pack(ule, nullptr, 0, nullptr, 0, 0, 0, &le, nullptr); CKERR(r);
+ invariant_notnull(le);
+ txn_gc_info gc_info(nullptr, oldest_referenced_xid_known, oldest_referenced_xid_known, true);
+ bool worth_running = toku_le_worth_running_garbage_collection(le, &gc_info);
+ toku_free(le);
+ return worth_running;
+}
+
+static void test_le_garbage_collection_birdie(void) {
+ DBT key;
+ DBT val;
+ ULE_S ule;
+ uint8_t keybuf[MAX_SIZE];
+ uint32_t keysize=8;
+ uint8_t valbuf[MAX_SIZE];
+ uint32_t valsize=8;
+ bool do_garbage_collect;
+
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ fillrandom(keybuf, keysize);
+ fillrandom(valbuf, valsize);
+ memset(&ule, 0, sizeof(ule));
+ ule.uxrs = ule.uxrs_static;
+
+ //
+ // Test garbage collection "worth-doing" heuristic
+ //
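+ // Roughly, as exercised by the cases below: garbage collection is worth
+ // running when more than one committed entry is present, or when the
+ // outermost provisional entry is older than the oldest known referenced
+ // xid; a single committed entry with only newer provisional entries is
+ // left alone.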
+
+ // Garbage collection should not be worth doing on a clean leafentry.
+ ule.num_cuxrs = 1;
+ ule.num_puxrs = 0;
+ ule.uxrs[0].xid = TXNID_NONE;
+ ule.uxrs[0].type = XR_INSERT;
+ do_garbage_collect = ule_worth_running_garbage_collection(&ule, 200);
+ invariant(!do_garbage_collect);
+
+ // It is worth doing when there is more than one committed entry
+ ule.num_cuxrs = 2;
+ ule.num_puxrs = 1;
+ ule.uxrs[1].xid = 500;
+ do_garbage_collect = ule_worth_running_garbage_collection(&ule, 200);
+ invariant(do_garbage_collect);
+
+ // It is not worth doing when there is one committed and one provisional
+ // entry and the provisional entry is newer than the oldest known
+ // referenced xid
+ ule.num_cuxrs = 1;
+ ule.num_puxrs = 1;
+ ule.uxrs[1].xid = 1500;
+ do_garbage_collect = ule_worth_running_garbage_collection(&ule, 200);
+ invariant(!do_garbage_collect);
+ ule.uxrs[1].xid = 200;
+ do_garbage_collect = ule_worth_running_garbage_collection(&ule, 200);
+ invariant(!do_garbage_collect);
+
+ // It is not worth doing when there is only one committed entry and
+ // multiple provisional entries, but the outermost provisional entry is
+ // newer than the oldest known referenced xid.
+ ule.num_cuxrs = 1;
+ ule.num_puxrs = 3;
+ ule.uxrs[1].xid = 201;
+ ule.uxrs[2].xid = 206;
+ ule.uxrs[3].xid = 215;
+ do_garbage_collect = ule_worth_running_garbage_collection(&ule, 200);
+ invariant(!do_garbage_collect);
+
+ // It is worth doing when the above scenario has an outermost entry
+ // older than the oldest known referenced xid, even if its children seem
+ // newer: those children must have committed, because the parent is no
+ // longer live.
+ ule.num_cuxrs = 1;
+ ule.num_puxrs = 3;
+ ule.uxrs[1].xid = 190;
+ ule.uxrs[2].xid = 206;
+ ule.uxrs[3].xid = 215;
+ do_garbage_collect = ule_worth_running_garbage_collection(&ule, 200);
+ invariant(do_garbage_collect);
+
+ // It is worth doing when there is more than one committed entry,
+ // even if a provisional entry exists that is newer than the
+ // oldest known referenced xid
+ ule.num_cuxrs = 2;
+ ule.num_puxrs = 1;
+ ule.uxrs[1].xid = 499;
+ ule.uxrs[2].xid = 500;
+ do_garbage_collect = ule_worth_running_garbage_collection(&ule, 200);
+ invariant(do_garbage_collect);
+
+ // It is worth doing when there is one of each, and the provisional
+ // entry is older than the oldest known referenced xid
+ ule.num_cuxrs = 1;
+ ule.num_puxrs = 1;
+ ule.uxrs[1].xid = 199;
+ do_garbage_collect = ule_worth_running_garbage_collection(&ule, 200);
+ invariant(do_garbage_collect);
+
+ // It is definitely worth doing when the above case is true
+ // and there is more than one provisional entry.
+ ule.num_cuxrs = 1;
+ ule.num_puxrs = 2;
+ ule.uxrs[1].xid = 150;
+ ule.uxrs[2].xid = 175;
+ do_garbage_collect = ule_worth_running_garbage_collection(&ule, 200);
+ invariant(do_garbage_collect);
+}
+
+static void test_le_optimize(void) {
+ DBT key;
+ DBT val;
+ ULE_S ule_initial;
+ ULE_S ule_expected;
+ uint8_t keybuf[MAX_SIZE];
+ uint32_t keysize=8;
+ uint8_t valbuf[MAX_SIZE];
+ uint32_t valsize=8;
+ ule_initial.uxrs = ule_initial.uxrs_static;
+ ule_expected.uxrs = ule_expected.uxrs_static;
+ TXNID optimize_txnid = 1000;
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ XIDS root_xids = toku_xids_get_root_xids();
+ XIDS msg_xids;
+ int r = toku_xids_create_child(root_xids, &msg_xids, optimize_txnid);
+ assert(r==0);
+ ft_msg msg(&key, &val, FT_OPTIMIZE, ZERO_MSN, msg_xids);
+
+ //
+ // create the key
+ //
+ fillrandom(keybuf, keysize);
+ fillrandom(valbuf, valsize);
+
+ //
+ // test a clean leafentry has no effect
+ //
+ ule_initial.num_cuxrs = 1;
+ ule_initial.num_puxrs = 0;
+ ule_initial.uxrs[0].type = XR_INSERT;
+ ule_initial.uxrs[0].xid = TXNID_NONE;
+ ule_initial.uxrs[0].vallen = valsize;
+ ule_initial.uxrs[0].valp = valbuf;
+
+ ule_expected.num_cuxrs = 1;
+ ule_expected.num_puxrs = 0;
+ ule_expected.uxrs[0].type = XR_INSERT;
+ ule_expected.uxrs[0].xid = TXNID_NONE;
+ ule_expected.uxrs[0].vallen = valsize;
+ ule_expected.uxrs[0].valp = valbuf;
+
+ test_msg_modify_ule(&ule_initial, msg);
+ verify_ule_equal(&ule_initial, &ule_expected);
+
+ //
+ // add another committed entry and ensure no effect
+ //
+ ule_initial.num_cuxrs = 2;
+ ule_initial.uxrs[1].type = XR_DELETE;
+ ule_initial.uxrs[1].xid = 500;
+ ule_initial.uxrs[1].vallen = 0;
+ ule_initial.uxrs[1].valp = NULL;
+
+ ule_expected.num_cuxrs = 2;
+ ule_expected.uxrs[1].type = XR_DELETE;
+ ule_expected.uxrs[1].xid = 500;
+ ule_expected.uxrs[1].vallen = 0;
+ ule_expected.uxrs[1].valp = NULL;
+
+ test_msg_modify_ule(&ule_initial, msg);
+ verify_ule_equal(&ule_initial, &ule_expected);
+
+ //
+ // now test when there is one provisional entry; three cases: after, equal to, and before FT_OPTIMIZE's transaction
+ //
+ ule_initial.num_cuxrs = 1;
+ ule_initial.num_puxrs = 1;
+ ule_initial.uxrs[1].xid = 1500;
+
+ ule_expected.num_cuxrs = 1;
+ ule_expected.num_puxrs = 1;
+ ule_expected.uxrs[1].xid = 1500;
+ test_msg_modify_ule(&ule_initial, msg);
+ verify_ule_equal(&ule_initial, &ule_expected);
+
+ ule_initial.uxrs[1].xid = 1000;
+ ule_expected.uxrs[1].xid = 1000;
+ test_msg_modify_ule(&ule_initial, msg);
+ verify_ule_equal(&ule_initial, &ule_expected);
+
+ ule_initial.uxrs[1].xid = 500;
+ ule_expected.uxrs[1].xid = 500;
+ ule_expected.num_cuxrs = 2;
+ ule_expected.num_puxrs = 0;
+ test_msg_modify_ule(&ule_initial, msg);
+ verify_ule_equal(&ule_initial, &ule_expected);
+
+ //
+ // now test cases with two provisional
+ //
+ ule_initial.num_cuxrs = 1;
+ ule_initial.num_puxrs = 2;
+ ule_expected.num_cuxrs = 1;
+ ule_expected.num_puxrs = 2;
+
+ ule_initial.uxrs[2].type = XR_INSERT;
+ ule_initial.uxrs[2].xid = 1500;
+ ule_initial.uxrs[2].vallen = valsize;
+ ule_initial.uxrs[2].valp = valbuf;
+ ule_initial.uxrs[1].xid = 1200;
+
+ ule_expected.uxrs[2].type = XR_INSERT;
+ ule_expected.uxrs[2].xid = 1500;
+ ule_expected.uxrs[2].vallen = valsize;
+ ule_expected.uxrs[2].valp = valbuf;
+ ule_expected.uxrs[1].xid = 1200;
+ test_msg_modify_ule(&ule_initial, msg);
+ verify_ule_equal(&ule_initial, &ule_expected);
+
+ ule_initial.uxrs[1].xid = 1000;
+ ule_expected.uxrs[1].xid = 1000;
+ test_msg_modify_ule(&ule_initial, msg);
+ verify_ule_equal(&ule_initial, &ule_expected);
+
+ ule_initial.uxrs[1].xid = 800;
+ ule_expected.uxrs[1].xid = 800;
+ ule_expected.num_cuxrs = 2;
+ ule_expected.num_puxrs = 0;
+ ule_expected.uxrs[1].type = ule_initial.uxrs[2].type;
+ ule_expected.uxrs[1].valp = ule_initial.uxrs[2].valp;
+ ule_expected.uxrs[1].vallen = ule_initial.uxrs[2].vallen;
+ test_msg_modify_ule(&ule_initial, msg);
+ verify_ule_equal(&ule_initial, &ule_expected);
+
+
+ toku_xids_destroy(&msg_xids);
+ toku_xids_destroy(&root_xids);
+}
+
+//TODO: #1125 tests:
+// Will probably have to expose ULE_S definition
+// - Check memsize function is correct
+// - Assert == disksize (almost useless, but go ahead)
+// - Check standard accessors
+// - le_latest_val_and_len
+// - le_latest_val
+// - le_latest_vallen
+// - le_key_and_len
+// - le_innermost_inserted_val_and_len
+// - le_innermost_inserted_val
+// - le_innermost_inserted_vallen
+// - Check le_outermost_uncommitted_xid
+// - Check le_latest_is_del
+// - Check unpack+pack memcmps equal
+// - Check exact memory expected (including size) for various leafentry types.
+// - Check apply_msg logic
+// - Known start, known expected.. various types.
+// - Go through test-leafentry10.c
+// - Verify we have tests for all analogous stuff.
+//
+// PACK
+// UNPACK
+// verify pack+unpack is no-op
+// verify unpack+pack is no-op
+// accessors
+// Test apply_msg logic
+// i.e. start with LE, apply message
+// in parallel, construct the expected ULE manually, and pack that
+// Compare the two results
+// Test full_promote
+
+static void
+init_xids(void) {
+ uint32_t i;
+ nested_xids[0] = toku_xids_get_root_xids();
+ for (i = 1; i < MAX_TRANSACTION_RECORDS; i++) {
+ int r = toku_xids_create_child(nested_xids[i-1], &nested_xids[i], i * 37 + random() % 36);
+ assert(r==0);
+ }
+}
+
+static void
+destroy_xids(void) {
+ uint32_t i;
+ for (i = 0; i < MAX_TRANSACTION_RECORDS; i++) {
+ toku_xids_destroy(&nested_xids[i]);
+ }
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ srandom(7); //Arbitrary seed.
+ init_xids();
+ test_le_offsets();
+ test_le_pack();
+ test_le_apply_messages();
+ test_le_optimize();
+ test_le_garbage_collection_birdie();
+ destroy_xids();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-merges-on-cleaner.cc b/storage/tokudb/PerconaFT/ft/tests/test-merges-on-cleaner.cc
new file mode 100644
index 00000000..ee82daaf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-merges-on-cleaner.cc
@@ -0,0 +1,248 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure the cleaner thread merges the two leaf nodes and that lookups of the inserted keys still work afterward. */
+
+
+#include "test.h"
+
+#include <ft-cachetable-wrappers.h>
+#include "ft-flusher.h"
+#include "cachetable/checkpoint.h"
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE ft;
+const char *fname = TOKU_TEST_FILENAME;
+
+static int update_func(
+ DB* UU(db),
+ const DBT* key,
+ const DBT* old_val,
+ const DBT* UU(extra),
+ void (*set_val)(const DBT *new_val, void *set_extra),
+ void *set_extra)
+{
+ DBT new_val;
+ assert(old_val->size > 0);
+ if (verbose) {
+ printf("applying update to %s\n", (char *)key->data);
+ }
+ toku_init_dbt(&new_val);
+ set_val(&new_val, set_extra);
+ return 0;
+}
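+// Note on the callback above: it hands set_val an empty DBT, so each row's
+// value is replaced with a zero-length value; the FT_UPDATE_BROADCAST_ALL
+// message injected below applies this to the rows it reaches, which is why
+// the later lookups expect a value length of 0.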
+
+
+static void
+doit (void) {
+ BLOCKNUM node_leaf[2];
+ BLOCKNUM node_internal, node_root;
+
+ int r;
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ ft->options.update_fun = update_func;
+ ft->ft->update_fun = update_func;
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ r = toku_testsetup_leaf(ft, &node_leaf[0], 1, NULL, NULL);
+ assert(r==0);
+ r = toku_testsetup_leaf(ft, &node_leaf[1], 1, NULL, NULL);
+ assert(r==0);
+
+ char* pivots[1];
+ pivots[0] = toku_strdup("kkkkk");
+ int pivot_len = 6;
+
+ r = toku_testsetup_nonleaf(ft, 1, &node_internal, 2, node_leaf, pivots, &pivot_len);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(ft, 2, &node_root, 1, &node_internal, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_root(ft, node_root);
+ assert(r==0);
+
+ //
+ // at this point we have created a tree with a root, an internal node,
+ // and two leaf nodes, the pivot being "kkkkk"
+ //
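+ // A sketch of that tree (node names as above):
+ //
+ //            node_root
+ //                |
+ //          node_internal        pivot: "kkkkk"
+ //           /          \
+ //    node_leaf[0]   node_leaf[1]
+ //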
+
+ // now we insert a row into each leaf node
+ r = toku_testsetup_insert_to_leaf (
+ ft,
+ node_leaf[0],
+ "a", // key
+ 2, // keylen
+ "aa",
+ 3
+ );
+ assert(r==0);
+ r = toku_testsetup_insert_to_leaf (
+ ft,
+ node_leaf[1],
+ "z", // key
+ 2, // keylen
+ "zz",
+ 3
+ );
+ assert(r==0);
+
+ //
+ // now insert a bunch of dummy delete messages
+ // into the internal node, to get its cachepressure size up
+ //
+ for (int i = 0; i < 100000; i++) {
+ r = toku_testsetup_insert_to_nonleaf (
+ ft,
+ node_internal,
+ FT_DELETE_ANY,
+ "jj", // this key does not exist, so its message application should be a no-op
+ 3,
+ NULL,
+ 0
+ );
+ assert(r==0);
+ }
+
+ //
+ // now insert a broadcast message into the root
+ //
+ r = toku_testsetup_insert_to_nonleaf (
+ ft,
+ node_root,
+ FT_UPDATE_BROADCAST_ALL,
+ NULL,
+ 0,
+ NULL,
+ 0
+ );
+ assert(r==0);
+
+ //
+ // now let us induce a clean on the internal node
+ //
+ FTNODE node;
+ toku_pin_node_with_min_bfe(&node, node_leaf[1], ft);
+ // hack to get merge going
+ BLB_SEQINSERT(node, node->n_children-1) = false;
+ toku_unpin_ftnode(ft->ft, node);
+
+ // now do a lookup on one of the keys; this should bring a leaf node up to date
+ DBT k;
+ struct check_pair pair = {2, "a", 0, NULL, 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
+ assert(r==0);
+
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft->ft);
+ toku_pin_ftnode(
+ ft->ft,
+ node_internal,
+ toku_cachetable_hash(ft->ft->cf, node_internal),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->n_children == 2);
+ // we expect that this flushes its buffer, that
+ // the leaf children get merged, and that lookups
+ // of the values "a" and "z" still work
+ r = toku_ftnode_cleaner_callback(
+ node,
+ node_internal,
+ toku_cachetable_hash(ft->ft->cf, node_internal),
+ ft->ft
+ );
+
+ // verify that node_internal's buffer is empty
+ bfe.create_for_min_read(ft->ft);
+ toku_pin_ftnode(
+ ft->ft,
+ node_internal,
+ toku_cachetable_hash(ft->ft->cf, node_internal),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ // check that merge happened
+ assert(node->n_children == 1);
+ // check that buffers are empty
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
+ toku_unpin_ftnode(ft->ft, node);
+
+ //
+ // now run a checkpoint to get everything clean,
+ // and to get the rebalancing to happen
+ //
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+
+ // check that lookups on the two keys are still good
+ struct check_pair pair1 = {2, "a", 0, NULL, 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
+ assert(r==0);
+ struct check_pair pair2 = {2, "z", 0, NULL, 0};
+ r = toku_ft_lookup(ft, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2);
+ assert(r==0);
+
+
+ r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ toku_free(pivots[0]);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ doit();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-oldest-referenced-xid-flush.cc b/storage/tokudb/PerconaFT/ft/tests/test-oldest-referenced-xid-flush.cc
new file mode 100644
index 00000000..71357a1e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-oldest-referenced-xid-flush.cc
@@ -0,0 +1,190 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include "ft-cachetable-wrappers.h"
+#include "ft-flusher.h"
+#include "ft-flusher-internal.h"
+
+static bool
+dont_destroy_bn(void* UU(extra)) {
+ return false;
+}
+
+static void merge_should_not_happen(struct flusher_advice* UU(fa),
+ FT UU(h),
+ FTNODE UU(parent),
+ int UU(childnum),
+ FTNODE UU(child),
+ void* UU(extra))
+{
+ assert(false);
+}
+
+static bool dont_recursively_flush(FTNODE UU(child), void* UU(extra)) {
+ return false;
+}
+
+static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
+ assert(parent->height == 2);
+ assert(parent->n_children == 1);
+ return 0;
+}
+
+static void dummy_update_status(FTNODE UU(child), int UU(dirtied), void* UU(extra)) {
+}
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+static void test_oldest_referenced_xid_gets_propagated(void) {
+ int r;
+ CACHETABLE ct;
+ FT_HANDLE t;
+ BLOCKNUM grandchild_leaf_blocknum, child_nonleaf_blocknum, root_blocknum;
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink("foo1.ft_handle");
+ r = toku_open_ft_handle("foo1.ft_handle", 1, &t, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, nullptr, toku_builtin_compare_fun);
+ assert(r==0);
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ // This test flushes from a nonleaf root to a nonleaf child, without any leaf nodes.
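+ // A sketch of the tree built below:
+ // root_blocknum (height 2) -> child_nonleaf_blocknum (height 1) -> grandchild_leaf_blocknum (height 0)
+ // The flush under test only moves messages from the root into the nonleaf child.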
+
+ r = toku_testsetup_leaf(t, &grandchild_leaf_blocknum, 1, NULL, NULL);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(t, 1, &child_nonleaf_blocknum, 1, &grandchild_leaf_blocknum, NULL, NULL);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(t, 2, &root_blocknum, 1, &child_nonleaf_blocknum, NULL, NULL);
+ assert(r==0);
+
+ r = toku_testsetup_root(t, root_blocknum);
+ assert(r==0);
+
+ r = toku_testsetup_insert_to_nonleaf(
+ t,
+ root_blocknum,
+ FT_INSERT,
+ "a",
+ 2,
+ NULL,
+ 0
+ );
+ assert(r == 0);
+
+ // Verify that both the root and its child start with TXNID_NONE
+ // for the oldest referenced xid
+
+ // first verify the child
+ FTNODE node = NULL;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
+ toku_pin_ftnode(
+ t->ft,
+ child_nonleaf_blocknum,
+ toku_cachetable_hash(t->ft->cf, child_nonleaf_blocknum),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 1);
+ assert(node->n_children == 1);
+ assert(BP_BLOCKNUM(node, 0).b == grandchild_leaf_blocknum.b);
+ assert(node->oldest_referenced_xid_known == TXNID_NONE);
+ toku_unpin_ftnode(t->ft, node);
+
+ // now verify the root - keep it pinned so we can flush it below
+ toku_pin_ftnode(
+ t->ft,
+ root_blocknum,
+ toku_cachetable_hash(t->ft->cf, root_blocknum),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->height == 2);
+ assert(node->n_children == 1);
+ assert(BP_BLOCKNUM(node, 0).b == child_nonleaf_blocknum.b);
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) > 0);
+ assert(node->oldest_referenced_xid_known == TXNID_NONE);
+
+ // set the root's oldest referenced xid to something special
+ const TXNID flush_xid = 25000;
+ node->oldest_referenced_xid_known = flush_xid;
+
+ // do the flush
+ struct flusher_advice fa;
+ flusher_advice_init(
+ &fa,
+ child_to_flush,
+ dont_destroy_bn,
+ dont_recursively_flush,
+ merge_should_not_happen,
+ dummy_update_status,
+ default_pick_child_after_split,
+ NULL
+ );
+ toku_ft_flush_some_child(t->ft, node, &fa);
+
+ // pin the child, verify that oldest referenced xid was
+ // propagated from parent to child during the flush
+ toku_pin_ftnode(
+ t->ft,
+ child_nonleaf_blocknum,
+ toku_cachetable_hash(t->ft->cf, child_nonleaf_blocknum),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->oldest_referenced_xid_known == flush_xid);
+
+ toku_unpin_ftnode(t->ft, node);
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+int test_main(int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ default_parse_args(argc, argv);
+ test_oldest_referenced_xid_gets_propagated();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-pick-child-to-flush.cc b/storage/tokudb/PerconaFT/ft/tests/test-pick-child-to-flush.cc
new file mode 100644
index 00000000..29d07483
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-pick-child-to-flush.cc
@@ -0,0 +1,342 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that toku_ft_flush_some_child flushes exactly the child chosen by the flusher advice. */
+
+
+#include "test.h"
+
+#include <ft-cachetable-wrappers.h>
+
+#include "ft-flusher.h"
+#include "ft-flusher-internal.h"
+#include "cachetable/checkpoint.h"
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE t;
+const char *fname = TOKU_TEST_FILENAME;
+
+int curr_child_to_flush;
+int num_flushes_called;
+
+static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
+ // internal node has 2 children
+ if (parent->height == 1) {
+ assert(parent->n_children == 2);
+ return curr_child_to_flush;
+ }
+ // root has 1 child
+ else if (parent->height == 2) {
+ assert(parent->n_children == 1);
+ return 0;
+ }
+ else {
+ assert(false);
+ }
+ return curr_child_to_flush;
+}
+
+static void update_status(FTNODE UU(child), int UU(dirtied), void* UU(extra)) {
+ num_flushes_called++;
+}
+
+
+
+static bool
+dont_destroy_bn(void* UU(extra))
+{
+ return false;
+}
+
+static void merge_should_not_happen(struct flusher_advice* UU(fa),
+ FT UU(h),
+ FTNODE UU(parent),
+ int UU(childnum),
+ FTNODE UU(child),
+ void* UU(extra))
+{
+ assert(false);
+}
+
+static bool recursively_flush_should_not_happen(FTNODE UU(child), void* UU(extra)) {
+ assert(false);
+ return false; // unreachable; keeps the non-void function well-formed
+}
+
+static bool always_flush(FTNODE UU(child), void* UU(extra)) {
+ return true;
+}
+
+
+static void
+doit (void) {
+ BLOCKNUM node_internal, node_root;
+ BLOCKNUM node_leaf[2];
+ int r;
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ r = toku_testsetup_leaf(t, &node_leaf[0], 1, NULL, NULL);
+ assert(r==0);
+ r = toku_testsetup_leaf(t, &node_leaf[1], 1, NULL, NULL);
+ assert(r==0);
+
+ char* pivots[1];
+ pivots[0] = toku_strdup("kkkkk");
+ int pivot_len = 6;
+ r = toku_testsetup_nonleaf(t, 1, &node_internal, 2, node_leaf, pivots, &pivot_len);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(t, 2, &node_root, 1, &node_internal, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_root(t, node_root);
+ assert(r==0);
+
+ char filler[900-2*bn_data::HEADER_LENGTH];
+ memset(filler, 0, sizeof(filler));
+ // now we insert filler data so that a merge does not happen
+ r = toku_testsetup_insert_to_leaf (
+ t,
+ node_leaf[0],
+ "b", // key
+ 2, // keylen
+ filler,
+ sizeof(filler)
+ );
+ assert(r==0);
+ r = toku_testsetup_insert_to_leaf (
+ t,
+ node_leaf[1],
+ "y", // key
+ 2, // keylen
+ filler,
+ sizeof(filler)
+ );
+ assert(r==0);
+
+ // make buffers in internal node non-empty
+ r = toku_testsetup_insert_to_nonleaf(
+ t,
+ node_internal,
+ FT_INSERT,
+ "a",
+ 2,
+ NULL,
+ 0
+ );
+ assert_zero(r);
+ r = toku_testsetup_insert_to_nonleaf(
+ t,
+ node_internal,
+ FT_INSERT,
+ "z",
+ 2,
+ NULL,
+ 0
+ );
+ assert_zero(r);
+
+ //
+ // now run a checkpoint to get everything clean
+ //
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+
+ // now with setup done, start the test
+ // test that toku_ft_flush_some_child properly honors
+ // what we say and flushes the child we pick
+ FTNODE node = NULL;
+ toku_pin_node_with_min_bfe(&node, node_internal, t);
+ toku_ftnode_assert_fully_in_memory(node);
+ assert(node->n_children == 2);
+ assert(!node->dirty());
+ assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) > 0);
+ assert(toku_bnc_n_entries(node->bp[1].ptr.u.nonleaf) > 0);
+
+ struct flusher_advice fa;
+ flusher_advice_init(
+ &fa,
+ child_to_flush,
+ dont_destroy_bn,
+ recursively_flush_should_not_happen,
+ merge_should_not_happen,
+ update_status,
+ default_pick_child_after_split,
+ NULL
+ );
+ curr_child_to_flush = 0;
+ num_flushes_called = 0;
+ toku_ft_flush_some_child(t->ft, node, &fa);
+ assert(num_flushes_called == 1);
+
+ toku_pin_node_with_min_bfe(&node, node_internal, t);
+ toku_ftnode_assert_fully_in_memory(node);
+ assert(node->dirty());
+ assert(node->n_children == 2);
+ // child 0 should have empty buffer because it flushed
+ // child 1 should still have message in buffer
+ assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) == 0);
+ assert(toku_bnc_n_entries(node->bp[1].ptr.u.nonleaf) > 0);
+ toku_unpin_ftnode(t->ft, node);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+ toku_pin_node_with_min_bfe(&node, node_internal, t);
+ assert(!node->dirty());
+ curr_child_to_flush = 1;
+ num_flushes_called = 0;
+ toku_ft_flush_some_child(t->ft, node, &fa);
+ assert(num_flushes_called == 1);
+
+ toku_pin_node_with_min_bfe(&node, node_internal, t);
+ assert(node->dirty());
+ toku_ftnode_assert_fully_in_memory(node);
+ assert(node->n_children == 2);
+ // both buffers should be empty now
+ assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) == 0);
+ assert(toku_bnc_n_entries(node->bp[1].ptr.u.nonleaf) == 0);
+ // now let's do a flush with an empty buffer and make sure it is ok
+ toku_unpin_ftnode(t->ft, node);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+ toku_pin_node_with_min_bfe(&node, node_internal, t);
+ assert(!node->dirty());
+ curr_child_to_flush = 0;
+ num_flushes_called = 0;
+ toku_ft_flush_some_child(t->ft, node, &fa);
+ assert(num_flushes_called == 1);
+
+ toku_pin_node_with_min_bfe(&node, node_internal, t);
+ assert(node->dirty()); // nothing was flushed, but since we were trying to flush to a leaf, both become dirty
+ toku_ftnode_assert_fully_in_memory(node);
+ assert(node->n_children == 2);
+ // both buffers should be empty now
+ assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) == 0);
+ assert(toku_bnc_n_entries(node->bp[1].ptr.u.nonleaf) == 0);
+ toku_unpin_ftnode(t->ft, node);
+
+ // now let's start a flush from the root, that always recursively flushes
+ flusher_advice_init(
+ &fa,
+ child_to_flush,
+ dont_destroy_bn,
+ always_flush,
+ merge_should_not_happen,
+ update_status,
+ default_pick_child_after_split,
+ NULL
+ );
+ // use a for loop to get us down both paths
+ for (int i = 0; i < 2; i++) {
+ toku_pin_node_with_min_bfe(&node, node_root, t);
+ toku_ftnode_assert_fully_in_memory(node); // entire root is in memory
+ curr_child_to_flush = i;
+ num_flushes_called = 0;
+ toku_ft_flush_some_child(t->ft, node, &fa);
+ assert(num_flushes_called == 2);
+
+ toku_pin_node_with_min_bfe(&node, node_internal, t);
+ assert(node->dirty());
+ toku_unpin_ftnode(t->ft, node);
+ toku_pin_node_with_min_bfe(&node, node_leaf[0], t);
+ assert(node->dirty());
+ toku_unpin_ftnode(t->ft, node);
+ toku_pin_node_with_min_bfe(&node, node_leaf[1], t);
+ if (i == 0) {
+ assert(!node->dirty());
+ }
+ else {
+ assert(node->dirty());
+ }
+ toku_unpin_ftnode(t->ft, node);
+ }
+
+ // now one more test to show a bug was fixed:
+ // if there is nothing to flush from parent to child
+ // and the child is not fully in memory, we used to crash.
+ // To make sure that is fixed, evict the internal node so it is not
+ // fully in memory, and verify the test above still works
+
+ // a hack to get internal compressed
+ r = toku_testsetup_insert_to_nonleaf(
+ t,
+ node_internal,
+ FT_INSERT,
+ "c",
+ 2,
+ NULL,
+ 0
+ );
+ assert_zero(r);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ assert_zero(r);
+ toku_pin_node_with_min_bfe(&node, node_internal, t);
+ for (int i = 0; i < 20; i++) {
+ toku_ftnode_pe_callback(node, make_pair_attr(0xffffffff), t->ft, def_pe_finalize_impl, nullptr);
+ }
+ assert(BP_STATE(node,0) == PT_COMPRESSED);
+ toku_unpin_ftnode(t->ft, node);
+
+ // now let's do the same test as above
+ toku_pin_node_with_min_bfe(&node, node_root, t);
+ toku_ftnode_assert_fully_in_memory(node); // entire root is in memory
+ curr_child_to_flush = 0;
+ num_flushes_called = 0;
+ toku_ft_flush_some_child(t->ft, node, &fa);
+ assert(num_flushes_called == 2);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+
+ toku_free(pivots[0]);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ doit();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-with-mhs.cc b/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-with-mhs.cc
new file mode 100644
index 00000000..ea4f9374
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-with-mhs.cc
@@ -0,0 +1,97 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/serialize/rbtree_mhs.h"
+#include "test.h"
+#include <algorithm>
+#include <vector>
+#include <ctime>
+#include <cstdlib>
+
+static void test_insert_remove(void) {
+ uint64_t i;
+ MhsRbTree::Tree *tree = new MhsRbTree::Tree();
+ verbose = 0;
+
+ tree->Insert({0, 100});
+
+ for (i = 0; i < 10; i++) {
+ tree->Remove(3);
+ tree->Remove(2);
+ }
+ tree->ValidateBalance();
+ tree->ValidateMhs();
+
+ for (i = 0; i < 10; i++) {
+ tree->Insert({5 * i, 3});
+ }
+ tree->ValidateBalance();
+ tree->ValidateMhs();
+
+ uint64_t offset = tree->Remove(2);
+ invariant(offset == 0);
+ offset = tree->Remove(10);
+ invariant(offset == 50);
+ offset = tree->Remove(3);
+ invariant(offset == 5);
+ tree->ValidateBalance();
+ tree->ValidateMhs();
+
+ tree->Insert({48, 2});
+ tree->Insert({50, 10});
+
+ tree->ValidateBalance();
+ tree->ValidateMhs();
+
+ tree->Insert({3, 7});
+ offset = tree->Remove(10);
+ invariant(offset == 2);
+ tree->ValidateBalance();
+ tree->ValidateMhs();
+ tree->Dump();
+ delete tree;
+}
+
+int test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ test_insert_remove();
+ if (verbose)
+ printf("test ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc b/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc
new file mode 100644
index 00000000..cefe6633
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc
@@ -0,0 +1,103 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/serialize/rbtree_mhs.h"
+#include "test.h"
+#include <algorithm>
+#include <vector>
+#include <ctime>
+#include <cstdlib>
+
+#define N 1000000
+std::vector<MhsRbTree::Node::BlockPair> input_vector;
+MhsRbTree::Node::BlockPair old_vector[N];
+
+static int myrandom(int i) { return std::rand() % i; }
+
+static void generate_random_input() {
+ std::srand(unsigned(std::time(0)));
+
+ // set some values:
+ for (uint64_t i = 0; i < N; ++i) {
+ MhsRbTree::Node::BlockPair bp = {i+1, 0};
+ input_vector.push_back(bp);
+ old_vector[i] = bp;
+ }
+ // using built-in random generator:
+ std::random_shuffle(input_vector.begin(), input_vector.end(), myrandom);
+}
+
+static void test_insert_remove(void) {
+ int i;
+ MhsRbTree::Tree *tree = new MhsRbTree::Tree();
+ verbose = 0;
+ generate_random_input();
+ if (verbose) {
+ printf("\n we are going to insert the following block offsets\n");
+ for (i = 0; i < N; i++)
+ printf("%" PRIu64 "\t", input_vector[i]._offset.ToInt());
+ }
+ for (i = 0; i < N; i++) {
+ tree->Insert(input_vector[i]);
+ // tree->ValidateBalance();
+ }
+ tree->ValidateBalance();
+ MhsRbTree::Node::BlockPair *p_bps = &old_vector[0];
+ tree->ValidateInOrder(p_bps);
+ printf("min node of the tree:%" PRIu64 "\n",
+ rbn_offset(tree->MinNode()).ToInt());
+ printf("max node of the tree:%" PRIu64 "\n",
+ rbn_offset(tree->MaxNode()).ToInt());
+
+ for (i = 0; i < N; i++) {
+ // tree->ValidateBalance();
+ tree->RawRemove(input_vector[i]._offset.ToInt());
+ }
+
+ tree->Destroy();
+ delete tree;
+}
+
+int test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ test_insert_remove();
+ if (verbose)
+ printf("test ok\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-txn-child-manager.cc b/storage/tokudb/PerconaFT/ft/tests/test-txn-child-manager.cc
new file mode 100644
index 00000000..cc4f831e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-txn-child-manager.cc
@@ -0,0 +1,290 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include "toku_os.h"
+#include "cachetable/checkpoint.h"
+
+#include "test-ft-txns.h"
+
+static int txn_child_manager_test_cb(TOKUTXN txn, void* extra) {
+ TOKUTXN* ptxn = (TOKUTXN *)extra;
+ assert(txn == *ptxn);
+ *ptxn = txn->child;
+ return 0;
+}
+
+static int txn_child_manager_test_cb2(TOKUTXN txn, void* extra) {
+ TOKUTXN extra_txn = (TOKUTXN)extra;
+ if (txn == extra_txn) {
+ return -1;
+ }
+ return 0;
+}
+
+
+class txn_child_manager_unit_test {
+public:
+ void run_test();
+ void run_child_txn_test();
+};
+
+// simple test that verifies that creating a TXN_CHILD_SNAPSHOT tokutxn
+// creates its own snapshot
+void txn_child_manager_unit_test::run_child_txn_test() {
+ TOKULOGGER logger;
+ CACHETABLE ct;
+ int r = 0;
+ test_setup(TOKU_TEST_FILENAME, &logger, &ct);
+ // create the root transaction
+ TOKUTXN root_txn = NULL;
+ r = toku_txn_begin_txn(
+ (DB_TXN *)NULL,
+ NULL,
+ &root_txn,
+ logger,
+ TXN_SNAPSHOT_CHILD,
+ false
+ );
+ CKERR(r);
+ // test starting a child txn
+ TOKUTXN child_txn = NULL;
+ r = toku_txn_begin_txn(
+ NULL,
+ root_txn,
+ &child_txn,
+ logger,
+ TXN_SNAPSHOT_CHILD,
+ false
+ );
+ CKERR(r);
+
+ // assert that the child has a later snapshot
+ assert(child_txn->snapshot_txnid64 > root_txn->snapshot_txnid64);
+
+ r = toku_txn_commit_txn(child_txn, true, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(child_txn);
+ assert(root_txn->child == NULL);
+
+ r = toku_txn_commit_txn(root_txn, true, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(root_txn);
+
+
+ clean_shutdown(&logger, &ct);
+}
+
+void txn_child_manager_unit_test::run_test() {
+ TOKULOGGER logger;
+ CACHETABLE ct;
+ int r = 0;
+ test_setup(TOKU_TEST_FILENAME, &logger, &ct);
+ // create the root transaction
+ TOKUTXN root_txn = NULL;
+ r = toku_txn_begin_txn(
+ (DB_TXN *)NULL,
+ NULL,
+ &root_txn,
+ logger,
+ TXN_SNAPSHOT_ROOT,
+ false
+ );
+ CKERR(r);
+ txn_child_manager* cm = root_txn->child_manager;
+ assert(cm == &root_txn->child_manager_s);
+ assert(cm->m_root == root_txn);
+ assert(cm->m_last_xid == TXNID_NONE);
+ assert(root_txn->child == NULL);
+ // this assumption underlies the child_id values asserted below,
+ // because a child_id cannot collide with the parent id
+ assert(root_txn->txnid.parent_id64 == 1);
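+ // A sketch of the TXNID_PAIRs this test expects:
+ // root txn: { parent_id64 = 1, ... }
+ // child txn: { 1, child_id64 = 2 }
+ // grandchild txns: { 1, 3 } and, after reopening, { 1, 4 }
+ // recovery txn: { 1, 100 } (xid supplied explicitly)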
+
+ // test starting a child txn
+ TOKUTXN child_txn = NULL;
+ r = toku_txn_begin_txn(
+ NULL,
+ root_txn,
+ &child_txn,
+ logger,
+ TXN_SNAPSHOT_ROOT,
+ false
+ );
+ CKERR(r);
+ assert(child_txn->child_manager == cm);
+ assert(child_txn->parent == root_txn);
+ assert(root_txn->child == child_txn);
+ assert(child_txn->txnid.parent_id64 == root_txn->txnid.parent_id64);
+ assert(child_txn->txnid.child_id64 == 2);
+ assert(child_txn->live_root_txn_list == root_txn->live_root_txn_list);
+ assert(child_txn->snapshot_txnid64 == root_txn->snapshot_txnid64);
+
+ assert(cm->m_root == root_txn);
+ assert(cm->m_last_xid == child_txn->txnid.child_id64);
+
+ TOKUTXN grandchild_txn = NULL;
+ r = toku_txn_begin_txn(
+ NULL,
+ child_txn,
+ &grandchild_txn,
+ logger,
+ TXN_SNAPSHOT_ROOT,
+ false
+ );
+ CKERR(r);
+ assert(grandchild_txn->child_manager == cm);
+ assert(grandchild_txn->parent == child_txn);
+ assert(child_txn->child == grandchild_txn);
+ assert(grandchild_txn->txnid.parent_id64 == root_txn->txnid.parent_id64);
+ assert(grandchild_txn->txnid.child_id64 == 3);
+ assert(grandchild_txn->live_root_txn_list == root_txn->live_root_txn_list);
+ assert(grandchild_txn->snapshot_txnid64 == root_txn->snapshot_txnid64);
+
+ assert(cm->m_root == root_txn);
+ assert(cm->m_last_xid == grandchild_txn->txnid.child_id64);
+
+ r = toku_txn_commit_txn(grandchild_txn, true, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(grandchild_txn);
+
+
+ // now after closing one grandchild txn, open another one
+ r = toku_txn_begin_txn(
+ NULL,
+ child_txn,
+ &grandchild_txn,
+ logger,
+ TXN_SNAPSHOT_ROOT,
+ false
+ );
+ CKERR(r);
+ assert(grandchild_txn->child_manager == cm);
+ assert(grandchild_txn->parent == child_txn);
+ assert(child_txn->child == grandchild_txn);
+ assert(grandchild_txn->txnid.parent_id64 == root_txn->txnid.parent_id64);
+ assert(grandchild_txn->txnid.child_id64 == 4);
+ assert(grandchild_txn->live_root_txn_list == root_txn->live_root_txn_list);
+ assert(grandchild_txn->snapshot_txnid64 == root_txn->snapshot_txnid64);
+
+ assert(cm->m_root == root_txn);
+ assert(cm->m_last_xid == grandchild_txn->txnid.child_id64);
+
+
+ TXNID_PAIR xid = {.parent_id64 = root_txn->txnid.parent_id64, .child_id64 = 100};
+ TOKUTXN recovery_txn = NULL;
+ r = toku_txn_begin_with_xid(
+ grandchild_txn,
+ &recovery_txn,
+ logger,
+ xid,
+ TXN_SNAPSHOT_NONE,
+ NULL,
+ true, // for recovery
+ false // read_only
+ );
+
+ assert(recovery_txn->child_manager == cm);
+ assert(recovery_txn->parent == grandchild_txn);
+ assert(grandchild_txn->child == recovery_txn);
+ assert(recovery_txn->txnid.parent_id64 == root_txn->txnid.parent_id64);
+ assert(recovery_txn->txnid.child_id64 == 100);
+ // ensure that no snapshot is made
+ assert(recovery_txn->live_root_txn_list == NULL);
+ assert(recovery_txn->snapshot_txnid64 == TXNID_NONE);
+
+ assert(cm->m_root == root_txn);
+ assert(cm->m_last_xid == recovery_txn->txnid.child_id64);
+
+
+ // now ensure that txn_child_manager::find_tokutxn_by_xid_unlocked works
+ TOKUTXN found_txn = NULL;
+ // first ensure that a dummy TXNID_PAIR cannot be found
+ TXNID_PAIR dummy_pair = { .parent_id64 = root_txn->txnid.parent_id64, .child_id64 = 1000};
+ cm->find_tokutxn_by_xid_unlocked(dummy_pair, &found_txn);
+ assert(found_txn == NULL);
+ cm->find_tokutxn_by_xid_unlocked(root_txn->txnid, &found_txn);
+ assert(found_txn == root_txn);
+ cm->find_tokutxn_by_xid_unlocked(child_txn->txnid, &found_txn);
+ assert(found_txn == child_txn);
+ cm->find_tokutxn_by_xid_unlocked(grandchild_txn->txnid, &found_txn);
+ assert(found_txn == grandchild_txn);
+ cm->find_tokutxn_by_xid_unlocked(recovery_txn->txnid, &found_txn);
+ assert(found_txn == recovery_txn);
+
+
+ // now ensure that the iterator works
+ found_txn = root_txn;
+ r = cm->iterate(txn_child_manager_test_cb, &found_txn);
+ CKERR(r);
+ assert(found_txn == NULL);
+
+ // now test that iterator properly stops
+ found_txn = child_txn;
+ r = cm->iterate(txn_child_manager_test_cb2, found_txn);
+ assert(r == -1);
+
+ r = toku_txn_commit_txn(recovery_txn, true, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(recovery_txn);
+ assert(grandchild_txn->child == NULL);
+
+ r = toku_txn_commit_txn(grandchild_txn, true, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(grandchild_txn);
+ assert(child_txn->child == NULL);
+
+ r = toku_txn_commit_txn(child_txn, true, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(child_txn);
+ assert(root_txn->child == NULL);
+
+ r = toku_txn_commit_txn(root_txn, true, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(root_txn);
+
+
+ clean_shutdown(&logger, &ct);
+}
+
+int test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ txn_child_manager_unit_test foo;
+ foo.run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc b/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc
new file mode 100644
index 00000000..7691ffaa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test-upgrade-recovery-logs.cc
@@ -0,0 +1,140 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test that recovery works correctly on a recovery log in a log directory.
+
+#include "test.h"
+#include <libgen.h>
+
+static void run_recovery(const char *testdir) {
+ int r;
+
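+    // The test directory name encodes the log version and shutdown state
+    // (e.g. "upgrade-recovery-logs-25-clean"; name illustrative), which is
+    // parsed here to decide what recovery behavior to expect.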
+ int log_version;
+ char shutdown[32+1];
+ r = sscanf(testdir, "upgrade-recovery-logs-%d-%32s", &log_version, shutdown);
+ assert(r == 2);
+
+ char **logfiles = nullptr;
+ int n_logfiles = 0;
+ r = toku_logger_find_logfiles(testdir, &logfiles, &n_logfiles);
+ CKERR(r);
+ assert(n_logfiles > 0);
+
+ FILE *f = fopen(logfiles[n_logfiles-1], "r");
+ assert(f);
+ uint32_t real_log_version;
+ r = toku_read_logmagic(f, &real_log_version);
+ CKERR(r);
+ assert((uint32_t)log_version == (uint32_t)real_log_version);
+ r = fclose(f);
+ CKERR(r);
+
+ toku_logger_free_logfiles(logfiles, n_logfiles);
+
+ // test needs recovery
+ r = tokuft_needs_recovery(testdir, false);
+ if (strcmp(shutdown, "clean") == 0) {
+ CKERR(r); // clean does not need recovery
+ } else if (strncmp(shutdown, "dirty", 5) == 0) {
+ CKERR2(r, 1); // dirty needs recovery
+ } else {
+ CKERR(EINVAL);
+ }
+
+ // test maybe upgrade log
+ LSN lsn_of_clean_shutdown;
+ bool upgrade_in_progress;
+ r = toku_maybe_upgrade_log(testdir, testdir, &lsn_of_clean_shutdown, &upgrade_in_progress);
+ if (strcmp(shutdown, "dirty") == 0 && log_version <= 24) {
+ CKERR2(r, TOKUDB_UPGRADE_FAILURE); // we don't support dirty upgrade from versions <= 24
+ return;
+ } else {
+ CKERR(r);
+ }
+
+ if (!verbose) {
+ // redirect stderr
+ int devnul = open(DEV_NULL_FILE, O_WRONLY);
+ assert(devnul >= 0);
+ int rr = toku_dup2(devnul, fileno(stderr));
+ assert(rr == fileno(stderr));
+ rr = close(devnul);
+ assert(rr == 0);
+ }
+
+ // run recovery
+ if (r == 0) {
+ r = tokuft_recover(NULL,
+ NULL_prepared_txn_callback,
+ NULL_keep_cachetable_callback,
+ NULL_logger, testdir, testdir, 0, 0, 0, NULL, 0);
+ CKERR(r);
+ }
+}
+
+int test_main(int argc, const char *argv[]) {
+ int i = 0;
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ break;
+ }
+ if (i < argc) {
+ const char *full_test_dir = argv[i];
+ const char *test_dir = basename((char *)full_test_dir);
+ if (strcmp(full_test_dir, test_dir) != 0) {
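+            // The test directory was given by path; copy it into the current
+            // directory so recovery runs against a scratch copy.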
+ int r;
+ char cmd[32 + strlen(full_test_dir) + strlen(test_dir)];
+ sprintf(cmd, "rm -rf %s", test_dir);
+ r = system(cmd);
+ CKERR(r);
+ sprintf(cmd, "cp -r %s %s", full_test_dir, test_dir);
+ r = system(cmd);
+ CKERR(r);
+ }
+ run_recovery(test_dir);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test.h b/storage/tokudb/PerconaFT/ft/tests/test.h
new file mode 100644
index 00000000..81faba20
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test.h
@@ -0,0 +1,349 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_portability.h>
+#include <toku_htonl.h>
+#include <toku_assert.h>
+#include <toku_stdlib.h>
+
+#include <stdio.h>
+#include <memory.h>
+#include <string.h>
+#include <portability/toku_path.h>
+
+#include "ft/serialize/block_allocator.h"
+#include "ft/serialize/block_table.h"
+#include "ft/cachetable/cachetable.h"
+#include "ft/cachetable/cachetable-internal.h"
+#include "ft/cursor.h"
+#include "ft/ft.h"
+#include "ft/ft-ops.h"
+#include "ft/serialize/ft-serialize.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "ft/logger/log-internal.h"
+#include "ft/logger/logger.h"
+#include "ft/node.h"
+#include "util/bytestring.h"
+
+#define CKERR(r) ({ int __r = r; if (__r!=0) fprintf(stderr, "%s:%d error %d %s\n", __FILE__, __LINE__, __r, strerror(__r)); assert(__r==0); })
+#define CKERR2(r,r2) do { if (r!=r2) fprintf(stderr, "%s:%d error %d %s, expected %d\n", __FILE__, __LINE__, r, strerror(r), r2); assert(r==r2); } while (0)
+#define CKERR2s(r,r2,r3) do { if (r!=r2 && r!=r3) fprintf(stderr, "%s:%d error %d %s, expected %d or %d\n", __FILE__, __LINE__, r, strerror(r), r2,r3); assert(r==r2||r==r3); } while (0)
+
+#define DEBUG_LINE() do { \
+ fprintf(stderr, "%s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+ fflush(stderr); \
+} while (0)
+
+const uint32_t len_ignore = 0xFFFFFFFF;
+
+static const prepared_txn_callback_t NULL_prepared_txn_callback __attribute__((__unused__)) = NULL;
+static const keep_cachetable_callback_t NULL_keep_cachetable_callback __attribute__((__unused__)) = NULL;
+static const TOKULOGGER NULL_logger __attribute__((__unused__)) = NULL;
+
+// dummymsn needed to simulate msn because test messages are injected at a lower level than toku_ft_root_put_msg()
+#define MIN_DUMMYMSN ((MSN) {(uint64_t)1<<62})
+static MSN dummymsn;
+static int dummymsn_initialized = 0;
+
+static void
+initialize_dummymsn(void) {
+ if (dummymsn_initialized == 0) {
+ dummymsn_initialized = 1;
+ dummymsn = MIN_DUMMYMSN;
+ }
+}
+
+static UU() MSN
+next_dummymsn(void) {
+ assert(dummymsn_initialized);
+ ++(dummymsn.msn);
+ return dummymsn;
+}
+
+static UU() MSN
+last_dummymsn(void) {
+ assert(dummymsn_initialized);
+ return dummymsn;
+}
+
+
+struct check_pair {
+ uint32_t keylen; // A keylen equal to 0xFFFFFFFF means don't check the keylen or the key.
+ const void *key; // A NULL key means don't check the key.
+ uint32_t vallen; // Similarly for vallen and null val.
+ const void *val;
+ int call_count;
+};
+static int
+lookup_checkf (uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *pair_v, bool lock_only) {
+ if (!lock_only) {
+ struct check_pair *pair = (struct check_pair *) pair_v;
+ if (key!=NULL) {
+ if (pair->keylen!=len_ignore) {
+ assert(pair->keylen == keylen);
+ if (pair->key)
+ assert(memcmp(pair->key, key, keylen)==0);
+ }
+ if (pair->vallen!=len_ignore) {
+ assert(pair->vallen == vallen);
+ if (pair->val)
+ assert(memcmp(pair->val, val, vallen)==0);
+ }
+ pair->call_count++; // this call_count is really how many calls were made with r==0
+ }
+ }
+ return 0;
+}
+
+static inline void
+ft_lookup_and_check_nodup (FT_HANDLE t, const char *keystring, const char *valstring)
+{
+ DBT k;
+ toku_fill_dbt(&k, keystring, strlen(keystring) + 1);
+ struct check_pair pair = {(uint32_t) (1+strlen(keystring)), keystring,
+ (uint32_t) (1+strlen(valstring)), valstring,
+ 0};
+ int r = toku_ft_lookup(t, &k, lookup_checkf, &pair);
+ assert(r==0);
+ assert(pair.call_count==1);
+}
+
+static inline void
+ft_lookup_and_fail_nodup (FT_HANDLE t, char *keystring)
+{
+ DBT k;
+ toku_fill_dbt(&k, keystring, strlen(keystring) + 1);
+ struct check_pair pair = {(uint32_t) (1+strlen(keystring)), keystring,
+ 0, 0,
+ 0};
+ int r = toku_ft_lookup(t, &k, lookup_checkf, &pair);
+ assert(r!=0);
+ assert(pair.call_count==0);
+}
+
+static UU() void fake_ydb_lock(void) {
+}
+
+static UU() void fake_ydb_unlock(void) {
+}
+
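+// Default no-op cachetable callbacks: they satisfy the cachetable callback
+// interface so tests can create pairs without doing any real I/O or eviction work.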
+static UU() void
+def_flush (CACHEFILE f __attribute__((__unused__)),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ void *v __attribute__((__unused__)),
+ void **dd __attribute__((__unused__)),
+ void *e __attribute__((__unused__)),
+ PAIR_ATTR s __attribute__((__unused__)),
+ PAIR_ATTR* new_size __attribute__((__unused__)),
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
+ ) {
+}
+
+static UU() void
+def_pe_est_callback(
+ void* UU(ftnode_pv),
+ void* UU(dd),
+ long* bytes_freed_estimate,
+ enum partial_eviction_cost *cost,
+ void* UU(write_extraargs)
+ )
+{
+ *bytes_freed_estimate = 0;
+ *cost = PE_CHEAP;
+}
+
+static UU() int
+def_pe_callback(
+ void *ftnode_pv __attribute__((__unused__)),
+ PAIR_ATTR bytes_to_free __attribute__((__unused__)),
+ void* extraargs __attribute__((__unused__)),
+ void (*finalize)(PAIR_ATTR bytes_freed, void *extra),
+ void *finalize_extra
+ )
+{
+ finalize(bytes_to_free, finalize_extra);
+ return 0;
+}
+
+static UU() void
+def_pe_finalize_impl(PAIR_ATTR UU(bytes_freed), void *UU(extra)) { }
+
+static UU() bool def_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return false;
+}
+
+static UU() int def_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
+ assert(false);
+ return 0;
+}
+
+static UU() int
+def_fetch (CACHEFILE f __attribute__((__unused__)),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY k __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
+ void **value __attribute__((__unused__)),
+ void **dd __attribute__((__unused__)),
+ PAIR_ATTR *sizep __attribute__((__unused__)),
+ int *dirtyp,
+ void *extraargs __attribute__((__unused__))
+ ) {
+ *dirtyp = 0;
+ *value = NULL;
+ *sizep = make_pair_attr(8);
+ return 0;
+}
+
+static UU() void
+put_callback_nop(
+ CACHEKEY UU(key),
+ void *UU(v),
+ PAIR UU(p)) {
+}
+
+static UU() int
+fetch_die(
+ CACHEFILE UU(thiscf),
+ PAIR UU(p),
+ int UU(fd),
+ CACHEKEY UU(key),
+ uint32_t UU(fullhash),
+ void **UU(value),
+ void **UU(dd),
+ PAIR_ATTR *UU(sizep),
+ int *UU(dirtyp),
+ void *UU(extraargs)
+ )
+{
+ assert(0); // should not be called
+ return 0;
+}
+
+
+static UU() int
+def_cleaner_callback(
+ void* UU(ftnode_pv),
+ BLOCKNUM UU(blocknum),
+ uint32_t UU(fullhash),
+ void* UU(extraargs)
+ )
+{
+ assert(false);
+ return 0;
+}
+
+static UU() CACHETABLE_WRITE_CALLBACK def_write_callback(void* write_extraargs) {
+ CACHETABLE_WRITE_CALLBACK wc;
+ wc.flush_callback = def_flush;
+ wc.pe_est_callback = def_pe_est_callback;
+ wc.pe_callback = def_pe_callback;
+ wc.cleaner_callback = def_cleaner_callback;
+ wc.write_extraargs = write_extraargs;
+ wc.clone_callback = nullptr;
+ wc.checkpoint_complete_callback = nullptr;
+ return wc;
+}
+
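+// Helpers that poke at evictor internals so tests can pin the cache size
+// watermarks, park the eviction thread, and read its run counter directly.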
+class evictor_test_helpers {
+public:
+ static void set_hysteresis_limits(evictor* ev, long low_size_watermark, long high_size_watermark) {
+ ev->m_low_size_watermark = low_size_watermark;
+ ev->m_low_size_hysteresis = low_size_watermark;
+ ev->m_high_size_hysteresis = high_size_watermark;
+ ev->m_high_size_watermark = high_size_watermark;
+ }
+ static void disable_ev_thread(evictor* ev) {
+ toku_mutex_lock(&ev->m_ev_thread_lock);
+ ev->m_period_in_seconds = 0;
+ // signal eviction thread so that it wakes up
+ // and then sleeps indefinitely
+ ev->signal_eviction_thread_locked();
+ toku_mutex_unlock(&ev->m_ev_thread_lock);
+ // sleep for one second to ensure eviction thread picks up new period
+ usleep(1*1024*1024);
+ }
+ static uint64_t get_num_eviction_runs(evictor* ev) {
+ return ev->m_num_eviction_thread_runs;
+ }
+};
+
+UU()
+static void copy_dbt(DBT *dest, const DBT *src) {
+ assert(dest->flags & DB_DBT_REALLOC);
+ dest->data = toku_realloc(dest->data, src->size);
+ dest->size = src->size;
+ memcpy(dest->data, src->data, src->size);
+}
+
+int verbose=0;
+
+static inline void
+default_parse_args (int argc, const char *argv[]) {
+ const char *progname=argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ ++verbose;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else {
+ fprintf(stderr, "Usage:\n %s [-v] [-q]\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+}
+
+int test_main(int argc, const char *argv[]);
+
+int
+main(int argc, const char *argv[]) {
+ initialize_dummymsn();
+ int rinit = toku_ft_layer_init();
+ CKERR(rinit);
+ int r = test_main(argc, argv);
+ toku_ft_layer_destroy();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test1308a.cc b/storage/tokudb/PerconaFT/ft/tests/test1308a.cc
new file mode 100644
index 00000000..8b25df6e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test1308a.cc
@@ -0,0 +1,90 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test the first case for the bug in #1308 (ft-serialize.c:33 does the cast wrong)
+
+#include "test.h"
+
+#include <string.h>
+
+#include <toku_portability.h>
+#include "../ft-ops.h"
+
+#define FNAME "test1308a.data"
+
+#define BUFSIZE (16<<20)
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__)))
+{
+ unlink(FNAME);
+
+ int fd;
+ {
+
+ static uint64_t buf [BUFSIZE]; // make this static because it's too big to fit on the stack.
+
+ fd = open(FNAME, O_CREAT+O_RDWR+O_BINARY, 0777);
+ assert(fd>=0);
+ memset(buf, 0, sizeof(buf));
+ uint64_t i;
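+        // Write 4GB of zeros, BUFSIZE bytes at a time, producing a file whose
+        // size does not fit in 32 bits (the cast bug from #1308).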
+ for (i=0; i<(1LL<<32); i+=BUFSIZE) {
+ toku_os_full_write(fd, buf, BUFSIZE);
+ }
+ }
+ int64_t file_size;
+ {
+ int r = toku_os_get_file_size(fd, &file_size);
+ assert(r==0);
+ }
+ {
+ int64_t size_after;
+ toku_maybe_preallocate_in_file(fd, 1000, file_size, &size_after);
+ assert(size_after == file_size);
+ }
+ int64_t file_size2;
+ {
+ int r = toku_os_get_file_size(fd, &file_size2);
+ assert(r==0);
+ }
+ assert(file_size==file_size2);
+ close(fd);
+
+ unlink(FNAME);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test3681.cc b/storage/tokudb/PerconaFT/ft/tests/test3681.cc
new file mode 100644
index 00000000..9e4a46e8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test3681.cc
@@ -0,0 +1,131 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test for #3681: iibench hangs. The scenario is
+// * Thread 1 calls root_put_msg, get_and_pin_root, 1 holds read lock on the root.
+// * Thread 2 calls checkpoint, marks the root for checkpoint.
+// * Thread 2 calls end_checkpoint, tries to write lock the root, sets want_write, and blocks on the rwlock because there is a reader.
+// * Thread 1 calls apply_msg_to_in_memory_leaves, calls get_and_pin_if_in_memory, tries to get a read lock on the root node and blocks on the rwlock because there is a write request on the lock.
+
+
+#include "cachetable/checkpoint.h"
+#include "test.h"
+
+CACHETABLE ct;
+FT_HANDLE t;
+
+static TOKUTXN const null_txn = 0;
+
+volatile bool done = false;
+
+static void setup (void) {
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ const char *fname = TOKU_TEST_FILENAME;
+ unlink(fname);
+ { int r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); }
+}
+
+
+static void finish (void) {
+ { int r = toku_close_ft_handle_nolsn(t, 0); assert(r==0); };
+ toku_cachetable_close(&ct);
+}
+
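+// Thread A: stream inserts into the tree (the root_put_msg side of the scenario above).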
+static void *starta (void *n) {
+ assert(n==NULL);
+ for (int i=0; i<10000; i++) {
+ DBT k,v;
+ char ks[20], vs[20];
+ snprintf(ks, sizeof(ks), "hello%03d", i);
+ snprintf(vs, sizeof(vs), "there%03d", i);
+ toku_ft_insert(t, toku_fill_dbt(&k, ks, strlen(ks)), toku_fill_dbt(&v, vs, strlen(vs)), null_txn);
+ usleep(1);
+ }
+ done = true;
+ return NULL;
+}
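+// Thread B: run checkpoints in a loop until thread A finishes (the checkpoint side of the scenario).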
+static void *startb (void *n) {
+ assert(n==NULL);
+ int count=0;
+ while (!done) {
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ int r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); assert(r==0);
+ count++;
+ }
+ printf("count=%d\n", count);
+ return NULL;
+}
+
+static void test3681(void) {
+ setup();
+ toku_pthread_t a, b;
+ {
+ int r;
+ r = toku_pthread_create(
+ toku_uninstrumented, &a, nullptr, starta, nullptr);
+ assert(r == 0);
+ }
+ {
+ int r;
+ r = toku_pthread_create(
+ toku_uninstrumented, &b, nullptr, startb, nullptr);
+ assert(r == 0);
+ }
+ {
+ int r;
+ void *v;
+ r = toku_pthread_join(a, &v);
+ assert(r == 0);
+ assert(v == NULL);
+ }
+ {
+ int r;
+ void *v;
+ r = toku_pthread_join(b, &v);
+ assert(r == 0);
+ assert(v == NULL);
+ }
+ finish();
+}
+
+int test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test3681();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/test3856.cc b/storage/tokudb/PerconaFT/ft/tests/test3856.cc
new file mode 100644
index 00000000..43b030a4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test3856.cc
@@ -0,0 +1,112 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// We used to copy the left and right keys of a range to be prelocked but
+// never freed them. This test checks that they are freed (as of this
+// writing, that happens in ftnode_fetch_extra::destroy()).
+
+#include "test.h"
+
+
+
+static const char *fname = TOKU_TEST_FILENAME;
+
+static TOKUTXN const null_txn = 0;
+static int const nodesize = 1<<12, basementnodesize = 1<<9;
+static const enum toku_compression_method compression_method = TOKU_DEFAULT_COMPRESSION_METHOD;
+static int const count = 1000;
+
+static int
+string_cmp(DB* UU(db), const DBT *a, const DBT *b)
+{
+ return strcmp((char*)a->data, (char*)b->data);
+}
+
+static int
+found(uint32_t UU(keylen), const void *key, uint32_t UU(vallen), const void *UU(val), void *UU(extra), bool lock_only)
+{
+ assert(key != NULL && !lock_only);
+ return 0;
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+
+ CACHETABLE ct;
+ FT_HANDLE t;
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ unlink(fname);
+ int r = toku_open_ft_handle(fname, 1, &t, nodesize, basementnodesize, compression_method, ct, null_txn, string_cmp); assert(r==0);
+
+ for (int i = 0; i < count; ++i) {
+ char key[100],val[100];
+ DBT k,v;
+ snprintf(key, 100, "hello%d", i);
+ snprintf(val, 100, "there%d", i);
+ toku_ft_insert(t, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), null_txn);
+ }
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r == 0);
+ toku_cachetable_close(&ct);
+
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &t, nodesize, basementnodesize, compression_method, ct, null_txn, string_cmp); assert(r == 0);
+
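+    // Scan the tree in 100-row chunks with a range-locked cursor; the copied
+    // left/right range keys must be freed when each cursor is closed.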
+ for (int n = 0; n < count/100; ++n) {
+ int i = n * 100;
+ FT_CURSOR c;
+ char lkey[100],rkey[100];
+ DBT lk, rk;
+ r = toku_ft_cursor(t, &c, null_txn, false, false); assert(r == 0);
+ snprintf(lkey, 100, "hello%d", i);
+ snprintf(rkey, 100, "hello%d", i + 100);
+ toku_ft_cursor_set_range_lock(c, toku_fill_dbt(&lk, lkey, 1+strlen(lkey)),
+ toku_fill_dbt(&rk, rkey, 1+strlen(rkey)),
+ false, false, 0);
+ r = toku_ft_cursor_set(c, &lk, found, NULL); assert(r == 0);
+ for (int j = 0; j < 100; ++j) {
+ r = toku_ft_cursor_next(c, found, NULL); assert(r == 0);
+ }
+ toku_ft_cursor_close(c);
+ }
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r == 0);
+ toku_cachetable_close(&ct);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test3884.cc b/storage/tokudb/PerconaFT/ft/tests/test3884.cc
new file mode 100644
index 00000000..5de55b0d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test3884.cc
@@ -0,0 +1,502 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test #3884: build leaf nodes by hand and exercise ftleaf_split under several
+// layouts (on a boundary, everything left, at the beginning, at the end, odd
+// basement counts), checking that basement node max MSNs survive the split.
+
+#include "test.h"
+
+
+#include <ft-cachetable-wrappers.h>
+#include <ft-flusher.h>
+
+// Some constants to be used in calculations below
+static const int nodesize = 1024; // Target max node size
+static const int eltsize = 64; // Element size (for most elements)
+static const int bnsize = 256; // Target basement node size
+static const int eltsperbn = 256 / 64; // bnsize / eltsize
+static const int keylen = sizeof(long);
+// vallen is eltsize - keylen and leafentry overhead
+static const int vallen = 64 - sizeof(long) - (sizeof(((LEAFENTRY)NULL)->type) // overhead from LE_CLEAN_MEMSIZE
+ +sizeof(uint32_t)
+ +sizeof(((LEAFENTRY)NULL)->u.clean.vallen));
+#define dummy_msn_3884 ((MSN) { (uint64_t) 3884 * MIN_MSN.msn })
+
+static TOKUTXN const null_txn = 0;
+static const char *fname = TOKU_TEST_FILENAME;
+
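+// Hand-build a clean leafentry for (key, val) directly inside the basement
+// node's bn_data, bypassing the normal message-application path.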
+static void
+le_add_to_bn(bn_data* bn, uint32_t idx, const char *key, int keysize, const char *val, int valsize)
+{
+ LEAFENTRY r = NULL;
+ uint32_t size_needed = LE_CLEAN_MEMSIZE(valsize);
+ void *maybe_free = nullptr;
+ bn->get_space_for_insert(
+ idx,
+ key,
+ keysize,
+ size_needed,
+ &r,
+ &maybe_free
+ );
+ if (maybe_free) {
+ toku_free(maybe_free);
+ }
+ resource_assert(r);
+ r->type = LE_CLEAN;
+ r->u.clean.vallen = valsize;
+ memcpy(r->u.clean.val, val, valsize);
+}
+
+
+static size_t
+insert_dummy_value(FTNODE node, int bn, long k, uint32_t idx)
+{
+ char val[vallen];
+ memset(val, k, sizeof val);
+ le_add_to_bn(BLB_DATA(node, bn), idx,(char *) &k, keylen, val, vallen);
+ return LE_CLEAN_MEMSIZE(vallen) + keylen + sizeof(uint32_t);
+}
+
+// TODO: this stuff should be in ft/ft-ops.cc, not in a test.
+// it makes it incredibly hard to add things to an ftnode
+// when tests hard code initializations...
+static void
+setup_ftnode_header(struct ftnode *node)
+{
+ node->flags = 0x11223344;
+ node->blocknum.b = 20;
+ node->layout_version = FT_LAYOUT_VERSION;
+ node->layout_version_original = FT_LAYOUT_VERSION;
+ node->height = 0;
+ node->set_dirty();
+ node->oldest_referenced_xid_known = TXNID_NONE;
+}
+
+static void
+setup_ftnode_partitions(struct ftnode *node, int n_children, const MSN msn, size_t maxbnsize UU())
+{
+ node->n_children = n_children;
+ node->max_msn_applied_to_node_on_disk = msn;
+ MALLOC_N(node->n_children, node->bp);
+ for (int bn = 0; bn < node->n_children; ++bn) {
+ BP_STATE(node, bn) = PT_AVAIL;
+ set_BLB(node, bn, toku_create_empty_bn());
+ BLB_MAX_MSN_APPLIED(node, bn) = msn;
+ }
+ node->pivotkeys.create_empty();
+}
+
+static void
+verify_basement_node_msns(FTNODE node, MSN expected)
+{
+ for(int i = 0; i < node->n_children; ++i) {
+ assert(expected.msn == BLB_MAX_MSN_APPLIED(node, i).msn);
+ }
+}
+
+//
+// Maximum node size according to the FT: 1024 (expected node size after split)
+// Maximum basement node size: 256
+// Actual node size before split: 2048
+// Actual basement node size before split: 256
+// Start by creating 8 basements, then split the node; the expected result is two nodes with 4 basements each.
+static void
+test_split_on_boundary(void)
+{
+ struct ftnode sn;
+
+ int fd = open(fname, O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
+
+ int r;
+
+ setup_ftnode_header(&sn);
+ const int nelts = 2 * nodesize / eltsize;
+ setup_ftnode_partitions(&sn, nelts * eltsize / bnsize, dummy_msn_3884, bnsize);
+ for (int bn = 0; bn < sn.n_children; ++bn) {
+ long k;
+ for (int i = 0; i < eltsperbn; ++i) {
+ k = bn * eltsperbn + i;
+ insert_dummy_value(&sn, bn, k, i);
+ }
+ if (bn < sn.n_children - 1) {
+ DBT pivotkey;
+ sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn);
+ }
+ }
+
+ unlink(fname);
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ FTNODE nodea, nodeb;
+ DBT splitk;
+ // if we haven't done it right, we should hit the assert in the top of move_leafentries
+ ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
+
+ verify_basement_node_msns(nodea, dummy_msn_3884);
+ verify_basement_node_msns(nodeb, dummy_msn_3884);
+
+ toku_unpin_ftnode(ft->ft, nodeb);
+ r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
+ toku_cachetable_close(&ct);
+
+ toku_destroy_dbt(&splitk);
+ toku_destroy_ftnode_internals(&sn);
+}
+
+//
+// Maximum node size according to the FT: 1024 (expected node size after split)
+// Maximum basement node size: 256 (except the last)
+// Actual node size before split: 4095
+// Actual basement node size before split: 256 (except the last, of size 2K)
+//
+// Start by creating 9 basements, the first 8 being of 256 bytes each,
+// and the last with one row of size 2047 bytes. Then split node,
+// expected result is two nodes, one with 8 basement nodes and one
+// with 1 basement node.
+static void
+test_split_with_everything_on_the_left(void)
+{
+ struct ftnode sn;
+
+ int fd = open(fname, O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
+
+ int r;
+
+ setup_ftnode_header(&sn);
+ const int nelts = 2 * nodesize / eltsize;
+ setup_ftnode_partitions(&sn, nelts * eltsize / bnsize + 1, dummy_msn_3884, 2 * nodesize);
+ size_t big_val_size = 0;
+ for (int bn = 0; bn < sn.n_children; ++bn) {
+ long k;
+ if (bn < sn.n_children - 1) {
+ for (int i = 0; i < eltsperbn; ++i) {
+ k = bn * eltsperbn + i;
+ big_val_size += insert_dummy_value(&sn, bn, k, i);
+ }
+ DBT pivotkey;
+ sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn);
+ } else {
+ k = bn * eltsperbn;
+ // we want this to be as big as the rest of our data and a
+ // little bigger, so the halfway mark will land inside this
+ // value and it will be split to the left
+ big_val_size += 100;
+ char * XMALLOC_N(big_val_size, big_val);
+ memset(big_val, k, big_val_size);
+ le_add_to_bn(BLB_DATA(&sn, bn), 0, (char *) &k, keylen, big_val, big_val_size);
+ toku_free(big_val);
+ }
+ }
+
+ unlink(fname);
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ FTNODE nodea, nodeb;
+ DBT splitk;
+ // if we haven't done it right, we should hit the assert in the top of move_leafentries
+ ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
+
+ toku_unpin_ftnode(ft->ft, nodeb);
+ r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
+ toku_cachetable_close(&ct);
+
+ toku_destroy_dbt(&splitk);
+ toku_destroy_ftnode_internals(&sn);
+}
+
+
+//
+// Maximum node size according to the FT: 1024 (expected node size after split)
+// Maximum basement node size: 256 (except the last)
+// Actual node size before split: 4095
+// Actual basement node size before split: 256 (except the last, of size 2K)
+//
+// Start by creating 9 basements, the first 8 being of 256 bytes each,
+// and the last with one row of size 2047 bytes. Then split node,
+// expected result is two nodes, one with 8 basement nodes and one
+// with 1 basement node.
+static void
+test_split_on_boundary_of_last_node(void)
+{
+ struct ftnode sn;
+
+ int fd = open(fname, O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
+
+ int r;
+
+ setup_ftnode_header(&sn);
+ const int nelts = 2 * nodesize / eltsize;
+ const size_t maxbnsize = 2 * nodesize;
+ setup_ftnode_partitions(&sn, nelts * eltsize / bnsize + 1, dummy_msn_3884, maxbnsize);
+ size_t big_val_size = 0;
+ for (int bn = 0; bn < sn.n_children; ++bn) {
+ long k;
+ if (bn < sn.n_children - 1) {
+ for (int i = 0; i < eltsperbn; ++i) {
+ k = bn * eltsperbn + i;
+ big_val_size += insert_dummy_value(&sn, bn, k, i);
+ }
+ DBT pivotkey;
+ sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn);
+ } else {
+ k = bn * eltsperbn;
+ // we want this to be slightly smaller than all the rest of
+ // the data combined, so the halfway mark will be just to its
+ // left and just this element will end up on the right of the split
+ big_val_size -= 1 + (sizeof(((LEAFENTRY)NULL)->type) // overhead from LE_CLEAN_MEMSIZE
+ +sizeof(uint32_t) // sizeof(keylen)
+ +sizeof(((LEAFENTRY)NULL)->u.clean.vallen));
+ invariant(big_val_size <= maxbnsize);
+ char * XMALLOC_N(big_val_size, big_val);
+ memset(big_val, k, big_val_size);
+ le_add_to_bn(BLB_DATA(&sn, bn), 0, (char *) &k, keylen, big_val, big_val_size);
+ toku_free(big_val);
+ }
+ }
+
+ unlink(fname);
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ FTNODE nodea, nodeb;
+ DBT splitk;
+ // if we haven't done it right, we should hit the assert in the top of move_leafentries
+ ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
+
+ toku_unpin_ftnode(ft->ft, nodeb);
+ r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
+ toku_cachetable_close(&ct);
+
+ toku_destroy_dbt(&splitk);
+ toku_destroy_ftnode_internals(&sn);
+}
+
+static void
+test_split_at_begin(void)
+{
+ struct ftnode sn;
+
+ int fd = open(fname, O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
+
+ int r;
+
+ setup_ftnode_header(&sn);
+ const int nelts = 2 * nodesize / eltsize;
+ const size_t maxbnsize = 2 * nodesize;
+ setup_ftnode_partitions(&sn, nelts * eltsize / bnsize, dummy_msn_3884, maxbnsize);
+ size_t totalbytes = 0;
+ for (int bn = 0; bn < sn.n_children; ++bn) {
+ long k;
+ for (int i = 0; i < eltsperbn; ++i) {
+ k = bn * eltsperbn + i;
+ if (bn == 0 && i == 0) {
+ // we'll add the first element later when we know how big
+ // to make it
+ continue;
+ }
+ totalbytes += insert_dummy_value(&sn, bn, k, i-1);
+ }
+ if (bn < sn.n_children - 1) {
+ DBT pivotkey;
+ sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn);
+ }
+ }
+ { // now add the first element
+ int bn = 0; long k = 0;
+ // add a few bytes so the halfway mark is definitely inside this
+ // val, which will make it go to the left and everything else to
+ // the right
+ char val[totalbytes + 3];
+ invariant(totalbytes + 3 <= maxbnsize);
+ memset(val, k, sizeof val);
+ le_add_to_bn(BLB_DATA(&sn, bn), 0, (char *) &k, keylen, val, totalbytes + 3);
+ totalbytes += LE_CLEAN_MEMSIZE(totalbytes + 3) + keylen + sizeof(uint32_t);
+ }
+
+ unlink(fname);
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ FTNODE nodea, nodeb;
+ DBT splitk;
+ // if we haven't done it right, we should hit the assert in the top of move_leafentries
+ ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
+
+ toku_unpin_ftnode(ft->ft, nodeb);
+ r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
+ toku_cachetable_close(&ct);
+
+ toku_destroy_dbt(&splitk);
+ toku_destroy_ftnode_internals(&sn);
+}
+
+static void
+test_split_at_end(void)
+{
+ struct ftnode sn;
+
+ int fd = open(fname, O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
+
+ int r;
+
+ setup_ftnode_header(&sn);
+ const int nelts = 2 * nodesize / eltsize;
+ const size_t maxbnsize = 2 * nodesize;
+ setup_ftnode_partitions(&sn, nelts * eltsize / bnsize, dummy_msn_3884, maxbnsize);
+ long totalbytes = 0;
+ int bn, i;
+ for (bn = 0; bn < sn.n_children; ++bn) {
+ long k;
+ for (i = 0; i < eltsperbn; ++i) {
+ k = bn * eltsperbn + i;
+ if (bn == sn.n_children - 1 && i == eltsperbn - 1) {
+ // add a few bytes so the halfway mark is definitely inside this
+ // val, which will make it go to the left and everything else to
+ // the right, which is nothing, so we actually split at the very end
+ char val[totalbytes + 3];
+ invariant(totalbytes + 3 <= (long) maxbnsize);
+ memset(val, k, sizeof val);
+ le_add_to_bn(BLB_DATA(&sn, bn), i, (char *) &k, keylen, val, totalbytes + 3);
+ totalbytes += LE_CLEAN_MEMSIZE(totalbytes + 3) + keylen + sizeof(uint32_t);
+ } else {
+ totalbytes += insert_dummy_value(&sn, bn, k, i);
+ }
+ }
+ if (bn < sn.n_children - 1) {
+ DBT pivotkey;
+ sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn);
+ }
+ }
+
+ unlink(fname);
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ FTNODE nodea, nodeb;
+ DBT splitk;
+ // if we haven't done it right, we should hit the assert in the top of move_leafentries
+ ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
+
+ toku_unpin_ftnode(ft->ft, nodeb);
+ r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
+ toku_cachetable_close(&ct);
+
+ toku_destroy_dbt(&splitk);
+ toku_destroy_ftnode_internals(&sn);
+}
+
+// Maximum node size according to the FT: 1024 (expected node size after split)
+// Maximum basement node size: 256
+// Actual node size before split: 2048
+// Actual basement node size before split: 256
+// Start by creating 9 basements, then split the node.
+// Expected result is two nodes, one with 5 basements and one with 4.
+static void
+test_split_odd_nodes(void)
+{
+ struct ftnode sn;
+
+ int fd = open(fname, O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(fd >= 0);
+
+ int r;
+
+ setup_ftnode_header(&sn);
+ // This will give us 9 children.
+ const int nelts = 2 * (nodesize + 128) / eltsize;
+ setup_ftnode_partitions(&sn, nelts * eltsize / bnsize, dummy_msn_3884, bnsize);
+ for (int bn = 0; bn < sn.n_children; ++bn) {
+ long k;
+ for (int i = 0; i < eltsperbn; ++i) {
+ k = bn * eltsperbn + i;
+ insert_dummy_value(&sn, bn, k, i);
+ }
+ if (bn < sn.n_children - 1) {
+ DBT pivotkey;
+ sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn);
+ }
+ }
+
+ unlink(fname);
+ CACHETABLE ct;
+ FT_HANDLE ft;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+
+ FTNODE nodea, nodeb;
+ DBT splitk;
+ // if we haven't done it right, we should hit the assert in the top of move_leafentries
+ ftleaf_split(ft->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
+
+ verify_basement_node_msns(nodea, dummy_msn_3884);
+ verify_basement_node_msns(nodeb, dummy_msn_3884);
+
+ toku_unpin_ftnode(ft->ft, nodeb);
+ r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0);
+ toku_cachetable_close(&ct);
+
+ toku_destroy_dbt(&splitk);
+ toku_destroy_ftnode_internals(&sn);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+
+ test_split_on_boundary();
+ test_split_with_everything_on_the_left();
+ test_split_on_boundary_of_last_node();
+ test_split_at_begin();
+ test_split_at_end();
+ test_split_odd_nodes();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test4115.cc b/storage/tokudb/PerconaFT/ft/tests/test4115.cc
new file mode 100644
index 00000000..b9e18bf4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test4115.cc
@@ -0,0 +1,98 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test that toku_ft_handle_stat64 works even when the comparison function does not accept an arbitrary prefix of the key.
+
+
+#include "test.h"
+
+#include <unistd.h>
+
+static TOKUTXN const null_txn = 0;
+
+const char *fname = TOKU_TEST_FILENAME;
+CACHETABLE ct;
+FT_HANDLE t;
+int keysize = 9;
+
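+// Comparison function that insists on full 9-byte keys, so stat64 cannot get
+// away with comparing only a prefix of a key.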
+static int dont_allow_prefix (DB *db __attribute__((__unused__)), const DBT *a, const DBT *b) {
+ assert(a->size==9 && b->size==9);
+ return toku_keycompare(a->data, a->size, b->data, b->size);
+}
+
+static void close_ft_and_ct (void) {
+ int r;
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+static void open_ft_and_ct (bool unlink_old) {
+ int r;
+ if (unlink_old) unlink(fname);
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname, 1, &t, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
+ toku_ft_set_bt_compare(t, dont_allow_prefix);
+}
+
+static void test_4115 (void) {
+ uint64_t limit=30000;
+ open_ft_and_ct(true);
+ for (uint64_t i=0; i<limit; i++) {
+ char key[100],val[100];
+ snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
+ snprintf(val, 100, "%08llu", (unsigned long long)2*i+1);
+ DBT k,v;
+ toku_ft_insert(t, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v,val, 1+strlen(val)), null_txn);
+ }
+ struct ftstat64_s s;
+ toku_ft_handle_stat64(t, NULL, &s);
+ assert(s.nkeys>0);
+ assert(s.dsize>0);
+ close_ft_and_ct();
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ test_4115();
+
+ if (verbose) printf("test ok\n");
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/tests/test4244.cc b/storage/tokudb/PerconaFT/ft/tests/test4244.cc
new file mode 100644
index 00000000..31e3b5cb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test4244.cc
@@ -0,0 +1,123 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The goal of this test: make sure that inserts stay behind deletes. */
+
+
+#include "test.h"
+
+#include <ft-cachetable-wrappers.h>
+
+static TOKUTXN const null_txn = 0;
+
+enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
+
+CACHETABLE ct;
+FT_HANDLE t;
+const char *fname = TOKU_TEST_FILENAME;
+
+static void
+doit (void) {
+ BLOCKNUM node_leaf, node_internal, node_root;
+
+ int r;
+
+ toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr);
+ unlink(fname);
+ r = toku_open_ft_handle(fname, 1, &t, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r==0);
+
+ toku_testsetup_initialize(); // must precede any other toku_testsetup calls
+
+ r = toku_testsetup_leaf(t, &node_leaf, 1, NULL, NULL);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(t, 1, &node_internal, 1, &node_leaf, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_nonleaf(t, 1, &node_root, 1, &node_internal, 0, 0);
+ assert(r==0);
+
+ r = toku_testsetup_root(t, node_root);
+ assert(r==0);
+
+ // make a 1MB val
+ uint32_t big_val_size = 1000000;
+ char* XCALLOC_N(big_val_size, big_val);
+ DBT k,v;
+ memset(&k, 0, sizeof(k));
+ memset(&v, 0, sizeof(v));
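+    // Insert the same key 100 times with a ~1MB value, pushing about 100MB of
+    // messages through the root toward node_internal.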
+ for (int i = 0; i < 100; i++) {
+ toku_ft_insert(t,
+ toku_fill_dbt(&k, "hello", 6),
+ toku_fill_dbt(&v, big_val, big_val_size),
+ null_txn);
+ }
+ toku_free(big_val);
+
+
+    // At this point we have inserted 100MB of messages. If the bug exists,
+    // node_internal will be huge.
+    // Pin it and verify that it is not.
+ FTNODE node;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(t->ft);
+ toku_pin_ftnode(
+ t->ft,
+ node_internal,
+ toku_cachetable_hash(t->ft->cf, node_internal),
+ &bfe,
+ PL_WRITE_EXPENSIVE,
+ &node,
+ true
+ );
+ assert(node->n_children == 1);
+    // Simply assert that the buffer is less than 50MB;
+    // we inserted 100MB of data into it.
+ assert(toku_bnc_nbytesinbuf(BNC(node, 0)) < 50*1000*1000);
+ toku_unpin_ftnode(t->ft, node);
+
+ r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
+ toku_cachetable_close(&ct);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ doit();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test_logcursor.cc b/storage/tokudb/PerconaFT/ft/tests/test_logcursor.cc
new file mode 100644
index 00000000..804f94f1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test_logcursor.cc
@@ -0,0 +1,265 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <string.h>
+
+#include "logger/logcursor.h"
+#include "test.h"
+
+#if defined(HAVE_LIMITS_H)
+# include <limits.h>
+#endif
+#if defined(HAVE_SYS_SYSLIMITS_H)
+# include <sys/syslimits.h>
+#endif
+
+const char LOGDIR[100] = "./dir.test_logcursor";
+const int FSYNC = 1;
+const int NO_FSYNC = 0;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+const char *a="a";
+const char *b="b";
+
+const FILENUM fn_aname = {0};
+const FILENUM fn_bname = {1};
+BYTESTRING bs_aname, bs_bname;
+BYTESTRING bs_a, bs_b;
+BYTESTRING bs_empty;
+
+static int create_logfiles(void);
+
+static int test_0 (void);
+static int test_1 (void);
+static void usage(void) {
+ printf("test_logcursors [OPTIONS]\n");
+ printf("[-v]\n");
+ printf("[-q]\n");
+}
+
+int test_main(int argc, const char *argv[]) {
+ int i;
+ for (i=1; i<argc; i++) {
+ const char *arg = argv[i];
+ if (arg[0] != '-')
+ break;
+ if (strcmp(arg, "-v")==0) {
+ verbose++;
+ } else if (strcmp(arg, "-q")==0) {
+ verbose = 0;
+ } else {
+ usage();
+ return 1;
+ }
+ }
+
+ int r = 0;
+ // start from a clean directory
+ char rmrf_msg[100];
+ sprintf(rmrf_msg, "rm -rf %s", LOGDIR);
+ r = system(rmrf_msg);
+ CKERR(r);
+ toku_os_mkdir(LOGDIR, S_IRWXU+S_IRWXG+S_IRWXO);
+ if ( (r=create_logfiles()) !=0 ) return r;
+
+ if ( (r=test_0()) !=0 ) return r;
+ if ( (r=test_1()) !=0 ) return r;
+
+ r = system(rmrf_msg);
+ CKERR(r);
+ return r;
+}
+
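+// test_0: walk the synthetic log forward and backward in several orders and
+// check that stepping past either end returns DB_NOTFOUND.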
+int test_0 (void) {
+ int r=0;
+ struct toku_logcursor *cursor;
+ struct log_entry *entry;
+
+ r = toku_logcursor_create(&cursor, LOGDIR); if (verbose) printf("create returns %d\n", r); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_destroy(&cursor); if (verbose) printf("destroy returns %d\n", r); assert(r==0);
+
+ r = toku_logcursor_create(&cursor, LOGDIR); if (verbose) printf("create returns %d\n", r); assert(r==0);
+ r = toku_logcursor_first(cursor, &entry); if (verbose) printf("First Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_destroy(&cursor); if (verbose) printf("destroy returns %d\n", r); assert(r==0);
+
+ r = toku_logcursor_create(&cursor, LOGDIR); if (verbose) printf("create returns %d\n", r); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_destroy(&cursor); if (verbose) printf("destroy returns %d\n", r); assert(r==0);
+
+ r = toku_logcursor_create(&cursor, LOGDIR); if (verbose) printf("create returns %d\n", r); assert(r==0);
+ r = toku_logcursor_last(cursor, &entry); if (verbose) printf("Last Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_destroy(&cursor); if (verbose) printf("destroy returns %d\n", r); assert(r==0);
+
+ r = toku_logcursor_create(&cursor, LOGDIR); if (verbose) printf("create returns %d\n", r); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); assert(r==DB_NOTFOUND);
+ r = toku_logcursor_destroy(&cursor); if (verbose) printf("destroy returns %d\n", r); assert(r==0);
+
+ r = toku_logcursor_create(&cursor, LOGDIR); if (verbose) printf("create returns %d\n", r); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); assert(r==DB_NOTFOUND);
+ r = toku_logcursor_destroy(&cursor); if (verbose) printf("destroy returns %d\n", r); assert(r==0);
+
+ r = toku_logcursor_create(&cursor, LOGDIR); if (verbose) printf("create returns %d\n", r); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_next(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd); assert(r==0);
+ r = toku_logcursor_prev(cursor, &entry); if (verbose) printf("Entry = %c\n", entry->cmd);
+ if ( verbose) {
+ if ( r == DB_NOTFOUND ) printf("PASS\n");
+ else printf("FAIL\n");
+ }
+ assert(r==DB_NOTFOUND);
+ r = toku_logcursor_destroy(&cursor); if (verbose) printf("destroy returns %d\n", r); assert(r==0);
+
+ return 0;
+}
+
+// test per-file version
+int test_1 (void) {
+ int r=0;
+ char logfile[PATH_MAX];
+ sprintf(logfile, "log000000000000.tokulog%d", TOKU_LOG_VERSION);
+
+ struct toku_logcursor *cursor;
+ struct log_entry *entry;
+
+ r = toku_logcursor_create_for_file(&cursor, LOGDIR, logfile);
+ if (verbose) printf("create returns %d\n", r);
+ assert(r==0);
+
+ r = toku_logcursor_last(cursor, &entry);
+ if (verbose) printf("entry = %c\n", entry->cmd);
+ assert(r==0);
+ assert(entry->cmd =='C');
+
+ r = toku_logcursor_destroy(&cursor);
+ if (verbose) printf("destroy returns %d\n", r);
+ assert(r==0);
+
+ return 0;
+}
+
+int create_logfiles() {
+ int r = 0;
+
+ TOKULOGGER logger;
+
+ LSN lsn = {0};
+ TXNID_PAIR txnid = {.parent_id64 = TXNID_NONE, .child_id64 = TXNID_NONE};
+ LSN begin_checkpoint_lsn;
+
+ uint32_t num_fassociate = 0;
+ uint32_t num_xstillopen = 0;
+
+ bs_aname.len = 4; bs_aname.data=(char *)"a.db";
+ bs_bname.len = 4; bs_bname.data=(char *)"b.db";
+ bs_a.len = 2; bs_a.data=(char *)"a";
+ bs_b.len = 2; bs_b.data=(char *)"b";
+ bs_empty.len = 0; bs_empty.data = NULL;
+
+
+ // create and open logger
+ r = toku_logger_create(&logger); assert(r==0);
+ r = toku_logger_open(LOGDIR, logger); assert(r==0);
+
+ // use old x1.tdb test log as basis
+ //xbegin 'b': lsn=1 parenttxnid=0 crc=00005f1f len=29
+ txnid.parent_id64 = 1;
+ toku_log_xbegin(logger, &lsn, NO_FSYNC, txnid, TXNID_PAIR_NONE);
+ //fcreate 'F': lsn=2 txnid=1 filenum=0 fname={len=4 data="a.db"} mode=0777 treeflags=0 crc=18a3d525 len=49
+ toku_log_fcreate(logger, &lsn, NO_FSYNC, NULL, txnid, fn_aname, bs_aname, 0x0777, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 0);
+ //commit 'C': lsn=3 txnid=1 crc=00001f1e len=29
+ toku_log_xcommit(logger, &lsn, FSYNC, NULL, txnid);
+ //xbegin 'b': lsn=4 parenttxnid=0 crc=00000a1f len=29
+ txnid.parent_id64 = 4; // Choosing ids based on old test instead of what should happen now.
+ toku_log_xbegin(logger, &lsn, NO_FSYNC, txnid, TXNID_PAIR_NONE);
+ //fcreate 'F': lsn=5 txnid=4 filenum=1 fname={len=4 data="b.db"} mode=0777 treeflags=0 crc=14a47925 len=49
+ toku_log_fcreate(logger, &lsn, NO_FSYNC, NULL, txnid, fn_bname, bs_bname, 0x0777, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 0);
+ //commit 'C': lsn=6 txnid=4 crc=0000c11e len=29
+ toku_log_xcommit(logger, &lsn, FSYNC, NULL, txnid);
+ //xbegin 'b': lsn=7 parenttxnid=0 crc=0000f91f len=29
+ txnid.parent_id64 = 7; // Choosing ids based on old test instead of what should happen now.
+ toku_log_xbegin(logger, &lsn, NO_FSYNC, txnid, TXNID_PAIR_NONE);
+ //enq_insert 'I': lsn=8 filenum=0 xid=7 key={len=2 data="a\000"} value={len=2 data="b\000"} crc=40b863e4 len=45
+ toku_log_enq_insert(logger, &lsn, NO_FSYNC, NULL, fn_aname, txnid, bs_a, bs_b);
+ //begin_checkpoint 'x': lsn=9 timestamp=1251309957584197 crc=cd067878 len=29
+ toku_log_begin_checkpoint(logger, &begin_checkpoint_lsn, NO_FSYNC, 1251309957584197, 0);
+ //fassociate 'f': lsn=11 filenum=1 fname={len=4 data="b.db"} crc=a7126035 len=33
+ toku_log_fassociate(logger, &lsn, NO_FSYNC, fn_bname, 0, bs_bname, 0);
+ num_fassociate++;
+ //fassociate 'f': lsn=12 filenum=0 fname={len=4 data="a.db"} crc=a70c5f35 len=33
+ toku_log_fassociate(logger, &lsn, NO_FSYNC, fn_aname, 0, bs_aname, 0);
+ num_fassociate++;
+ //xstillopen 's': lsn=10 txnid=7 parent=0 crc=00061816 len=37 <- obsolete
+ {
+ FILENUMS filenums = {0, NULL};
+ toku_log_xstillopen(logger, &lsn, NO_FSYNC, NULL, txnid, TXNID_PAIR_NONE,
+ 0, filenums, 0, 0, 0,
+ ROLLBACK_NONE, ROLLBACK_NONE, ROLLBACK_NONE);
+ }
+ num_xstillopen++;
+ //end_checkpoint 'X': lsn=13 txnid=9 timestamp=1251309957586872 crc=cd285c30 len=37
+ toku_log_end_checkpoint(logger, &lsn, FSYNC, begin_checkpoint_lsn, 1251309957586872, num_fassociate, num_xstillopen);
+ //enq_insert 'I': lsn=14 filenum=1 xid=7 key={len=2 data="b\000"} value={len=2 data="a\000"} crc=40388be4 len=45
+ toku_log_enq_insert(logger, &lsn, NO_FSYNC, NULL, fn_bname, txnid, bs_b, bs_a);
+ //commit 'C': lsn=15 txnid=7 crc=00016d1e len=29
+ toku_log_xcommit(logger, &lsn, FSYNC, NULL, txnid);
+
+ // close logger
+ r = toku_logger_close(&logger); assert(r==0);
+ return r;
+}
+
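
Note: the traversal pattern the cursor tests above exercise reduces to a single forward scan. The following is an illustrative sketch only, reusing the calls, LOGDIR, and verbose conventions from the test above; it is not part of the patch.

    struct toku_logcursor *cursor;
    struct log_entry *entry;
    int r = toku_logcursor_create(&cursor, LOGDIR);
    assert(r == 0);
    // walk every entry from oldest to newest; next returns DB_NOTFOUND at the end
    while ((r = toku_logcursor_next(cursor, &entry)) == 0) {
        if (verbose) printf("Entry = %c\n", entry->cmd);
    }
    assert(r == DB_NOTFOUND);
    r = toku_logcursor_destroy(&cursor);
    assert(r == 0);
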
diff --git a/storage/tokudb/PerconaFT/ft/tests/test_oexcl.cc b/storage/tokudb/PerconaFT/ft/tests/test_oexcl.cc
new file mode 100644
index 00000000..1675dfda
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test_oexcl.cc
@@ -0,0 +1,51 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ unlink(TOKU_TEST_FILENAME);
+ int fd0 = open (TOKU_TEST_FILENAME, O_RDWR|O_CREAT|O_EXCL, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(fd0>=0);
+ int fd1 = open (TOKU_TEST_FILENAME, O_RDWR|O_CREAT|O_EXCL, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(fd1==-1);
+ assert(errno==EEXIST);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test_rightmost_leaf_seqinsert_heuristic.cc b/storage/tokudb/PerconaFT/ft/tests/test_rightmost_leaf_seqinsert_heuristic.cc
new file mode 100644
index 00000000..90ee4661
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test_rightmost_leaf_seqinsert_heuristic.cc
@@ -0,0 +1,131 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <util/dbt.h>
+#include <ft/ft-cachetable-wrappers.h>
+
+// Each FT maintains a sequential insert heuristic to determine if it's
+// worth trying to insert directly into a well-known rightmost leaf node.
+//
+// The heuristic is only maintained when a rightmost leaf node is known.
+//
+// This test verifies that sequential inserts increase the seqinsert score
+// and that a single non-sequential insert resets the score.
+
+static void test_seqinsert_heuristic(void) {
+ int r = 0;
+ char name[TOKU_PATH_MAX + 1];
+ toku_path_join(name, 2, TOKU_TEST_FILENAME, "ftdata");
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); CKERR(r);
+
+ FT_HANDLE ft_handle;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(name, 1, &ft_handle,
+ 4*1024*1024, 64*1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD, ct, NULL,
+ toku_builtin_compare_fun); CKERR(r);
+ FT ft = ft_handle->ft;
+
+ int k;
+ DBT key, val;
+ const int val_size = 1024 * 1024;
+ char *XMALLOC_N(val_size, val_buf);
+ memset(val_buf, 'x', val_size);
+ toku_fill_dbt(&val, val_buf, val_size);
+
+ // Insert many rows sequentially. This is enough data to:
+ // - force the root to split (the rightmost leaf will then be known)
+ // - raise the seqinsert score high enough to enable direct rightmost injections
+ const int rows_to_insert = 200;
+ for (int i = 0; i < rows_to_insert; i++) {
+ k = toku_htonl(i);
+ toku_fill_dbt(&key, &k, sizeof(k));
+ toku_ft_insert(ft_handle, &key, &val, NULL);
+ }
+ invariant(ft->rightmost_blocknum.b != RESERVED_BLOCKNUM_NULL);
+ invariant(ft->seqinsert_score == FT_SEQINSERT_SCORE_THRESHOLD);
+
+ // Insert on the left extreme. The seq insert score is high enough
+ // that we will attempt to insert into the rightmost leaf. We won't
+ // be successful because key 0 won't be in the bounds of the rightmost leaf.
+ // This failure should reset the seqinsert score back to 0.
+ k = toku_htonl(0);
+ toku_fill_dbt(&key, &k, sizeof(k));
+ toku_ft_insert(ft_handle, &key, &val, NULL);
+ invariant(ft->seqinsert_score == 0);
+
+ // Insert in the middle. The score should not go up.
+ k = toku_htonl(rows_to_insert / 2);
+ toku_fill_dbt(&key, &k, sizeof(k));
+ toku_ft_insert(ft_handle, &key, &val, NULL);
+ invariant(ft->seqinsert_score == 0);
+
+ // Insert on the right extreme. The score should go up.
+ k = toku_htonl(rows_to_insert);
+ toku_fill_dbt(&key, &k, sizeof(k));
+ toku_ft_insert(ft_handle, &key, &val, NULL);
+ invariant(ft->seqinsert_score == 1);
+
+ // Insert on the right extreme again; the score should go up.
+ k = toku_htonl(rows_to_insert + 1);
+ toku_fill_dbt(&key, &k, sizeof(k));
+ toku_ft_insert(ft_handle, &key, &val, NULL);
+ invariant(ft->seqinsert_score == 2);
+
+ // Insert close to, but not at, the right extreme. The score should reset.
+ // -- the magic number 4 derives from the fact that vals are 1mb and nodes are 4mb
+ k = toku_htonl(rows_to_insert - 4);
+ toku_fill_dbt(&key, &k, sizeof(k));
+ toku_ft_insert(ft_handle, &key, &val, NULL);
+ invariant(ft->seqinsert_score == 0);
+
+ toku_free(val_buf);
+ toku_ft_handle_close(ft_handle);
+ toku_cachetable_close(&ct);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+}
+
+int test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_seqinsert_heuristic();
+ return 0;
+}
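
Note: a minimal standalone model of the scoring behavior this test checks, to help when reading the assertions above. It is an illustration only, not the FT's implementation; the threshold value of 4 is an assumption standing in for FT_SEQINSERT_SCORE_THRESHOLD.

    #include <cstdint>
    #include <cassert>

    struct seqinsert_model {
        uint32_t score = 0;
        static constexpr uint32_t threshold = 4;  // assumed stand-in for FT_SEQINSERT_SCORE_THRESHOLD
        // returns true when a direct rightmost injection would be attempted
        bool note_insert(bool key_is_rightmost) {
            if (!key_is_rightmost) {
                score = 0;              // any non-sequential insert resets the score
                return false;
            }
            if (score < threshold) {
                score++;                // sequential rightmost inserts raise the score
            }
            return score >= threshold;  // only inject directly once the threshold is reached
        }
    };

    int main(void) {
        seqinsert_model m;
        for (int i = 0; i < 4; i++) {
            m.note_insert(true);        // sequential inserts build the score up to the threshold
        }
        assert(m.note_insert(true));    // direct rightmost injection would now be attempted
        assert(!m.note_insert(false));  // one out-of-order insert resets the score
        assert(m.score == 0);
        return 0;
    }
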
diff --git a/storage/tokudb/PerconaFT/ft/tests/test_rightmost_leaf_split_merge.cc b/storage/tokudb/PerconaFT/ft/tests/test_rightmost_leaf_split_merge.cc
new file mode 100644
index 00000000..34edec67
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test_rightmost_leaf_split_merge.cc
@@ -0,0 +1,161 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <util/dbt.h>
+#include <ft/ft-cachetable-wrappers.h>
+#include <ft/ft-flusher.h>
+
+// Promotion tracks the rightmost blocknum in the FT when a message
+// is successfully promoted to a non-root leaf node on the right extreme.
+//
+// This test verifies that a split or merge of the rightmost leaf properly
+// maintains the rightmost blocknum, which stays constant: the pair's values are
+// swapped instead, just like the root blocknum.
+
+static void test_split_merge(void) {
+ int r = 0;
+ char name[TOKU_PATH_MAX + 1];
+ toku_path_join(name, 2, TOKU_TEST_FILENAME, "ftdata");
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); CKERR(r);
+
+ FT_HANDLE ft_handle;
+ CACHETABLE ct;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(name, 1, &ft_handle,
+ 4*1024*1024, 64*1024,
+ TOKU_DEFAULT_COMPRESSION_METHOD, ct, NULL,
+ toku_builtin_compare_fun); CKERR(r);
+
+ // We have a root blocknum, but no rightmost blocknum yet.
+ FT ft = ft_handle->ft;
+ invariant(ft->h->root_blocknum.b != RESERVED_BLOCKNUM_NULL);
+ invariant(ft->rightmost_blocknum.b == RESERVED_BLOCKNUM_NULL);
+
+ int k;
+ DBT key, val;
+ const int val_size = 1 * 1024 * 1024;
+ char *XMALLOC_N(val_size, val_buf);
+ memset(val_buf, 'x', val_size);
+ toku_fill_dbt(&val, val_buf, val_size);
+
+ // Insert 16 rows (should induce a few splits)
+ const int rows_to_insert = 16;
+ for (int i = 0; i < rows_to_insert; i++) {
+ k = toku_htonl(i);
+ toku_fill_dbt(&key, &k, sizeof(k));
+ toku_ft_insert(ft_handle, &key, &val, NULL);
+ }
+
+ // rightmost blocknum should be set, because the root split and promotion
+ // did a rightmost insertion directly into the rightmost leaf, lazily
+ // initializing the rightmost blocknum.
+ invariant(ft->rightmost_blocknum.b != RESERVED_BLOCKNUM_NULL);
+
+ BLOCKNUM root_blocknum = ft->h->root_blocknum;
+ FTNODE root_node;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ toku_pin_ftnode(ft, root_blocknum,
+ toku_cachetable_hash(ft->cf, ft->h->root_blocknum),
+ &bfe, PL_WRITE_EXPENSIVE, &root_node, true);
+ // root blocknum should be consistent
+ invariant(root_node->blocknum.b == ft->h->root_blocknum.b);
+ // root should have split at least once, and it should now be at height 1
+ invariant(root_node->n_children > 1);
+ invariant(root_node->height == 1);
+ // rightmost blocknum should no longer be the root, since the root split
+ invariant(ft->h->root_blocknum.b != ft->rightmost_blocknum.b);
+ // the right child should have the rightmost blocknum
+ invariant(BP_BLOCKNUM(root_node, root_node->n_children - 1).b == ft->rightmost_blocknum.b);
+
+ BLOCKNUM rightmost_blocknum_before_merge = ft->rightmost_blocknum;
+ const int num_children_before_merge = root_node->n_children;
+
+ // delete the last 6 rows.
+ // - 1mb each, so 6mb deleted
+ // - should be enough to delete the entire rightmost leaf + some of its neighbor
+ const int rows_to_delete = 6;
+ toku_unpin_ftnode(ft, root_node);
+ for (int i = 0; i < rows_to_delete; i++) {
+ k = toku_htonl(rows_to_insert - i);
+ toku_fill_dbt(&key, &k, sizeof(k));
+ toku_ft_delete(ft_handle, &key, NULL);
+ }
+ toku_pin_ftnode(ft, root_blocknum,
+ toku_cachetable_hash(ft->cf, root_blocknum),
+ &bfe, PL_WRITE_EXPENSIVE, &root_node, true);
+
+ // - rightmost leaf should be fusible after those deletes (which were promoted directly to the leaf)
+ FTNODE rightmost_leaf;
+ toku_pin_ftnode(ft, rightmost_blocknum_before_merge,
+ toku_cachetable_hash(ft->cf, rightmost_blocknum_before_merge),
+ &bfe, PL_WRITE_EXPENSIVE, &rightmost_leaf, true);
+ invariant(toku_ftnode_get_reactivity(ft, rightmost_leaf) == RE_FUSIBLE);
+ toku_unpin_ftnode(ft, rightmost_leaf);
+
+ // - merge the rightmost child now that it's fusible
+ toku_ft_merge_child(ft, root_node, root_node->n_children - 1);
+ toku_pin_ftnode(ft, root_blocknum,
+ toku_cachetable_hash(ft->cf, root_blocknum),
+ &bfe, PL_WRITE_EXPENSIVE, &root_node, true);
+
+ // the merge should have worked, and the root should still be at height 1
+ invariant(root_node->n_children < num_children_before_merge);
+ invariant(root_node->height == 1);
+ // the rightmost child of the root has the rightmost blocknum
+ invariant(BP_BLOCKNUM(root_node, root_node->n_children - 1).b == ft->rightmost_blocknum.b);
+ // the value for rightmost blocknum itself should not have changed
+ // (we keep it constant, like the root blocknum)
+ invariant(rightmost_blocknum_before_merge.b == ft->rightmost_blocknum.b);
+
+ toku_unpin_ftnode(ft, root_node);
+
+ toku_free(val_buf);
+ toku_ft_handle_close(ft_handle);
+ toku_cachetable_close(&ct);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+}
+
+int test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_split_merge();
+ return 0;
+}
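
Note: the merge step above follows a simple rule: only fuse the rightmost child once it reports RE_FUSIBLE. Below is a condensed sketch of that decision, assuming the same ft, bfe, and root_node variables and the pinning conventions used in the test; it is not additional test code.

    FTNODE leaf;
    toku_pin_ftnode(ft, ft->rightmost_blocknum,
                    toku_cachetable_hash(ft->cf, ft->rightmost_blocknum),
                    &bfe, PL_WRITE_EXPENSIVE, &leaf, true);
    auto re = toku_ftnode_get_reactivity(ft, leaf);
    toku_unpin_ftnode(ft, leaf);
    if (re == RE_FUSIBLE) {
        // a fusible rightmost leaf is merged into its left sibling through the root
        toku_ft_merge_child(ft, root_node, root_node->n_children - 1);
    }
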
diff --git a/storage/tokudb/PerconaFT/ft/tests/test_toku_malloc_plain_free.cc b/storage/tokudb/PerconaFT/ft/tests/test_toku_malloc_plain_free.cc
new file mode 100644
index 00000000..100eda07
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/test_toku_malloc_plain_free.cc
@@ -0,0 +1,52 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include "memory.h"
+#include "stdlib.h"
+
+#include "test.h"
+
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ char *XMALLOC_N(5, m);
+ (void)m;
+ toku_free(m);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24
new file mode 100755
index 00000000..9a56e83e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-clean/log000000000000.tokulog24
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24
new file mode 100755
index 00000000..c552cda6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-24-dirty/log000000000000.tokulog24
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25
new file mode 100755
index 00000000..26b8bcfb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-clean/log000000000000.tokulog25
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25
new file mode 100755
index 00000000..04d3190c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-25-dirty/log000000000000.tokulog25
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26
new file mode 100755
index 00000000..02047325
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-clean/log000000000000.tokulog26
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26
new file mode 100755
index 00000000..ce826b56
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-26-dirty/log000000000000.tokulog26
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27
new file mode 100755
index 00000000..9849b977
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-clean/log000000000000.tokulog27
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27
new file mode 100755
index 00000000..8b658ea4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-27-dirty/log000000000000.tokulog27
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28
new file mode 100644
index 00000000..11fecfb9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-clean/log000000000000.tokulog28
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28
new file mode 100644
index 00000000..b7a9b03b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-28-dirty/log000000000000.tokulog28
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29
new file mode 100644
index 00000000..a1f306f4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-clean/log000000000000.tokulog29
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29 b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29
new file mode 100644
index 00000000..b9e79eeb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade.data/upgrade-recovery-logs-29-dirty/log000000000000.tokulog29
Binary files differ
diff --git a/storage/tokudb/PerconaFT/ft/tests/upgrade_test_simple.cc b/storage/tokudb/PerconaFT/ft/tests/upgrade_test_simple.cc
new file mode 100644
index 00000000..1deb8e4b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/upgrade_test_simple.cc
@@ -0,0 +1,212 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <libgen.h>
+#include <sys/time.h>
+#include <portability/toku_path.h>
+#include "test.h"
+
+#include "ft-flusher.h"
+
+#include "cachetable/checkpoint.h"
+
+static TOKUTXN const null_txn = NULL;
+
+static int
+noop_getf(uint32_t UU(keylen), const void *UU(key), uint32_t UU(vallen), const void *UU(val), void *extra, bool UU(lock_only))
+{
+ int *CAST_FROM_VOIDP(calledp, extra);
+ (*calledp)++;
+ return 0;
+}
+
+static int
+get_one_value(FT_HANDLE t, CACHETABLE UU(ct), void *UU(extra))
+{
+ int r;
+ int called;
+ FT_CURSOR cursor;
+
+ r = toku_ft_cursor(t, &cursor, null_txn, false, false);
+ CKERR(r);
+ called = 0;
+ r = toku_ft_cursor_first(cursor, noop_getf, &called);
+ CKERR(r);
+ assert(called == 1);
+ toku_ft_cursor_close(cursor);
+ CKERR(r);
+
+ return 0;
+}
+
+static int
+progress(void *extra, float fraction)
+{
+ float *CAST_FROM_VOIDP(stop_at, extra);
+ if (fraction > *stop_at) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static int
+do_hot_optimize(FT_HANDLE t, CACHETABLE UU(ct), void *extra)
+{
+ float *CAST_FROM_VOIDP(fraction, extra);
+ uint64_t loops_run = 0;
+ int r = toku_ft_hot_optimize(t, NULL, NULL, progress, extra, &loops_run);
+ if (*fraction < 1.0) {
+ CKERR2(r, 1);
+ } else {
+ CKERR(r);
+ }
+ return 0;
+}
+
+static int
+insert_something(FT_HANDLE t, CACHETABLE UU(ct), void *UU(extra))
+{
+ assert(t);
+ unsigned int dummy_value = 1U << 31;
+ DBT key;
+ DBT val;
+ toku_fill_dbt(&key, &dummy_value, sizeof(unsigned int));
+ toku_fill_dbt(&val, &dummy_value, sizeof(unsigned int));
+ toku_ft_insert (t, &key, &val, 0);
+ return 0;
+}
+
+typedef int (*tree_cb)(FT_HANDLE t, CACHETABLE ct, void *extra);
+
+static int
+with_open_tree(const char *fname, tree_cb cb, void *cb_extra)
+{
+ int r, r2;
+ FT_HANDLE t;
+ CACHETABLE ct;
+
+ toku_cachetable_create(&ct, 16*(1<<20), ZERO_LSN, nullptr);
+ r = toku_open_ft_handle(fname,
+ 0,
+ &t,
+ 4*(1<<20),
+ 128*(1<<10),
+ TOKU_DEFAULT_COMPRESSION_METHOD,
+ ct,
+ null_txn,
+ toku_builtin_compare_fun
+ );
+ CKERR(r);
+
+ r2 = cb(t, ct, cb_extra);
+ r = toku_verify_ft(t);
+ CKERR(r);
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ CKERR(r);
+ r = toku_close_ft_handle_nolsn(t, 0);
+ CKERR(r);
+ toku_cachetable_close(&ct);
+
+ return r2;
+}
+
+#define TMPFTFMT "%s-tmpdata.ft"
+static const char *origft_6_0 = "upgrade_test_data.ft.6.0.gz";
+static const char *origft_5_0 = "upgrade_test_data.ft.5.0.gz";
+static const char *origft_4_2 = "upgrade_test_data.ft.4.2.gz";
+static const char *not_flat_4_2 = "upgrade_test_data.ft.4.2.not.flat.gz";
+
+static int
+run_test(const char *prog, const char *origft) {
+ int r;
+
+ char *fullprog = toku_strdup(__FILE__);
+ char *progdir = dirname(fullprog);
+
+ size_t templen = strlen(progdir) + strlen(prog) + strlen(TMPFTFMT) - 1;
+ char tempft[templen + 1];
+ snprintf(tempft, templen + 1, TMPFTFMT, prog);
+ toku_free(fullprog);
+ {
+ char origbuf[TOKU_PATH_MAX + 1];
+ char *datadir = getenv("TOKUDB_DATA");
+ toku_path_join(origbuf, 2, datadir, origft);
+ size_t len = 13 + strlen(origbuf) + strlen(tempft);
+ char buf[len + 1];
+ snprintf(buf, len + 1, "gunzip -c %s > %s", origbuf, tempft);
+ r = system(buf);
+ CKERR(r);
+ }
+
+ r = with_open_tree(tempft, get_one_value, NULL);
+ CKERR(r);
+ r = with_open_tree(tempft, insert_something, NULL);
+ CKERR(r);
+ float fraction = 0.5;
+ r = with_open_tree(tempft, do_hot_optimize, &fraction);
+ CKERR(r);
+ fraction = 1.0;
+ r = with_open_tree(tempft, do_hot_optimize, &fraction);
+ CKERR(r);
+ r = unlink(tempft);
+ CKERR(r);
+
+ return r;
+}
+
+int
+test_main(int argc __attribute__((__unused__)), const char *argv[])
+{
+ int r;
+
+ r = run_test(argv[0], origft_6_0);
+ CKERR(r);
+ r = run_test(argv[0], origft_5_0);
+ CKERR(r);
+ r = run_test(argv[0], origft_4_2);
+ CKERR(r);
+
+ r = run_test(argv[0], not_flat_4_2);
+ CKERR(r);
+
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc b/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
new file mode 100644
index 00000000..1ba5f1c2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
@@ -0,0 +1,249 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+
+// generate fractal trees with a given height, fanout, and number of leaf elements per leaf.
+// jam the child buffers with inserts.
+// this code can be used as a template to build broken trees
+//
+// This program (copied from make-tree.c) creates a tree with bad msns by commenting out
+// the setting of the msn:
+//
+// To correctly set msn per node:
+// - set in each non-leaf when message is injected into node (see insert_into_child_buffer())
+// - set in each leaf node (see append_leaf())
+// - set in root node (see test_make_tree())
+
+
+
+#include <ft-cachetable-wrappers.h>
+#include "test.h"
+
+static FTNODE
+make_node(FT_HANDLE ft, int height) {
+ FTNODE node = NULL;
+ int n_children = (height == 0) ? 1 : 0;
+ toku_create_new_ftnode(ft, &node, height, n_children);
+ if (n_children) BP_STATE(node,0) = PT_AVAIL;
+ return node;
+}
+
+static void
+append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) {
+ assert(leafnode->height == 0);
+
+ DBT thekey; toku_fill_dbt(&thekey, key, keylen);
+ DBT theval; toku_fill_dbt(&theval, val, vallen);
+
+ // get an index that we can use to create a new leaf entry
+ uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs();
+
+ MSN msn = next_dummymsn();
+
+ // apply an insert to the leaf node
+ ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // Create bad tree (don't do following):
+ // leafnode->max_msn_applied_to_node = msn;
+
+ // don't forget to dirty the node
+ leafnode->set_dirty();
+}
+
+static void
+populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
+ for (int i = 0; i < n; i++) {
+ int k = htonl(seq + i);
+ int v = seq + i;
+ append_leaf(leafnode, &k, sizeof k, &v, sizeof v);
+ }
+ *minkey = htonl(seq);
+ *maxkey = htonl(seq + n - 1);
+}
+
+static void
+insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, int maxkey) {
+ for (unsigned int val = htonl(minkey); val <= htonl(maxkey); val++) {
+ MSN msn = next_dummymsn();
+ unsigned int key = htonl(val);
+ DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key);
+ DBT theval; toku_fill_dbt(&theval, &val, sizeof val);
+ toku_ft_append_to_child_buffer(ft->ft->cmp, node, childnum, FT_INSERT, msn, toku_xids_get_root_xids(), true, &thekey, &theval);
+
+ // Create bad tree (don't do following):
+ // node->max_msn_applied_to_node = msn;
+ }
+}
+
+static FTNODE
+make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
+ FTNODE node;
+ if (height == 0) {
+ node = make_node(ft, 0);
+ populate_leaf(node, *seq, nperleaf, minkey, maxkey);
+ *seq += nperleaf;
+ } else {
+ node = make_node(ft, height);
+ int minkeys[fanout], maxkeys[fanout];
+ for (int childnum = 0; childnum < fanout; childnum++) {
+ FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
+ if (childnum == 0) {
+ toku_ft_nonleaf_append_child(node, child, NULL);
+ } else {
+ int k = maxkeys[childnum-1]; // use the max of the left tree
+ DBT pivotkey;
+ toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
+ }
+ toku_unpin_ftnode(ft->ft, child);
+ insert_into_child_buffer(ft, node, childnum, minkeys[childnum], maxkeys[childnum]);
+ }
+ *minkey = minkeys[0];
+ *maxkey = maxkeys[0];
+ for (int i = 1; i < fanout; i++) {
+ if (memcmp(minkey, &minkeys[i], sizeof minkeys[i]) > 0)
+ *minkey = minkeys[i];
+ if (memcmp(maxkey, &maxkeys[i], sizeof maxkeys[i]) < 0)
+ *maxkey = maxkeys[i];
+ }
+ }
+ return node;
+}
+
+static UU() void
+deleted_row(UU() DB *db, UU() DBT *key, UU() DBT *val) {
+}
+
+static void
+test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
+ int r;
+
+ // cleanup
+ const char *fname = TOKU_TEST_FILENAME;
+ r = unlink(fname);
+ assert(r == 0 || (r == -1 && errno == ENOENT));
+
+ // create a cachetable
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ // create the ft
+ TOKUTXN null_txn = NULL;
+ FT_HANDLE ft = NULL;
+ r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r == 0);
+
+ // make a tree
+ int seq = 0, minkey, maxkey;
+ FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
+
+ // set the new root to point to the new tree
+ toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum);
+
+ // Create bad tree (don't do following):
+ // newroot->max_msn_applied_to_node = last_dummymsn(); // capture msn of last message injected into tree
+
+ // unpin the new root
+ toku_unpin_ftnode(ft->ft, newroot);
+
+ if (do_verify) {
+ r = toku_verify_ft(ft);
+ assert(r != 0);
+ }
+
+ // flush to the file system
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+
+ // shutdown the cachetable
+ toku_cachetable_close(&ct);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ initialize_dummymsn();
+ int height = 1;
+ int fanout = 2;
+ int nperleaf = 8;
+ int do_verify = 1;
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--height") == 0 && i+1 < argc) {
+ height = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--fanout") == 0 && i+1 < argc) {
+ fanout = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nperleaf") == 0 && i+1 < argc) {
+ nperleaf = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--verify") == 0 && i+1 < argc) {
+ do_verify = atoi(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+ test_make_tree(height, fanout, nperleaf, do_verify);
+ return 0;
+}
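
Note: the broken invariant this program sets up is that a message's MSN can exceed the max_msn_applied_to_node recorded in the node holding it. Below is a toy check for that property, written against a simplified node model rather than the real FTNODE/verify code; all names are illustrative.

    #include <vector>
    #include <cstdint>
    #include <cassert>

    struct toy_node {
        uint64_t max_msn_applied;             // what the node claims it has seen
        std::vector<uint64_t> buffered_msns;  // MSNs of messages sitting in its buffers
    };

    // returns false if any buffered message is newer than the node admits,
    // which is the corruption this test constructs by skipping the
    // max_msn_applied_to_node updates marked "don't do following" above
    static bool msns_consistent(const toy_node &n) {
        for (uint64_t msn : n.buffered_msns) {
            if (msn > n.max_msn_applied) return false;
        }
        return true;
    }

    int main(void) {
        toy_node good = {10, {3, 7, 10}};
        toy_node bad  = {0,  {3, 7, 10}};   // msn was never recorded, as in this test
        assert(msns_consistent(good));
        assert(!msns_consistent(bad));
        return 0;
    }
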
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
new file mode 100644
index 00000000..42415a07
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
@@ -0,0 +1,215 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// generate a tree with bad pivots and check that ft->verify finds them
+
+
+#include <ft-cachetable-wrappers.h>
+#include "test.h"
+
+static FTNODE
+make_node(FT_HANDLE ft, int height) {
+ FTNODE node = NULL;
+ int n_children = (height == 0) ? 1 : 0;
+ toku_create_new_ftnode(ft, &node, height, n_children);
+ if (n_children) BP_STATE(node,0) = PT_AVAIL;
+ return node;
+}
+
+static void
+append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) {
+ assert(leafnode->height == 0);
+
+ DBT thekey; toku_fill_dbt(&thekey, key, keylen);
+ DBT theval; toku_fill_dbt(&theval, val, vallen);
+
+ // get an index that we can use to create a new leaf entry
+ uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs();
+
+ // apply an insert to the leaf node
+ MSN msn = next_dummymsn();
+ ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
+ leafnode->set_dirty();
+}
+
+static void
+populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
+ for (int i = 0; i < n; i++) {
+ int k = htonl(seq + i);
+ int v = seq + i;
+ append_leaf(leafnode, &k, sizeof k, &v, sizeof v);
+ }
+ *minkey = htonl(seq);
+ *maxkey = htonl(seq + n - 1);
+}
+
+static FTNODE
+make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
+ FTNODE node;
+ if (height == 0) {
+ node = make_node(ft, 0);
+ populate_leaf(node, *seq, nperleaf, minkey, maxkey);
+ *seq += nperleaf;
+ } else {
+ node = make_node(ft, height);
+ int minkeys[fanout], maxkeys[fanout];
+ for (int childnum = 0; childnum < fanout; childnum++) {
+ FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
+ if (childnum == 0) {
+ toku_ft_nonleaf_append_child(node, child, NULL);
+ } else {
+ int k = minkeys[childnum]; // use the min key of the right subtree, which creates a broken tree
+ DBT pivotkey;
+ toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
+ }
+ toku_unpin_ftnode(ft->ft, child);
+ }
+ *minkey = minkeys[0];
+ *maxkey = maxkeys[0];
+ for (int i = 1; i < fanout; i++) {
+ if (memcmp(minkey, &minkeys[i], sizeof minkeys[i]) > 0)
+ *minkey = minkeys[i];
+ if (memcmp(maxkey, &maxkeys[i], sizeof maxkeys[i]) < 0)
+ *maxkey = maxkeys[i];
+ }
+ }
+ return node;
+}
+
+static UU() void
+deleted_row(UU() DB *db, UU() DBT *key, UU() DBT *val) {
+}
+
+static void
+test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
+ int r;
+
+ // cleanup
+ const char *fname = TOKU_TEST_FILENAME;
+ r = unlink(fname);
+ assert(r == 0 || (r == -1 && errno == ENOENT));
+
+ // create a cachetable
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ // create the ft
+ TOKUTXN null_txn = NULL;
+ FT_HANDLE ft = NULL;
+ r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r == 0);
+
+ // make a tree
+ int seq = 0, minkey, maxkey;
+ FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
+
+ // discard the old root block
+ toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum);
+
+ // unpin the new root
+ toku_unpin_ftnode(ft->ft, newroot);
+
+ if (do_verify) {
+ r = toku_verify_ft(ft);
+ assert(r != 0);
+ }
+
+ // flush to the file system
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+
+ // shutdown the cachetable
+ toku_cachetable_close(&ct);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ int height = 1;
+ int fanout = 2;
+ int nperleaf = 8;
+ int do_verify = 1;
+ initialize_dummymsn();
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--height") == 0 && i+1 < argc) {
+ height = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--fanout") == 0 && i+1 < argc) {
+ fanout = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nperleaf") == 0 && i+1 < argc) {
+ nperleaf = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--verify") == 0 && i+1 < argc) {
+ do_verify = atoi(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+ test_make_tree(height, fanout, nperleaf, do_verify);
+ return 0;
+}
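
Note: the pivot corruption above (using the right subtree's min key as the pivot) violates the usual ordering rule, under which keys routed to child i compare at or below pivot i and strictly above pivot i-1. Below is a toy version of that check over plain ints; it is illustrative only, since the real verifier works on DBTs with the tree's comparator.

    #include <vector>
    #include <cassert>

    // each child carries the [min, max] of the keys stored under it
    struct toy_child { int min_key; int max_key; };

    static bool pivots_consistent(const std::vector<toy_child> &children,
                                  const std::vector<int> &pivots) {
        // n children are separated by n-1 pivots
        assert(pivots.size() + 1 == children.size());
        for (size_t i = 0; i + 1 < children.size(); i++) {
            // everything in child i must be <= pivot i,
            // everything in child i+1 must be > pivot i
            if (children[i].max_key > pivots[i]) return false;
            if (children[i + 1].min_key <= pivots[i]) return false;
        }
        return true;
    }

    int main(void) {
        // correct pivot: the max key of the left child
        assert(pivots_consistent({{0, 7}, {8, 15}}, {7}));
        // broken pivot: the min key of the right child, as built by this test
        assert(!pivots_consistent({{0, 7}, {8, 15}}, {8}));
        return 0;
    }
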
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc b/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
new file mode 100644
index 00000000..e31b13c4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
@@ -0,0 +1,160 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// generate a tree with a single leaf node containing duplicate keys
+// and check that ft verify finds them
+
+
+#include <ft-cachetable-wrappers.h>
+#include "test.h"
+
+static FTNODE
+make_node(FT_HANDLE ft, int height) {
+ FTNODE node = NULL;
+ int n_children = (height == 0) ? 1 : 0;
+ toku_create_new_ftnode(ft, &node, height, n_children);
+ if (n_children) BP_STATE(node,0) = PT_AVAIL;
+ return node;
+}
+
+static void
+append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) {
+ assert(leafnode->height == 0);
+
+ DBT thekey; toku_fill_dbt(&thekey, key, keylen);
+ DBT theval; toku_fill_dbt(&theval, val, vallen);
+
+ // get an index that we can use to create a new leaf entry
+ uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs();
+
+ // apply an insert to the leaf node
+ MSN msn = next_dummymsn();
+ ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
+ leafnode->set_dirty();
+}
+
+static void
+populate_leaf(FTNODE leafnode, int k, int v) {
+ append_leaf(leafnode, &k, sizeof k, &v, sizeof v);
+}
+
+static void
+test_dup_in_leaf(int do_verify) {
+ int r;
+
+ // cleanup
+ const char *fname = TOKU_TEST_FILENAME;
+ r = unlink(fname);
+ assert(r == 0 || (r == -1 && errno == ENOENT));
+
+ // create a cachetable
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ // create the ft
+ TOKUTXN null_txn = NULL;
+ FT_HANDLE ft = NULL;
+ r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r == 0);
+
+ // discard the old root block
+
+ FTNODE newroot = make_node(ft, 0);
+ populate_leaf(newroot, htonl(2), 1);
+ populate_leaf(newroot, htonl(2), 2);
+
+ // set the new root to point to the new tree
+ toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum);
+
+ // unpin the new root
+ toku_unpin_ftnode(ft->ft, newroot);
+
+ if (do_verify) {
+ r = toku_verify_ft(ft);
+ assert(r != 0);
+ }
+
+ // flush to the file system
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+
+ // shutdown the cachetable
+ toku_cachetable_close(&ct);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ int do_verify = 1;
+ initialize_dummymsn();
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--verify") == 0 && i+1 < argc) {
+ do_verify = atoi(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+ test_dup_in_leaf(do_verify);
+ return 0;
+}
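
Note: the corruption in this test is a leaf whose keys are not strictly increasing (two entries with the same key). Below is a toy version of the duplicate check over plain ints rather than the leaf's DBT keys; names and structure are illustrative only.

    #include <vector>
    #include <cassert>

    // leaf keys must be strictly increasing; an equal or out-of-order
    // neighbor is the corruption this test constructs
    static bool leaf_keys_ok(const std::vector<int> &keys) {
        for (size_t i = 1; i < keys.size(); i++) {
            if (!(keys[i - 1] < keys[i])) return false;
        }
        return true;
    }

    int main(void) {
        assert(leaf_keys_ok({1, 2, 3}));
        assert(!leaf_keys_ok({2, 2}));   // duplicate key, as inserted above
        return 0;
    }
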
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
new file mode 100644
index 00000000..009eda63
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
@@ -0,0 +1,219 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// generate a tree with duplicate pivots and check that ft->verify finds them
+
+
+#include <ft-cachetable-wrappers.h>
+#include "test.h"
+
+static FTNODE
+make_node(FT_HANDLE ft, int height) {
+ FTNODE node = NULL;
+ int n_children = (height == 0) ? 1 : 0;
+ toku_create_new_ftnode(ft, &node, height, n_children);
+ if (n_children) BP_STATE(node,0) = PT_AVAIL;
+ return node;
+}
+
+static void
+append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) {
+ assert(leafnode->height == 0);
+
+ DBT thekey; toku_fill_dbt(&thekey, key, keylen);
+ DBT theval; toku_fill_dbt(&theval, val, vallen);
+
+ // get an index that we can use to create a new leaf entry
+ uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs();
+
+ // apply an insert to the leaf node
+ MSN msn = next_dummymsn();
+ ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
+ leafnode->set_dirty();
+}
+
+static void
+populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
+ for (int i = 0; i < n; i++) {
+ int k = htonl(seq + i);
+ int v = seq + i;
+ append_leaf(leafnode, &k, sizeof k, &v, sizeof v);
+ }
+ *minkey = htonl(seq);
+ *maxkey = htonl(seq + n - 1);
+}
+
+static FTNODE
+make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
+ FTNODE node;
+ if (height == 0) {
+ node = make_node(ft, 0);
+ populate_leaf(node, *seq, nperleaf, minkey, maxkey);
+ *seq += nperleaf;
+ } else {
+ node = make_node(ft, height);
+ int minkeys[fanout], maxkeys[fanout];
+ for (int childnum = 0; childnum < fanout; childnum++) {
+ FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
+ if (childnum == 0) {
+ toku_ft_nonleaf_append_child(node, child, NULL);
+ } else {
+ int k = maxkeys[0]; // use duplicate pivots, should result in a broken tree
+ DBT pivotkey;
+ toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
+ }
+ toku_unpin_ftnode(ft->ft, child);
+ }
+ *minkey = minkeys[0];
+ *maxkey = maxkeys[0];
+ for (int i = 1; i < fanout; i++) {
+ if (memcmp(minkey, &minkeys[i], sizeof minkeys[i]) > 0)
+ *minkey = minkeys[i];
+ if (memcmp(maxkey, &maxkeys[i], sizeof maxkeys[i]) < 0)
+ *maxkey = maxkeys[i];
+ }
+ }
+ return node;
+}
+
+static UU() void
+deleted_row(UU() DB *db, UU() DBT *key, UU() DBT *val) {
+}
+
+static void
+test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
+ int r;
+
+ // cleanup
+ const char *fname = TOKU_TEST_FILENAME;
+ r = unlink(fname);
+ if (r != 0) {
+ assert(r == -1);
+ assert(get_error_errno() == ENOENT);
+ }
+
+ // create a cachetable
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ // create the ft
+ TOKUTXN null_txn = NULL;
+ FT_HANDLE ft = NULL;
+ r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r == 0);
+
+ // make a tree
+ int seq = 0, minkey, maxkey;
+ FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
+
+ // discard the old root block
+ // set the new root to point to the new tree
+ toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum);
+
+ // unpin the new root
+ toku_unpin_ftnode(ft->ft, newroot);
+
+ if (do_verify) {
+ r = toku_verify_ft(ft);
+ assert(r != 0);
+ }
+
+ // flush to the file system
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+
+ // shutdown the cachetable
+ toku_cachetable_close(&ct);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ int height = 1;
+ int fanout = 3;
+ int nperleaf = 8;
+ int do_verify = 1;
+ initialize_dummymsn();
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--height") == 0 && i+1 < argc) {
+ height = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--fanout") == 0 && i+1 < argc) {
+ fanout = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nperleaf") == 0 && i+1 < argc) {
+ nperleaf = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--verify") == 0 && i+1 < argc) {
+ do_verify = atoi(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+ test_make_tree(height, fanout, nperleaf, do_verify);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc b/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
new file mode 100644
index 00000000..5c639d8d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
@@ -0,0 +1,231 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// generate a tree with misrouted messages in the child buffers.
+// check that ft verify finds them.
+
+
+#include <ft-cachetable-wrappers.h>
+#include "test.h"
+
+static FTNODE
+make_node(FT_HANDLE ft, int height) {
+ FTNODE node = NULL;
+ int n_children = (height == 0) ? 1 : 0;
+ toku_create_new_ftnode(ft, &node, height, n_children);
+ if (n_children) BP_STATE(node,0) = PT_AVAIL;
+ return node;
+}
+
+static void
+append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) {
+ assert(leafnode->height == 0);
+
+ DBT thekey; toku_fill_dbt(&thekey, key, keylen);
+ DBT theval; toku_fill_dbt(&theval, val, vallen);
+
+ // get an index that we can use to create a new leaf entry
+ uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs();
+
+ // apply an insert to the leaf node
+ MSN msn = next_dummymsn();
+ ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
+ leafnode->set_dirty();
+}
+
+static void
+populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
+ for (int i = 0; i < n; i++) {
+ int k = htonl(seq + i);
+ int v = seq + i;
+ append_leaf(leafnode, &k, sizeof k, &v, sizeof v);
+ }
+ *minkey = htonl(seq);
+ *maxkey = htonl(seq + n - 1);
+}
+
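+// Fill childnum's message buffer with inserts spanning [minkey, maxkey+1].
+// For all but the last child, the key one past maxkey falls outside the
+// child's pivot range, producing the misrouted messages this test expects
+// toku_verify_ft to catch.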
+static void
+insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, int maxkey) {
+ int k = htonl(maxkey);
+ maxkey = htonl(k+1);
+ for (unsigned int val = htonl(minkey); val <= htonl(maxkey); val++) {
+ unsigned int key = htonl(val);
+ DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key);
+ DBT theval; toku_fill_dbt(&theval, &val, sizeof val);
+ MSN msn = next_dummymsn();
+ toku_ft_append_to_child_buffer(ft->ft->cmp, node, childnum, FT_INSERT, msn, toku_xids_get_root_xids(), true, &thekey, &theval);
+ }
+}
+
+static FTNODE
+make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
+ FTNODE node;
+ if (height == 0) {
+ node = make_node(ft, 0);
+ populate_leaf(node, *seq, nperleaf, minkey, maxkey);
+ *seq += nperleaf;
+ } else {
+ node = make_node(ft, height);
+ int minkeys[fanout], maxkeys[fanout];
+ for (int childnum = 0; childnum < fanout; childnum++) {
+ FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
+ if (childnum == 0) {
+ toku_ft_nonleaf_append_child(node, child, NULL);
+ } else {
+ int k = maxkeys[childnum-1]; // use the max of the left tree
+ DBT pivotkey;
+ toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
+ }
+ toku_unpin_ftnode(ft->ft, child);
+ insert_into_child_buffer(ft, node, childnum, minkeys[childnum], maxkeys[childnum]);
+ }
+ *minkey = minkeys[0];
+ *maxkey = maxkeys[0];
+ for (int i = 1; i < fanout; i++) {
+ if (memcmp(minkey, &minkeys[i], sizeof minkeys[i]) > 0)
+ *minkey = minkeys[i];
+ if (memcmp(maxkey, &maxkeys[i], sizeof maxkeys[i]) < 0)
+ *maxkey = maxkeys[i];
+ }
+ }
+ return node;
+}
+
+static UU() void
+deleted_row(UU() DB *db, UU() DBT *key, UU() DBT *val) {
+}
+
+static void
+test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
+ int r;
+
+ // cleanup
+ const char *fname = TOKU_TEST_FILENAME;
+ r = unlink(fname);
+ assert(r == 0 || (r == -1 && errno == ENOENT));
+
+ // create a cachetable
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ // create the ft
+ TOKUTXN null_txn = NULL;
+ FT_HANDLE ft = NULL;
+ r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r == 0);
+
+ // make a tree
+ int seq = 0, minkey, maxkey;
+ FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
+
+ // discard the old root block
+ // set the new root to point to the new tree
+ toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum);
+
+ // unpin the new root
+ toku_unpin_ftnode(ft->ft, newroot);
+
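+ // the child buffers contain misrouted messages, so verification is expected to fail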
+ if (do_verify) {
+ r = toku_verify_ft(ft);
+ assert(r != 0);
+ }
+
+ // flush to the file system
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+
+ // shutdown the cachetable
+ toku_cachetable_close(&ct);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ int height = 1;
+ int fanout = 2;
+ int nperleaf = 8;
+ int do_verify = 1;
+ initialize_dummymsn();
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--height") == 0 && i+1 < argc) {
+ height = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--fanout") == 0 && i+1 < argc) {
+ fanout = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nperleaf") == 0 && i+1 < argc) {
+ nperleaf = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--verify") == 0 && i+1 < argc) {
+ do_verify = atoi(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+ test_make_tree(height, fanout, nperleaf, do_verify);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
new file mode 100644
index 00000000..d55ec7a7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
@@ -0,0 +1,161 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// generate a tree with a single leaf node containing unsorted keys
+// check that ft verify finds them
+
+
+#include <ft-cachetable-wrappers.h>
+#include "test.h"
+
+static FTNODE
+make_node(FT_HANDLE ft, int height) {
+ FTNODE node = NULL;
+ int n_children = (height == 0) ? 1 : 0;
+ toku_create_new_ftnode(ft, &node, height, n_children);
+ if (n_children) BP_STATE(node,0) = PT_AVAIL;
+ return node;
+}
+
+static void
+append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) {
+ assert(leafnode->height == 0);
+
+ DBT thekey;
+ toku_fill_dbt(&thekey, key, keylen);
+ DBT theval;
+ toku_fill_dbt(&theval, val, vallen);
+
+ // get an index that we can use to create a new leaf entry
+ uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs();
+
+ // apply an insert to the leaf node
+ MSN msn = next_dummymsn();
+ ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
+ leafnode->set_dirty();
+}
+
+static void
+populate_leaf(FTNODE leafnode, int k, int v) {
+ append_leaf(leafnode, &k, sizeof k, &v, sizeof v);
+}
+
+static void
+test_dup_in_leaf(int do_verify) {
+ int r;
+
+ // cleanup
+ const char *fname = TOKU_TEST_FILENAME;
+ r = unlink(fname);
+ assert(r == 0 || (r == -1 && errno == ENOENT));
+
+ // create a cachetable
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ // create the ft
+ TOKUTXN null_txn = NULL;
+ FT_HANDLE ft = NULL;
+ r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r == 0);
+
+ // discard the old root block
+ FTNODE newroot = make_node(ft, 0);
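+ // insert keys out of order (2 before 1) so the leaf node is unsorted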
+ populate_leaf(newroot, htonl(2), 1);
+ populate_leaf(newroot, htonl(1), 2);
+
+ // set the new root to point to the new tree
+ toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum);
+
+ // unpin the new root
+ toku_unpin_ftnode(ft->ft, newroot);
+
+ if (do_verify) {
+ r = toku_verify_ft(ft);
+ assert(r != 0);
+ }
+
+ // flush to the file system
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+
+ // shutdown the cachetable
+ toku_cachetable_close(&ct);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ int do_verify = 1;
+ initialize_dummymsn();
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--verify") == 0 && i+1 < argc) {
+ do_verify = atoi(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+ test_dup_in_leaf(do_verify);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
new file mode 100644
index 00000000..ff231001
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
@@ -0,0 +1,215 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// generate a tree with unsorted pivots and check that ft->verify finds them
+
+
+#include <ft-cachetable-wrappers.h>
+#include "test.h"
+
+static FTNODE
+make_node(FT_HANDLE ft, int height) {
+ FTNODE node = NULL;
+ int n_children = (height == 0) ? 1 : 0;
+ toku_create_new_ftnode(ft, &node, height, n_children);
+ if (n_children) BP_STATE(node,0) = PT_AVAIL;
+ return node;
+}
+
+static void
+append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) {
+ assert(leafnode->height == 0);
+
+ DBT thekey; toku_fill_dbt(&thekey, key, keylen);
+ DBT theval; toku_fill_dbt(&theval, val, vallen);
+
+ // get an index that we can use to create a new leaf entry
+ uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs();
+
+ // apply an insert to the leaf node
+ MSN msn = next_dummymsn();
+ ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids());
+ txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
+ toku_ft_bn_apply_msg_once(
+ BLB(leafnode, 0),
+ msg,
+ idx,
+ keylen,
+ NULL,
+ &gc_info,
+ NULL,
+ NULL,
+ NULL);
+
+ // don't forget to dirty the node
+ leafnode->set_dirty();
+}
+
+static void
+populate_leaf(FTNODE leafnode, int seq, int n, int *minkey, int *maxkey) {
+ for (int i = 0; i < n; i++) {
+ int k = htonl(seq + i);
+ int v = seq + i;
+ append_leaf(leafnode, &k, sizeof k, &v, sizeof v);
+ }
+ *minkey = htonl(seq);
+ *maxkey = htonl(seq + n - 1);
+}
+
+static FTNODE
+make_tree(FT_HANDLE ft, int height, int fanout, int nperleaf, int *seq, int *minkey, int *maxkey) {
+ FTNODE node;
+ if (height == 0) {
+ node = make_node(ft, 0);
+ populate_leaf(node, *seq, nperleaf, minkey, maxkey);
+ *seq += nperleaf;
+ } else {
+ node = make_node(ft, height);
+ int minkeys[fanout], maxkeys[fanout];
+ for (int childnum = 0; childnum < fanout; childnum++) {
+ FTNODE child = make_tree(ft, height-1, fanout, nperleaf, seq, &minkeys[childnum], &maxkeys[childnum]);
+ if (childnum == 0) {
+ toku_ft_nonleaf_append_child(node, child, NULL);
+ } else {
+ int k = minkeys[fanout - childnum - 1]; // use unsorted pivots
+ DBT pivotkey;
+ toku_ft_nonleaf_append_child(node, child, toku_fill_dbt(&pivotkey, &k, sizeof k));
+ }
+ toku_unpin_ftnode(ft->ft, child);
+ }
+ *minkey = minkeys[0];
+ *maxkey = maxkeys[0];
+ for (int i = 1; i < fanout; i++) {
+ if (memcmp(minkey, &minkeys[i], sizeof minkeys[i]) > 0)
+ *minkey = minkeys[i];
+ if (memcmp(maxkey, &maxkeys[i], sizeof maxkeys[i]) < 0)
+ *maxkey = maxkeys[i];
+ }
+ }
+ return node;
+}
+
+static UU() void
+deleted_row(UU() DB *db, UU() DBT *key, UU() DBT *val) {
+}
+
+static void
+test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
+ int r;
+
+ // cleanup
+ const char *fname = TOKU_TEST_FILENAME;
+ r = unlink(fname);
+ assert(r == 0 || (r == -1 && errno == ENOENT));
+
+ // create a cachetable
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr);
+
+ // create the ft
+ TOKUTXN null_txn = NULL;
+ FT_HANDLE ft = NULL;
+ r = toku_open_ft_handle(fname, 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun);
+ assert(r == 0);
+
+ // make a tree
+ int seq = 0, minkey, maxkey;
+ FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey);
+
+ // discard the old root block
+ toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum);
+
+ // unpin the new root
+ toku_unpin_ftnode(ft->ft, newroot);
+
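+ // the pivot keys are out of order, so verification is expected to fail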
+ if (do_verify) {
+ r = toku_verify_ft(ft);
+ assert(r != 0);
+ }
+
+ // flush to the file system
+ r = toku_close_ft_handle_nolsn(ft, 0);
+ assert(r == 0);
+
+ // shutdown the cachetable
+ toku_cachetable_close(&ct);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ int height = 1;
+ int fanout = 3;
+ int nperleaf = 8;
+ int do_verify = 1;
+ initialize_dummymsn();
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--height") == 0 && i+1 < argc) {
+ height = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--fanout") == 0 && i+1 < argc) {
+ fanout = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nperleaf") == 0 && i+1 < argc) {
+ nperleaf = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--verify") == 0 && i+1 < argc) {
+ do_verify = atoi(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+ test_make_tree(height, fanout, nperleaf, do_verify);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/xid_lsn_independent.cc b/storage/tokudb/PerconaFT/ft/tests/xid_lsn_independent.cc
new file mode 100644
index 00000000..3ab3448b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/xid_lsn_independent.cc
@@ -0,0 +1,253 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include "toku_os.h"
+#include "cachetable/checkpoint.h"
+
+#define ENVDIR TOKU_TEST_FILENAME
+#include "test-ft-txns.h"
+
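+// Begin a txn under the given logger, optionally log its begin record (as a
+// writing txn would), then commit and close it.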
+static void do_txn(TOKULOGGER logger, bool readonly) {
+ int r;
+ TOKUTXN txn;
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ CKERR(r);
+
+ if (!readonly) {
+ toku_maybe_log_begin_txn_for_write_operation(txn);
+ }
+ r = toku_txn_commit_txn(txn, false, NULL, NULL);
+ CKERR(r);
+
+ toku_txn_close_txn(txn);
+}
+
+static void test_xid_lsn_independent(int N) {
+ TOKULOGGER logger;
+ CACHETABLE ct;
+ int r;
+
+ test_setup(TOKU_TEST_FILENAME, &logger, &ct);
+
+ FT_HANDLE ft;
+
+ TOKUTXN txn;
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ CKERR(r);
+
+ r = toku_open_ft_handle("ftfile", 1, &ft, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun);
+ CKERR(r);
+
+ r = toku_txn_commit_txn(txn, false, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(txn);
+
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false);
+ CKERR(r);
+ TXNID xid_first = txn->txnid.parent_id64;
+ unsigned int rands[N];
+ for (int i=0; i<N; i++) {
+ char key[100],val[300];
+ DBT k, v;
+ rands[i] = random();
+ snprintf(key, sizeof(key), "key%x.%x", rands[i], i);
+ memset(val, 'v', sizeof(val));
+ val[sizeof(val)-1]=0;
+ toku_ft_insert(ft, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), txn);
+ }
+ {
+ TOKUTXN txn2;
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn2, logger, TXN_SNAPSHOT_NONE, false);
+ CKERR(r);
+ // Verify the txnid has gone up only by one (even though many log entries were done)
+ invariant(txn2->txnid.parent_id64 == xid_first + 1);
+ r = toku_txn_commit_txn(txn2, false, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(txn2);
+ }
+ r = toku_txn_commit_txn(txn, false, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(txn);
+ {
+ //TODO(yoni) #5067 will break this portion of the test. (End ids are also assigned, so it would increase by 4 instead of 2.)
+ // Verify the txnid has gone up only by two (even though many log entries were done)
+ TOKUTXN txn3;
+ r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn3, logger, TXN_SNAPSHOT_NONE, false);
+ CKERR(r);
+ invariant(txn3->txnid.parent_id64 == xid_first + 2);
+ r = toku_txn_commit_txn(txn3, false, NULL, NULL);
+ CKERR(r);
+ toku_txn_close_txn(txn3);
+ }
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
+ r = toku_checkpoint(cp, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
+ CKERR(r);
+ r = toku_close_ft_handle_nolsn(ft, NULL);
+ CKERR(r);
+
+ clean_shutdown(&logger, &ct);
+}
+
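+// Return the most recently assigned XID from the logger's txn manager.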
+static TXNID
+logger_get_last_xid(TOKULOGGER logger) {
+ TXN_MANAGER mgr = toku_logger_get_txn_manager(logger);
+ return toku_txn_manager_get_last_xid(mgr);
+}
+
+static void test_xid_lsn_independent_crash_recovery(int N) {
+ TOKULOGGER logger;
+ CACHETABLE ct;
+ int r;
+
+ test_setup(TOKU_TEST_FILENAME, &logger, &ct);
+
+ for (int i=0; i < N - 1; i++) {
+ do_txn(logger, true);
+ }
+ do_txn(logger, false);
+
+ TXNID last_xid_before = logger_get_last_xid(logger);
+
+ toku_logger_close_rollback(logger);
+
+ toku_cachetable_close(&ct);
+ // "Crash"
+ r = toku_logger_close(&logger);
+ CKERR(r);
+ ct = NULL;
+ logger = NULL;
+
+ // "Recover"
+ test_setup_and_recover(TOKU_TEST_FILENAME, &logger, &ct);
+
+ TXNID last_xid_after = logger_get_last_xid(logger);
+
+ invariant(last_xid_after == last_xid_before);
+
+ shutdown_after_recovery(&logger, &ct);
+}
+
+static void test_xid_lsn_independent_shutdown_recovery(int N) {
+ TOKULOGGER logger;
+ CACHETABLE ct;
+ test_setup(TOKU_TEST_FILENAME, &logger, &ct);
+
+ for (int i=0; i < N - 1; i++) {
+ do_txn(logger, true);
+ }
+ do_txn(logger, false);
+
+ TXNID last_xid_before = logger_get_last_xid(logger);
+
+ clean_shutdown(&logger, &ct);
+
+ // Did a clean shutdown.
+
+ // "Recover"
+ test_setup_and_recover(TOKU_TEST_FILENAME, &logger, &ct);
+
+ TXNID last_xid_after = logger_get_last_xid(logger);
+
+ invariant(last_xid_after == last_xid_before);
+
+ shutdown_after_recovery(&logger, &ct);
+}
+
+static void test_xid_lsn_independent_parents(int N) {
+ TOKULOGGER logger;
+ CACHETABLE ct;
+ int r;
+
+ // Let txns[-1] be NULL
+ TOKUTXN txns_hack[N+1];
+ TOKUTXN *txns=&txns_hack[1];
+
+ int num_non_cascade = N;
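+ // Each pass logs begin records only for the first num_non_cascade txns;
+ // logging the begin of the youngest txn must then cascade to all of its
+ // not-yet-logged ancestors.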
+ do {
+ test_setup(TOKU_TEST_FILENAME, &logger, &ct);
+ ZERO_ARRAY(txns_hack);
+
+ for (int i = 0; i < N; i++) {
+ r = toku_txn_begin_txn((DB_TXN*)NULL, txns[i-1], &txns[i], logger, TXN_SNAPSHOT_NONE, false);
+ CKERR(r);
+
+ if (i < num_non_cascade) {
+ toku_maybe_log_begin_txn_for_write_operation(txns[i]);
+ invariant(txns[i]->begin_was_logged);
+ }
+ else {
+ invariant(!txns[i]->begin_was_logged);
+ }
+ }
+ for (int i = 0; i < N; i++) {
+ if (i < num_non_cascade) {
+ toku_maybe_log_begin_txn_for_write_operation(txns[i]);
+ invariant(txns[i]->begin_was_logged);
+ }
+ else {
+ invariant(!txns[i]->begin_was_logged);
+ }
+ }
+ toku_maybe_log_begin_txn_for_write_operation(txns[N-1]);
+ for (int i = 0; i < N; i++) {
+ invariant(txns[i]->begin_was_logged);
+ }
+ for (int i = N-1; i >= 0; i--) {
+ r = toku_txn_commit_txn(txns[i], false, NULL, NULL);
+ CKERR(r);
+
+ toku_txn_close_txn(txns[i]);
+ }
+ clean_shutdown(&logger, &ct);
+
+ num_non_cascade /= 2;
+ } while (num_non_cascade > 0);
+}
+
+int test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ for (int i=1; i<=128; i *= 2) {
+ test_xid_lsn_independent(i);
+ test_xid_lsn_independent_crash_recovery(i);
+ test_xid_lsn_independent_shutdown_recovery(i);
+ test_xid_lsn_independent_parents(i);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/tests/ybt-test.cc b/storage/tokudb/PerconaFT/ft/tests/ybt-test.cc
new file mode 100644
index 00000000..3d0e92fd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/tests/ybt-test.cc
@@ -0,0 +1,125 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+
+static void
+cleanup_and_free(struct simple_dbt *v) {
+ if (v->data) toku_free(v->data);
+ v->data = NULL;
+ v->len = 0;
+}
+
+static void
+cleanup(struct simple_dbt *v) {
+ v->data = NULL;
+ v->len = 0;
+}
+
+static void ybt_test0 (void) {
+ struct simple_dbt v0 = {0,0}, v1 = {0,0};
+ DBT t0,t1;
+ toku_init_dbt(&t0);
+ toku_init_dbt(&t1);
+ {
+ const void *temp1 = "hello";
+ toku_dbt_set(6, temp1, &t0, &v0);
+ }
+ {
+ const void *temp2 = "foo";
+ toku_dbt_set( 4, temp2, &t1, &v1);
+ }
+ assert(t0.size==6);
+ assert(strcmp((char*)t0.data, "hello")==0);
+ assert(t1.size==4);
+ assert(strcmp((char*)t1.data, "foo")==0);
+
+ {
+ const void *temp3 = "byebye";
+ toku_dbt_set(7, temp3, &t1, &v0); /* Use v0, not v1 */
+ }
+ // This assertion would be wrong, since v0 may have been realloc'd, and t0.data may now point
+ // at the wrong place
+ //assert(strcmp(t0.data, "byebye")==0); /* t0's data should be changed too, since it used v0 */
+ assert(strcmp((char*)t1.data, "byebye")==0);
+
+ cleanup_and_free(&v0);
+ cleanup_and_free(&v1);
+
+
+ /* See if we can probe to find out how big something is by setting ulen=0 with YBT_USERMEM */
+ toku_init_dbt(&t0);
+ t0.flags = DB_DBT_USERMEM;
+ t0.ulen = 0;
+ {
+ const void *temp4 = "hello";
+ toku_dbt_set(6, temp4, &t0, 0);
+ }
+ assert(t0.data==0);
+ assert(t0.size==6);
+
+ /* Check realloc. */
+ toku_init_dbt(&t0);
+ t0.flags = DB_DBT_REALLOC;
+ cleanup(&v0);
+ {
+ const void *temp5 = "internationalization";
+ toku_dbt_set(21, temp5, &t0, &v0);
+ }
+ assert(v0.data==0); /* Didn't change v0 */
+ assert(t0.size==21);
+ assert(strcmp((char*)t0.data, "internationalization")==0);
+
+ {
+ const void *temp6 = "provincial";
+ toku_dbt_set(11, temp6, &t0, &v0);
+ }
+ assert(t0.size==11);
+ assert(strcmp((char*)t0.data, "provincial")==0);
+
+ toku_free(t0.data);
+
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ ybt_test0();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ft/txn/roll.cc b/storage/tokudb/PerconaFT/ft/txn/roll.cc
new file mode 100644
index 00000000..7228de06
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/roll.cc
@@ -0,0 +1,692 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* rollback and rollforward routines. */
+
+#include <memory>
+#include "ft/ft-ops.h"
+#include "ft/ft.h"
+#include "ft/log_header.h"
+#include "ft/logger/log-internal.h"
+#include "ft/txn/rollback-apply.h"
+#include "ft/txn/xids.h"
+
+// functionality provided by roll.cc is exposed by an autogenerated
+// header file, log_header.h
+//
+// this (poorly) explains the absence of "roll.h"
+
+// these flags control whether or not we send commit messages for
+// various operations
+
+// When a transaction is committed, should we send a FT_COMMIT message
+// for each FT_INSERT message sent earlier by the transaction?
+#define TOKU_DO_COMMIT_CMD_INSERT 0
+
+// When a transaction is committed, should we send a FT_COMMIT message
+// for each FT_DELETE_ANY message sent earlier by the transaction?
+#define TOKU_DO_COMMIT_CMD_DELETE 1
+
+// When a transaction is committed, should we send a FT_COMMIT message
+// for each FT_UPDATE message sent earlier by the transaction?
+#define TOKU_DO_COMMIT_CMD_UPDATE 0
+
+int
+toku_commit_fdelete (FILENUM filenum,
+ TOKUTXN txn,
+ LSN UU(oplsn)) //oplsn is the lsn of the commit
+{
+ int r;
+ CACHEFILE cf;
+ CACHETABLE ct = txn->logger->ct;
+
+ // Try to get the cachefile for this filenum. A missing file on recovery
+ // is not an error, but a missing file outside of recovery is.
+ r = toku_cachefile_of_filenum(ct, filenum, &cf);
+ if (r == ENOENT) {
+ assert(txn->for_recovery);
+ r = 0;
+ goto done;
+ }
+ assert_zero(r);
+
+ // bug fix for #4718
+ // the bug was introduced with the fix for #3590
+ // Before Maxwell (and the fix for #3590),
+ // the recovery log was fsynced after the xcommit was logged but
+ // before we processed rollback entries and before we released
+ // the row locks (in the lock tree). Due to performance concerns,
+ // the fsync was moved to after the release of row locks, which comes
+ // after processing rollback entries. As a result, we may be unlinking a file
+ // here as part of a transaction that may abort if we do not fsync the log.
+ // So, we fsync the log here.
+ if (txn->logger) {
+ toku_logger_fsync_if_lsn_not_fsynced(txn->logger, txn->do_fsync_lsn);
+ }
+
+ // Mark the cachefile as unlink on close. There are two ways for close
+ // to be eventually called on the cachefile:
+ //
+ // - when this txn completes, it will release a reference on the
+ // ft and close it, UNLESS it was pinned by checkpoint
+ // - if the cf was pinned by checkpoint, an unpin will release the
+ // final reference and call close. it must be the final reference
+ // since this txn has exclusive access to dictionary (by the
+ // directory row lock for its dname) and we would not get this
+ // far if there were other live handles.
+ toku_cachefile_unlink_on_close(cf);
+done:
+ return r;
+}
+
+int
+toku_rollback_fdelete (FILENUM UU(filenum),
+ TOKUTXN UU(txn),
+ LSN UU(oplsn)) //oplsn is the lsn of the abort
+{
+ //Rolling back an fdelete is a no-op.
+ return 0;
+}
+
+int
+toku_commit_fcreate (FILENUM UU(filenum),
+ BYTESTRING UU(bs_fname),
+ TOKUTXN UU(txn),
+ LSN UU(oplsn))
+{
+ return 0;
+}
+
+int
+toku_rollback_fcreate (FILENUM filenum,
+ BYTESTRING UU(bs_fname),
+ TOKUTXN txn,
+ LSN UU(oplsn))
+{
+ int r;
+ CACHEFILE cf;
+ CACHETABLE ct = txn->logger->ct;
+
+ // Try to get the cachefile for this filenum. A missing file on recovery
+ // is not an error, but a missing file outside of recovery is.
+ r = toku_cachefile_of_filenum(ct, filenum, &cf);
+ if (r == ENOENT) {
+ r = 0;
+ goto done;
+ }
+ assert_zero(r);
+
+ // Mark the cachefile as unlink on close. There are two ways for close
+ // to be eventually called on the cachefile:
+ //
+ // - when this txn completes, it will release a reference on the
+ // ft and close it, UNLESS it was pinned by checkpoint
+ // - if the cf was pinned by checkpoint, an unpin will release the
+ // final reference and call close. it must be the final reference
+ // since this txn has exclusive access to dictionary (by the
+ // directory row lock for its dname) and we would not get this
+ // far if there were other live handles.
+ toku_cachefile_unlink_on_close(cf);
+ toku_cachefile_skip_log_recover_on_close(cf);
+done:
+ return 0;
+}
+
+int toku_commit_frename(BYTESTRING /* old_name */,
+ BYTESTRING /* new_iname */,
+ TOKUTXN /* txn */,
+ LSN UU(oplsn)) {
+ return 0;
+}
+
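+// Undo an frename: rename new_iname back to old_iname on disk (handling the
+// recovery cases where one or both files may be missing) and restore the
+// cachefile's fname in env if the new file is still open.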
+int toku_rollback_frename(BYTESTRING old_iname,
+ BYTESTRING new_iname,
+ TOKUTXN txn,
+ LSN UU(oplsn)) {
+ assert(txn);
+ assert(txn->logger);
+ assert(txn->logger->ct);
+
+ CACHETABLE cachetable = txn->logger->ct;
+
+ toku_struct_stat stat;
+ bool old_exist = true;
+ bool new_exist = true;
+
+ std::unique_ptr<char[], decltype(&toku_free)> old_iname_full(
+ toku_cachetable_get_fname_in_cwd(cachetable, old_iname.data),
+ &toku_free);
+ std::unique_ptr<char[], decltype(&toku_free)> new_iname_full(
+ toku_cachetable_get_fname_in_cwd(cachetable, new_iname.data),
+ &toku_free);
+
+ if (toku_stat(old_iname_full.get(), &stat, toku_uninstrumented) == -1) {
+ if (ENOENT == errno)
+ old_exist = false;
+ else
+ return 1;
+ }
+
+ if (toku_stat(new_iname_full.get(), &stat, toku_uninstrumented) == -1) {
+ if (ENOENT == errno || ENAMETOOLONG == errno)
+ new_exist = false;
+ else
+ return 1;
+ }
+
+ // Both the old and the new file can exist if:
+ // - rename() did not complete
+ // - fcreate was replayed during recovery
+ // The 'stale cachefiles' container cachefile_list::m_stale_fileid holds
+ // closed but not yet evicted cachefiles, keyed by the fs-dependent file id
+ // (device id, inode number) pair. To preserve the new cachefile's file id
+ // and keep it in the 'stale cachefiles' container, the old file is removed
+ // and the new file is renamed onto it.
+ if (old_exist && new_exist &&
+ (toku_os_delete(old_iname_full.get()) == -1 ||
+ toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 ||
+ toku_fsync_directory(new_iname_full.get()) == -1 ||
+ toku_fsync_directory(old_iname_full.get()) == -1))
+ return 1;
+
+ if (!old_exist && new_exist &&
+ (!toku_create_subdirs_if_needed(old_iname_full.get()) ||
+ toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 ||
+ toku_fsync_directory(new_iname_full.get()) == -1 ||
+ toku_fsync_directory(old_iname_full.get()) == -1))
+ return 1;
+
+ // it's ok if both files do not exist on recovery
+ if (!old_exist && !new_exist)
+ assert(txn->for_recovery);
+
+ CACHEFILE cf;
+ int r = toku_cachefile_of_iname_in_env(cachetable, new_iname.data, &cf);
+ if (r != ENOENT) {
+ char *old_fname_in_cf = toku_cachefile_fname_in_env(cf);
+ toku_cachefile_set_fname_in_env(cf, toku_xstrdup(old_iname.data));
+ toku_free(old_fname_in_cf);
+ // There is at least one case where fclose logging causes an error:
+ // 1) start a transaction
+ // 2) create ft 'a' (write "fcreate" to the recovery log)
+ // 3) rename ft 'a' to 'b' (write "frename" to the recovery log)
+ // 4) abort the transaction:
+ //    a) roll back the rename (renames 'b' to 'a')
+ //    b) roll back the create (removes 'a'):
+ //       invokes toku_cachefile_unlink_on_close - a lazy unlink on file
+ //       close that just sets the corresponding flag in the cachefile object
+ //    c) write "unlink" for 'a' to the recovery log
+ //       (when a transaction is aborted all locks are released; when the
+ //       file lock is released the file is closed and unlinked if the
+ //       corresponding flag is set in the cachefile object)
+ // 5) crash
+ //
+ // After this the recovery log contains the following records:
+ // - create ft 'a',
+ // - rename 'a' to 'b',
+ // - unlink 'a'
+ //
+ // On recovery:
+ // - create 'a'
+ // - rename 'a' to 'b'
+ // - unlink 'a' - since file 'a' does not exist, we crash on an assert here
+ //
+ // There is no need to write "unlink" to the recovery log in (4a) because
+ // 'a' will be removed on transaction rollback during recovery.
+ toku_cachefile_skip_log_recover_on_close(cf);
+ }
+
+ return 0;
+}
+
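+// Ordering function for txn->open_fts.find_zero(): compares an open FT's
+// filenum against the target filenum.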
+int find_ft_from_filenum (const FT &ft, const FILENUM &filenum);
+int find_ft_from_filenum (const FT &ft, const FILENUM &filenum) {
+ FILENUM thisfnum = toku_cachefile_filenum(ft->cf);
+ if (thisfnum.fileid<filenum.fileid) return -1;
+ if (thisfnum.fileid>filenum.fileid) return +1;
+ return 0;
+}
+
+// Input arg reset_root_xid_that_created true means that this operation has changed the definition of this dictionary.
+// (Example use is for schema change committed with txn that inserted cmdupdatebroadcast message.)
+// The oplsn argument is ZERO_LSN for normal operation. When this function is called for recovery, it has the LSN of
+// the operation (insert, delete, update, etc).
+static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key, BYTESTRING *data, TOKUTXN txn, LSN oplsn,
+ bool reset_root_xid_that_created) {
+ int r = 0;
+ //printf("%s:%d committing insert %s %s\n", __FILE__, __LINE__, key.data, data.data);
+ FT ft = nullptr;
+ r = txn->open_fts.find_zero<FILENUM, find_ft_from_filenum>(filenum, &ft, NULL);
+ if (r == DB_NOTFOUND) {
+ assert(txn->for_recovery);
+ r = 0;
+ goto done;
+ }
+ assert(r==0);
+
+ if (oplsn.lsn != 0) { // if we are executing the recovery algorithm
+ LSN treelsn = toku_ft_checkpoint_lsn(ft);
+ if (oplsn.lsn <= treelsn.lsn) { // if operation was already applied to tree ...
+ r = 0; // ... do not apply it again.
+ goto done;
+ }
+ }
+
+ DBT key_dbt,data_dbt;
+ XIDS xids;
+ xids = toku_txn_get_xids(txn);
+ {
+ const DBT *kdbt = key.len > 0 ? toku_fill_dbt(&key_dbt, key.data, key.len) :
+ toku_init_dbt(&key_dbt);
+ const DBT *vdbt = data ? toku_fill_dbt(&data_dbt, data->data, data->len) :
+ toku_init_dbt(&data_dbt);
+ ft_msg msg(kdbt, vdbt, type, ZERO_MSN, xids);
+
+ TXN_MANAGER txn_manager = toku_logger_get_txn_manager(txn->logger);
+ txn_manager_state txn_state_for_gc(txn_manager);
+
+ TXNID oldest_referenced_xid_estimate = toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager);
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_estimate,
+ // no messages above us, we can implicitly promote uxrs based on this xid
+ oldest_referenced_xid_estimate,
+ !txn->for_recovery);
+ toku_ft_root_put_msg(ft, msg, &gc_info);
+ if (reset_root_xid_that_created) {
+ TXNID new_root_xid_that_created = toku_xids_get_outermost_xid(xids);
+ toku_reset_root_xid_that_created(ft, new_root_xid_that_created);
+ }
+ }
+done:
+ return r;
+}
+
+
+static int do_nothing_with_filenum(TOKUTXN UU(txn), FILENUM UU(filenum)) {
+ return 0;
+}
+
+
+int toku_commit_cmdinsert (FILENUM filenum, BYTESTRING UU(key), TOKUTXN txn, LSN UU(oplsn)) {
+#if TOKU_DO_COMMIT_CMD_INSERT
+ return do_insertion (FT_COMMIT_ANY, filenum, key, 0, txn, oplsn, false);
+#else
+ return do_nothing_with_filenum(txn, filenum);
+#endif
+}
+
+int
+toku_rollback_cmdinsert (FILENUM filenum,
+ BYTESTRING key,
+ TOKUTXN txn,
+ LSN oplsn)
+{
+ return do_insertion (FT_ABORT_ANY, filenum, key, 0, txn, oplsn, false);
+}
+
+int
+toku_commit_cmdupdate(FILENUM filenum,
+ BYTESTRING UU(key),
+ TOKUTXN txn,
+ LSN UU(oplsn))
+{
+#if TOKU_DO_COMMIT_CMD_UPDATE
+ return do_insertion(FT_COMMIT_ANY, filenum, key, 0, txn, oplsn, false);
+#else
+ return do_nothing_with_filenum(txn, filenum);
+#endif
+}
+
+int
+toku_rollback_cmdupdate(FILENUM filenum,
+ BYTESTRING key,
+ TOKUTXN txn,
+ LSN oplsn)
+{
+ return do_insertion(FT_ABORT_ANY, filenum, key, 0, txn, oplsn, false);
+}
+
+int
+toku_commit_cmdupdatebroadcast(FILENUM filenum,
+ bool is_resetting_op,
+ TOKUTXN txn,
+ LSN oplsn)
+{
+ // if is_resetting_op, reset root_xid_that_created in
+ // relevant ft.
+ bool reset_root_xid_that_created = is_resetting_op;
+ const enum ft_msg_type msg_type = (is_resetting_op
+ ? FT_COMMIT_BROADCAST_ALL
+ : FT_COMMIT_BROADCAST_TXN);
+ BYTESTRING nullkey = { 0, NULL };
+ return do_insertion(msg_type, filenum, nullkey, 0, txn, oplsn, reset_root_xid_that_created);
+}
+
+int
+toku_rollback_cmdupdatebroadcast(FILENUM filenum,
+ bool UU(is_resetting_op),
+ TOKUTXN txn,
+ LSN oplsn)
+{
+ BYTESTRING nullkey = { 0, NULL };
+ return do_insertion(FT_ABORT_BROADCAST_TXN, filenum, nullkey, 0, txn, oplsn, false);
+}
+
+int
+toku_commit_cmddelete (FILENUM filenum,
+ BYTESTRING key,
+ TOKUTXN txn,
+ LSN oplsn)
+{
+#if TOKU_DO_COMMIT_CMD_DELETE
+ return do_insertion (FT_COMMIT_ANY, filenum, key, 0, txn, oplsn, false);
+#else
+ key = key; oplsn = oplsn;
+ return do_nothing_with_filenum(txn, filenum);
+#endif
+}
+
+int
+toku_rollback_cmddelete (FILENUM filenum,
+ BYTESTRING key,
+ TOKUTXN txn,
+ LSN oplsn)
+{
+ return do_insertion (FT_ABORT_ANY, filenum, key, 0, txn, oplsn, false);
+}
+
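+// Apply func (commit or abort) to every entry in a child txn's spilled
+// rollback logs, walking the log nodes from tail to head and unpinning and
+// removing each node as it is consumed.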
+static int
+toku_apply_rollinclude (TXNID_PAIR xid,
+ uint64_t num_nodes,
+ BLOCKNUM spilled_head,
+ BLOCKNUM spilled_tail,
+ TOKUTXN txn,
+ LSN oplsn,
+ apply_rollback_item func) {
+ int r = 0;
+ struct roll_entry *item;
+
+ BLOCKNUM next_log = spilled_tail;
+ uint64_t last_sequence = num_nodes;
+
+ bool found_head = false;
+ assert(next_log.b != ROLLBACK_NONE.b);
+ while (next_log.b != ROLLBACK_NONE.b) {
+ //pin log
+ ROLLBACK_LOG_NODE log;
+ toku_get_and_pin_rollback_log(txn, next_log, &log);
+ toku_rollback_verify_contents(log, xid, last_sequence - 1);
+ last_sequence = log->sequence;
+
+ toku_maybe_prefetch_previous_rollback_log(txn, log);
+
+ while ((item=log->newest_logentry)) {
+ log->newest_logentry = item->prev;
+ r = func(txn, item, oplsn);
+ if (r!=0) return r;
+ }
+ if (next_log.b == spilled_head.b) {
+ assert(!found_head);
+ found_head = true;
+ assert(log->sequence == 0);
+ }
+ next_log = log->previous;
+ {
+ //Clean up transaction structure to prevent
+ //toku_txn_close from double-freeing
+ spilled_tail = next_log;
+ if (found_head) {
+ assert(next_log.b == ROLLBACK_NONE.b);
+ spilled_head = next_log;
+ }
+ }
+ toku_rollback_log_unpin_and_remove(txn, log);
+ }
+ return r;
+}
+
+int
+toku_commit_rollinclude (TXNID_PAIR xid,
+ uint64_t num_nodes,
+ BLOCKNUM spilled_head,
+ BLOCKNUM spilled_tail,
+ TOKUTXN txn,
+ LSN oplsn) {
+ int r;
+ r = toku_apply_rollinclude(xid, num_nodes,
+ spilled_head,
+ spilled_tail,
+ txn, oplsn,
+ toku_commit_rollback_item);
+ return r;
+}
+
+int
+toku_rollback_rollinclude (TXNID_PAIR xid,
+ uint64_t num_nodes,
+ BLOCKNUM spilled_head,
+ BLOCKNUM spilled_tail,
+ TOKUTXN txn,
+ LSN oplsn) {
+ int r;
+ r = toku_apply_rollinclude(xid, num_nodes,
+ spilled_head,
+ spilled_tail,
+ txn, oplsn,
+ toku_abort_rollback_item);
+ return r;
+}
+
+int
+toku_commit_load (FILENUM old_filenum,
+ BYTESTRING UU(new_iname),
+ TOKUTXN txn,
+ LSN UU(oplsn))
+{
+ int r;
+ CACHEFILE old_cf;
+ CACHETABLE ct = txn->logger->ct;
+
+ // To commit a dictionary load, we delete the old file
+ //
+ // Try to get the cachefile for the old filenum. A missing file on recovery
+ // is not an error, but a missing file outside of recovery is.
+ r = toku_cachefile_of_filenum(ct, old_filenum, &old_cf);
+ if (r == ENOENT) {
+ invariant(txn->for_recovery);
+ r = 0;
+ goto done;
+ }
+ lazy_assert(r == 0);
+
+ // bug fix for #4718
+ // the bug was introduced with the fix for #3590
+ // Before Maxwell (and the fix for #3590),
+ // the recovery log was fsynced after the xcommit was logged but
+ // before we processed rollback entries and before we released
+ // the row locks (in the lock tree). Due to performance concerns,
+ // the fsync was moved to after the release of row locks, which comes
+ // after processing rollback entries. As a result, we may be unlinking a file
+ // here as part of a transaction that may abort if we do not fsync the log.
+ // So, we fsync the log here.
+ if (txn->logger) {
+ toku_logger_fsync_if_lsn_not_fsynced(txn->logger, txn->do_fsync_lsn);
+ }
+
+ // TODO: Zardosht
+ // Explain why this condition is valid, because I forget.
+ if (!toku_cachefile_is_unlink_on_close(old_cf)) {
+ toku_cachefile_unlink_on_close(old_cf);
+ }
+done:
+ return r;
+}
+
+int
+toku_rollback_load (FILENUM UU(old_filenum),
+ BYTESTRING new_iname,
+ TOKUTXN txn,
+ LSN UU(oplsn))
+{
+ int r;
+ CACHEFILE new_cf;
+ CACHETABLE ct = txn->logger->ct;
+
+ // To rollback a dictionary load, we delete the new file.
+ // Try to get the cachefile for the new fname.
+ char *fname_in_env = fixup_fname(&new_iname);
+ r = toku_cachefile_of_iname_in_env(ct, fname_in_env, &new_cf);
+ if (r == ENOENT) {
+ // It's possible the new iname was never created, so just try to
+ // unlink it if it's there and ignore the error if it's not.
+ char *fname_in_cwd = toku_cachetable_get_fname_in_cwd(ct, fname_in_env);
+ r = unlink(fname_in_cwd);
+ assert(r == 0 || get_error_errno() == ENOENT);
+ toku_free(fname_in_cwd);
+ r = 0;
+ } else {
+ assert_zero(r);
+ toku_cachefile_unlink_on_close(new_cf);
+ }
+ toku_free(fname_in_env);
+ return r;
+}
+
+//2954
+int
+toku_commit_hot_index (FILENUMS UU(hot_index_filenums),
+ TOKUTXN UU(txn),
+ LSN UU(oplsn))
+{
+ // nothing
+ return 0;
+}
+
+int
+toku_rollback_hot_index (FILENUMS UU(hot_index_filenums),
+ TOKUTXN UU(txn),
+ LSN UU(oplsn))
+{
+ return 0;
+}
+
+int
+toku_commit_dictionary_redirect (FILENUM UU(old_filenum),
+ FILENUM UU(new_filenum),
+ TOKUTXN UU(txn),
+ LSN UU(oplsn)) //oplsn is the lsn of the commit
+{
+ //Redirect only has meaning during normal operation (NOT during recovery).
+ if (!txn->for_recovery) {
+ //NO-OP
+ }
+ return 0;
+}
+
+int
+toku_rollback_dictionary_redirect (FILENUM old_filenum,
+ FILENUM new_filenum,
+ TOKUTXN txn,
+ LSN UU(oplsn)) //oplsn is the lsn of the abort
+{
+ int r = 0;
+ //Redirect only has meaning during normal operation (NOT during recovery).
+ if (!txn->for_recovery) {
+ CACHEFILE new_cf = NULL;
+ r = toku_cachefile_of_filenum(txn->logger->ct, new_filenum, &new_cf);
+ assert(r == 0);
+ FT CAST_FROM_VOIDP(new_ft, toku_cachefile_get_userdata(new_cf));
+
+ CACHEFILE old_cf = NULL;
+ r = toku_cachefile_of_filenum(txn->logger->ct, old_filenum, &old_cf);
+ assert(r == 0);
+ FT CAST_FROM_VOIDP(old_ft, toku_cachefile_get_userdata(old_cf));
+
+ //Redirect back from new to old.
+ r = toku_dictionary_redirect_abort(old_ft, new_ft, txn);
+ assert(r==0);
+ }
+ return r;
+}
+
+int
+toku_commit_change_fdescriptor(FILENUM filenum,
+ BYTESTRING UU(old_descriptor),
+ TOKUTXN txn,
+ LSN UU(oplsn))
+{
+ return do_nothing_with_filenum(txn, filenum);
+}
+
+int
+toku_rollback_change_fdescriptor(FILENUM filenum,
+ BYTESTRING old_descriptor,
+ TOKUTXN txn,
+ LSN UU(oplsn))
+{
+ CACHEFILE cf;
+ int r;
+ r = toku_cachefile_of_filenum(txn->logger->ct, filenum, &cf);
+ if (r == ENOENT) { //Missing file on recovered transaction is not an error
+ assert(txn->for_recovery);
+ r = 0;
+ goto done;
+ }
+ // the file must be open, because the txn that created it opened it and
+ // noted it.
+ assert(r == 0);
+
+ FT ft;
+ ft = NULL;
+ r = txn->open_fts.find_zero<FILENUM, find_ft_from_filenum>(filenum, &ft, NULL);
+ assert(r == 0);
+
+ DESCRIPTOR_S d;
+ toku_fill_dbt(&d.dbt, old_descriptor.data, old_descriptor.len);
+ toku_ft_update_descriptor(ft, &d);
+done:
+ return r;
+}
+
+
+
diff --git a/storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc b/storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc
new file mode 100644
index 00000000..0f19c445
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc
@@ -0,0 +1,258 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/logger/log-internal.h"
+#include "ft/txn/rollback-apply.h"
+
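+// Report rollback progress to the client's poll callback, if one is registered.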
+static void poll_txn_progress_function(TOKUTXN txn, uint8_t is_commit, uint8_t stall_for_checkpoint) {
+ if (txn->progress_poll_fun) {
+ TOKU_TXN_PROGRESS_S progress = {
+ .entries_total = txn->roll_info.num_rollentries,
+ .entries_processed = txn->roll_info.num_rollentries_processed,
+ .is_commit = is_commit,
+ .stalled_on_checkpoint = stall_for_checkpoint};
+ txn->progress_poll_fun(&progress, txn->progress_poll_fun_extra);
+ }
+}
+
+int toku_commit_rollback_item (TOKUTXN txn, struct roll_entry *item, LSN lsn) {
+ int r=0;
+ rolltype_dispatch_assign(item, toku_commit_, r, txn, lsn);
+ txn->roll_info.num_rollentries_processed++;
+ if (txn->roll_info.num_rollentries_processed % 1024 == 0) {
+ poll_txn_progress_function(txn, true, false);
+ }
+ return r;
+}
+
+int toku_abort_rollback_item (TOKUTXN txn, struct roll_entry *item, LSN lsn) {
+ int r=0;
+ rolltype_dispatch_assign(item, toku_rollback_, r, txn, lsn);
+ txn->roll_info.num_rollentries_processed++;
+ if (txn->roll_info.num_rollentries_processed % 1024 == 0) {
+ poll_txn_progress_function(txn, false, false);
+ }
+ return r;
+}
+
+int note_ft_used_in_txns_parent(const FT &ft, uint32_t UU(index), TOKUTXN const child);
+int note_ft_used_in_txns_parent(const FT &ft, uint32_t UU(index), TOKUTXN const child) {
+ TOKUTXN parent = child->parent;
+ toku_txn_maybe_note_ft(parent, ft);
+ return 0;
+}
+
+static int apply_txn(TOKUTXN txn, LSN lsn, apply_rollback_item func) {
+ int r = 0;
+ // do the commit/abort calls and free everything
+ // we do the commit/abort calls in reverse order too.
+ struct roll_entry *item;
+ //printf("%s:%d abort\n", __FILE__, __LINE__);
+
+ BLOCKNUM next_log = ROLLBACK_NONE;
+
+ bool is_current = false;
+ if (txn_has_current_rollback_log(txn)) {
+ next_log = txn->roll_info.current_rollback;
+ is_current = true;
+ }
+ else if (txn_has_spilled_rollback_logs(txn)) {
+ next_log = txn->roll_info.spilled_rollback_tail;
+ }
+
+ uint64_t last_sequence = txn->roll_info.num_rollback_nodes;
+ bool found_head = false;
+ while (next_log.b != ROLLBACK_NONE.b) {
+ ROLLBACK_LOG_NODE log;
+ //pin log
+ toku_get_and_pin_rollback_log(txn, next_log, &log);
+ toku_rollback_verify_contents(log, txn->txnid, last_sequence - 1);
+
+ toku_maybe_prefetch_previous_rollback_log(txn, log);
+
+ last_sequence = log->sequence;
+ if (func) {
+ while ((item=log->newest_logentry)) {
+ log->newest_logentry = item->prev;
+ r = func(txn, item, lsn);
+ if (r!=0) return r;
+ }
+ }
+ if (next_log.b == txn->roll_info.spilled_rollback_head.b) {
+ assert(!found_head);
+ found_head = true;
+ assert(log->sequence == 0);
+ }
+ next_log = log->previous;
+ {
+ //Clean up transaction structure to prevent
+ //toku_txn_close from double-freeing
+ if (is_current) {
+ txn->roll_info.current_rollback = ROLLBACK_NONE;
+ is_current = false;
+ }
+ else {
+ txn->roll_info.spilled_rollback_tail = next_log;
+ }
+ if (found_head) {
+ assert(next_log.b == ROLLBACK_NONE.b);
+ txn->roll_info.spilled_rollback_head = next_log;
+ }
+ }
+ bool give_back = false;
+ // each txn tries to give back at most one rollback log node
+ // to the cache.
+ if (next_log.b == ROLLBACK_NONE.b) {
+ give_back = txn->logger->rollback_cache.give_rollback_log_node(
+ txn,
+ log
+ );
+ }
+ if (!give_back) {
+ toku_rollback_log_unpin_and_remove(txn, log);
+ }
+ }
+ return r;
+}
+
+//Commit each entry in the rollback log.
+//If the transaction has a parent, it just promotes its information to its parent.
+int toku_rollback_commit(TOKUTXN txn, LSN lsn) {
+ int r=0;
+ if (txn->parent!=0) {
+ // First we must put a rollinclude entry into the parent if we spilled
+
+ if (txn_has_spilled_rollback_logs(txn)) {
+ uint64_t num_nodes = txn->roll_info.num_rollback_nodes;
+ if (txn_has_current_rollback_log(txn)) {
+ num_nodes--; //Don't count the in-progress rollback log.
+ }
+ toku_logger_save_rollback_rollinclude(txn->parent, txn->txnid, num_nodes,
+ txn->roll_info.spilled_rollback_head,
+ txn->roll_info.spilled_rollback_tail);
+ //Remove ownership from child.
+ txn->roll_info.spilled_rollback_head = ROLLBACK_NONE;
+ txn->roll_info.spilled_rollback_tail = ROLLBACK_NONE;
+ }
+ // if we're committing a child rollback, put its entries into the parent
+ // by pinning both child and parent and then linking the child log entry
+ // list to the end of the parent log entry list.
+ if (txn_has_current_rollback_log(txn)) {
+ //Pin parent log
+ toku_txn_lock(txn->parent);
+ ROLLBACK_LOG_NODE parent_log;
+ toku_get_and_pin_rollback_log_for_new_entry(txn->parent, &parent_log);
+
+ //Pin child log
+ ROLLBACK_LOG_NODE child_log;
+ toku_get_and_pin_rollback_log(txn, txn->roll_info.current_rollback, &child_log);
+ toku_rollback_verify_contents(child_log, txn->txnid, txn->roll_info.num_rollback_nodes - 1);
+
+ // Append the list to the front of the parent.
+ if (child_log->oldest_logentry) {
+ // There are some entries, so link them in.
+ parent_log->dirty = true;
+ child_log->oldest_logentry->prev = parent_log->newest_logentry;
+ if (!parent_log->oldest_logentry) {
+ parent_log->oldest_logentry = child_log->oldest_logentry;
+ }
+ parent_log->newest_logentry = child_log->newest_logentry;
+ parent_log->rollentry_resident_bytecount += child_log->rollentry_resident_bytecount;
+ txn->parent->roll_info.rollentry_raw_count += txn->roll_info.rollentry_raw_count;
+ child_log->rollentry_resident_bytecount = 0;
+ }
+ if (parent_log->oldest_logentry==NULL) {
+ parent_log->oldest_logentry = child_log->oldest_logentry;
+ }
+ child_log->newest_logentry = child_log->oldest_logentry = 0;
+ // Put all the memarena data into the parent.
+ if (child_log->rollentry_arena.total_size_in_use() > 0) {
+ // If there are no bytes to move, then just leave things alone, and let the memory be reclaimed when the txn is closed.
+ child_log->rollentry_arena.move_memory(&parent_log->rollentry_arena);
+ }
+ // each txn tries to give back at most one rollback log node
+ // to the cache. All other rollback log nodes for this child
+ // transaction are included in the parent's rollback log,
+ // so this is the only node we can give back to the cache
+ bool give_back = txn->logger->rollback_cache.give_rollback_log_node(
+ txn,
+ child_log
+ );
+ if (!give_back) {
+ toku_rollback_log_unpin_and_remove(txn, child_log);
+ }
+ txn->roll_info.current_rollback = ROLLBACK_NONE;
+
+ toku_maybe_spill_rollbacks(txn->parent, parent_log);
+ toku_rollback_log_unpin(txn->parent, parent_log);
+ assert(r == 0);
+ toku_txn_unlock(txn->parent);
+ }
+
+ // Note the open FTs in the parent txn; the child's open-ft OMT must be merged into the parent's
+ r = txn->open_fts.iterate<struct tokutxn, note_ft_used_in_txns_parent>(txn);
+ assert(r==0);
+
+ //If this transaction needs an fsync (if it commits)
+ //save that in the parent. Since the commit really happens in the root txn.
+ toku_txn_lock(txn->parent);
+ txn->parent->force_fsync_on_commit |= txn->force_fsync_on_commit;
+ txn->parent->roll_info.num_rollentries += txn->roll_info.num_rollentries;
+ toku_txn_unlock(txn->parent);
+ } else {
+ r = apply_txn(txn, lsn, toku_commit_rollback_item);
+ assert(r==0);
+ }
+
+ return r;
+}
+
+int toku_rollback_abort(TOKUTXN txn, LSN lsn) {
+ int r;
+ r = apply_txn(txn, lsn, toku_abort_rollback_item);
+ assert(r==0);
+ return r;
+}
+
+int toku_rollback_discard(TOKUTXN txn) {
+ txn->roll_info.current_rollback = ROLLBACK_NONE;
+ txn->roll_info.spilled_rollback_head = ROLLBACK_NONE;
+ txn->roll_info.spilled_rollback_tail = ROLLBACK_NONE;
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/txn/rollback-apply.h b/storage/tokudb/PerconaFT/ft/txn/rollback-apply.h
new file mode 100644
index 00000000..bf87cd29
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/rollback-apply.h
@@ -0,0 +1,47 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+typedef int(*apply_rollback_item)(TOKUTXN txn, struct roll_entry *item, LSN lsn);
+int toku_commit_rollback_item (TOKUTXN txn, struct roll_entry *item, LSN lsn);
+int toku_abort_rollback_item (TOKUTXN txn, struct roll_entry *item, LSN lsn);
+
+int toku_rollback_commit(TOKUTXN txn, LSN lsn);
+int toku_rollback_abort(TOKUTXN txn, LSN lsn);
+int toku_rollback_discard(TOKUTXN txn);
diff --git a/storage/tokudb/PerconaFT/ft/txn/rollback-ct-callbacks.cc b/storage/tokudb/PerconaFT/ft/txn/rollback-ct-callbacks.cc
new file mode 100644
index 00000000..08d7c887
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/rollback-ct-callbacks.cc
@@ -0,0 +1,257 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "portability/memory.h"
+#include "portability/toku_portability.h"
+
+#include "ft/serialize/block_table.h"
+#include "ft/ft-internal.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "ft/txn/rollback.h"
+#include "ft/txn/rollback-ct-callbacks.h"
+
+#include "util/memarena.h"
+
+// Address used as a sentinel. Otherwise unused.
+static struct serialized_rollback_log_node cloned_rollback;
+
+// Cleanup the rollback memory
+static void
+rollback_log_destroy(ROLLBACK_LOG_NODE log) {
+ make_rollback_log_empty(log);
+ toku_free(log);
+}
+
+// flush an unused log to disk by allocating a size-0 blocknum in
+// the blocktable
+static void toku_rollback_flush_unused_log(ROLLBACK_LOG_NODE log,
+ BLOCKNUM logname,
+ int fd,
+ FT ft,
+ bool write_me,
+ bool keep_me,
+ bool for_checkpoint,
+ bool is_clone) {
+ if (write_me) {
+ DISKOFF offset;
+ ft->blocktable.realloc_on_disk(
+ logname, 0, &offset, ft, fd, for_checkpoint);
+ }
+ if (!keep_me && !is_clone) {
+ toku_free(log);
+ }
+}
+
+// flush a used log to disk by serializing and writing the node out
+static void
+toku_rollback_flush_used_log (
+ ROLLBACK_LOG_NODE log,
+ SERIALIZED_ROLLBACK_LOG_NODE serialized,
+ int fd,
+ FT ft,
+ bool write_me,
+ bool keep_me,
+ bool for_checkpoint,
+ bool is_clone
+ )
+{
+
+ if (write_me) {
+ int r = toku_serialize_rollback_log_to(fd, log, serialized, is_clone, ft, for_checkpoint);
+ assert(r == 0);
+ }
+ if (!keep_me) {
+ if (is_clone) {
+ toku_serialized_rollback_log_destroy(serialized);
+ }
+ else {
+ rollback_log_destroy(log);
+ }
+ }
+}
+
+// Cachetable flush callback for rollback log nodes: write the node (or its
+// serialized clone) to disk if requested, and free it if it is not being
+// kept in memory.
+void toku_rollback_flush_callback (
+ CACHEFILE UU(cachefile),
+ int fd,
+ BLOCKNUM logname,
+ void *rollback_v,
+ void** UU(disk_data),
+ void *extraargs,
+ PAIR_ATTR size,
+ PAIR_ATTR* new_size,
+ bool write_me,
+ bool keep_me,
+ bool for_checkpoint,
+ bool is_clone
+ )
+{
+ ROLLBACK_LOG_NODE log = nullptr;
+ SERIALIZED_ROLLBACK_LOG_NODE serialized = nullptr;
+ bool is_unused = false;
+ if (is_clone) {
+ is_unused = (rollback_v == &cloned_rollback);
+ CAST_FROM_VOIDP(serialized, rollback_v);
+ }
+ else {
+ CAST_FROM_VOIDP(log, rollback_v);
+ is_unused = rollback_log_is_unused(log);
+ }
+ *new_size = size;
+ FT ft;
+ CAST_FROM_VOIDP(ft, extraargs);
+ if (is_unused) {
+ toku_rollback_flush_unused_log(
+ log,
+ logname,
+ fd,
+ ft,
+ write_me,
+ keep_me,
+ for_checkpoint,
+ is_clone
+ );
+ }
+ else {
+ toku_rollback_flush_used_log(
+ log,
+ serialized,
+ fd,
+ ft,
+ write_me,
+ keep_me,
+ for_checkpoint,
+ is_clone
+ );
+ }
+}
+
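+// Cachetable fetch callback for a rollback log node: deserialize the node
+// from disk, remember its cachetable PAIR, and report its in-memory size.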
+int toku_rollback_fetch_callback (CACHEFILE cachefile, PAIR p, int fd, BLOCKNUM logname, uint32_t fullhash UU(),
+ void **rollback_pv, void** UU(disk_data), PAIR_ATTR *sizep, int * UU(dirtyp), void *extraargs) {
+ int r;
+ FT CAST_FROM_VOIDP(h, extraargs);
+ assert(h->cf == cachefile);
+ ROLLBACK_LOG_NODE *result = (ROLLBACK_LOG_NODE*)rollback_pv;
+ r = toku_deserialize_rollback_log_from(fd, logname, result, h);
+ if (r==0) {
+ (*result)->ct_pair = p;
+ *sizep = rollback_memory_size(*result);
+ }
+ return r;
+}
+
+void toku_rollback_pe_est_callback(
+ void* rollback_v,
+ void* UU(disk_data),
+ long* bytes_freed_estimate,
+ enum partial_eviction_cost *cost,
+ void* UU(write_extraargs)
+ )
+{
+ assert(rollback_v != NULL);
+ *bytes_freed_estimate = 0;
+ *cost = PE_CHEAP;
+}
+
+// callback for partially evicting a cachetable entry
+int toku_rollback_pe_callback (
+ void *rollback_v,
+ PAIR_ATTR old_attr,
+ void* UU(extraargs),
+ void (*finalize)(PAIR_ATTR new_attr, void * extra),
+ void *finalize_extra
+ )
+{
+ assert(rollback_v != NULL);
+ finalize(old_attr, finalize_extra);
+ return 0;
+}
+
+// partial fetch is never required for a rollback log node
+bool toku_rollback_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return false;
+}
+
+// a rollback node should never be partially fetched,
+// because we always say it is not required.
+// (pf req callback always returns false)
+int toku_rollback_pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
+ assert(false);
+ return 0;
+}
+
+// the cleaner thread should never choose a rollback node for cleaning
+int toku_rollback_cleaner_callback (
+ void* UU(ftnode_pv),
+ BLOCKNUM UU(blocknum),
+ uint32_t UU(fullhash),
+ void* UU(extraargs)
+ )
+{
+ assert(false);
+ return 0;
+}
+
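+// Cachetable clone callback: serialize a used node to memory (uncompressed)
+// so a checkpoint can write the clone while the original keeps changing; an
+// unused node is represented by the &cloned_rollback sentinel. The original
+// node's dirty bit is cleared because the clone now carries the data.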
+void toku_rollback_clone_callback(
+ void* value_data,
+ void** cloned_value_data,
+ long* clone_size,
+ PAIR_ATTR* new_attr,
+ bool UU(for_checkpoint),
+ void* UU(write_extraargs)
+ )
+{
+ ROLLBACK_LOG_NODE CAST_FROM_VOIDP(log, value_data);
+ SERIALIZED_ROLLBACK_LOG_NODE serialized = nullptr;
+ if (!rollback_log_is_unused(log)) {
+ XMALLOC(serialized);
+ toku_serialize_rollback_log_to_memory_uncompressed(log, serialized);
+ *cloned_value_data = serialized;
+ *clone_size = sizeof(struct serialized_rollback_log_node) + serialized->len;
+ }
+ else {
+ *cloned_value_data = &cloned_rollback;
+ *clone_size = sizeof(cloned_rollback);
+ }
+ // clear the dirty bit, because the node has been cloned
+ log->dirty = 0;
+ new_attr->is_valid = false;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/txn/rollback-ct-callbacks.h b/storage/tokudb/PerconaFT/ft/txn/rollback-ct-callbacks.h
new file mode 100644
index 00000000..5fedb0e5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/rollback-ct-callbacks.h
@@ -0,0 +1,80 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/cachetable/cachetable.h"
+
+void toku_rollback_flush_callback(CACHEFILE cachefile, int fd, BLOCKNUM logname, void *rollback_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool UU(is_clone));
+int toku_rollback_fetch_callback(CACHEFILE cachefile, PAIR p, int fd, BLOCKNUM logname, uint32_t fullhash, void **rollback_pv, void** UU(disk_data), PAIR_ATTR *sizep, int * UU(dirtyp), void *extraargs);
+void toku_rollback_pe_est_callback(
+ void* rollback_v,
+ void* UU(disk_data),
+ long* bytes_freed_estimate,
+ enum partial_eviction_cost *cost,
+ void* UU(write_extraargs)
+ );
+int toku_rollback_pe_callback (
+ void *rollback_v,
+ PAIR_ATTR old_attr,
+ void* UU(extraargs),
+ void (*finalize)(PAIR_ATTR new_attr, void * extra),
+ void *finalize_extra
+ );
+bool toku_rollback_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) ;
+int toku_rollback_pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep));
+void toku_rollback_clone_callback(void* value_data, void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool for_checkpoint, void* write_extraargs);
+
+int toku_rollback_cleaner_callback (
+ void* UU(ftnode_pv),
+ BLOCKNUM UU(blocknum),
+ uint32_t UU(fullhash),
+ void* UU(extraargs)
+ );
+
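+// Bundle the rollback-specific cachetable write callbacks, passing the FT as
+// the extraargs pointer handed back to each callback.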
+static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT ft) {
+ CACHETABLE_WRITE_CALLBACK wc;
+ wc.flush_callback = toku_rollback_flush_callback;
+ wc.pe_est_callback = toku_rollback_pe_est_callback;
+ wc.pe_callback = toku_rollback_pe_callback;
+ wc.cleaner_callback = toku_rollback_cleaner_callback;
+ wc.clone_callback = toku_rollback_clone_callback;
+ wc.checkpoint_complete_callback = nullptr;
+ wc.write_extraargs = ft;
+ return wc;
+}
diff --git a/storage/tokudb/PerconaFT/ft/txn/rollback.cc b/storage/tokudb/PerconaFT/ft/txn/rollback.cc
new file mode 100644
index 00000000..105f980d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/rollback.cc
@@ -0,0 +1,334 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_stdint.h>
+
+#include "ft/serialize/block_table.h"
+#include "ft/ft.h"
+#include "ft/logger/log-internal.h"
+#include "ft/txn/rollback-ct-callbacks.h"
+
+extern int writing_rollback;
+
+static void rollback_unpin_remove_callback(CACHEKEY* cachekey, bool for_checkpoint, void* extra) {
+ FT CAST_FROM_VOIDP(ft, extra);
+ ft->blocktable.free_blocknum(cachekey, ft, for_checkpoint);
+}
+
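+// Unpin a rollback node and remove it from the cachetable; the unpin-remove
+// callback frees the node's blocknum in the FT's blocktable.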
+void toku_rollback_log_unpin_and_remove(TOKUTXN txn, ROLLBACK_LOG_NODE log) {
+ int r;
+ CACHEFILE cf = txn->logger->rollback_cachefile;
+ FT CAST_FROM_VOIDP(ft, toku_cachefile_get_userdata(cf));
+ r = toku_cachetable_unpin_and_remove (cf, log->ct_pair, rollback_unpin_remove_callback, ft);
+ assert(r == 0);
+}
+
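+// OMT comparator for TXNIDs; used to search a live root txn list
+// (see toku_is_txn_in_live_root_txn_list in txn.cc).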
+int
+toku_find_xid_by_xid (const TXNID &xid, const TXNID &xidfind) {
+ if (xid<xidfind) return -1;
+ if (xid>xidfind) return +1;
+ return 0;
+}
+
+// TODO: fix this name
+// toku_rollback_malloc
+void *toku_malloc_in_rollback(ROLLBACK_LOG_NODE log, size_t size) {
+ return log->rollentry_arena.malloc_from_arena(size);
+}
+
+// TODO: fix this name
+// toku_rollback_memdup
+void *toku_memdup_in_rollback(ROLLBACK_LOG_NODE log, const void *v, size_t len) {
+ void *r = toku_malloc_in_rollback(log, len);
+ memcpy(r, v, len);
+ return r;
+}
+
+static inline PAIR_ATTR make_rollback_pair_attr(long size) {
+ PAIR_ATTR result={
+ .size = size,
+ .nonleaf_size = 0,
+ .leaf_size = 0,
+ .rollback_size = size,
+ .cache_pressure_size = 0,
+ .is_valid = true
+ };
+ return result;
+}
+
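+// Report the in-memory footprint of a rollback node (the struct plus its
+// memarena) as a PAIR_ATTR for the cachetable.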
+PAIR_ATTR
+rollback_memory_size(ROLLBACK_LOG_NODE log) {
+ size_t size = sizeof(*log);
+ size += log->rollentry_arena.total_footprint();
+ return make_rollback_pair_attr(size);
+}
+
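+// put callback for toku_cachetable_put: remember the cachetable PAIR in the
+// node so it can later be unpinned by PAIR.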
+static void toku_rollback_node_save_ct_pair(CACHEKEY UU(key), void *value_data, PAIR p) {
+ ROLLBACK_LOG_NODE CAST_FROM_VOIDP(log, value_data);
+ log->ct_pair = p;
+}
+
+//
+// initializes an empty rollback log node
+// Does not touch the blocknum, that is the
+// responsibility of the caller
+//
+void rollback_empty_log_init(ROLLBACK_LOG_NODE log) {
+ // Having a txnid set to TXNID_NONE is how we determine if the
+ // rollback log node is empty or in use.
+ log->txnid.parent_id64 = TXNID_NONE;
+ log->txnid.child_id64 = TXNID_NONE;
+
+ log->layout_version = FT_LAYOUT_VERSION;
+ log->layout_version_original = FT_LAYOUT_VERSION;
+ log->layout_version_read_from_disk = FT_LAYOUT_VERSION;
+ log->dirty = true;
+ log->sequence = 0;
+ log->previous = make_blocknum(0);
+ log->oldest_logentry = NULL;
+ log->newest_logentry = NULL;
+ log->rollentry_arena.create(0);
+ log->rollentry_resident_bytecount = 0;
+}
+
+static void rollback_initialize_for_txn(
+ ROLLBACK_LOG_NODE log,
+ TOKUTXN txn,
+ BLOCKNUM previous
+ )
+{
+ log->txnid = txn->txnid;
+ log->sequence = txn->roll_info.num_rollback_nodes++;
+ log->previous = previous;
+ log->oldest_logentry = NULL;
+ log->newest_logentry = NULL;
+ log->rollentry_arena.create(1024);
+ log->rollentry_resident_bytecount = 0;
+ log->dirty = true;
+}
+
+// TODO: fix this name
+void make_rollback_log_empty(ROLLBACK_LOG_NODE log) {
+ log->rollentry_arena.destroy();
+ rollback_empty_log_init(log);
+}
+
+// create and pin a new rollback log node. chain it to the other rollback nodes
+// by providing a previous blocknum and assigning the new rollback log
+// node the next sequence number
+static void rollback_log_create (
+ TOKUTXN txn,
+ BLOCKNUM previous,
+ ROLLBACK_LOG_NODE *result
+ )
+{
+ writing_rollback++;
+ ROLLBACK_LOG_NODE XMALLOC(log);
+ rollback_empty_log_init(log);
+
+ CACHEFILE cf = txn->logger->rollback_cachefile;
+ FT CAST_FROM_VOIDP(ft, toku_cachefile_get_userdata(cf));
+ rollback_initialize_for_txn(log, txn, previous);
+ ft->blocktable.allocate_blocknum(&log->blocknum, ft);
+ const uint32_t hash = toku_cachetable_hash(ft->cf, log->blocknum);
+ *result = log;
+ toku_cachetable_put(cf, log->blocknum, hash,
+ log, rollback_memory_size(log),
+ get_write_callbacks_for_rollback_log(ft),
+ toku_rollback_node_save_ct_pair);
+ txn->roll_info.current_rollback = log->blocknum;
+ writing_rollback --;
+}
+
+void toku_rollback_log_unpin(TOKUTXN txn, ROLLBACK_LOG_NODE log) {
+ int r;
+ CACHEFILE cf = txn->logger->rollback_cachefile;
+ r = toku_cachetable_unpin(
+ cf,
+ log->ct_pair,
+ (enum cachetable_dirty)log->dirty,
+ rollback_memory_size(log)
+ );
+ assert(r == 0);
+}
+
+//Requires: log is pinned
+// log is current
+//After:
+// Maybe there is no current after (if it spilled)
+void toku_maybe_spill_rollbacks(TOKUTXN txn, ROLLBACK_LOG_NODE log) {
+ if (log->rollentry_resident_bytecount > txn->logger->write_block_size) {
+ assert(log->blocknum.b == txn->roll_info.current_rollback.b);
+ //spill
+ if (!txn_has_spilled_rollback_logs(txn)) {
+ //First spilled. Copy to head.
+ txn->roll_info.spilled_rollback_head = txn->roll_info.current_rollback;
+ }
+ //Unconditionally copy to tail. Old tail does not need to be cached anymore.
+ txn->roll_info.spilled_rollback_tail = txn->roll_info.current_rollback;
+
+ txn->roll_info.current_rollback = ROLLBACK_NONE;
+ }
+}
+
+int find_filenum (const FT &h, const FT &hfind);
+int find_filenum (const FT &h, const FT &hfind) {
+ FILENUM fnum = toku_cachefile_filenum(h->cf);
+ FILENUM fnumfind = toku_cachefile_filenum(hfind->cf);
+ if (fnum.fileid<fnumfind.fileid) return -1;
+ if (fnum.fileid>fnumfind.fileid) return +1;
+ return 0;
+}
+
+//Notify a transaction that it has touched an ft.
+void toku_txn_maybe_note_ft (TOKUTXN txn, FT ft) {
+ toku_txn_lock(txn);
+ FT ftv;
+ uint32_t idx;
+ int r = txn->open_fts.find_zero<FT, find_filenum>(ft, &ftv, &idx);
+ if (r == 0) {
+ // already there
+ assert(ftv == ft);
+ goto exit;
+ }
+ r = txn->open_fts.insert_at(ft, idx);
+ assert_zero(r);
+ // TODO(leif): if there's anything that locks the reflock and then
+ // the txn lock, this may deadlock, because it grabs the reflock.
+ toku_ft_add_txn_ref(ft);
+exit:
+ toku_txn_unlock(txn);
+}
+
+// Return the number of bytes that went into the rollback data structure (the uncompressed count if there is compression)
+int toku_logger_txn_rollback_stats(TOKUTXN txn, struct txn_stat *txn_stat)
+{
+ toku_txn_lock(txn);
+ txn_stat->rollback_raw_count = txn->roll_info.rollentry_raw_count;
+ txn_stat->rollback_num_entries = txn->roll_info.num_rollentries;
+ toku_txn_unlock(txn);
+ return 0;
+}
+
+void toku_maybe_prefetch_previous_rollback_log(TOKUTXN txn, ROLLBACK_LOG_NODE log) {
+ //Currently processing 'log'. Prefetch the next (previous) log node.
+
+ BLOCKNUM name = log->previous;
+ int r = 0;
+ if (name.b != ROLLBACK_NONE.b) {
+ CACHEFILE cf = txn->logger->rollback_cachefile;
+ uint32_t hash = toku_cachetable_hash(cf, name);
+ FT CAST_FROM_VOIDP(h, toku_cachefile_get_userdata(cf));
+ bool doing_prefetch = false;
+ r = toku_cachefile_prefetch(cf, name, hash,
+ get_write_callbacks_for_rollback_log(h),
+ toku_rollback_fetch_callback,
+ toku_rollback_pf_req_callback,
+ toku_rollback_pf_callback,
+ h,
+ &doing_prefetch);
+ assert(r == 0);
+ }
+}
+
+void toku_rollback_verify_contents(ROLLBACK_LOG_NODE log,
+ TXNID_PAIR txnid, uint64_t sequence)
+{
+ assert(log->txnid.parent_id64 == txnid.parent_id64);
+ assert(log->txnid.child_id64 == txnid.child_id64);
+ assert(log->sequence == sequence);
+}
+
+void toku_get_and_pin_rollback_log(TOKUTXN txn, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *log) {
+ void * value;
+ CACHEFILE cf = txn->logger->rollback_cachefile;
+ FT CAST_FROM_VOIDP(h, toku_cachefile_get_userdata(cf));
+ uint32_t hash = toku_cachetable_hash(cf, blocknum);
+ int r = toku_cachetable_get_and_pin_with_dep_pairs(cf, blocknum, hash,
+ &value,
+ get_write_callbacks_for_rollback_log(h),
+ toku_rollback_fetch_callback,
+ toku_rollback_pf_req_callback,
+ toku_rollback_pf_callback,
+ PL_WRITE_CHEAP, // lock_type
+ h,
+ 0, NULL, NULL
+ );
+ assert(r == 0);
+ ROLLBACK_LOG_NODE CAST_FROM_VOIDP(pinned_log, value);
+ assert(pinned_log->blocknum.b == blocknum.b);
+ *log = pinned_log;
+}
+
+void toku_get_and_pin_rollback_log_for_new_entry (TOKUTXN txn, ROLLBACK_LOG_NODE *log) {
+ ROLLBACK_LOG_NODE pinned_log = NULL;
+ invariant(txn->state == TOKUTXN_LIVE || txn->state == TOKUTXN_PREPARING); // hot indexing may call this function for prepared transactions
+ if (txn_has_current_rollback_log(txn)) {
+ toku_get_and_pin_rollback_log(txn, txn->roll_info.current_rollback, &pinned_log);
+ toku_rollback_verify_contents(pinned_log, txn->txnid, txn->roll_info.num_rollback_nodes - 1);
+ } else {
+ // For each transaction, we try to acquire the first rollback log
+ // from the rollback log node cache, so that we avoid
+ // putting something new into the cachetable. However,
+ // if the transaction has spilled rollbacks, that means we
+ // have already done a lot of work for this transaction,
+ // and subsequent rollback log nodes are created
+ // and put into the cachetable. The idea is for
+ // transactions that don't do a lot of work to (hopefully)
+ // get a rollback log node from the cache, as opposed to
+ // taking the more expensive route of creating a new one.
+ if (!txn_has_spilled_rollback_logs(txn)) {
+ txn->logger->rollback_cache.get_rollback_log_node(txn, &pinned_log);
+ if (pinned_log != NULL) {
+ rollback_initialize_for_txn(
+ pinned_log,
+ txn,
+ txn->roll_info.spilled_rollback_tail
+ );
+ txn->roll_info.current_rollback = pinned_log->blocknum;
+ }
+ }
+ if (pinned_log == NULL) {
+ rollback_log_create(txn, txn->roll_info.spilled_rollback_tail, &pinned_log);
+ }
+ }
+ assert(pinned_log->txnid.parent_id64 == txn->txnid.parent_id64);
+ assert(pinned_log->txnid.child_id64 == txn->txnid.child_id64);
+ assert(pinned_log->blocknum.b != ROLLBACK_NONE.b);
+ *log = pinned_log;
+}
diff --git a/storage/tokudb/PerconaFT/ft/txn/rollback.h b/storage/tokudb/PerconaFT/ft/txn/rollback.h
new file mode 100644
index 00000000..359f2317
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/rollback.h
@@ -0,0 +1,145 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/cachetable/cachetable.h"
+#include "ft/serialize/sub_block.h"
+#include "ft/txn/txn.h"
+
+#include "util/memarena.h"
+
+typedef struct rollback_log_node *ROLLBACK_LOG_NODE;
+typedef struct serialized_rollback_log_node *SERIALIZED_ROLLBACK_LOG_NODE;
+
+void toku_poll_txn_progress_function(TOKUTXN txn, uint8_t is_commit, uint8_t stall_for_checkpoint);
+
+// these functions assert internally that they succeed
+
+// get a rollback node this txn may use for a new entry. if there
+// is a current rollback node to use, pin it, otherwise create one.
+void toku_get_and_pin_rollback_log_for_new_entry(TOKUTXN txn, ROLLBACK_LOG_NODE *log);
+
+// get a specific rollback by blocknum
+void toku_get_and_pin_rollback_log(TOKUTXN txn, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *log);
+
+// unpin a rollback node from the cachetable
+void toku_rollback_log_unpin(TOKUTXN txn, ROLLBACK_LOG_NODE log);
+
+// assert that the given log's txnid and sequence match the ones given
+void toku_rollback_verify_contents(ROLLBACK_LOG_NODE log, TXNID_PAIR txnid, uint64_t sequence);
+
+// if there is a previous rollback log for the given log node, prefetch it
+void toku_maybe_prefetch_previous_rollback_log(TOKUTXN txn, ROLLBACK_LOG_NODE log);
+
+// unpin and remove a rollback log from the cachetable
+void toku_rollback_log_unpin_and_remove(TOKUTXN txn, ROLLBACK_LOG_NODE log);
+
+void *toku_malloc_in_rollback(ROLLBACK_LOG_NODE log, size_t size);
+void *toku_memdup_in_rollback(ROLLBACK_LOG_NODE log, const void *v, size_t len);
+
+// given a transaction and a log node, and if the log is too full,
+// set the current rollback log to ROLLBACK_NONE and move the current
+// node onto the tail of the rollback node chain. further insertions
+// into the rollback log for this transaction will force the creation
+// of a new rollback log.
+//
+// this never unpins the rollback log if a spill occurs. the caller
+// is responsible for ensuring the given rollback node is unpinned
+// if necessary.
+void toku_maybe_spill_rollbacks(TOKUTXN txn, ROLLBACK_LOG_NODE log);
+
+void toku_txn_maybe_note_ft (TOKUTXN txn, struct ft *ft);
+int toku_logger_txn_rollback_stats(TOKUTXN txn, struct txn_stat *txn_stat);
+
+int toku_find_xid_by_xid (const TXNID &xid, const TXNID &xidfind);
+
+PAIR_ATTR rollback_memory_size(ROLLBACK_LOG_NODE log);
+
+// A high-level rollback log is made up of a chain of rollback log nodes.
+// Each rollback log node is represented (separately) in the cachetable by
+// this structure. Each portion of the rollback log chain has a block num
+// and a hash to identify it.
+struct rollback_log_node {
+ int layout_version;
+ int layout_version_original;
+ int layout_version_read_from_disk;
+ uint32_t build_id; // build_id (svn rev number) of software that wrote this node to disk
+ int dirty;
+ // to which transaction does this node belong?
+ TXNID_PAIR txnid;
+ // sequentially, where in the rollback log chain is this node?
+ // the sequence is between 0 and totalnodes-1
+ uint64_t sequence;
+ BLOCKNUM blocknum; // on which block does this node live?
+ // which block number is the previous in the chain of rollback nodes
+ // that make up this rollback log?
+ BLOCKNUM previous;
+ struct roll_entry *oldest_logentry;
+ struct roll_entry *newest_logentry;
+ memarena rollentry_arena;
+ size_t rollentry_resident_bytecount; // How many bytes for the rollentries that are stored in main memory.
+ PAIR ct_pair;
+};
+
+struct serialized_rollback_log_node {
+ char *data;
+ uint32_t len;
+ int n_sub_blocks;
+ BLOCKNUM blocknum;
+ struct sub_block sub_block[max_sub_blocks];
+};
+typedef struct serialized_rollback_log_node *SERIALIZED_ROLLBACK_LOG_NODE;
+
+static inline void
+toku_static_serialized_rollback_log_destroy(SERIALIZED_ROLLBACK_LOG_NODE log) {
+ toku_free(log->data);
+}
+
+static inline void
+toku_serialized_rollback_log_destroy(SERIALIZED_ROLLBACK_LOG_NODE log) {
+ toku_static_serialized_rollback_log_destroy(log);
+ toku_free(log);
+}
+
+void rollback_empty_log_init(ROLLBACK_LOG_NODE log);
+void make_rollback_log_empty(ROLLBACK_LOG_NODE log);
+
+static inline bool rollback_log_is_unused(ROLLBACK_LOG_NODE log) {
+ return (log->txnid.parent_id64 == TXNID_NONE);
+}
diff --git a/storage/tokudb/PerconaFT/ft/txn/rollback_log_node_cache.cc b/storage/tokudb/PerconaFT/ft/txn/rollback_log_node_cache.cc
new file mode 100644
index 00000000..5e1ab746
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/rollback_log_node_cache.cc
@@ -0,0 +1,109 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <memory.h>
+#include <portability/toku_portability.h>
+
+#include "txn/rollback_log_node_cache.h"
+
+toku_instr_key* rollback_log_node_cache_mutex_key;
+
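+// Initialize a fixed-capacity circular buffer of reusable rollback blocknums,
+// protected by an adaptive mutex.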
+void rollback_log_node_cache::init(uint32_t max_num_avail_nodes) {
+ XMALLOC_N(max_num_avail_nodes, m_avail_blocknums);
+ m_max_num_avail = max_num_avail_nodes;
+ m_first = 0;
+ m_num_avail = 0;
+ toku_pthread_mutexattr_t attr;
+ toku_mutexattr_init(&attr);
+ toku_mutexattr_settype(&attr, TOKU_MUTEX_ADAPTIVE);
+ toku_mutex_init(*rollback_log_node_cache_mutex_key, &m_mutex, &attr);
+ toku_mutexattr_destroy(&attr);
+}
+
+void rollback_log_node_cache::destroy() {
+ toku_mutex_destroy(&m_mutex);
+ toku_free(m_avail_blocknums);
+}
+
+// returns true if rollback log node was successfully added,
+// false otherwise
+bool rollback_log_node_cache::give_rollback_log_node(TOKUTXN txn, ROLLBACK_LOG_NODE log){
+ bool retval = false;
+ toku_mutex_lock(&m_mutex);
+ if (m_num_avail < m_max_num_avail) {
+ retval = true;
+ uint32_t index = m_first + m_num_avail;
+ if (index >= m_max_num_avail) {
+ index -= m_max_num_avail;
+ }
+ m_avail_blocknums[index].b = log->blocknum.b;
+ m_num_avail++;
+ }
+ toku_mutex_unlock(&m_mutex);
+ //
+ // now unpin the rollback log node
+ //
+ if (retval) {
+ make_rollback_log_empty(log);
+ toku_rollback_log_unpin(txn, log);
+ }
+ return retval;
+}
+
+// if a rollback log node is available, will set log to it,
+// otherwise, will set log to NULL and caller is on his own
+// for getting a rollback log node
+void rollback_log_node_cache::get_rollback_log_node(TOKUTXN txn, ROLLBACK_LOG_NODE* log){
+ BLOCKNUM b = ROLLBACK_NONE;
+ toku_mutex_lock(&m_mutex);
+ if (m_num_avail > 0) {
+ b.b = m_avail_blocknums[m_first].b;
+ m_num_avail--;
+ if (++m_first >= m_max_num_avail) {
+ m_first = 0;
+ }
+ }
+ toku_mutex_unlock(&m_mutex);
+ if (b.b != ROLLBACK_NONE.b) {
+ toku_get_and_pin_rollback_log(txn, b, log);
+ invariant(rollback_log_is_unused(*log));
+ } else {
+ *log = NULL;
+ }
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/txn/rollback_log_node_cache.h b/storage/tokudb/PerconaFT/ft/txn/rollback_log_node_cache.h
new file mode 100644
index 00000000..c7f1b9a2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/rollback_log_node_cache.h
@@ -0,0 +1,63 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/txn/rollback.h"
+
+class rollback_log_node_cache {
+public:
+ void init (uint32_t max_num_avail_nodes);
+ void destroy();
+ // returns true if rollback log node was successfully added,
+ // false otherwise
+ bool give_rollback_log_node(TOKUTXN txn, ROLLBACK_LOG_NODE log);
+ // if a rollback log node is available, will set log to it,
+ // otherwise, will set log to NULL and caller is on his own
+ // for getting a rollback log node
+ void get_rollback_log_node(TOKUTXN txn, ROLLBACK_LOG_NODE* log);
+
+private:
+ BLOCKNUM* m_avail_blocknums;
+ uint32_t m_first;
+ uint32_t m_num_avail;
+ uint32_t m_max_num_avail;
+ toku_mutex_t m_mutex;
+};
+
+ENSURE_POD(rollback_log_node_cache);
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn.cc b/storage/tokudb/PerconaFT/ft/txn/txn.cc
new file mode 100644
index 00000000..7152833d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/txn.cc
@@ -0,0 +1,754 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/cachetable/checkpoint.h"
+#include "ft/ft.h"
+#include "ft/logger/log-internal.h"
+#include "ft/ule.h"
+#include "ft/txn/rollback-apply.h"
+#include "ft/txn/txn.h"
+#include "ft/txn/txn_manager.h"
+#include "util/status.h"
+
+toku_instr_key *txn_lock_mutex_key;
+toku_instr_key *txn_state_lock_mutex_key;
+toku_instr_key *result_state_cond_key;
+
+void toku_txn_get_status(TXN_STATUS s) {
+ txn_status.init();
+ *s = txn_status;
+}
+
+void
+toku_txn_lock(TOKUTXN txn)
+{
+ toku_mutex_lock(&txn->txn_lock);
+}
+
+void
+toku_txn_unlock(TOKUTXN txn)
+{
+ toku_mutex_unlock(&txn->txn_lock);
+}
+
+uint64_t
+toku_txn_get_root_id(TOKUTXN txn)
+{
+ return txn->txnid.parent_id64;
+}
+
+bool txn_declared_read_only(TOKUTXN txn) {
+ return txn->declared_read_only;
+}
+
+int
+toku_txn_begin_txn (
+ DB_TXN *container_db_txn,
+ TOKUTXN parent_tokutxn,
+ TOKUTXN *tokutxn,
+ TOKULOGGER logger,
+ TXN_SNAPSHOT_TYPE snapshot_type,
+ bool read_only
+ )
+{
+ int r = toku_txn_begin_with_xid(
+ parent_tokutxn,
+ tokutxn,
+ logger,
+ TXNID_PAIR_NONE,
+ snapshot_type,
+ container_db_txn,
+ false, // for_recovery
+ read_only
+ );
+ return r;
+}
+
+
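+// Build this txn's XIDS stack by extending the parent's stack (or the root
+// stack when there is no parent) with this txn's own id.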
+static void
+txn_create_xids(TOKUTXN txn, TOKUTXN parent) {
+ XIDS xids;
+ XIDS parent_xids;
+ if (parent == NULL) {
+ parent_xids = toku_xids_get_root_xids();
+ } else {
+ parent_xids = parent->xids;
+ }
+ toku_xids_create_unknown_child(parent_xids, &xids);
+ TXNID finalized_xid = (parent == NULL) ? txn->txnid.parent_id64 : txn->txnid.child_id64;
+ toku_xids_finalize_with_child(xids, finalized_xid);
+ txn->xids = xids;
+}
+
+// Allocate and initialize a txn
+static void toku_txn_create_txn(TOKUTXN *txn_ptr, TOKUTXN parent, TOKULOGGER logger, TXN_SNAPSHOT_TYPE snapshot_type, DB_TXN *container_db_txn, bool for_checkpoint, bool read_only);
+
+int
+toku_txn_begin_with_xid (
+ TOKUTXN parent,
+ TOKUTXN *txnp,
+ TOKULOGGER logger,
+ TXNID_PAIR xid,
+ TXN_SNAPSHOT_TYPE snapshot_type,
+ DB_TXN *container_db_txn,
+ bool for_recovery,
+ bool read_only
+ )
+{
+ int r = 0;
+ TOKUTXN txn;
+ // check for the case where we are trying to
+ // create too many nested transactions
+ if (!read_only && parent && !toku_xids_can_create_child(parent->xids)) {
+ r = EINVAL;
+ goto exit;
+ }
+ if (read_only && parent) {
+ invariant(txn_declared_read_only(parent));
+ }
+ toku_txn_create_txn(&txn, parent, logger, snapshot_type, container_db_txn, for_recovery, read_only);
+ // txnid64, snapshot_txnid64
+ // will be set in here.
+ if (for_recovery) {
+ if (parent == NULL) {
+ invariant(xid.child_id64 == TXNID_NONE);
+ toku_txn_manager_start_txn_for_recovery(
+ txn,
+ logger->txn_manager,
+ xid.parent_id64
+ );
+ }
+ else {
+ parent->child_manager->start_child_txn_for_recovery(txn, parent, xid);
+ }
+ }
+ else {
+ assert(xid.parent_id64 == TXNID_NONE);
+ assert(xid.child_id64 == TXNID_NONE);
+ if (parent == NULL) {
+ toku_txn_manager_start_txn(
+ txn,
+ logger->txn_manager,
+ snapshot_type,
+ read_only
+ );
+ }
+ else {
+ parent->child_manager->start_child_txn(txn, parent);
+ toku_txn_manager_handle_snapshot_create_for_child_txn(
+ txn,
+ logger->txn_manager,
+ snapshot_type
+ );
+ }
+ }
+ if (!read_only) {
+ // this call will set txn->xids
+ txn_create_xids(txn, parent);
+ }
+ toku_unsafe_set(txnp, txn);
+exit:
+ return r;
+}
+
+DB_TXN *
+toku_txn_get_container_db_txn (TOKUTXN tokutxn) {
+ DB_TXN * container = tokutxn->container_db_txn;
+ return container;
+}
+
+void toku_txn_set_container_db_txn (TOKUTXN tokutxn, DB_TXN*container) {
+ tokutxn->container_db_txn = container;
+}
+
+static void invalidate_xa_xid (TOKU_XA_XID *xid) {
+ TOKU_ANNOTATE_NEW_MEMORY(xid, sizeof(*xid)); // consider it to be all invalid for valgrind
+ xid->formatID = -1; // According to the XA spec, -1 means "invalid data"
+}
+
+static void toku_txn_create_txn (
+ TOKUTXN *tokutxn,
+ TOKUTXN parent_tokutxn,
+ TOKULOGGER logger,
+ TXN_SNAPSHOT_TYPE snapshot_type,
+ DB_TXN *container_db_txn,
+ bool for_recovery,
+ bool read_only
+ )
+{
+ assert(logger->rollback_cachefile);
+
+ omt<FT> open_fts;
+ open_fts.create_no_array();
+
+ struct txn_roll_info roll_info = {
+ .num_rollback_nodes = 0,
+ .num_rollentries = 0,
+ .num_rollentries_processed = 0,
+ .rollentry_raw_count = 0,
+ .spilled_rollback_head = ROLLBACK_NONE,
+ .spilled_rollback_tail = ROLLBACK_NONE,
+ .current_rollback = ROLLBACK_NONE,
+ };
+
+static txn_child_manager tcm;
+
+struct tokutxn new_txn = {
+ .txnid = {.parent_id64 = TXNID_NONE, .child_id64 = TXNID_NONE },
+ .snapshot_txnid64 = TXNID_NONE,
+ .snapshot_type = for_recovery ? TXN_SNAPSHOT_NONE : snapshot_type,
+ .for_recovery = for_recovery,
+ .logger = logger,
+ .parent = parent_tokutxn,
+ .child = NULL,
+ .child_manager_s = tcm,
+ .child_manager = NULL,
+ .container_db_txn = container_db_txn,
+ .live_root_txn_list = nullptr,
+ .xids = NULL,
+ .snapshot_next = NULL,
+ .snapshot_prev = NULL,
+ .begin_was_logged = false,
+ .declared_read_only = read_only,
+ .do_fsync = false,
+ .force_fsync_on_commit = false,
+ .do_fsync_lsn = ZERO_LSN,
+ .xa_xid = {0, 0, 0, ""},
+ .progress_poll_fun = NULL,
+ .progress_poll_fun_extra = NULL,
+
+ // You cannot initialize txn_lock with TOKU_MUTEX_INITIALIZER, because we
+ // will initialize it in the code below, and it cannot already
+ // be initialized at that point. Also, in general, you don't
+ // get to use PTHREAD_MUTEX_INITIALIZER (which is what is inside
+ // TOKU_MUTEX_INITIALIZER) except in static variables, and this
+ // is initializing an auto variable.
+ //
+ // And we cannot simply avoid initializing these fields
+ // because, although it avoids -Wmissing-field-initializers
+ // errors under gcc, it gets other errors about non-trivial
+ // designated initializers not being supported.
+
+ .txn_lock = ZERO_MUTEX_INITIALIZER, // Not TOKU_MUTEX_INITIALIZER
+ .open_fts = open_fts,
+ .roll_info = roll_info,
+ .state_lock = ZERO_MUTEX_INITIALIZER, // Not TOKU_MUTEX_INITIALIZER
+ .state_cond = ZERO_COND_INITIALIZER, // Not TOKU_COND_INITIALIZER
+ .state = TOKUTXN_LIVE,
+ .num_pin = 0,
+ .client_id = 0,
+ .client_extra = nullptr,
+ .start_time = time(NULL),
+};
+
+TOKUTXN result = NULL;
+XMEMDUP(result, &new_txn);
+invalidate_xa_xid(&result->xa_xid);
+if (parent_tokutxn == NULL) {
+ result->child_manager = &result->child_manager_s;
+ result->child_manager->init(result);
+ }
+ else {
+ result->child_manager = parent_tokutxn->child_manager;
+ }
+
+ toku_mutex_init(*txn_lock_mutex_key, &result->txn_lock, nullptr);
+
+ toku_pthread_mutexattr_t attr;
+ toku_mutexattr_init(&attr);
+ toku_mutexattr_settype(&attr, TOKU_MUTEX_ADAPTIVE);
+ toku_mutex_init(*txn_state_lock_mutex_key, &result->state_lock, &attr);
+ toku_mutexattr_destroy(&attr);
+
+ toku_cond_init(*result_state_cond_key, &result->state_cond, nullptr);
+
+ *tokutxn = result;
+
+ if (read_only) {
+ TXN_STATUS_INC(TXN_READ_BEGIN, 1);
+ }
+ else {
+ TXN_STATUS_INC(TXN_BEGIN, 1);
+ }
+}
+
+void
+toku_txn_update_xids_in_txn(TOKUTXN txn, TXNID xid)
+{
+ // these should not have been set yet
+ invariant(txn->txnid.parent_id64 == TXNID_NONE);
+ invariant(txn->txnid.child_id64 == TXNID_NONE);
+ txn->txnid.parent_id64 = xid;
+ txn->txnid.child_id64 = TXNID_NONE;
+}
+
+//Used on recovery to recover a transaction.
+int
+toku_txn_load_txninfo (TOKUTXN txn, struct txninfo *info) {
+ txn->roll_info.rollentry_raw_count = info->rollentry_raw_count;
+ uint32_t i;
+ for (i = 0; i < info->num_fts; i++) {
+ FT ft = info->open_fts[i];
+ toku_txn_maybe_note_ft(txn, ft);
+ }
+ txn->force_fsync_on_commit = info->force_fsync_on_commit;
+ txn->roll_info.num_rollback_nodes = info->num_rollback_nodes;
+ txn->roll_info.num_rollentries = info->num_rollentries;
+
+ txn->roll_info.spilled_rollback_head = info->spilled_rollback_head;
+ txn->roll_info.spilled_rollback_tail = info->spilled_rollback_tail;
+ txn->roll_info.current_rollback = info->current_rollback;
+ return 0;
+}
+
+int toku_txn_commit_txn(TOKUTXN txn, int nosync,
+ TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra)
+// Effect: Doesn't close the txn, just performs the commit operations.
+{
+ return toku_txn_commit_with_lsn(txn, nosync, ZERO_LSN,
+ poll, poll_extra);
+}
+
+struct xcommit_info {
+ int r;
+ TOKUTXN txn;
+};
+
+static void txn_note_commit(TOKUTXN txn) {
+ // Purpose:
+ // Delay until any indexer is done pinning this transaction.
+ // Update status of a transaction from live->committing (or prepared->committing)
+ // Do so in a thread-safe manner that does not conflict with hot indexing or
+ // begin checkpoint.
+ if (toku_txn_is_read_only(txn)) {
+ // Neither hot indexing nor checkpoint do any work with readonly txns,
+ // so we can skip taking the txn_manager lock here.
+ invariant(txn->state==TOKUTXN_LIVE);
+ txn->state = TOKUTXN_COMMITTING;
+ goto done;
+ }
+ if (txn->state==TOKUTXN_PREPARING) {
+ invalidate_xa_xid(&txn->xa_xid);
+ }
+ // for hot indexing, if hot index is processing
+ // this transaction in some leafentry, then we cannot change
+ // the state to commit or abort until
+ // hot index is done with that leafentry
+ toku_txn_lock_state(txn);
+ while (txn->num_pin > 0) {
+ toku_cond_wait(
+ &txn->state_cond,
+ &txn->state_lock
+ );
+ }
+ txn->state = TOKUTXN_COMMITTING;
+ toku_txn_unlock_state(txn);
+done:
+ return;
+}
+
+int toku_txn_commit_with_lsn(TOKUTXN txn, int nosync, LSN oplsn,
+ TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra)
+{
+ // there should be no child when we commit or abort a TOKUTXN
+ invariant(txn->child == NULL);
+ txn_note_commit(txn);
+
+ // Child transactions do not actually 'commit'. They promote their
+ // changes to the parent, so no need to fsync if this txn has a parent. The
+ // do_fsync state is captured in the txn for the toku_txn_maybe_fsync_log function.
+ // Additionally, if the transaction was first prepared, we do not need to
+ // fsync because the prepare caused an fsync of the log. In this case,
+ // we do not need an additional fsync of the log. We rely on the client running
+ // recovery to properly recommit this transaction if the commit
+ // does not make it to disk. In the case of MySQL, that would be the
+ // binary log.
+ txn->do_fsync = !txn->parent && (txn->force_fsync_on_commit || (!nosync && txn->roll_info.num_rollentries>0));
+
+ txn->progress_poll_fun = poll;
+ txn->progress_poll_fun_extra = poll_extra;
+
+ if (!toku_txn_is_read_only(txn)) {
+ toku_log_xcommit(txn->logger, &txn->do_fsync_lsn, 0, txn, txn->txnid);
+ }
+ // If !txn->begin_was_logged, we could skip toku_rollback_commit
+ // but it's cheap (only a number of function calls that return immediately)
+ // since there were no writes. Skipping it would mean we would need to be careful
+ // in case we added any additional required cleanup into those functions in the future.
+ int r = toku_rollback_commit(txn, oplsn);
+ TXN_STATUS_INC(TXN_COMMIT, 1);
+ return r;
+}
+
+int toku_txn_abort_txn(TOKUTXN txn,
+ TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra)
+// Effect: Doesn't close the txn, just performs the abort operations.
+{
+ return toku_txn_abort_with_lsn(txn, ZERO_LSN, poll, poll_extra);
+}
+
+static void txn_note_abort(TOKUTXN txn) {
+ // Purpose:
+ // Delay until any indexer is done pinning this transaction.
+ // Update status of a transaction from live->aborting (or prepared->aborting)
+ // Do so in a thread-safe manner that does not conflict with hot indexing or
+ // begin checkpoint.
+ if (toku_txn_is_read_only(txn)) {
+ // Neither hot indexing nor checkpoint do any work with readonly txns,
+ // so we can skip taking the state lock here.
+ invariant(txn->state==TOKUTXN_LIVE);
+ txn->state = TOKUTXN_ABORTING;
+ goto done;
+ }
+ if (txn->state==TOKUTXN_PREPARING) {
+ invalidate_xa_xid(&txn->xa_xid);
+ }
+ // for hot indexing, if hot index is processing
+ // this transaction in some leafentry, then we cannot change
+ // the state to commit or abort until
+ // hot index is done with that leafentry
+ toku_txn_lock_state(txn);
+ while (txn->num_pin > 0) {
+ toku_cond_wait(
+ &txn->state_cond,
+ &txn->state_lock
+ );
+ }
+ txn->state = TOKUTXN_ABORTING;
+ toku_txn_unlock_state(txn);
+done:
+ return;
+}
+
+int toku_txn_abort_with_lsn(TOKUTXN txn, LSN oplsn,
+ TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra)
+{
+ // there should be no child when we commit or abort a TOKUTXN
+ invariant(txn->child == NULL);
+ txn_note_abort(txn);
+
+ txn->progress_poll_fun = poll;
+ txn->progress_poll_fun_extra = poll_extra;
+ txn->do_fsync = false;
+
+ if (!toku_txn_is_read_only(txn)) {
+ toku_log_xabort(txn->logger, &txn->do_fsync_lsn, 0, txn, txn->txnid);
+ }
+ // If !txn->begin_was_logged, we could skip toku_rollback_abort
+ // but it's cheap (only a number of function calls that return immediately)
+ // since there were no writes. Skipping it would mean we would need to be careful
+ // in case we added any additional required cleanup into those functions in the future.
+ int r = toku_rollback_abort(txn, oplsn);
+ TXN_STATUS_INC(TXN_ABORT, 1);
+ return r;
+}
+
+static void copy_xid (TOKU_XA_XID *dest, TOKU_XA_XID *source) {
+ TOKU_ANNOTATE_NEW_MEMORY(dest, sizeof(*dest));
+ dest->formatID = source->formatID;
+ dest->gtrid_length = source->gtrid_length;
+ dest->bqual_length = source->bqual_length;
+ memcpy(dest->data, source->data, source->gtrid_length+source->bqual_length);
+}
+
+void toku_txn_prepare_txn (TOKUTXN txn, TOKU_XA_XID *xa_xid, int nosync) {
+ if (txn->parent || toku_txn_is_read_only(txn)) {
+ // We do not prepare children.
+ //
+ // Readonly transactions do the same if they commit or abort, so
+ // XA guarantees are free. No need to pay for overhead of prepare.
+ return;
+ }
+ assert(txn->state==TOKUTXN_LIVE);
+ // This state transition must be protected against begin_checkpoint
+ // Therefore, the caller must have the mo lock held
+ toku_txn_lock_state(txn);
+ txn->state = TOKUTXN_PREPARING;
+ toku_txn_unlock_state(txn);
+ // Do we need to do an fsync?
+ txn->do_fsync = txn->force_fsync_on_commit || (!nosync && txn->roll_info.num_rollentries>0);
+ copy_xid(&txn->xa_xid, xa_xid);
+ // This list will go away with #4683, so we won't need the ydb lock for this anymore.
+ toku_log_xprepare(txn->logger, &txn->do_fsync_lsn, 0, txn, txn->txnid, xa_xid);
+}
+
+void toku_txn_get_prepared_xa_xid (TOKUTXN txn, TOKU_XA_XID *xid) {
+ copy_xid(xid, &txn->xa_xid);
+}
+
+int toku_logger_recover_txn (TOKULOGGER logger, struct tokulogger_preplist preplist[/*count*/], long count, /*out*/ long *retp, uint32_t flags) {
+ return toku_txn_manager_recover_root_txn(
+ logger->txn_manager,
+ preplist,
+ count,
+ retp,
+ flags
+ );
+}
+
+void toku_txn_maybe_fsync_log(TOKULOGGER logger, LSN do_fsync_lsn, bool do_fsync) {
+ if (logger && do_fsync) {
+ toku_logger_fsync_if_lsn_not_fsynced(logger, do_fsync_lsn);
+ }
+}
+
+void toku_txn_get_fsync_info(TOKUTXN ttxn, bool* do_fsync, LSN* do_fsync_lsn) {
+ *do_fsync = ttxn->do_fsync;
+ *do_fsync_lsn = ttxn->do_fsync_lsn;
+}
+
+void toku_txn_close_txn(TOKUTXN txn) {
+ toku_txn_complete_txn(txn);
+ toku_txn_destroy_txn(txn);
+}
+
+int remove_txn (const FT &h, const uint32_t UU(idx), TOKUTXN const txn);
+int remove_txn (const FT &h, const uint32_t UU(idx), TOKUTXN const UU(txn))
+// Effect: This function is called on every open FT that a transaction used.
+// This function removes the transaction from that FT.
+{
+ toku_ft_remove_txn_ref(h);
+
+ return 0;
+}
+
+// for every ft in txn, remove it.
+static void note_txn_closing (TOKUTXN txn) {
+ txn->open_fts.iterate<struct tokutxn, remove_txn>(txn);
+}
+
+void toku_txn_complete_txn(TOKUTXN txn) {
+ assert(txn->roll_info.spilled_rollback_head.b == ROLLBACK_NONE.b);
+ assert(txn->roll_info.spilled_rollback_tail.b == ROLLBACK_NONE.b);
+ assert(txn->roll_info.current_rollback.b == ROLLBACK_NONE.b);
+ assert(txn->num_pin == 0);
+ assert(txn->state == TOKUTXN_COMMITTING || txn->state == TOKUTXN_ABORTING || txn->state == TOKUTXN_PREPARING);
+ if (txn->parent) {
+ toku_txn_manager_handle_snapshot_destroy_for_child_txn(
+ txn,
+ txn->logger->txn_manager,
+ txn->snapshot_type
+ );
+ txn->parent->child_manager->finish_child_txn(txn);
+ }
+ else {
+ toku_txn_manager_finish_txn(txn->logger->txn_manager, txn);
+ txn->child_manager->destroy();
+ }
+ // note that here is another place we depend on
+ // this function being called with the multi operation lock
+ note_txn_closing(txn);
+}
+
+void toku_txn_destroy_txn(TOKUTXN txn) {
+ txn->open_fts.destroy();
+ if (txn->xids) {
+ toku_xids_destroy(&txn->xids);
+ }
+ toku_mutex_destroy(&txn->txn_lock);
+ toku_mutex_destroy(&txn->state_lock);
+ toku_cond_destroy(&txn->state_cond);
+ toku_free(txn);
+}
+
+XIDS toku_txn_get_xids (TOKUTXN txn) {
+ if (txn==0) return toku_xids_get_root_xids();
+ else return txn->xids;
+}
+
+void toku_txn_force_fsync_on_commit(TOKUTXN txn) {
+ txn->force_fsync_on_commit = true;
+}
+
+TXNID toku_get_oldest_in_live_root_txn_list(TOKUTXN txn) {
+ TXNID xid;
+ if (txn->live_root_txn_list->size()>0) {
+ int r = txn->live_root_txn_list->fetch(0, &xid);
+ assert_zero(r);
+ }
+ else {
+ xid = TXNID_NONE;
+ }
+ return xid;
+}
+
+bool toku_is_txn_in_live_root_txn_list(const xid_omt_t &live_root_txn_list, TXNID xid) {
+ TXNID txnid;
+ bool retval = false;
+ int r = live_root_txn_list.find_zero<TXNID, toku_find_xid_by_xid>(xid, &txnid, nullptr);
+ if (r==0) {
+ invariant(txnid == xid);
+ retval = true;
+ }
+ else {
+ invariant(r==DB_NOTFOUND);
+ }
+ return retval;
+}
+
+TOKUTXN_STATE
+toku_txn_get_state(TOKUTXN txn) {
+ return txn->state;
+}
+
+static void
+maybe_log_begin_txn_for_write_operation_unlocked(TOKUTXN txn) {
+ // We now hold the lock.
+ if (txn->begin_was_logged) {
+ return;
+ }
+ TOKUTXN parent;
+ parent = txn->parent;
+ TXNID_PAIR xid;
+ xid = txn->txnid;
+ TXNID_PAIR pxid;
+ pxid = TXNID_PAIR_NONE;
+ if (parent) {
+ // Recursively log parent first if necessary.
+ // Transactions cannot do work if they have children,
+ // so the lowest level child's lock is sufficient for ancestors.
+ maybe_log_begin_txn_for_write_operation_unlocked(parent);
+ pxid = parent->txnid;
+ }
+
+ toku_log_xbegin(txn->logger, NULL, 0, xid, pxid);
+ txn->begin_was_logged = true;
+}
+
+void
+toku_maybe_log_begin_txn_for_write_operation(TOKUTXN txn) {
+ toku_txn_lock(txn);
+ maybe_log_begin_txn_for_write_operation_unlocked(txn);
+ toku_txn_unlock(txn);
+}
+
+bool
+toku_txn_is_read_only(TOKUTXN txn) {
+ // No need to recursively check children because parents are
+ // recursively logged before children.
+ if (!txn->begin_was_logged) {
+ // Did no work.
+ invariant(txn->roll_info.num_rollentries == 0);
+ invariant(txn->do_fsync_lsn.lsn == ZERO_LSN.lsn);
+ invariant(txn->open_fts.size() == 0);
+ invariant(txn->num_pin==0);
+ return true;
+ }
+ return false;
+}
+
+// needed for hot indexing
+void toku_txn_lock_state(TOKUTXN txn) {
+ toku_mutex_lock(&txn->state_lock);
+}
+void toku_txn_unlock_state(TOKUTXN txn){
+ toku_mutex_unlock(&txn->state_lock);
+}
+
+
+// prevents a client thread from transitioning txn from LIVE|PREPARING -> COMMITTING|ABORTING.
+// hot indexing may need a transaction to stay in the LIVE|PREPARING state while it processes
+// a leafentry.
+void toku_txn_pin_live_txn_unlocked(TOKUTXN txn) {
+ assert(txn->state == TOKUTXN_LIVE || txn->state == TOKUTXN_PREPARING);
+ assert(!toku_txn_is_read_only(txn));
+ txn->num_pin++;
+}
+
+// allows a client thread to go back to being able to transition txn
+// from LIVE|PREPARING -> COMMITTING|ABORTING
+void toku_txn_unpin_live_txn(TOKUTXN txn) {
+ assert(txn->state == TOKUTXN_LIVE || txn->state == TOKUTXN_PREPARING);
+ assert(txn->num_pin > 0);
+ toku_txn_lock_state(txn);
+ txn->num_pin--;
+ if (txn->num_pin == 0) {
+ toku_cond_broadcast(&txn->state_cond);
+ }
+ toku_txn_unlock_state(txn);
+}
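+// A plausible pin/unpin sequence for a hot-indexing thread (illustrative sketch only;
+// the real callers live in the hot indexer, not in this file):
+//
+//     toku_txn_lock_state(txn);
+//     toku_txn_pin_live_txn_unlocked(txn);  // txn must be LIVE|PREPARING and not read-only
+//     toku_txn_unlock_state(txn);
+//     /* ... process the leafentry ... */
+//     toku_txn_unpin_live_txn(txn);         // broadcasts state_cond when num_pin drops to 0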
+
+bool toku_txn_has_spilled_rollback(TOKUTXN txn) {
+ return txn_has_spilled_rollback_logs(txn);
+}
+
+void toku_txn_get_client_id(TOKUTXN txn, uint64_t *client_id, void **client_extra) {
+ if (client_id) *client_id = txn->client_id;
+ if (client_extra) *client_extra = txn->client_extra;
+}
+
+void toku_txn_set_client_id(TOKUTXN txn, uint64_t client_id, void *client_extra) {
+ txn->client_id = client_id;
+ txn->client_extra = client_extra;
+}
+
+time_t toku_txn_get_start_time(struct tokutxn *txn) {
+ return txn->start_time;
+}
+
+extern uint force_recovery;
+int toku_txn_reads_txnid(TXNID txnid, TOKUTXN txn, bool is_provisional UU()) {
+ if(force_recovery) {
+ return TOKUDB_ACCEPT;
+ }
+ int r = 0;
+ TXNID oldest_live_in_snapshot = toku_get_oldest_in_live_root_txn_list(txn);
+ if (oldest_live_in_snapshot == TXNID_NONE && txnid < txn->snapshot_txnid64) {
+ r = TOKUDB_ACCEPT;
+ } else if (txnid < oldest_live_in_snapshot || txnid == txn->txnid.parent_id64) {
+ r = TOKUDB_ACCEPT;
+ } else if (txnid > txn->snapshot_txnid64 || toku_is_txn_in_live_root_txn_list(*txn->live_root_txn_list, txnid)) {
+ r = 0;
+ } else {
+ r = TOKUDB_ACCEPT;
+ }
+ return r;
+}
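+// Worked example (hypothetical ids, none of them txn's own root id): suppose the
+// reading txn's snapshot_txnid64 is 100 and its live root txn list is {90, 95}, so
+// the oldest entry in that list is 90. Then:
+//   txnid  80: 80 < 90, committed before the snapshot         -> TOKUDB_ACCEPT
+//   txnid  90: in the live list (still open at snapshot time) -> 0 (not visible)
+//   txnid  97: <= 100 and not in the live list                -> TOKUDB_ACCEPT
+//   txnid 120: > snapshot_txnid64                             -> 0 (not visible)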
+
+int toku_txn_discard_txn(TOKUTXN txn) {
+ int r = toku_rollback_discard(txn);
+ return r;
+}
+
+#include <toku_race_tools.h>
+void __attribute__((__constructor__)) toku_txn_status_helgrind_ignore(void);
+void toku_txn_status_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&txn_status, sizeof txn_status);
+}
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn.h b/storage/tokudb/PerconaFT/ft/txn/txn.h
new file mode 100644
index 00000000..34a76aa9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/txn.h
@@ -0,0 +1,362 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "portability/toku_stdint.h"
+
+#include "ft/txn/txn_state.h"
+#include "ft/serialize/block_table.h"
+#include "ft/ft-status.h"
+#include "util/omt.h"
+
+typedef uint64_t TXNID;
+
+typedef struct tokutxn *TOKUTXN;
+
+#define TXNID_NONE_LIVING ((TXNID)0)
+#define TXNID_NONE ((TXNID)0)
+#define TXNID_MAX ((TXNID)-1)
+
+typedef struct txnid_pair_s {
+ TXNID parent_id64;
+ TXNID child_id64;
+} TXNID_PAIR;
+
+static const TXNID_PAIR TXNID_PAIR_NONE = { .parent_id64 = TXNID_NONE, .child_id64 = TXNID_NONE };
+
+// We include the child manager here because it uses the TXNID / TOKUTXN types
+#include "ft/txn/txn_child_manager.h"
+
+/* Log Sequence Number (LSN)
+ * Make the LSN be a struct instead of an integer so that we get better type checking. */
+typedef struct __toku_lsn { uint64_t lsn; } LSN;
+static const LSN ZERO_LSN = { .lsn = 0 };
+static const LSN MAX_LSN = { .lsn = UINT64_MAX };
+
+//
+// Types of snapshots that can be taken by a tokutxn
+// - TXN_SNAPSHOT_NONE: means that there is no snapshot. Reads do not use snapshot reads.
+// used for SERIALIZABLE and READ UNCOMMITTED
+// - TXN_SNAPSHOT_ROOT: means that all tokutxns use their root transaction's snapshot
+// used for REPEATABLE READ
+// - TXN_SNAPSHOT_CHILD: means that each child tokutxn creates its own snapshot
+// used for READ COMMITTED
+//
+
+typedef enum __TXN_SNAPSHOT_TYPE {
+ TXN_SNAPSHOT_NONE=0,
+ TXN_SNAPSHOT_ROOT=1,
+ TXN_SNAPSHOT_CHILD=2,
+ TXN_COPIES_SNAPSHOT=3
+} TXN_SNAPSHOT_TYPE;
+
+typedef toku::omt<struct tokutxn *> txn_omt_t;
+typedef toku::omt<TXNID> xid_omt_t;
+typedef toku::omt<struct referenced_xid_tuple, struct referenced_xid_tuple *> rx_omt_t;
+
+inline bool txn_pair_is_none(TXNID_PAIR txnid) {
+ return txnid.parent_id64 == TXNID_NONE && txnid.child_id64 == TXNID_NONE;
+}
+
+struct tokulogger;
+
+struct txn_roll_info {
+    // these are the numbers of rollback nodes and rollback entries for this txn.
+ //
+ // the current rollback node below has sequence number num_rollback_nodes - 1
+ // (because they are numbered 0...num-1). often, the current rollback is
+ // already set to this block num, which means it exists and is available to
+ // log some entries. if the current rollback is NONE and the number of
+ // rollback nodes for this transaction is non-zero, then we will use
+ // the number of rollback nodes to know which sequence number to assign
+ // to a new one we create
+ uint64_t num_rollback_nodes;
+ uint64_t num_rollentries;
+ uint64_t num_rollentries_processed;
+ uint64_t rollentry_raw_count; // the total count of every byte in the transaction and all its children.
+
+ // spilled rollback nodes are rollback nodes that were gorged by this
+ // transaction, retired, and saved in a list.
+
+ // the spilled rollback head is the block number of the first rollback node
+ // that makes up the rollback log chain
+ BLOCKNUM spilled_rollback_head;
+
+    // the spilled rollback tail is the block number of the last rollback node
+    // that makes up the rollback log chain.
+ BLOCKNUM spilled_rollback_tail;
+
+ // the current rollback node block number we may use. if this is ROLLBACK_NONE,
+ // then we need to create one and set it here before using it.
+ BLOCKNUM current_rollback;
+};
+
+struct tokutxn {
+ // These don't change after create:
+
+ TXNID_PAIR txnid;
+
+    uint64_t snapshot_txnid64; // this is the txnid of the snapshot
+ const TXN_SNAPSHOT_TYPE snapshot_type;
+ const bool for_recovery;
+ struct tokulogger *const logger;
+ struct tokutxn *const parent;
+ // The child txn is protected by the child_txn_manager lock
+ // and by the user contract. The user contract states (and is
+ // enforced at the ydb layer) that a child txn should not be created
+ // while another child exists. The txn_child_manager will protect
+ // other threads from trying to read this value while another
+ // thread commits/aborts the child
+ struct tokutxn *child;
+
+ // statically allocated child manager, if this
+ // txn is a root txn, this manager will be used and set to
+ // child_manager for this transaction and all of its children
+ txn_child_manager child_manager_s;
+
+ // child manager for this transaction, all of its children,
+ // and all of its ancestors
+ txn_child_manager* child_manager;
+
+ // These don't change but they're created in a way that's hard to make
+ // strictly const.
+ DB_TXN *container_db_txn; // reference to DB_TXN that contains this tokutxn
+ xid_omt_t *live_root_txn_list; // the root txns live when the root ancestor (self if a root) started.
+ struct XIDS_S *xids; // Represents the xid list
+
+ struct tokutxn *snapshot_next;
+ struct tokutxn *snapshot_prev;
+
+ bool begin_was_logged;
+    bool declared_read_only; // true if the txn was declared read only when it began
+
+ // These are not read until a commit, prepare, or abort starts, and
+ // they're "monotonic" (only go false->true) during operation:
+ bool do_fsync;
+ bool force_fsync_on_commit; //This transaction NEEDS an fsync once (if) it commits. (commit means root txn)
+
+ // Not used until commit, prepare, or abort starts:
+ LSN do_fsync_lsn;
+ TOKU_XA_XID xa_xid; // for prepared transactions
+ TXN_PROGRESS_POLL_FUNCTION progress_poll_fun;
+ void *progress_poll_fun_extra;
+
+ toku_mutex_t txn_lock;
+ // Protected by the txn lock:
+ toku::omt<struct ft*> open_fts; // a collection of the fts that we touched. Indexed by filenum.
+ struct txn_roll_info roll_info; // Info used to manage rollback entries
+
+ // mutex that protects the transition of the state variable
+ // the rest of the variables are used by the txn code and
+ // hot indexing to ensure that when hot indexing is processing a
+    // leafentry, a TOKUTXN cannot disappear or change state out from
+ // underneath it
+ toku_mutex_t state_lock;
+ toku_cond_t state_cond;
+ TOKUTXN_STATE state;
+ uint32_t num_pin; // number of threads (all hot indexes) that want this
+ // txn to not transition to commit or abort
+ uint64_t client_id;
+ void *client_extra;
+ time_t start_time;
+};
+typedef struct tokutxn *TOKUTXN;
+
+void toku_txn_lock(struct tokutxn *txn);
+void toku_txn_unlock(struct tokutxn *txn);
+
+uint64_t toku_txn_get_root_id(struct tokutxn *txn);
+bool txn_declared_read_only(struct tokutxn *txn);
+
+int toku_txn_begin_txn (
+ DB_TXN *container_db_txn,
+ struct tokutxn *parent_tokutxn,
+ struct tokutxn **tokutxn,
+ struct tokulogger *logger,
+ TXN_SNAPSHOT_TYPE snapshot_type,
+ bool read_only
+ );
+
+DB_TXN * toku_txn_get_container_db_txn (struct tokutxn *tokutxn);
+void toku_txn_set_container_db_txn(struct tokutxn *txn, DB_TXN *db_txn);
+
+// toku_txn_begin_with_xid is called from recovery and has no containing DB_TXN
+int toku_txn_begin_with_xid (
+ struct tokutxn *parent_tokutxn,
+ struct tokutxn **tokutxn,
+ struct tokulogger *logger,
+ TXNID_PAIR xid,
+ TXN_SNAPSHOT_TYPE snapshot_type,
+ DB_TXN *container_db_txn,
+ bool for_recovery,
+ bool read_only
+ );
+
+void toku_txn_update_xids_in_txn(struct tokutxn *txn, TXNID xid);
+
+int toku_txn_load_txninfo (struct tokutxn *txn, struct txninfo *info);
+
+int toku_txn_commit_txn (struct tokutxn *txn, int nosync,
+ TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra);
+int toku_txn_commit_with_lsn(struct tokutxn *txn, int nosync, LSN oplsn,
+ TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra);
+
+int toku_txn_abort_txn(struct tokutxn *txn,
+ TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra);
+int toku_txn_abort_with_lsn(struct tokutxn *txn, LSN oplsn,
+ TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra);
+
+int toku_txn_discard_txn(struct tokutxn *txn);
+
+void toku_txn_prepare_txn (struct tokutxn *txn, TOKU_XA_XID *xid, int nosync);
+// Effect: Do the internal work of preparing a transaction (does not log the prepare record).
+
+void toku_txn_get_prepared_xa_xid(struct tokutxn *txn, TOKU_XA_XID *xa_xid);
+// Effect: Fill in the XID information for a transaction. The caller allocates the XID and the function fills in values.
+
+void toku_txn_maybe_fsync_log(struct tokulogger *logger, LSN do_fsync_lsn, bool do_fsync);
+
+void toku_txn_get_fsync_info(struct tokutxn *ttxn, bool* do_fsync, LSN* do_fsync_lsn);
+
+// Complete and destroy a txn
+void toku_txn_close_txn(struct tokutxn *txn);
+
+// Remove a txn from any live txn lists
+void toku_txn_complete_txn(struct tokutxn *txn);
+
+// Free the memory of a txn
+void toku_txn_destroy_txn(struct tokutxn *txn);
+
+struct XIDS_S *toku_txn_get_xids(struct tokutxn *txn);
+
+// Force fsync on commit
+void toku_txn_force_fsync_on_commit(struct tokutxn *txn);
+
+void toku_txn_get_status(TXN_STATUS s);
+
+bool toku_is_txn_in_live_root_txn_list(const xid_omt_t &live_root_txn_list, TXNID xid);
+
+TXNID toku_get_oldest_in_live_root_txn_list(struct tokutxn *txn);
+
+TOKUTXN_STATE toku_txn_get_state(struct tokutxn *txn);
+
+struct tokulogger_preplist {
+ TOKU_XA_XID xid;
+ DB_TXN *txn;
+};
+int toku_logger_recover_txn (struct tokulogger *logger, struct tokulogger_preplist preplist[/*count*/], long count, /*out*/ long *retp, uint32_t flags);
+
+void toku_maybe_log_begin_txn_for_write_operation(struct tokutxn *txn);
+
+// Return whether txn (or its descendants) has done no work.
+bool toku_txn_is_read_only(struct tokutxn *txn);
+
+void toku_txn_lock_state(struct tokutxn *txn);
+void toku_txn_unlock_state(struct tokutxn *txn);
+void toku_txn_pin_live_txn_unlocked(struct tokutxn *txn);
+void toku_txn_unpin_live_txn(struct tokutxn *txn);
+
+bool toku_txn_has_spilled_rollback(struct tokutxn *txn);
+
+void toku_txn_get_client_id(struct tokutxn *txn, uint64_t *client_id, void **client_extra);
+void toku_txn_set_client_id(struct tokutxn *txn, uint64_t client_id, void *client_extra);
+
+time_t toku_txn_get_start_time(struct tokutxn *txn);
+
+//
+// This function is used by the leafentry iterators.
+// returns TOKUDB_ACCEPT if the live transaction context is allowed to read a value
+// that was written by the transaction with txnid id.
+// The live transaction context may read the value if either id is the root ancestor of context, or if
+// id was committed before context's snapshot was taken.
+// For id to be committed before context's snapshot was taken, the following must be true:
+// - id < context->snapshot_txnid64 AND id is not in context's live root transaction list
+// For the above to NOT be true:
+// - id > context->snapshot_txnid64 OR id is in context's live root transaction list
+//
+int toku_txn_reads_txnid(TXNID txnid, struct tokutxn *txn, bool is_provisional UU());
+
+// For serialize / deserialize
+
+#include "ft/serialize/wbuf.h"
+
+static inline void wbuf_TXNID(struct wbuf *wb, TXNID txnid) {
+ wbuf_ulonglong(wb, txnid);
+}
+
+static inline void wbuf_nocrc_TXNID(struct wbuf *wb, TXNID txnid) {
+ wbuf_nocrc_ulonglong(wb, txnid);
+}
+
+static inline void wbuf_nocrc_TXNID_PAIR(struct wbuf *wb, TXNID_PAIR txnid) {
+ wbuf_nocrc_ulonglong(wb, txnid.parent_id64);
+ wbuf_nocrc_ulonglong(wb, txnid.child_id64);
+}
+
+static inline void wbuf_nocrc_LSN(struct wbuf *wb, LSN lsn) {
+ wbuf_nocrc_ulonglong(wb, lsn.lsn);
+}
+
+static inline void wbuf_LSN(struct wbuf *wb, LSN lsn) {
+ wbuf_ulonglong(wb, lsn.lsn);
+}
+
+#include "ft/serialize/rbuf.h"
+
+static inline void rbuf_TXNID(struct rbuf *rb, TXNID *txnid) {
+ *txnid = rbuf_ulonglong(rb);
+}
+
+static inline void rbuf_TXNID_PAIR(struct rbuf *rb, TXNID_PAIR *txnid) {
+ txnid->parent_id64 = rbuf_ulonglong(rb);
+ txnid->child_id64 = rbuf_ulonglong(rb);
+}
+
+static inline void rbuf_ma_TXNID(struct rbuf *rb, memarena *UU(ma), TXNID *txnid) {
+ rbuf_TXNID(rb, txnid);
+}
+
+static inline void rbuf_ma_TXNID_PAIR (struct rbuf *r, memarena *ma __attribute__((__unused__)), TXNID_PAIR *txnid) {
+ rbuf_TXNID_PAIR(r, txnid);
+}
+
+static inline LSN rbuf_LSN(struct rbuf *rb) {
+ LSN lsn = { .lsn = rbuf_ulonglong(rb) };
+ return lsn;
+}
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_child_manager.cc b/storage/tokudb/PerconaFT/ft/txn/txn_child_manager.cc
new file mode 100644
index 00000000..99a21331
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/txn_child_manager.cc
@@ -0,0 +1,143 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "ft/logger/log-internal.h"
+#include "ft/txn/txn_child_manager.h"
+
+toku_instr_key *txn_child_manager_mutex_key;
+
+//
+// initializes a txn_child_manager;
+// when called, root->txnid.parent_id64 may not yet be set
+//
+void txn_child_manager::init(TOKUTXN root) {
+ invariant(root->txnid.child_id64 == TXNID_NONE);
+ invariant(root->parent == NULL);
+ m_root = root;
+ m_last_xid = TXNID_NONE;
+ ZERO_STRUCT(m_mutex);
+
+ toku_pthread_mutexattr_t attr;
+ toku_mutexattr_init(&attr);
+ toku_mutexattr_settype(&attr, TOKU_MUTEX_ADAPTIVE);
+ toku_mutex_init(*txn_child_manager_mutex_key, &m_mutex, &attr);
+ toku_mutexattr_destroy(&attr);
+}
+
+void txn_child_manager::destroy() {
+ toku_mutex_destroy(&m_mutex);
+}
+
+void txn_child_manager::start_child_txn_for_recovery(TOKUTXN child, TOKUTXN parent, TXNID_PAIR txnid) {
+ invariant(parent->txnid.parent_id64 == m_root->txnid.parent_id64);
+ invariant(txnid.parent_id64 == m_root->txnid.parent_id64);
+
+ child->txnid = txnid;
+ toku_mutex_lock(&m_mutex);
+ if (txnid.child_id64 > m_last_xid) {
+ m_last_xid = txnid.child_id64;
+ }
+ parent->child = child;
+ toku_mutex_unlock(&m_mutex);
+}
+
+void txn_child_manager::start_child_txn(TOKUTXN child, TOKUTXN parent) {
+ invariant(parent->txnid.parent_id64 == m_root->txnid.parent_id64);
+ child->txnid.parent_id64 = m_root->txnid.parent_id64;
+ toku_mutex_lock(&m_mutex);
+
+ ++m_last_xid;
+ // Here we ensure that the child_id64 is never equal to the parent_id64
+ // We do this to make this feature work more easily with the XIDs
+ // struct and message application. The XIDs struct stores the parent id
+ // as the first TXNID, and subsequent TXNIDs store child ids. So, if we
+ // have a case where the parent id is the same as the child id, we will
+ // have to do some tricky maneuvering in the message application code
+ // in ule.cc. So, to lessen the probability of bugs, we ensure that the
+ // parent id is not the same as the child id.
+ if (m_last_xid == m_root->txnid.parent_id64) {
+ ++m_last_xid;
+ }
+ child->txnid.child_id64 = m_last_xid;
+
+ parent->child = child;
+ toku_mutex_unlock(&m_mutex);
+}
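+// Worked example for start_child_txn (hypothetical ids): if the root's parent_id64 is 7
+// and m_last_xid was 6, the increment yields 7, which would equal the parent id; the
+// extra bump assigns child_id64 = 8 instead.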
+
+void txn_child_manager::finish_child_txn(TOKUTXN child) {
+ invariant(child->txnid.parent_id64 == m_root->txnid.parent_id64);
+ toku_mutex_lock(&m_mutex);
+ child->parent->child = NULL;
+ toku_mutex_unlock(&m_mutex);
+}
+
+void txn_child_manager::suspend() {
+ toku_mutex_lock(&m_mutex);
+}
+
+void txn_child_manager::resume() {
+ toku_mutex_unlock(&m_mutex);
+}
+
+void txn_child_manager::find_tokutxn_by_xid_unlocked(TXNID_PAIR xid, TOKUTXN* result) {
+ invariant(xid.parent_id64 == m_root->txnid.parent_id64);
+ TOKUTXN curr_txn = m_root;
+ while (curr_txn != NULL) {
+ if (xid.child_id64 == curr_txn->txnid.child_id64) {
+ *result = curr_txn;
+ break;
+ }
+ curr_txn = curr_txn->child;
+ }
+}
+
+int txn_child_manager::iterate(txn_mgr_iter_callback cb, void* extra) {
+ TOKUTXN curr_txn = m_root;
+ int ret = 0;
+ toku_mutex_lock(&m_mutex);
+ while (curr_txn != NULL) {
+ ret = cb(curr_txn, extra);
+ if (ret != 0) {
+ break;
+ }
+ curr_txn = curr_txn->child;
+ }
+ toku_mutex_unlock(&m_mutex);
+ return ret;
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_child_manager.h b/storage/tokudb/PerconaFT/ft/txn/txn_child_manager.h
new file mode 100644
index 00000000..76db3705
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/txn_child_manager.h
@@ -0,0 +1,66 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// We should be including ft/txn/txn.h here but that header includes this one,
+// so we don't.
+#include "portability/toku_pthread.h"
+
+class txn_child_manager {
+public:
+ void init (TOKUTXN root);
+ void destroy();
+ void start_child_txn_for_recovery(TOKUTXN child, TOKUTXN parent, TXNID_PAIR txnid);
+ void start_child_txn(TOKUTXN child, TOKUTXN parent);
+ void finish_child_txn(TOKUTXN child);
+ void suspend();
+ void resume();
+ void find_tokutxn_by_xid_unlocked(TXNID_PAIR xid, TOKUTXN* result);
+ int iterate(int (*cb)(TOKUTXN txn, void *extra), void* extra);
+
+private:
+ TXNID m_last_xid;
+ TOKUTXN m_root;
+ toku_mutex_t m_mutex;
+
+ friend class txn_child_manager_unit_test;
+};
+
+
+ENSURE_POD(txn_child_manager);
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc b/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc
new file mode 100644
index 00000000..1b55844b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc
@@ -0,0 +1,1040 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "portability/toku_race_tools.h"
+
+#include "ft/cachetable/checkpoint.h"
+#include "ft/logger/log-internal.h"
+#include "ft/ule.h"
+#include "ft/txn/txn.h"
+#include "ft/txn/txn_manager.h"
+#include "ft/txn/rollback.h"
+#include "util/omt.h"
+//this is only for testing
+
+static void (* test_txn_sync_callback) (pthread_t, void *) = NULL;
+static void * test_txn_sync_callback_extra = NULL;
+
+void set_test_txn_sync_callback(void (*cb) (pthread_t, void *), void *extra) {
+ test_txn_sync_callback = cb;
+ test_txn_sync_callback_extra = extra;
+}
+bool garbage_collection_debug = false;
+
+toku_instr_key *txn_manager_lock_mutex_key;
+
+static bool txn_records_snapshot(TXN_SNAPSHOT_TYPE snapshot_type,
+ struct tokutxn *parent) {
+ if (snapshot_type == TXN_COPIES_SNAPSHOT) {
+ return false;
+ }
+ // we need a snapshot if the snapshot type is a child or
+ // if the snapshot type is root and we have no parent.
+    // Cases where we don't need a snapshot: when the snapshot type is NONE
+ // or when it is ROOT and we have a parent
+ return (snapshot_type != TXN_SNAPSHOT_NONE && (parent==NULL || snapshot_type == TXN_SNAPSHOT_CHILD));
+}
+
+static bool txn_copies_snapshot(TXN_SNAPSHOT_TYPE snapshot_type, struct tokutxn *parent) {
+ return (snapshot_type == TXN_COPIES_SNAPSHOT) || txn_records_snapshot(snapshot_type, parent);
+}
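+// Summary of the two predicates above (derived from the code, for reference):
+//   snapshot_type        parent     records_snapshot   copies_snapshot
+//   TXN_SNAPSHOT_NONE    any        false              false
+//   TXN_SNAPSHOT_ROOT    NULL       true               true
+//   TXN_SNAPSHOT_ROOT    non-NULL   false              false
+//   TXN_SNAPSHOT_CHILD   any        true               true
+//   TXN_COPIES_SNAPSHOT  any        false              true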
+
+// internal locking functions, should use this instead of accessing lock directly
+static void txn_manager_lock(TXN_MANAGER txn_manager);
+static void txn_manager_unlock(TXN_MANAGER txn_manager);
+
+#if 0
+static bool is_txnid_live(TXN_MANAGER txn_manager, TXNID txnid) {
+ TOKUTXN result = NULL;
+ toku_txn_manager_id2txn_unlocked(txn_manager, txnid, &result);
+ return (result != NULL);
+}
+#endif
+
+//Heaviside function to search through an OMT by a TXNID
+int find_by_xid (const TOKUTXN &txn, const TXNID &txnidfind);
+
+static bool is_txnid_live(TXN_MANAGER txn_manager, TXNID txnid) {
+ TOKUTXN result = NULL;
+ TXNID_PAIR id = { .parent_id64 = txnid, .child_id64 = TXNID_NONE };
+ toku_txn_manager_id2txn_unlocked(txn_manager, id, &result);
+ return (result != NULL);
+}
+
+static void toku_txn_manager_clone_state_for_gc_unlocked(
+ TXN_MANAGER txn_manager,
+ xid_omt_t* snapshot_xids,
+ rx_omt_t* referenced_xids,
+ xid_omt_t* live_root_txns
+ );
+
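+// Debug-only consistency check (run when garbage_collection_debug is set): cross-checks
+// the snapshot txn list, each snapshot's live root txn list, and referenced_xids
+// against one another.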
+static void
+verify_snapshot_system(TXN_MANAGER txn_manager UU()) {
+ uint32_t num_snapshot_txnids = txn_manager->num_snapshots;
+ TXNID snapshot_txnids[num_snapshot_txnids];
+ TOKUTXN snapshot_txns[num_snapshot_txnids];
+ uint32_t num_live_txns = txn_manager->live_root_txns.size();
+ TOKUTXN live_txns[num_live_txns];
+ uint32_t num_referenced_xid_tuples = txn_manager->referenced_xids.size();
+ struct referenced_xid_tuple *referenced_xid_tuples[num_referenced_xid_tuples];
+
+ // do this to get an omt of snapshot_txnids
+ xid_omt_t snapshot_txnids_omt;
+ rx_omt_t referenced_xids_omt;
+ xid_omt_t live_root_txns_omt;
+ toku_txn_manager_clone_state_for_gc_unlocked(
+ txn_manager,
+ &snapshot_txnids_omt,
+ &referenced_xids_omt,
+ &live_root_txns_omt
+ );
+
+ int r;
+ uint32_t i;
+ uint32_t j;
+ //set up arrays for easier access
+ {
+ TOKUTXN curr_txn = txn_manager->snapshot_head;
+ uint32_t curr_index = 0;
+ while (curr_txn != NULL) {
+ snapshot_txns[curr_index] = curr_txn;
+ snapshot_txnids[curr_index] = curr_txn->snapshot_txnid64;
+ curr_txn = curr_txn->snapshot_next;
+ curr_index++;
+ }
+ }
+
+ for (i = 0; i < num_live_txns; i++) {
+ r = txn_manager->live_root_txns.fetch(i, &live_txns[i]);
+ assert_zero(r);
+ }
+ for (i = 0; i < num_referenced_xid_tuples; i++) {
+ r = txn_manager->referenced_xids.fetch(i, &referenced_xid_tuples[i]);
+ assert_zero(r);
+ }
+
+ {
+ //Verify snapshot_txnids
+ for (i = 0; i < num_snapshot_txnids; i++) {
+ TXNID snapshot_xid = snapshot_txnids[i];
+ TOKUTXN snapshot_txn = snapshot_txns[i];
+ uint32_t num_live_root_txn_list = snapshot_txn->live_root_txn_list->size();
+ TXNID live_root_txn_list[num_live_root_txn_list];
+ {
+ for (j = 0; j < num_live_root_txn_list; j++) {
+ r = snapshot_txn->live_root_txn_list->fetch(j, &live_root_txn_list[j]);
+ assert_zero(r);
+ }
+ }
+ {
+                // Only committed entries return a youngest.
+ TXNID youngest = toku_get_youngest_live_list_txnid_for(
+ snapshot_xid,
+ snapshot_txnids_omt,
+ txn_manager->referenced_xids
+ );
+ invariant(youngest == TXNID_NONE);
+ }
+ for (j = 0; j < num_live_root_txn_list; j++) {
+ TXNID live_xid = live_root_txn_list[j];
+ invariant(live_xid <= snapshot_xid);
+ TXNID youngest = toku_get_youngest_live_list_txnid_for(
+ live_xid,
+ snapshot_txnids_omt,
+ txn_manager->referenced_xids
+ );
+ if (is_txnid_live(txn_manager, live_xid)) {
+                // Only committed entries return a youngest.
+ invariant(youngest == TXNID_NONE);
+ }
+ else {
+ invariant(youngest != TXNID_NONE);
+ // A committed entry might have been read-only, in which case it won't return anything.
+                // This snapshot reads 'live_xid' so its youngest cannot be older than snapshot_xid.
+ invariant(youngest >= snapshot_xid);
+ }
+ }
+ }
+ }
+ {
+ // Verify referenced_xids.
+ for (i = 0; i < num_referenced_xid_tuples; i++) {
+ struct referenced_xid_tuple *tuple = referenced_xid_tuples[i];
+ invariant(tuple->begin_id < tuple->end_id);
+ invariant(tuple->references > 0);
+
+ {
+                //verify that neither tuple->begin_id nor tuple->end_id is in the live root txn list
+ r = txn_manager->live_root_txns.find_zero<TXNID, find_by_xid>(tuple->begin_id, nullptr, nullptr);
+ invariant(r == DB_NOTFOUND);
+ r = txn_manager->live_root_txns.find_zero<TXNID, find_by_xid>(tuple->end_id, nullptr, nullptr);
+ invariant(r == DB_NOTFOUND);
+ }
+ {
+                //verify that neither tuple->begin_id nor tuple->end_id is in snapshot_xids
+ TOKUTXN curr_txn = txn_manager->snapshot_head;
+ uint32_t curr_index = 0;
+ while (curr_txn != NULL) {
+ invariant(tuple->begin_id != curr_txn->txnid.parent_id64);
+ invariant(tuple->end_id != curr_txn->txnid.parent_id64);
+ curr_txn = curr_txn->snapshot_next;
+ curr_index++;
+ }
+ }
+ {
+ // Verify number of references is correct
+ uint32_t refs_found = 0;
+ for (j = 0; j < num_snapshot_txnids; j++) {
+ TOKUTXN snapshot_txn = snapshot_txns[j];
+ if (toku_is_txn_in_live_root_txn_list(*snapshot_txn->live_root_txn_list, tuple->begin_id)) {
+ refs_found++;
+ }
+ invariant(!toku_is_txn_in_live_root_txn_list(
+ *snapshot_txn->live_root_txn_list,
+ tuple->end_id));
+ }
+ invariant(refs_found == tuple->references);
+ }
+ {
+ // Verify youngest makes sense.
+ TXNID youngest = toku_get_youngest_live_list_txnid_for(
+ tuple->begin_id,
+ snapshot_txnids_omt,
+ txn_manager->referenced_xids
+ );
+ invariant(youngest != TXNID_NONE);
+ invariant(youngest > tuple->begin_id);
+ invariant(youngest < tuple->end_id);
+ // Youngest must be found, and must be a snapshot txn
+ r = snapshot_txnids_omt.find_zero<TXNID, toku_find_xid_by_xid>(youngest, nullptr, nullptr);
+ invariant_zero(r);
+ }
+ }
+ }
+ snapshot_txnids_omt.destroy();
+ referenced_xids_omt.destroy();
+ live_root_txns_omt.destroy();
+}
+
+void toku_txn_manager_init(TXN_MANAGER *txn_managerp) {
+ TXN_MANAGER XCALLOC(txn_manager);
+ toku_mutex_init(
+ *txn_manager_lock_mutex_key, &txn_manager->txn_manager_lock, nullptr);
+ txn_manager->live_root_txns.create();
+ txn_manager->live_root_ids.create();
+ txn_manager->snapshot_head = NULL;
+ txn_manager->snapshot_tail = NULL;
+ txn_manager->num_snapshots = 0;
+ txn_manager->referenced_xids.create();
+ txn_manager->last_xid = 0;
+
+ txn_manager->last_xid_seen_for_recover = TXNID_NONE;
+ txn_manager->last_calculated_oldest_referenced_xid = TXNID_NONE;
+
+ *txn_managerp = txn_manager;
+}
+
+void toku_txn_manager_destroy(TXN_MANAGER txn_manager) {
+ toku_mutex_destroy(&txn_manager->txn_manager_lock);
+ invariant(txn_manager->live_root_txns.size() == 0);
+ txn_manager->live_root_txns.destroy();
+ invariant(txn_manager->live_root_ids.size() == 0);
+ txn_manager->live_root_ids.destroy();
+ invariant(txn_manager->snapshot_head == NULL);
+ invariant(txn_manager->referenced_xids.size() == 0);
+ txn_manager->referenced_xids.destroy();
+ toku_free(txn_manager);
+}
+
+TXNID
+toku_txn_manager_get_oldest_living_xid(TXN_MANAGER txn_manager) {
+ TOKUTXN rtxn = NULL;
+ TXNID rval = TXNID_NONE_LIVING;
+ txn_manager_lock(txn_manager);
+
+ if (txn_manager->live_root_txns.size() > 0) {
+ int r = txn_manager->live_root_txns.fetch(0, &rtxn);
+ invariant_zero(r);
+ }
+ if (rtxn) {
+ rval = rtxn->txnid.parent_id64;
+ }
+ txn_manager_unlock(txn_manager);
+ return rval;
+}
+
+TXNID toku_txn_manager_get_oldest_referenced_xid_estimate(TXN_MANAGER txn_manager) {
+ return toku_unsafe_fetch(&txn_manager->last_calculated_oldest_referenced_xid);
+}
+
+int live_root_txn_list_iter(const TOKUTXN &live_xid, const uint32_t UU(index), TXNID **const referenced_xids);
+int live_root_txn_list_iter(const TOKUTXN &live_xid, const uint32_t UU(index), TXNID **const referenced_xids){
+ (*referenced_xids)[index] = live_xid->txnid.parent_id64;
+ return 0;
+}
+
+
+// Create list of root transactions that were live when this txn began.
+static inline void
+setup_live_root_txn_list(xid_omt_t* live_root_txnid, xid_omt_t* live_root_txn_list) {
+ if (live_root_txnid->size() > 0) {
+ live_root_txn_list->clone(*live_root_txnid);
+ } else {
+ live_root_txn_list->create_no_array();
+ }
+}
+
+//Heaviside function to search through an OMT by a TXNID
+int
+find_by_xid (const TOKUTXN &txn, const TXNID &txnidfind) {
+ if (txn->txnid.parent_id64 < txnidfind) return -1;
+ if (txn->txnid.parent_id64 > txnidfind) return +1;
+ return 0;
+}
+
+static TXNID
+max_xid(TXNID a, TXNID b) {
+ return a < b ? b : a;
+}
+
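+// Recompute the oldest txnid that may still be referenced anywhere: the minimum over
+// the oldest live root txn, the oldest still-referenced committed txn (referenced_xids),
+// the oldest snapshot, and last_xid itself; publish it for readers of
+// toku_txn_manager_get_oldest_referenced_xid_estimate().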
+static void set_oldest_referenced_xid(TXN_MANAGER txn_manager) {
+ TXNID oldest_referenced_xid = TXNID_MAX;
+ int r;
+ if (txn_manager->live_root_ids.size() > 0) {
+ r = txn_manager->live_root_ids.fetch(0, &oldest_referenced_xid);
+ // this function should only be called when we know there is at least
+ // one live transaction
+ invariant_zero(r);
+ }
+
+ if (txn_manager->referenced_xids.size() > 0) {
+ struct referenced_xid_tuple* tuple;
+ r = txn_manager->referenced_xids.fetch(0, &tuple);
+ if (r == 0 && tuple->begin_id < oldest_referenced_xid) {
+ oldest_referenced_xid = tuple->begin_id;
+ }
+ }
+ if (txn_manager->snapshot_head != NULL) {
+ TXNID id = txn_manager->snapshot_head->snapshot_txnid64;
+ if (id < oldest_referenced_xid) {
+ oldest_referenced_xid = id;
+ }
+ }
+ if (txn_manager->last_xid < oldest_referenced_xid) {
+ oldest_referenced_xid = txn_manager->last_xid;
+ }
+ invariant(oldest_referenced_xid != TXNID_MAX);
+ toku_unsafe_set(&txn_manager->last_calculated_oldest_referenced_xid, oldest_referenced_xid);
+}
+
+//Heaviside function to find a TOKUTXN by TOKUTXN (used to find the index)
+// template-only function, but must be extern
+int find_xid (const TOKUTXN &txn, const TOKUTXN &txnfind);
+int
+find_xid (const TOKUTXN &txn, const TOKUTXN &txnfind)
+{
+ if (txn->txnid.parent_id64 < txnfind->txnid.parent_id64) return -1;
+ if (txn->txnid.parent_id64 > txnfind->txnid.parent_id64) return +1;
+ return 0;
+}
+
+static inline void txn_manager_create_snapshot_unlocked(
+ TXN_MANAGER txn_manager,
+ TOKUTXN txn
+ )
+{
+ txn->snapshot_txnid64 = ++txn_manager->last_xid;
+ // Add this txn to the global list of txns that have their own snapshots.
+ // (Note, if a txn is a child that creates its own snapshot, then that child xid
+ // is the xid stored in the global list.)
+ if (txn_manager->snapshot_head == NULL) {
+ invariant(txn_manager->snapshot_tail == NULL);
+ txn_manager->snapshot_head = txn;
+ txn_manager->snapshot_tail = txn;
+ }
+ else {
+ txn_manager->snapshot_tail->snapshot_next = txn;
+ txn->snapshot_prev = txn_manager->snapshot_tail;
+ txn_manager->snapshot_tail = txn;
+ }
+ txn_manager->num_snapshots++;
+}
+
+// template-only function, but must be extern
+int find_tuple_by_xid (const struct referenced_xid_tuple &tuple, const TXNID &xidfind);
+int
+find_tuple_by_xid (const struct referenced_xid_tuple &tuple, const TXNID &xidfind)
+{
+ if (tuple.begin_id < xidfind) return -1;
+ if (tuple.begin_id > xidfind) return +1;
+ return 0;
+}
+
+// template-only function, but must be extern
+int referenced_xids_note_snapshot_txn_end_iter(const TXNID &live_xid, const uint32_t UU(index), rx_omt_t *const referenced_xids)
+ __attribute__((nonnull(3)));
+int referenced_xids_note_snapshot_txn_end_iter(const TXNID &live_xid, const uint32_t UU(index), rx_omt_t *const referenced_xids)
+{
+ int r;
+ uint32_t idx;
+ struct referenced_xid_tuple *tuple;
+
+ r = referenced_xids->find_zero<TXNID, find_tuple_by_xid>(live_xid, &tuple, &idx);
+ if (r == DB_NOTFOUND) {
+ goto done;
+ }
+ invariant_zero(r);
+ invariant(tuple->references > 0);
+ if (--tuple->references == 0) {
+ r = referenced_xids->delete_at(idx);
+ lazy_assert_zero(r);
+ }
+done:
+ return 0;
+}
+
+// When txn ends, update reverse live list. To do that, examine each txn in this (closing) txn's live list.
+static inline int
+note_snapshot_txn_end_by_ref_xids(TXN_MANAGER mgr, const xid_omt_t &live_root_txn_list) {
+ int r;
+ r = live_root_txn_list.iterate<rx_omt_t, referenced_xids_note_snapshot_txn_end_iter>(&mgr->referenced_xids);
+ invariant_zero(r);
+ return r;
+}
+
+typedef struct snapshot_iter_extra {
+ uint32_t* indexes_to_delete;
+ uint32_t num_indexes;
+ xid_omt_t* live_root_txn_list;
+} SNAPSHOT_ITER_EXTRA;
+
+// template-only function, but must be extern
+int note_snapshot_txn_end_by_txn_live_list_iter(referenced_xid_tuple* tuple, const uint32_t index, SNAPSHOT_ITER_EXTRA *const sie)
+ __attribute__((nonnull(3)));
+int note_snapshot_txn_end_by_txn_live_list_iter(
+ referenced_xid_tuple* tuple,
+ const uint32_t index,
+ SNAPSHOT_ITER_EXTRA *const sie
+ )
+{
+ int r;
+ uint32_t idx;
+ TXNID txnid;
+ r = sie->live_root_txn_list->find_zero<TXNID, toku_find_xid_by_xid>(tuple->begin_id, &txnid, &idx);
+ if (r == DB_NOTFOUND) {
+ goto done;
+ }
+ invariant_zero(r);
+ invariant(txnid == tuple->begin_id);
+ invariant(tuple->references > 0);
+ if (--tuple->references == 0) {
+ sie->indexes_to_delete[sie->num_indexes] = index;
+ sie->num_indexes++;
+ }
+done:
+ return 0;
+}
+
+static inline int
+note_snapshot_txn_end_by_txn_live_list(TXN_MANAGER mgr, xid_omt_t* live_root_txn_list) {
+ uint32_t size = mgr->referenced_xids.size();
+ uint32_t indexes_to_delete[size];
+ SNAPSHOT_ITER_EXTRA sie = { .indexes_to_delete = indexes_to_delete, .num_indexes = 0, .live_root_txn_list = live_root_txn_list};
+ mgr->referenced_xids.iterate_ptr<SNAPSHOT_ITER_EXTRA, note_snapshot_txn_end_by_txn_live_list_iter>(&sie);
+ for (uint32_t i = 0; i < sie.num_indexes; i++) {
+ uint32_t curr_index = sie.indexes_to_delete[sie.num_indexes-i-1];
+ mgr->referenced_xids.delete_at(curr_index);
+ }
+ return 0;
+}
+
+static inline void txn_manager_remove_snapshot_unlocked(
+ TOKUTXN txn,
+ TXN_MANAGER txn_manager
+ )
+{
+ // Remove from linked list of snapshot txns
+ if (txn_manager->snapshot_head == txn) {
+ txn_manager->snapshot_head = txn->snapshot_next;
+ }
+ if (txn_manager->snapshot_tail == txn) {
+ txn_manager->snapshot_tail = txn->snapshot_prev;
+ }
+ if (txn->snapshot_next) {
+ txn->snapshot_next->snapshot_prev = txn->snapshot_prev;
+ }
+ if (txn->snapshot_prev) {
+ txn->snapshot_prev->snapshot_next = txn->snapshot_next;
+ }
+ txn_manager->num_snapshots--;
+ uint32_t ref_xids_size = txn_manager->referenced_xids.size();
+ uint32_t live_list_size = txn->live_root_txn_list->size();
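+    // Pick the cheaper direction for dropping references: when the closing txn's
+    // live list is larger than a (small) referenced_xids set, walk referenced_xids
+    // and look each tuple up in the live list; otherwise walk the live list and look
+    // each xid up in referenced_xids. The 2000 cap appears to bound the on-stack
+    // index array allocated by the first variant.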
+ if (ref_xids_size > 0 && live_list_size > 0) {
+ if (live_list_size > ref_xids_size && ref_xids_size < 2000) {
+ note_snapshot_txn_end_by_txn_live_list(txn_manager, txn->live_root_txn_list);
+ }
+ else {
+ note_snapshot_txn_end_by_ref_xids(txn_manager, *txn->live_root_txn_list);
+ }
+ }
+}
+
+static inline void inherit_snapshot_from_parent(TOKUTXN child) {
+ if (child->parent) {
+ child->snapshot_txnid64 = child->parent->snapshot_txnid64;
+ child->live_root_txn_list = child->parent->live_root_txn_list;
+ }
+}
+void toku_txn_manager_handle_snapshot_create_for_child_txn(
+ TOKUTXN txn,
+ TXN_MANAGER txn_manager,
+ TXN_SNAPSHOT_TYPE snapshot_type
+ )
+{
+    // this is a function for child txns, so just do a sanity check
+ invariant(txn->parent != NULL);
+ bool copies_snapshot = txn_copies_snapshot(snapshot_type, txn->parent);
+ bool records_snapshot = txn_records_snapshot(snapshot_type, txn->parent);
+ // assert that if records_snapshot is true, then copies_snapshot is true
+ invariant(!records_snapshot || copies_snapshot);
+ if (records_snapshot) {
+ invariant(txn->live_root_txn_list == nullptr);
+ XMALLOC(txn->live_root_txn_list);
+ txn_manager_lock(txn_manager);
+ txn_manager_create_snapshot_unlocked(txn_manager, txn);
+ }
+ else {
+ inherit_snapshot_from_parent(txn);
+ }
+
+ toku_debug_txn_sync(pthread_self());
+
+ if (copies_snapshot) {
+ if(!records_snapshot)
+ txn_manager_lock(txn_manager);
+ setup_live_root_txn_list(&txn_manager->live_root_ids, txn->live_root_txn_list);
+ txn_manager_unlock(txn_manager);
+ }
+}
+
+void toku_txn_manager_handle_snapshot_destroy_for_child_txn(
+ TOKUTXN txn,
+ TXN_MANAGER txn_manager,
+ TXN_SNAPSHOT_TYPE snapshot_type
+ )
+{
+    // this is a function for child txns, so just do a sanity check
+ invariant(txn->parent != NULL);
+ bool copies_snapshot = txn_copies_snapshot(snapshot_type, txn->parent);
+ bool records_snapshot = txn_records_snapshot(snapshot_type, txn->parent);
+ if (records_snapshot) {
+ txn_manager_lock(txn_manager);
+ txn_manager_remove_snapshot_unlocked(txn, txn_manager);
+ txn_manager_unlock(txn_manager);
+ }
+ if (copies_snapshot) {
+ invariant(txn->live_root_txn_list != nullptr);
+ txn->live_root_txn_list->destroy();
+ toku_free(txn->live_root_txn_list);
+ }
+}
+
+void toku_txn_manager_start_txn_for_recovery(
+ TOKUTXN txn,
+ TXN_MANAGER txn_manager,
+ TXNID xid
+ )
+{
+ txn_manager_lock(txn_manager);
+ // using xid that is passed in
+ txn_manager->last_xid = max_xid(txn_manager->last_xid, xid);
+ toku_txn_update_xids_in_txn(txn, xid);
+
+ uint32_t idx;
+ int r = txn_manager->live_root_txns.find_zero<TOKUTXN, find_xid>(txn, nullptr, &idx);
+ invariant(r == DB_NOTFOUND);
+ r = txn_manager->live_root_txns.insert_at(txn, idx);
+ invariant_zero(r);
+ r = txn_manager->live_root_ids.insert_at(txn->txnid.parent_id64, idx);
+ invariant_zero(r);
+
+ txn_manager_unlock(txn_manager);
+}
+
+void toku_txn_manager_start_txn(
+ TOKUTXN txn,
+ TXN_MANAGER txn_manager,
+ TXN_SNAPSHOT_TYPE snapshot_type,
+ bool read_only
+ )
+{
+ int r;
+ TXNID xid = TXNID_NONE;
+ // if we are running in recovery, we don't need to make snapshots
+ bool copies_snapshot = txn_copies_snapshot(snapshot_type, NULL);
+ bool records_snapshot = txn_records_snapshot(snapshot_type, NULL);
+ // assert that if records_snapshot is true, then copies_snapshot is true
+ invariant(!records_snapshot || copies_snapshot);
+
+ // perform a malloc outside of the txn_manager lock
+ // will be used in txn_manager_create_snapshot_unlocked below
+ if (copies_snapshot) {
+ invariant(txn->live_root_txn_list == nullptr);
+ XMALLOC(txn->live_root_txn_list);
+ }
+ // the act of getting a transaction ID and adding the
+ // txn to the proper OMTs must be atomic. MVCC depends
+ // on this.
+ txn_manager_lock(txn_manager);
+ if (garbage_collection_debug) {
+ verify_snapshot_system(txn_manager);
+ }
+
+ //
+ // maintain the data structures necessary for MVCC:
+ // 1. add txn to list of live_root_txns if this is a root transaction
+ // 2. if the transaction is creating a snapshot:
+ // - create a live list for the transaction
+ // - add the id to the list of snapshot ids
+ //
+ // The order of operations is important here, and must be taken
+ // into account when the transaction is closed. The txn is added
+ // to the live_root_txns first (if it is a root txn). This has the implication
+ // that a root level snapshot transaction is in its own live list. This fact
+ // is taken into account when the transaction is closed.
+
+ // add ancestor information, and maintain global live root txn list
+ xid = ++txn_manager->last_xid; // we always need an ID, needed for lock tree
+ toku_txn_update_xids_in_txn(txn, xid);
+ if (!read_only) {
+ uint32_t idx = txn_manager->live_root_txns.size();
+ r = txn_manager->live_root_txns.insert_at(txn, idx);
+ invariant_zero(r);
+ r = txn_manager->live_root_ids.insert_at(txn->txnid.parent_id64, idx);
+ invariant_zero(r);
+ }
+ set_oldest_referenced_xid(txn_manager);
+
+ if (records_snapshot) {
+ txn_manager_create_snapshot_unlocked(
+ txn_manager,
+ txn
+ );
+ }
+ if (copies_snapshot) {
+ setup_live_root_txn_list(&txn_manager->live_root_ids, txn->live_root_txn_list);
+ }
+
+ if (garbage_collection_debug) {
+ verify_snapshot_system(txn_manager);
+ }
+ txn_manager_unlock(txn_manager);
+ return;
+}
+
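+// Given a committed root txnid xc, return the youngest snapshot txnid taken while xc
+// was still live (begin_id < snapshot id < end_id), i.e. (roughly) the youngest live
+// root txn list that still references xc; TXNID_NONE if there is no such snapshot or
+// xc is not in referenced_xids.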
+TXNID
+toku_get_youngest_live_list_txnid_for(TXNID xc, const xid_omt_t &snapshot_txnids, const rx_omt_t &referenced_xids) {
+ struct referenced_xid_tuple *tuple;
+ int r;
+ TXNID rval = TXNID_NONE;
+
+ r = referenced_xids.find_zero<TXNID, find_tuple_by_xid>(xc, &tuple, nullptr);
+ if (r == DB_NOTFOUND) {
+ goto done;
+ }
+ TXNID live;
+
+ r = snapshot_txnids.find<TXNID, toku_find_xid_by_xid>(tuple->end_id, -1, &live, nullptr);
+ if (r == DB_NOTFOUND) {
+ goto done;
+ }
+ invariant(live < tuple->end_id);
+ if (live > tuple->begin_id) {
+ rval = live;
+ }
+done:
+ return rval;
+}
+
+void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) {
+ int r;
+ invariant(txn->parent == NULL);
+ bool records_snapshot = txn_records_snapshot(txn->snapshot_type, NULL);
+ txn_manager_lock(txn_manager);
+
+ if (garbage_collection_debug) {
+ verify_snapshot_system(txn_manager);
+ }
+
+ if (records_snapshot) {
+ txn_manager_remove_snapshot_unlocked(
+ txn,
+ txn_manager
+ );
+ }
+
+ if (!txn_declared_read_only(txn)) {
+ uint32_t idx;
+ //Remove txn from list of live root txns
+ TOKUTXN txnagain;
+ r = txn_manager->live_root_txns.find_zero<TOKUTXN, find_xid>(txn, &txnagain, &idx);
+ invariant_zero(r);
+ invariant(txn==txnagain);
+
+ r = txn_manager->live_root_txns.delete_at(idx);
+ invariant_zero(r);
+ r = txn_manager->live_root_ids.delete_at(idx);
+ invariant_zero(r);
+
+ if (!toku_txn_is_read_only(txn) || garbage_collection_debug) {
+ uint32_t num_references = 0;
+ TOKUTXN curr_txn = txn_manager->snapshot_tail;
+ while(curr_txn != NULL) {
+ if (curr_txn->snapshot_txnid64 > txn->txnid.parent_id64) {
+ num_references++;
+ }
+ else {
+ break;
+ }
+ curr_txn = curr_txn->snapshot_prev;
+ }
+
+ if (num_references > 0) {
+ // This transaction exists in a live list of another transaction.
+ struct referenced_xid_tuple tuple = {
+ .begin_id = txn->txnid.parent_id64,
+ .end_id = ++txn_manager->last_xid,
+ .references = num_references
+ };
+ r = txn_manager->referenced_xids.insert<TXNID, find_tuple_by_xid>(tuple, txn->txnid.parent_id64, nullptr);
+ lazy_assert_zero(r);
+ }
+ }
+ }
+
+ if (garbage_collection_debug) {
+ verify_snapshot_system(txn_manager);
+ }
+ txn_manager_unlock(txn_manager);
+
+ //Cleanup that does not require the txn_manager lock
+ if (txn->live_root_txn_list) {
+ txn->live_root_txn_list->destroy();
+ toku_free(txn->live_root_txn_list);
+ }
+ return;
+}
+
+static void toku_txn_manager_clone_state_for_gc_unlocked(
+ TXN_MANAGER txn_manager,
+ xid_omt_t* snapshot_xids,
+ rx_omt_t* referenced_xids,
+ xid_omt_t* live_root_txns
+ )
+{
+ TXNID* snapshot_xids_array = NULL;
+ XMALLOC_N(txn_manager->num_snapshots, snapshot_xids_array);
+ TOKUTXN curr_txn = txn_manager->snapshot_head;
+ uint32_t curr_index = 0;
+ while (curr_txn != NULL) {
+ snapshot_xids_array[curr_index] = curr_txn->snapshot_txnid64;
+ curr_txn = curr_txn->snapshot_next;
+ curr_index++;
+ }
+ snapshot_xids->create_steal_sorted_array(
+ &snapshot_xids_array,
+ txn_manager->num_snapshots,
+ txn_manager->num_snapshots
+ );
+
+ referenced_xids->clone(txn_manager->referenced_xids);
+ setup_live_root_txn_list(&txn_manager->live_root_ids, live_root_txns);
+}
+
+void toku_txn_manager_clone_state_for_gc(
+ TXN_MANAGER txn_manager,
+ xid_omt_t* snapshot_xids,
+ rx_omt_t* referenced_xids,
+ xid_omt_t* live_root_txns
+ )
+{
+ txn_manager_lock(txn_manager);
+ toku_txn_manager_clone_state_for_gc_unlocked(
+ txn_manager,
+ snapshot_xids,
+ referenced_xids,
+ live_root_txns
+ );
+ txn_manager_unlock(txn_manager);
+}
+
+void txn_manager_state::init() {
+ invariant(!initialized);
+ invariant_notnull(txn_manager);
+ toku_txn_manager_clone_state_for_gc(
+ txn_manager,
+ &snapshot_xids,
+ &referenced_xids,
+ &live_root_txns
+ );
+ initialized = true;
+}
+
+void toku_txn_manager_id2txn_unlocked(TXN_MANAGER txn_manager, TXNID_PAIR txnid, TOKUTXN *result) {
+ TOKUTXN txn;
+ int r = txn_manager->live_root_txns.find_zero<TXNID, find_by_xid>(txnid.parent_id64, &txn, nullptr);
+ if (r==0) {
+ assert(txn->txnid.parent_id64 == txnid.parent_id64);
+ *result = txn;
+ }
+ else {
+ assert(r==DB_NOTFOUND);
+ // If there is no txn, then we treat it as the null txn.
+ *result = NULL;
+ }
+}
+
+int toku_txn_manager_get_root_txn_from_xid (TXN_MANAGER txn_manager, TOKU_XA_XID *xid, DB_TXN **txnp) {
+ txn_manager_lock(txn_manager);
+ int ret_val = 0;
+ int num_live_txns = txn_manager->live_root_txns.size();
+ for (int i = 0; i < num_live_txns; i++) {
+ TOKUTXN txn;
+ {
+ int r = txn_manager->live_root_txns.fetch(i, &txn);
+ assert_zero(r);
+ }
+ if (txn->xa_xid.formatID == xid->formatID
+ && txn->xa_xid.gtrid_length == xid->gtrid_length
+ && txn->xa_xid.bqual_length == xid->bqual_length
+ && 0==memcmp(txn->xa_xid.data, xid->data, xid->gtrid_length + xid->bqual_length)) {
+ *txnp = txn->container_db_txn;
+ ret_val = 0;
+ goto exit;
+ }
+ }
+ ret_val = DB_NOTFOUND;
+exit:
+ txn_manager_unlock(txn_manager);
+ return ret_val;
+}
+
+uint32_t toku_txn_manager_num_live_root_txns(TXN_MANAGER txn_manager) {
+ int ret_val = 0;
+ txn_manager_lock(txn_manager);
+ ret_val = txn_manager->live_root_txns.size();
+ txn_manager_unlock(txn_manager);
+ return ret_val;
+}
+
+static int txn_manager_iter(
+ TXN_MANAGER txn_manager,
+ txn_mgr_iter_callback cb,
+ void* extra,
+ bool just_root_txns
+ )
+{
+ int r = 0;
+ toku_mutex_lock(&txn_manager->txn_manager_lock);
+ uint32_t size = txn_manager->live_root_txns.size();
+ for (uint32_t i = 0; i < size; i++) {
+ TOKUTXN curr_txn = NULL;
+ r = txn_manager->live_root_txns.fetch(i, &curr_txn);
+ assert_zero(r);
+ if (just_root_txns) {
+ r = cb(curr_txn, extra);
+ }
+ else {
+ r = curr_txn->child_manager->iterate(cb, extra);
+ }
+ if (r) {
+ break;
+ }
+ }
+ toku_mutex_unlock(&txn_manager->txn_manager_lock);
+ return r;
+}
+
+int toku_txn_manager_iter_over_live_txns(
+ TXN_MANAGER txn_manager,
+ txn_mgr_iter_callback cb,
+ void* extra
+ )
+{
+ return txn_manager_iter(
+ txn_manager,
+ cb,
+ extra,
+ false
+ );
+}
+
+int toku_txn_manager_iter_over_live_root_txns(
+ TXN_MANAGER txn_manager,
+ txn_mgr_iter_callback cb,
+ void* extra
+ )
+{
+ return txn_manager_iter(
+ txn_manager,
+ cb,
+ extra,
+ true
+ );
+}
+
+
+//
+// This function is called only via env_txn_xa_recover and env_txn_recover.
+// See comments for those functions to understand assumptions that
+// can be made when calling this function. Namely, that the system is
+// quiescent, in that we are right after recovery and before user operations
+// commence.
+//
+// Another key assumption made here is that only root transactions
+// may be prepared and that child transactions cannot be prepared.
+// This assumption is made by the fact that we iterate over the live root txns
+// to find prepared transactions.
+//
+// I (Zardosht) don't think we take advantage of this fact, as we are holding
+// the txn_manager_lock in this function, but in the future we might want
+// to take these assumptions into account.
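+//
+// Typical calling pattern (sketch): the caller passes DB_FIRST on the first call and
+// DB_NEXT on subsequent calls; last_xid_seen_for_recover records where the previous
+// scan stopped so already-returned transactions are skipped.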
+//
+int toku_txn_manager_recover_root_txn (
+ TXN_MANAGER txn_manager,
+ struct tokulogger_preplist preplist[/*count*/],
+ long count,
+ long *retp, /*out*/
+ uint32_t flags
+ )
+{
+ int ret_val = 0;
+ txn_manager_lock(txn_manager);
+ uint32_t num_txns_returned = 0;
+ // scan through live root txns to find
+ // prepared transactions and return them
+ uint32_t size = txn_manager->live_root_txns.size();
+ if (flags==DB_FIRST) {
+ txn_manager->last_xid_seen_for_recover = TXNID_NONE;
+ }
+ else if (flags!=DB_NEXT) {
+ ret_val = EINVAL;
+ goto exit;
+ }
+ for (uint32_t i = 0; i < size; i++) {
+ TOKUTXN curr_txn = NULL;
+ txn_manager->live_root_txns.fetch(i, &curr_txn);
+ // skip over TOKUTXNs whose txnid64 is too small, meaning
+ // we have already processed them.
+ if (curr_txn->txnid.parent_id64 <= txn_manager->last_xid_seen_for_recover) {
+ continue;
+ }
+ if (curr_txn->state == TOKUTXN_PREPARING) {
+ assert(curr_txn->container_db_txn);
+ preplist[num_txns_returned].txn = curr_txn->container_db_txn;
+ preplist[num_txns_returned].xid = curr_txn->xa_xid;
+ txn_manager->last_xid_seen_for_recover = curr_txn->txnid.parent_id64;
+ num_txns_returned++;
+ }
+ txn_manager->last_xid_seen_for_recover = curr_txn->txnid.parent_id64;
+ // if we found the maximum number of prepared transactions we are
+ // allowed to find, then break
+ if ((long) num_txns_returned >= count) {
+ break;
+ }
+ }
+ invariant((long) num_txns_returned <= count);
+ *retp = num_txns_returned;
+ ret_val = 0;
+exit:
+ txn_manager_unlock(txn_manager);
+ return ret_val;
+}
+
+static void txn_manager_lock(TXN_MANAGER txn_manager) {
+ toku_mutex_lock(&txn_manager->txn_manager_lock);
+}
+
+static void txn_manager_unlock(TXN_MANAGER txn_manager) {
+ toku_mutex_unlock(&txn_manager->txn_manager_lock);
+}
+
+void toku_txn_manager_suspend(TXN_MANAGER txn_manager) {
+ txn_manager_lock(txn_manager);
+}
+
+void toku_txn_manager_resume(TXN_MANAGER txn_manager) {
+ txn_manager_unlock(txn_manager);
+}
+
+void
+toku_txn_manager_set_last_xid_from_logger(TXN_MANAGER txn_manager, TXNID last_xid) {
+ invariant(txn_manager->last_xid == TXNID_NONE);
+ txn_manager->last_xid = last_xid;
+}
+
+void
+toku_txn_manager_set_last_xid_from_recovered_checkpoint(TXN_MANAGER txn_manager, TXNID last_xid) {
+ txn_manager->last_xid = last_xid;
+}
+
+TXNID
+toku_txn_manager_get_last_xid(TXN_MANAGER mgr) {
+ txn_manager_lock(mgr);
+ TXNID last_xid = mgr->last_xid;
+ txn_manager_unlock(mgr);
+ return last_xid;
+}
+
+bool
+toku_txn_manager_txns_exist(TXN_MANAGER mgr) {
+ txn_manager_lock(mgr);
+ bool retval = mgr->live_root_txns.size() > 0;
+ txn_manager_unlock(mgr);
+ return retval;
+}
+
+
+// Test-only function
+void
+toku_txn_manager_increase_last_xid(TXN_MANAGER mgr, uint64_t increment) {
+ txn_manager_lock(mgr);
+ mgr->last_xid += increment;
+ txn_manager_unlock(mgr);
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_manager.h b/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
new file mode 100644
index 00000000..25fa6032
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
@@ -0,0 +1,223 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "portability/toku_portability.h"
+#include "portability/toku_pthread.h"
+
+#include "ft/txn/txn.h"
+
+void set_test_txn_sync_callback(void (*) (pthread_t, void*), void*);
+#define toku_test_txn_sync_callback(a) ((test_txn_sync_callback)? test_txn_sync_callback( a,test_txn_sync_callback_extra) : (void) 0)
+
+#if defined(TOKU_DEBUG_TXN_SYNC)
+#define toku_debug_txn_sync(a) toku_test_txn_sync_callback(a)
+#else
+#define toku_debug_txn_sync(a) ((void) 0)
+#endif // defined(TOKU_DEBUG_TXN_SYNC)
+
+typedef struct txn_manager *TXN_MANAGER;
+
+struct referenced_xid_tuple {
+ TXNID begin_id;
+ TXNID end_id;
+ uint32_t references;
+};
+
+struct txn_manager {
+ toku_mutex_t txn_manager_lock; // a lock protecting this object
+ txn_omt_t live_root_txns; // a sorted tree.
+ xid_omt_t live_root_ids; //contains TXNID x | x is snapshot txn
+ TOKUTXN snapshot_head;
+ TOKUTXN snapshot_tail;
+ uint32_t num_snapshots;
+ // Contains 3-tuples: (TXNID begin_id, TXNID end_id, uint64_t num_live_list_references)
+ // for committed root transaction ids that are still referenced by a live list.
+ rx_omt_t referenced_xids;
+
+ TXNID last_xid;
+ TXNID last_xid_seen_for_recover;
+ TXNID last_calculated_oldest_referenced_xid;
+};
+typedef struct txn_manager *TXN_MANAGER;
+
+struct txn_manager_state {
+ txn_manager_state(TXN_MANAGER mgr) :
+ txn_manager(mgr),
+ initialized(false) {
+ snapshot_xids.create_no_array();
+ referenced_xids.create_no_array();
+ live_root_txns.create_no_array();
+ }
+
+ // copy construction and assignment are not allowed
+ txn_manager_state &operator=(txn_manager_state &rhs) = delete;
+ txn_manager_state(txn_manager_state &rhs) = delete;
+
+ ~txn_manager_state() {
+ snapshot_xids.destroy();
+ referenced_xids.destroy();
+ live_root_txns.destroy();
+ }
+
+ void init();
+
+ TXN_MANAGER txn_manager;
+ bool initialized;
+
+ // a snapshot of the txn manager's mvcc state
+ // only valid if initialized = true
+ xid_omt_t snapshot_xids;
+ rx_omt_t referenced_xids;
+ xid_omt_t live_root_txns;
+};
+
+// represents all of the information needed to run garbage collection
+struct txn_gc_info {
+ txn_gc_info(txn_manager_state *st, TXNID xid_sgc, TXNID xid_ip, bool mvcc)
+ : txn_state_for_gc(st),
+ oldest_referenced_xid_for_simple_gc(xid_sgc),
+ oldest_referenced_xid_for_implicit_promotion(xid_ip),
+ mvcc_needed(mvcc) {
+ }
+
+ // a snapshot of the transaction system; may be null.
+ txn_manager_state *txn_state_for_gc;
+
+ // the oldest xid in any live list
+ //
+ // suitable for simple garbage collection that cleans up multiple committed
+ // transaction records into one. not suitable for implicit promotions, which
+ // must be correct in the face of abort messages - see ftnode->oldest_referenced_xid
+ TXNID oldest_referenced_xid_for_simple_gc;
+
+ // lower bound on the oldest xid in any live list when the messages to be cleaned
+ // had no messages above them. suitable for implicitly promoting a provisional uxr.
+ TXNID oldest_referenced_xid_for_implicit_promotion;
+
+ // whether or not mvcc is actually needed - false during recovery and in non-transactional systems
+ const bool mvcc_needed;
+};
+
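+// A minimal sketch of how a caller might assemble a txn_gc_info (the variable
+// names, and passing the same estimate for both xid bounds, are assumptions
+// made only for illustration):
+//
+//   txn_manager_state state(txn_manager);
+//   TXNID oldest = toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager);
+//   txn_gc_info gc_info(&state, oldest, oldest, true /* mvcc_needed */);
+//   // ... pass &gc_info down to the leafentry apply/gc paths ...
+//
+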
+void toku_txn_manager_init(TXN_MANAGER* txn_manager);
+void toku_txn_manager_destroy(TXN_MANAGER txn_manager);
+
+TXNID toku_txn_manager_get_oldest_living_xid(TXN_MANAGER txn_manager);
+
+TXNID toku_txn_manager_get_oldest_referenced_xid_estimate(TXN_MANAGER txn_manager);
+
+void toku_txn_manager_handle_snapshot_create_for_child_txn(
+ TOKUTXN txn,
+ TXN_MANAGER txn_manager,
+ TXN_SNAPSHOT_TYPE snapshot_type
+ );
+void toku_txn_manager_handle_snapshot_destroy_for_child_txn(
+ TOKUTXN txn,
+ TXN_MANAGER txn_manager,
+ TXN_SNAPSHOT_TYPE snapshot_type
+ );
+
+
+// Assign a txnid. Log the txn begin in the recovery log. Initialize the txn live lists.
+void toku_txn_manager_start_txn(
+ TOKUTXN txn,
+ TXN_MANAGER txn_manager,
+ TXN_SNAPSHOT_TYPE snapshot_type,
+ bool read_only
+ );
+
+void toku_txn_manager_start_txn_for_recovery(
+ TOKUTXN txn,
+ TXN_MANAGER txn_manager,
+ TXNID xid
+ );
+
+void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn);
+
+void toku_txn_manager_clone_state_for_gc(
+ TXN_MANAGER txn_manager,
+ xid_omt_t* snapshot_xids,
+ rx_omt_t* referenced_xids,
+ xid_omt_t* live_root_txns
+ );
+
+void toku_txn_manager_id2txn_unlocked(TXN_MANAGER txn_manager, TXNID_PAIR txnid, TOKUTXN *result);
+
+// Returns a root txn associated with xid. The system as a whole
+// assumes that only root txns get prepared, and therefore only
+// root txns will have XIDs associated with them.
+int toku_txn_manager_get_root_txn_from_xid (TXN_MANAGER txn_manager, TOKU_XA_XID *xid, DB_TXN **txnp);
+
+uint32_t toku_txn_manager_num_live_root_txns(TXN_MANAGER txn_manager);
+
+typedef int (*txn_mgr_iter_callback)(TOKUTXN txn, void* extra);
+
+int toku_txn_manager_iter_over_live_txns(
+ TXN_MANAGER txn_manager,
+ txn_mgr_iter_callback cb,
+ void* extra
+ );
+
+int toku_txn_manager_iter_over_live_root_txns(
+ TXN_MANAGER txn_manager,
+ txn_mgr_iter_callback cb,
+ void* extra
+ );
+
+int toku_txn_manager_recover_root_txn(
+ TXN_MANAGER txn_manager,
+ struct tokulogger_preplist preplist[/*count*/],
+ long count,
+ long *retp, /*out*/
+ uint32_t flags
+ );
+
+void toku_txn_manager_suspend(TXN_MANAGER txn_manager);
+void toku_txn_manager_resume(TXN_MANAGER txn_manager);
+
+void toku_txn_manager_set_last_xid_from_logger(TXN_MANAGER txn_manager, TXNID last_xid);
+void toku_txn_manager_set_last_xid_from_recovered_checkpoint(TXN_MANAGER txn_manager, TXNID last_xid);
+TXNID toku_txn_manager_get_last_xid(TXN_MANAGER mgr);
+
+bool toku_txn_manager_txns_exist(TXN_MANAGER mgr);
+
+// Test-only function
+void toku_txn_manager_increase_last_xid(TXN_MANAGER mgr, uint64_t increment);
+
+TXNID toku_get_youngest_live_list_txnid_for(TXNID xc, const xid_omt_t &snapshot_txnids, const rx_omt_t &referenced_xids);
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_state.h b/storage/tokudb/PerconaFT/ft/txn/txn_state.h
new file mode 100644
index 00000000..3301cc68
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/txn_state.h
@@ -0,0 +1,50 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// this is a separate file so that the hotindexing tests can see the txn states
+
+enum tokutxn_state {
+ TOKUTXN_LIVE, // initial txn state
+ TOKUTXN_PREPARING, // txn is preparing (or prepared)
+ TOKUTXN_COMMITTING, // txn in the process of committing
+ TOKUTXN_ABORTING, // txn in the process of aborting
+ TOKUTXN_RETIRED, // txn no longer exists
+};
+typedef enum tokutxn_state TOKUTXN_STATE;
diff --git a/storage/tokudb/PerconaFT/ft/txn/xids.cc b/storage/tokudb/PerconaFT/ft/txn/xids.cc
new file mode 100644
index 00000000..59bf3c9b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/xids.cc
@@ -0,0 +1,247 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Purpose of this file is to implement the xids list of nested transaction
+ * ids.
+ *
+ * See design documentation for nested transactions at
+ * TokuWiki/Imp/TransactionsOverview.
+ *
+ * NOTE: xids are always stored in disk byte order.
+ * Accessors are responsible for transposing bytes to
+ * host order.
+ */
+
+#include <errno.h>
+#include <string.h>
+
+#include "portability/memory.h"
+#include "portability/toku_assert.h"
+#include "portability/toku_htod.h"
+#include "portability/toku_portability.h"
+
+#include "ft/txn/xids.h"
+
+/////////////////////////////////////////////////////////////////////////////////
+// This layer of abstraction (xids_xxx) understands xids<> and nothing else.
+// It contains all the functions that understand xids<>
+//
+// xids<> do not store the implicit transaction id of 0 at index 0.
+// The accessor functions make the id of 0 explicit at index 0.
+// The number of xids physically stored in the xids array is in
+// the variable num_xids.
+//
+// The xids struct is immutable. The caller gets an initial version of XIDS
+// by calling toku_xids_get_root_xids(), which returns the constant struct
+// representing the root transaction (id 0). When a transaction begins,
+// a new XIDS is created with the id of the current transaction appended to
+// the list.
+//
+//
+
+// This is the xids list for a transactionless environment.
+// It is also the initial state of any xids list created for
+// nested transactions.
+
+XIDS
+toku_xids_get_root_xids(void) {
+ static const struct XIDS_S root_xids = {
+ .num_xids = 0
+ };
+
+ XIDS rval = (XIDS)&root_xids;
+ return rval;
+}
+
+bool
+toku_xids_can_create_child(XIDS xids) {
+ invariant(xids->num_xids < MAX_TRANSACTION_RECORDS);
+ return (xids->num_xids + 1) != MAX_TRANSACTION_RECORDS;
+}
+
+int
+toku_xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) {
+ // Postcondition:
+ // xids_p points to an xids that is an exact copy of parent_xids, but with room for one more xid.
+ int rval;
+ invariant(parent_xids);
+ uint32_t num_child_xids = parent_xids->num_xids + 1;
+ // assumes that caller has verified that num_child_xids will
+ // be less than MAX_TRANSACTION_RECORDS
+ invariant(num_child_xids < MAX_TRANSACTION_RECORDS);
+ size_t new_size = sizeof(*parent_xids) + num_child_xids*sizeof(parent_xids->ids[0]);
+ XIDS CAST_FROM_VOIDP(xids, toku_xmalloc(new_size));
+ // Clone everything (parent does not have the newest xid).
+ memcpy(xids, parent_xids, new_size - sizeof(xids->ids[0]));
+ *xids_p = xids;
+ rval = 0;
+ return rval;
+}
+
+void
+toku_xids_finalize_with_child(XIDS xids, TXNID this_xid) {
+ // Precondition:
+ // - xids was created by toku_xids_create_unknown_child
+ TXNID this_xid_disk = toku_htod64(this_xid);
+ uint32_t num_child_xids = ++xids->num_xids;
+ xids->ids[num_child_xids - 1] = this_xid_disk;
+}
+
+// xids is immutable. This function creates a new xids by copying the
+// parent's list and then appending the xid of the new transaction.
+int
+toku_xids_create_child(XIDS parent_xids, // xids list for parent transaction
+ XIDS *xids_p, // xids list created
+ TXNID this_xid) { // xid of this transaction (new innermost)
+ bool can_create_child = toku_xids_can_create_child(parent_xids);
+ if (!can_create_child) {
+ return EINVAL;
+ }
+ toku_xids_create_unknown_child(parent_xids, xids_p);
+ toku_xids_finalize_with_child(*xids_p, this_xid);
+ return 0;
+}
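+
+// A brief usage sketch (the xid values 100 and 101 are made up for
+// illustration): building the xids lists for transactions nested under the root.
+//
+//   XIDS root = toku_xids_get_root_xids();               // implicit id 0
+//   XIDS child;
+//   int r = toku_xids_create_child(root, &child, 100);   // ids: [100]
+//   XIDS grandchild;
+//   r = toku_xids_create_child(child, &grandchild, 101); // ids: [100, 101]
+//   ...
+//   toku_xids_destroy(&grandchild);
+//   toku_xids_destroy(&child);  // the root xids are static and never freed
+//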
+
+void
+toku_xids_create_from_buffer(struct rbuf *rb, // xids list for parent transaction
+ XIDS *xids_p) { // xids list created
+ uint8_t num_xids = rbuf_char(rb);
+ invariant(num_xids < MAX_TRANSACTION_RECORDS);
+ XIDS CAST_FROM_VOIDP(xids, toku_xmalloc(sizeof(*xids) + num_xids*sizeof(xids->ids[0])));
+ xids->num_xids = num_xids;
+ uint8_t index;
+ for (index = 0; index < xids->num_xids; index++) {
+ rbuf_TXNID(rb, &xids->ids[index]);
+ }
+ *xids_p = xids;
+}
+
+void
+toku_xids_destroy(XIDS *xids_p) {
+ if (*xids_p != toku_xids_get_root_xids()) toku_free(*xids_p);
+ *xids_p = NULL;
+}
+
+// Return the xid at the requested position.
+// The index must be within range; requesting an out-of-range index (which
+// would be the case if the xids array is empty) trips the invariant below.
+TXNID
+toku_xids_get_xid(XIDS xids, uint8_t index) {
+ invariant(index < toku_xids_get_num_xids(xids));
+ TXNID rval = xids->ids[index];
+ rval = toku_dtoh64(rval);
+ return rval;
+}
+
+uint8_t
+toku_xids_get_num_xids(XIDS xids) {
+ uint8_t rval = xids->num_xids;
+ return rval;
+}
+
+// Return innermost xid
+TXNID
+toku_xids_get_innermost_xid(XIDS xids) {
+ TXNID rval = TXNID_NONE;
+ if (toku_xids_get_num_xids(xids)) {
+ // if clause above makes this cast ok
+ uint8_t innermost_xid = (uint8_t) (toku_xids_get_num_xids(xids) - 1);
+ rval = toku_xids_get_xid(xids, innermost_xid);
+ }
+ return rval;
+}
+
+TXNID
+toku_xids_get_outermost_xid(XIDS xids) {
+ TXNID rval = TXNID_NONE;
+ if (toku_xids_get_num_xids(xids)) {
+ rval = toku_xids_get_xid(xids, 0);
+ }
+ return rval;
+}
+
+void
+toku_xids_cpy(XIDS target, XIDS source) {
+ size_t size = toku_xids_get_size(source);
+ memcpy(target, source, size);
+}
+
+// return size in bytes
+uint32_t
+toku_xids_get_size(XIDS xids) {
+ uint32_t rval;
+ uint8_t num_xids = xids->num_xids;
+ rval = sizeof(*xids) + num_xids * sizeof(xids->ids[0]);
+ return rval;
+}
+
+uint32_t
+toku_xids_get_serialize_size(XIDS xids) {
+ uint32_t rval;
+ uint8_t num_xids = xids->num_xids;
+ rval = 1 + //num xids
+ 8 * num_xids;
+ return rval;
+}
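+
+// For example, working through the formula above: an xids list with
+// num_xids == 2 serializes to 1 + 8*2 = 17 bytes, and the root xids list
+// (num_xids == 0) serializes to just the single count byte.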
+
+unsigned char *
+toku_xids_get_end_of_array(XIDS xids) {
+ TXNID *r = xids->ids + xids->num_xids;
+ return (unsigned char*)r;
+}
+
+void wbuf_nocrc_xids(struct wbuf *wb, XIDS xids) {
+ wbuf_nocrc_char(wb, (unsigned char)xids->num_xids);
+ uint8_t index;
+ for (index = 0; index < xids->num_xids; index++) {
+ wbuf_nocrc_TXNID(wb, xids->ids[index]);
+ }
+}
+
+void
+toku_xids_fprintf(FILE *fp, XIDS xids) {
+ uint8_t index;
+ unsigned num_xids = toku_xids_get_num_xids(xids);
+ fprintf(fp, "[|%u| ", num_xids);
+ for (index = 0; index < toku_xids_get_num_xids(xids); index++) {
+ if (index) fprintf(fp, ",");
+ fprintf(fp, "%" PRIx64, toku_xids_get_xid(xids, index));
+ }
+ fprintf(fp, "]");
+}
+
diff --git a/storage/tokudb/PerconaFT/ft/txn/xids.h b/storage/tokudb/PerconaFT/ft/txn/xids.h
new file mode 100644
index 00000000..83ad5e57
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/txn/xids.h
@@ -0,0 +1,116 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Purpose of this file is to provide the world with everything necessary
+ * to use the xids and nothing else.
+ * Internal requirements of the xids logic do not belong here.
+ *
+ * xids is (abstractly) an immutable list of nested transaction ids, accessed only
+ * via the functions in this file.
+ *
+ * See design documentation for nested transactions at
+ * TokuWiki/Imp/TransactionsOverview.
+ */
+
+#pragma once
+
+#include "ft/txn/txn.h"
+#include "ft/serialize/rbuf.h"
+#include "ft/serialize/wbuf.h"
+
+/* The number of transaction ids stored in the xids structure is
+ * represented by an 8-bit value. The value 255 is reserved.
+ * The constant MAX_NESTED_TRANSACTIONS is one less because
+ * one slot in the packed leaf entry is used for the implicit
+ * root transaction (id 0).
+ */
+enum {
+ MAX_NESTED_TRANSACTIONS = 253,
+ MAX_TRANSACTION_RECORDS = MAX_NESTED_TRANSACTIONS + 1
+};
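+
+// Put differently, working through the constants above: an xids<> can hold at
+// most 253 explicit nested ids plus the implicit root id, for 254 transaction
+// records in total, which leaves the 8-bit count value 255 reserved.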
+
+// Variable size list of transaction ids (known in design doc as xids<>).
+// ids[0] is the outermost transaction.
+// ids[num_xids - 1] is the innermost transaction.
+// Should only be accessed by accessor functions toku_xids_xxx, not directly.
+
+// If the xids struct is unpacked, the compiler aligns the ids[] and we waste a lot of space
+struct __attribute__((__packed__)) XIDS_S {
+ // maximum value of MAX_TRANSACTION_RECORDS - 1 because transaction 0 is implicit
+ uint8_t num_xids;
+ TXNID ids[];
+};
+typedef struct XIDS_S *XIDS;
+
+// Retrieve an XIDS representing the root transaction.
+XIDS toku_xids_get_root_xids(void);
+
+bool toku_xids_can_create_child(XIDS xids);
+
+void toku_xids_cpy(XIDS target, XIDS source);
+
+//Creates an XIDS representing this transaction.
+//You must pass in an XIDS representing the parent of this transaction.
+int toku_xids_create_child(XIDS parent_xids, XIDS *xids_p, TXNID this_xid);
+
+// The following two functions (in order) are equivalent to toku_xids_create_child,
+// but allow you to do most of the work without knowing the new xid.
+int toku_xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p);
+void toku_xids_finalize_with_child(XIDS xids, TXNID this_xid);
+
+void toku_xids_create_from_buffer(struct rbuf *rb, XIDS *xids_p);
+
+void toku_xids_destroy(XIDS *xids_p);
+
+TXNID toku_xids_get_xid(XIDS xids, uint8_t index);
+
+uint8_t toku_xids_get_num_xids(XIDS xids);
+
+TXNID toku_xids_get_innermost_xid(XIDS xids);
+TXNID toku_xids_get_outermost_xid(XIDS xids);
+
+// return size in bytes
+uint32_t toku_xids_get_size(XIDS xids);
+
+uint32_t toku_xids_get_serialize_size(XIDS xids);
+
+unsigned char *toku_xids_get_end_of_array(XIDS xids);
+
+void wbuf_nocrc_xids(struct wbuf *wb, XIDS xids);
+
+void toku_xids_fprintf(FILE* fp, XIDS xids);
diff --git a/storage/tokudb/PerconaFT/ft/ule-internal.h b/storage/tokudb/PerconaFT/ft/ule-internal.h
new file mode 100644
index 00000000..c59e3a5b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ule-internal.h
@@ -0,0 +1,103 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Purpose of this file is to provide the test programs with internal
+ * ule mechanisms that do not belong in the public interface.
+ */
+
+#pragma once
+
+// Set ULE_DEBUG to 1 to enable much slower debugging checks
+#define ULE_DEBUG 0
+
+/////////////////////////////////////////////////////////////////////////////////
+// Following data structures are the unpacked format of a leafentry.
+// * ule is the unpacked leaf entry, that contains an array of unpacked
+// transaction records
+// * uxr is the unpacked transaction record
+//
+
+
+//Types of transaction records.
+enum {XR_INSERT = 1,
+ XR_DELETE = 2,
+ XR_PLACEHOLDER = 3};
+
+typedef struct uxr { // unpacked transaction record
+ uint8_t type; // delete/insert/placeholder
+ uint32_t vallen; // number of bytes in value
+ void * valp; // pointer to value (Where is value really stored?)
+ TXNID xid; // transaction id
+ // Note: when packing ule into a new leafentry, will need
+ // to copy actual data from valp to new leafentry
+} UXR_S, *UXR;
+
+
+// Unpacked Leaf Entry is of fixed size because it's just on the
+// stack and we care about ease of access more than the memory footprint.
+typedef struct ule { // unpacked leaf entry
+ uint32_t num_puxrs; // how many of uxrs[] are provisional
+ uint32_t num_cuxrs; // how many of uxrs[] are committed
+ UXR_S uxrs_static[MAX_TRANSACTION_RECORDS*2]; // uxrs[0] is oldest committed (txn commit time, not txn start time), uxrs[num_cuxrs] is outermost provisional value (if any exist/num_puxrs > 0)
+ UXR uxrs; //If num_cuxrs < MAX_TRANSACTION_RECORDS then &uxrs_static[0].
+ //Otherwise we use a dynamically allocated array of size num_cuxrs + 1 + MAX_TRANSACTION_RECORDS.
+} ULE_S, *ULE;
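+
+// For orientation, a freshly inserted and fully committed row unpacks to the
+// simplest possible ULE (this mirrors the LE_CLEAN case in le_unpack):
+//   num_cuxrs = 1, num_puxrs = 0, and
+//   uxrs[0] = { .type = XR_INSERT, .vallen = <value length>,
+//               .valp = <pointer into the leafentry>, .xid = TXNID_NONE }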
+
+
+
+void test_msg_modify_ule(ULE ule, const ft_msg &msg);
+
+
+//////////////////////////////////////////////////////////////////////////////////////
+//Functions exported for test purposes only (used internally for non-test purposes).
+void le_unpack(ULE ule, LEAFENTRY le);
+int
+le_pack(ULE ule, // data to be packed into new leafentry
+ bn_data* data_buffer,
+ uint32_t idx,
+ void* keyp,
+ uint32_t keylen,
+ uint32_t old_keylen,
+ uint32_t old_le_size,
+ LEAFENTRY * const new_leafentry_p, // this is what this function creates
+ void **const maybe_free
+ );
+
+
+size_t le_memsize_from_ule (ULE ule);
+void ule_cleanup(ULE ule);
diff --git a/storage/tokudb/PerconaFT/ft/ule.cc b/storage/tokudb/PerconaFT/ft/ule.cc
new file mode 100644
index 00000000..f43094b6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ule.cc
@@ -0,0 +1,2662 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Purpose of this file is to handle all modifications and queries to the database
+// at the level of leafentry.
+//
+// ule = Unpacked Leaf Entry
+//
+// This design unpacks the leafentry into a convenient format, performs all work
+// on the unpacked form, then repacks the leafentry into its compact format.
+//
+// See design documentation for nested transactions at
+// TokuWiki/Imp/TransactionsOverview.
+
+#include <my_global.h>
+#include "portability/toku_portability.h"
+
+#include "ft/ft-internal.h"
+#include "ft/leafentry.h"
+#include "ft/logger/logger.h"
+#include "ft/msg.h"
+#include "ft/txn/txn.h"
+#include "ft/txn/txn_manager.h"
+#include "ft/ule.h"
+#include "ft/ule-internal.h"
+#include "ft/txn/xids.h"
+#include "util/bytestring.h"
+#include "util/omt.h"
+#include "util/partitioned_counter.h"
+#include "util/scoped_malloc.h"
+#include "util/status.h"
+
+#define ULE_DEBUG 0
+
+static uint32_t ule_get_innermost_numbytes(ULE ule, uint32_t keylen);
+
+void toku_le_get_status(LE_STATUS statp) {
+ le_status.init();
+ *statp = le_status;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Accessor functions used by outside world (e.g. indexer)
+//
+
+ULEHANDLE toku_ule_create(LEAFENTRY le) {
+ ULE XMALLOC(ule_p);
+ le_unpack(ule_p, le);
+ return (ULEHANDLE) ule_p;
+}
+
+void toku_ule_free(ULEHANDLE ule_p) {
+ ule_cleanup((ULE) ule_p);
+ toku_free(ule_p);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Question: Can any software outside this file modify or read a leafentry?
+// If so, is it worthwhile to put it all here?
+//
+// There are two entries, one each for modification and query:
+// toku_le_apply_msg() performs all inserts/deletes/aborts
+//
+//
+//
+//
+
+//This is what we use to initialize uxrs[0] in a new unpacked leafentry.
+const UXR_S committed_delete = {
+ .type = XR_DELETE,
+ .vallen = 0,
+ .valp = NULL,
+ .xid = 0
+}; // static allocation of uxr with type set to committed delete and xid = 0
+
+#define INSERT_LENGTH(len) ((1U << 31) | len)
+#define DELETE_LENGTH(len) (0)
+#define GET_LENGTH(len) (len & ((1U << 31)-1))
+#define IS_INSERT(len) (len & (1U << 31))
+#define IS_VALID_LEN(len) (len < (1U<<31))
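+
+// In other words, the high bit of the packed 32-bit length word is the
+// insert/delete flag and the low 31 bits are the value length. For example
+// (values chosen only to illustrate the macros): INSERT_LENGTH(10) packs to
+// 0x8000000a, DELETE_LENGTH(anything) packs to 0, and GET_LENGTH(0x8000000a)
+// recovers 10.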
+
+// Local functions:
+
+static inline void msg_init_empty_ule(ULE ule);
+static int64_t msg_modify_ule(ULE ule, const ft_msg &msg);
+static inline void ule_init_empty_ule(ULE ule);
+static void ule_do_implicit_promotions(ULE ule, XIDS xids);
+static void ule_try_promote_provisional_outermost(
+ ULE ule,
+ TXNID oldest_possible_live_xid);
+static void ule_promote_provisional_innermost_to_index(ULE ule, uint32_t index);
+static void ule_promote_provisional_innermost_to_committed(ULE ule);
+static inline int64_t ule_apply_insert_no_overwrite(
+ ULE ule,
+ XIDS xids,
+ uint32_t vallen,
+ void* valp);
+static inline int64_t ule_apply_insert(
+ ULE ule,
+ XIDS xids,
+ uint32_t vallen,
+ void* valp);
+static inline int64_t ule_apply_delete(ULE ule, XIDS xids);
+static inline void ule_prepare_for_new_uxr(ULE ule, XIDS xids);
+static inline int64_t ule_apply_abort(ULE ule, XIDS xids);
+static void ule_apply_broadcast_commit_all (ULE ule);
+static void ule_apply_commit(ULE ule, XIDS xids);
+static inline void ule_push_insert_uxr(
+ ULE ule,
+ bool is_committed,
+ TXNID xid,
+ uint32_t vallen,
+ void* valp);
+static inline void ule_push_delete_uxr(ULE ule, bool is_committed, TXNID xid);
+static inline void ule_push_placeholder_uxr(ULE ule, TXNID xid);
+static inline UXR ule_get_innermost_uxr(ULE ule);
+static inline UXR ule_get_first_empty_uxr(ULE ule);
+static inline void ule_remove_innermost_uxr(ULE ule);
+static inline TXNID ule_get_innermost_xid(ULE ule);
+static inline TXNID ule_get_xid(ULE ule, uint32_t index);
+static void ule_remove_innermost_placeholders(ULE ule);
+static void ule_add_placeholders(ULE ule, XIDS xids);
+static void ule_optimize(ULE ule, XIDS xids);
+static inline bool uxr_type_is_insert(uint8_t type);
+static inline bool uxr_type_is_delete(uint8_t type);
+static inline bool uxr_type_is_placeholder(uint8_t type);
+static inline size_t uxr_pack_txnid(UXR uxr, uint8_t *p);
+static inline size_t uxr_pack_type_and_length(UXR uxr, uint8_t *p);
+static inline size_t uxr_pack_length_and_bit(UXR uxr, uint8_t *p);
+static inline size_t uxr_pack_data(UXR uxr, uint8_t *p);
+static inline size_t uxr_unpack_txnid(UXR uxr, uint8_t *p);
+static inline size_t uxr_unpack_type_and_length(UXR uxr, uint8_t *p);
+static inline size_t uxr_unpack_length_and_bit(UXR uxr, uint8_t *p);
+static inline size_t uxr_unpack_data(UXR uxr, uint8_t *p);
+
+#if 0
+static void ule_print(ULE ule, const char* note) {
+ fprintf(stderr, "%s : ULE[0x%p]\n", note, ule);
+ fprintf(stderr, " num_puxrs[%u]\n", ule->num_puxrs);
+ fprintf(stderr, " num_cuxrs[%u]\n", ule->num_cuxrs);
+ fprintf(stderr, " innermost[%u]\n", ule->num_cuxrs + ule->num_puxrs - 1);
+ fprintf(stderr, " first_empty[%u]\n", ule->num_cuxrs + ule->num_puxrs);
+
+ uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs - 1;
+ for (uint32_t uxr_num = 0; uxr_num <= num_uxrs; uxr_num++) {
+ UXR uxr = &(ule->uxrs[uxr_num]);
+ fprintf(stderr, " uxr[%u]\n", uxr_num);
+ switch (uxr->type) {
+ case 0: fprintf(stderr, " type[NONE]\n"); break;
+ case 1: fprintf(stderr, " type[INSERT]\n"); break;
+ case 2: fprintf(stderr, " type[DELETE]\n"); break;
+ case 3: fprintf(stderr, " type[PLACEHOLDER]\n"); break;
+ default: fprintf(stderr, " type[WHAT??]\n"); break;
+ }
+ fprintf(stderr, " xid[%lu]\n", uxr->xid);
+ }
+}
+#endif
+
+static void get_space_for_le(
+ bn_data* data_buffer,
+ uint32_t idx,
+ void* keyp,
+ uint32_t keylen,
+ uint32_t old_keylen,
+ uint32_t old_le_size,
+ size_t size,
+ LEAFENTRY* new_le_space,
+ void** const maybe_free) {
+
+ if (data_buffer == nullptr) {
+ CAST_FROM_VOIDP(*new_le_space, toku_xmalloc(size));
+ } else if (old_le_size > 0) {
+ // this means we are overwriting something
+ data_buffer->get_space_for_overwrite(
+ idx,
+ keyp,
+ keylen,
+ old_keylen,
+ old_le_size,
+ size,
+ new_le_space,
+ maybe_free);
+ } else {
+ // this means we are inserting something new
+ data_buffer->get_space_for_insert(
+ idx,
+ keyp,
+ keylen,
+ size,
+ new_le_space,
+ maybe_free);
+ }
+}
+
+
+/////////////////////////////////////////////////////////////////////
+// Garbage collection related functions
+//
+
+static TXNID get_next_older_txnid(TXNID xc, const xid_omt_t &omt) {
+ int r;
+ TXNID xid;
+ r = omt.find<TXNID, toku_find_xid_by_xid>(xc, -1, &xid, nullptr);
+ if (r==0) {
+ invariant(xid < xc); //sanity check
+ } else {
+ invariant(r==DB_NOTFOUND);
+ xid = TXNID_NONE;
+ }
+ return xid;
+}
+
+//
+// This function returns true if live transaction TL1 is allowed to read a
+// value committed by transaction xc, false otherwise.
+//
+static bool xid_reads_committed_xid(
+ TXNID tl1,
+ TXNID xc,
+ const xid_omt_t& snapshot_txnids,
+ const rx_omt_t& referenced_xids) {
+
+ bool rval;
+ if (tl1 < xc) {
+ rval = false; //cannot read a newer txn
+ } else {
+ TXNID x =
+ toku_get_youngest_live_list_txnid_for(
+ xc,
+ snapshot_txnids,
+ referenced_xids);
+
+ if (x == TXNID_NONE) {
+ //Not in ANY live list, tl1 can read it.
+ rval = true;
+ } else {
+ //Newer than the 'newest one that has it in live list'
+ rval = tl1 > x;
+ }
+ // we know tl1 > xc
+ // we know x > xc
+ // if tl1 == x, then we do not read, because tl1 is in xc's live list
+ // if x is older than tl1, that means that xc < x < tl1
+ // and if xc is in x's live list, it CANNOT be in tl1's live list
+ }
+ return rval;
+}
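+
+// A concrete example of the rule above (the xid values are made up): suppose
+// xc == 30 has committed and the youngest live list still holding 30 belongs
+// to x == 40. A reader tl1 == 50 satisfies tl1 > x, so 30 cannot be in its
+// live list and it may read the value. A reader tl1 == 35 is not newer than
+// x, so it is treated as possibly having 30 in its live list and does not.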
+
+//
+// This function does some simple garbage collection given a TXNID known
+// to be the oldest referenced xid, that is, the oldest xid in any live list.
+// We find the youngest entry in the stack with an xid less
+// than oldest_referenced_xid. All elements below this entry are garbage,
+// so we get rid of them.
+//
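+// For example (xids chosen only for illustration): with a committed stack of
+// xids [0, 5, 9, 12] and oldest_referenced_xid_for_simple_gc == 10, the
+// youngest entry older than 10 is the one with xid 9, so the entries with
+// xids 0 and 5 are discarded and the stack becomes [9, 12], with the new
+// bottom entry's xid rewritten to TXNID_NONE.
+//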
+static void ule_simple_garbage_collection(ULE ule, txn_gc_info *gc_info) {
+ if (ule->num_cuxrs == 1) {
+ return;
+ }
+
+ uint32_t curr_index = 0;
+ if (gc_info->mvcc_needed) {
+ // starting at the top of the committed stack, find the first
+ // uxr with a txnid that is less than oldest_referenced_xid
+ for (uint32_t i = 0; i < ule->num_cuxrs; i++) {
+ curr_index = ule->num_cuxrs - i - 1;
+ if (ule->uxrs[curr_index].xid <
+ gc_info->oldest_referenced_xid_for_simple_gc) {
+ break;
+ }
+ }
+ } else {
+ // if mvcc is not needed, we only need the top committed
+ // value and nothing else
+ curr_index = ule->num_cuxrs - 1;
+ }
+
+ // curr_index is now set to the youngest uxr older than
+ // oldest_referenced_xid so if it's not the bottom of the stack..
+ if (curr_index != 0) {
+ // ..then we need to get rid of the entries below curr_index
+ uint32_t num_entries = ule->num_cuxrs + ule->num_puxrs - curr_index;
+ memmove(
+ &ule->uxrs[0],
+ &ule->uxrs[curr_index],
+ num_entries * sizeof(ule->uxrs[0]));
+ ule->uxrs[0].xid = TXNID_NONE; // New 'bottom of stack' loses its TXNID
+ ule->num_cuxrs -= curr_index;
+ }
+}
+
+// TODO: Clean this up
+extern bool garbage_collection_debug;
+
+static void ule_garbage_collect(
+ ULE ule,
+ const xid_omt_t& snapshot_xids,
+ const rx_omt_t& referenced_xids,
+ const xid_omt_t& live_root_txns) {
+
+ if (ule->num_cuxrs == 1) {
+ return;
+ }
+
+ toku::scoped_calloc necessary_buf(ule->num_cuxrs * sizeof(bool));
+ bool *necessary = reinterpret_cast<bool *>(necessary_buf.get());
+
+ uint32_t curr_committed_entry;
+ curr_committed_entry = ule->num_cuxrs - 1;
+ while (true) {
+ // mark the curr_committed_entry as necessary
+ necessary[curr_committed_entry] = true;
+ if (curr_committed_entry == 0) break; //nothing left
+
+ // find the youngest live transaction that reads something
+ // below curr_committed_entry, if it exists
+ TXNID tl1;
+ TXNID xc = ule->uxrs[curr_committed_entry].xid;
+
+ //
+ // If we find that the committed transaction is in the live list,
+ // then xc is really in the process of being committed. It has not
+ // been fully committed. As a result, our assumption that transactions
+ // newer than what is currently in these OMTs will read the top of the
+ // stack is not necessarily accurate. Transactions may read what is
+ // just below xc.
+ // As a result, we must mark what is just below xc as necessary and
+ // move on. This issue was found while testing flusher threads, and was
+ // fixed for #3979
+ //
+ bool is_xc_live = toku_is_txn_in_live_root_txn_list(live_root_txns, xc);
+ if (is_xc_live) {
+ curr_committed_entry--;
+ continue;
+ }
+
+ tl1 =
+ toku_get_youngest_live_list_txnid_for(
+ xc,
+ snapshot_xids,
+ referenced_xids);
+
+ // if tl1 == xc, that means xc should be live and show up in
+ // live_root_txns, which we check above.
+ invariant(tl1 != xc);
+
+ if (tl1 == TXNID_NONE) {
+ // set tl1 to youngest live transaction older than
+ // ule->uxrs[curr_committed_entry]->xid
+ tl1 = get_next_older_txnid(xc, snapshot_xids);
+ if (tl1 == TXNID_NONE) {
+ // remainder is garbage, we're done
+ break;
+ }
+ }
+ if (garbage_collection_debug) {
+ int r =
+ snapshot_xids.find_zero<TXNID, toku_find_xid_by_xid>(
+ tl1,
+ nullptr,
+ nullptr);
+ // make sure that the txn you are claiming is live is actually live
+ invariant_zero(r);
+ }
+ //
+ // tl1 should now be set
+ //
+ curr_committed_entry--;
+ while (curr_committed_entry > 0) {
+ xc = ule->uxrs[curr_committed_entry].xid;
+ if (xid_reads_committed_xid(
+ tl1,
+ xc,
+ snapshot_xids,
+ referenced_xids)) {
+ break;
+ }
+ curr_committed_entry--;
+ }
+ }
+ uint32_t first_free = 0;
+ for (uint32_t i = 0; i < ule->num_cuxrs; i++) {
+ // Shift values to 'delete' garbage values.
+ if (necessary[i]) {
+ ule->uxrs[first_free] = ule->uxrs[i];
+ first_free++;
+ }
+ }
+ uint32_t saved = first_free;
+ invariant(saved <= ule->num_cuxrs);
+ invariant(saved >= 1);
+ ule->uxrs[0].xid = TXNID_NONE; //New 'bottom of stack' loses its TXNID
+ if (first_free != ule->num_cuxrs) {
+ // Shift provisional values
+ memmove(
+ &ule->uxrs[first_free],
+ &ule->uxrs[ule->num_cuxrs],
+ ule->num_puxrs * sizeof(ule->uxrs[0]));
+ }
+ ule->num_cuxrs = saved;
+}
+
+static size_t ule_packed_memsize(ULE ule) {
+// Returns: The size 'ule' would be when packed into a leafentry, or 0 if the
+// topmost committed value is a delete.
+ if (ule->num_cuxrs == 1 && ule->num_puxrs == 0) {
+ UXR uxr = ule_get_innermost_uxr(ule);
+ if (uxr_is_delete(uxr)) {
+ return 0;
+ }
+ }
+ return le_memsize_from_ule(ule);
+}
+
+// Heuristics to control when we decide to initialize
+// txn manager state (possibly expensive) and run gc.
+enum {
+ ULE_MIN_STACK_SIZE_TO_FORCE_GC = 5,
+ ULE_MIN_MEMSIZE_TO_FORCE_GC = 1024 * 1024
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// This is the big enchilada. (Bring Tums.) Note that this level of
+// abstraction has no knowledge of the inner structure of either leafentry or
+// msg. It makes calls into the next lower layer (msg_xxx) which handles
+// messages.
+//
+// NOTE: This is the only function (at least in this body of code) that modifies
+// a leafentry.
+// NOTE: It is the responsibility of the caller to make sure that the key is set
+// in the FT_MSG, as it will be used to store the data in the data_buffer
+//
+// Returns -1, 0, or 1 that identifies the change in logical row count needed
+// based on the results of the message application. For example, the count does
+// not change if a delete finds no logical leafentry or if an insert finds a
+// duplicate and is converted to an update.
+//
+// old_leafentry - NULL if there was no stored data.
+// data_buffer - bn_data storing leafentry, if NULL, means there is no bn_data
+// idx - index in data_buffer where leafentry is stored
+// (and should be replaced)
+// old_keylen - length of the old key in data_buffer, if any
+// new_leafentry_p - If the leafentry is destroyed it sets *new_leafentry_p
+// to NULL. Otherwise the new_leafentry_p points at the new
+// leaf entry.
+// numbytes_delta_p - change in total size of key and val, not including any
+// overhead
+int64_t toku_le_apply_msg(
+ const ft_msg& msg,
+ LEAFENTRY old_leafentry,
+ bn_data* data_buffer,
+ uint32_t idx,
+ uint32_t old_keylen,
+ txn_gc_info* gc_info,
+ LEAFENTRY* new_leafentry_p,
+ int64_t* numbytes_delta_p) {
+
+ invariant_notnull(gc_info);
+ paranoid_invariant_notnull(new_leafentry_p);
+ ULE_S ule;
+ int64_t oldnumbytes = 0;
+ int64_t newnumbytes = 0;
+ uint64_t oldmemsize = 0;
+ uint32_t keylen = msg.kdbt()->size;
+ int32_t rowcountdelta = 0;
+
+ if (old_leafentry == NULL) {
+ msg_init_empty_ule(&ule);
+ } else {
+ oldmemsize = leafentry_memsize(old_leafentry);
+ le_unpack(&ule, old_leafentry); // otherwise unpack leafentry
+ oldnumbytes = ule_get_innermost_numbytes(&ule, keylen);
+ }
+
+ // modify unpacked leafentry
+ rowcountdelta = msg_modify_ule(&ule, msg);
+
+ // - we may be able to immediately promote the newly-applied outermost
+ // provisional uxr
+ // - either way, run simple gc first, and then full gc if there are still
+ // some committed uxrs.
+ ule_try_promote_provisional_outermost(
+ &ule,
+ gc_info->oldest_referenced_xid_for_implicit_promotion);
+ ule_simple_garbage_collection(&ule, gc_info);
+ txn_manager_state *txn_state_for_gc = gc_info->txn_state_for_gc;
+ size_t size_before_gc = 0;
+ // there is garbage to clean, and our caller gave us state..
+ // ..and either the state is pre-initialized, or the committed stack is
+ // large enough
+ // ..or the ule's raw memsize is sufficiently large
+ // ..then it's worth running gc, possibly initializing the txn manager
+ // state, if it isn't already
+ if (ule.num_cuxrs > 1 && txn_state_for_gc != nullptr &&
+ (txn_state_for_gc->initialized ||
+ ule.num_cuxrs >= ULE_MIN_STACK_SIZE_TO_FORCE_GC ||
+ (size_before_gc = ule_packed_memsize(&ule)) >=
+ ULE_MIN_MEMSIZE_TO_FORCE_GC)) {
+ if (!txn_state_for_gc->initialized) {
+ txn_state_for_gc->init();
+ }
+ // use the size already calculated above, if we have it
+ size_before_gc =
+ size_before_gc != 0 ? size_before_gc : ule_packed_memsize(&ule);
+ ule_garbage_collect(
+ &ule,
+ txn_state_for_gc->snapshot_xids,
+ txn_state_for_gc->referenced_xids,
+ txn_state_for_gc->live_root_txns);
+ size_t size_after_gc = ule_packed_memsize(&ule);
+
+ LE_STATUS_INC(LE_APPLY_GC_BYTES_IN, size_before_gc);
+ LE_STATUS_INC(LE_APPLY_GC_BYTES_OUT, size_after_gc);
+ }
+
+ void* maybe_free = nullptr;
+ // create packed leafentry
+ // contract of this function is caller has keyp and keylen set, always
+ int r =
+ le_pack(
+ &ule,
+ data_buffer,
+ idx,
+ msg.kdbt()->data,
+ keylen,
+ old_keylen,
+ oldmemsize,
+ new_leafentry_p,
+ &maybe_free);
+ invariant_zero(r);
+ if (*new_leafentry_p) {
+ newnumbytes = ule_get_innermost_numbytes(&ule, keylen);
+ }
+ *numbytes_delta_p = newnumbytes - oldnumbytes;
+
+ ule_cleanup(&ule);
+ if (maybe_free != nullptr) {
+ toku_free(maybe_free);
+ }
+ return rowcountdelta;
+}
+
+bool toku_le_worth_running_garbage_collection(
+ LEAFENTRY le,
+ txn_gc_info* gc_info) {
+// Effect: Quickly determines if it's worth trying to run garbage collection
+// on a leafentry
+// Return: True if it makes sense to try garbage collection, false otherwise.
+// Rationale: Garbage collection is likely to clean up under two circumstances:
+// 1.) There are multiple committed entries. Some may never be read
+// by new txns.
+// 2.) There is only one committed entry, but the outermost
+// provisional entry is older than the oldest known referenced
+// xid, so it must have committed. Therefore we can promote it to
+// committed and get rid of the old committed entry.
+ if (le->type != LE_MVCC) {
+ return false;
+ }
+ if (le->u.mvcc.num_cxrs > 1) {
+ return true;
+ } else {
+ paranoid_invariant(le->u.mvcc.num_cxrs == 1);
+ }
+ return le->u.mvcc.num_pxrs > 0 &&
+ le_outermost_uncommitted_xid(le) <
+ gc_info->oldest_referenced_xid_for_implicit_promotion;
+}
+
+// Garbage collect one leaf entry, using the txn manager state in 'gc_info'.
+// Parameters:
+// -- old_leaf_entry : the leaf entry we intend to clean up through garbage
+// collection.
+// -- data_buffer : the bn_data where the leaf entry resides.
+// -- idx : index in data_buffer where the leaf entry is stored
+// (and should be replaced).
+// -- keyp, keylen : the key for the leaf entry; the key does not change
+// during garbage collection.
+// -- gc_info : snapshot of the transaction system (snapshot xids,
+// referenced xids, live root txns) used to determine what to
+// garbage collect.
+// -- new_leaf_entry (OUTPUT) : a pointer to the leaf entry after
+// garbage collection.
+// -- numbytes_delta_p (OUTPUT) : change in total size of key and val, not
+// including any overhead.
+// NOTE: it is not worthwhile to garbage collect a leaf
+// entry with only one committed value.
+void toku_le_garbage_collect(
+ LEAFENTRY old_leaf_entry,
+ bn_data* data_buffer,
+ uint32_t idx,
+ void* keyp,
+ uint32_t keylen,
+ txn_gc_info* gc_info,
+ LEAFENTRY* new_leaf_entry,
+ int64_t* numbytes_delta_p) {
+
+ // We shouldn't want to run gc without having provided a snapshot of the
+ // txn system.
+ invariant_notnull(gc_info);
+ invariant_notnull(gc_info->txn_state_for_gc);
+ paranoid_invariant_notnull(new_leaf_entry);
+ ULE_S ule;
+ int64_t oldnumbytes = 0;
+ int64_t newnumbytes = 0;
+
+ le_unpack(&ule, old_leaf_entry);
+
+ oldnumbytes = ule_get_innermost_numbytes(&ule, keylen);
+ uint32_t old_mem_size = leafentry_memsize(old_leaf_entry);
+
+ // Before running garbage collection, try to promote the outermost
+ // provisional entry to committed if its xid is older than the oldest
+ // possible live xid.
+ //
+ // The oldest known referenced xid is a lower bound on the oldest possible
+ // live xid, so we use that. It's usually close enough to get rid of most
+ // garbage in leafentries.
+ ule_try_promote_provisional_outermost(
+ &ule,
+ gc_info->oldest_referenced_xid_for_implicit_promotion);
+ // No need to run simple gc here if we're going straight for full gc.
+ if (ule.num_cuxrs > 1) {
+ size_t size_before_gc = ule_packed_memsize(&ule);
+ ule_garbage_collect(
+ &ule,
+ gc_info->txn_state_for_gc->snapshot_xids,
+ gc_info->txn_state_for_gc->referenced_xids,
+ gc_info->txn_state_for_gc->live_root_txns);
+ size_t size_after_gc = ule_packed_memsize(&ule);
+
+ LE_STATUS_INC(LE_APPLY_GC_BYTES_IN, size_before_gc);
+ LE_STATUS_INC(LE_APPLY_GC_BYTES_OUT, size_after_gc);
+ }
+
+ void *maybe_free = nullptr;
+ // old_keylen, same because the key isn't going to change for gc
+ int r =
+ le_pack(
+ &ule,
+ data_buffer,
+ idx,
+ keyp,
+ keylen,
+ keylen,
+ old_mem_size,
+ new_leaf_entry,
+ &maybe_free);
+ invariant_zero(r);
+ if (*new_leaf_entry) {
+ newnumbytes = ule_get_innermost_numbytes(&ule, keylen);
+ }
+ *numbytes_delta_p = newnumbytes - oldnumbytes;
+
+ ule_cleanup(&ule);
+ if (maybe_free != nullptr) {
+ toku_free(maybe_free);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// This layer of abstraction (msg_xxx)
+// knows the accessors of msg, but not of leafentry or unpacked leaf entry.
+// It makes calls into the lower layer (le_xxx) which handles leafentries.
+
+// Purpose is to init the ule with no transaction records
+//
+static inline void msg_init_empty_ule(ULE ule) {
+ ule_init_empty_ule(ule);
+}
+
+// Purpose is to modify the unpacked leafentry in our private workspace.
+//
+// Returns -1, 0, or 1 that identifies the change in logical row count needed
+// based on the results of the message application. For example, the count does
+// not change if a delete finds no logical leafentry or if an insert finds a
+// duplicate and is converted to an update.
+static int64_t msg_modify_ule(ULE ule, const ft_msg &msg) {
+ int64_t retval = 0;
+ XIDS xids = msg.xids();
+ invariant(toku_xids_get_num_xids(xids) < MAX_TRANSACTION_RECORDS);
+ enum ft_msg_type type = msg.type();
+ if (FT_LIKELY(type != FT_OPTIMIZE && type != FT_OPTIMIZE_FOR_UPGRADE)) {
+ ule_do_implicit_promotions(ule, xids);
+ }
+ switch (type) {
+ case FT_INSERT_NO_OVERWRITE:
+ retval =
+ ule_apply_insert_no_overwrite(
+ ule,
+ xids,
+ msg.vdbt()->size,
+ msg.vdbt()->data);
+ break;
+ case FT_INSERT:
+ retval =
+ ule_apply_insert(
+ ule,
+ xids,
+ msg.vdbt()->size,
+ msg.vdbt()->data);
+ break;
+ case FT_DELETE_ANY:
+ retval = ule_apply_delete(ule, xids);
+ break;
+ case FT_ABORT_ANY:
+ case FT_ABORT_BROADCAST_TXN:
+ retval = ule_apply_abort(ule, xids);
+ break;
+ case FT_COMMIT_BROADCAST_ALL:
+ ule_apply_broadcast_commit_all(ule);
+ break;
+ case FT_COMMIT_ANY:
+ case FT_COMMIT_BROADCAST_TXN:
+ ule_apply_commit(ule, xids);
+ break;
+ case FT_OPTIMIZE:
+ case FT_OPTIMIZE_FOR_UPGRADE:
+ ule_optimize(ule, xids);
+ break;
+ case FT_UPDATE:
+ case FT_UPDATE_BROADCAST_ALL:
+ // These messages don't get this far. Instead they get translated (in
+ // setval_fun in do_update) into FT_INSERT messages.
+ assert(false);
+ break;
+ default:
+ // illegal ft msg type
+ assert(false);
+ break;
+ }
+ return retval;
+}
+
+void test_msg_modify_ule(ULE ule, const ft_msg &msg) {
+ msg_modify_ule(ule,msg);
+}
+
+static void ule_optimize(ULE ule, XIDS xids) {
+ if (ule->num_puxrs) {
+ // outermost uncommitted
+ TXNID uncommitted = ule->uxrs[ule->num_cuxrs].xid;
+ TXNID oldest_living_xid = TXNID_NONE;
+ uint32_t num_xids = toku_xids_get_num_xids(xids);
+ if (num_xids > 0) {
+ invariant(num_xids==1);
+ oldest_living_xid = toku_xids_get_xid(xids, 0);
+ }
+ if (oldest_living_xid == TXNID_NONE ||
+ uncommitted < oldest_living_xid) {
+ ule_promote_provisional_innermost_to_committed(ule);
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// This layer of abstraction (le_xxx) understands the structure of the leafentry
+// and of the unpacked leafentry. It is the only layer that understands the
+// structure of leafentry. It has no knowledge of any other data structures.
+//
+
+//
+// required for every le_unpack that is done
+//
+void ule_cleanup(ULE ule) {
+ invariant(ule->uxrs);
+ if (ule->uxrs != ule->uxrs_static) {
+ toku_free(ule->uxrs);
+ ule->uxrs = NULL;
+ }
+}
+
+// populate an unpacked leafentry using pointers into the given leafentry.
+// thus, the memory referenced by 'le' must live as long as the ULE.
+void le_unpack(ULE ule, LEAFENTRY le) {
+ uint8_t type = le->type;
+ uint8_t *p;
+ uint32_t i;
+ switch (type) {
+ case LE_CLEAN: {
+ ule->uxrs = ule->uxrs_static; //Static version is always enough.
+ ule->num_cuxrs = 1;
+ ule->num_puxrs = 0;
+ UXR uxr = ule->uxrs;
+ uxr->type = XR_INSERT;
+ uxr->vallen = toku_dtoh32(le->u.clean.vallen);
+ uxr->valp = le->u.clean.val;
+ uxr->xid = TXNID_NONE;
+ //Set p to immediately after leafentry
+ p = le->u.clean.val + uxr->vallen;
+ break;
+ }
+ case LE_MVCC:
+ ule->num_cuxrs = toku_dtoh32(le->u.mvcc.num_cxrs);
+ invariant(ule->num_cuxrs);
+ ule->num_puxrs = le->u.mvcc.num_pxrs;
+ //Dynamic memory
+ if (ule->num_cuxrs < MAX_TRANSACTION_RECORDS) {
+ ule->uxrs = ule->uxrs_static;
+ } else {
+ XMALLOC_N(
+ ule->num_cuxrs + 1 + MAX_TRANSACTION_RECORDS,
+ ule->uxrs);
+ }
+ p = le->u.mvcc.xrs;
+
+ //unpack interesting TXNIDs inner to outer.
+ if (ule->num_puxrs!=0) {
+ UXR outermost = ule->uxrs + ule->num_cuxrs;
+ p += uxr_unpack_txnid(outermost, p);
+ }
+ //unpack other TXNIDS (not for ule->uxrs[0])
+ ule->uxrs[0].xid = TXNID_NONE; //0 for super-root is implicit
+ for (i = 0; i < ule->num_cuxrs - 1; i++) {
+ p += uxr_unpack_txnid(ule->uxrs + ule->num_cuxrs - 1 - i, p);
+ }
+
+ //unpack interesting lengths inner to outer.
+ if (ule->num_puxrs!=0) {
+ UXR innermost = ule->uxrs + ule->num_cuxrs + ule->num_puxrs - 1;
+ p += uxr_unpack_length_and_bit(innermost, p);
+ }
+ for (i = 0; i < ule->num_cuxrs; i++) {
+ p +=
+ uxr_unpack_length_and_bit(
+ ule->uxrs + ule->num_cuxrs - 1 - i,
+ p);
+ }
+
+ //unpack interesting values inner to outer
+ if (ule->num_puxrs!=0) {
+ UXR innermost = ule->uxrs + ule->num_cuxrs + ule->num_puxrs - 1;
+ p += uxr_unpack_data(innermost, p);
+ }
+ for (i = 0; i < ule->num_cuxrs; i++) {
+ p += uxr_unpack_data(ule->uxrs + ule->num_cuxrs - 1 - i, p);
+ }
+
+ //unpack provisional xrs outer to inner
+ if (ule->num_puxrs > 1) {
+ {
+ //unpack length, bit, data for outermost uncommitted
+ UXR outermost = ule->uxrs + ule->num_cuxrs;
+ p += uxr_unpack_type_and_length(outermost, p);
+ p += uxr_unpack_data(outermost, p);
+ }
+ //unpack txnid, length, bit, data for non-outermost, non-innermost
+ for (i = ule->num_cuxrs + 1; i < ule->num_cuxrs + ule->num_puxrs - 1; i++) {
+ UXR uxr = ule->uxrs + i;
+ p += uxr_unpack_txnid(uxr, p);
+ p += uxr_unpack_type_and_length(uxr, p);
+ p += uxr_unpack_data(uxr, p);
+ }
+ {
+ //Just unpack txnid for innermost
+ UXR innermost = ule->uxrs + ule->num_cuxrs + ule->num_puxrs - 1;
+ p += uxr_unpack_txnid(innermost, p);
+ }
+ }
+ break;
+ default:
+ invariant(false);
+ }
+
+#if ULE_DEBUG
+ size_t memsize = le_memsize_from_ule(ule);
+ assert(p == ((uint8_t*)le) + memsize);
+#endif
+}
+
+static inline size_t uxr_pack_txnid(UXR uxr, uint8_t *p) {
+ *(TXNID*)p = toku_htod64(uxr->xid);
+ return sizeof(TXNID);
+}
+
+static inline size_t uxr_pack_type_and_length(UXR uxr, uint8_t *p) {
+ size_t rval = 1;
+ *p = uxr->type;
+ if (uxr_is_insert(uxr)) {
+ *(uint32_t*)(p+1) = toku_htod32(uxr->vallen);
+ rval += sizeof(uint32_t);
+ }
+ return rval;
+}
+
+static inline size_t uxr_pack_length_and_bit(UXR uxr, uint8_t *p) {
+ uint32_t length_and_bit;
+ if (uxr_is_insert(uxr)) {
+ length_and_bit = INSERT_LENGTH(uxr->vallen);
+ } else {
+ length_and_bit = DELETE_LENGTH(uxr->vallen);
+ }
+ *(uint32_t*)p = toku_htod32(length_and_bit);
+ return sizeof(uint32_t);
+}
+
+static inline size_t uxr_pack_data(UXR uxr, uint8_t *p) {
+ if (uxr_is_insert(uxr)) {
+ memcpy(p, uxr->valp, uxr->vallen);
+ return uxr->vallen;
+ }
+ return 0;
+}
+
+static inline size_t uxr_unpack_txnid(UXR uxr, uint8_t *p) {
+ uxr->xid = toku_dtoh64(*(TXNID*)p);
+ return sizeof(TXNID);
+}
+
+static inline size_t uxr_unpack_type_and_length(UXR uxr, uint8_t *p) {
+ size_t rval = 1;
+ uxr->type = *p;
+ if (uxr_is_insert(uxr)) {
+ uxr->vallen = toku_dtoh32(*(uint32_t*)(p+1));
+ rval += sizeof(uint32_t);
+ }
+ return rval;
+}
+
+static inline size_t uxr_unpack_length_and_bit(UXR uxr, uint8_t *p) {
+ uint32_t length_and_bit = toku_dtoh32(*(uint32_t*)p);
+ if (IS_INSERT(length_and_bit)) {
+ uxr->type = XR_INSERT;
+ uxr->vallen = GET_LENGTH(length_and_bit);
+ } else {
+ uxr->type = XR_DELETE;
+ uxr->vallen = 0;
+ }
+ return sizeof(uint32_t);
+}
+
+static inline size_t uxr_unpack_data(UXR uxr, uint8_t *p) {
+ if (uxr_is_insert(uxr)) {
+ uxr->valp = p;
+ return uxr->vallen;
+ }
+ return 0;
+}
+
+// executed too often to be worth making threadsafe
+static inline void update_le_status(ULE ule, size_t memsize) {
+ if (ule->num_cuxrs > LE_STATUS_VAL(LE_MAX_COMMITTED_XR))
+ LE_STATUS_VAL(LE_MAX_COMMITTED_XR) = ule->num_cuxrs;
+ if (ule->num_puxrs > LE_STATUS_VAL(LE_MAX_PROVISIONAL_XR))
+ LE_STATUS_VAL(LE_MAX_PROVISIONAL_XR) = ule->num_puxrs;
+ if (ule->num_cuxrs > MAX_TRANSACTION_RECORDS)
+ LE_STATUS_VAL(LE_EXPANDED)++;
+ if (memsize > LE_STATUS_VAL(LE_MAX_MEMSIZE))
+ LE_STATUS_VAL(LE_MAX_MEMSIZE) = memsize;
+}
+
+// Purpose is to return a newly allocated leaf entry in packed format, or
+// return null if leaf entry should be destroyed (if no transaction records
+// are for inserts).
+// Transaction records in packed le are stored inner to outer (first xr is
+// innermost), with some information extracted out of the transaction records
+// into the header.
+// Transaction records in ule are stored outer to inner (uxr[0] is outermost).
+// Takes 'ule' and creates 'new_leafentry_p'.
+int le_pack(
+ ULE ule,
+ bn_data* data_buffer,
+ uint32_t idx,
+ void* keyp,
+ uint32_t keylen,
+ uint32_t old_keylen,
+ uint32_t old_le_size,
+ LEAFENTRY* const new_leafentry_p,
+ void** const maybe_free) {
+
+ invariant(ule->num_cuxrs > 0);
+ invariant(ule->uxrs[0].xid == TXNID_NONE);
+ int rval;
+ size_t memsize = 0;
+ {
+ // The unpacked leafentry may contain no inserts anywhere on its stack.
+        // If so, there is no leafentry to pack and we should return NULL.
+        // First check the stack for any insert; if none is found, return NULL
+        // and exit the function, otherwise goto found_insert and proceed with
+        // packing the leafentry.
+ uint32_t i;
+ for (i = 0; i < ule->num_cuxrs + ule->num_puxrs; i++) {
+ if (uxr_is_insert(&ule->uxrs[i])) {
+ goto found_insert;
+ }
+ }
+ if (data_buffer && old_le_size > 0) {
+ // must pass old_keylen and old_le_size, since that's what is
+ // actually stored in data_buffer
+ data_buffer->delete_leafentry(idx, old_keylen, old_le_size);
+ }
+ *new_leafentry_p = NULL;
+ rval = 0;
+ goto cleanup;
+ }
+found_insert:
+ memsize = le_memsize_from_ule(ule);
+ LEAFENTRY new_leafentry;
+ get_space_for_le(
+ data_buffer,
+ idx,
+ keyp,
+ keylen,
+ old_keylen,
+ old_le_size,
+ memsize,
+ &new_leafentry,
+ maybe_free);
+
+ //p always points to first unused byte after leafentry we are packing
+ uint8_t *p;
+ invariant(ule->num_cuxrs>0);
+ //Type specific data
+ if (ule->num_cuxrs == 1 && ule->num_puxrs == 0) {
+ //Pack a 'clean leafentry' (no uncommitted transactions, only one
+ //committed value)
+ new_leafentry->type = LE_CLEAN;
+
+ uint32_t vallen = ule->uxrs[0].vallen;
+ //Store vallen
+ new_leafentry->u.clean.vallen = toku_htod32(vallen);
+
+ //Store actual val
+ memcpy(new_leafentry->u.clean.val, ule->uxrs[0].valp, vallen);
+
+ //Set p to after leafentry
+ p = new_leafentry->u.clean.val + vallen;
+ } else {
+ uint32_t i;
+ //Pack an 'mvcc leafentry'
+ new_leafentry->type = LE_MVCC;
+
+ new_leafentry->u.mvcc.num_cxrs = toku_htod32(ule->num_cuxrs);
+ // invariant makes cast that follows ok, although not sure if
+ // check should be "< MAX_TRANSACTION_RECORDS" or
+ // "< MAX_TRANSACTION_RECORDS - 1"
+ invariant(ule->num_puxrs < MAX_TRANSACTION_RECORDS);
+ new_leafentry->u.mvcc.num_pxrs = (uint8_t)ule->num_puxrs;
+
+ p = new_leafentry->u.mvcc.xrs;
+
+ //pack interesting TXNIDs inner to outer.
+ if (ule->num_puxrs!=0) {
+ UXR outermost = ule->uxrs + ule->num_cuxrs;
+ p += uxr_pack_txnid(outermost, p);
+ }
+ //pack other TXNIDS (not for ule->uxrs[0])
+ for (i = 0; i < ule->num_cuxrs - 1; i++) {
+ p += uxr_pack_txnid(ule->uxrs + ule->num_cuxrs - 1 - i, p);
+ }
+
+ //pack interesting lengths inner to outer.
+ if (ule->num_puxrs!=0) {
+ UXR innermost = ule->uxrs + ule->num_cuxrs + ule->num_puxrs - 1;
+ p += uxr_pack_length_and_bit(innermost, p);
+ }
+ for (i = 0; i < ule->num_cuxrs; i++) {
+ p += uxr_pack_length_and_bit(ule->uxrs + ule->num_cuxrs - 1 - i, p);
+ }
+
+ //pack interesting values inner to outer
+ if (ule->num_puxrs!=0) {
+ UXR innermost = ule->uxrs + ule->num_cuxrs + ule->num_puxrs - 1;
+ p += uxr_pack_data(innermost, p);
+ }
+ for (i = 0; i < ule->num_cuxrs; i++) {
+ p += uxr_pack_data(ule->uxrs + ule->num_cuxrs - 1 - i, p);
+ }
+
+ //pack provisional xrs outer to inner
+ if (ule->num_puxrs > 1) {
+ {
+ //pack length, bit, data for outermost uncommitted
+ UXR outermost = ule->uxrs + ule->num_cuxrs;
+ p += uxr_pack_type_and_length(outermost, p);
+ p += uxr_pack_data(outermost, p);
+ }
+ //pack txnid, length, bit, data for non-outermost, non-innermost
+ for (i = ule->num_cuxrs + 1;
+ i < ule->num_cuxrs + ule->num_puxrs - 1;
+ i++) {
+ UXR uxr = ule->uxrs + i;
+ p += uxr_pack_txnid(uxr, p);
+ p += uxr_pack_type_and_length(uxr, p);
+ p += uxr_pack_data(uxr, p);
+ }
+ {
+ //Just pack txnid for innermost
+ UXR innermost = ule->uxrs + ule->num_cuxrs + ule->num_puxrs - 1;
+ p += uxr_pack_txnid(innermost, p);
+ }
+ }
+ }
+
+ //p points to first unused byte after packed leafentry
+
+ size_t bytes_written;
+ bytes_written = (size_t)p - (size_t)new_leafentry;
+ invariant(bytes_written == memsize);
+
+#if ULE_DEBUG
+ if (omt) { //Disable recursive debugging.
+ size_t memsize_verify = leafentry_memsize(new_leafentry);
+ invariant(memsize_verify == memsize);
+
+ ULE_S ule_tmp;
+ le_unpack(&ule_tmp, new_leafentry);
+
+ memsize_verify = le_memsize_from_ule(&ule_tmp);
+ invariant(memsize_verify == memsize);
+ //Debugging code inside le_unpack will repack and verify it is the same.
+
+ LEAFENTRY le_copy;
+
+ int r_tmp = le_pack(&ule_tmp, &memsize_verify, &memsize_verify,
+ &le_copy);
+ invariant(r_tmp==0);
+ invariant(memsize_verify == memsize);
+
+ invariant(memcmp(new_leafentry, le_copy, memsize)==0);
+ toku_free(le_copy);
+
+ ule_cleanup(&ule_tmp);
+ }
+#endif
+
+ *new_leafentry_p = (LEAFENTRY)new_leafentry;
+ rval = 0;
+cleanup:
+ update_le_status(ule, memsize);
+ return rval;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Following functions provide convenient access to a packed leafentry.
+
+//Requires:
+// Leafentry that ule represents should not be destroyed (is not just all
+// deletes)
+size_t le_memsize_from_ule (ULE ule) {
+ invariant(ule->num_cuxrs);
+ size_t rval;
+ if (ule->num_cuxrs == 1 && ule->num_puxrs == 0) {
+ UXR committed = ule->uxrs;
+ invariant(uxr_is_insert(committed));
+ rval = 1 //type
+ +4 //vallen
+ +committed->vallen; //actual val
+ } else {
+ rval = 1 //type
+ +4 //num_cuxrs
+ +1 //num_puxrs
+ +4*(ule->num_cuxrs) //types+lengths for committed
+ +8*(ule->num_cuxrs + ule->num_puxrs - 1); //txnids (excluding
+ //superroot)
+ uint32_t i;
+ //Count data from committed uxrs and innermost puxr
+ for (i = 0; i < ule->num_cuxrs; i++) {
+ UXR uxr = &ule->uxrs[i];
+ if (uxr_is_insert(uxr)) {
+ rval += uxr->vallen; //actual val
+ }
+ }
+ if (ule->num_puxrs) {
+ UXR uxr = ule_get_innermost_uxr(ule);
+ if (uxr_is_insert(uxr)) {
+ rval += uxr->vallen; //actual val
+ }
+ rval += 4; //type+length for innermost puxr
+ rval += 1*(ule->num_puxrs - 1); //type for remaining puxrs.
+ //Count data and lengths from other puxrs
+ for (i = 0; i < ule->num_puxrs-1; i++) {
+ uxr = &ule->uxrs[i+ule->num_cuxrs];
+ if (uxr_is_insert(uxr)) {
+ rval += 4 + uxr->vallen; //length plus actual val
+ }
+ }
+ }
+ }
+ return rval;
+}
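+
+// Worked example (hypothetical sizes): a clean leafentry holding a 10-byte
+// committed value needs 1 (type) + 4 (vallen) + 10 = 15 bytes.  An mvcc
+// leafentry with one committed 10-byte insert and one provisional 6-byte
+// insert needs 1 + 4 + 1 + 4 + 8 + 10 + 6 + 4 = 38 bytes by the arithmetic
+// above.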
+
+// TODO: rename
+size_t leafentry_rest_memsize(
+ uint32_t num_puxrs,
+ uint32_t num_cuxrs,
+ uint8_t* start) {
+
+ UXR_S uxr;
+ size_t lengths = 0;
+ uint8_t* p = start;
+
+ //Skip TXNIDs
+ if (num_puxrs!=0) {
+ p += sizeof(TXNID);
+ }
+ p += (num_cuxrs-1)*sizeof(TXNID);
+
+ //Retrieve interesting lengths inner to outer.
+ if (num_puxrs!=0) {
+ p += uxr_unpack_length_and_bit(&uxr, p);
+ if (uxr_is_insert(&uxr)) {
+ lengths += uxr.vallen;
+ }
+ }
+ uint32_t i;
+ for (i = 0; i < num_cuxrs; i++) {
+ p += uxr_unpack_length_and_bit(&uxr, p);
+ if (uxr_is_insert(&uxr)) {
+ lengths += uxr.vallen;
+ }
+ }
+ //Skip all interesting 'data'
+ p += lengths;
+
+ //unpack provisional xrs outer to inner
+ if (num_puxrs > 1) {
+ {
+ p += uxr_unpack_type_and_length(&uxr, p);
+ p += uxr_unpack_data(&uxr, p);
+ }
+ //unpack txnid, length, bit, data for non-outermost, non-innermost
+ for (i = 0; i < num_puxrs - 2; i++) {
+ p += uxr_unpack_txnid(&uxr, p);
+ p += uxr_unpack_type_and_length(&uxr, p);
+ p += uxr_unpack_data(&uxr, p);
+ }
+ {
+ //Just unpack txnid for innermost
+ p += uxr_unpack_txnid(&uxr, p);
+ }
+ }
+ size_t rval = (size_t)p - (size_t)start;
+ return rval;
+}
+
+size_t leafentry_memsize (LEAFENTRY le) {
+ size_t rval = 0;
+
+ uint8_t type = le->type;
+
+ uint8_t *p = NULL;
+ switch (type) {
+ case LE_CLEAN: {
+ uint32_t vallen = toku_dtoh32(le->u.clean.vallen);
+ rval = LE_CLEAN_MEMSIZE(vallen);
+ break;
+ }
+ case LE_MVCC: {
+ p = le->u.mvcc.xrs;
+ uint32_t num_cuxrs = toku_dtoh32(le->u.mvcc.num_cxrs);
+ invariant(num_cuxrs);
+ uint32_t num_puxrs = le->u.mvcc.num_pxrs;
+ p += leafentry_rest_memsize(num_puxrs, num_cuxrs, p);
+ rval = (size_t)p - (size_t)le;
+ break;
+ }
+ default:
+ invariant(false);
+ }
+#if ULE_DEBUG
+ ULE_S ule;
+ le_unpack(&ule, le);
+ size_t slow_rval = le_memsize_from_ule(&ule);
+ if (slow_rval!=rval) {
+        int r = print_klpair(stderr, nullptr, 0, le);
+ fprintf(stderr, "\nSlow: [%" PRIu64 "] Fast: [%" PRIu64 "]\n", slow_rval, rval);
+ invariant(r==0);
+ }
+ assert(slow_rval == rval);
+ ule_cleanup(&ule);
+#endif
+ return rval;
+}
+
+size_t leafentry_disksize (LEAFENTRY le) {
+ return leafentry_memsize(le);
+}
+
+bool le_is_clean(LEAFENTRY le) {
+ uint8_t type = le->type;
+ uint32_t rval;
+ switch (type) {
+ case LE_CLEAN:
+ rval = true;
+ break;
+ case LE_MVCC:;
+ rval = false;
+ break;
+ default:
+ invariant(false);
+ }
+ return rval;
+}
+
+int le_latest_is_del(LEAFENTRY le) {
+ int rval;
+ uint8_t type = le->type;
+ uint8_t *p;
+ switch (type) {
+ case LE_CLEAN: {
+ rval = 0;
+ break;
+ }
+ case LE_MVCC: {
+ UXR_S uxr;
+ uint32_t num_cuxrs = toku_dtoh32(le->u.mvcc.num_cxrs);
+ invariant(num_cuxrs);
+ uint32_t num_puxrs = le->u.mvcc.num_pxrs;
+
+ //Position p.
+ p = le->u.mvcc.xrs;
+
+ //Skip TXNIDs
+ if (num_puxrs!=0) {
+ p += sizeof(TXNID);
+ }
+ p += (num_cuxrs-1)*sizeof(TXNID);
+
+ p += uxr_unpack_length_and_bit(&uxr, p);
+ rval = uxr_is_delete(&uxr);
+ break;
+ }
+ default:
+ invariant(false);
+ }
+#if ULE_DEBUG
+ ULE_S ule;
+ le_unpack(&ule, le);
+ UXR uxr = ule_get_innermost_uxr(&ule);
+ int slow_rval = uxr_is_delete(uxr);
+ assert((rval==0) == (slow_rval==0));
+ ule_cleanup(&ule);
+#endif
+ return rval;
+}
+
+
+//
+// returns true if the outermost provisional transaction id on the leafentry's
+// stack matches the outermost transaction id in xids
+// It is used to determine if a broadcast commit/abort message (look in ft-ops.c)
+// should be applied to this leafentry
+// If the outermost transactions match, then the broadcast commit/abort should
+// be applied
+//
+bool le_has_xids(LEAFENTRY le, XIDS xids) {
+ //Read num_uxrs
+ uint32_t num_xids = toku_xids_get_num_xids(xids);
+ invariant(num_xids > 0); //Disallow checking for having TXNID_NONE
+ TXNID xid = toku_xids_get_xid(xids, 0);
+ invariant(xid!=TXNID_NONE);
+
+ bool rval = (le_outermost_uncommitted_xid(le) == xid);
+ return rval;
+}
+
+void* le_latest_val_and_len (LEAFENTRY le, uint32_t *len) {
+ uint8_t type = le->type;
+ void *valp;
+
+ uint8_t *p;
+ switch (type) {
+ case LE_CLEAN:
+ *len = toku_dtoh32(le->u.clean.vallen);
+ valp = le->u.clean.val;
+ break;
+ case LE_MVCC:;
+ UXR_S uxr;
+ uint32_t num_cuxrs;
+ num_cuxrs = toku_dtoh32(le->u.mvcc.num_cxrs);
+ invariant(num_cuxrs);
+ uint32_t num_puxrs;
+ num_puxrs = le->u.mvcc.num_pxrs;
+
+ //Position p.
+ p = le->u.mvcc.xrs;
+
+ //Skip TXNIDs
+ if (num_puxrs!=0) {
+ p += sizeof(TXNID);
+ }
+ p += (num_cuxrs-1)*sizeof(TXNID);
+
+ p += uxr_unpack_length_and_bit(&uxr, p);
+ if (uxr_is_insert(&uxr)) {
+ *len = uxr.vallen;
+ valp = p + (num_cuxrs - 1 + (num_puxrs!=0))*sizeof(uint32_t);
+ } else {
+ *len = 0;
+ valp = NULL;
+ }
+ break;
+ default:
+ invariant(false);
+ }
+#if ULE_DEBUG
+ ULE_S ule;
+ le_unpack(&ule, le);
+ UXR uxr = ule_get_innermost_uxr(&ule);
+ void *slow_valp;
+ uint32_t slow_len;
+ if (uxr_is_insert(uxr)) {
+ slow_valp = uxr->valp;
+ slow_len = uxr->vallen;
+ } else {
+ slow_valp = NULL;
+ slow_len = 0;
+ }
+ assert(slow_valp == le_latest_val(le));
+ assert(slow_len == le_latest_vallen(le));
+ assert(valp==slow_valp);
+ assert(*len==slow_len);
+ ule_cleanup(&ule);
+#endif
+ return valp;
+}
+
+//DEBUG ONLY, can be slow
+void* le_latest_val (LEAFENTRY le) {
+ ULE_S ule;
+ le_unpack(&ule, le);
+ UXR uxr = ule_get_innermost_uxr(&ule);
+ void *slow_rval;
+ if (uxr_is_insert(uxr))
+ slow_rval = uxr->valp;
+ else
+ slow_rval = NULL;
+ ule_cleanup(&ule);
+ return slow_rval;
+}
+
+//needs to be fast because it is used for statistics.
+uint32_t le_latest_vallen (LEAFENTRY le) {
+ uint32_t rval;
+ uint8_t type = le->type;
+ uint8_t *p;
+ switch (type) {
+ case LE_CLEAN:
+ rval = toku_dtoh32(le->u.clean.vallen);
+ break;
+ case LE_MVCC:;
+ UXR_S uxr;
+ uint32_t num_cuxrs;
+ num_cuxrs = toku_dtoh32(le->u.mvcc.num_cxrs);
+ invariant(num_cuxrs);
+ uint32_t num_puxrs;
+ num_puxrs = le->u.mvcc.num_pxrs;
+
+ //Position p.
+ p = le->u.mvcc.xrs;
+
+ //Skip TXNIDs
+ if (num_puxrs!=0) {
+ p += sizeof(TXNID);
+ }
+ p += (num_cuxrs-1)*sizeof(TXNID);
+
+ uxr_unpack_length_and_bit(&uxr, p);
+ if (uxr_is_insert(&uxr)) {
+ rval = uxr.vallen;
+ } else {
+ rval = 0;
+ }
+ break;
+ default:
+ invariant(false);
+ }
+#if ULE_DEBUG
+ ULE_S ule;
+ le_unpack(&ule, le);
+ UXR uxr = ule_get_innermost_uxr(&ule);
+ uint32_t slow_rval;
+ if (uxr_is_insert(uxr))
+ slow_rval = uxr->vallen;
+ else
+ slow_rval = 0;
+ ule_cleanup(&ule);
+ invariant(slow_rval == rval);
+#endif
+ return rval;
+}
+
+uint64_t le_outermost_uncommitted_xid (LEAFENTRY le) {
+ uint64_t rval = TXNID_NONE;
+ uint8_t type = le->type;
+
+ uint8_t *p;
+ switch (type) {
+ case LE_CLEAN:
+ break;
+ case LE_MVCC:;
+ UXR_S uxr;
+ uint32_t num_puxrs = le->u.mvcc.num_pxrs;
+
+ if (num_puxrs) {
+ p = le->u.mvcc.xrs;
+ uxr_unpack_txnid(&uxr, p);
+ rval = uxr.xid;
+ }
+ break;
+ }
+#if ULE_DEBUG
+ ULE_S ule;
+ le_unpack(&ule, le);
+ TXNID slow_rval = 0;
+ if (ule.num_puxrs > 0)
+ slow_rval = ule.uxrs[ule.num_cuxrs].xid;
+ assert(rval==slow_rval);
+ ule_cleanup(&ule);
+#endif
+ return rval;
+}
+
+
+//Optimization not required. This is a debug only function.
+//Print a leafentry out in human-readable format
+int print_klpair (FILE *outf, const void* keyp, uint32_t keylen, LEAFENTRY le) {
+    if (!le) { fprintf(outf, "NULL"); return 0; }
+    ULE_S ule;
+    le_unpack(&ule, le);
+    uint32_t i;
+    invariant(ule.num_cuxrs > 0);
+    UXR uxr;
+ if (keyp) {
+ fprintf(outf, "{key=");
+ toku_print_BYTESTRING(outf, keylen, (char *) keyp);
+ }
+ for (i = 0; i < ule.num_cuxrs+ule.num_puxrs; i++) {
+ // fprintf(outf, "\n%*s", i+1, " "); //Nested indenting
+ uxr = &ule.uxrs[i];
+ char prov = i < ule.num_cuxrs ? 'c' : 'p';
+ fprintf(outf, " ");
+ if (uxr_is_placeholder(uxr))
+ fprintf(outf, "P: xid=%016" PRIx64, uxr->xid);
+ else if (uxr_is_delete(uxr))
+ fprintf(outf, "%cD: xid=%016" PRIx64, prov, uxr->xid);
+ else {
+ assert(uxr_is_insert(uxr));
+ fprintf(outf, "%cI: xid=%016" PRIx64 " val=", prov, uxr->xid);
+ toku_print_BYTESTRING(outf, uxr->vallen, (char *) uxr->valp);
+ }
+ }
+ fprintf(outf, "}");
+ ule_cleanup(&ule);
+ return 0;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// This layer of abstraction (ule_xxx) knows the structure of the unpacked
+// leafentry and no other structure.
+//
+
+// ule constructor
+// Note that transaction 0 is explicit in the ule
+static inline void ule_init_empty_ule(ULE ule) {
+ ule->num_cuxrs = 1;
+ ule->num_puxrs = 0;
+ ule->uxrs = ule->uxrs_static;
+ ule->uxrs[0] = committed_delete;
+}
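+// (An "empty" ule therefore starts out as a single committed delete; if no
+// insert is ever pushed on top of it, le_pack() above returns NULL instead of
+// packing it.)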
+
+static inline int32_t min_i32(int32_t a, int32_t b) {
+ int32_t rval = a < b ? a : b;
+ return rval;
+}
+
+///////////////////
+// Implicit promotion logic:
+//
+// If the leafentry has already been promoted, there is nothing to do.
+// We have two transaction stacks (one from message, one from leaf entry).
+// We want to implicitly promote transactions newer than (but not including)
+// the innermost common ancestor (ICA) of the two stacks of transaction ids. We
+// know that this is the right thing to do because each transaction with an id
+// greater (later) than the ICA must have been either committed or aborted.
+// If it was aborted then we would have seen an abort message and removed the
+// xid from the stack of transaction records. So any transaction still on the
+// leaf entry stack must have been successfully committed.
+//
+// After finding the ICA, promote the transactions later than the ICA by copying
+// the value and type from the innermost transaction record of the leafentry to
+// the transaction record of the ICA, keeping the transaction id of the ICA.
+// Outermost xid is zero for both ule and xids<>
+//
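+// Illustration with hypothetical xids: suppose the ule stack is committed [0]
+// and provisional [100, 200, 250], and the message's xid stack (outermost
+// uncommitted first) is [100, 200, 300].  The stacks agree through 200, so the
+// ICA is 200; the innermost value (250's) is copied into the record holding
+// xid 200 and the provisional stack becomes [100, 200].
+//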
+static void ule_do_implicit_promotions(ULE ule, XIDS xids) {
+ //Optimization for (most) common case.
+ //No commits necessary if everything is already committed.
+ if (ule->num_puxrs > 0) {
+ int num_xids = toku_xids_get_num_xids(xids);
+ invariant(num_xids>0);
+ uint32_t max_index = ule->num_cuxrs + min_i32(ule->num_puxrs, num_xids) - 1;
+ uint32_t ica_index = max_index;
+ uint32_t index;
+ for (index = ule->num_cuxrs; index <= max_index; index++) {
+ TXNID current_msg_xid = toku_xids_get_xid(xids, index - ule->num_cuxrs);
+ TXNID current_ule_xid = ule_get_xid(ule, index);
+ if (current_msg_xid != current_ule_xid) {
+ //ica is innermost transaction with matching xids.
+ ica_index = index - 1;
+ break;
+ }
+ }
+
+ if (ica_index < ule->num_cuxrs) {
+ invariant(ica_index == ule->num_cuxrs - 1);
+ ule_promote_provisional_innermost_to_committed(ule);
+        } else if (ica_index < ule->num_cuxrs + ule->num_puxrs - 1) {
+            //Promote the innermost value into the ica's record.  (If the ica
+            //is already the innermost uxr in the leafentry, no promotion is
+            //necessary and this branch is skipped.)
+            ule_promote_provisional_innermost_to_index(ule, ica_index);
+ }
+
+ }
+}
+
+static void ule_promote_provisional_innermost_to_committed(ULE ule) {
+ //Must be something to promote.
+ invariant(ule->num_puxrs);
+ //Take value (or delete flag) from innermost.
+ //Take TXNID from outermost uncommitted txn
+ //"Delete" provisional stack
+ //add one UXR that is committed using saved TXNID,val/delete flag
+
+ UXR old_innermost_uxr = ule_get_innermost_uxr(ule);
+ assert(!uxr_is_placeholder(old_innermost_uxr));
+
+ UXR old_outermost_uncommitted_uxr = &ule->uxrs[ule->num_cuxrs];
+
+ ule->num_puxrs = 0; //Discard all provisional uxrs.
+ if (uxr_is_delete(old_innermost_uxr)) {
+ ule_push_delete_uxr(ule, true, old_outermost_uncommitted_uxr->xid);
+ } else {
+ ule_push_insert_uxr(ule, true,
+ old_outermost_uncommitted_uxr->xid,
+ old_innermost_uxr->vallen,
+ old_innermost_uxr->valp);
+ }
+}
+
+static void ule_try_promote_provisional_outermost(
+ ULE ule,
+ TXNID oldest_possible_live_xid) {
+// Effect: If there is a provisional record whose outermost xid is older than
+// the oldest known referenced_xid, promote it to committed.
+ if (ule->num_puxrs > 0 &&
+ ule_get_xid(ule, ule->num_cuxrs) < oldest_possible_live_xid) {
+ ule_promote_provisional_innermost_to_committed(ule);
+ }
+}
+
+// Purpose is to promote the value (and type) of the innermost transaction
+// record to the uxr at the specified index (keeping the txnid of the uxr at
+// specified index.)
+static void ule_promote_provisional_innermost_to_index(
+ ULE ule,
+ uint32_t index) {
+ //Must not promote to committed portion of stack.
+ invariant(index >= ule->num_cuxrs);
+ //Must actually be promoting.
+ invariant(index < ule->num_cuxrs + ule->num_puxrs - 1);
+ UXR old_innermost_uxr = ule_get_innermost_uxr(ule);
+ assert(!uxr_is_placeholder(old_innermost_uxr));
+ TXNID new_innermost_xid = ule->uxrs[index].xid;
+ //Discard old uxr at index (and everything inner)
+ ule->num_puxrs = index - ule->num_cuxrs;
+ if (uxr_is_delete(old_innermost_uxr)) {
+ ule_push_delete_uxr(ule, false, new_innermost_xid);
+ } else {
+ ule_push_insert_uxr(
+ ule,
+ false,
+ new_innermost_xid,
+ old_innermost_uxr->vallen,
+ old_innermost_uxr->valp);
+ }
+}
+
+///////////////////
+// All ule_apply_xxx operations are done after implicit promotions,
+// so the innermost transaction record in the leafentry is the ICA.
+//
+
+
+// Purpose is to apply an insert message to this leafentry:
+static inline int64_t ule_apply_insert_no_overwrite(
+ ULE ule,
+ XIDS xids,
+ uint32_t vallen,
+ void* valp) {
+
+ invariant(IS_VALID_LEN(vallen));
+ int64_t retval = 0;
+ UXR old_innermost_uxr = ule_get_innermost_uxr(ule);
+ // If something exists, don't overwrite
+ if (uxr_is_insert(old_innermost_uxr)) {
+ retval = -1;
+ return retval;
+ }
+ ule_prepare_for_new_uxr(ule, xids);
+ // xid of transaction doing this insert
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
+ ule_push_insert_uxr(ule, this_xid == TXNID_NONE, this_xid, vallen, valp);
+ return retval;
+}
+
+// Purpose is to apply an insert message to this leafentry:
+static inline int64_t ule_apply_insert(
+ ULE ule,
+ XIDS xids,
+ uint32_t vallen,
+ void* valp) {
+
+ invariant(IS_VALID_LEN(vallen));
+ int64_t retval = 0;
+ UXR old_innermost_uxr = ule_get_innermost_uxr(ule);
+ // If something exists, overwrite
+ if (uxr_is_insert(old_innermost_uxr)) {
+ retval = -1;
+ }
+ ule_prepare_for_new_uxr(ule, xids);
+ // xid of transaction doing this insert
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
+ ule_push_insert_uxr(ule, this_xid == TXNID_NONE, this_xid, vallen, valp);
+ return retval;
+}
+
+// Purpose is to apply a delete message to this leafentry:
+static inline int64_t ule_apply_delete(ULE ule, XIDS xids) {
+ int64_t retval = 0;
+ UXR old_innermost_uxr = ule_get_innermost_uxr(ule);
+ if (FT_UNLIKELY(uxr_is_delete(old_innermost_uxr))) {
+ retval = 1;
+ }
+ ule_prepare_for_new_uxr(ule, xids);
+ // xid of transaction doing this delete
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
+ ule_push_delete_uxr(ule, this_xid == TXNID_NONE, this_xid);
+ return retval;
+}
+
+// First, discard anything done earlier by this transaction.
+// Then, add placeholders if necessary. This transaction may be nested within
+// outer transactions that are newer than the newest (innermost) transaction in
+// the leafentry. If so, record those outer transactions in the leafentry
+// with placeholders.
+static inline void ule_prepare_for_new_uxr(ULE ule, XIDS xids) {
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
+ //This is for LOADER_USE_PUTS or transactionless environment
+ //where messages use XIDS of 0
+ if (this_xid == TXNID_NONE && ule_get_innermost_xid(ule) == TXNID_NONE) {
+ ule_remove_innermost_uxr(ule);
+ } else if (ule->num_puxrs > 0 && ule_get_innermost_xid(ule) == this_xid) {
+ // case where we are transactional and xids stack matches ule stack
+ ule_remove_innermost_uxr(ule);
+ } else {
+ // case where we are transactional and xids stack does not match ule
+ // stack
+ ule_add_placeholders(ule, xids);
+ }
+}
+
+// Purpose is to apply an abort message to this leafentry.
+// If the aborted transaction (the transaction whose xid is the innermost xid
+// in the id stack passed in the message), has not modified this leafentry,
+// then there is nothing to be done.
+// If this transaction did modify the leafentry, then undo whatever it did (by
+// removing the transaction record (uxr) and any placeholders underneath).
+// Remember, the innermost uxr can only be an insert or a delete, not a
+// placeholder.
+static inline int64_t ule_apply_abort(ULE ule, XIDS xids) {
+ int64_t retval = 0;
+ // xid of transaction doing this abort
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
+ invariant(this_xid!=TXNID_NONE);
+ UXR innermost = ule_get_innermost_uxr(ule);
+ // need to check for provisional entries in ule, otherwise
+ // there is nothing to abort, not checking this may result
+ // in a bug where the most recently committed has same xid
+ // as the XID's innermost
+ if (ule->num_puxrs > 0 && innermost->xid == this_xid) {
+ // if this is a rollback of a delete of a new ule, return 0
+ // (i.e. double delete)
+ if (uxr_is_delete(innermost)) {
+ if (ule->num_puxrs == 1 && ule->num_cuxrs == 1 &&
+ uxr_is_delete(&(ule->uxrs[0]))) {
+ retval = 0;
+ } else {
+ retval = 1;
+ }
+        } else if (uxr_is_insert(innermost)) {
+            // if this is a rollback of an insert of an existing ule, return 0
+            // (i.e. double insert)
+            if (ule->num_puxrs == 1 && ule->num_cuxrs == 1 &&
+                uxr_is_insert(&(ule->uxrs[0]))) {
+                retval = 0;
+            } else {
+                retval = -1;
+            }
+        }
+        invariant(ule->num_puxrs>0);
+ ule_remove_innermost_uxr(ule);
+ ule_remove_innermost_placeholders(ule);
+ }
+ invariant(ule->num_cuxrs > 0);
+ return retval;
+}
+
+static void ule_apply_broadcast_commit_all (ULE ule) {
+ ule->uxrs[0] = ule->uxrs[ule->num_puxrs + ule->num_cuxrs - 1];
+ ule->uxrs[0].xid = TXNID_NONE;
+ ule->num_puxrs = 0;
+ ule->num_cuxrs = 1;
+}
+
+// Purpose is to apply a commit message to this leafentry.
+// If the committed transaction (the transaction whose xid is the innermost xid
+// in the id stack passed in the message), has not modified this leafentry,
+// then there is nothing to be done.
+// Also, if there are no uncommitted transaction records there is nothing to do.
+// If this transaction did modify the leafentry, then promote whatever it did.
+// Remember, the innermost uxr can only be an insert or a delete, not a
+// placeholder.
+void ule_apply_commit(ULE ule, XIDS xids) {
+ // xid of transaction committing
+ TXNID this_xid = toku_xids_get_innermost_xid(xids);
+ invariant(this_xid!=TXNID_NONE);
+ // need to check for provisional entries in ule, otherwise
+    // there is nothing to commit, not checking this may result
+ // in a bug where the most recently committed has same xid
+ // as the XID's innermost
+ if (ule->num_puxrs > 0 && ule_get_innermost_xid(ule) == this_xid) {
+ // 3 cases:
+ //1- it's already a committed value (do nothing) (num_puxrs==0)
+ //2- it's provisional but root level (make a new committed value
+ // (num_puxrs==1)
+ //3- it's provisional and not root (promote); (num_puxrs>1)
+ if (ule->num_puxrs == 1) { //new committed value
+ ule_promote_provisional_innermost_to_committed(ule);
+ } else if (ule->num_puxrs > 1) {
+ //ule->uxrs[ule->num_cuxrs+ule->num_puxrs-1] is the innermost
+ // (this transaction)
+ //ule->uxrs[ule->num_cuxrs+ule->num_puxrs-2] is the 2nd innermost
+ //We want to promote the innermost uxr one level out.
+ ule_promote_provisional_innermost_to_index(
+ ule,
+ ule->num_cuxrs+ule->num_puxrs-2);
+ }
+ }
+}
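+
+// Illustration with hypothetical xids: with committed [0] and provisional
+// [100, 200], a commit whose innermost xid is 200 promotes 200's value into
+// the record holding xid 100 (case 3 above), leaving provisional [100]; a
+// later commit of xid 100 then promotes that value to a new committed record
+// (case 2 above).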
+
+///////////////////
+// Helper functions called from the functions above:
+//
+
+// Purpose is to record an insert for this transaction (and set type correctly).
+static inline void ule_push_insert_uxr(
+ ULE ule,
+ bool is_committed, TXNID xid,
+ uint32_t vallen,
+ void* valp) {
+
+ UXR uxr = ule_get_first_empty_uxr(ule);
+ if (is_committed) {
+ invariant(ule->num_puxrs==0);
+ ule->num_cuxrs++;
+ } else {
+ ule->num_puxrs++;
+ }
+ uxr->xid = xid;
+ uxr->vallen = vallen;
+ uxr->valp = valp;
+ uxr->type = XR_INSERT;
+}
+
+// Purpose is to record a delete for this transaction. If this transaction
+// is the root transaction, then truly delete the leafentry by marking the
+// ule as empty.
+static inline void ule_push_delete_uxr(ULE ule, bool is_committed, TXNID xid) {
+ UXR uxr = ule_get_first_empty_uxr(ule);
+ if (is_committed) {
+ invariant(ule->num_puxrs==0);
+ ule->num_cuxrs++;
+ } else {
+ ule->num_puxrs++;
+ }
+ uxr->xid = xid;
+ uxr->type = XR_DELETE;
+}
+
+// Purpose is to push a placeholder on the top of the leafentry's transaction
+// stack.
+static inline void ule_push_placeholder_uxr(ULE ule, TXNID xid) {
+ invariant(ule->num_cuxrs>0);
+ UXR uxr = ule_get_first_empty_uxr(ule);
+ uxr->xid = xid;
+ uxr->type = XR_PLACEHOLDER;
+ ule->num_puxrs++;
+}
+
+// Return innermost transaction record.
+static inline UXR ule_get_innermost_uxr(ULE ule) {
+ invariant(ule->num_cuxrs > 0);
+ UXR rval = &(ule->uxrs[ule->num_cuxrs + ule->num_puxrs - 1]);
+ return rval;
+}
+
+// Return first empty transaction record
+static inline UXR ule_get_first_empty_uxr(ULE ule) {
+ invariant(ule->num_puxrs < MAX_TRANSACTION_RECORDS-1);
+ UXR rval = &(ule->uxrs[ule->num_cuxrs+ule->num_puxrs]);
+ return rval;
+}
+
+// Remove the innermost transaction (pop the leafentry's stack), undoing
+// whatever the innermost transaction did.
+static inline void ule_remove_innermost_uxr(ULE ule) {
+ //It is possible to remove the committed delete at first insert.
+ invariant(ule->num_cuxrs > 0);
+ if (ule->num_puxrs) {
+ ule->num_puxrs--;
+ } else {
+ //This is for LOADER_USE_PUTS or transactionless environment
+ //where messages use XIDS of 0
+ invariant(ule->num_cuxrs == 1);
+ invariant(ule_get_innermost_xid(ule)==TXNID_NONE);
+ ule->num_cuxrs--;
+ }
+}
+
+static inline TXNID ule_get_innermost_xid(ULE ule) {
+ TXNID rval = ule_get_xid(ule, ule->num_cuxrs + ule->num_puxrs - 1);
+ return rval;
+}
+
+static inline TXNID ule_get_xid(ULE ule, uint32_t index) {
+ invariant(index < ule->num_cuxrs + ule->num_puxrs);
+ TXNID rval = ule->uxrs[index].xid;
+ return rval;
+}
+
+// Purpose is to remove any placeholders from the top of the leaf stack (the
+// innermost recorded transactions), if necessary. This function is idempotent.
+// It makes no logical sense for a placeholder to be the innermost recorded
+// transaction record, so placeholders at the top of the stack are not legal.
+static void ule_remove_innermost_placeholders(ULE ule) {
+ UXR uxr = ule_get_innermost_uxr(ule);
+ while (uxr_is_placeholder(uxr)) {
+ invariant(ule->num_puxrs>0);
+ ule_remove_innermost_uxr(ule);
+ uxr = ule_get_innermost_uxr(ule);
+ }
+}
+
+// Purpose is to add placeholders to the top of the leaf stack (the innermost
+// recorded transactions), if necessary. This function is idempotent.
+// Note, after placeholders are added, an insert or delete will be added. This
+// function temporarily leaves the transaction stack in an illegal state (having
+// placeholders on top).
+static void ule_add_placeholders(ULE ule, XIDS xids) {
+ //Placeholders can be placed on top of the committed uxr.
+ invariant(ule->num_cuxrs > 0);
+
+ uint32_t num_xids = toku_xids_get_num_xids(xids);
+ // we assume that implicit promotion has happened
+ // when we get this call, so the number of xids MUST
+ // be greater than the number of provisional entries
+ invariant(num_xids >= ule->num_puxrs);
+ // make sure that the xids stack matches up to a certain amount
+ // this first for loop is just debug code
+ for (uint32_t i = 0; i < ule->num_puxrs; i++) {
+ TXNID current_msg_xid = toku_xids_get_xid(xids, i);
+ TXNID current_ule_xid = ule_get_xid(ule, i + ule->num_cuxrs);
+ invariant(current_msg_xid == current_ule_xid);
+ }
+ for (uint32_t i = ule->num_puxrs; i < num_xids-1; i++) {
+ TXNID current_msg_xid = toku_xids_get_xid(xids, i);
+ ule_push_placeholder_uxr(ule, current_msg_xid);
+ }
+}
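+
+// Illustration with hypothetical xids: if the ule's provisional stack is [100]
+// and the message's xid stack is [100, 120, 130], a single placeholder is
+// pushed for xid 120; the innermost xid (130) is pushed afterwards by the
+// caller as a real insert or delete record.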
+
+uint64_t ule_num_uxrs(ULE ule) {
+ return ule->num_cuxrs + ule->num_puxrs;
+}
+
+UXR ule_get_uxr(ULE ule, uint64_t ith) {
+ invariant(ith < ule_num_uxrs(ule));
+ return &ule->uxrs[ith];
+}
+
+uint32_t ule_get_num_committed(ULE ule) {
+ return ule->num_cuxrs;
+}
+
+uint32_t ule_get_num_provisional(ULE ule) {
+ return ule->num_puxrs;
+}
+
+int ule_is_committed(ULE ule, uint64_t ith) {
+ invariant(ith < ule_num_uxrs(ule));
+ return ith < ule->num_cuxrs;
+}
+
+int ule_is_provisional(ULE ule, uint64_t ith) {
+ invariant(ith < ule_num_uxrs(ule));
+ return ith >= ule->num_cuxrs;
+}
+
+// return size of data for innermost uxr, the size of val
+uint32_t ule_get_innermost_numbytes(ULE ule, uint32_t keylen) {
+ uint32_t rval;
+ UXR uxr = ule_get_innermost_uxr(ule);
+ if (uxr_is_delete(uxr)) {
+ rval = 0;
+ } else {
+ rval = uxr_get_vallen(uxr) + keylen;
+ }
+ return rval;
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+// This layer of abstraction (uxr_xxx) understands uxr and nothing else.
+//
+
+static inline bool uxr_type_is_insert(uint8_t type) {
+ bool rval = (bool)(type == XR_INSERT);
+ return rval;
+}
+
+bool uxr_is_insert(UXR uxr) {
+ return uxr_type_is_insert(uxr->type);
+}
+
+static inline bool uxr_type_is_delete(uint8_t type) {
+ bool rval = (bool)(type == XR_DELETE);
+ return rval;
+}
+
+bool uxr_is_delete(UXR uxr) {
+ return uxr_type_is_delete(uxr->type);
+}
+
+static inline bool uxr_type_is_placeholder(uint8_t type) {
+ bool rval = (bool)(type == XR_PLACEHOLDER);
+ return rval;
+}
+
+bool uxr_is_placeholder(UXR uxr) {
+ return uxr_type_is_placeholder(uxr->type);
+}
+
+void* uxr_get_val(UXR uxr) {
+ return uxr->valp;
+}
+
+uint32_t uxr_get_vallen(UXR uxr) {
+ return uxr->vallen;
+}
+
+
+TXNID uxr_get_txnid(UXR uxr) {
+ return uxr->xid;
+}
+
+static int le_iterate_get_accepted_index(
+ TXNID* xids,
+ uint32_t* index,
+ uint32_t num_xids,
+ LE_ITERATE_CALLBACK f,
+ TOKUTXN context,
+ bool top_is_provisional) {
+
+ uint32_t i;
+ int r = 0;
+ // if this for loop does not return anything, we return num_xids-1, which
+ // should map to T_0
+ for (i = 0; i < num_xids - 1; i++) {
+ TXNID xid = toku_dtoh64(xids[i]);
+ r = f(xid, context, (i == 0 && top_is_provisional));
+ if (r==TOKUDB_ACCEPT) {
+ r = 0;
+ break; //or goto something
+ } else if (r!=0) {
+ break;
+ }
+ }
+ *index = i;
+ return r;
+}
+
+#if ULE_DEBUG
+static void ule_verify_xids(ULE ule, uint32_t interesting, TXNID *xids) {
+ int has_p = (ule->num_puxrs != 0);
+ invariant(ule->num_cuxrs + has_p == interesting);
+ uint32_t i;
+ for (i = 0; i < interesting - 1; i++) {
+ TXNID xid = toku_dtoh64(xids[i]);
+ invariant(ule->uxrs[ule->num_cuxrs - 1 + has_p - i].xid == xid);
+ }
+}
+#endif
+
+//
+// Iterates over "possible" TXNIDs in a leafentry's stack, until one is
+// accepted by 'f'. If the value associated with the accepted TXNID is not an
+// insert, then set *is_delp to true, otherwise false.
+// The "possible" TXNIDs are:
+// If provisionals exist, then the first possible TXNID is the outermost
+// provisional.
+// The next possible TXNIDs are the committed TXNIDs, from most recently
+// committed to T_0.
+// If provisionals exist, and the outermost provisional is accepted by 'f',
+// the associated value checked is the innermost provisional's value.
+// Parameters:
+// le - leafentry to iterate over
+// f - callback function that checks if a TXNID in le is accepted, and its
+// associated value should be examined.
+// is_delp - output parameter that returns answer
+// context - parameter for f
+//
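+// Example (hypothetical xids): for a leafentry with committed xids [0, 10, 20]
+// (20 most recent) and provisional xids [30, 40], the candidate TXNIDs are 30
+// (outermost provisional), then 20, then 10, with T_0 (xid 0) as the final
+// fallback; if 30 is accepted, the value examined is the innermost
+// provisional's (40's).
+//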
+static int le_iterate_is_del(
+ LEAFENTRY le,
+ LE_ITERATE_CALLBACK f,
+ bool* is_delp,
+ TOKUTXN context) {
+
+#if ULE_DEBUG
+ ULE_S ule;
+ le_unpack(&ule, le);
+#endif
+
+ uint8_t type = le->type;
+ int r;
+ bool is_del = false;
+ switch (type) {
+ case LE_CLEAN: {
+ r = 0;
+#if ULE_DEBUG
+ invariant(ule.num_cuxrs == 1);
+ invariant(ule.num_puxrs == 0);
+ invariant(uxr_is_insert(ule.uxrs));
+#endif
+ break;
+ }
+ case LE_MVCC:;
+ uint32_t num_cuxrs;
+ num_cuxrs = toku_dtoh32(le->u.mvcc.num_cxrs);
+ uint32_t num_puxrs;
+ num_puxrs = le->u.mvcc.num_pxrs;
+ uint8_t *p;
+ p = le->u.mvcc.xrs;
+
+ uint32_t index;
+ uint32_t num_interesting;
+ num_interesting = num_cuxrs + (num_puxrs != 0);
+ TXNID *xids;
+ xids = (TXNID*)p;
+#if ULE_DEBUG
+ ule_verify_xids(&ule, num_interesting, xids);
+#endif
+ r =
+ le_iterate_get_accepted_index(
+ xids,
+ &index,
+ num_interesting,
+ f,
+ context,
+ (num_puxrs != 0));
+ if (r != 0) {
+ goto cleanup;
+ }
+ invariant(index < num_interesting);
+
+ //Skip TXNIDs
+ p += (num_interesting - 1)*sizeof(TXNID);
+
+ uint32_t *length_and_bits;
+ length_and_bits = (uint32_t*)p;
+ uint32_t my_length_and_bit;
+ my_length_and_bit = toku_dtoh32(length_and_bits[index]);
+ is_del = !IS_INSERT(my_length_and_bit);
+#if ULE_DEBUG
+ {
+ uint32_t has_p = (ule.num_puxrs != 0);
+ uint32_t ule_index = (index==0) ?
+ ule.num_cuxrs + ule.num_puxrs - 1 :
+ ule.num_cuxrs - 1 + has_p - index;
+ UXR uxr = ule.uxrs + ule_index;
+ invariant(uxr_is_delete(uxr) == is_del);
+ }
+#endif
+ break;
+ default:
+ invariant(false);
+ }
+cleanup:
+#if ULE_DEBUG
+ ule_cleanup(&ule);
+#endif
+ if (!r) *is_delp = is_del;
+ return r;
+}
+
+static int le_iterate_read_committed_callback(
+ TXNID txnid,
+ TOKUTXN txn,
+ bool is_provisional UU()) {
+
+ if (is_provisional) {
+ return toku_txn_reads_txnid(txnid, txn, is_provisional);
+ }
+ return TOKUDB_ACCEPT;
+}
+
+//
+// Returns true if the value that is to be read is a delete (i.e. empty).
+//
+int le_val_is_del(LEAFENTRY le, enum cursor_read_type read_type, TOKUTXN txn) {
+ int rval;
+ if (read_type == C_READ_SNAPSHOT || read_type == C_READ_COMMITTED) {
+ LE_ITERATE_CALLBACK f = (read_type == C_READ_SNAPSHOT) ?
+ toku_txn_reads_txnid :
+ le_iterate_read_committed_callback;
+ bool is_del = false;
+ le_iterate_is_del(
+ le,
+ f,
+ &is_del,
+ txn
+ );
+ rval = is_del;
+ } else if (read_type == C_READ_ANY) {
+ rval = le_latest_is_del(le);
+ } else {
+ invariant(false);
+ }
+ return rval;
+}
+
+//
+// Iterates over "possible" TXNIDs in a leafentry's stack, until one is accepted
+// by 'f'. Set valpp and vallenp to value and length associated with accepted
+// TXNID
+// The "possible" TXNIDs are:
+// If provisionals exist, then the first possible TXNID is the outermost
+// provisional.
+// The next possible TXNIDs are the committed TXNIDs, from most recently
+// committed to T_0.
+// If provisionals exist, and the outermost provisional is accepted by 'f',
+// the associated length value is the innermost provisional's length and value.
+// Parameters:
+// le - leafentry to iterate over
+// f - callback function that checks if a TXNID in le is accepted, and its
+// associated value should be examined.
+// valpp - output parameter that returns pointer to value
+// vallenp - output parameter that returns length of value
+// context - parameter for f
+//
+int le_iterate_val(
+ LEAFENTRY le,
+ LE_ITERATE_CALLBACK f,
+ void** valpp,
+ uint32_t* vallenp,
+ TOKUTXN context) {
+
+#if ULE_DEBUG
+ ULE_S ule;
+ le_unpack(&ule, le);
+#endif
+
+ uint8_t type = le->type;
+ int r;
+ uint32_t vallen = 0;
+ void *valp = NULL;
+ switch (type) {
+ case LE_CLEAN: {
+ vallen = toku_dtoh32(le->u.clean.vallen);
+ valp = le->u.clean.val;
+ r = 0;
+#if ULE_DEBUG
+ invariant(ule.num_cuxrs == 1);
+ invariant(ule.num_puxrs == 0);
+ invariant(uxr_is_insert(ule.uxrs));
+ invariant(ule.uxrs[0].vallen == vallen);
+ invariant(ule.uxrs[0].valp == valp);
+#endif
+ break;
+ }
+ case LE_MVCC:;
+ uint32_t num_cuxrs;
+ num_cuxrs = toku_dtoh32(le->u.mvcc.num_cxrs);
+ uint32_t num_puxrs;
+ num_puxrs = le->u.mvcc.num_pxrs;
+ uint8_t *p;
+ p = le->u.mvcc.xrs;
+
+ uint32_t index;
+ uint32_t num_interesting;
+ num_interesting = num_cuxrs + (num_puxrs != 0);
+ TXNID *xids;
+ xids = (TXNID*)p;
+#if ULE_DEBUG
+ ule_verify_xids(&ule, num_interesting, xids);
+#endif
+ r =
+ le_iterate_get_accepted_index(
+ xids,
+ &index,
+ num_interesting,
+ f,
+ context,
+ (num_puxrs != 0));
+ if (r != 0) {
+ goto cleanup;
+ }
+ invariant(index < num_interesting);
+
+ //Skip TXNIDs
+ p += (num_interesting - 1)*sizeof(TXNID);
+
+ UXR_S temp;
+ size_t offset;
+ offset = 0;
+
+ uint32_t *length_and_bits;
+ length_and_bits = (uint32_t*)p;
+ uint32_t i;
+ //evaluate the offset
+ for (i=0; i < index; i++){
+ uxr_unpack_length_and_bit(&temp, (uint8_t*)&length_and_bits[i]);
+ offset += temp.vallen;
+ }
+ uxr_unpack_length_and_bit(&temp, (uint8_t*)&length_and_bits[index]);
+ if (uxr_is_delete(&temp)) {
+ goto verify_is_empty;
+ }
+ vallen = temp.vallen;
+
+ // move p past the length and bits, now points to beginning of data
+ p += num_interesting*sizeof(uint32_t);
+ // move p to point to the data we care about
+ p += offset;
+ valp = p;
+
+#if ULE_DEBUG
+ {
+ uint32_t has_p = (ule.num_puxrs != 0);
+ uint32_t ule_index = (index==0) ?
+ ule.num_cuxrs + ule.num_puxrs - 1 :
+ ule.num_cuxrs - 1 + has_p - index;
+ UXR uxr = ule.uxrs + ule_index;
+ invariant(uxr_is_insert(uxr));
+ invariant(uxr->vallen == vallen);
+ invariant(uxr->valp == valp);
+ }
+#endif
+ if (0) {
+verify_is_empty:;
+#if ULE_DEBUG
+ uint32_t has_p = (ule.num_puxrs != 0);
+ UXR uxr = ule.uxrs + ule.num_cuxrs - 1 + has_p - index;
+ invariant(uxr_is_delete(uxr));
+#endif
+ }
+ break;
+ default:
+ invariant(false);
+ }
+cleanup:
+#if ULE_DEBUG
+ ule_cleanup(&ule);
+#endif
+ if (!r) {
+ *valpp = valp;
+ *vallenp = vallen;
+ }
+ return r;
+}
+
+void le_extract_val(
+ LEAFENTRY le,
+ // should we return the entire leafentry as the val?
+ bool is_leaf_mode,
+ enum cursor_read_type read_type,
+ TOKUTXN ttxn,
+ uint32_t* vallen,
+ void** val) {
+
+ if (is_leaf_mode) {
+ *val = le;
+ *vallen = leafentry_memsize(le);
+ } else if (read_type == C_READ_SNAPSHOT || read_type == C_READ_COMMITTED) {
+ LE_ITERATE_CALLBACK f = (read_type == C_READ_SNAPSHOT) ?
+ toku_txn_reads_txnid :
+ le_iterate_read_committed_callback;
+ int r = le_iterate_val(le, f, val, vallen, ttxn);
+ lazy_assert_zero(r);
+ } else if (read_type == C_READ_ANY){
+ *val = le_latest_val_and_len(le, vallen);
+ } else {
+ assert(false);
+ }
+}
+
+// This is an on-disk format. static_asserts verify everything is packed and aligned correctly.
+struct __attribute__ ((__packed__)) leafentry_13 {
+ struct leafentry_committed_13 {
+ uint8_t key_val[0]; //Actual key, then actual val
+ };
+ static_assert(0 == sizeof(leafentry_committed_13), "wrong size");
+ static_assert(0 == __builtin_offsetof(leafentry_committed_13, key_val), "wrong offset");
+ struct __attribute__ ((__packed__)) leafentry_provisional_13 {
+ uint8_t innermost_type;
+ TXNID xid_outermost_uncommitted;
+ uint8_t key_val_xrs[0]; //Actual key,
+ //then actual innermost inserted val,
+ //then transaction records.
+ };
+ static_assert(9 == sizeof(leafentry_provisional_13), "wrong size");
+ static_assert(9 == __builtin_offsetof(leafentry_provisional_13, key_val_xrs), "wrong offset");
+
+ uint8_t num_xrs;
+ uint32_t keylen;
+ uint32_t innermost_inserted_vallen;
+ union __attribute__ ((__packed__)) {
+ struct leafentry_committed_13 comm;
+ struct leafentry_provisional_13 prov;
+ } u;
+};
+static_assert(18 == sizeof(leafentry_13), "wrong size");
+static_assert(9 == __builtin_offsetof(leafentry_13, u), "wrong offset");
+
+//Requires:
+// Leafentry that ule represents should not be destroyed (is not just all
+// deletes)
+static size_t le_memsize_from_ule_13 (ULE ule, LEAFENTRY_13 le) {
+ uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
+ assert(num_uxrs);
+ size_t rval;
+ if (num_uxrs == 1) {
+ assert(uxr_is_insert(&ule->uxrs[0]));
+ rval = 1 //num_uxrs
+ +4 //keylen
+ +4 //vallen
+ +le->keylen //actual key
+ +ule->uxrs[0].vallen; //actual val
+ } else {
+ rval = 1 //num_uxrs
+ +4 //keylen
+ +le->keylen //actual key
+ +1*num_uxrs //types
+ +8*(num_uxrs-1); //txnids
+ uint8_t i;
+ for (i = 0; i < num_uxrs; i++) {
+ UXR uxr = &ule->uxrs[i];
+ if (uxr_is_insert(uxr)) {
+ rval += 4; //vallen
+ rval += uxr->vallen; //actual val
+ }
+ }
+ }
+ return rval;
+}
+
+// This function is mostly copied from 4.1.1 (which is version 12, same as 13
+// except that only 13 is upgradable).
+// Note, number of transaction records in version 13 has been replaced by
+// separate counters in version 14 (MVCC), one counter for committed transaction
+// records and one counter for provisional transaction records. When upgrading
+// a version 13 le to version 14, the number of committed transaction records is
+// always set to one (1) and the number of provisional transaction records is
+// set to the original number of transaction records minus one. The bottom
+// transaction record is assumed to be a committed value. (If there is no
+// committed value then the bottom transaction record of version 13 is a
+// committed delete.)
+// This is the only change from the 4.1.1 code. The rest of the leafentry is
+// read as is.
+static void le_unpack_13(ULE ule, LEAFENTRY_13 le) {
+ //Read num_uxrs
+ uint8_t num_xrs = le->num_xrs;
+ assert(num_xrs > 0);
+ ule->uxrs = ule->uxrs_static; //Static version is always enough.
+ ule->num_cuxrs = 1;
+ ule->num_puxrs = num_xrs - 1;
+
+ //Read the keylen
+ uint32_t keylen = toku_dtoh32(le->keylen);
+
+ //Read the vallen of innermost insert
+ uint32_t vallen_of_innermost_insert = toku_dtoh32(le->innermost_inserted_vallen);
+
+ uint8_t *p;
+ if (num_xrs == 1) {
+ //Unpack a 'committed leafentry' (No uncommitted transactions exist)
+ //Must be or the leafentry would not exist
+ ule->uxrs[0].type = XR_INSERT;
+ ule->uxrs[0].vallen = vallen_of_innermost_insert;
+ ule->uxrs[0].valp = &le->u.comm.key_val[keylen];
+ ule->uxrs[0].xid = 0; //Required.
+
+ //Set p to immediately after leafentry
+ p = &le->u.comm.key_val[keylen + vallen_of_innermost_insert];
+ } else {
+ //Unpack a 'provisional leafentry' (Uncommitted transactions exist)
+
+ //Read in type.
+ uint8_t innermost_type = le->u.prov.innermost_type;
+ assert(!uxr_type_is_placeholder(innermost_type));
+
+ //Read in xid
+ TXNID xid_outermost_uncommitted = toku_dtoh64(le->u.prov.xid_outermost_uncommitted);
+
+ //Read pointer to innermost inserted val (immediately after key)
+ uint8_t *valp_of_innermost_insert = &le->u.prov.key_val_xrs[keylen];
+
+        //Point p to immediately after the key and innermost inserted val
+ p = &le->u.prov.key_val_xrs[keylen + vallen_of_innermost_insert];
+
+ bool found_innermost_insert = false;
+ int i; //Index in ULE.uxrs[]
+ //Loop inner to outer
+ for (i = num_xrs - 1; i >= 0; i--) {
+ UXR uxr = &ule->uxrs[i];
+
+ //Innermost's type is in header.
+ if (i < num_xrs - 1) {
+ //Not innermost, so load the type.
+ uxr->type = *p;
+ p += 1;
+ } else {
+ //Innermost, load the type previously read from header
+ uxr->type = innermost_type;
+ }
+
+ //Committed txn id is implicit (0). (i==0)
+ //Outermost uncommitted txnid is stored in header. (i==1)
+ if (i > 1) {
+ //Not committed nor outermost uncommitted, so load the xid.
+ uxr->xid = toku_dtoh64(*(TXNID*)p);
+ p += 8;
+ } else if (i == 1) {
+ //Outermost uncommitted, load the xid previously read from
+ //header
+ uxr->xid = xid_outermost_uncommitted;
+ } else {
+ // i == 0, committed entry
+ uxr->xid = 0;
+ }
+
+ if (uxr_is_insert(uxr)) {
+ if (found_innermost_insert) {
+ //Not the innermost insert. Load vallen/valp
+ uxr->vallen = toku_dtoh32(*(uint32_t*)p);
+ p += 4;
+
+ uxr->valp = p;
+ p += uxr->vallen;
+ } else {
+ //Innermost insert, load the vallen/valp previously read
+ //from header
+ uxr->vallen = vallen_of_innermost_insert;
+ uxr->valp = valp_of_innermost_insert;
+ found_innermost_insert = true;
+ }
+ }
+ }
+ assert(found_innermost_insert);
+ }
+#if ULE_DEBUG
+    size_t memsize = le_memsize_from_ule_13(ule, le);
+ assert(p == ((uint8_t*)le) + memsize);
+#endif
+}
+
+size_t leafentry_disksize_13(LEAFENTRY_13 le) {
+ ULE_S ule;
+ le_unpack_13(&ule, le);
+ size_t memsize = le_memsize_from_ule_13(&ule, le);
+ ule_cleanup(&ule);
+ return memsize;
+}
+
+int toku_le_upgrade_13_14(
+ LEAFENTRY_13 old_leafentry,
+ void** keyp,
+ uint32_t* keylen,
+ size_t* new_leafentry_memorysize,
+ LEAFENTRY* new_leafentry_p) {
+
+ ULE_S ule;
+ int rval;
+ invariant(old_leafentry);
+ le_unpack_13(&ule, old_leafentry);
+ // get the key
+ *keylen = old_leafentry->keylen;
+ if (old_leafentry->num_xrs == 1) {
+ *keyp = old_leafentry->u.comm.key_val;
+ } else {
+ *keyp = old_leafentry->u.prov.key_val_xrs;
+ }
+ // We used to pass NULL for omt and mempool, so that we would use
+ // malloc instead of a mempool. However after supporting upgrade,
+ // we need to use mempools and the OMT.
+ rval =
+ le_pack(
+ &ule, // create packed leafentry
+ nullptr,
+ 0, //only matters if we are passing in a bn_data
+ nullptr, //only matters if we are passing in a bn_data
+ 0, //only matters if we are passing in a bn_data
+ 0, //only matters if we are passing in a bn_data
+ 0, //only matters if we are passing in a bn_data
+ new_leafentry_p,
+ nullptr); //only matters if we are passing in a bn_data
+ ule_cleanup(&ule);
+ *new_leafentry_memorysize = leafentry_memsize(*new_leafentry_p);
+ return rval;
+}
+
+#include <toku_race_tools.h>
+void __attribute__((__constructor__)) toku_ule_helgrind_ignore(void);
+void
+toku_ule_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&le_status, sizeof le_status);
+}
diff --git a/storage/tokudb/PerconaFT/ft/ule.h b/storage/tokudb/PerconaFT/ft/ule.h
new file mode 100644
index 00000000..cc31249e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/ule.h
@@ -0,0 +1,74 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Purpose of this file is to provide the world with everything necessary
+ * to use the nested transaction logic and nothing else. No internal
+ * requirements of the nested transaction logic belong here.
+ */
+
+#pragma once
+
+#include "leafentry.h"
+#include "txn/txn_manager.h"
+#include <util/mempool.h>
+
+// opaque handles used by outside world (i.e. indexer)
+typedef struct ule *ULEHANDLE;
+typedef struct uxr *UXRHANDLE;
+
+// create a ULE by copying the contents of the given leafentry
+ULEHANDLE toku_ule_create(LEAFENTRY le);
+
+void toku_ule_free(ULEHANDLE ule_p);
+
+uint64_t ule_num_uxrs(ULEHANDLE ule);
+uint32_t ule_get_num_committed(ULEHANDLE ule);
+uint32_t ule_get_num_provisional(ULEHANDLE ule);
+UXRHANDLE ule_get_uxr(ULEHANDLE ule, uint64_t ith);
+int ule_is_committed(ULEHANDLE ule, uint64_t ith);
+int ule_is_provisional(ULEHANDLE ule, uint64_t ith);
+
+bool uxr_is_insert(UXRHANDLE uxr);
+bool uxr_is_delete(UXRHANDLE uxr);
+bool uxr_is_placeholder(UXRHANDLE uxr);
+void *uxr_get_val(UXRHANDLE uxr);
+uint32_t uxr_get_vallen(UXRHANDLE uxr);
+TXNID uxr_get_txnid(UXRHANDLE uxr);
+
+//1 does much slower debugging
+#define GARBAGE_COLLECTION_DEBUG 0
diff --git a/storage/tokudb/PerconaFT/ft/valgrind.suppressions b/storage/tokudb/PerconaFT/ft/valgrind.suppressions
new file mode 100644
index 00000000..7f6be084
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ft/valgrind.suppressions
@@ -0,0 +1,294 @@
+{
+ compress_is_still_not_valgrind_clean_in_ubuntu_3
+ Memcheck:Param
+ write(buf)
+ obj:/lib/libpthread-2.10.1.so
+ fun:toku_os_write
+ fun:write_nonleaf_node
+}
+{
+ compress_is_still_not_valgrind_clean_in_ubuntu_2
+ Memcheck:Param
+ write(buf)
+ obj:/lib/libpthread-2.10.1.so
+ fun:toku_os_write
+ fun:finish_leafnode
+}
+{
+ compress_is_still_not_valgrind_clean_in_ubuntu
+ Memcheck:Param
+ pwrite64(buf)
+ obj:/lib/libpthread-2.10.1.so
+ fun:toku_os_full_pwrite
+ fun:toku_serialize_ftnode_to
+}
+{
+ compress_is_not_valgrind_clean
+ Memcheck:Cond
+ fun:longest_match
+ fun:deflate_slow
+ fun:deflate
+ fun:compress2
+}
+
+{
+ compress_is_not_valgrind_clean2
+ Memcheck:Cond
+ fun:longest_match
+ fun:deflate_fast
+ fun:deflate
+ fun:compress2
+}
+
+{
+ compress_is_not_valgrind_clean3
+ Memcheck:Cond
+ obj:/usr/lib64/libz.so.1.2.3
+ obj:/usr/lib64/libz.so.1.2.3
+ fun:deflate
+ fun:compress2
+}
+{
+ compress_is_not_valgrind_clean3_32
+ Memcheck:Cond
+ obj:/usr/lib/libz.so.1.2.3
+ obj:/usr/lib/libz.so.1.2.3
+ fun:deflate
+ fun:compress2
+}
+{
+ compress_is_not_valgrind_clean3_fedora9
+ Memcheck:Cond
+ obj:/lib64/libz.so.1.2.3
+ obj:/lib64/libz.so.1.2.3
+ fun:deflate
+ fun:compress2
+}
+{
+ compress_is_not_valgrind_clean_ubuntu804
+ Memcheck:Cond
+ obj:/usr/lib/libz.so.1.2.3.3
+ fun:deflate
+ fun:compress2
+}
+{
+ compress_is_not_valgrind_clean_ubuntu810
+ Memcheck:Value8
+ obj:/usr/lib/libz.so.1.2.3.3
+ fun:deflate
+ fun:compress2
+}
+{
+ compress_is_not_valgrind_clean2_ubuntu810
+ Memcheck:Cond
+ obj:/usr/lib/libz.so.1.2.3.3
+ obj:/usr/lib/libz.so.1.2.3.3
+ fun:deflate
+ fun:compress2
+}
+{
+ compress_is_not_valgrind_clean3_ubuntu810
+ Memcheck:Value8
+ obj:/usr/lib/libz.so.1.2.3.3
+ obj:/usr/lib/libz.so.1.2.3.3
+ obj:/usr/lib/libz.so.1.2.3.3
+ fun:deflate
+ fun:compress2
+}
+{
+ qlz_is_not_valgrind_clean
+ Memcheck:Cond
+ fun:qlz_compress_core
+}
+{
+ qlz_is_not_valgrind_clean
+ Memcheck:Value8
+ fun:qlz_compress_core
+}
+{
+ qlz_is_not_valgrind_clean
+ Memcheck:Cond
+ fun:qlz_compress
+}
+{
+ qlz_is_not_valgrind_clean
+ Memcheck:Value8
+ fun:qlz_compress
+}
+{
+ dlsym_on_centos
+ Memcheck:Leak
+ fun:calloc
+ fun:_dlerror_run
+ obj:/lib64/libdl-2.5.so
+}
+{
+ dlsym_on_centos
+ Memcheck:Leak
+ fun:malloc
+ fun:_dl_signal_error
+ obj:/lib64/ld-2.5.so
+}
+{
+ dlsym_on_FC12
+ Memcheck:Leak
+ fun:calloc
+ fun:_dlerror_run
+ fun:dlsym
+}
+{
+ dlsym_on_FC12
+ Memcheck:Leak
+ fun:malloc
+ fun:_dl_signal_error
+ fun:_dl_signal_cerror
+ fun:_dl_lookup_symbol_x
+ fun:do_sym
+ fun:dlsym_doit
+ fun:_dl_catch_error
+ fun:_dlerror_run
+ fun:dlsym
+}
+{
+ dyld_on_OSX
+ Memcheck:Leak
+ ...
+ obj:/usr/lib/dyld
+}
+{
+ dyld_on_OSX
+ Memcheck:Value8
+ ...
+ obj:/usr/lib/dyld
+}
+{
+ dyld_on_OSX
+ Memcheck:Cond
+ ...
+ obj:/usr/lib/dyld
+}
+{
+ ctime_on_OSX
+ Memcheck:Leak
+ ...
+ obj:/usr/lib/system/libsystem_c.dylib
+ ...
+ fun:asctime_r
+}
+{
+ ctime_on_OSX
+ Memcheck:Leak
+ ...
+ obj:/usr/lib/system/libsystem_c.dylib
+ ...
+ fun:ctime
+}
+{
+ pthread_join_on_OSX
+ Memcheck:Leak
+ ...
+ obj:/usr/lib/system/libsystem_c.dylib
+ fun:pthread_join
+}
+{
+ pthread_start_on_OSX
+ Memcheck:Leak
+ ...
+ fun:_pthread_start
+ obj:/usr/lib/system/libsystem_c.dylib
+}
+{
+ printf_etc_on_OSX
+ Memcheck:Leak
+ ...
+ fun:__dtoa
+ ...
+ obj:/usr/lib/system/libsystem_c.dylib
+}
+{
+ printf_etc_on_OSX
+ Memcheck:Leak
+ ...
+ fun:__smakebuf
+ ...
+ obj:/usr/lib/system/libsystem_c.dylib
+}
+{
+ fopen_on_OSX
+ Memcheck:Leak
+ ...
+ fun:fopen$DARWIN_EXTSN
+}
+{
+ popen_on_OSX
+ Memcheck:Leak
+ ...
+ fun:popen$DARWIN_EXTSN
+}
+{
+ gettimeofday_on_OSX
+ Memcheck:Leak
+ ...
+ fun:tzsetwall_basic
+ obj:/usr/lib/system/libsystem_c.dylib
+}
+{
+ setenv_on_OSX
+ Memcheck:Leak
+ ...
+ obj:/usr/lib/system/libsystem_c.dylib
+ fun:setenv
+}
+{
+ unsetenv_on_OSX
+ Memcheck:Leak
+ ...
+ obj:/usr/lib/system/libsystem_c.dylib
+ fun:unsetenv
+}
+{
+ loadlocale_on_osx
+ Memcheck:Leak
+ ...
+ obj:/usr/lib/system/libsystem_c.dylib
+ fun:loadlocale
+}
+{
+ <ld_is_not_valgrind_clean_in_ubuntu_13.04_1>
+ Memcheck:Cond
+ fun:index
+ fun:expand_dynamic_string_token
+ fun:_dl_map_object
+ fun:map_doit
+ fun:_dl_catch_error
+ fun:do_preload
+ fun:dl_main
+ fun:_dl_sysdep_start
+ fun:_dl_start
+ obj:/lib/x86_64-linux-gnu/ld-2.17.so
+}
+{
+ <ld_is_not_valgrind_clean_in_ubuntu_13.04_2>
+ Memcheck:Cond
+ fun:index
+ fun:expand_dynamic_string_token
+ fun:_dl_map_object
+ fun:map_doit
+ fun:_dl_catch_error
+ fun:do_preload
+ fun:dl_main
+ fun:_dl_sysdep_start
+ fun:_dl_start
+ obj:/lib/x86_64-linux-gnu/ld-2.17.so
+}
+{
+ <ld_is_not_clean_on_arch_linux_june_2014>
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:calloc
+ ...
+ fun:dlsym
+ ...
+ fun:_dl_init
+}
+
diff --git a/storage/tokudb/PerconaFT/ftcxx/CMakeLists.txt b/storage/tokudb/PerconaFT/ftcxx/CMakeLists.txt
new file mode 100644
index 00000000..46067803
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/CMakeLists.txt
@@ -0,0 +1,31 @@
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fexceptions -Wno-deprecated-declarations")
+
+if (APPLE)
+ ## osx is weird about weak symbols
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-flat_namespace,-undefined,dynamic_lookup")
+endif (APPLE)
+
+add_library(ftcxx STATIC
+ buffer
+ cursor
+ db_env
+ malloc_utils
+ )
+add_dependencies(ftcxx install_tdb_h)
+
+if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
+ install(
+ TARGETS ftcxx
+ DESTINATION ${INSTALL_LIBDIR}
+ COMPONENT tokukv_libs_static
+ )
+
+ file(GLOB ftcxx_headers "*.hpp")
+ install(
+ FILES ${ftcxx_headers}
+ DESTINATION include/ftcxx
+ COMPONENT tokukv_headers
+ )
+endif ()
+
+add_subdirectory(tests)
diff --git a/storage/tokudb/PerconaFT/ftcxx/buffer.cpp b/storage/tokudb/PerconaFT/ftcxx/buffer.cpp
new file mode 100644
index 00000000..f02014cd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/buffer.cpp
@@ -0,0 +1,141 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <algorithm>
+#include <cassert>
+#include <cstdlib>
+#include <memory>
+
+#include "buffer.hpp"
+#include "malloc_utils.hpp"
+
+namespace mu = malloc_utils;
+
+namespace ftcxx {
+
+ const size_t Buffer::INITIAL_CAPACITY = 1<<10;
+ const size_t Buffer::MAXIMUM_CAPACITY = 1<<18;
+ const double Buffer::FULLNESS_RATIO = 0.9;
+
+ Buffer::Buffer()
+ : _cur(0),
+ _end(0),
+ _capacity(INITIAL_CAPACITY),
+ _buf(nullptr, &std::free)
+ {
+ init();
+ }
+
+ Buffer::Buffer(size_t capacity)
+        : _cur(0),
+          _end(0),
+ _capacity(capacity),
+ _buf(nullptr, &std::free)
+ {
+ init();
+ }
+
+ char *Buffer::alloc(size_t sz) {
+ grow(sz);
+ char *p = raw(_end);
+ _end += sz;
+ return p;
+ }
+
+ bool Buffer::full() const {
+ return _end > MAXIMUM_CAPACITY * FULLNESS_RATIO;
+ }
+
+ bool Buffer::more() const {
+ return _cur < _end;
+ }
+
+ char *Buffer::current() const {
+ return raw(_cur);
+ }
+
+ void Buffer::advance(size_t sz) {
+ _cur += sz;
+ }
+
+ void Buffer::clear() {
+ _cur = 0;
+ _end = 0;
+ }
+
+ void Buffer::init() {
+ _buf.reset(static_cast<char *>(mu::checkedMalloc(_capacity)));
+ }
+
+ /**
+ * Implements our growth strategy. Currently we double until we get
+ * up to 4kB so that we can quickly reach the point where jemalloc can
+ * help us resize in-place, but after that point we grow by a factor
+ * of 1.5x.
+ *
+ * FBVector doubles once it is bigger than 128kB, but I don't think we
+ * actually want to because that's about when we want to stop growing.
+ */
+ size_t Buffer::next_alloc_size(size_t sz) {
+ if (sz < mu::jemallocMinInPlaceExpandable) {
+ return sz * 2;
+ }
+#if 0
+ else if (sz > (128<<10)) {
+ return sz * 2;
+ }
+#endif
+ else {
+ return (sz * 3 + 1) / 2;
+ }
+ }
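+
+    // As a worked example (assuming mu::jemallocMinInPlaceExpandable is the
+    // usual 4kB from the folly-derived malloc_utils, and ignoring the final
+    // goodMallocSize rounding), a buffer starting at the 1kB
+    // INITIAL_CAPACITY grows roughly as
+    //     1024 -> 2048 -> 4096 -> 6144 -> 9216 -> 13824 -> ...
+    // doubling up to the in-place-expandable threshold and taking 1.5x
+    // steps after that.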
+
+ void Buffer::grow(size_t sz) {
+ size_t new_capacity = _capacity;
+ while (new_capacity < _end + sz) {
+ new_capacity = next_alloc_size(new_capacity);
+ }
+ assert(new_capacity >= _capacity); // overflow?
+ if (new_capacity > _capacity) {
+ // This section isn't exception-safe, but smartRealloc already
+ // isn't. The only thing we can throw in here is
+ // std::bad_alloc, in which case we're kind of screwed anyway.
+ new_capacity = mu::goodMallocSize(new_capacity);
+ _buf.reset(static_cast<char *>(mu::smartRealloc(_buf.release(), _end, _capacity, new_capacity, _capacity)));
+ }
+ }
+
+} // namespace ftcxx
diff --git a/storage/tokudb/PerconaFT/ftcxx/buffer.hpp b/storage/tokudb/PerconaFT/ftcxx/buffer.hpp
new file mode 100644
index 00000000..c0772277
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/buffer.hpp
@@ -0,0 +1,159 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <algorithm>
+#include <cstdlib>
+#include <memory>
+
+namespace ftcxx {
+
+ /**
+ * Buffer implements a flat memory buffer intended for FIFO usage
+ * where allocations are piecemeal but consumption is total. That is,
+ * we repeatedly fill up the buffer with small allocations, and
+ * periodically consume all entries and clear the buffer.
+ *
+ * For now, the implementation uses a doubling array strategy,
+     * starting at 1kB and growing to a maximum advised capacity of 256kB,
+ * never shrinking the buffer.
+ *
+ * However, we hope to find a better strategy.
+ *
+ * Facebook's FBVector claims that a reallocation growth factor of 1.5
+ * rather than 2 hits their sweet spot, and they claim to have
+ * additional improvements by integrating with jemalloc (which we use
+ * as well).
+ *
+ * Additionally, it may be advantageous to use some memarena-style
+ * tricks like allocating a separate overflow buffer to avoid
+ * memcpying when we're close to our intended maximum capacity, and
+ * also to avoid wasting extra memory if we overflow our maximum
+ * capacity once but never do so again.
+ */
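+    //
+    // A minimal usage sketch of the producer/consumer API described above
+    // (hypothetical caller code; have_input, consume, src and len are
+    // placeholders, not part of this header):
+    //
+    //     ftcxx::Buffer buf;
+    //     while (have_input() && !buf.full()) {
+    //         char *p = buf.alloc(len);        // producer side
+    //         memcpy(p, src, len);
+    //     }
+    //     while (buf.more()) {
+    //         consume(buf.current(), len);     // consumer side
+    //         buf.advance(len);
+    //     }
+    //     buf.clear();                         // ready for the next round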
+ class Buffer {
+ public:
+
+ Buffer();
+
+ explicit Buffer(size_t capacity);
+
+ Buffer(const Buffer &) = delete;
+ Buffer& operator=(const Buffer &) = delete;
+
+ Buffer(Buffer&& other)
+ : _cur(0),
+ _end(0),
+ _capacity(0),
+ _buf(nullptr, &std::free)
+ {
+ std::swap(_cur, other._cur);
+ std::swap(_end, other._end);
+ std::swap(_capacity, other._capacity);
+ std::swap(_buf, other._buf);
+ }
+
+ Buffer& operator=(Buffer&& other) {
+ std::swap(_cur, other._cur);
+ std::swap(_end, other._end);
+ std::swap(_capacity, other._capacity);
+ std::swap(_buf, other._buf);
+ return *this;
+ }
+
+ // Producer API:
+
+ /**
+ * Allocate room for sz more bytes at the end, and return a
+ * pointer to the allocated space. This causes at most one
+ * realloc and memcpy of existing data.
+ */
+ char *alloc(size_t sz);
+
+ /**
+ * Returns true if we're close to our maximum capacity. If so,
+ * the producer should stop and allow the consumer to clear the
+ * buffer.
+ */
+ bool full() const;
+
+ // Consumer API:
+
+ /**
+ * Returns true if there are more unconsumed bytes in the buffer.
+ */
+ bool more() const;
+
+ /**
+ * Returns a pointer to the next unconsumed byte in the buffer.
+ */
+ char *current() const;
+
+ /**
+ * Advances the unconsumed position pointer by sz bytes.
+ */
+ void advance(size_t sz);
+
+ /**
+         * Resets the buffer to empty. The underlying allocation is
+         * kept, since the buffer never shrinks.
+ */
+ void clear();
+
+ private:
+
+ size_t _cur;
+ size_t _end;
+ size_t _capacity;
+ std::unique_ptr<char, void (*)(void*)> _buf;
+
+ static const size_t INITIAL_CAPACITY;
+ static const size_t MAXIMUM_CAPACITY;
+ static const double FULLNESS_RATIO;
+
+ void init();
+
+ static size_t next_alloc_size(size_t sz);
+
+ void grow(size_t sz);
+
+ char *raw(size_t i=0) const {
+ return &(_buf.get()[i]);
+ }
+ };
+
+} // namespace ftcxx
diff --git a/storage/tokudb/PerconaFT/ftcxx/cursor-inl.hpp b/storage/tokudb/PerconaFT/ftcxx/cursor-inl.hpp
new file mode 100644
index 00000000..3ec6787a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/cursor-inl.hpp
@@ -0,0 +1,418 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <algorithm>
+#include <cstdint>
+#include <utility>
+
+#include <db.h>
+
+#include "buffer.hpp"
+#include "db_txn.hpp"
+#include "exceptions.hpp"
+#include "slice.hpp"
+
+namespace ftcxx {
+
+ class DB;
+
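+    // Bounds::check reports whether `key` is still inside the configured
+    // iteration range: a forward scan compares against the right bound, a
+    // reverse scan against the left bound, infinite bounds always pass, and
+    // equality only fails when the range was created end-exclusive.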
+ template<class Comparator>
+ bool Bounds::check(Comparator &cmp, const IterationStrategy &strategy, const Slice &key) const {
+ int c;
+ if (strategy.forward) {
+ if (_right_infinite) {
+ return true;
+ }
+ c = cmp(key, _right);
+ } else {
+ if (_left_infinite) {
+ return true;
+ }
+ c = cmp(_left, key);
+ }
+ if (c > 0 || (c == 0 && _end_exclusive)) {
+ return false;
+ }
+ return true;
+ }
+
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler>::CallbackCursor(const DBEnv &env, const DBTxn &txn,
+ Comparator &&cmp, Handler &&handler)
+ : _dbc(env, txn),
+ _iteration_strategy(IterationStrategy(true, true)),
+ _bounds(DB(env.env()->get_db_for_directory(env.env())), Bounds::Infinite(), Bounds::Infinite(), false),
+ _cmp(std::forward<Comparator>(cmp)),
+ _handler(std::forward<Handler>(handler)),
+ _finished(false)
+ {
+ init();
+ }
+
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler>::CallbackCursor(const DB &db, const DBTxn &txn, int flags,
+ IterationStrategy iteration_strategy,
+ Bounds bounds,
+ Comparator &&cmp, Handler &&handler)
+ : _dbc(db, txn, flags),
+ _iteration_strategy(iteration_strategy),
+ _bounds(std::move(bounds)),
+ _cmp(std::forward<Comparator>(cmp)),
+ _handler(std::forward<Handler>(handler)),
+ _finished(false)
+ {
+ init();
+ }
+
+ template<class Comparator, class Handler>
+ void CallbackCursor<Comparator, Handler>::init() {
+ if (!_dbc.set_range(_iteration_strategy, _bounds, getf_callback, this)) {
+ _finished = true;
+ }
+ }
+
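+    // getf is the per-row callback handed to the engine. Returning -1 stops
+    // the scan because the key has passed the configured bound, returning 0
+    // ends the current bulk-fetch batch (the handler asked to pause), and
+    // TOKUDB_CURSOR_CONTINUE keeps the batch going.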
+ template<class Comparator, class Handler>
+ int CallbackCursor<Comparator, Handler>::getf(const DBT *key, const DBT *val) {
+ if (!_bounds.check(_cmp, _iteration_strategy, Slice(*key))) {
+ _finished = true;
+ return -1;
+ }
+
+ if (!_handler(key, val)) {
+ return 0;
+ }
+
+ return TOKUDB_CURSOR_CONTINUE;
+ }
+
+ template<class Comparator, class Handler>
+ bool CallbackCursor<Comparator, Handler>::consume_batch() {
+ if (!_dbc.advance(_iteration_strategy, getf_callback, this)) {
+ _finished = true;
+ }
+ return !_finished;
+ }
+
+ template<class Comparator, class Handler>
+ void CallbackCursor<Comparator, Handler>::seek(const Slice &key) {
+ if (_iteration_strategy.forward) {
+ _bounds.set_left(key);
+ } else {
+ _bounds.set_right(key);
+ }
+ if (!_dbc.set_range(_iteration_strategy, _bounds, getf_callback, this)) {
+ _finished = true;
+ }
+ }
+
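+    // The record layout produced by BufferAppender::marshall below is
+    //
+    //     [keylen : uint32][vallen : uint32][key bytes...][val bytes...]
+    //
+    // which is what marshalled_size() accounts for and what the two
+    // unmarshall() overloads decode.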
+ template<class Predicate>
+ inline void BufferAppender<Predicate>::marshall(char *dest, const DBT *key, const DBT *val) {
+ uint32_t *keylen = reinterpret_cast<uint32_t *>(&dest[0]);
+ uint32_t *vallen = reinterpret_cast<uint32_t *>(&dest[sizeof *keylen]);
+ *keylen = key->size;
+ *vallen = val->size;
+
+ char *p = &dest[(sizeof *keylen) + (sizeof *vallen)];
+
+ const char *kp = static_cast<char *>(key->data);
+ std::copy(kp, kp + key->size, p);
+
+ p += key->size;
+
+ const char *vp = static_cast<char *>(val->data);
+ std::copy(vp, vp + val->size, p);
+ }
+
+ template<class Predicate>
+ inline void BufferAppender<Predicate>::unmarshall(char *src, DBT *key, DBT *val) {
+ const uint32_t *keylen = reinterpret_cast<uint32_t *>(&src[0]);
+ const uint32_t *vallen = reinterpret_cast<uint32_t *>(&src[sizeof *keylen]);
+ key->size = *keylen;
+ val->size = *vallen;
+ char *p = &src[(sizeof *keylen) + (sizeof *vallen)];
+ key->data = p;
+ val->data = p + key->size;
+ }
+
+ template<class Predicate>
+ inline void BufferAppender<Predicate>::unmarshall(char *src, Slice &key, Slice &val) {
+ const uint32_t *keylen = reinterpret_cast<uint32_t *>(&src[0]);
+ const uint32_t *vallen = reinterpret_cast<uint32_t *>(&src[sizeof *keylen]);
+ char *p = &src[(sizeof *keylen) + (sizeof *vallen)];
+ key = Slice(p, *keylen);
+ val = Slice(p + *keylen, *vallen);
+ }
+
+ template<class Predicate>
+ inline bool BufferAppender<Predicate>::operator()(const DBT *key, const DBT *val) {
+ if (_filter(Slice(*key), Slice(*val))) {
+ size_t needed = marshalled_size(key->size, val->size);
+ char *dest = _buf.alloc(needed);
+ marshall(dest, key, val);
+ }
+ return !_buf.full();
+ }
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate>::BufferedCursor(const DBEnv &env, const DBTxn &txn,
+ Comparator &&cmp, Predicate &&filter)
+ : _buf(),
+ _cur(env, txn, std::forward<Comparator>(cmp), Appender(_buf, std::forward<Predicate>(filter)))
+ {}
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate>::BufferedCursor(const DB &db, const DBTxn &txn, int flags,
+ IterationStrategy iteration_strategy,
+ Bounds bounds,
+ Comparator &&cmp, Predicate &&filter)
+ : _buf(),
+ _cur(db, txn, flags,
+ iteration_strategy,
+ std::move(bounds),
+ std::forward<Comparator>(cmp), Appender(_buf, std::forward<Predicate>(filter)))
+ {}
+
+ template<class Comparator, class Predicate>
+ bool BufferedCursor<Comparator, Predicate>::next(DBT *key, DBT *val) {
+ if (!_buf.more() && !_cur.finished()) {
+ _buf.clear();
+ _cur.consume_batch();
+ }
+
+ if (!_buf.more()) {
+ return false;
+ }
+
+ char *src = _buf.current();
+ Appender::unmarshall(src, key, val);
+ _buf.advance(Appender::marshalled_size(key->size, val->size));
+ return true;
+ }
+
+ template<class Comparator, class Predicate>
+ bool BufferedCursor<Comparator, Predicate>::next(Slice &key, Slice &val) {
+ if (!_buf.more() && !_cur.finished()) {
+ _buf.clear();
+ _cur.consume_batch();
+ }
+
+ if (!_buf.more()) {
+ return false;
+ }
+
+ char *src = _buf.current();
+ Appender::unmarshall(src, key, val);
+ _buf.advance(Appender::marshalled_size(key.size(), val.size()));
+ return true;
+ }
+
+ template<class Comparator, class Predicate>
+ void BufferedCursor<Comparator, Predicate>::seek(const Slice &key) {
+ _buf.clear();
+ _cur.seek(key);
+ }
+
+ template<class Comparator>
+ SimpleCursor<Comparator>::SimpleCursor(const DBEnv &env, const DBTxn &txn, Comparator &&cmp,
+ Slice &key, Slice &val)
+ : _copier(key, val),
+ _cur(env, txn, std::forward<Comparator>(cmp), _copier)
+ {}
+
+ template<class Comparator>
+ SimpleCursor<Comparator>::SimpleCursor(const DB &db, const DBTxn &txn, int flags,
+ IterationStrategy iteration_strategy,
+ Bounds bounds, Comparator &&cmp,
+ Slice &key, Slice &val)
+ : _copier(key, val),
+ _cur(db, txn, flags,
+ iteration_strategy,
+ std::move(bounds),
+ std::forward<Comparator>(cmp), _copier)
+ {}
+
+ template<class Comparator>
+ bool SimpleCursor<Comparator>::next() {
+ return _cur.consume_batch();
+ }
+
+ template<class Comparator>
+ void SimpleCursor<Comparator>::seek(const Slice &key) {
+ _cur.seek(key);
+ }
+
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler> DB::cursor(const DBTxn &txn, DBT *left, DBT *right,
+ Comparator &&cmp, Handler &&handler, int flags,
+ bool forward, bool end_exclusive, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ return CallbackCursor<Comparator, Handler>(*this, txn, flags, strategy,
+ Bounds(*this, Slice(*left), Slice(*right), end_exclusive),
+ std::forward<Comparator>(cmp), std::forward<Handler>(handler));
+ }
+
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler> DB::cursor(const DBTxn &txn, const Slice &start_key,
+ Comparator &&cmp, Handler &&handler, int flags,
+ bool forward, bool end_exclusive, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ Bounds bounds = forward
+ ? Bounds(*this, start_key, Bounds::Infinite(), end_exclusive)
+ : Bounds(*this, Bounds::Infinite(), start_key, end_exclusive);
+ return CallbackCursor<Comparator, Handler>(*this, txn, flags, strategy, std::move(bounds),
+ std::forward<Comparator>(cmp), std::forward<Handler>(handler));
+ }
+
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler> DB::cursor(const DBTxn &txn, const Slice &left, const Slice &right,
+ Comparator &&cmp, Handler &&handler, int flags,
+ bool forward, bool end_exclusive, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ return CallbackCursor<Comparator, Handler>(*this, txn, flags, strategy,
+ Bounds(*this, left, right, end_exclusive),
+ std::forward<Comparator>(cmp), std::forward<Handler>(handler));
+ }
+
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler> DB::cursor(const DBTxn &txn, Comparator &&cmp, Handler &&handler,
+ int flags, bool forward, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ return CallbackCursor<Comparator, Handler>(*this, txn, flags, strategy,
+ Bounds(*this, Bounds::Infinite(), Bounds::Infinite(), false),
+ std::forward<Comparator>(cmp), std::forward<Handler>(handler));
+ }
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate> DB::buffered_cursor(const DBTxn &txn, DBT *left, DBT *right,
+ Comparator &&cmp, Predicate &&filter, int flags,
+ bool forward, bool end_exclusive, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ return BufferedCursor<Comparator, Predicate>(*this, txn, flags, strategy,
+ Bounds(*this, Slice(*left), Slice(*right), end_exclusive),
+ std::forward<Comparator>(cmp), std::forward<Predicate>(filter));
+ }
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate> DB::buffered_cursor(const DBTxn &txn, const Slice &start_key,
+ Comparator &&cmp, Predicate &&filter, int flags,
+ bool forward, bool end_exclusive, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ Bounds bounds = forward
+ ? Bounds(*this, start_key, Bounds::Infinite(), end_exclusive)
+ : Bounds(*this, Bounds::Infinite(), start_key, end_exclusive);
+ return BufferedCursor<Comparator, Predicate>(*this, txn, flags, strategy, std::move(bounds),
+ std::forward<Comparator>(cmp), std::forward<Predicate>(filter));
+ }
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate> DB::buffered_cursor(const DBTxn &txn, const Slice &left, const Slice &right,
+ Comparator &&cmp, Predicate &&filter, int flags,
+ bool forward, bool end_exclusive, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ return BufferedCursor<Comparator, Predicate>(*this, txn, flags, strategy,
+ Bounds(*this, left, right, end_exclusive),
+ std::forward<Comparator>(cmp), std::forward<Predicate>(filter));
+ }
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate> DB::buffered_cursor(const DBTxn &txn, Comparator &&cmp, Predicate &&filter,
+ int flags, bool forward, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ return BufferedCursor<Comparator, Predicate>(*this, txn, flags, strategy,
+ Bounds(*this, Bounds::Infinite(), Bounds::Infinite(), false),
+ std::forward<Comparator>(cmp), std::forward<Predicate>(filter));
+ }
+
+ template<class Comparator>
+ SimpleCursor<Comparator> DB::simple_cursor(const DBTxn &txn, DBT *left, DBT *right,
+ Comparator &&cmp, Slice &key, Slice &val, int flags,
+ bool forward, bool end_exclusive, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ return SimpleCursor<Comparator>(*this, txn, flags, strategy,
+ Bounds(*this, Slice(*left), Slice(*right), end_exclusive),
+ std::forward<Comparator>(cmp), key, val);
+ }
+
+ template<class Comparator>
+ SimpleCursor<Comparator> DB::simple_cursor(const DBTxn &txn, const Slice &start_key,
+ Comparator &&cmp, Slice &key, Slice &val, int flags,
+ bool forward, bool end_exclusive, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ Bounds bounds = forward
+ ? Bounds(*this, start_key, Bounds::Infinite(), end_exclusive)
+ : Bounds(*this, Bounds::Infinite(), start_key, end_exclusive);
+ return SimpleCursor<Comparator>(*this, txn, flags, strategy, std::move(bounds),
+ std::forward<Comparator>(cmp), key, val);
+ }
+
+ template<class Comparator>
+ SimpleCursor<Comparator> DB::simple_cursor(const DBTxn &txn, const Slice &left, const Slice &right,
+ Comparator &&cmp, Slice &key, Slice &val, int flags,
+ bool forward, bool end_exclusive, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ return SimpleCursor<Comparator>(*this, txn, flags, strategy,
+ Bounds(*this, left, right, end_exclusive),
+ std::forward<Comparator>(cmp), key, val);
+ }
+
+ template<class Comparator>
+ SimpleCursor<Comparator> DB::simple_cursor(const DBTxn &txn, Comparator &&cmp, Slice &key, Slice &val,
+ int flags, bool forward, bool prelock) const {
+ IterationStrategy strategy(forward, prelock);
+ return SimpleCursor<Comparator>(*this, txn, flags, strategy,
+ Bounds(*this, Bounds::Infinite(), Bounds::Infinite(), false),
+ std::forward<Comparator>(cmp), key, val);
+ }
+
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler> DBEnv::cursor(const DBTxn &txn, Comparator &&cmp, Handler &&handler) const {
+ return CallbackCursor<Comparator, Handler>(*this, txn, std::forward<Comparator>(cmp), std::forward<Handler>(handler));
+ }
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate> DBEnv::buffered_cursor(const DBTxn &txn, Comparator &&cmp, Predicate &&filter) const {
+ return BufferedCursor<Comparator, Predicate>(*this, txn, std::forward<Comparator>(cmp), std::forward<Predicate>(filter));
+ }
+
+ template<class Comparator>
+ SimpleCursor<Comparator> DBEnv::simple_cursor(const DBTxn &txn, Comparator &&cmp, Slice &key, Slice &val) const {
+ return SimpleCursor<Comparator>(*this, txn, std::forward<Comparator>(cmp), key, val);
+ }
+
+} // namespace ftcxx
diff --git a/storage/tokudb/PerconaFT/ftcxx/cursor.cpp b/storage/tokudb/PerconaFT/ftcxx/cursor.cpp
new file mode 100644
index 00000000..e8427bec
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/cursor.cpp
@@ -0,0 +1,136 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+
+#include "cursor.hpp"
+#include "db.hpp"
+#include "db_env.hpp"
+#include "db_txn.hpp"
+#include "exceptions.hpp"
+
+namespace ftcxx {
+
+ DBC::DBC(const DB &db, const DBTxn &txn, int flags)
+ : _txn(),
+ _dbc(nullptr)
+ {
+ if (db.db() != nullptr) {
+ DB_TXN *txnp = txn.txn();
+ if (txnp == nullptr) {
+ _txn = DBTxn(DBEnv(db.db()->dbenv), DB_TXN_READ_ONLY | DB_READ_UNCOMMITTED);
+ txnp = _txn.txn();
+ }
+
+ ::DBC *c;
+ int r = db.db()->cursor(db.db(), txnp, &c, flags);
+ handle_ft_retval(r);
+ _dbc = c;
+ }
+ }
+
+ DBC::DBC(const DBEnv &env, const DBTxn &txn)
+ : _txn(),
+ _dbc(nullptr)
+ {
+ if (env.env() != nullptr) {
+ DB_TXN *txnp = txn.txn();
+ if (txnp == nullptr) {
+ _txn = DBTxn(env, DB_TXN_READ_ONLY | DB_READ_UNCOMMITTED);
+ txnp = _txn.txn();
+ }
+
+ ::DBC *c;
+ int r = env.env()->get_cursor_for_directory(env.env(), txnp, &c);
+ handle_ft_retval(r);
+ _dbc = c;
+ }
+ }
+
+ DBC::~DBC() {
+ if (_dbc != nullptr) {
+ close();
+ }
+ }
+
+ void DBC::close() {
+ int r = _dbc->c_close(_dbc);
+ handle_ft_retval(r);
+ _dbc = nullptr;
+ }
+
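+    // set_range does two things: it registers the range bounds with the
+    // engine via c_set_bounds (prelocking them when the strategy asks for
+    // it), then positions the cursor at the first row in iteration order,
+    // streaming rows to `callback`. A return of -1 from the callback is
+    // treated as a normal early stop rather than an error.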
+ bool DBC::set_range(const IterationStrategy &strategy, const Bounds &bounds, YDB_CALLBACK_FUNCTION callback, void *extra) const {
+ int r = dbc()->c_set_bounds(dbc(), bounds.left_dbt(), bounds.right_dbt(), strategy.prelock, 0);
+ handle_ft_retval(r);
+
+ if (strategy.forward) {
+ if (bounds.left_infinite()) {
+ r = dbc()->c_getf_first(dbc(), strategy.getf_flags(), callback, extra);
+ } else {
+ r = dbc()->c_getf_set_range(dbc(), strategy.getf_flags(), const_cast<DBT *>(bounds.left_dbt()), callback, extra);
+ }
+ } else {
+ if (bounds.right_infinite()) {
+ r = dbc()->c_getf_last(dbc(), strategy.getf_flags(), callback, extra);
+ } else {
+ r = dbc()->c_getf_set_range_reverse(dbc(), strategy.getf_flags(), const_cast<DBT *>(bounds.right_dbt()), callback, extra);
+ }
+ }
+ if (r == DB_NOTFOUND) {
+ return false;
+ } else if (r != 0 && r != -1) {
+ handle_ft_retval(r);
+ }
+ return true;
+ }
+
+ bool DBC::advance(const IterationStrategy &strategy, YDB_CALLBACK_FUNCTION callback, void *extra) const {
+ int r;
+ if (strategy.forward) {
+ r = dbc()->c_getf_next(dbc(), strategy.getf_flags(), callback, extra);
+ } else {
+ r = dbc()->c_getf_prev(dbc(), strategy.getf_flags(), callback, extra);
+ }
+ if (r == DB_NOTFOUND) {
+ return false;
+ } else if (r != 0 && r != -1) {
+ handle_ft_retval(r);
+ }
+ return true;
+ }
+
+} // namespace ftcxx
diff --git a/storage/tokudb/PerconaFT/ftcxx/cursor.hpp b/storage/tokudb/PerconaFT/ftcxx/cursor.hpp
new file mode 100644
index 00000000..bde5dbf2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/cursor.hpp
@@ -0,0 +1,417 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <utility>
+
+#include <db.h>
+
+#include "buffer.hpp"
+#include "db.hpp"
+#include "db_env.hpp"
+#include "db_txn.hpp"
+#include "slice.hpp"
+
+namespace ftcxx {
+
+ class DB;
+
+ struct IterationStrategy {
+ bool forward;
+ bool prelock;
+
+ IterationStrategy(bool forward_, bool prelock_)
+ : forward(forward_),
+ prelock(prelock_)
+ {}
+
+ int getf_flags() const {
+ if (prelock) {
+ return DB_PRELOCKED | DB_PRELOCKED_WRITE;
+ } else {
+ return DBC_DISABLE_PREFETCHING;
+ }
+ }
+ };
+
+ class Bounds {
+ const ::DB *_db;
+ Slice _left;
+ Slice _right;
+ DBT _left_dbt;
+ DBT _right_dbt;
+ bool _left_infinite;
+ bool _right_infinite;
+ bool _end_exclusive;
+
+ public:
+ Bounds(const DB &db, const Slice &left, const Slice &right, bool end_exclusive)
+ : _db(db.db()),
+ _left(left.owned()),
+ _right(right.owned()),
+ _left_dbt(_left.dbt()),
+ _right_dbt(_right.dbt()),
+ _left_infinite(false),
+ _right_infinite(false),
+ _end_exclusive(end_exclusive)
+ {}
+
+ struct Infinite {};
+
+ Bounds(const DB &db, Infinite, const Slice &right, bool end_exclusive)
+ : _db(db.db()),
+ _left(),
+ _right(right.owned()),
+ _left_dbt(_left.dbt()),
+ _right_dbt(_right.dbt()),
+ _left_infinite(true),
+ _right_infinite(false),
+ _end_exclusive(end_exclusive)
+ {}
+
+ Bounds(const DB &db, const Slice &left, Infinite, bool end_exclusive)
+ : _db(db.db()),
+ _left(left.owned()),
+ _right(),
+ _left_dbt(_left.dbt()),
+ _right_dbt(_right.dbt()),
+ _left_infinite(false),
+ _right_infinite(true),
+ _end_exclusive(end_exclusive)
+ {}
+
+ Bounds(const DB &db, Infinite, Infinite, bool end_exclusive)
+ : _db(db.db()),
+ _left(),
+ _right(),
+ _left_dbt(_left.dbt()),
+ _right_dbt(_right.dbt()),
+ _left_infinite(true),
+ _right_infinite(true),
+ _end_exclusive(end_exclusive)
+ {}
+
+ Bounds(const Bounds &other) = delete;
+ Bounds& operator=(const Bounds &) = delete;
+
+ Bounds(Bounds &&o)
+ : _db(nullptr),
+ _left(),
+ _right(),
+ _left_infinite(o._left_infinite),
+ _right_infinite(o._right_infinite),
+ _end_exclusive(o._end_exclusive)
+ {
+ std::swap(_db, o._db);
+ std::swap(_left, o._left);
+ std::swap(_right, o._right);
+ _left_dbt = _left.dbt();
+ _right_dbt = _right.dbt();
+ }
+
+ Bounds& operator=(Bounds&& other) {
+ std::swap(_db, other._db);
+ std::swap(_left, other._left);
+ std::swap(_right, other._right);
+ _left_dbt = _left.dbt();
+ _right_dbt = _right.dbt();
+ _left_infinite = other._left_infinite;
+ _right_infinite = other._right_infinite;
+ _end_exclusive = other._end_exclusive;
+ return *this;
+ }
+
+ const DBT *left_dbt() const {
+ if (_left_infinite) {
+ return _db->dbt_neg_infty();
+ } else {
+ return &_left_dbt;
+ }
+ }
+
+ const DBT *right_dbt() const {
+ if (_right_infinite) {
+ return _db->dbt_pos_infty();
+ } else {
+ return &_right_dbt;
+ }
+ }
+
+ void set_left(const Slice &left) {
+ _left = left.owned();
+ _left_dbt = _left.dbt();
+ _left_infinite = false;
+ }
+
+ void set_right(const Slice &right) {
+ _right = right.owned();
+ _right_dbt = _right.dbt();
+ _right_infinite = false;
+ }
+
+ bool left_infinite() const { return _left_infinite; }
+ bool right_infinite() const { return _right_infinite; }
+
+ template<class Comparator>
+ bool check(Comparator &cmp, const IterationStrategy &strategy, const Slice &key) const;
+ };
+
+ /**
+     * DBC is a simple RAII wrapper around a ::DBC handle.
+ */
+ class DBC {
+ public:
+ DBC(const DB &db, const DBTxn &txn=DBTxn(), int flags=0);
+ ~DBC();
+
+ // Directory cursor.
+ DBC(const DBEnv &env, const DBTxn &txn=DBTxn());
+
+ DBC(const DBC &) = delete;
+ DBC& operator=(const DBC &) = delete;
+
+ DBC(DBC &&o)
+ : _txn(),
+ _dbc(nullptr)
+ {
+ std::swap(_txn, o._txn);
+ std::swap(_dbc, o._dbc);
+ }
+
+ DBC& operator=(DBC &&o) {
+ std::swap(_txn, o._txn);
+ std::swap(_dbc, o._dbc);
+ return *this;
+ }
+
+ ::DBC *dbc() const { return _dbc; }
+
+ void set_txn(const DBTxn &txn) const {
+ _dbc->c_set_txn(_dbc, txn.txn());
+ }
+
+ void close();
+
+ bool set_range(const IterationStrategy &strategy, const Bounds &bounds, YDB_CALLBACK_FUNCTION callback, void *extra) const;
+
+ bool advance(const IterationStrategy &strategy, YDB_CALLBACK_FUNCTION callback, void *extra) const;
+
+ protected:
+
+ // the ordering here matters, for destructors
+ DBTxn _txn;
+ ::DBC *_dbc;
+ };
+
+ /**
+     * CallbackCursor supports iterating over a key range, delivering
+     * each key/value pair to a caller-supplied handler with engine-side
+     * bulk fetching.
+ */
+ template<class Comparator, class Handler>
+ class CallbackCursor {
+ public:
+
+ /**
+ * Directory cursor.
+ */
+ CallbackCursor(const DBEnv &env, const DBTxn &txn,
+ Comparator &&cmp, Handler &&handler);
+
+ /**
+         * Constructs a cursor. Prefer DB::cursor, which avoids
+         * spelling out the template parameters.
+ */
+ CallbackCursor(const DB &db, const DBTxn &txn, int flags,
+ IterationStrategy iteration_strategy,
+ Bounds bounds,
+ Comparator &&cmp, Handler &&handler);
+
+ /**
+         * Consumes the next batch of rows, delivering each one to the
+         * handler. Returns true if the iteration may have more data,
+         * and false once the range is exhausted.
+ */
+ bool consume_batch();
+
+ void seek(const Slice &key);
+
+ bool finished() const { return _finished; }
+
+ bool ok() const { return !finished(); }
+
+ void set_txn(const DBTxn &txn) const { _dbc.set_txn(txn); }
+
+ private:
+
+ DBC _dbc;
+ IterationStrategy _iteration_strategy;
+ Bounds _bounds;
+ Comparator _cmp;
+ Handler _handler;
+
+ bool _finished;
+
+ void init();
+
+ static int getf_callback(const DBT *key, const DBT *val, void *extra) {
+ CallbackCursor *i = static_cast<CallbackCursor *>(extra);
+ return i->getf(key, val);
+ }
+
+ int getf(const DBT *key, const DBT *val);
+ };
+
+ template<class Predicate>
+ class BufferAppender {
+ Buffer &_buf;
+ Predicate _filter;
+
+ public:
+ BufferAppender(Buffer &buf, Predicate &&filter)
+ : _buf(buf),
+ _filter(std::forward<Predicate>(filter))
+ {}
+
+ bool operator()(const DBT *key, const DBT *val);
+
+ static size_t marshalled_size(size_t keylen, size_t vallen) {
+ return (sizeof(((DBT *)0)->size)) + (sizeof(((DBT *)0)->size)) + keylen + vallen;
+ }
+
+ static void marshall(char *dest, const DBT *key, const DBT *val);
+
+ static void unmarshall(char *src, DBT *key, DBT *val);
+ static void unmarshall(char *src, Slice &key, Slice &val);
+ };
+
+ template<class Comparator, class Predicate>
+ class BufferedCursor {
+ public:
+
+ /**
+ * Directory cursor.
+ */
+ BufferedCursor(const DBEnv &env, const DBTxn &txn,
+ Comparator &&cmp, Predicate &&filter);
+
+ /**
+         * Constructs a buffered cursor. Prefer DB::buffered_cursor,
+         * which avoids spelling out the template parameters.
+ */
+ BufferedCursor(const DB &db, const DBTxn &txn, int flags,
+ IterationStrategy iteration_strategy,
+ Bounds bounds,
+ Comparator &&cmp, Predicate &&filter);
+
+ /**
+ * Gets the next key/val pair in the iteration. Returns true
+ * if there is more data, and fills in key and val. If the
+ * range is exhausted, returns false.
+ */
+ bool next(DBT *key, DBT *val);
+ bool next(Slice &key, Slice &val);
+
+ void seek(const Slice &key);
+
+ bool ok() const {
+ return _cur.ok() || _buf.more();
+ }
+
+ void set_txn(const DBTxn &txn) const { _cur.set_txn(txn); }
+
+ private:
+
+ typedef BufferAppender<Predicate> Appender;
+
+ Buffer _buf;
+ CallbackCursor<Comparator, Appender> _cur;
+ };
+
+ template<class Comparator>
+ class SimpleCursor {
+ public:
+ SimpleCursor(const DBEnv &env, const DBTxn &txn,
+ Comparator &&cmp, Slice &key, Slice &val);
+
+ SimpleCursor(const DB &db, const DBTxn &txn, int flags,
+ IterationStrategy iteration_strategy,
+ Bounds bounds, Comparator &&cmp,
+ Slice &key, Slice &val);
+
+ /**
+ * Gets the next key/val pair in the iteration. Copies data
+ * directly into key and val, which will own their buffers.
+ */
+ bool next();
+
+ void seek(const Slice &key);
+
+ bool ok() const {
+ return _cur.ok();
+ }
+
+ void set_txn(const DBTxn &txn) const { _cur.set_txn(txn); }
+
+ class SliceCopier {
+ Slice &_key;
+ Slice &_val;
+
+ public:
+ SliceCopier(Slice &key, Slice &val)
+ : _key(key),
+ _val(val)
+ {}
+
+ bool operator()(const DBT *key, const DBT *val) {
+ _key = Slice(*key).owned();
+ _val = Slice(*val).owned();
+
+ // Don't bulk fetch.
+ return false;
+ }
+ };
+
+ private:
+
+ SliceCopier _copier;
+ CallbackCursor<Comparator, SliceCopier&> _cur;
+ };
+
+} // namespace ftcxx
+
+#include "cursor-inl.hpp"
diff --git a/storage/tokudb/PerconaFT/ftcxx/db.hpp b/storage/tokudb/PerconaFT/ftcxx/db.hpp
new file mode 100644
index 00000000..7f5af9b6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/db.hpp
@@ -0,0 +1,370 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <string>
+
+#include <db.h>
+
+#include "db_env.hpp"
+#include "db_txn.hpp"
+#include "exceptions.hpp"
+#include "slice.hpp"
+#include "stats.hpp"
+
+namespace ftcxx {
+
+ template<class Comparator, class Handler>
+ class CallbackCursor;
+ template<class Comparator, class Predicate>
+ class BufferedCursor;
+ template<class Comparator>
+ class SimpleCursor;
+
+ class DB {
+ public:
+ DB()
+ : _db(nullptr),
+ _close_on_destroy(false)
+ {}
+
+ explicit DB(::DB *d, bool close_on_destroy=false)
+ : _db(d),
+ _close_on_destroy(close_on_destroy)
+ {}
+
+ ~DB() {
+ if (_db && _close_on_destroy) {
+ close();
+ }
+ }
+
+ DB(const DB &) = delete;
+ DB& operator=(const DB &) = delete;
+
+ DB(DB &&o)
+ : _db(nullptr),
+ _close_on_destroy(false)
+ {
+ std::swap(_db, o._db);
+ std::swap(_close_on_destroy, o._close_on_destroy);
+ }
+
+ DB& operator=(DB &&o) {
+ std::swap(_db, o._db);
+ std::swap(_close_on_destroy, o._close_on_destroy);
+ return *this;
+ }
+
+ ::DB *db() const { return _db; }
+
+ Slice descriptor() const {
+ return Slice(_db->cmp_descriptor->dbt);
+ }
+
+ template<typename Callback>
+ int getf_set(const DBTxn &txn, const Slice &key, int flags, Callback cb) const {
+ class WrappedCallback {
+ Callback &_cb;
+ public:
+ WrappedCallback(Callback &cb_)
+ : _cb(cb_)
+ {}
+
+ static int call(const DBT *key_, const DBT *val_, void *extra) {
+ WrappedCallback *wc = static_cast<WrappedCallback *>(extra);
+ return wc->call(key_, val_);
+ }
+
+ int call(const DBT *key_, const DBT *val_) {
+ return _cb(Slice(*key_), Slice(*val_));
+ }
+ } wc(cb);
+
+ DBT kdbt = key.dbt();
+ return _db->getf_set(_db, txn.txn(), flags, &kdbt, &WrappedCallback::call, &wc);
+ }
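+
+        // A minimal sketch of a point lookup through getf_set (hypothetical
+        // caller code; `key` and the lambda body are placeholders):
+        //
+        //     int r = db.getf_set(txn, key, 0,
+        //                         [](const Slice &k, const Slice &v) {
+        //                             /* copy out v if needed */
+        //                             return 0;
+        //                         });
+        //     // r is the engine's return code, e.g. 0 on success or
+        //     // DB_NOTFOUND when the key is absent.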
+
+ int put(const DBTxn &txn, DBT *key, DBT *val, int flags=0) const {
+ return _db->put(_db, txn.txn(), key, val, flags);
+ }
+
+ int put(const DBTxn &txn, const Slice &key, const Slice &val, int flags=0) const {
+ DBT kdbt = key.dbt();
+ DBT vdbt = val.dbt();
+ return put(txn, &kdbt, &vdbt, flags);
+ }
+
+ int update(const DBTxn &txn, DBT *key, DBT *val, int flags=0) const {
+ return _db->update(_db, txn.txn(), key, val, flags);
+ }
+
+ int update(const DBTxn &txn, const Slice &key, const Slice &extra, int flags=0) const {
+ DBT kdbt = key.dbt();
+ DBT edbt = extra.dbt();
+ return update(txn, &kdbt, &edbt, flags);
+ }
+
+ int del(const DBTxn &txn, DBT *key, int flags=0) const {
+ return _db->del(_db, txn.txn(), key, flags);
+ }
+
+ int del(const DBTxn &txn, const Slice &key, int flags=0) const {
+ DBT kdbt = key.dbt();
+ return _db->del(_db, txn.txn(), &kdbt, flags);
+ }
+
+ template<class OptimizeCallback>
+ int hot_optimize(const Slice &left, const Slice &right, OptimizeCallback callback, uint64_t *loops_run = NULL) const {
+ DBT ldbt = left.dbt();
+ DBT rdbt = right.dbt();
+
+ class WrappedOptimizeCallback {
+ OptimizeCallback &_oc;
+ size_t _loops;
+
+ public:
+ WrappedOptimizeCallback(OptimizeCallback &oc)
+ : _oc(oc),
+ _loops(0)
+ {}
+
+ static int call(void *extra, float progress) {
+ WrappedOptimizeCallback *e = static_cast<WrappedOptimizeCallback *>(extra);
+ return e->_oc(progress, ++e->_loops);
+ }
+ } woc(callback);
+
+ uint64_t dummy;
+ return _db->hot_optimize(_db, &ldbt, &rdbt,
+ &WrappedOptimizeCallback::call, &woc,
+ loops_run == NULL ? &dummy : loops_run);
+ }
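+
+        // A minimal sketch of driving hot_optimize across a key range
+        // (hypothetical caller code; `left`, `right` and the progress lambda
+        // are placeholders):
+        //
+        //     uint64_t loops = 0;
+        //     int r = db.hot_optimize(left, right,
+        //                             [](float progress, uint64_t loops_so_far) {
+        //                                 return 0;  // 0 = keep optimizing
+        //                             },
+        //                             &loops);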
+
+ Stats get_stats() const {
+ Stats stats;
+ DB_BTREE_STAT64 s = {0, 0, 0, 0, 0, 0, 0};
+ int r = _db->stat64(_db, NULL, &s);
+ handle_ft_retval(r);
+ stats.data_size = s.bt_dsize;
+ stats.file_size = s.bt_fsize;
+ stats.num_keys = s.bt_nkeys;
+ return stats;
+ }
+
+ struct NullFilter {
+ bool operator()(const Slice &, const Slice &) {
+ return true;
+ }
+ };
+
+ /**
+ * Constructs a Cursor over this DB, over the range from left to
+ * right (or right to left if !forward).
+ */
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler> cursor(const DBTxn &txn, DBT *left, DBT *right,
+ Comparator &&cmp, Handler &&handler, int flags=0,
+ bool forward=true, bool end_exclusive=false, bool prelock=false) const;
+
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler> cursor(const DBTxn &txn, const Slice &start_key,
+ Comparator &&cmp, Handler &&handler, int flags=0,
+ bool forward=true, bool end_exclusive=false, bool prelock=false) const;
+
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler> cursor(const DBTxn &txn, const Slice &left, const Slice &right,
+ Comparator &&cmp, Handler &&handler, int flags=0,
+ bool forward=true, bool end_exclusive=false, bool prelock=false) const;
+
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler> cursor(const DBTxn &txn, Comparator &&cmp, Handler &&handler,
+ int flags=0, bool forward=true, bool prelock=false) const;
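+
+        // A minimal sketch of using these cursor() overloads (hypothetical
+        // caller code; `my_cmp` and the `left`/`right` slices are
+        // placeholders):
+        //
+        //     auto cur = db.cursor(txn, left, right, my_cmp,
+        //                          [](const DBT *k, const DBT *v) {
+        //                              /* inspect k and v */
+        //                              return true;  // keep bulk-fetching
+        //                          });
+        //     while (cur.consume_batch()) {
+        //         // rows are delivered through the handler above
+        //     }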
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate> buffered_cursor(const DBTxn &txn, DBT *left, DBT *right,
+ Comparator &&cmp, Predicate &&filter, int flags=0,
+ bool forward=true, bool end_exclusive=false, bool prelock=false) const;
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate> buffered_cursor(const DBTxn &txn, const Slice &start_key,
+ Comparator &&cmp, Predicate &&filter, int flags=0,
+ bool forward=true, bool end_exclusive=false, bool prelock=false) const;
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate> buffered_cursor(const DBTxn &txn, const Slice &left, const Slice &right,
+ Comparator &&cmp, Predicate &&filter, int flags=0,
+ bool forward=true, bool end_exclusive=false, bool prelock=false) const;
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate> buffered_cursor(const DBTxn &txn, Comparator &&cmp, Predicate &&filter,
+ int flags=0, bool forward=true, bool prelock=false) const;
+
+ template<class Comparator>
+ SimpleCursor<Comparator> simple_cursor(const DBTxn &txn, DBT *left, DBT *right,
+ Comparator &&cmp, Slice &key, Slice &val, int flags=0,
+ bool forward=true, bool end_exclusive=false, bool prelock=false) const;
+
+ template<class Comparator>
+ SimpleCursor<Comparator> simple_cursor(const DBTxn &txn, const Slice &start_key,
+ Comparator &&cmp, Slice &key, Slice &val, int flags=0,
+ bool forward=true, bool end_exclusive=false, bool prelock=false) const;
+
+ template<class Comparator>
+ SimpleCursor<Comparator> simple_cursor(const DBTxn &txn, const Slice &left, const Slice &right,
+ Comparator &&cmp, Slice &key, Slice &val, int flags=0,
+ bool forward=true, bool end_exclusive=false, bool prelock=false) const;
+
+ template<class Comparator>
+ SimpleCursor<Comparator> simple_cursor(const DBTxn &txn, Comparator &&cmp, Slice &key, Slice &val,
+ int flags=0, bool forward=true, bool prelock=false) const;
+
+ void close() {
+ int r = _db->close(_db, 0);
+ handle_ft_retval(r);
+ _db = nullptr;
+ }
+
+ private:
+ ::DB *_db;
+ bool _close_on_destroy;
+ };
+
+ class DBBuilder {
+ uint32_t _readpagesize;
+ int _compression_method;
+ uint32_t _fanout;
+ uint8_t _memcmp_magic;
+ uint32_t _pagesize;
+ Slice _descriptor;
+
+ public:
+ DBBuilder()
+ : _readpagesize(0),
+ _compression_method(-1),
+ _fanout(0),
+ _memcmp_magic(0),
+ _pagesize(0),
+ _descriptor()
+ {}
+
+ DB open(const DBEnv &env, const DBTxn &txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) const {
+ ::DB *db;
+ int r = db_create(&db, env.env(), 0);
+ handle_ft_retval(r);
+
+ if (_readpagesize) {
+ r = db->set_readpagesize(db, _readpagesize);
+ handle_ft_retval(r);
+ }
+
+ if (_compression_method >= 0) {
+ r = db->set_compression_method(db, TOKU_COMPRESSION_METHOD(_compression_method));
+ handle_ft_retval(r);
+ }
+
+ if (_fanout) {
+ r = db->set_fanout(db, _fanout);
+ handle_ft_retval(r);
+ }
+
+ if (_memcmp_magic) {
+ r = db->set_memcmp_magic(db, _memcmp_magic);
+ handle_ft_retval(r);
+ }
+
+ if (_pagesize) {
+ r = db->set_pagesize(db, _pagesize);
+ handle_ft_retval(r);
+ }
+
+ const DBTxn *txnp = &txn;
+ DBTxn writeTxn;
+ if (txn.is_read_only()) {
+ writeTxn = DBTxn(env, DB_SERIALIZABLE);
+ txnp = &writeTxn;
+ }
+
+ r = db->open(db, txnp->txn(), fname, dbname, dbtype, flags, mode);
+ handle_ft_retval(r);
+
+ if (!_descriptor.empty()) {
+ DBT desc = _descriptor.dbt();
+ r = db->change_descriptor(db, txnp->txn(), &desc, DB_UPDATE_CMP_DESCRIPTOR);
+ handle_ft_retval(r);
+ }
+
+ if (txn.is_read_only()) {
+ writeTxn.commit();
+ }
+
+ return DB(db, true);
+ }
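+
+        // A minimal sketch of the intended chained usage (hypothetical
+        // caller code; the dictionary name and option values are
+        // placeholders):
+        //
+        //     ftcxx::DB db = ftcxx::DBBuilder()
+        //                        .set_pagesize(4 << 20)
+        //                        .set_fanout(16)
+        //                        .open(env, txn, "main", NULL, DB_BTREE,
+        //                              DB_CREATE, 0644);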
+
+ DBBuilder& set_readpagesize(uint32_t readpagesize) {
+ _readpagesize = readpagesize;
+ return *this;
+ }
+
+ DBBuilder& set_compression_method(TOKU_COMPRESSION_METHOD _compressionmethod) {
+ _compression_method = int(_compressionmethod);
+ return *this;
+ }
+
+ DBBuilder& set_fanout(uint32_t fanout) {
+ _fanout = fanout;
+ return *this;
+ }
+
+ DBBuilder& set_memcmp_magic(uint8_t _memcmpmagic) {
+ _memcmp_magic = _memcmpmagic;
+ return *this;
+ }
+
+ DBBuilder& set_pagesize(uint32_t pagesize) {
+ _pagesize = pagesize;
+ return *this;
+ }
+
+ DBBuilder& set_descriptor(const Slice &desc) {
+ _descriptor = desc.owned();
+ return *this;
+ }
+ };
+
+} // namespace ftcxx
diff --git a/storage/tokudb/PerconaFT/ftcxx/db_env-inl.hpp b/storage/tokudb/PerconaFT/ftcxx/db_env-inl.hpp
new file mode 100644
index 00000000..43ab86ec
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/db_env-inl.hpp
@@ -0,0 +1,75 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+#include "db.hpp"
+#include "slice.hpp"
+
+namespace ftcxx {
+
+ typedef int (*slice_compare_func)(const Slice &desc, const Slice &key, const Slice &val);
+
+ template<slice_compare_func slice_cmp>
+ int wrapped_comparator(::DB *db, const DBT *a, const DBT *b) {
+ return slice_cmp(DB(db).descriptor(), Slice(*a), Slice(*b));
+ }
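+
+    // A minimal sketch of adapting a Slice-based ordering (my_slice_cmp
+    // below is a placeholder, not part of ftcxx; how the two Slices are
+    // actually compared is up to the caller):
+    //
+    //     int my_slice_cmp(const Slice &desc, const Slice &a, const Slice &b) {
+    //         return /* negative, zero or positive ordering of a vs b */;
+    //     }
+    //
+    //     // &wrapped_comparator<my_slice_cmp> then has the
+    //     // int (*)(::DB *, const DBT *, const DBT *) signature the engine
+    //     // expects for its key comparison callback.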
+
+ class SetvalFunc {
+ void (*_setval)(const DBT *, void *);
+ void *_extra;
+ public:
+ SetvalFunc(void (*setval)(const DBT *, void *), void *extra)
+ : _setval(setval),
+ _extra(extra)
+ {}
+ void operator()(const Slice &new_val) {
+ DBT vdbt = new_val.dbt();
+ _setval(&vdbt, _extra);
+ }
+ };
+
+ typedef int (*slice_update_func)(const Slice &desc, const Slice &key, const Slice &old_val, const Slice &extra, SetvalFunc callback);
+
+ template<slice_update_func slice_update>
+ int wrapped_updater(::DB *db, const DBT *key, const DBT *old_val, const DBT *extra, void (*setval)(const DBT *, void *), void *setval_extra) {
+ return slice_update(DB(db).descriptor(), Slice(*key), Slice(*old_val), Slice(*extra), SetvalFunc(setval, setval_extra));
+ }
+
+} // namespace ftcxx
diff --git a/storage/tokudb/PerconaFT/ftcxx/db_env.cpp b/storage/tokudb/PerconaFT/ftcxx/db_env.cpp
new file mode 100644
index 00000000..bb8df22c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/db_env.cpp
@@ -0,0 +1,70 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include <db.h>
+
+#include "db_env.hpp"
+
+namespace ftcxx {
+
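+    // get_status asks the engine how many status rows it currently exposes,
+    // allocates a buffer of that size, fetches the rows together with the
+    // redzone and panic state, and copies everything into the caller's map
+    // keyed by row name.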
+    void DBEnv::get_status(DBEnv::Status &status, fs_redzone_state &redzone_state, uint64_t &env_panic, std::string &panic_string) const {
+ uint64_t num_rows;
+ int r = _env->get_engine_status_num_rows(_env, &num_rows);
+ handle_ft_retval(r);
+
+ std::unique_ptr<TOKU_ENGINE_STATUS_ROW_S[]> buf(new TOKU_ENGINE_STATUS_ROW_S[num_rows]);
+ char panic_string_buf[1<<12];
+ panic_string_buf[0] = '\0';
+
+ r = _env->get_engine_status(_env, buf.get(), num_rows, &num_rows,
+ &redzone_state,
+ &env_panic, panic_string_buf, sizeof panic_string_buf,
+ toku_engine_status_include_type(TOKU_ENGINE_STATUS | TOKU_GLOBAL_STATUS));
+ handle_ft_retval(r);
+
+ panic_string = std::string(panic_string_buf);
+
+ for (uint64_t i = 0; i < num_rows; ++i) {
+ status[buf[i].keyname] = buf[i];
+ }
+ }
+
+} // namespace ftcxx
diff --git a/storage/tokudb/PerconaFT/ftcxx/db_env.hpp b/storage/tokudb/PerconaFT/ftcxx/db_env.hpp
new file mode 100644
index 00000000..15b5ce55
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/db_env.hpp
@@ -0,0 +1,466 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <errno.h>
+
+#include <map>
+#include <string>
+
+#include <db.h>
+
+#include "exceptions.hpp"
+#include "slice.hpp"
+
+namespace ftcxx {
+
+ template<class Comparator, class Handler>
+ class CallbackCursor;
+ template<class Comparator, class Predicate>
+ class BufferedCursor;
+ template<class Comparator>
+ class SimpleCursor;
+
+ class DBTxn;
+
+ class DBEnv {
+ public:
+ explicit DBEnv(DB_ENV *e, bool close_on_destroy=false)
+ : _env(e),
+ _close_on_destroy(close_on_destroy)
+ {}
+
+ ~DBEnv() {
+ if (_env && _close_on_destroy) {
+ close();
+ }
+ }
+
+ DBEnv(const DBEnv &) = delete;
+ DBEnv& operator=(const DBEnv &) = delete;
+
+ DBEnv(DBEnv &&o)
+ : _env(nullptr),
+ _close_on_destroy(false)
+ {
+ std::swap(_env, o._env);
+ std::swap(_close_on_destroy, o._close_on_destroy);
+ }
+
+ DBEnv& operator=(DBEnv &&o) {
+ std::swap(_env, o._env);
+ std::swap(_close_on_destroy, o._close_on_destroy);
+ return *this;
+ }
+
+ DB_ENV *env() const { return _env; }
+
+ void close() {
+ int r = _env->close(_env, 0);
+ handle_ft_retval(r);
+ _env = nullptr;
+ }
+
+ typedef std::map<std::string, TOKU_ENGINE_STATUS_ROW_S> Status;
+ void get_status(Status &status, fs_redzone_state &redzone_state, uint64_t &env_panic, std::string &panic_string) const;
+
+ void log_flush() {
+ int r = _env->log_flush(_env, NULL);
+ handle_ft_retval(r);
+ }
+
+ int checkpointing_set_period(uint32_t period) {
+ if (!_env) {
+ return EINVAL;
+ }
+ _env->checkpointing_set_period(_env, period);
+ return 0;
+ }
+
+ int cleaner_set_iterations(uint32_t iterations) {
+ if (!_env) {
+ return EINVAL;
+ }
+ _env->cleaner_set_iterations(_env, iterations);
+ return 0;
+ }
+
+ int cleaner_set_period(uint32_t period) {
+ if (!_env) {
+ return EINVAL;
+ }
+ _env->cleaner_set_period(_env, period);
+ return 0;
+ }
+
+ int change_fsync_log_period(uint32_t period) {
+ if (!_env) {
+ return EINVAL;
+ }
+ _env->change_fsync_log_period(_env, period);
+ return 0;
+ }
+
+ uint64_t get_engine_status_num_rows() {
+ if (!_env) {
+ handle_ft_retval(EINVAL); // throws
+ }
+ uint64_t ret;
+ int r = _env->get_engine_status_num_rows(_env, &ret);
+ handle_ft_retval(r);
+ return ret;
+ }
+
+ void get_engine_status(TOKU_ENGINE_STATUS_ROW_S *rows, uint64_t max_rows, uint64_t &num_rows,
+ uint64_t &panic, std::string &panic_string,
+ toku_engine_status_include_type include_type) {
+ if (!_env) {
+ handle_ft_retval(EINVAL);
+ }
+ fs_redzone_state dummy; // this is duplicated in the actual engine status output
+ const size_t panic_string_len = 1024;
+ char panic_string_buf[panic_string_len];
+ panic_string_buf[0] = '\0';
+ int r = _env->get_engine_status(_env, rows, max_rows, &num_rows,
+ &dummy, &panic, panic_string_buf, panic_string_len,
+ include_type);
+ handle_ft_retval(r);
+ panic_string = panic_string_buf;
+ }
+
+ /**
+ * Constructs a Cursor over this DBEnv's directory.
+ */
+ template<class Comparator, class Handler>
+ CallbackCursor<Comparator, Handler> cursor(const DBTxn &txn, Comparator &&cmp, Handler &&handler) const;
+
+ template<class Comparator, class Predicate>
+ BufferedCursor<Comparator, Predicate> buffered_cursor(const DBTxn &txn, Comparator &&cmp, Predicate &&filter) const;
+
+ template<class Comparator>
+ SimpleCursor<Comparator> simple_cursor(const DBTxn &txn, Comparator &&cmp, Slice &key, Slice &val) const;
+
+ private:
+ DB_ENV *_env;
+ bool _close_on_destroy;
+ };
+
+ class DBEnvBuilder {
+ typedef int (*bt_compare_func)(DB *, const DBT *, const DBT *);
+ bt_compare_func _bt_compare;
+
+ typedef int (*update_func)(DB *, const DBT *, const DBT *, const DBT *, void (*)(const DBT *, void *), void *);
+ update_func _update_function;
+
+ generate_row_for_put_func _generate_row_for_put;
+ generate_row_for_del_func _generate_row_for_del;
+
+ uint32_t _cleaner_period;
+ uint32_t _cleaner_iterations;
+ uint32_t _checkpointing_period;
+ uint32_t _fsync_log_period_msec;
+ int _fs_redzone;
+
+ uint64_t _lk_max_memory;
+ uint64_t _lock_wait_time_msec;
+
+ typedef uint64_t (*get_lock_wait_time_cb_func)(uint64_t);
+ get_lock_wait_time_cb_func _get_lock_wait_time_cb;
+ lock_timeout_callback _lock_timeout_callback;
+ lock_wait_callback _lock_wait_needed_callback;
+ uint64_t (*_loader_memory_size_callback)(void);
+
+ uint32_t _cachesize_gbytes;
+ uint32_t _cachesize_bytes;
+ uint32_t _cachetable_bucket_mutexes;
+
+ std::string _product_name;
+
+ std::string _lg_dir;
+ std::string _tmp_dir;
+
+ bool _direct_io;
+ bool _compress_buffers;
+
+ public:
+ DBEnvBuilder()
+ : _bt_compare(nullptr),
+ _update_function(nullptr),
+ _generate_row_for_put(nullptr),
+ _generate_row_for_del(nullptr),
+ _cleaner_period(0),
+ _cleaner_iterations(0),
+ _checkpointing_period(0),
+ _fsync_log_period_msec(0),
+ _fs_redzone(0),
+ _lk_max_memory(0),
+ _lock_wait_time_msec(0),
+ _get_lock_wait_time_cb(nullptr),
+ _lock_timeout_callback(nullptr),
+ _lock_wait_needed_callback(nullptr),
+ _loader_memory_size_callback(nullptr),
+ _cachesize_gbytes(0),
+ _cachesize_bytes(0),
+ _cachetable_bucket_mutexes(0),
+ _product_name(""),
+ _lg_dir(""),
+ _tmp_dir(""),
+ _direct_io(false),
+ _compress_buffers(true)
+ {}
+
+ DBEnv open(const char *env_dir, uint32_t flags, int mode) const {
+ db_env_set_direct_io(_direct_io);
+ db_env_set_compress_buffers_before_eviction(_compress_buffers);
+ if (_cachetable_bucket_mutexes) {
+ db_env_set_num_bucket_mutexes(_cachetable_bucket_mutexes);
+ }
+
+ if (!_product_name.empty()) {
+ db_env_set_toku_product_name(_product_name.c_str());
+ }
+
+ DB_ENV *env;
+ int r = db_env_create(&env, 0);
+ handle_ft_retval(r);
+
+ if (_bt_compare) {
+ r = env->set_default_bt_compare(env, _bt_compare);
+ handle_ft_retval(r);
+ }
+
+ if (_update_function) {
+ env->set_update(env, _update_function);
+ }
+
+ if (_generate_row_for_put) {
+ r = env->set_generate_row_callback_for_put(env, _generate_row_for_put);
+ handle_ft_retval(r);
+ }
+
+ if (_generate_row_for_del) {
+ r = env->set_generate_row_callback_for_del(env, _generate_row_for_del);
+ handle_ft_retval(r);
+ }
+
+ if (_lk_max_memory) {
+ r = env->set_lk_max_memory(env, _lk_max_memory);
+ handle_ft_retval(r);
+ }
+
+ if (_lock_wait_time_msec || _get_lock_wait_time_cb) {
+ uint64_t wait_time = _lock_wait_time_msec;
+ if (!wait_time) {
+ r = env->get_lock_timeout(env, &wait_time);
+ handle_ft_retval(r);
+ }
+ r = env->set_lock_timeout(env, wait_time, _get_lock_wait_time_cb);
+ handle_ft_retval(r);
+ }
+
+ if (_lock_timeout_callback) {
+ r = env->set_lock_timeout_callback(env, _lock_timeout_callback);
+ handle_ft_retval(r);
+ }
+
+ if (_lock_wait_needed_callback) {
+ r = env->set_lock_wait_callback(env, _lock_wait_needed_callback);
+ handle_ft_retval(r);
+ }
+
+ if (_loader_memory_size_callback) {
+ env->set_loader_memory_size(env, _loader_memory_size_callback);
+ }
+
+ if (_cachesize_gbytes || _cachesize_bytes) {
+ r = env->set_cachesize(env, _cachesize_gbytes, _cachesize_bytes, 1);
+ handle_ft_retval(r);
+ }
+
+ if (_fs_redzone) {
+ env->set_redzone(env, _fs_redzone);
+ }
+
+ if (!_lg_dir.empty()) {
+ r = env->set_lg_dir(env, _lg_dir.c_str());
+ handle_ft_retval(r);
+ }
+
+ if (!_tmp_dir.empty()) {
+ r = env->set_tmp_dir(env, _tmp_dir.c_str());
+ handle_ft_retval(r);
+ }
+
+ r = env->open(env, env_dir, flags, mode);
+ handle_ft_retval(r);
+
+ if (_cleaner_period) {
+ r = env->cleaner_set_period(env, _cleaner_period);
+ handle_ft_retval(r);
+ }
+
+ if (_cleaner_iterations) {
+ r = env->cleaner_set_iterations(env, _cleaner_iterations);
+ handle_ft_retval(r);
+ }
+
+ if (_checkpointing_period) {
+ r = env->checkpointing_set_period(env, _checkpointing_period);
+ handle_ft_retval(r);
+ }
+
+ if (_fsync_log_period_msec) {
+ env->change_fsync_log_period(env, _fsync_log_period_msec);
+ }
+
+ return DBEnv(env, true);
+ }
+
+ DBEnvBuilder& set_direct_io(bool direct_io) {
+ _direct_io = direct_io;
+ return *this;
+ }
+
+ DBEnvBuilder& set_compress_buffers_before_eviction(bool compress_buffers) {
+ _compress_buffers = compress_buffers;
+ return *this;
+ }
+
+ DBEnvBuilder& set_default_bt_compare(bt_compare_func bt_compare) {
+ _bt_compare = bt_compare;
+ return *this;
+ }
+
+ DBEnvBuilder& set_update(update_func update_function) {
+ _update_function = update_function;
+ return *this;
+ }
+
+ DBEnvBuilder& set_generate_row_callback_for_put(generate_row_for_put_func generate_row_for_put) {
+ _generate_row_for_put = generate_row_for_put;
+ return *this;
+ }
+
+ DBEnvBuilder& set_generate_row_callback_for_del(generate_row_for_del_func generate_row_for_del) {
+ _generate_row_for_del = generate_row_for_del;
+ return *this;
+ }
+
+ DBEnvBuilder& cleaner_set_period(uint32_t period) {
+ _cleaner_period = period;
+ return *this;
+ }
+
+ DBEnvBuilder& cleaner_set_iterations(uint32_t iterations) {
+ _cleaner_iterations = iterations;
+ return *this;
+ }
+
+ DBEnvBuilder& checkpointing_set_period(uint32_t period) {
+ _checkpointing_period = period;
+ return *this;
+ }
+
+ DBEnvBuilder& change_fsync_log_period(uint32_t period) {
+ _fsync_log_period_msec = period;
+ return *this;
+ }
+
+ DBEnvBuilder& set_fs_redzone(int fs_redzone) {
+ _fs_redzone = fs_redzone;
+ return *this;
+ }
+
+ DBEnvBuilder& set_lk_max_memory(uint64_t sz) {
+ _lk_max_memory = sz;
+ return *this;
+ }
+
+ DBEnvBuilder& set_lock_wait_time_msec(uint64_t lock_wait_time_msec) {
+ _lock_wait_time_msec = lock_wait_time_msec;
+ return *this;
+ }
+
+ DBEnvBuilder& set_lock_wait_time_cb(get_lock_wait_time_cb_func get_lock_wait_time_cb) {
+ _get_lock_wait_time_cb = get_lock_wait_time_cb;
+ return *this;
+ }
+
+ DBEnvBuilder& set_lock_timeout_callback(lock_timeout_callback callback) {
+ _lock_timeout_callback = callback;
+ return *this;
+ }
+
+ DBEnvBuilder& set_lock_wait_callback(lock_wait_callback callback) {
+ _lock_wait_needed_callback = callback;
+ return *this;
+ }
+
+ DBEnvBuilder& set_loader_memory_size(uint64_t (*callback)(void)) {
+ _loader_memory_size_callback = callback;
+ return *this;
+ }
+
+ DBEnvBuilder& set_cachesize(uint32_t gbytes, uint32_t bytes) {
+ _cachesize_gbytes = gbytes;
+ _cachesize_bytes = bytes;
+ return *this;
+ }
+
+ DBEnvBuilder& set_cachetable_bucket_mutexes(uint32_t mutexes) {
+ _cachetable_bucket_mutexes = mutexes;
+ return *this;
+ }
+
+ DBEnvBuilder& set_product_name(const char *product_name) {
+ _product_name = std::string(product_name);
+ return *this;
+ }
+
+ DBEnvBuilder& set_lg_dir(const char *lg_dir) {
+ _lg_dir = std::string(lg_dir);
+ return *this;
+ }
+
+ DBEnvBuilder& set_tmp_dir(const char *tmp_dir) {
+ _tmp_dir = std::string(tmp_dir);
+ return *this;
+ }
+ };
+
+} // namespace ftcxx
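
A minimal usage sketch for the DBEnvBuilder/DBEnv API defined above (the directory, flags, and mode are illustrative assumptions, not values taken from this diff):

    #include <string>
    #include "db_env.hpp"

    static void open_example_env() {
        ftcxx::DBEnv env = ftcxx::DBEnvBuilder()
            .set_cachesize(1, 0)                  // 1 GB cachetable
            .checkpointing_set_period(60)         // recorded now, applied after env->open()
            .open("/tmp/ftcxx-env",               // assumed path
                  DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN |
                  DB_INIT_LOCK | DB_INIT_LOG,
                  0755);

        ftcxx::DBEnv::Status status;
        fs_redzone_state redzone;
        uint64_t panic;
        std::string panic_string;
        env.get_status(status, redzone, panic, panic_string);  // keyname -> status row
    }   // open() constructed env with close_on_destroy=true, so it closes here

Note that settings such as the checkpointing period are deferred by the builder until after env->open(), matching the ordering inside DBEnvBuilder::open() above.
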
diff --git a/storage/tokudb/PerconaFT/ftcxx/db_txn.hpp b/storage/tokudb/PerconaFT/ftcxx/db_txn.hpp
new file mode 100644
index 00000000..adcdc8f5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/db_txn.hpp
@@ -0,0 +1,127 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+#include "db_env.hpp"
+#include "exceptions.hpp"
+
+namespace ftcxx {
+
+ class DBTxn {
+ public:
+ DBTxn()
+ : _flags(0),
+ _txn(nullptr)
+ {}
+
+ explicit DBTxn(const DBEnv &env, int flags=0)
+ : _flags(flags),
+ _txn(nullptr)
+ {
+ DB_TXN *t;
+ int r = env.env()->txn_begin(env.env(), nullptr, &t, _flags);
+ handle_ft_retval(r);
+ _txn = t;
+ }
+
+ DBTxn(const DBEnv &env, const DBTxn &parent, int flags=0)
+ : _flags(flags),
+ _txn(nullptr)
+ {
+ DB_TXN *t;
+ int r = env.env()->txn_begin(env.env(), parent.txn(), &t, _flags);
+ handle_ft_retval(r);
+ _txn = t;
+ }
+
+ ~DBTxn() {
+ if (_txn) {
+ abort();
+ }
+ }
+
+ DBTxn(const DBTxn &) = delete;
+ DBTxn& operator=(const DBTxn &) = delete;
+
+ DBTxn(DBTxn &&o)
+ : _flags(0),
+ _txn(nullptr)
+ {
+ std::swap(_flags, o._flags);
+ std::swap(_txn, o._txn);
+ }
+
+ DBTxn& operator=(DBTxn &&o) {
+ std::swap(_flags, o._flags);
+ std::swap(_txn, o._txn);
+ return *this;
+ }
+
+ DB_TXN *txn() const { return _txn; }
+
+ void commit(int flags=0) {
+ int r = _txn->commit(_txn, flags);
+ handle_ft_retval(r);
+ _txn = nullptr;
+ }
+
+ void abort() {
+ int r = _txn->abort(_txn);
+ handle_ft_retval(r);
+ _txn = nullptr;
+ }
+
+ bool is_read_only() const {
+ return _flags & DB_TXN_READ_ONLY;
+ }
+
+ uint64_t id() const {
+ if (!_txn) {
+ return 0;
+ }
+ return _txn->id64(_txn);
+ }
+
+ private:
+ int _flags;
+ DB_TXN *_txn;
+ };
+
+} // namespace ftcxx
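
A short sketch of the RAII behavior of DBTxn above; env is assumed to be an already-opened ftcxx::DBEnv:

    #include "db_txn.hpp"

    static void txn_example(const ftcxx::DBEnv &env) {
        {
            ftcxx::DBTxn txn(env);
            // ... use txn.txn() with the C API or the ftcxx wrappers ...
            txn.commit();          // clears the handle, so the destructor does nothing
        }
        {
            ftcxx::DBTxn ro(env, DB_TXN_READ_ONLY);
            // leaving this scope without commit() makes the destructor call abort()
        }
    }
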
diff --git a/storage/tokudb/PerconaFT/ftcxx/exceptions.hpp b/storage/tokudb/PerconaFT/ftcxx/exceptions.hpp
new file mode 100644
index 00000000..d8080d41
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/exceptions.hpp
@@ -0,0 +1,152 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <exception>
+#include <string.h>
+
+#include <db.h>
+
+namespace ftcxx {
+
+ class ft_exception : public std::exception {
+ int _code;
+
+ static const char *ft_strerror(int code) {
+ switch (code) {
+ case DB_RUNRECOVERY:
+ return "DB_RUNRECOVERY";
+ case DB_KEYEXIST:
+ return "DB_KEYEXIST";
+ case DB_LOCK_DEADLOCK:
+ return "DB_LOCK_DEADLOCK";
+ case DB_LOCK_NOTGRANTED:
+ return "DB_LOCK_NOTGRANTED";
+ case DB_NOTFOUND:
+ return "DB_NOTFOUND";
+ case DB_SECONDARY_BAD:
+ return "DB_SECONDARY_BAD";
+ case DB_DONOTINDEX:
+ return "DB_DONOTINDEX";
+ case DB_BUFFER_SMALL:
+ return "DB_BUFFER_SMALL";
+ case DB_BADFORMAT:
+ return "DB_BADFORMAT";
+ case TOKUDB_OUT_OF_LOCKS:
+ return "TOKUDB_OUT_OF_LOCKS";
+ case TOKUDB_SUCCEEDED_EARLY:
+ return "TOKUDB_SUCCEEDED_EARLY";
+ case TOKUDB_FOUND_BUT_REJECTED:
+ return "TOKUDB_FOUND_BUT_REJECTED";
+ case TOKUDB_USER_CALLBACK_ERROR:
+ return "TOKUDB_USER_CALLBACK_ERROR";
+ case TOKUDB_DICTIONARY_TOO_OLD:
+ return "TOKUDB_DICTIONARY_TOO_OLD";
+ case TOKUDB_DICTIONARY_TOO_NEW:
+ return "TOKUDB_DICTIONARY_TOO_NEW";
+ case TOKUDB_DICTIONARY_NO_HEADER:
+ return "TOKUDB_DICTIONARY_NO_HEADER";
+ case TOKUDB_CANCELED:
+ return "TOKUDB_CANCELED";
+ case TOKUDB_NO_DATA:
+ return "TOKUDB_NO_DATA";
+ case TOKUDB_ACCEPT:
+ return "TOKUDB_ACCEPT";
+ case TOKUDB_MVCC_DICTIONARY_TOO_NEW:
+ return "TOKUDB_MVCC_DICTIONARY_TOO_NEW";
+ case TOKUDB_UPGRADE_FAILURE:
+ return "TOKUDB_UPGRADE_FAILURE";
+ case TOKUDB_TRY_AGAIN:
+ return "TOKUDB_TRY_AGAIN";
+ case TOKUDB_NEEDS_REPAIR:
+ return "TOKUDB_NEEDS_REPAIR";
+ case TOKUDB_CURSOR_CONTINUE:
+ return "TOKUDB_CURSOR_CONTINUE";
+ case TOKUDB_BAD_CHECKSUM:
+ return "TOKUDB_BAD_CHECKSUM";
+ case TOKUDB_HUGE_PAGES_ENABLED:
+ return "TOKUDB_HUGE_PAGES_ENABLED";
+ case TOKUDB_OUT_OF_RANGE:
+ return "TOKUDB_OUT_OF_RANGE";
+ case TOKUDB_INTERRUPTED:
+ return "TOKUDB_INTERRUPTED";
+ default:
+ return "unknown ft error";
+ }
+ }
+
+ public:
+ ft_exception(int c) : _code(c) {}
+
+ int code() const noexcept {
+ return _code;
+ }
+
+ virtual const char *what() const noexcept {
+ return ft_strerror(_code);
+ }
+ };
+
+ class system_exception : public std::exception {
+ int _code;
+
+ public:
+ system_exception(int c) : _code(c) {}
+
+ int code() const noexcept {
+ return _code;
+ }
+
+ virtual const char *what() const noexcept {
+ return strerror(_code);
+ }
+ };
+
+ inline void handle_ft_retval(int r) {
+ if (r == 0) {
+ return;
+ }
+ if (r < 0) {
+ throw ft_exception(r);
+ }
+ if (r > 0) {
+ throw system_exception(r);
+ }
+ }
+
+} // namespace ftcxx
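
The return-code convention enforced by handle_ft_retval() above, seen from a caller: zero returns normally, negative codes surface as ft_exception, positive codes as system_exception carrying an errno value. A sketch only; the logging is an assumption:

    #include <cstdio>
    #include "exceptions.hpp"

    static void check(int r) {
        try {
            ftcxx::handle_ft_retval(r);
        } catch (const ftcxx::ft_exception &e) {
            std::fprintf(stderr, "ft error %d (%s)\n", e.code(), e.what());      // e.g. "DB_NOTFOUND"
            throw;
        } catch (const ftcxx::system_exception &e) {
            std::fprintf(stderr, "system error %d (%s)\n", e.code(), e.what());  // strerror(code)
            throw;
        }
    }
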
diff --git a/storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp b/storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp
new file mode 100644
index 00000000..6c0fb341
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp
@@ -0,0 +1,97 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <cstdint>
+
+#include "malloc_utils.hpp"
+
+#if !defined(HAVE_BITS_FUNCTEXCEPT_H) || !HAVE_BITS_FUNCTEXCEPT_H
+
+namespace std {
+
+ void __throw_bad_alloc() {
+ throw bad_alloc();
+ }
+
+} // namespace std
+
+#endif
+
+namespace malloc_utils {
+
+ // How do we determine that we're using jemalloc?
+ // In the hackiest way possible. We allocate memory using malloc() and see if
+ // the per-thread counter of allocated memory increases. This makes me feel
+ // dirty inside. Also note that this requires jemalloc to have been compiled
+ // with --enable-stats.
+ bool usingJEMallocSlow() {
+ // Some platforms (*cough* OSX *cough*) require weak symbol checks to be
+ // in the form if (mallctl != nullptr). Not if (mallctl) or if (!mallctl)
+ // (!!). http://goo.gl/xpmctm
+ if (mallocx == nullptr || rallocx == nullptr || xallocx == nullptr
+ || sallocx == nullptr || dallocx == nullptr || nallocx == nullptr
+ || mallctl == nullptr) {
+ return false;
+ }
+
+ // "volatile" because gcc optimizes out the reads from *counter, because
+ // it "knows" malloc doesn't modify global state...
+ volatile uint64_t* counter;
+ size_t counterLen = sizeof(uint64_t*);
+
+ if (mallctl("thread.allocatedp", static_cast<void*>(&counter), &counterLen,
+ nullptr, 0) != 0) {
+ return false;
+ }
+
+ if (counterLen != sizeof(uint64_t*)) {
+ return false;
+ }
+
+ uint64_t origAllocated = *counter;
+
+ void* ptr = malloc(1);
+ if (!ptr) {
+ // wtf, failing to allocate 1 byte
+ return false;
+ }
+ free(ptr);
+
+ return (origAllocated != *counter);
+ }
+
+} // namespace malloc_utils
diff --git a/storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp b/storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp
new file mode 100644
index 00000000..4aae801b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp
@@ -0,0 +1,226 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+/**
+ * These functions are extracted from Facebook's folly library, which
+ * integrates well with jemalloc. See
+ * https://github.com/facebook/folly/blob/master/folly/Malloc.h
+ */
+
+#include <algorithm>
+#include <cassert>
+#include <cstdlib>
+
+#if defined(HAVE_BITS_FUNCTEXCEPT_H) && HAVE_BITS_FUNCTEXCEPT_H
+
+# include <bits/functexcept.h>
+
+#else
+
+# include <stdexcept>
+
+namespace std {
+
+ void __throw_bad_alloc();
+
+}
+
+#endif
+
+/**
+ * Declare *allocx() and mallctl() as weak symbols. These will be provided by
+ * jemalloc if we are using jemalloc, or will be NULL if we are using another
+ * malloc implementation.
+ */
+extern "C" void* mallocx(size_t, int)
+ __attribute__((__weak__));
+extern "C" void* rallocx(void*, size_t, int)
+ __attribute__((__weak__));
+extern "C" size_t xallocx(void*, size_t, size_t, int)
+ __attribute__((__weak__));
+extern "C" size_t sallocx(const void*, int)
+ __attribute__((__weak__));
+extern "C" void dallocx(void*, int)
+ __attribute__((__weak__));
+extern "C" size_t nallocx(size_t, int)
+ __attribute__((__weak__));
+extern "C" int mallctl(const char*, void*, size_t*, void*, size_t)
+ __attribute__((__weak__));
+
+namespace malloc_utils {
+
+ bool usingJEMallocSlow();
+
+ /**
+ * Determine if we are using jemalloc or not.
+ */
+ inline bool usingJEMalloc() {
+ // Checking for rallocx != NULL is not sufficient; we may be in a
+ // dlopen()ed module that depends on libjemalloc, so rallocx is
+ // resolved, but the main program might be using a different
+ // memory allocator. Look at the implementation of
+ // usingJEMallocSlow() for the (hacky) details.
+ static const bool result = usingJEMallocSlow();
+ return result;
+ }
+
+ /**
+ * For jemalloc's size classes, see
+ * http://www.canonware.com/download/jemalloc/jemalloc-latest/doc/jemalloc.html
+ */
+ inline size_t goodMallocSize(size_t minSize) noexcept {
+ if (!usingJEMalloc()) {
+ // Not using jemalloc - no smarts
+ return minSize;
+ }
+ size_t goodSize;
+ if (minSize <= 64) {
+ // Choose smallest allocation to be 64 bytes - no tripping
+ // over cache line boundaries, and small string optimization
+ // takes care of short strings anyway.
+ goodSize = 64;
+ } else if (minSize <= 512) {
+ // Round up to the next multiple of 64; we don't want to trip
+ // over cache line boundaries.
+ goodSize = (minSize + 63) & ~size_t(63);
+ } else if (minSize <= 3584) {
+ // Round up to the next multiple of 256. For some size
+ // classes jemalloc will additionally round up to the nearest
+ // multiple of 512, hence the nallocx() call.
+ goodSize = nallocx((minSize + 255) & ~size_t(255), 0);
+ } else if (minSize <= 4072 * 1024) {
+ // Round up to the next multiple of 4KB
+ goodSize = (minSize + 4095) & ~size_t(4095);
+ } else {
+ // Holy Moly
+ // Round up to the next multiple of 4MB
+ goodSize = (minSize + 4194303) & ~size_t(4194303);
+ }
+ assert(nallocx(goodSize, 0) == goodSize);
+ return goodSize;
+ }
+
+ static const size_t jemallocMinInPlaceExpandable = 4096;
+
+ /**
+ * Trivial wrappers around malloc, calloc, realloc that check for
+ * allocation failure and throw std::bad_alloc in that case.
+ */
+ inline void* checkedMalloc(size_t size) {
+ void* p = malloc(size);
+ if (!p) std::__throw_bad_alloc();
+ return p;
+ }
+
+ inline void* checkedCalloc(size_t n, size_t size) {
+ void* p = calloc(n, size);
+ if (!p) std::__throw_bad_alloc();
+ return p;
+ }
+
+ inline void* checkedRealloc(void* ptr, size_t size) {
+ void* p = realloc(ptr, size);
+ if (!p) std::__throw_bad_alloc();
+ return p;
+ }
+
+ /**
+ * This function tries to reallocate a buffer of which only the first
+ * currentSize bytes are used. The problem with using realloc is that
+ * if currentSize is relatively small _and_ if realloc decides it
+ * needs to move the memory chunk to a new buffer, then realloc ends
+ * up copying data that is not used. It's impossible to hook into
+ * GNU's malloc to figure whether expansion will occur in-place or as
+     * GNU's malloc to figure out whether expansion will occur in-place or
+     * as a malloc-copy-free troika. (If an expand_in_place primitive were
+     * available, smartRealloc would use it.) As things stand, this
+     * routine just tries to call realloc() (thus benefiting from potential
+ */
+ inline void* smartRealloc(void* p,
+ const size_t currentSize,
+ const size_t currentCapacity,
+ const size_t newCapacity,
+ size_t &realNewCapacity) {
+ assert(p);
+ assert(currentSize <= currentCapacity &&
+ currentCapacity < newCapacity);
+
+ if (usingJEMalloc()) {
+ // using jemalloc's API. Don't forget that jemalloc can never
+ // grow in place blocks smaller than 4096 bytes.
+ //
+ // NB: newCapacity may not be precisely equal to a jemalloc
+ // size class, i.e. newCapacity is not guaranteed to be the
+ // result of a goodMallocSize() call, therefore xallocx() may
+ // return more than newCapacity bytes of space. Use >= rather
+ // than == to check whether xallocx() successfully expanded in
+ // place.
+ size_t realNewCapacity_;
+ if (currentCapacity >= jemallocMinInPlaceExpandable &&
+ (realNewCapacity_ = xallocx(p, newCapacity, 0, 0)) >= newCapacity) {
+ // Managed to expand in place
+ realNewCapacity = realNewCapacity_;
+ return p;
+ }
+ // Cannot expand; must move
+ char * const result = static_cast<char *>(checkedMalloc(newCapacity));
+ char *cp = static_cast<char *>(p);
+ std::copy(cp, cp + currentSize, result);
+ free(p);
+ realNewCapacity = newCapacity;
+ return result;
+ }
+
+ // No jemalloc no honey
+ auto const slack = currentCapacity - currentSize;
+ if (slack * 2 > currentSize) {
+ // Too much slack, malloc-copy-free cycle:
+ char * const result = static_cast<char *>(checkedMalloc(newCapacity));
+ char *cp = static_cast<char *>(p);
+ std::copy(cp, cp + currentSize, result);
+ free(p);
+ realNewCapacity = newCapacity;
+ return result;
+ }
+ // If there's not too much slack, we realloc in hope of coalescing
+ realNewCapacity = newCapacity;
+ return checkedRealloc(p, newCapacity);
+ }
+
+} // namespace malloc_utils
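
A sketch of how the helpers above combine when growing a buffer; the sizes are arbitrary, and the jemalloc-specific rounding only applies when usingJEMalloc() is true:

    #include <cstring>
    #include "malloc_utils.hpp"

    static char *grow_example() {
        size_t cap = malloc_utils::goodMallocSize(100);   // 128 under jemalloc, 100 otherwise
        char *p = static_cast<char *>(malloc_utils::checkedMalloc(cap));
        std::memset(p, 'x', 100);                         // only the first 100 bytes are live

        size_t real_cap;
        p = static_cast<char *>(malloc_utils::smartRealloc(
                p, /*currentSize=*/100, /*currentCapacity=*/cap,
                /*newCapacity=*/malloc_utils::goodMallocSize(1000), real_cap));
        return p;                                         // real_cap >= 1000; caller frees
    }
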
diff --git a/storage/tokudb/PerconaFT/ftcxx/slice.hpp b/storage/tokudb/PerconaFT/ftcxx/slice.hpp
new file mode 100644
index 00000000..138d8a4b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/slice.hpp
@@ -0,0 +1,189 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <cassert>
+#include <iterator>
+#include <memory>
+
+#include <db.h>
+
+namespace ftcxx {
+
+ class Slice {
+ public:
+ Slice()
+ : _data(nullptr),
+ _size(0)
+ {}
+
+ explicit Slice(size_t sz)
+ : _buf(new char[sz], std::default_delete<char[]>()),
+ _data(_buf.get()),
+ _size(sz)
+ {}
+
+ Slice(const char *p, size_t sz)
+ : _data(p),
+ _size(sz)
+ {}
+
+ explicit Slice(const DBT &d)
+ : _data(reinterpret_cast<char *>(d.data)),
+ _size(d.size)
+ {}
+
+ explicit Slice(const std::string &str)
+ : _data(str.c_str()),
+ _size(str.size())
+ {}
+
+ Slice(const Slice &other)
+ : _buf(other._buf),
+ _data(other._data),
+ _size(other._size)
+ {}
+
+ Slice& operator=(const Slice &other) {
+ _buf = other._buf;
+ _data = other._data;
+ _size = other._size;
+ return *this;
+ }
+
+ Slice(Slice&& other)
+ : _buf(),
+ _data(nullptr),
+ _size(0)
+ {
+ std::swap(_buf, other._buf);
+ std::swap(_data, other._data);
+ std::swap(_size, other._size);
+ }
+
+ Slice& operator=(Slice&& other) {
+ std::swap(_buf, other._buf);
+ std::swap(_data, other._data);
+ std::swap(_size, other._size);
+ return *this;
+ }
+
+ template<typename T>
+ static Slice slice_of(const T &v) {
+ return Slice(reinterpret_cast<const char *>(&v), sizeof v);
+ }
+
+ template<typename T>
+ T as() const {
+ assert(size() == sizeof(T));
+ const T *p = reinterpret_cast<const T *>(data());
+ return *p;
+ }
+
+ const char *data() const { return _data; }
+
+ char *mutable_data() const {
+ assert(_buf);
+ return _buf.get();
+ }
+
+ size_t size() const { return _size; }
+
+ bool empty() const { return size() == 0; }
+
+ char operator[](size_t n) const {
+ assert(n < size());
+ return _data[n];
+ }
+
+ char *begin() { return mutable_data(); }
+ char *end() { return mutable_data() + size(); }
+ char *rbegin() { return end(); }
+ char *rend() { return begin(); }
+ const char *begin() const { return data(); }
+ const char *end() const { return data() + size(); }
+ const char *rbegin() const { return end(); }
+ const char *rend() const { return begin(); }
+ const char *cbegin() const { return data(); }
+ const char *cend() const { return data() + size(); }
+ const char *crbegin() const { return end(); }
+ const char *crend() const { return begin(); }
+
+ Slice copy() const {
+ Slice s(size());
+ std::copy(begin(), end(), s.begin());
+ return s;
+ }
+
+ Slice owned() const {
+ if (_buf) {
+ return *this;
+ } else {
+ return copy();
+ }
+ }
+
+ DBT dbt() const {
+ DBT d;
+ d.data = const_cast<void *>(static_cast<const void *>(data()));
+ d.size = size();
+ d.ulen = size();
+ d.flags = 0;
+ return d;
+ }
+
+ private:
+ std::shared_ptr<char> _buf;
+ const char *_data;
+ size_t _size;
+ };
+
+} // namespace ftcxx
+
+namespace std {
+
+ template<>
+    class iterator_traits<ftcxx::Slice> {
+    public:
+ typedef typename std::iterator_traits<const char *>::difference_type difference_type;
+ typedef typename std::iterator_traits<const char *>::value_type value_type;
+ typedef typename std::iterator_traits<const char *>::pointer pointer;
+ typedef typename std::iterator_traits<const char *>::reference reference;
+ typedef typename std::iterator_traits<const char *>::iterator_category iterator_category;
+ };
+
+} // namespace std
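
A short sketch of the ownership rules in Slice above: slice_of() and the (const char *, size_t) constructor borrow, while copy()/owned() allocate through the internal shared buffer. The key value is arbitrary:

    #include <cstdint>
    #include "slice.hpp"

    static void slice_example() {
        uint32_t k = 42;
        ftcxx::Slice key = ftcxx::Slice::slice_of(k);   // borrows &k, no copy
        DBT kdbt = key.dbt();                           // zero-copy view for the C API

        ftcxx::Slice kept = key.owned();                // key owns no buffer, so this copies
        uint32_t back = kept.as<uint32_t>();            // asserts size() == sizeof(uint32_t)
        (void) kdbt;
        (void) back;
    }
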
diff --git a/storage/tokudb/PerconaFT/ftcxx/stats.hpp b/storage/tokudb/PerconaFT/ftcxx/stats.hpp
new file mode 100644
index 00000000..5c4cb642
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/stats.hpp
@@ -0,0 +1,48 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+namespace ftcxx {
+
+ struct Stats {
+ Stats() : data_size(0), file_size(0), num_keys(0) {};
+ size_t data_size;
+ size_t file_size;
+ size_t num_keys;
+ };
+}
diff --git a/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt
new file mode 100644
index 00000000..b4db82ff
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/tests/CMakeLists.txt
@@ -0,0 +1,47 @@
+include_directories(..)
+include_directories(../../src)
+include_directories(../../src/tests)
+
+if (BUILD_TESTING)
+ find_library(JEMALLOC_STATIC_LIBRARY libjemalloc.a)
+
+ ## reference implementation with simple size-doubling buffer without
+ ## jemalloc size tricks
+ add_library(doubling_buffer_ftcxx STATIC
+ doubling_buffer
+ ../cursor
+ )
+ add_dependencies(doubling_buffer_ftcxx install_tdb_h)
+
+ foreach (impl
+ ftcxx
+ doubling_buffer_ftcxx
+ )
+ foreach (with_jemalloc
+ ON
+ OFF
+ )
+ foreach (test
+ buffer_test
+ cursor_test
+ )
+ set(_testname ${impl}_${test})
+ if (with_jemalloc AND JEMALLOC_STATIC_LIBRARY)
+ set(_testname ${_testname}_j)
+ endif ()
+ add_executable(${_testname} ${test})
+ if (with_jemalloc AND JEMALLOC_STATIC_LIBRARY)
+ if (APPLE)
+ target_link_libraries(${_testname} -Wl,-force_load ${JEMALLOC_STATIC_LIBRARY})
+ else ()
+ target_link_libraries(${_testname} -Wl,--whole-archive ${JEMALLOC_STATIC_LIBRARY} -Wl,--no-whole-archive)
+ endif ()
+ endif ()
+ target_link_libraries(${_testname} ${impl})
+ target_link_libraries(${_testname} ${LIBTOKUDB} ${LIBTOKUPORTABILITY})
+
+ add_test(${_testname} ${_testname})
+ endforeach ()
+ endforeach ()
+ endforeach ()
+endif (BUILD_TESTING)
\ No newline at end of file
diff --git a/storage/tokudb/PerconaFT/ftcxx/tests/buffer_test.cpp b/storage/tokudb/PerconaFT/ftcxx/tests/buffer_test.cpp
new file mode 100644
index 00000000..293e6443
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/tests/buffer_test.cpp
@@ -0,0 +1,217 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <algorithm>
+#include <cassert>
+#include <iostream>
+#include <string>
+#include <sstream>
+#include <vector>
+
+#include "buffer.hpp"
+
+class Item {
+ const size_t _sz;
+
+public:
+ Item(size_t sz=0)
+ : _sz(sz)
+ {}
+
+ operator std::string() const {
+ std::stringstream ss;
+ ss << "Item(" << _sz << ")";
+ return ss.str();
+ }
+
+ bool operator==(const Item &other) const {
+ return _sz == other._sz;
+ }
+
+ bool operator!=(const Item &other) const {
+ return !(*this == other);
+ }
+
+ size_t serialized_size() const {
+ return (sizeof _sz) + _sz;
+ }
+
+ void serialize(char *p) const {
+ size_t *szp = reinterpret_cast<size_t *>(p);
+ *szp = _sz;
+ }
+
+ static Item deserialize(const char *p) {
+ const size_t *szp = reinterpret_cast<const size_t *>(p);
+ return Item(*szp);
+ }
+
+ bool check_serialized(const char *p) {
+ return deserialize(p) == *this;
+ }
+};
+
+class SingleSizeGenerator {
+ const size_t _sz;
+
+public:
+ SingleSizeGenerator(size_t sz)
+ : _sz(sz)
+ {}
+
+ std::string name() const {
+ std::stringstream ss;
+ ss << "SingleSizeGenerator(" << _sz << ")";
+ return ss.str();
+ }
+
+ Item next() {
+ return Item(_sz);
+ }
+};
+
+class RoundRobinGenerator {
+ const std::vector<size_t> _szs;
+ std::vector<size_t>::const_iterator _it;
+
+public:
+ RoundRobinGenerator(const std::vector<size_t> &szs)
+ : _szs(szs),
+ _it(_szs.begin())
+ {}
+
+ std::string name() const {
+ std::stringstream ss;
+ ss << "RoundRobinGenerator(";
+ for (auto it = _szs.begin(); it != _szs.end(); ++it) {
+ if (it != _szs.begin()) {
+ ss << ", ";
+ }
+ ss << *it;
+ }
+ ss << ")";
+ return ss.str();
+ }
+
+ Item next() {
+ if (_it == _szs.end()) {
+ _it = _szs.begin();
+ }
+ return Item(*(_it++));
+ }
+};
+
+template<class Generator>
+void test(Generator gen) {
+ std::vector<Item> expected;
+ std::vector<Item> received;
+
+ const size_t N = 1000000;
+
+ ftcxx::Buffer b;
+
+ std::cout << gen.name() << ": ";
+
+ for (size_t i = 0; i < N; ++i) {
+ if (b.full()) {
+ // drain
+ while (b.more()) {
+ Item it = Item::deserialize(b.current());
+ received.push_back(it);
+ b.advance(it.serialized_size());
+ }
+ b.clear();
+ }
+
+ // push
+ Item it = gen.next();
+ expected.push_back(it);
+ char *p = b.alloc(it.serialized_size());
+ it.serialize(p);
+ }
+
+ // drain one more time
+ while (b.more()) {
+ Item i = Item::deserialize(b.current());
+ received.push_back(i);
+ b.advance(i.serialized_size());
+ }
+ b.clear();
+
+ if (expected.size() != received.size()) {
+ std::cout << "fail" << std::endl;
+ std::cerr << "expected.size() != received.size()" << std::endl;
+ std::cerr << expected.size() << " != " << received.size() << std::endl;
+ return;
+ }
+
+ for (size_t i = 0; i < expected.size(); ++i) {
+ if (expected[i] != received[i]) {
+ std::cout << "fail" << std::endl;
+ std::cerr << "expected[" << i << "] != received[" << i << "]" << std::endl;
+ std::cerr << std::string(expected[i]) << " != " << std::string(received[i]) << std::endl;
+ return;
+ }
+ }
+
+ std::cout << "ok" << std::endl;
+}
+
+int main(void) {
+ test(SingleSizeGenerator(1));
+ test(SingleSizeGenerator(3));
+ test(SingleSizeGenerator(32));
+ test(SingleSizeGenerator(1<<11));
+ test(SingleSizeGenerator(1<<12));
+ test(SingleSizeGenerator((1<<12) - 1));
+ test(SingleSizeGenerator((1<<12) + 1));
+ test(SingleSizeGenerator(1<<20));
+
+ test(RoundRobinGenerator({8, 16}));
+ test(RoundRobinGenerator({8, 1<<12}));
+ test(RoundRobinGenerator({8, (1<<12) - 1}));
+ test(RoundRobinGenerator({8, (1<<12) + 1}));
+ test(RoundRobinGenerator({8, (1<<12) - 1, (1<<12) + 1}));
+ test(RoundRobinGenerator({8, (1<<20)}));
+ test(RoundRobinGenerator({(1<<12) - 1, (1<<12) + 1}));
+ test(RoundRobinGenerator({(1<<12) , (1<<12) + 1}));
+ test(RoundRobinGenerator({(1<<12) - 1, (1<<12) }));
+ test(RoundRobinGenerator({1<<12, 1<<20}));
+ test(RoundRobinGenerator({1<<16, 1<<17}));
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ftcxx/tests/cursor_test.cpp b/storage/tokudb/PerconaFT/ftcxx/tests/cursor_test.cpp
new file mode 100644
index 00000000..5156091f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/tests/cursor_test.cpp
@@ -0,0 +1,178 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <iostream>
+#include <utility>
+
+#include <db.h>
+#include "test.h" // hax
+
+#include "cursor.hpp"
+#include "db.hpp"
+#include "db_env.hpp"
+#include "db_txn.hpp"
+#include "slice.hpp"
+
+const uint32_t N = 100000;
+
+static void fill(const ftcxx::DBEnv &env, const ftcxx::DB &db) {
+ ftcxx::DBTxn txn(env);
+
+ ftcxx::Slice val(1<<10);
+ memset(val.mutable_data(), 'x', val.size());
+ for (uint32_t i = 0; i < N; ++i) {
+ int r = db.put(txn, ftcxx::Slice::slice_of(i), val);
+ assert_zero(r);
+ }
+
+ txn.commit();
+}
+
+struct UIntComparator {
+ int operator()(const ftcxx::Slice &a, const ftcxx::Slice &b) {
+ DBT adbt = a.dbt();
+ DBT bdbt = b.dbt();
+ return uint_dbt_cmp((DB *) this /*lol*/, &adbt, &bdbt);
+ }
+};
+
+static void run_test(const ftcxx::DBEnv &env, const ftcxx::DB &db) {
+ fill(env, db);
+
+ ftcxx::DBTxn txn(env);
+
+ {
+ uint32_t lk;
+ uint32_t rk;
+
+ for (uint32_t i = 0; i < N; i += 1000) {
+ lk = i;
+ rk = i + 499;
+
+ ftcxx::Slice key;
+ ftcxx::Slice val;
+ uint32_t expect = i;
+ uint32_t last = 0;
+ for (auto cur(db.buffered_cursor(txn, ftcxx::Slice::slice_of(lk), ftcxx::Slice::slice_of(rk),
+ UIntComparator(), ftcxx::DB::NullFilter()));
+ cur.next(key, val);
+ ) {
+ last = key.as<uint32_t>();
+ assert(expect == last);
+ expect++;
+ }
+ assert(last == (i + 499));
+ }
+ }
+
+ txn.commit();
+
+ ftcxx::DBTxn extxn(env);
+
+ {
+ ftcxx::Slice key;
+ ftcxx::Slice val;
+ uint32_t expect = 0;
+ uint32_t last = 0;
+ for (auto cur(db.buffered_cursor(extxn, UIntComparator(), ftcxx::DB::NullFilter())); cur.next(key, val); ) {
+ last = key.as<uint32_t>();
+ assert(expect == last);
+ expect++;
+ }
+ assert(last == N - 1);
+ }
+
+ {
+ ftcxx::Slice key;
+ ftcxx::Slice val;
+ uint32_t expect = 0;
+ uint32_t last = 0;
+ for (auto cur(db.simple_cursor(extxn, UIntComparator(), key, val)); ; ) {
+ std::cout << key.as<uint32_t>() << std::endl;
+ last = key.as<uint32_t>();
+ assert(expect == last);
+ expect++;
+ if (!cur.next()) {
+ break;
+ }
+ }
+ assert(last == N - 1);
+ }
+
+ extxn.commit();
+}
+
+int test_main(int argc, char *const argv[]) {
+ int r;
+ const char *old_env_dir = TOKU_TEST_FILENAME;
+ char env_dir[strlen(old_env_dir)+32]; // use unique env directories for parallel tests
+ snprintf(env_dir, sizeof env_dir, "%s.%d", old_env_dir, getpid());
+ const char *db_filename = "ftcxx_cursor_test";
+ parse_args(argc, argv);
+
+ char rm_cmd[strlen(env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", env_dir);
+ r = system(rm_cmd);
+ assert_zero(r);
+
+ r = toku_os_mkdir(env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
+ assert_zero(r);
+
+ int env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ ftcxx::DBEnv env = ftcxx::DBEnvBuilder()
+ .set_default_bt_compare(uint_dbt_cmp)
+ .open(env_dir, env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+
+ ftcxx::DBTxn create_txn(env);
+ ftcxx::DB db = ftcxx::DBBuilder()
+ .open(env, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ create_txn.commit();
+
+ run_test(env, db);
+
+ db.close();
+
+ env.close();
+
+ r = system(rm_cmd);
+ assert_zero(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/ftcxx/tests/doubling_buffer.cpp b/storage/tokudb/PerconaFT/ftcxx/tests/doubling_buffer.cpp
new file mode 100644
index 00000000..e2080180
--- /dev/null
+++ b/storage/tokudb/PerconaFT/ftcxx/tests/doubling_buffer.cpp
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <algorithm>
+#include <cassert>
+#include <cstdlib>
+#include <memory>
+
+#include "buffer.hpp"
+
+namespace ftcxx {
+
+ const size_t Buffer::INITIAL_CAPACITY = 1<<10;
+ const size_t Buffer::MAXIMUM_CAPACITY = 1<<18;
+ const double Buffer::FULLNESS_RATIO = 0.9;
+
+ Buffer::Buffer()
+ : _cur(0),
+ _end(0),
+ _capacity(INITIAL_CAPACITY),
+ _buf(nullptr, &std::free)
+ {
+ init();
+ }
+
+ Buffer::Buffer(size_t capacity)
+        : _cur(0),
+          _end(0),
+ _capacity(capacity),
+ _buf(nullptr, &std::free)
+ {
+ init();
+ }
+
+ char *Buffer::alloc(size_t sz) {
+ grow(sz);
+ char *p = raw(_end);
+ _end += sz;
+ return p;
+ }
+
+ bool Buffer::full() const {
+ return _end > MAXIMUM_CAPACITY * FULLNESS_RATIO;
+ }
+
+ bool Buffer::more() const {
+ return _cur < _end;
+ }
+
+ char *Buffer::current() const {
+ return raw(_cur);
+ }
+
+ void Buffer::advance(size_t sz) {
+ _cur += sz;
+ }
+
+ void Buffer::clear() {
+ _cur = 0;
+ _end = 0;
+ }
+
+ void Buffer::init() {
+ _buf.reset(static_cast<char *>(std::malloc(_capacity)));
+ }
+
+ size_t Buffer::next_alloc_size(size_t sz) {
+ return sz * 2;
+ }
+
+ void Buffer::grow(size_t sz) {
+ size_t new_capacity = _capacity;
+ while (new_capacity < _end + sz) {
+ new_capacity = next_alloc_size(new_capacity);
+ }
+ assert(new_capacity >= _capacity); // overflow?
+ if (new_capacity > _capacity) {
+ std::unique_ptr<char, void (*)(void *)> new_buf(static_cast<char *>(std::malloc(new_capacity)), &std::free);
+ std::copy(raw(0), raw(_end), &new_buf.get()[0]);
+ std::swap(_buf, new_buf);
+ _capacity = new_capacity;
+ }
+ }
+
+} // namespace ftcxx
diff --git a/storage/tokudb/PerconaFT/locktree/CMakeLists.txt b/storage/tokudb/PerconaFT/locktree/CMakeLists.txt
new file mode 100644
index 00000000..9eaed1c4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/CMakeLists.txt
@@ -0,0 +1,29 @@
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+set(locktree_srcs
+ locktree
+ manager
+ lock_request
+ txnid_set
+ range_buffer
+ keyrange
+ wfg
+ )
+
+## make the shared library for tests
+add_library(locktree SHARED ${locktree_srcs})
+add_dependencies(locktree install_tdb_h)
+
+## make the real library, it's going to go into libtokudb.so so it needs
+## to be PIC
+add_library(locktree_static STATIC ${locktree_srcs})
+add_space_separated_property(TARGET locktree_static COMPILE_FLAGS "-fvisibility=hidden -fPIC")
+add_dependencies(locktree_static install_tdb_h)
+# The locktree uses the standard portability layer and also the ybt
+# functions from the ft layer. Maybe one day the ybt functions will be
+# elsewhere.
+target_link_libraries(locktree LINK_PRIVATE ft ${LIBTOKUPORTABILITY})
+
+maybe_add_gcov_to_libraries(locktree locktree_static)
+
+add_subdirectory(tests)
diff --git a/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc b/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc
new file mode 100644
index 00000000..e07f32c9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc
@@ -0,0 +1,135 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_assert.h>
+
+void concurrent_tree::create(const comparator *cmp) {
+ // start with an empty root node. we do this instead of
+ // setting m_root to null so there's always a root to lock
+ m_root.create_root(cmp);
+}
+
+void concurrent_tree::destroy(void) {
+ m_root.destroy_root();
+}
+
+bool concurrent_tree::is_empty(void) {
+ return m_root.is_empty();
+}
+
+uint64_t concurrent_tree::get_insertion_memory_overhead(void) {
+ return sizeof(treenode);
+}
+
+void concurrent_tree::locked_keyrange::prepare(concurrent_tree *tree) {
+ // the first step in acquiring a locked keyrange is locking the root
+ treenode *const root = &tree->m_root;
+ m_tree = tree;
+ m_subtree = root;
+ m_range = keyrange::get_infinite_range();
+ root->mutex_lock();
+}
+
+void concurrent_tree::locked_keyrange::acquire(const keyrange &range) {
+ treenode *const root = &m_tree->m_root;
+
+ treenode *subtree;
+ if (root->is_empty() || root->range_overlaps(range)) {
+ subtree = root;
+ } else {
+ // we do not have a precomputed comparison hint, so pass null
+ const keyrange::comparison *cmp_hint = nullptr;
+ subtree = root->find_node_with_overlapping_child(range, cmp_hint);
+ }
+
+ // subtree is locked. it will be unlocked when this is release()'d
+ invariant_notnull(subtree);
+ m_range = range;
+ m_subtree = subtree;
+}
+
+void concurrent_tree::locked_keyrange::release(void) {
+ m_subtree->mutex_unlock();
+}
+
+template <class F>
+void concurrent_tree::locked_keyrange::iterate(F *function) const {
+ // if the subtree is non-empty, traverse it by calling the given
+ // function on each range, txnid pair found that overlaps.
+ if (!m_subtree->is_empty()) {
+ m_subtree->traverse_overlaps(m_range, function);
+ }
+}
+
+void concurrent_tree::locked_keyrange::insert(const keyrange &range, TXNID txnid) {
+ // empty means no children, and only the root should ever be empty
+ if (m_subtree->is_empty()) {
+ m_subtree->set_range_and_txnid(range, txnid);
+ } else {
+ m_subtree->insert(range, txnid);
+ }
+}
+
+void concurrent_tree::locked_keyrange::remove(const keyrange &range) {
+ invariant(!m_subtree->is_empty());
+ treenode *new_subtree = m_subtree->remove(range);
+ // if removing range changed the root of the subtree,
+ // then the subtree must be the root of the entire tree.
+ if (new_subtree == nullptr) {
+ invariant(m_subtree->is_root());
+ invariant(m_subtree->is_empty());
+ }
+}
+
+void concurrent_tree::locked_keyrange::remove_all(void) {
+ m_subtree->recursive_remove();
+}
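
A sketch of the prepare/acquire/release protocol implemented above, following the pattern the locktree code is expected to use; the range and txnid are assumed to come from the caller, and locked_keyrange is assumed to be default-constructible:

    #include "concurrent_tree.h"

    static void insert_example(toku::concurrent_tree *tree,
                               const toku::keyrange &range, TXNID txnid) {
        toku::concurrent_tree::locked_keyrange lkr;
        lkr.prepare(tree);       // serializes with other preparers, locks the root
        lkr.acquire(range);      // narrows to the subtree overlapping range
        lkr.insert(range, txnid);
        lkr.release();           // unlocks the subtree held since prepare()/acquire()
    }
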
diff --git a/storage/tokudb/PerconaFT/locktree/concurrent_tree.h b/storage/tokudb/PerconaFT/locktree/concurrent_tree.h
new file mode 100644
index 00000000..66a7ff17
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/concurrent_tree.h
@@ -0,0 +1,165 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <ft/comparator.h>
+
+#include "treenode.h"
+#include "keyrange.h"
+
+namespace toku {
+
+// A concurrent_tree stores non-overlapping ranges.
+// Access to disjoint parts of the tree usually occurs concurrently.
+
+class concurrent_tree {
+public:
+
+ // A locked_keyrange gives you exclusive access to read and write
+ // operations that occur on any keys in that range. You only have
+ // the right to operate on keys in that range or keys that were read
+ // from the keyrange using iterate()
+ //
+ // Access model:
+ // - user prepares a locked keyrange. all threads serialize behind prepare().
+ //  - user breaks the serialization point by acquiring a range, or releasing.
+ //  - one thread operates on a certain locked_keyrange object at a time.
+ //  - when the thread is finished, it releases.
+ //  (an illustrative usage sketch appears after the concurrent_tree class below)
+
+ class locked_keyrange {
+ public:
+ // effect: prepare to acquire a locked keyrange over the given
+ // concurrent_tree, preventing other threads from preparing
+ // until this thread either does acquire() or release().
+ // note: operations performed on a prepared keyrange are equivalent
+ // to ones performed on an acquired keyrange over -inf, +inf.
+ // rationale: this provides the user with a serialization point for descending
+ // or modifying the tree. it also provides a convenient way of
+ // doing serializable operations on the tree.
+ // There are two valid sequences of calls:
+ // - prepare, acquire, [operations], release
+ //  - prepare, [operations], release
+ void prepare(concurrent_tree *tree);
+
+ // requires: the locked keyrange was prepare()'d
+ // effect: acquire a locked keyrange over the given concurrent_tree.
+ // the locked keyrange represents the range of keys overlapped
+ // by the given range
+ void acquire(const keyrange &range);
+
+ // effect: releases a locked keyrange and the mutex it holds
+ void release(void);
+
+ // effect: iterate over each range this locked_keyrange represents,
+ // calling function->fn() on each node's keyrange and txnid
+ // until there are no more or the function returns false
+ template <class F>
+ void iterate(F *function) const;
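+ // as an illustrative sketch (not part of this header), the functor passed
+ // to iterate() is expected to have the following shape, where returning
+ // false from fn() stops the traversal early:
+ //
+ //     struct find_fn_obj {
+ //         bool fn(const keyrange &range, TXNID txnid);
+ //     };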
+
+ // inserts the given range into the tree, with an associated txnid.
+ // requires: range does not overlap with anything in this locked_keyrange
+ // rationale: caller is responsible for only inserting unique ranges
+ void insert(const keyrange &range, TXNID txnid);
+
+ // effect: removes the given range from the tree
+ // requires: range exists exactly in this locked_keyrange
+ // rationale: caller is responsible for only removing existing ranges
+ void remove(const keyrange &range);
+
+ // effect: removes all of the keys represented by this locked keyrange
+ // rationale: we'd like a fast way to empty out a tree
+ void remove_all(void);
+
+ private:
+ // the concurrent tree this locked keyrange is for
+ concurrent_tree *m_tree;
+
+ // the range of keys this locked keyrange represents
+ keyrange m_range;
+
+ // the subtree under which all overlapping ranges exist
+ treenode *m_subtree;
+
+ friend class concurrent_tree_unit_test;
+ };
+
+ // effect: initialize the tree to an empty state
+ void create(const comparator *cmp);
+
+ // effect: destroy the tree.
+ // requires: tree is empty
+ void destroy(void);
+
+ // returns: true iff the tree is empty
+ bool is_empty(void);
+
+ // returns: the memory overhead of a single insertion into the tree
+ static uint64_t get_insertion_memory_overhead(void);
+
+private:
+ // the root needs to always exist so there's a lock to grab
+ // even if the tree is empty. that's why we store a treenode
+ // here and not a pointer to one.
+ treenode m_root;
+
+ friend class concurrent_tree_unit_test;
+};
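+
+// illustrative usage sketch (not part of this header): assumes a valid
+// comparator *cmp, a keyrange built from two DBTs, and a TXNID txnid;
+// error handling is omitted.
+//
+//     concurrent_tree tree;
+//     tree.create(cmp);
+//     concurrent_tree::locked_keyrange lkr;
+//     lkr.prepare(&tree);        // serialization point, locks the root
+//     lkr.acquire(range);        // narrow down to the overlapping subtree
+//     lkr.insert(range, txnid);  // operate on keys within the acquired range
+//     lkr.release();
+//     ...
+//     lkr.prepare(&tree);
+//     lkr.remove_all();          // empty the tree so it may be destroyed
+//     lkr.release();
+//     tree.destroy();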
+
+// include the implementation here so we can use templated member
+// functions in locked_keyrange, which are expanded and defined in the
+// compilation unit that includes "concurrent_tree.h". if we didn't
+// include the source here, then there would be problems with multiple
+// definitions of the tree functions
+#include "concurrent_tree.cc"
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/keyrange.cc b/storage/tokudb/PerconaFT/locktree/keyrange.cc
new file mode 100644
index 00000000..24a999e1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/keyrange.cc
@@ -0,0 +1,216 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "keyrange.h"
+
+#include <util/dbt.h>
+
+namespace toku {
+
+ // create a keyrange by borrowing the left and right dbt
+ // pointers. no memory is copied. no checks for infinity needed.
+ void keyrange::create(const DBT *left, const DBT *right) {
+ init_empty();
+ m_left_key = left;
+ m_right_key = right;
+ }
+
+ // destroy the key copies. if they were never set, then destroy does nothing.
+ void keyrange::destroy(void) {
+ toku_destroy_dbt(&m_left_key_copy);
+ toku_destroy_dbt(&m_right_key_copy);
+ }
+
+ // create a keyrange by copying the keys from the given range.
+ void keyrange::create_copy(const keyrange &range) {
+ // start with an initialized, empty range
+ init_empty();
+
+ // optimize the case where the left and right keys are the same.
+ // we'd like to only have one copy of the data.
+ if (toku_dbt_equals(range.get_left_key(), range.get_right_key())) {
+ set_both_keys(range.get_left_key());
+ } else {
+ // replace our empty left and right keys with
+ // copies of the range's left and right keys
+ replace_left_key(range.get_left_key());
+ replace_right_key(range.get_right_key());
+ }
+ }
+
+ // extend this keyrange by choosing the leftmost and rightmost
+ // endpoints between this range and the given. replaced keys
+ // in this range are freed and inherited keys are copied.
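+ // for example (illustrative): extending [3,5] with [1,4] under an integer
+ // comparator yields [1,5], and only the new left endpoint is copied.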
+ void keyrange::extend(const comparator &cmp, const keyrange &range) {
+ const DBT *range_left = range.get_left_key();
+ const DBT *range_right = range.get_right_key();
+ if (cmp(range_left, get_left_key()) < 0) {
+ replace_left_key(range_left);
+ }
+ if (cmp(range_right, get_right_key()) > 0) {
+ replace_right_key(range_right);
+ }
+ }
+
+ // how much memory does this keyrange take?
+ // - the size of the left and right keys
+ // --- ignore the fact that we may have optimized the point case.
+ // it complicates things for little gain.
+ // - the size of the keyrange class itself
+ uint64_t keyrange::get_memory_size(void) const {
+ const DBT *left_key = get_left_key();
+ const DBT *right_key = get_right_key();
+ return left_key->size + right_key->size + sizeof(keyrange);
+ }
+
+ // compare ranges.
+ keyrange::comparison keyrange::compare(const comparator &cmp, const keyrange &range) const {
+ if (cmp(get_right_key(), range.get_left_key()) < 0) {
+ return comparison::LESS_THAN;
+ } else if (cmp(get_left_key(), range.get_right_key()) > 0) {
+ return comparison::GREATER_THAN;
+ } else if (cmp(get_left_key(), range.get_left_key()) == 0 &&
+ cmp(get_right_key(), range.get_right_key()) == 0) {
+ return comparison::EQUALS;
+ } else {
+ return comparison::OVERLAPS;
+ }
+ }
+
+ bool keyrange::overlaps(const comparator &cmp, const keyrange &range) const {
+ // equality is a stronger form of overlapping.
+ // so two ranges "overlap" if they're either equal or just overlapping.
+ comparison c = compare(cmp, range);
+ return c == comparison::EQUALS || c == comparison::OVERLAPS;
+ }
+
+ keyrange keyrange::get_infinite_range(void) {
+ keyrange range;
+ range.create(toku_dbt_negative_infinity(), toku_dbt_positive_infinity());
+ return range;
+ }
+
+ void keyrange::init_empty(void) {
+ m_left_key = nullptr;
+ m_right_key = nullptr;
+ toku_init_dbt(&m_left_key_copy);
+ toku_init_dbt(&m_right_key_copy);
+ m_point_range = false;
+ }
+
+ const DBT *keyrange::get_left_key(void) const {
+ if (m_left_key) {
+ return m_left_key;
+ } else {
+ return &m_left_key_copy;
+ }
+ }
+
+ const DBT *keyrange::get_right_key(void) const {
+ if (m_right_key) {
+ return m_right_key;
+ } else {
+ return &m_right_key_copy;
+ }
+ }
+
+ // copy the given key once and set both the left and right pointers.
+ // optimization for point ranges, so the key is not copied twice.
+ void keyrange::set_both_keys(const DBT *key) {
+ if (toku_dbt_is_infinite(key)) {
+ m_left_key = key;
+ m_right_key = key;
+ } else {
+ toku_clone_dbt(&m_left_key_copy, *key);
+ toku_copyref_dbt(&m_right_key_copy, m_left_key_copy);
+ }
+ m_point_range = true;
+ }
+
+ // destroy the current left key. set and possibly copy the new one
+ void keyrange::replace_left_key(const DBT *key) {
+ // a little magic:
+ //
+ // if this is a point range, then the left and right keys share
+ // one copy of the data, and it lives in the left key copy. so
+ // if we're replacing the left key, move the real data to the
+ // right key copy instead of destroying it. now, the memory is
+ // owned by the right key and the left key may be replaced.
+ if (m_point_range) {
+ m_right_key_copy = m_left_key_copy;
+ } else {
+ toku_destroy_dbt(&m_left_key_copy);
+ }
+
+ if (toku_dbt_is_infinite(key)) {
+ m_left_key = key;
+ } else {
+ toku_clone_dbt(&m_left_key_copy, *key);
+ m_left_key = nullptr;
+ }
+ m_point_range = false;
+ }
+
+ // destroy the current right key. set and possibly copy the new one
+ void keyrange::replace_right_key(const DBT *key) {
+ toku_destroy_dbt(&m_right_key_copy);
+ if (toku_dbt_is_infinite(key)) {
+ m_right_key = key;
+ } else {
+ toku_clone_dbt(&m_right_key_copy, *key);
+ m_right_key = nullptr;
+ }
+ m_point_range = false;
+ }
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/keyrange.h b/storage/tokudb/PerconaFT/locktree/keyrange.h
new file mode 100644
index 00000000..8347e4d8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/keyrange.h
@@ -0,0 +1,146 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <ft/comparator.h>
+
+namespace toku {
+
+// A keyrange has a left and right key as endpoints.
+//
+// When a keyrange is created it owns no memory, but when it copies
+// or extends another keyrange, it copies memory as necessary. This
+// means it is cheap in the common case.
+
+class keyrange {
+public:
+
+ // effect: constructor that borrows left and right key pointers.
+ // no memory is allocated or copied.
+ void create(const DBT *left_key, const DBT *right_key);
+
+ // effect: constructor that allocates and copies another keyrange's points.
+ void create_copy(const keyrange &range);
+
+ // effect: destroys the keyrange, freeing any allocated memory
+ void destroy(void);
+
+ // effect: extends the keyrange by choosing the leftmost and rightmost
+ // endpoints from this range and the given range.
+ // replaced keys in this range are freed, new keys are copied.
+ void extend(const comparator &cmp, const keyrange &range);
+
+ // returns: the amount of memory this keyrange takes. does not account
+ // for point optimizations or malloc overhead.
+ uint64_t get_memory_size(void) const;
+
+ // returns: pointer to the left key of this range
+ const DBT *get_left_key(void) const;
+
+ // returns: pointer to the right key of this range
+ const DBT *get_right_key(void) const;
+
+ // two ranges are either equal, lt, gt, or overlapping
+ enum comparison {
+ EQUALS,
+ LESS_THAN,
+ GREATER_THAN,
+ OVERLAPS
+ };
+
+ // effect: compares this range to the given range
+ // returns: LESS_THAN if given range is strictly to the left
+ // GREATER_THAN if given range is strictly to the right
+ // EQUALS if given range has the same left and right endpoints
+ // OVERLAPS if at least one of the given range's endpoints falls
+ // between this range's endpoints
+ comparison compare(const comparator &cmp, const keyrange &range) const;
+
+ // returns: true if the range and the given range are equal or overlapping
+ bool overlaps(const comparator &cmp, const keyrange &range) const;
+
+ // returns: a keyrange representing -inf, +inf
+ static keyrange get_infinite_range(void);
+
+private:
+ // some keys should be copied, some keys should not be.
+ //
+ // to support both, we use two DBTs for copies and two pointers
+ // for temporaries. the access rule is:
+ //  - if a pointer is non-null, then it represents the key.
+ // - otherwise the pointer is null, and the key is in the copy.
+ DBT m_left_key_copy;
+ DBT m_right_key_copy;
+ const DBT *m_left_key;
+ const DBT *m_right_key;
+
+ // if this range is a point range, then m_left_key == m_right_key
+ // and the actual data is stored exactly once in m_left_key_copy.
+ bool m_point_range;
+
+ // effect: initializes a keyrange to be empty
+ void init_empty(void);
+
+ // effect: copies the given key once into the left key copy
+ // and sets the right key copy to share the left.
+ // rationale: optimization for point ranges to only do one malloc
+ void set_both_keys(const DBT *key);
+
+ // effect: destroys the current left key. sets and copies the new one.
+ void replace_left_key(const DBT *key);
+
+ // effect: destroys the current right key. sets and copies the new one.
+ void replace_right_key(const DBT *key);
+};
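+
+// illustrative usage sketch (not part of this header): assumes DBTs a, b, c
+// with a < b < c under the comparator cmp; error handling is omitted.
+//
+//     keyrange r;
+//     r.create(&a, &b);           // borrows a and b, allocates nothing
+//     keyrange wider;
+//     wider.create_copy(r);       // owns copies of a and b
+//     keyrange other;
+//     other.create(&b, &c);
+//     wider.extend(cmp, other);   // wider now spans [a, c]; c is copied
+//     wider.compare(cmp, other);  // OVERLAPS, since they share [b, c]
+//     r.destroy();                // nothing to free, r never owned memory
+//     other.destroy();
+//     wider.destroy();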
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/lock_request.cc b/storage/tokudb/PerconaFT/locktree/lock_request.cc
new file mode 100644
index 00000000..cc6fcafc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/lock_request.cc
@@ -0,0 +1,525 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "portability/toku_race_tools.h"
+
+#include "ft/txn/txn.h"
+#include "locktree/locktree.h"
+#include "locktree/lock_request.h"
+#include "util/dbt.h"
+
+namespace toku {
+
+// initialize a lock request's internals
+void lock_request::create(void) {
+ m_txnid = TXNID_NONE;
+ m_conflicting_txnid = TXNID_NONE;
+ m_start_time = 0;
+ m_left_key = nullptr;
+ m_right_key = nullptr;
+ toku_init_dbt(&m_left_key_copy);
+ toku_init_dbt(&m_right_key_copy);
+
+ m_type = type::UNKNOWN;
+ m_lt = nullptr;
+
+ m_complete_r = 0;
+ m_state = state::UNINITIALIZED;
+ m_info = nullptr;
+
+ toku_cond_init(*lock_request_m_wait_cond_key, &m_wait_cond, nullptr);
+
+ m_start_test_callback = nullptr;
+ m_start_before_pending_test_callback = nullptr;
+ m_retry_test_callback = nullptr;
+}
+
+// destroy a lock request.
+void lock_request::destroy(void) {
+ invariant(m_state != state::PENDING);
+ invariant(m_state != state::DESTROYED);
+ m_state = state::DESTROYED;
+ toku_destroy_dbt(&m_left_key_copy);
+ toku_destroy_dbt(&m_right_key_copy);
+ toku_cond_destroy(&m_wait_cond);
+}
+
+void lock_request::clearmem(char c) {
+ memset(this, c, sizeof(*this));
+}
+
+// set the lock request parameters. this API allows a lock request to be reused.
+void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, lock_request::type lock_type, bool big_txn, void *extra) {
+ invariant(m_state != state::PENDING);
+ m_lt = lt;
+ m_txnid = txnid;
+ m_left_key = left_key;
+ m_right_key = right_key;
+ toku_destroy_dbt(&m_left_key_copy);
+ toku_destroy_dbt(&m_right_key_copy);
+ m_type = lock_type;
+ m_state = state::INITIALIZED;
+ m_info = lt ? lt->get_lock_request_info() : nullptr;
+ m_big_txn = big_txn;
+ m_extra = extra;
+}
+
+// get rid of any stored left and right key copies and
+// replace them with copies of the given left and right key
+void lock_request::copy_keys() {
+ if (!toku_dbt_is_infinite(m_left_key)) {
+ toku_clone_dbt(&m_left_key_copy, *m_left_key);
+ m_left_key = &m_left_key_copy;
+ }
+ if (!toku_dbt_is_infinite(m_right_key)) {
+ toku_clone_dbt(&m_right_key_copy, *m_right_key);
+ m_right_key = &m_right_key_copy;
+ }
+}
+
+// what are the conflicts for this pending lock request?
+void lock_request::get_conflicts(txnid_set *conflicts) {
+ invariant(m_state == state::PENDING);
+ const bool is_write_request = m_type == type::WRITE;
+ m_lt->get_conflicts(is_write_request, m_txnid, m_left_key, m_right_key, conflicts);
+}
+
+// build a wait-for-graph for this lock request and the given conflict set
+// for each transaction B that blocks A's lock request
+// if B is blocked then
+// add (A,B) to the WFG and if B is new, fill in the WFG from B
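+// for example (illustrative): if txn A waits on txn B and B waits on A, the
+// graph gets the edges A->B and B->A, and deadlock_exists() below reports a
+// cycle when checked from A.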
+void lock_request::build_wait_graph(wfg *wait_graph, const txnid_set &conflicts) {
+ size_t num_conflicts = conflicts.size();
+ for (size_t i = 0; i < num_conflicts; i++) {
+ TXNID conflicting_txnid = conflicts.get(i);
+ lock_request *conflicting_request = find_lock_request(conflicting_txnid);
+ invariant(conflicting_txnid != m_txnid);
+ invariant(conflicting_request != this);
+ if (conflicting_request) {
+ bool already_exists = wait_graph->node_exists(conflicting_txnid);
+ wait_graph->add_edge(m_txnid, conflicting_txnid);
+ if (!already_exists) {
+ // recursively build the wait for graph rooted at the conflicting
+ // request, given its set of lock conflicts.
+ txnid_set other_conflicts;
+ other_conflicts.create();
+ conflicting_request->get_conflicts(&other_conflicts);
+ conflicting_request->build_wait_graph(wait_graph, other_conflicts);
+ other_conflicts.destroy();
+ }
+ }
+ }
+}
+
+// returns: true if the current set of lock requests contains
+// a deadlock, false otherwise.
+bool lock_request::deadlock_exists(const txnid_set &conflicts) {
+ wfg wait_graph;
+ wait_graph.create();
+
+ build_wait_graph(&wait_graph, conflicts);
+ bool deadlock = wait_graph.cycle_exists_from_txnid(m_txnid);
+
+ wait_graph.destroy();
+ return deadlock;
+}
+
+// try to acquire a lock described by this lock request.
+int lock_request::start(void) {
+ int r;
+
+ txnid_set conflicts;
+ conflicts.create();
+ if (m_type == type::WRITE) {
+ r = m_lt->acquire_write_lock(m_txnid, m_left_key, m_right_key, &conflicts, m_big_txn);
+ } else {
+ invariant(m_type == type::READ);
+ r = m_lt->acquire_read_lock(m_txnid, m_left_key, m_right_key, &conflicts, m_big_txn);
+ }
+
+ // if the lock is not granted, save it to the set of lock requests
+ // and check for a deadlock. if there is one, complete it as failed
+ if (r == DB_LOCK_NOTGRANTED) {
+ copy_keys();
+ m_state = state::PENDING;
+ m_start_time = toku_current_time_microsec() / 1000;
+ m_conflicting_txnid = conflicts.get(0);
+ if (m_start_before_pending_test_callback)
+ m_start_before_pending_test_callback();
+ toku_mutex_lock(&m_info->mutex);
+ insert_into_lock_requests();
+ if (deadlock_exists(conflicts)) {
+ remove_from_lock_requests();
+ r = DB_LOCK_DEADLOCK;
+ }
+ toku_mutex_unlock(&m_info->mutex);
+ if (m_start_test_callback)
+ m_start_test_callback(); // test callback
+ }
+
+ if (r != DB_LOCK_NOTGRANTED) {
+ complete(r);
+ }
+
+ conflicts.destroy();
+ return r;
+}
+
+// sleep on the lock request until it becomes resolved or the wait time has elapsed.
+int lock_request::wait(uint64_t wait_time_ms) {
+ return wait(wait_time_ms, 0, nullptr);
+}
+
+int lock_request::wait(uint64_t wait_time_ms, uint64_t killed_time_ms, int (*killed_callback)(void),
+ void (*lock_wait_callback)(void *, TXNID, TXNID)) {
+ uint64_t t_now = toku_current_time_microsec();
+ uint64_t t_start = t_now;
+ uint64_t t_end = t_start + wait_time_ms * 1000;
+
+ toku_mutex_lock(&m_info->mutex);
+
+ // check again, this time locking out other retry calls
+ if (m_state == state::PENDING) {
+ GrowableArray<TXNID> conflicts_collector;
+ conflicts_collector.init();
+ retry(&conflicts_collector);
+ if (m_state == state::PENDING) {
+ report_waits(&conflicts_collector, lock_wait_callback);
+ }
+ conflicts_collector.deinit();
+ }
+
+ while (m_state == state::PENDING) {
+ // check if this thread is killed
+ if (killed_callback && killed_callback()) {
+ remove_from_lock_requests();
+ complete(DB_LOCK_NOTGRANTED);
+ continue;
+ }
+
+ // compute next wait time
+ uint64_t t_wait;
+ if (killed_time_ms == 0) {
+ t_wait = t_end;
+ } else {
+ t_wait = t_now + killed_time_ms * 1000;
+ if (t_wait > t_end)
+ t_wait = t_end;
+ }
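+ // for example (illustrative): with wait_time_ms = 4000 and
+ // killed_time_ms = 1000, the timed wait below wakes roughly once per
+ // second to poll killed_callback until the four second deadline passes.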
+ struct timespec ts = {};
+ ts.tv_sec = t_wait / 1000000;
+ ts.tv_nsec = (t_wait % 1000000) * 1000;
+ int r = toku_cond_timedwait(&m_wait_cond, &m_info->mutex, &ts);
+ invariant(r == 0 || r == ETIMEDOUT);
+
+ t_now = toku_current_time_microsec();
+ if (m_state == state::PENDING && (t_now >= t_end)) {
+ m_info->counters.timeout_count += 1;
+
+ // if we're still pending and we timed out, then remove our
+ // request from the set of lock requests and fail.
+ remove_from_lock_requests();
+
+ // complete sets m_state to COMPLETE, breaking us out of the loop
+ complete(DB_LOCK_NOTGRANTED);
+ }
+ }
+
+ uint64_t t_real_end = toku_current_time_microsec();
+ uint64_t duration = t_real_end - t_start;
+ m_info->counters.wait_count += 1;
+ m_info->counters.wait_time += duration;
+ if (duration >= 1000000) {
+ m_info->counters.long_wait_count += 1;
+ m_info->counters.long_wait_time += duration;
+ }
+ toku_mutex_unlock(&m_info->mutex);
+
+ invariant(m_state == state::COMPLETE);
+ return m_complete_r;
+}
+
+// complete this lock request with the given return value
+void lock_request::complete(int complete_r) {
+ m_complete_r = complete_r;
+ m_state = state::COMPLETE;
+}
+
+const DBT *lock_request::get_left_key(void) const {
+ return m_left_key;
+}
+
+const DBT *lock_request::get_right_key(void) const {
+ return m_right_key;
+}
+
+TXNID lock_request::get_txnid(void) const {
+ return m_txnid;
+}
+
+uint64_t lock_request::get_start_time(void) const {
+ return m_start_time;
+}
+
+TXNID lock_request::get_conflicting_txnid(void) const {
+ return m_conflicting_txnid;
+}
+
+int lock_request::retry(GrowableArray<TXNID> *conflicts_collector) {
+ invariant(m_state == state::PENDING);
+ int r;
+ txnid_set conflicts;
+ conflicts.create();
+
+ if (m_type == type::WRITE) {
+ r = m_lt->acquire_write_lock(
+ m_txnid, m_left_key, m_right_key, &conflicts, m_big_txn);
+ } else {
+ r = m_lt->acquire_read_lock(
+ m_txnid, m_left_key, m_right_key, &conflicts, m_big_txn);
+ }
+
+ // if the acquisition succeeded then remove ourselves from the
+ // set of lock requests, complete, and signal the waiting thread.
+ if (r == 0) {
+ remove_from_lock_requests();
+ complete(r);
+ if (m_retry_test_callback)
+ m_retry_test_callback(); // test callback
+ toku_cond_broadcast(&m_wait_cond);
+ } else {
+ m_conflicting_txnid = conflicts.get(0);
+ add_conflicts_to_waits(&conflicts, conflicts_collector);
+ }
+ conflicts.destroy();
+
+ return r;
+}
+
+void lock_request::retry_all_lock_requests(
+ locktree *lt,
+ void (*lock_wait_callback)(void *, TXNID, TXNID),
+ void (*after_retry_all_test_callback)(void)) {
+ lt_lock_request_info *info = lt->get_lock_request_info();
+
+ // if there are no pending lock requests then there is nothing to do.
+ // the unlocked data race on pending_is_empty is OK since lock requests
+ // are retried after being added to the pending set.
+ if (info->pending_is_empty)
+ return;
+
+ // get my retry generation (post increment of retry_want)
+ unsigned long long my_retry_want = (info->retry_want += 1);
+
+ toku_mutex_lock(&info->retry_mutex);
+
+ GrowableArray<TXNID> conflicts_collector;
+ conflicts_collector.init();
+
+ // here is the group retry algorithm.
+ // get the latest retry_want count and use it as the generation number of
+ // this retry operation. if this retry generation is > the last retry
+ // generation, then do the lock retries. otherwise, no lock retries
+ // are needed.
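+ // for example (illustrative): if the last finished generation is 4 and three
+ // threads bump retry_want to 5, 6 and 7 before any of them runs, only the
+ // thread holding 5 satisfies (my_retry_want - 1) == retry_done; it performs
+ // one retry pass and advances retry_done to 7, so the other two skip theirs.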
+ if ((my_retry_want - 1) == info->retry_done) {
+ for (;;) {
+ if (!info->running_retry) {
+ info->running_retry = true;
+ info->retry_done = info->retry_want;
+ toku_mutex_unlock(&info->retry_mutex);
+ retry_all_lock_requests_info(info, &conflicts_collector);
+ if (after_retry_all_test_callback)
+ after_retry_all_test_callback();
+ toku_mutex_lock(&info->retry_mutex);
+ info->running_retry = false;
+ toku_cond_broadcast(&info->retry_cv);
+ break;
+ } else {
+ toku_cond_wait(&info->retry_cv, &info->retry_mutex);
+ }
+ }
+ }
+ toku_mutex_unlock(&info->retry_mutex);
+
+ report_waits(&conflicts_collector, lock_wait_callback);
+ conflicts_collector.deinit();
+}
+
+void lock_request::retry_all_lock_requests_info(lt_lock_request_info *info, GrowableArray<TXNID> *collector) {
+ toku_mutex_lock(&info->mutex);
+ // retry all of the pending lock requests.
+ for (size_t i = 0; i < info->pending_lock_requests.size();) {
+ lock_request *request;
+ int r = info->pending_lock_requests.fetch(i, &request);
+ invariant_zero(r);
+
+ // retry the lock request. if it didn't succeed,
+ // move on to the next lock request. otherwise
+ // the request is gone from the list so we may
+ // read the i'th entry for the next one.
+ r = request->retry(collector);
+ if (r != 0) {
+ i++;
+ }
+ }
+
+ // future threads should only retry lock requests if some still exist
+ info->should_retry_lock_requests = info->pending_lock_requests.size() > 0;
+ toku_mutex_unlock(&info->mutex);
+}
+
+void lock_request::add_conflicts_to_waits(txnid_set *conflicts,
+ GrowableArray<TXNID> *wait_conflicts) {
+ size_t num_conflicts = conflicts->size();
+ for (size_t i = 0; i < num_conflicts; i++) {
+ wait_conflicts->push(m_txnid);
+ wait_conflicts->push(conflicts->get(i));
+ }
+}
+
+void lock_request::report_waits(GrowableArray<TXNID> *wait_conflicts,
+ void (*lock_wait_callback)(void *, TXNID, TXNID)) {
+ if (!lock_wait_callback)
+ return;
+ size_t num_conflicts = wait_conflicts->get_size();
+ for (size_t i = 0; i < num_conflicts; i += 2) {
+ TXNID blocked_txnid = wait_conflicts->fetch_unchecked(i);
+ TXNID blocking_txnid = wait_conflicts->fetch_unchecked(i+1);
+ (*lock_wait_callback)(nullptr, blocked_txnid, blocking_txnid);
+ }
+}
+
+void *lock_request::get_extra(void) const {
+ return m_extra;
+}
+
+void lock_request::kill_waiter(void) {
+ remove_from_lock_requests();
+ complete(DB_LOCK_NOTGRANTED);
+ toku_cond_broadcast(&m_wait_cond);
+}
+
+void lock_request::kill_waiter(locktree *lt, void *extra) {
+ lt_lock_request_info *info = lt->get_lock_request_info();
+ toku_mutex_lock(&info->mutex);
+ for (size_t i = 0; i < info->pending_lock_requests.size(); i++) {
+ lock_request *request;
+ int r = info->pending_lock_requests.fetch(i, &request);
+ if (r == 0 && request->get_extra() == extra) {
+ request->kill_waiter();
+ break;
+ }
+ }
+ toku_mutex_unlock(&info->mutex);
+}
+
+// find another lock request by txnid. must hold the mutex.
+lock_request *lock_request::find_lock_request(const TXNID &txnid) {
+ lock_request *request;
+ int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(txnid, &request, nullptr);
+ if (r != 0) {
+ request = nullptr;
+ }
+ return request;
+}
+
+// insert this lock request into the locktree's set. must hold the mutex.
+void lock_request::insert_into_lock_requests(void) {
+ uint32_t idx;
+ lock_request *request;
+ int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(
+ m_txnid, &request, &idx);
+ invariant(r == DB_NOTFOUND);
+ r = m_info->pending_lock_requests.insert_at(this, idx);
+ invariant_zero(r);
+ m_info->pending_is_empty = false;
+}
+
+// remove this lock request from the locktree's set. must hold the mutex.
+void lock_request::remove_from_lock_requests(void) {
+ uint32_t idx;
+ lock_request *request;
+ int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(
+ m_txnid, &request, &idx);
+ invariant_zero(r);
+ invariant(request == this);
+ r = m_info->pending_lock_requests.delete_at(idx);
+ invariant_zero(r);
+ if (m_info->pending_lock_requests.size() == 0)
+ m_info->pending_is_empty = true;
+}
+
+int lock_request::find_by_txnid(lock_request *const &request,
+ const TXNID &txnid) {
+ TXNID request_txnid = request->m_txnid;
+ if (request_txnid < txnid) {
+ return -1;
+ } else if (request_txnid == txnid) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+void lock_request::set_start_test_callback(void (*f)(void)) {
+ m_start_test_callback = f;
+}
+
+void lock_request::set_start_before_pending_test_callback(void (*f)(void)) {
+ m_start_before_pending_test_callback = f;
+}
+
+void lock_request::set_retry_test_callback(void (*f)(void)) {
+ m_retry_test_callback = f;
+}
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/lock_request.h b/storage/tokudb/PerconaFT/locktree/lock_request.h
new file mode 100644
index 00000000..e16e77ed
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/lock_request.h
@@ -0,0 +1,231 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+#include "portability/toku_pthread.h"
+
+#include "locktree/locktree.h"
+#include "locktree/txnid_set.h"
+#include "locktree/wfg.h"
+#include "ft/comparator.h"
+
+namespace toku {
+
+// A lock request contains the db, the key range, the lock type, and
+// the transaction id that describes a potential row range lock.
+//
+// the typical use case is:
+// - initialize a lock request
+// - start to try to acquire the lock
+// - do something else
+// - wait for the lock request to be resolved on a timed condition
+// - destroy the lock request
+// a lock request is resolved when its state is no longer pending: it has
+// either been granted, timed out, or deadlocked. when resolved, the state
+// of the lock request is changed and any waiting threads are awakened.
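+//
+// illustrative sketch of that sequence (assumes an existing locktree *lt, a
+// TXNID txnid and DBT keys; the names and the timeout are hypothetical):
+//
+//     lock_request request;
+//     request.create();
+//     request.set(lt, txnid, &left_key, &right_key, lock_request::WRITE, false);
+//     int r = request.start();
+//     if (r == DB_LOCK_NOTGRANTED) {
+//         r = request.wait(1000 /* ms */);
+//     }
+//     request.destroy();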
+
+class lock_request {
+public:
+ enum type {
+ UNKNOWN,
+ READ,
+ WRITE
+ };
+
+ // effect: Initializes a lock request.
+ void create(void);
+
+ // effect: Destroys a lock request.
+ void destroy(void);
+ void clearmem(char c);
+
+ // effect: Resets the lock request parameters, allowing it to be reused.
+ // requires: Lock request was already created at some point
+ void set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, type lock_type, bool big_txn, void *extra = nullptr);
+
+ // effect: Tries to acquire a lock described by this lock request.
+ // returns: The return code of locktree::acquire_[write,read]_lock()
+ // or DB_LOCK_DEADLOCK if this request would end up deadlocked.
+ int start(void);
+
+ // effect: Sleeps until either the request is granted or the wait time expires.
+ // returns: The return code of locktree::acquire_[write,read]_lock()
+ // or simply DB_LOCK_NOTGRANTED if the wait time expired.
+ int wait(uint64_t wait_time_ms);
+ int wait(uint64_t wait_time_ms, uint64_t killed_time_ms, int (*killed_callback)(void),
+ void (*lock_wait_callback)(void *, TXNID, TXNID) = nullptr);
+
+ // return: left end-point of the lock range
+ const DBT *get_left_key(void) const;
+
+ // return: right end-point of the lock range
+ const DBT *get_right_key(void) const;
+
+ // return: the txnid waiting for a lock
+ TXNID get_txnid(void) const;
+
+ // return: when this lock request started, as milliseconds from epoch
+ uint64_t get_start_time(void) const;
+
+ // return: which txnid is blocking this request (there may be more, though)
+ TXNID get_conflicting_txnid(void) const;
+
+ // effect: Retries all of the lock requests for the given locktree.
+ // Any lock request that is successfully restarted is completed and
+ // woken up. The rest remain pending.
+ static void retry_all_lock_requests(
+ locktree *lt,
+ void (*lock_wait_callback)(void *, TXNID, TXNID) = nullptr,
+ void (*after_retry_test_callback)(void) = nullptr);
+ static void retry_all_lock_requests_info(lt_lock_request_info *info, GrowableArray<TXNID> *collector);
+
+ void set_start_test_callback(void (*f)(void));
+ void set_start_before_pending_test_callback(void (*f)(void));
+ void set_retry_test_callback(void (*f)(void));
+
+ void *get_extra(void) const;
+
+ void kill_waiter(void);
+ static void kill_waiter(locktree *lt, void *extra);
+
+ private:
+ enum state {
+ UNINITIALIZED,
+ INITIALIZED,
+ PENDING,
+ COMPLETE,
+ DESTROYED,
+ };
+
+ // The keys for a lock request are stored "unowned" in m_left_key
+ // and m_right_key. When the request is about to go to sleep, it
+ // copies these keys into m_left_key_copy and m_right_key_copy and
+ // points m_left_key and m_right_key at those copies.
+ TXNID m_txnid;
+ TXNID m_conflicting_txnid;
+ uint64_t m_start_time;
+ const DBT *m_left_key;
+ const DBT *m_right_key;
+ DBT m_left_key_copy;
+ DBT m_right_key_copy;
+
+ // The lock request type and associated locktree
+ type m_type;
+ locktree *m_lt;
+
+ // If the lock request is in the completed state, then its
+ // final return value is stored in m_complete_r
+ int m_complete_r;
+ state m_state;
+
+ toku_cond_t m_wait_cond;
+
+ bool m_big_txn;
+
+ // the lock request info state stored in the
+ // locktree that this lock request is for.
+ struct lt_lock_request_info *m_info;
+
+ void *m_extra;
+
+ // effect: tries again to acquire the lock described by this lock request
+ // returns: 0 if retrying the request succeeded and is now complete
+ int retry(GrowableArray<TXNID> *conflict_collector);
+
+ void complete(int complete_r);
+
+ // effect: Finds another lock request by txnid.
+ // requires: The lock request info mutex is held
+ lock_request *find_lock_request(const TXNID &txnid);
+
+ // effect: Insert this lock request into the locktree's set.
+ // requires: the locktree's mutex is held
+ void insert_into_lock_requests(void);
+
+ // effect: Removes this lock request from the locktree's set.
+ // requires: The lock request info mutex is held
+ void remove_from_lock_requests(void);
+
+ // effect: Asks this request's locktree which txnids are preventing
+ // us from getting the lock described by this request.
+ // returns: conflicts is populated with the txnid's that this request
+ // is blocked on
+ void get_conflicts(txnid_set *conflicts);
+
+ // effect: Builds a wait-for-graph for this lock request and the given conflict set
+ void build_wait_graph(wfg *wait_graph, const txnid_set &conflicts);
+
+ // returns: True if this lock request is in deadlock with the given conflicts set
+ bool deadlock_exists(const txnid_set &conflicts);
+
+ void copy_keys(void);
+
+ static int find_by_txnid(lock_request *const &request, const TXNID &txnid);
+
+ // Report list of conflicts to lock wait callback.
+ static void report_waits(GrowableArray<TXNID> *wait_conflicts,
+ void (*lock_wait_callback)(void *, TXNID, TXNID));
+ void add_conflicts_to_waits(txnid_set *conflicts, GrowableArray<TXNID> *wait_conflicts);
+
+ void (*m_start_test_callback)(void);
+ void (*m_start_before_pending_test_callback)(void);
+ void (*m_retry_test_callback)(void);
+
+ friend class lock_request_unit_test;
+};
+ENSURE_POD(lock_request);
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/locktree.cc b/storage/tokudb/PerconaFT/locktree/locktree.cc
new file mode 100644
index 00000000..8e81adef
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/locktree.cc
@@ -0,0 +1,787 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <memory.h>
+
+#include <util/growable_array.h>
+
+#include <portability/toku_pthread.h>
+#include <portability/toku_time.h>
+
+#include "locktree.h"
+#include "range_buffer.h"
+
+// including the concurrent_tree here expands the templates
+// and "defines" the implementation, so we do it here in
+// the locktree source file instead of the header.
+#include "concurrent_tree.h"
+
+namespace toku {
+
+// A locktree represents the set of row locks owned by all transactions
+// over an open dictionary. Read and write ranges are represented as
+// a left and right key which are compared with the given descriptor
+// and comparison fn.
+//
+// Each locktree has a reference count which it manages
+// but does nothing based on the value of the reference count - it is
+// up to the user of the locktree to destroy it when it sees fit.
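+//
+// illustrative sketch (not part of this file): acquiring a write lock on a
+// locktree without a manager; dict_id, cmp, txnid and the DBT keys are
+// assumed to exist, and releasing the lock is omitted here.
+//
+//     locktree lt;
+//     lt.create(nullptr, dict_id, cmp);
+//     txnid_set conflicts;
+//     conflicts.create();
+//     int r = lt.acquire_write_lock(txnid, &left_key, &right_key, &conflicts, false);
+//     // r == 0 on success, DB_LOCK_NOTGRANTED if conflicts was populated
+//     conflicts.destroy();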
+
+void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id, const comparator &cmp) {
+ m_mgr = mgr;
+ m_dict_id = dict_id;
+
+ m_cmp.create_from(cmp);
+ m_reference_count = 1;
+ m_userdata = nullptr;
+
+ XCALLOC(m_rangetree);
+ m_rangetree->create(&m_cmp);
+
+ m_sto_txnid = TXNID_NONE;
+ m_sto_buffer.create();
+ m_sto_score = STO_SCORE_THRESHOLD;
+ m_sto_end_early_count = 0;
+ m_sto_end_early_time = 0;
+
+ m_lock_request_info.init();
+}
+
+void lt_lock_request_info::init(void) {
+ pending_lock_requests.create();
+ pending_is_empty = true;
+ ZERO_STRUCT(mutex);
+ toku_mutex_init(*locktree_request_info_mutex_key, &mutex, nullptr);
+ retry_want = retry_done = 0;
+ ZERO_STRUCT(counters);
+ ZERO_STRUCT(retry_mutex);
+ toku_mutex_init(
+ *locktree_request_info_retry_mutex_key, &retry_mutex, nullptr);
+ toku_cond_init(*locktree_request_info_retry_cv_key, &retry_cv, nullptr);
+ running_retry = false;
+
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&pending_is_empty,
+ sizeof(pending_is_empty));
+ TOKU_DRD_IGNORE_VAR(pending_is_empty);
+}
+
+void locktree::destroy(void) {
+ invariant(m_reference_count == 0);
+ invariant(m_lock_request_info.pending_lock_requests.size() == 0);
+ m_cmp.destroy();
+ m_rangetree->destroy();
+ toku_free(m_rangetree);
+ m_sto_buffer.destroy();
+ m_lock_request_info.destroy();
+}
+
+void lt_lock_request_info::destroy(void) {
+ pending_lock_requests.destroy();
+ toku_mutex_destroy(&mutex);
+ toku_mutex_destroy(&retry_mutex);
+ toku_cond_destroy(&retry_cv);
+}
+
+void locktree::add_reference(void) {
+ (void)toku_sync_add_and_fetch(&m_reference_count, 1);
+}
+
+uint32_t locktree::release_reference(void) {
+ return toku_sync_sub_and_fetch(&m_reference_count, 1);
+}
+
+uint32_t locktree::get_reference_count(void) {
+ return m_reference_count;
+}
+
+// a container for a range/txnid pair
+struct row_lock {
+ keyrange range;
+ TXNID txnid;
+};
+
+// iterate over a locked keyrange and copy out all of the data,
+// storing each row lock into the given growable array. the
+// caller does not own the range inside the returned row locks,
+// so remove from the tree with care using them as keys.
+static void iterate_and_get_overlapping_row_locks(const concurrent_tree::locked_keyrange *lkr,
+ GrowableArray<row_lock> *row_locks) {
+ struct copy_fn_obj {
+ GrowableArray<row_lock> *row_locks;
+ bool fn(const keyrange &range, TXNID txnid) {
+ row_lock lock = { .range = range, .txnid = txnid };
+ row_locks->push(lock);
+ return true;
+ }
+ } copy_fn;
+ copy_fn.row_locks = row_locks;
+ lkr->iterate(&copy_fn);
+}
+
+// given a txnid and a set of overlapping row locks, determine
+// which txnids are conflicting, and store them in the conflicts
+// set, if given.
+static bool determine_conflicting_txnids(const GrowableArray<row_lock> &row_locks,
+ const TXNID &txnid, txnid_set *conflicts) {
+ bool conflicts_exist = false;
+ const size_t num_overlaps = row_locks.get_size();
+ for (size_t i = 0; i < num_overlaps; i++) {
+ const row_lock lock = row_locks.fetch_unchecked(i);
+ const TXNID other_txnid = lock.txnid;
+ if (other_txnid != txnid) {
+ if (conflicts) {
+ conflicts->add(other_txnid);
+ }
+ conflicts_exist = true;
+ }
+ }
+ return conflicts_exist;
+}
+
+// how much memory does a row lock take up in a concurrent tree?
+static uint64_t row_lock_size_in_tree(const row_lock &lock) {
+ const uint64_t overhead = concurrent_tree::get_insertion_memory_overhead();
+ return lock.range.get_memory_size() + overhead;
+}
+
+// remove and destroy the given row lock from the locked keyrange,
+// then notify the memory tracker of the newly freed lock.
+static void remove_row_lock_from_tree(concurrent_tree::locked_keyrange *lkr,
+ const row_lock &lock, locktree_manager *mgr) {
+ const uint64_t mem_released = row_lock_size_in_tree(lock);
+ lkr->remove(lock.range);
+ if (mgr != nullptr) {
+ mgr->note_mem_released(mem_released);
+ }
+}
+
+// insert a row lock into the locked keyrange, then notify
+// the memory tracker of this newly acquired lock.
+static void insert_row_lock_into_tree(concurrent_tree::locked_keyrange *lkr,
+ const row_lock &lock, locktree_manager *mgr) {
+ uint64_t mem_used = row_lock_size_in_tree(lock);
+ lkr->insert(lock.range, lock.txnid);
+ if (mgr != nullptr) {
+ mgr->note_mem_used(mem_used);
+ }
+}
+
+void locktree::sto_begin(TXNID txnid) {
+ invariant(m_sto_txnid == TXNID_NONE);
+ invariant(m_sto_buffer.is_empty());
+ m_sto_txnid = txnid;
+}
+
+void locktree::sto_append(const DBT *left_key, const DBT *right_key) {
+ uint64_t buffer_mem, delta;
+ keyrange range;
+ range.create(left_key, right_key);
+
+ buffer_mem = m_sto_buffer.total_memory_size();
+ m_sto_buffer.append(left_key, right_key);
+ delta = m_sto_buffer.total_memory_size() - buffer_mem;
+ if (m_mgr != nullptr) {
+ m_mgr->note_mem_used(delta);
+ }
+}
+
+void locktree::sto_end(void) {
+ uint64_t mem_size = m_sto_buffer.total_memory_size();
+ if (m_mgr != nullptr) {
+ m_mgr->note_mem_released(mem_size);
+ }
+ m_sto_buffer.destroy();
+ m_sto_buffer.create();
+ m_sto_txnid = TXNID_NONE;
+}
+
+void locktree::sto_end_early_no_accounting(void *prepared_lkr) {
+ sto_migrate_buffer_ranges_to_tree(prepared_lkr);
+ sto_end();
+ toku_unsafe_set(m_sto_score, 0);
+}
+
+void locktree::sto_end_early(void *prepared_lkr) {
+ m_sto_end_early_count++;
+
+ tokutime_t t0 = toku_time_now();
+ sto_end_early_no_accounting(prepared_lkr);
+ tokutime_t t1 = toku_time_now();
+
+ m_sto_end_early_time += (t1 - t0);
+}
+
+void locktree::sto_migrate_buffer_ranges_to_tree(void *prepared_lkr) {
+ // There should be something to migrate, and nothing in the rangetree.
+ invariant(!m_sto_buffer.is_empty());
+ invariant(m_rangetree->is_empty());
+
+ concurrent_tree sto_rangetree;
+ concurrent_tree::locked_keyrange sto_lkr;
+ sto_rangetree.create(&m_cmp);
+
+ // insert all of the ranges from the single txnid buffer into a new rangetree
+ range_buffer::iterator iter(&m_sto_buffer);
+ range_buffer::iterator::record rec;
+ while (iter.current(&rec)) {
+ sto_lkr.prepare(&sto_rangetree);
+ int r = acquire_lock_consolidated(&sto_lkr,
+ m_sto_txnid, rec.get_left_key(), rec.get_right_key(), nullptr);
+ invariant_zero(r);
+ sto_lkr.release();
+ iter.next();
+ }
+
+ // Iterate the newly created rangetree and insert each range into the
+ // locktree's rangetree, on behalf of the old single txnid.
+ struct migrate_fn_obj {
+ concurrent_tree::locked_keyrange *dst_lkr;
+ bool fn(const keyrange &range, TXNID txnid) {
+ dst_lkr->insert(range, txnid);
+ return true;
+ }
+ } migrate_fn;
+ migrate_fn.dst_lkr = static_cast<concurrent_tree::locked_keyrange *>(prepared_lkr);
+ sto_lkr.prepare(&sto_rangetree);
+ sto_lkr.iterate(&migrate_fn);
+ sto_lkr.remove_all();
+ sto_lkr.release();
+ sto_rangetree.destroy();
+ invariant(!m_rangetree->is_empty());
+}
+
+bool locktree::sto_try_acquire(void *prepared_lkr,
+ TXNID txnid,
+ const DBT *left_key, const DBT *right_key) {
+ if (m_rangetree->is_empty() && m_sto_buffer.is_empty() && toku_unsafe_fetch(m_sto_score) >= STO_SCORE_THRESHOLD) {
+ // We can do the optimization because the rangetree is empty, and
+ // we know it's worth trying because the sto score is big enough.
+ sto_begin(txnid);
+ } else if (m_sto_txnid != TXNID_NONE) {
+ // We are currently doing the optimization. Check if we need to cancel
+ // it because a new txnid appeared, or if the current single txnid has
+ // taken too many locks already.
+ if (m_sto_txnid != txnid || m_sto_buffer.get_num_ranges() > STO_BUFFER_MAX_SIZE) {
+ sto_end_early(prepared_lkr);
+ }
+ }
+
+ // At this point the sto txnid is properly set. If it is valid, then
+ // this txnid can append its lock to the sto buffer successfully.
+ if (m_sto_txnid != TXNID_NONE) {
+ invariant(m_sto_txnid == txnid);
+ sto_append(left_key, right_key);
+ return true;
+ } else {
+ invariant(m_sto_buffer.is_empty());
+ return false;
+ }
+}
+
+// try to acquire a lock and consolidate it with existing locks if possible
+// param: lkr, a prepared locked keyrange
+// return: 0 on success, DB_LOCK_NOTGRANTED if conflicting locks exist.
+int locktree::acquire_lock_consolidated(void *prepared_lkr,
+ TXNID txnid,
+ const DBT *left_key, const DBT *right_key,
+ txnid_set *conflicts) {
+ int r = 0;
+ concurrent_tree::locked_keyrange *lkr;
+
+ keyrange requested_range;
+ requested_range.create(left_key, right_key);
+ lkr = static_cast<concurrent_tree::locked_keyrange *>(prepared_lkr);
+ lkr->acquire(requested_range);
+
+ // copy out the set of overlapping row locks.
+ GrowableArray<row_lock> overlapping_row_locks;
+ overlapping_row_locks.init();
+ iterate_and_get_overlapping_row_locks(lkr, &overlapping_row_locks);
+ size_t num_overlapping_row_locks = overlapping_row_locks.get_size();
+
+ // if any overlapping row locks conflict with this request, bail out.
+ bool conflicts_exist = determine_conflicting_txnids(overlapping_row_locks,
+ txnid, conflicts);
+ if (!conflicts_exist) {
+ // there are no conflicts, so all of the overlaps are for the requesting txnid.
+ // so, we must consolidate all existing overlapping ranges and the requested
+ // range into one dominating range. then we insert the dominating range.
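+ // for example (illustrative): if this txnid already holds [1,3] and [5,7]
+ // and requests [2,6], the loop below extends the request to [1,7], removes
+ // the two existing locks, and inserts a single [1,7] lock in their place.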
+ for (size_t i = 0; i < num_overlapping_row_locks; i++) {
+ row_lock overlapping_lock = overlapping_row_locks.fetch_unchecked(i);
+ invariant(overlapping_lock.txnid == txnid);
+ requested_range.extend(m_cmp, overlapping_lock.range);
+ remove_row_lock_from_tree(lkr, overlapping_lock, m_mgr);
+ }
+
+ row_lock new_lock = { .range = requested_range, .txnid = txnid };
+ insert_row_lock_into_tree(lkr, new_lock, m_mgr);
+ } else {
+ r = DB_LOCK_NOTGRANTED;
+ }
+
+ requested_range.destroy();
+ overlapping_row_locks.deinit();
+ return r;
+}
+
+// acquire a lock in the given key range, inclusive. if successful,
+// return 0. otherwise, populate the conflicts txnid_set with the set of
+// transactions that conflict with this request.
+int locktree::acquire_lock(bool is_write_request,
+ TXNID txnid,
+ const DBT *left_key, const DBT *right_key,
+ txnid_set *conflicts) {
+ int r = 0;
+
+ // we are only supporting write locks for simplicity
+ invariant(is_write_request);
+
+ // acquire and prepare a locked keyrange over the requested range.
+    // prepare is a serialization point, so we take the opportunity to
+ // try the single txnid optimization first.
+ concurrent_tree::locked_keyrange lkr;
+ lkr.prepare(m_rangetree);
+
+ bool acquired = sto_try_acquire(&lkr, txnid, left_key, right_key);
+ if (!acquired) {
+ r = acquire_lock_consolidated(&lkr, txnid, left_key, right_key, conflicts);
+ }
+
+ lkr.release();
+ return r;
+}
+
+int locktree::try_acquire_lock(bool is_write_request,
+ TXNID txnid,
+ const DBT *left_key, const DBT *right_key,
+ txnid_set *conflicts, bool big_txn) {
+ // All ranges in the locktree must have left endpoints <= right endpoints.
+ // Range comparisons rely on this fact, so we make a paranoid invariant here.
+ paranoid_invariant(m_cmp(left_key, right_key) <= 0);
+ int r = m_mgr == nullptr ? 0 :
+ m_mgr->check_current_lock_constraints(big_txn);
+ if (r == 0) {
+ r = acquire_lock(is_write_request, txnid, left_key, right_key, conflicts);
+ }
+ return r;
+}
+
+// the locktree silently upgrades read locks to write locks for simplicity
+int locktree::acquire_read_lock(TXNID txnid, const DBT *left_key, const DBT *right_key,
+ txnid_set *conflicts, bool big_txn) {
+ return acquire_write_lock(txnid, left_key, right_key, conflicts, big_txn);
+}
+
+int locktree::acquire_write_lock(TXNID txnid, const DBT *left_key, const DBT *right_key,
+ txnid_set *conflicts, bool big_txn) {
+ return try_acquire_lock(true, txnid, left_key, right_key, conflicts, big_txn);
+}
+
+void locktree::get_conflicts(bool is_write_request,
+ TXNID txnid, const DBT *left_key, const DBT *right_key,
+ txnid_set *conflicts) {
+ // because we only support write locks, ignore this bit for now.
+ (void) is_write_request;
+
+    // prepare and acquire a locked keyrange over the range
+ keyrange range;
+ range.create(left_key, right_key);
+ concurrent_tree::locked_keyrange lkr;
+ lkr.prepare(m_rangetree);
+ lkr.acquire(range);
+
+ // copy out the set of overlapping row locks and determine the conflicts
+ GrowableArray<row_lock> overlapping_row_locks;
+ overlapping_row_locks.init();
+ iterate_and_get_overlapping_row_locks(&lkr, &overlapping_row_locks);
+
+ // we don't care if conflicts exist. we just want the conflicts set populated.
+ (void) determine_conflicting_txnids(overlapping_row_locks, txnid, conflicts);
+
+ lkr.release();
+ overlapping_row_locks.deinit();
+ range.destroy();
+}
+
+// Effect:
+// For each range in the lock tree that overlaps the given range and has
+// the given txnid, remove it.
+// Rationale:
+// In the common case, there is only the range [left_key, right_key] and
+// it is associated with txnid, so this is a single tree delete.
+//
+// However, consolidation and escalation change the objects in the tree
+// without telling the txn anything. In this case, the txn may own a
+// large range lock that represents its ownership of many smaller range
+// locks. For example, the txn may think it owns point locks on keys 1,
+// 2, and 3, but due to escalation, only the object [1,3] exists in the
+// tree.
+//
+// The first call for a small lock will remove the large range lock, and
+// the rest of the calls should do nothing. After the first release,
+// another thread can acquire one of the locks that the txn thinks it
+// still owns. That's ok, because the txn doesn't want it anymore (it
+// unlocks everything at once), but it may find a lock that it does not
+// own.
+//
+// In our example, the txn unlocks key 1, which actually removes the
+// whole lock [1,3]. Now, someone else can lock 2 before our txn gets
+// around to unlocking 2, so we should not remove that lock.
+void locktree::remove_overlapping_locks_for_txnid(TXNID txnid,
+ const DBT *left_key,
+ const DBT *right_key) {
+ keyrange release_range;
+ release_range.create(left_key, right_key);
+
+ // acquire and prepare a locked keyrange over the release range
+ concurrent_tree::locked_keyrange lkr;
+ lkr.prepare(m_rangetree);
+ lkr.acquire(release_range);
+
+ // copy out the set of overlapping row locks.
+ GrowableArray<row_lock> overlapping_row_locks;
+ overlapping_row_locks.init();
+ iterate_and_get_overlapping_row_locks(&lkr, &overlapping_row_locks);
+ size_t num_overlapping_row_locks = overlapping_row_locks.get_size();
+
+ for (size_t i = 0; i < num_overlapping_row_locks; i++) {
+ row_lock lock = overlapping_row_locks.fetch_unchecked(i);
+ // If this isn't our lock, that's ok, just don't remove it.
+ // See rationale above.
+ if (lock.txnid == txnid) {
+ remove_row_lock_from_tree(&lkr, lock, m_mgr);
+ }
+ }
+
+ lkr.release();
+ overlapping_row_locks.deinit();
+ release_range.destroy();
+}
+
+bool locktree::sto_txnid_is_valid_unsafe(void) const {
+ return toku_unsafe_fetch(m_sto_txnid) != TXNID_NONE;
+}
+
+int locktree::sto_get_score_unsafe(void) const {
+ return toku_unsafe_fetch(m_sto_score);
+}
+
+bool locktree::sto_try_release(TXNID txnid) {
+ bool released = false;
+ if (toku_unsafe_fetch(m_sto_txnid) != TXNID_NONE) {
+ // check the bit again with a prepared locked keyrange,
+ // which protects the optimization bits and rangetree data
+ concurrent_tree::locked_keyrange lkr;
+ lkr.prepare(m_rangetree);
+ if (m_sto_txnid != TXNID_NONE) {
+ // this txnid better be the single txnid on this locktree,
+ // or else we are in big trouble (meaning the logic is broken)
+ invariant(m_sto_txnid == txnid);
+ invariant(m_rangetree->is_empty());
+ sto_end();
+ released = true;
+ }
+ lkr.release();
+ }
+ return released;
+}
+
+// release all of the locks for a txnid whose endpoints are pairs
+// in the given range buffer.
+void locktree::release_locks(TXNID txnid, const range_buffer *ranges) {
+ // try the single txn optimization. if it worked, then all of the
+ // locks are already released, otherwise we need to do it here.
+ bool released = sto_try_release(txnid);
+ if (!released) {
+ range_buffer::iterator iter(ranges);
+ range_buffer::iterator::record rec;
+ while (iter.current(&rec)) {
+ const DBT *left_key = rec.get_left_key();
+ const DBT *right_key = rec.get_right_key();
+ // All ranges in the locktree must have left endpoints <= right endpoints.
+ // Range comparisons rely on this fact, so we make a paranoid invariant here.
+ paranoid_invariant(m_cmp(left_key, right_key) <= 0);
+ remove_overlapping_locks_for_txnid(txnid, left_key, right_key);
+ iter.next();
+ }
+ // Increase the sto score slightly. Eventually it will hit
+ // the threshold and we'll try the optimization again. This
+ // is how a previously multithreaded system transitions into
+ // a single threaded system that benefits from the optimization.
+ if (toku_unsafe_fetch(m_sto_score) < STO_SCORE_THRESHOLD) {
+ toku_sync_fetch_and_add(&m_sto_score, 1);
+ }
+ }
+}
+
+// iterate over a locked keyrange and extract copies of the first N
+// row locks, storing each one into the given array of size N,
+// then removing each extracted lock from the locked keyrange.
+static int extract_first_n_row_locks(concurrent_tree::locked_keyrange *lkr,
+ locktree_manager *mgr,
+ row_lock *row_locks, int num_to_extract) {
+
+ struct extract_fn_obj {
+ int num_extracted;
+ int num_to_extract;
+ row_lock *row_locks;
+ bool fn(const keyrange &range, TXNID txnid) {
+ if (num_extracted < num_to_extract) {
+ row_lock lock;
+ lock.range.create_copy(range);
+ lock.txnid = txnid;
+ row_locks[num_extracted++] = lock;
+ return true;
+ } else {
+ return false;
+ }
+ }
+ } extract_fn;
+
+ extract_fn.row_locks = row_locks;
+ extract_fn.num_to_extract = num_to_extract;
+ extract_fn.num_extracted = 0;
+ lkr->iterate(&extract_fn);
+
+ // now that the ranges have been copied out, complete
+ // the extraction by removing the ranges from the tree.
+ // use remove_row_lock_from_tree() so we properly track the
+ // amount of memory and number of locks freed.
+ int num_extracted = extract_fn.num_extracted;
+ invariant(num_extracted <= num_to_extract);
+ for (int i = 0; i < num_extracted; i++) {
+ remove_row_lock_from_tree(lkr, row_locks[i], mgr);
+ }
+
+ return num_extracted;
+}
+
+// Store each newly escalated lock in a range buffer for the appropriate txnid.
+// We'll rebuild the locktree by iterating over these ranges, and then we
+// can pass back each txnid/buffer pair individually through a callback
+// to notify higher layers that locks have changed.
+struct txnid_range_buffer {
+ TXNID txnid;
+ range_buffer buffer;
+
+ static int find_by_txnid(struct txnid_range_buffer *const &other_buffer, const TXNID &txnid) {
+ if (txnid < other_buffer->txnid) {
+ return -1;
+ } else if (other_buffer->txnid == txnid) {
+ return 0;
+ } else {
+ return 1;
+ }
+ }
+};
+
+// escalate the locks in the locktree by merging adjacent
+// locks that have the same txnid into one larger lock.
+//
+// if there's only one txnid in the locktree then this
+// approach works well. if there are many txnids and each
+// has locks in a random/alternating order, then this does
+// not work so well.
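+//
+// For intuition, a small worked example (hypothetical keys): if txn A holds
+// point locks [1,1], [2,2], [3,3] and txn B holds [5,5], the extracted locks
+// arrive in range-sorted order, so A's three adjacent locks collapse into the
+// single dominating lock [1,3] while B keeps [5,5]. If the locks alternate,
+// e.g. A:[1,1], B:[2,2], A:[3,3], no two adjacent locks share a txnid and
+// nothing is merged.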
+void locktree::escalate(lt_escalate_cb after_escalate_callback, void *after_escalate_callback_extra) {
+ omt<struct txnid_range_buffer *, struct txnid_range_buffer *> range_buffers;
+ range_buffers.create();
+
+ // prepare and acquire a locked keyrange on the entire locktree
+ concurrent_tree::locked_keyrange lkr;
+ keyrange infinite_range = keyrange::get_infinite_range();
+ lkr.prepare(m_rangetree);
+ lkr.acquire(infinite_range);
+
+ // if we're in the single txnid optimization, simply call it off.
+ // if you have to run escalation, you probably don't care about
+ // the optimization anyway, and this makes things easier.
+ if (m_sto_txnid != TXNID_NONE) {
+ // We are already accounting for this escalation time and
+ // count, so don't do it for sto_end_early too.
+ sto_end_early_no_accounting(&lkr);
+ }
+
+ // extract and remove batches of row locks from the locktree
+ int num_extracted;
+ const int num_row_locks_per_batch = 128;
+ row_lock *XCALLOC_N(num_row_locks_per_batch, extracted_buf);
+
+ // we always remove the "first" n because we are removing n
+    // each time we do an extraction. so this loops until it's empty.
+ while ((num_extracted =
+ extract_first_n_row_locks(&lkr, m_mgr, extracted_buf,
+ num_row_locks_per_batch)) > 0) {
+ int current_index = 0;
+ while (current_index < num_extracted) {
+ // every batch of extracted locks is in range-sorted order. search
+ // through them and merge adjacent locks with the same txnid into
+ // one dominating lock and save it to a set of escalated locks.
+ //
+ // first, find the index of the next row lock with a different txnid
+ int next_txnid_index = current_index + 1;
+ while (next_txnid_index < num_extracted &&
+ extracted_buf[current_index].txnid == extracted_buf[next_txnid_index].txnid) {
+ next_txnid_index++;
+ }
+
+ // Create an escalated range for the current txnid that dominates
+            // each range between the current index and the next txnid's index.
+ const TXNID current_txnid = extracted_buf[current_index].txnid;
+ const DBT *escalated_left_key = extracted_buf[current_index].range.get_left_key();
+ const DBT *escalated_right_key = extracted_buf[next_txnid_index - 1].range.get_right_key();
+
+ // Try to find a range buffer for the current txnid. Create one if it doesn't exist.
+ // Then, append the new escalated range to the buffer.
+ uint32_t idx;
+ struct txnid_range_buffer *existing_range_buffer;
+ int r = range_buffers.find_zero<TXNID, txnid_range_buffer::find_by_txnid>(
+ current_txnid,
+ &existing_range_buffer,
+ &idx
+ );
+ if (r == DB_NOTFOUND) {
+ struct txnid_range_buffer *XMALLOC(new_range_buffer);
+ new_range_buffer->txnid = current_txnid;
+ new_range_buffer->buffer.create();
+ new_range_buffer->buffer.append(escalated_left_key, escalated_right_key);
+ range_buffers.insert_at(new_range_buffer, idx);
+ } else {
+ invariant_zero(r);
+ invariant(existing_range_buffer->txnid == current_txnid);
+ existing_range_buffer->buffer.append(escalated_left_key, escalated_right_key);
+ }
+
+ current_index = next_txnid_index;
+ }
+
+ // destroy the ranges copied during the extraction
+ for (int i = 0; i < num_extracted; i++) {
+ extracted_buf[i].range.destroy();
+ }
+ }
+ toku_free(extracted_buf);
+
+ // Rebuild the locktree from each range in each range buffer,
+ // then notify higher layers that the txnid's locks have changed.
+ invariant(m_rangetree->is_empty());
+ const size_t num_range_buffers = range_buffers.size();
+ for (size_t i = 0; i < num_range_buffers; i++) {
+ struct txnid_range_buffer *current_range_buffer;
+ int r = range_buffers.fetch(i, &current_range_buffer);
+ invariant_zero(r);
+
+ const TXNID current_txnid = current_range_buffer->txnid;
+ range_buffer::iterator iter(&current_range_buffer->buffer);
+ range_buffer::iterator::record rec;
+ while (iter.current(&rec)) {
+ keyrange range;
+ range.create(rec.get_left_key(), rec.get_right_key());
+ row_lock lock = { .range = range, .txnid = current_txnid };
+ insert_row_lock_into_tree(&lkr, lock, m_mgr);
+ iter.next();
+ }
+
+ // Notify higher layers that locks have changed for the current txnid
+ if (after_escalate_callback) {
+ after_escalate_callback(current_txnid, this, current_range_buffer->buffer, after_escalate_callback_extra);
+ }
+ current_range_buffer->buffer.destroy();
+ }
+
+ while (range_buffers.size() > 0) {
+ struct txnid_range_buffer *buffer;
+ int r = range_buffers.fetch(0, &buffer);
+ invariant_zero(r);
+ r = range_buffers.delete_at(0);
+ invariant_zero(r);
+ toku_free(buffer);
+ }
+ range_buffers.destroy();
+
+ lkr.release();
+}
+
+void *locktree::get_userdata(void) const {
+ return m_userdata;
+}
+
+void locktree::set_userdata(void *userdata) {
+ m_userdata = userdata;
+}
+
+struct lt_lock_request_info *locktree::get_lock_request_info(void) {
+ return &m_lock_request_info;
+}
+
+void locktree::set_comparator(const comparator &cmp) {
+ m_cmp.inherit(cmp);
+}
+
+locktree_manager *locktree::get_manager(void) const {
+ return m_mgr;
+}
+
+int locktree::compare(const locktree *lt) const {
+ if (m_dict_id.dictid < lt->m_dict_id.dictid) {
+ return -1;
+ } else if (m_dict_id.dictid == lt->m_dict_id.dictid) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+DICTIONARY_ID locktree::get_dict_id() const {
+ return m_dict_id;
+}
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/locktree.h b/storage/tokudb/PerconaFT/locktree/locktree.h
new file mode 100644
index 00000000..4d9e5bda
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/locktree.h
@@ -0,0 +1,523 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <atomic>
+
+#include <db.h>
+#include <toku_pthread.h>
+#include <toku_time.h>
+
+#include <ft/comparator.h>
+#include <ft/ft-ops.h> // just for DICTIONARY_ID..
+
+#include <util/omt.h>
+
+#include "txnid_set.h"
+#include "wfg.h"
+#include "range_buffer.h"
+
+
+namespace toku {
+
+ class locktree;
+ class locktree_manager;
+ class lock_request;
+ class concurrent_tree;
+
+ typedef int (*lt_create_cb)(locktree *lt, void *extra);
+ typedef void (*lt_destroy_cb)(locktree *lt);
+ typedef void (*lt_escalate_cb)(TXNID txnid, const locktree *lt, const range_buffer &buffer, void *extra);
+
+ struct lt_counters {
+ uint64_t wait_count, wait_time;
+ uint64_t long_wait_count, long_wait_time;
+ uint64_t timeout_count;
+
+ void add(const lt_counters &rhs) {
+ wait_count += rhs.wait_count;
+ wait_time += rhs.wait_time;
+ long_wait_count += rhs.long_wait_count;
+ long_wait_time += rhs.long_wait_time;
+ timeout_count += rhs.timeout_count;
+ }
+ };
+
+ // Lock request state for some locktree
+ struct lt_lock_request_info {
+ omt<lock_request *> pending_lock_requests;
+ std::atomic_bool pending_is_empty;
+ toku_mutex_t mutex;
+ bool should_retry_lock_requests;
+ lt_counters counters;
+ std::atomic_ullong retry_want;
+ unsigned long long retry_done;
+ toku_mutex_t retry_mutex;
+ toku_cond_t retry_cv;
+ bool running_retry;
+
+ void init(void);
+ void destroy(void);
+ };
+
+ // The locktree manager manages a set of locktrees, one for each open
+ // dictionary. Locktrees are retrieved from the manager. When they are no
+    // longer needed, they are released by the user.
+ class locktree_manager {
+ public:
+ // param: create_cb, called just after a locktree is first created.
+ // destroy_cb, called just before a locktree is destroyed.
+ // escalate_cb, called after a locktree is escalated (with extra
+ // param)
+ void create(lt_create_cb create_cb,
+ lt_destroy_cb destroy_cb,
+ lt_escalate_cb escalate_cb,
+ void *extra);
+
+ void destroy(void);
+
+ size_t get_max_lock_memory(void);
+
+ int set_max_lock_memory(size_t max_lock_memory);
+
+ // effect: Get a locktree from the manager. If a locktree exists with the given
+        //         dict_id, it is referenced and then returned. If one does not exist, it
+ // is created. It will use the comparator for comparing keys. The on_create
+ // callback (passed to locktree_manager::create()) will be called with the
+ // given extra parameter.
+ locktree *get_lt(DICTIONARY_ID dict_id, const comparator &cmp, void *on_create_extra);
+
+ void reference_lt(locktree *lt);
+
+ // effect: Releases one reference on a locktree. If the reference count transitions
+ // to zero, the on_destroy callback is called before it gets destroyed.
+ void release_lt(locktree *lt);
+
+ void get_status(LTM_STATUS status);
+
+ // effect: calls the iterate function on each pending lock request
+ // note: holds the manager's mutex
+ typedef int (*lock_request_iterate_callback)(DICTIONARY_ID dict_id,
+ TXNID txnid,
+ const DBT *left_key,
+ const DBT *right_key,
+ TXNID blocking_txnid,
+ uint64_t start_time,
+ void *extra);
+ int iterate_pending_lock_requests(lock_request_iterate_callback cb, void *extra);
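+        // A minimal usage sketch (hypothetical callback; names are illustrative):
+        //   static int count_cb(DICTIONARY_ID, TXNID, const DBT *, const DBT *,
+        //                       TXNID, uint64_t, void *extra) {
+        //       (*static_cast<uint64_t *>(extra))++;  // count each pending request
+        //       return 0;                             // nonzero stops the iteration
+        //   }
+        //   uint64_t pending = 0;
+        //   mgr->iterate_pending_lock_requests(count_cb, &pending);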
+
+        // effect: Determines if too many locks or too much memory is being used,
+        //         and runs escalation on the manager if so.
+        // param: big_txn, if the current transaction is 'big' (has spilled rollback logs)
+        // returns: 0 if there are enough resources to create a new lock, or TOKUDB_OUT_OF_LOCKS
+ // if there are not enough resources and lock escalation failed to free up
+ // enough resources for a new lock.
+ int check_current_lock_constraints(bool big_txn);
+
+ bool over_big_threshold(void);
+
+ void note_mem_used(uint64_t mem_used);
+
+ void note_mem_released(uint64_t mem_freed);
+
+ bool out_of_locks(void) const;
+
+ // Escalate all locktrees
+ void escalate_all_locktrees(void);
+
+ // Escalate a set of locktrees
+ void escalate_locktrees(locktree **locktrees, int num_locktrees);
+
+ // effect: calls the private function run_escalation(), only ok to
+ // do for tests.
+ // rationale: to get better stress test coverage, we want a way to
+        //            deterministically trigger lock escalation.
+ void run_escalation_for_test(void);
+ void run_escalation(void);
+
+ // Add time t to the escalator's wait time statistics
+ void add_escalator_wait_time(uint64_t t);
+
+ void kill_waiter(void *extra);
+
+ private:
+ static const uint64_t DEFAULT_MAX_LOCK_MEMORY = 64L * 1024 * 1024;
+
+        // tracks the maximum allowed and currently used lock memory
+ uint64_t m_max_lock_memory;
+ uint64_t m_current_lock_memory;
+
+ struct lt_counters m_lt_counters;
+
+ // the create and destroy callbacks for the locktrees
+ lt_create_cb m_lt_create_callback;
+ lt_destroy_cb m_lt_destroy_callback;
+ lt_escalate_cb m_lt_escalate_callback;
+ void *m_lt_escalate_callback_extra;
+
+ omt<locktree *> m_locktree_map;
+
+ // the manager's mutex protects the locktree map
+ toku_mutex_t m_mutex;
+
+ void mutex_lock(void);
+
+ void mutex_unlock(void);
+
+ // Manage the set of open locktrees
+ locktree *locktree_map_find(const DICTIONARY_ID &dict_id);
+ void locktree_map_put(locktree *lt);
+ void locktree_map_remove(locktree *lt);
+
+ static int find_by_dict_id(locktree *const &lt, const DICTIONARY_ID &dict_id);
+
+ void escalator_init(void);
+ void escalator_destroy(void);
+
+ // statistics about lock escalation.
+ toku_mutex_t m_escalation_mutex;
+ uint64_t m_escalation_count;
+ tokutime_t m_escalation_time;
+ uint64_t m_escalation_latest_result;
+ uint64_t m_wait_escalation_count;
+ uint64_t m_wait_escalation_time;
+ uint64_t m_long_wait_escalation_count;
+ uint64_t m_long_wait_escalation_time;
+
+ // the escalator coordinates escalation on a set of locktrees for a bunch of threads
+ class locktree_escalator {
+ public:
+ void create(void);
+ void destroy(void);
+ void run(locktree_manager *mgr, void (*escalate_locktrees_fun)(void *extra), void *extra);
+
+ private:
+ toku_mutex_t m_escalator_mutex;
+ toku_cond_t m_escalator_done;
+ bool m_escalator_running;
+ };
+
+ locktree_escalator m_escalator;
+
+ friend class manager_unit_test;
+ };
+
+ // A locktree represents the set of row locks owned by all transactions
+ // over an open dictionary. Read and write ranges are represented as
+ // a left and right key which are compared with the given comparator
+ //
+ // Locktrees are not created and destroyed by the user. Instead, they are
+ // referenced and released using the locktree manager.
+ //
+ // A sample workflow looks like this:
+ // - Create a manager.
+    // - Get a locktree by dictionary id from the manager.
+    // - Perform read/write lock acquisition on the locktree, add references to
+ // the locktree using the manager, release locks, release references, etc.
+ // - ...
+ // - Release the final reference to the locktree. It will be destroyed.
+ // - Destroy the manager.
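+    //
+    // A rough code sketch of that workflow (illustrative only; the dict_id, cmp,
+    // txnid, key and range_buffer variables are assumed to exist, setup and error
+    // handling are elided, and the nullptr callbacks simply mean "no callback"):
+    //   locktree_manager mgr;
+    //   mgr.create(nullptr, nullptr, nullptr, nullptr);
+    //   locktree *lt = mgr.get_lt(dict_id, cmp, nullptr);
+    //   txnid_set conflicts;
+    //   int r = lt->acquire_write_lock(txnid, left_key, right_key, &conflicts, false);
+    //   // ... do work; remember the endpoints in a range_buffer 'ranges' ...
+    //   lt->release_locks(txnid, &ranges);
+    //   mgr.release_lt(lt);
+    //   mgr.destroy();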
+ class locktree {
+ public:
+ // effect: Creates a locktree
+ void create(locktree_manager *mgr, DICTIONARY_ID dict_id, const comparator &cmp);
+
+ void destroy(void);
+
+ // For thread-safe, external reference counting
+ void add_reference(void);
+
+ // requires: the reference count is > 0
+ // returns: the reference count, after decrementing it by one
+ uint32_t release_reference(void);
+
+ // returns: the current reference count
+ uint32_t get_reference_count(void);
+
+ // effect: Attempts to grant a read lock for the range of keys between [left_key, right_key].
+ // returns: If the lock cannot be granted, return DB_LOCK_NOTGRANTED, and populate the
+ // given conflicts set with the txnids that hold conflicting locks in the range.
+ // If the locktree cannot create more locks, return TOKUDB_OUT_OF_LOCKS.
+        // note: Read locks are not shared between txnids as one might expect;
+        //       they are silently upgraded to write locks. This is for
+        //       simplicity, since read locks are rare in MySQL.
+ int acquire_read_lock(TXNID txnid, const DBT *left_key, const DBT *right_key, txnid_set *conflicts, bool big_txn);
+
+ // effect: Attempts to grant a write lock for the range of keys between [left_key, right_key].
+ // returns: If the lock cannot be granted, return DB_LOCK_NOTGRANTED, and populate the
+ // given conflicts set with the txnids that hold conflicting locks in the range.
+ // If the locktree cannot create more locks, return TOKUDB_OUT_OF_LOCKS.
+ int acquire_write_lock(TXNID txnid, const DBT *left_key, const DBT *right_key, txnid_set *conflicts, bool big_txn);
+
+        // effect: populate the conflicts set with the txnids that would prevent
+ // the given txnid from getting a lock on [left_key, right_key]
+ void get_conflicts(bool is_write_request, TXNID txnid,
+ const DBT *left_key, const DBT *right_key, txnid_set *conflicts);
+
+ // effect: Release all of the lock ranges represented by the range buffer for a txnid.
+ void release_locks(TXNID txnid, const range_buffer *ranges);
+
+ // effect: Runs escalation on this locktree
+ void escalate(lt_escalate_cb after_escalate_callback, void *extra);
+
+ // returns: The userdata associated with this locktree, or null if it has not been set.
+ void *get_userdata(void) const;
+
+ void set_userdata(void *userdata);
+
+ locktree_manager *get_manager(void) const;
+
+ void set_comparator(const comparator &cmp);
+
+ int compare(const locktree *lt) const;
+
+ DICTIONARY_ID get_dict_id() const;
+
+ // Private info struct for storing pending lock request state.
+ // Only to be used by lock requests. We store it here as
+ // something less opaque than usual to strike a tradeoff between
+ // abstraction and code complexity. It is still fairly abstract
+ // since the lock_request object is opaque
+ struct lt_lock_request_info *get_lock_request_info(void);
+
+ private:
+ locktree_manager *m_mgr;
+ DICTIONARY_ID m_dict_id;
+ uint32_t m_reference_count;
+
+ // Since the memory referenced by this comparator is not owned by the
+ // locktree, the user must guarantee it will outlive the locktree.
+ //
+ // The ydb API accomplishes this by opening an ft_handle in the on_create
+ // callback, which will keep the underlying FT (and its descriptor) in memory
+ // for as long as the handle is open. The ft_handle is stored opaquely in the
+ // userdata pointer below. see locktree_manager::get_lt w/ on_create_extra
+ comparator m_cmp;
+
+ concurrent_tree *m_rangetree;
+
+ void *m_userdata;
+ struct lt_lock_request_info m_lock_request_info;
+
+ // The following fields and members prefixed with "sto_" are for
+ // the single txnid optimization, intended to speed up the case
+ // when only one transaction is using the locktree. If we know
+ // the locktree has only one transaction, then acquiring locks
+ // takes O(1) work and releasing all locks takes O(1) work.
+ //
+ // How do we know that the locktree only has a single txnid?
+ // What do we do if it does?
+ //
+ // When a txn with txnid T requests a lock:
+ // - If the tree is empty, the optimization is possible. Set the single
+ // txnid to T, and insert the lock range into the buffer.
+ // - If the tree is not empty, check if the single txnid is T. If so,
+ // append the lock range to the buffer. Otherwise, migrate all of
+        //   the locks in the buffer into the rangetree on behalf of the old
+        //   single txnid, and invalidate the single txnid.
+ //
+ // When a txn with txnid T releases its locks:
+ // - If the single txnid is valid, it must be for T. Destroy the buffer.
+ // - If it's not valid, release locks the normal way in the rangetree.
+ //
+ // To carry out the optimization we need to record a single txnid
+ // and a range buffer for each locktree, each protected by the root
+ // lock of the locktree's rangetree. The root lock for a rangetree
+ // is grabbed by preparing a locked keyrange on the rangetree.
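+        //
+        // For example (informal): with an empty locktree and a score at the
+        // threshold, txn A's first lock sets the single txnid to A and appends
+        // the range to the buffer; A's later locks are appended the same way.
+        // If a different txn B then requests a lock, the buffered ranges are
+        // migrated into the rangetree on behalf of A, the single txnid is
+        // invalidated, and B's request proceeds through the rangetree. If
+        // instead A simply releases, the whole buffer is destroyed at once.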
+ TXNID m_sto_txnid;
+ range_buffer m_sto_buffer;
+
+ // The single txnid optimization speeds up the case when only one
+ // transaction is using the locktree. But it has the potential to
+ // hurt the case when more than one txnid exists.
+ //
+ // There are two things we need to do to make the optimization only
+ // optimize the case we care about, and not hurt the general case.
+ //
+ // Bound the worst-case latency for lock migration when the
+ // optimization stops working:
+        // - Idea: Stop the optimization and migrate immediately if we notice
+        //   the single txnid has taken many locks in the range buffer.
+ // - Implementation: Enforce a max size on the single txnid range buffer.
+ // - Analysis: Choosing the perfect max value, M, is difficult to do
+ // without some feedback from the field. Intuition tells us that M should
+ // not be so small that the optimization is worthless, and it should not
+ // be so big that it's unreasonable to have to wait behind a thread doing
+ // the work of converting M buffer locks into rangetree locks.
+ //
+ // Prevent concurrent-transaction workloads from trying the optimization
+ // in vain:
+ // - Idea: Don't even bother trying the optimization if we think the
+ // system is in a concurrent-transaction state.
+ // - Implementation: Do something even simpler than detecting whether the
+        //   system is in a concurrent-transaction state. Just keep a "score" value
+ // and some threshold. If at any time the locktree is eligible for the
+ // optimization, only do it if the score is at this threshold. When you
+ // actually do the optimization but someone has to migrate locks in the buffer
+ // (expensive), then reset the score back to zero. Each time a txn
+ // releases locks, the score is incremented by 1.
+ // - Analysis: If you let the threshold be "C", then at most 1 / C txns will
+ // do the optimization in a concurrent-transaction system. Similarly, it
+        //   takes at most C txns to start using the single txnid optimization, which
+ // is good when the system transitions from multithreaded to single threaded.
+ //
+ // STO_BUFFER_MAX_SIZE:
+ //
+        // The buffer is capped at 50 * 1024 ranges. A rangetree of that many
+        // elements can be built quickly (well under a second), so we can be
+        // pretty confident that this threshold enables the optimization almost
+        // always, and prevents super pathological latency issues for the first
+        // lock taken by a second thread.
+ //
+ // STO_SCORE_THRESHOLD:
+ //
+ // A simple first guess at a good value for the score threshold is 100.
+ // By our analysis, we'd end up doing the optimization in vain for
+ // around 1% of all transactions, which seems reasonable. Further,
+ // if the system goes single threaded, it ought to be pretty quick
+ // for 100 transactions to go by, so we won't have to wait long before
+        // we start doing the single txnid optimization again.
+ static const int STO_BUFFER_MAX_SIZE = 50 * 1024;
+ static const int STO_SCORE_THRESHOLD = 100;
+ int m_sto_score;
+
+ // statistics about time spent ending the STO early
+ uint64_t m_sto_end_early_count;
+ tokutime_t m_sto_end_early_time;
+
+        // effect: begins the single txnid optimization, setting m_sto_txnid
+ // to the given txnid.
+ // requires: m_sto_txnid is invalid
+ void sto_begin(TXNID txnid);
+
+ // effect: append a range to the sto buffer
+ // requires: m_sto_txnid is valid
+ void sto_append(const DBT *left_key, const DBT *right_key);
+
+        // effect: ends the single txnid optimization, releasing any memory
+ // stored in the sto buffer, notifying the tracker, and
+ // invalidating m_sto_txnid.
+ // requires: m_sto_txnid is valid
+ void sto_end(void);
+
+ // params: prepared_lkr is a void * to a prepared locked keyrange. see below.
+ // effect: ends the single txnid optimization early, migrating buffer locks
+ // into the rangetree, calling sto_end(), and then setting the
+ // sto_score back to zero.
+ // requires: m_sto_txnid is valid
+ void sto_end_early(void *prepared_lkr);
+ void sto_end_early_no_accounting(void *prepared_lkr);
+
+ // params: prepared_lkr is a void * to a prepared locked keyrange. we can't use
+ // the real type because the compiler won't allow us to forward declare
+ // concurrent_tree::locked_keyrange without including concurrent_tree.h,
+ // which we cannot do here because it is a template implementation.
+ // requires: the prepared locked keyrange is for the locktree's rangetree
+ // requires: m_sto_txnid is valid
+ // effect: migrates each lock in the single txnid buffer into the locktree's
+ // rangetree, notifying the memory tracker as necessary.
+ void sto_migrate_buffer_ranges_to_tree(void *prepared_lkr);
+
+ // effect: If m_sto_txnid is valid, then release the txnid's locks
+ // by ending the optimization.
+ // requires: If m_sto_txnid is valid, it is equal to the given txnid
+ // returns: True if locks were released for this txnid
+ bool sto_try_release(TXNID txnid);
+
+ // params: prepared_lkr is a void * to a prepared locked keyrange. see above.
+ // requires: the prepared locked keyrange is for the locktree's rangetree
+ // effect: If m_sto_txnid is valid and equal to the given txnid, then
+ // append a range onto the buffer. Otherwise, if m_sto_txnid is valid
+ // but not equal to this txnid, then migrate the buffer's locks
+ // into the rangetree and end the optimization, setting the score
+ // back to zero.
+ // returns: true if the lock was acquired for this txnid
+ bool sto_try_acquire(void *prepared_lkr, TXNID txnid,
+ const DBT *left_key, const DBT *right_key);
+
+ // Effect:
+ // Provides a hook for a helgrind suppression.
+ // Returns:
+ // true if m_sto_txnid is not TXNID_NONE
+ bool sto_txnid_is_valid_unsafe(void) const;
+
+ // Effect:
+ // Provides a hook for a helgrind suppression.
+ // Returns:
+ // m_sto_score
+        int sto_get_score_unsafe(void) const;
+
+ void remove_overlapping_locks_for_txnid(TXNID txnid,
+ const DBT *left_key, const DBT *right_key);
+
+ int acquire_lock_consolidated(void *prepared_lkr, TXNID txnid,
+ const DBT *left_key, const DBT *right_key,
+ txnid_set *conflicts);
+
+ int acquire_lock(bool is_write_request, TXNID txnid,
+ const DBT *left_key, const DBT *right_key,
+ txnid_set *conflicts);
+
+ int try_acquire_lock(bool is_write_request, TXNID txnid,
+ const DBT *left_key, const DBT *right_key,
+ txnid_set *conflicts, bool big_txn);
+
+
+ friend class locktree_unit_test;
+ friend class manager_unit_test;
+ friend class lock_request_unit_test;
+
+ // engine status reaches into the locktree to read some stats
+ friend void locktree_manager::get_status(LTM_STATUS status);
+ };
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/manager.cc b/storage/tokudb/PerconaFT/locktree/manager.cc
new file mode 100644
index 00000000..5662150d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/manager.cc
@@ -0,0 +1,513 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdlib.h>
+#include <string.h>
+#include <portability/toku_pthread.h>
+
+#include "locktree.h"
+#include "lock_request.h"
+
+#include <util/status.h>
+
+namespace toku {
+
+void locktree_manager::create(lt_create_cb create_cb, lt_destroy_cb destroy_cb, lt_escalate_cb escalate_cb, void *escalate_extra) {
+ m_max_lock_memory = DEFAULT_MAX_LOCK_MEMORY;
+ m_current_lock_memory = 0;
+
+ m_locktree_map.create();
+ m_lt_create_callback = create_cb;
+ m_lt_destroy_callback = destroy_cb;
+ m_lt_escalate_callback = escalate_cb;
+ m_lt_escalate_callback_extra = escalate_extra;
+ ZERO_STRUCT(m_mutex);
+ toku_mutex_init(*manager_mutex_key, &m_mutex, nullptr);
+
+ ZERO_STRUCT(m_lt_counters);
+
+ escalator_init();
+}
+
+void locktree_manager::destroy(void) {
+ escalator_destroy();
+ invariant(m_current_lock_memory == 0);
+ invariant(m_locktree_map.size() == 0);
+ m_locktree_map.destroy();
+ toku_mutex_destroy(&m_mutex);
+}
+
+void locktree_manager::mutex_lock(void) {
+ toku_mutex_lock(&m_mutex);
+}
+
+void locktree_manager::mutex_unlock(void) {
+ toku_mutex_unlock(&m_mutex);
+}
+
+size_t locktree_manager::get_max_lock_memory(void) {
+ return m_max_lock_memory;
+}
+
+int locktree_manager::set_max_lock_memory(size_t max_lock_memory) {
+ int r = 0;
+ mutex_lock();
+ if (max_lock_memory < m_current_lock_memory) {
+ r = EDOM;
+ } else {
+ m_max_lock_memory = max_lock_memory;
+ }
+ mutex_unlock();
+ return r;
+}
+
+int locktree_manager::find_by_dict_id(locktree *const &lt, const DICTIONARY_ID &dict_id) {
+ if (lt->get_dict_id().dictid < dict_id.dictid) {
+ return -1;
+ } else if (lt->get_dict_id().dictid == dict_id.dictid) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+locktree *locktree_manager::locktree_map_find(const DICTIONARY_ID &dict_id) {
+ locktree *lt;
+ int r = m_locktree_map.find_zero<DICTIONARY_ID, find_by_dict_id>(dict_id, &lt, nullptr);
+ return r == 0 ? lt : nullptr;
+}
+
+void locktree_manager::locktree_map_put(locktree *lt) {
+ int r = m_locktree_map.insert<DICTIONARY_ID, find_by_dict_id>(lt, lt->get_dict_id(), nullptr);
+ invariant_zero(r);
+}
+
+void locktree_manager::locktree_map_remove(locktree *lt) {
+ uint32_t idx;
+ locktree *found_lt;
+ int r = m_locktree_map.find_zero<DICTIONARY_ID, find_by_dict_id>(
+ lt->get_dict_id(), &found_lt, &idx);
+ invariant_zero(r);
+ invariant(found_lt == lt);
+ r = m_locktree_map.delete_at(idx);
+ invariant_zero(r);
+}
+
+locktree *locktree_manager::get_lt(DICTIONARY_ID dict_id,
+ const comparator &cmp, void *on_create_extra) {
+
+ // hold the mutex around searching and maybe
+ // inserting into the locktree map
+ mutex_lock();
+
+ locktree *lt = locktree_map_find(dict_id);
+ if (lt == nullptr) {
+ XCALLOC(lt);
+ lt->create(this, dict_id, cmp);
+
+ // new locktree created - call the on_create callback
+ // and put it in the locktree map
+ if (m_lt_create_callback) {
+ int r = m_lt_create_callback(lt, on_create_extra);
+ if (r != 0) {
+ lt->release_reference();
+ lt->destroy();
+ toku_free(lt);
+ lt = nullptr;
+ }
+ }
+ if (lt) {
+ locktree_map_put(lt);
+ }
+ } else {
+ reference_lt(lt);
+ }
+
+ mutex_unlock();
+
+ return lt;
+}
+
+void locktree_manager::reference_lt(locktree *lt) {
+ // increment using a sync fetch and add.
+ // the caller guarantees that the lt won't be
+ // destroyed while we increment the count here.
+ //
+ // the caller can do this by already having an lt
+ // reference or by holding the manager mutex.
+ //
+ // if the manager's mutex is held, it is ok for the
+ // reference count to transition from 0 to 1 (no race),
+ // since we're serialized with other opens and closes.
+ lt->add_reference();
+}
+
+void locktree_manager::release_lt(locktree *lt) {
+ bool do_destroy = false;
+ DICTIONARY_ID dict_id = lt->get_dict_id();
+
+ // Release a reference on the locktree. If the count transitions to zero,
+ // then we *may* need to do the cleanup.
+ //
+ // Grab the manager's mutex and look for a locktree with this locktree's
+ // dictionary id. Since dictionary id's never get reused, any locktree
+ // found must be the one we just released a reference on.
+ //
+ // At least two things could have happened since we got the mutex:
+ // - Another thread gets a locktree with the same dict_id, increments
+ // the reference count. In this case, we shouldn't destroy it.
+ // - Another thread gets a locktree with the same dict_id and then
+ // releases it quickly, transitioning the reference count from zero to
+ // one and back to zero. In this case, only one of us should destroy it.
+ // It doesn't matter which. We originally missed this case, see #5776.
+ //
+ // After 5776, the high level rule for release is described below.
+ //
+ // If a thread releases a locktree and notices the reference count transition
+ // to zero, then that thread must immediately:
+ // - assume the locktree object is invalid
+ // - grab the manager's mutex
+ // - search the locktree map for a locktree with the same dict_id and remove
+ // it, if it exists. the destroy may be deferred.
+ // - release the manager's mutex
+ //
+ // This way, if many threads transition the same locktree's reference count
+ // from 1 to zero and wait behind the manager's mutex, only one of them will
+ // do the actual destroy and the others will happily do nothing.
+ uint32_t refs = lt->release_reference();
+ if (refs == 0) {
+ mutex_lock();
+        // lt may or may not have already been destroyed by another thread, so look it up by dict_id.
+ locktree *find_lt = locktree_map_find(dict_id);
+ if (find_lt != nullptr) {
+ // A locktree is still in the map with that dict_id, so it must be
+ // equal to lt. This is true because dictionary ids are never reused.
+ // If the reference count is zero, it's our responsibility to remove
+ // it and do the destroy. Otherwise, someone still wants it.
+ // If the locktree is still valid then check if it should be deleted.
+ if (find_lt == lt) {
+ if (lt->get_reference_count() == 0) {
+ locktree_map_remove(lt);
+ do_destroy = true;
+ }
+ m_lt_counters.add(lt->get_lock_request_info()->counters);
+ }
+ }
+ mutex_unlock();
+ }
+
+ // if necessary, do the destroy without holding the mutex
+ if (do_destroy) {
+ if (m_lt_destroy_callback) {
+ m_lt_destroy_callback(lt);
+ }
+ lt->destroy();
+ toku_free(lt);
+ }
+}
+
+void locktree_manager::run_escalation(void) {
+ struct escalation_fn {
+ static void run(void *extra) {
+ locktree_manager *mgr = (locktree_manager *) extra;
+ mgr->escalate_all_locktrees();
+ };
+ };
+ m_escalator.run(this, escalation_fn::run, this);
+}
+
+// test-only version of lock escalation
+void locktree_manager::run_escalation_for_test(void) {
+ run_escalation();
+}
+
+void locktree_manager::escalate_all_locktrees(void) {
+ uint64_t t0 = toku_current_time_microsec();
+
+ // get all locktrees
+ mutex_lock();
+ int num_locktrees = m_locktree_map.size();
+ locktree **locktrees = new locktree *[num_locktrees];
+ for (int i = 0; i < num_locktrees; i++) {
+ int r = m_locktree_map.fetch(i, &locktrees[i]);
+ invariant_zero(r);
+ reference_lt(locktrees[i]);
+ }
+ mutex_unlock();
+
+ // escalate them
+ escalate_locktrees(locktrees, num_locktrees);
+
+ delete [] locktrees;
+
+ uint64_t t1 = toku_current_time_microsec();
+ add_escalator_wait_time(t1 - t0);
+}
+
+void locktree_manager::note_mem_used(uint64_t mem_used) {
+ (void) toku_sync_fetch_and_add(&m_current_lock_memory, mem_used);
+}
+
+void locktree_manager::note_mem_released(uint64_t mem_released) {
+ uint64_t old_mem_used = toku_sync_fetch_and_sub(&m_current_lock_memory, mem_released);
+ invariant(old_mem_used >= mem_released);
+}
+
+bool locktree_manager::out_of_locks(void) const {
+ return m_current_lock_memory >= m_max_lock_memory;
+}
+
+bool locktree_manager::over_big_threshold(void) {
+ return m_current_lock_memory >= m_max_lock_memory / 2;
+}
+
+int locktree_manager::iterate_pending_lock_requests(lock_request_iterate_callback callback,
+ void *extra) {
+ mutex_lock();
+ int r = 0;
+ size_t num_locktrees = m_locktree_map.size();
+ for (size_t i = 0; i < num_locktrees && r == 0; i++) {
+ locktree *lt;
+ r = m_locktree_map.fetch(i, &lt);
+ invariant_zero(r);
+
+ struct lt_lock_request_info *info = lt->get_lock_request_info();
+ toku_mutex_lock(&info->mutex);
+
+ size_t num_requests = info->pending_lock_requests.size();
+ for (size_t k = 0; k < num_requests && r == 0; k++) {
+ lock_request *req;
+ r = info->pending_lock_requests.fetch(k, &req);
+ invariant_zero(r);
+ r = callback(lt->get_dict_id(), req->get_txnid(),
+ req->get_left_key(), req->get_right_key(),
+ req->get_conflicting_txnid(), req->get_start_time(), extra);
+ }
+
+ toku_mutex_unlock(&info->mutex);
+ }
+ mutex_unlock();
+ return r;
+}
+
+int locktree_manager::check_current_lock_constraints(bool big_txn) {
+ int r = 0;
+ if (big_txn && over_big_threshold()) {
+ run_escalation();
+ if (over_big_threshold()) {
+ r = TOKUDB_OUT_OF_LOCKS;
+ }
+ }
+ if (r == 0 && out_of_locks()) {
+ run_escalation();
+ if (out_of_locks()) {
+ // return an error if we're still out of locks after escalation.
+ r = TOKUDB_OUT_OF_LOCKS;
+ }
+ }
+ return r;
+}
+
+void locktree_manager::escalator_init(void) {
+ ZERO_STRUCT(m_escalation_mutex);
+ toku_mutex_init(
+ *manager_escalation_mutex_key, &m_escalation_mutex, nullptr);
+ m_escalation_count = 0;
+ m_escalation_time = 0;
+ m_wait_escalation_count = 0;
+ m_wait_escalation_time = 0;
+ m_long_wait_escalation_count = 0;
+ m_long_wait_escalation_time = 0;
+ m_escalation_latest_result = 0;
+ m_escalator.create();
+}
+
+void locktree_manager::escalator_destroy(void) {
+ m_escalator.destroy();
+ toku_mutex_destroy(&m_escalation_mutex);
+}
+
+void locktree_manager::add_escalator_wait_time(uint64_t t) {
+ toku_mutex_lock(&m_escalation_mutex);
+ m_wait_escalation_count += 1;
+ m_wait_escalation_time += t;
+ if (t >= 1000000) {
+ m_long_wait_escalation_count += 1;
+ m_long_wait_escalation_time += t;
+ }
+ toku_mutex_unlock(&m_escalation_mutex);
+}
+
+void locktree_manager::escalate_locktrees(locktree **locktrees, int num_locktrees) {
+ // there are too many row locks in the system and we need to tidy up.
+ //
+ // a simple implementation of escalation does not attempt
+    // to reduce the memory footprint of each txn's range buffer.
+ // doing so would require some layering hackery (or a callback)
+ // and more complicated locking. for now, just escalate each
+ // locktree individually, in-place.
+ tokutime_t t0 = toku_time_now();
+ for (int i = 0; i < num_locktrees; i++) {
+ locktrees[i]->escalate(m_lt_escalate_callback, m_lt_escalate_callback_extra);
+ release_lt(locktrees[i]);
+ }
+ tokutime_t t1 = toku_time_now();
+
+ toku_mutex_lock(&m_escalation_mutex);
+ m_escalation_count++;
+ m_escalation_time += (t1 - t0);
+ m_escalation_latest_result = m_current_lock_memory;
+ toku_mutex_unlock(&m_escalation_mutex);
+}
+
+struct escalate_args {
+ locktree_manager *mgr;
+ locktree **locktrees;
+ int num_locktrees;
+};
+
+void locktree_manager::locktree_escalator::create(void) {
+ ZERO_STRUCT(m_escalator_mutex);
+ toku_mutex_init(*manager_escalator_mutex_key, &m_escalator_mutex, nullptr);
+ toku_cond_init(*manager_m_escalator_done_key, &m_escalator_done, nullptr);
+ m_escalator_running = false;
+}
+
+void locktree_manager::locktree_escalator::destroy(void) {
+ toku_cond_destroy(&m_escalator_done);
+ toku_mutex_destroy(&m_escalator_mutex);
+}
+
+void locktree_manager::locktree_escalator::run(locktree_manager *mgr, void (*escalate_locktrees_fun)(void *extra), void *extra) {
+ uint64_t t0 = toku_current_time_microsec();
+ toku_mutex_lock(&m_escalator_mutex);
+ if (!m_escalator_running) {
+ // run escalation on this thread
+ m_escalator_running = true;
+ toku_mutex_unlock(&m_escalator_mutex);
+ escalate_locktrees_fun(extra);
+ toku_mutex_lock(&m_escalator_mutex);
+ m_escalator_running = false;
+ toku_cond_broadcast(&m_escalator_done);
+ } else {
+ toku_cond_wait(&m_escalator_done, &m_escalator_mutex);
+ }
+ toku_mutex_unlock(&m_escalator_mutex);
+ uint64_t t1 = toku_current_time_microsec();
+ mgr->add_escalator_wait_time(t1 - t0);
+}
+
+void locktree_manager::get_status(LTM_STATUS statp) {
+ ltm_status.init();
+ LTM_STATUS_VAL(LTM_SIZE_CURRENT) = m_current_lock_memory;
+ LTM_STATUS_VAL(LTM_SIZE_LIMIT) = m_max_lock_memory;
+ LTM_STATUS_VAL(LTM_ESCALATION_COUNT) = m_escalation_count;
+ LTM_STATUS_VAL(LTM_ESCALATION_TIME) = m_escalation_time;
+ LTM_STATUS_VAL(LTM_ESCALATION_LATEST_RESULT) = m_escalation_latest_result;
+ LTM_STATUS_VAL(LTM_WAIT_ESCALATION_COUNT) = m_wait_escalation_count;
+ LTM_STATUS_VAL(LTM_WAIT_ESCALATION_TIME) = m_wait_escalation_time;
+ LTM_STATUS_VAL(LTM_LONG_WAIT_ESCALATION_COUNT) = m_long_wait_escalation_count;
+ LTM_STATUS_VAL(LTM_LONG_WAIT_ESCALATION_TIME) = m_long_wait_escalation_time;
+
+ uint64_t lock_requests_pending = 0;
+ uint64_t sto_num_eligible = 0;
+ uint64_t sto_end_early_count = 0;
+ tokutime_t sto_end_early_time = 0;
+ size_t num_locktrees = 0;
+ struct lt_counters lt_counters = {};
+
+ if (toku_mutex_trylock(&m_mutex) == 0) {
+ lt_counters = m_lt_counters;
+ num_locktrees = m_locktree_map.size();
+ for (size_t i = 0; i < num_locktrees; i++) {
+ locktree *lt;
+ int r = m_locktree_map.fetch(i, &lt);
+ invariant_zero(r);
+ if (toku_mutex_trylock(&lt->m_lock_request_info.mutex) == 0) {
+ lock_requests_pending += lt->m_lock_request_info.pending_lock_requests.size();
+ lt_counters.add(lt->get_lock_request_info()->counters);
+ toku_mutex_unlock(&lt->m_lock_request_info.mutex);
+ }
+ sto_num_eligible += lt->sto_txnid_is_valid_unsafe() ? 1 : 0;
+ sto_end_early_count += lt->m_sto_end_early_count;
+ sto_end_early_time += lt->m_sto_end_early_time;
+ }
+ mutex_unlock();
+ }
+
+ LTM_STATUS_VAL(LTM_NUM_LOCKTREES) = num_locktrees;
+ LTM_STATUS_VAL(LTM_LOCK_REQUESTS_PENDING) = lock_requests_pending;
+ LTM_STATUS_VAL(LTM_STO_NUM_ELIGIBLE) = sto_num_eligible;
+ LTM_STATUS_VAL(LTM_STO_END_EARLY_COUNT) = sto_end_early_count;
+ LTM_STATUS_VAL(LTM_STO_END_EARLY_TIME) = sto_end_early_time;
+ LTM_STATUS_VAL(LTM_WAIT_COUNT) = lt_counters.wait_count;
+ LTM_STATUS_VAL(LTM_WAIT_TIME) = lt_counters.wait_time;
+ LTM_STATUS_VAL(LTM_LONG_WAIT_COUNT) = lt_counters.long_wait_count;
+ LTM_STATUS_VAL(LTM_LONG_WAIT_TIME) = lt_counters.long_wait_time;
+ LTM_STATUS_VAL(LTM_TIMEOUT_COUNT) = lt_counters.timeout_count;
+ *statp = ltm_status;
+}
+
+void locktree_manager::kill_waiter(void *extra) {
+ mutex_lock();
+ int r = 0;
+ size_t num_locktrees = m_locktree_map.size();
+ for (size_t i = 0; i < num_locktrees; i++) {
+ locktree *lt;
+ r = m_locktree_map.fetch(i, &lt);
+ invariant_zero(r);
+ lock_request::kill_waiter(lt, extra);
+ }
+ mutex_unlock();
+}
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/range_buffer.cc b/storage/tokudb/PerconaFT/locktree/range_buffer.cc
new file mode 100644
index 00000000..33ee481a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/range_buffer.cc
@@ -0,0 +1,259 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <string.h>
+
+#include "portability/memory.h"
+
+#include "locktree/range_buffer.h"
+#include "util/dbt.h"
+
+namespace toku {
+
+ bool range_buffer::record_header::left_is_infinite(void) const {
+ return left_neg_inf || left_pos_inf;
+ }
+
+ bool range_buffer::record_header::right_is_infinite(void) const {
+ return right_neg_inf || right_pos_inf;
+ }
+
+ void range_buffer::record_header::init(const DBT *left_key, const DBT *right_key) {
+ left_neg_inf = left_key == toku_dbt_negative_infinity();
+ left_pos_inf = left_key == toku_dbt_positive_infinity();
+ left_key_size = toku_dbt_is_infinite(left_key) ? 0 : left_key->size;
+ if (right_key) {
+ right_neg_inf = right_key == toku_dbt_negative_infinity();
+ right_pos_inf = right_key == toku_dbt_positive_infinity();
+ right_key_size = toku_dbt_is_infinite(right_key) ? 0 : right_key->size;
+ } else {
+ right_neg_inf = left_neg_inf;
+ right_pos_inf = left_pos_inf;
+ right_key_size = 0;
+ }
+ }
+
+ const DBT *range_buffer::iterator::record::get_left_key(void) const {
+ if (_header.left_neg_inf) {
+ return toku_dbt_negative_infinity();
+ } else if (_header.left_pos_inf) {
+ return toku_dbt_positive_infinity();
+ } else {
+ return &_left_key;
+ }
+ }
+
+ const DBT *range_buffer::iterator::record::get_right_key(void) const {
+ if (_header.right_neg_inf) {
+ return toku_dbt_negative_infinity();
+ } else if (_header.right_pos_inf) {
+ return toku_dbt_positive_infinity();
+ } else {
+ return &_right_key;
+ }
+ }
+
+ size_t range_buffer::iterator::record::size(void) const {
+ return sizeof(record_header) + _header.left_key_size + _header.right_key_size;
+ }
+
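+    // Serialized layout (informal sketch): each record written into the arena is
+    //   [record_header][left key bytes][right key bytes]
+    // For a point lock the key is stored once: right_key_size is 0 and, on
+    // deserialize, the right DBT simply aliases the left key's bytes.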
+ void range_buffer::iterator::record::deserialize(const char *buf) {
+ size_t current = 0;
+
+ // deserialize the header
+ memcpy(&_header, buf, sizeof(record_header));
+ current += sizeof(record_header);
+
+ // deserialize the left key if necessary
+ if (!_header.left_is_infinite()) {
+ // point the left DBT's buffer into ours
+ toku_fill_dbt(&_left_key, buf + current, _header.left_key_size);
+ current += _header.left_key_size;
+ }
+
+ // deserialize the right key if necessary
+ if (!_header.right_is_infinite()) {
+ if (_header.right_key_size == 0) {
+ toku_copyref_dbt(&_right_key, _left_key);
+ } else {
+ toku_fill_dbt(&_right_key, buf + current, _header.right_key_size);
+ }
+ }
+ }
+
+ toku::range_buffer::iterator::iterator() :
+ _ma_chunk_iterator(nullptr),
+ _current_chunk_base(nullptr),
+ _current_chunk_offset(0), _current_chunk_max(0),
+ _current_rec_size(0) {
+ }
+
+ toku::range_buffer::iterator::iterator(const range_buffer *buffer) :
+ _ma_chunk_iterator(&buffer->_arena),
+ _current_chunk_base(nullptr),
+ _current_chunk_offset(0), _current_chunk_max(0),
+ _current_rec_size(0) {
+ reset_current_chunk();
+ }
+
+ void range_buffer::iterator::reset_current_chunk() {
+ _current_chunk_base = _ma_chunk_iterator.current(&_current_chunk_max);
+ _current_chunk_offset = 0;
+ }
+
+ bool range_buffer::iterator::current(record *rec) {
+ if (_current_chunk_offset < _current_chunk_max) {
+ const char *buf = reinterpret_cast<const char *>(_current_chunk_base);
+ rec->deserialize(buf + _current_chunk_offset);
+ _current_rec_size = rec->size();
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ // move the iterator to the next record in the buffer
+ void range_buffer::iterator::next(void) {
+ invariant(_current_chunk_offset < _current_chunk_max);
+ invariant(_current_rec_size > 0);
+
+ // the next record is _current_rec_size bytes forward
+ _current_chunk_offset += _current_rec_size;
+ // we don't yet know how big the new current record is, so set it to 0.
+ _current_rec_size = 0;
+
+ if (_current_chunk_offset >= _current_chunk_max) {
+ // current chunk is exhausted, try moving to the next one
+ if (_ma_chunk_iterator.more()) {
+ _ma_chunk_iterator.next();
+ reset_current_chunk();
+ }
+ }
+ }
+
+ void range_buffer::create(void) {
+ // allocate buffer space lazily instead of on creation. this way,
+ // no malloc/free is done if the transaction ends up taking no locks.
+ _arena.create(0);
+ _num_ranges = 0;
+ }
+
+ void range_buffer::append(const DBT *left_key, const DBT *right_key) {
+ // if the keys are equal, then only one copy is stored.
+ if (toku_dbt_equals(left_key, right_key)) {
+ invariant(left_key->size <= MAX_KEY_SIZE);
+ append_point(left_key);
+ } else {
+ invariant(left_key->size <= MAX_KEY_SIZE);
+ invariant(right_key->size <= MAX_KEY_SIZE);
+ append_range(left_key, right_key);
+ }
+ _num_ranges++;
+ }
+
+ bool range_buffer::is_empty(void) const {
+ return total_memory_size() == 0;
+ }
+
+ uint64_t range_buffer::total_memory_size(void) const {
+ return _arena.total_size_in_use();
+ }
+
+ int range_buffer::get_num_ranges(void) const {
+ return _num_ranges;
+ }
+
+ void range_buffer::destroy(void) {
+ _arena.destroy();
+ }
+
+ void range_buffer::append_range(const DBT *left_key, const DBT *right_key) {
+ size_t record_length = sizeof(record_header) + left_key->size + right_key->size;
+ char *buf = reinterpret_cast<char *>(_arena.malloc_from_arena(record_length));
+
+ record_header h;
+ h.init(left_key, right_key);
+
+ // serialize the header
+ memcpy(buf, &h, sizeof(record_header));
+ buf += sizeof(record_header);
+
+ // serialize the left key if necessary
+ if (!h.left_is_infinite()) {
+ memcpy(buf, left_key->data, left_key->size);
+ buf += left_key->size;
+ }
+
+ // serialize the right key if necessary
+ if (!h.right_is_infinite()) {
+ memcpy(buf, right_key->data, right_key->size);
+ }
+ }
+
+ void range_buffer::append_point(const DBT *key) {
+ size_t record_length = sizeof(record_header) + key->size;
+ char *buf = reinterpret_cast<char *>(_arena.malloc_from_arena(record_length));
+
+ record_header h;
+ h.init(key, nullptr);
+
+ // serialize the header
+ memcpy(buf, &h, sizeof(record_header));
+ buf += sizeof(record_header);
+
+ // serialize the key if necessary
+ if (!h.left_is_infinite()) {
+ memcpy(buf, key->data, key->size);
+ }
+ }
+
+} /* namespace toku */
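+
+// A minimal usage sketch of the interface implemented above (illustrative
+// only; nothing in the tree calls this). A transaction appends the ranges it
+// locks and later walks them, for example to hand them back to the locktree.
+// The DBT arguments are assumed to be ordinary finite keys.
+void example_range_buffer_usage(const DBT *a, const DBT *b) {
+ toku::range_buffer buffer;
+ buffer.create();
+ buffer.append(a, a); // point range: the key is stored once
+ buffer.append(a, b); // full range: header plus both keys
+ toku::range_buffer::iterator iter(&buffer);
+ toku::range_buffer::iterator::record rec;
+ while (iter.current(&rec)) {
+ const DBT *left = rec.get_left_key();
+ const DBT *right = rec.get_right_key();
+ (void) left; // use the range, e.g. to release the corresponding lock
+ (void) right;
+ iter.next();
+ }
+ buffer.destroy();
+}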
diff --git a/storage/tokudb/PerconaFT/locktree/range_buffer.h b/storage/tokudb/PerconaFT/locktree/range_buffer.h
new file mode 100644
index 00000000..3b08e895
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/range_buffer.h
@@ -0,0 +1,172 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "portability/toku_stdint.h"
+
+#include "util/dbt.h"
+#include "util/memarena.h"
+
+namespace toku {
+
+ // a key range buffer represents a set of key ranges that can
+ // be stored, iterated over, and then destroyed all at once.
+ class range_buffer {
+ private:
+
+ // the key range buffer is a bunch of records in a row.
+ // each record has the following header, followed by the
+ // left key and right key data payload, if applicable.
+ // we limit keys to 2^16 bytes, since we store key lengths as 2 bytes.
+ static const size_t MAX_KEY_SIZE = 1 << 16;
+
+ struct record_header {
+ bool left_neg_inf;
+ bool left_pos_inf;
+ bool right_pos_inf;
+ bool right_neg_inf;
+ uint16_t left_key_size;
+ uint16_t right_key_size;
+
+ bool left_is_infinite(void) const;
+
+ bool right_is_infinite(void) const;
+
+ void init(const DBT *left_key, const DBT *right_key);
+ };
+ static_assert(sizeof(record_header) == 8, "record header format is off");
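+
+ // worked example of this layout: a point range over a 4-byte key
+ // serializes to sizeof(record_header) + 4 = 12 bytes, since
+ // append_point() (below) stores the key once and leaves
+ // right_key_size at 0; a full range over two 4-byte keys serializes
+ // to 8 + 4 + 4 = 16 bytes via append_range().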
+
+ public:
+
+ // the iterator abstracts reading over a buffer of variable length
+ // records one by one until there are no more left.
+ class iterator {
+ public:
+ iterator();
+ iterator(const range_buffer *buffer);
+
+ // a record represents the user-view of a serialized key range.
+ // it handles positive and negative infinity and the optimized
+ // point range case, where left and right points share memory.
+ class record {
+ public:
+ // get a read-only pointer to the left key of this record's range
+ const DBT *get_left_key(void) const;
+
+ // get a read-only pointer to the right key of this record's range
+ const DBT *get_right_key(void) const;
+
+ // how big is this record? this tells us where the next record is
+ size_t size(void) const;
+
+ // populate the record header from the given buffer and point our DBTs'
+ // key buffers into it if the keys are not infinite.
+ void deserialize(const char *buf);
+
+ private:
+ record_header _header;
+ DBT _left_key;
+ DBT _right_key;
+ };
+
+ // populate the given record object with the current record, if one
+ // exists, and return true; return false when there are no more records.
+ // the memory referred to by the record is valid only as long as the
+ // underlying buffer exists.
+ bool current(record *rec);
+
+ // move the iterator to the next record in the buffer
+ void next(void);
+
+ private:
+ void reset_current_chunk();
+
+ // the chunk iterator over the buffer's memarena, the base pointer and
+ // size of the current chunk, our offset within that chunk, and the
+ // size of the current record.
+ memarena::chunk_iterator _ma_chunk_iterator;
+ const void *_current_chunk_base;
+ size_t _current_chunk_offset;
+ size_t _current_chunk_max;
+ size_t _current_rec_size;
+ };
+
+ // allocate buffer space lazily instead of on creation. this way,
+ // no malloc/free is done if the transaction ends up taking no locks.
+ void create(void);
+
+ // append a left/right key range to the buffer.
+ // if the keys are equal, then only one copy is stored.
+ void append(const DBT *left_key, const DBT *right_key);
+
+ // is this range buffer empty?
+ bool is_empty(void) const;
+
+ // how much memory is being used by this range buffer?
+ uint64_t total_memory_size(void) const;
+
+ // how many ranges are stored in this range buffer?
+ int get_num_ranges(void) const;
+
+ void destroy(void);
+
+ private:
+ memarena _arena;
+ int _num_ranges;
+
+ void append_range(const DBT *left_key, const DBT *right_key);
+
+ // append a point to the buffer. this is the space/time saving
+ // optimization for key ranges where left == right.
+ void append_point(const DBT *key);
+ };
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/locktree/tests/CMakeLists.txt
new file mode 100644
index 00000000..20ab682a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/CMakeLists.txt
@@ -0,0 +1,15 @@
+if(BUILD_TESTING)
+ function(add_locktree_test bin)
+ add_toku_test(locktree ${bin} ${ARGN})
+ endfunction(add_locktree_test)
+
+ file(GLOB srcs *.cc)
+ foreach(src ${srcs})
+ get_filename_component(base ${src} NAME_WE)
+
+ add_executable(${base} ${base}.cc)
+ add_space_separated_property(TARGET ${base} COMPILE_FLAGS -fvisibility=hidden)
+ target_link_libraries(${base} locktree ft ${LIBTOKUPORTABILITY})
+ add_locktree_test(${base})
+ endforeach(src)
+endif(BUILD_TESTING)
diff --git a/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_create_destroy.cc b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_create_destroy.cc
new file mode 100644
index 00000000..3221fc20
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_create_destroy.cc
@@ -0,0 +1,68 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "concurrent_tree_unit_test.h"
+
+namespace toku {
+
+static comparator cmp;
+
+// test that creating a concurrent tree puts it in a valid, empty state.
+// the root node should be properly marked and have the correct comparator.
+void concurrent_tree_unit_test::test_create_destroy(void) {
+ concurrent_tree tree;
+ tree.create(&cmp);
+
+ invariant(tree.m_root.is_root());
+ invariant(tree.m_root.is_empty());
+ invariant(tree.m_root.m_cmp == &cmp);
+ invariant_null(tree.m_root.m_left_child.ptr);
+ invariant_null(tree.m_root.m_right_child.ptr);
+
+ invariant(tree.is_empty());
+
+ tree.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::concurrent_tree_unit_test test;
+ test.test_create_destroy();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_acquire_release.cc b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_acquire_release.cc
new file mode 100644
index 00000000..4a1936b6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_acquire_release.cc
@@ -0,0 +1,120 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "concurrent_tree_unit_test.h"
+
+namespace toku {
+
+void concurrent_tree_unit_test::test_lkr_acquire_release(void) {
+ comparator cmp;
+ cmp.create(compare_dbts, nullptr);
+
+ // we'll test a tree that has values 0..20
+ const uint64_t min = 0;
+ const uint64_t max = 20;
+
+ // acquire/release should work regardless of how the
+ // data was inserted into the tree, so we test it
+ // on a tree whose elements were populated starting
+ // at each value 0..20 (so we get different rotation
+ // behavior for each starting value in the tree).
+ for (uint64_t start = min; start <= max; start++) {
+ concurrent_tree tree;
+ tree.create(&cmp);
+ populate_tree(&tree, start, min, max);
+ invariant(!tree.is_empty());
+
+ for (uint64_t i = 0; i <= max; i++) {
+ concurrent_tree::locked_keyrange lkr;
+ lkr.prepare(&tree);
+ invariant(lkr.m_tree == &tree);
+ invariant(lkr.m_subtree->is_root());
+
+ keyrange range;
+ range.create(get_dbt(i), get_dbt(i));
+ lkr.acquire(range);
+ // the tree is not empty so the subtree root should not be empty
+ invariant(!lkr.m_subtree->is_empty());
+
+ // if the subtree root does not overlap then one of its children
+ // must exist and have an overlapping range.
+ if (!lkr.m_subtree->m_range.overlaps(cmp, range)) {
+ treenode *left = lkr.m_subtree->m_left_child.ptr;
+ treenode *right = lkr.m_subtree->m_right_child.ptr;
+ if (left != nullptr) {
+ // left exists, so if it does not overlap then the right must
+ if (!left->m_range.overlaps(cmp, range)) {
+ invariant_notnull(right);
+ invariant(right->m_range.overlaps(cmp, range));
+ }
+ } else {
+ // no left child, so the right must exist and be overlapping
+ invariant_notnull(right);
+ invariant(right->m_range.overlaps(cmp, range));
+ }
+ }
+
+ lkr.release();
+ }
+
+ // remove everything one by one and then destroy
+ keyrange range;
+ concurrent_tree::locked_keyrange lkr;
+ lkr.prepare(&tree);
+ invariant(lkr.m_subtree->is_root());
+ range.create(get_dbt(min), get_dbt(max));
+ lkr.acquire(range);
+ invariant(lkr.m_subtree->is_root());
+ for (uint64_t i = 0; i <= max; i++) {
+ range.create(get_dbt(i), get_dbt(i));
+ lkr.remove(range);
+ }
+ lkr.release();
+ tree.destroy();
+ }
+
+ cmp.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::concurrent_tree_unit_test test;
+ test.test_lkr_acquire_release();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_insert_remove.cc b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_insert_remove.cc
new file mode 100644
index 00000000..ccca0427
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_insert_remove.cc
@@ -0,0 +1,158 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "concurrent_tree_unit_test.h"
+
+namespace toku {
+
+// "random" (derived from the digits of PI) but deterministic keys
+const uint64_t keys[] = {
+ 141, 592, 653, 589, 793, 238, 462, 643, 383, 327, 950, 288, 419,
+ 716, 939, 937, 510, 582, 97, 494, 459, 230, 781, 640, 628, 620, 899,
+ 862, 803, 482, 534, 211, 706, 798, 214, 808, 651, 328, 239, 664, 709,
+ 384, 460, 955, 58, 223, 172, 535, 940, 812, 848,
+};
+const uint64_t num_keys = sizeof(keys) / sizeof(keys[0]);
+
+static const DBT *get_ith_key_from_set(uint64_t i) {
+ return get_dbt(keys[i]);
+}
+
+static void verify_unique_keys(void) {
+ for (uint64_t i = 0; i < num_keys; i++) {
+ for (uint64_t j = 0; j < num_keys; j++) {
+ if (i != j) {
+ invariant(keys[i] != keys[j]);
+ }
+ }
+ }
+}
+
+static uint64_t check_for_range_and_count(concurrent_tree::locked_keyrange *lkr,
+ const comparator &cmp, const keyrange &range, bool range_should_exist) {
+
+ struct check_fn_obj {
+ const comparator *cmp;
+ uint64_t count;
+ keyrange target_range;
+ bool target_range_found;
+
+ bool fn(const keyrange &query_range, TXNID txnid) {
+ (void) txnid;
+ if (query_range.compare(*cmp, target_range) == keyrange::comparison::EQUALS) {
+ invariant(!target_range_found);
+ target_range_found = true;
+ }
+ count++;
+ return true;
+ }
+ } check_fn;
+ check_fn.cmp = &cmp;
+ check_fn.count = 0;
+ check_fn.target_range = range;
+ check_fn.target_range_found = false;
+
+ lkr->iterate<check_fn_obj>(&check_fn);
+
+ if (range_should_exist) {
+ invariant(check_fn.target_range_found);
+ } else {
+ invariant(!check_fn.target_range_found);
+ }
+ return check_fn.count;
+}
+
+// test that insert/remove work properly together, confirming
+// whether keys exist using iterate()
+void concurrent_tree_unit_test::test_lkr_insert_remove(void) {
+ verify_unique_keys();
+ comparator cmp;
+ cmp.create(compare_dbts, nullptr);
+
+ concurrent_tree tree;
+ tree.create(&cmp);
+
+ // prepare and acquire the infinite range
+ concurrent_tree::locked_keyrange lkr;
+ lkr.prepare(&tree);
+ lkr.acquire(keyrange::get_infinite_range());
+
+ // populate the tree with all the keys
+ uint64_t n;
+ const uint64_t cap = 15;
+ for (uint64_t i = 0; i < num_keys; i++) {
+ keyrange range;
+ range.create(get_ith_key_from_set(i), get_ith_key_from_set(i));
+ // insert an element. it should exist and the
+ // count should be correct.
+ lkr.insert(range, i);
+ n = check_for_range_and_count(&lkr, cmp, range, true);
+ if (i >= cap) {
+ invariant(n == cap + 1);
+ // remove an element previously inserted. it should
+ // no longer exist and the count should be correct.
+ range.create(get_ith_key_from_set(i - cap), get_ith_key_from_set(i - cap));
+ lkr.remove(range);
+ n = check_for_range_and_count(&lkr, cmp, range, false);
+ invariant(n == cap);
+ } else {
+ invariant(n == i + 1);
+ }
+ }
+
+ // clean up the rest of the keys
+ for (uint64_t i = 0; i < cap; i++) {
+ keyrange range;
+ range.create(get_ith_key_from_set(num_keys - i - 1), get_ith_key_from_set(num_keys - i - 1));
+ lkr.remove(range);
+ n = check_for_range_and_count(&lkr, cmp, range, false);
+ invariant(n == (cap - i - 1));
+ }
+
+ lkr.release();
+ tree.destroy();
+ cmp.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::concurrent_tree_unit_test test;
+ test.test_lkr_insert_remove();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_insert_serial_large.cc b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_insert_serial_large.cc
new file mode 100644
index 00000000..1140260d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_insert_serial_large.cc
@@ -0,0 +1,95 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <portability/toku_pthread.h>
+
+#include "concurrent_tree_unit_test.h"
+
+namespace toku {
+
+// This is intended to be a black-box test for the concurrent_tree's
+// ability to rebalance in the face of many serial insertions.
+// If the code survives many inserts, it is considered successful.
+void concurrent_tree_unit_test::test_lkr_insert_serial_large(void) {
+ comparator cmp;
+ cmp.create(compare_dbts, nullptr);
+
+ concurrent_tree tree;
+ tree.create(&cmp);
+
+ // prepare and acquire the infinite range
+ concurrent_tree::locked_keyrange lkr;
+ lkr.prepare(&tree);
+ lkr.acquire(keyrange::get_infinite_range());
+
+ // 128k keys should be fairly stressful.
+ // a bad tree will flatten and die way earlier than 128k inserts.
+ // a good tree will rebalance and reach height log2(128k) ~= 17,
+ // surviving the onslaught of inserts.
+ const uint64_t num_keys = 128 * 1024;
+
+ // populate the tree with all the keys
+ for (uint64_t i = 0; i < num_keys; i++) {
+ DBT k;
+ toku_fill_dbt(&k, &i, sizeof(i));
+ keyrange range;
+ range.create(&k, &k);
+ lkr.insert(range, i);
+ }
+
+ // remove all of the keys
+ for (uint64_t i = 0; i < num_keys; i++) {
+ DBT k;
+ toku_fill_dbt(&k, &i, sizeof(i));
+ keyrange range;
+ range.create(&k, &k);
+ lkr.remove(range);
+ }
+
+ lkr.release();
+ tree.destroy();
+ cmp.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::concurrent_tree_unit_test test;
+ test.test_lkr_insert_serial_large();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_remove_all.cc b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_remove_all.cc
new file mode 100644
index 00000000..19f57c89
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_lkr_remove_all.cc
@@ -0,0 +1,92 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "concurrent_tree_unit_test.h"
+
+namespace toku {
+
+// remove_all on a locked keyrange should properly remove everything
+// from the tree and account correctly for the amount of memory released.
+void concurrent_tree_unit_test::test_lkr_remove_all(void) {
+ comparator cmp;
+ cmp.create(compare_dbts, nullptr);
+
+ // we'll test a tree that has values 0..20
+ const uint64_t min = 0;
+ const uint64_t max = 20;
+
+ // remove_all should work regardless of how the
+ // data was inserted into the tree, so we test it
+ // on a tree whose elements were populated starting
+ // at each value 0..20 (so we get different rotation
+ // behavior for each starting value in the tree).
+ for (uint64_t start = min; start <= max; start++) {
+ concurrent_tree tree;
+ concurrent_tree::locked_keyrange lkr;
+
+ tree.create(&cmp);
+ populate_tree(&tree, start, min, max);
+ invariant(!tree.is_empty());
+
+ lkr.prepare(&tree);
+ invariant(lkr.m_subtree->is_root());
+ invariant(!lkr.m_subtree->is_empty());
+
+ // remove_all() from the locked keyrange and assert that
+ // the number of elements and memory removed is correct.
+ lkr.remove_all();
+
+ invariant(lkr.m_subtree->is_empty());
+ invariant(tree.is_empty());
+ invariant_null(tree.m_root.m_right_child.ptr);
+ invariant_null(tree.m_root.m_left_child.ptr);
+
+ lkr.release();
+ tree.destroy();
+ }
+
+ cmp.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::concurrent_tree_unit_test test;
+ test.test_lkr_remove_all();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_unit_test.h b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_unit_test.h
new file mode 100644
index 00000000..d0810610
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/concurrent_tree_unit_test.h
@@ -0,0 +1,95 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "test.h"
+
+#include <concurrent_tree.h>
+
+namespace toku {
+
+class concurrent_tree_unit_test {
+public:
+ // creating a concurrent tree should initialize it to a valid,
+ // empty state. the root node should be properly marked, have
+ // no children, and the correct comparator.
+ void test_create_destroy(void);
+
+ // acquiring a locked keyrange should lock and "root" itself at
+ // the proper subtree node. releasing it should unlock that node.
+ void test_lkr_acquire_release(void);
+
+ // remove_all on a locked keyrange should properly remove everything
+ // from the tree and account correctly for the amount of memory released.
+ void test_lkr_remove_all(void);
+
+ // test that insert/remove work properly together, confirming
+ // whether keys exist using iterate()
+ void test_lkr_insert_remove(void);
+
+ // test that the concurrent tree can survive many serial inserts.
+ // this is a black-box test for tree rotations.
+ void test_lkr_insert_serial_large(void);
+
+private:
+
+ // populate the given concurrent tree with elements from min..max but
+ // starting with a certain element. this allows the caller to modestly
+ // control the way the tree is built/rotated, for test variability.
+ static void populate_tree(concurrent_tree *tree, uint64_t start, uint64_t min, uint64_t max) {
+ concurrent_tree::locked_keyrange lkr;
+ lkr.prepare(tree);
+ lkr.acquire(keyrange::get_infinite_range());
+
+ for (uint64_t i = start; i <= max; i++) {
+ keyrange range;
+ range.create(get_dbt(i), get_dbt(i));
+ lkr.insert(range, i);
+ }
+ for (uint64_t i = min; i < start; i++) {
+ keyrange range;
+ range.create(get_dbt(i), get_dbt(i));
+ lkr.insert(range, i);
+ }
+
+ lkr.release();
+ }
+};
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/tests/kill_waiter.cc b/storage/tokudb/PerconaFT/locktree/tests/kill_waiter.cc
new file mode 100644
index 00000000..8d93c0bb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/kill_waiter.cc
@@ -0,0 +1,100 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+
+// test the lock manager kill waiter function
+
+#include "locktree.h"
+#include "lock_request.h"
+#include "test.h"
+#include "locktree_unit_test.h"
+#include <thread>
+#include <atomic>
+
+namespace toku {
+
+const uint64_t my_lock_wait_time = 1000 * 1000;
+const uint64_t my_killed_time = 500 * 1000;
+const int n_locks = 4;
+
+static int my_killed_callback(void) {
+ if (1) fprintf(stderr, "%s:%u %s\n", __FILE__, __LINE__, __FUNCTION__);
+ return 0;
+}
+
+static void locktree_release_lock(locktree *lt, TXNID txn_id, const DBT *left, const DBT *right) {
+ range_buffer buffer;
+ buffer.create();
+ buffer.append(left, right);
+ lt->release_locks(txn_id, &buffer);
+ buffer.destroy();
+}
+
+static void wait_lock(lock_request *lr, std::atomic_int *done) {
+ int r = lr->wait(my_lock_wait_time, my_killed_time, my_killed_callback);
+ assert(r == DB_LOCK_NOTGRANTED);
+ *done = 1;
+}
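+
+// As this test uses the interface: the final argument to lock_request::set()
+// is an opaque identifier for the waiter (&waiters[i] here), and
+// locktree_manager::kill_waiter() called with that same identifier completes
+// the matching pending request so its wait() returns DB_LOCK_NOTGRANTED,
+// which is what wait_lock() above asserts.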
+
+static void test_kill_waiter(void) {
+ int r;
+
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, nullptr, nullptr);
+
+ DICTIONARY_ID dict_id = { 1 };
+ locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+
+ const DBT *one = get_dbt(1);
+
+ lock_request locks[n_locks];
+ std::thread waiters[n_locks-1];
+ for (int i = 0; i < n_locks; i++) {
+ locks[i].create();
+ locks[i].set(lt, i+1, one, one, lock_request::type::WRITE, false, &waiters[i]);
+ }
+
+ // txn 'n_locks' grabs the lock
+ r = locks[n_locks-1].start();
+ assert_zero(r);
+
+ for (int i = 0; i < n_locks-1; i++) {
+ r = locks[i].start();
+ assert(r == DB_LOCK_NOTGRANTED);
+ }
+
+ std::atomic_int done[n_locks-1];
+ for (int i = 0; i < n_locks-1; i++) {
+ done[i] = 0;
+ waiters[i] = std::thread(wait_lock, &locks[i], &done[i]);
+ }
+
+ for (int i = 0; i < n_locks-1; i++) {
+ assert(!done[i]);
+ }
+
+ sleep(1);
+ for (int i = 0; i < n_locks-1; i++) {
+ mgr.kill_waiter(&waiters[i]);
+ while (!done[i]) sleep(1);
+ waiters[i].join();
+ for (int j = i+1; j < n_locks-1; j++)
+ assert(!done[j]);
+ }
+
+ locktree_release_lock(lt, n_locks, one, one);
+
+ for (int i = 0; i < n_locks; i++) {
+ locks[i].destroy();
+ }
+
+ mgr.release_lt(lt);
+ mgr.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::test_kill_waiter();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_create_set.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_create_set.cc
new file mode 100644
index 00000000..8ae685b9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_create_set.cc
@@ -0,0 +1,72 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "lock_request_unit_test.h"
+
+namespace toku {
+
+// create a lock request and verify that its internals are initialized
+// to sane defaults; destroying it should not crash.
+void lock_request_unit_test::test_create_destroy(void) {
+ lock_request request;
+ request.create();
+
+ invariant(request.m_txnid == TXNID_NONE);
+ invariant(request.m_left_key == nullptr);
+ invariant(request.m_right_key == nullptr);
+ invariant(request.m_left_key_copy.flags == 0);
+ invariant(request.m_left_key_copy.data == nullptr);
+ invariant(request.m_right_key_copy.flags == 0);
+ invariant(request.m_right_key_copy.data == nullptr);
+
+ invariant(request.m_type == lock_request::type::UNKNOWN);
+ invariant(request.m_lt == nullptr);
+
+ invariant(request.m_complete_r == 0);
+ invariant(request.m_state == lock_request::state::UNINITIALIZED);
+
+ request.destroy();
+}
+
+}
+
+int main(void) {
+ toku::lock_request_unit_test test;
+ test.test_create_destroy();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_get_set_keys.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_get_set_keys.cc
new file mode 100644
index 00000000..fd57b70f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_get_set_keys.cc
@@ -0,0 +1,88 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "lock_request_unit_test.h"
+
+namespace toku {
+
+// make sure that setting keys and getting them back works properly.
+// at a high level, we want to make sure keys are copied when appropriate
+// and that the getters play nice with +/- infinity.
+void lock_request_unit_test::test_get_set_keys(void) {
+ lock_request request;
+ request.create();
+
+ locktree *const null_lt = nullptr;
+
+ TXNID txnid_a = 1001;
+
+ const DBT *one = get_dbt(1);
+ const DBT *two = get_dbt(2);
+ const DBT *neg_inf = toku_dbt_negative_infinity();
+ const DBT *pos_inf = toku_dbt_positive_infinity();
+
+ // request should not copy dbts for neg/pos inf, so get_left
+ // and get_right should return the same pointer given
+ request.set(null_lt, txnid_a, neg_inf, pos_inf, lock_request::type::WRITE, false);
+ invariant(request.get_left_key() == neg_inf);
+ invariant(request.get_right_key() == pos_inf);
+
+ // request should make copies of non-infinity-valued keys.
+ request.set(null_lt, txnid_a, neg_inf, one, lock_request::type::WRITE, false);
+ invariant(request.get_left_key() == neg_inf);
+ invariant(request.get_right_key() == one);
+
+ request.set(null_lt, txnid_a, two, pos_inf, lock_request::type::WRITE, false);
+ invariant(request.get_left_key() == two);
+ invariant(request.get_right_key() == pos_inf);
+
+ request.set(null_lt, txnid_a, one, two, lock_request::type::WRITE, false);
+ invariant(request.get_left_key() == one);
+ invariant(request.get_right_key() == two);
+
+ request.destroy();
+}
+
+}
+
+int main(void) {
+ toku::lock_request_unit_test test;
+ test.test_get_set_keys();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_killed.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_killed.cc
new file mode 100644
index 00000000..ec464444
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_killed.cc
@@ -0,0 +1,124 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the kill callback. the lock wait is killed 1/2 of the way through the wait.
+
+#include "lock_request_unit_test.h"
+
+namespace toku {
+
+const uint64_t my_lock_wait_time = 10 * 1000; // 10 seconds
+const uint64_t my_killed_time = 1 * 1000;
+
+static int killed_calls = 0;
+static uint64_t t_last_kill;
+static uint64_t t_do_kill;
+
+static int my_killed_callback(void) {
+ uint64_t t_now = toku_current_time_microsec();
+ if (t_now == t_last_kill)
+ return 0;
+ assert(t_now >= t_last_kill);
+ t_last_kill = t_now;
+ killed_calls++;
+ if (t_now >= t_do_kill)
+ return 1;
+ else
+ return 0;
+}
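+
+// The wait/kill protocol as exercised here and in lock_request_not_killed.cc:
+// lock_request::wait(wait_time, killed_time, callback) polls the callback
+// roughly every killed_time milliseconds while the request is pending; a
+// nonzero return abandons the wait and the request completes with
+// DB_LOCK_NOTGRANTED, while a zero return keeps waiting until wait_time
+// expires.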
+
+// make sure the killed callback can terminate the lock wait: the wait
+// should end with DB_LOCK_NOTGRANTED about halfway through the full wait time.
+void lock_request_unit_test::test_wait_time_callback(void) {
+ int r;
+ locktree lt;
+
+ DICTIONARY_ID dict_id = { 1 };
+ lt.create(nullptr, dict_id, dbt_comparator);
+
+ TXNID txnid_a = 1001;
+ lock_request request_a;
+ request_a.create();
+
+ TXNID txnid_b = 2001;
+ lock_request request_b;
+ request_b.create();
+
+ const DBT *one = get_dbt(1);
+
+ // a locks 'one'
+ request_a.set(&lt, txnid_a, one, one, lock_request::type::WRITE, false);
+ r = request_a.start();
+ assert_zero(r);
+
+ // b tries to lock 'one'
+ request_b.set(&lt, txnid_b, one, one, lock_request::type::WRITE, false);
+ r = request_b.start();
+ assert(r == DB_LOCK_NOTGRANTED);
+
+ uint64_t t_start = toku_current_time_microsec();
+ t_last_kill = t_start;
+ t_do_kill = t_start + my_lock_wait_time * 1000 / 2;
+ r = request_b.wait(my_lock_wait_time, my_killed_time, my_killed_callback);
+ assert(r == DB_LOCK_NOTGRANTED);
+
+ uint64_t t_end = toku_current_time_microsec();
+ assert(t_end > t_start);
+ uint64_t t_delta = t_end - t_start;
+ // fprintf(stderr, "delta=%" PRIu64 "\n", t_delta);
+ assert(t_delta >= my_lock_wait_time / 2);
+
+ // fprintf(stderr, "killed_calls=%d\n", killed_calls);
+ assert(killed_calls > 0);
+
+ request_b.destroy();
+
+ release_lock_and_retry_requests(&lt, txnid_a, one, one);
+ request_a.destroy();
+
+ lt.release_reference();
+ lt.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::lock_request_unit_test test;
+ test.test_wait_time_callback();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_not_killed.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_not_killed.cc
new file mode 100644
index 00000000..647b4d3c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_not_killed.cc
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the kill callback. the kill callback never kills the lock wait in this test.
+// the test verifies that the kill callback is called close to its requested frequency.
+
+#include "lock_request_unit_test.h"
+
+namespace toku {
+
+const uint64_t my_lock_wait_time = 10 * 1000; // 10 seconds
+const uint64_t my_killed_time = 1 * 1000;
+
+static int killed_calls = 0;
+static uint64_t t_last_kill;
+
+static int my_killed_callback(void) {
+ uint64_t t_now = toku_current_time_microsec();
+ assert(t_now >= t_last_kill);
+ t_last_kill = t_now;
+ killed_calls++;
+ return 0;
+}
+
+// make sure the killed callback is invoked close to its requested frequency
+// and that returning 0 from it lets the wait run to its full timeout.
+void lock_request_unit_test::test_wait_time_callback(void) {
+ int r;
+ locktree lt;
+
+ DICTIONARY_ID dict_id = { 1 };
+ lt.create(nullptr, dict_id, dbt_comparator);
+
+ TXNID txnid_a = 1001;
+ lock_request request_a;
+ request_a.create();
+
+ TXNID txnid_b = 2001;
+ lock_request request_b;
+ request_b.create();
+
+ const DBT *one = get_dbt(1);
+
+ // a locks 'one'
+ request_a.set(&lt, txnid_a, one, one, lock_request::type::WRITE, false);
+ r = request_a.start();
+ assert_zero(r);
+
+ // b tries to lock 'one'
+ request_b.set(&lt, txnid_b, one, one, lock_request::type::WRITE, false);
+ r = request_b.start();
+ assert(r == DB_LOCK_NOTGRANTED);
+
+ uint64_t t_start = toku_current_time_microsec();
+ t_last_kill = t_start;
+ r = request_b.wait(my_lock_wait_time, my_killed_time, my_killed_callback);
+ assert(r == DB_LOCK_NOTGRANTED);
+
+ uint64_t t_end = toku_current_time_microsec();
+ assert(t_end > t_start);
+ uint64_t t_delta = t_end - t_start;
+ // fprintf(stderr, "delta=%" PRIu64 "\n", t_delta);
+ assert(t_delta >= my_lock_wait_time);
+
+ // fprintf(stderr, "killed_calls=%d\n", killed_calls);
+ assert(killed_calls > 0);
+
+ request_b.destroy();
+
+ release_lock_and_retry_requests(&lt, txnid_a, one, one);
+ request_a.destroy();
+
+ lt.release_reference();
+ lt.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::lock_request_unit_test test;
+ test.test_wait_time_callback();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_deadlock.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_deadlock.cc
new file mode 100644
index 00000000..343becfc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_deadlock.cc
@@ -0,0 +1,120 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "lock_request_unit_test.h"
+
+namespace toku {
+
+// make sure deadlocks are detected when a lock request starts
+void lock_request_unit_test::test_start_deadlock(void) {
+ int r;
+ locktree lt;
+
+ // something short
+ const uint64_t lock_wait_time = 10;
+
+ DICTIONARY_ID dict_id = { 1 };
+ lt.create(nullptr, dict_id, dbt_comparator);
+
+ TXNID txnid_a = 1001;
+ TXNID txnid_b = 2001;
+ TXNID txnid_c = 3001;
+ lock_request request_a;
+ lock_request request_b;
+ lock_request request_c;
+ request_a.create();
+ request_b.create();
+ request_c.create();
+
+ const DBT *one = get_dbt(1);
+ const DBT *two = get_dbt(2);
+
+ // start and succeed 1,1 for A and 2,2 for B.
+ request_a.set(&lt, txnid_a, one, one, lock_request::type::WRITE, false);
+ r = request_a.start();
+ invariant_zero(r);
+ request_b.set(&lt, txnid_b, two, two, lock_request::type::WRITE, false);
+ r = request_b.start();
+ invariant_zero(r);
+
+ // txnid A should not be granted a lock on 2,2, so it goes pending.
+ request_a.set(&lt, txnid_a, two, two, lock_request::type::WRITE, false);
+ r = request_a.start();
+ invariant(r == DB_LOCK_NOTGRANTED);
+
+ // if txnid B wants a lock on 1,1 it should deadlock with A
+ request_b.set(&lt, txnid_b, one, one, lock_request::type::WRITE, false);
+ r = request_b.start();
+ invariant(r == DB_LOCK_DEADLOCK);
+
+ // txnid C should not deadlock on either of these - it should just time out.
+ request_c.set(&lt, txnid_c, one, one, lock_request::type::WRITE, false);
+ r = request_c.start();
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = request_c.wait(lock_wait_time);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ request_c.set(&lt, txnid_c, two, two, lock_request::type::WRITE, false);
+ r = request_c.start();
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = request_c.wait(lock_wait_time);
+ invariant(r == DB_LOCK_NOTGRANTED);
+
+ // release locks for A and B, then wait on A's request which should succeed
+ // since B just unlocked and should have completed A's pending request.
+ release_lock_and_retry_requests(&lt, txnid_a, one, one);
+ release_lock_and_retry_requests(&lt, txnid_b, two, two);
+ r = request_a.wait(lock_wait_time);
+ invariant_zero(r);
+ release_lock_and_retry_requests(&lt, txnid_a, two, two);
+
+ request_a.destroy();
+ request_b.destroy();
+ request_c.destroy();
+
+ lt.release_reference();
+ lt.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::lock_request_unit_test test;
+ test.test_start_deadlock();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_pending.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_pending.cc
new file mode 100644
index 00000000..ce651035
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_pending.cc
@@ -0,0 +1,106 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "lock_request_unit_test.h"
+
+namespace toku {
+
+// a lock request that cannot be granted immediately when it is started
+// should be stored in the lock request set in the pending state.
+void lock_request_unit_test::test_start_pending(void) {
+ int r;
+ locktree lt;
+ lock_request request;
+
+ DICTIONARY_ID dict_id = { 1 };
+ lt.create(nullptr, dict_id, dbt_comparator);
+
+ TXNID txnid_a = 1001;
+ TXNID txnid_b = 2001;
+
+ const DBT *zero = get_dbt(0);
+ const DBT *one = get_dbt(1);
+ const DBT *two = get_dbt(2);
+
+ // take a range lock using txnid b
+ r = lt.acquire_write_lock(txnid_b, zero, two, nullptr, false);
+ invariant_zero(r);
+
+ lt_lock_request_info *info = lt.get_lock_request_info();
+
+ // start a lock request for 1,1
+ // it should fail. the request should be stored and in the pending state.
+ request.create();
+ request.set(&lt, txnid_a, one, one, lock_request::type::WRITE, false);
+ r = request.start();
+ invariant(r == DB_LOCK_NOTGRANTED);
+ invariant(info->pending_lock_requests.size() == 1);
+ invariant(request.m_state == lock_request::state::PENDING);
+
+ // should have made copies of the keys, and they should be equal
+ invariant(request.m_left_key_copy.flags == DB_DBT_MALLOC);
+ invariant(request.m_right_key_copy.flags == DB_DBT_MALLOC);
+ invariant(compare_dbts(nullptr, &request.m_left_key_copy, one) == 0);
+ invariant(compare_dbts(nullptr, &request.m_right_key_copy, one) == 0);
+
+ // release the range lock for txnid b
+ locktree_unit_test::locktree_test_release_lock(&lt, txnid_b, zero, two);
+
+ // now retry the lock requests.
+ // it should transition the request to successfully complete.
+ lock_request::retry_all_lock_requests(&lt);
+ invariant(info->pending_lock_requests.size() == 0);
+ invariant(request.m_state == lock_request::state::COMPLETE);
+ invariant(request.m_complete_r == 0);
+
+ locktree_unit_test::locktree_test_release_lock(&lt, txnid_a, one, one);
+
+ request.destroy();
+
+ lt.release_reference();
+ lt.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::lock_request_unit_test test;
+ test.test_start_pending();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_release_wait.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_release_wait.cc
new file mode 100644
index 00000000..717628f9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_release_wait.cc
@@ -0,0 +1,91 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+
+// test the race between start, release, and wait. since start does not put
+// its lock request into the pending set, the blocking txn could release its
+// lock before the first txn waits. this will block the first txn because its
+// lock request is not known when the lock is released. the bug fix is to try
+// again when lock retries are locked out.
+
+#include "lock_request.h"
+#include <atomic>
+#include <thread>
+#include "locktree.h"
+#include "locktree_unit_test.h"
+#include "test.h"
+
+namespace toku {
+
+ const uint64_t my_lock_wait_time = 1000 * 1000; // ms
+ const uint64_t my_killed_time = 1 * 1000; // ms
+
+ static uint64_t t_wait;
+
+ static int my_killed_callback(void) {
+ uint64_t t_now = toku_current_time_microsec();
+ assert(t_now >= t_wait);
+ if (t_now - t_wait >= my_killed_time * 1000)
+ abort();
+ return 0;
+ }
+
+ static void locktree_release_lock(locktree *lt,
+ TXNID txn_id,
+ const DBT *left,
+ const DBT *right) {
+ range_buffer buffer;
+ buffer.create();
+ buffer.append(left, right);
+ lt->release_locks(txn_id, &buffer);
+ buffer.destroy();
+ }
+
+ static void test_start_release_wait(void) {
+ int r;
+
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, nullptr, nullptr);
+
+ DICTIONARY_ID dict_id = {1};
+ locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+
+ const DBT *one = get_dbt(1);
+
+ // a locks one
+ lock_request a;
+ a.create();
+ a.set(lt, 1, one, one, lock_request::type::WRITE, false);
+ r = a.start();
+ assert(r == 0);
+
+ // b tries to lock one, fails
+ lock_request b;
+ b.create();
+ b.set(lt, 2, one, one, lock_request::type::WRITE, false);
+ r = b.start();
+ assert(r == DB_LOCK_NOTGRANTED);
+
+ // a releases its lock
+ locktree_release_lock(lt, 1, one, one);
+
+ // b waits for one, gets the lock immediately
+ t_wait = toku_current_time_microsec();
+ r = b.wait(my_lock_wait_time, my_killed_time, my_killed_callback);
+ assert(r == 0);
+
+ // b releases its lock so we can exit cleanly
+ locktree_release_lock(lt, 2, one, one);
+
+ a.destroy();
+ b.destroy();
+
+ mgr.release_lt(lt);
+ mgr.destroy();
+ }
+
+} /* namespace toku */
+
+int main(void) {
+ toku::test_start_release_wait();
+ return 0;
+}
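An illustrative timeline of the interleaving this test targets, using only the calls that appear above; it is a sketch of the pre-fix behavior described in the comment at the top of the file, not runnable code.

//   txn 1 (holder)                          txn 2 (waiter)
//   --------------                          --------------
//   a.start()                  returns 0
//                                           b.start()    returns DB_LOCK_NOTGRANTED
//   locktree_release_lock(lt, 1, one, one)
//   (no pending request is known yet,
//    so nothing gets retried)
//                                           b.wait(...)  without the fix the request only
//                                                        becomes visible now, nothing wakes
//                                                        it, and my_killed_callback eventually
//                                                        aborts; with the fix wait() re-checks
//                                                        the now-free lock and returns 0 as
//                                                        asserted above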
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc
new file mode 100644
index 00000000..83436a65
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc
@@ -0,0 +1,117 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident \
+ "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "lock_request.h"
+#include <iostream>
+#include <thread>
+#include "locktree.h"
+#include "test.h"
+
+// Test FT-633, the data race on the lock request between ::start and ::retry
+// This test is non-deterministic. It uses sleeps at 2 critical places to
+// expose the data race on the lock requests state.
+
+namespace toku {
+
+ static void locker_callback(void) { usleep(10000); }
+
+ static void run_locker(locktree *lt, TXNID txnid, const DBT *key) {
+ int i;
+ for (i = 0; i < 1000; i++) {
+ lock_request request;
+ request.create();
+
+ request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
+
+ // set the test callbacks
+ request.set_start_test_callback(locker_callback);
+ request.set_retry_test_callback(locker_callback);
+
+ // try to acquire the lock
+ int r = request.start();
+ if (r == DB_LOCK_NOTGRANTED) {
+ // wait for the lock to be granted
+ r = request.wait(10 * 1000);
+ }
+
+ if (r == 0) {
+ // release the lock
+ range_buffer buffer;
+ buffer.create();
+ buffer.append(key, key);
+ lt->release_locks(txnid, &buffer);
+ buffer.destroy();
+
+ // retry pending lock requests
+ lock_request::retry_all_lock_requests(lt);
+ }
+
+ request.destroy();
+ request.clearmem(0xab);
+
+ toku_pthread_yield();
+ if ((i % 10) == 0)
+ std::cerr << std::this_thread::get_id() << " " << i
+ << std::endl;
+ }
+ }
+
+} /* namespace toku */
+
+int main(void) {
+ toku::locktree lt;
+ DICTIONARY_ID dict_id = {1};
+ lt.create(nullptr, dict_id, toku::dbt_comparator);
+
+ const DBT *one = toku::get_dbt(1);
+
+ const int n_workers = 2;
+ std::thread worker[n_workers];
+ for (int i = 0; i < n_workers; i++) {
+ worker[i] = std::thread(toku::run_locker, &lt, i, one);
+ }
+ for (int i = 0; i < n_workers; i++) {
+ worker[i].join();
+ }
+
+ lt.release_reference();
+ lt.destroy();
+ return 0;
+}
+
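A sketch of the FT-633 interleaving that the start and retry test callbacks above widen with their sleeps (illustrative only; both threads run run_locker on the same key):

//   thread A                                        thread B
//   --------                                        --------
//   request.start()
//     the lock attempt fails, then the thread
//     sleeps in the start test callback ...         lt->release_locks(txnid_B, &buffer);
//                                                   lock_request::retry_all_lock_requests(lt);
//                                                     sleeps in the retry test callback while
//                                                     examining / completing A's request
//   ... start resumes and updates the same
//   request state concurrently -> the data race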
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc
new file mode 100644
index 00000000..6748ae30
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc
@@ -0,0 +1,133 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident \
+ "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "lock_request.h"
+#include <pthread.h>
+#include <iostream>
+#include <thread>
+#include "locktree.h"
+#include "test.h"
+
+// Suppose that 3 threads are running a lock acquire, release, retry sequence.
+// There is a race in the retry algorithm with 2 threads running lock retry
+// simultaneously. The first thread to run retry sets a flag that will cause
+// the second thread to skip the lock retries. If the first thread progressed
+// past the contended lock, then the second thread will HANG until its lock timer
+// pops, even when the contended lock is no longer held.
+
+// This test exposes this problem as a test hang. The group retry algorithm
+// fixes the race in the lock request retry algorithm and this test should no
+// longer hang.
+
+namespace toku {
+
+ // use 1000 when after_retry_all is implemented, otherwise use 100000
+ static const int n_tests = 1000; // 100000;
+
+ static void after_retry_all(void) { usleep(10000); }
+
+ static void run_locker(locktree *lt,
+ TXNID txnid,
+ const DBT *key,
+ pthread_barrier_t *b) {
+ for (int i = 0; i < n_tests; i++) {
+ int r;
+ r = pthread_barrier_wait(b);
+ assert(r == 0 || r == PTHREAD_BARRIER_SERIAL_THREAD);
+
+ lock_request request;
+ request.create();
+
+ request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
+
+ // try to acquire the lock
+ r = request.start();
+ if (r == DB_LOCK_NOTGRANTED) {
+ // wait for the lock to be granted
+ r = request.wait(1000 * 1000);
+ }
+
+ if (r == 0) {
+ // release the lock
+ range_buffer buffer;
+ buffer.create();
+ buffer.append(key, key);
+ lt->release_locks(txnid, &buffer);
+ buffer.destroy();
+
+ // retry pending lock requests
+ lock_request::retry_all_lock_requests(lt, nullptr, after_retry_all);
+ }
+
+ request.destroy();
+ request.clearmem(0xab);
+
+ toku_pthread_yield();
+ if ((i % 10) == 0)
+ std::cerr << std::this_thread::get_id() << " " << i
+ << std::endl;
+ }
+ }
+
+} /* namespace toku */
+
+int main(void) {
+ toku::locktree lt;
+ DICTIONARY_ID dict_id = {1};
+ lt.create(nullptr, dict_id, toku::dbt_comparator);
+
+ const DBT *one = toku::get_dbt(1);
+
+ const int n_workers = 3;
+ std::thread worker[n_workers];
+ pthread_barrier_t b;
+ int r = pthread_barrier_init(&b, nullptr, n_workers);
+ assert(r == 0);
+ for (int i = 0; i < n_workers; i++) {
+ worker[i] = std::thread(toku::run_locker, &lt, i, one, &b);
+ }
+ for (int i = 0; i < n_workers; i++) {
+ worker[i].join();
+ }
+ r = pthread_barrier_destroy(&b);
+ assert(r == 0);
+ lt.release_reference();
+ lt.destroy();
+ return 0;
+}
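An illustrative interleaving of the retry race described above (three threads on the same key; a timeline, not runnable code):

//   thread 1                                  thread 2                                thread 3
//   releases its lock;
//   retry_all_lock_requests(lt):
//     marks a retry as in progress and
//     walks the pending set; thread 3's
//     request is still blocked (thread 2
//     holds the lock), so the walk moves on
//                                             releases the lock thread 3 wants;
//                                             retry_all_lock_requests(lt):
//                                               sees a retry in progress and skips
//                                                                                     request.wait(1000 * 1000)
//                                                                                     is never retried and hangs
//                                                                                     until the timeout pops
//
// the group retry algorithm referenced in the comment above is the fix that removes this hang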
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc
new file mode 100644
index 00000000..cd3dc7b3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc
@@ -0,0 +1,135 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident \
+ "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "lock_request.h"
+#include <pthread.h>
+#include <iostream>
+#include <thread>
+#include "locktree.h"
+#include "test.h"
+
+// Suppose that 2 threads are running a lock acquire, release, retry sequence.
+// There is a race between the acquire and the release with 2 threads.
+// If thread 1 acquires a lock, and thread 2 tries to acquire the same lock and
+// fails, thread 1 may release its lock and retry pending lock requests BEFORE
+// thread 2 adds itself to the pending lock requests. If this happens, then
+// thread 2 will HANG until its lock timer expires even when the lock it is
+// waiting for is FREE.
+
+// This test exposes this problem as a test hang. If the race is fixed, then
+// the test runs to completion.
+
+namespace toku {
+
+ static void start_before_pending(void) { usleep(10000); }
+
+ static void run_locker(locktree *lt,
+ TXNID txnid,
+ const DBT *key,
+ pthread_barrier_t *b) {
+ for (int i = 0; i < 100000; i++) {
+ int r;
+ r = pthread_barrier_wait(b);
+ assert(r == 0 || r == PTHREAD_BARRIER_SERIAL_THREAD);
+
+ lock_request request;
+ request.create();
+ request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
+
+ // if the callback is included, then the race is easy to reproduce.
+ // Otherwise, several test runs may be required before the race
+ // happens.
+ request.set_start_before_pending_test_callback(
+ start_before_pending);
+
+ // try to acquire the lock
+ r = request.start();
+ if (r == DB_LOCK_NOTGRANTED) {
+ // wait for the lock to be granted
+ r = request.wait(1000 * 1000);
+ }
+
+ if (r == 0) {
+ // release the lock
+ range_buffer buffer;
+ buffer.create();
+ buffer.append(key, key);
+ lt->release_locks(txnid, &buffer);
+ buffer.destroy();
+
+ // retry pending lock requests
+ lock_request::retry_all_lock_requests(lt);
+ }
+
+ request.destroy();
+ request.clearmem(0xab);
+
+ toku_pthread_yield();
+ if ((i % 10) == 0)
+ std::cerr << std::this_thread::get_id() << " " << i
+ << std::endl;
+ }
+ }
+
+} /* namespace toku */
+
+int main(void) {
+ toku::locktree lt;
+ DICTIONARY_ID dict_id = {1};
+ lt.create(nullptr, dict_id, toku::dbt_comparator);
+
+ const DBT *one = toku::get_dbt(1);
+
+ const int n_workers = 2;
+ std::thread worker[n_workers];
+ pthread_barrier_t b;
+ int r = pthread_barrier_init(&b, nullptr, n_workers);
+ assert(r == 0);
+ for (int i = 0; i < n_workers; i++) {
+ worker[i] = std::thread(toku::run_locker, &lt, i, one, &b);
+ }
+ for (int i = 0; i < n_workers; i++) {
+ worker[i].join();
+ }
+ r = pthread_barrier_destroy(&b);
+ assert(r == 0);
+ lt.release_reference();
+ lt.destroy();
+ return 0;
+}
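A sketch of the window the start_before_pending test callback above opens (illustrative timeline; both threads run run_locker on the same key):

//   thread 1                                      thread 2
//   acquires the write lock on the key
//                                                 request.start():
//                                                   the lock attempt fails;
//                                                   sleeps in start_before_pending,
//                                                   i.e. before the request joins
//                                                   the pending set ...
//   releases the lock;
//   retry_all_lock_requests(lt)
//     (the pending set is still empty,
//      so there is nothing to wake)
//                                                   the request joins the pending set and
//                                                   start() returns DB_LOCK_NOTGRANTED;
//                                                 request.wait(1000 * 1000) then blocks even
//                                                 though the lock is free -- the hang this
//                                                 test exposes when the race is present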
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_unit_test.h b/storage/tokudb/PerconaFT/locktree/tests/lock_request_unit_test.h
new file mode 100644
index 00000000..81e6db25
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_unit_test.h
@@ -0,0 +1,80 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "test.h"
+#include "locktree_unit_test.h"
+
+#include "lock_request.h"
+
+namespace toku {
+
+class lock_request_unit_test {
+public:
+ // create and set the object's internals, destroy should not crash.
+ void test_create_destroy(void);
+
+ // make sure setting keys and getting them back works properly.
+ // at a high level, we want to make sure keys are copied
+ // when appropriate and that they play nice with +/- infinity.
+ void test_get_set_keys(void);
+
+ // a lock request started without immediate success should get
+ // stored in the lock request set as pending.
+ void test_start_pending(void);
+
+ // make sure deadlocks are detected when a lock request starts
+ void test_start_deadlock(void);
+
+ // test that the get_wait_time callback works
+ void test_wait_time_callback(void);
+
+private:
+ // releases a single range lock and retries all lock requests.
+ // this is kind of like what the ydb layer does, except that
+ // the ydb layer releases all of a txn's locks at once using
+ // lt->release_locks(), not individually using lt->remove_overlapping_locks_for_txnid().
+ void release_lock_and_retry_requests(locktree *lt,
+ TXNID txnid, const DBT *left_key, const DBT * right_key) {
+ locktree_unit_test::locktree_test_release_lock(lt, txnid, left_key, right_key);
+ lock_request::retry_all_lock_requests(lt);
+ }
+};
+
+}
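A minimal sketch of the ydb-style release mentioned in the comment above, assuming the caller already knows which ranges the transaction holds; the helper name is hypothetical, but the range_buffer, release_locks, and retry_all_lock_requests calls are the same ones used throughout these tests.

static inline void release_all_txn_locks_and_retry(toku::locktree *lt, TXNID txnid,
                                                   const DBT *left1, const DBT *right1,
                                                   const DBT *left2, const DBT *right2) {
    toku::range_buffer buffer;
    buffer.create();
    buffer.append(left1, right1);        // every range the txn holds goes into one buffer
    buffer.append(left2, right2);
    lt->release_locks(txnid, &buffer);   // a single call releases them all
    buffer.destroy();
    toku::lock_request::retry_all_lock_requests(lt);  // then retry pending requests once
}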
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_wait_time_callback.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_wait_time_callback.cc
new file mode 100644
index 00000000..1647cee1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_wait_time_callback.cc
@@ -0,0 +1,96 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "lock_request_unit_test.h"
+
+namespace toku {
+
+static const uint64_t my_lock_wait_time = 10 * 1000; // 10 sec
+
+// make sure a lock request's wait times out once the lock wait time expires
+void lock_request_unit_test::test_wait_time_callback(void) {
+ int r;
+ locktree lt;
+
+ DICTIONARY_ID dict_id = { 1 };
+ lt.create(nullptr, dict_id, dbt_comparator);
+
+ TXNID txnid_a = 1001;
+ lock_request request_a;
+ request_a.create();
+
+ TXNID txnid_b = 2001;
+ lock_request request_b;
+ request_b.create();
+
+ const DBT *one = get_dbt(1);
+ const DBT *two = get_dbt(2);
+
+ // a locks 'one'
+ request_a.set(&lt, txnid_a, one, one, lock_request::type::WRITE, false);
+ r = request_a.start();
+ assert_zero(r);
+
+ // b tries to lock 'one'
+ request_b.set(&lt, txnid_b, one, two, lock_request::type::WRITE, false);
+ r = request_b.start();
+ assert(r == DB_LOCK_NOTGRANTED);
+ uint64_t t_start = toku_current_time_microsec();
+ r = request_b.wait(my_lock_wait_time);
+ uint64_t t_end = toku_current_time_microsec();
+ assert(r == DB_LOCK_NOTGRANTED);
+ assert(t_end > t_start);
+ uint64_t t_delta = t_end - t_start;
+ assert(t_delta >= my_lock_wait_time);
+ request_b.destroy();
+
+ release_lock_and_retry_requests(&lt, txnid_a, one, one);
+ request_a.destroy();
+
+ lt.release_reference();
+ lt.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::lock_request_unit_test test;
+ test.test_wait_time_callback();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_conflicts.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_conflicts.cc
new file mode 100644
index 00000000..5edef673
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_conflicts.cc
@@ -0,0 +1,126 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "locktree_unit_test.h"
+
+#include <toku_time.h>
+
+__attribute__((__unused__))
+static long current_time_usec(void) {
+ struct timeval t;
+ gettimeofday(&t, NULL);
+ return t.tv_usec + t.tv_sec * 1000000;
+}
+
+namespace toku {
+
+// test write lock conflicts when read or write locks exist
+// test read lock conflicts when write locks exist
+void locktree_unit_test::test_conflicts(void) {
+ locktree lt;
+
+ DICTIONARY_ID dict_id = { 1 };
+ lt.create(nullptr, dict_id, dbt_comparator);
+
+ int r;
+ TXNID txnid_a = 1001;
+ TXNID txnid_b = 2001;
+ const DBT *zero = get_dbt(0);
+ const DBT *one = get_dbt(1);
+ const DBT *two = get_dbt(2);
+ const DBT *three = get_dbt(3);
+ const DBT *four = get_dbt(4);
+ const DBT *five = get_dbt(5);
+
+ for (int test_run = 0; test_run < 2; test_run++) {
+ // test_run == 0 means test with read lock
+ // test_run == 1 means test with write lock
+#define ACQUIRE_LOCK(txn, left, right, conflicts) \
+ test_run == 0 ? lt.acquire_read_lock(txn, left, right, conflicts, false) \
+ : lt.acquire_write_lock(txn, left, right, conflicts, false)
+
+ // acquire some locks for txnid_a
+ r = ACQUIRE_LOCK(txnid_a, one, one, nullptr);
+ invariant(r == 0);
+ r = ACQUIRE_LOCK(txnid_a, three, four, nullptr);
+ invariant(r == 0);
+
+#undef ACQUIRE_LOCK
+
+ for (int sub_test_run = 0; sub_test_run < 2; sub_test_run++) {
+ // sub_test_run == 0 means test read lock on top of write lock
+ // sub_test_run == 1 means test write lock on top of write lock
+ // if test_run == 0, then read locks exist. only test write locks.
+#define ACQUIRE_LOCK(txn, left, right, conflicts) \
+ sub_test_run == 0 && test_run == 1 ? \
+ lt.acquire_read_lock(txn, left, right, conflicts, false) \
+ : lt.acquire_write_lock(txn, left, right, conflicts, false)
+ // try to get point locks for txnid_b, should fail
+ r = ACQUIRE_LOCK(txnid_b, one, one, nullptr);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = ACQUIRE_LOCK(txnid_b, three, three, nullptr);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = ACQUIRE_LOCK(txnid_b, four, four, nullptr);
+ invariant(r == DB_LOCK_NOTGRANTED);
+
+ // try to get some overlapping range locks for txnid_b, should fail
+ r = ACQUIRE_LOCK(txnid_b, zero, two, nullptr);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = ACQUIRE_LOCK(txnid_b, four, five, nullptr);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = ACQUIRE_LOCK(txnid_b, two, three, nullptr);
+ invariant(r == DB_LOCK_NOTGRANTED);
+#undef ACQUIRE_LOCK
+ }
+
+ lt.remove_overlapping_locks_for_txnid(txnid_a, one, one);
+ lt.remove_overlapping_locks_for_txnid(txnid_a, three, four);
+ invariant(no_row_locks(&lt));
+ }
+
+ lt.release_reference();
+ lt.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::locktree_unit_test test;
+ test.test_conflicts();
+ return 0;
+}
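A summary of the conflict matrix the two nested loops above walk; the read-request-on-read-lock cell is not exercised by this test and is assumed (shared read locks are presumed compatible):

//   lock held by txnid_a \ request by txnid_b |  READ                 WRITE
//   ------------------------------------------+-------------------------------------------
//   READ   (test_run == 0)                    |  (not tested here)    DB_LOCK_NOTGRANTED
//   WRITE  (test_run == 1)                    |  DB_LOCK_NOTGRANTED   DB_LOCK_NOTGRANTED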
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_create_destroy.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_create_destroy.cc
new file mode 100644
index 00000000..9b2bb7f0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_create_destroy.cc
@@ -0,0 +1,73 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "locktree_unit_test.h"
+
+namespace toku {
+
+// test simple create and destroy of the locktree
+void locktree_unit_test::test_create_destroy(void) {
+ locktree lt;
+ DICTIONARY_ID dict_id = { 1 };
+
+ lt.create(nullptr, dict_id, dbt_comparator);
+
+ lt_lock_request_info *info = lt.get_lock_request_info();
+ invariant_notnull(info);
+ toku_mutex_lock(&info->mutex);
+ toku_mutex_unlock(&info->mutex);
+
+ invariant(lt.m_dict_id.dictid == dict_id.dictid);
+ invariant(lt.m_reference_count == 1);
+ invariant(lt.m_rangetree != nullptr);
+ invariant(lt.m_userdata == nullptr);
+ invariant(info->pending_lock_requests.size() == 0);
+ invariant(lt.m_sto_end_early_count == 0);
+ invariant(lt.m_sto_end_early_time == 0);
+
+ lt.release_reference();
+ lt.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::locktree_unit_test test;
+ test.test_create_destroy();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_1big7lt_1small.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_1big7lt_1small.cc
new file mode 100644
index 00000000..32029b5b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_1big7lt_1small.cc
@@ -0,0 +1,234 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include "locktree.h"
+#include "test.h"
+
+// ensure that small transactions do not get stalled by a big transaction that has lots of locks
+// ./locktree_escalation_1big7lt_1small --stalls 100 --max_lock_memory 1000000000 --verbose
+
+using namespace toku;
+
+static int verbose = 0;
+static int killed = 0;
+static pthread_t big_id, small_id;
+
+static void locktree_release_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64_t right_k) {
+ range_buffer buffer;
+ buffer.create();
+ DBT left; toku_fill_dbt(&left, &left_k, sizeof left_k);
+ DBT right; toku_fill_dbt(&right, &right_k, sizeof right_k);
+ buffer.append(&left, &right);
+ lt->release_locks(txn_id, &buffer);
+ buffer.destroy();
+}
+
+// grab a write range lock on int64 keys bounded by left_k and right_k
+static int locktree_write_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64_t right_k, bool big_txn) {
+ DBT left; toku_fill_dbt(&left, &left_k, sizeof left_k);
+ DBT right; toku_fill_dbt(&right, &right_k, sizeof right_k);
+ return lt->acquire_write_lock(txn_id, &left, &right, nullptr, big_txn);
+}
+
+static void run_big_txn(locktree_manager *mgr UU(), locktree **lt, int n_lt, TXNID txn_id) {
+ int64_t last_i = -1;
+ for (int64_t i = 0; !killed; i++) {
+ for (int j = 0; j < n_lt; j++) {
+ uint64_t t_start = toku_current_time_microsec();
+ int r = locktree_write_lock(lt[j], txn_id, i, i, true);
+ assert(r == 0);
+ last_i = i;
+ uint64_t t_end = toku_current_time_microsec();
+ uint64_t t_duration = t_end - t_start;
+ if (t_duration > 100000) {
+ printf("%u %s %" PRId64 " %" PRIu64 "\n", toku_os_gettid(), __FUNCTION__, i, t_duration);
+ }
+ }
+ toku_pthread_yield();
+ }
+ if (last_i != -1)
+ for (int j = 0; j < n_lt; j++)
+ locktree_release_lock(lt[j], txn_id, 0, last_i); // release the range 0 .. last_i
+ if (verbose)
+ printf("%u %s %" PRId64 "\n", toku_os_gettid(), __FUNCTION__, last_i);
+}
+
+struct big_arg {
+ locktree_manager *mgr;
+ locktree **lt;
+ int n_lt;
+ TXNID txn_id;
+};
+
+static void *big_f(void *_arg) {
+ struct big_arg *arg = (struct big_arg *) _arg;
+ assert(pthread_equal(pthread_self(), big_id));
+ printf("%u %s\n", toku_os_gettid(), __FUNCTION__);
+ run_big_txn(arg->mgr, arg->lt, arg->n_lt, arg->txn_id);
+ return arg;
+}
+
+static void run_small_txn(locktree_manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t k) {
+ int64_t i;
+ for (i = 0; !killed; i++) {
+ uint64_t t_start = toku_current_time_microsec();
+ int r = locktree_write_lock(lt, txn_id, k, k, false);
+ assert(r == 0);
+ uint64_t t_end = toku_current_time_microsec();
+ uint64_t t_duration = t_end - t_start;
+ if (t_duration > 100000) {
+ printf("%u %s %" PRId64 " %" PRIu64 "\n", toku_os_gettid(), __FUNCTION__, i, t_duration);
+ }
+ locktree_release_lock(lt, txn_id, k, k);
+ toku_pthread_yield();
+ }
+ if (verbose)
+ printf("%u %s %" PRId64 "\n", toku_os_gettid(), __FUNCTION__, i);
+}
+
+struct small_arg {
+ locktree_manager *mgr;
+ locktree *lt;
+ TXNID txn_id;
+ int64_t k;
+};
+
+static void *small_f(void *_arg) {
+ struct small_arg *arg = (struct small_arg *) _arg;
+ printf("%u %s\n", toku_os_gettid(), __FUNCTION__);
+ run_small_txn(arg->mgr, arg->lt, arg->txn_id, arg->k);
+ return arg;
+}
+
+static void e_callback(TXNID txnid, const locktree *lt, const range_buffer &buffer, void *extra) {
+ assert(pthread_equal(pthread_self(), big_id));
+ if (verbose)
+ printf("%u %s %" PRIu64 " %p %d %p\n", toku_os_gettid(), __FUNCTION__, txnid, lt, buffer.get_num_ranges(), extra);
+}
+
+static uint64_t get_escalation_count(locktree_manager &mgr) {
+ LTM_STATUS_S ltm_status_test;
+ mgr.get_status(&ltm_status_test);
+
+ TOKU_ENGINE_STATUS_ROW key_status = NULL;
+ // lookup keyname in status
+ for (int i = 0; ; i++) {
+ TOKU_ENGINE_STATUS_ROW status = &ltm_status_test.status[i];
+ if (status->keyname == NULL)
+ break;
+ if (strcmp(status->keyname, "LTM_ESCALATION_COUNT") == 0) {
+ key_status = status;
+ break;
+ }
+ }
+ assert(key_status);
+ return key_status->value.num;
+}
+
+int main(int argc, const char *argv[]) {
+ uint64_t stalls = 1;
+ int n_big = 7;
+ uint64_t max_lock_memory = 1000000;
+
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "--stalls") == 0 && i+1 < argc) {
+ stalls = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--max_lock_memory") == 0 && i+1 < argc) {
+ max_lock_memory = atoll(argv[++i]);
+ continue;
+ }
+ }
+
+ int r;
+
+ // create a manager
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, e_callback, nullptr);
+ mgr.set_max_lock_memory(max_lock_memory);
+
+ // create lock trees
+ uint64_t next_dict_id = 1;
+ DICTIONARY_ID dict_id;
+ locktree *big_lt[n_big];
+ for (int i = 0; i < n_big; i++) {
+ dict_id = { next_dict_id }; next_dict_id++;
+ big_lt[i] = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+ }
+
+ dict_id = { next_dict_id }; next_dict_id++;
+ locktree *small_lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+
+ // create the worker threads
+ struct big_arg big_arg = {&mgr, big_lt, n_big, 1000};
+ r = toku_pthread_create(
+ toku_uninstrumented, &big_id, nullptr, big_f, &big_arg);
+ assert(r == 0);
+
+ struct small_arg small_arg = {&mgr, small_lt, 2000, 0};
+ r = toku_pthread_create(
+ toku_uninstrumented, &small_id, nullptr, small_f, &small_arg);
+ assert(r == 0);
+
+ // wait for some escalations to occur
+ while (get_escalation_count(mgr) < stalls) {
+ sleep(1);
+ }
+ killed = 1;
+
+ // cleanup
+ void *ret;
+ r = toku_pthread_join(big_id, &ret);
+ assert(r == 0);
+
+ r = toku_pthread_join(small_id, &ret);
+ assert(r == 0);
+
+ for (int i = 0; i < n_big; i++)
+ mgr.release_lt(big_lt[i]);
+ mgr.release_lt(small_lt);
+ mgr.destroy();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_2big_1lt.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_2big_1lt.cc
new file mode 100644
index 00000000..ff59a7bd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_2big_1lt.cc
@@ -0,0 +1,197 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include "locktree.h"
+#include "test.h"
+
+// One client locks 1,2,3...
+// The other client locks -1,-2,-3...
+// Eventually lock escalation runs.
+
+using namespace toku;
+
+static int verbose = 0;
+static int killed = 0;
+
+static void locktree_release_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64_t right_k) {
+ range_buffer buffer;
+ buffer.create();
+ DBT left; toku_fill_dbt(&left, &left_k, sizeof left_k);
+ DBT right; toku_fill_dbt(&right, &right_k, sizeof right_k);
+ buffer.append(&left, &right);
+ lt->release_locks(txn_id, &buffer);
+ buffer.destroy();
+}
+
+// grab a write range lock on int64 keys bounded by left_k and right_k
+static int locktree_write_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64_t right_k, bool big_txn) {
+ DBT left; toku_fill_dbt(&left, &left_k, sizeof left_k);
+ DBT right; toku_fill_dbt(&right, &right_k, sizeof right_k);
+ return lt->acquire_write_lock(txn_id, &left, &right, nullptr, big_txn);
+}
+
+static void run_big_txn(locktree_manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t start_i) {
+ fprintf(stderr, "%u run_big_txn %p %" PRIu64 " %" PRId64 "\n", toku_os_gettid(), lt, txn_id, start_i);
+ int64_t last_i = -1;
+ for (int64_t i = start_i; !killed; i++) {
+ if (0)
+ printf("%u %" PRId64 "\n", toku_os_gettid(), i);
+ uint64_t t_start = toku_current_time_microsec();
+ int r = locktree_write_lock(lt, txn_id, i, i, true);
+ if (r != 0)
+ break;
+ last_i = i;
+ uint64_t t_end = toku_current_time_microsec();
+ uint64_t t_duration = t_end - t_start;
+ if (t_duration > 100000) {
+ printf("%u %s %" PRId64 " %" PRIu64 "\n", toku_os_gettid(), __FUNCTION__, i, t_duration);
+ }
+ toku_pthread_yield();
+ }
+ if (last_i != -1)
+ locktree_release_lock(lt, txn_id, start_i, last_i); // release the range start_i .. last_i
+}
+
+struct arg {
+ locktree_manager *mgr;
+ locktree *lt;
+ TXNID txn_id;
+ int64_t start_i;
+};
+
+static void *big_f(void *_arg) {
+ struct arg *arg = (struct arg *) _arg;
+ run_big_txn(arg->mgr, arg->lt, arg->txn_id, arg->start_i);
+ return arg;
+}
+
+static void e_callback(TXNID txnid, const locktree *lt, const range_buffer &buffer, void *extra) {
+ if (verbose)
+ printf("%u %s %" PRIu64 " %p %d %p\n", toku_os_gettid(), __FUNCTION__, txnid, lt, buffer.get_num_ranges(), extra);
+}
+
+static uint64_t get_escalation_count(locktree_manager &mgr) {
+ LTM_STATUS_S ltm_status_test;
+ mgr.get_status(&ltm_status_test);
+
+ TOKU_ENGINE_STATUS_ROW key_status = NULL;
+ // lookup keyname in status
+ for (int i = 0; ; i++) {
+ TOKU_ENGINE_STATUS_ROW status = &ltm_status_test.status[i];
+ if (status->keyname == NULL)
+ break;
+ if (strcmp(status->keyname, "LTM_ESCALATION_COUNT") == 0) {
+ key_status = status;
+ break;
+ }
+ }
+ assert(key_status);
+ return key_status->value.num;
+}
+
+int main(int argc, const char *argv[]) {
+ const int n_big = 2;
+ int n_lt = 1;
+ uint64_t stalls = 1;
+ uint64_t max_lock_memory = 1000000;
+
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "--stalls") == 0 && i+1 < argc) {
+ stalls = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--n_lt") == 0 && i+1 < argc) {
+ n_lt = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--max_lock_memory") == 0 && i+1 < argc) {
+ max_lock_memory = atoll(argv[++i]);
+ continue;
+ }
+ }
+
+ int r;
+
+ // create a manager
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, e_callback, nullptr);
+ mgr.set_max_lock_memory(max_lock_memory);
+
+ // create lock trees
+ locktree *lt[n_big];
+ for (int i = 0; i < n_lt; i++) {
+ DICTIONARY_ID dict_id = { .dictid = (uint64_t) i };
+ lt[i] = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+ assert(lt[i]);
+ }
+
+ // create the worker threads
+ struct arg big_arg[n_big];
+ pthread_t big_ids[n_big];
+ for (int i = 0; i < n_big; i++) {
+ big_arg[i] = {
+ &mgr, lt[i % n_lt], (TXNID)(1000 + i), i == 0 ? 1 : -1000000000};
+ r = toku_pthread_create(
+ toku_uninstrumented, &big_ids[i], nullptr, big_f, &big_arg[i]);
+ assert(r == 0);
+ }
+
+ // wait for some escalations to occur
+ while (get_escalation_count(mgr) < stalls) {
+ sleep(1);
+ }
+ killed = 1;
+
+ // cleanup
+ for (int i = 0; i < n_big; i++) {
+ void *ret;
+ r = toku_pthread_join(big_ids[i], &ret);
+ assert(r == 0);
+ }
+ for (int i = 0; i < n_lt ; i++) {
+ mgr.release_lt(lt[i]);
+ }
+ mgr.destroy();
+
+ return 0;
+}
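An illustrative picture of why escalation keeps both big transactions making progress here (hedged: the consolidation below is the expected behavior, which this test observes only through LTM_ESCALATION_COUNT): each transaction's point locks form one contiguous run owned by that transaction alone, so they can presumably be collapsed into a single range per transaction.

//   before escalation (point locks)                   after escalation (expected)
//   txn 1000: [1,1] [2,2] ... [i,i]                   txn 1000: [1, i]
//   txn 1001: [-1000000000,-1000000000] ... [j,j]     txn 1001: [-1000000000, j]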
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_2big_2lt.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_2big_2lt.cc
new file mode 100644
index 00000000..be1ddaba
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_2big_2lt.cc
@@ -0,0 +1,197 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include "locktree.h"
+#include "test.h"
+
+// One client locks 1,2,3...
+// The other client locks -1,-2,-3...
+// Eventually lock escalation runs.
+
+using namespace toku;
+
+static int verbose = 0;
+static int killed = 0;
+
+static void locktree_release_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64_t right_k) {
+ range_buffer buffer;
+ buffer.create();
+ DBT left; toku_fill_dbt(&left, &left_k, sizeof left_k);
+ DBT right; toku_fill_dbt(&right, &right_k, sizeof right_k);
+ buffer.append(&left, &right);
+ lt->release_locks(txn_id, &buffer);
+ buffer.destroy();
+}
+
+// grab a write range lock on int64 keys bounded by left_k and right_k
+static int locktree_write_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64_t right_k, bool big_txn) {
+ DBT left; toku_fill_dbt(&left, &left_k, sizeof left_k);
+ DBT right; toku_fill_dbt(&right, &right_k, sizeof right_k);
+ return lt->acquire_write_lock(txn_id, &left, &right, nullptr, big_txn);
+}
+
+static void run_big_txn(locktree_manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t start_i) {
+ fprintf(stderr, "%u run_big_txn %p %" PRIu64 " %" PRId64 "\n", toku_os_gettid(), lt, txn_id, start_i);
+ int64_t last_i = -1;
+ for (int64_t i = start_i; !killed; i++) {
+ if (0)
+ printf("%u %" PRId64 "\n", toku_os_gettid(), i);
+ uint64_t t_start = toku_current_time_microsec();
+ int r = locktree_write_lock(lt, txn_id, i, i, true);
+ if (r != 0)
+ break;
+ last_i = i;
+ uint64_t t_end = toku_current_time_microsec();
+ uint64_t t_duration = t_end - t_start;
+ if (t_duration > 100000) {
+ printf("%u %s %" PRId64 " %" PRIu64 "\n", toku_os_gettid(), __FUNCTION__, i, t_duration);
+ }
+ toku_pthread_yield();
+ }
+ if (last_i != -1)
+ locktree_release_lock(lt, txn_id, start_i, last_i); // release the range start_i .. last_i
+}
+
+struct arg {
+ locktree_manager *mgr;
+ locktree *lt;
+ TXNID txn_id;
+ int64_t start_i;
+};
+
+static void *big_f(void *_arg) {
+ struct arg *arg = (struct arg *) _arg;
+ run_big_txn(arg->mgr, arg->lt, arg->txn_id, arg->start_i);
+ return arg;
+}
+
+static void e_callback(TXNID txnid, const locktree *lt, const range_buffer &buffer, void *extra) {
+ if (verbose)
+ printf("%u %s %" PRIu64 " %p %d %p\n", toku_os_gettid(), __FUNCTION__, txnid, lt, buffer.get_num_ranges(), extra);
+}
+
+static uint64_t get_escalation_count(locktree_manager &mgr) {
+ LTM_STATUS_S ltm_status_test;
+ mgr.get_status(&ltm_status_test);
+
+ TOKU_ENGINE_STATUS_ROW key_status = NULL;
+ // lookup keyname in status
+ for (int i = 0; ; i++) {
+ TOKU_ENGINE_STATUS_ROW status = &ltm_status_test.status[i];
+ if (status->keyname == NULL)
+ break;
+ if (strcmp(status->keyname, "LTM_ESCALATION_COUNT") == 0) {
+ key_status = status;
+ break;
+ }
+ }
+ assert(key_status);
+ return key_status->value.num;
+}
+
+int main(int argc, const char *argv[]) {
+ const int n_big = 2;
+ int n_lt = 2;
+ uint64_t stalls = 1;
+ uint64_t max_lock_memory = 1000000;
+
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "--stalls") == 0 && i+1 < argc) {
+ stalls = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--n_lt") == 0 && i+1 < argc) {
+ n_lt = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--max_lock_memory") == 0 && i+1 < argc) {
+ max_lock_memory = atoll(argv[++i]);
+ continue;
+ }
+ }
+
+ int r;
+
+ // create a manager
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, e_callback, nullptr);
+ mgr.set_max_lock_memory(max_lock_memory);
+
+ // create lock trees
+ locktree *lt[n_big];
+ for (int i = 0; i < n_lt; i++) {
+ DICTIONARY_ID dict_id = { .dictid = (uint64_t)i };
+ lt[i] = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+ assert(lt[i]);
+ }
+
+ // create the worker threads
+ struct arg big_arg[n_big];
+ pthread_t big_ids[n_big];
+ for (int i = 0; i < n_big; i++) {
+ big_arg[i] = {
+ &mgr, lt[i % n_lt], (TXNID)(1000 + i), i == 0 ? 1 : -1000000000};
+ r = toku_pthread_create(
+ toku_uninstrumented, &big_ids[i], nullptr, big_f, &big_arg[i]);
+ assert(r == 0);
+ }
+
+ // wait for some escalations to occur
+ while (get_escalation_count(mgr) < stalls) {
+ sleep(1);
+ }
+ killed = 1;
+
+ // cleanup
+ for (int i = 0; i < n_big; i++) {
+ void *ret;
+ r = toku_pthread_join(big_ids[i], &ret);
+ assert(r == 0);
+ }
+ for (int i = 0; i < n_lt ; i++) {
+ mgr.release_lt(lt[i]);
+ }
+ mgr.destroy();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_impossible.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_impossible.cc
new file mode 100644
index 00000000..cf610ed0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_impossible.cc
@@ -0,0 +1,150 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include "locktree.h"
+#include "test.h"
+
+// Two big txn's grab alternating locks in a single lock tree.
+// Eventually lock escalation runs.
+// Since the locks can not be consolidated, the out of locks error should be returned.
+
+using namespace toku;
+
+static int verbose = 0;
+
+static inline void locktree_release_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64_t right_k) {
+ range_buffer buffer;
+ buffer.create();
+ DBT left; toku_fill_dbt(&left, &left_k, sizeof left_k);
+ DBT right; toku_fill_dbt(&right, &right_k, sizeof right_k);
+ buffer.append(&left, &right);
+ lt->release_locks(txn_id, &buffer);
+ buffer.destroy();
+}
+
+// grab a write range lock on int64 keys bounded by left_k and right_k
+static int locktree_write_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64_t right_k, bool big_txn) {
+ DBT left; toku_fill_dbt(&left, &left_k, sizeof left_k);
+ DBT right; toku_fill_dbt(&right, &right_k, sizeof right_k);
+ return lt->acquire_write_lock(txn_id, &left, &right, nullptr, big_txn);
+}
+
+static void e_callback(TXNID txnid, const locktree *lt, const range_buffer &buffer, void *extra) {
+ if (verbose)
+ printf("%u %s %" PRIu64 " %p %d %p\n", toku_os_gettid(), __FUNCTION__, txnid, lt, buffer.get_num_ranges(), extra);
+}
+
+static uint64_t get_escalation_count(locktree_manager &mgr) {
+ LTM_STATUS_S ltm_status_test;
+ mgr.get_status(&ltm_status_test);
+
+ TOKU_ENGINE_STATUS_ROW key_status = NULL;
+ // lookup keyname in status
+ for (int i = 0; ; i++) {
+ TOKU_ENGINE_STATUS_ROW status = &ltm_status_test.status[i];
+ if (status->keyname == NULL)
+ break;
+ if (strcmp(status->keyname, "LTM_ESCALATION_COUNT") == 0) {
+ key_status = status;
+ break;
+ }
+ }
+ assert(key_status);
+ return key_status->value.num;
+}
+
+int main(int argc, const char *argv[]) {
+ uint64_t max_lock_memory = 1000000;
+
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "--max_lock_memory") == 0 && i+1 < argc) {
+ max_lock_memory = atoll(argv[++i]);
+ continue;
+ }
+ }
+
+ int r;
+
+ // create a manager
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, e_callback, nullptr);
+ mgr.set_max_lock_memory(max_lock_memory);
+
+ const TXNID txn_a = 10;
+ const TXNID txn_b = 100;
+
+ // create lock trees
+ DICTIONARY_ID dict_id = { .dictid = 1 };
+ locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+
+ int64_t last_i = -1;
+ for (int64_t i = 0; ; i++) {
+ if (verbose)
+ printf("%" PRId64 "\n", i);
+ int64_t k = 2*i;
+ r = locktree_write_lock(lt, txn_a, k, k, true);
+ if (r != 0) {
+ assert(r == TOKUDB_OUT_OF_LOCKS);
+ break;
+ }
+ last_i = i;
+ r = locktree_write_lock(lt, txn_b, k+1, k+1, true);
+ if (r != 0) {
+ assert(r == TOKUDB_OUT_OF_LOCKS);
+ break;
+ }
+ }
+
+ // wait for an escalation to occur
+ assert(get_escalation_count(mgr) > 0);
+
+ if (last_i != -1) {
+ locktree_release_lock(lt, txn_a, 0, 2*last_i);
+ locktree_release_lock(lt, txn_b, 0, 2*last_i+1);
+ }
+
+ mgr.release_lt(lt);
+ mgr.destroy();
+
+ return 0;
+}
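An illustrative view of why escalation cannot reclaim anything here; the key pattern comes from the loop above, where txn_a locks the even keys and txn_b locks the odd keys:

//   key:    0      1      2      3      4      5    ...
//   owner:  txn_a  txn_b  txn_a  txn_b  txn_a  txn_b ...
//
//   any wider range for txn_a, e.g. [0,2], would cover key 1, which txn_b holds,
//   so no two locks of the same transaction can be merged, the lock table stays
//   full, and acquire_write_lock eventually returns TOKUDB_OUT_OF_LOCKS.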
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_stalls.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_stalls.cc
new file mode 100644
index 00000000..9dc9596a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_escalation_stalls.cc
@@ -0,0 +1,226 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// This test verifies that small txn's do not get stalled for a long time by lock escalation.
+// Two lock trees are used by the test: a big lock tree and a small lock tree.
+// One big txn grabs lots of write locks on the big lock tree.
+// Several small txn's grab a single write lock on the small lock tree.
+// None of the locks conflict.
+// Eventually, the locks for the big txn consume all of the lock tree memory, so lock escalation runs.
+// The test measures the lock acquisition time and makes sure that the small txn's are not blocked for a long time.
+
+// locktree_escalation_stalls -v --stalls 10
+// verify that only big txn's get tagged with > 1 second stalls
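+//
+// any lock acquisition that takes more than 100 milliseconds is reported by
+// the threads below as a line of the form
+//   <tid> <function name> <iteration> <duration in microseconds>
+// (see the printf calls in run_big_txn and run_small_txn); in a passing run,
+// only the run_big_txn lines should show durations above one second.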
+
+#include <stdio.h>
+#include "locktree.h"
+#include "test.h"
+
+using namespace toku;
+
+static int verbose = 0;
+static int killed = 0;
+
+static void locktree_release_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64_t right_k) {
+ range_buffer buffer;
+ buffer.create();
+ DBT left; toku_fill_dbt(&left, &left_k, sizeof left_k);
+ DBT right; toku_fill_dbt(&right, &right_k, sizeof right_k);
+ buffer.append(&left, &right);
+ lt->release_locks(txn_id, &buffer);
+ buffer.destroy();
+}
+
+// grab a write range lock on int64 keys bounded by left_k and right_k
+static int locktree_write_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64_t right_k, bool big_txn) {
+ DBT left; toku_fill_dbt(&left, &left_k, sizeof left_k);
+ DBT right; toku_fill_dbt(&right, &right_k, sizeof right_k);
+ return lt->acquire_write_lock(txn_id, &left, &right, nullptr, big_txn);
+}
+
+static void run_big_txn(locktree_manager *mgr UU(), locktree *lt, TXNID txn_id) {
+ int64_t last_i = -1;
+ for (int64_t i = 0; !killed; i++) {
+ uint64_t t_start = toku_current_time_microsec();
+ int r = locktree_write_lock(lt, txn_id, i, i, true);
+ assert(r == 0);
+ last_i = i;
+ uint64_t t_end = toku_current_time_microsec();
+ uint64_t t_duration = t_end - t_start;
+ if (t_duration > 100000) {
+ printf("%u %s %" PRId64 " %" PRIu64 "\n", toku_os_gettid(), __FUNCTION__, i, t_duration);
+ }
+ toku_pthread_yield();
+ }
+ if (last_i != -1)
+ locktree_release_lock(lt, txn_id, 0, last_i); // release the range 0 .. last_i
+}
+
+static void run_small_txn(locktree_manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t k) {
+ for (int64_t i = 0; !killed; i++) {
+ uint64_t t_start = toku_current_time_microsec();
+ int r = locktree_write_lock(lt, txn_id, k, k, false);
+ assert(r == 0);
+ uint64_t t_end = toku_current_time_microsec();
+ uint64_t t_duration = t_end - t_start;
+ if (t_duration > 100000) {
+ printf("%u %s %" PRId64 " %" PRIu64 "\n", toku_os_gettid(), __FUNCTION__, i, t_duration);
+ }
+ locktree_release_lock(lt, txn_id, k, k);
+ toku_pthread_yield();
+ }
+}
+
+struct arg {
+ locktree_manager *mgr;
+ locktree *lt;
+ TXNID txn_id;
+ int64_t k;
+};
+
+static void *big_f(void *_arg) {
+ struct arg *arg = (struct arg *) _arg;
+ run_big_txn(arg->mgr, arg->lt, arg->txn_id);
+ return arg;
+}
+
+static void *small_f(void *_arg) {
+ struct arg *arg = (struct arg *) _arg;
+ run_small_txn(arg->mgr, arg->lt, arg->txn_id, arg->k);
+ return arg;
+}
+
+static void e_callback(TXNID txnid, const locktree *lt, const range_buffer &buffer, void *extra) {
+ if (verbose)
+ printf("%u %s %" PRIu64 " %p %d %p\n", toku_os_gettid(), __FUNCTION__, txnid, lt, buffer.get_num_ranges(), extra);
+}
+
+static uint64_t get_escalation_count(locktree_manager &mgr) {
+ LTM_STATUS_S ltm_status_test;
+ mgr.get_status(&ltm_status_test);
+
+ TOKU_ENGINE_STATUS_ROW key_status = NULL;
+ // lookup keyname in status
+ for (int i = 0; ; i++) {
+ TOKU_ENGINE_STATUS_ROW status = &ltm_status_test.status[i];
+ if (status->keyname == NULL)
+ break;
+ if (strcmp(status->keyname, "LTM_ESCALATION_COUNT") == 0) {
+ key_status = status;
+ break;
+ }
+ }
+ assert(key_status);
+ return key_status->value.num;
+}
+
+int main(int argc, const char *argv[]) {
+ uint64_t stalls = 0;
+ uint64_t max_lock_memory = 1000000000;
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "--stalls") == 0 && i+1 < argc) {
+ stalls = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--max_lock_memory") == 0 && i+1 < argc) {
+ max_lock_memory = atoll(argv[++i]);
+ continue;
+ }
+ }
+
+ int r;
+
+ // create a manager
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, e_callback, nullptr);
+ mgr.set_max_lock_memory(max_lock_memory);
+
+ // create lock trees
+ DICTIONARY_ID dict_id_0 = { .dictid = 1 };
+ locktree *lt_0 = mgr.get_lt(dict_id_0, dbt_comparator, nullptr);
+
+ DICTIONARY_ID dict_id_1 = { .dictid = 2 };
+ locktree *lt_1 = mgr.get_lt(dict_id_1, dbt_comparator, nullptr);
+
+ // create the worker threads
+ struct arg big_arg = {&mgr, lt_0, 1000};
+ pthread_t big_id;
+ r = toku_pthread_create(
+ toku_uninstrumented, &big_id, nullptr, big_f, &big_arg);
+ assert(r == 0);
+
+ const int n_small = 7;
+ pthread_t small_ids[n_small];
+ struct arg small_args[n_small];
+
+ for (int i = 0; i < n_small; i++) {
+ small_args[i] = {&mgr, lt_1, (TXNID)(2000 + i), i};
+ r = toku_pthread_create(toku_uninstrumented,
+ &small_ids[i],
+ nullptr,
+ small_f,
+ &small_args[i]);
+ assert(r == 0);
+ }
+
+ // wait for some escalations to occur
+ while (get_escalation_count(mgr) < stalls) {
+ sleep(1);
+ }
+ killed = 1;
+
+ // cleanup
+ void *ret;
+ r = toku_pthread_join(big_id, &ret);
+ assert(r == 0);
+
+ for (int i = 0; i < n_small; i++) {
+ r = toku_pthread_join(small_ids[i], &ret);
+ assert(r == 0);
+ }
+
+ mgr.release_lt(lt_0);
+ mgr.release_lt(lt_1);
+ mgr.destroy();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_infinity.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_infinity.cc
new file mode 100644
index 00000000..e755158a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_infinity.cc
@@ -0,0 +1,123 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "locktree_unit_test.h"
+
+namespace toku {
+
+// test that ranges with infinite endpoints work
+void locktree_unit_test::test_infinity(void) {
+ locktree lt;
+
+ DICTIONARY_ID dict_id = { 1 };
+ lt.create(nullptr, dict_id, dbt_comparator);
+
+ int r;
+ TXNID txnid_a = 1001;
+ TXNID txnid_b = 2001;
+ const DBT *zero = get_dbt(0);
+ const DBT *one = get_dbt(1);
+ const DBT *two = get_dbt(2);
+ const DBT *five = get_dbt(5);
+ const DBT min_int = min_dbt();
+ const DBT max_int = max_dbt();
+
+ // txn A will lock -inf, 5.
+ r = lt.acquire_write_lock(txnid_a, toku_dbt_negative_infinity(), five, nullptr, false);
+ invariant(r == 0);
+ // txn B will fail to get any lock <= 5, even min_int
+ r = lt.acquire_write_lock(txnid_b, five, five, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, zero, one, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, &min_int, &min_int, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, toku_dbt_negative_infinity(), &min_int, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+
+ lt.remove_overlapping_locks_for_txnid(txnid_a, toku_dbt_negative_infinity(), five);
+
+ // txn A will lock 1, +inf
+ r = lt.acquire_write_lock(txnid_a, one, toku_dbt_positive_infinity(), nullptr, false);
+ invariant(r == 0);
+ // txn B will fail to get any lock >= 1, even max_int
+ r = lt.acquire_write_lock(txnid_b, one, one, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, two, five, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, &max_int, &max_int, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, &max_int, toku_dbt_positive_infinity(), nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+
+ lt.remove_overlapping_locks_for_txnid(txnid_a, toku_dbt_negative_infinity(), five);
+
+ // txn A will lock -inf, +inf
+ r = lt.acquire_write_lock(txnid_a, toku_dbt_negative_infinity(), toku_dbt_positive_infinity(), nullptr, false);
+ invariant(r == 0);
+ // txn B will fail to get any lock
+ r = lt.acquire_write_lock(txnid_b, zero, one, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, two, five, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, &min_int, &min_int, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, &min_int, &max_int, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, &max_int, &max_int, nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, toku_dbt_negative_infinity(), toku_dbt_negative_infinity(), nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, toku_dbt_negative_infinity(), toku_dbt_positive_infinity(), nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+ r = lt.acquire_write_lock(txnid_b, toku_dbt_positive_infinity(), toku_dbt_positive_infinity(), nullptr, false);
+ invariant(r == DB_LOCK_NOTGRANTED);
+
+ lt.remove_overlapping_locks_for_txnid(txnid_a, toku_dbt_negative_infinity(), toku_dbt_positive_infinity());
+
+ lt.release_reference();
+ lt.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::locktree_unit_test test;
+ test.test_infinity();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_misc.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_misc.cc
new file mode 100644
index 00000000..efd3ca3d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_misc.cc
@@ -0,0 +1,105 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "locktree_unit_test.h"
+
+namespace toku {
+
+static DBT *expected_a;
+static DBT *expected_b;
+static DESCRIPTOR expected_descriptor;
+static int expected_comparison_magic = 55;
+
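+// a stub comparison function: it only verifies that it was called with the
+// expected descriptor and then returns a recognizable magic value, so the
+// test can tell which comparator the locktree is currently using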
+static int my_compare_dbts(DB *db, const DBT *a, const DBT *b) {
+ invariant(db->cmp_descriptor == expected_descriptor);
+ (void) a;
+ (void) b;
+ return expected_comparison_magic;
+}
+
+// test that get/set userdata works, and that get_manager() works
+void locktree_unit_test::test_misc(void) {
+ locktree lt;
+ DICTIONARY_ID dict_id = { 1 };
+ toku::comparator my_dbt_comparator;
+ my_dbt_comparator.create(my_compare_dbts, nullptr);
+ lt.create(nullptr, dict_id, my_dbt_comparator);
+
+ invariant(lt.get_userdata() == nullptr);
+ int userdata;
+ lt.set_userdata(&userdata);
+ invariant(lt.get_userdata() == &userdata);
+ lt.set_userdata(nullptr);
+ invariant(lt.get_userdata() == nullptr);
+
+ int r;
+ DBT dbt_a, dbt_b;
+ DESCRIPTOR_S d1, d2;
+ expected_a = &dbt_a;
+ expected_b = &dbt_b;
+
+ toku::comparator cmp_d1, cmp_d2;
+ cmp_d1.create(my_compare_dbts, &d1);
+ cmp_d2.create(my_compare_dbts, &d2);
+
+ // make sure the comparator object has the correct
+ // descriptor when we set the locktree's descriptor
+ lt.set_comparator(cmp_d1);
+ expected_descriptor = &d1;
+ r = lt.m_cmp(&dbt_a, &dbt_b);
+ invariant(r == expected_comparison_magic);
+ lt.set_comparator(cmp_d2);
+ expected_descriptor = &d2;
+ r = lt.m_cmp(&dbt_a, &dbt_b);
+ invariant(r == expected_comparison_magic);
+
+ lt.release_reference();
+ lt.destroy();
+
+ cmp_d1.destroy();
+ cmp_d2.destroy();
+ my_dbt_comparator.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::locktree_unit_test test;
+ test.test_misc();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_overlapping_relock.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_overlapping_relock.cc
new file mode 100644
index 00000000..0e4d3d72
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_overlapping_relock.cc
@@ -0,0 +1,164 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "locktree_unit_test.h"
+
+namespace toku {
+
+// test that the same txn can relock ranges it already owns.
+// ensure that existing read locks can be upgraded to
+// write locks if they overlap, and that existing read
+// or write locks are consolidated by overlapping relocks.
+void locktree_unit_test::test_overlapping_relock(void) {
+ locktree lt;
+
+ DICTIONARY_ID dict_id = { 1 };
+ lt.create(nullptr, dict_id, dbt_comparator);
+
+ const DBT *zero = get_dbt(0);
+ const DBT *one = get_dbt(1);
+ const DBT *two = get_dbt(2);
+ const DBT *three = get_dbt(3);
+ const DBT *four = get_dbt(4);
+ const DBT *five = get_dbt(5);
+
+ int r;
+ TXNID txnid_a = 1001;
+
+ // because of the single txnid optimization, there is no consolidation
+ // of read or write ranges until there are at least two txnids in
+ // the locktree. so here we add some arbitrary txnid to get a point
+ // lock [100, 100] so that the test below can expect to actually
+ // do something. at the end of the test, we release 100, 100.
+ const TXNID the_other_txnid = 9999;
+ const DBT *hundred = get_dbt(100);
+ r = lt.acquire_write_lock(the_other_txnid, hundred, hundred, nullptr, false);
+ invariant(r == 0);
+
+ for (int test_run = 0; test_run < 2; test_run++) {
+ // test_run == 0 means test with read lock
+ // test_run == 1 means test with write lock
+#define ACQUIRE_LOCK(txn, left, right, conflicts) \
+ test_run == 0 ? lt.acquire_read_lock(txn, left, right, conflicts, false) \
+ : lt.acquire_write_lock(txn, left, right, conflicts, false)
+
+ // lock [1,1] and [2,2]. then lock [1,2].
+ // ensure only [1,2] exists in the tree
+ r = ACQUIRE_LOCK(txnid_a, one, one, nullptr);
+ invariant(r == 0);
+ r = ACQUIRE_LOCK(txnid_a, two, two, nullptr);
+ invariant(r == 0);
+ r = ACQUIRE_LOCK(txnid_a, one, two, nullptr);
+ invariant(r == 0);
+
+ struct verify_fn_obj {
+ bool saw_the_other;
+ TXNID expected_txnid;
+ keyrange *expected_range;
+ const comparator *cmp;
+ bool fn(const keyrange &range, TXNID txnid) {
+ if (txnid == the_other_txnid) {
+ invariant(!saw_the_other);
+ saw_the_other = true;
+ return true;
+ }
+ invariant(txnid == expected_txnid);
+ keyrange::comparison c = range.compare(*cmp, *expected_range);
+ invariant(c == keyrange::comparison::EQUALS);
+ return true;
+ }
+ } verify_fn;
+ verify_fn.cmp = &lt.m_cmp;
+
+#define do_verify() \
+ do { verify_fn.saw_the_other = false; locktree_iterate<verify_fn_obj>(&lt, &verify_fn); } while (0)
+
+ keyrange range;
+ range.create(one, two);
+ verify_fn.expected_txnid = txnid_a;
+ verify_fn.expected_range = &range;
+ do_verify();
+
+ // unlocking [1,1] should remove the only range;
+ // the other unlocks should do nothing.
+ lt.remove_overlapping_locks_for_txnid(txnid_a, one, one);
+ lt.remove_overlapping_locks_for_txnid(txnid_a, two, two);
+ lt.remove_overlapping_locks_for_txnid(txnid_a, one, two);
+
+ // try overlapping from the right
+ r = ACQUIRE_LOCK(txnid_a, one, three, nullptr);
+ r = ACQUIRE_LOCK(txnid_a, two, five, nullptr);
+ verify_fn.expected_txnid = txnid_a;
+ range.create(one, five);
+ verify_fn.expected_range = &range;
+ do_verify();
+
+ // now overlap from the left
+ r = ACQUIRE_LOCK(txnid_a, zero, four, nullptr);
+ verify_fn.expected_txnid = txnid_a;
+ range.create(zero, five);
+ verify_fn.expected_range = &range;
+ do_verify();
+
+ // now relock in a range that is already dominated
+ r = ACQUIRE_LOCK(txnid_a, five, five, nullptr);
+ verify_fn.expected_txnid = txnid_a;
+ range.create(zero, five);
+ verify_fn.expected_range = &range;
+ do_verify();
+
+ // release one of the locks we acquired. this should clean up the whole range.
+ lt.remove_overlapping_locks_for_txnid(txnid_a, zero, four);
+
+#undef ACQUIRE_LOCK
+ }
+
+ // remove the other txnid's lock now
+ lt.remove_overlapping_locks_for_txnid(the_other_txnid, hundred, hundred);
+
+ lt.release_reference();
+ lt.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::locktree_unit_test test;
+ test.test_overlapping_relock();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_simple_lock.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_simple_lock.cc
new file mode 100644
index 00000000..72b151ec
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_simple_lock.cc
@@ -0,0 +1,149 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "locktree_unit_test.h"
+
+namespace toku {
+
+// test simple, non-overlapping read locks and then write locks
+void locktree_unit_test::test_simple_lock(void) {
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, nullptr, nullptr);
+
+ DICTIONARY_ID dict_id = { .dictid = 1 };
+ locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+
+ int r;
+ TXNID txnid_a = 1001;
+ TXNID txnid_b = 2001;
+ TXNID txnid_c = 3001;
+ TXNID txnid_d = 4001;
+ const DBT *one = get_dbt(1);
+ const DBT *two = get_dbt(2);
+ const DBT *three = get_dbt(3);
+ const DBT *four = get_dbt(4);
+
+ for (int test_run = 0; test_run < 2; test_run++) {
+ // test_run == 0 means test with read lock
+ // test_run == 1 means test with write lock
+#define ACQUIRE_LOCK(txn, left, right, conflicts) \
+ test_run == 0 ? lt->acquire_read_lock(txn, left, right, conflicts, false) \
+ : lt->acquire_write_lock(txn, left, right, conflicts, false)
+
+ // four txns, four points
+ r = ACQUIRE_LOCK(txnid_a, one, one, nullptr);
+ invariant(r == 0);
+ r = ACQUIRE_LOCK(txnid_b, two, two, nullptr);
+ invariant(r == 0);
+ r = ACQUIRE_LOCK(txnid_c, three, three, nullptr);
+ invariant(r == 0);
+ r = ACQUIRE_LOCK(txnid_d, four, four, nullptr);
+ invariant(r == 0);
+ locktree_test_release_lock(lt, txnid_a, one, one);
+ locktree_test_release_lock(lt, txnid_b, two, two);
+ locktree_test_release_lock(lt, txnid_c, three, three);
+ locktree_test_release_lock(lt, txnid_d, four, four);
+ invariant(no_row_locks(lt));
+
+ // two txns, two ranges
+ r = ACQUIRE_LOCK(txnid_c, one, two, nullptr);
+ invariant(r == 0);
+ r = ACQUIRE_LOCK(txnid_b, three, four, nullptr);
+ invariant(r == 0);
+ locktree_test_release_lock(lt, txnid_c, one, two);
+ locktree_test_release_lock(lt, txnid_b, three, four);
+ invariant(no_row_locks(lt));
+
+ // two txns, one range, one point
+ r = ACQUIRE_LOCK(txnid_c, three, four, nullptr);
+ invariant(r == 0);
+ r = ACQUIRE_LOCK(txnid_d, one, one, nullptr);
+ invariant(r == 0);
+ locktree_test_release_lock(lt, txnid_c, three, four);
+ locktree_test_release_lock(lt, txnid_d, one, one);
+ invariant(no_row_locks(lt));
+
+#undef ACQUIRE_LOCK
+ }
+
+ const int64_t num_locks = 10000;
+
+ int64_t *keys = (int64_t *) toku_malloc(num_locks * sizeof(int64_t));
+ for (int64_t i = 0; i < num_locks; i++) {
+ keys[i] = i;
+ }
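+ // shuffle the keys so the locks below are acquired in a random order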
+ for (int64_t i = 0; i < num_locks; i++) {
+ int64_t k = rand() % num_locks;
+ int64_t tmp = keys[k];
+ keys[k] = keys[i];
+ keys[i] = tmp;
+ }
+
+
+ r = mgr.set_max_lock_memory((num_locks + 1) * 500);
+ invariant_zero(r);
+
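+ // reuse a single DB_DBT_USERMEM dbt, re-pointing its data at each key in
+ // turn, to acquire and then release a read lock on every shuffled key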
+ DBT k;
+ k.ulen = 0;
+ k.size = sizeof(keys[0]);
+ k.flags = DB_DBT_USERMEM;
+
+ for (int64_t i = 0; i < num_locks; i++) {
+ k.data = (void *) &keys[i];
+ r = lt->acquire_read_lock(txnid_a, &k, &k, nullptr, false);
+ invariant(r == 0);
+ }
+
+ for (int64_t i = 0; i < num_locks; i++) {
+ k.data = (void *) &keys[i];
+ locktree_test_release_lock(lt, txnid_a, &k, &k);
+ }
+
+ toku_free(keys);
+
+ mgr.release_lt(lt);
+ mgr.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::locktree_unit_test test;
+ test.test_simple_lock();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_single_txnid_optimization.cc b/storage/tokudb/PerconaFT/locktree/tests/locktree_single_txnid_optimization.cc
new file mode 100644
index 00000000..b437e19c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_single_txnid_optimization.cc
@@ -0,0 +1,130 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "locktree_unit_test.h"
+
+namespace toku {
+
+// test that the single txnid optimization does not screw up when there
+// is more than one txnid with locks in the tree. releasing all of one
+// txnid's locks at once must not lose any other txnid's lock.
+void locktree_unit_test::test_single_txnid_optimization(void) {
+ locktree lt;
+
+ DICTIONARY_ID dict_id = { 1 };
+ lt.create(nullptr, dict_id, dbt_comparator);
+
+ const DBT *zero = get_dbt(0);
+ const DBT *one = get_dbt(1);
+ const DBT *two = get_dbt(2);
+ const DBT *three = get_dbt(3);
+
+ int r;
+ TXNID txnid_a = 1001;
+ TXNID txnid_b = 2001;
+
+ // the single txnid optimization takes advantage of the fact that
+ // a locktree with only locks for a single txnid can be unlocked
+ // by just destroying every node. if this is implemented incorrectly,
+ // then some other txnid's lock might get lost. so test that no
+ // matter where txnid b takes its write lock in the middle of a bunch
+ // of txnid a locks, the txnid b lock does not get lost.
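+ // (where == 0, 1, or 2 interleaves txnid b's point lock on [1,1] after the
+ // first, second, or third of txnid a's locks; where == 3 means txnid b never
+ // takes a lock, which is why the verification below is skipped in that case.)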
+ for (int where = 0; where < 4; where++) {
+ range_buffer buffer;
+ buffer.create();
+
+#define lock_and_append_point_for_txnid_a(key) \
+ r = lt.acquire_write_lock(txnid_a, key, key, nullptr, false); \
+ invariant_zero(r); \
+ buffer.append(key, key);
+
+#define maybe_point_locks_for_txnid_b(i) \
+ if (where == i) { \
+ r = lt.acquire_write_lock(txnid_b, one, one, nullptr, false); \
+ invariant_zero(r); \
+ }
+
+ lock_and_append_point_for_txnid_a(two);
+ maybe_point_locks_for_txnid_b(0);
+
+ lock_and_append_point_for_txnid_a(three);
+ maybe_point_locks_for_txnid_b(1);
+
+ lock_and_append_point_for_txnid_a(zero);
+ maybe_point_locks_for_txnid_b(2);
+
+ lt.release_locks(txnid_a, &buffer);
+
+ // txnid b does not take a lock on iteration 3
+ if (where != 3) {
+ struct verify_fn_obj {
+ TXNID expected_txnid;
+ keyrange *expected_range;
+ const comparator *cmp;
+ bool fn(const keyrange &range, TXNID txnid) {
+ invariant(txnid == expected_txnid);
+ keyrange::comparison c = range.compare(*cmp, *expected_range);
+ invariant(c == keyrange::comparison::EQUALS);
+ return true;
+ }
+ } verify_fn;
+ verify_fn.cmp = &lt.m_cmp;
+
+ keyrange range;
+ range.create(one, one);
+ verify_fn.expected_txnid = txnid_b;
+ verify_fn.expected_range = &range;
+ locktree_iterate<verify_fn_obj>(&lt, &verify_fn);
+ lt.remove_overlapping_locks_for_txnid(txnid_b, one, one);
+ }
+
+ buffer.destroy();
+ }
+
+ lt.release_reference();
+ lt.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::locktree_unit_test test;
+ test.test_single_txnid_optimization();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/locktree_unit_test.h b/storage/tokudb/PerconaFT/locktree/tests/locktree_unit_test.h
new file mode 100644
index 00000000..d1b31037
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/locktree_unit_test.h
@@ -0,0 +1,105 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "test.h"
+
+#include "locktree.h"
+#include "concurrent_tree.h"
+
+namespace toku {
+
+class locktree_unit_test {
+public:
+ // test simple create and destroy of the locktree
+ void test_create_destroy(void);
+
+ // test that get/set userdata works, and that get_manager() works
+ void test_misc(void);
+
+ // test that simple read and write locks can be acquired
+ void test_simple_lock(void);
+
+ // test that:
+ // - existing read locks can be upgraded to write locks
+ // - overlapping locks are consolidated in the tree
+ // - dominated locks succeed and are not stored in the tree
+ void test_overlapping_relock(void);
+
+ // test write lock conflicts when read or write locks exist
+ // test read lock conflicts when write locks exist
+ void test_conflicts(void);
+
+ // test that ranges with infinite endpoints work
+ void test_infinity(void);
+
+ // make sure the single txnid optimization does not screw
+ // up when there is more than one txnid with locks in the tree
+ void test_single_txnid_optimization(void);
+
+private:
+
+
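+ // iterate over every range stored in the given locktree, invoking
+ // function->fn(range, txnid) on each one. this works by acquiring a
+ // locked keyrange over the rangetree's entire (infinite) key space,
+ // so it is only suitable for tests.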
+ template <typename F>
+ static void locktree_iterate(const locktree *lt, F *function) {
+ concurrent_tree::locked_keyrange ltr;
+ keyrange infinite_range = keyrange::get_infinite_range();
+
+ ltr.prepare(lt->m_rangetree);
+ ltr.acquire(infinite_range);
+ ltr.iterate<F>(function);
+ ltr.release();
+ }
+
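+ // true iff the locktree currently holds no row locks, in either the
+ // rangetree or the single txnid optimization buffer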
+ static bool no_row_locks(const locktree *lt) {
+ return lt->m_rangetree->is_empty() && lt->m_sto_buffer.is_empty();
+ }
+
+ static void locktree_test_release_lock(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key) {
+ range_buffer buffer;
+ buffer.create();
+ buffer.append(left_key, right_key);
+ lt->release_locks(txnid, &buffer);
+ buffer.destroy();
+ }
+
+ friend class lock_request_unit_test;
+};
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/tests/manager_create_destroy.cc b/storage/tokudb/PerconaFT/locktree/tests/manager_create_destroy.cc
new file mode 100644
index 00000000..dd496e3d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/manager_create_destroy.cc
@@ -0,0 +1,75 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "manager_unit_test.h"
+
+namespace toku {
+
+void manager_unit_test::test_create_destroy(void) {
+ locktree_manager mgr;
+ lt_create_cb create_callback = (lt_create_cb) (long) 1;
+ lt_destroy_cb destroy_callback = (lt_destroy_cb) (long) 2;
+ lt_escalate_cb escalate_callback = (lt_escalate_cb) (long) 3;
+ void *extra = (void *) (long) 4;
+ mgr.create(create_callback, destroy_callback, escalate_callback, extra);
+
+ invariant(mgr.m_max_lock_memory == locktree_manager::DEFAULT_MAX_LOCK_MEMORY);
+ invariant(mgr.m_current_lock_memory == 0);
+ invariant(mgr.m_escalation_count == 0);
+ invariant(mgr.m_escalation_time == 0);
+ invariant(mgr.m_escalation_latest_result == 0);
+
+ invariant(mgr.m_locktree_map.size() == 0);
+ invariant(mgr.m_lt_create_callback == create_callback);
+ invariant(mgr.m_lt_destroy_callback == destroy_callback);
+ invariant(mgr.m_lt_escalate_callback == escalate_callback);
+ invariant(mgr.m_lt_escalate_callback_extra == extra);
+
+ mgr.mutex_lock();
+ mgr.mutex_unlock();
+
+ mgr.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::manager_unit_test test;
+ test.test_create_destroy();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/manager_locktree_map.cc b/storage/tokudb/PerconaFT/locktree/tests/manager_locktree_map.cc
new file mode 100644
index 00000000..202092ce
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/manager_locktree_map.cc
@@ -0,0 +1,100 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "manager_unit_test.h"
+
+namespace toku {
+
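+// exercise the manager's dictionary-id -> locktree map directly:
+// put three locktrees, look them up, then remove them and verify that
+// lookups of removed or unknown ids return nullptr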
+void manager_unit_test::test_lt_map(void) {
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, nullptr, nullptr);
+
+ locktree aa;
+ locktree bb;
+ locktree cc;
+ locktree *alt = &aa;
+ locktree *blt = &bb;
+ locktree *clt = &cc;
+ DICTIONARY_ID a = { 1 };
+ DICTIONARY_ID b = { 2 };
+ DICTIONARY_ID c = { 3 };
+ DICTIONARY_ID d = { 4 };
+ alt->m_dict_id = a;
+ blt->m_dict_id = b;
+ clt->m_dict_id = c;
+
+ mgr.locktree_map_put(alt);
+ mgr.locktree_map_put(blt);
+ mgr.locktree_map_put(clt);
+
+ locktree *lt;
+
+ lt = mgr.locktree_map_find(a);
+ invariant(lt == alt);
+ lt = mgr.locktree_map_find(c);
+ invariant(lt == clt);
+ lt = mgr.locktree_map_find(b);
+ invariant(lt == blt);
+
+ mgr.locktree_map_remove(alt);
+ lt = mgr.locktree_map_find(a);
+ invariant(lt == nullptr);
+ lt = mgr.locktree_map_find(c);
+ invariant(lt == clt);
+ lt = mgr.locktree_map_find(b);
+ invariant(lt == blt);
+ lt = mgr.locktree_map_find(d);
+ invariant(lt == nullptr);
+
+ mgr.locktree_map_remove(clt);
+ mgr.locktree_map_remove(blt);
+ lt = mgr.locktree_map_find(c);
+ invariant(lt == nullptr);
+ lt = mgr.locktree_map_find(b);
+ invariant(lt == nullptr);
+
+ mgr.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::manager_unit_test test;
+ test.test_lt_map();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/manager_parallel_locktree_get_release.cc b/storage/tokudb/PerconaFT/locktree/tests/manager_parallel_locktree_get_release.cc
new file mode 100644
index 00000000..08ce6314
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/manager_parallel_locktree_get_release.cc
@@ -0,0 +1,93 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// This test crashes prior to the FT-600 fix.
+
+#include "manager_unit_test.h"
+
+namespace toku {
+
+static int my_cmp(DB *UU(db), const DBT *UU(a), const DBT *UU(b)) {
+ return 0;
+}
+
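+// repeatedly get and release a locktree for the same dictionary id; run from
+// several threads at once, this races the manager's locktree reference
+// counting (the scenario that crashed before the FT-600 fix)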
+static void my_test(locktree_manager *mgr) {
+ toku::comparator my_comparator;
+ my_comparator.create(my_cmp, nullptr);
+ DICTIONARY_ID a = { 42 };
+ for (int i=0; i<100000; i++) {
+ locktree *alt = mgr->get_lt(a, my_comparator, nullptr);
+ invariant_notnull(alt);
+ mgr->release_lt(alt);
+ }
+ my_comparator.destroy();
+}
+
+static void *my_tester(void *arg) {
+ locktree_manager *mgr = (locktree_manager *) arg;
+ my_test(mgr);
+ return arg;
+}
+
+void manager_unit_test::test_reference_release_lt(void) {
+ int r;
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, nullptr, nullptr);
+ const int nthreads = 2;
+ pthread_t ids[nthreads];
+ for (int i = 0; i < nthreads; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &ids[i], nullptr, my_tester, &mgr);
+ assert(r == 0);
+ }
+ for (int i = 0; i < nthreads; i++) {
+ void *ret;
+ r = toku_pthread_join(ids[i], &ret);
+ assert(r == 0);
+ }
+ my_test(&mgr);
+ mgr.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::manager_unit_test test;
+ test.test_reference_release_lt();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/manager_params.cc b/storage/tokudb/PerconaFT/locktree/tests/manager_params.cc
new file mode 100644
index 00000000..ca444467
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/manager_params.cc
@@ -0,0 +1,68 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "manager_unit_test.h"
+
+namespace toku {
+
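+// verify that set_max_lock_memory accepts values above the lock memory
+// currently in use and rejects values below it with EDOM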
+void manager_unit_test::test_params(void) {
+ int r;
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, nullptr, nullptr);
+
+ uint64_t new_max_lock_memory = 15307752356;
+ r = mgr.set_max_lock_memory(new_max_lock_memory);
+ invariant(r == 0);
+ invariant(mgr.get_max_lock_memory() == new_max_lock_memory);
+
+ mgr.m_current_lock_memory = 100000;
+ r = mgr.set_max_lock_memory(mgr.m_current_lock_memory - 1);
+ invariant(r == EDOM);
+ invariant(mgr.get_max_lock_memory() == new_max_lock_memory);
+
+ mgr.m_current_lock_memory = 0;
+ mgr.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::manager_unit_test test;
+ test.test_params();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/manager_reference_release_lt.cc b/storage/tokudb/PerconaFT/locktree/tests/manager_reference_release_lt.cc
new file mode 100644
index 00000000..38bc0dcd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/manager_reference_release_lt.cc
@@ -0,0 +1,131 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "manager_unit_test.h"
+
+namespace toku {
+
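+// the create and destroy callbacks record, in a caller-provided bool, whether
+// the locktree currently exists, so the test can observe exactly when the
+// manager creates and destroys each locktree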
+static int create_cb(locktree *lt, void *extra) {
+ lt->set_userdata(extra);
+ bool *k = (bool *) extra;
+ invariant(!(*k));
+ (*k) = true;
+ return 0;
+}
+
+static void destroy_cb(locktree *lt) {
+ bool *k = (bool *) lt->get_userdata();
+ invariant(*k);
+ (*k) = false;
+}
+
+static int my_cmp(DB *UU(db), const DBT *UU(a), const DBT *UU(b)) {
+ return 0;
+}
+
+void manager_unit_test::test_reference_release_lt(void) {
+ locktree_manager mgr;
+ mgr.create(create_cb, destroy_cb, nullptr, nullptr);
+ toku::comparator my_comparator;
+ my_comparator.create(my_cmp, nullptr);
+
+ DICTIONARY_ID a = { 0 };
+ DICTIONARY_ID b = { 1 };
+ DICTIONARY_ID c = { 2 };
+ bool aok = false;
+ bool bok = false;
+ bool cok = false;
+
+ locktree *alt = mgr.get_lt(a, my_comparator, &aok);
+ invariant_notnull(alt);
+ locktree *blt = mgr.get_lt(b, my_comparator, &bok);
+ invariant_notnull(blt);
+ locktree *clt = mgr.get_lt(c, my_comparator, &cok);
+ invariant_notnull(clt);
+
+ // three distinct locktrees should have been returned
+ invariant(alt != blt && alt != clt && blt != clt);
+
+ // on create callbacks should have been called
+ invariant(aok);
+ invariant(bok);
+ invariant(cok);
+
+ // add 3 refs. b should still exist.
+ mgr.reference_lt(blt);
+ mgr.reference_lt(blt);
+ mgr.reference_lt(blt);
+ invariant(bok);
+ // remove 3 refs. b should still exist.
+ mgr.release_lt(blt);
+ mgr.release_lt(blt);
+ mgr.release_lt(blt);
+ invariant(bok);
+
+ // get another handle on a and b, they should be the same
+ // as the original alt and blt
+ locktree *blt2 = mgr.get_lt(b, my_comparator, &bok);
+ invariant(blt2 == blt);
+ locktree *alt2 = mgr.get_lt(a, my_comparator, &aok);
+ invariant(alt2 == alt);
+
+ // remove one ref from everything. c should die. a and b are ok.
+ mgr.release_lt(alt);
+ mgr.release_lt(blt);
+ mgr.release_lt(clt);
+ invariant(aok);
+ invariant(bok);
+ invariant(!cok);
+
+ // release a and b. both should die.
+ mgr.release_lt(blt2);
+ mgr.release_lt(alt2);
+ invariant(!aok);
+ invariant(!bok);
+
+ my_comparator.destroy();
+ mgr.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::manager_unit_test test;
+ test.test_reference_release_lt();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/manager_status.cc b/storage/tokudb/PerconaFT/locktree/tests/manager_status.cc
new file mode 100644
index 00000000..abba450f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/manager_status.cc
@@ -0,0 +1,119 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "manager_unit_test.h"
+#include "locktree_unit_test.h"
+#include "lock_request_unit_test.h"
+
+namespace toku {
+
+static void assert_status(LTM_STATUS ltm_status, const char *keyname, uint64_t v) {
+ TOKU_ENGINE_STATUS_ROW key_status = NULL;
+ // lookup keyname in status
+ for (int i = 0; ; i++) {
+ TOKU_ENGINE_STATUS_ROW status = &ltm_status->status[i];
+ if (status->keyname == NULL)
+ break;
+ if (strcmp(status->keyname, keyname) == 0) {
+ key_status = status;
+ break;
+ }
+ }
+ assert(key_status);
+ assert(key_status->value.num == v);
+}
+
+void manager_unit_test::test_status(void) {
+ locktree_manager mgr;
+ mgr.create(nullptr, nullptr, nullptr, nullptr);
+
+ LTM_STATUS_S status;
+ mgr.get_status(&status);
+ assert_status(&status, "LTM_WAIT_COUNT", 0);
+ assert_status(&status, "LTM_TIMEOUT_COUNT", 0);
+
+ DICTIONARY_ID dict_id = { .dictid = 1 };
+ locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
+ int r;
+ TXNID txnid_a = 1001;
+ TXNID txnid_b = 2001;
+ const DBT *one = get_dbt(1);
+
+ // txn a write locks one
+ r = lt->acquire_write_lock(txnid_a, one, one, nullptr, false);
+ assert(r == 0);
+
+ // txn b tries to write lock one, conflicts, waits, and fails to lock one
+ lock_request request_b;
+ request_b.create();
+ request_b.set(lt, txnid_b, one, one, lock_request::type::WRITE, false);
+ r = request_b.start();
+ assert(r == DB_LOCK_NOTGRANTED);
+ r = request_b.wait(1000);
+ assert(r == DB_LOCK_NOTGRANTED);
+ request_b.destroy();
+
+ range_buffer buffer;
+ buffer.create();
+ buffer.append(one, one);
+ lt->release_locks(txnid_a, &buffer);
+ buffer.destroy();
+
+ assert(lt->m_rangetree->is_empty() && lt->m_sto_buffer.is_empty());
+
+ // assert that wait counters incremented
+ mgr.get_status(&status);
+ assert_status(&status, "LTM_WAIT_COUNT", 1);
+ assert_status(&status, "LTM_TIMEOUT_COUNT", 1);
+
+ // assert that wait counters are persistent after the lock tree is destroyed
+ mgr.release_lt(lt);
+ mgr.get_status(&status);
+ assert_status(&status, "LTM_WAIT_COUNT", 1);
+ assert_status(&status, "LTM_TIMEOUT_COUNT", 1);
+
+ mgr.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::manager_unit_test test;
+ test.test_status();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/manager_unit_test.h b/storage/tokudb/PerconaFT/locktree/tests/manager_unit_test.h
new file mode 100644
index 00000000..674f11ba
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/manager_unit_test.h
@@ -0,0 +1,59 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_assert.h>
+#include <locktree/locktree.h>
+
+namespace toku {
+
+class manager_unit_test {
+public:
+ void test_create_destroy(void);
+
+ void test_params(void);
+
+ void test_lt_map(void);
+
+ void test_reference_release_lt(void);
+
+ void test_status(void);
+};
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/tests/range_buffer_test.cc b/storage/tokudb/PerconaFT/locktree/tests/range_buffer_test.cc
new file mode 100644
index 00000000..b03eb05d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/range_buffer_test.cc
@@ -0,0 +1,197 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <string.h>
+
+#include <portability/memory.h>
+
+#include <locktree/range_buffer.h>
+
+namespace toku {
+
+const size_t num_points = 60;
+
+static const DBT *get_dbt_by_iteration(size_t i) {
+ if (i == 0) {
+ return toku_dbt_negative_infinity();
+ } else if (i < (num_points - 1)) {
+ return get_dbt(i);
+ } else {
+ return toku_dbt_positive_infinity();
+ }
+}
+
+static void test_points(void) {
+ range_buffer buffer;
+ buffer.create();
+
+ for (size_t i = 0; i < num_points; i++) {
+ const DBT *point = get_dbt_by_iteration(i);
+ buffer.append(point, point);
+ }
+
+ size_t i = 0;
+ range_buffer::iterator iter(&buffer);
+ range_buffer::iterator::record rec;
+ while (iter.current(&rec)) {
+ const DBT *expected_point = get_dbt_by_iteration(i);
+ invariant(compare_dbts(nullptr, expected_point, rec.get_left_key()) == 0);
+ invariant(compare_dbts(nullptr, expected_point, rec.get_right_key()) == 0);
+ iter.next();
+ i++;
+ }
+ invariant(i == num_points);
+
+ buffer.destroy();
+}
+
+static void test_ranges(void) {
+ range_buffer buffer;
+ buffer.create();
+
+ // we are going to store adjacent points as ranges,
+ // so make sure there are an even number of points.
+ invariant(num_points % 2 == 0);
+
+ for (size_t i = 0; i < num_points; i += 2) {
+ const DBT *left = get_dbt_by_iteration(i);
+ const DBT *right = get_dbt_by_iteration(i + 1);
+ buffer.append(left, right);
+ }
+
+ size_t i = 0;
+ range_buffer::iterator iter(&buffer);
+ range_buffer::iterator::record rec;
+ while (iter.current(&rec)) {
+ const DBT *expected_left = get_dbt_by_iteration(i);
+ const DBT *expected_right = get_dbt_by_iteration(i + 1);
+ invariant(compare_dbts(nullptr, expected_left, rec.get_left_key()) == 0);
+ invariant(compare_dbts(nullptr, expected_right, rec.get_right_key()) == 0);
+ iter.next();
+ i += 2;
+ }
+ invariant(i == num_points);
+
+ buffer.destroy();
+
+}
+
+static void test_mixed(void) {
+ range_buffer buffer;
+ buffer.create();
+
+ // we are going to store adjacent points as ranges,
+ // followed by a single point, so make sure the
+ // number of points is a multiple of 3.
+ invariant(num_points % 3 == 0);
+
+ for (size_t i = 0; i < num_points; i += 3) {
+ const DBT *left = get_dbt_by_iteration(i);
+ const DBT *right = get_dbt_by_iteration(i + 1);
+ const DBT *point = get_dbt_by_iteration(i + 2);
+ buffer.append(left, right);
+ buffer.append(point, point);
+ }
+
+ size_t i = 0;
+ range_buffer::iterator iter(&buffer);
+ range_buffer::iterator::record rec;
+ while (iter.current(&rec)) {
+ const DBT *expected_left = get_dbt_by_iteration(i);
+ const DBT *expected_right = get_dbt_by_iteration(i + 1);
+ invariant(compare_dbts(nullptr, expected_left, rec.get_left_key()) == 0);
+ invariant(compare_dbts(nullptr, expected_right, rec.get_right_key()) == 0);
+ iter.next();
+
+ const DBT *expected_point = get_dbt_by_iteration(i + 2);
+ bool had_point = iter.current(&rec);
+ invariant(had_point);
+ invariant(compare_dbts(nullptr, expected_point, rec.get_left_key()) == 0);
+ invariant(compare_dbts(nullptr, expected_point, rec.get_right_key()) == 0);
+ iter.next();
+ i += 3;
+ }
+ invariant(i == num_points);
+
+ buffer.destroy();
+}
+
+static void test_small_and_large_points(void) {
+ range_buffer buffer;
+ buffer.create();
+
+ // Test a bug where a small append would cause
+ // the range buffer to not grow properly for
+ // a subsequent large append.
+ const size_t small_size = 32;
+ const size_t large_size = 16 * 1024;
+ char *small_buf = (char *) toku_xmalloc(small_size);
+ char *large_buf = (char *) toku_xmalloc(large_size);
+ DBT small_dbt, large_dbt;
+ memset(&small_dbt, 0, sizeof(DBT));
+ memset(&large_dbt, 0, sizeof(DBT));
+ small_dbt.data = small_buf;
+ small_dbt.size = small_size;
+ large_dbt.data = large_buf;
+ large_dbt.size = large_size;
+
+    // Append a small dbt; the buffer should be able to fit it.
+ buffer.append(&small_dbt, &small_dbt);
+ invariant(buffer.total_memory_size() >= small_dbt.size);
+    // Append a large dbt; the buffer should be able to fit it.
+ buffer.append(&large_dbt, &large_dbt);
+ invariant(buffer.total_memory_size() >= (small_dbt.size + large_dbt.size));
+
+ toku_free(small_buf);
+ toku_free(large_buf);
+ buffer.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::test_points();
+ toku::test_ranges();
+ toku::test_mixed();
+ toku::test_small_and_large_points();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/test.h b/storage/tokudb/PerconaFT/locktree/tests/test.h
new file mode 100644
index 00000000..921f2468
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/test.h
@@ -0,0 +1,122 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <limits.h>
+
+#include "ft/comparator.h"
+#include "util/dbt.h"
+
+namespace toku {
+
+ __attribute__((__unused__))
+ static DBT min_dbt(void) {
+ static int64_t min = INT_MIN;
+ DBT dbt;
+ toku_fill_dbt(&dbt, &min, sizeof(int64_t));
+ dbt.flags = DB_DBT_USERMEM;
+ return dbt;
+ }
+
+ __attribute__((__unused__))
+ static DBT max_dbt(void) {
+ static int64_t max = INT_MAX;
+ DBT dbt;
+ toku_fill_dbt(&dbt, &max, sizeof(int64_t));
+ dbt.flags = DB_DBT_USERMEM;
+ return dbt;
+ }
+
+ __attribute__((__unused__))
+ static const DBT *get_dbt(int64_t key) {
+ static const int NUM_DBTS = 1000;
+ static bool initialized;
+ static int64_t static_ints[NUM_DBTS];
+ static DBT static_dbts[NUM_DBTS];
+ invariant(key < NUM_DBTS);
+ if (!initialized) {
+ for (int i = 0; i < NUM_DBTS; i++) {
+ static_ints[i] = i;
+ toku_fill_dbt(&static_dbts[i],
+ &static_ints[i],
+ sizeof(int64_t));
+ static_dbts[i].flags = DB_DBT_USERMEM;
+ }
+ initialized = true;
+ }
+
+ invariant(key < NUM_DBTS);
+ return &static_dbts[key];
+ }
+
+ __attribute__((__unused__))
+ static int compare_dbts(DB *db, const DBT *key1, const DBT *key2) {
+ (void) db;
+
+        // this emulates what an "infinity-aware" comparator object does
+ if (toku_dbt_is_infinite(key1) || toku_dbt_is_infinite(key2)) {
+ return toku_dbt_infinite_compare(key1, key2);
+ } else {
+ invariant(key1->size == sizeof(int64_t));
+ invariant(key2->size == sizeof(int64_t));
+ int64_t a = *(int64_t*) key1->data;
+ int64_t b = *(int64_t*) key2->data;
+ if (a < b) {
+ return -1;
+ } else if (a == b) {
+ return 0;
+ } else {
+ return 1;
+ }
+ }
+ }
+
+ __attribute__((__unused__)) comparator dbt_comparator;
+
+ __attribute__((__constructor__))
+ static void construct_dbt_comparator(void) {
+ dbt_comparator.create(compare_dbts, nullptr);
+ }
+
+ __attribute__((__destructor__))
+ static void destruct_dbt_comparator(void) {
+ dbt_comparator.destroy();
+ }
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/tests/txnid_set_test.cc b/storage/tokudb/PerconaFT/locktree/tests/txnid_set_test.cc
new file mode 100644
index 00000000..d68c0292
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/txnid_set_test.cc
@@ -0,0 +1,111 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_assert.h>
+#include <locktree/locktree.h>
+
+int main(void) {
+ toku::txnid_set set;
+ set.create();
+    // make sure the set de-duplicates: adding the same id repeatedly keeps size() at 1
+ for (size_t i = 0; i < 5; i++) {
+ set.add(0);
+ invariant(set.size() == 1);
+ }
+ set.add(1);
+ set.add(100);
+ invariant(set.size() == 3);
+ invariant(!set.contains(2));
+ invariant(!set.contains(99));
+ invariant(!set.contains(101));
+ invariant(set.contains(100));
+ invariant(set.contains(1));
+
+ // go through the set and make sure we saw everything
+ bool saw0 = false, saw1 = false, saw100 = false;
+ for (size_t i = 0; i < set.size(); i++) {
+ TXNID x = set.get(i);
+ if (x == 0) {
+ invariant(!saw0);
+ saw0 = true;
+ } else if (x == 1) {
+ invariant(!saw1);
+ saw1 = true;
+ } else if (x == 100) {
+ invariant(!saw100);
+ saw100 = true;
+ } else {
+ assert(false);
+ }
+ }
+ invariant(saw0);
+ invariant(saw1);
+ invariant(saw100);
+
+ // make sure we see 0 and 100 but not 1
+ set.remove(1);
+ saw0 = false;
+ saw100 = false;
+ for (size_t i = 0; i < set.size(); i++) {
+ TXNID x = set.get(i);
+ if (x == 0) {
+ invariant(!saw0);
+ saw0 = true;
+ } else if (x == 100) {
+ invariant(!saw100);
+ saw100 = true;
+ } else {
+ assert(false);
+ }
+ }
+ invariant(saw0);
+ invariant(saw100);
+
+    // removing non-existent things is okay
+ set.remove(0);
+ set.remove(100);
+ set.remove(1);
+ set.remove(1010101);
+ set.remove(1212121);
+
+ // empty out what we know is in there
+ set.remove(100);
+ set.remove(0);
+ invariant(set.size() == 0);
+ set.destroy();
+}
diff --git a/storage/tokudb/PerconaFT/locktree/tests/wfg_test.cc b/storage/tokudb/PerconaFT/locktree/tests/wfg_test.cc
new file mode 100644
index 00000000..484de13d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/tests/wfg_test.cc
@@ -0,0 +1,172 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <locktree/wfg.h>
+
+namespace toku {
+
+enum {
+ WFG_TEST_MAX_TXNID = 10
+};
+
+struct visit_extra {
+ bool nodes_visited[WFG_TEST_MAX_TXNID];
+ bool edges_visited[WFG_TEST_MAX_TXNID][WFG_TEST_MAX_TXNID];
+
+ void clear(void) {
+ memset(nodes_visited, 0, sizeof(nodes_visited));
+ memset(edges_visited, 0, sizeof(edges_visited));
+ }
+};
+
+// wfg node visit callback
+static int visit_nodes(TXNID txnid, void *extra) {
+ visit_extra *ve = static_cast<visit_extra *>(extra);
+ invariant(txnid < WFG_TEST_MAX_TXNID);
+ invariant(!ve->nodes_visited[txnid]);
+ ve->nodes_visited[txnid] = true;
+ return 0;
+}
+
+// wfg edge visit callback
+static int visit_edges(TXNID txnid, TXNID edge_txnid, void *extra) {
+ visit_extra *ve = static_cast<visit_extra *>(extra);
+ invariant(txnid < WFG_TEST_MAX_TXNID);
+ invariant(edge_txnid < WFG_TEST_MAX_TXNID);
+ invariant(!ve->edges_visited[txnid][edge_txnid]);
+ ve->edges_visited[txnid][edge_txnid] = true;
+ return 0;
+}
+
+// the graph should only have 3 nodes labelled 0 1 and 2
+static void verify_only_nodes_012_exist(wfg *g) {
+ visit_extra ve;
+ ve.clear();
+ g->apply_nodes(visit_nodes, &ve);
+ for (int i = 0; i < WFG_TEST_MAX_TXNID; i++) {
+ if (i == 0 || i == 1 || i == 2) {
+ invariant(ve.nodes_visited[i]);
+ } else {
+ invariant(!ve.nodes_visited[i]);
+ }
+ }
+}
+
+// the graph should only have edges 0->1 and 1->2
+static void verify_only_edges_01_12_exist(wfg *g) {
+ visit_extra ve;
+ ve.clear();
+ g->apply_edges(0, visit_edges, &ve);
+ g->apply_edges(1, visit_edges, &ve);
+ g->apply_edges(2, visit_edges, &ve);
+ for (int i = 0; i < WFG_TEST_MAX_TXNID; i++) {
+ for (int j = 0; j < WFG_TEST_MAX_TXNID; j++) {
+ if ((i == 0 && j == 1) || (i == 1 && j == 2)) {
+ invariant(ve.edges_visited[i][j]);
+ } else {
+ invariant(!ve.edges_visited[i][j]);
+ }
+ }
+ }
+}
+
+static void test_add_cycle_exists() {
+ wfg g;
+ g.create();
+
+ // test that adding edges works and is idempotent
+
+ g.add_edge(0, 1);
+ invariant(g.node_exists(0));
+ invariant(g.node_exists(1));
+ g.add_edge(1, 2);
+ invariant(g.node_exists(0));
+ invariant(g.node_exists(1));
+ invariant(g.node_exists(2));
+
+ // verify that adding edges with the same nodes
+ // does not store multiple nodes with the same txnid.
+ verify_only_nodes_012_exist(&g);
+ verify_only_edges_01_12_exist(&g);
+ g.add_edge(0, 1);
+ g.add_edge(1, 2);
+ verify_only_nodes_012_exist(&g);
+ verify_only_edges_01_12_exist(&g);
+
+ // confirm that no cycle exists from txnid 0 1 or 2
+ invariant(!g.cycle_exists_from_txnid(0));
+ invariant(!g.cycle_exists_from_txnid(1));
+ invariant(!g.cycle_exists_from_txnid(2));
+
+ // add 2,3 and 3,1. now there should be a cycle
+ // from 1 2 and 3 but not 0.
+ //
+    // 0 --> 1 --> 2
+    //       ^     |
+    //       |     v
+    //       +---- 3
+ g.add_edge(2, 3);
+ g.add_edge(3, 1);
+ invariant(!g.cycle_exists_from_txnid(0));
+ invariant(g.cycle_exists_from_txnid(1));
+ invariant(g.cycle_exists_from_txnid(2));
+ invariant(g.cycle_exists_from_txnid(3));
+
+ // add 2,4. should not have a cycle from 4, but yes from 2.
+ g.add_edge(2, 4);
+ invariant(!g.cycle_exists_from_txnid(4));
+ invariant(g.cycle_exists_from_txnid(2));
+
+ g.destroy();
+}
+
+static void test_find_cycles() {
+ wfg g;
+ g.create();
+
+ // TODO: verify that finding cycles works
+
+ g.destroy();
+}
+
+} /* namespace toku */
+
+int main(void) {
+ toku::test_add_cycle_exists();
+ toku::test_find_cycles();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/treenode.cc b/storage/tokudb/PerconaFT/locktree/treenode.cc
new file mode 100644
index 00000000..f328bf34
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/treenode.cc
@@ -0,0 +1,491 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_race_tools.h>
+
+// TODO: source location info might have to be pulled up one caller
+// to be useful
+void treenode::mutex_lock(void) { toku_mutex_lock(&m_mutex); }
+
+void treenode::mutex_unlock(void) {
+ toku_mutex_unlock(&m_mutex);
+}
+
+void treenode::init(const comparator *cmp) {
+ m_txnid = TXNID_NONE;
+ m_is_root = false;
+ m_is_empty = true;
+ m_cmp = cmp;
+ // use an adaptive mutex at each node since we expect the time the
+ // lock is held to be relatively short compared to a context switch.
+ // indeed, this improves performance at high thread counts considerably.
+ memset(&m_mutex, 0, sizeof(toku_mutex_t));
+ toku_pthread_mutexattr_t attr;
+ toku_mutexattr_init(&attr);
+ toku_mutexattr_settype(&attr, TOKU_MUTEX_ADAPTIVE);
+ toku_mutex_init(*treenode_mutex_key, &m_mutex, &attr);
+ toku_mutexattr_destroy(&attr);
+ m_left_child.set(nullptr);
+ m_right_child.set(nullptr);
+}
+
+void treenode::create_root(const comparator *cmp) {
+ init(cmp);
+ m_is_root = true;
+}
+
+void treenode::destroy_root(void) {
+ invariant(is_root());
+ invariant(is_empty());
+ toku_mutex_destroy(&m_mutex);
+ m_cmp = nullptr;
+}
+
+void treenode::set_range_and_txnid(const keyrange &range, TXNID txnid) {
+ // allocates a new copy of the range for this node
+ m_range.create_copy(range);
+ m_txnid = txnid;
+ m_is_empty = false;
+}
+
+bool treenode::is_root(void) {
+ return m_is_root;
+}
+
+bool treenode::is_empty(void) {
+ return m_is_empty;
+}
+
+bool treenode::range_overlaps(const keyrange &range) {
+ return m_range.overlaps(*m_cmp, range);
+}
+
+treenode *treenode::alloc(const comparator *cmp, const keyrange &range, TXNID txnid) {
+ treenode *XCALLOC(node);
+ node->init(cmp);
+ node->set_range_and_txnid(range, txnid);
+ return node;
+}
+
+void treenode::swap_in_place(treenode *node1, treenode *node2) {
+ keyrange tmp_range = node1->m_range;
+ TXNID tmp_txnid = node1->m_txnid;
+ node1->m_range = node2->m_range;
+ node1->m_txnid = node2->m_txnid;
+ node2->m_range = tmp_range;
+ node2->m_txnid = tmp_txnid;
+}
+
+void treenode::free(treenode *node) {
+ // destroy the range, freeing any copied keys
+ node->m_range.destroy();
+
+ // the root is simply marked as empty.
+ if (node->is_root()) {
+ toku_mutex_assert_locked(&node->m_mutex);
+ node->m_is_empty = true;
+ } else {
+ toku_mutex_assert_unlocked(&node->m_mutex);
+ toku_mutex_destroy(&node->m_mutex);
+ toku_free(node);
+ }
+}
+
+uint32_t treenode::get_depth_estimate(void) const {
+ const uint32_t left_est = m_left_child.depth_est;
+ const uint32_t right_est = m_right_child.depth_est;
+ return (left_est > right_est ? left_est : right_est) + 1;
+}
+
+treenode *treenode::find_node_with_overlapping_child(const keyrange &range,
+ const keyrange::comparison *cmp_hint) {
+
+ // determine which child to look at based on a comparison. if we were
+ // given a comparison hint, use that. otherwise, compare them now.
+ keyrange::comparison c = cmp_hint ? *cmp_hint : range.compare(*m_cmp, m_range);
+
+ treenode *child;
+ if (c == keyrange::comparison::LESS_THAN) {
+ child = lock_and_rebalance_left();
+ } else {
+ // The caller (locked_keyrange::acquire) handles the case where
+ // the root of the locked_keyrange is the node that overlaps.
+ // range is guaranteed not to overlap this node.
+ invariant(c == keyrange::comparison::GREATER_THAN);
+ child = lock_and_rebalance_right();
+ }
+
+ // if the search would lead us to an empty subtree (child == nullptr),
+ // or the child overlaps, then we know this node is the parent we want.
+ // otherwise we need to recur into that child.
+ if (child == nullptr) {
+ return this;
+ } else {
+ c = range.compare(*m_cmp, child->m_range);
+ if (c == keyrange::comparison::EQUALS || c == keyrange::comparison::OVERLAPS) {
+ child->mutex_unlock();
+ return this;
+ } else {
+ // unlock this node before recurring into the locked child,
+            // passing in a comparison hint since we just compared range
+ // to the child's range.
+ mutex_unlock();
+ return child->find_node_with_overlapping_child(range, &c);
+ }
+ }
+}
+
+template <class F>
+void treenode::traverse_overlaps(const keyrange &range, F *function) {
+ keyrange::comparison c = range.compare(*m_cmp, m_range);
+ if (c == keyrange::comparison::EQUALS) {
+ // Doesn't matter if fn wants to keep going, there
+ // is nothing left, so return.
+ function->fn(m_range, m_txnid);
+ return;
+ }
+
+ treenode *left = m_left_child.get_locked();
+ if (left) {
+ if (c != keyrange::comparison::GREATER_THAN) {
+ // Target range is less than this node, or it overlaps this
+ // node. There may be something on the left.
+ left->traverse_overlaps(range, function);
+ }
+ left->mutex_unlock();
+ }
+
+ if (c == keyrange::comparison::OVERLAPS) {
+ bool keep_going = function->fn(m_range, m_txnid);
+ if (!keep_going) {
+ return;
+ }
+ }
+
+ treenode *right = m_right_child.get_locked();
+ if (right) {
+ if (c != keyrange::comparison::LESS_THAN) {
+ // Target range is greater than this node, or it overlaps this
+ // node. There may be something on the right.
+ right->traverse_overlaps(range, function);
+ }
+ right->mutex_unlock();
+ }
+}
+
+void treenode::insert(const keyrange &range, TXNID txnid) {
+ // choose a child to check. if that child is null, then insert the new node there.
+ // otherwise recur down that child's subtree
+ keyrange::comparison c = range.compare(*m_cmp, m_range);
+ if (c == keyrange::comparison::LESS_THAN) {
+ treenode *left_child = lock_and_rebalance_left();
+ if (left_child == nullptr) {
+ left_child = treenode::alloc(m_cmp, range, txnid);
+ m_left_child.set(left_child);
+ } else {
+ left_child->insert(range, txnid);
+ left_child->mutex_unlock();
+ }
+ } else {
+ invariant(c == keyrange::comparison::GREATER_THAN);
+ treenode *right_child = lock_and_rebalance_right();
+ if (right_child == nullptr) {
+ right_child = treenode::alloc(m_cmp, range, txnid);
+ m_right_child.set(right_child);
+ } else {
+ right_child->insert(range, txnid);
+ right_child->mutex_unlock();
+ }
+ }
+}
+
+treenode *treenode::find_child_at_extreme(int direction, treenode **parent) {
+ treenode *child = direction > 0 ?
+ m_right_child.get_locked() : m_left_child.get_locked();
+
+ if (child) {
+ *parent = this;
+ treenode *child_extreme = child->find_child_at_extreme(direction, parent);
+ child->mutex_unlock();
+ return child_extreme;
+ } else {
+ return this;
+ }
+}
+
+treenode *treenode::find_leftmost_child(treenode **parent) {
+ return find_child_at_extreme(-1, parent);
+}
+
+treenode *treenode::find_rightmost_child(treenode **parent) {
+ return find_child_at_extreme(1, parent);
+}
+
+treenode *treenode::remove_root_of_subtree() {
+ // if this node has no children, just free it and return null
+ if (m_left_child.ptr == nullptr && m_right_child.ptr == nullptr) {
+ // treenode::free requires that non-root nodes are unlocked
+ if (!is_root()) {
+ mutex_unlock();
+ }
+ treenode::free(this);
+ return nullptr;
+ }
+
+ // we have a child, so get either the in-order successor or
+ // predecessor of this node to be our replacement.
+ // replacement_parent is updated by the find functions as
+ // they recur down the tree, so initialize it to this.
+ treenode *child, *replacement;
+ treenode *replacement_parent = this;
+ if (m_left_child.ptr != nullptr) {
+ child = m_left_child.get_locked();
+ replacement = child->find_rightmost_child(&replacement_parent);
+ invariant(replacement == child || replacement_parent != this);
+
+ // detach the replacement from its parent
+ if (replacement_parent == this) {
+ m_left_child = replacement->m_left_child;
+ } else {
+ replacement_parent->m_right_child = replacement->m_left_child;
+ }
+ } else {
+ child = m_right_child.get_locked();
+ replacement = child->find_leftmost_child(&replacement_parent);
+ invariant(replacement == child || replacement_parent != this);
+
+ // detach the replacement from its parent
+ if (replacement_parent == this) {
+ m_right_child = replacement->m_right_child;
+ } else {
+ replacement_parent->m_left_child = replacement->m_right_child;
+ }
+ }
+ child->mutex_unlock();
+
+ // swap in place with the detached replacement, then destroy it
+ treenode::swap_in_place(replacement, this);
+ treenode::free(replacement);
+
+ return this;
+}
+
+void treenode::recursive_remove(void) {
+ treenode *left = m_left_child.ptr;
+ if (left) {
+ left->recursive_remove();
+ }
+ m_left_child.set(nullptr);
+
+ treenode *right = m_right_child.ptr;
+ if (right) {
+ right->recursive_remove();
+ }
+ m_right_child.set(nullptr);
+
+ // we do not take locks on the way down, so we know non-root nodes
+ // are unlocked here and the caller is required to pass a locked
+ // root, so this free is correct.
+ treenode::free(this);
+}
+
+treenode *treenode::remove(const keyrange &range) {
+ treenode *child;
+ // if the range is equal to this node's range, then just remove
+ // the root of this subtree. otherwise search down the tree
+ // in either the left or right children.
+ keyrange::comparison c = range.compare(*m_cmp, m_range);
+ switch (c) {
+ case keyrange::comparison::EQUALS:
+ return remove_root_of_subtree();
+ case keyrange::comparison::LESS_THAN:
+ child = m_left_child.get_locked();
+ invariant_notnull(child);
+ child = child->remove(range);
+
+ // unlock the child if there still is one.
+        // regardless, set the left child pointer
+ if (child) {
+ child->mutex_unlock();
+ }
+ m_left_child.set(child);
+ break;
+ case keyrange::comparison::GREATER_THAN:
+ child = m_right_child.get_locked();
+ invariant_notnull(child);
+ child = child->remove(range);
+
+ // unlock the child if there still is one.
+ // regardless, set the right child pointer
+ if (child) {
+ child->mutex_unlock();
+ }
+ m_right_child.set(child);
+ break;
+ case keyrange::comparison::OVERLAPS:
+ // shouldn't be overlapping, since the tree is
+ // non-overlapping and this range must exist
+ abort();
+ }
+
+ return this;
+}
+
+bool treenode::left_imbalanced(int threshold) const {
+ uint32_t left_depth = m_left_child.depth_est;
+ uint32_t right_depth = m_right_child.depth_est;
+ return m_left_child.ptr != nullptr && left_depth > threshold + right_depth;
+}
+
+bool treenode::right_imbalanced(int threshold) const {
+ uint32_t left_depth = m_left_child.depth_est;
+ uint32_t right_depth = m_right_child.depth_est;
+ return m_right_child.ptr != nullptr && right_depth > threshold + left_depth;
+}
+
+// effect: rebalances the subtree rooted at this node
+// using AVL style O(1) rotations. unlocks this
+// node if it is not the new root of the subtree.
+// requires: node is locked by this thread, children are not
+// returns: locked root node of the rebalanced tree
+treenode *treenode::maybe_rebalance(void) {
+ // if we end up not rotating at all, the new root is this
+ treenode *new_root = this;
+ treenode *child = nullptr;
+
+ if (left_imbalanced(IMBALANCE_THRESHOLD)) {
+ child = m_left_child.get_locked();
+ if (child->right_imbalanced(0)) {
+ treenode *grandchild = child->m_right_child.get_locked();
+
+ child->m_right_child = grandchild->m_left_child;
+ grandchild->m_left_child.set(child);
+
+ m_left_child = grandchild->m_right_child;
+ grandchild->m_right_child.set(this);
+
+ new_root = grandchild;
+ } else {
+ m_left_child = child->m_right_child;
+ child->m_right_child.set(this);
+ new_root = child;
+ }
+ } else if (right_imbalanced(IMBALANCE_THRESHOLD)) {
+ child = m_right_child.get_locked();
+ if (child->left_imbalanced(0)) {
+ treenode *grandchild = child->m_left_child.get_locked();
+
+ child->m_left_child = grandchild->m_right_child;
+ grandchild->m_right_child.set(child);
+
+ m_right_child = grandchild->m_left_child;
+ grandchild->m_left_child.set(this);
+
+ new_root = grandchild;
+ } else {
+ m_right_child = child->m_left_child;
+ child->m_left_child.set(this);
+ new_root = child;
+ }
+ }
+
+ // up to three nodes may be locked.
+ // - this
+ // - child
+    // - grandchild (but if it is locked, it's the new root)
+ //
+ // one of them is the new root. we unlock everything except the new root.
+ if (child && child != new_root) {
+ TOKU_VALGRIND_RESET_MUTEX_ORDERING_INFO(&child->m_mutex);
+ child->mutex_unlock();
+ }
+ if (this != new_root) {
+ TOKU_VALGRIND_RESET_MUTEX_ORDERING_INFO(&m_mutex);
+ mutex_unlock();
+ }
+ TOKU_VALGRIND_RESET_MUTEX_ORDERING_INFO(&new_root->m_mutex);
+ return new_root;
+}
+
+
+treenode *treenode::lock_and_rebalance_left(void) {
+ treenode *child = m_left_child.get_locked();
+ if (child) {
+ treenode *new_root = child->maybe_rebalance();
+ m_left_child.set(new_root);
+ child = new_root;
+ }
+ return child;
+}
+
+treenode *treenode::lock_and_rebalance_right(void) {
+ treenode *child = m_right_child.get_locked();
+ if (child) {
+ treenode *new_root = child->maybe_rebalance();
+ m_right_child.set(new_root);
+ child = new_root;
+ }
+ return child;
+}
+
+void treenode::child_ptr::set(treenode *node) {
+ ptr = node;
+ depth_est = ptr ? ptr->get_depth_estimate() : 0;
+}
+
+treenode *treenode::child_ptr::get_locked(void) {
+ if (ptr) {
+ ptr->mutex_lock();
+ depth_est = ptr->get_depth_estimate();
+ }
+ return ptr;
+}
diff --git a/storage/tokudb/PerconaFT/locktree/treenode.h b/storage/tokudb/PerconaFT/locktree/treenode.h
new file mode 100644
index 00000000..fb2adc47
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/treenode.h
@@ -0,0 +1,245 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <string.h>
+
+#include "portability/memory.h"
+#include "portability/toku_pthread.h"
+
+#include "ft/comparator.h"
+#include "ft/txn/txn.h"
+#include "locktree/keyrange.h"
+
+namespace toku {
+
+// a node in a tree with its own mutex
+// - range is the "key" of this node
+// - txnid is the single txnid associated with this node
+// - left and right children may be null
+//
+// to build a tree on top of this abstraction, the user:
+// - provides memory for a root node, initializes it via create_root()
+// - performs tree operations on the root node. memory management
+// below the root node is handled by the abstraction, not the user.
+// this pattern:
+// - guarantees a root node always exists.
+// - does not allow for rebalances on the root node
+
+class treenode {
+public:
+
+ // every treenode function has some common requirements:
+ // - node is locked and children are never locked
+ // - node may be unlocked if no other thread has visibility
+
+ // effect: create the root node
+ void create_root(const comparator *cmp);
+
+ // effect: destroys the root node
+ void destroy_root(void);
+
+ // effect: sets the txnid and copies the given range for this node
+ void set_range_and_txnid(const keyrange &range, TXNID txnid);
+
+ // returns: true iff this node is marked as empty
+ bool is_empty(void);
+
+ // returns: true if this is the root node, denoted by a null parent
+ bool is_root(void);
+
+ // returns: true if the given range overlaps with this node's range
+ bool range_overlaps(const keyrange &range);
+
+ // effect: locks the node
+ void mutex_lock(void);
+
+ // effect: unlocks the node
+ void mutex_unlock(void);
+
+    // return: the node whose child overlaps the given range, or the node
+    //         whose child slot is empty and would contain the range if it existed
+ // given: if cmp_hint is non-null, then it is a precomputed
+ // comparison of this node's range to the given range.
+ treenode *find_node_with_overlapping_child(const keyrange &range,
+ const keyrange::comparison *cmp_hint);
+
+ // effect: performs an in-order traversal of the ranges that overlap the
+ // given range, calling function->fn() on each node that does
+ // requires: function signature is: bool fn(const keyrange &range, TXNID txnid)
+ // requires: fn returns true to keep iterating, false to stop iterating
+ // requires: fn does not attempt to use any ranges read out by value
+ // after removing a node with an overlapping range from the tree.
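+    //
+    // e.g., a functor with the expected shape (for illustration only):
+    //
+    //     struct count_overlaps_fn {
+    //         int count = 0;
+    //         bool fn(const keyrange &range, TXNID txnid) {
+    //             (void) range;
+    //             (void) txnid;
+    //             count++;
+    //             return true;   // keep iterating over every overlap
+    //         }
+    //     };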
+ template <class F>
+ void traverse_overlaps(const keyrange &range, F *function);
+
+ // effect: inserts the given range and txnid into a subtree, recursively
+ // requires: range does not overlap with any node below the subtree
+ void insert(const keyrange &range, TXNID txnid);
+
+ // effect: removes the given range from the subtree
+ // requires: range exists in the subtree
+ // returns: the root of the resulting subtree
+ treenode *remove(const keyrange &range);
+
+ // effect: removes this node and all of its children, recursively
+ // requires: every node at and below this node is unlocked
+ void recursive_remove(void);
+
+private:
+
+ // the child_ptr is a light abstraction for the locking of
+    // a child and the maintenance of its depth estimate.
+
+ struct child_ptr {
+ // set the child pointer
+ void set(treenode *node);
+
+ // get and lock this child if it exists
+ treenode *get_locked(void);
+
+ treenode *ptr;
+ uint32_t depth_est;
+ };
+
+ // the balance factor at which a node is considered imbalanced
+ static const int32_t IMBALANCE_THRESHOLD = 2;
+
+ // node-level mutex
+ toku_mutex_t m_mutex;
+
+ // the range and txnid for this node. the range contains a copy
+ // of the keys originally inserted into the tree. nodes may
+ // swap ranges. but at the end of the day, when a node is
+ // destroyed, it frees the memory associated with whatever range
+ // it has at the time of destruction.
+ keyrange m_range;
+ TXNID m_txnid;
+
+ // two child pointers
+ child_ptr m_left_child;
+ child_ptr m_right_child;
+
+ // comparator for ranges
+ const comparator *m_cmp;
+
+ // marked for the root node. the root node is never free()'d
+ // when removed, but instead marked as empty.
+ bool m_is_root;
+
+ // marked for an empty node. only valid for the root.
+ bool m_is_empty;
+
+ // effect: initializes an empty node with the given comparator
+ void init(const comparator *cmp);
+
+ // requires: *parent is initialized to something meaningful.
+ // requires: subtree is non-empty
+ // returns: the leftmost child of the given subtree
+ // returns: a pointer to the parent of said child in *parent, only
+ // if this function recurred, otherwise it is untouched.
+ treenode *find_leftmost_child(treenode **parent);
+
+ // requires: *parent is initialized to something meaningful.
+ // requires: subtree is non-empty
+ // returns: the rightmost child of the given subtree
+ // returns: a pointer to the parent of said child in *parent, only
+ // if this function recurred, otherwise it is untouched.
+ treenode *find_rightmost_child(treenode **parent);
+
+ // effect: remove the root of this subtree, destroying the old root
+ // returns: the new root of the subtree
+ treenode *remove_root_of_subtree(void);
+
+ // requires: subtree is non-empty, direction is not 0
+ // returns: the child of the subtree at either the left or rightmost extreme
+ treenode *find_child_at_extreme(int direction, treenode **parent);
+
+ // effect: retrieves and possibly rebalances the left child
+ // returns: a locked left child, if it exists
+ treenode *lock_and_rebalance_left(void);
+
+ // effect: retrieves and possibly rebalances the right child
+ // returns: a locked right child, if it exists
+ treenode *lock_and_rebalance_right(void);
+
+ // returns: the estimated depth of this subtree
+ uint32_t get_depth_estimate(void) const;
+
+ // returns: true iff left subtree depth is sufficiently less than the right
+ bool left_imbalanced(int threshold) const;
+
+ // returns: true iff right subtree depth is sufficiently greater than the left
+ bool right_imbalanced(int threshold) const;
+
+ // effect: performs an O(1) rebalance, which will "heal" an imbalance by at most 1.
+ // effect: if the new root is not this node, then this node is unlocked.
+ // returns: locked node representing the new root of the rebalanced subtree
+ treenode *maybe_rebalance(void);
+
+ // returns: allocated treenode populated with a copy of the range and txnid
+ static treenode *alloc(const comparator *cmp, const keyrange &range, TXNID txnid);
+
+ // requires: node is a locked root node, or an unlocked non-root node
+ static void free(treenode *node);
+
+ // effect: swaps the range/txnid pairs for node1 and node2.
+ static void swap_in_place(treenode *node1, treenode *node2);
+
+ friend class concurrent_tree_unit_test;
+};
+
+// include the implementation here so we can use templated member functions
+#include "treenode.cc"
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/txnid_set.cc b/storage/tokudb/PerconaFT/locktree/txnid_set.cc
new file mode 100644
index 00000000..f5ed6b2e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/txnid_set.cc
@@ -0,0 +1,116 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+#include "txnid_set.h"
+
+namespace toku {
+
+int find_by_txnid(const TXNID &txnid_a, const TXNID &txnid_b);
+int find_by_txnid(const TXNID &txnid_a, const TXNID &txnid_b) {
+ if (txnid_a < txnid_b) {
+ return -1;
+ } else if (txnid_a == txnid_b) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+void txnid_set::create(void) {
+ // lazily allocate the underlying omt, since it is common
+ // to create a txnid set and never put anything in it.
+ m_txnids.create_no_array();
+}
+
+void txnid_set::destroy(void) {
+ m_txnids.destroy();
+}
+
+// Return true if the given transaction id is a member of the set.
+// Otherwise, return false.
+bool txnid_set::contains(TXNID txnid) const {
+ TXNID find_txnid;
+ int r = m_txnids.find_zero<TXNID, find_by_txnid>(txnid, &find_txnid, nullptr);
+ return r == 0 ? true : false;
+}
+
+// Add a given txnid to the set
+void txnid_set::add(TXNID txnid) {
+ int r = m_txnids.insert<TXNID, find_by_txnid>(txnid, txnid, nullptr);
+ invariant(r == 0 || r == DB_KEYEXIST);
+}
+
+// Delete a given txnid from the set.
+void txnid_set::remove(TXNID txnid) {
+ uint32_t idx;
+ int r = m_txnids.find_zero<TXNID, find_by_txnid>(txnid, nullptr, &idx);
+ if (r == 0) {
+ r = m_txnids.delete_at(idx);
+ invariant_zero(r);
+ }
+}
+
+// Return the size of the set
+size_t txnid_set::size(void) const {
+ return m_txnids.size();
+}
+
+// Get the ith id in the set, assuming that the set is sorted.
+TXNID txnid_set::get(size_t i) const {
+ TXNID txnid;
+ int r = m_txnids.fetch(i, &txnid);
+ invariant_zero(r);
+ return txnid;
+}
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/txnid_set.h b/storage/tokudb/PerconaFT/locktree/txnid_set.h
new file mode 100644
index 00000000..70b0ce76
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/txnid_set.h
@@ -0,0 +1,92 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "ft/txn/txn.h"
+
+#include "util/omt.h"
+
+namespace toku {
+
+class txnid_set {
+public:
+ // effect: Creates an empty set. Does not malloc space for
+ // any entries yet. That is done lazily on add().
+ void create(void);
+
+ // effect: Destroy the set's internals.
+ void destroy(void);
+
+ // returns: True if the given txnid is a member of the set.
+ bool contains(TXNID id) const;
+
+ // effect: Adds a given txnid to the set if it did not exist
+ void add(TXNID txnid);
+
+ // effect: Deletes a txnid from the set if it exists.
+ void remove(TXNID txnid);
+
+ // returns: Size of the set
+ size_t size(void) const;
+
+ // returns: The "i'th" id in the set, as if it were sorted.
+ TXNID get(size_t i) const;
+
+private:
+ toku::omt<TXNID> m_txnids;
+
+ friend class txnid_set_unit_test;
+};
+ENSURE_POD(txnid_set);
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/wfg.cc b/storage/tokudb/PerconaFT/locktree/wfg.cc
new file mode 100644
index 00000000..8fd079be
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/wfg.cc
@@ -0,0 +1,202 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+#include <toku_assert.h>
+#include <memory.h>
+#include <string.h>
+
+#include "txnid_set.h"
+#include "wfg.h"
+
+namespace toku {
+
+// Create a lock request graph
+void wfg::create(void) {
+ m_nodes.create();
+}
+
+// Destroy the internals of the lock request graph
+void wfg::destroy(void) {
+ size_t n_nodes = m_nodes.size();
+ for (size_t i = 0; i < n_nodes; i++) {
+ node *n;
+ int r = m_nodes.fetch(i, &n);
+ invariant_zero(r);
+ invariant_notnull(n);
+ node::free(n);
+ }
+ m_nodes.destroy();
+}
+
+// Add an edge (a_id, b_id) to the graph
+void wfg::add_edge(TXNID a_txnid, TXNID b_txnid) {
+ node *a_node = find_create_node(a_txnid);
+ node *b_node = find_create_node(b_txnid);
+ a_node->edges.add(b_node->txnid);
+}
+
+// Return true if a node with the given transaction id exists in the graph.
+// Return false otherwise.
+bool wfg::node_exists(TXNID txnid) {
+ node *n = find_node(txnid);
+ return n != NULL;
+}
+
+bool wfg::cycle_exists_from_node(node *target, node *head) {
+ bool cycle_found = false;
+ head->visited = true;
+ size_t n_edges = head->edges.size();
+ for (size_t i = 0; i < n_edges && !cycle_found; i++) {
+ TXNID edge_id = head->edges.get(i);
+ if (target->txnid == edge_id) {
+ cycle_found = true;
+ } else {
+ node *new_head = find_node(edge_id);
+ if (new_head && !new_head->visited) {
+ cycle_found = cycle_exists_from_node(target, new_head);
+ }
+ }
+ }
+ head->visited = false;
+ return cycle_found;
+}
+
+// Return true if there exists a cycle from a given transaction id in the graph.
+// Return false otherwise.
+bool wfg::cycle_exists_from_txnid(TXNID txnid) {
+ node *a_node = find_node(txnid);
+ bool cycles_found = false;
+ if (a_node) {
+ cycles_found = cycle_exists_from_node(a_node, a_node);
+ }
+ return cycles_found;
+}
+
+// Apply a given function f to all of the nodes in the graph. The apply function
+// returns when the function f is called for all of the nodes in the graph, or the
+// function f returns non-zero.
+void wfg::apply_nodes(int (*fn)(TXNID id, void *extra), void *extra) {
+ int r = 0;
+ size_t n_nodes = m_nodes.size();
+ for (size_t i = 0; i < n_nodes && r == 0; i++) {
+ node *n;
+ r = m_nodes.fetch(i, &n);
+ invariant_zero(r);
+ r = fn(n->txnid, extra);
+ }
+}
+
+// Apply a given function f to all of the edges whose origin is a given node id.
+// The apply function returns when the function f is called for all edges in the
+// graph rooted at node id, or the function f returns non-zero.
+void wfg::apply_edges(TXNID txnid,
+ int (*fn)(TXNID txnid, TXNID edge_txnid, void *extra), void *extra) {
+ node *n = find_node(txnid);
+ if (n) {
+ int r = 0;
+ size_t n_edges = n->edges.size();
+ for (size_t i = 0; i < n_edges && r == 0; i++) {
+ r = fn(txnid, n->edges.get(i), extra);
+ }
+ }
+}
+
+// find node by id
+wfg::node *wfg::find_node(TXNID txnid) {
+ node *n = nullptr;
+ int r = m_nodes.find_zero<TXNID, find_by_txnid>(txnid, &n, nullptr);
+ invariant(r == 0 || r == DB_NOTFOUND);
+ return n;
+}
+
+// this is the omt comparison function
+// nodes are compared by their txnid.
+int wfg::find_by_txnid(node *const &node_a, const TXNID &txnid_b) {
+ TXNID txnid_a = node_a->txnid;
+ if (txnid_a < txnid_b) {
+ return -1;
+ } else if (txnid_a == txnid_b) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+// insert a new node
+wfg::node *wfg::find_create_node(TXNID txnid) {
+ node *n;
+ uint32_t idx;
+ int r = m_nodes.find_zero<TXNID, find_by_txnid>(txnid, &n, &idx);
+ if (r == DB_NOTFOUND) {
+ n = node::alloc(txnid);
+ r = m_nodes.insert_at(n, idx);
+ invariant_zero(r);
+ }
+ invariant_notnull(n);
+ return n;
+}
+
+wfg::node *wfg::node::alloc(TXNID txnid) {
+ node *XCALLOC(n);
+ n->txnid = txnid;
+ n->visited = false;
+ n->edges.create();
+ return n;
+}
+
+void wfg::node::free(wfg::node *n) {
+ n->edges.destroy();
+ toku_free(n);
+}
+
+} /* namespace toku */
diff --git a/storage/tokudb/PerconaFT/locktree/wfg.h b/storage/tokudb/PerconaFT/locktree/wfg.h
new file mode 100644
index 00000000..32f14e4b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/locktree/wfg.h
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "locktree/txnid_set.h"
+#include "util/omt.h"
+
+namespace toku {
+
+// A wfg is a 'wait-for' graph. A directed edge represents one
+// txn waiting for another to finish before it can acquire a lock.
+
+class wfg {
+public:
+ // Create a lock request graph
+ void create(void);
+
+ // Destroy the internals of the lock request graph
+ void destroy(void);
+
+ // Add an edge (a_id, b_id) to the graph
+ void add_edge(TXNID a_txnid, TXNID b_txnid);
+
+ // Return true if a node with the given transaction id exists in the graph.
+ // Return false otherwise.
+ bool node_exists(TXNID txnid);
+
+ // Return true if there exists a cycle from a given transaction id in the graph.
+ // Return false otherwise.
+ bool cycle_exists_from_txnid(TXNID txnid);
+
+ // Apply a given function f to all of the nodes in the graph. The apply function
+ // returns when the function f is called for all of the nodes in the graph, or the
+ // function f returns non-zero.
+ void apply_nodes(int (*fn)(TXNID txnid, void *extra), void *extra);
+
+ // Apply a given function f to all of the edges whose origin is a given node id.
+ // The apply function returns when the function f is called for all edges in the
+ // graph rooted at node id, or the function f returns non-zero.
+ void apply_edges(TXNID txnid,
+ int (*fn)(TXNID txnid, TXNID edge_txnid, void *extra), void *extra);
+
+private:
+ struct node {
+ // txnid for this node and the associated set of edges
+ TXNID txnid;
+ txnid_set edges;
+ bool visited;
+
+ static node *alloc(TXNID txnid);
+
+ static void free(node *n);
+ };
+ ENSURE_POD(node);
+
+ toku::omt<node *> m_nodes;
+
+ node *find_node(TXNID txnid);
+
+ node *find_create_node(TXNID txnid);
+
+ bool cycle_exists_from_node(node *target, node *head);
+
+ static int find_by_txnid(node *const &node_a, const TXNID &txnid_b);
+};
+ENSURE_POD(wfg);
+
+} /* namespace toku */
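A minimal usage sketch (illustrative, not part of the patch) of the interface declared above; the transaction ids are made up, and the real callers live in the lock request code:

    toku::wfg graph;
    graph.create();
    graph.add_edge(10, 20);   // txn 10 waits for txn 20
    graph.add_edge(20, 10);   // txn 20 waits for txn 10, closing a cycle
    if (graph.cycle_exists_from_txnid(10)) {
        // a deadlock involving txn 10 exists; the caller would pick a victim here
    }
    graph.destroy();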
diff --git a/storage/tokudb/PerconaFT/portability/CMakeLists.txt b/storage/tokudb/PerconaFT/portability/CMakeLists.txt
new file mode 100644
index 00000000..e5576a5d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/CMakeLists.txt
@@ -0,0 +1,63 @@
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+set(tokuportability_srcs
+ huge_page_detection
+ file
+ memory
+ os_malloc
+ portability
+ toku_assert
+ toku_crash
+ toku_instr_mysql
+ toku_path
+ toku_pthread
+ toku_time
+ )
+
+add_library(${LIBTOKUPORTABILITY} SHARED ${tokuportability_srcs})
+target_link_libraries(${LIBTOKUPORTABILITY} LINK_PUBLIC ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
+
+add_library(tokuportability_static_conv STATIC ${tokuportability_srcs})
+set_target_properties(tokuportability_static_conv PROPERTIES POSITION_INDEPENDENT_CODE ON)
+set(tokuportability_source_libs tokuportability_static_conv ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
+toku_merge_static_libs(${LIBTOKUPORTABILITY}_static ${LIBTOKUPORTABILITY}_static "${tokuportability_source_libs}")
+
+maybe_add_gcov_to_libraries(${LIBTOKUPORTABILITY} tokuportability_static_conv)
+set_property(TARGET ${LIBTOKUPORTABILITY} tokuportability_static_conv APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE)
+
+set_property(SOURCE file memory os_malloc portability toku_assert toku_rwlock APPEND PROPERTY
+ COMPILE_DEFINITIONS TOKU_ALLOW_DEPRECATED=1)
+
+configure_file(toku_config.h.in toku_config.h)
+add_custom_target(generate_config_h DEPENDS
+ "${CMAKE_CURRENT_BINARY_DIR}/toku_config.h")
+
+# detect when we are being built as a subproject
+if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
+ install(
+ FILES toku_os_types.h toku_time.h toku_list.h toku_os.h
+ DESTINATION include
+ COMPONENT tokukv_headers
+ )
+ install(
+ FILES "${CMAKE_CURRENT_BINARY_DIR}/toku_config.h"
+ DESTINATION include
+ COMPONENT tokukv_headers
+ )
+ install(
+ TARGETS ${LIBTOKUPORTABILITY}_static
+ DESTINATION ${INSTALL_LIBDIR}
+ COMPONENT tokukv_libs_static
+ )
+ install(
+ TARGETS ${LIBTOKUPORTABILITY}
+ DESTINATION ${INSTALL_LIBDIR}
+ COMPONENT tokukv_libs_shared
+ )
+else ()
+ set_property(SOURCE toku_pthread portability APPEND PROPERTY
+ COMPILE_DEFINITIONS MYSQL_TOKUDB_ENGINE=1 )
+ target_link_libraries(${LIBTOKUPORTABILITY} LINK_PRIVATE mysys)
+endif ()
+
+add_subdirectory(tests)
diff --git a/storage/tokudb/PerconaFT/portability/file.cc b/storage/tokudb/PerconaFT/portability/file.cc
new file mode 100644
index 00000000..485bfac8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/file.cc
@@ -0,0 +1,821 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <unistd.h>
+#include <errno.h>
+#include <toku_assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "memory.h"
+#include "toku_time.h"
+#include "toku_path.h"
+#include <portability/toku_atomic.h>
+
+toku_instr_key *tokudb_file_data_key;
+
+static int toku_assert_on_write_enospc = 0;
+static const int toku_write_enospc_sleep = 1;
+static uint64_t toku_write_enospc_last_report; // timestamp of most recent
+ // report to error log
+static time_t toku_write_enospc_last_time; // timestamp of most recent ENOSPC
+static uint32_t toku_write_enospc_current; // number of threads currently blocked on ENOSPC
+static uint64_t toku_write_enospc_total; // total number of times ENOSPC was returned from an attempt to write
+
+void toku_set_assert_on_write_enospc(int do_assert) {
+ toku_assert_on_write_enospc = do_assert;
+}
+
+void toku_fs_get_write_info(time_t *enospc_last_time, uint64_t *enospc_current, uint64_t *enospc_total) {
+ *enospc_last_time = toku_write_enospc_last_time;
+ *enospc_current = toku_write_enospc_current;
+ *enospc_total = toku_write_enospc_total;
+}
+
+// Print any necessary errors.
+// Asserts that the write should be retried, aborting on errors that cannot be retried.
+static void
+try_again_after_handling_write_error(int fd, size_t len, ssize_t r_write) {
+ int try_again = 0;
+
+ assert(r_write < 0);
+ int errno_write = get_error_errno();
+ switch (errno_write) {
+ case EINTR: { //The call was interrupted by a signal before any data was written; see signal(7).
+ char err_msg[sizeof("Write of [] bytes to fd=[] interrupted. Retrying.") + 20+10]; //64 bit is 20 chars, 32 bit is 10 chars
+ snprintf(err_msg, sizeof(err_msg), "Write of [%" PRIu64 "] bytes to fd=[%d] interrupted. Retrying.", (uint64_t)len, fd);
+ perror(err_msg);
+ fflush(stderr);
+ try_again = 1;
+ break;
+ }
+ case ENOSPC: {
+ if (toku_assert_on_write_enospc) {
+ char err_msg[sizeof("Failed write of [] bytes to fd=[].") + 20+10]; //64 bit is 20 chars, 32 bit is 10 chars
+ snprintf(err_msg, sizeof(err_msg), "Failed write of [%" PRIu64 "] bytes to fd=[%d].", (uint64_t)len, fd);
+ perror(err_msg);
+ fflush(stderr);
+ int out_of_disk_space = 1;
+ assert(!out_of_disk_space); //Give an error message that might be useful if this is the only one that survives.
+ } else {
+ toku_sync_fetch_and_add(&toku_write_enospc_total, 1);
+ toku_sync_fetch_and_add(&toku_write_enospc_current, 1);
+
+ time_t tnow = time(0);
+ toku_write_enospc_last_time = tnow;
+ if (toku_write_enospc_last_report == 0 || tnow - toku_write_enospc_last_report >= 60) {
+ toku_write_enospc_last_report = tnow;
+
+ const int tstr_length = 26;
+ char tstr[tstr_length];
+ time_t t = time(0);
+ ctime_r(&t, tstr);
+
+ const int MY_MAX_PATH = 256;
+ char fname[MY_MAX_PATH], symname[MY_MAX_PATH+1];
+ sprintf(fname, "/proc/%d/fd/%d", getpid(), fd);
+ ssize_t n = readlink(fname, symname, MY_MAX_PATH);
+
+ if ((int)n == -1)
+ fprintf(stderr, "%.24s PerconaFT No space when writing %" PRIu64 " bytes to fd=%d ", tstr, (uint64_t) len, fd);
+ else {
+ symname[n] = 0; // readlink doesn't append a NUL to the end of the buffer.
+ fprintf(stderr, "%.24s PerconaFT No space when writing %" PRIu64 " bytes to %*s ", tstr, (uint64_t) len, (int) n, symname);
+ }
+ fprintf(stderr, "retry in %d second%s\n", toku_write_enospc_sleep, toku_write_enospc_sleep > 1 ? "s" : "");
+ fflush(stderr);
+ }
+ sleep(toku_write_enospc_sleep);
+ try_again = 1;
+ toku_sync_fetch_and_sub(&toku_write_enospc_current, 1);
+ break;
+ }
+ }
+ default:
+ break;
+ }
+ assert(try_again);
+ errno = errno_write;
+}
+
+static ssize_t (*t_write)(int, const void *, size_t);
+static ssize_t (*t_full_write)(int, const void *, size_t);
+static ssize_t (*t_pwrite)(int, const void *, size_t, off_t);
+static ssize_t (*t_full_pwrite)(int, const void *, size_t, off_t);
+static FILE * (*t_fdopen)(int, const char *);
+static FILE * (*t_fopen)(const char *, const char *);
+static int (*t_open)(const char *, int, int);
+static int (*t_fclose)(FILE *);
+static ssize_t (*t_read)(int, void *, size_t);
+static ssize_t (*t_pread)(int, void *, size_t, off_t);
+static size_t (*os_fwrite_fun)(const void *, size_t, size_t, FILE *) = nullptr;
+
+void toku_set_func_fwrite(
+ size_t (*fwrite_fun)(const void *, size_t, size_t, FILE *)) {
+ os_fwrite_fun = fwrite_fun;
+}
+
+void toku_set_func_write(ssize_t (*write_fun)(int, const void *, size_t)) {
+ t_write = write_fun;
+}
+
+void toku_set_func_full_write (ssize_t (*write_fun)(int, const void *, size_t)) {
+ t_full_write = write_fun;
+}
+
+void toku_set_func_pwrite (ssize_t (*pwrite_fun)(int, const void *, size_t, off_t)) {
+ t_pwrite = pwrite_fun;
+}
+
+void toku_set_func_full_pwrite (ssize_t (*pwrite_fun)(int, const void *, size_t, off_t)) {
+ t_full_pwrite = pwrite_fun;
+}
+
+void toku_set_func_fdopen(FILE * (*fdopen_fun)(int, const char *)) {
+ t_fdopen = fdopen_fun;
+}
+
+void toku_set_func_fopen(FILE * (*fopen_fun)(const char *, const char *)) {
+ t_fopen = fopen_fun;
+}
+
+void toku_set_func_open(int (*open_fun)(const char *, int, int)) {
+ t_open = open_fun;
+}
+
+void toku_set_func_fclose(int (*fclose_fun)(FILE*)) {
+ t_fclose = fclose_fun;
+}
+
+void toku_set_func_read (ssize_t (*read_fun)(int, void *, size_t)) {
+ t_read = read_fun;
+}
+
+void toku_set_func_pread (ssize_t (*pread_fun)(int, void *, size_t, off_t)) {
+ t_pread = pread_fun;
+}
+
+int toku_os_delete_with_source_location(const char *name,
+ const char *src_file,
+ uint src_line) {
+
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_name_close_begin(io_annotation,
+ *tokudb_file_data_key,
+ toku_instr_file_op::file_delete,
+ name,
+ src_file,
+ src_line);
+ const int result = unlink(name);
+
+ /* Register the result value with the instrumentation system */
+ toku_instr_file_close_end(io_annotation, result);
+
+ return result;
+}
+
+int toku_os_rename_with_source_location(const char *old_name,
+ const char *new_name,
+ const char *src_file,
+ uint src_line) {
+ int result;
+
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_name_io_begin(io_annotation,
+ *tokudb_file_data_key,
+ toku_instr_file_op::file_rename,
+ new_name,
+ 0,
+ src_file,
+ src_line);
+
+ result = rename(old_name, new_name);
+ /* Register the result value with the instrumentation system */
+ toku_instr_file_io_end(io_annotation, 0);
+
+ return result;
+}
+
+void toku_os_full_write_with_source_location(int fd,
+ const void *buf,
+ size_t len,
+ const char *src_file,
+ uint src_line) {
+ const char *bp = (const char *)buf;
+ size_t bytes_written = len;
+
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_io_begin(io_annotation,
+ toku_instr_file_op::file_write,
+ fd,
+ len,
+ src_file,
+ src_line);
+
+ while (len > 0) {
+ ssize_t r;
+ if (t_full_write) {
+ r = t_full_write(fd, bp, len);
+ } else {
+ r = write(fd, bp, len);
+ }
+ if (r > 0) {
+ len -= r;
+ bp += r;
+ }
+ else {
+ try_again_after_handling_write_error(fd, len, r);
+ }
+ }
+ assert(len == 0);
+
+ /* Register the result value with the instrumentation system */
+ toku_instr_file_io_end(io_annotation, bytes_written);
+}
+
+int toku_os_write_with_source_location(int fd,
+ const void *buf,
+ size_t len,
+ const char *src_file,
+ uint src_line) {
+ const char *bp = (const char *)buf;
+ int result = 0;
+ ssize_t r;
+
+ size_t bytes_written = len;
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_io_begin(io_annotation,
+ toku_instr_file_op::file_write,
+ fd,
+ len,
+ src_file,
+ src_line);
+
+ while (len > 0) {
+ if (t_write) {
+ r = t_write(fd, bp, len);
+ } else {
+ r = write(fd, bp, len);
+ }
+ if (r < 0) {
+ result = errno;
+ break;
+ }
+ len -= r;
+ bp += r;
+ }
+ /* Register the result value with the instrumentation system */
+ toku_instr_file_io_end(io_annotation, bytes_written - len);
+
+ return result;
+}
+
+void toku_os_full_pwrite_with_source_location(int fd,
+ const void *buf,
+ size_t len,
+ toku_off_t off,
+ const char *src_file,
+ uint src_line) {
+ assert(0 == ((long long)buf) % 512);
+ assert((len % 512 == 0) && (off % 512) == 0); // direct I/O requires 512-byte aligned lengths and offsets.
+ const char *bp = (const char *)buf;
+
+ size_t bytes_written = len;
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_io_begin(io_annotation,
+ toku_instr_file_op::file_write,
+ fd,
+ len,
+ src_file,
+ src_line);
+ while (len > 0) {
+ ssize_t r;
+ if (t_full_pwrite) {
+ r = t_full_pwrite(fd, bp, len, off);
+ } else {
+ r = pwrite(fd, bp, len, off);
+ }
+ if (r > 0) {
+ len -= r;
+ bp += r;
+ off += r;
+ }
+ else {
+ try_again_after_handling_write_error(fd, len, r);
+ }
+ }
+ assert(len == 0);
+
+ /* Register the result value with the instrumentation system */
+ toku_instr_file_io_end(io_annotation, bytes_written);
+}
+
+ssize_t toku_os_pwrite_with_source_location(int fd,
+ const void *buf,
+ size_t len,
+ toku_off_t off,
+ const char *src_file,
+ uint src_line) {
+ assert(0 ==
+ ((long long)buf) %
+ 512); // these asserts are to ensure that direct I/O will work.
+ assert(0 == len % 512);
+ assert(0 == off % 512);
+ const char *bp = (const char *)buf;
+ ssize_t result = 0;
+ ssize_t r;
+
+ size_t bytes_written = len;
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_io_begin(io_annotation,
+ toku_instr_file_op::file_write,
+ fd,
+ len,
+ src_file,
+ src_line);
+ while (len > 0) {
+ r = (t_pwrite) ? t_pwrite(fd, bp, len, off) : pwrite(fd, bp, len, off);
+
+ if (r < 0) {
+ result = errno;
+ break;
+ }
+ len -= r;
+ bp += r;
+ off += r;
+ }
+ /* Register the result value with the instrumentation system */
+ toku_instr_file_io_end(io_annotation, bytes_written - len);
+
+ return result;
+}
+
+int toku_os_fwrite_with_source_location(const void *ptr,
+ size_t size,
+ size_t nmemb,
+ TOKU_FILE *stream,
+ const char *src_file,
+ uint src_line) {
+ int result = 0;
+ size_t bytes_written;
+
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_stream_io_begin(io_annotation,
+ toku_instr_file_op::file_write,
+ *stream,
+ nmemb,
+ src_file,
+ src_line);
+
+ if (os_fwrite_fun) {
+ bytes_written = os_fwrite_fun(ptr, size, nmemb, stream->file);
+ } else {
+ bytes_written = fwrite(ptr, size, nmemb, stream->file);
+ }
+
+ if (bytes_written != nmemb) {
+ if (os_fwrite_fun) // if using hook to induce artificial errors (for
+ // testing) ...
+ result = get_maybe_error_errno(); // ... then there is no error in
+ // the stream, but there is one
+ // in errno
+ else
+ result = ferror(stream->file);
+ invariant(result != 0); // Should we assert here?
+ }
+ /* Register the result value with the instrumentation system */
+ toku_instr_file_io_end(io_annotation, bytes_written);
+
+ return result;
+}
+
+int toku_os_fread_with_source_location(void *ptr,
+ size_t size,
+ size_t nmemb,
+ TOKU_FILE *stream,
+ const char *src_file,
+ uint src_line) {
+ int result = 0;
+ size_t bytes_read;
+
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_stream_io_begin(io_annotation,
+ toku_instr_file_op::file_read,
+ *stream,
+ nmemb,
+ src_file,
+ src_line);
+
+ if ((bytes_read = fread(ptr, size, nmemb, stream->file)) != nmemb) {
+ if ((feof(stream->file)))
+ result = EOF;
+ else
+ result = ferror(stream->file);
+ invariant(result != 0); // Should we assert here?
+ }
+ /* Register the result value with the instrumentation system */
+ toku_instr_file_io_end(io_annotation, bytes_read);
+
+ return result;
+}
+
+TOKU_FILE *toku_os_fdopen_with_source_location(int fildes,
+ const char *mode,
+ const char *filename,
+ const toku_instr_key &instr_key,
+ const char *src_file,
+ uint src_line) {
+ TOKU_FILE *XMALLOC(rval);
+ if (FT_LIKELY(rval != nullptr)) {
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_open_begin(io_annotation,
+ instr_key,
+ toku_instr_file_op::file_stream_open,
+ filename,
+ src_file,
+ src_line);
+
+ rval->file = (t_fdopen) ? t_fdopen(fildes, mode) : fdopen(fildes, mode);
+ toku_instr_file_stream_open_end(io_annotation, *rval);
+
+ if (FT_UNLIKELY(rval->file == nullptr)) {
+ toku_free(rval);
+ rval = nullptr;
+ }
+ }
+ return rval;
+}
+
+TOKU_FILE *toku_os_fopen_with_source_location(const char *filename,
+ const char *mode,
+ const toku_instr_key &instr_key,
+ const char *src_file,
+ uint src_line) {
+ TOKU_FILE *XMALLOC(rval);
+ if (FT_UNLIKELY(rval == nullptr))
+ return nullptr;
+
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_open_begin(io_annotation,
+ instr_key,
+ toku_instr_file_op::file_stream_open,
+ filename,
+ src_file,
+ src_line);
+ rval->file = t_fopen ? t_fopen(filename, mode) : fopen(filename, mode);
+ /* Register the returning "file" value with the system */
+ toku_instr_file_stream_open_end(io_annotation, *rval);
+
+ if (FT_UNLIKELY(rval->file == nullptr)) {
+ toku_free(rval);
+ rval = nullptr;
+ }
+ return rval;
+}
+
+int toku_os_open_with_source_location(const char *path,
+ int oflag,
+ int mode,
+ const toku_instr_key &instr_key,
+ const char *src_file,
+ uint src_line) {
+ int fd;
+ toku_io_instrumentation io_annotation;
+ /* register a file open or creation depending on "oflag" */
+ toku_instr_file_open_begin(
+ io_annotation,
+ instr_key,
+ ((oflag & O_CREAT) ? toku_instr_file_op::file_create
+ : toku_instr_file_op::file_open),
+ path,
+ src_file,
+ src_line);
+ if (t_open)
+ fd = t_open(path, oflag, mode);
+ else
+ fd = open(path, oflag, mode);
+
+ toku_instr_file_open_end(io_annotation, fd);
+ return fd;
+}
+
+int toku_os_open_direct(const char *path,
+ int oflag,
+ int mode,
+ const toku_instr_key &instr_key) {
+ int rval;
+#if defined(HAVE_O_DIRECT)
+ rval = toku_os_open(path, oflag | O_DIRECT, mode, instr_key);
+#elif defined(HAVE_F_NOCACHE)
+ rval = toku_os_open(path, oflag, mode, instr_key);
+ if (rval >= 0) {
+ int r = fcntl(rval, F_NOCACHE, 1);
+ if (r == -1) {
+ perror("setting F_NOCACHE");
+ }
+ }
+#else
+# error "No direct I/O implementation found."
+#endif
+ return rval;
+}
+
+int toku_os_fclose_with_source_location(TOKU_FILE *stream,
+ const char *src_file,
+ uint src_line) {
+ int rval = -1;
+ if (FT_LIKELY(stream != nullptr)) {
+ /* register a file stream close */
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_stream_close_begin(
+ io_annotation,
+ toku_instr_file_op::file_stream_close,
+ *stream,
+ src_file,
+ src_line);
+
+ if (t_fclose)
+ rval = t_fclose(stream->file);
+ else { // if EINTR, retry until success
+ while (rval != 0) {
+ rval = fclose(stream->file);
+ if (rval && (errno != EINTR))
+ break;
+ }
+ }
+ /* Register the returning "rval" value with the system */
+ toku_instr_file_close_end(io_annotation, rval);
+ toku_free(stream);
+ stream = nullptr;
+ }
+ return rval;
+}
+
+int toku_os_close_with_source_location(
+ int fd,
+ const char *src_file,
+ uint src_line) { // if EINTR, retry until success
+ /* register the file close */
+ int r = -1;
+
+ /* register a file descriptor close */
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_fd_close_begin(
+ io_annotation, toku_instr_file_op::file_close, fd, src_file, src_line);
+ while (r != 0) {
+ r = close(fd);
+ if (r) {
+ int rr = errno;
+ if (rr != EINTR)
+ printf("rr=%d (%s)\n", rr, strerror(rr));
+ assert(rr == EINTR);
+ }
+ }
+
+ /* Register the returning value with the system */
+ toku_instr_file_close_end(io_annotation, r);
+
+ return r;
+}
+
+ssize_t toku_os_read_with_source_location(int fd,
+ void *buf,
+ size_t count,
+ const char *src_file,
+ uint src_line) {
+ ssize_t bytes_read;
+
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_io_begin(io_annotation,
+ toku_instr_file_op::file_read,
+ fd,
+ count,
+ src_file,
+ src_line);
+
+ bytes_read = (t_read) ? t_read(fd, buf, count) : read(fd, buf, count);
+
+ toku_instr_file_io_end(io_annotation, bytes_read);
+
+ return bytes_read;
+}
+
+ssize_t inline_toku_os_pread_with_source_location(int fd,
+ void *buf,
+ size_t count,
+ off_t offset,
+ const char *src_file,
+ uint src_line) {
+ assert(0 == ((long long)buf) % 512);
+ assert(0 == count % 512);
+ assert(0 == offset % 512);
+ ssize_t bytes_read;
+
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_io_begin(io_annotation,
+ toku_instr_file_op::file_read,
+ fd,
+ count,
+ src_file,
+ src_line);
+ if (t_pread) {
+ bytes_read = t_pread(fd, buf, count, offset);
+ } else {
+ bytes_read = pread(fd, buf, count, offset);
+ }
+ toku_instr_file_io_end(io_annotation, bytes_read);
+
+ return bytes_read;
+}
+
+void toku_os_recursive_delete(const char *path) {
+ char buf[TOKU_PATH_MAX + sizeof("rm -rf ")];
+ strcpy(buf, "rm -rf ");
+ strncat(buf, path, TOKU_PATH_MAX);
+ int r = system(buf);
+ assert_zero(r);
+}
+
+// fsync logic:
+
+// t_fsync exists for testing purposes only
+static int (*t_fsync)(int) = 0;
+static uint64_t toku_fsync_count;
+static uint64_t toku_fsync_time;
+static uint64_t toku_long_fsync_threshold = 1000000;
+static uint64_t toku_long_fsync_count;
+static uint64_t toku_long_fsync_time;
+static uint64_t toku_long_fsync_eintr_count;
+static int toku_fsync_debug = 0;
+
+void toku_set_func_fsync(int (*fsync_function)(int)) {
+ t_fsync = fsync_function;
+}
+
+// keep trying if fsync fails because of EINTR
+void file_fsync_internal_with_source_location(int fd,
+ const char *src_file,
+ uint src_line) {
+ uint64_t tstart = toku_current_time_microsec();
+ int r = -1;
+ uint64_t eintr_count = 0;
+
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_io_begin(io_annotation,
+ toku_instr_file_op::file_sync,
+ fd,
+ 0,
+ src_file,
+ src_line);
+
+ while (r != 0) {
+ if (t_fsync) {
+ r = t_fsync(fd);
+ } else {
+ r = fsync(fd);
+ }
+ if (r) {
+ assert(get_error_errno() == EINTR);
+ eintr_count++;
+ }
+ }
+ toku_sync_fetch_and_add(&toku_fsync_count, 1);
+ uint64_t duration = toku_current_time_microsec() - tstart;
+ toku_sync_fetch_and_add(&toku_fsync_time, duration);
+
+ toku_instr_file_io_end(io_annotation, 0);
+
+ if (duration >= toku_long_fsync_threshold) {
+ toku_sync_fetch_and_add(&toku_long_fsync_count, 1);
+ toku_sync_fetch_and_add(&toku_long_fsync_time, duration);
+ toku_sync_fetch_and_add(&toku_long_fsync_eintr_count, eintr_count);
+ if (toku_fsync_debug) {
+ const int tstr_length = 26;
+ char tstr[tstr_length];
+ time_t t = time(0);
+#if __linux__
+ char fdname[256];
+ snprintf(fdname, sizeof fdname, "/proc/%d/fd/%d", getpid(), fd);
+ char lname[256];
+ ssize_t s = readlink(fdname, lname, sizeof lname);
+ if (0 < s && s < (ssize_t) sizeof lname)
+ lname[s] = 0;
+ fprintf(stderr, "%.24s toku_file_fsync %s fd=%d %s duration=%" PRIu64 " usec eintr=%" PRIu64 "\n",
+ ctime_r(&t, tstr), __FUNCTION__, fd, s > 0 ? lname : "?", duration, eintr_count);
+#else
+ fprintf(stderr, "%.24s toku_file_fsync %s fd=%d duration=%" PRIu64 " usec eintr=%" PRIu64 "\n",
+ ctime_r(&t, tstr), __FUNCTION__, fd, duration, eintr_count);
+#endif
+ fflush(stderr);
+ }
+ }
+}
+
+void toku_file_fsync_without_accounting(int fd) {
+ file_fsync_internal(fd);
+}
+
+void toku_fsync_dirfd_without_accounting(DIR *dir) {
+ int fd = dirfd(dir);
+ toku_file_fsync_without_accounting(fd);
+}
+
+int toku_fsync_dir_by_name_without_accounting(const char *dir_name) {
+ int r = 0;
+ DIR * dir = opendir(dir_name);
+ if (!dir) {
+ r = get_error_errno();
+ } else {
+ toku_fsync_dirfd_without_accounting(dir);
+ r = closedir(dir);
+ if (r != 0) {
+ r = get_error_errno();
+ }
+ }
+ return r;
+}
+
+// include fsync in scheduling accounting
+void toku_file_fsync(int fd) {
+ file_fsync_internal (fd);
+}
+
+// for real accounting
+void toku_get_fsync_times(uint64_t *fsync_count, uint64_t *fsync_time, uint64_t *long_fsync_threshold, uint64_t *long_fsync_count, uint64_t *long_fsync_time) {
+ *fsync_count = toku_fsync_count;
+ *fsync_time = toku_fsync_time;
+ *long_fsync_threshold = toku_long_fsync_threshold;
+ *long_fsync_count = toku_long_fsync_count;
+ *long_fsync_time = toku_long_fsync_time;
+}
+
+int toku_fsync_directory(const char *fname) {
+ int result = 0;
+
+ // extract dirname from fname
+ const char *sp = strrchr(fname, '/');
+ size_t len;
+ char *dirname = NULL;
+ if (sp) {
+ resource_assert(sp >= fname);
+ len = sp - fname + 1;
+ MALLOC_N(len+1, dirname);
+ if (dirname == NULL) {
+ result = get_error_errno();
+ } else {
+ strncpy(dirname, fname, len);
+ dirname[len] = 0;
+ }
+ } else {
+ dirname = toku_strdup(".");
+ if (dirname == NULL) {
+ result = get_error_errno();
+ }
+ }
+
+ if (result == 0) {
+ result = toku_fsync_dir_by_name_without_accounting(dirname);
+ }
+ toku_free(dirname);
+ return result;
+}
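A sketch (illustrative, not part of the patch) of how the helpers above compose for a durable file update; publish_file and its arguments are hypothetical, and error handling is omitted:

    static void publish_file(int fd, const void *buf, size_t len,
                             const char *tmp_name, const char *final_name) {
        // write everything; the helper retries internally on EINTR/ENOSPC
        toku_os_full_write_with_source_location(fd, buf, len, __FILE__, __LINE__);
        toku_file_fsync(fd);                       // flush the file data itself
        toku_os_rename_with_source_location(tmp_name, final_name,
                                            __FILE__, __LINE__);
        toku_fsync_directory(final_name);          // make the rename durable
    }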
diff --git a/storage/tokudb/PerconaFT/portability/huge_page_detection.cc b/storage/tokudb/PerconaFT/portability/huge_page_detection.cc
new file mode 100644
index 00000000..8e73c56a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/huge_page_detection.cc
@@ -0,0 +1,148 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <sys/mman.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <portability/toku_assert.h>
+#include <portability/toku_os.h>
+
+static bool check_huge_pages_config_file(const char *fname)
+// Effect: Return true if the config file reports transparent huge pages as enabled. If so, print diagnostics.
+{
+ bool huge_pages_enabled = false;
+ FILE *f = fopen(fname, "r");
+ if (f) {
+ // It's Red Hat and the feature appears to be there. Is it enabled?
+ char buf[1000];
+ char *r = fgets(buf, sizeof(buf), f);
+ assert(r != NULL);
+ if (strstr(buf, "[always]")) {
+ fprintf(stderr, "Transparent huge pages are enabled, according to %s\n", fname);
+ huge_pages_enabled = true;
+ } else {
+ huge_pages_enabled = false;
+ }
+ fclose(f);
+ }
+ return huge_pages_enabled;
+}
+
+static bool check_huge_pages_in_practice(void)
+// Effect: Return true if huge pages appear to be enabled in practice (probed with mmap and mincore).
+{
+#ifdef HAVE_MINCORE
+#ifdef HAVE_MAP_ANONYMOUS
+ const int map_anonymous = MAP_ANONYMOUS;
+#else
+ const int map_anonymous = MAP_ANON;
+#endif
+ const size_t TWO_MB = 2UL*1024UL*1024UL;
+
+ void *first = mmap(NULL, 2*TWO_MB, PROT_READ|PROT_WRITE, MAP_PRIVATE|map_anonymous, -1, 0);
+ if ((long)first==-1) perror("mmap failed");
+ {
+ int r = munmap(first, 2*TWO_MB);
+ assert(r==0);
+ }
+
+ void *second_addr = (void*)(((unsigned long)first + TWO_MB) & ~(TWO_MB -1));
+ void *second = mmap(second_addr, TWO_MB, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE|map_anonymous, -1, 0);
+ if ((long)second==-1) perror("mmap failed");
+ assert((long)second%TWO_MB == 0);
+
+ const long pagesize = 4096;
+ const long n_pages = TWO_MB/pagesize;
+#ifdef __linux__
+ // On linux mincore is defined as mincore(void *, size_t, unsigned char *)
+ unsigned char vec[n_pages];
+#else
+ // On BSD (OS X included) it is defined as mincore(void *, size_t, char *)
+ char vec[n_pages];
+#endif
+ {
+ int r = mincore(second, TWO_MB, vec);
+ if (r!=0 && errno==ENOMEM) {
+ // On some kernels (e.g., Centos 5.8), mincore doesn't work. It seems unlikely that huge pages are here.
+ munmap(second, TWO_MB);
+ return false;
+ }
+ assert(r==0);
+ }
+ for (long i=0; i<n_pages; i++) {
+ assert(!vec[i]);
+ }
+ ((char*)second)[0] = 1;
+ {
+ int r = mincore(second, TWO_MB, vec);
+ // If the mincore worked the first time, it probably works here too.
+ assert(r==0);
+ }
+ assert(vec[0]);
+ {
+ int r = munmap(second, TWO_MB);
+ assert(r==0);
+ }
+ if (vec[1]) {
+ fprintf(stderr, "Transparent huge pages appear to be enabled according to mincore()\n");
+ return true;
+ } else {
+ return false;
+ }
+#else
+ // No mincore, so no way to check this in practice
+ return false;
+#endif
+}
+
+bool toku_os_huge_pages_enabled(void)
+// Effect: Return true if huge pages appear to be enabled. If so, print some diagnostics to stderr.
+// If environment variable TOKU_HUGE_PAGES_OK is set, then don't complain.
+{
+ char *toku_huge_pages_ok = getenv("TOKU_HUGE_PAGES_OK");
+ if (toku_huge_pages_ok) {
+ return false;
+ } else {
+ bool conf1 = check_huge_pages_config_file("/sys/kernel/mm/redhat_transparent_hugepage/enabled");
+ bool conf2 = check_huge_pages_config_file("/sys/kernel/mm/transparent_hugepage/enabled");
+ bool prac = check_huge_pages_in_practice();
+ return conf1|conf2|prac;
+ }
+}
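A sketch (illustrative, not part of the patch) of the kind of startup guard that would use this probe; the message text is made up:

    if (toku_os_huge_pages_enabled()) {
        // diagnostics were already printed to stderr by the checks above
        fprintf(stderr, "refusing to start with transparent huge pages enabled; "
                        "set TOKU_HUGE_PAGES_OK to skip this check\n");
        // the caller would fail engine startup here
    }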
diff --git a/storage/tokudb/PerconaFT/portability/memory.cc b/storage/tokudb/PerconaFT/portability/memory.cc
new file mode 100644
index 00000000..f4888dcc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/memory.cc
@@ -0,0 +1,516 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <portability/toku_config.h>
+
+#include <toku_portability.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#if defined(HAVE_MALLOC_H)
+# include <malloc.h>
+#elif defined(HAVE_SYS_MALLOC_H)
+# include <sys/malloc.h>
+#endif
+#include <dlfcn.h>
+#include <toku_race_tools.h>
+#include "memory.h"
+#include "toku_assert.h"
+#include <portability/toku_atomic.h>
+
+static malloc_fun_t t_malloc = 0;
+static malloc_aligned_fun_t t_malloc_aligned = 0;
+static malloc_fun_t t_xmalloc = 0;
+static malloc_aligned_fun_t t_xmalloc_aligned = 0;
+static free_fun_t t_free = 0;
+static realloc_fun_t t_realloc = 0;
+static realloc_aligned_fun_t t_realloc_aligned = 0;
+static realloc_fun_t t_xrealloc = 0;
+
+static LOCAL_MEMORY_STATUS_S status;
+int toku_memory_do_stats = 0;
+
+static bool memory_startup_complete = false;
+
+int
+toku_memory_startup(void) {
+ if (memory_startup_complete) {
+ return 0;
+ }
+ memory_startup_complete = true;
+
+ int result = 0;
+
+#if defined(HAVE_M_MMAP_THRESHOLD)
+ // initialize libc malloc
+ size_t mmap_threshold = 64 * 1024; // 64K and larger should be malloced with mmap().
+ int success = mallopt(M_MMAP_THRESHOLD, mmap_threshold);
+ if (success) {
+ status.mallocator_version = "libc";
+ status.mmap_threshold = mmap_threshold;
+ } else {
+ result = EINVAL;
+ }
+ assert(result == 0);
+#else
+ // just a guess
+ status.mallocator_version = "darwin";
+ status.mmap_threshold = 16 * 1024;
+#endif
+
+ // jemalloc has a mallctl function, while libc malloc does not. we can check if jemalloc
+ // is loaded by checking if the mallctl function can be found. if it can, we call it
+ // to get version and mmap threshold configuration.
+ typedef int (*mallctl_fun_t)(const char *, void *, size_t *, void *, size_t);
+ mallctl_fun_t mallctl_f;
+ mallctl_f = (mallctl_fun_t) dlsym(RTLD_DEFAULT, "mallctl");
+ if (mallctl_f) { // jemalloc is loaded
+ size_t version_length = sizeof status.mallocator_version;
+ result = mallctl_f("version", &status.mallocator_version, &version_length, NULL, 0);
+ assert(result == 0);
+ if (result == 0) {
+ size_t lg_chunk; // log2 of the mmap threshold
+ size_t lg_chunk_length = sizeof lg_chunk;
+ result = mallctl_f("opt.lg_chunk", &lg_chunk, &lg_chunk_length, NULL, 0);
+ if (result == 0) {
+ status.mmap_threshold = 1 << lg_chunk;
+ } else {
+ status.mmap_threshold = 1 << 22;
+ result = 0;
+ }
+ }
+ }
+
+ return result;
+}
+
+static bool memory_shutdown_complete;
+
+void
+toku_memory_shutdown(void) {
+ if (memory_shutdown_complete) {
+ return;
+ }
+ memory_shutdown_complete = true;
+}
+
+void
+toku_memory_get_status(LOCAL_MEMORY_STATUS s) {
+ *s = status;
+}
+
+// jemalloc's malloc_usable_size does not work with a NULL pointer, so we implement a version that works
+static size_t
+my_malloc_usable_size(void *p) {
+ return p == NULL ? 0 : os_malloc_usable_size(p);
+}
+
+// Note that max_in_use may be slightly off because use of max_in_use is not thread-safe.
+// It is not worth the overhead to make it completely accurate, but
+// this logic is intended to guarantee that it increases monotonically.
+// Note that status.sum_used and status.sum_freed increase monotonically
+// and that status.max_in_use is declared volatile.
+static inline void
+set_max(uint64_t sum_used, uint64_t sum_freed) {
+ if (sum_used >= sum_freed) {
+ uint64_t in_use = sum_used - sum_freed;
+ uint64_t old_max;
+ do {
+ old_max = status.max_in_use;
+ } while (old_max < in_use &&
+ !toku_sync_bool_compare_and_swap(&status.max_in_use, old_max, in_use));
+ }
+}
+
+// Effect: Like toku_memory_footprint, except instead of passing p,
+// we pass toku_malloc_usable_size(p).
+size_t
+toku_memory_footprint_given_usable_size(size_t touched, size_t usable)
+{
+ size_t pagesize = toku_os_get_pagesize();
+ if (usable >= status.mmap_threshold) {
+ int num_pages = (touched + pagesize) / pagesize;
+ return num_pages * pagesize;
+ }
+ return usable;
+}
+
+// Effect: Return an estimate of how much space an object is using, possibly by
+// using toku_malloc_usable_size(p).
+// If p is NULL then returns 0.
+size_t
+toku_memory_footprint(void * p, size_t touched)
+{
+ if (!p) return 0;
+ return toku_memory_footprint_given_usable_size(touched,
+ my_malloc_usable_size(p));
+}
+
+void *
+toku_malloc(size_t size) {
+#if defined(__APPLE__)
+ if (size == 0) {
+ return nullptr;
+ }
+#endif
+
+ if (size > status.max_requested_size) {
+ status.max_requested_size = size;
+ }
+ void *p = t_malloc ? t_malloc(size) : os_malloc(size);
+ if (p) {
+ TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
+ if (toku_memory_do_stats) {
+ size_t used = my_malloc_usable_size(p);
+ toku_sync_add_and_fetch(&status.malloc_count, 1);
+ toku_sync_add_and_fetch(&status.requested,size);
+ toku_sync_add_and_fetch(&status.used, used);
+ set_max(status.used, status.freed);
+ }
+ } else {
+ toku_sync_add_and_fetch(&status.malloc_fail, 1);
+ status.last_failed_size = size;
+ }
+ return p;
+}
+
+void *toku_malloc_aligned(size_t alignment, size_t size) {
+#if defined(__APPLE__)
+ if (size == 0) {
+ return nullptr;
+ }
+#endif
+
+ if (size > status.max_requested_size) {
+ status.max_requested_size = size;
+ }
+ void *p = t_malloc_aligned ? t_malloc_aligned(alignment, size) : os_malloc_aligned(alignment, size);
+ if (p) {
+ TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
+ if (toku_memory_do_stats) {
+ size_t used = my_malloc_usable_size(p);
+ toku_sync_add_and_fetch(&status.malloc_count, 1);
+ toku_sync_add_and_fetch(&status.requested,size);
+ toku_sync_add_and_fetch(&status.used, used);
+ set_max(status.used, status.freed);
+ }
+ } else {
+ toku_sync_add_and_fetch(&status.malloc_fail, 1);
+ status.last_failed_size = size;
+ }
+ return p;
+}
+
+void *
+toku_calloc(size_t nmemb, size_t size) {
+ size_t newsize = nmemb * size;
+ void *p = toku_malloc(newsize);
+ if (p) memset(p, 0, newsize);
+ return p;
+}
+
+void *
+toku_realloc(void *p, size_t size) {
+#if defined(__APPLE__)
+ if (size == 0) {
+ if (p != nullptr) {
+ toku_free(p);
+ }
+ return nullptr;
+ }
+#endif
+
+ if (size > status.max_requested_size) {
+ status.max_requested_size = size;
+ }
+ size_t used_orig = p ? my_malloc_usable_size(p) : 0;
+ void *q = t_realloc ? t_realloc(p, size) : os_realloc(p, size);
+ if (q) {
+ if (toku_memory_do_stats) {
+ size_t used = my_malloc_usable_size(q);
+ toku_sync_add_and_fetch(&status.realloc_count, 1);
+ toku_sync_add_and_fetch(&status.requested, size);
+ toku_sync_add_and_fetch(&status.used, used);
+ toku_sync_add_and_fetch(&status.freed, used_orig);
+ set_max(status.used, status.freed);
+ }
+ } else {
+ toku_sync_add_and_fetch(&status.realloc_fail, 1);
+ status.last_failed_size = size;
+ }
+ return q;
+}
+
+void *toku_realloc_aligned(size_t alignment, void *p, size_t size) {
+#if defined(__APPLE__)
+ if (size == 0) {
+ if (p != nullptr) {
+ toku_free(p);
+ }
+ return nullptr;
+ }
+#endif
+
+ if (size > status.max_requested_size) {
+ status.max_requested_size = size;
+ }
+ size_t used_orig = p ? my_malloc_usable_size(p) : 0;
+ void *q = t_realloc_aligned ? t_realloc_aligned(alignment, p, size) : os_realloc_aligned(alignment, p, size);
+ if (q) {
+ if (toku_memory_do_stats) {
+ size_t used = my_malloc_usable_size(q);
+ toku_sync_add_and_fetch(&status.realloc_count, 1);
+ toku_sync_add_and_fetch(&status.requested, size);
+ toku_sync_add_and_fetch(&status.used, used);
+ toku_sync_add_and_fetch(&status.freed, used_orig);
+ set_max(status.used, status.freed);
+ }
+ } else {
+ toku_sync_add_and_fetch(&status.realloc_fail, 1);
+ status.last_failed_size = size;
+ }
+ return q;
+}
+
+
+void *
+toku_memdup(const void *v, size_t len) {
+ void *p = toku_malloc(len);
+ if (p) memcpy(p, v,len);
+ return p;
+}
+
+char *
+toku_strdup(const char *s) {
+ return (char *) toku_memdup(s, strlen(s)+1);
+}
+
+char *toku_strndup(const char *s, size_t n) {
+ size_t s_size = strlen(s);
+ size_t bytes_to_copy = n > s_size ? s_size : n;
+ ++bytes_to_copy;
+ char *result = (char *)toku_memdup(s, bytes_to_copy);
+ result[bytes_to_copy - 1] = 0;
+ return result;
+}
+
+void
+toku_free(void *p) {
+ if (p) {
+ if (toku_memory_do_stats) {
+ size_t used = my_malloc_usable_size(p);
+ toku_sync_add_and_fetch(&status.free_count, 1);
+ toku_sync_add_and_fetch(&status.freed, used);
+ }
+ if (t_free)
+ t_free(p);
+ else
+ os_free(p);
+ }
+}
+
+void *
+toku_xmalloc(size_t size) {
+#if defined(__APPLE__)
+ if (size == 0) {
+ return nullptr;
+ }
+#endif
+
+ if (size > status.max_requested_size) {
+ status.max_requested_size = size;
+ }
+ void *p = t_xmalloc ? t_xmalloc(size) : os_malloc(size);
+ if (p == NULL) { // avoid function call in common case
+ status.last_failed_size = size;
+ resource_assert(p);
+ }
+ TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
+ if (toku_memory_do_stats) {
+ size_t used = my_malloc_usable_size(p);
+ toku_sync_add_and_fetch(&status.malloc_count, 1);
+ toku_sync_add_and_fetch(&status.requested, size);
+ toku_sync_add_and_fetch(&status.used, used);
+ set_max(status.used, status.freed);
+ }
+ return p;
+}
+
+void* toku_xmalloc_aligned(size_t alignment, size_t size)
+// Effect: Perform a malloc(size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
+// Fail with a resource_assert if the allocation fails (don't return an error code).
+// Requires: alignment is a power of two.
+{
+#if defined(__APPLE__)
+ if (size == 0) {
+ return nullptr;
+ }
+#endif
+
+ if (size > status.max_requested_size) {
+ status.max_requested_size = size;
+ }
+ void *p = t_xmalloc_aligned ? t_xmalloc_aligned(alignment, size) : os_malloc_aligned(alignment,size);
+ if (p == NULL && size != 0) {
+ status.last_failed_size = size;
+ resource_assert(p);
+ }
+ if (toku_memory_do_stats) {
+ size_t used = my_malloc_usable_size(p);
+ toku_sync_add_and_fetch(&status.malloc_count, 1);
+ toku_sync_add_and_fetch(&status.requested, size);
+ toku_sync_add_and_fetch(&status.used, used);
+ set_max(status.used, status.freed);
+ }
+ return p;
+}
+
+void *
+toku_xcalloc(size_t nmemb, size_t size) {
+ size_t newsize = nmemb * size;
+ void *vp = toku_xmalloc(newsize);
+ if (vp) memset(vp, 0, newsize);
+ return vp;
+}
+
+void *
+toku_xrealloc(void *v, size_t size) {
+#if defined(__APPLE__)
+ if (size == 0) {
+ if (v != nullptr) {
+ toku_free(v);
+ }
+ return nullptr;
+ }
+#endif
+
+ if (size > status.max_requested_size) {
+ status.max_requested_size = size;
+ }
+ size_t used_orig = v ? my_malloc_usable_size(v) : 0;
+ void *p = t_xrealloc ? t_xrealloc(v, size) : os_realloc(v, size);
+ if (p == 0) { // avoid function call in common case
+ status.last_failed_size = size;
+ resource_assert(p);
+ }
+ if (toku_memory_do_stats) {
+ size_t used = my_malloc_usable_size(p);
+ toku_sync_add_and_fetch(&status.realloc_count, 1);
+ toku_sync_add_and_fetch(&status.requested, size);
+ toku_sync_add_and_fetch(&status.used, used);
+ toku_sync_add_and_fetch(&status.freed, used_orig);
+ set_max(status.used, status.freed);
+ }
+ return p;
+}
+
+size_t
+toku_malloc_usable_size(void *p) {
+ return my_malloc_usable_size(p);
+}
+
+void *
+toku_xmemdup (const void *v, size_t len) {
+ void *p = toku_xmalloc(len);
+ memcpy(p, v, len);
+ return p;
+}
+
+char *
+toku_xstrdup (const char *s) {
+ return (char *) toku_xmemdup(s, strlen(s)+1);
+}
+
+void
+toku_set_func_malloc(malloc_fun_t f) {
+ t_malloc = f;
+ t_xmalloc = f;
+}
+
+void
+toku_set_func_xmalloc_only(malloc_fun_t f) {
+ t_xmalloc = f;
+}
+
+void
+toku_set_func_malloc_only(malloc_fun_t f) {
+ t_malloc = f;
+}
+
+void
+toku_set_func_realloc(realloc_fun_t f) {
+ t_realloc = f;
+ t_xrealloc = f;
+}
+
+void
+toku_set_func_xrealloc_only(realloc_fun_t f) {
+ t_xrealloc = f;
+}
+
+void
+toku_set_func_realloc_only(realloc_fun_t f) {
+ t_realloc = f;
+
+}
+
+void
+toku_set_func_free(free_fun_t f) {
+ t_free = f;
+}
+
+#include <toku_race_tools.h>
+void __attribute__((constructor)) toku_memory_helgrind_ignore(void);
+void
+toku_memory_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&status, sizeof status);
+}
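A sketch (illustrative, not part of the patch) of reading the counters maintained above; it assumes the caller can see the toku_memory_do_stats flag defined in this file:

    toku_memory_do_stats = 1;      // make toku_malloc/toku_free update the counters
    void *p = toku_xmalloc(1024);
    toku_free(p);

    LOCAL_MEMORY_STATUS_S s;
    toku_memory_get_status(&s);
    printf("mallocator=%s mallocs=%" PRIu64 " freed=%" PRIu64 " max_in_use=%" PRIu64 "\n",
           s.mallocator_version, s.malloc_count, s.freed, s.max_in_use);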
diff --git a/storage/tokudb/PerconaFT/portability/memory.h b/storage/tokudb/PerconaFT/portability/memory.h
new file mode 100644
index 00000000..b8044634
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/memory.h
@@ -0,0 +1,196 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <stdlib.h>
+#include <toku_portability.h>
+
+/* Percona memory allocation functions and macros.
+ * These are functions for malloc and free */
+
+int toku_memory_startup(void) __attribute__((constructor));
+void toku_memory_shutdown(void) __attribute__((destructor));
+
+/* Generally: errno is set to 0 or a value to indicate problems. */
+
+// Everything should call toku_malloc() instead of malloc(), and toku_calloc() instead of calloc()
+// That way the tests can, e.g., replace the malloc function using toku_set_func_malloc().
+void *toku_calloc(size_t nmemb, size_t size) __attribute__((__visibility__("default")));
+void *toku_xcalloc(size_t nmemb, size_t size) __attribute__((__visibility__("default")));
+void *toku_malloc(size_t size) __attribute__((__visibility__("default")));
+void *toku_malloc_aligned(size_t alignment, size_t size) __attribute__((__visibility__("default")));
+
+// xmalloc aborts instead of return NULL if we run out of memory
+void *toku_xmalloc(size_t size) __attribute__((__visibility__("default")));
+void *toku_xrealloc(void*, size_t size) __attribute__((__visibility__("default")));
+void *toku_xmalloc_aligned(size_t alignment, size_t size) __attribute__((__visibility__("default")));
+// Effect: Perform a os_malloc_aligned(size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
+// Fail with a resource_assert if the allocation fails (don't return an error code).
+// If the alloc_aligned function has been set then call it instead.
+// Requires: alignment is a power of two.
+
+void toku_free(void*) __attribute__((__visibility__("default")));
+void *toku_realloc(void *, size_t size) __attribute__((__visibility__("default")));
+void *toku_realloc_aligned(size_t alignment, void *p, size_t size) __attribute__((__visibility__("default")));
+// Effect: Perform a os_realloc_aligned(alignment, p, size) which has the additional property that the returned pointer is a multiple of ALIGNMENT.
+// If the malloc_aligned function has been set then call it instead.
+// Requires: alignment is a power of two.
+
+size_t toku_malloc_usable_size(void *p) __attribute__((__visibility__("default")));
+
+/* MALLOC is a macro that helps avoid a common error:
+ * Suppose I write
+ * struct foo *x = malloc(sizeof(struct foo));
+ * That works fine. But if I change it to this, I've probably made a mistake:
+ * struct foo *x = malloc(sizeof(struct bar));
+ * It can get worse, since one might have something like
+ * struct foo *x = malloc(sizeof(struct foo *))
+ * which looks reasonable, but it allocates enough to hold a pointer instead of the amount needed for the struct.
+ * So instead, write
+ * struct foo *MALLOC(x);
+ * and you cannot go wrong.
+ */
+#define MALLOC(v) CAST_FROM_VOIDP(v, toku_malloc(sizeof(*v)))
+/* MALLOC_N is like calloc (except without zeroing the data): it makes an array. Write
+ * int *MALLOC_N(5,x);
+ * to make an array of 5 integers.
+ */
+#define MALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_malloc((n)*sizeof(*v)))
+#define MALLOC_N_ALIGNED(align, n, v) CAST_FROM_VOIDP(v, toku_malloc_aligned((align), (n)*sizeof(*v)))
+
+
+//CALLOC_N is like calloc with auto-figuring out size of members
+#define CALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_calloc((n), sizeof(*v)))
+
+#define CALLOC(v) CALLOC_N(1,v)
+
+#define REALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_realloc(v, (n)*sizeof(*v)))
+#define REALLOC_N_ALIGNED(align, n,v) CAST_FROM_VOIDP(v, toku_realloc_aligned((align), v, (n)*sizeof(*v)))
+
+// XMALLOC macros are like MALLOC except they abort if the operation fails
+#define XMALLOC(v) CAST_FROM_VOIDP(v, toku_xmalloc(sizeof(*v)))
+#define XMALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xmalloc((n)*sizeof(*v)))
+#define XCALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xcalloc((n), (sizeof(*v))))
+#define XCALLOC(v) XCALLOC_N(1,v)
+#define XREALLOC(v,s) CAST_FROM_VOIDP(v, toku_xrealloc(v, s))
+#define XREALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xrealloc(v, (n)*sizeof(*v)))
+
+#define XMALLOC_N_ALIGNED(align, n, v) CAST_FROM_VOIDP(v, toku_xmalloc_aligned((align), (n)*sizeof(*v)))
+
+#define XMEMDUP(dst, src) CAST_FROM_VOIDP(dst, toku_xmemdup(src, sizeof(*src)))
+#define XMEMDUP_N(dst, src, len) CAST_FROM_VOIDP(dst, toku_xmemdup(src, len))
+
+// ZERO_ARRAY writes zeroes to a stack-allocated array
+#define ZERO_ARRAY(o) do { memset((o), 0, sizeof (o)); } while (0)
+// ZERO_STRUCT writes zeroes to a stack-allocated struct
+#define ZERO_STRUCT(o) do { memset(&(o), 0, sizeof (o)); } while (0)
+
+/* Copy memory. Analogous to strdup() */
+void *toku_memdup (const void *v, size_t len);
+/* Toku-version of strdup. Use this so that it calls toku_malloc() */
+char *toku_strdup (const char *s) __attribute__((__visibility__("default")));
+/* Toku-version of strndup. Use this so that it calls toku_malloc() */
+char *toku_strndup(const char *s, size_t n)
+ __attribute__((__visibility__("default")));
+/* Copy memory. Analogous to strdup() Crashes instead of returning NULL */
+void *toku_xmemdup (const void *v, size_t len) __attribute__((__visibility__("default")));
+/* Toku-version of strdup. Use this so that it calls toku_xmalloc() Crashes instead of returning NULL */
+char *toku_xstrdup (const char *s) __attribute__((__visibility__("default")));
+
+void toku_malloc_cleanup (void); /* Before exiting, call this function to free up any internal data structures from toku_malloc. Otherwise valgrind will complain of memory leaks. */
+
+/* Check to see if everything malloc'd was freed. Might be a no-op depending on how memory.c is configured. */
+void toku_memory_check_all_free (void);
+/* Check to see if memory is "sane". Might be a no-op. Probably better to simply use valgrind. */
+void toku_do_memory_check(void);
+
+typedef void *(*malloc_fun_t)(size_t);
+typedef void (*free_fun_t)(void*);
+typedef void *(*realloc_fun_t)(void*,size_t);
+typedef void *(*malloc_aligned_fun_t)(size_t /*alignment*/, size_t /*size*/);
+typedef void *(*realloc_aligned_fun_t)(size_t /*alignment*/, void */*pointer*/, size_t /*size*/);
+
+void toku_set_func_malloc(malloc_fun_t f);
+void toku_set_func_xmalloc_only(malloc_fun_t f);
+void toku_set_func_malloc_only(malloc_fun_t f);
+void toku_set_func_realloc(realloc_fun_t f);
+void toku_set_func_xrealloc_only(realloc_fun_t f);
+void toku_set_func_realloc_only(realloc_fun_t f);
+void toku_set_func_free(free_fun_t f);
+
+typedef struct memory_status {
+ uint64_t malloc_count; // number of malloc operations
+ uint64_t free_count; // number of free operations
+ uint64_t realloc_count; // number of realloc operations
+ uint64_t malloc_fail; // number of malloc operations that failed
+ uint64_t realloc_fail; // number of realloc operations that failed
+ uint64_t requested; // number of bytes requested
+ uint64_t used; // number of bytes used (requested + overhead), obtained from malloc_usable_size()
+    uint64_t freed;              // number of bytes freed
+    uint64_t max_requested_size; // largest attempted allocation size
+    uint64_t last_failed_size;   // size of the last failed allocation attempt
+    volatile uint64_t max_in_use;     // maximum memory footprint (used - freed), approximate (not worth thread-safety overhead for exact)
+ const char *mallocator_version;
+ uint64_t mmap_threshold;
+} LOCAL_MEMORY_STATUS_S, *LOCAL_MEMORY_STATUS;
+
+void toku_memory_get_status(LOCAL_MEMORY_STATUS s);
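+// Illustrative usage (local names are example placeholders):
+//     LOCAL_MEMORY_STATUS_S stats;
+//     toku_memory_get_status(&stats);
+//     // stats.used - stats.freed approximates the current memory footprint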
+
+// Effect: Like toku_memory_footprint, except instead of passing p,
+// we pass toku_malloc_usable_size(p).
+size_t toku_memory_footprint_given_usable_size(size_t touched, size_t usable);
+
+// Effect: Return an estimate of how much space an object is using, possibly by
+// using toku_malloc_usable_size(p).
+// If p is NULL then returns 0.
+size_t toku_memory_footprint(void * p, size_t touched);
diff --git a/storage/tokudb/PerconaFT/portability/os_malloc.cc b/storage/tokudb/PerconaFT/portability/os_malloc.cc
new file mode 100644
index 00000000..15a3ec1d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/os_malloc.cc
@@ -0,0 +1,294 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <portability/toku_config.h>
+
+#include <toku_portability.h>
+#include <stdlib.h>
+#if defined(HAVE_MALLOC_H)
+# include <malloc.h>
+#elif defined(HAVE_SYS_MALLOC_H)
+# include <sys/malloc.h>
+#endif
+#include <dlfcn.h>
+
+#include <string.h>
+
+// #define this to use a version of os_malloc that helps to debug certain features.
+// This version uses the real malloc (so that valgrind should still work) but it forces things to be slightly
+// misaligned (in particular, avoiding 512-byte alignment if possible, to find situations where O_DIRECT will fail).
+// #define USE_DEBUGGING_MALLOCS
+
+#ifdef USE_DEBUGGING_MALLOCS
+#include <pthread.h>
+
+// Make things misaligned on 512-byte boundaries
+static size_t malloced_now_count=0, malloced_now_size=0;
+struct malloc_pair {
+ void *returned_pointer;
+ void *true_pointer;
+ size_t requested_size = 0;
+};
+static struct malloc_pair *malloced_now;
+static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void malloc_lock(void) {
+ int r = pthread_mutex_lock(&malloc_mutex);
+ assert(r==0);
+}
+static void malloc_unlock(void) {
+ int r = pthread_mutex_unlock(&malloc_mutex);
+ assert(r==0);
+}
+
+static void push_to_malloced_memory(void *returned_pointer, void *true_pointer, size_t requested_size) {
+ malloc_lock();
+ if (malloced_now_count == malloced_now_size) {
+ malloced_now_size = 2*malloced_now_size + 1;
+ malloced_now = (struct malloc_pair *)realloc(malloced_now, malloced_now_size * sizeof(*malloced_now));
+ }
+ malloced_now[malloced_now_count].returned_pointer = returned_pointer;
+ malloced_now[malloced_now_count].true_pointer = true_pointer;
+ malloced_now[malloced_now_count].requested_size = requested_size;
+ malloced_now_count++;
+ malloc_unlock();
+}
+
+static struct malloc_pair *find_malloced_pair(const void *p)
+// Requires: Lock must be held before calling.
+{
+ for (size_t i=0; i<malloced_now_count; i++) {
+ if (malloced_now[i].returned_pointer==p) return &malloced_now[i];
+ }
+ return 0;
+}
+
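+// Debugging behavior: allocate 16 extra bytes and, if the block happens to start
+// on a 512-byte boundary, hand back a pointer 16 bytes past the start so that code
+// which silently relies on 512-byte alignment (e.g. for O_DIRECT) fails fast.
+// Every returned pointer is recorded so os_free() and os_malloc_usable_size()
+// can map it back to the true allocation.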
+void *os_malloc(size_t size) {
+ void *raw_ptr = malloc(size+16); // allocate 16 extra bytes
+ size_t raw_ptr_i = (size_t) raw_ptr;
+ if (raw_ptr_i%512==0) {
+ push_to_malloced_memory(16+(char*)raw_ptr, raw_ptr, size);
+ return 16+(char*)raw_ptr;
+ } else {
+ push_to_malloced_memory(raw_ptr, raw_ptr, size);
+ return raw_ptr;
+ }
+}
+
+void *os_malloc_aligned(size_t alignment, size_t size)
+// Effect: Perform a malloc(size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
+// Requires: alignment is a power of two.
+{
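+    // Note: in this debugging allocator every pointer handed out must be
+    // registered via push_to_malloced_memory(), since os_free() looks the
+    // pointer up and asserts that it was tracked.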
+ if (alignment%512==0) {
+ void *raw_ptr;
+ int r = posix_memalign(&raw_ptr, alignment, size);
+ if (r != 0) {
+ errno = r;
+ return nullptr;
+ }
+ push_to_malloced_memory(raw_ptr, raw_ptr, size);
+ return raw_ptr;
+ } else {
+ // Make sure it isn't 512-byte aligned
+ void *raw_ptr;
+ int r = posix_memalign(&raw_ptr, alignment, size+alignment);
+ if (r != 0) {
+ errno = r;
+ return nullptr;
+ }
+ size_t raw_ptr_i = (size_t) raw_ptr;
+ if (raw_ptr_i%512==0) {
+ push_to_malloced_memory(alignment+(char*)raw_ptr, raw_ptr, size);
+ return alignment+(char*)raw_ptr;
+ } else {
+ push_to_malloced_memory(raw_ptr, raw_ptr, size);
+ return raw_ptr;
+ }
+ }
+}
+
+static size_t min(size_t a, size_t b) {
+ if (a<b) return a;
+ else return b;
+}
+
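+// Route realloc through the aligned path, choosing a small alignment based on the
+// requested size; os_realloc_aligned() below copies the old contents into a fresh
+// (tracked) allocation and frees the old one.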
+void *os_realloc(void *p, size_t size) {
+ size_t alignment;
+ if (size<4) {
+ alignment = 1;
+ } else if (size<8) {
+ alignment = 4;
+ } else if (size<16) {
+ alignment = 8;
+ } else {
+ alignment = 16;
+ }
+ return os_realloc_aligned(alignment, p, size);
+}
+
+void * os_realloc_aligned(size_t alignment, void *p, size_t size)
+// Effect: Perform a realloc(p, size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
+// Requires: alignment is a power of two.
+{
+ if (p==NULL) {
+ return os_malloc_aligned(alignment, size);
+ } else {
+ void *result = os_malloc_aligned(alignment, size);
+ malloc_lock();
+ struct malloc_pair *mp = find_malloced_pair(p);
+ assert(mp);
+ // now copy all the good stuff from p to result
+ memcpy(result, p, min(size, mp->requested_size));
+ malloc_unlock();
+ os_free(p);
+ return result;
+ }
+}
+
+
+void os_free(void* p) {
+ malloc_lock();
+ struct malloc_pair *mp = find_malloced_pair(p);
+ assert(mp);
+ free(mp->true_pointer);
+ *mp = malloced_now[--malloced_now_count];
+ malloc_unlock();
+}
+
+size_t os_malloc_usable_size(const void *p) {
+ malloc_lock();
+ struct malloc_pair *mp = find_malloced_pair(p);
+ assert(mp);
+ size_t size = mp->requested_size;
+ malloc_unlock();
+ return size;
+}
+
+#else
+
+void *
+os_malloc(size_t size)
+{
+ return malloc(size);
+}
+
+void *os_malloc_aligned(size_t alignment, size_t size)
+// Effect: Perform a malloc(size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
+// Requires: alignment is a power of two.
+{
+ void *p;
+ int r = posix_memalign(&p, alignment, size);
+ if (r != 0) {
+ errno = r;
+ p = nullptr;
+ }
+ return p;
+}
+
+void *
+os_realloc(void *p, size_t size)
+{
+ return realloc(p, size);
+}
+
+void * os_realloc_aligned(size_t alignment, void *p, size_t size)
+// Effect: Perform a realloc(p, size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
+// Requires: alignment is a power of two.
+{
+#if 1
+ if (p==NULL) {
+ return os_malloc_aligned(alignment, size);
+ } else {
+ void *newp = realloc(p, size);
+ if (0!=((long long)newp%alignment)) {
+ // it's not aligned, so align it ourselves.
+ void *newp2 = os_malloc_aligned(alignment, size);
+ memcpy(newp2, newp, size);
+ free(newp);
+ newp = newp2;
+ }
+ return newp;
+ }
+#else
+ // THIS STUFF SEEMS TO FAIL VALGRIND
+ if (p==NULL) {
+ return os_malloc_aligned(alignment, size);
+ } else {
+ size_t ignore;
+ int r = rallocm(&p, // returned pointer
+ &ignore, // actual size of returned object.
+ size, // the size we want
+ 0, // extra bytes to "try" to allocate at the end
+ ALLOCM_ALIGN(alignment));
+ if (r!=0) return NULL;
+ else return p;
+ }
+#endif
+}
+
+
+void
+os_free(void* p)
+{
+ free(p);
+}
+
+typedef size_t (*malloc_usable_size_fun_t)(const void *);
+static malloc_usable_size_fun_t malloc_usable_size_f = NULL;
+
+size_t os_malloc_usable_size(const void *p) {
+ if (p==NULL) return 0;
+ if (!malloc_usable_size_f) {
+ malloc_usable_size_f = (malloc_usable_size_fun_t) dlsym(RTLD_DEFAULT, "malloc_usable_size");
+ if (!malloc_usable_size_f) {
+ malloc_usable_size_f = (malloc_usable_size_fun_t) dlsym(RTLD_DEFAULT, "malloc_size"); // darwin
+ if (!malloc_usable_size_f) {
+ abort(); // couldn't find a malloc size function
+ }
+ }
+ }
+ return malloc_usable_size_f(p);
+}
+#endif
diff --git a/storage/tokudb/PerconaFT/portability/portability.cc b/storage/tokudb/PerconaFT/portability/portability.cc
new file mode 100644
index 00000000..556a34fb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/portability.cc
@@ -0,0 +1,477 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <portability/toku_config.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <string.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <toku_assert.h>
+#if defined(HAVE_MALLOC_H)
+# include <malloc.h>
+#elif defined(HAVE_SYS_MALLOC_H)
+# include <sys/malloc.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#if defined(HAVE_SYSCALL_H)
+# include <syscall.h>
+#endif
+#if defined(HAVE_SYS_SYSCALL_H)
+# include <sys/syscall.h>
+#endif
+#if defined(HAVE_SYS_SYSCTL_H) && !defined(_SC_PHYS_PAGES)
+# include <sys/sysctl.h>
+#endif
+#if defined(HAVE_PTHREAD_H)
+# include <pthread.h>
+#endif
+#if defined(HAVE_PTHREAD_NP_H)
+# include <pthread_np.h>
+#endif
+#include <inttypes.h>
+#include <sys/time.h>
+#if defined(HAVE_SYS_RESOURCE_H)
+# include <sys/resource.h>
+#endif
+#include <sys/statvfs.h>
+#include "toku_portability.h"
+#include "toku_os.h"
+#include "toku_time.h"
+#include "memory.h"
+
+#include "toku_instrumentation.h"
+
+#include <portability/toku_atomic.h>
+#include <util/partitioned_counter.h>
+
+int
+toku_portability_init(void) {
+ int r = toku_memory_startup();
+ assert(r==0);
+ if (r == 0) {
+ uint64_t hz;
+ r = toku_os_get_processor_frequency(&hz); // get and cache freq
+ assert(r==0);
+ }
+ (void) toku_os_get_pagesize(); // get and cache pagesize
+ return r;
+}
+
+void
+toku_portability_destroy(void) {
+ toku_memory_shutdown();
+}
+
+int
+toku_os_getpid(void) {
+ return getpid();
+}
+
+int
+toku_os_gettid(void) {
+#if defined(HAVE_PTHREAD_THREADID_NP)
+ uint64_t result;
+ pthread_threadid_np(NULL, &result);
+ return (int) result; // Used for instrumentation so overflow is ok here.
+#elif defined(__NR_gettid)
+ return syscall(__NR_gettid);
+#elif defined(SYS_gettid)
+ return syscall(SYS_gettid);
+#elif defined(HAVE_PTHREAD_GETTHREADID_NP)
+ return pthread_getthreadid_np();
+#else
+# error "no implementation of gettid available"
+#endif
+}
+
+int
+toku_os_get_number_processors(void) {
+ return sysconf(_SC_NPROCESSORS_CONF);
+}
+
+int
+toku_os_get_number_active_processors(void) {
+ int n = sysconf(_SC_NPROCESSORS_ONLN);
+#define DO_TOKU_NCPUS 1
+#if DO_TOKU_NCPUS
+ {
+ char *toku_ncpus = getenv("TOKU_NCPUS");
+ if (toku_ncpus) {
+ int ncpus = atoi(toku_ncpus);
+ if (ncpus < n)
+ n = ncpus;
+ }
+ }
+#endif
+ return n;
+}
+
+int toku_cached_pagesize = 0;
+
+int
+toku_os_get_pagesize(void) {
+ int pagesize = toku_cached_pagesize;
+ if (pagesize == 0) {
+ pagesize = sysconf(_SC_PAGESIZE);
+ if (pagesize) {
+ toku_cached_pagesize = pagesize;
+ }
+ }
+ return pagesize;
+}
+
+uint64_t
+toku_os_get_phys_memory_size(void) {
+#if defined(_SC_PHYS_PAGES)
+ uint64_t npages = sysconf(_SC_PHYS_PAGES);
+ uint64_t pagesize = sysconf(_SC_PAGESIZE);
+ return npages*pagesize;
+#elif defined(HAVE_SYS_SYSCTL_H)
+ uint64_t memsize;
+ size_t len = sizeof memsize;
+ sysctlbyname("hw.memsize", &memsize, &len, NULL, 0);
+ return memsize;
+#else
+# error "cannot find _SC_PHYS_PAGES or sysctlbyname()"
+#endif
+}
+
+int toku_os_get_file_size_with_source_location(int fildes,
+ int64_t *fsize,
+ const char *src_file,
+ uint src_line) {
+ toku_struct_stat sbuf;
+
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_io_begin(io_annotation,
+ toku_instr_file_op::file_stat,
+ fildes,
+ 0,
+ src_file,
+ src_line);
+
+ int r = fstat(fildes, &sbuf);
+ if (r == 0) {
+ *fsize = sbuf.st_size;
+ }
+ toku_instr_file_io_end(io_annotation, 0);
+
+ return r;
+}
+
+int
+toku_os_get_unique_file_id(int fildes, struct fileid *id) {
+ toku_struct_stat statbuf;
+ memset(id, 0, sizeof(*id));
+ int r=fstat(fildes, &statbuf);
+ if (r==0) {
+ id->st_dev = statbuf.st_dev;
+ id->st_ino = statbuf.st_ino;
+ }
+ return r;
+}
+
+int
+toku_os_lock_file(const char *name) {
+ int r;
+ int fd = open(name, O_RDWR|O_CREAT, S_IRUSR | S_IWUSR);
+ if (fd>=0) {
+ r = flock(fd, LOCK_EX | LOCK_NB);
+ if (r!=0) {
+ r = errno; //Save errno from flock.
+ close(fd);
+ fd = -1; //Disable fd.
+ errno = r;
+ }
+ }
+ return fd;
+}
+
+int
+toku_os_unlock_file(int fildes) {
+ int r = flock(fildes, LOCK_UN);
+ if (r==0) r = close(fildes);
+ return r;
+}
+
+int
+toku_os_mkdir(const char *pathname, mode_t mode) {
+ int r = mkdir(pathname, mode);
+ return r;
+}
+
+int
+toku_os_get_process_times(struct timeval *usertime, struct timeval *kerneltime) {
+ int r;
+ struct rusage rusage;
+ r = getrusage(RUSAGE_SELF, &rusage);
+ if (r == -1)
+ return get_error_errno();
+ if (usertime)
+ *usertime = rusage.ru_utime;
+ if (kerneltime)
+ *kerneltime = rusage.ru_stime;
+ return 0;
+}
+
+int
+toku_os_initialize_settings(int UU(verbosity)) {
+ int r = 0;
+ static int initialized = 0;
+ assert(initialized==0);
+ initialized=1;
+ return r;
+}
+
+bool toku_os_is_absolute_name(const char* path) {
+ return path[0] == '/';
+}
+
+int
+toku_os_get_max_process_data_size(uint64_t *maxdata) {
+ int r;
+ struct rlimit rlimit;
+
+ r = getrlimit(RLIMIT_DATA, &rlimit);
+ if (r == 0) {
+ uint64_t d;
+ d = rlimit.rlim_max;
+ // with the "right" macros defined, the rlimit is a 64 bit number on a
+ // 32 bit system. getrlimit returns 2**64-1 which is clearly wrong.
+
+ // for 32 bit processes, we assume that 1/2 of the address space is
+ // used for mapping the kernel. this may be pessimistic.
+ if (sizeof (void *) == 4 && d > (1ULL << 31))
+ d = 1ULL << 31;
+ *maxdata = d;
+ } else
+ r = get_error_errno();
+ return r;
+}
+
+int toku_stat_with_source_location(const char *name,
+ toku_struct_stat *buf,
+ const toku_instr_key &instr_key,
+ const char *src_file,
+ uint src_line) {
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_name_io_begin(io_annotation,
+ instr_key,
+ toku_instr_file_op::file_stat,
+ name,
+ 0,
+ src_file,
+ src_line);
+ int r = stat(name, buf);
+
+ toku_instr_file_io_end(io_annotation, 0);
+ return r;
+}
+
+int toku_os_fstat_with_source_location(int fd,
+ toku_struct_stat *buf,
+ const char *src_file,
+ uint src_line) {
+ toku_io_instrumentation io_annotation;
+ toku_instr_file_io_begin(io_annotation,
+ toku_instr_file_op::file_stat,
+ fd,
+ 0,
+ src_file,
+ src_line);
+
+ int r = fstat(fd, buf);
+ toku_instr_file_io_end(io_annotation, 0);
+ return r;
+}
+
+static int
+toku_get_processor_frequency_sys(uint64_t *hzret) {
+ int r;
+ FILE *fp = fopen("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", "r");
+ if (!fp)
+ r = get_error_errno();
+ else {
+ unsigned int khz = 0;
+ if (fscanf(fp, "%u", &khz) == 1) {
+ *hzret = khz * 1000ULL;
+ r = 0;
+ } else
+ r = ENOENT;
+ fclose(fp);
+ }
+ return r;
+}
+
+static int
+toku_get_processor_frequency_cpuinfo(uint64_t *hzret) {
+ int r;
+ FILE *fp = fopen("/proc/cpuinfo", "r");
+ if (!fp) {
+ r = get_error_errno();
+ } else {
+ uint64_t maxhz = 0;
+ /*
+ Some lines in the "/proc/cpuinfo" output can be long, e.g.:
+ "flags : fpu vme de pse tsc ms .... smep erms"
+ In case a line does not fit into "buf", it will be read
+ in parts by multiple "fgets" calls. This is ok, as
+      it is very unlikely that a non-leading substring of a line
+      will match the pattern "processor : %u" again.
+ */
+ char buf[512];
+ while (fgets(buf, (int) sizeof(buf), fp) != NULL) {
+ unsigned int cpu;
+ sscanf(buf, "processor : %u", &cpu);
+ unsigned int ma, mb;
+ if (sscanf(buf, "cpu MHz : %u.%u", &ma, &mb) == 2) {
+ uint64_t hz = ma * 1000000ULL + mb * 1000ULL;
+ if (hz > maxhz)
+ maxhz = hz;
+ }
+ }
+ fclose(fp);
+ *hzret = maxhz;
+        r = maxhz == 0 ? ENOENT : 0;
+ }
+ return r;
+}
+
+static int
+toku_get_processor_frequency_sysctl(const char * const cmd, uint64_t *hzret) {
+ int r = 0;
+ FILE *fp = popen(cmd, "r");
+ if (!fp) {
+ r = EINVAL; // popen doesn't return anything useful in errno,
+ // gotta pick something
+ } else {
+ r = fscanf(fp, "%" SCNu64, hzret);
+ if (r != 1) {
+ r = get_maybe_error_errno();
+ } else {
+ r = 0;
+ }
+ pclose(fp);
+ }
+ return r;
+}
+
+static uint64_t toku_cached_hz; // cache the value of hz so that we avoid opening files to compute it later
+
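+// Resolve the processor frequency once and cache it: try the sysfs cpufreq file
+// first, then /proc/cpuinfo, then the sysctl commands used on macOS/BSD systems.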
+int
+toku_os_get_processor_frequency(uint64_t *hzret) {
+ int r;
+ if (toku_cached_hz) {
+ *hzret = toku_cached_hz;
+ r = 0;
+ } else {
+ r = toku_get_processor_frequency_sys(hzret);
+ if (r != 0)
+ r = toku_get_processor_frequency_cpuinfo(hzret);
+ if (r != 0)
+ r = toku_get_processor_frequency_sysctl("sysctl -n hw.cpufrequency", hzret);
+ if (r != 0)
+ r = toku_get_processor_frequency_sysctl("sysctl -n machdep.tsc_freq", hzret);
+ if (r == 0)
+ toku_cached_hz = *hzret;
+ }
+ return r;
+}
+
+int
+toku_get_filesystem_sizes(const char *path, uint64_t *avail_size, uint64_t *free_size, uint64_t *total_size) {
+ struct statvfs s;
+ int r = statvfs(path, &s);
+ if (r == -1) {
+ r = get_error_errno();
+ } else {
+ // get the block size in bytes
+ uint64_t bsize = s.f_frsize ? s.f_frsize : s.f_bsize;
+ // convert blocks to bytes
+ if (avail_size)
+ *avail_size = (uint64_t) s.f_bavail * bsize;
+ if (free_size)
+ *free_size = (uint64_t) s.f_bfree * bsize;
+ if (total_size)
+ *total_size = (uint64_t) s.f_blocks * bsize;
+ }
+ return r;
+}
+
+
+int
+toku_dup2(int fd, int fd2) {
+ int r;
+ r = dup2(fd, fd2);
+ return r;
+}
+
+
+// Time
+static double seconds_per_clock = -1;
+
+double tokutime_to_seconds(tokutime_t t) {
+ // Convert tokutime to seconds.
+ if (seconds_per_clock<0) {
+ uint64_t hz;
+ int r = toku_os_get_processor_frequency(&hz);
+ assert(r==0);
+ // There's a race condition here, but it doesn't really matter. If two threads call tokutime_to_seconds
+ // for the first time at the same time, then both will fetch the value and set the same value.
+ seconds_per_clock = 1.0/hz;
+ }
+ return t*seconds_per_clock;
+}
+
+#include <toku_race_tools.h>
+void __attribute__((constructor)) toku_portability_helgrind_ignore(void);
+void
+toku_portability_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&toku_cached_hz, sizeof toku_cached_hz);
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&toku_cached_pagesize,
+ sizeof toku_cached_pagesize);
+}
+
+static const pfs_key_t pfs_not_instrumented = 0xFFFFFFFF;
+toku_instr_key toku_uninstrumented(pfs_not_instrumented);
diff --git a/storage/tokudb/PerconaFT/portability/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/portability/tests/CMakeLists.txt
new file mode 100644
index 00000000..ff233327
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/CMakeLists.txt
@@ -0,0 +1,50 @@
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE)
+
+if(BUILD_TESTING)
+ function(add_portability_test bin)
+ add_toku_test(portability ${bin} ${ARGN})
+ endfunction(add_portability_test)
+
+ file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc)
+
+ foreach(src ${srcs})
+ get_filename_component(test ${src} NAME_WE)
+ add_executable(${test} ${test}.cc)
+ target_link_libraries(${test} ${LIBTOKUPORTABILITY})
+ set_target_properties(${test} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+ add_space_separated_property(TARGET ${test} COMPILE_FLAGS -fvisibility=hidden)
+ list(APPEND tests ${test})
+ endforeach(src)
+
+ include(CheckCCompilerFlag)
+ check_c_compiler_flag(-Wno-unused-result HAVE_WNO_UNUSED_RESULT)
+ if (HAVE_WNO_UNUSED_RESULT)
+ add_space_separated_property(SOURCE try-leak-lost COMPILE_FLAGS -Wno-unused-result)
+ endif ()
+ check_c_compiler_flag(-Wno-maybe-uninitialized HAVE_WNO_MAYBE_UNINITIALIZED)
+ if (HAVE_WNO_MAYBE_UNINITIALIZED)
+ add_space_separated_property(SOURCE try-uninit COMPILE_FLAGS -Wno-maybe-uninitialized)
+ add_space_separated_property(TARGET try-uninit LINK_FLAGS -Wno-maybe-uninitialized)
+ endif ()
+
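+  # The three tests below intentionally trigger valgrind errors; run them through
+  # ensure_memcheck_fails.sh so that each test passes only when valgrind complains.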
+ configure_file(ensure_memcheck_fails.sh . COPYONLY)
+ foreach(test try-leak-lost try-leak-reachable try-uninit)
+ list(REMOVE_ITEM tests ${test})
+
+ add_test(NAME portability/${test}
+ COMMAND ensure_memcheck_fails.sh $<TARGET_FILE:${test}> valgrind --error-exitcode=1 --quiet --leak-check=full --show-reachable=yes --trace-children=yes --trace-children-skip=sh,*/sh,rm,*/rm,cp,*/cp,mv,*/mv,cat,*/cat,diff,test,wc,*/wc)
+ setup_toku_test_properties(portability/${test} ${test})
+ endforeach(test)
+
+ foreach(test ${tests})
+ add_portability_test(${test})
+ endforeach(test)
+
+ set(portability_tests_should_fail
+ portability/try-assert0
+ portability/try-assert-zero
+ )
+
+ set_tests_properties(${portability_tests_should_fail} PROPERTIES WILL_FAIL TRUE)
+endif(BUILD_TESTING)
diff --git a/storage/tokudb/PerconaFT/portability/tests/ensure_memcheck_fails.sh b/storage/tokudb/PerconaFT/portability/tests/ensure_memcheck_fails.sh
new file mode 100644
index 00000000..9b3f50f8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/ensure_memcheck_fails.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
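+# Run the given binary under valgrind and exit 0 only if valgrind reports a problem
+# (non-zero exit status or a non-empty log); exit 1 if the run comes back clean.
+# Callers use this for tests that are expected to fail memcheck.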
+test $# -ge 1 || exit 1
+
+bin=$1; shift
+valgrind=$@
+
+$valgrind --log-file=$bin.check.valgrind $bin >$bin.check.output 2>&1
+if [[ $? = 0 ]]
+then
+ lines=$(cat $bin.check.valgrind | wc -l)
+  if [[ $lines -ne 0 ]]
+ then
+ cat $bin.check.valgrind
+ exit 0
+ else
+ exit 1
+ fi
+else
+ exit 0
+fi
diff --git a/storage/tokudb/PerconaFT/portability/tests/rwlock_condvar.h b/storage/tokudb/PerconaFT/portability/tests/rwlock_condvar.h
new file mode 100644
index 00000000..d1ebc81e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/rwlock_condvar.h
@@ -0,0 +1,193 @@
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Fair readers writer lock implemented using condition variables.
+ * This is maintained so that we can measure the performance of a relatively simple implementation (this one)
+ * compared to a fast one that uses compare-and-swap (the one in ../toku_rwlock.c)
+ */
+
+
+// Fair readers/writer locks. These are fair (first-come, first-served: no reader starvation and no writer starvation),
+// and they are probably faster than the Linux readers/writer locks (pthread_rwlock_t).
+struct toku_cv_fair_rwlock_waiter_state; // this structure is used internally.
+typedef struct toku_cv_fair_rwlock_s {
+ toku_mutex_t mutex;
+ int state; // 0 means no locks, + is number of readers locked, -1 is a writer
+ struct toku_cv_fair_rwlock_waiter_state *waiters_head, *waiters_tail;
+} toku_cv_fair_rwlock_t;
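+// Fairness comes from a FIFO queue of per-thread waiter states, each with its own
+// condition variable: unlock signals only the waiter at the head of the queue, and
+// a reader that acquires the lock signals the next waiter if it is also a reader,
+// so a run of readers at the front of the queue can share the lock while a queued
+// writer waits for the reader count to drop to zero.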
+
+int toku_cv_fair_rwlock_init (toku_cv_fair_rwlock_t *rwlock);
+int toku_cv_fair_rwlock_destroy (toku_cv_fair_rwlock_t *rwlock);
+int toku_cv_fair_rwlock_rdlock (toku_cv_fair_rwlock_t *rwlock);
+int toku_cv_fair_rwlock_wrlock (toku_cv_fair_rwlock_t *rwlock);
+int toku_cv_fair_rwlock_unlock (toku_cv_fair_rwlock_t *rwlock);
+
+struct toku_cv_fair_rwlock_waiter_state {
+ char is_read;
+ struct toku_cv_fair_rwlock_waiter_state *next;
+ toku_cond_t cond;
+};
+
+#if defined(HAVE_GNU_TLS)
+static __thread struct toku_cv_fair_rwlock_waiter_state waitstate_var = {0, NULL, { PTHREAD_COND_INITIALIZER } };
+#define GET_WAITSTATE(name) name = &waitstate_var
+#else
+#include <memory.h>
+static pthread_key_t waitstate_key;
+static bool waitstate_key_initialized = false;
+
+void
+toku_rwlock_init(void)
+{
+ assert(!waitstate_key_initialized);
+ int r = toku_pthread_key_create(&waitstate_key, toku_free);
+ assert_zero(r);
+ waitstate_key_initialized = true;
+}
+
+void
+toku_rwlock_destroy(void)
+{
+ assert(waitstate_key_initialized);
+ int r = toku_pthread_key_delete(waitstate_key);
+ assert_zero(r);
+ waitstate_key_initialized = false;
+}
+
+static struct toku_cv_fair_rwlock_waiter_state *
+get_waitstate(void)
+{
+ assert(waitstate_key_initialized);
+ struct toku_cv_fair_rwlock_waiter_state *waitstate = NULL;
+ void *p = toku_pthread_getspecific(waitstate_key);
+ if (!p) {
+ p = toku_xmalloc(sizeof *waitstate);
+ int r = toku_pthread_setspecific(waitstate_key, p);
+ assert_zero(r);
+ }
+ waitstate = static_cast<struct toku_cv_fair_rwlock_waiter_state *>(p);
+ return waitstate;
+}
+
+#define GET_WAITSTATE(name) name = get_waitstate()
+#endif
+
+int toku_cv_fair_rwlock_init (toku_cv_fair_rwlock_t *rwlock) {
+ rwlock->state = 0;
+ rwlock->waiters_head = NULL;
+ rwlock->waiters_tail = NULL;
+ toku_mutex_init(toku_uninstrumented, &rwlock->mutex, nullptr);
+ return 0;
+}
+
+int toku_cv_fair_rwlock_destroy (toku_cv_fair_rwlock_t *rwlock) {
+ toku_mutex_destroy(&rwlock->mutex);
+ return 0;
+}
+
+int toku_cv_fair_rwlock_rdlock (toku_cv_fair_rwlock_t *rwlock) {
+ struct toku_cv_fair_rwlock_waiter_state *GET_WAITSTATE(waitstate);
+ toku_mutex_lock(&rwlock->mutex);
+ if (rwlock->waiters_head!=NULL || rwlock->state<0) {
+ // Someone is ahead of me in the queue, or someone has a lock.
+ // We use per-thread-state for the condition variable. A thread cannot get control and try to reuse the waiter state for something else.
+ if (rwlock->waiters_tail) {
+ rwlock->waiters_tail->next = waitstate;
+ } else {
+ rwlock->waiters_head = waitstate;
+ }
+ rwlock->waiters_tail = waitstate;
+ waitstate->next = NULL;
+ waitstate->is_read = 1;
+ do {
+ toku_cond_wait(&waitstate->cond, &rwlock->mutex);
+ } while (rwlock->waiters_head!=waitstate || rwlock->state<0);
+ rwlock->state++;
+ rwlock->waiters_head=waitstate->next;
+ if (waitstate->next==NULL) rwlock->waiters_tail=NULL;
+ if (rwlock->waiters_head && rwlock->waiters_head->is_read) {
+ toku_cond_signal(&rwlock->waiters_head->cond);
+ }
+ } else {
+ // No one is waiting, and any holders are readers.
+ rwlock->state++;
+ }
+ toku_mutex_unlock(&rwlock->mutex);
+ return 0;
+}
+
+int toku_cv_fair_rwlock_wrlock (toku_cv_fair_rwlock_t *rwlock) {
+ struct toku_cv_fair_rwlock_waiter_state *GET_WAITSTATE(waitstate);
+ toku_mutex_lock(&rwlock->mutex);
+ if (rwlock->waiters_head!=NULL || rwlock->state!=0) {
+        // Someone else is ahead of me, or someone holds the lock, so we must wait our turn.
+ if (rwlock->waiters_tail) {
+ rwlock->waiters_tail->next = waitstate;
+ } else {
+ rwlock->waiters_head = waitstate;
+ }
+ rwlock->waiters_tail = waitstate;
+ waitstate->next = NULL;
+ waitstate->is_read = 0;
+ do {
+ toku_cond_wait(&waitstate->cond, &rwlock->mutex);
+ } while (rwlock->waiters_head!=waitstate || rwlock->state!=0);
+ rwlock->waiters_head = waitstate->next;
+ if (waitstate->next==NULL) rwlock->waiters_tail=NULL;
+ }
+ rwlock->state = -1;
+ toku_mutex_unlock(&rwlock->mutex);
+ return 0;
+}
+
+int toku_cv_fair_rwlock_unlock (toku_cv_fair_rwlock_t *rwlock) {
+ toku_mutex_lock(&rwlock->mutex);
+ assert(rwlock->state!=0);
+ if (rwlock->state>0) {
+ rwlock->state--;
+ } else {
+ rwlock->state=0;
+ }
+ if (rwlock->state==0 && rwlock->waiters_head) {
+ toku_cond_signal(&rwlock->waiters_head->cond);
+ } else {
+ // printf(" No one to wake\n");
+ }
+ toku_mutex_unlock(&rwlock->mutex);
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-active-cpus.cc b/storage/tokudb/PerconaFT/portability/tests/test-active-cpus.cc
new file mode 100644
index 00000000..1b8764a1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-active-cpus.cc
@@ -0,0 +1,65 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <toku_stdint.h>
+#include <unistd.h>
+#include <toku_assert.h>
+#include "toku_os.h"
+
+int main(void) {
+ int r;
+ r = unsetenv("TOKU_NCPUS");
+ assert(r == 0);
+
+ int max_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ assert(toku_os_get_number_active_processors() == max_cpus);
+
+ // change the TOKU_NCPUS env variable and verify that the correct number is computed
+ for (int ncpus = 1; ncpus <= max_cpus; ncpus++) {
+ char ncpus_str[32];
+ sprintf(ncpus_str, "%d", ncpus);
+ r = setenv("TOKU_NCPUS", ncpus_str, 1);
+ assert(r == 0);
+
+ assert(toku_os_get_number_active_processors() == ncpus);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-cache-line-boundary-fails.cc b/storage/tokudb/PerconaFT/portability/tests/test-cache-line-boundary-fails.cc
new file mode 100644
index 00000000..64f6407d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-cache-line-boundary-fails.cc
@@ -0,0 +1,122 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <portability/toku_config.h>
+#include <portability/toku_atomic.h>
+
+#include <memory.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+
+#include "test.h"
+
+int verbose = 0;
+
+static const size_t cachelinesize = 64;
+
+// cache line is 64 bytes
+// nine 7-byte structs fill 63 bytes
+// the tenth spans one byte of the first cache line and six of the next cache line
+// we first do toku_sync_fetch_and_add on the first 9 structs and ensure we don't crash, then we set a signal handler, do the same on the 10th, and ensure we do crash
+
+struct unpackedsevenbytestruct {
+ uint32_t i;
+ char pad[3];
+};
+struct __attribute__((packed)) packedsevenbytestruct {
+ uint32_t i;
+ char pad[3];
+};
+
+struct packedsevenbytestruct *psevenbytestructs;
+static __attribute__((__noreturn__)) void catch_abort (int sig __attribute__((__unused__))) {
+ toku_free(psevenbytestructs);
+#ifdef TOKU_DEBUG_PARANOID
+ exit(EXIT_SUCCESS); // with paranoid asserts, we expect to assert and reach this handler
+#else
+ exit(EXIT_FAILURE); // we should not have crashed without paranoid asserts
+#endif
+}
+
+int test_main(int UU(argc), char *const argv[] UU()) {
+ if (sizeof(unpackedsevenbytestruct) != 8) {
+ exit(EXIT_FAILURE);
+ }
+ if (sizeof(packedsevenbytestruct) != 7) {
+ exit(EXIT_FAILURE);
+ }
+
+ {
+ struct unpackedsevenbytestruct *MALLOC_N_ALIGNED(cachelinesize, 10, usevenbytestructs);
+ if (usevenbytestructs == NULL) {
+ // this test is supposed to crash, so exiting cleanly is a failure
+ perror("posix_memalign");
+ exit(EXIT_FAILURE);
+ }
+
+ for (int idx = 0; idx < 10; ++idx) {
+ usevenbytestructs[idx].i = idx + 1;
+ (void) toku_sync_fetch_and_add(&usevenbytestructs[idx].i, 32U - idx);
+ }
+ toku_free(usevenbytestructs);
+ }
+
+
+ MALLOC_N_ALIGNED(cachelinesize, 10, psevenbytestructs);
+ if (psevenbytestructs == NULL) {
+ // this test is supposed to crash, so exiting cleanly is a failure
+ perror("posix_memalign");
+ exit(EXIT_FAILURE);
+ }
+
+ for (int idx = 0; idx < 9; ++idx) {
+ psevenbytestructs[idx].i = idx + 1;
+ (void) toku_sync_fetch_and_add(&psevenbytestructs[idx].i, 32U - idx);
+ }
+ psevenbytestructs[9].i = 10;
+ signal(SIGABRT, catch_abort);
+ (void) toku_sync_fetch_and_add(&psevenbytestructs[9].i, 32U);
+
+#ifdef TOKU_DEBUG_PARANOID
+ exit(EXIT_FAILURE); // with paranoid asserts, we should already have crashed
+#else
+ exit(EXIT_SUCCESS); // without them, we should make it here
+#endif
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-cpu-freq-openlimit17.cc b/storage/tokudb/PerconaFT/portability/tests/test-cpu-freq-openlimit17.cc
new file mode 100644
index 00000000..1ca756df
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-cpu-freq-openlimit17.cc
@@ -0,0 +1,68 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <toku_assert.h>
+#include <toku_stdint.h>
+#include <toku_os.h>
+
+// verify that we can compute processor frequency even when out of file descriptors.
+
+int verbose = 0;
+
+static void run_test(void) {
+ uint64_t cpuhz;
+ int r = toku_os_get_processor_frequency(&cpuhz);
+ assert(r == 0);
+ if (verbose) {
+ printf("%" PRIu64 "\n", cpuhz);
+ }
+ assert(cpuhz>100000000);
+}
+
+int main(void) {
+ run_test();
+ while (1) {
+ int fd = open("/dev/null", O_RDONLY);
+ if (fd < 0)
+ break;
+ }
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-cpu-freq.cc b/storage/tokudb/PerconaFT/portability/tests/test-cpu-freq.cc
new file mode 100644
index 00000000..c21f3aed
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-cpu-freq.cc
@@ -0,0 +1,55 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <toku_assert.h>
+#include <toku_stdint.h>
+#include <toku_os.h>
+
+int verbose = 0;
+
+int main(void) {
+ uint64_t cpuhz;
+ int r = toku_os_get_processor_frequency(&cpuhz);
+ assert(r == 0);
+ if (verbose) {
+ printf("%" PRIu64 "\n", cpuhz);
+ }
+ assert(cpuhz>100000000);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-filesystem-sizes.cc b/storage/tokudb/PerconaFT/portability/tests/test-filesystem-sizes.cc
new file mode 100644
index 00000000..0ea7b1ed
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-filesystem-sizes.cc
@@ -0,0 +1,87 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#define _CRT_SECURE_NO_DEPRECATE
+#include <test.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <toku_stdint.h>
+#include <unistd.h>
+#include <toku_assert.h>
+#include "toku_os.h"
+
+int test_main(int argc, char *const argv[]) {
+ int verbose = 0;
+ int limit = 1;
+
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose = 1;
+ continue;
+ }
+ if (strcmp(argv[i], "--timeit") == 0) {
+ limit = 100000;
+ continue;
+ }
+ }
+
+ int r;
+
+#if 0
+ r = toku_get_filesystem_sizes(NULL, NULL, NULL, NULL);
+ assert(r == EFAULT);
+#endif
+
+ r = toku_get_filesystem_sizes(".", NULL, NULL, NULL);
+ assert(r == 0);
+
+ uint64_t free_size = 0, avail_size = 0, total_size = 0;
+ for (int i = 0; i < limit; i++) {
+ r = toku_get_filesystem_sizes(".", &avail_size, &free_size, &total_size);
+ assert(r == 0);
+ assert(avail_size <= free_size && free_size <= total_size);
+ }
+ if (verbose) {
+ printf("avail=%" PRIu64 "\n", avail_size);
+ printf("free=%" PRIu64 "\n", free_size);
+ printf("total=%" PRIu64 "\n", total_size);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-flock.cc b/storage/tokudb/PerconaFT/portability/tests/test-flock.cc
new file mode 100644
index 00000000..e135da9a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-flock.cc
@@ -0,0 +1,67 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <toku_assert.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/wait.h>
+#include "toku_portability.h"
+#include <portability/toku_path.h>
+
+int main(void) {
+ int fd = toku_os_lock_file(TOKU_TEST_FILENAME);
+ assert(fd != -1);
+ pid_t pid = fork();
+ assert(pid != -1);
+ if (pid == 0) {
+ int fd2 = toku_os_lock_file(TOKU_TEST_FILENAME);
+ assert(fd2 == -1);
+ return 0;
+ } else {
+ int status;
+ pid_t wpid = waitpid(-1, &status, 0);
+ assert(wpid == pid);
+ assert(status == 0);
+ }
+
+ int r = toku_os_unlock_file(fd);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-fsync-directory.cc b/storage/tokudb/PerconaFT/portability/tests/test-fsync-directory.cc
new file mode 100644
index 00000000..ba735585
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-fsync-directory.cc
@@ -0,0 +1,77 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// use strace to verify that the toku_fsync_directory function works
+
+#include <stdlib.h>
+#include <string.h>
+#include "test.h"
+#include <portability/toku_path.h>
+#include <limits.h>
+
+static int verbose = 0;
+
+int test_main(int argc, char *const argv[]) {
+ int r;
+
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ if (verbose < 0) verbose = 0;
+ verbose++;
+ continue;
+ } else if (strcmp(argv[i], "-q") == 0) {
+ verbose = 0;
+ continue;
+ } else {
+ exit(1);
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ char buf[TOKU_PATH_MAX + 1];
+ r = toku_os_mkdir(toku_path_join(buf, 2, TOKU_TEST_FILENAME, "test"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = toku_fsync_directory(""); CKERR(r);
+ r = toku_fsync_directory("."); CKERR(r);
+ r = toku_fsync_directory(toku_path_join(buf, 3, TOKU_TEST_FILENAME, "test", "a")); CKERR(r);
+ r = toku_fsync_directory(toku_path_join(buf, 4, ".", TOKU_TEST_FILENAME, "test", "a")); CKERR(r);
+ r = toku_fsync_directory("/tmp/x"); CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-fsync.cc b/storage/tokudb/PerconaFT/portability/tests/test-fsync.cc
new file mode 100644
index 00000000..264a731d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-fsync.cc
@@ -0,0 +1,271 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include "test.h"
+#include "toku_time.h"
+#include <portability/toku_path.h>
+
+
+int verbose = 0;
+static void
+create_files(int N, int fds[/*N*/]) {
+ int r;
+ int i;
+ char name[30];
+ for (i = 0; i < N; i++) {
+ snprintf(name, sizeof(name), "%d", i);
+ fds[i] = open(name, O_CREAT|O_WRONLY, 0644);
+ if (fds[i] < 0) {
+ r = get_error_errno();
+ CKERR(r);
+ }
+ }
+}
+
+static void
+write_to_files(int N, int bytes, int fds[/*N*/]) {
+ char junk[bytes];
+
+ int i;
+ for (i = 0; i < bytes; i++) {
+ junk[i] = random() & 0xFF;
+ }
+
+ int r;
+ for (i = 0; i < N; i++) {
+ r = toku_os_write(fds[i], junk, bytes);
+ CKERR(r);
+ }
+}
+
+static void
+time_many_fsyncs_one_file(int N, int bytes, int fds[/*N*/]) {
+ if (verbose>1) {
+ printf("Starting %s\n", __FUNCTION__);
+ fflush(stdout);
+ }
+ struct timeval begin;
+ struct timeval after_first;
+ struct timeval end;
+ write_to_files(1, bytes, fds);
+ if (verbose>1) {
+ printf("Done writing to os buffers\n");
+ fflush(stdout);
+ }
+ int i;
+ int r;
+
+ r = gettimeofday(&begin, NULL);
+ CKERR(r);
+ r = fsync(fds[0]);
+ CKERR(r);
+ r = gettimeofday(&after_first, NULL);
+ CKERR(r);
+ for (i = 0; i < N; i++) {
+ r = fsync(fds[0]);
+ CKERR(r);
+ }
+ r = gettimeofday(&end, NULL);
+ CKERR(r);
+
+ if (verbose) {
+ printf("Fsyncing one file %d times:\n"
+ "\tFirst fsync took: [%f] seconds\n"
+ "\tRemaining %d fsyncs took additional: [%f] seconds\n"
+ "\tTotal time [%f] seconds\n",
+ N + 1,
+ toku_tdiff(&after_first, &begin),
+ N,
+ toku_tdiff(&end, &after_first),
+ toku_tdiff(&end, &begin));
+ fflush(stdout);
+ }
+}
+
+static void
+time_fsyncs_many_files(int N, int bytes, int fds[/*N*/]) {
+ if (verbose>1) {
+ printf("Starting %s\n", __FUNCTION__);
+ fflush(stdout);
+ }
+ write_to_files(N, bytes, fds);
+ if (verbose>1) {
+ printf("Done writing to os buffers\n");
+ fflush(stdout);
+ }
+ struct timeval begin;
+ struct timeval after_first;
+ struct timeval end;
+ int i;
+ int r;
+
+ r = gettimeofday(&begin, NULL);
+ CKERR(r);
+ for (i = 0; i < N; i++) {
+ r = fsync(fds[i]);
+ CKERR(r);
+ if (i==0) {
+ r = gettimeofday(&after_first, NULL);
+ CKERR(r);
+ }
+ if (verbose>2) {
+ printf("Done fsyncing %d\n", i);
+ fflush(stdout);
+ }
+ }
+ r = gettimeofday(&end, NULL);
+ CKERR(r);
+ if (verbose) {
+ printf("Fsyncing %d files:\n"
+ "\tFirst fsync took: [%f] seconds\n"
+ "\tRemaining %d fsyncs took additional: [%f] seconds\n"
+ "\tTotal time [%f] seconds\n",
+ N,
+ toku_tdiff(&after_first, &begin),
+ N-1,
+ toku_tdiff(&end, &after_first),
+ toku_tdiff(&end, &begin));
+ fflush(stdout);
+ }
+}
+
+static void
+time_sync_fsyncs_many_files(int N, int bytes, int fds[/*N*/]) {
+ if (verbose>1) {
+ printf("Starting %s\n", __FUNCTION__);
+ fflush(stdout);
+ }
+ //TODO: timing
+ write_to_files(N, bytes, fds);
+ if (verbose>1) {
+ printf("Done writing to os buffers\n");
+ fflush(stdout);
+ }
+ int i;
+ int r;
+ struct timeval begin;
+ struct timeval after_sync;
+ struct timeval end;
+
+ r = gettimeofday(&begin, NULL);
+ CKERR(r);
+
+ sync();
+
+ r = gettimeofday(&after_sync, NULL);
+ CKERR(r);
+ if (verbose>1) {
+ printf("Done with sync()\n");
+ fflush(stdout);
+ }
+
+ for (i = 0; i < N; i++) {
+ r = fsync(fds[i]);
+ CKERR(r);
+ if (verbose>2) {
+ printf("Done fsyncing %d\n", i);
+ fflush(stdout);
+ }
+ }
+ r = gettimeofday(&end, NULL);
+ CKERR(r);
+
+ if (verbose) {
+ printf("sync() then fsyncing %d files:\n"
+ "\tsync() took: [%f] seconds\n"
+ "\tRemaining %d fsyncs took additional: [%f] seconds\n"
+ "\tTotal time [%f] seconds\n",
+ N,
+ toku_tdiff(&after_sync, &begin),
+ N,
+ toku_tdiff(&end, &after_sync),
+ toku_tdiff(&end, &begin));
+ fflush(stdout);
+ }
+}
+
+int test_main(int argc, char *const argv[]) {
+ int i;
+ int r;
+ int N = 1000;
+ int bytes = 4096;
+ for (i=1; i<argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ if (verbose < 0) verbose = 0;
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(argv[i], "-b") == 0) {
+ i++;
+ if (i>=argc) exit(1);
+ bytes = atoi(argv[i]);
+ if (bytes <= 0) exit(1);
+ continue;
+ }
+ if (strcmp(argv[i], "-n") == 0) {
+ i++;
+ if (i>=argc) exit(1);
+ N = atoi(argv[i]);
+ if (N <= 0) exit(1);
+ continue;
+ }
+ }
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = chdir(TOKU_TEST_FILENAME);
+ CKERR(r);
+
+ int fds[N];
+ create_files(N, fds);
+
+ time_many_fsyncs_one_file(N, bytes, fds);
+ time_fsyncs_many_files(N, bytes, fds);
+ time_sync_fsyncs_many_files(N, bytes, fds);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-gettime.cc b/storage/tokudb/PerconaFT/portability/tests/test-gettime.cc
new file mode 100644
index 00000000..ec2175f8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-gettime.cc
@@ -0,0 +1,56 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <toku_assert.h>
+#include <unistd.h>
+#include <toku_time.h>
+
+int main(void) {
+ int r;
+ struct timespec ts;
+
+ r = toku_clock_gettime(CLOCK_REALTIME, &ts);
+ assert(r == 0);
+ sleep(10);
+ r = toku_clock_gettime(CLOCK_REALTIME, &ts);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-gettimeofday.cc b/storage/tokudb/PerconaFT/portability/tests/test-gettimeofday.cc
new file mode 100644
index 00000000..a7af86a7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-gettimeofday.cc
@@ -0,0 +1,53 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <toku_assert.h>
+#include <toku_time.h>
+
+int main(void) {
+ int r;
+ struct timeval tv;
+ struct timezone tz;
+
+ r = gettimeofday(&tv, &tz);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-hugepage.cc b/storage/tokudb/PerconaFT/portability/tests/test-hugepage.cc
new file mode 100644
index 00000000..92134ac8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-hugepage.cc
@@ -0,0 +1,46 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <portability/toku_os.h>
+#include <portability/toku_assert.h>
+
+int main(void) {
+ bool enabled = toku_os_huge_pages_enabled();
+ invariant(!enabled);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-max-data.cc b/storage/tokudb/PerconaFT/portability/tests/test-max-data.cc
new file mode 100644
index 00000000..fb5fc371
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-max-data.cc
@@ -0,0 +1,76 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <toku_stdint.h>
+#include <unistd.h>
+#include <toku_assert.h>
+#include <string.h>
+#include "toku_os.h"
+
+int main(int argc, char *const argv[]) {
+ int verbose = 0;
+ int i;
+ for (i=1; i<argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose = 1;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ }
+
+ // get the data size
+ uint64_t maxdata;
+ int r = toku_os_get_max_process_data_size(&maxdata);
+ assert(r == 0);
+ if (verbose) printf("maxdata=%" PRIu64 " 0x%" PRIx64 "\n", maxdata, maxdata);
+
+ // check the data size
+#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
+ assert(maxdata > (1ULL << 32));
+#elif __i386__
+ assert(maxdata < (1ULL << 32));
+#else
+ #error
+#endif
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-memory-status.cc b/storage/tokudb/PerconaFT/portability/tests/test-memory-status.cc
new file mode 100644
index 00000000..582b96d1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-memory-status.cc
@@ -0,0 +1,50 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include "memory.h"
+
+int main(void) {
+ toku_memory_startup();
+ LOCAL_MEMORY_STATUS_S s;
+ toku_memory_get_status(&s);
+ printf("mallocator: %s\n", s.mallocator_version);
+ printf("mmap threshold: %" PRIu64 "\n", s.mmap_threshold);
+ toku_memory_shutdown();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-pagesize.cc b/storage/tokudb/PerconaFT/portability/tests/test-pagesize.cc
new file mode 100644
index 00000000..55e06845
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-pagesize.cc
@@ -0,0 +1,48 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <toku_stdint.h>
+#include <unistd.h>
+#include <toku_assert.h>
+#include "toku_os.h"
+
+int main(void) {
+ assert(toku_os_get_pagesize() == sysconf(_SC_PAGESIZE));
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-pthread-rwlock-rdlock.cc b/storage/tokudb/PerconaFT/portability/tests/test-pthread-rwlock-rdlock.cc
new file mode 100644
index 00000000..62aa5205
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-pthread-rwlock-rdlock.cc
@@ -0,0 +1,60 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <memory.h>
+#include <toku_assert.h>
+#include <toku_pthread.h>
+#include "test.h"
+
+int test_main(int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ toku_pthread_rwlock_t rwlock;
+ ZERO_STRUCT(rwlock);
+
+ toku_pthread_rwlock_init(toku_uninstrumented, &rwlock, nullptr);
+ toku_pthread_rwlock_rdlock(&rwlock);
+ toku_pthread_rwlock_rdlock(&rwlock);
+ toku_pthread_rwlock_rdunlock(&rwlock);
+ toku_pthread_rwlock_rdunlock(&rwlock);
+ toku_pthread_rwlock_destroy(&rwlock);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-pthread-rwlock-rwr.cc b/storage/tokudb/PerconaFT/portability/tests/test-pthread-rwlock-rwr.cc
new file mode 100644
index 00000000..92b30421
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-pthread-rwlock-rwr.cc
@@ -0,0 +1,103 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <test.h>
+#include <toku_assert.h>
+#include <toku_pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+
+// Write a test to see if things happen in the right order.
+
+volatile int state = 0;
+int verbose = 0;
+
+static void *f(void *arg) {
+ toku_pthread_rwlock_t *mylock = (toku_pthread_rwlock_t *) arg;
+ sleep(2);
+ assert(state==42); state = 16; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
+ toku_pthread_rwlock_wrlock(mylock);
+ assert(state==49); state = 17; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
+ toku_pthread_rwlock_wrunlock(mylock);
+ sleep(10);
+ assert(state==52); state = 20; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
+ return arg;
+}
+
+int test_main(int argc , char *const argv[] ) {
+ assert(argc==1 || argc==2);
+ if (argc==2) {
+ assert(strcmp(argv[1],"-v")==0);
+ verbose = 1;
+ }
+ int r;
+ toku_pthread_rwlock_t rwlock;
+ toku_pthread_t tid;
+ void *retptr;
+
+ toku_pthread_rwlock_init(toku_uninstrumented, &rwlock, nullptr);
+ state = 37;
+ if (verbose)
+ printf("%s:%d\n", __FUNCTION__, __LINE__);
+ toku_pthread_rwlock_rdlock(&rwlock);
+
+ r = toku_pthread_create(toku_uninstrumented, &tid, nullptr, f, &rwlock);
+ assert(r == 0);
+
+ assert(state == 37);
+ state = 42;
+ if (verbose)
+ printf("%s:%d\n", __FUNCTION__, __LINE__);
+ sleep(4);
+ assert(state==16); state = 44; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
+ toku_pthread_rwlock_rdlock(&rwlock);
+ assert(state==44); state = 46; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
+ toku_pthread_rwlock_rdunlock(&rwlock);
+ sleep(4);
+ assert(state==46); state=49; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__); // still have a read lock
+ toku_pthread_rwlock_rdunlock(&rwlock);
+ sleep(6);
+ assert(state==17); state=52; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
+
+ r = toku_pthread_join(tid, &retptr); assert(r == 0);
+
+ toku_pthread_rwlock_destroy(&rwlock);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-pwrite4g.cc b/storage/tokudb/PerconaFT/portability/tests/test-pwrite4g.cc
new file mode 100644
index 00000000..9bf50b9b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-pwrite4g.cc
@@ -0,0 +1,81 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Verify that toku_os_full_pwrite does the right thing when writing beyond 4GB. */
+#include <test.h>
+#include <fcntl.h>
+#include <toku_assert.h>
+#include <memory.h>
+#include <string.h>
+#include <stdio.h>
+#include <portability/toku_path.h>
+
+static int iszero(char *cp, size_t n) {
+ size_t i;
+ for (i=0; i<n; i++)
+ if (cp[i] != 0)
+ return 0;
+ return 1;
+}
+
+int test_main(int UU(argc), char *const UU(argv[])) {
+ int r;
+ unlink(TOKU_TEST_FILENAME);
+ int fd = open(TOKU_TEST_FILENAME, O_RDWR | O_CREAT | O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert(fd>=0);
+ char *XMALLOC_N_ALIGNED(512, 512, buf);
+ memset(buf, 0, 512);
+ strcpy(buf, "hello");
+ int64_t offset = (1LL<<32) + 512;
+ toku_os_full_pwrite(fd, buf, 512, offset);
+ char newbuf[512];
+ r = pread(fd, newbuf, sizeof newbuf, 100);
+ assert(r==sizeof newbuf);
+ assert(iszero(newbuf, sizeof newbuf));
+ r = pread(fd, newbuf, sizeof newbuf, offset);
+ assert(r==sizeof newbuf);
+ assert(memcmp(newbuf, buf, sizeof newbuf) == 0);
+ int64_t fsize;
+ r = toku_os_get_file_size(fd, &fsize);
+ assert(r == 0);
+ assert(fsize > 100 + 512);
+ toku_free(buf);
+ r = close(fd);
+ assert(r==0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-snprintf.cc b/storage/tokudb/PerconaFT/portability/tests/test-snprintf.cc
new file mode 100644
index 00000000..5653dec6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-snprintf.cc
@@ -0,0 +1,82 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <test.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <toku_portability.h>
+#include <toku_assert.h>
+#include <toku_os.h>
+
+static void
+check_snprintf(int i) {
+ char buf_before[8];
+ char target[5];
+ char buf_after[8];
+ memset(target, 0xFF, sizeof(target));
+ memset(buf_before, 0xFF, sizeof(buf_before));
+ memset(buf_after, 0xFF, sizeof(buf_after));
+ int64_t n = 1;
+
+ int j;
+ for (j = 0; j < i; j++) n *= 10;
+
+ int bytes = snprintf(target, sizeof target, "%" PRId64, n);
+ assert(bytes==i+1 ||
+ (i+1>=(int)(sizeof target) && bytes>=(int)(sizeof target)));
+ if (bytes>=(int)(sizeof target)) {
+ //Overflow prevented by snprintf
+ assert(target[sizeof target - 1] == '\0');
+ assert(strlen(target)==sizeof target-1);
+ }
+ else {
+ assert(target[bytes] == '\0');
+ assert(strlen(target)==(size_t)bytes);
+ }
+}
+
+
+int test_main(int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ int i;
+ for (i = 0; i < 8; i++) {
+ check_snprintf(i);
+ }
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-stat.cc b/storage/tokudb/PerconaFT/portability/tests/test-stat.cc
new file mode 100644
index 00000000..57201764
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-stat.cc
@@ -0,0 +1,88 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <toku_assert.h>
+#include <toku_portability.h>
+
+static void test_stat(const char *dirname, int result, int ex_errno) {
+ int r;
+ toku_struct_stat buf;
+ r = toku_stat(dirname, &buf, toku_uninstrumented);
+ // printf("stat %s %d %d\n", dirname, r, errno); fflush(stdout);
+ assert(r == result);
+ if (r != 0)
+ assert(get_maybe_error_errno() == ex_errno);
+}
+
+int main(void) {
+ int r;
+
+ test_stat(".", 0, 0);
+ test_stat("./", 0, 0);
+
+ r = system("rm -rf testdir"); assert(r==0);
+ test_stat("testdir", -1, ENOENT);
+ test_stat("testdir/", -1, ENOENT);
+ test_stat("testdir/foo", -1, ENOENT);
+ test_stat("testdir/foo/", -1, ENOENT);
+ r = toku_os_mkdir("testdir", S_IRWXU);
+ assert(r == 0);
+ test_stat("testdir/foo", -1, ENOENT);
+ test_stat("testdir/foo/", -1, ENOENT);
+ r = system("touch testdir/foo"); assert(r==0);
+ test_stat("testdir/foo", 0, 0);
+ test_stat("testdir/foo/", -1, ENOTDIR);
+
+ test_stat("testdir", 0, 0);
+
+ test_stat("./testdir", 0, 0);
+
+ test_stat("./testdir/", 0, 0);
+
+ test_stat("/", 0, 0);
+
+ test_stat("/usr", 0, 0);
+ test_stat("/usr/", 0, 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-toku-malloc.cc b/storage/tokudb/PerconaFT/portability/tests/test-toku-malloc.cc
new file mode 100644
index 00000000..0d99b36b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-toku-malloc.cc
@@ -0,0 +1,66 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <toku_assert.h>
+#include <memory.h>
+#include <toku_pthread.h>
+
+static void *f(void *arg) {
+ void *vp = toku_malloc(32);
+ assert(vp);
+ toku_free(vp);
+ return arg;
+}
+
+int main(void) {
+ int r;
+ int i;
+ const int max_threads = 2;
+ toku_pthread_t tids[max_threads];
+ for (i = 0; i < max_threads; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, f, nullptr);
+ assert(r == 0);
+ }
+ for (i = 0; i < max_threads; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test-xid.cc b/storage/tokudb/PerconaFT/portability/tests/test-xid.cc
new file mode 100644
index 00000000..71736f89
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test-xid.cc
@@ -0,0 +1,80 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <portability/toku_config.h>
+#include <stdio.h>
+#include <toku_stdint.h>
+#include <unistd.h>
+#include <toku_assert.h>
+#include "toku_os.h"
+#if defined(HAVE_SYSCALL_H)
+# include <syscall.h>
+#endif
+#if defined(HAVE_SYS_SYSCALL_H)
+# include <sys/syscall.h>
+#endif
+#if defined(HAVE_PTHREAD_NP_H)
+# include <pthread_np.h>
+#endif
+#if defined(HAVE_PTHREAD_H)
+# include <pthread.h>
+#endif
+
+// This reimplements the same logic as toku_os_gettid, so the test mostly
+// verifies that the two implementations agree.
+static int gettid(void) {
+#if defined(HAVE_PTHREAD_THREADID_NP)
+ uint64_t result;
+ pthread_threadid_np(NULL, &result);
+ return (int) result;
+#elif defined(__NR_gettid)
+ return syscall(__NR_gettid);
+#elif defined(SYS_gettid)
+ return syscall(SYS_gettid);
+#elif defined(HAVE_PTHREAD_GETTHREADID_NP)
+ return pthread_getthreadid_np();
+#else
+# error "no implementation of gettid available"
+#endif
+}
+
+int main(void) {
+ assert(toku_os_getpid() == getpid());
+ assert(toku_os_gettid() == gettid());
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/test.h b/storage/tokudb/PerconaFT/portability/tests/test.h
new file mode 100644
index 00000000..c5150553
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/test.h
@@ -0,0 +1,61 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <toku_assert.h>
+
+#define CKERR(r) ({ int __r = r; if (__r!=0) fprintf(stderr, "%s:%d error %d %s\n", __FILE__, __LINE__, __r, strerror(r)); assert(__r==0); })
+#define CKERR2(r,r2) do { if (r!=r2) fprintf(stderr, "%s:%d error %d %s, expected %d\n", __FILE__, __LINE__, r, strerror(r), r2); assert(r==r2); } while (0)
+#define CKERR2s(r,r2,r3) do { if (r!=r2 && r!=r3) fprintf(stderr, "%s:%d error %d %s, expected %d or %d\n", __FILE__, __LINE__, r, strerror(r), r2,r3); assert(r==r2||r==r3); } while (0)
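+// Usage note (illustrative, mirroring the tests in this directory): capture the return code
+// in a variable first, since the macros may evaluate their argument more than once, e.g.
+//     int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+//     CKERR(r);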
+
+#define DEBUG_LINE do { \
+ fprintf(stderr, "%s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+ fflush(stderr); \
+} while (0)
+
+int test_main(int argc, char *const argv[]);
+
+int
+main(int argc, char *const argv[]) {
+ int ri = toku_portability_init();
+ assert(ri==0);
+ int r = test_main(argc, argv);
+ toku_portability_destroy();
+ return r;
+}
+
diff --git a/storage/tokudb/PerconaFT/portability/tests/try-assert-zero.cc b/storage/tokudb/PerconaFT/portability/tests/try-assert-zero.cc
new file mode 100644
index 00000000..3c2b4bf6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/try-assert-zero.cc
@@ -0,0 +1,54 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <toku_assert.h>
+#include <stdlib.h>
+#include <signal.h>
+
+
+static __attribute__((__noreturn__)) void catch_abort (int sig __attribute__((__unused__))) {
+ exit(1);
+}
+
+int main(void) {
+ signal (SIGABRT, catch_abort);
+ int result = 42;
+ assert_zero(result);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/try-assert0.cc b/storage/tokudb/PerconaFT/portability/tests/try-assert0.cc
new file mode 100644
index 00000000..7b45716f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/try-assert0.cc
@@ -0,0 +1,53 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <toku_assert.h>
+#include <stdlib.h>
+#include <signal.h>
+
+
+static __attribute__((__noreturn__)) void catch_abort (int sig __attribute__((__unused__))) {
+ exit(1);
+}
+
+int main(void) {
+ signal (SIGABRT, catch_abort);
+ assert(0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/try-leak-lost.cc b/storage/tokudb/PerconaFT/portability/tests/try-leak-lost.cc
new file mode 100644
index 00000000..82f4ac27
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/try-leak-lost.cc
@@ -0,0 +1,46 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(void) {
+ // GCC has gotten smart enough to optimize this away unless we use it.
+ printf("%p\n", malloc(42));
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/try-leak-reachable.cc b/storage/tokudb/PerconaFT/portability/tests/try-leak-reachable.cc
new file mode 100644
index 00000000..f93d1008
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/try-leak-reachable.cc
@@ -0,0 +1,46 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdlib.h>
+
+static void *vp;
+
+int main(void) {
+ vp = malloc(42);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/tests/try-uninit.cc b/storage/tokudb/PerconaFT/portability/tests/try-uninit.cc
new file mode 100644
index 00000000..963ce207
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/tests/try-uninit.cc
@@ -0,0 +1,54 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <stdlib.h>
+
+static void foo(int i) {
+ printf("%d\n", i);
+}
+
+int main(void) {
+ int arg;
+ char *buf = (char *) &arg;
+ for (int i = 0; i < 2; i++) {
+ buf[i] = 'a';
+ }
+ foo(arg);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/portability/toku_assert.cc b/storage/tokudb/PerconaFT/portability/toku_assert.cc
new file mode 100644
index 00000000..fcf1139d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_assert.cc
@@ -0,0 +1,194 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <portability/toku_config.h>
+
+#include <toku_portability.h>
+#include "toku_assert.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#if defined(HAVE_MALLOC_H)
+# include <malloc.h>
+#elif defined(HAVE_SYS_MALLOC_H)
+# include <sys/malloc.h>
+#endif
+#include <dlfcn.h>
+#include <execinfo.h>
+
+// These are statically allocated so that the backtrace can run without any calls to malloc()
+#define N_POINTERS 1000
+static void *backtrace_pointers[N_POINTERS];
+
+static uint64_t engine_status_num_rows = 0;
+
+typedef void (*malloc_stats_fun_t)(void);
+static malloc_stats_fun_t malloc_stats_f;
+
+void
+toku_assert_init(void)
+{
+ malloc_stats_f = (malloc_stats_fun_t) dlsym(RTLD_DEFAULT, "malloc_stats");
+}
+
+// Function pointers are zero by default so asserts can be used by ft-layer tests without an environment.
+static int (*toku_maybe_get_engine_status_text_p)(char* buff, int buffsize) = 0;
+static int (*toku_maybe_err_engine_status_p)(void) = 0;
+static void (*toku_maybe_set_env_panic_p)(int code, const char* msg) = 0;
+
+void toku_assert_set_fpointers(int (*toku_maybe_get_engine_status_text_pointer)(char*, int),
+ int (*toku_maybe_err_engine_status_pointer)(void),
+ void (*toku_maybe_set_env_panic_pointer)(int, const char*),
+ uint64_t num_rows) {
+ toku_maybe_get_engine_status_text_p = toku_maybe_get_engine_status_text_pointer;
+ toku_maybe_err_engine_status_p = toku_maybe_err_engine_status_pointer;
+ toku_maybe_set_env_panic_p = toku_maybe_set_env_panic_pointer;
+ engine_status_num_rows = num_rows;
+}
+
+bool toku_gdb_dump_on_assert = false;
+void (*do_assert_hook)(void) = NULL;
+
+void db_env_do_backtrace_errfunc(toku_env_err_func errfunc, const void *env) {
+ // backtrace
+ int n = backtrace(backtrace_pointers, N_POINTERS);
+ errfunc(env, 0, "Backtrace: (Note: toku_do_assert=0x%p)\n", toku_do_assert);
+ char **syms = backtrace_symbols(backtrace_pointers, n);
+ if (syms) {
+ for (char **symstr = syms; symstr != NULL && (symstr - syms) < n; ++symstr) {
+ errfunc(env, 0, *symstr);
+ }
+ free(syms);
+ }
+
+ if (engine_status_num_rows && toku_maybe_err_engine_status_p) {
+ toku_maybe_err_engine_status_p();
+ } else {
+ errfunc(env, 0, "Engine status function not available\n");
+ }
+ errfunc(env, 0, "Memory usage:\n");
+ if (malloc_stats_f) {
+ malloc_stats_f();
+ }
+
+ if (do_assert_hook) do_assert_hook();
+ if (toku_gdb_dump_on_assert) {
+ toku_try_gdb_stack_trace(nullptr);
+ }
+}
+
+void db_env_do_backtrace(FILE *outf) {
+ // backtrace
+ int n = backtrace(backtrace_pointers, N_POINTERS);
+ fprintf(outf, "Backtrace: (Note: toku_do_assert=0x%p)\n", toku_do_assert); fflush(outf);
+ backtrace_symbols_fd(backtrace_pointers, n, fileno(outf));
+
+ fflush(outf);
+
+ if (engine_status_num_rows && toku_maybe_get_engine_status_text_p) {
+ int buffsize = engine_status_num_rows * 128; // assume 128 characters per row (gross overestimate, should be safe)
+ char buff[buffsize];
+ toku_maybe_get_engine_status_text_p(buff, buffsize);
+ fprintf(outf, "Engine status:\n%s\n", buff);
+ } else {
+ fprintf(outf, "Engine status function not available\n");
+ }
+ fprintf(outf, "Memory usage:\n");
+ fflush(outf); // just in case malloc_stats() crashes, we still want engine status (and to know that malloc_stats() failed)
+ if (malloc_stats_f) {
+ malloc_stats_f();
+ }
+ fflush(outf);
+
+ if (do_assert_hook) do_assert_hook();
+ if (toku_gdb_dump_on_assert) {
+ toku_try_gdb_stack_trace(nullptr);
+ }
+}
+
+__attribute__((noreturn))
+static void toku_do_backtrace_abort(void) {
+ db_env_do_backtrace(stderr);
+ abort();
+}
+
+
+static void
+set_panic_if_not_panicked(int caller_errno, char * msg) {
+ int code = caller_errno ? caller_errno : -1;
+ if (toku_maybe_set_env_panic_p) {
+ toku_maybe_set_env_panic_p(code, msg);
+ }
+}
+
+
+#define MSGLEN 1024
+
+void
+toku_do_assert_fail (const char *expr_as_string, const char *function, const char *file, int line, int caller_errno) {
+ char msg[MSGLEN];
+ snprintf(msg, MSGLEN, "%s:%d %s: Assertion `%s' failed (errno=%d)\n", file, line, function, expr_as_string, caller_errno);
+ perror(msg);
+ set_panic_if_not_panicked(caller_errno, msg);
+ toku_do_backtrace_abort();
+}
+
+void
+toku_do_assert_zero_fail (uintptr_t expr, const char *expr_as_string, const char *function, const char *file, int line, int caller_errno) {
+ char msg[MSGLEN];
+ snprintf(msg, MSGLEN, "%s:%d %s: Assertion `%s == 0' failed (errno=%d) (%s=%" PRIuPTR ")\n", file, line, function, expr_as_string, caller_errno, expr_as_string, expr);
+ perror(msg);
+ set_panic_if_not_panicked(caller_errno, msg);
+ toku_do_backtrace_abort();
+}
+
+void
+toku_do_assert_expected_fail (uintptr_t expr, uintptr_t expected, const char *expr_as_string, const char *function, const char *file, int line, int caller_errno) {
+ char msg[MSGLEN];
+ snprintf(msg, MSGLEN, "%s:%d %s: Assertion `%s == %" PRIuPTR "' failed (errno=%d) (%s=%" PRIuPTR ")\n", file, line, function, expr_as_string, expected, caller_errno, expr_as_string, expr);
+ perror(msg);
+ set_panic_if_not_panicked(caller_errno, msg);
+ toku_do_backtrace_abort();
+}
+
+void
+toku_do_assert(int expr, const char *expr_as_string, const char *function, const char* file, int line, int caller_errno) {
+ if (expr == 0)
+ toku_do_assert_fail(expr_as_string, function, file, line, caller_errno);
+}
+
diff --git a/storage/tokudb/PerconaFT/portability/toku_assert.h b/storage/tokudb/PerconaFT/portability/toku_assert.h
new file mode 100644
index 00000000..a5593a93
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_assert.h
@@ -0,0 +1,146 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+/* The problem with assert.h: if NDEBUG is set, the asserted expression is not evaluated at all;
+   if NDEBUG is not set, every assert leaves a branch in the caller that is never taken. */
+
+/* This version will refuse to compile if NDEBUG is set. */
+/* It evaluates the argument and then calls toku_do_assert(), which takes the hits for the branches that are not taken. */
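+/* A sketch of the expansion, taken from the non-GCOV macros defined later in this header:
+       assert(r == 0);
+   becomes
+       ((r == 0) ? (void)0
+                 : toku_do_assert_fail("r == 0", __FUNCTION__, __FILE__, __LINE__,
+                                       get_maybe_error_errno()));
+*/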
+
+#include <portability/toku_config.h>
+
+#include <stdint.h>
+#include <errno.h>
+#include <stdio.h>
+
+#ifdef NDEBUG
+#error NDEBUG should not be set
+#endif
+
+inline int get_error_errno(void);
+
+static inline int get_maybe_error_errno(void) { return errno; }
+
+static inline void
+set_errno(int new_errno)
+{
+ errno = new_errno;
+}
+
+void toku_assert_init(void) __attribute__((constructor));
+
+void toku_assert_set_fpointers(int (*toku_maybe_get_engine_status_text_pointer)(char*, int),
+ int (*toku_maybe_err_engine_status_pointer)(void),
+ void (*toku_maybe_set_env_panic_pointer)(int, const char*),
+ uint64_t num_rows);
+
+void toku_do_assert(int /*expr*/,const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/) __attribute__((__visibility__("default")));
+
+void toku_do_assert_fail(const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/) __attribute__((__visibility__("default"))) __attribute__((__noreturn__));
+void toku_do_assert_zero_fail(uintptr_t/*expr*/, const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/) __attribute__((__visibility__("default"))) __attribute__((__noreturn__));
+void toku_do_assert_expected_fail(uintptr_t/*expr*/, uintptr_t /*expected*/, const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/) __attribute__((__visibility__("default"))) __attribute__((__noreturn__));
+
+// Define GCOV if you want to get test-coverage information that ignores the assert statements.
+// #define GCOV
+
+extern void (*do_assert_hook)(void); // Set this to a function you want called after printing the assertion failure message but before calling abort(). By default this is NULL.
+// copied here from ydb-internal.h to avoid inclusion hell, the void * is really a DB_ENV but we don't have that type here
+typedef void (*toku_env_err_func)(const void * env, int error, const char *fmt, ...);
+void db_env_do_backtrace_errfunc(toku_env_err_func errfunc, const void *env);
+void db_env_do_backtrace(FILE *outf);
+
+#ifdef assert
+# undef assert
+#endif
+#if defined(GCOV)
+#define assert(expr) toku_do_assert((expr) != 0, #expr, __FUNCTION__, __FILE__, __LINE__, get_maybe_error_errno())
+#define assert_zero(expr) toku_do_assert((expr) == 0, #expr, __FUNCTION__, __FILE__, __LINE__, get_maybe_error_errno())
+#define assert_equals(expr, expected) toku_do_assert((expr) == (expected), #expr, __FUNCTION__, __FILE__, __LINE__, get_maybe_error_errno())
+#else
+#define assert(expr) ((expr) ? (void)0 : toku_do_assert_fail(#expr, __FUNCTION__, __FILE__, __LINE__, get_maybe_error_errno()))
+#define assert_zero(expr) ((expr) == 0 ? (void)0 : toku_do_assert_zero_fail((uintptr_t)(expr), #expr, __FUNCTION__, __FILE__, __LINE__, get_maybe_error_errno()))
+#define assert_equals(expr, expected) ((expr) == (expected) ? (void)0 : toku_do_assert_expected_fail((uintptr_t)(expr), (uintptr_t)(expected), #expr, __FUNCTION__, __FILE__, __LINE__, get_maybe_error_errno()))
+#define assert_null(expr) ((expr) == nullptr ? (void)0 : toku_do_assert_zero_fail((uintptr_t)(expr), #expr, __FUNCTION__, __FILE__, __LINE__, get_maybe_error_errno()))
+#endif
+
+#ifdef GCOV
+#define WHEN_GCOV(x) x
+#define WHEN_NOT_GCOV(x)
+#else
+#define WHEN_GCOV(x)
+#define WHEN_NOT_GCOV(x) x
+#endif
+
+#if !defined(__clang__)
+#include <type_traits>
+# define ENSURE_POD(type) static_assert(std::is_pod<type>::value, #type " isn't POD")
+#else
+# define ENSURE_POD(type) // TEMP, clang is much more strict about POD.
+#endif
+
+#define lazy_assert(a) assert(a) // indicates code is incomplete
+#define lazy_assert_zero(a) assert_zero(a) // indicates code is incomplete
+#define lazy_assert_equals(a, b) assert_equals(a, b) // indicates code is incomplete
+#define invariant(a) assert(a) // indicates a code invariant that must be true
+#define invariant_null(a) assert_null(a) // indicates a code invariant that must be true
+#define invariant_notnull(a) assert(a) // indicates a code invariant that must be true
+#define invariant_zero(a) assert_zero(a) // indicates a code invariant that must be true
+#define invariant_equals(a, b) assert_equals(a, b) // indicates a code invariant that must be true
+#define resource_assert(a) assert(a) // indicates resource must be available, otherwise unrecoverable
+#define resource_assert_zero(a) assert_zero(a) // indicates resource must be available, otherwise unrecoverable
+#define resource_assert_equals(a, b) assert_equals(a, b) // indicates resource must be available, otherwise unrecoverable
+
+#if defined(TOKU_DEBUG_PARANOID) && TOKU_DEBUG_PARANOID
+#define paranoid_invariant(a) assert(a)
+#define paranoid_invariant_null(a) assert_null(a)
+#define paranoid_invariant_notnull(a) assert(a)
+#define paranoid_invariant_zero(a) assert_zero(a)
+#else // !TOKU_DEBUG_PARANOID
+#define paranoid_invariant(a) ((void) 0)
+#define paranoid_invariant_null(a) ((void) 0)
+#define paranoid_invariant_notnull(a) ((void) 0)
+#define paranoid_invariant_zero(a) ((void)0)
+#endif
+
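+// Illustrative usage sketch (do_work and node are hypothetical names):
+// assert()/invariant() are always checked and abort with a backtrace on
+// failure, while paranoid_invariant() compiles away unless TOKU_DEBUG_PARANOID
+// is set.
+//
+//   int r = do_work();
+//   assert_zero(r);               // always checked
+//   invariant_notnull(node);      // always checked
+//   paranoid_invariant(r >= 0);   // checked only in paranoid builds
+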
+inline int get_error_errno(void) {
+ invariant(errno);
+ return errno;
+}
+
+extern bool toku_gdb_dump_on_assert;
diff --git a/storage/tokudb/PerconaFT/portability/toku_atomic.h b/storage/tokudb/PerconaFT/portability/toku_atomic.h
new file mode 100644
index 00000000..88644f56
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_atomic.h
@@ -0,0 +1,122 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <portability/toku_config.h>
+#include <toku_assert.h>
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stddef.h>
+
+__attribute__((const, always_inline))
+static inline intptr_t which_cache_line(intptr_t addr) {
+ static const size_t assumed_cache_line_size = 64;
+ return addr / assumed_cache_line_size;
+}
+template <typename T> __attribute__((const, always_inline))
+static inline bool crosses_boundary(T *addr, size_t width) {
+ const intptr_t int_addr = reinterpret_cast<intptr_t>(addr);
+ const intptr_t last_byte = int_addr + width - 1;
+ return which_cache_line(int_addr) != which_cache_line(last_byte);
+}
+
+template <typename T, typename U> __attribute__((always_inline))
+static inline T toku_sync_fetch_and_add(T *addr, U diff) {
+ paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
+ return __sync_fetch_and_add(addr, diff);
+}
+template <typename T, typename U> __attribute__((always_inline))
+static inline T toku_sync_add_and_fetch(T *addr, U diff) {
+ paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
+ return __sync_add_and_fetch(addr, diff);
+}
+template <typename T, typename U> __attribute__((always_inline))
+static inline T toku_sync_fetch_and_sub(T *addr, U diff) {
+ paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
+ return __sync_fetch_and_sub(addr, diff);
+}
+template <typename T, typename U> __attribute__((always_inline))
+static inline T toku_sync_sub_and_fetch(T *addr, U diff) {
+ paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
+ return __sync_sub_and_fetch(addr, diff);
+}
+template <typename T, typename U, typename V> __attribute__((always_inline))
+static inline T toku_sync_val_compare_and_swap(T *addr, U oldval, V newval) {
+ paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
+ return __sync_val_compare_and_swap(addr, oldval, newval);
+}
+template <typename T, typename U, typename V> __attribute__((always_inline))
+static inline bool toku_sync_bool_compare_and_swap(T *addr, U oldval, V newval) {
+ paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
+ return __sync_bool_compare_and_swap(addr, oldval, newval);
+}
+
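+// Illustrative usage sketch (the counter and flag names are hypothetical):
+// each wrapper asserts, in paranoid builds, that the target does not straddle
+// a cache line and then delegates to the corresponding GCC __sync builtin.
+//
+//   static uint64_t hits;
+//   uint64_t before = toku_sync_fetch_and_add(&hits, 1);  // returns the pre-increment value
+//
+//   static bool started;
+//   if (toku_sync_bool_compare_and_swap(&started, false, true)) {
+//       // first caller wins the race
+//   }
+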
+// in case you include this but not toku_portability.h
+#pragma GCC poison __sync_fetch_and_add
+#pragma GCC poison __sync_fetch_and_sub
+#pragma GCC poison __sync_fetch_and_or
+#pragma GCC poison __sync_fetch_and_and
+#pragma GCC poison __sync_fetch_and_xor
+#pragma GCC poison __sync_fetch_and_nand
+#pragma GCC poison __sync_add_and_fetch
+#pragma GCC poison __sync_sub_and_fetch
+#pragma GCC poison __sync_or_and_fetch
+#pragma GCC poison __sync_and_and_fetch
+#pragma GCC poison __sync_xor_and_fetch
+#pragma GCC poison __sync_nand_and_fetch
+#pragma GCC poison __sync_bool_compare_and_swap
+#pragma GCC poison __sync_val_compare_and_swap
+#pragma GCC poison __sync_synchronize
+#pragma GCC poison __sync_lock_test_and_set
+#pragma GCC poison __sync_release
diff --git a/storage/tokudb/PerconaFT/portability/toku_byteswap.h b/storage/tokudb/PerconaFT/portability/toku_byteswap.h
new file mode 100644
index 00000000..d99eac35
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_byteswap.h
@@ -0,0 +1,51 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <portability/toku_config.h>
+
+#if defined(HAVE_BYTESWAP_H)
+# include <byteswap.h>
+#elif defined(HAVE_SYS_ENDIAN_H)
+# include <sys/endian.h>
+# define bswap_64 bswap64
+#elif defined(HAVE_LIBKERN_OSBYTEORDER_H)
+# include <libkern/OSByteOrder.h>
+# define bswap_64 OSSwapInt64
+#endif
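+
+// Illustrative sketch: whichever header provides it, bswap_64 reverses the
+// byte order of a 64-bit value, e.g.
+//   bswap_64(0x0102030405060708ULL) == 0x0807060504030201ULL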
diff --git a/storage/tokudb/PerconaFT/portability/toku_config.h.in b/storage/tokudb/PerconaFT/portability/toku_config.h.in
new file mode 100644
index 00000000..714835c2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_config.h.in
@@ -0,0 +1,104 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#ifndef __CONFIG_H__
+#define __CONFIG_H__
+
+#define TOKUDB_REVISION @CMAKE_TOKUDB_REVISION@
+
+#cmakedefine TOKU_DEBUG_PARANOID 1
+#cmakedefine USE_VALGRIND 1
+#cmakedefine HAVE_ALLOCA_H 1
+#cmakedefine HAVE_ARPA_INET_H 1
+#cmakedefine HAVE_BYTESWAP_H 1
+#cmakedefine HAVE_ENDIAN_H 1
+#cmakedefine HAVE_FCNTL_H 1
+#cmakedefine HAVE_INTTYPES_H 1
+#cmakedefine HAVE_LIBKERN_OSATOMIC_H 1
+#cmakedefine HAVE_LIBKERN_OSBYTEORDER_H 1
+#cmakedefine HAVE_LIMITS_H 1
+#cmakedefine HAVE_MACHINE_ENDIAN_H 1
+#cmakedefine HAVE_MALLOC_H 1
+#cmakedefine HAVE_MALLOC_MALLOC_H 1
+#cmakedefine HAVE_MALLOC_NP_H 1
+#cmakedefine HAVE_PTHREAD_H 1
+#cmakedefine HAVE_PTHREAD_NP_H 1
+#cmakedefine HAVE_STDINT_H 1
+#cmakedefine HAVE_STDLIB_H 1
+#cmakedefine HAVE_STRING_H 1
+#cmakedefine HAVE_SYSCALL_H 1
+#cmakedefine HAVE_SYS_ENDIAN_H 1
+#cmakedefine HAVE_SYS_FILE_H 1
+#cmakedefine HAVE_SYS_MALLOC_H 1
+#cmakedefine HAVE_SYS_RESOURCE_H 1
+#cmakedefine HAVE_SYS_STATVFS_H 1
+#cmakedefine HAVE_SYS_SYSCALL_H 1
+#cmakedefine HAVE_SYS_SYSCTL_H 1
+#cmakedefine HAVE_SYS_SYSLIMITS_H 1
+#cmakedefine HAVE_SYS_TIME_H 1
+#cmakedefine HAVE_UNISTD_H 1
+
+#cmakedefine HAVE_M_MMAP_THRESHOLD 1
+#cmakedefine HAVE_CLOCK_REALTIME 1
+#cmakedefine HAVE_O_DIRECT 1
+#cmakedefine HAVE_F_NOCACHE 1
+
+#cmakedefine HAVE_MAP_ANONYMOUS 1
+#cmakedefine HAVE_MINCORE 1
+#cmakedefine HAVE_PR_SET_PTRACER 1
+#cmakedefine HAVE_PR_SET_PTRACER_ANY 1
+#cmakedefine HAVE_MALLOC_SIZE 1
+#cmakedefine HAVE_MALLOC_USABLE_SIZE 1
+#cmakedefine HAVE_MEMALIGN 1
+#cmakedefine HAVE_VALLOC 1
+#cmakedefine HAVE_NRAND48 1
+#cmakedefine HAVE_RANDOM_R 1
+
+#cmakedefine HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP 1
+#cmakedefine HAVE_PTHREAD_YIELD 1
+#cmakedefine HAVE_PTHREAD_YIELD_NP 1
+#cmakedefine HAVE_PTHREAD_THREADID_NP 1
+#cmakedefine HAVE_PTHREAD_GETTHREADID_NP 1
+
+#cmakedefine PTHREAD_YIELD_RETURNS_INT 1
+#cmakedefine PTHREAD_YIELD_RETURNS_VOID 1
+
+#cmakedefine HAVE_SCHED_GETCPU 1
+
+#cmakedefine HAVE_GNU_TLS 1
+
+#endif /* __CONFIG_H__ */
diff --git a/storage/tokudb/PerconaFT/portability/toku_crash.cc b/storage/tokudb/PerconaFT/portability/toku_crash.cc
new file mode 100644
index 00000000..297cc29d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_crash.cc
@@ -0,0 +1,160 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <unistd.h>
+#ifdef HAVE_SYS_PRCTL_H
+#include <sys/prctl.h>
+#endif
+
+#include <sys/wait.h>
+#include <toku_race_tools.h>
+#include "toku_crash.h"
+#include "toku_atomic.h"
+
+enum { MAX_GDB_ARGS = 128 };
+
+static void
+run_gdb(pid_t parent_pid, const char *gdb_path) {
+ // up to 3 decimal digits per byte of pid_t, plus a terminating null byte
+ char pid_buf[sizeof(pid_t) * 3 + 1];
+ char exe_buf[sizeof(pid_buf) + sizeof("/proc//exe")];
+
+ // Get pid and path to executable.
+ int n;
+ n = snprintf(pid_buf, sizeof(pid_buf), "%d", parent_pid);
+ invariant(n >= 0 && n < (int)sizeof(pid_buf));
+ n = snprintf(exe_buf, sizeof(exe_buf), "/proc/%d/exe", parent_pid);
+ invariant(n >= 0 && n < (int)sizeof(exe_buf));
+
+ dup2(2, 1); // redirect output to stderr
+ // Arguments are not dynamic due to possible security holes.
+ execlp(gdb_path, gdb_path, "--batch", "-n",
+ "-ex", "thread",
+ "-ex", "bt",
+ "-ex", "bt full",
+ "-ex", "thread apply all bt",
+ "-ex", "thread apply all bt full",
+ exe_buf, pid_buf,
+ (char*) NULL);
+}
+
+static void
+intermediate_process(pid_t parent_pid, const char *gdb_path) {
+ // Disable generation of core dumps
+#if defined(HAVE_SYS_PRCTL_H)
+ prctl(PR_SET_DUMPABLE, 0, 0, 0);
+#endif
+ pid_t worker_pid = fork();
+ if (worker_pid < 0) {
+ perror("spawn gdb fork: ");
+ goto failure;
+ }
+ if (worker_pid == 0) {
+ // Child (debugger)
+ run_gdb(parent_pid, gdb_path);
+ // Normally run_gdb will not return.
+ // In case it does, kill the process.
+ goto failure;
+ } else {
+ pid_t timeout_pid = fork();
+ if (timeout_pid < 0) {
+ perror("spawn timeout fork: ");
+ kill(worker_pid, SIGKILL);
+ goto failure;
+ }
+
+ if (timeout_pid == 0) {
+ sleep(5); // Timeout of 5 seconds
+ goto success;
+ } else {
+ pid_t exited_pid = wait(NULL); // Wait for first child to exit
+ if (exited_pid == worker_pid) {
+ // Kill slower child
+ kill(timeout_pid, SIGKILL);
+ goto success;
+ } else if (exited_pid == timeout_pid) {
+ // Kill slower child
+ kill(worker_pid, SIGKILL);
+ goto failure; // Timed out.
+ } else {
+ perror("error while waiting for gdb or timer to end: ");
+ //Some failure. Kill everything.
+ kill(timeout_pid, SIGKILL);
+ kill(worker_pid, SIGKILL);
+ goto failure;
+ }
+ }
+ }
+success:
+ _exit(EXIT_SUCCESS);
+failure:
+ _exit(EXIT_FAILURE);
+}
+
+static void
+spawn_gdb(const char *gdb_path) {
+ pid_t parent_pid = getpid();
+#if defined(HAVE_SYS_PRCTL_H)
+ // On systems that require permission for the same user to ptrace,
+ // give permission for this process and (more importantly) all its children to debug this process.
+ prctl(PR_SET_PTRACER, parent_pid, 0, 0, 0);
+#endif
+ fprintf(stderr, "Attempting to use gdb @[%s] on pid[%d]\n", gdb_path, parent_pid);
+ fflush(stderr);
+ int intermediate_pid = fork();
+ if (intermediate_pid < 0) {
+ perror("spawn_gdb intermediate process fork: ");
+ } else if (intermediate_pid == 0) {
+ intermediate_process(parent_pid, gdb_path);
+ } else {
+ waitpid(intermediate_pid, NULL, 0);
+ }
+}
+
+void
+toku_try_gdb_stack_trace(const char *gdb_path) {
+ char default_gdb_path[] = "/usr/bin/gdb";
+ static bool started = false;
+ if (RUNNING_ON_VALGRIND) {
+ fprintf(stderr, "gdb stack trace skipped due to running under valgrind\n");
+ fflush(stderr);
+ } else if (toku_sync_bool_compare_and_swap(&started, false, true)) {
+ spawn_gdb(gdb_path ? gdb_path : default_gdb_path);
+ }
+}
+
diff --git a/storage/tokudb/PerconaFT/portability/toku_crash.h b/storage/tokudb/PerconaFT/portability/toku_crash.h
new file mode 100644
index 00000000..de632f59
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_crash.h
@@ -0,0 +1,141 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include "toku_assert.h"
+
+//Simulate as hard a crash as possible.
+//Choices:
+// raise(SIGABRT)
+// kill -SIGKILL $pid
+// divide by 0
+// null dereference
+// abort()
+// assert(false) (from <assert.h>)
+// assert(false) (from <toku_assert.h>)
+//
+//Linux:
+// abort() and both assert(false) variants cause FILE buffers to be flushed and written to disk: unacceptable
+//
+//kill -SIGKILL $pid is annoying (and so far untested)
+//
+//raise(SIGABRT) has the downside that perhaps it could be caught?
+//I'm choosing raise(SIGKILL), followed by divide by zero, followed by a null dereference, followed by all the others just in case one gets caught.
+static void __attribute__((unused, noreturn))
+toku_hard_crash_on_purpose(void) {
+ raise(SIGKILL); //Does not flush buffers on linux; cannot be caught.
+ {
+ int zero = 0;
+ int infinity = 1/zero;
+ fprintf(stderr, "Force use of %d\n", infinity);
+ fflush(stderr); //Make certain the string is calculated.
+ }
+ {
+ void * intothevoid = NULL;
+ (*(int*)intothevoid)++;
+ fprintf(stderr, "Force use of *(%p) = %d\n", intothevoid, *(int*)intothevoid);
+ fflush(stderr);
+ }
+ abort();
+ fprintf(stderr, "This line should never be printed\n");
+ fflush(stderr);
+}
+
+// Similar to toku_hard_crash_on_purpose, but the goal isn't to crash as hard as possible: the primary goal is to get a core file, and the secondary goal is to terminate in any way possible.
+// We don't really care whether buffers get flushed; in fact they may as well flush, since there may be useful output in stdout or stderr.
+//
+// By default, the following signals generate cores:
+// Linux, from signal(7):
+// SIGQUIT 3 Core
+// SIGILL 4 Core
+// SIGABRT 6 Core
+// SIGFPE 8 Core
+// SIGSEGV 11 Core
+//
+// Darwin and FreeBSD, from signal(3):
+// 3 SIGQUIT create core image
+// 4 SIGILL create core image
+// 5 SIGTRAP create core image
+// 6 SIGABRT create core image
+// 7 SIGEMT create core image
+// 8 SIGFPE create core image
+// 10 SIGBUS create core image
+// 11 SIGSEGV create core image
+// 12 SIGSYS create core image
+//
+// We'll raise these in some sequence (common ones first), then try emulating the things that would cause these signals to be raised, then eventually just try to die normally and then loop like abort does.
+// Start with a toku assert because that hopefully prints a stacktrace.
+static void __attribute__((unused, noreturn))
+toku_crash_and_dump_core_on_purpose(void) {
+ assert(false);
+ invariant(0);
+ raise(SIGQUIT);
+ raise(SIGILL);
+ raise(SIGABRT);
+ raise(SIGFPE);
+ raise(SIGSEGV);
+#if defined(__FreeBSD__) || defined(__APPLE__)
+ raise(SIGTRAP);
+ raise(SIGEMT);
+ raise(SIGBUS);
+ raise(SIGSYS);
+#endif
+ abort();
+ {
+ int zero = 0;
+ int infinity = 1/zero;
+ fprintf(stderr, "Force use of %d\n", infinity);
+ fflush(stderr); //Make certain the string is calculated.
+ }
+ {
+ void * intothevoid = NULL;
+ (*(int*)intothevoid)++;
+ fprintf(stderr, "Force use of *(%p) = %d\n", intothevoid, *(int*)intothevoid);
+ fflush(stderr);
+ }
+ raise(SIGKILL);
+ while (true) {
+ // don't return
+ }
+}
+
+void toku_try_gdb_stack_trace(const char *gdb_path);
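+
+// Illustrative usage sketch: typically invoked from an assertion-failure
+// handler; passing nullptr falls back to /usr/bin/gdb.
+//
+//   toku_try_gdb_stack_trace(nullptr);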
diff --git a/storage/tokudb/PerconaFT/portability/toku_debug_sync.h b/storage/tokudb/PerconaFT/portability/toku_debug_sync.h
new file mode 100644
index 00000000..affe3054
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_debug_sync.h
@@ -0,0 +1,76 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+struct tokutxn;
+
+#if defined(MYSQL_TOKUDB_ENGINE) && MYSQL_TOKUDB_ENGINE && \
+ defined(ENABLED_DEBUG_SYNC) && ENABLED_DEBUG_SYNC
+
+/*
+ The macros below are defined in my_global.h and are also defined as compiler
+ options in TokuSetupCompiler.cmake; undefine them here to avoid build errors.
+*/
+#undef __STDC_FORMAT_MACROS
+#undef __STDC_LIMIT_MACROS
+
+#include "my_global.h"
+#include "m_string.h"
+#include "debug_sync.h"
+
+void toku_txn_get_client_id(struct tokutxn *txn,
+ uint64_t *client_id,
+ void **client_extra);
+
+inline void toku_debug_sync(struct tokutxn *txn, const char *sync_point_name) {
+ uint64_t client_id;
+ void *client_extra;
+ THD *thd;
+
+ toku_txn_get_client_id(txn, &client_id, &client_extra);
+ thd = reinterpret_cast<THD *>(client_extra);
+ DEBUG_SYNC(thd, sync_point_name);
+}
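+
+// Illustrative usage sketch (the sync point name is hypothetical): a thread
+// executing on behalf of txn pauses at the named MySQL DEBUG_SYNC point once a
+// test activates it with SET DEBUG_SYNC, e.g.
+//
+//   toku_debug_sync(txn, "my_ft_sync_point");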
+
+#else // defined(ENABLED_DEBUG_SYNC)
+
+inline void toku_debug_sync(struct tokutxn *, const char *) {}
+
+#endif // defined(ENABLED_DEBUG_SYNC)
diff --git a/storage/tokudb/PerconaFT/portability/toku_htod.h b/storage/tokudb/PerconaFT/portability/toku_htod.h
new file mode 100644
index 00000000..a8a2f2a6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_htod.h
@@ -0,0 +1,114 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+/* Purpose of this file is to provide definitions of
+ * Host to Disk byte transposition functions, an abstraction of
+ * htod32()/dtoh32() and htod16()/dtoh16() functions.
+ *
+ * These htod/dtoh functions will only perform the transposition
+ * if the disk and host are defined to be in opposite endian-ness.
+ * If we define the disk to be in host order, then no byte
+ * transposition is performed. (We might do this to save the
+ * time used for byte transposition.)
+ *
+ * This abstraction layer allows us to define the disk to be in
+ * any byte order with a single compile-time switch (in htod.c).
+ *
+ * NOTE: THIS FILE DOES NOT CURRENTLY SUPPORT A BIG-ENDIAN
+ * HOST AND A LITTLE-ENDIAN DISK.
+ */
+
+#include <portability/toku_config.h>
+
+#if defined(HAVE_ENDIAN_H)
+# include <endian.h>
+#elif defined(HAVE_MACHINE_ENDIAN_H)
+# include <machine/endian.h>
+# define __BYTE_ORDER __DARWIN_BYTE_ORDER
+# define __LITTLE_ENDIAN __DARWIN_LITTLE_ENDIAN
+# define __BIG_ENDIAN __DARWIN_BIG_ENDIAN
+#endif
+#if !defined(__BYTE_ORDER) || \
+ !defined(__LITTLE_ENDIAN) || \
+ !defined(__BIG_ENDIAN)
+#error Standard endianness things not all defined
+#endif
+
+
+static const int64_t toku_byte_order_host = 0x0102030405060708LL;
+
+#define NETWORK_BYTE_ORDER (__BIG_ENDIAN)
+#define INTEL_BYTE_ORDER (__LITTLE_ENDIAN)
+#define HOST_BYTE_ORDER (__BYTE_ORDER)
+
+//DISK_BYTE_ORDER is the byte ordering for integers written to disk.
+//If DISK_BYTE_ORDER is the same as HOST_BYTE_ORDER no conversions are necessary.
+//Otherwise some structures require conversion to HOST_BYTE_ORDER on loading from disk (HOST_BYTE_ORDER in memory), and
+//others require conversion to HOST_BYTE_ORDER on every access/mutate (DISK_BYTE_ORDER in memory).
+#define DISK_BYTE_ORDER (INTEL_BYTE_ORDER)
+
+#if HOST_BYTE_ORDER!=INTEL_BYTE_ORDER
+//Even though the functions are noops if DISK==HOST, we do not have the logic to test whether the file was moved from another BYTE_ORDER machine.
+#error Only intel byte order supported so far.
+#endif
+
+#if DISK_BYTE_ORDER == HOST_BYTE_ORDER
+static inline uint64_t
+toku_dtoh64(uint64_t i) {
+ return i;
+}
+
+static inline uint64_t
+toku_htod64(uint64_t i) {
+ return i;
+}
+
+static inline uint32_t
+toku_dtoh32(uint32_t i) {
+ return i;
+}
+
+static inline uint32_t
+toku_htod32(uint32_t i) {
+ return i;
+}
+#else
+#error Not supported
+#endif
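+
+// Illustrative sketch: on an Intel-byte-order (little-endian) host these are
+// identity functions, so a round trip changes nothing, e.g.
+//
+//   uint32_t on_disk = toku_htod32(0xdeadbeef);  // == 0xdeadbeef here
+//   uint32_t in_mem  = toku_dtoh32(on_disk);     // == 0xdeadbeef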
diff --git a/storage/tokudb/PerconaFT/portability/toku_htonl.h b/storage/tokudb/PerconaFT/portability/toku_htonl.h
new file mode 100644
index 00000000..c51fc191
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_htonl.h
@@ -0,0 +1,50 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_htod.h>
+#include <arpa/inet.h>
+
+static inline uint32_t toku_htonl(uint32_t i) {
+ return htonl(i);
+}
+
+static inline uint32_t toku_ntohl(uint32_t i) {
+ return ntohl(i);
+}
diff --git a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
new file mode 100644
index 00000000..0f287429
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
@@ -0,0 +1,374 @@
+#ifdef TOKU_MYSQL_WITH_PFS
+#include "toku_portability.h"
+#include "toku_pthread.h"
+
+toku_instr_probe_pfs::toku_instr_probe_pfs(const toku_instr_key &key)
+ : mutex(new toku_mutex_t) {
+ toku_mutex_init(key, mutex.get(), nullptr);
+}
+
+toku_instr_probe_pfs::~toku_instr_probe_pfs() {
+ toku_mutex_destroy(mutex.get());
+}
+
+// Thread instrumentation
+
+int toku_pthread_create(const toku_instr_key &key,
+ pthread_t *thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *),
+ void *arg) {
+#if (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
+ return PSI_THREAD_CALL(spawn_thread)(
+ key.id(), reinterpret_cast<my_thread_handle *>(thread),
+ attr, start_routine, arg);
+#else
+ return PSI_THREAD_CALL(spawn_thread)(
+ key.id(), thread, attr, start_routine, arg);
+#endif
+}
+
+void toku_instr_register_current_thread(const toku_instr_key &key) {
+ struct PSI_thread *psi_thread =
+ PSI_THREAD_CALL(new_thread)(key.id(), nullptr, 0);
+ PSI_THREAD_CALL(set_thread)(psi_thread);
+}
+
+void toku_instr_delete_current_thread() {
+ PSI_THREAD_CALL(delete_current_thread)();
+}
+
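+// Illustrative usage sketch (worker_key, worker_main and arg are hypothetical):
+// a thread is spawned through the instrumented wrapper; the thread registers
+// itself with PFS on entry and deregisters before exiting.
+//
+//   pthread_t tid;
+//   toku_pthread_create(worker_key, &tid, nullptr, worker_main, arg);
+//   // inside worker_main:
+//   //   toku_instr_register_current_thread(worker_key);
+//   //   ... work ...
+//   //   toku_instr_delete_current_thread();
+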
+// I/O instrumentation
+
+void toku_instr_file_open_begin(toku_io_instrumentation &io_instr,
+ const toku_instr_key &key,
+ toku_instr_file_op op,
+ const char *name,
+ const char *src_file,
+ int src_line) {
+ io_instr.locker =
+ PSI_FILE_CALL(get_thread_file_name_locker)(
+ &io_instr.state, key.id(), static_cast<PSI_file_operation>(op),
+ name, io_instr.locker);
+ if (io_instr.locker != nullptr) {
+ PSI_FILE_CALL(start_file_open_wait)
+ (io_instr.locker, src_file, src_line);
+ }
+}
+
+void toku_instr_file_stream_open_end(toku_io_instrumentation &io_instr,
+ TOKU_FILE &file) {
+ file.key = nullptr;
+ if (FT_LIKELY(io_instr.locker)) {
+ file.key =
+ PSI_FILE_CALL(end_file_open_wait)(io_instr.locker, file.file);
+ }
+}
+
+void toku_instr_file_open_end(toku_io_instrumentation &io_instr, int fd) {
+ if (FT_LIKELY(io_instr.locker))
+ PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)
+ (io_instr.locker, fd);
+}
+
+void toku_instr_file_name_close_begin(toku_io_instrumentation &io_instr,
+ const toku_instr_key &key,
+ toku_instr_file_op op,
+ const char *name,
+ const char *src_file,
+ int src_line) {
+ io_instr.locker =
+ PSI_FILE_CALL(get_thread_file_name_locker)(
+ &io_instr.state, key.id(), static_cast<PSI_file_operation>(op),
+ name,
+ io_instr.locker);
+ if (FT_LIKELY(io_instr.locker)) {
+ PSI_FILE_CALL(start_file_close_wait)
+ (io_instr.locker, src_file, src_line);
+ }
+}
+
+void toku_instr_file_stream_close_begin(toku_io_instrumentation &io_instr,
+ toku_instr_file_op op,
+ const TOKU_FILE &file,
+ const char *src_file,
+ int src_line) {
+ io_instr.locker = nullptr;
+ if (FT_LIKELY(file.key)) {
+ io_instr.locker = PSI_FILE_CALL(get_thread_file_stream_locker)(
+ &io_instr.state, file.key, (PSI_file_operation)op);
+ if (FT_LIKELY(io_instr.locker)) {
+ PSI_FILE_CALL(start_file_close_wait)
+ (io_instr.locker, src_file, src_line);
+ }
+ }
+}
+
+void toku_instr_file_fd_close_begin(toku_io_instrumentation &io_instr,
+ toku_instr_file_op op,
+ int fd,
+ const char *src_file,
+ int src_line) {
+ io_instr.locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)(
+ &io_instr.state, fd, (PSI_file_operation)op);
+ if (FT_LIKELY(io_instr.locker)) {
+ PSI_FILE_CALL(start_file_close_wait)
+ (io_instr.locker, src_file, src_line);
+ }
+}
+
+void toku_instr_file_close_end(const toku_io_instrumentation &io_instr,
+ int result) {
+ if (FT_LIKELY(io_instr.locker))
+ PSI_FILE_CALL(end_file_close_wait)
+ (io_instr.locker, result);
+}
+
+void toku_instr_file_io_begin(toku_io_instrumentation &io_instr,
+ toku_instr_file_op op,
+ int fd,
+ ssize_t count,
+ const char *src_file,
+ int src_line) {
+ io_instr.locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)(
+ &io_instr.state, fd, (PSI_file_operation)op);
+ if (FT_LIKELY(io_instr.locker)) {
+ PSI_FILE_CALL(start_file_wait)
+ (io_instr.locker, count, src_file, src_line);
+ }
+}
+
+void toku_instr_file_name_io_begin(toku_io_instrumentation &io_instr,
+ const toku_instr_key &key,
+ toku_instr_file_op op,
+ const char *name,
+ ssize_t count,
+ const char *src_file,
+ int src_line) {
+ io_instr.locker =
+ PSI_FILE_CALL(get_thread_file_name_locker)(&io_instr.state,
+ key.id(),
+ (PSI_file_operation)op,
+ name,
+ &io_instr.locker);
+ if (FT_LIKELY(io_instr.locker)) {
+ PSI_FILE_CALL(start_file_wait)
+ (io_instr.locker, count, src_file, src_line);
+ }
+}
+
+void toku_instr_file_stream_io_begin(toku_io_instrumentation &io_instr,
+ toku_instr_file_op op,
+ const TOKU_FILE &file,
+ ssize_t count,
+ const char *src_file,
+ int src_line) {
+ io_instr.locker = nullptr;
+ if (FT_LIKELY(file.key)) {
+ io_instr.locker = PSI_FILE_CALL(get_thread_file_stream_locker)(
+ &io_instr.state, file.key, (PSI_file_operation)op);
+ if (FT_LIKELY(io_instr.locker)) {
+ PSI_FILE_CALL(start_file_wait)
+ (io_instr.locker, count, src_file, src_line);
+ }
+ }
+}
+
+void toku_instr_file_io_end(toku_io_instrumentation &io_instr, ssize_t count) {
+ if (FT_LIKELY(io_instr.locker))
+ PSI_FILE_CALL(end_file_wait)
+ (io_instr.locker, count);
+}
+
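+// Illustrative usage sketch (fd, buf, len and off are hypothetical): callers
+// bracket the actual syscall with a begin/end pair so PFS can attribute both
+// the wait time and the byte count.
+//
+//   toku_io_instrumentation io;
+//   toku_instr_file_io_begin(io, toku_instr_file_op::file_write, fd, len,
+//                            __FILE__, __LINE__);
+//   ssize_t written = pwrite(fd, buf, len, off);
+//   toku_instr_file_io_end(io, written);
+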
+// Mutex instrumentation
+
+void toku_instr_mutex_init(const toku_instr_key &key, toku_mutex_t &mutex) {
+ mutex.psi_mutex = PSI_MUTEX_CALL(init_mutex)(key.id(), &mutex.pmutex);
+#if defined(TOKU_PTHREAD_DEBUG)
+ mutex.instr_key_id = key.id();
+#endif // defined(TOKU_PTHREAD_DEBUG)
+}
+
+void toku_instr_mutex_destroy(PSI_mutex *&mutex_instr) {
+ if (mutex_instr != nullptr) {
+ PSI_MUTEX_CALL(destroy_mutex)(mutex_instr);
+ mutex_instr = nullptr;
+ }
+}
+
+void toku_instr_mutex_lock_start(toku_mutex_instrumentation &mutex_instr,
+ toku_mutex_t &mutex,
+ const char *src_file,
+ int src_line) {
+ mutex_instr.locker = nullptr;
+ if (mutex.psi_mutex) {
+ mutex_instr.locker =
+ PSI_MUTEX_CALL(start_mutex_wait)(&mutex_instr.state,
+ mutex.psi_mutex,
+ PSI_MUTEX_LOCK,
+ src_file,
+ src_line);
+ }
+}
+
+void toku_instr_mutex_trylock_start(toku_mutex_instrumentation &mutex_instr,
+ toku_mutex_t &mutex,
+ const char *src_file,
+ int src_line) {
+ mutex_instr.locker = nullptr;
+ if (mutex.psi_mutex) {
+ mutex_instr.locker =
+ PSI_MUTEX_CALL(start_mutex_wait)(&mutex_instr.state,
+ mutex.psi_mutex,
+ PSI_MUTEX_TRYLOCK,
+ src_file,
+ src_line);
+ }
+}
+
+void toku_instr_mutex_lock_end(toku_mutex_instrumentation &mutex_instr,
+ int pthread_mutex_lock_result) {
+ if (mutex_instr.locker)
+ PSI_MUTEX_CALL(end_mutex_wait)
+ (mutex_instr.locker, pthread_mutex_lock_result);
+}
+
+void toku_instr_mutex_unlock(PSI_mutex *mutex_instr) {
+ if (mutex_instr)
+ PSI_MUTEX_CALL(unlock_mutex)(mutex_instr);
+}
+
+// Condvar instrumentation
+
+void toku_instr_cond_init(const toku_instr_key &key, toku_cond_t &cond) {
+ cond.psi_cond = PSI_COND_CALL(init_cond)(key.id(), &cond.pcond);
+#if defined(TOKU_PTHREAD_DEBUG)
+ cond.instr_key_id = key.id();
+#endif // defined(TOKU_PTHREAD_DEBUG)
+}
+
+void toku_instr_cond_destroy(PSI_cond *&cond_instr) {
+ if (cond_instr != nullptr) {
+ PSI_COND_CALL(destroy_cond)(cond_instr);
+ cond_instr = nullptr;
+ }
+}
+
+void toku_instr_cond_wait_start(toku_cond_instrumentation &cond_instr,
+ toku_instr_cond_op op,
+ toku_cond_t &cond,
+ toku_mutex_t &mutex,
+ const char *src_file,
+ int src_line) {
+ cond_instr.locker = nullptr;
+ if (cond.psi_cond) {
+ /* Instrumentation start */
+ cond_instr.locker =
+ PSI_COND_CALL(start_cond_wait)(&cond_instr.state,
+ cond.psi_cond,
+ mutex.psi_mutex,
+ (PSI_cond_operation)op,
+ src_file,
+ src_line);
+ }
+}
+
+void toku_instr_cond_wait_end(toku_cond_instrumentation &cond_instr,
+ int pthread_cond_wait_result) {
+ if (cond_instr.locker)
+ PSI_COND_CALL(end_cond_wait)
+ (cond_instr.locker, pthread_cond_wait_result);
+}
+
+void toku_instr_cond_signal(const toku_cond_t &cond) {
+ if (cond.psi_cond)
+ PSI_COND_CALL(signal_cond)(cond.psi_cond);
+}
+
+void toku_instr_cond_broadcast(const toku_cond_t &cond) {
+ if (cond.psi_cond)
+ PSI_COND_CALL(broadcast_cond)(cond.psi_cond);
+}
+
+// rwlock instrumentation
+
+void toku_instr_rwlock_init(const toku_instr_key &key,
+ toku_pthread_rwlock_t &rwlock) {
+ rwlock.psi_rwlock = PSI_RWLOCK_CALL(init_rwlock)(key.id(), &rwlock.rwlock);
+#if defined(TOKU_PTHREAD_DEBUG)
+ rwlock.instr_key_id = key.id();
+#endif // defined(TOKU_PTHREAD_DEBUG)
+}
+
+void toku_instr_rwlock_destroy(PSI_rwlock *&rwlock_instr) {
+ if (rwlock_instr != nullptr) {
+ PSI_RWLOCK_CALL(destroy_rwlock)(rwlock_instr);
+ rwlock_instr = nullptr;
+ }
+}
+
+void toku_instr_rwlock_rdlock_wait_start(
+ toku_rwlock_instrumentation &rwlock_instr,
+ toku_pthread_rwlock_t &rwlock,
+ const char *src_file,
+ int src_line) {
+ rwlock_instr.locker = nullptr;
+ if (rwlock.psi_rwlock) {
+ /* Instrumentation start */
+ rwlock_instr.locker =
+ PSI_RWLOCK_CALL(start_rwlock_rdwait)(&rwlock_instr.state,
+ rwlock.psi_rwlock,
+ PSI_RWLOCK_READLOCK,
+ src_file,
+ src_line);
+ }
+}
+
+void toku_instr_rwlock_wrlock_wait_start(
+ toku_rwlock_instrumentation &rwlock_instr,
+ toku_pthread_rwlock_t &rwlock,
+ const char *src_file,
+ int src_line) {
+ rwlock_instr.locker = nullptr;
+ if (rwlock.psi_rwlock) {
+ /* Instrumentation start */
+ rwlock_instr.locker =
+ PSI_RWLOCK_CALL(start_rwlock_wrwait)(&rwlock_instr.state,
+ rwlock.psi_rwlock,
+ PSI_RWLOCK_WRITELOCK,
+ src_file,
+ src_line);
+ }
+}
+
+void toku_instr_rwlock_rdlock_wait_end(
+ toku_rwlock_instrumentation &rwlock_instr,
+ int pthread_rwlock_wait_result) {
+ if (rwlock_instr.locker)
+ PSI_RWLOCK_CALL(end_rwlock_rdwait)
+ (rwlock_instr.locker, pthread_rwlock_wait_result);
+}
+
+void toku_instr_rwlock_wrlock_wait_end(
+ toku_rwlock_instrumentation &rwlock_instr,
+ int pthread_rwlock_wait_result) {
+ if (rwlock_instr.locker)
+ PSI_RWLOCK_CALL(end_rwlock_wrwait)
+ (rwlock_instr.locker, pthread_rwlock_wait_result);
+}
+
+void toku_instr_rwlock_unlock(toku_pthread_rwlock_t &rwlock) {
+ if (rwlock.psi_rwlock)
+
+// Due to the change introduced in e4148f2a22922687f7652c4e3d21a22da07c9e78,
+// the PSI rwlock version and interface changed.
+// PSI_CURRENT_RWLOCK_VERSION is not defined in MySQL 5.6 and is defined
+// as 1 in 5.7 and in 8.0 releases before 8.0.17.
+#if defined(PSI_CURRENT_RWLOCK_VERSION) && (PSI_CURRENT_RWLOCK_VERSION == 2)
+ PSI_RWLOCK_CALL(unlock_rwlock)(rwlock.psi_rwlock, PSI_RWLOCK_UNLOCK);
+#else
+ PSI_RWLOCK_CALL(unlock_rwlock)(rwlock.psi_rwlock);
+#endif
+}
+
+#endif // TOKU_MYSQL_WITH_PFS
diff --git a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h
new file mode 100644
index 00000000..beb833a1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h
@@ -0,0 +1,256 @@
+#ifdef TOKU_INSTR_MYSQL_H
+// This file can be included only from toku_instrumentation.h because
+// it replaces the definitions for the case when MySQL PFS is available.
+#error "toku_instr_mysql.h can be included only once"
+#else // TOKU_INSTR_MYSQL_H
+#define TOKU_INSTR_MYSQL_H
+
+#include <memory>
+
+// These macros are defined in my_global.h and are also passed on the
+// command line; undefine them here to avoid compilation errors.
+#undef __STDC_FORMAT_MACROS
+#undef __STDC_LIMIT_MACROS
+#include "mysql/psi/mysql_file.h" // PSI_file
+#include "mysql/psi/mysql_thread.h" // PSI_mutex
+#include "mysql/psi/mysql_stage.h" // PSI_stage
+
+#if (MYSQL_VERSION_ID >= 80000) && (MYSQL_VERSION_ID <= 100000)
+#include "mysql/psi/mysql_cond.h"
+#include "mysql/psi/mysql_mutex.h"
+#include "mysql/psi/mysql_rwlock.h"
+#endif // (MYSQL_VERSION_ID >= 80000) && (MYSQL_VERSION_ID <= 100000)
+
+#ifndef HAVE_PSI_MUTEX_INTERFACE
+#error HAVE_PSI_MUTEX_INTERFACE required
+#endif
+#ifndef HAVE_PSI_RWLOCK_INTERFACE
+#error HAVE_PSI_RWLOCK_INTERFACE required
+#endif
+#ifndef HAVE_PSI_THREAD_INTERFACE
+#error HAVE_PSI_THREAD_INTERFACE required
+#endif
+
+// Instrumentation keys
+
+class toku_instr_key {
+ private:
+ pfs_key_t m_id;
+
+ public:
+ toku_instr_key(toku_instr_object_type type,
+ const char *group,
+ const char *name) {
+ switch (type) {
+ case toku_instr_object_type::mutex: {
+ PSI_mutex_info mutex_info{&m_id, name, 0};
+ mysql_mutex_register(group, &mutex_info, 1);
+ } break;
+ case toku_instr_object_type::rwlock: {
+ PSI_rwlock_info rwlock_info{&m_id, name, 0};
+ mysql_rwlock_register(group, &rwlock_info, 1);
+ } break;
+ case toku_instr_object_type::cond: {
+ PSI_cond_info cond_info{&m_id, name, 0};
+ mysql_cond_register(group, &cond_info, 1);
+ } break;
+ case toku_instr_object_type::thread: {
+ PSI_thread_info thread_info{&m_id, name, 0};
+ mysql_thread_register(group, &thread_info, 1);
+ } break;
+ case toku_instr_object_type::file: {
+ PSI_file_info file_info{&m_id, name, 0};
+ mysql_file_register(group, &file_info, 1);
+ } break;
+ }
+ }
+
+ explicit toku_instr_key(pfs_key_t key_id) : m_id(key_id) {}
+
+ pfs_key_t id() const { return m_id; }
+};
+
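+// Illustrative usage sketch (the group and instrument names are hypothetical):
+// keys are normally created once, at static-initialization time, and then
+// handed to the matching init wrapper.
+//
+//   static toku_instr_key example_mutex_key(toku_instr_object_type::mutex,
+//                                           "fti", "example_mutex");
+//   toku_mutex_t m;
+//   toku_mutex_init(example_mutex_key, &m, nullptr);
+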
+// Thread instrumentation
+int toku_pthread_create(const toku_instr_key &key,
+ pthread_t *thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *),
+ void *arg);
+void toku_instr_register_current_thread(const toku_instr_key &key);
+void toku_instr_delete_current_thread();
+
+// I/O instrumentation
+
+enum class toku_instr_file_op {
+ file_stream_open = PSI_FILE_STREAM_OPEN,
+ file_create = PSI_FILE_CREATE,
+ file_open = PSI_FILE_OPEN,
+ file_delete = PSI_FILE_DELETE,
+ file_rename = PSI_FILE_RENAME,
+ file_read = PSI_FILE_READ,
+ file_write = PSI_FILE_WRITE,
+ file_sync = PSI_FILE_SYNC,
+ file_stream_close = PSI_FILE_STREAM_CLOSE,
+ file_close = PSI_FILE_CLOSE,
+ file_stat = PSI_FILE_STAT
+};
+
+struct toku_io_instrumentation {
+ struct PSI_file_locker *locker;
+ PSI_file_locker_state state;
+
+ toku_io_instrumentation() : locker(nullptr) {}
+};
+
+void toku_instr_file_open_begin(toku_io_instrumentation &io_instr,
+ const toku_instr_key &key,
+ toku_instr_file_op op,
+ const char *name,
+ const char *src_file,
+ int src_line);
+void toku_instr_file_stream_open_end(toku_io_instrumentation &io_instr,
+ TOKU_FILE &file);
+void toku_instr_file_open_end(toku_io_instrumentation &io_instr, int fd);
+void toku_instr_file_name_close_begin(toku_io_instrumentation &io_instr,
+ const toku_instr_key &key,
+ toku_instr_file_op op,
+ const char *name,
+ const char *src_file,
+ int src_line);
+void toku_instr_file_stream_close_begin(toku_io_instrumentation &io_instr,
+ toku_instr_file_op op,
+ const TOKU_FILE &file,
+ const char *src_file,
+ int src_line);
+void toku_instr_file_fd_close_begin(toku_io_instrumentation &io_instr,
+ toku_instr_file_op op,
+ int fd,
+ const char *src_file,
+ int src_line);
+void toku_instr_file_close_end(const toku_io_instrumentation &io_instr,
+ int result);
+void toku_instr_file_io_begin(toku_io_instrumentation &io_instr,
+ toku_instr_file_op op,
+ int fd,
+ ssize_t count,
+ const char *src_file,
+ int src_line);
+void toku_instr_file_name_io_begin(toku_io_instrumentation &io_instr,
+ const toku_instr_key &key,
+ toku_instr_file_op op,
+ const char *name,
+ ssize_t count,
+ const char *src_file,
+ int src_line);
+void toku_instr_file_stream_io_begin(toku_io_instrumentation &io_instr,
+ toku_instr_file_op op,
+ const TOKU_FILE &file,
+ ssize_t count,
+ const char *src_file,
+ int src_line);
+void toku_instr_file_io_end(toku_io_instrumentation &io_instr, ssize_t count);
+
+// Mutex instrumentation
+
+struct toku_mutex_instrumentation {
+ struct PSI_mutex_locker *locker;
+ PSI_mutex_locker_state state;
+
+ toku_mutex_instrumentation() : locker(nullptr) {}
+};
+
+void toku_instr_mutex_init(const toku_instr_key &key, toku_mutex_t &mutex);
+void toku_instr_mutex_destroy(PSI_mutex *&mutex_instr);
+void toku_instr_mutex_lock_start(toku_mutex_instrumentation &mutex_instr,
+ toku_mutex_t &mutex,
+ const char *src_file,
+ int src_line);
+void toku_instr_mutex_trylock_start(toku_mutex_instrumentation &mutex_instr,
+ toku_mutex_t &mutex,
+ const char *src_file,
+ int src_line);
+void toku_instr_mutex_lock_end(toku_mutex_instrumentation &mutex_instr,
+ int pthread_mutex_lock_result);
+void toku_instr_mutex_unlock(PSI_mutex *mutex_instr);
+
+// Instrumentation probes
+
+class toku_instr_probe_pfs {
+ private:
+ std::unique_ptr<toku_mutex_t> mutex;
+ toku_mutex_instrumentation mutex_instr;
+
+ public:
+ explicit toku_instr_probe_pfs(const toku_instr_key &key);
+
+ ~toku_instr_probe_pfs();
+
+ void start_with_source_location(const char *src_file, int src_line) {
+ mutex_instr.locker = nullptr;
+ toku_instr_mutex_lock_start(mutex_instr, *mutex, src_file, src_line);
+ }
+
+ void stop() { toku_instr_mutex_lock_end(mutex_instr, 0); }
+};
+
+typedef toku_instr_probe_pfs toku_instr_probe;
+
+// Condvar instrumentation
+
+struct toku_cond_instrumentation {
+ struct PSI_cond_locker *locker;
+ PSI_cond_locker_state state;
+
+ toku_cond_instrumentation() : locker(nullptr) {}
+};
+
+enum class toku_instr_cond_op {
+ cond_wait = PSI_COND_WAIT,
+ cond_timedwait = PSI_COND_TIMEDWAIT,
+};
+
+void toku_instr_cond_init(const toku_instr_key &key, toku_cond_t &cond);
+void toku_instr_cond_destroy(PSI_cond *&cond_instr);
+void toku_instr_cond_wait_start(toku_cond_instrumentation &cond_instr,
+ toku_instr_cond_op op,
+ toku_cond_t &cond,
+ toku_mutex_t &mutex,
+ const char *src_file,
+ int src_line);
+void toku_instr_cond_wait_end(toku_cond_instrumentation &cond_instr,
+ int pthread_cond_wait_result);
+void toku_instr_cond_signal(const toku_cond_t &cond);
+void toku_instr_cond_broadcast(const toku_cond_t &cond);
+
+// rwlock instrumentation
+
+struct toku_rwlock_instrumentation {
+ struct PSI_rwlock_locker *locker;
+ PSI_rwlock_locker_state state;
+
+ toku_rwlock_instrumentation() : locker(nullptr) { }
+};
+
+void toku_instr_rwlock_init(const toku_instr_key &key,
+ toku_pthread_rwlock_t &rwlock);
+void toku_instr_rwlock_destroy(PSI_rwlock *&rwlock_instr);
+void toku_instr_rwlock_rdlock_wait_start(
+ toku_rwlock_instrumentation &rwlock_instr,
+ toku_pthread_rwlock_t &rwlock,
+ const char *src_file,
+ int src_line);
+void toku_instr_rwlock_wrlock_wait_start(
+ toku_rwlock_instrumentation &rwlock_instr,
+ toku_pthread_rwlock_t &rwlock,
+ const char *src_file,
+ int src_line);
+void toku_instr_rwlock_rdlock_wait_end(
+ toku_rwlock_instrumentation &rwlock_instr,
+ int pthread_rwlock_wait_result);
+void toku_instr_rwlock_wrlock_wait_end(
+ toku_rwlock_instrumentation &rwlock_instr,
+ int pthread_rwlock_wait_result);
+void toku_instr_rwlock_unlock(toku_pthread_rwlock_t &rwlock);
+
+#endif // TOKU_INSTR_MYSQL_H
diff --git a/storage/tokudb/PerconaFT/portability/toku_instrumentation.h b/storage/tokudb/PerconaFT/portability/toku_instrumentation.h
new file mode 100644
index 00000000..01552f9f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_instrumentation.h
@@ -0,0 +1,387 @@
+/*======
+This file is part of PerconaFT.
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#pragma once
+
+#include <stdio.h> // FILE
+
+// Performance instrumentation object identifier type
+typedef unsigned int pfs_key_t;
+
+enum class toku_instr_object_type { mutex, rwlock, cond, thread, file };
+
+struct PSI_file;
+
+struct TOKU_FILE {
+ /** The real file. */
+ FILE *file;
+ struct PSI_file *key;
+ TOKU_FILE() : file(nullptr), key(nullptr) {}
+};
+
+struct PSI_mutex;
+struct PSI_cond;
+struct PSI_rwlock;
+
+struct toku_mutex_t;
+struct toku_cond_t;
+struct toku_pthread_rwlock_t;
+
+class toku_instr_key;
+
+class toku_instr_probe_empty {
+ public:
+ explicit toku_instr_probe_empty(UU(const toku_instr_key &key)) {}
+
+ void start_with_source_location(UU(const char *src_file),
+ UU(int src_line)) {}
+
+ void stop() {}
+};
+
+#define TOKU_PROBE_START(p) p->start_with_source_location(__FILE__, __LINE__)
+#define TOKU_PROBE_STOP(p) p->stop
+
+extern toku_instr_key toku_uninstrumented;
+
+#ifndef TOKU_MYSQL_WITH_PFS
+
+#include <pthread.h>
+
+class toku_instr_key {
+ public:
+ toku_instr_key(UU(toku_instr_object_type type),
+ UU(const char *group),
+ UU(const char *name)) {}
+
+ explicit toku_instr_key(UU(pfs_key_t key_id)) {}
+
+ ~toku_instr_key() {}
+};
+
+typedef toku_instr_probe_empty toku_instr_probe;
+
+enum class toku_instr_file_op {
+ file_stream_open,
+ file_create,
+ file_open,
+ file_delete,
+ file_rename,
+ file_read,
+ file_write,
+ file_sync,
+ file_stream_close,
+ file_close,
+ file_stat
+};
+
+struct PSI_file {};
+struct PSI_mutex {};
+
+struct toku_io_instrumentation {};
+
+inline int toku_pthread_create(UU(const toku_instr_key &key),
+ pthread_t *thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *),
+ void *arg) {
+ return pthread_create(thread, attr, start_routine, arg);
+}
+
+inline void toku_instr_register_current_thread() {}
+
+inline void toku_instr_delete_current_thread() {}
+
+// Instrument file creation, opening, closing, and renaming
+inline void toku_instr_file_open_begin(UU(toku_io_instrumentation &io_instr),
+ UU(const toku_instr_key &key),
+ UU(toku_instr_file_op op),
+ UU(const char *name),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_file_stream_open_end(
+ UU(toku_io_instrumentation &io_instr),
+ UU(TOKU_FILE &file)) {}
+
+inline void toku_instr_file_open_end(UU(toku_io_instrumentation &io_instr),
+ UU(int fd)) {}
+
+inline void toku_instr_file_name_close_begin(
+ UU(toku_io_instrumentation &io_instr),
+ UU(const toku_instr_key &key),
+ UU(toku_instr_file_op op),
+ UU(const char *name),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_file_stream_close_begin(
+ UU(toku_io_instrumentation &io_instr),
+ UU(toku_instr_file_op op),
+ UU(TOKU_FILE &file),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_file_fd_close_begin(
+ UU(toku_io_instrumentation &io_instr),
+ UU(toku_instr_file_op op),
+ UU(int fd),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_file_close_end(UU(toku_io_instrumentation &io_instr),
+ UU(int result)) {}
+
+inline void toku_instr_file_io_begin(UU(toku_io_instrumentation &io_instr),
+ UU(toku_instr_file_op op),
+ UU(int fd),
+ UU(unsigned int count),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_file_name_io_begin(UU(toku_io_instrumentation &io_instr),
+ UU(const toku_instr_key &key),
+ UU(toku_instr_file_op op),
+ UU(const char *name),
+ UU(unsigned int count),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_file_stream_io_begin(
+ UU(toku_io_instrumentation &io_instr),
+ UU(toku_instr_file_op op),
+ UU(TOKU_FILE &file),
+ UU(unsigned int count),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_file_io_end(UU(toku_io_instrumentation &io_instr),
+ UU(unsigned int count)) {}
+
+struct toku_mutex_t;
+
+struct toku_mutex_instrumentation {};
+
+inline PSI_mutex *toku_instr_mutex_init(UU(const toku_instr_key &key),
+ UU(toku_mutex_t &mutex)) {
+ return nullptr;
+}
+
+inline void toku_instr_mutex_destroy(UU(PSI_mutex *&mutex_instr)) {}
+
+inline void toku_instr_mutex_lock_start(
+ UU(toku_mutex_instrumentation &mutex_instr),
+ UU(toku_mutex_t &mutex),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_mutex_trylock_start(
+ UU(toku_mutex_instrumentation &mutex_instr),
+ UU(toku_mutex_t &mutex),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_mutex_lock_end(
+ UU(toku_mutex_instrumentation &mutex_instr),
+ UU(int pthread_mutex_lock_result)) {}
+
+inline void toku_instr_mutex_unlock(UU(PSI_mutex *mutex_instr)) {}
+
+struct toku_cond_instrumentation {};
+
+enum class toku_instr_cond_op {
+ cond_wait,
+ cond_timedwait,
+};
+
+inline PSI_cond *toku_instr_cond_init(UU(const toku_instr_key &key),
+ UU(toku_cond_t &cond)) {
+ return nullptr;
+}
+
+inline void toku_instr_cond_destroy(UU(PSI_cond *&cond_instr)) {}
+
+inline void toku_instr_cond_wait_start(
+ UU(toku_cond_instrumentation &cond_instr),
+ UU(toku_instr_cond_op op),
+ UU(toku_cond_t &cond),
+ UU(toku_mutex_t &mutex),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_cond_wait_end(UU(toku_cond_instrumentation &cond_instr),
+ UU(int pthread_cond_wait_result)) {}
+
+inline void toku_instr_cond_signal(UU(toku_cond_t &cond)) {}
+
+inline void toku_instr_cond_broadcast(UU(toku_cond_t &cond)) {}
+
+// rwlock instrumentation
+struct toku_rwlock_instrumentation {};
+
+inline PSI_rwlock *toku_instr_rwlock_init(UU(const toku_instr_key &key),
+ UU(toku_pthread_rwlock_t &rwlock)) {
+ return nullptr;
+}
+
+inline void toku_instr_rwlock_destroy(UU(PSI_rwlock *&rwlock_instr)) {}
+
+inline void toku_instr_rwlock_rdlock_wait_start(
+ UU(toku_rwlock_instrumentation &rwlock_instr),
+ UU(toku_pthread_rwlock_t &rwlock),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_rwlock_wrlock_wait_start(
+ UU(toku_rwlock_instrumentation &rwlock_instr),
+ UU(toku_pthread_rwlock_t &rwlock),
+ UU(const char *src_file),
+ UU(int src_line)) {}
+
+inline void toku_instr_rwlock_rdlock_wait_end(
+ UU(toku_rwlock_instrumentation &rwlock_instr),
+ UU(int pthread_rwlock_wait_result)) {}
+
+inline void toku_instr_rwlock_wrlock_wait_end(
+ UU(toku_rwlock_instrumentation &rwlock_instr),
+ UU(int pthread_rwlock_wait_result)) {}
+
+inline void toku_instr_rwlock_unlock(UU(toku_pthread_rwlock_t &rwlock)) {}
+
+#else // TOKU_MYSQL_WITH_PFS
+// The PFS consumer may be not only MySQL but also MongoDB or any other PFS-enabled server
+#include <toku_instr_mysql.h>
+#endif // TOKU_MYSQL_WITH_PFS
+
+extern toku_instr_key toku_uninstrumented;
+
+extern toku_instr_probe *toku_instr_probe_1;
+
+// threads
+extern toku_instr_key *extractor_thread_key;
+extern toku_instr_key *fractal_thread_key;
+extern toku_instr_key *io_thread_key;
+extern toku_instr_key *eviction_thread_key;
+extern toku_instr_key *kibbutz_thread_key;
+extern toku_instr_key *minicron_thread_key;
+extern toku_instr_key *tp_internal_thread_key;
+
+// Files
+extern toku_instr_key *tokudb_file_data_key;
+extern toku_instr_key *tokudb_file_load_key;
+extern toku_instr_key *tokudb_file_tmp_key;
+extern toku_instr_key *tokudb_file_log_key;
+
+// Mutexes
+extern toku_instr_key *kibbutz_mutex_key;
+extern toku_instr_key *minicron_p_mutex_key;
+extern toku_instr_key *queue_result_mutex_key;
+extern toku_instr_key *tpool_lock_mutex_key;
+extern toku_instr_key *workset_lock_mutex_key;
+extern toku_instr_key *bjm_jobs_lock_mutex_key;
+extern toku_instr_key *log_internal_lock_mutex_key;
+extern toku_instr_key *cachetable_ev_thread_lock_mutex_key;
+extern toku_instr_key *cachetable_disk_nb_mutex_key;
+extern toku_instr_key *cachetable_m_mutex_key;
+extern toku_instr_key *safe_file_size_lock_mutex_key;
+extern toku_instr_key *checkpoint_safe_mutex_key;
+extern toku_instr_key *ft_ref_lock_mutex_key;
+extern toku_instr_key *loader_error_mutex_key;
+extern toku_instr_key *bfs_mutex_key;
+extern toku_instr_key *loader_bl_mutex_key;
+extern toku_instr_key *loader_fi_lock_mutex_key;
+extern toku_instr_key *loader_out_mutex_key;
+extern toku_instr_key *result_output_condition_lock_mutex_key;
+extern toku_instr_key *block_table_mutex_key;
+extern toku_instr_key *rollback_log_node_cache_mutex_key;
+extern toku_instr_key *txn_lock_mutex_key;
+extern toku_instr_key *txn_state_lock_mutex_key;
+extern toku_instr_key *txn_child_manager_mutex_key;
+extern toku_instr_key *txn_manager_lock_mutex_key;
+extern toku_instr_key *treenode_mutex_key;
+extern toku_instr_key *manager_mutex_key;
+extern toku_instr_key *manager_escalation_mutex_key;
+extern toku_instr_key *manager_escalator_mutex_key;
+extern toku_instr_key *db_txn_struct_i_txn_mutex_key;
+extern toku_instr_key *indexer_i_indexer_lock_mutex_key;
+extern toku_instr_key *indexer_i_indexer_estimate_lock_mutex_key;
+extern toku_instr_key *locktree_request_info_mutex_key;
+extern toku_instr_key *locktree_request_info_retry_mutex_key;
+
+// condition vars
+extern toku_instr_key *result_state_cond_key;
+extern toku_instr_key *bjm_jobs_wait_key;
+extern toku_instr_key *cachetable_p_refcount_wait_key;
+extern toku_instr_key *cachetable_m_flow_control_cond_key;
+extern toku_instr_key *cachetable_m_ev_thread_cond_key;
+extern toku_instr_key *bfs_cond_key;
+extern toku_instr_key *result_output_condition_key;
+extern toku_instr_key *manager_m_escalator_done_key;
+extern toku_instr_key *lock_request_m_wait_cond_key;
+extern toku_instr_key *queue_result_cond_key;
+extern toku_instr_key *ws_worker_wait_key;
+extern toku_instr_key *rwlock_wait_read_key;
+extern toku_instr_key *rwlock_wait_write_key;
+extern toku_instr_key *rwlock_cond_key;
+extern toku_instr_key *tp_thread_wait_key;
+extern toku_instr_key *tp_pool_wait_free_key;
+extern toku_instr_key *frwlock_m_wait_read_key;
+extern toku_instr_key *kibbutz_k_cond_key;
+extern toku_instr_key *minicron_p_condvar_key;
+extern toku_instr_key *locktree_request_info_retry_cv_key;
+
+// rwlocks
+extern toku_instr_key *multi_operation_lock_key;
+extern toku_instr_key *low_priority_multi_operation_lock_key;
+extern toku_instr_key *cachetable_m_list_lock_key;
+extern toku_instr_key *cachetable_m_pending_lock_expensive_key;
+extern toku_instr_key *cachetable_m_pending_lock_cheap_key;
+extern toku_instr_key *cachetable_m_lock_key;
+extern toku_instr_key *result_i_open_dbs_rwlock_key;
+extern toku_instr_key *checkpoint_safe_rwlock_key;
+extern toku_instr_key *cachetable_value_key;
+extern toku_instr_key *safe_file_size_lock_rwlock_key;
+extern toku_instr_key *cachetable_disk_nb_rwlock_key;
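
In the branch without TOKU_MYSQL_WITH_PFS, every hook above is a no-op, so callers can invoke them unconditionally. A minimal sketch of how a lock wrapper might thread the start/end hooks around the underlying pthread call (illustrative only; it assumes toku_pthread.h, shown further down, for the toku_mutex_t layout, and the wrapper name is hypothetical):

    // Sketch of the hook pattern; not the actual wrapper used by the library.
    #include <assert.h>
    #include <pthread.h>

    static void example_mutex_lock(toku_mutex_t *mutex,
                                   const char *src_file, int src_line) {
        toku_mutex_instrumentation mutex_instr;
        // No-op here; starts a PFS wait timer when instrumentation is enabled.
        toku_instr_mutex_lock_start(mutex_instr, *mutex, src_file, src_line);
        const int r = pthread_mutex_lock(&mutex->pmutex);
        // No-op here; records the wait result when instrumentation is enabled.
        toku_instr_mutex_lock_end(mutex_instr, r);
        assert(r == 0);
    }
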
diff --git a/storage/tokudb/PerconaFT/portability/toku_list.h b/storage/tokudb/PerconaFT/portability/toku_list.h
new file mode 100644
index 00000000..e1f9da5c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_list.h
@@ -0,0 +1,121 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// This toku_list is intended to be embedded in other data structures.
+struct toku_list {
+ struct toku_list *next, *prev;
+};
+
+static inline int toku_list_num_elements_est(struct toku_list *head) {
+ if (head->next == head) return 0;
+ if (head->next == head->prev) return 1;
+ return 2;
+}
+
+
+static inline void toku_list_init(struct toku_list *head) {
+ head->next = head->prev = head;
+}
+
+static inline int toku_list_empty(struct toku_list *head) {
+ return head->next == head;
+}
+
+static inline struct toku_list *toku_list_head(struct toku_list *head) {
+ return head->next;
+}
+
+static inline struct toku_list *toku_list_tail(struct toku_list *head) {
+ return head->prev;
+}
+
+static inline void toku_list_insert_between(struct toku_list *a, struct toku_list *toku_list, struct toku_list *b) {
+
+ toku_list->next = a->next;
+ toku_list->prev = b->prev;
+ a->next = b->prev = toku_list;
+}
+
+static inline void toku_list_push(struct toku_list *head, struct toku_list *toku_list) {
+ toku_list_insert_between(head->prev, toku_list, head);
+}
+
+static inline void toku_list_push_head(struct toku_list *head, struct toku_list *toku_list) {
+ toku_list_insert_between(head, toku_list, head->next);
+}
+
+static inline void toku_list_remove(struct toku_list *toku_list) {
+ struct toku_list *prev = toku_list->prev;
+ struct toku_list *next = toku_list->next;
+ next->prev = prev;
+ prev->next = next;
+ toku_list_init(toku_list); // Set the toku_list element to be empty
+}
+
+static inline struct toku_list *toku_list_pop(struct toku_list *head) {
+ struct toku_list *toku_list = head->prev;
+ toku_list_remove(toku_list);
+ return toku_list;
+}
+
+static inline struct toku_list *toku_list_pop_head(struct toku_list *head) {
+ struct toku_list *toku_list = head->next;
+ toku_list_remove(toku_list);
+ return toku_list;
+}
+
+static inline void toku_list_move(struct toku_list *newhead, struct toku_list *oldhead) {
+ struct toku_list *first = oldhead->next;
+ struct toku_list *last = oldhead->prev;
+ // assert(!toku_list_empty(oldhead));
+ newhead->next = first;
+ newhead->prev = last;
+ last->next = first->prev = newhead;
+ toku_list_init(oldhead);
+}
+
+// Note: Need the extra level of parens in these macros so that
+// toku_list_struct(h, foo, b)->zot
+// will work right. Otherwise the type cast will try to include ->zot, and it will be all messed up.
+#if ((defined(__GNUC__) && __GNUC__ >= 4) || defined(__builtin_offsetof) ) && !defined(__clang__)
+#define toku_list_struct(p, t, f) ((t*)((char*)(p) - __builtin_offsetof(t, f)))
+#else
+#define toku_list_struct(p, t, f) ((t*)((char*)(p) - ((char*)&((t*)0)->f)))
+#endif
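
Because toku_list is intrusive (the links live inside the containing struct), callers get back to the container with toku_list_struct. A short usage sketch with a made-up item type:

    // Illustrative only; 'item' is a hypothetical type embedding a toku_list node.
    #include "toku_list.h"

    struct item {
        int value;
        struct toku_list node;  // embedded links
    };

    static void example_list(void) {
        struct toku_list head;
        toku_list_init(&head);

        struct item a = {1, {}};
        struct item b = {2, {}};
        toku_list_push(&head, &a.node);       // append at the tail
        toku_list_push_head(&head, &b.node);  // prepend at the head

        // Walk head to tail, recovering the container from each node.
        for (struct toku_list *p = toku_list_head(&head); p != &head; p = p->next) {
            struct item *it = toku_list_struct(p, struct item, node);
            (void)it->value;  // visits b (2), then a (1)
        }

        toku_list_remove(&a.node);  // unlink and re-initialize a.node
    }
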
diff --git a/storage/tokudb/PerconaFT/portability/toku_os.h b/storage/tokudb/PerconaFT/portability/toku_os.h
new file mode 100644
index 00000000..d7cfcfef
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_os.h
@@ -0,0 +1,125 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <dirent.h>
+#include <sys/time.h>
+
+#include "toku_stdint.h"
+#include "toku_os_types.h"
+
+// Returns: the current process id
+int toku_os_getpid(void) __attribute__((__visibility__("default")));
+
+// Returns: the current thread id
+int toku_os_gettid(void) __attribute__((__visibility__("default")));
+
+// Returns: the number of processors in the system
+int toku_os_get_number_processors(void);
+
+// Returns: the number of active processors in the system
+int toku_os_get_number_active_processors(void);
+
+// Returns: the system page size (in bytes)
+int toku_os_get_pagesize(void);
+
+// Returns: the size of physical memory (in bytes)
+uint64_t toku_os_get_phys_memory_size(void) __attribute__((__visibility__("default")));
+
+// Returns the processor frequency in Hz
+// Returns 0 on success
+int toku_os_get_processor_frequency(uint64_t *hz);
+
+// Returns: 0 on success
+// sets fsize to the number of bytes in a file
+int toku_os_get_file_size(int fildes, int64_t *fsize) __attribute__((__visibility__("default")));
+
+// Returns: 0 on success
+// Initializes id as a unique fileid for fildes on success.
+int toku_os_get_unique_file_id(int fildes, struct fileid *id);
+
+//Locks a file (should not be open to begin with).
+//Returns: file descriptor (or -1 on error)
+int toku_os_lock_file(const char *name);
+
+//Unlocks and closes a file locked by toku_os_lock_file
+int toku_os_unlock_file(int fildes);
+
+int toku_os_mkdir(const char *pathname, mode_t mode) __attribute__((__visibility__("default")));
+
+// Get the current process's user and kernel CPU times
+int toku_os_get_process_times(struct timeval *usertime, struct timeval *kerneltime);
+
+// Get the maximum size of the process data segment (in bytes)
+// Success: returns 0 and sets *maxdata to the data size
+// Fail: returns an error number
+int toku_os_get_max_process_data_size(uint64_t *maxdata) __attribute__((__visibility__("default")));
+
+int toku_os_initialize_settings(int verbosity) __attribute__((__visibility__("default")));
+
+bool toku_os_is_absolute_name(const char* path) __attribute__((__visibility__("default")));
+
+// Return true if huge pages are enabled. See portability/huge_page_detection.cc for methodology.
+bool toku_os_huge_pages_enabled(void) __attribute__((__visibility__("default")));
+
+// Set whether writes assert when ENOSPC is returned, or instead wait for space
+void toku_set_assert_on_write_enospc(int do_assert) __attribute__((__visibility__("default")));
+
+// Get file system write information
+// *enospc_last_time is the last time ENOSPC was returned by write or pwrite
+// *enospc_current is the number of threads waiting on space
+// *enospc_total is the number of times ENOSPC was returned by write or pwrite
+void toku_fs_get_write_info(time_t *enospc_last_time, uint64_t *enospc_current, uint64_t *enospc_total);
+
+void toku_fsync_dirfd_without_accounting(DIR *dirp);
+
+int toku_fsync_dir_by_name_without_accounting(const char *dir_name);
+
+// Get the file system free and total space for the file system that contains a path name
+// *avail_size is set to the bytes of free space in the file system available for non-root
+// *free_size is set to the bytes of free space in the file system
+// *total_size is set to the total bytes in the file system
+// Return 0 on success, otherwise an error number
+int toku_get_filesystem_sizes(const char *path,
+ uint64_t *avail_size,
+ uint64_t *free_size,
+ uint64_t *total_size);
+
+// Portable linux 'dup2'
+int toku_dup2(int fd, int fd2) __attribute__((__visibility__("default")));
diff --git a/storage/tokudb/PerconaFT/portability/toku_os_types.h b/storage/tokudb/PerconaFT/portability/toku_os_types.h
new file mode 100644
index 00000000..bc466f76
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_os_types.h
@@ -0,0 +1,77 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <stdbool.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+typedef int toku_os_handle_t;
+
+struct fileid {
+ dev_t st_dev; /* device and inode are enough to uniquely identify a file in unix. */
+ ino_t st_ino;
+};
+
+static inline int toku_fileid_cmp(const struct fileid &a, const struct fileid &b) {
+ if (a.st_dev < b.st_dev) {
+ return -1;
+ } else if (a.st_dev > b.st_dev) {
+ return +1;
+ } else {
+ if (a.st_ino < b.st_ino) {
+ return -1;
+ } else if (a.st_ino > b.st_ino) {
+ return +1;
+ } else {
+ return 0;
+ }
+ }
+}
+
+__attribute__((const, nonnull, warn_unused_result))
+static inline bool toku_fileids_are_equal(struct fileid *a, struct fileid *b) {
+ return toku_fileid_cmp(*a, *b) == 0;
+}
+
+typedef struct stat toku_struct_stat;
+
+#if !defined(O_BINARY)
+#define O_BINARY 0
+#endif
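
The (device, inode) pair is how the code decides whether two descriptors name the same file. A minimal sketch combining this header with toku_os_get_unique_file_id from toku_os.h above (the function name and error handling here are illustrative):

    // Illustrative only: true when both descriptors refer to the same file.
    #include "toku_os.h"        // toku_os_get_unique_file_id
    #include "toku_os_types.h"  // struct fileid, toku_fileids_are_equal

    static bool example_same_file(int fd1, int fd2) {
        struct fileid id1, id2;
        if (toku_os_get_unique_file_id(fd1, &id1) != 0) return false;
        if (toku_os_get_unique_file_id(fd2, &id2) != 0) return false;
        return toku_fileids_are_equal(&id1, &id2);
    }
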
diff --git a/storage/tokudb/PerconaFT/portability/toku_path.cc b/storage/tokudb/PerconaFT/portability/toku_path.cc
new file mode 100644
index 00000000..eacc1384
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_path.cc
@@ -0,0 +1,125 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "toku_path.h"
+#include <toku_assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <libgen.h>
+
+const char *toku_test_filename(const char *default_filename) {
+ const char *filename = getenv("TOKU_TEST_FILENAME");
+ if (filename == nullptr) {
+ filename = basename((char *) default_filename);
+ assert(filename != nullptr);
+ }
+ return filename;
+}
+
+// Guarantees NUL termination (unless siz == 0)
+// siz is full size of dst (including NUL terminator)
+// Appends src to end of dst, (truncating if necessary) to use no more than siz bytes (including NUL terminator)
+// Returns strnlen(dst, siz) (size (excluding NUL) of string we tried to create)
+size_t toku_strlcat(char *dst, const char *src, size_t siz)
+{
+ if (siz == 0) {
+ return 0;
+ }
+ dst[siz-1] = '\0'; //Guarantee NUL termination.
+
+ const size_t old_dst_len = strnlen(dst, siz - 1);
+ paranoid_invariant(old_dst_len <= siz - 1);
+ if (old_dst_len == siz - 1) {
+ // No room for anything more.
+ return old_dst_len;
+ }
+    char *d = &dst[old_dst_len]; //Points to the NUL terminator at the end of the old string
+ const size_t remaining_space = siz-old_dst_len-1;
+ const size_t allowed_src_len = strnlen(src, remaining_space); // Limit to remaining space (leave space for NUL)
+ paranoid_invariant(allowed_src_len <= remaining_space);
+ paranoid_invariant(old_dst_len + allowed_src_len < siz);
+ memcpy(d, src, allowed_src_len);
+ d[allowed_src_len] = '\0'; // NUL terminate (may be redundant with previous NUL termination)
+
+ return old_dst_len + allowed_src_len;
+}
+
+// Guarantees NUL termination (unless siz == 0)
+// siz is full size of dst (including NUL terminator)
+// Copies src to dst (truncating if necessary) to use no more than siz bytes (including NUL terminator)
+// Returns strnlen(dst, siz) (size (excluding NUL) of string we tried to create)
+//
+// Implementation note: implemented for simplicity as opposed to performance
+size_t toku_strlcpy(char *dst, const char *src, size_t siz)
+{
+ if (siz == 0) {
+ return 0;
+ }
+ *dst = '\0';
+ return toku_strlcat(dst, src, siz);
+}
+
+char *toku_path_join(char *dest, int n, const char *base, ...) {
+ static const char PATHSEP = '/';
+ size_t written;
+ written = toku_strlcpy(dest, base, TOKU_PATH_MAX);
+ paranoid_invariant(written < TOKU_PATH_MAX);
+ paranoid_invariant(dest[written] == '\0');
+
+ va_list ap;
+ va_start(ap, base);
+ for (int i = 1; written < TOKU_PATH_MAX && i < n; ++i) {
+ if (dest[written - 1] != PATHSEP) {
+ if (written+2 >= TOKU_PATH_MAX) {
+ // No room.
+ break;
+ }
+ dest[written++] = PATHSEP;
+ dest[written] = '\0';
+ }
+ const char *next = va_arg(ap, const char *);
+ written = toku_strlcat(dest, next, TOKU_PATH_MAX);
+ paranoid_invariant(written < TOKU_PATH_MAX);
+ paranoid_invariant(dest[written] == '\0');
+ }
+ va_end(ap);
+
+ // Zero out rest of buffer for security
+ memset(&dest[written], 0, TOKU_PATH_MAX - written);
+ return dest;
+}
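
A small sketch of the truncation behavior documented above (the buffer size and strings are made up; the return value is the length actually stored, per the comments):

    // Illustrative only: an 8-byte dst, so results are truncated but always
    // NUL-terminated.
    #include "toku_path.h"

    static void example_strl(void) {
        char dst[8];
        size_t n = toku_strlcpy(dst, "abc", sizeof(dst));  // dst = "abc",     n = 3
        n = toku_strlcat(dst, "defghij", sizeof(dst));     // dst = "abcdefg", n = 7
        (void)n;
    }
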
diff --git a/storage/tokudb/PerconaFT/portability/toku_path.h b/storage/tokudb/PerconaFT/portability/toku_path.h
new file mode 100644
index 00000000..6c330138
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_path.h
@@ -0,0 +1,72 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <stdarg.h>
+#include <limits.h>
+#include <sys/types.h>
+
+__attribute__((nonnull))
+const char *toku_test_filename(const char *default_filename);
+
+#define TOKU_TEST_FILENAME toku_test_filename(__FILE__)
+
+#define TOKU_PATH_MAX PATH_MAX
+
+// Guarantees NUL termination (unless siz == 0)
+// siz is full size of dst (including NUL terminator)
+// Appends src to end of dst, (truncating if necessary) to use no more than siz bytes (including NUL terminator)
+// Returns strnlen(dst, siz)
+size_t toku_strlcat(char *dst, const char *src, size_t siz);
+
+// Guarantees NUL termination (unless siz == 0)
+// siz is full size of dst (including NUL terminator)
+// Copies src to dst (truncating if necessary) to use no more than siz bytes (including NUL terminator)
+// Returns strnlen(dst, siz)
+size_t toku_strlcpy(char *dst, const char *src, size_t siz);
+
+char *toku_path_join(char *dest, int n, const char *base, ...);
+// Effect:
+// Concatenate all the parts into a filename, using portable path separators.
+// Store the result in dest.
+// Requires:
+// dest is a buffer of size at least TOKU_PATH_MAX + 1.
+// There are n path components, including base.
+// Returns:
+// dest (useful for chaining function calls)
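
A short usage sketch for toku_path_join (the component names are hypothetical):

    // Illustrative only: joins three components with '/' separators.
    #include "toku_path.h"

    static void example_join(void) {
        char dest[TOKU_PATH_MAX + 1];
        toku_path_join(dest, 3, "data", "logs", "log000.tokulog");
        // dest == "data/logs/log000.tokulog"; the unused tail of the buffer is zeroed.
    }
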
diff --git a/storage/tokudb/PerconaFT/portability/toku_portability.h b/storage/tokudb/PerconaFT/portability/toku_portability.h
new file mode 100644
index 00000000..7027b2a1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_portability.h
@@ -0,0 +1,576 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "toku_config.h"
+
+// Percona portability layer
+
+#if defined(__clang__)
+# define constexpr_static_assert(a, b)
+#else
+# define constexpr_static_assert(a, b) static_assert(a, b)
+#endif
+
+#if defined(_MSC_VER)
+# error "Windows is not supported."
+#endif
+
+#define DEV_NULL_FILE "/dev/null"
+
+#include <my_global.h>
+
+// include here, before they get deprecated
+#include <toku_atomic.h>
+
+#if defined(__GNUC__)
+// GCC linux
+
+#define DO_GCC_PRAGMA(x) _Pragma (#x)
+
+#include <toku_stdint.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/stat.h>
+#include <stdio.h>
+
+#if defined(__FreeBSD__)
+#include <stdarg.h>
+#endif
+
+#if defined(HAVE_ALLOCA_H)
+# include <alloca.h>
+#endif
+
+#if defined(__cplusplus)
+# include <type_traits>
+#endif
+
+#if defined(__cplusplus)
+# define cast_to_typeof(v) (decltype(v))
+#else
+# define cast_to_typeof(v) (__typeof__(v))
+#endif
+
+#else // __GNUC__ was not defined, so...
+# error "Must use a GNUC-compatible compiler."
+#endif
+
+// Define some constants for Yama in case the build-machine's software is too old.
+#if !defined(HAVE_PR_SET_PTRACER)
+/*
+ * Set specific pid that is allowed to ptrace the current task.
+ * A value of 0 means "no process".
+ */
+// Well defined ("Yama" in ASCII)
+#define PR_SET_PTRACER 0x59616d61
+#endif
+#if !defined(HAVE_PR_SET_PTRACER_ANY)
+#define PR_SET_PTRACER_ANY ((unsigned long)-1)
+#endif
+
+#if defined(__cplusplus)
+// decltype() here gives a reference-to-pointer instead of just a pointer,
+// so just use __typeof__
+# define CAST_FROM_VOIDP(name, value) name = static_cast<__typeof__(name)>(value)
+#else
+# define CAST_FROM_VOIDP(name, value) name = cast_to_typeof(name) (value)
+#endif
+
+#ifndef TOKU_OFF_T_DEFINED
+#define TOKU_OFF_T_DEFINED
+typedef int64_t toku_off_t;
+#endif
+
+#include "toku_os.h"
+#include "toku_htod.h"
+#include "toku_assert.h"
+#include "toku_crash.h"
+#include "toku_debug_sync.h"
+
+#define UU(x) x __attribute__((__unused__))
+
+// Branch prediction macros.
+// If supported by the compiler, these hint the likely branch direction to aid
+// instruction caching. Use them only where there is a very good idea of the correct
+// branch heuristics as determined by profiling. Mostly copied from InnoDB.
+// Use:
+// "if (FT_LIKELY(x))" where the chances of "x" evaluating true are higher
+// "if (FT_UNLIKELY(x))" where the chances of "x" evaluating false are higher
+#if defined(__GNUC__) && (__GNUC__ > 2) && !defined(__INTEL_COMPILER)
+
+// Tell the compiler that 'expr' probably evaluates to 'constant'.
+#define FT_EXPECT(expr, constant) __builtin_expect(expr, constant)
+
+#else
+
+#warning "No FT branch prediction operations in use!"
+#define FT_EXPECT(expr, constant) (expr)
+
+#endif // defined(__GNUC__) && (__GNUC__ > 2) && ! defined(__INTEL_COMPILER)
+
+// Tell the compiler that cond is likely to hold
+#define FT_LIKELY(cond) FT_EXPECT(bool(cond), true)
+
+// Tell the compiler that cond is unlikely to hold
+#define FT_UNLIKELY(cond) FT_EXPECT(bool(cond), false)
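
For example, a sketch of hinting a rarely-taken branch (the function and condition are illustrative; use such hints only where profiling justifies them):

    // Illustrative only: hint that the pointer is almost always non-null.
    static inline int example_is_valid(const void *p) {
        if (FT_UNLIKELY(p == nullptr)) {
            return 0;   // cold path
        }
        return 1;       // hot path
    }
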
+
+#include "toku_instrumentation.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+// Deprecated functions.
+#if !defined(TOKU_ALLOW_DEPRECATED) && !defined(__clang__)
+int creat(const char *pathname, mode_t mode) __attribute__((__deprecated__));
+int fstat(int fd, struct stat *buf) __attribute__((__deprecated__));
+int stat(const char *path, struct stat *buf) __attribute__((__deprecated__));
+int getpid(void) __attribute__((__deprecated__));
+# if defined(__FreeBSD__) || defined(__APPLE__)
+int syscall(int __sysno, ...) __attribute__((__deprecated__));
+# else
+long int syscall(long int __sysno, ...) __attribute__((__deprecated__));
+# endif
+ long int sysconf(int) __attribute__((__deprecated__));
+int mkdir(const char *pathname, mode_t mode) __attribute__((__deprecated__));
+int dup2(int fd, int fd2) __attribute__((__deprecated__));
+int _dup2(int fd, int fd2) __attribute__((__deprecated__));
+// strdup is a macro in some libraries.
+#undef strdup
+# if defined(__FreeBSD__)
+char* strdup(const char *) __malloc_like __attribute__((__deprecated__));
+# elif defined(__APPLE__)
+char* strdup(const char *) __attribute__((__deprecated__));
+# else
+char* strdup(const char *) __THROW __attribute_malloc__ __nonnull ((1)) __attribute__((__deprecated__));
+# endif
+#undef __strdup
+char* __strdup(const char *) __attribute__((__deprecated__));
+# ifndef DONT_DEPRECATE_WRITES
+ssize_t write(int, const void *, size_t) __attribute__((__deprecated__));
+ssize_t pwrite(int, const void *, size_t, off_t) __attribute__((__deprecated__));
+#endif
+# ifndef DONT_DEPRECATE_MALLOC
+# if defined(__FreeBSD__)
+extern void *malloc(size_t) __malloc_like __attribute__((__deprecated__));
+extern void free(void*) __attribute__((__deprecated__));
+extern void *realloc(void*, size_t) __malloc_like __attribute__((__deprecated__));
+# elif defined(__APPLE__)
+extern void *malloc(size_t) __attribute__((__deprecated__));
+extern void free(void*) __attribute__((__deprecated__));
+extern void *realloc(void*, size_t) __attribute__((__deprecated__));
+# else
+extern void *malloc(size_t) __THROW __attribute__((__deprecated__));
+extern void free(void*) __THROW __attribute__((__deprecated__));
+extern void *realloc(void*, size_t) __THROW __attribute__((__deprecated__));
+# endif
+# endif
+# ifndef DONT_DEPRECATE_ERRNO
+//extern int errno __attribute__((__deprecated__));
+# endif
+#if !defined(__APPLE__)
+// Darwin headers use these types; we should not poison them
+#undef TRUE
+#undef FALSE
+# pragma GCC poison u_int8_t
+# pragma GCC poison u_int16_t
+# pragma GCC poison u_int32_t
+# pragma GCC poison u_int64_t
+# pragma GCC poison BOOL
+#if !defined(MYSQL_TOKUDB_ENGINE)
+# pragma GCC poison FALSE
+# pragma GCC poison TRUE
+#endif // MYSQL_TOKUDB_ENGINE
+#endif
+#pragma GCC poison __sync_fetch_and_add
+#pragma GCC poison __sync_fetch_and_sub
+#pragma GCC poison __sync_fetch_and_or
+#pragma GCC poison __sync_fetch_and_and
+#pragma GCC poison __sync_fetch_and_xor
+#pragma GCC poison __sync_fetch_and_nand
+#pragma GCC poison __sync_add_and_fetch
+#pragma GCC poison __sync_sub_and_fetch
+#pragma GCC poison __sync_or_and_fetch
+#pragma GCC poison __sync_and_and_fetch
+#pragma GCC poison __sync_xor_and_fetch
+#pragma GCC poison __sync_nand_and_fetch
+#pragma GCC poison __sync_bool_compare_and_swap
+#pragma GCC poison __sync_val_compare_and_swap
+#pragma GCC poison __sync_synchronize
+#pragma GCC poison __sync_lock_test_and_set
+#pragma GCC poison __sync_release
+#endif
+
+#if defined(__cplusplus)
+};
+#endif
+
+void *os_malloc(size_t) __attribute__((__visibility__("default")));
+// Effect: See man malloc(2)
+
+void *os_malloc_aligned(size_t /*alignment*/, size_t /*size*/) __attribute__((__visibility__("default")));
+// Effect: Perform a malloc(size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
+// Requires: alignment is a power of two.
+
+
+void *os_realloc(void*,size_t) __attribute__((__visibility__("default")));
+// Effect: See man realloc(2)
+
+void *os_realloc_aligned(size_t/*alignment*/, void*,size_t) __attribute__((__visibility__("default")));
+// Effect: Perform a realloc(p, size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
+// Requires: alignment is a power of two.
+
+void os_free(void*) __attribute__((__visibility__("default")));
+// Effect: See man free(2)
+
+size_t os_malloc_usable_size(const void *p) __attribute__((__visibility__("default")));
+// Effect: Return an estimate of the usable size of the allocation behind a pointer. If this function is not defined,
+// memory.cc will look for the jemalloc, libc, or darwin versions of the function when computing the memory footprint.
+
+// toku_os_full_pwrite and toku_os_full_write perform a pwrite/write and check for errors. They do not return unless all the data was written.
+void toku_os_full_pwrite (int fd, const void *buf, size_t len, toku_off_t off) __attribute__((__visibility__("default")));
+void toku_os_full_write (int fd, const void *buf, size_t len) __attribute__((__visibility__("default")));
+
+// toku_os_write returns 0 on success, otherwise an errno.
+ssize_t toku_os_pwrite (int fd, const void *buf, size_t len, toku_off_t off) __attribute__((__visibility__("default")));
+int toku_os_write(int fd, const void *buf, size_t len)
+ __attribute__((__visibility__("default")));
+
+// wrappers around file system calls
+void toku_os_recursive_delete(const char *path);
+
+TOKU_FILE *toku_os_fdopen_with_source_location(int fildes,
+ const char *mode,
+ const char *filename,
+ const toku_instr_key &instr_key,
+ const char *src_file,
+ uint src_line);
+#define toku_os_fdopen(FD, M, FN, K) \
+ toku_os_fdopen_with_source_location(FD, M, FN, K, __FILE__, __LINE__)
+
+TOKU_FILE *toku_os_fopen_with_source_location(const char *filename,
+ const char *mode,
+ const toku_instr_key &instr_key,
+ const char *src_file,
+ uint src_line);
+#define toku_os_fopen(F, M, K) \
+ toku_os_fopen_with_source_location(F, M, K, __FILE__, __LINE__)
+
+int toku_os_open_with_source_location(const char *path,
+ int oflag,
+ int mode,
+ const toku_instr_key &instr_key,
+ const char *src_file,
+ uint src_line);
+#define toku_os_open(FD, F, M, K) \
+ toku_os_open_with_source_location(FD, F, M, K, __FILE__, __LINE__)
+
+int toku_os_open_direct(const char *path,
+ int oflag,
+ int mode,
+ const toku_instr_key &instr_key);
+
+int toku_os_delete_with_source_location(const char *name,
+ const char *src_file,
+ uint src_line);
+#define toku_os_delete(FN) \
+ toku_os_delete_with_source_location(FN, __FILE__, __LINE__)
+
+int toku_os_rename_with_source_location(const char *old_name,
+ const char *new_name,
+ const char *src_file,
+ uint src_line);
+#define toku_os_rename(old_name, new_name) \
+ toku_os_rename_with_source_location(old_name, new_name, __FILE__, __LINE__)
+
+void toku_os_full_write_with_source_location(int fd,
+ const void *buf,
+ size_t len,
+ const char *src_file,
+ uint src_line);
+#define toku_os_full_write(FD, B, L) \
+ toku_os_full_write_with_source_location(FD, B, L, __FILE__, __LINE__)
+
+int toku_os_write_with_source_location(int fd,
+ const void *buf,
+ size_t len,
+ const char *src_file,
+ uint src_line);
+#define toku_os_write(FD, B, L) \
+ toku_os_write_with_source_location(FD, B, L, __FILE__, __LINE__)
+
+void toku_os_full_pwrite_with_source_location(int fd,
+ const void *buf,
+ size_t len,
+ toku_off_t off,
+ const char *src_file,
+ uint src_line);
+#define toku_os_full_pwrite(FD, B, L, O) \
+ toku_os_full_pwrite_with_source_location(FD, B, L, O, __FILE__, __LINE__)
+
+ssize_t toku_os_pwrite_with_source_location(int fd,
+ const void *buf,
+ size_t len,
+ toku_off_t off,
+ const char *src_file,
+ uint src_line);
+
+#define toku_os_pwrite(FD, B, L, O) \
+ toku_os_pwrite_with_source_location(FD, B, L, O, __FILE__, __LINE__)
+
+int toku_os_fwrite_with_source_location(const void *ptr,
+ size_t size,
+ size_t nmemb,
+ TOKU_FILE *stream,
+ const char *src_file,
+ uint src_line);
+
+#define toku_os_fwrite(P, S, N, FS) \
+ toku_os_fwrite_with_source_location(P, S, N, FS, __FILE__, __LINE__)
+
+int toku_os_fread_with_source_location(void *ptr,
+ size_t size,
+ size_t nmemb,
+ TOKU_FILE *stream,
+ const char *src_file,
+ uint src_line);
+#define toku_os_fread(P, S, N, FS) \
+ toku_os_fread_with_source_location(P, S, N, FS, __FILE__, __LINE__)
+
+TOKU_FILE *toku_os_fopen_with_source_location(const char *filename,
+ const char *mode,
+ const toku_instr_key &instr_key,
+ const char *src_file,
+ uint src_line);
+
+int toku_os_fclose_with_source_location(TOKU_FILE *stream,
+ const char *src_file,
+ uint src_line);
+
+#define toku_os_fclose(FS) \
+ toku_os_fclose_with_source_location(FS, __FILE__, __LINE__)
+
+int toku_os_close_with_source_location(int fd,
+ const char *src_file,
+ uint src_line);
+#define toku_os_close(FD) \
+ toku_os_close_with_source_location(FD, __FILE__, __LINE__)
+
+ssize_t toku_os_read_with_source_location(int fd,
+ void *buf,
+ size_t count,
+ const char *src_file,
+ uint src_line);
+
+#define toku_os_read(FD, B, C) \
+ toku_os_read_with_source_location(FD, B, C, __FILE__, __LINE__);
+
+ssize_t inline_toku_os_pread_with_source_location(int fd,
+ void *buf,
+ size_t count,
+ off_t offset,
+ const char *src_file,
+ uint src_line);
+#define toku_os_pread(FD, B, C, O) \
+ inline_toku_os_pread_with_source_location(FD, B, C, O, __FILE__, __LINE__);
+
+void file_fsync_internal_with_source_location(int fd,
+ const char *src_file,
+ uint src_line);
+
+#define file_fsync_internal(FD) \
+ file_fsync_internal_with_source_location(FD, __FILE__, __LINE__);
+
+int toku_os_get_file_size_with_source_location(int fildes,
+ int64_t *fsize,
+ const char *src_file,
+ uint src_line);
+
+#define toku_os_get_file_size(D, S) \
+ toku_os_get_file_size_with_source_location(D, S, __FILE__, __LINE__)
+
+// TODO: should this prototype be moved to toku_os.h?
+int toku_stat_with_source_location(const char *name,
+ toku_struct_stat *buf,
+ const toku_instr_key &instr_key,
+ const char *src_file,
+ uint src_line)
+ __attribute__((__visibility__("default")));
+
+#define toku_stat(N, B, K) \
+ toku_stat_with_source_location(N, B, K, __FILE__, __LINE__)
+
+int toku_os_fstat_with_source_location(int fd,
+ toku_struct_stat *buf,
+ const char *src_file,
+ uint src_line)
+ __attribute__((__visibility__("default")));
+
+#define toku_os_fstat(FD, B) \
+ toku_os_fstat_with_source_location(FD, B, __FILE__, __LINE__)
+
+#ifdef HAVE_PSI_FILE_INTERFACE2
+int inline_toku_os_close(int fd, const char *src_file, uint src_line);
+int inline_toku_os_fclose(TOKU_FILE *stream,
+ const char *src_file,
+ uint src_line);
+ssize_t inline_toku_os_read(int fd,
+ void *buf,
+ size_t count,
+ const char *src_file,
+ uint src_line);
+ssize_t inline_toku_os_pread(int fd,
+ void *buf,
+ size_t count,
+ off_t offset,
+ const char *src_file,
+ uint src_line);
+int inline_toku_os_fwrite(const void *ptr,
+ size_t size,
+ size_t nmemb,
+ TOKU_FILE *stream,
+ const char *src_file,
+ uint src_line);
+int inline_toku_os_fread(void *ptr,
+ size_t size,
+ size_t nmemb,
+ TOKU_FILE *stream,
+ const char *src_file,
+ uint src_line);
+int inline_toku_os_write(int fd,
+ const void *buf,
+ size_t len,
+ const char *src_file,
+ uint src_line);
+ssize_t inline_toku_os_pwrite(int fd,
+ const void *buf,
+ size_t len,
+ toku_off_t off,
+ const char *src_file,
+ uint src_line);
+void inline_toku_os_full_write(int fd,
+ const void *buf,
+ size_t len,
+ const char *src_file,
+ uint src_line);
+void inline_toku_os_full_pwrite(int fd,
+ const void *buf,
+ size_t len,
+ toku_off_t off,
+ const char *src_file,
+ uint src_line);
+int inline_toku_os_delete(const char *name,
+ const char *srv_file,
+ uint src_line);
+//#else
+int inline_toku_os_close(int fd);
+int inline_toku_os_fclose(TOKU_FILE *stream);
+ssize_t inline_toku_os_read(int fd, void *buf, size_t count);
+ssize_t inline_toku_os_pread(int fd, void *buf, size_t count, off_t offset);
+int inline_toku_os_fwrite(const void *ptr,
+ size_t size,
+ size_t nmemb,
+ TOKU_FILE *stream);
+int inline_toku_os_fread(void *ptr,
+ size_t size,
+ size_t nmemb,
+ TOKU_FILE *stream);
+int inline_toku_os_write(int fd, const void *buf, size_t len);
+ssize_t inline_toku_os_pwrite(int fd,
+ const void *buf,
+ size_t len,
+ toku_off_t off);
+void inline_toku_os_full_write(int fd, const void *buf, size_t len);
+void inline_toku_os_full_pwrite(int fd,
+ const void *buf,
+ size_t len,
+ toku_off_t off);
+int inline_toku_os_delete(const char *name);
+#endif
+
+// wrapper around fsync
+void toku_file_fsync(int fd);
+int toku_fsync_directory(const char *fname);
+void toku_file_fsync_without_accounting(int fd);
+
+// get the number of fsync calls and the fsync times (total)
+void toku_get_fsync_times(uint64_t *fsync_count,
+ uint64_t *fsync_time,
+ uint64_t *long_fsync_threshold,
+ uint64_t *long_fsync_count,
+ uint64_t *long_fsync_time);
+
+void toku_set_func_fsync (int (*fsync_function)(int));
+void toku_set_func_pwrite (ssize_t (*)(int, const void *, size_t, toku_off_t));
+void toku_set_func_full_pwrite (ssize_t (*)(int, const void *, size_t, toku_off_t));
+void toku_set_func_write (ssize_t (*)(int, const void *, size_t));
+void toku_set_func_full_write (ssize_t (*)(int, const void *, size_t));
+void toku_set_func_fdopen (FILE * (*)(int, const char *));
+void toku_set_func_fopen (FILE * (*)(const char *, const char *));
+void toku_set_func_open (int (*)(const char *, int, int));
+void toku_set_func_fclose(int (*)(FILE *));
+void toku_set_func_read(ssize_t (*)(int, void *, size_t));
+void toku_set_func_pread(ssize_t (*)(int, void *, size_t, off_t));
+void toku_set_func_fwrite(
+ size_t (*fwrite_fun)(const void *, size_t, size_t, FILE *));
+
+int toku_portability_init(void);
+void toku_portability_destroy(void);
+
+// Effect: Return X, where X the smallest multiple of ALIGNMENT such that X>=V.
+// Requires: ALIGNMENT is a power of two
+static inline uint64_t roundup_to_multiple(uint64_t alignment, uint64_t v) {
+ return (v + alignment - 1) & ~(alignment - 1);
+}
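
Two brief usage sketches for the helpers above: the toku_os_* macros forward the caller's __FILE__/__LINE__ to the *_with_source_location variants, and roundup_to_multiple rounds a value up to a power-of-two alignment (the file name below is hypothetical):

    // Illustrative only.
    static void example_portability(void) {
        // 4096-byte alignment: 5000 rounds up to 8192.
        uint64_t padded = roundup_to_multiple(4096, 5000);
        (void)padded;

        // The macro supplies this call site's __FILE__ and __LINE__.
        int r = toku_os_delete("example.tmp");
        (void)r;
    }
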
diff --git a/storage/tokudb/PerconaFT/portability/toku_pthread.cc b/storage/tokudb/PerconaFT/portability/toku_pthread.cc
new file mode 100644
index 00000000..fe8a4d48
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_pthread.cc
@@ -0,0 +1,73 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#define _GNU_SOURCE 1
+#include <portability/toku_config.h>
+#include <toku_pthread.h>
+
+int toku_pthread_yield(void) {
+#if defined(HAVE_PTHREAD_YIELD)
+# if defined(PTHREAD_YIELD_RETURNS_INT)
+ return pthread_yield();
+# elif defined(PTHREAD_YIELD_RETURNS_VOID)
+ pthread_yield();
+ return 0;
+# else
+# error "don't know what pthread_yield() returns"
+# endif
+#elif defined(HAVE_PTHREAD_YIELD_NP)
+ pthread_yield_np();
+ return 0;
+#else
+# error "cannot find pthread_yield or pthread_yield_np"
+#endif
+}
diff --git a/storage/tokudb/PerconaFT/portability/toku_pthread.h b/storage/tokudb/PerconaFT/portability/toku_pthread.h
new file mode 100644
index 00000000..a0831f67
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_pthread.h
@@ -0,0 +1,545 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <pthread.h>
+#include <time.h>
+#include <stdint.h>
+
+#include "toku_portability.h"
+#include "toku_assert.h"
+
+// TODO: some things moved to toku_instrumentation.h, not necessarily the best
+// place
+typedef pthread_attr_t toku_pthread_attr_t;
+typedef pthread_t toku_pthread_t;
+typedef pthread_mutex_t toku_pthread_mutex_t;
+typedef pthread_condattr_t toku_pthread_condattr_t;
+typedef pthread_cond_t toku_pthread_cond_t;
+typedef pthread_rwlockattr_t toku_pthread_rwlockattr_t;
+typedef pthread_key_t toku_pthread_key_t;
+typedef struct timespec toku_timespec_t;
+
+// TODO: break this include loop
+#include <pthread.h>
+typedef pthread_mutexattr_t toku_pthread_mutexattr_t;
+
+struct toku_mutex_t {
+ pthread_mutex_t pmutex;
+ struct PSI_mutex
+ *psi_mutex; /* The performance schema instrumentation hook */
+#if defined(TOKU_PTHREAD_DEBUG)
+ pthread_t owner; // = pthread_self(); // for debugging
+ bool locked;
+ bool valid;
+ pfs_key_t instr_key_id;
+#endif // defined(TOKU_PTHREAD_DEBUG)
+};
+
+struct toku_cond_t {
+ pthread_cond_t pcond;
+ struct PSI_cond *psi_cond;
+#if defined(TOKU_PTHREAD_DEBUG)
+ pfs_key_t instr_key_id;
+#endif // defined(TOKU_PTHREAD_DEBUG)
+};
+
+#if defined(TOKU_PTHREAD_DEBUG)
+#define TOKU_COND_INITIALIZER \
+ { \
+ .pcond = PTHREAD_COND_INITIALIZER, .psi_cond = nullptr, \
+ .instr_key_id = 0 \
+ }
+#else
+#define TOKU_COND_INITIALIZER \
+ { .pcond = PTHREAD_COND_INITIALIZER, .psi_cond = nullptr }
+#endif // defined(TOKU_PTHREAD_DEBUG)
+
+struct toku_pthread_rwlock_t {
+ pthread_rwlock_t rwlock;
+ struct PSI_rwlock *psi_rwlock;
+#if defined(TOKU_PTHREAD_DEBUG)
+ pfs_key_t instr_key_id;
+#endif // defined(TOKU_PTHREAD_DEBUG)
+};
+
+typedef struct toku_mutex_aligned {
+ toku_mutex_t aligned_mutex __attribute__((__aligned__(64)));
+} toku_mutex_aligned_t;
+
+// Initializing with {} will fill in a struct with all zeros.
+// But you may also need a pragma to suppress the warnings, as follows
+//
+// #pragma GCC diagnostic push
+// #pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+// toku_mutex_t foo = ZERO_MUTEX_INITIALIZER;
+// #pragma GCC diagnostic pop
+//
+// In general it will be a lot of busy work to make this codebase compile
+// cleanly with -Wmissing-field-initializers
+
+#define ZERO_MUTEX_INITIALIZER \
+ {}
+
+#if defined(TOKU_PTHREAD_DEBUG)
+#define TOKU_MUTEX_INITIALIZER \
+ { \
+ .pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr, .owner = 0, \
+ .locked = false, .valid = true, .instr_key_id = 0 \
+ }
+#else
+#define TOKU_MUTEX_INITIALIZER \
+ { .pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr }
+#endif // defined(TOKU_PTHREAD_DEBUG)
+
+// Darwin doesn't provide adaptive mutexes
+#if defined(__APPLE__)
+#define TOKU_MUTEX_ADAPTIVE PTHREAD_MUTEX_DEFAULT
+#if defined(TOKU_PTHREAD_DEBUG)
+#define TOKU_ADAPTIVE_MUTEX_INITIALIZER \
+ { \
+ .pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr, .owner = 0, \
+ .locked = false, .valid = true, .instr_key_id = 0 \
+ }
+#else
+#define TOKU_ADAPTIVE_MUTEX_INITIALIZER \
+ { .pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr }
+#endif // defined(TOKU_PTHREAD_DEBUG)
+#else // __FreeBSD__, __linux__, at least
+#define TOKU_MUTEX_ADAPTIVE PTHREAD_MUTEX_ADAPTIVE_NP
+#if defined(TOKU_PTHREAD_DEBUG)
+#define TOKU_ADAPTIVE_MUTEX_INITIALIZER \
+ { \
+ .pmutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, .psi_mutex = nullptr, \
+ .owner = 0, .locked = false, .valid = true, .instr_key_id = 0 \
+ }
+#else
+#define TOKU_ADAPTIVE_MUTEX_INITIALIZER \
+ { .pmutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, .psi_mutex = nullptr }
+#endif // defined(TOKU_PTHREAD_DEBUG)
+#endif // defined(__APPLE__)
+
+// Different OSes implement these pthread types with different amounts of
+// struct nesting. C++ will fill out all missing values with zeroes if you
+// provide at least one zero, but it needs the right amount of nesting.
+#if defined(__FreeBSD__)
+#define ZERO_COND_INITIALIZER \
+ { 0 }
+#elif defined(__APPLE__)
+#if TOKU_PTHREAD_DEBUG
+#define ZERO_COND_INITIALIZER \
+ { \
+ { 0 , { 0 } }, \
+ nullptr, \
+ 0 \
+ }
+#else
+#define ZERO_COND_INITIALIZER \
+ { \
+ { 0 , { 0 } }, \
+ nullptr \
+ }
+#endif
+#else // __linux__, at least
+#define ZERO_COND_INITIALIZER \
+ {}
+#endif
+
+static inline void toku_mutexattr_init(toku_pthread_mutexattr_t *attr) {
+ int r = pthread_mutexattr_init(attr);
+ assert_zero(r);
+}
+
+static inline void
+toku_mutexattr_settype(toku_pthread_mutexattr_t *attr, int type) {
+ int r = pthread_mutexattr_settype(attr, type);
+ assert_zero(r);
+}
+
+static inline void
+toku_mutexattr_destroy(toku_pthread_mutexattr_t *attr) {
+ int r = pthread_mutexattr_destroy(attr);
+ assert_zero(r);
+}
+
+#if defined(TOKU_PTHREAD_DEBUG)
+static inline void toku_mutex_assert_locked(const toku_mutex_t *mutex) {
+ invariant(mutex->locked);
+ invariant(mutex->owner == pthread_self());
+}
+#else
+static inline void
+toku_mutex_assert_locked(const toku_mutex_t *mutex __attribute__((unused))) {
+}
+#endif // defined(TOKU_PTHREAD_DEBUG)
+
+// asserting that a mutex is unlocked only makes sense
+// if the calling thread can guarantee that no other threads
+// are trying to lock this mutex at the time of the assertion
+//
+// a good example of this is a tree with mutexes on each node.
+// when a node is locked, the caller knows that no other threads
+// can be trying to lock its children's mutexes. the children
+// are in one of two fixed states: locked or unlocked.
+// (see the illustrative sketch below these helpers.)
+#if defined(TOKU_PTHREAD_DEBUG)
+static inline void
+toku_mutex_assert_unlocked(toku_mutex_t *mutex) {
+ invariant(mutex->owner == 0);
+ invariant(!mutex->locked);
+}
+#else
+static inline void toku_mutex_assert_unlocked(toku_mutex_t *mutex
+ __attribute__((unused))) {}
+#endif // defined(TOKU_PTHREAD_DEBUG)
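+// Illustrative sketch (not part of the original code): with the parent's mutex
+// held, no other thread can be acquiring a child's mutex, so it is safe to
+// assert that a child expected to be free really is unlocked. `node`,
+// `n_children`, and `children` are hypothetical fields.
+//
+//   toku_mutex_lock(&node->mutex);
+//   for (int i = 0; i < node->n_children; i++)
+//       toku_mutex_assert_unlocked(&node->children[i]->mutex);
+//   toku_mutex_unlock(&node->mutex);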
+
+#define toku_mutex_lock(M) \
+ toku_mutex_lock_with_source_location(M, __FILE__, __LINE__)
+
+static inline void toku_cond_init(toku_cond_t *cond,
+ const toku_pthread_condattr_t *attr) {
+ int r = pthread_cond_init(&cond->pcond, attr);
+ assert_zero(r);
+}
+
+#define toku_mutex_trylock(M) \
+ toku_mutex_trylock_with_source_location(M, __FILE__, __LINE__)
+
+inline void toku_mutex_unlock(toku_mutex_t *mutex) {
+#if defined(TOKU_PTHREAD_DEBUG)
+ invariant(mutex->owner == pthread_self());
+ invariant(mutex->valid);
+ invariant(mutex->locked);
+ mutex->locked = false;
+ mutex->owner = 0;
+#endif // defined(TOKU_PTHREAD_DEBUG)
+ toku_instr_mutex_unlock(mutex->psi_mutex);
+ int r = pthread_mutex_unlock(&mutex->pmutex);
+ assert_zero(r);
+}
+
+inline void toku_mutex_lock_with_source_location(toku_mutex_t *mutex,
+ const char *src_file,
+ int src_line) {
+
+ toku_mutex_instrumentation mutex_instr;
+ toku_instr_mutex_lock_start(mutex_instr, *mutex, src_file, src_line);
+
+ const int r = pthread_mutex_lock(&mutex->pmutex);
+ toku_instr_mutex_lock_end(mutex_instr, r);
+
+ assert_zero(r);
+#if defined(TOKU_PTHREAD_DEBUG)
+ invariant(mutex->valid);
+ invariant(!mutex->locked);
+ invariant(mutex->owner == 0);
+ mutex->locked = true;
+ mutex->owner = pthread_self();
+#endif // defined(TOKU_PTHREAD_DEBUG)
+}
+
+inline int toku_mutex_trylock_with_source_location(toku_mutex_t *mutex,
+ const char *src_file,
+ int src_line) {
+
+ toku_mutex_instrumentation mutex_instr;
+ toku_instr_mutex_trylock_start(mutex_instr, *mutex, src_file, src_line);
+
+    const int r = pthread_mutex_trylock(&mutex->pmutex);
+ toku_instr_mutex_lock_end(mutex_instr, r);
+
+#if defined(TOKU_PTHREAD_DEBUG)
+ if (r == 0) {
+ invariant(mutex->valid);
+ invariant(!mutex->locked);
+ invariant(mutex->owner == 0);
+ mutex->locked = true;
+ mutex->owner = pthread_self();
+ }
+#endif // defined(TOKU_PTHREAD_DEBUG)
+ return r;
+}
+
+#define toku_cond_wait(C, M) \
+ toku_cond_wait_with_source_location(C, M, __FILE__, __LINE__)
+
+#define toku_cond_timedwait(C, M, W) \
+ toku_cond_timedwait_with_source_location(C, M, W, __FILE__, __LINE__)
+
+inline void toku_cond_init(const toku_instr_key &key,
+ toku_cond_t *cond,
+ const pthread_condattr_t *attr) {
+ toku_instr_cond_init(key, *cond);
+ int r = pthread_cond_init(&cond->pcond, attr);
+ assert_zero(r);
+}
+
+inline void toku_cond_destroy(toku_cond_t *cond) {
+ toku_instr_cond_destroy(cond->psi_cond);
+ int r = pthread_cond_destroy(&cond->pcond);
+ assert_zero(r);
+}
+
+inline void toku_cond_wait_with_source_location(toku_cond_t *cond,
+ toku_mutex_t *mutex,
+ const char *src_file,
+ uint src_line) {
+
+#if defined(TOKU_PTHREAD_DEBUG)
+ invariant(mutex->locked);
+ mutex->locked = false;
+ mutex->owner = 0;
+#endif // defined(TOKU_PTHREAD_DEBUG)
+
+ /* Instrumentation start */
+ toku_cond_instrumentation cond_instr;
+ toku_instr_cond_wait_start(cond_instr,
+ toku_instr_cond_op::cond_wait,
+ *cond,
+ *mutex,
+ src_file,
+ src_line);
+
+ /* Instrumented code */
+ const int r = pthread_cond_wait(&cond->pcond, &mutex->pmutex);
+
+ /* Instrumentation end */
+ toku_instr_cond_wait_end(cond_instr, r);
+
+ assert_zero(r);
+#if defined(TOKU_PTHREAD_DEBUG)
+ invariant(!mutex->locked);
+ mutex->locked = true;
+ mutex->owner = pthread_self();
+#endif // defined(TOKU_PTHREAD_DEBUG)
+}
+
+inline int toku_cond_timedwait_with_source_location(toku_cond_t *cond,
+ toku_mutex_t *mutex,
+ toku_timespec_t *wakeup_at,
+ const char *src_file,
+ uint src_line) {
+#if defined(TOKU_PTHREAD_DEBUG)
+ invariant(mutex->locked);
+ mutex->locked = false;
+ mutex->owner = 0;
+#endif // defined(TOKU_PTHREAD_DEBUG)
+
+ /* Instrumentation start */
+ toku_cond_instrumentation cond_instr;
+ toku_instr_cond_wait_start(cond_instr,
+ toku_instr_cond_op::cond_timedwait,
+ *cond,
+ *mutex,
+ src_file,
+ src_line);
+
+ /* Instrumented code */
+ const int r = pthread_cond_timedwait(
+ &cond->pcond, &mutex->pmutex, wakeup_at);
+
+ /* Instrumentation end */
+ toku_instr_cond_wait_end(cond_instr, r);
+
+#if defined(TOKU_PTHREAD_DEBUG)
+ invariant(!mutex->locked);
+ mutex->locked = true;
+ mutex->owner = pthread_self();
+#endif // defined(TOKU_PTHREAD_DEBUG)
+ return r;
+}
+
+inline void toku_cond_signal(toku_cond_t *cond) {
+ toku_instr_cond_signal(*cond);
+ const int r = pthread_cond_signal(&cond->pcond);
+ assert_zero(r);
+}
+
+inline void toku_cond_broadcast(toku_cond_t *cond) {
+ toku_instr_cond_broadcast(*cond);
+ const int r = pthread_cond_broadcast(&cond->pcond);
+ assert_zero(r);
+}
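+// A minimal wait/signal sketch (illustrative only; `ready` is a hypothetical
+// flag protected by `m`). Waits can wake spuriously, so the predicate is
+// re-checked in a loop:
+//
+//   toku_mutex_lock(&m);
+//   while (!ready)
+//       toku_cond_wait(&c, &m);   // atomically releases and reacquires m
+//   toku_mutex_unlock(&m);
+//
+//   // another thread:
+//   toku_mutex_lock(&m);  ready = true;  toku_cond_signal(&c);  toku_mutex_unlock(&m);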
+
+inline void toku_mutex_init(const toku_instr_key &key,
+ toku_mutex_t *mutex,
+ const toku_pthread_mutexattr_t *attr) {
+#if defined(TOKU_PTHREAD_DEBUG)
+ mutex->valid = true;
+#endif // defined(TOKU_PTHREAD_DEBUG)
+ toku_instr_mutex_init(key, *mutex);
+ const int r = pthread_mutex_init(&mutex->pmutex, attr);
+ assert_zero(r);
+#if defined(TOKU_PTHREAD_DEBUG)
+ mutex->locked = false;
+ invariant(mutex->valid);
+ mutex->valid = true;
+ mutex->owner = 0;
+#endif // defined(TOKU_PTHREAD_DEBUG)
+}
+
+inline void toku_mutex_destroy(toku_mutex_t *mutex) {
+#if defined(TOKU_PTHREAD_DEBUG)
+ invariant(mutex->valid);
+ mutex->valid = false;
+ invariant(!mutex->locked);
+#endif // defined(TOKU_PTHREAD_DEBUG)
+ toku_instr_mutex_destroy(mutex->psi_mutex);
+ int r = pthread_mutex_destroy(&mutex->pmutex);
+ assert_zero(r);
+}
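+// A minimal lifecycle sketch (illustrative only; `my_mutex_key` stands in for
+// a toku_instr_key registered with the instrumentation layer):
+//
+//   toku_mutex_t m;
+//   toku_mutex_init(my_mutex_key, &m, nullptr);
+//   toku_mutex_lock(&m);      // expands to the *_with_source_location variant
+//   toku_mutex_unlock(&m);
+//   toku_mutex_destroy(&m);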
+
+#define toku_pthread_rwlock_rdlock(RW) \
+ toku_pthread_rwlock_rdlock_with_source_location(RW, __FILE__, __LINE__)
+
+#define toku_pthread_rwlock_wrlock(RW) \
+ toku_pthread_rwlock_wrlock_with_source_location(RW, __FILE__, __LINE__)
+
+inline void toku_pthread_rwlock_init(
+ const toku_instr_key &key,
+ toku_pthread_rwlock_t *__restrict rwlock,
+ const toku_pthread_rwlockattr_t *__restrict attr) {
+ toku_instr_rwlock_init(key, *rwlock);
+ int r = pthread_rwlock_init(&rwlock->rwlock, attr);
+ assert_zero(r);
+}
+
+inline void toku_pthread_rwlock_destroy(toku_pthread_rwlock_t *rwlock) {
+ toku_instr_rwlock_destroy(rwlock->psi_rwlock);
+ int r = pthread_rwlock_destroy(&rwlock->rwlock);
+ assert_zero(r);
+}
+
+inline void toku_pthread_rwlock_rdlock_with_source_location(
+ toku_pthread_rwlock_t *rwlock,
+ const char *src_file,
+ uint src_line) {
+
+ /* Instrumentation start */
+ toku_rwlock_instrumentation rwlock_instr;
+ toku_instr_rwlock_rdlock_wait_start(
+ rwlock_instr, *rwlock, src_file, src_line);
+ /* Instrumented code */
+ const int r = pthread_rwlock_rdlock(&rwlock->rwlock);
+
+ /* Instrumentation end */
+ toku_instr_rwlock_rdlock_wait_end(rwlock_instr, r);
+
+ assert_zero(r);
+}
+
+inline void toku_pthread_rwlock_wrlock_with_source_location(
+ toku_pthread_rwlock_t *rwlock,
+ const char *src_file,
+ uint src_line) {
+
+ /* Instrumentation start */
+ toku_rwlock_instrumentation rwlock_instr;
+ toku_instr_rwlock_wrlock_wait_start(
+ rwlock_instr, *rwlock, src_file, src_line);
+ /* Instrumented code */
+ const int r = pthread_rwlock_wrlock(&rwlock->rwlock);
+
+ /* Instrumentation end */
+ toku_instr_rwlock_wrlock_wait_end(rwlock_instr, r);
+
+ assert_zero(r);
+}
+
+inline void toku_pthread_rwlock_rdunlock(toku_pthread_rwlock_t *rwlock) {
+ toku_instr_rwlock_unlock(*rwlock);
+ const int r = pthread_rwlock_unlock(&rwlock->rwlock);
+ assert_zero(r);
+}
+
+inline void toku_pthread_rwlock_wrunlock(toku_pthread_rwlock_t *rwlock) {
+ toku_instr_rwlock_unlock(*rwlock);
+ const int r = pthread_rwlock_unlock(&rwlock->rwlock);
+ assert_zero(r);
+}
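+// A minimal read/write lock sketch (illustrative only; `my_rwlock_key` stands
+// in for a toku_instr_key registered with the instrumentation layer):
+//
+//   toku_pthread_rwlock_t rw;
+//   toku_pthread_rwlock_init(my_rwlock_key, &rw, nullptr);
+//   toku_pthread_rwlock_rdlock(&rw);     // shared
+//   toku_pthread_rwlock_rdunlock(&rw);
+//   toku_pthread_rwlock_wrlock(&rw);     // exclusive
+//   toku_pthread_rwlock_wrunlock(&rw);
+//   toku_pthread_rwlock_destroy(&rw);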
+
+static inline int toku_pthread_join(toku_pthread_t thread, void **value_ptr) {
+ return pthread_join(thread, value_ptr);
+}
+
+static inline int
+toku_pthread_detach(toku_pthread_t thread) {
+ return pthread_detach(thread);
+}
+
+static inline int
+toku_pthread_key_create(toku_pthread_key_t *key, void (*destroyf)(void *)) {
+ return pthread_key_create(key, destroyf);
+}
+
+static inline int
+toku_pthread_key_delete(toku_pthread_key_t key) {
+ return pthread_key_delete(key);
+}
+
+static inline void *
+toku_pthread_getspecific(toku_pthread_key_t key) {
+ return pthread_getspecific(key);
+}
+
+static inline int toku_pthread_setspecific(toku_pthread_key_t key, void *data) {
+ return pthread_setspecific(key, data);
+}
+
+int toku_pthread_yield(void) __attribute__((__visibility__("default")));
+
+static inline toku_pthread_t toku_pthread_self(void) { return pthread_self(); }
+
+static inline void *toku_pthread_done(void *exit_value) {
+ toku_instr_delete_current_thread();
+ pthread_exit(exit_value);
+}
diff --git a/storage/tokudb/PerconaFT/portability/toku_race_tools.h b/storage/tokudb/PerconaFT/portability/toku_race_tools.h
new file mode 100644
index 00000000..47936354
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_race_tools.h
@@ -0,0 +1,163 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <portability/toku_config.h>
+
+#ifdef HAVE_valgrind
+#undef USE_VALGRIND
+#define USE_VALGRIND 1
+#endif
+
+#if defined(__linux__) && defined(USE_VALGRIND) && USE_VALGRIND
+
+# include <valgrind/helgrind.h>
+# include <valgrind/drd.h>
+
+# define TOKU_ANNOTATE_NEW_MEMORY(p, size) ANNOTATE_NEW_MEMORY(p, size)
+# define TOKU_VALGRIND_HG_ENABLE_CHECKING(p, size) VALGRIND_HG_ENABLE_CHECKING(p, size)
+# define TOKU_VALGRIND_HG_DISABLE_CHECKING(p, size) VALGRIND_HG_DISABLE_CHECKING(p, size)
+# define TOKU_DRD_IGNORE_VAR(v) DRD_IGNORE_VAR(v)
+# define TOKU_DRD_STOP_IGNORING_VAR(v) DRD_STOP_IGNORING_VAR(v)
+# define TOKU_ANNOTATE_IGNORE_READS_BEGIN() ANNOTATE_IGNORE_READS_BEGIN()
+# define TOKU_ANNOTATE_IGNORE_READS_END() ANNOTATE_IGNORE_READS_END()
+# define TOKU_ANNOTATE_IGNORE_WRITES_BEGIN() ANNOTATE_IGNORE_WRITES_BEGIN()
+# define TOKU_ANNOTATE_IGNORE_WRITES_END() ANNOTATE_IGNORE_WRITES_END()
+
+/*
+ * How to make helgrind happy about tree rotations and new mutex orderings:
+ *
+ * // Tell helgrind that we unlocked it so that the next call doesn't get a "destroyed a locked mutex" error.
+ * // Tell helgrind that we destroyed the mutex.
+ * VALGRIND_HG_MUTEX_UNLOCK_PRE(&locka);
+ * VALGRIND_HG_MUTEX_DESTROY_PRE(&locka);
+ *
+ * // And recreate it. It would be better to simply be able to say that the order on these two can now be reversed, because this code forgets all the ordering information for this mutex.
+ * // Then tell helgrind that we have locked it again.
+ * VALGRIND_HG_MUTEX_INIT_POST(&locka, 0);
+ * VALGRIND_HG_MUTEX_LOCK_POST(&locka);
+ *
+ * When the ordering of two locks changes, we don't need to tell Helgrind about both locks. Just one is good enough.
+ */
+
+# define TOKU_VALGRIND_RESET_MUTEX_ORDERING_INFO(mutex) \
+ VALGRIND_HG_MUTEX_UNLOCK_PRE(mutex); \
+ VALGRIND_HG_MUTEX_DESTROY_PRE(mutex); \
+ VALGRIND_HG_MUTEX_INIT_POST(mutex, 0); \
+ VALGRIND_HG_MUTEX_LOCK_POST(mutex);
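+// Usage sketch (illustrative only): after a rotation reverses the lock order
+// of two mutexes such as `locka` above, forget the recorded ordering for one
+// of them so helgrind does not report a false lock-order inversion:
+//
+//   TOKU_VALGRIND_RESET_MUTEX_ORDERING_INFO(&locka);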
+
+#else // !defined(__linux__) || !USE_VALGRIND
+
+# define NVALGRIND 1
+# define TOKU_ANNOTATE_NEW_MEMORY(p, size) ((void) 0)
+# define TOKU_VALGRIND_HG_ENABLE_CHECKING(p, size) ((void) 0)
+# define TOKU_VALGRIND_HG_DISABLE_CHECKING(p, size) ((void) 0)
+# define TOKU_DRD_IGNORE_VAR(v)
+# define TOKU_DRD_STOP_IGNORING_VAR(v)
+# define TOKU_ANNOTATE_IGNORE_READS_BEGIN() ((void) 0)
+# define TOKU_ANNOTATE_IGNORE_READS_END() ((void) 0)
+# define TOKU_ANNOTATE_IGNORE_WRITES_BEGIN() ((void) 0)
+# define TOKU_ANNOTATE_IGNORE_WRITES_END() ((void) 0)
+# define TOKU_VALGRIND_RESET_MUTEX_ORDERING_INFO(mutex)
+#undef RUNNING_ON_VALGRIND
+# define RUNNING_ON_VALGRIND (0U)
+#endif
+
+// Valgrind 3.10.1 (and previous versions).
+// Problems with VALGRIND_HG_DISABLE_CHECKING and VALGRIND_HG_ENABLE_CHECKING.
+// Helgrind's implementation of disable and enable checking causes false races to be
+// reported. In addition, the race report does not include ANY information about
+// the code that uses the helgrind disable and enable functions. Therefore, it is
+// very difficult to figure out the cause of the race.
+// DRD does implement the disable and enable functions.
+
+// Problems with ANNOTATE_IGNORE_READS.
+// Helgrind does not implement ignore reads.
+// Annotate ignore reads is the way to inform DRD to ignore racy reads.
+
+// FT code uses unsafe reads in several places. These unsafe reads have been noted
+// as valid since they use the toku_unsafe_fetch function. Unfortunately, this
+// causes helgrind to report erroneous data races which makes use of helgrind problematic.
+
+// Unsafely fetch and return a `T' from src, telling drd to ignore
+// racy access to src for the next sizeof(*src) bytes
+template <typename T>
+T toku_unsafe_fetch(T *src) {
+ if (0) TOKU_VALGRIND_HG_DISABLE_CHECKING(src, sizeof *src); // disabled, see comment
+ TOKU_ANNOTATE_IGNORE_READS_BEGIN();
+ T r = *src;
+ TOKU_ANNOTATE_IGNORE_READS_END();
+ if (0) TOKU_VALGRIND_HG_ENABLE_CHECKING(src, sizeof *src); // disabled, see comment
+ return r;
+}
+
+template <typename T>
+T toku_unsafe_fetch(T &src) {
+ return toku_unsafe_fetch(&src);
+}
+
+// Unsafely set a `T' value into *dest from src, telling drd to ignore
+// racy access to dest for the next sizeof(*dest) bytes
+template <typename T>
+void toku_unsafe_set(T *dest, const T src) {
+ if (0) TOKU_VALGRIND_HG_DISABLE_CHECKING(dest, sizeof *dest); // disabled, see comment
+ TOKU_ANNOTATE_IGNORE_WRITES_BEGIN();
+ *dest = src;
+ TOKU_ANNOTATE_IGNORE_WRITES_END();
+ if (0) TOKU_VALGRIND_HG_ENABLE_CHECKING(dest, sizeof *dest); // disabled, see comment
+}
+
+template <typename T>
+void toku_unsafe_set(T &dest, const T src) {
+ toku_unsafe_set(&dest, src);
+}
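+// Usage sketch (illustrative only): bumping a hypothetical counter that other
+// threads may read without a lock, while telling drd the race is intentional:
+//
+//   static uint64_t counter;
+//   toku_unsafe_set(&counter, toku_unsafe_fetch(&counter) + 1);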
diff --git a/storage/tokudb/PerconaFT/portability/toku_random.h b/storage/tokudb/PerconaFT/portability/toku_random.h
new file mode 100644
index 00000000..7850ec56
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_random.h
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <portability/toku_config.h>
+#include <toku_portability.h>
+#include <toku_assert.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <limits.h>
+
+#if defined(HAVE_RANDOM_R)
+// Definitions of randu62 and randu64 assume that myrandom_r generates 31 low-order bits
+static_assert(RAND_MAX == INT32_MAX, "Unexpected RAND_MAX");
+static inline int
+myinitstate_r(unsigned int seed, char *statebuf, size_t statelen, struct random_data *buf)
+{
+ return initstate_r(seed, statebuf, statelen, buf);
+}
+static inline int32_t
+myrandom_r(struct random_data *buf)
+{
+ int32_t x;
+ int r = random_r(buf, &x);
+ lazy_assert_zero(r);
+ return x;
+}
+#elif defined(HAVE_NRAND48)
+struct random_data {
+ unsigned short xsubi[3];
+};
+static inline int
+myinitstate_r(unsigned int seed, char *UU(statebuf), size_t UU(statelen), struct random_data *buf)
+{
+ buf->xsubi[0] = (seed & 0xffff0000) >> 16;
+    buf->xsubi[1] = (seed & 0x0000ffff);
+ buf->xsubi[2] = (seed & 0x00ffff00) >> 8;
+ return 0;
+}
+static inline int32_t
+myrandom_r(struct random_data *buf)
+{
+ int32_t x = nrand48(buf->xsubi);
+ return x;
+}
+#else
+# error "no suitable reentrant random function available (checked random_r and nrand48)"
+#endif
+
+static inline uint64_t
+randu62(struct random_data *buf)
+{
+ uint64_t a = myrandom_r(buf);
+ uint64_t b = myrandom_r(buf);
+ return (a | (b << 31));
+}
+
+static inline uint64_t
+randu64(struct random_data *buf)
+{
+ uint64_t r62 = randu62(buf);
+ uint64_t c = myrandom_r(buf);
+ return (r62 | ((c & 0x3) << 62));
+}
+
+static inline uint32_t
+rand_choices(struct random_data *buf, uint32_t choices) {
+ invariant(choices >= 2);
+ invariant(choices < INT32_MAX);
+ uint32_t bits = 2;
+ while (bits < choices) {
+ bits *= 2;
+ }
+ --bits;
+
+ uint32_t result;
+ do {
+ result = myrandom_r(buf) & bits;
+ } while (result >= choices);
+
+ return result;
+}
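+// Usage sketch (illustrative only): the mask-and-reject loop above yields a
+// value uniformly distributed in [0, choices), e.g. an unbiased die roll:
+//
+//   struct random_data rd = {};
+//   char statebuf[64] = {};
+//   myinitstate_r(42, statebuf, sizeof statebuf, &rd);
+//   uint32_t roll = rand_choices(&rd, 6);   // uniform in 0..5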
diff --git a/storage/tokudb/PerconaFT/portability/toku_stdint.h b/storage/tokudb/PerconaFT/portability/toku_stdint.h
new file mode 100644
index 00000000..0d2d7348
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_stdint.h
@@ -0,0 +1,42 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <stdint.h>
+#include <inttypes.h>
diff --git a/storage/tokudb/PerconaFT/portability/toku_stdlib.h b/storage/tokudb/PerconaFT/portability/toku_stdlib.h
new file mode 100644
index 00000000..4580ffb5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_stdlib.h
@@ -0,0 +1,41 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <stdlib.h>
diff --git a/storage/tokudb/PerconaFT/portability/toku_time.cc b/storage/tokudb/PerconaFT/portability/toku_time.cc
new file mode 100644
index 00000000..bbc5689a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_time.cc
@@ -0,0 +1,86 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "toku_time.h"
+
+#if !defined(HAVE_CLOCK_REALTIME)
+
+#include <errno.h>
+#include <mach/clock.h>
+#include <mach/mach.h>
+
+int toku_clock_gettime(clockid_t clk_id, struct timespec *ts) {
+ if (clk_id != CLOCK_REALTIME) {
+ // dunno how to fake any of the other types of clock on osx
+ return EINVAL;
+ }
+ // We may want to share access to cclock for performance, but that requires
+ // initialization and destruction that's more complex than it's worth for
+ // OSX right now. Some day we'll probably just use pthread_once or
+ // library constructors.
+ clock_serv_t cclock;
+ mach_timespec_t mts;
+ host_get_clock_service(mach_host_self(), REALTIME_CLOCK, &cclock);
+ clock_get_time(cclock, &mts);
+ mach_port_deallocate(mach_task_self(), cclock);
+ ts->tv_sec = mts.tv_sec;
+ ts->tv_nsec = mts.tv_nsec;
+ return 0;
+}
+#else // defined(HAVE_CLOCK_REALTIME)
+
+#include <time.h>
+int toku_clock_gettime(clockid_t clk_id, struct timespec *ts) {
+ return clock_gettime(clk_id, ts);
+}
+
+#endif
diff --git a/storage/tokudb/PerconaFT/portability/toku_time.h b/storage/tokudb/PerconaFT/portability/toku_time.h
new file mode 100644
index 00000000..b9789bc5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/portability/toku_time.h
@@ -0,0 +1,147 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "toku_config.h"
+
+#include <time.h>
+#include <sys/time.h>
+#include <stdint.h>
+#if defined(__powerpc__)
+# include <sys/platform/ppc.h>
+#endif
+
+static inline float toku_tdiff (struct timeval *a, struct timeval *b) {
+ return (float)((a->tv_sec - b->tv_sec) + 1e-6 * (a->tv_usec - b->tv_usec));
+}
+
+#if !defined(HAVE_CLOCK_REALTIME)
+// OS X does not have clock_gettime; we fake clockid_t for the interface and implement it with clock_get_time.
+typedef int clockid_t;
+// just something bogus, it doesn't matter, we just want to make sure we're
+// only supporting this mode because we're not sure we can support other modes
+// without a real clock_gettime()
+#define CLOCK_REALTIME 0x01867234
+#endif
+int toku_clock_gettime(clockid_t clk_id, struct timespec *ts) __attribute__((__visibility__("default")));
+
+// *************** Performance timers ************************
+// What do you really want from a performance timer:
+// (1) Can determine actual time of day from the performance time.
+// (2) Time goes forward, never backward.
+// (3) Same time on different processors (or even different machines).
+// (4) Time goes forward at a constant rate (doesn't get faster and slower)
+// (5) Portable.
+// (6) Getting the time is cheap.
+// Unfortunately it seems tough to get Properties 1-5, so we go for Property 6, but we abstract it.
+// We offer a type tokutime_t which can hold the time.
+// This type can be subtracted to get a time difference.
+// We can get the present time cheaply.
+// We can convert this type to seconds (but that can be expensive).
+// The implementation is to use RDTSC (hence we lose property 3: not portable).
+// Recent machines have constant_tsc in which case we get property (4).
+// Recent OSs on recent machines (that have RDTSCP) fix the per-processor clock skew, so we get property (3).
+// We get property 2 with RDTSC (as long as there's not any skew).
+// We don't even try to get property 1, since we don't need it.
+// The decision here is that these times are really accurate only on modern machines with modern OSs.
+typedef uint64_t tokutime_t;             // Time type used by the tokutek timers.
+
+// The value of tokutime_t is not specified here.
+// It might be microseconds since 1/1/1970 (if gettimeofday() is
+// used), or clock cycles since boot (if rdtsc is used). Or something
+// else.
+// Two tokutime_t values can be subtracted to get a time difference.
+// Use tokutime_to_seconds to convert that difference to seconds.
+// We want toku_time_now() to be fast, but don't care so much about tokutime_to_seconds();
+//
+// For accurate time calculations do the subtraction in the right order:
+// Right: tokutime_to_seconds(t1-t2);
+//  Wrong: tokutime_to_seconds(t1)-tokutime_to_seconds(t2);
+// Doing it the wrong way is likely to result in loss of precision.
+// A double can hold numbers up to about 53 bits.  RDTSC uses about 33 bits every second, so that leaves
+// 2^20 seconds from booting (about 2 weeks) before the RDTSC value cannot be represented accurately as a double.
+//
+double tokutime_to_seconds(tokutime_t) __attribute__((__visibility__("default"))); // Convert tokutime to seconds.
+
+// Get the value of tokutime for right now. We want this to be fast, so we expose the implementation as RDTSC.
+static inline tokutime_t toku_time_now(void) {
+#if defined(__x86_64__) || defined(__i386__)
+ uint32_t lo, hi;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return (uint64_t)hi << 32 | lo;
+#elif defined (__aarch64__)
+ uint64_t result;
+ __asm __volatile__ ("mrs %[rt], cntvct_el0" : [rt] "=r" (result));
+ return result;
+#elif defined(__powerpc__)
+ return __ppc_get_timebase();
+#else
+#error No timer implementation for this platform
+#endif
+}
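+// Usage sketch (illustrative only): time a region with the cheap timer and
+// convert the *difference* (not the endpoints) to seconds:
+//
+//   tokutime_t t0 = toku_time_now();
+//   do_work();                                        // hypothetical workload
+//   double secs = tokutime_to_seconds(toku_time_now() - t0);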
+
+static inline uint64_t toku_current_time_microsec(void) {
+ struct timeval t;
+ gettimeofday(&t, NULL);
+ return t.tv_sec * (1UL * 1000 * 1000) + t.tv_usec;
+}
+
+// sleep microseconds
+static inline void toku_sleep_microsec(uint64_t us) {
+    struct timeval t;
+
+    t.tv_sec = us / 1000000;
+    t.tv_usec = us % 1000000;
+
+ select(0, NULL, NULL, NULL, &t);
+}
diff --git a/storage/tokudb/PerconaFT/scripts/run-all-nightly-tests.bash b/storage/tokudb/PerconaFT/scripts/run-all-nightly-tests.bash
new file mode 100644
index 00000000..0aed0a5f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/scripts/run-all-nightly-tests.bash
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+pushd $(dirname $0) &>/dev/null
+scriptdir=$PWD
+popd &>/dev/null
+
+bash $scriptdir/run-nightly-release-tests.bash
+bash $scriptdir/run-nightly-drd-tests.bash
+bash $scriptdir/run-nightly-coverage-tests.bash
+
diff --git a/storage/tokudb/PerconaFT/scripts/run-nightly-coverage-tests.bash b/storage/tokudb/PerconaFT/scripts/run-nightly-coverage-tests.bash
new file mode 100644
index 00000000..3daa87ce
--- /dev/null
+++ b/storage/tokudb/PerconaFT/scripts/run-nightly-coverage-tests.bash
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -e
+
+pushd $(dirname $0) &>/dev/null
+scriptdir=$PWD
+popd &>/dev/null
+tokudbdir=$(dirname $scriptdir)
+
+cd $tokudbdir
+
+if [ ! -d build ] ; then
+ mkdir build
+ pushd build
+ cmake \
+ -D CMAKE_BUILD_TYPE=Debug \
+ -D USE_VALGRIND=ON \
+ -D TOKU_DEBUG_PARANOID=ON \
+ -D USE_CTAGS=OFF \
+ -D USE_GTAGS=OFF \
+ -D USE_CSCOPE=OFF \
+ -D USE_ETAGS=OFF \
+ -D USE_GCOV=ON \
+ -D CMAKE_LINK_DEPENDS_NO_SHARED=ON \
+ -G Ninja \
+ -D RUN_LONG_TESTS=ON \
+ -D TOKUDB_DATA=$tokudbdir/../tokudb.data \
+ ..
+ ninja build_lzma build_snappy
+ popd
+fi
+
+cd build
+ctest -j16 \
+ -D NightlyStart \
+ -D NightlyUpdate \
+ -D NightlyConfigure \
+ -D NightlyBuild \
+ -D NightlyTest \
+ -D NightlyCoverage \
+ -D NightlySubmit \
+ -E '/drd|/helgrind'
diff --git a/storage/tokudb/PerconaFT/scripts/run-nightly-drd-tests.bash b/storage/tokudb/PerconaFT/scripts/run-nightly-drd-tests.bash
new file mode 100644
index 00000000..1ef819d8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/scripts/run-nightly-drd-tests.bash
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+set -e
+
+pushd $(dirname $0) &>/dev/null
+scriptdir=$PWD
+popd &>/dev/null
+tokudbdir=$(dirname $scriptdir)
+
+cd $tokudbdir
+
+if [ ! -d build ] ; then
+ mkdir build
+ pushd build
+ cmake \
+ -D CMAKE_BUILD_TYPE=drd \
+ -D USE_VALGRIND=ON \
+ -D TOKU_DEBUG_PARANOID=ON \
+ -D USE_CTAGS=OFF \
+ -D USE_GTAGS=OFF \
+ -D USE_CSCOPE=OFF \
+ -D USE_ETAGS=OFF \
+ -D CMAKE_LINK_DEPENDS_NO_SHARED=ON \
+ -G Ninja \
+ -D RUN_LONG_TESTS=ON \
+ -D TOKUDB_DATA=$tokudbdir/../tokudb.data \
+ ..
+ ninja build_lzma build_snappy
+ popd
+fi
+
+cd build
+ctest -j16 \
+ -D Nightly \
+ -R '/drd|/helgrind'
diff --git a/storage/tokudb/PerconaFT/scripts/run-nightly-release-tests.bash b/storage/tokudb/PerconaFT/scripts/run-nightly-release-tests.bash
new file mode 100644
index 00000000..c1039a0b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/scripts/run-nightly-release-tests.bash
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+set -e
+
+pushd $(dirname $0) &>/dev/null
+scriptdir=$PWD
+popd &>/dev/null
+tokudbdir=$(dirname $scriptdir)
+
+cd $tokudbdir
+
+if [ ! -d build ] ; then
+ mkdir build
+ pushd build
+ cmake \
+ -D CMAKE_BUILD_TYPE=Release \
+ -D USE_VALGRIND=ON \
+ -D TOKU_DEBUG_PARANOID=OFF \
+ -D USE_CTAGS=OFF \
+ -D USE_GTAGS=OFF \
+ -D USE_CSCOPE=OFF \
+ -D USE_ETAGS=OFF \
+ -D CMAKE_LINK_DEPENDS_NO_SHARED=ON \
+ -G Ninja \
+ -D RUN_LONG_TESTS=ON \
+ -D TOKUDB_DATA=$tokudbdir/../tokudb.data \
+ ..
+ ninja build_lzma build_snappy
+ popd
+fi
+
+cd build
+set +e
+ctest -j16 \
+ -D NightlyStart \
+ -D NightlyUpdate \
+ -D NightlyConfigure \
+ -D NightlyBuild \
+ -D NightlyTest \
+ -E '/drd|/helgrind'
+ctest -j16 \
+ -D NightlyMemCheck \
+ -E 'test1426\.tdb|/drd|/helgrind'
+set -e
+ctest -D NightlySubmit
diff --git a/storage/tokudb/PerconaFT/scripts/run.fractal.tree.tests.cmake b/storage/tokudb/PerconaFT/scripts/run.fractal.tree.tests.cmake
new file mode 100644
index 00000000..64d52a56
--- /dev/null
+++ b/storage/tokudb/PerconaFT/scripts/run.fractal.tree.tests.cmake
@@ -0,0 +1,135 @@
+set(CTEST_PROJECT_NAME "tokudb")
+get_filename_component(CTEST_SOURCE_DIRECTORY "${CTEST_SCRIPT_DIRECTORY}/.." ABSOLUTE)
+
+## given an executable, follows symlinks and resolves paths until it runs
+## out of symlinks, then gives you the basename
+macro(real_executable_name filename_input out)
+ set(res 0)
+ set(filename ${filename_input})
+ while(NOT(res))
+ execute_process(
+ COMMAND which ${filename}
+ RESULT_VARIABLE res
+ OUTPUT_VARIABLE full_filename
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(NOT(res))
+ execute_process(
+ COMMAND readlink ${full_filename}
+ RESULT_VARIABLE res
+ OUTPUT_VARIABLE link_target
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(NOT(res))
+ execute_process(
+ COMMAND dirname ${full_filename}
+ OUTPUT_VARIABLE filepath
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set(filename "${filepath}/${link_target}")
+ else()
+ set(filename ${full_filename})
+ endif()
+ else()
+ set(filename ${filename})
+ endif()
+ endwhile()
+ execute_process(
+ COMMAND basename ${filename}
+ OUTPUT_VARIABLE real_filename
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set(${out} ${real_filename})
+endmacro(real_executable_name)
+
+## gives you `uname ${flag}`
+macro(uname flag out)
+ execute_process(
+ COMMAND uname ${flag}
+ OUTPUT_VARIABLE ${out}
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+endmacro(uname)
+
+## gather machine info
+uname("-m" machine_type)
+get_filename_component(branchname "${CTEST_SOURCE_DIRECTORY}" NAME)
+
+set(ncpus 2)
+execute_process(
+ COMMAND grep bogomips /proc/cpuinfo
+ COMMAND wc -l
+ RESULT_VARIABLE res
+ OUTPUT_VARIABLE proc_ncpus
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+if(NOT res)
+ set(ncpus ${proc_ncpus})
+endif()
+
+## construct BUILDNAME
+set(BUILDNAME "${branchname} ${CMAKE_SYSTEM} ${machine_type}" CACHE STRING "CTest build name" FORCE)
+set(CTEST_BUILD_NAME "${BUILDNAME}")
+set(CTEST_CMAKE_GENERATOR "Unix Makefiles")
+site_name(CTEST_SITE)
+
+find_program(CTEST_SVN_COMMAND NAMES svn)
+find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind)
+find_program(CTEST_COVERAGE_COMMAND NAMES gcov)
+
+list(APPEND CTEST_NOTES_FILES
+ "${CTEST_SCRIPT_DIRECTORY}/${CTEST_SCRIPT_NAME}"
+ "${CMAKE_CURRENT_LIST_FILE}"
+ )
+
+set(all_opts
+ -DBUILD_TESTING=ON
+ -DUSE_CILK=OFF
+ )
+set(rel_opts
+ ${all_opts}
+ -DCMAKE_BUILD_TYPE=Release
+ )
+set(dbg_opts
+ ${all_opts}
+ -DCMAKE_BUILD_TYPE=Debug
+ )
+set(cov_opts
+ ${all_opts}
+ -DCMAKE_BUILD_TYPE=Debug
+ -DUSE_GCOV=ON
+ )
+
+set(CTEST_BINARY_DIRECTORY "${CTEST_SOURCE_DIRECTORY}/NightlyRelease")
+ctest_empty_binary_directory(${CTEST_BINARY_DIRECTORY})
+ctest_start(Nightly ${CTEST_SOURCE_DIRECTORY} ${CTEST_BINARY_DIRECTORY})
+ctest_update(SOURCE ${CTEST_SOURCE_DIRECTORY})
+ctest_configure(BUILD ${CTEST_BINARY_DIRECTORY} SOURCE ${CTEST_SOURCE_DIRECTORY}
+ OPTIONS "${rel_opts}")
+configure_file("${CTEST_SOURCE_DIRECTORY}/CTestConfig.cmake" "${CTEST_BINARY_DIRECTORY}/CTestConfig.cmake")
+configure_file("${CTEST_SOURCE_DIRECTORY}/CTestCustom.cmake" "${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake")
+ctest_build(BUILD ${CTEST_BINARY_DIRECTORY})
+ctest_read_custom_files("${CTEST_BINARY_DIRECTORY}")
+ctest_test(BUILD ${CTEST_BINARY_DIRECTORY} PARALLEL_LEVEL ${ncpus})
+ctest_submit()
+
+set(CTEST_BINARY_DIRECTORY "${CTEST_SOURCE_DIRECTORY}/NightlyDebug")
+ctest_empty_binary_directory(${CTEST_BINARY_DIRECTORY})
+ctest_start(Nightly ${CTEST_SOURCE_DIRECTORY} ${CTEST_BINARY_DIRECTORY})
+ctest_configure(BUILD ${CTEST_BINARY_DIRECTORY} SOURCE ${CTEST_SOURCE_DIRECTORY}
+ OPTIONS "${dbg_opts}")
+configure_file("${CTEST_SOURCE_DIRECTORY}/CTestConfig.cmake" "${CTEST_BINARY_DIRECTORY}/CTestConfig.cmake")
+configure_file("${CTEST_SOURCE_DIRECTORY}/CTestCustom.cmake" "${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake")
+ctest_build(BUILD ${CTEST_BINARY_DIRECTORY})
+ctest_read_custom_files("${CTEST_BINARY_DIRECTORY}")
+ctest_test(BUILD ${CTEST_BINARY_DIRECTORY} PARALLEL_LEVEL ${ncpus})
+ctest_memcheck(BUILD ${CTEST_BINARY_DIRECTORY} PARALLEL_LEVEL ${ncpus})
+ctest_submit()
+
+set(CTEST_BINARY_DIRECTORY "${CTEST_SOURCE_DIRECTORY}/NightlyCoverage")
+ctest_empty_binary_directory(${CTEST_BINARY_DIRECTORY})
+ctest_start(Nightly ${CTEST_SOURCE_DIRECTORY} ${CTEST_BINARY_DIRECTORY})
+ctest_configure(BUILD ${CTEST_BINARY_DIRECTORY} SOURCE ${CTEST_SOURCE_DIRECTORY}
+ OPTIONS "${cov_opts}")
+configure_file("${CTEST_SOURCE_DIRECTORY}/CTestConfig.cmake" "${CTEST_BINARY_DIRECTORY}/CTestConfig.cmake")
+configure_file("${CTEST_SOURCE_DIRECTORY}/CTestCustom.cmake" "${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake")
+ctest_build(BUILD ${CTEST_BINARY_DIRECTORY})
+ctest_read_custom_files("${CTEST_BINARY_DIRECTORY}")
+ctest_test(BUILD ${CTEST_BINARY_DIRECTORY} PARALLEL_LEVEL ${ncpus})
+ctest_coverage(BUILD ${CTEST_BINARY_DIRECTORY} LABELS RUN_GCOV)
+ctest_submit()
diff --git a/storage/tokudb/PerconaFT/scripts/run.stress-tests-forever.bash b/storage/tokudb/PerconaFT/scripts/run.stress-tests-forever.bash
new file mode 100644
index 00000000..d049378a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/scripts/run.stress-tests-forever.bash
@@ -0,0 +1,97 @@
+#!/bin/bash
+# $Id: run.stress-tests.bash 38773 2012-01-13 20:35:00Z leifwalsh $
+
+set -e
+
+scriptname=$(basename "$0")
+toku_toplevel=$(dirname $(dirname $(readlink -f "$PWD/$0")))
+
+src_tests="${toku_toplevel}/src/tests"
+testnames=(test_stress1.tdb \
+ test_stress5.tdb \
+ test_stress6.tdb)
+
+declare -a pids=(0)
+i=0
+
+savepid() {
+ pids[$i]=$1
+ (( i = i + 1 ))
+}
+
+killchildren() {
+ kill ${pids[@]} || true
+ for exec in ${testnames[@]}
+ do
+ pkill -f $exec || true
+ done
+}
+
+trap killchildren INT TERM EXIT
+
+run_test() {
+ exec="$1"; shift
+ table_size="$1"; shift
+ cachetable_size="$1"; shift
+ num_ptquery="$1"; shift
+ num_update="$1"; shift
+
+ rundir=$(mktemp -d ./rundir.XXXXXXXX)
+ tmplog=$(mktemp)
+
+ ulimit -c unlimited
+ t0="$(date)"
+ t1=""
+ envdir="../${exec}-${table_size}-${cachetable_size}-${num_ptquery}-${num_update}-forever-$$.dir"
+ cd $rundir
+ if LD_LIBRARY_PATH=../../../lib:$LD_LIBRARY_PATH \
+ ../$exec -v --only_create --envdir "$envdir" \
+ --num_elements $table_size \
+ --cachetable_size $cachetable_size &> $tmplog
+ then
+ rm -f $tmplog
+ t1="$(date)"
+ echo "Running $exec -v --only_stress --num_seconds 0 --envdir \"$envdir\" --num_elements $table_size --cachetable_size $cachetable_size --num_ptquery_threads $num_ptquery --no-crash_on_update_failure --num_update_threads $num_update &> $tmplog in $rundir."
+ (LD_LIBRARY_PATH=../../../lib:$LD_LIBRARY_PATH \
+ ../$exec -v --only_stress --num_seconds 0 --envdir "$envdir" \
+ --num_elements $table_size \
+ --cachetable_size $cachetable_size \
+ --num_ptquery_threads $num_ptquery \
+ --no-crash_on_update_failure \
+ --num_update_threads $num_update &> $tmplog) & mypid=$!
+ savepid $mypid
+ while true
+ do
+ sleep 10s
+ cpu=$(ps -o pcpu -p $mypid h)
+ if [[ -z $cpu ]]
+ then
+ echo "Process $mypid must have crashed: $exec,$table_size,$cachetable_size,$num_ptquery,$num_update,$t0,$t1,FAIL" 1>&2
+ echo "Check rundir $rundir, envdir $envdir, corefile core.$mypid." 1>&2
+ return
+ fi
+ if expr $cpu == 0.0 &>/dev/null
+ then
+ echo "Deadlock detected in process $mypid: $exec,$table_size,$cachetable_size,$num_ptquery,$num_update,$t0,$t1,FAIL" 1>&2
+ echo "Check rundir $rundir, envdir $envdir, corefile core.$mypid." 1>&2
+ return
+ fi
+ done
+ else
+ echo "Create phase failed: $exec,$table_size,$cachetable_size,$num_ptquery,$num_update,$t0,$t1,FAIL" 1>&2
+ fi
+ cd ..
+ rm -rf $rundir "$envdir"
+}
+
+cd $src_tests
+for exec in ${testnames[@]}
+do
+ for table_size in 2000 200000 50000000
+ do
+ (( small_cachetable = table_size * 50 ))
+ run_test $exec $table_size $small_cachetable 4 4 & savepid $!
+ done
+done
+
+wait ${pids[@]} || true
diff --git a/storage/tokudb/PerconaFT/scripts/run.stress-tests.py b/storage/tokudb/PerconaFT/scripts/run.stress-tests.py
new file mode 100644
index 00000000..e983fe8c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/scripts/run.stress-tests.py
@@ -0,0 +1,800 @@
+#!/usr/bin/env python
+"""
+A script for running our stress tests repeatedly to see if any fail.
+
+Runs a list of stress tests in parallel, reporting passes and collecting
+failure scenarios until killed. Runs with different table sizes,
+cachetable sizes, and numbers of threads.
+
+Suitable for running on a dev branch, or a release branch, or main.
+
+Just run the script from within a branch you want to test.
+
+By default, we stop everything, update from git, rebuild, and restart the
+tests once a day.
+"""
+
+import logging
+import os
+import re
+import stat
+import sys
+import time
+
+from email.mime.text import MIMEText
+from glob import glob
+from logging import debug, info, warning, error, exception
+from optparse import OptionGroup, OptionParser
+from Queue import Queue
+from random import randrange, shuffle
+from resource import setrlimit, RLIMIT_CORE
+from shutil import copy, copytree, move, rmtree
+from signal import signal, SIGHUP, SIGINT, SIGPIPE, SIGALRM, SIGTERM
+from smtplib import SMTP
+from socket import gethostname
+from subprocess import call, Popen, PIPE, STDOUT
+from traceback import format_exc
+from tempfile import mkdtemp, mkstemp
+from threading import Event, Thread, Timer
+
+__version__ = '$Id$'
+__copyright__ = """Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."""
+
+# relpath implementation for python <2.6
+# from http://unittest-ext.googlecode.com/hg-history/1df911640f7be239e58fb185b06ac2a8489dcdc4/unittest2/unittest2/compatibility.py
+if not hasattr(os.path, 'relpath'):
+ if os.path is sys.modules.get('ntpath'):
+ def relpath(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+ start_list = os.path.abspath(start).split(os.path.sep)
+ path_list = os.path.abspath(path).split(os.path.sep)
+ if start_list[0].lower() != path_list[0].lower():
+ unc_path, rest = os.path.splitunc(path)
+ unc_start, rest = os.path.splitunc(start)
+ if bool(unc_path) ^ bool(unc_start):
+ raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
+ % (path, start))
+ else:
+ raise ValueError("path is on drive %s, start on drive %s"
+ % (path_list[0], start_list[0]))
+ # Work out how much of the filepath is shared by start and path.
+ for i in range(min(len(start_list), len(path_list))):
+ if start_list[i].lower() != path_list[i].lower():
+ break
+ else:
+ i += 1
+
+ rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return os.path.curdir
+ return os.path.join(*rel_list)
+
+ else:
+ # default to posixpath definition
+ def relpath(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+
+ start_list = os.path.abspath(start).split(os.path.sep)
+ path_list = os.path.abspath(path).split(os.path.sep)
+
+ # Work out how much of the filepath is shared by start and path.
+ i = len(os.path.commonprefix([start_list, path_list]))
+
+ rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return os.path.curdir
+ return os.path.join(*rel_list)
+
+ os.path.relpath = relpath
+
+def setlimits():
+ setrlimit(RLIMIT_CORE, (-1, -1))
+ os.nice(7)
+
+class TestFailure(Exception):
+ pass
+
+class Killed(Exception):
+ pass
+
+class TestRunnerBase(object):
+ def __init__(self, scheduler, builddir, rev, execf, tsize, csize, default_test_time, savedir):
+ self.scheduler = scheduler
+ self.builddir = builddir
+ self.rev = rev
+ self.execf = execf
+ self.tsize = tsize
+ self.csize = csize
+ self.default_test_time = default_test_time
+ self.long_test_index = randrange(16)
+ self.savedir = savedir
+
+ self.env = os.environ
+
+ self.nruns = 0
+ self.num_ptquery = 1
+ self.num_update = 1
+ self.rundir = None
+ self.outf = None
+ self.times = [0, 0]
+ self.is_large = (tsize >= 10000000)
+ self.oldversionstr = 'noupgrade'
+
+ def __str__(self):
+ return (self.__class__.__name__ +
+ '<%(execf)s, %(tsize)d, %(csize)d, %(oldversionstr)s>') % self
+
+ def __getitem__(self, k):
+ return self.__getattribute__(k)
+
+ def infostr(self):
+ return '\t'.join(['%(execf)s',
+ '%(rev)s',
+ '%(tsize)d',
+ '%(csize)d',
+ '%(oldversionstr)s',
+ '%(num_ptquery)d',
+ '%(num_update)d',
+ '%(time)d']) % self
+
+ @property
+ def time(self):
+ if self.times[0] != 0 and self.times[1] != 0:
+ return self.times[1] - self.times[0]
+ else:
+ return 0
+
+ @property
+ def test_time(self):
+ if self.nruns % 16 == self.long_test_index:
+ return 3600
+ else:
+ return self.default_test_time
+
+ @property
+ def envdir(self):
+ return os.path.join(self.rundir, 'envdir')
+
+ @property
+ def prepareloc(self):
+ preparename = 'dir.%(execf)s-%(tsize)d-%(csize)d' % self
+ return os.path.join(self.builddir, 'src', 'tests', preparename)
+
+ def prepare(self):
+ if os.path.isdir(self.prepareloc):
+ debug('%s found existing environment.', self)
+ copytree(self.prepareloc, self.envdir)
+ else:
+ debug('%s preparing an environment.', self)
+ self.run_prepare()
+ self.save_prepared_envdir()
+
+ def save_prepared_envdir(self):
+ debug('%s copying environment to %s.', self, self.prepareloc)
+ copytree(self.envdir, self.prepareloc)
+
+ def run(self):
+ if self.nruns % 2 < 1:
+ self.num_ptquery = 1
+ else:
+ self.num_ptquery = randrange(16)
+ if self.nruns % 4 < 2:
+ self.num_update = 1
+ else:
+ self.num_update = randrange(16)
+
+ srctests = os.path.join(self.builddir, 'src', 'tests')
+ self.rundir = mkdtemp(dir=srctests)
+
+ try:
+ outname = os.path.join(self.rundir, 'output.txt')
+ self.outf = open(outname, 'w')
+
+ try:
+ self.prepare()
+ debug('%s testing.', self)
+ self.times[0] = time.time()
+ self.run_test()
+ self.times[1] = time.time()
+ debug('%s done.', self)
+ except Killed:
+ pass
+ except TestFailure:
+ self.times[1] = time.time()
+ savepfx = '%(execf)s-%(rev)s-%(tsize)d-%(csize)d-%(num_ptquery)d-%(num_update)d-%(phase)s-' % self
+ if not os.path.exists(self.savedir):
+ os.mkdir(self.savedir)
+ savedir = mkdtemp(dir=self.savedir, prefix=savepfx)
+ tarfile = '%s.tar' % savedir
+ commands = ''
+ try:
+ f = open(os.path.join(self.rundir, 'commands.txt'))
+ commands = f.read()
+ f.close()
+ except:
+ pass
+ output = ''
+ try:
+ f = open(os.path.join(self.rundir, 'output.txt'))
+ output = f.read()
+ f.close()
+ except:
+ pass
+ self.scheduler.email_failure(self, tarfile, commands, output)
+ self.save(savedir, tarfile)
+ self.scheduler.report_failure(self)
+ warning('Saved environment to %s', tarfile)
+ else:
+ self.scheduler.report_success(self)
+ finally:
+ self.outf.close()
+ rmtree(self.rundir)
+ self.rundir = None
+ self.times = [0, 0]
+ self.nruns += 1
+
+ def save(self, savedir, tarfile):
+ def targetfor(path):
+ return os.path.join(savedir, os.path.basename(path))
+
+ for f in glob(os.path.join(self.rundir, '*')):
+ if os.path.isdir(f):
+ copytree(f, targetfor(f))
+ else:
+ copy(f, targetfor(f))
+ fullexecf = os.path.join(self.builddir, 'src', 'tests', self.execf)
+ copy(fullexecf, targetfor(fullexecf))
+
+ # TODO: Leif was lazy and did this in bash, it should be done in python for portability
+ os.system("for l in $(ldd %(fullexecf)s | sed 's/\ *(0x[0-9a-f]*)$//;s/.*=>\ \?//;s/^\ *|\ *$//' | grep -v '^$'); do mkdir -p %(savedir)s/$(dirname $l); cp $l %(savedir)s/$l; done" % {'fullexecf': fullexecf, 'savedir': savedir})
+
+ r = call(['tar', 'cf', os.path.basename(tarfile), os.path.basename(savedir)], cwd=os.path.dirname(savedir))
+ if r != 0:
+ error('tarring up %s failed.' % savedir)
+ sys.exit(r)
+ os.chmod(tarfile, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+
+ def waitfor(self, proc):
+ while proc.poll() is None:
+ self.scheduler.stopping.wait(1)
+ if self.scheduler.stopping.isSet():
+ os.kill(proc.pid, SIGTERM)
+ raise Killed()
+
+ def spawn_child(self, args):
+ logging.debug('%s spawning %s', self, ' '.join([self.execf] + args))
+ commandsf = open(os.path.join(self.rundir, 'commands.txt'), 'a')
+ print >>commandsf, ' '.join([self.execf] + args)
+ commandsf.close()
+ proc = Popen([self.execf] + args,
+ executable=os.path.join('..', self.execf),
+ env=self.env,
+ cwd=self.rundir,
+ preexec_fn=setlimits,
+ stdout=self.outf,
+ stderr=STDOUT)
+ self.waitfor(proc)
+ return proc.returncode
+
+ @property
+ def extraargs(self):
+ # for overriding
+ return []
+
+ @property
+ def prepareargs(self):
+ return ['-v',
+ '--envdir', 'envdir',
+ '--num_elements', str(self.tsize),
+ '--cachetable_size', str(self.csize)] + self.extraargs
+
+ @property
+ def testargs(self):
+ return ['--num_seconds', str(self.test_time),
+ '--join_timeout', str(60 * 60 * 12),
+ '--no-crash_on_operation_failure',
+ '--num_ptquery_threads', str(self.num_ptquery),
+ '--num_update_threads', str(self.num_update)] + self.prepareargs
+
+class TestRunner(TestRunnerBase):
+ def run_prepare(self):
+ self.phase = "create"
+ if self.spawn_child(['--only_create'] + self.prepareargs) != 0:
+ raise TestFailure('%s crashed during --only_create.' % self.execf)
+
+ def run_test(self):
+ self.phase = "stress"
+ if self.spawn_child(['--only_stress'] + self.testargs) != 0:
+ raise TestFailure('%s crashed during --only_stress.' % self.execf)
+
+class RecoverTestRunner(TestRunnerBase):
+ def run_prepare(self):
+ self.phase = "create"
+ if self.spawn_child(['--only_create', '--test'] + self.prepareargs) != 0:
+ raise TestFailure('%s crashed during --only_create --test.' % self.execf)
+
+ def run_test(self):
+ self.phase = "test"
+ if self.spawn_child(['--only_stress', '--test'] + self.testargs) == 0:
+ raise TestFailure('%s did not crash during --only_stress --test' % self.execf)
+ self.phase = "recover"
+ if self.spawn_child(['--recover'] + self.prepareargs) != 0:
+ raise TestFailure('%s crashed during --recover' % self.execf)
+
+class UpgradeTestRunnerMixin(TestRunnerBase):
+ def __init__(self, old_environments_dir, version, pristine_or_stressed, **kwargs):
+ super(UpgradeTestRunnerMixin, self).__init__(**kwargs)
+ self.version = version
+ self.pristine_or_stressed = pristine_or_stressed
+ self.old_env_dirs = os.path.join(old_environments_dir, version)
+ self.oldversionstr = '%(version)s-%(pristine_or_stressed)s' % self
+
+ @property
+ def extraargs(self):
+ return ['--num_DBs', '1']
+
+ @property
+ def old_envdir(self):
+ oldname = 'saved%(pristine_or_stressed)s-%(tsize)d-dir' % self
+ logging.debug('%s using old version environment %s from %s.', self, oldname, self.old_env_dirs)
+ return os.path.join(self.old_env_dirs, oldname)
+
+ def save_prepared_envdir(self):
+ # no need to do this
+ pass
+
+ def run_prepare(self):
+ self.phase = "create"
+ copytree(self.old_envdir, self.envdir)
+
+class DoubleTestRunnerMixin(TestRunnerBase):
+ """Runs the test phase twice in a row.
+
+ Good for upgrade tests, to run the test once to upgrade it and then
+ again to make sure the upgrade left it in a good state.
+ """
+
+ def run_test(self):
+ super(DoubleTestRunnerMixin, self).run_test()
+ super(DoubleTestRunnerMixin, self).run_test()
+
+class UpgradeTestRunner(UpgradeTestRunnerMixin, TestRunner):
+ pass
+
+class UpgradeRecoverTestRunner(UpgradeTestRunnerMixin, RecoverTestRunner):
+ pass
+
+class DoubleUpgradeTestRunner(DoubleTestRunnerMixin, UpgradeTestRunner):
+ pass
+
+class DoubleUpgradeRecoverTestRunner(DoubleTestRunnerMixin, UpgradeRecoverTestRunner):
+ pass
+
+class Worker(Thread):
+ def __init__(self, scheduler):
+ super(Worker, self).__init__()
+ self.scheduler = scheduler
+
+ def run(self):
+ debug('%s starting.' % self)
+ while not self.scheduler.stopping.isSet():
+ test_runner = self.scheduler.get()
+ if test_runner.is_large:
+ if self.scheduler.nlarge + 1 > self.scheduler.maxlarge:
+ debug('%s pulled a large test, but there are already %d running. Putting it back.',
+ self, self.scheduler.nlarge)
+ self.scheduler.put(test_runner)
+ continue
+ self.scheduler.nlarge += 1
+ try:
+ test_runner.run()
+ except Exception, e:
+ exception('Fatal error in worker thread.')
+ info('Killing all workers.')
+ self.scheduler.error = format_exc()
+ self.scheduler.stop()
+ if test_runner.is_large:
+ self.scheduler.nlarge -= 1
+ if not self.scheduler.stopping.isSet():
+ self.scheduler.put(test_runner)
+ debug('%s exiting.' % self)
+
+class Scheduler(Queue):
+ def __init__(self, nworkers, maxlarge, logger, email, branch):
+ Queue.__init__(self)
+ info('Initializing scheduler with %d jobs.', nworkers)
+ self.nworkers = nworkers
+ self.logger = logger
+ self.maxlarge = maxlarge
+ self.nlarge = 0 # not thread safe, don't really care right now
+ self.passed = 0
+ self.failed = 0
+ self.workers = []
+ self.stopping = Event()
+ self.timer = None
+ self.error = None
+ self.email = email
+ self.branch = branch
+
+ def run(self, timeout):
+ info('Starting workers.')
+ self.stopping.clear()
+ for i in range(self.nworkers):
+ w = Worker(self)
+ self.workers.append(w)
+ w.start()
+ if timeout != 0:
+ self.timer = Timer(timeout, self.stop)
+ self.timer.start()
+ while not self.stopping.isSet():
+ try:
+ for w in self.workers:
+ if self.stopping.isSet():
+ break
+ w.join(timeout=1.0)
+ except (KeyboardInterrupt, SystemExit):
+ debug('Scheduler interrupted. Stopping and joining threads.')
+ self.stop()
+ self.join()
+ sys.exit(0)
+ else:
+ debug('Scheduler stopped by someone else. Joining threads.')
+ self.join()
+ if self.error:
+ send_mail(self.email, 'Stress tests scheduler stopped by something, on %s' % gethostname(), self.error)
+ sys.exit(77)
+
+ def join(self):
+ if self.timer is not None:
+ self.timer.cancel()
+ while len(self.workers) > 0:
+ self.workers.pop().join()
+
+ def stop(self):
+ info('Stopping workers.')
+ self.stopping.set()
+
+ def __getitem__(self, k):
+ return self.__dict__[k]
+
+ def reportstr(self):
+ return '[PASS=%(passed)d FAIL=%(failed)d]' % self
+
+ def report_success(self, runner):
+ self.passed += 1
+ self.logger.info('PASSED %s', runner.infostr())
+ info('%s PASSED %s', self.reportstr(), runner.infostr())
+
+ def report_failure(self, runner):
+ self.failed += 1
+ self.logger.warning('FAILED %s', runner.infostr())
+ warning('%s FAILED %s', self.reportstr(), runner.infostr())
+
+ def email_failure(self, runner, savedtarfile, commands, output):
+ if self.email is None:
+ return
+
+ h = gethostname()
+ if isinstance(runner, UpgradeTestRunnerMixin):
+ upgradestr = '''
+The test was upgrading from %s.''' % runner.oldversionstr
+ else:
+ upgradestr = ''
+ send_mail(self.email,
+ 'Stress test failure on %(hostname)s running %(branch)s.' % { 'hostname': h, 'branch': self.branch },
+ ('''A stress test failed on %(hostname)s running %(branch)s at revision %(rev)s after %(test_duration)d seconds.%(upgradestr)s
+Its environment is saved to %(tarfile)s on that machine.
+
+The test configuration was:
+
+testname: %(execf)s
+num_elements: %(tsize)d
+cachetable_size: %(csize)d
+num_ptquery_threads: %(num_ptquery)d
+num_update_threads: %(num_update)d
+
+Commands run:
+%(commands)s
+
+Test output:
+%(output)s
+''' % {
+ 'hostname': h,
+ 'rev': runner.rev,
+ 'test_duration': runner.time,
+ 'upgradestr': upgradestr,
+ 'tarfile': savedtarfile,
+ 'execf': runner.execf,
+ 'tsize': runner.tsize,
+ 'csize': runner.csize,
+ 'num_ptquery': runner.num_ptquery,
+ 'num_update': runner.num_update,
+ 'branch': self.branch,
+ 'commands': commands,
+ 'output': output,
+ }))
+
+def send_mail(toaddrs, subject, body):
+ # m = MIMEText(body)
+ # fromaddr = 'dev-private@percona.com'
+ # m['From'] = fromaddr
+ # m['To'] = ', '.join(toaddrs)
+ # m['Subject'] = subject
+ # s = SMTP('192.168.1.114')
+ # s.sendmail(fromaddr, toaddrs, str(m))
+ # s.quit()
+    info(subject)
+    info(body)
+
+def update(tokudb):
+ info('Updating from git.')
+ devnull = open(os.devnull, 'w')
+ call(['git', 'pull'], stdout=devnull, stderr=STDOUT, cwd=tokudb)
+ devnull.close()
+
+def rebuild(tokudb, builddir, tokudb_data, cc, cxx, tests):
+ info('Building tokudb.')
+ if not os.path.exists(builddir):
+ os.mkdir(builddir)
+ newenv = os.environ
+ newenv['CC'] = cc
+ newenv['CXX'] = cxx
+ r = call(['cmake',
+ '-DCMAKE_BUILD_TYPE=Debug',
+ '-DUSE_GTAGS=OFF',
+ '-DUSE_CTAGS=OFF',
+ '-DUSE_ETAGS=OFF',
+ '-DUSE_CSCOPE=OFF',
+ '-DTOKUDB_DATA=%s' % tokudb_data,
+ tokudb],
+ env=newenv,
+ cwd=builddir)
+ if r != 0:
+ send_mail(['dev-private@percona.com'], 'Stress tests on %s failed to build.' % gethostname(), '')
+ error('Building the tests failed.')
+ sys.exit(r)
+ r = call(['make', '-j8'], cwd=builddir)
+ if r != 0:
+ send_mail(['dev-private@percona.com'], 'Stress tests on %s failed to build.' % gethostname(), '')
+ error('Building the tests failed.')
+ sys.exit(r)
+
+def revfor(tokudb):
+ proc = Popen("git describe --tags",
+ shell=True, cwd=tokudb, stdout=PIPE)
+ (out, err) = proc.communicate()
+ rev = out.strip()
+ info('Using tokudb at r%s.', rev)
+ return rev
+
+def main(opts):
+ builddir = os.path.join(opts.tokudb, 'build')
+ if opts.build:
+ rebuild(opts.tokudb, builddir, opts.tokudb_data, opts.cc, opts.cxx, opts.testnames + opts.recover_testnames)
+ rev = revfor(opts.tokudb)
+
+ if not os.path.exists(opts.savedir):
+ os.mkdir(opts.savedir)
+
+ logger = logging.getLogger('stress')
+ logger.propagate = False
+ logger.setLevel(logging.INFO)
+ logger.addHandler(logging.FileHandler(opts.log))
+
+ info('Saving pass/fail logs to %s.', opts.log)
+ info('Saving failure environments to %s.', opts.savedir)
+
+ scheduler = Scheduler(opts.jobs, opts.maxlarge, logger, opts.email, opts.branch)
+
+ runners = []
+ for tsize in [2000, 200000, 50000000]:
+ for csize in [50 * tsize, 1000 ** 3]:
+ kwargs = {
+ 'scheduler': scheduler,
+ 'builddir': builddir,
+ 'rev': rev,
+ 'tsize': tsize,
+ 'csize': csize,
+ 'default_test_time': opts.test_time,
+ 'savedir': opts.savedir
+ }
+ for test in opts.testnames:
+ if opts.run_non_upgrade:
+ runners.append(TestRunner(execf=test, **kwargs))
+
+ # never run test_stress_openclose.tdb on existing
+ # environments, it doesn't want them
+ if opts.run_upgrade and test != 'test_stress_openclose.tdb':
+ for version in opts.old_versions:
+ for pristine_or_stressed in ['pristine', 'stressed']:
+ upgrade_kwargs = {
+ 'old_environments_dir': opts.old_environments_dir,
+ 'version': version,
+ 'pristine_or_stressed': pristine_or_stressed
+ }
+ upgrade_kwargs.update(kwargs)
+ # skip running test_stress4.tdb on any env
+ # that has already been stressed, as that
+ # breaks its assumptions
+ if opts.double_upgrade and test != 'test_stress4.tdb':
+ runners.append(DoubleUpgradeTestRunner(
+ execf=test,
+ **upgrade_kwargs))
+ elif not (test == 'test_stress4.tdb' and pristine_or_stressed == 'stressed'):
+ runners.append(UpgradeTestRunner(
+ execf=test,
+ **upgrade_kwargs))
+
+ for test in opts.recover_testnames:
+ if opts.run_non_upgrade:
+ runners.append(RecoverTestRunner(execf=test, **kwargs))
+
+ if opts.run_upgrade:
+ for version in opts.old_versions:
+ for pristine_or_stressed in ['pristine', 'stressed']:
+ upgrade_kwargs = {
+ 'old_environments_dir': opts.old_environments_dir,
+ 'version': version,
+ 'pristine_or_stressed': pristine_or_stressed
+ }
+ upgrade_kwargs.update(kwargs)
+ if opts.double_upgrade:
+ runners.append(DoubleUpgradeRecoverTestRunner(
+ execf=test,
+ **upgrade_kwargs))
+ else:
+ runners.append(UpgradeRecoverTestRunner(
+ execf=test,
+ **upgrade_kwargs))
+
+ shuffle(runners)
+
+ for runner in runners:
+ scheduler.put(runner)
+
+ try:
+ while scheduler.error is None:
+ scheduler.run(opts.rebuild_period)
+ if scheduler.error is not None:
+ error('Scheduler reported an error.')
+            raise Exception(scheduler.error)
+ update(opts.tokudb)
+ rebuild(opts.tokudb, builddir, opts.tokudb_data, opts.cc, opts.cxx, opts.testnames + opts.recover_testnames)
+ rev = revfor(opts.tokudb)
+ for runner in runners:
+ runner.rev = rev
+ except (KeyboardInterrupt, SystemExit):
+ sys.exit(0)
+ except Exception, e:
+ exception('Unhandled exception caught in main.')
+ send_mail(['dev-private@percona.com'], 'Stress tests caught unhandled exception in main, on %s' % gethostname(), format_exc())
+ raise e
+
+if __name__ == '__main__':
+ a0 = os.path.abspath(sys.argv[0])
+ usage = '%prog [options]\n' + __doc__
+ parser = OptionParser(usage=usage)
+ parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='show build status, passing tests, and other info')
+ parser.add_option('-d', '--debug', action='store_true', dest='debug', default=False, help='show debugging info')
+ parser.add_option('-l', '--log', type='string', dest='log',
+ default='/tmp/run.stress-tests.log',
+ help='where to save logfiles')
+ parser.add_option('-s', '--savedir', type='string', dest='savedir',
+ default='/tmp/run.stress-tests.failures',
+ help='where to save environments and extra data for failed tests')
+ parser.add_option('--email', action='append', type='string', dest='email', default=[], help='where to send emails')
+ parser.add_option('--no-email', action='store_false', dest='send_emails', default=True, help='suppress emails on failure')
+ default_toplevel = os.path.dirname(os.path.dirname(a0))
+ parser.add_option('--tokudb', type='string', dest='tokudb',
+ default=default_toplevel,
+ help=('top of the tokudb tree (contains ft/ and src/) [default=%s]' % os.path.relpath(default_toplevel)))
+ toplevel_basename = os.path.basename(default_toplevel)
+ if toplevel_basename == 'tokudb':
+ maybe_absolute_branchpath = os.path.dirname(default_toplevel)
+ if os.path.basename(os.path.dirname(maybe_absolute_branchpath)) == 'mysql.branches':
+ default_branchname = os.path.basename(maybe_absolute_branchpath)
+ else:
+ default_branchname = 'mainline'
+ elif toplevel_basename[:7] == 'tokudb.':
+ default_branchname = toplevel_basename[7:]
+ else:
+ default_branchname = 'unknown branch'
+ parser.add_option('--branch', type='string', dest='branch',
+ default=default_branchname,
+ help=('what to call this branch [default=%s]' % default_branchname))
+
+ test_group = OptionGroup(parser, 'Scheduler Options', 'Control how the scheduler runs jobs.')
+ test_group.add_option('-t', '--test_time', type='int', dest='test_time',
+ default=60,
+                          help='time to run each test, in seconds [default=60]')
+ test_group.add_option('-j', '--jobs', type='int', dest='jobs', default=8,
+ help='how many concurrent tests to run [default=8]')
+ test_group.add_option('--maxlarge', type='int', dest='maxlarge', default=2,
+ help='maximum number of large tests to run concurrently (helps prevent swapping) [default=2]')
+ parser.add_option_group(test_group)
+
+
+ default_testnames = ['test_stress0.tdb',
+ 'test_stress1.tdb',
+ 'test_stress2.tdb',
+ 'test_stress3.tdb',
+ 'test_stress4.tdb',
+ 'test_stress5.tdb',
+ 'test_stress6.tdb',
+ 'test_stress7.tdb',
+ 'test_stress_hot_indexing.tdb',
+ 'test_stress_with_verify.tdb',
+ 'test_stress_openclose.tdb']
+ default_recover_testnames = ['recover-test_stress1.tdb',
+ 'recover-test_stress2.tdb',
+ 'recover-test_stress3.tdb',
+ 'recover-child-rollback.tdb',
+ 'recover-test_stress_openclose.tdb']
+ build_group = OptionGroup(parser, 'Build Options', 'Control how the fractal tree and tests get built.')
+ build_group.add_option('--skip_build', action='store_false', dest='build', default=True,
+ help='skip the git pull and build phase before testing [default=False]')
+ build_group.add_option('--rebuild_period', type='int', dest='rebuild_period', default=60 * 60 * 24,
+                           help='how many seconds between doing a git pull and rebuild, 0 means never rebuild [default=24 hours]')
+ default_tokudb_data = os.path.abspath(os.path.join(default_toplevel, '..', 'tokudb.data'))
+ build_group.add_option('--tokudb_data', type='string', dest='tokudb_data', default=default_tokudb_data,
+ help='passed to cmake as TOKUDB_DATA [default=%s]' % default_tokudb_data)
+ build_group.add_option('--cc', type='string', dest='cc', default='gcc47',
+ help='which compiler to use [default=gcc47]')
+ build_group.add_option('--cxx', type='string', dest='cxx', default='g++47',
+ help='which compiler to use [default=g++47]')
+ build_group.add_option('--add_test', action='append', type='string', dest='testnames', default=default_testnames,
+ help=('add a stress test to run [default=%r]' % default_testnames))
+ build_group.add_option('--add_recover_test', action='append', type='string', dest='recover_testnames', default=default_recover_testnames,
+ help=('add a recover stress test to run [default=%r]' % default_recover_testnames))
+ parser.add_option_group(build_group)
+
+ upgrade_group = OptionGroup(parser, 'Upgrade Options', 'Also run on environments from old versions of tokudb.')
+ upgrade_group.add_option('--run_upgrade', action='store_true', dest='run_upgrade', default=False,
+ help='run the tests on old dictionaries as well, to test upgrade [default=False]')
+ upgrade_group.add_option('--skip_non_upgrade', action='store_false', dest='run_non_upgrade', default=True,
+ help="skip the tests that don't involve upgrade [default=False]")
+ upgrade_group.add_option('--double_upgrade', action='store_true', dest='double_upgrade', default=False,
+ help='run the upgrade tests twice in a row [default=False]')
+ upgrade_group.add_option('--add_old_version', action='append', type='choice', dest='old_versions', choices=['4.2.0', '5.0.8', '5.2.7', '6.0.0', '6.1.0', '6.5.1', '6.6.3', '7.0.1','7.1.6','v26','7.5.0'],
+                             help='which old versions to use for running the stress tests in upgrade mode; can be specified multiple times [options=4.2.0, 5.0.8, 5.2.7, 6.0.0, 6.1.0, 6.5.1, 6.6.3, 7.0.1, 7.1.6, v26, 7.5.0]')
+ upgrade_group.add_option('--old_environments_dir', type='string', dest='old_environments_dir',
+ default=('%s/old-stress-test-envs' % default_tokudb_data),
+                             help='directory containing old version environments (should contain 5.0.8/, 5.2.7/, etc., and the environments should be in those) [default=<tokudb_data>/old-stress-test-envs]')
+ parser.add_option_group(upgrade_group)
+
+ (opts, args) = parser.parse_args()
+ if len(args) > 0:
+ parser.error('Invalid arguments: %r' % args)
+
+ if opts.old_versions is not None and len(opts.old_versions) > 0:
+ opts.run_upgrade = True
+
+ if opts.run_upgrade:
+ if not os.path.isdir(opts.old_environments_dir):
+ parser.error('You specified --run_upgrade but did not specify an --old_environments_dir that exists.')
+ if len(opts.old_versions) < 1:
+ parser.error('You specified --run_upgrade but gave no --old_versions to run against.')
+ for version in opts.old_versions:
+ version_dir = os.path.join(opts.old_environments_dir, version)
+ if not os.path.isdir(version_dir):
+ parser.error('You specified --run_upgrade but %s is not a directory.' % version_dir)
+
+ if not opts.send_emails:
+ opts.email = None
+ elif len(opts.email) == 0:
+ opts.email.append('dev-private@percona.com')
+
+ if opts.debug:
+ logging.basicConfig(level=logging.DEBUG)
+ elif opts.verbose:
+ logging.basicConfig(level=logging.INFO)
+ else:
+ logging.basicConfig(level=logging.WARNING)
+
+ main(opts)
diff --git a/storage/tokudb/PerconaFT/scripts/tokugrind b/storage/tokudb/PerconaFT/scripts/tokugrind
new file mode 100644
index 00000000..a099a1f2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/scripts/tokugrind
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+function usage() {
+    echo "run valgrind, check its log for errors, and set the exit code accordingly"
+}
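+
+# Example invocation (hypothetical paths): fail the run with exit code 101 if the
+# valgrind log ends up non-empty even though valgrind itself exited 0:
+#   tokugrind --error-exitcode=101 --log-file=/tmp/ft.valgrind ./ft/tests/ft-test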
+
+function cleanup() {
+    if [ "$logfile" != "" ] ; then rm "$logfile"; fi
+ exit 1
+}
+
+args=$*
+
+logfile=
+createlogfile=0
+errorexitcode=1
+
+while [ $# -gt 0 ] ; do
+ arg=$1; shift
+    if [[ $arg == --* ]] ; then
+ if [[ $arg =~ --log-file=(.*) ]] ; then
+ logfile=${BASH_REMATCH[1]}
+ elif [[ $arg =~ --error-exitcode=(.*) ]] ; then
+ errorexitcode=${BASH_REMATCH[1]}
+ fi
+ else
+ break
+ fi
+done
+
+if [ "$logfile" = "" ] ; then
+ createlogfile=1
+ trap cleanup SIGINT
+ logfile=`mktemp /tmp/$(whoami).tokugrind.XXXXXXXX`
+ args="--log-file=$logfile $args"
+fi
+
+valgrind $args
+exitcode=$?
+if [ $exitcode = 0 ] ; then
+    lines=$(wc -l <"$logfile")
+ if [ $lines -ne 0 ] ; then
+ exitcode=$errorexitcode
+ fi
+fi
+
+if [ $createlogfile != 0 ] ; then
+    cat "$logfile" >&2
+    rm "$logfile"
+fi
+
+exit $exitcode
diff --git a/storage/tokudb/PerconaFT/scripts/watch.stress-tests.bash b/storage/tokudb/PerconaFT/scripts/watch.stress-tests.bash
new file mode 100644
index 00000000..9dccbe2b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/scripts/watch.stress-tests.bash
@@ -0,0 +1,3 @@
+#!/bin/bash
+
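+# Periodically summarize the stress-test pass/fail log: a count of results per test,
+# followed by every FAILED line.  Assumes the scheduler's log (the driver's --log
+# option) was written to /tmp/stress-tests-log.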
+watch "date ; awk '{ print \$1, \$3 }' < /tmp/stress-tests-log | sort -k 2 | uniq -c | sort -k 3 -r -s | head -n10; echo ; echo; echo 'Failing tests:'; grep FAILED /tmp/stress-tests-log | sort -k 3 -r -s"
diff --git a/storage/tokudb/PerconaFT/src/CMakeLists.txt b/storage/tokudb/PerconaFT/src/CMakeLists.txt
new file mode 100644
index 00000000..bae37389
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/CMakeLists.txt
@@ -0,0 +1,56 @@
+include_directories(${CMAKE_CURRENT_SOURCE_DIR} "${CMAKE_CURRENT_BINARY_DIR}/..")
+
+set(tokudb_srcs
+ ydb
+ ydb_cursor
+ ydb_db
+ ydb_env_func
+ ydb_row_lock
+ ydb_txn
+ ydb_write
+ errors
+ loader
+ indexer
+ indexer-undo-do
+ toku_patent
+ )
+
+## make the shared library
+add_library(${LIBTOKUDB} SHARED ${tokudb_srcs})
+add_dependencies(${LIBTOKUDB} install_tdb_h generate_log_code)
+target_link_libraries(${LIBTOKUDB} LINK_PRIVATE locktree_static ft_static util_static lzma snappy dbug ${LIBTOKUPORTABILITY})
+target_link_libraries(${LIBTOKUDB} LINK_PUBLIC ${ZLIB_LIBRARY} )
+
+## make the static library
+add_library(tokudb_static_conv STATIC ${tokudb_srcs})
+add_dependencies(tokudb_static_conv install_tdb_h generate_log_code)
+set_target_properties(tokudb_static_conv PROPERTIES POSITION_INDEPENDENT_CODE ON)
+set(tokudb_source_libs tokudb_static_conv locktree_static ft_static util_static lzma snappy)
+toku_merge_static_libs(${LIBTOKUDB}_static ${LIBTOKUDB}_static "${tokudb_source_libs}")
+
+## add gcov and define _GNU_SOURCE
+maybe_add_gcov_to_libraries(${LIBTOKUDB} tokudb_static_conv)
+set_property(TARGET ${LIBTOKUDB} tokudb_static_conv APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE)
+
+## add a version script and set -fvisibility=hidden for the shared library
+configure_file(export.map . COPYONLY)
+if (NOT CMAKE_SYSTEM_NAME STREQUAL Darwin)
+ add_space_separated_property(TARGET ${LIBTOKUDB} COMPILE_FLAGS "-fvisibility=hidden -fvisibility-inlines-hidden")
+ add_space_separated_property(TARGET ${LIBTOKUDB} LINK_FLAGS "-Wl,--version-script=${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/export.map")
+endif ()
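+# (export.map, configured above, whitelists the public db_*/toku_* entry points;
+# its "local: *" clause hides every other symbol, on top of -fvisibility=hidden.)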
+
+# detect when we are being built as a subproject
+if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
+ install(
+ TARGETS ${LIBTOKUDB}
+ DESTINATION ${INSTALL_LIBDIR}
+ COMPONENT tokukv_libs_shared
+ )
+ install(
+ TARGETS ${LIBTOKUDB}_static
+ DESTINATION ${INSTALL_LIBDIR}
+ COMPONENT tokukv_libs_static
+ )
+endif ()
+
+add_subdirectory(tests)
diff --git a/storage/tokudb/PerconaFT/src/errors.cc b/storage/tokudb/PerconaFT/src/errors.cc
new file mode 100644
index 00000000..0ec5b6d9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/errors.cc
@@ -0,0 +1,141 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/**
+   \file errors.cc
+ \brief Error handling
+
+ The error handling routines for ydb
+*/
+
+#include <toku_portability.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "ydb-internal.h"
+
+/** Checks whether the environment has panicked */
+int toku_env_is_panicked(DB_ENV *dbenv /**< The environment to check */) {
+ if (dbenv==0) return 0;
+ return dbenv->i->is_panicked;
+}
+
+/* Prints an error message to a file specified by env (or stderr),
+ preceded by the environment's error prefix. */
+static void toku__ydb_error_file(const DB_ENV *env, bool use_stderr,
+ char errmsg[]) {
+ /* Determine the error file to use */
+ FILE *CAST_FROM_VOIDP(efile, env->i->errfile);
+ if (efile==NULL && env->i->errcall==0 && use_stderr) efile = stderr;
+
+ /* Print out on a file */
+ if (efile) {
+ if (env->i->errpfx) fprintf(efile, "%s: ", env->i->errpfx);
+ fprintf(efile, "%s", errmsg);
+ }
+}
+
+/**
+
+ Prints out environment errors, adjusting to a variety of options
+ and formats.
+ The printout format can be controlled to print the following optional
+ messages:
+ - The environment error message prefix
+ - User-supplied prefix obtained by printing ap with the
+ fmt string
+ - The standard db error string
+ The print out takes place via errcall (if set), errfile (if set),
+ or stderr if neither is set (and the user so toggles the printout).
+ Both errcall and errfile can be set.
+ The error message is truncated to approximately 4,000 characters.
+
+ \param env The environment that the error refers to.
+ \param error The error code
+ \param include_stderrstring Controls whether the standard db error
+ string should be included in the print out
+ \param use_stderr_if_nothing_else Toggles the use of stderr.
+ \param fmt Output format for optional prefix arguments (must be NULL
+ if the prefix is empty)
+ \param ap Optional prefix
+*/
+void toku_ydb_error_all_cases(const DB_ENV * env,
+ int error,
+ bool include_stderrstring,
+ bool use_stderr_if_nothing_else,
+ const char *fmt, va_list ap) {
+ /* Construct the error message */
+ char buf [4000];
+ int count=0;
+ if (fmt) count=vsnprintf(buf, sizeof(buf), fmt, ap);
+ if (include_stderrstring) {
+ count+=snprintf(&buf[count], sizeof(buf)-count, ": %s",
+ db_strerror(error));
+ }
+
+ /* Print via errcall */
+ if (env->i->errcall) env->i->errcall(env, env->i->errpfx, buf);
+
+ /* Print out on a file */
+ toku__ydb_error_file(env, use_stderr_if_nothing_else, buf);
+}
+
+/** Handle all the error cases (but don't do the default thing.)
+ \param dbenv The environment that is subject to errors
+ \param error The error code
+ \param fmt The format string for additional variable arguments to
+ be printed */
+int toku_ydb_do_error (const DB_ENV *dbenv, int error, const char *fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ toku_ydb_error_all_cases(dbenv, error, false, false, fmt, ap);
+ va_end(ap);
+ return error;
+}
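+
+// Illustrative call site (hypothetical message): report a failure through whatever
+// errcall/errfile the environment has configured and hand the code back to the caller:
+//   return toku_ydb_do_error(dbenv, EINVAL, "%s: unsupported flags %u\n", __FUNCTION__, flags);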
+
+/** Handle errors on an environment,
+ \param dbenv The environment that is subject to errors
+ \param error The error code
+ \param fmt The format string for additional variable arguments to
+ be printed */
+void toku_env_err(const DB_ENV * env, int error, const char *fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ toku_ydb_error_all_cases(env, error, false, true, fmt, ap);
+ va_end(ap);
+}
diff --git a/storage/tokudb/PerconaFT/src/export.map b/storage/tokudb/PerconaFT/src/export.map
new file mode 100644
index 00000000..fc2be5f4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/export.map
@@ -0,0 +1,99 @@
+{
+ global:
+ db_create;
+ db_env_create;
+ db_strerror;
+ db_version;
+ db_env_set_direct_io;
+ db_env_set_compress_buffers_before_eviction;
+ db_env_set_func_fsync;
+ db_env_set_func_malloc;
+ db_env_set_func_realloc;
+ db_env_set_func_free;
+ db_env_set_func_pwrite;
+ db_env_set_func_full_pwrite;
+ db_env_set_func_write;
+ db_env_set_func_full_write;
+ db_env_set_func_fdopen;
+ db_env_set_func_fopen;
+ db_env_set_func_open;
+ db_env_set_func_fclose;
+ db_env_set_func_pread;
+ db_env_set_func_loader_fwrite;
+ db_env_set_checkpoint_callback;
+ db_env_set_checkpoint_callback2;
+ db_env_set_recover_callback;
+ db_env_set_recover_callback2;
+ db_env_set_loader_size_factor;
+ db_env_set_mvcc_garbage_collection_verification;
+ db_env_enable_engine_status;
+ db_env_set_flusher_thread_callback;
+ db_env_set_num_bucket_mutexes;
+ db_env_set_toku_product_name;
+ db_env_try_gdb_stack_trace;
+
+ read_partitioned_counter;
+
+ toku_ydb_error_all_cases;
+ toku_set_trace_file;
+ toku_close_trace_file;
+
+ toku_add_trace_mem;
+ toku_print_trace_mem;
+
+
+ toku_free;
+ toku_malloc;
+ toku_calloc;
+ toku_xmemdup;
+ toku_xrealloc;
+ toku_os_get_file_size;
+ toku_os_getpid;
+ toku_os_gettid;
+ toku_os_initialize_settings;
+ toku_os_is_absolute_name;
+ toku_os_mkdir;
+ toku_realloc;
+ toku_strdup;
+ toku_pthread_yield;
+
+ toku_cachetable_print_hash_histogram;
+ toku_set_lsn_increment;
+
+ toku_builtin_compare_fun;
+
+ toku_stat;
+ toku_fstat;
+ toku_dup2;
+
+ toku_os_full_write;
+ toku_os_full_pwrite;
+
+ toku_os_get_max_process_data_size;
+ toku_os_get_phys_memory_size;
+
+ tokutime_to_seconds;
+
+ toku_do_assert;
+ toku_do_assert_fail;
+ toku_do_assert_zero_fail;
+ toku_set_assert_on_write_enospc;
+
+ toku_test_db_redirect_dictionary;
+ toku_test_get_latest_lsn;
+ toku_test_get_checkpointing_user_data_status;
+ toku_set_test_txn_sync_callback;
+ toku_indexer_set_test_only_flags;
+ toku_increase_last_xid;
+
+ toku_patent_string;
+ toku_copyright_string;
+
+ toku_dbt_array_init;
+ toku_dbt_array_destroy;
+ toku_dbt_array_destroy_shallow;
+ toku_dbt_array_resize;
+
+ local: *;
+};
+
diff --git a/storage/tokudb/PerconaFT/src/indexer-internal.h b/storage/tokudb/PerconaFT/src/indexer-internal.h
new file mode 100644
index 00000000..fdaa561e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/indexer-internal.h
@@ -0,0 +1,116 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <ft/txn/txn_state.h>
+#include <toku_pthread.h>
+
+// the indexer_commit_keys is an ordered set of keys described by a DBT in the keys array.
+// the array is a resizable array with max size "max_keys" and current size "current_keys".
+// the ordered set is used by the hotindex undo function to collect the commit keys.
+struct indexer_commit_keys {
+ int max_keys; // max number of keys
+ int current_keys; // number of valid keys
+ DBT *keys; // the variable length keys array
+};
+
+// a ule and all of its provisional txn info
+// used by the undo-do algorithm to gather up ule provisional info in
+// a cursor callback that provides exclusive access to the source DB
+// with respect to txn commit and abort
+struct ule_prov_info {
+ // these are pointers to the allocated leafentry and ule needed to calculate
+ // provisional info. we only borrow them - whoever created the provisional info
+ // is responsible for cleaning up the leafentry and ule when done.
+ LEAFENTRY le;
+ ULEHANDLE ule;
+ void* key;
+ uint32_t keylen;
+ // provisional txn info for the ule
+ uint32_t num_provisional;
+ uint32_t num_committed;
+ TXNID *prov_ids;
+ TOKUTXN *prov_txns;
+ TOKUTXN_STATE *prov_states;
+};
+
+struct __toku_indexer_internal {
+ DB_ENV *env;
+ DB_TXN *txn;
+ toku_mutex_t indexer_lock;
+ toku_mutex_t indexer_estimate_lock;
+ DBT position_estimate;
+ DB *src_db;
+ int N;
+ DB **dest_dbs; /* [N] */
+ uint32_t indexer_flags;
+ void (*error_callback)(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra);
+ void *error_extra;
+ int (*poll_func)(void *poll_extra, float progress);
+ void *poll_extra;
+ uint64_t estimated_rows; // current estimate of table size
+ uint64_t loop_mod; // how often to call poll_func
+ LE_CURSOR lec;
+ FILENUM *fnums; /* [N] */
+ FILENUMS filenums;
+
+ // undo state
+ struct indexer_commit_keys commit_keys; // set of keys to commit
+ DBT_ARRAY *hot_keys;
+ DBT_ARRAY *hot_vals;
+
+ // test functions
+ int (*undo_do)(DB_INDEXER *indexer, DB *hotdb, DBT* key, ULEHANDLE ule);
+ TOKUTXN_STATE (*test_xid_state)(DB_INDEXER *indexer, TXNID xid);
+ void (*test_lock_key)(DB_INDEXER *indexer, TXNID xid, DB *hotdb, DBT *key);
+ int (*test_delete_provisional)(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids);
+ int (*test_delete_committed)(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids);
+ int (*test_insert_provisional)(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids);
+ int (*test_insert_committed)(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids);
+ int (*test_commit_any)(DB_INDEXER *indexer, DB *db, DBT *key, XIDS xids);
+
+ // test flags
+ int test_only_flags;
+};
+
+void indexer_undo_do_init(DB_INDEXER *indexer);
+
+void indexer_undo_do_destroy(DB_INDEXER *indexer);
+
+int indexer_undo_do(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info *prov_info, DBT_ARRAY *hot_keys, DBT_ARRAY *hot_vals);
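+
+// (As the comments above suggest, indexer_undo_do() is driven once per leaf entry
+// from the hot indexer's LE_CURSOR scan, with prov_info gathered in the cursor
+// callback that holds exclusive access to the source DB.)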
diff --git a/storage/tokudb/PerconaFT/src/indexer-undo-do.cc b/storage/tokudb/PerconaFT/src/indexer-undo-do.cc
new file mode 100644
index 00000000..cc864027
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/indexer-undo-do.cc
@@ -0,0 +1,654 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <toku_assert.h>
+
+#include <stdio.h>
+#include <string.h>
+
+#include <ft/le-cursor.h>
+#include <ft/ft-ops.h>
+#include <ft/leafentry.h>
+#include <ft/ule.h>
+#include <ft/txn/txn_manager.h>
+#include <ft/txn/xids.h>
+#include <ft/cachetable/checkpoint.h>
+
+#include "ydb-internal.h"
+#include "ydb_row_lock.h"
+#include "indexer.h"
+#include "indexer-internal.h"
+
+// initialize the commit keys
+static void
+indexer_commit_keys_init(struct indexer_commit_keys *keys) {
+ keys->max_keys = keys->current_keys = 0;
+ keys->keys = NULL;
+}
+
+// destroy the commit keys
+static void
+indexer_commit_keys_destroy(struct indexer_commit_keys *keys) {
+ for (int i = 0; i < keys->max_keys; i++)
+ toku_destroy_dbt(&keys->keys[i]);
+ toku_free(keys->keys);
+}
+
+// return the number of keys in the ordered set
+static int
+indexer_commit_keys_valid(struct indexer_commit_keys *keys) {
+ return keys->current_keys;
+}
+
+// add a key to the commit keys
+static void
+indexer_commit_keys_add(struct indexer_commit_keys *keys, size_t length, void *ptr) {
+ if (keys->current_keys >= keys->max_keys) {
+ int new_max_keys = keys->max_keys == 0 ? 256 : keys->max_keys * 2;
+ keys->keys = (DBT *) toku_xrealloc(keys->keys, new_max_keys * sizeof (DBT));
+ for (int i = keys->current_keys; i < new_max_keys; i++)
+ toku_init_dbt_flags(&keys->keys[i], DB_DBT_REALLOC);
+ keys->max_keys = new_max_keys;
+ }
+ DBT *key = &keys->keys[keys->current_keys];
+ toku_dbt_set(length, ptr, key, NULL);
+ keys->current_keys++;
+}
+
+// set the ordered set to empty
+static void
+indexer_commit_keys_set_empty(struct indexer_commit_keys *keys) {
+ keys->current_keys = 0;
+}
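+
+// Typical lifecycle of this ordered set (for orientation): indexer_undo_do_init()
+// calls _init() once, _set_empty() clears it before each batch of undo/do work,
+// _add() collects the keys whose messages were applied as committed, the keys at
+// indexes [0, _valid()) are then replayed as commit messages, and
+// indexer_undo_do_destroy() calls _destroy() when the indexer shuts down.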
+
+// internal functions
+static int indexer_set_xid(DB_INDEXER *indexer, TXNID xid, XIDS *xids_result);
+static int indexer_append_xid(DB_INDEXER *indexer, TXNID xid, XIDS *xids_result);
+
+static bool indexer_find_prev_xr(DB_INDEXER *indexer, ULEHANDLE ule, uint64_t xrindex, uint64_t *prev_xrindex);
+
+static int indexer_generate_hot_keys_vals(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info* prov_info, UXRHANDLE uxr, DBT_ARRAY *hotkeys, DBT_ARRAY *hotvals);
+static int indexer_ft_delete_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids, TOKUTXN txn);
+static int indexer_ft_delete_committed(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids);
+static int indexer_ft_insert_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids, TOKUTXN txn);
+static int indexer_ft_insert_committed(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids);
+static int indexer_ft_commit(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids);
+static void indexer_lock_key(DB_INDEXER *indexer, DB *hotdb, DBT *key, TXNID outermost_live_xid, TOKUTXN txn);
+
+
+// initialize undo globals located in the indexer private object
+void
+indexer_undo_do_init(DB_INDEXER *indexer) {
+ indexer_commit_keys_init(&indexer->i->commit_keys);
+ XMALLOC_N(indexer->i->N, indexer->i->hot_keys);
+ XMALLOC_N(indexer->i->N, indexer->i->hot_vals);
+ for (int which = 0; which < indexer->i->N; which++) {
+ toku_dbt_array_init(&indexer->i->hot_keys[which], 1);
+ toku_dbt_array_init(&indexer->i->hot_vals[which], 1);
+ }
+}
+
+// destroy the undo globals
+void
+indexer_undo_do_destroy(DB_INDEXER *indexer) {
+ indexer_commit_keys_destroy(&indexer->i->commit_keys);
+ if (indexer->i->hot_keys) {
+ invariant(indexer->i->hot_vals);
+ for (int which = 0; which < indexer->i->N; which++) {
+ toku_dbt_array_destroy(&indexer->i->hot_keys[which]);
+ toku_dbt_array_destroy(&indexer->i->hot_vals[which]);
+ }
+ toku_free(indexer->i->hot_keys);
+ toku_free(indexer->i->hot_vals);
+ }
+}
+
+static int
+indexer_undo_do_committed(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info *prov_info, DBT_ARRAY *hot_keys, DBT_ARRAY *hot_vals) {
+ int result = 0;
+ ULEHANDLE ule = prov_info->ule;
+
+ // init the xids to the root xid
+ XIDS xids = toku_xids_get_root_xids();
+
+ // scan the committed stack from bottom to top
+ uint32_t num_committed = ule_get_num_committed(ule);
+ for (uint64_t xrindex = 0; xrindex < num_committed; xrindex++) {
+
+ indexer_commit_keys_set_empty(&indexer->i->commit_keys);
+
+ // get the transaction record
+ UXRHANDLE uxr = ule_get_uxr(ule, xrindex);
+
+        // set up the xids
+ TXNID this_xid = uxr_get_txnid(uxr);
+ result = indexer_set_xid(indexer, this_xid, &xids);
+ if (result != 0)
+ break;
+
+ // placeholders in the committed stack are not allowed
+ invariant(!uxr_is_placeholder(uxr));
+
+ // undo
+ if (xrindex > 0) {
+ uint64_t prev_xrindex = xrindex - 1;
+ UXRHANDLE prevuxr = ule_get_uxr(ule, prev_xrindex);
+ if (uxr_is_delete(prevuxr)) {
+ ; // do nothing
+ } else if (uxr_is_insert(prevuxr)) {
+ // generate the hot delete key
+ result = indexer_generate_hot_keys_vals(indexer, hotdb, prov_info, prevuxr, hot_keys, NULL);
+ if (result == 0) {
+ paranoid_invariant(hot_keys->size <= hot_keys->capacity);
+ for (uint32_t i = 0; i < hot_keys->size; i++) {
+ DBT *hotkey = &hot_keys->dbts[i];
+
+ // send the delete message
+ result = indexer_ft_delete_committed(indexer, hotdb, hotkey, xids);
+ if (result == 0) {
+ indexer_commit_keys_add(&indexer->i->commit_keys, hotkey->size, hotkey->data);
+ }
+ }
+ }
+ } else {
+ assert(0);
+ }
+ }
+ if (result != 0) {
+ break;
+ }
+
+ // do
+ if (uxr_is_delete(uxr)) {
+ ; // do nothing
+ } else if (uxr_is_insert(uxr)) {
+ // generate the hot insert key and val
+ result = indexer_generate_hot_keys_vals(indexer, hotdb, prov_info, uxr, hot_keys, hot_vals);
+ if (result == 0) {
+ paranoid_invariant(hot_keys->size == hot_vals->size);
+ paranoid_invariant(hot_keys->size <= hot_keys->capacity);
+ paranoid_invariant(hot_vals->size <= hot_vals->capacity);
+ for (uint32_t i = 0; i < hot_keys->size; i++) {
+ DBT *hotkey = &hot_keys->dbts[i];
+ DBT *hotval = &hot_vals->dbts[i];
+
+ // send the insert message
+ result = indexer_ft_insert_committed(indexer, hotdb, hotkey, hotval, xids);
+ if (result == 0) {
+ indexer_commit_keys_add(&indexer->i->commit_keys, hotkey->size, hotkey->data);
+ }
+ }
+ }
+ } else
+ assert(0);
+
+ // send commit messages if needed
+ for (int i = 0; result == 0 && i < indexer_commit_keys_valid(&indexer->i->commit_keys); i++)
+ result = indexer_ft_commit(indexer, hotdb, &indexer->i->commit_keys.keys[i], xids);
+
+ if (result != 0)
+ break;
+ }
+
+ toku_xids_destroy(&xids);
+
+ return result;
+}
+
+static void release_txns(
+ ULEHANDLE ule,
+ TOKUTXN_STATE* prov_states,
+ TOKUTXN* prov_txns,
+ DB_INDEXER *indexer
+ )
+{
+ uint32_t num_provisional = ule_get_num_provisional(ule);
+ if (indexer->i->test_xid_state) {
+ goto exit;
+ }
+ for (uint32_t i = 0; i < num_provisional; i++) {
+ if (prov_states[i] == TOKUTXN_LIVE || prov_states[i] == TOKUTXN_PREPARING) {
+ toku_txn_unpin_live_txn(prov_txns[i]);
+ }
+ }
+exit:
+ return;
+}
+
+static int
+indexer_undo_do_provisional(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info *prov_info, DBT_ARRAY *hot_keys, DBT_ARRAY *hot_vals) {
+ int result = 0;
+ indexer_commit_keys_set_empty(&indexer->i->commit_keys);
+ ULEHANDLE ule = prov_info->ule;
+
+ // init the xids to the root xid
+ XIDS xids = toku_xids_get_root_xids();
+
+ uint32_t num_provisional = prov_info->num_provisional;
+ uint32_t num_committed = prov_info->num_committed;
+ TXNID *prov_ids = prov_info->prov_ids;
+ TOKUTXN *prov_txns = prov_info->prov_txns;
+ TOKUTXN_STATE *prov_states = prov_info->prov_states;
+
+ // nothing to do if there's nothing provisional
+ if (num_provisional == 0) {
+ goto exit;
+ }
+
+    TOKUTXN_STATE outermost_xid_state;
+ outermost_xid_state = prov_states[0];
+
+ // scan the provisional stack from the outermost to the innermost transaction record
+ TOKUTXN curr_txn;
+ curr_txn = NULL;
+ for (uint64_t xrindex = num_committed; xrindex < num_committed + num_provisional; xrindex++) {
+
+ // get the ith transaction record
+ UXRHANDLE uxr = ule_get_uxr(ule, xrindex);
+
+ TXNID this_xid = uxr_get_txnid(uxr);
+ TOKUTXN_STATE this_xid_state = prov_states[xrindex - num_committed];
+
+ if (this_xid_state == TOKUTXN_ABORTING) {
+ break; // nothing to do once we reach a transaction that is aborting
+ }
+
+ if (xrindex == num_committed) { // if this is the outermost xr
+ result = indexer_set_xid(indexer, this_xid, &xids); // always add the outermost xid to the XIDS list
+ curr_txn = prov_txns[xrindex - num_committed];
+ } else {
+ switch (this_xid_state) {
+ case TOKUTXN_LIVE:
+ result = indexer_append_xid(indexer, this_xid, &xids); // append a live xid to the XIDS list
+ curr_txn = prov_txns[xrindex - num_committed];
+ if (!indexer->i->test_xid_state) {
+ assert(curr_txn);
+ }
+ break;
+ case TOKUTXN_PREPARING:
+ assert(0); // not allowed
+ case TOKUTXN_COMMITTING:
+ case TOKUTXN_ABORTING:
+ case TOKUTXN_RETIRED:
+ break; // nothing to do
+ }
+ }
+ if (result != 0)
+ break;
+
+ if (outermost_xid_state != TOKUTXN_LIVE && xrindex > num_committed) {
+ // If the outermost is not live, then the inner state must be retired. That's the way that the txn API works.
+ assert(this_xid_state == TOKUTXN_RETIRED);
+ }
+
+ if (uxr_is_placeholder(uxr)) {
+ continue; // skip placeholders
+ }
+ // undo
+ uint64_t prev_xrindex;
+ bool prev_xrindex_found = indexer_find_prev_xr(indexer, ule, xrindex, &prev_xrindex);
+ if (prev_xrindex_found) {
+ UXRHANDLE prevuxr = ule_get_uxr(ule, prev_xrindex);
+ if (uxr_is_delete(prevuxr)) {
+ ; // do nothing
+ } else if (uxr_is_insert(prevuxr)) {
+ // generate the hot delete key
+ result = indexer_generate_hot_keys_vals(indexer, hotdb, prov_info, prevuxr, hot_keys, NULL);
+ if (result == 0) {
+ paranoid_invariant(hot_keys->size <= hot_keys->capacity);
+ for (uint32_t i = 0; i < hot_keys->size; i++) {
+ DBT *hotkey = &hot_keys->dbts[i];
+
+ // send the delete message
+ switch (outermost_xid_state) {
+ case TOKUTXN_LIVE:
+ case TOKUTXN_PREPARING:
+ invariant(this_xid_state != TOKUTXN_ABORTING);
+ invariant(!curr_txn || toku_txn_get_state(curr_txn) == TOKUTXN_LIVE || toku_txn_get_state(curr_txn) == TOKUTXN_PREPARING);
+ result = indexer_ft_delete_provisional(indexer, hotdb, hotkey, xids, curr_txn);
+ if (result == 0) {
+ indexer_lock_key(indexer, hotdb, hotkey, prov_ids[0], curr_txn);
+ }
+ break;
+ case TOKUTXN_COMMITTING:
+ case TOKUTXN_RETIRED:
+ result = indexer_ft_delete_committed(indexer, hotdb, hotkey, xids);
+ if (result == 0)
+ indexer_commit_keys_add(&indexer->i->commit_keys, hotkey->size, hotkey->data);
+ break;
+ case TOKUTXN_ABORTING: // can not happen since we stop processing the leaf entry if the outer most xr is aborting
+ assert(0);
+ }
+ }
+ }
+ } else
+ assert(0);
+ }
+ if (result != 0)
+ break;
+
+ // do
+ if (uxr_is_delete(uxr)) {
+ ; // do nothing
+ } else if (uxr_is_insert(uxr)) {
+ // generate the hot insert key and val
+ result = indexer_generate_hot_keys_vals(indexer, hotdb, prov_info, uxr, hot_keys, hot_vals);
+ if (result == 0) {
+ paranoid_invariant(hot_keys->size == hot_vals->size);
+ paranoid_invariant(hot_keys->size <= hot_keys->capacity);
+ paranoid_invariant(hot_vals->size <= hot_vals->capacity);
+ for (uint32_t i = 0; i < hot_keys->size; i++) {
+ DBT *hotkey = &hot_keys->dbts[i];
+ DBT *hotval = &hot_vals->dbts[i];
+
+ // send the insert message
+ switch (outermost_xid_state) {
+ case TOKUTXN_LIVE:
+ case TOKUTXN_PREPARING:
+ assert(this_xid_state != TOKUTXN_ABORTING);
+ invariant(!curr_txn || toku_txn_get_state(curr_txn) == TOKUTXN_LIVE || toku_txn_get_state(curr_txn) == TOKUTXN_PREPARING);
+ result = indexer_ft_insert_provisional(indexer, hotdb, hotkey, hotval, xids, curr_txn);
+ if (result == 0) {
+ indexer_lock_key(indexer, hotdb, hotkey, prov_ids[0], prov_txns[0]);
+ }
+ break;
+ case TOKUTXN_COMMITTING:
+ case TOKUTXN_RETIRED:
+ result = indexer_ft_insert_committed(indexer, hotdb, hotkey, hotval, xids);
+ // no need to do this because we do implicit commits on inserts
+ if (0 && result == 0)
+ indexer_commit_keys_add(&indexer->i->commit_keys, hotkey->size, hotkey->data);
+ break;
+ case TOKUTXN_ABORTING: // can not happen since we stop processing the leaf entry if the outer most xr is aborting
+ assert(0);
+ }
+ }
+ }
+ } else
+ assert(0);
+
+ if (result != 0)
+ break;
+ }
+
+ // send commits if the outermost provisional transaction is committed
+ for (int i = 0; result == 0 && i < indexer_commit_keys_valid(&indexer->i->commit_keys); i++) {
+ result = indexer_ft_commit(indexer, hotdb, &indexer->i->commit_keys.keys[i], xids);
+ }
+
+ // be careful with this in the future. Right now, only exit path
+ // is BEFORE we call fill_prov_info, so this happens before exit
+ // If in the future we add a way to exit after fill_prov_info,
+ // then this will need to be handled below exit
+ release_txns(ule, prov_states, prov_txns, indexer);
+exit:
+ toku_xids_destroy(&xids);
+ return result;
+}
+
+int
+indexer_undo_do(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info *prov_info, DBT_ARRAY *hot_keys, DBT_ARRAY *hot_vals) {
+ int result = indexer_undo_do_committed(indexer, hotdb, prov_info, hot_keys, hot_vals);
+ if (result == 0) {
+ result = indexer_undo_do_provisional(indexer, hotdb, prov_info, hot_keys, hot_vals);
+ }
+ if (indexer->i->test_only_flags == INDEXER_TEST_ONLY_ERROR_CALLBACK) {
+ result = EINVAL;
+ }
+
+ return result;
+}
+
+// set xids_result = [root_xid, this_xid]
+// Note that this could be sped up by adding a new xids constructor that constructs the stack with
+// exactly one xid.
+static int
+indexer_set_xid(DB_INDEXER *UU(indexer), TXNID this_xid, XIDS *xids_result) {
+ int result = 0;
+ XIDS old_xids = *xids_result;
+ XIDS new_xids = toku_xids_get_root_xids();
+ if (this_xid != TXNID_NONE) {
+ XIDS child_xids;
+ result = toku_xids_create_child(new_xids, &child_xids, this_xid);
+ toku_xids_destroy(&new_xids);
+ if (result == 0)
+ new_xids = child_xids;
+ }
+ if (result == 0) {
+ toku_xids_destroy(&old_xids);
+ *xids_result = new_xids;
+ }
+
+ return result;
+}
+
+// append xid to xids_result
+static int
+indexer_append_xid(DB_INDEXER *UU(indexer), TXNID xid, XIDS *xids_result) {
+ XIDS old_xids = *xids_result;
+ XIDS new_xids;
+ int result = toku_xids_create_child(old_xids, &new_xids, xid);
+ if (result == 0) {
+ toku_xids_destroy(&old_xids);
+ *xids_result = new_xids;
+ }
+ return result;
+}
+
+static int
+indexer_generate_hot_keys_vals(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info *prov_info, UXRHANDLE uxr, DBT_ARRAY *hotkeys, DBT_ARRAY *hotvals) {
+ int result = 0;
+
+ // setup the source key
+ DBT srckey;
+ toku_fill_dbt(&srckey, prov_info->key, prov_info->keylen);
+
+ // setup the source val
+ DBT srcval;
+ toku_fill_dbt(&srcval, uxr_get_val(uxr), uxr_get_vallen(uxr));
+
+ // generate the secondary row
+ DB_ENV *env = indexer->i->env;
+ if (hotvals) {
+ result = env->i->generate_row_for_put(hotdb, indexer->i->src_db, hotkeys, hotvals, &srckey, &srcval);
+ }
+ else {
+ result = env->i->generate_row_for_del(hotdb, indexer->i->src_db, hotkeys, &srckey, &srcval);
+ }
+ toku_destroy_dbt(&srckey);
+ toku_destroy_dbt(&srcval);
+
+ return result;
+}
+
+// Take a write lock on the given key for the outermost xid in the xids list.
+static void
+indexer_lock_key(DB_INDEXER *indexer, DB *hotdb, DBT *key, TXNID outermost_live_xid, TOKUTXN txn) {
+ // TEST
+ if (indexer->i->test_lock_key) {
+ indexer->i->test_lock_key(indexer, outermost_live_xid, hotdb, key);
+ } else {
+ toku_db_grab_write_lock(hotdb, key, txn);
+ }
+}
+
+// find the index of a non-placeholder transaction record that is previous to the transaction record
+// found at xrindex. return true if one is found and return its index in prev_xrindex. otherwise,
+// return false.
+static bool
+indexer_find_prev_xr(DB_INDEXER *UU(indexer), ULEHANDLE ule, uint64_t xrindex, uint64_t *prev_xrindex) {
+ assert(xrindex < ule_num_uxrs(ule));
+ bool prev_found = false;
+ while (xrindex > 0) {
+ xrindex -= 1;
+ UXRHANDLE uxr = ule_get_uxr(ule, xrindex);
+ if (!uxr_is_placeholder(uxr)) {
+ *prev_xrindex = xrindex;
+ prev_found = true;
+ break;
+ }
+ }
+ return prev_found;
+}
+
+// inject "delete" message into ft with logging in recovery and rollback logs,
+// and making association between txn and ft
+static int
+indexer_ft_delete_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids, TOKUTXN txn) {
+ int result = 0;
+ // TEST
+ if (indexer->i->test_delete_provisional) {
+ result = indexer->i->test_delete_provisional(indexer, hotdb, hotkey, xids);
+ } else {
+ result = toku_ydb_check_avail_fs_space(indexer->i->env);
+ if (result == 0) {
+ assert(txn != NULL);
+            // Not sure if this is really necessary, as
+            // the hot index DB should have to be checkpointed
+            // upon commit of the hot index transaction, but
+            // it is safe to do this.
+            // This question applies to delete_committed, insert_provisional
+            // and insert_committed.
+ toku_ft_maybe_delete (hotdb->i->ft_handle, hotkey, txn, false, ZERO_LSN, true);
+ }
+ }
+ return result;
+}
+
+// send a delete message into the tree without rollback or recovery logging
+static int
+indexer_ft_delete_committed(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids) {
+ int result = 0;
+ // TEST
+ if (indexer->i->test_delete_committed) {
+ result = indexer->i->test_delete_committed(indexer, hotdb, hotkey, xids);
+ } else {
+ result = toku_ydb_check_avail_fs_space(indexer->i->env);
+ if (result == 0) {
+ FT_HANDLE ft_h = db_struct_i(hotdb)->ft_handle;
+ TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h);
+ txn_manager_state txn_state_for_gc(txn_manager);
+
+ TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h);
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_estimate,
+ oldest_referenced_xid_estimate,
+ true);
+ toku_ft_send_delete(db_struct_i(hotdb)->ft_handle, hotkey, xids, &gc_info);
+ toku_ft_adjust_logical_row_count(db_struct_i(hotdb)->ft_handle->ft, -1);
+ }
+ }
+ return result;
+}
+
+// inject "insert" message into ft with logging in recovery and rollback logs,
+// and making association between txn and ft
+static int
+indexer_ft_insert_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids, TOKUTXN txn) {
+ int result = 0;
+ // TEST
+ if (indexer->i->test_insert_provisional) {
+ result = indexer->i->test_insert_provisional(indexer, hotdb, hotkey, hotval, xids);
+ } else {
+ result = toku_ydb_check_avail_fs_space(indexer->i->env);
+ if (result == 0) {
+ assert(txn != NULL);
+ // comment/question in indexer_ft_delete_provisional applies
+ toku_ft_maybe_insert (hotdb->i->ft_handle, hotkey, hotval, txn, false, ZERO_LSN, true, FT_INSERT);
+ }
+ }
+ return result;
+}
+
+// send an insert message into the tree without rollback or recovery logging
+// and without associating the txn and the ft
+static int
+indexer_ft_insert_committed(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids) {
+ int result = 0;
+ // TEST
+ if (indexer->i->test_insert_committed) {
+ result = indexer->i->test_insert_committed(indexer, hotdb, hotkey, hotval, xids);
+ } else {
+ result = toku_ydb_check_avail_fs_space(indexer->i->env);
+ if (result == 0) {
+ FT_HANDLE ft_h = db_struct_i(hotdb)->ft_handle;
+ TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h);
+ txn_manager_state txn_state_for_gc(txn_manager);
+
+ TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h);
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_estimate,
+ oldest_referenced_xid_estimate,
+ true);
+ toku_ft_send_insert(db_struct_i(hotdb)->ft_handle, hotkey, hotval, xids, FT_INSERT, &gc_info);
+ toku_ft_adjust_logical_row_count(db_struct_i(hotdb)->ft_handle->ft, 1);
+ }
+ }
+ return result;
+}
+
+// send a commit message into the tree
+// Note: If the xid is zero, then the leafentry will already have a committed transaction
+// record and no commit message is needed. (A commit message with xid of zero is
+// illegal anyway.)
+static int
+indexer_ft_commit(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids) {
+ int result = 0;
+ if (toku_xids_get_num_xids(xids) > 0) {// send commit only when not the root xid
+ // TEST
+ if (indexer->i->test_commit_any) {
+ result = indexer->i->test_commit_any(indexer, hotdb, hotkey, xids);
+ } else {
+ result = toku_ydb_check_avail_fs_space(indexer->i->env);
+ if (result == 0) {
+ FT_HANDLE ft_h = db_struct_i(hotdb)->ft_handle;
+ TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h);
+ txn_manager_state txn_state_for_gc(txn_manager);
+
+ TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h);
+ txn_gc_info gc_info(&txn_state_for_gc,
+ oldest_referenced_xid_estimate,
+ oldest_referenced_xid_estimate,
+ true);
+ toku_ft_send_commit_any(db_struct_i(hotdb)->ft_handle, hotkey, xids, &gc_info);
+ }
+ }
+ }
+ return result;
+}
diff --git a/storage/tokudb/PerconaFT/src/indexer.cc b/storage/tokudb/PerconaFT/src/indexer.cc
new file mode 100644
index 00000000..044ffac9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/indexer.cc
@@ -0,0 +1,720 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/*
+ * The indexer
+ */
+#include <stdio.h>
+#include <string.h>
+#include <toku_portability.h>
+#include "toku_assert.h"
+#include "ydb-internal.h"
+#include <ft/le-cursor.h>
+#include "indexer.h"
+#include <ft/ft-ops.h>
+#include <ft/leafentry.h>
+#include <ft/ule.h>
+#include <ft/txn/xids.h>
+#include <ft/logger/log-internal.h>
+#include <ft/cachetable/checkpoint.h>
+#include <portability/toku_atomic.h>
+#include "loader.h"
+#include <util/status.h>
+
+///////////////////////////////////////////////////////////////////////////////////
+// Engine status
+//
+// Status is intended for display to humans to help understand system behavior.
+// It does not need to be perfectly thread-safe.
+
+static INDEXER_STATUS_S indexer_status;
+
+#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(indexer_status, k, c, t, "indexer: " l, inc)
+
+static void
+status_init(void) {
+ // Note, this function initializes the keyname, type, and legend fields.
+ // Value fields are initialized to zero by compiler.
+ STATUS_INIT(INDEXER_CREATE, nullptr, UINT64, "number of indexers successfully created", TOKU_ENGINE_STATUS);
+ STATUS_INIT(INDEXER_CREATE_FAIL, nullptr, UINT64, "number of calls to toku_indexer_create_indexer() that failed", TOKU_ENGINE_STATUS);
+ STATUS_INIT(INDEXER_BUILD, nullptr, UINT64, "number of calls to indexer->build() succeeded", TOKU_ENGINE_STATUS);
+ STATUS_INIT(INDEXER_BUILD_FAIL, nullptr, UINT64, "number of calls to indexer->build() failed", TOKU_ENGINE_STATUS);
+ STATUS_INIT(INDEXER_CLOSE, nullptr, UINT64, "number of calls to indexer->close() that succeeded", TOKU_ENGINE_STATUS);
+ STATUS_INIT(INDEXER_CLOSE_FAIL, nullptr, UINT64, "number of calls to indexer->close() that failed", TOKU_ENGINE_STATUS);
+ STATUS_INIT(INDEXER_ABORT, nullptr, UINT64, "number of calls to indexer->abort()", TOKU_ENGINE_STATUS);
+ STATUS_INIT(INDEXER_CURRENT, nullptr, UINT64, "number of indexers currently in existence", TOKU_ENGINE_STATUS);
+ STATUS_INIT(INDEXER_MAX, nullptr, UINT64, "max number of indexers that ever existed simultaneously", TOKU_ENGINE_STATUS);
+ indexer_status.initialized = true;
+}
+#undef STATUS_INIT
+
+void
+toku_indexer_get_status(INDEXER_STATUS statp) {
+ if (!indexer_status.initialized)
+ status_init();
+ *statp = indexer_status;
+}
+
+#define STATUS_VALUE(x) indexer_status.status[x].value.num
+
+#include "indexer-internal.h"
+
+static int build_index(DB_INDEXER *indexer);
+static int close_indexer(DB_INDEXER *indexer);
+static int abort_indexer(DB_INDEXER *indexer);
+static void free_indexer_resources(DB_INDEXER *indexer);
+static void free_indexer(DB_INDEXER *indexer);
+static int update_estimated_rows(DB_INDEXER *indexer);
+static int maybe_call_poll_func(DB_INDEXER *indexer, uint64_t loop_count);
+
+static int
+associate_indexer_with_hot_dbs(DB_INDEXER *indexer, DB *dest_dbs[], int N) {
+ int result =0;
+ for (int i = 0; i < N; i++) {
+ result = toku_db_set_indexer(dest_dbs[i], indexer);
+ if (result != 0) {
+ for (int j = 0; j < i; j++) {
+ int result2 = toku_db_set_indexer(dest_dbs[j], NULL);
+ lazy_assert(result2 == 0);
+ }
+ break;
+ }
+ }
+ return result;
+}
+
+static void
+disassociate_indexer_from_hot_dbs(DB_INDEXER *indexer) {
+ for (int i = 0; i < indexer->i->N; i++) {
+ int result = toku_db_set_indexer(indexer->i->dest_dbs[i], NULL);
+ lazy_assert(result == 0);
+ }
+}
+
+/*
+ * free_indexer_resources() frees all of the resources associated with
+ * struct __toku_indexer_internal
+ * assumes any previously freed items set the field pointer to NULL
+ */
+
+static void
+free_indexer_resources(DB_INDEXER *indexer) {
+ if ( indexer->i ) {
+ toku_mutex_destroy(&indexer->i->indexer_lock);
+ toku_mutex_destroy(&indexer->i->indexer_estimate_lock);
+ toku_destroy_dbt(&indexer->i->position_estimate);
+ if ( indexer->i->lec ) {
+ toku_le_cursor_close(indexer->i->lec);
+ }
+ if ( indexer->i->fnums ) {
+ toku_free(indexer->i->fnums);
+ indexer->i->fnums = NULL;
+ }
+ indexer_undo_do_destroy(indexer);
+ // indexer->i
+ toku_free(indexer->i);
+ indexer->i = NULL;
+ }
+}
+
+static void
+free_indexer(DB_INDEXER *indexer) {
+ if ( indexer ) {
+ free_indexer_resources(indexer);
+ toku_free(indexer);
+ indexer = NULL;
+ }
+}
+
+void
+toku_indexer_lock(DB_INDEXER* indexer) {
+ toku_mutex_lock(&indexer->i->indexer_lock);
+}
+
+void
+toku_indexer_unlock(DB_INDEXER* indexer) {
+ toku_mutex_unlock(&indexer->i->indexer_lock);
+}
+
+// a shortcut call
+//
+// a cheap(er) call to see if a key must be inserted
+// into the DB. If true, then we know we have to insert.
+// If false, then we don't know, and have to check again
+// after grabbing the indexer lock
+bool
+toku_indexer_may_insert(DB_INDEXER* indexer, const DBT* key) {
+ bool may_insert = false;
+ toku_mutex_lock(&indexer->i->indexer_estimate_lock);
+
+ // if we have no position estimate, we can't tell, so return false
+ if (indexer->i->position_estimate.data == nullptr) {
+ may_insert = false;
+ } else {
+ DB *db = indexer->i->src_db;
+ const toku::comparator &cmp = toku_ft_get_comparator(db->i->ft_handle);
+ int c = cmp(&indexer->i->position_estimate, key);
+
+ // if key > position_estimate, then we know the indexer cursor
+ // is past key, and we can safely say that associated values of
+ // key must be inserted into the indexer's db
+ may_insert = c < 0;
+ }
+
+ toku_mutex_unlock(&indexer->i->indexer_estimate_lock);
+ return may_insert;
+}
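+
+// Hypothetical caller-side sketch (not part of this file): the intended
+// pattern is to try the cheap estimate check first and only fall back to the
+// authoritative check under the indexer lock when the estimate cannot decide.
+// The names writer_put_into_hot_db() and do_insert() are illustrative only.
+//
+//   static void writer_put_into_hot_db(DB_INDEXER *indexer, DB *hotdb,
+//                                      DBT *key, DBT *val) {
+//       if (toku_indexer_may_insert(indexer, key)) {
+//           do_insert(hotdb, key, val);       // fast path, estimate says "must insert"
+//       } else {
+//           toku_indexer_lock(indexer);       // slow path: re-check under the lock
+//           if (toku_indexer_should_insert_key(indexer, key))
+//               do_insert(hotdb, key, val);
+//           toku_indexer_unlock(indexer);
+//       }
+//   }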
+
+void
+toku_indexer_update_estimate(DB_INDEXER* indexer) {
+ toku_mutex_lock(&indexer->i->indexer_estimate_lock);
+ toku_le_cursor_update_estimate(indexer->i->lec, &indexer->i->position_estimate);
+ toku_mutex_unlock(&indexer->i->indexer_estimate_lock);
+}
+
+// forward declare the test-only wrapper function for undo-do
+static int test_indexer_undo_do(DB_INDEXER *indexer, DB *hotdb, DBT* key, ULEHANDLE ule);
+
+int
+toku_indexer_create_indexer(DB_ENV *env,
+ DB_TXN *txn,
+ DB_INDEXER **indexerp,
+ DB *src_db,
+ int N,
+ DB *dest_dbs[/*N*/],
+ uint32_t db_flags[/*N*/] UU(),
+ uint32_t indexer_flags)
+{
+ int rval;
+ DB_INDEXER *indexer = 0; // set later when created
+ HANDLE_READ_ONLY_TXN(txn);
+
+ *indexerp = NULL;
+
+ XCALLOC(indexer); // init to all zeroes (thus initializing the error_callback and poll_func)
+ if ( !indexer ) { rval = ENOMEM; goto create_exit; }
+ XCALLOC(indexer->i); // init to all zeroes (thus initializing all pointers to NULL)
+ if ( !indexer->i ) { rval = ENOMEM; goto create_exit; }
+
+ indexer->i->env = env;
+ indexer->i->txn = txn;
+ indexer->i->src_db = src_db;
+ indexer->i->N = N;
+ indexer->i->dest_dbs = dest_dbs;
+ indexer->i->indexer_flags = indexer_flags;
+ indexer->i->loop_mod = 1000; // call poll_func every 1000 rows
+ indexer->i->estimated_rows = 0;
+ indexer->i->undo_do = test_indexer_undo_do; // TEST export the undo do function
+
+ XCALLOC_N(N, indexer->i->fnums);
+ if ( !indexer->i->fnums ) { rval = ENOMEM; goto create_exit; }
+ for(int i=0;i<indexer->i->N;i++) {
+ indexer->i->fnums[i] = toku_cachefile_filenum(db_struct_i(dest_dbs[i])->ft_handle->ft->cf);
+ }
+ indexer->i->filenums.num = N;
+ indexer->i->filenums.filenums = indexer->i->fnums;
+ indexer->i->test_only_flags = 0; // for test use only
+
+ indexer->set_error_callback = toku_indexer_set_error_callback;
+ indexer->set_poll_function = toku_indexer_set_poll_function;
+ indexer->build = build_index;
+ indexer->close = close_indexer;
+ indexer->abort = abort_indexer;
+
+ toku_mutex_init(
+ *indexer_i_indexer_lock_mutex_key, &indexer->i->indexer_lock, nullptr);
+ toku_mutex_init(*indexer_i_indexer_estimate_lock_mutex_key,
+ &indexer->i->indexer_estimate_lock,
+ nullptr);
+ toku_init_dbt(&indexer->i->position_estimate);
+
+    //
+    // Create and close a dummy loader to get redirection going for the hot
+    // indexer. This way, if the hot index aborts but other transactions still
+    // hold references to the underlying FT, those transactions can do dummy
+    // operations on the FT while the DB gets redirected back to an empty
+    // dictionary.
+    //
+ {
+ DB_LOADER* loader = NULL;
+ rval = toku_loader_create_loader(env, txn, &loader, NULL, N, &dest_dbs[0], NULL, NULL, DB_PRELOCKED_WRITE | LOADER_DISALLOW_PUTS, true);
+ if (rval) {
+ goto create_exit;
+ }
+ rval = loader->close(loader);
+ if (rval) {
+ goto create_exit;
+ }
+ }
+
+ // create and initialize the leafentry cursor
+ rval = toku_le_cursor_create(&indexer->i->lec, db_struct_i(src_db)->ft_handle, db_txn_struct_i(txn)->tokutxn);
+ if ( !indexer->i->lec ) { goto create_exit; }
+
+ // 2954: add recovery and rollback entries
+ LSN hot_index_lsn; // not used (yet)
+ TOKUTXN ttxn;
+ ttxn = db_txn_struct_i(txn)->tokutxn;
+ FILENUMS filenums;
+ filenums = indexer->i->filenums;
+ toku_multi_operation_client_lock();
+ toku_ft_hot_index(NULL, ttxn, filenums, 1, &hot_index_lsn);
+ toku_multi_operation_client_unlock();
+
+ if (rval == 0) {
+ rval = associate_indexer_with_hot_dbs(indexer, dest_dbs, N);
+ }
+create_exit:
+ if ( rval == 0 ) {
+
+ indexer_undo_do_init(indexer);
+
+ *indexerp = indexer;
+
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(INDEXER_CREATE), 1);
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(INDEXER_CURRENT), 1);
+ if ( STATUS_VALUE(INDEXER_CURRENT) > STATUS_VALUE(INDEXER_MAX) )
+            STATUS_VALUE(INDEXER_MAX) = STATUS_VALUE(INDEXER_CURRENT);   // NOT WORTH A LOCK TO MAKE THREADSAFE, may be inaccurate
+
+ } else {
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(INDEXER_CREATE_FAIL), 1);
+ free_indexer(indexer);
+ }
+
+ return rval;
+}
+
+int
+toku_indexer_set_poll_function(DB_INDEXER *indexer,
+ int (*poll_func)(void *poll_extra,
+ float progress),
+ void *poll_extra)
+{
+ invariant(indexer != NULL);
+ indexer->i->poll_func = poll_func;
+ indexer->i->poll_extra = poll_extra;
+ return 0;
+}
+
+int
+toku_indexer_set_error_callback(DB_INDEXER *indexer,
+ void (*error_cb)(DB *db, int i, int err,
+ DBT *key, DBT *val,
+ void *error_extra),
+ void *error_extra)
+{
+ invariant(indexer != NULL);
+ indexer->i->error_callback = error_cb;
+ indexer->i->error_extra = error_extra;
+ return 0;
+}
+
+// a key is to the right of the indexer's cursor if it compares
+// greater than the current le cursor position.
+bool
+toku_indexer_should_insert_key(DB_INDEXER *indexer, const DBT *key) {
+    // the hot indexer runs from the end to the beginning; it gets the largest keys first
+    //
+    // if key is less than the indexer's position, then we should NOT insert it, because
+    // the indexer will get to it. If it is greater or equal, the indexer has already
+    // processed the key and will not get to it; therefore, we need to handle it ourselves
+ return toku_le_cursor_is_key_greater_or_equal(indexer->i->lec, key);
+}
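+
+// Worked example (hypothetical keys): the le cursor moves from the largest
+// key toward the smallest. If it is currently positioned at "m", a concurrent
+// put of "p" compares greater-or-equal to the cursor position, so this
+// function returns true and the writer must apply it to the hot db itself;
+// a put of "c" returns false because the indexer has not reached "c" yet and
+// will apply it when it does.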
+
+// initialize provisional info by allocating enough space to hold provisional
+// ids, states, and txns for each of the provisional entries in the ule. the
+// ule and le remain owned by the caller, not this struct.
+static void
+ule_prov_info_init(struct ule_prov_info *prov_info, const void* key, uint32_t keylen, LEAFENTRY le, ULEHANDLE ule) {
+ prov_info->le = le;
+ prov_info->ule = ule;
+ prov_info->keylen = keylen;
+ prov_info->key = toku_xmalloc(keylen);
+ memcpy(prov_info->key, key, keylen);
+ prov_info->num_provisional = ule_get_num_provisional(ule);
+ prov_info->num_committed = ule_get_num_committed(ule);
+ uint32_t n = prov_info->num_provisional;
+ if (n > 0) {
+ XMALLOC_N(n, prov_info->prov_ids);
+ XMALLOC_N(n, prov_info->prov_states);
+ XMALLOC_N(n, prov_info->prov_txns);
+ }
+}
+
+// clean up anything possibly created by ule_prov_info_init()
+static void
+ule_prov_info_destroy(struct ule_prov_info *prov_info) {
+ if (prov_info->num_provisional > 0) {
+ toku_free(prov_info->prov_ids);
+ toku_free(prov_info->prov_states);
+ toku_free(prov_info->prov_txns);
+ } else {
+ // nothing to free if there was nothing provisional
+ invariant(prov_info->prov_ids == NULL);
+ invariant(prov_info->prov_states == NULL);
+ invariant(prov_info->prov_txns == NULL);
+ }
+}
+
+static void
+indexer_fill_prov_info(DB_INDEXER *indexer, struct ule_prov_info *prov_info) {
+ ULEHANDLE ule = prov_info->ule;
+ uint32_t num_provisional = prov_info->num_provisional;
+ uint32_t num_committed = prov_info->num_committed;
+ TXNID *prov_ids = prov_info->prov_ids;
+ TOKUTXN_STATE *prov_states = prov_info->prov_states;
+ TOKUTXN *prov_txns = prov_info->prov_txns;
+
+    // don't bother grabbing the txn manager lock if we don't
+ // have any provisional txns to record
+ if (num_provisional == 0) {
+ return;
+ }
+
+ // handle test case first
+ if (indexer->i->test_xid_state) {
+ for (uint32_t i = 0; i < num_provisional; i++) {
+ UXRHANDLE uxr = ule_get_uxr(ule, num_committed + i);
+ prov_ids[i] = uxr_get_txnid(uxr);
+ prov_states[i] = indexer->i->test_xid_state(indexer, prov_ids[i]);
+ prov_txns[i] = NULL;
+ }
+ return;
+ }
+
+ // hold the txn manager lock while we inspect txn state
+ // and pin some live txns
+ DB_ENV *env = indexer->i->env;
+ TXN_MANAGER txn_manager = toku_logger_get_txn_manager(env->i->logger);
+ TXNID parent_xid = uxr_get_txnid(ule_get_uxr(ule, num_committed));
+
+ // let's first initialize things to defaults
+ for (uint32_t i = 0; i < num_provisional; i++) {
+ UXRHANDLE uxr = ule_get_uxr(ule, num_committed + i);
+ prov_ids[i] = uxr_get_txnid(uxr);
+ prov_txns[i] = NULL;
+ prov_states[i] = TOKUTXN_RETIRED;
+ }
+
+ toku_txn_manager_suspend(txn_manager);
+ TXNID_PAIR root_xid_pair = {.parent_id64=parent_xid, .child_id64 = TXNID_NONE};
+ TOKUTXN root_txn = NULL;
+ toku_txn_manager_id2txn_unlocked(
+ txn_manager,
+ root_xid_pair,
+ &root_txn
+ );
+ if (root_txn == NULL) {
+ toku_txn_manager_resume(txn_manager);
+ return; //everything is retired in this case, the default
+ }
+ prov_txns[0] = root_txn;
+ prov_states[0] = toku_txn_get_state(root_txn);
+ toku_txn_lock_state(root_txn);
+ prov_states[0] = toku_txn_get_state(root_txn);
+ if (prov_states[0] == TOKUTXN_LIVE || prov_states[0] == TOKUTXN_PREPARING) {
+ // pin this live txn so it can't commit or abort until we're done with it
+ toku_txn_pin_live_txn_unlocked(root_txn);
+ }
+ toku_txn_unlock_state(root_txn);
+
+ root_txn->child_manager->suspend();
+ for (uint32_t i = 1; i < num_provisional; i++) {
+ UXRHANDLE uxr = ule_get_uxr(ule, num_committed + i);
+ TXNID child_id = uxr_get_txnid(uxr);
+ TOKUTXN txn = NULL;
+
+ TXNID_PAIR txnid_pair = {.parent_id64 = parent_xid, .child_id64 = child_id};
+ root_txn->child_manager->find_tokutxn_by_xid_unlocked(txnid_pair, &txn);
+ prov_txns[i] = txn;
+ if (txn) {
+ toku_txn_lock_state(txn);
+ prov_states[i] = toku_txn_get_state(txn);
+ if (prov_states[i] == TOKUTXN_LIVE || prov_states[i] == TOKUTXN_PREPARING) {
+ // pin this live txn so it can't commit or abort until we're done with it
+ toku_txn_pin_live_txn_unlocked(txn);
+ }
+ toku_txn_unlock_state(txn);
+ }
+ else {
+ prov_states[i] = TOKUTXN_RETIRED;
+ }
+ }
+ root_txn->child_manager->resume();
+ toku_txn_manager_resume(txn_manager);
+}
+
+struct le_cursor_extra {
+ DB_INDEXER *indexer;
+ struct ule_prov_info *prov_info;
+};
+
+// cursor callback, so it's synchronized with other db operations using
+// cachetable pair locks. Because no txn can commit on this db, read
+// the provisional info for the newly read ule.
+static int
+le_cursor_callback(uint32_t keylen, const void *key, uint32_t UU(vallen), const void *val, void *extra, bool lock_only) {
+ if (lock_only || val == NULL) {
+ ; // do nothing if only locking. do nothing if val==NULL, means DB_NOTFOUND
+ } else {
+ struct le_cursor_extra *CAST_FROM_VOIDP(cursor_extra, extra);
+ struct ule_prov_info *prov_info = cursor_extra->prov_info;
+ // the val here is a leafentry. ule_create does not copy the entire
+ // contents of the leafentry it is given into its own buffers, so we
+ // must allocate space for a leafentry and keep it around with the ule.
+ LEAFENTRY CAST_FROM_VOIDP(le, toku_xmemdup(val, vallen));
+ ULEHANDLE ule = toku_ule_create(le);
+ invariant(ule);
+ // when we initialize prov info, we also pass in the leafentry and ule
+ // pointers so the caller can access them later. it's their job to free
+ // them when they're not needed.
+ ule_prov_info_init(prov_info, key, keylen, le, ule);
+ indexer_fill_prov_info(cursor_extra->indexer, prov_info);
+ }
+ return 0;
+}
+
+// get the next ule and fill out its provisional info in the
+// prov_info struct provided. caller is responsible for cleaning
+// up the ule info after it's done.
+static int
+get_next_ule_with_prov_info(DB_INDEXER *indexer, struct ule_prov_info *prov_info) {
+ struct le_cursor_extra extra = {
+ .indexer = indexer,
+ .prov_info = prov_info,
+ };
+ int r = toku_le_cursor_next(indexer->i->lec, le_cursor_callback, &extra);
+ return r;
+}
+
+static int
+build_index(DB_INDEXER *indexer) {
+ int result = 0;
+
+ bool done = false;
+ for (uint64_t loop_count = 0; !done; loop_count++) {
+
+ toku_indexer_lock(indexer);
+ // grab the multi operation lock because we will be injecting messages
+ // grab it here because we must hold it before
+ // trying to pin any live transactions, as discovered by #5775
+ toku_multi_operation_client_lock();
+
+ // grab the next leaf entry and get its provisional info. we'll
+ // need the provisional info for the undo-do algorithm, and we get
+ // it here so it can be read atomically with respect to txn commit
+ // and abort. the atomicity comes from the root-to-leaf path pinned
+ // by the query and in the getf callback function
+ //
+ // this allocates space for the prov info, so we have to destroy it
+ // when we're done.
+ struct ule_prov_info prov_info;
+ memset(&prov_info, 0, sizeof(prov_info));
+ result = get_next_ule_with_prov_info(indexer, &prov_info);
+
+ if (result != 0) {
+ invariant(prov_info.ule == NULL);
+ done = true;
+ if (result == DB_NOTFOUND) {
+ result = 0; // all done, normal way to exit loop successfully
+ }
+ }
+ else {
+ invariant(prov_info.le);
+ invariant(prov_info.ule);
+ for (int which_db = 0; (which_db < indexer->i->N) && (result == 0); which_db++) {
+ DB *db = indexer->i->dest_dbs[which_db];
+ DBT_ARRAY *hot_keys = &indexer->i->hot_keys[which_db];
+ DBT_ARRAY *hot_vals = &indexer->i->hot_vals[which_db];
+ result = indexer_undo_do(indexer, db, &prov_info, hot_keys, hot_vals);
+ if ((result != 0) && (indexer->i->error_callback != NULL)) {
+ // grab the key and call the error callback
+ DBT key; toku_init_dbt_flags(&key, DB_DBT_REALLOC);
+ toku_dbt_set(prov_info.keylen, prov_info.key, &key, NULL);
+ indexer->i->error_callback(db, which_db, result, &key, NULL, indexer->i->error_extra);
+ toku_destroy_dbt(&key);
+ }
+ }
+ // the leafentry and ule are not owned by the prov_info,
+ // and are still our responsibility to free
+ toku_free(prov_info.le);
+ toku_free(prov_info.key);
+ toku_ule_free(prov_info.ule);
+ }
+
+ toku_multi_operation_client_unlock();
+ toku_indexer_unlock(indexer);
+ ule_prov_info_destroy(&prov_info);
+
+ if (result == 0) {
+ result = maybe_call_poll_func(indexer, loop_count);
+ }
+ if (result != 0) {
+ done = true;
+ }
+ }
+
+ // post index creation cleanup
+ // - optimize?
+ // - garbage collect?
+ // - unique checks?
+
+ if ( result == 0 ) {
+ // Perform a checkpoint so that all of the indexing makes it to disk before continuing.
+        // Otherwise indexing would not be crash-safe because none of the undo-do messages are in the recovery log.
+ DB_ENV *env = indexer->i->env;
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(env->i->cachetable);
+ toku_checkpoint(cp, env->i->logger, NULL, NULL, NULL, NULL, INDEXER_CHECKPOINT);
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(INDEXER_BUILD), 1);
+ } else {
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(INDEXER_BUILD_FAIL), 1);
+ }
+
+ return result;
+}
+
+// Clients must not operate on any of the hot dbs concurrently with close
+static int
+close_indexer(DB_INDEXER *indexer) {
+ int r = 0;
+ (void) toku_sync_fetch_and_sub(&STATUS_VALUE(INDEXER_CURRENT), 1);
+
+ // Disassociate the indexer from the hot db and free_indexer
+ disassociate_indexer_from_hot_dbs(indexer);
+ free_indexer(indexer);
+
+ if ( r == 0 ) {
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(INDEXER_CLOSE), 1);
+ } else {
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(INDEXER_CLOSE_FAIL), 1);
+ }
+ return r;
+}
+
+// Clients must not operate on any of the hot dbs concurrently with abort
+static int
+abort_indexer(DB_INDEXER *indexer) {
+ (void) toku_sync_fetch_and_sub(&STATUS_VALUE(INDEXER_CURRENT), 1);
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(INDEXER_ABORT), 1);
+ // Disassociate the indexer from the hot db and free_indexer
+ disassociate_indexer_from_hot_dbs(indexer);
+ free_indexer(indexer);
+ return 0;
+}
+
+
+// derived from the handlerton's estimate_num_rows()
+static int
+update_estimated_rows(DB_INDEXER *indexer) {
+ int error;
+ DB_TXN *txn = NULL;
+ DB_ENV *db_env = indexer->i->env;
+ error = db_env->txn_begin(db_env, 0, &txn, DB_READ_UNCOMMITTED);
+ if (error == 0) {
+ DB_BTREE_STAT64 stats;
+ DB *db = indexer->i->src_db;
+ error = db->stat64(db, txn, &stats);
+ if (error == 0) {
+ indexer->i->estimated_rows = stats.bt_ndata;
+ }
+ txn->commit(txn, 0);
+ }
+ return error;
+}
+
+static int
+maybe_call_poll_func(DB_INDEXER *indexer, uint64_t loop_count) {
+ int result = 0;
+ if ( indexer->i->poll_func != NULL && ( loop_count % indexer->i->loop_mod ) == 0 ) {
+ int r __attribute__((unused)) = update_estimated_rows(indexer);
+ // what happens if estimate_rows fails?
+ // - currently does not modify estimate, which is probably sufficient
+ float progress;
+ if ( indexer->i->estimated_rows == 0 || loop_count > indexer->i->estimated_rows)
+ progress = 1.0;
+ else
+ progress = (float)loop_count / (float)indexer->i->estimated_rows;
+ result = indexer->i->poll_func(indexer->i->poll_extra, progress);
+ }
+ return result;
+}
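+
+// Worked example (assumed numbers): with loop_mod = 1000 the poll function is
+// invoked on rows 0, 1000, 2000, ...; if estimated_rows is 10000 and
+// loop_count is 2000, the reported progress is 2000/10000 = 0.2. Once
+// loop_count exceeds the estimate (or the estimate is 0), progress is clamped
+// to 1.0.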
+
+
+// this allows us to force errors under test. Flags are defined in indexer.h
+void
+toku_indexer_set_test_only_flags(DB_INDEXER *indexer, int flags) {
+ invariant(indexer != NULL);
+ indexer->i->test_only_flags = flags;
+}
+
+// this allows us to call the undo do function in tests using
+// a convenience wrapper that gets and destroys the ule's prov info
+static int
+test_indexer_undo_do(DB_INDEXER *indexer, DB *hotdb, DBT* key, ULEHANDLE ule) {
+ int which_db;
+ for (which_db = 0; which_db < indexer->i->N; which_db++) {
+ if (indexer->i->dest_dbs[which_db] == hotdb) {
+ break;
+ }
+ }
+ if (which_db == indexer->i->N) {
+ return EINVAL;
+ }
+ struct ule_prov_info prov_info;
+ memset(&prov_info, 0, sizeof(prov_info));
+ // pass null for the leafentry - we don't need it, neither does the info
+ ule_prov_info_init(&prov_info, key->data, key->size, NULL, ule); // mallocs prov_info->key, owned by this function
+ indexer_fill_prov_info(indexer, &prov_info);
+ DBT_ARRAY *hot_keys = &indexer->i->hot_keys[which_db];
+ DBT_ARRAY *hot_vals = &indexer->i->hot_vals[which_db];
+ int r = indexer_undo_do(indexer, hotdb, &prov_info, hot_keys, hot_vals);
+ toku_free(prov_info.key);
+ ule_prov_info_destroy(&prov_info);
+ return r;
+}
+
+DB *
+toku_indexer_get_src_db(DB_INDEXER *indexer) {
+ return indexer->i->src_db;
+}
+
+
+#undef STATUS_VALUE
+
diff --git a/storage/tokudb/PerconaFT/src/indexer.h b/storage/tokudb/PerconaFT/src/indexer.h
new file mode 100644
index 00000000..dc0c2909
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/indexer.h
@@ -0,0 +1,125 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// locking and unlocking functions to synchronize cursor position with
+// XXX_multiple APIs
+void toku_indexer_lock(DB_INDEXER* indexer);
+
+void toku_indexer_unlock(DB_INDEXER* indexer);
+bool toku_indexer_may_insert(DB_INDEXER* indexer, const DBT* key);
+void toku_indexer_update_estimate(DB_INDEXER* indexer);
+
+// The indexer populates multiple destination db's from the contents of one source db.
+// While the indexes are being built by the indexer, the application may continue to
+// change the contents of the source db. The changes will be reflected into the destination
+// db's by the indexer.
+//
+// Each indexer references one source db.
+// A source db may have multiple indexers referencing it.
+// Each indexer references one or more destination db's.
+// Each destination db references the one and only indexer that is building it.
+//
+// env must be set to the YDB environment
+// txn must be set to the transaction under which the indexer will run
+// *indexer is set to the address of the indexer object returned by the create function
+// src_db is the source db
+// N is the number of destination db's
+// dest_dbs is an array of pointers to destination db's
+// db_flags is currently unused
+// indexer_flags is currently unused
+//
+// Returns 0 if the indexer has been created and sets *indexer to the indexer object.
+// If an error occurred while creating the indexer object, a non-zero error number is returned.
+//
+// Clients must not operate on any of the dest_dbs concurrently with create_indexer();
+int toku_indexer_create_indexer(DB_ENV *env,
+ DB_TXN *txn,
+ DB_INDEXER **indexer,
+ DB *src_db,
+ int N,
+ DB *dest_dbs[/*N*/],
+ uint32_t db_flags[/*N*/],
+ uint32_t indexer_flags) __attribute__((__visibility__("default")));
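+
+// Hypothetical lifecycle sketch (env, txn, src_db, dest_dbs and the callbacks
+// my_poll/my_error_cb are assumed to be set up by the caller; error handling
+// is abbreviated):
+//
+//   DB_INDEXER *indexer;
+//   int r = toku_indexer_create_indexer(env, txn, &indexer, src_db,
+//                                       N, dest_dbs, NULL, 0);
+//   if (r == 0) {
+//       indexer->set_poll_function(indexer, my_poll, my_poll_extra);
+//       indexer->set_error_callback(indexer, my_error_cb, my_error_extra);
+//       r = indexer->build(indexer);
+//       if (r == 0)
+//           r = indexer->close(indexer);
+//       else
+//           indexer->abort(indexer);
+//   }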
+
+// Set the indexer poll function
+int toku_indexer_set_poll_function(DB_INDEXER *indexer,
+ int (*poll_function)(void *poll_extra,
+ float progress),
+ void *poll_extra);
+
+// Set the indexer error callback
+int toku_indexer_set_error_callback(DB_INDEXER *indexer,
+ void (*error_cb)(DB *db, int i, int err,
+ DBT *key, DBT *val,
+ void *error_extra),
+ void *error_extra);
+
+// Is the key right of the indexer's leaf entry cursor?
+// Returns true if right of le_cursor
+// Returns false if left or equal to le_cursor
+bool toku_indexer_should_insert_key(DB_INDEXER *indexer, const DBT *key);
+
+// Get the indexer's source db
+DB *toku_indexer_get_src_db(DB_INDEXER *indexer);
+
+// TEST set the indexer's test flags
+extern "C" void toku_indexer_set_test_only_flags(DB_INDEXER *indexer, int flags) __attribute__((__visibility__("default")));
+
+#define INDEXER_TEST_ONLY_ERROR_CALLBACK 1
+
+typedef enum {
+ INDEXER_CREATE = 0, // number of indexers successfully created
+ INDEXER_CREATE_FAIL, // number of calls to toku_indexer_create_indexer() that failed
+    INDEXER_BUILD,       // number of calls to indexer->build() that succeeded
+    INDEXER_BUILD_FAIL,  // number of calls to indexer->build() that failed
+ INDEXER_CLOSE, // number of calls to indexer->close() that succeeded
+ INDEXER_CLOSE_FAIL, // number of calls to indexer->close() that failed
+ INDEXER_ABORT, // number of calls to indexer->abort()
+ INDEXER_CURRENT, // number of indexers currently in existence
+ INDEXER_MAX, // max number of indexers that ever existed simultaneously
+ INDEXER_STATUS_NUM_ROWS
+} indexer_status_entry;
+
+typedef struct {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[INDEXER_STATUS_NUM_ROWS];
+} INDEXER_STATUS_S, *INDEXER_STATUS;
+
+void toku_indexer_get_status(INDEXER_STATUS s);
diff --git a/storage/tokudb/PerconaFT/src/loader.cc b/storage/tokudb/PerconaFT/src/loader.cc
new file mode 100644
index 00000000..4a195d11
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/loader.cc
@@ -0,0 +1,518 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/*
+ * The loader
+ */
+
+#include <toku_portability.h>
+#include <portability/toku_atomic.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <ft/ft.h>
+#include <ft/loader/loader.h>
+#include <ft/cachetable/checkpoint.h>
+
+#include "ydb-internal.h"
+#include "ydb_db.h"
+#include "ydb_load.h"
+
+#include "loader.h"
+#include <util/status.h>
+
+enum {MAX_FILE_SIZE=256};
+
+///////////////////////////////////////////////////////////////////////////////////
+// Engine status
+//
+// Status is intended for display to humans to help understand system behavior.
+// It does not need to be perfectly thread-safe.
+
+static LOADER_STATUS_S loader_status;
+
+#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(loader_status, k, c, t, "loader: " l, inc)
+
+static void
+status_init(void) {
+ // Note, this function initializes the keyname, type, and legend fields.
+ // Value fields are initialized to zero by compiler.
+ STATUS_INIT(LOADER_CREATE, LOADER_NUM_CREATED, UINT64, "number of loaders successfully created", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ STATUS_INIT(LOADER_CREATE_FAIL, nullptr, UINT64, "number of calls to toku_loader_create_loader() that failed", TOKU_ENGINE_STATUS);
+ STATUS_INIT(LOADER_PUT, nullptr, UINT64, "number of calls to loader->put() succeeded", TOKU_ENGINE_STATUS);
+ STATUS_INIT(LOADER_PUT_FAIL, nullptr, UINT64, "number of calls to loader->put() failed", TOKU_ENGINE_STATUS);
+ STATUS_INIT(LOADER_CLOSE, nullptr, UINT64, "number of calls to loader->close() that succeeded", TOKU_ENGINE_STATUS);
+ STATUS_INIT(LOADER_CLOSE_FAIL, nullptr, UINT64, "number of calls to loader->close() that failed", TOKU_ENGINE_STATUS);
+ STATUS_INIT(LOADER_ABORT, nullptr, UINT64, "number of calls to loader->abort()", TOKU_ENGINE_STATUS);
+ STATUS_INIT(LOADER_CURRENT, LOADER_NUM_CURRENT, UINT64, "number of loaders currently in existence", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ STATUS_INIT(LOADER_MAX, LOADER_NUM_MAX, UINT64, "max number of loaders that ever existed simultaneously", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ loader_status.initialized = true;
+}
+#undef STATUS_INIT
+
+void
+toku_loader_get_status(LOADER_STATUS statp) {
+ if (!loader_status.initialized)
+ status_init();
+ *statp = loader_status;
+}
+
+#define STATUS_VALUE(x) loader_status.status[x].value.num
+
+
+struct __toku_loader_internal {
+ DB_ENV *env;
+ DB_TXN *txn;
+ FTLOADER ft_loader;
+ int N;
+ DB **dbs; /* [N] */
+ DB *src_db;
+ uint32_t *db_flags;
+ uint32_t *dbt_flags;
+ uint32_t loader_flags;
+ void (*error_callback)(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra);
+ void *error_extra;
+ int (*poll_func)(void *poll_extra, float progress);
+ void *poll_extra;
+ char *temp_file_template;
+
+ DBT err_key; /* error key */
+ DBT err_val; /* error val */
+ int err_i; /* error i */
+ int err_errno;
+
+ char **inames_in_env; /* [N] inames of new files to be created */
+};
+
+static void free_inames(char **inames, int n) {
+ for (int i = 0; i < n; i++) {
+ toku_free(inames[i]);
+ }
+ toku_free(inames);
+}
+
+/*
+ * free_loader_resources() frees all of the resources associated with
+ * struct __toku_loader_internal
+ * assumes any previously freed items set the field pointer to NULL
+ * Requires that the ft_loader is closed or destroyed before calling this function.
+ */
+static void free_loader_resources(DB_LOADER *loader)
+{
+ if ( loader->i ) {
+ toku_destroy_dbt(&loader->i->err_key);
+ toku_destroy_dbt(&loader->i->err_val);
+
+ if (loader->i->inames_in_env) {
+ free_inames(loader->i->inames_in_env, loader->i->N);
+ loader->i->inames_in_env = nullptr;
+ }
+ toku_free(loader->i->temp_file_template);
+ loader->i->temp_file_template = nullptr;
+
+ // loader->i
+ toku_free(loader->i);
+ loader->i = nullptr;
+ }
+}
+
+static void free_loader(DB_LOADER *loader)
+{
+ if ( loader ) free_loader_resources(loader);
+ toku_free(loader);
+}
+
+static const char *loader_temp_prefix = "tokuld"; // #2536
+static const char *loader_temp_suffix = "XXXXXX";
+
+static int ft_loader_close_and_redirect(DB_LOADER *loader) {
+ int r;
+ // use the bulk loader
+ // in case you've been looking - here is where the real work is done!
+ r = toku_ft_loader_close(loader->i->ft_loader,
+ loader->i->error_callback, loader->i->error_extra,
+ loader->i->poll_func, loader->i->poll_extra);
+ if ( r==0 ) {
+ for (int i=0; i<loader->i->N; i++) {
+ toku_multi_operation_client_lock(); //Must hold MO lock for dictionary_redirect.
+ r = toku_dictionary_redirect(loader->i->inames_in_env[i],
+ loader->i->dbs[i]->i->ft_handle,
+ db_txn_struct_i(loader->i->txn)->tokutxn);
+ toku_multi_operation_client_unlock();
+ if ( r!=0 ) break;
+ }
+ }
+ return r;
+}
+
+
+// loader_flags currently has the following flags:
+// LOADER_DISALLOW_PUTS loader->put is not allowed.
+// Loader is only being used for its side effects
+// DB_PRELOCKED_WRITE Table lock is already held, no need to relock.
+int
+toku_loader_create_loader(DB_ENV *env,
+ DB_TXN *txn,
+ DB_LOADER **blp,
+ DB *src_db,
+ int N,
+ DB *dbs[],
+ uint32_t db_flags[/*N*/],
+ uint32_t dbt_flags[/*N*/],
+ uint32_t loader_flags,
+ bool check_empty) {
+ int rval;
+ HANDLE_READ_ONLY_TXN(txn);
+ DB_TXN *loader_txn = nullptr;
+
+ *blp = NULL; // set later when created
+
+ DB_LOADER *loader = NULL;
+ bool puts_allowed = !(loader_flags & LOADER_DISALLOW_PUTS);
+ bool compress_intermediates = (loader_flags & LOADER_COMPRESS_INTERMEDIATES) != 0;
+ XCALLOC(loader); // init to all zeroes (thus initializing the error_callback and poll_func)
+ XCALLOC(loader->i); // init to all zeroes (thus initializing all pointers to NULL)
+
+ loader->i->env = env;
+ loader->i->txn = txn;
+ loader->i->N = N;
+ loader->i->dbs = dbs;
+ loader->i->src_db = src_db;
+ loader->i->db_flags = db_flags;
+ loader->i->dbt_flags = dbt_flags;
+ loader->i->loader_flags = loader_flags;
+ loader->i->temp_file_template = (char *)toku_malloc(MAX_FILE_SIZE);
+
+ int n = snprintf(loader->i->temp_file_template, MAX_FILE_SIZE, "%s/%s%s", env->i->real_tmp_dir, loader_temp_prefix, loader_temp_suffix);
+ if ( !(n>0 && n<MAX_FILE_SIZE) ) {
+ rval = ENAMETOOLONG;
+ goto create_exit;
+ }
+
+ toku_init_dbt(&loader->i->err_key);
+ toku_init_dbt(&loader->i->err_val);
+ loader->i->err_i = 0;
+ loader->i->err_errno = 0;
+
+ loader->set_error_callback = toku_loader_set_error_callback;
+ loader->set_poll_function = toku_loader_set_poll_function;
+ loader->put = toku_loader_put;
+ loader->close = toku_loader_close;
+ loader->abort = toku_loader_abort;
+
+ // lock tables and check empty
+ for(int i=0;i<N;i++) {
+ if (!(loader_flags&DB_PRELOCKED_WRITE)) {
+ rval = toku_db_pre_acquire_table_lock(dbs[i], txn);
+ if (rval!=0) {
+ goto create_exit;
+ }
+ }
+ if (check_empty) {
+ bool empty = toku_ft_is_empty_fast(dbs[i]->i->ft_handle);
+ if (!empty) {
+ rval = ENOTEMPTY;
+ goto create_exit;
+ }
+ }
+ }
+
+ {
+ if (env->i->open_flags & DB_INIT_TXN) {
+ rval = env->txn_begin(env, txn, &loader_txn, 0);
+ if (rval) {
+ goto create_exit;
+ }
+ }
+
+ ft_compare_func compare_functions[N];
+ for (int i=0; i<N; i++) {
+ compare_functions[i] = env->i->bt_compare;
+ }
+
+ // time to open the big kahuna
+ char **XMALLOC_N(N, new_inames_in_env);
+ for (int i = 0; i < N; i++) {
+ new_inames_in_env[i] = nullptr;
+ }
+ FT_HANDLE *XMALLOC_N(N, fts);
+ for (int i=0; i<N; i++) {
+ fts[i] = dbs[i]->i->ft_handle;
+ }
+ LSN load_lsn;
+ rval = locked_load_inames(env, loader_txn, N, dbs, new_inames_in_env, &load_lsn, puts_allowed);
+ if ( rval!=0 ) {
+ free_inames(new_inames_in_env, N);
+ toku_free(fts);
+ goto create_exit;
+ }
+ TOKUTXN ttxn = loader_txn ? db_txn_struct_i(loader_txn)->tokutxn : NULL;
+ rval = toku_ft_loader_open(&loader->i->ft_loader,
+ env->i->cachetable,
+ env->i->generate_row_for_put,
+ src_db,
+ N,
+ fts, dbs,
+ (const char **)new_inames_in_env,
+ compare_functions,
+ loader->i->temp_file_template,
+ load_lsn,
+ ttxn,
+ puts_allowed,
+ env->get_loader_memory_size(env),
+ compress_intermediates,
+ puts_allowed);
+ if ( rval!=0 ) {
+ free_inames(new_inames_in_env, N);
+ toku_free(fts);
+ goto create_exit;
+ }
+
+ loader->i->inames_in_env = new_inames_in_env;
+ toku_free(fts);
+
+ if (!puts_allowed) {
+            // no puts are allowed, so close the ft_loader now and skip straight
+            // to the redirection
+            rval = ft_loader_close_and_redirect(loader);
+            assert_zero(rval);
+            loader->i->ft_loader = NULL;
+            rval = 0;
+ }
+
+ rval = loader_txn->commit(loader_txn, 0);
+ assert_zero(rval);
+ loader_txn = nullptr;
+
+ rval = 0;
+ }
+ *blp = loader;
+ create_exit:
+ if (loader_txn) {
+ int r = loader_txn->abort(loader_txn);
+ assert_zero(r);
+ loader_txn = nullptr;
+ }
+ if (rval == 0) {
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(LOADER_CREATE), 1);
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(LOADER_CURRENT), 1);
+ if (STATUS_VALUE(LOADER_CURRENT) > STATUS_VALUE(LOADER_MAX) )
+ STATUS_VALUE(LOADER_MAX) = STATUS_VALUE(LOADER_CURRENT); // not worth a lock to make threadsafe, may be inaccurate
+ }
+ else {
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(LOADER_CREATE_FAIL), 1);
+ free_loader(loader);
+ }
+ return rval;
+}
+
+int toku_loader_set_poll_function(DB_LOADER *loader,
+ int (*poll_func)(void *extra, float progress),
+ void *poll_extra)
+{
+ invariant(loader != NULL);
+ loader->i->poll_func = poll_func;
+ loader->i->poll_extra = poll_extra;
+ return 0;
+}
+
+int toku_loader_set_error_callback(DB_LOADER *loader,
+ void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *extra),
+ void *error_extra)
+{
+ invariant(loader != NULL);
+ loader->i->error_callback = error_cb;
+ loader->i->error_extra = error_extra;
+ return 0;
+}
+
+int toku_loader_put(DB_LOADER *loader, DBT *key, DBT *val)
+{
+ int r = 0;
+ int i = 0;
+    // err_i is unused now (always 0). How would we know which dictionary
+ // the error happens in? (put_multiple and toku_ft_loader_put do NOT report
+ // which dictionary).
+
+ // skip put if error already found
+ if ( loader->i->err_errno != 0 ) {
+ r = -1;
+ goto cleanup;
+ }
+
+ if (loader->i->loader_flags & LOADER_DISALLOW_PUTS) {
+ r = EINVAL;
+ goto cleanup;
+ }
+ else {
+ // calling toku_ft_loader_put without a lock assumes that the
+ // handlerton is guaranteeing single access to the loader
+ // future multi-threaded solutions may need to protect this call
+ r = toku_ft_loader_put(loader->i->ft_loader, key, val);
+ }
+ if ( r != 0 ) {
+ // spec says errors all happen on close
+ // - have to save key, val, errno (r) and i for duplicate callback
+ toku_clone_dbt(&loader->i->err_key, *key);
+ toku_clone_dbt(&loader->i->err_val, *val);
+
+ loader->i->err_i = i;
+ loader->i->err_errno = r;
+
+ // deliberately return content free value
+ // - must call error_callback to get error info
+ r = -1;
+ }
+ cleanup:
+ if (r==0)
+ STATUS_VALUE(LOADER_PUT)++; // executed too often to be worth making threadsafe
+ else
+ STATUS_VALUE(LOADER_PUT_FAIL)++;
+ return r;
+}
+
+static void redirect_loader_to_empty_dictionaries(DB_LOADER *loader) {
+ DB_LOADER* tmp_loader = NULL;
+ int r = toku_loader_create_loader(
+ loader->i->env,
+ loader->i->txn,
+ &tmp_loader,
+ loader->i->src_db,
+ loader->i->N,
+ loader->i->dbs,
+ loader->i->db_flags,
+ loader->i->dbt_flags,
+ LOADER_DISALLOW_PUTS,
+ false
+ );
+ lazy_assert_zero(r);
+ r = toku_loader_close(tmp_loader);
+}
+
+int toku_loader_close(DB_LOADER *loader)
+{
+ (void) toku_sync_fetch_and_sub(&STATUS_VALUE(LOADER_CURRENT), 1);
+ int r=0;
+ if ( loader->i->err_errno != 0 ) {
+ if ( loader->i->error_callback != NULL ) {
+ loader->i->error_callback(loader->i->dbs[loader->i->err_i], loader->i->err_i, loader->i->err_errno, &loader->i->err_key, &loader->i->err_val, loader->i->error_extra);
+ }
+ if (!(loader->i->loader_flags & LOADER_DISALLOW_PUTS ) ) {
+ r = toku_ft_loader_abort(loader->i->ft_loader, true);
+ redirect_loader_to_empty_dictionaries(loader);
+ }
+ else {
+ r = loader->i->err_errno;
+ }
+ }
+ else { // no error outstanding
+ if (!(loader->i->loader_flags & LOADER_DISALLOW_PUTS ) ) {
+ r = ft_loader_close_and_redirect(loader);
+ if (r) {
+ redirect_loader_to_empty_dictionaries(loader);
+ }
+ }
+ }
+ free_loader(loader);
+ if (r==0)
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(LOADER_CLOSE), 1);
+ else
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(LOADER_CLOSE_FAIL), 1);
+ return r;
+}
+
+int toku_loader_abort(DB_LOADER *loader)
+{
+ (void) toku_sync_fetch_and_sub(&STATUS_VALUE(LOADER_CURRENT), 1);
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(LOADER_ABORT), 1);
+ int r=0;
+ if ( loader->i->err_errno != 0 ) {
+ if ( loader->i->error_callback != NULL ) {
+ loader->i->error_callback(loader->i->dbs[loader->i->err_i], loader->i->err_i, loader->i->err_errno, &loader->i->err_key, &loader->i->err_val, loader->i->error_extra);
+ }
+ }
+
+ if (!(loader->i->loader_flags & LOADER_DISALLOW_PUTS) ) {
+ r = toku_ft_loader_abort(loader->i->ft_loader, true);
+ lazy_assert_zero(r);
+ }
+
+ redirect_loader_to_empty_dictionaries(loader);
+ free_loader(loader);
+ return r;
+}
+
+
+// find all of the files in the environment's temp directory that match the loader temp name and remove them
+int toku_loader_cleanup_temp_files(DB_ENV *env) {
+ int result;
+ struct dirent *de;
+ char * dir = env->i->real_tmp_dir;
+ DIR *d = opendir(dir);
+ if (d==0) {
+ result = get_error_errno(); goto exit;
+ }
+
+ result = 0;
+ while ((de = readdir(d))) {
+ int r = memcmp(de->d_name, loader_temp_prefix, strlen(loader_temp_prefix));
+ if (r == 0 && strlen(de->d_name) == strlen(loader_temp_prefix) + strlen(loader_temp_suffix)) {
+ int fnamelen = strlen(dir) + 1 + strlen(de->d_name) + 1; // One for the slash and one for the trailing NUL.
+ char fname[fnamelen];
+ int l = snprintf(fname, fnamelen, "%s/%s", dir, de->d_name);
+ assert(l+1 == fnamelen);
+ r = unlink(fname);
+ if (r!=0) {
+ result = get_error_errno();
+ perror("Trying to delete a rolltmp file");
+ }
+ }
+ }
+ {
+ int r = closedir(d);
+ if (r == -1)
+ result = get_error_errno();
+ }
+
+exit:
+ return result;
+}
+
+
+
+#undef STATUS_VALUE
+
diff --git a/storage/tokudb/PerconaFT/src/loader.h b/storage/tokudb/PerconaFT/src/loader.h
new file mode 100644
index 00000000..c5e7a357
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/loader.h
@@ -0,0 +1,156 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+/*
+Create and set up a loader.
+ - The loader will operate in environment env, and the load will happen within transaction txn.
+ - You must remember to close (or abort) the loader eventually (otherwise the resulting DBs will
+ not be valid, and you will have a memory leak).
+ - The number of databases to be loaded is N.
+ - The databases must already be open, and their handles are passed in via the array dbs.
+ In particular dbs[i] is the ith database.
+ - The loader will work right whether the DBs are empty or full. However if any of the DBs are not empty,
+ it may not be fast (e.g., the loader may simply perform DB->put() operations).
+ - For each row that is put into the loader, for i over each of the N DBs, generate_row is invoked on the
+ row to generate a secondary row.
+ - The DBTs passed to generate_row() will have the DB_DBT_REALLOC flag set, and the extract
+ function should realloc the memory passed in. The ulen field indicates how large the realloc'd
+ storage is, and if the extract function does perform a realloc it should update the ulen field.
+ - We require that the extract function always return 0.
+ - The generate_row function must be thread safe.
+ - Whenever two rows in dbs[i] need to be compared we use that db's comparison function. The
+ comparison function must be thread safe.
+ - DBs must have been set up with descriptors and comparison functions before calling any extract
+ or compare functions.
+ - loader_flags is used to specify loader specific behavior. For instance, LOADER_USE_PUTS tells the
+ loader to use traditional puts to save disk space while loading (at the cost of performance)
+ - The new loader is returned in *blp.
+
+ Modifies: env, txn, blp, and dbs.
+*/
+int toku_loader_create_loader(DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t dbt_flags[/*N*/], uint32_t loader_flags, bool check_empty);
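+
+/*
+Hypothetical usage sketch. The row source next_row() and the callbacks
+my_error_cb/my_poll are illustrative only, and error handling is abbreviated;
+see the function comments below for the full contracts.
+
+    DB_LOADER *loader;
+    int r = toku_loader_create_loader(env, txn, &loader, src_db, N, dbs,
+                                      db_flags, dbt_flags, 0, true);
+    if (r == 0) {
+        loader->set_error_callback(loader, my_error_cb, NULL);
+        loader->set_poll_function(loader, my_poll, NULL);
+        DBT key, val;
+        while (r == 0 && next_row(&key, &val))
+            r = loader->put(loader, &key, &val);
+        if (r == 0)
+            r = loader->close(loader);   // sorts, merges and builds the dictionaries
+        else
+            loader->abort(loader);       // error info is delivered via my_error_cb
+    }
+*/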
+
+
+/*
+Set a error callback.
+ - If at any point during the load the system notices that an error has occurred, error information is recorded.
+ - The callback function may be called during DB_LOADER->close() or DB_LOADER->abort(), at which time the error
+ information is returned.
+ - A key-val pair for one of the errors is returned along with the db, and the index i indicating which db
+ had the problem.
+ - This function will be called at most once (so even if there are many problems, only one call will be made.)
+ - If a duplicate is discovered, the error is DB_KEYEXIST.
+ - The error_extra passed at the time of set_error_callback is the value passed as the error_extra when an error occurs.
+*/
+int toku_loader_set_error_callback(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra), void *error_extra);
+
+
+/*
+Set the polling function.
+ - During the DB_LOADER->close operation, the poll function is called periodically.
+ - If it ever returns nonzero, then the loader stops as soon as possible.
+ - The poll function is called with the extra passed into the loader create function.
+ - A floating point number is also returned, which ranges from 0.0 to 1.0, indicating progress. Progress of 0.0 means
+ no progress so far. Progress of 0.5 means that the job is about half done. Progress of 1.0 means the job is done.
+ The progress is just an estimate.
+*/
+int toku_loader_set_poll_function(DB_LOADER *loader, int (*poll_func)(void *poll_extra, float progress), void *poll_extra);
+
+
+/*
+Give a row to the loader.
+ - Returns zero if no error, non-zero if error.
+ - When the application sees a non-zero return from put(), it must abort(), which would then call the error callback.
+ - Once put() returns a non-zero value, any loader calls other than abort() are unsupported and will result in undefined behavior.
+*/
+int toku_loader_put(DB_LOADER *loader, DBT *key, DBT *val);
+
+
+/*
+Finish the load,
+ - Take all the rows and put them into dictionaries which are returned as open handles through the original dbs array.
+ - Frees all the memory allocated by the loader.
+ - You may not use the loader handle again after calling close.
+ - The system will return DB_KEYEXIST if, in any of the resulting databases, there are two different rows with keys
+ that compare to be equal (and the duplicate callback function, if set, is called first).
+ - If the polling function has been set, the loader will periodically call the polling function. If the polling function
+ ever returns a nonzero value, then the loader will return immediately, possibly with the dictionaries in some
+ inconsistent state. (To get them to a consistent state, the enclosing transaction should abort.)
+ - To free the resources used by a loader, either DB_LOADER->close or DB_LOADER->abort must be called. After calling either
+ of those functions, no further loader operations can be performed with that loader.
+ - The DBs remain open after the loader is closed.
+*/
+int toku_loader_close(DB_LOADER *loader);
+
+
+/*
+Abort the load,
+ - Possibly leave none, some, or all of the puts in effect. You may need to abort the enclosing transaction to get
+ back to a sane state.
+ - To free the resources used by a loader, either DB_LOADER->close or DB_LOADER->abort must be called. After calling either
+ of those functions, no further loader operations can be performed with that loader.
+ - The DBs remain open after the loader is aborted.
+ */
+int toku_loader_abort(DB_LOADER *loader);
+
+// Remove any loader temp files that may have been left from a crashed system
+int toku_loader_cleanup_temp_files(DB_ENV *env);
+
+
+typedef enum {
+ LOADER_CREATE = 0, // number of loaders successfully created
+ LOADER_CREATE_FAIL, // number of calls to toku_loader_create_loader() that failed
+ LOADER_PUT, // number of calls to toku_loader_put() that succeeded
+ LOADER_PUT_FAIL, // number of calls to toku_loader_put() that failed
+ LOADER_CLOSE, // number of calls to toku_loader_close()
+ LOADER_CLOSE_FAIL, // number of calls to toku_loader_close() that failed
+ LOADER_ABORT, // number of calls to toku_loader_abort()
+ LOADER_CURRENT, // number of loaders currently in existence
+ LOADER_MAX, // max number of loaders that ever existed simultaneously
+ LOADER_STATUS_NUM_ROWS
+} loader_status_entry;
+
+typedef struct {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[LOADER_STATUS_NUM_ROWS];
+} LOADER_STATUS_S, *LOADER_STATUS;
+
+
+void toku_loader_get_status(LOADER_STATUS s);
diff --git a/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt
new file mode 100644
index 00000000..c01a8f0d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt
@@ -0,0 +1,493 @@
+set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE DONT_DEPRECATE_ERRNO)
+
+if(BUILD_TESTING OR BUILD_SRC_TESTS)
+ function(add_ydb_test bin)
+ add_toku_test(ydb ${bin} ${ARGN})
+ endfunction(add_ydb_test)
+ function(add_ydb_test_aux name bin)
+ add_toku_test_aux(ydb ${name} ${bin} ${ARGN})
+ endfunction(add_ydb_test_aux)
+
+ function(add_ydb_helgrind_test bin)
+ add_helgrind_test(ydb helgrind_${bin} $<TARGET_FILE:${bin}> ${ARGN})
+ endfunction(add_ydb_helgrind_test)
+ function(add_ydb_drd_test_aux name bin)
+ add_drd_test(ydb ${name} $<TARGET_FILE:${bin}> ${ARGN})
+ endfunction(add_ydb_drd_test_aux)
+ function(add_ydb_drd_test bin)
+ add_ydb_drd_test_aux(drd_${bin} ${bin} ${ARGN})
+ endfunction(add_ydb_drd_test)
+
+ file(GLOB transparent_upgrade_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" upgrade*.cc)
+
+ file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc)
+ list(REMOVE_ITEM srcs ${transparent_upgrade_srcs})
+
+ set(recover_srcs test_log2.cc test_log3.cc test_log4.cc test_log5.cc test_log6.cc test_log7.cc test_log8.cc test_log9.cc test_log10.cc)
+ file(GLOB abortrecover_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" recover-*.cc)
+ file(GLOB loader_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" loader-*.cc)
+ file(GLOB stress_test_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" test_stress*.cc)
+ string(REGEX REPLACE "\\.cc(;|$)" ".recover\\1" recover_tests "${recover_srcs}")
+ string(REGEX REPLACE "\\.cc(;|$)" ".abortrecover\\1" abortrecover_tests "${abortrecover_srcs}")
+ string(REGEX REPLACE "\\.cc(;|$)" ".loader\\1" loader_tests "${loader_srcs}")
+ string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" stress_tests "${stress_test_srcs}")
+
+ set(tdb_srcs ${srcs})
+ string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_bins "${tdb_srcs}")
+ list(REMOVE_ITEM tdb_srcs ${abortrecover_srcs} ${loader_srcs})
+ string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_tests "${tdb_srcs}")
+
+ set(tdb_tests_that_should_fail
+ test_db_no_env.tdb
+ test_log8.recover
+ test_log9.recover
+ test_log10.recover
+ recover-missing-dbfile.abortrecover
+ recover-missing-dbfile-2.abortrecover
+ loader-tpch-load.loader
+ )
+
+ ## #5138 only reproduces when using the static library.
+ list(REMOVE_ITEM tdb_bins test-5138.tdb)
+ add_executable(test-5138.tdb test-5138.cc)
+ target_link_libraries(test-5138.tdb ${LIBTOKUDB}_static z ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
+ add_space_separated_property(TARGET test-5138.tdb COMPILE_FLAGS -fvisibility=hidden)
+ add_ydb_test(test-5138.tdb)
+ add_ydb_test(rollback-inconsistency.tdb)
+ foreach(bin ${tdb_bins})
+ get_filename_component(base ${bin} NAME_WE)
+
+ add_executable(${base}.tdb ${base}.cc)
+ # Some of the symbols in util may not be exported properly by
+ # libtokudb.so.
+ # We link the test with util directly so that the test code itself can use
+    # some of those things (e.g. kibbutz in the threaded tests).
+ target_link_libraries(${base}.tdb util ${LIBTOKUDB} ${LIBTOKUPORTABILITY})
+ add_space_separated_property(TARGET ${base}.tdb COMPILE_FLAGS -fvisibility=hidden)
+ endforeach(bin)
+
+ foreach(bin loader-cleanup-test.tdb diskfull.tdb)
+ set_property(TARGET ${bin} APPEND PROPERTY
+ COMPILE_DEFINITIONS DONT_DEPRECATE_WRITES)
+ endforeach(bin)
+
+ macro(declare_custom_tests)
+ foreach(test ${ARGN})
+ list(REMOVE_ITEM tdb_tests ${test})
+ endforeach(test)
+ endmacro(declare_custom_tests)
+
+ declare_custom_tests(test1426.tdb)
+
+ string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" recover_would_be_tdb_tests "${recover_srcs}")
+ declare_custom_tests(${recover_would_be_tdb_tests})
+
+ declare_custom_tests(powerfail.tdb)
+ add_test(ydb/powerfail.tdb echo must run powerfail by hand)
+
+ declare_custom_tests(checkpoint_stress.tdb)
+ configure_file(run_checkpoint_stress_test.sh . COPYONLY)
+ add_test(NAME ydb/checkpoint_stress.tdb
+ COMMAND run_checkpoint_stress_test.sh $<TARGET_FILE:checkpoint_stress.tdb> 5 5001 137)
+ setup_toku_test_properties(ydb/checkpoint_stress.tdb checkpoint_stress.tdb)
+
+ configure_file(run_recover_stress_test.sh . COPYONLY)
+ add_test(NAME ydb/recover_stress.tdb
+ COMMAND run_recover_stress_test.sh $<TARGET_FILE:checkpoint_stress.tdb> 5 5001 137)
+ setup_toku_test_properties(ydb/recover_stress.tdb recover_stress.tdb)
+
+ declare_custom_tests(diskfull.tdb)
+ configure_file(run_diskfull_test.sh . COPYONLY)
+ add_test(NAME ydb/diskfull.tdb
+ COMMAND run_diskfull_test.sh $<TARGET_FILE:diskfull.tdb> 134)
+ setup_toku_test_properties(ydb/diskfull.tdb diskfull.tdb)
+
+ declare_custom_tests(recovery_fileops_unit.tdb)
+ configure_file(run_recovery_fileops_unit.sh . COPYONLY)
+ file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/recovery_fileops_unit_dir")
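+ # register one recovery_fileops_unit test for every supported combination of the -O through -J option values enumerated below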
+ foreach(ov c d r)
+
+ if (ov STREQUAL c)
+ set(hset 0)
+ set(iset 0)
+ else ()
+ set(hset 0 1 2 3 4 5)
+ set(iset 0 1)
+ endif ()
+
+ foreach(av 0 1)
+ foreach(bv 0 1)
+
+ if (bv)
+ set(dset 0 1)
+ set(eset 0 1)
+ else ()
+ set(dset 0)
+ set(eset 0)
+ endif ()
+
+ foreach(cv 0 1 2)
+ foreach(dv ${dset})
+ foreach(ev ${eset})
+ foreach(fv 0 1)
+ foreach(gv 0 1)
+ foreach(hv ${hset})
+ foreach(iv ${iset})
+
+ if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv)))
+ set(jset 0 1)
+ else ()
+ set(jset 0)
+ endif ()
+
+ foreach(jv ${jset})
+ set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}${jv}")
+ set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}${jv}")
+ set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}${jv}.ctest-errors")
+ add_test(NAME ${testname}
+ COMMAND run_recovery_fileops_unit.sh $<TARGET_FILE:recovery_fileops_unit.tdb> ${errfile} 137
+ -O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv} -J ${jv}
+ )
+ setup_toku_test_properties(${testname} ${envdir})
+ set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}")
+ endforeach(jv)
+ endforeach(iv)
+ endforeach(hv)
+ endforeach(gv)
+ endforeach(fv)
+ endforeach(ev)
+ endforeach(dv)
+ endforeach(cv)
+ endforeach(bv)
+ endforeach(av)
+ endforeach(ov)
+
+ if (NOT (CMAKE_SYSTEM_NAME MATCHES Darwin OR USE_GCOV))
+ declare_custom_tests(helgrind1.tdb)
+ add_test(NAME ydb/helgrind_helgrind1.tdb
+ COMMAND valgrind --quiet --tool=helgrind --error-exitcode=1 --log-file=helgrind1.tdb.deleteme $<TARGET_FILE:helgrind1.tdb>)
+ setup_toku_test_properties(ydb/helgrind_helgrind1.tdb helgrind_helgrind1.tdb)
+ set_tests_properties(ydb/helgrind_helgrind1.tdb PROPERTIES WILL_FAIL TRUE)
+ endif()
+ declare_custom_tests(helgrind2.tdb)
+ declare_custom_tests(helgrind3.tdb)
+ add_ydb_helgrind_test(helgrind2.tdb)
+ add_ydb_helgrind_test(helgrind3.tdb)
+
+ declare_custom_tests(test_groupcommit_count.tdb)
+ add_ydb_test(test_groupcommit_count.tdb -n 1)
+ add_ydb_drd_test(test_groupcommit_count.tdb -n 2)
+
+ add_ydb_drd_test(test_4015.tdb)
+
+ # We link the locktree so that stress test 0 can call some
+ # functions (ie: lock escalation) directly.
+ target_link_libraries(test_stress0.tdb locktree)
+
+ # Set up default stress tests and drd tests. Exclude hot_index.
+ foreach(src ${stress_test_srcs})
+ if(NOT ${src} MATCHES hot_index)
+ get_filename_component(base ${src} NAME_WE)
+ set(test ${base}.tdb)
+
+ if (${src} MATCHES test_stress0)
+ add_ydb_test(${test} --num_elements 512 --num_seconds 1000 --join_timeout 600)
+ else ()
+ add_ydb_test(${test} --num_elements 150000 --num_seconds 1000 --join_timeout 600)
+ endif ()
+
+ add_ydb_drd_test_aux(drd_tiny_${test} ${test} --num_seconds 5 --num_elements 150 --join_timeout 3000)
+ set_tests_properties(ydb/drd_tiny_${test} PROPERTIES TIMEOUT 3600)
+
+ add_test(ydb/drd_mid_${test}/prepare ${test} --only_create --num_elements 10000)
+ setup_toku_test_properties(ydb/drd_mid_${test}/prepare drd_mid_${test})
+ add_ydb_drd_test_aux(drd_mid_${test} ${test} --only_stress --num_elements 10000 --num_seconds 100 --join_timeout 14400)
+ set_tests_properties(ydb/drd_mid_${test} PROPERTIES
+ DEPENDS ydb/drd_mid_${test}/prepare
+ REQUIRED_FILES "drd_mid_${test}.ctest-data"
+ TIMEOUT 15000
+ )
+
+ add_test(ydb/drd_large_${test}/prepare ${test} --only_create --num_elements 150000)
+ setup_toku_test_properties(ydb/drd_large_${test}/prepare drd_large_${test})
+ add_ydb_drd_test_aux(drd_large_${test} ${test} --only_stress --num_elements 150000 --num_seconds 1000 --join_timeout 28800)
+ set_tests_properties(ydb/drd_large_${test} PROPERTIES
+ DEPENDS ydb/drd_large_${test}/prepare
+ REQUIRED_FILES "drd_large_${test}.ctest-data"
+ TIMEOUT 30000
+ )
+ endif()
+ endforeach(src)
+
+ # Set up upgrade tests. Exclude test_stress_openclose
+ foreach(src ${stress_test_srcs})
+ if (NOT ${src} MATCHES test_stress_openclose)
+ get_filename_component(base ${src} NAME_WE)
+ set(test ${base}.tdb)
+
+ foreach(oldver 4.2.0 5.0.8 5.2.7 6.0.0 6.1.0 6.5.1 6.6.3)
+ set(versiondir ${TOKUDB_DATA}/old-stress-test-envs/${oldver})
+ if (NOT EXISTS "${versiondir}/" AND NOT WARNED_ABOUT_DATA AND CMAKE_PROJECT_NAME STREQUAL TokuDB)
+ message(WARNING "Test data for upgrade tests for version ${oldver} doesn't exist, check out ${versiondir}/*-2000-dir first or upgrade stress tests may fail.")
+ set(WARNED_ABOUT_DATA 1)
+ endif ()
+ foreach(p_or_s pristine stressed)
+ if (NOT (${base} MATCHES test_stress4 AND ${p_or_s} MATCHES stressed))
+ foreach(size 2000)
+ set(oldenvdir "${versiondir}/saved${p_or_s}-${size}-dir")
+ set(envdirbase "${upgrade}_${oldver}_${p_or_s}_${size}_${test}")
+ set(envdir "${envdirbase}.ctest-data")
+ set(testnamebase ydb/${test}/upgrade/${oldver}/${p_or_s}/${size})
+
+ add_test(NAME ${testnamebase}/remove
+ COMMAND ${CMAKE_COMMAND} -E remove_directory "${envdir}")
+ add_test(NAME ${testnamebase}/copy
+ COMMAND ${CMAKE_COMMAND} -E copy_directory "${oldenvdir}" "${envdir}")
+ set_tests_properties(${testnamebase}/copy PROPERTIES
+ DEPENDS ${testnamebase}/remove
+ REQUIRED_FILES "${oldenvdir}")
+
+ add_test(NAME ${testnamebase}
+ COMMAND ${test} --only_stress --num_elements ${size} --num_seconds 600 --join_timeout 7200)
+ setup_toku_test_properties(${testnamebase} "${envdirbase}")
+ set_tests_properties(${testnamebase} PROPERTIES
+ DEPENDS ${testnamebase}/copy
+ REQUIRED_FILES "${envdir}"
+ TIMEOUT 10800)
+ endforeach(size)
+ endif ()
+ endforeach(p_or_s)
+ endforeach(oldver)
+ endif ()
+ endforeach(src)
+
+ if (NOT EXISTS "${TOKUDB_DATA}/test_5902/" AND NOT WARNED_ABOUT_DATA AND CMAKE_PROJECT_NAME STREQUAL TokuDB)
+ message(WARNING "Test data for dump-env.tdb doesn't exist, check out ${TOKUDB_DATA}/test_5902 first or dump-env.tdb may fail.")
+ set(WARNED_ABOUT_DATA 1)
+ endif ()
+ declare_custom_tests(dump-env.tdb)
+ add_test(NAME ydb/dump-env.tdb/remove
+ COMMAND ${CMAKE_COMMAND} -E remove_directory "dump-env.tdb.ctest-data")
+ add_test(NAME ydb/dump-env.tdb/copy
+ COMMAND ${CMAKE_COMMAND} -E copy_directory "${TOKUDB_DATA}/test_5902" "dump-env.tdb.ctest-data")
+ set_tests_properties(ydb/dump-env.tdb/copy PROPERTIES
+ DEPENDS ydb/dump-env.tdb/remove
+ REQUIRED_FILES "${TOKUDB_DATA}/test_5902")
+ add_ydb_test(dump-env.tdb)
+ set_tests_properties(ydb/dump-env.tdb PROPERTIES
+ DEPENDS ydb/dump-env.tdb/copy
+ REQUIRED_FILES "dump-env.tdb.ctest-data")
+
+ ## for some reason this rule doesn't run with the makefile, and it crashes when this rule is used, so this special case is disabled
+ #declare_custom_tests(test_thread_stack.tdb)
+ #add_custom_command(OUTPUT run_test_thread_stack.sh
+ # COMMAND install "${CMAKE_CURRENT_SOURCE_DIR}/run_test_thread_stack.sh" "${CMAKE_CFG_INTDIR}"
+ # MAIN_DEPENDENCY run_test_thread_stack.sh
+ # VERBATIM)
+ #add_custom_target(install_run_test_thread_stack.sh ALL DEPENDS run_test_thread_stack.sh)
+ #add_test(ydb/test_thread_stack.tdb run_test_thread_stack.sh "${CMAKE_CFG_INTDIR}/test_thread_stack.tdb")
+
+ declare_custom_tests(root_fifo_41.tdb)
+ foreach(num RANGE 1 100)
+ add_ydb_test_aux(root_fifo_41_${num}_populate.tdb root_fifo_41.tdb -n ${num} -populate)
+ add_ydb_test_aux(root_fifo_41_${num}_nopopulate.tdb root_fifo_41.tdb -n ${num})
+ endforeach(num)
+
+ add_ydb_test_aux(test3039_small.tdb test3039.tdb -n 1000)
+
+ declare_custom_tests(test_abort4.tdb)
+ foreach(num RANGE -1 19)
+ add_ydb_test_aux(test_abort4_${num}_0.tdb test_abort4.tdb -c 0 -l ${num})
+ add_ydb_test_aux(test_abort4_${num}_1.tdb test_abort4.tdb -c 1 -l ${num})
+ endforeach(num)
+
+ set(old_loader_upgrade_data "${TOKUDB_DATA}/env_preload.4.2.0.emptydictionaries.cleanshutdown")
+ if (NOT EXISTS "${old_loader_upgrade_data}/" AND NOT WARNED_ABOUT_DATA AND CMAKE_PROJECT_NAME STREQUAL TokuDB)
+ message(WARNING "Test data for loader upgrade tests doesn't exist, check out ${old_loader_upgrade_data} first, or loader-stress-test3.tdb may fail.")
+ set(WARNED_ABOUT_DATA 1)
+ endif ()
+ function(add_loader_upgrade_test name bin)
+ add_test(NAME ydb/${name}/remove
+ COMMAND ${CMAKE_COMMAND} -E remove_directory "${name}.ctest-data")
+ add_test(NAME ydb/${name}/copy
+ COMMAND ${CMAKE_COMMAND} -E copy_directory "${old_loader_upgrade_data}" "${name}.ctest-data")
+ set_tests_properties(ydb/${name}/copy PROPERTIES
+ DEPENDS ydb/${name}/remove
+ REQUIRED_FILES "${old_loader_upgrade_data}")
+ add_ydb_test_aux(${name} ${bin} -u ${ARGN})
+ set_tests_properties(ydb/${name} PROPERTIES
+ DEPENDS ydb/${name}/copy
+ REQUIRED_FILES "${name}.ctest-data")
+ endfunction(add_loader_upgrade_test)
+
+ list(REMOVE_ITEM loader_tests loader-stress-test.loader)
+ add_ydb_test_aux(loader-stress-test0.tdb loader-stress-test.tdb -c)
+ add_ydb_test_aux(loader-stress-test1.tdb loader-stress-test.tdb -c -p)
+ add_ydb_test_aux(loader-stress-test2.tdb loader-stress-test.tdb -r 5000 -s)
+ add_loader_upgrade_test(loader-stress-test3.tdb loader-stress-test.tdb -c)
+ add_ydb_test_aux(loader-stress-test4.tdb loader-stress-test.tdb -r 10000000 -c)
+ add_ydb_test_aux(loader-stress-test0z.tdb loader-stress-test.tdb -c -z)
+ add_ydb_test_aux(loader-stress-test1z.tdb loader-stress-test.tdb -c -p -z)
+ add_ydb_test_aux(loader-stress-test2z.tdb loader-stress-test.tdb -r 5000 -s -z)
+ add_loader_upgrade_test(loader-stress-test3z.tdb loader-stress-test.tdb -c -z)
+ add_ydb_test_aux(loader-stress-test4z.tdb loader-stress-test.tdb -r 500000 -c -z --valsize 28)
+
+ list(REMOVE_ITEM loader_tests loader-dup-test.loader)
+ add_ydb_test_aux(loader-dup-test0.tdb loader-dup-test.tdb)
+ add_ydb_test_aux(loader-dup-test1.tdb loader-dup-test.tdb -d 1 -r 500000)
+ add_ydb_test_aux(loader-dup-test2.tdb loader-dup-test.tdb -d 1 -r 1000000)
+ add_ydb_test_aux(loader-dup-test3.tdb loader-dup-test.tdb -d 1 -s -r 100)
+ add_ydb_test_aux(loader-dup-test4.tdb loader-dup-test.tdb -d 1 -s -r 1000)
+ add_ydb_test_aux(loader-dup-test5.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E)
+ add_ydb_test_aux(loader-dup-test0z.tdb loader-dup-test.tdb -z)
+ add_ydb_test_aux(loader-dup-test1z.tdb loader-dup-test.tdb -d 1 -r 500000 -z)
+ add_ydb_test_aux(loader-dup-test2z.tdb loader-dup-test.tdb -d 1 -r 1000000 -z)
+ add_ydb_test_aux(loader-dup-test3z.tdb loader-dup-test.tdb -d 1 -s -r 100 -z)
+ add_ydb_test_aux(loader-dup-test4z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -z)
+ add_ydb_test_aux(loader-dup-test5z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E -z)
+
+ ## as part of #4503, we took out tests 1 and 3
+ list(REMOVE_ITEM loader_tests loader-cleanup-test.loader)
+ add_ydb_test_aux(loader-cleanup-test0.tdb loader-cleanup-test.tdb -s -r 800)
+ #add_ydb_test_aux(loader-cleanup-test1.tdb loader-cleanup-test.tdb -s -r 800 -p)
+ add_ydb_test_aux(loader-cleanup-test2.tdb loader-cleanup-test.tdb -s -r 8000)
+ #add_ydb_test_aux(loader-cleanup-test3.tdb loader-cleanup-test.tdb -s -r 8000 -p)
+ add_ydb_test_aux(loader-cleanup-test0z.tdb loader-cleanup-test.tdb -s -r 800 -z)
+ add_ydb_test_aux(loader-cleanup-test2z.tdb loader-cleanup-test.tdb -s -r 8000 -z)
+
+ declare_custom_tests(keyrange.tdb)
+ add_ydb_test_aux(keyrange-get0.tdb keyrange.tdb --get 0)
+ add_ydb_test_aux(keyrange-get1.tdb keyrange.tdb --get 1)
+ add_ydb_test_aux(keyrange-random-get0.tdb keyrange.tdb --get 0 --random_keys 1)
+ add_ydb_test_aux(keyrange-random-get1.tdb keyrange.tdb --get 1 --random_keys 1)
+ add_ydb_test_aux(keyrange-loader-get0.tdb keyrange.tdb --get 0 --loader 1)
+ add_ydb_test_aux(keyrange-loader-get1.tdb keyrange.tdb --get 1 --loader 1)
+
+ declare_custom_tests(maxsize-for-loader.tdb)
+ add_ydb_test_aux(maxsize-for-loader-A.tdb maxsize-for-loader.tdb -f -c)
+ add_ydb_test_aux(maxsize-for-loader-B.tdb maxsize-for-loader.tdb -c)
+ add_ydb_test_aux(maxsize-for-loader-Az.tdb maxsize-for-loader.tdb -f -z -c)
+ add_ydb_test_aux(maxsize-for-loader-Bz.tdb maxsize-for-loader.tdb -z -c)
+
+ declare_custom_tests(hotindexer-undo-do-test.tdb)
+ file(GLOB hotindexer_tests RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.test")
+ file(GLOB hotindexer_results RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.result")
+ configure_file(run-hotindexer-undo-do-tests.bash . COPYONLY)
+ foreach(result ${hotindexer_results})
+ configure_file(${result} ${result} COPYONLY)
+ endforeach(result)
+ foreach(test ${hotindexer_tests})
+ configure_file(${test} ${test} COPYONLY)
+ add_test(NAME ydb/${test} COMMAND run-hotindexer-undo-do-tests.bash ${test})
+ setup_toku_test_properties(ydb/${test} ${test})
+ endforeach()
+
+ foreach(test ${tdb_tests})
+ add_ydb_test(${test})
+ endforeach(test)
+
+ configure_file(run_recover_test.sh . COPYONLY)
+ foreach(recover_test ${recover_tests})
+ get_filename_component(base ${recover_test} NAME_WE)
+ add_test(NAME ydb/${recover_test}
+ COMMAND run_recover_test.sh $<TARGET_FILE:${base}.tdb> "${recover_test}.ctest-data" $<TARGET_FILE:tdb-recover> $<TARGET_FILE:tokudb_dump>)
+ setup_toku_test_properties(ydb/${recover_test} ${recover_test})
+ endforeach(recover_test)
+
+ configure_file(run_abortrecover_test.sh . COPYONLY)
+ foreach(abortrecover_test ${abortrecover_tests})
+ get_filename_component(base ${abortrecover_test} NAME_WE)
+ add_test(NAME ydb/${abortrecover_test}
+ COMMAND run_abortrecover_test.sh $<TARGET_FILE:${base}.tdb>)
+ setup_toku_test_properties(ydb/${abortrecover_test} ${abortrecover_test})
+ endforeach(abortrecover_test)
+ ## alternate implementation; doesn't work because the abort phase crashes and we can't tell cmake that the crash is expected
+ # foreach(abortrecover_test ${abortrecover_tests})
+ # get_filename_component(base ${abortrecover_test} NAME_WE)
+ # set(test ${base}.tdb)
+ # add_test(NAME ydb/${test}/abort
+ # COMMAND ${test} --test)
+ # setup_toku_test_properties(ydb/${test}/abort ${abortrecover_test})
+ # set_tests_properties(ydb/${test}/abort PROPERTIES WILL_FAIL TRUE)
+
+ # add_test(NAME ydb/${test}/recover
+ # COMMAND ${test} --recover)
+ # setup_toku_test_properties(ydb/${test}/recover ${abortrecover_test})
+ # set_tests_properties(ydb/${test}/recover PROPERTIES
+ # DEPENDS ydb/${test}/abort
+ # REQUIRED_FILES "${abortrecover_test}.ctest-data")
+ # endforeach(abortrecover_test)
+
+ foreach(loader_test ${loader_tests})
+ get_filename_component(base ${loader_test} NAME_WE)
+ add_ydb_test_aux(${base}.nop.loader ${base}.tdb)
+ add_ydb_test_aux(${base}.p.loader ${base}.tdb -p)
+ add_ydb_test_aux(${base}.comp.loader ${base}.tdb -z)
+ if("${tdb_tests_that_should_fail}" MATCHES "${base}.loader")
+ list(REMOVE_ITEM tdb_tests_that_should_fail ${base}.loader)
+ list(APPEND tdb_tests_that_should_fail ${base}.nop.loader ${base}.p.loader ${base}.comp.loader)
+ endif()
+ endforeach(loader_test)
+
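+ # the expected-failure tests were registered as ydb/<name>, so add that prefix before marking them WILL_FAIL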
+ set(tdb_tests_that_should_fail "ydb/${tdb_tests_that_should_fail}")
+ string(REGEX REPLACE ";" ";ydb/" tdb_tests_that_should_fail "${tdb_tests_that_should_fail}")
+ set_tests_properties(${tdb_tests_that_should_fail} PROPERTIES WILL_FAIL TRUE)
+
+ ## give some tests that would otherwise time out 1 hour to complete
+ set(long_tests
+ ydb/drd_test_groupcommit_count.tdb
+ ydb/env-put-multiple.tdb
+ ydb/filesize.tdb
+ ydb/loader-cleanup-test0.tdb
+ ydb/loader-cleanup-test0z.tdb
+ ydb/manyfiles.tdb
+ ydb/recover-loader-test.abortrecover
+ ydb/recovery_fileops_stress.tdb
+ ydb/root_fifo_1.tdb
+ ydb/root_fifo_2.tdb
+ ydb/root_fifo_31.tdb
+ ydb/root_fifo_32.tdb
+ ydb/shutdown-3344.tdb
+ ydb/stat64-create-modify-times.tdb
+ ydb/test1572.tdb
+ ydb/test_abort4_19_0.tdb
+ ydb/test_abort4_19_1.tdb
+ ydb/test_abort5.tdb
+ ydb/test_archive1.tdb
+ ydb/test_logmax.tdb
+ ydb/test_query.tdb
+ ydb/test_txn_abort5.tdb
+ ydb/test_txn_abort5a.tdb
+ ydb/test_txn_abort6.tdb
+ ydb/test_txn_nested2.tdb
+ ydb/test_txn_nested4.tdb
+ ydb/test_txn_nested5.tdb
+ ydb/test_update_broadcast_stress.tdb
+ )
+ set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600)
+ ## some take even longer, with valgrind
+ set(extra_long_tests
+ ydb/drd_test_4015.tdb
+ ydb/hotindexer-with-queries.tdb
+ ydb/hot-optimize-table-tests.tdb
+ ydb/loader-cleanup-test2.tdb
+ ydb/loader-cleanup-test2z.tdb
+ ydb/loader-dup-test0.tdb
+ ydb/loader-stress-del.nop.loader
+ ydb/loader-stress-del.p.loader
+ ydb/loader-stress-del.comp.loader
+ ydb/test3039.tdb
+ ydb/test_update_stress.tdb
+ )
+ set_tests_properties(${extra_long_tests} PROPERTIES TIMEOUT 7200)
+ ## these really take a long time with valgrind
+ set(phenomenally_long_tests
+ ydb/checkpoint_stress.tdb
+ ydb/loader-stress-test4.tdb
+ ydb/loader-stress-test4z.tdb
+ ydb/recover_stress.tdb
+ ydb/test3529.tdb
+ ydb/test_insert_unique.tdb
+ )
+ set_tests_properties(${phenomenally_long_tests} PROPERTIES TIMEOUT 14400)
+endif(BUILD_TESTING OR BUILD_SRC_TESTS)
diff --git a/storage/tokudb/PerconaFT/src/tests/big-nested-abort-abort.cc b/storage/tokudb/PerconaFT/src/tests/big-nested-abort-abort.cc
new file mode 100644
index 00000000..796ce7dd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/big-nested-abort-abort.cc
@@ -0,0 +1,151 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Test to see if a big nested transaction (so big that its rollbacks spill into a file)
+ * can commit properly.
+ * Four Tests:
+ * big child aborts, parent aborts (This test)
+ * big child aborts, parent commits
+ * big child commits, parent aborts
+ * big child commits, parent commits
+ */
+
+#include <db.h>
+#include <sys/stat.h>
+
+int N = 50000;
+
+static DB_ENV *env;
+static DB *db;
+static DB_TXN *xchild, *xparent;
+
+static void insert (int i, int j) {
+ char hello[30], there[30];
+ DBT key,data;
+ if (verbose) printf("Insert %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", j);
+ int r = db->put(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
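+// expect is the return code we expect from db->get; expectj selects the "there%d" value we expect when the key is found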
+static void lookup (int i, int expect, int expectj) {
+ char hello[30], there[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ memset(&data, 0, sizeof(data));
+ if (verbose) printf("Looking up %d (expecting %s)\n", i, expect==0 ? "to find" : "not to find");
+ int r = db->get(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ &data,
+ 0);
+ assert(expect==r);
+ if (expect==0) {
+ CKERR(r);
+ snprintf(there, sizeof(there), "there%d", expectj);
+ assert(data.size==strlen(there)+1);
+ assert(strcmp((char*)data.data, there)==0);
+ }
+}
+
+static void
+test_abort_abort (void) {
+ int i, r;
+ assert(N%2==0); // N must be even (and large enough for rollbacks to spill) for this test to work
+ r=env->txn_begin(env, 0, &xchild, 0); CKERR(r);
+ for (i=0; i<N/2; i++) {
+ insert(i*2,i*4+1);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &xparent, 0); CKERR(r);
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ insert(i, i);
+ }
+ r=xchild->abort(xchild); CKERR(r);
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ lookup(i, (i%2==0)?0:DB_NOTFOUND, i*2+1);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ r=xparent->abort(xparent); CKERR(r);
+ r=env->txn_begin(env, 0, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ lookup(i, (i%2==0)?0:DB_NOTFOUND, i*2+1);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+}
+
+static void
+setup (void) {
+ DB_TXN *txn;
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_env_create(&env, 0); CKERR(r);
+
+ r=env->set_redzone(env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static void
+test_shutdown (void) {
+ int r;
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ test_abort_abort();
+ test_shutdown();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/big-nested-abort-commit.cc b/storage/tokudb/PerconaFT/src/tests/big-nested-abort-commit.cc
new file mode 100644
index 00000000..c3ebb272
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/big-nested-abort-commit.cc
@@ -0,0 +1,149 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Test to see if a big nested transaction (so big that its rollbacks spill into a file)
+ * can commit properly.
+ * Four Tests:
+ * big child aborts, parent aborts
+ * big child aborts, parent commits (This test)
+ * big child commits, parent aborts
+ * big child commits, parent commits
+ */
+
+#include <db.h>
+#include <sys/stat.h>
+
+int N = 50000;
+
+static DB_ENV *env;
+static DB *db;
+static DB_TXN *xchild, *xparent;
+
+static void insert (int i, int j) {
+ char hello[30], there[30];
+ DBT key,data;
+ if (verbose) printf("Insert %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", j);
+ int r = db->put(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void lookup (int i, int expect, int expectj) {
+ char hello[30], there[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ memset(&data, 0, sizeof(data));
+ if (verbose) printf("Looking up %d (expecting %s)\n", i, expect==0 ? "to find" : "not to find");
+ int r = db->get(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ &data,
+ 0);
+ assert(expect==r);
+ if (expect==0) {
+ CKERR(r);
+ snprintf(there, sizeof(there), "there%d", expectj);
+ assert(data.size==strlen(there)+1);
+ assert(strcmp((char*)data.data, there)==0);
+ }
+}
+
+static void
+test_abort_commit (void) {
+ int i, r;
+ r=env->txn_begin(env, 0, &xchild, 0); CKERR(r);
+ for (i=0; i<N/2; i++) {
+ insert(i*2,i*4+1);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &xparent, 0); CKERR(r);
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ insert(i, i);
+ }
+ r=xchild->abort(xchild); CKERR(r);
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ lookup(i, (i%2==0)?0:DB_NOTFOUND, i*2+1);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ r=xparent->commit(xparent, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ lookup(i, (i%2==0)?0:DB_NOTFOUND, i*2+1);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+}
+
+static void
+setup (void) {
+ DB_TXN *txn;
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_env_create(&env, 0); CKERR(r);
+
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static void
+test_shutdown (void) {
+ int r;
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ test_abort_commit();
+ test_shutdown();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/big-nested-commit-abort.cc b/storage/tokudb/PerconaFT/src/tests/big-nested-commit-abort.cc
new file mode 100644
index 00000000..55ac00f1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/big-nested-commit-abort.cc
@@ -0,0 +1,144 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Test to see if a big nested transaction (so big that its rollbacks spill into a file)
+ * can commit properly.
+ * Four Tests:
+ * big child aborts, parent aborts
+ * big child aborts, parent commits
+ * big child commits, parent aborts (This test)
+ * big child commits, parent commits
+ */
+
+#include <db.h>
+#include <sys/stat.h>
+
+int N = 50000;
+
+static DB_ENV *env;
+static DB *db;
+static DB_TXN *xchild, *xparent;
+
+static void insert (int i) {
+ char hello[30], there[30];
+ DBT key,data;
+ if (verbose) printf("Insert %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ int r = db->put(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void lookup (int i, int expect, int expectj) {
+ char hello[30], there[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ memset(&data, 0, sizeof(data));
+ if (verbose) printf("Looking up %d (expecting %s)\n", i, expect==0 ? "to find" : "not to find");
+ int r = db->get(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ &data,
+ 0);
+ assert(expect==r);
+ if (expect==0) {
+ CKERR(r);
+ snprintf(there, sizeof(there), "there%d", expectj);
+ assert(data.size==strlen(there)+1);
+ assert(strcmp((char*)data.data, there)==0);
+ }
+}
+
+static void
+test_commit_abort (void) {
+ int i, r;
+ r=env->txn_begin(env, 0, &xparent, 0); CKERR(r);
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ insert(i);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ lookup(i, 0, i);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ r=xparent->abort(xparent); CKERR(r);
+ r=env->txn_begin(env, 0, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ lookup(i, DB_NOTFOUND, 0);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+}
+
+static void
+setup (void) {
+ DB_TXN *txn;
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_env_create(&env, 0); CKERR(r);
+
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static void
+test_shutdown (void) {
+ int r;
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ test_commit_abort();
+ test_shutdown();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/big-nested-commit-commit.cc b/storage/tokudb/PerconaFT/src/tests/big-nested-commit-commit.cc
new file mode 100644
index 00000000..414ad3ef
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/big-nested-commit-commit.cc
@@ -0,0 +1,145 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Test to see if a big nested transaction (so big that its rollbacks spill into a file)
+ * can commit properly.
+ * Four Tests:
+ * big child aborts, parent aborts
+ * big child aborts, parent commits
+ * big child commits, parent aborts
+ * big child commits, parent commits (This test)
+ */
+
+#include <db.h>
+#include <sys/stat.h>
+
+static DB_ENV *env;
+static DB *db;
+static DB_TXN *xchild, *xparent;
+
+static void insert (int i) {
+ char hello[30], there[30];
+ DBT key,data;
+ if (verbose) printf("Insert %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ int r = db->put(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void lookup (int i, int expect, int expectj) {
+ char hello[30], there[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ memset(&data, 0, sizeof(data));
+ if (verbose) printf("Looking up %d (expecting %s)\n", i, expect==0 ? "to find" : "not to find");
+ int r = db->get(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ &data,
+ 0);
+ assert(expect==r);
+ if (expect==0) {
+ CKERR(r);
+ snprintf(there, sizeof(there), "there%d", expectj);
+ assert(data.size==strlen(there)+1);
+ assert(strcmp((char*)data.data, there)==0);
+ }
+}
+
+int N = 50000;
+
+static void
+test_commit_commit (void) {
+ int i, r;
+ r=env->txn_begin(env, 0, &xparent, 0); CKERR(r);
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ insert(i);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ lookup(i, 0, i);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ r=xparent->commit(xparent, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ lookup(i, 0, i);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+}
+
+static void
+setup (void) {
+ DB_TXN *txn;
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_env_create(&env, 0); CKERR(r);
+ r=env->set_redzone(env, 0); CKERR(r);
+
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static void
+test_shutdown (void) {
+ int r;
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ test_commit_commit();
+ test_shutdown();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/big-shutdown.cc b/storage/tokudb/PerconaFT/src/tests/big-shutdown.cc
new file mode 100644
index 00000000..9fb84816
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/big-shutdown.cc
@@ -0,0 +1,136 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Create a lot of dirty nodes, kick off a checkpoint, and close the environment.
+// Measure the time it takes to close the environment since we are speeding up that
+// function.
+
+#include "test.h"
+#include <toku_time.h>
+
+// Insert max_rows key/val pairs into the db
+static void do_inserts(DB_ENV *env, DB *db, uint64_t max_rows, size_t val_size) {
+ char val_data[val_size]; memset(val_data, 0, val_size);
+ int r;
+ DB_TXN *txn = nullptr;
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ for (uint64_t i = 1; i <= max_rows; i++) {
+ // pick a sequential key, though the key order does not matter for this test.
+ uint64_t k[2] = {
+ htonl(i), random64(),
+ };
+ DBT key = { .data = k, .size = sizeof k };
+ DBT val = { .data = val_data, .size = (uint32_t) val_size };
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+
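+ // commit every 1000 puts and start a new transaction so no single transaction gets too large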
+ if ((i % 1000) == 0) {
+ if (verbose)
+ fprintf(stderr, "put %" PRIu64 "\n", i);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+ }
+ }
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+}
+
+// Create a cache with a lot of dirty nodes, kick off a checkpoint, and measure the time to
+// close the environment.
+static void big_shutdown(void) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_cachesize(env, 8, 0, 1);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB *db = nullptr;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ do_inserts(env, db, 1000000, 1024);
+
+ // kick the checkpoint thread by shortening the checkpoint period
+ if (verbose)
+ fprintf(stderr, "env->checkpointing_set_period\n");
+ r = env->checkpointing_set_period(env, 2);
+ CKERR(r);
+ sleep(3);
+
+ if (verbose)
+ fprintf(stderr, "db->close\n");
+ r = db->close(db, 0);
+ CKERR(r);
+
+ // measure the shutdown time
+ uint64_t tstart = toku_current_time_microsec();
+ if (verbose)
+ fprintf(stderr, "env->close\n");
+ r = env->close(env, 0);
+ CKERR(r);
+ uint64_t tend = toku_current_time_microsec();
+ if (verbose)
+ fprintf(stderr, "env->close complete %" PRIu64 " sec\n", (tend - tstart)/1000000);
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+
+ // init the env directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // run the test
+ big_shutdown();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/bigtxn27.cc b/storage/tokudb/PerconaFT/src/tests/bigtxn27.cc
new file mode 100644
index 00000000..7a003fd2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/bigtxn27.cc
@@ -0,0 +1,172 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <pthread.h>
+
+// verify that a commit of a big txn does not block the commits of other txns
+// expected order: writer commit (test_state 0) happens before bigtxn commit (1), which happens before checkpoint (2)
+static int test_state = 0;
+
+static void *checkpoint_thread(void *arg) {
+ sleep(1);
+ DB_ENV *env = (DB_ENV *) arg;
+ printf("%s start\n", __FUNCTION__);
+ int r = env->txn_checkpoint(env, 0, 0, 0);
+ assert(r == 0);
+ printf("%s done\n", __FUNCTION__);
+ int old_state = toku_sync_fetch_and_add(&test_state, 1);
+ assert(old_state == 2);
+ return arg;
+}
+
+struct writer_arg {
+ DB_ENV *env;
+ DB *db;
+ int k;
+};
+
+static void *w_thread(void *arg) {
+ sleep(2);
+ struct writer_arg *warg = (struct writer_arg *) arg;
+ DB_ENV *env = warg->env;
+ DB *db = warg->db;
+ int k = warg->k;
+ printf("%s start\n", __FUNCTION__);
+ int r;
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ assert(r == 0);
+ if (1) {
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &k, .size = sizeof k };
+ r = db->put(db, txn, &key, &val, 0);
+ assert(r == 0);
+ }
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+ printf("%s done\n", __FUNCTION__);
+ int old_state = toku_sync_fetch_and_add(&test_state, 1);
+ assert(old_state == 0);
+ return arg;
+}
+
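+// progress callback for the big commit; the sleep slows the commit so the writer and checkpoint threads run while it is in progress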
+static void bigtxn_progress(TOKU_TXN_PROGRESS progress, void *extra) {
+ printf("%s %" PRIu64 " %" PRIu64 " %p\n", __FUNCTION__, progress->entries_processed, progress->entries_total, extra);
+ sleep(1);
+}
+
+int test_main (int argc, char *const argv[]) {
+ int r;
+ int N = 25000;
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "--N") == 0 && i+1 < argc) {
+ N = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+
+ // avoid locktree escalation by giving the lock tree a large enough memory limit
+ r = env->set_lk_max_memory(env, 128*1024*1024);
+ assert(r == 0);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+
+ r = db->open(db, NULL, "testit", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_TXN *bigtxn = NULL;
+ r = env->txn_begin(env, NULL, &bigtxn, 0);
+ assert(r == 0);
+
+ // use a big key so that the rollback log spills
+ char k[1024]; memset(k, 0, sizeof k);
+ char v[8]; memset(v, 0, sizeof v);
+
+ for (int i = 0; i < N; i++) {
+ memcpy(k, &i, sizeof i);
+ memcpy(v, &i, sizeof i);
+ DBT key = { .data = k, .size = sizeof k };
+ DBT val = { .data = v, .size = sizeof v };
+ r = db->put(db, bigtxn, &key, &val, 0);
+ assert(r == 0);
+ if ((i % 10000) == 0)
+ printf("put %d\n", i);
+ }
+
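+ // start the checkpoint and writer threads before committing the big txn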
+ pthread_t checkpoint_tid = 0;
+ r = pthread_create(&checkpoint_tid, NULL, checkpoint_thread, env);
+ assert(r == 0);
+
+ pthread_t w_tid = 0;
+ struct writer_arg w_arg = { env, db, N };
+ r = pthread_create(&w_tid, NULL, w_thread, &w_arg);
+ assert(r == 0);
+
+ r = bigtxn->commit_with_progress(bigtxn, 0, bigtxn_progress, NULL);
+ assert(r == 0);
+ int old_state = toku_sync_fetch_and_add(&test_state, 1);
+ assert(old_state == 1);
+
+ void *ret;
+ r = pthread_join(w_tid, &ret);
+ assert(r == 0);
+ r = pthread_join(checkpoint_tid, &ret);
+ assert(r == 0);
+
+ r = db->close(db, 0);
+ assert(r == 0);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blackhole.cc b/storage/tokudb/PerconaFT/src/tests/blackhole.cc
new file mode 100644
index 00000000..1bb11af0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blackhole.cc
@@ -0,0 +1,129 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test that a db ignores insert messages in blackhole mode
+
+#include "test.h"
+#include <util/dbt.h>
+
+static DB *db;
+static DB *blackhole_db;
+static DB_ENV *env;
+
+static int num_inserts = 10000;
+
+static void fill_dbt(DBT *dbt, void *data, size_t size) {
+ dbt->data = data;
+ dbt->size = dbt->ulen = size;
+ dbt->flags = DB_DBT_USERMEM;
+}
+
+static void setup (bool use_txns) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_env_create(&env, 0); CKERR(r);
+ int txnflags = use_txns ? (DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN) : 0;
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_PRIVATE|txnflags, 0777); CKERR(r);
+
+ // create a regular db and a blackhole db
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db_create(&blackhole_db, env, 0); CKERR(r);
+ r = db->open(db, NULL, "test.db", 0, DB_BTREE,
+ DB_CREATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = blackhole_db->open(blackhole_db, NULL, "blackhole.db", 0, DB_BTREE,
+ DB_CREATE | DB_BLACKHOLE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+}
+
+static void cleanup (void) {
+ int r;
+ r = db->close(db, 0); CKERR(r);
+ r = blackhole_db->close(blackhole_db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void test_blackhole(void) {
+ int r = 0;
+
+ for (int i = 0; i < num_inserts; i++) {
+ int k = random();
+ int v = k + 100;
+ DBT key, value;
+ fill_dbt(&key, &k, sizeof k);
+ fill_dbt(&value, &v, sizeof v);
+
+ // put a random key into the regular db.
+ r = db->put(db, NULL, &key, &value, 0);
+ assert(r == 0);
+
+ // put that key into the blackhole db.
+ r = blackhole_db->put(blackhole_db, NULL, &key, &value, 0);
+ assert(r == 0);
+
+ // we should be able to find this key in the regular db
+ int get_v;
+ DBT get_value;
+ fill_dbt(&get_value, &get_v, sizeof get_v);
+ r = db->get(db, NULL, &key, &get_value, 0);
+ assert(r == 0);
+ assert(*(int *)get_value.data == v);
+ assert(get_value.size == sizeof v);
+
+ // we shouldn't be able to get it back from the blackhole
+ r = blackhole_db->get(blackhole_db, NULL, &key, &get_value, 0);
+ assert(r == DB_NOTFOUND);
+ }
+}
+
+int test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ // without txns
+ setup(false);
+ test_blackhole();
+ cleanup();
+
+ // with txns
+ setup(true);
+ test_blackhole();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-first-empty.cc b/storage/tokudb/PerconaFT/src/tests/blocking-first-empty.cc
new file mode 100644
index 00000000..3b53911e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-first-empty.cc
@@ -0,0 +1,183 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that cursor first on an empty tree with a write lock suspends the conflicting threads.
+
+#include "test.h"
+#include <toku_pthread.h>
+
+struct my_callback_context {
+ DBT key;
+ DBT val;
+};
+
+static int blocking_first_callback(DBT const *a UU(), DBT const *b UU(), void *e UU()) {
+ DBT const *found_key = a;
+ DBT const *found_val = b;
+ struct my_callback_context *context = (struct my_callback_context *) e;
+ copy_dbt(&context->key, found_key);
+ copy_dbt(&context->val, found_val);
+ return 0;
+}
+
+static void blocking_first(DB_ENV *db_env, DB *db, uint64_t nrows, long sleeptime) {
+ int r;
+
+ struct my_callback_context context;
+ dbt_init_realloc(&context.key);
+ dbt_init_realloc(&context.val);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0); // get a write lock on -inf +inf
+ r = cursor->c_getf_first(cursor, DB_RMW, blocking_first_callback, &context); assert(r == DB_NOTFOUND);
+
+ usleep(sleeptime);
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ if (verbose)
+ printf("%lu %" PRIu64 "\n", (unsigned long) toku_pthread_self(), i);
+ }
+
+ toku_free(context.key.data);
+ toku_free(context.val.data);
+}
+
+struct blocking_first_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+};
+
+static void *blocking_first_thread(void *arg) {
+ struct blocking_first_args *a = (struct blocking_first_args *) arg;
+ blocking_first(a->db_env, a->db, a->nrows, a->sleeptime);
+ return arg;
+}
+
+static void run_test(DB_ENV *db_env, DB *db, int nthreads, uint64_t nrows, long sleeptime) {
+ int r;
+ toku_pthread_t tids[nthreads];
+ struct blocking_first_args a = {db_env, db, nrows, sleeptime};
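+ // run one worker inline on this thread and the remaining nthreads-1 workers on their own threads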
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, blocking_first_thread, &a);
+ assert(r == 0);
+ }
+ blocking_first(db_env, db, nrows, sleeptime);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 10;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
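+    // lock wait timeout is given in milliseconds: 30 * 1000 = 30 seconds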
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ run_test(db_env, db, nthreads, nrows, sleeptime);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-first.cc b/storage/tokudb/PerconaFT/src/tests/blocking-first.cc
new file mode 100644
index 00000000..c104eabd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-first.cc
@@ -0,0 +1,203 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that cursor first with a write lock suspends the conflicting threads.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static void populate(DB_ENV *db_env, DB *db, uint64_t nrows) {
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
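+    // keys are stored big-endian (htonl) so the default memcmp-style ordering matches numeric order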
+ for (uint64_t i = 0; i < nrows; i++) {
+
+ uint64_t k = htonl(i);
+ uint64_t v = i;
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &v, .size = sizeof v };
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0);
+}
+
+struct my_callback_context {
+ DBT key;
+ DBT val;
+};
+
+static int blocking_first_callback(DBT const *a UU(), DBT const *b UU(), void *e UU()) {
+ DBT const *found_key = a;
+ DBT const *found_val = b;
+ struct my_callback_context *context = (struct my_callback_context *) e;
+ copy_dbt(&context->key, found_key);
+ copy_dbt(&context->val, found_val);
+ return 0;
+}
+
+static void blocking_first(DB_ENV *db_env, DB *db, uint64_t nrows, long sleeptime) {
+ int r;
+
+ struct my_callback_context context;
+ dbt_init_realloc(&context.key);
+ dbt_init_realloc(&context.val);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0); // get a write lock on -inf ... 0
+ r = cursor->c_getf_first(cursor, DB_RMW, blocking_first_callback, &context); assert(r == 0);
+ usleep(sleeptime);
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ if (verbose)
+ printf("%lu %" PRIu64 "\n", (unsigned long) toku_pthread_self(), i);
+ }
+
+ toku_free(context.key.data);
+ toku_free(context.val.data);
+}
+
+struct blocking_first_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+};
+
+static void *blocking_first_thread(void *arg) {
+ struct blocking_first_args *a = (struct blocking_first_args *) arg;
+ blocking_first(a->db_env, a->db, a->nrows, a->sleeptime);
+ return arg;
+}
+
+static void run_test(DB_ENV *db_env, DB *db, int nthreads, uint64_t nrows, long sleeptime) {
+ int r;
+ toku_pthread_t tids[nthreads];
+ struct blocking_first_args a = {db_env, db, nrows, sleeptime};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, blocking_first_thread, &a);
+ assert(r == 0);
+ }
+ blocking_first(db_env, db, nrows, sleeptime);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 10;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // populate the db
+ populate(db_env, db, nrows);
+
+ run_test(db_env, db, nthreads, nrows, sleeptime);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-last.cc b/storage/tokudb/PerconaFT/src/tests/blocking-last.cc
new file mode 100644
index 00000000..3a702e3f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-last.cc
@@ -0,0 +1,203 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that cursor last operations with conflicting locks suspend the calling threads.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static void populate(DB_ENV *db_env, DB *db, uint64_t nrows) {
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+
+ uint64_t k = htonl(i);
+ uint64_t v = i;
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &v, .size = sizeof v };
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0);
+}
+
+struct my_callback_context {
+ DBT key;
+ DBT val;
+};
+
+static int blocking_last_callback(DBT const *a UU(), DBT const *b UU(), void *e UU()) {
+ DBT const *found_key = a;
+ DBT const *found_val = b;
+ struct my_callback_context *context = (struct my_callback_context *) e;
+ copy_dbt(&context->key, found_key);
+ copy_dbt(&context->val, found_val);
+ return 0;
+}
+
+static void blocking_last(DB_ENV *db_env, DB *db, uint64_t nrows, long sleeptime) {
+ int r;
+
+ struct my_callback_context context;
+ dbt_init_realloc(&context.key);
+ dbt_init_realloc(&context.val);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+        r = db->cursor(db, txn, &cursor, 0); assert(r == 0); // get a write lock on the last key ... +inf
+ r = cursor->c_getf_last(cursor, DB_RMW, blocking_last_callback, &context); assert(r == 0);
+ usleep(sleeptime);
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ if (verbose)
+ printf("%lu %" PRIu64 "\n", (unsigned long) toku_pthread_self(), i);
+ }
+
+ toku_free(context.key.data);
+ toku_free(context.val.data);
+}
+
+struct blocking_last_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+};
+
+static void *blocking_last_thread(void *arg) {
+ struct blocking_last_args *a = (struct blocking_last_args *) arg;
+ blocking_last(a->db_env, a->db, a->nrows, a->sleeptime);
+ return arg;
+}
+
+static void run_test(DB_ENV *db_env, DB *db, int nthreads, uint64_t nrows, long sleeptime) {
+ int r;
+ toku_pthread_t tids[nthreads];
+ struct blocking_last_args a = {db_env, db, nrows, sleeptime};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, blocking_last_thread, &a);
+ assert(r == 0);
+ }
+ blocking_last(db_env, db, nrows, sleeptime);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 10;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // populate the db
+ populate(db_env, db, nrows);
+
+ run_test(db_env, db, nthreads, nrows, sleeptime);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-next-prev-deadlock.cc b/storage/tokudb/PerconaFT/src/tests/blocking-next-prev-deadlock.cc
new file mode 100644
index 00000000..781708cc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-next-prev-deadlock.cc
@@ -0,0 +1,268 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that two transactions doing cursor next and prev operations detect a deadlock when
+// using write locking cursor operations.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static uint64_t get_key(DBT *key) {
+ uint64_t k = 0;
+ assert(key->size == sizeof k);
+ memcpy(&k, key->data, key->size);
+ return htonl(k);
+}
+
+static void populate(DB_ENV *db_env, DB *db, uint64_t nrows) {
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+
+ uint64_t k = htonl(i);
+ uint64_t v = i;
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &v, .size = sizeof v };
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0);
+}
+
+struct my_callback_context {
+ DBT key;
+ DBT val;
+};
+
+static int blocking_next_callback(DBT const *a UU(), DBT const *b UU(), void *e UU()) {
+ DBT const *found_key = a;
+ DBT const *found_val = b;
+ struct my_callback_context *context = (struct my_callback_context *) e;
+ copy_dbt(&context->key, found_key);
+ copy_dbt(&context->val, found_val);
+ return 0;
+}
+
+static void blocking_next(DB_ENV *db_env, DB *db, uint64_t nrows UU(), long sleeptime) {
+ int r;
+
+ struct my_callback_context context;
+ dbt_init_realloc(&context.key);
+ dbt_init_realloc(&context.val);
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+
+ uint64_t i;
+ for (i = 0; ; i++) {
+ r = cursor->c_getf_next(cursor, DB_RMW, blocking_next_callback, &context);
+ if (r != 0)
+ break;
+ if (verbose)
+ printf("%lu next %" PRIu64 "\n", (unsigned long) toku_pthread_self(), get_key(&context.key));
+ usleep(sleeptime);
+ }
+
+ if (verbose)
+ printf("%lu next=%d\n", (unsigned long) toku_pthread_self(), r);
+ assert(r == DB_NOTFOUND || r == DB_LOCK_DEADLOCK);
+
+ int rr = cursor->c_close(cursor); assert(rr == 0);
+
+ if (r == DB_NOTFOUND) {
+ if (verbose) printf("%lu commit\n", (unsigned long) toku_pthread_self());
+ r = txn->commit(txn, 0);
+ } else {
+ if (verbose) printf("%lu abort\n", (unsigned long) toku_pthread_self());
+ r = txn->abort(txn);
+ }
+ assert(r == 0);
+
+ toku_free(context.key.data);
+ toku_free(context.val.data);
+}
+
+static void blocking_prev(DB_ENV *db_env, DB *db, uint64_t nrows UU(), long sleeptime) {
+ int r;
+
+ struct my_callback_context context;
+ dbt_init_realloc(&context.key);
+ dbt_init_realloc(&context.val);
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+
+ uint64_t i;
+ for (i = 0; ; i++) {
+ r = cursor->c_getf_prev(cursor, DB_RMW, blocking_next_callback, &context);
+ if (r != 0)
+ break;
+ if (verbose)
+ printf("%lu prev %" PRIu64 "\n", (unsigned long) toku_pthread_self(), get_key(&context.key));
+ usleep(sleeptime);
+ }
+
+ if (verbose)
+ printf("%lu prev=%d\n", (unsigned long) toku_pthread_self(), r);
+ assert(r == DB_NOTFOUND || r == DB_LOCK_DEADLOCK);
+
+ int rr = cursor->c_close(cursor); assert(rr == 0);
+
+ if (r == DB_NOTFOUND) {
+ if (verbose) printf("%lu commit\n", (unsigned long) toku_pthread_self());
+ r = txn->commit(txn, 0);
+ } else {
+ if (verbose) printf("%lu abort\n", (unsigned long) toku_pthread_self());
+ r = txn->abort(txn);
+ }
+ assert(r == 0);
+
+ toku_free(context.key.data);
+ toku_free(context.val.data);
+}
+
+struct blocking_next_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+};
+
+static void *blocking_next_thread(void *arg) {
+ struct blocking_next_args *a = (struct blocking_next_args *) arg;
+ blocking_next(a->db_env, a->db, a->nrows, a->sleeptime);
+ return arg;
+}
+
+static void run_test(DB_ENV *db_env, DB *db, int nthreads, uint64_t nrows, long sleeptime) {
+ int r;
+ toku_pthread_t tids[nthreads];
+ struct blocking_next_args a = {db_env, db, nrows, sleeptime};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, blocking_next_thread, &a);
+ assert(r == 0);
+ }
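+    // the main thread scans backward while the created threads scan forward; their DB_RMW lock ranges meet in the middle, so one side gets DB_LOCK_DEADLOCK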
+ blocking_prev(db_env, db, nrows, sleeptime);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 10;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // populate the db
+ populate(db_env, db, nrows);
+
+ run_test(db_env, db, nthreads, nrows, sleeptime);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-next-prev.cc b/storage/tokudb/PerconaFT/src/tests/blocking-next-prev.cc
new file mode 100644
index 00000000..aa179e88
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-next-prev.cc
@@ -0,0 +1,274 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that two transactions doing cursor next and prev operations on a tree do not conflict.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static uint64_t get_key(DBT *key) {
+ uint64_t k = 0;
+ assert(key->size == sizeof k);
+ memcpy(&k, key->data, key->size);
+ return htonl(k);
+}
+
+static void populate(DB_ENV *db_env, DB *db, uint64_t nrows) {
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+
+ uint64_t k = htonl(i);
+ uint64_t v = i;
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &v, .size = sizeof v };
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0);
+}
+
+struct my_callback_context {
+ DBT key;
+ DBT val;
+};
+
+static int blocking_next_callback(DBT const *a UU(), DBT const *b UU(), void *e UU()) {
+ DBT const *found_key = a;
+ DBT const *found_val = b;
+ struct my_callback_context *context = (struct my_callback_context *) e;
+ copy_dbt(&context->key, found_key);
+ copy_dbt(&context->val, found_val);
+ return 0;
+}
+
+static void blocking_next(DB_ENV *db_env, DB *db, uint64_t nrows UU(), long sleeptime) {
+ int r;
+
+ struct my_callback_context context;
+ dbt_init_realloc(&context.key);
+ dbt_init_realloc(&context.val);
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+
+ uint64_t i;
+ for (i = 0; ; i++) {
+ r = cursor->c_getf_next(cursor, 0, blocking_next_callback, &context);
+ if (r != 0)
+ break;
+ if (verbose)
+ printf("%lu next %" PRIu64 "\n", (unsigned long) toku_pthread_self(), get_key(&context.key));
+ usleep(sleeptime);
+ }
+
+ if (verbose)
+ printf("%lu next=%d\n", (unsigned long) toku_pthread_self(), r);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ assert(r == DB_NOTFOUND || r == DB_LOCK_DEADLOCK || r == DB_LOCK_NOTGRANTED);
+#else
+ assert(r == DB_NOTFOUND);
+#endif
+
+ int rr = cursor->c_close(cursor); assert(rr == 0);
+
+ if (r == DB_NOTFOUND) {
+ if (verbose) printf("%lu commit\n", (unsigned long) toku_pthread_self());
+ r = txn->commit(txn, 0);
+ } else {
+ if (verbose) printf("%lu abort\n", (unsigned long) toku_pthread_self());
+ r = txn->abort(txn);
+ }
+ assert(r == 0);
+
+ toku_free(context.key.data);
+ toku_free(context.val.data);
+}
+
+static void blocking_prev(DB_ENV *db_env, DB *db, uint64_t nrows UU(), long sleeptime) {
+ int r;
+
+ struct my_callback_context context;
+ dbt_init_realloc(&context.key);
+ dbt_init_realloc(&context.val);
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+
+ uint64_t i;
+ for (i = 0; ; i++) {
+ r = cursor->c_getf_prev(cursor, 0, blocking_next_callback, &context);
+ if (r != 0)
+ break;
+ if (verbose)
+ printf("%lu prev %" PRIu64 "\n", (unsigned long) toku_pthread_self(), get_key(&context.key));
+ usleep(sleeptime);
+ }
+
+ if (verbose)
+ printf("%lu prev=%d\n", (unsigned long) toku_pthread_self(), r);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ assert(r == DB_NOTFOUND || r == DB_LOCK_DEADLOCK || r == DB_LOCK_NOTGRANTED);
+#else
+ assert(r == DB_NOTFOUND);
+#endif
+
+ int rr = cursor->c_close(cursor); assert(rr == 0);
+
+ if (r == DB_NOTFOUND) {
+ if (verbose) printf("%lu commit\n", (unsigned long) toku_pthread_self());
+ r = txn->commit(txn, 0);
+ } else {
+ if (verbose) printf("%lu abort\n", (unsigned long) toku_pthread_self());
+ r = txn->abort(txn);
+ }
+ assert(r == 0);
+
+ toku_free(context.key.data);
+ toku_free(context.val.data);
+}
+
+struct blocking_next_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+};
+
+static void *blocking_next_thread(void *arg) {
+ struct blocking_next_args *a = (struct blocking_next_args *) arg;
+ blocking_next(a->db_env, a->db, a->nrows, a->sleeptime);
+ return arg;
+}
+
+static void run_test(DB_ENV *db_env, DB *db, int nthreads, uint64_t nrows, long sleeptime) {
+ int r;
+ toku_pthread_t tids[nthreads];
+ struct blocking_next_args a = {db_env, db, nrows, sleeptime};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, blocking_next_thread, &a);
+ assert(r == 0);
+ }
+ blocking_prev(db_env, db, nrows, sleeptime);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 10;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // populate the db
+ populate(db_env, db, nrows);
+
+ run_test(db_env, db, nthreads, nrows, sleeptime);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-prelock-range.cc b/storage/tokudb/PerconaFT/src/tests/blocking-prelock-range.cc
new file mode 100644
index 00000000..38875a2f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-prelock-range.cc
@@ -0,0 +1,163 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that conflicting range locks suspend the conflicting threads.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static void blocking_range_lock(DB_ENV *db_env, DB *db, uint64_t nrows, long sleeptime) {
+ int r;
+
+ for (uint64_t i = 0; i < nrows; i++) {
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, DB_RMW); assert(r == 0);
+
+ uint64_t k = 0;
+ DBT key = { .data = &k, .size = sizeof k};
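+        // prelock the single-key range [k, k]; the cursor was opened with DB_RMW, so this takes a write lock and the other threads must wait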
+ r = cursor->c_set_bounds(cursor, &key, &key, true, 0); assert(r == 0);
+
+ usleep(sleeptime);
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ if (verbose)
+ printf("%lu %" PRIu64 "\n", (unsigned long) toku_pthread_self(), i);
+ }
+}
+
+struct blocking_range_lock_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+};
+
+static void *blocking_range_lock_thread(void *arg) {
+ struct blocking_range_lock_args *a = (struct blocking_range_lock_args *) arg;
+ blocking_range_lock(a->db_env, a->db, a->nrows, a->sleeptime);
+ return arg;
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 100;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ toku_pthread_t tids[nthreads];
+ struct blocking_range_lock_args a = {db_env, db, nrows, sleeptime};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(toku_uninstrumented,
+ &tids[i],
+ nullptr,
+ blocking_range_lock_thread,
+ &a);
+ assert(r == 0);
+ }
+ blocking_range_lock(db_env, db, nrows, sleeptime);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-put-timeout.cc b/storage/tokudb/PerconaFT/src/tests/blocking-put-timeout.cc
new file mode 100644
index 00000000..bf4a8a24
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-put-timeout.cc
@@ -0,0 +1,194 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that blocking lock waits eventually time out if the lock owner never releases the lock.
+
+// A begin txn
+// A write locks 0
+// A sleeps
+// B begin txn
+// B tries to write lock 0, blocks
+// B's write lock times out, B aborts its txn
+// A wakes up and commits its txn
+
+#include "test.h"
+#include "toku_pthread.h"
+
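+// simple sequencer: a thread sleeps until the shared state reaches a given value, and advancing the state wakes the waiters; it orders A's put before B's conflicting put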
+struct test_seq {
+ int state;
+ toku_mutex_t lock;
+ toku_cond_t cv;
+};
+
+static void test_seq_init(struct test_seq *seq) {
+ seq->state = 0;
+ toku_mutex_init(toku_uninstrumented, &seq->lock, nullptr);
+ toku_cond_init(toku_uninstrumented, &seq->cv, nullptr);
+}
+
+static void test_seq_destroy(struct test_seq *seq) {
+ toku_mutex_destroy(&seq->lock);
+ toku_cond_destroy(&seq->cv);
+}
+
+static void test_seq_sleep(struct test_seq *seq, int new_state) {
+ toku_mutex_lock(&seq->lock);
+ while (seq->state != new_state) {
+ toku_cond_wait(&seq->cv, &seq->lock);
+ }
+ toku_mutex_unlock(&seq->lock);
+}
+
+static void test_seq_next_state(struct test_seq *seq) {
+ toku_mutex_lock(&seq->lock);
+ seq->state++;
+ toku_cond_broadcast(&seq->cv);
+ toku_mutex_unlock(&seq->lock);
+}
+
+static void t_a(DB_ENV *db_env, DB *db, struct test_seq *seq) {
+ int r;
+ test_seq_sleep(seq, 0);
+ int k = 0;
+ DB_TXN *txn_a = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn_a, 0); assert(r == 0);
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &k, .size = sizeof k };
+ r = db->put(db, txn_a, &key, &val, 0); assert(r == 0);
+ test_seq_next_state(seq);
+ sleep(10);
+ r = txn_a->commit(txn_a, 0); assert(r == 0);
+}
+
+static void t_b(DB_ENV *db_env, DB *db, struct test_seq *seq) {
+ int r;
+ test_seq_sleep(seq, 1);
+ int k = 0;
+ DB_TXN *txn_b = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn_b, 0); assert(r == 0);
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &k, .size = sizeof k };
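+    // this put blocks on A's write lock on key 0 and times out with DB_LOCK_NOTGRANTED, since A holds the lock for longer than the 5 second timeout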
+ r = db->put(db, txn_b, &key, &val, 0);
+ assert(r == DB_LOCK_NOTGRANTED);
+ r = txn_b->abort(txn_b); assert(r == 0);
+}
+
+struct t_a_args {
+ DB_ENV *env;
+ DB *db;
+ struct test_seq *seq;
+};
+
+static void *t_a_thread(void *arg) {
+ struct t_a_args *a = (struct t_a_args *) arg;
+ t_a(a->env, a->db, a->seq);
+ return arg;
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ uint64_t lock_timeout_msec;
+ r = db_env->get_lock_timeout(db_env, &lock_timeout_msec); assert(r == 0);
+ if (verbose) printf("lock timeout: %" PRIu64 "\n", lock_timeout_msec);
+ r = db_env->set_lock_timeout(db_env, 5000, nullptr); assert(r == 0);
+ r = db_env->get_lock_timeout(db_env, &lock_timeout_msec); assert(r == 0);
+ if (verbose) printf("lock timeout: %" PRIu64 "\n", lock_timeout_msec);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // run test
+ struct test_seq seq;
+ ZERO_STRUCT(seq);
+ test_seq_init(&seq);
+ toku_pthread_t t_a_id;
+ struct t_a_args t_a_args = {db_env, db, &seq};
+ r = toku_pthread_create(
+ toku_uninstrumented, &t_a_id, nullptr, t_a_thread, &t_a_args);
+ assert(r == 0);
+ t_b(db_env, db, &seq);
+ void *ret;
+ r = toku_pthread_join(t_a_id, &ret);
+ assert(r == 0);
+ test_seq_destroy(&seq);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-put-wakeup.cc b/storage/tokudb/PerconaFT/src/tests/blocking-put-wakeup.cc
new file mode 100644
index 00000000..dba7962d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-put-wakeup.cc
@@ -0,0 +1,189 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that a blocking lock gets granted when the owning transaction commits.
+
+// A begin txn
+// A write locks 0
+// A sleeps
+// B begin txn
+// B tries to write lock 0, blocks
+// A wakes up and commits its txn
+// B's write lock is granted, B's thread resumes,
+// B commits its txn
+
+#include "test.h"
+#include "toku_pthread.h"
+
+struct test_seq {
+ int state;
+ toku_mutex_t lock;
+ toku_cond_t cv;
+};
+
+static void test_seq_init(struct test_seq *seq) {
+ seq->state = 0;
+ toku_mutex_init(toku_uninstrumented, &seq->lock, nullptr);
+ toku_cond_init(toku_uninstrumented, &seq->cv, nullptr);
+}
+
+static void test_seq_destroy(struct test_seq *seq) {
+ toku_mutex_destroy(&seq->lock);
+ toku_cond_destroy(&seq->cv);
+}
+
+static void test_seq_sleep(struct test_seq *seq, int new_state) {
+ toku_mutex_lock(&seq->lock);
+ while (seq->state != new_state) {
+ toku_cond_wait(&seq->cv, &seq->lock);
+ }
+ toku_mutex_unlock(&seq->lock);
+}
+
+static void test_seq_next_state(struct test_seq *seq) {
+ toku_mutex_lock(&seq->lock);
+ seq->state++;
+ toku_cond_broadcast(&seq->cv);
+ toku_mutex_unlock(&seq->lock);
+}
+
+static void t_a(DB_ENV *db_env, DB *db, struct test_seq *seq) {
+ int r;
+ test_seq_sleep(seq, 0);
+ int k = 0;
+ DB_TXN *txn_a = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn_a, 0); assert(r == 0);
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &k, .size = sizeof k };
+ r = db->put(db, txn_a, &key, &val, 0); assert(r == 0);
+ test_seq_next_state(seq);
+ sleep(10);
+ r = txn_a->commit(txn_a, 0); assert(r == 0);
+}
+
+static void t_b(DB_ENV *db_env, DB *db, struct test_seq *seq) {
+ int r;
+ test_seq_sleep(seq, 1);
+ int k = 0;
+ DB_TXN *txn_b = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn_b, 0); assert(r == 0);
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &k, .size = sizeof k };
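+    // this put blocks on A's write lock on key 0 and is granted once A commits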
+ r = db->put(db, txn_b, &key, &val, 0); assert(r == 0);
+ r = txn_b->commit(txn_b, 0); assert(r == 0);
+}
+
+struct t_a_args {
+ DB_ENV *env;
+ DB *db;
+ struct test_seq *seq;
+};
+
+static void *t_a_thread(void *arg) {
+ struct t_a_args *a = (struct t_a_args *) arg;
+ t_a(a->env, a->db, a->seq);
+ return arg;
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // run test
+ struct test_seq seq;
+ ZERO_STRUCT(seq);
+ test_seq_init(&seq);
+ toku_pthread_t t_a_id;
+ struct t_a_args t_a_args = {db_env, db, &seq};
+ r = toku_pthread_create(
+ toku_uninstrumented, &t_a_id, nullptr, t_a_thread, &t_a_args);
+ assert(r == 0);
+ t_b(db_env, db, &seq);
+ void *ret;
+ r = toku_pthread_join(t_a_id, &ret);
+ assert(r == 0);
+ test_seq_destroy(&seq);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-put.cc b/storage/tokudb/PerconaFT/src/tests/blocking-put.cc
new file mode 100644
index 00000000..95481062
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-put.cc
@@ -0,0 +1,159 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// have multiple threads try to put key 0 into the same db. one thread should gain a write lock on the key.
+// the other threads should block until the thread that owns the lock commits its transaction. then, one
+// of the blocked transactions should be granted the lock and its owning thread should resume.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+// pound on key == 0 and hold the write lock for a time less than the lock timeout
+static void blocking_put(DB_ENV *db_env, DB *db, uint64_t nrows, long sleeptime) {
+ int r;
+
+ for (uint64_t i = 0; i < nrows; i++) {
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ uint64_t k = 0;
+ DBT key = { .data = &k, .size = sizeof k};
+ DBT val = { .data = &k, .size = sizeof k};
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+
+ usleep(sleeptime);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ if (verbose)
+ printf("%lu %" PRIu64 "\n", (unsigned long) toku_pthread_self(), i);
+ }
+}
+
+struct blocking_put_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+};
+
+static void *blocking_put_thread(void *arg) {
+ struct blocking_put_args *a = (struct blocking_put_args *) arg;
+ blocking_put(a->db_env, a->db, a->nrows, a->sleeptime);
+ return arg;
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 100;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ toku_pthread_t tids[nthreads];
+ struct blocking_put_args a = {db_env, db, nrows, sleeptime};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, blocking_put_thread, &a);
+ assert(r == 0);
+ }
+ blocking_put(db_env, db, nrows, sleeptime);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-set-range-0.cc b/storage/tokudb/PerconaFT/src/tests/blocking-set-range-0.cc
new file mode 100644
index 00000000..7c2e7b0f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-set-range-0.cc
@@ -0,0 +1,216 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that cursor set range operations suspend the conflicting threads when another transaction
+// owns a lock on the key. the test uses keys 0, nrows/2, and nrows-1.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static void populate(DB_ENV *db_env, DB *db, uint64_t nrows) {
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+
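+        // htonl stores i's low 32 bits big-endian, so with the default byte-wise key comparison the rows sort in numeric key order (the trailing 4 bytes of the 8-byte key stay zero)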
+ uint64_t k = htonl(i);
+ uint64_t v = i;
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &v, .size = sizeof v };
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0);
+}
+
+struct my_callback_context {
+ DBT key;
+ DBT val;
+};
+
+static int blocking_set_range_callback(DBT const *a UU(), DBT const *b UU(), void *e UU()) {
+ DBT const *found_key = a;
+ DBT const *found_val = b;
+ struct my_callback_context *context = (struct my_callback_context *) e;
+ copy_dbt(&context->key, found_key);
+ copy_dbt(&context->val, found_val);
+ return 0;
+}
+
+static void blocking_set_range(DB_ENV *db_env, DB *db, uint64_t nrows, long sleeptime, uint64_t the_key) {
+ int r;
+
+ struct my_callback_context context;
+ dbt_init_realloc(&context.key);
+ dbt_init_realloc(&context.val);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0); // get a write lock on the key
+
+ uint64_t k = htonl(the_key);
+ DBT key = { .data = &k, .size = sizeof k };
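+        // DB_RMW asks for a write lock on the row that set_range finds, so another thread doing the same read blocks here until this transaction commits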
+ r = cursor->c_getf_set_range(cursor, DB_RMW, &key, blocking_set_range_callback, &context); assert(r == 0);
+ uint64_t v;
+ assert(context.val.size == sizeof v);
+ memcpy(&v, context.val.data, context.val.size);
+ assert(v == the_key); // verify the value
+
+ usleep(sleeptime);
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ if (verbose)
+ printf("%lu %" PRIu64 "\n", (unsigned long) toku_pthread_self(), i);
+ }
+
+ toku_free(context.key.data);
+ toku_free(context.val.data);
+}
+
+struct blocking_set_range_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+ uint64_t the_key;
+};
+
+static void *blocking_set_range_thread(void *arg) {
+ struct blocking_set_range_args *a = (struct blocking_set_range_args *) arg;
+ blocking_set_range(a->db_env, a->db, a->nrows, a->sleeptime, a->the_key);
+ return arg;
+}
+
+static void run_test(DB_ENV *db_env, DB *db, int nthreads, uint64_t nrows, long sleeptime, uint64_t the_key) {
+ int r;
+ toku_pthread_t tids[nthreads];
+ struct blocking_set_range_args a = {db_env, db, nrows, sleeptime, the_key};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr,
+ blocking_set_range_thread, &a);
+ assert(r == 0);
+ }
+ blocking_set_range(db_env, db, nrows, sleeptime, the_key);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 10;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // populate the db
+ populate(db_env, db, nrows);
+
+ run_test(db_env, db, nthreads, nrows, sleeptime, 0);
+ run_test(db_env, db, nthreads, nrows, sleeptime, nrows/2);
+ run_test(db_env, db, nthreads, nrows, sleeptime, nrows-1);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-set-range-n.cc b/storage/tokudb/PerconaFT/src/tests/blocking-set-range-n.cc
new file mode 100644
index 00000000..54a5846e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-set-range-n.cc
@@ -0,0 +1,209 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that cursor set range operations suspend the conflicting threads when another transaction
+// owns a lock on the key. the key is at the right edge of the key space.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static void populate(DB_ENV *db_env, DB *db, uint64_t nrows) {
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+
+ uint64_t k = htonl(i);
+ uint64_t v = i;
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &v, .size = sizeof v };
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0);
+}
+
+struct my_callback_context {
+ DBT key;
+ DBT val;
+};
+
+static int blocking_set_range_callback(DBT const *a UU(), DBT const *b UU(), void *e UU()) {
+ DBT const *found_key = a;
+ DBT const *found_val = b;
+ struct my_callback_context *context = (struct my_callback_context *) e;
+ copy_dbt(&context->key, found_key);
+ copy_dbt(&context->val, found_val);
+ return 0;
+}
+
+static void blocking_set_range(DB_ENV *db_env, DB *db, uint64_t nrows, long sleeptime, uint64_t the_key) {
+ int r;
+
+ struct my_callback_context context;
+ dbt_init_realloc(&context.key);
+ dbt_init_realloc(&context.val);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0); // get a write lock on the key
+
+ uint64_t k = htonl(the_key);
+ DBT key = { .data = &k, .size = sizeof k };
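+        // the_key (== nrows) lies past the last row, so set_range returns DB_NOTFOUND; presumably the lock still taken at the right edge of the key space is what serializes the threads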
+ r = cursor->c_getf_set_range(cursor, DB_RMW, &key, blocking_set_range_callback, &context); assert(r == DB_NOTFOUND);
+ usleep(sleeptime);
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ if (verbose)
+ printf("%lu %" PRIu64 "\n", (unsigned long) toku_pthread_self(), i);
+ }
+
+ toku_free(context.key.data);
+ toku_free(context.val.data);
+}
+
+struct blocking_set_range_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+ uint64_t the_key;
+};
+
+static void *blocking_set_range_thread(void *arg) {
+ struct blocking_set_range_args *a = (struct blocking_set_range_args *) arg;
+ blocking_set_range(a->db_env, a->db, a->nrows, a->sleeptime, a->the_key);
+ return arg;
+}
+
+static void run_test(DB_ENV *db_env, DB *db, int nthreads, uint64_t nrows, long sleeptime, uint64_t the_key) {
+ int r;
+ toku_pthread_t tids[nthreads];
+ struct blocking_set_range_args a = {db_env, db, nrows, sleeptime, the_key};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr,
+ blocking_set_range_thread, &a);
+ assert(r == 0);
+ }
+ blocking_set_range(db_env, db, nrows, sleeptime, the_key);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 10;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // populate the db
+ populate(db_env, db, nrows);
+
+ run_test(db_env, db, nthreads, nrows, sleeptime, nrows);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-set-range-reverse-0.cc b/storage/tokudb/PerconaFT/src/tests/blocking-set-range-reverse-0.cc
new file mode 100644
index 00000000..59e58254
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-set-range-reverse-0.cc
@@ -0,0 +1,214 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that cursor set range reverse operations suspend the conflicting threads when another transaction
+// owns a lock on the key.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static void populate(DB_ENV *db_env, DB *db, uint64_t nrows) {
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+
+ uint64_t k = htonl(i);
+ uint64_t v = i;
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &v, .size = sizeof v };
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0);
+}
+
+struct my_callback_context {
+ DBT key;
+ DBT val;
+};
+
+static int blocking_set_range_callback(DBT const *a UU(), DBT const *b UU(), void *e UU()) {
+ DBT const *found_key = a;
+ DBT const *found_val = b;
+ struct my_callback_context *context = (struct my_callback_context *) e;
+ copy_dbt(&context->key, found_key);
+ copy_dbt(&context->val, found_val);
+ return 0;
+}
+
+static void blocking_set_range(DB_ENV *db_env, DB *db, uint64_t nrows, long sleeptime, uint64_t the_key) {
+ int r;
+
+ struct my_callback_context context;
+ dbt_init_realloc(&context.key);
+ dbt_init_realloc(&context.val);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, DB_RMW); assert(r == 0); // get a write lock on the key
+
+ uint64_t k = htonl(the_key);
+ DBT key = { .data = &k, .size = sizeof k };
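+        // set_range_reverse finds the largest key <= the_key; with the_key == 0 that is the first row, so the callback should see value 0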
+ r = cursor->c_getf_set_range_reverse(cursor, 0, &key, blocking_set_range_callback, &context); assert(r == 0);
+ uint64_t v;
+ assert(context.val.size == sizeof v);
+ memcpy(&v, context.val.data, context.val.size);
+ assert(v == 0); // verify the value
+
+ usleep(sleeptime);
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ if (verbose)
+ printf("%lu %" PRIu64 "\n", (unsigned long) toku_pthread_self(), i);
+ }
+
+ toku_free(context.key.data);
+ toku_free(context.val.data);
+}
+
+struct blocking_set_range_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+ uint64_t the_key;
+};
+
+static void *blocking_set_range_thread(void *arg) {
+ struct blocking_set_range_args *a = (struct blocking_set_range_args *) arg;
+ blocking_set_range(a->db_env, a->db, a->nrows, a->sleeptime, a->the_key);
+ return arg;
+}
+
+static void run_test(DB_ENV *db_env, DB *db, int nthreads, uint64_t nrows, long sleeptime, uint64_t the_key) {
+ int r;
+ toku_pthread_t tids[nthreads];
+ struct blocking_set_range_args a = {db_env, db, nrows, sleeptime, the_key};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr,
+ blocking_set_range_thread, &a);
+ assert(r == 0);
+ }
+ blocking_set_range(db_env, db, nrows, sleeptime, the_key);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 10;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // populate the db
+ populate(db_env, db, nrows);
+
+ run_test(db_env, db, nthreads, nrows, sleeptime, 0);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-set.cc b/storage/tokudb/PerconaFT/src/tests/blocking-set.cc
new file mode 100644
index 00000000..aa433c87
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-set.cc
@@ -0,0 +1,202 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that cursor set operations suspend the conflicting threads when another transaction
+// owns a lock on the key.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static void populate(DB_ENV *db_env, DB *db, uint64_t nrows) {
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+
+ uint64_t k = htonl(i);
+ uint64_t v = i;
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &v, .size = sizeof v };
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0);
+}
+
+static int blocking_set_callback(DBT const *a UU(), DBT const *b UU(), void *e UU()) {
+ // DBT const *found_key = a;
+ DBT const *found_val = b;
+ DBT *my_val = (DBT *) e;
+ assert(my_val->flags == DB_DBT_REALLOC);
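+    // grow the caller's DB_DBT_REALLOC buffer as needed and copy the found value into it so it survives after the callback returns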
+ my_val->data = toku_xrealloc(my_val->data, found_val->size);
+ my_val->size = found_val->size;
+ memcpy(my_val->data, found_val->data, found_val->size);
+ return 0;
+}
+
+static void blocking_set(DB_ENV *db_env, DB *db, uint64_t nrows, long sleeptime) {
+ int r;
+
+ DBT val;
+ dbt_init_realloc(&val);
+
+ for (uint64_t i = 0; i < nrows; i++) {
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0); // get a write lock on the key
+
+ uint64_t k = htonl(0); // set to key 0
+ DBT key = { .data = &k, .size = sizeof k };
+ r = cursor->c_getf_set(cursor, DB_RMW, &key, blocking_set_callback, &val); assert(r == 0);
+ uint64_t v;
+ assert(val.size == sizeof v);
+ memcpy(&v, val.data, val.size);
+ assert(v == 0); // verify the value
+
+ usleep(sleeptime);
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ if (verbose)
+ printf("%lu %" PRIu64 "\n", (unsigned long) toku_pthread_self(), i);
+ }
+
+ toku_free(val.data);
+}
+
+struct blocking_set_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+};
+
+static void *blocking_set_thread(void *arg) {
+ struct blocking_set_args *a = (struct blocking_set_args *) arg;
+ blocking_set(a->db_env, a->db, a->nrows, a->sleeptime);
+ return arg;
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 100;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ // populate the db
+ populate(db_env, db, nrows);
+
+ toku_pthread_t tids[nthreads];
+ struct blocking_set_args a = {db_env, db, nrows, sleeptime};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, blocking_set_thread, &a);
+ assert(r == 0);
+ }
+ blocking_set(db_env, db, nrows, sleeptime);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/blocking-table-lock.cc b/storage/tokudb/PerconaFT/src/tests/blocking-table-lock.cc
new file mode 100644
index 00000000..0953a4e8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/blocking-table-lock.cc
@@ -0,0 +1,156 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that table locks used by multiple transactions suspend the conflicting thread rather than just returning DB_LOCK_NOTGRANTED.
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static void blocking_table_lock(DB_ENV *db_env, DB *db, uint64_t nrows, long sleeptime) {
+ int r;
+
+ for (uint64_t i = 0; i < nrows; i++) {
+ DB_TXN *txn = NULL;
+ r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0);
+
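+        // take a write lock on the whole table; the other threads' pre_acquire_table_lock calls should block here until this transaction commits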
+ r = db->pre_acquire_table_lock(db, txn); assert(r == 0);
+
+ usleep(sleeptime);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ if (verbose)
+ printf("%lu %" PRIu64 "\n", (unsigned long) toku_pthread_self(), i);
+ }
+}
+
+struct blocking_table_lock_args {
+ DB_ENV *db_env;
+ DB *db;
+ uint64_t nrows;
+ long sleeptime;
+};
+
+static void *blocking_table_lock_thread(void *arg) {
+ struct blocking_table_lock_args *a = (struct blocking_table_lock_args *) arg;
+ blocking_table_lock(a->db_env, a->db, a->nrows, a->sleeptime);
+ return arg;
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ uint64_t nrows = 100;
+ int nthreads = 2;
+ long sleeptime = 100000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "test.db";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--nthreads") == 0 && i+1 < argc) {
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--sleeptime") == 0 && i+1 < argc) {
+ sleeptime = atol(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, NULL, db_filename, NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT|DB_THREAD, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+
+ toku_pthread_t tids[nthreads];
+ struct blocking_table_lock_args a = {db_env, db, nrows, sleeptime};
+ for (int i = 0; i < nthreads - 1; i++) {
+ r = toku_pthread_create(toku_uninstrumented,
+ &tids[i],
+ nullptr,
+ blocking_table_lock_thread,
+ &a);
+ assert(r == 0);
+ }
+ blocking_table_lock(db_env, db, nrows, sleeptime);
+ for (int i = 0; i < nthreads - 1; i++) {
+ void *ret;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0);
+ }
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/bug1381.cc b/storage/tokudb/PerconaFT/src/tests/bug1381.cc
new file mode 100644
index 00000000..0e59cabf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/bug1381.cc
@@ -0,0 +1,189 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Test for #1381: If we insert into a locked empty table, not much goes into the rollback data structure. */
+
+#include <db.h>
+#include <sys/stat.h>
+#include <memory.h>
+
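+// identity row generator for the loader: pass each (key, val) pair through to the destination db unchanged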
+static int generate_row_for_put(
+ DB *UU(dest_db),
+ DB *UU(src_db),
+ DBT_ARRAY *dest_key_arrays,
+ DBT_ARRAY *dest_val_arrays,
+ const DBT *src_key,
+ const DBT *src_val
+ )
+{
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ dest_key->flags = 0;
+ dest_val->flags = 0;
+
+ dest_key->data = src_key->data;
+ dest_key->size = src_key->size;
+ dest_val->data = src_val->data;
+ dest_val->size = src_val->size;
+ return 0;
+}
+
+
+static void do_1381_maybe_lock (int do_loader, uint64_t *raw_count) {
+ int r;
+ DB_TXN * const null_txn = 0;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+
+ // Create an empty file
+ {
+ DB_ENV *env;
+ DB *db;
+
+ const int envflags = DB_CREATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOCK|DB_THREAD|DB_PRIVATE;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, generate_row_for_put); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, "main", 0, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ }
+ // Now open the empty file and insert
+ {
+ DB_ENV *env;
+ DB *db;
+ const int envflags = DB_CREATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOCK|DB_THREAD |DB_PRIVATE;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, generate_row_for_put); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, "main", 0, DB_BTREE, 0, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ uint32_t mult_put_flags = 0;
+ uint32_t mult_dbt_flags = 0;
+ DB_LOADER* loader = NULL;
+ if (do_loader) {
+ r = env->create_loader(
+ env,
+ txn,
+ &loader,
+ NULL, // no src_db needed
+ 1,
+ &db,
+ &mult_put_flags,
+ &mult_dbt_flags,
+ LOADER_COMPRESS_INTERMEDIATES
+ );
+ CKERR(r);
+ }
+
+ struct txn_stat *s1, *s2;
+ r = txn->txn_stat(txn, &s1); CKERR(r);
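+        // s1 snapshots the rollback-log counters before the insert; comparing with s2 (taken below) measures exactly what this one put added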
+
+ {
+ DBT key;
+ dbt_init(&key, "hi", 3);
+ DBT val;
+ dbt_init(&val, "v", 2);
+ if (do_loader) {
+ r = loader->put(loader, &key, &val);
+ CKERR(r);
+ }
+ else {
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ }
+ }
+ if (do_loader) {
+ r = loader->close(loader);
+ CKERR(r);
+ }
+
+ r = txn->txn_stat(txn, &s2); CKERR(r);
+ //printf("Raw counts = %" PRId64 ", %" PRId64 "\n", s1->rollback_raw_count, s2->rollback_raw_count);
+
+ *raw_count = s2->rollback_raw_count - s1->rollback_raw_count;
+ if (do_loader) {
+ assert(s1->rollback_raw_count < s2->rollback_raw_count);
+ assert(s1->rollback_num_entries + 1 == s2->rollback_num_entries);
+ } else {
+ assert(s1->rollback_raw_count < s2->rollback_raw_count);
+ assert(s1->rollback_num_entries < s2->rollback_num_entries);
+ }
+
+ toku_free(s1); toku_free(s2);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ }
+}
+
+static void
+do_1381 (void) {
+ int do_table_lock;
+ uint64_t raw_counts[2];
+ for (do_table_lock = 0; do_table_lock < 2 ; do_table_lock++) {
+ do_1381_maybe_lock(do_table_lock, &raw_counts[do_table_lock]);
+ }
+    assert(raw_counts[0] > raw_counts[1]); // the loader case should log less raw rollback data.
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ parse_args(argc, argv);
+ do_1381();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/cachetable-race.cc b/storage/tokudb/PerconaFT/src/tests/cachetable-race.cc
new file mode 100644
index 00000000..54d78c05
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/cachetable-race.cc
@@ -0,0 +1,152 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* This test is a little bit ugly, but I think it's mostly harmless.
+ * It's ugly because it requires a hook into cachetable.c, and that
+ * hook is a little ugly.
+ *
+ * The way it works:
+ *
+ * In the cachetable we set a variable that indicates that we are
+ * saving user data. (This is toku_checkpointing_user_data_status).
+ *
+ * In this test (cachetable-race), we close 3 of 5 dbs, and then busy
+ * wait for that status variable to go to 1 and close the other two.
+ *
+ * In this way, the close happens (with good chances) right in the
+ * middle of the checkpoint loop for saving all the user data, which
+ * seems to trigger the race error.
+ */
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb.h"
+#include "ydb-internal.h"
+
+DB_ENV *env;
+enum {NUM_DBS=5};
+
+const char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+
+static void run_cachetable_race_test(void)
+{
+ int r;
+ toku_os_recursive_delete(env_dir);
+ r = toku_os_mkdir(env_dir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 5); CKERR(r);
+
+ enum {MAX_NAME=128};
+ char name[MAX_NAME];
+
+ DB **XMALLOC_N(NUM_DBS, dbs);
+ int idx[NUM_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ }
+
+ for(int i=0;i<NUM_DBS;i++) {
+        r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+
+ if (i==2) {
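+            // after the third close, busy-wait until the checkpoint starts saving user data, then race the remaining closes against it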
+ if (verbose) printf("%s:%d c=%d\n", __FILE__, __LINE__, toku_test_get_checkpointing_user_data_status());
+ while (toku_test_get_checkpointing_user_data_status()==0)
+ sched_yield();
+ if (verbose) printf("%s:%d c=%d\n", __FILE__, __LINE__, toku_test_get_checkpointing_user_data_status());
+ }
+ }
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_cachetable_race_test();
+
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: [-h] [-v] [-q] -s\n%s\n", cmd);
+ fprintf(stderr, " where -h print this message\n");
+ fprintf(stderr, " -v verbose (multiple times for more verbosity)\n");
+ fprintf(stderr, " -q quiet (default is verbosity==1)\n");
+ fprintf(stderr, " -e <env> uses <env> to construct the directory (so that different tests can run concurrently)\n");
+ fprintf(stderr, " -s use size factor of 1 and count temporary files\n");
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-s")==0) {
+ printf("\nTesting loader with size_factor=1\n");
+ db_env_set_loader_size_factor(1);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/checkpoint1.cc b/storage/tokudb/PerconaFT/src/tests/checkpoint1.cc
new file mode 100644
index 00000000..9f9337b3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/checkpoint1.cc
@@ -0,0 +1,94 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <sys/stat.h>
+
+/* basic checkpoint testing. Do things end up in the log? */
+
+DB_ENV *env;
+DB *db;
+DB_TXN *txn;
+
+static void
+insert(int i)
+{
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ DBT key, val;
+ int r=db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&val, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void
+checkpoint1 (void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(0);
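+    // take a checkpoint while the insert's transaction is still open, then commit; both should end up in the log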
+ r=env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ parse_args(argc, argv);
+ checkpoint1();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/checkpoint_fairness.cc b/storage/tokudb/PerconaFT/src/tests/checkpoint_fairness.cc
new file mode 100644
index 00000000..f62dfbc7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/checkpoint_fairness.cc
@@ -0,0 +1,137 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// This test fails if the multi_operation_lock prefers readers. (See #4347).
+// But it works well if the multi_operation_lock prefers writers (which, since there is typically only one writer, makes it fair).
+// What this test does:
+// Starts a bunch of threads (100 seems to work): Each executes many transactions (and thus obtains the multi_operation_lock during the txn->commit, and until #4346 is changed, holds it through the fsync. If we fix #4346 then
+// this test may not be sensitive to the bug.)
+// Meanwhile another thread tries to do W checkpoints. (W=10 seems to work).
+// The checkpoint thread waits until all the transaction threads have gotten going (waits until each transaction thread has done 10 transactions).
+// The transaction threads get upset if they manage to run for 1000 transactions without the W checkpoints being finished.
+// The theory is that the transaction threads can starve the checkpoint thread by obtaining the multi_operation_lock.
+// But making the multi_operation_lock prefer writers means that the checkpoint gets a chance to run.
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <portability/toku_atomic.h>
+
+DB_ENV *env;
+DB *db;
+const char *env_dir = TOKU_TEST_FILENAME;
+
+const int n_threads = 100;
+volatile int reader_start_count = 0;
+
+const int W = 10;
+volatile int writer_done_count = 0;
+
+static void *start_txns (void *e) {
+ int *CAST_FROM_VOIDP(idp, e);
+ int id = *idp;
+ int j;
+ DBT k;
+ dbt_init(&k, &id, sizeof id);
+    for (j=0; writer_done_count<W; j++) { // terminate the loop when the checkpoint thread has done its W checkpoints.
+ DB_TXN *txn;
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->put(db, txn, &k, &k, 0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn, 0); CKERR(chk_r); }
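+        // after 10 txns this worker announces itself; the checkpoint thread waits until all n_threads workers have done so before starting its W checkpoints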
+ if (j==10) (void)toku_sync_fetch_and_add(&reader_start_count, 1);
+ if (j%1000==999) { printf("."); fflush(stdout); }
+        assert(j<1000); // Get upset if we manage to run this many transactions without the checkpoint thread finishing its W checkpoints.
+ }
+ if (verbose) printf("rdone j=%d\n", j);
+ return NULL;
+}
+static void start_checkpoints (void) {
+ while (reader_start_count < n_threads) { sched_yield(); }
+ for (int i=0; i<W; i++) {
+ if (verbose) printf("cks\n");
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+ if (verbose) printf("ck\n");
+ sched_yield();
+ (void)toku_sync_fetch_and_add(&writer_done_count, 1);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+
+ // try to starve the checkpoint
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ { int chk_r = env->set_redzone(env, 0); CKERR(chk_r); }
+ {
+ const int size = 10+strlen(env_dir);
+ char cmd[size];
+ snprintf(cmd, size, "rm -rf %s", env_dir);
+ int r = system(cmd);
+ CKERR(r);
+ }
+ { int chk_r = toku_os_mkdir(env_dir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER;
+ { int chk_r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+
+ { int chk_r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+
+ pthread_t thds[n_threads];
+ int ids[n_threads];
+ for (int i = 0; i < n_threads; i++) {
+ ids[i] = i;
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &thds[i], nullptr, start_txns, &ids[i]);
+ CKERR(chk_r);
+ }
+ }
+ start_checkpoints();
+
+ for (int i=0; i<n_threads; i++) {
+ void *retval;
+ { int chk_r = toku_pthread_join(thds[i], &retval); CKERR(chk_r); }
+ assert(retval==NULL);
+ }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/checkpoint_stress.cc b/storage/tokudb/PerconaFT/src/tests/checkpoint_stress.cc
new file mode 100644
index 00000000..d3e5ddd5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/checkpoint_stress.cc
@@ -0,0 +1,380 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "checkpoint_test.h"
+
+
+/***
+
+
+Purpose of this test is to stress the checkpoint logic.
+
+Multiple dictionaries are used. Data is inserted, checkpoints are taken,
+and this test verifies that all checkpoints are valid.
+
+
+Parameters:
+ -cC crash or not (not crashing useful for running valgrind)
+ -i # iteration number (default is to run 5 iterations)
+ -n # number of operations per iteration (default 5001)
+ -v verbose
+ -q quiet
+
+Each iteration does:
+ - Verify that previous two iterations were correctly executed
+ - Previous inserts were done correctly
+ - There are no rows after last expected
+ - Take checkpoint
+ - Scribble over database (verifying that changes after checkpoint are not effective)
+ - Spawn another thread to perform random acts (inserts/deletes/queries) to
+ simulate normal database operations
+ - Drop dead
+
+***/
+
+#define NUM_DICTIONARIES 4 // any more than 3 is overkill to exercise linked list logic
+
+static int oper_per_iter = 5001; // not-very-nice odd number (not a multiple of a power of two)
+static int do_log_recover = 0;
+
+static toku_pthread_t thread;
+
+// scribble over database to make sure that changes made after checkpoint are not saved
+static void UU()
+scribble(DB* db, int iter) {
+ int64_t firstkey; // first key to verify/insert
+ int64_t numkeys; // number of keys to verify/insert
+
+ if (iter > 0){
+ if (iter == 1) {
+ firstkey = 0;
+ numkeys = oper_per_iter;
+ }
+ else {
+ firstkey = (iter - 2) * oper_per_iter;
+ numkeys = oper_per_iter * 2;
+ }
+ }
+
+ // now insert new rows for this iteration
+ firstkey = iter * oper_per_iter;
+ numkeys = oper_per_iter;
+
+ insert_n_broken(db, NULL, NULL, firstkey, numkeys);
+}
+
+// scribble over database to make sure that changes made after checkpoint are not saved
+// by deleting three of every four rows
+static void UU()
+thin_out(DB* db, int iter) {
+ int64_t firstkey; // first key to verify/insert
+ int64_t numkeys; // number of keys to verify/insert
+
+ if (iter > 0){
+ if (iter == 1) {
+ firstkey = 0;
+ numkeys = oper_per_iter;
+ }
+ else {
+ firstkey = (iter - 2) * oper_per_iter;
+ numkeys = oper_per_iter * 2;
+ }
+ }
+
+ int r;
+ DBT keydbt;
+ int64_t key;
+ DB_TXN * txn;
+
+ dbt_init(&keydbt, &key, sizeof(key));
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = db->pre_acquire_table_lock(db, txn);
+ CKERR(r);
+
+ // now delete three of four rows
+ firstkey = iter * oper_per_iter;
+ numkeys = oper_per_iter;
+
+ for (key = firstkey; key < (firstkey + numkeys); key++) {
+ if (key & 0x03) { // leave every fourth key alone
+ r = db->del(db, txn, &keydbt, DB_DELETE_ANY);
+ CKERR(r);
+ }
+ }
+
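+    // when exercising log recovery the commit below is deliberately skipped, presumably so that recovery has to discard these uncommitted deletes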
+ if ( !do_log_recover )
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+}
+
+
+
+static void
+drop_dead(void) {
+ // deliberate zerodivide or sigsegv
+ fprintf(stderr, "HAPPY CRASH\n");
+ fflush(stdout);
+ fflush(stderr);
+ toku_hard_crash_on_purpose();
+ printf("This line should never be printed\n");
+ fflush(stdout);
+}
+
+
+static void
+verify_and_insert (DB* db, int iter) {
+
+ int64_t firstkey; // first key to verify/insert
+ int64_t numkeys; // number of keys to verify/insert
+
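+    // verify the rows written by the previous two iterations: iteration 1 checks [0, oper_per_iter); later iterations check the two-iteration window ending at iter * oper_per_iter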
+ if (iter > 0){
+ if (iter == 1) {
+ firstkey = 0;
+ numkeys = oper_per_iter;
+ }
+ else {
+ firstkey = (iter - 2) * oper_per_iter;
+ numkeys = oper_per_iter * 2;
+ }
+ verify_sequential_rows(db, firstkey, numkeys);
+ }
+
+ // now insert new rows for this iteration
+ firstkey = iter * oper_per_iter;
+ numkeys = oper_per_iter;
+
+ int r;
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ insert_n_fixed(db, NULL, NULL, firstkey, numkeys);
+ r = txn->commit(txn, 0); CKERR(r);
+}
+
+
+// Purpose of this function is to perform a variety of random acts.
+// This will simulate normal database operations. The idea is for the
+// crash to occur sometimes during an insert, sometimes during a query, etc.
+static void *
+random_acts(void * d) {
+ void * intothevoid = NULL;
+ DICTIONARY dictionaries = (DICTIONARY) d;
+ if (verbose)
+ printf("perform random acts, %s\n", dictionaries[0].filename);
+ fflush(stdout);
+ int i = 0;
+ int64_t k = 0;
+
+ while (1) { // run until crash
+ // main thread is scribbling over dictionary 0
+ // this thread will futz with other dictionaries
+ for (i = 1; i < NUM_DICTIONARIES; i++) {
+ int j;
+ DB * db = dictionaries[i].db;
+ insert_random(db, NULL, NULL);
+ delete_both_random(db, NULL, NULL, 0); // delete only if found (performs query)
+ delete_both_random(db, NULL, NULL, DB_DELETE_ANY); // delete whether or not found (no query)
+ for (j = 0; j < 10; j++) {
+ delete_fixed(db, NULL, NULL, k, 0); // delete only if found to provoke more queries
+ k++;
+ }
+ }
+ }
+
+ return intothevoid;
+}
+
+uint64_t max_cachesize = 256 << 20;
+
+static void
+run_test (int iter, int die) {
+
+ uint32_t flags = 0;
+
+ int i;
+
+ if (iter == 0)
+ dir_create(TOKU_TEST_FILENAME); // create directory if first time through
+
+ // Grow the cachesize by 256 KB per iteration
+ // to force lots of disk I/O
+ // (each iteration inserts about 5K rows/dictionary, 16 bytes/row, 4 dictionaries, roughly 320 KB inserted per iteration)
+ const int32_t K256 = 256 * 1024;
+ uint64_t cachebytes = 0;
+ cachebytes = K256 * (iter + 1) - (128 * 1024);
+ if (cachebytes > max_cachesize)
+ cachebytes = 0;
+ if (iter & 2) cachebytes = 0; // use default cachesize half the time
+
+ if (verbose)
+ printf("checkpoint_stress: iter = %d, cachesize (bytes) = 0x%08" PRIx64 "\n", iter, cachebytes);
+
+ int recovery_flags = 0;
+ if ( do_log_recover ) {
+ recovery_flags += DB_INIT_LOG|DB_INIT_TXN;
+ if ( iter != 0 )
+ recovery_flags += DB_RECOVER;
+ }
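+ // with -l, every iteration after the first opens the environment with
+ // DB_RECOVER, replaying the log written before the previous crash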
+ env_startup(TOKU_TEST_FILENAME, cachebytes, recovery_flags);
+
+ // create array of dictionaries
+ // for each dictionary verify previous iterations and perform new inserts
+
+ DICTIONARY_S dictionaries[NUM_DICTIONARIES];
+ for (i = 0; i < NUM_DICTIONARIES; i++) {
+ char name[32];
+ sprintf(name, "stress_%d", i);
+ init_dictionary(&dictionaries[i], flags, name);
+ db_startup(&dictionaries[i], NULL);
+ DB* db = dictionaries[i].db;
+ verify_and_insert(db, iter);
+ }
+
+ // take checkpoint (all dictionaries)
+ snapshot(NULL, 1);
+
+ if (die) {
+ // separate thread will perform random acts on other dictionaries (not 0)
+ int r = toku_pthread_create(
+ toku_uninstrumented, &thread, nullptr,
+ random_acts, static_cast<void*>(dictionaries));
+ CKERR(r);
+ // this thread will scribble over dictionary 0 before the crash to verify
+ // that post-checkpoint inserts are not in the database
+ DB* db = dictionaries[0].db;
+ if (iter & 1)
+ scribble(db, iter);
+ else
+ thin_out(db, iter);
+ uint32_t delay = myrandom();
+ delay &= 0xFFF; // keep the low 12 bits ...
+ delay = delay << 8; // ... then shift up 8 bits: uniformly distributed between 0 and ~1M ...
+ usleep(delay); // ... to sleep up to about one second (1M usec)
+ drop_dead();
+ }
+ else {
+ for (i = 0; i < NUM_DICTIONARIES; i++) {
+ db_shutdown(&dictionaries[i]);
+ }
+ env_shutdown();
+ }
+}
+
+
+static void
+usage(char *progname) {
+ fprintf(stderr, "Usage:\n%s [-c] [-C] [-i N] [-n N] [-l] [-q|-v]\n"
+ " \n%s [-h]\n", progname,
+ progname);
+}
+
+
+int
+test_main (int argc, char * const argv[]) {
+
+ // get arguments, set parameters
+
+ int iter = -1;
+
+ int c;
+ int crash = 0;
+ while ((c = getopt(argc, (char * const *)argv, "cChi:qvn:lX:")) != -1) {
+ switch(c) {
+ case 'c':
+ crash = 1;
+ break;
+ case 'C':
+ crash = 0;
+ break;
+ case 'i':
+ iter = atoi(optarg);
+ break;
+ case 'n':
+ oper_per_iter = atoi(optarg);
+ break;
+ case 'l':
+ do_log_recover = 1;
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 'q':
+ verbose--;
+ if (verbose<0) verbose=0;
+ break;
+ case 'X':
+ if (strcmp(optarg, "novalgrind") == 0) {
+ // provide a way for the shell script runner to pass an
+ // arg that suppresses valgrind on this child process
+ break;
+ }
+ /* fall through */ // otherwise, fall through to an error
+ case 'h':
+ case '?':
+ usage(argv[0]);
+ return 1;
+ default:
+ assert(false);
+ return 1;
+ }
+ }
+ if (argc!=optind) { usage(argv[0]); return 1; }
+
+ // for developing this test and for exercising with valgrind (no crash)
+ if (iter <0) {
+ if (verbose)
+ printf("No argument, just run five times without crash\n");
+ for (iter = 0; iter<5; iter++) {
+ run_test(iter, 0);
+ }
+ }
+ else {
+ run_test(iter, crash);
+ }
+
+ return 0;
+
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/checkpoint_test.h b/storage/tokudb/PerconaFT/src/tests/checkpoint_test.h
new file mode 100644
index 00000000..9a8d24c2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/checkpoint_test.h
@@ -0,0 +1,484 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+DB_ENV *env;
+
+enum {MAX_NAME=128};
+
+enum {NUM_FIXED_ROWS=1025}; // 1K + 1
+
+typedef struct {
+ DB* db;
+ uint32_t flags;
+ char filename[MAX_NAME]; //Relative to envdir/
+ int num;
+} DICTIONARY_S, *DICTIONARY;
+
+
+static inline int64_t
+generate_val(int64_t key) {
+ int64_t val = key + 314;
+ return val;
+}
+
+// return 0 if same
+static int
+verify_identical_dbts(const DBT *dbt1, const DBT *dbt2) {
+ int r = 0;
+ if (dbt1->size != dbt2->size) r = 1;
+ else if (memcmp(dbt1->data, dbt2->data, dbt1->size)!=0) r = 1;
+ return r;
+}
+
+// return 0 if same
+static int UU()
+compare_dbs(DB *compare_db1, DB *compare_db2) {
+ //This does not lock the dbs/grab table locks.
+ //This means that you CANNOT CALL THIS while another thread is modifying the db.
+ //You CAN call it while a txn is open however.
+ int rval = 0;
+ DB_TXN *compare_txn;
+ int r, r1, r2;
+ r = env->txn_begin(env, NULL, &compare_txn, DB_READ_UNCOMMITTED);
+ CKERR(r);
+ DBC *c1;
+ DBC *c2;
+ r = compare_db1->cursor(compare_db1, compare_txn, &c1, 0);
+ CKERR(r);
+ r = compare_db2->cursor(compare_db2, compare_txn, &c2, 0);
+ CKERR(r);
+
+ DBT key1, val1;
+ DBT key2, val2;
+
+ dbt_init_realloc(&key1);
+ dbt_init_realloc(&val1);
+ dbt_init_realloc(&key2);
+ dbt_init_realloc(&val2);
+
+ do {
+ r1 = c1->c_get(c1, &key1, &val1, DB_NEXT);
+ r2 = c2->c_get(c2, &key2, &val2, DB_NEXT);
+ assert(r1==0 || r1==DB_NOTFOUND);
+ assert(r2==0 || r2==DB_NOTFOUND);
+ if (r1!=r2) rval = 1;
+ else if (r1==0 && r2==0) {
+ //Both found
+ rval = verify_identical_dbts(&key1, &key2) |
+ verify_identical_dbts(&val1, &val2);
+ }
+ } while (r1==0 && r2==0 && rval==0);
+ c1->c_close(c1);
+ c2->c_close(c2);
+ if (key1.data) toku_free(key1.data);
+ if (val1.data) toku_free(val1.data);
+ if (key2.data) toku_free(key2.data);
+ if (val2.data) toku_free(val2.data);
+ compare_txn->commit(compare_txn, 0);
+ return rval;
+}
+
+
+static void UU()
+dir_create(const char *envdir) {
+ int r;
+ toku_os_recursive_delete(envdir);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+}
+
+// pass in zeroes for default cachesize
+static void UU()
+env_startup(const char *envdir, int64_t bytes, int recovery_flags) {
+ int r;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp);
+ CKERR(r);
+ if (bytes) {
+ r = env->set_cachesize(env, bytes >> 30, bytes % (1<<30), 1);
+ CKERR(r);
+ }
+ int envflags = DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | recovery_flags;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 0); //Disable auto-checkpointing.
+ CKERR(r);
+}
+
+static void UU()
+env_shutdown(void) {
+ int r;
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+static void UU()
+fill_name(DICTIONARY d, char *buf, int bufsize) {
+ int bytes;
+ bytes = snprintf(buf, bufsize, "%s_%08x", d->filename, d->num);
+ assert(bytes>0);
+ assert(bytes>(int)strlen(d->filename));
+ assert(bytes<bufsize);
+ assert(buf[bytes] == 0);
+}
+
+static void UU()
+fill_full_name(const char *envdir, DICTIONARY d, char *buf, int bufsize) {
+ int bytes;
+ bytes = snprintf(buf, bufsize, "%s/%s_%08x", envdir, d->filename, d->num);
+ assert(bytes>0);
+ assert(bytes>(int)strlen(d->filename));
+ assert(bytes<bufsize);
+ assert(buf[bytes] == 0);
+}
+
+static void UU()
+db_startup(DICTIONARY d, DB_TXN *open_txn) {
+ int r;
+ r = db_create(&d->db, env, 0);
+ CKERR(r);
+ DB *db = d->db;
+ if (d->flags) {
+ r = db->set_flags(db, d->flags);
+ CKERR(r);
+ }
+ //Want to simulate much larger test.
+ //Small nodesize means many nodes.
+ db->set_pagesize(db, 1<<10);
+ {
+ char name[MAX_NAME*2];
+ fill_name(d, name, sizeof(name));
+ r = db->open(db, open_txn, name, NULL, DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ }
+ {
+ DBT desc;
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ dbt_init(&desc, "foo", sizeof("foo"));
+ { int chk_r = db->change_descriptor(db, txn_desc, &desc,0); CKERR(chk_r); }
+ });
+ }
+}
+
+static void UU()
+db_shutdown(DICTIONARY d) {
+ int r;
+ r = d->db->close(d->db, 0);
+ CKERR(r);
+ d->db = NULL;
+}
+
+static void UU()
+null_dictionary(DICTIONARY d) {
+ memset(d, 0, sizeof(*d));
+}
+
+static void UU()
+init_dictionary(DICTIONARY d, uint32_t flags, const char *name) {
+ null_dictionary(d);
+ d->flags = flags;
+ strcpy(d->filename, name);
+}
+
+
+static void UU()
+db_delete(DICTIONARY d) {
+ db_shutdown(d);
+ int r;
+ {
+ char name[MAX_NAME*2];
+ fill_name(d, name, sizeof(name));
+ r = env->dbremove(env, NULL, name, NULL, 0);
+ CKERR(r);
+ }
+ null_dictionary(d);
+}
+
+// Create a new dictionary (dest) with a new dname that has same contents as given dictionary (src).
+// Method:
+// create new dictionary
+// close new dictionary
+// get inames of both dictionaries
+// copy file (by iname) of src to dest
+// open dest dictionary
+static void UU()
+dbcpy(const char *envdir, DICTIONARY dest, DICTIONARY src, DB_TXN *open_txn) {
+ int r;
+
+ assert(dest->db == NULL);
+ *dest = *src;
+ dest->db = NULL;
+ dest->num++;
+
+ db_startup(dest, open_txn);
+ db_shutdown(dest);
+
+ char dest_dname[MAX_NAME*2];
+ fill_name(dest, dest_dname, sizeof(dest_dname));
+
+ char src_dname[MAX_NAME*2];
+ fill_name(src, src_dname, sizeof(src_dname));
+
+ DBT dest_dname_dbt;
+ DBT dest_iname_dbt;
+ DBT src_dname_dbt;
+ DBT src_iname_dbt;
+
+ dbt_init(&dest_dname_dbt, dest_dname, strlen(dest_dname)+1);
+ dbt_init(&dest_iname_dbt, NULL, 0);
+ dest_iname_dbt.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &dest_dname_dbt, &dest_iname_dbt);
+ CKERR(r);
+
+ dbt_init(&src_dname_dbt, src_dname, strlen(src_dname)+1);
+ dbt_init(&src_iname_dbt, NULL, 0);
+ src_iname_dbt.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &src_dname_dbt, &src_iname_dbt);
+ CKERR(r);
+
+ char * CAST_FROM_VOIDP(src_iname, src_iname_dbt.data);
+ char * CAST_FROM_VOIDP(dest_iname, dest_iname_dbt.data);
+
+ int bytes;
+
+ char command[sizeof("cp -f ") + strlen(src_iname)+ 2 * (strlen(envdir) + strlen("/ ")) + strlen(dest_iname)];
+ bytes = snprintf(command, sizeof(command), "cp -f %s/%s %s/%s", envdir, src_iname, envdir, dest_iname);
+ assert(bytes<(int)sizeof(command));
+
+ toku_free(src_iname);
+ toku_free(dest_iname);
+
+ r = system(command);
+ CKERR(r);
+ db_startup(dest, open_txn);
+}
+
+static void UU()
+db_replace(const char *envdir, DICTIONARY d, DB_TXN *open_txn) {
+ //Replaces a dictionary with a physical copy that is reopened.
+ //Filename is changed by incrementing the number.
+ //This should be equivalent to 'rollback to checkpoint'.
+ //The DB* disappears.
+ DICTIONARY_S temp;
+ null_dictionary(&temp);
+ dbcpy(envdir, &temp, d, open_txn);
+ db_delete(d);
+ *d = temp;
+}
+
+static void UU()
+insert_random(DB *db1, DB *db2, DB_TXN *txn) {
+ int64_t v = random();
+ int64_t k = ((int64_t)(random()) << 32) + v;
+ int r;
+ DBT key;
+ DBT val;
+ dbt_init(&key, &k, sizeof(k));
+ dbt_init(&val, &v, sizeof(v));
+
+ if (db1) {
+ r = db1->put(db1, txn, &key, &val, 0);
+ CKERR(r);
+ }
+ if (db2) {
+ r = db2->put(db2, txn, &key, &val, 0);
+ CKERR(r);
+ }
+}
+
+static void UU()
+delete_both_random(DB *db1, DB *db2, DB_TXN *txn, uint32_t flags) {
+ int64_t k = random64();
+ int r;
+ DBT key;
+ dbt_init(&key, &k, sizeof(k));
+
+ if (db1) {
+ r = db1->del(db1, txn, &key, flags);
+ CKERR2s(r, 0, DB_NOTFOUND);
+ }
+ if (db2) {
+ r = db2->del(db2, txn, &key, flags);
+ CKERR2s(r, 0, DB_NOTFOUND);
+ }
+}
+
+
+
+static void UU()
+delete_fixed(DB *db1, DB *db2, DB_TXN *txn, int64_t k, uint32_t flags) {
+ int r;
+ DBT key;
+
+ dbt_init(&key, &k, sizeof(k));
+
+ if (db1) {
+ r = db1->del(db1, txn, &key, flags);
+ CKERR2s(r, 0, DB_NOTFOUND);
+ }
+ if (db2) {
+ r = db2->del(db2, txn, &key, flags);
+ CKERR2s(r, 0, DB_NOTFOUND);
+ }
+}
+
+static void UU()
+delete_n(DB *db1, DB *db2, DB_TXN *txn, int firstkey, int n, uint32_t flags) {
+ int i;
+ for (i=0;i<n;i++) {
+ delete_fixed(db1, db2, txn, firstkey+i, flags);
+ }
+}
+
+static void
+insert_n(DB *db1, DB *db2, DB_TXN *txn, int firstkey, int n, int offset) {
+ int64_t k;
+ int64_t v;
+ int r;
+ DBT key;
+ DBT val;
+ int i;
+
+ // printf("enter %s, iter = %d\n", __FUNCTION__, iter);
+ // printf("db1 = 0x%08lx, db2 = 0x%08lx, *txn = 0x%08lx, firstkey = %d, n = %d\n",
+ // (unsigned long) db1, (unsigned long) db2, (unsigned long) txn, firstkey, n);
+
+ fflush(stdout);
+
+ for (i = 0; i<n; i++) {
+ int64_t kk = firstkey+i;
+ v = generate_val(kk) + offset;
+ k = (kk<<32) + v;
+ //printf("I(%32lx,%32lx)\n", k, v);
+ dbt_init(&key, &k, sizeof(k));
+ dbt_init(&val, &v, sizeof(v));
+ if (db1) {
+ r = db1->put(db1, txn, &key, &val, 0);
+ CKERR(r);
+ }
+ if (db2) {
+ r = db2->put(db2, txn, &key, &val, 0);
+ CKERR(r);
+ }
+ }
+}
+
+
+static void UU()
+insert_n_broken(DB *db1, DB *db2, DB_TXN *txn, int firstkey, int n) {
+ insert_n(db1, db2, txn, firstkey, n, 2718);
+}
+
+
+static void UU()
+insert_n_fixed(DB *db1, DB *db2, DB_TXN *txn, int firstkey, int n) {
+ insert_n(db1, db2, txn, firstkey, n, 0);
+}
+
+
+// assert that correct values are in expected rows
+static void UU()
+verify_sequential_rows(DB* compare_db, int64_t firstkey, int64_t numkeys) {
+ //This does not lock the dbs/grab table locks.
+ //This means that you CANNOT CALL THIS while another thread is modifying the db.
+ //You CAN call it while a txn is open however.
+ DB_TXN *compare_txn;
+ int r, r1;
+
+ assert(numkeys >= 1);
+ r = env->txn_begin(env, NULL, &compare_txn, DB_READ_UNCOMMITTED);
+ CKERR(r);
+ DBC *c1;
+
+ r = compare_db->cursor(compare_db, compare_txn, &c1, 0);
+ CKERR(r);
+
+
+ DBT key1, val1;
+ DBT key2, val2;
+
+ int64_t k, v;
+
+ dbt_init_realloc(&key1);
+ dbt_init_realloc(&val1);
+
+ dbt_init(&key2, &k, sizeof(k));
+ dbt_init(&val2, &v, sizeof(v));
+
+ v = generate_val(firstkey);
+ k = (firstkey<<32) + v;
+ r1 = c1->c_get(c1, &key2, &val2, DB_SET);
+ CKERR(r1);
+
+ int64_t i;
+ for (i = 1; i<numkeys; i++) {
+ int64_t kk = firstkey+i;
+ v = generate_val(kk);
+ k = (kk<<32) + v;
+ r1 = c1->c_get(c1, &key1, &val1, DB_NEXT);
+ assert(r1==0);
+ assert(key1.size==8 && val1.size==8 && *(int64_t*)key1.data==k && *(int64_t*)val1.data==v);
+ }
+ // now verify that there are no rows after the last expected
+ r1 = c1->c_get(c1, &key1, &val1, DB_NEXT);
+ assert(r1 == DB_NOTFOUND);
+
+ c1->c_close(c1);
+ if (key1.data) toku_free(key1.data);
+ if (val1.data) toku_free(val1.data);
+ compare_txn->commit(compare_txn, 0);
+}
+
+
+
+static void UU()
+snapshot(DICTIONARY d, int do_checkpoint) {
+ if (do_checkpoint) {
+ int r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+ }
+ else {
+ db_shutdown(d);
+ db_startup(d, NULL);
+ }
+}
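+
+/* A minimal sketch (illustrative, not compiled) of how these helpers compose,
+ mirroring the way checkpoint_stress.cc drives them; the dictionary name and
+ row counts here are arbitrary:
+
+   dir_create(TOKU_TEST_FILENAME);
+   env_startup(TOKU_TEST_FILENAME, 0, 0);      // zeroes => default cachesize, no recovery
+   DICTIONARY_S d;
+   init_dictionary(&d, 0, "example");
+   db_startup(&d, NULL);
+   insert_n_fixed(d.db, NULL, NULL, 0, 1000);  // keys 0..999
+   snapshot(NULL, 1);                          // take a checkpoint
+   verify_sequential_rows(d.db, 0, 1000);
+   db_shutdown(&d);
+   env_shutdown();
+*/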
diff --git a/storage/tokudb/PerconaFT/src/tests/create-datadir.cc b/storage/tokudb/PerconaFT/src/tests/create-datadir.cc
new file mode 100644
index 00000000..4845a7dd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/create-datadir.cc
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test data directories
+
+#include <sys/stat.h>
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, "a.db", NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, "bdir/b.db", NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ CKERR(r); //Success, so need a new handle
+ r = db->close(db, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ char path[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(path, 2, TOKU_TEST_FILENAME, "bdir"), 0777); assert(r == 0);
+ r = db->open(db, NULL, "bdir/b.db", NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+
+ r = toku_os_mkdir(toku_path_join(path, 2, TOKU_TEST_FILENAME, "cdir"), 0777); assert(r == 0);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_data_dir(env, "cdir"); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, "c.db", NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+}
+
+const char *cmd;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h]\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/cursor-isolation.cc b/storage/tokudb/PerconaFT/src/tests/cursor-isolation.cc
new file mode 100644
index 00000000..e748f957
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/cursor-isolation.cc
@@ -0,0 +1,136 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test that flag settings for cursor isolation work
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ {
+ DB_TXN *txna;
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txna, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ DBT key,val;
+ r = db->put(db, txna, dbt_init(&key, "a", 2), dbt_init(&val, "a", 2), 0); CKERR(r);
+
+ r = txna->commit(txna, 0); CKERR(r);
+ }
+
+ DB_TXN *txn_serializable, *txn_committed, *txn_uncommitted;
+ DBC* cursor = NULL;
+ r = env->txn_begin(env, NULL, &txn_serializable, DB_SERIALIZABLE); CKERR(r);
+ r = env->txn_begin(env, NULL, &txn_committed, DB_READ_COMMITTED); CKERR(r);
+ r = env->txn_begin(env, NULL, &txn_uncommitted, DB_READ_UNCOMMITTED); CKERR(r);
+
+
+ r = db->cursor(db, txn_serializable, &cursor, DB_SERIALIZABLE|DB_READ_COMMITTED); CKERR2(r, EINVAL);
+ r = db->cursor(db, txn_serializable, &cursor, DB_SERIALIZABLE|DB_READ_UNCOMMITTED); CKERR2(r, EINVAL);
+ r = db->cursor(db, txn_serializable, &cursor, DB_READ_UNCOMMITTED|DB_READ_COMMITTED); CKERR2(r, EINVAL);
+
+
+ r = db->cursor(db, txn_serializable, &cursor, 0); CKERR(r);
+ r = cursor->c_close(cursor); CKERR(r);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_serializable, &cursor, DB_SERIALIZABLE); CKERR(r);
+ r = cursor->c_close(cursor); CKERR(r);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_serializable, &cursor, DB_READ_COMMITTED); CKERR2(r, EINVAL);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_serializable, &cursor, DB_READ_UNCOMMITTED); CKERR2(r, EINVAL);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_committed, &cursor, 0); CKERR(r);
+ r = cursor->c_close(cursor); CKERR(r);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_committed, &cursor, DB_SERIALIZABLE); CKERR(r);
+ r = cursor->c_close(cursor); CKERR(r);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_committed, &cursor, DB_READ_COMMITTED); CKERR2(r, EINVAL);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_committed, &cursor, DB_READ_UNCOMMITTED); CKERR2(r, EINVAL);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_uncommitted, &cursor, 0); CKERR(r);
+ r = cursor->c_close(cursor); CKERR(r);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_uncommitted, &cursor, DB_SERIALIZABLE); CKERR(r);
+ r = cursor->c_close(cursor); CKERR(r);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_uncommitted, &cursor, DB_READ_COMMITTED); CKERR2(r, EINVAL);
+ cursor = NULL;
+
+ r = db->cursor(db, txn_uncommitted, &cursor, DB_READ_UNCOMMITTED); CKERR2(r, EINVAL);
+ cursor = NULL;
+
+
+
+
+ r = txn_serializable->commit(txn_serializable, 0); CKERR(r);
+ r = txn_committed->commit(txn_committed, 0); CKERR(r);
+ r = txn_uncommitted->commit(txn_uncommitted, 0); CKERR(r);
+
+
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/cursor-more-than-a-leaf-provdel.cc b/storage/tokudb/PerconaFT/src/tests/cursor-more-than-a-leaf-provdel.cc
new file mode 100644
index 00000000..2f2b964b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/cursor-more-than-a-leaf-provdel.cc
@@ -0,0 +1,145 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/stat.h>
+
+static DB_ENV *env;
+static DB *db;
+DB_TXN *txn;
+
+const int num_insert = 25000;
+
+static void
+setup (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_env_create(&env, 0); CKERR(r);
+ r=env->set_redzone(env, 0); CKERR(r);
+ r=env->set_default_bt_compare(env, int_dbt_cmp); CKERR(r);
+ env->set_errfile(env, stderr);
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static void
+test_shutdown (void) {
+ int r;
+ r= db->close(db, 0); CKERR(r);
+ r= env->close(env, 0); CKERR(r);
+}
+
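+// committed_provdels == true: commit the provisional deletes (and reopen a
+// txn/cursor) before inserting the two sentinel rows; false: the same cursor
+// must step over its own uncommitted (provisional) deletes.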
+static void
+doit (bool committed_provdels) {
+ DBT key,data;
+ DBC *dbc;
+ int r;
+ int i;
+ int j;
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (i = 0; i < num_insert; i++) {
+ j = (i<<1) + 37;
+ r=db->put(db, txn, dbt_init(&key, &i, sizeof(i)), dbt_init(&data, &j, sizeof(j)), 0);
+ }
+ r=txn->commit(txn, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->cursor(db, txn, &dbc, 0); CKERR(r);
+ for (i = 0; i < num_insert; i++) {
+ j = (i<<1) + 37;
+ r = dbc->c_get(dbc, &key, &data, DB_NEXT); CKERR(r);
+ assert(*(int*)key.data == i);
+ assert(*(int*)data.data == j);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); CKERR(r);
+ }
+ r = dbc->c_get(dbc, &key, &data, DB_NEXT); CKERR2(r, DB_NOTFOUND);
+ r = dbc->c_get(dbc, &key, &data, DB_FIRST); CKERR2(r, DB_NOTFOUND);
+ if (committed_provdels) {
+ r = dbc->c_close(dbc); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->cursor(db, txn, &dbc, 0); CKERR(r);
+ }
+ int ifirst, ilast, jfirst, jlast;
+ ilast=2*num_insert;
+ jlast=(ilast<<1)+37;
+ ifirst=-1*num_insert;
+ jfirst=(ifirst<<1)+37;
+ r=db->put(db, txn, dbt_init(&key, &ifirst, sizeof(ifirst)), dbt_init(&data, &jfirst, sizeof(jfirst)), 0);
+ CKERR(r);
+ r=db->put(db, txn, dbt_init(&key, &ilast, sizeof(ilast)), dbt_init(&data, &jlast, sizeof(jlast)), 0);
+ CKERR(r);
+
+ r = dbc->c_get(dbc, dbt_init(&key, NULL, 0), dbt_init(&data, NULL, 0), DB_FIRST); CKERR(r);
+ assert(*(int*)key.data == ifirst);
+ assert(*(int*)data.data == jfirst);
+ r = dbc->c_get(dbc, dbt_init(&key, NULL, 0), dbt_init(&data, NULL, 0), DB_NEXT); CKERR(r);
+ assert(*(int*)key.data == ilast);
+ assert(*(int*)data.data == jlast);
+ r = dbc->c_get(dbc, dbt_init(&key, NULL, 0), dbt_init(&data, NULL, 0), DB_LAST); CKERR(r);
+ assert(*(int*)key.data == ilast);
+ assert(*(int*)data.data == jlast);
+ r = dbc->c_get(dbc, dbt_init(&key, NULL, 0), dbt_init(&data, NULL, 0), DB_PREV); CKERR(r);
+ assert(*(int*)key.data == ifirst);
+ assert(*(int*)data.data == jfirst);
+ r = dbc->c_close(dbc); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+
+ setup();
+ doit(true);
+ test_shutdown();
+ setup();
+ doit(false);
+ test_shutdown();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/cursor-set-del-rmw.cc b/storage/tokudb/PerconaFT/src/tests/cursor-set-del-rmw.cc
new file mode 100644
index 00000000..5ebdf82b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/cursor-set-del-rmw.cc
@@ -0,0 +1,146 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Test whether a point query (DB_SET) in one transaction is blocked by another
+// transaction that has prelocked the whole range and deleted a different row,
+// for various isolation levels and RMW flag combinations.
+
+static void test_del_rmw(DB_ENV *env, DB *db, uint32_t t1_flags, uint32_t t2_flags, uint32_t c1_flags, uint32_t c2_flags, int expect_r) {
+ int r;
+
+ {
+ DB_TXN *write_txn = NULL;
+ r = env->txn_begin(env, NULL, &write_txn, 0); assert_zero(r);
+ for (int i = 1; i <= 3; i++) {
+ int k = htonl(i); int v = i;
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v, sizeof v);
+ r = db->put(db, write_txn, &key, &val, 0); assert_zero(r);
+ }
+ r = write_txn->commit(write_txn, 0); assert_zero(r);
+ }
+
+ {
+ DB_TXN *txn1 = NULL;
+ r = env->txn_begin(env, NULL, &txn1, t1_flags); assert_zero(r);
+
+ DB_TXN *txn2 = NULL;
+ r = env->txn_begin(env, NULL, &txn2, t2_flags); assert_zero(r);
+
+ DBC *c1 = NULL;
+ r = db->cursor(db, txn1, &c1, c1_flags); assert_zero(r);
+
+ DBC *c2 = NULL;
+ r = db->cursor(db, txn2, &c2, c2_flags); assert_zero(r);
+
+ r = c1->c_set_bounds(c1, db->dbt_neg_infty(), db->dbt_pos_infty(), true, 0); assert_zero(r);
+
+ int k = htonl(2);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, txn1, &key, 0); assert_zero(r);
+
+ k = htonl(1);
+ DBT val; memset(&val, 0, sizeof val);
+ r = c2->c_get(c2, &key, &val, DB_SET); assert(r == expect_r);
+
+ r = c1->c_close(c1); assert_zero(r);
+ r = c2->c_close(c2); assert_zero(r);
+
+ r = txn1->commit(txn1, 0); assert_zero(r);
+ r = txn2->commit(txn2, 0); assert_zero(r);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ int r;
+
+ const char *env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "rmwtest";
+
+ parse_args(argc, argv);
+
+ char rm_cmd[strlen(env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", env_dir);
+ r = system(rm_cmd); assert_zero(r);
+
+ r = toku_os_mkdir(env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert_zero(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+ int env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ r = env->open(env, env_dir, env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+ DB_TXN *create_txn = NULL;
+ r = env->txn_begin(env, NULL, &create_txn, 0); assert_zero(r);
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+ r = create_txn->commit(create_txn, 0); assert_zero(r);
+
+ // t1: prelock read, del(2)
+ // t2: set(1)
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_READ_UNCOMMITTED, 0, 0, 0);
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_READ_COMMITTED, 0, 0, 0);
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_TXN_SNAPSHOT, 0, 0, 0);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_SERIALIZABLE, 0, 0, DB_LOCK_NOTGRANTED);
+#else
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_SERIALIZABLE, 0, 0, 0);
+#endif
+
+ // t1: prelock write, del(2)
+ // t2: set(1)
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_READ_UNCOMMITTED, DB_RMW, 0, 0);
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_READ_COMMITTED, DB_RMW, 0, 0);
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_TXN_SNAPSHOT , DB_RMW, 0, 0);
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_SERIALIZABLE, DB_RMW, 0, DB_LOCK_NOTGRANTED);
+
+ // t1: prelock write, del(2)
+ // t2: rmw set(1)
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_READ_UNCOMMITTED, DB_RMW, DB_RMW, DB_LOCK_NOTGRANTED);
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_READ_COMMITTED, DB_RMW, DB_RMW, DB_LOCK_NOTGRANTED);
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_TXN_SNAPSHOT , DB_RMW, DB_RMW, DB_LOCK_NOTGRANTED);
+ test_del_rmw(env, db, DB_SERIALIZABLE, DB_SERIALIZABLE, DB_RMW, DB_RMW, DB_LOCK_NOTGRANTED);
+
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/cursor-set-range-rmw.cc b/storage/tokudb/PerconaFT/src/tests/cursor-set-range-rmw.cc
new file mode 100644
index 00000000..6a19da73
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/cursor-set-range-rmw.cc
@@ -0,0 +1,160 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that the DB_RMW flag on cursor create grabs write locks for cursor set operations
+
+static void test_create_rmw(DB_ENV *env, DB *db, int k, uint32_t txn1_flags, uint32_t txn2_flags, int expect_r) {
+ int r;
+
+ DB_TXN *txn1 = NULL;
+ r = env->txn_begin(env, NULL, &txn1, 0); assert_zero(r);
+
+ DB_TXN *txn2 = NULL;
+ r = env->txn_begin(env, NULL, &txn2, 0); assert_zero(r);
+
+ DBC *c1 = NULL;
+ r = db->cursor(db, txn1, &c1, txn1_flags); assert_zero(r);
+
+ DBC *c2 = NULL;
+ r = db->cursor(db, txn2, &c2, txn2_flags); assert_zero(r);
+
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; memset(&val, 0, sizeof val);
+ r = c1->c_get(c1, &key, &val, DB_SET); assert_zero(r);
+
+ r = c2->c_get(c2, &key, &val, DB_SET); assert(r == expect_r);
+
+ r = c1->c_close(c1); assert_zero(r);
+ r = c2->c_close(c2); assert_zero(r);
+
+ r = txn1->commit(txn1, 0); assert_zero(r);
+ r = txn2->commit(txn2, 0); assert_zero(r);
+}
+
+// verify that the DB_RMW flag to the cursor set operations grabs write locks
+
+static void test_set_rmw(DB_ENV *env, DB *db, int k, uint32_t txn1_flags, uint32_t txn2_flags, int expect_r) {
+ int r;
+
+ DB_TXN *txn1 = NULL;
+ r = env->txn_begin(env, NULL, &txn1, 0); assert_zero(r);
+
+ DB_TXN *txn2 = NULL;
+ r = env->txn_begin(env, NULL, &txn2, 0); assert_zero(r);
+
+ DBC *c1 = NULL;
+ r = db->cursor(db, txn1, &c1, 0); assert_zero(r);
+
+ DBC *c2 = NULL;
+ r = db->cursor(db, txn2, &c2, 0); assert_zero(r);
+
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; memset(&val, 0, sizeof val);
+ r = c1->c_get(c1, &key, &val, DB_SET + txn1_flags); assert_zero(r);
+
+ r = c2->c_get(c2, &key, &val, DB_SET + txn2_flags); assert(r == expect_r);
+
+ r = c1->c_close(c1); assert_zero(r);
+ r = c2->c_close(c2); assert_zero(r);
+
+ r = txn1->commit(txn1, 0); assert_zero(r);
+ r = txn2->commit(txn2, 0); assert_zero(r);
+}
+
+int test_main(int argc, char * const argv[]) {
+ int r;
+
+ const char *env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "rmwtest";
+
+ parse_args(argc, argv);
+
+ char rm_cmd[strlen(env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", env_dir);
+ r = system(rm_cmd); assert_zero(r);
+
+ r = toku_os_mkdir(env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert_zero(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+ int env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ r = env->open(env, env_dir, env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+ DB_TXN *create_txn = NULL;
+ r = env->txn_begin(env, NULL, &create_txn, 0); assert_zero(r);
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+ r = create_txn->commit(create_txn, 0); assert_zero(r);
+
+ DB_TXN *write_txn = NULL;
+ r = env->txn_begin(env, NULL, &write_txn, 0); assert_zero(r);
+
+ int k = htonl(42); int v = 42;
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v, sizeof v);
+ r = db->put(db, write_txn, &key, &val, DB_NOOVERWRITE); assert_zero(r);
+ r = write_txn->commit(write_txn, 0); assert_zero(r);
+
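+ // When BLOCKING_ROW_LOCKS_READS_NOT_SHARED is defined, read locks are not
+ // shared, so even two plain DB_SET reads of the same key conflict and the
+ // expected result flips to DB_LOCK_NOTGRANTED.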
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ test_set_rmw(env, db, k, 0, 0, DB_LOCK_NOTGRANTED);
+#else
+ test_set_rmw(env, db, k, 0, 0, 0);
+#endif
+ test_set_rmw(env, db, k, 0, DB_RMW, DB_LOCK_NOTGRANTED);
+ test_set_rmw(env, db, k, DB_RMW, 0, DB_LOCK_NOTGRANTED);
+ test_set_rmw(env, db, k, DB_RMW, DB_RMW, DB_LOCK_NOTGRANTED);
+
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ test_create_rmw(env, db, k, 0, 0, DB_LOCK_NOTGRANTED);
+#else
+ test_create_rmw(env, db, k, 0, 0, 0);
+#endif
+ test_create_rmw(env, db, k, 0, DB_RMW, DB_LOCK_NOTGRANTED);
+ test_create_rmw(env, db, k, DB_RMW, 0, DB_LOCK_NOTGRANTED);
+ test_create_rmw(env, db, k, DB_RMW, DB_RMW, DB_LOCK_NOTGRANTED);
+
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/cursor-step-over-delete.cc b/storage/tokudb/PerconaFT/src/tests/cursor-step-over-delete.cc
new file mode 100644
index 00000000..eac7d3d7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/cursor-step-over-delete.cc
@@ -0,0 +1,108 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/stat.h>
+
+static DB_ENV *env;
+static DB *db;
+DB_TXN *txn;
+
+static void
+test_setup (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static void
+test_shutdown (void) {
+ int r;
+ r= db->close(db, 0); CKERR(r);
+ r= env->close(env, 0); CKERR(r);
+}
+
+static void
+doit (void) {
+ DBT key,data;
+ int r;
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->put(db, txn, dbt_init(&key, "a", 2), dbt_init(&data, "a", 2), 0);
+ r=db->put(db, txn, dbt_init(&key, "b", 2), dbt_init(&data, "b", 2), 0);
+ r=db->put(db, txn, dbt_init(&key, "c", 2), dbt_init(&data, "c", 2), 0);
+ r=txn->commit(txn, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->del(db, txn, dbt_init(&key, "b", 2), 0); assert(r==0);
+ r=txn->commit(txn, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ DBC *dbc;
+ r = db->cursor(db, txn, &dbc, 0); assert(r==0);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ r = dbc->c_get(dbc, &key, &data, DB_FIRST); assert(r==0);
+ assert(strcmp((char*)key.data, "a")==0);
+ assert(strcmp((char*)data.data, "a")==0);
+ r = dbc->c_get(dbc, &key, &data, DB_NEXT); assert(r==0);
+ assert(strcmp((char*)key.data, "c")==0);
+ assert(strcmp((char*)data.data, "c")==0);
+ r = dbc->c_close(dbc); assert(r==0);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+
+ test_setup();
+ doit();
+ test_shutdown();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/db-put-simple-deadlock-threads.cc b/storage/tokudb/PerconaFT/src/tests/db-put-simple-deadlock-threads.cc
new file mode 100644
index 00000000..e5bcc6af
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/db-put-simple-deadlock-threads.cc
@@ -0,0 +1,242 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test demonstrates that the lock manager can detect a simple deadlock with 2 transactions on 2 threads
+// the threads do:
+// T(a) put 0, grabs write lock on 0
+// T(b) put N-1, grabs write lock on N-1
+// T(a) put N-1, tries to grab write lock on N-1, should return lock not granted
+// T(b) put 0, tries to grab write lock on 0, should return lock not granted (a deadlock would otherwise result)
+// T(a) abort
+// T(b) abort
+
+#include "test.h"
+#include "toku_pthread.h"
+
+struct test_seq {
+ int state;
+ toku_mutex_t lock;
+ toku_cond_t cv;
+};
+
+static void test_seq_init(struct test_seq *seq) {
+ seq->state = 0;
+ toku_mutex_init(toku_uninstrumented, &seq->lock, nullptr);
+ toku_cond_init(toku_uninstrumented, &seq->cv, nullptr);
+}
+
+static void test_seq_destroy(struct test_seq *seq) {
+ toku_mutex_destroy(&seq->lock);
+ toku_cond_destroy(&seq->cv);
+}
+
+static void test_seq_sleep(struct test_seq *seq, int new_state) {
+ toku_mutex_lock(&seq->lock);
+ while (seq->state != new_state) {
+ toku_cond_wait(&seq->cv, &seq->lock);
+ }
+ toku_mutex_unlock(&seq->lock);
+}
+
+static void test_seq_next_state(struct test_seq *seq) {
+ toku_mutex_lock(&seq->lock);
+ seq->state++;
+ toku_cond_broadcast(&seq->cv);
+ toku_mutex_unlock(&seq->lock);
+}
+
+static void insert_row(DB *db, DB_TXN *txn, int k, int v, int expect_r) {
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT value; dbt_init(&value, &v, sizeof v);
+ int r = db->put(db, txn, &key, &value, 0); assert(r == expect_r);
+}
+
+struct run_txn_b_arg {
+ struct test_seq *test_seq;
+ DB_TXN *txn_b;
+ DB *db;
+ int n;
+};
+
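+// Lock-step interleaving driven by test_seq: even states (0, 2, 4) belong to
+// txn_a on the main thread (simple_deadlock), odd states (1, 3, 5) to txn_b here.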
+static void *run_txn_b(void *arg) {
+ struct run_txn_b_arg *b_arg = (struct run_txn_b_arg *) arg;
+ struct test_seq *test_seq = b_arg->test_seq;
+ DB_TXN *txn_b = b_arg->txn_b;
+ DB *db = b_arg->db;
+ int n = b_arg->n;
+
+ test_seq_sleep(test_seq, 1);
+ insert_row(db, txn_b, htonl(n-1), n-1, 0);
+ test_seq_next_state(test_seq);
+
+ test_seq_sleep(test_seq, 3);
+ insert_row(db, txn_b, htonl(0), 0, DB_LOCK_NOTGRANTED);
+ test_seq_next_state(test_seq);
+
+ test_seq_sleep(test_seq, 5);
+ int r = txn_b->abort(txn_b); assert(r == 0);
+
+ return arg;
+}
+
+static void simple_deadlock(DB_ENV *db_env, DB *db, int do_txn, int n) {
+ int r;
+
+ DB_TXN *txn_init = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &txn_init, 0); assert(r == 0);
+ }
+
+ for (int k = 0; k < n; k++) {
+ insert_row(db, txn_init, htonl(k), k, 0);
+ }
+
+ if (do_txn) {
+ r = txn_init->commit(txn_init, 0); assert(r == 0);
+ }
+
+ uint32_t txn_flags = 0;
+
+ DB_TXN *txn_a = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &txn_a, txn_flags); assert(r == 0);
+ }
+
+ DB_TXN *txn_b = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &txn_b, txn_flags); assert(r == 0);
+ }
+
+ struct test_seq test_seq; ZERO_STRUCT(test_seq); test_seq_init(&test_seq);
+
+ toku_pthread_t tid;
+ struct run_txn_b_arg arg = {&test_seq, txn_b, db, n};
+ r = toku_pthread_create(
+ toku_uninstrumented, &tid, nullptr, run_txn_b, &arg);
+
+ test_seq_sleep(&test_seq, 0);
+ insert_row(db, txn_a, htonl(0), 0, 0);
+ test_seq_next_state(&test_seq);
+
+ test_seq_sleep(&test_seq, 2);
+ insert_row(db, txn_a, htonl(n-1), n-1, DB_LOCK_NOTGRANTED);
+ test_seq_next_state(&test_seq);
+
+ test_seq_sleep(&test_seq, 4);
+ if (do_txn) {
+ r = txn_a->abort(txn_a); assert(r == 0);
+ }
+ test_seq_next_state(&test_seq);
+
+ void *ret = NULL;
+ r = toku_pthread_join(tid, &ret); assert(r == 0);
+
+ test_seq_destroy(&test_seq);
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ int do_txn = 1;
+ int nrows = 1000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "simple_deadlock";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "-n") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ if (!do_txn)
+ db_env_open_flags &= ~(DB_INIT_TXN | DB_INIT_LOG);
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 0, nullptr); assert(r == 0); // no wait
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ DB_TXN *create_txn = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &create_txn, 0); assert(r == 0);
+ }
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ if (do_txn) {
+ r = create_txn->commit(create_txn, 0); assert(r == 0);
+ }
+
+ // run test
+ simple_deadlock(db_env, db, do_txn, nrows);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/db-put-simple-deadlock.cc b/storage/tokudb/PerconaFT/src/tests/db-put-simple-deadlock.cc
new file mode 100644
index 00000000..c208a462
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/db-put-simple-deadlock.cc
@@ -0,0 +1,158 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test demonstrates how tokudb handles a simple deadlock between 2 transactions
+// on a single thread: the conflicting puts return DB_LOCK_NOTGRANTED instead of blocking
+
+#include "test.h"
+
+static void insert_row(DB *db, DB_TXN *txn, int k, int v, int expect_r) {
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT value; dbt_init(&value, &v, sizeof v);
+ int r = db->put(db, txn, &key, &value, 0); assert(r == expect_r);
+}
+
+static void simple_deadlock(DB_ENV *db_env, DB *db, int do_txn, int n) {
+ int r;
+
+ DB_TXN *txn_init = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &txn_init, 0); assert(r == 0);
+ }
+
+ for (int k = 0; k < n; k++) {
+ insert_row(db, txn_init, htonl(k), k, 0);
+ }
+
+ if (do_txn) {
+ r = txn_init->commit(txn_init, 0); assert(r == 0);
+ }
+
+ uint32_t txn_flags = 0;
+
+ DB_TXN *txn_a = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &txn_a, txn_flags); assert(r == 0);
+ }
+
+ DB_TXN *txn_b = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &txn_b, txn_flags); assert(r == 0);
+ }
+
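+    // txn_a first locks row 0 and txn_b locks row n-1; each then requests the row
+    // the other holds, and those puts are expected to fail with DB_LOCK_NOTGRANTED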
+ insert_row(db, txn_a, htonl(0), 0, 0);
+
+ insert_row(db, txn_b, htonl(n-1), n-1, 0);
+
+ insert_row(db, txn_a, htonl(n-1), n-1, DB_LOCK_NOTGRANTED);
+
+ insert_row(db, txn_b, htonl(0), 0, DB_LOCK_NOTGRANTED);
+
+ if (do_txn) {
+ r = txn_a->commit(txn_a, 0); assert(r == 0);
+ r = txn_b->commit(txn_b, 0); assert(r == 0);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ int do_txn = 1;
+ int nrows = 1000;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "simple_deadlock";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "-n") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ if (!do_txn)
+ db_env_open_flags &= ~(DB_INIT_TXN | DB_INIT_LOG);
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ DB_TXN *create_txn = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &create_txn, 0); assert(r == 0);
+ }
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ if (do_txn) {
+ r = create_txn->commit(create_txn, 0); assert(r == 0);
+ }
+
+ // run test
+ simple_deadlock(db_env, db, do_txn, nrows);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/db-put-simple-lockwait.cc b/storage/tokudb/PerconaFT/src/tests/db-put-simple-lockwait.cc
new file mode 100644
index 00000000..0fbe8515
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/db-put-simple-lockwait.cc
@@ -0,0 +1,190 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// T(a) put 0
+// T(b) put 0, should block
+// T(c) put 0, should block
+// T(a) commit
+// T(b) put 0 succeeds
+// T(b) commit
+// T(c) put 0 succeeds
+
+#include "test.h"
+#include "toku_pthread.h"
+
+static void insert_row(DB *db, DB_TXN *txn, int k, int v, int expect_r) {
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT value; dbt_init(&value, &v, sizeof v);
+ int r = db->put(db, txn, &key, &value, 0); assert(r == expect_r);
+}
+
+struct insert_one_arg {
+ DB_TXN *txn;
+ DB *db;
+};
+
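+// thread body: put row 0 under the given transaction (blocking until the row lock
+// is granted), then commit the transaction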
+static void *insert_one(void *arg) {
+ struct insert_one_arg *f_arg = (struct insert_one_arg *) arg;
+ DB_TXN *txn = f_arg->txn;
+ DB *db = f_arg->db;
+
+ insert_row(db, txn, htonl(0), 0, 0);
+ if (txn) {
+ int r = txn->commit(txn, 0); assert(r == 0);
+ }
+ return arg;
+}
+
+static void simple_lockwait(DB_ENV *db_env, DB *db, int do_txn, int nrows, int ntxns) {
+ int r;
+
+ DB_TXN *txn_init = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &txn_init, 0); assert(r == 0);
+ }
+ for (int k = 0; k < nrows; k++) {
+ insert_row(db, txn_init, htonl(k), k, 0);
+ }
+ if (do_txn) {
+ r = txn_init->commit(txn_init, 0); assert(r == 0);
+ }
+
+ DB_TXN *txns[ntxns];
+ for (int i = 0; i < ntxns; i++) {
+ txns[i] = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &txns[i], 0); assert(r == 0);
+ }
+ }
+
+ insert_row(db, txns[0], htonl(0), 0, 0);
+
+ toku_pthread_t tids[ntxns];
+ for (int i = 1; i < ntxns; i++) {
+ struct insert_one_arg *XMALLOC(arg);
+ *arg = (struct insert_one_arg){txns[i], db};
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, insert_one, arg);
+ }
+
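+    // the other transactions are now blocked waiting for txns[0]'s lock on row 0;
+    // give them time to block, then commit txns[0] so their puts can be granted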
+ sleep(10);
+ if (do_txn) {
+ r = txns[0]->commit(txns[0], 0); assert(r == 0);
+ }
+
+ for (int i = 1; i < ntxns; i++) {
+ void *ret = NULL;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0); toku_free(ret);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ int do_txn = 1;
+ int nrows = 1000;
+ int ntxns = 2;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "simple_lockwait";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--ntxns") == 0 && i+1 < argc) {
+ ntxns = atoi(argv[++i]);
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ if (!do_txn)
+ db_env_open_flags &= ~(DB_INIT_TXN | DB_INIT_LOG);
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ DB_TXN *create_txn = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &create_txn, 0); assert(r == 0);
+ }
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ if (do_txn) {
+ r = create_txn->commit(create_txn, 0); assert(r == 0);
+ }
+
+ // run test
+ simple_lockwait(db_env, db, do_txn, nrows, ntxns);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/db-put-update-deadlock.cc b/storage/tokudb/PerconaFT/src/tests/db-put-update-deadlock.cc
new file mode 100644
index 00000000..7a968058
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/db-put-update-deadlock.cc
@@ -0,0 +1,240 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// for all i: T(i) reads 0, gets a read lock on 0
+// for all i: T(i) writes 0, enters a deadlock
+// tokudb detects deadlock on the fly
+// --poll runs the deadlock detector until all the txns are resolved
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <portability/toku_atomic.h>
+
+static void write_row(DB *db, DB_TXN *txn, int k, int v, int expect_r) {
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT value; dbt_init(&value, &v, sizeof v);
+ int r = db->put(db, txn, &key, &value, 0); assert(r == expect_r);
+}
+
+static void read_row(DB *db, DB_TXN *txn, int k, int expect_r) {
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT value; dbt_init_malloc(&value);
+ int r = db->get(db, txn, &key, &value, 0); assert(r == expect_r);
+ toku_free(value.data);
+}
+
+static volatile int n_txns;
+
+struct write_one_arg {
+ DB_TXN *txn;
+ DB *db;
+ int k;
+ int v;
+};
+
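+// thread body: try to write one row; the put either succeeds or the txn is chosen as
+// a deadlock victim (DB_LOCK_DEADLOCK), and the txn commits or aborts accordingly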
+static void *write_one_f(void *arg) {
+ struct write_one_arg *f_arg = (struct write_one_arg *) arg;
+ DB_TXN *txn = f_arg->txn;
+ DB *db = f_arg->db;
+ int k = f_arg->k;
+ int v = f_arg->v;
+
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT value; dbt_init(&value, &v, sizeof v);
+ int r = db->put(db, txn, &key, &value, 0);
+ if (verbose)
+ printf("%s %p %d\n", __FUNCTION__, arg, r);
+ assert(r == 0 || r == DB_LOCK_DEADLOCK);
+ if (r == 0) {
+ r = txn->commit(txn, 0); assert(r == 0);
+ } else {
+ r = txn->abort(txn); assert(r == 0);
+ }
+ (void) toku_sync_fetch_and_sub(&n_txns, 1);
+
+ return arg;
+}
+
+static void update_deadlock(DB_ENV *db_env, DB *db, int do_txn, int nrows, int ntxns, int poll_deadlock UU()) {
+ int r;
+
+ // populate the initial tree
+ DB_TXN *txn_init = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &txn_init, 0); assert(r == 0);
+ }
+ for (int k = 0; k < nrows; k++) {
+ write_row(db, txn_init, htonl(k), k, 0);
+ }
+ if (do_txn) {
+ r = txn_init->commit(txn_init, 0); assert(r == 0);
+ }
+
+ // create the transactions
+ n_txns = ntxns;
+ DB_TXN *txns[ntxns];
+ for (int i = 0; i < ntxns; i++) {
+ txns[i] = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &txns[i], 0); assert(r == 0);
+ }
+ }
+
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+    // spice this test up a bit when read locks are not shared.
+    // test that a dining-philosophers-style deadlock is detected
+ // by having each txn take a distinct read lock, and then request
+ // a write lock on the value "next" to it (i + 1 mod ntxns)
+
+ // get read locks
+ for (int i = 0; i < ntxns; i++) {
+ read_row(db, txns[i], htonl(i), 0);
+ }
+
+ // get write locks
+ toku_pthread_t tids[ntxns];
+ for (int i = 0; i < ntxns; i++) {
+ struct write_one_arg *XMALLOC(arg);
+ *arg =
+ (struct write_one_arg){txns[i], db, (int)htonl((i + 1) % ntxns), 0};
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, write_one_f, arg);
+ }
+#else
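+    // when read locks are shared, every txn first takes a shared read lock on row 0;
+    // the deadlock comes from each txn then trying to upgrade to a write lock on row 0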
+ // get read locks
+ for (int i = 0; i < ntxns; i++) {
+ read_row(db, txns[i], htonl(0), 0);
+ }
+
+ // get write locks
+ toku_pthread_t tids[ntxns];
+ for (int i = 0; i < ntxns; i++) {
+ struct write_one_arg *XMALLOC(arg);
+ *arg = (struct write_one_arg){txns[i], db, (int)htonl(0), 0};
+ r = toku_pthread_create(
+ toku_uninstrumented, &tids[i], nullptr, write_one_f, arg);
+ }
+#endif
+
+ // cleanup
+ for (int i = 0; i < ntxns; i++) {
+ void *ret = NULL;
+ r = toku_pthread_join(tids[i], &ret); assert(r == 0); toku_free(ret);
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+ int do_txn = 1;
+ int nrows = 1000;
+ int ntxns = 2;
+ int poll_deadlock = 0;
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "simple_deadlock";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_THREAD;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0 || strcmp(argv[i], "--quiet") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--ntxns") == 0 && i+1 < argc) {
+ ntxns = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--poll") == 0) {
+ poll_deadlock = 1;
+ continue;
+ }
+ assert(0);
+ }
+
+ // setup env
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert(r == 0);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0);
+
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert(r == 0);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0);
+ }
+ if (!do_txn)
+ db_env_open_flags &= ~(DB_INIT_TXN | DB_INIT_LOG);
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ r = db_env->set_lock_timeout(db_env, 30 * 1000, nullptr); assert(r == 0);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert(r == 0);
+ DB_TXN *create_txn = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &create_txn, 0); assert(r == 0);
+ }
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0);
+ if (do_txn) {
+ r = create_txn->commit(create_txn, 0); assert(r == 0);
+ }
+
+ // run test
+ update_deadlock(db_env, db, do_txn, nrows, ntxns, poll_deadlock);
+
+ // close env
+ r = db->close(db, 0); assert(r == 0); db = NULL;
+ r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/dbremove-nofile-limit.cc b/storage/tokudb/PerconaFT/src/tests/dbremove-nofile-limit.cc
new file mode 100644
index 00000000..1d656efa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/dbremove-nofile-limit.cc
@@ -0,0 +1,125 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// This test verifies that the env->dbremove function returns an error rather than
+// crashing when the NOFILE resource limit is exceeded.
+
+#include "test.h"
+#include <db.h>
+#include <sys/resource.h>
+
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static void test_dbremove() {
+ int r;
+
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ char fname[32];
+ sprintf(fname, "db%d", 0);
+ r = db->open(db, nullptr, fname, nullptr, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, nullptr, &txn, 0); CKERR(r);
+
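+    // drop the open-file limit to zero so dbremove cannot open the dictionary
+    // (it should return EMFILE), then restore the limit and retry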
+ struct rlimit current_limit;
+ r = getrlimit(RLIMIT_NOFILE, &current_limit);
+ assert(r == 0);
+
+ struct rlimit new_limit = current_limit;
+ new_limit.rlim_cur = 0;
+ r = setrlimit(RLIMIT_NOFILE, &new_limit);
+ assert(r == 0);
+
+ r = env->dbremove(env, txn, fname, nullptr, 0);
+ CKERR2(r, EMFILE);
+
+ r = setrlimit(RLIMIT_NOFILE, &current_limit);
+ assert(r == 0);
+
+ r = env->dbremove(env, txn, fname, nullptr, 0);
+ CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: %s -h -v -q\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ test_dbremove();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/del-multiple-huge-primary-row.cc b/storage/tokudb/PerconaFT/src/tests/del-multiple-huge-primary-row.cc
new file mode 100644
index 00000000..66105053
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/del-multiple-huge-primary-row.cc
@@ -0,0 +1,240 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that del_multiple logs individual delete log entries in the recovery log when
+// the primary row is huge, i.e. when the sum of the log sizes of the individual deletes is smaller than the log size of the whole primary row.
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(i + dbnum);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
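+// generate the secondary key for dest_db from the primary row: the dictionary number
+// stored in dest_db's descriptor selects one int out of the primary value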
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data) {
+ toku_dbt_array_resize(dest_keys, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ (void) dest_db; (void) src_db; (void) dest_keys; (void) src_key; (void) src_data;
+ assert(src_db == NULL);
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_data->size / sizeof (int));
+ int *pri_data = (int *) src_data->data;
+
+ assert(dest_key->flags == 0);
+ dest_key->size = sizeof (int);
+ dest_key->data = &pri_data[dbnum];
+
+ return 0;
+}
+
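+// verify that key k is still write-locked by the pending delete: a del from another
+// transaction must fail with DB_LOCK_NOTGRANTED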
+static void
+verify_locked(DB_ENV *env, DB *db, int k) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); assert(r == DB_LOCK_NOTGRANTED);
+ r = txn->abort(txn); assert_zero(r);
+}
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ }
+ assert_zero(i);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static inline int
+max(int a, int b) {
+ return a < b ? b : a;
+}
+
+static void
+verify_del_multiple(DB_ENV *env, DB *db[], int ndbs, int nrows) {
+ int r;
+ DB_TXN *deltxn = NULL;
+ r = env->txn_begin(env, NULL, &deltxn, 0); assert_zero(r);
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+        int v[max(ndbs,1024)]; memset(v, 0, sizeof v); get_data(v, i, ndbs); // zero the unused tail of the huge primary value
+ DBT pri_data; dbt_init(&pri_data, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_del_multiple_test_no_array(env, NULL, deltxn, &pri_key, &pri_data, ndbs, db, keys, flags); assert_zero(r);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_locked(env, db[dbnum], get_key(i, dbnum));
+ }
+ r = deltxn->commit(deltxn, 0); assert_zero(r);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_empty(env, db[dbnum]);
+}
+
+static void
+populate_primary(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[max(ndbs, 1024)]; memset(v, 0, sizeof v); get_data(v, i, ndbs);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v[0], sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_secondary(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, dbnum);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0); assert_zero(r);
+
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if (dbnum == 0)
+ populate_primary(env, db[dbnum], ndbs, nrows);
+ else
+ populate_secondary(env, db[dbnum], dbnum, nrows);
+ }
+
+ verify_del_multiple(env, db, ndbs, nrows);
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ r = db[dbnum]->close(db[dbnum], 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int ndbs = 2;
+ int nrows = 2;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test(ndbs, nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/del-multiple-srcdb.cc b/storage/tokudb/PerconaFT/src/tests/del-multiple-srcdb.cc
new file mode 100644
index 00000000..02580390
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/del-multiple-srcdb.cc
@@ -0,0 +1,235 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that del_multiple deletes the correct key from N dictionaries
+// verify that del_multiple locks the correct key for N dictionaries
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(i + dbnum);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data) {
+ toku_dbt_array_resize(dest_keys, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ (void) dest_db; (void) src_db; (void) dest_keys; (void) src_key; (void) src_data;
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_data->size / sizeof (int));
+
+ int *pri_data = (int *) src_data->data;
+
+ assert(dest_key->flags == 0);
+ dest_key->size = sizeof (int);
+ dest_key->data = &pri_data[dbnum];
+
+ return 0;
+}
+
+static void
+verify_locked(DB_ENV *env, DB *db, int k) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); assert(r == DB_LOCK_NOTGRANTED);
+ r = txn->abort(txn); assert_zero(r);
+}
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ }
+ assert_zero(i);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_del_multiple(DB_ENV *env, DB *db[], int ndbs, int nrows) {
+ int r;
+ DB_TXN *deltxn = NULL;
+ r = env->txn_begin(env, NULL, &deltxn, 0); assert_zero(r);
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT pri_data; dbt_init(&pri_data, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_del_multiple_test_no_array(env, ndbs > 0 ? db[0] : NULL, deltxn, &pri_key, &pri_data, ndbs, db, keys, flags); assert_zero(r);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_locked(env, db[dbnum], get_key(i, dbnum));
+ }
+ r = deltxn->commit(deltxn, 0); assert_zero(r);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_empty(env, db[dbnum]);
+}
+
+static void
+populate_primary(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v[0], sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_secondary(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, dbnum);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0); assert_zero(r);
+
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if (dbnum == 0)
+ populate_primary(env, db[dbnum], ndbs, nrows);
+ else
+ populate_secondary(env, db[dbnum], dbnum, nrows);
+ }
+
+ verify_del_multiple(env, db, ndbs, nrows);
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ r = db[dbnum]->close(db[dbnum], 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int ndbs = 2;
+ int nrows = 2;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test(ndbs, nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/del-multiple.cc b/storage/tokudb/PerconaFT/src/tests/del-multiple.cc
new file mode 100644
index 00000000..db9f1b7b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/del-multiple.cc
@@ -0,0 +1,236 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that del_multiple deletes the correct key from N dictionaries
+// verify that del_multiple locks the correct key for N dictionaries
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(i + dbnum);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data) {
+ toku_dbt_array_resize(dest_keys, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ (void) dest_db; (void) src_db; (void) dest_keys; (void) src_key; (void) src_data;
+ assert(src_db == NULL);
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_data->size / sizeof (int));
+
+ int *pri_data = (int *) src_data->data;
+
+ assert(dest_key->flags == 0);
+ dest_key->size = sizeof (int);
+ dest_key->data = &pri_data[dbnum];
+
+ return 0;
+}
+
+static void
+verify_locked(DB_ENV *env, DB *db, int k) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); assert(r == DB_LOCK_NOTGRANTED);
+ r = txn->abort(txn); assert_zero(r);
+}
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ }
+ assert_zero(i);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_del_multiple(DB_ENV *env, DB *db[], int ndbs, int nrows) {
+ int r;
+ DB_TXN *deltxn = NULL;
+ r = env->txn_begin(env, NULL, &deltxn, 0); assert_zero(r);
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT pri_data; dbt_init(&pri_data, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_del_multiple_test_no_array(env, NULL, deltxn, &pri_key, &pri_data, ndbs, db, keys, flags); assert_zero(r);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_locked(env, db[dbnum], get_key(i, dbnum));
+ }
+ r = deltxn->commit(deltxn, 0); assert_zero(r);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_empty(env, db[dbnum]);
+}
+
+static void
+populate_primary(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v[0], sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_secondary(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, dbnum);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0); assert_zero(r);
+
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if (dbnum == 0)
+ populate_primary(env, db[dbnum], ndbs, nrows);
+ else
+ populate_secondary(env, db[dbnum], dbnum, nrows);
+ }
+
+ verify_del_multiple(env, db, ndbs, nrows);
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ r = db[dbnum]->close(db[dbnum], 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int ndbs = 2;
+ int nrows = 2;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test(ndbs, nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/del-simple.cc b/storage/tokudb/PerconaFT/src/tests/del-simple.cc
new file mode 100644
index 00000000..20b8742f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/del-simple.cc
@@ -0,0 +1,152 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that deletes on single dictionaries delete the correct key and create the correct lock
+
+static void
+verify_locked(DB_ENV *env, DB *db, int k) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); assert(r == DB_LOCK_NOTGRANTED);
+ r = txn->abort(txn); assert_zero(r);
+}
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ }
+ assert_zero(i);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
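+// delete every row inside one transaction; each deleted key must stay locked until
+// the transaction commits, after which the dictionary must be empty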
+static void
+verify_del(DB_ENV *env, DB *db, int nrows) {
+ int r;
+ DB_TXN *deltxn = NULL;
+ r = env->txn_begin(env, NULL, &deltxn, 0); assert_zero(r);
+ for (int i = 0; i < nrows; i++) {
+ int k = htonl(i);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, deltxn, &key, DB_DELETE_ANY); assert_zero(r);
+ verify_locked(env, db, k);
+ }
+ r = deltxn->commit(deltxn, 0); assert_zero(r);
+ verify_empty(env, db);
+}
+
+static void
+test_del(int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+
+ r = db->open(db, NULL, "test.db", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = htonl(i);
+ int v = i;
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v, sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+
+ verify_del(env, db, nrows);
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int nrows = 2;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ test_del(nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/directory_lock.cc b/storage/tokudb/PerconaFT/src/tests/directory_lock.cc
new file mode 100644
index 00000000..b28a7170
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/directory_lock.cc
@@ -0,0 +1,390 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test directory locking: verify that row operations on a dictionary fail with DB_LOCK_NOTGRANTED
+// while a loader holds it, and that exclusive operations such as dbrename fail while other transactions have pending operations on it.
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+
+static int
+put_multiple_callback(DB *dest_db UU(), DB *src_db UU(), DBT_ARRAY *dest_keys UU(), DBT_ARRAY *dest_vals UU(), const DBT *src_key UU(), const DBT *src_val UU()) {
+ return 0;
+}
+
+static int
+del_multiple_callback(DB *dest_db UU(), DB *src_db UU(), DBT_ARRAY *dest_keys UU(), const DBT *src_key UU(), const DBT *src_val UU()) {
+ return 0;
+}
+
+static int update_fun(DB *UU(db),
+ const DBT *UU(key),
+ const DBT *UU(old_val), const DBT *UU(extra),
+ void UU((*set_val)(const DBT *new_val,
+ void *set_extra)),
+ void *UU(set_extra)) {
+ return 0;
+}
+
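+// every row operation on the dictionary is expected to fail with DB_LOCK_NOTGRANTED
+// (called below while a loader is open on the dictionary)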
+static void verify_shared_ops_fail(DB_ENV* env, DB* db) {
+ int r;
+ DB_TXN* txn = NULL;
+ uint32_t flags = 0;
+ DBT key,val;
+ DBT in_key,in_val;
+ uint32_t in_key_data = 0, in_val_data = 0;
+ memset(&in_key, 0, sizeof(in_key));
+ memset(&in_val, 0, sizeof(in_val));
+ in_key.size = sizeof(in_key_data);
+ in_val.size = sizeof(in_val_data);
+ in_key.data = &in_key_data;
+ in_val.data = &in_val_data;
+ in_key.flags = DB_DBT_USERMEM;
+ in_val.flags = DB_DBT_USERMEM;
+ in_key.ulen = sizeof(in_key_data);
+ in_val.ulen = sizeof(in_val_data);
+ DBT in_keys[2];
+ memset(&in_keys, 0, sizeof(in_keys));
+ dbt_init(&key, "a", 2);
+ dbt_init(&val, "a", 2);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = db->put(
+ db,
+ txn,
+ &key,
+ &val,
+ 0
+ );
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = db->del(
+ db,
+ txn,
+ &key,
+ DB_DELETE_ANY
+ );
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = env_put_multiple_test_no_array(
+ env, db, txn,
+ &key, &val,
+ 1, &db, &in_key, &in_val, &flags);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = env_put_multiple_test_no_array(
+ env, NULL, txn,
+ &key, &val,
+ 1, &db, &in_key, &in_val, &flags);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+ flags = DB_DELETE_ANY;
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = env_del_multiple_test_no_array(
+ env, db, txn,
+ &key, &val,
+ 1, &db, &in_key, &flags);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = env_del_multiple_test_no_array(
+ env, NULL, txn,
+ &key, &val,
+ 1, &db, &in_key, &flags);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+ flags = 0;
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = env_update_multiple_test_no_array(
+ env, NULL, txn,
+ &key, &val,
+ &key, &val,
+ 1, &db, &flags,
+ 2, in_keys,
+ 1, &in_val);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = env_update_multiple_test_no_array(
+ env, db, txn,
+ &key, &val,
+ &key, &val,
+ 1, &db, &flags,
+ 2, in_keys,
+ 1, &in_val);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+
+ DBT extra_up;
+ dbt_init(&extra_up, NULL, 0);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = db->update(
+ db,
+ txn,
+ &key,
+ &extra_up,
+ 0
+ );
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = db->update_broadcast(db, txn, &extra_up, 0);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = db->update_broadcast(db, txn, &extra_up, DB_IS_RESETTING_OP);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = txn->commit(txn,0); CKERR(r);
+
+}
+
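+// exclusive directory operations such as dbrename are expected to be refused while
+// other transactions have pending operations on the dictionary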
+static void verify_excl_ops_fail(DB_ENV* env, const char* name) {
+ DB_TXN* txn = NULL;
+ int r;
+ DBT extra_up;
+ dbt_init(&extra_up, NULL, 0);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = env->dbrename(env, txn, name, NULL, "asdf.db", 0);
+ CKERR2(r, EINVAL);
+ r = txn->commit(txn,0); CKERR(r);
+
+}
+
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int r;
+ DBT in_key,in_val;
+ uint32_t in_key_data = 123456;
+ uint32_t in_val_data = 654321;
+ memset(&in_key, 0, sizeof(in_key));
+ memset(&in_val, 0, sizeof(in_val));
+ in_key.size = sizeof(in_key_data);
+ in_val.size = sizeof(in_val_data);
+ in_key.data = &in_key_data;
+ in_val.data = &in_val_data;
+ in_key.flags = DB_DBT_USERMEM;
+ in_val.flags = DB_DBT_USERMEM;
+ in_key.ulen = sizeof(in_key_data);
+ in_val.ulen = sizeof(in_val_data);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB_LOADER* loader = NULL;
+ uint32_t put_flags = 0;
+ uint32_t dbt_flags = 0;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_callback);
+ CKERR(r);
+ r = env->set_generate_row_callback_for_del(env, del_multiple_callback);
+ CKERR(r);
+ env->set_update(env, update_fun);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB* db;
+ DB* db2;
+
+ DB_TXN* txna = NULL;
+ DB_TXN* txnb = NULL;
+
+
+ //
+ // transactionally create dictionary
+ //
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = db_create(&db2, env, 0); CKERR(r);
+ r = db2->open(db2, txna, "foo2.db", NULL, DB_BTREE, DB_CREATE|DB_IS_HOT_INDEX, 0666); CKERR(r);
+ verify_excl_ops_fail(env, "foo2.db");
+ r = txna->commit(txna, 0); CKERR(r);
+
+
+ //
+ // transactionally create dictionary
+ //
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txna, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ r = txna->commit(txna, 0); CKERR(r);
+
+ //
+ // create loader
+ //
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = env->create_loader(env, txna, &loader, NULL, 1, &db, &put_flags, &dbt_flags, 0); CKERR(r);
+ verify_shared_ops_fail(env,db);
+ r = loader->abort(loader); CKERR(r);
+ loader=NULL;
+ r = txna->commit(txna, 0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnb, 0); CKERR(r);
+ DBT key,val;
+ dbt_init(&key, "a", 2);
+ dbt_init(&val, "a", 2);
+ r = db->put(db, txna, &key, &val, 0); CKERR(r);
+ dbt_init(&key, "b", 2);
+ dbt_init(&val, "b", 2);
+ r = db->put(db, txnb, &key, &val, 0); CKERR(r);
+ verify_excl_ops_fail(env,"foo.db");
+ r = txna->abort(txna); CKERR(r);
+ r = txnb->abort(txnb); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnb, 0); CKERR(r);
+ dbt_init(&key, "a", 2);
+ r = db->del(db, txna, &key, DB_DELETE_ANY); CKERR(r);
+ dbt_init(&key, "b", 2);
+ r = db->del(db, txnb, &key, DB_DELETE_ANY); CKERR(r);
+ verify_excl_ops_fail(env,"foo.db");
+ r = txna->abort(txna); CKERR(r);
+ r = txnb->abort(txnb); CKERR(r);
+
+
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnb, 0); CKERR(r);
+ dbt_init(&key, "a", 2);
+ r = db->update(db, txna, &key, &val, 0); CKERR(r);
+ dbt_init(&key, "b", 2);
+ r = db->update(db, txnb, &key, &val, 0); CKERR(r);
+ verify_excl_ops_fail(env,"foo.db");
+ r = txna->abort(txna); CKERR(r);
+ r = txnb->abort(txnb); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = db->update_broadcast(db, txna, &val, 0); CKERR(r);
+ verify_excl_ops_fail(env,"foo.db");
+ r = txna->abort(txna); CKERR(r);
+
+ uint32_t flags = 0;
+
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnb, 0); CKERR(r);
+ dbt_init(&key, "a", 2);
+ dbt_init(&val, "a", 2);
+    r = env_put_multiple_test_no_array(
+ env, NULL, txna,
+ &key, &val,
+ 1, &db, &in_key, &in_val, &flags);
+ CKERR(r);
+ dbt_init(&key, "b", 2);
+ dbt_init(&val, "b", 2);
+    r = env_put_multiple_test_no_array(
+ env, NULL, txnb,
+ &key, &val,
+ 1, &db, &in_key, &in_val, &flags);
+ CKERR(r);
+ verify_excl_ops_fail(env,"foo.db");
+ r = txna->abort(txna); CKERR(r);
+ r = txnb->abort(txnb); CKERR(r);
+
+ flags = DB_DELETE_ANY;
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnb, 0); CKERR(r);
+ dbt_init(&key, "a", 2);
+ dbt_init(&val, "a", 2);
+    r = env_del_multiple_test_no_array(
+ env, NULL, txna,
+ &key, &val,
+ 1, &db, &in_key, &flags);
+ CKERR(r);
+ dbt_init(&key, "b", 2);
+ dbt_init(&val, "b", 2);
+    r = env_del_multiple_test_no_array(
+ env, db, txnb,
+ &key, &val,
+ 1, &db, &in_key, &flags);
+ CKERR(r);
+ verify_excl_ops_fail(env,"foo.db");
+ r = txna->abort(txna); CKERR(r);
+ r = txnb->abort(txnb); CKERR(r);
+
+ flags = 0;
+ DBT in_keys[2];
+ memset(&in_keys, 0, sizeof(in_keys));
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnb, 0); CKERR(r);
+ dbt_init(&key, "a", 2);
+ dbt_init(&val, "a", 2);
+    r = env_update_multiple_test_no_array(
+ env, NULL, txna,
+ &key, &val,
+ &key, &val,
+ 1, &db, &flags,
+ 2, in_keys,
+ 1, &in_val);
+ CKERR(r);
+ dbt_init(&key, "b", 2);
+ dbt_init(&val, "b", 2);
+    r = env_update_multiple_test_no_array(
+ env, db, txnb,
+ &key, &val,
+ &key, &val,
+ 1, &db, &flags,
+ 2, in_keys,
+ 1, &in_val);
+ CKERR(r);
+ verify_excl_ops_fail(env,"foo.db");
+ r = txna->abort(txna); CKERR(r);
+ r = txnb->abort(txnb); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = db2->close(db2, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/diskfull.cc b/storage/tokudb/PerconaFT/src/tests/diskfull.cc
new file mode 100644
index 00000000..0d27ad73
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/diskfull.cc
@@ -0,0 +1,254 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Simulate disk full by making pwrite return ENOSPC */
+/* Strategy: repeatedly run the test, and on the Ith run make the Ith write fail. */
+
+#include <fcntl.h>
+#include <errno.h>
+#include <unistd.h>
+#include <portability/toku_atomic.h>
+
+#define DOERR(r) do { if (r!=0) { did_fail=1; fprintf(error_file, "%s:%d error %d (%s)\n", __FILE__, __LINE__, r, db_strerror(r)); }} while (0)
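+// DOERR, unlike CKERR, tolerates a nonzero return code: it records the failure
+// in did_fail and logs it, because with ENOSPC injected at an arbitrary write
+// any of the operations below may legitimately fail.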
+
+static void
+do_db_work(void) {
+ int r;
+ int did_fail=0;
+ {
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ FILE *error_file = 0;
+ if (verbose==0) {
+ char errname[TOKU_PATH_MAX+1];
+ error_file = fopen(toku_path_join(errname, 2, TOKU_TEST_FILENAME, "stderr"), "w"); assert(error_file);
+ }
+ else error_file = stderr;
+
+ DB_ENV *env;
+ DB_TXN *tid;
+ DB *db;
+ DBT key,data;
+
+ r=db_env_create(&env, 0); assert(r==0);
+ r = env->set_redzone(env, 0); CKERR(r);
+ env->set_errfile(env, error_file ? error_file : stderr);
+ // Don't set the lg bsize for the small experiment.
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); DOERR(r);
+ if (did_fail) {
+ r=tid->abort(tid); CKERR(r);
+ } else {
+ r=tid->commit(tid, 0); DOERR(r);
+ }
+ if (did_fail) goto shutdown1;
+
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->put(db, tid, dbt_init(&key, "a", 2), dbt_init(&data, "b", 2), 0); DOERR(r);
+ if (did_fail) {
+ r = tid->abort(tid); CKERR2s(r, 0, ENOSPC);
+ } else {
+ r=tid->commit(tid, 0); DOERR(r);
+ }
+
+ shutdown1:
+ r=db->close(db, 0); DOERR(r);
+ r=env->close(env, 0); DOERR(r);
+ if (error_file && error_file!=stderr) fclose(error_file);
+ if (did_fail) return;
+ }
+ {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ FILE *error_file = 0;
+ if (verbose==0) {
+ char errname[TOKU_PATH_MAX+1];
+ error_file = fopen(toku_path_join(errname, 2, TOKU_TEST_FILENAME, "stderr"), "w"); assert(error_file);
+ }
+ else error_file = stderr;
+
+ DB_ENV *env;
+ DB_TXN *tid;
+ DB *db;
+ DBT key,data;
+
+ // Repeat with more put operations
+ r=db_env_create(&env, 0); assert(r==0);
+ r = env->set_redzone(env, 0); CKERR(r);
+ env->set_errfile(env, error_file ? error_file : stderr);
+ r=env->set_lg_bsize(env, 4096); assert(r==0);
+ r=env->set_cachesize(env, 0, 1, 1); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->set_pagesize(db, 4096);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); DOERR(r);
+ if (did_fail) {
+ r = tid->abort(tid); CKERR2s(r, 0, ENOSPC);
+ } else {
+ r=tid->commit(tid, 0); DOERR(r);
+ }
+ if (did_fail) goto shutdown2;
+
+ // Put an extra item in
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->put(db, tid, dbt_init(&key, "a", 2), dbt_init(&data, "b", 2), 0); DOERR(r);
+ if (did_fail) {
+ r=tid->abort(tid); CKERR(r);
+ } else {
+ r=tid->commit(tid, 0); DOERR(r);
+ }
+ if (did_fail) goto shutdown2;
+
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ {
+ int i;
+ for (i=0; i<100; i++) {
+ int kvsize=50;
+ int kvsize_i = kvsize / sizeof(int);
+ int keyi[kvsize_i],vali[kvsize_i];
+ int j;
+ keyi[0] = vali[0] = toku_htonl(i);
+ for (j=1; j<kvsize_i; j++) {
+ keyi[j] = random();
+ vali[j] = random();
+ }
+ r=db->put(db, tid, dbt_init(&key, keyi, sizeof keyi), dbt_init(&data, vali, sizeof vali), 0);
+ DOERR(r);
+ if (did_fail) goto break_out_of_loop;
+ }
+ }
+ break_out_of_loop:
+ //system("ls -l " TOKU_TEST_FILENAME);
+ if (did_fail) {
+ r = tid->abort(tid); CKERR2s(r, 0, ENOSPC);
+ } else {
+ r=tid->commit(tid, 0); DOERR(r);
+ }
+ shutdown2:
+ r=db->close(db, 0); DOERR(r);
+ r=env->close(env, 0); DOERR(r);
+ if (error_file && error_file!=stderr) fclose(error_file);
+ }
+}
+
+static volatile int write_count = 0;
+#define FAIL_NEVER 0x7FFFFFFF
+static int fail_at = FAIL_NEVER;
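+// Failure injection: every write issued through the hooks installed in
+// do_writes_that_fail() (pwrite, full_pwrite, write, full_write) bumps
+// write_count; once the count exceeds fail_at, the wrappers below return -1
+// with errno set to ENOSPC, simulating a disk that has just filled up.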
+
+static ssize_t
+pwrite_counting_and_failing (int fd, const void *buf, size_t size, toku_off_t off)
+{
+ int this_count = toku_sync_add_and_fetch(&write_count, 1);
+ if (this_count>fail_at) {
+ if (verbose>1) { printf("Failure imminent at %d:\n", fail_at); fflush(stdout); }
+ errno = ENOSPC;
+ return -1;
+ } else {
+ return pwrite(fd, buf, size, off);
+ }
+}
+
+static ssize_t
+write_counting_and_failing (int fd, const void *buf, size_t size)
+{
+ int this_count = toku_sync_add_and_fetch(&write_count, 1);
+ if (this_count>fail_at) {
+ if (verbose>1) { printf("Failure imminent at %d:\n", fail_at); fflush(stdout); }
+ errno = ENOSPC;
+ return -1;
+ } else {
+ return write(fd, buf, size);
+ }
+}
+
+static void
+do_writes_that_fail (void) {
+ if (verbose) { printf("About to fail at %d:\n", fail_at); fflush(stdout); }
+ toku_set_assert_on_write_enospc(true);
+ db_env_set_func_pwrite(pwrite_counting_and_failing);
+ db_env_set_func_full_pwrite(pwrite_counting_and_failing);
+ db_env_set_func_write (write_counting_and_failing);
+ db_env_set_func_full_write (write_counting_and_failing);
+ write_count=0;
+ do_db_work();
+ if (fail_at != FAIL_NEVER && write_count <= fail_at) {
+        abort(); // if we never reach the targeted write (because there are not enough writes), abort in fail_at mode so the run fails loudly instead of silently passing.
+ }
+ printf("%d", write_count);
+}
+
+static void
+diskfull_parse_args (int argc, char * const argv[]) {
+ int c;
+ char *argv0 = argv[0];
+    while ((c = getopt(argc, (char * const *)argv, "C:vq")) != -1) {
+ switch(c) {
+ case 'C':
+ fail_at = atoi(optarg);
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 'q':
+ verbose--;
+ if (verbose<0) verbose=0;
+ break;
+ default:
+do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q] [-C number]\n", argv0);
+ exit(1);
+ }
+ }
+ if (argc!=optind) {
+ goto do_usage;
+ }
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ diskfull_parse_args(argc, argv);
+ do_writes_that_fail();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/drd.suppressions b/storage/tokudb/PerconaFT/src/tests/drd.suppressions
new file mode 100644
index 00000000..ccf7abfe
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/drd.suppressions
@@ -0,0 +1,107 @@
+{
+ open_problem
+ drd:ConflictingAccess
+ fun:open64
+}
+
+{
+ read_problem
+ drd:ConflictingAccess
+ fun:read
+}
+
+{
+ write_problem
+ drd:ConflictingAccess
+ fun:write
+}
+
+{
+ pread_problem
+ drd:ConflictingAccess
+ fun:pread
+}
+
+{
+ pwrite_problem
+ drd:ConflictingAccess
+ fun:pwrite
+}
+
+{
+ fsync_problem
+ drd:ConflictingAccess
+ fun:fsync
+}
+
+{
+ lseek_problem
+ drd:ConflictingAccess
+ fun:lseek
+}
+
+{
+ close_problem
+ drd:ConflictingAccess
+ fun:close
+}
+
+{
+ centos62_open_problem
+ drd:ConflictingAccess
+ fun:open
+}
+
+{
+ centos62_pthread_clone_problem
+ drd:ConflictingAccess
+ fun:clone
+}
+{
+ EvictorSignalUnlocked
+ drd:CondRaceErr
+ fun:pthread_cond_signal@*
+ fun:_ZL16toku_cond_signalP9toku_cond
+ fun:_ZN7evictor22signal_eviction_threadEv
+}
+
+{
+ <insert_a_suppression_name_here>
+ drd:ConflictingAccess
+ ...
+ fun:_dl_runtime_resolve
+}
+{
+ <insert_a_suppression_name_here>
+ drd:ConflictingAccess
+ ...
+ fun:random
+}
+
+{
+ unsafe_touch_clock
+ drd:ConflictingAccess
+ fun:_ZL18unsafe_touch_clockP6ftnodei
+ ...
+}
+{
+ unsafe_read_single_txnid_optimization_possible
+ drd:ConflictingAccess
+ fun:_ZNK4toku8locktree46unsafe_read_single_txnid_optimization_possibleEv
+}
+{
+ unsafe_read_size_current
+ drd:ConflictingAccess
+ fun:_ZNK7evictor24unsafe_read_size_currentEv
+}
+{
+ signal_ev_thread_without_holding_mutex_for_condition
+ drd:CondRaceErr
+ fun:pthread_cond_signal@*
+ fun:_ZN7evictor22signal_eviction_threadEv
+}
+{
+ locktree_single_txn_optimization_unsafe_read_txn_is_valid
+ drd:ConflictingAccess
+ fun:_ZNK4toku8locktree25sto_txnid_is_valid_unsafeEv
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/dump-env.cc b/storage/tokudb/PerconaFT/src/tests/dump-env.cc
new file mode 100644
index 00000000..4a434968
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/dump-env.cc
@@ -0,0 +1,128 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/stat.h>
+
+static DB_ENV *env;
+static DB *db;
+DB_TXN *txn;
+
+
+static void
+setup (void) {
+ int r;
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r != 0) {
+ CKERR2(errno, EEXIST);
+ }
+
+ r=db_env_create(&env, 0); CKERR(r);
+ r=env->set_redzone(env, 0); CKERR(r);
+ r=env->set_default_bt_compare(env, int_dbt_cmp); CKERR(r);
+ env->set_errfile(env, stderr);
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static void
+test_shutdown (void) {
+ int r;
+ r= db->close(db, 0); CKERR(r);
+ r= env->close(env, 0); CKERR(r);
+}
+
+static void
+doit(void) {
+ int r;
+
+ DBC *dbc;
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = env->get_cursor_for_persistent_environment(env, txn, &dbc); CKERR(r);
+ DBT key;
+ DBT val;
+ dbt_init_realloc(&key);
+ dbt_init_realloc(&val);
+
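+    // Walk every key/value pair in the persistent environment dictionary;
+    // values whose size matches a 32-bit or 64-bit integer are printed as
+    // numbers, anything else is printed as a string.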
+ while ((r = dbc->c_get(dbc, &key, &val, DB_NEXT)) == 0) {
+ if (verbose) {
+ printf("ENTRY\n\tKEY [%.*s]",
+ key.size,
+ (char*)key.data);
+ if (val.size == sizeof(uint32_t)) {
+ //assume integer
+ printf("\n\tVAL [%" PRIu32"]\n",
+ toku_dtoh32(*(uint32_t*)val.data));
+ } else if (val.size == sizeof(uint64_t)) {
+ //assume 64 bit integer
+ printf("\n\tVAL [%" PRIu64"]\n",
+ toku_dtoh64(*(uint64_t*)val.data));
+ } else {
+ printf("\n\tVAL [%.*s]\n",
+ val.size,
+ (char*)val.data);
+ }
+ }
+ }
+ CKERR2(r, DB_NOTFOUND);
+ r = dbc->c_close(dbc);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ toku_free(key.data);
+ toku_free(val.data);
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+
+ setup();
+ doit();
+ test_shutdown();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/env-put-multiple.cc b/storage/tokudb/PerconaFT/src/tests/env-put-multiple.cc
new file mode 100644
index 00000000..5d1d0b69
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/env-put-multiple.cc
@@ -0,0 +1,322 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test verifies that env->put_multiple inserts the same rows into multiple dictionaries as the equivalent individual put operations, including flag handling and row-lock conflict behavior
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+enum {MAX_DBS = 64, MAX_KEY = 8, MAX_VAL = 8};
+DB *dbs_multiple[MAX_DBS];
+DB *dbs_single[MAX_DBS];
+char names_single[MAX_DBS][sizeof("dbs_0xFFF")];
+char names_multiple[MAX_DBS][sizeof("dbm_0xFFF")];
+uint32_t num_dbs;
+uint32_t flags[MAX_DBS];
+uint32_t ids[MAX_DBS];
+uint32_t kbuf[MAX_DBS][MAX_KEY/4];
+uint32_t vbuf[MAX_DBS][MAX_VAL/4];
+DBT dest_keys[MAX_DBS];
+DBT dest_vals[MAX_DBS];
+
+#define CKERRIFNOT0(r) do { if (num_dbs>0) { CKERR(r); } else { CKERR2(r, EINVAL); } } while (0)
+#define CKERR2IFNOT0(r, rexpect) do { if (num_dbs>0) { CKERR2(r, rexpect); } else { CKERR2(r, EINVAL); } } while (0)
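+// With zero dictionaries the multi-row operation is expected to be rejected
+// with EINVAL, so these macros treat num_dbs==0 as a special case and
+// otherwise check for the normally expected return code.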
+
+static int
+put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys_arrays, DBT_ARRAY *dest_datas, const DBT *src_key, const DBT *src_data) {
+ toku_dbt_array_resize(dest_keys_arrays, 1);
+ toku_dbt_array_resize(dest_datas, 1);
+ DBT *dest_key = &dest_keys_arrays->dbts[0];
+ DBT *dest_data = &dest_datas->dbts[0];
+ dest_key->flags = 0;
+ dest_data->flags = 0;
+
+ (void) src_db;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+ assert(which < MAX_DBS);
+
+ assert(src_key->size == 4);
+ assert(src_data->size == 4);
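+    // Build the destination row for dictionary `which`:
+    //   key = {src_key, which}, val = {which, src_val}  (two uint32_t each,
+    //   so every generated row is 8 bytes of key plus 8 bytes of value).
+    // The manual puts into dbs_single in run_test() mirror this layout.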
+ kbuf[which][0] = *(uint32_t*)src_key->data;
+ kbuf[which][1] = which;
+ vbuf[which][0] = which;
+ vbuf[which][1] = *(uint32_t*)src_data->data;
+ dest_key->data = kbuf[which];
+ dest_key->size = sizeof(kbuf[which]);
+ dest_data->data = vbuf[which];
+ dest_data->size = sizeof(vbuf[which]);
+ return 0;
+}
+
+static void run_test (void) {
+ int r;
+ if (verbose)
+ printf("env-put-multiple num_dbs[%u]\n", num_dbs);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ uint32_t which;
+ {
+ //Create dbs.
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ DB *db;
+ for (which = 0; which < num_dbs; which++) {
+ ids[which] = which;
+ r = db_create(&dbs_multiple[which], env, 0);
+ CKERR(r);
+ db = dbs_multiple[which];
+ r = db->open(db, txn, names_multiple[which], NULL, DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ db->app_private = &ids[which];
+ r = db_create(&dbs_single[which], env, 0);
+ CKERR(r);
+ db = dbs_single[which];
+ r = db->open(db, txn, names_single[which], NULL, DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ }
+
+
+ uint32_t magic = 0xDEADBEEF;
+ // txn_begin; insert magic number
+ {
+ for (which = 0; which < num_dbs; which++) {
+ flags[which] = 0;
+ }
+ memset(flags, 0, sizeof(flags)); //reset
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ uint32_t magic2 = ~magic;
+ DBT keydbt = {.data=&magic, .size=sizeof(magic)};
+ DBT valdbt = {.data=&magic2, .size=sizeof(magic2)};
+ r = env_put_multiple_test_no_array(env, NULL, txn, &keydbt, &valdbt, num_dbs, dbs_multiple, dest_keys, dest_vals, flags);
+ CKERRIFNOT0(r);
+ for (which = 0; which < num_dbs; which++) {
+ DBT key={.data = kbuf[which], .size = sizeof(kbuf[which])};
+ DBT val={.data = vbuf[which], .size = sizeof(vbuf[which])};
+ DB *db = dbs_single[which];
+ r = db->put(db, txn, &key, &val, flags[which]);
+ CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ }
+ {
+ //Insert again with 0, expect it to work.
+ for (which = 0; which < num_dbs; which++) {
+ flags[which] = 0;
+ }
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ uint32_t magic2 = ~magic;
+ DBT keydbt = {.data=&magic, .size=sizeof(magic)};
+ DBT valdbt = {.data=&magic2, .size=sizeof(magic2)};
+ r = env_put_multiple_test_no_array(env, NULL, txn, &keydbt, &valdbt, num_dbs, dbs_multiple, dest_keys, dest_vals, flags);
+ CKERRIFNOT0(r);
+ for (which = 0; which < num_dbs; which++) {
+ DBT key={.data = kbuf[which], .size = sizeof(kbuf[which])};
+ DBT val={.data = vbuf[which], .size = sizeof(vbuf[which])};
+ DB *db = dbs_single[which];
+ r = db->put(db, txn, &key, &val, flags[which]);
+ CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ }
+ {
+ //Insert again with DB_NOOVERWRITE, expect it to fail (unless 0 dbs).
+ for (which = 0; which < num_dbs; which++) {
+ flags[which] = DB_NOOVERWRITE;
+ }
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ uint32_t magic2 = ~magic;
+ DBT keydbt = {.data=&magic, .size=sizeof(magic)};
+ DBT valdbt = {.data=&magic2, .size=sizeof(magic2)};
+ r = env_put_multiple_test_no_array(env, NULL, txn, &keydbt, &valdbt, num_dbs, dbs_multiple, dest_keys, dest_vals, flags);
+ CKERR2IFNOT0(r, DB_KEYEXIST);
+ for (which = 0; which < num_dbs; which++) {
+ DBT key={.data = kbuf[which], .size = sizeof(kbuf[which])};
+ DBT val={.data = vbuf[which], .size = sizeof(vbuf[which])};
+ DB *db = dbs_single[which];
+ r = db->put(db, txn, &key, &val, flags[which]);
+ CKERR2(r, DB_KEYEXIST);
+ }
+ r = txn->commit(txn, 0);
+ }
+
+ {
+ //Different number
+ magic = 0xFEEDADAD;
+ //Insert again with 0, using 2 transactions, expect it to fail (unless 0 dbs).
+ for (which = 0; which < num_dbs; which++) {
+ flags[which] = 0;
+ }
+ DB_TXN *txna;
+ r = env->txn_begin(env, NULL, &txna, 0);
+ CKERR(r);
+
+ uint32_t magic2 = ~magic;
+ DBT keydbt = {.data=&magic, .size=sizeof(magic)};
+ DBT valdbt = {.data=&magic2, .size=sizeof(magic2)};
+ r = env_put_multiple_test_no_array(env, NULL, txna, &keydbt, &valdbt, num_dbs, dbs_multiple, dest_keys, dest_vals, flags);
+ CKERRIFNOT0(r);
+ for (which = 0; which < num_dbs; which++) {
+ DBT key={.data = kbuf[which], .size = sizeof(kbuf[which])};
+ DBT val={.data = vbuf[which], .size = sizeof(vbuf[which])};
+ DB *db = dbs_single[which];
+ r = db->put(db, txna, &key, &val, flags[which]);
+ CKERR(r);
+ }
+
+ DB_TXN *txnb;
+ r = env->txn_begin(env, NULL, &txnb, 0);
+ CKERR(r);
+
+ //Lock should fail
+ r = env_put_multiple_test_no_array(env, NULL, txnb, &keydbt, &valdbt, num_dbs, dbs_multiple, dest_keys, dest_vals, flags);
+ CKERR2IFNOT0(r, DB_LOCK_NOTGRANTED);
+ for (which = 0; which < num_dbs; which++) {
+ DBT key={.data = kbuf[which], .size = sizeof(kbuf[which])};
+ DBT val={.data = vbuf[which], .size = sizeof(vbuf[which])};
+ DB *db = dbs_single[which];
+ r = db->put(db, txnb, &key, &val, flags[which]);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ }
+ r = txna->commit(txna, 0);
+
+ //Should succeed this time.
+ r = env_put_multiple_test_no_array(env, NULL, txnb, &keydbt, &valdbt, num_dbs, dbs_multiple, dest_keys, dest_vals, flags);
+ CKERRIFNOT0(r);
+ for (which = 0; which < num_dbs; which++) {
+ DBT key={.data = kbuf[which], .size = sizeof(kbuf[which])};
+ DBT val={.data = vbuf[which], .size = sizeof(vbuf[which])};
+ DB *db = dbs_single[which];
+ r = db->put(db, txnb, &key, &val, flags[which]);
+ CKERR(r);
+ }
+
+ r = txnb->commit(txnb, 0);
+ }
+
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ DBC *c_single;
+ DBC *c_multiple;
+
+ DBT k_single, v_single, k_multiple, v_multiple;
+ memset(&k_single, 0, sizeof(k_single));
+ memset(&v_single, 0, sizeof(v_single));
+ memset(&k_multiple, 0, sizeof(k_multiple));
+ memset(&v_multiple, 0, sizeof(v_multiple));
+ for (which = 0; which < num_dbs; which++) {
+ r = dbs_multiple[which]->cursor(dbs_multiple[which], txn, &c_multiple, 0);
+ CKERR(r);
+ r = dbs_single[which]->cursor(dbs_single[which], txn, &c_single, 0);
+ CKERR(r);
+
+ int r1 = 0;
+ int r2;
+ while (r1 == 0) {
+ r1 = c_single->c_get(c_single, &k_single, &v_single, DB_NEXT);
+ r2 = c_multiple->c_get(c_multiple, &k_multiple, &v_multiple, DB_NEXT);
+ assert(r1==r2);
+ CKERR2s(r1, 0, DB_NOTFOUND);
+ if (r1 == 0) {
+ assert(k_single.size == k_multiple.size);
+ assert(v_single.size == v_multiple.size);
+ assert(memcmp(k_single.data, k_multiple.data, k_single.size) == 0);
+ assert(memcmp(v_single.data, v_multiple.data, v_single.size) == 0);
+ }
+ }
+ r = c_single->c_close(c_single);
+ CKERR(r);
+ r = c_multiple->c_close(c_multiple);
+ CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ }
+ {
+ for (which = 0; which < num_dbs; which++) {
+ r = dbs_single[which]->close(dbs_single[which], 0);
+ CKERR(r);
+ r = dbs_multiple[which]->close(dbs_multiple[which], 0);
+ CKERR(r);
+ }
+ }
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ uint32_t which;
+ for (which = 0; which < MAX_DBS; which++) {
+ sprintf(names_multiple[which], "dbm_0x%02X", which);
+ sprintf(names_single[which], "dbs_0x%02X", which);
+ dbt_init(&dest_keys[which], NULL, 0);
+ dbt_init(&dest_vals[which], NULL, 0);
+ }
+ for (num_dbs = 0; num_dbs < 4; num_dbs++) {
+ run_test();
+ }
+ for (num_dbs = 4; num_dbs <= MAX_DBS; num_dbs *= 2) {
+ run_test();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/env_loader_memory.cc b/storage/tokudb/PerconaFT/src/tests/env_loader_memory.cc
new file mode 100644
index 00000000..445ed804
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/env_loader_memory.cc
@@ -0,0 +1,62 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+
+static uint64_t my_loader_memory_size;
+
+static uint64_t get_loader_memory_size(void) {
+ return my_loader_memory_size;
+}
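+// env->set_loader_memory_size() registers this callback, so
+// env->get_loader_memory_size() should report whatever the callback returns
+// at the time of the call; the loop in test_main() below verifies exactly that.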
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int r;
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ assert_zero(r);
+ env->set_loader_memory_size(env, get_loader_memory_size);
+ for (uint64_t n = 0 ; n < 10000000000; n += 1000000000) {
+ my_loader_memory_size = n;
+ assert(env->get_loader_memory_size(env) == n);
+ }
+ r = env->close(env, 0);
+ assert_zero(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/env_nproc.cc b/storage/tokudb/PerconaFT/src/tests/env_nproc.cc
new file mode 100644
index 00000000..9b5415e7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/env_nproc.cc
@@ -0,0 +1,90 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/resource.h>
+
+static void env_open_close(void) {
+ int r;
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK+DB_INIT_MPOOL+DB_INIT_TXN+DB_INIT_LOG + DB_CREATE + DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r != 0) {
+ fprintf(stderr, "%s:%u r=%d\n", __FILE__, __LINE__, r);
+ }
+ r = env->close(env, 0);
+ assert(r == 0);
+}
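+// test_main() lowers RLIMIT_NPROC (to 1 unless a different limit is given on
+// the command line) before calling env_open_close(), so the open exercises the
+// paths where the engine cannot create as many threads as it would like; a
+// nonzero return from env->open() is reported rather than asserted.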
+
+int test_main (int argc, char * const argv[]) {
+ int r;
+ int limit = 1;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0) verbose--;
+ continue;
+ }
+ limit = atoi(argv[i]);
+ continue;
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ struct rlimit nproc_rlimit;
+ r = getrlimit(RLIMIT_NPROC, &nproc_rlimit);
+ assert(r == 0);
+
+ nproc_rlimit.rlim_cur = limit;
+ r = setrlimit(RLIMIT_NPROC, &nproc_rlimit);
+ assert(r == 0);
+
+ env_open_close();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/env_startup.cc b/storage/tokudb/PerconaFT/src/tests/env_startup.cc
new file mode 100644
index 00000000..e6d704e8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/env_startup.cc
@@ -0,0 +1,197 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Purpose of this test is to verify correct behavior of
+ * environment startup:
+ *
+ * All three of the following should exist or all three should not exist:
+ * - persistent environment
+ * - fileops directory
+ * - recovery log (if DB_INIT_LOG)
+ *
+ * If all three are missing, env->open() should create a new environment.
+ * If any one is present and any other is missing, env->open() should return ENOENT.
+ *
+ * TODO: experiment with DB_INIT_LOG off.
+ */
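+
+/* The three artifacts are manipulated directly on disk by the helpers below:
+ * delete_persistent() removes tokudb.environment, delete_directory() removes
+ * tokudb.directory, and delete_log() removes the *.tokulog* recovery log files. */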
+
+
+#include "test.h"
+#include <db.h>
+
+static DB_ENV *env;
+
+#define FLAGS_NOLOG (DB_INIT_LOCK|DB_INIT_MPOOL|DB_CREATE|DB_PRIVATE)
+#define FLAGS_LOG (FLAGS_NOLOG|DB_INIT_TXN|DB_INIT_LOG)
+
+static int mode = S_IRWXU+S_IRWXG+S_IRWXO;
+
+static void test_shutdown(void);
+
+static void
+setup (uint32_t flags) {
+ int r;
+ if (env)
+ test_shutdown();
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r=db_env_create(&env, 0);
+ CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, flags, mode);
+ CKERR(r);
+}
+
+
+
+static void
+test_shutdown(void) {
+ int r;
+ r=env->close(env, 0); CKERR(r);
+ env = NULL;
+}
+
+
+static void
+reopen_env(uint32_t flags, int expected_r) {
+ int r;
+ if (env)
+ test_shutdown();
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, flags, mode);
+ CKERR2(r, expected_r);
+}
+
+static void
+delete_persistent(void) {
+ char cmd[1024];
+ sprintf(cmd, "rm -rf %s%s%s", TOKU_TEST_FILENAME, "/", "tokudb.environment");
+ int r = system(cmd);
+ CKERR(r);
+}
+
+
+static void
+delete_directory(void) {
+ char cmd[1024];
+ sprintf(cmd, "rm -rf %s%s%s", TOKU_TEST_FILENAME, "/", "tokudb.directory");
+ int r = system(cmd);
+ CKERR(r);
+}
+
+
+static void
+delete_log(void) {
+ char cmd[1024];
+ sprintf(cmd, "rm -rf %s%s%s", TOKU_TEST_FILENAME, "/", "*.tokulog*");
+ int r = system(cmd);
+ CKERR(r);
+}
+
+
+static void
+create_env(uint32_t flags) {
+ setup(flags); // create new environment
+ test_shutdown();
+ reopen_env(flags, 0); // reopen existing environment, should have log now
+ test_shutdown();
+}
+
+
+static void
+test_env_startup(int logging) {
+ uint32_t flags;
+
+ if (logging)
+ flags = FLAGS_LOG;
+ else
+ flags = FLAGS_NOLOG;
+
+ create_env(flags);
+
+ // delete persistent info and try to reopen
+ delete_persistent();
+ reopen_env(flags, ENOENT);
+
+ // recreate, then try to open with missing fileops directory
+ create_env(flags);
+ delete_directory();
+ reopen_env(flags, ENOENT);
+
+
+ if (logging) {
+ // recreate, then try to open with missing recovery log
+ create_env(flags);
+ delete_log();
+ reopen_env(flags, ENOENT);
+
+
+ // now try two missing items, if log can be present
+
+ // log is only item present
+ create_env(flags);
+ delete_persistent();
+ delete_directory();
+ reopen_env(flags, ENOENT);
+
+ // persistent env is only item present
+ create_env(flags);
+ delete_log();
+ delete_directory();
+ reopen_env(flags, ENOENT);
+
+ // directory is only item present
+ create_env(flags);
+ delete_persistent();
+ delete_log();
+ reopen_env(flags, ENOENT);
+ }
+
+ test_shutdown();
+}
+
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ test_env_startup(0); // transactionless env
+ test_env_startup(1); // with transactions and logging
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/filesize.cc b/storage/tokudb/PerconaFT/src/tests/filesize.cc
new file mode 100644
index 00000000..9dfeea13
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/filesize.cc
@@ -0,0 +1,267 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Idea:
+ * create a dictionary
+ * repeat:
+ * lots of inserts
+ * checkpoint
+ * note file size
+ * lots of deletes
+ * optimize (flatten tree)
+ * checkpoint
+ * note file size
+ *
+ */
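+
+/* The pass criterion, asserted at the end of each iteration below, is that
+ * after the deletes, hot_optimize(), and checkpoints the file is no larger
+ * than it was just after that iteration's inserts (sizeM <= sizefirst). */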
+
+#include "test.h"
+
+#define PATHSIZE 1024
+
+DB_ENV *env;
+DB *db;
+char dbname[] = "foo.db";
+char path[PATHSIZE];
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_PRIVATE;
+
+int ninsert, nread, nread_notfound, nread_failed, ndelete, ndelete_notfound, ndelete_failed;
+
+static TOKU_DB_FRAGMENTATION_S report;
+
+static void
+check_fragmentation(void) {
+ int r = db->get_fragmentation(db, &report);
+ CKERR(r);
+}
+
+static void
+print_fragmentation(void) {
+ printf("Fragmentation:\n");
+ printf("\tTotal file size in bytes (file_size_bytes): %" PRIu64 "\n", report.file_size_bytes);
+ printf("\tCompressed User Data in bytes (data_bytes): %" PRIu64 "\n", report.data_bytes);
+ printf("\tNumber of blocks of compressed User Data (data_blocks): %" PRIu64 "\n", report.data_blocks);
+ printf("\tAdditional bytes used for checkpoint system (checkpoint_bytes_additional): %" PRIu64 "\n", report.checkpoint_bytes_additional);
+ printf("\tAdditional blocks used for checkpoint system (checkpoint_blocks_additional): %" PRIu64 "\n", report.checkpoint_blocks_additional);
+ printf("\tUnused space in file (unused_bytes): %" PRIu64 "\n", report.unused_bytes);
+ printf("\tNumber of contiguous regions of unused space (unused_blocks): %" PRIu64 "\n", report.unused_blocks);
+ printf("\tSize of largest contiguous unused space (largest_unused_block): %" PRIu64 "\n", report.largest_unused_block);
+}
+
+static void
+close_em (void)
+{
+ int r;
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+static void
+setup(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+}
+
+
+static void
+fill_rand(int n, uint64_t * d) {
+ for (int i = 0; i < n; i++){
+ *(d+i) = random64();
+ }
+}
+
+#define INSERT_BIG 1500
+#define INSERT_SMALL 0
+static void
+insert_n (uint32_t ah, int datasize) {
+ uint64_t vdata[datasize];
+ fill_rand(datasize, vdata);
+ uint32_t an = htonl(ah);
+ // if (verbose) printf("insert an = %0X (ah = %0X)\n", an, ah);
+ DBT key;
+ dbt_init(&key, &an, 4);
+ DBT val;
+ dbt_init(&val, vdata, sizeof vdata);
+ int r = db->put(db, NULL, &key, &val, 0);
+ CKERR(r);
+ ninsert++;
+}
+
+static void
+delete_n (uint32_t ah)
+{
+ uint32_t an = htonl(ah);
+ // if (verbose) printf("delete an = %0X (ah = %0X)\n", an, ah);
+ DBT key;
+ dbt_init(&key, &an, 4);
+ int r = db->del(db, NULL, &key, DB_DELETE_ANY);
+ if (r == 0)
+ ndelete++;
+ else if (r == DB_NOTFOUND)
+ ndelete_notfound++;
+ else
+ ndelete_failed++;
+ CKERR(r);
+}
+
+static void
+optimize(void) {
+ if (verbose) printf("Filesize: begin optimize dictionary\n");
+ uint64_t loops_run;
+ int r = db->hot_optimize(db, NULL, NULL, NULL, NULL, &loops_run);
+ CKERR(r);
+ if (verbose) printf("Filesize: end optimize dictionary\n");
+}
+
+
+static void
+get_file_pathname(void) {
+ DBT dname;
+ DBT iname;
+ dbt_init(&dname, dbname, sizeof(dbname));
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ int r = env->get_iname(env, &dname, &iname);
+ CKERR(r);
+ sprintf(path, "%s/%s", TOKU_TEST_FILENAME, (char*)iname.data);
+ toku_free(iname.data);
+ if (verbose) printf("path = %s\n", path);
+}
+
+static int getsizeM(void) {
+ toku_struct_stat buf;
+ int r = toku_stat(path, &buf, toku_uninstrumented);
+ CKERR(r);
+ int sizeM = (int)buf.st_size >> 20;
+ check_fragmentation();
+ if (verbose>1)
+ print_fragmentation();
+ return sizeM;
+}
+
+static void
+test_filesize (bool sequential)
+{
+ int N=1<<14;
+ int r, i, sizeM;
+
+ get_file_pathname();
+
+ for (int iter = 0; iter < 3; iter++) {
+ int offset = N * iter;
+
+ if (sequential) {
+ for (i=0; i<N; i++) {
+ insert_n(i + offset, INSERT_BIG);
+ }
+ } else {
+ for (i=N-1; i>=0; --i) {
+ insert_n(i + offset, INSERT_BIG);
+ }
+ }
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+ int sizefirst = sizeM = getsizeM();
+ if (verbose) printf("Filesize after iteration %d insertion and checkpoint = %dM\n", iter, sizeM);
+
+ int preserve = 2;
+ for (i = preserve; i<(N); i++) { // leave a little at the beginning
+ delete_n(i + offset);
+ }
+ optimize();
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+ sizeM = getsizeM();
+ if (verbose) printf("Filesize after iteration %d deletion and checkpoint 1 = %dM\n", iter, sizeM);
+
+ if (sequential) {
+ for (i=0; i<N; i++) {
+ insert_n(i + offset, INSERT_SMALL);
+ }
+ } else {
+ for (i=N-1; i>=0; --i) {
+ insert_n(i + offset, INSERT_SMALL);
+ }
+ }
+ for (i = preserve; i<(N); i++) { // leave a little at the beginning
+ delete_n(i + offset);
+ }
+ optimize();
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+ sizeM = getsizeM();
+ if (verbose) printf("Filesize after iteration %d deletion and checkpoint 2 = %dM\n", iter, sizeM);
+ assert(sizeM <= sizefirst);
+
+ if (verbose) printf("ninsert = %d\n", ninsert);
+ if (verbose) printf("nread = %d, nread_notfound = %d, nread_failed = %d\n", nread, nread_notfound, nread_failed);
+ if (verbose) printf("ndelete = %d, ndelete_notfound = %d, ndelete_failed = %d\n", ndelete, ndelete_notfound, ndelete_failed);
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ if (verbose) print_engine_status(env);
+ test_filesize(true);
+ if (verbose) {
+ print_engine_status(env);
+ }
+ check_fragmentation();
+ if (verbose) print_fragmentation();
+ close_em();
+ setup();
+ if (verbose) print_engine_status(env);
+ test_filesize(false);
+ if (verbose) {
+ print_engine_status(env);
+ }
+ check_fragmentation();
+ if (verbose) print_fragmentation();
+ close_em();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/get_key_after_bytes_unit.cc b/storage/tokudb/PerconaFT/src/tests/get_key_after_bytes_unit.cc
new file mode 100644
index 00000000..70e1bddc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/get_key_after_bytes_unit.cc
@@ -0,0 +1,247 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <algorithm>
+
+// Unit test for db->get_key_after_bytes.
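+//
+// As exercised by this test, db->get_key_after_bytes(db, txn, start_key, skip_len, cb, extra, 0)
+// starts from start_key (or from the beginning of the dictionary when start_key is NULL),
+// skips up to skip_len bytes of key+value data, and then calls
+// cb(end_key, actually_skipped, extra); end_key == NULL means the end of the
+// dictionary was reached.  Every row here is an int key plus an int value, which is
+// why the expected answer is computed as start_key + skip_len / (2 * sizeof(int)).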
+
+static const int num_keys = 1<<10;
+
+static void setup(DB_ENV **envp, DB **dbp, uint32_t nodesize, uint32_t basementnodesize) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU|S_IRWXG|S_IRWXO);
+ CKERR(r);
+ r = db_env_create(envp, 0);
+ CKERR(r);
+ DB_ENV *env = *envp;
+ r = env->set_default_bt_compare(env, int_dbt_cmp);
+ CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU|S_IRWXG|S_IRWXO);
+ CKERR(r);
+ r = db_create(dbp, env, 0);
+ CKERR(r);
+ DB *db = *dbp;
+ {
+ r = db->set_pagesize(db, nodesize);
+ CKERR(r);
+ r = db->set_readpagesize(db, basementnodesize);
+ CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU|S_IRWXG|S_IRWXO);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+}
+
+static void fill(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+ int k, v;
+ DBT key, val;
+ dbt_init(&key, &k, sizeof k);
+ dbt_init(&val, &v, sizeof v);
+ for (int i = 0; i < num_keys; ++i) {
+ k = i;
+ v = i;
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+}
+
+struct check_extra {
+ int start_key;
+ uint64_t skip_len;
+ bool filled;
+ bool exact;
+};
+
+static void check_callback(const DBT *end_key, uint64_t actually_skipped, void *extra) {
+ struct check_extra *CAST_FROM_VOIDP(e, extra);
+
+ int real_start_key = std::min(std::max(e->start_key, 0), num_keys);
+ int expected_key = std::min(real_start_key + (e->skip_len / (2 * sizeof(int))), (uint64_t) num_keys);
+
+ if (e->exact) {
+ if (!e->filled || expected_key >= num_keys) {
+ expected_key = -1;
+ }
+ assert(actually_skipped <= e->skip_len);
+ if (expected_key == -1) {
+ assert(end_key == nullptr);
+ } else {
+ assert(e->skip_len - actually_skipped < 2 * (int) sizeof(int));
+ assert(end_key != nullptr);
+ assert(end_key->size == sizeof expected_key);
+ assert((*(int *) end_key->data) == expected_key);
+ }
+ } else {
+ // no sense in doing an inexact check if the table's empty
+ assert(e->filled);
+ int found;
+ if (end_key == nullptr) {
+ found = num_keys;
+ } else {
+ assert(end_key->size == sizeof found);
+ found = *(int *) end_key->data;
+ }
+        // These slack bounds are heuristic; there is no rigorous justification
+        // for them, but they appear to be adequate in practice.
+ double skipped_portion = (double) e->skip_len / (num_keys * 2 * sizeof(int));
+ int key_slack = num_keys * std::max(std::min(skipped_portion, 0.25), 0.01);
+ int size_slack = key_slack * 2 * sizeof(int);
+ assert(found <= expected_key + key_slack);
+ assert(found >= expected_key - key_slack);
+ assert(actually_skipped <= e->skip_len + size_slack);
+ if (end_key != nullptr) {
+ // if we hit the end of the table, this definitely won't hold up
+ assert((int) actually_skipped >= (int) e->skip_len - size_slack);
+ }
+ }
+}
+
+static void check(DB_ENV *env, DB *db, int start_key, uint64_t skip_len, bool filled, bool exact) {
+ int r;
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+
+ DBT start_dbt, end_key;
+ dbt_init(&start_dbt, &start_key, sizeof start_key);
+ dbt_init(&end_key, nullptr, 0);
+
+ struct check_extra extra = {start_key, skip_len, filled, exact};
+ r = db->get_key_after_bytes(db, txn, (start_key == -2 ? nullptr : &start_dbt), skip_len, check_callback, &extra, 0);
+ CKERR(r);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+}
+
+static void teardown(DB_ENV *env, DB *db) {
+ int r;
+ r = db->close(db, 0);
+ CKERR(r);
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main(int argc, char * const argv[]) {
+ int r;
+ default_parse_args(argc, argv);
+
+ DB_ENV *env;
+ DB *db;
+
+ setup(&env, &db, 4<<20, 64<<10);
+
+ // if the table is empty, always say DB_NOTFOUND
+ for (int start_key = -2; start_key <= 1; ++start_key) {
+ for (int skip_len = 0; skip_len < 2; ++skip_len) {
+ check(env, db, start_key, skip_len, false, true);
+ }
+ }
+
+ fill(env, db);
+
+ // if start_key is bigger than any key, assert that we get DB_NOTFOUND
+ for (int extra_key = 0; extra_key < 10; extra_key += 5) {
+ for (int skip_len = 0; skip_len < 24; ++skip_len) {
+ check(env, db, num_keys + extra_key, skip_len, true, true);
+ }
+ }
+
+ // if start_key is nullptr or the first key or before the first key, we start at the beginning
+ for (int start_key = -2; start_key <= 0; ++start_key) {
+ for (int skip_len = 0; skip_len < 48; ++skip_len) {
+ check(env, db, start_key, skip_len, true, true);
+ }
+ }
+
+ // check a bunch of places in the middle too (use prime increments to get a good distribution of stuff)
+ for (int start_key = 0; start_key <= num_keys; start_key += 31) {
+ for (int skip_len = 0; skip_len < (num_keys + 1 - start_key) * (2 * (int) sizeof(int)); skip_len += 67) {
+ check(env, db, start_key, skip_len, true, true);
+ }
+ }
+
+ // TODO: test mvcc stuff (check that we only look at the latest val, which is the current behavior)
+
+ teardown(env, db);
+
+ // Try many bn and nodesizes
+ for (int basementnodesize = 1<<10; basementnodesize <= 64<<10; basementnodesize <<= 1) {
+ for (int nodesize = basementnodesize; nodesize <= 128<<10; nodesize <<= 2) {
+ setup(&env, &db, nodesize, basementnodesize);
+ fill(env, db);
+ // forces a rebalance of the root, to get multiple bns
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+ // near the beginning
+ for (int start_key = -2; start_key <= 1; ++start_key) {
+ for (int skip_len = 0; skip_len <= (num_keys + 1 - start_key) * (2 * (int) sizeof(int)); skip_len += 41) {
+ check(env, db, start_key, skip_len, true, false);
+ }
+ }
+ // near the end
+ for (int start_key = num_keys - 1; start_key <= num_keys + 1; ++start_key) {
+ for (int skip_len = 0; skip_len <= (num_keys + 1 - start_key) * (2 * (int) sizeof(int)); skip_len += 41) {
+ check(env, db, start_key, skip_len, true, false);
+ }
+ }
+ for (int start_key = 0; start_key <= num_keys; start_key += 17) {
+ for (int skip_len = 0; skip_len <= (num_keys + 1 - start_key) * (2 * (int) sizeof(int)); skip_len += 31) {
+ check(env, db, start_key, skip_len, true, false);
+ }
+ }
+ teardown(env, db);
+ }
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/get_last_key.cc b/storage/tokudb/PerconaFT/src/tests/get_last_key.cc
new file mode 100644
index 00000000..ad6cf34f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/get_last_key.cc
@@ -0,0 +1,259 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/**
+ * Test that various queries behave correctly
+ *
+ * Zardosht says:
+ *
+ * write a test that inserts a bunch of elements into the tree,
+ * and then verify that the following types of queries work:
+ * - db->get
+ * - next
+ * - prev
+ * - set_range
+ * - set_range_reverse
+ * - first
+ * - last
+ * - current
+ *
+ * do it on a table with:
+ * - just a leaf node
+ * - has internal nodes (make node size 4K and bn size 1K)
+ * - big cachetable such that everything fits
+ * - small cachetable such that not a lot fits
+ *
+ * make sure APIs are the callback APIs (getf_XXX)
+ * make sure your callbacks all return TOKUDB_CURSOR_CONTINUE,
+ * so we ensure that returning TOKUDB_CURSOR_CONTINUE does not
+ * mess anything up.
+ */
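+
+/* Note: despite the general wording above, this file concentrates on
+ * db->get_last_key(): it checks the reported last key after inserts, while a
+ * delete of the last key is pending, after the involved transactions commit,
+ * and across hot_optimize()/checkpoint garbage collection and an environment
+ * close and reopen. */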
+
+#include "test.h"
+
+/**
+ * Compute the expected value for a given key (value = 2 * key + 1, derived
+ * from the key's host-order form) and write it into the supplied DBT.
+ */
+static void get_value_by_key(DBT * key, DBT * value)
+{
+ // keys/values are always stored in the DBT in net order
+ int * CAST_FROM_VOIDP(k, key->data);
+ int v = toku_ntohl(*k) * 2 + 1;
+ memcpy(value->data, &v, sizeof(int));
+}
+
+static void prepare_for_env(void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
+}
+
+static void init_env(DB_ENV ** env, size_t ct_size)
+{
+ int r;
+ const int envflags = DB_INIT_MPOOL | DB_CREATE | DB_THREAD |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE;
+
+ printf("initializing environment\n");
+
+ r = db_env_create(env, 0); { int chk_r = r; CKERR(chk_r); }
+ assert(ct_size < 1024 * 1024 * 1024L);
+ r = (*env)->set_cachesize(*env, 0, ct_size, 1); { int chk_r = r; CKERR(chk_r); }
+ r = (*env)->open(*env, TOKU_TEST_FILENAME, envflags, 0755); { int chk_r = r; CKERR(chk_r); }
+}
+
+static void init_db(DB_ENV * env, DB ** db)
+{
+ int r;
+ const int node_size = 4096;
+ const int bn_size = 1024;
+
+ printf("initializing db\n");
+
+ DB_TXN * txn;
+ r = db_create(db, env, 0); { int chk_r = r; CKERR(chk_r); }
+ r = (*db)->set_readpagesize(*db, bn_size); { int chk_r = r; CKERR(chk_r); }
+ r = (*db)->set_pagesize(*db, node_size); { int chk_r = r; CKERR(chk_r); }
+ r = env->txn_begin(env, nullptr, &txn, 0); { int chk_r = r; CKERR(chk_r); }
+ r = (*db)->open(*db, txn, "db", nullptr, DB_BTREE, DB_CREATE, 0644); { int chk_r = r; CKERR(chk_r); }
+ r = txn->commit(txn, 0); { int chk_r = r; CKERR(chk_r); }
+}
+
+static void cleanup_env_and_db(DB_ENV * env, DB * db)
+{
+ int r;
+
+ printf("cleaning up environment and db\n");
+ r = db->close(db, 0); { int chk_r = r; CKERR(chk_r); }
+ r = env->close(env, 0); { int chk_r = r; CKERR(chk_r); }
+}
+
+static int get_last_key_cb(const DBT *key, const DBT *value, void *extra) {
+ if (key->data) {
+ invariant_null(value);
+ int expected_key = *(int*)extra;
+ int found_key = *(int*)key->data;
+ invariant(expected_key == (int)ntohl(found_key));
+ }
+ return 0;
+}
+
+
+static void check_last_key_matches(DB *db, int expect_r, int key) {
+ int r = db->get_last_key(db, get_last_key_cb, &key);
+ CKERR2(r, expect_r);
+}
+
+static void do_test(size_t ct_size, int num_keys)
+{
+ int i, r;
+ DB * db;
+ DB_ENV * env;
+ DB_TXN *txn = nullptr;
+ DB_TXN *txn2 = nullptr;
+ uint64_t loops_run = 0;
+
+
+ printf("doing tests for ct_size %lu, num_keys %d\n",
+ ct_size, num_keys);
+
+ // initialize everything and insert data
+ prepare_for_env();
+ init_env(&env, ct_size);
+ assert(env != nullptr);
+ init_db(env, &db);
+ assert(db != nullptr);
+
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+ {
+ DBT key, value;
+ for (i = 0; i < num_keys; i++) {
+ int v, k = toku_htonl(i);
+ dbt_init(&key, &k, sizeof(int));
+ dbt_init(&value, &v, sizeof(int));
+ get_value_by_key(&key, &value);
+ if (0) printf("put %d\n", k);
+ r = db->put(db, txn, &key, &value, 0);
+ CKERR(r);
+ }
+ }
+
+ int expect_r = num_keys == 0 ? DB_NOTFOUND : 0;
+ check_last_key_matches(db, expect_r, num_keys - 1);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ check_last_key_matches(db, expect_r, num_keys - 1);
+
+ if (num_keys == 0) {
+ goto cleanup;
+ }
+ r = env->txn_begin(env, nullptr, &txn2, 0);
+ CKERR(r);
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ // Delete the last key
+ {
+ DBT key;
+ int k = toku_htonl(num_keys - 1);
+ dbt_init(&key, &k, sizeof(int));
+ if (0) printf("del %d\n", *(int*)key.data);
+ r = db->del(db, txn, &key, 0);
+ CKERR(r);
+ }
+ check_last_key_matches(db, 0, num_keys - 1);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ check_last_key_matches(db, 0, num_keys - 1);
+
+ r = txn2->commit(txn2, 0);
+ CKERR(r);
+ check_last_key_matches(db, 0, num_keys - 1);
+
+ // Run garbage collection (NOTE: gc does not seem to run when everything fits in the root node; reason unknown).
+ r = db->hot_optimize(db, nullptr, nullptr, nullptr, nullptr, &loops_run);
+ CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
+ // Run garbage collection (NOTE: gc does not seem to run when everything fits in the root node; reason unknown).
+ r = db->hot_optimize(db, nullptr, nullptr, nullptr, nullptr, &loops_run);
+ CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
+ // Fully close and reopen to clear the cachetable.
+ // Note that closing and reopening just the db may not flush the cachetable,
+ // so we close the environment as well.
+ cleanup_env_and_db(env, db);
+ init_env(&env, ct_size);
+ assert(env != nullptr);
+ init_db(env, &db);
+ assert(db != nullptr);
+
+ // NOTE: even overkill (double optimize, double checkpoint) does not make gc happen when everything fits in the root node in a single basement.
+
+ if (num_keys >= 2) {
+ // At least one key remains.
+ check_last_key_matches(db, 0, num_keys - 2);
+ } else {
+ // No key remains; we should find nothing.
+ check_last_key_matches(db, DB_NOTFOUND, -1);
+ }
+cleanup:
+ cleanup_env_and_db(env, db);
+}
+
+int test_main(int argc, char * const argv[])
+{
+ default_parse_args(argc, argv);
+
+ for (int i = 0; i <= 2; i++) {
+ do_test(1024*1024, i);
+ }
+ for (int i = 4; i <= 1024; i*=2) {
+ do_test(1024*1024, i);
+ }
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/helgrind.suppressions b/storage/tokudb/PerconaFT/src/tests/helgrind.suppressions
new file mode 100644
index 00000000..4729c1d1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/helgrind.suppressions
@@ -0,0 +1,158 @@
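+# Each suppression entry gives a name, the Valgrind tool and error kind it
+# applies to (e.g. Helgrind:Race), and a partial call-stack pattern of
+# fun:/obj: frames; a line containing '...' matches any number of frames.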
+{
+ helgrind_3.5.0_false_positive_against_pthread_create
+ Helgrind:Race
+ fun:mythread_wrapper
+}
+{
+ ignore_race_on_toku_checkpointing_user_data_status
+ Helgrind:Race
+ fun:toku_set_checkpointing_user_data_status.*
+}
+{
+ ignore_race_on_toku_checkpointing_user_data_status
+ Helgrind:Race
+ fun:toku_get_checkpointing_user_data_status
+}
+{
+ ignore_race_inside_pthread_mutex_lock
+ Helgrind:Race
+ fun:pthread_mutex_lock
+}
+{
+ ignore_race_inside_pthread_mutex_unlock
+ Helgrind:Race
+ ...
+ fun:pthread_mutex_unlock
+}
+{
+ ignore_race_inside_pthread_join
+ Helgrind:Race
+ fun:pthread_join
+}
+{
+ unsafe_touch_clock
+ Helgrind:Race
+ fun:_ZL18unsafe_touch_clockP6ftnodei
+}
+{
+ unsafe_read_single_txnid_optimization_possible
+ Helgrind:Race
+ fun:_ZNK4toku8locktree46unsafe_read_single_txnid_optimization_possibleEv
+}
+{
+ unsafe_read_size_current
+ Helgrind:Race
+ fun:_ZNK7evictor24unsafe_read_size_currentEv
+}
+{
+ kde_bug_307082_cond_destroy_without_signal
+ Helgrind:Misc
+ ...
+ fun:pthread_cond_destroy@*
+ ...
+ fun:_ZL14ctpair_destroyP6ctpair
+}
+{
+ kde_bug_307082_cond_destroy_without_signal
+ Helgrind:Misc
+ ...
+ fun:pthread_cond_destroy@*
+ ...
+ fun:_Z23toku_blocktable_destroyPP11block_table
+}
+{
+ kde_bug_307082_cond_destroy_without_signal
+ Helgrind:Misc
+ ...
+ fun:pthread_cond_destroy@*
+ ...
+ fun:_Z11bjm_destroyP29background_job_manager_struct
+}
+{
+ kde_bug_307082_cond_destroy_without_signal
+ Helgrind:Misc
+ ...
+ fun:pthread_cond_destroy@*
+ ...
+ fun:_Z24toku_txn_manager_destroyP11txn_manager
+}
+{
+ kde_bug_307082_cond_destroy_without_signal
+ Helgrind:Misc
+ ...
+ fun:pthread_cond_destroy@*
+ ...
+ fun:_Z24toku_thread_pool_destroyPP16toku_thread_pool
+}
+{
+ kde_bug_307082_cond_destroy_without_signal
+ Helgrind:Misc
+ ...
+ fun:pthread_cond_destroy@*
+ ...
+ fun:_Z22toku_minicron_shutdownP8minicron
+}
+{
+ kde_bug_307082_cond_destroy_without_signal
+ Helgrind:Misc
+ ...
+ fun:pthread_cond_destroy@*
+ ...
+ fun:_ZN4toku12lock_request7destroyEv
+}
+{
+ kde_bug_307082_cond_destroy_without_signal
+ Helgrind:Misc
+ ...
+ fun:pthread_cond_destroy@*
+ ...
+ fun:_Z17toku_logger_closePP10tokulogger
+}
+{
+ kde_bug_307082_cond_destroy_without_signal
+ Helgrind:Misc
+ ...
+ fun:pthread_cond_destroy@*
+ ...
+ fun:_ZL20cachetable_free_pairP6ctpair
+}
+{
+ kde_bug_307082_cond_destroy_without_signal
+ Helgrind:Misc
+ ...
+ fun:pthread_cond_destroy@*
+ ...
+ fun:_ZN7evictor7destroyEv
+}
+{
+ <helgrind_doesnt_understand_the_way_the_world_works_and_ignores_our_disable_checking_instructions>
+ Helgrind:Race
+ fun:_ZN4toku8locktree15sto_try_acquireEPvmPK10__toku_dbtS4_
+ fun:_ZN4toku8locktree12acquire_lockEbmPK10__toku_dbtS3_PNS_9txnid_setE
+ fun:_ZN4toku8locktree16try_acquire_lockEbmPK10__toku_dbtS3_PNS_9txnid_setEb
+ fun:_ZN4toku8locktree18acquire_write_lockEmPK10__toku_dbtS3_PNS_9txnid_setEb
+ fun:_ZN4toku12lock_request5startEv
+ ...
+}
+{
+ <helgrind_bug_323432_see_http://permalink.gmane.org/gmane.comp.debugging.valgrind/13325>
+ Helgrind:Race
+ obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
+ fun:pthread_mutex_destroy
+ fun:toku_mutex_destroy
+ fun:_ZN4toku8treenode4freeEPS0_
+ fun:_ZN4toku8treenode22remove_root_of_subtreeEv
+ ...
+}
+{
+ helgrind_bug_https://bugs.kde.org/show_bug.cgi?id=327548
+ Helgrind:Race
+ fun:my_memcmp
+ fun:pthread_mutex_destroy
+}
+{
+ helgrind_bug_2_helgrind_bug_https://bugs.kde.org/show_bug.cgi?id=327548
+ Helgrind:Race
+ ...
+ fun:pthread_mutex_destroy
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/helgrind1.cc b/storage/tokudb/PerconaFT/src/tests/helgrind1.cc
new file mode 100644
index 00000000..bb2b714e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/helgrind1.cc
@@ -0,0 +1,65 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+// The helgrind1.tdbrun test should fail. This is merely a check to verify that helgrind actually notices a race.
+
+#include <pthread.h>
+int x;
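+// Both starta() and startb() below increment the global x with no
+// synchronization; that unsynchronized update is the intentional race that
+// helgrind is expected to report.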
+
+static void *starta(void* ignore __attribute__((__unused__))) {
+ if (verbose) printf("%s %d\n", __FUNCTION__, x);
+ x++;
+ return 0;
+}
+static void *startb(void* ignore __attribute__((__unused__))) {
+ if (verbose) printf("%s %d\n", __FUNCTION__, x);
+ x++;
+ return 0;
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ pthread_t a,b;
+ { int x_l = pthread_create(&a, NULL, starta, NULL); assert(x_l==0); }
+ { int x_l = pthread_create(&b, NULL, startb, NULL); assert(x_l==0); }
+ { int x_l = pthread_join(a, NULL); assert(x_l==0); }
+ { int x_l = pthread_join(b, NULL); assert(x_l==0); }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/helgrind2.cc b/storage/tokudb/PerconaFT/src/tests/helgrind2.cc
new file mode 100644
index 00000000..3060bcaf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/helgrind2.cc
@@ -0,0 +1,135 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+// The helgrind2 test performs a DB->get() in two different concurrent threads.
+#include <arpa/inet.h>
+
+#include <db.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <memory.h>
+
+DB_ENV *env;
+DB *db;
+
+static void initialize (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, 0777);
+
+ // setup environment
+ {
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->set_redzone(env, 0); CKERR(r);
+ env->set_errfile(env, stdout);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_PRIVATE | DB_CREATE, 0777);
+ assert(r == 0);
+ }
+
+ // setup DB
+ {
+ DB_TXN *txn = 0;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, 0777); assert(r == 0);
+ }
+
+ // Put some stuff in
+ {
+ char v[10];
+ DB_TXN *txn = 0;
+ int i;
+ const int n = 10;
+ memset(v, 0, sizeof(v));
+ for (i=0; i<n; i++) {
+ int k = htonl(i);
+ DBT key, val;
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, v, sizeof(v)), 0);
+ assert(r == 0);
+ }
+ }
+}
+
+static void finish (void) {
+ int r;
+ r = db->close(db, 0); assert(r==0);
+ r = env->close(env, 0); assert(r==0);
+}
+
+static void *starta(void* ignore __attribute__((__unused__))) {
+ DB_TXN *txn = 0;
+ DBT key, val;
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ val.flags |= DB_DBT_MALLOC;
+ int k = htonl(0);
+ int r = db->get(db, txn, dbt_init(&key, &k, sizeof k), &val, 0);
+ assert(r==0);
+ //printf("val.data=%p\n", val.data);
+ int i; for (i=0; i<10; i++) assert(((char*)val.data)[i]==0);
+ toku_free(val.data);
+ return 0;
+}
+static void *startb(void* ignore __attribute__((__unused__))) {
+ DB_TXN *txn = 0;
+ DBT key, val;
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ int k = htonl(0);
+ val.flags |= DB_DBT_MALLOC;
+ int r = db->get(db, txn, dbt_init(&key, &k, sizeof k), &val, 0);
+ assert(r==0);
+ //printf("val.data=%p\n", val.data);
+ int i; for (i=0; i<10; i++) assert(((char*)val.data)[i]==0);
+ toku_free(val.data);
+ return 0;
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ pthread_t a,b;
+ initialize();
+ { int x = pthread_create(&a, NULL, starta, NULL); assert(x==0); }
+ { int x = pthread_create(&b, NULL, startb, NULL); assert(x==0); }
+ { int x = pthread_join(a, NULL); assert(x==0); }
+ { int x = pthread_join(b, NULL); assert(x==0); }
+ finish();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/helgrind3.cc b/storage/tokudb/PerconaFT/src/tests/helgrind3.cc
new file mode 100644
index 00000000..6ccf0d76
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/helgrind3.cc
@@ -0,0 +1,135 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+// The helgrind3 test performs a DB->put() in one thread concurrently with a DB->get() in another thread.
+#include <arpa/inet.h>
+
+#include <db.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <memory.h>
+
+DB_ENV *env;
+DB *db;
+
+static void initialize (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, 0777);
+
+ // setup environment
+ {
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->set_redzone(env, 0); CKERR(r);
+ env->set_errfile(env, stdout);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_PRIVATE | DB_CREATE, 0777);
+ assert(r == 0);
+ }
+
+ // setup DB
+ {
+ DB_TXN *txn = 0;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, 0777); assert(r == 0);
+ }
+
+ // Put some stuff in
+ {
+ char v[10];
+ DB_TXN *txn = 0;
+ int i;
+ const int n = 10;
+ memset(v, 0, sizeof(v));
+ for (i=0; i<n; i++) {
+ int k = htonl(i);
+ DBT key, val;
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, v, sizeof(v)), 0);
+ assert(r == 0);
+ }
+ }
+}
+
+static void finish (void) {
+ int r;
+ r = db->close(db, 0); assert(r==0);
+ r = env->close(env, 0); assert(r==0);
+}
+
+static void *starta(void* ignore __attribute__((__unused__))) {
+ DB_TXN *txn = 0;
+ DBT key, val;
+ char data[10];
+ val.data = data;
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ val.flags |= DB_DBT_MALLOC;
+ int k = htonl(99);
+ int r = db->put(db, txn, dbt_init(&key, &k, sizeof k), &val, 0);
+ assert(r==0);
+ //printf("val.data=%p\n", val.data);
+ return 0;
+}
+static void *startb(void* ignore __attribute__((__unused__))) {
+ DB_TXN *txn = 0;
+ DBT key, val;
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ int k = htonl(0);
+ val.flags |= DB_DBT_MALLOC;
+ int r = db->get(db, txn, dbt_init(&key, &k, sizeof k), &val, 0);
+ assert(r==0);
+ //printf("val.data=%p\n", val.data);
+ int i; for (i=0; i<10; i++) assert(((char*)val.data)[i]==0);
+ toku_free(val.data);
+ return 0;
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ pthread_t a,b;
+ initialize();
+ { int x = pthread_create(&a, NULL, starta, NULL); assert(x==0); }
+ { int x = pthread_create(&b, NULL, startb, NULL); assert(x==0); }
+ { int x = pthread_join(a, NULL); assert(x==0); }
+ { int x = pthread_join(b, NULL); assert(x==0); }
+ finish();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/hot-optimize-table-tests.cc b/storage/tokudb/PerconaFT/src/tests/hot-optimize-table-tests.cc
new file mode 100644
index 00000000..f0fe3c90
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hot-optimize-table-tests.cc
@@ -0,0 +1,239 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// hot-optimize-table-tests.cc
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL |
+ DB_CREATE |
+ DB_THREAD |
+ DB_INIT_LOCK |
+ DB_INIT_LOG |
+ DB_INIT_TXN |
+ DB_PRIVATE;
+
+DB_ENV* env;
+unsigned int leaf_hits;
+
+// Custom Update Function for our test FT.
+static int
+update_func(DB* UU(db),
+ const DBT* key,
+ const DBT* old_val,
+ const DBT* extra,
+ void (*set_val)(const DBT* new_val, void* set_extra) __attribute__((unused)),
+ void* UU(set_extra))
+{
+ unsigned int *x_results;
+ assert(extra->size == sizeof x_results);
+ x_results = *(unsigned int **) extra->data;
+ assert(x_results);
+ assert(old_val->size > 0);
+ unsigned int* indexptr;
+ assert(key->size == (sizeof *indexptr));
+ indexptr = (unsigned int*)key->data;
+ ++leaf_hits;
+
+ if (verbose && x_results[*indexptr] != 0) {
+ printf("x_results = %p, indexptr = %p, *indexptr = %u, x_results[*indexptr] = %u\n", x_results, indexptr, *indexptr, x_results[*indexptr]);
+ }
+
+ assert(x_results[*indexptr] == 0);
+ x_results[*indexptr]++;
+ // ++(x_results[*indexptr]);
+ // memset(&new_val, 0, sizeof(new_val));
+ // set_val(&new_val, set_extra);
+ unsigned int i = *indexptr;
+ if (verbose && ((i + 1) % 50000 == 0)) {
+ printf("applying update to %u\n", i);
+ //printf("x_results[] = %u\n", x_results[*indexptr]);
+ }
+
+ return 0;
+}
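+// db->update_broadcast() (issued from hot_test() below) queues a broadcast
+// message that applies update_func to every row as it reaches the leaves;
+// hot_optimize() forces the message down, after which hot_test() asserts that
+// each x_results[i] was bumped exactly once.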
+
+///
+static void
+hot_test_setup(void)
+{
+ int r = 0;
+ // Remove any previous environment.
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ // Set up a new environment.
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp);CKERR(r);
+ env->set_update(env, update_func);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void
+hot_test_destroy(void)
+{
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+///
+static void
+hot_insert_keys(DB* db, unsigned int key_count)
+{
+ int r = 0;
+ DB_TXN * xact;
+ unsigned int limit = 1;
+ if (key_count > 10) {
+ limit = 100000;
+ }
+
+ // Dummy data.
+ const unsigned int DUMMY_SIZE = 100;
+ size_t size = DUMMY_SIZE;
+ char* dummy = NULL;
+ dummy = (char*)toku_xmalloc(size);
+ memset(dummy, 0, size);
+
+ // Start the transaction for insertions.
+ //
+ r = env->txn_begin(env, 0, &xact, 0); CKERR(r);
+
+ unsigned int key;
+
+ DBT key_thing;
+ DBT *keyptr = dbt_init(&key_thing, &key, sizeof(key));
+ DBT value_thing;
+ DBT *valueptr = dbt_init(&value_thing, dummy, size);
+ for (key = 0; key < key_count; ++key)
+ {
+ { int chk_r = db->put(db, xact, keyptr, valueptr, 0); CKERR(chk_r); }
+
+ // DEBUG OUTPUT
+ //
+ if (verbose && (key + 1) % limit == 0) {
+ printf("%d Elements inserted.\n", key + 1);
+ }
+ }
+
+ // Commit the insert transaction.
+ //
+ r = xact->commit(xact, 0); CKERR(r);
+
+ toku_free(dummy);
+}
+
+///
+static void
+hot_create_db(DB** db, const char* c)
+{
+ int r = 0;
+ DB_TXN* xact;
+ verbose ? printf("Creating DB.\n") : 0;
+ r = env->txn_begin(env, 0, &xact, 0); CKERR(r);
+ { int chk_r = db_create(db, env, 0); CKERR(chk_r); }
+ { int chk_r = (*db)->open((*db), xact, c, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ r = xact->commit(xact, 0); CKERR(r);
+ verbose ? printf("DB Created.\n") : 0;
+}
+
+///
+static void
+hot_test(DB* db, unsigned int size)
+{
+ int r = 0;
+ leaf_hits = 0;
+ verbose ? printf("Insert some data.\n") : 0;
+
+ // Insert our keys to assemble the tree.
+ hot_insert_keys(db, size);
+
+ // Insert Broadcast Message.
+ verbose ? printf("Insert Broadcast Message.\n") : 0;
+ unsigned int *XMALLOC_N(size, x_results);
+ memset(x_results, 0, (sizeof x_results[0]) * size);
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, &x_results, sizeof x_results);
+ DB_TXN * xact;
+ r = env->txn_begin(env, 0, &xact, 0); CKERR(r);
+ r = db->update_broadcast(db, xact, extrap, 0); CKERR(r);
+ r = xact->commit(xact, 0); CKERR(r);
+
+ // Flatten the tree.
+ verbose ? printf("Calling hot optimize...\n") : 0;
+ uint64_t loops_run;
+ r = db->hot_optimize(db, NULL, NULL, NULL, NULL, &loops_run);
+ assert(r == 0);
+ verbose ? printf("HOT Finished!\n") : 0;
+ for (unsigned int i = 0; i < size; ++i) {
+ assert(x_results[i] == 1);
+ }
+ verbose ? printf("Leaves hit = %u\n", leaf_hits) :0;
+ toku_free(x_results);
+}
+
+///
+int
+test_main(int argc, char * const argv[])
+{
+ int r = 0;
+ default_parse_args(argc, argv);
+ hot_test_setup();
+
+ // Create and Open the Database/FT
+ DB *db = NULL;
+ const unsigned int BIG = 4000000;
+ const unsigned int SMALL = 10;
+ const unsigned int NONE = 0;
+
+ hot_create_db(&db, "none.db");
+ hot_test(db, NONE);
+ r = db->close(db, 0);
+ CKERR(r);
+ hot_create_db(&db, "small.db");
+ hot_test(db, SMALL);
+ r = db->close(db, 0);
+ CKERR(r);
+ hot_create_db(&db, "big.db");
+ hot_test(db, BIG);
+ r = db->close(db, 0);
+ CKERR(r);
+
+ hot_test_destroy();
+ verbose ? printf("Exiting Test.\n") : 0;
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-bw.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-bw.cc
new file mode 100644
index 00000000..5336bc33
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-bw.cc
@@ -0,0 +1,475 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "key-val.h"
+
+toku_mutex_t put_lock;
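+// put_lock serializes the clients' put_multiple calls against indexer
+// creation and close in test_indexer() below.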
+
+enum {NUM_INDEXER_INDEXES=1};
+static const int NUM_DBS = NUM_INDEXER_INDEXES + 1; // 1 for source DB
+static const int NUM_ROWS = 100000;
+static int num_rows;
+static const int FORWARD = 0;
+static const int BACKWARD = 1;
+typedef int Direction;
+static const int TXN_CREATE = 1;
+static const int TXN_END = 2;
+typedef int TxnWork;
+
+DB_ENV *env;
+
+/*
+ * client() is a routine intended to be run in a separate thread from index creation.
+ * It takes a client spec which describes the work to be done:
+ * - direction : move through ever-increasing or ever-decreasing rows
+ * - txnwork : whether a transaction should be created or closed within the client
+ * (this allows the client transaction to start before or during index creation,
+ * and to close during or after index creation)
+ */
+
+
+typedef struct {
+ uint32_t num; // number of rows to write
+ uint32_t start; // approximate start row
+ int offset; // offset from stride (= MAX_CLIENTS)
+ Direction dir;
+ TxnWork txnwork;
+ DB_TXN *txn;
+ DB **dbs;
+ int client_number;
+ uint32_t *flags;
+} client_spec_t, *client_spec;
+
+int client_count = 0;
+
+static void * client(void *arg)
+{
+ client_spec CAST_FROM_VOIDP(cs, arg);
+ client_count++;
+ if ( verbose ) printf("client[%d]\n", cs->client_number);
+ assert(cs->client_number < MAX_CLIENTS);
+ assert(cs->dir == FORWARD || cs->dir == BACKWARD);
+
+ int r;
+ if ( cs->txnwork & TXN_CREATE ) { r = env->txn_begin(env, NULL, &cs->txn, 0); CKERR(r); }
+
+ DBT key, val;
+ DBT dest_keys[NUM_DBS];
+ DBT dest_vals[NUM_DBS];
+ uint32_t k, v;
+ int n = cs->start;
+
+ for(int which=0;which<NUM_DBS;which++) {
+ dbt_init(&dest_keys[which], NULL, 0);
+ dest_keys[which].flags = DB_DBT_REALLOC;
+
+ dbt_init(&dest_vals[which], NULL, 0);
+ dest_vals[which].flags = DB_DBT_REALLOC;
+ }
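+ // The destination DBTs use DB_DBT_REALLOC so put_multiple can (re)allocate
+ // and reuse the same buffers on every iteration; they are freed once at the
+ // end of this client.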
+
+ int rr = 0;
+ int retry = 0;
+ for (uint32_t i = 0; i < cs->num; i++ ) {
+ DB_TXN *txn;
+ r = env->txn_begin(env, cs->txn, &txn, 0); CKERR(r);
+ k = key_to_put(n, cs->offset);
+ v = generate_val(k, 0);
+ dbt_init(&key, &k, sizeof(k));
+ dbt_init(&val, &v, sizeof(v));
+
+ while ( retry++ < 10 ) {
+ toku_mutex_lock(&put_lock);
+ rr = env_put_multiple_test_no_array(env,
+ cs->dbs[0],
+ txn,
+ &key,
+ &val,
+ NUM_DBS,
+ cs->dbs, // dest dbs
+ dest_keys,
+ dest_vals,
+ cs->flags);
+ toku_mutex_unlock(&put_lock);
+ if ( rr == 0 ) break;
+ sleep(0);
+ }
+ if ( rr != 0 ) {
+ if ( verbose ) printf("client[%u] : put_multiple returns %d, i=%u, n=%u, key=%u\n", cs->client_number, rr, i, n, k);
+ r = txn->abort(txn); CKERR(r);
+ break;
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+ n = ( cs->dir == FORWARD ) ? n + 1 : n - 1;
+ retry = 0;
+ }
+
+ if ( cs->txnwork & TXN_END ) { r = cs->txn->commit(cs->txn, DB_TXN_SYNC); CKERR(r); }
+ if (verbose) printf("client[%d] done\n", cs->client_number);
+
+ for (int which=0; which<NUM_DBS; which++) {
+ toku_free(dest_keys[which].data);
+ toku_free(dest_vals[which].data);
+ }
+
+ return 0;
+}
+
+toku_pthread_t *client_threads;
+client_spec_t *client_specs;
+
+static void clients_init(DB **dbs, uint32_t *flags)
+{
+ XMALLOC_N(MAX_CLIENTS, client_threads);
+ XMALLOC_N(MAX_CLIENTS, client_specs);
+
+ client_specs[0].client_number = 0;
+// client_specs[0].start = 0;
+ client_specs[0].start = num_rows - 1;
+ client_specs[0].num = num_rows;
+ client_specs[0].offset = -1;
+// client_specs[0].dir = FORWARD;
+ client_specs[0].dir = BACKWARD;
+ client_specs[0].txnwork = TXN_CREATE | TXN_END;
+ client_specs[0].txn = NULL;
+ client_specs[0].dbs = dbs;
+ client_specs[0].flags = flags;
+
+ client_specs[1].client_number = 1;
+ client_specs[1].start = 0;
+ client_specs[1].num = num_rows;
+ client_specs[1].offset = 1;
+ client_specs[1].dir = FORWARD;
+ client_specs[1].txnwork = TXN_CREATE | TXN_END;
+ client_specs[1].txn = NULL;
+ client_specs[1].dbs = dbs;
+ client_specs[1].flags = flags;
+
+}
+
+static void clients_cleanup(void)
+{
+ toku_free(client_threads); client_threads = NULL;
+ toku_free(client_specs); client_specs = NULL;
+}
+
+// verify results
+// - read the keys in the primary table, then calculate what keys should exist
+// in the other DB. Read the other table to verify.
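+// The expected index keys are twiddle32(primary_key, which); they are sorted
+// with qsort and then compared against a forward scan of the index.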
+static void check_results(DB *src, DB *db)
+{
+ int r;
+ int pass = 1;
+
+ int clients = client_count;
+
+ int max_rows = ( clients + 1 ) * num_rows;
+ unsigned int *db_keys = (unsigned int *) toku_malloc(max_rows * sizeof (unsigned int));
+
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ DBC *cursor;
+ r = src->cursor(src, txn, &cursor, 0); CKERR(r);
+
+ int which = *(uint32_t*)db->app_private;
+
+ // scan the primary table,
+ // calculate the expected keys in 'db'
+ int row = 0;
+ while ( r != DB_NOTFOUND ) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if ( r != DB_NOTFOUND ) {
+ k = *((uint32_t *)(key.data));
+ db_keys[row] = twiddle32(k, which);
+ row++;
+ }
+ }
+ if ( verbose ) printf("primary table scanned, contains %d rows\n", row);
+ int primary_rows = row;
+ r = cursor->c_close(cursor); CKERR(r);
+ // sort the expected keys
+ qsort(db_keys, primary_rows, sizeof (unsigned int), uint_cmp);
+
+ if ( verbose > 1 ) {
+ for(int i=0;i<primary_rows;i++) {
+ printf("primary table[%u] = %u\n", i, db_keys[i]);
+ }
+ }
+
+ // scan the indexer-created DB, comparing keys with expected keys
+ // - there should be exactly 'primary_rows' in the new index
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ for (int i=0;i<primary_rows;i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if ( r == DB_NOTFOUND ) {
+ printf("scan of index finds last row is %d\n", i);
+ }
+ CKERR(r);
+ k = *((uint32_t *)(key.data));
+ if ( db_keys[i] != k ) {
+ if ( verbose ) printf("ERROR expecting key %10u for row %d, found key = %10u\n", db_keys[i],i,k);
+ pass = 0;
+ i++;
+// goto check_results_error;
+ }
+ }
+ // next cursor op should return DB_NOTFOUND
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ assert(r == DB_NOTFOUND);
+
+ // we're done - cleanup and close
+//check_results_error:
+ r = cursor->c_close(cursor); CKERR(r);
+ toku_free(db_keys);
+ r = txn->commit(txn, 0); CKERR(r);
+ if ( verbose ) {
+ if ( pass ) printf("check_results : pass\n");
+ else printf("check_results : fail\n");
+ }
+ assert(pass);
+ return;
+}
+
+static void test_indexer(DB *src, DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_INDEXER *indexer;
+ uint32_t db_flags[NUM_DBS];
+
+
+ if ( verbose ) printf("test_indexer\n");
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = 0;
+ }
+ clients_init(dbs, db_flags);
+
+ // create and initialize indexer
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer create_indexer\n");
+ toku_mutex_lock(&put_lock);
+ r = env->create_indexer(env, txn, &indexer, src, NUM_DBS-1, &dbs[1], db_flags, 0);
+ CKERR(r);
+ r = indexer->set_error_callback(indexer, NULL, NULL);
+ CKERR(r);
+ r = indexer->set_poll_function(indexer, poll_print, NULL);
+ CKERR(r);
+ toku_mutex_unlock(&put_lock);
+
+ // start threads doing additional inserts - no lock issues since indexer
+ // already created
+ r = toku_pthread_create(toku_uninstrumented,
+ &client_threads[0],
+ nullptr,
+ client,
+ static_cast<void *>(&client_specs[0]));
+ CKERR(r);
+ // r = toku_pthread_create(toku_uninstrumented, &client_threads[1], 0,
+ // client, (void *)&client_specs[1]); CKERR(r);
+
+ struct timeval start, now;
+ if (verbose) {
+ printf("test_indexer build\n");
+ gettimeofday(&start,0);
+ }
+ r = indexer->build(indexer);
+ CKERR(r);
+ if ( verbose ) {
+ gettimeofday(&now,0);
+ int duration = (int)(now.tv_sec - start.tv_sec);
+ if ( duration > 0 )
+ printf("test_indexer build : sec = %d\n", duration);
+ }
+
+ if ( verbose ) printf("test_indexer close\n");
+ toku_mutex_lock(&put_lock);
+ r = indexer->close(indexer);
+ CKERR(r);
+ toku_mutex_unlock(&put_lock);
+ r = txn->commit(txn, DB_TXN_SYNC);
+ CKERR(r);
+
+ void *t0;
+ r = toku_pthread_join(client_threads[0], &t0); CKERR(r);
+// void *t1;
+// r = toku_pthread_join(client_threads[1], &t1); CKERR(r);
+
+ clients_cleanup();
+
+ if ( verbose ) printf("check_results\n");
+ check_results(src, dbs[1]);
+
+ if ( verbose ) printf("PASS\n");
+ if ( verbose ) printf("test_indexer done\n");
+}
+
+static void run_test(void) {
+ int r;
+ toku_mutex_init(toku_uninstrumented, &put_lock, nullptr);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU + S_IRWXG + S_IRWXO);
+ CKERR(r);
+ char logname[TOKU_PATH_MAX + 1];
+ r = toku_os_mkdir(toku_path_join(logname, 2, TOKU_TEST_FILENAME, "log"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log"); CKERR(r);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ generate_permute_tables();
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ int ids[MAX_DBS];
+ DB *dbs[MAX_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ ids[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &ids[i];
+ char key_name[32];
+ sprintf(key_name, "key%d", i);
+ r = dbs[i]->open(dbs[i], NULL, key_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ // generate the src DB (do not use put_multiple)
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = generate_initial_table(dbs[0], txn, num_rows); CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ // -------------------------- //
+ if (1) test_indexer(dbs[0], dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+ toku_mutex_destroy(&put_lock);
+ r = env->close(env, 0); CKERR(r);
+}
+
+// ------------ infrastructure ----------
+
+static inline void
+do_args (int argc, char * const argv[]) {
+ const char *progname=argv[0];
+ num_rows = NUM_ROWS;
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else if (strcmp(argv[0],"-r")==0) {
+ argc--; argv++;
+ num_rows = atoi(argv[0]);
+ } else {
+ fprintf(stderr, "Usage:\n %s [-v] [-q] [-r rows]\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+}
+
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+
+/*
+ * Please ignore this code - I don't think I'm going to use it, but I don't want to lose it
+ * I will delete this later - Dave
+
+ if ( rr != 0 ) { // possible lock deadlock
+ if (verbose > 1) {
+ printf("client[%u] : put_multiple returns %d, i=%u, n=%u, key=%u\n", cs->client_number, rr, i, n, k);
+ if ( verbose > 2 ) print_engine_status(env);
+ }
+ // abort the transaction, freeing up locks associated with previous put_multiples
+ if ( verbose > 1 ) printf("start txn abort\n");
+ r = txn->abort(txn); CKERR(r);
+ if ( verbose > 1 ) printf(" txn aborted\n");
+ sleep(2 + cs->client_number);
+ // now retry, waiting until the deadlock resolves itself
+ r = env->txn_begin(env, cs->txn, &txn, 0); CKERR(r);
+ if ( verbose > 1 ) printf("txn begin\n");
+ while ( rr != 0 ) {
+ rr = env->put_multiple(env,
+ cs->dbs[0],
+ txn,
+ &key,
+ &val,
+ NUM_DBS,
+ cs->dbs, // dest dbs
+ dest_keys,
+ dest_vals,
+ cs->flags,
+ NULL);
+ if ( rr != 0 ) {
+ if ( verbose ) printf("client[%u] : put_multiple returns %d, i=%u, n=%u, key=%u\n", cs->client_number, rr, i, n, k);
+ if ( verbose ) printf("start txn abort\n");
+ r = txn->abort(txn); CKERR(r);
+ if ( verbose ) printf(" txn aborted\n");
+ sleep(2 + cs->client_number);
+ r = env->txn_begin(env, cs->txn, &txn, 0); CKERR(r);
+ if ( verbose ) printf("txn begin\n");
+ }
+ }
+ */
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-error-callback.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-error-callback.cc
new file mode 100644
index 00000000..97e136ea
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-error-callback.cc
@@ -0,0 +1,172 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "key-val.h"
+#include "ydb.h"
+#include "indexer.h"
+
+enum {NUM_DBS=1};
+static const int NUM_ROWS = 10;
+typedef enum {FORWARD = 0, BACKWARD} Direction;
+typedef enum {TXN_NONE = 0, TXN_CREATE = 1, TXN_END = 2} TxnWork;
+
+DB_ENV *env;
+
+int error_cb_count = 0;
+static void error_callback(DB *db, int which_db, int err, DBT *key, DBT *val, void *extra)
+{
+ error_cb_count++;
+ if ( verbose ) {
+ printf("error_callback (%d) : db_p = %p, which_db = %d, error = %d, key_p = %p, val_p = %p, extra_p = %p\n",
+ error_cb_count,
+ db, which_db,
+ err,
+ key, val, extra);
+ }
+}
+
+static void test_indexer(DB *src, DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_INDEXER *indexer;
+ uint32_t db_flags[NUM_DBS];
+
+ if ( verbose ) printf("test_indexer\n");
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ }
+
+ // create and initialize indexer
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer create_indexer\n");
+ r = env->create_indexer(env, txn, &indexer, src, NUM_DBS, dbs, db_flags, 0);
+ CKERR(r);
+ r = indexer->set_error_callback(indexer, error_callback, NULL);
+ CKERR(r);
+ toku_indexer_set_test_only_flags(indexer, INDEXER_TEST_ONLY_ERROR_CALLBACK);
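+ // INDEXER_TEST_ONLY_ERROR_CALLBACK is a test-only flag that makes the
+ // indexer exercise its error path during build(), so build() is expected
+ // to fail and to invoke error_callback exactly once (asserted below).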
+
+ r = indexer->set_poll_function(indexer, poll_print, NULL);
+ CKERR(r);
+
+ r = indexer->build(indexer);
+ assert(r != 0 ); // build should return an error
+ assert(error_cb_count == 1); // error callback count should be 1
+
+ if ( verbose ) printf("test_indexer close\n");
+ r = indexer->close(indexer);
+ CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC);
+ CKERR(r);
+
+ if ( verbose ) printf("PASS\n");
+ if ( verbose ) printf("test_indexer done\n");
+}
+
+
+static void run_test(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char logname[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(logname, 2, TOKU_TEST_FILENAME, "log"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log"); CKERR(r);
+// r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->set_default_bt_compare(env, int_dbt_cmp); CKERR(r);
+ generate_permute_tables();
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DB *src_db = NULL;
+ const char *src_name="src.db";
+ r = db_create(&src_db, env, 0); CKERR(r);
+ r = src_db->open(src_db, NULL, src_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = generate_initial_table(src_db, txn, NUM_ROWS); CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+
+ DB *dbs[NUM_DBS];
+ int idx[MAX_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ idx[i] = i+1;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ char key_name[32];
+ sprintf(key_name, "key%d", i);
+ r = dbs[i]->open(dbs[i], NULL, key_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ // -------------------------- //
+ if (1) test_indexer(src_db, dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = src_db->close(src_db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+// ------------ infrastructure ----------
+
+int test_main(int argc, char * const argv[]) {
+ default_parse_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-insert-committed-optimized.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-insert-committed-optimized.cc
new file mode 100644
index 00000000..0ffc57dc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-insert-committed-optimized.cc
@@ -0,0 +1,183 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include "key-val.h"
+#include <db.h>
+#include <sys/stat.h>
+
+DB_ENV *env;
+enum {NUM_DBS=1};
+enum {NUM_KV_PAIRS=3};
+struct kv_pair {
+ int64_t key;
+ int64_t val;
+};
+struct kv_pair kv_pairs[NUM_KV_PAIRS] = {{1,4},
+ {2,5},
+ {3,6}};
+
+static void test_indexer(DB *src, DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_INDEXER *indexer;
+ uint32_t db_flags[NUM_DBS];
+
+ if ( verbose ) printf("test_indexer\n");
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ }
+
+ // create and initialize indexer
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer create_indexer\n");
+ r = env->create_indexer(env, txn, &indexer, src, NUM_DBS, dbs, db_flags, 0);
+ CKERR(r);
+ r = indexer->set_error_callback(indexer, NULL, NULL);
+ CKERR(r);
+ r = indexer->set_poll_function(indexer, poll_print, NULL);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer build\n");
+ r = indexer->build(indexer);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer close\n");
+ r = indexer->close(indexer);
+ CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC);
+ CKERR(r);
+
+ if ( verbose ) printf("PASS\n");
+ if ( verbose ) printf("test_indexer done\n");
+}
+
+const char *src_name="src.db";
+
+static void run_test(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char logname[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(logname, 2, TOKU_TEST_FILENAME, "log"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log"); CKERR(r);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate_switch); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); CKERR(r);
+ r = src_db->open(src_db, NULL, src_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT key, val;
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ for(int i=0;i<NUM_KV_PAIRS;i++) {
+ dbt_init(&key, &kv_pairs[i].key, sizeof(kv_pairs[i].key));
+ dbt_init(&val, &kv_pairs[i].val, sizeof(kv_pairs[i].val));
+ r = src_db->put(src_db, txn, &key, &val, 0); CKERR(r);
+ }
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ r = src_db->optimize(src_db); CKERR(r);
+
+ DB *dbs[NUM_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char key_name[32];
+ sprintf(key_name, "key%d", i);
+ r = dbs[i]->open(dbs[i], NULL, key_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ dbs[i]->app_private = (void *) (intptr_t) i;
+ }
+
+ // -------------------------- //
+ if (1) test_indexer(src_db, dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = src_db->close(src_db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-insert-committed.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-insert-committed.cc
new file mode 100644
index 00000000..21b4ecfc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-insert-committed.cc
@@ -0,0 +1,181 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include "key-val.h"
+#include <db.h>
+#include <sys/stat.h>
+
+DB_ENV *env;
+enum {NUM_DBS=1};
+enum {NUM_KV_PAIRS=3};
+struct kv_pair {
+ int64_t key;
+ int64_t val;
+};
+struct kv_pair kv_pairs[NUM_KV_PAIRS] = {{1,4},
+ {2,5},
+ {3,6}};
+
+static void test_indexer(DB *src, DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_INDEXER *indexer;
+ uint32_t db_flags[NUM_DBS];
+
+ if ( verbose ) printf("test_indexer\n");
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ }
+
+ // create and initialize indexer
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer create_indexer\n");
+ r = env->create_indexer(env, txn, &indexer, src, NUM_DBS, dbs, db_flags, 0);
+ CKERR(r);
+ r = indexer->set_error_callback(indexer, NULL, NULL);
+ CKERR(r);
+ r = indexer->set_poll_function(indexer, poll_print, NULL);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer build\n");
+ r = indexer->build(indexer);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer close\n");
+ r = indexer->close(indexer);
+ CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC);
+ CKERR(r);
+
+ if ( verbose ) printf("PASS\n");
+ if ( verbose ) printf("test_indexer done\n");
+}
+
+const char *src_name="src.db";
+
+static void run_test(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char logname[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(logname, 2, TOKU_TEST_FILENAME, "log"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log"); CKERR(r);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate_switch); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); CKERR(r);
+ r = src_db->open(src_db, NULL, src_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT key, val;
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ for(int i=0;i<NUM_KV_PAIRS;i++) {
+ dbt_init(&key, &kv_pairs[i].key, sizeof(kv_pairs[i].key));
+ dbt_init(&val, &kv_pairs[i].val, sizeof(kv_pairs[i].val));
+ r = src_db->put(src_db, txn, &key, &val, 0); CKERR(r);
+ }
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ DB *dbs[NUM_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char key_name[32];
+ sprintf(key_name, "key%d", i);
+ r = dbs[i]->open(dbs[i], NULL, key_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ dbs[i]->app_private = (void *) (intptr_t) i;
+ }
+
+ // -------------------------- //
+ if (1) test_indexer(src_db, dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = src_db->close(src_db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-insert-provisional.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-insert-provisional.cc
new file mode 100644
index 00000000..aa1173ef
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-insert-provisional.cc
@@ -0,0 +1,182 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include "key-val.h"
+#include <db.h>
+#include <sys/stat.h>
+
+DB_ENV *env;
+enum {NUM_DBS=1};
+enum {NUM_KV_PAIRS=3};
+struct kv_pair {
+ int64_t key;
+ int64_t val;
+};
+struct kv_pair kv_pairs[NUM_KV_PAIRS] = {{1,4},
+ {2,5},
+ {3,6}};
+
+static void test_indexer(DB *src, DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_INDEXER *indexer;
+ uint32_t db_flags[NUM_DBS];
+
+ if ( verbose ) printf("test_indexer\n");
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ }
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer create_indexer\n");
+ r = env->create_indexer(env, txn, &indexer, src, NUM_DBS, dbs, db_flags, 0);
+ CKERR(r);
+ r = indexer->set_error_callback(indexer, NULL, NULL);
+ CKERR(r);
+ r = indexer->set_poll_function(indexer, poll_print, NULL);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer build\n");
+ r = indexer->build(indexer);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer close\n");
+ r = indexer->close(indexer);
+ CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC);
+ CKERR(r);
+
+ if ( verbose ) printf("PASS\n");
+ if ( verbose ) printf("test_indexer done\n");
+}
+
+const char *src_name="src.db";
+
+static void run_test(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char logname[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(logname, 2, TOKU_TEST_FILENAME, "log"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log"); CKERR(r);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate_switch); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); CKERR(r);
+ r = src_db->open(src_db, NULL, src_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT key, val;
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ for(int i=0;i<NUM_KV_PAIRS;i++) {
+ dbt_init(&key, &kv_pairs[i].key, sizeof(kv_pairs[i].key));
+ dbt_init(&val, &kv_pairs[i].val, sizeof(kv_pairs[i].val));
+ r = src_db->put(src_db, txn, &key, &val, 0); CKERR(r);
+ }
+
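+ // Note: the transaction holding these inserts is intentionally left open here,
+ // so the rows are still provisional while the indexer builds below; it is
+ // committed only after test_indexer() returns.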
+ DB *dbs[NUM_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char key_name[32];
+ sprintf(key_name, "key%d", i);
+ r = dbs[i]->open(dbs[i], NULL, key_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ dbs[i]->app_private = (void *) (intptr_t) i;
+ }
+
+ // -------------------------- //
+ if (1) test_indexer(src_db, dbs);
+ // -------------------------- //
+
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = src_db->close(src_db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-lock-test.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-lock-test.cc
new file mode 100644
index 00000000..c06a06dc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-lock-test.cc
@@ -0,0 +1,220 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include "key-val.h"
+#include <db.h>
+#include <sys/stat.h>
+
+DB_ENV *env;
+enum {NUM_DBS=1};
+enum {NUM_KV_PAIRS=3};
+struct kv_pair {
+ int64_t key;
+ int64_t val;
+};
+struct kv_pair kv_pairs[NUM_KV_PAIRS] = {{1,4},
+ {2,5},
+ {3,6}};
+
+static void run_indexer(DB *src, DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_INDEXER *indexer;
+ uint32_t db_flags[NUM_DBS];
+
+ if ( verbose ) printf("test_indexer\n");
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ }
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ if ( verbose ) printf("run_indexer create_indexer\n");
+ r = env->create_indexer(env, txn, &indexer, src, NUM_DBS, dbs, db_flags, 0);
+ CKERR(r);
+ r = indexer->set_error_callback(indexer, NULL, NULL);
+ CKERR(r);
+ r = indexer->set_poll_function(indexer, poll_print, NULL);
+ CKERR(r);
+
+ if ( verbose ) printf("run_indexer build\n");
+ r = indexer->build(indexer);
+ CKERR(r);
+
+ if ( verbose ) printf("run_indexer close\n");
+ r = indexer->close(indexer);
+ CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC);
+ CKERR(r);
+
+ if ( verbose ) printf("run_indexer done\n");
+}
+
+const char *src_name="src.db";
+
+static void run_test(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char logname[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(logname, 2, TOKU_TEST_FILENAME, "log"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log"); CKERR(r);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate_switch); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ db_env_enable_engine_status(0); // disable engine status on crash
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); CKERR(r);
+ r = src_db->open(src_db, NULL, src_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT key, val;
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ for(int i=0;i<NUM_KV_PAIRS;i++) {
+ dbt_init(&key, &kv_pairs[i].key, sizeof(kv_pairs[i].key));
+ dbt_init(&val, &kv_pairs[i].val, sizeof(kv_pairs[i].val));
+ r = src_db->put(src_db, txn, &key, &val, 0); CKERR(r);
+ }
+
+ DB *dbs[NUM_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char key_name[32];
+ sprintf(key_name, "key%d", i);
+ r = dbs[i]->open(dbs[i], NULL, key_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ dbs[i]->app_private = (void *) (intptr_t) i;
+ }
+
+ run_indexer(src_db, dbs);
+
+ // at this point the hot dictionary should hold locks on the rows, since the transaction
+ // that populated the src dictionary is still open
+
+ // try overwriting a value in hot dictionary[0]
+ {
+ DB_TXN *owrt_txn;
+ r = env->txn_begin(env, NULL, &owrt_txn, 0);
+ CKERR(r);
+
+ dbt_init(&key, &kv_pairs[0].key, sizeof(kv_pairs[0].key));
+ dbt_init(&val, &kv_pairs[0].val, sizeof(kv_pairs[0].val));
+ r = dbs[0]->put(dbs[0], owrt_txn, &key, &val, 0);
+
+ assert(r == DB_LOCK_NOTGRANTED );
+ if ( verbose ) printf("lock contention detected, as expected ( put returns DB_LOCK_NOTGRANTED )\n");
+
+ r = owrt_txn->commit(owrt_txn, DB_TXN_SYNC);
+ CKERR(r);
+ }
+
+ // close the transaction (releasing locks), and try writing again
+ r = txn->commit(txn, DB_TXN_SYNC);
+ CKERR(r);
+ {
+ DB_TXN *owrt_txn;
+ r = env->txn_begin(env, NULL, &owrt_txn, 0);
+ CKERR(r);
+
+ dbt_init(&key, &kv_pairs[0].key, sizeof(kv_pairs[0].key));
+ dbt_init(&val, &kv_pairs[0].val, sizeof(kv_pairs[0].val));
+ r = dbs[0]->put(dbs[0], owrt_txn, &key, &val, 0);
+
+ assert(r == 0 );
+ if ( verbose ) printf("no lock contention detected, as expected ( put returns 0 )\n");
+
+ r = owrt_txn->commit(owrt_txn, DB_TXN_SYNC);
+ CKERR(r);
+ }
+
+
+ if ( verbose ) printf("PASS\n");
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = src_db->close(src_db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-multiclient.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-multiclient.cc
new file mode 100644
index 00000000..004a19ec
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-multiclient.cc
@@ -0,0 +1,478 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "key-val.h"
+
+enum {NUM_INDEXER_INDEXES=1};
+static const int NUM_DBS = NUM_INDEXER_INDEXES + 1; // 1 for source DB
+static const int NUM_ROWS = 10000;
+int num_rows;
+typedef enum {FORWARD = 0, BACKWARD} Direction;
+typedef enum {TXN_NONE = 0, TXN_CREATE = 1, TXN_END = 2} TxnWork;
+
+DB_ENV *env;
+
+/*
+ * client() is a routine intended to be run in a separate thread from index creation.
+ * It takes a client spec describing the work to be done:
+ * - direction : insert rows with ever-increasing or ever-decreasing keys
+ * - txnwork : whether a transaction should be created and/or closed within the client
+ * (this allows the client transaction to start before or during index creation,
+ * and to close during or after index creation)
+ */
+
+typedef struct {
+ uint32_t num; // number of rows to write
+ uint32_t start; // approximate start row
+ int offset; // offset from stride (= MAX_CLIENTS)
+ Direction dir;
+ int txnwork;
+ DB_TXN *txn;
+ uint32_t max_inserts_per_txn; // this is for the parent transaction
+ DB **dbs;
+ int client_number;
+ uint32_t *flags;
+} client_spec_t, *client_spec;
+
+int client_count = 0;
+
+static void * client(void *arg)
+{
+ client_spec CAST_FROM_VOIDP(cs, arg);
+ client_count++;
+ if ( verbose ) printf("client[%d]\n", cs->client_number);
+ assert(cs->client_number < MAX_CLIENTS);
+ assert(cs->dir == FORWARD || cs->dir == BACKWARD);
+
+ int r;
+ if ( cs->txnwork & TXN_CREATE ) { r = env->txn_begin(env, NULL, &cs->txn, 0); CKERR(r); }
+
+ DBT key, val;
+ DBT dest_keys[NUM_DBS];
+ DBT dest_vals[NUM_DBS];
+ uint32_t k, v;
+ int n = cs->start;
+
+ for(int which=0;which<NUM_DBS;which++) {
+ dbt_init(&dest_keys[which], NULL, 0);
+ dest_keys[which].flags = DB_DBT_REALLOC;
+
+ dbt_init(&dest_vals[which], NULL, 0);
+ dest_vals[which].flags = DB_DBT_REALLOC;
+ }
+
+ int rr;
+ uint32_t inserts = 0;
+ for (uint32_t i = 0; i < cs->num; i++ ) {
+ DB_TXN *txn;
+ r = env->txn_begin(env, cs->txn, &txn, 0); CKERR(r);
+ k = key_to_put(n, cs->offset);
+ v = generate_val(k, 0);
+ dbt_init(&key, &k, sizeof(k));
+ dbt_init(&val, &v, sizeof(v));
+
+ rr = env_put_multiple_test_no_array(env,
+ cs->dbs[0],
+ txn,
+ &key,
+ &val,
+ NUM_DBS,
+ cs->dbs, // dest dbs
+ dest_keys,
+ dest_vals,
+ cs->flags);
+ if ( rr != 0 ) {
+ if ( verbose ) printf("client[%u] : put_multiple returns %d, i=%u, n=%u, key=%u\n", cs->client_number, rr, i, n, k);
+ r = txn->abort(txn); CKERR(r);
+ break;
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+ // limit the number of inserts per parent transaction to prevent lock escalation
+ inserts++;
+ if ( inserts >= cs->max_inserts_per_txn ) {
+ r = cs->txn->commit(cs->txn, 0); CKERR(r);
+ r = env->txn_begin(env, NULL, &cs->txn, 0); CKERR(r);
+ inserts = 0;
+ }
+ n = ( cs->dir == FORWARD ) ? n + 1 : n - 1;
+ }
+
+ if ( cs->txnwork & TXN_END ) { r = cs->txn->commit(cs->txn, DB_TXN_SYNC); CKERR(r); }
+ if (verbose) printf("client[%d] done\n", cs->client_number);
+
+ for (int which=0; which<NUM_DBS; which++) {
+ toku_free(dest_keys[which].data);
+ toku_free(dest_vals[which].data);
+ }
+
+
+ return 0;
+}
+
+toku_pthread_t *client_threads;
+client_spec_t *client_specs;
+
+static void clients_init(DB **dbs, uint32_t *flags)
+{
+ XMALLOC_N(MAX_CLIENTS, client_threads);
+ XMALLOC_N(MAX_CLIENTS, client_specs);
+
+ client_specs[0].client_number = 0;
+ client_specs[0].start = 0;
+ client_specs[0].num = num_rows;
+ client_specs[0].offset = -1;
+ client_specs[0].dir = FORWARD;
+ client_specs[0].txnwork = TXN_CREATE | TXN_END;
+ client_specs[0].txn = NULL;
+ client_specs[0].max_inserts_per_txn = 1000;
+ client_specs[0].dbs = dbs;
+ client_specs[0].flags = flags;
+
+ client_specs[1].client_number = 1;
+ client_specs[1].start = 0;
+ client_specs[1].num = num_rows;
+ client_specs[1].offset = 1;
+ client_specs[1].dir = FORWARD;
+ client_specs[1].txnwork = TXN_CREATE | TXN_END;
+ client_specs[1].txn = NULL;
+ client_specs[1].max_inserts_per_txn = 100;
+ client_specs[1].dbs = dbs;
+ client_specs[1].flags = flags;
+
+ client_specs[2].client_number = 2;
+ client_specs[2].start = num_rows -1;
+ client_specs[2].num = num_rows;
+ client_specs[2].offset = -2;
+ client_specs[2].dir = BACKWARD;
+ client_specs[2].txnwork = TXN_CREATE | TXN_END;
+ client_specs[2].txn = NULL;
+ client_specs[2].max_inserts_per_txn = 1000;
+ client_specs[2].dbs = dbs;
+ client_specs[2].flags = flags;
+}
+
+static void clients_cleanup(void)
+{
+ toku_free(client_threads); client_threads = NULL;
+ toku_free(client_specs); client_specs = NULL;
+}
+
+// verify results
+// - read the keys in the primary table, then calculate what keys should exist
+// in the other DB. Read the other table to verify.
+static int check_results(DB *src, DB *db)
+{
+ int r;
+ int fail = 0;
+
+ int clients = client_count;
+
+ int max_rows = ( clients + 1 ) * num_rows;
+ unsigned int *db_keys = (unsigned int *) toku_malloc(max_rows * sizeof (unsigned int));
+
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ DBC *cursor;
+ r = src->cursor(src, txn, &cursor, 0); CKERR(r);
+
+ int which = *(uint32_t*)db->app_private;
+
+ // scan the primary table,
+ // calculate the expected keys in 'db'
+ int row = 0;
+ while ( r != DB_NOTFOUND ) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if ( r != DB_NOTFOUND ) {
+ k = *((uint32_t *)(key.data));
+ db_keys[row] = twiddle32(k, which);
+ row++;
+ }
+ }
+ if ( verbose ) printf("primary table scanned, contains %d rows\n", row);
+ int primary_rows = row;
+ r = cursor->c_close(cursor); CKERR(r);
+ // sort the expected keys
+ qsort(db_keys, primary_rows, sizeof (unsigned int), uint_cmp);
+
+ if ( verbose > 1 ) {
+ for(int i=0;i<primary_rows;i++) {
+ printf("primary table[%u] = %u\n", i, db_keys[i]);
+ }
+ }
+
+ // scan the indexer-created DB, comparing keys with expected keys
+ // - there should be exactly 'primary_rows' in the new index
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ for (int i=0;i<primary_rows;i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if ( r == DB_NOTFOUND ) {
+ printf("scan of index finds last row is %d\n", i);
+ }
+ CKERR(r);
+ k = *((uint32_t *)(key.data));
+ if ( db_keys[i] != k ) {
+ if ( verbose ) printf("ERROR expecting key %10u for row %d, found key = %10u\n", db_keys[i],i,k);
+ fail = 1;
+ goto check_results_error;
+ }
+ }
+ // next cursor op should return DB_NOTFOUND
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ assert(r == DB_NOTFOUND);
+
+ // we're done - cleanup and close
+check_results_error:
+ r = cursor->c_close(cursor); CKERR(r);
+ toku_free(db_keys);
+ r = txn->commit(txn, 0); CKERR(r);
+ if ( verbose ) {
+ if ( fail ) printf("check_results : fail\n");
+ else printf("check_results : pass\n");
+ }
+ return fail;
+}
+
+static void test_indexer(DB *src, DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_INDEXER *indexer;
+ uint32_t db_flags[NUM_DBS];
+
+
+ if ( verbose ) printf("test_indexer\n");
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = 0;
+ }
+ clients_init(dbs, db_flags);
+
+ // create and initialize indexer
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer create_indexer\n");
+ r = env->create_indexer(env, txn, &indexer, src, NUM_DBS-1, &dbs[1], db_flags, 0);
+ CKERR(r);
+ r = indexer->set_error_callback(indexer, NULL, NULL);
+ CKERR(r);
+ r = indexer->set_poll_function(indexer, poll_print, NULL);
+ CKERR(r);
+
+ // start threads doing additional inserts - no lock conflicts are expected since the
+ // indexer has already been created
+ r = toku_pthread_create(toku_uninstrumented,
+ &client_threads[0],
+ nullptr,
+ client,
+ static_cast<void *>(&client_specs[0]));
+ CKERR(r);
+ r = toku_pthread_create(toku_uninstrumented,
+ &client_threads[1],
+ nullptr,
+ client,
+ static_cast<void *>(&client_specs[1]));
+ CKERR(r);
+ // r = toku_pthread_create(toku_uninstrumented, &client_threads[2], 0,
+ // client, (void *)&client_specs[2]); CKERR(r);
+
+ struct timeval start, now;
+ if (verbose) {
+ printf("test_indexer build\n");
+ gettimeofday(&start,0);
+ }
+ r = indexer->build(indexer);
+ CKERR(r);
+ if ( verbose ) {
+ gettimeofday(&now,0);
+ int duration = (int)(now.tv_sec - start.tv_sec);
+ if ( duration > 0 )
+ printf("test_indexer build : sec = %d\n", duration);
+ }
+
+ void *t0; r = toku_pthread_join(client_threads[0], &t0); CKERR(r);
+ void *t1; r = toku_pthread_join(client_threads[1], &t1); CKERR(r);
+// void *t2; r = toku_pthread_join(client_threads[2], &t2); CKERR(r);
+
+ if ( verbose ) printf("test_indexer close\n");
+ r = indexer->close(indexer);
+ CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC);
+ CKERR(r);
+
+ clients_cleanup();
+
+ if ( verbose ) printf("check_results\n");
+ r = check_results(src, dbs[1]);
+ CKERR(r);
+
+ if ( verbose && (r == 0)) printf("PASS\n");
+ if ( verbose && (r == 0)) printf("test_indexer done\n");
+}
+
+
+static void run_test(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char logname[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(logname, 2, TOKU_TEST_FILENAME, "log"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log"); CKERR(r);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ generate_permute_tables();
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | DB_INIT_LOG;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ int ids[MAX_DBS];
+ DB *dbs[MAX_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ ids[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &ids[i];
+ char key_name[32];
+ sprintf(key_name, "key%d", i);
+ r = dbs[i]->open(dbs[i], NULL, key_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ // generate the src DB (do not use put_multiple)
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = generate_initial_table(dbs[0], txn, num_rows); CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ // -------------------------- //
+ if (1) test_indexer(dbs[0], dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+ r = env->close(env, 0); CKERR(r);
+}
+
+// ------------ infrastructure ----------
+
+static inline void
+do_args (int argc, char * const argv[]) {
+ const char *progname=argv[0];
+ num_rows = NUM_ROWS;
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else if (strcmp(argv[0],"-r")==0) {
+ argc--; argv++;
+ num_rows = atoi(argv[0]);
+ } else {
+ fprintf(stderr, "Usage:\n %s [-v] [-q] [-r rows]\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+}
+
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+
+/*
+ * Please ignore this code - I don't think I'm going to use it, but I don't want to lose it
+ * I will delete this later - Dave
+
+ if ( rr != 0 ) { // possible lock deadlock
+ if (verbose > 1) {
+ printf("client[%u] : put_multiple returns %d, i=%u, n=%u, key=%u\n", cs->client_number, rr, i, n, k);
+ if ( verbose > 2 ) print_engine_status(env);
+ }
+ // abort the transaction, freeing up locks associated with previous put_multiples
+ if ( verbose > 1 ) printf("start txn abort\n");
+ r = txn->abort(txn); CKERR(r);
+ if ( verbose > 1 ) printf(" txn aborted\n");
+ sleep(2 + cs->client_number);
+ // now retry, waiting until the deadlock resolves itself
+ r = env->txn_begin(env, cs->txn, &txn, 0); CKERR(r);
+ if ( verbose > 1 ) printf("txn begin\n");
+ while ( rr != 0 ) {
+ rr = env->put_multiple(env,
+ cs->dbs[0],
+ txn,
+ &key,
+ &val,
+ NUM_DBS,
+ cs->dbs, // dest dbs
+ dest_keys,
+ dest_vals,
+ cs->flags,
+ NULL);
+ if ( rr != 0 ) {
+ if ( verbose ) printf("client[%u] : put_multiple returns %d, i=%u, n=%u, key=%u\n", cs->client_number, rr, i, n, k);
+ if ( verbose ) printf("start txn abort\n");
+ r = txn->abort(txn); CKERR(r);
+ if ( verbose ) printf(" txn aborted\n");
+ sleep(2 + cs->client_number);
+ r = env->txn_begin(env, cs->txn, &txn, 0); CKERR(r);
+ if ( verbose ) printf("txn begin\n");
+ }
+ }
+ */
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-nested-insert-committed.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-nested-insert-committed.cc
new file mode 100644
index 00000000..25b01728
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-nested-insert-committed.cc
@@ -0,0 +1,187 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include "key-val.h"
+#include <db.h>
+#include <sys/stat.h>
+
+DB_ENV *env;
+enum {NUM_DBS=1};
+enum {NUM_KV_PAIRS=3};
+struct kv_pair {
+ int64_t key;
+ int64_t val;
+};
+struct kv_pair kv_pairs[NUM_KV_PAIRS] = {{1,4},
+ {2,5},
+ {3,6}};
+
+static void test_indexer(DB *src, DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_INDEXER *indexer;
+ uint32_t db_flags[NUM_DBS];
+
+ if ( verbose ) printf("test_indexer\n");
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ }
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer create_indexer\n");
+ r = env->create_indexer(env, txn, &indexer, src, NUM_DBS, dbs, db_flags, 0);
+ CKERR(r);
+ r = indexer->set_error_callback(indexer, NULL, NULL);
+ CKERR(r);
+ r = indexer->set_poll_function(indexer, poll_print, NULL);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer build\n");
+ r = indexer->build(indexer);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer close\n");
+ r = indexer->close(indexer);
+ CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC);
+ CKERR(r);
+
+ if ( verbose ) printf("PASS\n");
+ if ( verbose ) printf("test_indexer done\n");
+}
+
+const char *src_name="src.db";
+
+static void run_test(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char logname[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(logname, 2, TOKU_TEST_FILENAME, "log"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log"); CKERR(r);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate_switch); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); CKERR(r);
+ r = src_db->open(src_db, NULL, src_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT key, val;
+
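+ // Create a nested transaction: child txn inside parent txn0. Both are
+ // committed below, so the source rows are fully committed before the
+ // indexer runs.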
+ DB_TXN *txn0 = NULL;
+ r = env->txn_begin(env, NULL, &txn0, 0); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, txn0, &txn, 0); CKERR(r);
+
+ for(int i=0;i<NUM_KV_PAIRS;i++) {
+ dbt_init(&key, &kv_pairs[i].key, sizeof(kv_pairs[i].key));
+ dbt_init(&val, &kv_pairs[i].val, sizeof(kv_pairs[i].val));
+ r = src_db->put(src_db, txn, &key, &val, 0); CKERR(r);
+ }
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+ r = txn0->commit(txn0, DB_TXN_SYNC); CKERR(r);
+
+ DB *dbs[NUM_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char key_name[32];
+ sprintf(key_name, "key%d", i);
+ r = dbs[i]->open(dbs[i], NULL, key_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ dbs[i]->app_private = (void *) (intptr_t) i;
+ }
+
+ // -------------------------- //
+ if (1) test_indexer(src_db, dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = src_db->close(src_db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-put-abort.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-put-abort.cc
new file mode 100644
index 00000000..f4dd9c53
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-put-abort.cc
@@ -0,0 +1,191 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "ydb.h"
+#include "toku_pthread.h"
+
+// This test reproduces the rollback log corruption that occurs when hot indexing runs concurrently with a long abort.
+// The concurrency arises because the abort periodically releases the ydb lock, which allows the hot indexer
+// to run. The hot indexer erroneously appends to the rollback log that is in the process of being aborted.
+
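+// Row generator for the hot index: for DB_DBT_REALLOC destinations, the
+// secondary key is a copy of the source row's value and the secondary value
+// is empty.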
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ (void) dest_db; (void) src_db; (void) dest_keys; (void) dest_vals; (void) src_key; (void) src_val;
+ lazy_assert(src_db != NULL && dest_db != NULL);
+
+ if (dest_key->flags == DB_DBT_REALLOC) {
+ dest_key->data = toku_realloc(dest_key->data, src_val->size);
+ memcpy(dest_key->data, src_val->data, src_val->size);
+ dest_key->size = src_val->size;
+ }
+ dest_val->size = 0;
+
+ return 0;
+}
+
+struct indexer_arg {
+ DB_ENV *env;
+ DB *src_db;
+ int n_dest_db;
+ DB **dest_db;
+};
+
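+// Runs in a separate thread: builds the hot index inside its own transaction
+// while the main thread aborts the bulk-load transaction concurrently.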
+static void *
+indexer_thread(void *arg) {
+ struct indexer_arg *indexer_arg = (struct indexer_arg *) arg;
+ DB_ENV *env = indexer_arg->env;
+ int r;
+
+ DB_TXN *indexer_txn = NULL;
+ r = env->txn_begin(env, NULL, &indexer_txn, 0); assert_zero(r);
+
+ DB_INDEXER *indexer = NULL;
+ r = env->create_indexer(env, indexer_txn, &indexer, indexer_arg->src_db, indexer_arg->n_dest_db, indexer_arg->dest_db, NULL, 0); assert_zero(r);
+
+ r = indexer->build(indexer); assert_zero(r);
+
+ r = indexer->close(indexer); assert_zero(r);
+
+ r = indexer_txn->commit(indexer_txn, 0); assert_zero(r);
+
+ return arg;
+}
+
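+// Scan the DB with a cursor and assert that it contains no visible rows.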
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+
+ DBT key, val;
+ r = cursor->c_get(cursor, dbt_init(&key, 0, 0), dbt_init(&val, 0, 0), DB_NEXT);
+ assert(r == DB_NOTFOUND);
+
+ r = cursor->c_close(cursor); assert_zero(r);
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(void) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); assert_zero(r);
+ r = src_db->open(src_db, NULL, "0.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *dest_db = NULL;
+ r = db_create(&dest_db, env, 0); assert_zero(r);
+ r = dest_db->open(dest_db, NULL, "1.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // insert some
+ for (int i = 0; i < 246723; i++) {
+ int k = htonl(i);
+ int v = i;
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v, sizeof v);
+ r = src_db->put(src_db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ // run the indexer
+ struct indexer_arg indexer_arg = {env, src_db, 1, &dest_db};
+ toku_pthread_t pid;
+ r = toku_pthread_create(
+ toku_uninstrumented, &pid, nullptr, indexer_thread, &indexer_arg);
+ assert_zero(r);
+
+ r = txn->abort(txn);
+ assert_zero(r);
+
+ void *ret;
+ r = toku_pthread_join(pid, &ret); assert_zero(r);
+
+ verify_empty(env, src_db);
+ verify_empty(env, dest_db);
+
+ r = src_db->close(src_db, 0); assert_zero(r);
+
+ r = dest_db->close(dest_db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-put-commit.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-put-commit.cc
new file mode 100644
index 00000000..99c9bf30
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-put-commit.cc
@@ -0,0 +1,218 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "ydb.h"
+#include "toku_pthread.h"
+
+// This test reproduces the rollback log corruption that occurs when hot indexing runs concurrently with a long commit.
+// The concurrency arises because the commit periodically releases the ydb lock, which allows the hot indexer
+// to run. The hot indexer erroneously appends to the rollback log that is in the process of being committed.
+
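+// Row generator for the hot index: for DB_DBT_REALLOC destinations, the
+// source key and value are copied through unchanged, so the destination
+// index should mirror the source table.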
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+
+ lazy_assert(src_db != NULL && dest_db != NULL);
+
+ if (dest_key->flags == DB_DBT_REALLOC) {
+ dest_key->data = toku_realloc(dest_key->data, src_key->size);
+ memcpy(dest_key->data, src_key->data, src_key->size);
+ dest_key->size = src_key->size;
+ }
+ if (dest_val->flags == DB_DBT_REALLOC) {
+ dest_val->data = toku_realloc(dest_val->data, src_val->size);
+ memcpy(dest_val->data, src_val->data, src_val->size);
+ dest_val->size = src_val->size;
+ }
+
+ return 0;
+}
+
+struct indexer_arg {
+ DB_ENV *env;
+ DB *src_db;
+ int n_dest_db;
+ DB **dest_db;
+};
+
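+// Runs in a separate thread: builds the hot index inside its own transaction
+// while the main thread commits the bulk-load transaction concurrently.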
+static void *
+indexer_thread(void *arg) {
+ struct indexer_arg *indexer_arg = (struct indexer_arg *) arg;
+ DB_ENV *env = indexer_arg->env;
+ int r;
+
+ DB_TXN *indexer_txn = NULL;
+ r = env->txn_begin(env, NULL, &indexer_txn, 0); assert_zero(r);
+
+ DB_INDEXER *indexer = NULL;
+ r = env->create_indexer(env, indexer_txn, &indexer, indexer_arg->src_db, indexer_arg->n_dest_db, indexer_arg->dest_db, NULL, 0); assert_zero(r);
+
+ if (verbose) fprintf(stderr, "build start\n");
+ r = indexer->build(indexer); assert_zero(r);
+ if (verbose) fprintf(stderr, "build end\n");
+
+ r = indexer->close(indexer); assert_zero(r);
+
+ r = indexer_txn->commit(indexer_txn, 0); assert_zero(r);
+
+ return arg;
+}
+
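+// Scan the DB with a cursor and assert that it contains exactly rows 0..n-1,
+// with key htonl(i) and value i, in key order.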
+static void
+verify_full(DB_ENV *env, DB *db, int n) {
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+
+ int i = 0;
+ DBT key; dbt_init_realloc(&key);
+ DBT val; dbt_init_realloc(&val);
+ while (1) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r == DB_NOTFOUND)
+ break;
+ int k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == (int) htonl(i));
+ int v;
+ assert(val.size == sizeof v);
+ memcpy(&v, val.data, val.size);
+ assert(v == i);
+ i++;
+ }
+ assert(i == n);
+ toku_free(key.data);
+ toku_free(val.data);
+
+ r = cursor->c_close(cursor); assert_zero(r);
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(void) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); assert_zero(r);
+ r = src_db->open(src_db, NULL, "0.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *dest_db = NULL;
+ r = db_create(&dest_db, env, 0); assert_zero(r);
+ r = dest_db->open(dest_db, NULL, "1.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // insert some
+ int n = 246723;
+ for (int i = 0; i < n; i++) {
+ int k = htonl(i);
+ int v = i;
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v, sizeof v);
+ r = src_db->put(src_db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ // run the indexer
+ struct indexer_arg indexer_arg = {env, src_db, 1, &dest_db};
+ toku_pthread_t pid;
+ r = toku_pthread_create(
+ toku_uninstrumented, &pid, nullptr, indexer_thread, &indexer_arg);
+ assert_zero(r);
+
+ if (verbose)
+ fprintf(stderr, "commit start\n");
+ r = txn->commit(txn, 0);
+ assert_zero(r);
+ if (verbose) fprintf(stderr, "commit end\n");
+
+ void *ret;
+ r = toku_pthread_join(pid, &ret); assert_zero(r);
+
+ verify_full(env, src_db, n);
+ verify_full(env, dest_db, n);
+
+ r = src_db->close(src_db, 0); assert_zero(r);
+
+ r = dest_db->close(dest_db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-put-multiple.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-put-multiple.cc
new file mode 100644
index 00000000..d26a78b9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-put-multiple.cc
@@ -0,0 +1,225 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+
+DB_ENV *env;
+enum {NUM_DBS=1};
+enum {NUM_KV_PAIRS=3};
+struct kv_pair {
+ int64_t key;
+ int64_t val;
+};
+struct kv_pair kv_pairs[NUM_KV_PAIRS] = {{1,4},
+ {2,5},
+ {3,6}};
+
+static int put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ dest_key->flags = 0;
+ dest_val->flags = 0;
+
+ (void) src_db;
+
+ uint32_t which = (uint32_t) (intptr_t) dest_db->app_private;
+
+ if (which == NUM_DBS) {
+ // primary
+ dbt_init(dest_key, src_key->data, src_key->size);
+ dbt_init(dest_val, src_val->data, src_val->size);
+ } else {
+ // secondaries: switch the key and val
+ dbt_init(dest_key, src_val->data, src_val->size);
+ dbt_init(dest_val, src_key->data, src_key->size);
+ }
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
+
+static int poll_print(void *extra, float progress) {
+ (void) progress;
+ (void) extra;
+ if ( verbose ) printf("poll_print %f\n", progress);
+ return 0;
+}
+
+const char *src_name="src.db";
+
+static void run_test(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char logname[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(logname, 2, TOKU_TEST_FILENAME, "log"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log"); CKERR(r);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); CKERR(r);
+ r = src_db->open(src_db, NULL, src_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ src_db->app_private = (void *) NUM_DBS;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ for(int i=0;i<NUM_KV_PAIRS;i++) {
+ DBT key, val;
+ dbt_init(&key, &kv_pairs[i].key, sizeof(kv_pairs[i].key));
+ dbt_init(&val, &kv_pairs[i].val, sizeof(kv_pairs[i].val));
+ r = src_db->put(src_db, txn, &key, &val, 0); CKERR(r);
+ }
+
+ DB *dbs[NUM_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char key_name[32];
+ sprintf(key_name, "key%d", i);
+ r = dbs[i]->open(dbs[i], NULL, key_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ dbs[i]->app_private = (void *) (intptr_t) i;
+ }
+
+ DB_TXN *hottxn;
+ r = env->txn_begin(env, NULL, &hottxn, 0);
+ CKERR(r);
+
+ DB_INDEXER *indexer;
+ r = env->create_indexer(env, hottxn, &indexer, src_db, NUM_DBS, dbs, NULL, 0);
+ CKERR(r);
+ r = indexer->set_error_callback(indexer, NULL, NULL);
+ CKERR(r);
+ r = indexer->set_poll_function(indexer, poll_print, NULL);
+ CKERR(r);
+
+ // setup putm
+ DB *putm_dbs[NUM_DBS+1];
+ for (int i = 0; i < NUM_DBS; i++)
+ putm_dbs[i] = dbs[i];
+ putm_dbs[NUM_DBS] = src_db;
+
+ DBT putm_keys[NUM_DBS+1], putm_vals[NUM_DBS+1];
+
+ uint32_t putm_flags[NUM_DBS+1];
+ for (int i = 0; i < NUM_DBS+1; i++)
+ putm_flags[i] = 0;
+
+ DBT prikey; int64_t pk;
+ dbt_init(&prikey, &pk, sizeof pk);
+
+ DBT prival; int64_t pv;
+ dbt_init(&prival, &pv, sizeof pv);
+
+ // putm (8,9)
+ pk = 8; pv = 9;
+ r = env_put_multiple_test_no_array(env, src_db, txn, &prikey, &prival, NUM_DBS+1, putm_dbs, putm_keys, putm_vals, putm_flags);
+ CKERR(r);
+
+ r = indexer->build(indexer);
+ CKERR(r);
+
+ // putm (9, 10)
+ pk = 9; pv = 10;
+ r = env_put_multiple_test_no_array(env, src_db, txn, &prikey, &prival, NUM_DBS+1, putm_dbs, putm_keys, putm_vals, putm_flags);
+ CKERR(r);
+
+ r = indexer->close(indexer);
+ CKERR(r);
+ r = hottxn->commit(hottxn, DB_TXN_SYNC);
+ CKERR(r);
+
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = src_db->close(src_db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-simple-abort-put.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-simple-abort-put.cc
new file mode 100644
index 00000000..59ba79df
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-simple-abort-put.cc
@@ -0,0 +1,130 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
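+// Row generator: the secondary key is a copy of the source value (via
+// toku_xmemdup) and the secondary value is empty.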
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+ lazy_assert(src_db != NULL && dest_db != NULL);
+
+ toku_free(dest_key->data);
+ dest_key->data = toku_xmemdup(src_val->data, src_val->size);
+ dest_key->ulen = dest_key->size = src_val->size;
+ dest_val->size = 0;
+
+ return 0;
+}
+
+static void
+run_test(void) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); assert_zero(r);
+ r = src_db->open(src_db, NULL, "0.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *dest_db = NULL;
+ r = db_create(&dest_db, env, 0); assert_zero(r);
+ r = dest_db->open(dest_db, NULL, "1.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_TXN* index_txn = NULL;
+ r = env->txn_begin(env, NULL, &index_txn , 0); assert_zero(r);
+ DB_TXN* put_txn = NULL;
+ r = env->txn_begin(env, NULL, &put_txn , 0); assert_zero(r);
+
+ DBT key,data;
+ r = src_db->put(
+ src_db,
+ put_txn,
+ dbt_init(&key, "hello", 6),
+ dbt_init(&data, "there", 6),
+ 0
+ ); assert_zero(r);
+
+ DB_INDEXER *indexer = NULL;
+ r = env->create_indexer(env, index_txn, &indexer, src_db, 1, &dest_db, NULL, 0); assert_zero(r);
+ r = indexer->build(indexer); assert_zero(r);
+ r = indexer->close(indexer); assert_zero(r);
+ r = index_txn->abort(index_txn); assert_zero(r);
+
+ r = put_txn->abort(put_txn); assert_zero(r);
+
+
+ r = src_db->close(src_db, 0); assert_zero(r);
+ r = dest_db->close(dest_db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-simple-abort.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-simple-abort.cc
new file mode 100644
index 00000000..c7fe44ff
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-simple-abort.cc
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ dest_key->flags = 0;
+ dest_val->flags = 0;
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+
+ lazy_assert(src_db != NULL && dest_db != NULL);
+
+ dest_key->data = src_val->data;
+ dest_key->size = src_val->size;
+ dest_val->size = 0;
+
+ return 0;
+}
+
+static void
+run_test(void) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); assert_zero(r);
+ r = src_db->open(src_db, NULL, "0.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *dest_db = NULL;
+ r = db_create(&dest_db, env, 0); assert_zero(r);
+ r = dest_db->open(dest_db, NULL, "1.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DB_INDEXER *indexer = NULL;
+ r = env->create_indexer(env, txn, &indexer, src_db, 1, &dest_db, NULL, 0); assert_zero(r);
+
+ r = indexer->abort(indexer); assert_zero(r);
+
+ r = txn->abort(txn); assert_zero(r);
+
+ r = src_db->close(src_db, 0); assert_zero(r);
+ r = dest_db->close(dest_db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-test.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-test.cc
new file mode 100644
index 00000000..e688a457
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-test.cc
@@ -0,0 +1,598 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the hotindexer undo do function
+// read a description of the live transactions and a leafentry from a test file, run the undo do function,
+// and print out the actions taken by the undo do function while processing the leafentry
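+// typical invocation (illustrative): hotindexer-undo-do-test [-v|-q] hotindexer-undo-do-tests/<name>.test ...
+// the printed actions are compared against the matching <name>.result file (see hotindexer-undo-do-tests/README)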
+
+#include "test.h"
+
+#include <ft/ule.h>
+#include <ft/ule-internal.h>
+#include <ft/le-cursor.h>
+#include <ft/txn/xids.h>
+
+#include "indexer-internal.h"
+
+struct txn {
+ TXNID xid;
+ TOKUTXN_STATE state;
+};
+
+struct live {
+ int n;
+ int o;
+ struct txn *txns;
+};
+
+static void
+live_init(struct live *live) {
+ live->n = live->o = 0;
+ live->txns = NULL;
+}
+
+static void
+live_destroy(struct live *live) {
+ toku_free(live->txns);
+}
+
+static void
+live_add(struct live *live, TXNID xid, TOKUTXN_STATE state) {
+ if (live->o >= live->n) {
+ int newn = live->n == 0 ? 1 : live->n * 2;
+ live->txns = (struct txn *) toku_realloc(live->txns, newn * sizeof (struct txn));
+ resource_assert(live->txns);
+ live->n = newn;
+ }
+ live->txns[live->o++] = (struct txn ) { xid, state };
+}
+
+static TOKUTXN_STATE
+lookup_txn_state(struct live *live, TXNID xid) {
+ TOKUTXN_STATE r = TOKUTXN_RETIRED;
+ for (int i = 0; i < live->o; i++) {
+ if (live->txns[i].xid == xid) {
+ r = live->txns[i].state;
+ break;
+ }
+ }
+ return r;
+}
+
+// live transaction ID set
+struct live live_xids;
+
+static void
+uxr_init(UXR uxr, uint8_t type, void *val, uint32_t vallen, TXNID xid) {
+ uxr->type = type;
+ uxr->valp = toku_malloc(vallen); resource_assert(uxr->valp);
+ memcpy(uxr->valp, val, vallen);
+ uxr->vallen = vallen;
+ uxr->xid = xid;
+}
+
+static void
+uxr_destroy(UXR uxr) {
+ toku_free(uxr->valp);
+ uxr->valp = NULL;
+}
+
+static ULE
+ule_init(ULE ule) {
+ ule->num_puxrs = 0;
+ ule->num_cuxrs = 0;
+ ule->uxrs = ule->uxrs_static;
+ return ule;
+}
+
+static void
+ule_destroy(ULE ule) {
+ for (unsigned int i = 0; i < ule->num_cuxrs + ule->num_puxrs; i++) {
+ uxr_destroy(&ule->uxrs[i]);
+ }
+}
+
+static void
+ule_add_provisional(ULE ule, UXR uxr) {
+ invariant(ule->num_cuxrs + ule->num_puxrs + 1 <= MAX_TRANSACTION_RECORDS*2);
+ ule->uxrs[ule->num_cuxrs + ule->num_puxrs] = *uxr;
+ ule->num_puxrs++;
+}
+
+static void
+ule_add_committed(ULE ule, UXR uxr) {
+ lazy_assert(ule->num_puxrs == 0);
+ invariant(ule->num_cuxrs + 1 <= MAX_TRANSACTION_RECORDS*2);
+ ule->uxrs[ule->num_cuxrs] = *uxr;
+ ule->num_cuxrs++;
+}
+
+static ULE
+ule_create(void) {
+ ULE ule = (ULE) toku_calloc(1, sizeof (ULE_S)); resource_assert(ule);
+ if (ule)
+ ule_init(ule);
+ return ule;
+}
+
+static void
+ule_free(ULE ule) {
+ ule_destroy(ule);
+ toku_free(ule);
+}
+
+static void
+print_xids(XIDS xids) {
+ printf("[");
+ if (xids->num_xids == 0)
+ printf("0");
+ else {
+ for (int i = 0; i < xids->num_xids; i++) {
+ printf("%" PRIu64, xids->ids[i]);
+ if (i+1 < xids->num_xids)
+ printf(",");
+ }
+ }
+ printf("] ");
+}
+
+static void
+print_dbt(DBT *dbt) {
+ printf("%.*s ", dbt->size, (char *) dbt->data);
+}
+
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+
+ lazy_assert(src_db != NULL && dest_db != NULL);
+
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->data = src_val->data;
+ dest_key->size = src_val->size;
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->data = toku_realloc(dest_key->data, src_val->size);
+ memcpy(dest_key->data, src_val->data, src_val->size);
+ dest_key->size = src_val->size;
+ break;
+ default:
+ lazy_assert(0);
+ }
+
+ if (dest_val)
+ switch (dest_val->flags) {
+ case 0:
+ lazy_assert(0);
+ break;
+ case DB_DBT_REALLOC:
+ dest_val->data = toku_realloc(dest_val->data, src_key->size);
+ memcpy(dest_val->data, src_key->data, src_key->size);
+ dest_val->size = src_key->size;
+ break;
+ default:
+ lazy_assert(0);
+ }
+
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data) {
+ toku_dbt_array_resize(dest_keys, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ (void) dest_db; (void) src_db; (void) dest_key; (void) src_key; (void) src_data;
+
+ lazy_assert(src_db != NULL && dest_db != NULL);
+
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->data = src_data->data;
+ dest_key->size = src_data->size;
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->data = toku_realloc(dest_key->data, src_data->size);
+ memcpy(dest_key->data, src_data->data, src_data->size);
+ dest_key->size = src_data->size;
+ break;
+ default:
+ lazy_assert(0);
+ }
+ return 0;
+}
+
+
+static DB_INDEXER *test_indexer = NULL;
+static DB *test_hotdb = NULL;
+
+static TOKUTXN_STATE
+test_xid_state(DB_INDEXER *indexer, TXNID xid) {
+ invariant(indexer == test_indexer);
+ TOKUTXN_STATE r = lookup_txn_state(&live_xids, xid);
+ return r;
+}
+
+static void
+test_lock_key(DB_INDEXER *indexer, TXNID xid, DB *hotdb, DBT *key) {
+ invariant(indexer == test_indexer);
+ invariant(hotdb == test_hotdb);
+ TOKUTXN_STATE txn_state = test_xid_state(indexer, xid);
+ invariant(txn_state == TOKUTXN_LIVE || txn_state == TOKUTXN_PREPARING);
+ printf("lock [%" PRIu64 "] ", xid);
+ print_dbt(key);
+ printf("\n");
+}
+
+static int
+test_delete_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids) {
+ invariant(indexer == test_indexer);
+ invariant(hotdb == test_hotdb);
+ printf("delete_provisional ");
+ print_xids(xids);
+ print_dbt(hotkey);
+ printf("\n");
+ return 0;
+}
+
+static int
+test_delete_committed(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids) {
+ invariant(indexer == test_indexer);
+ invariant(hotdb == test_hotdb);
+ printf("delete_committed ");
+ print_xids(xids);
+ print_dbt(hotkey);
+ printf("\n");
+ return 0;
+}
+
+static int
+test_insert_provisional(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids) {
+ invariant(indexer == test_indexer);
+ invariant(hotdb == test_hotdb);
+ printf("insert_provisional ");
+ print_xids(xids);
+ print_dbt(hotkey);
+ print_dbt(hotval);
+ printf("\n");
+ return 0;
+}
+
+static int
+test_insert_committed(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *hotval, XIDS xids) {
+ invariant(indexer == test_indexer);
+ invariant(hotdb == test_hotdb);
+ printf("insert_committed ");
+ print_xids(xids);
+ print_dbt(hotkey);
+ print_dbt(hotval);
+ printf("\n");
+ return 0;
+}
+
+static int
+test_commit_any(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids) {
+ invariant(indexer == test_indexer);
+ invariant(hotdb == test_hotdb);
+ printf("commit_any ");
+ print_xids(xids);
+ print_dbt(hotkey);
+ printf("\n");
+ return 0;
+}
+
+static int
+split_fields(char *line, char *fields[], int maxfields) {
+ int i;
+ for (i = 0; i < maxfields; i++, line = NULL) {
+ fields[i] = strtok(line, " ");
+ if (fields[i] == NULL)
+ break;
+ }
+ return i;
+}
+
+static int
+read_line(char **line_ptr, size_t *len_ptr, FILE *f) {
+ char *line = *line_ptr;
+ size_t len = 0;
+ bool in_comment = false;
+ while (1) {
+ int c = fgetc(f);
+ if (c == EOF)
+ break;
+ else if (c == '\n') {
+ in_comment = false;
+ if (len > 0)
+ break;
+ } else {
+ if (c == '#')
+ in_comment = true;
+ if (!in_comment) {
+ XREALLOC_N(len+1, line);
+ line[len++] = c;
+ }
+ }
+ }
+ if (len > 0) {
+ XREALLOC_N(len+1, line);
+ line[len] = '\0';
+ }
+ *line_ptr = line;
+ *len_ptr = len;
+ return len == 0 ? -1 : 0;
+}
+
+struct saved_lines_t {
+ char** savedlines;
+ uint32_t capacity;
+ uint32_t used;
+};
+
+static void
+save_line(char** line, saved_lines_t* saved) {
+ if (saved->capacity == saved->used) {
+ if (saved->capacity == 0) {
+ saved->capacity = 1;
+ }
+ saved->capacity *= 2;
+ XREALLOC_N(saved->capacity, saved->savedlines);
+ }
+ saved->savedlines[saved->used++] = *line;
+ *line = nullptr;
+}
+
+static int
+read_test(char *testname, ULE ule, DBT* key, saved_lines_t* saved) {
+ int r = 0;
+ FILE *f = fopen(testname, "r");
+ if (f) {
+ char *line = NULL;
+ size_t len = 0;
+ while (read_line(&line, &len, f) != -1) {
+ // printf("%s", line);
+
+ const int maxfields = 8;
+ char *fields[maxfields];
+ int nfields = split_fields(line, fields, maxfields);
+ // for (int i = 0; i < nfields; i++); printf("%s ", fields[i]); printf("\n");
+
+ if (nfields < 1)
+ continue;
+ // live xid...
+ if (strcmp(fields[0], "live") == 0) {
+ for (int i = 1; i < nfields; i++)
+ live_add(&live_xids, atoll(fields[i]), TOKUTXN_LIVE);
+ continue;
+ }
+ // xid <XID> [live|committing|aborting]
+ if (strcmp(fields[0], "xid") == 0 && nfields == 3) {
+ TXNID xid = atoll(fields[1]);
+ TOKUTXN_STATE state = TOKUTXN_RETIRED;
+ if (strcmp(fields[2], "live") == 0)
+ state = TOKUTXN_LIVE;
+ else if (strcmp(fields[2], "preparing") == 0)
+ state = TOKUTXN_PREPARING;
+ else if (strcmp(fields[2], "committing") == 0)
+ state = TOKUTXN_COMMITTING;
+ else if (strcmp(fields[2], "aborting") == 0)
+ state = TOKUTXN_ABORTING;
+ else
+ assert(0);
+ live_add(&live_xids, xid, state);
+ continue;
+ }
+ // key KEY
+ if (strcmp(fields[0], "key") == 0 && nfields == 2) {
+ save_line(&line, saved);
+ dbt_init(key, fields[1], strlen(fields[1]));
+ continue;
+ }
+ // insert committed|provisional XID DATA
+ if (strcmp(fields[0], "insert") == 0 && nfields == 4) {
+ save_line(&line, saved);
+ UXR_S uxr_s;
+ uxr_init(&uxr_s, XR_INSERT, fields[3], strlen(fields[3]), atoll(fields[2]));
+ if (fields[1][0] == 'p')
+ ule_add_provisional(ule, &uxr_s);
+ if (fields[1][0] == 'c')
+ ule_add_committed(ule, &uxr_s);
+ continue;
+ }
+ // delete committed|provisional XID
+ if (strcmp(fields[0], "delete") == 0 && nfields == 3) {
+ UXR_S uxr_s;
+ uxr_init(&uxr_s, XR_DELETE, NULL, 0, atoll(fields[2]));
+ if (fields[1][0] == 'p')
+ ule_add_provisional(ule, &uxr_s);
+ if (fields[1][0] == 'c')
+ ule_add_committed(ule, &uxr_s);
+ continue;
+ }
+ // placeholder XID
+ if (strcmp(fields[0], "placeholder") == 0 && nfields == 2) {
+ UXR_S uxr_s;
+ uxr_init(&uxr_s, XR_PLACEHOLDER, NULL, 0, atoll(fields[1]));
+ ule_add_provisional(ule, &uxr_s);
+ continue;
+ }
+ // placeholder provisional XID
+ if (strcmp(fields[0], "placeholder") == 0 && nfields == 3 && fields[1][0] == 'p') {
+ UXR_S uxr_s;
+ uxr_init(&uxr_s, XR_PLACEHOLDER, NULL, 0, atoll(fields[2]));
+ ule_add_provisional(ule, &uxr_s);
+ continue;
+ }
+ fprintf(stderr, "%s???\n", line);
+ r = EINVAL;
+ }
+ toku_free(line);
+ fclose(f);
+ } else {
+ r = errno;
+ fprintf(stderr, "fopen %s errno=%d\n", testname, errno);
+ }
+ return r;
+ }
+
+static int
+run_test(char *envdir, char *testname) {
+ if (verbose)
+ printf("%s\n", testname);
+
+ live_init(&live_xids);
+
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_redzone(env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+
+ r = env->open(env, envdir, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); assert_zero(r);
+ r = src_db->open(src_db, NULL, "0.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *dest_db = NULL;
+ r = db_create(&dest_db, env, 0); assert_zero(r);
+ r = dest_db->open(dest_db, NULL, "1.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DB_INDEXER *indexer = NULL;
+ r = env->create_indexer(env, txn, &indexer, src_db, 1, &dest_db, NULL, 0); assert_zero(r);
+
+ // set test callbacks
+ indexer->i->test_xid_state = test_xid_state;
+ indexer->i->test_lock_key = test_lock_key;
+ indexer->i->test_delete_provisional = test_delete_provisional;
+ indexer->i->test_delete_committed = test_delete_committed;
+ indexer->i->test_insert_provisional = test_insert_provisional;
+ indexer->i->test_insert_committed = test_insert_committed;
+ indexer->i->test_commit_any = test_commit_any;
+
+ // verify indexer and hotdb in the callbacks
+ test_indexer = indexer;
+ test_hotdb = dest_db;
+
+ // create a ule
+ ULE ule = ule_create();
+ ule_init(ule);
+
+ saved_lines_t saved;
+ ZERO_STRUCT(saved);
+ // read the test
+ DBT key;
+ ZERO_STRUCT(key);
+ r = read_test(testname, ule, &key, &saved);
+ if (r != 0)
+ return r;
+
+ r = indexer->i->undo_do(indexer, dest_db, &key, ule); assert_zero(r);
+
+ ule_free(ule);
+ key.data = NULL;
+
+ for (uint32_t i = 0; i < saved.used; i++) {
+ toku_free(saved.savedlines[i]);
+ }
+ toku_free(saved.savedlines);
+
+ r = indexer->close(indexer); assert_zero(r);
+
+ r = txn->abort(txn); assert_zero(r);
+
+ r = src_db->close(src_db, 0); assert_zero(r);
+ r = dest_db->close(dest_db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+
+ live_destroy(&live_xids);
+
+ return r;
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+
+ // parse_args(argc, argv);
+ int i;
+ for (i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+
+ break;
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert_zero(r);
+ for (r = 0 ; r == 0 && i < argc; i++) {
+ char *testname = argv[i];
+ char pid[10];
+ sprintf(pid, "%d", toku_os_getpid());
+ char envdir[TOKU_PATH_MAX+1];
+ toku_path_join(envdir, 2, TOKU_TEST_FILENAME, pid);
+
+ toku_os_recursive_delete(envdir);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ r = run_test(envdir, testname);
+ }
+
+ return r;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/README b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/README
new file mode 100644
index 00000000..184669b9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/README
@@ -0,0 +1,93 @@
+= Hotindexer undo do testing =
+
+The hotindexer undo do function is tested by feeding it a leafentry,
+capturing the actions it takes on the hot dictionary,
+and comparing those actions with the expected actions.
+The test passes if the two sets of actions match.
+
+Each test is described by a .test file and a .result file.
+The .test file describes the set of transactions that are live
+when the undo do function is called as well as the transaction records
+that comprise a leaf entry.
+
+The .result file describes the actions taken by the undo do function
+when processing the leaf entry.
+
+= Contents of a *.test =
+
+Comments begin with '#'.
+
+An <XIDLIST> is a list of transaction ids separated by a SPACE.
+
+<XIDS> is a stack of transaction ids separated by a COMMA.
+
+An <XID> is a 64-bit number.
+
+A <KEY> is a string.
+
+A <VALUE> is a string.
+
+The field separator is a single SPACE.
+
+== set the leaf entry key ==
+key <KEY>
+
+== add transaction IDs to the live transaction set ==
+live <XIDLIST>
+
+The live transaction set is initially empty.
+A single transaction with an explicit state can be added with:
+xid <XID> [live|preparing|committing|aborting]
+
+== push a delete transaction record onto the leaf entry stack ==
+delete [committed|provisional] <XID>
+
+== push an insert transaction record onto the leaf entry stack ==
+insert [committed|provisional] <XID> <VALUE>
+
+== push a placeholder onto the leaf entry stack ==
+placeholder [provisional] <XID>
+
+= Contents of a *.result =
+
+== insert committed ==
+insert_committed [<XIDS>] <KEY> <VALUE>
+commit_any [<XIDS>] <KEY>
+
+== delete committed ==
+delete_committed [<XIDS>] <KEY>
+commit_any [<XIDS>] <KEY>
+
+== insert provisional ==
+insert_provisional [<XIDS>] <KEY> <VALUE>
+
+== delete provisional ==
+delete_provisional [<XIDS>] <KEY>
+
+== lock ==
+lock [<XIDS>] <KEY>
+
+= Schema =
+
+Source dictionary: <source key, source data>
+
+Hot dictionary: <source data, source key>
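+
+= Example =
+
+For example, the commit.d0.d100.i200 test in this directory builds a leaf entry
+for key k1 from two committed deletes followed by a committed insert:
+
+    key k1
+    delete committed 0
+    delete committed 100
+    insert committed 200 v100
+
+Since the hot dictionary is keyed by the source data, the expected .result file
+contains:
+
+    insert_committed [200] v100 k1
+    commit_any [200] v100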
+
+
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.d200.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.d200.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.d200.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.d200.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.d200.test
new file mode 100644
index 00000000..77f2fbbb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.d200.test
@@ -0,0 +1,4 @@
+key k1
+delete committed 0
+delete committed 100
+delete committed 200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.i200.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.i200.result
new file mode 100644
index 00000000..2c56b6d1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.i200.result
@@ -0,0 +1,2 @@
+insert_committed [200] v100 k1
+commit_any [200] v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.i200.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.i200.test
new file mode 100644
index 00000000..e58ffbb6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.i200.test
@@ -0,0 +1,4 @@
+key k1
+delete committed 0
+delete committed 100
+insert committed 200 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.test
new file mode 100644
index 00000000..f7099778
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.d100.test
@@ -0,0 +1,3 @@
+key k1
+delete committed 0
+delete committed 100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.d200.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.d200.result
new file mode 100644
index 00000000..be42bdfb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.d200.result
@@ -0,0 +1,4 @@
+insert_committed [100] v100 k1
+commit_any [100] v100
+delete_committed [200] v100
+commit_any [200] v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.d200.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.d200.test
new file mode 100644
index 00000000..a2d7a805
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.d200.test
@@ -0,0 +1,4 @@
+key k1
+delete committed 0
+insert committed 100 v100
+delete committed 200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.i200.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.i200.result
new file mode 100644
index 00000000..a7fe43c0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.i200.result
@@ -0,0 +1,6 @@
+insert_committed [100] v100 k1
+commit_any [100] v100
+delete_committed [200] v100
+insert_committed [200] v200 k1
+commit_any [200] v100
+commit_any [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.i200.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.i200.test
new file mode 100644
index 00000000..cd4ed356
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.i200.test
@@ -0,0 +1,4 @@
+key k1
+delete committed 0
+insert committed 100 v100
+insert committed 200 v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.result
new file mode 100644
index 00000000..2dd40c83
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.result
@@ -0,0 +1,2 @@
+insert_committed [100] v100 k1
+commit_any [100] v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.test
new file mode 100644
index 00000000..ef695688
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.i100.test
@@ -0,0 +1,3 @@
+key k1
+delete committed 0
+insert committed 100 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.test
new file mode 100644
index 00000000..ebf9be61
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.d0.test
@@ -0,0 +1,3 @@
+# this test runs the undo do function on a leaf entry that consists of a single committed delete
+key k1
+delete committed 0
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.d200.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.d200.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.d200.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.d200.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.d200.test
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.d200.test
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.i200.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.i200.result
new file mode 100644
index 00000000..e63e2f5d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.i200.result
@@ -0,0 +1,5 @@
+insert_committed [0] v0 k1
+delete_committed [100] v0
+commit_any [100] v0
+insert_committed [200] v100 k1
+commit_any [200] v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.i200.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.i200.test
new file mode 100644
index 00000000..65a7f0e4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.i200.test
@@ -0,0 +1,4 @@
+key k1
+insert committed 0 v0
+delete committed 100
+insert committed 200 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.result
new file mode 100644
index 00000000..8523a779
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.result
@@ -0,0 +1,3 @@
+insert_committed [0] v10 k1
+delete_committed [100] v10
+commit_any [100] v10
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.test
new file mode 100644
index 00000000..d6a1e6b8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.d100.test
@@ -0,0 +1,3 @@
+key k1
+insert committed 0 v10
+delete committed 100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.d200.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.d200.result
new file mode 100644
index 00000000..f866683b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.d200.result
@@ -0,0 +1,7 @@
+insert_committed [0] v0 k1
+delete_committed [100] v0
+insert_committed [100] v100 k1
+commit_any [100] v0
+commit_any [100] v100
+delete_committed [200] v100
+commit_any [200] v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.d200.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.d200.test
new file mode 100644
index 00000000..f9902c09
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.d200.test
@@ -0,0 +1,4 @@
+key k1
+insert committed 0 v0
+insert committed 100 v100
+delete committed 200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.i200.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.i200.result
new file mode 100644
index 00000000..1d7c7f12
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.i200.result
@@ -0,0 +1,9 @@
+insert_committed [0] v0 k1
+delete_committed [100] v0
+insert_committed [100] v100 k1
+commit_any [100] v0
+commit_any [100] v100
+delete_committed [200] v100
+insert_committed [200] v200 k1
+commit_any [200] v100
+commit_any [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.i200.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.i200.test
new file mode 100644
index 00000000..d3b901e6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.i200.test
@@ -0,0 +1,4 @@
+key k1
+insert committed 0 v0
+insert committed 100 v100
+insert committed 200 v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.result
new file mode 100644
index 00000000..425bf7e1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.result
@@ -0,0 +1,5 @@
+insert_committed [0] v10 k1
+delete_committed [100] v10
+insert_committed [100] v20 k1
+commit_any [100] v10
+commit_any [100] v20
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.test
new file mode 100644
index 00000000..6c868efa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.i100.test
@@ -0,0 +1,3 @@
+key k1
+insert committed 0 v10
+insert committed 100 v20
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.result
new file mode 100644
index 00000000..88d7c9f3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.result
@@ -0,0 +1 @@
+insert_committed [0] v100 k1
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.test
new file mode 100644
index 00000000..7cce68e6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/commit.i0.test
@@ -0,0 +1,3 @@
+# committed insert
+key k1
+insert committed 0 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/insert.300.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/insert.300.result
new file mode 100644
index 00000000..5b73e538
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/insert.300.result
@@ -0,0 +1,9 @@
+insert_committed [0] v100 k1
+delete_committed [200] v100
+insert_committed [200] v200 k1
+commit_any [200] v100
+commit_any [200] v200
+delete_committed [300] v200
+insert_committed [300] v300 k1
+commit_any [300] v200
+commit_any [300] v300
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/insert.300.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/insert.300.test
new file mode 100644
index 00000000..915c3781
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/insert.300.test
@@ -0,0 +1,4 @@
+key k1
+insert committed 0 v100
+insert committed 200 v200
+insert committed 300 v300
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.live.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.live.result
new file mode 100644
index 00000000..2b7db461
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.live.result
@@ -0,0 +1,2 @@
+insert_provisional [300,301,302] v10 k1
+lock [300] v10
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.live.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.live.test
new file mode 100644
index 00000000..82b2e311
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.live.test
@@ -0,0 +1,7 @@
+live 300 301 302
+key k1
+delete committed 0
+placeholder 300
+placeholder 301
+insert provisional 302 v10
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.result
new file mode 100644
index 00000000..292b9ca0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.result
@@ -0,0 +1 @@
+insert_committed [300] v10 k1
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.test
new file mode 100644
index 00000000..b65532b6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.0.test
@@ -0,0 +1,6 @@
+key k1
+delete committed 0
+placeholder 300
+placeholder 301
+insert provisional 302 v10
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.1.live.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.1.live.result
new file mode 100644
index 00000000..d2f7b01b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.1.live.result
@@ -0,0 +1,2 @@
+insert_provisional [300,301] v10 k1
+lock [300] v10
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.1.live.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.1.live.test
new file mode 100644
index 00000000..e6dc15fd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.1.live.test
@@ -0,0 +1,7 @@
+live 300 301
+key k1
+delete committed 0
+placeholder 300
+placeholder 301
+insert provisional 302 v10
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.live.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.live.result
new file mode 100644
index 00000000..527a0db8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.live.result
@@ -0,0 +1,6 @@
+insert_committed [100] v10 k1
+commit_any [100] v10
+delete_provisional [300,301,302] v10
+lock [300] v10
+insert_provisional [300,301,302] v20 k1
+lock [300] v20
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.live.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.live.test
new file mode 100644
index 00000000..da6e8834
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.live.test
@@ -0,0 +1,8 @@
+live 300 301 302
+key k1
+delete committed 0
+insert committed 100 v10
+placeholder 300
+placeholder 301
+insert provisional 302 v20
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.result
new file mode 100644
index 00000000..772eb585
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.result
@@ -0,0 +1,5 @@
+insert_committed [100] v10 k1
+commit_any [100] v10
+delete_committed [300] v10
+insert_committed [300] v20 k1
+commit_any [300] v10
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.test
new file mode 100644
index 00000000..6dc9f80b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.2.test
@@ -0,0 +1,7 @@
+key k1
+delete committed 0
+insert committed 100 v10
+placeholder 300
+placeholder 301
+insert provisional 302 v20
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.3.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.3.result
new file mode 100644
index 00000000..a3b11d6f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.3.result
@@ -0,0 +1,4 @@
+insert_committed [300] v18 k1
+delete_committed [300] v18
+insert_committed [300] v20 k1
+commit_any [300] v18
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.3.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.3.test
new file mode 100644
index 00000000..29c8871a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/placeholder.3.test
@@ -0,0 +1,5 @@
+key k1
+delete committed 0
+insert provisional 300 v18
+placeholder 301
+insert provisional 302 v20
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov-2.py b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov-2.py
new file mode 100644
index 00000000..a61174aa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov-2.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# generate hotindexing undo provisional tests with 2 nested transactions
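+# usage (illustrative): run with Python 2 from this directory; it writes the prov.*.test files alongside the script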
+
+import sys
+
+def print_tr(fp, tr, trstack):
+ trtype = tr[0]
+ xid = tr[1:]
+ if trtype == 'i':
+ print >>fp, "insert", trstack, xid, "v"+xid
+ if trtype == 'd':
+ print >>fp, "delete", trstack, xid
+ if trtype == 'p':
+ print >>fp, "placeholder", trstack, xid
+
+def print_test(fp, live, commit, prov0, prov1):
+ if live != "":
+ for xid in live.split(","):
+ print >>fp, "live", xid
+ print >>fp, "key k1"
+ print_tr(fp, commit, "committed")
+ print_tr(fp, prov0, "provisional")
+ print_tr(fp, prov1, "provisional")
+
+def main():
+ # live transactions
+ for live in ["", "200", "200,201"]:
+ # committed transaction records
+ for commit in ["i0", "d0"]:
+ # provisional level 0 transaction records
+ for prov0 in ["i200", "d200", "p200"]:
+ # provisional level 1 transaction records
+ for prov1 in ["i201", "d201"]:
+ if live == "":
+ fname = "prov.%s.%s.%s.test" % (commit, prov0, prov1)
+ else:
+ fname = "prov.live%s.%s.%s.%s.test" % (live, commit, prov0, prov1)
+ print fname
+ fp = open(fname, "w")
+ if fp:
+ print_test(fp, live, commit, prov0, prov1)
+ fp.close()
+ return 0
+
+sys.exit(main())
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.live.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.live.result
new file mode 100644
index 00000000..53609227
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.live.result
@@ -0,0 +1,15 @@
+insert_committed [0] v10 k1
+delete_committed [100] v10
+insert_committed [100] v20 k1
+commit_any [100] v10
+commit_any [100] v20
+delete_committed [200] v20
+insert_committed [200] v10 k1
+commit_any [200] v20
+commit_any [200] v10
+delete_provisional [300,301] v10
+lock [300] v10
+insert_provisional [300,301] v30 k1
+lock [300] v30
+delete_provisional [300,301,302] v30
+lock [300] v30
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.live.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.live.test
new file mode 100644
index 00000000..10056eed
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.live.test
@@ -0,0 +1,8 @@
+live 300 301 302
+key k1
+insert committed 0 v10
+insert committed 100 v20
+insert committed 200 v10
+placeholder 300
+insert provisional 301 v30
+delete provisional 302
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.result
new file mode 100644
index 00000000..304c2c0d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.result
@@ -0,0 +1,14 @@
+insert_committed [0] v10 k1
+delete_committed [100] v10
+insert_committed [100] v20 k1
+commit_any [100] v10
+commit_any [100] v20
+delete_committed [200] v20
+insert_committed [200] v10 k1
+commit_any [200] v20
+commit_any [200] v10
+delete_committed [300] v10
+insert_committed [300] v30 k1
+delete_committed [300] v30
+commit_any [300] v10
+commit_any [300] v30
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.test
new file mode 100644
index 00000000..69043006
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.1.test
@@ -0,0 +1,7 @@
+key k1
+insert committed 0 v10
+insert committed 100 v20
+insert committed 200 v10
+placeholder 300
+insert provisional 301 v30
+delete provisional 302
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.d0.i100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.d0.i100.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.d0.i100.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.d0.i100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.d0.i100.test
new file mode 100644
index 00000000..ee87b180
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.d0.i100.test
@@ -0,0 +1,4 @@
+xid 100 aborting
+key k1
+delete committed 0
+insert provisional 100 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.i100.i200.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.i100.i200.result
new file mode 100644
index 00000000..88d7c9f3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.i100.i200.result
@@ -0,0 +1 @@
+insert_committed [0] v100 k1
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.i100.i200.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.i100.i200.test
new file mode 100644
index 00000000..a6bd167c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.aborting.i100.i200.test
@@ -0,0 +1,4 @@
+xid 100 aborting
+key k1
+insert committed 0 v100
+insert provisional 100 v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.d0.i100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.d0.i100.result
new file mode 100644
index 00000000..b32d3a59
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.d0.i100.result
@@ -0,0 +1 @@
+insert_committed [100] v100 k1
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.d0.i100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.d0.i100.test
new file mode 100644
index 00000000..9558c0e4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.d0.i100.test
@@ -0,0 +1,4 @@
+xid 100 committing
+key k1
+delete committed 0
+insert provisional 100 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.i100.i200.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.i100.i200.result
new file mode 100644
index 00000000..be161e5c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.i100.i200.result
@@ -0,0 +1,4 @@
+insert_committed [0] v100 k1
+delete_committed [100] v100
+insert_committed [100] v200 k1
+commit_any [100] v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.i100.i200.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.i100.i200.test
new file mode 100644
index 00000000..c8db5bba
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.committing.i100.i200.test
@@ -0,0 +1,4 @@
+xid 100 committing
+key k1
+insert committed 0 v100
+insert provisional 100 v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d100.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d100.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d100.test
new file mode 100644
index 00000000..37ab505c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d100.test
@@ -0,0 +1,3 @@
+key k1
+delete committed 0
+delete provisional 100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.d201.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.d201.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.d201.test
new file mode 100644
index 00000000..960d9c5c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.d201.test
@@ -0,0 +1,4 @@
+key k1
+delete committed 0
+delete provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.i201.result
new file mode 100644
index 00000000..d05f2f64
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.i201.result
@@ -0,0 +1 @@
+insert_committed [200] v201 k1
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.i201.test
new file mode 100644
index 00000000..c7fa5450
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.d200.i201.test
@@ -0,0 +1,4 @@
+key k1
+delete committed 0
+delete provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i100.result
new file mode 100644
index 00000000..b32d3a59
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i100.result
@@ -0,0 +1 @@
+insert_committed [100] v100 k1
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i100.test
new file mode 100644
index 00000000..adcde759
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i100.test
@@ -0,0 +1,3 @@
+key k1
+delete committed 0
+insert provisional 100 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.d201.result
new file mode 100644
index 00000000..38bfcbbc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.d201.result
@@ -0,0 +1,3 @@
+insert_committed [200] v200 k1
+delete_committed [200] v200
+commit_any [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.d201.test
new file mode 100644
index 00000000..fa52fe19
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.d201.test
@@ -0,0 +1,4 @@
+key k1
+delete committed 0
+insert provisional 200 v200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.i201.result
new file mode 100644
index 00000000..217272dd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.i201.result
@@ -0,0 +1,4 @@
+insert_committed [200] v200 k1
+delete_committed [200] v200
+insert_committed [200] v201 k1
+commit_any [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.i201.test
new file mode 100644
index 00000000..568cdc17
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.i200.i201.test
@@ -0,0 +1,4 @@
+key k1
+delete committed 0
+insert provisional 200 v200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.d201.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.d201.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.d201.test
new file mode 100644
index 00000000..cf56d8df
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.d201.test
@@ -0,0 +1,4 @@
+key k1
+delete committed 0
+placeholder provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.i201.result
new file mode 100644
index 00000000..d05f2f64
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.i201.result
@@ -0,0 +1 @@
+insert_committed [200] v201 k1
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.i201.test
new file mode 100644
index 00000000..60c4b6c0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.d0.p200.i201.test
@@ -0,0 +1,4 @@
+key k1
+delete committed 0
+placeholder provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d100.result
new file mode 100644
index 00000000..8523a779
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d100.result
@@ -0,0 +1,3 @@
+insert_committed [0] v10 k1
+delete_committed [100] v10
+commit_any [100] v10
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d100.test
new file mode 100644
index 00000000..05d444cd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d100.test
@@ -0,0 +1,3 @@
+key k1
+insert committed 0 v10
+delete provisional 100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.d201.result
new file mode 100644
index 00000000..f22da09c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.d201.result
@@ -0,0 +1,3 @@
+insert_committed [0] v0 k1
+delete_committed [200] v0
+commit_any [200] v0
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.d201.test
new file mode 100644
index 00000000..cd919eb4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.d201.test
@@ -0,0 +1,4 @@
+key k1
+insert committed 0 v0
+delete provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.i201.result
new file mode 100644
index 00000000..75f2f8f1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.i201.result
@@ -0,0 +1,4 @@
+insert_committed [0] v0 k1
+delete_committed [200] v0
+insert_committed [200] v201 k1
+commit_any [200] v0
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.i201.test
new file mode 100644
index 00000000..7c5a696a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.d200.i201.test
@@ -0,0 +1,4 @@
+key k1
+insert committed 0 v0
+delete provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i100.result
new file mode 100644
index 00000000..6946d3ad
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i100.result
@@ -0,0 +1,4 @@
+insert_committed [0] v10 k1
+delete_committed [100] v10
+insert_committed [100] v100 k1
+commit_any [100] v10
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i100.test
new file mode 100644
index 00000000..c29f1a64
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i100.test
@@ -0,0 +1,3 @@
+key k1
+insert committed 0 v10
+insert provisional 100 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.d201.result
new file mode 100644
index 00000000..d69c4c45
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.d201.result
@@ -0,0 +1,6 @@
+insert_committed [0] v0 k1
+delete_committed [200] v0
+insert_committed [200] v200 k1
+delete_committed [200] v200
+commit_any [200] v0
+commit_any [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.d201.test
new file mode 100644
index 00000000..c20923b8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.d201.test
@@ -0,0 +1,4 @@
+key k1
+insert committed 0 v0
+insert provisional 200 v200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.i201.result
new file mode 100644
index 00000000..04f9a49c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.i201.result
@@ -0,0 +1,7 @@
+insert_committed [0] v0 k1
+delete_committed [200] v0
+insert_committed [200] v200 k1
+delete_committed [200] v200
+insert_committed [200] v201 k1
+commit_any [200] v0
+commit_any [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.i201.test
new file mode 100644
index 00000000..e8043f57
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.i200.i201.test
@@ -0,0 +1,4 @@
+key k1
+insert committed 0 v0
+insert provisional 200 v200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.d201.result
new file mode 100644
index 00000000..f22da09c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.d201.result
@@ -0,0 +1,3 @@
+insert_committed [0] v0 k1
+delete_committed [200] v0
+commit_any [200] v0
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.d201.test
new file mode 100644
index 00000000..a51a5c54
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.d201.test
@@ -0,0 +1,4 @@
+key k1
+insert committed 0 v0
+placeholder provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.i201.result
new file mode 100644
index 00000000..75f2f8f1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.i201.result
@@ -0,0 +1,4 @@
+insert_committed [0] v0 k1
+delete_committed [200] v0
+insert_committed [200] v201 k1
+commit_any [200] v0
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.i201.test
new file mode 100644
index 00000000..14c521f8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.i0.p200.i201.test
@@ -0,0 +1,4 @@
+key k1
+insert committed 0 v0
+placeholder provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.d100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.d100.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.d100.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.d100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.d100.test
new file mode 100644
index 00000000..14a3e4a0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.d100.test
@@ -0,0 +1,4 @@
+live 100
+key k1
+delete committed 0
+delete provisional 100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.i100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.i100.result
new file mode 100644
index 00000000..cf5ccdb6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.i100.result
@@ -0,0 +1,2 @@
+insert_provisional [100] v100 k1
+lock [100] v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.i100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.i100.test
new file mode 100644
index 00000000..9b2b7ebc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.d0.i100.test
@@ -0,0 +1,4 @@
+live 100
+key k1
+delete committed 0
+insert provisional 100 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.d100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.d100.result
new file mode 100644
index 00000000..ac869f39
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.d100.result
@@ -0,0 +1,3 @@
+insert_committed [0] v10 k1
+delete_provisional [100] v10
+lock [100] v10
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.d100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.d100.test
new file mode 100644
index 00000000..e74f22b4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.d100.test
@@ -0,0 +1,4 @@
+live 100
+key k1
+insert committed 0 v10
+delete provisional 100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.i100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.i100.result
new file mode 100644
index 00000000..d5ef14e6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.i100.result
@@ -0,0 +1,5 @@
+insert_committed [0] v10 k1
+delete_provisional [100] v10
+lock [100] v10
+insert_provisional [100] v100 k1
+lock [100] v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.i100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.i100.test
new file mode 100644
index 00000000..2a3ec8e5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live.i0.i100.test
@@ -0,0 +1,4 @@
+live 100
+key k1
+insert committed 0 v10
+insert provisional 100 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.d201.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.d201.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.d201.test
new file mode 100644
index 00000000..e9c30ec5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.d201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+delete committed 0
+delete provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.i201.result
new file mode 100644
index 00000000..2a2700cd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.i201.result
@@ -0,0 +1,2 @@
+insert_provisional [200,201] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.i201.test
new file mode 100644
index 00000000..585f7d24
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.d200.i201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+delete committed 0
+delete provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.d201.result
new file mode 100644
index 00000000..1fb6fb9f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.d201.result
@@ -0,0 +1,4 @@
+insert_provisional [200] v200 k1
+lock [200] v200
+delete_provisional [200,201] v200
+lock [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.d201.test
new file mode 100644
index 00000000..f1b64294
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.d201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+delete committed 0
+insert provisional 200 v200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.i201.result
new file mode 100644
index 00000000..8e7dcba5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.i201.result
@@ -0,0 +1,6 @@
+insert_provisional [200] v200 k1
+lock [200] v200
+delete_provisional [200,201] v200
+lock [200] v200
+insert_provisional [200,201] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.i201.test
new file mode 100644
index 00000000..b85cf241
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.i200.i201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+delete committed 0
+insert provisional 200 v200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.d201.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.d201.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.d201.test
new file mode 100644
index 00000000..b687ed5e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.d201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+delete committed 0
+placeholder provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.i201.result
new file mode 100644
index 00000000..2a2700cd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.i201.result
@@ -0,0 +1,2 @@
+insert_provisional [200,201] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.i201.test
new file mode 100644
index 00000000..b761f352
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.d0.p200.i201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+delete committed 0
+placeholder provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.d201.result
new file mode 100644
index 00000000..015e1314
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.d201.result
@@ -0,0 +1,3 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.d201.test
new file mode 100644
index 00000000..0d1a36fc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.d201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+insert committed 0 v0
+delete provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.i201.result
new file mode 100644
index 00000000..7fbfc89f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.i201.result
@@ -0,0 +1,5 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
+insert_provisional [200,201] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.i201.test
new file mode 100644
index 00000000..c819d1d5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.d200.i201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+insert committed 0 v0
+delete provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.d201.result
new file mode 100644
index 00000000..461992b2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.d201.result
@@ -0,0 +1,7 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
+insert_provisional [200] v200 k1
+lock [200] v200
+delete_provisional [200,201] v200
+lock [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.d201.test
new file mode 100644
index 00000000..ba4078dc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.d201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+insert committed 0 v0
+insert provisional 200 v200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.i201.result
new file mode 100644
index 00000000..e433f894
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.i201.result
@@ -0,0 +1,9 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
+insert_provisional [200] v200 k1
+lock [200] v200
+delete_provisional [200,201] v200
+lock [200] v200
+insert_provisional [200,201] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.i201.test
new file mode 100644
index 00000000..537bab1b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.i200.i201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+insert committed 0 v0
+insert provisional 200 v200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.d201.result
new file mode 100644
index 00000000..acd7f46e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.d201.result
@@ -0,0 +1,3 @@
+insert_committed [0] v0 k1
+delete_provisional [200,201] v0
+lock [200] v0
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.d201.test
new file mode 100644
index 00000000..5ca7424f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.d201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+insert committed 0 v0
+placeholder provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.i201.result
new file mode 100644
index 00000000..3ff4dbcc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.i201.result
@@ -0,0 +1,5 @@
+insert_committed [0] v0 k1
+delete_provisional [200,201] v0
+lock [200] v0
+insert_provisional [200,201] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.i201.test
new file mode 100644
index 00000000..0e128ef6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200,201.i0.p200.i201.test
@@ -0,0 +1,6 @@
+live 200
+live 201
+key k1
+insert committed 0 v0
+placeholder provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.commit202.i0.i200.i201.i202.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.commit202.i0.i200.i201.i202.result
new file mode 100644
index 00000000..cbb863f3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.commit202.i0.i200.i201.i202.result
@@ -0,0 +1,5 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
+insert_provisional [200] v200 k1
+lock [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.commit202.i0.i200.i201.i202.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.commit202.i0.i200.i201.i202.test
new file mode 100644
index 00000000..e7d60f97
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.commit202.i0.i200.i201.i202.test
@@ -0,0 +1,8 @@
+xid 200 live
+xid 201 aborting
+key k1
+insert committed 0 v0
+insert provisional 200 v200
+insert provisional 201 v201
+insert provisional 202 v202
+
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.i0.i200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.i0.i200.i201.result
new file mode 100644
index 00000000..cbb863f3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.i0.i200.i201.result
@@ -0,0 +1,5 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
+insert_provisional [200] v200 k1
+lock [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.i0.i200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.i0.i200.i201.test
new file mode 100644
index 00000000..9ad509e2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.abort201.i0.i200.i201.test
@@ -0,0 +1,6 @@
+xid 200 live
+xid 201 aborting
+key k1
+insert committed 0 v0
+insert provisional 200 v200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.committing201.i0.i200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.committing201.i0.i200.i201.result
new file mode 100644
index 00000000..ab15d13f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.committing201.i0.i200.i201.result
@@ -0,0 +1,9 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
+insert_provisional [200] v200 k1
+lock [200] v200
+delete_provisional [200] v200
+lock [200] v200
+insert_provisional [200] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.committing201.i0.i200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.committing201.i0.i200.i201.test
new file mode 100644
index 00000000..ba6775d3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.committing201.i0.i200.i201.test
@@ -0,0 +1,6 @@
+xid 200 live
+xid 201 committing
+key k1
+insert committed 0 v0
+insert provisional 200 v200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.d201.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.d201.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.d201.test
new file mode 100644
index 00000000..e8e205d8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.d201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+delete committed 0
+delete provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.i201.result
new file mode 100644
index 00000000..58d40ca1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.i201.result
@@ -0,0 +1,2 @@
+insert_provisional [200] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.i201.test
new file mode 100644
index 00000000..af95b8fb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.d200.i201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+delete committed 0
+delete provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.d201.result
new file mode 100644
index 00000000..60da48d2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.d201.result
@@ -0,0 +1,4 @@
+insert_provisional [200] v200 k1
+lock [200] v200
+delete_provisional [200] v200
+lock [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.d201.test
new file mode 100644
index 00000000..7db0a9ad
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.d201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+delete committed 0
+insert provisional 200 v200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.i201.result
new file mode 100644
index 00000000..49884e70
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.i201.result
@@ -0,0 +1,6 @@
+insert_provisional [200] v200 k1
+lock [200] v200
+delete_provisional [200] v200
+lock [200] v200
+insert_provisional [200] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.i201.test
new file mode 100644
index 00000000..5b2c9da5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.i200.i201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+delete committed 0
+insert provisional 200 v200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.d201.result
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.d201.result
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.d201.test
new file mode 100644
index 00000000..b773f437
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.d201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+delete committed 0
+placeholder provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.i201.result
new file mode 100644
index 00000000..58d40ca1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.i201.result
@@ -0,0 +1,2 @@
+insert_provisional [200] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.i201.test
new file mode 100644
index 00000000..360e4f07
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.d0.p200.i201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+delete committed 0
+placeholder provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.d201.result
new file mode 100644
index 00000000..015e1314
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.d201.result
@@ -0,0 +1,3 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.d201.test
new file mode 100644
index 00000000..24486236
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.d201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+insert committed 0 v0
+delete provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.i201.result
new file mode 100644
index 00000000..a7a31ffa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.i201.result
@@ -0,0 +1,5 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
+insert_provisional [200] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.i201.test
new file mode 100644
index 00000000..40d13183
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.d200.i201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+insert committed 0 v0
+delete provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.d201.result
new file mode 100644
index 00000000..415c5513
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.d201.result
@@ -0,0 +1,7 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
+insert_provisional [200] v200 k1
+lock [200] v200
+delete_provisional [200] v200
+lock [200] v200
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.d201.test
new file mode 100644
index 00000000..e7461d58
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.d201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+insert committed 0 v0
+insert provisional 200 v200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.i201.result
new file mode 100644
index 00000000..ab15d13f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.i201.result
@@ -0,0 +1,9 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
+insert_provisional [200] v200 k1
+lock [200] v200
+delete_provisional [200] v200
+lock [200] v200
+insert_provisional [200] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.i201.test
new file mode 100644
index 00000000..ed25c487
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.i200.i201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+insert committed 0 v0
+insert provisional 200 v200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.d201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.d201.result
new file mode 100644
index 00000000..015e1314
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.d201.result
@@ -0,0 +1,3 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.d201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.d201.test
new file mode 100644
index 00000000..84f83e6e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.d201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+insert committed 0 v0
+placeholder provisional 200
+delete provisional 201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.i201.result
new file mode 100644
index 00000000..a7a31ffa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.i201.result
@@ -0,0 +1,5 @@
+insert_committed [0] v0 k1
+delete_provisional [200] v0
+lock [200] v0
+insert_provisional [200] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.i201.test
new file mode 100644
index 00000000..c3fbe42b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.live200.i0.p200.i201.test
@@ -0,0 +1,5 @@
+live 200
+key k1
+insert committed 0 v0
+placeholder provisional 200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i100.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i100.result
new file mode 100644
index 00000000..cf5ccdb6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i100.result
@@ -0,0 +1,2 @@
+insert_provisional [100] v100 k1
+lock [100] v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i100.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i100.test
new file mode 100644
index 00000000..1fc4dc60
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i100.test
@@ -0,0 +1,4 @@
+xid 100 preparing
+key k1
+delete committed 0
+insert provisional 100 v100
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i200.i201.result b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i200.i201.result
new file mode 100644
index 00000000..49884e70
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i200.i201.result
@@ -0,0 +1,6 @@
+insert_provisional [200] v200 k1
+lock [200] v200
+delete_provisional [200] v200
+lock [200] v200
+insert_provisional [200] v201 k1
+lock [200] v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i200.i201.test b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i200.i201.test
new file mode 100644
index 00000000..b7764174
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-undo-do-tests/prov.preparing.d0.i200.i201.test
@@ -0,0 +1,5 @@
+xid 200 preparing
+key k1
+delete committed 0
+insert provisional 200 v200
+insert provisional 201 v201
diff --git a/storage/tokudb/PerconaFT/src/tests/hotindexer-with-queries.cc b/storage/tokudb/PerconaFT/src/tests/hotindexer-with-queries.cc
new file mode 100644
index 00000000..2bc60142
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/hotindexer-with-queries.cc
@@ -0,0 +1,280 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "toku_config.h"
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "key-val.h"
+
+enum {NUM_INDEXER_INDEXES=1};
+static const int NUM_DBS = NUM_INDEXER_INDEXES + 1; // 1 for source DB
+static const int NUM_ROWS = 1000000;
+int num_rows;
+typedef enum {FORWARD = 0, BACKWARD} Direction;
+typedef enum {TXN_NONE = 0, TXN_CREATE = 1, TXN_END = 2} TxnWork;
+
+DB_ENV *env;
+
+/*
+ * client scans the primary table (like a range query)
+ */
+
+static void * client(void *arg)
+{
+ DB *src = (DB *)arg;
+ if ( verbose ) printf("client start\n");
+
+ int r;
+ struct timeval start, now;
+ DB_TXN *txn;
+ DBT key, val;
+ uint32_t k, v;
+
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ DBC *cursor;
+ r = src->cursor(src, txn, &cursor, 0); CKERR(r);
+
+ int row = 0;
+
+ gettimeofday(&start,0);
+ while ( r != DB_NOTFOUND ) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if ( r != DB_NOTFOUND ) {
+ row++;
+ }
+ }
+ gettimeofday(&now, 0);
+ if ( verbose ) printf("client : primary table scanned in %d sec, contains %d rows\n",
+ (int)(now.tv_sec - start.tv_sec),
+ row);
+
+ r = cursor->c_close(cursor); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ if ( verbose ) printf("client done\n");
+
+ return NULL;
+}
+
+toku_pthread_t *client_thread;
+static void client_init(void)
+{
+ client_thread = (toku_pthread_t *)toku_malloc(sizeof(toku_pthread_t));
+}
+
+static void client_cleanup(void)
+{
+ toku_free(client_thread); client_thread = NULL;
+}
+
+static void query_only(DB *src)
+{
+ int r;
+ void *t0;
+ client_init();
+
+ // start thread doing query
+ r = toku_pthread_create(
+ toku_uninstrumented, client_thread, nullptr,
+ client, static_cast<void *>(src));
+ CKERR(r);
+
+ r = toku_pthread_join(*client_thread, &t0);
+ CKERR(r);
+
+ client_cleanup();
+}
+
+static void test_indexer(DB *src, DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_INDEXER *indexer;
+ uint32_t db_flags[NUM_DBS];
+
+ if ( verbose ) printf("test_indexer\n");
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = 0;
+ }
+
+ client_init();
+
+ // create and initialize indexer
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ if ( verbose ) printf("test_indexer create_indexer\n");
+ r = env->create_indexer(env, txn, &indexer, src, NUM_DBS-1, &dbs[1], db_flags, 0);
+ CKERR(r);
+ r = indexer->set_error_callback(indexer, NULL, NULL);
+ CKERR(r);
+ r = indexer->set_poll_function(indexer, poll_print, NULL);
+ CKERR(r);
+
+ // start thread doing query
+ r = toku_pthread_create(
+ toku_uninstrumented, client_thread, nullptr,
+ client, static_cast<void *>(src));
+ CKERR(r);
+
+ struct timeval start, now;
+ if (verbose) {
+ printf("test_indexer build\n");
+ gettimeofday(&start,0);
+ }
+ r = indexer->build(indexer);
+ CKERR(r);
+ if ( verbose ) {
+ gettimeofday(&now,0);
+ int duration = (int)(now.tv_sec - start.tv_sec);
+ if ( duration > 0 )
+ printf("test_indexer build : sec = %d\n", duration);
+ }
+
+ void *t0;
+ r = toku_pthread_join(*client_thread, &t0); CKERR(r);
+
+ if ( verbose ) printf("test_indexer close\n");
+ r = indexer->close(indexer);
+ CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC);
+ CKERR(r);
+
+ client_cleanup();
+}
+
+
+static void run_test(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char logname[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(logname, 2, TOKU_TEST_FILENAME, "log"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log"); CKERR(r);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ generate_permute_tables();
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate); CKERR(r);
+    int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ int ids[MAX_DBS];
+ DB *dbs[MAX_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ ids[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &ids[i];
+ char key_name[32];
+ sprintf(key_name, "key%d", i);
+ r = dbs[i]->open(dbs[i], NULL, key_name, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ // generate the src DB (do not use put_multiple)
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = generate_initial_table(dbs[0], txn, num_rows); CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ // scan the whole table twice to reduce possible flattening effects
+ // -------------------------- //
+ query_only(dbs[0]);
+ query_only(dbs[0]);
+ // -------------------------- //
+
+ // scan the whole table while running the indexer
+ // -------------------------- //
+ test_indexer(dbs[0], dbs);
+ // -------------------------- //
+
+ // scan the whole table again to confirm performance
+ // -------------------------- //
+ query_only(dbs[0]);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+ r = env->close(env, 0); CKERR(r);
+
+ if ( verbose && (r == 0)) printf("PASS\n");
+}
+
+// ------------ infrastructure ----------
+
+static inline void
+do_args (int argc, char * const argv[]) {
+ const char *progname=argv[0];
+ num_rows = NUM_ROWS;
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else if (strcmp(argv[0],"-r")==0) {
+ argc--; argv++;
+ num_rows = atoi(argv[0]);
+ } else {
+ fprintf(stderr, "Usage:\n %s [-v] [-q] [-r rows]\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+}
+
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/inflate.cc b/storage/tokudb/PerconaFT/src/tests/inflate.cc
new file mode 100644
index 00000000..c1143873
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/inflate.cc
@@ -0,0 +1,171 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Idea: inflate a node by
+ * create a 2-level tree
+ * Nodes are A B C D E F G H
+ * Fill them up sequentially so they'll all be near 4MB.
+ * Close the file
+ * Insert some more to H (buffered in the root)
+ * Delete stuff from G (so that H merges with G)
+ * G ends up too big.
+ */
+
+#include "test.h"
+
+DB_ENV *env;
+DB *db;
+const char dbname[] = "foo.db";
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_PRIVATE|DB_INIT_TXN;
+
+static void
+open_em (void)
+{
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+}
+
+static void
+close_em (void)
+{
+ int r;
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void
+reopen_em (void)
+{
+ close_em();
+ open_em();
+}
+
+
+static void
+setup(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, 8192); CKERR(r);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+}
+
+char vdata[150];
+
+static void
+insert_n (uint32_t ah) {
+ uint32_t an = htonl(ah);
+ DBT key;
+ dbt_init(&key, &an, 4);
+ DBT val;
+ dbt_init(&val, vdata, sizeof vdata);
+ int r = db->put(db, NULL, &key, &val, 0);
+ CKERR(r);
+}
+
+static void
+delete_n (uint32_t ah)
+{
+ uint32_t an = htonl(ah);
+ DBT key;
+ dbt_init(&key, &an, 4);
+ int r = db->del(db, NULL, &key, DB_DELETE_ANY);
+ CKERR(r);
+}
+
+static void
+get_n (uint32_t ah, int expect_r)
+{
+ uint32_t an = htonl(ah);
+ DBT key;
+ dbt_init(&key, &an, 4);
+ DBT val;
+ dbt_init_malloc(&val);
+ int r = db->get(db, NULL, &key, &val, 0);
+ assert(r==expect_r);
+ if (r==0) toku_free(val.data);
+}
+
+static void
+doit (void)
+{
+ uint32_t N=100;
+ for (uint32_t i=0; i<N; i++) {
+ insert_n(i<<16);
+ }
+ reopen_em();
+ for (uint32_t j=0; j<46; j++) {
+ insert_n(('.'<<16) + 1 +j);
+ }
+ for (uint32_t i=N-1; i<N; i++) {
+ delete_n(i<<16);
+ get_n(i<<16, DB_NOTFOUND);
+ }
+ reopen_em();
+ insert_n(N<<16);
+ get_n(N<<16, 0);
+ reopen_em();
+ for (uint32_t i='J'; i<N+1; i++) {
+ delete_n(i<<16);
+ get_n(i<<16, DB_NOTFOUND);
+ }
+ reopen_em();
+ reopen_em();
+ for (uint32_t j=0; j<46; j++) {
+ insert_n(('.'<<16) + 1 +j +46);
+ }
+ for (uint32_t i=0; i<13; i++) {
+ delete_n((73 - i)<< 16);
+ get_n((73-i) << 16, DB_NOTFOUND);
+ }
+ reopen_em(); // now a node is 9143 bytes
+}
+
+int test_main (int argc __attribute__((__unused__)), char * const argv[] __attribute__((__unused__))) {
+ setup();
+ doit();
+ close_em();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/inflate2.cc b/storage/tokudb/PerconaFT/src/tests/inflate2.cc
new file mode 100644
index 00000000..af4d9d50
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/inflate2.cc
@@ -0,0 +1,161 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Idea: inflate a node by
+ * create a 2-level tree
+ * Nodes are A B C D E F G H
+ * Fill them up sequentially so they'll all be near 4MB.
+ * Close the file
+ * Insert some more to H (buffered in the root)
+ * Delete stuff from G (so that H merges with G)
+ * G ends up too big (but it's not the merge)
+ */
+
+#include "test.h"
+
+DB_ENV *env;
+DB *db;
+const char dbname[] = "foo.db";
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_PRIVATE|DB_INIT_TXN;
+
+static void
+open_em (void)
+{
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+}
+
+static void
+close_em (void)
+{
+ int r;
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void
+reopen_em (void)
+{
+ close_em();
+ open_em();
+}
+
+
+static void
+setup(void)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, 8192); CKERR(r);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+}
+
+char vdata[150];
+
+static void
+insert_n (uint32_t ah) {
+ uint32_t an = htonl(ah);
+ DBT key;
+ dbt_init(&key, &an, 4);
+ DBT val;
+ dbt_init(&val, vdata, sizeof vdata);
+ int r = db->put(db, NULL, &key, &val, 0);
+ CKERR(r);
+}
+
+static void
+get_n (uint32_t ah, int expect_r)
+{
+ uint32_t an = htonl(ah);
+ DBT key;
+ dbt_init(&key, &an, 4);
+ DBT val;
+ dbt_init_malloc(&val);
+ int r = db->get(db, NULL, &key, &val, 0);
+ assert(r==expect_r);
+ if (r==0) toku_free(val.data);
+}
+
+
+static void
+delete_n_now (uint32_t ah)
+{
+ uint32_t an = htonl(ah);
+ DBT key;
+ dbt_init(&key, &an, 4);
+ int r = db->del(db, NULL, &key, DB_DELETE_ANY);
+ CKERR(r);
+ get_n(ah, DB_NOTFOUND);
+}
+
+static void
+doit (void)
+{
+ uint32_t N=46;
+ uint32_t BIG=1<<16;
+ for (uint32_t i=0; i<2*N; i++) {
+ insert_n(i<<8);
+ }
+ insert_n(BIG);
+ insert_n(BIG+1);
+ reopen_em();
+ for (uint32_t i=0; i<N; i++) {
+ insert_n((2*N+i)<<8);
+ }
+ delete_n_now(BIG+1);
+ reopen_em();
+ for (uint32_t i=0; i<N; i++) {
+ insert_n((48<<8) + i);
+ }
+ insert_n((48<<8) + N);
+ return;
+}
+
+int test_main (int argc __attribute__((__unused__)), char * const argv[] __attribute__((__unused__))) {
+ setup();
+ doit();
+ close_em();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/insert-dup-prelock.cc b/storage/tokudb/PerconaFT/src/tests/insert-dup-prelock.cc
new file mode 100644
index 00000000..87733c90
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/insert-dup-prelock.cc
@@ -0,0 +1,171 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <assert.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <db.h>
+#include <toku_byteswap.h>
+#include <portability/toku_path.h>
+
+static int verbose = 0;
+static uint64_t maxk = 100000;
+
+static int usage(const char *prog) {
+ fprintf(stderr, "%s: run single row insertions with prelocking\n", prog);
+ fprintf(stderr, "[--n %" PRIu64 "]\n", maxk);
+ return 1;
+}
+
+static int inserter(DB_ENV *env, DB *db, uint64_t _maxk, int putflags, int expectr) {
+ if (verbose) printf("%p %p\n", env, db);
+ int r;
+ for (uint64_t k = 0; k < _maxk; k++) {
+
+ if (verbose) printf("%" PRIu64 "\n", k);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ assert(r == 0);
+
+ r = db->pre_acquire_table_lock(db, txn);
+ assert(r == 0);
+
+ uint64_t kk = bswap_64(k);
+ DBT key = { .data = &kk, .size = sizeof kk };
+ DBT val = { .data = &k, .size = sizeof k };
+ r = db->put(db, txn, &key, &val, putflags);
+ assert(r == expectr);
+
+ r = txn->commit(txn, DB_TXN_NOSYNC);
+ assert(r == 0);
+ }
+
+ return 0;
+}
+
+static int env_init(DB_ENV **envptr, const char *envdir) {
+ int r;
+ DB_ENV *env;
+
+ r = db_env_create(&env, 0);
+ if (r == 0) {
+ // env setup
+
+ // env open
+ r = env->open(env, envdir, DB_CREATE+DB_PRIVATE+DB_INIT_LOCK+DB_INIT_LOG+DB_INIT_MPOOL+DB_INIT_TXN, 0777);
+ }
+ if (r == 0)
+ *envptr = env;
+ return r;
+}
+
+static int db_init(DB_ENV *env, const char *dbname, DB **dbptr) {
+ int r;
+ DB *db;
+
+ r = db_create(&db, env, 0);
+ if (r == 0) {
+ // db create
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_CREATE, 0777);
+ if (r != 0) {
+ r = db->close(db, 0);
+ assert(r == 0);
+ }
+ }
+ if (r == 0)
+ *dbptr = db;
+ return r;
+}
+
+int main(int argc, char *argv[]) {
+ int r;
+
+ for (int i = 1; i < argc; i++) {
+ char *arg = argv[i];
+ if (strcmp(arg, "--n") == 0 && i+1 < argc) {
+ maxk = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--verbose") == 0 || strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+
+ return usage(argv[0]);
+ }
+
+ const char *envdir = TOKU_TEST_FILENAME;
+ char cmd[TOKU_PATH_MAX+sizeof("rm -rf ")];
+ snprintf(cmd, sizeof(cmd), "rm -rf %s", TOKU_TEST_FILENAME);
+ r = system(cmd);
+ assert(r == 0);
+ r = mkdir(envdir, 0777);
+ assert(r == 0);
+
+ DB_ENV *env;
+ r = env_init(&env, envdir);
+ assert(r == 0);
+
+ DB *db;
+ r = db_init(env, "db0", &db);
+ assert(r == 0);
+
+ r = inserter(env, db, maxk, 0, 0);
+ assert(r == 0);
+
+ r = inserter(env, db, maxk, DB_NOOVERWRITE, DB_KEYEXIST);
+ assert(r == 0);
+
+ r = db->close(db, 0);
+ assert(r == 0);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/ipm.py b/storage/tokudb/PerconaFT/src/tests/ipm.py
new file mode 100644
index 00000000..84dfee54
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/ipm.py
@@ -0,0 +1,61 @@
+#!/usr/local/bin/python2.6
+
+import sys
+import os
+import pexpect
+import getpass
+
+#
+# remote_cmd
+#
+
+nameaddr='admn@192.168.1.254'
+passwd='admn'
+
+def IPM_cmd(cmds):
+ # password handling
+ ssh_newkey = 'Are you sure you want to continue connecting'
+ p=pexpect.spawn('ssh %s' % nameaddr, timeout=60)
+ i=p.expect([ssh_newkey,'Password:',pexpect.EOF])
+ if i==0:
+ p.sendline('yes')
+ i=p.expect([ssh_newkey,'Password:',pexpect.EOF])
+ if i==1:
+ p.sendline(passwd)
+ elif i==2:
+ print "I either got key or connection timeout"
+ pass
+
+ # run command(s)
+ i = p.expect('Sentry:')
+ for cmd in cmds:
+ if i==0:
+ p.sendline(cmd)
+ else:
+ print 'p.expect saw', p.before
+ i = p.expect('Sentry:')
+ print p.before
+
+ # close session
+ p.sendline('quit')
+ p.expect(pexpect.EOF)
+ return 0
+
+def IPM_power_on():
+ IPM_cmd(['on all'])
+
+def IPM_power_off():
+ IPM_cmd(['off all'])
+
+def main(argv):
+# passwd = getpass.getpass('password for %s:' % (nameaddr))
+ if argv[1] == 'on':
+ IPM_power_on()
+ elif argv[1] == 'off':
+ IPM_power_off()
+ else:
+ IPM_cmd(argv[1:])
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
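+
+# Example usage (a sketch; assumes the power controller is reachable at the
+# address configured above):
+#   ./ipm.py on     # power on all outlets
+#   ./ipm.py off    # power off all outlets
+#   ./ipm.py <cmd>  # any other arguments are sent verbatim to the Sentry CLI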
diff --git a/storage/tokudb/PerconaFT/src/tests/isolation-read-committed.cc b/storage/tokudb/PerconaFT/src/tests/isolation-read-committed.cc
new file mode 100644
index 00000000..ab0905d8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/isolation-read-committed.cc
@@ -0,0 +1,161 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test that isolation works right for READ COMMITTED and READ UNCOMMITTED transactions.
+// In particular, check that a READ COMMITTED transaction does not see another transaction's uncommitted writes, while a READ UNCOMMITTED transaction does.
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ {
+ DB_TXN *txna;
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txna, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ DBT key,val;
+ r = db->put(db, txna, dbt_init(&key, "a", 2), dbt_init(&val, "a", 2), 0); CKERR(r);
+
+ r = txna->commit(txna, 0); CKERR(r);
+ }
+ DB_TXN *txn_put, *txn_committed, *txn_uncommitted;
+ r = env->txn_begin(env, NULL, &txn_put, DB_READ_COMMITTED); CKERR(r);
+ r = env->txn_begin(env, NULL, &txn_committed, DB_READ_COMMITTED); CKERR(r);
+ r = env->txn_begin(env, NULL, &txn_uncommitted, DB_READ_UNCOMMITTED); CKERR(r);
+
+ //
+ // test a simple get
+ //
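+ // txn_put makes uncommitted changes; txn_committed (READ COMMITTED) should not see them,
+ // while txn_uncommitted (READ UNCOMMITTED) should.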
+ {
+ DBT key,val;
+ r = db->put(db, txn_put, dbt_init(&key, "x", 2), dbt_init(&val, "x", 2), 0); CKERR(r);
+ dbt_init_malloc(&val);
+ r = db->get(db, txn_put, dbt_init(&key, "x", 2), &val, 0); CKERR(r);
+ toku_free(val.data);
+
+ dbt_init_malloc(&val);
+ r = db->get(db, txn_committed, dbt_init(&key, "x", 2), &val, 0); CKERR2(r, DB_NOTFOUND);
+ toku_free(val.data);
+
+ dbt_init_malloc(&val);
+ r = db->get(db, txn_uncommitted, dbt_init(&key, "x", 2), &val, 0); CKERR(r);
+ toku_free(val.data);
+
+ r = db->del(db, txn_put, dbt_init(&key, "a", 2), 0); CKERR(r);
+
+ dbt_init_malloc(&val);
+ r = db->get(db, txn_put, dbt_init(&key, "a", 2), &val, 0); CKERR2(r, DB_NOTFOUND);
+ toku_free(val.data);
+
+ dbt_init_malloc(&val);
+ r = db->get(db, txn_committed, dbt_init(&key, "a", 2), &val, 0); CKERR(r);
+ toku_free(val.data);
+
+ dbt_init_malloc(&val);
+ r = db->get(db, txn_uncommitted, dbt_init(&key, "a", 2), &val, 0); CKERR2(r, DB_NOTFOUND);
+ toku_free(val.data);
+
+ val.data = NULL;
+ }
+
+
+ r = txn_put->commit(txn_put, 0); CKERR(r);
+ r = txn_committed->commit(txn_committed, 0); CKERR(r);
+ r = txn_uncommitted->commit(txn_uncommitted, 0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn_put, DB_READ_COMMITTED); CKERR(r);
+ r = env->txn_begin(env, NULL, &txn_committed, DB_READ_COMMITTED); CKERR(r);
+ r = env->txn_begin(env, NULL, &txn_uncommitted, DB_READ_UNCOMMITTED); CKERR(r);
+
+ //
+ // test reads through cursors
+ //
+ {
+ DBT key,val;
+ DBT curr_key, curr_val;
+ DBC* cursor_committed = NULL;
+ DBC* cursor_uncommitted = NULL;
+ memset(&curr_key, 0, sizeof(curr_key));
+ memset(&curr_val, 0, sizeof(curr_val));
+
+ r = db->cursor(db, txn_committed, &cursor_committed, 0); assert(r == 0);
+ r = db->cursor(db, txn_uncommitted, &cursor_uncommitted, 0); assert(r == 0);
+
+ r = db->put(db, txn_put, dbt_init(&key, "y", 2), dbt_init(&val, "y", 2), 0); CKERR(r);
+
+ r = cursor_uncommitted->c_get(cursor_uncommitted, &curr_key, &curr_val, DB_NEXT); CKERR(r);
+ assert(((char *)(curr_key.data))[0] == 'x');
+ assert(((char *)(curr_val.data))[0] == 'x');
+
+ r = cursor_committed->c_get(cursor_committed, &curr_key, &curr_val, DB_NEXT); CKERR(r);
+ assert(((char *)(curr_key.data))[0] == 'x');
+ assert(((char *)(curr_val.data))[0] == 'x');
+
+
+
+ r = cursor_committed->c_get(cursor_committed, &curr_key, &curr_val, DB_NEXT); CKERR2(r, DB_NOTFOUND);
+ r = cursor_uncommitted->c_get(cursor_uncommitted, &curr_key, &curr_val, DB_NEXT); CKERR(r);
+ assert(((char *)(curr_key.data))[0] == 'y');
+ assert(((char *)(curr_val.data))[0] == 'y');
+
+ cursor_committed->c_close(cursor_committed);
+ cursor_uncommitted->c_close(cursor_uncommitted);
+ }
+ r = txn_put->commit(txn_put, 0); CKERR(r);
+ r = txn_committed->commit(txn_committed, 0); CKERR(r);
+ r = txn_uncommitted->commit(txn_uncommitted, 0); CKERR(r);
+
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/isolation.cc b/storage/tokudb/PerconaFT/src/tests/isolation.cc
new file mode 100644
index 00000000..abed04f1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/isolation.cc
@@ -0,0 +1,94 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test that isolation works right for subtransactions.
+// In particular, check to see what happens if a subtransaction has different isolation level from its parent.
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ {
+ DB_TXN *txna;
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txna, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ DBT key,val;
+ r = db->put(db, txna, dbt_init(&key, "a", 2), dbt_init(&val, "a", 2), 0); CKERR(r);
+
+ r = txna->commit(txna, 0); CKERR(r);
+ }
+ DB_TXN *txna, *txnx;
+ r = env->txn_begin(env, NULL, &txna, DB_READ_UNCOMMITTED); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnx, 0); CKERR(r);
+
+ // X writes a value, and A tries to read it with READ UNCOMMITTED
+ {
+// DB_TXN *txnb;
+// r = env->txn_begin(env, txna, &txnb, DB_READ_UNCOMMITTED); CKERR(r);
+ {
+ DBT key,val;
+ r = db->put(db, txnx, dbt_init(&key, "x", 2), dbt_init(&val, "x", 2), 0); CKERR(r);
+ dbt_init_malloc(&val);
+ r = db->get(db, txna, dbt_init(&key, "x", 2), &val, 0); CKERR(r);
+ toku_free(val.data);
+ val.data = NULL;
+ }
+// r = txnb->commit(txnb, 0); CKERR(r);
+ }
+ r = txna->commit(txna, 0); CKERR(r);
+ r = txnx->commit(txnx, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/key-val.h b/storage/tokudb/PerconaFT/src/tests/key-val.h
new file mode 100644
index 00000000..294f5304
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/key-val.h
@@ -0,0 +1,245 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+//
+// Functions to create unique key/value pairs, row generators, checkers, ... for each of NUM_DBS
+//
+
+// To use, during initialization:
+// generate_permute_tables();
+// r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+//
+
+
+enum {MAX_DBS=32};
+enum {MAGIC=311};
+
+// a is the bit-wise permute table. For DB[i], permute bits as described in a[i] using 'twiddle32'
+// inv is the inverse bit-wise permute of a[]. To get the original value from a twiddled value, twiddle32 (again) with inv[]
+int a[MAX_DBS][32];
+int inv[MAX_DBS][32];
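+// Worked example: if a[db][0] == 5, then bit 0 of x ends up at bit 5 of twiddle32(x, db);
+// since inv[db][5] == 0, inv_twiddle32 moves that bit back, so inv_twiddle32(twiddle32(x, db), db) == x.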
+
+// rotate right and left functions
+static inline uint32_t UU() rotr32(const uint32_t x, const uint32_t num) {
+ const uint32_t n = num % 32;
+ return (x >> n) | ( x << (32 - n));
+}
+static inline uint64_t UU() rotr64(const uint64_t x, const uint64_t num) {
+ const uint64_t n = num % 64;
+ return ( x >> n ) | ( x << (64 - n));
+}
+static inline uint32_t UU() rotl32(const uint32_t x, const uint32_t num) {
+ const uint32_t n = num % 32;
+ return (x << n) | ( x >> (32 - n));
+}
+static inline uint64_t UU() rotl64(const uint64_t x, const uint64_t num) {
+ const uint64_t n = num % 64;
+ return ( x << n ) | ( x >> (64 - n));
+}
+
+static void UU() generate_permute_tables(void) {
+ int i, j, tmp;
+ for(int db=0;db<MAX_DBS;db++) {
+ for(i=0;i<32;i++) {
+ a[db][i] = i;
+ }
+ for(i=0;i<32;i++) {
+ j = random() % (i + 1);
+ tmp = a[db][j];
+ a[db][j] = a[db][i];
+ a[db][i] = tmp;
+ }
+// if(db < NUM_DBS){ printf("a[%d] = ", db); for(i=0;i<32;i++) { printf("%2d ", a[db][i]); } printf("\n");}
+ for(i=0;i<32;i++) {
+ inv[db][a[db][i]] = i;
+ }
+ }
+}
+
+// permute bits of x based on permute table bitmap
+static uint32_t UU() twiddle32(uint32_t x, int db)
+{
+ uint32_t b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << a[db][i];
+ }
+ return b;
+}
+
+// permute bits of x based on inverse permute table bitmap
+static uint32_t UU() inv_twiddle32(uint32_t x, int db)
+{
+ uint32_t b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << inv[db][i];
+ }
+ return b;
+}
+
+// generate val from key, index
+static uint32_t UU() generate_val(int key, int i) {
+ return rotl32((key + MAGIC), i);
+}
+static uint32_t UU() pkey_for_val(int key, int i) {
+ return rotr32(key, i) - MAGIC;
+}
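+
+// Illustrative sanity check (a sketch added for clarity; nothing calls it): once
+// generate_permute_tables() has run, the permutes and the key/val generators
+// round-trip for the key ranges these tests use.
+static void UU() check_roundtrip(uint32_t x, int db, int i) {
+ assert(inv_twiddle32(twiddle32(x, db), db) == x);
+ assert(pkey_for_val(generate_val(x, i), i) == x);
+}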
+
+// There is no handlerton in this test, so this function is a local replacement
+// for the handlerton's generate_row_for_put().
+static int UU() put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ (void) src_db;
+ (void) src_val;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+
+ assert(which != 0);
+ assert(dest_db != src_db);
+ {
+ assert(dest_key->flags==DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(uint32_t)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(uint32_t));
+ dest_key->ulen = sizeof(uint32_t);
+ }
+ assert(dest_val->flags==DB_DBT_REALLOC);
+ if (dest_val->ulen < sizeof(uint32_t)) {
+ dest_val->data = toku_xrealloc(dest_val->data, sizeof(uint32_t));
+ dest_val->ulen = sizeof(uint32_t);
+ }
+ uint32_t *new_key = (uint32_t *)dest_key->data;
+ uint32_t *new_val = (uint32_t *)dest_val->data;
+
+ *new_key = twiddle32(*(uint32_t*)src_key->data, which);
+ *new_val = generate_val(*(uint32_t*)src_key->data, which);
+
+ dest_key->size = sizeof(uint32_t);
+ dest_val->size = sizeof(uint32_t);
+ //data is already set above
+ }
+
+// printf("pmg : dest_key.data = %u, dest_val.data = %u \n", *(unsigned int*)dest_key->data, *(unsigned int*)dest_val->data);
+
+ return 0;
+}
+
+UU()
+static int put_multiple_generate_switch(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ dest_key->flags = 0;
+ dest_val->flags = 0;
+
+ (void) src_db;
+
+ uint32_t which = (uint32_t) (intptr_t) dest_db->app_private;
+ assert(which == 0);
+
+ // switch the key and val
+ dbt_init(dest_key, src_val->data, src_val->size);
+ dbt_init(dest_val, src_key->data, src_key->size);
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
+
+static int UU() uint_cmp(const void *ap, const void *bp) {
+ unsigned int an = *(unsigned int *)ap;
+ unsigned int bn = *(unsigned int *)bp;
+ if (an < bn)
+ return -1;
+ if (an > bn)
+ return +1;
+ return 0;
+}
+
+float last_progress = 0.0;
+static int UU() poll_print(void *extra, float progress) {
+ if ( verbose ) {
+ if ( last_progress + 0.01 < progress ) {
+ printf(" progress : %3.0f%%\n", progress * 100.0);
+ last_progress = progress;
+ }
+ }
+ (void) extra;
+ return 0;
+}
+
+enum {MAX_CLIENTS=10};
+static inline UU() uint32_t key_to_put(int iter, int offset)
+{
+ return (uint32_t)(((iter+1) * MAX_CLIENTS) + offset);
+}
+
+static int UU() generate_initial_table(DB *db, DB_TXN *txn, uint32_t rows)
+{
+ struct timeval start, now;
+ if ( verbose ) {
+ printf("generate_initial_table\n");
+ gettimeofday(&start,0);
+ }
+ int r = 0;
+ DBT key, val;
+ uint32_t k, v, i;
+ // create keys of stride MAX_CLIENTS
+ for (i=0; i<rows; i++)
+ {
+ k = key_to_put(i, 0);
+ v = generate_val(k, 0);
+ dbt_init(&key, &k, sizeof(k));
+ dbt_init(&val, &v, sizeof(v));
+ r = db->put(db, txn, &key, &val, 0);
+ if ( r != 0 ) break;
+ }
+ if ( verbose ) {
+ gettimeofday(&now,0);
+ int duration = (int)(now.tv_sec - start.tv_sec);
+ if ( duration > 0 )
+ printf("generate_initial_table : %u rows in %d sec = %d rows/sec\n", rows, duration, rows/duration);
+ }
+
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/keyrange-merge.cc b/storage/tokudb/PerconaFT/src/tests/keyrange-merge.cc
new file mode 100644
index 00000000..4c491e84
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/keyrange-merge.cc
@@ -0,0 +1,234 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that key_range64 returns reasonable results after leaf merges
+
+// create a tree with at least 2 child nodes and large rows.
+// replace the rows with small rows.
+// this should cause a leaf node merge.
+// verify stats after the merge.
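+// a leaf merge is detected by polling the FT_FLUSHER_MERGE_LEAF engine status counter.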
+
+#include <db.h>
+#include <unistd.h>
+#include <sys/stat.h>
+
+static DB_ENV *env = NULL;
+static DB_TXN *txn = NULL;
+static DB *db = NULL;
+static uint32_t db_page_size = 4096;
+static uint32_t db_basement_size = 4096;
+static const char *envdir = TOKU_TEST_FILENAME;
+static uint64_t nrows = 0;
+
+static uint64_t
+max64(uint64_t a, uint64_t b) {
+ return a < b ? b : a;
+}
+
+static void
+run_test(void) {
+ if (verbose) printf("%s %" PRIu64 "\n", __FUNCTION__, nrows);
+
+ // create a tree with 2 children
+ uint32_t key_size = 9;
+ uint32_t val_size = db_basement_size / 32;
+ size_t est_row_size_with_overhead = 8 + key_size + 4 + val_size + 4; // xid + key + key_len + val + val_len
+ size_t rows_per_basement = db_basement_size / est_row_size_with_overhead;
+
+ if (nrows == 0)
+ nrows = 2 * (db_page_size / est_row_size_with_overhead);
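+ // e.g. with the default 4096-byte page and basement sizes: val_size = 4096/32 = 128,
+ // est_row_size_with_overhead = 8+9+4+128+4 = 153, rows_per_basement = 4096/153 = 26,
+ // and nrows defaults to 2*26 = 52.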
+
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->open(env, envdir, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, db_page_size); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // insert keys 1, 3, 5, ... 2*(nrows-1) + 1
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (uint64_t i=0; i<nrows; i++) {
+ char key[100];
+ snprintf(key, sizeof key, "%08llu", (unsigned long long)2*i+1);
+ char val[val_size];
+ memset(val, 0, val_size);
+ DBT k = { .data = key, .size = key_size };
+ DBT v = { .data = val, .size = val_size };
+ r = db->put(db, txn, &k, &v, 0); CKERR(r);
+ }
+
+ DB_BTREE_STAT64 s64;
+ r = db->stat64(db, txn, &s64); CKERR(r);
+ if (verbose)
+ printf("stats %" PRId64 " %" PRId64 "\n", s64.bt_nkeys, s64.bt_dsize);
+ assert(0 < s64.bt_nkeys && s64.bt_nkeys <= nrows);
+ assert(0 < s64.bt_dsize && s64.bt_dsize <= nrows * (key_size + val_size));
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // lose the seqinsert bit by flushing the tree from the cache table
+ r = db->close(db, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // replace the rows with small values. this should shrink the leaf node and induce merging.
+ // do this until a leaf node merge occurs.
+ int t;
+ for (t = 0; t<100; t++) {
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ // replace in reverse order to disable the sequential insertion code
+ for (uint64_t i=nrows; i>0; i--) {
+ char key[100];
+ snprintf(key, sizeof key, "%08llu", (unsigned long long)2*(i-1)+1);
+ assert(1+strlen(key) == key_size);
+ DBT k;
+ dbt_init(&k, key, 1+strlen(key));
+ DBT v;
+ dbt_init(&v, NULL, 0);
+ r = db->put(db, txn, &k, &v, 0); CKERR(r);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+
+ uint64_t merge_leaf = get_engine_status_val(env, "FT_FLUSHER_MERGE_LEAF");
+ if (merge_leaf > 0) {
+ if (verbose) printf("t=%d\n", t);
+ break;
+ }
+ }
+ assert(t < 100); // if this asserts, then no leaf merge occurred
+
+ // verify key_range for keys that exist in the tree
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (uint64_t i=0; i<nrows; i++) {
+ char key[100];
+ snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
+ DBT k;
+ uint64_t less,equal,greater;
+ int is_exact;
+ r = db->key_range64(db, txn, dbt_init(&k, key, 1+strlen(key)), &less, &equal, &greater, &is_exact); CKERR(r);
+ if (verbose)
+ printf("key %llu/%llu %llu %llu %llu %llu\n", (unsigned long long)2*i, (unsigned long long)2*nrows, (unsigned long long)less, (unsigned long long)equal, (unsigned long long)greater,
+ (unsigned long long)(less+equal+greater));
+ assert(is_exact == 0);
+ assert(0 < less + equal + greater);
+ assert(less + equal + greater < 2*nrows);
+ assert(equal == 1);
+ uint64_t est_i = max64(i, i + rows_per_basement/2);
+ assert(less <= est_i + est_i / 1);
+ assert(greater <= nrows - i + rows_per_basement/2);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // verify key range for keys that do not exist in the tree
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (uint64_t i=0; i<1+nrows; i++) {
+ char key[100];
+ snprintf(key, 100, "%08llu", (unsigned long long)2*i);
+ DBT k;
+ uint64_t less,equal,greater;
+ int is_exact;
+ r = db->key_range64(db, txn, dbt_init(&k, key, 1+strlen(key)), &less, &equal, &greater, &is_exact); CKERR(r);
+ if (verbose)
+ printf("key %llu/%llu %llu %llu %llu %llu\n", (unsigned long long)2*i, (unsigned long long)2*nrows, (unsigned long long)less, (unsigned long long)equal, (unsigned long long)greater,
+ (unsigned long long)(less+equal+greater));
+ assert(is_exact == 0);
+ assert(0 < less + equal + greater);
+ assert(less + equal + greater < 2*nrows);
+ assert(equal == 0);
+ uint64_t est_i = max64(i, i + rows_per_basement/2);
+ assert(less <= est_i + est_i / 1);
+ assert(greater <= nrows - i + rows_per_basement/2);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static int
+usage(void) {
+ fprintf(stderr, "-v (verbose)\n");
+ fprintf(stderr, "-q (quiet)\n");
+ fprintf(stderr, "--envdir %s\n", envdir);
+ fprintf(stderr, "--nrows %" PRIu64 " (number of rows)\n", nrows);
+ fprintf(stderr, "--nrows %" PRIu64 " (number of rows)\n", nrows);
+ return 1;
+}
+
+int
+test_main (int argc , char * const argv[]) {
+ for (int i = 1 ; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--envdir") == 0 && i+1 < argc) {
+ envdir = argv[++i];
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ int r;
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ run_test();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/keyrange.cc b/storage/tokudb/PerconaFT/src/tests/keyrange.cc
new file mode 100644
index 00000000..13567600
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/keyrange.cc
@@ -0,0 +1,336 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that key_range64 returns reasonable results after inserting rows into a tree.
+// variations include:
+// 1. trickle load versus bulk load
+// 2. sequential keys versus random keys
+// 3. basements on disk versus basements in memory
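+// these variations are selected below with the --loader, --random_keys, and --get flags.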
+
+#include <db.h>
+#include <unistd.h>
+#include <sys/stat.h>
+
+static DB_ENV *env = NULL;
+static DB_TXN *txn = NULL;
+static DB *db = NULL;
+static uint32_t db_page_size = 4096;
+static uint32_t db_basement_size = 4096;
+static const char *envdir = TOKU_TEST_FILENAME;
+static uint64_t nrows = 30000;
+static bool get_all = true;
+static bool use_loader = false;
+static bool random_keys = false;
+
+static int
+my_compare(DB *this_db UU(), const DBT *a UU(), const DBT *b UU()) {
+ assert(a->size == b->size);
+ return memcmp(a->data, b->data, a->size);
+}
+
+static int
+my_generate_row(DB *dest_db UU(), DB *src_db UU(), DBT_ARRAY *dest_keys UU(), DBT_ARRAY *dest_vals UU(), const DBT *src_key UU(), const DBT *src_val UU()) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ assert(dest_key->flags == DB_DBT_REALLOC);
+ dest_key->data = toku_realloc(dest_key->data, src_key->size);
+ memcpy(dest_key->data, src_key->data, src_key->size);
+ dest_key->size = src_key->size;
+ assert(dest_val->flags == DB_DBT_REALLOC);
+ dest_val->data = toku_realloc(dest_val->data, src_val->size);
+ memcpy(dest_val->data, src_val->data, src_val->size);
+ dest_val->size = src_val->size;
+ return 0;
+}
+
+static void
+swap(uint64_t keys[], uint64_t i, uint64_t j) {
+ uint64_t t = keys[i]; keys[i] = keys[j]; keys[j] = t;
+}
+
+static uint64_t
+max64(uint64_t a, uint64_t b) {
+ return a < b ? b : a;
+}
+
+static void open_env(void) {
+ int r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, my_generate_row); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, envdir, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+}
+
+static void
+run_test(void) {
+ if (verbose) printf("%s %" PRIu64 "\n", __FUNCTION__, nrows);
+
+ size_t key_size = 9;
+ size_t val_size = 9;
+ size_t est_row_size_with_overhead = 8 + key_size + 4 + val_size + 4 + 5; // xid + key + key_len + val + val_len + mvcc overhead
+ size_t rows_per_basement = db_basement_size / est_row_size_with_overhead;
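+ // e.g. with the default 4096-byte basement size: est_row_size_with_overhead = 8+9+4+9+4+5 = 39,
+ // so rows_per_basement = 4096/39 = 105.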
+
+ open_env();
+ int r;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, db_page_size); CKERR(r);
+ r = db->set_readpagesize(db, db_basement_size); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ uint64_t *XMALLOC_N(nrows, keys);
+ for (uint64_t i = 0; i < nrows; i++)
+ keys[i] = 2*i + 1;
+
+ if (random_keys)
+ for (uint64_t i = 0; i < nrows; i++)
+ swap(keys, random() % nrows, random() % nrows);
+
+ // insert keys 1, 3, 5, ... 2*(nrows-1) + 1
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ if (use_loader) {
+ DB_LOADER *loader = NULL;
+ r = env->create_loader(env, txn, &loader, db, 1, &db, NULL, NULL, 0); CKERR(r);
+ for (uint64_t i=0; i<nrows; i++) {
+ char key[100],val[100];
+ snprintf(key, sizeof key, "%08llu", (unsigned long long)keys[i]);
+ snprintf(val, sizeof val, "%08llu", (unsigned long long)keys[i]);
+ assert(1+strlen(key) == key_size && 1+strlen(val) == val_size);
+ DBT k,v;
+ r = loader->put(loader, dbt_init(&k, key, 1+strlen(key)), dbt_init(&v,val, 1+strlen(val))); CKERR(r);
+ }
+ r = loader->close(loader); CKERR(r);
+ } else {
+ for (uint64_t i=0; i<nrows; i++) {
+ char key[100],val[100];
+ snprintf(key, sizeof key, "%08llu", (unsigned long long)keys[i]);
+ snprintf(val, sizeof val, "%08llu", (unsigned long long)keys[i]);
+ assert(1+strlen(key) == key_size && 1+strlen(val) == val_size);
+ DBT k,v;
+ r = db->put(db, txn, dbt_init(&k, key, 1+strlen(key)), dbt_init(&v,val, 1+strlen(val)), 0); CKERR(r);
+ }
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // close and reopen to get rid of basements
+ r = db->close(db, 0); CKERR(r); // close MUST flush the nodes of this db out of the cache table for this test to be valid
+ r = env->close(env, 0); CKERR(r);
+ env = NULL;
+ open_env();
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ if (get_all) {
+ // read the basements into memory
+ for (uint64_t i=0; i<nrows; i++) {
+ char key[100];
+ snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
+ DBT k,v;
+ memset(&v, 0, sizeof(v));
+ r = db->get(db, txn, dbt_init(&k, key, 1+strlen(key)), &v, 0); CKERR(r);
+ }
+ }
+
+ DB_BTREE_STAT64 s64;
+ r = db->stat64(db, txn, &s64); CKERR(r);
+ if (verbose)
+ printf("stats %" PRId64 " %" PRId64 "\n", s64.bt_nkeys, s64.bt_dsize);
+ if (use_loader) {
+ assert(s64.bt_nkeys == nrows);
+ assert(s64.bt_dsize == nrows * (key_size + val_size));
+ } else {
+ assert(0 < s64.bt_nkeys && s64.bt_nkeys <= nrows);
+ assert(0 < s64.bt_dsize && s64.bt_dsize <= nrows * (key_size + val_size));
+ }
+
+ if (0) goto skipit; // debug: just write the tree
+
+ bool last_basement;
+ last_basement = false;
+ // verify key_range for keys that exist in the tree
+ uint64_t random_fudge;
+ random_fudge = random_keys ? rows_per_basement + nrows / 10 : 0;
+ for (uint64_t i=0; i<nrows; i++) {
+ char key[100];
+ snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
+ DBT k;
+ uint64_t less,equal,greater;
+ int is_exact;
+ r = db->key_range64(db, txn, dbt_init(&k, key, 1+strlen(key)), &less, &equal, &greater, &is_exact); CKERR(r);
+ if (verbose)
+ printf("key %llu/%llu %llu %llu %llu %llu\n", (unsigned long long)2*i, (unsigned long long)2*nrows, (unsigned long long)less, (unsigned long long)equal, (unsigned long long)greater,
+ (unsigned long long)(less+equal+greater));
+ assert(is_exact == 0);
+ assert(0 < less + equal + greater);
+ if (use_loader) {
+ assert(less + equal + greater <= nrows);
+ if (get_all || last_basement) {
+ assert(equal == 1);
+ } else if (i < nrows - rows_per_basement * 2) {
+ assert(equal == 0);
+ } else if (i == nrows - 1) {
+ assert(equal == 1);
+ } else if (equal == 1) {
+ last_basement = true;
+ }
+ assert(less <= max64(i, i + rows_per_basement/2));
+ assert(greater <= nrows - less);
+ } else {
+ assert(less + equal + greater <= nrows + nrows / 8);
+ if (get_all || last_basement) {
+ assert(equal == 1);
+ } else if (i < nrows - rows_per_basement * 2) {
+ assert(equal == 0);
+ } else if (i == nrows - 1) {
+ assert(equal == 1);
+ } else if (equal == 1) {
+ last_basement = true;
+ }
+ uint64_t est_i = i * 2 + rows_per_basement;
+ assert(less <= est_i + random_fudge);
+ assert(greater <= nrows - i + rows_per_basement + random_fudge);
+ }
+ }
+
+ // verify key range for keys that do not exist in the tree
+ for (uint64_t i=0; i<1+nrows; i++) {
+ char key[100];
+ snprintf(key, 100, "%08llu", (unsigned long long)2*i);
+ DBT k;
+ uint64_t less,equal,greater;
+ int is_exact;
+ r = db->key_range64(db, txn, dbt_init(&k, key, 1+strlen(key)), &less, &equal, &greater, &is_exact); CKERR(r);
+ if (verbose)
+ printf("key %llu/%llu %llu %llu %llu %llu\n", (unsigned long long)2*i, (unsigned long long)2*nrows, (unsigned long long)less, (unsigned long long)equal, (unsigned long long)greater,
+ (unsigned long long)(less+equal+greater));
+ assert(is_exact == 0);
+ assert(0 < less + equal + greater);
+ if (use_loader) {
+ assert(less + equal + greater <= nrows);
+ assert(equal == 0);
+ assert(less <= max64(i, i + rows_per_basement/2));
+ assert(greater <= nrows - less);
+ } else {
+ assert(less + equal + greater <= nrows + nrows / 8);
+ assert(equal == 0);
+ uint64_t est_i = i * 2 + rows_per_basement;
+ assert(less <= est_i + random_fudge);
+ assert(greater <= nrows - i + rows_per_basement + random_fudge);
+ }
+ }
+
+ skipit:
+ r = txn->commit(txn, 0); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ toku_free(keys);
+}
+
+static int
+usage(void) {
+ fprintf(stderr, "-v (verbose)\n");
+ fprintf(stderr, "-q (quiet)\n");
+ fprintf(stderr, "--nrows %" PRIu64 " (number of rows)\n", nrows);
+ fprintf(stderr, "--nrows %" PRIu64 " (number of rows)\n", nrows);
+ fprintf(stderr, "--loader %u (use the loader to load the keys)\n", use_loader);
+ fprintf(stderr, "--get %u (get all keys before keyrange)\n", get_all);
+ fprintf(stderr, "--random_keys %u\n", random_keys);
+ fprintf(stderr, "--page_size %u\n", db_page_size);
+ fprintf(stderr, "--basement_size %u\n", db_basement_size);
+ return 1;
+}
+
+int
+test_main (int argc , char * const argv[]) {
+ for (int i = 1 ; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--nrows") == 0 && i+1 < argc) {
+ nrows = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--get") == 0 && i+1 < argc) {
+ get_all = atoi(argv[++i]) != 0;
+ continue;
+ }
+ if (strcmp(argv[i], "--loader") == 0 && i+1 < argc) {
+ use_loader = atoi(argv[++i]) != 0;
+ continue;
+ }
+ if (strcmp(argv[i], "--random_keys") == 0 && i+1 < argc) {
+ random_keys = atoi(argv[++i]) != 0;
+ continue;
+ }
+ if (strcmp(argv[i], "--page_size") == 0 && i+1 < argc) {
+ db_page_size = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--basement_size") == 0 && i+1 < argc) {
+ db_basement_size = atoi(argv[++i]);
+ continue;
+ }
+ return usage();
+ }
+
+ toku_os_recursive_delete(envdir);
+ int r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ run_test();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/last-verify-time.cc b/storage/tokudb/PerconaFT/src/tests/last-verify-time.cc
new file mode 100644
index 00000000..0c7fac4c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/last-verify-time.cc
@@ -0,0 +1,150 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// test for the last verify time
+
+static void
+test_verify_time_after_create(DB_ENV *env) {
+ int r;
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+
+ r = db->open(db, NULL, "test.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_BTREE_STAT64 stats;
+ r = db->stat64(db, NULL, &stats); assert_zero(r);
+ assert(stats.bt_verify_time_sec == 0);
+
+ r = db->close(db, 0); assert_zero(r);
+}
+
+static void
+test_verify_time_after_open(DB_ENV *env) {
+ int r;
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+
+ r = db->open(db, NULL, "test.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_BTREE_STAT64 stats;
+ r = db->stat64(db, NULL, &stats); assert_zero(r);
+ assert(stats.bt_verify_time_sec == 0);
+
+ r = db->close(db, 0); assert_zero(r);
+}
+
+static void
+test_verify_time_after_check(DB_ENV *env) {
+ int r;
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+
+ r = db->open(db, NULL, "test.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_BTREE_STAT64 stats;
+ r = db->stat64(db, NULL, &stats); assert_zero(r);
+ assert(stats.bt_verify_time_sec == 0);
+
+ r = db->verify_with_progress(db, NULL, NULL, 0, 0); assert_zero(r);
+
+ r = db->stat64(db, NULL, &stats); assert_zero(r);
+ assert(stats.bt_verify_time_sec != 0);
+
+ r = db->close(db, 0); assert_zero(r);
+}
+
+static void
+test_verify_time_after_reopen(DB_ENV *env) {
+ int r;
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+
+ r = db->open(db, NULL, "test.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_BTREE_STAT64 stats;
+ r = db->stat64(db, NULL, &stats); assert_zero(r);
+ assert(stats.bt_verify_time_sec != 0);
+
+ r = db->close(db, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ assert(0);
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ test_verify_time_after_create(env);
+
+ test_verify_time_after_open(env);
+
+ test_verify_time_after_check(env);
+
+ test_verify_time_after_reopen(env);
+
+ r = env->close(env, 0); assert_zero(r);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-cleanup-test.cc b/storage/tokudb/PerconaFT/src/tests/loader-cleanup-test.cc
new file mode 100644
index 00000000..a229cb5b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-cleanup-test.cc
@@ -0,0 +1,1067 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* TODO:
+ *
+ * When ready, add simulated errors on calls to malloc()
+ *
+ */
+
+
+/* Purpose is to verify that when a loader fails:
+ * - there are no temp files remaining
+ * - the loader-generated iname file is not present
+ *
+ * A loader can fail in the following ways:
+ * - user calls loader->abort()
+ * - user aborts transaction
+ * - disk full (ENOSPC)
+ * - crash (not tested in this test program)
+ *
+ * Mechanism:
+ * This test is derived from the loader-stress-test.
+ *
+ * The outline of the test is as follows:
+ * - use loader to create table
+ * - verify presence of temp files
+ * - commit / abort / inject error (simulated error from system call)
+ * - verify absence of temp files
+ * - verify absence of unwanted iname files (old inames if committed, new inames if aborted)
+ *
+ *
+ */
+
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+
+#include <sys/types.h>
+#include <dirent.h>
+
+#include "ydb-internal.h"
+
+enum test_type {event, // any event
+ commit, // close loader, commit txn
+ abort_txn, // close loader, abort txn
+ abort_loader, // abort loader, abort txn
+ abort_via_poll, // close loader, but poll function returns non-zero, abort txn
+ enospc_w, // close loader, but close fails due to enospc return from toku_os_write
+ enospc_f, // either loader->put() or loader->close() fails due to enospc return from do_fwrite()
+ enospc_p, // loader->close() fails due to enospc return from toku_os_pwrite()
+ einval_fdo, // return einval from fdopen()
+ einval_fo, // return einval from fopen()
+ einval_o, // return einval from open()
+ enospc_fc}; // return enospc from fclose()
+
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+enum {MAX_DBS=256};
+#define default_NUM_DBS 5
+int NUM_DBS=default_NUM_DBS;
+#define default_NUM_ROWS 100000
+int NUM_ROWS=default_NUM_ROWS;
+//static int NUM_ROWS=50000000;
+int CHECK_RESULTS=0;
+int DISALLOW_PUTS=0;
+int COMPRESS=0;
+int event_trigger_lo=0; // what event triggers to use?
+int event_trigger_hi =0; // 0 and 0 mean none.
+enum {MAGIC=311};
+
+
+DBT old_inames[MAX_DBS];
+DBT new_inames[MAX_DBS];
+
+static const char *loader_temp_prefix = "tokuld"; // #2536
+static int count_temp(char * dirname);
+static void get_inames(DBT* inames, DB** dbs);
+static int verify_file(char * dirname, char * filename);
+static void assert_inames_missing(DBT* inames);
+static void run_all_tests(void);
+static void free_inames(DBT* inames);
+
+
+// how many different system calls are intercepted with error injection
+#define NUM_ERR_TYPES (7+1) // abort_via_poll does not exactly inject errors
+
+int64_t event_count = 0; // number of calls of all types so far (in this run)
+int64_t event_count_nominal = 0; // number of calls of all types in the nominally error-free run.
+int64_t event_count_trigger = 0; // which call will we complain about
+
+int fwrite_count = 0;
+int fwrite_count_nominal = 0; // number of fwrite calls for normal operation, initially zero
+int fwrite_count_trigger = 0; // sequence number of fwrite call that will fail (zero disables induced failure)
+
+int write_count = 0;
+int write_count_nominal = 0; // number of write calls for normal operation, initially zero
+int write_count_trigger = 0; // sequence number of write call that will fail (zero disables induced failure)
+
+int pwrite_count = 0;
+int pwrite_count_nominal = 0; // number of pwrite calls for normal operation, initially zero
+int pwrite_count_trigger = 0; // sequence number of pwrite call that will fail (zero disables induced failure)
+
+int fdopen_count = 0;
+int fdopen_count_nominal = 0; // number of fdopen calls for normal operation, initially zero
+int fdopen_count_trigger = 0; // sequence number of fdopen call that will fail (zero disables induced failure)
+
+int fopen_count = 0;
+int fopen_count_nominal = 0; // number of fopen calls for normal operation, initially zero
+int fopen_count_trigger = 0; // sequence number of fopen call that will fail (zero disables induced failure)
+
+int open_count = 0;
+int open_count_nominal = 0; // number of open calls for normal operation, initially zero
+int open_count_trigger = 0; // sequence number of open call that will fail (zero disables induced failure)
+
+int fclose_count = 0;
+int fclose_count_nominal = 0; // number of fclose calls for normal operation, initially zero
+int fclose_count_trigger = 0; // sequence number of fclose call that will fail (zero disables induced failure)
+
+int poll_count = 0;
+int poll_count_nominal = 0; // number of poll calls for normal operation, initially zero
+int poll_count_trigger = 0; // sequence number of poll call that will fail (zero disables induced failure)
+
+int error_injected = 0;
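+
+// Error-injection convention used by the bad_*() wrappers below: each wrapper increments
+// its own per-call counter and the global event_count, and fails (with ENOSPC or EINVAL)
+// when either its per-call *_count_trigger or event_count_trigger matches the current
+// count; a trigger of zero leaves that induced failure disabled.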
+
+static const char *
+err_type_str (enum test_type t) {
+ switch(t) {
+ case event: return "anyevent";
+ case enospc_f: return "fwrite";
+ case enospc_w: return "write";
+ case enospc_p: return "pwrite";
+ case einval_fdo: return "fdopen";
+ case einval_fo: return "fopen";
+ case einval_o: return "open";
+ case enospc_fc: return "fclose";
+ case abort_via_poll: return "abort_via_poll";
+ case commit: abort();
+ case abort_txn: abort();
+ case abort_loader: abort();
+ }
+ // I know that Barry prefers the single-return case, but writing the code this way means that the compiler will complain if I forget something in the enum. -Bradley
+ abort();
+ return NULL;
+}
+
+static const char *
+err_msg_type_str (enum test_type t) {
+ switch(t) {
+ case event: return "ENOSPC/EINVAL/POLL";
+ case enospc_f: return "ENOSPC";
+ case enospc_w: return "ENOSPC";
+ case enospc_p: return "ENOSPC";
+ case einval_fdo: return "EINVAL";
+ case einval_fo: return "EINVAL";
+ case einval_o: return "EINVAL";
+ case enospc_fc: return "ENOSPC";
+ case abort_via_poll: return "non-zero";
+ case commit: abort();
+ case abort_txn: abort();
+ case abort_loader: abort();
+ }
+ // I know that Barry prefers the single-return case, but writing the code this way means that the compiler will complain if I forget something in the enum. -Bradley
+ abort();
+ return NULL;
+}
+
+static size_t bad_fwrite (const void *ptr, size_t size, size_t nmemb, FILE *stream) {
+ fwrite_count++;
+ event_count++;
+ size_t r;
+ if (fwrite_count_trigger == fwrite_count || event_count == event_count_trigger) {
+ error_injected++;
+ errno = ENOSPC;
+ r = (size_t) -1;
+ } else {
+ r = fwrite(ptr, size, nmemb, stream);
+ if (r!=nmemb) {
+ errno = ferror(stream);
+ }
+ }
+ return r;
+}
+
+
+static ssize_t
+bad_write(int fd, const void * bp, size_t len) {
+ ssize_t r;
+ write_count++;
+ event_count++;
+ if (write_count_trigger == write_count || event_count == event_count_trigger) {
+ error_injected++;
+ errno = ENOSPC;
+ r = -1;
+ } else {
+ r = write(fd, bp, len);
+ }
+ return r;
+}
+
+static ssize_t
+bad_pwrite (int fd, const void *buf, size_t len, toku_off_t off) {
+ int r;
+ pwrite_count++;
+ event_count++;
+ if (pwrite_count_trigger == pwrite_count || event_count == event_count_trigger) {
+ error_injected++;
+ errno = ENOSPC;
+ r = -1;
+ } else {
+ r = pwrite(fd, buf, len, off);
+ }
+ return r;
+}
+
+
+
+static FILE *
+bad_fdopen(int fd, const char * mode) {
+ FILE * rval;
+ fdopen_count++;
+ event_count++;
+ if (fdopen_count_trigger == fdopen_count || event_count == event_count_trigger) {
+ error_injected++;
+ errno = EINVAL;
+ rval = NULL;
+ } else {
+ rval = fdopen(fd, mode);
+ }
+ return rval;
+}
+
+static FILE *
+bad_fopen(const char *filename, const char *mode) {
+ FILE * rval;
+ fopen_count++;
+ event_count++;
+ if (fopen_count_trigger == fopen_count || event_count == event_count_trigger) {
+ error_injected++;
+ errno = EINVAL;
+ rval = NULL;
+ } else {
+ rval = fopen(filename, mode);
+ }
+ return rval;
+}
+
+
+static int
+bad_open(const char *path, int oflag, int mode) {
+ int rval;
+ open_count++;
+ event_count++;
+ if (open_count_trigger == open_count || event_count == event_count_trigger) {
+ error_injected++;
+ errno = EINVAL;
+ rval = -1;
+ } else {
+ rval = open(path, oflag, mode);
+ }
+ return rval;
+}
+
+
+
+static int
+bad_fclose(FILE * stream) {
+ int rval;
+ fclose_count++;
+ event_count++;
+ // Must close the stream even in the "error case" because otherwise there is no way to get the memory back.
+ rval = fclose(stream);
+ if (rval==0) {
+ if (fclose_count_trigger == fclose_count || event_count == event_count_trigger) {
+ error_injected++;
+ errno = ENOSPC;
+ rval = -1;
+ }
+ }
+ return rval;
+}
+
+
+
+///////////////
+
+
+// return number of temp files
+static int
+count_temp(char * dirname) {
+ int n = 0;
+
+ DIR * dir = opendir(dirname);
+
+ struct dirent *ent;
+ while ((ent=readdir(dir))) {
+ if ((ent->d_type==DT_REG || ent->d_type==DT_UNKNOWN) && strncmp(ent->d_name, loader_temp_prefix, 6)==0) {
+ n++;
+ if (verbose >= 3) {
+ printf("Temp files\n");
+ printf(" %s/%s\n", dirname, ent->d_name);
+ }
+ }
+ }
+ closedir(dir);
+ return n;
+}
+
+
+
+// return non-zero if file exists
+static int
+verify_file(char * dirname, char * filename) {
+ int n = 0;
+ DIR * dir = opendir(dirname);
+
+ struct dirent *ent;
+ while ((ent=readdir(dir))) {
+ if ((ent->d_type==DT_REG || ent->d_type==DT_UNKNOWN) && strcmp(ent->d_name, filename)==0) {
+ n++;
+ }
+ }
+ closedir(dir);
+ return n;
+}
+
+static void
+get_inames(DBT* inames, DB** dbs) {
+ int i;
+ for (i = 0; i < NUM_DBS; i++) {
+ DBT dname;
+ char * dname_str = dbs[i]->i->dname;
+ dbt_init(&dname, dname_str, strlen(dname_str)+1);
+ dbt_init(&(inames[i]), NULL, 0);
+ inames[i].flags |= DB_DBT_MALLOC;
+ int r = env->get_iname(env, &dname, &inames[i]);
+ CKERR(r);
+ char * iname_str = (char*) (inames[i].data);
+ if (verbose >= 2) printf("dname = %s, iname = %s\n", dname_str, iname_str);
+ }
+}
+
+
+static void
+assert_inames_missing(DBT* inames) {
+ int i;
+ char * dir = env->i->real_data_dir;
+ for (i=0; i<NUM_DBS; i++) {
+ char * CAST_FROM_VOIDP(iname, inames[i].data);
+ int r = verify_file(dir, iname);
+ if (r) {
+ printf("File %s exists, but it should not\n", iname);
+ }
+ assert(r == 0);
+ if (verbose) printf("File has been properly deleted: %s\n", iname);
+ }
+}
+
+static
+void free_inames(DBT* inames) {
+ int i;
+ for (i=0; i<NUM_DBS; i++) {
+ toku_free(inames[i].data);
+ }
+}
+
+#if 0
+static void
+print_inames(DB** dbs) {
+ int i;
+ for (i = 0; i < NUM_DBS; i++) {
+ DBT dname;
+ DBT iname;
+ char * dname_str = dbs[i]->i->dname;
+ dbt_init(&dname, dname_str, strlen(dname_str)+1);
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ int r = env->get_iname(env, &dname, &iname);
+ CKERR(r);
+ char * iname_str = (char*)iname.data;
+ if (verbose) printf("dname = %s, iname = %s\n", dname_str, iname_str);
+ int n = verify_file(env->i->real_data_dir, iname_str);
+ assert(n == 1);
+ toku_free(iname.data);
+ }
+}
+#endif
+
+
+//
+// Functions to create unique key/value pairs, row generators, checkers, ... for each of NUM_DBS
+//
+
+// a is the bit-wise permute table. For DB[i], permute bits as described in a[i] using 'twiddle32'
+// inv is the inverse bit-wise permute of a[]. To get the original value from a twiddled value, twiddle32 (again) with inv[]
+int a[MAX_DBS][32];
+int inv[MAX_DBS][32];
+
+
+// rotate right and left functions
+static inline unsigned int rotr32(const unsigned int x, const unsigned int num) {
+ const unsigned int n = num % 32;
+ return (x >> n) | ( x << (32 - n));
+}
+static inline unsigned int rotl32(const unsigned int x, const unsigned int num) {
+ const unsigned int n = num % 32;
+ return (x << n) | ( x >> (32 - n));
+}
+
+static void generate_permute_tables(void) {
+ int i, j, tmp;
+ for(int db=0;db<MAX_DBS;db++) {
+ for(i=0;i<32;i++) {
+ a[db][i] = i;
+ }
+ for(i=0;i<32;i++) {
+ j = random() % (i + 1);
+ tmp = a[db][j];
+ a[db][j] = a[db][i];
+ a[db][i] = tmp;
+ }
+// if(db < NUM_DBS){ printf("a[%d] = ", db); for(i=0;i<32;i++) { printf("%2d ", a[db][i]); } printf("\n");}
+ for(i=0;i<32;i++) {
+ inv[db][a[db][i]] = i;
+ }
+ }
+}
+
+// permute bits of x based on permute table bitmap
+static unsigned int twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << a[db][i];
+ }
+ return b;
+}
+
+// permute bits of x based on inverse permute table bitmap
+static unsigned int inv_twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << inv[db][i];
+ }
+ return b;
+}
+
+// generate val from key, index
+static unsigned int generate_val(int key, int i) {
+ return rotl32((key + MAGIC), i);
+}
+static unsigned int pkey_for_val(int key, int i) {
+ return rotr32(key, i) - MAGIC;
+}
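+
+// A quick sketch of the identities the row generator and checker rely on:
+//     inv_twiddle32(twiddle32(x, db), db) == x      (a permutation and its inverse)
+//     pkey_for_val(generate_val(k, i), i) == k      (rotr undoes rotl, then MAGIC cancels)
+// check_results() uses these to recover the primary key from each secondary row.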
+
+// There is no handlerton in this test, so this function is a local replacement
+// for the handlerton's generate_row_for_put().
+static int put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ (void) src_db;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+
+ if ( which == 0 ) {
+ if (dest_key->flags==DB_DBT_REALLOC) {
+ if (dest_key->data) toku_free(dest_key->data);
+ dest_key->flags = 0;
+ dest_key->ulen = 0;
+ }
+ if (dest_val->flags==DB_DBT_REALLOC) {
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_val->flags = 0;
+ dest_val->ulen = 0;
+ }
+ dbt_init(dest_key, src_key->data, src_key->size);
+ dbt_init(dest_val, src_val->data, src_val->size);
+ }
+ else {
+ assert(dest_key->flags==DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(unsigned int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(unsigned int));
+ dest_key->ulen = sizeof(unsigned int);
+ }
+ assert(dest_val->flags==DB_DBT_REALLOC);
+ if (dest_val->ulen < sizeof(unsigned int)) {
+ dest_val->data = toku_xrealloc(dest_val->data, sizeof(unsigned int));
+ dest_val->ulen = sizeof(unsigned int);
+ }
+ unsigned int *new_key = (unsigned int *)dest_key->data;
+ unsigned int *new_val = (unsigned int *)dest_val->data;
+
+ *new_key = twiddle32(*(unsigned int*)src_key->data, which);
+ *new_val = generate_val(*(unsigned int*)src_key->data, which);
+
+ dest_key->size = sizeof(unsigned int);
+ dest_val->size = sizeof(unsigned int);
+ //data is already set above
+ }
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
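+
+// For example, if the primary row is (k, v): DB[0] receives (k, v) unchanged, while each
+// secondary DB[i] (i > 0) receives (twiddle32(k, i), generate_val(k, i)).  Note that the
+// secondary value is derived from the primary key, not the primary value, which is what
+// lets check_results() verify each index independently.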
+
+
+static void check_results(DB **dbs)
+{
+ for(int j=0;j<NUM_DBS;j++){
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ int r;
+ unsigned int pkey_for_db_key;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ DBC *cursor;
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
+ for(int i=0;i<NUM_ROWS;i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, EINVAL);
+ } else {
+ CKERR(r);
+ k = *(unsigned int*)key.data;
+ pkey_for_db_key = (j == 0) ? k : inv_twiddle32(k, j);
+ v = *(unsigned int*)val.data;
+ // test that we have the expected keys and values
+ assert((unsigned int)pkey_for_db_key == (unsigned int)pkey_for_val(v, j));
+// printf(" DB[%d] key = %10u, val = %10u, pkey_for_db_key = %10u, pkey_for_val=%10d\n", j, v, k, pkey_for_db_key, pkey_for_val(v, j));
+ }
+ }
+ {printf("."); fflush(stdout);}
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+ printf("\nCheck OK\n");
+}
+
+static void *expect_poll_void = &expect_poll_void;
+static int poll_function (void *extra, float progress) {
+ int r;
+ if (0) {
+ static int did_one=0;
+ static struct timeval start;
+ struct timeval now;
+ gettimeofday(&now, 0);
+ if (!did_one) {
+ start=now;
+ did_one=1;
+ }
+ printf("%6.6f %5.1f%%\n", now.tv_sec - start.tv_sec + 1e-6*(now.tv_usec - start.tv_usec), progress*100);
+ }
+ assert(extra==expect_poll_void);
+ assert(0.0<=progress && progress<=1.0);
+ poll_count++;
+ event_count++;
+ if (poll_count_trigger == poll_count || event_count == event_count_trigger) {
+ r = 1;
+ }
+ else {
+ r = 0;
+ }
+ return r;
+}
+
+static void test_loader(enum test_type t, DB **dbs, int trigger)
+{
+ int failed_put = 0;
+ int error_injection; // are we expecting simulated errors from system calls?
+ error_injected = 0; // number of errors actually injected
+
+ if (t == commit ||
+ t == abort_txn ||
+ t == abort_loader ||
+ t == abort_via_poll)
+ error_injection = 0;
+ else
+ error_injection = 1;
+
+
+ int r;
+ DB_TXN *txn;
+ DB_LOADER *loader;
+ uint32_t db_flags[MAX_DBS];
+ uint32_t dbt_flags[MAX_DBS];
+ for(int i=0;i<MAX_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ }
+ uint32_t loader_flags = DISALLOW_PUTS | COMPRESS; // set with -p/-z option
+
+ if (verbose >= 2)
+ printf("old inames:\n");
+ get_inames(old_inames, dbs);
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = env->create_loader(env, txn, &loader, dbs[0], NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+ r = loader->set_error_callback(loader, NULL, NULL);
+ CKERR(r);
+ r = loader->set_poll_function(loader, poll_function, expect_poll_void);
+ CKERR(r);
+
+ if (verbose) {
+ printf("DISALLOW_PUTS = %d\n", DISALLOW_PUTS);
+ printf("COMPRESS = %d\n", COMPRESS);
+ }
+ if (verbose >= 2)
+ printf("new inames:\n");
+ get_inames(new_inames, dbs);
+
+ // using loader->put, put values into DB
+ DBT key, val;
+ unsigned int k, v;
+ for(int i=1;i<=NUM_ROWS && !failed_put;i++) {
+ k = i;
+ v = generate_val(i, 0);
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ r = loader->put(loader, &key, &val);
+ if (DISALLOW_PUTS) {
+ assert(r == EINVAL);
+ } else if (r != 0) {
+ assert(error_injection && error_injected);
+ failed_put = r;
+ }
+ if ( CHECK_RESULTS || verbose) { if((i%10000) == 0){printf("."); fflush(stdout);} }
+ }
+ if( CHECK_RESULTS || verbose ) {printf("\n"); fflush(stdout);}
+
+ assert(poll_count == 0); // no polling before loader->close() is called
+
+ // You cannot count the temp files here.
+ if (verbose) {
+ printf("Data dir is %s\n", env->i->real_data_dir);
+ }
+ if (t == commit || t == abort_txn) {
+ // close the loader
+ if (verbose) {
+ printf("closing\n");
+ fflush(stdout);
+ }
+ r = loader->close(loader);
+ CKERR(r);
+ if (!DISALLOW_PUTS) {
+ assert(poll_count>0);
+ // You cannot count temp files here
+ }
+ }
+ else if (t == abort_via_poll) {
+ assert(!DISALLOW_PUTS); // test makes no sense with DISALLOW_PUTS
+ if (verbose)
+ printf("closing, but expecting abort via poll\n");
+ r = loader->close(loader);
+ if (r == 0) {
+ printf("loader->close() returned 0 but should have failed due to non-zero return from polling function.\n");
+ fflush(stdout);
+ }
+ assert(r); // not defined what close() returns when poll function returns non-zero
+ }
+ else if (error_injection && !failed_put) {
+ const char * type = err_type_str(t);
+ r = loader->close(loader);
+ if (verbose) {
+ if (error_injected)
+ printf("closing, but expecting failure from simulated error (enospc or einval)%s\n", type);
+ else
+ printf("closing, expecting no error because number of system calls was less than predicted (%s)\n", type);
+ }
+ if (!DISALLOW_PUTS && error_injected) {
+ if (r == 0) {
+ printf("loader->close() returned 0 but should have failed due to injected error from %s on call %d\n",
+ err_type_str(t), trigger);
+ fflush(stdout);
+ }
+ assert(r);
+ }
+ else
+ CKERR(r); // if using puts, "outer" loader should close without error, if no errors injected should also close without error
+ }
+ else {
+ if (verbose)
+ printf("aborting loader");
+ r = loader->abort(loader);
+ CKERR(r);
+ }
+ int n = count_temp(env->i->real_data_dir);
+ if (verbose) printf("Num temp files = %d\n", n);
+ fflush(stdout);
+ assert(n==0);
+
+ if (verbose)
+ printf(" done\n");
+
+ if (t == commit) {
+ event_count_nominal = event_count;
+ fwrite_count_nominal = fwrite_count; // capture how many fwrites were required for normal operation
+ write_count_nominal = write_count; // capture how many writes were required for normal operation
+ pwrite_count_nominal = pwrite_count; // capture how many pwrites were required for normal operation
+ fdopen_count_nominal = fdopen_count; // capture how many fdopens were required for normal operation
+ fopen_count_nominal = fopen_count; // capture how many fopens were required for normal operation
+ open_count_nominal = open_count; // capture how many opens were required for normal operation
+ fclose_count_nominal = fclose_count; // capture how many fcloses were required for normal operation
+ poll_count_nominal = poll_count; // capture how many times the polling function was called
+
+ if (verbose) {
+ printf("Nominal calls: function calls (number of calls for normal operation)\n");
+ printf(" events %" PRId64 "\n", event_count_nominal);
+ printf(" fwrite %d\n", fwrite_count_nominal);
+ printf(" write %d\n", write_count_nominal);
+ printf(" pwrite %d\n", pwrite_count_nominal);
+ printf(" fdopen %d\n", fdopen_count_nominal);
+ printf(" fopen %d\n", fopen_count_nominal);
+ printf(" open %d\n", open_count_nominal);
+ printf(" fclose %d\n", fclose_count_nominal);
+ printf(" poll %d\n", poll_count_nominal);
+ }
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ if (!DISALLOW_PUTS) {
+ assert_inames_missing(old_inames);
+ }
+ if ( CHECK_RESULTS ) {
+ check_results(dbs);
+ }
+
+ }
+ else {
+ r = txn->abort(txn);
+ CKERR(r);
+ if (!DISALLOW_PUTS) {
+ assert_inames_missing(new_inames);
+ }
+ }
+ free_inames(old_inames);
+ free_inames(new_inames);
+}
+
+
+static int run_test_count = 0;
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static void run_test(enum test_type t, int trigger)
+{
+ run_test_count++;
+
+ int r;
+
+ if (verbose>0) { // Don't print anything if verbose is 0. Use "+" to indicate progress if verbose is positive
+ printf("+");
+ fflush(stdout);
+ }
+
+ toku_os_recursive_delete(envdir);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ generate_permute_tables();
+
+ event_count_trigger = event_count = 0;
+ fwrite_count_trigger = fwrite_count = 0;
+ write_count_trigger = write_count = 0;
+ pwrite_count_trigger = pwrite_count = 0;
+ fdopen_count_trigger = fdopen_count = 0;
+ fopen_count_trigger = fopen_count = 0;
+ open_count_trigger = open_count = 0;
+ fclose_count_trigger = fclose_count = 0;
+ poll_count_trigger = poll_count = 0;
+
+ switch(t) {
+ case commit:
+ case abort_txn:
+ case abort_loader:
+ break;
+ case event:
+ event_count_trigger = trigger; break;
+ case enospc_f:
+ fwrite_count_trigger = trigger; break;
+ case enospc_w:
+ write_count_trigger = trigger; break;
+ case enospc_p:
+ pwrite_count_trigger = trigger; break;
+ case einval_fdo:
+ fdopen_count_trigger = trigger; break;
+ case einval_fo:
+ fopen_count_trigger = trigger; break;
+ case einval_o:
+ open_count_trigger = trigger; break;
+ case enospc_fc:
+ fclose_count_trigger = trigger; break;
+ case abort_via_poll:
+ poll_count_trigger = trigger; break;
+ default:
+ abort();
+ }
+
+
+ db_env_set_func_loader_fwrite(bad_fwrite);
+ db_env_set_func_write(bad_write);
+ db_env_set_func_pwrite(bad_pwrite);
+ db_env_set_func_fdopen(bad_fdopen);
+ db_env_set_func_fopen(bad_fopen);
+ db_env_set_func_open(bad_open);
+ db_env_set_func_fclose(bad_fclose);
+
+ test_loader(t, dbs, trigger);
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ if (verbose >= 3)
+ print_engine_status(env);
+
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+static void run_all_tests(void) {
+ int trigger;
+
+ if (verbose) printf("\n\nTesting loader with loader close and txn commit (normal)\n");
+ run_test(commit, 0);
+
+ if (verbose) printf("\n\nTesting loader with loader abort and txn abort\n");
+ run_test(abort_loader, 0);
+
+ if (verbose) printf("\n\nTesting loader with loader close and txn abort\n");
+ run_test(abort_txn, 0);
+
+ if (event_trigger_lo || event_trigger_hi) {
+ printf("\n\nDoing events %d-%d\n", event_trigger_lo, event_trigger_hi);
+ for (int i=event_trigger_lo; i<=event_trigger_hi; i++) {
+ run_test(event, i);
+ }
+ } else {
+
+ enum test_type et[NUM_ERR_TYPES] = {enospc_f, enospc_w, enospc_p, einval_fdo, einval_fo, einval_o, enospc_fc, abort_via_poll};
+ int * nomp[NUM_ERR_TYPES] = {&fwrite_count_nominal, &write_count_nominal, &pwrite_count_nominal,
+ &fdopen_count_nominal, &fopen_count_nominal, &open_count_nominal,
+ &fclose_count_nominal, &poll_count_nominal};
+ int limit = NUM_DBS * 5;
+ int j;
+ for (j = 0; j<NUM_ERR_TYPES; j++) {
+ enum test_type t = et[j];
+ const char * err_type = err_type_str(t);
+ const char * err_msg_type = err_msg_type_str(t);
+
+ int nominal = *(nomp[j]);
+ if (verbose)
+ printf("\nNow test with induced %s returned from %s, nominal = %d\n", err_msg_type, err_type, nominal);
+ int i;
+ // induce write error at beginning of process
+ for (i = 1; i < limit && i < nominal+1; i++) {
+ trigger = i;
+ if (verbose) printf("\n\nTesting loader with %s induced at %s count %d (of %d)\n",
+ err_msg_type, err_type, trigger, nominal);
+ run_test(t, trigger);
+ }
+ if (nominal > limit) { // if we didn't already test every possible case
+ // induce write error sprinkled through process
+ for (i = 2; i < 5; i++) {
+ trigger = nominal / i;
+ if (verbose) printf("\n\nTesting loader with %s induced at %s count %d (of %d)\n",
+ err_msg_type, err_type, trigger, nominal);
+ run_test(t, trigger);
+ }
+ // induce write error at end of process
+ for (i = 0; i < limit; i++) {
+ trigger = nominal - i;
+ assert(trigger > 0);
+ if (verbose) printf("\n\nTesting loader with %s induced at %s count %d (of %d)\n",
+ err_msg_type, err_type, trigger, nominal);
+ run_test(t, trigger);
+ }
+ }
+ }
+ }
+}
+
+static int test_only_abort_via_poll = 0;
+
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ if (test_only_abort_via_poll) {
+ printf("Testing only normal operation and abort via polling, but test abort_via_polling exhaustively.\n");
+ if (verbose) {
+ print_time_now();
+ printf(": Testing loader with loader close and txn commit (normal)\n");
+ }
+ run_test(commit, 0);
+ if (verbose) {
+ printf("\n\nTesting loader with abort_via_polling exhaustively,\n");
+ printf("returning 1 from polling function on each iteration from 1 to %d\n", poll_count_nominal);
+ }
+ for (int i = 1; i < poll_count_nominal+1; i++) {
+ const char * err_type = err_type_str(abort_via_poll);
+ const char * err_msg_type = err_msg_type_str(abort_via_poll);
+ if (verbose) {
+ print_time_now();
+ printf(": Testing loader with %s induced at %s count %d (of %d)\n",
+ err_msg_type, err_type, i, poll_count_nominal);
+ print_time_now();
+ }
+ run_test(abort_via_poll, i);
+ }
+ if (verbose) {
+ print_time_now();
+ printf(": Done.\n");
+ }
+ }
+ else
+ run_all_tests();
+ printf("run_test_count=%d\n", run_test_count);
+ return 0;
+}
+
+static void usage(const char *cmd) {
+ fprintf(stderr, "Usage: -h -c -s -p -d <num_dbs> -r <num_rows> -t <elow> <ehi> \n%s\n", cmd);
+ fprintf(stderr, " where -h print this message.\n");
+ fprintf(stderr, " -c check the results.\n");
+ fprintf(stderr, " -p LOADER_DISALLOW_PUTS.\n");
+ fprintf(stderr, " -z LOADER_COMPRESS_INTERMEDIATES.\n");
+ fprintf(stderr, " -k Test only normal operation and abort_via_poll (but thoroughly).\n");
+ fprintf(stderr, " -s size_factor=1.\n");
+ fprintf(stderr, " -d <num_dbs> Number of indexes to create (default=%d).\n", default_NUM_DBS);
+ fprintf(stderr, " -r <num_rows> Number of rows to put (default=%d).\n", default_NUM_ROWS);
+ fprintf(stderr, " -t <elo> <ehi> Instrument only events <elo> to <ehi> (default: instrument all).\n");
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ usage(cmd); exit(resultcode);
+ } else if (strcmp(argv[0], "-d")==0 && argc > 1) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-r")==0 && argc > 1) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-c")==0) {
+ CHECK_RESULTS = 1;
+ } else if (strcmp(argv[0], "-z")==0) {
+ COMPRESS = LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-p")==0) {
+ DISALLOW_PUTS = LOADER_DISALLOW_PUTS;
+ printf("DISABLED Using puts as part of #4503\n");
+ } else if (strcmp(argv[0], "-k")==0) {
+ test_only_abort_via_poll = 1;
+ printf("Perform only abort_via_poll test\n");
+ } else if (strcmp(argv[0], "-t")==0 && argc > 2) {
+ argc--; argv++;
+ event_trigger_lo = atoi(argv[0]);
+ argc--; argv++;
+ event_trigger_hi = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-s")==0) {
+ db_env_set_loader_size_factor(1);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-close-nproc-limit.cc b/storage/tokudb/PerconaFT/src/tests/loader-close-nproc-limit.cc
new file mode 100644
index 00000000..f8b16126
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-close-nproc-limit.cc
@@ -0,0 +1,143 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/resource.h>
+
+static int loader_flags = 0;
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static void run_test(int ndb) {
+ int r;
+
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+
+ DB *dbs[ndb];
+ uint32_t db_flags[ndb];
+ uint32_t dbt_flags[ndb];
+ for (int i = 0; i < ndb; i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char name[32];
+ sprintf(name, "db%d", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ }
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ DB_LOADER *loader;
+ r = env->create_loader(env, txn, &loader, ndb > 0 ? dbs[0] : NULL, ndb, dbs, db_flags, dbt_flags, loader_flags); CKERR(r);
+
+ struct rlimit current_nproc_limit;
+ r = getrlimit(RLIMIT_NPROC, &current_nproc_limit);
+ assert(r == 0);
+
+ struct rlimit new_nproc_limit = current_nproc_limit;
+ new_nproc_limit.rlim_cur = 0;
+ r = setrlimit(RLIMIT_NPROC, &new_nproc_limit);
+ assert(r == 0);
+
+ r = loader->close(loader);
+
+ if (loader_flags & LOADER_DISALLOW_PUTS)
+ CKERR(r);
+ else
+ CKERR2(r, EAGAIN);
+
+ r = setrlimit(RLIMIT_NPROC, &current_nproc_limit);
+ assert(r == 0);
+
+ r = txn->abort(txn); CKERR(r);
+
+ for (int i = 0; i < ndb; i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: %s -h -v -q -p\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-p") == 0) {
+ loader_flags |= LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-z") == 0) {
+ loader_flags |= LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-e") == 0) {
+ argc--; argv++;
+ if (argc > 0)
+ envdir = argv[0];
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test(1);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-create-abort.cc b/storage/tokudb/PerconaFT/src/tests/loader-create-abort.cc
new file mode 100644
index 00000000..ca0d5019
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-create-abort.cc
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Ensure that loader->abort frees all of its resources. The test just creates a loader and then
+// aborts it.
+
+#include "test.h"
+#include <db.h>
+
+static int loader_flags = 0;
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static int put_multiple_generate(DB *UU(dest_db), DB *UU(src_db), DBT_ARRAY *UU(dest_keys), DBT_ARRAY *UU(dest_vals), const DBT *UU(src_key), const DBT *UU(src_val)) {
+ return ENOMEM;
+}
+
+static void loader_open_abort(void) {
+ int r;
+
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ DB_LOADER *loader;
+ r = env->create_loader(env, txn, &loader, NULL, 0, NULL, NULL, NULL, loader_flags); CKERR(r);
+
+ r = loader->abort(loader); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: [-h] [-v] [-q] [-p]\n%s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-p") == 0) {
+ loader_flags |= LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-z") == 0) {
+ loader_flags |= LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-e") == 0) {
+ argc--; argv++;
+ if (argc > 0)
+ envdir = argv[0];
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ loader_open_abort();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-create-close.cc b/storage/tokudb/PerconaFT/src/tests/loader-create-close.cc
new file mode 100644
index 00000000..eb9b3f26
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-create-close.cc
@@ -0,0 +1,130 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Ensure that loader->close frees all of its resources. The test just creates loaders over 0, 1,
+// and 2 databases and then closes them.
+
+#include "test.h"
+#include <db.h>
+
+static int loader_flags = 0;
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static void test_loader_create_close(int ndb) {
+ int r;
+
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+
+ DB *dbs[ndb];
+ uint32_t db_flags[ndb];
+ uint32_t dbt_flags[ndb];
+ for (int i = 0; i < ndb; i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char name[32];
+ sprintf(name, "db%d", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ }
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ DB_LOADER *loader;
+ r = env->create_loader(env, txn, &loader, ndb > 0 ? dbs[0] : NULL, ndb, dbs, db_flags, dbt_flags, loader_flags); CKERR(r);
+
+ r = loader->close(loader); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ for (int i = 0; i < ndb; i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: %s -h -v -q -p\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-p") == 0) {
+ loader_flags |= LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-z") == 0) {
+ loader_flags |= LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-e") == 0) {
+ argc--; argv++;
+ if (argc > 0)
+ envdir = argv[0];
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ test_loader_create_close(0);
+ test_loader_create_close(1);
+ test_loader_create_close(2);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-create-commit-nproc-limit.cc b/storage/tokudb/PerconaFT/src/tests/loader-create-commit-nproc-limit.cc
new file mode 100644
index 00000000..e158a00d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-create-commit-nproc-limit.cc
@@ -0,0 +1,159 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// This test crashes if a failed loader creation causes the db to be corrupted by unlinking
+// the underlying fractal tree files. This unlinking occurs because the txn that logs the
+// load log entries is committed rather than aborted.
+
+#include "test.h"
+#include <db.h>
+#include <sys/resource.h>
+
+static int loader_flags = 0;
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static void run_test(int ndb) {
+ int r;
+
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+
+ DB *dbs[ndb];
+ uint32_t db_flags[ndb];
+ uint32_t dbt_flags[ndb];
+ for (int i = 0; i < ndb; i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char name[32];
+ sprintf(name, "db%d", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ }
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ struct rlimit current_nproc_limit;
+ r = getrlimit(RLIMIT_NPROC, &current_nproc_limit);
+ assert(r == 0);
+
+ struct rlimit new_nproc_limit = current_nproc_limit;
+ new_nproc_limit.rlim_cur = 0;
+ r = setrlimit(RLIMIT_NPROC, &new_nproc_limit);
+ assert(r == 0);
+
+ DB_LOADER *loader;
+ int loader_r = env->create_loader(env, txn, &loader, ndb > 0 ? dbs[0] : NULL, ndb, dbs, db_flags, dbt_flags, loader_flags);
+
+ r = setrlimit(RLIMIT_NPROC, &current_nproc_limit);
+ assert(r == 0);
+
+ if (loader_flags & LOADER_DISALLOW_PUTS) {
+ CKERR(loader_r);
+ loader_r = loader->close(loader);
+ CKERR(loader_r);
+ } else {
+ CKERR2(loader_r, EAGAIN);
+ }
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ for (int i = 0; i < ndb; i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ for (int i = 0; i < ndb; i++) {
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char name[32];
+ sprintf(name, "db%d", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, 0, 0666); CKERR(r);
+ }
+
+ for (int i = 0; i < ndb; i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: %s -h -v -q -p\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-p") == 0) {
+ loader_flags |= LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-z") == 0) {
+ loader_flags |= LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-e") == 0) {
+ argc--; argv++;
+ if (argc > 0)
+ envdir = argv[0];
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test(1);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-create-nproc-limit.cc b/storage/tokudb/PerconaFT/src/tests/loader-create-nproc-limit.cc
new file mode 100644
index 00000000..6d4b4d7b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-create-nproc-limit.cc
@@ -0,0 +1,147 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Verify that env->create_loader works correctly (does not crash, does not leak memory, returns the right error code)
+// when the NPROC limit is exceeded.
+
+#include "test.h"
+#include <db.h>
+#include <sys/resource.h>
+
+static int loader_flags = 0;
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static void run_test(int ndb) {
+ int r;
+
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+
+ DB *dbs[ndb];
+ uint32_t db_flags[ndb];
+ uint32_t dbt_flags[ndb];
+ for (int i = 0; i < ndb; i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ char name[32];
+ sprintf(name, "db%d", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ }
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ struct rlimit current_nproc_limit;
+ r = getrlimit(RLIMIT_NPROC, &current_nproc_limit);
+ assert(r == 0);
+
+ struct rlimit new_nproc_limit = current_nproc_limit;
+ new_nproc_limit.rlim_cur = 0;
+ r = setrlimit(RLIMIT_NPROC, &new_nproc_limit);
+ assert(r == 0);
+
+ DB_LOADER *loader;
+ int loader_r = env->create_loader(env, txn, &loader, ndb > 0 ? dbs[0] : NULL, ndb, dbs, db_flags, dbt_flags, loader_flags);
+
+ r = setrlimit(RLIMIT_NPROC, &current_nproc_limit);
+ assert(r == 0);
+
+ if (loader_flags & LOADER_DISALLOW_PUTS) {
+ CKERR(loader_r);
+ loader_r = loader->close(loader);
+ CKERR(loader_r);
+ } else {
+ CKERR2(loader_r, EAGAIN);
+ }
+
+ r = txn->abort(txn); CKERR(r);
+
+ for (int i = 0; i < ndb; i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: %s -h -v -q -p\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-p") == 0) {
+ loader_flags |= LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-z") == 0) {
+ loader_flags |= LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-e") == 0) {
+ argc--; argv++;
+ if (argc > 0)
+ envdir = argv[0];
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test(1);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-dup-test.cc b/storage/tokudb/PerconaFT/src/tests/loader-dup-test.cc
new file mode 100644
index 00000000..aaf77c50
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-dup-test.cc
@@ -0,0 +1,452 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+enum {MAX_DBS=256};
+int NUM_DBS=5;
+int NUM_ROWS=100000;
+int CHECK_RESULTS=0;
+int DISALLOW_PUTS=0;
+int COMPRESS=0;
+enum {MAGIC=311};
+
+bool dup_row_at_end = false; // false: duplicate at the beginning. true: duplicate at the end. The duplicated row is row 0.
+int dup_row_id = 0; // 0 means to use row 1 if inserting at the end, row NUM_ROWS if inserting at the beginning. Otherwise insert the row specified here.
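+// Example with the defaults (dup_row_at_end == false, dup_row_id == 0): row NUM_ROWS is put
+// first and then again inside the main 1..NUM_ROWS loop, so loader->close() must return
+// DB_KEYEXIST and the error callback must fire exactly once for that key.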
+
+//
+// Functions to create unique key/value pairs, row generators, checkers, ... for each of NUM_DBS
+//
+
+// a is the bit-wise permute table. For DB[i], permute bits as described in a[i] using 'twiddle32'
+// inv is the inverse bit-wise permute of a[]. To get the original value from a twiddled value, twiddle32 (again) with inv[]
+int a[MAX_DBS][32];
+int inv[MAX_DBS][32];
+
+
+// rotate right and left functions
+static inline unsigned int rotr32(const unsigned int x, const unsigned int num) {
+ const unsigned int n = num % 32;
+ return (x >> n) | ( x << (32 - n));
+}
+static inline unsigned int rotl32(const unsigned int x, const unsigned int num) {
+ const unsigned int n = num % 32;
+ return (x << n) | ( x >> (32 - n));
+}
+
+static void generate_permute_tables(void) {
+ int i, j, tmp;
+ for(int db=0;db<MAX_DBS;db++) {
+ for(i=0;i<32;i++) {
+ a[db][i] = i;
+ }
+ for(i=0;i<32;i++) {
+ j = random() % (i + 1);
+ tmp = a[db][j];
+ a[db][j] = a[db][i];
+ a[db][i] = tmp;
+ }
+// if(db < NUM_DBS){ printf("a[%d] = ", db); for(i=0;i<32;i++) { printf("%2d ", a[db][i]); } printf("\n");}
+ for(i=0;i<32;i++) {
+ inv[db][a[db][i]] = i;
+ }
+ }
+}
+
+// permute bits of x based on permute table bitmap
+static unsigned int twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << a[db][i];
+ }
+ return b;
+}
+
+// permute bits of x based on inverse permute table bitmap
+static unsigned int inv_twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << inv[db][i];
+ }
+ return b;
+}
+
+// generate val from key, index
+static unsigned int generate_val(int key, int i) {
+ return rotl32((key + MAGIC), i);
+}
+static unsigned int pkey_for_val(int key, int i) {
+ return rotr32(key, i) - MAGIC;
+}
+
+// There is no handlerton in this test, so this function is a local replacement
+// for the handlerton's generate_row_for_put().
+static int put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ (void) src_db;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+
+ if ( which == 0 ) {
+ if (dest_key->flags==DB_DBT_REALLOC) {
+ if (dest_key->data) toku_free(dest_key->data);
+ dest_key->flags = 0;
+ dest_key->ulen = 0;
+ }
+ if (dest_val->flags==DB_DBT_REALLOC) {
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_val->flags = 0;
+ dest_val->ulen = 0;
+ }
+ dbt_init(dest_key, src_key->data, src_key->size);
+ dbt_init(dest_val, src_val->data, src_val->size);
+ }
+ else {
+ assert(dest_key->flags==DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(unsigned int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(unsigned int));
+ dest_key->ulen = sizeof(unsigned int);
+ }
+ assert(dest_val->flags==DB_DBT_REALLOC);
+ if (dest_val->ulen < sizeof(unsigned int)) {
+ dest_val->data = toku_xrealloc(dest_val->data, sizeof(unsigned int));
+ dest_val->ulen = sizeof(unsigned int);
+ }
+ unsigned int *new_key = (unsigned int *)dest_key->data;
+ unsigned int *new_val = (unsigned int *)dest_val->data;
+
+ *new_key = twiddle32(*(unsigned int*)src_key->data, which);
+ *new_val = generate_val(*(unsigned int*)src_key->data, which);
+
+ dest_key->size = sizeof(unsigned int);
+ dest_val->size = sizeof(unsigned int);
+ //data is already set above
+ }
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
+
+
+static void check_results(DB **dbs)
+{
+ for(int j=0;j<NUM_DBS;j++){
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ int r;
+ unsigned int pkey_for_db_key;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ DBC *cursor;
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
+ for(int i=0;i<NUM_ROWS;i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, EINVAL);
+ } else {
+ CKERR(r);
+ k = *(unsigned int*)key.data;
+ pkey_for_db_key = (j == 0) ? k : inv_twiddle32(k, j);
+ v = *(unsigned int*)val.data;
+ // test that we have the expected keys and values
+ assert((unsigned int)pkey_for_db_key == (unsigned int)pkey_for_val(v, j));
+// printf(" DB[%d] key = %10u, val = %10u, pkey_for_db_key = %10u, pkey_for_val=%10d\n", j, v, k, pkey_for_db_key, pkey_for_val(v, j));
+ }
+ }
+ {printf("."); fflush(stdout);}
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+ printf("\nCheck OK\n");
+}
+
+struct error_extra {
+ int bad_i;
+ int error_count;
+};
+
+static void error_callback (DB *db, int which_db, int err, DBT *key, DBT *val, void *extra) {
+ assert(db);
+ assert(extra);
+ assert(err==DB_KEYEXIST);
+ assert(which_db>=0);
+ assert(key->size==4);
+ assert(which_db==0);
+ struct error_extra *e =(struct error_extra *)extra;
+ assert(e->bad_i == *(int*)key->data);
+ (void)val;
+ assert(e->error_count==0);
+ e->error_count++;
+}
+
+static void test_loader(DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_LOADER *loader;
+ uint32_t db_flags[MAX_DBS];
+ uint32_t dbt_flags[MAX_DBS];
+ for(int i=0;i<MAX_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ }
+ uint32_t loader_flags = DISALLOW_PUTS | COMPRESS; // set with -p option
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = env->create_loader(env, txn, &loader, dbs[0], NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+ struct error_extra error_extra = {.bad_i = 0, .error_count=0};
+ r = loader->set_error_callback(loader, error_callback, (void*)&error_extra);
+ CKERR(r);
+ r = loader->set_poll_function(loader, NULL, NULL);
+ CKERR(r);
+
+ // using loader->put, put values into DB
+ DBT key, val;
+ unsigned int k, v;
+ if (!dup_row_at_end) {
+ // put a duplicate row in.
+ int i = dup_row_id==0 ? NUM_ROWS : dup_row_id;
+ k = i;
+ v = generate_val(i, 0);
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ r = loader->put(loader, &key, &val);
+ CKERR(r);
+ if ( CHECK_RESULTS || verbose) { if((i%10000) == 0){printf("."); fflush(stdout);} }
+ error_extra.bad_i = i;
+ }
+ for(int i=1;i<=NUM_ROWS;i++) {
+ k = i;
+ v = generate_val(i, 0);
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ r = loader->put(loader, &key, &val);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, EINVAL);
+ } else {
+ CKERR(r);
+ }
+ if ( CHECK_RESULTS || verbose) { if((i%10000) == 0){printf("."); fflush(stdout);} }
+ }
+ if (dup_row_at_end) {
+ // put a duplicate row in.
+ int i = dup_row_id==0 ? 1 : dup_row_id;
+ k = i;
+ v = generate_val(i, 0);
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ r = loader->put(loader, &key, &val);
+ CKERR(r);
+ if ( CHECK_RESULTS || verbose) { if((i%10000) == 0){printf("."); fflush(stdout);} }
+ error_extra.bad_i = i;
+ }
+
+ if( CHECK_RESULTS || verbose ) {printf("\n"); fflush(stdout);}
+
+ // close the loader
+ if (verbose) { printf("closing"); fflush(stdout); }
+ r = loader->close(loader);
+ if (verbose) { printf(" done\n"); }
+ if (NUM_ROWS > 0) {
+ assert(r==DB_KEYEXIST);
+ assert(error_extra.error_count==1);
+ }
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ // verify the DBs
+ if ( CHECK_RESULTS ) {
+ check_results(dbs);
+ }
+}
+
+char *free_me = NULL;
+const char *env_dir = TOKU_TEST_FILENAME; // the default env_dir
+
+static void run_test(void)
+{
+ int r;
+ toku_os_recursive_delete(env_dir);
+ r = toku_os_mkdir(env_dir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOG | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ generate_permute_tables();
+
+ if (verbose) printf("running test_loader()\n");
+ // -------------------------- //
+ test_loader(dbs);
+ // -------------------------- //
+ if (verbose) printf("done test_loader()\n");
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+bool num_rows_set = false;
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ if (num_rows_set)
+ run_test();
+ else {
+ int sizes[]={1,4000000,-1};
+ //Make PUT loader take about the same amount of time:
+ if (DISALLOW_PUTS) sizes[1] /= 25;
+ for (int i=0; sizes[i]>=0; i++) {
+ if (verbose) printf("Doing %d\n", sizes[i]);
+ NUM_ROWS = sizes[i];
+ run_test();
+ }
+ }
+ if (free_me) toku_free(free_me);
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: %s -h -c -d %d -r %d\n", cmd, NUM_DBS, NUM_ROWS);
+ fprintf(stderr, " where -e <env> uses <env> to construct the directory (so that different tests can run concurrently)\n");
+ fprintf(stderr, " -s use size factor of 1 (makes internal loader buffers small so certain cases are easier to test)\n");
+ fprintf(stderr, " -E duplicate the first row at the end (not the beginning).\n");
+ fprintf(stderr, " -D <rid> use row id <rid> when duplicating. (Default is 1 if inserting at end, <numrows> if inserting at beginning\n");
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-d")==0) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ num_rows_set = true;
+ } else if (strcmp(argv[0], "-c")==0) {
+ CHECK_RESULTS = 1;
+ } else if (strcmp(argv[0], "-z")==0) {
+ COMPRESS = LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-p")==0) {
+ DISALLOW_PUTS = LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-s")==0) {
+ db_env_set_loader_size_factor(1);
+ } else if (strcmp(argv[0], "-E")==0) {
+ dup_row_at_end = true;
+ } else if (strcmp(argv[0], "-D")==0) {
+ argc--; argv++;
+ dup_row_id = atoi(argv[0]);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ assert(0<=dup_row_id && dup_row_id<=NUM_ROWS);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-no-puts.cc b/storage/tokudb/PerconaFT/src/tests/loader-no-puts.cc
new file mode 100644
index 00000000..2a29b150
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-no-puts.cc
@@ -0,0 +1,245 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+
+static const char *envdir = TOKU_TEST_FILENAME;
+
+DB_ENV *env;
+int DISALLOW_PUTS=0;
+int COMPRESS=0;
+enum {MAX_NAME=128};
+enum {NUM_DBS=1};
+enum {NUM_KV_PAIRS=3};
+struct kv_pair {
+ int64_t key;
+ int64_t val;
+};
+struct kv_pair kv_pairs[NUM_KV_PAIRS] = {{1,4},
+ {2,5},
+ {3,6}};
+
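+// Row-generation callback registered via set_generate_row_callback_for_put().
+// This test has a single dictionary, so the callback simply copies the source
+// key/value through unchanged.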
+static int put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ dest_key->flags = 0;
+ dest_val->flags = 0;
+
+ (void) src_db;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+ assert(which == 0);
+
+ dbt_init(dest_key, src_key->data, src_key->size);
+ dbt_init(dest_val, src_val->data, src_val->size);
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
+
+static void test_loader(DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_LOADER *loader;
+ uint32_t db_flags[NUM_DBS];
+ uint32_t dbt_flags[NUM_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ }
+    uint32_t loader_flags = DISALLOW_PUTS | COMPRESS; // set with -p and -z options
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = env->create_loader(env, txn, &loader, dbs[0], NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+ r = loader->set_error_callback(loader, NULL, NULL);
+ CKERR(r);
+ r = loader->set_poll_function(loader, NULL, NULL);
+ CKERR(r);
+
+/* // using loader->put, put values into DB
+ DBT key, val;
+ for(int i=0;i<NUM_KV_PAIRS;i++) {
+ dbt_init(&key, &kv_pairs[i].key, sizeof(kv_pairs[i].key));
+ dbt_init(&val, &kv_pairs[i].val, sizeof(kv_pairs[i].val));
+ r = loader->put(loader, &key, &val);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, EINVAL);
+ } else {
+ CKERR(r);
+ }
+ }
+*/
+ // close the loader
+ r = loader->close(loader);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ // verify the DBs
+/*
+ DBC *cursor;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ for(int j=0;j<NUM_DBS;j++) {
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
+ for(int i=0;i<NUM_KV_PAIRS;i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, DB_NOTFOUND);
+ } else {
+ if (r!=0) { fprintf(stderr, "r==%d, failure\n", r); }
+ CKERR(r);
+ assert(*(int64_t*)key.data == kv_pairs[i].key);
+ assert(*(int64_t*)val.data == kv_pairs[i].val);
+ }
+ }
+ cursor->c_close(cursor);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+*/
+
+    if ( verbose ) printf("PASS\n");
+}
+
+static void run_test(void)
+{
+ int r;
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+    int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB *dbs[NUM_DBS];
+ int idx[NUM_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ // -------------------------- //
+ test_loader(dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+        r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ r = env->close(env, 0); CKERR(r);
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-z")==0) {
+ COMPRESS = LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-p")==0) {
+ DISALLOW_PUTS = LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-e") == 0) {
+ argc--; argv++;
+ if (argc > 0)
+ envdir = argv[0];
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-reference-test.cc b/storage/tokudb/PerconaFT/src/tests/loader-reference-test.cc
new file mode 100644
index 00000000..22be28ed
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-reference-test.cc
@@ -0,0 +1,254 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+
+static const char *envdir = TOKU_TEST_FILENAME;
+
+DB_ENV *env;
+int DISALLOW_PUTS=0;
+int COMPRESS=0;
+enum {MAX_NAME=128};
+enum {NUM_DBS=1};
+enum {NUM_KV_PAIRS=3};
+struct kv_pair {
+ int64_t key;
+ int64_t val;
+};
+struct kv_pair kv_pairs[NUM_KV_PAIRS] = {{1,4},
+ {2,5},
+ {3,6}};
+static uint32_t block_size = 0;
+
+static int put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ dest_key->flags = 0;
+ dest_val->flags = 0;
+
+ (void) src_db;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+ assert(which == 0);
+
+ dbt_init(dest_key, src_key->data, src_key->size);
+ dbt_init(dest_val, src_val->data, src_val->size);
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
+
+static void test_loader(DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_LOADER *loader;
+ uint32_t db_flags[NUM_DBS];
+ uint32_t dbt_flags[NUM_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ }
+    uint32_t loader_flags = DISALLOW_PUTS | COMPRESS; // set with -p and -z options
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = env->create_loader(env, txn, &loader, dbs[0], NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+ r = loader->set_error_callback(loader, NULL, NULL);
+ CKERR(r);
+ r = loader->set_poll_function(loader, NULL, NULL);
+ CKERR(r);
+
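+    // The loader is expected not to generate log entries for its puts, so the
+    // latest LSN should be unchanged across the put loop (checked below).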
+ uint64_t before_puts = toku_test_get_latest_lsn(env);
+ // using loader->put, put values into DB
+ DBT key, val;
+ for(int i=0;i<NUM_KV_PAIRS;i++) {
+ dbt_init(&key, &kv_pairs[i].key, sizeof(kv_pairs[i].key));
+ dbt_init(&val, &kv_pairs[i].val, sizeof(kv_pairs[i].val));
+ r = loader->put(loader, &key, &val);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, EINVAL);
+ } else {
+ CKERR(r);
+ }
+ }
+ uint64_t after_puts = toku_test_get_latest_lsn(env);
+ assert(before_puts == after_puts);
+
+ // close the loader
+ r = loader->close(loader);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ // verify the DBs
+ DBC *cursor;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ for(int j=0;j<NUM_DBS;j++) {
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
+ for(int i=0;i<NUM_KV_PAIRS;i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r!=0) { fprintf(stderr, "r==%d, failure\n", r); }
+ if (DISALLOW_PUTS) {
+ CKERR2(r, DB_NOTFOUND);
+ } else {
+ CKERR(r);
+ assert(*(int64_t*)key.data == kv_pairs[i].key);
+ assert(*(int64_t*)val.data == kv_pairs[i].val);
+ }
+ }
+ cursor->c_close(cursor);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ if ( verbose ) printf("PASS\n");
+}
+
+static void run_test(void)
+{
+ int r;
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char logdir[8 + strlen(envdir)];
+ snprintf(logdir, sizeof logdir, "%s/log", envdir);
+ r = toku_os_mkdir(logdir, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_lg_dir(env, "log");
+ CKERR(r);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+    int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB *dbs[NUM_DBS];
+ int idx[NUM_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ if (block_size != 0) {
+ r = dbs[i]->set_pagesize(dbs[i], block_size); CKERR(r);
+ }
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ // -------------------------- //
+ test_loader(dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+        r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ r = env->close(env, 0); CKERR(r);
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-p")==0) {
+ DISALLOW_PUTS = LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-z")==0) {
+ COMPRESS = LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "--block_size") == 0) {
+ argc--; argv++;
+ block_size = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-e") == 0) {
+ argc--; argv++;
+ if (argc > 0)
+ envdir = argv[0];
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-stress-del.cc b/storage/tokudb/PerconaFT/src/tests/loader-stress-del.cc
new file mode 100644
index 00000000..aaf75d13
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-stress-del.cc
@@ -0,0 +1,733 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Need to use malloc for the malloc instrumentation tests
+#ifndef TOKU_ALLOW_DEPRECATED
+#define TOKU_ALLOW_DEPRECATED
+#endif
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+#include <memory.h>
+#include <dlfcn.h>
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+enum {MAX_DBS=1024};
+int NUM_DBS=1;
+int NUM_ROWS=1000000;
+int CHECK_RESULTS=1;
+int DISALLOW_PUTS=0;
+int COMPRESS=0;
+enum { old_default_cachesize=1024 }; // MB
+int CACHESIZE=old_default_cachesize;
+int ALLOW_DUPS=0;
+enum {MAGIC=311};
+char *datadir = NULL;
+bool check_est = true; // do check the estimates by default
+bool footprint_print = false; // print memory footprint info
+bool upgrade_test = false;
+
+// Code for showing memory footprint information.
+pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;
+size_t hiwater;
+size_t water;
+size_t hiwater_start;
+static long long mcount = 0, fcount=0;
+
+
+static void my_free(void*p) {
+ if (p) {
+ water-=toku_malloc_usable_size(p);
+ }
+ free(p);
+}
+
+static void *my_malloc(size_t size) {
+ void *r = malloc(size);
+ if (r) {
+ water += toku_malloc_usable_size(r);
+ if (water>hiwater) hiwater=water;
+ }
+ return r;
+}
+
+static void *my_realloc(void *p, size_t size) {
+ size_t old_usable = p ? toku_malloc_usable_size(p) : 0;
+ void *r = realloc(p, size);
+ if (r) {
+ water -= old_usable;
+ water += toku_malloc_usable_size(r);
+ }
+ return r;
+}
+
+//
+// Functions to create unique key/value pairs, row generators, checkers, ... for each of NUM_DBS
+//
+
+// a is the bit-wise permute table. For DB[i], permute bits as described in a[i] using 'twiddle32'
+// inv is the inverse bit-wise permute of a[]. To get the original value from a twiddled value, twiddle32 (again) with inv[]
+int a[MAX_DBS][32];
+int inv[MAX_DBS][32];
+
+
+static const char *loader_temp_prefix = "tokuld"; // #2536
+
+// return number of temp files
+static int
+count_temp(char * dirname) {
+ int n = 0;
+
+    DIR * dir = opendir(dirname);
+    assert(dir != NULL);
+
+ struct dirent *ent;
+ while ((ent=readdir(dir))) {
+ if ((ent->d_type==DT_REG || ent->d_type==DT_UNKNOWN) && strncmp(ent->d_name, loader_temp_prefix, 6)==0) {
+ n++;
+ if (verbose) {
+ printf("Temp files (%d)\n", n);
+ printf(" %s/%s\n", dirname, ent->d_name);
+ }
+ }
+ }
+ closedir(dir);
+ return n;
+}
+
+// rotate right and left functions
+static inline unsigned int rotr32(const unsigned int x, const unsigned int num) {
+ if (num == 0) {
+ return x;
+ } else {
+ const unsigned int n = num % 32;
+ return (x >> n) | ( x << (32 - n));
+ }
+}
+static inline unsigned int rotl32(const unsigned int x, const unsigned int num) {
+ if (num == 0) {
+ return x;
+ } else {
+ const unsigned int n = num % 32;
+ return (x << n) | ( x >> (32 - n));
+ }
+}
+
+static void generate_permute_tables(void) {
+ int i, j, tmp;
+ for(int db=0;db<MAX_DBS;db++) {
+ for(i=0;i<32;i++) {
+ a[db][i] = i;
+ }
+ for(i=0;i<32;i++) {
+ j = random() % (i + 1);
+ tmp = a[db][j];
+ a[db][j] = a[db][i];
+ a[db][i] = tmp;
+ }
+// if(db < NUM_DBS){ printf("a[%d] = ", db); for(i=0;i<32;i++) { printf("%2d ", a[db][i]); } printf("\n");}
+ for(i=0;i<32;i++) {
+ inv[db][a[db][i]] = i;
+ }
+ }
+}
+
+// permute bits of x based on permute table bitmap
+static unsigned int twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << a[db][i];
+ }
+ return b;
+}
+
+// permute bits of x based on inverse permute table bitmap
+static unsigned int inv_twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << inv[db][i];
+ }
+ return b;
+}
+
+// generate val from key, index
+static unsigned int generate_val(int key, int i) {
+ return rotl32((key + MAGIC), i);
+}
+static unsigned int pkey_for_val(int key, int i) {
+ return rotr32(key, i) - MAGIC;
+}
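+
+// Illustrative only (not part of the test): for any secondary dictionary index
+// j (1 <= j < NUM_DBS) the per-dictionary transforms are invertible, so a
+// secondary row can be mapped back to its primary key:
+//
+//     unsigned int k  = 42;
+//     unsigned int sk = twiddle32(k, j);      // secondary key for DB[j]
+//     assert(inv_twiddle32(sk, j) == k);      // inv[] undoes a[]
+//     unsigned int v  = generate_val(k, j);   // value stored in DB[j]
+//     assert(pkey_for_val(v, j) == k);        // rotr32 undoes rotl32, minus MAGIC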
+
+// There is no handlerton in this test, so this function is a local replacement
+// for the handlerton's generate_row_for_put().
+static int put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ (void) src_db;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+
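+    // which == 0 is the primary dictionary: the row is passed through unchanged.
+    // Other dictionaries get a twiddled key and a generated value, written into
+    // the caller-owned DB_DBT_REALLOC buffers.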
+ if ( which == 0 ) {
+ if (dest_key->flags==DB_DBT_REALLOC) {
+ if (dest_key->data) toku_free(dest_key->data);
+ dest_key->flags = 0;
+ dest_key->ulen = 0;
+ }
+ if (dest_val->flags==DB_DBT_REALLOC) {
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_val->flags = 0;
+ dest_val->ulen = 0;
+ }
+ dbt_init(dest_key, src_key->data, src_key->size);
+ dbt_init(dest_val, src_val->data, src_val->size);
+ }
+ else {
+ assert(dest_key->flags==DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(unsigned int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(unsigned int));
+ dest_key->ulen = sizeof(unsigned int);
+ }
+ assert(dest_val->flags==DB_DBT_REALLOC);
+ if (dest_val->ulen < sizeof(unsigned int)) {
+ dest_val->data = toku_xrealloc(dest_val->data, sizeof(unsigned int));
+ dest_val->ulen = sizeof(unsigned int);
+ }
+ unsigned int *new_key = (unsigned int *)dest_key->data;
+ unsigned int *new_val = (unsigned int *)dest_val->data;
+
+ *new_key = twiddle32(*(unsigned int*)src_key->data, which);
+ *new_val = generate_val(*(unsigned int*)src_key->data, which);
+
+ dest_key->size = sizeof(unsigned int);
+ dest_val->size = sizeof(unsigned int);
+ //data is already set above
+ }
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
+
+
+static int uint_cmp(const void *ap, const void *bp) {
+ unsigned int an = *(unsigned int *)ap;
+ unsigned int bn = *(unsigned int *)bp;
+ if (an < bn)
+ return -1;
+ if (an > bn)
+ return +1;
+ return 0;
+}
+
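+// Scan each dictionary with a cursor and verify that exactly the expected keys
+// are present in sorted order and that each value maps back to its primary key
+// (or that the dictionary is empty when DISALLOW_PUTS is set).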
+static void check_results(DB **dbs) {
+ for(int j=0;j<NUM_DBS;j++) {
+ unsigned int prev_k = 0;
+
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+
+ int r;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ DBC *cursor;
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
+
+ // generate the expected keys
+ unsigned int *expected_key = (unsigned int *) toku_malloc(NUM_ROWS * sizeof (unsigned int));
+ for (int i = 0; i < NUM_ROWS; i++) {
+ expected_key[i] = j == 0 ? (unsigned int)(i+1) : twiddle32(i+1, j);
+ }
+ // sort the keys
+ qsort(expected_key, NUM_ROWS, sizeof (unsigned int), uint_cmp);
+
+ for (int i = 0; i < NUM_ROWS+1; i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, DB_NOTFOUND);
+ break;
+ }
+ if (r == DB_NOTFOUND) {
+ assert(i == NUM_ROWS); // check that there are exactly NUM_ROWS in the dictionary
+ break;
+ }
+ CKERR(r);
+
+ k = *(unsigned int*)key.data;
+
+ unsigned int pkey_for_db_key = (j == 0) ? k : inv_twiddle32(k, j);
+ v = *(unsigned int*)val.data;
+ // test that we have the expected keys and values
+ assert((unsigned int)pkey_for_db_key == (unsigned int)pkey_for_val(v, j));
+// printf(" DB[%d] key = %10u, val = %10u, pkey_for_db_key = %10u, pkey_for_val=%10d\n", j, v, k, pkey_for_db_key, pkey_for_val(v, j));
+
+ // check the expected keys
+ assert(k == expected_key[i]);
+
+ // check prev_key < key
+ if (i > 0)
+ assert(prev_k < k);
+
+ // update prev = current
+ prev_k = k;
+ }
+
+ toku_free(expected_key);
+
+ if ( verbose ) {printf("."); fflush(stdout);}
+ r = cursor->c_close(cursor);
+ CKERR(r);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+ if ( verbose ) printf("\nCheck OK\n");
+}
+
+static void delete_all(DB **dbs) {
+ for(int j=0;j<NUM_DBS;j++) {
+
+ int r;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ // generate the expected keys
+ unsigned int *expected_key = (unsigned int *) toku_malloc(NUM_ROWS * sizeof (unsigned int));
+ for (int i = 0; i < NUM_ROWS; i++)
+ expected_key[i] = j == 0 ? (unsigned int)(i+1) : twiddle32(i+1, j);
+ // sort the keys
+ qsort(expected_key, NUM_ROWS, sizeof (unsigned int), uint_cmp);
+
+ // delete all of the keys
+ for (int i = 0; i < NUM_ROWS; i++) {
+ DBT key;
+ dbt_init(&key, &expected_key[i], sizeof expected_key[i]);
+ r = dbs[j]->del(dbs[j], txn, &key, DB_DELETE_ANY);
+ assert(r == 0);
+ }
+
+ // verify empty
+ DBC *cursor;
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
+
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ assert(r == DB_NOTFOUND);
+
+ toku_free(expected_key);
+
+ if ( verbose ) {printf("."); fflush(stdout);}
+ r = cursor->c_close(cursor);
+ CKERR(r);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+ if ( verbose ) printf("\nCheck OK\n");
+}
+
+static void *expect_poll_void = &expect_poll_void;
+static uint64_t poll_count=0;
+static uint64_t bomb_after_poll_count=UINT64_MAX;
+
+static struct progress_info {
+ double time;
+ double progress;
+} *progress_infos=NULL;
+static int progress_infos_count=0;
+static int progress_infos_limit=0;
+
+// timing
+static bool did_start=false;
+static struct timeval start;
+
+static int poll_function (void *extra, float progress) {
+ if (verbose>=2) {
+ assert(did_start);
+ struct timeval now;
+ gettimeofday(&now, 0);
+ double elapsed = now.tv_sec - start.tv_sec + 1e-6*(now.tv_usec - start.tv_usec);
+ printf("Progress: %6.6fs %5.1f%%\n", elapsed, progress*100);
+ if (progress_infos_count>=progress_infos_limit) {
+ progress_infos_limit = 2*progress_infos_limit + 1;
+ XREALLOC_N(progress_infos_limit, progress_infos);
+ }
+ progress_infos[progress_infos_count++] = (struct progress_info){elapsed, progress};
+ }
+ assert(extra==expect_poll_void);
+ assert(0.0<=progress && progress<=1.0);
+ poll_count++; // Calls to poll_function() are protected by a lock, so we don't have to do this atomically.
+ if (poll_count>bomb_after_poll_count)
+ return TOKUDB_CANCELED;
+ else
+ return 0;
+}
+
+static struct timeval starttime;
+static double elapsed_time (void) {
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ return now.tv_sec - starttime.tv_sec + 1e-6*(now.tv_usec - starttime.tv_usec);
+}
+
+static void test_loader(DB **dbs)
+{
+ gettimeofday(&starttime, NULL);
+ int r;
+ DB_TXN *txn;
+ DB_LOADER *loader;
+ uint32_t db_flags[MAX_DBS];
+ uint32_t dbt_flags[MAX_DBS];
+ uint32_t flags = DB_NOOVERWRITE;
+ if ( (DISALLOW_PUTS != 0) && (ALLOW_DUPS == 1) ) flags = 0;
+ for(int i=0;i<MAX_DBS;i++) {
+ db_flags[i] = flags;
+ dbt_flags[i] = 0;
+ }
+
+    uint32_t loader_flags = DISALLOW_PUTS | COMPRESS; // set with -p and -z options
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ hiwater_start = hiwater;
+ if (footprint_print) printf("%s:%d Hiwater=%ld water=%ld\n", __FILE__, __LINE__, hiwater, water);
+ r = env->create_loader(env, txn, &loader, dbs[0], NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+ if (footprint_print) printf("%s:%d Hiwater=%ld water=%ld\n", __FILE__, __LINE__, hiwater, water);
+ r = loader->set_error_callback(loader, NULL, NULL);
+ CKERR(r);
+ r = loader->set_poll_function(loader, poll_function, expect_poll_void);
+ CKERR(r);
+
+ // using loader->put, put values into DB
+ DBT key, val;
+ unsigned int k, v;
+ for(int i=1;i<=NUM_ROWS;i++) {
+ k = i;
+ v = generate_val(i, 0);
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ r = loader->put(loader, &key, &val);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, EINVAL);
+ } else {
+ CKERR(r);
+ }
+ if ( verbose) { if((i%10000) == 0){printf("."); fflush(stdout);} }
+ }
+ if ( verbose ) {printf("\n"); fflush(stdout);}
+
+ poll_count=0;
+
+ int n = count_temp(env->i->real_data_dir);
+ if (verbose) printf("Num temp files = %d\n", n);
+
+ did_start = true;
+ gettimeofday(&start, 0);
+
+ // close the loader
+ if ( verbose ) printf("%9.6fs closing\n", elapsed_time());
+ if (footprint_print) printf("%s:%d Hiwater=%ld water=%ld\n", __FILE__, __LINE__, hiwater, water);
+ r = loader->close(loader);
+ if (footprint_print) printf("%s:%d Hiwater=%ld water=%ld (extra hiwater=%ldM)\n", __FILE__, __LINE__, hiwater, water, (hiwater-hiwater_start)/(1024*1024));
+ if ( verbose ) printf("%9.6fs done\n", elapsed_time());
+ CKERR2s(r,0,TOKUDB_CANCELED);
+
+ if (r==0) {
+ if ( DISALLOW_PUTS == 0 ) {
+ if (poll_count == 0) printf("%s:%d\n", __FILE__, __LINE__);
+ assert(poll_count>0);
+ }
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ // verify the DBs
+ if ( CHECK_RESULTS ) {
+ check_results(dbs);
+ delete_all(dbs);
+ }
+
+ } else {
+ r = txn->abort(txn);
+ CKERR(r);
+ }
+}
+
+static const char *envdir = TOKU_TEST_FILENAME;
+const char *tmp_subdir = "tmp.subdir";
+
+#define OLDDATADIR "../../../../tokudb.data/"
+const char *db_v4_dir = OLDDATADIR "env_preload.4.1.1.emptydictionaries.cleanshutdown";
+
+static void setup(void) {
+ int r;
+ int len = 256;
+ char syscmd[len];
+ const char * src_db_dir;
+
+ src_db_dir = db_v4_dir;
+
+ r = snprintf(syscmd, len, "cp -r %s %s", src_db_dir, envdir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+}
+
+static void run_test(void)
+{
+ int r;
+
+ int cmdlen = strlen(envdir) + strlen(tmp_subdir) + 10;
+ char tmpdir[cmdlen];
+ r = snprintf(tmpdir, cmdlen, "%s/%s", envdir, tmp_subdir);
+ assert(r<cmdlen);
+
+ // first delete anything left from previous run of this test
+ {
+ int len = strlen(envdir) + 20;
+ char syscmd[len];
+ r = snprintf(syscmd, len, "rm -rf %s", envdir);
+ assert(r<len);
+ r = system(syscmd); CKERR(r);
+ }
+ if (upgrade_test) {
+ setup();
+ }
+ else {
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = toku_os_mkdir(tmpdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ }
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_tmp_dir(env, tmp_subdir); CKERR(r);
+
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ if ( verbose ) printf("CACHESIZE = %d MB\n", CACHESIZE);
+ r = env->set_cachesize(env, CACHESIZE / 1024, (CACHESIZE % 1024)*1024*1024, 1); CKERR(r);
+ if (datadir) {
+ r = env->set_data_dir(env, datadir); CKERR(r);
+ }
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 60); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ generate_permute_tables();
+
+ // -------------------------- //
+ test_loader(dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+        r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ if (verbose >= 2)
+ print_engine_status(env);
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+
+ run_test();
+
+ if (progress_infos) {
+ if (verbose>=2) {
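+            // Normalize each sample's time/progress by the final ratio; a load
+            // whose progress is linear in elapsed time prints 1.000 throughout.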
+ double ratio=progress_infos[progress_infos_count-1].time/progress_infos[progress_infos_count-1].progress;
+ printf("Progress ratios:\n");
+ for (int i=0; i<progress_infos_count; i++) {
+ printf(" %5.3f\n", (progress_infos[i].time/progress_infos[i].progress)/ratio);
+ }
+ }
+ toku_free(progress_infos);
+ }
+ if (footprint_print) {
+ printf("%s:%d Hiwater=%ld water=%ld (extra hiwater=%ldM) mcount=%lld fcount=%lld\n", __FILE__, __LINE__, hiwater, water, (hiwater-hiwater_start)/(1024*1024), mcount, fcount);
+ typedef void (*malloc_stats_fun_t)(void);
+ malloc_stats_fun_t malloc_stats_f = (malloc_stats_fun_t) dlsym(RTLD_DEFAULT, "malloc_stats");
+ if (malloc_stats_f) {
+ malloc_stats_f();
+ }
+ }
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+
+ // Must look for "-f" right away before we malloc anything.
+ for (int i=1; i<argc; i++) {
+
+        if (strcmp(argv[i], "-f") == 0) {
+ db_env_set_func_malloc(my_malloc);
+ db_env_set_func_realloc(my_realloc);
+ db_env_set_func_free(my_free);
+ }
+ }
+
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ CACHESIZE = (toku_os_get_phys_memory_size() / (1024*1024))/2; //MB
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -c -d <num_dbs> -r <num_rows> [ -b <num_calls> ] [-m <megabytes>] [-M]\n%s\n", cmd);
+ fprintf(stderr, " where -d <num_dbs> is the number of dictionaries to build (primary & secondary). (Default=%d)\n", NUM_DBS);
+ fprintf(stderr, " -b <num_calls> causes the poll function to return nonzero after <num_calls>\n");
+ fprintf(stderr, " -e <env> uses <env> to construct the directory (so that different tests can run concurrently)\n");
+ fprintf(stderr, " -m <m> use m MB of memory for the cachetable (default is %d MB)\n", CACHESIZE);
+ fprintf(stderr, " -M use %d MB of memory for the cachetable\n", old_default_cachesize);
+ fprintf(stderr, " -s use size factor of 1 and count temporary files\n");
+ fprintf(stderr, " -f print memory footprint information at various points in the load\n");
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-d")==0) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-e")==0) {
+ argc--; argv++;
+ envdir = argv[0];
+ } else if (strcmp(argv[0], "-f")==0) {
+ footprint_print = true;
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-c")==0) {
+ CHECK_RESULTS = 1;
+ } else if (strcmp(argv[0], "-p")==0) {
+ DISALLOW_PUTS = LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-z")==0) {
+ COMPRESS = LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-m")==0) {
+ argc--; argv++;
+ CACHESIZE = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-M")==0) {
+ CACHESIZE = old_default_cachesize;
+ } else if (strcmp(argv[0], "-y")==0) {
+ ALLOW_DUPS = 1;
+ } else if (strcmp(argv[0], "-s")==0) {
+ //printf("\nTesting loader with size_factor=1\n");
+ db_env_set_loader_size_factor(1);
+ } else if (strcmp(argv[0], "-b")==0) {
+ argc--; argv++;
+ char *end;
+ errno=0;
+ bomb_after_poll_count = strtoll(argv[0], &end, 10);
+ assert(errno==0);
+ assert(*end==0); // make sure we consumed the whole integer.
+ } else if (strcmp(argv[0], "--datadir") == 0 && argc > 1) {
+ argc--; argv++;
+ datadir = argv[0];
+ } else if (strcmp(argv[0], "--dont_check_est") == 0) {
+ check_est = false;
+ } else if (strcmp(argv[0], "-u")==0) {
+ upgrade_test = true;
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-stress-test.cc b/storage/tokudb/PerconaFT/src/tests/loader-stress-test.cc
new file mode 100644
index 00000000..dfd7053d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-stress-test.cc
@@ -0,0 +1,697 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/**************
+ *
+ * NOTE: This test is used for upgrade testing as well as for exercising the loader.
+ * Changes should not be made gratuitously.
+ * The 4.2.0 version of this test was used to create many of the preloaded
+ * environments in the <svn-top>/tokudb/tokudb.data directory.
+ */
+
+
+// Need to use malloc for the malloc instrumentation tests
+#ifndef TOKU_ALLOW_DEPRECATED
+#define TOKU_ALLOW_DEPRECATED
+#endif
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+#include <memory.h>
+#include <dlfcn.h>
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+enum {MAX_DBS=1024};
+int NUM_DBS=5;
+int NUM_ROWS=100000;
+int VALSIZE=sizeof(unsigned int);
+int CHECK_RESULTS=0;
+int DISALLOW_PUTS=0;
+int COMPRESS=0;
+enum { old_default_cachesize=1024 }; // MB
+int CACHESIZE=old_default_cachesize;
+int ALLOW_DUPS=0;
+enum {MAGIC=311};
+char *datadir = NULL;
+bool check_est = true; // do check the estimates by default
+bool footprint_print = false; // print memory footprint info
+bool upgrade_test = false;
+
+// Code for showing memory footprint information.
+pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;
+size_t hiwater;
+size_t water;
+size_t hiwater_start;
+static long long mcount = 0, fcount=0;
+
+static void my_free(void*p) {
+ if (p) {
+ water-=toku_malloc_usable_size(p);
+ }
+ free(p);
+}
+
+static void *my_malloc(size_t size) {
+ void *r = malloc(size);
+ if (r) {
+ water += toku_malloc_usable_size(r);
+ if (water>hiwater) hiwater=water;
+ }
+ return r;
+}
+
+static void *my_realloc(void *p, size_t size) {
+ size_t old_usable = p ? toku_malloc_usable_size(p) : 0;
+ void *r = realloc(p, size);
+ if (r) {
+ water -= old_usable;
+ water += toku_malloc_usable_size(r);
+ }
+ return r;
+}
+
+//
+// Functions to create unique key/value pairs, row generators, checkers, ... for each of NUM_DBS
+//
+
+// a is the bit-wise permute table. For DB[i], permute bits as described in a[i] using 'twiddle32'
+// inv is the inverse bit-wise permute of a[]. To get the original value from a twiddled value, twiddle32 (again) with inv[]
+int a[MAX_DBS][32];
+int inv[MAX_DBS][32];
+
+static const char *loader_temp_prefix = "tokuld"; // #2536
+
+// return number of temp files
+static int
+count_temp(char * dirname) {
+ int n = 0;
+
+    DIR * dir = opendir(dirname);
+    assert(dir != NULL);
+
+ struct dirent *ent;
+ while ((ent=readdir(dir))) {
+ if ((ent->d_type==DT_REG || ent->d_type==DT_UNKNOWN) && strncmp(ent->d_name, loader_temp_prefix, 6)==0) {
+ n++;
+ if (verbose) {
+ printf("Temp files (%d)\n", n);
+ printf(" %s/%s\n", dirname, ent->d_name);
+ }
+ }
+ }
+ closedir(dir);
+ return n;
+}
+
+// rotate right and left functions
+static inline unsigned int rotr32(const unsigned int x, const unsigned int num) {
+ if (num == 0) {
+ return x;
+ } else {
+ const unsigned int n = num % 32;
+ return (x >> n) | ( x << (32 - n));
+ }
+}
+static inline unsigned int rotl32(const unsigned int x, const unsigned int num) {
+ if (num == 0) {
+ return x;
+ } else {
+ const unsigned int n = num % 32;
+ return (x << n) | ( x >> (32 - n));
+ }
+}
+
+static void generate_permute_tables(void) {
+ int i, j, tmp;
+ for(int db=0;db<MAX_DBS;db++) {
+ for(i=0;i<32;i++) {
+ a[db][i] = i;
+ }
+ for(i=0;i<32;i++) {
+ j = random() % (i + 1);
+ tmp = a[db][j];
+ a[db][j] = a[db][i];
+ a[db][i] = tmp;
+ }
+// if(db < NUM_DBS){ printf("a[%d] = ", db); for(i=0;i<32;i++) { printf("%2d ", a[db][i]); } printf("\n");}
+ for(i=0;i<32;i++) {
+ inv[db][a[db][i]] = i;
+ }
+ }
+}
+
+// permute bits of x based on permute table bitmap
+static unsigned int twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << a[db][i];
+ }
+ return b;
+}
+
+// permute bits of x based on inverse permute table bitmap
+static unsigned int inv_twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << inv[db][i];
+ }
+ return b;
+}
+
+// generate val from key, index
+static void generate_val(int key, int i, unsigned int*v) {
+ v[0] = rotl32((key + MAGIC), i);
+ for (unsigned w = 1; w < VALSIZE/sizeof(unsigned int); w++) {
+ v[w] = rotr32(v[w-1], 1);
+ }
+}
+
+static unsigned int pkey_for_val(int key, int i) {
+ return rotr32(key, i) - MAGIC;
+}
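+
+// Illustrative only (not part of the test): the key transform is invertible and
+// the first word of a generated value encodes the primary key, e.g. for a
+// secondary dictionary index j:
+//
+//     unsigned int k = 42, sk = twiddle32(k, j);
+//     unsigned int v[VALSIZE / sizeof(unsigned int)];
+//     generate_val(k, j, v);
+//     assert(inv_twiddle32(sk, j) == k);      // recover the primary key from sk
+//     assert(pkey_for_val(v[0], j) == k);     // recover the primary key from v[0]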
+
+// There is no handlerton in this test, so this function is a local replacement
+// for the handlerton's generate_row_for_put().
+static int put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *UU(src_val)) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ assert(src_db);
+ assert(dest_db != src_db);
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+ assert(which != 0);
+
+ {
+ assert(dest_key->flags==DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(unsigned int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(unsigned int));
+ dest_key->ulen = sizeof(unsigned int);
+ }
+ assert(dest_val->flags==DB_DBT_REALLOC);
+ if (dest_val->ulen < (unsigned)VALSIZE) {
+ dest_val->data = toku_xrealloc(dest_val->data, VALSIZE);
+ dest_val->ulen = VALSIZE;
+ }
+ unsigned int *new_key = (unsigned int *)dest_key->data;
+
+ *new_key = twiddle32(*(unsigned int*)src_key->data, which);
+ generate_val(*(unsigned int*)src_key->data, which, (unsigned int*)dest_val->data);
+
+ dest_key->size = sizeof(unsigned int);
+ dest_val->size = VALSIZE;
+ //data is already set above
+ }
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
+
+static int uint_cmp(const void *ap, const void *bp) {
+ unsigned int an = *(unsigned int *)ap;
+ unsigned int bn = *(unsigned int *)bp;
+ if (an < bn)
+ return -1;
+ if (an > bn)
+ return +1;
+ return 0;
+}
+
+static void check_results(DB **dbs) {
+ // verify trees
+ for (int j = 0;j < NUM_DBS; j++) {
+ int r = dbs[j]->verify_with_progress(dbs[j], NULL, NULL, 0, 0);
+ assert(r == 0);
+ }
+
+ // verify rows
+ for (int j = 0;j < NUM_DBS; j++) {
+ unsigned int prev_k = 0;
+
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+
+ int r;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ DBC *cursor;
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
+
+ // generate the expected keys
+ unsigned int *expected_key = (unsigned int *) toku_malloc(NUM_ROWS * sizeof (unsigned int));
+ for (int i = 0; i < NUM_ROWS; i++)
+ expected_key[i] = j == 0 ? (unsigned int)(i+1) : twiddle32(i+1, j);
+ // sort the keys
+ qsort(expected_key, NUM_ROWS, sizeof (unsigned int), uint_cmp);
+
+ unsigned int valcheck[VALSIZE/sizeof(unsigned int)];
+ for (int i = 0; i < NUM_ROWS+1; i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, DB_NOTFOUND);
+ break;
+ }
+ if (r == DB_NOTFOUND) {
+ assert(i == NUM_ROWS); // check that there are exactly NUM_ROWS in the dictionary
+ break;
+ }
+ CKERR(r);
+
+ k = *(unsigned int*)key.data;
+
+ unsigned int pkey_for_db_key = (j == 0) ? k : inv_twiddle32(k, j);
+ v = *(unsigned int*)val.data;
+ // test that we have the expected keys and values
+ assert((unsigned int)pkey_for_db_key == (unsigned int)pkey_for_val(v, j));
+
+
+// printf(" DB[%d] key = %10u, val = %10u, pkey_for_db_key = %10u, pkey_for_val=%10d\n", j, v, k, pkey_for_db_key, pkey_for_val(v, j));
+
+ // check the expected keys
+ assert(k == expected_key[i]);
+ generate_val(pkey_for_db_key, j, &valcheck[0]);
+ assert(val.size == (unsigned)VALSIZE);
+ assert(memcmp(val.data, &valcheck[0], VALSIZE)==0);
+
+ // check prev_key < key
+ if (i > 0)
+ assert(prev_k < k);
+
+ // update prev = current
+ prev_k = k;
+ }
+
+ toku_free(expected_key);
+
+ if ( verbose ) {printf("."); fflush(stdout);}
+ r = cursor->c_close(cursor);
+ CKERR(r);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+ if ( verbose ) printf("\nCheck OK\n");
+}
+
+static void *expect_poll_void = &expect_poll_void;
+static uint64_t poll_count=0;
+static uint64_t bomb_after_poll_count=UINT64_MAX;
+
+static struct progress_info {
+ double time;
+ double progress;
+} *progress_infos=NULL;
+static int progress_infos_count=0;
+static int progress_infos_limit=0;
+
+// timing
+static bool did_start=false;
+static struct timeval start;
+
+static int poll_function (void *extra, float progress) {
+ if (verbose>=2) {
+ assert(did_start);
+ struct timeval now;
+ gettimeofday(&now, 0);
+ double elapsed = now.tv_sec - start.tv_sec + 1e-6*(now.tv_usec - start.tv_usec);
+ printf("Progress: %6.6fs %5.1f%%\n", elapsed, progress*100);
+ if (progress_infos_count>=progress_infos_limit) {
+ progress_infos_limit = 2*progress_infos_limit + 1;
+ XREALLOC_N(progress_infos_limit, progress_infos);
+ }
+ progress_infos[progress_infos_count++] = (struct progress_info){elapsed, progress};
+ }
+ assert(extra==expect_poll_void);
+ assert(0.0<=progress && progress<=1.0);
+ poll_count++; // Calls to poll_function() are protected by a lock, so we don't have to do this atomically.
+ if (poll_count>bomb_after_poll_count)
+ return TOKUDB_CANCELED;
+ else
+ return 0;
+}
+
+static struct timeval starttime;
+static double elapsed_time (void) {
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ return now.tv_sec - starttime.tv_sec + 1e-6*(now.tv_usec - starttime.tv_usec);
+}
+
+static void test_loader(DB **dbs)
+{
+ gettimeofday(&starttime, NULL);
+ int r;
+ DB_TXN *txn;
+ DB_LOADER *loader;
+ uint32_t db_flags[MAX_DBS];
+ uint32_t dbt_flags[MAX_DBS];
+ uint32_t flags = DB_NOOVERWRITE;
+ if ( (DISALLOW_PUTS) && (ALLOW_DUPS == 1) ) flags = 0;
+ for(int i=0;i<MAX_DBS;i++) {
+ db_flags[i] = flags;
+ dbt_flags[i] = 0;
+ }
+
+    uint32_t loader_flags = DISALLOW_PUTS | COMPRESS; // set with -p and -z options
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ hiwater_start = hiwater;
+ if (footprint_print) printf("%s:%d Hiwater=%ld water=%ld\n", __FILE__, __LINE__, hiwater, water);
+ r = env->create_loader(env, txn, &loader, dbs[0], NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+ if (footprint_print) printf("%s:%d Hiwater=%ld water=%ld\n", __FILE__, __LINE__, hiwater, water);
+ r = loader->set_error_callback(loader, NULL, NULL);
+ CKERR(r);
+ r = loader->set_poll_function(loader, poll_function, expect_poll_void);
+ CKERR(r);
+
+ // using loader->put, put values into DB
+ DBT key, val;
+ unsigned int k;
+ unsigned int v[VALSIZE/sizeof(unsigned int)];
+ for(int i=1;i<=NUM_ROWS;i++) {
+ k = i;
+ generate_val(i, 0, &v[0]);
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v[0], VALSIZE);
+ r = loader->put(loader, &key, &val);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, EINVAL);
+ } else {
+ CKERR(r);
+ }
+ if ( verbose) { if((i%10000) == 0){printf("."); fflush(stdout);} }
+ }
+ if ( verbose ) {printf("\n"); fflush(stdout);}
+
+ poll_count=0;
+
+ int n = count_temp(env->i->real_data_dir);
+ if (verbose) printf("Num temp files = %d\n", n);
+
+ did_start = true;
+ gettimeofday(&start, 0);
+
+ // close the loader
+ if ( verbose ) printf("%9.6fs closing\n", elapsed_time());
+ if (footprint_print) printf("%s:%d Hiwater=%ld water=%ld\n", __FILE__, __LINE__, hiwater, water);
+ r = loader->close(loader);
+ if (footprint_print) printf("%s:%d Hiwater=%ld water=%ld (extra hiwater=%ldM)\n", __FILE__, __LINE__, hiwater, water, (hiwater-hiwater_start)/(1024*1024));
+ if ( verbose ) printf("%9.6fs done\n", elapsed_time());
+ CKERR2s(r,0,TOKUDB_CANCELED);
+
+ if (r==0) {
+ if (!DISALLOW_PUTS) {
+ if (poll_count == 0) printf("%s:%d\n", __FILE__, __LINE__);
+ assert(poll_count>0);
+ }
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ // verify the DBs
+ if ( CHECK_RESULTS ) {
+ check_results(dbs);
+ }
+
+ if ( check_est ) {
+ for (int i=0; i<NUM_DBS; i++) {
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ DB_BTREE_STAT64 stats;
+ r = dbs[i]->stat64(dbs[i], txn, &stats);
+ CKERR(r);
+ if (verbose)
+ printf("NUM_ROWS=%d n_keys=%" PRIu64 " n_data=%" PRIu64 " dsize=%" PRIu64 " fsize=%" PRIu64 "\n",
+ NUM_ROWS, stats.bt_nkeys, stats.bt_ndata, stats.bt_dsize, stats.bt_fsize);
+ if (DISALLOW_PUTS) {
+ assert(stats.bt_nkeys == 0); // Fix as part of #4129. Was ==
+ assert(stats.bt_ndata == 0);
+ assert(stats.bt_dsize == 0);
+ } else {
+ assert(stats.bt_nkeys <= (uint64_t)NUM_ROWS); // Fix as part of #4129. Was ==
+ assert(stats.bt_ndata <= (uint64_t)NUM_ROWS);
+ assert(stats.bt_dsize == ((uint64_t)NUM_ROWS) * (sizeof(unsigned int) + VALSIZE));
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+ }
+ } else {
+ r = txn->abort(txn);
+ CKERR(r);
+ }
+}
+
+static const char *envdir = TOKU_TEST_FILENAME;
+const char *tmp_subdir = "tmp.subdir";
+
+static void run_test(void)
+{
+ int r;
+
+ if (upgrade_test) {
+ // cmake set up the environment
+ }
+ else {
+ toku_os_recursive_delete(envdir);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char tmpdir[TOKU_PATH_MAX+1];
+ toku_path_join(tmpdir, 2, envdir, tmp_subdir);
+ r = toku_os_mkdir(tmpdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ }
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_tmp_dir(env, tmp_subdir); CKERR(r);
+
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ if ( verbose ) printf("CACHESIZE = %d MB\n", CACHESIZE);
+ r = env->set_cachesize(env, CACHESIZE / 1024, (CACHESIZE % 1024)*1024*1024, 1); CKERR(r);
+ if (datadir) {
+ r = env->set_data_dir(env, datadir); CKERR(r);
+ }
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 60); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ generate_permute_tables();
+
+ // -------------------------- //
+ test_loader(dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+        r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ if (verbose >= 2)
+ print_engine_status(env);
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+
+ run_test();
+
+ if (progress_infos) {
+ if (verbose>=2) {
+ double ratio=progress_infos[progress_infos_count-1].time/progress_infos[progress_infos_count-1].progress;
+ printf("Progress ratios:\n");
+ for (int i=0; i<progress_infos_count; i++) {
+ printf(" %5.3f\n", (progress_infos[i].time/progress_infos[i].progress)/ratio);
+ }
+ }
+ toku_free(progress_infos);
+ }
+ if (footprint_print) {
+ printf("%s:%d Hiwater=%ld water=%ld (extra hiwater=%ldM) mcount=%lld fcount=%lld\n", __FILE__, __LINE__, hiwater, water, (hiwater-hiwater_start)/(1024*1024), mcount, fcount);
+ typedef void (*malloc_stats_fun_t)(void);
+ malloc_stats_fun_t malloc_stats_f = (malloc_stats_fun_t) dlsym(RTLD_DEFAULT, "malloc_stats");
+ if (malloc_stats_f) {
+ malloc_stats_f();
+ }
+ }
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+
+ // Must look for "-f" right away before we malloc anything.
+ for (int i=1; i<argc; i++) {
+
+        if (strcmp(argv[i], "-f") == 0) {
+ db_env_set_func_malloc(my_malloc);
+ db_env_set_func_realloc(my_realloc);
+ db_env_set_func_free(my_free);
+ }
+ }
+
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ CACHESIZE = (toku_os_get_phys_memory_size() / (1024*1024))/2; //MB
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -c -d <num_dbs> -r <num_rows> [ -b <num_calls> ] [-m <megabytes>] [-M]\n%s\n", cmd);
+ fprintf(stderr, " where -d <num_dbs> is the number of dictionaries to build (primary & secondary). (Default=%d)\n", NUM_DBS);
+ fprintf(stderr, " -b <num_calls> causes the poll function to return nonzero after <num_calls>\n");
+ fprintf(stderr, " -m <m> use m MB of memory for the cachetable (default is %d MB)\n", CACHESIZE);
+ fprintf(stderr, " -M use %d MB of memory for the cachetable\n", old_default_cachesize);
+ fprintf(stderr, " -s use size factor of 1 and count temporary files\n");
+ fprintf(stderr, " -f print memory footprint information at various points in the load\n");
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-d")==0) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-f")==0) {
+ footprint_print = true;
+ } else if (strcmp(argv[0], "--valsize")==0) {
+ argc--; argv++;
+ VALSIZE=atoi(argv[0]);
+ VALSIZE -= VALSIZE % sizeof(unsigned int);
+ if ( VALSIZE < (int)sizeof(unsigned int) ) {
+ fprintf(stderr, "--valsize must be multiple of %d\n", (int)sizeof(unsigned int));
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-c")==0) {
+ CHECK_RESULTS = 1;
+ } else if (strcmp(argv[0], "-p")==0) {
+ DISALLOW_PUTS = LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-z")==0) {
+ COMPRESS = LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-m")==0) {
+ argc--; argv++;
+ CACHESIZE = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-M")==0) {
+ CACHESIZE = old_default_cachesize;
+ } else if (strcmp(argv[0], "-y")==0) {
+ ALLOW_DUPS = 1;
+ } else if (strcmp(argv[0], "-s")==0) {
+ //printf("\nTesting loader with size_factor=1\n");
+ db_env_set_loader_size_factor(1);
+ } else if (strcmp(argv[0], "-b")==0) {
+ argc--; argv++;
+ char *end;
+ errno=0;
+ bomb_after_poll_count = strtoll(argv[0], &end, 10);
+ assert(errno==0);
+ assert(*end==0); // make sure we consumed the whole integer.
+ } else if (strcmp(argv[0], "--datadir") == 0 && argc > 1) {
+ argc--; argv++;
+ datadir = argv[0];
+ } else if (strcmp(argv[0], "--dont_check_est") == 0) {
+ check_est = false;
+ } else if (strcmp(argv[0], "-u")==0) {
+ upgrade_test = true;
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/loader-tpch-load.cc b/storage/tokudb/PerconaFT/src/tests/loader-tpch-load.cc
new file mode 100644
index 00000000..b2ecb253
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/loader-tpch-load.cc
@@ -0,0 +1,508 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+enum {MAX_DBS=16};
+enum {MAX_ROW_LEN=1024};
+static int NUM_DBS=10;
+static int DISALLOW_PUTS=0;
+static int COMPRESS=0;
+static int USE_REGION=0;
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static int generate_rows_for_region(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) __attribute__((unused));
+static int generate_rows_for_lineitem(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) __attribute__((unused));
+
+// linenumber,orderkey form a unique, primary key
+// key is a potentially duplicate secondary key
+struct tpch_key {
+ uint32_t linenumber;
+ uint32_t orderkey;
+ uint32_t key;
+};
+
+static __attribute__((__unused__)) int
+tpch_dbt_cmp (DB *db, const DBT *a, const DBT *b) {
+ assert(db && a && b);
+ assert(a->size == sizeof(struct tpch_key));
+ assert(b->size == sizeof(struct tpch_key));
+
+ unsigned int xl = (*((struct tpch_key *) a->data)).linenumber;
+ unsigned int xo = (*((struct tpch_key *) a->data)).orderkey;
+ unsigned int xk = (*((struct tpch_key *) a->data)).key;
+
+ unsigned int yl = (*((struct tpch_key *) b->data)).linenumber;
+ unsigned int yo = (*((struct tpch_key *) b->data)).orderkey;
+ unsigned int yk = (*((struct tpch_key *) b->data)).key;
+
+// printf("tpch_dbt_cmp xl:%d, yl:%d, xo:%d, yo:%d, xk:%d, yk:%d\n", xl, yl, xo, yo, xk, yk);
+
+ if (xk<yk) return -1;
+ if (xk>yk) return 1;
+
+ if (xl<yl) return -1;
+ if (xl>yl) return 1;
+
+ if (xo>yo) return -1;
+ if (xo<yo) return 1;
+ return 0;
+}
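+
+// Worked ordering example (the values are made up, only to restate the comparator
+// above): keys sort by `key` first, then `linenumber`, then `orderkey` in
+// *descending* order (note the inverted xo/yo comparison). So for
+// a = {linenumber=1, orderkey=9, key=5} and b = {linenumber=1, orderkey=3, key=5},
+// tpch_dbt_cmp() returns -1 and a sorts before b.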
+
+
+static int lineno = 0;
+static char *tpch_read_row(FILE *fp, int *key, char *val)
+{
+ *key = lineno++;
+ return fgets(val, MAX_ROW_LEN , fp);
+}
+
+
+/*
+ * split '|' separated fields into fields array
+ */
+static void tpch_parse_row(char *row, char *fields[], int fields_N)
+{
+ int field = 0;
+ int i = 0;
+ int p = 0;
+ char c = row[p];
+
+ while(c != '\0')
+ {
+ if ( c == '|') {
+ fields[field][i] = '\0';
+ //printf("field : <%s>\n", fields[field]);
+ field++;
+ i = 0;
+ }
+ else
+ fields[field][i++] = c;
+ c = row[++p];
+ }
+ assert(field == fields_N);
+}
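+
+// Illustrative example (the row contents are made up): for a buffer holding
+// "0|AFRICA|sample comment|" and fields = {regionkey, name, comment},
+// tpch_parse_row() copies "0" into regionkey, "AFRICA" into name and
+// "sample comment" into comment. Every field, including the last one, must be
+// terminated by '|' or the final assert(field == fields_N) fires.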
+
+/*
+ * region table
+ */
+
+static int generate_rows_for_region(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val)
+{
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ // not used
+ (void) src_db;
+ (void) src_key;
+ assert(*(uint32_t*)dest_db->app_private == 0);
+
+ // region fields
+ char regionkey[8];
+ char name[32];
+ char comment[160];
+ char row[8+32+160+8];
+ sprintf(row, "%s", (char*)src_val->data);
+
+ const uint32_t fields_N = 3;
+ char *fields[3] = {regionkey, name, comment};
+ tpch_parse_row(row, fields, fields_N);
+
+ if (dest_key->flags==DB_DBT_REALLOC) {
+ if (dest_key->data) toku_free(dest_key->data);
+ dest_key->flags = 0;
+ dest_key->ulen = 0;
+ }
+ if (dest_val->flags==DB_DBT_REALLOC) {
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_val->flags = 0;
+ dest_val->ulen = 0;
+ }
+
+ struct tpch_key *XMALLOC(key);
+ key->orderkey = atoi(regionkey);
+ key->linenumber = atoi(regionkey);
+ key->key = atoi(regionkey);
+
+ char *XMALLOC_N(sizeof(row), val);
+ sprintf(val, "%s|%s", name, comment);
+
+ dbt_init(dest_key, key, sizeof(struct tpch_key));
+ dest_key->flags = DB_DBT_REALLOC;
+
+ dbt_init(dest_val, val, strlen(val)+1);
+ dest_val->flags = DB_DBT_REALLOC;
+
+ return 0;
+}
+
+/*
+ * lineitem table
+ */
+
+
+static int generate_rows_for_lineitem(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val)
+{
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ // not used
+ (void) src_db;
+ (void) src_key;
+
+ // lineitem fields
+ char orderkey[16];
+ char partkey[16];
+ char suppkey[16];
+ char linenumber[8];
+ char quantity[8];
+ char extendedprice[16];
+ char discount[8];
+ char tax[8];
+ char returnflag[8];
+ char linestatus[8];
+ char shipdate[16];
+ char commitdate[16];
+ char receiptdate[16];
+ char shipinstruct[32];
+ char shipmode[16];
+ char comment[48];
+ char row[16+16+16+8+8+16+8+8+8+8+16+16+16+32+16+48 + 8];
+ sprintf(row, "%s", (char*)src_val->data);
+
+ const uint32_t fields_N = 16;
+ char *fields[16] = {orderkey,
+ partkey,
+ suppkey,
+ linenumber,
+ quantity,
+ extendedprice,
+ discount,
+ tax,
+ returnflag,
+ linestatus,
+ shipdate,
+ commitdate,
+ receiptdate,
+ shipinstruct,
+ shipmode,
+ comment};
+ tpch_parse_row(row, fields, fields_N);
+
+ if (dest_key->flags==DB_DBT_REALLOC) {
+ if (dest_key->data) toku_free(dest_key->data);
+ dest_key->flags = 0;
+ dest_key->ulen = 0;
+ }
+ if (dest_val->flags==DB_DBT_REALLOC) {
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_val->flags = 0;
+ dest_val->ulen = 0;
+ }
+
+ struct tpch_key *XMALLOC(key);
+ key->orderkey = atoi(linenumber);
+ key->linenumber = atoi(orderkey);
+
+ char *val;
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+
+ if ( which == 0 ) {
+ val = toku_xstrdup(row);
+ }
+ else {
+ val = toku_xstrdup(orderkey);
+ }
+
+ switch(which) {
+ case 0:
+ key->key = atoi(linenumber);
+ break;
+ case 1:
+ // lineitem_fk1
+ key->key = atoi(orderkey);
+ break;
+ case 2:
+ // lineitem_fk2
+ key->key = atoi(suppkey);
+ break;
+ case 3:
+ // lineitem_fk3
+ key->key = atoi(partkey);// not really, ...
+ break;
+ case 4:
+ // lineitem_fk4
+ key->key = atoi(partkey);
+ break;
+ case 5:
+ // li_shp_dt_idx
+ key->key = atoi(linenumber) + atoi(suppkey); // not really ...
+ break;
+ case 6:
+ key->key = atoi(linenumber) +atoi(partkey); // not really ...
+ break;
+ case 7:
+ // li_rcpt_dt_idx
+ key->key = atoi(suppkey) + atoi(partkey); // not really ...
+ break;
+ default:
+ assert(0);
+ }
+
+ dbt_init(dest_key, key, sizeof(struct tpch_key));
+ dest_key->flags = DB_DBT_REALLOC;
+
+ dbt_init(dest_val, val, strlen(val)+1);
+ dest_val->flags = DB_DBT_REALLOC;
+
+ return 0;
+}
+
+
+static void *expect_poll_void = &expect_poll_void;
+static int poll_count=0;
+static int poll_function (void *extra, float progress) {
+ if (0) {
+ static int did_one=0;
+ static struct timeval start;
+ struct timeval now;
+ gettimeofday(&now, 0);
+ if (!did_one) {
+ start=now;
+ did_one=1;
+ }
+ printf("%6.6f %5.1f%%\n", now.tv_sec - start.tv_sec + 1e-6*(now.tv_usec - start.tv_usec), progress*100);
+ }
+ assert(extra==expect_poll_void);
+ assert(0.0<=progress && progress<=1.0);
+ poll_count++;
+ return 0;
+}
+
+static int test_loader(DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_LOADER *loader;
+ uint32_t db_flags[MAX_DBS];
+ uint32_t dbt_flags[MAX_DBS];
+ for(int i=0;i<MAX_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ }
+ uint32_t loader_flags = DISALLOW_PUTS | COMPRESS; // set with -p option
+
+ FILE *fp;
+    // select which table to load
+ if ( USE_REGION ) {
+ fp = fopen("./region.tbl", "r");
+ if (fp == NULL) {
+ fprintf(stderr, "%s:%d %s\n", __FUNCTION__, __LINE__, strerror(errno));
+ return 1;
+ }
+ assert(fp != NULL);
+ } else {
+ fp = fopen("./lineitem.tbl", "r");
+ if (fp == NULL) {
+ fprintf(stderr, "%s:%d %s\n", __FUNCTION__, __LINE__, strerror(errno));
+ return 1;
+ }
+ assert(fp != NULL);
+ }
+
+ // create and initialize loader
+
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = env->create_loader(env, txn, &loader, dbs[0], NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+ r = loader->set_error_callback(loader, NULL, NULL);
+ CKERR(r);
+ r = loader->set_poll_function(loader, poll_function, expect_poll_void);
+ CKERR(r);
+
+ // using loader->put, put values into DB
+ printf("puts "); fflush(stdout);
+ DBT key, val;
+ int k;
+ char v[MAX_ROW_LEN];
+ char *c;
+ c = tpch_read_row(fp, &k, v);
+ int i = 1;
+ while ( c != NULL ) {
+ v[strlen(v)-1] = '\0'; // remove trailing \n
+ dbt_init(&key, &k, sizeof(int));
+ dbt_init(&val, v, strlen(v)+1);
+ r = loader->put(loader, &key, &val);
+ if (DISALLOW_PUTS) {
+ CKERR2(r, EINVAL);
+ } else {
+ CKERR(r);
+ }
+ if (verbose) { if((i++%10000) == 0){printf("."); fflush(stdout);} }
+ c = tpch_read_row(fp, &k, v);
+ }
+ if(verbose) {printf("\n"); fflush(stdout);}
+ fclose(fp);
+
+ poll_count=0;
+
+ // close the loader
+ printf("closing"); fflush(stdout);
+ r = loader->close(loader);
+ printf(" done\n");
+ CKERR(r);
+
+ if ( DISALLOW_PUTS == 0 ) assert(poll_count>0);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ return 0;
+}
+
+static int run_test(void)
+{
+ int r;
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+ r = env->set_default_bt_compare(env, tpch_dbt_cmp); CKERR(r);
+ // select which TPC-H table to load
+ if ( USE_REGION ) {
+ r = env->set_generate_row_callback_for_put(env, generate_rows_for_region); CKERR(r);
+ NUM_DBS=1;
+ }
+ else {
+ r = env->set_generate_row_callback_for_put(env, generate_rows_for_lineitem); CKERR(r);
+ NUM_DBS=8;
+ }
+
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ // -------------------------- //
+ int testr = test_loader(dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+        r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+
+ return testr;
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ int r = run_test();
+ return r;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -p -g\n%s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-p")==0) {
+ DISALLOW_PUTS = LOADER_DISALLOW_PUTS;
+ } else if (strcmp(argv[0], "-z")==0) {
+ COMPRESS = LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-g")==0) {
+ USE_REGION = 1;
+ } else if (strcmp(argv[0], "-e") == 0) {
+ argc--; argv++;
+ if (argc > 0)
+ envdir = argv[0];
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/locktree_escalation_stalls.cc b/storage/tokudb/PerconaFT/src/tests/locktree_escalation_stalls.cc
new file mode 100644
index 00000000..e6c1b18b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/locktree_escalation_stalls.cc
@@ -0,0 +1,264 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// This test ensures that lock escalation occurs on the big transaction thread.
+// locktree_escalation_stalls --max_i 1000000000 --n_small 16 --verbose
+
+#include "test.h"
+#include <db.h>
+#include "toku_time.h"
+#include "toku_pthread.h"
+
+// from #include "threaded_stress_test_helpers.h"
+// For each line of engine status output, look for lines that contain substrings
+// that match any of the strings in the pattern string. The pattern string contains
+// 0 or more strings separated by the '|' character, kind of like a regex.
+static void print_matching_engine_status_rows(DB_ENV *env, const char *pattern) {
+ uint64_t num_rows;
+ env->get_engine_status_num_rows(env, &num_rows);
+ uint64_t buf_size = num_rows * 128;
+ const char *row;
+ char *row_r;
+
+ char *pattern_copy = toku_xstrdup(pattern);
+ int num_patterns = 1;
+ for (char *p = pattern_copy; *p != '\0'; p++) {
+ if (*p == '|') {
+ *p = '\0';
+ num_patterns++;
+ }
+ }
+
+ char *XMALLOC_N(buf_size, buf);
+ int r = env->get_engine_status_text(env, buf, buf_size);
+ invariant_zero(r);
+
+ for (row = strtok_r(buf, "\n", &row_r); row != nullptr; row = strtok_r(nullptr, "\n", &row_r)) {
+ const char *p = pattern_copy;
+ for (int i = 0; i < num_patterns; i++, p += strlen(p) + 1) {
+ if (strstr(row, p) != nullptr) {
+ fprintf(stderr, "%s\n", row);
+ }
+ }
+ }
+
+ toku_free(pattern_copy);
+ toku_free(buf);
+ fflush(stderr);
+}
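+
+// Usage sketch (the pattern string here is only an example): a call such as
+//   print_matching_engine_status_rows(env, "locktree|cachetable");
+// prints every engine status row whose text contains "locktree" or "cachetable".
+// big_test() below passes the single pattern "locktree" to report lock
+// escalation activity whenever a put stalls for a second or more.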
+
+static volatile int killed = 0;
+
+// in a big transaction, insert a bunch of rows.
+static void big_test(DB_ENV *env, DB *db, uint64_t max_i) {
+ if (verbose)
+ fprintf(stderr, "%u %s\n", toku_os_gettid(), __FUNCTION__);
+ int r;
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ assert(r == 0);
+
+ for (uint64_t i = 0; !killed && i < max_i; i++) {
+ uint64_t k = htonl(i);
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &i, .size = sizeof i };
+ uint64_t t_start = toku_current_time_microsec();
+ r = db->put(db, txn, &key, &val, 0);
+ assert(r == 0);
+ uint64_t t_end = toku_current_time_microsec();
+ uint64_t t_delta = t_end - t_start;
+ if (t_delta >= 1000000) {
+ fprintf(stderr, "%u %s i=%" PRIu64 " %" PRIu64 "\n", toku_os_gettid(), __FUNCTION__, i, t_delta);
+ if (verbose)
+ print_matching_engine_status_rows(env, "locktree");
+ }
+
+ toku_pthread_yield();
+ }
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+}
+
+// insert a row in a single transaction.
+static void small_test(DB_ENV *env, DB *db, uint64_t max_i) {
+ if (verbose)
+ fprintf(stderr, "%u %s\n", toku_os_gettid(), __FUNCTION__);
+ int r;
+ uint64_t k = toku_os_gettid(); // get a unique number
+ for (uint64_t i = 0; !killed && i < max_i; i++) {
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ assert(r == 0);
+
+ DBT key = { .data = &k, .size = sizeof k };
+ DBT val = { .data = &i, .size = sizeof i };
+ uint64_t t_start = toku_current_time_microsec();
+ r = db->put(db, txn, &key, &val, 0);
+ assert(r == 0);
+ uint64_t t_end = toku_current_time_microsec();
+ uint64_t t_delta = t_end - t_start;
+ if (t_delta >= 1000000) {
+ fprintf(stderr, "%u %s %" PRIu64 "\n", toku_os_gettid(), __FUNCTION__, t_delta);
+ assert(0);
+ }
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ toku_pthread_yield();
+ }
+}
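+
+// Taken together (a restatement of the two functions above): big_test holds a
+// single transaction open across up to max_i puts, which is what drives lock
+// escalation, while each small_test iteration commits immediately and asserts
+// that none of its puts ever stalls for a second or more.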
+
+struct test_args {
+ DB_ENV *env;
+ DB *db;
+ uint64_t max_i;
+ void (*test_f)(DB_ENV *env, DB *db, uint64_t max_i);
+};
+
+static void *test_f(void *args) {
+ struct test_args *test_args = (struct test_args *) args;
+ test_args->test_f(test_args->env, test_args->db, test_args->max_i);
+ return args;
+}
+
+static void run_test(uint64_t max_i, int n_small) {
+ int r;
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->set_cachesize(env, 8, 0, 1);
+ assert(r == 0);
+ r = env->set_lk_max_memory(env, 1000000000);
+ assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK+DB_INIT_MPOOL+DB_INIT_TXN+DB_INIT_LOG + DB_CREATE + DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB *big_db = NULL;
+ r = db_create(&big_db, env, 0);
+ assert(r == 0);
+
+ r = big_db->open(big_db, NULL, "big", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB *small_db = NULL;
+ r = db_create(&small_db, env, 0);
+ assert(r == 0);
+
+ r = small_db->open(small_db, NULL, "small", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ struct test_args big_test_args = {
+ env, big_db, max_i, big_test,
+ };
+ toku_pthread_t big_id;
+ r = toku_pthread_create(
+ toku_uninstrumented, &big_id, nullptr, test_f, &big_test_args);
+ assert(r == 0);
+
+ struct test_args small_test_args[n_small];
+ toku_pthread_t small_id[n_small];
+ for (int i = 0; i < n_small; i++) {
+ small_test_args[i] = {env, small_db, max_i, small_test};
+ r = toku_pthread_create(toku_uninstrumented,
+ &small_id[i],
+ nullptr,
+ test_f,
+ &small_test_args[i]);
+ assert(r == 0);
+ }
+
+ void *big_ret;
+ r = toku_pthread_join(big_id, &big_ret);
+ assert(r == 0);
+
+ killed = 1;
+
+ for (int i = 0; i < n_small; i++) {
+ void *small_ret;
+ r = toku_pthread_join(small_id[i], &small_ret);
+ assert(r == 0);
+ }
+
+ r = small_db->close(small_db, 0);
+ assert(r == 0);
+
+ r = big_db->close(big_db, 0);
+ assert(r == 0);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+}
+
+int test_main (int argc, char * const argv[]) {
+ int r;
+ uint64_t max_i = 10000;
+ int n_small = 1;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0) verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--max_i") == 0 && i+1 < argc) {
+ max_i = atoll(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "--n_small") == 0 && i+1 < argc) {
+ n_small = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ run_test(max_i, n_small);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/manyfiles.cc b/storage/tokudb/PerconaFT/src/tests/manyfiles.cc
new file mode 100644
index 00000000..c82bf4c3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/manyfiles.cc
@@ -0,0 +1,123 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* How fast can we do insertions when there are many files? */
+
+#include <db.h>
+#include <sys/stat.h>
+
+#define NFILES 1000
+#define NINSERTS_PER 1000
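+// In total the test performs NFILES * NINSERTS_PER = 1,000,000 puts (1000 rows
+// into each of 1000 dictionaries) inside a single transaction; doit() reports
+// the aggregate insertions per second.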
+
+static DB_ENV *env;
+static DB *dbs[NFILES];
+DB_TXN *txn;
+
+static void
+test_setup (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ multiply_locks_for_n_dbs(env, NFILES);
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+
+ int i;
+
+ for (i=0; i<NFILES; i++) {
+ char fname[20];
+ snprintf(fname, sizeof(fname), "foo%d.db", i);
+ r=db_create(&dbs[i], env, 0); CKERR(r);
+	r = dbs[i]->set_pagesize(dbs[i], 4096); CKERR(r);
+ r=dbs[i]->open(dbs[i], txn, fname, 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ }
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static void
+test_shutdown (void) {
+ int i;
+ int r;
+ for (i=0; i<NFILES; i++) {
+ r= dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+ r= env->close(env, 0); CKERR(r);
+}
+
+static void
+doit (void) {
+ int j;
+ int r;
+ struct timeval startt, endt;
+ gettimeofday(&startt, 0);
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ for (j=0; j<NINSERTS_PER; j++) {
+ int i;
+ DBT key,data;
+ char str[10];
+ snprintf(str, sizeof(str), "%08d", j);
+ dbt_init(&key, str, 1+strlen(str));
+ dbt_init(&data, str, 1+strlen(str));
+ for (i=0; i<NFILES; i++) {
+ r = dbs[i]->put(dbs[i], txn, &key, &data, 0);
+ CKERR(r);
+ }
+ }
+ r=txn->commit(txn, 0); assert(r==0);
+ gettimeofday(&endt, 0);
+ long long ninserts = NINSERTS_PER * NFILES;
+ double diff = (endt.tv_sec - startt.tv_sec) + 1e-6*(endt.tv_usec-startt.tv_usec);
+ if (verbose) printf("%lld insertions in %9.6fs, %9.3f ins/s \n", ninserts, diff, ninserts/diff);
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+
+ test_setup();
+ doit();
+ test_shutdown();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/maxsize-for-loader.cc b/storage/tokudb/PerconaFT/src/tests/maxsize-for-loader.cc
new file mode 100644
index 00000000..78d91715
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/maxsize-for-loader.cc
@@ -0,0 +1,392 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "toku_random.h"
+
+bool fast = false;
+
+DB_ENV *env;
+enum {NUM_DBS=2};
+uint32_t USE_COMPRESS=0;
+
+bool do_check = false;
+uint32_t num_rows = 1;
+uint32_t which_db_to_fail = (uint32_t) -1;
+uint32_t which_row_to_fail = (uint32_t) -1;
+enum how_to_fail { FAIL_NONE, FAIL_KSIZE, FAIL_VSIZE } how_to_fail = FAIL_NONE;
+
+static struct random_data random_data[NUM_DBS];
+char random_buf[NUM_DBS][8];
+
+static int put_multiple_generate(DB *dest_db,
+ DB *src_db __attribute__((__unused__)),
+ DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals,
+ const DBT *src_key, const DBT *src_val __attribute__((__unused__))) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+ assert(src_key->size==4);
+ uint32_t rownum = *(uint32_t*)src_key->data;
+
+ uint32_t ksize, vsize;
+ const uint32_t kmax=32*1024, vmax=32*1024*1024;
+ if (which==which_db_to_fail && rownum==which_row_to_fail) {
+ switch (how_to_fail) {
+ case FAIL_NONE: ksize=kmax; vsize=vmax; goto gotsize;
+ case FAIL_KSIZE: ksize=kmax+1; vsize=vmax; goto gotsize;
+ case FAIL_VSIZE: ksize=kmax; vsize=vmax+1; goto gotsize;
+ }
+ assert(0);
+ gotsize:;
+ } else {
+ ksize=4; vsize=100;
+ }
+ assert(dest_key->flags==DB_DBT_REALLOC);
+ if (dest_key->ulen < ksize) {
+ dest_key->data = toku_xrealloc(dest_key->data, ksize);
+ dest_key->ulen = ksize;
+ }
+ assert(dest_val->flags==DB_DBT_REALLOC);
+ if (dest_val->ulen < vsize) {
+ dest_val->data = toku_xrealloc(dest_val->data, vsize);
+ dest_val->ulen = vsize;
+ }
+ assert(ksize>=sizeof(uint32_t));
+ for (uint32_t i=0; i<ksize; i++) ((char*)dest_key->data)[i] = myrandom_r(&random_data[which]);
+ for (uint32_t i=0; i<vsize; i++) ((char*)dest_val->data)[i] = myrandom_r(&random_data[which]);
+ *(uint32_t*)dest_key->data = rownum;
+ dest_key->size = ksize;
+ dest_val->size = vsize;
+
+ return 0;
+}
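+
+// Summary of the generator above (restating the code): for the designated
+// (which_db_to_fail, which_row_to_fail) row it emits a 32KiB key and a 32MiB
+// value; FAIL_KSIZE / FAIL_VSIZE push the key or value one byte past that
+// limit, so the loader is expected to reject the row with EINVAL (checked in
+// error_callback and again at loader->close). All other rows get a 4-byte key
+// and a 100-byte value.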
+
+struct error_extra {
+ int bad_i;
+ int error_count;
+};
+
+static void error_callback (DB *db __attribute__((__unused__)), int which_db, int err, DBT *key __attribute__((__unused__)), DBT *val __attribute__((__unused__)), void *extra) {
+ struct error_extra *e =(struct error_extra *)extra;
+ assert(which_db==(int)which_db_to_fail);
+ assert(err==EINVAL);
+ assert(e->error_count==0);
+ e->error_count++;
+}
+
+static void reset_random(void) {
+ int r;
+
+ for (int i = 0; i < NUM_DBS; i++) {
+ ZERO_STRUCT(random_data[i]);
+ ZERO_ARRAY(random_buf[i]);
+ r = myinitstate_r(i, random_buf[i], 8, &random_data[i]);
+ assert(r==0);
+ }
+}
+
+static void test_loader_maxsize(DB **dbs, DB **check_dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_LOADER *loader;
+ uint32_t db_flags[NUM_DBS];
+ uint32_t dbt_flags[NUM_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ }
+ uint32_t loader_flags = USE_COMPRESS; // set with -p option
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = env->create_loader(env, txn, &loader, nullptr, NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ assert(which_db_to_fail != 0);
+ CKERR(r);
+ struct error_extra error_extra = {.bad_i=0,.error_count=0};
+ r = loader->set_error_callback(loader, error_callback, (void*)&error_extra);
+ CKERR(r);
+ r = loader->set_poll_function(loader, NULL, NULL);
+ CKERR(r);
+
+ reset_random();
+ // using loader->put, put values into DB
+ DBT key, val;
+ unsigned int k, v;
+ for(uint32_t i=0;i<num_rows;i++) {
+ k = i;
+ v = i;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ r = loader->put(loader, &key, &val);
+ CKERR(r);
+ }
+
+ // close the loader
+ if (verbose) { printf("closing"); fflush(stdout); }
+ r = loader->close(loader);
+ if (verbose) { printf(" done\n"); }
+ switch(how_to_fail) {
+ case FAIL_NONE: assert(r==0); assert(error_extra.error_count==0); goto checked;
+ case FAIL_KSIZE: assert(r==EINVAL); assert(error_extra.error_count==1); goto checked;
+ case FAIL_VSIZE: assert(r==EINVAL); assert(error_extra.error_count==1); goto checked;
+ }
+ assert(0);
+ checked:
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ if (do_check && how_to_fail==FAIL_NONE) {
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ reset_random();
+ DBT keys[NUM_DBS];
+ DBT vals[NUM_DBS];
+ uint32_t flags[NUM_DBS];
+ for (int i = 0; i < NUM_DBS; i++) {
+ dbt_init_realloc(&keys[i]);
+ dbt_init_realloc(&vals[i]);
+ flags[i] = 0;
+ }
+
+ for(uint32_t i=0;i<num_rows;i++) {
+ k = i;
+ v = i;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ r = env_put_multiple_test_no_array(env, nullptr, txn, &key, &val, NUM_DBS, check_dbs, keys, vals, flags);
+ CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ for (int i = 0; i < NUM_DBS; i++) {
+ DBC *loader_cursor;
+ DBC *check_cursor;
+ r = dbs[i]->cursor(dbs[i], txn, &loader_cursor, 0);
+ CKERR(r);
+            r = check_dbs[i]->cursor(check_dbs[i], txn, &check_cursor, 0);
+ CKERR(r);
+ DBT loader_key;
+ DBT loader_val;
+ DBT check_key;
+ DBT check_val;
+ dbt_init_realloc(&loader_key);
+ dbt_init_realloc(&loader_val);
+ dbt_init_realloc(&check_key);
+ dbt_init_realloc(&check_val);
+ for (uint32_t x = 0; x <= num_rows; x++) {
+ int r_loader = loader_cursor->c_get(loader_cursor, &loader_key, &loader_val, DB_NEXT);
+ int r_check = check_cursor->c_get(check_cursor, &check_key, &check_val, DB_NEXT);
+ assert(r_loader == r_check);
+ if (x == num_rows) {
+ CKERR2(r_loader, DB_NOTFOUND);
+ CKERR2(r_check, DB_NOTFOUND);
+ } else {
+ CKERR(r_loader);
+ CKERR(r_check);
+ }
+ assert(loader_key.size == check_key.size);
+ assert(loader_val.size == check_val.size);
+ assert(memcmp(loader_key.data, check_key.data, loader_key.size) == 0);
+ assert(memcmp(loader_val.data, check_val.data, loader_val.size) == 0);
+ }
+ toku_free(loader_key.data);
+ toku_free(loader_val.data);
+ toku_free(check_key.data);
+ toku_free(check_val.data);
+ loader_cursor->c_close(loader_cursor);
+ check_cursor->c_close(check_cursor);
+ }
+
+ for (int i = 0; i < NUM_DBS; i++) {
+ toku_free(keys[i].data);
+ toku_free(vals[i].data);
+ dbt_init_realloc(&keys[i]);
+ dbt_init_realloc(&vals[i]);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+
+
+}
+
+char *free_me = NULL;
+const char *env_dir = TOKU_TEST_FILENAME; // the default env_dir
+
+static void create_and_open_dbs(DB **dbs, const char *suffix, int *idx) {
+ int r;
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ enum {MAX_NAME=128};
+ char name[MAX_NAME*2];
+
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x_%s", i, suffix);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+}
+
+static int
+uint_or_size_dbt_cmp (DB *db, const DBT *a, const DBT *b) {
+ assert(db && a && b);
+ if (a->size == sizeof(unsigned int) && b->size == sizeof(unsigned int)) {
+ return uint_dbt_cmp(db, a, b);
+ }
+ return a->size - b->size;
+}
+
+static void run_test(uint32_t nr, uint32_t wdb, uint32_t wrow, enum how_to_fail htf) {
+ num_rows = nr; which_db_to_fail = wdb; which_row_to_fail = wrow; how_to_fail = htf;
+
+ int r;
+ toku_os_recursive_delete(env_dir);
+ r = toku_os_mkdir(env_dir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, uint_or_size_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOG | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DB **XMALLOC_N(NUM_DBS, dbs);
+ DB **XMALLOC_N(NUM_DBS, check_dbs);
+ int idx[NUM_DBS];
+
+ create_and_open_dbs(dbs, "loader", &idx[0]);
+ if (do_check && how_to_fail==FAIL_NONE) {
+ create_and_open_dbs(check_dbs, "check", &idx[0]);
+ }
+
+ if (verbose) printf("running test_loader()\n");
+ // -------------------------- //
+ test_loader_maxsize(dbs, check_dbs);
+ // -------------------------- //
+ if (verbose) printf("done test_loader()\n");
+
+ for(int i=0;i<NUM_DBS;i++) {
+        r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ if (do_check && how_to_fail==FAIL_NONE) {
+            r = check_dbs[i]->close(check_dbs[i], 0); CKERR(r);
+ check_dbs[i] = NULL;
+ }
+ }
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+ toku_free(check_dbs);
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int num_rows_set = false;
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+
+ run_test(1, (uint32_t) -1, (uint32_t) -1, FAIL_NONE);
+ run_test(1, 1, 0, FAIL_NONE);
+ run_test(1, 1, 0, FAIL_KSIZE);
+ run_test(1, 1, 0, FAIL_VSIZE);
+ if (!fast) {
+ run_test(1000000, 1, 500000, FAIL_KSIZE);
+ run_test(1000000, 1, 500000, FAIL_VSIZE);
+ }
+ toku_free(free_me);
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: %s [-h] [-v] [-q] [-p] [-f]\n", cmd);
+ fprintf(stderr, " where -e <env> uses <env> to construct the directory (so that different tests can run concurrently)\n");
+ fprintf(stderr, " -h help\n");
+ fprintf(stderr, " -v verbose\n");
+ fprintf(stderr, " -q quiet\n");
+ fprintf(stderr, " -z compress intermediates\n");
+ fprintf(stderr, " -c compare with regular dbs\n");
+ fprintf(stderr, " -f fast (suitable for vgrind)\n");
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-c")==0) {
+ do_check = true;
+ } else if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-z")==0) {
+ USE_COMPRESS = LOADER_COMPRESS_INTERMEDIATES;
+ } else if (strcmp(argv[0], "-f")==0) {
+ fast = true;
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/medium-nested-commit-commit.cc b/storage/tokudb/PerconaFT/src/tests/medium-nested-commit-commit.cc
new file mode 100644
index 00000000..6adfdc93
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/medium-nested-commit-commit.cc
@@ -0,0 +1,152 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Test to see if a medium-size nested transaction (the nested pieces are not too big, but the whole thing is so big that its rollbacks spill into a file)
+ * can commit properly.
+ * Four Tests:
+ * big child aborts, parent aborts
+ * big child aborts, parent commits
+ * big child commits, parent aborts
+ * big child commits, parent commits (This test)
+ */
+
+#include <db.h>
+#include <sys/stat.h>
+
+static DB_ENV *env;
+static DB *db;
+static DB_TXN *xchild, *xparent;
+
+static void insert (int i) {
+ char hello[30], there[30];
+ DBT key,data;
+ if (verbose) printf("Insert %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ int r = db->put(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void lookup (int i, int expect, int expectj) {
+ char hello[30], there[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ memset(&data, 0, sizeof(data));
+ if (verbose) printf("Looking up %d (expecting %s)\n", i, expect==0 ? "to find" : "not to find");
+ int r = db->get(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ &data,
+ 0);
+ assert(expect==r);
+ if (expect==0) {
+ CKERR(r);
+ snprintf(there, sizeof(there), "there%d", expectj);
+ assert(data.size==strlen(there)+1);
+ assert(strcmp((char*)data.data, there)==0);
+ }
+}
+
+const int N = 50000;
+const int DIV = 10;
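+// With N=50000 and DIV=10, each child transaction inserts N/DIV = 5000 rows and
+// commits into its parent; the parent ends up holding all 50000, which is what
+// makes its rollback log large enough to spill into a file.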
+
+static void
+test_commit_commit (void) {
+ int i, j, k, r;
+ r=env->txn_begin(env, 0, &xparent, 0); CKERR(r);
+ k=0;
+ for (j=0; j<DIV; j++) {
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ for (i=0; i<N/DIV; i++) {
+ insert(k);
+ k++;
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ }
+ k=0;
+ for (j=0; j<DIV; j++) {
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ for (i=0; i<N/DIV; i++) {
+ lookup(k, 0, k);
+ k++;
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ }
+ r=xparent->commit(xparent, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &xchild, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ lookup(i, 0, i);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+}
+
+static void
+test_setup (void) {
+ DB_TXN *txn;
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static void
+test_shutdown (void) {
+ int r;
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ test_setup();
+ test_commit_commit();
+ test_shutdown();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/multiprocess.cc b/storage/tokudb/PerconaFT/src/tests/multiprocess.cc
new file mode 100644
index 00000000..3c8cd60d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/multiprocess.cc
@@ -0,0 +1,234 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/stat.h>
+
+#include "test.h"
+
+static void
+test_env (const char *envdir0, const char *envdir1, int expect_open_return) {
+ int r;
+ toku_os_recursive_delete(envdir0);
+ r = toku_os_mkdir(envdir0, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ if (strcmp(envdir0, envdir1) != 0) {
+ toku_os_recursive_delete(envdir1);
+ r = toku_os_mkdir(envdir1, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ }
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_redzone(env, 0);
+ CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | DB_RECOVER;
+ r = env->open(env, envdir0, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_ENV *env2;
+ r = db_env_create(&env2, 0);
+ CKERR(r);
+ r = env2->set_redzone(env2, 0);
+ CKERR(r);
+ r = env2->open(env2, envdir1, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR2(r, expect_open_return);
+
+ r = env->close(env, 0);
+ CKERR(r);
+
+ if (expect_open_return != 0) {
+ r = env2->open(env2, envdir1, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ }
+
+ r = env2->close(env2, 0);
+ CKERR(r);
+}
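+
+// Recap of the check above: test_env opens envdir0, then opens envdir1 with a
+// second handle. When the directories differ the second open succeeds; when
+// they are the same directory it is expected to fail with EWOULDBLOCK while the
+// first environment is still open, and to succeed once it has been closed.
+// test_datadir and test_logdir below repeat the pattern for shared data and
+// log directories.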
+
+static void
+test_datadir (const char *envdir0, const char *datadir0, const char *envdir1, const char *datadir1, int expect_open_return) {
+ char s[256];
+
+ int r;
+ sprintf(s, "rm -rf %s", envdir0);
+ r = system(s);
+ CKERR(r);
+ r = toku_os_mkdir(envdir0, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ sprintf(s, "rm -rf %s", datadir0);
+ r = system(s);
+ CKERR(r);
+ r = toku_os_mkdir(datadir0, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ sprintf(s, "rm -rf %s", envdir1);
+ r = system(s);
+ CKERR(r);
+ r = toku_os_mkdir(envdir1, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ sprintf(s, "rm -rf %s", datadir1);
+ r = system(s);
+ CKERR(r);
+ r = toku_os_mkdir(datadir1, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_redzone(env, 0);
+ CKERR(r);
+ r = env->set_data_dir(env, datadir0);
+ CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | DB_RECOVER;
+ r = env->open(env, envdir0, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_ENV *env2;
+ r = db_env_create(&env2, 0);
+ CKERR(r);
+ r = env2->set_redzone(env2, 0);
+ CKERR(r);
+ r = env2->set_data_dir(env2, datadir1);
+ CKERR(r);
+ r = env2->open(env2, envdir1, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR2(r, expect_open_return);
+
+ r = env->close(env, 0);
+ CKERR(r);
+
+ if (expect_open_return != 0) {
+ r = env2->open(env2, envdir1, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ }
+
+ r = env2->close(env2, 0);
+ CKERR(r);
+}
+static void
+test_logdir (const char *envdir0, const char *datadir0, const char *envdir1, const char *datadir1, int expect_open_return) {
+ char s[256];
+
+ int r;
+ sprintf(s, "rm -rf %s", envdir0);
+ r = system(s);
+ CKERR(r);
+ r = toku_os_mkdir(envdir0, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ sprintf(s, "rm -rf %s", datadir0);
+ r = system(s);
+ CKERR(r);
+ r = toku_os_mkdir(datadir0, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ sprintf(s, "rm -rf %s", envdir1);
+ r = system(s);
+ CKERR(r);
+ r = toku_os_mkdir(envdir1, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ sprintf(s, "rm -rf %s", datadir1);
+ r = system(s);
+ CKERR(r);
+ r = toku_os_mkdir(datadir1, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_redzone(env, 0);
+ CKERR(r);
+ r = env->set_lg_dir(env, datadir0);
+ CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | DB_RECOVER;
+ r = env->open(env, envdir0, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_ENV *env2;
+ r = db_env_create(&env2, 0);
+ CKERR(r);
+ r = env2->set_redzone(env2, 0);
+ CKERR(r);
+ r = env2->set_lg_dir(env2, datadir1);
+ CKERR(r);
+ r = env2->open(env2, envdir1, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR2(r, expect_open_return);
+
+ r = env->close(env, 0);
+ CKERR(r);
+
+ if (expect_open_return != 0) {
+ r = env2->open(env2, envdir1, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ }
+
+ r = env2->close(env2, 0);
+ CKERR(r);
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU|S_IRWXG|S_IRWXO);
+ assert_zero(r);
+
+ char env0[TOKU_PATH_MAX+1];
+ char env1[TOKU_PATH_MAX+1];
+ toku_path_join(env0, 2, TOKU_TEST_FILENAME, "e0");
+ toku_path_join(env1, 2, TOKU_TEST_FILENAME, "e1");
+ test_env(env0, env1, 0);
+ test_env(env0, env0, EWOULDBLOCK);
+ char wd[TOKU_PATH_MAX+1];
+ char *cwd = getcwd(wd, sizeof wd);
+ assert(cwd != nullptr);
+ char data0[TOKU_PATH_MAX+1];
+ toku_path_join(data0, 3, cwd, TOKU_TEST_FILENAME, "d0");
+ char data1[TOKU_PATH_MAX+1];
+ toku_path_join(data1, 3, cwd, TOKU_TEST_FILENAME, "d1");
+ test_datadir(env0, data0, env1, data1, 0);
+ test_datadir(env0, data0, env1, data0, EWOULDBLOCK);
+ test_logdir(env0, data0, env1, data1, 0);
+ test_logdir(env0, data0, env1, data0, EWOULDBLOCK);
+
+ toku_os_recursive_delete(env0);
+ toku_os_recursive_delete(env1);
+ toku_os_recursive_delete(data0);
+ toku_os_recursive_delete(data1);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/mvcc-create-table.cc b/storage/tokudb/PerconaFT/src/tests/mvcc-create-table.cc
new file mode 100644
index 00000000..5f8becb2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/mvcc-create-table.cc
@@ -0,0 +1,88 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test MVCC behavior against a freshly created dictionary: transactions that
+// began before the creating transaction committed must not be able to open a
+// cursor on it, and should get TOKUDB_MVCC_DICTIONARY_TOO_NEW instead.
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+
+ DB_TXN* txna = NULL;
+ DB_TXN* txnb = NULL;
+ DB_TXN* txnc = NULL;
+ DBC* c;
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txna, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ DBT key,val;
+ r = db->put(db, txna, dbt_init(&key, "a", 2), dbt_init(&val, "a", 2), 0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txnb, DB_TXN_SNAPSHOT); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnc, DB_READ_COMMITTED); CKERR(r);
+ r = db->cursor(db, txna, &c, 0); CKERR(r);
+ r = c->c_close(c); CKERR(r);
+ c = NULL;
+ r = txna->commit(txna, 0); CKERR(r);
+
+ r = db->cursor(db, txnb, &c, 0); assert(r == TOKUDB_MVCC_DICTIONARY_TOO_NEW);
+ r = db->cursor(db, txnc, &c, 0); assert(r == TOKUDB_MVCC_DICTIONARY_TOO_NEW);
+
+
+ r = txnb->commit(txnb, 0); CKERR(r);
+ r = txnc->commit(txnc, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/mvcc-many-committed.cc b/storage/tokudb/PerconaFT/src/tests/mvcc-many-committed.cc
new file mode 100644
index 00000000..3a89cb39
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/mvcc-many-committed.cc
@@ -0,0 +1,138 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test MVCC with many committed versions of the same row: each snapshot
+// transaction must see the value that was committed just before it began,
+// while a DB_READ_UNCOMMITTED transaction sees the most recently committed value.
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ uint32_t i = 0;
+ uint32_t num_read_txns = 1000;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+
+ DB_TXN* create_txn;
+ DB_TXN* read_txns[num_read_txns];
+ DB_TXN* read_uncommitted_txn;
+ memset(read_txns, 0, sizeof(read_txns));
+
+ r = env->txn_begin(env, NULL, &create_txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, create_txn, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ r = create_txn->commit(create_txn, 0); CKERR(r);
+
+ DBT key,val;
+
+ for (i = 0; i < num_read_txns; i++) {
+ DB_TXN* put_txn = NULL;
+ uint32_t data = i;
+ r = env->txn_begin(env, NULL, &put_txn, DB_TXN_SNAPSHOT);
+ CKERR(r);
+ r = db->put(
+ db,
+ put_txn,
+ dbt_init(&key, "a", 2),
+ dbt_init(&val, &data, 4),
+ 0
+ );
+ CKERR(r);
+ r = put_txn->commit(put_txn, 0);
+ CKERR(r);
+ //this should read the above put
+ r = env->txn_begin(env, NULL, &read_txns[i], DB_TXN_SNAPSHOT);
+ CKERR(r);
+
+ }
+
+ for (i = 0; i < num_read_txns; i++) {
+ DBT curr_key, curr_val;
+ memset(&curr_key, 0, sizeof(curr_key));
+ memset(&curr_val, 0, sizeof(curr_val));
+ DBC* snapshot_cursor = NULL;
+ r = db->cursor(db, read_txns[i], &snapshot_cursor, 0); CKERR(r);
+ r = snapshot_cursor->c_get(snapshot_cursor, &curr_key, &curr_val, DB_NEXT); CKERR(r);
+ assert(((char *)(curr_key.data))[0] == 'a');
+ assert((*(uint32_t *)(curr_val.data)) == i);
+ assert(curr_key.size == 2);
+ assert(curr_val.size == 4);
+ snapshot_cursor->c_close(snapshot_cursor);
+ }
+ {
+ DBT curr_key, curr_val;
+ memset(&curr_key, 0, sizeof(curr_key));
+ memset(&curr_val, 0, sizeof(curr_val));
+ r = env->txn_begin(env, NULL, &read_uncommitted_txn, DB_READ_UNCOMMITTED);
+ CKERR(r);
+ DBC* read_uncommitted_cursor = NULL;
+ r = db->cursor(db, read_uncommitted_txn, &read_uncommitted_cursor, 0); CKERR(r);
+ r = read_uncommitted_cursor->c_get(
+ read_uncommitted_cursor,
+ &curr_key,
+ &curr_val,
+ DB_NEXT
+ );
+ CKERR(r);
+ assert(((char *)(curr_key.data))[0] == 'a');
+ assert((*(uint32_t *)(curr_val.data)) == (num_read_txns - 1));
+ assert(curr_key.size == 2);
+ assert(curr_val.size == 4);
+ read_uncommitted_cursor->c_close(read_uncommitted_cursor);
+ }
+ for (i = 0; i < num_read_txns; i++) {
+ r = read_txns[i]->commit(read_txns[i], 0);
+ CKERR(r);
+ }
+ r = read_uncommitted_txn->commit(read_uncommitted_txn, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/mvcc-read-committed.cc b/storage/tokudb/PerconaFT/src/tests/mvcc-read-committed.cc
new file mode 100644
index 00000000..eb25bda9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/mvcc-read-committed.cc
@@ -0,0 +1,96 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test that isolation works right for subtransactions.
+// In particular, check that a DB_TXN_SNAPSHOT child transaction (whose snapshot predates a
+// later committed put) does not see that put, while a DB_READ_COMMITTED child transaction does.
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+
+ DB_TXN* txna = NULL;
+ DB_TXN* txnb = NULL;
+ DB_TXN* txnc = NULL;
+ DB_TXN* txnb_child = NULL;
+ DB_TXN* txnc_child = NULL;
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txna, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ r = txna->commit(txna, 0); CKERR(r);
+
+
+
+ r = env->txn_begin(env, NULL, &txnb, DB_TXN_SNAPSHOT); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnc, DB_READ_COMMITTED); CKERR(r);
+
+ DBT key,val;
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+ r = db->put(db, txna, dbt_init(&key, "a", 2), dbt_init(&val, "a", 2), 0); CKERR(r);
+ r = txna->commit(txna, 0); CKERR(r);
+
+ // do a simple test to show that DB_TXN_SNAPSHOT and DB_READ_COMMITTED
+ // work differently
+ r = env->txn_begin(env, txnb, &txnb_child, DB_TXN_SNAPSHOT); CKERR(r);
+ r = env->txn_begin(env, txnc, &txnc_child, DB_READ_COMMITTED); CKERR(r);
+ r = db->get(db, txnb_child, &key, &val, 0);
+ CKERR2(r, DB_NOTFOUND);
+ r = db->get(db, txnc_child, &key, &val, 0);
+ CKERR(r);
+
+ r = txnb_child->commit(txnb_child, 0); CKERR(r);
+ r = txnc_child->commit(txnc_child, 0); CKERR(r);
+ r = txnb->commit(txnb, 0); CKERR(r);
+ r = txnc->commit(txnc, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/openlimit17-locktree.cc b/storage/tokudb/PerconaFT/src/tests/openlimit17-locktree.cc
new file mode 100644
index 00000000..0c5056e3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/openlimit17-locktree.cc
@@ -0,0 +1,117 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <sys/resource.h>
+
+// create 200 databases and close them. set the open file limit to 100 and try to open all of them again.
+// eventually the locktree cannot clone the fractal tree, and the db open fails with EMFILE.
+
+int test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ int r;
+
+ const int N = 200;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB **dbs = new DB *[N];
+ for (int i = 0; i < N; i++) {
+ dbs[i] = NULL;
+ }
+ for (int i = 0; i < N; i++) {
+ r = db_create(&dbs[i], env, 0);
+ assert(r == 0);
+
+ char dbname[32]; sprintf(dbname, "%d.test", i);
+ r = dbs[i]->open(dbs[i], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+ }
+
+ for (int i = 0; i < N; i++) {
+ if (dbs[i]) {
+ r = dbs[i]->close(dbs[i], 0);
+ assert(r == 0);
+ }
+ }
+
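+ // Lower the fd limit to N/2 so that reopening all N databases must eventually run out of file descriptors.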
+ struct rlimit nofile_limit = { N/2, N/2 };
+ r = setrlimit(RLIMIT_NOFILE, &nofile_limit);
+ // assert(r == 0); // valgrind does not like this
+ if (r != 0) {
+ printf("warning: set nofile limit to %d failed %d %s\n", N/2, errno, strerror(errno));
+ }
+
+ for (int i = 0; i < N; i++) {
+ dbs[i] = NULL;
+ }
+ bool emfile_happened = false; // should happen since there are fewer than N unused file descriptors
+ for (int i = 0; i < N; i++) {
+ r = db_create(&dbs[i], env, 0);
+ assert(r == 0);
+
+ char dbname[32]; sprintf(dbname, "%d.test", i);
+ r = dbs[i]->open(dbs[i], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r == EMFILE) {
+ emfile_happened = true;
+ break;
+ }
+ }
+ assert(emfile_happened);
+ for (int i = 0; i < N; i++) {
+ if (dbs[i]) {
+ r = dbs[i]->close(dbs[i], 0);
+ assert(r == 0);
+ }
+ }
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ delete [] dbs;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/openlimit17-metafiles.cc b/storage/tokudb/PerconaFT/src/tests/openlimit17-metafiles.cc
new file mode 100644
index 00000000..2fea1509
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/openlimit17-metafiles.cc
@@ -0,0 +1,105 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <fcntl.h>
+#include <sys/resource.h>
+
+// try to open the environment with a small number of unused file descriptors
+
+int test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ struct rlimit nofile_limit;
+ r = getrlimit(RLIMIT_NOFILE, &nofile_limit);
+ assert(r == 0);
+ const int N = 100;
+ nofile_limit.rlim_cur = N;
+ r = setrlimit(RLIMIT_NOFILE, &nofile_limit);
+ assert(r == 0);
+
+ // compute the number of unused file descriptors
+ int fds[N];
+ for (int i = 0; i < N; i++) {
+ fds[i] = -1;
+ }
+ int unused = 0;
+ for (int i = 0; i < N; i++, unused++) {
+ fds[i] = open("/dev/null", O_RDONLY);
+ if (fds[i] == -1)
+ break;
+ }
+ for (int i = 0; i < N; i++) {
+ if (fds[i] != -1) {
+ close(fds[i]);
+ }
+ }
+
+ // try to open the environment with a constrained number of unused file descriptors. the env open should return an error rather than crash.
+ for (int n = N - unused; n < N; n++) {
+ nofile_limit.rlim_cur = n;
+ r = setrlimit(RLIMIT_NOFILE, &nofile_limit);
+ assert(r == 0);
+
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r == 0) {
+ r = env->close(env, 0);
+ assert(r == 0);
+ break;
+ }
+ assert(r == EMFILE);
+ r = env->close(env, 0);
+ assert(r == 0);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/openlimit17.cc b/storage/tokudb/PerconaFT/src/tests/openlimit17.cc
new file mode 100644
index 00000000..a3655127
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/openlimit17.cc
@@ -0,0 +1,100 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <sys/resource.h>
+
+// try to open N databases when N > open file limit. should fail gracefully.
+
+int test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ int r;
+
+ const int N = 200;
+
+ struct rlimit nofile_limit = { N, N };
+ r = setrlimit(RLIMIT_NOFILE, &nofile_limit);
+ // assert(r == 0); // valgrind does not like this
+ if (r != 0) {
+ printf("warning: set nofile limit to %d failed %d %s\n", N, errno, strerror(errno));
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB **dbs = new DB *[N];
+ for (int i = 0; i < N; i++) {
+ dbs[i] = NULL;
+ }
+ bool emfile_happened = false; // should happen since there are fewer than N unused file descriptors
+ for (int i = 0; i < N; i++) {
+ r = db_create(&dbs[i], env, 0);
+ assert(r == 0);
+
+ char dbname[32]; sprintf(dbname, "%d.test", i);
+ r = dbs[i]->open(dbs[i], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r == EMFILE) {
+ emfile_happened = true;
+ break;
+ }
+ assert(r == 0);
+ }
+
+ assert(emfile_happened);
+
+ for (int i = 0; i < N; i++) {
+ if (dbs[i]) {
+ r = dbs[i]->close(dbs[i], 0);
+ assert(r == 0);
+ }
+ }
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ delete [] dbs;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_checkpoint_var.cc b/storage/tokudb/PerconaFT/src/tests/perf_checkpoint_var.cc
new file mode 100644
index 00000000..c7683f7e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_checkpoint_var.cc
@@ -0,0 +1,142 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test measures performance under periodic checkpoints (the checkpointing period is
+// set to 30 seconds below). Each worker thread repeatedly does a batch of random point
+// queries followed by a batch of random insertions of zero-filled values of val_size bytes,
+// spread across several DBs. If the test runs to completion without crashing, we consider
+// it a success.
+//
+// Variables that are interesting to tweak and run:
+// - val_size
+// - number of DBs and threads
+// - checkpointing period
+//
+
+static int checkpoint_var(DB_TXN *txn, ARG arg, void* operation_extra, void *stats_extra) {
+ int db_index = random() % arg->cli->num_DBs;
+ int r = 0;
+ int val_size = *(int *)operation_extra;
+ DB* db = arg->dbp[db_index];
+ char data[val_size];
+ memset(data, 0, sizeof(data));
+ int i;
+ for (i = 0; i < 10; i++) {
+ // do point queries
+ ptquery_and_maybe_check_op(db, txn, arg, false);
+ }
+ increment_counter(stats_extra, PTQUERIES, i);
+ for (i = 0; i < 20; i++) {
+ // do a random insertion
+ int rand_key = random() % arg->cli->num_elements;
+ DBT key, val;
+ r = db->put(
+ db,
+ txn,
+ dbt_init(&key, &rand_key, sizeof(rand_key)),
+ dbt_init(&val, data, sizeof(data)),
+ 0);
+ if (r != 0) {
+ goto cleanup;
+ }
+ }
+cleanup:
+ increment_counter(stats_extra, PUTS, i);
+ return r;
+}
+
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ // every thread runs checkpoint_var: a batch of random point queries followed by a batch of insertions
+
+ if (verbose) printf("starting creation of pthreads\n");
+ int val_size = cli_args->val_size;
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+ for (int i = 0; i < num_threads; i++) {
+ myargs[i].operation = checkpoint_var;
+ myargs[i].operation_extra = &val_size;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ args.env_args.checkpointing_period = 30;
+ args.num_DBs = 4;
+ args.num_ptquery_threads = 4;
+ args.crash_on_operation_failure = false;
+ parse_stress_test_args(argc, argv, &args);
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_child_txn.cc b/storage/tokudb/PerconaFT/src/tests/perf_child_txn.cc
new file mode 100644
index 00000000..5d4c2ccb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_child_txn.cc
@@ -0,0 +1,90 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// The intent of this test is to measure the performance of creating and destroying child
+// transactions. Child transactions should have less work associated with them: they
+// are not added to the live root list and they should not create their own snapshots.
+// Nevertheless, benchmarks like tpcc and sysbench create many child transactions
+// for each root transaction and do little work per child transaction.
+
+static int create_child_txn(DB_TXN* txn, ARG arg, void* UU(operation_extra), void *UU(stats_extra)) {
+ DB_TXN* child_txn = NULL;
+ DB_ENV* env = arg->env;
+ int r = env->txn_begin(env, txn, &child_txn, arg->txn_flags);
+ CKERR(r);
+ r = child_txn->commit(child_txn, 0);
+ CKERR(r);
+ return 0;
+}
+
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = create_child_txn;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ args.single_txn = true;
+ parse_stress_test_args(argc, argv, &args);
+ args.num_elements = 0;
+ args.num_DBs = 0;
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_cursor_nop.cc b/storage/tokudb/PerconaFT/src/tests/perf_cursor_nop.cc
new file mode 100644
index 00000000..9e85dfd1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_cursor_nop.cc
@@ -0,0 +1,81 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// The intent of this test is to measure the throughput of cursor create and close
+// with multiple threads.
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ // every thread runs cursor_create_close_op: repeatedly create and close a cursor
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = cursor_create_close_op;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_iibench.cc b/storage/tokudb/PerconaFT/src/tests/perf_iibench.cc
new file mode 100644
index 00000000..559401c4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_iibench.cc
@@ -0,0 +1,453 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+#include <portability/toku_atomic.h>
+
+#include "test.h"
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test tries to emulate iibench at the ydb layer.
+//
+// The schema is simple:
+// 8 byte primary key
+// 8 byte key A
+// 8 byte key B
+// 8 byte key C
+//
+// There's one primary DB for the pk and three secondary DBs.
+//
+// The primary key stores the other columns as the value.
+// The secondary keys have the primary key appended to them.
+//
+
+static const size_t iibench_secondary_key_size = 16;
+
+struct iibench_row {
+ uint64_t pk;
+ int64_t a;
+ int64_t b;
+ int64_t c;
+};
+
+struct iibench_secondary_row {
+ int64_t column;
+ uint64_t pk;
+};
+
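+// Deterministically mix each byte of the key into a 64-bit value. Secondary column values
+// are derived from the pk with this hash, so rows can be regenerated and verified later.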
+static int64_t hash(uint64_t key) {
+ uint64_t hash = 0;
+ uint8_t *buf = (uint8_t *) &key;
+ for (int i = 0; i < 8; i++) {
+ hash += (((buf[i] + 1) * 17) & 0xFF) << (i * 8);
+ }
+ return hash;
+}
+
+static int64_t iibench_generate_column_by_pk(int64_t pk, int db_idx) {
+ invariant(db_idx > 0);
+ return hash(pk * db_idx);
+}
+
+static void iibench_generate_row(int64_t pk, struct iibench_row *row) {
+ row->a = iibench_generate_column_by_pk(pk, 1);
+ row->b = iibench_generate_column_by_pk(pk, 2);
+ row->c = iibench_generate_column_by_pk(pk, 3);
+}
+
+static void iibench_parse_row(const DBT *key, const DBT *val, struct iibench_row *row) {
+ char *CAST_FROM_VOIDP(val_buf, val->data);
+ invariant(key->size == 8);
+ invariant(val->size == 24);
+ memcpy(&row->pk, key->data, 8);
+ memcpy(&row->a, val_buf + 0, 8);
+ memcpy(&row->b, val_buf + 8, 8);
+ memcpy(&row->c, val_buf + 16, 8);
+}
+
+static void UU() iibench_verify_row(const struct iibench_row *row) {
+ struct iibench_row expected_row;
+ iibench_generate_row(row->pk, &expected_row);
+ invariant(row->a == expected_row.a);
+ invariant(row->b == expected_row.b);
+ invariant(row->c == expected_row.c);
+}
+
+static void iibench_parse_secondary_row(const DBT *key, const DBT *val, struct iibench_secondary_row *row) {
+ char *CAST_FROM_VOIDP(key_buf, key->data);
+ invariant(key->size == iibench_secondary_key_size);
+ invariant(val->size == 0);
+ memcpy(&row->column, key_buf + 0, 8);
+ memcpy(&row->pk, key_buf + 8, 8);
+}
+
+static void UU() iibench_verify_secondary_row(const struct iibench_secondary_row *row, int db_idx) {
+ int64_t expected = iibench_generate_column_by_pk(row->pk, db_idx);
+ invariant(row->column == expected);
+}
+
+static void iibench_fill_key_buf(uint64_t pk, int64_t *buf) {
+ memcpy(&buf[0], &pk, 8);
+}
+
+static void iibench_fill_val_buf(uint64_t pk, int64_t *buf) {
+ struct iibench_row row;
+ iibench_generate_row(pk, &row);
+ memcpy(&buf[0], &row.a, sizeof(row.a));
+ memcpy(&buf[1], &row.b, sizeof(row.b));
+ memcpy(&buf[2], &row.c, sizeof(row.c));
+}
+
+static int iibench_get_db_idx(DB *db) {
+ DESCRIPTOR desc = db->cmp_descriptor;
+ invariant_notnull(desc->dbt.data);
+ invariant(desc->dbt.size == sizeof(int));
+ int db_idx;
+ memcpy(&db_idx, desc->dbt.data, desc->dbt.size);
+ return db_idx;
+}
+
+static void iibench_rangequery_cb(DB *db, const DBT *key, const DBT *val, void *extra) {
+ invariant_null(extra);
+ const int db_idx = iibench_get_db_idx(db);
+ if (db_idx == 0) {
+ struct iibench_row row;
+ iibench_parse_row(key, val, &row);
+ iibench_verify_row(&row);
+ } else {
+ struct iibench_secondary_row row;
+ iibench_parse_secondary_row(key, val, &row);
+ iibench_verify_secondary_row(&row, db_idx);
+ }
+}
+
+struct iibench_put_op_extra {
+ uint64_t autoincrement;
+};
+
+static int UU() iibench_put_op(DB_TXN *txn, ARG arg, void *operation_extra, void *stats_extra) {
+ const int num_dbs = arg->cli->num_DBs;
+ DB **dbs = arg->dbp;
+ DB_ENV *env = arg->env;
+ DBT_ARRAY mult_key_dbt[num_dbs];
+ DBT_ARRAY mult_val_dbt[num_dbs];
+ uint32_t mult_put_flags[num_dbs];
+
+ // The first index is unique with serial autoincrement keys.
+ // The rest have secondary keys derived from the primary key.
+ mult_put_flags[0] = get_put_flags(arg->cli) |
+ // If the table was already created, don't check for uniqueness.
+ (arg->cli->num_elements > 0 ? 0 : DB_NOOVERWRITE);
+ for (int i = 0; i < num_dbs; i++) {
+ toku_dbt_array_init(&mult_key_dbt[i], 1);
+ toku_dbt_array_init(&mult_val_dbt[i], 1);
+ mult_put_flags[i] = get_put_flags(arg->cli);
+ }
+ mult_key_dbt[0].dbts[0].flags = 0;
+ mult_val_dbt[0].dbts[0].flags = 0;
+
+ int r = 0;
+
+ uint64_t puts_to_increment = 0;
+ for (uint32_t i = 0; i < arg->cli->txn_size; ++i) {
+ struct iibench_put_op_extra *CAST_FROM_VOIDP(info, operation_extra);
+
+ // Get the next primary key from the shared autoincrement counter; generate the column values in valbuf
+ uint64_t pk = toku_sync_fetch_and_add(&info->autoincrement, 1);
+ if (arg->bounded_element_range && arg->cli->num_elements > 0) {
+ pk = pk % arg->cli->num_elements;
+ }
+ int64_t keybuf[1];
+ int64_t valbuf[3];
+ iibench_fill_key_buf(pk, keybuf);
+ iibench_fill_val_buf(pk, valbuf);
+ dbt_init(&mult_key_dbt[0].dbts[0], keybuf, sizeof keybuf);
+ dbt_init(&mult_val_dbt[0].dbts[0], valbuf, sizeof valbuf);
+
+ r = env->put_multiple(
+ env,
+ dbs[0], // source db.
+ txn,
+ &mult_key_dbt[0].dbts[0], // source db key
+ &mult_val_dbt[0].dbts[0], // source db value
+ num_dbs, // total number of dbs
+ dbs, // array of dbs
+ mult_key_dbt, // array of keys
+ mult_val_dbt, // array of values
+ mult_put_flags // array of flags
+ );
+ if (r != 0) {
+ goto cleanup;
+ }
+ puts_to_increment++;
+ if (puts_to_increment == 100) {
+ increment_counter(stats_extra, PUTS, puts_to_increment);
+ puts_to_increment = 0;
+ }
+ }
+
+cleanup:
+ for (int i = 0; i < num_dbs; i++) {
+ toku_dbt_array_destroy(&mult_key_dbt[i]);
+ toku_dbt_array_destroy(&mult_val_dbt[i]);
+ }
+ return r;
+}
+
+static int iibench_generate_row_for_put(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *UU(src_key), const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ invariant(src_db != dest_db);
+ // 8 byte primary key, REALLOC secondary key
+ invariant_notnull(src_key->data);
+ invariant(src_key->size == 8);
+ invariant(dest_key->flags == DB_DBT_REALLOC);
+ // Expand the secondary key data buffer if necessary
+ if (dest_key->size != iibench_secondary_key_size) {
+ dest_key->data = toku_xrealloc(dest_key->data, iibench_secondary_key_size);
+ dest_key->size = iibench_secondary_key_size;
+ }
+
+ // Get the db index from the descriptor. This is a secondary index
+ // so it has to be greater than zero (which would be the pk). Then
+ // grab the appropriate secondary key from the source val, which is
+ // an array of the 3 columns, so we have to subtract 1 from the index.
+ const int db_idx = iibench_get_db_idx(dest_db);
+ int64_t *CAST_FROM_VOIDP(columns, src_val->data);
+ int64_t secondary_key = columns[db_idx - 1];
+
+ // First write down the secondary key, then the primary key (in src_key)
+ int64_t *CAST_FROM_VOIDP(dest_key_buf, dest_key->data);
+ memcpy(&dest_key_buf[0], &secondary_key, sizeof(secondary_key));
+ memcpy(&dest_key_buf[1], src_key->data, src_key->size);
+ dest_val->data = nullptr;
+ dest_val->size = 0;
+ return 0;
+}
+
+// After each DB opens, set the descriptor to store the DB idx value.
+// Close and reopen the DB so we can use db->cmp_descriptor during comparisons.
+static DB *iibench_set_descriptor_after_db_opens(DB_ENV *env, DB *db, int idx, reopen_db_fn reopen, struct cli_args *cli_args) {
+ int r;
+ DBT desc_dbt;
+ desc_dbt.data = &idx;
+ desc_dbt.size = sizeof(idx);
+ desc_dbt.ulen = 0;
+ desc_dbt.flags = 0;
+ r = db->change_descriptor(db, nullptr, &desc_dbt, 0); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ reopen(db, idx, cli_args);
+ return db;
+}
+
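+// Comparator for all indexes: the pk index (db_idx 0) compares 8-byte keys; secondary
+// indexes compare the 8-byte column value first, then the appended 8-byte pk as a tiebreak.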
+static int iibench_compare_keys(DB *db, const DBT *a, const DBT *b) {
+ const int db_idx = iibench_get_db_idx(db);
+ if (db_idx == 0) {
+ invariant(a->size == 8);
+ invariant(b->size == 8);
+ uint64_t x = *(uint64_t *) a->data;
+ uint64_t y = *(uint64_t *) b->data;
+ if (x < y) {
+ return -1;
+ } else if (x == y) {
+ return 0;
+ } else {
+ return 1;
+ }
+ } else {
+ invariant(a->size == 16);
+ invariant(b->size == 16);
+ int64_t x = *(int64_t *) a->data;
+ int64_t y = *(int64_t *) b->data;
+ uint64_t pk_x = *(uint64_t *) (((char *) a->data) + 8);
+ uint64_t pk_y = *(uint64_t *) (((char *) b->data) + 8);
+ if (x < y) {
+ return -1;
+ } else if (x == y) {
+ if (pk_x < pk_y) {
+ return -1;
+ } else if (pk_x == pk_y) {
+ return 0;
+ } else {
+ return 1;
+ }
+ } else {
+ return 1;
+ }
+ }
+}
+
+static void iibench_rangequery_db(DB *db, DB_TXN *txn, ARG arg, uint64_t max_pk) {
+ const int limit = arg->cli->range_query_limit;
+
+ int r;
+ DBC *cursor;
+
+ // Get a random key no greater than max pk
+ DBT start_key, end_key;
+ uint64_t start_k = myrandom_r(arg->random_data) % (max_pk + 1);
+ uint64_t end_k = start_k + limit;
+ dbt_init(&start_key, &start_k, 8);
+ dbt_init(&end_key, &end_k, 8);
+
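+ // Restrict the cursor to [start_k, end_k] and read up to `limit` rows, verifying each
+ // row's contents via iibench_rangequery_cb.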
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ r = cursor->c_set_bounds(cursor, &start_key, &end_key, true, 0); CKERR(r);
+ struct rangequery_cb_extra extra = {
+ .rows_read = 0,
+ .limit = limit,
+ .cb = iibench_rangequery_cb,
+ .db = db,
+ .cb_extra = nullptr,
+ };
+ r = cursor->c_getf_set(cursor, 0, &start_key, rangequery_cb, &extra);
+ while (r == 0 && extra.rows_read < extra.limit && run_test) {
+ r = cursor->c_getf_next(cursor, 0, rangequery_cb, &extra);
+ }
+
+ r = cursor->c_close(cursor); CKERR(r);
+}
+
+// Do a range query over the primary index, verifying the contents of the rows
+static int iibench_rangequery_op(DB_TXN *txn, ARG arg, void *operation_extra, void *stats_extra) {
+ struct iibench_put_op_extra *CAST_FROM_VOIDP(info, operation_extra);
+ DB *db = arg->dbp[0];
+
+ // Assume the max PK is the table size. If it isn't specified, do a
+ // safe read of the current autoincrement key from the put thread.
+ uint64_t max_pk = arg->cli->num_elements;
+ if (max_pk == 0) {
+ max_pk = toku_sync_fetch_and_add(&info->autoincrement, 0);
+ }
+ iibench_rangequery_db(db, txn, arg, max_pk);
+ increment_counter(stats_extra, PTQUERIES, 1);
+ return 0;
+}
+
+static int iibench_fill_tables(DB_ENV *env, DB **dbs, struct cli_args *cli_args, bool UU(fill_with_zeroes)) {
+ const int num_dbs = cli_args->num_DBs;
+ int r = 0;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ DB_LOADER *loader;
+ uint32_t db_flags[num_dbs];
+ uint32_t dbt_flags[num_dbs];
+ for (int i = 0; i < num_dbs; i++) {
+ db_flags[i] = DB_PRELOCKED_WRITE;
+ dbt_flags[i] = DB_DBT_REALLOC;
+ }
+
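+ // Bulk-load the initial rows through a loader: pk 0..num_elements-1 into the primary,
+ // with secondary rows generated from each primary row by the env's generate_row_for_put callback.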
+ r = env->create_loader(env, txn, &loader, dbs[0], num_dbs, dbs, db_flags, dbt_flags, 0); CKERR(r);
+ for (int i = 0; i < cli_args->num_elements; i++) {
+ DBT key, val;
+ uint64_t pk = i;
+ int64_t keybuf[1];
+ int64_t valbuf[3];
+ iibench_fill_key_buf(pk, keybuf);
+ iibench_fill_val_buf(pk, valbuf);
+ dbt_init(&key, keybuf, sizeof keybuf);
+ dbt_init(&val, valbuf, sizeof valbuf);
+ r = loader->put(loader, &key, &val); CKERR(r);
+ if (verbose && i > 0 && i % 10000 == 0) {
+ report_overall_fill_table_progress(cli_args, 10000);
+ }
+ }
+ r = loader->close(loader); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+ return 0;
+}
+
+static void
+stress_table(DB_ENV* env, DB **dbs, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_put_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+
+ // Put threads do iibench-like inserts with an auto-increment primary key
+ // Query threads do range queries of a certain size, verifying row contents.
+
+ struct iibench_put_op_extra put_extra = {
+ .autoincrement = 0
+ };
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbs, env, cli_args);
+ if (i < cli_args->num_put_threads) {
+ myargs[i].operation = iibench_put_op;
+ myargs[i].operation_extra = &put_extra;
+ } else {
+ myargs[i].operation = iibench_rangequery_op;
+ myargs[i].operation_extra = &put_extra;
+ myargs[i].txn_flags |= DB_TXN_READ_ONLY;
+ myargs[i].sleep_ms = 1000; // 1 second between range queries
+ }
+ }
+ const bool crash_at_end = false;
+ run_workers(myargs, num_threads, cli_args->num_seconds, crash_at_end, cli_args);
+}
+
+int test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ args.num_elements = 0; // want to start with empty DBs
+ // Puts per transaction is configurable. It defaults to 1k.
+ args.txn_size = 1000;
+ // Default to one writer on 4 indexes (pk + 3 secondaries), no readers.
+ args.num_DBs = 4;
+ args.num_put_threads = 1;
+ args.num_ptquery_threads = 0;
+ parse_stress_test_args(argc, argv, &args);
+ // The schema is not configurable. Silently ignore whatever was passed in.
+ args.key_size = 8;
+ args.val_size = 32;
+ // when there are multiple threads, it's valid for two of them to
+ // generate the same key and for one of them to fail with DB_LOCK_NOTGRANTED
+ if (args.num_put_threads > 1) {
+ args.crash_on_operation_failure = false;
+ }
+ args.env_args.generate_put_callback = iibench_generate_row_for_put;
+ after_db_open_hook = iibench_set_descriptor_after_db_opens;
+ fill_tables = iibench_fill_tables;
+ perf_test_main_with_cmp(&args, iibench_compare_keys);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_insert.cc b/storage/tokudb/PerconaFT/src/tests/perf_insert.cc
new file mode 100644
index 00000000..1355cbaa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_insert.cc
@@ -0,0 +1,91 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// The intent of this test is to measure the throughput of db->puts
+// with multiple threads.
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_put_threads;
+ struct arg myargs[num_threads];
+ operation_t put_op = (cli_args->serial_insert
+ ? serial_put_op
+ : random_put_op_singledb);
+ struct serial_put_extra spe[num_threads];
+ ZERO_ARRAY(spe);
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = put_op;
+ if (cli_args->serial_insert) {
+ spe[i].current = cli_args->num_elements;
+ myargs[i].operation_extra = &spe[i];
+ }
+ }
+ const bool crash_at_end = false;
+ run_workers(myargs, num_threads, cli_args->num_seconds, crash_at_end, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ args.num_elements = 0; // want to start with empty DBs
+ args.key_size = 8;
+ args.val_size = 8;
+ parse_stress_test_args(argc, argv, &args);
+ // when there are multiple threads, it's valid for two of them to
+ // generate the same key and for one of them to fail with DB_LOCK_NOTGRANTED
+ if (args.num_put_threads > 1) {
+ args.crash_on_operation_failure = false;
+ }
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_malloc_free.cc b/storage/tokudb/PerconaFT/src/tests/perf_malloc_free.cc
new file mode 100644
index 00000000..b7d7dfdc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_malloc_free.cc
@@ -0,0 +1,81 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#define DONT_DEPRECATE_MALLOC 1
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// The intent of this test is to measure the throughput of malloc and free
+// with multiple threads.
+
+static int xmalloc_free_op(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
+ size_t s = 256;
+ void *p = toku_xmalloc(s);
+ toku_free(p);
+ return 0;
+}
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = xmalloc_free_op;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_nop.cc b/storage/tokudb/PerconaFT/src/tests/perf_nop.cc
new file mode 100644
index 00000000..4c1dfd60
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_nop.cc
@@ -0,0 +1,77 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// The intent of this test is to measure the throughput of the test infrastructure executing a nop
+// on multiple threads.
+
+static int UU() nop(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
+ return 0;
+}
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = nop;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_partitioned_counter.cc b/storage/tokudb/PerconaFT/src/tests/perf_partitioned_counter.cc
new file mode 100644
index 00000000..52ac408f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_partitioned_counter.cc
@@ -0,0 +1,105 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// Measure the throughput of incrementing a status variable on multiple threads.
+
+struct partitioned_counter {
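+ // 4096/64 = 64 counter slots, each padded to 64 bytes so that (assuming 64-byte cache lines)
+ // increments from different CPUs land on different cache lines and avoid false sharing.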
+ struct {
+ union {
+ uint64_t counter;
+ uint64_t junk[64/8];
+ } c;
+ } b[4096/64];
+
+ void increment() {
+#if HAVE_SCHED_GETCPU
+ int n = sched_getcpu();
+#else
+ int n = random();
+#endif
+ assert(n >= 0);
+ toku_sync_fetch_and_add(&b[ n % (4096/64)].c.counter, 1);
+ }
+
+ uint64_t read() {
+ uint64_t s = 0;
+ for (int i = 0; i < 4096/64; i++)
+ s += b[i].c.counter;
+ return s;
+ }
+};
+
+struct partitioned_counter the_counter;
+
+static int UU() nop(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
+ the_counter.increment();
+ return 0;
+}
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = nop;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_ptquery.cc b/storage/tokudb/PerconaFT/src/tests/perf_ptquery.cc
new file mode 100644
index 00000000..274e93f0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_ptquery.cc
@@ -0,0 +1,102 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test measures point query throughput on a single dictionary:
+// We create a dictionary bigger than the cachetable (around 4x greater).
+// Then, we spawn a bunch of pthreads that do random point queries into the
+// dictionary using read-only transactions.
+// With the small cachetable, this should produce quite a bit of churn in reading in and evicting nodes.
+// If the test runs to completion without crashing, we consider it a success.
+//
+// This does NOT test:
+// - splits and merges
+// - multiple DBs
+//
+// Variables that are interesting to tweak and run:
+// - small cachetable
+// - number of elements
+//
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - some threads doing random point queries
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = ptquery_op;
+ myargs[i].txn_flags |= DB_TXN_READ_ONLY;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_ptquery2.cc b/storage/tokudb/PerconaFT/src/tests/perf_ptquery2.cc
new file mode 100644
index 00000000..51574302
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_ptquery2.cc
@@ -0,0 +1,115 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+
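+// operation_extra carries the index of the DB this thread should query; the
+// indices are assigned round-robin over num_DBs in stress_table below, so each
+// thread always queries the same dictionary.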
+static int ptquery_op2(DB_TXN *txn, ARG arg, void* operation_extra, void *stats_extra) {
+ int db_index = *(int *)operation_extra;
+ DB* db = arg->dbp[db_index];
+ int r = ptquery_and_maybe_check_op(db, txn, arg, true);
+ increment_counter(stats_extra, PTQUERIES, 1);
+ return r;
+}
+
+
+//
+// This test measures point query throughput across one or more dictionaries:
+// We create dictionaries bigger than the cachetable (around 4x greater).
+// Then, we spawn a bunch of pthreads that do random point queries, each thread
+// always querying the one dictionary assigned to it (round-robin over num_DBs),
+// using read-only transactions.
+// With the small cachetable, this should produce quite a bit of churn in reading in and evicting nodes.
+// If the test runs to completion without crashing, we consider it a success.
+//
+// This does NOT test:
+// - splits and merges
+//
+// Variables that are interesting to tweak and run:
+// - small cachetable
+// - number of elements
+//
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - some threads doing random point queries, each against its assigned DB
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ int thread_ids[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ thread_ids[i] = i % cli_args->num_DBs;
+ myargs[i].operation = ptquery_op2;
+ myargs[i].operation_extra = &thread_ids[i];
+ myargs[i].txn_flags |= DB_TXN_READ_ONLY;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_rangequery.cc b/storage/tokudb/PerconaFT/src/tests/perf_rangequery.cc
new file mode 100644
index 00000000..bab6a12a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_rangequery.cc
@@ -0,0 +1,71 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = rangequery_op;
+ myargs[i].txn_flags |= DB_TXN_READ_ONLY;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_read_txn.cc b/storage/tokudb/PerconaFT/src/tests/perf_read_txn.cc
new file mode 100644
index 00000000..0a17c0ed
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_read_txn.cc
@@ -0,0 +1,84 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// The intent of this test is to measure the throughput of creating and destroying
+// root read-only transactions that create snapshots
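+// The operation is a nop and single_txn is disabled below, so the work measured
+// per iteration should be just the begin/commit of a DB_TXN_READ_ONLY
+// transaction in the stress harness.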
+
+static int UU() nop(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
+ return 0;
+}
+
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].txn_flags |= DB_TXN_READ_ONLY;
+ myargs[i].operation = nop;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ args.single_txn = false;
+ args.num_elements = 0;
+ args.num_DBs = 0;
+ args.num_put_threads = 0;
+ args.num_update_threads = 0;
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_read_txn_single_thread.cc b/storage/tokudb/PerconaFT/src/tests/perf_read_txn_single_thread.cc
new file mode 100644
index 00000000..374559d8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_read_txn_single_thread.cc
@@ -0,0 +1,110 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// The intent of this test is to measure how fast a single thread can
+// commit and create transactions when there exist N transactions.
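+// A pool of num_txns snapshot transactions is kept open; each operation commits
+// one chosen at random and immediately begins a read-only replacement, so the
+// number of live transactions stays constant at txn_size.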
+
+DB_TXN** txns;
+int num_txns;
+
+static int commit_and_create_txn(
+ DB_TXN* UU(txn),
+ ARG arg,
+ void* UU(operation_extra),
+ void* UU(stats_extra)
+ )
+{
+ int rand_txn_id = random() % num_txns;
+ int r = txns[rand_txn_id]->commit(txns[rand_txn_id], 0);
+ CKERR(r);
+ r = arg->env->txn_begin(arg->env, 0, &txns[rand_txn_id], arg->txn_flags | DB_TXN_READ_ONLY);
+ CKERR(r);
+ return 0;
+}
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting running of stress\n");
+
+ num_txns = cli_args->txn_size;
+ XCALLOC_N(num_txns, txns);
+ for (int i = 0; i < num_txns; i++) {
+ int r = env->txn_begin(env, 0, &txns[i], DB_TXN_SNAPSHOT);
+ CKERR(r);
+ }
+
+ struct arg myarg;
+ arg_init(&myarg, dbp, env, cli_args);
+ myarg.operation = commit_and_create_txn;
+
+ run_workers(&myarg, 1, cli_args->num_seconds, false, cli_args);
+
+ for (int i = 0; i < num_txns; i++) {
+ int chk_r = txns[i]->commit(txns[i], 0);
+ CKERR(chk_r);
+ }
+ toku_free(txns);
+ num_txns = 0;
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ num_txns = 0;
+ txns = NULL;
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ args.single_txn = true;
+ // this test is all about transactions, make the DB small
+ args.num_elements = 1;
+ args.num_DBs = 1;
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_read_write.cc b/storage/tokudb/PerconaFT/src/tests/perf_read_write.cc
new file mode 100644
index 00000000..31bb2976
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_read_write.cc
@@ -0,0 +1,117 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+static int perf_read(DB_TXN *txn, ARG arg, void* operation_extra, void *stats_extra) {
+ int db_index = *(int *)operation_extra;
+ DB* db = arg->dbp[db_index];
+
+ for (uint32_t i = 0; i < arg->cli->txn_size; i++) {
+ ptquery_and_maybe_check_op(db, txn, arg, true);
+ increment_counter(stats_extra, PTQUERIES, 1);
+ }
+ return 0;
+}
+
+static int perf_write(DB_TXN *txn, ARG arg, void* operation_extra, void *stats_extra) {
+ int db_index = *(int *)operation_extra;
+ DB* db = arg->dbp[db_index];
+ return random_put_in_db(db, txn, arg, true, stats_extra);
+}
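+// Each perf_read call issues cli->txn_size point queries, so reads are batched
+// within one operation call, while each perf_write call does a single random put.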
+
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - some threads constantly inserting random values (perf_write)
+ // - some threads doing random point queries (perf_read)
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads + cli_args->num_update_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+
+ const int num_update_threads = cli_args->num_update_threads;
+ int upd_thread_ids[num_update_threads];
+ for (int i = 0; i < cli_args->num_update_threads; ++i) {
+ upd_thread_ids[i] = i % cli_args->num_DBs;
+ myargs[i].operation_extra = &upd_thread_ids[i];
+ myargs[i].operation = perf_write;
+ }
+
+ const int num_ptquery_threads = cli_args->num_ptquery_threads;
+ int ptq_thread_ids[num_ptquery_threads];
+ for (int i = cli_args->num_update_threads; i < num_threads; i++) {
+ ptq_thread_ids[i - cli_args->num_update_threads] = i % cli_args->num_DBs;
+ myargs[i].operation_extra = &ptq_thread_ids[i - cli_args->num_update_threads];
+ myargs[i].operation = perf_read;
+ }
+
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ args.env_args.checkpointing_period = 30;
+ args.num_DBs = 1;
+ args.num_ptquery_threads = 1;
+ args.num_update_threads = 1;
+ args.crash_on_operation_failure = false;
+ parse_stress_test_args(argc, argv, &args);
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_root_txn.cc b/storage/tokudb/PerconaFT/src/tests/perf_root_txn.cc
new file mode 100644
index 00000000..d0b35e13
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_root_txn.cc
@@ -0,0 +1,83 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// The intent of this test is to measure the throughput of creating and destroying
+// root read-only transactions that create snapshots
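+// Unlike perf_read_txn, stress_table here does not set DB_TXN_READ_ONLY, so this
+// measures root transaction create/destroy with the default transaction flags.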
+
+static int UU() nop(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
+ return 0;
+}
+
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = nop;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ args.single_txn = false;
+ args.num_elements = 0;
+ args.num_DBs = 0;
+ args.num_put_threads = 0;
+ args.num_update_threads = 0;
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_simple_counter.cc b/storage/tokudb/PerconaFT/src/tests/perf_simple_counter.cc
new file mode 100644
index 00000000..3f5686d5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_simple_counter.cc
@@ -0,0 +1,79 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// Measure the throughput of incrementing a status variable on multiple threads.
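+// Here every thread does an atomic add on one shared word, so all increments
+// contend on the same cache line (contrast with the partitioned counter test).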
+
+volatile uint64_t the_counter = 0;
+
+static int UU() nop(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
+ toku_sync_fetch_and_add(&the_counter, 1);
+ return 0;
+}
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = nop;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_thread_counter.cc b/storage/tokudb/PerconaFT/src/tests/perf_thread_counter.cc
new file mode 100644
index 00000000..1897c5fa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_thread_counter.cc
@@ -0,0 +1,79 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// Measure the throughput of incrementing a status variable on multiple threads.
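+// Here the counter is __thread, so each worker increments its own copy and there
+// is no sharing at all; note that no aggregate total is ever read back.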
+
+volatile __thread uint64_t the_counter = 0;
+
+static int UU() nop(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
+ the_counter++; // toku_sync_fetch_and_add(&the_counter, 1);
+ return 0;
+}
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = nop;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/perf_txn_single_thread.cc b/storage/tokudb/PerconaFT/src/tests/perf_txn_single_thread.cc
new file mode 100644
index 00000000..4e0ffc39
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/perf_txn_single_thread.cc
@@ -0,0 +1,110 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// The intent of this test is to measure how fast a single thread can
+// commit and create transactions when there exist N transactions.
+
+DB_TXN** txns;
+int num_txns;
+
+static int commit_and_create_txn(
+ DB_TXN* UU(txn),
+ ARG arg,
+ void* UU(operation_extra),
+ void* UU(stats_extra)
+ )
+{
+ int rand_txn_id = random() % num_txns;
+ int r = txns[rand_txn_id]->commit(txns[rand_txn_id], 0);
+ CKERR(r);
+ r = arg->env->txn_begin(arg->env, 0, &txns[rand_txn_id], arg->txn_flags);
+ CKERR(r);
+ return 0;
+}
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting running of stress\n");
+
+ num_txns = cli_args->txn_size;
+ XCALLOC_N(num_txns, txns);
+ for (int i = 0; i < num_txns; i++) {
+ int r = env->txn_begin(env, 0, &txns[i], DB_TXN_SNAPSHOT);
+ CKERR(r);
+ }
+
+ struct arg myarg;
+ arg_init(&myarg, dbp, env, cli_args);
+ myarg.operation = commit_and_create_txn;
+
+ run_workers(&myarg, 1, cli_args->num_seconds, false, cli_args);
+
+ for (int i = 0; i < num_txns; i++) {
+ int chk_r = txns[i]->commit(txns[i], 0);
+ CKERR(chk_r);
+ }
+ toku_free(txns);
+ num_txns = 0;
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ num_txns = 0;
+ txns = NULL;
+ struct cli_args args = get_default_args_for_perf();
+ parse_stress_test_args(argc, argv, &args);
+ args.single_txn = true;
+ // this test is all about transactions, make the DB small
+ args.num_elements = 1;
+ args.num_DBs = 1;
+ perf_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/powerfail.cc b/storage/tokudb/PerconaFT/src/tests/powerfail.cc
new file mode 100644
index 00000000..448c7a0e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/powerfail.cc
@@ -0,0 +1,186 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* a powerfail test. */
+#include "test.h"
+#include <stdio.h>
+
+static void print_usage (void) {
+ printf("Two modes:\n");
+ printf(" ./powerfail --write\n");
+ printf(" creates a database, and writes numbers out to stdout. While this is running you can crash the machine and record\n");
+ printf(" the last number printed. It may be helpful to run this program via ssh so that you can see the output after the\n");
+ printf(" machine crashes. It would be wrong to pipe stdout into a file on the machine that crashes, since if we think there\n");
+ printf(" is any possibility that recovery will fail, then the system cannot be trusted to restore that file properly either.\n");
+ printf(" ./powerfail --check N\n");
+ printf(" Feed the recorded number into the command line. The system will check that transaction N committed properly and\n");
+ printf(" that no more than one additional transaction committed.\n");
+}
+
+
+DB_ENV *env;
+enum { N_DBS = 10 };
+DB *dbs[N_DBS];
+char dbname_template[]="foo%d.db";
+const int envflags = DB_INIT_MPOOL|DB_THREAD|DB_CREATE |DB_INIT_LOCK|DB_PRIVATE | DB_INIT_LOG|DB_INIT_TXN|DB_RECOVER;
+
+static void put (DB *db, DB_TXN *txn, long k, long v, int flags) {
+ DBT key, val;
+ int r = db->put(db, txn, dbt_init(&key, &k, sizeof(k)), dbt_init(&val, &v, sizeof(v)), flags);
+ assert(r==0);
+}
+
+static void open_dbs (void) {
+ int r;
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (int i=0; i<N_DBS; i++) {
+ char dbname[sizeof(dbname_template)+10];
+ r = snprintf(dbname, sizeof(dbname), dbname_template, i);
+ assert(r>0 && r<(int)sizeof(dbname));
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ r = dbs[i]->open(dbs[i], txn, dbname, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+}
+
+static void close_dbs (void) {
+ for (int i=0; i<N_DBS; i++) {
+ int r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+
+}
+
+static long shuffle (long l, int i) {
+ (void)i;
+ return l;
+}
+
+static void do_write (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ open_dbs();
+ // DB[0] contains the pair (N, N) for each committed transaction N.
+ // Each transaction also inserts a random value into DBs 1..N_DBS-2 and stores their sum in the last DB.
+ for (long N=0; 1; N++) {
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ put(dbs[0], txn, N, N, 0);
+ int sum=0;
+ for (int i=1; i+1<N_DBS; i++) {
+ int rval = (random()%2048)-1024;
+ sum+=rval;
+ put(dbs[i], txn, shuffle(N, i), rval, (i%2==0) ? 0 : 0); // both branches currently pass flags=0; the even/odd overwrite distinction is unused
+ }
+ put(dbs[N_DBS-1],txn, N, sum, 0);
+ r = txn->commit(txn, 0); CKERR(r);
+ printf("%ld\n", N);
+ }
+}
+
+static void scan(DB *db, DB_TXN *txn,
+ void (*reduce)(DBT *k, DBT *v, void *extra), void *extra) {
+ DBC *cursor;
+ int r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ DBT key, val;
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ key.flags = DB_DBT_MALLOC;
+ val.flags = DB_DBT_MALLOC;
+ int n=0;
+ while (0==(r = cursor->c_get(cursor, &key, &val, DB_NEXT))) {
+ reduce(&key, &val, extra);
+ n++;
+ }
+ printf("n=%d\n", n);
+ r = cursor->c_close(cursor); CKERR(r);
+ toku_free(key.data);
+ toku_free(val.data);
+}
+
+static long maxl (long a, long b) {
+ if (a<b) return b; else return a;
+}
+
+static void maxf (DBT *k, DBT *v, void *extrav) {
+ long *CAST_FROM_VOIDP(extra, extrav);
+ long *CAST_FROM_VOIDP(kd, k->data);
+ long *CAST_FROM_VOIDP(vd, v->data);
+ extra[0] = maxl(extra[0], *kd);
+ extra[1] = maxl(extra[1], *vd);
+}
+
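+// do_check scans DB[0], whose rows are the (N, N) pairs written one per committed
+// transaction, and verifies that the largest key equals the largest value and is
+// at least the number recorded before the crash.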
+static void do_check (long N) {
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ open_dbs();
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ long maxp[2]={0,0};
+ scan(dbs[0], txn, maxf, &maxp);
+ printf("max k,v = %ld, %ld\n", maxp[0], maxp[1]);
+ assert(maxp[0]==maxp[1]);
+ assert(maxp[0]>=N);
+ r = txn->commit(txn, 0); CKERR(r);
+ close_dbs();
+ r = env->close(env, 0); CKERR(r);
+
+}
+
+int test_main (int argc, char * const argv[]) {
+ assert(argc>=2 && argc<=3);
+ if (strcmp(argv[1], "--write")==0) {
+ assert(argc==2);
+ do_write();
+ } else if (strcmp(argv[1], "--check")==0) {
+ assert(argc==3);
+ char *end;
+ errno=0;
+ long N=strtol(argv[2], &end, 10);
+ assert(0==errno && 0==*end);
+ do_check(N);
+ } else if (strcmp(argv[1],"-h")==0) {
+ print_usage();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/preload-db-nested.cc b/storage/tokudb/PerconaFT/src/tests/preload-db-nested.cc
new file mode 100644
index 00000000..5d47b1a8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/preload-db-nested.cc
@@ -0,0 +1,340 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/*********************
+ *
+ * Purpose is to preload a set of dictionaries using nested transactions,
+ * to be used to test version upgrade.
+ *
+ * Each row will be inserted using nested transactions MAXDEPTH deep.
+ * Each nested transaction will insert a value one greater than the parent transaction.
+ * For each row, a single transaction will be aborted, the rest will be committed.
+ * The transaction to be aborted will be the row number mod MAXDEPTH.
+ * So, for row 0, the outermost transaction will be aborted and the row will not appear in the database.
+ * For row 1, transaction 1 will be aborted, so the inserted value will be the original generated value.
+ * For each row, the inserted value will be:
+ * if row%MAXDEPTH == 0 no row
+ * else value = generated value + (row%MAXDEPTH -1)
+ *
+ *
+ * For each row
+ * generate k,v pair
+ * for txndepth = 0 to MAXDEPTH-1 {
+ * add txndepth to v
+ * begin txn
+ * insert
+ * if txndepth = row%MAXDEPTH abort
+ * else commit
+ * }
+ * }
+ *
+ */
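+// Example: with MAXDEPTH 64, row 65 has 65%MAXDEPTH == 1, so the depth-1
+// transaction aborts and the stored value is the generated value + 0; row 64 has
+// 64%MAXDEPTH == 0, so the outermost transaction aborts and no row is stored.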
+
+
+
+
+
+#define kv_pair_funcs 1 // pull in kv_pair generators from test.h
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+
+#include "test_kv_gen.h"
+/*
+ */
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+enum {ROWS_PER_TRANSACTION=10000};
+uint NUM_DBS=1;
+uint NUM_ROWS=100000;
+int CHECK_RESULTS=0;
+int optimize=0;
+int littlenode = 0;
+enum { old_default_cachesize=1024 }; // MB
+int CACHESIZE=old_default_cachesize;
+int ALLOW_DUPS=0;
+
+// max depth of nested transactions for this test
+//#define MAXDEPTH 128
+#define MAXDEPTH 64
+
+static void
+nested_insert(DB ** dbs, uint depth, DB_TXN *parent_txn, uint k, uint generated_value);
+
+
+static void
+check_results_nested(DB ** dbs, const uint num_rows) {
+ int num_dbs = 1; // maybe someday increase
+ for(int j=0;j<num_dbs;j++){
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ int r;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ DBC *cursor;
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
+ for(uint i=0;i<num_rows;i++) {
+ if (i % MAXDEPTH) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ CKERR(r);
+ uint observed_k = *(unsigned int*)key.data;
+ uint observed_v = *(unsigned int*)val.data;
+ uint expected_k = i;
+ uint generated_value = generate_val(i, 0);
+ uint expected_v = generated_value + (i%MAXDEPTH - 1);
+ if (verbose >= 3)
+ printf("expected key %d, observed key %d, expected val %d, observed val %d\n",
+ expected_k, observed_k, expected_v, observed_v);
+ // test that we have the expected keys and values
+ assert(observed_k == expected_k);
+ assert(observed_v == expected_v);
+ }
+ dbt_init(&key, NULL, sizeof(unsigned int));
+ dbt_init(&val, NULL, sizeof(unsigned int));
+ if ( verbose && (i%10000 == 0)) {printf("."); fflush(stdout);}
+ }
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ r = txn->commit(txn, DB_TXN_NOSYNC);
+ CKERR(r);
+ }
+ if ( verbose ) {printf("ok");fflush(stdout);}
+}
+
+
+
+
+
+
+static struct timeval starttime;
+static double UU() elapsed_time (void) {
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ return now.tv_sec - starttime.tv_sec + 1e-6*(now.tv_usec - starttime.tv_usec);
+}
+
+static void preload_dbs(DB **dbs)
+{
+ gettimeofday(&starttime, NULL);
+ uint row;
+
+ if ( verbose ) { printf("loading");fflush(stdout); }
+
+ for(row = 0; row <= NUM_ROWS; row++) {
+ uint generated_value = generate_val(row, 0);
+ nested_insert(dbs, 0, NULL, row, generated_value);
+ }
+
+ if (optimize) {
+ if (verbose) { printf("\noptimizing");fflush(stdout);}
+ do_hot_optimize_on_dbs(env, dbs, 1);
+ }
+
+ if ( CHECK_RESULTS) {
+ if ( verbose ) {printf("\nchecking");fflush(stdout);}
+ check_results_nested(&dbs[0], NUM_ROWS);
+ }
+ if ( verbose) {printf("\ndone\n");fflush(stdout);}
+}
+
+static void
+nested_insert(DB ** dbs, uint depth, DB_TXN *parent_txn, uint k, uint generated_value) {
+ if (depth < MAXDEPTH) {
+ DBT key, val;
+ dbt_init_realloc(&key);
+ dbt_init_realloc(&val);
+ uint v = generated_value + depth;
+ DB_TXN * txn;
+ int r = env->txn_begin(env, parent_txn, &txn, 0);
+ CKERR(r);
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ int db = 0; // maybe later replace with loop
+ r = dbs[db]->put(dbs[db], txn, &key, &val, 0);
+ CKERR(r);
+ if (key.flags == 0) { dbt_init_realloc(&key); }
+ if (val.flags == 0) { dbt_init_realloc(&val); }
+ nested_insert(dbs, depth+1, txn, k, generated_value);
+ if (depth == (k % MAXDEPTH)) {
+ r = txn->abort(txn);
+ CKERR(r);
+ if (verbose>=3)
+ printf("abort k = %d, v= %d, depth = %d\n", k, v, depth);
+ }
+ else {
+ r = txn->commit(txn, DB_TXN_NOSYNC);
+ CKERR(r);
+ if (verbose>=3)
+ printf("commit k = %d, v= %d, depth = %d\n", k, v, depth);
+ }
+ if ( verbose && (k%10000 == 0)) {printf(".");fflush(stdout);}
+
+ if ( key.flags ) { toku_free(key.data); key.data = NULL; }
+ if ( val.flags ) { toku_free(val.data); val.data = NULL; }
+ }
+}
+
+
+char *free_me = NULL;
+const char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+
+static void run_test(void)
+{
+ int r;
+ {
+ int len = strlen(env_dir) + 20;
+ char syscmd[len];
+ r = snprintf(syscmd, len, "rm -rf %s", env_dir);
+ assert(r<len);
+ r = system(syscmd); CKERR(r);
+ }
+ r = toku_os_mkdir(env_dir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+// r = env->set_default_dup_compare(env, uint_dbt_cmp); CKERR(r);
+// if ( verbose ) printf("CACHESIZE = %d MB\n", CACHESIZE);
+// r = env->set_cachesize(env, CACHESIZE / 1024, (CACHESIZE % 1024)*1024*1024, 1); CKERR(r);
+// CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[MAX_DBS];
+ for(uint i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ if (littlenode) {
+ r=dbs[i]->set_pagesize(dbs[i], 4096);
+ CKERR(0);
+ }
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ generate_permute_tables();
+
+ // -------------------------- //
+ preload_dbs(dbs);
+ // -------------------------- //
+
+ for(uint i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+
+ if (verbose >= 2)
+ print_engine_status(env);
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const argv[]) {
+ do_args(argc, argv);
+ run_test();
+ if (free_me) toku_free(free_me);
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -c -n -d <num_dbs> -r <num_rows> %s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-d")==0) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-c")==0) {
+ CHECK_RESULTS = 1;
+ } else if (strcmp(argv[0], "-n")==0) {
+ littlenode = 1;
+ } else if (strcmp(argv[0], "-o")==0) {
+ optimize = 1;
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/preload-db.cc b/storage/tokudb/PerconaFT/src/tests/preload-db.cc
new file mode 100644
index 00000000..d06d48a8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/preload-db.cc
@@ -0,0 +1,246 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#define kv_pair_funcs 1 // pull in kv_pair generators from test.h
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+
+#include "test_kv_gen.h"
+/*
+ */
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+enum {ROWS_PER_TRANSACTION=10000};
+int NUM_DBS=5;
+int NUM_ROWS=100000;
+int CHECK_RESULTS=0;
+int optimize=0;
+int littlenode = 0;
+enum { old_default_cachesize=1024 }; // MB
+int CACHESIZE=old_default_cachesize;
+int ALLOW_DUPS=0;
+
+static struct timeval starttime;
+static double UU() elapsed_time (void) {
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ return now.tv_sec - starttime.tv_sec + 1e-6*(now.tv_usec - starttime.tv_usec);
+}
+
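+// Rows are loaded in batches of ROWS_PER_TRANSACTION (10000): each outer-loop
+// iteration of preload_dbs begins a transaction, inserts one batch into every DB,
+// and commits, so no single transaction grows unboundedly.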
+static void preload_dbs(DB **dbs)
+{
+ gettimeofday(&starttime, NULL);
+ int r;
+ DB_TXN *txn;
+
+ DBT skey, sval;
+ DBT key, val;
+ dbt_init_realloc(&key);
+ dbt_init_realloc(&val);
+ unsigned int k, v;
+ if ( verbose ) { printf("loading");fflush(stdout); }
+ int outer_loop_num = ( NUM_ROWS <= ROWS_PER_TRANSACTION ) ? 1 : (NUM_ROWS / ROWS_PER_TRANSACTION);
+ for(int x=0;x<outer_loop_num;x++) {
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ for(int i=1;i<=ROWS_PER_TRANSACTION;i++) {
+ k = i + (x*ROWS_PER_TRANSACTION);
+ v = generate_val(k, 0);
+ dbt_init(&skey, &k, sizeof(unsigned int));
+ dbt_init(&sval, &v, sizeof(unsigned int));
+
+ for(int db = 0;db < NUM_DBS;db++) {
+ put_multiple_generate(dbs[db], // dest_db
+ NULL, // src_db, ignored
+ &key, &val, // dest_key, dest_val
+ &skey, &sval, // src_key, src_val
+ NULL); // extra, ignored
+
+ r = dbs[db]->put(dbs[db], txn, &key, &val, 0); CKERR(r);
+ if (key.flags == 0) { dbt_init_realloc(&key); }
+ if (val.flags == 0) { dbt_init_realloc(&val); }
+ }
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+ if ( verbose ) {printf(".");fflush(stdout);}
+ }
+ if ( key.flags ) { toku_free(key.data); key.data = NULL; }
+ if ( val.flags ) { toku_free(val.data); val.data = NULL; }
+
+ if (optimize) {
+ if (verbose) { printf("\noptimizing");fflush(stdout);}
+ do_hot_optimize_on_dbs(env, dbs, NUM_DBS);
+ }
+
+ if ( CHECK_RESULTS) {
+ if ( verbose ) {printf("\nchecking");fflush(stdout);}
+ check_results(env, dbs, NUM_DBS, NUM_ROWS);
+ }
+ if ( verbose) {printf("\ndone\n");fflush(stdout);}
+}
+
+
+char *free_me = NULL;
+const char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+
+static void run_test(void)
+{
+ int r;
+ {
+ int len = strlen(env_dir) + 20;
+ char syscmd[len];
+ r = snprintf(syscmd, len, "rm -rf %s", env_dir);
+ assert(r<len);
+ r = system(syscmd); CKERR(r);
+ }
+ r = toku_os_mkdir(env_dir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+// r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+// r = env->set_default_dup_compare(env, uint_dbt_cmp); CKERR(r);
+// if ( verbose ) printf("CACHESIZE = %d MB\n", CACHESIZE);
+// r = env->set_cachesize(env, CACHESIZE / 1024, (CACHESIZE % 1024)*1024*1024, 1); CKERR(r);
+// CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ if (littlenode) {
+ r=dbs[i]->set_pagesize(dbs[i], 4096);
+ CKERR(0);
+ }
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ generate_permute_tables();
+
+ // -------------------------- //
+ preload_dbs(dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+
+ if (verbose >= 2)
+ print_engine_status(env);
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+
+ /*********** DO NOT TRIM LOGFILES: Trimming logfiles defeats purpose of upgrade tests which must handle untrimmed logfiles.
+ // reopen, then close environment to trim logfiles
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ ***********/
+
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const argv[]) {
+ do_args(argc, argv);
+ run_test();
+ if (free_me) toku_free(free_me);
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -c -n -d <num_dbs> -r <num_rows> %s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-d")==0) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-c")==0) {
+ CHECK_RESULTS = 1;
+ } else if (strcmp(argv[0], "-n")==0) {
+ littlenode = 1;
+ } else if (strcmp(argv[0], "-o")==0) {
+ optimize = 1;
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/prelock-read-read.cc b/storage/tokudb/PerconaFT/src/tests/prelock-read-read.cc
new file mode 100644
index 00000000..100f3af5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/prelock-read-read.cc
@@ -0,0 +1,112 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that read ranges prelocked by multiple transactions do not conflict with each other
+
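+// prelock_range takes a range lock on [left, right] for the cursor's transaction via c_set_bounds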
+static int prelock_range(DBC *cursor, int left, int right) {
+ DBT key_left; dbt_init(&key_left, &left, sizeof left);
+ DBT key_right; dbt_init(&key_right, &right, sizeof right);
+ int r = cursor->c_set_bounds(cursor, &key_left, &key_right, true, 0);
+ return r;
+}
+
+static void test_read_read(DB_ENV *env, DB *db, uint32_t iso_flags, int expect_r) {
+ int r;
+
+ DB_TXN *txn_a = NULL;
+ r = env->txn_begin(env, NULL, &txn_a, iso_flags); assert_zero(r);
+ DB_TXN *txn_b = NULL;
+ r = env->txn_begin(env, NULL, &txn_b, iso_flags); assert_zero(r);
+
+ DBC *cursor_a = NULL;
+ r = db->cursor(db, txn_a, &cursor_a, 0); assert_zero(r);
+ DBC *cursor_b = NULL;
+ r = db->cursor(db, txn_b, &cursor_b, 0); assert_zero(r);
+
+ r = prelock_range(cursor_a, htonl(10), htonl(100)); assert_zero(r);
+ r = prelock_range(cursor_b, htonl(50), htonl(200)); assert(r == expect_r);
+
+ r = cursor_a->c_close(cursor_a); assert_zero(r);
+ r = cursor_b->c_close(cursor_b); assert_zero(r);
+
+ r = txn_a->commit(txn_a, 0); assert_zero(r);
+ r = txn_b->commit(txn_b, 0); assert_zero(r);
+}
+
+int test_main(int argc, char * const argv[]) {
+ int r;
+
+ const char *env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "prelocktest";
+
+ parse_args(argc, argv);
+
+ char rm_cmd[strlen(env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", env_dir);
+ r = system(rm_cmd); assert_zero(r);
+
+ r = toku_os_mkdir(env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert_zero(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+ int env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ r = env->open(env, env_dir, env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+ DB_TXN *create_txn = NULL;
+ r = env->txn_begin(env, NULL, &create_txn, 0); assert_zero(r);
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+ r = create_txn->commit(create_txn, 0); assert_zero(r);
+
+ test_read_read(env, db, DB_READ_COMMITTED, 0);
+ test_read_read(env, db, DB_READ_UNCOMMITTED, 0);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ test_read_read(env, db, DB_SERIALIZABLE, DB_LOCK_NOTGRANTED);
+#else
+ test_read_read(env, db, DB_SERIALIZABLE, 0);
+#endif
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/prelock-read-write.cc b/storage/tokudb/PerconaFT/src/tests/prelock-read-write.cc
new file mode 100644
index 00000000..a573eeaf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/prelock-read-write.cc
@@ -0,0 +1,106 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that prelocking a write range that overlaps an existing read lock conflicts
+
+static int prelock_range(DBC *cursor, int left, int right) {
+ DBT key_left; dbt_init(&key_left, &left, sizeof left);
+ DBT key_right; dbt_init(&key_right, &right, sizeof right);
+ int r = cursor->c_set_bounds(cursor, &key_left, &key_right, true, 0);
+ return r;
+}
+
+static void test_read_write(DB_ENV *env, DB *db, uint32_t iso_flags, int expect_r) {
+ int r;
+
+ DB_TXN *txn_a = NULL;
+ r = env->txn_begin(env, NULL, &txn_a, iso_flags); assert_zero(r);
+ DB_TXN *txn_b = NULL;
+ r = env->txn_begin(env, NULL, &txn_b, iso_flags); assert_zero(r);
+
+ DBC *cursor_a = NULL;
+ r = db->cursor(db, txn_a, &cursor_a, 0); assert_zero(r);
+ DBC *cursor_b = NULL;
+ r = db->cursor(db, txn_b, &cursor_b, DB_RMW); assert_zero(r);
+
+ r = prelock_range(cursor_a, htonl(10), htonl(100)); assert_zero(r);
+ r = prelock_range(cursor_b, htonl(50), htonl(200)); assert(r == expect_r);
+
+ r = cursor_a->c_close(cursor_a); assert_zero(r);
+ r = cursor_b->c_close(cursor_b); assert_zero(r);
+
+ r = txn_a->commit(txn_a, 0); assert_zero(r);
+ r = txn_b->commit(txn_b, 0); assert_zero(r);
+}
+
+int test_main(int argc, char * const argv[]) {
+ int r;
+
+ const char *env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "prelocktest";
+
+ parse_args(argc, argv);
+
+ char rm_cmd[strlen(env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", env_dir);
+ r = system(rm_cmd); assert_zero(r);
+
+ r = toku_os_mkdir(env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert_zero(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+ int env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ r = env->open(env, env_dir, env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+ DB_TXN *create_txn = NULL;
+ r = env->txn_begin(env, NULL, &create_txn, 0); assert_zero(r);
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+ r = create_txn->commit(create_txn, 0); assert_zero(r);
+
+ test_read_write(env, db, DB_SERIALIZABLE, DB_LOCK_NOTGRANTED);
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/prelock-write-read.cc b/storage/tokudb/PerconaFT/src/tests/prelock-write-read.cc
new file mode 100644
index 00000000..a03fe7e4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/prelock-write-read.cc
@@ -0,0 +1,106 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that prelocking a read range that overlaps an existing write lock conflicts
+
+static int prelock_range(DBC *cursor, int left, int right) {
+ DBT key_left; dbt_init(&key_left, &left, sizeof left);
+ DBT key_right; dbt_init(&key_right, &right, sizeof right);
+ int r = cursor->c_set_bounds(cursor, &key_left, &key_right, true, 0);
+ return r;
+}
+
+static void test_write_read(DB_ENV *env, DB *db, uint32_t iso_flags, int expect_r) {
+ int r;
+
+ DB_TXN *txn_a = NULL;
+ r = env->txn_begin(env, NULL, &txn_a, iso_flags); assert_zero(r);
+ DB_TXN *txn_b = NULL;
+ r = env->txn_begin(env, NULL, &txn_b, iso_flags); assert_zero(r);
+
+ DBC *cursor_a = NULL;
+ r = db->cursor(db, txn_a, &cursor_a, DB_RMW); assert_zero(r);
+ DBC *cursor_b = NULL;
+ r = db->cursor(db, txn_b, &cursor_b, 0); assert_zero(r);
+
+ r = prelock_range(cursor_a, htonl(10), htonl(100)); assert_zero(r);
+ r = prelock_range(cursor_b, htonl(50), htonl(200)); assert(r == expect_r);
+
+ r = cursor_a->c_close(cursor_a); assert_zero(r);
+ r = cursor_b->c_close(cursor_b); assert_zero(r);
+
+ r = txn_a->commit(txn_a, 0); assert_zero(r);
+ r = txn_b->commit(txn_b, 0); assert_zero(r);
+}
+
+int test_main(int argc, char * const argv[]) {
+ int r;
+
+ const char *env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "prelocktest";
+
+ parse_args(argc, argv);
+
+ char rm_cmd[strlen(env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", env_dir);
+ r = system(rm_cmd); assert_zero(r);
+
+ r = toku_os_mkdir(env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert_zero(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+ int env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ r = env->open(env, env_dir, env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+ DB_TXN *create_txn = NULL;
+ r = env->txn_begin(env, NULL, &create_txn, 0); assert_zero(r);
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+ r = create_txn->commit(create_txn, 0); assert_zero(r);
+
+ test_write_read(env, db, DB_SERIALIZABLE, DB_LOCK_NOTGRANTED);
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/prelock-write-write.cc b/storage/tokudb/PerconaFT/src/tests/prelock-write-write.cc
new file mode 100644
index 00000000..5f1383e6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/prelock-write-write.cc
@@ -0,0 +1,106 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that prelocking a write range that overlaps a write lock conflicts
+
+static int prelock_range(DBC *cursor, int left, int right) {
+ DBT key_left; dbt_init(&key_left, &left, sizeof left);
+ DBT key_right; dbt_init(&key_right, &right, sizeof right);
+ int r = cursor->c_set_bounds(cursor, &key_left, &key_right, true, 0);
+ return r;
+}
+
+static void test_write_write(DB_ENV *env, DB *db, uint32_t iso_flags, int expect_r) {
+ int r;
+
+ DB_TXN *txn_a = NULL;
+ r = env->txn_begin(env, NULL, &txn_a, iso_flags); assert_zero(r);
+ DB_TXN *txn_b = NULL;
+ r = env->txn_begin(env, NULL, &txn_b, iso_flags); assert_zero(r);
+
+ DBC *cursor_a = NULL;
+ r = db->cursor(db, txn_a, &cursor_a, DB_RMW); assert_zero(r);
+ DBC *cursor_b = NULL;
+ r = db->cursor(db, txn_b, &cursor_b, DB_RMW); assert_zero(r);
+
+ r = prelock_range(cursor_a, htonl(10), htonl(100)); assert_zero(r);
+ r = prelock_range(cursor_b, htonl(50), htonl(200)); assert(r == expect_r);
+
+ r = cursor_a->c_close(cursor_a); assert_zero(r);
+ r = cursor_b->c_close(cursor_b); assert_zero(r);
+
+ r = txn_a->commit(txn_a, 0); assert_zero(r);
+ r = txn_b->commit(txn_b, 0); assert_zero(r);
+}
+
+int test_main(int argc, char * const argv[]) {
+ int r;
+
+ const char *env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "prelocktest";
+
+ parse_args(argc, argv);
+
+ char rm_cmd[strlen(env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", env_dir);
+ r = system(rm_cmd); assert_zero(r);
+
+ r = toku_os_mkdir(env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert_zero(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+ int env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ r = env->open(env, env_dir, env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+ DB_TXN *create_txn = NULL;
+ r = env->txn_begin(env, NULL, &create_txn, 0); assert_zero(r);
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+ r = create_txn->commit(create_txn, 0); assert_zero(r);
+
+ test_write_write(env, db, DB_SERIALIZABLE, DB_LOCK_NOTGRANTED);
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/print_engine_status.cc b/storage/tokudb/PerconaFT/src/tests/print_engine_status.cc
new file mode 100644
index 00000000..decad031
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/print_engine_status.cc
@@ -0,0 +1,177 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Purpose of this test is to verify the basic functioning
+ * of the engine status functions.
+ */
+
+
+#include "test.h"
+#include <db.h>
+#include "toku_time.h"
+
+static DB_ENV *env;
+
+#define FLAGS_NOLOG DB_INIT_LOCK|DB_INIT_MPOOL|DB_CREATE|DB_PRIVATE
+#define FLAGS_LOG FLAGS_NOLOG|DB_INIT_TXN|DB_INIT_LOG
+
+static int mode = S_IRWXU+S_IRWXG+S_IRWXO;
+
+static void test_shutdown(void);
+
+static void
+test_shutdown(void) {
+ int r;
+ r=env->close(env, 0); CKERR(r);
+ env = NULL;
+}
+
+static void
+setup (uint32_t flags) {
+ int r;
+ if (env)
+ test_shutdown();
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r=db_env_create(&env, 0);
+ CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, flags, mode);
+ CKERR(r);
+}
+
+
+static void
+print_raw(TOKU_ENGINE_STATUS_ROW row) {
+ printf("keyname is %s, type is %d, legend is %s\n",
+ row->keyname,
+ row->type,
+ row->legend);
+}
+
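+// format a time_t with ctime_r, then strip the trailing newline(s) so the result prints on one line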
+static void
+status_format_time(const time_t *timer, char *buf) {
+ ctime_r(timer, buf);
+ size_t len = strlen(buf);
+ assert(len < 26);
+ char end;
+
+ assert(len>=1);
+ end = buf[len-1];
+ while (end == '\n' || end == '\r') {
+ buf[len-1] = '\0';
+ len--;
+ assert(len>=1);
+ end = buf[len-1];
+ }
+}
+
+
+int
+test_main (int argc, char * const argv[]) {
+ uint64_t nrows;
+ uint64_t max_rows;
+ fs_redzone_state redzone_state;
+ uint64_t panic;
+ const int panic_string_len = 1024;
+ char panic_string[panic_string_len];
+
+ // char buf[bufsiz] = {'\0'};
+ parse_args(argc, argv);
+ setup(FLAGS_LOG);
+ env->txn_checkpoint(env, 0, 0, 0);
+
+ env->get_engine_status_num_rows(env, &max_rows);
+ TOKU_ENGINE_STATUS_ROW_S mystat[max_rows];
+ int r = env->get_engine_status (env, mystat, max_rows, &nrows, &redzone_state, &panic, panic_string, panic_string_len, TOKU_ENGINE_STATUS);
+ assert(r==0);
+
+ if (verbose) {
+ printf("First all the raw fields:\n");
+ for (uint64_t i = 0; i < nrows; i++) {
+ printf("%s ", mystat[i].keyname);
+ printf("%s ", mystat[i].columnname ? mystat[i].columnname : "(null)");
+ printf("%s ", mystat[i].legend);
+ printf("type=%d val = ", mystat[i].type);
+ switch(mystat[i].type) {
+ case FS_STATE:
+ printf("fs_state not supported yet, code is %" PRIu64 "\n", mystat[i].value.num);
+ break;
+ case UINT64:
+ printf("%" PRIu64 "\n", mystat[i].value.num);
+ break;
+ case CHARSTR:
+ printf("%s\n", mystat[i].value.str);
+ break;
+ case UNIXTIME:
+ {
+ char tbuf[26];
+ status_format_time((time_t*)&mystat[i].value.num, tbuf);
+ printf("%s\n", tbuf);
+ }
+ break;
+ case TOKUTIME:
+ {
+ double t = tokutime_to_seconds(mystat[i].value.num);
+ printf("%.6f\n", t);
+ }
+ break;
+ default:
+ printf("UNKNOWN STATUS TYPE:\n");
+ print_raw(&mystat[i]);
+ break;
+ }
+ }
+
+ printf("\n\n\n\n\nNow as reported by get_engine_status_text():\n\n");
+
+ int bufsiz = nrows * 128; // assume 128 characters per row
+ char buff[bufsiz];
+ r = env->get_engine_status_text(env, buff, bufsiz);
+ printf("%s", buff);
+
+ printf("\n\n\n\n\nFinally, print as reported by test utility print_engine_status()\n");
+
+ print_engine_status(env);
+
+ printf("That's all, folks.\n");
+ }
+ test_shutdown();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/progress.cc b/storage/tokudb/PerconaFT/src/tests/progress.cc
new file mode 100644
index 00000000..561da118
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/progress.cc
@@ -0,0 +1,445 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/*
+ - ydb layer test of progress report on commit, abort.
+ - test1:
+ create two txns
+ perform operations (inserts and deletes)
+ commit or abort inner txn
+ if abort, verify progress callback was called with correct args
+ if commit, verify progress callback was not called
+ commit or abort outer txn
+ verify progress callback was called with correct args
+
+ Note: inner loop ends with commit, so when outer loop completes,
+ it should be called for all operations performed by inner loop.
+
+   perform_ops (n) {
+       for i = 0 -> n-1 {
+           for j = 0 -> 1023
+               if (j & 0x20) op_delete
+               else insert
+       }
+   }
+
+ verify (n) {
+ verify that callback was called n times with correct args
+ }
+
+ test1:
+ for c0 = 0, 1 {
+ for c1 = 0, 1 {
+ begin txn0
+ perform_ops (txn0)
+ begin txn1
+           perform_ops (txn1)
+ if c1
+ abort txn1
+ verify (n)
+ else
+ commit txn1
+ verify (0)
+ }
+ if c0
+ abort txn0
+ verify (2n)
+ else
+ commit txn0
+ verify (2n)
+ }
+
+
+ - test2
+ - create empty dictionary
+ - begin txn
+ - lock empty dictionary (full range lock)
+ - abort
+ - verify that callback was called twice, first with stalled-on-checkpoint true, then with stalled-on-checkpoint false
+
+
+*/
+
+
+#define DICT_0 "dict_0.db"
+static DB_ENV *env = NULL;
+static DB_TXN *txn_parent = NULL;
+static DB_TXN *txn_child = NULL;
+static DB_TXN *txn_hold_dname_lock = NULL;
+static DB *db;
+static const char *dname = DICT_0;
+static DBT key;
+static DBT val;
+
+
+static void start_txn(void);
+static void commit_txn(int);
+static void open_db(void);
+static void close_db(void);
+static void insert(void);
+static void op_delete(void);
+static void
+start_env(void) {
+ assert(env==NULL);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ dname = DICT_0;
+
+ dbt_init(&key, "key", strlen("key")+1);
+ dbt_init(&val, "val", strlen("val")+1);
+
+ open_db();
+ close_db();
+}
+
+static void
+end_env(void) {
+ int r;
+ r=env->close(env, 0);
+ CKERR(r);
+ env = NULL;
+}
+
+static void
+start_txn_prevent_dname_lock(void) {
+ assert(env!=NULL);
+ assert(txn_hold_dname_lock==NULL);
+ int r;
+ r=env->txn_begin(env, 0, &txn_hold_dname_lock, 0);
+ CKERR(r);
+ DB *db2;
+
+ r = db_create(&db2, env, 0);
+ CKERR(r);
+
+ r=db2->open(db2, txn_hold_dname_lock, dname, 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = db2->close(db2, 0);
+}
+
+static void nopoll(TOKU_TXN_PROGRESS UU(progress), void *UU(extra)) {
+ assert(false);
+}
+
+static void
+commit_txn_prevent_dname_lock(void) {
+ assert(env!=NULL);
+ assert(txn_hold_dname_lock!=NULL);
+ int r;
+ r = txn_hold_dname_lock->commit_with_progress(txn_hold_dname_lock, 0, nopoll, NULL);
+ CKERR(r);
+ txn_hold_dname_lock = NULL;
+}
+
+static void
+start_txn(void) {
+ assert(env!=NULL);
+ int r;
+ if (!txn_parent) {
+ r=env->txn_begin(env, 0, &txn_parent, 0);
+ }
+ else {
+ assert(!txn_child);
+ r=env->txn_begin(env, txn_parent, &txn_child, 0);
+ }
+ CKERR(r);
+}
+
+struct progress_expect {
+ int num_calls;
+ uint8_t is_commit_expected;
+ uint8_t stalled_on_checkpoint_expected;
+ uint64_t min_entries_total_expected;
+ uint64_t last_entries_processed;
+};
+
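+// progress callback: each call must report exactly 1024 more entries processed (one inner
+// loop of perform_ops) and an entries_total at least as large as expected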
+static void poll(TOKU_TXN_PROGRESS progress, void *extra) {
+ struct progress_expect *CAST_FROM_VOIDP(info, extra);
+ info->num_calls++;
+ assert(progress->is_commit == info->is_commit_expected);
+ assert(progress->stalled_on_checkpoint == info->stalled_on_checkpoint_expected);
+ assert(progress->entries_total >= info->min_entries_total_expected);
+ assert(progress->entries_processed == 1024 + info->last_entries_processed);
+ info->last_entries_processed = progress->entries_processed;
+}
+
+//expect_number_polls is number of times polling function should be called.
+static void
+abort_txn(int expect_number_polls) {
+ assert(env!=NULL);
+ DB_TXN *txn;
+ bool child;
+ if (txn_child) {
+ txn = txn_child;
+ child = true;
+ }
+ else {
+ txn = txn_parent;
+ child = false;
+ }
+ assert(txn);
+
+ struct progress_expect extra = {
+ .num_calls = 0,
+ .is_commit_expected = 0,
+ .stalled_on_checkpoint_expected = 0,
+ .min_entries_total_expected = (uint64_t) expect_number_polls * 1024,
+ .last_entries_processed = 0
+ };
+
+ int r;
+ r=txn->abort_with_progress(txn, poll, &extra);
+ CKERR(r);
+ assert(extra.num_calls == expect_number_polls);
+ if (child)
+ txn_child = NULL;
+ else
+ txn_parent = NULL;
+}
+
+static void
+commit_txn(int expect_number_polls) {
+ assert(env!=NULL);
+ DB_TXN *txn;
+ bool child;
+ if (txn_child) {
+ txn = txn_child;
+ child = true;
+ }
+ else {
+ txn = txn_parent;
+ child = false;
+ }
+ assert(txn);
+ if (child)
+ assert(expect_number_polls == 0);
+
+ struct progress_expect extra = {
+ .num_calls = 0,
+ .is_commit_expected = 1,
+ .stalled_on_checkpoint_expected = 0,
+ .min_entries_total_expected = (uint64_t) expect_number_polls * 1024,
+ .last_entries_processed = 0
+ };
+
+ int r;
+ r=txn->commit_with_progress(txn, 0, poll, &extra);
+ CKERR(r);
+ assert(extra.num_calls == expect_number_polls);
+ if (child)
+ txn_child = NULL;
+ else
+ txn_parent = NULL;
+}
+
+static void
+open_db(void) {
+ assert(env!=NULL);
+ assert(db == NULL);
+
+ int r;
+
+ r = db_create(&db, env, 0);
+ CKERR(r);
+
+ r=db->open(db, NULL, dname, 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+}
+
+static void
+close_db(void) {
+ assert(env!=NULL);
+ assert(db != NULL);
+
+ int r;
+ r = db->close(db, 0);
+ CKERR(r);
+ db = NULL;
+}
+
+static void
+insert(void) {
+ assert(env!=NULL);
+ assert(db!=NULL);
+ DB_TXN *txn = txn_child ? txn_child : txn_parent;
+ assert(txn);
+
+ int r=db->put(db, txn,
+ &key,
+ &val,
+ 0);
+ CKERR(r);
+}
+
+static void
+op_delete(void) {
+ assert(env!=NULL);
+ assert(db!=NULL);
+ DB_TXN *txn = txn_child ? txn_child : txn_parent;
+ assert(txn);
+
+ int r=db->del(db, txn,
+ &key,
+ DB_DELETE_ANY);
+ CKERR(r);
+}
+
+static void
+perform_ops(int n) {
+ int i;
+ int j;
+ for (i = 0; i < n; i++) {
+ for (j = 0; j < 1024; j++) {
+ if (j & 0x20)
+ op_delete();
+ else
+ insert();
+ }
+ }
+}
+
+static void
+progress_test_1(int n, int commit) {
+ start_env();
+ open_db();
+ {
+ start_txn();
+ {
+ start_txn();
+ perform_ops(n);
+ abort_txn(n);
+ }
+ {
+ start_txn();
+ perform_ops(n);
+ commit_txn(0);
+ }
+ perform_ops(n);
+ if (commit)
+ commit_txn(2*n);
+ else
+ abort_txn(2*n);
+ }
+ close_db();
+ end_env();
+}
+
+static void
+abort_txn_stall_checkpoint(void) {
+ //We have disabled the norollback log fallback optimization.
+ //Checkpoint will not stall
+ assert(env!=NULL);
+ assert(txn_parent);
+ assert(!txn_child);
+
+ int r;
+ r=txn_parent->abort_with_progress(txn_parent, nopoll, NULL);
+ CKERR(r);
+ txn_parent = NULL;
+}
+
+static void
+abort_txn_nostall_checkpoint(void) {
+ assert(env!=NULL);
+ assert(txn_parent);
+ assert(!txn_child);
+
+ int r;
+ r=txn_parent->abort_with_progress(txn_parent, nopoll, NULL);
+ CKERR(r);
+ txn_parent = NULL;
+}
+
+
+static void
+lock(void) {
+ assert(env!=NULL);
+ assert(db!=NULL);
+ assert(txn_parent);
+ assert(!txn_child);
+
+ int r=db->pre_acquire_table_lock(db, txn_parent);
+ CKERR(r);
+}
+
+static void
+progress_test_2(void) {
+ start_env();
+ open_db();
+ start_txn();
+ start_txn_prevent_dname_lock();
+ lock();
+ commit_txn_prevent_dname_lock();
+ abort_txn_stall_checkpoint();
+ close_db();
+ end_env();
+}
+
+static void
+progress_test_3(void) {
+ start_env();
+ open_db();
+ start_txn();
+ lock();
+ abort_txn_nostall_checkpoint();
+ close_db();
+ end_env();
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ parse_args(argc, argv);
+ int commit;
+ for (commit = 0; commit <= 1; commit++) {
+ progress_test_1(4, commit);
+ }
+ progress_test_2();
+ progress_test_3();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/put-del-multiple-array-indexing.cc b/storage/tokudb/PerconaFT/src/tests/put-del-multiple-array-indexing.cc
new file mode 100644
index 00000000..69f533f0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/put-del-multiple-array-indexing.cc
@@ -0,0 +1,371 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that put_multiple inserts the correct rows into N dictionaries
+// verify that put_multiple locks the correct keys for N dictionaries
+
+const int max_rows_per_primary = 9;
+
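+// every block of (max_rows_per_primary+1) == 10 consecutive primary rows generates
+// 0+1+...+9 == 45 rows in each secondary dictionary, hence the formula below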
+static uint32_t
+get_total_secondary_rows(uint32_t num_primary) {
+ assert((num_primary % (max_rows_per_primary+1)) == 0);
+ return num_primary / (max_rows_per_primary+1) *
+ ( (max_rows_per_primary) * (max_rows_per_primary+1) / 2 );
+}
+
+static uint8_t
+get_num_keys(uint16_t i, uint8_t dbnum) {
+ return (i+dbnum) % (max_rows_per_primary + 1); // 0..9.. 10 choices
+}
+
+static uint16_t
+get_total_num_keys(uint16_t i, uint8_t num_dbs) {
+ uint16_t sum = 0;
+ for (uint8_t db = 0; db < num_dbs; ++db) {
+ sum += get_num_keys(i, db);
+ }
+ return sum;
+}
+
+static uint32_t
+get_key(uint16_t i, uint8_t dbnum, uint8_t which) {
+ uint32_t i32 = i;
+ uint32_t dbnum32 = dbnum;
+ uint32_t which32 = which;
+ uint32_t x = (dbnum32<<24) | (i32) | (which32<<8);
+ return x;
+}
+
+static void
+get_data(uint32_t *v, uint8_t i, uint8_t ndbs) {
+ int index = 0;
+ for (uint8_t dbnum = 0; dbnum < ndbs; dbnum++) {
+ for (uint8_t which = 0; which < get_num_keys(i, dbnum); ++which) {
+ v[index++] = get_key(i, dbnum, which);
+ }
+ }
+}
+
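+// generate-row callback: for secondary dictionary dbnum, expand primary row i into
+// get_num_keys(i, dbnum) secondary keys (see get_key), each with an empty value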
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ (void) src_val;
+ uint8_t dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+
+ assert(dbnum > 0); // Does not get called for primary.
+ assert(dest_db != src_db);
+
+ assert(src_key->size == 2);
+ uint16_t i = *(uint16_t*)src_key->data;
+ uint8_t num_keys = get_num_keys(i, dbnum);
+
+ toku_dbt_array_resize(dest_keys, num_keys);
+ if (dest_vals) {
+ toku_dbt_array_resize(dest_vals, num_keys);
+ }
+
+ for (uint8_t which = 0; which < num_keys; ++which) {
+ DBT *dest_key = &dest_keys->dbts[which];
+
+ assert(dest_key->flags == DB_DBT_REALLOC);
+ {
+ // Memory management
+ if (dest_key->ulen < sizeof(uint32_t)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(uint32_t));
+ dest_key->ulen = sizeof(uint32_t);
+ }
+ dest_key->size = sizeof(uint32_t);
+ }
+ *(uint32_t*)dest_key->data = get_key(i, dbnum, which);
+
+ if (dest_vals) {
+ DBT *dest_val = &dest_vals->dbts[which];
+ dest_val->flags = 0;
+ dest_val->data = nullptr;
+ dest_val->size = 0;
+ }
+ }
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_keys, NULL, src_key, src_data);
+}
+
+static void
+verify_locked(DB_ENV *env, DB *db, uint8_t dbnum, uint16_t i) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ if (dbnum == 0) {
+ DBT key; dbt_init(&key, &i, sizeof i);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); CKERR2(r, DB_LOCK_NOTGRANTED);
+ } else {
+ for (uint8_t which = 0; which < get_num_keys(i, dbnum); ++which) {
+ uint32_t k = get_key(i, dbnum, which);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); CKERR2(r, DB_LOCK_NOTGRANTED);
+ }
+ }
+ r = txn->abort(txn); assert_zero(r);
+}
+
+static void
+verify_seq_primary(DB_ENV *env, DB *db, int dbnum, int ndbs, int nrows) {
+ assert(dbnum==0);
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ uint16_t k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == i);
+
+ uint32_t total_rows = get_total_num_keys(i, ndbs);
+ assert(val.size == total_rows * sizeof (uint32_t));
+ uint32_t v[total_rows]; get_data(v, i, ndbs);
+ assert(memcmp(val.data, v, val.size) == 0);
+ }
+ assert(i == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_seq(DB_ENV *env, DB *db, uint8_t dbnum, uint8_t ndbs, uint16_t nrows_primary) {
+ assert(dbnum > 0);
+ assert(dbnum < ndbs);
+ uint32_t nrows = get_total_secondary_rows(nrows_primary);
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ uint16_t rows_found = 0;
+ uint16_t source_i = 0;
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ for (source_i = 0; source_i < nrows_primary; ++source_i) {
+ uint8_t num_keys = get_num_keys(source_i, dbnum);
+ for (uint8_t which = 0; which < num_keys; ++which) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ CKERR(r);
+ uint32_t k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == get_key(source_i, dbnum, which));
+ assert(val.size == 0);
+ rows_found++;
+ }
+ }
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ CKERR2(r, DB_NOTFOUND);
+ assert(rows_found == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify(DB_ENV *env, DB *db[], int ndbs, int nrows) {
+ verify_seq_primary(env, db[0], 0, ndbs, nrows);
+ for (int dbnum = 1; dbnum < ndbs; dbnum++)
+ verify_seq(env, db[dbnum], dbnum, ndbs, nrows);
+}
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ }
+ assert_zero(i);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_del(DB_ENV *env, DB *db[], int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_empty(env, db[dbnum]);
+}
+
+static void
+populate(DB_ENV *env, DB *db[], uint8_t ndbs, uint16_t nrows, bool del) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBT_ARRAY key_arrays[ndbs];
+ DBT_ARRAY val_arrays[ndbs];
+ for (uint8_t i = 0; i < ndbs; ++i) {
+ toku_dbt_array_init(&key_arrays[i], 1);
+ toku_dbt_array_init(&val_arrays[i], 1);
+ }
+ // populate
+ for (uint16_t i = 0; i < nrows; i++) {
+ uint32_t total_rows = get_total_num_keys(i, ndbs);
+ uint16_t k = i;
+ uint32_t v[total_rows]; get_data(v, i, ndbs);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ DBT pri_val; dbt_init(&pri_val, &v[0], sizeof v);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ if (del) {
+ r = env->del_multiple(env, db[0], txn, &pri_key, &pri_val, ndbs, db, key_arrays, flags);
+ } else {
+ r = env->put_multiple(env, db[0], txn, &pri_key, &pri_val, ndbs, db, key_arrays, val_arrays, flags);
+ }
+ assert_zero(r);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_locked(env, db[dbnum], dbnum, i);
+ }
+ for (uint8_t i = 0; i < ndbs; ++i) {
+ toku_dbt_array_destroy(&key_arrays[i]);
+ toku_dbt_array_destroy(&val_arrays[i]);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (uint8_t dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0); assert_zero(r);
+
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert_zero(r);
+
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ populate(env, db, ndbs, nrows, false);
+
+ verify(env, db, ndbs, nrows);
+
+ populate(env, db, ndbs, nrows, true);
+
+ verify_del(env, db, ndbs);
+
+    for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+        r = db[dbnum]->close(db[dbnum], 0); assert_zero(r);
+    }
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int ndbs = 16;
+ int nrows = 100;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+ //rows should be divisible by max_rows + 1 (so that we have an equal number of each type and we know the total)
+ if (nrows % (max_rows_per_primary+1) != 0) {
+ nrows += (max_rows_per_primary+1) - (nrows % (max_rows_per_primary+1));
+ }
+ assert(ndbs >= 0);
+ assert(ndbs < (1<<8) - 1);
+ assert(nrows >= 0);
+ assert(nrows < (1<<15)); // Leave plenty of room
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test(ndbs, nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/queries_with_deletes.cc b/storage/tokudb/PerconaFT/src/tests/queries_with_deletes.cc
new file mode 100644
index 00000000..7e9f8af9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/queries_with_deletes.cc
@@ -0,0 +1,196 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// This test verifies that queries that encounter a provisional delete at the end of a basement node work.
+// The issue is that when we read off the end of a basement node, the next basement node may not be available in
+// memory, so we need to release the ydb lock and try again. This test exercises that scenario by doing many deletes
+// with a small cachetable.
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+    // use a small cachetable (400KB after the 4x increase below)
+    uint32_t cachesize = 100*1024;
+    // as part of #4503 the cachetable size was arbitrarily increased;
+    // the idea is to keep it small enough that all of the data cannot
+    // fit in the cachetable, but big enough that we don't have excessive
+    // cachetable pressure
+ r = env->set_cachesize(env, 0, 4*cachesize, 1); CKERR(r);
+ r = env->set_lg_bsize(env, 4096); CKERR(r);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ {
+ DB_TXN *txna;
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
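+        // small node (4K) and basement node (1K) sizes so that leaves span many basement
+        // nodes and cursor scans frequently cross basement node boundaries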
+ r = db->set_pagesize(db, 4096);
+ CKERR(r);
+ r = db->set_readpagesize(db, 1024);
+ CKERR(r);
+ r = db->open(db, txna, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ r = txna->commit(txna, 0); CKERR(r);
+ }
+ if (verbose) printf("starting insertion of even elements\n");
+ //
+ // now insert a bunch of elements
+ //
+ DB_TXN* txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ for (uint32_t i = 0; i < cachesize; i++) {
+ DBT key,val;
+ uint64_t key_data = 2*i;
+ uint64_t val_data = 4*i;
+ r = db->put(
+ db,
+ txn,
+ dbt_init(&key, &key_data, sizeof(key_data)),
+ dbt_init(&val, &val_data, sizeof(val_data)),
+ 0
+ );
+ CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ // this transaction will read all even keys inserted above
+ DB_TXN* txn_first = NULL;
+ r = env->txn_begin(env, NULL, &txn_first, DB_TXN_SNAPSHOT);
+ CKERR(r);
+
+ if (verbose) printf("starting insertion of odd elements and deletion of even elements\n");
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ for (uint32_t i = 0; i < cachesize; i++) {
+ //
+ // insert odd values, and delete even values
+ //
+ DBT key,val;
+ uint64_t key_data = 2*i+1;
+ uint64_t val_data = 4*i+2;
+ dbt_init(&key, &key_data, sizeof(key_data));
+ dbt_init(&val, &val_data, sizeof(val_data));
+ r = db->put(
+ db,
+ txn,
+ &key,
+ &val,
+ 0
+ );
+ CKERR(r);
+
+ key_data = 2*i;
+ r = db->del(db, txn, &key, DB_DELETE_ANY);
+ CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ // this transaction will read all odd keys inserted in the second round
+ DB_TXN* txn_second = NULL;
+ r = env->txn_begin(env, NULL, &txn_second, DB_TXN_SNAPSHOT);
+ CKERR(r);
+
+ DBC* cursor_first = NULL;
+ DBC* cursor_second = NULL;
+ r = db->cursor(db, txn_first, &cursor_first, 0);
+ CKERR(r);
+ r = db->cursor(db, txn_second, &cursor_second, 0);
+ CKERR(r);
+
+ DBT key, val;
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ if (verbose) printf("starting cursor first query\n");
+ // now let's do the cursor reads and verify that all the data is read properly
+ for (uint32_t i = 0; i < cachesize; i++) {
+ r = cursor_first->c_get(cursor_first, &key, &val, DB_NEXT);
+ CKERR(r);
+ assert(key.size == 8);
+ assert(val.size == 8);
+ assert(*(uint64_t *)key.data == 2*i);
+ assert(*(uint64_t *)val.data == 4*i);
+ }
+ r = cursor_first->c_get(cursor_first, &key, &val, DB_NEXT);
+ CKERR2(r, DB_NOTFOUND);
+
+ if (verbose) printf("starting cursor second query\n");
+ // now let's do the cursor reads and verify that all the data is read properly
+ for (uint32_t i = 0; i < cachesize; i++) {
+ r = cursor_second->c_get(cursor_second, &key, &val, DB_NEXT);
+ CKERR(r);
+ assert(key.size == 8);
+ assert(val.size == 8);
+ assert(*(uint64_t *)key.data == 2*i+1);
+ assert(*(uint64_t *)val.data == 4*i+2);
+ }
+ r = cursor_second->c_get(cursor_second, &key, &val, DB_NEXT);
+ CKERR2(r, DB_NOTFOUND);
+
+ if (verbose) printf("cleaning up\n");
+
+ r = cursor_first->c_close(cursor_first);
+ CKERR(r);
+ r = cursor_second->c_close(cursor_second);
+ CKERR(r);
+
+ r = txn_first->commit(txn_first,0);
+ CKERR(r);
+ r = txn_second->commit(txn_second,0);
+ CKERR(r);
+
+ r = db->close(db, 0);
+ CKERR(r);
+ r = env->close(env, 0);
+ CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-2483.cc b/storage/tokudb/PerconaFT/src/tests/recover-2483.cc
new file mode 100644
index 00000000..5e556fe6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-2483.cc
@@ -0,0 +1,201 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the table lock log entry is handled
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+DB_ENV *env;
+DB_TXN *tid;
+DB *db;
+DBT key,data;
+int i;
+enum {N=10000};
+char *keys[N];
+char *vals[N];
+
+static void
+do_x1_shutdown (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ {
+ DB_TXN *oldest;
+ r=env->txn_begin(env, 0, &oldest, 0);
+ CKERR(r);
+ }
+
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ {
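+        // creating (and immediately closing) a loader logs the table-lock entry whose
+        // handling during recovery this test exercises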
+ DB_LOADER *loader;
+ DB *dbs[1] = {db};
+ uint32_t db_flags[1] = {DB_NOOVERWRITE};
+ uint32_t dbt_flags[1] = {0};
+ uint32_t loader_flags = 0;
+
+ r = env->create_loader(env, tid, &loader, NULL, 1, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+ r = loader->set_error_callback(loader, NULL, NULL);
+ CKERR(r);
+ r = loader->set_poll_function(loader, NULL, NULL);
+ CKERR(r);
+ // close the loader
+ r = loader->close(loader);
+ CKERR(r);
+ }
+ for (i=0; i<N; i++) {
+ r=db->put(db, tid, dbt_init(&key, keys[i], strlen(keys[i])+1), dbt_init(&data, vals[i], strlen(vals[i])+1), 0); assert(r==0);
+ if (i%500==499) {
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ }
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+
+ //leave db open (prevent local checkpoint)
+
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool UU(did_commit)) {
+ int r;
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ for (i=0; i<N; i++) {
+ r=db->get(db, tid, dbt_init(&key, keys[i], 1+strlen(keys[i])), dbt_init_malloc(&data), 0); assert(r==0);
+ assert(strcmp((char*)data.data, vals[i])==0);
+ toku_free(data.data);
+ data.data=0;
+ if (i%500==499) {
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ }
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ toku_free(data.data);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+bool do_commit=false, do_recover_committed=false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ srandom(0xDEADBEEF);
+ for (i=0; i<N; i++) {
+ char ks[100]; snprintf(ks, sizeof(ks), "k%09ld.%d", random(), i);
+ char vs[1000]; snprintf(vs, sizeof(vs), "v%d.%0*d", i, (int)(sizeof(vs)-100), i);
+ keys[i]=toku_strdup(ks);
+ vals[i]=toku_strdup(vs);
+ }
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown();
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ }
+ for (i=0; i<N; i++) {
+ toku_free(keys[i]);
+ toku_free(vals[i]);
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-3113.cc b/storage/tokudb/PerconaFT/src/tests/recover-3113.cc
new file mode 100644
index 00000000..82b11b6c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-3113.cc
@@ -0,0 +1,178 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/stat.h>
+
+
+/*****************
+ * Purpose: Verify fix for 3113
+ * Bug: Rollback log is checkpointed along with other cachefiles,
+ * but system crashes before checkpoint_end is written to recovery log.
+ * When recovery runs, it uses latest rollback log, which is out of synch
+ * with recovery log. Latest version of rollback log would be correct for
+ * last checkpoint if it completed, but version of rollback log needed
+ * is for last complete checkpoint.
+ * Fix: When opening rollback log for recovery, do not use latest, but use
+ * latest that is no newer than last complete checkpoint.
+ * Test: begin txn
+ * insert
+ * commit
+ * complete checkpoint (no live txns in checkpoint)
+ * begin txn
+ * insert
+ * begin checkpoint (txn in checkpointed rollback log)
+ * crash using callback2 (just before checkpoint_end is written to disk)
+ * attempt to recover, should crash with 3113
+ */
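+// Typical usage is two runs of this binary: --test builds the state above and
+// hard-crashes inside the checkpoint callback; --recover then reopens the
+// environment with DB_RECOVER (see run_recover below).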
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const char *namea="a.db";
+static void checkpoint_callback_2(void * UU(extra));
+static DB_ENV *env;
+static bool do_test=false, do_recover=false;
+
+static void
+run_test(void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ // txn_begin; insert <a,a>; txn_abort
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "a", 2);
+ r = db->put(db, txn, &k, &v, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+
+ // checkpoint, no live txns in rollback log
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "b", 2);
+ dbt_init(&v, "b", 2);
+ r = db->put(db, txn, &k, &v, 0); CKERR(r);
+ }
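+    // this transaction is deliberately left open so the checkpoint below
+    // records it as a live (xstillopen) transaction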
+
+ // cause crash at next checkpoint, after xstillopen written, before checkpoint_end is written
+ db_env_set_checkpoint_callback2(checkpoint_callback_2, NULL);
+
+ // checkpoint, putting xstillopen in recovery log (txn is still active)
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+}
+
+
+static void checkpoint_callback_2(void * UU(extra)) {
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover (void) {
+ int r;
+
+ // Recovery starts from oldest_living_txn, which is older than any inserts done in run_test,
+ // so recovery always runs over the entire log.
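+    // If the 3113 bug were present, this open would pair the recovery log with
+    // a too-new rollback log and recovery would fail; with the fix it must succeed.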
+
+ // run recovery
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+
+}
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ char * cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+
+
+int
+test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+
+ if (do_test)
+ run_test();
+ else if (do_recover)
+ run_recover();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-5146.cc b/storage/tokudb/PerconaFT/src/tests/recover-5146.cc
new file mode 100644
index 00000000..49137976
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-5146.cc
@@ -0,0 +1,180 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/stat.h>
+
+
+/*****************
+ * Purpose: Verify fix for 5146.
+ * This test exercises recovery of a prepared (XA) transaction when the
+ * system crashes during a later checkpoint, before checkpoint_end reaches
+ * the recovery log.
+ * Test: begin txn
+ *           insert
+ *           prepare (gid[0] = 42)
+ *       complete checkpoint (prepared txn is captured in the rollback log)
+ *       begin second txn
+ *           insert
+ *       begin checkpoint (second txn still live, so xstillopen is logged)
+ *       crash using callback2 (just before checkpoint_end is written to disk)
+ *       recover: txn_recover must return the prepared txn, which is then
+ *       committed
+ */
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const char *namea="a.db";
+static DB_ENV *env;
+static bool do_test=false, do_recover=false;
+
+static void checkpoint_callback_2(void * UU(extra)) {
+ toku_hard_crash_on_purpose();
+}
+
+
+static void
+run_test(void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "a", 2);
+ r = db->put(db, txn, &k, &v, 0); CKERR(r);
+ uint8_t gid[DB_GID_SIZE];
+ memset(gid, 0, DB_GID_SIZE);
+ gid[0]=42;
+ r = txn->prepare(txn, gid, 0); CKERR(r);
+ }
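+    // the prepared transaction is intentionally left unresolved here; after the
+    // crash, run_recover() must get it back from txn_recover() and commit it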
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ DB_TXN *txn2;
+ {
+ r = env->txn_begin(env, NULL, &txn2, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "b", 2);
+ dbt_init(&v, "b", 2);
+ r = db->put(db, txn2, &k, &v, 0); CKERR(r);
+ }
+
+ // cause crash at next checkpoint, after xstillopen written, before checkpoint_end is written
+ db_env_set_checkpoint_callback2(checkpoint_callback_2, NULL);
+
+ // checkpoint, putting xstillopen in recovery log (txn is still active)
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+}
+
+static void run_recover (void) {
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ // recover the prepared transaction and commit it
+ DB_PREPLIST l[1];
+ long count=-1;
+ CKERR(env->txn_recover(env, l, 1, &count, DB_FIRST));
+ printf("%s:%d count=%ld\n", __FILE__, __LINE__, count);
+ assert(count==1);
+ assert(l[0].gid[0]==42);
+ r = l->txn->commit(l->txn, 0);
+ CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+
+}
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ char * cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+
+
+int
+test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+
+ if (do_test)
+ run_test();
+ else if (do_recover)
+ run_recover();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fcreate-fdelete-fcreate.cc b/storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fcreate-fdelete-fcreate.cc
new file mode 100644
index 00000000..1cd2e1db
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fcreate-fdelete-fcreate.cc
@@ -0,0 +1,165 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// fcreate, fdelete, fcreate after a checkpoint
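+// The --test run checkpoints, creates a.db, removes it, and creates it again,
+// then hard-crashes; the --recover run replays the log and verifies that the
+// final incarnation of a.db can still be opened.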
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *db;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // checkpoint
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ // create
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ // delete
+ r = env->dbremove(env, NULL, namea, NULL, 0); CKERR(r);
+
+ // create
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ uint32_t dbflags;
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+ r = db->get_flags(db, &dbflags); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+	    fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover | --recover-only | --no-recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char *const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover_only();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fopen-abort.cc b/storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fopen-abort.cc
new file mode 100644
index 00000000..b3833005
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fopen-abort.cc
@@ -0,0 +1,249 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test verifies that dbs created and populated after a checkpoint are recovered correctly; here --test exercises the abort path (txn->abort before the crash)
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void run_test (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *dba, *dbb;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ r = dbb->put(dbb, txn, &b, &a, 0); CKERR(r);
+ }
+ //printf("opened\n");
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (bool did_commit) {
+ DB_ENV *env;
+ DB *dba, *dbb;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ uint32_t dbflags;
+ dbflags = 0;
+ r = dba->get_flags(dba, &dbflags); CKERR(r);
+ assert(dbflags == 0);
+
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ dbflags = 0;
+ r = dbb->get_flags(dbb, &dbflags); CKERR(r);
+ assert(dbflags == 0);
+
+ DBT aa, ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ DBT ba, bb;
+ dbt_init(&ba, NULL, 0);
+ dbt_init(&bb, NULL, 0);
+ DB_TXN *txn;
+ DBC *ca,*cb;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ r = dbb->cursor(dbb, txn, &cb, 0); CKERR(r);
+    int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); // checked below: 0 or DB_NOTFOUND
+    int rb = cb->c_get(cb, &ba, &bb, DB_FIRST);
+ if (did_commit) {
+ assert(ra==0);
+ assert(rb==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ assert(ba.size==2);
+ assert(bb.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+ assert(memcmp(ab.data, &b, 2)==0);
+        assert(memcmp(ba.data, &b, 2)==0);
+ assert(memcmp(bb.data, &a, 2)==0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ assert(cb->c_get(cb, &ba, &bb, DB_NEXT) == DB_NOTFOUND);
+ fprintf(stderr, "Both verified. Yay!\n");
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ assert(rb==DB_NOTFOUND);
+ fprintf(stderr, "Neither present. Yay!\n");
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = cb->c_close(cb); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void run_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+}
+
+const char *cmd;
+
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0 || strcmp(argv[0], "--test") == 0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+	    fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted | --recover-only | --no-recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_commit) {
+ run_test(true, false);
+ } else if (do_abort) {
+ run_test(false, true);
+ } else if (do_recover_committed) {
+ run_recover(true);
+ } else if (do_recover_aborted) {
+ run_recover(false);
+ } else if (do_recover_only) {
+ run_recover_only();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fopen-commit.cc b/storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fopen-commit.cc
new file mode 100644
index 00000000..95da1d82
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-checkpoint-fopen-commit.cc
@@ -0,0 +1,249 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test verifies that dbs created and populated after a checkpoint are recovered correctly; here --test exercises the commit path (txn->commit before the crash)
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void run_test (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *dba, *dbb;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ r = dbb->put(dbb, txn, &b, &a, 0); CKERR(r);
+ }
+ //printf("opened\n");
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (bool did_commit) {
+ DB_ENV *env;
+ DB *dba, *dbb;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ uint32_t dbflags;
+ dbflags = 0;
+ r = dba->get_flags(dba, &dbflags); CKERR(r);
+ assert(dbflags == 0);
+
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ dbflags = 0;
+ r = dbb->get_flags(dbb, &dbflags); CKERR(r);
+ assert(dbflags == 0);
+
+ DBT aa, ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ DBT ba, bb;
+ dbt_init(&ba, NULL, 0);
+ dbt_init(&bb, NULL, 0);
+ DB_TXN *txn;
+ DBC *ca,*cb;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ r = dbb->cursor(dbb, txn, &cb, 0); CKERR(r);
+    int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); // checked below: 0 or DB_NOTFOUND
+    int rb = cb->c_get(cb, &ba, &bb, DB_FIRST);
+ if (did_commit) {
+ assert(ra==0);
+ assert(rb==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ assert(ba.size==2);
+ assert(bb.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+ assert(memcmp(ab.data, &b, 2)==0);
+        assert(memcmp(ba.data, &b, 2)==0);
+ assert(memcmp(bb.data, &a, 2)==0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ assert(cb->c_get(cb, &ba, &bb, DB_NEXT) == DB_NOTFOUND);
+ fprintf(stderr, "Both verified. Yay!\n");
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ assert(rb==DB_NOTFOUND);
+ fprintf(stderr, "Neither present. Yay!\n");
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = cb->c_close(cb); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void run_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+}
+
+const char *cmd;
+
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+	    fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted | --recover-only | --no-recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_commit) {
+ run_test(true, false);
+ } else if (do_abort) {
+ run_test(false, true);
+ } else if (do_recover_committed) {
+ run_recover(true);
+ } else if (do_recover_aborted) {
+ run_recover(false);
+ } else if (do_recover_only) {
+ run_recover_only();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-child-rollback.cc b/storage/tokudb/PerconaFT/src/tests/recover-child-rollback.cc
new file mode 100644
index 00000000..ea4a8294
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-child-rollback.cc
@@ -0,0 +1,117 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test stresses recovery of child rollback logs:
+// We spawn a number of update threads (cli_args->num_update_threads); each one
+// constantly updates random values inside transactions that are prepared
+// (do_prepare) and wrapped in a persistent parent transaction, so parent and
+// child rollback logs both see traffic.
+// Checkpoints are taken every second (checkpointing_period = 1), the test run
+// crashes on purpose at the end of the stress period, and the recover run must
+// then replay the log cleanly.
+//
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - one (or more) thread(s) constantly updating random values, wrapped in a persistent parent transaction.
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_update_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ // make the guy that updates the db
+ for (int i = 0; i < cli_args->num_update_threads; ++i) {
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_op;
+ myargs[i].do_prepare = true;
+ myargs[i].wrap_in_parent = true;
+ }
+
+ run_workers(myargs, num_threads, cli_args->num_seconds, true, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ args.num_seconds = 5;
+ //args.txn_size = 64; // 100 * 256 is more than enough to spill (4096) byte rollback nodes for parent and child.
+ //args.val_size = 512; // Large values to overflow a rollback log node fast.
+ //args.env_args.node_size = 4*1024*1024; // Large nodes to prevent spending much time
+ //args.env_args.basement_node_size = 128*1024; // Large nodes to prevent spending much time
+ args.env_args.checkpointing_period = 1;
+ parse_stress_test_args(argc, argv, &args);
+ if (args.do_test_and_crash) {
+ stress_test_main(&args);
+ }
+ if (args.do_recover) {
+ stress_recover(&args);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-compare-db-descriptor.cc b/storage/tokudb/PerconaFT/src/tests/recover-compare-db-descriptor.cc
new file mode 100644
index 00000000..249d26d2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-compare-db-descriptor.cc
@@ -0,0 +1,330 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the comparison function gets a valid db object pointer and that its cmp_descriptor survives recovery
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+char descriptor_contents[] = "Spoon full of sugar";
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static int my_compare(DB *UU(db), const DBT *a, const DBT *b) {
+ assert(db);
+ assert(db->cmp_descriptor);
+ assert(db->cmp_descriptor->dbt.size == sizeof(descriptor_contents));
+ assert(memcmp(db->cmp_descriptor->dbt.data, descriptor_contents, sizeof(descriptor_contents)) == 0);
+
+ assert(a->size == b->size);
+ return memcmp(a->data, b->data, a->size);
+}
+
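+// change_descriptor() installs descriptor_contents via DB_UPDATE_CMP_DESCRIPTOR;
+// my_compare() above asserts that the same bytes are visible through
+// db->cmp_descriptor, including after recovery.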
+static void
+change_descriptor(DB_ENV* env, DB* db) {
+ DBT descriptor;
+ dbt_init(&descriptor, descriptor_contents, sizeof(descriptor_contents));
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db->change_descriptor(db, txn_desc, &descriptor, DB_UPDATE_CMP_DESCRIPTOR); CKERR(chk_r); }
+ });
+}
+
+static void
+do_x1_shutdown (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char datadir[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(datadir, 2, TOKU_TEST_FILENAME, "data"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ DB_ENV *env;
+ DB *dba, *dbb;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_data_dir(env, "data"); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ change_descriptor(env, dba);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn;
+ change_descriptor(env, dbb);
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ r = dba->put(dba, txn, &b, &a, 0); CKERR(r);
+ r = dbb->put(dbb, txn, &b, &a, 0); CKERR(r);
+ }
+ //printf("opened\n");
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool did_commit) {
+ DB_ENV *env;
+ DB *dba, *dbb;
+ int r;
+ char datadir[TOKU_PATH_MAX+1];
+ toku_path_join(datadir, 2, TOKU_TEST_FILENAME, "data");
+ toku_os_recursive_delete(datadir);
+ r = toku_os_mkdir(datadir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_data_dir(env, "data"); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+    r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT aa, ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ DBT ba, bb;
+ dbt_init(&ba, NULL, 0);
+ dbt_init(&bb, NULL, 0);
+ DB_TXN *txn;
+ DBC *ca,*cb;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ r = dbb->cursor(dbb, txn, &cb, 0); CKERR(r);
+    int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); // checked below: 0 or DB_NOTFOUND
+    int rb = cb->c_get(cb, &ba, &bb, DB_FIRST);
+ if (did_commit) {
+ assert(ra==0);
+ assert(rb==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ assert(ba.size==2);
+ assert(bb.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+ assert(memcmp(ab.data, &b, 2)==0);
+        assert(memcmp(ba.data, &b, 2)==0);
+ assert(memcmp(bb.data, &a, 2)==0);
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == 0);
+ assert(aa.size == 2 && ab.size == 2 && memcmp(aa.data, b, 2) == 0 && memcmp(ab.data, a, 2) == 0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ assert(cb->c_get(cb, &ba, &bb, DB_NEXT) == DB_NOTFOUND);
+ fprintf(stderr, "Both verified. Yay!\n");
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ assert(rb==DB_NOTFOUND);
+ fprintf(stderr, "Neither present. Yay!\n");
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = cb->c_close(cb); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+#if 0
+
+static void
+do_test_internal (bool commit)
+{
+ pid_t pid;
+ if (0 == (pid=fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--commit" : "--abort", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));
+ assert(WIFSIGNALED(status) && WTERMSIG(status)==SIGABRT);
+ }
+    // Now find out what happened
+
+ if (0 == (pid = fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--recover-committed" : "--recover-aborted", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("recovery exited=%d\n", WIFEXITED(status));
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+}
+
+static void
+do_test (void) {
+ do_test_internal(true);
+ do_test_internal(false);
+}
+
+#endif
+
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+	    fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted | --recover-only | --no-recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown (true, false);
+ } else if (do_abort) {
+ do_x1_shutdown (false, false);
+ } else if (do_explicit_abort) {
+ do_x1_shutdown(false, true);
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ } else if (do_recover_aborted) {
+ do_x1_recover(false);
+ } else if (do_recover_only) {
+ do_x1_recover_only();
+ } else if (do_no_recover) {
+ do_x1_no_recover();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-compare-db.cc b/storage/tokudb/PerconaFT/src/tests/recover-compare-db.cc
new file mode 100644
index 00000000..02f2cbdc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-compare-db.cc
@@ -0,0 +1,306 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the comparison function gets a valid db object pointer
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static int my_compare(DB *UU(db), const DBT *a, const DBT *b) {
+ assert(db);
+ assert(a->size == b->size);
+ return memcmp(a->data, b->data, a->size);
+}
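+// Note: the comparator cannot be persisted in the environment, so do_x1_recover()
+// must call set_default_bt_compare() again before opening with DB_RECOVER;
+// recovery replays the logged inserts through this comparator.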
+
+static void
+do_x1_shutdown (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *dba, *dbb;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ r = dba->put(dba, txn, &b, &a, 0); CKERR(r);
+ r = dbb->put(dbb, txn, &b, &a, 0); CKERR(r);
+ }
+ //printf("opened\n");
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool did_commit) {
+ DB_ENV *env;
+ DB *dba, *dbb;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+    r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT aa, ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ DBT ba, bb;
+ dbt_init(&ba, NULL, 0);
+ dbt_init(&bb, NULL, 0);
+ DB_TXN *txn;
+ DBC *ca,*cb;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ r = dbb->cursor(dbb, txn, &cb, 0); CKERR(r);
+    int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); // checked below: 0 or DB_NOTFOUND
+    int rb = cb->c_get(cb, &ba, &bb, DB_FIRST);
+ if (did_commit) {
+ assert(ra==0);
+ assert(rb==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ assert(ba.size==2);
+ assert(bb.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+ assert(memcmp(ab.data, &b, 2)==0);
+        assert(memcmp(ba.data, &b, 2)==0);
+ assert(memcmp(bb.data, &a, 2)==0);
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == 0);
+ assert(aa.size == 2 && ab.size == 2 && memcmp(aa.data, b, 2) == 0 && memcmp(ab.data, a, 2) == 0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ assert(cb->c_get(cb, &ba, &bb, DB_NEXT) == DB_NOTFOUND);
+ fprintf(stderr, "Both verified. Yay!\n");
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ assert(rb==DB_NOTFOUND);
+ fprintf(stderr, "Neither present. Yay!\n");
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = cb->c_close(cb); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+#if 0
+
+static void
+do_test_internal (bool commit)
+{
+ pid_t pid;
+ if (0 == (pid=fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--commit" : "--abort", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));
+ assert(WIFSIGNALED(status) && WTERMSIG(status)==SIGABRT);
+ }
+    // Now find out what happened
+
+ if (0 == (pid = fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--recover-committed" : "--recover-aborted", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("recovery exited=%d\n", WIFEXITED(status));
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+}
+
+static void
+do_test (void) {
+ do_test_internal(true);
+ do_test_internal(false);
+}
+
+#endif
+
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+	    fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted | --recover-only | --no-recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown (true, false);
+ } else if (do_abort) {
+ do_x1_shutdown (false, false);
+ } else if (do_explicit_abort) {
+ do_x1_shutdown(false, true);
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ } else if (do_recover_aborted) {
+ do_x1_recover(false);
+ } else if (do_recover_only) {
+ do_x1_recover_only();
+ } else if (do_no_recover) {
+ do_x1_no_recover();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-abort.cc b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-abort.cc
new file mode 100644
index 00000000..425c12e1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-abort.cc
@@ -0,0 +1,285 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of a delete multiple log entry
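+//
+// test plan:
+//   --test:    put_multiple rows into ndbs databases and commit, take a
+//              checkpoint, del_multiple the same rows in a transaction that
+//              is never committed, then crash on purpose
+//   --recover: run recovery (DB_RECOVER) and verify with a cursor that every
+//              row is still present, i.e. the uncommitted deletes were undone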
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(i + dbnum);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
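+// row generator used by put_multiple/del_multiple in this test: each
+// destination database stores its index (dbnum) in its descriptor, the
+// generated key for db[dbnum] is the dbnum'th int of the primary value, and
+// only db[0] keeps a non-empty value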
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+ assert(src_db == NULL);
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_val->size / sizeof (int));
+
+ int *pri_data = (int *) src_val->data;
+
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->size = sizeof (int);
+ dest_key->data = &pri_data[dbnum];
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->size = sizeof (int);
+ dest_key->data = toku_realloc(dest_key->data, dest_key->size);
+ memcpy(dest_key->data, &pri_data[dbnum], dest_key->size);
+ break;
+ default:
+ abort();
+ }
+
+ if (dest_val) {
+ switch (dest_val->flags) {
+ case 0:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = src_val->data;
+ } else {
+ dest_val->size = 0;
+ }
+ break;
+ case DB_DBT_REALLOC:
+ abort();
+ default:
+ abort();
+ }
+ }
+
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_keys, NULL, src_key, src_data);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0);
+ assert_zero(r);
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ DBT pri_val; dbt_init(&pri_val, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ DBT vals[ndbs]; memset(vals, 0, sizeof vals);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_put_multiple_test_no_array(env, NULL, txn, &pri_key, &pri_val, ndbs, db, keys, vals, flags);
+ assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); assert_zero(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT pri_data; dbt_init(&pri_data, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_del_multiple_test_no_array(env, NULL, txn, &pri_key, &pri_data, ndbs, db, keys, flags);
+ assert_zero(r);
+ }
+
+ toku_hard_crash_on_purpose();
+}
+
+static void
+verify_seq(DB_ENV *env, DB *db, int dbnum, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == get_key(i, dbnum));
+
+ if (dbnum == 0) {
+ assert(val.size == ndbs * sizeof (int));
+ int v[ndbs]; get_data(v, i, ndbs);
+ assert(memcmp(val.data, v, val.size) == 0);
+ } else
+ assert(val.size == 0);
+ }
+ assert(i == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_all(DB_ENV *env, int ndbs, int nrows) {
+ int r;
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ verify_seq(env, db, dbnum, ndbs, nrows);
+ r = db->close(db, 0);
+ assert_zero(r);
+ }
+}
+
+static void
+run_recover(int ndbs, int nrows) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ verify_all(env, ndbs, nrows);
+ r = env->close(env, 0); assert_zero(r);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ bool do_test = false;
+ bool do_recover = false;
+ int ndbs = 2;
+ int nrows = 1;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test)
+ run_test(ndbs, nrows);
+ if (do_recover)
+ run_recover(ndbs, nrows);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-srcdb-fdelete-all.cc b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-srcdb-fdelete-all.cc
new file mode 100644
index 00000000..75479cb6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple-srcdb-fdelete-all.cc
@@ -0,0 +1,285 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of a delete multiple log entry
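+//
+// test plan:
+//   --test:    put_multiple rows into ndbs databases (with db[0] acting as
+//              the source database) and commit, take a checkpoint, then in a
+//              single transaction del_multiple the rows, close every database
+//              and dbremove it, commit, and crash on purpose
+//   --recover: run recovery and verify that each re-created database is empty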
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(i + dbnum);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = NULL;
+ if (dest_vals) {
+ toku_dbt_array_resize(dest_vals, 1);
+ dest_val = &dest_vals->dbts[0];
+ }
+
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+ assert(src_db != NULL);
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_val->size / sizeof (int));
+
+ int *pri_data = (int *) src_val->data;
+
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->size = sizeof (int);
+ dest_key->data = &pri_data[dbnum];
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->size = sizeof (int);
+ dest_key->data = toku_realloc(dest_key->data, dest_key->size);
+ memcpy(dest_key->data, &pri_data[dbnum], dest_key->size);
+ break;
+ default:
+ abort();
+ }
+
+ if (dest_val) {
+ switch (dest_val->flags) {
+ case 0:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = src_val->data;
+ } else {
+ dest_val->size = 0;
+ }
+ break;
+ case DB_DBT_REALLOC:
+ abort();
+ default:
+ abort();
+ }
+ }
+
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_keys, NULL, src_key, src_data);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0);
+ assert_zero(r);
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ DBT pri_val; dbt_init(&pri_val, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ DBT vals[ndbs]; memset(vals, 0, sizeof vals);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_put_multiple_test_no_array(env, ndbs > 0 ? db[0] : NULL, txn, &pri_key, &pri_val, ndbs, db, keys, vals, flags);
+ assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); assert_zero(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT pri_data; dbt_init(&pri_data, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_del_multiple_test_no_array(env, ndbs > 0 ? db[0] : NULL, txn, &pri_key, &pri_data, ndbs, db, keys, flags);
+ assert_zero(r);
+ }
+
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db[dbnum]->close(db[dbnum], 0); assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = env->dbremove(env, txn, dbname, NULL, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ assert(r == DB_NOTFOUND);
+
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_all(DB_ENV *env, int ndbs) {
+ int r;
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ verify_empty(env, db);
+ r = db->close(db, 0);
+ assert_zero(r);
+ }
+}
+
+static void
+run_recover(int ndbs, int UU(nrows)) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ verify_all(env, ndbs);
+ r = env->close(env, 0); assert_zero(r);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ bool do_test = false;
+ bool do_recover = false;
+ int ndbs = 2;
+ int nrows = 1;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test)
+ run_test(ndbs, nrows);
+ if (do_recover)
+ run_recover(ndbs, nrows);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-del-multiple.cc b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple.cc
new file mode 100644
index 00000000..9f4b1cd9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-del-multiple.cc
@@ -0,0 +1,277 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of a delete multiple log entry
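+//
+// test plan:
+//   --test:    put_multiple rows into ndbs databases and commit, take a
+//              checkpoint, del_multiple the same rows and commit, then crash
+//              on purpose
+//   --recover: run recovery and verify that every database is empty, i.e. the
+//              committed deletes were replayed from the log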
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(i + dbnum);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = NULL;
+ if (dest_vals) {
+ toku_dbt_array_resize(dest_vals, 1);
+ dest_val = &dest_vals->dbts[0];
+ }
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+ assert(src_db == NULL);
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_val->size / sizeof (int));
+
+ int *pri_data = (int *) src_val->data;
+
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->size = sizeof (int);
+ dest_key->data = &pri_data[dbnum];
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->size = sizeof (int);
+ dest_key->data = toku_realloc(dest_key->data, dest_key->size);
+ memcpy(dest_key->data, &pri_data[dbnum], dest_key->size);
+ break;
+ default:
+ abort();
+ }
+
+ if (dest_val) {
+ switch (dest_val->flags) {
+ case 0:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = src_val->data;
+ } else {
+ dest_val->size = 0;
+ }
+ break;
+ case DB_DBT_REALLOC:
+ abort();
+ default:
+ abort();
+ }
+ }
+
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_keys, NULL, src_key, src_data);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0);
+ assert_zero(r);
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ DBT pri_val; dbt_init(&pri_val, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ DBT vals[ndbs]; memset(vals, 0, sizeof vals);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_put_multiple_test_no_array(env, NULL, txn, &pri_key, &pri_val, ndbs, db, keys, vals, flags);
+ assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); assert_zero(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT pri_data; dbt_init(&pri_data, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_del_multiple_test_no_array(env, NULL, txn, &pri_key, &pri_data, ndbs, db, keys, flags);
+ assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ assert(r == DB_NOTFOUND);
+
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_all(DB_ENV *env, int ndbs) {
+ int r;
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ verify_empty(env, db);
+ r = db->close(db, 0);
+ assert_zero(r);
+ }
+}
+
+static void
+run_recover(int ndbs, int UU(nrows)) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ verify_all(env, ndbs);
+ r = env->close(env, 0); assert_zero(r);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ bool do_test = false;
+ bool do_recover = false;
+ int ndbs = 2;
+ int nrows = 1;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test)
+ run_test(ndbs, nrows);
+ if (do_recover)
+ run_recover(ndbs, nrows);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-delboth-after-checkpoint.cc b/storage/tokudb/PerconaFT/src/tests/recover-delboth-after-checkpoint.cc
new file mode 100644
index 00000000..7b6d7b7c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-delboth-after-checkpoint.cc
@@ -0,0 +1,247 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test delboth commit after checkpoint
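+//
+// test plan:
+//   --commit (or --test): insert 256 (i,i) pairs and commit, take a
+//                         checkpoint, delete key 128 and commit, then crash
+//                         on purpose
+//   --abort:              same, but crash with the delete transaction still open
+//   --explicit-abort:     same, but abort the delete and fsync the log before crashing
+//   --recover-committed:  run recovery and verify every key except 128 is present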
+
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+
+static void
+run_test (bool do_commit, bool do_abort) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ // insert (i,i) pairs
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ for (int i=0; i<256; i++) {
+ unsigned char c = (unsigned char) i;
+ DBT k = {.data=&c, .size=sizeof c};
+ DBT v = {.data=&c, .size=sizeof c};
+ r = dba->put(dba, txn, &k, &v, 0); CKERR(r);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ // delete (128,128)
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ unsigned char c = 128;
+ DBT k = {.data=&c, .size=sizeof c};
+ r = dba->del(dba, txn, &k, 0); CKERR(r);
+ }
+
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+run_recover (bool UU(did_commit)) {
+ DB_ENV *env;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // verify all but (128,128) exist
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBC *ca;
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ int i;
+ for (i=0; ; i++) {
+ if (i == 128)
+ continue;
+ DBT k,v;
+ dbt_init(&k, NULL, 0);
+ dbt_init(&v, NULL, 0);
+ r = ca->c_get(ca, &k, &v, DB_NEXT);
+ if (r != 0)
+ break;
+ assert(k.size == 1 && v.size == 1);
+ unsigned char kk, vv;
+ memcpy(&kk, k.data, k.size);
+ memcpy(&vv, v.data, v.size);
+ assert(kk == i);
+ assert(vv == i);
+ }
+ assert(i == 256);
+
+ r = ca->c_close(ca); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = dba->close(dba, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+run_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
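+    // opening the crashed environment without DB_RECOVER must fail with DB_RUNRECOVERY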
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ run_test (true, false);
+ } else if (do_abort) {
+ run_test (false, false);
+ } else if (do_explicit_abort) {
+ run_test(false, true);
+ } else if (do_recover_committed) {
+ run_recover(true);
+ } else if (do_recover_aborted) {
+ run_recover(false);
+ } else if (do_recover_only) {
+ run_recover_only();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-delboth-checkpoint.cc b/storage/tokudb/PerconaFT/src/tests/recover-delboth-checkpoint.cc
new file mode 100644
index 00000000..8dd070a0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-delboth-checkpoint.cc
@@ -0,0 +1,247 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test delboth spanning a checkpoint: delete before the checkpoint, commit or abort after it
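+//
+// test plan:
+//   --commit (or --test): insert 256 (i,i) pairs and commit, start a delete of
+//                         key 128, take a checkpoint, commit the delete, crash
+//   --abort:              same, but crash with the delete transaction still open
+//   --explicit-abort:     same, but abort the delete and fsync the log before crashing
+//   --recover-committed:  run recovery and verify every key except 128 is present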
+
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+
+static void
+run_test (bool do_commit, bool do_abort) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ // insert (i,i) pairs
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ for (int i=0; i<256; i++) {
+ unsigned char c = (unsigned char) i;
+ DBT k = {.data=&c, .size=sizeof c};
+ DBT v = {.data=&c, .size=sizeof c};
+ r = dba->put(dba, txn, &k, &v, 0); CKERR(r);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // delete (128,128)
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ unsigned char c = 128;
+ DBT k = {.data=&c, .size=sizeof c};
+ r = dba->del(dba, txn, &k, 0); CKERR(r);
+ }
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+run_recover (bool UU(did_commit)) {
+ DB_ENV *env;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // verify all but (128,128) exist
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBC *ca;
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ int i;
+ for (i=0; ; i++) {
+ if (i == 128)
+ continue;
+ DBT k,v;
+ dbt_init(&k, NULL, 0);
+ dbt_init(&v, NULL, 0);
+ r = ca->c_get(ca, &k, &v, DB_NEXT);
+ if (r != 0)
+ break;
+ assert(k.size == 1 && v.size == 1);
+ unsigned char kk, vv;
+ memcpy(&kk, k.data, k.size);
+ memcpy(&vv, v.data, v.size);
+ assert(kk == i);
+ assert(vv == i);
+ }
+ assert(i == 256);
+
+ r = ca->c_close(ca); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = dba->close(dba, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+run_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ run_test (true, false);
+ } else if (do_abort) {
+ run_test (false, false);
+ } else if (do_explicit_abort) {
+ run_test(false, true);
+ } else if (do_recover_committed) {
+ run_recover(true);
+ } else if (do_recover_aborted) {
+ run_recover(false);
+ } else if (do_recover_only) {
+ run_recover_only();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor.cc
new file mode 100644
index 00000000..6f9241b3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor.cc
@@ -0,0 +1,185 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations that were committed, aborted, or left pending at crash time
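+//
+// test plan:
+//   --test:    create foo.db with the default descriptor and foo2.db/foo3.db
+//              with an 8-byte descriptor; then change_descriptor to a 4-byte
+//              value and commit on foo.db, abort on foo2.db, and leave the
+//              change uncommitted on foo3.db before crashing on purpose
+//   --recover: run recovery and verify foo.db has the 4-byte descriptor while
+//              foo2.db and foo3.db still have the 8-byte descriptor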
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+ DB_TXN* txn;
+ DB_TXN* txn2;
+ DB_TXN* txn3;
+ DBT desc;
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor10.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor10.cc
new file mode 100644
index 00000000..23b4c79c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor10.cc
@@ -0,0 +1,201 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations when the crash happens during a checkpoint
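+//
+// test plan:
+//   --test:    create foo.db, foo2.db, foo3.db (the latter two with an 8-byte
+//              descriptor), start change_descriptor to a 4-byte value on all
+//              three, take a checkpoint, abort the change on foo2.db, commit
+//              it on foo.db, then crash from inside the checkpoint callback
+//              during the next checkpoint
+//   --recover: run recovery and verify foo.db has the 4-byte descriptor while
+//              foo2.db and foo3.db still have the 8-byte descriptor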
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+bool do_crash;
+
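+// checkpoint callback installed below: once do_crash is set, the next call
+// crashes the process on purpose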
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ if (do_crash) {
+ toku_hard_crash_on_purpose();
+ }
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+ DB_TXN* txn;
+ DB_TXN* txn2;
+ DB_TXN* txn3;
+ DBT desc;
+
+ do_crash = false;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback2(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_checkpoint(env,0,0,0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+
+ do_crash = true;
+ env->txn_checkpoint(env,0,0,0);
+}
+
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor11.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor11.cc
new file mode 100644
index 00000000..0c35ece7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor11.cc
@@ -0,0 +1,191 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations issued from inside the checkpoint callback
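+//
+// test plan:
+//   --test:    create foo.db, foo2.db, foo3.db (the latter two with an 8-byte
+//              descriptor), then take a checkpoint; the checkpoint callback
+//              issues change_descriptor to a 4-byte value on all three,
+//              committing it on foo.db, aborting it on foo2.db, and leaving it
+//              pending on foo3.db; crash right after the checkpoint returns
+//   --recover: run recovery and verify foo.db has the 4-byte descriptor while
+//              foo2.db and foo3.db still have the 8-byte descriptor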
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+DB_ENV *env;
+DB_TXN* txn;
+DB_TXN* txn2;
+DB_TXN* txn3;
+DB *db;
+DB *db2;
+DB *db3;
+
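+// runs as the checkpoint callback: changes the descriptor of all three
+// databases to the 4-byte value, committing the change on db, aborting it on
+// db2, and leaving txn3 open on db3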
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ DBT desc;
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+}
+
+
+static void run_test(void)
+{
+ txn = NULL;
+ txn2 = NULL;
+ txn3 = NULL;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback2(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+ { int chk_r = env->txn_checkpoint(env,0,0,0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover(void)
+{
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor12.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor12.cc
new file mode 100644
index 00000000..36180820
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor12.cc
@@ -0,0 +1,191 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations (committed, aborted, and in-flight) after a crash
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+DB_ENV *env;
+DB_TXN* txn;
+DB_TXN* txn2;
+DB_TXN* txn3;
+DB *db;
+DB *db2;
+DB *db3;
+
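+// Same scenario as recover-descriptor11, but the callback is registered with
+// db_env_set_checkpoint_callback instead of db_env_set_checkpoint_callback2: while
+// env->txn_checkpoint() runs, a descriptor change is committed on db, aborted on db2,
+// and left in-flight on db3 before the hard crash.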
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ DBT desc;
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+}
+
+
+static void run_test(void)
+{
+ txn = NULL;
+ txn2 = NULL;
+ txn3 = NULL;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+ { int chk_r = env->txn_checkpoint(env,0,0,0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover(void)
+{
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor2.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor2.cc
new file mode 100644
index 00000000..c5120728
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor2.cc
@@ -0,0 +1,188 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations (committed, aborted, and in-flight) after a crash
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+ DB_TXN* txn;
+ DB_TXN* txn2;
+ DB_TXN* txn3;
+ DBT desc;
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+
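+    // Without taking a checkpoint: commit a descriptor change on db, abort one on db2,
+    // and leave one in-flight on db3, closing each DB handle along the way, then
+    // hard-crash so recovery must rebuild the descriptors purely from the log.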
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+ { int chk_r = db2->close(db2,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+ { int chk_r = db3->close(db3,0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor3.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor3.cc
new file mode 100644
index 00000000..2139a753
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor3.cc
@@ -0,0 +1,188 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations (committed, aborted, and in-flight) after a crash
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+ DB_TXN* txn;
+ DB_TXN* txn2;
+ DB_TXN* txn3;
+ DBT desc;
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+
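+    // Here each DB handle is closed *before* its transaction resolves: db is closed and
+    // then txn commits, db2 is closed and then txn2 aborts, and db3 is closed with txn3
+    // still open at the crash, so recovery must handle descriptor changes whose handles
+    // were already gone.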
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+ { int chk_r = db2->close(db2,0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+ { int chk_r = db3->close(db3,0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor4.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor4.cc
new file mode 100644
index 00000000..c5f2eaf2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor4.cc
@@ -0,0 +1,187 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations (committed, aborted, and in-flight) after a crash
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+ DB_TXN* txn;
+ DB_TXN* txn2;
+ DB_TXN* txn3;
+ DBT desc;
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+
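+    // Commit a descriptor change on db, abort one on db2, and leave one in-flight on
+    // db3, then take a checkpoint before hard-crashing: the committed change must
+    // survive the checkpoint and the in-flight one must still be discarded on recovery.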
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_checkpoint(env,0,0,0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor5.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor5.cc
new file mode 100644
index 00000000..d538e271
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor5.cc
@@ -0,0 +1,187 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations (committed, aborted, and in-flight) after a crash
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+ DB_TXN* txn;
+ DB_TXN* txn2;
+ DB_TXN* txn3;
+ DBT desc;
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+
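+    // Start all three descriptor changes, checkpoint while every transaction is still
+    // pending, then commit txn and abort txn2 (txn3 stays open) and hard-crash, so the
+    // commit/abort records land after the checkpoint in the log.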
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_checkpoint(env,0,0,0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor6.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor6.cc
new file mode 100644
index 00000000..39dfaf29
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor6.cc
@@ -0,0 +1,187 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations (committed, aborted, and in-flight) after a crash
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+ DB_TXN* txn;
+ DB_TXN* txn2;
+ DB_TXN* txn3;
+ DBT desc;
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+
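+    // Checkpoint first, then make the descriptor changes (commit on db, abort on db2,
+    // in-flight on db3) and hard-crash, so every change sits entirely after the last
+    // checkpoint and must be recovered from the log alone.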
+ { int chk_r = env->txn_checkpoint(env,0,0,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor7.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor7.cc
new file mode 100644
index 00000000..f31c5693
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor7.cc
@@ -0,0 +1,199 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations (committed, aborted, and in-flight) after a crash
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+bool do_crash;
+
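+// The checkpoint callback hard-crashes the process once do_crash is set, so run_test
+// dies from within its final env->txn_checkpoint() call after committing a descriptor
+// change on db, aborting one on db2, and leaving one in-flight on db3.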
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ if (do_crash) {
+ toku_hard_crash_on_purpose();
+ }
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+ DB_TXN* txn;
+ DB_TXN* txn2;
+ DB_TXN* txn3;
+ DBT desc;
+
+ do_crash = false;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+
+ do_crash = true;
+ env->txn_checkpoint(env,0,0,0);
+}
+
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor8.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor8.cc
new file mode 100644
index 00000000..38058459
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor8.cc
@@ -0,0 +1,201 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations (committed, aborted, and in-flight) after a crash
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+bool do_crash;
+
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ if (do_crash) {
+ toku_hard_crash_on_purpose();
+ }
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+ DB_TXN* txn;
+ DB_TXN* txn2;
+ DB_TXN* txn3;
+ DBT desc;
+
+ do_crash = false;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+
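+    // First checkpoint with all three descriptor transactions still pending, then abort
+    // txn2 and commit txn; the second checkpoint below crashes from its callback
+    // (do_crash is set), leaving txn3's change in-flight.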
+ { int chk_r = env->txn_checkpoint(env,0,0,0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+
+ do_crash = true;
+ env->txn_checkpoint(env,0,0,0);
+}
+
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-descriptor9.cc b/storage/tokudb/PerconaFT/src/tests/recover-descriptor9.cc
new file mode 100644
index 00000000..ffc8a02c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-descriptor9.cc
@@ -0,0 +1,199 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of change_descriptor operations (committed, aborted, and in-flight) after a crash
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+    assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+bool do_crash;
+
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ if (do_crash) {
+ toku_hard_crash_on_purpose();
+ }
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+ DB_TXN* txn;
+ DB_TXN* txn2;
+ DB_TXN* txn3;
+ DBT desc;
+
+ do_crash = false;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.size = sizeof(four_byte_desc);
+ desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback2(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, txn_2, "foo2.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn_2, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ });
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, txn_3, "foo3.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn_3, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ });
+
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn2, 0); CKERR(chk_r); }
+ { int chk_r = db2->change_descriptor(db2, txn2, &desc, 0); CKERR(chk_r); }
+ { int chk_r = txn2->abort(txn2); CKERR(chk_r); }
+
+ { int chk_r = env->txn_begin(env, NULL, &txn3, 0); CKERR(chk_r); }
+ { int chk_r = db3->change_descriptor(db3, txn3, &desc, 0); CKERR(chk_r); }
+
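+    // the checkpoint callback crashes the process once do_crash is set,
+    // so txn3's descriptor change stays uncommitted and must be rolled back by recovery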
+ do_crash = true;
+ env->txn_checkpoint(env,0,0,0);
+}
+
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+ DB *db2;
+ DB *db3;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo2.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db3, env, 0); CKERR(chk_r); }
+ { int chk_r = db3->open(db3, NULL, "foo3.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db3);
+ { int chk_r = db3->close(db3, 0); CKERR(chk_r); }
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-fassociate.cc b/storage/tokudb/PerconaFT/src/tests/recover-fassociate.cc
new file mode 100644
index 00000000..31093f13
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-fassociate.cc
@@ -0,0 +1,165 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test makes sure that fassociate can open nodup and dupsort dictionaries
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *dba, *dbb;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
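+        // checkpoint while the transaction is still live so recovery must fassociate both dictionaries to replay the log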
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ r = dbb->put(dbb, txn, &b, &a, 0); CKERR(r);
+ }
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ uint32_t dbflags;
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+ r = dba->get_flags(dba, &dbflags); CKERR(r);
+ assert(dbflags == 0);
+ r = dba->close(dba, 0); CKERR(r);
+ DB *dbb;
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+ r = dbb->get_flags(dbb, &dbflags); CKERR(r);
+ assert(dbflags == 0);
+ r = dbb->close(dbb, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+            fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover | --recover-only | --no-recover}\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-fclose-in-checkpoint.cc b/storage/tokudb/PerconaFT/src/tests/recover-fclose-in-checkpoint.cc
new file mode 100644
index 00000000..22b89a0f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-fclose-in-checkpoint.cc
@@ -0,0 +1,157 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test fopen, checkpoint fclose
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+
+static void checkpoint_callback_closeit(void *extra) {
+ DB *db = (DB *) extra;
+ int r = db->close(db, 0); CKERR(r);
+}
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *txn;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // fcreate
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ // dummy transaction
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ // fopen
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
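+    // have the checkpoint callback close this db so the fclose is logged while the checkpoint is in progress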
+ db_env_set_checkpoint_callback(checkpoint_callback_closeit, db);
+
+ // checkpoint
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+            fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover | --recover-only | --no-recover}\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-fcreate-basementnodesize.cc b/storage/tokudb/PerconaFT/src/tests/recover-fcreate-basementnodesize.cc
new file mode 100644
index 00000000..52063338
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-fcreate-basementnodesize.cc
@@ -0,0 +1,192 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that recovery recreates dictionaries with the correct basement node size after their data files are removed
+#include <sys/stat.h>
+#include "test.h"
+
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static const char *namea="a.db"; uint32_t nodesizea = 0;
+static const char *nameb="b.db"; uint32_t nodesizeb = 32*1024;
+
+static void do_remove(DB_ENV *env, const char *filename) {
+ int r;
+ DBT dname;
+ DBT iname;
+ dbt_init(&dname, filename, strlen(filename)+1);
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &dname, &iname); CKERR(r);
+ if (verbose) printf("%s -> %s\n", filename, (char *) iname.data);
+ char rmpath[TOKU_PATH_MAX+1];
+ toku_path_join(rmpath, 2, TOKU_TEST_FILENAME, iname.data);
+ toku_os_recursive_delete(rmpath);
+ toku_free(iname.data);
+}
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+    // create a db with the default basement node size
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->get_readpagesize(dba, &nodesizea); CKERR(r);
+    if (verbose) printf("nodesizea=%u\n", nodesizea);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+
+    // create a db with a small basement node size
+ DB *dbb;
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->set_readpagesize(dbb, nodesizeb); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // remove the inames to force recovery to recreate them
+ do_remove(env, namea);
+ do_remove(env, nameb);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ int r;
+
+ // run recovery
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+    // verify that the trees have the correct basement node sizes
+ uint32_t pagesize;
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+ r = dba->get_readpagesize(dba, &pagesize); CKERR(r);
+ if (verbose) printf("%u\n", pagesize);
+ // assert(pagesize == nodesizea);
+ r = dba->close(dba, 0); CKERR(r);
+
+ DB *dbb;
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+ r = dbb->get_readpagesize(dbb, &pagesize); CKERR(r);
+ if (verbose) printf("%u\n", pagesize);
+ assert(pagesize == nodesizeb);
+ r = dbb->close(dbb, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static const char *cmd;
+
+static bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+            fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover | --recover-only | --no-recover}\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-fcreate-fclose.cc b/storage/tokudb/PerconaFT/src/tests/recover-fcreate-fclose.cc
new file mode 100644
index 00000000..1dc6f88b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-fcreate-fclose.cc
@@ -0,0 +1,147 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that recovery can open a dictionary that was created but never closed before the crash
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *db;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+// r = db->close(db, 0); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+            fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover | --recover-only | --no-recover}\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-fcreate-fdelete.cc b/storage/tokudb/PerconaFT/src/tests/recover-fcreate-fdelete.cc
new file mode 100644
index 00000000..09ebadd5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-fcreate-fdelete.cc
@@ -0,0 +1,156 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that recovery can open a dictionary that was removed and then recreated before the crash
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *db;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->dbremove(env, NULL, namea, NULL, 0); CKERR(r);
+
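+    // checkpoint between the fdelete and the fcreate so recovery sees a recreate of a previously removed file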
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ uint32_t dbflags;
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+ r = db->get_flags(db, &dbflags); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+            fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover | --recover-only | --no-recover}\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-fcreate-nodesize.cc b/storage/tokudb/PerconaFT/src/tests/recover-fcreate-nodesize.cc
new file mode 100644
index 00000000..8b0ac7ab
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-fcreate-nodesize.cc
@@ -0,0 +1,193 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that recovery recreates dictionaries with the correct node size after their data files are removed
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static const char *namea="a.db"; uint32_t nodesizea = 0;
+static const char *nameb="b.db"; uint32_t nodesizeb = 64*1024;
+
+static void do_remove(DB_ENV *env, const char *filename) {
+ int r;
+ DBT dname;
+ DBT iname;
+ dbt_init(&dname, filename, strlen(filename)+1);
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &dname, &iname); CKERR(r);
+ if (verbose) printf("%s -> %s\n", filename, (char *) iname.data);
+ char rmpath[TOKU_PATH_MAX+1];
+ toku_path_join(rmpath, 2, TOKU_TEST_FILENAME, iname.data);
+ toku_os_recursive_delete(rmpath);
+ toku_free(iname.data);
+}
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ // create a db with the default nodesize
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->get_pagesize(dba, &nodesizea); CKERR(r);
+    if (verbose) printf("nodesizea=%u\n", nodesizea);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+
+ // create a db with a small nodesize
+ DB *dbb;
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->set_pagesize(dbb, nodesizeb); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // remove the inames to force recovery to recreate them
+ do_remove(env, namea);
+ do_remove(env, nameb);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ int r;
+
+ // run recovery
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // verify that the trees have the correct nodesizes
+ uint32_t pagesize;
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+ r = dba->get_pagesize(dba, &pagesize); CKERR(r);
+ if (verbose) printf("%u\n", pagesize);
+ // assert(pagesize == nodesizea);
+ r = dba->close(dba, 0); CKERR(r);
+
+ DB *dbb;
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+ r = dbb->get_pagesize(dbb, &pagesize); CKERR(r);
+ if (verbose) printf("%u\n", pagesize);
+ assert(pagesize == nodesizeb);
+ r = dbb->close(dbb, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static const char *cmd;
+
+static bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+            fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover | --recover-only | --no-recover}\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-fcreate-xabort.cc b/storage/tokudb/PerconaFT/src/tests/recover-fcreate-xabort.cc
new file mode 100644
index 00000000..f42a180e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-fcreate-xabort.cc
@@ -0,0 +1,143 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that recovery aborts a dictionary create whose transaction never committed
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *db;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, namea, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
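+    // crash with the file create still pending in an uncommitted transaction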
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR2(r, ENOENT);
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+            fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover | --recover-only | --no-recover}\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-flt1.cc b/storage/tokudb/PerconaFT/src/tests/recover-flt1.cc
new file mode 100644
index 00000000..28baeb74
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-flt1.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+#include "recover-test_crash_in_flusher_thread.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ state_to_crash = 1;
+ return run_recover_flt_test(argc, argv);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-flt10.cc b/storage/tokudb/PerconaFT/src/tests/recover-flt10.cc
new file mode 100644
index 00000000..ef0bdad0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-flt10.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+#include "recover-test_crash_in_flusher_thread.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ state_to_crash = 10;
+ return run_recover_flt_test(argc, argv);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-flt2.cc b/storage/tokudb/PerconaFT/src/tests/recover-flt2.cc
new file mode 100644
index 00000000..fc6a5fe2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-flt2.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+#include "recover-test_crash_in_flusher_thread.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ state_to_crash = 2;
+ return run_recover_flt_test(argc, argv);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-flt3.cc b/storage/tokudb/PerconaFT/src/tests/recover-flt3.cc
new file mode 100644
index 00000000..79dbe3db
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-flt3.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+#include "recover-test_crash_in_flusher_thread.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ state_to_crash = 3;
+ return run_recover_flt_test(argc, argv);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-flt4.cc b/storage/tokudb/PerconaFT/src/tests/recover-flt4.cc
new file mode 100644
index 00000000..9139dd26
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-flt4.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+#include "recover-test_crash_in_flusher_thread.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ state_to_crash = 4;
+ return run_recover_flt_test(argc, argv);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-flt5.cc b/storage/tokudb/PerconaFT/src/tests/recover-flt5.cc
new file mode 100644
index 00000000..98cbe3a3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-flt5.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+#include "recover-test_crash_in_flusher_thread.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ state_to_crash = 5;
+ return run_recover_flt_test(argc, argv);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-flt6.cc b/storage/tokudb/PerconaFT/src/tests/recover-flt6.cc
new file mode 100644
index 00000000..3aac0e8c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-flt6.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+#include "recover-test_crash_in_flusher_thread.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ state_to_crash = 6;
+ return run_recover_flt_test(argc, argv);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-flt7.cc b/storage/tokudb/PerconaFT/src/tests/recover-flt7.cc
new file mode 100644
index 00000000..e60df8ca
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-flt7.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+#include "recover-test_crash_in_flusher_thread.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ state_to_crash = 7;
+ return run_recover_flt_test(argc, argv);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-flt8.cc b/storage/tokudb/PerconaFT/src/tests/recover-flt8.cc
new file mode 100644
index 00000000..17b809ad
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-flt8.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+#include "recover-test_crash_in_flusher_thread.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ state_to_crash = 8;
+ return run_recover_flt_test(argc, argv);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-flt9.cc b/storage/tokudb/PerconaFT/src/tests/recover-flt9.cc
new file mode 100644
index 00000000..c620a413
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-flt9.cc
@@ -0,0 +1,58 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+#include "recover-test_crash_in_flusher_thread.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ state_to_crash = 9;
+ return run_recover_flt_test(argc, argv);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-fopen-checkpoint-fclose.cc b/storage/tokudb/PerconaFT/src/tests/recover-fopen-checkpoint-fclose.cc
new file mode 100644
index 00000000..b0865449
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-fopen-checkpoint-fclose.cc
@@ -0,0 +1,153 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test fopen, checkpoint, fclose
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *txn;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // fcreate
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ // dummy transaction
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ // fopen
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ // checkpoint
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ // fclose
+ r = db->close(db, 0); CKERR(r);
+
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char *const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-fopen-fclose-checkpoint.cc b/storage/tokudb/PerconaFT/src/tests/recover-fopen-fclose-checkpoint.cc
new file mode 100644
index 00000000..595624fd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-fopen-fclose-checkpoint.cc
@@ -0,0 +1,153 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test fopen, fclose, checkpoint
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *txn;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // fcreate
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ // dummy transaction
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ // fopen
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ // fclose
+ r = db->close(db, 0); CKERR(r);
+
+ // checkpoint
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-fopen-fdelete-checkpoint-fcreate.cc b/storage/tokudb/PerconaFT/src/tests/recover-fopen-fdelete-checkpoint-fcreate.cc
new file mode 100644
index 00000000..4265adff
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-fopen-fdelete-checkpoint-fcreate.cc
@@ -0,0 +1,187 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that we can fcreate after fdelete with different treeflags
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void put_something(DB_ENV *env, DB *db, const char *k, const char *v) {
+ int r;
+ DBT key, val;
+ dbt_init(&key, k, strlen(k));
+ dbt_init(&val, v, strlen(v));
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->put(db, txn, &key, &val, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+}
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *txn;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // fcreate
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ // dummy transaction
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ // fopen
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ // insert something
+ put_something(env, db, "a", "b");
+
+ r = db->close(db, 0); CKERR(r);
+
+ // fdelete
+ r = env->dbremove(env, NULL, namea, NULL, 0); CKERR(r);
+
+ // checkpoint
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // fcreate with different treeflags
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ // insert something
+ put_something(env, db, "c", "d");
+
+ r = db->close(db, 0); CKERR(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ uint32_t dbflags;
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+ r = db->get_flags(db, &dbflags); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-hotindexer-simple-abort-put.cc b/storage/tokudb/PerconaFT/src/tests/recover-hotindexer-simple-abort-put.cc
new file mode 100644
index 00000000..39aebc66
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-hotindexer-simple-abort-put.cc
@@ -0,0 +1,146 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
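+// Row generator for the hot indexer: the secondary key is built from the
+// primary row's value, and the secondary value is left empty.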
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+
+ lazy_assert(src_db != NULL && dest_db != NULL);
+
+ if (dest_key->flags == DB_DBT_REALLOC) {
+ toku_free(dest_key->data);
+ }
+ dest_key->flags = DB_DBT_REALLOC;
+ dest_key->data = toku_xmemdup(src_val->data, src_val->size);
+ dest_key->size = src_val->size;
+ dest_val->size = 0;
+
+ return 0;
+}
+
+int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static void
+run_test(void) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ DB *src_db = NULL;
+ r = db_create(&src_db, env, 0); assert_zero(r);
+ r = src_db->open(src_db, NULL, "0.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *dest_db = NULL;
+ r = db_create(&dest_db, env, 0); assert_zero(r);
+ r = dest_db->open(dest_db, NULL, "1.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_TXN* index_txn = NULL;
+ r = env->txn_begin(env, NULL, &index_txn , 0); assert_zero(r);
+ DB_TXN* put_txn = NULL;
+ r = env->txn_begin(env, NULL, &put_txn , 0); assert_zero(r);
+
+ DBT key,data;
+ r = src_db->put(
+ src_db,
+ put_txn,
+ dbt_init(&key, "hello", 6),
+ dbt_init(&data, "there", 6),
+ 0
+ );
+
+ DB_INDEXER *indexer = NULL;
+ r = env->create_indexer(env, index_txn, &indexer, src_db, 1, &dest_db, NULL, 0); assert_zero(r);
+ r = indexer->build(indexer); assert_zero(r);
+ r = indexer->close(indexer); assert_zero(r);
+ r = index_txn->abort(index_txn); assert_zero(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ assert_zero(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void
+run_recover(void) {
+ DB_ENV *env;
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ }
+
+ if (do_test) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-loader-test.cc b/storage/tokudb/PerconaFT/src/tests/recover-loader-test.cc
new file mode 100644
index 00000000..09ce645a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-loader-test.cc
@@ -0,0 +1,518 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* NOTE:
+ *
+ * Someday figure out a better way to verify inames that should not be
+ * in data dir after recovery. Currently, they are just hard-coded in
+ * the new_iname_str[] array. This will break when something changes,
+ * such as the xid of the transaction that creates the loader.
+ */
+
+
+/* Purpose is to verify that when a loader crashes:
+ * - there are no temp files remaining
+ * - the loader-generated iname file is not present
+ *
+ * In the event of a crash, the verification of no temp files and
+ * no loader-generated iname file is done after recovery.
+ *
+ * Mechanism:
+ * This test is derived from loader-cleanup-test, which was derived from loader-stress-test.
+ *
+ * The outline of the test is as follows:
+ * - use loader to create table
+ * - verify presence of temp files
+ * - crash
+ * - recover
+ * - verify absence of temp files
+ * - verify absence of unwanted iname files (new inames) - how?
+ *
+ *
+ */
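+/* (Answering the "how?" above: in this test the loader-generated inames are the
+ * hard-coded entries of new_iname_str[] below, and run_recover() uses
+ * verify_file() to assert that none of them exist in the data dir after recovery.)
+ */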
+
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+
+#include <sys/types.h>
+#include <dirent.h>
+
+#include "ydb-internal.h"
+
+static const int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+
+#define NUM_DBS 5
+
+static bool do_test=false, do_recover=false;
+
+static DB_ENV *env;
+static int NUM_ROWS=50000000;
+static int COMPRESS=0;
+
+enum {MAX_NAME=128};
+enum {MAGIC=311};
+
+static DBT old_inames[NUM_DBS];
+static DBT new_inames[NUM_DBS];
+
+
+static char const * const new_iname_str[NUM_DBS] = {"qo_0000_35_c_L_0.tokudb",
+ "qo_0001_35_c_L_1.tokudb",
+ "qo_0002_35_c_L_2.tokudb",
+ "qo_0003_35_c_L_3.tokudb",
+ "qo_0004_35_c_L_4.tokudb"};
+
+static const char *loader_temp_prefix = "tokuld"; // 2536
+static int count_temp(char * dirname);
+static void get_inames(DBT* inames, DB** dbs);
+static int verify_file(char const * const dirname, char const * const filename);
+static int print_dir(char * dirname);
+
+// return number of temp files
+int
+count_temp(char * dirname) {
+ int n = 0;
+
+ DIR * dir = opendir(dirname);
+
+ struct dirent *ent;
+ while ((ent = readdir(dir)))
+ if ((ent->d_type == DT_REG || ent->d_type == DT_UNKNOWN) && strncmp(ent->d_name, loader_temp_prefix, 6) == 0)
+ n++;
+ closedir(dir);
+ return n;
+}
+
+// print contents of directory
+int
+print_dir(char * dirname) {
+ int n = 0;
+
+ DIR * dir = opendir(dirname);
+
+ struct dirent *ent;
+ while ((ent = readdir(dir))) {
+ if (ent->d_type == DT_REG || ent->d_type == DT_UNKNOWN) {
+ n++;
+ printf("File: %s\n", ent->d_name);
+ }
+ }
+ closedir(dir);
+ return n;
+}
+
+
+
+// return non-zero if file exists
+int
+verify_file(char const * const dirname, char const * const filename) {
+ int n = 0;
+ DIR * dir = opendir(dirname);
+
+ struct dirent *ent;
+ while ((ent=readdir(dir))) {
+ if ((ent->d_type==DT_REG || ent->d_type==DT_UNKNOWN) && strcmp(ent->d_name, filename)==0) {
+ n++;
+ }
+ }
+ closedir(dir);
+ return n;
+}
+
+void
+get_inames(DBT* inames, DB** dbs) {
+ int i;
+ for (i = 0; i < NUM_DBS; i++) {
+ DBT dname;
+ char * dname_str = dbs[i]->i->dname;
+ dbt_init(&dname, dname_str, strlen(dname_str)+1);
+ dbt_init(&(inames[i]), NULL, 0);
+ inames[i].flags |= DB_DBT_MALLOC;
+ int r = env->get_iname(env, &dname, &inames[i]);
+ CKERR(r);
+ char * iname_str = (char*) (inames[i].data);
+ // if (verbose)
+ printf("dname = %s, iname = %s\n", dname_str, iname_str);
+ }
+}
+
+
+#if 0
+void print_inames(DB** dbs);
+void
+print_inames(DB** dbs) {
+ int i;
+ for (i = 0; i < NUM_DBS; i++) {
+ DBT dname;
+ DBT iname;
+ char * dname_str = dbs[i]->i->dname;
+ dbt_init(&dname, dname_str, sizeof(dname_str));
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ int r = env->get_iname(env, &dname, &iname);
+ CKERR(r);
+ char * iname_str = (char*)iname.data;
+ if (verbose) printf("dname = %s, iname = %s\n", dname_str, iname_str);
+ int n = verify_file(env->i->real_data_dir, iname_str);
+ assert(n == 1);
+ toku_free(iname.data);
+ }
+}
+#endif
+
+
+//
+// Functions to create unique key/value pairs, row generators, checkers, ... for each of NUM_DBS
+//
+
+// a is the bit-wise permute table. For DB[i], permute bits as described in a[i] using 'twiddle32'
+// inv is the inverse bit-wise permute of a[]. To get the original value from a twiddled value, twiddle32 (again) with inv[]
+int a[NUM_DBS][32];
+int inv[NUM_DBS][32];
+
+
+// rotate right and left functions
+#if 0
+static inline unsigned int rotr32(const unsigned int x, const unsigned int num) {
+ const unsigned int n = num % 32;
+ return (x >> n) | ( x << (32 - n));
+}
+#endif
+static inline unsigned int rotl32(const unsigned int x, const unsigned int num) {
+ const unsigned int n = num % 32;
+ return (x << n) | ( x >> (32 - n));
+}
+
+static void generate_permute_tables(void) {
+ int i, j, tmp;
+ for(int db=0;db<NUM_DBS;db++) {
+ for(i=0;i<32;i++) {
+ a[db][i] = i;
+ }
+ for(i=0;i<32;i++) {
+ j = random() % (i + 1);
+ tmp = a[db][j];
+ a[db][j] = a[db][i];
+ a[db][i] = tmp;
+ }
+// if(db < NUM_DBS){ printf("a[%d] = ", db); for(i=0;i<32;i++) { printf("%2d ", a[db][i]); } printf("\n");}
+ for(i=0;i<32;i++) {
+ inv[db][a[db][i]] = i;
+ }
+ }
+}
+
+// permute bits of x based on permute table bitmap
+static unsigned int twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << a[db][i];
+ }
+ return b;
+}
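+// Illustrative sketch only (an assumption, not part of this test): inv[] is the
+// inverse permutation of a[], so a hypothetical untwiddle32() could recover the
+// original value by permuting bits through inv[] the same way twiddle32() uses a[]:
+//
+//   static unsigned int untwiddle32(unsigned int x, int db) {
+//       unsigned int b = 0;
+//       for (int i = 0; i < 32; i++) {
+//           b |= ((x >> i) & 1) << inv[db][i];
+//       }
+//       return b;
+//   }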
+
+// generate val from key, index
+static unsigned int generate_val(int key, int i) {
+ return rotl32((key + MAGIC), i);
+}
+
+// There is no handlerton in this test, so this function is a local replacement
+// for the handlerton's generate_row_for_put().
+static int put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+
+ (void) src_db;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+
+ if ( which == 0 ) {
+ if (dest_key->flags==DB_DBT_REALLOC) {
+ if (dest_key->data) toku_free(dest_key->data);
+ dest_key->flags = 0;
+ dest_key->ulen = 0;
+ }
+ if (dest_val->flags==DB_DBT_REALLOC) {
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_val->flags = 0;
+ dest_val->ulen = 0;
+ }
+ dbt_init(dest_key, src_key->data, src_key->size);
+ dbt_init(dest_val, src_val->data, src_val->size);
+ }
+ else {
+ assert(dest_key->flags==DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(unsigned int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(unsigned int));
+ dest_key->ulen = sizeof(unsigned int);
+ }
+ assert(dest_val->flags==DB_DBT_REALLOC);
+ if (dest_val->ulen < sizeof(unsigned int)) {
+ dest_val->data = toku_xrealloc(dest_val->data, sizeof(unsigned int));
+ dest_val->ulen = sizeof(unsigned int);
+ }
+ unsigned int *new_key = (unsigned int *)dest_key->data;
+ unsigned int *new_val = (unsigned int *)dest_val->data;
+
+ *new_key = twiddle32(*(unsigned int*)src_key->data, which);
+ *new_val = generate_val(*(unsigned int*)src_key->data, which);
+
+ dest_key->size = sizeof(unsigned int);
+ dest_val->size = sizeof(unsigned int);
+ //data is already set above
+ }
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
+
+
+static void *expect_poll_void = &expect_poll_void;
+
+static int poll_function (void *UU(extra), float UU(progress)) {
+ toku_hard_crash_on_purpose();
+ return -1;
+}
+
+static void test_loader(DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+ DB_LOADER *loader;
+ uint32_t db_flags[NUM_DBS];
+ uint32_t dbt_flags[NUM_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ }
+ uint32_t loader_flags = COMPRESS; // set with the -z option
+
+ int n = count_temp(env->i->real_data_dir);
+ assert(n == 0); // Must be no temp files before loader is run
+
+ if (verbose) printf("old inames:\n");
+ get_inames(old_inames, dbs);
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = env->create_loader(env, txn, &loader, dbs[0], NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+ r = loader->set_error_callback(loader, NULL, NULL);
+ CKERR(r);
+ r = loader->set_poll_function(loader, poll_function, expect_poll_void);
+ CKERR(r);
+
+ printf("COMPRESS = %d\n", COMPRESS);
+ if (verbose) printf("new inames:\n");
+ get_inames(new_inames, dbs);
+
+ // using loader->put, put values into DB
+ DBT key, val;
+ unsigned int k, v;
+ for(int i=1;i<=NUM_ROWS;i++) {
+ k = i;
+ v = generate_val(i, 0);
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ r = loader->put(loader, &key, &val);
+ CKERR(r);
+ if (verbose) { if((i%10000) == 0){printf("."); fflush(stdout);} }
+ }
+ if( verbose) {printf("\n"); fflush(stdout);}
+
+ printf("Data dir is %s\n", env->i->real_data_dir);
+ n = count_temp(env->i->real_data_dir);
+ printf("Num temp files = %d\n", n);
+ assert(n); // test is useless unless at least one temp file is created
+ if (verbose) {
+ printf("Contents of data dir:\n");
+ print_dir(env->i->real_data_dir);
+ }
+ printf("closing, will crash\n"); fflush(stdout);
+ r = loader->close(loader);
+ printf("Should never return from loader->close()\n"); fflush(stdout);
+ assert(0);
+
+}
+
+
+static void run_test(void)
+{
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+// int envflags = DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ //Disable auto-checkpointing
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[NUM_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ generate_permute_tables();
+
+ test_loader(dbs);
+ printf("Should never return from test_loader\n"); fflush(stdout);
+ assert(0);
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+
+
+static void run_recover (void) {
+ int i;
+
+ // Recovery starts from oldest_living_txn, which is older than any inserts done in run_test,
+ // so recovery always runs over the entire log.
+
+ // run recovery
+ int r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // now verify contents of data_dir, should be no temp files, no loader-created iname files
+ if (verbose)
+ print_dir(env->i->real_data_dir);
+
+ int n = count_temp(env->i->real_data_dir);
+ printf("Num temp files = %d\n", n);
+ assert(n==0); // There should be no temp files remaining after recovery
+
+ for (i = 0; i < NUM_DBS; i++) {
+ char const * const iname = new_iname_str[i];
+ r = verify_file(env->i->real_data_dir, iname);
+ if (r) {
+ printf("File %s exists, but it should not\n", iname);
+ }
+ assert(r == 0);
+ if (verbose)
+ printf("File has been properly deleted: %s\n", iname);
+ }
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+
+}
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+
+ if (do_test) {
+ printf("\n\n perform test, crash\n");
+ fflush(stdout);
+ run_test();
+ }
+ else if (do_recover) {
+ printf("\n\n perform recovery\n");
+ run_recover();
+ }
+ else {
+ printf("\n\n BOGUS!\n");
+ assert(0);
+ }
+
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -c -d <num_dbs> -r <num_rows>\n%s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-z")==0) {
+ COMPRESS = LOADER_COMPRESS_INTERMEDIATES;
+ printf("Compressing\n");
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-lsn-filter-multiple.cc b/storage/tokudb/PerconaFT/src/tests/recover-lsn-filter-multiple.cc
new file mode 100644
index 00000000..7ea51595
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-lsn-filter-multiple.cc
@@ -0,0 +1,248 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test makes sure the LSN filtering is used during recovery of put_multiple
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+enum {num_dbs = 2};
+static DBT dest_keys[num_dbs];
+static DBT dest_vals[num_dbs];
+
+bool do_test=false, do_recover=false;
+
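+// Row generator for put_multiple: the 4-byte descriptor of each destination DB
+// identifies which DB it is; the source key and value are copied through unchanged.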
+static int
+put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ if (src_db) {
+ assert(src_db->descriptor);
+ assert(src_db->descriptor->dbt.size == 4);
+ assert((*(uint32_t*)src_db->descriptor->dbt.data) == 0);
+ }
+ assert(dest_db->descriptor->dbt.size == 4);
+ uint32_t which = *(uint32_t*)dest_db->descriptor->dbt.data;
+ assert(which < num_dbs);
+
+ if (dest_key->data) toku_free(dest_key->data);
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_key->data = toku_xmemdup (src_key->data, src_key->size);
+ dest_key->size = src_key->size;
+ dest_val->data = toku_xmemdup (src_val->data, src_val->size);
+ dest_val->size = src_val->size;
+ return 0;
+}
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // create a txn that never closes, forcing recovery to run from the beginning of the log
+ {
+ DB_TXN *oldest_living_txn;
+ r = env->txn_begin(env, NULL, &oldest_living_txn, 0); CKERR(r);
+ }
+
+ DBT descriptor;
+ uint32_t which;
+ for (which = 0; which < num_dbs; which++) {
+ dbt_init_realloc(&dest_keys[which]);
+ dbt_init_realloc(&dest_vals[which]);
+ }
+ dbt_init(&descriptor, &which, sizeof(which));
+ DB *dba;
+ DB *dbb;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ which = 0;
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dba->change_descriptor(dba, txn_desc, &descriptor, 0); CKERR(chk_r); }
+ });
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ which = 1;
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbb->change_descriptor(dbb, txn_desc, &descriptor, 0); CKERR(chk_r); }
+ });
+ DB *dbs[num_dbs] = {dba, dbb};
+ uint32_t flags[num_dbs] = {0, 0};
+ // txn_begin; insert <a,b>; txn_abort
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+ r = env_put_multiple_test_no_array(env, dba, txn, &k, &v, num_dbs, dbs, dest_keys, dest_vals, flags);
+ CKERR(r);
+ r = txn->abort(txn); CKERR(r);
+ }
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(r);
+ dbs[1] = dbb;
+
+ // txn_begin; insert <a,b>;
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+ r = env_put_multiple_test_no_array(env, NULL, txn, &k, &v, num_dbs, dbs, dest_keys, dest_vals, flags);
+ CKERR(r);
+ }
+
+ // checkpoint
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ // abort the process
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ // Recovery starts from oldest_living_txn, which is older than any inserts done in run_test,
+ // so recovery always runs over the entire log.
+
+ // run recovery
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // verify the data
+ {
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ DBT k, v;
+ r = cursor->c_get(cursor, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_FIRST);
+ assert(r == DB_NOTFOUND);
+
+ r = cursor->c_close(cursor); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ }
+ {
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, nameb, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ DBT k, v;
+ r = cursor->c_get(cursor, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_FIRST);
+ assert(r == DB_NOTFOUND);
+
+ r = cursor->c_close(cursor); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ }
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-lsn-filter.cc b/storage/tokudb/PerconaFT/src/tests/recover-lsn-filter.cc
new file mode 100644
index 00000000..cbb3de61
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-lsn-filter.cc
@@ -0,0 +1,190 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test makes sure the LSN filtering is used during recovery
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // create a txn that never closes, forcing recovery to run from the beginning of the log
+ {
+ DB_TXN *oldest_living_txn;
+ r = env->txn_begin(env, NULL, &oldest_living_txn, 0); CKERR(r);
+ }
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ // txn_begin; insert <a,b>; txn_abort
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+ r = db->put(db, txn, &k, &v, 0); CKERR(r);
+ r = txn->abort(txn); CKERR(r);
+ }
+
+ // txn_begin; insert <a,b>;
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+ r = db->put(db, txn, &k, &v, 0); CKERR(r);
+ }
+
+ // checkpoint
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ // abort the process
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ // Recovery starts from oldest_living_txn, which is older than any inserts done in run_test,
+ // so recovery always runs over the entire log.
+
+ // run recovery
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // verify the data
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ DBT k, v;
+ r = cursor->c_get(cursor, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_FIRST);
+ assert(r == DB_NOTFOUND);
+
+ r = cursor->c_close(cursor); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-missing-dbfile-2.cc b/storage/tokudb/PerconaFT/src/tests/recover-missing-dbfile-2.cc
new file mode 100644
index 00000000..2c056094
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-missing-dbfile-2.cc
@@ -0,0 +1,186 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that DB_RUNRECOVERY is returned when there is a missing db file
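+//
+// These recovery tests run in separate phases; a typical manual invocation (shown here for
+// illustration only, the real driver is the CTest harness) looks like:
+//   ./recover-missing-dbfile-2.tdb --test     # set up the env, then crash on purpose
+//   ./recover-missing-dbfile-2.tdb --recover  # hide b.db's file, expect DB_RUNRECOVERY, restore it, recover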
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+#define NAMEA "a.db"
+const char *namea=NAMEA;
+#define NAMEB "b.db"
+#define NAMEB_HINT "b_db"
+const char *nameb=NAMEB;
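+// NAMEB_HINT is the prefix of the physical .tokudb file(s) that back b.db inside the
+// environment directory; run_recover globs on it to hide just that dictionary's file.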
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+
+ DB *dbb;
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+
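+ // crash without a clean shutdown: the environment is never closed, so the log must be
+ // replayed, and the log still references both a.db and b.db.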
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ char saveddbs[TOKU_PATH_MAX+1];
+ toku_path_join(saveddbs, 2, TOKU_TEST_FILENAME, "saveddbs");
+ toku_os_recursive_delete(saveddbs);
+ r = toku_os_mkdir(saveddbs, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
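+ // hide b.db's backing file: move it into saveddbs so that recovery finds a dictionary
+ // that the log references but that is missing on disk.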
+ char glob[TOKU_PATH_MAX+1];
+ toku_path_join(glob, 2, TOKU_TEST_FILENAME, NAMEB_HINT "*.tokudb");
+ char cmd[2 * TOKU_PATH_MAX + sizeof("mv ")];
+ snprintf(cmd, sizeof(cmd), "mv %s %s", glob, saveddbs);
+ r = system(cmd);
+ CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+
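+ // clean out anything matching the glob that the failed open may have left behind,
+ // then restore the saved file(s) and verify that recovery now succeeds.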
+ snprintf(cmd, sizeof(cmd), "rm -rf %s", glob);
+ r = system(cmd);
+ CKERR(r);
+
+ snprintf(cmd, sizeof(cmd), "mv %s/*.tokudb %s", saveddbs, TOKU_TEST_FILENAME);
+ r = system(cmd);
+ CKERR(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-missing-dbfile.cc b/storage/tokudb/PerconaFT/src/tests/recover-missing-dbfile.cc
new file mode 100644
index 00000000..5ecbc01a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-missing-dbfile.cc
@@ -0,0 +1,177 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that DB_RUNRECOVERY is returned when there is a missing db file
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+#define NAMEA "a.db"
+const char *namea=NAMEA;
+#define NAMEB "b.db"
+const char *nameb=NAMEB;
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *dba;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ }
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ char saveddbs[TOKU_PATH_MAX+1];
+ toku_path_join(saveddbs, 2, TOKU_TEST_FILENAME, "saveddbs");
+ toku_os_recursive_delete(saveddbs);
+ r = toku_os_mkdir(saveddbs, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
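+ // hide every dictionary's backing file so that recovery cannot find a.db and must
+ // return DB_RUNRECOVERY.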
+ char glob[TOKU_PATH_MAX+1];
+ toku_path_join(glob, 2, TOKU_TEST_FILENAME, "*.tokudb");
+ char cmd[2 * TOKU_PATH_MAX + sizeof("mv ")];
+ snprintf(cmd, sizeof(cmd), "mv %s %s", glob, saveddbs);
+ r = system(cmd);
+ CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR2(r, DB_RUNRECOVERY);
+
+ snprintf(cmd, sizeof(cmd), "rm -rf %s", glob);
+ r = system(cmd);
+ CKERR(r);
+
+ snprintf(cmd, sizeof(cmd), "mv %s/*.tokudb %s", saveddbs, TOKU_TEST_FILENAME);
+ r = system(cmd);
+ CKERR(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-missing-logfile.cc b/storage/tokudb/PerconaFT/src/tests/recover-missing-logfile.cc
new file mode 100644
index 00000000..d65acc41
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-missing-logfile.cc
@@ -0,0 +1,182 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that DB_RUNRECOVERY is returned when there is a missing logfile
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void run_test (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ DB_ENV *env;
+ DB *dba;
+
+ // create logfile 0
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ // create logfile 1
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ // create logfile 2
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ }
+
+ r = txn->commit(txn, 0); CKERR(r);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ char savedlogs[TOKU_PATH_MAX+1];
+ toku_path_join(savedlogs, 2, TOKU_TEST_FILENAME, "savedlogs");
+ toku_os_recursive_delete(savedlogs);
+ r = toku_os_mkdir(savedlogs, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
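+ // hide all of the *.tokulog* files; with no log to recover from, the open fails with
+ // ENOENT rather than DB_RUNRECOVERY.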
+ char glob[TOKU_PATH_MAX+1];
+ toku_path_join(glob, 2, TOKU_TEST_FILENAME, "*.tokulog*");
+ char cmd[2 * TOKU_PATH_MAX + sizeof("mv ")];
+ snprintf(cmd, sizeof(cmd), "mv %s %s", glob, savedlogs);
+ r = system(cmd);
+ CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR2(r, ENOENT);
+
+ snprintf(cmd, sizeof(cmd), "rm -rf %s", glob);
+ r = system(cmd);
+ CKERR(r);
+
+ snprintf(cmd, sizeof(cmd), "mv %s/*.tokulog* %s", savedlogs, TOKU_TEST_FILENAME);
+ r = system(cmd);
+ CKERR(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-abort.cc b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-abort.cc
new file mode 100644
index 00000000..da40a61f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-abort.cc
@@ -0,0 +1,257 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of a put multiple log entry
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(i + dbnum);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
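+// Row generator for put_multiple: for destination db N the key is the N'th int of the
+// primary value, and only db 0 keeps the full primary value; the others store an empty value.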
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ // dest_vals is NULL when this is reached through del_callback, so only touch it when present
+ DBT *dest_val = NULL;
+ if (dest_vals) {
+ toku_dbt_array_resize(dest_vals, 1);
+ dest_val = &dest_vals->dbts[0];
+ }
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+ assert(src_db == NULL);
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_val->size / sizeof (int));
+
+ int *pri_data = (int *) src_val->data;
+
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->size = sizeof (int);
+ dest_key->data = &pri_data[dbnum];
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->size = sizeof (int);
+ dest_key->data = toku_realloc(dest_key->data, dest_key->size);
+ memcpy(dest_key->data, &pri_data[dbnum], dest_key->size);
+ break;
+ default:
+ abort();
+ }
+
+ if (dest_val) {
+ switch (dest_val->flags) {
+ case 0:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = src_val->data;
+ } else {
+ dest_val->size = 0;
+ }
+ break;
+ case DB_DBT_REALLOC:
+ abort();
+ default:
+ abort();
+ }
+ }
+
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_keys, NULL, src_key, src_data);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0);
+ assert_zero(r);
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ r = env->txn_checkpoint(env, 0, 0, 0); assert_zero(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ DBT pri_val; dbt_init(&pri_val, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ DBT vals[ndbs]; memset(vals, 0, sizeof vals);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_put_multiple_test_no_array(env, NULL, txn, &pri_key, &pri_val, ndbs, db, keys, vals, flags);
+ assert_zero(r);
+ }
+
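+ // the transaction is never committed, so the crash leaves it live; recovery has to roll
+ // the put_multiple operations back and verify_all expects every db to be empty.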
+ toku_hard_crash_on_purpose();
+}
+
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ assert(r == DB_NOTFOUND);
+
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_all(DB_ENV *env, int ndbs) {
+ int r;
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ verify_empty(env, db);
+ r = db->close(db, 0);
+ assert_zero(r);
+ }
+}
+
+static void
+run_recover(int ndbs, int UU(nrows)) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ verify_all(env, ndbs);
+ r = env->close(env, 0); assert_zero(r);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ bool do_test = false;
+ bool do_recover = false;
+ int ndbs = 2;
+ int nrows = 1;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test)
+ run_test(ndbs, nrows);
+ if (do_recover)
+ run_recover(ndbs, nrows);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-fdelete-all.cc b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-fdelete-all.cc
new file mode 100644
index 00000000..f7c877c4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-fdelete-all.cc
@@ -0,0 +1,232 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test makes sure the LSN filtering is used during recovery of put_multiple
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+enum {num_dbs = 2};
+static DBT dest_keys[num_dbs];
+static DBT dest_vals[num_dbs];
+
+bool do_test=false, do_recover=false;
+
+static int
+put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+
+ assert(src_db == NULL);
+ assert(dest_db->descriptor->dbt.size == 4);
+ uint32_t which = *(uint32_t*)dest_db->descriptor->dbt.data;
+ assert(which < num_dbs);
+
+ if (dest_key->data) toku_free(dest_key->data);
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_key->data = toku_xmemdup (src_key->data, src_key->size);
+ dest_key->size = src_key->size;
+ dest_val->data = toku_xmemdup (src_val->data, src_val->size);
+ dest_val->size = src_val->size;
+ return 0;
+}
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // create a txn that never closes, forcing recovery to run from the beginning of the log
+ {
+ DB_TXN *oldest_living_txn;
+ r = env->txn_begin(env, NULL, &oldest_living_txn, 0); CKERR(r);
+ }
+
+ DBT descriptor;
+ uint32_t which;
+ for (which = 0; which < num_dbs; which++) {
+ dbt_init_realloc(&dest_keys[which]);
+ dbt_init_realloc(&dest_vals[which]);
+ }
+ dbt_init(&descriptor, &which, sizeof(which));
+ DB *dba;
+ DB *dbb;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ which = 0;
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dba->change_descriptor(dba, txn_desc, &descriptor, 0); CKERR(chk_r); }
+ });
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ which = 1;
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbb->change_descriptor(dbb, txn_desc, &descriptor, 0); CKERR(chk_r); }
+ });
+
+ DB *dbs[num_dbs] = {dba, dbb};
+ uint32_t flags[num_dbs] = {0, 0};
+ // txn_begin; insert <a,a>; txn_abort
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+
+ r = env_put_multiple_test_no_array(env, NULL, txn, &k, &v, num_dbs, dbs, dest_keys, dest_vals, flags);
+ CKERR(r);
+ r = txn->abort(txn); CKERR(r);
+ }
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(r);
+ dbs[1] = dbb;
+
+ // txn_begin; insert <a,b>;
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+
+ r = env_put_multiple_test_no_array(env, NULL, txn, &k, &v, num_dbs, dbs, dest_keys, dest_vals, flags);
+ CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = env->dbremove(env, txn, namea, NULL, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->dbremove(env, txn, nameb, NULL, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+
+ r = env->log_flush(env, NULL); CKERR(r);
+ // abort the process
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ // Recovery starts from oldest_living_txn, which is older than any inserts done in run_test,
+ // so recovery always runs over the entire log.
+
+ // run recovery
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // verify the data
+ {
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR2(r, ENOENT);
+ r = db->close(db, 0); CKERR(r);
+ }
+ {
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, nameb, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR2(r, ENOENT);
+ r = db->close(db, 0); CKERR(r);
+ }
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-fdelete-some.cc b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-fdelete-some.cc
new file mode 100644
index 00000000..9441d93e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-fdelete-some.cc
@@ -0,0 +1,251 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test makes sure the LSN filtering is used during recovery of put_multiple
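+// Unlike the fdelete-all variant, only b.db is removed before the crash, so recovery must
+// drop b.db (open returns ENOENT) while preserving the committed <a,b> row in a.db.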
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+enum {num_dbs = 2};
+static DBT dest_keys[num_dbs];
+static DBT dest_vals[num_dbs];
+
+bool do_test=false, do_recover=false;
+
+static int
+put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ if (src_db) {
+ assert(src_db->descriptor);
+ assert(src_db->descriptor->dbt.size == 4);
+ assert((*(uint32_t*)src_db->descriptor->dbt.data) == 0);
+ }
+ assert(dest_db->descriptor->dbt.size == 4);
+ uint32_t which = *(uint32_t*)dest_db->descriptor->dbt.data;
+ assert(which < num_dbs);
+
+ if (dest_key->data) toku_free(dest_key->data);
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_key->data = toku_xmemdup (src_key->data, src_key->size);
+ dest_key->size = src_key->size;
+ dest_val->data = toku_xmemdup (src_val->data, src_val->size);
+ dest_val->size = src_val->size;
+ return 0;
+}
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // create a txn that never closes, forcing recovery to run from the beginning of the log
+ {
+ DB_TXN *oldest_living_txn;
+ r = env->txn_begin(env, NULL, &oldest_living_txn, 0); CKERR(r);
+ }
+
+ DBT descriptor;
+ uint32_t which;
+ for (which = 0; which < num_dbs; which++) {
+ dbt_init_realloc(&dest_keys[which]);
+ dbt_init_realloc(&dest_vals[which]);
+ }
+ dbt_init(&descriptor, &which, sizeof(which));
+ DB *dba;
+ DB *dbb;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ which = 0;
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dba->change_descriptor(dba, txn_desc, &descriptor, 0); CKERR(chk_r); }
+ });
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ which = 1;
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbb->change_descriptor(dbb, txn_desc, &descriptor, 0); CKERR(chk_r); }
+ });
+
+ DB *dbs[num_dbs] = {dba, dbb};
+ uint32_t flags[num_dbs] = {0, 0};
+ // txn_begin; insert <a,a>; txn_abort
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+
+ r = env_put_multiple_test_no_array(env, dba, txn, &k, &v, num_dbs, dbs, dest_keys, dest_vals, flags);
+ CKERR(r);
+ r = txn->abort(txn); CKERR(r);
+ }
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(r);
+ dbs[1] = dbb;
+
+ // txn_begin; insert <a,b>;
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+
+ r = env_put_multiple_test_no_array(env, NULL, txn, &k, &v, num_dbs, dbs, dest_keys, dest_vals, flags);
+ CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->dbremove(env, txn, nameb, NULL, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+
+ r = env->log_flush(env, NULL); CKERR(r);
+ // abort the process
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ // Recovery starts from oldest_living_txn, which is older than any inserts done in run_test,
+ // so recovery always runs over the entire log.
+
+ // run recovery
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // verify the data
+ {
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, nameb, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR2(r, ENOENT);
+ r = db->close(db, 0); CKERR(r);
+ }
+ {
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ DBT k, v;
+ r = cursor->c_get(cursor, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_FIRST);
+ CKERR(r);
+ assert(k.size == 2);
+ assert(v.size == 2);
+ assert(memcmp(k.data, "a", 2) == 0);
+ assert(memcmp(v.data, "b", 2) == 0);
+ toku_free(k.data);
+ toku_free(v.data);
+
+ r = cursor->c_close(cursor); CKERR(r);
+
+ r = txn->commit(txn, 0); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ }
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-srcdb-fdelete-all.cc b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-srcdb-fdelete-all.cc
new file mode 100644
index 00000000..9f49e892
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple-srcdb-fdelete-all.cc
@@ -0,0 +1,233 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test makes sure the LSN filtering is used during recovery of put_multiple
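+// In this variant the puts name a source db (dbs[0]), so the generate callback asserts that
+// src_db is non-NULL before fanning the row out to both destination dbs.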
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+const char *nameb="b.db";
+enum {num_dbs = 2};
+static DBT dest_keys[num_dbs];
+static DBT dest_vals[num_dbs];
+
+bool do_test=false, do_recover=false;
+
+static int
+put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ assert(src_db != NULL);
+ assert(dest_db->descriptor->dbt.size == 4);
+ uint32_t which = *(uint32_t*)dest_db->descriptor->dbt.data;
+ assert(which < num_dbs);
+
+ if (dest_key->data) toku_free(dest_key->data);
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_key->data = toku_xmemdup (src_key->data, src_key->size);
+ dest_key->size = src_key->size;
+ dest_val->data = toku_xmemdup (src_val->data, src_val->size);
+ dest_val->size = src_val->size;
+ return 0;
+}
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // create a txn that never closes, forcing recovery to run from the beginning of the log
+ {
+ DB_TXN *oldest_living_txn;
+ r = env->txn_begin(env, NULL, &oldest_living_txn, 0); CKERR(r);
+ }
+
+ DBT descriptor;
+ uint32_t which;
+ for (which = 0; which < num_dbs; which++) {
+ dbt_init_realloc(&dest_keys[which]);
+ dbt_init_realloc(&dest_vals[which]);
+ }
+ dbt_init(&descriptor, &which, sizeof(which));
+ DB *dba;
+ DB *dbb;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ which = 0;
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dba->change_descriptor(dba, txn_desc, &descriptor, 0); CKERR(chk_r); }
+ });
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ which = 1;
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbb->change_descriptor(dbb, txn_desc, &descriptor, 0); CKERR(chk_r); }
+ });
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ DB *dbs[num_dbs] = {dba, dbb};
+ uint32_t flags[num_dbs] = {0, 0};
+ // txn_begin; insert <a,a>; txn_abort
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+
+ r = env_put_multiple_test_no_array(env, num_dbs > 0 ? dbs[0] : NULL, txn, &k, &v, num_dbs, dbs, dest_keys, dest_vals, flags);
+ CKERR(r);
+ r = txn->abort(txn); CKERR(r);
+ }
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(r);
+ dbs[1] = dbb;
+
+ // txn_begin; insert <a,b>;
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+
+ r = env_put_multiple_test_no_array(env, num_dbs > 0 ? dbs[0] : NULL, txn, &k, &v, num_dbs, dbs, dest_keys, dest_vals, flags);
+ CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = env->dbremove(env, txn, namea, NULL, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->dbremove(env, txn, nameb, NULL, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+
+ r = env->log_flush(env, NULL); CKERR(r);
+ // abort the process
+ toku_hard_crash_on_purpose();
+}
+
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ // Recovery starts from oldest_living_txn, which is older than any inserts done in run_test,
+ // so recovery always runs over the entire log.
+
+ // run recovery
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // verify the data
+ {
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR2(r, ENOENT);
+ r = db->close(db, 0); CKERR(r);
+ }
+ {
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, nameb, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR2(r, ENOENT);
+ r = db->close(db, 0); CKERR(r);
+ }
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-put-multiple.cc b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple.cc
new file mode 100644
index 00000000..d5612867
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-put-multiple.cc
@@ -0,0 +1,277 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of a put multiple log entry
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(i + dbnum);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ // dest_val_arrays is NULL when this is reached through del_callback, so only touch it when present
+ DBT *dest_val = NULL;
+ if (dest_val_arrays) {
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ dest_val = &dest_val_arrays->dbts[0];
+ }
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+ assert(src_db == NULL);
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_val->size / sizeof (int));
+
+ int *pri_data = (int *) src_val->data;
+
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->size = sizeof (int);
+ dest_key->data = &pri_data[dbnum];
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->size = sizeof (int);
+ dest_key->data = toku_realloc(dest_key->data, dest_key->size);
+ memcpy(dest_key->data, &pri_data[dbnum], dest_key->size);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (dest_val) {
+ switch (dest_val->flags) {
+ case 0:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = src_val->data;
+ } else
+ dest_val->size = 0;
+ break;
+ case DB_DBT_REALLOC:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = toku_realloc(dest_val->data, dest_val->size);
+ memcpy(dest_val->data, src_val->data, dest_val->size);
+ } else
+ dest_val->size = 0;
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_key_arrays, NULL, src_key, src_data);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0);
+ assert_zero(r);
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ r = env->txn_checkpoint(env, 0, 0, 0); assert_zero(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT pri_key; dbt_init(&pri_key, &k, sizeof k);
+ DBT pri_val; dbt_init(&pri_val, &v[0], sizeof v);
+ DBT keys[ndbs]; memset(keys, 0, sizeof keys);
+ DBT vals[ndbs]; memset(vals, 0, sizeof vals);
+ uint32_t flags[ndbs]; memset(flags, 0, sizeof flags);
+ r = env_put_multiple_test_no_array(env, NULL, txn, &pri_key, &pri_val, ndbs, db, keys, vals, flags);
+ assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+
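+ // the transaction committed before the crash, so recovery must redo the put_multiple
+ // log entries; verify_seq expects all nrows rows to be present in every db.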
+ toku_hard_crash_on_purpose();
+}
+
+static void
+verify_seq(DB_ENV *env, DB *db, int dbnum, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == get_key(i, dbnum));
+
+ if (dbnum == 0) {
+ assert(val.size == ndbs * sizeof (int));
+ int v[ndbs]; get_data(v, i, ndbs);
+ assert(memcmp(val.data, v, val.size) == 0);
+ } else
+ assert(val.size == 0);
+ }
+ assert(i == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_all(DB_ENV *env, int ndbs, int nrows) {
+ int r;
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ verify_seq(env, db, dbnum, ndbs, nrows);
+ r = db->close(db, 0);
+ assert_zero(r);
+ }
+}
+
+static void
+run_recover(int ndbs, int nrows) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ verify_all(env, ndbs, nrows);
+ r = env->close(env, 0); assert_zero(r);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ bool do_test = false;
+ bool do_recover = false;
+ int ndbs = 2;
+ int nrows = 1;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test)
+ run_test(ndbs, nrows);
+ if (do_recover)
+ run_recover(ndbs, nrows);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-rollback.cc b/storage/tokudb/PerconaFT/src/tests/recover-rollback.cc
new file mode 100644
index 00000000..beb37ec3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-rollback.cc
@@ -0,0 +1,209 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Test dirty upgrade.
+// Generate a rollback log that requires recovery.
+
+#include "test.h"
+
+// Insert max_rows key/val pairs into the db
+static void do_inserts(DB_TXN *txn, DB *db, uint64_t max_rows, size_t val_size) {
+ char val_data[val_size]; memset(val_data, 0, val_size);
+ int r;
+
+ for (uint64_t i = 0; i < max_rows; i++) {
+ // pick a sequential key, although the key order does not matter for this test.
+ uint64_t k[2] = {
+ htonl(i), random64(),
+ };
+
+ DBT key = { .data = k, .size = sizeof k };
+ DBT val = { .data = val_data, .size = (uint32_t) val_size };
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ }
+}
+
+static void run_test(uint64_t num_rows, size_t val_size, bool do_crash) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_cachesize(env, 8, 0, 1);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB *db = nullptr;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
+ DB_TXN *txn = nullptr;
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ do_inserts(txn, db, num_rows, val_size);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ if (do_crash)
+ assert(0); // crash on purpose
+
+ r = db->close(db, 0);
+ CKERR(r);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+static void do_verify(DB_ENV *env, DB *db, uint64_t num_rows, size_t val_size UU()) {
+ int r;
+ DB_TXN *txn = nullptr;
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ DBC *c = nullptr;
+ r = db->cursor(db, txn, &c, 0);
+ CKERR(r);
+
+ uint64_t i = 0;
+ while (1) {
+ DBT key = {};
+ DBT val = {};
+ r = c->c_get(c, &key, &val, DB_NEXT);
+ if (r == DB_NOTFOUND)
+ break;
+ CKERR(r);
+ assert(key.size == 16);
+ uint64_t k[2];
+ memcpy(k, key.data, key.size);
+ assert(htonl(k[0]) == i);
+ assert(val.size == val_size);
+ i++;
+ }
+ assert(i == num_rows);
+
+ r = c->c_close(c);
+ CKERR(r);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+}
+
+static void run_recover(uint64_t num_rows, size_t val_size) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_cachesize(env, 8, 0, 1);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB *db = nullptr;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ do_verify(env, db, num_rows, val_size);
+
+ r = db->close(db, 0);
+ CKERR(r);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main (int argc, char *const argv[]) {
+ bool do_test = false;
+ bool do_recover = false;
+ bool do_crash = true;
+ uint64_t num_rows = 1;
+ size_t val_size = 1;
+
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0) verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(argv[i], "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(argv[i], "--crash") == 0 && i+1 < argc) {
+ do_crash = atoi(argv[++i]);
+ continue;
+ }
+ }
+ if (do_test) {
+ // init the env directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ run_test(num_rows, val_size, do_crash);
+ }
+ if (do_recover) {
+ run_recover(num_rows, val_size);
+ }
+
+ return 0;
+}
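+
+// What follows is a minimal, illustrative sketch (kept out of the build with
+// #if 0) of how a two-phase crash-recovery test like this one is typically
+// driven: the binary is run once with --test, which is expected to die on
+// purpose, and then run again with --recover against the same environment
+// directory. The binary name and the run_phase() helper are hypothetical and
+// not part of the upstream test harness.
+#if 0
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <assert.h>
+
+static int run_phase(const char *binary, const char *flag) {
+    pid_t pid = fork();
+    assert(pid >= 0);
+    if (pid == 0) {
+        execl(binary, binary, flag, (char *) NULL);
+        _exit(127); // exec failed
+    }
+    int status;
+    assert(waitpid(pid, &status, 0) == pid);
+    return status;
+}
+
+int main(void) {
+    // phase 1: --test crashes on purpose, so an abnormal exit is expected here
+    (void) run_phase("./recover-rollback.tdb", "--test");
+    // phase 2: --recover must complete cleanly for the test to pass
+    int status = run_phase("./recover-rollback.tdb", "--recover");
+    assert(WIFEXITED(status) && WEXITSTATUS(status) == 0);
+    return 0;
+}
+#endif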
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-rollinclude.cc b/storage/tokudb/PerconaFT/src/tests/recover-rollinclude.cc
new file mode 100644
index 00000000..6a847af7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-rollinclude.cc
@@ -0,0 +1,221 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Create a rollback log with a rollinclude log entry, crashing after the txn commits and before the last checkpoint.
+// Recovery crashes on 7.1.0; it should succeed.
+
+#include "test.h"
+
+// Insert max_rows key/val pairs into the db
+
+// We want to force a rollinclude, so we use a child transaction and insert enough rows that its rollback log spills.
+// It spills at about 144K and 289K rows.
+static void do_inserts(DB_ENV *env, DB *db, uint64_t max_rows, size_t val_size) {
+ char val_data[val_size]; memset(val_data, 0, val_size);
+ int r;
+ DB_TXN *parent = nullptr;
+ r = env->txn_begin(env, nullptr, &parent, 0);
+ CKERR(r);
+
+ DB_TXN *child = nullptr;
+ r = env->txn_begin(env, parent, &child, 0);
+ CKERR(r);
+
+ for (uint64_t i = 0; i < max_rows; i++) {
+ // pick a sequential key, although the key order does not matter for this test.
+ uint64_t k[2] = {
+ htonl(i), random64(),
+ };
+
+ DBT key = { .data = k, .size = sizeof k };
+ DBT val = { .data = val_data, .size = (uint32_t) val_size };
+ r = db->put(db, child, &key, &val, 0);
+ CKERR(r);
+
+ if (i == max_rows-1) {
+ r = child->commit(child, 0);
+ CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+ }
+ }
+
+ r = parent->commit(parent, 0);
+ CKERR(r);
+}
+
+static void run_test(uint64_t num_rows, size_t val_size, bool do_crash) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_cachesize(env, 8, 0, 1);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB *db = nullptr;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
+ do_inserts(env, db, num_rows, val_size);
+
+ if (do_crash)
+ assert(0); // crash on purpose
+
+ r = db->close(db, 0);
+ CKERR(r);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+static void do_verify(DB_ENV *env, DB *db, uint64_t num_rows, size_t val_size UU()) {
+ int r;
+ DB_TXN *txn = nullptr;
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ DBC *c = nullptr;
+ r = db->cursor(db, txn, &c, 0);
+ CKERR(r);
+
+ uint64_t i = 0;
+ while (1) {
+ DBT key = {};
+ DBT val = {};
+ r = c->c_get(c, &key, &val, DB_NEXT);
+ if (r == DB_NOTFOUND)
+ break;
+ CKERR(r);
+ assert(key.size == 16);
+ uint64_t k[2];
+ memcpy(k, key.data, key.size);
+ assert(htonl(k[0]) == i);
+ assert(val.size == val_size);
+ i++;
+ }
+ assert(i == num_rows);
+
+ r = c->c_close(c);
+ CKERR(r);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+}
+
+static void run_recover(uint64_t num_rows, size_t val_size) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_cachesize(env, 8, 0, 1);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB *db = nullptr;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ do_verify(env, db, num_rows, val_size);
+
+ r = db->close(db, 0);
+ CKERR(r);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main (int argc, char *const argv[]) {
+ bool do_test = false;
+ bool do_recover = false;
+ bool do_crash = true;
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0) verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(argv[i], "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(argv[i], "--crash") == 0 && i+1 < argc) {
+ do_crash = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ uint64_t num_rows = 300000;
+ size_t val_size = 1;
+
+ if (do_test) {
+ // init the env directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ run_test(num_rows, val_size, do_crash);
+ }
+ if (do_recover) {
+ run_recover(num_rows, val_size);
+ }
+
+ return 0;
+}
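+
+// A minimal, illustrative sketch (kept out of the build with #if 0) of the
+// nested-transaction pattern exercised above: work done under a child
+// transaction is merged into its parent on commit and only becomes durable
+// when the parent commits; a large child rollback log is what gets spilled
+// and referenced by a rollinclude entry. The helper name is hypothetical and
+// the environment setup is assumed to match run_test() above.
+#if 0
+static void nested_txn_sketch(DB_ENV *env, DB *db, DBT *key, DBT *val) {
+    DB_TXN *parent = nullptr;
+    DB_TXN *child = nullptr;
+    int r = env->txn_begin(env, nullptr, &parent, 0); CKERR(r);
+    r = env->txn_begin(env, parent, &child, 0); CKERR(r);
+    r = db->put(db, child, key, val, 0); CKERR(r);
+    r = child->commit(child, 0); CKERR(r);   // merged into the parent, not yet durable
+    r = parent->commit(parent, 0); CKERR(r); // now durable
+}
+#endif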
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-split-checkpoint.cc b/storage/tokudb/PerconaFT/src/tests/recover-split-checkpoint.cc
new file mode 100644
index 00000000..b53e6752
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-split-checkpoint.cc
@@ -0,0 +1,198 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// force a checkpoint to span multiple tokulog files. in other words, the begin checkpoint log entry and the
+// end checkpoint log entry for the same checkpoint are in different log files.
+
+#include <sys/stat.h>
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static void test_checkpoint_callback(void *extra) {
+ int r;
+ DB_ENV *env = (DB_ENV *) extra;
+
+ // create and commit a bunch of transactions. the last commit fsyncs the log. since the log is
+ // really small, a new log file is created before the end checkpoint is logged.
+ int i;
+ for (i=0; i<100; i++) {
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, i == 99 ? DB_TXN_SYNC : 0); CKERR(r);
+ }
+}
+
+static void test_checkpoint_callback2(void *extra) {
+ (void) extra;
+}
+
+static void run_test (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); CKERR(r);
+
+ db_env_set_checkpoint_callback(test_checkpoint_callback, env);
+ db_env_set_checkpoint_callback2(test_checkpoint_callback2, env);
+
+ r = env->set_lg_max(env, 1024); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (bool did_commit) {
+ (void) did_commit;
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void run_recover_only (void) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void run_no_recover (void) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+}
+
+const char *cmd;
+
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_commit) {
+ run_test(true, false);
+ } else if (do_abort) {
+ run_test(false, true);
+ } else if (do_recover_committed) {
+ run_recover(true);
+ } else if (do_recover_aborted) {
+ run_recover(false);
+ } else if (do_recover_only) {
+ run_recover_only();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
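+
+// A minimal, illustrative sketch (kept out of the build with #if 0) of one way
+// to confirm that the checkpoint really did straddle log files: count the log
+// files left in the environment directory after run_test(). It assumes the
+// recovery log file names contain "tokulog", mirroring the logger's naming
+// convention; count_log_files() is hypothetical and not part of this test.
+#if 0
+#include <dirent.h>
+#include <string.h>
+#include <assert.h>
+
+static int count_log_files(const char *envdir) {
+    DIR *d = opendir(envdir);
+    assert(d != NULL);
+    int n = 0;
+    for (struct dirent *e = readdir(d); e != NULL; e = readdir(d)) {
+        if (strstr(e->d_name, "tokulog") != NULL)
+            n++;
+    }
+    closedir(d);
+    return n;
+}
+
+// With set_lg_max(env, 1024) and ~100 commits inside the checkpoint callback,
+// count_log_files(TOKU_TEST_FILENAME) should be greater than 1.
+#endif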
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-straddle-txn-nested.cc b/storage/tokudb/PerconaFT/src/tests/recover-straddle-txn-nested.cc
new file mode 100644
index 00000000..18818c73
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-straddle-txn-nested.cc
@@ -0,0 +1,172 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// TODO
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ DB_TXN *atxn;
+ r = env->txn_begin(env, NULL, &atxn, 0); CKERR(r);
+
+ // create a live transaction so that recovery has to scan back to this point in the log
+ DB_TXN *livetxn;
+ r = env->txn_begin(env, NULL, &livetxn, 0); CKERR(r);
+
+ DB_TXN *btxn;
+ r = env->txn_begin(env, atxn, &btxn, 0); CKERR(r);
+
+ r = btxn->commit(btxn, 0); CKERR(r);
+
+ r = atxn->commit(atxn, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ // abort the process
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ // run recovery
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // verify the data
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ DBT k, v;
+ r = cursor->c_get(cursor, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_FIRST);
+ assert(r == DB_NOTFOUND);
+ r = cursor->c_close(cursor); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-straddle-txn.cc b/storage/tokudb/PerconaFT/src/tests/recover-straddle-txn.cc
new file mode 100644
index 00000000..163724a0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-straddle-txn.cc
@@ -0,0 +1,176 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// nested txn straddles the recovery turn around point in the log
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+const char *namea="a.db";
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ DB_TXN *atxn;
+ r = env->txn_begin(env, NULL, &atxn, 0); CKERR(r);
+
+ DB_TXN *btxn;
+ r = env->txn_begin(env, atxn, &btxn, 0); CKERR(r);
+
+ // create a live transaction so that recovery has to scan back to this point in the log
+ DB_TXN *livetxn;
+ r = env->txn_begin(env, NULL, &livetxn, 0); CKERR(r);
+
+ DBT k,v;
+ dbt_init(&k, "a", 2);
+ dbt_init(&v, "b", 2);
+ r = db->put(db, btxn, &k, &v, 0); CKERR(r);
+ r = btxn->commit(btxn, 0); CKERR(r);
+
+ r = atxn->abort(atxn); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ // abort the process
+ toku_hard_crash_on_purpose();
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ // run recovery
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // verify the data
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, namea, NULL, DB_UNKNOWN, DB_AUTO_COMMIT, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ DBT k, v;
+ r = cursor->c_get(cursor, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_FIRST);
+ assert(r == DB_NOTFOUND);
+ r = cursor->c_close(cursor); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-tablelock.cc b/storage/tokudb/PerconaFT/src/tests/recover-tablelock.cc
new file mode 100644
index 00000000..30b61689
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-tablelock.cc
@@ -0,0 +1,239 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the table lock log entry is handled
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+
+static void
+do_x1_shutdown (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ r = db->pre_acquire_table_lock(db, txn); CKERR(r);
+
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = db->put(db, txn, &a, &b, 0); CKERR(r);
+
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool UU(did_commit)) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ DBC *ca;
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ DBT aa,ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); // ra may be DB_NOTFOUND; it is checked below
+ if (did_commit) {
+ assert(ra==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+ assert(memcmp(ab.data, &b, 2)==0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ fprintf(stderr, "Both verified. Yay!\n");
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ fprintf(stderr, "Neither present. Yay!\n");
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_recover_only (void) {
+ int r;
+ DB_ENV *env;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_no_recover (void) {
+ int r;
+ DB_ENV *env;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown (true, false);
+ } else if (do_abort) {
+ do_x1_shutdown (false, false);
+ } else if (do_explicit_abort) {
+ do_x1_shutdown(false, true);
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ } else if (do_recover_aborted) {
+ do_x1_recover(false);
+ } else if (do_recover_only) {
+ do_x1_recover_only();
+ } else if (do_no_recover) {
+ do_x1_no_recover();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
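+
+// A minimal, illustrative sketch (kept out of the build with #if 0) of the
+// intended use of pre_acquire_table_lock(): take one table-wide lock up front
+// so that a large batch of puts under the same transaction is covered by a
+// single lock instead of many per-row lock acquisitions. This is the operation
+// whose log entry the test above verifies; bulk_insert() is hypothetical.
+#if 0
+static void bulk_insert(DB_ENV *env, DB *db, int nrows) {
+    DB_TXN *txn = NULL;
+    int r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+    r = db->pre_acquire_table_lock(db, txn); CKERR(r);
+    for (int i = 0; i < nrows; i++) {
+        DBT k, v;
+        r = db->put(db, txn, dbt_init(&k, &i, sizeof i), dbt_init(&v, &i, sizeof i), 0);
+        CKERR(r);
+    }
+    r = txn->commit(txn, 0); CKERR(r);
+}
+#endif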
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-test-logsuppress-put.cc b/storage/tokudb/PerconaFT/src/tests/recover-test-logsuppress-put.cc
new file mode 100644
index 00000000..4fccb003
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-test-logsuppress-put.cc
@@ -0,0 +1,290 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Verify that log-suppress recovery is done properly. (See ticket 2781.)
+// TODO: determine if this is useful at all anymore (log suppression does not exist anymore)
+
+
+#include <sys/stat.h>
+#include <db.h>
+#include "test.h"
+#include "ydb-internal.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+
+DB_ENV *env;
+DB_TXN *parent;
+DB_TXN *child;
+
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Some loader functions to allow the test to work:
+
+
+// There is no handlerton in this test, so this function is a local replacement
+// for the handlerton's generate_row_for_put().
+static int put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+
+ (void) src_db;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+
+ if ( which == 0 ) {
+ if (dest_key->flags==DB_DBT_REALLOC) {
+ if (dest_key->data) toku_free(dest_key->data);
+ dest_key->flags = 0;
+ dest_key->ulen = 0;
+ }
+ if (dest_val->flags==DB_DBT_REALLOC) {
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_val->flags = 0;
+ dest_val->ulen = 0;
+ }
+ dbt_init(dest_key, src_key->data, src_key->size);
+ dbt_init(dest_val, src_val->data, src_val->size);
+ }
+ else {
+ assert(dest_key->flags==DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(unsigned int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(unsigned int));
+ dest_key->ulen = sizeof(unsigned int);
+ }
+ assert(dest_val->flags==DB_DBT_REALLOC);
+ if (dest_val->ulen < sizeof(unsigned int)) {
+ dest_val->data = toku_xrealloc(dest_val->data, sizeof(unsigned int));
+ dest_val->ulen = sizeof(unsigned int);
+ }
+ unsigned int *new_key = (unsigned int *)dest_key->data;
+ unsigned int *new_val = (unsigned int *)dest_val->data;
+
+ *new_key = 1;
+ *new_val = 2;
+
+ dest_key->size = sizeof(unsigned int);
+ dest_val->size = sizeof(unsigned int);
+ //data is already set above
+ }
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
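+
+// A minimal, illustrative sketch (kept out of the build with #if 0) of the
+// simplest possible generate_row_for_put() replacement: an identity mapping
+// that forwards the primary key/value unchanged. It shows the callback
+// contract used above (resize each DBT_ARRAY to one slot and respect
+// DB_DBT_REALLOC); the name identity_generate_row is hypothetical.
+#if 0
+static int identity_generate_row(DB *dest_db, DB *src_db,
+                                 DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals,
+                                 const DBT *src_key, const DBT *src_val) {
+    (void) dest_db; (void) src_db;
+    toku_dbt_array_resize(dest_keys, 1);
+    toku_dbt_array_resize(dest_vals, 1);
+    DBT *dest_key = &dest_keys->dbts[0];
+    DBT *dest_val = &dest_vals->dbts[0];
+    if (dest_key->flags == DB_DBT_REALLOC && dest_key->data) toku_free(dest_key->data);
+    if (dest_val->flags == DB_DBT_REALLOC && dest_val->data) toku_free(dest_val->data);
+    dbt_init(dest_key, src_key->data, src_key->size);
+    dbt_init(dest_val, src_val->data, src_val->size);
+    return 0;
+}
+#endif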
+
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// The test itself:
+
+#define MAX_DBS 1
+#define NUM_ROWS 1
+#define NUM_DBS 1
+
+// Create loader, insert row(s)
+static void
+load(DB **dbs) {
+ int r;
+ DB_TXN *txn;
+ DB_LOADER *loader;
+ uint32_t db_flags[MAX_DBS];
+ uint32_t dbt_flags[MAX_DBS];
+ for(int i=0;i<MAX_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ }
+ uint32_t loader_flags = LOADER_COMPRESS_INTERMEDIATES;
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = env->create_loader(env, txn, &loader, dbs[0], NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+
+ // using loader->put, put values into DB
+ DBT key, val;
+ unsigned int k, v;
+ for(int i=1;i<=NUM_ROWS;i++) {
+ k = i;
+ v = i+1;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ r = loader->put(loader, &key, &val);
+ CKERR(r);
+ }
+
+}
+
+
+static void
+checkpoint_callback(void * UU(extra)){
+ printf("Deliberately crash during checkpoint\n");
+ fflush(stdout);
+ int r = env->log_flush(env, NULL); //TODO: Use a real DB_LSN* instead of NULL
+ CKERR(r);
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_shutdown (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+
+ char name[128];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ load(dbs);
+
+ // r=env->txn_begin(env, 0, &parent, 0); assert(r==0);
+ // r=env->txn_begin(env, &parent, &child, 0); assert(r==0);
+
+ // crash during checkpoint
+ db_env_set_checkpoint_callback(checkpoint_callback, NULL);
+ r=env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+}
+
+static void
+do_x1_recover (bool UU(did_commit)) {
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ /*****
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db->get(db, tid, dbt_init(&key, "a", 2), dbt_init_malloc(&data), 0); assert(r==0);
+ r=tid->commit(tid, 0); assert(r==0);
+ toku_free(data.data);
+ r=db->close(db, 0); CKERR(r);
+ *********/
+ r=env->close(env, 0); CKERR(r);
+}
+
+bool do_commit=false, do_recover_committed=false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown();
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-test-logsuppress.cc b/storage/tokudb/PerconaFT/src/tests/recover-test-logsuppress.cc
new file mode 100644
index 00000000..36f14c4c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-test-logsuppress.cc
@@ -0,0 +1,287 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Verify that log-suppress recovery is done properly. (See ticket 2781.)
+
+
+#include <sys/stat.h>
+#include <db.h>
+#include "test.h"
+#include "ydb-internal.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+
+DB_ENV *env;
+DB_TXN *parent;
+DB_TXN *child;
+
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Some loader functions to allow the test to work:
+
+
+// There is no handlerton in this test, so this function is a local replacement
+// for the handlerton's generate_row_for_put().
+static int put_multiple_generate(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_keys, 1);
+ toku_dbt_array_resize(dest_vals, 1);
+ DBT *dest_key = &dest_keys->dbts[0];
+ DBT *dest_val = &dest_vals->dbts[0];
+ (void) src_db;
+ (void) src_key;
+ (void) src_val;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+
+ assert(which != 0);
+ assert(dest_db != src_db);
+ {
+ assert(dest_key->flags==DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(unsigned int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(unsigned int));
+ dest_key->ulen = sizeof(unsigned int);
+ }
+ assert(dest_val->flags==DB_DBT_REALLOC);
+ if (dest_val->ulen < sizeof(unsigned int)) {
+ dest_val->data = toku_xrealloc(dest_val->data, sizeof(unsigned int));
+ dest_val->ulen = sizeof(unsigned int);
+ }
+ unsigned int *new_key = (unsigned int *)dest_key->data;
+ unsigned int *new_val = (unsigned int *)dest_val->data;
+
+ *new_key = 1;
+ *new_val = 2;
+
+ dest_key->size = sizeof(unsigned int);
+ dest_val->size = sizeof(unsigned int);
+ //data is already set above
+ }
+
+// printf("dest_key.data = %d\n", *(int*)dest_key->data);
+// printf("dest_val.data = %d\n", *(int*)dest_val->data);
+
+ return 0;
+}
+
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// The test itself:
+
+#define MAX_DBS 1
+#define NUM_ROWS 1
+#define NUM_DBS 1
+
+// Create loader, insert row(s)
+static void
+load(DB **dbs) {
+ int r;
+ DB_TXN *ptxn = NULL;
+ DB_TXN *txn = NULL;
+ DB_LOADER *loader;
+ uint32_t db_flags[MAX_DBS];
+ uint32_t dbt_flags[MAX_DBS];
+ for(int i=0;i<MAX_DBS;i++) {
+ db_flags[i] = DB_NOOVERWRITE;
+ dbt_flags[i] = 0;
+ }
+ uint32_t loader_flags = 0;
+
+ // create and initialize loader
+ r = env->txn_begin(env, NULL, &ptxn, 0);
+ CKERR(r);
+ r = env->txn_begin(env, ptxn, &txn, 0);
+ CKERR(r);
+ r = env->create_loader(env, txn, &loader, dbs[0], NUM_DBS, dbs, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+
+ // using loader->put, put values into DB
+ DBT key, val;
+ unsigned int k, v;
+ for(int i=1;i<=NUM_ROWS;i++) {
+ k = i;
+ v = i+1;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ r = loader->put(loader, &key, &val);
+ CKERR(r);
+ }
+
+ // close loader
+ r = loader->close(loader);
+ CKERR(r);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+}
+
+
+static void
+checkpoint_callback(void * UU(extra)){
+ printf("Deliberately crash during checkpoint\n");
+ fflush(stdout);
+ int r = env->log_flush(env, NULL); //TODO: Use a real DB_LSN* instead of NULL
+ CKERR(r);
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_shutdown (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+
+ char name[128];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ load(dbs);
+
+ // r=env->txn_begin(env, 0, &parent, 0); assert(r==0);
+ // r=env->txn_begin(env, &parent, &child, 0); assert(r==0);
+
+ // crash during checkpoint
+ db_env_set_checkpoint_callback(checkpoint_callback, NULL);
+ r=env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+}
+
+static void
+do_x1_recover (bool UU(did_commit)) {
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ /*****
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db->get(db, tid, dbt_init(&key, "a", 2), dbt_init_malloc(&data), 0); assert(r==0);
+ r=tid->commit(tid, 0); assert(r==0);
+ toku_free(data.data);
+ r=db->close(db, 0); CKERR(r);
+ *********/
+ r=env->close(env, 0); CKERR(r);
+}
+
+bool do_commit=false, do_recover_committed=false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown();
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-test1.cc b/storage/tokudb/PerconaFT/src/tests/recover-test1.cc
new file mode 100644
index 00000000..1b159e21
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-test1.cc
@@ -0,0 +1,160 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that a committed insert is recovered after a crash
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+
+DB_ENV *env;
+DB_TXN *tid;
+DB *db;
+DBT key,data;
+
+static void
+do_x1_shutdown (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ {
+ DB_TXN *oldest;
+ r=env->txn_begin(env, 0, &oldest, 0);
+ CKERR(r);
+ }
+
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->put(db, tid, dbt_init(&key, "a", 2), dbt_init(&data, "b", 2), 0); assert(r==0);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ r=db->close(db, 0); assert(r==0);
+
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool UU(did_commit)) {
+ int r;
+ char glob[TOKU_PATH_MAX+1];
+ toku_os_recursive_delete(toku_path_join(glob, 2, TOKU_TEST_FILENAME, "*.tokudb"));
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db->get(db, tid, dbt_init(&key, "a", 2), dbt_init_malloc(&data), 0); assert(r==0);
+ r=tid->commit(tid, 0); assert(r==0);
+ toku_free(data.data);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+bool do_commit=false, do_recover_committed=false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (n_specified>1) {
+	    printf("Specify only one of --test or --recover\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown();
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-test2.cc b/storage/tokudb/PerconaFT/src/tests/recover-test2.cc
new file mode 100644
index 00000000..167c8f58
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-test2.cc
@@ -0,0 +1,180 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that a committed batch of inserts (and the table lock log entry from creating the table) is handled by recovery
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const char *namea="a.db";
+
+DB_ENV *env;
+DB_TXN *tid;
+DB *db;
+DBT key,data;
+int i;
+enum {N=1000};
+char *keys[N];
+char *vals[N];
+
+static void
+do_x1_shutdown (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ {
+ DB_TXN *oldest;
+ r=env->txn_begin(env, 0, &oldest, 0);
+ CKERR(r);
+ }
+
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ for (i=0; i<N; i++) {
+ r=db->put(db, tid, dbt_init(&key, keys[i], strlen(keys[i])+1), dbt_init(&data, vals[i], strlen(vals[i])+1), 0); assert(r==0);
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+
+ r=db->close(db, 0); assert(r==0);
+
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool UU(did_commit)) {
+ int r;
+ char glob[TOKU_PATH_MAX+1];
+ toku_os_recursive_delete(toku_path_join(glob, 2, TOKU_TEST_FILENAME, "*.tokudb"));
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ for (i=0; i<N; i++) {
+ r=db->get(db, tid, dbt_init(&key, keys[i], 1+strlen(keys[i])), dbt_init_malloc(&data), 0); assert(r==0);
+ assert(strcmp((char*)data.data, vals[i])==0);
+ toku_free(data.data);
+ data.data=0;
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ toku_free(data.data);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+bool do_commit=false, do_recover_committed=false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+	    fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover}\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (n_specified>1) {
+	    printf("Specify only one of --test or --recover\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ srandom(0xDEADBEEF);
+ for (i=0; i<N; i++) {
+ char ks[100]; snprintf(ks, sizeof(ks), "k%09ld.%d", random(), i);
+ char vs[1000]; snprintf(vs, sizeof(vs), "v%d.%0*d", i, (int)(sizeof(vs)-100), i);
+ keys[i]=toku_strdup(ks);
+ vals[i]=toku_strdup(vs);
+ }
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown();
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ }
+ for (i=0; i<N; i++) {
+ toku_free(keys[i]);
+ toku_free(vals[i]);
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-test3.cc b/storage/tokudb/PerconaFT/src/tests/recover-test3.cc
new file mode 100644
index 00000000..9123c6db
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-test3.cc
@@ -0,0 +1,188 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that inserts committed across many small transactions (and the table lock log entry) are handled by recovery
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const char *namea="a.db";
+
+DB_ENV *env;
+DB_TXN *tid;
+DB *db;
+DBT key,data;
+int i;
+enum {N=10000};
+char *keys[N];
+char *vals[N];
+
+static void
+do_x1_shutdown (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ {
+ DB_TXN *oldest;
+ r=env->txn_begin(env, 0, &oldest, 0);
+ CKERR(r);
+ }
+
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ for (i=0; i<N; i++) {
+ r=db->put(db, tid, dbt_init(&key, keys[i], strlen(keys[i])+1), dbt_init(&data, vals[i], strlen(vals[i])+1), 0); assert(r==0);
+ if (i%500==499) {
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ }
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+
+ r=db->close(db, 0); assert(r==0);
+
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool UU(did_commit)) {
+ int r;
+ char glob[TOKU_PATH_MAX+1];
+ toku_os_recursive_delete(toku_path_join(glob, 2, TOKU_TEST_FILENAME, "*.tokudb"));
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ for (i=0; i<N; i++) {
+ r=db->get(db, tid, dbt_init(&key, keys[i], 1+strlen(keys[i])), dbt_init_malloc(&data), 0); assert(r==0);
+ assert(strcmp((char*)data.data, vals[i])==0);
+ toku_free(data.data);
+ data.data=0;
+ if (i%500==499) {
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ }
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ toku_free(data.data);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+bool do_commit=false, do_recover_committed=false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+	    fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover}\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (n_specified>1) {
+	    printf("Specify only one of --test or --recover\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ srandom(0xDEADBEEF);
+ for (i=0; i<N; i++) {
+ char ks[100]; snprintf(ks, sizeof(ks), "k%09ld.%d", random(), i);
+ char vs[1000]; snprintf(vs, sizeof(vs), "v%d.%0*d", i, (int)(sizeof(vs)-100), i);
+ keys[i]=toku_strdup(ks);
+ vals[i]=toku_strdup(vs);
+ }
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown();
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ }
+ for (i=0; i<N; i++) {
+ toku_free(keys[i]);
+ toku_free(vals[i]);
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-test_crash_in_flusher_thread.h b/storage/tokudb/PerconaFT/src/tests/recover-test_crash_in_flusher_thread.h
new file mode 100644
index 00000000..5c10d0cb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-test_crash_in_flusher_thread.h
@@ -0,0 +1,140 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+toku_pthread_t checkpoint_tid;
+static int cnt = 0;
+static bool starting_a_chkpt = false;
+
+int state_to_crash = 0;
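+// The test that includes this header presumably sets state_to_crash to the
+// flusher-thread state (as passed to flt_callback below) at which a checkpoint
+// should be started and the process crashed.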
+
+static void *do_checkpoint_and_crash(void *arg) {
+    // take a checkpoint from this thread, then crash on purpose so the recovery path gets exercised
+ DB_ENV* CAST_FROM_VOIDP(env, arg);
+ if (verbose) printf("starting a checkpoint\n");
+ int r = env->txn_checkpoint(env, 0, 0, 0); assert(r==0);
+ if (verbose) printf("completed a checkpoint, about to crash\n");
+ toku_hard_crash_on_purpose();
+ return arg;
+}
+
+static void flt_callback(int flt_state, void* extra) {
+ cnt++;
+ if (verbose) printf("flt_state!! %d\n", flt_state);
+ if (cnt > 0 && !starting_a_chkpt && flt_state == state_to_crash) {
+ starting_a_chkpt = true;
+ if (verbose)
+ printf("flt_state %d\n", flt_state);
+ int r = toku_pthread_create(toku_uninstrumented,
+ &checkpoint_tid,
+ nullptr,
+ do_checkpoint_and_crash,
+ extra);
+ assert(r == 0);
+ usleep(2 * 1000 * 1000);
+ }
+}
+
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - one thread constantly updating random values
+ // - one thread doing table scan with bulk fetch
+ // - one thread doing table scan without bulk fetch
+ // - one thread doing random point queries
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 1;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+
+ // make the guy that updates the db
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ myargs[0].operation_extra = &uoe;
+ myargs[0].operation = update_op;
+ //myargs[0].update_pad_frequency = 0;
+
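+    // register flt_callback so the flusher thread reports its state; once it
+    // reaches state_to_crash, do_checkpoint_and_crash() is spawned to take a
+    // checkpoint and then crash the process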
+ db_env_set_flusher_thread_callback(flt_callback, env);
+ run_workers(myargs, num_threads, cli_args->num_seconds, true, cli_args);
+}
+
+static int
+run_recover_flt_test(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ // make test time arbitrarily high because we expect a crash
+ args.num_seconds = 1000000000;
+ if (state_to_crash == 1) {
+ // Getting flt_state 1 (inbox flush) requires a larger tree with more messages floating in it
+ args.num_elements = 100000;
+ args.disperse_keys = true;
+ args.key_size = 8;
+ args.val_size = 192;
+ } else {
+ args.num_elements = 2000;
+ }
+ // we want to induce a checkpoint
+ args.env_args.checkpointing_period = 0;
+ args.env_args.cachetable_size = 20 * 1024 * 1024;
+ parse_stress_test_args(argc, argv, &args);
+ if (args.do_test_and_crash) {
+ stress_test_main(&args);
+ }
+ if (args.do_recover) {
+ stress_recover(&args);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-test_stress1.cc b/storage/tokudb/PerconaFT/src/tests/recover-test_stress1.cc
new file mode 100644
index 00000000..91ad596d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-test_stress1.cc
@@ -0,0 +1,151 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test is a form of stress that does operations on a single dictionary:
+// We create a dictionary bigger than the cachetable (around 4x greater).
+// Then, we spawn a bunch of pthreads that do the following:
+// - scan dictionary forward with bulk fetch
+// - scan dictionary forward slowly
+// - scan dictionary backward with bulk fetch
+// - scan dictionary backward slowly
+// - Grow the dictionary with insertions
+// - do random point queries into the dictionary
+// With the small cachetable, this should produce quite a bit of churn in reading in and evicting nodes.
+// If the test runs to completion without crashing, we consider it a success. It also tests that snapshots
+// work correctly by verifying that table scans sum their vals to 0.
+//
+// This does NOT test:
+// - splits and merges
+// - multiple DBs
+//
+// Variables that are interesting to tweak and run:
+// - small cachetable
+// - number of elements
+//
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - one thread constantly updating random values
+ // - one thread doing table scan with bulk fetch
+ // - one thread doing table scan without bulk fetch
+ // - one thread doing random point queries
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 4 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+ struct scan_op_extra soe[4];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_op;
+
+ // make the forward slow scanner
+ soe[1].fast = false;
+ soe[1].fwd = true;
+ soe[1].prefetch = false;
+ myargs[1].operation_extra = &soe[1];
+ myargs[1].operation = scan_op;
+
+ // make the backward fast scanner
+ soe[2].fast = true;
+ soe[2].fwd = false;
+ soe[2].prefetch = false;
+ myargs[2].operation_extra = &soe[2];
+ myargs[2].operation = scan_op;
+
+ // make the backward slow scanner
+ soe[3].fast = false;
+ soe[3].fwd = false;
+ soe[3].prefetch = false;
+ myargs[3].operation_extra = &soe[3];
+ myargs[3].operation = scan_op;
+
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ // make the guy that updates the db
+ for (int i = 4; i < 4 + cli_args->num_update_threads; ++i) {
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_op;
+ myargs[i].do_prepare = true;
+ }
+
+ // make the guy that does point queries
+ for (int i = 4 + cli_args->num_update_threads; i < num_threads; i++) {
+ myargs[i].operation = ptquery_op;
+ myargs[i].do_prepare = true;
+ }
+
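+    // run for a random fraction of the requested time, presumably so the point
+    // at which the crash/recovery cycle happens varies from run to run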
+ int num_seconds = random() % cli_args->num_seconds;
+ run_workers(myargs, num_threads, num_seconds, true, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ args.env_args.checkpointing_period = 1;
+ parse_stress_test_args(argc, argv, &args);
+ if (args.do_test_and_crash) {
+ stress_test_main(&args);
+ }
+ if (args.do_recover) {
+ stress_recover(&args);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-test_stress2.cc b/storage/tokudb/PerconaFT/src/tests/recover-test_stress2.cc
new file mode 100644
index 00000000..006b96ce
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-test_stress2.cc
@@ -0,0 +1,84 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_update_threads;
+ struct arg myargs[num_threads];
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ // make the guy that updates the db
+ for (int i = 0; i < 0 + cli_args->num_update_threads; ++i) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_op;
+ }
+
+
+ int num_seconds = random() % cli_args->num_seconds;
+ run_workers(myargs, num_threads, num_seconds, true, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ args.env_args.checkpointing_period = 1;
+ args.num_elements = 2000;
+ parse_stress_test_args(argc, argv, &args);
+ if (args.do_test_and_crash) {
+ stress_test_main(&args);
+ }
+ if (args.do_recover) {
+ stress_recover(&args);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-test_stress3.cc b/storage/tokudb/PerconaFT/src/tests/recover-test_stress3.cc
new file mode 100644
index 00000000..066a74da
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-test_stress3.cc
@@ -0,0 +1,180 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test is a form of stress that does operations on a single dictionary:
+// We create a dictionary bigger than the cachetable (around 4x greater).
+// Then, we spawn a bunch of pthreads that do the following:
+// - scan dictionary forward with bulk fetch
+// - scan dictionary forward slowly
+// - scan dictionary backward with bulk fetch
+// - scan dictionary backward slowly
+// - Grow the dictionary with insertions
+// - do random point queries into the dictionary
+// With the small cachetable, this should produce quite a bit of churn in reading in and evicting nodes.
+// If the test runs to completion without crashing, we consider it a success. It also tests that snapshots
+// work correctly by verifying that table scans sum their vals to 0.
+//
+// This does NOT test:
+// - splits and merges
+// - multiple DBs
+//
+// Variables that are interesting to tweak and run:
+// - small cachetable
+// - number of elements
+//
+
+uint64_t time_til_crash;
+uint64_t start_time;
+
+static uint64_t get_tnow(void) {
+ struct timeval tv;
+ int r = gettimeofday(&tv, NULL); assert(r == 0);
+ return tv.tv_sec * 1000000ULL + tv.tv_usec;
+}
+
+static void checkpoint_callback2(void* UU(extra)) {
+ uint64_t curr_time = get_tnow();
+ uint64_t time_diff = curr_time - start_time;
+ if ((time_diff/1000000ULL) > time_til_crash) {
+ toku_hard_crash_on_purpose();
+ }
+}
+
+static int manual_checkpoint(DB_TXN *UU(txn), ARG UU(arg), void* operation_extra, void *UU(stats_extra)) {
+ DB_ENV* CAST_FROM_VOIDP(env, operation_extra);
+ int r = env->txn_checkpoint(env,0,0,0);
+ assert_zero(r);
+ return 0;
+}
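+// checkpoint_callback2 above fires on every checkpoint and crashes the process
+// once a randomly chosen amount of time has elapsed; manual_checkpoint is the
+// worker operation (wired up below with a 30 second sleep) that keeps triggering
+// those checkpoints.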
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - one thread constantly updating random values
+ // - one thread doing table scan with bulk fetch
+ // - one thread doing table scan without bulk fetch
+ // - one thread doing random point queries
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 5 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+ struct scan_op_extra soe[4];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_op;
+
+ // make the forward slow scanner
+ soe[1].fast = false;
+ soe[1].fwd = true;
+ soe[1].prefetch = false;
+ myargs[1].operation_extra = &soe[1];
+ myargs[1].operation = scan_op;
+
+ // make the backward fast scanner
+ soe[2].fast = true;
+ soe[2].fwd = false;
+ soe[2].prefetch = false;
+ myargs[2].operation_extra = &soe[2];
+ myargs[2].operation = scan_op;
+
+ // make the backward slow scanner
+ soe[3].fast = false;
+ soe[3].fwd = false;
+ soe[3].prefetch = false;
+ myargs[3].operation_extra = &soe[3];
+ myargs[3].operation = scan_op;
+
+ // make something for checkpoints
+ myargs[4].operation = manual_checkpoint;
+ myargs[4].sleep_ms = 30*1000; // do checkpoints every 30 seconds
+ myargs[4].operation_extra = env;
+
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ // make the guy that updates the db
+ for (int i = 5; i < 5 + cli_args->num_update_threads; ++i) {
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_op;
+ }
+
+ // make the guy that does point queries
+ for (int i = 5 + cli_args->num_update_threads; i < num_threads; i++) {
+ myargs[i].operation = ptquery_op;
+ }
+
+ db_env_set_checkpoint_callback2(checkpoint_callback2, NULL);
+ time_til_crash = random() % cli_args->num_seconds;
+ start_time = get_tnow();
+ run_workers(myargs, num_threads, INT32_MAX, true, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ args.env_args.checkpointing_period = 0;
+ parse_stress_test_args(argc, argv, &args);
+ if (args.do_test_and_crash) {
+ stress_test_main(&args);
+ }
+ if (args.do_recover) {
+ stress_recover(&args);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-test_stress_openclose.cc b/storage/tokudb/PerconaFT/src/tests/recover-test_stress_openclose.cc
new file mode 100644
index 00000000..217c3e02
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-test_stress_openclose.cc
@@ -0,0 +1,63 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "stress_openclose.h"
+
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ parse_stress_test_args(argc, argv, &args);
+ // checkpointing is a part of the ref count, so do it often
+ args.env_args.checkpointing_period = 5;
+ // very small dbs, so verification scans are short and sweet
+ args.num_elements = 1000;
+ // it's okay for update to get DB_LOCK_NOTGRANTED, etc.
+ args.crash_on_operation_failure = false;
+
+ // set crash at end to true for the recovery version
+ // then run the test or run recovery, depending on args
+ stress_openclose_crash_at_end = true;
+ if (args.do_test_and_crash) {
+ stress_test_main(&args);
+ }
+ if (args.do_recover) {
+ stress_recover(&args);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update-multiple-abort.cc b/storage/tokudb/PerconaFT/src/tests/recover-update-multiple-abort.cc
new file mode 100644
index 00000000..67ff99b0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update-multiple-abort.cc
@@ -0,0 +1,497 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of some update multiple operations
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
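+// Test flow: populate one primary and (ndbs-1) secondary dictionaries, take a
+// checkpoint, run update_multiple over every primary row inside a single
+// transaction, and crash *without* committing. Recovery is therefore expected to
+// roll the updates back, which is what verify_pri_seq/verify_sec_seq check (they
+// look for the original get_key/get_data values).
+//
+// Keys pack (i, dbnum, which) into a single int, roughly
+// htonl((row_id << 16) + (dbnum << 8) + (which << 1)); see get_key/get_new_key.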
+
+static int
+get_num_new_keys(int i, int dbnum) {
+ if (dbnum == 0) return 1;
+ if (i & (1<<4)) {
+ dbnum++; // Shift every once in a while.
+ }
+ return (i + dbnum) % 3; // 0, 1, or 2
+}
+
+static int
+get_num_keys(int i, int dbnum) {
+ if (dbnum == 0) return 1;
+ return (i + dbnum) % 3; // 0, 1, or 2
+}
+
+static int
+get_total_secondary_rows(int num_primary) {
+ assert(num_primary % 3 == 0);
+ return num_primary / 3 * (0 + 1 + 2);
+}
+
+static int
+get_total_num_keys(int i, int num_dbs) {
+ int sum = 0;
+ for (int db = 1; db < num_dbs; ++db) {
+ sum += get_num_keys(i, db);
+ }
+ return sum;
+}
+
+static int
+get_total_num_new_keys(int i, int num_dbs) {
+ int sum = 0;
+ for (int db = 1; db < num_dbs; ++db) {
+ sum += get_num_new_keys(i, db);
+ }
+ return sum;
+}
+
+static int
+get_key(int i, int dbnum, int which) {
+ assert(i < INT16_MAX / 2);
+ assert(which >= 0);
+ assert(which < get_num_keys(i, dbnum));
+ assert(which < 4);
+ assert(dbnum < 16);
+ if (dbnum == 0) {
+ assert(which == 0);
+ return htonl((2*i) << 16);
+ } else {
+ return htonl(((2*i+0) << 16) + (dbnum<<8) + (which<<1));
+ }
+}
+
+static int
+get_new_key(int i, int dbnum, int which) {
+ assert(which >= 0);
+ assert(which < get_num_new_keys(i, dbnum));
+ assert(which < 4);
+ assert(dbnum < 16);
+
+ if (dbnum == 0) {
+ assert(which == 0);
+ return htonl((2*i+1) << 16);
+ } else if ((i+dbnum+which) & (1<<5)) {
+ return htonl(((2*i+0) << 16) + (dbnum<<8) + (which<<1)); // no change from original
+ } else {
+ return htonl(((2*i+0) << 16) + (dbnum<<8) + (which<<1) + 1);
+ }
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ int index = 0;
+ for (int dbnum = 1; dbnum < ndbs; dbnum++) {
+ for (int which = 0; which < get_num_keys(i, dbnum); ++which) {
+ v[index++] = get_key(i, dbnum, which);
+ }
+ }
+}
+
+static void
+get_new_data(int *v, int i, int ndbs) {
+ int index = 0;
+ for (int dbnum = 1; dbnum < ndbs; dbnum++) {
+ for (int which = 0; which < get_num_new_keys(i, dbnum); ++which) {
+ v[index++] = get_new_key(i, dbnum, which);
+ if (which > 0) {
+ assert(index >= 2);
+ assert(memcmp(&v[index-2], &v[index-1], sizeof(v[0])) < 0);
+ }
+ }
+ }
+}
+
+
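+// Row generator shared by puts and deletes: given a primary row, produce the
+// secondary key(s) for dest_db. Whether the row is "old" or "new" is decoded from
+// the primary key, and each generated key is cross-checked against the values
+// packed into the primary row's data.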
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ (void)src_val;
+ assert(src_db != dest_db);
+ assert(src_db);
+ int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum > 0);
+
+ int pri_key = *(int *) src_key->data;
+ int* pri_val = (int*) src_val->data;
+
+ bool is_new = (ntohl(pri_key) >> 16) % 2 == 1;
+ int i = (ntohl(pri_key) >> 16) / 2;
+
+ int num_keys = is_new ? get_num_new_keys(i, dbnum) : get_num_keys(i, dbnum);
+
+ toku_dbt_array_resize(dest_key_arrays, num_keys);
+
+ if (dest_val_arrays) {
+ toku_dbt_array_resize(dest_val_arrays, num_keys);
+ }
+
+ int index = 0;
+
+ for (int idb = 1; idb < dbnum; idb++) {
+ index += is_new ? get_num_new_keys(i, idb) : get_num_keys(i, idb);
+ }
+ assert(src_val->size % sizeof(int) == 0);
+ assert((int)src_val->size / 4 >= index + num_keys);
+
+ for (int which = 0; which < num_keys; which++) {
+ DBT *dest_key = &dest_key_arrays->dbts[which];
+ DBT *dest_val = NULL;
+
+ assert(dest_key->flags == DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(int));
+ dest_key->ulen = sizeof(int);
+ }
+ dest_key->size = sizeof(int);
+ if (dest_val_arrays) {
+ dest_val = &dest_val_arrays->dbts[which];
+ assert(dest_val->flags == DB_DBT_REALLOC);
+ dest_val->size = 0;
+ }
+ int new_key = is_new ? get_new_key(i, dbnum, which) : get_key(i, dbnum, which);
+ assert(new_key == pri_val[index + which]);
+ *(int*)dest_key->data = new_key;
+ }
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_keys, NULL, src_key, src_data);
+}
+
+static void
+update_diagonal(DB_ENV *env, DB_TXN *txn, DB *db[], int ndbs, int nrows) {
+ assert(ndbs > 0);
+ int r;
+
+ int narrays = 2 * ndbs;
+ DBT_ARRAY keys[narrays];
+ DBT_ARRAY vals[narrays];
+ for (int i = 0; i < narrays; i++) {
+ toku_dbt_array_init(&keys[i], 1);
+ toku_dbt_array_init(&vals[i], 1);
+ }
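+    // update_multiple is handed 2*ndbs scratch DBT_ARRAYs, presumably one set for
+    // the deletes of the old secondary rows and one for the inserts of the new ones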
+
+ for (int i = 0; i < nrows; i++) {
+
+        // replace row i's primary key and value with their "new" versions; update_multiple regenerates the affected secondary rows
+
+ int old_k = get_key(i, 0, 0);
+ DBT old_key; dbt_init(&old_key, &old_k, sizeof old_k);
+ int new_k = get_new_key(i, 0, 0);
+ DBT new_key; dbt_init(&new_key, &new_k, sizeof new_k);
+
+ int num_old_keys = get_total_num_keys(i, ndbs);
+ int v[num_old_keys]; get_data(v, i, ndbs);
+ DBT old_data; dbt_init(&old_data, &v[0], sizeof v);
+
+ int num_new_keys = get_total_num_new_keys(i, ndbs);
+ int newv[num_new_keys]; get_new_data(newv, i, ndbs);
+ DBT new_data; dbt_init(&new_data, &newv[0], sizeof newv);
+
+ uint32_t flags_array[ndbs]; memset(flags_array, 0, sizeof(flags_array));
+
+ r = env->update_multiple(env, db[0], txn, &old_key, &old_data, &new_key, &new_data, ndbs, db, flags_array, narrays, keys, narrays, vals);
+ assert_zero(r);
+ }
+ for (int i = 0; i < narrays; i++) {
+ toku_dbt_array_destroy(&keys[i]);
+ toku_dbt_array_destroy(&vals[i]);
+ }
+
+}
+
+static void
+populate_primary(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0, 0);
+ int secondary_keys = get_total_num_keys(i, ndbs);
+ int v[secondary_keys]; get_data(v, i, ndbs);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v[0], sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_secondary(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ for (int which = 0; which < get_num_keys(i, dbnum); which++) {
+ int k = get_key(i, dbnum, which);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_pri_seq(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ const int dbnum = 0;
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k;
+ int expectk = get_key(i, dbnum, 0);
+
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == expectk);
+
+ int num_keys = get_total_num_keys(i, ndbs);
+ assert(val.size == num_keys*sizeof(int));
+ int v[num_keys]; get_data(v, i, ndbs);
+ assert(memcmp(val.data, v, val.size) == 0);
+ }
+    assert(i == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_sec_seq(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ assert(dbnum > 0);
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ int rows_found = 0;
+
+ for (i = 0; ; i++) {
+ int num_keys = get_num_keys(i, dbnum);
+ for (int which = 0; which < num_keys; ++which) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0) {
+ CKERR2(r, DB_NOTFOUND);
+ goto done;
+ }
+ rows_found++;
+ int k;
+ int expectk = get_key(i, dbnum, which);
+
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ int got_i = (ntohl(k) >> 16) / 2;
+ if (got_i < i) {
+ // Will fail. Too many old i's
+ assert(k == expectk);
+ } else if (got_i > i) {
+ // Will fail. Too few in previous i.
+ assert(k == expectk);
+ }
+
+ if (k != expectk && which < get_num_new_keys(i, dbnum) && k == get_new_key(i, dbnum, which)) {
+ // Will fail, never got updated.
+ assert(k == expectk);
+ }
+ assert(k == expectk);
+ assert(val.size == 0);
+ }
+ }
+done:
+ assert(rows_found == get_total_secondary_rows(nrows));
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0);
+ assert_zero(r);
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ r = env->txn_checkpoint(env, 0, 0, 0); assert_zero(r);
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if (dbnum == 0)
+ populate_primary(env, db[dbnum], ndbs, nrows);
+ else
+ populate_secondary(env, db[dbnum], dbnum, nrows);
+ }
+
+ r = env->txn_checkpoint(env, 0, 0, 0); assert_zero(r);
+
+ // update multiple key0
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ update_diagonal(env, txn, db, ndbs, nrows);
+
+ toku_hard_crash_on_purpose();
+}
+
+static void
+verify_all(DB_ENV *env, int ndbs, int nrows) {
+ int r;
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ if (dbnum == 0) {
+ verify_pri_seq(env, db, ndbs, nrows);
+ } else {
+ verify_sec_seq(env, db, dbnum, nrows);
+ }
+ r = db->close(db, 0);
+ assert_zero(r);
+ }
+}
+
+static void
+run_recover(int ndbs, int nrows) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ verify_all(env, ndbs, nrows);
+ r = env->close(env, 0); assert_zero(r);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ bool do_test = false;
+ bool do_recover = false;
+ int ndbs = 2;
+ int nrows = 3*(1<<5)*4;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+ while (nrows % (3*(1<<5)) != 0) {
+ nrows++;
+ }
+
+ if (do_test)
+ run_test(ndbs, nrows);
+ if (do_recover)
+ run_recover(ndbs, nrows);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update-multiple.cc b/storage/tokudb/PerconaFT/src/tests/recover-update-multiple.cc
new file mode 100644
index 00000000..f2f5d09f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update-multiple.cc
@@ -0,0 +1,507 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of some update multiple operations
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
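+// Same shape as recover-update-multiple-abort.cc, except here the update_multiple
+// transaction *is* committed before the crash, so after recovery the verify
+// functions expect the new keys/data (get_new_key/get_new_data).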
+
+static int
+get_num_new_keys(int i, int dbnum) {
+ if (dbnum == 0) return 1;
+ if (i & (1<<4)) {
+ dbnum++; // Shift every once in a while.
+ }
+ return (i + dbnum) % 3; // 0, 1, or 2
+}
+
+static int
+get_num_keys(int i, int dbnum) {
+ if (dbnum == 0) return 1;
+ return (i + dbnum) % 3; // 0, 1, or 2
+}
+
+static int
+get_total_secondary_rows(int num_primary) {
+ assert(num_primary % 3 == 0);
+ return num_primary / 3 * (0 + 1 + 2);
+}
+
+static int
+get_total_num_keys(int i, int num_dbs) {
+ int sum = 0;
+ for (int db = 1; db < num_dbs; ++db) {
+ sum += get_num_keys(i, db);
+ }
+ return sum;
+}
+
+static int
+get_total_num_new_keys(int i, int num_dbs) {
+ int sum = 0;
+ for (int db = 1; db < num_dbs; ++db) {
+ sum += get_num_new_keys(i, db);
+ }
+ return sum;
+}
+
+static int
+get_key(int i, int dbnum, int which) {
+ assert(i < INT16_MAX / 2);
+ assert(which >= 0);
+ assert(which < get_num_keys(i, dbnum));
+ assert(which < 4);
+ assert(dbnum < 16);
+ if (dbnum == 0) {
+ assert(which == 0);
+ return htonl((2*i) << 16);
+ } else {
+ return htonl(((2*i+0) << 16) + (dbnum<<8) + (which<<1));
+ }
+}
+
+static int
+get_new_key(int i, int dbnum, int which) {
+ assert(which >= 0);
+ assert(which < get_num_new_keys(i, dbnum));
+ assert(which < 4);
+ assert(dbnum < 16);
+
+ if (dbnum == 0) {
+ assert(which == 0);
+ return htonl((2*i+1) << 16);
+ } else if ((i+dbnum+which) & (1<<5)) {
+ return htonl(((2*i+0) << 16) + (dbnum<<8) + (which<<1)); // no change from original
+ } else {
+ return htonl(((2*i+0) << 16) + (dbnum<<8) + (which<<1) + 1);
+ }
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ int index = 0;
+ for (int dbnum = 1; dbnum < ndbs; dbnum++) {
+ for (int which = 0; which < get_num_keys(i, dbnum); ++which) {
+ v[index++] = get_key(i, dbnum, which);
+ }
+ }
+}
+
+static void
+get_new_data(int *v, int i, int ndbs) {
+ int index = 0;
+ for (int dbnum = 1; dbnum < ndbs; dbnum++) {
+ for (int which = 0; which < get_num_new_keys(i, dbnum); ++which) {
+ v[index++] = get_new_key(i, dbnum, which);
+ if (which > 0) {
+ assert(index >= 2);
+ assert(memcmp(&v[index-2], &v[index-1], sizeof(v[0])) < 0);
+ }
+ }
+ }
+}
+
+
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ (void)src_val;
+ assert(src_db != dest_db);
+ assert(src_db);
+ int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum > 0);
+
+ int pri_key = *(int *) src_key->data;
+ int* pri_val = (int*) src_val->data;
+
+ bool is_new = (ntohl(pri_key) >> 16) % 2 == 1;
+ int i = (ntohl(pri_key) >> 16) / 2;
+
+ int num_keys = is_new ? get_num_new_keys(i, dbnum) : get_num_keys(i, dbnum);
+
+ toku_dbt_array_resize(dest_key_arrays, num_keys);
+
+ if (dest_val_arrays) {
+ toku_dbt_array_resize(dest_val_arrays, num_keys);
+ }
+
+ int index = 0;
+
+ for (int idb = 1; idb < dbnum; idb++) {
+ index += is_new ? get_num_new_keys(i, idb) : get_num_keys(i, idb);
+ }
+ assert(src_val->size % sizeof(int) == 0);
+ assert((int)src_val->size / 4 >= index + num_keys);
+
+ for (int which = 0; which < num_keys; which++) {
+ DBT *dest_key = &dest_key_arrays->dbts[which];
+ DBT *dest_val = NULL;
+
+ assert(dest_key->flags == DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(int));
+ dest_key->ulen = sizeof(int);
+ }
+ dest_key->size = sizeof(int);
+ if (dest_val_arrays) {
+ dest_val = &dest_val_arrays->dbts[which];
+ assert(dest_val->flags == DB_DBT_REALLOC);
+ dest_val->size = 0;
+ }
+ int new_key = is_new ? get_new_key(i, dbnum, which) : get_key(i, dbnum, which);
+ assert(new_key == pri_val[index + which]);
+ *(int*)dest_key->data = new_key;
+ }
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_key_arrays, NULL, src_key, src_data);
+}
+
+static void
+update_diagonal(DB_ENV *env, DB_TXN *txn, DB *db[], int ndbs, int nrows) {
+ assert(ndbs > 0);
+ int r;
+
+ int narrays = 2 * ndbs;
+ DBT_ARRAY keys[narrays];
+ DBT_ARRAY vals[narrays];
+ for (int i = 0; i < narrays; i++) {
+ toku_dbt_array_init(&keys[i], 1);
+ toku_dbt_array_init(&vals[i], 1);
+ }
+
+ for (int i = 0; i < nrows; i++) {
+
+        // replace row i's primary key and value with their "new" versions; update_multiple regenerates the affected secondary rows
+
+ int old_k = get_key(i, 0, 0);
+ DBT old_key; dbt_init(&old_key, &old_k, sizeof old_k);
+ int new_k = get_new_key(i, 0, 0);
+ DBT new_key; dbt_init(&new_key, &new_k, sizeof new_k);
+
+ int num_old_keys = get_total_num_keys(i, ndbs);
+ int v[num_old_keys]; get_data(v, i, ndbs);
+ DBT old_data; dbt_init(&old_data, &v[0], sizeof v);
+
+ int num_new_keys = get_total_num_new_keys(i, ndbs);
+ int newv[num_new_keys]; get_new_data(newv, i, ndbs);
+ DBT new_data; dbt_init(&new_data, &newv[0], sizeof newv);
+
+ uint32_t flags_array[ndbs]; memset(flags_array, 0, sizeof(flags_array));
+
+ r = env->update_multiple(env, db[0], txn, &old_key, &old_data, &new_key, &new_data, ndbs, db, flags_array, narrays, keys, narrays, vals);
+ assert_zero(r);
+ }
+ for (int i = 0; i < narrays; i++) {
+ toku_dbt_array_destroy(&keys[i]);
+ toku_dbt_array_destroy(&vals[i]);
+ }
+
+}
+
+static void
+populate_primary(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0, 0);
+ int secondary_keys = get_total_num_keys(i, ndbs);
+ int v[secondary_keys]; get_data(v, i, ndbs);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v[0], sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_secondary(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ for (int which = 0; which < get_num_keys(i, dbnum); which++) {
+ int k = get_key(i, dbnum, which);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_pri_seq(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ const int dbnum = 0;
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k;
+ int expectk = get_new_key(i, dbnum, 0);
+
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == expectk);
+
+ int num_keys = get_total_num_new_keys(i, ndbs);
+ assert(val.size == num_keys*sizeof(int));
+ int v[num_keys]; get_new_data(v, i, ndbs);
+ assert(memcmp(val.data, v, val.size) == 0);
+ }
+    assert(i == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_sec_seq(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ assert(dbnum > 0);
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ int rows_found = 0;
+
+ for (i = 0; ; i++) {
+ int num_keys = get_num_new_keys(i, dbnum);
+ for (int which = 0; which < num_keys; ++which) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0) {
+ CKERR2(r, DB_NOTFOUND);
+ goto done;
+ }
+ rows_found++;
+ int k;
+ int expectk = get_new_key(i, dbnum, which);
+
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
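+            // the redundant branches below repeat the same assertion so that a failure's line
+            // number identifies the likely cause (stale row, missing row, or a key never updated)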
+ int got_i = (ntohl(k) >> 16) / 2;
+ if (got_i < i) {
+ // Will fail. Too many old i's
+ assert(k == expectk);
+ } else if (got_i > i) {
+ // Will fail. Too few in previous i.
+ assert(k == expectk);
+ }
+
+ if (k != expectk && which < get_num_keys(i, dbnum) && k == get_key(i, dbnum, which)) {
+ // Will fail, never got updated.
+ assert(k == expectk);
+ }
+ assert(k == expectk);
+ assert(val.size == 0);
+ }
+ }
+done:
+ assert(rows_found == get_total_secondary_rows(nrows));
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0);
+ assert_zero(r);
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ r = env->txn_checkpoint(env, 0, 0, 0); assert_zero(r);
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if (dbnum == 0)
+ populate_primary(env, db[dbnum], ndbs, nrows);
+ else
+ populate_secondary(env, db[dbnum], dbnum, nrows);
+ }
+
+ r = env->txn_checkpoint(env, 0, 0, 0); assert_zero(r);
+
+    // use update_multiple to change key0 (and the data) of every row in one committed transaction
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ update_diagonal(env, txn, db, ndbs, nrows);
+
+ r = txn->commit(txn, 0); assert_zero(r);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if (dbnum == 0) {
+ verify_pri_seq(env, db[0], ndbs, nrows);
+ } else {
+ verify_sec_seq(env, db[dbnum], dbnum, nrows);
+ }
+ }
+
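+    // crash without a clean shutdown; the --recover pass must replay the log and see the same state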
+ toku_hard_crash_on_purpose();
+}
+
+
+static void
+verify_all(DB_ENV *env, int ndbs, int nrows) {
+ int r;
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert_zero(r);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db->open(db, NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666);
+ assert_zero(r);
+ if (dbnum == 0) {
+ verify_pri_seq(env, db, ndbs, nrows);
+ } else {
+ verify_sec_seq(env, db, dbnum, nrows);
+ }
+ r = db->close(db, 0);
+ assert_zero(r);
+ }
+}
+
+static void
+run_recover(int ndbs, int nrows) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert_zero(r);
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ verify_all(env, ndbs, nrows);
+ r = env->close(env, 0); assert_zero(r);
+}
+
+static int
+usage(void) {
+ return 1;
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ bool do_test = false;
+ bool do_recover = false;
+ int ndbs = 2;
+ int nrows = 3*(1<<5)*4;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
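+    // nrows must be a multiple of 3*(1<<5); round up to the next multiple if needed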
+ while (nrows % (3*(1<<5)) != 0) {
+ nrows++;
+ }
+
+ if (do_test)
+ run_test(ndbs, nrows);
+ if (do_recover)
+ run_recover(ndbs, nrows);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_aborts.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_aborts.cc
new file mode 100644
index 00000000..8b438891
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_aborts.cc
@@ -0,0 +1,215 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery when a transaction that updates values at keys aborts after a checkpoint:
+// after crash and recovery the values must be unchanged
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
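+    // compute the new value from the old value and the per-key extra (_u(old, extra) = old*old*extra)
+    // and install it through set_val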
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ if (should_update(i)) {
+ e = _e(i);
+ r = db->update(db, txn, keyp, extrap, 0);
+ CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
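+    // apply the updates in a transaction that aborts; recovery after the crash below
+    // must leave every value unchanged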
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_unchanged(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0);
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ assert(*vp == _v(i));
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_unchanged(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_aborts_before_checkpoint.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_aborts_before_checkpoint.cc
new file mode 100644
index 00000000..47de8377
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_aborts_before_checkpoint.cc
@@ -0,0 +1,215 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery when a transaction that updates values at keys aborts before a checkpoint:
+// after crash and recovery the values must be unchanged
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ if (should_update(i)) {
+ e = _e(i);
+ r = db->update(db, txn, keyp, extrap, 0);
+ CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_unchanged(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0);
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ assert(*vp == _v(i));
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_unchanged(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_aborts_before_close.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_aborts_before_close.cc
new file mode 100644
index 00000000..34ddfcd2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_aborts_before_close.cc
@@ -0,0 +1,215 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery when a transaction that updates values at keys aborts before the db is closed:
+// after crash and recovery the values must be unchanged
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ if (should_update(i)) {
+ e = _e(i);
+ r = db->update(db, txn, keyp, extrap, 0);
+ CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_unchanged(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0);
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ assert(*vp == _v(i));
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_unchanged(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts.cc
new file mode 100644
index 00000000..01ab177b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts.cc
@@ -0,0 +1,206 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery when a broadcast update transaction aborts after a checkpoint:
+// after crash and recovery the values must be unchanged
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ v = _u(*ov, _e(*k));
+
+ if (should_update(*k)) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
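+    // update_broadcast applies the environment's update callback (update_fun here) to every
+    // row in the db, passing the empty extra DBT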
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, 0);
+ CKERR(r);
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_unchanged(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0);
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ assert(*vp == _v(i));
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_unchanged(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts2.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts2.cc
new file mode 100644
index 00000000..2afdf2ce
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts2.cc
@@ -0,0 +1,208 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery when a broadcast update transaction aborts while a checkpoint is in progress
+// (driven from the checkpoint callback): after crash and recovery the values must be unchanged
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ v = _u(*ov, _e(*k));
+
+ if (should_update(*k)) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, 0);
+ CKERR(r);
+ return r;
+}
+
+DB_ENV *env;
+DB *db;
+
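+// invoked from inside txn_checkpoint: run the broadcast update in a transaction that aborts
+// while the checkpoint is in progress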
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+}
+
+static void run_test(void)
+{
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback2(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_unchanged(void)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0);
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ assert(*vp == _v(i));
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_unchanged(); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts3.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts3.cc
new file mode 100644
index 00000000..32a4e03c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts3.cc
@@ -0,0 +1,208 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery when a broadcast update transaction aborts while a checkpoint is in progress:
+// after crash and recovery the values must be unchanged
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ v = _u(*ov, _e(*k));
+
+ if (should_update(*k)) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, 0);
+ CKERR(r);
+ return r;
+}
+
+DB_ENV *env;
+DB *db;
+
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+}
+
+static void run_test(void)
+{
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_unchanged(void)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0);
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ assert(*vp == _v(i));
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_unchanged(); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts_before_checkpoint.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts_before_checkpoint.cc
new file mode 100644
index 00000000..f261b59d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts_before_checkpoint.cc
@@ -0,0 +1,206 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery when a broadcast update transaction aborts before a checkpoint:
+// after crash and recovery the values must be unchanged
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ v = _u(*ov, _e(*k));
+
+ if (should_update(*k)) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, 0);
+ CKERR(r);
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_unchanged(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0);
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ assert(*vp == _v(i));
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_unchanged(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts_before_close.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts_before_close.cc
new file mode 100644
index 00000000..f2d26c77
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_aborts_before_close.cc
@@ -0,0 +1,206 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery when a broadcast update transaction aborts before the db is closed:
+// after crash and recovery the values must be unchanged
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ v = _u(*ov, _e(*k));
+
+ if (should_update(*k)) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, 0);
+ CKERR(r);
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_unchanged(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0);
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ assert(*vp == _v(i));
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_unchanged(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values.cc
new file mode 100644
index 00000000..90c80a9e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values.cc
@@ -0,0 +1,210 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of an update log entry which changes values at keys
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
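+
+// Worked example: key k = 3 is a multiple of 3, so it gets updated; it starts at
+// _v(3) = 7 and the update rewrites it to _u(7, _e(3)) = 7 * 7 * 7 = 343, while keys
+// that are not multiples of 3 keep _v(k).  For k > 10, _v(k) wraps as unsigned
+// arithmetic; that is harmless because the verify step recomputes the same expressions.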
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ v = _u(*ov, _e(*k));
+
+ if (should_update(*k)) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
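+// do_updates() below issues a single update_broadcast with an empty extra: the engine
+// logs one broadcast entry and applies update_fun to every row, and update_fun itself
+// decides (via should_update) which rows actually change.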
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, 0);
+ CKERR(r);
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
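+ // The broadcast committed after the checkpoint above and the process now dies, so the
+ // --recover pass must replay the broadcast entry from the log to reach the state
+ // checked by verify_updated().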
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_updated(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0);
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (should_update(i)) {
+ assert(*vp == _u(_v(i), _e(i)));
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_updated(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values2.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values2.cc
new file mode 100644
index 00000000..24c8891a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values2.cc
@@ -0,0 +1,213 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of a broadcast update log entry which changes values at keys, issued from a checkpoint callback while a checkpoint is in progress
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ v = _u(*ov, _e(*k));
+
+ if (should_update(*k)) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, 0);
+ CKERR(r);
+ return r;
+}
+
+DB_ENV *env;
+DB *db;
+
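+// checkpoint_callback_1 runs the broadcast from inside env->txn_checkpoint() (it is
+// registered below with db_env_set_checkpoint_callback2), so the committed update is
+// issued while a checkpoint is in progress; recovery must still produce the values
+// checked by verify_updated().  Exactly when the callback fires within the checkpoint
+// is an engine implementation detail.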
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+}
+
+
+static void run_test(void)
+{
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback2(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_updated(void)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0);
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (should_update(i)) {
+ assert(*vp == _u(_v(i), _e(i)));
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_updated(); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values3.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values3.cc
new file mode 100644
index 00000000..8a14c845
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values3.cc
@@ -0,0 +1,211 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of a broadcast update log entry which changes values at keys, issued from a checkpoint callback while a checkpoint is in progress
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ v = _u(*ov, _e(*k));
+
+ if (should_update(*k)) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, 0); CKERR(r);
+ return r;
+}
+
+DB_ENV *env;
+DB *db;
+
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+}
+
+
+static void run_test(void)
+{
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
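+ // Same scenario as recover-update_broadcast_changes_values2.cc, except the callback is
+ // registered with db_env_set_checkpoint_callback() rather than the callback2 hook.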
+ db_env_set_checkpoint_callback(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_updated(void)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (should_update(i)) {
+ assert(*vp == _u(_v(i), _e(i)));
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_updated(); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values_before_checkpoint.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values_before_checkpoint.cc
new file mode 100644
index 00000000..8705096d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values_before_checkpoint.cc
@@ -0,0 +1,207 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of a broadcast update log entry which changes values at keys and commits before a checkpoint
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ v = _u(*ov, _e(*k));
+
+ if (should_update(*k)) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, 0); CKERR(r);
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
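+ // Here the broadcast committed before the checkpoint above, so after the crash
+ // recovery starts from a checkpoint that already covers the committed update;
+ // verify_updated() must still see the new values.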
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_updated(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (should_update(i)) {
+ assert(*vp == _u(_v(i), _e(i)));
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_updated(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values_before_close.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values_before_close.cc
new file mode 100644
index 00000000..b38f2d61
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_broadcast_changes_values_before_close.cc
@@ -0,0 +1,207 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of a broadcast update log entry which changes values at keys and commits before the dictionary is closed
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ v = _u(*ov, _e(*k));
+
+ if (should_update(*k)) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, 0); CKERR(r);
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
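+ // Variant: the dictionary handle is closed, but no checkpoint is taken, before the
+ // crash; the --recover pass still expects verify_updated() to see the broadcast's
+ // effect on the re-opened dictionary.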
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_updated(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (should_update(i)) {
+ assert(*vp == _u(_v(i), _e(i)));
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_updated(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_changes_values.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_changes_values.cc
new file mode 100644
index 00000000..8faf0084
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_changes_values.cc
@@ -0,0 +1,216 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of update log entries which change values at keys
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
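+// Unlike the broadcast tests, do_updates() below calls db->update() once per qualifying
+// key and passes the per-key extra _e(i); update_fun therefore calls set_val
+// unconditionally, since the filtering already happened in do_updates().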
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ if (should_update(i)) {
+ e = _e(i);
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_updated(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (should_update(i)) {
+ assert(*vp == _u(_v(i), _e(i)));
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_updated(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_changes_values_before_checkpoint.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_changes_values_before_checkpoint.cc
new file mode 100644
index 00000000..c94a47b0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_changes_values_before_checkpoint.cc
@@ -0,0 +1,216 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of update log entries which change values at keys and commit before a checkpoint
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ if (should_update(i)) {
+ e = _e(i);
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_updated(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (should_update(i)) {
+ assert(*vp == _u(_v(i), _e(i)));
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_updated(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-update_changes_values_before_close.cc b/storage/tokudb/PerconaFT/src/tests/recover-update_changes_values_before_close.cc
new file mode 100644
index 00000000..09f75ef7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-update_changes_values_before_close.cc
@@ -0,0 +1,216 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify recovery of update log entries which change values at keys and commit before the dictionary is closed
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+static const unsigned int NUM_KEYS = 100;
+
+static inline bool should_update(const unsigned int k) { return k % 3 == 0; }
+
+static inline unsigned int _v(const unsigned int k) { return 10 - k; }
+static inline unsigned int _e(const unsigned int k) { return k + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int do_inserts(DB_TXN *txn, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ if (should_update(i)) {
+ e = _e(i);
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void run_test(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ toku_hard_crash_on_purpose();
+}
+
+static int verify_updated(DB_ENV *env, DB *db)
+{
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn_1, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (should_update(i)) {
+ assert(*vp == _u(_v(i), _e(i)));
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ });
+
+ return r;
+}
+
+static void run_recover(void)
+{
+ DB_ENV *env;
+ DB *db;
+
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ { int chk_r = verify_updated(env, db); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int usage(void)
+{
+ return 1;
+}
+
+int test_main(int argc, char * const argv[])
+{
+ bool do_test = false;
+ bool do_recover = false;
+
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--test") == 0) {
+ do_test = true;
+ continue;
+ }
+ if (strcmp(arg, "--recover") == 0) {
+ do_recover = true;
+ continue;
+ }
+ if (strcmp(arg, "--help") == 0) {
+ return usage();
+ }
+ }
+
+ if (do_test) {
+ run_test();
+ }
+ if (do_recover) {
+ run_recover();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-upgrade-db-descriptor-multihandle.cc b/storage/tokudb/PerconaFT/src/tests/recover-upgrade-db-descriptor-multihandle.cc
new file mode 100644
index 00000000..8423707e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-upgrade-db-descriptor-multihandle.cc
@@ -0,0 +1,327 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the comparison function gets a valid db object pointer
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const char *descriptor_contents[] = {
+ "Spoon full of sugar",
+ "Bucket full of pants"
+};
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+
+int verified = 0;
+uint32_t forced_version = 2;
+
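+// my_compare checks that, whenever the engine calls the comparison function, the db
+// handle it receives carries the most recently installed cmp descriptor
+// (descriptor_contents[forced_version - 1]); `verified` records that the check ran.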
+static int my_compare(DB *UU(db), const DBT *a, const DBT *b) {
+ assert(db);
+ assert(db->cmp_descriptor);
+ uint32_t which = forced_version-1;
+ size_t len = strlen(descriptor_contents[which])+1;
+
+ assert(db->cmp_descriptor->dbt.size == len);
+ assert(memcmp(db->cmp_descriptor->dbt.data, descriptor_contents[which], len) == 0);
+
+ assert(a->size == b->size);
+ verified = 1;
+ return memcmp(a->data, b->data, a->size);
+}
+
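+// change_descriptor installs descriptor_contents[which] as the dictionary's descriptor
+// in its own committed transaction; DB_UPDATE_CMP_DESCRIPTOR also updates the
+// cmp_descriptor that my_compare inspects.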
+static void
+change_descriptor(DB* db, int which, DB_ENV* env) {
+ DBT descriptor;
+ size_t len = strlen(descriptor_contents[which])+1;
+ dbt_init(&descriptor, descriptor_contents[which], len);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db->change_descriptor(db, txn_desc, &descriptor, DB_UPDATE_CMP_DESCRIPTOR); CKERR(chk_r); }
+ });
+}
+
+static void
+do_x1_shutdown (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char datadir[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(datadir, 2, TOKU_TEST_FILENAME, "data"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ DB_ENV *env;
+ DB *dba, *dbb;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_data_dir(env, "data"); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ change_descriptor(dba, 0, env);
+
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
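+ // dbb opens the same dictionary (namea) through a second handle, so this second
+ // change_descriptor installs descriptor_contents[1], the version my_compare
+ // (forced_version == 2) expects to see from either handle.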
+ change_descriptor(dbb, 1, env);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ r = dba->put(dba, txn, &b, &a, 0); CKERR(r);
+ r = dbb->put(dbb, txn, &b, &a, 0); CKERR(r);
+ }
+ //printf("opened\n");
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ assert(verified);
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool did_commit) {
+ DB_ENV *env;
+ DB *dba;
+ int r;
+ char datadir[TOKU_PATH_MAX+1];
+ toku_path_join(datadir, 2, TOKU_TEST_FILENAME, "data");
+ toku_os_recursive_delete(datadir);
+ r = toku_os_mkdir(datadir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_data_dir(env, "data"); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT aa, ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ DB_TXN *txn;
+ DBC *ca;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); // checked below: 0 if the txn committed, DB_NOTFOUND otherwise
+ if (did_commit) {
+ assert(ra==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+ assert(memcmp(ab.data, &b, 2)==0);
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == 0);
+ assert(aa.size == 2 && ab.size == 2 && memcmp(aa.data, b, 2) == 0 && memcmp(ab.data, a, 2) == 0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ assert(verified);
+ exit(0);
+}
+
+static void
+do_x1_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+#if 0
+
+static void
+do_test_internal (bool commit)
+{
+ pid_t pid;
+ if (0 == (pid=fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--commit" : "--abort", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));
+ assert(WIFSIGNALED(status) && WTERMSIG(status)==SIGABRT);
+ }
+ // Now find out what happened
+
+ if (0 == (pid = fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--recover-committed" : "--recover-aborted", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("recovery exited=%d\n", WIFEXITED(status));
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+}
+
+static void
+do_test (void) {
+ do_test_internal(true);
+ do_test_internal(false);
+}
+
+#endif
+
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown (true, false);
+ } else if (do_abort) {
+ do_x1_shutdown (false, false);
+ } else if (do_explicit_abort) {
+ do_x1_shutdown(false, true);
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ } else if (do_recover_aborted) {
+ do_x1_recover(false);
+ } else if (do_recover_only) {
+ do_x1_recover_only();
+ } else if (do_no_recover) {
+ do_x1_no_recover();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-upgrade-db-descriptor.cc b/storage/tokudb/PerconaFT/src/tests/recover-upgrade-db-descriptor.cc
new file mode 100644
index 00000000..e195f95a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-upgrade-db-descriptor.cc
@@ -0,0 +1,330 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the comparison function gets a valid db object pointer
+
+#include <sys/stat.h>
+#include "test.h"
+
+
+const char *descriptor_contents[] = {
+ "Spoon full of sugar",
+ "Bucket full of pants"
+};
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+
+int verified = 0;
+uint32_t forced_version = 2;
+
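+// my_compare is installed as the environment's default comparison function;
+// on every key comparison it checks that db->cmp_descriptor holds the
+// descriptor selected by forced_version (descriptor_contents[forced_version-1])
+// before comparing the keys, so a stale descriptor after recovery trips an assert.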
+static int my_compare(DB *UU(db), const DBT *a, const DBT *b) {
+ assert(db);
+ assert(db->cmp_descriptor);
+ uint32_t which = forced_version-1;
+ size_t len = strlen(descriptor_contents[which])+1;
+
+ assert(db->cmp_descriptor->dbt.size == len);
+ assert(memcmp(db->cmp_descriptor->dbt.data, descriptor_contents[which], len) == 0);
+
+ assert(a->size == b->size);
+ verified = 1;
+ return memcmp(a->data, b->data, a->size);
+}
+
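+// change_descriptor installs descriptor_contents[which] inside its own small
+// transaction; DB_UPDATE_CMP_DESCRIPTOR makes it the comparison descriptor
+// that my_compare checks.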
+static void
+change_descriptor(DB* db, int which, DB_ENV* env) {
+ DBT descriptor;
+ size_t len = strlen(descriptor_contents[which])+1;
+ dbt_init(&descriptor, descriptor_contents[which], len);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db->change_descriptor(db, txn_desc, &descriptor, DB_UPDATE_CMP_DESCRIPTOR); CKERR(chk_r); }
+ });
+}
+
+static void
+do_x1_shutdown (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ char datadir[TOKU_PATH_MAX+1];
+ r = toku_os_mkdir(toku_path_join(datadir, 2, TOKU_TEST_FILENAME, "data"), S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ DB_ENV *env;
+ DB *dba;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_data_dir(env, "data"); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ change_descriptor(dba, 0, env);
+ r = dba->close(dba, 0); CKERR(r);
+
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ change_descriptor(dba, 1, env);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ r = dba->put(dba, txn, &b, &a, 0); CKERR(r);
+ }
+ //printf("opened\n");
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ //printf("shutdown\n");
+ assert(verified);
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool did_commit) {
+ DB_ENV *env;
+ DB *dba;
+ int r;
+ char datadir[TOKU_PATH_MAX+1];
+ toku_path_join(datadir, 2, TOKU_TEST_FILENAME, "data");
+ toku_os_recursive_delete(datadir);
+ r = toku_os_mkdir(datadir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
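+    // the data directory was wiped above, so recovery must recreate a.db
+    // (and its descriptor) from the log before the checks below can pass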
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_data_dir(env, "data"); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT aa, ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ DB_TXN *txn;
+ DBC *ca;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); CKERR(r);
+ if (did_commit) {
+ assert(ra==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+        assert(memcmp(ab.data, &b, 2)==0);
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == 0);
+ assert(aa.size == 2 && ab.size == 2 && memcmp(aa.data, b, 2) == 0 && memcmp(ab.data, a, 2) == 0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ assert(verified);
+ exit(0);
+}
+
+static void
+do_x1_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+#if 0
+
+static void
+do_test_internal (bool commit)
+{
+ pid_t pid;
+ if (0 == (pid=fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--commit" : "--abort", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));
+ assert(WIFSIGNALED(status) && WTERMSIG(status)==SIGABRT);
+ }
+    // Now find out what happened
+
+ if (0 == (pid = fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--recover-committed" : "--recover-aborted", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("recovery exited=%d\n", WIFEXITED(status));
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+}
+
+static void
+do_test (void) {
+ do_test_internal(true);
+ do_test_internal(false);
+}
+
+#endif
+
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown (true, false);
+ } else if (do_abort) {
+ do_x1_shutdown (false, false);
+ } else if (do_explicit_abort) {
+ do_x1_shutdown(false, true);
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ } else if (do_recover_aborted) {
+ do_x1_recover(false);
+ } else if (do_recover_only) {
+ do_x1_recover_only();
+ } else if (do_no_recover) {
+ do_x1_no_recover();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-x1-abort.cc b/storage/tokudb/PerconaFT/src/tests/recover-x1-abort.cc
new file mode 100644
index 00000000..7a4db46a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-x1-abort.cc
@@ -0,0 +1,304 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Transaction consistency:
+ * fork a process:
+ * Open two tables, T1 and T2
+ * begin transaction
+ * store A in T1
+ * checkpoint
+ * store B in T2
+ * commit (or abort)
+ * signal to end the process abruptly
+ * wait for the process to finish
+ * open the environment doing recovery
+ * check to see if both A and B are present (or absent)
+ */
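+// do_x1_shutdown() performs the crash half of the scenario above (two puts
+// around a checkpoint, then a deliberate hard crash); do_x1_recover() performs
+// the verification half, reopening the environment with DB_RECOVER and checking
+// that the rows in a.db and b.db are either both present (commit) or both
+// absent (abort).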
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+const char *nameb="b.db";
+
+
+static void
+do_x1_shutdown (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *dba, *dbb;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ r = dbb->put(dbb, txn, &b, &a, 0); CKERR(r);
+ }
+ //printf("opened\n");
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool did_commit) {
+ DB_ENV *env;
+ DB *dba, *dbb;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+    r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT aa, ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ DBT ba, bb;
+ dbt_init(&ba, NULL, 0);
+ dbt_init(&bb, NULL, 0);
+ DB_TXN *txn;
+ DBC *ca,*cb;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ r = dbb->cursor(dbb, txn, &cb, 0); CKERR(r);
+ int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); CKERR(r);
+ int rb = cb->c_get(cb, &ba, &bb, DB_FIRST); CKERR(r);
+ if (did_commit) {
+ assert(ra==0);
+ assert(rb==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ assert(ba.size==2);
+ assert(bb.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+ assert(memcmp(ab.data, &b, 2)==0);
+        assert(memcmp(ba.data, &b, 2)==0);
+ assert(memcmp(bb.data, &a, 2)==0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ assert(cb->c_get(cb, &ba, &bb, DB_NEXT) == DB_NOTFOUND);
+ fprintf(stderr, "Both verified. Yay!\n");
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ assert(rb==DB_NOTFOUND);
+ fprintf(stderr, "Neither present. Yay!\n");
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = cb->c_close(cb); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
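+    // opening the crashed environment without DB_RECOVER must be refused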
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+#if 0
+static void
+do_test_internal (bool commit)
+{
+ pid_t pid;
+ if (0 == (pid=fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--commit" : "--abort", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));
+ assert(WIFSIGNALED(status) && WTERMSIG(status)==SIGABRT);
+ }
+    // Now find out what happened
+
+ if (0 == (pid = fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--recover-committed" : "--recover-aborted", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("recovery exited=%d\n", WIFEXITED(status));
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+}
+
+static void
+do_test (void) {
+ do_test_internal(true);
+ do_test_internal(false);
+}
+#endif
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0 || strcmp(argv[0], "--test") == 0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char *const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown (true, false);
+ } else if (do_abort) {
+ do_x1_shutdown (false, false);
+ } else if (do_explicit_abort) {
+ do_x1_shutdown(false, true);
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ } else if (do_recover_aborted) {
+ do_x1_recover(false);
+ } else if (do_recover_only) {
+ do_x1_recover_only();
+ } else if (do_no_recover) {
+ do_x1_no_recover();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-x1-commit.cc b/storage/tokudb/PerconaFT/src/tests/recover-x1-commit.cc
new file mode 100644
index 00000000..237182b4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-x1-commit.cc
@@ -0,0 +1,307 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Transaction consistency:
+ * fork a process:
+ * Open two tables, T1 and T2
+ * begin transaction
+ * store A in T1
+ * checkpoint
+ * store B in T2
+ * commit (or abort)
+ * signal to end the process abruptly
+ * wait for the process to finish
+ * open the environment doing recovery
+ * check to see if both A and B are present (or absent)
+ */
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+const char *nameb="b.db";
+
+
+static void
+do_x1_shutdown (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *dba, *dbb;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
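+        // the checkpoint falls in the middle of the open transaction, so
+        // recovery must resolve this transaction from the log rather than
+        // from the checkpointed state alone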
+ r = dbb->put(dbb, txn, &b, &a, 0); CKERR(r);
+ }
+ //printf("opened\n");
+ if (do_commit) {
+ r = txn->commit(txn, 0); CKERR(r);
+ } else if (do_abort) {
+ r = txn->abort(txn); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool did_commit) {
+ DB_ENV *env;
+ DB *dba, *dbb;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+    r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT aa, ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ DBT ba, bb;
+ dbt_init(&ba, NULL, 0);
+ dbt_init(&bb, NULL, 0);
+ DB_TXN *txn;
+ DBC *ca,*cb;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ r = dbb->cursor(dbb, txn, &cb, 0); CKERR(r);
+ int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); CKERR(r);
+ int rb = cb->c_get(cb, &ba, &bb, DB_FIRST); CKERR(r);
+ if (did_commit) {
+ assert(ra==0);
+ assert(rb==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ assert(ba.size==2);
+ assert(bb.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+ assert(memcmp(ab.data, &b, 2)==0);
+        assert(memcmp(ba.data, &b, 2)==0);
+ assert(memcmp(bb.data, &a, 2)==0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ assert(cb->c_get(cb, &ba, &bb, DB_NEXT) == DB_NOTFOUND);
+ fprintf(stderr, "Both verified. Yay!\n");
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ assert(rb==DB_NOTFOUND);
+ fprintf(stderr, "Neither present. Yay!\n");
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = cb->c_close(cb); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == DB_RUNRECOVERY);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+#if 0
+
+static void
+do_test_internal (bool commit)
+{
+ pid_t pid;
+ if (0 == (pid=fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--commit" : "--abort", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));
+ assert(WIFSIGNALED(status) && WTERMSIG(status)==SIGABRT);
+ }
+    // Now find out what happened
+
+ if (0 == (pid = fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--recover-committed" : "--recover-aborted", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("recovery exited=%d\n", WIFEXITED(status));
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+}
+
+static void
+do_test (void) {
+ do_test_internal(true);
+ do_test_internal(false);
+}
+
+#endif
+
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false, do_no_recover = false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (do_no_recover) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[])
+{
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown (true, false);
+ } else if (do_abort) {
+ do_x1_shutdown (false, false);
+ } else if (do_explicit_abort) {
+ do_x1_shutdown(false, true);
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ } else if (do_recover_aborted) {
+ do_x1_recover(false);
+ } else if (do_recover_only) {
+ do_x1_recover_only();
+ } else if (do_no_recover) {
+ do_x1_no_recover();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-x1-nested-abort.cc b/storage/tokudb/PerconaFT/src/tests/recover-x1-nested-abort.cc
new file mode 100644
index 00000000..82627405
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-x1-nested-abort.cc
@@ -0,0 +1,290 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Transaction consistency:
+ * fork a process:
+ * Open two tables, T1 and T2
+ * begin transaction
+ * store A in T1
+ * checkpoint
+ * store B in T2
+ * commit (or abort)
+ * signal to end the process abruptly
+ * wait for the process to finish
+ * open the environment doing recovery
+ * check to see if both A and B are present (or absent)
+ */
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+const char *nameb="b.db";
+
+
+static void
+do_x1_shutdown (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *dba, *dbb;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn0;
+ r = env->txn_begin(env, NULL, &txn0, 0); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, txn0, &txn, 0); CKERR(r);
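+    // txn is a child of txn0: what the child commits below becomes durable
+    // only if the parent txn0 also commits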
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ r = dbb->put(dbb, txn, &b, &a, 0); CKERR(r);
+ }
+ //printf("opened\n");
+ r = txn->commit(txn, 0); CKERR(r);
+ if (do_commit) {
+        r = txn0->commit(txn0, 0); CKERR(r);
+    } else if (do_abort) {
+        r = txn0->abort(txn0); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool did_commit) {
+ DB_ENV *env;
+ DB *dba, *dbb;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+    r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT aa, ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ DBT ba, bb;
+ dbt_init(&ba, NULL, 0);
+ dbt_init(&bb, NULL, 0);
+ DB_TXN *txn;
+ DBC *ca,*cb;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ r = dbb->cursor(dbb, txn, &cb, 0); CKERR(r);
+ int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); CKERR(r);
+ int rb = cb->c_get(cb, &ba, &bb, DB_FIRST); CKERR(r);
+ if (did_commit) {
+ assert(ra==0);
+ assert(rb==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ assert(ba.size==2);
+ assert(bb.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+ assert(memcmp(ab.data, &b, 2)==0);
+        assert(memcmp(ba.data, &b, 2)==0);
+ assert(memcmp(bb.data, &a, 2)==0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ assert(cb->c_get(cb, &ba, &bb, DB_NEXT) == DB_NOTFOUND);
+ fprintf(stderr, "Both verified. Yay!\n");
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ assert(rb==DB_NOTFOUND);
+ fprintf(stderr, "Neither present. Yay!\n");
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = cb->c_close(cb); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+#if 0
+
+static void
+do_test_internal (bool commit) {
+ pid_t pid;
+ if (0 == (pid=fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--commit" : "--abort", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));
+ assert(WIFSIGNALED(status) && WTERMSIG(status)==SIGABRT);
+ }
+    // Now find out what happened
+
+ if (0 == (pid = fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--recover-committed" : "--recover-aborted", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("recovery exited=%d\n", WIFEXITED(status));
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+}
+
+static void
+do_test (void) {
+ do_test_internal(true);
+ do_test_internal(false);
+}
+
+#endif
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0 || strcmp(argv[0], "--test") == 0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown (true, false);
+ } else if (do_abort) {
+ do_x1_shutdown (false, false);
+ } else if (do_explicit_abort) {
+ do_x1_shutdown(false, true);
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ } else if (do_recover_aborted) {
+ do_x1_recover(false);
+ } else if (do_recover_only) {
+ do_x1_recover_only();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-x1-nested-commit.cc b/storage/tokudb/PerconaFT/src/tests/recover-x1-nested-commit.cc
new file mode 100644
index 00000000..c2a01db8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-x1-nested-commit.cc
@@ -0,0 +1,291 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Transaction consistency:
+ * fork a process:
+ * Open two tables, T1 and T2
+ * begin transaction
+ * store A in T1
+ * checkpoint
+ * store B in T2
+ * commit (or abort)
+ * signal to end the process abruptly
+ * wait for the process to finish
+ * open the environment doing recovery
+ * check to see if both A and B are present (or absent)
+ */
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+const char *nameb="b.db";
+
+
+static void
+do_x1_shutdown (bool do_commit, bool do_abort) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *dba, *dbb;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txn0;
+ r = env->txn_begin(env, NULL, &txn0, 0); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, txn0, &txn, 0); CKERR(r);
+ {
+ DBT a,b;
+ dbt_init(&a, "a", 2);
+ dbt_init(&b, "b", 2);
+ r = dba->put(dba, txn, &a, &b, 0); CKERR(r);
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ r = dbb->put(dbb, txn, &b, &a, 0); CKERR(r);
+ }
+ //printf("opened\n");
+ r = txn->commit(txn, 0); CKERR(r);
+ txn = NULL;
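+    // the child handle is no longer valid once it commits; only the parent
+    // txn0 is left to commit or abort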
+ if (do_commit) {
+ r = txn0->commit(txn0, 0); CKERR(r);
+ } else if (do_abort) {
+        r = txn0->abort(txn0); CKERR(r);
+
+ // force an fsync of the log
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = txn->commit(txn, DB_TXN_SYNC); CKERR(r);
+ }
+ //printf("shutdown\n");
+ toku_hard_crash_on_purpose();
+}
+
+static void
+do_x1_recover (bool did_commit) {
+ DB_ENV *env;
+ DB *dba, *dbb;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+    r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBT aa, ab;
+ dbt_init(&aa, NULL, 0);
+ dbt_init(&ab, NULL, 0);
+ DBT ba, bb;
+ dbt_init(&ba, NULL, 0);
+ dbt_init(&bb, NULL, 0);
+ DB_TXN *txn;
+ DBC *ca,*cb;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = dba->cursor(dba, txn, &ca, 0); CKERR(r);
+ r = dbb->cursor(dbb, txn, &cb, 0); CKERR(r);
+ int ra = ca->c_get(ca, &aa, &ab, DB_FIRST); CKERR(r);
+ int rb = cb->c_get(cb, &ba, &bb, DB_FIRST); CKERR(r);
+ if (did_commit) {
+ assert(ra==0);
+ assert(rb==0);
+ // verify key-value pairs
+ assert(aa.size==2);
+ assert(ab.size==2);
+ assert(ba.size==2);
+ assert(bb.size==2);
+ const char a[2] = "a";
+ const char b[2] = "b";
+ assert(memcmp(aa.data, &a, 2)==0);
+ assert(memcmp(ab.data, &b, 2)==0);
+        assert(memcmp(ba.data, &b, 2)==0);
+ assert(memcmp(bb.data, &a, 2)==0);
+ // make sure no other entries in DB
+ assert(ca->c_get(ca, &aa, &ab, DB_NEXT) == DB_NOTFOUND);
+ assert(cb->c_get(cb, &ba, &bb, DB_NEXT) == DB_NOTFOUND);
+ fprintf(stderr, "Both verified. Yay!\n");
+ } else {
+ // It wasn't committed (it also wasn't aborted), but a checkpoint happened.
+ assert(ra==DB_NOTFOUND);
+ assert(rb==DB_NOTFOUND);
+ fprintf(stderr, "Neither present. Yay!\n");
+ }
+ r = ca->c_close(ca); CKERR(r);
+ r = cb->c_close(cb); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void
+do_x1_recover_only (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+#if 0
+
+static void
+do_test_internal (bool commit) {
+ pid_t pid;
+ if (0 == (pid=fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--commit" : "--abort", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));
+ assert(WIFSIGNALED(status) && WTERMSIG(status)==SIGABRT);
+ }
+    // Now find out what happened
+
+ if (0 == (pid = fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--recover-committed" : "--recover-aborted", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("recovery exited=%d\n", WIFEXITED(status));
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+}
+
+static void
+do_test (void) {
+ do_test_internal(true);
+ do_test_internal(false);
+}
+
+#endif
+
+bool do_commit=false, do_abort=false, do_explicit_abort=false, do_recover_committed=false, do_recover_aborted=false, do_recover_only=false;
+
+static void
+x1_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0], "--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0], "--explicit-abort")==0) {
+ do_explicit_abort=true;
+ } else if (strcmp(argv[0], "--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0], "--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--commit | --abort | --explicit-abort | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_explicit_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (do_recover_only) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ x1_parse_args(argc, argv);
+ if (do_commit) {
+ do_x1_shutdown (true, false);
+ } else if (do_abort) {
+ do_x1_shutdown (false, false);
+ } else if (do_explicit_abort) {
+ do_x1_shutdown(false, true);
+ } else if (do_recover_committed) {
+ do_x1_recover(true);
+ } else if (do_recover_aborted) {
+ do_x1_recover(false);
+ } else if (do_recover_only) {
+ do_x1_recover_only();
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-x2-abort.cc b/storage/tokudb/PerconaFT/src/tests/recover-x2-abort.cc
new file mode 100644
index 00000000..51aa2814
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-x2-abort.cc
@@ -0,0 +1,267 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Transaction consistency:
+ * fork a process:
+ * Open two tables, A and B
+ * begin transaction U
+ * begin transaction V
+ * store U.A into A using U
+ * store V.B into B using V
+ * checkpoint
+ * store U.C into A using U
+ * store V.D into B using V
+ * commit U
+ * maybe commit V
+ * abort the process abruptly
+ * wait for the process to finish
+ * open the environment doing recovery
+ * check to see if both rows are present in A and maybe present in B
+ */
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void
+put (DB_TXN *txn, DB *db, const char *key, const char *data) {
+ DBT k,d;
+ dbt_init(&k, key, 1+strlen(key));
+ dbt_init(&d, data, 1+strlen(data));
+ int r = db->put(db, txn, &k, &d, 0);
+ CKERR(r);
+}
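+
+// put() stores key and value including the terminating NUL (1+strlen), which
+// is why checkcurs() below can compare the returned DBTs with plain strcmp.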
+
+static void
+do_x2_shutdown (bool do_commit) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *dba, *dbb; // Use two DBs so that BDB doesn't get a lock conflict
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+    r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txnU, *txnV;
+ r = env->txn_begin(env, NULL, &txnU, 0); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnV, 0); CKERR(r);
+ put(txnU, dba, "u.a", "u.a.data");
+ put(txnV, dbb, "v.b", "v.b.data");
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ put(txnU, dba, "u.c", "u.c.data");
+ put(txnV, dbb, "v.d", "v.d.data");
+ r = txnU->commit(txnU, 0); CKERR(r);
+ if (do_commit) {
+ r = txnV->commit(txnV, 0); CKERR(r);
+ }
+ toku_hard_crash_on_purpose();
+}
+
+static void
+checkcurs (DBC *curs, int cursflags, const char *key, const char *val, bool expect_it) {
+ DBT k,v;
+ dbt_init(&k, NULL, 0);
+ dbt_init(&v, NULL, 0);
+ int r = curs->c_get(curs, &k, &v, cursflags);
+ if (expect_it) {
+ assert(r==0);
+ printf("Got %s expected %s\n", (char*)k.data, key);
+ assert(strcmp((char*)k.data, key)==0);
+ assert(strcmp((char*)v.data, val)==0);
+ } else {
+ printf("Expected nothing, got r=%d\n", r);
+ assert(r!=0);
+ }
+}
+
+static void
+do_x2_recover (bool did_commit) {
+ DB_ENV *env;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBC *c;
+ r = dba->cursor(dba, txn, &c, 0); CKERR(r);
+ checkcurs(c, DB_FIRST, "u.a", "u.a.data", true);
+ checkcurs(c, DB_NEXT, "u.c", "u.c.data", true);
+ checkcurs(c, DB_NEXT, NULL, NULL, false);
+ r = c->c_close(c); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ }
+ {
+ DB *dbb;
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBC *c;
+ r = dbb->cursor(dbb, txn, &c, 0); CKERR(r);
+ checkcurs(c, DB_FIRST, "v.b", "v.b.data", did_commit);
+ checkcurs(c, DB_NEXT, "v.d", "v.d.data", did_commit);
+ checkcurs(c, DB_NEXT, NULL, NULL, false);
+ r = c->c_close(c); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ }
+
+ r = txn->commit(txn, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+#if 0
+
+static void
+do_test_internal (bool commit) {
+ pid_t pid;
+ if (0 == (pid=fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--commit" : "--abort", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));
+ assert(WIFSIGNALED(status) && WTERMSIG(status)==SIGABRT);
+ }
+    // Now find out what happened
+
+ if (0 == (pid = fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--recover-committed" : "--recover-aborted", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("recovery exited=%d\n", WIFEXITED(status));
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+}
+
+static void
+do_test (void) {
+ do_test_internal(true);
+ do_test_internal(false);
+}
+
+#endif
+
+bool do_commit=false, do_abort=false, do_recover_committed=false, do_recover_aborted=false;
+
+static void
+x2_parse_args (int argc, char *const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0],"--abort")==0 || strcmp(argv[0], "--test") == 0) {
+ do_abort=true;
+ } else if (strcmp(argv[0],"--commit")==0) {
+ do_commit=true;
+ } else if (strcmp(argv[0],"--recover-committed")==0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0],"--recover-aborted")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--abort | --commit | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ x2_parse_args(argc, argv);
+ if (do_commit) {
+ do_x2_shutdown (true);
+ } else if (do_abort) {
+ do_x2_shutdown (false);
+ } else if (do_recover_committed) {
+ do_x2_recover(true);
+ } else if (do_recover_aborted) {
+ do_x2_recover(false);
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recover-x2-commit.cc b/storage/tokudb/PerconaFT/src/tests/recover-x2-commit.cc
new file mode 100644
index 00000000..fdff0f1d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recover-x2-commit.cc
@@ -0,0 +1,267 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Transaction consistency:
+ * fork a process:
+ * Open two tables, A and B
+ * begin transaction U
+ * begin transaction V
+ * store U.A into A using U
+ * store V.B into B using V
+ * checkpoint
+ * store U.C into A using U
+ * store V.D into B using V
+ * commit U
+ * maybe commit V
+ * abort the process abruptly
+ * wait for the process to finish
+ * open the environment doing recovery
+ * check to see if both rows are present in A and maybe present in B
+ */
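+
+/* Expected contents after recovery (these are the cursor checks performed below):
+ *   table A ("a.db"): "u.a" and "u.c" present, since txn U committed before the crash
+ *   table B ("b.db"): "v.b" and "v.d" present only if txn V committed
+ *                     (i.e. --commit was used, verified with --recover-committed)
+ */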
+#include <sys/stat.h>
+#include "test.h"
+
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+const char *namea="a.db";
+const char *nameb="b.db";
+
+static void
+put (DB_TXN *txn, DB *db, const char *key, const char *data) {
+ DBT k,d;
+ dbt_init(&k, key, 1+strlen(key));
+ dbt_init(&d, data, 1+strlen(data));
+ int r = db->put(db, txn, &k, &d, 0);
+ CKERR(r);
+}
+
+static void
+do_x2_shutdown (bool do_commit) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ DB *dba, *dbb; // Use two DBs so that BDB doesn't get a lock conflict
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DB_TXN *txnU, *txnV;
+ r = env->txn_begin(env, NULL, &txnU, 0); CKERR(r);
+ r = env->txn_begin(env, NULL, &txnV, 0); CKERR(r);
+ put(txnU, dba, "u.a", "u.a.data");
+ put(txnV, dbb, "v.b", "v.b.data");
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ put(txnU, dba, "u.c", "u.c.data");
+ put(txnV, dbb, "v.d", "v.d.data");
+ r = txnU->commit(txnU, 0); CKERR(r);
+ if (do_commit) {
+ r = txnV->commit(txnV, 0); CKERR(r);
+ }
+ toku_hard_crash_on_purpose();
+}
+
+static void
+checkcurs (DBC *curs, int cursflags, const char *key, const char *val, bool expect_it) {
+ DBT k,v;
+ dbt_init(&k, NULL, 0);
+ dbt_init(&v, NULL, 0);
+ int r = curs->c_get(curs, &k, &v, cursflags);
+ if (expect_it) {
+ assert(r==0);
+ printf("Got %s expected %s\n", (char*)k.data, key);
+ assert(strcmp((char*)k.data, key)==0);
+ assert(strcmp((char*)v.data, val)==0);
+ } else {
+ printf("Expected nothing, got r=%d\n", r);
+ assert(r!=0);
+ }
+}
+
+static void
+do_x2_recover (bool did_commit) {
+ DB_ENV *env;
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ {
+ DB *dba;
+ r = db_create(&dba, env, 0); CKERR(r);
+ r = dba->open(dba, NULL, namea, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBC *c;
+ r = dba->cursor(dba, txn, &c, 0); CKERR(r);
+ checkcurs(c, DB_FIRST, "u.a", "u.a.data", true);
+ checkcurs(c, DB_NEXT, "u.c", "u.c.data", true);
+ checkcurs(c, DB_NEXT, NULL, NULL, false);
+ r = c->c_close(c); CKERR(r);
+ r = dba->close(dba, 0); CKERR(r);
+ }
+ {
+ DB *dbb;
+ r = db_create(&dbb, env, 0); CKERR(r);
+ r = dbb->open(dbb, NULL, nameb, NULL, DB_BTREE, DB_AUTO_COMMIT|DB_CREATE, 0666); CKERR(r);
+ DBC *c;
+ r = dbb->cursor(dbb, txn, &c, 0); CKERR(r);
+ checkcurs(c, DB_FIRST, "v.b", "v.b.data", did_commit);
+ checkcurs(c, DB_NEXT, "v.d", "v.d.data", did_commit);
+ checkcurs(c, DB_NEXT, NULL, NULL, false);
+ r = c->c_close(c); CKERR(r);
+ r = dbb->close(dbb, 0); CKERR(r);
+ }
+
+ r = txn->commit(txn, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+#if 0
+
+static void
+do_test_internal (bool commit) {
+ pid_t pid;
+ if (0 == (pid=fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--commit" : "--abort", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("signaled=%d sig=%d\n", WIFSIGNALED(status), WTERMSIG(status));
+ assert(WIFSIGNALED(status) && WTERMSIG(status)==SIGABRT);
+ }
+ // Now find out what happened
+
+ if (0 == (pid = fork())) {
+ int r=execl(cmd, verbose ? "-v" : "-q", commit ? "--recover-committed" : "--recover-aborted", NULL);
+ assert(r==-1);
+ printf("execl failed: %d (%s)\n", errno, strerror(errno));
+ assert(0);
+ }
+ {
+ int r;
+ int status;
+ r = waitpid(pid, &status, 0);
+ //printf("recovery exited=%d\n", WIFEXITED(status));
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+}
+
+static void
+do_test (void) {
+ do_test_internal(true);
+ do_test_internal(false);
+}
+
+#endif
+
+bool do_commit=false, do_abort=false, do_recover_committed=false, do_recover_aborted=false;
+
+static void
+x2_parse_args (int argc, char * const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0],"--abort")==0) {
+ do_abort=true;
+ } else if (strcmp(argv[0],"--commit")==0 || strcmp(argv[0], "--test") == 0) {
+ do_commit=true;
+ } else if (strcmp(argv[0],"--recover-committed")==0 || strcmp(argv[0], "--recover") == 0) {
+ do_recover_committed=true;
+ } else if (strcmp(argv[0],"--recover-aborted")==0) {
+ do_recover_aborted=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--abort | --commit | --recover-committed | --recover-aborted } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ {
+ int n_specified=0;
+ if (do_commit) n_specified++;
+ if (do_abort) n_specified++;
+ if (do_recover_committed) n_specified++;
+ if (do_recover_aborted) n_specified++;
+ if (n_specified>1) {
+ printf("Specify only one of --commit or --abort or --recover-committed or --recover-aborted\n");
+ resultcode=1;
+ goto do_usage;
+ }
+ }
+}
+
+int
+test_main (int argc, char * const argv[]) {
+ x2_parse_args(argc, argv);
+ if (do_commit) {
+ do_x2_shutdown (true);
+ } else if (do_abort) {
+ do_x2_shutdown (false);
+ } else if (do_recover_committed) {
+ do_x2_recover(true);
+ } else if (do_recover_aborted) {
+ do_x2_recover(false);
+ }
+#if 0
+ else {
+ do_test();
+ }
+#endif
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recovery_fileops_stress.cc b/storage/tokudb/PerconaFT/src/tests/recovery_fileops_stress.cc
new file mode 100644
index 00000000..683952c5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recovery_fileops_stress.cc
@@ -0,0 +1,587 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+
+static const int NUM_DICTIONARIES = 100;
+//static const int NUM_DICTIONARIES = 3;
+static const char *table = "tbl";
+static const int ROWS_PER_TABLE = 10;
+
+DB_ENV *env;
+DB** db_array;
+DB* states;
+static const int percent_do_op = 20;
+static const int percent_do_abort = 25;
+static const int start_crashing_iter = 10;
+// iterations_per_crash_in_recovery should be an odd number so that successive
+// crash iterations alternate parity (iter & 1) and both recovery callbacks get exercised
+static const int iterations_per_crash_in_recovery = 7;
+const char *state_db_name="states.db";
+
+#define CREATED 0
+#define OPEN 1
+#define CLOSED 2
+#define DELETED 3
+
+#define COMMIT_TXN 0
+#define ABORT_TXN 1
+
+static int commit_or_abort(void) {
+ int i = random() % 100;
+ int rval = ( i < percent_do_abort ) ? ABORT_TXN : COMMIT_TXN;
+ if ( verbose ) {
+ if ( rval == ABORT_TXN ) printf("%s : abort txn\n", __FILE__);
+ }
+ return rval;
+}
+
+static void put_state(int db_num, int state) {
+ int r;
+ DB_TXN* txn;
+ DBT key, val;
+ int key_data = db_num;
+ int val_data = state;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = states->put(states, txn,
+ dbt_init(&key, &key_data, sizeof(key_data)),
+ dbt_init(&val, &val_data, sizeof(val_data)),
+ 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+}
+
+static int get_state(int db_num) {
+ int r;
+ DBT key, val;
+
+ memset(&val, 0, sizeof(val));
+ r = states->get(states, 0,
+ dbt_init(&key, &db_num, sizeof(db_num)),
+ &val,
+ 0);
+ CKERR(r);
+ int state = *(int*)val.data;
+ return state;
+}
+
+static int crash_timer;
+static void crash_it(void);
+static void crash_it_callback_f(void*);
+static void set_crash_timer(void) {
+ crash_timer = random() % (3 * NUM_DICTIONARIES);
+}
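+// The crash timer: set_crash_timer() arms a countdown that is consumed by the three
+// fileop phases around checkpoint #2 below; each phase runs do_random_fileops(), which
+// decrements the timer NUM_DICTIONARIES times, so a value in [0, 3*NUM_DICTIONARIES)
+// crashes at a roughly uniform point across those phases (0 means no crash this iteration).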
+
+static void update_crash_timer(void) {
+ if ( --crash_timer == 0 ) {
+ // close the states table before we crash
+ int r = states->close(states, 0);
+ CKERR(r);
+ if ( verbose ) {
+ printf("%s : crash\n", __FILE__);
+ fflush(stdout);
+ }
+ crash_it();
+ }
+}
+
+static void env_startup(int recovery_flags);
+static int64_t generate_val(int64_t key);
+static void insert_n(DB *db, DB_TXN *txn, int firstkey, int n);
+static int verify_identical_dbts(const DBT *dbt1, const DBT *dbt2);
+static void verify_sequential_rows(DB* compare_db, int64_t firstkey, int64_t numkeys);
+
+static DB* do_create(char* name, int* next_state) {
+ DB* db = NULL;
+ if ( verbose ) printf("%s : do_create(%s)\n", __FILE__, name);
+ int r;
+ DB_TXN* txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, name, NULL, DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ insert_n(db, txn, 0, ROWS_PER_TABLE);
+ if ( commit_or_abort() == COMMIT_TXN ) {
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ *next_state = CREATED;
+ }
+ else {
+ r = db->close(db, 0);
+ CKERR(r);
+ r = txn->abort(txn);
+ CKERR(r);
+ db = NULL;
+ }
+ return db;
+}
+
+static DB* do_open(char* name, int* next_state) {
+ DB* db = NULL;
+ DB_TXN* txn;
+ if ( verbose ) printf("%s : do_open(%s)\n", __FILE__, name);
+ int r;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, name, NULL, DB_UNKNOWN, 0, 0666);
+ CKERR(r);
+ if ( commit_or_abort() == COMMIT_TXN ) {
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ *next_state = OPEN;
+ }
+ else {
+ r = db->close(db, 0);
+ CKERR(r);
+ r = txn->abort(txn);
+ CKERR(r);
+ db = NULL;
+ }
+ return db;
+}
+
+static void do_close(DB* db, char* name, int* next_state) {
+ if ( verbose ) printf("%s : do_close(%s)\n", __FILE__, name);
+ if (!db) printf("db == NULL\n");
+
+ int r = db->close(db, 0);
+ CKERR(r);
+ db = NULL;
+ *next_state = CLOSED;
+}
+
+static void do_delete(char* name, int* next_state) {
+ DB_TXN* txn;
+ if ( verbose ) printf("%s : do_delete(%s)\n", __FILE__, name);
+ int r;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = env->dbremove(env, txn, name, NULL, 0);
+ CKERR(r);
+
+ if ( commit_or_abort() == COMMIT_TXN ) {
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ *next_state = DELETED;
+ }
+ else {
+ r = txn->abort(txn);
+ CKERR(r);
+ }
+}
+
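+// Sketch of the per-dictionary state machine exercised by do_random_fileop():
+//   CREATED/OPEN  --close-->  CLOSED  (and, roughly half of the time, then deleted)
+//   CLOSED        --open-->   OPEN    or  --delete-->  DELETED
+//   DELETED       --create--> CREATED
+// Each create/open/delete runs in its own txn and may commit or abort (see
+// commit_or_abort()); an aborted op leaves the recorded state unchanged, while
+// close is not transactional and always moves to CLOSED.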
+static int do_random_fileop(int i, int state) {
+ DB* db = db_array[i];
+ int rval = random() % 100;
+// if ( verbose ) printf("%s : %s : DB '%d', state '%d, rval '%d'\n", __FILE__, __FUNCTION__, i, state, rval);
+
+ int next_state = state;
+
+ char fname[100];
+ sprintf(fname, "%s%d.db", table, i);
+
+ if ( rval < percent_do_op ) {
+ switch ( state ) {
+ case CREATED:
+ do_close(db, fname, &next_state);
+ db_array[i] = db = 0;
+ if ( rval < (percent_do_op / 2) ) {
+ do_delete(fname, &next_state);
+ }
+ break;
+ case OPEN:
+ do_close(db, fname, &next_state);
+ db_array[i] = db = 0;
+ if ( rval < (percent_do_op / 2) ) {
+ do_delete(fname, &next_state);
+ }
+ break;
+ case CLOSED:
+ if ( rval < (percent_do_op / 2) ) {
+ db = do_open(fname, &next_state);
+ db_array[i] = db;
+ }
+ else {
+ do_delete(fname, &next_state);
+ }
+ break;
+ case DELETED:
+ db = do_create(fname, &next_state);
+ db_array[i] = db;
+ break;
+ }
+ }
+ return next_state;
+}
+
+static void do_random_fileops(void)
+{
+ int i, state, next_state;
+ DB_TXN *txn;
+ for (i=0;i<NUM_DICTIONARIES;i++) {
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ state = get_state(i);
+ next_state = do_random_fileop(i, state);
+ put_state(i, next_state);
+ { int chk_r = txn->commit(txn, 0); CKERR(chk_r); }
+ update_crash_timer();
+ }
+}
+
+
+static void run_test(int iter){
+ uint32_t recovery_flags = DB_INIT_LOG | DB_INIT_TXN;
+ int r, i;
+
+ XMALLOC_N(NUM_DICTIONARIES, db_array);
+ srand(iter);
+
+ if (iter == 0) {
+ // create working directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ }
+ else
+ recovery_flags += DB_RECOVER;
+
+ // crash somewhat frequently during recovery
+ // first, wait until after first crash
+ if ( iter > start_crashing_iter + 1 ) {
+ // every N cycles, crash in recovery
+ if ( (iter % iterations_per_crash_in_recovery) == 0 ) {
+ // crash at different places in recovery
+ if ( iter & 1 )
+ db_env_set_recover_callback(crash_it_callback_f, NULL);
+ else
+ db_env_set_recover_callback2(crash_it_callback_f, NULL);
+ }
+ }
+
+ env_startup(recovery_flags);
+ if ( verbose ) printf("%s : environment init\n", __FILE__);
+
+ if (iter == 0) {
+ // create a dictionary to store test state
+ r = db_create(&states, env, 0); CKERR(r);
+ r = states->open(states, NULL, state_db_name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ DB_TXN *states_txn;
+ r = env->txn_begin(env, NULL, &states_txn, 0); CKERR(r);
+ for (i=0;i<NUM_DICTIONARIES;i++) {
+ put_state(i, DELETED);
+ }
+ r = states_txn->commit(states_txn, 0); CKERR(r);
+ r = states->close(states, 0); CKERR(r);
+ if ( verbose ) printf("%s : states.db initialized\n", __FILE__);
+ }
+
+ // open the 'states' table
+ r = db_create(&states, env, 0); CKERR(r);
+ r = states->open(states, NULL, state_db_name, NULL, DB_UNKNOWN, 0, 0666); CKERR(r);
+
+ if ( verbose ) printf("%s : === ITERATION %6d ===\n", __FILE__, iter);
+
+ // verify previous results
+ if ( verbose ) printf("%s : verify previous results\n", __FILE__);
+ int state = DELETED;
+ DB* db;
+ char fname[100];
+ if ( iter > 0 ) {
+ for (i=0;i<NUM_DICTIONARIES;i++) {
+ sprintf(fname, "%s%d.db", table, i);
+ state = get_state(i);
+ switch (state) {
+ case CREATED:
+ case OPEN:
+ // open the table
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, fname, NULL, DB_UNKNOWN, 0, 0666); CKERR(r);
+ db_array[i] = db;
+ verify_sequential_rows(db, 0, ROWS_PER_TABLE);
+ // leave table open
+ if (verbose) printf("%s : verified open/created db[%d]\n", __FILE__, i);
+ break;
+ case CLOSED:
+ // open the table
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, fname, NULL, DB_UNKNOWN, 0, 0666); CKERR(r);
+ verify_sequential_rows(db, 0, ROWS_PER_TABLE);
+ // close table
+ r = db->close(db, 0); CKERR(r);
+ db_array[i] = db = NULL;
+ if (verbose) printf("%s : verified closed db[%d]\n", __FILE__, i);
+ break;
+ case DELETED:
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, fname, NULL, DB_UNKNOWN, 0, 0666);
+ assert(r != 0); // open must fail: this dictionary was removed
+ db_array[i] = db = NULL;
+ if (verbose) printf("%s : verified db[%d] removed\n", __FILE__, i);
+ break;
+ default:
+ printf("ERROR : Unknown state '%d'\n", state);
+ return;
+ }
+ }
+ }
+ if ( verbose ) printf("%s : previous results verified\n", __FILE__);
+
+ // for each of the dictionaries, perform a fileop some percentage of time (set in do_random_fileop).
+
+ // before checkpoint #1
+ if ( verbose ) printf("%s : before checkpoint #1\n", __FILE__);
+ crash_timer = NUM_DICTIONARIES + 1; // won't go off
+ do_random_fileops();
+
+ // during checkpoint #1
+ if ( verbose ) printf("%s : during checkpoint #1\n", __FILE__);
+ crash_timer = NUM_DICTIONARIES + 1; // won't go off
+
+ if ( iter & 1 )
+ db_env_set_checkpoint_callback((void (*)(void*))do_random_fileops, NULL);
+ else
+ db_env_set_checkpoint_callback2((void (*)(void*))do_random_fileops, NULL);
+ // checkpoint
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ db_env_set_checkpoint_callback(NULL, NULL);
+ db_env_set_checkpoint_callback2(NULL, NULL);
+
+ // randomly fail sometime during the next 3 phases
+ // 1) before the next checkpoint
+ // 2) during the next checkpoint
+ // 3) after the next (final) checkpoint
+
+ if ( iter >= start_crashing_iter ) {
+ set_crash_timer();
+ }
+ else {
+ crash_timer = ( 3 * NUM_DICTIONARIES ) + 1; // won't go off
+ }
+
+ // before checkpoint #2
+ if ( verbose ) printf("%s : before checkpoint #2\n", __FILE__);
+ do_random_fileops();
+
+ // during checkpoint
+ if ( verbose ) printf("%s : during checkpoint #2\n", __FILE__);
+
+ if ( iter & 1 )
+ db_env_set_checkpoint_callback((void (*)(void*))do_random_fileops, NULL);
+ else
+ db_env_set_checkpoint_callback2((void (*)(void*))do_random_fileops, NULL);
+ // checkpoint
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ db_env_set_checkpoint_callback(NULL, NULL);
+ db_env_set_checkpoint_callback2(NULL, NULL);
+
+ // after checkpoint
+ if ( verbose ) printf("%s : after checkpoint #2\n", __FILE__);
+ do_random_fileops();
+
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+
+ for (i=0;i<NUM_DICTIONARIES;i++) {
+ db = db_array[i];
+ state = get_state(i);
+ if ( state == CREATED || state == OPEN ) {
+ r = db->close(db, 0); CKERR(r);
+ db = NULL;
+ }
+ }
+
+ r = states->close(states, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ if ( verbose ) printf("%s : done\n", __FILE__);
+
+ toku_free(db_array);
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+static int iter_arg = 0;
+
+int test_main(int argc, char *const*argv) {
+ do_args(argc, argv);
+ run_test(iter_arg);
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] [-i] \n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-i")==0) {
+ argc--; argv++;
+ iter_arg = atoi(argv[0]);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+static void env_startup(int recovery_flags) {
+ int r;
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | recovery_flags;
+ r = db_env_create(&env, 0); CKERR(r);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+ r=env->set_redzone(env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ //Disable auto-checkpointing.
+ r = env->checkpointing_set_period(env, 0); CKERR(r);
+}
+
+static int64_t generate_val(int64_t key) {
+ return key + 314;
+}
+
+static void insert_n(DB *db, DB_TXN *txn, int firstkey, int n) {
+ int64_t k, v;
+ int r, i;
+ DBT key, val;
+
+ if (!db) return;
+
+ for (i = 0; i<n; i++) {
+ k = firstkey + i;
+ v = generate_val(k);
+ dbt_init(&key, &k, sizeof(k));
+ dbt_init(&val, &v, sizeof(v));
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ }
+}
+
+static int verify_identical_dbts(const DBT *dbt1, const DBT *dbt2) {
+ int r = 0;
+ if (dbt1->size != dbt2->size) r = 1;
+ else if (memcmp(dbt1->data, dbt2->data, dbt1->size)!=0) r = 1;
+ return r;
+}
+
+static void verify_sequential_rows(DB* compare_db, int64_t firstkey, int64_t numkeys) {
+ //This does not lock the dbs/grab table locks.
+ //This means that you CANNOT CALL THIS while another thread is modifying the db.
+ //You CAN call it while a txn is open however.
+ int rval = 0;
+ DB_TXN *compare_txn;
+ int r, r1;
+
+ assert(numkeys >= 1);
+ r = env->txn_begin(env, NULL, &compare_txn, DB_READ_UNCOMMITTED);
+ CKERR(r);
+ DBC *c1;
+
+ r = compare_db->cursor(compare_db, compare_txn, &c1, 0);
+ CKERR(r);
+
+
+ DBT key1, val1;
+ DBT key2, val2;
+
+ int64_t k, v;
+
+ dbt_init_realloc(&key1);
+ dbt_init_realloc(&val1);
+
+ dbt_init(&key2, &k, sizeof(k));
+ dbt_init(&val2, &v, sizeof(v));
+
+// k = firstkey;
+// v = generate_val(k);
+// r1 = c1->c_get(c1, &key2, &val2, DB_SET);
+// CKERR(r1);
+
+ int64_t i;
+ for (i = 0; i<numkeys; i++) {
+ k = i + firstkey;
+ v = generate_val(k);
+ r1 = c1->c_get(c1, &key1, &val1, DB_NEXT);
+// printf("k = %" PRIu64 ", v = %" PRIu64 ", key = %" PRIu64 ", val = %" PRIu64 "\n",
+// k, v, *((int64_t *)(key1.data)), *((int64_t *)(val1.data)));
+ assert(r1==0);
+ rval = verify_identical_dbts(&key1, &key2) |
+ verify_identical_dbts(&val1, &val2);
+ assert(rval == 0);
+ }
+ // now verify that there are no rows after the last expected
+ r1 = c1->c_get(c1, &key1, &val1, DB_NEXT);
+ assert(r1 == DB_NOTFOUND);
+
+ c1->c_close(c1);
+ if (key1.data) toku_free(key1.data);
+ if (val1.data) toku_free(val1.data);
+ compare_txn->commit(compare_txn, 0);
+}
+
+static void UU() crash_it(void) {
+ fflush(stdout);
+ fflush(stderr);
+ int zero = 0;
+ int divide_by_zero = 1/zero;
+ printf("force use of %d\n", divide_by_zero);
+ fflush(stdout);
+ fflush(stderr);
+}
+
+static void crash_it_callback_f(void *dummy UU()) {
+ crash_it();
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc b/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc
new file mode 100644
index 00000000..45f0b465
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc
@@ -0,0 +1,652 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include "ft/logger/logger.h"
+#include "test.h"
+#include "toku_pthread.h"
+
+static int do_recover;
+static int do_crash;
+static char fileop;
+static int choices['J' - 'A' + 1];
+const int num_choices = sizeof(choices)/sizeof(choices[0]);
+static DB_TXN *txn;
+const char *oldname = "oldfoo";
+const char *newname = "newfoo";
+DB_ENV *env;
+DB *db;
+static int crash_during_checkpoint;
+static char *cmd;
+
+static void
+usage(void) {
+ fprintf(stderr,
+ "Usage:\n%s [-v|-q]* [-h] (-c|-r) -O fileop -A# -B# -C# -D# -E# "
+ "-F# -G# [-H# -I# -J#]\n"
+ " fileop = c/r/d (create/rename/delete)\n"
+ " Where # is a single digit number > 0.\n"
+ " A-G are required for fileop=create\n"
+ " A-I are required for fileop=delete, fileop=rename\n",
+ cmd);
+ exit(1);
+}
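+
+// A hypothetical invocation (binary name illustrative): crash a 'create' fileop with
+// every boolean choice 0, the txn committed (C0), and the log flushed before the
+// crash (F1), then recover and verify with the same choices:
+//   recovery_fileops_unit -c -Oc -A0 -B0 -C0 -D0 -E0 -F1 -G0
+//   recovery_fileops_unit -r -Oc -A0 -B0 -C0 -D0 -E0 -F1 -G0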
+
+
+enum { CLOSE_TXN_COMMIT, CLOSE_TXN_ABORT, CLOSE_TXN_NONE };
+enum {CREATE_CREATE, CREATE_CHECKPOINT, CREATE_COMMIT_NEW,
+ CREATE_COMMIT_NEW_CHECKPOINT, CREATE_COMMIT_CHECKPOINT_NEW,
+ CREATE_CHECKPOINT_COMMIT_NEW};
+
+static int fileop_did_commit(void);
+static void close_txn(int type);
+
+static int
+get_x_choice(char c, int possibilities) {
+ assert(c < 'A' + num_choices);
+ assert(c >= 'A');
+ int choice = choices[c-'A'];
+ if (choice >= possibilities)
+ usage();
+ return choice;
+}
+
+//return 0 or 1
+static int
+get_bool_choice(char c) {
+ return get_x_choice(c, 2);
+}
+
+static int
+get_choice_first_create_unrelated_txn(void) {
+ return get_bool_choice('A');
+}
+
+static int
+get_choice_do_checkpoint_after_fileop(void) {
+ return get_bool_choice('B');
+}
+
+static int
+get_choice_txn_close_type(void) {
+ return get_x_choice('C', 3);
+}
+
+static int
+get_choice_close_txn_before_checkpoint(void) {
+ int choice = get_bool_choice('D');
+ //Can't do checkpoint related thing without checkpoint
+ if (choice)
+ assert(get_choice_do_checkpoint_after_fileop());
+ return choice;
+}
+
+static int
+get_choice_crash_checkpoint_in_callback(void) {
+ int choice = get_bool_choice('E');
+ //Can't do checkpoint related thing without checkpoint
+ if (choice)
+ assert(get_choice_do_checkpoint_after_fileop());
+ return choice;
+}
+
+static int
+get_choice_flush_log_before_crash(void) {
+ return get_bool_choice('F');
+}
+
+static int get_choice_dir_per_db(void) { return get_bool_choice('G'); }
+
+static int get_choice_create_type(void) { return get_x_choice('H', 6); }
+
+static int
+get_choice_txn_does_open_close_before_fileop(void) {
+ return get_bool_choice('I');
+}
+
+static int
+get_choice_lock_table_split_fcreate(void) {
+ int choice = get_bool_choice('J');
+ if (choice)
+ assert(fileop_did_commit());
+ return choice;
+}
+
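+// Summary of the choice letters decoded above:
+//   A: begin an unrelated long-lived txn first    B: checkpoint after the fileop
+//   C: how the fileop txn is closed (0 commit, 1 abort, 2 left open)
+//   D: close the txn before the checkpoint        E: crash inside the checkpoint callback
+//   F: flush the log before crashing              G: dir_per_db on/off
+//   H: how the preliminary create is committed/checkpointed (0-5)
+//   I: open and close the file again before the fileop
+//   J: split the create into two txns and pre-acquire a table lock
+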
+static void
+do_args(int argc, char * const argv[]) {
+ cmd = argv[0];
+ int i;
+ //Clear
+ for (i = 0; i < num_choices; i++) {
+ choices[i] = -1;
+ }
+
+ signed char c;
+ while ((c = getopt(argc, argv, "vqhcrO:A:B:C:D:E:F:G:H:I:J:X:")) != -1) {
+ switch (c) {
+ case 'v':
+ verbose++;
+ break;
+ case 'q':
+ verbose--;
+ if (verbose < 0)
+ verbose = 0;
+ break;
+ case 'h':
+ case '?':
+ usage();
+ break;
+ case 'c':
+ do_crash = 1;
+ break;
+ case 'r':
+ do_recover = 1;
+ break;
+ case 'O':
+ if (fileop != '\0')
+ usage();
+ fileop = optarg[0];
+ switch (fileop) {
+ case 'c':
+ case 'r':
+ case 'd':
+ break;
+ default:
+ usage();
+ break;
+ }
+ break;
+ case 'A':
+ case 'B':
+ case 'C':
+ case 'D':
+ case 'E':
+ case 'F':
+ case 'G':
+ case 'H':
+ case 'I':
+ case 'J':
+ if (fileop == '\0')
+ usage();
+ int num;
+ num = atoi(optarg);
+ if (num < 0 || num > 9)
+ usage();
+ choices[c - 'A'] = num;
+ break;
+ case 'X':
+ if (strcmp(optarg, "novalgrind") == 0) {
+ // provide a way for the shell script runner to pass an
+ // arg that suppresses valgrind on this child process
+ break;
+ }
+ /* fall through */ // otherwise, fall through to an error
+ default:
+ usage();
+ break;
+ }
+ }
+ if (argc!=optind) { usage(); exit(1); }
+
+ for (i = 0; i < num_choices; i++) {
+ if (i >= 'H' - 'A' && fileop == 'c')
+ break;
+ if (choices[i] == -1)
+ usage();
+ }
+ assert(!do_recover || !do_crash);
+ assert(do_recover || do_crash);
+}
+
+static void UU() crash_it(void) {
+ int r;
+ if (get_choice_flush_log_before_crash()) {
+ r = env->log_flush(env, NULL); //TODO: USe a real DB_LSN* instead of NULL
+ CKERR(r);
+ }
+ fprintf(stderr, "HAPPY CRASH\n");
+ fflush(stdout);
+ fflush(stderr);
+ toku_hard_crash_on_purpose();
+ printf("This line should never be printed\n");
+ fflush(stdout);
+}
+
+static void checkpoint_callback_maybe_crash(void * UU(extra)) {
+ if (crash_during_checkpoint)
+ crash_it();
+}
+
+static void env_startup(void) {
+ int r;
+ int recover_flag = do_crash ? 0 : DB_RECOVER;
+ if (do_crash) {
+ db_env_set_checkpoint_callback(checkpoint_callback_maybe_crash, NULL);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ }
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | recover_flag;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_dir_per_db(env, get_choice_dir_per_db());
+ CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ //Disable auto-checkpointing.
+ r = env->checkpointing_set_period(env, 0);
+ CKERR(r);
+}
+
+static void
+env_shutdown(void) {
+ int r;
+ r = env->close(env, 0);
+ CKERR(r);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+}
+
+static void
+checkpoint(void) {
+ int r;
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+}
+
+static void
+maybe_make_oldest_living_txn(void) {
+ if (get_choice_first_create_unrelated_txn()) {
+ // create a txn that never closes, forcing recovery to run from the beginning of the log
+ DB_TXN *oldest_living_txn;
+ int r;
+ r = env->txn_begin(env, NULL, &oldest_living_txn, 0);
+ CKERR(r);
+ checkpoint();
+ }
+}
+
+static void
+make_txn(void) {
+ int r;
+ assert(!txn);
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+}
+
+static void
+fcreate(void) {
+ int r;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, oldname, NULL, DB_BTREE, DB_CREATE|DB_EXCL, 0666);
+ CKERR(r);
+
+ if (fileop!='c' && get_choice_lock_table_split_fcreate()) {
+ r = db->close(db, 0);
+ CKERR(r);
+ close_txn(CLOSE_TXN_COMMIT);
+ make_txn();
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, oldname, NULL, DB_BTREE, 0, 0666);
+ CKERR(r);
+ r = db->pre_acquire_table_lock(db, txn);
+ CKERR(r);
+ }
+
+ DBT key, val;
+ dbt_init(&key, choices, sizeof(choices));
+ dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ dbt_init(&key, "name", sizeof("name"));
+ dbt_init(&val, (void*)oldname, strlen(oldname)+1);
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+
+ dbt_init(&key, "to_delete", sizeof("to_delete"));
+ dbt_init(&val, "delete_me", sizeof("delete_me"));
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ r = db->del(db, txn, &key, DB_DELETE_ANY);
+ CKERR(r);
+
+ dbt_init(&key, "to_delete2", sizeof("to_delete2"));
+ dbt_init(&val, "delete_me2", sizeof("delete_me2"));
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ r = db->del(db, txn, &key, 0);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void
+fdelete(void) {
+ int r;
+ r = env->dbremove(env, txn, oldname, NULL, 0);
+ CKERR(r);
+}
+
+static void
+frename(void) {
+ int r;
+ {
+ //Rename in 'key/val' pair.
+ DBT key,val;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, oldname, NULL, DB_BTREE, 0, 0666);
+ CKERR(r);
+ dbt_init(&key, "name", sizeof("name"));
+ dbt_init(&val, (void*)newname, strlen(newname)+1);
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+ }
+ r = env->dbrename(env, txn, oldname, NULL, newname, 0);
+ CKERR(r);
+}
+
+static void
+close_txn(int type) {
+ int r;
+ assert(txn);
+ if (type==CLOSE_TXN_COMMIT) {
+ //commit
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ txn = NULL;
+ }
+ else if (type == CLOSE_TXN_ABORT) {
+ //abort
+ r = txn->abort(txn);
+ CKERR(r);
+ txn = NULL;
+ }
+ else
+ assert(type == CLOSE_TXN_NONE);
+}
+
+static void
+create_and_crash(void) {
+ //Make txn
+ make_txn();
+ //fcreate
+ fcreate();
+
+ if (get_choice_do_checkpoint_after_fileop()) {
+ crash_during_checkpoint = get_choice_crash_checkpoint_in_callback();
+ if (get_choice_close_txn_before_checkpoint())
+ close_txn(get_choice_txn_close_type());
+ checkpoint();
+ if (!get_choice_close_txn_before_checkpoint())
+ close_txn(get_choice_txn_close_type());
+ }
+ else {
+ crash_during_checkpoint = get_choice_crash_checkpoint_in_callback();
+ assert(!crash_during_checkpoint);
+ close_txn(get_choice_txn_close_type());
+ }
+}
+
+static void
+create_and_maybe_checkpoint_and_or_close_after_create(void) {
+ fcreate();
+ switch (get_choice_create_type()) {
+ case (CREATE_CREATE): //Just create
+ break;
+ case (CREATE_CHECKPOINT): //Create then checkpoint
+ checkpoint();
+ break;
+ case (CREATE_COMMIT_NEW): //Create then commit
+ close_txn(CLOSE_TXN_COMMIT);
+ make_txn();
+ break;
+ case (CREATE_COMMIT_NEW_CHECKPOINT): //Create then commit then create new txn then checkpoint
+ close_txn(CLOSE_TXN_COMMIT);
+ make_txn();
+ checkpoint();
+ break;
+ case (CREATE_COMMIT_CHECKPOINT_NEW): //Create then commit then checkpoint then create new txn
+ close_txn(CLOSE_TXN_COMMIT);
+ checkpoint();
+ make_txn();
+ break;
+ case (CREATE_CHECKPOINT_COMMIT_NEW): //Create then checkpoint then commit then create new txn
+ checkpoint();
+ close_txn(CLOSE_TXN_COMMIT);
+ make_txn();
+ break;
+ default:
+ assert(false);
+ break;
+ }
+}
+
+static void
+maybe_open_and_close_file_again_before_fileop(void) {
+ if (get_choice_txn_does_open_close_before_fileop()) {
+ int r;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, oldname, NULL, DB_BTREE, 0, 0666);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+ }
+}
+
+static void
+delete_and_crash(void) {
+ //Make txn
+ make_txn();
+ // create the file first (choice H decides commit/checkpoint behavior)
+ create_and_maybe_checkpoint_and_or_close_after_create();
+
+ maybe_open_and_close_file_again_before_fileop();
+
+ fdelete();
+ if (get_choice_do_checkpoint_after_fileop()) {
+ crash_during_checkpoint = get_choice_crash_checkpoint_in_callback();
+ if (get_choice_close_txn_before_checkpoint())
+ close_txn(get_choice_txn_close_type());
+ checkpoint();
+ if (!get_choice_close_txn_before_checkpoint())
+ close_txn(get_choice_txn_close_type());
+ }
+ else {
+ crash_during_checkpoint = get_choice_crash_checkpoint_in_callback();
+ assert(!crash_during_checkpoint);
+ close_txn(get_choice_txn_close_type());
+ }
+}
+
+static void
+rename_and_crash(void) {
+ //Make txn
+ make_txn();
+ // create the file first (choice H decides commit/checkpoint behavior)
+ create_and_maybe_checkpoint_and_or_close_after_create();
+
+ maybe_open_and_close_file_again_before_fileop();
+
+ frename();
+ if (get_choice_do_checkpoint_after_fileop()) {
+ crash_during_checkpoint = get_choice_crash_checkpoint_in_callback();
+ if (get_choice_close_txn_before_checkpoint())
+ close_txn(get_choice_txn_close_type());
+ checkpoint();
+ if (!get_choice_close_txn_before_checkpoint())
+ close_txn(get_choice_txn_close_type());
+ }
+ else {
+ crash_during_checkpoint = get_choice_crash_checkpoint_in_callback();
+ assert(!crash_during_checkpoint);
+ close_txn(get_choice_txn_close_type());
+ }
+}
+
+
+static void
+execute_and_crash(void) {
+ maybe_make_oldest_living_txn();
+ //split into create/delete/rename
+ if (fileop=='c')
+ create_and_crash();
+ else if (fileop == 'd')
+ delete_and_crash();
+ else {
+ assert(fileop == 'r');
+ rename_and_crash();
+ }
+ crash_it();
+}
+
+static int
+did_create_commit_early(void) {
+ int r;
+ switch (get_choice_create_type()) {
+ case (CREATE_CREATE): //Just create
+ case (CREATE_CHECKPOINT): //Create then checkpoint
+ r = 0;
+ break;
+ case (CREATE_COMMIT_NEW): //Create then commit
+ case (CREATE_COMMIT_NEW_CHECKPOINT): //Create then commit then create new txn then checkpoint
+ case (CREATE_COMMIT_CHECKPOINT_NEW): //Create then commit then checkpoint then create new txn
+ case (CREATE_CHECKPOINT_COMMIT_NEW): //Create then checkpoint then commit then create new txn
+ r = 1;
+ break;
+ default:
+ assert(false);
+ }
+ return r;
+}
+
+static int
+getf_do_nothing(DBT const* UU(key), DBT const* UU(val), void* UU(extra)) {
+ return 0;
+}
+
+static void
+verify_file_exists(const char *name, int should_exist) {
+ int r;
+ make_txn();
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, name, NULL, DB_BTREE, 0, 0666);
+ if (should_exist) {
+ CKERR(r);
+ DBT key, val;
+ dbt_init(&key, choices, sizeof(choices));
+ dbt_init(&val, NULL, 0);
+ r = db->get(db, txn, &key, &val, 0);
+ r = db->getf_set(db, txn, 0, &key, getf_do_nothing, NULL);
+ CKERR(r);
+ dbt_init(&key, "name", sizeof("name"));
+ dbt_init(&val, (void*)name, strlen(name)+1);
+ r = db->getf_set(db, txn, 0, &key, getf_do_nothing, NULL);
+ CKERR(r);
+
+ DBC *c;
+ r = db->cursor(db, txn, &c, 0);
+ CKERR(r);
+ int num_found = 0;
+ while ((r = c->c_getf_next(c, 0, getf_do_nothing, NULL)) == 0) {
+ num_found++;
+ }
+ CKERR2(r, DB_NOTFOUND);
+ assert(num_found == 2); //name and choices array.
+ r = c->c_close(c);
+ CKERR(r);
+ }
+ else
+ CKERR2(r, ENOENT);
+ r = db->close(db, 0);
+ CKERR(r);
+ close_txn(CLOSE_TXN_COMMIT);
+}
+
+static int
+fileop_did_commit(void) {
+ return get_choice_txn_close_type() == CLOSE_TXN_COMMIT &&
+ (!get_choice_do_checkpoint_after_fileop() ||
+ !get_choice_crash_checkpoint_in_callback() ||
+ get_choice_close_txn_before_checkpoint());
+}
+
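+// Expected files after recovery, per fileop (sketch of the logic below):
+//   create: oldname exists iff the create txn committed
+//   delete: oldname exists iff the preliminary create committed early and the delete did not commit
+//   rename: newname exists if the rename committed; otherwise oldname exists if the create committed early
+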
+static void
+recover_and_verify(void) {
+ //Recovery was done during env_startup
+ int expect_old_name = 0;
+ int expect_new_name = 0;
+ if (fileop=='c') {
+ expect_old_name = fileop_did_commit();
+ }
+ else if (fileop == 'd') {
+ expect_old_name = did_create_commit_early() && !fileop_did_commit();
+ }
+ else {
+ //Wrong? if checkpoint AND crash during checkpoint
+ if (fileop_did_commit())
+ expect_new_name = 1;
+ else if (did_create_commit_early())
+ expect_old_name = 1;
+ }
+ // File existence can only be verified if the recovery log was flushed before the crash
+ if ((get_choice_flush_log_before_crash())) {
+ verify_file_exists(oldname, expect_old_name);
+ verify_file_exists(newname, expect_new_name);
+ }
+ env_shutdown();
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ crash_during_checkpoint = 0; //Do not crash during checkpoint (possibly during recovery).
+ do_args(argc, argv);
+ env_startup();
+ if (do_crash)
+ execute_and_crash();
+ else
+ recover_and_verify();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/recovery_stress.cc b/storage/tokudb/PerconaFT/src/tests/recovery_stress.cc
new file mode 100644
index 00000000..1646030c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/recovery_stress.cc
@@ -0,0 +1,573 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "checkpoint_test.h"
+
+
+static const uint64_t max_cachesize = 256 << 20;
+static const int NUM_DICTIONARIES = 1;
+
+static const int OPER_STEPS = 6;
+
+static const int ITERATIONS_PER_CRASH_IN_RECOVERY = 7;
+
+typedef enum __recovery_stress_steps
+{
+ PRE_PRE_STEP = 0,
+ PRE_CP_STEP,
+ PRE_POST_STEP,
+ CP_CP_STEP,
+ CP_POST_STEP,
+ POST_POST_STEP
+} STEP;
+//const int OPER_PER_STEP = 331;
+const int OPER_PER_STEP = 43;
+#define OPER_PER_ITER ( OPER_STEPS * OPER_PER_STEP )
+
+#define DBG(str) if (verbose) printf("%s:%25s: %s\n", __FILE__, __FUNCTION__, str)
+#define iDBG(iter) if (verbose) printf("%s:%25s: iter = %d\n", __FILE__, __FUNCTION__, iter)
+
+static int firstkey(int iter, int step) { return (iter * OPER_PER_ITER) + (step * OPER_PER_STEP); }
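+// Worked example of the key layout above: each (iter, step) pair owns a disjoint
+// block of OPER_PER_STEP keys, e.g. with OPER_PER_STEP = 43 (OPER_PER_ITER = 258),
+// firstkey(2, CP_CP_STEP) = 2*258 + 3*43 = 645.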
+
+//static toku_pthread_t thread;
+
+static void
+drop_dead(void) {
+ // deliberate zerodivide or sigsegv
+#if 0
+ fprintf(stderr, "HAPPY CRASH\n");
+#endif
+ fflush(stdout);
+ fflush(stderr);
+ int zero = 0;
+ int infinity = 1/zero;
+ printf("Survived zerodivide!\n");
+ fflush(stdout);
+ printf("Infinity = %d\n", infinity);
+ fflush(stdout);
+ void * intothevoid = NULL;
+ (*(int*)intothevoid)++;
+ printf("intothevoid = %p, infinity = %d\n", intothevoid, infinity);
+ printf("This line should never be printed\n");
+ fflush(stdout);
+}
+
+static void drop_dead_callback_f(void *dummy UU()) {
+ drop_dead();
+}
+
+static void verify (DICTIONARY dictionaries, int iter) {
+ int i, key;
+ DB *db;
+// iDBG(iter);
+ for (i=0;i<NUM_DICTIONARIES;i++) {
+ db = dictionaries[i].db;
+ if (iter == 1) {
+ key = firstkey(iter - 1, 0);
+ verify_sequential_rows(db, key, OPER_PER_ITER);
+ }
+ else if (iter == 2) {
+ key = firstkey(iter - 2, 0);
+ verify_sequential_rows(db, key, OPER_PER_ITER * 2);
+ }
+ else if (iter == 3) {
+ key = firstkey(iter - 3, 0);
+ verify_sequential_rows(db, key, OPER_PER_ITER * 3);
+ }
+ else if (iter > 3) {
+ key = firstkey(iter - 4, 0);
+ verify_sequential_rows(db, key, OPER_PER_ITER * 4);
+ }
+ }
+ return;
+}
+
+struct iteration_spec {
+ DICTIONARY dictionaries;
+ int iter;
+ STEP step;
+ DB_TXN *pre_pre_insert_commit;
+ DB_TXN *pre_cp_insert_commit;
+ DB_TXN *pre_post_insert_commit;
+ DB_TXN *cp_cp_insert_commit;
+ DB_TXN *cp_post_insert_commit;
+ DB_TXN *post_post_insert_commit;
+
+ DB_TXN *pre_pre_insert_abort;
+ DB_TXN *pre_cp_insert_abort;
+ DB_TXN *pre_post_insert_abort;
+ DB_TXN *cp_cp_insert_abort;
+ DB_TXN *cp_post_insert_abort;
+ DB_TXN *post_post_insert_abort;
+
+ DB_TXN *pre_insert_incmplt;
+ DB_TXN *cp_insert_incmplt;
+ DB_TXN *post_insert_incmplt;
+
+ DB_TXN *pre_pre_delete_commit;
+ DB_TXN *pre_cp_delete_commit;
+ DB_TXN *pre_post_delete_commit;
+ DB_TXN *cp_cp_delete_commit;
+ DB_TXN *cp_post_delete_commit;
+ DB_TXN *post_post_delete_commit;
+
+ DB_TXN *pre_pre_delete_abort;
+ DB_TXN *pre_cp_delete_abort;
+ DB_TXN *pre_post_delete_abort;
+ DB_TXN *cp_cp_delete_abort;
+ DB_TXN *cp_post_delete_abort;
+ DB_TXN *post_post_delete_abort;
+
+ DB_TXN *pre_delete_incmplt;
+ DB_TXN *cp_delete_incmplt;
+ DB_TXN *post_delete_incmplt;
+};
+typedef struct iteration_spec *ITER_SPEC;
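+// Field naming sketch: <begin step>_<end step>_<op>_<outcome>; e.g. pre_cp_insert_commit
+// is a txn begun in the pre-checkpoint step whose inserts are committed from inside the
+// checkpoint callback, and the *_incmplt txns are deliberately left open so the crash
+// leaves them incomplete in the log.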
+
+static void pre_checkpoint_acts(ITER_SPEC spec) {
+ int i;
+ DB *db;
+ DICTIONARY dictionaries = spec->dictionaries;
+ int iter = spec->iter;
+ assert(spec->step == PRE_PRE_STEP);
+ int key;
+ int r;
+
+// iDBG(iter);
+ for (i=0;i<NUM_DICTIONARIES;i++) {
+ db = dictionaries[i].db;
+
+
+ // ---- GOOD INSERTIONS FOR THIS ITERATION ----
+ // begin pre, commit pre
+ key = firstkey(iter, PRE_PRE_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_pre_insert_commit, 0); CKERR(r);
+ insert_n_fixed(db, NULL, spec->pre_pre_insert_commit, key, OPER_PER_STEP);
+ r = spec->pre_pre_insert_commit->commit(spec->pre_pre_insert_commit, 0); CKERR(r);
+
+ // begin pre, commit cp, post
+ key = firstkey(iter, PRE_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_cp_insert_commit, 0); CKERR(r);
+ insert_n_fixed(db, NULL, spec->pre_cp_insert_commit, key, OPER_PER_STEP);
+ key = firstkey(iter, PRE_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_post_insert_commit, 0); CKERR(r);
+ insert_n_fixed(db, NULL, spec->pre_post_insert_commit, key, OPER_PER_STEP);
+
+
+ // ---- ABORTED INSERTIONS THAT WOULD OVERWRITE PREVIOUS ITERATION ----
+ if ( iter > 0 ) {
+ // begin pre, abort pre
+ key = firstkey(iter - 1, PRE_PRE_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_pre_insert_abort, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->pre_pre_insert_abort, key, OPER_PER_STEP);
+ r = spec->pre_pre_insert_abort->abort(spec->pre_pre_insert_abort); CKERR(r);
+ // begin pre, abort cp, post
+ key = firstkey(iter - 1, PRE_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_cp_insert_abort, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->pre_cp_insert_abort, key, OPER_PER_STEP);
+ key = firstkey(iter - 1, PRE_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_post_insert_abort, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->pre_post_insert_abort, key, OPER_PER_STEP);
+ }
+
+ // ---- INCOMPLETE INSERTIONS THAT WOULD OVERWRITE I-2 ITERATIONS AGO ----
+ if ( iter > 1 ) {
+ // begin pre, incomplete
+ key = firstkey(iter - 2, PRE_PRE_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_insert_incmplt, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->pre_insert_incmplt, key, OPER_PER_STEP);
+ }
+
+ // ---- ABORTED DELETES THAT WOULD DELETE I-3 ITERATIONS AGO ----
+ if ( iter > 2 ) {
+ // begin pre, abort pre
+ key = firstkey(iter - 3, PRE_PRE_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_pre_delete_abort, 0); CKERR(r);
+ delete_n(db, NULL, spec->pre_pre_delete_abort, key, OPER_PER_STEP, 0);
+ r = spec->pre_pre_delete_abort->abort(spec->pre_pre_delete_abort); CKERR(r);
+ // begin pre, abort cp, post
+ key = firstkey(iter - 3, PRE_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_cp_delete_abort, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->pre_cp_delete_abort, key, OPER_PER_STEP);
+ key = firstkey(iter - 3, PRE_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_post_delete_abort, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->pre_post_delete_abort, key, OPER_PER_STEP);
+ }
+
+ // ---- INCOMPLETE DELETES THAT WOULD DELETE I-4 ITERATIONS AGO ----
+ if ( iter > 3 ) {
+ // begin pre, incomplete
+ key = firstkey(iter - 4, PRE_PRE_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_delete_incmplt, 0); CKERR(r);
+ delete_n(db, NULL, spec->pre_delete_incmplt, key, OPER_PER_STEP, 0);
+ }
+
+ // ---- GOOD DELETES THAT REMOVE I-5 ITERATIONS AGO ----
+ if ( iter > 4 ) {
+ // begin pre, commit pre
+ key = firstkey(iter - 5, PRE_PRE_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_pre_delete_commit, 0); CKERR(r);
+ delete_n(db, NULL, spec->pre_pre_delete_commit, key, OPER_PER_STEP, 0);
+ r = spec->pre_pre_delete_commit->commit(spec->pre_pre_delete_commit, 0); CKERR(r);
+
+ // begin pre, commit cp, post
+ key = firstkey(iter - 5, PRE_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_cp_delete_commit, 0); CKERR(r);
+ delete_n(db, NULL, spec->pre_cp_delete_commit, key, OPER_PER_STEP, 0);
+ key = firstkey(iter - 5, PRE_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->pre_post_delete_commit, 0); CKERR(r);
+ delete_n(db, NULL, spec->pre_post_delete_commit, key, OPER_PER_STEP, 0);
+ }
+ }
+ return;
+}
+
+static void checkpoint_acts(ITER_SPEC spec) {
+ int i, r, key;
+ DB *db;
+ int iter = spec->iter;
+ DICTIONARY dictionaries = spec->dictionaries;
+ assert(spec->step == CP_CP_STEP);
+// iDBG(iter);
+ for (i=0;i<NUM_DICTIONARIES;i++) {
+ db = dictionaries[i].db;
+
+ // ---- GOOD INSERTIONS FOR THIS ITERATION ----
+ // begin pre, commit cp
+ r = spec->pre_cp_insert_commit->commit(spec->pre_cp_insert_commit, 0); CKERR(r);
+ // begin cp, commit cp
+ key = firstkey(iter, CP_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->cp_cp_insert_commit, 0); CKERR(r);
+ insert_n_fixed(db, NULL, spec->cp_cp_insert_commit, key, OPER_PER_STEP);
+ r = spec->cp_cp_insert_commit->commit(spec->cp_cp_insert_commit, 0); CKERR(r);
+
+ // begin cp, commit post
+ key = firstkey(iter, CP_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->cp_post_insert_commit, 0); CKERR(r);
+ insert_n_fixed(db, NULL, spec->cp_post_insert_commit, key, OPER_PER_STEP);
+
+ // ---- ABORTED INSERTIONS THAT WOULD OVERWRITE PREVIOUS ITERATION ----
+ if ( iter > 0 ) {
+ // begin pre, abort cp
+ r = spec->pre_cp_insert_abort->abort(spec->pre_cp_insert_abort); CKERR(r);
+ // begin cp, abort cp
+ key = firstkey(iter - 1, CP_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->cp_cp_insert_abort, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->cp_cp_insert_abort, key, OPER_PER_STEP);
+ r = spec->cp_cp_insert_abort->abort(spec->cp_cp_insert_abort); CKERR(r);
+ // begin cp, abort post
+ key = firstkey(iter - 1, CP_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->cp_post_insert_abort, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->cp_post_insert_abort, key, OPER_PER_STEP);
+ }
+
+ // ---- INCOMPLETE INSERTIONS THAT WOULD OVERWRITE I-2 ITERATIONS AGO ----
+ if ( iter > 1 ) {
+ // begin cp, incomplete
+ key = firstkey(iter - 2, CP_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->cp_insert_incmplt, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->cp_insert_incmplt, key, OPER_PER_STEP);
+ }
+
+ // ---- ABORTED DELETES THAT WOULD DELETE I-3 ITERATIONS AGO ----
+ if ( iter > 2 ) {
+ // begin pre, abort cp
+ r = spec->pre_cp_delete_abort->abort(spec->pre_cp_delete_abort); CKERR(r);
+ // begin cp, abort cp
+ key = firstkey(iter - 3, CP_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->cp_cp_delete_abort, 0); CKERR(r);
+ delete_n(db, NULL, spec->cp_cp_delete_abort, key, OPER_PER_STEP, 0);
+ r = spec->cp_cp_delete_abort->abort(spec->cp_cp_delete_abort); CKERR(r);
+ // begin cp, abort post
+ key = firstkey(iter - 3, CP_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->cp_post_delete_abort, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->cp_post_delete_abort, key, OPER_PER_STEP);
+ }
+
+ // ---- INCOMPLETE DELETES THAT WOULD DELETE I-4 ITERATIONS AGO ----
+ if ( iter > 3 ) {
+ // begin pre, incomplete
+ key = firstkey(iter - 4, CP_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->cp_delete_incmplt, 0); CKERR(r);
+ delete_n(db, NULL, spec->cp_delete_incmplt, key, OPER_PER_STEP, 0);
+ }
+
+ // ---- GOOD DELETES THAT REMOVE I-5 ITERATIONS AGO ----
+ if ( iter > 4 ) {
+ // begin pre, commit cp
+ r = spec->pre_cp_delete_commit->commit(spec->pre_cp_delete_commit, 0); CKERR(r);
+ // begin cp, commit cp
+ key = firstkey(iter - 5, CP_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->cp_cp_delete_commit, 0); CKERR(r);
+ delete_n(db, NULL, spec->cp_cp_delete_commit, key, OPER_PER_STEP, 0);
+ r = spec->cp_cp_delete_commit->commit(spec->cp_cp_delete_commit, 0); CKERR(r);
+
+ // begin cp, commit post
+ key = firstkey(iter - 5, CP_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->cp_post_delete_commit, 0); CKERR(r);
+ delete_n(db, NULL, spec->cp_post_delete_commit, key, OPER_PER_STEP, 0);
+ }
+ }
+ return;
+}
+
+static void post_checkpoint_acts(ITER_SPEC spec) {
+ int i, r, key;
+ DB *db;
+ int iter = spec->iter;
+ DICTIONARY dictionaries = spec->dictionaries;
+ assert(spec->step == POST_POST_STEP);
+// iDBG(iter);
+ for (i=0;i<NUM_DICTIONARIES;i++) {
+ db = dictionaries[i].db;
+
+ // ---- GOOD INSERTIONS FOR THIS ITERATION ----
+ // begin pre, commit post
+ r = spec->pre_post_insert_commit->commit(spec->pre_post_insert_commit, 0); CKERR(r);
+ // begin cp, commit post
+ r = spec->cp_post_insert_commit->commit(spec->cp_post_insert_commit, 0); CKERR(r);
+ // begin post, commit post
+ key = firstkey(iter, POST_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->post_post_insert_commit, 0); CKERR(r);
+ insert_n_fixed(db, NULL, spec->post_post_insert_commit, key, OPER_PER_STEP);
+ r = spec->post_post_insert_commit->commit(spec->post_post_insert_commit, 0); CKERR(r);
+
+ // ---- ABORTED INSERTIONS THAT WOULD OVERWRITE PREVIOUS ITERATION ----
+ if ( iter > 0 ) {
+ // begin pre, abort post
+ r = spec->pre_post_insert_abort->abort(spec->pre_post_insert_abort); CKERR(r);
+ // begin cp, abort post
+ r = spec->cp_post_insert_abort->abort(spec->cp_post_insert_abort); CKERR(r);
+ // begin post, abort post
+ key = firstkey(iter - 1, POST_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->post_post_insert_abort, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->post_post_insert_abort, key, OPER_PER_STEP);
+ r = spec->post_post_insert_abort->abort(spec->post_post_insert_abort); CKERR(r);
+ }
+
+ // ---- INCOMPLETE INSERTIONS THAT WOULD OVERWRITE I-2 ITERATIONS AGO ----
+ if ( iter > 1 ) {
+ // begin post, incomplete
+ key = firstkey(iter - 2, POST_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->post_insert_incmplt, 0); CKERR(r);
+ insert_n_broken(db, NULL, spec->post_insert_incmplt, key, OPER_PER_STEP);
+ }
+
+ // ---- ABORTED DELETES THAT WOULD DELETE I-3 ITERATIONS AGO ----
+ if ( iter > 2 ) {
+ // begin pre, abort post
+ r = spec->pre_post_delete_abort->abort(spec->pre_post_delete_abort); CKERR(r);
+ // begin cp, abort post
+ r = spec->cp_post_delete_abort->abort(spec->cp_post_delete_abort); CKERR(r);
+ // begin post, abort post
+ key = firstkey(iter - 3, POST_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->post_post_delete_abort, 0); CKERR(r);
+ delete_n(db, NULL, spec->post_post_delete_abort, key, OPER_PER_STEP, 0);
+ r = spec->post_post_delete_abort->abort(spec->post_post_delete_abort); CKERR(r);
+ }
+
+ // ---- INCOMPLETE DELETES THAT WOULD DELETE I-4 ITERATIONS AGO ----
+ if ( iter > 3 ) {
+ // begin post, incomplete
+ key = firstkey(iter - 4, POST_POST_STEP);
+ r = env->txn_begin(env, NULL, &spec->post_delete_incmplt, 0); CKERR(r);
+ delete_n(db, NULL, spec->post_delete_incmplt, key, OPER_PER_STEP, 0);
+ }
+
+ // ---- GOOD DELETES THAT REMOVE I-5 ITERATIONS AGO ----
+ if ( iter > 4 ) {
+ // begin pre, commit post
+ r = spec->pre_post_delete_commit->commit(spec->pre_post_delete_commit, 0); CKERR(r);
+ // begin cp, commit post
+ r = spec->cp_post_delete_commit->commit(spec->cp_post_delete_commit, 0); CKERR(r);
+ // begin post, commit post
+ key = firstkey(iter - 5, CP_CP_STEP);
+ r = env->txn_begin(env, NULL, &spec->post_post_delete_commit, 0); CKERR(r);
+ delete_n(db, NULL, spec->post_post_delete_commit, key, OPER_PER_STEP, 0);
+ r = spec->post_post_delete_commit->commit(spec->post_post_delete_commit, 0); CKERR(r);
+ }
+ }
+ return;
+}
+
+static void run_test (int iter) {
+
+ uint32_t flags = 0;
+ int i, r;
+
+ if (iter == 0)
+ dir_create(TOKU_TEST_FILENAME); // create directory if first time through
+
+    // Run with a cachesize of 256 KB per iteration
+    // to force lots of disk I/O
+    // (each iteration inserts about 4K rows/dictionary, 16 bytes/row, 4 dictionaries = 256K bytes inserted per iteration)
+ uint64_t cachebytes = 0; // 0 => use default size
+ const int32_t K256 = 256 * 1024;
+ cachebytes = K256 * (iter + 1) - (128 * 1024);
+ if (cachebytes > max_cachesize)
+ cachebytes = 0;
+ if (iter & 2) cachebytes = 0; // use default cachesize half the time
+
+
+ if (verbose) printf("%s: iter = %d\n", __FILE__, iter);
+
+ int recovery_flags = DB_INIT_LOG|DB_INIT_TXN;
+ if ( iter != 0 )
+ recovery_flags += DB_RECOVER;
+
+ // crash somewhat frequently during recovery
+ // first, wait until after the system is primed
+ if ( iter > ITERATIONS_PER_CRASH_IN_RECOVERY + 5 ) {
+ // every N cycles, crash in recovery
+ if ( (iter % ITERATIONS_PER_CRASH_IN_RECOVERY) == 0 ) {
+ // crash at different places in recovery
+ if ( iter & 1 )
+ db_env_set_recover_callback(drop_dead_callback_f, NULL);
+ else
+ db_env_set_recover_callback2(drop_dead_callback_f, NULL);
+ }
+ }
+
+ env_startup(TOKU_TEST_FILENAME, cachebytes, recovery_flags);
+
+    // the logic below relies on a mapping of 'iter' to dictionary values;
+    // since crashes in recovery do not modify dictionary values,
+    // 'iter' must be adjusted to count only iterations with successful recoveries
+ int crashes_in_recovery = (iter / ITERATIONS_PER_CRASH_IN_RECOVERY) - ( ( ITERATIONS_PER_CRASH_IN_RECOVERY + 5 ) / ITERATIONS_PER_CRASH_IN_RECOVERY );
+ if ( crashes_in_recovery > 0 ) {
+ iter = iter - crashes_in_recovery;
+ }
+
+ // create array of dictionaries
+ // for each dictionary verify previous iterations and perform new inserts
+
+ DICTIONARY_S dictionaries[NUM_DICTIONARIES];
+ for (i = 0; i < NUM_DICTIONARIES; i++) {
+ char name[32];
+ sprintf(name, "stress_%d", i);
+ init_dictionary(&dictionaries[i], flags, name);
+ db_startup(&dictionaries[i], NULL);
+ }
+
+ // verify previous results
+ verify(dictionaries, iter);
+
+ struct iteration_spec spec;
+ spec.iter = iter;
+ spec.dictionaries = dictionaries;
+ spec.step = PRE_PRE_STEP;
+ // perform pre-checkpoint actions
+ pre_checkpoint_acts(&spec);
+
+ // perform checkpoint acts
+ spec.step = CP_CP_STEP;
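+    // alternate between the two checkpoint-callback registration variants based on iteration parity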
+ if ( iter & 1 )
+ db_env_set_checkpoint_callback((void (*)(void*))checkpoint_acts, &spec);
+ else
+ db_env_set_checkpoint_callback2((void (*)(void*))checkpoint_acts, &spec);
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+ db_env_set_checkpoint_callback(NULL, NULL);
+ db_env_set_checkpoint_callback2(NULL, NULL);
+
+ // post checkpoint acts
+ spec.step = POST_POST_STEP;
+ post_checkpoint_acts(&spec);
+
+ // if requesting crash, randomly do other non-committed acts, then "drop_dead"
+ if (iter > 0) {
+ if (verbose)
+ printf("dying\n");
+        uint32_t delay = myrandom();
+        delay &= 0xFFF;       // keep the low 12 bits (0..4095) ...
+        delay = delay << 8;   // ... scale up by 256, roughly uniform between 0 and 1M ...
+        usleep(delay);        // ... and sleep up to about one second (1M usec)
+ drop_dead();
+ }
+
+ for (i = 0; i < NUM_DICTIONARIES; i++) {
+ db_shutdown(&dictionaries[i]);
+ }
+ r = env->close(env, 0);
+ assert((r == 0) || (r == EINVAL)); // OK to have open transactions prior to close
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char *const argv[]);
+
+static int iter_arg = 0;
+
+int test_main(int argc, char *const*argv) {
+ do_args(argc, argv);
+ run_test(iter_arg);
+ return 0;
+}
+
+static void do_args(int argc, char *const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] [-i] [-C] \n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-i")==0) {
+ argc--; argv++;
+ iter_arg = atoi(argv[0]);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/redirect.cc b/storage/tokudb/PerconaFT/src/tests/redirect.cc
new file mode 100644
index 00000000..f684982c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/redirect.cc
@@ -0,0 +1,327 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/*
+ - ydb layer test of redirection:
+ - create two dictionaries, close
+ - create txn
+ - open dictionary A
+ - redirect (using test-only wrapper in ydb)
+ - verify now open to dictionary B
+ - abort
+ - verify now open to dictionary A
+*/
+
+/*
+ for N = 0 .. n
+ for X == 0 .. x
+ for Y == 0 .. N+X
+ for c == 0 .. 1
+ create two dictionaries (iname A,B), close.
+ create txn
+ Open N DB handles to dictionary A
+ redirect from A to B
+ open X more DB handles to dictionary B
+ close Y DB handles to dictionary B
+ if c ==1 commit else abort
+*/
+
+#define DICT_0 "dict_0.db"
+#define DICT_1 "dict_1.db"
+enum {MAX_DBS = 3};
+static DB_ENV *env = NULL;
+static DB_TXN *txn = NULL;
+static DB *dbs[MAX_DBS];
+static int num_open_dbs = 0;
+static const char *dname = DICT_0;
+static DBT key;
+
+
+static void start_txn(void);
+static void commit_txn(void);
+static void open_db(void);
+static void close_db(void);
+static void insert(int index, int64_t i);
+static void
+start_env(void) {
+ assert(env==NULL);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ dname = DICT_0;
+
+ dbt_init(&key, "key", strlen("key")+1);
+
+ start_txn();
+ open_db();
+ insert(0, 0);
+ dname = DICT_1;
+ open_db();
+ insert(1, 1);
+ close_db();
+ close_db();
+ commit_txn();
+
+ dname = DICT_0;
+}
+
+static void
+end_env(void) {
+ int r;
+ r=env->close(env, 0);
+ CKERR(r);
+ env = NULL;
+}
+
+static void
+start_txn(void) {
+ assert(env!=NULL);
+ assert(txn==NULL);
+ int r;
+ r=env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+}
+
+static void
+abort_txn(void) {
+ assert(env!=NULL);
+ assert(txn!=NULL);
+ int r;
+ r=txn->abort(txn);
+ CKERR(r);
+ txn = NULL;
+}
+
+static void
+commit_txn(void) {
+ assert(env!=NULL);
+ assert(txn!=NULL);
+ int r;
+ r=txn->commit(txn, 0);
+ CKERR(r);
+ txn = NULL;
+}
+
+static void
+open_db(void) {
+ assert(env!=NULL);
+ assert(txn!=NULL);
+ assert(num_open_dbs < MAX_DBS);
+ assert(dbs[num_open_dbs] == NULL);
+
+ int r;
+
+ r = db_create(&dbs[num_open_dbs], env, 0);
+ CKERR(r);
+
+ DB *db = dbs[num_open_dbs];
+
+ r=db->open(db, txn, dname, 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ num_open_dbs++;
+}
+
+static void
+close_db(void) {
+ assert(env!=NULL);
+ assert(num_open_dbs > 0);
+ assert(dbs[num_open_dbs-1] != NULL);
+
+ num_open_dbs--;
+ int r;
+ DB *db = dbs[num_open_dbs];
+ r = db->close(db, 0);
+ CKERR(r);
+ dbs[num_open_dbs] = NULL;
+}
+
+static void
+insert(int idx, int64_t i) {
+ assert(env!=NULL);
+ assert(txn!=NULL);
+ assert(idx>=0);
+ assert(idx<num_open_dbs);
+
+ DB *db = dbs[idx];
+ DBT val;
+ dbt_init(&val, &i, sizeof(i));
+ int r=db->put(db, txn,
+ &key,
+ &val,
+ 0);
+ CKERR(r);
+}
+
+//Verify that ALL dbs point to the expected dictionary.
+static void
+verify(int64_t i) {
+ assert(env!=NULL);
+ assert(txn!=NULL);
+ int r;
+ int which;
+ for (which = 0; which < num_open_dbs; which++) {
+ DB *db = dbs[which];
+ assert(db);
+ DBT val_expected, val_observed;
+ dbt_init(&val_expected, &i, sizeof(i));
+ dbt_init(&val_observed, NULL, 0);
+ r = db->get(db, txn, &key, &val_observed, 0);
+ CKERR(r);
+ r = int64_dbt_cmp(db, &val_expected, &val_observed);
+ assert(r==0);
+ }
+}
+
+static void
+redirect_dictionary(const char *new_dname, int r_expect) {
+ assert(env!=NULL);
+ assert(txn!=NULL);
+ assert(num_open_dbs>0);
+ int r;
+ DB *db = dbs[0];
+ assert(db!=NULL);
+ r = toku_test_db_redirect_dictionary(db, new_dname, txn); // ydb-level wrapper gets iname of new file and redirects
+ CKERR2(r, r_expect);
+ if (r==0) {
+ dname = new_dname;
+ }
+}
+
+static void
+redirect_EINVAL(void) {
+ start_env();
+ start_txn();
+ dname = DICT_0;
+ open_db();
+ dname = DICT_1;
+ open_db();
+ redirect_dictionary(DICT_1, EINVAL);
+ insert(1, 1);
+ redirect_dictionary(DICT_1, EINVAL);
+ close_db();
+ redirect_dictionary(DICT_1, EINVAL);
+ close_db();
+ commit_txn();
+ end_env();
+}
+
+static void
+redirect_test(uint8_t num_open_before, uint8_t num_open_after, uint8_t num_close_after, uint8_t commit) {
+ int i;
+ start_env();
+ start_txn();
+
+ assert(num_open_before > 0);
+
+ for (i = 0; i < num_open_before; i++) {
+ open_db();
+ }
+ verify(0);
+ redirect_dictionary(DICT_1, 0);
+ verify(1);
+ for (i = 0; i < num_open_after; i++) {
+ open_db();
+ }
+ verify(1);
+ assert(num_close_after <= num_open_before + num_open_after);
+ for (i = 0; i < num_close_after; i++) {
+ close_db();
+ }
+ verify(1);
+ if (commit) {
+ commit_txn();
+ start_txn();
+ verify(1);
+ commit_txn();
+ {
+ //Close any remaining open dbs.
+ int still_open = num_open_dbs;
+ assert(still_open == (num_open_before + num_open_after) - num_close_after);
+ for (i = 0; i < still_open; i++) {
+ close_db();
+ }
+ }
+ }
+ else {
+ {
+ //Close any remaining open dbs.
+ int still_open = num_open_dbs;
+ assert(still_open == (num_open_before + num_open_after) - num_close_after);
+ for (i = 0; i < still_open; i++) {
+ close_db();
+ }
+ }
+ abort_txn();
+ start_txn();
+ verify(0);
+ commit_txn();
+ }
+ end_env();
+}
+
+
+int
+test_main (int argc, char *const argv[])
+{
+ parse_args(argc, argv);
+ redirect_EINVAL();
+ int num_open_before; // number of dbs open before redirect
+ int num_open_after; // number of dbs opened after redirect
+ int num_close_after; // number of dbs closed after redirect
+ int commit;
+ for (num_open_before = 1; num_open_before <= 2; num_open_before++) {
+ for (num_open_after = 0; num_open_after <= 1; num_open_after++) {
+ for (num_close_after = 0; num_close_after <= num_open_before+num_open_after; num_close_after++) {
+ for (commit = 0; commit <= 1; commit++) {
+ redirect_test(num_open_before, num_open_after, num_close_after, commit);
+ }
+ }
+ }
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/replace-into-write-lock.cc b/storage/tokudb/PerconaFT/src/tests/replace-into-write-lock.cc
new file mode 100644
index 00000000..8835cebc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/replace-into-write-lock.cc
@@ -0,0 +1,102 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that a db->put with NOOVERWRITE grabs a write lock, not a read lock.
+// we use two transactions. the first transaction tries to put with NOOVERWRITE
+// and finds that the key already exists. it now holds a write lock on the key.
+// the second transaction tries to put the same key with NOOVERWRITE and gets
+// LOCK_NOTGRANTED. the second transaction cannot put the key until the first
+// transaction commits.
+
+int test_main(int argc, char * const argv[]) {
+ int r;
+
+ const char *env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "replacetest";
+
+ parse_args(argc, argv);
+
+ char rm_cmd[strlen(env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", env_dir);
+ r = system(rm_cmd); assert_zero(r);
+
+ r = toku_os_mkdir(env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert_zero(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+ int env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ r = env->open(env, env_dir, env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+ DB_TXN *create_txn = NULL;
+ r = env->txn_begin(env, NULL, &create_txn, 0); assert_zero(r);
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+ r = create_txn->commit(create_txn, 0); assert_zero(r);
+
+ DB_TXN *write_txn = NULL;
+ r = env->txn_begin(env, NULL, &write_txn, 0); assert_zero(r);
+
+ int k = htonl(42); int v = 42;
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v, sizeof v);
+ r = db->put(db, write_txn, &key, &val, DB_NOOVERWRITE); assert_zero(r);
+ r = write_txn->commit(write_txn, 0); assert_zero(r);
+
+ DB_TXN *txn1 = NULL;
+ r = env->txn_begin(env, NULL, &txn1, 0); assert_zero(r);
+
+ DB_TXN *txn2 = NULL;
+ r = env->txn_begin(env, NULL, &txn2, 0); assert_zero(r);
+
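+    // txn1's NOOVERWRITE put fails with DB_KEYEXIST but still takes a write lock on the key,
+    // so txn2's puts get DB_LOCK_NOTGRANTED until txn1 commits.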
+ r = db->put(db, txn1, &key, &val, DB_NOOVERWRITE); assert(r == DB_KEYEXIST);
+ r = db->put(db, txn2, &key, &val, DB_NOOVERWRITE); assert(r == DB_LOCK_NOTGRANTED);
+ r = db->put(db, txn1, &key, &val, 0); assert_zero(r);
+ r = db->put(db, txn2, &key, &val, 0); assert(r == DB_LOCK_NOTGRANTED);
+ r = txn1->commit(txn1, 0); assert_zero(r);
+ r = db->put(db, txn2, &key, &val, 0); assert_zero(r);
+ r = txn2->commit(txn2, 0); assert_zero(r);
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc b/storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc
new file mode 100644
index 00000000..f8099c7a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/rollback-inconsistency.cc
@@ -0,0 +1,161 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// insert enough rows with a child txn and then force an eviction to verify that the rollback
+// log node is in a valid state.
+// the test fails without the fix (and of course passes with it).
+// The test basically simulates George's test script.
+
+
+static void
+populate_table(int start, int end, DB_TXN * parent, DB_ENV * env, DB * db) {
+ DB_TXN *txn = NULL;
+ int r = env->txn_begin(env, parent, &txn, 0); assert_zero(r);
+ for (int i = start; i < end; i++) {
+ int k = htonl(i);
+ char kk[4];
+ char str[220];
+ memset(kk, 0, sizeof kk);
+ memcpy(kk, &k, sizeof k);
+ memset(str,'a', sizeof str);
+ DBT key = { .data = kk, .size = sizeof kk };
+ DBT val = { .data = str, .size = sizeof str };
+ r = db->put(db, txn, &key, &val, 0);
+ assert_zero(r);
+ }
+ r = txn->commit(txn, 0);
+ assert_zero(r);
+}
+
+static void
+populate_and_test(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *parent = NULL;
+ r = env->txn_begin(env, NULL, &parent, 0); assert_zero(r);
+
+ populate_table(0, 128, parent, env, db);
+
+    //we know the eviction is going to happen here and the rollback log node of the parent txn is going to be evicted
+    //due to the extremely small cachesize.
+    populate_table(128, 256, parent, env, db);
+
+    //again an eviction due to memory pressure. 256 rows is the point at which the rollback log spills out. The spilled node
+    //will be written back but will not be dirtied by including rollback nodes from the child txn (in which case the bug would be bypassed).
+ populate_table(256, 512, parent, env, db);
+
+ r = parent->abort(parent); assert_zero(r);
+
+    //try to look up a key in the rolled-back range; it must not be found
+ int k = htonl(200);
+ char kk[4];
+ memset(kk, 0, sizeof kk);
+ memcpy(kk, &k, sizeof k);
+ DBT key = { .data = kk, .size = sizeof kk };
+ DBT val;
+ r = db->get(db, NULL, &key, &val, 0);
+ assert(r==DB_NOTFOUND);
+
+}
+
+static void
+run_test(void) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0);
+ assert_zero(r);
+ env->set_errfile(env, stderr);
+
+ //setting up the cachetable size 64k
+ uint32_t cachesize = 64*1024;
+ r = env->set_cachesize(env, 0, cachesize, 1);
+ assert_zero(r);
+
+ //setting up the log write block size to 4k so the rollback log nodes spill in accordance with the node size
+ r = env->set_lg_bsize(env, 4096);
+ assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert_zero(r);
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert_zero(r);
+
+ r = db->set_pagesize(db, 4096);
+ assert_zero(r);
+
+ r = db->set_readpagesize(db, 1024);
+ assert_zero(r);
+
+ r = db->open(db, NULL, "test.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert_zero(r);
+
+ populate_and_test(env, db);
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test();
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/root_fifo_1.cc b/storage/tokudb/PerconaFT/src/tests/root_fifo_1.cc
new file mode 100644
index 00000000..7e9ace14
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/root_fifo_1.cc
@@ -0,0 +1,185 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test txn commit after db close
+
+#include "test.h"
+#include <sys/stat.h>
+
+DB_ENV *null_env = NULL;
+DB *null_db = NULL;
+DB_TXN *null_txn = NULL;
+DBC *null_cursor = NULL;
+int constant = 0;
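+// with -c, every put uses key 0, so the verify pass expects exactly one row in the db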
+
+static void root_fifo_verify(DB_ENV *env, int n) {
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
+ int r;
+
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DBC *cursor = null_cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key, val;
+ memset(&key, 0, sizeof key); memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0) break;
+ int k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert((int)toku_ntohl(k) == i);
+ }
+ if (constant)
+ assert(i==1);
+ else
+ assert(i == n);
+
+ r = cursor->c_close(cursor); assert(r == 0); cursor = null_cursor;
+
+ r = txn->commit(txn, 0); assert(r == 0); txn = null_txn;
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+}
+
+static void root_fifo_1(int n, int create_outside) {
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
+ int r;
+
+ // create the env
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env = null_env;
+ r = db_env_create(&env, 0); assert(r == 0); assert(env != NULL);
+ r = env->open(env,
+ TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL+DB_INIT_LOG+DB_INIT_LOCK+DB_INIT_TXN+DB_PRIVATE+DB_CREATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ if (create_outside) {
+ DB_TXN *txn_open = null_txn;
+ r = env->txn_begin(env, null_txn, &txn_open, 0); assert(r == 0); assert(txn_open != NULL);
+ DB *db_open = null_db;
+ r = db_create(&db_open, env, 0); assert(r == 0); assert(db_open != NULL);
+ r = db_open->open(db_open, txn_open, "test.db", 0, DB_BTREE, DB_CREATE|DB_EXCL, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+ r = db_open->close(db_open, 0); assert(r == 0); db_open = null_db;
+ r = txn_open->commit(txn_open, 0); assert(r == 0); txn_open = null_txn;
+ }
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ int i;
+ for (i=0; i<n; i++) {
+ if (verbose>1) {
+ printf("%s-%s:%d %d\n", __FILE__, __FUNCTION__, __LINE__, i);
+ }
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DBT key, val;
+ int k = toku_htonl(i);
+ int v = i;
+ if (constant) {
+ k = v = 0;
+ }
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0); txn = null_txn;
+
+ // verify the db
+ root_fifo_verify(env, n);
+
+ // cleanup
+ r = env->close(env, 0); assert(r == 0); env = null_env;
+}
+
+int test_main(int argc, char *const argv[]) {
+ int i;
+ int n = -1;
+
+ // parse_args(argc, argv);
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-n") == 0) {
+ if (i+1 < argc)
+ n = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ verbose--;
+ if (verbose<0) verbose = 0;
+ continue;
+ }
+ if (strcmp(argv[i], "-c") == 0) {
+ constant = 1;
+ continue;
+ }
+ }
+
+ if (n >= 0) {
+ root_fifo_1(n, 0);
+ root_fifo_1(n, 1);
+ }
+ else
+ for (i=0; i<100; i++) {
+ root_fifo_1(i, 0);
+ root_fifo_1(i, 1);
+ }
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/root_fifo_2.cc b/storage/tokudb/PerconaFT/src/tests/root_fifo_2.cc
new file mode 100644
index 00000000..a240e1e7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/root_fifo_2.cc
@@ -0,0 +1,166 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test txn commit after db close
+
+#include "test.h"
+#include <sys/stat.h>
+
+DB_ENV *null_env = NULL;
+DB *null_db = NULL;
+DB_TXN *null_txn = NULL;
+DBC *null_cursor = NULL;
+
+static void root_fifo_verify(DB_ENV *env, int n) {
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
+ int r;
+
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DBC *cursor = null_cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key, val;
+ memset(&key, 0, sizeof key); memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0) break;
+ int k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert((int)toku_ntohl(k) == i);
+ }
+ assert(i == 0);
+
+ r = cursor->c_close(cursor); assert(r == 0); cursor = null_cursor;
+
+ r = txn->commit(txn, 0); assert(r == 0); txn = null_txn;
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+}
+
+static void root_fifo_2(int n, int create_outside) {
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
+ int r;
+
+ // create the env
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env = null_env;
+ r = db_env_create(&env, 0); assert(r == 0); assert(env != NULL);
+ r = env->open(env,
+ TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL+DB_INIT_LOG+DB_INIT_LOCK+DB_INIT_TXN+DB_PRIVATE+DB_CREATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ if (create_outside) {
+ DB_TXN *txn_open = null_txn;
+ r = env->txn_begin(env, null_txn, &txn_open, 0); assert(r == 0); assert(txn_open != NULL);
+ DB *db_open = null_db;
+ r = db_create(&db_open, env, 0); assert(r == 0); assert(db_open != NULL);
+ r = db_open->open(db_open, txn_open, "test.db", 0, DB_BTREE, DB_CREATE|DB_EXCL, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+ r = db_open->close(db_open, 0); assert(r == 0); db_open = null_db;
+ r = txn_open->commit(txn_open, 0); assert(r == 0); txn_open = null_txn;
+ }
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ int i;
+ for (i=0; i<n; i++) {
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DBT key, val;
+ int k = toku_htonl(i);
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+ }
+
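+    // abort the transaction, so none of the inserts should be visible afterwards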
+ r = txn->abort(txn); assert(r == 0); txn = null_txn;
+
+ // verify the db
+ root_fifo_verify(env, n);
+
+ // cleanup
+ r = env->close(env, 0);
+ assert(r == 0); env = null_env;
+}
+
+int test_main(int argc, char *const argv[]) {
+ int i;
+ int n = -1;
+
+ // parse_args(argc, argv);
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose = 1;
+ continue;
+ }
+ if (strcmp(argv[i], "-n") == 0) {
+ if (i+1 < argc)
+ n = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ if (n >= 0) {
+ root_fifo_2(n, 0);
+ root_fifo_2(n, 1);
+ }
+ else
+ for (i=0; i<100; i++) {
+ root_fifo_2(i, 0);
+ root_fifo_2(i, 1);
+ }
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/root_fifo_31.cc b/storage/tokudb/PerconaFT/src/tests/root_fifo_31.cc
new file mode 100644
index 00000000..1f4390af
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/root_fifo_31.cc
@@ -0,0 +1,188 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test txn commit after db close
+
+#include "test.h"
+#include <sys/stat.h>
+
+DB_ENV *null_env = NULL;
+DB *null_db = NULL;
+DB_TXN *null_txn = NULL;
+DBC *null_cursor = NULL;
+
+static void create_non_empty(int n) {
+ DB_ENV *env = null_env;
+ int r;
+ r = db_env_create(&env, 0); assert(r == 0); assert(env != NULL);
+ r = env->open(env,
+ TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL+DB_INIT_LOG+DB_INIT_LOCK+DB_INIT_TXN+DB_PRIVATE+DB_CREATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ int i;
+ for (i=n; i<2*n; i++) {
+ DBT key, val;
+ int k = toku_htonl(i);
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+ }
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+
+ r = txn->commit(txn, 0); assert(r == 0); txn = null_txn;
+
+ r = env->close(env, 0); assert(r == 0); env = null_env;
+}
+
+static void root_fifo_verify(DB_ENV *env, int n) {
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
+
+ int r;
+
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DBC *cursor = null_cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key, val;
+ memset(&key, 0, sizeof key); memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0) break;
+ int k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert((int)toku_ntohl(k) == i);
+ }
+ assert(i == 2*n);
+
+ r = cursor->c_close(cursor); assert(r == 0); cursor = null_cursor;
+
+ r = txn->commit(txn, 0); assert(r == 0); txn = null_txn;
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+}
+
+static void root_fifo_31(int n) {
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
+ int r;
+
+ // create the env
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ // populate
+ create_non_empty(n);
+
+ DB_ENV *env = null_env;
+ r = db_env_create(&env, 0); assert(r == 0); assert(env != NULL);
+ r = env->open(env,
+ TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL+DB_INIT_LOG+DB_INIT_LOCK+DB_INIT_TXN+DB_PRIVATE+DB_CREATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ int i;
+ for (i=0; i<n; i++) {
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DBT key, val;
+ int k = toku_htonl(i);
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0); txn = null_txn;
+
+ // verify the db
+ root_fifo_verify(env, n);
+
+ // cleanup
+ r = env->close(env, 0); assert(r == 0); env = null_env;
+}
+
+int test_main(int argc, char *const argv[]) {
+ int i;
+ int n = -1;
+
+ // parse_args(argc, argv);
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose = 1;
+ continue;
+ }
+ if (strcmp(argv[i], "-n") == 0) {
+ if (i+1 < argc)
+ n = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ if (n >= 0)
+ root_fifo_31(n);
+ else
+ for (i=0; i<100; i++)
+ root_fifo_31(i);
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/root_fifo_32.cc b/storage/tokudb/PerconaFT/src/tests/root_fifo_32.cc
new file mode 100644
index 00000000..fbaba4e3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/root_fifo_32.cc
@@ -0,0 +1,188 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test txn commit after db close
+
+#include "test.h"
+#include <sys/stat.h>
+
+DB_ENV *null_env = NULL;
+DB *null_db = NULL;
+DB_TXN *null_txn = NULL;
+DBC *null_cursor = NULL;
+
+static void create_non_empty(int n) {
+ DB_ENV *env = null_env;
+ int r;
+ r = db_env_create(&env, 0); assert(r == 0); assert(env != NULL);
+ r = env->open(env,
+ TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL+DB_INIT_LOG+DB_INIT_LOCK+DB_INIT_TXN+DB_PRIVATE+DB_CREATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ int i;
+ for (i=n; i<2*n; i++) {
+ DBT key, val;
+ int k = toku_htonl(i);
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+ }
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+
+ r = txn->commit(txn, 0); assert(r == 0); txn = null_txn;
+
+ r = env->close(env, 0); assert(r == 0); env = null_env;
+}
+
+static void root_fifo_verify(DB_ENV *env, int n) {
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
+
+ int r;
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DBC *cursor = null_cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ int i;
+ for (i = n; ; i++) {
+ DBT key, val;
+ memset(&key, 0, sizeof key); memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0) break;
+ int k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert((int)toku_ntohl(k) == i);
+ }
+ assert(i == 2*n);
+
+ r = cursor->c_close(cursor); assert(r == 0); cursor = null_cursor;
+
+ r = txn->commit(txn, 0); assert(r == 0); txn = null_txn;
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+}
+
+static void root_fifo_32(int n) {
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
+ int r;
+
+ // create the env
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ // populate
+ create_non_empty(n);
+
+ DB_ENV *env = null_env;
+ r = db_env_create(&env, 0); assert(r == 0); assert(env != NULL);
+ r = env->open(env,
+ TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL+DB_INIT_LOG+DB_INIT_LOCK+DB_INIT_TXN+DB_PRIVATE+DB_CREATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ int i;
+ for (i=0; i<n; i++) {
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DBT key, val;
+ int k = toku_htonl(i);
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+ }
+
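+    // abort the inserts of keys 0..n-1; only the pre-populated keys n..2n-1 should remain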
+ r = txn->abort(txn); assert(r == 0); txn = null_txn;
+
+ // verify the db
+ root_fifo_verify(env, n);
+
+ // cleanup
+ r = env->close(env, 0); assert(r == 0); env = null_env;
+}
+
+int test_main(int argc, char *const argv[]) {
+ int i;
+ int n = -1;
+
+ // parse_args(argc, argv);
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose = 1;
+ continue;
+ }
+ if (strcmp(argv[i], "-n") == 0) {
+ if (i+1 < argc)
+ n = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ if (n >= 0)
+ root_fifo_32(n);
+ else
+ for (i=0; i<100; i++)
+ root_fifo_32(i);
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/root_fifo_41.cc b/storage/tokudb/PerconaFT/src/tests/root_fifo_41.cc
new file mode 100644
index 00000000..79e3de53
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/root_fifo_41.cc
@@ -0,0 +1,230 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test txn commit after db close
+
+#include "test.h"
+#include <sys/stat.h>
+
+DB_ENV *null_env = NULL;
+DB *null_db = NULL;
+DB_TXN *null_txn = NULL;
+DBC *null_cursor = NULL;
+
+static void create_non_empty(int n, const char *dirname) {
+ DB_ENV *env = null_env;
+ int r;
+ r = db_env_create(&env, 0); assert(r == 0); assert(env != NULL);
+ r = env->set_redzone(env, 0); assert(r == 0);
+ r = env->open(env,
+ dirname,
+ DB_INIT_MPOOL+DB_INIT_LOG+DB_INIT_LOCK+DB_INIT_TXN+DB_PRIVATE+DB_CREATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ int i;
+ for (i=n; i<2*n; i++) {
+ DBT key, val;
+ int k = toku_htonl(i);
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+ }
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+
+ r = txn->commit(txn, 0); assert(r == 0); txn = null_txn;
+
+ r = env->close(env, 0); assert(r == 0); env = null_env;
+}
+
+static void root_fifo_verify(DB_ENV *env, int n, int expectn) {
+ if (verbose) printf("%s:%d %d %d\n", __FUNCTION__, __LINE__, n, expectn);
+
+ int r;
+ DB_TXN *txn = null_txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0); assert(txn != NULL);
+
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DBC *cursor = null_cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key, val;
+ memset(&key, 0, sizeof key); memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0) break;
+ int k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert((int)toku_ntohl(k) == i);
+ }
+ assert(i == expectn);
+
+ r = cursor->c_close(cursor); assert(r == 0); cursor = null_cursor;
+
+ r = txn->commit(txn, 0); assert(r == 0); txn = null_txn;
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+}
+
+static void root_fifo_41(int n, int ntxn, bool do_populate) {
+ if (verbose) printf("%s:%d %d\n", __FUNCTION__, __LINE__, n);
+ int r;
+
+ const char *dirname = TOKU_TEST_FILENAME;
+
+ // create the env
+ toku_os_recursive_delete(dirname);
+ toku_os_mkdir(dirname, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ // populate
+ if (do_populate)
+ create_non_empty(n, dirname);
+
+ DB_ENV *env = null_env;
+ r = db_env_create(&env, 0); assert(r == 0); assert(env != NULL);
+ r = env->set_redzone(env, 0); assert(r == 0);
+ r = env->open(env,
+ dirname,
+ DB_INIT_MPOOL+DB_INIT_LOG+DB_INIT_LOCK+DB_INIT_TXN+DB_PRIVATE+DB_CREATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+ {
+ DB_TXN *txn;
+ DB *db = null_db;
+ r = env->txn_begin(env, null_txn, &txn, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ }
+
+ DB_TXN *txn[ntxn];
+ int i;
+ for (i=0; i<ntxn; i++) {
+ r = env->txn_begin(env, null_txn, &txn[i], 0); assert(r == 0); assert(txn[i] != NULL);
+ }
+
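+    // spread the inserts round-robin across the ntxn open transactions, opening and closing the db around each put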
+ for (i=0; i<n; i++) {
+ DB *db = null_db;
+ r = db_create(&db, env, 0); assert(r == 0); assert(db != NULL);
+
+ r = db->open(db, txn[i % ntxn], "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DBT key, val;
+ int k = toku_htonl(i);
+ r = db->put(db, txn[i % ntxn], dbt_init(&key, &k, sizeof k), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0); db = null_db;
+ }
+
+ for (i=0; i<ntxn; i++) {
+ r = txn[i]->commit(txn[i], 0); assert(r == 0);
+ }
+
+ // verify the db
+ root_fifo_verify(env, n, do_populate ? 2*n : n);
+
+ // cleanup
+ r = env->close(env, 0); assert(r == 0); env = null_env;
+}
+
+static int parseint (char const *str) {
+ char *end;
+ errno=0;
+ int v = strtol(str, &end, 10);
+ if (errno!=0 || *end!=0) {
+ fprintf(stderr, "This argument should be an int: %s\n", str);
+ exit(1);
+ }
+ return v;
+}
+
+int test_main(int argc, char *const argv[]) {
+ int i;
+ int n = -1;
+ int ntxn = -1;
+ bool do_populate = false;
+
+ // parse_args(argc, argv);
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose = 1;
+ } else if (strcmp(argv[i], "-n") == 0) {
+ assert(i+1 < argc);
+ n = parseint(argv[++i]);
+ } else if (strcmp(argv[i], "-ntxn") == 0) {
+ assert(i+1 < argc);
+ ntxn = parseint(argv[++i]);
+ } else if (strcmp(argv[i], "-populate") == 0) {
+ do_populate = true;
+ } else {
+ fprintf(stderr, "What is this argument? %s\n", argv[i]);
+ exit(1);
+ }
+ }
+
+ if (n >= 0)
+ root_fifo_41(n, ntxn == -1 ? 1 : ntxn, do_populate);
+ else {
+ for (i=0; i<100; i++) {
+ for (ntxn=1; ntxn<=4; ntxn++) {
+ root_fifo_41(i, ntxn, false);
+ root_fifo_41(i, ntxn, true);
+ }
+ }
+ }
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/rowsize.cc b/storage/tokudb/PerconaFT/src/tests/rowsize.cc
new file mode 100644
index 00000000..1c5a1bb4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/rowsize.cc
@@ -0,0 +1,91 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static DB_ENV *env = NULL;
+static DB *db = NULL;
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static void setup_env (void) {
+ const int len = strlen(envdir)+100;
+ char cmd[len];
+ snprintf(cmd, len, "rm -rf %s", envdir);
+ {int r = system(cmd); CKERR(r); }
+ {int r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); }
+ {int r = db_env_create(&env, 0); CKERR(r); }
+ //env->set_errfile(env, stderr);
+ CKERR(env->set_redzone(env, 0));
+ { int r = env->open(env, envdir, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); }
+ { int r = db_create(&db, env, 0); CKERR(r); }
+ { int r = db->open(db, NULL, "foo.db", 0, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); }
+}
+
+static void shutdown_env (void) {
+ { int r = db->close(db, 0); CKERR(r); }
+ { int r = env->close(env, 0); CKERR(r); }
+}
+
+static void put (const char *keystring, int size, bool should_work) {
+ DBT k, v;
+ dbt_init(&k, keystring, 1+strlen(keystring));
+ dbt_init(&v, toku_xcalloc(size, 1), size);
+ static DB_TXN *txn = NULL;
+ { int r = env->txn_begin(env, 0, &txn, 0); CKERR(r); }
+ {
+ int r = db->put(db, NULL, &k, &v, 0);
+ if (should_work) {
+ CKERR(r);
+ } else {
+ assert(r!=0);
+ }
+ }
+ { int r = txn->commit(txn, 0); CKERR(r); }
+ toku_free(v.data);
+}
+
+int test_main (int argc, char *const argv[]) {
+ if (0) parse_args(argc, argv);
+ setup_env();
+ if (0) put("foo", 32, true);
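+    // per the expectations encoded here, a 32 MiB value should succeed and one byte more should be rejected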
+ put("foo", 32*1024*1024, true);
+ put("bar", 32*1024*1024+1, false);
+ shutdown_env();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/run-hotindexer-undo-do-tests.bash b/storage/tokudb/PerconaFT/src/tests/run-hotindexer-undo-do-tests.bash
new file mode 100644
index 00000000..90382cbf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/run-hotindexer-undo-do-tests.bash
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+#
+# This file is part of PerconaFT.
+# Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+#
+# run a sequence of hotindexer undo tests.
+
+tests=""
+verbose=0
+valgrind=""
+exitcode=0
+
+for arg in $* ; do
+ if [[ $arg =~ --(.*)=(.*) ]] ; then
+ eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]}
+ else
+ tests="$tests $arg"
+ fi
+done
+
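+# run each .test file through hotindexer-undo-do-test.tdb and diff its output against the matching .result file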
+for t in $tests ; do
+ testdir=`dirname $t`
+ testfile=`basename $t`
+ testname=""
+ resultfile=""
+ if [[ $testfile =~ (.*)\.test$ ]] ; then
+ testname=${BASH_REMATCH[1]}
+ resultfile=$testname.result
+ else
+ exit 1
+ fi
+ if [ $verbose != 0 ] ; then echo $testdir $testname $testfile $resultfile; fi
+
+ $valgrind ./hotindexer-undo-do-test.tdb $testdir/$testfile >$testdir/$testname.run
+
+ if [ -f $testdir/$resultfile ] ; then
+ diff -q $testdir/$testname.run $testdir/$resultfile >/dev/null 2>&1
+ exitcode=$?
+ else
+ exitcode=1
+ fi
+ if [ $verbose != 0 ] ; then
+ echo $testname $exitcode
+ else
+ rm $testdir/$testname.run
+ fi
+ if [ $exitcode != 0 ] ; then break; fi
+done
+
+exit $exitcode
diff --git a/storage/tokudb/PerconaFT/src/tests/run_abortrecover_test.sh b/storage/tokudb/PerconaFT/src/tests/run_abortrecover_test.sh
new file mode 100644
index 00000000..78825603
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/run_abortrecover_test.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+#
+# This file is part of PerconaFT.
+# Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+#
+
+
+if [[ $# -ne 1 ]]; then exit 1; fi
+
+bin=$1; shift
+
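+# the --test phase is expected to crash; if it exits cleanly the test fails, otherwise --recover must succeed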
+if $bin --test
+then
+ echo $bin --test did not crash
+ exit 1
+else
+ set -e
+ $bin --recover
+fi
diff --git a/storage/tokudb/PerconaFT/src/tests/run_checkpoint_stress_test.sh b/storage/tokudb/PerconaFT/src/tests/run_checkpoint_stress_test.sh
new file mode 100644
index 00000000..b7cfe0f0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/run_checkpoint_stress_test.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+#
+# This file is part of PerconaFT.
+# Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+#
+
+set -e
+
+test $# -ge 4
+
+bin=$1; shift
+size=$1; shift
+runs=$1; shift
+abortcode=$1; shift
+
+$bin -C -n $size
+$bin -C -i 0 -n $size
+for (( i = 1; i < $runs; i++ ))
+do
+ echo -n "$i: " && date
+ set +e
+ $bin -c -i $i -n $size -X novalgrind 2>$TOKU_TEST_FILENAME/error.$i
+ test $? -eq $abortcode || exit 1
+ set -e
+ grep -q 'HAPPY CRASH' $TOKU_TEST_FILENAME/error.$i
+done
diff --git a/storage/tokudb/PerconaFT/src/tests/run_diskfull_test.sh b/storage/tokudb/PerconaFT/src/tests/run_diskfull_test.sh
new file mode 100644
index 00000000..26172fec
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/run_diskfull_test.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# This file is part of PerconaFT.
+# Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+#
+
+set -e
+
+test $# -ge 2
+
+bin=$1; shift
+abortcode=$1; shift
+
+num_writes=$($bin -q)
+set +e
+for (( i = 0; i < $num_writes; i++ ))
+do
+ $bin -C $i
+ test $? -eq $abortcode || exit 1
+done
diff --git a/storage/tokudb/PerconaFT/src/tests/run_powerfail_test.py b/storage/tokudb/PerconaFT/src/tests/run_powerfail_test.py
new file mode 100644
index 00000000..41a07acb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/run_powerfail_test.py
@@ -0,0 +1,140 @@
+#!/usr/local/bin/python2.6
+#
+# This file is part of PerconaFT.
+# Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+#
+
+import sys
+import os
+import optparse
+import getpass
+import pexpect
+import time
+from multiprocessing import Process
+
+# options
+parser = optparse.OptionParser()
+parser.add_option('--test', dest='test', type='string', default=None, help="name of stress test to run")
+parser.add_option('--iterations', dest='iterations', type='int', default=1, help="Number of test iterations (default = 1)")
+parser.add_option('--verbose', dest='verbose', action="store_true", default=False, help="Verbose printing (default = FALSE)")
+parser.add_option('--client', dest='client', type='string', default='192.168.1.107', help='client machine being power failed (default=tick)')
+parser.add_option('--sandbox_dir', dest='sbdir', type='string', default='/tpch/mysb/msb_3.0.2-beta_16948/', help='sandbox directory (default = None)')
+options,remainder = parser.parse_args()
+
+nameaddr='mysql@'+options.client
+password='mytokudb'
+
+ipm_nameaddr='admn@192.168.1.254'
+ipm_passwd='admn'
+
+def IPM_cmd(cmds):
+ # password handling
+ ssh_newkey = 'Are you sure you want to continue connecting'
+ p=pexpect.spawn('ssh %s' % ipm_nameaddr, timeout=60)
+ i=p.expect([ssh_newkey,'Password:',pexpect.EOF])
+ if i==0:
+ p.sendline('yes')
+ i=p.expect([ssh_newkey,'Password:',pexpect.EOF])
+ if i==1:
+ p.sendline(ipm_passwd)
+ elif i==2:
+ print "I either got key or connection timeout"
+ pass
+
+ # run command(s)
+ i = p.expect('Sentry:')
+ for cmd in cmds:
+ if i==0:
+ p.sendline(cmd)
+ else:
+ print 'p.expect saw', p.before
+ i = p.expect('Sentry:')
+ print p.before
+
+ # close session
+ p.sendline('quit')
+ p.expect(pexpect.EOF)
+ return 0
+
+def IPM_power_on():
+ IPM_cmd(['on all'])
+
+def IPM_power_off():
+ IPM_cmd(['off all'])
+
+def ssh_cmd(cmd, verbose=True, timeout=30):
+
+ ssh_newkey = 'Are you sure you want to continue connecting'
+ p=pexpect.spawn('ssh %s %s' % (nameaddr, cmd), timeout=timeout)
+
+ i=p.expect([ssh_newkey,'password:',pexpect.EOF])
+ if i==0:
+ p.sendline('yes')
+ i=p.expect([ssh_newkey,'password:',pexpect.EOF])
+ if i==1:
+ if verbose:
+ print 'ssh %s %s' % (nameaddr, cmd)
+ p.sendline(password)
+ p.expect(pexpect.EOF)
+ elif i==2:
+ print "I either got key or connection timeout"
+ pass
+ if verbose:
+ print p.before
+ return p.before
+
+def client_cmd(cmd, verbose=True, timeout=3600):
+ ssh_cmd(cmd, verbose, timeout)
+
+def ping_server(name):
+ p=pexpect.spawn("ping -c 1 "+name)
+ i=p.expect(['1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms',
+ '1 packets transmitted, 1 received, 0% packet loss, time 0ms',
+ pexpect.EOF])
+ return i
+
+
+def test_it():
+ cmd = "/home/wells/svn/iibench/py/iibench.py --db_config_file=%smy.sandbox.cnf --max_rows=1000000000 --engine=tokudb --outfile=/tmp/pf_%d" % (options.sbdir, options.iterations)
+ print "CMD = ", cmd
+ client_cmd(cmd, timeout=3600)
+
+def run_test():
+# cmd = options.test
+# if ( options.verbose ): cmd += ' -v'
+# for i in range(options.iterations):
+
+ t0 = Process(target=test_it, args=())
+ for iter in range(options.iterations + 1):
+ print "Turn On Power to Server"
+ IPM_power_on()
+ i = ping_server(options.client)
+ while ( i != 1 ):
+ i = ping_server(options.client)
+ print "Server rebooted, wait 30 seconds to restart MySQL"
+ time.sleep(30)
+ print "Start MySQL"
+ client_cmd(options.sbdir+'stop') # clears out flags from previous start
+ client_cmd(options.sbdir+'start')
+ if iter < options.iterations:
+ print "Run Test"
+ t0.start()
+ print "Sleep(%d)" % (300 + iter)
+ time.sleep(300 + iter)
+ print "Turn Off Power to Server"
+ IPM_power_off()
+ t0.terminate()
+ else:
+ # last loop through, just cleanup
+ client_cmd(options.sbdir+'stop')
+
+def main(argv):
+ run_test()
+ return 0
+
+if __name__ == '__main__':
+ usage = sys.modules["__main__"].__doc__
+ parser.set_usage(usage)
+ unused_flags, new_argv = parser.parse_args(args=sys.argv[1:], values=options)
+ sys.exit(main([sys.argv[0]] + new_argv))
+
diff --git a/storage/tokudb/PerconaFT/src/tests/run_recover_stress_test.sh b/storage/tokudb/PerconaFT/src/tests/run_recover_stress_test.sh
new file mode 100644
index 00000000..362ea012
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/run_recover_stress_test.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+#
+# This file is part of PerconaFT.
+# Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+#
+
+set -e
+
+test $# -ge 4
+
+bin=$1; shift
+size=$1; shift
+runs=$1; shift
+abortcode=$1; shift
+
+mkdir -p $TOKU_TEST_FILENAME
+$bin -C -n $size -l
+$bin -C -i 0 -n $size -l
+for (( i = 1; i < $runs; i++ ))
+do
+ echo -n "$i: " && date
+ set +e
+ $bin -c -i $i -n $size -l -X novalgrind 2>$TOKU_TEST_FILENAME/error.$i
+ test $? -eq $abortcode || exit 1
+ set -e
+ grep -q 'HAPPY CRASH' $TOKU_TEST_FILENAME/error.$i
+done
diff --git a/storage/tokudb/PerconaFT/src/tests/run_recover_test.sh b/storage/tokudb/PerconaFT/src/tests/run_recover_test.sh
new file mode 100644
index 00000000..ac974f4b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/run_recover_test.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+#
+# This file is part of PerconaFT.
+# Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+#
+
+set -e
+
+test $# -ge 4
+
+bin=$1; shift
+envdir=$1; shift
+tdbrecover=$1; shift
+tdbdump=$1; shift
+
+echo doing $bin
+$bin --no-shutdown
+rm -rf $envdir/recoverdir
+mkdir $envdir/recoverdir
+cp $envdir/tokudb.directory $envdir/recoverdir/
+cp $envdir/tokudb.environment $envdir/recoverdir/
+cp $envdir/tokudb.rollback $envdir/recoverdir/
+cp $envdir/*.tokulog* $envdir/recoverdir/
+echo doing recovery
+$tdbrecover $envdir/recoverdir $envdir/recoverdir
+echo dump and compare
+$tdbdump -h $envdir foo.db >$envdir/foo.dump
+$tdbdump -h $envdir/recoverdir foo.db >$envdir/recoverdir/foo.dump
+diff -q $envdir/foo.dump $envdir/recoverdir/foo.dump
diff --git a/storage/tokudb/PerconaFT/src/tests/run_recovery_fileops_unit.sh b/storage/tokudb/PerconaFT/src/tests/run_recovery_fileops_unit.sh
new file mode 100644
index 00000000..0ac623a2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/run_recovery_fileops_unit.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# This file is part of PerconaFT.
+# Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+#
+
+set -e
+test $# -ge 3
+
+bin=$1; shift
+errorfile=$1; shift
+abortcode=$1; shift
+
+set +e
+$bin -X novalgrind -c $@ 2> $errorfile
+test $? -eq $abortcode || { cat $errorfile; echo Error: no crash in $errorfile; exit 1; }
+set -e
+grep -q 'HAPPY CRASH' $errorfile || { cat $errorfile; echo Error: incorrect crash in $errorfile; exit 1; }
+rm -f $errorfile
+exec $bin -r $@
diff --git a/storage/tokudb/PerconaFT/src/tests/run_stress_test.py b/storage/tokudb/PerconaFT/src/tests/run_stress_test.py
new file mode 100644
index 00000000..554c683a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/run_stress_test.py
@@ -0,0 +1,34 @@
+#!/usr/local/bin/python2.6
+#
+# This file is part of PerconaFT.
+# Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+#
+
+import sys
+import os
+import optparse
+
+# options
+parser = optparse.OptionParser()
+parser.add_option('--test', dest='test', type='string', default=None, help="name of stress test to run")
+parser.add_option('--iterations', dest='iterations', type='int', default=1, help="Number of test iterations (default = 1)")
+parser.add_option('--verbose', dest='verbose', action="store_true", default=False, help="Verbose printing (default = FALSE)")
+options, remainder = parser.parse_args()
+
+def run_test():
+ cmd = options.test
+ if ( options.verbose ): cmd += ' -v'
+ for i in range(options.iterations):
+ os.system(cmd + ' -i %d' % (i))
+
+
+def main(argv):
+ run_test()
+ return 0
+
+if __name__ == '__main__':
+ usage = sys.modules["__main__"].__doc__
+ parser.set_usage(usage)
+ unused_flags, new_argv = parser.parse_args(args=sys.argv[1:], values=options)
+ sys.exit(main([sys.argv[0]] + new_argv))
+
diff --git a/storage/tokudb/PerconaFT/src/tests/run_test_thread_stack.sh b/storage/tokudb/PerconaFT/src/tests/run_test_thread_stack.sh
new file mode 100644
index 00000000..034dbaca
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/run_test_thread_stack.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+#
+# This file is part of PerconaFT.
+# Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+#
+
+if [[ $# -ne 1 ]]; then exit 1; fi
+
+bin=$1; shift
+
+set -e
+
+$bin -a -thread_stack 16384
+$bin -a -thread_stack 16384 -resume
diff --git a/storage/tokudb/PerconaFT/src/tests/seqinsert.cc b/storage/tokudb/PerconaFT/src/tests/seqinsert.cc
new file mode 100644
index 00000000..ccb604d6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/seqinsert.cc
@@ -0,0 +1,112 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <toku_portability.h>
+#include <toku_os.h>
+#include <memory.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+DB_TXN * const null_txn = nullptr;
+
+const size_t nodesize = 128 << 10;
+const size_t keysize = 8;
+const size_t valsize = 92;
+const size_t rowsize = keysize + valsize;
+const int max_degree = 16;
+const size_t numleaves = max_degree * 3; // want height 2, this should be good enough
+const size_t numrows = (numleaves * nodesize + rowsize) / rowsize;
+
+static void test_seqinsert(bool asc) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->set_pagesize(db, nodesize);
+ CKERR(r);
+ r = db->open(db, null_txn, "seqinsert", NULL, DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+
+ char v[valsize];
+ ZERO_ARRAY(v);
+ uint64_t k;
+ DBT key, val;
+ dbt_init(&key, &k, sizeof k);
+ dbt_init(&val, v, valsize);
+ for (size_t i = 0; i < numrows; ++i) {
+ k = toku_htod64(numrows + (asc ? i : -i));
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ }
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+
+ r = db->close(db, 0);
+ CKERR(r);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main(int argc, char * const argv[]) {
+ default_parse_args(argc, argv);
+
+ test_seqinsert(true);
+ test_seqinsert(false);
+
+ return 0;
+}
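
In seqinsert.cc above, numrows is chosen so that the inserted rows roughly fill numleaves leaf nodes of nodesize bytes each, which is what gives the tree its target height. A standalone check of that arithmetic, using only the constants defined in the file (illustrative only, not part of the patch):

    #include <cassert>
    #include <cstddef>

    int main(void) {
        const size_t nodesize  = 128 << 10;   // 128 KiB leaf nodes
        const size_t rowsize   = 8 + 92;      // keysize + valsize
        const size_t numleaves = 16 * 3;      // max_degree * 3, aiming for height 2
        const size_t numrows   = (numleaves * nodesize + rowsize) / rowsize;
        assert(numrows == 62915);             // about 63k rows to fill 48 leaves
        return 0;
    }
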
diff --git a/storage/tokudb/PerconaFT/src/tests/shutdown-3344.cc b/storage/tokudb/PerconaFT/src/tests/shutdown-3344.cc
new file mode 100644
index 00000000..86eb4cca
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/shutdown-3344.cc
@@ -0,0 +1,232 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// measure the cost of closing db's with a full cache table
+
+// create db 0 with txn 0
+// create db's 1..N-1 with auto txn1
+// fill the cache table with blocks for db 0
+// close db 1..N-1 (these should be fast)
+// close db 0
+// abort txn 0
+
+#include "test.h"
+#include <toku_byteswap.h>
+
+static long htonl64(long x) {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ return bswap_64(x);
+#else
+#error
+#endif
+}
+
+static inline float tdiff (struct timeval *a, struct timeval *b) {
+ return (a->tv_sec - b->tv_sec) +1e-6*(a->tv_usec - b->tv_usec);
+}
+
+static void
+insert_row(DB_ENV *env UU(), DB_TXN *txn, DB *db, uint64_t rowi) {
+ int r;
+
+ // generate the key
+ char key_buffer[8];
+ uint64_t k = htonl64(rowi);
+ memcpy(key_buffer, &k, sizeof k);
+
+ // generate the val
+ char val_buffer[1024];
+ memset(val_buffer, 0, sizeof val_buffer);
+
+ DBT key = { .data = key_buffer, .size = sizeof key_buffer };
+ DBT value = { .data = val_buffer, .size = sizeof val_buffer };
+ //uint32_t put_flags = 0 | (txn ? (DB_PRELOCKED_FILE_READ | DB_PRELOCKED_WRITE) : 0);
+ uint32_t put_flags = 0;
+ r = db->put(db, txn, &key, &value, put_flags); assert_zero(r);
+}
+
+static void
+populate(DB_ENV *env, DB_TXN *txn, DB *db, uint64_t nrows) {
+ int r;
+ struct timeval tstart;
+ r = gettimeofday(&tstart, NULL); assert_zero(r);
+ struct timeval tlast = tstart;
+
+ for (uint64_t rowi = 0; rowi < nrows; rowi++) {
+ insert_row(env, txn, db, rowi);
+
+ // maybe report performance
+ uint64_t rows_per_report = 100000;
+ if (((rowi + 1) % rows_per_report) == 0) {
+ struct timeval tnow;
+ r = gettimeofday(&tnow, NULL); assert_zero(r);
+ float last_time = tdiff(&tnow, &tlast);
+ float total_time = tdiff(&tnow, &tstart);
+ if (verbose) {
+ fprintf(stderr, "%" PRIu64 " %.3f %.0f/s %.0f/s\n", rowi + 1, last_time, rows_per_report/last_time, rowi/total_time); fflush(stderr);
+ }
+ tlast = tnow;
+ }
+ }
+}
+
+static void
+run_test(DB_ENV *env, int ndbs, int do_txn, uint32_t pagesize, uint64_t nrows) {
+ int r;
+
+ DB *dbs[ndbs];
+ for (int i = 0; i < ndbs; i++) {
+ DB *db = NULL;
+ if (verbose) {
+ time_t now = time(0); fprintf(stderr, "%.24s creating %d\n", ctime(&now), i);
+ }
+ r = db_create(&db, env, 0); assert_zero(r);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert_zero(r);
+ }
+ DB_TXN *txn1 = NULL;
+ if (do_txn) {
+ r = env->txn_begin(env, NULL, &txn1, 0); assert_zero(r);
+ }
+ char db_filename[32]; sprintf(db_filename, "test%d", i);
+ r = db->open(db, txn1, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+ if (do_txn) {
+ r = txn1->commit(txn1, 0); assert_zero(r);
+ }
+ dbs[i] = db;
+ }
+
+ if (verbose) {
+ time_t now = time(0); fprintf(stderr, "%.24s populating\n", ctime(&now));
+ }
+
+ DB_TXN *txn0 = NULL;
+ if (do_txn) {
+ r = env->txn_begin(env, NULL, &txn0, 0); assert_zero(r);
+ }
+
+ populate(env, txn0, dbs[ndbs-1], nrows);
+
+ if (do_txn) {
+ if (verbose) {
+ time_t now = time(0); fprintf(stderr, "%.24s commit txn0\n", ctime(&now));
+ }
+ r = txn0->commit(txn0, 0); assert_zero(r);
+ }
+
+ for (int i = 0; i < ndbs; i++) {
+ DB *db = dbs[i];
+ if (verbose) {
+ time_t now = time(0); fprintf(stderr, "%.24s closing %d\n", ctime(&now), i);
+ }
+ r = db->close(db, 0); assert_zero(r);
+ }
+
+ if (verbose) {
+ time_t now = time(0); fprintf(stderr, "%.24s done\n", ctime(&now));
+ }
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ const char *env_dir = "dir.shutdown.ca";
+ int ndbs = 500;
+ int do_txn = 1;
+ uint32_t pagesize = 1024;
+ uint64_t cachesize = 1000000000;
+ uint64_t nrows = 50000;
+
+ for (int i = 1; i < argc ; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ if (verbose > 0) verbose--;
+ continue;
+ }
+ if (strcmp(arg, "--txn") == 0 && i+1 < argc) {
+ do_txn = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--pagesize") == 0 && i+1 < argc) {
+ pagesize = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--cachesize") == 0 && i+1 < argc) {
+ cachesize = atol(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--rows") == 0 && i+1 < argc) {
+ nrows = atol(argv[++i]);
+ continue;
+ }
+
+ assert(0);
+ }
+
+ // create clean env dir
+ char rm_cmd[strlen(env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", env_dir);
+ int r;
+ r = system(rm_cmd); assert_zero(r);
+ r = toku_os_mkdir(env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert_zero(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = env->set_cachesize(env, cachesize / gig, cachesize % gig, 1); assert_zero(r);
+ }
+ int env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ if (!do_txn)
+ env_open_flags &= ~(DB_INIT_TXN | DB_INIT_LOG);
+ r = env->open(env, env_dir, env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+
+ run_test(env, ndbs, do_txn, pagesize, nrows);
+
+ if (verbose) fprintf(stderr, "closing env\n");
+ r = env->close(env, 0); assert_zero(r);
+
+ return 0;
+}
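
insert_row() above runs the row index through htonl64 before copying it into the key buffer, so on the little-endian hosts the helper supports, the 8-byte keys are stored big-endian and compare in numeric order under a bytewise comparator; sequential inserts therefore stay sequential in the tree. A minimal standalone sketch of that ordering property, with a hypothetical store_be64 helper standing in for htonl64 plus memcpy (illustrative only):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // hypothetical helper: write x most-significant byte first
    static void store_be64(uint64_t x, unsigned char out[8]) {
        for (int i = 0; i < 8; i++)
            out[i] = (unsigned char)(x >> (8 * (7 - i)));
    }

    int main(void) {
        unsigned char a[8], b[8];
        for (uint64_t k = 0; k < 100000; k++) {
            store_be64(k, a);
            store_be64(k + 1, b);
            assert(memcmp(a, b, 8) < 0);   // key k sorts before key k+1
        }
        return 0;
    }
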
diff --git a/storage/tokudb/PerconaFT/src/tests/simple.cc b/storage/tokudb/PerconaFT/src/tests/simple.cc
new file mode 100644
index 00000000..07c493b7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/simple.cc
@@ -0,0 +1,89 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The purpose of this test is to verify that a failed assert will
+ * cause a panic, which should be visible via engine status.
+ * This is a manual test and should not be checked in to the repository.
+ * The panic must be induced manually in the debugger.
+ */
+
+
+#include "test.h"
+#include <db.h>
+
+static DB_ENV *env;
+
+#define FLAGS_NOLOG DB_INIT_LOCK|DB_INIT_MPOOL|DB_CREATE|DB_PRIVATE
+#define FLAGS_LOG FLAGS_NOLOG|DB_INIT_TXN|DB_INIT_LOG
+
+static int mode = S_IRWXU+S_IRWXG+S_IRWXO;
+
+static void test_shutdown(void);
+
+static void
+test_shutdown(void) {
+ int r;
+ r=env->close(env, 0); CKERR(r);
+ env = NULL;
+}
+
+static void
+setup (uint32_t flags) {
+ int r;
+ if (env)
+ test_shutdown();
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r=db_env_create(&env, 0);
+ CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, flags, mode);
+ CKERR(r);
+}
+
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup(FLAGS_LOG);
+ env->txn_checkpoint(env, 0, 0, 0);
+ print_engine_status(env);
+ test_shutdown();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/stat64-create-modify-times.cc b/storage/tokudb/PerconaFT/src/tests/stat64-create-modify-times.cc
new file mode 100644
index 00000000..6826499e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/stat64-create-modify-times.cc
@@ -0,0 +1,129 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the tree create time is returned in db->stat64
+
+#include "test.h"
+
+#include <db.h>
+#include <sys/stat.h>
+
+static void
+test_stat64_create_time (uint64_t n) {
+ if (verbose) printf("%s:%u\n", __FUNCTION__, __LINE__);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); CKERR(r);
+
+ r = env->set_cachesize(env, 0, 20*1000000, 1);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0); CKERR(r);
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+
+ r = db->set_pagesize(db, 4096); assert(r == 0);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ // initial stats
+ DB_BTREE_STAT64 s0;
+ r = db->stat64(db, txn, &s0); assert(r == 0);
+ printf("initial stat create %" PRIu64 "\n", s0.bt_create_time_sec);
+ assert(s0.bt_create_time_sec != 0);
+ assert(s0.bt_modify_time_sec == s0.bt_create_time_sec);
+ r = txn->commit(txn, 0); assert(r == 0);
+
+ // stats after create is committed
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DB_BTREE_STAT64 s1;
+ r = db->stat64(db, txn, &s1); assert(r == 0);
+ assert(s1.bt_create_time_sec == s0.bt_create_time_sec);
+ assert(s1.bt_modify_time_sec == s0.bt_modify_time_sec);
+ r = txn->commit(txn, 0); assert(r == 0);
+
+ // stats after checkpoint
+ sleep(10);
+ r = env->txn_checkpoint(env, 0, 0, 0); assert(r == 0);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DB_BTREE_STAT64 s2;
+ r = db->stat64(db, txn, &s2); assert(r == 0);
+ assert(s2.bt_create_time_sec == s1.bt_create_time_sec);
+ assert(s2.bt_modify_time_sec > s1.bt_modify_time_sec);
+ r = txn->commit(txn, 0); assert(r == 0);
+
+ // stats after insertion
+ DB_BTREE_STAT64 s3;
+ assert(n > 0);
+ for (uint64_t i = 0; i < n; i++) {
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DBT key, val;
+ r = db->put(db, txn, dbt_init(&key, &i, sizeof i), dbt_init(&val, &i, sizeof i), 0);
+ r = db->stat64(db, txn, &s3); assert(r == 0);
+ assert(s3.bt_create_time_sec == s2.bt_create_time_sec);
+ assert(s3.bt_modify_time_sec == s2.bt_modify_time_sec);
+ r = txn->commit(txn, 0); assert(r == 0);
+ }
+
+ // stats after checkpoint
+ sleep(10);
+ r = env->txn_checkpoint(env, 0, 0, 0); assert(r == 0);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DB_BTREE_STAT64 s4;
+ r = db->stat64(db, txn, &s4); assert(r == 0);
+ assert(s4.bt_create_time_sec == s3.bt_create_time_sec);
+ assert(s4.bt_modify_time_sec > s3.bt_modify_time_sec);
+ r = txn->commit(txn, 0); assert(r == 0);
+
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test_stat64_create_time(1);
+ test_stat64_create_time(1000);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/stat64-null-txn.cc b/storage/tokudb/PerconaFT/src/tests/stat64-null-txn.cc
new file mode 100644
index 00000000..492df9ab
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/stat64-null-txn.cc
@@ -0,0 +1,173 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the stat64 function on flat databases
+
+#include "test.h"
+
+#include <db.h>
+#include <sys/stat.h>
+
+static void
+test_stat64 (unsigned int N) {
+ if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *txn;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+
+ r = env->set_cachesize(env, 0, 20*1000000, 1);
+ /* Open the environment without transactions. */
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_MPOOL|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+
+ {
+ /* Don't begin a transaction, just set it to null.
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ */
+ txn = NULL;
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ /* r=txn->commit(txn, 0); assert(r==0); */
+ }
+
+ /* No transactions.
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ */
+
+ // insert sequential keys into the databases
+
+ unsigned int i;
+ uint64_t dsize=0;
+ for (i=0; i<N; i++) {
+ if (verbose>1 && i % (1<<14) == 0) {
+ printf("%s(total=%u) inserted %u so far\n", __FILE__, N, i);
+ fflush(stdout);
+ }
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%8d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ DBT key, val;
+ r=db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&val, there, strlen(there)+1),
+ 0);
+ dsize += strlen(hello)+1 + strlen(there)+1;
+ CKERR(r);
+ }
+ /* r=txn->commit(txn, 0); CKERR(r); */
+
+ // get and verify stats, should be treated as estimates
+ /* r=env->txn_begin(env, 0, &txn, 0); CKERR(r); */
+ {
+ DB_BTREE_STAT64 s;
+ r=db->stat64(db, txn, &s); CKERR(r);
+ if (verbose) {
+ char cmd[sizeof("ls -l ") + TOKU_PATH_MAX];
+ snprintf(cmd, sizeof(cmd), "ls -l %s", TOKU_TEST_FILENAME);
+ r = system(cmd);
+ CKERR(r);
+ printf("nkeys=%" PRIu64 "\nndata=%" PRIu64 "\ndsize=%" PRIu64 "\n",
+ s.bt_nkeys, s.bt_ndata, s.bt_dsize);
+ printf("fsize=%" PRIu64 "\n", s.bt_fsize);
+ printf("expected dsize=%" PRIu64 "\n", dsize);
+ }
+ assert(0 < s.bt_nkeys && s.bt_nkeys <= N);
+ assert(s.bt_ndata == s.bt_nkeys);
+ assert(0 < s.bt_dsize && s.bt_dsize <= dsize);
+ // cannot reliably test bt_fsize, because it
+ // just measures size of file on disk
+ // assert(s.bt_fsize > N);
+ }
+ /* r=txn->commit(txn, 0); CKERR(r); */
+
+ // get the last row, this forces the root estimates to be updated
+ {
+ /* r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); */
+ DBC *c = NULL;
+ r = db->cursor(db, txn, &c, 0); CKERR(r);
+ DBT key; dbt_init(&key, NULL, 0);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = c->c_get(c, &key, &val, DB_LAST);
+ CKERR(r);
+ r = c->c_close(c); CKERR(r);
+ /* r = txn->commit(txn, 0); CKERR(r); */
+ }
+
+ // get and verify stats
+ /* r=env->txn_begin(env, 0, &txn, 0); CKERR(r); */
+ {
+ DB_BTREE_STAT64 s;
+ r=db->stat64(db, txn, &s); CKERR(r);
+ if (verbose) {
+ char cmd[sizeof("ls -l ") + TOKU_PATH_MAX];
+ snprintf(cmd, sizeof(cmd), "ls -l %s", TOKU_TEST_FILENAME);
+ r = system(cmd);
+ CKERR(r);
+ printf("nkeys=%" PRIu64 "\nndata=%" PRIu64 "\ndsize=%" PRIu64 "\n",
+ s.bt_nkeys, s.bt_ndata, s.bt_dsize);
+ printf("fsize=%" PRIu64 "\n", s.bt_fsize);
+ printf("expected dsize=%" PRIu64 "\n", dsize);
+ }
+ assert(0 < s.bt_nkeys && s.bt_nkeys <= N);
+ assert(s.bt_ndata == s.bt_nkeys);
+ assert(0 < s.bt_dsize && s.bt_dsize <= dsize);
+ // cannot reliably test bt_fsize, because it
+ // just measures size of file on disk
+ //assert(s.bt_fsize > N);
+ }
+ /* r=txn->commit(txn, 0); CKERR(r); */
+
+ r=db->close(db, 0); CKERR(r);
+
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char *const argv[])
+{
+ parse_args(argc, argv);
+ test_stat64(40000);
+ test_stat64(400000);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/stat64-root-changes.cc b/storage/tokudb/PerconaFT/src/tests/stat64-root-changes.cc
new file mode 100644
index 00000000..48843a0b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/stat64-root-changes.cc
@@ -0,0 +1,249 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify stats after a new row is inserted into the root
+// verify stats after a row overwrite in the root
+// verify stats after a row deletion in the root
+// verify stats after an update callback inserts a row
+// verify stats after an update callback overwrites a row
+// verify stats after an update callback deletes a row
+
+#include <db.h>
+#include <unistd.h>
+#include <sys/stat.h>
+
+static int
+my_update_callback(DB *db UU(), const DBT *key UU(), const DBT *old_val, const DBT *extra, void (*set_val)(const DBT *new_val, void *set_extra), void *set_extra) {
+ if (old_val != NULL && old_val->size == 42) // special code for delete
+ set_val(NULL, set_extra);
+ else
+ set_val(extra, set_extra);
+ return 0;
+}
+
+static void
+run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->set_redzone(env, 0); CKERR(r);
+ env->set_update(env, my_update_callback);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0); CKERR(r);
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // verify that stats include a new row inserted into the root
+ {
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ int key = 1; char val = 1;
+ DBT k,v;
+ r = db->put(db, txn, dbt_init(&k, &key, sizeof key), dbt_init(&v, &val, sizeof val), 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ DB_BTREE_STAT64 s;
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys == 1 && s.bt_dsize == sizeof key + sizeof val);
+
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys == 1 && s.bt_dsize == sizeof key + sizeof val);
+ }
+
+ // verify that stats are updated by row overwrite in the root
+ {
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ int key = 1; int val = 2;
+ DBT k,v;
+ r = db->put(db, txn, dbt_init(&k, &key, sizeof key), dbt_init(&v, &val, sizeof val), 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ DB_BTREE_STAT64 s;
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys == 1 && s.bt_dsize == sizeof key + sizeof val);
+
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys == 1 && s.bt_dsize == sizeof key + sizeof val);
+ }
+
+ // verify that stats are updated by row deletion in the root
+ {
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ int key = 1;
+ DBT k;
+ r = db->del(db, txn, dbt_init(&k, &key, sizeof key), 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ DB_BTREE_STAT64 s;
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys <= 1 && s.bt_dsize == 0); // since garbage collection may not occur, the key count may not be updated
+
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ // garbage collection has happened in db->close, so
+ // the number of keys should be 0
+ assert(s.bt_nkeys == 0 && s.bt_dsize == 0);
+ }
+
+ // verify update of non-existing key inserts a row
+ //
+ //
+ // NOTE: #5744 was caught by this test below.
+ //
+ //
+ {
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ int key = 1; char val = 1;
+ DBT k = { .data = &key, .size = sizeof key };
+ DBT e = { .data = &val, .size = sizeof val };
+ r = db->update(db, txn, &k, &e, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ DB_BTREE_STAT64 s;
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys == 1);
+
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys == 1);
+ }
+
+ // verify update callback overwrites the row
+ {
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ int key = 1; int val = 2;
+ DBT k = { .data = &key, .size = sizeof key };
+ DBT e = { .data = &val, .size = sizeof val };
+ r = db->update(db, txn, &k, &e, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ DB_BTREE_STAT64 s;
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys == 1 && s.bt_dsize == sizeof key + sizeof val);
+
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys == 1 && s.bt_dsize == sizeof key + sizeof val);
+ }
+
+ // verify update callback deletes the row
+ {
+ // insert a new row
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ int key = 1; char val[42]; memset(val, 0, sizeof val);
+ DBT k = { .data = &key, .size = sizeof key };
+ DBT e = { .data = &val, .size = sizeof val };
+ r = db->update(db, txn, &k, &e, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ DB_BTREE_STAT64 s;
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys <= 2 && s.bt_dsize == sizeof key + sizeof val);
+
+ // update it again, this should delete the row
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->update(db, txn, &k, &e, 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys <= 2 && s.bt_dsize == 0); // since garbage collection may not occur, the key count may not be updated
+
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->stat64(db, NULL, &s); CKERR(r);
+ assert(s.bt_nkeys <= 2 && s.bt_dsize == 0);
+ }
+
+ r = db->close(db, 0); CKERR(r);
+
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc , char * const argv[]) {
+ parse_args(argc, argv);
+ run_test();
+ return 0;
+}
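
my_update_callback above uses the size of the old value as its dispatch: a 42-byte old value is taken as a request to delete the row, while anything else (including a missing row) is overwritten with the caller's extra value. The standalone sketch below restates that dispatch with plain structs; decide() and the local val type are hypothetical names, not part of the patch or the DB API (illustrative only):

    #include <cassert>
    #include <cstddef>

    struct val { const void *data; size_t size; };
    enum action { INSERT_OR_OVERWRITE, DELETE_ROW };

    static action decide(const val *old_val) {
        if (old_val != NULL && old_val->size == 42)   // 42-byte value is the delete marker
            return DELETE_ROW;
        return INSERT_OR_OVERWRITE;                   // includes the "no old row" insert case
    }

    int main(void) {
        val small = { "x", 1 };
        val magic = { NULL, 42 };
        assert(decide(NULL)   == INSERT_OR_OVERWRITE);
        assert(decide(&small) == INSERT_OR_OVERWRITE);
        assert(decide(&magic) == DELETE_ROW);
        return 0;
    }
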
diff --git a/storage/tokudb/PerconaFT/src/tests/stat64.cc b/storage/tokudb/PerconaFT/src/tests/stat64.cc
new file mode 100644
index 00000000..40773e52
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/stat64.cc
@@ -0,0 +1,168 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test the stat64 function on flat databases
+
+#include "test.h"
+
+#include <db.h>
+#include <sys/stat.h>
+
+static void
+test_stat64 (unsigned int N) {
+ if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *txn;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+
+ r = env->set_cachesize(env, 0, 20*1000000, 1);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+
+ {
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+ }
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ // insert sequential keys into the databases
+ unsigned int i;
+ uint64_t dsize=0;
+ for (i=0; i<N; i++) {
+ if (verbose>1 && i % (1<<14) == 0) {
+ printf("%s(total=%u) inserted %u so far\n", __FILE__, N, i);
+ fflush(stdout);
+ }
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%8d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ DBT key, val;
+ r=db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&val, there, strlen(there)+1),
+ 0);
+ dsize += strlen(hello)+1 + strlen(there)+1;
+ CKERR(r);
+ }
+ r=txn->commit(txn, 0); CKERR(r);
+
+ // get and verify stats, should be treated as estimates
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ {
+ DB_BTREE_STAT64 s;
+ r=db->stat64(db, txn, &s); CKERR(r);
+ if (verbose) {
+ char cmd[sizeof("ls -l ") + TOKU_PATH_MAX];
+ snprintf(cmd, sizeof(cmd), "ls -l %s", TOKU_TEST_FILENAME);
+ r = system(cmd);
+ CKERR(r);
+ printf("N=%d\n", N);
+ printf("nkeys=%" PRIu64 "\nndata=%" PRIu64 "\ndsize=%" PRIu64 "\n",
+ s.bt_nkeys, s.bt_ndata, s.bt_dsize);
+ printf("fsize=%" PRIu64 "\n", s.bt_fsize);
+ printf("expected dsize=%" PRIu64 "\n", dsize);
+ }
+ assert(0 < s.bt_nkeys && s.bt_nkeys <= N);
+ assert(s.bt_ndata == s.bt_nkeys);
+ assert(0 < s.bt_dsize && s.bt_dsize <= dsize);
+ // cannot reliably test bt_fsize, because it
+ // measures the size of the file on disk.
+ //assert(s.bt_fsize > N);
+ }
+ r=txn->commit(txn, 0); CKERR(r);
+
+ // get the last row, this forces the root estimates to be updated.
+ {
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DBC *c = NULL;
+ r = db->cursor(db, txn, &c, 0); CKERR(r);
+ DBT key; dbt_init(&key, NULL, 0);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = c->c_get(c, &key, &val, DB_LAST);
+ CKERR(r);
+ r = c->c_close(c); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+
+ // get and verify stats
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ {
+ DB_BTREE_STAT64 s;
+ r=db->stat64(db, txn, &s); CKERR(r);
+ if (verbose) {
+ char cmd[sizeof("ls -l ") + TOKU_PATH_MAX];
+ snprintf(cmd, sizeof(cmd), "ls -l %s", TOKU_TEST_FILENAME);
+ r = system(cmd);
+ CKERR(r);
+ printf("N=%d\n", N);
+ printf("nkeys=%" PRIu64 "\nndata=%" PRIu64 "\ndsize=%" PRIu64 "\n",
+ s.bt_nkeys, s.bt_ndata, s.bt_dsize);
+ printf("fsize=%" PRIu64 "\n", s.bt_fsize);
+ printf("expected dsize=%" PRIu64 "\n", dsize);
+ }
+ assert(0 < s.bt_nkeys && s.bt_nkeys <= N);
+ assert(s.bt_ndata == s.bt_nkeys);
+ assert(0 < s.bt_dsize && s.bt_dsize <= dsize);
+ // cannot reliably test bt_fsize, because it
+ // measures the size of the file on disk.
+ //assert(s.bt_fsize > N);
+ }
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=db->close(db, 0); CKERR(r);
+
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char *const argv[])
+{
+ parse_args(argc, argv);
+ test_stat64(40000);
+ test_stat64(400000);
+ return 0;
+}
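
The dsize accumulated in the insert loop above is the total of key and value lengths, each counted with its terminating NUL, which is why the test only asserts bt_dsize <= dsize. A standalone check of what a single row contributes, mirroring the snprintf formats used in the loop (illustrative only):

    #include <cassert>
    #include <cstdio>
    #include <cstring>

    int main(void) {
        char hello[30], there[30];
        snprintf(hello, sizeof hello, "hello%8d", 0);   // "hello" plus an 8-wide number
        snprintf(there, sizeof there, "there%d", 0);    // "there0"
        size_t row = strlen(hello) + 1 + strlen(there) + 1;
        assert(strlen(hello) == 13);
        assert(strlen(there) == 6);
        assert(row == 21);   // bytes charged to dsize for row 0
        return 0;
    }
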
diff --git a/storage/tokudb/PerconaFT/src/tests/stress-gc.cc b/storage/tokudb/PerconaFT/src/tests/stress-gc.cc
new file mode 100644
index 00000000..850d5b5c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/stress-gc.cc
@@ -0,0 +1,115 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Stress MVCC garbage collection: randomly begin and commit many transactions,
+// mixing snapshot and serializable isolation, with garbage collection
+// verification turned on.
+
+#include "test.h"
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int useseed;
+
+ {
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ useseed = tv.tv_sec+tv.tv_usec*997; // magic: 997 is a prime, and a million (microseconds/second) times 997 is still 32 bits.
+ }
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ db_env_set_mvcc_garbage_collection_verification(1);
+ int max_txns = 400;
+ int num_runs = 2000;
+ DB_TXN* txns[max_txns];
+ memset(txns, 0, sizeof(txns));
+ int num_txns = 0;
+ int i;
+
+ if (verbose) printf("seed=%d\n", useseed);
+ srandom(useseed);
+
+ for (i = 0; i < num_runs; i++) {
+ int rand_num = random()%max_txns;
+ /*
+ if (i%50 == 0) {
+ printf("rand_num %d\n", rand_num);
+ printf("num_txns %d\n", num_txns);
+ printf("iteration %d\n", i);
+ }
+ */
+ if (rand_num >= num_txns) {
+ // add a txn
+ assert(txns[num_txns] == NULL);
+ // 7 out of 8 times, it is snapshot, otherwise, serializable
+ int is_snapshot = (random() % 8 != 0);
+ r = env->txn_begin(env, NULL, &txns[num_txns], is_snapshot ? DB_TXN_SNAPSHOT : 0);
+ CKERR(r);
+ num_txns++;
+ }
+ else {
+ // commit the txn
+ r = txns[rand_num]->commit(txns[rand_num], 0);
+ CKERR(r);
+ int j;
+ for (j = rand_num; j < num_txns-1; j++) {
+ txns[j] = txns[j+1];
+ }
+ txns[num_txns-1] = NULL;
+ num_txns--;
+ }
+ }
+
+ for (i = 0; i < num_txns; i++) {
+ r = txns[i]->commit(txns[i], 0);
+ CKERR(r);
+ }
+
+ r = env->close(env, 0);
+ CKERR(r);
+
+ return 0;
+}
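
When a randomly chosen transaction commits, the loop above keeps txns[] packed by shifting the tail of the array down one slot and clearing the last entry, so slots 0..num_txns-1 always hold live transactions. A standalone sketch of that compaction step on plain ints; remove_slot is a hypothetical name (illustrative only):

    #include <cassert>

    static void remove_slot(int *slots, int &count, int idx) {
        for (int j = idx; j < count - 1; j++)
            slots[j] = slots[j + 1];   // shift the tail left by one
        slots[count - 1] = 0;          // clear the vacated last slot
        count--;
    }

    int main(void) {
        int slots[5] = { 10, 20, 30, 40, 50 };
        int count = 5;
        remove_slot(slots, count, 1);  // "commit" the entry in slot 1
        assert(count == 4);
        assert(slots[0] == 10 && slots[1] == 30 && slots[2] == 40 && slots[3] == 50);
        return 0;
    }
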
diff --git a/storage/tokudb/PerconaFT/src/tests/stress-gc2.cc b/storage/tokudb/PerconaFT/src/tests/stress-gc2.cc
new file mode 100644
index 00000000..01339f0d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/stress-gc2.cc
@@ -0,0 +1,81 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+// The intent of this test is to exercise creating and aborting transactions
+// with garbage collection verification turned on
+
+static int random_sleep(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
+ usleep(random()%2000);
+ return 0;
+}
+
+
+static void
+stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ myargs[i].operation = random_sleep;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int UU(argc), char *const UU(argv[])) {
+ struct cli_args args = get_default_args_for_perf();
+ db_env_set_mvcc_garbage_collection_verification(1);
+ args.num_seconds = 60;
+ args.num_ptquery_threads = 12;
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/stress-test.cc b/storage/tokudb/PerconaFT/src/tests/stress-test.cc
new file mode 100644
index 00000000..9d21f695
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/stress-test.cc
@@ -0,0 +1,264 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+enum state {
+ UNTOUCHED = 0,
+ INSERTED,
+ DELETED
+};
+struct pair {
+ DBT key, val;
+ enum state state;
+};
+
+#define NKEYS (1<<20)
+#define NDELS (1<<17)
+
+int keys[NKEYS];
+struct pair pairs[NKEYS];
+struct pair sorted[NKEYS];
+int dels[NDELS];
+
+char some_data[200] = ("abetefocebbrk3894d,h"
+ "tebe73t90htb349i83d4"
+ "h3498bk4onhaosnetkb0"
+ "bk934bkgpbk0,8kh4c.r"
+ "bk9,438k4bkr,09k8hkb"
+ "bk9,gr,gkhb,k9,.bkg,"
+ "b4kg4,39k,3k890,.bkr"
+ "bugk349kc,b.rk,.0k8,"
+ "bkreb,0k8.p,k,r,bkhr"
+ "kb.rpgxbeu0xcehu te");
+
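+// qsort comparator: order pairs by key using memcmp over the shorter key
+// length, with the shorter key sorting first when that prefix ties.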
+static int
+pair_cmp(const void *a, const void *b)
+{
+ const struct pair *CAST_FROM_VOIDP(p1, a);
+ const struct pair *CAST_FROM_VOIDP(p2, b);
+ if (p1->key.size < p2->key.size) {
+ int c = memcmp(p1->key.data, p2->key.data, p1->key.size);
+ if (!c) {
+ return -1;
+ }
+ return c;
+ } else if (p1->key.size > p2->key.size) {
+ int c = memcmp(p1->key.data, p2->key.data, p2->key.size);
+ if (!c) {
+ return 1;
+ }
+ return c;
+ } else {
+ return memcmp(p1->key.data, p2->key.data, p1->key.size);
+ }
+}
+
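+// Build NKEYS shuffled keys with random-length values pointing into
+// some_data, then pick NDELS distinct indices into dels[]; the DELETED marks
+// used to avoid duplicates are reset so every pair starts out UNTOUCHED.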
+static void
+gen_data(void)
+{
+ srandom(0);
+ for (int i = 0; i < NKEYS; ++i) {
+ keys[i] = htonl(i);
+ }
+ for (int e = NKEYS-1; e > 0; --e) {
+ int r = random() % e;
+ int t = keys[r];
+ keys[r] = keys[e];
+ keys[e] = t;
+ }
+ for (int i = 0; i < NKEYS; ++i) {
+ int vallen = random() % 150;
+ int idx = random() % (200 - vallen);
+ dbt_init(&pairs[i].key, &keys[i], sizeof keys[i]);
+ dbt_init(&pairs[i].val, &some_data[idx], vallen);
+ pairs[i].state = UNTOUCHED;
+ }
+
+ for (int i = 0; i < NDELS; ) {
+ int idx = random() % NKEYS;
+ if (pairs[idx].state != DELETED) {
+ dels[i++] = idx;
+ pairs[idx].state = DELETED;
+ }
+ }
+ for (int i = 0; i < NDELS; ++i) {
+ pairs[dels[i]].state = UNTOUCHED;
+ }
+}
+
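+// Insert the pairs in ten chunks, deleting each scheduled key as soon as it
+// has been inserted, then after every chunk verify the db against a sorted
+// copy with point queries and forward/backward cursor scans (each roughly
+// half the time).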
+static void
+run_test(DB *db)
+{
+ DB_TXN * const null_txn = 0;
+ int p = 0, d = 0;
+ for (int cursz = NKEYS / 10; cursz <= NKEYS; cursz += NKEYS / 10) {
+ // insert a chunk
+ for (; p < cursz; ++p) {
+ // put an element in
+ invariant(pairs[p].state == UNTOUCHED);
+ { int chk_r = db->put(db, null_txn, &pairs[p].key, &pairs[p].val, 0); CKERR(chk_r); }
+ pairs[p].state = INSERTED;
+ // delete everything we can so far, in the given order
+ for (; d < NDELS && dels[d] <= p; ++d) {
+ invariant(pairs[dels[d]].state == INSERTED);
+ { int chk_r = db->del(db, null_txn, &pairs[dels[d]].key, 0); CKERR(chk_r); }
+ pairs[dels[d]].state = DELETED;
+ }
+ }
+
+ // get what the data should be
+ memcpy(sorted, pairs, cursz * (sizeof pairs[0]));
+ qsort(sorted, cursz, sizeof sorted[0], pair_cmp);
+
+ // verify the data
+
+ // with point queries
+ if ((random() % 10) < 5) {
+ for (int i = 0; i < cursz; ++i) {
+ DBT val; dbt_init(&val, NULL, 0);
+ invariant(sorted[i].state != UNTOUCHED);
+ int r = db->get(db, null_txn, &sorted[i].key, &val, 0);
+ if (sorted[i].state == INSERTED) {
+ CKERR(r);
+ assert(val.size == sorted[i].val.size);
+ assert(memcmp(val.data, sorted[i].val.data, val.size) == 0);
+ } else {
+ CKERR2(r, DB_NOTFOUND);
+ }
+ }
+ }
+
+ // with a forward traversal
+ if ((random() % 10) < 5) {
+ DBC *cur;
+ { int chk_r = db->cursor(db, null_txn, &cur, 0); CKERR(chk_r); }
+ DBT ck, cv; dbt_init(&ck, NULL, 0); dbt_init(&cv, NULL, 0);
+ int i, r;
+ r = cur->c_get(cur, &ck, &cv, DB_FIRST);
+ CKERR(r);
+ for (i = 0;
+ r == 0 && i < cursz;
+ r = cur->c_get(cur, &ck, &cv, DB_NEXT), ++i) {
+ invariant(sorted[i].state != UNTOUCHED);
+ while (i < cursz && sorted[i].state == DELETED) {
+ i++;
+ invariant(sorted[i].state != UNTOUCHED);
+ }
+ invariant(i < cursz);
+ assert(ck.size == sorted[i].key.size);
+ assert(memcmp(ck.data, sorted[i].key.data, ck.size) == 0);
+ assert(cv.size == sorted[i].val.size);
+ assert(memcmp(cv.data, sorted[i].val.data, cv.size) == 0);
+ }
+ while (i < cursz && sorted[i].state == DELETED) {
+ i++;
+ invariant(sorted[i].state != UNTOUCHED);
+ }
+ assert(i == cursz);
+ assert(r == DB_NOTFOUND);
+ }
+
+ // with a backward traversal
+ if ((random() % 10) < 5) {
+ DBC *cur;
+ { int chk_r = db->cursor(db, null_txn, &cur, 0); CKERR(chk_r); }
+ DBT ck, cv; dbt_init(&ck, NULL, 0); dbt_init(&cv, NULL, 0);
+ int i, r;
+ r = cur->c_get(cur, &ck, &cv, DB_LAST);
+ CKERR(r);
+ for (i = cursz - 1;
+ r == 0 && i >= 0;
+ r = cur->c_get(cur, &ck, &cv, DB_PREV), --i) {
+ invariant(sorted[i].state != UNTOUCHED);
+ while (i >= 0 && sorted[i].state == DELETED) {
+ i--;
+ invariant(sorted[i].state != UNTOUCHED);
+ }
+ invariant(i >= 0);
+ assert(ck.size == sorted[i].key.size);
+ assert(memcmp(ck.data, sorted[i].key.data, ck.size) == 0);
+ assert(cv.size == sorted[i].val.size);
+ assert(memcmp(cv.data, sorted[i].val.data, cv.size) == 0);
+ }
+ while (i >= 0 && sorted[i].state == DELETED) {
+ i--;
+ invariant(sorted[i].state != UNTOUCHED);
+ }
+ assert(i == -1);
+ assert(r == DB_NOTFOUND);
+ }
+ }
+}
+
+static void
+init_db(DB_ENV **env, DB **db)
+{
+ DB_TXN * const null_txn = 0;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(env, 0); CKERR(chk_r); }
+ (*env)->set_errfile(*env, stderr);
+ { int chk_r = (*env)->open(*env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); CKERR(chk_r); }
+ { int chk_r = db_create(db, *env, 0); CKERR(chk_r); }
+ { int chk_r = (*db)->open(*db, null_txn, "test.stress.ft_handle", "main",
+ DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+}
+
+static void
+destroy_db(DB_ENV *env, DB *db)
+{
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+int
+test_main(int argc, char * const argv[])
+{
+ DB_ENV *env;
+ DB *db;
+
+ parse_args(argc, argv);
+ gen_data();
+ init_db(&env, &db);
+ run_test(db);
+ destroy_db(env, db);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/stress_openclose.h b/storage/tokudb/PerconaFT/src/tests/stress_openclose.h
new file mode 100644
index 00000000..3a55ca44
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/stress_openclose.h
@@ -0,0 +1,284 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_pthread.h>
+#include "test.h"
+#include "threaded_stress_test_helpers.h"
+#include <portability/toku_atomic.h>
+
+// set this to true for the recovery version of this stress test
+// the way this works is to include this header and set
+// stress_openclose_crash_at_end = true;
+static bool stress_openclose_crash_at_end;
+
+//
+// Stress test ft reference counting
+//
+// Three things keep a fractal tree in memory by holding a reference:
+// - open ft handle
+// - live txn that did a write op
+// - checkpoint
+//
+// To stress reference counting, we would like threads which:
+// - take checkpoints at random intervals
+// - update random values, random point queries for auditing
+// * sometimes close handle before commit.
+// - close random dictionaries
+//
+// Here's how we can do it:
+//
+// A bunch of threads randomly choose from N buckets. Each bucket
+// has a DB, an is_open bit, and a lock.
+// - in a single txn, each thread will do some small number of
+//   queries or updates on random buckets, opening the dbs if
+//   they were closed and possibly closing them afterwards.
+// - this should stress both open db handles and various txns
+//   referencing dbs simultaneously.
+//
+// and all while this is happening, throw in scanners, updaters,
+// and query threads that all assert the contents of these dbs
+// are correct, even after recovery.
+
+#define verbose_printf(...) \
+ do { \
+ if (verbose) { \
+ printf(__VA_ARGS__); \
+ fflush(stdout); \
+ } \
+ } while (0)
+
+// a bunch of buckets with dbs, a lock, and an is_open bit
+// threads will choose buckets randomly for update, query,
+// and then maybe open/close the bucket's db.
+struct db_bucket {
+ DB_ENV *env;
+ DB *db;
+ bool is_open;
+ toku_mutex_t mutex;
+};
+static struct db_bucket *buckets;
+static int num_buckets;
+
+// each worker operation touches at most this many buckets in one txn
+static int
+choose_random_iteration_count(ARG arg) {
+ const int max_iteration_count = 8;
+ int k = myrandom_r(arg->random_data) % max_iteration_count;
+ return k + 1;
+}
+
+// open the ith db in the array, asserting success
+static void
+open_ith_db(DB_ENV *env, DB **db, int i) {
+ char name[30];
+ memset(name, 0, sizeof(name));
+ get_ith_table_name(name, sizeof(name), i);
+ int r = db_create(db, env, 0);
+ CKERR(r);
+ r = (*db)->open(*db, null_txn, name, NULL, DB_BTREE, 0, 0666);
+ CKERR(r);
+}
+
+// debugging counter to maintain the invariant that open_buckets <= num_buckets
+static int open_buckets;
+
+// choose and lock a random bucket, possibly opening a db
+static struct db_bucket *
+lock_and_maybe_open_some_db(ARG arg) {
+ int k = myrandom_r(arg->random_data) % num_buckets;
+ struct db_bucket *bucket = &buckets[k];
+ toku_mutex_lock(&bucket->mutex);
+ if (!bucket->is_open) {
+        // choose a random DB from 0..num_buckets-1 to associate with this bucket
+ // then, mark the bucket as open.
+ int i = myrandom_r(arg->random_data) % num_buckets;
+ open_ith_db(bucket->env, &bucket->db, i);
+ bucket->is_open = true;
+ assert(toku_sync_fetch_and_add(&open_buckets, 1) < num_buckets);
+ verbose_printf("opened db %d in bucket %d\n", i, k);
+ }
+ return bucket;
+}
+
+// release the lock on a bucket, possibly closing its db
+static void
+unlock_and_maybe_close_db(struct db_bucket *bucket, ARG arg) {
+ static const int p = 5;
+ int k = ((unsigned) myrandom_r(arg->random_data)) % 100;
+ // we should close with probability approximately p / 100
+ assert(bucket->is_open);
+ if (k <= p) {
+ DB *db = bucket->db;
+ int r = db->close(db, 0);
+ CKERR(r);
+ bucket->is_open = false;
+ int old_open_buckets = toku_sync_fetch_and_sub(&open_buckets, 1);
+ assert(old_open_buckets > 0);
+ verbose_printf("decided to close a bucket's db before unlocking\n");
+ }
+ toku_mutex_unlock(&bucket->mutex);
+}
+
+// scan some dbs, verifying the correct sum.
+static int
+scan_some_dbs(DB_TXN *txn, ARG arg, void* operation_extra, void *UU(stats_extra)) {
+ int r = 0;
+ verbose_printf("scanning some dbs\n");
+ struct scan_op_extra* CAST_FROM_VOIDP(extra, operation_extra);
+ // scan every db, one by one, and verify that the contents are correct
+ for (int i = 0; r == 0 && run_test && i < arg->cli->num_DBs; i++) {
+ struct db_bucket *bucket = lock_and_maybe_open_some_db(arg);
+ const bool check_sum = true;
+ r = scan_op_and_maybe_check_sum(bucket->db, txn, extra, check_sum);
+ invariant(r == 0 || r == DB_LOCK_NOTGRANTED);
+ unlock_and_maybe_close_db(bucket, arg);
+ }
+ return r;
+}
+
+// update a couple of dbs in some buckets with a txn
+static int
+update_some_dbs(DB_TXN *txn, ARG arg, void *op_extra, void *stats_extra) {
+ int r = 0;
+ verbose_printf("updating some dbs\n");
+ const int iterations = choose_random_iteration_count(arg);
+ for (int i = 0; r == 0 && run_test && i < iterations; i++) {
+ struct db_bucket *bucket = lock_and_maybe_open_some_db(arg);
+ // does an update operation on this bucket's db
+ r = update_op_db(bucket->db, txn, arg, op_extra, stats_extra);
+ invariant(r == 0 || r == DB_LOCK_NOTGRANTED);
+ unlock_and_maybe_close_db(bucket, arg);
+ }
+ return r;
+}
+
+// point query a couple of dbs in some buckets with a txn
+static int
+ptquery_some_dbs(DB_TXN *txn, ARG arg, void *UU(op_extra), void *UU(stats_extra)) {
+ int r = 0;
+ verbose_printf("querying some dbs\n");
+ const int iterations = choose_random_iteration_count(arg);
+ for (int i = 0; r == 0 && run_test && i < iterations; i++) {
+ struct db_bucket *bucket = lock_and_maybe_open_some_db(arg);
+ // does a point query on a random key for this bucket's db
+ const bool check_sum = true;
+ r = ptquery_and_maybe_check_op(bucket->db, txn, arg, check_sum);
+ invariant(r == 0 || r == DB_LOCK_NOTGRANTED);
+ unlock_and_maybe_close_db(bucket, arg);
+ }
+ return r;
+}
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ const int update_threads = cli_args->num_update_threads;
+ const int query_threads = cli_args->num_ptquery_threads;
+ const int total_threads = update_threads + query_threads + 1;
+
+ struct arg myargs[total_threads];
+ for (int i = 0; i < total_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+
+ struct scan_op_extra soe[4];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_some_dbs;
+
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ // make the guy that updates the db
+ for (int i = 1; i < 1 + update_threads; ++i) {
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_some_dbs;
+ myargs[i].do_prepare = true;
+ }
+ // make the guy that does point queries
+ for (int i = 1 + update_threads; i < total_threads; i++) {
+ myargs[i].operation = ptquery_some_dbs;
+ myargs[i].do_prepare = true;
+ }
+
+ num_buckets = cli_args->num_DBs;
+ open_buckets = num_buckets;
+ // each thread gets access to this array of db buckets, from
+ // which they can choose a random db to either touch or query
+ XMALLOC_N(num_buckets, buckets);
+ for (int i = 0; i < num_buckets; i++) {
+ struct db_bucket bucket = {.env = env, .db = dbp[i], .is_open = true};
+ buckets[i] = bucket;
+ toku_mutex_init(toku_uninstrumented, &buckets[i].mutex, nullptr);
+ }
+ // run all of the query and update workers. they may randomly open
+ // and close the dbs in each db_bucket to be some random dictionary,
+ // so when they're done we'll have to clean up the mess so this
+ // stress test can exit gracefully expecting db[i] = the ith db
+ // verbose_printf("stressing %d tables using %d update threads, %d query
+ // threads\n",
+ // num_buckets, update_threads, query_threads);
+ verbose_printf("stressing %d tables using %d update threads\n",
+ num_buckets, update_threads);
+ // stress_openclose_crash_at_end should be changed to true or false,
+    // depending on whether this test is for recovery or not.
+ const bool crash_at_end = stress_openclose_crash_at_end;
+ run_workers(myargs, total_threads, cli_args->num_seconds, crash_at_end, cli_args);
+
+ // the stress test is now complete. get ready for shutdown/close.
+ //
+ // make sure that every db in the original array is opened
+ // as it was when it was passed in.
+ for (int i = 0; i < num_buckets; i++) {
+ // close whatever is open
+ if (buckets[i].is_open) {
+ DB *db = buckets[i].db;
+ int r = db->close(db, 0);
+ CKERR(r);
+ }
+ // put the correct db back, then save the pointer
+ // into the dbp array we were given
+ open_ith_db(env, &buckets[i].db, i);
+ dbp[i] = buckets[i].db;
+ }
+
+ toku_free(buckets);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test-5138.cc b/storage/tokudb/PerconaFT/src/tests/test-5138.cc
new file mode 100644
index 00000000..4a55da02
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test-5138.cc
@@ -0,0 +1,87 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that full optimizations, including the "last IPO pass" and
+// static linking, don't break lzma
+
+#include "test.h"
+
+int test_main(int argc, char * const argv[]) {
+ int r;
+ parse_args(argc, argv);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB *db;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->set_compression_method(db, TOKU_LZMA_METHOD);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+
+ DBT key, val;
+ unsigned int i;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &i, sizeof(i));
+ for (i = 0; i < 1000; ++i) {
+ r = db->put(db, txn, keyp, valp, 0);
+ CKERR(r);
+ }
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = db->close(db, 0);
+ CKERR(r);
+ r = env->close(env, 0);
+ CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test-nested-xopen-eclose.cc b/storage/tokudb/PerconaFT/src/tests/test-nested-xopen-eclose.cc
new file mode 100644
index 00000000..ee6b80e1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test-nested-xopen-eclose.cc
@@ -0,0 +1,142 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the env close aborts open txns
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ DB_TXN *txnb;
+ r = env->txn_begin(env, txn, &txnb, 0); CKERR(r);
+
+ r = env->close(env, 0);
+ assert(r == EINVAL);
+
+#if 0
+ r = txn->abort(txn); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+#endif
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ // run recovery
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char *const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover } \n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char *const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test-prepare.cc b/storage/tokudb/PerconaFT/src/tests/test-prepare.cc
new file mode 100644
index 00000000..2f04afa2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test-prepare.cc
@@ -0,0 +1,139 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <sys/wait.h>
+
+static void clean_env (const char *envdir) {
+ const int len = strlen(envdir)+100;
+ char cmd[len];
+ snprintf(cmd, len, "rm -rf %s", envdir);
+ int r = system(cmd);
+ CKERR(r);
+ CKERR(toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO));
+}
+
+static void setup_env (DB_ENV **envp, const char *envdir) {
+ { int chk_r = db_env_create(envp, 0); CKERR(chk_r); }
+ (*envp)->set_errfile(*envp, stderr);
+ { int chk_r = (*envp)->set_redzone(*envp, 0); CKERR(chk_r); }
+ { int chk_r = (*envp)->open(*envp, envdir, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
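+// Create a fresh environment, insert one row inside a txn, and leave that
+// txn prepared with gid[0] == 42; optionally commit it as well.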
+static void setup_env_and_prepare (DB_ENV **envp, const char *envdir, bool commit) {
+ DB *db;
+ DB_TXN *txn;
+ clean_env(envdir);
+ setup_env(envp, envdir);
+ CKERR(db_create(&db, *envp, 0));
+ CKERR(db->open(db, NULL, "foo.db", 0, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, S_IRWXU+S_IRWXG+S_IRWXO));
+ CKERR((*envp)->txn_begin(*envp, 0, &txn, 0));
+ DBT key;
+ dbt_init(&key, "foo", 4);
+ CKERR(db->put(db, txn, &key, &key, 0));
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ uint8_t gid[DB_GID_SIZE];
+ memset(gid, 0, DB_GID_SIZE);
+ gid[0]=42;
+ CKERR(txn->prepare(txn, gid, 0));
+ if (commit)
+ CKERR(txn->commit(txn, 0));
+}
+
+static void test1 (void) {
+ pid_t pid;
+ bool do_fork = true;
+ if (!do_fork || 0==(pid=fork())) {
+ DB_ENV *env;
+ setup_env_and_prepare(&env, TOKU_TEST_FILENAME, false);
+ {
+ DB_PREPLIST l[1];
+ long count=-1;
+ CKERR(env->txn_recover(env, l, 1, &count, DB_FIRST));
+ printf("%s:%d count=%ld\n", __FILE__, __LINE__, count);
+ assert(count==1);
+ assert(l[0].gid[0]==42);
+ }
+ exit(0);
+ }
+ int status;
+ if (do_fork) {
+ pid_t pid2 = wait(&status);
+ assert(pid2==pid);
+ }
+
+ DB_ENV *env2;
+ char envdir2[TOKU_PATH_MAX+1];
+ setup_env_and_prepare(&env2, toku_path_join(envdir2, 2, TOKU_TEST_FILENAME, "envdir2"), true);
+
+ // Now we can look at env2 in the debugger to see if we managed to make it the same
+
+ DB_ENV *env;
+ setup_env(&env, TOKU_TEST_FILENAME);
+
+ {
+ DB_PREPLIST l[1];
+ long count=-1;
+ int r = env->txn_recover(env, l, 1, &count, DB_FIRST);
+ printf("r=%d count=%ld\n", r, count);
+ assert(count==1);
+ assert(l[0].gid[0]==42);
+ for (int i=1; i<DB_GID_SIZE; i++) {
+ assert(l[0].gid[i]==0);
+ }
+ { int chk_r = l->txn->commit(l->txn, 0); CKERR(chk_r); }
+ }
+ { int chk_r = env2->close(env2, 0); CKERR(chk_r); }
+ { int chk_r = env ->close(env, 0); CKERR(chk_r); }
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+ // first test: open an environment, a db, a txn, and do a prepare. Then do txn_prepare (without even closing the environment).
+ test1();
+
+
+    // second test: open an environment, a db, a txn, prepare, close the environment. Then reopen and do txn_prepare.
+
+ // third test: make sure there is an fsync on txn_prepare, but not on the following commit.
+
+
+    // Then close the environment. Find out what BDB does when asked for the txn prepares.
+ // Other tests: read prepared txns, 1 at a time. Then close it and read them again.
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test-prepare2.cc b/storage/tokudb/PerconaFT/src/tests/test-prepare2.cc
new file mode 100644
index 00000000..73d82aad
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test-prepare2.cc
@@ -0,0 +1,161 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <sys/wait.h>
+
+// Verify that if tokudb crashes during recovery, then the prepared transactions are still prepared.
+
+static void clean_env (const char *envdir) {
+ const int len = strlen(envdir)+100;
+ char cmd[len];
+ snprintf(cmd, len, "rm -rf %s", envdir);
+ int r = system(cmd);
+ CKERR(r);
+ CKERR(toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO));
+}
+
+static void setup_env (DB_ENV **envp, const char *envdir) {
+ { int chk_r = db_env_create(envp, 0); CKERR(chk_r); }
+ (*envp)->set_errfile(*envp, stderr);
+ { int chk_r = (*envp)->set_redzone(*envp, 0); CKERR(chk_r); }
+ { int chk_r = (*envp)->open(*envp, envdir, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void setup_env_and_prepare (DB_ENV **envp, const char *envdir, bool commit) {
+ DB *db;
+ DB_TXN *txn;
+ clean_env(envdir);
+ setup_env(envp, envdir);
+ CKERR(db_create(&db, *envp, 0));
+ CKERR(db->open(db, NULL, "foo.db", 0, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, S_IRWXU+S_IRWXG+S_IRWXO));
+ CKERR((*envp)->txn_begin(*envp, 0, &txn, 0));
+ DBT key;
+ dbt_init(&key, "foo", 4);
+ CKERR(db->put(db, txn, &key, &key, 0));
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ uint8_t gid[DB_GID_SIZE];
+ memset(gid, 0, DB_GID_SIZE);
+ gid[0]=42;
+ CKERR(txn->prepare(txn, gid, 0));
+ if (commit)
+ CKERR(txn->commit(txn, 0));
+}
+
+static void test (void) {
+ pid_t pid;
+
+ if (0==(pid=fork())) {
+ DB_ENV *env;
+ setup_env_and_prepare(&env, TOKU_TEST_FILENAME, false);
+ {
+ DB_PREPLIST l[1];
+ long count=-1;
+ CKERR(env->txn_recover(env, l, 1, &count, DB_FIRST));
+ printf("%s:%d count=%ld\n", __FILE__, __LINE__, count);
+ assert(count==1);
+ assert(l[0].gid[0]==42);
+ }
+ exit(0);
+ }
+ {
+ int status;
+ pid_t pid2 = wait(&status);
+ assert(pid2==pid);
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+ // Now run recovery and crash on purpose.
+ if (0==(pid=fork())) {
+ DB_ENV *env;
+ setup_env(&env, TOKU_TEST_FILENAME);
+
+ // make sure there is 1 prepared txn.
+ {
+ DB_PREPLIST l[1];
+ long count=-1;
+ int r = env->txn_recover(env, l, 1, &count, DB_FIRST);
+ printf("r=%d count=%ld\n", r, count);
+ assert(count==1);
+ assert(l[0].gid[0]==42);
+ for (int i=1; i<DB_GID_SIZE; i++) {
+ assert(l[0].gid[i]==0);
+ }
+ }
+
+ exit(0);
+ }
+ {
+ int status;
+ pid_t pid2 = wait(&status);
+ assert(pid2==pid);
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+
+ // Now see if recovery works the second time.
+ DB_ENV *env;
+ setup_env(&env, TOKU_TEST_FILENAME);
+ {
+ DB_PREPLIST l[1];
+ long count=-1;
+ int r = env->txn_recover(env, l, 1, &count, DB_FIRST);
+ printf("r=%d count=%ld\n", r, count);
+ assert(count==1);
+ assert(l[0].gid[0]==42);
+ for (int i=1; i<DB_GID_SIZE; i++) {
+ assert(l[0].gid[i]==0);
+ }
+ { int chk_r = l->txn->commit(l->txn, 0); CKERR(chk_r); }
+ }
+ { int chk_r = env ->close(env, 0); CKERR(chk_r); }
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+ // first test: open an environment, a db, a txn, and do a prepare. Then do txn_prepare (without even closing the environment).
+ test();
+
+
+    // second test: open an environment, a db, a txn, prepare, close the environment. Then reopen and do txn_prepare.
+
+ // third test: make sure there is an fsync on txn_prepare, but not on the following commit.
+
+
+    // Then close the environment. Find out what BDB does when asked for the txn prepares.
+ // Other tests: read prepared txns, 1 at a time. Then close it and read them again.
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test-prepare3.cc b/storage/tokudb/PerconaFT/src/tests/test-prepare3.cc
new file mode 100644
index 00000000..f57fc963
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test-prepare3.cc
@@ -0,0 +1,340 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <sys/wait.h>
+
+// Verify that if we prepare a transaction and then commit enough other transactions that the logs may have been rotated, the prepared transaction can still commit or abort properly on recovery.
+
+static void clean_env (const char *envdir) {
+ const int len = strlen(envdir)+100;
+ char cmd[len];
+ snprintf(cmd, len, "rm -rf %s", envdir);
+ int r = system(cmd);
+ CKERR(r);
+ CKERR(toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO));
+}
+
+static void setup_env (DB_ENV **envp, const char *envdir) {
+ { int chk_r = db_env_create(envp, 0); CKERR(chk_r); }
+ (*envp)->set_errfile(*envp, stderr);
+ { int chk_r = (*envp)->set_redzone(*envp, 0); CKERR(chk_r); }
+ { int chk_r = (*envp)->open(*envp, envdir, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+#define NTXNS 6
+
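+// Fill the db with committed data, then start NTXNS txns that each insert
+// one row and prepare (gid[0] == 'a'+tnum); txn 0 is committed, txn 1 is
+// aborted, and the rest are left prepared for recovery to find.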
+static void setup_env_and_prepare (DB_ENV **envp, const char *envdir) {
+ DB *db;
+ clean_env(envdir);
+ setup_env(envp, envdir);
+ CKERR(db_create(&db, *envp, 0));
+ CKERR(db->open(db, NULL, "foo.db", 0, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, S_IRWXU+S_IRWXG+S_IRWXO));
+
+ {
+ DB_TXN *txn;
+ CKERR((*envp)->txn_begin(*envp, 0, &txn, 0));
+ for (int tnum=0; tnum<NTXNS; tnum++) {
+ for (int k=0; k<26; k++) {
+ #define DSIZE 200
+ char data[DSIZE];
+ memset(data, ' ', DSIZE);
+ data[0]='a'+tnum;
+ data[1]='a'+k;
+ data[DSIZE-1]=0;
+ DBT key;
+ dbt_init(&key, data, DSIZE);
+ CKERR(db->put(db, txn, &key, &key, 0));
+ }
+ }
+ CKERR(txn->commit(txn, 0));
+ }
+
+ for (int tnum=0; tnum<NTXNS; tnum++) {
+ DB_TXN *txn;
+ CKERR((*envp)->txn_begin(*envp, 0, &txn, 0));
+ char data[3]={(char)('a'+tnum),'_',0};
+ DBT key;
+ dbt_init(&key, data, 3);
+ CKERR(db->put(db, txn, &key, &key, 0));
+ uint8_t gid[DB_GID_SIZE];
+ memset(gid, 0, DB_GID_SIZE);
+ gid[0]='a'+tnum;
+ CKERR(txn->prepare(txn, gid, 0));
+ // Drop txn on the ground, since we will commit or abort it after recovery
+ if (tnum==0) {
+ //printf("commit %d\n", tnum);
+ CKERR(txn->commit(txn, 0));
+ } else if (tnum==1) {
+ //printf("abort %d\n", tnum);
+ CKERR(txn->abort(txn));
+ } else {
+ //printf("prepare %d\n", tnum);
+ }
+ }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+}
+
+enum prepared_state {
+ COMMITTED,
+ ABORTED,
+ MAYBE_COMMITTED,
+ MAYBE_ABORTED,
+ PREPARED};
+
+
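+// Check the recovered list against the expected per-txn states: the count
+// must lie between the number of PREPARED txns and the number of PREPARED or
+// MAYBE_* txns, no PREPARED txn may appear twice, and every gid must be
+// zero-padded after its first byte.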
+static void check_prepared_list (enum prepared_state ps[NTXNS], long count, DB_PREPLIST *l) {
+ int count_prepared=0;
+ int count_maybe_prepared=0;
+ for (int j=0; j<NTXNS; j++) {
+ switch (ps[j]) {
+ case COMMITTED:
+ case ABORTED:
+ goto next;
+ case PREPARED:
+ count_prepared++;
+ /* fall through */
+ case MAYBE_COMMITTED:
+ case MAYBE_ABORTED:
+ count_maybe_prepared++;
+ goto next;
+ }
+ assert(0);
+ next:;
+ }
+
+ assert(count>=count_prepared && count<=count_maybe_prepared);
+
+ bool found[NTXNS];
+ for (int j=0; j<NTXNS; j++) {
+ found[j] = (ps[j]!=PREPARED);
+ }
+
+ // now found[j] is false on those transactions that I hope to find in the prepared list.
+ for (int j=0; j<count; j++) {
+ int num = l[j].gid[0]-'a';
+ assert(num>=0 && num<NTXNS);
+ switch (ps[num]) {
+ case PREPARED:
+ assert(!found[num]);
+ found[num]=true;
+ break;
+ default:;
+ }
+ for (int i=1; i<DB_GID_SIZE; i++) {
+ assert(l[j].gid[i]==0);
+ }
+ }
+}
+
+static void get_prepared (DB_ENV *env, long *count, DB_PREPLIST *l) {
+ CKERR(env->txn_recover(env, l, NTXNS, count, DB_FIRST));
+ //printf("%s:%d count=%ld\n", __FILE__, __LINE__, *count);
+ assert(*count>=0);
+}
+
+static void check_prepared_txns (DB_ENV *env, enum prepared_state ps[NTXNS]) {
+ DB_PREPLIST l[NTXNS];
+ long count=-1;
+ get_prepared(env, &count, l);
+ check_prepared_list(ps, count, l);
+}
+
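+// After every prepared txn has been resolved, even-numbered txns should have
+// committed (their row is present) and odd-numbered txns should have aborted
+// (their row is gone).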
+static void check_state_after_full_recovery (DB_ENV *env) {
+ DB *db;
+ CKERR(db_create(&db, env, 0));
+ CKERR(db->open(db, NULL, "foo.db", 0, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, S_IRWXU+S_IRWXG+S_IRWXO));
+
+ for (int tnum=0; tnum<NTXNS; tnum++) {
+ DB_TXN *txn;
+ CKERR(env->txn_begin(env, 0, &txn, 0));
+ char data[3]={(char)('a'+tnum),'_',0};
+ DBT key;
+ dbt_init(&key, data, 3);
+ DBT dbt_data;
+ dbt_init(&dbt_data, NULL, 0);
+ int r = db->get(db, txn, &key, &dbt_data, 0);
+ if (tnum%2==0) {
+ assert(r==0);
+ assert(dbt_data.size==3 && memcmp(dbt_data.data, data, 3)==0);
+ } else {
+ assert(r==DB_NOTFOUND);
+ }
+ CKERR(txn->commit(txn, 0));
+ }
+ CKERR(db->close(db, 0));
+}
+
+static void waitfor (pid_t pid) {
+ int status;
+ pid_t pid2 = wait(&status);
+ assert(pid2==pid);
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+}
+
+static void abort_number(int num, int count, DB_PREPLIST *l) {
+ for (int j=0; j<count; j++) {
+ if (l[j].gid[0]=='a'+num) {
+ CKERR(l[j].txn->abort(l[j].txn));
+ return;
+ }
+ }
+ assert(0);
+}
+static void commit_number(int num, int count, DB_PREPLIST *l) {
+ for (int j=0; j<count; j++) {
+ if (l[j].gid[0]=='a'+num) {
+ CKERR(l[j].txn->commit(l[j].txn, 0));
+ return;
+ }
+ }
+ assert(0);
+}
+
+static void test (void) {
+ pid_t pid;
+
+ if (0==(pid=fork())) {
+ DB_ENV *env;
+ setup_env_and_prepare(&env, TOKU_TEST_FILENAME);
+ enum prepared_state prepared[NTXNS]={COMMITTED,ABORTED,PREPARED,PREPARED,PREPARED,PREPARED};
+ check_prepared_txns(env, prepared);
+ exit(0);
+ }
+ waitfor(pid);
+ // Now run recovery and crash on purpose.
+ if (0==(pid=fork())) {
+ DB_ENV *env;
+ setup_env(&env, TOKU_TEST_FILENAME);
+ enum prepared_state prepared[NTXNS]={COMMITTED,ABORTED,PREPARED,PREPARED,PREPARED,PREPARED};
+ check_prepared_txns(env, prepared);
+ exit(0);
+ }
+ waitfor(pid);
+
+ // Now see if recovery works the second time.
+ if (0==(pid=fork())) {
+ DB_ENV *env;
+ setup_env(&env, TOKU_TEST_FILENAME);
+ enum prepared_state prepared[NTXNS]={COMMITTED,ABORTED,PREPARED,PREPARED,PREPARED,PREPARED};
+ check_prepared_txns(env, prepared);
+ exit(0);
+ }
+ waitfor(pid);
+
+ // Now see if recovery works the third time.
+ if (0==(pid=fork())) {
+ DB_ENV *env;
+ setup_env(&env, TOKU_TEST_FILENAME);
+ enum prepared_state prepared[NTXNS]={COMMITTED,ABORTED,PREPARED,PREPARED,PREPARED,PREPARED};
+ DB_PREPLIST l[NTXNS];
+ long count=-1;
+ get_prepared(env, &count, l);
+ check_prepared_list(prepared, count, l);
+ abort_number(3, count, l);
+ commit_number(2, count, l); // do the commit second so it will make it to disk.
+ exit(0);
+ }
+ waitfor(pid);
+    // Now see if recovery works a fourth time, with numbers 2 and 3 no longer in the prepared state.
+ if (0==(pid=fork())) {
+ DB_ENV *env;
+ setup_env(&env, TOKU_TEST_FILENAME);
+ enum prepared_state prepared[NTXNS]={COMMITTED,ABORTED,MAYBE_COMMITTED,MAYBE_ABORTED,PREPARED,PREPARED};
+ DB_PREPLIST l[NTXNS];
+ long count=-1;
+ //printf("%s:%d count=%ld\n", __FILE__, __LINE__, count); // it's a little bit funky that the committed transactions in BDB (from commit_number(2,...) above) don't stay committed. But whatever...
+ get_prepared(env, &count, l);
+ check_prepared_list(prepared, count, l);
+ exit(0);
+ }
+ waitfor(pid);
+    // Now see if recovery works a fifth time, with numbers 2 and 3 no longer in the prepared state.
+ // This time we'll do get_prepared with a short count.
+ if (0==(pid=fork())) {
+ DB_ENV *env;
+ setup_env(&env, TOKU_TEST_FILENAME);
+ //printf("%s:%d count=%ld\n", __FILE__, __LINE__, count); // it's a little bit funky that the committed transactions in BDB (from commit_number(2,...) above) don't stay committed. But whatever...
+
+ long actual_count=0;
+
+ for (int recover_num=0; 1; recover_num++) {
+ long count=-1;
+ DB_PREPLIST *MALLOC_N(1, l); // use malloc so that valgrind might notice a problem
+ CKERR(env->txn_recover(env, l, 1, &count, recover_num==0 ? DB_FIRST : DB_NEXT));
+ //printf("recover_num %d count=%ld\n", recover_num,count);
+ if (count==0) break;
+ actual_count++;
+ if ((l[0].gid[0]-'a')%2==0) {
+ CKERR(l[0].txn->commit(l[0].txn, 0));
+ } else {
+ CKERR(l[0].txn->abort(l[0].txn));
+ }
+ toku_free(l);
+ }
+ //printf("actual_count=%ld\n", actual_count);
+
+ // Now let's see what the state is.
+ check_state_after_full_recovery(env);
+
+ CKERR(env->close(env, 0));
+ exit(0);
+ }
+ waitfor(pid);
+ // Now we should end up with nothing in the recovery list.
+ {
+ DB_ENV *env;
+ setup_env(&env, TOKU_TEST_FILENAME);
+ long count=-1;
+ DB_PREPLIST l[1];
+ CKERR(env->txn_recover(env, l, 1, &count, DB_FIRST));
+ assert(count==0);
+ check_state_after_full_recovery(env);
+ CKERR(env->close(env, 0));
+ }
+
+
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+ // first test: open an environment, a db, a txn, and do a prepare. Then do txn_prepare (without even closing the environment).
+ test();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test-rollinclude.cc b/storage/tokudb/PerconaFT/src/tests/test-rollinclude.cc
new file mode 100644
index 00000000..0b7d9b57
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test-rollinclude.cc
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// insert enough rows with a child txn to force a rollinclude log entry
+
+static void
+populate(DB_ENV *env, DB *db, int nrows) {
+ int r;
+ DB_TXN *parent = NULL;
+ r = env->txn_begin(env, NULL, &parent, 0); assert_zero(r);
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, parent, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = htonl(i);
+ char kk[4096]; // 4 KB key
+ memset(kk, 0, sizeof kk);
+ memcpy(kk, &k, sizeof k);
+ DBT key = { .data = &kk, .size = sizeof kk };
+ DBT val = { .data = NULL, .size = 0 };
+ r = db->put(db, txn, &key, &val, 0);
+ assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+ r = parent->commit(parent, 0); assert_zero(r);
+}
+
+static void
+run_test(int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0); assert_zero(r);
+
+ r = db->open(db, NULL, "0.tdb", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert_zero(r);
+
+ populate(env, db, nrows);
+
+ r = db->close(db, 0); assert_zero(r);
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int nrows = 1024; // = 4 MB / 4KB assumes 4 MB rollback nodes
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test(nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test-xa-prepare.cc b/storage/tokudb/PerconaFT/src/tests/test-xa-prepare.cc
new file mode 100644
index 00000000..ba04be56
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test-xa-prepare.cc
@@ -0,0 +1,157 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <sys/wait.h>
+
+static void clean_env (const char *envdir) {
+ const int len = strlen(envdir)+100;
+ char cmd[len];
+ snprintf(cmd, len, "rm -rf %s", envdir);
+ int r = system(cmd);
+ CKERR(r);
+ CKERR(toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO));
+}
+
+static void setup_env (DB_ENV **envp, const char *envdir) {
+ { int chk_r = db_env_create(envp, 0); CKERR(chk_r); }
+ (*envp)->set_errfile(*envp, stderr);
+ { int chk_r = (*envp)->set_redzone(*envp, 0); CKERR(chk_r); }
+ { int chk_r = (*envp)->open(*envp, envdir, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+const unsigned int myformatid = 0x74736554;
+
+static void setup_env_and_prepare (DB_ENV **envp, const char *envdir, bool commit) {
+ DB *db;
+ DB_TXN *txn;
+ clean_env(envdir);
+ setup_env(envp, envdir);
+ CKERR(db_create(&db, *envp, 0));
+ CKERR(db->open(db, NULL, "foo.db", 0, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, S_IRWXU+S_IRWXG+S_IRWXO));
+ CKERR((*envp)->txn_begin(*envp, 0, &txn, 0));
+ DBT key;
+ dbt_init(&key, "foo", 4);
+ CKERR(db->put(db, txn, &key, &key, 0));
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ TOKU_XA_XID x = {.formatID = myformatid,
+ .gtrid_length = 8,
+ .bqual_length = 9};
+ for (int i=0; i<8+9; i++) x.data[i] = 42+i;
+ CKERR(txn->xa_prepare(txn, &x, 0));
+ if (commit)
+ CKERR(txn->commit(txn, 0));
+}
+
+static void test1 (void) {
+ pid_t pid;
+ bool do_fork = true;
+ if (!do_fork || 0==(pid=fork())) {
+ DB_ENV *env;
+ setup_env_and_prepare(&env, TOKU_TEST_FILENAME, false);
+ {
+ TOKU_XA_XID l[1];
+ long count=-1;
+ CKERR(env->txn_xa_recover(env, l, 1, &count, DB_FIRST));
+ printf("%s:%d count=%ld\n", __FILE__, __LINE__, count);
+ assert(count==1);
+ assert(myformatid==l[0].formatID);
+ assert( 8==l[0].gtrid_length);
+ assert( 9==l[0].bqual_length);
+ for (int i=0; i<8+9; i++) {
+ assert(l[0].data[i]==42+i);
+ }
+ }
+ exit(0);
+ }
+ int status;
+ if (do_fork) {
+ pid_t pid2 = wait(&status);
+ assert(pid2==pid);
+ }
+
+ DB_ENV *env2;
+ char envdir2[TOKU_PATH_MAX+1];
+ setup_env_and_prepare(&env2, toku_path_join(envdir2, 2, TOKU_TEST_FILENAME, "envdir2"), true);
+
+ // Now we can look at env2 in the debugger to see if we managed to make it the same
+
+ DB_ENV *env;
+ setup_env(&env, TOKU_TEST_FILENAME);
+
+ {
+ TOKU_XA_XID l[1];
+ long count=-1;
+ {
+ int r = env->txn_xa_recover(env, l, 1, &count, DB_FIRST);
+ printf("r=%d count=%ld\n", r, count);
+ }
+ assert(count==1);
+ assert(l[0].data[0]==42);
+ assert(myformatid==l[0].formatID);
+ assert( 8 ==l[0].gtrid_length);
+ assert( 9 ==l[0].bqual_length);
+ for (int i=0; i<8+9; i++) {
+ assert(l[0].data[i]==42+i);
+ }
+ {
+ DB_TXN *txn;
+ int r = env->get_txn_from_xid(env, &l[0], &txn);
+ assert(r==0);
+ { int chk_r = txn->commit(txn, 0); CKERR(chk_r); }
+ }
+ }
+ { int chk_r = env2->close(env2, 0); CKERR(chk_r); }
+ { int chk_r = env ->close(env, 0); CKERR(chk_r); }
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+ // first test: open an environment, a db, a txn, and do a prepare. Then do txn_prepare (without even closing the environment).
+ test1();
+
+
+    // second test: open an environment, a db, a txn, prepare, close the environment. Then reopen and do txn_prepare.
+
+ // third test: make sure there is an fsync on txn_prepare, but not on the following commit.
+
+
+    // Then close the environment. Find out what BDB does when asked for the txn prepares.
+ // Other tests: read prepared txns, 1 at a time. Then close it and read them again.
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test-xopen-eclose.cc b/storage/tokudb/PerconaFT/src/tests/test-xopen-eclose.cc
new file mode 100644
index 00000000..6b585b3a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test-xopen-eclose.cc
@@ -0,0 +1,139 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the env close aborts open txns
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+static void run_test (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+
+ r = env->close(env, 0);
+ assert(r == EINVAL);
+
+#if 0
+ r = txn->abort(txn); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+#endif
+}
+
+static void run_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ // run recovery
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags + DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+static void run_no_recover (void) {
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags & ~DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ exit(0);
+}
+
+const char *cmd;
+
+bool do_test=false, do_recover=false, do_recover_only=false, do_no_recover = false;
+
+static void test_parse_args (int argc, char *const argv[]) {
+ int resultcode;
+ cmd = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v") == 0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "--test")==0) {
+ do_test=true;
+ } else if (strcmp(argv[0], "--recover") == 0) {
+ do_recover=true;
+ } else if (strcmp(argv[0], "--recover-only") == 0) {
+ do_recover_only=true;
+ } else if (strcmp(argv[0], "--no-recover") == 0) {
+ do_no_recover=true;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+            fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] {--test | --recover | --recover-only | --no-recover}\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main (int argc, char *const argv[]) {
+ test_parse_args(argc, argv);
+ if (do_test) {
+ run_test();
+ } else if (do_recover) {
+ run_recover();
+ } else if (do_recover_only) {
+ run_recover();
+ } else if (do_no_recover) {
+ run_no_recover();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test.h b/storage/tokudb/PerconaFT/src/tests/test.h
new file mode 100644
index 00000000..c5214961
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test.h
@@ -0,0 +1,454 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_portability.h>
+
+#include <string.h>
+#include <stdlib.h>
+#include <toku_stdint.h>
+#include <stdio.h>
+#include <db.h>
+#include <limits.h>
+#include <errno.h>
+#include <toku_htonl.h>
+#include <portability/toku_path.h>
+#include <portability/toku_crash.h>
+#include "toku_assert.h"
+#include <signal.h>
+#include <time.h>
+
+#include "ydb.h"
+// TDB uses DB_NOTFOUND for c_del and DB_CURRENT errors.
+#ifdef DB_KEYEMPTY
+#error
+#endif
+#define DB_KEYEMPTY DB_NOTFOUND
+
+// Certain tests fail when row locks taken for read are not shared.
+// Defining this switch relaxes their checks so they pass as long as read locks are not shared.
+#define BLOCKING_ROW_LOCKS_READS_NOT_SHARED
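+// (See e.g. test3039.cc: reads are wrapped in
+//   #ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+//       invariant(r == 0 || r == DB_LOCK_NOTGRANTED || r == DB_LOCK_DEADLOCK);
+//   #else
+//       CKERR(r);
+//   #endif
+// so lock-contention errors are tolerated while read locks are exclusive.)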
+
+int verbose=0;
+
+#define UU(x) x __attribute__((__unused__))
+
+#define CKERR(r) ({ int __r = r; if (__r!=0) fprintf(stderr, "%s:%d error %d %s\n", __FILE__, __LINE__, __r, db_strerror(__r)); assert(__r==0); })
+#define CKERR2(r,r2) do { if (r!=r2) fprintf(stderr, "%s:%d error %d %s, expected %d\n", __FILE__, __LINE__, r, db_strerror(r), r2); assert(r==r2); } while (0)
+#define CKERR2s(r,r2,r3) do { if (r!=r2 && r!=r3) fprintf(stderr, "%s:%d error %d %s, expected %d or %d\n", __FILE__, __LINE__, r, db_strerror(r), r2,r3); assert(r==r2||r==r3); } while (0)
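+// Typical use: CKERR(r) after a call that must succeed, CKERR2(r, DB_NOTFOUND) when one specific
+// error code is expected, and CKERR2s(r, r2, r3) when either of two results is acceptable.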
+
+/*
+ * Helpers for defining pseudo-hygienic macros using a (gensym)-like
+ * technique.
+ */
+#define _CONCAT(x, y) x ## y
+#define CONCAT(x, y) _CONCAT(x, y)
+#define GS(symbol) CONCAT(CONCAT(__gensym_, __LINE__), CONCAT(_, symbol))
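+// For example, on line 42 GS(txn) expands to the identifier __gensym_42_txn, so macros expanded
+// on different lines get distinct hidden variable names.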
+
+#define DEBUG_LINE do { \
+ fprintf(stderr, "%s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+ fflush(stderr); \
+} while (0)
+
+static __attribute__((__unused__)) void
+parse_args (int argc, char * const argv[]) {
+ const char *argv0=argv[0];
+ while (argc>1) {
+ int resultcode=0;
+ if (strcmp(argv[1], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[1],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[1], "-h")==0) {
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q] [-h]\n", argv0);
+ exit(resultcode);
+ } else {
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+static __attribute__((__unused__)) void
+print_engine_status(DB_ENV * UU(env)) {
+    if (verbose) { // verbose is defined near the top of this header
+ uint64_t nrows;
+ env->get_engine_status_num_rows(env, &nrows);
+ int bufsiz = nrows * 128; // assume 128 characters per row
+ char buff[bufsiz];
+ env->get_engine_status_text(env, buff, bufsiz);
+ printf("Engine status:\n");
+ printf("%s", buff);
+ }
+}
+
+static __attribute__((__unused__)) uint64_t
+get_engine_status_val(DB_ENV * UU(env), const char * keyname) {
+ uint64_t rval = 0;
+ uint64_t nrows;
+ uint64_t max_rows;
+ env->get_engine_status_num_rows(env, &max_rows);
+ TOKU_ENGINE_STATUS_ROW_S mystat[max_rows];
+ fs_redzone_state redzone_state;
+ uint64_t panic;
+ uint32_t panic_string_len = 1024;
+ char panic_string[panic_string_len];
+ int r = env->get_engine_status (env, mystat, max_rows, &nrows, &redzone_state, &panic, panic_string, panic_string_len, TOKU_ENGINE_STATUS);
+ CKERR(r);
+ int found = 0;
+ for (uint64_t i = 0; i < nrows && !found; i++) {
+ if (strcmp(keyname, mystat[i].keyname) == 0) {
+ found++;
+ rval = mystat[i].value.num;
+ }
+ }
+ CKERR2(found, 1);
+ return rval;
+}
+
+static __attribute__((__unused__)) DBT *
+dbt_init(DBT *dbt, const void *data, uint32_t size) {
+ memset(dbt, 0, sizeof *dbt);
+ dbt->data = (void*)data;
+ dbt->size = size;
+ return dbt;
+}
+
+static __attribute__((__unused__)) DBT *
+dbt_init_malloc (DBT *dbt) {
+ memset(dbt, 0, sizeof *dbt);
+ dbt->flags = DB_DBT_MALLOC;
+ return dbt;
+}
+
+static __attribute__((__unused__)) DBT *
+dbt_init_realloc (DBT *dbt) {
+ memset(dbt, 0, sizeof *dbt);
+ dbt->flags = DB_DBT_REALLOC;
+ return dbt;
+}
+
+// Simple LCG random number generator. Not high quality, but good enough.
+static uint32_t rstate=1;
+static inline void mysrandom (int s) {
+ rstate=s;
+}
+static inline uint32_t myrandom (void) {
+ rstate = (279470275ull*(uint64_t)rstate)%4294967291ull;
+ return rstate;
+}
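+// Note: 4294967291 (2^32 - 5) is prime, so with a nonzero seed this multiplicative congruential
+// generator never collapses to 0.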
+
+static __attribute__((__unused__)) int
+int64_dbt_cmp (DB *db UU(), const DBT *a, const DBT *b) {
+// assert(db && a && b);
+ assert(a);
+ assert(b);
+// assert(db);
+
+ assert(a->size == sizeof(int64_t));
+ assert(b->size == sizeof(int64_t));
+
+ int64_t x = *(int64_t *) a->data;
+ int64_t y = *(int64_t *) b->data;
+
+ if (x<y) return -1;
+ if (x>y) return 1;
+ return 0;
+}
+
+static __attribute__((__unused__)) int
+int_dbt_cmp (DB *db, const DBT *a, const DBT *b) {
+ assert(db && a && b);
+ assert(a->size == sizeof(int));
+ assert(b->size == sizeof(int));
+
+ int x = *(int *) a->data;
+ int y = *(int *) b->data;
+
+ if (x<y) return -1;
+ if (x>y) return 1;
+ return 0;
+}
+
+static __attribute__((__unused__)) int
+uint_dbt_cmp (DB *db, const DBT *a, const DBT *b) {
+ assert(db && a && b);
+ assert(a->size == sizeof(unsigned int));
+ assert(b->size == sizeof(unsigned int));
+
+ unsigned int x = *(unsigned int *) a->data;
+ unsigned int y = *(unsigned int *) b->data;
+
+ if (x<y) return -1;
+ if (x>y) return 1;
+ return 0;
+}
+
+#define SET_TRACE_FILE(x) toku_set_trace_file(x)
+#define CLOSE_TRACE_FILE(x) toku_close_trace_file()
+
+#include <memory.h>
+
+static uint64_t __attribute__((__unused__))
+random64(void) {
+ const unsigned int seed = 0xFEEDFACE;
+ static int seeded = 0;
+ if (!seeded) {
+ seeded = 1;
+ srandom(seed);
+ }
+ //random() generates 31 bits of randomness (low order)
+ uint64_t low = random();
+ uint64_t high = random();
+ uint64_t twobits = random();
+ uint64_t ret = low | (high<<31) | (twobits<<62);
+ return ret;
+}
+
+static __attribute__((__unused__))
+double get_tdiff(void) {
+ static struct timeval prev={0,0};
+ if (prev.tv_sec==0) {
+ gettimeofday(&prev, 0);
+ return 0.0;
+ } else {
+ struct timeval now;
+ gettimeofday(&now, 0);
+ double diff = now.tv_sec - prev.tv_sec + 1e-6*(now.tv_usec - prev.tv_usec);
+ prev = now;
+ return diff;
+ }
+}
+
+static __attribute__((__unused__))
+void format_time(const time_t *timer, char *buf) {
+ ctime_r(timer, buf);
+ size_t len = strlen(buf);
+ assert(len < 26);
+ char end;
+
+ assert(len>=1);
+ end = buf[len-1];
+ while (end == '\n' || end == '\r') {
+ buf[len-1] = '\0';
+ len--;
+ assert(len>=1);
+ end = buf[len-1];
+ }
+}
+
+static __attribute__((__unused__))
+void print_time_now(void) {
+ char timestr[80];
+ time_t now = time(NULL);
+ format_time(&now, timestr);
+ printf("%s", timestr);
+}
+
+static void UU()
+multiply_locks_for_n_dbs(DB_ENV *env, int num_dbs) {
+ uint64_t current_max_lock_memory;
+ int r = env->get_lk_max_memory(env, &current_max_lock_memory);
+ CKERR(r);
+ r = env->set_lk_max_memory(env, current_max_lock_memory * num_dbs);
+ CKERR(r);
+}
+
+static inline void
+default_parse_args (int argc, char * const argv[]) {
+ const char *progname=argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ ++verbose;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else {
+ fprintf(stderr, "Usage:\n %s [-v] [-q]\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+}
+
+UU()
+static void copy_dbt(DBT *dest, const DBT *src) {
+ assert(dest->flags & DB_DBT_REALLOC);
+ dest->data = toku_xrealloc(dest->data, src->size);
+ dest->size = src->size;
+ memcpy(dest->data, src->data, src->size);
+}
+
+// DBT_ARRAY is a toku-specific type
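+// The *_test_no_array wrappers below let tests pass plain DBT arrays: each DBT is packed into a
+// one-element DBT_ARRAY, the env's *_multiple API is called, the (possibly reallocated) DBTs are
+// copied back out, and the arrays are destroyed shallowly.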
+UU()
+static int
+env_update_multiple_test_no_array(
+ DB_ENV *env,
+ DB *src_db,
+ DB_TXN *txn,
+ DBT *old_src_key, DBT *old_src_data,
+ DBT *new_src_key, DBT *new_src_data,
+ uint32_t num_dbs, DB **db_array, uint32_t* flags_array,
+ uint32_t num_keys, DBT keys[],
+ uint32_t num_vals, DBT vals[]) {
+ int r;
+ DBT_ARRAY key_arrays[num_keys];
+ DBT_ARRAY val_arrays[num_vals];
+ for (uint32_t i = 0; i < num_keys; i++) {
+ toku_dbt_array_init(&key_arrays[i], 1);
+ key_arrays[i].dbts[0] = keys[i];
+ }
+ for (uint32_t i = 0; i < num_vals; i++) {
+ toku_dbt_array_init(&val_arrays[i], 1);
+ val_arrays[i].dbts[0] = vals[i];
+ }
+ r = env->update_multiple(env, src_db, txn, old_src_key, old_src_data, new_src_key, new_src_data,
+ num_dbs, db_array, flags_array,
+ num_keys, &key_arrays[0],
+ num_vals, &val_arrays[0]);
+ for (uint32_t i = 0; i < num_keys; i++) {
+ invariant(key_arrays[i].size == 1);
+ invariant(key_arrays[i].capacity == 1);
+ keys[i] = key_arrays[i].dbts[0];
+ toku_dbt_array_destroy_shallow(&key_arrays[i]);
+ }
+ for (uint32_t i = 0; i < num_vals; i++) {
+ invariant(val_arrays[i].size == 1);
+ invariant(val_arrays[i].capacity == 1);
+ vals[i] = val_arrays[i].dbts[0];
+ toku_dbt_array_destroy_shallow(&val_arrays[i]);
+ }
+ return r;
+}
+
+UU()
+static int env_put_multiple_test_no_array(
+ DB_ENV *env,
+ DB *src_db,
+ DB_TXN *txn,
+ const DBT *src_key,
+ const DBT *src_val,
+ uint32_t num_dbs,
+ DB **db_array,
+ DBT *keys,
+ DBT *vals,
+ uint32_t *flags_array)
+{
+ int r;
+ DBT_ARRAY key_arrays[num_dbs];
+ DBT_ARRAY val_arrays[num_dbs];
+ for (uint32_t i = 0; i < num_dbs; i++) {
+ toku_dbt_array_init(&key_arrays[i], 1);
+ toku_dbt_array_init(&val_arrays[i], 1);
+ key_arrays[i].dbts[0] = keys[i];
+ val_arrays[i].dbts[0] = vals[i];
+ }
+ r = env->put_multiple(env, src_db, txn, src_key, src_val, num_dbs, db_array, &key_arrays[0], &val_arrays[0], flags_array);
+ for (uint32_t i = 0; i < num_dbs; i++) {
+ invariant(key_arrays[i].size == 1);
+ invariant(key_arrays[i].capacity == 1);
+ invariant(val_arrays[i].size == 1);
+ invariant(val_arrays[i].capacity == 1);
+ keys[i] = key_arrays[i].dbts[0];
+ vals[i] = val_arrays[i].dbts[0];
+ toku_dbt_array_destroy_shallow(&key_arrays[i]);
+ toku_dbt_array_destroy_shallow(&val_arrays[i]);
+ }
+ return r;
+}
+
+UU()
+static int env_del_multiple_test_no_array(
+ DB_ENV *env,
+ DB *src_db,
+ DB_TXN *txn,
+ const DBT *src_key,
+ const DBT *src_val,
+ uint32_t num_dbs,
+ DB **db_array,
+ DBT *keys,
+ uint32_t *flags_array)
+{
+ int r;
+ DBT_ARRAY key_arrays[num_dbs];
+ for (uint32_t i = 0; i < num_dbs; i++) {
+ toku_dbt_array_init(&key_arrays[i], 1);
+ key_arrays[i].dbts[0] = keys[i];
+ }
+ r = env->del_multiple(env, src_db, txn, src_key, src_val, num_dbs, db_array, &key_arrays[0], flags_array);
+ for (uint32_t i = 0; i < num_dbs; i++) {
+ invariant(key_arrays[i].size == 1);
+ invariant(key_arrays[i].capacity == 1);
+ keys[i] = key_arrays[i].dbts[0];
+ toku_dbt_array_destroy_shallow(&key_arrays[i]);
+ }
+ return r;
+}
+
+/* Some macros for evaluating blocks or functions within the scope of a
+ * transaction. */
+#define IN_TXN_COMMIT(env, parent, txn, flags, expr) ({ \
+ DB_TXN *txn; \
+ { int chk_r = (env)->txn_begin((env), (parent), &(txn), (flags)); CKERR(chk_r); } \
+ (expr); \
+ { int chk_r = (txn)->commit((txn), 0); CKERR(chk_r); } \
+ })
+
+#define IN_TXN_ABORT(env, parent, txn, flags, expr) ({ \
+ DB_TXN *txn; \
+ { int chk_r = (env)->txn_begin((env), (parent), &(txn), (flags)); CKERR(chk_r); } \
+ (expr); \
+ { int chk_r = (txn)->abort(txn); CKERR(chk_r); } \
+ })
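+// Hypothetical usage sketch (the macro itself declares the txn variable):
+//   IN_TXN_COMMIT(env, nullptr, txn, 0,
+//       ({ int chk_r = db->put(db, txn, &key, &val, 0); CKERR(chk_r); }));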
+
+int test_main(int argc, char *const argv[]);
+int main(int argc, char *const argv[]) {
+ int r;
+ toku_os_initialize_settings(1);
+ r = test_main(argc, argv);
+ return r;
+}
+
+#ifndef DB_GID_SIZE
+#define DB_GID_SIZE DB_XIDDATASIZE
+#endif
diff --git a/storage/tokudb/PerconaFT/src/tests/test1572.cc b/storage/tokudb/PerconaFT/src/tests/test1572.cc
new file mode 100644
index 00000000..584b3991
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test1572.cc
@@ -0,0 +1,112 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Is it feasible to run 4 billion transactions in one test in the regression tests? */
+#include <db.h>
+#include <sys/stat.h>
+#include <ft/logger/log.h>
+#include <src/ydb_txn.h>
+
+static void
+four_billion_subtransactions (int do_something_in_children, int use_big_increment) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *xparent;
+
+ uint64_t extra_increment;
+ if (use_big_increment) {
+ extra_increment = (1<<28); // 1/4 of a billion, so 16 transactions should push us over the edge.
+ } else {
+ extra_increment = 0; // xid is already incrementing once per txn.
+ }
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+
+ {
+ DB_TXN *txn;
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+ }
+
+ r=env->txn_begin(env, 0, &xparent, 0); CKERR(r);
+ long long i;
+    long long const fourbillion = use_big_increment ? 32 : 500000; // if using the big increment we should run into trouble in 32 transactions or fewer.
+ for (i=0; i < fourbillion + 100; i++) {
+ DB_TXN *xchild;
+ toku_increase_last_xid(env, extra_increment);
+ r=env->txn_begin(env, xparent, &xchild, 0); CKERR(r);
+ if (do_something_in_children) {
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%lld", i);
+ snprintf(there, sizeof(there), "there%lld", i);
+ DBT key, val;
+ r=db->put(db, xchild,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&val, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+ }
+ r=xchild->commit(xchild, 0); CKERR(r);
+ }
+ r=xparent->commit(xparent, 0); CKERR(r);
+
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char *const argv[])
+{
+ parse_args(argc, argv);
+ four_billion_subtransactions(0, 0);
+ four_billion_subtransactions(1, 0);
+ four_billion_subtransactions(0, 1);
+ four_billion_subtransactions(1, 1);
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test1753.cc b/storage/tokudb/PerconaFT/src/tests/test1753.cc
new file mode 100644
index 00000000..552540cd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test1753.cc
@@ -0,0 +1,90 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+
+
+
+DB_TXN *null_txn=0;
+
+static void do_test1753 (int do_create_on_reopen) {
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ // Create an empty file
+ {
+ DB_ENV *env;
+ DB *db;
+
+ const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_PRIVATE ;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, "main", 0, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ }
+ // Now open the empty file and insert
+ {
+ DB_ENV *env;
+ int envflags = DB_INIT_MPOOL| DB_THREAD |DB_PRIVATE;
+ if (do_create_on_reopen) envflags |= DB_CREATE;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, 0);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (do_create_on_reopen) CKERR(r);
+ else CKERR2(r, ENOENT);
+ r = env->close(env, 0); CKERR(r);
+
+ }
+}
+
+int test_main (int argc __attribute__((__unused__)), char * const argv[] __attribute__((__unused__))) {
+ do_test1753(1);
+ do_test1753(0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test1842.cc b/storage/tokudb/PerconaFT/src/tests/test1842.cc
new file mode 100644
index 00000000..154ab612
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test1842.cc
@@ -0,0 +1,177 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+static DB *db;
+static DB_ENV *env;
+
+static __attribute__((__unused__)) int
+length_int_dbt_cmp (DB *db_compare, const DBT *a, const DBT *b) {
+ assert(db_compare && a && b);
+ assert(a->size > sizeof(int));
+ assert(b->size > sizeof(int));
+
+ int i;
+ int extra_len_a = *(uint8_t*)((uint8_t*)a->data +4);
+ assert(a->size == sizeof(int)+extra_len_a);
+
+ for (i = 1; i < extra_len_a; i++) {
+ assert(((char*)a->data+4)[i] == ' ');
+ }
+
+ int extra_len_b = *(uint8_t*)((uint8_t*)b->data+4);
+ assert(b->size == sizeof(int)+extra_len_b);
+ for (i = 1; i < extra_len_b; i++) {
+ assert(((char*)b->data+4)[i] == ' ');
+ }
+
+ int x = *(int *) a->data;
+ int y = *(int *) b->data;
+
+ if (x<y) return -1;
+ if (x>y) return 1;
+
+ if (extra_len_a<extra_len_b) return -1;
+ if (extra_len_a>extra_len_b) return 1;
+ return 0;
+}
+
+static void
+setup_db (uint32_t dup_mode) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, int_dbt_cmp); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_TXN | DB_PRIVATE | DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ {
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+        r = db->set_flags(db, dup_mode); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+}
+
+static inline DBT *
+dbt_init_length(DBT *dbt, int val, uint8_t extra_len, uint8_t* buf) {
+ *(int*)buf = val;
+ buf[sizeof(int)] = extra_len;
+ int i;
+ for (i = 1; i < extra_len; i++) {
+ buf[sizeof(int)+i] = ' ';
+ }
+ return dbt_init(dbt, buf, sizeof(int)+extra_len);
+}
+
+static void
+test_txn_abort (uint32_t dup_mode) {
+ setup_db(dup_mode);
+ DBT key, val;
+ int r;
+
+
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ uint8_t value1[256];
+ uint8_t value2[256];
+
+ int k = 1;
+ int v1 = 1;
+ int v2 = 1;
+ uint8_t extra_1 = 1;
+ uint8_t extra_2 = 2;
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init_length(&val, v1, extra_1, value1), 0);
+ CKERR(r);
+    r = txn->commit(txn, DB_TXN_NOSYNC); CKERR(r);
+    txn = NULL;
+
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init_length(&val, v2, extra_2, value2), 0);
+ CKERR(r);
+ r = db->del(db, txn, dbt_init(&key, &k, sizeof k), DB_DELETE_ANY);
+ CKERR(r);
+ //Flush by scanning
+ {
+ DBC *c;
+ r = db->cursor(db, txn, &c, 0);
+ CKERR(r);
+ DBT ck;
+ DBT cv;
+ memset(&ck, 0, sizeof(ck));
+ memset(&cv, 0, sizeof(cv));
+ do {
+ r = c->c_get(c, &ck, &cv, DB_NEXT);
+ } while (r==0);
+ CKERR2(r, DB_NOTFOUND);
+ r = c->c_close(c);
+ CKERR(r);
+ }
+    r = txn->commit(txn, 0); CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test_txn_abort(0);
+// test_txn_abort(DB_DUP|DB_DUPSORT);
+ return 0;
+}
+/*
+BNC_NBYTESINBUF
+The comparison function asserts that 'val(value) == length(value)'.
+Insert 1,'1' (commit it), then insert 1,'2 '.
+A db->del(1) then crashes on that assert.
+*/
diff --git a/storage/tokudb/PerconaFT/src/tests/test3039.cc b/storage/tokudb/PerconaFT/src/tests/test3039.cc
new file mode 100644
index 00000000..6c38fa13
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test3039.cc
@@ -0,0 +1,284 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* This is a performance test.  Releasing the lock during I/O should mean that, given two threads doing queries
+ * where one works in memory and the other is out of memory, the in-memory one is not slowed down by the out-of-memory one.
+ *
+ * Step 1: Create a dictionary that doesn't fit in main memory. Do it fast (sequential insertions).
+ * Step 2: Measure performance of in-memory requests.
+ * Step 3: Add a thread that does requests in parallel.
+ */
+
+#include "test.h"
+#include <string.h>
+#include <toku_time.h>
+#include <toku_pthread.h>
+#include <portability/toku_atomic.h>
+
+static const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+#define ROWSIZE 100
+static const char dbname[] = "data.db";
+static unsigned long long n_rows;
+
+static DB_ENV *env = NULL;
+static DB *db;
+
+// BDB cannot handle big transactions by default (runs out of locks).
+#define N_PER_XACTION 10000
+
+static void create_db (uint64_t N) {
+ n_rows = N;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ { int r = db_env_create(&env, 0); CKERR(r); }
+ env->set_errfile(env, stderr);
+ env->set_redzone(env, 0);
+ { int r = env->set_cachesize(env, 0, 400*4096, 1); CKERR(r); }
+ { int r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); }
+ DB_TXN *txn;
+ { int r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); }
+ { int r = db_create(&db, env, 0); CKERR(r); }
+ { int r = db->set_pagesize(db, 4096); CKERR(r); }
+ { int r = db->open(db, txn, dbname, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r); }
+ { int r = txn->commit(txn, DB_TXN_NOSYNC); CKERR(r); }
+
+ { int r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); }
+ uint64_t n_since_commit = 0;
+ for (unsigned long long i=0; i<N; i++) {
+ if (n_since_commit++ > N_PER_XACTION) {
+ { int r = txn->commit(txn, DB_TXN_NOSYNC); CKERR(r); }
+ { int r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); }
+ }
+ char key[20];
+ char data[200];
+ snprintf(key, sizeof(key), "%016llx", i);
+ snprintf(data, sizeof(data), "%08lx%08lx%66s", random(), random()%16, "");
+ DBT keyd, datad;
+ {
+ int r = db->put(db, txn, dbt_init(&keyd, key, strlen(key)+1), dbt_init(&datad, data, strlen(data)+1), 0);
+ CKERR(r);
+ }
+ }
+ //printf("n_rows=%lld\n", n_rows);
+ { int r = txn->commit(txn, DB_TXN_NOSYNC); CKERR(r); }
+}
+
+struct reader_thread_state {
+ /* output */
+ double elapsed_time;
+ unsigned long long n_did_read;
+
+ /* input */
+ signed long long n_to_read; // Negative if we just run forever
+ int do_local;
+
+ /* communicate to the thread while running */
+ volatile int finish;
+
+};
+
+static
+void* reader_thread (void *arg)
+// Records elapsed time and the number of reads into the reader_thread_state passed via arg.
+{
+ struct timeval start_time, end_time;
+ gettimeofday(&start_time, 0);
+
+ DB_TXN *txn;
+ struct reader_thread_state *rs = (struct reader_thread_state *)arg;
+
+ { int r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); }
+ char key[20];
+ char data [200];
+ DBT keyd, datad;
+ keyd.data = key;
+ keyd.size = 0;
+ keyd.ulen = sizeof(key);
+ keyd.flags = DB_DBT_USERMEM;
+ datad.data = data;
+ datad.size = 0;
+ datad.ulen = sizeof(data);
+ datad.flags = DB_DBT_USERMEM;
+
+#define N_DISTINCT 16
+ unsigned long long vals[N_DISTINCT];
+ if (rs->do_local) {
+ for (int i=0; i<N_DISTINCT; i++) {
+ vals[i] = random()%n_rows;
+ }
+ }
+
+ uint64_t n_since_commit = 0;
+ long long n_read_so_far = 0;
+ while ((!rs->finish) && ((rs->n_to_read < 0) || (n_read_so_far < rs->n_to_read))) {
+
+ if (n_since_commit++ > N_PER_XACTION) {
+ { int r = txn->commit(txn, DB_TXN_NOSYNC); CKERR(r); }
+ { int r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); }
+ n_since_commit = 0;
+ }
+ long long value;
+ if (rs->do_local) {
+ long which = random()%N_DISTINCT;
+ value = vals[which];
+ //printf("value=%lld\n", value);
+ } else {
+ value = random()%n_rows;
+ }
+ snprintf(key, sizeof(key), "%016llx", value);
+ keyd.size = strlen(key)+1;
+ int r = db->get(db, txn, &keyd, &datad, 0);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ invariant(r == 0 || r == DB_LOCK_NOTGRANTED || r == DB_LOCK_DEADLOCK);
+#else
+ CKERR(r);
+#endif
+ rs->n_did_read++;
+ n_read_so_far ++;
+ }
+ { int r = txn->commit(txn, DB_TXN_NOSYNC); CKERR(r); }
+
+ gettimeofday(&end_time, 0);
+ rs->elapsed_time = toku_tdiff(&end_time, &start_time);
+ return NULL;
+}
+
+static
+void do_threads (unsigned long long N, int do_nonlocal) {
+ toku_pthread_t ths[2];
+ struct reader_thread_state rstates[2] = {{.elapsed_time = 0.0,
+ .n_did_read = 0,
+ .n_to_read = (long long signed)N,
+ .do_local = 1,
+ .finish = 0},
+ {.elapsed_time = 0.0,
+ .n_did_read = 0,
+ .n_to_read = -1,
+ .do_local = 0,
+ .finish = 0}};
+ int n_to_create = do_nonlocal ? 2 : 1;
+ for (int i = 0; i < n_to_create; i++) {
+ int r = toku_pthread_create(toku_uninstrumented,
+ &ths[i],
+ nullptr,
+ reader_thread,
+ static_cast<void *>(&rstates[i]));
+ CKERR(r);
+ }
+ for (int i = 0; i < n_to_create; i++) {
+ void *retval;
+ int r = toku_pthread_join(ths[i], &retval);
+ CKERR(r);
+ assert(retval == 0);
+ if (verbose) {
+ printf("%9s thread time = %8.2fs on %9lld reads (%.3f us/read)\n",
+ (i == 0 ? "local" : "nonlocal"),
+ rstates[i].elapsed_time,
+ rstates[i].n_did_read,
+ rstates[i].elapsed_time / rstates[i].n_did_read * 1e6);
+ }
+ rstates[1].finish = 1;
+ }
+ if (verbose && do_nonlocal) {
+ printf("total %9lld reads (%.3f us/read)\n",
+ rstates[0].n_did_read + rstates[1].n_did_read,
+ (rstates[0].elapsed_time)/(rstates[0].n_did_read + rstates[1].n_did_read) * 1e6);
+ }
+}
+
+static volatile unsigned long long n_preads;
+
+static ssize_t my_pread (int fd, void *buf, size_t count, off_t offset) {
+ (void) toku_sync_fetch_and_add(&n_preads, 1);
+ usleep(1000); // sleep for a millisecond
+ return pread(fd, buf, count, offset);
+}
+
+unsigned long N_default = 100000;
+unsigned long N;
+
+static void my_parse_args (int argc, char * const argv[]) {
+ const char *progname = argv[0];
+ argc--; argv++;
+ verbose = 0;
+ N = N_default;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ if (verbose>0) verbose--;
+ } else if (strcmp(argv[0],"-n")==0) {
+ argc--; argv++;
+ if (argc==0) goto usage;
+ errno = 0;
+ char *end;
+ N = strtol(argv[0], &end, 10);
+ if (errno!=0 || *end!=0) goto usage;
+ } else {
+ usage:
+ fprintf(stderr, "Usage:\n %s [-v] [-q] [-n <rowcount> (default %ld)]\n", progname, N_default);
+ fprintf(stderr, " -n 10000 is probably good for valgrind.\n");
+ exit(1);
+ }
+ argc--; argv++;
+ }
+
+}
+
+int test_main (int argc, char * const argv[]) {
+ my_parse_args(argc, argv);
+
+ unsigned long long M = N*10;
+
+ db_env_set_func_pread(my_pread);
+
+ create_db (N);
+ if (verbose) printf("%lld preads\n", n_preads);
+ do_threads (M, 0);
+ if (verbose) printf("%lld preads\n", n_preads);
+ do_threads (M, 0);
+ if (verbose) printf("%lld preads\n", n_preads);
+ do_threads (M, 1);
+ if (verbose) printf("%lld preads\n", n_preads);
+ { int r = db->close(db, 0); CKERR(r); }
+ { int r = env->close(env, 0); CKERR(r); }
+ if (verbose) printf("%lld preads\n", n_preads);
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test3219.cc b/storage/tokudb/PerconaFT/src/tests/test3219.cc
new file mode 100644
index 00000000..99d1a90a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test3219.cc
@@ -0,0 +1,207 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+
+// This test, when run under helgrind, should detect the race problem documented in #3219.
+// The test:
+// checkpointing runs (in one thread)
+// another thread does an ft lookup.
+// We expect to see a lock-acquisition error.
+
+
+#include "test.h"
+#include <pthread.h>
+
+
+static DB_ENV *env;
+static DB *db;
+
+static void
+insert(int i, DB_TXN *txn)
+{
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ DBT key, val;
+ int r=db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&val, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void
+lookup(int i, DB_TXN *txn)
+// Do a lookup, but don't complain if it's not there.
+{
+ char hello[30], there[30], expectthere[30];
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(expectthere, sizeof(expectthere), "there%d", i);
+ DBT key, val;
+ val.data = there;
+ val.ulen = sizeof there;
+ val.flags = DB_DBT_USERMEM;
+ int r=db->get(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ &val,
+ 0);
+ if (r==0) {
+ assert(val.data==there);
+ assert(val.size==strlen(expectthere)+1);
+ //printf("Found %s, expected %s\n", there, expectthere);
+ assert(strcmp(there, expectthere)==0);
+ }
+}
+
+#define N_ROWS 1000000
+#define N_TXNS 10000
+#define N_ROWS_PER_TXN 1
+
+#define INITIAL_SIZE 1000
+//#define N_TXNS 10
+//#define PER_TXN 10000
+
+static void
+setup (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_cachesize(env, 0, 128*1024, 1); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, 4096); CKERR(r);
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (int i=0; i<INITIAL_SIZE; i++) insert(random()%N_ROWS, txn);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+}
+
+
+static void
+finish (void) {
+ int r;
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+volatile int finished = false;
+
+// Thread A performs checkpoints
+static void*
+start_a (void *arg __attribute__((__unused__))) {
+ //r=env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ while (!finished) {
+ int r;
+ r=env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ sleep(1);
+ }
+ return NULL;
+}
+
+// Thread B performs insertions (eventually they start overwriting the same record).
+static void*
+start_b (void *arg __attribute__((__unused__))) {
+ int r;
+ for (int j=0; j<N_TXNS; j++) {
+ if (verbose) {
+ printf("."); fflush(stdout);
+ if (j%(N_TXNS/10)==0) printf("\n");
+ }
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (int i=0; i<N_ROWS_PER_TXN; i++) {
+ insert(random()%N_ROWS, txn);
+ }
+ r = txn->commit(txn, DB_TXN_NOSYNC); CKERR(r);
+ }
+ finished = true;
+ return NULL;
+}
+
+// Thread C performs lookups
+static void*
+start_c (void *arg __attribute__((__unused__))) {
+ int r;
+ while (!finished) {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ lookup(random()%N_ROWS, txn);
+ r = txn->commit(txn, DB_TXN_NOSYNC); CKERR(r);
+ }
+ return NULL;
+}
+
+
+typedef void *(*pthread_fun)(void*);
+
+static void
+run_test (void)
+{
+ setup();
+ pthread_t t[3];
+ pthread_fun funs[3] = {start_a, start_b, start_c};
+ finished = false;
+ for (int i=0; i<3; i++) {
+ int r = pthread_create(&t[i], NULL, funs[i], NULL);
+ assert(r==0);
+ }
+ for (int i=0; i<3; i++) {
+ void *rv;
+ int r = pthread_join(t[i], &rv);
+ assert(r==0 && rv==NULL);
+ }
+ finish();
+}
+
+int test_main (int argc, char*const argv[]) {
+ parse_args(argc, argv);
+ run_test();
+ if (verbose) printf("\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test3522.cc b/storage/tokudb/PerconaFT/src/tests/test3522.cc
new file mode 100644
index 00000000..be594bd4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test3522.cc
@@ -0,0 +1,178 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Test for #3522. Demonstrate that with DB_TRYAGAIN a cursor can stall.
+ * Strategy: Create a tree (with relatively small nodes so things happen quickly, and relatively large compared to the cache).
+ * In a single transaction: Delete everything, and then do a DB_FIRST.
+ * Make the test terminate by capturing the calls to pread(). */
+
+#include "test.h"
+#include <portability/toku_atomic.h>
+
+static DB_ENV *env;
+static DB *db;
+const int N = 1000;
+
+const int n_preads_limit = 1000;
+long n_preads = 0;
+
+static ssize_t my_pread (int fd, void *buf, size_t count, off_t offset) {
+ long n_read_so_far = toku_sync_fetch_and_add(&n_preads, 1);
+ if (n_read_so_far > n_preads_limit) {
+ if (verbose) fprintf(stderr, "Apparent infinite loop detected\n");
+ abort();
+ }
+ return pread(fd, buf, count, offset);
+}
+
+static void
+insert(int i, DB_TXN *txn)
+{
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ DBT key, val;
+ int r=db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&val, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void op_delete (int i, DB_TXN *x) {
+ char hello[30];
+ DBT key;
+ if (verbose>1) printf("op_delete %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ int r = db->del(db, x,
+ dbt_init(&key, hello, strlen(hello)+1),
+ 0);
+ CKERR(r);
+}
+
+static void
+setup (void) {
+ db_env_set_func_pread(my_pread);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_cachesize(env, 0, 128*1024, 1); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, 4096); CKERR(r);
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (int i=0; i<N; i++) insert(i, txn);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+}
+
+static void finish (void) {
+ int r;
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+int did_nothing = 0;
+
+static int
+do_nothing(DBT const *UU(a), DBT const *UU(b), void *UU(c)) {
+ did_nothing++;
+ return 0;
+}
+static void run_del_next (void) {
+ DB_TXN *txn;
+ DBC *cursor;
+ int r;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (int i=0; i<N; i++) op_delete(i, txn);
+
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ if (verbose) printf("read_next\n");
+ n_preads = 0;
+ r = cursor->c_getf_next(cursor, 0, do_nothing, NULL); CKERR2(r, DB_NOTFOUND);
+ assert(did_nothing==0);
+ if (verbose) printf("n_preads=%ld\n", n_preads);
+ r = cursor->c_close(cursor); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+}
+
+static void run_del_prev (void) {
+ DB_TXN *txn;
+ DBC *cursor;
+ int r;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (int i=0; i<N; i++) op_delete(i, txn);
+
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ if (verbose) printf("read_prev\n");
+ n_preads = 0;
+ r = cursor->c_getf_prev(cursor, 0, do_nothing, NULL); CKERR2(r, DB_NOTFOUND);
+ assert(did_nothing==0);
+ if (verbose) printf("n_preads=%ld\n", n_preads);
+ r = cursor->c_close(cursor); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+}
+
+static void run_test (void) {
+ setup();
+ run_del_next();
+ finish();
+
+ setup();
+ run_del_prev();
+ finish();
+}
+int test_main (int argc, char*const argv[]) {
+ parse_args(argc, argv);
+ run_test();
+ printf("n_preads=%ld\n", n_preads);
+ return 0;
+}
+
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test3522b.cc b/storage/tokudb/PerconaFT/src/tests/test3522b.cc
new file mode 100644
index 00000000..287d0a66
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test3522b.cc
@@ -0,0 +1,189 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Test for #3522. Demonstrate that with DB_TRYAGAIN a cursor can stall.
+ * Strategy: Create a tree (with relatively small nodes so things happen quickly, and relatively large compared to the cache).
+ * In a single transaction: Delete everything except the last one, and then do a DB_FIRST.
+ * (Compare to test3522.c which deletes everything including the last one.)
+ * Make the test terminate by capturing the calls to pread(). */
+
+#include "test.h"
+#include <portability/toku_atomic.h>
+
+static DB_ENV *env;
+static DB *db;
+const int N = 1000;
+
+const int n_preads_limit = 1000;
+long n_preads = 0;
+
+static ssize_t my_pread (int fd, void *buf, size_t count, off_t offset) {
+ long n_read_so_far = toku_sync_fetch_and_add(&n_preads, 1);
+ if (n_read_so_far > n_preads_limit) {
+ if (verbose) fprintf(stderr, "Apparent infinite loop detected\n");
+ abort();
+ }
+ return pread(fd, buf, count, offset);
+}
+
+static void
+insert(int i, DB_TXN *txn)
+{
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ DBT key, val;
+ int r=db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&val, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void op_delete (int i, DB_TXN *x) {
+ char hello[30];
+ DBT key;
+ if (verbose>1) printf("op_delete %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ int r = db->del(db, x,
+ dbt_init(&key, hello, strlen(hello)+1),
+ 0);
+ CKERR(r);
+}
+
+static void
+setup (void) {
+ db_env_set_func_pread(my_pread);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_cachesize(env, 0, 128*1024, 1); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, 4096); CKERR(r);
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (int i=0; i<N; i++) insert(i, txn);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+}
+
+static void finish (void) {
+ int r;
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+int did_nothing_count = 0;
+int expect_n = -1;
+
+static int
+do_nothing(DBT const *a, DBT const *b, void *c) {
+ did_nothing_count++;
+ assert(c==NULL);
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%d", expect_n);
+ snprintf(there, sizeof(there), "there%d", expect_n);
+ assert(strlen(hello)+1 == a->size);
+ assert(strlen(there)+1 == b->size);
+ assert(strcmp(hello, (char*)a->data)==0);
+ assert(strcmp(there, (char*)b->data)==0);
+ return 0;
+}
+static void run_del_next (void) {
+ DB_TXN *txn;
+ DBC *cursor;
+ int r;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (int i=0; i<N-1; i++) op_delete(i, txn);
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ expect_n = N-1;
+ did_nothing_count = 0;
+ n_preads = 0;
+ if (verbose) printf("read_next\n");
+ r = cursor->c_getf_next(cursor, 0, do_nothing, NULL); CKERR(r);
+ assert(did_nothing_count==1);
+ if (verbose) printf("n_preads=%ld\n", n_preads);
+ r = cursor->c_close(cursor); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+}
+
+static void run_del_prev (void) {
+ DB_TXN *txn;
+ DBC *cursor;
+ int r;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (int i=1; i<N; i++) op_delete(i, txn);
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ expect_n = 0;
+ did_nothing_count = 0;
+ if (verbose) printf("read_prev\n");
+ n_preads = 0;
+ r = cursor->c_getf_prev(cursor, 0, do_nothing, NULL); CKERR(r);
+ assert(did_nothing_count==1);
+ if (verbose) printf("n_preads=%ld\n", n_preads);
+ r = cursor->c_close(cursor); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+}
+
+static void run_test (void) {
+ setup();
+ run_del_next();
+ finish();
+
+ setup();
+ run_del_prev();
+ finish();
+}
+int test_main (int argc, char*const argv[]) {
+ parse_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test3529.cc b/storage/tokudb/PerconaFT/src/tests/test3529.cc
new file mode 100644
index 00000000..34f67a80
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test3529.cc
@@ -0,0 +1,210 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Test for #3522. Show that even with DB_TRYAGAIN, isolation still works.
+ * Strategy:
+ * 1. Create a tree (with relatively small nodes so things happen quickly, and relatively large compared to the cache).
+ * 2. Start two transactions YY and XX.
+ * 3. Force XX to precede YY (e.g., XX reads the last row, and then YY deletes it, under MVCC).
+ * 4. YY, in a single transaction, deletes everything.
+ * 5. YY then does a DB_FIRST.
+ *    Set things up so that while YY is doing its retries, XX inserts a row at the beginning.
+ * Make the test terminate by capturing the calls to pread(). */
+
+#include "test.h"
+#include <portability/toku_atomic.h>
+
+static DB_ENV *env;
+static DB *db;
+const int N = 1000;
+static DB_TXN *XX, *YY;
+
+long do_XX_on_pread = -1;
+const int n_preads_limit = 1000;
+long n_preads = 0;
+
+static void insert(int i, DB_TXN *txn);
+
+static ssize_t my_pread (int fd, void *buf, size_t count, off_t offset) {
+ long n_read_so_far = toku_sync_fetch_and_add(&n_preads, 1);
+ if (do_XX_on_pread==n_read_so_far && XX != NULL) {
+ // we're supposed to do the XX operation now. Insert a row.
+        if (verbose) printf("Did XX\n");
+ insert(0, XX);
+ }
+ if (n_read_so_far > n_preads_limit) {
+ if (verbose) fprintf(stderr, "Apparent infinite loop detected\n");
+ abort();
+ }
+ return pread(fd, buf, count, offset);
+}
+
+static void
+insert(int i, DB_TXN *txn)
+{
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%04d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ DBT key, val;
+ if (db) {
+ int r=db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&val, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+ }
+}
+
+static void op_delete (int i, DB_TXN *x) {
+ char hello[30];
+ DBT key;
+ if (verbose>1) printf("op_delete %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%04d", i);
+ int r = db->del(db, x,
+ dbt_init(&key, hello, strlen(hello)+1),
+ 0);
+ CKERR(r);
+}
+
+int did_nothing = 0;
+
+static int
+do_nothing(DBT const *UU(a), DBT const *UU(b), void *UU(c)) {
+ did_nothing++;
+ return 0;
+}
+
+
+static void
+setup (void) {
+ db = NULL;
+ db_env_set_func_pread(my_pread);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_cachesize(env, 0, 2*128*1024, 1); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, 4096); CKERR(r);
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (int i=0; i<N; i++) insert(i+1, txn);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ r = env->txn_begin(env, 0, &XX, DB_TXN_SNAPSHOT); CKERR(r);
+ r = env->txn_begin(env, 0, &YY, DB_TXN_SNAPSHOT); CKERR(r);
+
+    // Force XX to precede YY by making XX read something. (YY will op_delete everything in a moment.)
+ {
+ DBC *cursor;
+ r = db->cursor(db, XX, &cursor, 0); CKERR(r);
+ did_nothing = 0;
+ //r = cursor->c_getf_next(cursor, 0, do_nothing, NULL); CKERR(r);
+ //assert(did_nothing==1);
+ did_nothing = 0;
+ r = cursor->c_close(cursor); CKERR(r);
+ }
+}
+
+static void finish (void) {
+ int r;
+ r = YY->commit(YY, 0); CKERR(r);
+ YY = NULL;
+ r = XX->commit(XX, 0); CKERR(r);
+ XX = NULL;
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+static void run_del_next (void) {
+ DBC *cursor;
+ int r;
+ for (int i=0; i<N; i++) op_delete(i+1, YY);
+
+ r = db->cursor(db, YY, &cursor, 0); CKERR(r);
+ if (verbose) printf("read_next\n");
+ n_preads = 0;
+ do_XX_on_pread = 2;
+ printf("doing on %ld\n", do_XX_on_pread);
+ r = cursor->c_getf_next(cursor, 0, do_nothing, NULL); CKERR2(r, DB_NOTFOUND);
+ do_XX_on_pread = 0;
+ assert(did_nothing==0);
+ if (verbose) printf("n_preads=%ld\n", n_preads);
+ r = cursor->c_close(cursor); CKERR(r);
+}
+
+static void run_del_prev (void) {
+ DBC *cursor;
+ int r;
+ for (int i=0; i<N; i++) op_delete(i+1, YY);
+
+ r = db->cursor(db, YY, &cursor, 0); CKERR(r);
+ if (verbose) printf("read_prev\n");
+ n_preads = 0;
+ r = cursor->c_getf_prev(cursor, 0, do_nothing, NULL); CKERR2(r, DB_NOTFOUND);
+ assert(did_nothing==0);
+ if (verbose) printf("n_preads=%ld\n", n_preads);
+ r = cursor->c_close(cursor); CKERR(r);
+}
+
+static void run_test (void) {
+ setup();
+ run_del_next();
+ finish();
+
+ setup();
+ run_del_prev();
+ finish();
+}
+int test_main (int argc, char*const argv[]) {
+ parse_args(argc, argv);
+ run_test();
+ return 0;
+}
+
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test4573-logtrim.cc b/storage/tokudb/PerconaFT/src/tests/test4573-logtrim.cc
new file mode 100644
index 00000000..337d724a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test4573-logtrim.cc
@@ -0,0 +1,121 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <sys/wait.h>
+
+const int envflags = DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_RECOVER;
+
+const int my_lg_max = 100;
+
+int test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ pid_t pid;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ const int N = 5;
+
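+    // The child process below takes a checkpoint after every operation (so the
+    // small log files can be trimmed repeatedly) and then exits without closing
+    // the environment; the parent waits for it and reopens with DB_RECOVER to
+    // check that the committed puts and deletes survive recovery.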
+ if (0==(pid=fork())) {
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_lg_max(env, my_lg_max); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ for (int i=0; i<N; i++) {
+ DBT k,v;
+ r = db->put(db, txn, dbt_init(&k, &i, sizeof(i)), dbt_init(&v, &i, sizeof(i)), 0); CKERR(r);
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ for (int i=0; i<N; i+=2) {
+ DBT k;
+ r = db->del(db, txn, dbt_init(&k, &i, sizeof(i)), 0); CKERR(r);
+ r = env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+ exit(0);
+
+ }
+ {
+ int status;
+ pid_t pid2 = wait(&status);
+ assert(pid2==pid);
+ assert(WIFEXITED(status) && WEXITSTATUS(status)==0);
+ }
+ // Now run recovery to see what happens.
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ for (int i=0; i<N; i++) {
+ DBT k;
+ DBT v;
+ dbt_init(&v, NULL, 0);
+ r = db->get(db, txn, dbt_init(&k, &i, sizeof(i)), &v, 0);
+ if (i%2==1) {
+ assert(r==0);
+ //printf("Got %d\n", *(int*)v.data);
+ } else {
+ assert(r==DB_NOTFOUND);
+ }
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ //toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test5092.cc b/storage/tokudb/PerconaFT/src/tests/test5092.cc
new file mode 100644
index 00000000..36fee5f6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test5092.cc
@@ -0,0 +1,81 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <sys/wait.h>
+
+static void clean_env (const char *envdir) {
+ const int len = strlen(envdir)+100;
+ char cmd[len];
+ snprintf(cmd, len, "rm -rf %s", envdir);
+ int r = system(cmd);
+ CKERR(r);
+ CKERR(toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO));
+}
+
+static void setup_env (DB_ENV **envp, const char *envdir) {
+ { int chk_r = db_env_create(envp, 0); CKERR(chk_r); }
+ (*envp)->set_errfile(*envp, stderr);
+ { int chk_r = (*envp)->set_redzone(*envp, 0); CKERR(chk_r); }
+ { int chk_r = (*envp)->open(*envp, envdir, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
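+// setup_env_and_prepare creates a dictionary, prepares a two-phase transaction
+// with a fixed GID, and (optionally) commits it before the caller closes the
+// environment.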
+static void setup_env_and_prepare (DB_ENV **envp, const char *envdir, bool commit) {
+ DB *db;
+ DB_TXN *txn;
+ clean_env(envdir);
+ setup_env(envp, envdir);
+ CKERR(db_create(&db, *envp, 0));
+ CKERR(db->open(db, NULL, "foo.db", 0, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, S_IRWXU+S_IRWXG+S_IRWXO));
+ CKERR((*envp)->txn_begin(*envp, 0, &txn, 0));
+ uint8_t gid[DB_GID_SIZE];
+ memset(gid, 0, DB_GID_SIZE);
+ gid[0]=42;
+ CKERR(txn->prepare(txn, gid, 0));
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ if (commit)
+ CKERR(txn->commit(txn, 0));
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+ DB_ENV *env;
+ setup_env_and_prepare(&env, TOKU_TEST_FILENAME, true);
+ { int chk_r = env ->close(env, 0); CKERR(chk_r); }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test938.cc b/storage/tokudb/PerconaFT/src/tests/test938.cc
new file mode 100644
index 00000000..195b5b82
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test938.cc
@@ -0,0 +1,186 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/stat.h>
+
+unsigned char N=5;
+
+static int
+fact(int n) {
+ if (n<=2) return n;
+ else return n*fact(n-1);
+}
+
+static void
+swapc (unsigned char *a, unsigned char *b) {
+ unsigned char tmp=*a;
+ *a=*b;
+ *b=tmp;
+}
+
+DB_ENV *env;
+DB *db;
+
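+// run() decodes `choice` (0 .. N!-1) into a permutation of N values, inserts
+// the keys in that order, scans forward to check that all N rows are visible,
+// and then deletes everything by repeatedly taking DB_FIRST and deleting it.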
+static void
+run (int choice) {
+ unsigned char v[N];
+ int i;
+ int r;
+ for (i=0; i<N; i++) {
+ v[i]=(unsigned char)(10*i);
+ }
+ for (i=0; i<N; i++) {
+ int nchoices=N-i;
+ swapc(&v[i], &v[i+choice%nchoices]);
+ choice=choice/nchoices;
+ }
+ if (0) {
+ for (i=0; i<N; i++) {
+ printf("%d ", v[i]);
+ }
+
+ printf("\n");
+ }
+ DB_TXN *txn;
+ {
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ DBT kdbt,vdbt;
+ char key[2]={25,(char)v[i]};
+ char val=v[i];
+ //printf("put %d %d\n", key, val);
+ r=db->put(db, txn, dbt_init(&kdbt, &key, 2), dbt_init(&vdbt, &val, 1), 0); CKERR(r);
+ }
+ r=txn->commit(txn, DB_TXN_NOSYNC); CKERR(r);
+ }
+ {
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DBC *c;
+ r=db->cursor(db, txn, &c, 0); CKERR(r);
+ DBT kdbt,vdbt;
+ memset(&kdbt, 0, sizeof(kdbt));
+ memset(&vdbt, 0, sizeof(vdbt));
+ i=0;
+ while (0==(r=c->c_get(c, &kdbt, &vdbt, DB_NEXT))) {
+ //printf("Got %d %d\n", *(unsigned char*)kdbt.data, *(unsigned char*)vdbt.data);
+ i++;
+ kdbt.data=0;
+ vdbt.data=0;
+ }
+ CKERR2(r, DB_NOTFOUND);
+ //printf("i=%d N=%d\n", i, N);
+ assert(i==N);
+ r=c->c_close(c); CKERR(r);
+ r=txn->commit(txn, DB_TXN_NOSYNC); CKERR(r);
+ }
+ {
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DBC *c;
+ r=db->cursor(db, txn, &c, 0); CKERR(r);
+ DBT kdbt,vdbt;
+ memset(&kdbt, 0, sizeof(kdbt));
+ memset(&vdbt, 0, sizeof(vdbt));
+ i=0;
+ while (0==(r=(c->c_get(c, &kdbt, &vdbt, DB_FIRST)))) {
+ i++;
+ r = db->del(db, txn, &kdbt, DB_DELETE_ANY);
+ CKERR(r);
+ }
+ assert(r==DB_NOTFOUND);
+ r=c->c_close(c); CKERR(r);
+ r=txn->commit(txn, DB_TXN_NOSYNC); CKERR(r);
+ }
+ return;
+#if 0
+ char v101=101, v102=102, v1=1, v2=2;
+ {
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DBT k,v;
+ r=db->put(db, txn, dbt_init(&k, &v1, 1), dbt_init(&v, &v101, 1), 0); CKERR(r);
+ r=db->put(db, txn, dbt_init(&k, &v2, 1), dbt_init(&v, &v102, 1), 0); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+ }
+ {
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DBC *c;
+ r=db->cursor(db, txn, &c, 0); CKERR(r);
+ DBT k,v;
+ r=c->c_get(c, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_FIRST); CKERR(r);
+ assert(*(char*)k.data==v1); assert(*(char*)v.data==v101);
+ r=c->c_get(c, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_NEXT); CKERR(r);
+ assert(*(char*)k.data==v2); assert(*(char*)v.data==v102);
+ r=c->c_get(c, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_NEXT); assert(r!=0);
+ r=c->c_close(c); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+ }
+#endif
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_TXN *txn;
+ {
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+ }
+ int i;
+ //printf("fact(%d)=%d\n", N, fact(N));
+ for (i=0; i<fact(N); i++) {
+ run(i);
+ }
+ {
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test938b.cc b/storage/tokudb/PerconaFT/src/tests/test938b.cc
new file mode 100644
index 00000000..6c9db74e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test938b.cc
@@ -0,0 +1,113 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/stat.h>
+
+DB_ENV *env;
+DB *db;
+
+static void
+run (void) {
+ int r;
+ DB_TXN *txn;
+ char v101=101, v102=102, v1=1, v2=2;
+ int vN=0;
+ int N=0;
+ {
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DBT k,v;
+ int i;
+ r=db->put(db, txn, dbt_init(&k, &v1, 1), dbt_init(&v, &v101, 1), 0); CKERR(r);
+ r=db->put(db, txn, dbt_init(&k, &v2, 1), dbt_init(&v, &v102, 1), 0); CKERR(r);
+ for (i=0; i<N; i++) {
+ int iv = htonl(i);
+ r=db->put(db, txn, dbt_init(&k, &vN, 1), dbt_init(&v, &iv, 4), 0); CKERR(r);
+ }
+ r=txn->commit(txn, 0); CKERR(r);
+ }
+
+ {
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DBC *c;
+ r=db->cursor(db, txn, &c, 0); CKERR(r);
+ DBT k,v;
+ int i;
+ for (i=0; i<N; i++) {
+ r=c->c_get(c, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_NEXT); CKERR(r);
+ assert(k.size==1); assert(v.size==4);
+ assert(*(char*)k.data==vN); assert((int)ntohl(*(int*)v.data)==i);
+ }
+
+ r=c->c_get(c, dbt_init(&k, 0, 0), dbt_init(&v, 0, 0), DB_NEXT); CKERR(r);
+ assert(*(char*)k.data==v1); assert(*(char*)v.data==v101);
+ r=c->c_get(c, dbt_init(&k, 0, 0), dbt_init(&v, 0, 0), DB_NEXT); CKERR(r);
+ assert(*(char*)k.data==v2); assert(*(char*)v.data==v102);
+ r=c->c_get(c, dbt_init(&k, 0, 0), dbt_init(&v, 0, 0), DB_NEXT); assert(r!=0);
+ r=c->c_close(c); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+ }
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_TXN *txn;
+ {
+ r = db_env_create(&env, 0); CKERR(r);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+ }
+ run();
+ {
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test938c.cc b/storage/tokudb/PerconaFT/src/tests/test938c.cc
new file mode 100644
index 00000000..16c9a6d7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test938c.cc
@@ -0,0 +1,120 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <db.h>
+#include <sys/stat.h>
+
+unsigned char N=5;
+
+DB_ENV *env;
+DB *db;
+
+static void
+run (void) {
+ int r;
+ DB_TXN *txn, *txn2;
+ char v101=101, v102=102, v1=1, v2=1;
+ // Add (1,102) to the tree
+ // In one txn
+ // add (1,101) to the tree
+ // In another concurrent txn
+ // look up (1,102) and do DB_NEXT
+ // That should be fine in PerconaFT.
+    // It failed before #938 was fixed.
+    // It also fails for BDB for other reasons (page-level locking vs. row-level locking).
+ {
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ char kk[2] = {v2, v102};
+ DBT k,v;
+ r=db->put(db, txn, dbt_init(&k, &kk, 2), dbt_init(&v, &v102, 1), 0); CKERR(r);
+
+ r=txn->commit(txn, 0); CKERR(r);
+ }
+ {
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &txn2, 0); CKERR(r);
+
+ DBT k,v;
+ {
+ char kk[2] = {v1, v101};
+ r=db->put(db, txn, dbt_init(&k, &kk, 2), dbt_init(&v, &v101, 1), 0); CKERR(r);
+ }
+
+ DBC *c2;
+ r=db->cursor(db, txn2, &c2, 0); CKERR(r);
+
+ {
+ char kk[2] = {v2, v102};
+ r=c2->c_get(c2, dbt_init(&k, &kk, 2), dbt_init(&v, &v102, 1), DB_SET); CKERR(r);
+ }
+ r=c2->c_get(c2, dbt_init_malloc(&k), dbt_init_malloc(&v), DB_NEXT); assert(r==DB_NOTFOUND);
+
+ r=c2->c_close(c2);
+ r=txn->commit(txn, 0); CKERR(r);
+ r=txn2->commit(txn2, 0); CKERR(r);
+ }
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_TXN *txn;
+ {
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+ }
+ run();
+ {
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_1672532.cc b/storage/tokudb/PerconaFT/src/tests/test_1672532.cc
new file mode 100644
index 00000000..721e7677
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_1672532.cc
@@ -0,0 +1,210 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident \
+ "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+// Verify that DB_LOCKING_READ locks the rows read under snapshot isolation.
+// We create a db, then start a read transaction with repeatable-read isolation
+// and the locking-read flag, then start another transaction that tries to grab
+// the write lock.
+// DB_LOCKING_READ is defined here just to make the before and after tests work
+// (the before test did not have the DB_LOCKING_READ flag).
+#if !defined(DB_LOCKING_READ)
+#define DB_LOCKING_READ 0
+#endif
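+// prelock_range asks the cursor to pre-acquire a lock on the key range
+// [left, right]; test_read_write_range expects the second, overlapping request
+// to return the caller-supplied expect_r (DB_LOCK_NOTGRANTED in this test).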
+static int prelock_range(DBC *cursor, int left, int right) {
+ DBT key_left;
+ dbt_init(&key_left, &left, sizeof left);
+ DBT key_right;
+ dbt_init(&key_right, &right, sizeof right);
+ int r = cursor->c_set_bounds(cursor, &key_left, &key_right, true, 0);
+ return r;
+}
+
+static void test_read_write_range(DB_ENV *env,
+ DB *db,
+ uint32_t iso_flags,
+ int expect_r) {
+ int r;
+
+ DB_TXN *txn_a = NULL;
+ r = env->txn_begin(env, NULL, &txn_a, iso_flags);
+ assert_zero(r);
+ DB_TXN *txn_b = NULL;
+ r = env->txn_begin(env, NULL, &txn_b, iso_flags);
+ assert_zero(r);
+
+ DBC *cursor_a = NULL;
+ r = db->cursor(db, txn_a, &cursor_a, DB_LOCKING_READ);
+ assert_zero(r);
+ DBC *cursor_b = NULL;
+ r = db->cursor(db, txn_b, &cursor_b, DB_RMW);
+ assert_zero(r);
+
+ r = prelock_range(cursor_a, htonl(10), htonl(100));
+ assert_zero(r);
+ r = prelock_range(cursor_b, htonl(50), htonl(200));
+ assert(r == expect_r);
+
+ r = cursor_a->c_close(cursor_a);
+ assert_zero(r);
+ r = cursor_b->c_close(cursor_b);
+ assert_zero(r);
+
+ r = txn_a->commit(txn_a, 0);
+ assert_zero(r);
+ r = txn_b->commit(txn_b, 0);
+ assert_zero(r);
+}
+
+static void test_read_write_point(DB_ENV *env,
+ DB *db,
+ uint32_t iso_flags,
+ int expect_r) {
+ int r;
+
+ DB_TXN *txn1 = NULL;
+ r = env->txn_begin(env, NULL, &txn1, iso_flags);
+ assert_zero(r);
+
+ DB_TXN *txn2 = NULL;
+ r = env->txn_begin(env, NULL, &txn2, iso_flags);
+ assert_zero(r);
+
+ DBC *c1 = NULL;
+ r = db->cursor(db, txn1, &c1, DB_LOCKING_READ);
+ assert_zero(r);
+
+ DBC *c2 = NULL;
+ r = db->cursor(db, txn2, &c2, DB_RMW);
+ assert_zero(r);
+
+ int k = htonl(42);
+ DBT key;
+ dbt_init(&key, &k, sizeof k);
+ DBT val;
+ memset(&val, 0, sizeof val);
+ r = c1->c_get(c1, &key, &val, DB_SET);
+ assert_zero(r);
+
+ r = c2->c_get(c2, &key, &val, DB_SET);
+ assert(r == expect_r);
+
+ r = c1->c_close(c1);
+ assert_zero(r);
+ r = c2->c_close(c2);
+ assert_zero(r);
+
+ r = txn1->commit(txn1, 0);
+ assert_zero(r);
+ r = txn2->commit(txn2, 0);
+ assert_zero(r);
+}
+
+int test_main(int argc, char *const argv[]) {
+ int r;
+
+ const char *env_dir = TOKU_TEST_FILENAME;
+ const char *db_filename = "lockingreadtest";
+
+ parse_args(argc, argv);
+
+ char rm_cmd[strlen(env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", env_dir);
+ r = system(rm_cmd);
+ assert_zero(r);
+
+ r = toku_os_mkdir(env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
+ assert_zero(r);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0);
+ assert_zero(r);
+ int env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN |
+ DB_INIT_LOCK | DB_INIT_LOG;
+ r = env->open(
+ env, env_dir, env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ assert_zero(r);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert_zero(r);
+ DB_TXN *create_txn = NULL;
+ r = env->txn_begin(env, NULL, &create_txn, 0);
+ assert_zero(r);
+ r = db->open(db,
+ create_txn,
+ db_filename,
+ NULL,
+ DB_BTREE,
+ DB_CREATE,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ assert_zero(r);
+ r = create_txn->commit(create_txn, 0);
+ assert_zero(r);
+
+ // add a record
+
+ DB_TXN *write_txn = NULL;
+ r = env->txn_begin(env, NULL, &write_txn, 0);
+ assert_zero(r);
+
+ int k = htonl(42);
+ int v = 42;
+ DBT key;
+ dbt_init(&key, &k, sizeof k);
+ DBT val;
+ dbt_init(&val, &v, sizeof v);
+ r = db->put(db, write_txn, &key, &val, DB_NOOVERWRITE);
+ assert_zero(r);
+ r = write_txn->commit(write_txn, 0);
+ assert_zero(r);
+
+ test_read_write_range(env, db, DB_TXN_SNAPSHOT, DB_LOCK_NOTGRANTED);
+ test_read_write_point(env, db, DB_TXN_SNAPSHOT, DB_LOCK_NOTGRANTED);
+
+ r = db->close(db, 0);
+ assert_zero(r);
+
+ r = env->close(env, 0);
+ assert_zero(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_3529_insert_2.cc b/storage/tokudb/PerconaFT/src/tests/test_3529_insert_2.cc
new file mode 100644
index 00000000..b0fe4ab7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_3529_insert_2.cc
@@ -0,0 +1,219 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that a serializable cursor locks deleted keys so that another transaction cannot insert into the range being scanned by the cursor.
+// we create a 2-level tree that looks like:
+//   root node with pivot key 2
+//   left leaf contains keys 0, 1, and 2
+//   right leaf contains keys 3 and 4
+// we delete key 2 while a snapshot txn exists so that garbage collection does not occur.
+// txn_a walks a cursor through the deleted keys.
+// when txn_a finishes reading the deleted keys, txn_b tries to insert key 2 and should get lock not granted.
+
+#include <db.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <pthread.h>
+
+static DB_ENV *env = NULL;
+static DB_TXN *txn_a = NULL;
+static DB_TXN *txn_b = NULL;
+static DB *db = NULL;
+static uint32_t db_page_size = 4096;
+// static uint32_t db_basement_size = 4096;
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static int
+my_compare(DB *this_db UU(), const DBT *a UU(), const DBT *b UU()) {
+ assert(a->size == b->size);
+ return memcmp(a->data, b->data, a->size);
+}
+
+static int
+my_generate_row(DB *dest_db UU(), DB *src_db UU(), DBT_ARRAY *dest_key_arrays UU(), DBT_ARRAY *dest_val_arrays UU(), const DBT *src_key UU(), const DBT *src_val UU()) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ assert(dest_key->flags == DB_DBT_REALLOC);
+ dest_key->data = toku_realloc(dest_key->data, src_key->size);
+ memcpy(dest_key->data, src_key->data, src_key->size);
+ dest_key->size = src_key->size;
+ assert(dest_val->flags == DB_DBT_REALLOC);
+ dest_val->data = toku_realloc(dest_val->data, src_val->size);
+ memcpy(dest_val->data, src_val->data, src_val->size);
+ dest_val->size = src_val->size;
+ return 0;
+}
+
+static int
+next_do_nothing(DBT const *UU(a), DBT const *UU(b), void *UU(c)) {
+ return 0;
+}
+
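+// do_insert_2 runs on a separate thread (spawned from the pread hook below) and
+// tries to insert the deleted key 2 under txn_b; it must fail with
+// DB_LOCK_NOTGRANTED because the serializable cursor in txn_a has locked the
+// range it is scanning.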
+static void *
+do_insert_2(void *arg) {
+ int r;
+ uint64_t key = 2;
+ char val[800]; memset(val, 0, sizeof val);
+ DBT k,v;
+ r = db->put(db, txn_b, dbt_init(&k, &key, sizeof key), dbt_init(&v, val, sizeof val), 0);
+ assert(r == DB_LOCK_NOTGRANTED);
+ return arg;
+}
+
+static ssize_t
+my_pread (int fd, void *buf, size_t count, off_t offset) {
+ static int my_pread_count = 0;
+ if (++my_pread_count == 5) {
+ pthread_t id;
+ pthread_create(&id, NULL, do_insert_2, NULL);
+ void *ret;
+ pthread_join(id, &ret);
+ }
+ return pread(fd, buf, count, offset);
+}
+
+static void
+run_test(void) {
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, my_generate_row); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, envdir, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, db_page_size);
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // build a tree with 2 leaf nodes
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DB_LOADER *loader = NULL;
+ r = env->create_loader(env, txn, &loader, db, 1, &db, NULL, NULL, 0); CKERR(r);
+ for (uint64_t i = 0; i < 5; i++) {
+ uint64_t key = i;
+ char val[800]; memset(val, 0, sizeof val);
+ DBT k,v;
+ r = loader->put(loader, dbt_init(&k, &key, sizeof key), dbt_init(&v, val, sizeof val)); CKERR(r);
+ }
+ r = loader->close(loader); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // delete key 2
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (uint64_t i = 2; i < 3; i++) {
+ uint64_t key = i;
+ DBT k;
+ r = db->del(db, txn, dbt_init(&k, &key, sizeof key), 0); CKERR(r);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // close and reopen
+ r = db->close(db, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // create a txn that will try to insert key 2 while the serializable cursor is walking through the tree
+ r = env->txn_begin(env, 0, &txn_b, 0); CKERR(r);
+
+ // walk a serializable cursor through the tree
+ r = env->txn_begin(env, 0, &txn_a, 0); CKERR(r);
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn_a, &cursor, 0); CKERR(r);
+ db_env_set_func_pread(my_pread);
+ while (1) {
+ r = cursor->c_getf_next(cursor, 0, next_do_nothing, NULL);
+ if (r != 0)
+ break;
+ }
+ db_env_set_func_pread(NULL);
+ r = cursor->c_close(cursor); CKERR(r);
+ r = txn_a->commit(txn_a, 0); CKERR(r);
+
+ r = txn_b->commit(txn_b, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static int
+usage(void) {
+ fprintf(stderr, "-v (verbose)\n");
+ fprintf(stderr, "-q (quiet)\n");
+ fprintf(stderr, "--envdir %s\n", envdir);
+ return 1;
+}
+
+int
+test_main (int argc , char * const argv[]) {
+ for (int i = 1 ; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--envdir") == 0 && i+1 < argc) {
+ envdir = argv[++i];
+ continue;
+ }
+ return usage();
+ }
+
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ int r;
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ run_test();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_3529_table_lock.cc b/storage/tokudb/PerconaFT/src/tests/test_3529_table_lock.cc
new file mode 100644
index 00000000..2d4e9ba1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_3529_table_lock.cc
@@ -0,0 +1,212 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that a serializable cursor locks deleted keys so that another transaction cannot insert into the range being scanned by the cursor.
+// we create a 2-level tree that looks like:
+//   root node with pivot key 2
+//   left leaf contains keys 0, 1, and 2
+//   right leaf contains keys 3 and 4
+// we delete keys 0, 1, and 2 while a snapshot txn exists so that garbage collection does not occur.
+// txn_a walks a cursor through the deleted keys.
+// when txn_a finishes reading the deleted keys, txn_b tries to get a table lock.
+// the table lock should fail since txn_a holds a read lock on the deleted key range.
+
+#include <db.h>
+#include <unistd.h>
+#include <sys/stat.h>
+
+static DB_ENV *env = NULL;
+static DB_TXN *txn_a = NULL;
+static DB_TXN *txn_b = NULL;
+static DB *db = NULL;
+static uint32_t db_page_size = 4096;
+// static uint32_t db_basement_size = 4096;
+static const char *envdir = TOKU_TEST_FILENAME;
+
+static int
+my_compare(DB *this_db UU(), const DBT *a UU(), const DBT *b UU()) {
+ assert(a->size == b->size);
+ return memcmp(a->data, b->data, a->size);
+}
+
+static int
+my_generate_row(DB *dest_db UU(), DB *src_db UU(), DBT_ARRAY *dest_key_arrays UU(), DBT_ARRAY *dest_val_arrays UU(), const DBT *src_key UU(), const DBT *src_val UU()) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ assert(dest_key->flags == DB_DBT_REALLOC);
+ dest_key->data = toku_realloc(dest_key->data, src_key->size);
+ memcpy(dest_key->data, src_key->data, src_key->size);
+ dest_key->size = src_key->size;
+ assert(dest_val->flags == DB_DBT_REALLOC);
+ dest_val->data = toku_realloc(dest_val->data, src_val->size);
+ memcpy(dest_val->data, src_val->data, src_val->size);
+ dest_val->size = src_val->size;
+ return 0;
+}
+
+static int
+next_do_nothing(DBT const *UU(a), DBT const *UU(b), void *UU(c)) {
+ return 0;
+}
+
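+// my_pread wraps the real pread(); on the 5th page read, while txn_a's
+// serializable cursor is still walking the tree, txn_b tries to pre-acquire a
+// table lock and must get DB_LOCK_NOTGRANTED.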
+static ssize_t
+my_pread (int fd, void *buf, size_t count, off_t offset) {
+ static int my_pread_count = 0;
+ if (++my_pread_count == 5) {
+ // try to acquire a table lock, should fail
+ int r = db->pre_acquire_table_lock(db, txn_b);
+ assert(r == DB_LOCK_NOTGRANTED);
+ }
+ return pread(fd, buf, count, offset);
+}
+
+static void
+run_test(void) {
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->set_redzone(env, 0); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, my_generate_row); CKERR(r);
+ r = env->set_default_bt_compare(env, my_compare); CKERR(r);
+ r = env->open(env, envdir, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, db_page_size);
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // build a tree with 2 leaf nodes
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DB_LOADER *loader = NULL;
+ r = env->create_loader(env, txn, &loader, db, 1, &db, NULL, NULL, 0); CKERR(r);
+ for (uint64_t i = 0; i < 5; i++) {
+ uint64_t key = i;
+ char val[800]; memset(val, 0, sizeof val);
+ DBT k,v;
+ r = loader->put(loader, dbt_init(&k, &key, sizeof key), dbt_init(&v, val, sizeof val)); CKERR(r);
+ }
+ r = loader->close(loader); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // this transaction ensure that garbage collection does not occur when deleting
+ DB_TXN *bogus_txn = NULL;
+ r = env->txn_begin(env, 0, &bogus_txn, DB_TXN_SNAPSHOT); CKERR(r);
+
+ // delete the keys in the first leaf node
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ for (uint64_t i = 0; i < 3; i++) {
+ uint64_t key = i;
+ DBT k;
+ r = db->del(db, txn, dbt_init(&k, &key, sizeof key), 0); CKERR(r);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+ r = bogus_txn->commit(bogus_txn, 0); CKERR(r);
+
+ // close and reopen
+ r = db->close(db, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // create a txn that will try to acquire a write lock on key 0 in the pread callback
+ r = env->txn_begin(env, 0, &txn_b, 0); CKERR(r);
+
+ // walk a serializable cursor through the tree
+ r = env->txn_begin(env, 0, &txn_a, 0); CKERR(r);
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn_a, &cursor, 0); CKERR(r);
+ db_env_set_func_pread(my_pread);
+ while (1) {
+ r = cursor->c_getf_next(cursor, 0, next_do_nothing, NULL);
+ if (r != 0)
+ break;
+ }
+ db_env_set_func_pread(NULL);
+ r = cursor->c_close(cursor); CKERR(r);
+ r = txn_a->commit(txn_a, 0); CKERR(r);
+
+ r = txn_b->commit(txn_b, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static int
+usage(void) {
+ fprintf(stderr, "-v (verbose)\n");
+ fprintf(stderr, "-q (quiet)\n");
+ fprintf(stderr, "--envdir %s\n", envdir);
+ return 1;
+}
+
+int
+test_main (int argc , char * const argv[]) {
+ for (int i = 1 ; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0)
+ verbose--;
+ continue;
+ }
+ if (strcmp(argv[i], "--envdir") == 0 && i+1 < argc) {
+ envdir = argv[++i];
+ continue;
+ }
+ return usage();
+ }
+
+ char rmcmd[32 + strlen(envdir)];
+ snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir);
+ int r;
+ r = system(rmcmd); CKERR(r);
+ r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ run_test();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_3645.cc b/storage/tokudb/PerconaFT/src/tests/test_3645.cc
new file mode 100644
index 00000000..e1fa37be
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_3645.cc
@@ -0,0 +1,350 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+//
+// This test verifies that running evictions on a writer thread
+// is ok. We create a dictionary bigger than the cachetable (around 4x greater).
+// Then, we spawn a bunch of pthreads that do the following:
+// - scan dictionary forward with bulk fetch
+// - scan dictionary forward slowly
+// - scan dictionary backward with bulk fetch
+// - scan dictionary backward slowly
+// - update existing values in the dictionary with db->put(DB_YESOVERWRITE)
+// With the small cachetable, this should produce quite a bit of churn in reading in and evicting nodes.
+// If the test runs to completion without crashing, we consider it a success.
+//
+
+bool run_test;
+int time_of_test;
+int num_elements;
+
+struct arg {
+ int n;
+ DB *db;
+ DB_ENV* env;
+ bool fast;
+ bool fwd;
+};
+
+static int
+go_fast(DBT const *a, DBT const *b, void *c) {
+ assert(a);
+ assert(b);
+ assert(c==NULL);
+ return TOKUDB_CURSOR_CONTINUE;
+}
+static int
+go_slow(DBT const *a, DBT const *b, void *c) {
+ assert(a);
+ assert(b);
+ assert(c==NULL);
+ return 0;
+}
+
+static void *scan_db(void *arg) {
+ struct arg *myarg = (struct arg *) arg;
+ DB_ENV* env = myarg->env;
+ DB* db = myarg->db;
+ DB_TXN* txn = NULL;
+ while(run_test) {
+ int r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT); CKERR(r);
+ DBC* cursor = NULL;
+ { int chk_r = db->cursor(db, txn, &cursor, 0); CKERR(chk_r); }
+ while (r != DB_NOTFOUND) {
+ if (myarg->fwd) {
+ r = cursor->c_getf_next(cursor, 0, myarg->fast ? go_fast : go_slow, NULL);
+ }
+ else {
+ r = cursor->c_getf_prev(cursor, 0, myarg->fast ? go_fast : go_slow, NULL);
+ }
+ assert(r==0 || r==DB_NOTFOUND);
+ }
+
+ { int chk_r = cursor->c_close(cursor); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+ }
+ return arg;
+}
+
+static void *ptquery_db(void *arg) {
+ struct arg *myarg = (struct arg *) arg;
+ DB_ENV* env = myarg->env;
+ DB* db = myarg->db;
+ DB_TXN* txn = NULL;
+ int n = myarg->n;
+ while(run_test) {
+ int r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT); CKERR(r);
+ int rand_key = random() % n;
+ DBT key;
+ DBT val;
+ memset(&val, 0, sizeof(val));
+ dbt_init(&key, &rand_key, sizeof(rand_key));
+ r = db->get(db, txn, &key, &val, 0);
+ assert(r != DB_NOTFOUND);
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+ }
+ return arg;
+}
+
+static void *update_db(void *arg) {
+ struct arg *myarg = (struct arg *) arg;
+ DB_ENV* env = myarg->env;
+ DB* db = myarg->db;
+ int n = myarg->n;
+
+ DB_TXN* txn = NULL;
+ while (run_test) {
+ int r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT); CKERR(r);
+ for (uint32_t i = 0; i < 1000; i++) {
+ int rand_key = random() % n;
+ int rand_val = random();
+ DBT key, val;
+ r = db->put(
+ db,
+ txn,
+ dbt_init(&key, &rand_key, sizeof(rand_key)),
+ dbt_init(&val, &rand_val, sizeof(rand_val)),
+ 0
+ );
+ CKERR(r);
+ }
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+ }
+ return arg;
+}
+
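+// test_time sleeps for the configured number of seconds and then clears
+// run_test, which lets the scanner, updater, and point-query threads finish
+// their current iteration and exit.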
+static void *test_time(void *arg) {
+ assert(arg == NULL);
+ usleep(time_of_test*1000*1000);
+ if (verbose) printf("should now end test\n");
+ run_test = false;
+ return arg;
+}
+
+
+static void
+test_evictions (void) {
+ int n = num_elements;
+ if (verbose) printf("test_3645:%d \n", n);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.bulk_fetch.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r=env->set_default_bt_compare(env, int_dbt_cmp); CKERR(r);
+    // set the cache size small (100000 bytes) so the dictionary does not fit
+ r = env->set_cachesize(env, 0, 100000, 1); CKERR(r);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->checkpointing_set_period(env, 10);
+ CKERR(r);
+
+
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ r = db->set_flags(db, 0);
+ assert(r == 0);
+ r = db->set_pagesize(db, 4096);
+ assert(r == 0);
+ r = db->set_readpagesize(db, 1024);
+ assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+
+ int keys[n];
+ for (int i=0; i<n; i++) {
+ keys[i] = i;
+ }
+
+ if (verbose) printf("starting insertion of elements to setup test\n");
+ for (int i=0; i<n; i++) {
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, &keys[i], sizeof keys[i]), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+ }
+
+ //
+ // the threads that we want:
+ // - one thread constantly updating random values
+ // - one thread doing table scan with bulk fetch
+ // - one thread doing table scan without bulk fetch
+ // - one thread doing random point queries
+ //
+ run_test = true;
+ if (verbose) printf("starting creation of pthreads\n");
+ toku_pthread_t mytids[7];
+ struct arg myargs[7];
+ for (uint32_t i = 0; i < sizeof(myargs)/sizeof(myargs[0]); i++) {
+ myargs[i].n = n;
+ myargs[i].db = db;
+ myargs[i].env = env;
+ myargs[i].fast = true;
+ myargs[i].fwd = true;
+ }
+
+ // make the forward fast scanner
+ myargs[0].fast = true;
+ myargs[0].fwd = true;
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &mytids[0], nullptr, scan_db, &myargs[0]);
+ CKERR(chk_r);
+ }
+
+ // make the forward slow scanner
+ myargs[1].fast = false;
+ myargs[1].fwd = true;
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &mytids[1], nullptr, scan_db, &myargs[1]);
+ CKERR(chk_r);
+ }
+
+ // make the backward fast scanner
+ myargs[2].fast = true;
+ myargs[2].fwd = false;
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &mytids[2], nullptr, scan_db, &myargs[2]);
+ CKERR(chk_r);
+ }
+
+ // make the backward slow scanner
+ myargs[3].fast = false;
+ myargs[3].fwd = false;
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &mytids[3], nullptr, scan_db, &myargs[3]);
+ CKERR(chk_r);
+ }
+
+ // make the guy that updates the db
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &mytids[4], nullptr, update_db, &myargs[4]);
+ CKERR(chk_r);
+ }
+
+ // make the guy that does point queries
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &mytids[5], nullptr, ptquery_db, &myargs[5]);
+ CKERR(chk_r);
+ }
+
+ // make the guy that sleeps
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &mytids[6], nullptr, test_time, nullptr);
+ CKERR(chk_r);
+ }
+
+ for (uint32_t i = 0; i < sizeof(myargs) / sizeof(myargs[0]); i++) {
+ void *ret;
+ r = toku_pthread_join(mytids[i], &ret); assert_zero(r);
+ }
+ if (verbose) printf("ending test, pthreads have joined\n");
+
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static inline void parse_3645_args (int argc, char *const argv[]) {
+ const char *argv0=argv[0];
+ while (argc>1) {
+ int resultcode=0;
+ if (strcmp(argv[1], "-v")==0) {
+ verbose++;
+ }
+ else if (strcmp(argv[1], "-q")==0) {
+ verbose=0;
+ }
+ else if (strcmp(argv[1], "-h")==0) {
+ do_usage:
+            fprintf(stderr, "Usage:\n%s [-v|-h|-q|--num_elements number|--num_seconds number]\n", argv0);
+ exit(resultcode);
+ }
+ else if (strcmp(argv[1], "--num_elements") == 0) {
+ argc--;
+ argv++;
+ num_elements = atoi(argv[1]);
+ }
+ else if (strcmp(argv[1], "--num_seconds") == 0) {
+ argc--;
+ argv++;
+ time_of_test = atoi(argv[1]);
+ }
+ else {
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+ // default values
+ num_elements = 100000;
+ time_of_test = 60;
+ parse_3645_args(argc, argv);
+ test_evictions();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_3755.cc b/storage/tokudb/PerconaFT/src/tests/test_3755.cc
new file mode 100644
index 00000000..67ef3b4b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_3755.cc
@@ -0,0 +1,156 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// stress test for update broadcast. 10M 8-byte keys should be 2, maybe 3
+// levels of treeness, makes sure flushes work
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const unsigned int NUM_KEYS = 1024;
+
+
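+// update_fun is the env-wide update callback: each broadcast carries the new
+// value, and the callback asserts that the stored value is exactly one less
+// before overwriting it, so a missed or doubled broadcast trips the assert.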
+static int update_fun(DB *UU(db),
+ const DBT *UU(key),
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ assert(extra->size == sizeof(unsigned int));
+ assert(old_val->size == sizeof(unsigned int));
+ unsigned int e = *(unsigned int *)extra->data;
+ unsigned int ov = *(unsigned int *)old_val->data;
+ assert(e == (ov+1));
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &e, sizeof(e)), set_extra);
+ }
+ //usleep(10);
+ return 0;
+}
+
+static int
+int_cmp(DB *UU(db), const DBT *a, const DBT *b) {
+ unsigned int *ap, *bp;
+ assert(a->size == sizeof(*ap));
+ CAST_FROM_VOIDP(ap, a->data);
+ assert(b->size == sizeof(*bp));
+ CAST_FROM_VOIDP(bp, b->data);
+ return (*ap > *bp) - (*ap < *bp);
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->set_default_bt_compare(env, int_cmp); CKERR(chk_r); }
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ // make a really small checkpointing period
+ { int chk_r = env->checkpointing_set_period(env,1); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = 0;
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db, unsigned int i) {
+ DBT extra;
+ unsigned int e = i;
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ int r = db->update_broadcast(db, txn, extrap, 0); CKERR(r);
+ return r;
+}
+
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->set_pagesize(db, 1<<8); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
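+    // Each round broadcasts the value i, then reads back every key and
+    // checks that the update was applied everywhere.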
+ for(unsigned int i = 1; i < 100; i++) {
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db, i); CKERR(chk_r); }
+ });
+ for (unsigned int curr_key = 0; curr_key < NUM_KEYS; ++curr_key) {
+ DBT key, val;
+ unsigned int *vp;
+ DBT *keyp = dbt_init(&key, &curr_key, sizeof(curr_key));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db->get(db, txn_3, keyp, valp, 0); CKERR(chk_r); }
+ });
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ assert(*vp==i);
+ }
+ }
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_4015.cc b/storage/tokudb/PerconaFT/src/tests/test_4015.cc
new file mode 100644
index 00000000..1231e3b4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_4015.cc
@@ -0,0 +1,179 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <portability/toku_atomic.h>
+
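+// Comparison function that also checks the current descriptor starts with
+// "foo" and yields, presumably to widen the race window with change_descriptor.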
+static int my_compare (DB *db, const DBT *a, const DBT *b) {
+ assert(db);
+ assert(db->cmp_descriptor);
+ assert(db->cmp_descriptor->dbt.size >= 3);
+ char *CAST_FROM_VOIDP(data, db->cmp_descriptor->dbt.data);
+ assert(data[0]=='f');
+ assert(data[1]=='o');
+ assert(data[2]=='o');
+ if (verbose) printf("compare descriptor=%s\n", data);
+ sched_yield();
+ return uint_dbt_cmp(db, a, b);
+}
+
+DB_ENV *env;
+DB *db;
+const char *env_dir = TOKU_TEST_FILENAME;
+volatile int done = 0;
+
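+// Thread A: insert random keys, retrying whenever the lock is not granted,
+// while the main thread (startB) concurrently changes the descriptor.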
+static void *startA (void *ignore __attribute__((__unused__))) {
+ for (int i=0;i<999; i++) {
+ DBT k,v;
+ int a = (random()<<16) + i;
+ dbt_init(&k, &a, sizeof(a));
+ dbt_init(&v, &a, sizeof(a));
+ DB_TXN *txn;
+ again:
+ { int chk_r = env->txn_begin(env, NULL, &txn, DB_TXN_NOSYNC); CKERR(chk_r); }
+ {
+ int r = db->put(db, txn, &k, &v, 0);
+ if (r==DB_LOCK_NOTGRANTED) {
+ if (verbose) printf("lock not granted on %d\n", i);
+ { int chk_r = txn->abort(txn); CKERR(chk_r); }
+ goto again;
+ }
+ assert(r==0);
+ }
+ { int chk_r = txn->commit(txn, 0); CKERR(chk_r); }
+ }
+ int r __attribute__((__unused__)) = toku_sync_fetch_and_add(&done, 1);
+ return NULL;
+}
+static void change_descriptor (DB_TXN *txn, int i) {
+ DBT desc;
+ char foo[100];
+ snprintf(foo, 99, "foo%d", i);
+ dbt_init(&desc, foo, 1+strlen(foo));
+ int r;
+ if (verbose) printf("trying to change to %s\n", foo);
+ while ((r=db->change_descriptor(db, txn, &desc, 0))) {
+ if (verbose) printf("Change failed r=%d, try again\n", r);
+ }
+ if (verbose) printf("ok\n");
+}
+static void startB (void) {
+ for (int i=0; !done; i++) {
+ IN_TXN_COMMIT(env, NULL, txn, 0,
+ change_descriptor(txn, i));
+ sched_yield();
+ }
+}
+
+static void my_parse_args (int argc, char * const argv[]) {
+ const char *argv0=argv[0];
+ while (argc>1) {
+ int resultcode=0;
+ if (strcmp(argv[1], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[1],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[1],"--envdir")==0) {
+ assert(argc>2);
+ env_dir = argv[2];
+ argc--;
+ argv++;
+ } else if (strcmp(argv[1], "-h")==0) {
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q] [-h] [--envdir <envdir>]\n", argv0);
+ exit(resultcode);
+ } else {
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+int test_main(int argc, char * const argv[]) {
+ my_parse_args(argc, argv);
+
+ db_env_set_num_bucket_mutexes(32);
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ { int chk_r = env->set_redzone(env, 0); CKERR(chk_r); }
+ { int chk_r = env->set_default_bt_compare(env, my_compare); CKERR(chk_r); }
+ {
+ const int size = 10+strlen(env_dir);
+ char cmd[size];
+ snprintf(cmd, size, "rm -rf %s", env_dir);
+ int r = system(cmd);
+ CKERR(r);
+ }
+ { int chk_r = toku_os_mkdir(env_dir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+ { int chk_r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->set_pagesize(db, 1024); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ IN_TXN_COMMIT(env, NULL, txn, 0, {
+ int chk_r =
+ db->change_descriptor(db, txn, &desc, DB_UPDATE_CMP_DESCRIPTOR);
+ CKERR(chk_r);
+ });
+ pthread_t thd;
+ {
+ int chk_r =
+ toku_pthread_create(
+ toku_uninstrumented, &thd, nullptr, startA, nullptr);
+ CKERR(chk_r);
+ }
+
+ startB();
+
+ void *retval;
+ { int chk_r = toku_pthread_join(thd, &retval); CKERR(chk_r); }
+ assert(retval==NULL);
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_4368.cc b/storage/tokudb/PerconaFT/src/tests/test_4368.cc
new file mode 100644
index 00000000..21a3050f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_4368.cc
@@ -0,0 +1,71 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Test for 4368: call hot_optimize on an empty dictionary whose tree is just a root node. */
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ DB_ENV *env;
+ DB *db;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_create(&db, env, 0); assert(r==0);
+ r = db->open(db, NULL, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+
+ // call hot_optimize on an empty db. The empty db should have only a root node, which should invoke the bug
+ uint64_t loops_run;
+ r = db->hot_optimize(db, NULL, NULL, NULL, NULL, &loops_run); assert_zero(r);
+
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_4657.cc b/storage/tokudb/PerconaFT/src/tests/test_4657.cc
new file mode 100644
index 00000000..70c9eb16
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_4657.cc
@@ -0,0 +1,133 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+//
+// test that nodes written out for checkpointing properly update the stats
+// insert a bunch of elements, but not too many. The amount of data should
+// fit into a single leaf node. Then we :
+// - checkpoint
+// - close dictionary
+// - reopen dictionary
+// - call stat64
+// prior to the fix for 4657, the stats would return
+// 0 rows. After the fix, the stats should return an
+// accurate number of rows
+//
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ DB *db;
+ {
+ DB_TXN *txna;
+ r = env->txn_begin(env, NULL, &txna, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txna, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ r = txna->commit(txna, 0); CKERR(r);
+ }
+    if (verbose) printf("starting insertion of elements\n");
+ //
+ // now insert 1000 elements
+ //
+ DB_TXN* txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ for (uint32_t i = 0; i < 1000; i++) {
+ DBT key,val;
+ uint64_t key_data = i;
+ uint64_t val_data = i;
+ r = db->put(
+ db,
+ txn,
+ dbt_init(&key, &key_data, sizeof(key_data)),
+ dbt_init(&val, &val_data, sizeof(val_data)),
+ 0
+ );
+ CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ //
+ // the assumption here is that the db consists
+ // of a single leaf node that is the root.
+ //
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+
+ // now reopen
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_THREAD, 0666);
+ CKERR(r);
+ DB_BTREE_STAT64 dict_stats;
+ r = db->stat64(
+ db,
+ NULL,
+ &dict_stats
+ );
+ CKERR(r);
+ // check that stats are correct
+ assert(dict_stats.bt_nkeys == 1000);
+
+ r = db->close(db, 0);
+ CKERR(r);
+
+ r = env->close(env, 0);
+ CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_5015.cc b/storage/tokudb/PerconaFT/src/tests/test_5015.cc
new file mode 100644
index 00000000..619a93f3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_5015.cc
@@ -0,0 +1,99 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+
+#include <unistd.h>
+#include <db.h>
+#include <errno.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
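+// Scenario: prepare a child transaction (two-phase commit) inside a parent,
+// take a checkpoint while the prepared transaction is outstanding, then
+// commit the child and the parent.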
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.already.exists.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ DB_TXN* parent_txn = NULL;
+ DB_TXN* child_txn = NULL;
+ r = env->txn_begin(env, 0, &parent_txn, 0);
+ CKERR(r);
+ r = env->txn_begin(env, parent_txn, &child_txn, 0);
+ CKERR(r);
+ DBT key,val;
+ r = db->put(db, child_txn, dbt_init(&key, "a", 2), dbt_init(&val, "a", 2), 0);
+ CKERR(r);
+ uint8_t gid[DB_GID_SIZE];
+ memset(gid, 0, DB_GID_SIZE);
+ gid[0]='a';
+ r = child_txn->prepare(child_txn, gid, 0);
+ CKERR(r);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
+ r = child_txn->commit(child_txn, 0);
+ CKERR(r);
+ r = parent_txn->commit(parent_txn, 0);
+ CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_5469.cc b/storage/tokudb/PerconaFT/src/tests/test_5469.cc
new file mode 100644
index 00000000..5e58057a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_5469.cc
@@ -0,0 +1,172 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
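+// Loader row-generate callback: emit a single destination row that is a copy
+// of the source key/value pair.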
+static int put_multiple_generate(DB *UU(dest_db), DB *UU(src_db), DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ dest_key->flags = 0;
+ dest_val->flags = 0;
+ dbt_init(dest_key, src_key->data, src_key->size);
+ dbt_init(dest_val, src_val->data, src_val->size);
+ return 0;
+}
+
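+// Build a loader inside a transaction, optionally abort the loader and/or the
+// transaction, then verify that exactly the expected rows survive.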
+static void
+test_loader_abort (bool do_compress, bool abort_loader, bool abort_txn) {
+ DB_ENV * env;
+ DB *db;
+ DB_TXN *txn;
+ DB_TXN* const null_txn = 0;
+ const char * const fname = "test.loader_abort.ft_handle";
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_LOADER *loader;
+ uint32_t db_flags = 0;
+ uint32_t dbt_flags = 0;
+ uint32_t loader_flags = do_compress ? LOADER_COMPRESS_INTERMEDIATES : 0;
+ DBC* cursor = NULL;
+
+ /* create the dup database file */
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->set_generate_row_callback_for_put(env, put_multiple_generate);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); assert(r == 0);
+    db->set_errfile(db,stderr); // Send errors to stderr
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ r = env->create_loader(env, txn, &loader, db, 1, &db, &db_flags, &dbt_flags, loader_flags);
+ CKERR(r);
+
+ DBT key, val;
+ uint32_t k;
+ uint32_t v;
+ uint32_t num_elements = 2;
+ for (uint32_t i = 0; i < num_elements; i++) {
+ k = i;
+ v = i;
+ r = loader->put(
+ loader,
+ dbt_init(&key, &k, sizeof k),
+ dbt_init(&val, &v, sizeof v)
+ );
+ assert(r == 0);
+ }
+ if (abort_loader) {
+ loader->abort(loader);
+ }
+ else {
+ loader->close(loader);
+ }
+ k = num_elements;
+ v = num_elements;
+    r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+    CKERR(r);
+
+ if (abort_txn) {
+ r = txn->abort(txn);
+ CKERR(r);
+ }
+ else {
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ DBT k1; memset(&k1, 0, sizeof k1);
+ DBT v1; memset(&v1, 0, sizeof v1);
+ if (!abort_txn) {
+ if (!abort_loader) {
+ for (uint32_t i = 0; i < num_elements; i++) {
+ r = cursor->c_get(cursor, &k1, &v1, DB_NEXT); assert(r == 0);
+ assert(k1.size == sizeof(uint32_t));
+ assert(v1.size == sizeof(uint32_t));
+ assert(*(uint32_t *)k1.data == i);
+ assert(*(uint32_t *)v1.data == i);
+ }
+ }
+ r = cursor->c_get(cursor, &k1, &v1, DB_NEXT); assert(r == 0);
+ assert(k1.size == sizeof(uint32_t));
+ assert(v1.size == sizeof(uint32_t));
+ assert(*(uint32_t *)k1.data == num_elements);
+ assert(*(uint32_t *)v1.data == num_elements);
+ }
+ r = cursor->c_get(cursor, &k1, &v1, DB_NEXT); assert(r == DB_NOTFOUND);
+
+ r = cursor->c_close(cursor); assert(r == 0);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
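+    // Exercise all eight combinations of (compress, abort_loader, abort_txn).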
+ test_loader_abort(false, false, true);
+ test_loader_abort(false, true, true);
+ test_loader_abort(true, false, true);
+ test_loader_abort(true, true, true);
+ test_loader_abort(false, false, false);
+ test_loader_abort(false, true, false);
+ test_loader_abort(true, false, false);
+ test_loader_abort(true, true, false);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_789.cc b/storage/tokudb/PerconaFT/src/tests/test_789.cc
new file mode 100644
index 00000000..d76c7711
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_789.cc
@@ -0,0 +1,177 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+#include <memory.h>
+
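+// Exercise nested transactions: a child that commits inside a parent which
+// later aborts must have its updates and deletes rolled back, while a child
+// inside a committing parent keeps them.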
+static void
+test_789(void) {
+ int r;
+
+ /* setup test directory */
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ /* setup environment */
+ DB_ENV *env;
+ {
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stdout);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL + DB_INIT_LOG + DB_INIT_LOCK + DB_INIT_TXN + DB_PRIVATE + DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+ }
+
+ /* setup database */
+ DB *db;
+ {
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+ }
+
+ /* insert, commit */
+ {
+ DB_TXN *txn_master;
+ r = env->txn_begin(env, 0, &txn_master, 0); assert(r == 0);
+ DB_TXN *txn;
+ r = env->txn_begin(env, txn_master, &txn, 0); assert(r == 0);
+ int i;
+ for (i=0; i<3; i++) {
+ int k = htonl(i);
+ int v = 0;
+ DBT key, val;
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+ }
+ r = txn->commit(txn, 0); assert(r == 0);
+ r = txn_master->commit(txn_master, 0); assert(r == 0);
+ }
+
+ /* update, rollback */
+ {
+ DB_TXN *txn_master;
+ r = env->txn_begin(env, 0, &txn_master, 0); assert(r == 0);
+ DB_TXN *txn;
+ r = env->txn_begin(env, txn_master, &txn, 0); assert(r == 0);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ DBT key, val;
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_NEXT); assert(r == 0);
+ *(char*)val.data = 1;
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+ r = cursor->c_close(cursor); assert(r == 0);
+ toku_free(key.data); toku_free(val.data);
+ r = txn->commit(txn, 0); assert(r == 0);
+ r = txn_master->abort(txn_master); assert(r == 0);
+ }
+
+ /* delete, rollback */
+ {
+ DB_TXN *txn_master;
+ r = env->txn_begin(env, 0, &txn_master, 0); assert(r == 0);
+ DB_TXN *txn;
+ r = env->txn_begin(env, txn_master, &txn, 0); assert(r == 0);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ DBT key, val;
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_NEXT); assert(r == 0);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); assert(r == 0);
+ r = cursor->c_close(cursor); assert(r == 0);
+ toku_free(key.data); toku_free(val.data);
+ r = txn->commit(txn, 0); assert(r == 0);
+ r = txn_master->abort(txn_master); assert(r == 0);
+ }
+
+ /* update, commit */
+ {
+ DB_TXN *txn_master;
+ r = env->txn_begin(env, 0, &txn_master, 0); assert(r == 0);
+ DB_TXN *txn;
+ r = env->txn_begin(env, txn_master, &txn, 0); assert(r == 0);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ DBT key, val;
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_NEXT); assert(r == 0);
+ *(char*)val.data = 2;
+ r = db->put(db, txn, &key, &val, 0); assert(r == 0);
+ r = cursor->c_close(cursor); assert(r == 0);
+ toku_free(key.data); toku_free(val.data);
+ r = txn->commit(txn, 0); assert(r == 0);
+ r = txn_master->commit(txn_master, 0); assert(r == 0);
+ }
+
+ /* delete, commit */
+ {
+ DB_TXN *txn_master;
+ r = env->txn_begin(env, 0, &txn_master, 0); assert(r == 0);
+ DB_TXN *txn;
+ r = env->txn_begin(env, txn_master, &txn, 0); assert(r == 0);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ DBT key, val;
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_NEXT); assert(r == 0);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); assert(r == 0);
+ r = cursor->c_close(cursor); assert(r == 0);
+ toku_free(key.data); toku_free(val.data);
+ r = txn->commit(txn, 0); assert(r == 0);
+ r = txn_master->commit(txn_master, 0); assert(r == 0);
+ }
+
+ /* close db */
+ r = db->close(db, 0); assert(r == 0);
+
+ /* close env */
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int UU(argc), char UU(*const argv[])) {
+ test_789();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_935.cc b/storage/tokudb/PerconaFT/src/tests/test_935.cc
new file mode 100644
index 00000000..ba83b7c0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_935.cc
@@ -0,0 +1,132 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+#include <memory.h>
+
+static void
+testit (const int klen, const int vlen, const int n, const int lastvlen) {
+ if (verbose) printf("testit %d %d %d %d\n", klen, vlen, n, lastvlen);
+
+ int r;
+
+ // setup test directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ // setup environment
+ DB_ENV *env;
+ {
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stdout);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL + DB_PRIVATE + DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+ }
+
+ // setup database
+ DB *db;
+ {
+ DB_TXN *txn = 0;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+ }
+
+ // insert to fill up a node
+ {
+ void *v = toku_malloc(vlen); assert(v); memset(v, 0, vlen);
+ DB_TXN *txn = 0;
+ int i;
+ for (i=0; i<n; i++) {
+ int k = htonl(i);
+ assert(sizeof k == klen);
+ DBT key, val;
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, v, vlen), 0);
+ assert(r == 0);
+ }
+ if (lastvlen > 0) {
+ int k = htonl(n);
+ DBT key, val;
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, v, lastvlen), 0);
+ assert(r == 0);
+ }
+ toku_free(v);
+ }
+
+ // add another one to force a node split
+ {
+ void *v = toku_malloc(vlen); assert(v); memset(v, 0, vlen);
+ DB_TXN *txn = 0;
+ int k = htonl(n+1);
+ DBT key, val;
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, v, vlen), 0);
+ assert(r == 0);
+ toku_free(v);
+ }
+
+ // close db
+ r = db->close(db, 0); assert(r == 0);
+
+ // close env
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
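+    // The constants below appear to approximate the node header and leaf-entry
+    // overheads for a one-megabyte node: n entries nearly fill the node, and
+    // lastvlen is tuned so the final entry lands just under, exactly at, and
+    // just over the boundary in the three calls to testit().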
+ const int meg = 1024*1024;
+ const int headeroverhead = 12*4;
+ const int numentries = 4;
+ const int klen = 4;
+ const int vlen = 4096;
+ const int leafoverhead = 1+8+4+4;
+ const int leafentrysize = leafoverhead+klen+vlen;
+ int n = (meg - headeroverhead - numentries) / leafentrysize;
+ int left = meg - headeroverhead - numentries - n*leafentrysize;
+ int lastvlen = left - leafoverhead - klen;
+ testit(klen, vlen, n, lastvlen-1);
+ testit(klen, vlen, n, lastvlen-0);
+ testit(klen, vlen, n, lastvlen+1);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_abort1.cc b/storage/tokudb/PerconaFT/src/tests/test_abort1.cc
new file mode 100644
index 00000000..7b603648
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_abort1.cc
@@ -0,0 +1,198 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Simple test of transaction aborts with logging enabled: an aborted db create
+ * must leave no file behind, and aborted puts must not be visible. */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+#include <memory.h>
+#include <stdio.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+static void
+test_db_open_aborts (void) {
+ DB_ENV *env;
+ DB *db;
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ {
+ DB_TXN *tid;
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ {
+ DBT key,data;
+ dbt_init(&key, "hello", 6);
+ dbt_init(&data, "there", 6);
+ r=db->put(db, tid, &key, &data, 0);
+ CKERR(r);
+ }
+ r=db->close(db, 0); assert(r==0);
+ r=tid->abort(tid); assert(r==0);
+ }
+ {
+ {
+ DBT dname;
+ DBT iname;
+ dbt_init(&dname, "foo.db", sizeof("foo.db"));
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &dname, &iname);
+ CKERR2(r, DB_NOTFOUND);
+ }
+ toku_struct_stat statbuf;
+ char filename[TOKU_PATH_MAX + 1];
+ r = toku_stat(toku_path_join(filename, 2, TOKU_TEST_FILENAME, "foo.db"),
+ &statbuf,
+ toku_uninstrumented);
+ assert(r != 0);
+ assert(errno == ENOENT);
+ }
+
+ r=env->close(env, 0); assert(r==0);
+}
+
+// Do two transactions, one commits, and one aborts. Do them concurrently.
+static void
+test_db_put_aborts (void) {
+ DB_ENV *env;
+ DB *db;
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ {
+ DB_TXN *tid;
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid,0); assert(r==0);
+ }
+ {
+ DB_TXN *tid;
+ DB_TXN *tid2;
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid2, 0); assert(r==0);
+ {
+ DBT key,data;
+ dbt_init(&key, "hello", 6);
+ dbt_init(&data, "there", 6);
+ r=db->put(db, tid, &key, &data, 0);
+ CKERR(r);
+ }
+ {
+ DBT key,data;
+ dbt_init(&key, "bye", 4);
+ dbt_init(&data, "now", 4);
+ r=db->put(db, tid2, &key, &data, 0);
+ CKERR(r);
+ }
+ //printf("%s:%d aborting\n", __FILE__, __LINE__);
+ r=tid->abort(tid); assert(r==0);
+ //printf("%s:%d committing\n", __FILE__, __LINE__);
+ r=tid2->commit(tid2,0); assert(r==0);
+ }
+ // The database should exist
+ {
+ char *filename;
+ {
+ DBT dname;
+ DBT iname;
+ dbt_init(&dname, "foo.db", sizeof("foo.db"));
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &dname, &iname);
+ CKERR(r);
+ CAST_FROM_VOIDP(filename, iname.data);
+ assert(filename);
+ }
+ toku_struct_stat statbuf;
+ char fullfile[TOKU_PATH_MAX + 1];
+ r = toku_stat(toku_path_join(fullfile, 2, TOKU_TEST_FILENAME, filename),
+ &statbuf,
+ toku_uninstrumented);
+ assert(r == 0);
+ toku_free(filename);
+ }
+ // But the item should not be in it.
+ if (1)
+ {
+ DB_TXN *tid;
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ {
+ DBT key,data;
+ dbt_init(&key, "hello", 6);
+ dbt_init(&data, NULL, 0);
+ r=db->get(db, tid, &key, &data, 0);
+ assert(r!=0);
+ assert(r==DB_NOTFOUND);
+ }
+ {
+ DBT key,data;
+ dbt_init(&key, "bye", 4);
+ dbt_init(&data, NULL, 0);
+ r=db->get(db, tid, &key, &data, 0);
+ CKERR(r);
+ }
+ r=tid->commit(tid,0); assert(r==0);
+ }
+
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+}
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ test_db_open_aborts();
+ test_db_put_aborts();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_abort2.cc b/storage/tokudb/PerconaFT/src/tests/test_abort2.cc
new file mode 100644
index 00000000..b024248b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_abort2.cc
@@ -0,0 +1,147 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Put some insert messages into an internal buffer (by first creating a DB, filling it up, then closing it, and reopening, and inserting a few things)
+ * Then perform a transaction that overwrites some of those internal things.
+ * Then abort the transaction.
+ * Make sure those middle things made it back into the tree.
+ */
+
+#include <db.h>
+#include <sys/stat.h>
+
+static DB_ENV *env;
+static DB *db;
+static DB_TXN *txn;
+
+static void
+insert (int i, int j) {
+ char hello[30], there[230];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "%dthere%d %*s", j, i, 10+i%40, "padding");
+ int r = db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void
+do_test_abort2 (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+    r=db->set_pagesize(db, 4096); CKERR(r); // Use a small page
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ {
+ int i;
+ for (i=0; i<1000; i++) {
+ insert(4*i, 0);
+ }
+ }
+ r=txn->commit(txn, 0); CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+
+ //printf("%s:%d\n", __FILE__, __LINE__);
+
+ // Now do a few inserts that abort.
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ insert(3, 0);
+ insert(5, 0);
+ insert(7, 0);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ insert(7, 1);
+ r=txn->abort(txn); CKERR(r);
+
+
+ //printf("%s:%d\n", __FILE__, __LINE__);
+ //r=db->close(db,0); CKERR(r); r=env->close(env, 0); CKERR(r); return;
+
+ // Don't do a lookup on "hello7", because that will force things out of the buffer.
+ r=db->close(db, 0); CKERR(r);
+ //printf("%s:%d\n", __FILE__, __LINE__);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+ //printf("%s:%d\n", __FILE__, __LINE__);
+
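+    // The aborted overwrite (j=1) must not be visible: hello7 should still
+    // map to the committed j=0 value, whose first byte is '0'.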
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ {
+ DBT key,data;
+ memset(&data, 0, sizeof(data));
+ r = db->get(db, txn, dbt_init(&key, "hello7", strlen("hello7")+1), &data, 0);
+ CKERR(r);
+ //printf("data is %s\n", (char*)data.data);
+ assert(((char*)data.data)[0]=='0');
+ }
+ r=txn->abort(txn); CKERR(r);
+
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ do_test_abort2();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_abort3.cc b/storage/tokudb/PerconaFT/src/tests/test_abort3.cc
new file mode 100644
index 00000000..0d4736c7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_abort3.cc
@@ -0,0 +1,196 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Try to exercise all the cases for the leafcommands in ft-ops.c
+ */
+
+
+#include <db.h>
+#include <sys/stat.h>
+
+static DB_ENV *env;
+static DB *db;
+static DB_TXN *txn;
+
+static void insert (int i, int j) {
+ char hello[30], there[30];
+ DBT key,data;
+ if (verbose) printf("Insert %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", j);
+ int r = db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void op_delete (int i) {
+ char hello[30];
+ DBT key;
+ if (verbose) printf("op_delete %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ int r = db->del(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ DB_DELETE_ANY);
+ assert(r==0);
+}
+
+static void lookup (int i, int expect, int expectj) {
+ char hello[30], there[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ memset(&data, 0, sizeof(data));
+ if (verbose) printf("Looking up %d (expecting %s)\n", i, expect==0 ? "to find" : "not to find");
+ int r = db->get(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ &data,
+ 0);
+ assert(expect==r);
+ if (expect==0) {
+ CKERR(r);
+ snprintf(there, sizeof(there), "there%d", expectj);
+ assert(data.size==strlen(there)+1);
+ assert(strcmp((char*)data.data, there)==0);
+ }
+}
+
+static void
+test_abort3 (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ insert(0, 0);
+ r=txn->commit(txn, 0); assert(r==0);
+
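+    // Each block below performs a small mix of inserts, deletes and lookups,
+    // commits or aborts it, and then verifies the surviving state in a fresh
+    // transaction.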
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ op_delete(0);
+ op_delete(1);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ lookup(1, DB_NOTFOUND, -1);
+ insert(2, 3);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(2, 4);
+ insert(2, 5);
+ lookup(2, 0, 5);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ lookup(2, 0, 5);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(3, 0);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(3, 1);
+ lookup(3, 0, 1);
+ r=txn->abort(txn); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ lookup(3, 0, 0);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(4, 0);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ op_delete(4);
+ lookup(4, DB_NOTFOUND, -1);
+ r=txn->abort(txn); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ lookup(4, 0, 0);
+ r=txn->commit(txn, 0); CKERR(r);
+
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(5, 0);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(5, 1);
+ lookup(5, 0, 1);
+ op_delete(5);
+ lookup(5, DB_NOTFOUND, -1);
+ r=txn->abort(txn); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ lookup(5, 0, 0);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(6, 0);
+ lookup(6, 0, 0);
+ op_delete(6);
+ lookup(6, DB_NOTFOUND, -1);
+ r=txn->abort(txn); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ lookup(6, DB_NOTFOUND, -1);
+ r=txn->commit(txn, 0); CKERR(r);
+
+
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test_abort3();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_abort4.cc b/storage/tokudb/PerconaFT/src/tests/test_abort4.cc
new file mode 100644
index 00000000..e797fc64
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_abort4.cc
@@ -0,0 +1,265 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+//Verify aborting transactions works properly when transaction
+//starts with an empty db and a table lock.
+
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+#include <memory.h>
+#include <stdio.h>
+
+
+DB_ENV *env;
+DB *db;
+DB_TXN *null_txn = NULL;
+DB_TXN *txn;
+uint32_t find_num;
+
+long closemode = -1; // must be set to 0 or 1 on command line
+long logsize = -2; // must be set to a number from -1 to 20 inclusive, on command line.
+
+static void
+init(void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_env_create(&env, 0); CKERR(r);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, null_txn, "foo.db", 0, DB_BTREE, DB_CREATE|DB_EXCL, S_IRWXU|S_IRWXG|S_IRWXO);
+ CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, null_txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO);
+ CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db->pre_acquire_table_lock(db, txn); CKERR(r);
+}
+
+static void
+tear_down(void) {
+ int r;
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void
+abort_txn(void) {
+ find_num = 0;
+ int r = txn->abort(txn); CKERR(r);
+ txn = NULL;
+}
+
+static void
+put(uint32_t k, uint32_t v) {
+ int r;
+ DBT key,val;
+
+ dbt_init(&key, &k, sizeof(k));
+ dbt_init(&val, &v, sizeof(v));
+ r = db->put(db, txn, &key, &val, 0); CKERR(r);
+}
+
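+// find_num records how many rows are expected to survive; a fully aborted
+// bulk insert should leave zero.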
+static void
+test_insert_and_abort(uint32_t num_to_insert) {
+ find_num = 0;
+
+ uint32_t k;
+ uint32_t v;
+
+ uint32_t i;
+ for (i=0; i < num_to_insert; i++) {
+ k = htonl(i);
+ v = htonl(i+num_to_insert);
+ put(k, v);
+ }
+ abort_txn();
+}
+
+static void
+test_insert_and_abort_and_insert(uint32_t num_to_insert) {
+ test_insert_and_abort(num_to_insert);
+ find_num = num_to_insert / 2;
+ uint32_t k, v;
+ uint32_t i;
+ int r;
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db->pre_acquire_table_lock(db, txn); CKERR(r);
+ for (i=0; i < find_num; i++) {
+ k = htonl(i);
+ v = htonl(i+5);
+ put(k, v);
+ }
+    r = txn->commit(txn, 0); CKERR(r);
+ txn = NULL;
+}
+
+#define bit0 (1<<0)
+#define bit1 (1<<1)
+
+static int
+do_nothing(DBT const *UU(a), DBT const *UU(b), void *UU(c)) {
+ return 0;
+}
+
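+// Count the surviving rows with a cursor (optionally after closing and
+// reopening the db) and check that the count matches find_num.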
+static void
+verify_and_tear_down(int close_first) {
+ int r;
+ {
+ char *filename;
+ {
+ DBT dname;
+ DBT iname;
+ dbt_init(&dname, "foo.db", sizeof("foo.db"));
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &dname, &iname);
+ CKERR(r);
+ CAST_FROM_VOIDP(filename, iname.data);
+ assert(filename);
+ }
+ toku_struct_stat statbuf;
+ char fullfile[TOKU_PATH_MAX + 1];
+ r = toku_stat(toku_path_join(fullfile, 2, TOKU_TEST_FILENAME, filename),
+ &statbuf,
+ toku_uninstrumented);
+ assert(r == 0);
+ toku_free(filename);
+ }
+ CKERR(r);
+ if (close_first) {
+ r=db->close(db, 0); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, null_txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO);
+ CKERR(r);
+ }
+ DBC *cursor;
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ uint32_t found = 0;
+ do {
+ r = cursor->c_getf_next(cursor, 0, do_nothing, NULL);
+ if (r==0) found++;
+ } while (r==0);
+ CKERR2(r, DB_NOTFOUND);
+    r = cursor->c_close(cursor); CKERR(r);
+    r = txn->commit(txn, 0); CKERR(r);
+ assert(found==find_num);
+ tear_down();
+}
+
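+// logsize == -1 just aborts an empty transaction; otherwise 2^logsize rows
+// are inserted and aborted, and then re-inserted after an abort.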
+static void
+runtests(void) {
+ int close_first = closemode;
+ if (logsize == -1) {
+ init();
+ abort_txn();
+ verify_and_tear_down(close_first);
+ } else {
+ uint32_t n = 1<<logsize;
+ {
+ if (verbose) {
+ printf("\t%s:%d-%s() close_first=%d n=%06x\n",
+ __FILE__, __LINE__, __FUNCTION__, close_first, n);
+ fflush(stdout);
+ }
+ init();
+ test_insert_and_abort(n);
+ verify_and_tear_down(close_first);
+
+ init();
+ test_insert_and_abort_and_insert(n);
+ verify_and_tear_down(close_first);
+ }
+ }
+}
+
+static long parseint (const char *str) {
+ errno = 0;
+ char *end;
+ long v = strtol(str, &end, 10);
+ assert(errno==0 && *end==0);
+ return v;
+}
+
+static void
+parse_my_args (int argc, char * const argv[]) {
+ const char *argv0=argv[0];
+ while (argc>1) {
+ int resultcode=0;
+ if (strcmp(argv[1], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[1],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[1],"-c") == 0 && argc > 2) {
+ argc--; argv++;
+ closemode = parseint(argv[1]);
+ } else if (strcmp(argv[1],"-l") == 0 && argc > 2) {
+ argc--; argv++;
+ logsize = parseint(argv[1]);
+ } else if (strcmp(argv[1], "-h")==0) {
+ do_usage:
+        fprintf(stderr, "Usage:\n%s [-v|-q] [-h] -c <closemode (0 or 1)> -l <log2 of the number of inserts: -1, or 0 through 20>\n", argv0);
+ exit(resultcode);
+ } else {
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+ assert(closemode==0 || closemode==1);
+ assert(logsize >= -1 && logsize <=20);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_my_args(argc, argv);
+
+ runtests();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_abort5.cc b/storage/tokudb/PerconaFT/src/tests/test_abort5.cc
new file mode 100644
index 00000000..d5d056d5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_abort5.cc
@@ -0,0 +1,253 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Verify that aborting transactions works properly when a transaction
+// starts with an empty db and a table lock.
+
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+#include <memory.h>
+#include <stdio.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+DB_ENV *env;
+DB *db;
+DB_TXN *null_txn = NULL;
+DB_TXN *txn;
+DB_TXN *childtxn;
+uint32_t find_num;
+
+static void
+init(void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_env_create(&env, 0); CKERR(r);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, null_txn, "foo.db", 0, DB_BTREE, DB_CREATE|DB_EXCL, S_IRWXU|S_IRWXG|S_IRWXO);
+ CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, null_txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO);
+ CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db->pre_acquire_table_lock(db, txn); CKERR(r);
+ r=env->txn_begin(env, txn, &childtxn, 0); CKERR(r);
+}
+
+static void
+tear_down(void) {
+ int r;
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
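+// Three ways to end the parent/child transaction pair, selected by abort_type
+// in abort_txn(): 0 = abort the parent only, 1 = abort the child then commit
+// the parent, 2 = abort the child then abort the parent.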
+static void
+abort_childtxn(void) {
+ find_num = 0;
+ int r;
+    r = childtxn->abort(childtxn); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ childtxn = NULL;
+ txn = NULL;
+}
+
+static void
+abort_both(void) {
+ find_num = 0;
+ int r;
+    r = childtxn->abort(childtxn); CKERR(r);
+ r = txn->abort(txn); CKERR(r);
+ childtxn = NULL;
+ txn = NULL;
+}
+
+static void
+abort_parent(void) {
+ int r = txn->abort(txn); CKERR(r);
+}
+
+static void
+abort_txn(int type) {
+ if (type==0) abort_parent();
+ else if (type==1) abort_childtxn();
+ else if (type==2) abort_both();
+ else assert(false);
+
+ find_num = 0;
+ childtxn = NULL;
+ txn = NULL;
+}
+
+static void
+put(uint32_t k, uint32_t v) {
+ int r;
+ DBT key,val;
+ static uint32_t kvec[128];
+ static uint32_t vvec[128];
+
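+    // The DBTs below cover the whole kvec/vvec buffers (128 * sizeof(uint32_t)
+    // = 512 bytes each), so every row is large; only element [0] varies per call.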
+ kvec[0] = k;
+ vvec[0] = v;
+ dbt_init(&key, &kvec[0], sizeof(kvec));
+ dbt_init(&val, &vvec[0], sizeof(vvec));
+ r = db->put(db, childtxn ? childtxn : txn, &key, &val, 0); CKERR(r);
+}
+
+static void
+test_insert_and_abort(uint32_t num_to_insert, int abort_type) {
+ if (verbose>1) printf("\t" __FILE__ ": insert+abort(%u,%d)\n", num_to_insert, abort_type);
+ find_num = 0;
+
+ uint32_t k;
+ uint32_t v;
+
+ uint32_t i;
+ for (i=0; i < num_to_insert; i++) {
+ k = htonl(i);
+ v = htonl(i+num_to_insert);
+ put(k, v);
+ }
+ abort_txn(abort_type);
+}
+
+static void
+test_insert_and_abort_and_insert(uint32_t num_to_insert, int abort_type) {
+ if (verbose>1) printf("\t" __FILE__ ": insert+abort+insert(%u,%d)\n", num_to_insert, abort_type);
+ test_insert_and_abort(num_to_insert, abort_type);
+ find_num = num_to_insert / 2;
+ uint32_t k, v;
+ uint32_t i;
+ for (i=0; i < find_num; i++) {
+ k = htonl(i);
+ v = htonl(i+5);
+ put(k, v);
+ }
+}
+
+#define bit0 (1<<0)
+#define bit1 (1<<1)
+
+static int
+do_nothing(DBT const *UU(a), DBT const *UU(b), void *UU(c)) {
+ return 0;
+}
+
+static void
+verify_and_tear_down(int close_first) {
+ int r;
+ {
+ char *filename;
+ {
+ DBT dname;
+ DBT iname;
+ dbt_init(&dname, "foo.db", sizeof("foo.db"));
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &dname, &iname);
+ CKERR(r);
+ CAST_FROM_VOIDP(filename, iname.data);
+ assert(filename);
+ }
+ toku_struct_stat statbuf;
+ char fullfile[TOKU_PATH_MAX + 1];
+ r = toku_stat(toku_path_join(fullfile, 2, TOKU_TEST_FILENAME, filename),
+ &statbuf,
+ toku_uninstrumented);
+ assert(r == 0);
+ toku_free(filename);
+ }
+ if (close_first) {
+ r=db->close(db, 0); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, null_txn, "foo.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO);
+ CKERR(r);
+ }
+ DBC *cursor;
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ uint32_t found = 0;
+ do {
+ r = cursor->c_getf_next(cursor, 0, do_nothing, NULL);
+ if (r==0) found++;
+ } while (r==0);
+ CKERR2(r, DB_NOTFOUND);
+    r = cursor->c_close(cursor); CKERR(r);
+    r = txn->commit(txn, 0); CKERR(r);
+ assert(found==find_num);
+ tear_down();
+}
+
+static void
+runtests(int abort_type) {
+ if (verbose) printf("\t" __FILE__ ": runtests(%d)\n", abort_type);
+ int close_first;
+ for (close_first = 0; close_first < 2; close_first++) {
+ init();
+ abort_txn(abort_type);
+ verify_and_tear_down(close_first);
+ uint32_t n;
+ for (n = 1; n < 1<<10; n*=2) {
+ init();
+ test_insert_and_abort(n, abort_type);
+ verify_and_tear_down(close_first);
+
+ init();
+ test_insert_and_abort_and_insert(n, abort_type);
+ verify_and_tear_down(close_first);
+ }
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ int abort_type;
+ for (abort_type = 0; abort_type<3; abort_type++) {
+ runtests(abort_type);
+ }
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_abort_delete_first.cc b/storage/tokudb/PerconaFT/src/tests/test_abort_delete_first.cc
new file mode 100644
index 00000000..3dd7258e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_abort_delete_first.cc
@@ -0,0 +1,174 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Test what happens if we have deleted the first thing in the database.
+ * Also the last.
+ * Also if we've deleted a lot of stuff, so that the first good thing is not on the first page.
+ */
+
+#include <db.h>
+#include <sys/stat.h>
+
+static DB_ENV *env;
+static DB *db;
+static DB_TXN *txn;
+
+static void
+insert (int i) {
+ char hello[30], there[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ int r = db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void
+op_delete (int i) {
+ char hello[30];
+ DBT key;
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ int r = db->del(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ 0);
+ CKERR(r);
+}
+
+static void
+find (int i) {
+ char hello[30];
+ DBT key, val;
+ memset(&val,0,sizeof(val));
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ int r = db->get(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ &val,
+ 0);
+ CKERR(r);
+}
+
+static void
+find_first_or_last (int i, int cflag) {
+ int r;
+ DBC *cursor;
+ DBT key, val;
+ memset(&key,0,sizeof(key));
+ memset(&val,0,sizeof(val));
+
+ r = db->cursor(db, txn, &cursor, 0);
+ CKERR(r);
+ r = cursor->c_get(cursor, &key, &val, cflag);
+ assert(r==0);
+
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+
+ assert(strcmp(hello, (char*)key.data)==0);
+ assert(strcmp(there, (char*)val.data)==0);
+
+    r = cursor->c_close(cursor); CKERR(r);
+}
+
+static void
+do_abort_delete_first_or_last(int N,
+ int first // 1 for first, 0 for last
+ ) {
+ int r,i;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+    r=db->set_pagesize(db, 4096); CKERR(r); // Use a small page size
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); assert(r==0);
+
+ // First fill up the db
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+
+ for (i=0; i<N; i++) {
+ insert(i);
+ }
+ r=txn->commit(txn, 0); CKERR(r);
+
+ // Now op_delete a bunch of stuff and see if we can do DB_FIRST
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ if (first) {
+ for (i=0; i<N-1; i++) {
+ op_delete(i);
+ }
+ find(i);
+ find_first_or_last(i, DB_FIRST);
+ } else {
+ for (i=1; i<N; i++) {
+ op_delete(i);
+ }
+ find_first_or_last(0, DB_LAST);
+ }
+
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+
+ // Oh man, this is gross.
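+    // Run the tokudb_dump tool over the environment as a crude integrity check;
+    // its output is discarded and only the exit status is checked.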
+ char cmd[sizeof("../../tools/tokudb_dump -h foo.db >") + 2 * TOKU_PATH_MAX];
+ snprintf(cmd, sizeof(cmd), "../../tools/tokudb_dump -h %s foo.db > %s", TOKU_TEST_FILENAME, DEV_NULL_FILE);
+ r=system(cmd);
+ CKERR(r);
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ int f;
+ for (f=0; f<2; f++) {
+ do_abort_delete_first_or_last(10, f);
+ do_abort_delete_first_or_last(1000,f);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_archive0.cc b/storage/tokudb/PerconaFT/src/tests/test_archive0.cc
new file mode 100644
index 00000000..0e3b3475
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_archive0.cc
@@ -0,0 +1,73 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Test log archive. */
+#include <db.h>
+#include <sys/stat.h>
+
+
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *txn;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ char **list;
+    r=env->log_archive(env, &list, 0);
+    CKERR(r);
+    assert(list==0);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_archive1.cc b/storage/tokudb/PerconaFT/src/tests/test_archive1.cc
new file mode 100644
index 00000000..9a797a20
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_archive1.cc
@@ -0,0 +1,93 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Test log archive. */
+#include <db.h>
+#include <sys/stat.h>
+#include <memory.h>
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *txn;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->set_lg_max(env, 16000); CKERR(r);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ int i;
+ for (i=0; i<400; i++) {
+ DBT key,data;
+ char hello[30],there[30];
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+		  0);
+	CKERR(r);
+	r=txn->commit(txn, 0); CKERR(r);
+ r=env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ }
+
+ {
+ char **list;
+ r=env->log_archive(env, &list, 0);
+ CKERR(r);
+ //this test no longer produces a list with any entries for TDB
+ // - txn_checkpoint trims unused logfiles
+ assert(list == 0);
+ }
+
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_archive2.cc b/storage/tokudb/PerconaFT/src/tests/test_archive2.cc
new file mode 100644
index 00000000..a3c88b10
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_archive2.cc
@@ -0,0 +1,102 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Test log archive. */
+#include <db.h>
+#include <sys/stat.h>
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV *env;
+ DB *db, *db2;
+ DB_TXN *txn, *txn2;
+ DBT key,data;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->set_lg_max(env, 20000); CKERR(r);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=db_create(&db2, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+    r=db2->open(db2, txn, "foo2.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn2, 0); CKERR(r);
+    r=db2->put(db2, txn2, dbt_init(&key, "what", 5), dbt_init(&data, "who", 4), 0); CKERR(r);
+
+ int i;
+ for (i=0; i<100; i++) {
+ char hello[30],there[30];
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db->put(db, txn,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+ 0);
+ r=txn->commit(txn, 0); CKERR(r);
+ r=env->txn_checkpoint(env, 0, 0, 0); CKERR(r);
+ }
+
+ {
+ char **list;
+ r=env->log_archive(env, &list, 0);
+ CKERR(r);
+ assert(list==0); // since there is an open txn
+ }
+
+ r=txn2->commit(txn2, 0); CKERR(r);
+
+ r=db->close(db, 0); CKERR(r);
+ r=db2->close(db2, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_bad_implicit_promotion.cc b/storage/tokudb/PerconaFT/src/tests/test_bad_implicit_promotion.cc
new file mode 100644
index 00000000..cdc2ee4d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_bad_implicit_promotion.cc
@@ -0,0 +1,138 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Test the following scenario:
+// Begin A
+// A deletes key K
+// A aborts
+// Begin B
+// B deletes key K-1
+// B deletes key K
+// B deletes key K+1
+// B commits
+// Begin C
+// C queries K, should read K (not the delete!).
+//
+// An incorrect MVCC implementation would 'implicitly' promote
+// A's delete to committed, based on the fact that the oldest
+// referenced xid at the time B's messages for keys K-1 and K+1
+// are injected is greater than A's xid.
+
+static void test_insert_bad_implicit_promotion(void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_cachesize(env, 1, 0, 1); CKERR(r); // 1gb cache so this test fits in memory
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, 0); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, 4096); CKERR(r);
+ r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ const int val_size = 512;
+
+ DBT key;
+ DBT val;
+ char *XMALLOC_N(val_size, val_buf);
+ memset(val_buf, 'x', val_size);
+ dbt_init(&val, val_buf, val_size);
+
+ // Insert rows [0, N]
+ const int N = 1000;
+ for (int i = 0; i < N; i++) {
+ int k = toku_htonl(i);
+ dbt_init(&key, &k, sizeof(k));
+ r = db->put(db, NULL, &key, &val, 0); CKERR(r);
+ }
+
+ int key_500 = toku_htonl(500);
+ int key_499 = toku_htonl(499);
+ int key_501 = toku_htonl(501);
+ // sanity check our keys
+ r = db->get(db, NULL, dbt_init(&key, &key_500, sizeof(key_500)), &val, 0); CKERR(r);
+    r = db->get(db, NULL, dbt_init(&key, &key_499, sizeof(key_499)), &val, 0); CKERR(r);
+    r = db->get(db, NULL, dbt_init(&key, &key_501, sizeof(key_501)), &val, 0); CKERR(r);
+
+ // Abort a delete for key 500
+ DB_TXN *txn_A;
+ r = env->txn_begin(env, NULL, &txn_A, DB_SERIALIZABLE); CKERR(r);
+ dbt_init(&key, &key_500, sizeof(key_500));
+ r = db->del(db, txn_A, &key, DB_DELETE_ANY); CKERR(r);
+ r = txn_A->abort(txn_A); CKERR(r);
+
+ // Commit two deletes on keys 499 and 501. This should inject
+ // at least one message in the same buffer that has the delete/abort
+ // messages for key 500.
+ DB_TXN *txn_B;
+ r = env->txn_begin(env, NULL, &txn_B, DB_SERIALIZABLE); CKERR(r);
+ dbt_init(&key, &key_499, sizeof(key_499));
+ r = db->del(db, txn_B, &key, DB_DELETE_ANY); CKERR(r);
+ dbt_init(&key, &key_501, sizeof(key_501));
+ r = db->del(db, txn_B, &key, DB_DELETE_ANY); CKERR(r);
+ r = txn_B->commit(txn_B, 0); CKERR(r);
+
+ // No transactions are live - so when we create txn C, the oldest
+ // referenced xid will be txn C. If our implicit promotion logic is
+ // wrong, we will use txn C's xid to promote the delete on key 500
+ // before the abort message hits it, and C's query will return nothing.
+ DB_TXN *txn_C;
+ dbt_init(&key, &key_500, sizeof(key_500));
+ r = env->txn_begin(env, NULL, &txn_C, DB_TXN_SNAPSHOT); CKERR(r);
+ r = db->get(db, txn_C, &key, &val, 0); CKERR(r);
+ r = txn_C->commit(txn_C, 0); CKERR(r);
+
+ toku_free(val_buf);
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ test_insert_bad_implicit_promotion();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_blobs_leaf_split.cc b/storage/tokudb/PerconaFT/src/tests/test_blobs_leaf_split.cc
new file mode 100644
index 00000000..0cb7c619
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_blobs_leaf_split.cc
@@ -0,0 +1,141 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// verify that the leaf split code can handle rows larger than nodesize
+
+#include "test.h"
+
+static void insert(DB *db, DB_TXN *txn, int k, int val_size) {
+ int r;
+
+ // generate the key
+ char key_buffer[8];
+ memset(key_buffer, 0, sizeof key_buffer);
+ int newa = htonl(k);
+ memcpy(key_buffer, &newa, sizeof newa);
+
+ // generate the value
+ char *XCALLOC_N(val_size, val_buffer);
+
+ DBT key = { .data = key_buffer, .size = sizeof key_buffer };
+ DBT value = { .data = val_buffer, .size = (uint32_t) val_size };
+ r = db->put(db, txn, &key, &value, 0); assert_zero(r);
+
+ toku_free(val_buffer);
+}
+
+int test_main(int argc, char * const argv[]) {
+ const char *db_env_dir = "dir.blobs.leafsplit.env.tdb";
+ int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG;
+ const char *db_filename = "blobs.db";
+ int do_txn = 1;
+ uint64_t cachesize = 0;
+ uint32_t pagesize = 0;
+
+ int i;
+ for (i = 1; i < argc; i++) {
+ char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0 || strcmp(arg, "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ if (verbose > 0) verbose--;
+ continue;
+ }
+ if (strcmp(arg, "--txn") == 0 && i+1 < argc) {
+ do_txn = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--pagesize") == 0 && i+1 < argc) {
+ pagesize = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--cachesize") == 0 && i+1 < argc) {
+ cachesize = atol(argv[++i]);
+ continue;
+ }
+
+ assert(0);
+ }
+
+ int r;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+ r = system(rm_cmd); assert_zero(r);
+
+ r = toku_os_mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert_zero(r);
+
+ // create and open the env
+ DB_ENV *db_env = NULL;
+ r = db_env_create(&db_env, 0); assert_zero(r);
+ if (cachesize) {
+ const uint64_t gig = 1 << 30;
+ r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert_zero(r);
+ }
+ if (!do_txn)
+ db_env_open_flags &= ~(DB_INIT_TXN | DB_INIT_LOG);
+ r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+
+ // create the db
+ DB *db = NULL;
+ r = db_create(&db, db_env, 0); assert_zero(r);
+ DB_TXN *create_txn = NULL;
+ if (do_txn) {
+ r = db_env->txn_begin(db_env, NULL, &create_txn, 0); assert_zero(r);
+ }
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert_zero(r);
+ }
+ r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert_zero(r);
+
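+    // Overwrite key 1 with progressively larger values (8MB, 16MB, 32MB), each
+    // bigger than the node size, then add a second small row; this exercises the
+    // leaf split path with rows larger than the nodesize.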
+ insert(db, create_txn, 1, 8000000);
+ insert(db, create_txn, 1, 16000000);
+ insert(db, create_txn, 1, 32000000);
+ insert(db, create_txn, 2, 1);
+
+ if (do_txn) {
+ r = create_txn->commit(create_txn, 0); assert_zero(r);
+ }
+
+ // shutdown
+ r = db->close(db, 0); assert_zero(r); db = NULL;
+ r = db_env->close(db_env, 0); assert_zero(r); db_env = NULL;
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_bulk_fetch.cc b/storage/tokudb/PerconaFT/src/tests/test_bulk_fetch.cc
new file mode 100644
index 00000000..bdebc9ae
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_bulk_fetch.cc
@@ -0,0 +1,305 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+verify_val(DBT const *a, DBT const *b, void *c) {
+ assert(a->size == sizeof(uint64_t));
+ assert(b->size == sizeof(uint64_t));
+ uint64_t* expected = (uint64_t *)c;
+ assert(*expected == *(uint64_t *)a->data);
+ assert(*expected == *(uint64_t *)b->data);
+}
+
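+// Returning TOKUDB_CURSOR_CONTINUE from a c_getf_* callback asks the cursor to
+// keep feeding subsequent rows to the callback before the call returns (the
+// bulk-fetch "fast" path); returning 0 delivers exactly one row per call.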
+static int
+verify_fwd_fast(DBT const *a, DBT const *b, void *c) {
+ verify_val(a,b,c);
+ uint64_t* expected = (uint64_t *)c;
+ *expected = *expected + 1;
+ return TOKUDB_CURSOR_CONTINUE;
+}
+
+static int
+verify_fwd_slow(DBT const *a, DBT const *b, void *c) {
+ verify_val(a,b,c);
+ uint64_t* expected = (uint64_t *)c;
+ *expected = *expected + 1;
+ return 0;
+}
+
+static int
+verify_bwd_fast(DBT const *a, DBT const *b, void *c) {
+ verify_val(a,b,c);
+ uint64_t* expected = (uint64_t *)c;
+ *expected = *expected - 1;
+ return TOKUDB_CURSOR_CONTINUE;
+}
+
+static int
+verify_bwd_slow(DBT const *a, DBT const *b, void *c) {
+ verify_val(a,b,c);
+ uint64_t* expected = (uint64_t *)c;
+ *expected = *expected - 1;
+ return 0;
+}
+
+uint64_t num_pivots_fetched_prefetch;
+uint64_t num_basements_decompressed_aggressive;
+uint64_t num_basements_decompressed_prefetch;
+uint64_t num_basements_fetched_aggressive;
+uint64_t num_basements_fetched_prefetch;
+
+static void
+init_eng_stat_vars(DB_ENV* env) {
+ num_pivots_fetched_prefetch = get_engine_status_val(env, "FT_NUM_PIVOTS_FETCHED_PREFETCH");
+ num_basements_decompressed_aggressive = get_engine_status_val(env, "FT_NUM_BASEMENTS_DECOMPRESSED_AGGRESSIVE");
+ num_basements_decompressed_prefetch = get_engine_status_val(env, "FT_NUM_BASEMENTS_DECOMPRESSED_PREFETCH");
+ num_basements_fetched_aggressive = get_engine_status_val(env, "FT_NUM_BASEMENTS_FETCHED_AGGRESSIVE");
+ num_basements_fetched_prefetch = get_engine_status_val(env, "FT_NUM_BASEMENTS_FETCHED_PREFETCH");
+}
+
+static void
+check_eng_stat_vars_unchanged(DB_ENV* env) {
+ assert(num_pivots_fetched_prefetch == get_engine_status_val(env, "FT_NUM_PIVOTS_FETCHED_PREFETCH"));
+ assert(num_basements_decompressed_aggressive == get_engine_status_val(env, "FT_NUM_BASEMENTS_DECOMPRESSED_AGGRESSIVE"));
+ assert(num_basements_decompressed_prefetch == get_engine_status_val(env, "FT_NUM_BASEMENTS_DECOMPRESSED_PREFETCH"));
+ assert(num_basements_fetched_aggressive == get_engine_status_val(env, "FT_NUM_BASEMENTS_FETCHED_AGGRESSIVE"));
+ assert(num_basements_fetched_prefetch == get_engine_status_val(env, "FT_NUM_BASEMENTS_FETCHED_PREFETCH"));
+}
+
+static void
+print_relevant_eng_stat_vars(DB_ENV* env) {
+ printf("num_pivots_fetched_prefetch %" PRId64 " \n", get_engine_status_val(env, "FT_NUM_PIVOTS_FETCHED_PREFETCH"));
+ printf("num_basements_decompressed_aggressive %" PRId64 " \n", get_engine_status_val(env, "FT_NUM_BASEMENTS_DECOMPRESSED_AGGRESSIVE"));
+ printf("num_basements_decompressed_prefetch %" PRId64 " \n", get_engine_status_val(env, "FT_NUM_BASEMENTS_DECOMPRESSED_PREFETCH"));
+ printf("num_basements_fetched_aggressive %" PRId64 " \n", get_engine_status_val(env, "FT_NUM_BASEMENTS_FETCHED_AGGRESSIVE"));
+ printf("num_basements_fetched_prefetch %" PRId64 " \n", get_engine_status_val(env, "FT_NUM_BASEMENTS_FETCHED_PREFETCH"));
+}
+
+static void
+test_bulk_fetch (uint64_t n, bool prelock, bool disable_prefetching) {
+    if (verbose) printf("test_bulk_fetch:%" PRIu64 "\n", n);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.bulk_fetch.ft_handle";
+ int r;
+
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r=env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+    // arbitrarily set the cachetable size to 4*n:
+    // small enough that not all of the data fits in the cachetable,
+    // but not so small that we start thrashing
+ r = env->set_cachesize(env, 0, (uint32_t)4*n, 1); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ r = db->set_flags(db, 0);
+ assert(r == 0);
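+    // Small node (4K) and basement-node/readpage (1K) sizes so the data spans
+    // many basement nodes, which is what the prefetch-related engine-status
+    // counters above track.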
+ r = db->set_pagesize(db, 4096);
+ assert(r == 0);
+ r = db->set_readpagesize(db, 1024);
+ assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+
+ uint64_t keys[n];
+ uint64_t i;
+ for (i=0; i<n; i++) {
+ keys[i] = i;
+ }
+
+ for (i=0; i<n; i++) {
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, &keys[i], sizeof keys[i]), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+ }
+
+ //
+ // data inserted, now verify that using TOKUDB_CURSOR_CONTINUE in the callback works
+ //
+ DBC* cursor;
+
+ // verify fast
+ uint32_t flags = disable_prefetching ? DBC_DISABLE_PREFETCHING : 0;
+ if (disable_prefetching) {
+ init_eng_stat_vars(env);
+ }
+ r = db->cursor(db, NULL, &cursor, flags);
+ CKERR(r);
+ if (prelock) {
+ r = cursor->c_set_bounds(
+ cursor,
+ db->dbt_neg_infty(),
+ db->dbt_pos_infty(),
+ true,
+ 0
+ );
+ CKERR(r);
+ }
+ uint64_t expected = 0;
+ while (r != DB_NOTFOUND) {
+ r = cursor->c_getf_next(cursor, 0, verify_fwd_fast, &expected);
+ assert(r==0 || r==DB_NOTFOUND);
+ }
+ r = cursor->c_close(cursor); CKERR(r);
+ if (disable_prefetching) {
+ check_eng_stat_vars_unchanged(env);
+ }
+ if (verbose) {
+ print_relevant_eng_stat_vars(env);
+ }
+
+ // verify slow
+ if (disable_prefetching) {
+ init_eng_stat_vars(env);
+ }
+ r = db->cursor(db, NULL, &cursor, flags);
+ CKERR(r);
+ if (prelock) {
+ r = cursor->c_set_bounds(
+ cursor,
+ db->dbt_neg_infty(),
+ db->dbt_pos_infty(),
+ true,
+ 0
+ );
+ CKERR(r);
+ }
+ expected = 0;
+ while (r != DB_NOTFOUND) {
+ r = cursor->c_getf_next(cursor, 0, verify_fwd_slow, &expected);
+ assert(r==0 || r==DB_NOTFOUND);
+ }
+ r = cursor->c_close(cursor); CKERR(r);
+ if (disable_prefetching) {
+ check_eng_stat_vars_unchanged(env);
+ }
+ if (verbose) {
+ print_relevant_eng_stat_vars(env);
+ }
+
+ // now do backwards
+ if (disable_prefetching) {
+ init_eng_stat_vars(env);
+ }
+ r = db->cursor(db, NULL, &cursor, flags);
+ CKERR(r);
+ if (prelock) {
+ r = cursor->c_set_bounds(
+ cursor,
+ db->dbt_neg_infty(),
+ db->dbt_pos_infty(),
+ true,
+ 0
+ );
+ CKERR(r);
+ }
+ expected = n-1;
+ while (r != DB_NOTFOUND) {
+ r = cursor->c_getf_prev(cursor, 0, verify_bwd_fast, &expected);
+ assert(r==0 || r==DB_NOTFOUND);
+ }
+ r = cursor->c_close(cursor); CKERR(r);
+ if (disable_prefetching) {
+ check_eng_stat_vars_unchanged(env);
+ }
+ if (verbose) {
+ print_relevant_eng_stat_vars(env);
+ }
+
+ // verify slow
+ if (disable_prefetching) {
+ init_eng_stat_vars(env);
+ }
+ r = db->cursor(db, NULL, &cursor, flags);
+ CKERR(r);
+ if (prelock) {
+ r = cursor->c_set_bounds(
+ cursor,
+ db->dbt_neg_infty(),
+ db->dbt_pos_infty(),
+ true,
+ 0
+ );
+ CKERR(r);
+ }
+ expected = n-1;
+ while (r != DB_NOTFOUND) {
+ r = cursor->c_getf_prev(cursor, 0, verify_bwd_slow, &expected);
+ assert(r==0 || r==DB_NOTFOUND);
+ }
+ r = cursor->c_close(cursor); CKERR(r);
+ if (disable_prefetching) {
+ check_eng_stat_vars_unchanged(env);
+ }
+ if (verbose) {
+ print_relevant_eng_stat_vars(env);
+ }
+
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test_bulk_fetch(10000, false, true);
+ test_bulk_fetch(10000, true, true);
+ test_bulk_fetch(10000, false, false);
+ test_bulk_fetch(10000, true, false);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cachesize.cc b/storage/tokudb/PerconaFT/src/tests/test_cachesize.cc
new file mode 100644
index 00000000..577f4689
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cachesize.cc
@@ -0,0 +1,114 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static uint64_t
+size_from (uint32_t gbytes, uint32_t bytes) {
+ return ((uint64_t)gbytes << 30) + bytes;
+}
+
+static inline void
+size_to (uint64_t s, uint32_t *gbytes, uint32_t *bytes) {
+ *gbytes = s >> 30;
+ *bytes = s & ((1<<30) - 1);
+}
+
+static inline void
+expect_le (uint64_t a, uint32_t gbytes, uint32_t bytes) {
+ uint64_t b = size_from(gbytes, bytes);
+ if (a != b && verbose)
+ printf("WARNING: expect %" PRIu64 " got %" PRIu64 "\n", a, b);
+ assert(a <= b);
+}
+
+
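+// Starting from the minimum, keep doubling the requested cache size while it
+// stays at or below 32GB, and check that get_cachesize reports at least what
+// was requested, and never less than the environment's minimum.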
+static void
+test_cachesize (void) {
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3
+ int r;
+ DB_ENV *env;
+ uint32_t gbytes, bytes; int ncache;
+
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->get_cachesize(env, &gbytes, &bytes, &ncache); assert(r == 0);
+ if (verbose) printf("default %u %u %d\n", gbytes, bytes, ncache);
+
+ r = env->set_cachesize(env, 0, 0, 1); assert(r == 0);
+ r = env->get_cachesize(env, &gbytes, &bytes, &ncache); assert(r == 0);
+ if (verbose) printf("minimum %u %u %d\n", gbytes, bytes, ncache);
+ uint64_t minsize = size_from(gbytes, bytes);
+
+ uint64_t s = 1; size_to(s, &gbytes, &bytes);
+ while (gbytes <= 32) {
+ r = env->set_cachesize(env, gbytes, bytes, ncache);
+ if (r != 0) {
+ if (verbose) printf("max %u %u\n", gbytes, bytes);
+ break;
+ }
+ assert(r == 0);
+ r = env->get_cachesize(env, &gbytes, &bytes, &ncache); assert(r == 0);
+ assert(ncache == 1);
+ if (s <= minsize)
+ expect_le(minsize, gbytes, bytes);
+ else
+ expect_le(s, gbytes, bytes);
+ s *= 2; size_to(s, &gbytes, &bytes);
+ }
+ r = env->close(env, 0); assert(r == 0);
+#endif
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test_cachesize();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cmp_descriptor.cc b/storage/tokudb/PerconaFT/src/tests/test_cmp_descriptor.cc
new file mode 100644
index 00000000..9aff47ed
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cmp_descriptor.cc
@@ -0,0 +1,281 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that the cmp_descriptor seen by the comparison function is maintained
+// correctly across change_descriptor, transaction abort, and close/reopen
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+bool cmp_desc_is_four;
+uint32_t four_byte_desc = 0xffffffff;
+uint64_t eight_byte_desc = 0x12345678ffffffff;
+
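+// The test flips between a 4-byte and an 8-byte descriptor: change_descriptor()
+// always updates db->descriptor, but db->cmp_descriptor (the one handed to the
+// comparison function) only follows it when DB_UPDATE_CMP_DESCRIPTOR is passed
+// or after the db is closed and reopened.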
+
+static int generate_row_for_put(
+ DB *UU(dest_db),
+ DB *UU(src_db),
+ DBT_ARRAY *dest_key_arrays,
+ DBT_ARRAY *dest_val_arrays,
+ const DBT *src_key,
+ const DBT *src_val
+ )
+{
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ dest_key->data = src_key->data;
+ dest_key->size = src_key->size;
+ dest_key->flags = 0;
+ dest_val->data = src_val->data;
+ dest_val->size = src_val->size;
+ dest_val->flags = 0;
+ return 0;
+}
+static void assert_cmp_desc_valid (DB* db) {
+ if (cmp_desc_is_four) {
+ assert(db->cmp_descriptor->dbt.size == sizeof(four_byte_desc));
+ }
+ else {
+ assert(db->cmp_descriptor->dbt.size == sizeof(eight_byte_desc));
+ }
+ unsigned char* CAST_FROM_VOIDP(cmp_desc_data, db->cmp_descriptor->dbt.data);
+ assert(cmp_desc_data[0] == 0xff);
+ assert(cmp_desc_data[1] == 0xff);
+ assert(cmp_desc_data[2] == 0xff);
+ assert(cmp_desc_data[3] == 0xff);
+}
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+ assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static int
+desc_int64_dbt_cmp (DB *db, const DBT *a, const DBT *b) {
+ assert_cmp_desc_valid(db);
+ assert(a);
+ assert(b);
+
+ assert(a->size == sizeof(int64_t));
+ assert(b->size == sizeof(int64_t));
+
+ int64_t x = *(int64_t *) a->data;
+ int64_t y = *(int64_t *) b->data;
+
+ if (x<y) return -1;
+ if (x>y) return 1;
+ return 0;
+}
+
+
+static void open_env(void) {
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ int r = env->set_default_bt_compare(env, desc_int64_dbt_cmp); CKERR(r);
+ //r = env->set_cachesize(env, 0, 500000, 1); CKERR(r);
+ r = env->set_generate_row_callback_for_put(env, generate_row_for_put); CKERR(r);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ open_env();
+}
+
+static void cleanup (void) {
+ int chk_r = env->close(env, 0);
+ CKERR(chk_r);
+ env = NULL;
+}
+
+static void do_inserts_and_queries(DB* db) {
+ int r = 0;
+ DB_TXN* write_txn = NULL;
+ r = env->txn_begin(env, NULL, &write_txn, 0);
+ CKERR(r);
+ for (int i = 0; i < 2000; i++) {
+ uint64_t key_data = random();
+ uint64_t val_data = random();
+ DBT key, val;
+ dbt_init(&key, &key_data, sizeof(key_data));
+ dbt_init(&val, &val_data, sizeof(val_data));
+ { int chk_r = db->put(db, write_txn, &key, &val, 0); CKERR(chk_r); }
+ }
+ r = write_txn->commit(write_txn, 0);
+ CKERR(r);
+ for (int i = 0; i < 2; i++) {
+ DB_TXN* read_txn = NULL;
+ r = env->txn_begin(env, NULL, &read_txn, 0);
+ CKERR(r);
+ DBC* cursor = NULL;
+ r = db->cursor(db, read_txn, &cursor, 0);
+ CKERR(r);
+ if (i == 0) {
+ r = cursor->c_set_bounds(
+ cursor,
+ db->dbt_neg_infty(),
+ db->dbt_pos_infty(),
+ true,
+ 0
+ );
+ CKERR(r);
+ }
+ while(r != DB_NOTFOUND) {
+ DBT key, val;
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ assert(r == 0 || r == DB_NOTFOUND);
+ }
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ r = read_txn->commit(read_txn, 0);
+ CKERR(r);
+ }
+}
+
+static void run_test(void) {
+ DB* db = NULL;
+ int r;
+ cmp_desc_is_four = true;
+
+ DBT orig_desc;
+ memset(&orig_desc, 0, sizeof(orig_desc));
+ orig_desc.size = sizeof(four_byte_desc);
+ orig_desc.data = &four_byte_desc;
+
+ DBT other_desc;
+ memset(&other_desc, 0, sizeof(other_desc));
+ other_desc.size = sizeof(eight_byte_desc);
+ other_desc.data = &eight_byte_desc;
+
+ DB_LOADER *loader = NULL;
+ DBT key, val;
+ uint64_t k = 0;
+ uint64_t v = 0;
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ assert(db->descriptor == NULL);
+ r = db->set_pagesize(db, 2048);
+ CKERR(r);
+ r = db->set_readpagesize(db, 1024);
+ CKERR(r);
+ { int chk_r = db->open(db, txn_create, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ assert(db->descriptor->dbt.size == 0);
+ assert(db->cmp_descriptor->dbt.size == 0);
+ { int chk_r = db->change_descriptor(db, txn_create, &orig_desc, DB_UPDATE_CMP_DESCRIPTOR); CKERR(chk_r); }
+ assert_desc_four(db);
+ assert_cmp_desc_valid(db);
+ r = env->create_loader(env, txn_create, &loader, db, 1, &db, NULL, NULL, 0);
+ CKERR(r);
+ dbt_init(&key, &k, sizeof k);
+ dbt_init(&val, &v, sizeof v);
+ r = loader->put(loader, &key, &val);
+ CKERR(r);
+ r = loader->close(loader);
+ CKERR(r);
+ assert_cmp_desc_valid(db);
+ });
+ assert_cmp_desc_valid(db);
+ CKERR(r);
+ do_inserts_and_queries(db);
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db->change_descriptor(db, txn_1, &other_desc, 0); CKERR(chk_r); }
+ assert_desc_eight(db);
+ assert_cmp_desc_valid(db);
+ });
+ assert_desc_eight(db);
+ assert_cmp_desc_valid(db);
+ do_inserts_and_queries(db);
+
+ IN_TXN_ABORT(env, NULL, txn_1, 0, {
+ { int chk_r = db->change_descriptor(db, txn_1, &orig_desc, 0); CKERR(chk_r); }
+ assert_desc_four(db);
+ assert_cmp_desc_valid(db);
+ });
+ assert_desc_eight(db);
+ assert_cmp_desc_valid(db);
+ do_inserts_and_queries(db);
+
+ {
+ int chk_r = db->close(db, 0); CKERR(chk_r);
+ cleanup();
+ open_env();
+ }
+
+    // verify that after close and reopen, cmp_descriptor is now
+    // the latest descriptor
+ cmp_desc_is_four = false;
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ assert_desc_eight(db);
+ assert_cmp_desc_valid(db);
+ do_inserts_and_queries(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cmp_desc_is_four = true;
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_AUTO_COMMIT, 0666); CKERR(chk_r); }
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db->change_descriptor(db, txn_1, &orig_desc, DB_UPDATE_CMP_DESCRIPTOR); CKERR(chk_r); }
+ assert_desc_four(db);
+ assert_cmp_desc_valid(db);
+ });
+ assert_desc_four(db);
+ assert_cmp_desc_valid(db);
+ do_inserts_and_queries(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_compression_methods.cc b/storage/tokudb/PerconaFT/src/tests/test_compression_methods.cc
new file mode 100644
index 00000000..56ca13f8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_compression_methods.cc
@@ -0,0 +1,155 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/*
+ * Test that different compression methods can be used.
+ */
+
+#include <db.h>
+#include "test.h"
+
+static const int VAL_SIZE = 248;
+static const int NUM_ROWS = 1 << 12;
+
+static int
+insert(DB_ENV *env, DB *db, void *UU(extra))
+{
+ assert(VAL_SIZE%sizeof(int)==0);
+ int val[VAL_SIZE/sizeof(int)];
+ memset(val, 0, sizeof val);
+ DB_TXN *txn;
+ int r = env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+ for (int i = 0; i < NUM_ROWS; ++i) {
+ DBT k, v;
+ val[0] = i;
+ r = db->put(db, txn, dbt_init(&k, &i, sizeof i), dbt_init(&v, val, sizeof val), 0);
+ CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ return 0;
+}
+
+static int
+lookup(DB_ENV *env, DB *db, void *UU(extra))
+{
+ DB_TXN *txn;
+ int r = env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+ for (int i = 0; i < NUM_ROWS; ++i) {
+ DBT k, v;
+ r = db->get(db, txn, dbt_init(&k, &i, sizeof i), dbt_init(&v, NULL, 0), 0);
+ CKERR(r);
+ assert(v.size == (size_t) VAL_SIZE);
+ assert(*(int *) v.data == i);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ return 0;
+}
+
+typedef int (*db_callback)(DB_ENV *env, DB *db, void *extra);
+static int
+with_open_db(db_callback cb, void *cb_extra, bool set_method, enum toku_compression_method method)
+{
+ DB_ENV *env;
+ DB *db;
+ int r;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+ if (set_method) {
+ r = db->set_compression_method(db, method);
+ CKERR(r);
+ }
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+
+ {
+ enum toku_compression_method saved_method;
+ r = db->get_compression_method(db, &saved_method);
+ CKERR(r);
+ assert(saved_method == method);
+ }
+
+ int cr = cb(env, db, cb_extra);
+
+ r = db->close(db, 0);
+ CKERR(r);
+ r = env->close(env, 0);
+ CKERR(r);
+
+ return cr;
+}
+
+static void
+run_test(enum toku_compression_method method)
+{
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ r = with_open_db(insert, NULL, true, method);
+ CKERR(r);
+ r = with_open_db(lookup, NULL, false, method);
+ CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[])
+{
+ parse_args(argc, argv);
+ run_test(TOKU_NO_COMPRESSION);
+ run_test(TOKU_ZLIB_METHOD);
+ run_test(TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD);
+ run_test(TOKU_QUICKLZ_METHOD);
+ run_test(TOKU_LZMA_METHOD);
+ return 0;
+}
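
The test drives home that the compression method is chosen on the handle before the dictionary is first opened, and that a later open without set_compression_method still reads back the stored method. A minimal sketch of the creation-time pattern, assuming an already-open transactional DB_ENV *env and the test.h helpers used throughout these tests; the function and file names are illustrative:

    static void create_zlib_db(DB_ENV *env) {
        DB *db;
        int r = db_create(&db, env, 0);                        CKERR(r);
        r = db->set_compression_method(db, TOKU_ZLIB_METHOD);  CKERR(r);  // set before open, as in the test
        r = db->open(db, NULL, "compressed.db", NULL, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, 0666);
        CKERR(r);
        enum toku_compression_method m;
        r = db->get_compression_method(db, &m);                CKERR(r);
        assert(m == TOKU_ZLIB_METHOD);
        r = db->close(db, 0);                                  CKERR(r);
    }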
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_2.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_2.cc
new file mode 100644
index 00000000..976114d0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_2.cc
@@ -0,0 +1,127 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+test_cursor (void) {
+ if (verbose) printf("test_cursor\n");
+
+ DB_ENV * env;
+ DB *db;
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.cursor.ft_handle";
+ int r;
+
+ /* create the dup database file */
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_THREAD|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); assert(r == 0);
+    db->set_errfile(db,stderr); // send error messages to stderr
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ int i;
+ int n = 42;
+ for (i=0; i<n; i++) {
+ int k = htonl(i);
+ int v = htonl(i);
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+ }
+
+ int ncursors = 2;
+ DBC *cursor[ncursors];
+ r = db->cursor(db, null_txn, &cursor[0], 0); assert(r == 0);
+ r = db->cursor(db, null_txn, &cursor[1], 0); assert(r == 0);
+
+ DBT k0; memset(&k0, 0, sizeof k0);
+ DBT v0; memset(&v0, 0, sizeof v0);
+ r = cursor[0]->c_get(cursor[0], &k0, &v0, DB_FIRST); assert(r == 0);
+ if (verbose) {
+ printf("k0:%p:%u\n", k0.data, k0.size);
+ printf("v0:%p:%u\n", v0.data, v0.size);
+ }
+
+ DBT k1; memset(&k1, 0, sizeof k1);
+ DBT v1; memset(&v1, 0, sizeof v1);
+ r = cursor[1]->c_get(cursor[1], &k1, &v1, DB_FIRST); assert(r == 0);
+ if (verbose) {
+ printf("k1:%p:%u\n", k1.data, k1.size);
+ printf("v1:%p:%u\n", v1.data, v1.size);
+ }
+
+ r = cursor[0]->c_get(cursor[0], &k0, &v0, DB_NEXT); assert(r == 0);
+ if (verbose) {
+ printf("k0:%p:%u\n", k0.data, k0.size);
+ printf("v0:%p:%u\n", v0.data, v0.size);
+ }
+
+ assert(k0.data != k1.data);
+ assert(v0.data != v1.data);
+
+ r = cursor[0]->c_close(cursor[0]); assert(r == 0);
+ r = cursor[1]->c_close(cursor[1]); assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ test_cursor();
+
+ return 0;
+}
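
The assertions at the end of test_cursor pin down the memory contract for zero-initialized DBTs: each cursor hands back pointers into its own buffer, so two cursors never alias. When the caller wants to own the returned key and value instead, the surrounding tests use dbt_init_malloc and free the result, roughly as in this sketch (the function name is illustrative; error handling uses CKERR as elsewhere):

    static void read_first_owned(DBC *cursor) {
        DBT key, val;
        int r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_FIRST);
        CKERR(r);
        // key.data and val.data were allocated for the caller, so the caller frees them
        toku_free(key.data);
        toku_free(val.data);
    }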
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_3.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_3.cc
new file mode 100644
index 00000000..df7d1d65
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_3.cc
@@ -0,0 +1,139 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+// Verify that different cursors return different data items when DBT is given no flags.
+
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+verify_distinct_pointers (void **ptrs, int n) {
+ int i,j;
+ for (i=0; i<n; i++) {
+ for (j=i+1; j<n; j++) {
+ assert(ptrs[i]!=ptrs[j]);
+ }
+ }
+}
+
+DB_ENV * env;
+DB *db;
+DB_TXN * const null_txn = 0;
+
+enum { ncursors = 2 };
+DBC *cursor[ncursors];
+
+static void
+testit (uint32_t cop) {
+ void *kptrs[ncursors];
+ void *vptrs[ncursors];
+ int i;
+ for (i=0; i<ncursors; i++) {
+ DBT k0; memset(&k0, 0, sizeof k0);
+ DBT v0; memset(&v0, 0, sizeof v0);
+ int r = cursor[i]->c_get(cursor[i], &k0, &v0, cop);
+ CKERR(r);
+ kptrs[i] = k0.data;
+ vptrs[i] = v0.data;
+ }
+ verify_distinct_pointers(kptrs, ncursors);
+ verify_distinct_pointers(vptrs, ncursors);
+}
+
+static void
+test (void) {
+ if (verbose) printf("test_cursor\n");
+
+ const char * const fname = "test.cursor.ft_handle";
+ int r;
+
+ /* create the dup database file */
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_THREAD|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); assert(r == 0);
+    db->set_errfile(db,stderr); // send error messages to stderr
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ int i;
+ int n = 42;
+ for (i=0; i<n; i++) {
+ int k = htonl(i);
+ int v = htonl(i);
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+ }
+
+ for (i=0; i<ncursors; i++) {
+ r = db->cursor(db, null_txn, &cursor[i], 0); CKERR(r);
+ }
+
+ testit(DB_FIRST);
+ testit(DB_NEXT);
+ testit(DB_PREV);
+ testit(DB_LAST);
+
+ r = cursor[0]->c_close(cursor[0]); assert(r == 0);
+ r = cursor[1]->c_close(cursor[1]); assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ test();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_DB_NEXT_no_dup.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_DB_NEXT_no_dup.cc
new file mode 100644
index 00000000..8ccc2751
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_DB_NEXT_no_dup.cc
@@ -0,0 +1,177 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static DBC* cursor = NULL;
+static DB* db = NULL;
+static DB_ENV* env = NULL;
+static int r = 0;
+static DB_TXN* null_txn = NULL;
+
+static void setup_env(void) {
+ assert(!env && !db && !cursor);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ assert(env);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_THREAD|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ assert(env);
+}
+
+static void close_env(void) {
+ assert(env && !db && !cursor);
+ r = env->close(env, 0);
+ CKERR(r);
+ env = NULL;
+}
+
+static void setup_db(void) {
+ assert(env && !db && !cursor);
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ assert(db);
+ db->set_errfile(db, stderr);
+ r = db->open(db, null_txn, "foo.db", "main", DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ assert(db);
+}
+
+static void close_db(void) {
+ assert(env && db && !cursor);
+ r = db->close(db, 0);
+ CKERR(r);
+ db = NULL;
+}
+
+static void setup_cursor(void) {
+ assert(env && db && !cursor);
+ r = db->cursor(db, NULL, &cursor, 0);
+ CKERR(r);
+ assert(cursor);
+}
+
+static void close_cursor(void) {
+ assert(env && db && cursor);
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ cursor = NULL;
+}
+
+static void insert(char k, char d) {
+ DBT key;
+ DBT data;
+ r = db->put(db, null_txn, dbt_init(&key, &k, sizeof(k)), dbt_init(&data, &d, sizeof(d)), 0);
+ CKERR(r);
+}
+
+static void c_get(uint32_t flag, char key_expect, char data_expect) {
+ DBT key;
+ DBT data;
+
+ r = cursor->c_get(cursor, dbt_init(&key, 0, 0), dbt_init(&data, 0, 0), flag);
+ CKERR(r);
+ assert(key.size == sizeof(key_expect));
+ assert(data.size == sizeof(data_expect));
+ char got_key = *(char*)key.data;
+ char got_data = *(char*)data.data;
+ if (verbose &&
+ (got_key != key_expect || got_data != data_expect)) {
+ printf("c_get(%u) Expect (%c,%c)\n"
+ " Got (%c,%c)\n",
+ flag, key_expect, data_expect, got_key, got_data);
+ }
+ assert(got_key == key_expect);
+ assert(got_data == data_expect);
+}
+
+static void test_skip_key(uint32_t flag, bool is_next) {
+ setup_env();
+ setup_db();
+ setup_cursor();
+
+ /* ********************************************************************** */
+
+ char key = 'g';
+ char data = 'g';
+ int forward = is_next ? 1 : -1;
+
+ insert(key, data);
+ insert((char)(key + forward), data);
+ c_get(flag, key, data);
+ insert(key, (char)(data + forward));
+ c_get(flag, (char)(key + forward), data);
+
+ /* ********************************************************************** */
+ close_cursor();
+ close_db();
+ close_env();
+}
+
+static void run_test(void) {
+ /* ********************************************************************** */
+ /* Test DB_NEXT works properly. */
+ test_skip_key(DB_NEXT, true);
+ /* ********************************************************************** */
+ /* Test DB_PREV works properly. */
+ test_skip_key(DB_PREV, false);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+
+ run_test();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_db_current.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_db_current.cc
new file mode 100644
index 00000000..00310b27
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_db_current.cc
@@ -0,0 +1,162 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <memory.h>
+#include <db.h>
+
+static void
+db_put (DB *db, int k, int v) {
+ DB_TXN * const null_txn = 0;
+ DBT key, val;
+ int r = db->put(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ CKERR(r);
+}
+
+static void
+test_cursor_current (void) {
+ if (verbose) printf("test_cursor_current\n");
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.cursor.current.ft_handle";
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ int k = 42, v = 42000;
+ db_put(db, k, v);
+ db_put(db, 43, 2000);
+
+ DBC *cursor;
+
+ r = db->cursor(db, null_txn, &cursor, 0); CKERR(r);
+
+ DBT key, data; int kk, vv;
+
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&data), DB_CURRENT);
+ assert(r == EINVAL);
+
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&data), DB_FIRST);
+ CKERR(r);
+ assert(key.size == sizeof kk);
+ memcpy(&kk, key.data, sizeof kk);
+ assert(kk == k);
+ assert(data.size == sizeof vv);
+ memcpy(&vv, data.data, data.size);
+ assert(vv == v);
+ toku_free(key.data); toku_free(data.data);
+
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&data), DB_CURRENT);
+ CKERR(r);
+ assert(key.size == sizeof kk);
+ memcpy(&kk, key.data, sizeof kk);
+ assert(kk == k);
+ assert(data.size == sizeof vv);
+ memcpy(&vv, data.data, data.size);
+ assert(vv == v);
+ r = db->del(db, null_txn, &key, DB_DELETE_ANY);
+ toku_free(key.data); toku_free(data.data);
+
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&data), DB_CURRENT);
+ CKERR2(r,DB_KEYEMPTY);
+
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&data), DB_CURRENT);
+ CKERR2(r,DB_KEYEMPTY);
+
+ r = cursor->c_close(cursor); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static void
+db_get (DB *db, int k, int UU(v), int expectr) {
+ DBT key, val;
+ int r = db->get(db, 0, dbt_init(&key, &k, sizeof k), dbt_init_malloc(&val), 0);
+ assert(r == expectr);
+}
+
+static void
+test_reopen (void) {
+ if (verbose) printf("test_reopen\n");
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.cursor.current.ft_handle";
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, 0, 0666); CKERR(r);
+
+ db_get(db, 1, 1, DB_NOTFOUND);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ test_cursor_current();
+ test_reopen();
+
+ return 0;
+}
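
The sequence above pins down the DB_CURRENT edge cases: an unpositioned cursor yields EINVAL, and once the row under the cursor is deleted, DB_CURRENT keeps returning DB_KEYEMPTY rather than silently repositioning. A caller that wants to keep scanning after such a delete could plausibly fall through to DB_NEXT, sketched below; this continuation is an assumption about intended usage, not something the test asserts:

    static int refresh_or_advance(DBC *cursor, DBT *key, DBT *val) {
        int r = cursor->c_get(cursor, key, val, DB_CURRENT);
        if (r == DB_KEYEMPTY) {
            // the positioned row was deleted; try the next surviving row
            r = cursor->c_get(cursor, key, val, DB_NEXT);
        }
        return r;
    }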
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_delete2.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_delete2.cc
new file mode 100644
index 00000000..f976e1c6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_delete2.cc
@@ -0,0 +1,109 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static DB_ENV *dbenv;
+static DB *db;
+static DB_TXN * txn;
+
+static void
+test_cursor_delete2 (void) {
+ int r;
+ DBT key,val;
+
+ r = db_env_create(&dbenv, 0); CKERR(r);
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, DB_PRIVATE|DB_INIT_MPOOL|DB_CREATE|DB_INIT_TXN, 0); CKERR(r);
+
+ r = db_create(&db, dbenv, 0); CKERR(r);
+ r = dbenv->txn_begin(dbenv, 0, &txn, 0); CKERR(r);
+ r = db->open(db, txn, "primary.db", NULL, DB_BTREE, DB_CREATE, 0600); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = dbenv->txn_begin(dbenv, 0, &txn, 0); CKERR(r);
+ r = db->put(db, txn, dbt_init(&key, "a", 2), dbt_init(&val, "b", 2), 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = dbenv->txn_begin(dbenv, 0, &txn, 0); CKERR(r);
+ r = db->del(db, txn, dbt_init(&key, "a", 2), 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = dbenv->txn_begin(dbenv, 0, &txn, 0); CKERR(r);
+ r = db->del(db, txn, dbt_init(&key, "a", 2), DB_DELETE_ANY); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = dbenv->txn_begin(dbenv, 0, &txn, 0); CKERR(r);
+ r = db->put(db, txn, dbt_init(&key, "a", 2), dbt_init(&val, "c", 2), 0); CKERR(r);
+ r = db->del(db, txn, dbt_init(&key, "a", 2), 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = dbenv->txn_begin(dbenv, 0, &txn, 0); CKERR(r);
+ r = db->put(db, txn, dbt_init(&key, "a", 2), dbt_init(&val, "c", 2), 0); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = dbenv->txn_begin(dbenv, 0, &txn, 0); CKERR(r);
+ r = db->del(db, txn, dbt_init(&key, "a", 2), 0); CKERR(r);
+ r = db->del(db, txn, dbt_init(&key, "a", 2), DB_DELETE_ANY); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = dbenv->close(dbenv, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ test_cursor_delete2();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_flags.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_flags.cc
new file mode 100644
index 00000000..469c32bd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_flags.cc
@@ -0,0 +1,92 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+test_cursor_flags (int cursor_flags, int expectr) {
+ if (verbose) printf("test_cursor_flags:%d %d\n", cursor_flags, expectr);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.cursor.delete.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ DBC *cursor;
+ r = db->cursor(db, null_txn, &cursor, cursor_flags);
+ assert(r == expectr);
+
+ if (r == 0) {
+ r = cursor->c_close(cursor); assert(r == 0);
+ }
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ test_cursor_flags(0, 0);
+ test_cursor_flags(~0, EINVAL);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_interrupt.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_interrupt.cc
new file mode 100644
index 00000000..0020c968
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_interrupt.cc
@@ -0,0 +1,152 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdio.h>
+
+#include <db.h>
+
+
+int num_interrupts_called;
+static bool interrupt(void* extra UU(), uint64_t rows UU()) {
+ num_interrupts_called++;
+ return false;
+}
+
+static bool interrupt_true(void* extra UU(), uint64_t rows UU()) {
+ num_interrupts_called++;
+ return true;
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV *env;
+ DB *db;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | DB_INIT_LOG, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->set_readpagesize(db, 1024);
+ CKERR(r);
+ r = db->set_pagesize(db, 1024*10);
+ CKERR(r);
+
+ const char * const fname = "test.change_pagesize";
+ r = db->open(db, NULL, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ DB_TXN* txn;
+ r = env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+ for (uint64_t i = 0; i < 10000; i++) {
+ DBT key, val;
+ uint64_t k = i;
+ uint64_t v = i;
+ dbt_init(&key, &k, sizeof k);
+ dbt_init(&val, &v, sizeof v);
+ db->put(db, txn, &key, &val, DB_PRELOCKED_WRITE); // adding DB_PRELOCKED_WRITE just to make the test go faster
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+    // create a snapshot txn so that when we delete the elements
+    // we just inserted, they do not get garbage collected away
+ DB_TXN* snapshot_txn;
+ r = env->txn_begin(env, 0, &snapshot_txn, DB_TXN_SNAPSHOT);
+ CKERR(r);
+
+ DB_TXN* delete_txn;
+ r = env->txn_begin(env, 0, &delete_txn, DB_TXN_SNAPSHOT);
+ CKERR(r);
+
+ for (uint64_t i = 0; i < 10000; i++) {
+ DBT key;
+ uint64_t k = i;
+ dbt_init(&key, &k, sizeof k);
+ db->del(db, delete_txn, &key, DB_PRELOCKED_WRITE | DB_DELETE_ANY); // adding DB_PRELOCKED_WRITE just to make the test go faster
+ }
+ r = delete_txn->commit(delete_txn, 0);
+ CKERR(r);
+
+    // checkpoint to flush the leaf nodes so they end up with more than one basement node each
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
+ // create a txn that should see an empty dictionary
+ DB_TXN* test_txn;
+ r = env->txn_begin(env, 0, &test_txn, DB_TXN_SNAPSHOT);
+ CKERR(r);
+ DBC* cursor = NULL;
+ r = db->cursor(db, test_txn, &cursor, 0);
+ cursor->c_set_check_interrupt_callback(cursor, interrupt, NULL);
+ DBT key, val;
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ CKERR2(r, DB_NOTFOUND);
+ assert(num_interrupts_called > 1);
+ num_interrupts_called = 0;
+ cursor->c_set_check_interrupt_callback(cursor, interrupt_true, NULL);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ CKERR2(r, TOKUDB_INTERRUPTED);
+ assert(num_interrupts_called == 1);
+
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ r = test_txn->commit(test_txn, 0);
+ CKERR(r);
+
+
+ r = snapshot_txn->commit(snapshot_txn, 0);
+ CKERR(r);
+
+
+ r = db->close(db, 0);
+ CKERR(r);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ return 0;
+}
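
The two callbacks above show the whole contract of c_set_check_interrupt_callback: it is polled during the scan with an opaque extra pointer and a counter (named rows in the test), and returning true makes the pending c_get fail with TOKUDB_INTERRUPTED while returning false lets it continue. A sketch of a more practical callback that aborts once a row budget is exceeded; the struct and threshold are illustrative, only the callback signature comes from the test:

    struct scan_budget { uint64_t max_rows; };

    static bool out_of_budget(void *extra, uint64_t rows_scanned) {
        struct scan_budget *budget = (struct scan_budget *) extra;
        // returning true makes the in-flight c_get return TOKUDB_INTERRUPTED
        return rows_scanned > budget->max_rows;
    }

    // registration mirrors the test:
    //   cursor->c_set_check_interrupt_callback(cursor, out_of_budget, &my_budget);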
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_nonleaf_expand.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_nonleaf_expand.cc
new file mode 100644
index 00000000..29651c42
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_nonleaf_expand.cc
@@ -0,0 +1,142 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+expect_cursor_get (DBC *cursor, int k, int v, int op) {
+ int kk, vv;
+ DBT key, val;
+ int r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), op);
+ assert(r == 0);
+ assert(key.size == sizeof kk); memcpy(&kk, key.data, key.size); assert(kk == k); toku_free(key.data);
+ assert(val.size == sizeof vv); memcpy(&vv, val.data, val.size); assert(vv == v); toku_free(val.data);
+}
+
+static DBC *
+new_cursor (DB *db, int k, int v, int op) {
+ DBC *cursor;
+ int r;
+ r = db->cursor(db, 0, &cursor, 0); assert(r == 0);
+ expect_cursor_get(cursor, k, v, op);
+ return cursor;
+}
+
+static int
+db_put (DB *db, int k, int v) {
+ DBT key, val;
+ int r = db->put(db, 0, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ return r;
+}
+
+/* use inserts and cursors to test the ft_nonleaf_expand function
+ insert keys 0 and n and set cursors to them
+ then insert keys 1 .. n-1. this should cause leaf splits, new root nodes, nonleaf expands
+ and nonleaf splits as the tree grows.
+
+   the reverse parameter controls the order in which the insertions are made, to exercise
+   the <, =, > cases in the ft_nonleaf_expand function */
+
+static void
+test_cursor_nonleaf_expand (int n, int reverse) {
+ if (verbose) printf("test_cursor_nonleaf_expand:%d %d\n", n, reverse);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.insert.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->set_pagesize(db, 4096); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ r = db_put(db, htonl(0), 0); assert(r == 0);
+ DBC *cursor0 = new_cursor(db, htonl(0), 0, DB_FIRST); assert(cursor0);
+ r = db_put(db, htonl(n), n); assert(r == 0);
+ DBC *cursorn = new_cursor(db, htonl(n), n, DB_LAST); assert(cursorn);
+
+ int i;
+ if (reverse) {
+ for (i=n-1; i > 0; i--) {
+ r = db_put(db, htonl(i), i); assert(r == 0);
+ }
+ } else {
+ for (i=1; i < n; i++) {
+ r = db_put(db, htonl(i), i); assert(r == 0);
+ }
+ }
+
+ /* make sure the cursors did not move */
+ expect_cursor_get(cursor0, htonl(0), 0, DB_CURRENT);
+ expect_cursor_get(cursorn, htonl(n), n, DB_CURRENT);
+
+ r = cursor0->c_close(cursor0); assert(r == 0);
+ r = cursorn->c_close(cursorn); assert(r == 0);
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ int i;
+ for (i=1; i<=65536; i *= 2) {
+ test_cursor_nonleaf_expand(i, 0);
+ test_cursor_nonleaf_expand(i, 1);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_null.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_null.cc
new file mode 100644
index 00000000..e106974a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_null.cc
@@ -0,0 +1,210 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <memory.h>
+#include <db.h>
+
+#include <errno.h>
+#include <sys/stat.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+DB *db;
+DB_ENV* dbenv;
+DBC* cursors[(int)256];
+DB_TXN* null_txn = NULL;
+
+static void
+put (int _key, int _data) {
+ int r;
+ DBT key;
+ DBT data;
+ dbt_init(&key, &_key, sizeof(int));
+ dbt_init(&data, &_data, sizeof(int));
+ if (_key == -1) {
+ key.data = NULL;
+ key.size = 0;
+ }
+ if (_data == -1) {
+ data.data = NULL;
+ data.size = 0;
+ }
+
+ r = db->put(db, null_txn, &key, &data, 0);
+ CKERR(r);
+}
+
+static void
+cget (uint32_t flag, bool find, char txn, int _key, int _data) {
+ assert(cursors[(int)txn]);
+
+ int r;
+ DBT key;
+ DBT data;
+ if (flag == DB_CURRENT) {
+ _key++;
+ _data++;
+ dbt_init(&key, &_key, sizeof(int));
+ dbt_init(&data, &_data, sizeof(int));
+ _key--;
+ _data--;
+ }
+ else if (flag == DB_SET) {
+ dbt_init(&key, &_key, sizeof(int));
+ if (_key == -1) {
+ key.data = NULL;
+ key.size = 0;
+ }
+ _data++;
+ dbt_init(&data, &_data, sizeof(int));
+ _data--;
+ }
+ else assert(false);
+ r = cursors[(int)txn]->c_get(cursors[(int)txn], &key, &data, flag);
+ if (find) {
+ CKERR(r);
+ if (_key == -1) {
+ assert(key.data == NULL);
+ assert(key.size == 0);
+ }
+ else {
+ assert(key.size == sizeof(int));
+ assert(*(int*)key.data == _key);
+ }
+ if (_data == -1) {
+ assert(data.data == NULL);
+ assert(data.size == 0);
+ }
+ else {
+ assert(data.size == sizeof(int));
+ assert(*(int*)data.data == _data);
+ }
+ }
+ else CKERR2(r, DB_NOTFOUND);
+}
+
+static void
+init_dbc (char name) {
+ int r;
+
+ assert(!cursors[(int)name]);
+ r = db->cursor(db, null_txn, &cursors[(int)name], 0);
+ CKERR(r);
+ assert(cursors[(int)name]);
+}
+
+static void
+close_dbc (char name) {
+ int r;
+
+ assert(cursors[(int)name]);
+ r = cursors[(int)name]->c_close(cursors[(int)name]);
+ CKERR(r);
+ cursors[(int)name] = NULL;
+}
+
+static void
+setup_dbs (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ dbenv = NULL;
+ db = NULL;
+ /* Open/create primary */
+ r = db_env_create(&dbenv, 0);
+ CKERR(r);
+ uint32_t env_txn_flags = 0;
+ uint32_t env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL;
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, env_open_flags | env_txn_flags, 0600);
+ CKERR(r);
+
+ r = db_create(&db, dbenv, 0);
+ CKERR(r);
+
+ char a;
+ r = db->open(db, null_txn, "foobar.db", NULL, DB_BTREE, DB_CREATE, 0600);
+ CKERR(r);
+ for (a = 'a'; a <= 'z'; a++) init_dbc(a);
+}
+
+static void
+close_dbs (void) {
+ char a;
+ for (a = 'a'; a <= 'z'; a++) {
+ if (cursors[(int)a]) close_dbc(a);
+ }
+
+ int r;
+ r = db->close(db, 0);
+ CKERR(r);
+ db = NULL;
+ r = dbenv->close(dbenv, 0);
+ CKERR(r);
+ dbenv = NULL;
+}
+
+static void
+test (void) {
+ /* ********************************************************************** */
+ int key;
+ int data;
+ int i;
+ for (i = 0; i < 4; i++) {
+ if (i & 0x1) key = -1;
+ else key = 1;
+ if (i & 0x2) data = -1;
+ else data = 1;
+ setup_dbs();
+ put(key, data);
+ cget(DB_SET, true, 'a', key, data);
+ cget(DB_CURRENT, true, 'a', key, data);
+ close_dbs();
+ }
+ /* ********************************************************************** */
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_stickyness.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_stickyness.cc
new file mode 100644
index 00000000..b271e175
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_stickyness.cc
@@ -0,0 +1,133 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+
+static void
+db_put (DB *db, int k, int v) {
+ DB_TXN * const null_txn = 0;
+ DBT key, val;
+ int r = db->put(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+}
+
+static int
+cursor_get (DBC *cursor, unsigned int *k, unsigned int *v, int op) {
+ DBT key, val;
+ int r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), op);
+ if (r == 0) {
+ assert(key.size == sizeof *k); memcpy(k, key.data, key.size);
+ assert(val.size == sizeof *v); memcpy(v, val.data, val.size);
+ }
+ if (key.data) toku_free(key.data);
+ if (val.data) toku_free(val.data);
+ return r;
+}
+
+static void
+test_cursor_sticky (int n, int dup_mode) {
+ if (verbose) printf("test_cursor_sticky:%d %d\n", n, dup_mode);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test_cursor_sticky.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->set_flags(db, dup_mode); assert(r == 0);
+ r = db->set_pagesize(db, 4096); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ int i;
+ unsigned int k, v;
+ for (i=0; i<n; i++) {
+ db_put(db, htonl(i), htonl(i));
+ }
+
+ /* walk the tree */
+ DBC *cursor;
+ r = db->cursor(db, 0, &cursor, 0); assert(r == 0);
+ for (i=0; i<n; i++) {
+ // GCC 4.8 complains about these being maybe uninitialized.
+ // TODO(leif): figure out why and fix it.
+ k = 0; v = 0;
+ r = cursor_get(cursor, &k, &v, DB_NEXT); assert(r == 0);
+ assert(k == htonl(i)); assert(v == htonl(i));
+ }
+
+ r = cursor_get(cursor, &k, &v, DB_NEXT); assert(r == DB_NOTFOUND);
+
+ r = cursor_get(cursor, &k, &v, DB_CURRENT); assert(r == 0); assert(k == htonl(n-1)); assert(v == htonl(n-1));
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+ int i;
+
+ // setvbuf(stdout, NULL, _IONBF, 0);
+ parse_args(argc, argv);
+
+ for (i=1; i<65537; i *= 2) {
+ test_cursor_sticky(i, 0);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_cursor_with_read_txn.cc b/storage/tokudb/PerconaFT/src/tests/test_cursor_with_read_txn.cc
new file mode 100644
index 00000000..449be718
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_cursor_with_read_txn.cc
@@ -0,0 +1,125 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+int test_main(int argc, char * const argv[])
+{
+ int r;
+ DB * db;
+ DB_ENV * env;
+ (void) argc;
+ (void) argv;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
+
+ // set things up
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
+ CKERR(r);
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
+ CKERR(r);
+
+
+ DB_TXN* txn = NULL;
+ r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT);
+ CKERR(r);
+
+ int k = 1;
+ int v = 10;
+ DBT key, val;
+ r = db->put(
+ db,
+ txn,
+ dbt_init(&key, &k, sizeof k),
+ dbt_init(&val, &v, sizeof v),
+ 0
+ );
+ CKERR(r);
+ k = 2;
+ v = 20;
+ r = db->put(
+ db,
+ txn,
+ dbt_init(&key, &k, sizeof k),
+ dbt_init(&val, &v, sizeof v),
+ 0
+ );
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY);
+ CKERR(r);
+ DBC* cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0);
+ CKERR(r);
+ DBT key1, val1;
+ memset(&key1, 0, sizeof key1);
+ memset(&val1, 0, sizeof val1);
+ r = cursor->c_get(cursor, &key1, &val1, DB_FIRST);
+ CKERR(r);
+ invariant(key1.size == sizeof(int));
+ invariant(*(int *)key1.data == 1);
+ invariant(val1.size == sizeof(int));
+ invariant(*(int *)val1.data == 10);
+
+ r = cursor->c_get(cursor, &key1, &val1, DB_NEXT);
+ CKERR(r);
+ invariant(key1.size == sizeof(int));
+ invariant(*(int *)key1.data == 2);
+ invariant(val1.size == sizeof(int));
+ invariant(*(int *)val1.data == 20);
+
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ // clean things up
+ r = db->close(db, 0);
+ CKERR(r);
+ r = env->close(env, 0);
+ CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_already_exists.cc b/storage/tokudb/PerconaFT/src/tests/test_db_already_exists.cc
new file mode 100644
index 00000000..68b16296
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_already_exists.cc
@@ -0,0 +1,97 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+
+#include <unistd.h>
+#include <db.h>
+#include <errno.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.already.exists.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ // r = db->set_flags(db, DB_DUP); CKERR(r);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, 0, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_EXCL, 0666);
+ assert(r == EINVAL);
+
+ r = db->close(db, 0); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ db->set_errfile(db,0); // Turn off those annoying errors
+
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE | DB_EXCL, 0666);
+ assert(r == EEXIST);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_change_pagesize.cc b/storage/tokudb/PerconaFT/src/tests/test_db_change_pagesize.cc
new file mode 100644
index 00000000..ea576a15
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_change_pagesize.cc
@@ -0,0 +1,104 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdio.h>
+
+#include <db.h>
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV *env;
+ DB *db;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | DB_INIT_LOG, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->set_pagesize(db, 10000);
+ CKERR(r);
+
+ const char * const fname = "test.change_pagesize";
+ r = db->open(db, NULL, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ DB_TXN* txn;
+ r = env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+ for (uint64_t i = 0; i < 10000; i++) {
+ DBT key, val;
+ uint64_t k = i;
+ uint64_t v = i;
+ dbt_init(&key, &k, sizeof k);
+ dbt_init(&val, &v, sizeof v);
+        r = db->put(db, txn, &key, &val, DB_PRELOCKED_WRITE); // DB_PRELOCKED_WRITE just makes the test go faster
+        CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ // now we change the pagesize. In 6.1.0, this would eventually cause a crash
+ r = db->change_pagesize(db, 1024);
+ CKERR(r);
+
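+    // Rows inserted from here on should be written with the new 1024-byte page size in effect.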
+ r = env->txn_begin(env, 0, &txn, 0);
+ CKERR(r);
+ for (uint64_t i = 0; i < 10000; i++) {
+ DBT key, val;
+ uint64_t k = 10000+i;
+ uint64_t v = i;
+ dbt_init(&key, &k, sizeof k);
+ dbt_init(&val, &v, sizeof v);
+        r = db->put(db, txn, &key, &val, DB_PRELOCKED_WRITE); // DB_PRELOCKED_WRITE just makes the test go faster
+        CKERR(r);
+ }
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = db->close(db, 0);
+ CKERR(r);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_change_xxx.cc b/storage/tokudb/PerconaFT/src/tests/test_db_change_xxx.cc
new file mode 100644
index 00000000..1310d9d3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_change_xxx.cc
@@ -0,0 +1,150 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Exercise set_xxx/change_xxx/get_xxx for pagesize, readpagesize, and compression method, both before and after the DB is opened. */
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ DB_ENV *env;
+ DB *db;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ uint32_t ret_val = 0;
+ r = db_create(&db, env, 0);
+ CKERR(r);
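+    // Before open: set_* succeeds, change_* is expected to fail with EINVAL, and get_* reports the set_* values.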
+ r = db->set_pagesize(db, 112024);
+ CKERR(r);
+ r = db->change_pagesize(db, 202433);
+ CKERR2(r, EINVAL);
+ r = db->get_pagesize(db, &ret_val);
+ CKERR(r);
+ assert(ret_val == 112024);
+ r = db->set_readpagesize(db, 33024);
+ CKERR(r);
+ r = db->change_readpagesize(db, 202433);
+ CKERR2(r, EINVAL);
+ r = db->get_readpagesize(db, &ret_val);
+ CKERR(r);
+ assert(ret_val == 33024);
+
+ enum toku_compression_method method = TOKU_ZLIB_METHOD;
+ enum toku_compression_method ret_method = TOKU_NO_COMPRESSION;
+ r = db->set_compression_method(db, method);
+ CKERR(r);
+ r = db->change_compression_method(db, method);
+ CKERR2(r, EINVAL);
+ r = db->get_compression_method(db, &ret_method);
+ CKERR(r);
+ assert(ret_method == TOKU_ZLIB_METHOD);
+
+ // now do the open
+ const char * const fname = "test.change_xxx";
+ r = db->open(db, NULL, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+
+ r = db->get_pagesize(db, &ret_val);
+ CKERR(r);
+ assert(ret_val == 112024);
+ r = db->get_readpagesize(db, &ret_val);
+ CKERR(r);
+ assert(ret_val == 33024);
+ ret_method = TOKU_NO_COMPRESSION;
+ r = db->get_compression_method(db, &ret_method);
+ CKERR(r);
+ assert(ret_method == TOKU_ZLIB_METHOD);
+
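+    // After open the roles reverse: set_* is rejected with EINVAL, while change_* succeeds and takes effect immediately.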
+ r = db->set_pagesize(db, 2024);
+ CKERR2(r, EINVAL);
+ r = db->set_readpagesize(db, 1111);
+ CKERR2(r, EINVAL);
+ r = db->set_compression_method(db, TOKU_NO_COMPRESSION);
+ CKERR2(r, EINVAL);
+
+ r = db->change_pagesize(db, 100000);
+ CKERR(r);
+ r = db->change_readpagesize(db, 10000);
+ CKERR(r);
+ r = db->change_compression_method(db, TOKU_LZMA_METHOD);
+ CKERR(r);
+
+ r = db->get_pagesize(db, &ret_val);
+ CKERR(r);
+ assert(ret_val == 100000);
+ r = db->get_readpagesize(db, &ret_val);
+ CKERR(r);
+ assert(ret_val == 10000);
+ ret_method = TOKU_NO_COMPRESSION;
+ r = db->get_compression_method(db, &ret_method);
+ CKERR(r);
+ assert(ret_method == TOKU_LZMA_METHOD);
+
+    r = db->close(db, 0);
+    CKERR(r);
+
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, NULL, fname, "main", DB_BTREE, DB_AUTO_COMMIT, 0666);
+ CKERR(r);
+
+ r = db->get_pagesize(db, &ret_val);
+ CKERR(r);
+ assert(ret_val == 100000);
+ r = db->get_readpagesize(db, &ret_val);
+ CKERR(r);
+ assert(ret_val == 10000);
+ ret_method = TOKU_NO_COMPRESSION;
+ r = db->get_compression_method(db, &ret_method);
+ CKERR(r);
+ assert(ret_method == TOKU_LZMA_METHOD);
+
+    r = db->close(db, 0);
+    CKERR(r);
+
+ r=env->close(env, 0); assert(r==0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_close_no_open.cc b/storage/tokudb/PerconaFT/src/tests/test_db_close_no_open.cc
new file mode 100644
index 00000000..dc2d32ce
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_close_no_open.cc
@@ -0,0 +1,65 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Can I close a db without opening it? */
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+DB_ENV *env;
+DB *db;
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_create(&db, env, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_current_clobbers_db.cc b/storage/tokudb/PerconaFT/src/tests/test_db_current_clobbers_db.cc
new file mode 100644
index 00000000..80cfd044
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_current_clobbers_db.cc
@@ -0,0 +1,109 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* DB_CURRENT */
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+DB_ENV *env;
+DB *db;
+DB_TXN* null_txn = NULL;
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_env_create(&env, 0); CKERR(r);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_PRIVATE|DB_INIT_MPOOL|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, "foo.db", "main", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ DBC *cursor;
+ r = db->cursor(db, null_txn, &cursor, 0); CKERR(r);
+ DBT key, val;
+ DBT ckey, cval;
+ int k1 = 1, v1=7;
+ enum foo { blob = 1 };
+ int k2 = 2;
+ int v2 = 8;
+ r = db->put(db, null_txn, dbt_init(&key, &k1, sizeof(k1)), dbt_init(&val, &v1, sizeof(v1)), 0);
+ CKERR(r);
+ r = db->put(db, null_txn, dbt_init(&key, &k2, sizeof(k2)), dbt_init(&val, &v2, sizeof(v2)), 0);
+ CKERR(r);
+
+ r = cursor->c_get(cursor, dbt_init(&ckey, NULL, 0), dbt_init(&cval, NULL, 0), DB_LAST);
+ CKERR(r);
+ //Copies a static pointer into val.
+ r = db->get(db, null_txn, dbt_init(&key, &k1, sizeof(k1)), dbt_init(&val, NULL, 0), 0);
+ CKERR(r);
+ assert(val.data != &v1);
+ assert(*(int*)val.data == v1);
+
+ r = cursor->c_get(cursor, dbt_init(&ckey, NULL, 0), dbt_init(&cval, NULL, 0), DB_LAST);
+ CKERR(r);
+
+ //Does not corrupt it.
+ assert(val.data != &v1);
+ assert(*(int*)val.data == v1);
+
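+    // Re-reading with DB_CURRENT may reuse the memory that db->get handed back in val, so the asserts on val.data below are expected to trigger valgrind warnings (hence the test name).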
+ r = cursor->c_get(cursor, &ckey, &cval, DB_CURRENT);
+ CKERR(r);
+
+ assert(*(int*)val.data == v1); // Will bring up valgrind error.
+
+
+    r = db->del(db, null_txn, &ckey, DB_DELETE_ANY);
+    CKERR(r);
+
+ assert(*(int*)val.data == v1); // Will bring up valgrind error.
+
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_dbt_mem_behavior.cc b/storage/tokudb/PerconaFT/src/tests/test_db_dbt_mem_behavior.cc
new file mode 100644
index 00000000..cf747e11
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_dbt_mem_behavior.cc
@@ -0,0 +1,192 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <memory.h>
+#include <db.h>
+
+#include <errno.h>
+#include <sys/stat.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+typedef struct {
+ int32_t pkey;
+ char waste[1024];
+} DATA;
+
+DB* db;
+DB_TXN *const null_txn = 0;
+DB_ENV *dbenv;
+uint32_t set_ulen;
+int32_t key_1 = 1;
+
+static void
+setup(void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&dbenv, 0); assert(r == 0);
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+ /* Open/create primary */
+ r = db_create(&db, dbenv, 0); CKERR(r);
+ r = db->open(db, null_txn, "primary.db", NULL, DB_BTREE, DB_CREATE, 0600); CKERR(r);
+}
+
+static void
+insert_test (void) {
+ int r;
+ DATA entry;
+ DBT data;
+ DBT key;
+
+ memset(&entry, 0xFF, sizeof(entry));
+ entry.pkey = key_1;
+
+ dbt_init(&key, &entry.pkey, sizeof(entry.pkey));
+ dbt_init(&data, &entry, sizeof(entry));
+ r = db->put(db, null_txn, &key, &data, 0); CKERR(r);
+}
+
+static void
+close_dbs (void) {
+ int r;
+
+ r = db->close(db, 0); CKERR(r);
+ r = dbenv->close(dbenv, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ int i;
+ int r;
+
+ parse_args(argc, argv);
+    // Simple flags that require minimal setup.
+ uint32_t flags[] = {
+ 0,
+ DB_DBT_USERMEM,
+ DB_DBT_MALLOC,
+ DB_DBT_REALLOC,
+ };
+ int num_flags = sizeof(flags) / sizeof(flags[0]);
+
+ int j;
+ setup();
+ insert_test();
+ DBT key;
+ DBT data;
+ void* oldmem;
+
+ for (j = 0; j < num_flags; j++) {
+ for (i = 0; i < 2; i++) {
+ if (i) set_ulen = sizeof(DATA) / 2;
+ else set_ulen = sizeof(DATA);
+
+ unsigned int old_ulen;
+ int was_truncated = 0;
+ int ulen_changed;
+ int size_full;
+ int doclone = 0;
+ DATA fake;
+ int small_buffer = 0;
+
+ memset(&fake, 0xFF, sizeof(DATA));
+ fake.pkey = key_1;
+
+
+ dbt_init(&key, &key_1, sizeof(key_1));
+ dbt_init(&data, 0, 0);
+ data.flags = flags[j];
+ oldmem = toku_malloc(set_ulen);
+ data.data = oldmem;
+ memset(oldmem, 0, set_ulen);
+ if (flags[j] == DB_DBT_USERMEM) {
+ data.ulen = set_ulen;
+ }
+ old_ulen = data.ulen;
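+            // With DB_DBT_USERMEM and a ulen smaller than the stored value, get is expected to return DB_BUFFER_SMALL and leave the caller's buffer untouched.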
+ r = db->get(db, null_txn, &key, &data, 0);
+ if (flags[j] == DB_DBT_USERMEM && set_ulen < sizeof(DATA)) CKERR2(r, DB_BUFFER_SMALL);
+ else CKERR(r);
+
+ if (r == DB_BUFFER_SMALL) {
+ //The entire 'waste' is full of 0xFFs
+ DATA* CAST_FROM_VOIDP(entry, data.data);
+ was_truncated = entry->waste[0] != 0;
+ small_buffer = 1;
+ }
+ ulen_changed = data.ulen != old_ulen;
+ size_full = data.size == sizeof(DATA);
+
+ unsigned int min = data.ulen < data.size ? data.ulen : data.size;
+ min = min < sizeof(DATA) ? min : sizeof(DATA);
+ //assert(min == sizeof(DATA));
+ r = memcmp((DATA*)data.data, &fake, min);
+ doclone = r == 0;
+
+ if (flags[j] != 0) {
+ toku_free(data.data);
+ }
+ if (flags[j] == 0 || flags[j] == DB_DBT_MALLOC) {
+ toku_free(oldmem);
+ }
+
+ assert(!was_truncated);
+
+ bool ulen_should_change = false;
+ if (flags[j] == DB_DBT_REALLOC) {
+ ulen_should_change = (bool)(old_ulen < sizeof(DATA));
+ }
+ else if (flags[j] == DB_DBT_MALLOC) {
+ ulen_should_change = (bool)(old_ulen != sizeof(DATA)*2);
+ }
+ assert(ulen_should_change == (bool)ulen_changed);
+ assert(size_full);
+ assert(doclone == !small_buffer);
+ }
+ }
+ oldmem = 0;
+ dbt_init(&key, 0, 0);
+ dbt_init(&data, 0, 0);
+ close_dbs();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_delete.cc b/storage/tokudb/PerconaFT/src/tests/test_db_delete.cc
new file mode 100644
index 00000000..36aaa197
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_delete.cc
@@ -0,0 +1,186 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+
+
+static void
+db_put (DB *db, int k, int v) {
+ DBT key, val;
+ int r = db->put(db, 0, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+}
+
+static void
+expect_db_del (DB *db, int k, int flags, int expectr) {
+ DBT key;
+ int r = db->del(db, 0, dbt_init(&key, &k, sizeof k), flags);
+ assert(r == expectr);
+}
+
+static void
+expect_db_get (DB *db, int k, int expectr) {
+ DBT key, val;
+ int r = db->get(db, 0, dbt_init(&key, &k, sizeof k), dbt_init_malloc(&val), 0);
+ assert(r == expectr);
+}
+
+static void
+test_db_delete (int n, int dup_mode) {
+ if (verbose) printf("test_db_delete:%d %d\n", n, dup_mode);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.db.delete.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->set_redzone(env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ r = db->set_flags(db, dup_mode);
+ assert(r == 0);
+ r = db->set_pagesize(db, 4096);
+ assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+
+ /* insert n/2 <i, i> pairs */
+ int i;
+ for (i=0; i<n/2; i++)
+ db_put(db, htonl(i), i);
+
+ /* reopen the database to force nonleaf buffering */
+ r = db->close(db, 0);
+ assert(r == 0);
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ r = db->set_flags(db, dup_mode);
+ assert(r == 0);
+ r = db->set_pagesize(db, 4096);
+ assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, 0, 0666);
+ assert(r == 0);
+
+ /* insert n/2 <i, i> pairs */
+ for (i=n/2; i<n; i++)
+ db_put(db, htonl(i), i);
+
+ for (i=0; i<n; i++) {
+ expect_db_del(db, htonl(i), 0, 0);
+
+ expect_db_get(db, htonl(i), DB_NOTFOUND);
+ }
+
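+    /* deleting a key that is not present returns DB_NOTFOUND unless DB_DELETE_ANY suppresses the check */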
+ expect_db_del(db, htonl(n), 0, DB_NOTFOUND);
+ expect_db_del(db, htonl(n), DB_DELETE_ANY, 0);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+static void
+test_db_get_datasize0 (void) {
+ if (verbose) printf("test_db_get_datasize0\n");
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.db_delete.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->set_redzone(env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ r = db->set_pagesize(db, 4096);
+ assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+
+ int k = 0;
+ db_put(db, k, 0);
+
+ DBT key, val;
+ r = db->get(db, 0, dbt_init(&key, &k, sizeof k), dbt_init_malloc(&val), 0);
+ assert(r == 0);
+ toku_free(val.data);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ test_db_get_datasize0();
+
+ test_db_delete(0, 0);
+
+ int i;
+ for (i = 1; i <= (1<<16); i *= 2) {
+ test_db_delete(i, 0);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_descriptor.cc b/storage/tokudb/PerconaFT/src/tests/test_db_descriptor.cc
new file mode 100644
index 00000000..60971315
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_descriptor.cc
@@ -0,0 +1,336 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <memory.h>
+#include <db.h>
+
+#include <errno.h>
+#include <sys/stat.h>
+
+#include "test.h"
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+#define FNAME "foo.tokudb"
+const char *name = NULL;
+
+#define NUM 3
+#define MAX_LENGTH (1<<16)
+
+int order[NUM+1];
+uint32_t length[NUM];
+uint8_t data[NUM][MAX_LENGTH];
+DBT descriptors[NUM];
+DB_ENV *env;
+
+enum {NUM_DBS=2};
+DB *dbs[NUM_DBS];
+DB_TXN *txn = NULL;
+DB_TXN *null_txn;
+int last_open_descriptor = -1;
+
+int abort_type;
+int get_table_lock;
+uint64_t num_called = 0;
+
+
+static void
+verify_db_matches(void) {
+ DB *db;
+ int which;
+ for (which = 0; which < NUM_DBS; which++) {
+ db = dbs[which];
+ if (db) {
+ const DBT * dbt = &db->descriptor->dbt;
+
+ if (last_open_descriptor<0) {
+ assert(dbt->size == 0 && dbt->data == NULL);
+ }
+ else {
+ assert(last_open_descriptor < NUM);
+ assert(dbt->size == descriptors[last_open_descriptor].size);
+ assert(!memcmp(dbt->data, descriptors[last_open_descriptor].data, dbt->size));
+ assert(dbt->data != descriptors[last_open_descriptor].data);
+ }
+ }
+ }
+
+}
+
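+// The comparator doubles as a verification hook: every key comparison re-checks that the DB's visible descriptor matches the one most recently installed.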
+static int
+verify_int_cmp (DB *dbp, const DBT *a, const DBT *b) {
+ num_called++;
+ verify_db_matches();
+ int r = int_dbt_cmp(dbp, a, b);
+ return r;
+}
+
+static void
+open_db(int descriptor, int which) {
+ /* create the dup database file */
+ assert(dbs[which]==NULL);
+ DB *db;
+ int r = db_create(&db, env, 0);
+ CKERR(r);
+ dbs[which] = db;
+
+ assert(abort_type >=0 && abort_type <= 2);
+ if (abort_type==2 && !txn) {
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ CKERR(r);
+ last_open_descriptor = -1; //DB was destroyed at end of last close, did not hang around.
+ }
+ r = db->open(db, txn, FNAME, name, DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ if (descriptor >= 0) {
+ assert(descriptor < NUM);
+ if (txn) {
+ { int chk_r = db->change_descriptor(db, txn, &descriptors[descriptor], 0); CKERR(chk_r); }
+ }
+ else {
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db->change_descriptor(db, txn_desc, &descriptors[descriptor], 0); CKERR(chk_r); }
+ });
+ }
+ last_open_descriptor = descriptor;
+ }
+ verify_db_matches();
+ if (abort_type!=2 && !txn) {
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ CKERR(r);
+ }
+ assert(txn);
+ if (get_table_lock) {
+ r = db->pre_acquire_table_lock(db, txn);
+ CKERR(r);
+ }
+}
+
+static void
+delete_db(void) {
+ int which;
+ for (which = 0; which < NUM_DBS; which++) {
+ assert(dbs[which] == NULL);
+ }
+ int r = env->dbremove(env, NULL, FNAME, name, 0);
+ if (abort_type==2) {
+ CKERR2(r, ENOENT); //Abort deleted it
+ }
+ else CKERR(r);
+ last_open_descriptor = -1;
+}
+
+static void
+close_db(int which) {
+ assert(dbs[which]!=NULL);
+ DB *db = dbs[which];
+ dbs[which] = NULL;
+
+ int r;
+ if (which==1) {
+ r = db->close(db, 0);
+ CKERR(r);
+ return;
+ }
+ if (abort_type>0) {
+ if (abort_type==2 && dbs[1]) {
+ close_db(1);
+ }
+ r = db->close(db, 0);
+ CKERR(r);
+ r = txn->abort(txn);
+ CKERR(r);
+ }
+ else {
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+ }
+ txn = NULL;
+}
+
+static void
+setup_data(void) {
+ int r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, verify_int_cmp); CKERR(r);
+ const int envflags = DB_CREATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOCK |DB_THREAD |DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ int i;
+ for (i=0; i < NUM; i++) {
+ length[i] = i * MAX_LENGTH / (NUM-1);
+ uint32_t j;
+ for (j = 0; j < length[i]; j++) {
+ data[i][j] = (uint8_t)(random() & 0xFF);
+ }
+ memset(&descriptors[i], 0, sizeof(descriptors[i]));
+ descriptors[i].size = length[i];
+ descriptors[i].data = &data[i][0];
+ }
+ last_open_descriptor = -1;
+ txn = NULL;
+}
+
+static void
+permute_order(void) {
+ int i;
+ for (i=0; i < NUM; i++) {
+ order[i] = i;
+ }
+ for (i=0; i < NUM; i++) {
+ int which = (random() % (NUM-i)) + i;
+ int temp = order[i];
+ order[i] = order[which];
+ order[which] = temp;
+ }
+}
+
+static void
+test_insert (int n, int which) {
+ if (which == -1) {
+ for (which = 0; which < NUM_DBS; which++) {
+ if (dbs[which]) {
+ test_insert(n, which);
+ }
+ }
+ return;
+ }
+ assert(dbs[which]!=NULL);
+ DB *db = dbs[which];
+ int i;
+ static int last = 0;
+ for (i=0; i<n; i++) {
+ int k = last++;
+ DBT key, val;
+ uint64_t called = num_called;
+ int r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &i, sizeof i), 0);
+ if (i>0) assert(num_called > called);
+ CKERR(r);
+ }
+}
+
+
+static void
+runtest(void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ setup_data();
+ permute_order();
+
+ int i;
+ /* Subsumed by rest of test.
+ for (i=0; i < NUM; i++) {
+ open_db(-1, 0);
+ test_insert(i, 0);
+ close_db(0);
+ open_db(-1, 0);
+ test_insert(i, 0);
+ close_db(0);
+ delete_db();
+ }
+
+ for (i=0; i < NUM; i++) {
+ open_db(order[i], 0);
+ test_insert(i, 0);
+ close_db(0);
+ open_db(-1, 0);
+ test_insert(i, 0);
+ close_db(0);
+ open_db(order[i], 0);
+ test_insert(i, 0);
+ close_db(0);
+ delete_db();
+ }
+ */
+
+ //Upgrade descriptors along the way. Need version to increase, so do not use 'order[i]'
+ for (i=0; i < NUM; i++) {
+ open_db(i, 0);
+ test_insert(i, 0);
+ close_db(0);
+ open_db(-1, 0);
+ test_insert(i, 0);
+ close_db(0);
+ open_db(i, 0);
+ test_insert(i, 0);
+ close_db(0);
+ }
+ delete_db();
+
+ //Upgrade descriptors along the way. With two handles
+ open_db(-1, 1);
+ for (i=0; i < NUM; i++) {
+ open_db(i, 0);
+ test_insert(i, -1);
+ close_db(0);
+ open_db(-1, 0);
+ test_insert(i, -1);
+ close_db(0);
+ open_db(i, 0);
+ test_insert(i, -1);
+ close_db(0);
+ }
+ if (dbs[1]) {
+ close_db(1);
+ }
+ delete_db();
+
+ env->close(env, 0);
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ for (abort_type = 0; abort_type < 3; abort_type++) {
+ for (get_table_lock = 0; get_table_lock < 2; get_table_lock++) {
+ name = NULL;
+ runtest();
+
+ name = "bar";
+ runtest();
+
+ }
+ }
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_env_open_close.cc b/storage/tokudb/PerconaFT/src/tests/test_db_env_open_close.cc
new file mode 100644
index 00000000..e8a641e3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_env_open_close.cc
@@ -0,0 +1,59 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdio.h>
+
+#include <db.h>
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV *dbenv;
+ int r;
+
+ r = db_env_create(&dbenv, 0);
+ assert(r == 0);
+
+ r = dbenv->close(dbenv, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_env_open_nocreate.cc b/storage/tokudb/PerconaFT/src/tests/test_db_env_open_nocreate.cc
new file mode 100644
index 00000000..aebd3cb2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_env_open_nocreate.cc
@@ -0,0 +1,92 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+// Try to open an environment where the directory does not exist
+// Try when the dir exists but is not an initialized env
+// Try when the dir exists and we do DB_CREATE: it should work.
+// And after that the open should work without a DB_CREATE
+// However, in BDB, after doing a DB_ENV->open and then a close, no state has changed
+// One must actually create a DB, I think...
+
+
+#include <db.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <unistd.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV *dbenv;
+ int r;
+ int do_private;
+
+ for (do_private=0; do_private<2; do_private++) {
+ if (do_private==0) continue; // See #208.
+ int private_flags = do_private ? (DB_CREATE|DB_PRIVATE) : 0;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = db_env_create(&dbenv, 0);
+ CKERR(r);
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, private_flags|DB_INIT_MPOOL, 0);
+ assert(r==ENOENT);
+ dbenv->close(dbenv,0); // free memory
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create(&dbenv, 0);
+ CKERR(r);
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, private_flags|DB_INIT_MPOOL, 0);
+ // PerconaFT has no trouble opening an environment if the directory exists.
+ CKERR(r);
+ dbenv->close(dbenv,0); // free memory
+ }
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_env_open_open_close.cc b/storage/tokudb/PerconaFT/src/tests/test_db_env_open_open_close.cc
new file mode 100644
index 00000000..2204bdf4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_env_open_open_close.cc
@@ -0,0 +1,75 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdio.h>
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+int
+test_main(int argc, char*const* argv) {
+ DB_ENV *dbenv;
+ int r;
+ if (argc == 2 && !strcmp(argv[1], "-v")) verbose = 1;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r = db_env_create(&dbenv, 0);
+ assert(r == 0);
+
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_PRIVATE, 0666);
+ assert(r == 0);
+
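+    // Opening an environment handle that is already open is expected to fail with EINVAL.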
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_PRIVATE, 0666);
+ if (verbose) printf("r=%d\n", r);
+ assert(r == EINVAL);
+
+ r = dbenv->close(dbenv, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_env_set_errpfx.cc b/storage/tokudb/PerconaFT/src/tests/test_db_env_set_errpfx.cc
new file mode 100644
index 00000000..1027bab0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_env_set_errpfx.cc
@@ -0,0 +1,74 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdio.h>
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV *dbenv;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&dbenv, 0);
+ assert(r == 0);
+
+ dbenv->set_errpfx(dbenv, "houdy partners");
+
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL, 0);
+ assert(r == 0);
+
+ dbenv->set_errpfx(dbenv, "houdy partners");
+
+ r = dbenv->close(dbenv, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_env_set_lg_dir.cc b/storage/tokudb/PerconaFT/src/tests/test_db_env_set_lg_dir.cc
new file mode 100644
index 00000000..ac9ce45f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_env_set_lg_dir.cc
@@ -0,0 +1,80 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdio.h>
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <errno.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV *dbenv;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&dbenv, 0);
+ assert(r == 0);
+
+ r = dbenv->set_lg_dir(dbenv, ".");
+ assert(r == 0);
+
+ r = dbenv->set_lg_dir(dbenv, ".");
+ assert(r == 0);
+
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, DB_INIT_TXN|DB_INIT_LOG|DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL, 0);
+ CKERR(r);
+
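+    // Once the environment is open, changing the log directory is expected to fail with EINVAL.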
+ r = dbenv->set_lg_dir(dbenv, ".");
+ assert(r == EINVAL);
+
+ r = dbenv->close(dbenv, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_env_set_tmp_dir.cc b/storage/tokudb/PerconaFT/src/tests/test_db_env_set_tmp_dir.cc
new file mode 100644
index 00000000..1cea3032
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_env_set_tmp_dir.cc
@@ -0,0 +1,80 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdio.h>
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <errno.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV *dbenv;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&dbenv, 0);
+ assert(r == 0);
+
+ r = dbenv->set_tmp_dir(dbenv, ".");
+ assert(r == 0);
+
+ r = dbenv->set_tmp_dir(dbenv, ".");
+ assert(r == 0);
+
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL, 0);
+ CKERR(r);
+
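+    // set_tmp_dir is only accepted before the environment is opened; afterwards it is expected to return EINVAL.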
+ r = dbenv->set_tmp_dir(dbenv, ".");
+ assert(r == EINVAL);
+
+ r = dbenv->close(dbenv, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_env_strdup_null.cc b/storage/tokudb/PerconaFT/src/tests/test_db_env_strdup_null.cc
new file mode 100644
index 00000000..48b90d56
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_env_strdup_null.cc
@@ -0,0 +1,67 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Do I return EINVAL when passing in NULL for something that would otherwise be strdup'd? */
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <db.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+DB_ENV *env;
+DB *db;
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->set_data_dir(env, NULL); assert(r==EINVAL);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ env->set_errpfx(env, NULL); assert(1); //Did not crash.
+ r=env->set_tmp_dir(env, NULL); assert(r==EINVAL);
+ r=env->close(env, 0); assert(r==0);
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_get_put_flags.cc b/storage/tokudb/PerconaFT/src/tests/test_db_get_put_flags.cc
new file mode 100644
index 00000000..4db3741c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_get_put_flags.cc
@@ -0,0 +1,172 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <memory.h>
+#include <db.h>
+
+#include <errno.h>
+#include <sys/stat.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+typedef struct {
+ uint32_t db_flags;
+ uint32_t flags;
+ int r_expect;
+ int key;
+ int data;
+} PUT_TEST;
+
+typedef struct {
+ PUT_TEST put;
+ uint32_t flags;
+ int r_expect;
+ int key;
+ int data;
+} GET_TEST;
+
+enum testtype {NONE=0, TGET=1, TPUT=2, SGET=3, SPUT=4, SPGET=5};
+
+typedef struct {
+ enum testtype kind;
+ uint32_t flags;
+ int r_expect;
+ int key;
+ int data;
+} TEST;
+
+static DB *dbp;
+static DB_TXN *const null_txn = 0;
+static DB_ENV *dbenv;
+
+static void
+setup (uint32_t flags) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ /* Open/create primary */
+ r = db_env_create(&dbenv, 0); assert(r == 0);
+ r = dbenv->set_redzone(dbenv, 0); CKERR(r);
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+ r = db_create(&dbp, dbenv, 0); CKERR(r);
+ dbp->set_errfile(dbp,0); // Turn off those annoying errors
+ if (flags) {
+ r = dbp->set_flags(dbp, flags); CKERR(r);
+ }
+ r = dbp->open(dbp, NULL, "primary.db", NULL, DB_BTREE, DB_CREATE, 0600); CKERR(r);
+}
+
+static void
+close_dbs (void) {
+ int r;
+ r = dbp->close(dbp, 0); CKERR(r);
+ r = dbenv->close(dbenv, 0); CKERR(r);
+}
+
+static void
+insert_bad_flags (DB* db, uint32_t flags, int r_expect, int keyint, int dataint) {
+ DBT key;
+ DBT data;
+ int r;
+
+ dbt_init(&key, &keyint, sizeof(keyint));
+ dbt_init(&data,&dataint,sizeof(dataint));
+ r = db->put(db, null_txn, &key, &data, flags);
+ CKERR2(r, r_expect);
+}
+
+static void
+get_bad_flags (DB* db, uint32_t flags, int r_expect, int keyint, int dataint) {
+ DBT key;
+ DBT data;
+ int r;
+
+ dbt_init(&key, &keyint, sizeof(keyint));
+ dbt_init(&data,&dataint,sizeof(dataint));
+ r = db->get(db, null_txn, &key, &data, flags);
+ CKERR2(r, r_expect);
+ //Verify things don't change.
+ assert(*(int*)key.data == keyint);
+ assert(*(int*)data.data == dataint);
+}
+
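+// Each PUT_TEST lists, in order: db open flags, put flags, expected return code, key, and data.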
+PUT_TEST put_tests[] = {
+ {0, DB_NODUPDATA, EINVAL, 0, 0}, //r_expect must change to 0, once implemented.
+ {0, 0, 0, 0, 0},
+ {0, DB_NOOVERWRITE, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+};
+const int num_put = sizeof(put_tests) / sizeof(put_tests[0]);
+
+GET_TEST get_tests[] = {
+ {{0, 0, 0, 0, 0}, 0 , 0, 0, 0},
+ {{0, 0, 0, 0, 0}, 0 , 0, 0, 0},
+ {{0, 0, 0, 0, 0}, 0 , 0, 0, 0},
+ {{0, 0, 0, 0, 0}, 0 , 0, 0, 0},
+ {{0, 0, 0, 0, 0}, DB_RMW, EINVAL, 0, 0},
+ {{0, 0, 0, 0, 0}, DB_RMW, EINVAL, 0, 0},
+};
+const int num_get = sizeof(get_tests) / sizeof(get_tests[0]);
+
+int
+test_main(int argc, char *const argv[]) {
+ int i;
+
+ parse_args(argc, argv);
+
+ for (i = 0; i < num_put; i++) {
+ if (verbose) printf("PutTest [%d]\n", i);
+ setup(put_tests[i].db_flags);
+ insert_bad_flags(dbp, put_tests[i].flags, put_tests[i].r_expect, put_tests[i].key, put_tests[i].data);
+ close_dbs();
+ }
+
+ for (i = 0; i < num_get; i++) {
+ if (verbose) printf("GetTest [%d]\n", i);
+ setup(get_tests[i].put.db_flags);
+ insert_bad_flags(dbp, get_tests[i].put.flags, get_tests[i].put.r_expect, get_tests[i].put.key, get_tests[i].put.data);
+ get_bad_flags(dbp, get_tests[i].flags, get_tests[i].r_expect, get_tests[i].key, get_tests[i].data);
+ close_dbs();
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_named_delete_last.cc b/storage/tokudb/PerconaFT/src/tests/test_db_named_delete_last.cc
new file mode 100644
index 00000000..3bcafc2e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_named_delete_last.cc
@@ -0,0 +1,133 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <memory.h>
+#include <db.h>
+
+#include <errno.h>
+#include <sys/stat.h>
+
+#include "test.h"
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+#define FNAME "foo.tokudb"
+const char *name = NULL;
+
+#define NUM 8
+#define MAX_LENGTH (1<<16)
+
+DB_ENV *env;
+
+DB *db;
+DB_TXN *null_txn;
+
+static void
+open_db(void) {
+ int r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, null_txn, FNAME, name, DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+}
+
+static void
+delete_db(void) {
+ int r = env->dbremove(env, NULL, FNAME, name, 0); CKERR(r);
+}
+
+static void
+close_db(void) {
+ int r;
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void
+setup_data(void) {
+ int r = db_env_create(&env, 0); CKERR(r);
+    const int envflags = DB_CREATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOCK|DB_THREAD|DB_PRIVATE;
+ r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+}
+
+static void
+runtest(void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ setup_data();
+
+ name = "foo";
+ open_db();
+ close_db();
+ delete_db();
+
+ name = "foo1";
+ open_db();
+ close_db();
+ name = "foo2";
+ open_db();
+ close_db();
+ name = "foo1";
+ delete_db();
+ name = "foo2";
+ delete_db();
+
+ name = "foo1";
+ open_db();
+ close_db();
+ name = "foo2";
+ open_db();
+ close_db();
+ name = "foo2";
+ delete_db();
+ name = "foo1";
+ delete_db();
+
+ env->close(env, 0);
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ runtest();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_no_env.cc b/storage/tokudb/PerconaFT/src/tests/test_db_no_env.cc
new file mode 100644
index 00000000..da5f57f4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_no_env.cc
@@ -0,0 +1,62 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+#include <signal.h>
+
+static __attribute__((__noreturn__)) void catch_abort (int sig __attribute__((__unused__))) {
+ exit(1);
+}
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ signal (SIGABRT, catch_abort);
+ DB *db;
+ int r;
+ r = db_create(&db, 0, 0);
+ assert(r == 0);
+ r = db->close(db, 0);
+ assert(r == 0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_open_notexist_reopen.cc b/storage/tokudb/PerconaFT/src/tests/test_db_open_notexist_reopen.cc
new file mode 100644
index 00000000..83762ab4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_open_notexist_reopen.cc
@@ -0,0 +1,67 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Test that opening a nonexistent database fails with ENOENT, and that a subsequent open with DB_CREATE succeeds. */
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <db.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+DB_ENV *env;
+DB *db;
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); CKERR(r);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_PRIVATE|DB_INIT_MPOOL|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, NULL, "doesnotexist.db", "testdb", DB_BTREE, 0, 0666); assert(r==ENOENT);
+ r=db->open(db, NULL, "doesnotexist.db", "testdb", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_remove.cc b/storage/tokudb/PerconaFT/src/tests/test_db_remove.cc
new file mode 100644
index 00000000..9ad7606b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_remove.cc
@@ -0,0 +1,79 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <fcntl.h>
+
+DB_TXN * const null_txn = 0;
+
+const char * const fname = "test_db_remove.ft_handle";
+
+static void test_db_remove (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ // create the DB
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db1;
+ r = db_create(&db1, env, 0); assert(r == 0);
+ r = db1->open(db1, null_txn, fname, 0, DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+ r = db1->close(db1, 0); assert(r == 0); //Header has been written to disk
+
+ r = db_create(&db1, env, 0); assert(r == 0);
+ r = db1->open(db1, null_txn, fname, 0, DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ // Now remove it, while it is open.
+ r = env->dbremove(env, NULL, fname, 0, 0);
+ assert(r!=0);
+
+ r = db1->close(db1, 0); assert(r==0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ test_db_remove();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_remove_subdb.cc b/storage/tokudb/PerconaFT/src/tests/test_db_remove_subdb.cc
new file mode 100644
index 00000000..c0a97d49
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_remove_subdb.cc
@@ -0,0 +1,125 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Does removing a subdatabase corrupt the db file or the other dbs in that file (when nothing else is open)? */
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <db.h>
+#include <memory.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+DB_ENV *env;
+DB *db;
+DBT key;
+DBT data;
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ dbt_init(&key, "name", sizeof "name");
+ dbt_init(&data, NULL, 0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ // Note: without DB_INIT_MPOOL the BDB library will fail on db->open().
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=env->dbremove(env, NULL, "DoesNotExist.db", NULL, 0); assert(r==ENOENT);
+
+ r=env->dbremove(env, NULL, "DoesNotExist.db", "SubDb", 0); assert(r==ENOENT);
+
+ r=db_create(&db, env, 0); assert(r==0);
+ r=db->open(db, NULL, "master.db", "first", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ dbt_init(&data, "first.db", sizeof "first.db");
+ db->put(db, NULL, &key, &data, 0);
+ r=db->close(db, 0); assert(r==0);
+
+ r=env->dbremove(env, NULL, "master.db", "second", 0); assert(r==ENOENT);
+
+ r=db_create(&db, env, 0); assert(r==0);
+ r=db->open(db, NULL, "master.db", "second", DB_BTREE, DB_CREATE, 0666); assert(r==0);
+ dbt_init(&key, "name", sizeof "name");
+ dbt_init(&data, "second.db", sizeof "second.db");
+ db->put(db, NULL, &key, &data, 0);
+ r=db->close(db, 0); assert(r==0);
+
+ r=db_create(&db, env, 0); assert(r==0);
+ r=db->open(db, NULL, "master.db", "third", DB_BTREE, DB_CREATE, 0666); assert(r==0);
+ dbt_init(&key, "name", sizeof "name");
+ dbt_init(&data, "third.db", sizeof "third.db");
+ db->put(db, NULL, &key, &data, 0);
+ r=db->close(db, 0); assert(r==0);
+
+ r=env->dbremove(env, NULL, "master.db", "second", 0); assert(r==0);
+
+ r=env->dbremove(env, NULL, "master.db", "second", 0); assert(r==ENOENT);
+
+ dbt_init(&key, "name", sizeof "name");
+ dbt_init(&data, NULL, 0);
+
+ //Verify data still exists in first/third
+ r=db_create(&db, env, 0); assert(r==0);
+ r=db->open(db, NULL, "master.db", "first", DB_BTREE, 0, 0666); assert(r==0);
+ r=db->get(db, NULL, &key, &data, 0); assert(r==0);
+ assert(!strcmp((char*)data.data, "first.db"));
+ r=db->close(db, 0); assert(r==0);
+
+ r=db_create(&db, env, 0); assert(r==0);
+ r=db->open(db, NULL, "master.db", "third", DB_BTREE, 0, 0666); assert(r==0);
+ r=db->get(db, NULL, &key, &data, 0); assert(r==0);
+ assert(!strcmp((char*)data.data, "third.db"));
+ r=db->close(db, 0); assert(r==0);
+
+ //Verify second is gone.
+ r=db_create(&db, env, 0); assert(r==0);
+ r=db->open(db, NULL, "master.db", "second", DB_BTREE, 0, 0666); assert(r==ENOENT);
+ //Create again, verify it does not have its old data.
+ r=db->open(db, NULL, "master.db", "second", DB_BTREE, DB_CREATE, 0666); assert(r==0);
+ r=db->get(db, NULL, &key, &data, 0); assert(r==DB_NOTFOUND);
+
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc b/storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc
new file mode 100644
index 00000000..c440bdc5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_rowcount.cc
@@ -0,0 +1,523 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <sys/stat.h>
+#include <db.h>
+
+// Tests that the logical row counts are correct and not subject to variance
+// from normal insert/delete messages within the tree, with a few exceptions:
+// 1) rollback messages not yet applied; 2) insert messages turned into
+// updates on apply; and 3) delete messages applied to missing leafentries.
+
+static DB_TXN* const null_txn = 0;
+static const uint64_t num_records = 4*1024;
+
+#define CHECK_NUM_ROWS(_expected, _stats) assert(_stats.bt_ndata == _expected)
+
+static DB* create_db(const char* fname, DB_ENV* env) {
+ int r;
+ DB* db;
+
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ db->set_errfile(db, stderr);
+
+ r = db->set_pagesize(db, 8192);
+ assert(r == 0);
+
+ r = db->set_readpagesize(db, 1024);
+ assert(r == 0);
+
+ r = db->set_fanout(db, 4);
+ assert(r == 0);
+
+ r = db->set_compression_method(db, TOKU_NO_COMPRESSION);
+ assert(r == 0);
+
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE,
+ 0666);
+ assert(r == 0);
+
+ return db;
+}
+static void add_records(DB* db, DB_TXN* txn, uint64_t start_id, uint64_t num) {
+ int r;
+ for (uint64_t i = 0, j=start_id; i < num; i++,j++) {
+ char key[100], val[256];
+ DBT k,v;
+ snprintf(key, 100, "%08" PRIu64, j);
+ snprintf(val, 256, "%*s", 200, key);
+ r =
+ db->put(
+ db,
+ txn,
+ dbt_init(&k, key, 1+strlen(key)),
+ dbt_init(&v, val, 1+strlen(val)),
+ 0);
+ assert(r == 0);
+ }
+}
+static void delete_records(
+ DB* db,
+ DB_TXN* txn,
+ uint64_t start_id,
+ uint64_t num) {
+
+ int r;
+ for (uint64_t i = 0, j=start_id; i < num; i++,j++) {
+ char key[100];
+ DBT k;
+ snprintf(key, 100, "%08" PRIu64, j);
+ r =
+ db->del(
+ db,
+ txn,
+ dbt_init(&k, key, 1+strlen(key)),
+ 0);
+ assert(r == 0);
+ }
+}
+static void full_optimize(DB* db) {
+ int r;
+ uint64_t loops_run = 0;
+
+ r = db->optimize(db);
+ assert(r == 0);
+
+ r = db->hot_optimize(db, NULL, NULL, NULL, NULL, &loops_run);
+ assert(r == 0);
+}
+static void test_insert_commit(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : before commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ db->close(db, 0);
+}
+static void test_insert_delete_commit(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : before delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ delete_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ db->close(db, 0);
+}
+static void test_insert_commit_delete_commit(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : before insert commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : after insert commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ delete_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf(
+ "%s : after delete commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ db->close(db, 0);
+}
+static void test_insert_rollback(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : before rollback %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->abort(txn);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+    // CANNOT TEST stats HERE AS THEY ARE SOMEWHAT NON-DETERMINISTIC UNTIL
+ // optimize + hot_optimize HAVE BEEN RUN DUE TO THE FACT THAT ROLLBACK
+ // MESSAGES ARE "IN-FLIGHT" IN THE TREE AND MUST BE APPLIED IN ORDER TO
+ // CORRECT THE RUNNING LOGICAL COUNT
+ if (verbose)
+ printf("%s : after rollback %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ full_optimize(db);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf(
+ "%s : after rollback optimize %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ db->close(db, 0);
+}
+static void test_insert_delete_rollback(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : before delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ delete_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->abort(txn);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+        printf("%s : after abort %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ db->close(db, 0);
+}
+static void test_insert_commit_delete_rollback(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : before insert commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : after insert commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ delete_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(0, stats);
+ if (verbose)
+ printf("%s : after delete %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ r = txn->abort(txn);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+    // CANNOT TEST stats HERE AS THEY ARE SOMEWHAT NON-DETERMINISTIC UNTIL
+ // optimize + hot_optimize HAVE BEEN RUN DUE TO THE FACT THAT ROLLBACK
+ // MESSAGES ARE "IN-FLIGHT" IN THE TREE AND MUST BE APPLIED IN ORDER TO
+ // CORRECT THE RUNNING LOGICAL COUNT
+ if (verbose)
+ printf(
+ "%s : after delete rollback %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ full_optimize(db);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : after delete rollback optimize %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ db->close(db, 0);
+}
+
+static int test_recount_insert_commit_progress(
+ uint64_t count,
+ uint64_t deleted,
+ void*) {
+
+ if (verbose)
+ printf(
+ "%s : count[%" PRIu64 "] deleted[%" PRIu64 "]\n",
+ __FUNCTION__,
+ count,
+ deleted);
+ return 0;
+}
+static int test_recount_cancel_progress(uint64_t, uint64_t, void*) {
+ return 1;
+}
+
+static void test_recount_insert_commit(DB_ENV* env) {
+ int r;
+ DB* db;
+ DB_TXN* txn;
+ DB_BTREE_STAT64 stats;
+
+ db = create_db(__FUNCTION__, env);
+
+ r = env->txn_begin(env, null_txn, &txn, 0);
+ assert(r == 0);
+
+ add_records(db, txn, 0, num_records);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf(
+ "%s : before commit %" PRIu64 " rows\n",
+ __FUNCTION__,
+ stats.bt_ndata);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = db->stat64(db, null_txn, &stats);
+ assert(r == 0);
+
+ CHECK_NUM_ROWS(num_records, stats);
+ if (verbose)
+ printf("%s : after commit %" PRIu64 " rows\n", __FUNCTION__, stats.bt_ndata);
+
+ // test that recount counted correct # of rows
+ r = db->recount_rows(db, test_recount_insert_commit_progress, NULL);
+ assert(r == 0);
+ CHECK_NUM_ROWS(num_records, stats);
+
+    // test that a cancel returned from the recount callback propagates to the caller
+ r = db->recount_rows(db, test_recount_cancel_progress, NULL);
+ assert(r == 1);
+ CHECK_NUM_ROWS(num_records, stats);
+
+ db->close(db, 0);
+}
+int test_main(int UU(argc), char UU(*const argv[])) {
+ int r;
+ DB_ENV* env;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU + S_IRWXG + S_IRWXO);
+
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+
+ r =
+ env->open(
+ env,
+ TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL + DB_INIT_LOG + DB_INIT_TXN + DB_PRIVATE + DB_CREATE,
+ S_IRWXU + S_IRWXG + S_IRWXO);
+ assert(r == 0);
+
+ test_insert_commit(env);
+ test_insert_delete_commit(env);
+ test_insert_commit_delete_commit(env);
+ test_insert_rollback(env);
+ test_insert_delete_rollback(env);
+ test_insert_commit_delete_rollback(env);
+ test_recount_insert_commit(env);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_set_flags.cc b/storage/tokudb/PerconaFT/src/tests/test_db_set_flags.cc
new file mode 100644
index 00000000..ac381c46
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_set_flags.cc
@@ -0,0 +1,84 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+test_db_set_flags (int flags, int expectr, int flags2, int expectr2) {
+ if (verbose) printf("test_db_set_flags:%d %d %d %d\n", flags, expectr, flags2, expectr2);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.db.set.flags.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ r = db->set_flags(db, flags); assert(r == expectr);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+ r = db->set_flags(db, flags2); assert(r == expectr2);
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ test_db_set_flags(0, 0, 0, 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_subdb.cc b/storage/tokudb/PerconaFT/src/tests/test_db_subdb.cc
new file mode 100644
index 00000000..0d8468ef
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_subdb.cc
@@ -0,0 +1,95 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+
+#include <unistd.h>
+#include <db.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV * env = 0;
+ DB *db;
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.db";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ // Note: without DB_INIT_MPOOL the BDB library will fail on db->open().
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_PRIVATE|DB_CREATE|DB_INIT_LOG|DB_INIT_TXN, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r = db_create(&db, env, 0);
+ CKERR(r);
+
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+
+ r = db->close(db, 0);
+ CKERR(r);
+
+#if 0
+ const char * const fname2 = "test2.db";
+ // This sequence segfaults in BDB 4.3.29
+ // See what happens if we open a database with a subdb, when the file has only the main db.
+ r = db->open(db, null_txn, fname2, 0, DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ r = db->close(db,0);
+ CKERR(r);
+ r = db->open(db, null_txn, fname2, "main", DB_BTREE, 0, 0666);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+#endif
+
+ r = env->close(env, 0);
+ CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_subdb_different_flags.cc b/storage/tokudb/PerconaFT/src/tests/test_db_subdb_different_flags.cc
new file mode 100644
index 00000000..46e1f89f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_subdb_different_flags.cc
@@ -0,0 +1,114 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+
+#include <unistd.h>
+#include <db.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ DB_ENV * env = 0;
+ DB *db;
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.db";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ // Note: without DB_INIT_MPOOL the BDB library will fail on db->open().
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_PRIVATE|DB_CREATE|DB_INIT_LOG|DB_INIT_TXN, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, fname, "subdb", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, fname, "subdb2", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ uint32_t flags;
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, 0, 0666); CKERR(r);
+ r = db->get_flags(db, &flags); CKERR(r); assert(flags==0);
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, fname, "subdb", DB_BTREE, 0, 0666); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, fname, "subdb2", DB_BTREE, 0, 0666); CKERR(r);
+ r = db->get_flags(db, &flags); CKERR(r); assert(flags==0);
+ r = db->close(db, 0); CKERR(r);
+
+#if 0
+ const char * const fname2 = "test2.db";
+ // This sequence segfaults in BDB 4.3.29
+ // See what happens if we open a database with a subdb, when the file has only the main db.
+ r = db->open(db, null_txn, fname2, 0, DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+ r = db->close(db,0);
+ CKERR(r);
+ r = db->open(db, null_txn, fname2, "main", DB_BTREE, 0, 0666);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+#endif
+
+ r = env->close(env, 0);
+ CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_txn_locks_nonheaviside.cc b/storage/tokudb/PerconaFT/src/tests/test_db_txn_locks_nonheaviside.cc
new file mode 100644
index 00000000..fc01992e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_txn_locks_nonheaviside.cc
@@ -0,0 +1,612 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <memory.h>
+#include <toku_portability.h>
+#include <db.h>
+
+#include <errno.h>
+#include <sys/stat.h>
+
+#include "test.h"
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+static DB *db;
+static DB_TXN* txns[(int)256];
+static DB_ENV* dbenv;
+static DBC* cursors[(int)256];
+
+static void
+put(bool success, char txn, int _key, int _data) {
+ assert(txns[(int)txn]);
+
+ int r;
+ DBT key;
+ DBT data;
+
+ r = db->put(db, txns[(int)txn],
+ dbt_init(&key, &_key, sizeof(int)),
+ dbt_init(&data, &_data, sizeof(int)),
+ 0);
+
+ if (success) CKERR(r);
+ else CKERR2s(r, DB_LOCK_DEADLOCK, DB_LOCK_NOTGRANTED);
+}
+
+static void
+cget(bool success, bool find, char txn, int _key, int _data,
+ int _key_expect, int _data_expect, uint32_t flags) {
+ assert(txns[(int)txn] && cursors[(int)txn]);
+
+ int r;
+ DBT key;
+ DBT data;
+
+ r = cursors[(int)txn]->c_get(cursors[(int)txn],
+ dbt_init(&key, &_key, sizeof(int)),
+ dbt_init(&data, &_data, sizeof(int)),
+ flags);
+ if (success) {
+ if (find) {
+ CKERR(r);
+ assert(*(int *)key.data == _key_expect);
+ assert(*(int *)data.data == _data_expect);
+ }
+ else CKERR2(r, DB_NOTFOUND);
+ }
+ else CKERR2s(r, DB_LOCK_DEADLOCK, DB_LOCK_NOTGRANTED);
+}
+
+static void
+dbdel (bool success, bool find, char txn, int _key) {
+ int r;
+ DBT key;
+
+    /* If DB_DELETE_ANY changes to 0, then find is meaningful and
+       has to be fixed in test_dbdel */
+ r = db->del(db, txns[(int)txn], dbt_init(&key,&_key, sizeof(int)),
+ DB_DELETE_ANY);
+ if (success) {
+ if (find) CKERR(r);
+ else CKERR2( r, DB_NOTFOUND);
+ }
+ else CKERR2s(r, DB_LOCK_DEADLOCK, DB_LOCK_NOTGRANTED);
+}
+
+static void
+init_txn (char name) {
+ int r;
+ assert(!txns[(int)name]);
+ r = dbenv->txn_begin(dbenv, NULL, &txns[(int)name], DB_TXN_NOWAIT);
+ CKERR(r);
+ assert(txns[(int)name]);
+}
+
+static void
+init_dbc (char name) {
+ int r;
+
+ assert(!cursors[(int)name] && txns[(int)name]);
+ r = db->cursor(db, txns[(int)name], &cursors[(int)name], 0);
+ CKERR(r);
+ assert(cursors[(int)name]);
+}
+
+static void
+commit_txn (char name) {
+ int r;
+ assert(txns[(int)name] && !cursors[(int)name]);
+
+ r = txns[(int)name]->commit(txns[(int)name], 0);
+ CKERR(r);
+ txns[(int)name] = NULL;
+}
+
+static void
+abort_txn (char name) {
+ int r;
+ assert(txns[(int)name] && !cursors[(int)name]);
+
+ r = txns[(int)name]->abort(txns[(int)name]);
+ CKERR(r);
+ txns[(int)name] = NULL;
+}
+
+static void
+close_dbc (char name) {
+ int r;
+
+ assert(cursors[(int)name]);
+ r = cursors[(int)name]->c_close(cursors[(int)name]);
+ CKERR(r);
+ cursors[(int)name] = NULL;
+}
+
+static void
+early_commit (char name) {
+ assert(cursors[(int)name] && txns[(int)name]);
+ close_dbc(name);
+ commit_txn(name);
+}
+
+static void
+early_abort (char name) {
+ assert(cursors[(int)name] && txns[(int)name]);
+ close_dbc(name);
+ abort_txn(name);
+}
+
+static void
+setup_dbs (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ dbenv = NULL;
+ db = NULL;
+ /* Open/create primary */
+ r = db_env_create(&dbenv, 0);
+ CKERR(r);
+ r = dbenv->set_default_bt_compare(dbenv, int_dbt_cmp);
+ CKERR(r);
+ uint32_t env_txn_flags = DB_INIT_TXN | DB_INIT_LOCK;
+ uint32_t env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL;
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, env_open_flags | env_txn_flags, 0600);
+ CKERR(r);
+
+ r = db_create(&db, dbenv, 0);
+ CKERR(r);
+
+ char a;
+ for (a = 'a'; a <= 'z'; a++) init_txn(a);
+ init_txn('\0');
+ r = db->open(db, txns[(int)'\0'], "foobar.db", NULL, DB_BTREE, DB_CREATE, 0600);
+ CKERR(r);
+ commit_txn('\0');
+ for (a = 'a'; a <= 'z'; a++) init_dbc(a);
+}
+
+static void
+close_dbs(void) {
+ char a;
+ for (a = 'a'; a <= 'z'; a++) {
+ if (cursors[(int)a]) close_dbc(a);
+ if (txns[(int)a]) commit_txn(a);
+ }
+
+ int r;
+ r = db->close(db, 0);
+ CKERR(r);
+ db = NULL;
+ r = dbenv->close(dbenv, 0);
+ CKERR(r);
+ dbenv = NULL;
+}
+
+
+static __attribute__((__unused__))
+void
+test_abort (void) {
+ /* ********************************************************************** */
+ setup_dbs();
+ put(true, 'a', 1, 1);
+ early_abort('a');
+ cget(true, false, 'b', 1, 1, 0, 0, DB_SET);
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', 1, 1, 0, 0, DB_SET);
+ cget(true, false, 'b', 1, 1, 0, 0, DB_SET);
+ put(false, 'a', 1, 1);
+ early_commit('b');
+ put(true, 'a', 1, 1);
+ cget(true, true, 'a', 1, 1, 1, 1, DB_SET);
+ cget(true, false, 'a', 2, 1, 1, 1, DB_SET);
+ cget(false, true, 'c', 1, 1, 0, 0, DB_SET);
+ early_abort('a');
+ cget(true, false, 'c', 1, 1, 0, 0, DB_SET);
+ close_dbs();
+ /* ********************************************************************** */
+}
+
+static void
+test_both (uint32_t db_flags) {
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', 1, 1, 0, 0, db_flags);
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', 1, 1, 0, 0, db_flags);
+ cget(true, false, 'a', 2, 1, 0, 0, db_flags);
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', 1, 1, 0, 0, db_flags);
+ cget(true, false, 'a', 1, 1, 0, 0, db_flags);
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', 1, 1, 0, 0, db_flags);
+ cget(true, false, 'b', 2, 1, 0, 0, db_flags);
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', 1, 1, 0, 0, db_flags);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ cget(false, false, 'b', 1, 1, 0, 0, db_flags);
+#else
+ cget(true, false, 'b', 1, 1, 0, 0, db_flags);
+#endif
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', 1, 1, 0, 0, db_flags);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ cget(false, false, 'b', 1, 1, 0, 0, db_flags);
+ put(true, 'a', 1, 1);
+#else
+ cget(true, false, 'b', 1, 1, 0, 0, db_flags);
+ put(false, 'a', 1, 1);
+#endif
+ early_commit('b');
+ put(true, 'a', 1, 1);
+ cget(true, true, 'a', 1, 1, 1, 1, db_flags);
+ cget(true, false, 'a', 2, 1, 0, 0, db_flags);
+ cget(false, true, 'c', 1, 1, 0, 0, db_flags);
+ early_commit('a');
+ cget(true, true, 'c', 1, 1, 1, 1, db_flags);
+ close_dbs();
+}
+
+
+static void
+test_last (void) {
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', 0, 0, 0, 0, DB_LAST);
+ put(false, 'b', 2, 1);
+ put(true, 'a', 2, 1);
+ cget(true, true, 'a', 0, 0, 2, 1, DB_LAST);
+ early_commit('a');
+ put(true, 'b', 2, 1);
+ close_dbs();
+ /* ****************************************** */
+ setup_dbs();
+ put(true, 'a', 1, 1);
+ cget(true, true, 'a', 0, 0, 1, 1, DB_LAST);
+ put(false, 'b', 2, 1);
+ put(true, 'b', -1, 1);
+ cget(true, true, 'a', 0, 0, 1, 1, DB_LAST);
+ close_dbs();
+ /* ****************************************** */
+ setup_dbs();
+ put(true, 'a', 1, 1);
+ put(true, 'a', 3, 1);
+ put(true, 'a', 6, 1);
+ cget(true, true, 'a', 0, 0, 6, 1, DB_LAST);
+ put(true, 'b', 2, 1);
+ put(true, 'b', 4, 1);
+ put(false, 'b', 7, 1);
+ put(true, 'b', -1, 1);
+ close_dbs();
+ /* ****************************************** */
+ setup_dbs();
+ put(true, 'a', 1, 1);
+ cget(true, true, 'a', 0, 0, 1, 1, DB_LAST);
+ put(false, 'b', 1, 0);
+ close_dbs();
+}
+
+static void
+test_first (void) {
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', 0, 0, 0, 0, DB_FIRST);
+ put(false, 'b', 2, 1);
+ put(true, 'a', 2, 1);
+ cget(true, true, 'a', 0, 0, 2, 1, DB_FIRST);
+ early_commit('a');
+ put(true, 'b', 2, 1);
+ close_dbs();
+ /* ****************************************** */
+ setup_dbs();
+ put(true, 'a', 1, 1);
+ cget(true, true, 'a', 0, 0, 1, 1, DB_FIRST);
+ put(true, 'b', 2, 1);
+ put(false, 'b', -1, 1);
+ cget(true, true, 'a', 0, 0, 1, 1, DB_FIRST);
+ close_dbs();
+ /* ****************************************** */
+ setup_dbs();
+ put(true, 'a', 1, 1);
+ put(true, 'a', 3, 1);
+ put(true, 'a', 6, 1);
+ cget(true, true, 'a', 0, 0, 1, 1, DB_FIRST);
+ put(true, 'b', 2, 1);
+ put(true, 'b', 4, 1);
+ put(true, 'b', 7, 1);
+ put(false, 'b', -1, 1);
+ close_dbs();
+ /* ****************************************** */
+ setup_dbs();
+ put(true, 'a', 1, 1);
+ cget(true, true, 'a', 0, 0, 1, 1, DB_FIRST);
+ put(false, 'b', 1, 2);
+ close_dbs();
+}
+
+static void
+test_set_range (uint32_t flag, int i) {
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', i*1, i*1, 0, 0, flag);
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', i*1, i*1, 0, 0, flag);
+ cget(true, false, 'a', i*2, i*1, 0, 0, flag);
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', i*1, i*1, 0, 0, flag);
+ cget(true, false, 'a', i*1, i*1, 0, 0, flag);
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', i*1, i*1, 0, 0, flag);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ cget(false, false, 'b', i*2, i*1, 0, 0, flag);
+#else
+ cget(true, false, 'b', i*2, i*1, 0, 0, flag);
+#endif
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', i*1, i*1, 0, 0, flag);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ cget(false, false, 'b', i*1, i*1, 0, 0, flag);
+#else
+ cget(true, false, 'b', i*1, i*1, 0, 0, flag);
+#endif
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ cget(true, false, 'a', i*1, i*1, 0, 0, flag);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ cget(false, false, 'b', i*5, i*5, 0, 0, flag);
+ put(true, 'a', i*7, i*6);
+ put(true, 'a', i*5, i*5);
+#else
+ cget(true, false, 'b', i*5, i*5, 0, 0, flag);
+ put(false, 'a', i*7, i*6);
+ put(false, 'a', i*5, i*5);
+#endif
+ put(true, 'a', i*4, i*4);
+ put(true, 'b', -i*1, i*4);
+ put(false, 'b', i*2, i*4);
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ put(true, 'a', i*5, i*4);
+#else
+ put(false, 'a', i*5, i*4);
+#endif
+ early_commit('b');
+ put(true, 'a', i*7, i*6);
+ put(true, 'a', i*5, i*5);
+ put(true, 'a', i*4, i*4);
+ put(true, 'a', i*5, i*4);
+ cget(true, true, 'a', i*1, i*1, i*4, i*4, flag);
+ cget(true, true, 'a', i*2, i*1, i*4, i*4, flag);
+ cget(false, true, 'c', i*6, i*6, i*7, i*6, flag);
+ early_commit('a');
+ cget(true, true, 'c', i*6, i*6, i*7, i*6, flag);
+ close_dbs();
+}
+
+static void
+test_next (uint32_t next_type) {
+ /* ********************************************************************** */
+ setup_dbs();
+ put(true, 'a', 2, 1);
+ put(true, 'a', 5, 1);
+ cget(true, true, 'a', 0, 0, 2, 1, next_type);
+ put(false, 'b', 2, 1);
+ put(true, 'b', 4, 1);
+ put(false, 'b', -1, 1);
+ cget(false, true, 'a', 0, 0, 4, 1, next_type);
+ early_commit('b');
+ cget(true, true, 'a', 2, 1, 2, 1, DB_SET);
+ cget(true, true, 'a', 0, 0, 4, 1, next_type);
+ cget(true, true, 'a', 0, 0, 5, 1, next_type);
+ close_dbs();
+ /* ****************************************** */
+ setup_dbs();
+ put(true, 'a', 1, 1);
+ put(true, 'a', 3, 1);
+ put(true, 'a', 6, 1);
+ cget(true, true, 'a', 0, 0, 1, 1, next_type);
+ cget(true, true, 'a', 0, 0, 3, 1, next_type);
+ put(false, 'b', 2, 1);
+ put(true, 'b', 4, 1);
+ put(true, 'b', 7, 1);
+ put(false, 'b', -1, 1);
+ close_dbs();
+}
+
+static void
+test_prev (uint32_t next_type) {
+ /* ********************************************************************** */
+ setup_dbs();
+ put(true, 'a', -2, -1);
+ put(true, 'a', -5, -1);
+ cget(true, true, 'a', 0, 0, -2, -1, next_type);
+ put(false, 'b', -2, -1);
+ put(true, 'b', -4, -1);
+ put(false, 'b', 1, -1);
+ cget(false, true, 'a', 0, 0, -4, -1, next_type);
+ early_commit('b');
+ cget(true, true, 'a', -2, -1, -2, -1, DB_SET);
+ cget(true, true, 'a', 0, 0, -4, -1, next_type);
+ cget(true, true, 'a', 0, 0, -5, -1, next_type);
+ close_dbs();
+ /* ****************************************** */
+ setup_dbs();
+ put(true, 'a', -1, -1);
+ put(true, 'a', -3, -1);
+ put(true, 'a', -6, -1);
+ cget(true, true, 'a', 0, 0, -1, -1, next_type);
+ cget(true, true, 'a', 0, 0, -3, -1, next_type);
+ put(false, 'b', -2, -1);
+ put(true, 'b', -4, -1);
+ put(true, 'b', -7, -1);
+ put(false, 'b', 1, -1);
+ close_dbs();
+}
+
+static void
+test_dbdel (void) {
+    /* If DB_DELETE_ANY changes to 0, then find is meaningful and
+       has to be fixed in test_dbdel */
+ /* ********************************************************************** */
+ setup_dbs();
+ put(true, 'c', 1, 1);
+ early_commit('c');
+ dbdel(true, true, 'a', 1);
+ cget(false, true, 'b', 1, 1, 1, 1, DB_SET);
+ cget(false, true, 'b', 1, 4, 1, 4, DB_SET);
+ cget(false, true, 'b', 1, 0, 1, 4, DB_SET);
+ cget(true, false, 'b', 0, 0, 0, 0, DB_SET);
+ cget(true, false, 'b', 2, 10, 2, 10, DB_SET);
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ dbdel(true, true, 'a', 1);
+ cget(false, true, 'b', 1, 1, 1, 1, DB_SET);
+ cget(false, true, 'b', 1, 4, 1, 4, DB_SET);
+ cget(false, true, 'b', 1, 0, 1, 4, DB_SET);
+ cget(true, false, 'b', 0, 0, 0, 0, DB_SET);
+ cget(true, false, 'b', 2, 10, 2, 10, DB_SET);
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ put(true, 'c', 1, 1);
+ early_commit('c');
+ cget(true, true, 'b', 1, 1, 1, 1, DB_SET);
+ dbdel(false, true, 'a', 1);
+ dbdel(true, true, 'a', 2);
+ dbdel(true, true, 'a', 0);
+ close_dbs();
+}
+
+static void
+test_current (void) {
+ /* ********************************************************************** */
+ setup_dbs();
+ put(true, 'a', 1, 1);
+ early_commit('a');
+ cget(true, true, 'b', 1, 1, 1, 1, DB_SET);
+ cget(true, true, 'b', 1, 1, 1, 1, DB_CURRENT);
+ close_dbs();
+}
+
+struct dbt_pair {
+ DBT key;
+ DBT val;
+};
+
+struct int_pair {
+ int key;
+ int val;
+};
+
+int got_r_h;
+
+static __attribute__((__unused__))
+void
+ignore (void *ignore __attribute__((__unused__))) {
+}
+#define TOKU_IGNORE(x) ignore((void*)x)
+
+static void
+test (void) {
+ /* ********************************************************************** */
+ setup_dbs();
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ early_abort('a');
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ early_commit('a');
+ close_dbs();
+ /* ********************************************************************** */
+ setup_dbs();
+ put(true, 'a', 1, 1);
+ close_dbs();
+ /* ********************************************************************** */
+ test_both( DB_SET);
+ /* ********************************************************************** */
+ test_first();
+ /* ********************************************************************** */
+ test_last();
+ /* ********************************************************************** */
+ test_set_range( DB_SET_RANGE, 1);
+#ifdef DB_SET_RANGE_REVERSE
+ test_set_range( DB_SET_RANGE_REVERSE, -1);
+#endif
+ /* ********************************************************************** */
+ test_next(DB_NEXT);
+ /* ********************************************************************** */
+ test_prev(DB_PREV);
+ /* ********************************************************************** */
+ test_dbdel();
+ /* ********************************************************************** */
+ test_current();
+ /* ********************************************************************** */
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_txn_locks_read_uncommitted.cc b/storage/tokudb/PerconaFT/src/tests/test_db_txn_locks_read_uncommitted.cc
new file mode 100644
index 00000000..c42eba28
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_txn_locks_read_uncommitted.cc
@@ -0,0 +1,241 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <memory.h>
+#include <db.h>
+
+#include <errno.h>
+#include <sys/stat.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+static DB *db;
+static DB_TXN* txns[(int)256];
+static DB_ENV* dbenv;
+static DBC* cursors[(int)256];
+
+static void
+put(bool success, char txn, int _key, int _data) {
+ assert(txns[(int)txn]);
+
+ int r;
+ DBT key;
+ DBT data;
+
+ r = db->put(db, txns[(int)txn],
+ dbt_init(&key, &_key, sizeof(int)),
+ dbt_init(&data, &_data, sizeof(int)),
+ 0);
+
+ if (success) CKERR(r);
+ else CKERR2s(r, DB_LOCK_DEADLOCK, DB_LOCK_NOTGRANTED);
+}
+
+static void
+init_txn (char name, uint32_t flags) {
+ int r;
+ assert(!txns[(int)name]);
+ r = dbenv->txn_begin(dbenv, NULL, &txns[(int)name], DB_TXN_NOWAIT | flags);
+ CKERR(r);
+ assert(txns[(int)name]);
+}
+
+static void
+init_dbc (char name) {
+ int r;
+
+ assert(!cursors[(int)name] && txns[(int)name]);
+ r = db->cursor(db, txns[(int)name], &cursors[(int)name], 0);
+ CKERR(r);
+ assert(cursors[(int)name]);
+}
+
+static void
+commit_txn (char name) {
+ int r;
+ assert(txns[(int)name] && !cursors[(int)name]);
+
+ r = txns[(int)name]->commit(txns[(int)name], 0);
+ CKERR(r);
+ txns[(int)name] = NULL;
+}
+
+
+static void
+close_dbc (char name) {
+ int r;
+
+ assert(cursors[(int)name]);
+ r = cursors[(int)name]->c_close(cursors[(int)name]);
+ CKERR(r);
+ cursors[(int)name] = NULL;
+}
+
+static void
+early_commit (char name) {
+ assert(cursors[(int)name] && txns[(int)name]);
+ close_dbc(name);
+ commit_txn(name);
+}
+
+static void
+setup_dbs (void) {
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ dbenv = NULL;
+ db = NULL;
+ /* Open/create primary */
+ r = db_env_create(&dbenv, 0);
+ CKERR(r);
+ r = dbenv->set_default_bt_compare(dbenv, int_dbt_cmp);
+ CKERR(r);
+ uint32_t env_txn_flags = DB_INIT_TXN | DB_INIT_LOCK;
+ uint32_t env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL;
+ r = dbenv->open(dbenv, TOKU_TEST_FILENAME, env_open_flags | env_txn_flags, 0600);
+ CKERR(r);
+
+ r = db_create(&db, dbenv, 0);
+ CKERR(r);
+
+ char a;
+ for (a = 'a'; a <= 'z'; a++) init_txn(a, 0);
+ for (a = '0'; a <= '9'; a++) init_txn(a, DB_READ_UNCOMMITTED);
+ init_txn('\0', 0);
+ r = db->open(db, txns[(int)'\0'], "foobar.db", NULL, DB_BTREE, DB_CREATE | DB_READ_UNCOMMITTED, 0600);
+ CKERR(r);
+ commit_txn('\0');
+ for (a = 'a'; a <= 'z'; a++) init_dbc(a);
+ for (a = '0'; a <= '9'; a++) init_dbc(a);
+}
+
+static void
+close_dbs(void) {
+ char a;
+ for (a = 'a'; a <= 'z'; a++) {
+ if (cursors[(int)a]) close_dbc(a);
+ if (txns[(int)a]) commit_txn(a);
+ }
+ for (a = '0'; a <= '9'; a++) {
+ if (cursors[(int)a]) close_dbc(a);
+ if (txns[(int)a]) commit_txn(a);
+ }
+
+ int r;
+ r = db->close(db, 0);
+ CKERR(r);
+ db = NULL;
+ r = dbenv->close(dbenv, 0);
+ CKERR(r);
+ dbenv = NULL;
+}
+
+
+static void
+table_scan(char txn, bool success) {
+ int r;
+ DBT key;
+ DBT data;
+
+ assert(txns[(int)txn] && cursors[(int)txn]);
+ r = cursors[(int)txn]->c_get(cursors[(int)txn],
+ dbt_init(&key, 0, 0),
+ dbt_init(&data, 0, 0),
+ DB_FIRST);
+ while (r==0) {
+ r = cursors[(int)txn]->c_get(cursors[(int)txn],
+ dbt_init(&key, 0, 0),
+ dbt_init(&data, 0, 0),
+ DB_NEXT);
+ }
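+ // when read locks are not shared, even a scan that is expected to succeed may hit a lock conflict, so lock errors are tolerated in that build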
+#ifdef BLOCKING_ROW_LOCKS_READS_NOT_SHARED
+ if (success) invariant(r == DB_NOTFOUND || r == DB_LOCK_NOTGRANTED || r == DB_LOCK_DEADLOCK);
+ else CKERR2s(r, DB_LOCK_NOTGRANTED, DB_LOCK_DEADLOCK);
+#else
+ if (success) CKERR2(r, DB_NOTFOUND);
+ else CKERR2s(r, DB_LOCK_NOTGRANTED, DB_LOCK_DEADLOCK);
+#endif
+}
+
+static void
+table_prelock(char txn, bool success) {
+ int r;
+ r = db->pre_acquire_table_lock(db, txns[(int)txn]);
+ if (success) CKERR(r);
+ else CKERR2s(r, DB_LOCK_NOTGRANTED, DB_LOCK_DEADLOCK);
+}
+
+static void
+test (void) {
+ char txn;
+ /* ********************************************************************** */
+ setup_dbs();
+ close_dbs();
+ /* ********************************************************************** */
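+ // txn 'a' pre-locks the whole table and writes a row: serializable scans ('b'..'y') must fail until 'a' commits, while DB_READ_UNCOMMITTED scans ('0'..'8') succeed throughout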
+ setup_dbs();
+ table_scan('0', true);
+ table_prelock('a', true);
+ put(true, 'a', 0, 0);
+ for (txn = 'b'; txn<'z'; txn++) {
+ table_scan(txn, false);
+ }
+ for (txn = '0'; txn<'9'; txn++) {
+ table_scan(txn, true);
+ }
+ early_commit('a');
+ for (txn = 'b'; txn<'z'; txn++) {
+ table_scan(txn, true);
+ }
+ for (txn = '0'; txn<'9'; txn++) {
+ table_scan(txn, true);
+ }
+ close_dbs();
+ /* ********************************************************************** */
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_db_version.cc b/storage/tokudb/PerconaFT/src/tests/test_db_version.cc
new file mode 100644
index 00000000..78909a14
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_db_version.cc
@@ -0,0 +1,61 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <db.h>
+
+
+
+int
+test_main (int argc, char *const argv[]) {
+ const char *v;
+ int major, minor, patch;
+ parse_args(argc, argv);
+ v = db_version(0, 0, 0);
+ assert(v!=0);
+ v = db_version(&major, &minor, &patch);
+ assert(major==DB_VERSION_MAJOR);
+ assert(minor==DB_VERSION_MINOR);
+ assert(patch==DB_VERSION_PATCH);
+ if (verbose) {
+ printf("%d.%d.%d\n", major, minor, patch);
+ printf("%s\n", v);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_env_close_flags.cc b/storage/tokudb/PerconaFT/src/tests/test_env_close_flags.cc
new file mode 100644
index 00000000..def7f7bd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_env_close_flags.cc
@@ -0,0 +1,84 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+
+#include <db.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
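+// env->close takes no flags: closing with 0 must succeed, and closing with any nonzero flag (1 here) must return EINVAL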
+int
+test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ DB_ENV *env;
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env,0); // Turn off those annoying errors
+ r=env->close (env, 0); assert(r==0);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env,0); // Turn off those annoying errors
+ r=env->close (env, 1);
+ assert(r==EINVAL);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env,0); // Turn off those annoying errors
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=env->close (env, 0); assert(r==0);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env,0); // Turn off those annoying errors
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=env->close (env, 1);
+ assert(r==EINVAL);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_env_create_db_create.cc b/storage/tokudb/PerconaFT/src/tests/test_env_create_db_create.cc
new file mode 100644
index 00000000..249055ca
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_env_create_db_create.cc
@@ -0,0 +1,60 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ DB_ENV *env;
+ DB *db;
+ int r;
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+ r = db_create(&db, env, 0);
+ assert(r != 0);
+ r = env->close(env, 0);
+ assert(r == 0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_env_open_flags.cc b/storage/tokudb/PerconaFT/src/tests/test_env_open_flags.cc
new file mode 100644
index 00000000..715bc552
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_env_open_flags.cc
@@ -0,0 +1,92 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+test_env_open_flags (int env_open_flags, int expectr) {
+ if (verbose) printf("test_env_open_flags:%d\n", env_open_flags);
+
+ DB_ENV *env;
+ int r;
+
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+ env->set_errfile(env, 0);
+
+ r = env->open(env, TOKU_TEST_FILENAME, env_open_flags, 0644);
+ if (r != expectr && verbose) printf("env open flags=%x expectr=%d r=%d\n", env_open_flags, expectr, r);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ char tracefile[TOKU_PATH_MAX+1];
+ toku_set_trace_file(toku_path_join(tracefile, 2, TOKU_TEST_FILENAME, "trace.tktrace"));
+
+ /* test flags */
+ test_env_open_flags(0, ENOENT);
+ // This one segfaults in BDB 4.6.21
+ test_env_open_flags(DB_PRIVATE, ENOENT);
+ test_env_open_flags(DB_PRIVATE+DB_CREATE, 0);
+ test_env_open_flags(DB_PRIVATE+DB_CREATE+DB_INIT_MPOOL, 0);
+ test_env_open_flags(DB_PRIVATE+DB_RECOVER, EINVAL);
+ test_env_open_flags(DB_PRIVATE+DB_CREATE+DB_INIT_MPOOL+DB_RECOVER, EINVAL);
+
+ toku_close_trace_file();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_equal_keys_with_different_bytes.cc b/storage/tokudb/PerconaFT/src/tests/test_equal_keys_with_different_bytes.cc
new file mode 100644
index 00000000..7a027c4c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_equal_keys_with_different_bytes.cc
@@ -0,0 +1,97 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <string>
+
+#include "test.h"
+
+static int compare_strings_case_insensitive(DB *db, const DBT *a, const DBT *b) {
+ invariant_notnull(db);
+ return strcasecmp(reinterpret_cast<char *>(a->data),
+ reinterpret_cast<char *>(b->data));
+}
+
+static void test_equal_keys_with_different_bytes(void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, compare_strings_case_insensitive); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, 0); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ DBT key;
+
+ // put 'key'
+ dbt_init(&key, "key", sizeof("key"));
+ r = db->put(db, NULL, &key, &key, 0); CKERR(r);
+
+ // del 'KEY' - should match 'key'
+ dbt_init(&key, "KEY", sizeof("KEY"));
+ r = db->del(db, NULL, &key, 0); CKERR(r);
+
+ DBT val;
+ char val_buf[10];
+ dbt_init(&val, val_buf, sizeof(val_buf));
+
+ // search should fail for 'key'
+ dbt_init(&key, "key", sizeof("key"));
+ r = db->get(db, NULL, &key, &val, 0); CKERR2(r, DB_NOTFOUND);
+
+ // search should fail for 'KEY'
+ dbt_init(&key, "KEY", sizeof("KEY"));
+ r = db->get(db, NULL, &key, &val, 0); CKERR2(r, DB_NOTFOUND);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ test_equal_keys_with_different_bytes();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_error.cc b/storage/tokudb/PerconaFT/src/tests/test_error.cc
new file mode 100644
index 00000000..8f939a15
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_error.cc
@@ -0,0 +1,131 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <sys/stat.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+
+char const* expect_errpfx;
+int n_handle_error=0;
+
+static void
+handle_error (const DB_ENV *UU(dbenv), const char *errpfx, const char *UU(msg)) {
+ assert(errpfx==expect_errpfx);
+ n_handle_error++;
+}
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ {
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env,0); // Turn off those annoying errors
+ r = env->open(env, TOKU_TEST_FILENAME, (uint32_t) -1, 0644);
+ CKERR2(r, EINVAL);
+ assert(n_handle_error==0);
+ r = env->close(env, 0); assert(r==0);
+ }
+
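+ // exercise every combination of error prefix, error file, and error callback, and check where the EINVAL message from a bad env->open lands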
+ int do_errfile, do_errcall,do_errpfx;
+ for (do_errpfx=0; do_errpfx<2; do_errpfx++) {
+ for (do_errfile=0; do_errfile<2; do_errfile++) {
+ for (do_errcall=0; do_errcall<2; do_errcall++) {
+ char errfname[TOKU_PATH_MAX+1];
+ toku_path_join(errfname, 2, TOKU_TEST_FILENAME, "errfile");
+ unlink(errfname);
+ {
+ DB_ENV *env;
+ FILE *write_here = fopen(errfname, "w");
+ assert(write_here);
+ n_handle_error=0;
+ r = db_env_create(&env, 0); assert(r==0);
+ if (do_errpfx) {
+ expect_errpfx="whoopi";
+ env->set_errpfx(env, expect_errpfx);
+ } else {
+ expect_errpfx=0;
+ }
+ env->set_errfile(env,0); // Turn off those annoying errors
+ if (do_errfile)
+ env->set_errfile(env, write_here);
+ if (do_errcall)
+ env->set_errcall(env, handle_error);
+ r = env->open(env, TOKU_TEST_FILENAME, (uint32_t) -1, 0644);
+ assert(r==EINVAL);
+ r = env->close(env, 0); assert(r==0);
+ fclose(write_here);
+ }
+ {
+ FILE *read_here = fopen(errfname, "r");
+ assert(read_here);
+ char buf[10000];
+ int buflen = fread(buf, 1, sizeof(buf)-1, read_here);
+ assert(buflen>=0);
+ buf[buflen]=0;
+ if (do_errfile) {
+ if (do_errpfx) {
+ assert(strncmp(buf,"whoopi:",7)==0);
+ } else {
+ assert(buf[0]!=0);
+ assert(buf[0]!=':');
+ }
+ assert(buf[strlen(buf)-1]=='\n');
+ } else {
+ assert(buf[0]==0);
+ }
+ if (do_errcall) {
+ assert(n_handle_error==1);
+ } else {
+ assert(n_handle_error==0);
+ }
+ fclose(read_here);
+ }
+ unlink(errfname);
+ }
+ }
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_forkjoin.cc b/storage/tokudb/PerconaFT/src/tests/test_forkjoin.cc
new file mode 100644
index 00000000..766372a1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_forkjoin.cc
@@ -0,0 +1,60 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+
+#include <toku_pthread.h>
+
+static void *
+f (void *arg) {
+ //toku_pthread_exit(arg); // toku_pthread_exit has a memory leak on linux
+ return arg;
+}
+
+int test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ toku_pthread_t t;
+ int r = toku_pthread_create(toku_uninstrumented, &t, nullptr, f, nullptr);
+ assert(r == 0);
+ void *ret;
+ r = toku_pthread_join(t, &ret);
+ assert(r == 0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_get_max_row_size.cc b/storage/tokudb/PerconaFT/src/tests/test_get_max_row_size.cc
new file mode 100644
index 00000000..8800b717
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_get_max_row_size.cc
@@ -0,0 +1,74 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+int test_main(int argc, char * const argv[])
+{
+ int r;
+ DB * db;
+ DB_ENV * db_env;
+ (void) argc;
+ (void) argv;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
+
+ // set things up
+ r = db_env_create(&db_env, 0); { int chk_r = r; CKERR(chk_r); }
+ r = db_env->open(db_env, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_PRIVATE, 0755); { int chk_r = r; CKERR(chk_r); }
+ r = db_create(&db, db_env, 0); { int chk_r = r; CKERR(chk_r); }
+ r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644); { int chk_r = r; CKERR(chk_r); }
+
+ // - does not test low bounds, so a 0 byte key is "okay"
+ // - assuming 32k keys and 32mb values are the max
+ uint32_t max_key, max_val;
+ db->get_max_row_size(db, &max_key, &max_val);
+ // assume it is a red flag for the key to be outside the 16-32kb range
+ assert(max_key >= 16*1024);
+ assert(max_key <= 32*1024);
+ // assume it is a red flag for the value to be outside the 16-32mb range
+ assert(max_val >= 16*1024*1024);
+ assert(max_val <= 32*1024*1024);
+
+ // clean things up
+ r = db->close(db, 0); { int chk_r = r; CKERR(chk_r); }
+ r = db_env->close(db_env, 0); { int chk_r = r; CKERR(chk_r); }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_get_zeroed_dbt.cc b/storage/tokudb/PerconaFT/src/tests/test_get_zeroed_dbt.cc
new file mode 100644
index 00000000..723b792b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_get_zeroed_dbt.cc
@@ -0,0 +1,80 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Test to see if DB->get works on a zeroed DBT. */
+
+#include <db.h>
+#include <memory.h>
+#include <stdlib.h>
+
+#include <sys/stat.h>
+
+
+
+static void
+test_get (void) {
+ DB_TXN * const null_txn = 0;
+ DBT key,data;
+ char fname[] = "test.db";
+ int r;
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create (&db, env, 0); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+ dbt_init(&key, "a", 2);
+ r = db->put(db, null_txn, &key, dbt_init(&data, "b", 2), 0); assert(r==0);
+ memset(&data, 0, sizeof(data));
+ r = db->get(db, null_txn, &key, &data, 0); assert(r == 0);
+ assert(strcmp((char*)data.data, "b")==0);
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ test_get();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_groupcommit_count.cc b/storage/tokudb/PerconaFT/src/tests/test_groupcommit_count.cc
new file mode 100644
index 00000000..35104c2a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_groupcommit_count.cc
@@ -0,0 +1,224 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Test by counting the fsyncs, to see if group commit is working. */
+
+#include <db.h>
+#include <toku_pthread.h>
+#include <toku_time.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+DB_ENV *env;
+DB *db;
+int do_sync=1;
+
+#define NITER 100
+
+static void *start_a_thread (void *i_p) {
+ int *CAST_FROM_VOIDP(which_thread_p, i_p);
+ int i,r;
+ for (i=0; i<NITER; i++) {
+ DB_TXN *tid;
+ char keystr[100];
+ DBT key,data;
+ snprintf(keystr, sizeof(keystr), "%ld.%d.%d", random(), *which_thread_p, i);
+ r=env->txn_begin(env, 0, &tid, 0); CKERR(r);
+ r=db->put(db, tid,
+ dbt_init(&key, keystr, 1+strlen(keystr)),
+ dbt_init(&data, keystr, 1+strlen(keystr)),
+ 0); CKERR(r);
+ r=tid->commit(tid, do_sync ? 0 : DB_TXN_NOSYNC); CKERR(r);
+ }
+ return 0;
+}
+
+const char *env_path;
+
+static void
+test_groupcommit (int nthreads) {
+ int r;
+ DB_TXN *tid;
+
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, env_path, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ int i;
+ toku_pthread_t threads[nthreads];
+ int whichthread[nthreads];
+ for (i = 0; i < nthreads; i++) {
+ whichthread[i] = i;
+ r = toku_pthread_create(toku_uninstrumented,
+ &threads[i],
+ nullptr,
+ start_a_thread,
+ &whichthread[i]);
+ }
+ for (i = 0; i < nthreads; i++) {
+ toku_pthread_join(threads[i], 0);
+ }
+
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+
+ //if (verbose) printf(" That's a total of %d commits\n", nthreads*NITER);
+}
+
+// helgrind doesn't understand that pthread_join removes a race condition. I'm not impressed... -Bradley
+// Also, it doesn't happen every time, making helgrind unsuitable for regression tests.
+// So we must put locks around things that are properly serialized anyway.
+
+static int fsync_count_maybe_lockprotected=0;
+static void
+inc_fsync_count (void) {
+ fsync_count_maybe_lockprotected++;
+}
+
+static int
+get_fsync_count (void) {
+ int result=fsync_count_maybe_lockprotected;
+ return result;
+}
+
+static int
+do_fsync (int fd) {
+ //fprintf(stderr, "%8.6fs Thread %ld start fsyncing\n", get_tdiff(), pthread_self());
+ inc_fsync_count();
+ int r = fsync(fd);
+ //fprintf(stderr, "%8.6fs Thread %ld done fsyncing\n", get_tdiff(), pthread_self());
+ return r;
+}
+
+static const char *progname;
+static struct timeval prevtime;
+static int prev_count;
+
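+// report elapsed time, the number of fsyncs since the previous call, and transaction throughput for N threads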
+static void
+printtdiff (int N) {
+ struct timeval thistime;
+ gettimeofday(&thistime, 0);
+ double diff = toku_tdiff(&thistime, &prevtime);
+ int fcount=get_fsync_count();
+ if (verbose) printf("%s: %10.6fs %4d fsyncs for %4d threads %s %8.1f tps, %8.1f tps/thread\n", progname, diff, fcount-prev_count,
+ N,
+ do_sync ? "with sync " : "with DB_TXN_NOSYNC",
+ NITER*(N/diff), NITER/diff);
+ prevtime=thistime;
+ prev_count=fcount;
+}
+
+static void
+do_test (int N) {
+ for (do_sync = 0; do_sync<2; do_sync++) {
+ int count_before = get_fsync_count();
+ test_groupcommit(N);
+ printtdiff(N);
+ int count_after = get_fsync_count();
+ if (count_after-count_before >= N*NITER) {
+ if (verbose) printf("It looks like too many fsyncs. Group commit doesn't appear to be occuring. %d - %d >= %d\n", count_after, count_before, N*NITER);
+ exit(1);
+ }
+ }
+}
+
+int log_max_n_threads_over_10 = 3;
+
+static void
+my_parse_args (int argc, char *const argv[]) {
+ verbose=1; // use -q to turn off the talking.
+ env_path = TOKU_TEST_FILENAME;
+ const char *argv0=argv[0];
+ while (argc>1) {
+ int resultcode=0;
+ if (strcmp(argv[1], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[1],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[1],"-n")==0) {
+ argc--;
+ argv++;
+ if (argc<=1) { resultcode=1; goto do_usage; }
+ errno = 0;
+ char *end;
+ log_max_n_threads_over_10 = strtol(argv[1], &end, 10);
+ if (errno!=0 || *end) {
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[1], "-h")==0) {
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-q] [-n LOG(MAX_N_THREADS/10)] [-h]\n", argv0);
+ exit(resultcode);
+ } else {
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+
+
+int
+test_main (int argc, char *const argv[]) {
+ progname=argv[0];
+ my_parse_args(argc, argv);
+
+ gettimeofday(&prevtime, 0);
+ prev_count=0;
+
+ db_env_set_func_fsync(do_fsync);
+ db_env_set_num_bucket_mutexes(32);
+
+ toku_os_recursive_delete(env_path);
+ { int r=toku_os_mkdir(env_path, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0); }
+
+ test_groupcommit(1); printtdiff(1);
+ test_groupcommit(2); printtdiff(2);
+ for (int i=0; i<log_max_n_threads_over_10; i++) {
+ do_test(10 << i);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_groupcommit_perf.cc b/storage/tokudb/PerconaFT/src/tests/test_groupcommit_perf.cc
new file mode 100644
index 00000000..6f872d10
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_groupcommit_perf.cc
@@ -0,0 +1,151 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Test using performance metrics only, to see if group commit is working. */
+
+#include <db.h>
+#include <toku_pthread.h>
+#include <toku_time.h>
+#include <sys/stat.h>
+
+DB_ENV *env;
+DB *db;
+
+#define NITER 100
+
+static void *
+start_a_thread (void *i_p) {
+ int *CAST_FROM_VOIDP(which_thread_p, i_p);
+ int i,r;
+ for (i=0; i<NITER; i++) {
+ DB_TXN *tid;
+ char keystr[100];
+ DBT key,data;
+ snprintf(keystr, sizeof(keystr), "%ld.%d.%d", random(), *which_thread_p, i);
+ r=env->txn_begin(env, 0, &tid, 0); CKERR(r);
+ r=db->put(db, tid,
+ dbt_init(&key, keystr, 1+strlen(keystr)),
+ dbt_init(&data, keystr, 1+strlen(keystr)),
+ 0); CKERR(r);
+ r=tid->commit(tid, 0); CKERR(r);
+ }
+ return 0;
+}
+
+static void
+test_groupcommit (int nthreads) {
+ int r;
+ DB_TXN *tid;
+
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ int i;
+ toku_pthread_t threads[nthreads];
+ int whichthread[nthreads];
+ for (i = 0; i < nthreads; i++) {
+ whichthread[i] = i;
+ r = toku_pthread_create(toku_uninstrumented,
+ &threads[i],
+ nullptr,
+ start_a_thread,
+ &whichthread[i]);
+ }
+ for (i = 0; i < nthreads; i++) {
+ toku_pthread_join(threads[i], 0);
+ }
+#if 0
+ r=env->txn_begin(env, 0, &tid, 0); CKERR(r);
+ char there[1000];
+ memset(there, 'a',sizeof(there));
+ there[999]=0;
+ for (i=0; sum<(effective_max*3)/2; i++) {
+ DBT key,data;
+ char hello[20];
+ snprintf(hello, 20, "hello%d", i);
+ r=db->put(db, tid,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, sizeof(there)),
+ 0);
+ assert(r==0);
+ sum+=strlen(hello)+1+sizeof(there);
+ if ((i+1)%10==0) {
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); CKERR(r);
+ }
+ }
+ if (verbose) printf("i=%d sum=%d effmax=%d\n", i, sum, effective_max);
+ r=tid->commit(tid, 0); assert(r==0);
+#endif
+
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+
+}
+
+static struct timeval prevtime;
+
+static void
+printtdiff (const char *str) {
+ struct timeval thistime;
+ gettimeofday(&thistime, 0);
+ if (verbose) printf("%10.6f %s\n", toku_tdiff(&thistime, &prevtime), str);
+ prevtime = thistime;
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0); }
+
+ gettimeofday(&prevtime, 0);
+ test_groupcommit(1); printtdiff("1 thread");
+ test_groupcommit(2); printtdiff("2 threads");
+ test_groupcommit(10); printtdiff("10 threads");
+ test_groupcommit(20); printtdiff("20 threads");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_hsoc.cc b/storage/tokudb/PerconaFT/src/tests/test_hsoc.cc
new file mode 100644
index 00000000..8381c478
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_hsoc.cc
@@ -0,0 +1,151 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+static int
+db_put (DB *db, DB_TXN *txn, int k, int v) {
+ DBT key, val;
+ int r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ return r;
+}
+
+/* create a tree with 15 of 16 leaf nodes
+ each of the leaves should be about 1/2 full
+ then fill leaf 0 and leaf 13 until they are almost full
+ reopen the tree to flush all of the leaves out of the cache
+ create a cursor on leaf 0 to pull it into memory
+ fill the root buffer 13
+ insert into leaf 0. this should cause leaf 0 to split and the root to expand to 16 children, but
+ it also makes the root node too big. a flush to leaf 16 causes another leaf split, expanding the root
+ to 17 children, which causes the root to split
+
+ the magic numbers were found via experimentation */
+
+static void
+test_hsoc (int pagesize) {
+ if (verbose) printf("test_hsoc:%d\n", pagesize);
+
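+ // npp is roughly the number of small key/value pairs per page; n is sized so the initial load yields about 15 half-full leaves (see the comment above)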
+ int npp = pagesize / 16;
+ int n = npp + 13*npp/2;
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.hsoc.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ int i;
+
+ /* force 15 leaves (14 splits) */
+ if (verbose) printf("force15\n");
+ for (i=0; i<n; i++) {
+ r = db_put(db, null_txn, htonl(i), i); assert(r == 0);
+ }
+
+ /* almost fill leaf 0 */
+ if (verbose) printf("fill0\n");
+ for (i=0; i<(npp/2)-4; i++) {
+ r = db_put(db, null_txn, htonl(0), n+i); assert(r == 0);
+ }
+
+ /* almost fill leaf 15 */
+ if (verbose) printf("fill15\n");
+ for (i=0; i<111; i++) { // for (i=0; i<(npp/2)-4; i++) {
+ r = db_put(db, null_txn, htonl(n), i); assert(r == 0);
+ }
+
+ /* reopen the database to force nonleaf buffering */
+ if (verbose) printf("reopen\n");
+ r = db->close(db, 0); assert(r == 0);
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, 0, 0666); assert(r == 0);
+
+ /* do a cursor get k=0 to pull in leaf 0 */
+ DBC *cursor;
+
+ r = db->cursor(db, null_txn, &cursor, 0); assert(r == 0);
+
+ DBT key, val;
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_FIRST); assert(r == 0);
+ toku_free(key.data); toku_free(val.data);
+
+ /* fill up buffer 2 in the root node */
+ for (i=0; i<216; i++) {
+ r = db_put(db, null_txn, htonl(npp), i); assert(r == 0);
+ }
+
+ /* push a cmd to leaf 0 to cause it to split */
+ for (i=0; i<3; i++) {
+ r = db_put(db, null_txn, htonl(0), 2*n+i); assert(r == 0);
+ }
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ test_hsoc(4096);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_insert_cursor_delete_insert.cc b/storage/tokudb/PerconaFT/src/tests/test_insert_cursor_delete_insert.cc
new file mode 100644
index 00000000..9af5665a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_insert_cursor_delete_insert.cc
@@ -0,0 +1,113 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+test_insert_delete_insert (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ if (verbose) printf("test_insert_delete_insert:\n");
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.cursor.insert.delete.insert.ft_handle";
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ DBC *cursor;
+ r = db->cursor(db, null_txn, &cursor, 0); assert(r == 0);
+
+ int k = htonl(1), v = 2;
+ DBT key, val;
+
+ r = db->put(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+
+ r = cursor->c_get(cursor, dbt_init(&key, &k, sizeof k), dbt_init_malloc(&val), DB_SET);
+ assert(r == 0);
+ toku_free(val.data);
+
+ r = db->del(db, null_txn, &key, DB_DELETE_ANY); assert(r == 0);
+ assert(r == 0);
+
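+ // DB_CURRENT on a row that was just deleted should report DB_KEYEMPTY rather than returning stale data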
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_CURRENT);
+ assert(r == DB_KEYEMPTY);
+ if (key.data) toku_free(key.data);
+ if (val.data) toku_free(val.data);
+
+ r = db->put(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+
+ r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_CURRENT);
+ assert(r == 0);
+ if (key.data) toku_free(key.data);
+ if (val.data) toku_free(val.data);
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ test_insert_delete_insert();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_insert_many_gc.cc b/storage/tokudb/PerconaFT/src/tests/test_insert_many_gc.cc
new file mode 100644
index 00000000..f6111d4b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_insert_many_gc.cc
@@ -0,0 +1,105 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+static void test_insert_many_gc(void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_cachesize(env, 1, 0, 1); CKERR(r); // 1gb cache so this test fits in memory
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, 0); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ const int val_size = 1 * 1024 * 1024;
+
+ // Begin a snapshot transaction, which should prevent simple garbage collection
+ // from being effective. Only full garbage collection can prevent many inserts
+ // into a single leaf node from growing out of control.
+ DB_TXN *snapshot_txn;
+ r = env->txn_begin(env, NULL, &snapshot_txn, DB_TXN_SNAPSHOT); CKERR(r);
+
+ DBT key;
+ int k = 0;
+ dbt_init(&key, &k, sizeof(k));
+
+ DBT val;
+ char *XMALLOC_N(val_size, val_buf);
+ memset(val_buf, 0, val_size);
+ dbt_init(&val, val_buf, val_size);
+
+ // Keep overwriting the same row over and over.
+ const int N = 75;
+ for (int i = 0; i < N; i++) {
+ r = db->put(db, NULL, &key, &val, 0); CKERR(r);
+ }
+
+ // Full garbage collection should have prevented the leaf node
+ // from having an MVCC stack of size 'N'. At the time of this
+ // writing, we run full GC on leaf-inject when the leaf is
+ // 32mb or larger. A good invariant is that the max LE size
+ // never grew larger than 35mb and that the max committed xr stack
+ // length never exceeded 35
+ const uint64_t le_max_memsize = get_engine_status_val(env, "LE_MAX_MEMSIZE");
+ const uint64_t le_max_committed_xr = get_engine_status_val(env, "LE_MAX_COMMITTED_XR");
+ invariant(le_max_memsize <= 35 * 1024 * 1024);
+ invariant(le_max_committed_xr <= 35);
+
+ r = snapshot_txn->commit(snapshot_txn, 0); CKERR(r);
+
+ toku_free(val_buf);
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ test_insert_many_gc();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_insert_memleak.cc b/storage/tokudb/PerconaFT/src/tests/test_insert_memleak.cc
new file mode 100644
index 00000000..9c18695c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_insert_memleak.cc
@@ -0,0 +1,96 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+test_insert (int n, int dup_mode) {
+ if (verbose) printf("test_insert:%d %d\n", n, dup_mode);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.insert.ft_handle";
+ int r;
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ r = db->set_flags(db, dup_mode);
+ assert(r == 0);
+ r = db->set_pagesize(db, 4096);
+ assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+
+ int i;
+ for (i=0; i<n; i++) {
+ int k = htonl(i);
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+ }
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ test_insert(256, 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_insert_unique.cc b/storage/tokudb/PerconaFT/src/tests/test_insert_unique.cc
new file mode 100644
index 00000000..d6913ee6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_insert_unique.cc
@@ -0,0 +1,159 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/**
+ * Test that unique inserts work correctly. This exercises the rightmost leaf inject optimization.
+ */
+
+#include <portability/toku_random.h>
+
+#include "test.h"
+
+static char random_buf[8];
+static struct random_data random_data;
+
+static void test_simple_unique_insert(DB_ENV *env) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644); CKERR(r);
+
+ DBT key1, key2, key3;
+ dbt_init(&key1, "a", sizeof("a"));
+ dbt_init(&key2, "b", sizeof("b"));
+ dbt_init(&key3, "c", sizeof("c"));
+ r = db->put(db, NULL, &key1, &key1, DB_NOOVERWRITE); CKERR(r);
+ r = db->put(db, NULL, &key1, &key1, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST);
+ r = db->put(db, NULL, &key3, &key3, DB_NOOVERWRITE); CKERR(r);
+ r = db->put(db, NULL, &key3, &key3, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST);
+ r = db->put(db, NULL, &key2, &key2, DB_NOOVERWRITE); CKERR(r);
+ r = db->put(db, NULL, &key2, &key2, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST);
+ // sanity check
+ r = db->put(db, NULL, &key1, &key1, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST);
+ r = db->put(db, NULL, &key1, &key3, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->dbremove(env, NULL, "db", NULL, 0); CKERR(r);
+}
+
+static void test_large_sequential_insert_unique(DB_ENV *env) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+
+ // very small nodes/basements to make a taller tree
+ r = db->set_pagesize(db, 8 * 1024); CKERR(r);
+ r = db->set_readpagesize(db, 2 * 1024); CKERR(r);
+ r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644); CKERR(r);
+
+ const int val_size = 8;
+ char *XMALLOC_N(val_size, val_buf);
+ memset(val_buf, 'k', val_size);
+ DBT val;
+ dbt_init(&val, val_buf, val_size);
+
+ // grow a tree to about depth 3, taking sanity checks along the way
+ const int start_num_rows = (64 * 1024 * 1024) / val_size;
+ for (int i = 0; i < start_num_rows; i++) {
+ DBT key;
+ int k = toku_htonl(i);
+ dbt_init(&key, &k, sizeof(k));
+ r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR(r);
+ if (i % 50 == 0) {
+ // sanity check - should not be able to insert this key twice in a row
+ r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST);
+
+ // .. but re-inserting is okay, if we provisionally deleted the row
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); CKERR(r);
+ r = db->put(db, txn, &key, &val, DB_NOOVERWRITE); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+ // re-inserting is also ok if we actually delete the row, for some key < k
+ if (i > 0) {
+ DBT other_key;
+ int other_k = toku_htonl(i - 10);
+ dbt_init(&other_key, &other_k, sizeof(other_k));
+ r = db->del(db, NULL, &other_key, DB_DELETE_ANY); CKERR(r);
+ r = db->put(db, NULL, &other_key, &val, DB_NOOVERWRITE); CKERR(r);
+ }
+ }
+ if (i > 0 && i % 250 == 0) {
+ // sanity check - unique checks on random keys we already inserted should
+ // fail (exercises middle-of-the-tree checks)
+ for (int check_i = 0; check_i < 4; check_i++) {
+ DBT rand_key;
+ int rand_k = toku_htonl(myrandom_r(&random_data) % i);
+ dbt_init(&rand_key, &rand_k, sizeof(rand_k));
+ r = db->put(db, NULL, &rand_key, &val, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST);
+ }
+ }
+ }
+
+ toku_free(val_buf);
+ r = db->close(db, 0); CKERR(r);
+ r = env->dbremove(env, NULL, "db", NULL, 0); CKERR(r);
+}
+
+
+int test_main(int argc, char * const argv[]) {
+ default_parse_args(argc, argv);
+
+ int r;
+ const int envflags = DB_INIT_MPOOL | DB_CREATE | DB_THREAD |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE;
+
+ // startup
+ DB_ENV *env;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); CKERR(r);
+ r = db_env_create(&env, 0); CKERR(r);
+    r = env->open(env, TOKU_TEST_FILENAME, envflags, 0755); CKERR(r);
+
+ r = myinitstate_r(random(), random_buf, 8, &random_data); CKERR(r);
+
+ test_simple_unique_insert(env);
+ test_large_sequential_insert_unique(env);
+
+ // cleanup
+ r = env->close(env, 0); CKERR(r);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_iterate_live_transactions.cc b/storage/tokudb/PerconaFT/src/tests/test_iterate_live_transactions.cc
new file mode 100644
index 00000000..1eb04a06
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_iterate_live_transactions.cc
@@ -0,0 +1,138 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+static DB_TXN *txn1, *txn2, *txn3;
+static uint64_t txnid1, txnid2, txnid3;
+
+struct iterate_extra {
+ iterate_extra() : n(0) {
+ visited_txn[0] = false;
+ visited_txn[1] = false;
+ visited_txn[2] = false;
+ }
+ int n;
+ bool visited_txn[3];
+};
+
+static int iterate_callback(DB_TXN *txn,
+ iterate_row_locks_callback iterate_locks,
+ void *locks_extra, void *extra) {
+ uint64_t txnid = txn->id64(txn);
+ uint64_t client_id; void *client_extra;
+ txn->get_client_id(txn, &client_id, &client_extra);
+ iterate_extra *info = reinterpret_cast<iterate_extra *>(extra);
+ DB *db;
+ DBT left_key, right_key;
+ int r = iterate_locks(&db, &left_key, &right_key, locks_extra);
+ invariant(r == DB_NOTFOUND);
+ if (txnid == txnid1) {
+ assert(!info->visited_txn[0]);
+ invariant(client_id == 0);
+ info->visited_txn[0] = true;
+ } else if (txnid == txnid2) {
+ assert(!info->visited_txn[1]);
+ invariant(client_id == 1);
+ info->visited_txn[1] = true;
+ } else if (txnid == txnid3) {
+ assert(!info->visited_txn[2]);
+ invariant(client_id == 2);
+ info->visited_txn[2] = true;
+ }
+ info->n++;
+ return 0;
+}
+
+int test_main(int UU(argc), char *const UU(argv[])) {
+ int r;
+ const int env_flags = DB_INIT_MPOOL | DB_CREATE | DB_THREAD |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); CKERR(r);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->iterate_live_transactions(env, iterate_callback, NULL);
+ assert(r == EINVAL);
+ r = env->open(env, TOKU_TEST_FILENAME, env_flags, 0755); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn1, 0); CKERR(r);
+ txn1->set_client_id(txn1, 0, nullptr);
+ txnid1 = txn1->id64(txn1);
+ r = env->txn_begin(env, NULL, &txn2, 0); CKERR(r);
+ txn2->set_client_id(txn2, 1, nullptr);
+ txnid2 = txn2->id64(txn2);
+ r = env->txn_begin(env, NULL, &txn3, 0); CKERR(r);
+ txn3->set_client_id(txn3, 2, nullptr);
+ txnid3 = txn3->id64(txn3);
+
+ {
+ iterate_extra e;
+ r = env->iterate_live_transactions(env, iterate_callback, &e); CKERR(r);
+ assert(e.visited_txn[0]);
+ assert(e.visited_txn[1]);
+ assert(e.visited_txn[2]);
+ assert(e.n == 3);
+ }
+
+ r = txn1->commit(txn1, 0); CKERR(r);
+ r = txn2->abort(txn2); CKERR(r);
+ {
+ iterate_extra e;
+ r = env->iterate_live_transactions(env, iterate_callback, &e); CKERR(r);
+ assert(!e.visited_txn[0]);
+ assert(!e.visited_txn[1]);
+ assert(e.visited_txn[2]);
+ assert(e.n == 1);
+ }
+
+ r = txn3->commit(txn3, 0); CKERR(r);
+ {
+ iterate_extra e;
+ r = env->iterate_live_transactions(env, iterate_callback, &e); CKERR(r);
+ assert(!e.visited_txn[0]);
+ assert(!e.visited_txn[1]);
+ assert(!e.visited_txn[2]);
+ assert(e.n == 0);
+ }
+
+ r = env->close(env, 0); CKERR(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_iterate_pending_lock_requests.cc b/storage/tokudb/PerconaFT/src/tests/test_iterate_pending_lock_requests.cc
new file mode 100644
index 00000000..66471a74
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_iterate_pending_lock_requests.cc
@@ -0,0 +1,139 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <portability/toku_pthread.h>
+
+static DB_ENV *env;
+static DB *db;
+static DB_TXN *txn1, *txn2, *txn3;
+static const char *dname = "iterate_pending_requests_dname";
+static const int magic_key = 100;
+static int iterate_callback_called;
+toku_pthread_t thread1, thread2;
+
+// Verify the state of the world
+static int iterate_callback(DB *_db, uint64_t requesting_txnid,
+ const DBT *left_key, const DBT *right_key,
+ uint64_t blocking_txnid, uint64_t start_time, void *extra) {
+ iterate_callback_called++;
+ invariant(extra == nullptr);
+ invariant(strcmp(_db->get_dname(_db), db->get_dname(db)) == 0);
+ invariant(start_time > 0);
+ invariant(*reinterpret_cast<int *>(left_key->data) == magic_key);
+ invariant(*reinterpret_cast<int *>(right_key->data) == magic_key);
+ invariant(blocking_txnid == txn1->id64(txn1));
+ invariant(requesting_txnid == txn2->id64(txn2) || requesting_txnid == txn3->id64(txn3));
+ return 0;
+}
+
+static void acquire_lock(DB_TXN *txn, int key) {
+ int val = 0;
+ DBT k, v;
+ dbt_init(&k, &key, sizeof(int));
+ dbt_init(&v, &val, sizeof(int));
+ (void) db->put(db, txn, &k, &v, 0);
+}
+
+struct acquire_lock_extra {
+ acquire_lock_extra(DB_TXN *x, int k) :
+ txn(x), key(k) {
+ }
+ DB_TXN *txn;
+ int key;
+};
+
+static void *acquire_lock_thread(void *arg) {
+ acquire_lock_extra *info = reinterpret_cast<acquire_lock_extra *>(arg);
+ acquire_lock(info->txn, info->key);
+ return NULL;
+}
+
+int test_main(int UU(argc), char *const UU(argv[])) {
+ int r;
+ const int env_flags = DB_INIT_MPOOL | DB_CREATE | DB_THREAD |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, env_flags, 0755); CKERR(r);
+ r = env->set_lock_timeout(env, 4000, nullptr);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, dname, NULL, DB_BTREE, DB_CREATE, 0777); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn1, DB_SERIALIZABLE); CKERR(r);
+ r = env->txn_begin(env, NULL, &txn2, DB_SERIALIZABLE); CKERR(r);
+ r = env->txn_begin(env, NULL, &txn3, DB_SERIALIZABLE); CKERR(r);
+
+    // Extremely simple test. Get a lock on [magic_key, magic_key] in txn1, then asynchronously
+    // attempt to get that lock in txn2 and txn3. The iterate callback
+    // verifies that two waiters exist for [magic_key, magic_key] and that txn1 is
+    // the blocking txn.
+
+ acquire_lock(txn1, magic_key);
+
+ acquire_lock_extra e1(txn2, magic_key);
+ r = toku_pthread_create(
+ toku_uninstrumented, &thread1, nullptr, acquire_lock_thread, &e1);
+ CKERR(r);
+ acquire_lock_extra e2(txn3, magic_key);
+ r = toku_pthread_create(
+ toku_uninstrumented, &thread2, nullptr, acquire_lock_thread, &e2);
+ CKERR(r);
+
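+    // Give both background threads time to block on their lock requests before iterating.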
+ sleep(1);
+ r = env->iterate_pending_lock_requests(env, iterate_callback, NULL);
+ CKERR(r);
+ invariant(iterate_callback_called == 2);
+
+ void *v;
+ r = toku_pthread_join(thread1, &v); CKERR(r);
+ r = toku_pthread_join(thread2, &v); CKERR(r);
+
+ r = txn1->commit(txn1, 0); CKERR(r);
+ r = txn2->commit(txn2, 0); CKERR(r);
+ r = txn3->commit(txn3, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_keylen_diff.cc b/storage/tokudb/PerconaFT/src/tests/test_keylen_diff.cc
new file mode 100644
index 00000000..10f121df
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_keylen_diff.cc
@@ -0,0 +1,232 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// test a comparison function that treats certain keys of different lengths as equal
+
+struct packed_key {
+ char type;
+ char k[8];
+ static packed_key as_int(int v) {
+ packed_key k;
+ k.type = 0;
+ memcpy(k.k, &v, sizeof(int));
+ return k;
+ }
+ static packed_key as_double(double v) {
+ packed_key k;
+ k.type = 1;
+ memcpy(k.k, &v, sizeof(double));
+ return k;
+ }
+ size_t size() const {
+ assert(type == 0 || type == 1);
+ return type == 0 ? 5 : 9;
+ }
+};
+
+// the point is that keys can be packed as integers or doubles, but
+// we'll treat them both as doubles for the sake of comparison.
+// this means a 4 byte number could equal an 8 byte number.
+static int packed_key_cmp(DB *UU(db), const DBT *a, const DBT *b) {
+ assert(a->size == 5 || a->size == 9);
+ assert(b->size == 5 || b->size == 9);
+ char *k1 = reinterpret_cast<char *>(a->data);
+ char *k2 = reinterpret_cast<char *>(b->data);
+ assert(*k1 == 0 || *k1 == 1);
+ assert(*k2 == 0 || *k2 == 1);
+ double v1 = *k1 == 0 ? static_cast<double>(*reinterpret_cast<int *>(k1 + 1)) :
+ *reinterpret_cast<double *>(k1 + 1);
+ double v2 = *k2 == 0 ? static_cast<double>(*reinterpret_cast<int *>(k2 + 1)) :
+ *reinterpret_cast<double *>(k2 + 1);
+ if (v1 > v2) {
+ return 1;
+ } else if (v1 < v2) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static int update_callback(DB *UU(db), const DBT *UU(key), const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val, void *setval_extra), void *setval_extra) {
+ assert(extra != nullptr);
+ assert(old_val != nullptr);
+ assert(extra->size == 0 || extra->size == 100);
+ assert(old_val->size == 0 || old_val->size == 100);
+ if (extra->data == nullptr) {
+ set_val(nullptr, setval_extra);
+ } else {
+ set_val(extra, setval_extra);
+ }
+ return 0;
+}
+
+enum overwrite_method {
+ VIA_UPDATE_OVERWRITE_BROADCAST,
+ VIA_UPDATE_DELETE_BROADCAST,
+ VIA_UPDATE_OVERWRITE,
+ VIA_UPDATE_DELETE,
+ VIA_DELETE,
+ VIA_INSERT,
+ NUM_OVERWRITE_METHODS
+};
+
+static void test_keylen_diff(enum overwrite_method method, bool control_test) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, packed_key_cmp); CKERR(r);
+ env->set_update(env, update_callback); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, 0); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_pagesize(db, 16 * 1024); // smaller pages so we get a more lush tree
+ r = db->set_readpagesize(db, 1 * 1024); // smaller basements so we get more per leaf
+ r = db->open(db, nullptr, "db", nullptr, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ DBT null_dbt, val_dbt;
+ char val_buf[100];
+ memset(val_buf, 0, sizeof val_buf);
+ dbt_init(&val_dbt, &val_buf, sizeof val_buf);
+ dbt_init(&null_dbt, nullptr, 0);
+
+ const int num_keys = 1<<11; //256 * 1000;
+
+ for (int i = 0; i < num_keys; i++) {
+ // insert it using a 4 byte key ..
+ packed_key key = packed_key::as_int(i);
+
+ DBT dbt;
+ dbt_init(&dbt, &key, key.size());
+ r = db->put(db, nullptr, &dbt, &val_dbt, 0); CKERR(r);
+ }
+
+ // overwrite keys randomly, so we induce flushes and get better / realistic coverage
+ int *XMALLOC_N(num_keys, shuffled_keys);
+ for (int i = 0; i < num_keys; i++) {
+ shuffled_keys[i] = i;
+ }
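+    // Fisher-Yates shuffle of the key order.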
+ for (int i = num_keys - 1; i >= 1; i--) {
+ long rnd = random64() % (i + 1);
+ int tmp = shuffled_keys[rnd];
+ shuffled_keys[rnd] = shuffled_keys[i];
+ shuffled_keys[i] = tmp;
+ }
+
+ for (int i = 0; i < num_keys; i++) {
+ // for the control test, delete it using the same length key
+ //
+ // .. otherwise, delete it with an 8 byte key
+ packed_key key = control_test ? packed_key::as_int(shuffled_keys[i]) :
+ packed_key::as_double(shuffled_keys[i]);
+
+ DBT dbt;
+ dbt_init(&dbt, &key, key.size());
+ DB_TXN *txn;
+        r = env->txn_begin(env, nullptr, &txn, DB_TXN_NOSYNC); CKERR(r);
+ switch (method) {
+ case VIA_INSERT: {
+ r = db->put(db, txn, &dbt, &val_dbt, 0); CKERR(r);
+ break;
+ }
+ case VIA_DELETE: {
+ // we purposefully do not pass DB_DELETE_ANY because the hidden query acts as
+ // a sanity check for the control test and, overall, gives better code coverage
+ r = db->del(db, txn, &dbt, 0); CKERR(r);
+ break;
+ }
+ case VIA_UPDATE_OVERWRITE:
+ case VIA_UPDATE_DELETE: {
+ r = db->update(db, txn, &dbt, method == VIA_UPDATE_DELETE ? &null_dbt : &val_dbt, 0); CKERR(r);
+ break;
+ }
+ case VIA_UPDATE_OVERWRITE_BROADCAST:
+ case VIA_UPDATE_DELETE_BROADCAST: {
+ r = db->update_broadcast(db, txn, method == VIA_UPDATE_DELETE_BROADCAST ? &null_dbt : &val_dbt, 0); CKERR(r);
+ if (i > 1 ) { // only need to test broadcast twice - one with abort, one without
+ txn->abort(txn); // we opened a txn so we should abort it before exiting
+ goto done;
+ }
+ break;
+ }
+ default: {
+ assert(false);
+ }
+ }
+ const bool abort = i % 2 == 0;
+ if (abort) {
+ txn->abort(txn);
+ } else {
+ txn->commit(txn, 0);
+ }
+ }
+
+done:
+ toku_free(shuffled_keys);
+
+ // optimize before close to ensure that all messages are applied and any potential bugs are exposed
+    r = db->optimize(db); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ for (int i = 0; i < NUM_OVERWRITE_METHODS; i++) {
+ enum overwrite_method method = static_cast<enum overwrite_method>(i);
+
+ // control test - must pass for the 'real' test below to be interesting
+ printf("testing method %d (control)\n", i);
+ test_keylen_diff(method, true);
+
+ // real test, actually mixes key lengths
+ printf("testing method %d (real)\n", i);
+ test_keylen_diff(method, false);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_kv_gen.h b/storage/tokudb/PerconaFT/src/tests/test_kv_gen.h
new file mode 100644
index 00000000..06f3ad3c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_kv_gen.h
@@ -0,0 +1,226 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "test.h"
+//
+// Functions to create unique key/value pairs, row generators, checkers, ... for each of NUM_DBS
+//
+
+// aa is the bit-wise permute table. For DB[i], permute bits as described in aa[i] using 'twiddle32'
+// inv is the inverse bit-wise permute of aa[]. To recover the original value from a twiddled value, permute again with inv[] (see 'inv_twiddle32')
+enum {MAX_DBS=256};
+enum {MAGIC=311};
+static int aa[MAX_DBS][32] UU();
+static int inv[MAX_DBS][32] UU();
+
+// rotate right and left functions
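+// (n == 0 is handled specially below: shifting a 32-bit value by a full 32 bits would be undefined behavior)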
+static inline unsigned int UU()
+rotr32(const unsigned int x, const unsigned int num) {
+    const unsigned int n = num % 32;
+    return n == 0 ? x : (x >> n) | (x << (32 - n));
+}
+static inline unsigned int UU()
+rotl32(const unsigned int x, const unsigned int num) {
+    const unsigned int n = num % 32;
+    return n == 0 ? x : (x << n) | (x >> (32 - n));
+}
+
+static void UU()
+generate_permute_tables(void) {
+ srandom(1);
+ int i, j, tmp;
+ for(int db=0;db<MAX_DBS;db++) {
+ for(i=0;i<32;i++) {
+ aa[db][i] = i;
+ }
+ for(i=0;i<32;i++) {
+ j = random() % (i + 1);
+ tmp = aa[db][j];
+ aa[db][j] = aa[db][i];
+ aa[db][i] = tmp;
+ }
+ for(i=0;i<32;i++) {
+ inv[db][aa[db][i]] = i;
+ }
+ }
+}
+
+// permute bits of x based on permute table bitmap
+static unsigned int UU()
+twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << aa[db][i];
+ }
+ return b;
+}
+
+// permute bits of x based on inverse permute table bitmap
+static unsigned int UU()
+inv_twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << inv[db][i];
+ }
+ return b;
+}
+
+// generate val from key, index
+static unsigned int UU()
+generate_val(int key, int i) {
+ return rotl32((key + MAGIC), i);
+}
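+// recover the original key from a value produced by generate_val (its inverse)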
+static unsigned int UU()
+pkey_for_val(int key, int i) {
+ return rotr32(key, i) - MAGIC;
+}
+
+
+static int __attribute__((unused))
+dummy_progress(void *UU(extra), float UU(progress))
+{
+ return 0;
+}
+
+static void __attribute__((unused))
+do_hot_optimize_on_dbs(DB_ENV *UU(env), DB **dbs, int num_dbs)
+{
+ for (int i = 0; i < num_dbs; ++i) {
+ uint64_t loops_run;
+ int r = dbs[i]->hot_optimize(dbs[i], NULL, NULL, dummy_progress, NULL, &loops_run);
+ CKERR(r);
+ }
+}
+
+// don't check the first n rows (they are expected to have been deleted)
+static void UU()
+check_results_after_row_n(DB_ENV *env, DB **dbs, const int num_dbs, const int num_rows, const int first_row_to_check) {
+
+ for(int j=0;j<num_dbs;j++){
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ int r;
+ unsigned int pkey_for_db_key;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ DBC *cursor;
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
+ for(int i=first_row_to_check; i<num_rows; i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ CKERR(r);
+ k = *(unsigned int*)key.data;
+ pkey_for_db_key = (j == 0) ? k : inv_twiddle32(k, j);
+ v = *(unsigned int*)val.data;
+ // test that we have the expected keys and values
+ if ((unsigned int)pkey_for_db_key != (unsigned int)pkey_for_val(v, j))
+ printf(" DB[%d] key = %10u, val = %10u, pkey_for_db_key = %10u, pkey_for_val=%10d\n", j, v, k, pkey_for_db_key, pkey_for_val(v, j));
+ assert((unsigned int)pkey_for_db_key == (unsigned int)pkey_for_val(v, j));
+ dbt_init(&key, NULL, sizeof(unsigned int));
+ dbt_init(&val, NULL, sizeof(unsigned int));
+ }
+ if ( verbose ) {printf("."); fflush(stdout);}
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+ if ( verbose ) {printf("ok");fflush(stdout);}
+}
+
+static void UU()
+check_results(DB_ENV *env, DB **dbs, const int num_dbs, const int num_rows)
+{
+ check_results_after_row_n(env, dbs, num_dbs, num_rows, 0);
+}
+
+
+static int UU()
+put_multiple_generate(DB *dest_db, DB *src_db, DBT *dest_key, DBT *dest_val, const DBT *src_key, const DBT *src_val, void *extra) {
+
+ (void) src_db;
+ (void) extra;
+
+ uint32_t which = *(uint32_t*)dest_db->app_private;
+
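+    // DB 0 stores the source key/val verbatim; every other DB stores a twiddled key and a value generated from the source key.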
+ if ( which == 0 ) {
+ if (dest_key->flags==DB_DBT_REALLOC) {
+ if (dest_key->data) toku_free(dest_key->data);
+ dest_key->flags = 0;
+ dest_key->ulen = 0;
+ }
+ if (dest_val->flags==DB_DBT_REALLOC) {
+ if (dest_val->data) toku_free(dest_val->data);
+ dest_val->flags = 0;
+ dest_val->ulen = 0;
+ }
+ dbt_init(dest_key, src_key->data, src_key->size);
+ dbt_init(dest_val, src_val->data, src_val->size);
+ }
+ else {
+ assert(dest_key->flags==DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(unsigned int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(unsigned int));
+ dest_key->ulen = sizeof(unsigned int);
+ }
+ assert(dest_val->flags==DB_DBT_REALLOC);
+ if (dest_val->ulen < sizeof(unsigned int)) {
+ dest_val->data = toku_xrealloc(dest_val->data, sizeof(unsigned int));
+ dest_val->ulen = sizeof(unsigned int);
+ }
+ unsigned int *new_key = (unsigned int *)dest_key->data;
+ unsigned int *new_val = (unsigned int *)dest_val->data;
+
+ *new_key = twiddle32(*(unsigned int*)src_key->data, which);
+ *new_val = generate_val(*(unsigned int*)src_key->data, which);
+
+ dest_key->size = sizeof(unsigned int);
+ dest_val->size = sizeof(unsigned int);
+ //data is already set above
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_kv_limits.cc b/storage/tokudb/PerconaFT/src/tests/test_kv_limits.cc
new file mode 100644
index 00000000..cd707d89
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_kv_limits.cc
@@ -0,0 +1,211 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static uint64_t lorange = 0;
+static uint64_t hirange = 1<<24;
+static uint32_t pagesize = 0;
+
+static void test_key_size_limit (void) {
+ if (verbose > 1) printf("%s\n", __FUNCTION__);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.rand.insert.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+
+ void *k = 0;
+ void *v = 0;
+ uint32_t lo = lorange, mi = 0, hi = hirange;
+ uint32_t bigest = 0;
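+    // binary search for the largest key size that db->put will accept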
+ while (lo <= hi) {
+ mi = lo + (hi - lo) / 2;
+ assert(lo <= mi && mi <= hi);
+ uint32_t ks = mi;
+ if (verbose > 1) printf("trying %u %u %u ks=%u\n", lo, mi, hi, ks);
+ k = toku_realloc(k, ks); assert(k);
+ memset(k, 0, ks);
+ memcpy(k, &ks, sizeof ks);
+ uint32_t vs = sizeof (uint32_t);
+ v = toku_realloc(v, vs); assert(v);
+ memset(v, 0, vs);
+ memcpy(v, &vs, sizeof vs);
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, k, ks), dbt_init(&val, v, vs), 0);
+ if (r == 0) {
+ bigest = mi;
+ lo = mi+1;
+ } else {
+ if (verbose > 1) printf("%u too big\n", ks);
+ hi = mi-1;
+ }
+ }
+ toku_free(k);
+ toku_free(v);
+ assert(bigest > 0);
+ if (verbose) printf("%s bigest %u\n", __FUNCTION__, bigest);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+static void test_data_size_limit (void) {
+ if (verbose > 1) printf("%s\n", __FUNCTION__);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.rand.insert.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ if (pagesize) {
+ r = db->set_pagesize(db, pagesize); assert(r == 0);
+ }
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+
+ void *k = 0;
+ void *v = 0;
+ uint32_t lo = lorange, mi = 0, hi = hirange;
+ uint32_t bigest = 0;
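+    // binary search for the largest value size that db->put will accept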
+ while (lo <= hi) {
+ mi = lo + (hi - lo) / 2;
+ assert(lo <= mi && mi <= hi);
+ uint32_t ks = sizeof (uint32_t);
+ if (verbose > 1) printf("trying %u %u %u ks=%u\n", lo, mi, hi, ks);
+ k = toku_realloc(k, ks); assert(k);
+ memset(k, 0, ks);
+ memcpy(k, &ks, sizeof ks);
+ uint32_t vs = mi;
+ v = toku_realloc(v, vs); assert(v);
+ memset(v, 0, vs);
+ memcpy(v, &vs, sizeof vs);
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, k, ks), dbt_init(&val, v, vs), 0);
+ if (r == 0) {
+ bigest = mi;
+ lo = mi+1;
+ } else {
+ if (verbose > 1) printf("%u too big\n", vs);
+ hi = mi-1;
+ }
+ }
+ toku_free(k);
+ toku_free(v);
+ if (verbose && bigest > 0) printf("%s bigest %u\n", __FUNCTION__, bigest);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ int do_key = 1;
+ int do_data = 1;
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-lorange") == 0 && i+1 < argc) {
+ lorange = strtoull(argv[++i], 0, 10);
+ if (lorange > ULLONG_MAX)
+ return 2;
+ continue;
+ }
+ if (strcmp(arg, "-hirange") == 0 && i+1 < argc) {
+ hirange = strtoull(argv[++i], 0, 10);
+ if (hirange > ULLONG_MAX)
+ return 2;
+ continue;
+ }
+ if (strcmp(arg, "-pagesize") == 0 && i+1 < argc) {
+ pagesize = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "-nokey") == 0) {
+ do_key = 0;
+ continue;
+ }
+ if (strcmp(arg, "-nodata") == 0) {
+ do_data = 0;
+ continue;
+ }
+ }
+
+ if (do_key)
+ test_key_size_limit();
+ if (do_data)
+ test_data_size_limit();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_large_update_broadcast_small_cachetable.cc b/storage/tokudb/PerconaFT/src/tests/test_large_update_broadcast_small_cachetable.cc
new file mode 100644
index 00000000..a5cfac34
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_large_update_broadcast_small_cachetable.cc
@@ -0,0 +1,191 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// This test sets the cache size to be small and then inserts enough data
+// to make some basement nodes get evicted. Then sends a broadcast update
+// and checks all the data. If the msns for evicted basement nodes and
+// leaf nodes are not managed properly, this test should fail (because the
+// broadcast message will not be applied to basement nodes being brought
+// back in).
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const unsigned int NUM_KEYS = (1<<17);
+const unsigned int MAGIC_EXTRA = 0x4ac0ffee;
+
+const char original_data[] = "original: ha.rpbkasrkcabkshtabksraghpkars3cbkarpcpktkpbarkca.hpbtkvaekragptknbnsaotbknotbkaontekhba";
+const char updated_data[] = "updated: crkphi30bi8a9hpckbrap.k98a.pkrh3miachpk0[alr3s4nmubrp8.9girhp,bgoekhrl,nurbperk8ochk,bktoe";
+
+static int update_fun(DB *UU(db),
+ const DBT *UU(key),
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *e;
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ assert(*e == MAGIC_EXTRA);
+ assert(old_val->size == sizeof(original_data));
+ assert(memcmp(old_val->data, original_data, sizeof(original_data)) == 0);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, updated_data, sizeof(updated_data)), set_extra);
+ }
+
+ return 0;
+}
+
+static int
+int_cmp(DB *UU(db), const DBT *a, const DBT *b) {
+ unsigned int *ap, *bp;
+ assert(a->size == sizeof(*ap));
+ CAST_FROM_VOIDP(ap, a->data);
+ assert(b->size == sizeof(*bp));
+ CAST_FROM_VOIDP(bp, b->data);
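+    // branchless three-way compare: yields -1, 0, or 1 without the wraparound risk of plain subtraction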
+ return (*ap > *bp) - (*ap < *bp);
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ env->set_cachesize(env, 0, 10*(1<<20), 1);
+ { int chk_r = env->set_default_bt_compare(env, int_cmp); CKERR(chk_r); }
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, original_data, sizeof(original_data));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ unsigned int e = MAGIC_EXTRA;
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ int r = db->update_broadcast(db, txn, extrap, 0); CKERR(r);
+ return r;
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(updated_data));
+ assert(memcmp(val.data, updated_data, sizeof(updated_data)) == 0);
+ }
+ return r;
+}
+
+static int run_test(bool shutdown_before_update, bool shutdown_before_verify) {
+ setup();
+
+ DB *db;
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ db->set_pagesize(db, 256*1024);
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ if (shutdown_before_update) {
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ IN_TXN_COMMIT(env, NULL, txn_reopen, 0, {
+ { int chk_r = db->open(db, txn_reopen, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ }
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ if (shutdown_before_verify) {
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ IN_TXN_COMMIT(env, NULL, txn_reopen, 0, {
+ { int chk_r = db->open(db, txn_reopen, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ }
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+
+ run_test(false, false);
+ run_test(false, true);
+ run_test(true, false);
+ run_test(true, true);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_lock_timeout_callback.cc b/storage/tokudb/PerconaFT/src/tests/test_lock_timeout_callback.cc
new file mode 100644
index 00000000..571bae69
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_lock_timeout_callback.cc
@@ -0,0 +1,141 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <portability/toku_pthread.h>
+#include <portability/toku_atomic.h>
+
+static DB_ENV *env;
+static DB *db;
+static DB_TXN *txn1, *txn2;
+static const int magic_key = 100;
+static int callback_calls;
+toku_pthread_t thread1;
+
+static void lock_not_granted(DB *_db, uint64_t requesting_txnid,
+ const DBT *left_key, const DBT *right_key,
+ uint64_t blocking_txnid) {
+ toku_sync_fetch_and_add(&callback_calls, 1);
+ invariant(strcmp(_db->get_dname(_db), db->get_dname(db)) == 0);
+ if (requesting_txnid == txn2->id64(txn2)) {
+ invariant(blocking_txnid == txn1->id64(txn1));
+ invariant(*reinterpret_cast<int *>(left_key->data) == magic_key);
+ invariant(*reinterpret_cast<int *>(right_key->data) == magic_key);
+ } else {
+ invariant(blocking_txnid == txn2->id64(txn2));
+ invariant(*reinterpret_cast<int *>(left_key->data) == magic_key + 1);
+ invariant(*reinterpret_cast<int *>(right_key->data) == magic_key + 1);
+ }
+}
+
+static void acquire_lock(DB_TXN *txn, int key) {
+ int val = 0;
+ DBT k, v;
+ dbt_init(&k, &key, sizeof(int));
+ dbt_init(&v, &val, sizeof(int));
+ (void) db->put(db, txn, &k, &v, 0);
+}
+
+struct acquire_lock_extra {
+ acquire_lock_extra(DB_TXN *x, int k) :
+ txn(x), key(k) {
+ }
+ DB_TXN *txn;
+ int key;
+};
+
+static void *acquire_lock_thread(void *arg) {
+ acquire_lock_extra *info = reinterpret_cast<acquire_lock_extra *>(arg);
+ acquire_lock(info->txn, info->key);
+ return NULL;
+}
+
+int test_main(int UU(argc), char *const UU(argv[])) {
+ int r;
+ const int env_flags = DB_INIT_MPOOL | DB_CREATE | DB_THREAD |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); CKERR(r);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, env_flags, 0755); CKERR(r);
+ r = env->set_lock_timeout(env, 1000, nullptr);
+ r = env->set_lock_timeout_callback(env, lock_not_granted);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, "test", NULL, DB_BTREE, DB_CREATE, 0777); CKERR(r);
+
+ r = env->txn_begin(env, NULL, &txn1, DB_SERIALIZABLE); CKERR(r);
+ r = env->txn_begin(env, NULL, &txn2, DB_SERIALIZABLE); CKERR(r);
+
+    // Extremely simple test. Get a lock on [magic_key, magic_key] in txn1, then
+    // attempt to get that lock in txn2. The timeout callback should get called.
+
+ acquire_lock(txn1, magic_key);
+ invariant(callback_calls == 0);
+
+ acquire_lock(txn2, magic_key);
+ invariant(callback_calls == 1);
+
+    // If we induce a deadlock, the callback should get called.
+ acquire_lock(txn2, magic_key + 1);
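+    // txn2 now holds magic_key + 1; the background thread makes txn1 wait on it while the main thread makes txn2 wait on txn1's lock, forming a cycle.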
+ toku_pthread_t thread;
+ acquire_lock_extra e(txn1, magic_key + 1);
+ r = toku_pthread_create(
+ toku_uninstrumented, &thread, nullptr, acquire_lock_thread, &e);
+ usleep(100000);
+ acquire_lock(txn2, magic_key);
+ invariant(callback_calls == 2);
+ void *v;
+ r = toku_pthread_join(thread, &v); CKERR(r);
+ invariant(callback_calls == 3);
+
+ // If we set the callback to null, then it shouldn't get called anymore.
+ env->set_lock_timeout_callback(env, nullptr);
+ acquire_lock(txn2, magic_key);
+ invariant(callback_calls == 3);
+
+ r = txn1->commit(txn1, 0); CKERR(r);
+ r = txn2->commit(txn2, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_locking_with_read_txn.cc b/storage/tokudb/PerconaFT/src/tests/test_locking_with_read_txn.cc
new file mode 100644
index 00000000..fbff18ef
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_locking_with_read_txn.cc
@@ -0,0 +1,90 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+int test_main(int argc, char * const argv[])
+{
+ int r;
+ DB * db;
+ DB_ENV * env;
+ (void) argc;
+ (void) argv;
+
+ const char *db_env_dir = TOKU_TEST_FILENAME;
+ char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
+ snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
+
+ r = system(rm_cmd); { int chk_r = r; CKERR(chk_r); }
+ r = toku_os_mkdir(db_env_dir, 0755); { int chk_r = r; CKERR(chk_r); }
+
+ // set things up
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, db_env_dir, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
+ CKERR(r);
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
+ CKERR(r);
+
+
+ DB_TXN* txn1 = NULL;
+ DB_TXN* txn2 = NULL;
+ r = env->txn_begin(env, 0, &txn1, DB_TXN_READ_ONLY);
+ CKERR(r);
+ r = env->txn_begin(env, 0, &txn2, DB_TXN_READ_ONLY);
+ CKERR(r);
+
+
+ r=db->pre_acquire_table_lock(db, txn1); CKERR(r);
+ r=db->pre_acquire_table_lock(db, txn2); CKERR2(r, DB_LOCK_NOTGRANTED);
+
+ r = txn1->commit(txn1, 0);
+ CKERR(r);
+ r = txn2->commit(txn2, 0);
+ CKERR(r);
+
+ // clean things up
+ r = db->close(db, 0);
+ CKERR(r);
+ r = env->close(env, 0);
+ CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_locktree_close.cc b/storage/tokudb/PerconaFT/src/tests/test_locktree_close.cc
new file mode 100644
index 00000000..050a5d48
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_locktree_close.cc
@@ -0,0 +1,115 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+test_cursor (void) {
+ if (verbose) printf("test_cursor\n");
+
+ DB_ENV * env;
+ DB *db;
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.cursor.ft";
+ int r;
+
+ /* create the dup database file */
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_TXN | DB_INIT_LOCK |DB_CREATE|DB_INIT_MPOOL|DB_THREAD|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); CKERR(r);
+ db->set_errfile(db,stderr); // Turn off those annoying errors
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ DBC* cursor;
+ DBT k0; memset(&k0, 0, sizeof k0);
+ DBT v0; memset(&v0, 0, sizeof v0);
+ DB_TXN* txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, DB_SERIALIZABLE); CKERR(r);
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ r = cursor->c_set_bounds(
+ cursor,
+ db->dbt_neg_infty(),
+ db->dbt_pos_infty(),
+ true,
+ 0
+ );
+ CKERR(r);
+ r = cursor->c_close(cursor); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_THREAD, 0666); assert(r == 0);
+ DB_TXN* txn2 = NULL;
+ r = env->txn_begin(env, NULL, &txn2, DB_SERIALIZABLE); CKERR(r);
+ int k = htonl(1);
+ int v = htonl(1);
+ DBT key, val;
+ // #4838 will improperly allow this put to succeed, whereas we should
+ // be returning DB_LOCK_NOTGRANTED
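+ // (The range lock taken by c_set_bounds() above belongs to txn, so it
+ // should survive the db->close()/db->open() pair; if closing the handle
+ // released its locktree too early, this put would incorrectly succeed.)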
+ r = db->put(db, txn2, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+
+ r = txn->commit(txn, 0); CKERR(r);
+ r = txn2->commit(txn2, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ test_cursor();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log0.cc b/storage/tokudb/PerconaFT/src/tests/test_log0.cc
new file mode 100644
index 00000000..67c6c92a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log0.cc
@@ -0,0 +1,62 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Simple test of logging. Can I start PerconaFT with logging enabled? */
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+DB_ENV *env;
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log1.cc b/storage/tokudb/PerconaFT/src/tests/test_log1.cc
new file mode 100644
index 00000000..5473a8e3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log1.cc
@@ -0,0 +1,113 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Simple test of logging. Can I start PerconaFT with logging enabled? */
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <db.h>
+#include <memory.h>
+#include <stdio.h>
+#include <errno.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+DB_ENV *env;
+DB *db;
+DB_TXN *tid;
+
+static void make_db (bool close_env) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ {
+ DBT key,data;
+ dbt_init(&key, "hello", sizeof "hello");
+ dbt_init(&data, "there", sizeof "there");
+ r=db->put(db, tid, &key, &data, 0);
+ CKERR(r);
+ }
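+ // Ask the env for the internal file name (iname) backing the dictionary
+ // "foo.db"; it is stat'ed after the commit below to check that the file
+ // really exists on disk.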
+ char *filename;
+ {
+ DBT dname;
+ DBT iname;
+ dbt_init(&dname, "foo.db", sizeof("foo.db"));
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &dname, &iname);
+ CKERR(r);
+ CAST_FROM_VOIDP(filename, iname.data);
+ assert(filename);
+ }
+
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ {
+ toku_struct_stat statbuf;
+ char fullfile[TOKU_PATH_MAX + 1];
+ r = toku_stat(toku_path_join(fullfile, 2, TOKU_TEST_FILENAME, filename),
+ &statbuf,
+ toku_uninstrumented);
+ assert(r == 0);
+ toku_free(filename);
+ }
+ if (close_env) {
+ r=env->close(env, 0); assert(r==0);
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ bool close_env = true;
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "--no-shutdown") == 0)
+ close_env = false;
+ }
+ make_db(close_env);
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log10.cc b/storage/tokudb/PerconaFT/src/tests/test_log10.cc
new file mode 100644
index 00000000..f1e3782d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log10.cc
@@ -0,0 +1,147 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Test to see if we can do logging and recovery. */
+/* This is very specific to PerconaFT. It won't work with Berkeley DB. */
+/* This test_log10 inserts into a db, closes it, reopens it, and inserts more. We want to make sure that recovery of the buffers works. */
+/* Lots of stuff gets inserted. */
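+/* Rough structure: make_db() creates the env and an empty foo.db, then calls
+ * insert_some() ten times; each call reopens the env, inserts maxcount rows in
+ * a single transaction, and closes the db again.  With --no-shutdown the env
+ * is left open, presumably so a companion recovery test can crash the process
+ * and then exercise recovery. */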
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+struct in_db;
+struct in_db {
+ long int r;
+ int i;
+ struct in_db *next;
+} *items=0;
+
+int maxcount = 10000;
+
+static void insert_some (int outeri, bool close_env) {
+ uint32_t create_flag = outeri%2 ? DB_CREATE : 0; // Sometimes use DB_CREATE, sometimes don't.
+ int r;
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ r=db_env_create(&env, 0); assert(r==0);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|create_flag, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, create_flag, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+
+ int i;
+ for (i=0; i<maxcount; i++) {
+ char hello[30], there[30];
+ DBT key,data;
+ struct in_db *XMALLOC(newitem);
+ newitem->r = random();
+ newitem->i = i;
+ newitem->next = items;
+ items = newitem;
+ snprintf(hello, sizeof(hello), "hello%ld.%d.%d", newitem->r, outeri, newitem->i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ r=db->put(db, tid, &key, &data, 0); CKERR(r);
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ if (close_env) {
+ r=env->close(env, 0); assert(r==0);
+ }
+}
+
+static void make_db (bool close_env) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+ int i;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); CKERR(r);
+ if (close_env) {
+ r=env->close(env, 0); CKERR(r);
+ }
+
+ for (i=0; i<10; i++)
+ insert_some(i, close_env);
+
+ while (items) {
+ struct in_db *next=items->next;
+ toku_free(items);
+ items=next;
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ bool close_env = true;
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "--no-shutdown") == 0)
+ close_env = false;
+ }
+ make_db(close_env);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log1_abort.cc b/storage/tokudb/PerconaFT/src/tests/test_log1_abort.cc
new file mode 100644
index 00000000..c66409fb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log1_abort.cc
@@ -0,0 +1,92 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Do test_log1, except abort instead of commit. */
+
+
+#include <db.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+DB_ENV *env;
+DB *db;
+DB_TXN *tid;
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_PRIVATE|DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ {
+ DBT key,data;
+ dbt_init(&key, "hello", sizeof "hello");
+ dbt_init(&data, "there", sizeof "there");
+ r=db->put(db, tid, &key, &data, 0);
+ CKERR(r);
+ }
+ r=db->close(db, 0);
+ assert(r==0);
+ r=tid->abort(tid);
+ assert(r==0);
+ r=env->close(env, 0);
+ assert(r==0);
+ {
+ toku_struct_stat statbuf;
+ char filename[TOKU_PATH_MAX + 1];
+ r = toku_stat(toku_path_join(filename, 2, TOKU_TEST_FILENAME, "foo.db"),
+ &statbuf,
+ toku_uninstrumented);
+ assert(r == -1);
+ assert(errno == ENOENT);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log2.cc b/storage/tokudb/PerconaFT/src/tests/test_log2.cc
new file mode 100644
index 00000000..d6804736
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log2.cc
@@ -0,0 +1,83 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Test to see if we can do logging and recovery. */
+/* This is very specific to PerconaFT. It won't work with Berkeley DB. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+static void make_db (bool close_env) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ if (close_env) {
+ r=env->close(env, 0); assert(r==0);
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ bool close_env = true;
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "--no-shutdown") == 0)
+ close_env = false;
+ }
+ make_db(close_env);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log2_abort.cc b/storage/tokudb/PerconaFT/src/tests/test_log2_abort.cc
new file mode 100644
index 00000000..aa197d33
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log2_abort.cc
@@ -0,0 +1,76 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Like test_log2 except abort. */
+/* This is very specific to PerconaFT. It won't work with Berkeley DB. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+static void make_db (void) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ make_db();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log3.cc b/storage/tokudb/PerconaFT/src/tests/test_log3.cc
new file mode 100644
index 00000000..da5c6c23
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log3.cc
@@ -0,0 +1,91 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Test to see if we can do logging and recovery. */
+/* This is very specific to PerconaFT. It won't work with Berkeley DB. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+static void make_db (bool close_env) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ {
+ DBT key,data;
+ dbt_init(&key, "hello", sizeof "hello");
+ dbt_init(&data, "there", sizeof "there");
+ r=db->put(db, tid, &key, &data, 0); assert(r==0);
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ if (close_env) {
+ r=env->close(env, 0); assert(r==0);
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ bool close_env = true;
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "--no-shutdown") == 0)
+ close_env = false;
+ }
+ make_db(close_env);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log3_abort.cc b/storage/tokudb/PerconaFT/src/tests/test_log3_abort.cc
new file mode 100644
index 00000000..de2793ed
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log3_abort.cc
@@ -0,0 +1,93 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Like test_log3 except do abort */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+static void make_db (void) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ {
+ DBT key,data;
+ dbt_init(&key, "hello", sizeof "hello");
+ dbt_init(&data, "there", sizeof "there");
+ r=db->put(db, tid, &key, &data, 0); assert(r==0);
+ }
+ r=tid->abort(tid); assert(r==0);
+
+ // Now see that the string isn't there.
+ {
+ DBT key,data;
+ dbt_init(&key, "hello", sizeof "hello");
+ dbt_init(&data, NULL, 0);
+ r=db->get(db, 0, &key, &data, 0);
+ assert(r==DB_NOTFOUND);
+ }
+
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ make_db();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log4.cc b/storage/tokudb/PerconaFT/src/tests/test_log4.cc
new file mode 100644
index 00000000..f971eddb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log4.cc
@@ -0,0 +1,99 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Test to see if we can do logging and recovery. */
+/* This is very specific to PerconaFT. It won't work with Berkeley DB. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+static void make_db (bool close_env) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+ int i;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+
+ for (i=0; i<20000; i++) {
+ char hello[30], there[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", random(), i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ r=db->put(db, tid, &key, &data, 0); CKERR(r);
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ if (close_env) {
+ r=env->close(env, 0); assert(r==0);
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ bool close_env = true;
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "--no-shutdown") == 0)
+ close_env = false;
+ }
+ make_db(close_env);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log4_abort.cc b/storage/tokudb/PerconaFT/src/tests/test_log4_abort.cc
new file mode 100644
index 00000000..e9b7ee88
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log4_abort.cc
@@ -0,0 +1,104 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Like test_log4, except abort */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+#define N 20000
+long random_nums[N];
+
+static void make_db (void) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+ int i;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+
+ for (i=0; i<N; i++) {
+ char hello[30], there[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", (random_nums[i]=random()), i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ r=db->put(db, tid, &key, &data, 0); CKERR(r);
+ }
+ r=tid->abort(tid); assert(r==0);
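+ // After the abort none of the keys inserted above should be visible; rebuild
+ // each key from the stored random_nums[i] so we probe exactly the keys that
+ // were inserted.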
+ for (i=0; i<N; i++) {
+ char hello[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", (random_nums[i]=random()), i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ r=db->get(db, 0, &key, &data, 0);
+ assert(r==DB_NOTFOUND);
+ }
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ make_db();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log5.cc b/storage/tokudb/PerconaFT/src/tests/test_log5.cc
new file mode 100644
index 00000000..eeb173be
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log5.cc
@@ -0,0 +1,118 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Test to see if we can do logging and recovery. */
+/* This is very specific to PerconaFT. It won't work with Berkeley DB. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+struct in_db;
+struct in_db {
+ long int r;
+ int i;
+ struct in_db *next;
+} *items=0;
+
+static void make_db (bool close_env) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+ int i;
+
+ int maxcount = 24073;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+
+ for (i=0; i<maxcount; i++) {
+ char hello[30], there[30];
+ DBT key,data;
+ struct in_db *XMALLOC(newitem);
+ newitem->r = random();
+ newitem->i = i;
+ newitem->next = items;
+ items = newitem;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", newitem->r, newitem->i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ r=db->put(db, tid, &key, &data, 0); assert(r==0);
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ if (close_env) {
+ r=env->close(env, 0); assert(r==0);
+ }
+ while (items) {
+ struct in_db *next=items->next;
+ toku_free(items);
+ items=next;
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ bool close_env = true;
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "--no-shutdown") == 0)
+ close_env = false;
+ }
+ make_db(close_env);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log5_abort.cc b/storage/tokudb/PerconaFT/src/tests/test_log5_abort.cc
new file mode 100644
index 00000000..790b43eb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log5_abort.cc
@@ -0,0 +1,123 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Like test_log5 except abort. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+struct in_db;
+struct in_db {
+ long int r;
+ int i;
+ struct in_db *next;
+} *items=0;
+
+static void make_db (void) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+ int i;
+
+ int maxcount = 24073;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+
+ for (i=0; i<maxcount; i++) {
+ char hello[30], there[30];
+ DBT key,data;
+ struct in_db *XMALLOC(newitem);
+ newitem->r = random();
+ newitem->i = i;
+ newitem->next = items;
+ items = newitem;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", newitem->r, newitem->i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ r=db->put(db, tid, &key, &data, 0); assert(r==0);
+ }
+ r=tid->abort(tid); assert(r==0);
+ {
+ struct in_db *l=items;
+ for (l=items; l; l=l->next) {
+ char hello[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", l->r, l->i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ r=db->get(db, 0, &key, &data, 0);
+ assert(r==DB_NOTFOUND);
+ }
+ }
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+ while (items) {
+ struct in_db *next=items->next;
+ toku_free(items);
+ items=next;
+ }
+}
+
+int
+test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ make_db();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log6.cc b/storage/tokudb/PerconaFT/src/tests/test_log6.cc
new file mode 100644
index 00000000..c8cb697b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log6.cc
@@ -0,0 +1,158 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Test to see if we can do logging and recovery. */
+/* This is very specific to PerconaFT. It won't work with Berkeley DB. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+#ifndef DB_DELETE_ANY
+#define DB_DELETE_ANY 0
+#endif
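+// DB_DELETE_ANY is a PerconaFT extension (delete without first checking that
+// the key is present); when db.h does not provide it, fall back to 0 so
+// del_n() below does an ordinary checked delete.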
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+struct in_db;
+struct in_db {
+ long int r;
+ int i;
+ struct in_db *next;
+} *items=0, *deleted_items=0;
+
+static void put_n (DB *db, DB_TXN *tid, int i) {
+ char hello[30], there[30];
+ DBT key,data;
+ struct in_db *XMALLOC(newitem);
+ newitem->r = random();
+ newitem->i = i;
+ newitem->next = items;
+ items = newitem;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", newitem->r, newitem->i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ int r=db->put(db, tid, &key, &data, 0); assert(r==0);
+}
+
+static void del_n (DB *db, DB_TXN *tid, int i) {
+ // Move it to deleted items if it is present.
+ struct in_db *present;
+ struct in_db **prevp;
+ for ((prevp=&items), (present=items);
+ present;
+ (prevp=&present->next), (present=present->next)) {
+ if (present->i==i) {
+ // Remove it
+ struct in_db *next = present->next;
+ present->next = deleted_items;
+ deleted_items = present;
+ *prevp = next;
+
+ char hello[30];
+ DBT key;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", present->r, i);
+ memset(&key, 0, sizeof(key));
+ key.data = hello; key.size = strlen(hello)+1;
+ int r = db->del(db, tid, &key, DB_DELETE_ANY); assert(r==0);
+
+ return;
+ }
+ }
+}
+
+static void make_db (bool close_env) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+ int i;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+
+ for (i=0; i<1; i++) {
+ put_n(db, tid, i);
+ if (random()%3==0) {
+ del_n(db, tid, random()%(i+1));
+ }
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ if (close_env) {
+ r=env->close(env, 0); assert(r==0);
+ }
+ while (items) {
+ struct in_db *next=items->next;
+ toku_free(items);
+ items=next;
+ }
+
+ while (deleted_items) {
+ struct in_db *next=deleted_items->next;
+ toku_free(deleted_items);
+ deleted_items=next;
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ bool close_env = true;
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "--no-shutdown") == 0)
+ close_env = false;
+ }
+ make_db(close_env);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log6_abort.cc b/storage/tokudb/PerconaFT/src/tests/test_log6_abort.cc
new file mode 100644
index 00000000..67a746cd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log6_abort.cc
@@ -0,0 +1,163 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Like test_log6 except abort. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+#ifndef DB_DELETE_ANY
+#define DB_DELETE_ANY 0
+#endif
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+struct in_db;
+struct in_db {
+ long int r;
+ int i;
+ struct in_db *next;
+} *items=0, *deleted_items=0;
+
+static void put_n (DB *db, DB_TXN *tid, int i) {
+ char hello[30], there[30];
+ DBT key,data;
+ struct in_db *XMALLOC(newitem);
+ newitem->r = random();
+ newitem->i = i;
+ newitem->next = items;
+ items = newitem;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", newitem->r, newitem->i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ int r=db->put(db, tid, &key, &data, 0); assert(r==0);
+}
+
+static void del_n (DB *db, DB_TXN *tid, int i) {
+ // Move it to deleted items if it is present.
+ struct in_db *present;
+ struct in_db **prevp;
+ for ((prevp=&items), (present=items);
+ present;
+ (prevp=&present->next), (present=present->next)) {
+ if (present->i==i) {
+ // Remove it
+ struct in_db *next = present->next;
+ present->next = deleted_items;
+ deleted_items = present;
+ *prevp = next;
+
+ char hello[30];
+ DBT key;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", present->r, i);
+ memset(&key, 0, sizeof(key));
+ key.data = hello; key.size = strlen(hello)+1;
+ int r = db->del(db, tid, &key, DB_DELETE_ANY); assert(r==0);
+
+ return;
+ }
+ }
+}
+
+static void make_db (void) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+ int i;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+
+ for (i=0; i<1; i++) {
+ put_n(db, tid, i);
+ if (random()%3==0) {
+ del_n(db, tid, random()%(i+1));
+ }
+ }
+ r=tid->abort(tid); assert(r==0);
+ {
+ struct in_db *l=items;
+ for (l=items; l; l=l->next) {
+ char hello[30];
+ DBT key,data;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ snprintf(hello, sizeof(hello), "hello%ld.%d", l->r, i);
+ r = db->get(db, 0, &key, &data, 0);
+ assert(r==DB_NOTFOUND);
+ }
+ }
+
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+ while (items) {
+ struct in_db *next=items->next;
+ toku_free(items);
+ items=next;
+ }
+
+ while (deleted_items) {
+ struct in_db *next=deleted_items->next;
+ toku_free(deleted_items);
+ deleted_items=next;
+ }
+}
+
+int
+test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ make_db();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log6a_abort.cc b/storage/tokudb/PerconaFT/src/tests/test_log6a_abort.cc
new file mode 100644
index 00000000..7ca04251
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log6a_abort.cc
@@ -0,0 +1,338 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* Like test_log6, except that some transactions are aborted and others are
+ * committed (unlike test_log6_abort, which aborts everything). */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <search.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+
+#ifndef DB_DELETE_ANY
+#define DB_DELETE_ANY 0
+#endif
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+// How many iterations of insertions and deletions to do. This bounds the number of distinct keys in the DB.
+#define N 1000
+
+static int n_keys_mentioned=0;
+static int random_keys_mentioned[N];
+
+static DB *pending_i, *pending_d, *committed;
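+// Bookkeeping used by the helpers below: pending_i holds key/value pairs
+// inserted by the in-flight transaction, pending_d holds keys it has deleted,
+// and committed models what the main db should contain after the commits so
+// far.  commit_items() drains pending_i into committed and applies
+// pending_d's deletes; abort_items() discards the pending entries instead of
+// applying them.  The peni_data/pend_data/com_data arrays below mirror the
+// same sets in memory.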
+
+// Keep track of what's in the committed database separately
+struct pair {int x,y;};
+
+static void
+insert_in_mem (int x, int y, int *count, struct pair *pairs) {
+ assert(*count<N);
+ pairs[(*count)++]=(struct pair){x,y};
+}
+static void
+delete_in_mem (int x, int *count, struct pair *pairs) {
+ int i;
+ for (i=0; i<*count; i++) {
+ if (pairs[i].x==x) {
+ pairs[i]=pairs[--(*count)];
+ return;
+ }
+ }
+}
+
+static int com_count=0, pend_count=0, peni_count=0;
+static struct pair com_data[N], pend_data[N], peni_data[N];
+
+static void
+insert_pending (int key, int val, DB_TXN *bookx) {
+ DBT keyd,datad;
+ //printf("IP %u,%u\n", key,val);
+
+ insert_in_mem(key, val, &peni_count, peni_data);
+ pending_i->put(pending_i, bookx,
+ dbt_init(&keyd, &key, sizeof(key)),
+ dbt_init(&datad, &val, sizeof(val)),
+ 0);
+
+ delete_in_mem(key, &pend_count, pend_data);
+ pending_d->del(pending_d, bookx,
+ dbt_init(&keyd, &key, sizeof(key)),
+ 0);
+}
+
+static void put_a_random_item (DB *db, DB_TXN *tid, int i, DB_TXN *bookx) {
+ char hello[30], there[30];
+ DBT key,data;
+ int randv = myrandom();
+ random_keys_mentioned[n_keys_mentioned++] = randv;
+ insert_pending(randv, i, bookx);
+ //printf("Insert %u\n", randv);
+ snprintf(hello, sizeof(hello), "hello%d.%d", randv, i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ int r=db->put(db, tid, &key, &data, 0);
+ if (r!=0) printf("%s:%d i=%d r=%d (%s)\n", __FILE__, __LINE__, i, r, strerror(r));
+ assert(r==0);
+}
+
+static void delete_a_random_item (DB *db, DB_TXN *tid, DB_TXN *bookx) {
+ if (n_keys_mentioned==0) return;
+ int ridx = myrandom()%n_keys_mentioned;
+ int randv = random_keys_mentioned[ridx];
+ DBT keyd;
+ DBT vald;
+ //printf("Delete %u\n", randv);
+ dbt_init(&keyd, &randv, sizeof(randv));
+ dbt_init(&vald, &randv, sizeof(randv));
+
+ pending_i->del(pending_i, bookx, &keyd, 0);
+ delete_in_mem(randv, &peni_count, peni_data);
+
+ pending_d->put(pending_d, bookx, &keyd, &vald, 0);
+ insert_in_mem(randv, randv, &pend_count, pend_data);
+
+ db->del(db, tid, &keyd, DB_DELETE_ANY);
+}
+
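+// Move every pair recorded in pending_i into the committed database and apply
+// every key recorded in pending_d as a delete against it, emptying both pending
+// databases inside a single transaction.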
+static void commit_items (DB_ENV *env, int UU(i)) {
+ //printf("commit_items %d\n", i);
+ DB_TXN *txn;
+ int r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ DBC *cursor;
+ r = pending_i->cursor(pending_i, txn, &cursor, 0); assert(r==0);
+ DBT k,v;
+ memset(&k,0,sizeof(k));
+ memset(&v,0,sizeof(v));
+ //printf("%d items in peni\n", peni_count);
+ while (cursor->c_get(cursor, &k, &v, DB_FIRST)==0) {
+ assert(k.size==4);
+ assert(v.size==4);
+ int ki=*(int*)k.data;
+ int vi=*(int*)v.data;
+ //printf(" put %u %u\n", ki, vi);
+ r=committed->put(committed, txn, dbt_init(&k, &ki, sizeof(ki)), dbt_init(&v, &vi, sizeof(vi)), 0);
+ insert_in_mem(ki, vi, &com_count, com_data);
+ assert(r==0);
+ r=pending_i->del(pending_i, txn, &k, 0);
+ assert(r==0);
+ }
+ r=cursor->c_close(cursor);
+ assert(r==0);
+
+ r = pending_d->cursor(pending_d, txn, &cursor, 0); assert(r==0);
+ memset(&k,0,sizeof(k));
+ memset(&v,0,sizeof(v));
+ while (cursor->c_get(cursor, &k, &v, DB_FIRST)==0) {
+ assert(k.size==4);
+ assert(v.size==4);
+ int ki=*(int*)k.data;
+ int vi=*(int*)v.data;
+ assert(ki==vi);
+ //printf(" del %u\n", ki);
+ committed->del(committed, txn, dbt_init(&k, &ki, sizeof(ki)), 0);
+ delete_in_mem(ki, &com_count, com_data);
+ // ignore result from that del
+ r=pending_d->del(pending_d, txn, &k, 0);
+ assert(r==0);
+ }
+ r=cursor->c_close(cursor);
+ assert(r==0);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
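+// Discard the pending work: empty both pending_i and pending_d without touching
+// the committed database.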
+static void abort_items (DB_ENV *env) {
+ DB_TXN *txn;
+ int r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ //printf("abort_items\n");
+ DBC *cursor;
+ r = pending_i->cursor(pending_i, txn, &cursor, 0); assert(r==0);
+ DBT k,v;
+ memset(&k,0,sizeof(k));
+ memset(&v,0,sizeof(v));
+ while (cursor->c_get(cursor, &k, &v, DB_FIRST)==0) {
+ assert(k.size==4);
+ assert(v.size==4);
+ int ki=*(int*)k.data;
+ //printf("Deleting %u\n", ki);
+ r=pending_i->del(pending_i, txn, dbt_init(&k, &ki, sizeof(ki)), 0);
+ assert(r==0);
+ }
+ r=cursor->c_close(cursor);
+ assert(r==0);
+
+ r = pending_d->cursor(pending_d, txn, &cursor, 0); assert(r==0);
+ memset(&k,0,sizeof(k));
+ memset(&v,0,sizeof(v));
+ while (cursor->c_get(cursor, &k, &v, DB_FIRST)==0) {
+ assert(k.size==4);
+ assert(v.size==4);
+ int ki=*(int*)k.data;
+ r=pending_d->del(pending_d, txn, dbt_init(&k, &ki, sizeof(ki)), 0);
+ assert(r==0);
+ }
+ r=cursor->c_close(cursor);
+ assert(r==0);
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static int
+compare_pairs (const void *a, const void *b) {
+ return memcmp(a,b,4);
+}
+
+static void verify_items (DB_ENV *env, DB *db) {
+ DB_TXN *txn;
+ int r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ DBC *cursor;
+ DBT k,v;
+ memset(&k,0,sizeof(k));
+ memset(&v,0,sizeof(v));
+
+#if 0
+ r=db->cursor(db, txn, &cursor, 0);
+ assert(r==0);
+ while (cursor->c_get(cursor, &k, &v, DB_NEXT)==0) {
+ }
+ r=cursor->c_close(cursor);
+ assert(r==0);
+#endif
+
+ r = committed->cursor(committed, txn, &cursor, 0);
+ assert(r==0);
+ qsort(com_data, com_count, sizeof(com_data[0]), compare_pairs);
+ int curscount=0;
+ //printf(" count=%d\n", com_count);
+ while (cursor->c_get(cursor, &k, &v, DB_NEXT)==0) {
+ int kv=*(int*)k.data;
+ int dv=*(int*)v.data;
+ //printf(" sorted com_data[%d]=%d, cursor got %d\n", curscount, com_data[curscount].x, kv);
+ assert(com_data[curscount].x==kv);
+ DBT k2,v2;
+ memset(&k2, 0, sizeof(k2));
+ memset(&v2, 0, sizeof(v2));
+ char hello[30], there[30];
+ snprintf(hello, sizeof(hello), "hello%d.%d", kv, dv);
+ snprintf(there, sizeof(there), "there%d", dv);
+ k2.data = hello; k2.size=strlen(hello)+1;
+ //printf("committed: %u,%u\n", kv, dv);
+ r=db->get(db, txn, &k2, &v2, 0);
+ assert(r==0);
+ assert(strcmp((char*)v2.data, there)==0);
+ curscount++;
+ }
+ assert(curscount==com_count);
+ r=cursor->c_close(cursor);
+ assert(r==0);
+
+ r=txn->commit(txn, 0); assert(r==0);
+}
+
+static void make_db (void) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid, *bookx;
+ int r;
+ int i;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db_create(&pending_i, env, 0); CKERR(r);
+ r=db_create(&pending_d, env, 0); CKERR(r);
+ r=db_create(&committed, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=pending_i->open(pending_i, tid, "pending_i.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=pending_d->open(pending_d, tid, "pending_d.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=committed->open(committed, tid, "committed.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &bookx, 0); assert(r==0);
+
+ for (i=0; i<N; i++) {
+ int randv = myrandom();
+ //if (i%10000==0) printf(".");
+ if (randv%100==0) {
+ r=tid->abort(tid); assert(r==0);
+ r=bookx->commit(bookx, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &bookx, 0); assert(r==0);
+ abort_items(env);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ } else if (randv%1000==1) {
+ r=tid->commit(tid, 0); assert(r==0);
+ r=bookx->commit(bookx, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &bookx, 0); assert(r==0);
+ commit_items(env, i);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ } else if (randv%3==0) {
+ delete_a_random_item(db, tid, bookx);
+ } else {
+ put_a_random_item(db, tid, i, bookx);
+ }
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ r=bookx->commit(bookx, 0); assert(r==0);
+ commit_items(env, i);
+ verify_items(env, db);
+
+ r=pending_i->close(pending_i, 0); assert(r==0);
+ r=pending_d->close(pending_d, 0); assert(r==0);
+ r=committed->close(committed, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ make_db();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log7.cc b/storage/tokudb/PerconaFT/src/tests/test_log7.cc
new file mode 100644
index 00000000..91ce714f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log7.cc
@@ -0,0 +1,123 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Test to see if we can do logging and recovery. */
+/* This is very specific to PerconaFT. It won't work with Berkeley DB. */
+/* This test_log7 is like test_log5 except maxcount is larger. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+struct in_db;
+struct in_db {
+ long int r;
+ int i;
+ struct in_db *next;
+} *items=0;
+
+static void make_db (bool close_env) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+ int i;
+
+ int maxcount = 100000;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+
+ for (i=0; i<maxcount; i++) {
+ char hello[30], there[30];
+ DBT key,data;
+ struct in_db *XMALLOC(newitem);
+ newitem->r = random();
+ newitem->i = i;
+ newitem->next = items;
+ items = newitem;
+ snprintf(hello, sizeof(hello), "hello%ld.%d", newitem->r, newitem->i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ r=db->put(db, tid, &key, &data, 0); assert(r==0);
+ // BDB cannot handle this huge transaction even with a lot of locks.
+ if (i%1000==599) {
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ }
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ if (close_env) {
+ r=env->close(env, 0); assert(r==0);
+ }
+ while (items) {
+ struct in_db *next=items->next;
+ toku_free(items);
+ items=next;
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ bool close_env = true;
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "--no-shutdown") == 0)
+ close_env = false;
+ }
+ make_db(close_env);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log8.cc b/storage/tokudb/PerconaFT/src/tests/test_log8.cc
new file mode 100644
index 00000000..35a877be
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log8.cc
@@ -0,0 +1,147 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Test to see if we can do logging and recovery. */
+/* This is very specific to PerconaFT. It won't work with Berkeley DB. */
+/* This test_log8 inserts into a db, closes, reopens, and inserts more into the db. We want to make sure that the recovery of the buffers works. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+struct in_db;
+struct in_db {
+ long int r;
+ int i;
+ struct in_db *next;
+} *items=0;
+
+int maxcount = 10;
+
+static void insert_some (int outeri, bool close_env) {
+ uint32_t create_flag = outeri%2 ? DB_CREATE : 0; // Sometimes use DB_CREATE, sometimes don't.
+ int r;
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ r=db_env_create(&env, 0); assert(r==0);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|create_flag, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, create_flag, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+
+ int i;
+ for (i=0; i<maxcount; i++) {
+ char hello[30], there[30];
+ DBT key,data;
+ struct in_db *XMALLOC(newitem);
+ newitem->r = random();
+ newitem->i = i;
+ newitem->next = items;
+ items = newitem;
+ snprintf(hello, sizeof(hello), "hello%ld.%d.%d", newitem->r, outeri, newitem->i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ r=db->put(db, tid, &key, &data, 0); assert(r==0);
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ if (close_env) {
+ r=env->close(env, 0); assert(r==0);
+ }
+}
+
+static void make_db (bool close_env) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+ int i;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); CKERR(r);
+ if (close_env) {
+ r=env->close(env, 0); CKERR(r);
+ }
+
+ for (i=0; i<1; i++)
+ insert_some(i, close_env);
+
+ while (items) {
+ struct in_db *next=items->next;
+ toku_free(items);
+ items=next;
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ bool close_env = true;
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "--no-shutdown") == 0)
+ close_env = false;
+ }
+ make_db(close_env);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_log9.cc b/storage/tokudb/PerconaFT/src/tests/test_log9.cc
new file mode 100644
index 00000000..c9df578a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_log9.cc
@@ -0,0 +1,146 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
+/* Test to see if we can do logging and recovery. */
+/* This is very specific to PerconaFT. It won't work with Berkeley DB. */
+/* This test_log9 inserts into a db, closes, reopens, and inserts more into the db. We want to make sure that the recovery of the buffers works. */
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <memory.h>
+
+// TOKU_TEST_FILENAME is defined in the Makefile
+
+struct in_db;
+struct in_db {
+ long int r;
+ int i;
+ struct in_db *next;
+} *items=0;
+
+int maxcount = 10;
+
+static void insert_some (int outeri, bool close_env) {
+ uint32_t create_flag = outeri%2 ? DB_CREATE : 0; // Sometimes use DB_CREATE, sometimes don't.
+ int r;
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ r=db_env_create(&env, 0); assert(r==0);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE|create_flag, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, create_flag, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+
+ int i;
+ for (i=0; i<maxcount; i++) {
+ char hello[30], there[30];
+ DBT key,data;
+ struct in_db *XMALLOC(newitem);
+ newitem->r = random();
+ newitem->i = i;
+ newitem->next = items;
+ items = newitem;
+ snprintf(hello, sizeof(hello), "hello%ld.%d.%d", newitem->r, outeri, newitem->i);
+ snprintf(there, sizeof(there), "there%d", i);
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ key.data = hello; key.size=strlen(hello)+1;
+ data.data = there; data.size=strlen(there)+1;
+ r=db->put(db, tid, &key, &data, 0); assert(r==0);
+ }
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ if (close_env) {
+ r=env->close(env, 0); assert(r==0);
+ }
+}
+
+static void make_db (bool close_env) {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+ int r;
+ int i;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ db_env_enable_engine_status(0); // disable engine status on crash because test is expected to fail
+
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); CKERR(r);
+ if (close_env) {
+ r=env->close(env, 0); CKERR(r);
+ }
+
+ for (i=0; i<2; i++)
+ insert_some(i, close_env);
+
+ while (items) {
+ struct in_db *next=items->next;
+ toku_free(items);
+ items=next;
+ }
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ bool close_env = true;
+ for (int i=1; i<argc; i++) {
+ if (strcmp(argv[i], "--no-shutdown") == 0)
+ close_env = false;
+ }
+ make_db(close_env);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_logflush.cc b/storage/tokudb/PerconaFT/src/tests/test_logflush.cc
new file mode 100644
index 00000000..f92c3ec7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_logflush.cc
@@ -0,0 +1,97 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <db.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+// Return 0 if the string is found in the log files, -1 otherwise.
+static int
+grep_for_in_logs (const char *str) {
+#define lfname "log000000000000.tokulog[0-9]*"
+#define COMMAND "grep -F -q"
+ char lname[TOKU_PATH_MAX+1];
+ toku_path_join(lname, 2, TOKU_TEST_FILENAME, lfname);
+ char cmd[strlen(str) + sizeof(COMMAND " \"\" ") + TOKU_PATH_MAX];
+ int bytes = snprintf(cmd, sizeof(cmd), COMMAND " \"%s\" %s", str, lname);
+ assert(bytes>=0);
+ assert((size_t)bytes<sizeof(cmd));
+ int r = system(cmd);
+ assert(r!=-1);
+ if (r>0) r = -1;
+ return r;
+}
+
+int
+test_main (int UU(argc), char UU(*const argv[])) {
+ int r;
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ {
+ DBT key,data;
+ char hello[]="hello";
+ char there[]="there";
+ r=env->txn_begin(env, 0, &tid, 0); CKERR(r);
+ r=db->put(db, tid,
+ dbt_init(&key, hello, sizeof(hello)),
+ dbt_init(&data, there, sizeof(there)),
+ 0);
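+ // The put's log record has not been flushed to the on-disk log files yet, so
+ // grepping the logs for the key is expected to fail; after env->log_flush it
+ // is expected to succeed.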
+ r=grep_for_in_logs(hello);
+ assert(r==-1);
+ r=env->log_flush(env, 0); CKERR(r);
+ r=grep_for_in_logs(hello);
+ assert(r>=0);
+ r=tid->commit(tid, 0); CKERR(r);
+ }
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_logmax.cc b/storage/tokudb/PerconaFT/src/tests/test_logmax.cc
new file mode 100644
index 00000000..133eb36d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_logmax.cc
@@ -0,0 +1,144 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <db.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+static void
+check_logmax (int max) {
+ int any_too_big=0;
+ DIR *dir = opendir(TOKU_TEST_FILENAME);
+ struct dirent *ent;
+ while ((ent=readdir(dir))) {
+ if ((ent->d_type==DT_REG || ent->d_type==DT_UNKNOWN) && strncmp(ent->d_name, "log", 3)==0) {
+ // It is a "log*" file
+ char full_fname[TOKU_PATH_MAX + 1];
+ toku_struct_stat sbuf;
+ int r = toku_stat(
+ toku_path_join(full_fname, 2, TOKU_TEST_FILENAME, ent->d_name),
+ &sbuf,
+ toku_uninstrumented);
+ assert(r == 0);
+ if (verbose)
+ printf("%s is of size %" PRId64 "\n",
+ ent->d_name,
+ (int64_t)sbuf.st_size);
+ if (sbuf.st_size > max) any_too_big=1;
+ }
+ }
+ assert(!any_too_big);
+ int r=closedir(dir);
+ assert(r==0);
+}
+
+static void
+test_logmax (int logmax) {
+ int r;
+ DB_ENV *env;
+ DB *db;
+ DB_TXN *tid;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+ r=db_env_create(&env, 0); assert(r==0);
+ if (logmax>0) {
+ r=env->set_lg_max(env, logmax);
+ assert(r==0);
+ }
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ {
+ uint32_t lmax;
+ r=env->get_lg_max(env, &lmax);
+ assert(r==0);
+ if (logmax>0) {
+ assert(lmax==(uint32_t)logmax);
+ } else {
+ assert(lmax>0);
+
+ }
+ }
+ r=db_create(&db, env, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &tid, 0); assert(r==0);
+ r=db->open(db, tid, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=tid->commit(tid, 0); assert(r==0);
+
+ int i;
+ int sum = 0;
+ int effective_max;
+ if (logmax>0) effective_max = logmax;
+ else {
+ effective_max = 100<<20;
+ }
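+ // Insert roughly 1.5x effective_max bytes of data, then verify (in check_logmax
+ // below) that no single log file exceeds effective_max. When no explicit lg_max
+ // was requested, assume the default limit is no larger than 100MB.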
+
+ r=env->txn_begin(env, 0, &tid, 0); CKERR(r);
+ char there[1000];
+ memset(there, 'a',sizeof(there));
+ there[999]=0;
+ for (i=0; sum<(effective_max*3)/2; i++) {
+ DBT key,data;
+ char hello[20];
+ snprintf(hello, 20, "hello%d", i);
+ r=db->put(db, tid,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, sizeof(there)),
+ 0);
+ assert(r==0);
+ sum+=strlen(hello)+1+sizeof(there);
+ if ((i+1)%10==0) {
+ r=tid->commit(tid, 0); assert(r==0);
+ r=env->txn_begin(env, 0, &tid, 0); CKERR(r);
+ }
+ }
+ if (verbose) printf("i=%d sum=%d effmax=%d\n", i, sum, effective_max);
+ r=tid->commit(tid, 0); assert(r==0);
+ r=db->close(db, 0); assert(r==0);
+ r=env->close(env, 0); assert(r==0);
+ check_logmax(effective_max);
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test_logmax(1<<20);
+ test_logmax(-1);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_memcmp_magic.cc b/storage/tokudb/PerconaFT/src/tests/test_memcmp_magic.cc
new file mode 100644
index 00000000..8b56e716
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_memcmp_magic.cc
@@ -0,0 +1,169 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include "util/dbt.h"
+
+static void test_memcmp_magic(void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, 0); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+
+ // Can't set the memcmp magic to 0 (since it's used as a sentinel for `none')
+ r = db->set_memcmp_magic(db, 0); CKERR2(r, EINVAL);
+
+ // Should be ok to set it more than once, even to different things, before opening.
+ r = db->set_memcmp_magic(db, 1); CKERR(r);
+ r = db->set_memcmp_magic(db, 2); CKERR(r);
+ r = db->open(db, NULL, "db", "db", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ // Can't set the memcmp magic after opening.
+ r = db->set_memcmp_magic(db, 0); CKERR2(r, EINVAL);
+ r = db->set_memcmp_magic(db, 1); CKERR2(r, EINVAL);
+
+ DB *db2;
+ r = db_create(&db2, env, 0); CKERR(r);
+ r = db2->set_memcmp_magic(db2, 3); CKERR(r); // ..we can try setting it to something different
+ // ..but it should fail to open
+ r = db2->open(db2, NULL, "db", "db", DB_BTREE, DB_CREATE, 0666); CKERR2(r, EINVAL);
+ r = db2->set_memcmp_magic(db2, 2); CKERR(r);
+ r = db2->open(db2, NULL, "db", "db", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ r = db2->close(db2, 0);
+ r = db->close(db, 0); CKERR(r);
+
+ // dbremove opens its own handle internally. Ensure that the open
+ // operation succeeds (and so does dbremove) despite the fact that the
+ // internal open does not set the memcmp magic.
+ r = env->dbremove(env, NULL, "db", "db", 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+static int comparison_function_unused(DB *UU(db), const DBT *UU(a), const DBT *UU(b)) {
+ // We're testing that the memcmp magic gets used so the real
+ // comparison function should never get called.
+ invariant(false);
+ return 0;
+}
+
+static int getf_key_cb(const DBT *key, const DBT *UU(val), void *extra) {
+ DBT *dbt = reinterpret_cast<DBT *>(extra);
+ toku_clone_dbt(dbt, *key);
+ return 0;
+}
+
+static void test_memcmp_magic_sort_order(void) {
+ int r;
+
+ // Verify that randomly generated integer keys are sorted in memcmp
+ // order when packed as little endian, even with an environment-wide
+ // comparison function that sorts as though keys are big-endian ints.
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, comparison_function_unused); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, 0); CKERR(r);
+
+ const int magic = 49;
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->set_memcmp_magic(db, magic); CKERR(r);
+ r = db->open(db, NULL, "db", "db", DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ for (int i = 0; i < 10000; i++) {
+ char buf[1 + sizeof(int)];
+ // Serialize key to first have the magic byte, then the little-endian key.
+ int k = toku_htonl(random());
+ buf[0] = magic;
+ memcpy(&buf[1], &k, sizeof(int));
+
+ DBT key;
+ dbt_init(&key, buf, sizeof(buf));
+ r = db->put(db, NULL, &key, &key, 0); CKERR(r);
+ }
+
+ DB_TXN *txn;
+ env->txn_begin(env, NULL, &txn, 0);
+ DBC *dbc;
+ db->cursor(db, txn, &dbc, 0);
+ DBT prev_dbt, curr_dbt;
+ memset(&curr_dbt, 0, sizeof(DBT));
+ memset(&prev_dbt, 0, sizeof(DBT));
+ while (dbc->c_getf_next(dbc, 0, getf_key_cb, &curr_dbt)) {
+ invariant(curr_dbt.size == sizeof(int));
+ if (prev_dbt.data != NULL) {
+ // Each key should be >= to the last using memcmp
+ int c = memcmp(prev_dbt.data, curr_dbt.data, sizeof(int));
+ invariant(c <= 0);
+ }
+ toku_destroy_dbt(&prev_dbt);
+ prev_dbt = curr_dbt;
+ }
+ toku_destroy_dbt(&curr_dbt);
+ toku_destroy_dbt(&prev_dbt);
+ dbc->c_close(dbc);
+ txn->commit(txn, 0);
+
+ r = db->close(db, 0); CKERR(r);
+
+ // dbremove opens its own handle internally. Ensure that the open
+ // operation succeeds (and so does dbremove) despite the fact that the
+ // internal open does not set the memcmp magic.
+ r = env->dbremove(env, NULL, "db", "db", 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ test_memcmp_magic();
+ test_memcmp_magic_sort_order();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_mostly_seq.cc b/storage/tokudb/PerconaFT/src/tests/test_mostly_seq.cc
new file mode 100644
index 00000000..55b2943e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_mostly_seq.cc
@@ -0,0 +1,110 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <sys/stat.h>
+#include <time.h>
+#include <db.h>
+
+static void
+seqinsert (int n, float p) {
+ if (verbose) printf("%s %d %f\n", __FUNCTION__, n, p);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL + DB_PRIVATE + DB_CREATE, 077); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+
+ r = db->open(db, 0, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+
+ int i;
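+ // Insert even keys 2..2n in ascending order; with probability p also insert
+ // the immediately preceding odd key, so the workload is mostly sequential.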
+ for (i = 2; i <= 2*n; i += 2) {
+ int k = htonl(i);
+ int v = i;
+ DBT key, val;
+ r = db->put(db, 0, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0); assert(r == 0);
+ if (random() <= static_cast<float>(RAND_MAX) * p) {
+ k = htonl(i-1);
+ v = i-1;
+ r = db->put(db, 0, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0); assert(r == 0);
+ }
+ }
+
+ r = db->close(db, 0); assert(r == 0);
+
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ srandom(time(0));
+ int i;
+ for (i=1; i<argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-seed") == 0) {
+ if (i+1 >= argc) return 1;
+ srandom(atoi(argv[++i]));
+ continue;
+ }
+ }
+
+ int nodesize = 1024*1024;
+ int entrysize = 25;
+ int d = nodesize/entrysize;
+ int n = d + d/4;
+
+ float ps[] = { 0.0, 0.0001, 0.001, 0.01, 0.1, 0.25, 0.5, 1 };
+ for (i=0; i<(int)(sizeof ps / sizeof (float)); i++) {
+ seqinsert(n, ps[i]);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_multiple_checkpoints_block_commit.cc b/storage/tokudb/PerconaFT/src/tests/test_multiple_checkpoints_block_commit.cc
new file mode 100644
index 00000000..da53a7bf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_multiple_checkpoints_block_commit.cc
@@ -0,0 +1,143 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that committing a transaction is not blocked while checkpoints are in progress
+
+#include "test.h"
+#include "toku_pthread.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
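+ // Stall each checkpoint for roughly 10 seconds so both checkpoints are still
+ // in progress when the main thread commits its transaction below.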
+ usleep(10*1024*1024);
+}
+
+static void *run_checkpoint(void *arg) {
+ int r = env->txn_checkpoint(env, 0, 0, 0);
+ assert_zero(r);
+ return arg;
+}
+
+static uint64_t tdelta_usec(struct timeval *tend, struct timeval *tstart) {
+ uint64_t t = tend->tv_sec * 1000000 + tend->tv_usec;
+ t -= tstart->tv_sec * 1000000 + tstart->tv_usec;
+ return t;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static void run_test(void) {
+ DB* db = NULL;
+
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_create, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ DBT key, val;
+ int i = 0;
+ int v = 0;
+ dbt_init(&key, &i, sizeof(i));
+ dbt_init(&val, &v, sizeof(v));
+ // put a value to make it dirty, just to make sure that checkpoint
+ // will do something
+ { int chk_r = db->put(db, NULL, &key, &val, 0); CKERR(chk_r); }
+
+ // at this point, we have a db that is dirty. Now we want to do the following:
+ // have two threads each start a checkpoint,
+ // then have a third thread create a txn, do a write,
+ // and commit the txn. In 5.2.3, the commit of the txn would block
+ // until one of the checkpoints completed (which should take 10 seconds).
+ // With the fix, the commit should return immediately.
+ toku_pthread_t chkpt1_tid;
+ toku_pthread_t chkpt2_tid;
+
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &chkpt1_tid, nullptr, run_checkpoint, nullptr);
+ CKERR(chk_r);
+ }
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &chkpt2_tid, nullptr, run_checkpoint, nullptr);
+ CKERR(chk_r);
+ }
+ usleep(2 * 1024 * 1024);
+ struct timeval tstart;
+ gettimeofday(&tstart, NULL);
+ DB_TXN *txn = NULL;
+ { int chk_r = env->txn_begin(env, NULL, &txn, 0); CKERR(chk_r); }
+ i = 1; v = 1;
+ { int chk_r = db->put(db, txn, &key, &val, 0); CKERR(chk_r); }
+ { int chk_r = txn->commit(txn, 0); CKERR(chk_r); }
+
+ struct timeval tend;
+ gettimeofday(&tend, NULL);
+ uint64_t diff = tdelta_usec(&tend, &tstart);
+ assert(diff < 5*1024*1024);
+
+
+ void *ret;
+ { int chk_r = toku_pthread_join(chkpt2_tid, &ret); CKERR(chk_r); }
+ { int chk_r = toku_pthread_join(chkpt1_tid, &ret); CKERR(chk_r); }
+
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+ db = NULL;
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_nested.cc b/storage/tokudb/PerconaFT/src/tests/test_nested.cc
new file mode 100644
index 00000000..1c802ab8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_nested.cc
@@ -0,0 +1,185 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Nested transactions. */
+
+#include <db.h>
+#include <sys/stat.h>
+
+static DB_ENV *env;
+static DB *db;
+
+static void insert (int i, DB_TXN *x) {
+ char hello[30], there[30];
+ DBT key,data;
+ if (verbose) printf("Insert %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ snprintf(there, sizeof(there), "there%d", i);
+ int r = db->put(db, x,
+ dbt_init(&key, hello, strlen(hello)+1),
+ dbt_init(&data, there, strlen(there)+1),
+ 0);
+ CKERR(r);
+}
+
+static void op_delete (int i, DB_TXN *x) {
+ char hello[30];
+ DBT key;
+ if (verbose) printf("op_delete %d\n", i);
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ int r = db->del(db, x,
+ dbt_init(&key, hello, strlen(hello)+1),
+ 0);
+ CKERR(r);
+}
+
+static void lookup (int i, DB_TXN *x, int expect) {
+ char hello[30], there[30];
+ DBT key,data;
+ snprintf(hello, sizeof(hello), "hello%d", i);
+ memset(&data, 0, sizeof(data));
+ if (verbose) printf("Looking up %d (expecting %s)\n", i, expect==0 ? "to find" : "not to find");
+ int r = db->get(db, x,
+ dbt_init(&key, hello, strlen(hello)+1),
+ &data,
+ 0);
+ assert(expect==r);
+ if (expect==0) {
+ CKERR(r);
+ snprintf(there, sizeof(there), "there%d", i);
+ assert(data.size==strlen(there)+1);
+ assert(strcmp((char*)data.data, there)==0);
+ }
+}
+
+static DB_TXN *txn, *txn2;
+
+static void
+test_nested (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ r=db_env_create(&env, 0); assert(r==0);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ r=db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ insert(0, txn);
+
+ insert(1, txn);
+ insert(2, txn);
+ insert(3, txn);
+ lookup(0, txn, 0);
+ lookup(1, txn, 0);
+ lookup(2, txn, 0);
+ lookup(3, txn, 0);
+ r=txn->commit(txn, 0); assert(r==0);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ op_delete(0, txn);
+ op_delete(3, txn);
+ r=env->txn_begin(env, txn, &txn2, 0); CKERR(r);
+ op_delete(1, txn2); CKERR(r);
+ lookup(3, txn2, DB_NOTFOUND);
+ insert(3, txn2);
+ lookup(3, txn2, 0);
+ r=txn2->commit(txn2, 0); CKERR(r);
+ lookup(0, txn, DB_NOTFOUND);
+ lookup(1, txn, DB_NOTFOUND);
+ lookup(2, txn, 0);
+ lookup(3, txn, 0);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ lookup(0, txn, DB_NOTFOUND);
+ lookup(1, txn, DB_NOTFOUND);
+ lookup(2, txn, 0);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(4, txn);
+ r=txn->commit(txn, 0); CKERR(r);
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=env->txn_begin(env, txn, &txn2, 0); CKERR(r);
+ op_delete(4, txn2);
+ r=txn->commit(txn2, 0); CKERR(r);
+ lookup(4, txn, DB_NOTFOUND);
+ insert(4, txn);
+ r=txn->commit(txn, 0); CKERR(r);
+ lookup(4, 0, 0);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(5, txn);
+ r=env->txn_begin(env, txn, &txn2, 0); CKERR(r);
+ lookup(5, txn2, 0);
+ insert(5, txn2);
+ lookup(5, txn2, 0);
+ r=txn->commit(txn2, 0); CKERR(r);
+ lookup(5, txn, 0);
+ r=env->txn_begin(env, txn, &txn2, 0); CKERR(r);
+ lookup(5, txn2, 0);
+ op_delete(5, txn2);
+ r=txn->commit(txn2, 0); CKERR(r);
+ lookup(5, txn, DB_NOTFOUND);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(6, txn);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ insert(6, txn);
+ r=env->txn_begin(env, txn, &txn2, 0); CKERR(r);
+ op_delete(6, txn2);
+ r=txn->commit(txn2, 0); CKERR(r);
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test_nested();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_nodup_set.cc b/storage/tokudb/PerconaFT/src/tests/test_nodup_set.cc
new file mode 100644
index 00000000..0aa94550
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_nodup_set.cc
@@ -0,0 +1,208 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+
+
+static void
+db_put (DB *db, int k, int v) {
+ DB_TXN * const null_txn = 0;
+ DBT key, val;
+ int r = db->put(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+}
+
+static void
+db_del (DB *db, int k) {
+ DB_TXN * const null_txn = 0;
+ DBT key;
+ int r = db->del(db, null_txn, dbt_init(&key, &k, sizeof k), 0);
+ assert(r == 0);
+}
+
+static void
+expect_db_get (DB *db, int k, int v) {
+ DB_TXN * const null_txn = 0;
+ DBT key, val;
+ int r = db->get(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init_malloc(&val), 0);
+ assert(r == 0);
+ int vv;
+ assert(val.size == sizeof vv);
+ memcpy(&vv, val.data, val.size);
+ assert(vv == v);
+ toku_free(val.data);
+}
+
+static void
+expect_cursor_get (DBC *cursor, int k, int v) {
+ DBT key, val;
+ int r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_NEXT);
+ assert(r == 0);
+ assert(key.size == sizeof k);
+ int kk;
+ memcpy(&kk, key.data, key.size);
+ assert(val.size == sizeof v);
+ int vv;
+ memcpy(&vv, val.data, val.size);
+ if (kk != k || vv != v) printf("expect key %u got %u - %u %u\n", (uint32_t)htonl(k), (uint32_t)htonl(kk), (uint32_t)htonl(v), (uint32_t)htonl(vv));
+ assert(kk == k);
+ assert(vv == v);
+
+ toku_free(key.data);
+ toku_free(val.data);
+}
+
+static void
+expect_cursor_set (DBC *cursor, int k, int expectr) {
+ DBT key, val;
+ int r = cursor->c_get(cursor, dbt_init(&key, &k, sizeof k), dbt_init_malloc(&val), DB_SET);
+ assert(r == expectr);
+ if (val.data) toku_free(val.data);
+}
+
+static void
+expect_cursor_get_current (DBC *cursor, int k, int v) {
+ DBT key, val;
+ int r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_CURRENT);
+ assert(r == 0);
+ int kk, vv;
+ assert(key.size == sizeof kk); memcpy(&kk, key.data, key.size); assert(kk == k);
+ assert(val.size == sizeof vv); memcpy(&vv, val.data, val.size); assert(vv == v);
+ toku_free(key.data); toku_free(val.data);
+}
+
+
+/* insert, close, delete, insert, search */
+static void
+test_icdi_search (int n, int dup_mode) {
+ if (verbose) printf("test_icdi_search:%d %d\n", n, dup_mode);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test_icdi_search.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->set_flags(db, dup_mode); assert(r == 0);
+ r = db->set_pagesize(db, 4096); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ int i;
+ for (i=0; i<n; i++) {
+ int k = htonl(i);
+ int v = htonl(i);
+ db_put(db, k, v);
+
+ expect_db_get(db, k, v);
+ }
+
+ /* reopen the database to force nonleaf buffering */
+ r = db->close(db, 0); assert(r == 0);
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->set_flags(db, dup_mode); assert(r == 0);
+ r = db->set_pagesize(db, 4096); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, 0, 0666); assert(r == 0);
+
+ for (i=0; i<n; i++)
+ db_del(db, htonl(i));
+
+ {
+ DBC *cursor;
+ r = db->cursor(db, 0, &cursor, 0); assert(r == 0);
+ expect_cursor_set(cursor, 0, DB_NOTFOUND);
+ r = cursor->c_close(cursor); assert(r == 0);
+ }
+
+ for (i=0; i<n; i++) {
+ int k = htonl(i);
+ int v = htonl(n+i);
+ db_put(db, k, v);
+
+ DBC *cursor;
+ r = db->cursor(db, 0, &cursor, 0); assert(r == 0);
+ expect_cursor_set(cursor, k, 0);
+ expect_cursor_get_current(cursor, k, v);
+ r = cursor->c_close(cursor); assert(r == 0);
+ }
+
+ DBC *cursor;
+ r = db->cursor(db, null_txn, &cursor, 0); assert(r == 0);
+
+ for (i=0; i<n; i++) {
+ expect_cursor_get(cursor, htonl(i), htonl(n+i));
+ }
+
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+ int i;
+
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ for (i=1; i<65537; i *= 2) {
+ test_icdi_search(i, 0);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_query.cc b/storage/tokudb/PerconaFT/src/tests/test_query.cc
new file mode 100644
index 00000000..fa835b41
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_query.cc
@@ -0,0 +1,433 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/**
+ * Test that various queries behave correctly
+ *
+ * Zardosht says:
+ *
+ * write a test that inserts a bunch of elements into the tree,
+ * and then verify that the following types of queries work:
+ * - db->get
+ * - next
+ * - prev
+ * - set_range
+ * - set_range_reverse
+ * - first
+ * - last
+ * - current
+ *
+ * do it on a table with:
+ * - just a leaf node
+ * - has internal nodes (make node size 4K and bn size 1K)
+ * - big cachetable such that everything fits
+ * - small cachetable such that not a lot fits
+ *
+ * make sure APIs are the callback APIs (getf_XXX)
+ * make sure your callbacks all return TOKUDB_CURSOR_CONTINUE,
+ * so we ensure that returning TOKUDB_CURSOR_CONTINUE does not
+ * mess anything up.
+ */
+
+#include "test.h"
+
+enum cursor_type {
+ FIRST,
+ LAST,
+ NEXT,
+ PREV,
+ CURRENT,
+ SET,
+ SET_RANGE,
+ SET_RANGE_REVERSE
+};
+
+/**
+ * Compute the expected value for a given key; verify_value_by_key()
+ * below checks a stored value against it and asserts on mismatch.
+ */
+static void get_value_by_key(DBT * key, DBT * value)
+{
+    // keys are stored in the DBT in net order; the derived value is
+    // written back in host order (2 * key + 1)
+ int * CAST_FROM_VOIDP(k, key->data);
+ int v = toku_ntohl(*k) * 2 + 1;
+ memcpy(value->data, &v, sizeof(int));
+}
+
+static void verify_value_by_key(DBT * key, DBT * value)
+{
+ assert(key->size == sizeof(int));
+ assert(value->size == sizeof(int));
+
+ int expected;
+ DBT expected_dbt;
+ expected_dbt.data = &expected;
+ expected_dbt.size = sizeof(int);
+ get_value_by_key(key, &expected_dbt);
+
+ int * CAST_FROM_VOIDP(v, value->data);
+ assert(*v == expected);
+}
+
+/**
+ * Callback for cursors that may be traversing forward,
+ * backward, or not at all.
+ */
+struct cursor_cb_info {
+ int last_key_seen;
+ enum cursor_type type;
+};
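+
+// A worked example of the conventions used here (see get_value_by_key):
+// key i is stored as toku_htonl(i) and its expected value is 2*i + 1, so
+// key 5 goes in as htonl(5) and should come back with value 11.  Every
+// callback in this test returns TOKUDB_CURSOR_CONTINUE, so a single
+// c_getf_* call may invoke the callback for several consecutive pairs
+// before returning.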
+static int cursor_cb(DBT const * key,
+ DBT const * value, void * extra)
+{
+ struct cursor_cb_info * CAST_FROM_VOIDP(info, extra);
+ int * CAST_FROM_VOIDP(kbuf, key->data);
+ int k = ntohl(*kbuf);
+
+ switch (info->type) {
+ // point queries, just verify the pair
+ // is correct.
+ case SET:
+ case SET_RANGE:
+ case SET_RANGE_REVERSE:
+ case FIRST:
+ case LAST:
+ case CURRENT:
+ verify_value_by_key((DBT *) key, (DBT *) value);
+ break;
+ case NEXT:
+        // verify this key immediately follows the last key we saw
+        verify_value_by_key((DBT *) key, (DBT *) value);
+        if (info->last_key_seen >= 0) {
+            assert(k == info->last_key_seen + 1);
+        }
+ break;
+ case PREV:
+        // verify this key immediately precedes the last key we saw
+ verify_value_by_key((DBT *) key, (DBT *) value);
+ if (k < info->last_key_seen) {
+ assert(k == info->last_key_seen - 1);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ info->last_key_seen = k;
+ return TOKUDB_CURSOR_CONTINUE;
+}
+
+/**
+ * Fill an FT with the given number of rows.
+ */
+static void fill_db(DB_ENV * env, DB * db, int num_rows)
+{
+ int r;
+ DB_TXN * txn;
+ DBT key, value;
+
+ printf("filling db\n");
+
+ int i, j;
+ const int ins_per_txn = 1000;
+ assert(num_rows % ins_per_txn == 0);
+ for (i = 0; i < num_rows; i+= ins_per_txn) {
+ r = env->txn_begin(env, NULL, &txn, 0); { int chk_r = r; CKERR(chk_r); }
+ for (j = 0; j < ins_per_txn && (i + j) < num_rows; j++) {
+ int v, k = toku_htonl(i + j);
+ dbt_init(&key, &k, sizeof(int));
+ dbt_init(&value, &v, sizeof(int));
+ get_value_by_key(&key, &value);
+ r = db->put(db, txn, &key, &value, 0); { int chk_r = r; CKERR(chk_r); }
+ }
+ r = txn->commit(txn, 0); { int chk_r = r; CKERR(chk_r); }
+ }
+}
+
+static void init_env(DB_ENV ** env, size_t ct_size)
+{
+ int r;
+ const int envflags = DB_INIT_MPOOL | DB_CREATE | DB_THREAD |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE;
+
+ printf("initializing environment\n");
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
+
+ r = db_env_create(env, 0); { int chk_r = r; CKERR(chk_r); }
+ assert(ct_size < 1024 * 1024 * 1024L);
+ r = (*env)->set_cachesize(*env, 0, ct_size, 1); { int chk_r = r; CKERR(chk_r); }
+ r = (*env)->open(*env, TOKU_TEST_FILENAME, envflags, 0755); { int chk_r = r; CKERR(chk_r); }
+}
+
+static void init_db(DB_ENV * env, DB ** db)
+{
+ int r;
+ const int node_size = 4096;
+ const int bn_size = 1024;
+
+ printf("initializing db\n");
+
+ DB_TXN * txn;
+ r = db_create(db, env, 0); { int chk_r = r; CKERR(chk_r); }
+ r = (*db)->set_readpagesize(*db, bn_size); { int chk_r = r; CKERR(chk_r); }
+ r = (*db)->set_pagesize(*db, node_size); { int chk_r = r; CKERR(chk_r); }
+ r = env->txn_begin(env, NULL, &txn, 0); { int chk_r = r; CKERR(chk_r); }
+ r = (*db)->open(*db, txn, "db", NULL, DB_BTREE, DB_CREATE, 0644); { int chk_r = r; CKERR(chk_r); }
+ r = txn->commit(txn, 0); { int chk_r = r; CKERR(chk_r); }
+}
+
+static void cleanup_env_and_db(DB_ENV * env, DB * db)
+{
+ int r;
+
+ printf("cleaning up environment and db\n");
+ r = db->close(db, 0); { int chk_r = r; CKERR(chk_r); }
+ r = env->close(env, 0); { int chk_r = r; CKERR(chk_r); }
+}
+
+static void do_test(size_t ct_size, int num_keys)
+{
+ int i, r;
+ DB * db;
+ DB_ENV * env;
+
+ printf("doing tests for ct_size %lu, num_keys %d\n",
+ ct_size, num_keys);
+
+ // initialize everything and insert data
+ init_env(&env, ct_size);
+ assert(env != NULL);
+ init_db(env, &db);
+ assert(db != NULL);
+ fill_db(env, db, num_keys);
+
+ const int last_key = num_keys - 1;
+
+ // test c_getf_first
+ printf("testing c getf first\n");
+ {
+ DBC * dbc;
+ DB_TXN * txn;
+ struct cursor_cb_info info;
+ r = env->txn_begin(env, NULL, &txn, 0); { int chk_r = r; CKERR(chk_r); }
+ r = db->cursor(db, txn, &dbc, 0); { int chk_r = r; CKERR(chk_r); }
+ info.last_key_seen = -1;
+ info.type = FIRST;
+ r = dbc->c_getf_first(dbc, 0, cursor_cb, &info); { int chk_r = r; CKERR(chk_r); }
+ assert(info.last_key_seen == 0);
+ r = dbc->c_close(dbc); { int chk_r = r; CKERR(chk_r); }
+ r = txn->commit(txn, 0); { int chk_r = r; CKERR(chk_r); }
+ }
+
+ // test c_getf_last
+ printf("testing c getf last\n");
+ {
+ DBC * dbc;
+ DB_TXN * txn;
+ struct cursor_cb_info info;
+ r = env->txn_begin(env, NULL, &txn, 0); { int chk_r = r; CKERR(chk_r); }
+ r = db->cursor(db, txn, &dbc, 0); { int chk_r = r; CKERR(chk_r); }
+ info.last_key_seen = -1;
+ info.type = LAST;
+ r = dbc->c_getf_last(dbc, 0, cursor_cb, &info); { int chk_r = r; CKERR(chk_r); }
+ assert(info.last_key_seen == last_key);
+ r = dbc->c_close(dbc); { int chk_r = r; CKERR(chk_r); }
+ r = txn->commit(txn, 0); { int chk_r = r; CKERR(chk_r); }
+ }
+
+ // test c_getf_next
+ printf("testing c getf next\n");
+ {
+ DBC * dbc;
+ DB_TXN * txn;
+ struct cursor_cb_info info;
+ r = env->txn_begin(env, NULL, &txn, 0); { int chk_r = r; CKERR(chk_r); }
+ r = db->cursor(db, txn, &dbc, 0); { int chk_r = r; CKERR(chk_r); }
+ info.last_key_seen = -1;
+ //info.type = FIRST;
+ //r = dbc->c_getf_first(dbc, 0, cursor_cb, &info); { int chk_r =
+ //r; CKERR(chk_r); }
+ //assert(info.last_key_seen == 0);
+ info.type = NEXT;
+ while ((r = dbc->c_getf_next(dbc, 0, cursor_cb, &info)) == 0);
+ assert(r == DB_NOTFOUND);
+ if (info.last_key_seen != last_key) {
+            printf("last key seen %d, wanted %d\n",
+ info.last_key_seen, last_key);
+ }
+ assert(info.last_key_seen == last_key);
+ r = dbc->c_close(dbc); { int chk_r = r; CKERR(chk_r); }
+ r = txn->commit(txn, 0); { int chk_r = r; CKERR(chk_r); }
+ }
+
+ // test c_getf_prev
+ printf("testing c getf prev\n");
+ {
+ DBC * dbc;
+ DB_TXN * txn;
+ struct cursor_cb_info info;
+ r = env->txn_begin(env, NULL, &txn, 0); { int chk_r = r; CKERR(chk_r); }
+ r = db->cursor(db, txn, &dbc, 0); { int chk_r = r; CKERR(chk_r); }
+ info.last_key_seen = -1;
+ //info.type = LAST;
+ //r = dbc->c_getf_last(dbc, 0, cursor_cb, &info); { int chk_r = r;
+ //CKERR(chk_r); }
+ //assert(info.last_key_seen == last_key);
+ info.type = PREV;
+ while ((r = dbc->c_getf_prev(dbc, 0, cursor_cb, &info)) == 0);
+ assert(r == DB_NOTFOUND);
+ assert(info.last_key_seen == 0);
+ r = dbc->c_close(dbc); { int chk_r = r; CKERR(chk_r); }
+ r = txn->commit(txn, 0); { int chk_r = r; CKERR(chk_r); }
+ }
+
+ printf("testing db->get, c getf set, current\n");
+ {
+ DBC * dbc;
+ DB_TXN * txn;
+ struct cursor_cb_info info;
+ r = env->txn_begin(env, NULL, &txn, 0); { int chk_r = r; CKERR(chk_r); }
+ r = db->cursor(db, txn, &dbc, 0); { int chk_r = r; CKERR(chk_r); }
+ for (i = 0; i < 1000; i++) {
+ DBT key;
+ int k = random() % num_keys;
+ int nk = toku_htonl(k);
+ dbt_init(&key, &nk, sizeof(int));
+
+ // test c_getf_set
+ info.last_key_seen = -1;
+ info.type = SET;
+ r = dbc->c_getf_set(dbc, 0, &key, cursor_cb, &info); { int chk_r = r; CKERR(chk_r); }
+ assert(info.last_key_seen == k);
+
+ // test c_getf_current
+ info.last_key_seen = -1;
+ info.type = CURRENT;
+ r = dbc->c_getf_current(dbc, 0, cursor_cb, &info); { int chk_r = r; CKERR(chk_r); }
+ assert(info.last_key_seen == k);
+
+ // test db->get (point query)
+ DBT value;
+ memset(&value, 0, sizeof(DBT));
+ r = db->get(db, txn, &key, &value, 0); { int chk_r = r; CKERR(chk_r); }
+ verify_value_by_key(&key, &value);
+ }
+ r = dbc->c_close(dbc); { int chk_r = r; CKERR(chk_r); }
+ r = txn->commit(txn, 0); { int chk_r = r; CKERR(chk_r); }
+ }
+
+ // delete some elements over a variable stride,
+ // this will let us test range/reverse
+ const int stride = num_keys / 10;
+ printf("deleting some elements in stride %d\n", stride);
+ {
+ DBC * dbc;
+ DB_TXN * txn;
+ DBT key;
+ r = env->txn_begin(env, NULL, &txn, 0); { int chk_r = r; CKERR(chk_r); }
+ r = db->cursor(db, txn, &dbc, 0); { int chk_r = r; CKERR(chk_r); }
+ for (i = 0; i < num_keys; i += stride) {
+ int k = toku_htonl(i);
+ dbt_init(&key, &k, sizeof(int));
+ r = db->del(db, txn, &key, 0);
+ }
+ r = dbc->c_close(dbc); { int chk_r = r; CKERR(chk_r); }
+ r = txn->commit(txn, 0); { int chk_r = r; CKERR(chk_r); }
+ }
+
+ // test getf set range and range reverse
+ printf("testing getf set range and range reverse\n");
+ {
+ DBC * dbc;
+ DB_TXN * txn;
+ DBT key;
+ struct cursor_cb_info info;
+ r = env->txn_begin(env, NULL, &txn, 0); { int chk_r = r; CKERR(chk_r); }
+ r = db->cursor(db, txn, &dbc, 0); { int chk_r = r; CKERR(chk_r); }
+ for (i = 0; i < num_keys; i += stride) {
+ int k = toku_htonl(i);
+ dbt_init(&key, &k, sizeof(int));
+
+ // we should have only actually seen the next
+ // key after i if i was not the last key,
+ // otherwise there's nothing after that key
+ info.last_key_seen = -1;
+ info.type = SET_RANGE;
+ r = dbc->c_getf_set_range(dbc, 0, &key, cursor_cb, &info);
+ if (i == last_key) {
+ assert(r == DB_NOTFOUND);
+ } else {
+ assert(info.last_key_seen == i + 1);
+ }
+
+ // we should have only actually seen the prev
+ // key if i was not the first key, otherwise
+ // there's nothing before that key.
+ info.last_key_seen = -1;
+ info.type = SET_RANGE_REVERSE;
+ r = dbc->c_getf_set_range_reverse(dbc, 0, &key, cursor_cb, &info);
+ if (i == 0) {
+ assert(r == DB_NOTFOUND);
+ } else {
+ assert(info.last_key_seen == i - 1);
+ { int chk_r = r; CKERR(chk_r); }
+ }
+ }
+ r = dbc->c_close(dbc); { int chk_r = r; CKERR(chk_r); }
+ r = txn->commit(txn, 0); { int chk_r = r; CKERR(chk_r); }
+ }
+
+ cleanup_env_and_db(env, db);
+}
+
+int test_main(int argc, char * const argv[])
+{
+ default_parse_args(argc, argv);
+
+ // just a leaf, fits in cachetable
+ do_test(1L*1024*1024, 1000);
+ // with internal nodes, fits in cachetable
+ do_test(4L*1024*1024, 100000);
+ // with internal nodes, does not fit in cachetable
+ do_test(1L*1024*1024, 1000000);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_rand_insert.cc b/storage/tokudb/PerconaFT/src/tests/test_rand_insert.cc
new file mode 100644
index 00000000..a9b77f8e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_rand_insert.cc
@@ -0,0 +1,133 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+test_rand_insert (int n, int dup_mode) {
+ if (verbose) printf("test_rand_insert:%d %d\n", n, dup_mode);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.rand.insert.ft_handle";
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ r = db->set_flags(db, dup_mode);
+ assert(r == 0);
+ r = db->set_pagesize(db, 4096);
+ assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+
+ unsigned int keys[n];
+ int i;
+ for (i=0; i<n; i++)
+ keys[i] = htonl(random());
+
+ /* insert n/2 <random(), i> pairs */
+ for (i=0; i<n/2; i++) {
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, &keys[i], sizeof keys[i]), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+ }
+
+ /* reopen the database to force nonleaf buffering */
+ r = db->close(db, 0);
+ assert(r == 0);
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ r = db->set_flags(db, dup_mode);
+ assert(r == 0);
+ r = db->set_pagesize(db, 4096);
+ assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, 0, 0666);
+ assert(r == 0);
+
+ /* insert n/2 <random(), i> pairs */
+ for (i=n/2; i<n; i++) {
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, &keys[i], sizeof keys[i]), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+ }
+
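+    /* verify every key; random() can produce duplicate keys, and a later
+       put overwrites an earlier one, so the stored value may be the index
+       of a different entry whose key collides with keys[i] */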
+ for (i=0; i<n; i++) {
+ DBT key, val;
+ r = db->get(db, 0, dbt_init(&key, &keys[i], sizeof keys[i]), dbt_init_malloc(&val), 0);
+ assert(r == 0);
+ int vv;
+ assert(val.size == sizeof vv);
+ memcpy(&vv, val.data, val.size);
+ if (vv != i) assert(keys[vv] == keys[i]);
+ else assert(vv == i);
+ toku_free(val.data);
+ }
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ int i;
+ for (i = 1; i <= 2048; i *= 2) {
+ test_rand_insert(i, 0);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_read_txn_invalid_ops.cc b/storage/tokudb/PerconaFT/src/tests/test_read_txn_invalid_ops.cc
new file mode 100644
index 00000000..d0f7d3aa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_read_txn_invalid_ops.cc
@@ -0,0 +1,196 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
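+// The callbacks below abort() unconditionally: a DB_TXN_READ_ONLY
+// transaction must never reach them, because every write-path call in
+// test_invalid_ops is expected to fail with EINVAL before any row
+// generation or update callback runs.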
+static int update_fun(DB *UU(db),
+ const DBT *UU(key),
+ const DBT *UU(old_val), const DBT *UU(extra),
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *UU(set_extra))
+{
+ abort();
+ assert(set_val != NULL);
+ return 0;
+}
+
+static int generate_row_for_put(
+ DB *UU(dest_db),
+ DB *UU(src_db),
+ DBT_ARRAY *UU(dest_key_arrays),
+ DBT_ARRAY *UU(dest_val_arrays),
+ const DBT *UU(src_key),
+ const DBT *UU(src_val)
+ )
+{
+ abort();
+ return 0;
+}
+
+static int generate_row_for_del(
+ DB *UU(dest_db),
+ DB *UU(src_db),
+ DBT_ARRAY *UU(dest_key_arrays),
+ const DBT *UU(src_key),
+ const DBT *UU(src_val)
+ )
+{
+ abort();
+ return 0;
+}
+
+static void test_invalid_ops(uint32_t iso_flags) {
+ int r;
+ DB * db;
+ DB_ENV * env;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
+
+ // set things up
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->set_generate_row_callback_for_put(env,generate_row_for_put);
+ CKERR(r);
+ r = env->set_generate_row_callback_for_del(env,generate_row_for_del);
+ CKERR(r);
+ env->set_update(env, update_fun);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
+ CKERR(r);
+ r = db_create(&db, env, 0);
+ CKERR(r);
+
+ DB_TXN* txn = NULL;
+ r = env->txn_begin(env, 0, &txn, iso_flags | DB_TXN_READ_ONLY);
+ CKERR(r);
+
+ r = db->open(db, txn, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
+ CKERR2(r, EINVAL);
+ r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
+ CKERR(r);
+
+ int k = 1;
+ int v = 10;
+ DBT key, val;
+ dbt_init(&key, &k, sizeof k);
+ dbt_init(&val, &v, sizeof v);
+
+ uint32_t db_flags = 0;
+ uint32_t indexer_flags = 0;
+ DB_INDEXER* indexer;
+ r = env->create_indexer(
+ env,
+ txn,
+ &indexer,
+ db,
+ 1,
+ &db,
+ &db_flags,
+ indexer_flags
+ );
+ CKERR2(r, EINVAL);
+
+
+ // test invalid operations of ydb_db.cc,
+ // db->open tested above
+ DB_LOADER* loader;
+ uint32_t put_flags = 0;
+ uint32_t dbt_flags = 0;
+ r = env->create_loader(env, txn, &loader, NULL, 1, &db, &put_flags, &dbt_flags, 0);
+ CKERR2(r, EINVAL);
+
+ r = db->change_descriptor(db, txn, &key, 0);
+ CKERR2(r, EINVAL);
+
+ //
+ // test invalid operations return EINVAL from ydb_write.cc
+ //
+ r = db->put(db, txn, &key, &val,0);
+ CKERR2(r, EINVAL);
+ r = db->del(db, txn, &key, DB_DELETE_ANY);
+ CKERR2(r, EINVAL);
+ r = db->update(db, txn, &key, &val, 0);
+ CKERR2(r, EINVAL);
+ r = db->update_broadcast(db, txn, &val, 0);
+ CKERR2(r, EINVAL);
+
+ r = env_put_multiple_test_no_array(env, NULL, txn, &key, &val, 1, &db, &key, &val, 0);
+ CKERR2(r, EINVAL);
+ r = env_del_multiple_test_no_array(env, NULL, txn, &key, &val, 1, &db, &key, 0);
+ CKERR2(r, EINVAL);
+ uint32_t flags;
+ r = env_update_multiple_test_no_array(
+ env, NULL, txn,
+ &key, &val,
+ &key, &val,
+ 1, &db, &flags,
+ 1, &key,
+ 1, &val
+ );
+ CKERR2(r, EINVAL);
+
+ r = db->close(db, 0);
+ CKERR(r);
+
+ // test invalid operations of ydb.cc, dbrename and dbremove
+ r = env->dbremove(env, txn, "foo.db", NULL, 0);
+ CKERR2(r, EINVAL);
+ r = env->dbrename(env, txn, "foo.db", NULL, "bar.db", 0);
+ CKERR2(r, EINVAL);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ // clean things up
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+
+int test_main(int argc, char * const argv[]) {
+ (void) argc;
+ (void) argv;
+ test_invalid_ops(0);
+ test_invalid_ops(DB_TXN_SNAPSHOT);
+ test_invalid_ops(DB_READ_COMMITTED);
+ test_invalid_ops(DB_READ_UNCOMMITTED);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_redirect_func.cc b/storage/tokudb/PerconaFT/src/tests/test_redirect_func.cc
new file mode 100644
index 00000000..9e00bb13
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_redirect_func.cc
@@ -0,0 +1,188 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update calls back into the update function
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+
+static int update_fun(DB *UU(db),
+ const DBT *UU(key),
+ const DBT *UU(old_val), const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ set_val(extra, set_extra);
+ return 0;
+}
+
+
+static int generate_row_for_del(
+ DB *UU(dest_db),
+ DB *UU(src_db),
+ DBT_ARRAY *dest_key_arrays,
+ const DBT *UU(src_key),
+ const DBT *UU(src_val)
+ )
+{
+ toku_dbt_array_resize(dest_key_arrays, 0);
+ assert(false);
+ return 0;
+}
+
+static int generate_row_for_put(
+ DB *UU(dest_db),
+ DB *UU(src_db),
+ DBT_ARRAY *dest_key_arrays,
+ DBT_ARRAY *dest_val_arrays,
+ const DBT *src_key,
+ const DBT *src_val
+ )
+{
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ dest_key->size=src_key->size;
+ dest_key->data=src_key->data;
+ dest_key->flags = 0;
+ dest_val->size=src_val->size;
+ dest_val->data=src_val->data;
+ dest_val->flags = 0;
+ return 0;
+}
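+
+// generate_row_for_put above is an identity mapping: the loader writes each
+// (key, value) pair through unchanged, so the db->update calls later in
+// run_test exercise update_fun on rows the loader inserted verbatim.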
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->set_generate_row_callback_for_put(env,generate_row_for_put); CKERR(chk_r); }
+ { int chk_r = env->set_generate_row_callback_for_del(env,generate_row_for_del); CKERR(chk_r); }
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static void run_test(void) {
+ DB* db = NULL;
+ DB_LOADER* loader = NULL;
+ DBT key, val;
+ uint32_t mult_db_flags = 0;
+ uint32_t mult_dbt_flags = DB_DBT_REALLOC;
+ uint8_t key_data = 0;
+ uint8_t val_data = 0;
+
+
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_create, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+
+
+ dbt_init(&key,&key_data,sizeof(uint8_t));
+ dbt_init(&val,&val_data,sizeof(uint8_t));
+
+ val_data = 100;
+
+ //
+ // now create a loader
+ //
+ IN_TXN_COMMIT(env, NULL, txn_loader, 0, {
+        // create the loader
+ { int chk_r = env->create_loader(
+ env,
+ txn_loader,
+ &loader,
+ db,
+ 1,
+ &db,
+ &mult_db_flags,
+ &mult_dbt_flags,
+ 0
+ ); CKERR(chk_r); }
+ { int chk_r = loader->put(loader, &key, &val); CKERR(chk_r); }
+ { int chk_r = loader->close(loader); CKERR(chk_r); }
+ });
+
+ val_data = 101;
+ IN_TXN_COMMIT(env, NULL, txn_update, 0, {
+ { int chk_r = db->update(db, txn_update, &key, &val, 0); CKERR(chk_r); }
+ });
+
+ key_data = 11;
+ val_data = 11;
+ IN_TXN_COMMIT(env, NULL, txn_update, 0, {
+ { int chk_r = db->update(db, txn_update, &key, &val, 0); CKERR(chk_r); }
+ });
+
+
+ DBC *cursor = NULL;
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db->cursor(db, txn_3, &cursor, 0); CKERR(chk_r); }
+ { int chk_r = cursor->c_get(cursor, &key, &val, DB_NEXT); CKERR(chk_r); }
+ assert(key.size == sizeof(uint8_t));
+ assert(val.size == sizeof(uint8_t));
+ assert(*(uint8_t *)(key.data) == 0);
+ assert(*(uint8_t *)(val.data) == 101);
+ { int chk_r = cursor->c_get(cursor, &key, &val, DB_NEXT); CKERR(chk_r); }
+ assert(key.size == sizeof(uint8_t));
+ assert(val.size == sizeof(uint8_t));
+ assert(*(uint8_t *)(key.data) == 11);
+ assert(*(uint8_t *)(val.data) == 11);
+ { int chk_r = cursor->c_close(cursor); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_restrict.cc b/storage/tokudb/PerconaFT/src/tests/test_restrict.cc
new file mode 100644
index 00000000..9433cf0f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_restrict.cc
@@ -0,0 +1,305 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+typedef struct {
+ int64_t left;
+ int64_t right;
+ int64_t last;
+ int found;
+ int direction;
+ int error_to_expect;
+} cont_extra;
+
+static int
+getf_continue(DBT const* key, DBT const* val, void* context) {
+    assert(key); // key should always be non-NULL here; turn this into an if-check if it ever fires
+    assert(val); // likewise for val
+ cont_extra *CAST_FROM_VOIDP(c, context);
+
+ assert(c->found >= 0);
+ assert(c->found < 3);
+ c->found++;
+ assert(key->size == 8);
+ assert(val->size == 8);
+ int64_t k = *(int64_t*)key->data;
+ int64_t v = *(int64_t*)val->data;
+ assert(k==v);
+ assert(k==c->last+c->direction);
+ c->last = k;
+ if (c->error_to_expect) {
+ assert(c->left <= k);
+ assert(k <= c->right);
+ }
+ if (c->found < 3) {
+ return TOKUDB_CURSOR_CONTINUE;
+ } else {
+ return 0;
+ }
+}
+
+static void
+test_restrict (int64_t n, int offset, int error_to_expect) {
+ assert(n > 30);
+ DB_TXN * const null_txn = 0;
+ int r;
+
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r=env->set_default_bt_compare(env, int64_dbt_cmp); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ r = db->set_flags(db, 0);
+ assert(r == 0);
+ r = db->open(db, null_txn, "restrict.db", NULL, DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+
+ int64_t keys[n];
+ int64_t i;
+ for (i=0; i<n; i++) {
+ keys[i] = i;
+ }
+
+ DBT key, val;
+ for (i=0; i<n; i++) {
+ r = db->put(db, null_txn, dbt_init(&key, &keys[i], sizeof keys[i]), dbt_init(&val, &i, sizeof i), 0);
+ assert(r == 0);
+ }
+
+ DBC* cursor;
+
+ r = db->cursor(db, NULL, &cursor, 0);
+ CKERR(r);
+
+ DBT dbt_left, dbt_right;
+ int64_t int_left, int_right;
+ int_left = n / 3 + offset;
+ int_right = 2 * n / 3 + offset;
+
+ dbt_init(&dbt_left, &keys[int_left], sizeof keys[int_left]);
+ dbt_init(&dbt_right, &keys[int_right], sizeof keys[int_right]);
+
+ r = cursor->c_set_bounds(
+ cursor,
+ &dbt_left,
+ &dbt_right,
+ true,
+ error_to_expect);
+ CKERR(r);
+
+
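+    // c_set_bounds restricts this cursor to [keys[int_left], keys[int_right]];
+    // the last argument is presumably the error the cursor reports for keys
+    // outside that range (DB_NOTFOUND, TOKUDB_OUT_OF_RANGE, or 0 for no
+    // error), which is what the checks below expect.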
+ for (i=0; i<n; i++) {
+ r = cursor->c_get(cursor, dbt_init(&key, &keys[i], sizeof keys[i]), dbt_init(&val, NULL, 0), DB_SET);
+ if (i < int_left || i > int_right) {
+ CKERR2(r, error_to_expect);
+ } else {
+ CKERR(r);
+ assert(val.size == 8);
+ assert(*(int64_t*)val.data == i);
+ }
+ }
+ // Forwards
+
+ r = cursor->c_get(cursor, dbt_init(&key, &keys[int_left], sizeof keys[int_left]), dbt_init(&val, NULL, 0), DB_SET);
+ CKERR(r);
+ assert(val.size == 8);
+ assert(*(int64_t*)val.data == int_left);
+
+ for (i=int_left+1; i < n; i++) {
+ r = cursor->c_get(cursor, dbt_init(&key, NULL, 0), dbt_init(&val, NULL, 0), DB_NEXT);
+ if (i >= int_left && i <= int_right) {
+ CKERR(r);
+ assert(key.size == 8);
+ assert(*(int64_t*)key.data == i);
+ assert(val.size == 8);
+ assert(*(int64_t*)val.data == i);
+ } else {
+ CKERR2(r, error_to_expect);
+ break;
+ }
+ }
+
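+    // Backwards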
+ r = cursor->c_get(cursor, dbt_init(&key, &keys[int_right], sizeof keys[int_right]), dbt_init(&val, NULL, 0), DB_SET);
+ CKERR(r);
+ assert(val.size == 8);
+ assert(*(int64_t*)val.data == int_right);
+
+ for (i=int_right-1; i >= 0; i--) {
+ r = cursor->c_get(cursor, dbt_init(&key, NULL, 0), dbt_init(&val, NULL, 0), DB_PREV);
+ if (i >= int_left && i <= int_right) {
+ CKERR(r);
+ assert(key.size == 8);
+ assert(*(int64_t*)key.data == i);
+ assert(val.size == 8);
+ assert(*(int64_t*)val.data == i);
+ } else {
+ CKERR2(r, error_to_expect);
+ break;
+ }
+ }
+
+ // Forwards
+
+ r = cursor->c_get(cursor, dbt_init(&key, &keys[int_left], sizeof keys[int_left]), dbt_init(&val, NULL, 0), DB_SET);
+ CKERR(r);
+ assert(val.size == 8);
+ assert(*(int64_t*)val.data == int_left);
+
+ cont_extra c;
+ c.left = int_left;
+ c.right = int_right;
+ c.error_to_expect = error_to_expect;
+ c.direction = 1;
+ c.last = int_left;
+ for (i=int_left+1; i < n; i+=3) {
+ c.found = 0;
+
+ r = cursor->c_getf_next(cursor, 0, getf_continue, &c);
+ if (i >= int_left && i <= int_right) {
+ CKERR(r);
+ if (!error_to_expect) {
+ assert(c.found == 3);
+ assert(c.last == i+2);
+ } else if (i+2 >= int_left && i+2 <= int_right) {
+ assert(c.found == 3);
+ assert(c.last == i+2);
+ } else if (i+1 >= int_left && i+1 <= int_right) {
+ assert(c.found == 2);
+ assert(c.last == i+1);
+ r = cursor->c_get(cursor, dbt_init(&key, NULL, 0), dbt_init(&val, NULL, 0), DB_CURRENT);
+ CKERR2(r, error_to_expect);
+ break;
+ } else {
+ assert(c.found == 1);
+ assert(c.last == i);
+ r = cursor->c_get(cursor, dbt_init(&key, NULL, 0), dbt_init(&val, NULL, 0), DB_CURRENT);
+ CKERR2(r, error_to_expect);
+ break;
+ }
+ } else {
+ if (error_to_expect == 0) {
+ assert(c.found == 3);
+ assert(c.last == i+2);
+ } else {
+ assert(c.found == 0);
+ assert(c.last == i-1);
+ }
+ CKERR2(r, error_to_expect);
+ break;
+ }
+ }
+
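+    // Backwards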
+ r = cursor->c_get(cursor, dbt_init(&key, &keys[int_right], sizeof keys[int_right]), dbt_init(&val, NULL, 0), DB_SET);
+ CKERR(r);
+ assert(val.size == 8);
+ assert(*(int64_t*)val.data == int_right);
+
+ c.direction = -1;
+ c.last = int_right;
+ for (i=int_right-1; i >= 0; i -= 3) {
+ c.found = 0;
+
+ r = cursor->c_getf_prev(cursor, 0, getf_continue, &c);
+ if (i >= int_left && i <= int_right) {
+ CKERR(r);
+ if (!error_to_expect) {
+ assert(c.found == 3);
+ assert(c.last == i-2);
+ } else if (i-2 >= int_left && i-2 <= int_right) {
+ assert(c.found == 3);
+ assert(c.last == i-2);
+ } else if (i-1 >= int_left && i-1 <= int_right) {
+ assert(c.found == 2);
+ assert(c.last == i-1);
+ r = cursor->c_get(cursor, dbt_init(&key, NULL, 0), dbt_init(&val, NULL, 0), DB_CURRENT);
+ CKERR2(r, error_to_expect);
+ break;
+ } else {
+ assert(c.found == 1);
+ assert(c.last == i);
+ r = cursor->c_get(cursor, dbt_init(&key, NULL, 0), dbt_init(&val, NULL, 0), DB_CURRENT);
+ CKERR2(r, error_to_expect);
+ break;
+ }
+ } else {
+ if (error_to_expect == 0) {
+ assert(c.found == 3);
+ assert(c.last == i-2);
+ } else {
+ assert(c.found == 0);
+ assert(c.last == i+1);
+ }
+ CKERR2(r, error_to_expect);
+ break;
+ }
+ }
+
+ r = cursor->c_close(cursor); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ for (int i = 3*64; i < 3*1024; i *= 2) {
+ for (int offset = -2; offset <= 2; offset++) {
+ test_restrict(i, offset, DB_NOTFOUND);
+ test_restrict(i, offset, TOKUDB_OUT_OF_RANGE);
+ test_restrict(i, offset, 0);
+ }
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_reverse_compare_fun.cc b/storage/tokudb/PerconaFT/src/tests/test_reverse_compare_fun.cc
new file mode 100644
index 00000000..5fc0ab47
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_reverse_compare_fun.cc
@@ -0,0 +1,179 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+/* try a reverse compare function to verify that the database always uses the application's
+ compare function */
+
+
+#include <db.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+
+static int
+keycompare (const void *key1, unsigned int key1len, const void *key2, unsigned int key2len) {
+ if (key1len==key2len) {
+ return memcmp(key1,key2,key1len);
+ } else if (key1len<key2len) {
+ int r = memcmp(key1,key2,key1len);
+        if (r<=0) return -1; /* If the keys are the same up to key1's length, then return -1, since key1 is shorter than key2. */
+ else return 1;
+ } else {
+ return -keycompare(key2,key2len,key1,key1len);
+ }
+}
+
+static int
+reverse_compare (DB *db __attribute__((__unused__)), const DBT *a, const DBT*b) {
+ return -keycompare(a->data, a->size, b->data, b->size);
+}
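+
+// With reverse_compare installed as the comparison function, DB_NEXT walks
+// keys from largest to smallest, which is why the verification loop in
+// test_reverse_compare below iterates i from 2*n-1 down to 0.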
+
+static void
+expect (DBC *cursor, int k, int v) {
+ DBT key, val;
+ int r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), DB_NEXT);
+ CKERR(r);
+ assert(key.size == sizeof k);
+ int kk;
+ memcpy(&kk, key.data, key.size);
+ assert(val.size == sizeof v);
+ int vv;
+ memcpy(&vv, val.data, val.size);
+ if (kk != k || vv != v) printf("expect key %u got %u - %u %u\n", (uint32_t)htonl(k), (uint32_t)htonl(kk), (uint32_t)htonl(v), (uint32_t)htonl(vv));
+ assert(kk == k);
+ assert(vv == v);
+
+ toku_free(key.data);
+ toku_free(val.data);
+}
+
+static void
+test_reverse_compare (int n) {
+ if (verbose) printf("test_reverse_compare:%d\n", n);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "reverse.compare.db";
+
+ int r;
+ int i;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->set_default_bt_compare(env, reverse_compare);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->set_pagesize(db, 4096);
+ CKERR(r);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666);
+ CKERR(r);
+
+    /* insert n unique keys {0, 1, ..., n-1} */
+ for (i=0; i<n; i++) {
+ DBT key, val;
+ int k, v;
+ k = htonl(i);
+ dbt_init(&key, &k, sizeof k);
+ v = htonl(i);
+ dbt_init(&val, &v, sizeof v);
+ r = db->put(db, null_txn, &key, &val, 0);
+ CKERR(r);
+ }
+
+ /* reopen the database to force nonleaf buffering */
+ r = db->close(db, 0);
+ CKERR(r);
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->set_pagesize(db, 4096);
+ CKERR(r);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, 0, 0666);
+ CKERR(r);
+
+    /* insert n unique keys {n, n+1, ..., 2*n-1} */
+ for (i=n; i<2*n; i++) {
+ DBT key, val;
+ int k, v;
+ k = htonl(i);
+ dbt_init(&key, &k, sizeof k);
+ v = htonl(i);
+ dbt_init(&val, &v, sizeof v);
+ r = db->put(db, null_txn, &key, &val, 0);
+ CKERR(r);
+ }
+
+ /* verify the sort order with a cursor */
+ DBC *cursor;
+ r = db->cursor(db, null_txn, &cursor, 0);
+ CKERR(r);
+
+ //for (i=0; i<2*n; i++)
+ for (i=2*n-1; i>=0; i--)
+ expect(cursor, htonl(i), htonl(i));
+
+ r = cursor->c_close(cursor);
+ CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ int i;
+
+ for (i = 1; i <= (1<<16); i *= 2) {
+ test_reverse_compare(i);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_set_func_malloc.cc b/storage/tokudb/PerconaFT/src/tests/test_set_func_malloc.cc
new file mode 100644
index 00000000..e84437ce
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_set_func_malloc.cc
@@ -0,0 +1,123 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#define DONT_DEPRECATE_MALLOC
+#include "test.h"
+
+/* Test to see if setting malloc works. */
+
+#include <memory.h>
+#include <db.h>
+
+static int malloc_counter=0;
+static int realloc_counter=0;
+static int free_counter=0;
+
+static void *
+bmalloc (size_t s)
+{
+ malloc_counter++;
+ return malloc(s);
+}
+
+static void
+bfree (void*p)
+{
+ free_counter++;
+ free(p);
+}
+
+static void*
+brealloc (void*p, size_t s)
+{
+ realloc_counter++;
+ return realloc(p,s);
+}
+
+static void
+test1 (void)
+{
+ DB_ENV *env=0;
+ int r;
+ r = db_env_create(&env, 0); assert(r==0);
+ r = env->close(env, 0); assert(r==0);
+ assert(malloc_counter==0);
+ assert(free_counter==0);
+ assert(realloc_counter==0);
+
+ db_env_set_func_malloc(bmalloc);
+ r = db_env_create(&env, 0); assert(r==0);
+ r = env->close(env, 0); assert(r==0);
+ assert(malloc_counter>0);
+ assert(free_counter==0);
+ assert(realloc_counter==0);
+
+ malloc_counter = realloc_counter = free_counter = 0;
+
+ db_env_set_func_free(bfree);
+ db_env_set_func_malloc(NULL);
+ r = db_env_create(&env, 0); assert(r==0);
+ r = env->close(env, 0); assert(r==0);
+ assert(malloc_counter==0);
+ assert(free_counter>=0);
+ assert(realloc_counter==0);
+
+ db_env_set_func_malloc(bmalloc);
+ db_env_set_func_realloc(brealloc);
+ db_env_set_func_free(bfree);
+
+    // toku_malloc goes through the functions registered with
+    // db_env_set_func_malloc/realloc/free, as the counters below confirm.
+ malloc_counter = realloc_counter = free_counter = 0;
+
+ {
+ void *x = toku_malloc(5); assert(x); assert(malloc_counter==1 && free_counter==0 && realloc_counter==0);
+ x = toku_realloc(x, 6); assert(x); assert(malloc_counter==1 && free_counter==0 && realloc_counter==1);
+ toku_free(x); assert(malloc_counter==1 && free_counter==1 && realloc_counter==1);
+ }
+
+ db_env_set_func_malloc(NULL);
+ db_env_set_func_realloc(NULL);
+ db_env_set_func_free(NULL);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__)))
+{
+ test1();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_simple_read_txn.cc b/storage/tokudb/PerconaFT/src/tests/test_simple_read_txn.cc
new file mode 100644
index 00000000..69e08678
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_simple_read_txn.cc
@@ -0,0 +1,97 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+
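+// Rule covered below: a DB_TXN_READ_ONLY child cannot be created under a
+// read-write parent (txn_begin returns EINVAL); every other parent/child
+// combination, including any child of a read-only parent, is allowed.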
+static void test_read_txn_creation(DB_ENV* env, uint32_t iso_flags) {
+ int r;
+ DB_TXN* parent_txn = NULL;
+ DB_TXN* child_txn = NULL;
+ r = env->txn_begin(env, 0, &parent_txn, iso_flags);
+ CKERR(r);
+ r = env->txn_begin(env, parent_txn, &child_txn, iso_flags | DB_TXN_READ_ONLY);
+ CKERR2(r, EINVAL);
+ r = env->txn_begin(env, parent_txn, &child_txn, iso_flags);
+ CKERR(r);
+ r = child_txn->commit(child_txn, 0);
+ CKERR(r);
+ r = parent_txn->commit(parent_txn, 0);
+ CKERR(r);
+
+ r = env->txn_begin(env, 0, &parent_txn, iso_flags | DB_TXN_READ_ONLY);
+ CKERR(r);
+ r = env->txn_begin(env, parent_txn, &child_txn, iso_flags | DB_TXN_READ_ONLY);
+ CKERR(r);
+ r = child_txn->commit(child_txn, 0);
+ CKERR(r);
+ r = env->txn_begin(env, parent_txn, &child_txn, iso_flags);
+ CKERR(r);
+ r = child_txn->commit(child_txn, 0);
+ CKERR(r);
+ r = parent_txn->commit(parent_txn, 0);
+ CKERR(r);
+
+}
+
+int test_main(int argc, char * const argv[])
+{
+ int r;
+ DB_ENV * env;
+ (void) argc;
+ (void) argv;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
+
+ // set things up
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
+ CKERR(r);
+
+ test_read_txn_creation(env, 0);
+ test_read_txn_creation(env, DB_TXN_SNAPSHOT);
+ test_read_txn_creation(env, DB_READ_COMMITTED);
+ test_read_txn_creation(env, DB_READ_UNCOMMITTED);
+
+ r = env->close(env, 0);
+ CKERR(r);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress0.cc b/storage/tokudb/PerconaFT/src/tests/test_stress0.cc
new file mode 100644
index 00000000..037ffdd3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress0.cc
@@ -0,0 +1,187 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test is a micro stress test that does multithreaded updates on a fixed size table.
+// There is also a thread that scans the table with bulk fetch, ensuring the sum is zero.
+//
+// This test is targeted at stressing the locktree, hence the small table and many update threads.
+//
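+// The zero-sum check relies on the helpers in threaded_stress_test_helpers.h:
+// update_op is assumed to apply balanced +/- deltas so the table's values
+// always sum to zero, which is the invariant scan_op verifies.
+//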
+
+static int UU() lock_escalation_op(DB_TXN *UU(txn), ARG arg, void* operation_extra, void *UU(stats_extra)) {
+ invariant_null(operation_extra);
+ if (!arg->cli->nolocktree) {
+ toku_env_run_lock_escalation_for_test(arg->env);
+ }
+ return 0;
+}
+
+static int iterate_requests(DB *db, uint64_t txnid,
+ const DBT *left_key, const DBT *right_key,
+ uint64_t blocking_txnid,
+ uint64_t UU(start_time),
+ void *extra) {
+ invariant_null(extra);
+ invariant(db != nullptr);
+ invariant(txnid > 0);
+ invariant(left_key != nullptr);
+ invariant(right_key != nullptr);
+ invariant(blocking_txnid > 0);
+ invariant(txnid != blocking_txnid);
+ if (rand() % 5 == 0) {
+ usleep(100);
+ }
+ return 0;
+}
+
+static int UU() iterate_pending_lock_requests_op(DB_TXN *UU(txn), ARG arg, void *UU(operation_extra), void *UU(stats_extra)) {
+ DB_ENV *env = arg->env;
+ int r = env->iterate_pending_lock_requests(env, iterate_requests, nullptr);
+ invariant_zero(r);
+ return r;
+}
+
+static int iterate_txns(DB_TXN *txn,
+ iterate_row_locks_callback iterate_locks,
+ void *locks_extra, void *extra) {
+ uint64_t txnid = txn->id64(txn);
+ uint64_t client_id; void *client_extra;
+ txn->get_client_id(txn, &client_id, &client_extra);
+ invariant_null(extra);
+ invariant(txnid > 0);
+ invariant(client_id == 0);
+ DB *db;
+ DBT left_key, right_key;
+ while (iterate_locks(&db, &left_key, &right_key, locks_extra) == 0) {
+ invariant_notnull(db);
+ invariant_notnull(left_key.data);
+ invariant(left_key.size > 0);
+ invariant_notnull(right_key.data);
+ invariant(right_key.size > 0);
+ if (rand() % 5 == 0) {
+ usleep(50);
+ }
+ memset(&left_key, 0, sizeof(DBT));
+ memset(&right_key, 0, sizeof(DBT));
+ }
+ return 0;
+}
+
+static int UU() iterate_live_transactions_op(DB_TXN *UU(txn), ARG arg, void *UU(operation_extra), void *UU(stats_extra)) {
+ DB_ENV *env = arg->env;
+ int r = env->iterate_live_transactions(env, iterate_txns, nullptr);
+ invariant_zero(r);
+ return r;
+}
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int non_update_threads = 4;
+ const int num_threads = non_update_threads + cli_args->num_update_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+ struct scan_op_extra soe[1];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_op;
+
+ myargs[1].sleep_ms = 15L * 1000;
+ myargs[1].operation_extra = nullptr;
+ myargs[1].operation = lock_escalation_op;
+
+ myargs[2].sleep_ms = 1L * 1000;
+ myargs[2].operation_extra = nullptr;
+ myargs[2].operation = iterate_pending_lock_requests_op;
+
+ myargs[3].sleep_ms = 1L * 1000;
+ myargs[3].operation_extra = nullptr;
+ myargs[3].operation = iterate_live_transactions_op;
+
+ // make the threads that update the db
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ for (int i = non_update_threads; i < num_threads; ++i) {
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_op;
+ myargs[i].do_prepare = false;
+        // the first update thread (the only i with i < 5, since i starts
+        // at non_update_threads == 4) will prelock ranges before doing
+        // sequential updates. the rest of the threads will take point
+        // write locks on update as usual. this ensures both ranges and
+        // points are stressed.
+ myargs[i].prelock_updates = i < 5 ? true : false;
+ }
+
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+    // start from the default args, then set test-specific values and parse any overrides
+ args.num_update_threads = 8;
+ args.num_elements = 512;
+ args.txn_size = 16;
+ parse_stress_test_args(argc, argv, &args);
+
+ // we expect to get lock_notgranted op failures, and we
+ // don't want the overhead of fsync on small txns
+ args.crash_on_operation_failure = false;
+ args.env_args.sync_period = 100; // speed up the test by not fsyncing very often
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress1.cc b/storage/tokudb/PerconaFT/src/tests/test_stress1.cc
new file mode 100644
index 00000000..a9bd860e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress1.cc
@@ -0,0 +1,146 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test is a form of stress that does operations on a single dictionary:
+// We create a dictionary bigger than the cachetable (around 4x greater).
+// Then, we spawn a bunch of pthreads that do the following:
+// - scan dictionary forward with bulk fetch
+// - scan dictionary forward slowly
+// - scan dictionary backward with bulk fetch
+// - scan dictionary backward slowly
+// - Grow the dictionary with insertions
+// - do random point queries into the dictionary
+// With the small cachetable, this should produce quite a bit of churn in reading in and evicting nodes.
+// If the test runs to completion without crashing, we consider it a success. It also tests that snapshots
+// work correctly by verifying that table scans sum their vals to 0.
+//
+// This does NOT test:
+// - splits and merges
+// - multiple DBs
+//
+// Variables that are interesting to tweak and run:
+// - small cachetable
+// - number of elements
+//
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - some threads constantly updating random values
+ // - one thread doing table scan with bulk fetch
+ // - one thread doing table scan without bulk fetch
+ // - some threads doing random point queries
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 4 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+ struct scan_op_extra soe[4];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_op;
+
+ // make the forward slow scanner
+ soe[1].fast = false;
+ soe[1].fwd = true;
+ soe[1].prefetch = false;
+ myargs[1].operation_extra = &soe[1];
+ myargs[1].operation = scan_op;
+ myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
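+ // (snapshot, read-only transactions give the long scans a consistent view
+ // without taking write locks that would conflict with the updaters)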
+
+ // make the backward fast scanner
+ soe[2].fast = true;
+ soe[2].fwd = false;
+ soe[2].prefetch = false;
+ myargs[2].operation_extra = &soe[2];
+ myargs[2].operation = scan_op;
+ myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
+
+ // make the backward slow scanner
+ soe[3].fast = false;
+ soe[3].fwd = false;
+ soe[3].prefetch = false;
+ myargs[3].operation_extra = &soe[3];
+ myargs[3].operation = scan_op;
+
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ // make the guy that updates the db
+ for (int i = 4; i < 4 + cli_args->num_update_threads; ++i) {
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_op;
+ myargs[i].do_prepare = true;
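+ // do_prepare presumably makes each update transaction go through XA
+ // prepare before it commits, exercising the prepare path under load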
+ }
+
+ // make the guy that does point queries
+ for (int i = 4 + cli_args->num_update_threads; i < num_threads; i++) {
+ myargs[i].operation = ptquery_op;
+ myargs[i].do_prepare = true;
+ }
+
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ parse_stress_test_args(argc, argv, &args);
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress2.cc b/storage/tokudb/PerconaFT/src/tests/test_stress2.cc
new file mode 100644
index 00000000..5ea57625
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress2.cc
@@ -0,0 +1,140 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test is a form of stress that does operations on a single dictionary:
+// We create a dictionary bigger than the cachetable (around 4x greater).
+// Then, we spawn a bunch of pthreads that do the following:
+// - scan dictionary forward with bulk fetch
+// - scan dictionary forward slowly
+// - scan dictionary backward with bulk fetch
+// - scan dictionary backward slowly
+// - update existing values in the dictionary with db->put(DB_YESOVERWRITE)
+// - do random point queries into the dictionary
+// With the small cachetable, this should produce quite a bit of churn in reading in and evicting nodes.
+// If the test runs to completion without crashing, we consider it a success.
+//
+// This test differs from stress1 in that it grows the database through
+// update operations.
+//
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - one thread constantly updating random values
+ // - one thread doing table scan with bulk fetch
+ // - one thread doing table scan without bulk fetch
+ // - one thread doing random point queries
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 4 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+ struct scan_op_extra soe[4];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_op;
+
+ // make the forward slow scanner
+ soe[1].fast = false;
+ soe[1].fwd = true;
+ soe[1].prefetch = false;
+ myargs[1].operation_extra = &soe[1];
+ myargs[1].operation = scan_op;
+ myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
+
+ // make the backward fast scanner
+ soe[2].fast = true;
+ soe[2].fwd = false;
+ soe[2].prefetch = false;
+ myargs[2].operation_extra = &soe[2];
+ myargs[2].operation = scan_op;
+ myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
+
+ // make the backward slow scanner
+ soe[3].fast = false;
+ soe[3].fwd = false;
+ soe[3].prefetch = false;
+ myargs[3].operation_extra = &soe[3];
+ myargs[3].operation = scan_op;
+
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ // make the guy that updates the db
+ for (int i = 4; i < 4 + cli_args->num_update_threads; ++i) {
+ myargs[i].operation_extra = &uoe;
+ myargs[i].bounded_element_range = false;
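+ // an unbounded element range lets update keys fall outside the initial
+ // table, which is how this test grows the database over time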
+ myargs[i].operation = update_op;
+ }
+
+ // make the guy that does point queries
+ for (int i = 4 + cli_args->num_update_threads; i < num_threads; i++) {
+ myargs[i].bounded_element_range = false;
+ myargs[i].operation = ptquery_op_no_check;
+ }
+
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ parse_stress_test_args(argc, argv, &args);
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress3.cc b/storage/tokudb/PerconaFT/src/tests/test_stress3.cc
new file mode 100644
index 00000000..b8ca68ab
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress3.cc
@@ -0,0 +1,143 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test is a form of stress that does operations on a single dictionary:
+// We create a dictionary bigger than the cachetable (around 4x greater).
+// Then, we spawn a bunch of pthreads that do the following:
+// - scan dictionary forward with bulk fetch
+// - scan dictionary forward slowly
+// - scan dictionary backward with bulk fetch
+// - scan dictionary backward slowly
+// - update existing values in the dictionary with db->put(DB_YESOVERWRITE)
+// - do random point queries into the dictionary
+// With the small cachetable, this should produce quite a bit of churn in reading in and evicting nodes.
+// If the test runs to completion without crashing, we consider it a success.
+//
+// This test differs from stress2 in that it sends periodic update broadcasts.
+//
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - one thread constantly updating random values
+ // - one thread doing table scan with bulk fetch
+ // - one thread doing table scan without bulk fetch
+ // - one thread periodically sending update broadcasts
+ // - one thread doing random point queries
+ //
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 5 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+
+ struct scan_op_extra soe[4];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_op;
+
+ // make the forward slow scanner
+ soe[1].fast = false;
+ soe[1].fwd = true;
+ soe[1].prefetch = false;
+ myargs[1].operation_extra = &soe[1];
+ myargs[1].operation = scan_op;
+ myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
+
+ // make the backward fast scanner
+ soe[2].fast = true;
+ soe[2].fwd = false;
+ soe[2].prefetch = false;
+ myargs[2].operation_extra = &soe[2];
+ myargs[2].operation = scan_op;
+ myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
+
+ // make the backward slow scanner
+ soe[3].fast = false;
+ soe[3].fwd = false;
+ soe[3].prefetch = false;
+ myargs[3].operation_extra = &soe[3];
+ myargs[3].operation = scan_op;
+
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ // make the guy that updates the db
+ for (int i = 4; i < 4 + cli_args->num_update_threads; ++i) {
+ myargs[i].operation_extra = &uoe;
+ myargs[i].lock_type = STRESS_LOCK_SHARED;
+ myargs[i].operation = update_op;
+ }
+
+ // make the guy that sends update broadcasts
+ myargs[4 + cli_args->num_update_threads].lock_type = STRESS_LOCK_EXCL;
+ myargs[4 + cli_args->num_update_threads].sleep_ms = cli_args->update_broadcast_period_ms;
+ myargs[4 + cli_args->num_update_threads].operation = update_broadcast_op;
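+ // the regular updaters hold the stress lock shared, so they run
+ // concurrently with each other, while the broadcast thread takes it
+ // exclusively: each update broadcast runs with the other updaters paused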
+
+ // make the guys that do point queries
+ for (int i = 5 + cli_args->num_update_threads; i < num_threads; i++) {
+ myargs[i].operation = ptquery_op;
+ }
+
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ parse_stress_test_args(argc, argv, &args);
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress4.cc b/storage/tokudb/PerconaFT/src/tests/test_stress4.cc
new file mode 100644
index 00000000..1405bfc1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress4.cc
@@ -0,0 +1,139 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test is a form of stress that does operations on a single dictionary:
+// We create a dictionary bigger than the cachetable (around 4x greater).
+// Then, we spawn a bunch of pthreads that do the following:
+// - scan dictionary forward with bulk fetch
+// - scan dictionary forward slowly
+// - scan dictionary backward with bulk fetch
+// - scan dictionary backward slowly
+// - update existing values in the dictionary with db->put(DB_YESOVERWRITE)
+// - do random point queries into the dictionary
+// With the small cachetable, this should produce quite a bit of churn in reading in and evicting nodes.
+// If the test runs to completion without crashing, we consider it a success.
+//
+// This test differs from stress2 in that it verifies the last value on an update.
+//
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ int n = cli_args->num_elements;
+ //
+ // the threads that we want:
+ // - one thread constantly updating random values
+ // - one thread doing table scan with bulk fetch
+ // - one thread doing table scan without bulk fetch
+ // - one thread doing random point queries
+ //
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 4 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+
+ struct scan_op_extra soe[4];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_op_no_check;
+
+ // make the forward slow scanner
+ soe[1].fast = false;
+ soe[1].fwd = true;
+ soe[1].prefetch = false;
+ myargs[1].operation_extra = &soe[1];
+ myargs[1].operation = scan_op_no_check;
+
+ // make the backward fast scanner
+ soe[2].fast = true;
+ soe[2].fwd = false;
+ soe[2].prefetch = false;
+ myargs[2].operation_extra = &soe[2];
+ myargs[2].operation = scan_op_no_check;
+
+ // make the backward slow scanner
+ soe[3].fast = false;
+ soe[3].fwd = false;
+ soe[3].prefetch = false;
+ myargs[3].operation_extra = &soe[3];
+ myargs[3].operation = scan_op_no_check;
+
+ // make the guy that updates the db
+ invariant(cli_args->num_update_threads == 1);
+ int *XCALLOC_N(n, update_history_buffer);
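+ // one slot per element records the last value written, so
+ // update_with_history_op can verify the previous value on each update;
+ // this is safe without locking only because there is a single update thread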
+ struct update_op_args uoe = get_update_op_args(cli_args, update_history_buffer);
+ myargs[4].operation = update_with_history_op;
+ myargs[4].operation_extra = &uoe;
+
+ // make the guys that do point queries
+ for (int i = 5; i < num_threads; i++) {
+ myargs[i].operation = ptquery_op;
+ }
+
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+
+ toku_free(update_history_buffer);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ parse_stress_test_args(argc, argv, &args);
+ args.num_update_threads = 1; // if we had more than 1 update thread, we would need locking for the update_history_buffer.
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress5.cc b/storage/tokudb/PerconaFT/src/tests/test_stress5.cc
new file mode 100644
index 00000000..89365d19
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress5.cc
@@ -0,0 +1,113 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // do insertions and queries with a loader lying around doing stuff
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 5 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+ struct scan_op_extra soe[2];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_op;
+ myargs[0].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
+
+ // make the forward slow scanner
+ soe[1].fast = false;
+ soe[1].fwd = true;
+ soe[1].prefetch = false;
+ myargs[1].operation_extra = &soe[1];
+ myargs[1].operation = scan_op;
+
+ // make the background threads that run a loader, keyrange queries, and get_key_after_bytes
+ struct loader_op_extra loe;
+ loe.soe = soe[0];
+ loe.num_dbs = 3;
+ myargs[2].operation_extra = &loe;
+ myargs[2].operation = loader_op;
+ myargs[3].operation = keyrange_op;
+ myargs[4].operation = get_key_after_bytes_op;
+
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ // make the guy that updates the db
+ for (int i = 5; i < 5 + cli_args->num_update_threads; ++i) {
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_op;
+ }
+
+ // make the guy that does point queries
+ for (int i = 5 + cli_args->num_update_threads; i < num_threads; i++) {
+ myargs[i].operation = ptquery_op;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ // default to a very short checkpointing period (1s) so checkpoints happen frequently during the test
+ args.env_args.checkpointing_period = 1;
+ parse_stress_test_args(argc, argv, &args);
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress6.cc b/storage/tokudb/PerconaFT/src/tests/test_stress6.cc
new file mode 100644
index 00000000..3082bdab
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress6.cc
@@ -0,0 +1,171 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+//
+// This test is a form of stress that does operations on a single dictionary:
+// We create a dictionary bigger than the cachetable (around 4x greater).
+// Then, we spawn a bunch of pthreads that do the following:
+// - scan dictionary forward with bulk fetch
+// - scan dictionary forward slowly
+// - scan dictionary backward with bulk fetch
+// - scan dictionary backward slowly
+// - update existing values in the dictionary with db->put(DB_YESOVERWRITE)
+// - do random point queries into the dictionary
+// With the small cachetable, this should produce quite a bit of churn in reading in and evicting nodes.
+// If the test runs to completion without crashing, we consider it a success.
+//
+// This test differs from stress2 in that one thread periodically closes,
+// removes, and recreates a random dictionary while the other threads keep
+// operating.
+//
+
+static int remove_and_recreate_me(DB_TXN *UU(txn), ARG arg, void* UU(operation_extra), void *UU(stats_extra)) {
+ int r;
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+ r = (db)->close(db, 0); CKERR(r);
+
+ char name[30];
+ ZERO_ARRAY(name);
+ get_ith_table_name(name, sizeof(name), db_index);
+
+ r = arg->env->dbremove(arg->env, null_txn, name, nullptr, 0);
+ CKERR(r);
+
+ r = db_create(&(arg->dbp[db_index]), arg->env, 0);
+ assert(r == 0);
+ // TODO: Need to call before_db_open_hook() and after_db_open_hook()
+ r = arg->dbp[db_index]->open(arg->dbp[db_index], null_txn, name, nullptr, DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+ return 0;
+}
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // the threads that we want:
+ // - one thread constantly updating random values
+ // - one thread doing table scan with bulk fetch
+ // - one thread doing table scan without bulk fetch
+ // - one thread doing random point queries
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 5 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+
+ struct scan_op_extra soe[4];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].lock_type = STRESS_LOCK_SHARED;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_op;
+
+ // make the forward slow scanner
+ soe[1].fast = false;
+ soe[1].fwd = true;
+ soe[1].prefetch = false;
+ myargs[1].lock_type = STRESS_LOCK_SHARED;
+ myargs[1].operation_extra = &soe[1];
+ myargs[1].operation = scan_op;
+
+ // make the backward fast scanner
+ soe[2].fast = true;
+ soe[2].fwd = false;
+ soe[2].prefetch = false;
+ myargs[2].lock_type = STRESS_LOCK_SHARED;
+ myargs[2].operation_extra = &soe[2];
+ myargs[2].operation = scan_op;
+
+ // make the backward slow scanner
+ soe[3].fast = false;
+ soe[3].fwd = false;
+ soe[3].prefetch = false;
+ myargs[3].lock_type = STRESS_LOCK_SHARED;
+ myargs[3].operation_extra = &soe[3];
+ myargs[3].operation = scan_op;
+
+ // make the guy that removes and recreates the db
+ myargs[4].lock_type = STRESS_LOCK_EXCL;
+ myargs[4].sleep_ms = 2000; // maybe make this a runtime param at some point
+ myargs[4].operation = remove_and_recreate_me;
+
+ // make the guy that updates the db
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ for (int i = 5; i < 5 + cli_args->num_update_threads; ++i) {
+ myargs[i].bounded_element_range = false;
+ myargs[i].lock_type = STRESS_LOCK_SHARED;
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_op;
+ }
+
+ // make the guy that does point queries
+ for (int i = 5 + cli_args->num_update_threads; i < num_threads; i++) {
+ myargs[i].lock_type = STRESS_LOCK_SHARED;
+ myargs[i].bounded_element_range = false;
+ myargs[i].operation = ptquery_op_no_check;
+ }
+
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ parse_stress_test_args(argc, argv, &args);
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress7.cc b/storage/tokudb/PerconaFT/src/tests/test_stress7.cc
new file mode 100644
index 00000000..f4267688
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress7.cc
@@ -0,0 +1,109 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // do insertions and queries with a loader lying around doing stuff
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 5 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+ struct scan_op_extra soe[2];
+
+ // make the forward fast scanner
+ soe[0].fast = true;
+ soe[0].fwd = true;
+ soe[0].prefetch = false;
+ myargs[0].operation_extra = &soe[0];
+ myargs[0].operation = scan_op;
+
+ // make the forward slow scanner
+ soe[1].fast = false;
+ soe[1].fwd = true;
+ soe[1].prefetch = false;
+ myargs[1].operation_extra = &soe[1];
+ myargs[1].operation = scan_op;
+
+ // make the guys that run hot optimize, keyrange, and frag stats in the background
+ myargs[2].operation = hot_op;
+ myargs[3].operation = keyrange_op;
+ myargs[4].operation = frag_op;
+ myargs[4].sleep_ms = 100;
+
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ // make the guy that updates the db
+ for (int i = 5; i < 5 + cli_args->num_update_threads; ++i) {
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_op;
+ }
+
+ // make the guy that does point queries
+ for (int i = 5 + cli_args->num_update_threads; i < num_threads; i++) {
+ myargs[i].operation = ptquery_op;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ // default to a very short checkpointing period (1s) so checkpoints happen frequently during the test
+ args.env_args.checkpointing_period = 1;
+ parse_stress_test_args(argc, argv, &args);
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress_hot_indexing.cc b/storage/tokudb/PerconaFT/src/tests/test_stress_hot_indexing.cc
new file mode 100644
index 00000000..6395f591
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress_hot_indexing.cc
@@ -0,0 +1,333 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+DB* hot_db;
+toku_mutex_t fops_lock;
+toku_mutex_t hi_lock;
+uint32_t gid_count;
+uint8_t hi_gid[DB_GID_SIZE];
+
+
+static int
+hi_put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ lazy_assert(src_db != NULL && dest_db != NULL);
+
+ if (dest_key->data) {
+ toku_free(dest_key->data);
+ dest_key->data = NULL;
+ }
+ if (dest_val->data) {
+ toku_free(dest_val->data);
+ dest_val->data = NULL;
+ }
+ dest_key->data = toku_xmemdup(src_key->data, src_key->size);
+ dest_key->size = src_key->size;
+ dest_val->data = toku_xmemdup(src_val->data, src_val->size);
+ dest_val->size = src_val->size;
+
+ return 0;
+}
+
+static int
+hi_del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, const DBT *src_key, const DBT* UU(src_data)) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ lazy_assert(src_db != NULL && dest_db != NULL);
+ if (dest_key->data) {
+ toku_free(dest_key->data);
+ dest_key->data = NULL;
+ }
+ dest_key->data = toku_xmemdup(src_key->data, src_key->size);
+ dest_key->size = src_key->size;
+
+ return 0;
+}
+
+
+static int hi_inserts(DB_TXN* UU(txn), ARG arg, void* UU(operation_extra), void *stats_extra) {
+ int r;
+ DB_TXN* hi_txn = NULL;
+ toku_mutex_lock(&fops_lock);
+ DB_ENV* env = arg->env;
+ DB* db = arg->dbp[0];
+ uint32_t flags[2];
+ flags[0] = 0;
+ flags[1] = 0;
+ DBT_ARRAY dest_keys[2];
+ DBT_ARRAY dest_vals[2];
+ for (int j = 0; j < 2; j++) {
+ toku_dbt_array_init(&dest_keys[j], 1);
+ toku_dbt_array_init(&dest_vals[j], 1);
+ }
+
+ DBT key, val;
+ uint8_t keybuf[arg->cli->key_size];
+ uint8_t valbuf[arg->cli->val_size];
+ dbt_init(&key, keybuf, sizeof keybuf);
+ dbt_init(&val, valbuf, sizeof valbuf);
+
+ int i;
+ r = env->txn_begin(env, NULL, &hi_txn, 0);
+ CKERR(r);
+ for (i = 0; i < 1000; i++) {
+ DB* dbs[2];
+ toku_mutex_lock(&hi_lock);
+ dbs[0] = db;
+ dbs[1] = hot_db;
+ int num_dbs = hot_db ? 2 : 1;
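+ // hot_db is non-NULL only while the indexer thread has the hot index
+ // open, so rows go to both dictionaries during an index build and only
+ // to the primary otherwise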
+ // do a random insertion. the assertion comes from the fact
+ // that the code used to generate a random key and mod it
+ // by the table size manually. fill_key_buf_random will
+ // do this iff arg->bounded_element_range is true.
+ invariant(arg->bounded_element_range);
+ fill_key_buf_random(arg->random_data, keybuf, arg);
+ fill_val_buf_random(arg->random_data, valbuf, arg->cli);
+ r = env->put_multiple(
+ env,
+ db,
+ hi_txn,
+ &key,
+ &val,
+ num_dbs,
+ dbs,
+ dest_keys,
+ dest_vals,
+ flags
+ );
+ toku_mutex_unlock(&hi_lock);
+ if (r != 0) {
+ goto cleanup;
+ }
+ }
+cleanup:
+ for (int j = 0; j < 2; j++) {
+ toku_dbt_array_destroy(&dest_keys[j]);
+ toku_dbt_array_destroy(&dest_vals[j]);
+ }
+ increment_counter(stats_extra, PUTS, i);
+ gid_count++;
+ uint32_t *hi_gid_count_p = cast_to_typeof(hi_gid_count_p) hi_gid; // make gcc --happy about -Wstrict-aliasing
+ *hi_gid_count_p = gid_count;
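+ // prepare with a fresh gid, then randomly commit or abort so both
+ // post-prepare code paths get exercised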
+ int rr = hi_txn->prepare(hi_txn, hi_gid, 0);
+ CKERR(rr);
+ if (r || (random() % 2)) {
+ rr = hi_txn->abort(hi_txn);
+ CKERR(rr);
+ }
+ else {
+ rr = hi_txn->commit(hi_txn, 0);
+ CKERR(rr);
+ }
+ toku_mutex_unlock(&fops_lock);
+ return r;
+}
+
+static int indexer_maybe_quit_poll(void *UU(poll_extra), float UU(progress)) {
+ return run_test ? 0 : TOKUDB_CANCELED;
+}
+
+static int hi_create_index(DB_TXN* UU(txn), ARG arg, void* UU(operation_extra), void* UU(stats_extra)) {
+ int r;
+ DB_TXN* hi_txn = NULL;
+ DB_ENV* env = arg->env;
+ DB* db = arg->dbp[0];
+ DB_INDEXER* indexer = NULL;
+ r = env->txn_begin(env, NULL, &hi_txn, 0);
+ CKERR(r);
+ toku_mutex_lock(&hi_lock);
+ assert(hot_db == NULL);
+ r = db_create(&hot_db, env, 0);
+ CKERR(r);
+ r = hot_db->set_flags(hot_db, 0);
+ CKERR(r);
+ r = hot_db->set_pagesize(hot_db, arg->cli->env_args.node_size);
+ CKERR(r);
+ r = hot_db->set_readpagesize(hot_db, arg->cli->env_args.basement_node_size);
+ CKERR(r);
+ r = hot_db->open(hot_db, NULL, "hotindex_db", NULL, DB_BTREE, DB_CREATE | DB_IS_HOT_INDEX, 0666);
+ CKERR(r);
+ uint32_t db_flags = 0;
+ uint32_t indexer_flags = 0;
+
+ r = env->create_indexer(
+ env,
+ hi_txn,
+ &indexer,
+ arg->dbp[0],
+ 1,
+ &hot_db,
+ &db_flags,
+ indexer_flags
+ );
+ CKERR(r);
+ toku_mutex_unlock(&hi_lock);
+
+ r = indexer->set_poll_function(indexer, indexer_maybe_quit_poll, nullptr);
+ CKERR(r);
+
+ r = indexer->build(indexer);
+ CKERR2s(r, 0, TOKUDB_CANCELED);
+
+ toku_mutex_lock(&hi_lock);
+ r = indexer->close(indexer);
+ CKERR(r);
+ toku_mutex_unlock(&hi_lock);
+
+ r = hi_txn->commit(hi_txn, 0);
+ hi_txn = NULL;
+ CKERR(r);
+
+ // now do a scan to make sure hot index is good
+ DB_TXN* scan_txn = NULL;
+ DBC* main_cursor = NULL;
+ DBC* hi_cursor = NULL;
+ r = env->txn_begin(env, NULL, &scan_txn, DB_TXN_SNAPSHOT);
+ CKERR(r);
+ r = db->cursor(db, scan_txn, &main_cursor, 0);
+ CKERR(r);
+ r = hot_db->cursor(hot_db, scan_txn, &hi_cursor, 0);
+ CKERR(r);
+ DBT key1, key2, val1, val2;
+ memset(&key1, 0, sizeof key1);
+ memset(&val1, 0, sizeof val1);
+ memset(&key2, 0, sizeof key2);
+ memset(&val2, 0, sizeof val2);
+ uint64_t count = 0;
+ while(r != DB_NOTFOUND) {
+ if (count++ % 256 == 0 && !run_test) {
+ r = TOKUDB_CANCELED;
+ break;
+ }
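+ // (checking run_test every 256 rows lets this verification scan stop
+ // promptly when the test is shutting down)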
+ // get next from both cursors and assert they are equal
+ int r1 = main_cursor->c_get(
+ main_cursor,
+ &key1,
+ &val1,
+ DB_NEXT
+ );
+ int r2 = hi_cursor->c_get(
+ hi_cursor,
+ &key2,
+ &val2,
+ DB_NEXT
+ );
+ assert(r1 == r2);
+ r = r1;
+ if (r != DB_NOTFOUND) {
+ assert(key1.size == key2.size);
+ assert(val1.size == val2.size);
+ assert(memcmp(key1.data, key2.data, key1.size) == 0);
+ assert(memcmp(val1.data, val2.data, val1.size) == 0);
+ }
+ }
+ CKERR2s(r, DB_NOTFOUND, TOKUDB_CANCELED);
+ r = main_cursor->c_close(main_cursor);
+ CKERR(r);
+ r = hi_cursor->c_close(hi_cursor);
+ CKERR(r);
+ r = scan_txn->commit(scan_txn, 0);
+ CKERR(r);
+
+ // grab lock and close hot_db, set it to NULL
+ toku_mutex_lock(&hi_lock);
+ r = hot_db->close(hot_db, 0);
+ CKERR(r);
+ hot_db = NULL;
+ toku_mutex_unlock(&hi_lock);
+
+ toku_mutex_lock(&fops_lock);
+ r = env->dbremove(env, NULL, "hotindex_db", NULL, 0);
+ toku_mutex_unlock(&fops_lock);
+ CKERR(r);
+ return 0;
+}
+
+//
+// purpose of this stress test is to run concurrent insertions via put_multiple
+// while another thread repeatedly builds a hot index and then scans both
+// dictionaries to check that the index matches the primary
+//
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 2;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+ myargs[0].operation = hi_inserts;
+ myargs[1].operation = hi_create_index;
+
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int test_main(int argc, char *const argv[]) {
+ gid_count = 0;
+ memset(hi_gid, 0, sizeof(hi_gid));
+ toku_mutex_init(toku_uninstrumented, &hi_lock, nullptr);
+ toku_mutex_init(toku_uninstrumented, &fops_lock, nullptr);
+ hot_db = NULL;
+ struct cli_args args = get_default_args();
+ // default to no point query threads; this test supplies its own two operations
+ args.num_ptquery_threads = 0;
+ parse_stress_test_args(argc, argv, &args);
+ args.num_DBs = 1;
+ args.crash_on_operation_failure = false;
+ args.env_args.generate_del_callback = hi_del_callback;
+ args.env_args.generate_put_callback = hi_put_callback;
+ stress_test_main(&args);
+ toku_mutex_destroy(&hi_lock);
+ toku_mutex_destroy(&fops_lock);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress_openclose.cc b/storage/tokudb/PerconaFT/src/tests/test_stress_openclose.cc
new file mode 100644
index 00000000..b4d56794
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress_openclose.cc
@@ -0,0 +1,56 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "stress_openclose.h"
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ parse_stress_test_args(argc, argv, &args);
+ // checkpointing is a part of the ref count, so do it often
+ args.env_args.checkpointing_period = 5;
+ // very small dbs, so verification scans are short and sweet
+ args.num_elements = 1000;
+ // it's okay for update to get DB_LOCK_NOTGRANTED, etc.
+ args.crash_on_operation_failure = false;
+
+ // just run the stress test, no crashing and recovery test
+ stress_openclose_crash_at_end = false;
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_stress_with_verify.cc b/storage/tokudb/PerconaFT/src/tests/test_stress_with_verify.cc
new file mode 100644
index 00000000..cdd98739
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_stress_with_verify.cc
@@ -0,0 +1,110 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <toku_pthread.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+#include "threaded_stress_test_helpers.h"
+
+
+//
+// purpose of this stress test is to do a bunch of splitting and merging
+// and run db->verify periodically to make sure the db is in a good state
+//
+
+static void
+stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
+ //
+ // do insertions and queries with a loader lying around doing stuff
+ //
+
+ if (verbose) printf("starting creation of pthreads\n");
+ const int num_threads = 4 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
+ struct arg myargs[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ arg_init(&myargs[i], dbp, env, cli_args);
+ }
+ // make the forward fast scanner
+ struct scan_op_extra soe;
+ soe.fast = true;
+ soe.fwd = true;
+ soe.prefetch = false;
+ myargs[0].operation_extra = &soe;
+ myargs[0].lock_type = STRESS_LOCK_SHARED;
+ myargs[0].operation = scan_op;
+
+ // make the thread that periodically runs db->verify
+ myargs[1].lock_type = STRESS_LOCK_EXCL;
+ myargs[1].sleep_ms = 3000; // maybe make this a runtime param at some point
+ myargs[1].operation = verify_op;
+
+ struct update_op_args uoe = get_update_op_args(cli_args, NULL);
+ for (int i = 2; i < 2 + cli_args->num_update_threads; ++i) {
+ myargs[i].lock_type = STRESS_LOCK_SHARED;
+ myargs[i].operation_extra = &uoe;
+ myargs[i].operation = update_op;
+ }
+
+ // make the guy that does point queries
+ for (int i = 2 + cli_args->num_update_threads; i < num_threads; i++) {
+ myargs[i].lock_type = STRESS_LOCK_SHARED;
+ myargs[i].operation = ptquery_op;
+ }
+ run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ struct cli_args args = get_default_args();
+ // default to a very short checkpointing period (1s) so checkpoints happen frequently during the test
+ args.env_args.checkpointing_period = 1;
+ args.num_elements = 2000; // default to a small number of elements so verification scans stay short
+ args.num_ptquery_threads = 0;
+ parse_stress_test_args(argc, argv, &args);
+ stress_test_main(&args);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_thread_flags.cc b/storage/tokudb/PerconaFT/src/tests/test_thread_flags.cc
new file mode 100644
index 00000000..a813d94b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_thread_flags.cc
@@ -0,0 +1,132 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+#include <memory.h>
+
+const char *dbfile = "test.db";
+const char *dbname = 0;
+
+static int
+db_put (DB *db, int k, int v) {
+ DBT key, val;
+ int r = db->put(db, 0, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ return r;
+}
+
+static int
+db_get (DB *db, int k, int expectv, int val_flags) {
+ int v;
+ DBT key, val;
+ memset(&val, 0, sizeof val); val.flags = val_flags;
+ if (val.flags == DB_DBT_USERMEM) {
+ val.ulen = sizeof v; val.data = &v;
+ }
+ int r = db->get(db, 0, dbt_init(&key, &k, sizeof k), &val, 0);
+ if (r == 0) {
+ assert(val.size == sizeof v);
+ if ((val.flags & DB_DBT_USERMEM) == 0) memcpy(&v, val.data, val.size);
+ assert(v == expectv);
+ } else {
+ if (verbose) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ }
+ if (val.flags & (DB_DBT_MALLOC|DB_DBT_REALLOC))
+ toku_free(val.data);
+ return r;
+}
+
+static void
+test_db_create (void) {
+ int r;
+
+ unlink(dbfile);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ r = db->open(db, 0, dbfile, dbname, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+ r = db_put(db, htonl(1), 1); assert(r == 0);
+ r = db_get(db, htonl(1), 1, 0); assert(r == 0);
+ r = db_get(db, htonl(1), 1, DB_DBT_USERMEM); assert(r == 0);
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+static void
+test_db_thread (void) {
+ int r;
+
+ unlink(dbfile);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_THREAD, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ db->set_errfile(db,0); // Turn off those annoying errors
+ r = db->open(db, 0, dbfile, dbname, DB_BTREE, DB_CREATE + DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+ r = db_put(db, htonl(1), 1); assert(r == 0);
+ r = db_get(db, htonl(1), 1, 0); assert(r == EINVAL);
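+ // with DB_THREAD, a get must use DB_DBT_MALLOC, DB_DBT_REALLOC, or
+ // DB_DBT_USERMEM for the returned value; a plain DBT is rejected with
+ // EINVAL because handing back a pointer into the db is not thread safe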
+ r = db_get(db, htonl(1), 1, DB_DBT_MALLOC); assert(r == 0);
+ r = db_get(db, htonl(1), 1, DB_DBT_REALLOC); assert(r == 0);
+ r = db_get(db, htonl(1), 1, DB_DBT_USERMEM); assert(r == 0);
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ test_db_create();
+ test_db_thread();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_thread_insert.cc b/storage/tokudb/PerconaFT/src/tests/test_thread_insert.cc
new file mode 100644
index 00000000..a266de63
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_thread_insert.cc
@@ -0,0 +1,174 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+#include <toku_pthread.h>
+
+static inline unsigned int getmyid(void) {
+ return toku_os_gettid();
+}
+
+typedef unsigned int my_t;
+
+struct db_inserter {
+ toku_pthread_t tid;
+ DB *db;
+ my_t startno, endno;
+ int do_exit;
+};
+
+static int
+db_put (DB *db, my_t k, my_t v) {
+ DBT key, val;
+ int r = db->put(db, 0, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ return r;
+}
+
+static void *
+do_inserts (void *arg) {
+ struct db_inserter *mywork = (struct db_inserter *) arg;
+ if (verbose) {
+ toku_pthread_t self = toku_pthread_self();
+ printf("%lu:%u:do_inserts:start:%u-%u\n", *(unsigned long*)&self, getmyid(), mywork->startno, mywork->endno);
+ }
+ my_t i;
+ for (i=mywork->startno; i < mywork->endno; i++) {
+ int r = db_put(mywork->db, htonl(i), i); assert(r == 0);
+ }
+
+ if (verbose) {
+ toku_pthread_t self = toku_pthread_self();
+ printf("%lu:%u:do_inserts:end\n", *(unsigned long*)&self, getmyid());
+ }
+ // Don't call toku_pthread_exit(), since it has a memory leak.
+ // if (mywork->do_exit) toku_pthread_exit(arg);
+ return 0;
+}
+
+static int
+usage (void) {
+ fprintf(stderr, "test [-n NTUPLES] [-p NTHREADS]\n");
+ fprintf(stderr, "default NTUPLES=1000000\n");
+ fprintf(stderr, "default NTHREADS=2\n");
+ return 1;
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ const char *dbfile = "test.db";
+ const char *dbname = "main";
+ int nthreads = 2;
+ my_t n = 1000000;
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ int i;
+ for (i=1; i<argc; i++) {
+ const char *arg = argv[i];
+ if (0 == strcmp(arg, "-h") || 0 == strcmp(arg, "--help")) {
+ return usage();
+ }
+ if (0 == strcmp(arg, "-v") || 0 == strcmp(arg, "--verbose")) {
+ verbose = 1;
+ continue;
+ }
+ if (0 == strcmp(arg, "-p")) {
+ if (i+1 >= argc) return usage();
+ nthreads = atoi(argv[++i]);
+ continue;
+ }
+ if (0 == strcmp(arg, "-n")) {
+ if (i+1 >= argc) return usage();
+ n = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ DB_ENV *env;
+
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->set_cachesize(env, 0, 128000000, 1); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE + DB_THREAD + DB_PRIVATE + DB_INIT_MPOOL + DB_INIT_LOCK, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+
+ DB *db;
+
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, 0, dbfile, dbname, DB_BTREE, DB_CREATE + DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+
+ struct db_inserter work[nthreads];
+
+ for (i=0; i<nthreads; i++) {
+ work[i].db = db;
+ work[i].startno = i*(n/nthreads);
+ work[i].endno = work[i].startno + (n/nthreads);
+ work[i].do_exit = 1;
+ if (i+1 == nthreads)
+ work[i].endno = n;
+ }
+
+ if (verbose)
+ printf("pid:%d\n", toku_os_getpid());
+
+ for (i = 1; i < nthreads; i++) {
+ r = toku_pthread_create(
+ toku_uninstrumented, &work[i].tid, nullptr, do_inserts, &work[i]);
+ assert(r == 0);
+ }
+
+ work[0].do_exit = 0;
+ do_inserts(&work[0]);
+
+ for (i=1; i<nthreads; i++) {
+ void *ret;
+ r = toku_pthread_join(work[i].tid, &ret); assert(r == 0);
+ }
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+
+ return 0;
+}
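The loop in test_main above splits the n tuples into contiguous ranges, one per worker, with the last worker absorbing any remainder so every key is inserted exactly once. A minimal standalone sketch of that partitioning, using std::thread in place of the toku_pthread wrappers (the names and the stand-in insert_range body are illustrative assumptions, not code from PerconaFT):

#include <thread>
#include <vector>
#include <cstdio>

// Insert keys in [start, end); the real test calls db_put(db, htonl(i), i) here.
static void insert_range(unsigned start, unsigned end) {
    for (unsigned i = start; i < end; i++) {
        // database work elided in this sketch
    }
    std::printf("range %u-%u done\n", start, end);
}

int main() {
    const unsigned n = 1000000, nthreads = 2;
    std::vector<std::thread> workers;
    for (unsigned t = 0; t < nthreads; t++) {
        unsigned start = t * (n / nthreads);
        // The last worker takes everything up to n, mirroring work[i].endno = n above.
        unsigned end = (t + 1 == nthreads) ? n : start + n / nthreads;
        workers.emplace_back(insert_range, start, end);
    }
    for (auto &w : workers) w.join();
    return 0;
}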
diff --git a/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt.cc b/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt.cc
new file mode 100644
index 00000000..8dadb0ee
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt.cc
@@ -0,0 +1,132 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that a descriptor change committed from the checkpoint callback is reflected after the checkpoint
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+ assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ DB* db = NULL;
+
+ DBT change_descriptor;
+ memset(&change_descriptor, 0, sizeof(change_descriptor));
+ change_descriptor.size = sizeof(eight_byte_desc);
+ change_descriptor.data = &eight_byte_desc;
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ IN_TXN_COMMIT(env, NULL, txn_change, 0, {
+ { int chk_r = db->change_descriptor(db, txn_change, &change_descriptor, 0); CKERR(chk_r); }
+ assert_desc_eight(db);
+ });
+ assert_desc_eight(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static void run_test(void) {
+ DB* db = NULL;
+
+ DBT orig_desc;
+ memset(&orig_desc, 0, sizeof(orig_desc));
+ orig_desc.size = sizeof(four_byte_desc);
+ orig_desc.data = &four_byte_desc;
+ // create the db and set its initial (four-byte) descriptor inside a transaction
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ assert(db->descriptor == NULL);
+ { int chk_r = db->open(db, txn_create, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn_create, &orig_desc, 0); CKERR(chk_r); }
+ assert_desc_four(db);
+ });
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_eight(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ db = NULL;
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt2.cc b/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt2.cc
new file mode 100644
index 00000000..612c352d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt2.cc
@@ -0,0 +1,132 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that a descriptor change aborted from the checkpoint callback leaves the original descriptor in place
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+ assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ DB* db = NULL;
+
+ DBT change_descriptor;
+ memset(&change_descriptor, 0, sizeof(change_descriptor));
+ change_descriptor.size = sizeof(eight_byte_desc);
+ change_descriptor.data = &eight_byte_desc;
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ IN_TXN_ABORT(env, NULL, txn_change, 0, {
+ { int chk_r = db->change_descriptor(db, txn_change, &change_descriptor, 0); CKERR(chk_r); }
+ assert_desc_eight(db);
+ });
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static void run_test(void) {
+ DB* db = NULL;
+
+ DBT orig_desc;
+ memset(&orig_desc, 0, sizeof(orig_desc));
+ orig_desc.size = sizeof(four_byte_desc);
+ orig_desc.data = &four_byte_desc;
+ // create the db and set its initial (four-byte) descriptor inside a transaction
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ assert(db->descriptor == NULL);
+ { int chk_r = db->open(db, txn_create, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn_create, &orig_desc, 0); CKERR(chk_r); }
+ assert_desc_four(db);
+ });
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ db = NULL;
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt3.cc b/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt3.cc
new file mode 100644
index 00000000..43569b2a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt3.cc
@@ -0,0 +1,132 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that a descriptor change committed from the second checkpoint callback (db_env_set_checkpoint_callback2) is reflected after the checkpoint
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+ assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ DB* db = NULL;
+
+ DBT change_descriptor;
+ memset(&change_descriptor, 0, sizeof(change_descriptor));
+ change_descriptor.size = sizeof(eight_byte_desc);
+ change_descriptor.data = &eight_byte_desc;
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ IN_TXN_COMMIT(env, NULL, txn_change, 0, {
+ { int chk_r = db->change_descriptor(db, txn_change, &change_descriptor, 0); CKERR(chk_r); }
+ assert_desc_eight(db);
+ });
+ assert_desc_eight(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback2(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static void run_test(void) {
+ DB* db = NULL;
+
+ DBT orig_desc;
+ memset(&orig_desc, 0, sizeof(orig_desc));
+ orig_desc.size = sizeof(four_byte_desc);
+ orig_desc.data = &four_byte_desc;
+ // create the db and set its initial (four-byte) descriptor inside a transaction
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ assert(db->descriptor == NULL);
+ { int chk_r = db->open(db, txn_create, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn_create, &orig_desc, 0); CKERR(chk_r); }
+ assert_desc_four(db);
+ });
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_eight(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ db = NULL;
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt4.cc b/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt4.cc
new file mode 100644
index 00000000..612c352d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_trans_desc_during_chkpt4.cc
@@ -0,0 +1,132 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that a descriptor change aborted from the checkpoint callback leaves the original descriptor in place
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+ assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void checkpoint_callback_1(void * extra) {
+ assert(extra == NULL);
+ DB* db = NULL;
+
+ DBT change_descriptor;
+ memset(&change_descriptor, 0, sizeof(change_descriptor));
+ change_descriptor.size = sizeof(eight_byte_desc);
+ change_descriptor.data = &eight_byte_desc;
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ IN_TXN_ABORT(env, NULL, txn_change, 0, {
+ { int chk_r = db->change_descriptor(db, txn_change, &change_descriptor, 0); CKERR(chk_r); }
+ assert_desc_eight(db);
+ });
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ db_env_set_checkpoint_callback(checkpoint_callback_1, NULL);
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static void run_test(void) {
+ DB* db = NULL;
+
+ DBT orig_desc;
+ memset(&orig_desc, 0, sizeof(orig_desc));
+ orig_desc.size = sizeof(four_byte_desc);
+ orig_desc.data = &four_byte_desc;
+ // create the db and set its initial (four-byte) descriptor inside a transaction
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ assert(db->descriptor == NULL);
+ { int chk_r = db->open(db, txn_create, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn_create, &orig_desc, 0); CKERR(chk_r); }
+ assert_desc_four(db);
+ });
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ { int chk_r = env->txn_checkpoint(env, 0, 0, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+
+ db = NULL;
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_transactional_descriptor.cc b/storage/tokudb/PerconaFT/src/tests/test_transactional_descriptor.cc
new file mode 100644
index 00000000..86350f95
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_transactional_descriptor.cc
@@ -0,0 +1,227 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test transactional descriptor semantics: change_descriptor under commit and abort, and persistence across close and reopen
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+uint32_t four_byte_desc = 101;
+uint64_t eight_byte_desc = 10101;
+
+
+static void assert_desc_four (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(four_byte_desc));
+ assert(*(uint32_t *)(db->descriptor->dbt.data) == four_byte_desc);
+}
+static void assert_desc_eight (DB* db) {
+ assert(db->descriptor->dbt.size == sizeof(eight_byte_desc));
+ assert(*(uint64_t *)(db->descriptor->dbt.data) == eight_byte_desc);
+}
+
+static void run_test(void) {
+ DB* db = NULL;
+ DB* db2 = NULL;
+
+ DBT orig_desc;
+ memset(&orig_desc, 0, sizeof(orig_desc));
+ orig_desc.size = sizeof(four_byte_desc);
+ orig_desc.data = &four_byte_desc;
+ // create the db and set its initial (four-byte) descriptor inside a transaction
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ assert(db->descriptor == NULL);
+ { int chk_r = db->open(db, txn_create, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn_create, &orig_desc, 0); CKERR(chk_r); }
+ assert_desc_four(db);
+ });
+
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+ db2 = NULL;
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ db = NULL;
+
+ // verify that after closing and reopening db gets the same descriptor
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+
+ /********************************************************************/
+
+ // now let's test change_descriptor
+ DBT change_descriptor;
+ memset(&change_descriptor, 0, sizeof(change_descriptor));
+ change_descriptor.size = sizeof(eight_byte_desc);
+ change_descriptor.data = &eight_byte_desc;
+
+ // test that simple abort works
+ IN_TXN_ABORT(env, NULL, txn_change, 0, {
+ { int chk_r = db->change_descriptor(db, txn_change, &change_descriptor, 0); CKERR(chk_r); }
+ assert_desc_eight(db);
+ });
+ assert_desc_four(db);
+
+ // test that close/reopen gets the right descriptor
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ db = NULL;
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+ db2 = NULL;
+
+ // test that simple commit works
+ IN_TXN_COMMIT(env, NULL, txn_change, 0, {
+ { int chk_r = db->change_descriptor(db, txn_change, &change_descriptor, 0); CKERR(chk_r); }
+ assert_desc_eight(db);
+ });
+ assert_desc_eight(db);
+
+ // test that close/reopen gets the right descriptor
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ db = NULL;
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_eight(db);
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+ db2 = NULL;
+
+
+ change_descriptor.size = sizeof(four_byte_desc);
+ change_descriptor.data = &four_byte_desc;
+ // test that close then abort works
+ IN_TXN_ABORT(env, NULL, txn_change, 0, {
+ { int chk_r = db->change_descriptor(db, txn_change, &change_descriptor, 0); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ db = NULL;
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_change, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ db = NULL;
+ });
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_eight(db);
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_eight(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+ db2 = NULL;
+
+ // test that close then commit works
+ IN_TXN_COMMIT(env, NULL, txn_change, 0, {
+ { int chk_r = db->change_descriptor(db, txn_change, &change_descriptor, 0); CKERR(chk_r); }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ db = NULL;
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_change, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ db = NULL;
+ });
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db);
+ { int chk_r = db_create(&db2, env, 0); CKERR(chk_r); }
+ { int chk_r = db2->open(db2, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ assert_desc_four(db2);
+ { int chk_r = db2->close(db2, 0); CKERR(chk_r); }
+ db2 = NULL;
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ db = NULL;
+
+ IN_TXN_ABORT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ assert(db->descriptor == NULL);
+ { int chk_r = db->open(db, txn_create, "bar.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn_create, &change_descriptor, 0); CKERR(chk_r); }
+ // test some error cases
+ IN_TXN_COMMIT(env, txn_create, txn_create2, 0, {
+ { int chk_r = db->change_descriptor(db, txn_create, &change_descriptor, 0); CKERR2(chk_r, EINVAL); }
+ });
+ assert_desc_four(db);
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ db = NULL;
+ });
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ assert(db->descriptor == NULL);
+ { int chk_r = db->open(db, txn_create, "bar.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ { int chk_r = db->change_descriptor(db, txn_create, &change_descriptor, 0); CKERR(chk_r); }
+ assert_desc_four(db);
+ });
+ assert_desc_four(db);
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ db = NULL;
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test();
+ cleanup();
+ return 0;
+}
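The scenarios above reduce to one pattern: a change_descriptor made under a transaction is visible on the handle immediately, an aborted change reverts, and only a committed change survives close and reopen. A compressed sketch of that pattern, reusing the test.h harness macros and only calls already shown in this file (the handle setup and the descriptor value are illustrative):

#include "test.h"

static uint64_t new_desc = 10101;

static void descriptor_sketch(DB_ENV *env_arg) {
    DB *db = NULL;
    DBT d;
    memset(&d, 0, sizeof d);
    d.size = sizeof new_desc;
    d.data = &new_desc;

    { int chk_r = db_create(&db, env_arg, 0); CKERR(chk_r); }
    { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }

    // Aborted change: the handle shows the new descriptor inside the txn, the old one after abort.
    IN_TXN_ABORT(env_arg, NULL, txn_a, 0, {
        { int chk_r = db->change_descriptor(db, txn_a, &d, 0); CKERR(chk_r); }
    });

    // Committed change: the new descriptor persists on this handle and across a close/reopen.
    IN_TXN_COMMIT(env_arg, NULL, txn_c, 0, {
        { int chk_r = db->change_descriptor(db, txn_c, &d, 0); CKERR(chk_r); }
    });

    { int chk_r = db->close(db, 0); CKERR(chk_r); }
}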
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_abort5.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_abort5.cc
new file mode 100644
index 00000000..81d9c951
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_abort5.cc
@@ -0,0 +1,112 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+static void
+test_txn_abort (int n) {
+ if (verbose) printf("test_txn_abort:%d\n", n);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL + DB_INIT_LOG + DB_INIT_LOCK + DB_INIT_TXN + DB_PRIVATE + DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r != 0) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ assert(r == 0);
+
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+ r = txn->commit(txn, 0); assert(r == 0);
+
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+ int i;
+ for (i=0; i<n; i++) {
+ DBT key, val;
+ r = db->put(db, txn, dbt_init(&key, &i, sizeof i), dbt_init(&val, &i, sizeof i), 0);
+ if (r != 0) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ assert(r == 0);
+ }
+ r = txn->abort(txn);
+#if 0
+ assert(r == 0);
+#else
+ if (r != 0) printf("%s:%d:abort:%d\n", __FILE__, __LINE__, r);
+#endif
+ /* walk the db, should be empty */
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_FIRST);
+ assert(r == DB_NOTFOUND);
+ r = cursor->c_close(cursor); assert(r == 0);
+ r = txn->commit(txn, 0);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ int i;
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0 || strcmp(arg, "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ }
+ for (i=1; i<100; i++)
+ test_txn_abort(i);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_abort5a.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_abort5a.cc
new file mode 100644
index 00000000..301eed15
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_abort5a.cc
@@ -0,0 +1,133 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+test_txn_abort (int n) {
+ if (verbose>1) printf("%s %s:%d\n", __FILE__, __FUNCTION__, n);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL + DB_INIT_LOG + DB_INIT_LOCK + DB_INIT_TXN + DB_PRIVATE + DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r != 0) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ assert(r == 0);
+
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+ r = txn->commit(txn, 0); assert(r == 0);
+
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+ int i;
+ for (i=0; i<n; i++) {
+ DBT key, val;
+ int i2=htonl(i*2);
+ if (verbose>2) printf("put %d\n", i*2);
+ r = db->put(db, txn, dbt_init(&key, &i2, sizeof i2), dbt_init(&val, &i, sizeof i), 0);
+ if (r != 0) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ assert(r == 0);
+ }
+ r = txn->commit(txn, 0);
+
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+ for (i=0; i<n; i++) {
+ DBT key;
+ int i2=htonl(i*2);
+ if (verbose>2) printf("del %d\n", i*2);
+ r = db->del(db, txn, dbt_init(&key, &i2, sizeof i2), 0);
+ if (r != 0) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ assert(r == 0);
+ }
+ r = txn->abort(txn);
+ if (r != 0) printf("%s:%d:abort:%d\n", __FILE__, __LINE__, r);
+ assert(r == 0);
+ /* walk the db, even numbers should be there */
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ for (i=0; 1; i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r!=0) break;
+ if (verbose>2) printf("%u present\n", (uint32_t)ntohl(*(int*)key.data));
+ assert(key.size==4);
+ assert(ntohl(*(int*)key.data)==(unsigned int)(2*i));
+ }
+ assert(i==n);
+ r = cursor->c_close(cursor); assert(r == 0);
+ r = txn->commit(txn, 0);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ int i;
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0 || strcmp(arg, "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ }
+ if (verbose>0) printf("%s", __FILE__);
+ if (verbose>1) printf("\n");
+ for (i=1; i<100; i++)
+ test_txn_abort(i);
+ if (verbose>1) printf("%s OK\n", __FILE__);
+ if (verbose>0) printf(" OK\n");
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_abort6.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_abort6.cc
new file mode 100644
index 00000000..8350830b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_abort6.cc
@@ -0,0 +1,162 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+#define N_TXNS 4
+
+static void
+test_txn_abort (int n, int which_guys_to_abort) {
+ if (verbose>1) printf("test_txn_abort(%d,%x)\n", n, which_guys_to_abort);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL + DB_INIT_LOG + DB_INIT_LOCK + DB_INIT_TXN + DB_PRIVATE + DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r != 0) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ assert(r == 0);
+
+ DB *db;
+ {
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+ r = txn->commit(txn, 0); assert(r == 0);
+ }
+ {
+ DB_TXN *txns[N_TXNS];
+ {
+ int j;
+ for (j=0; j<N_TXNS; j++) {
+ r = env->txn_begin(env, 0, &txns[j], 0); assert(r == 0);
+ }
+ }
+
+ {
+ int i;
+ for (i=0; i<n; i++) {
+ int j;
+ for (j=N_TXNS; j>0; j--) {
+ if (i%j==0) { // This is guaranteed to be true when j==1, so someone will do it.
+ DBT key, val;
+ r = db->put(db, txns[j-1], dbt_init(&key, &i, sizeof i), dbt_init(&val, &i, sizeof i), 0);
+ if (r != 0) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ assert(r == 0);
+ goto didit;
+ }
+ }
+ toku_hard_crash_on_purpose();
+ didit: ;
+ }
+ }
+ {
+ int j;
+ for (j=0; j<N_TXNS; j++) {
+ if (which_guys_to_abort&(1<<j)) {
+ r = txns[j]->abort(txns[j]);
+ } else {
+ r = txns[j]->commit(txns[j], 0);
+ }
+ if (r != 0) printf("%s:%d:abort:%d\n", __FILE__, __LINE__, r);
+ assert(r == 0);
+ }
+ }
+ }
+ {
+ DB_TXN *txn;
+ int i;
+ r = env->txn_begin(env, 0, &txn, 0); assert(r==0);
+ if (verbose>1) printf("Now see what's there: which_guys_to_abort=%x: ", which_guys_to_abort);
+ for (i=0; i<n; i++) {
+ DBT key,val;
+ memset(&val, 0, sizeof val);
+ r = db->get(db, txn, dbt_init(&key, &i, sizeof i), &val, 0);
+ if (r==0) { if (verbose>1) printf(" %d", i); }
+ }
+ if (verbose>1) printf("\n");
+ for (i=0; i<n; i++) {
+ DBT key,val;
+ memset(&val, 0, sizeof val);
+ r = db->get(db, txn, dbt_init(&key, &i, sizeof i), &val, 0);
+ int j;
+ for (j=N_TXNS; j>0; j--) {
+ if (i%j==0) {
+ if (which_guys_to_abort&(1<<(j-1))) assert(r==DB_NOTFOUND);
+ else assert(r==0);
+ break;
+ }
+ }
+ }
+ r = txn->commit(txn, 0); assert(r==0);
+ }
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ int i,j;
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-v") == 0 || strcmp(arg, "--verbose") == 0) {
+ verbose++;
+ continue;
+ }
+ }
+ if (verbose>0) printf("%s:", __FILE__);
+ if (verbose==1) printf("\n");
+ for (j=0; j<(1<<N_TXNS); j++)
+ for (i=1; i<100; i*=2)
+ test_txn_abort(i, j);
+ if (verbose>0) printf("OK\n");
+ return 0;
+}
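The verification loop above relies on one rule: key i was inserted by the transaction with the largest index j in [1, N_TXNS] that divides i (txns[j-1]), so the key should remain only if that transaction committed, i.e. its bit is clear in which_guys_to_abort. A standalone sketch of just that rule (the example mask and key count are arbitrary):

#include <cstdio>

// Return the 0-based index of the txn that inserted key i in test_txn_abort6.cc.
static int owner_txn(int i, int n_txns) {
    for (int j = n_txns; j > 0; j--)
        if (i % j == 0) return j - 1;
    return 0; // unreachable: j == 1 always divides i
}

int main() {
    const int n_txns = 4, n = 12, which_guys_to_abort = 0x5; // abort txns 0 and 2
    for (int i = 0; i < n; i++) {
        int t = owner_txn(i, n_txns);
        bool survives = !(which_guys_to_abort & (1 << t));
        std::printf("key %2d -> txn %d -> %s\n", i, t,
                    survives ? "present" : "DB_NOTFOUND");
    }
    return 0;
}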
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_abort7.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_abort7.cc
new file mode 100644
index 00000000..0a446364
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_abort7.cc
@@ -0,0 +1,124 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+static void
+test_abort_create (void) {
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stdout);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL + DB_INIT_LOG + DB_INIT_LOCK + DB_INIT_TXN + DB_PRIVATE + DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r != 0) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ assert(r == 0);
+
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+
+ {
+ char *filename;
+ {
+ DBT dname;
+ DBT iname;
+ dbt_init(&dname, "test.db", sizeof("test.db"));
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &dname, &iname);
+ CKERR(r);
+ CAST_FROM_VOIDP(filename, iname.data);
+ assert(filename);
+ }
+ toku_struct_stat statbuf;
+ char fullfile[TOKU_PATH_MAX + 1];
+ r = toku_stat(toku_path_join(fullfile, 2, TOKU_TEST_FILENAME, filename),
+ &statbuf,
+ toku_uninstrumented);
+ assert(r == 0);
+ toku_free(filename);
+ }
+
+ r = db->close(db, 0);
+ r = txn->abort(txn); assert(r == 0);
+
+ {
+ {
+ DBT dname;
+ DBT iname;
+ dbt_init(&dname, "test.db", sizeof("test.db"));
+ dbt_init(&iname, NULL, 0);
+ iname.flags |= DB_DBT_MALLOC;
+ r = env->get_iname(env, &dname, &iname);
+ CKERR2(r, DB_NOTFOUND);
+ }
+ toku_struct_stat statbuf;
+ char fullfile[TOKU_PATH_MAX + 1];
+ r = toku_stat(
+ toku_path_join(fullfile, 2, TOKU_TEST_FILENAME, "test.db"),
+ &statbuf,
+ toku_uninstrumented);
+ assert(r != 0);
+ assert(errno == ENOENT);
+ }
+
+ r = env->close(env, 0); assert(r == 0);
+
+
+
+}
+
+int
+test_main(int UU(argc), char UU(*const argv[])) {
+ test_abort_create();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_begin_commit.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_begin_commit.cc
new file mode 100644
index 00000000..fb9cb7cd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_begin_commit.cc
@@ -0,0 +1,70 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <sys/stat.h>
+#include <db.h>
+
+
+int
+test_main(int UU(argc), char UU(*const argv[])) {
+ int r;
+ DB_ENV *env;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL + DB_INIT_LOG + DB_INIT_TXN + DB_PRIVATE + DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, 0, &txn, 0);
+ assert(r == 0);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_close_before_commit.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_close_before_commit.cc
new file mode 100644
index 00000000..87d76db5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_close_before_commit.cc
@@ -0,0 +1,89 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+// Recreate a mysqld crash by closing and opening a db within a transaction.
+// The crash occurs when writing a dirty cachetable pair, so we insert one
+// row.
+static void
+test_txn_close_before_commit (void) {
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ int r;
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stdout);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL + DB_INIT_LOG + DB_INIT_LOCK + DB_INIT_TXN + DB_PRIVATE + DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r != 0) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, NULL, "test.db", 0, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+
+ DBT key, val;
+ int k = 1, v = 1;
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+
+ // Close before commit
+ r = db->close(db, 0); assert(r == 0);
+
+ r = txn->commit(txn, 0); assert(r == 0);
+
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int UU(argc), char UU(*const argv[])) {
+ test_txn_close_before_commit();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_close_before_prepare_commit.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_close_before_prepare_commit.cc
new file mode 100644
index 00000000..80525fd3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_close_before_prepare_commit.cc
@@ -0,0 +1,92 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+// Recreate a mysqld crash by closing a db while a transaction that modified it
+// is still open and is later prepared and committed. The crash occurred when
+// writing a dirty cachetable pair, so we insert one row to dirty the pair.
+static void
+test_txn_close_before_prepare_commit (void) {
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ int r;
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stdout);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL + DB_INIT_LOG + DB_INIT_LOCK + DB_INIT_TXN + DB_PRIVATE + DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ if (r != 0) printf("%s:%d:%d:%s\n", __FILE__, __LINE__, r, db_strerror(r));
+ assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->open(db, NULL, "test.db", 0, DB_BTREE, DB_CREATE|DB_AUTO_COMMIT, S_IRWXU+S_IRWXG+S_IRWXO); assert(r == 0);
+
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); assert(r == 0);
+
+ DBT key, val;
+ int k = 1, v = 1;
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+
+ // Close before commit
+ r = db->close(db, 0); assert(r == 0);
+
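+    // Two-phase commit after the db handle is already closed: prepare the
+    // transaction with a dummy GID, then commit it.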
+ uint8_t gid[DB_GID_SIZE];
+ memset(gid, 1, DB_GID_SIZE);
+ r = txn->prepare(txn, gid, 0); assert(r == 0);
+ r = txn->commit(txn, 0); assert(r == 0);
+
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int UU(argc), char UU(*const argv[])) {
+ test_txn_close_before_prepare_commit();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_cursor_last.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_cursor_last.cc
new file mode 100644
index 00000000..5571281f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_cursor_last.cc
@@ -0,0 +1,249 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static int
+db_put (DB *db, DB_TXN *txn, int k, int v) {
+ DBT key, val;
+ return db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), DB_NOOVERWRITE);
+}
+
+static const char *db_error(int error) {
+ static char errorbuf[32];
+ switch (error) {
+ case DB_NOTFOUND: return "DB_NOTFOUND";
+ case DB_LOCK_DEADLOCK: return "DB_LOCK_DEADLOCK";
+ case DB_LOCK_NOTGRANTED: return "DB_LOCK_NOTGRANTED";
+ case DB_KEYEXIST: return "DB_KEYEXIST";
+ default:
+ sprintf(errorbuf, "%d", error);
+ return errorbuf;
+ }
+}
+
+/* t1 t2 l1 l2 p1 p2 c1 c2 */
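+/* legend (here and below): t<i> = begin txn i, l<i> = cursor DB_LAST under txn i,
+   p<i> = put under txn i, c<i> = commit or abort txn i */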
+static void
+test_txn_cursor_last_1 (int nrows) {
+ if (verbose) printf("test_txn_cursor_last_1:%d\n", nrows);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *db;
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.txn.cursor.last.1.ft_handle";
+
+ /* create the dup database file */
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+    r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOCK|DB_INIT_LOG|DB_THREAD|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); assert(r == 0);
+    db->set_errfile(db, stderr); // Send engine error messages to stderr
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE+DB_AUTO_COMMIT, 0666); assert(r == 0);
+ int i;
+ for (i=0; i<nrows; i++) {
+ int k = htonl(i);
+ int v = htonl(i);
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+ }
+
+ DB_TXN *t1;
+ r = env->txn_begin(env, null_txn, &t1, 0); assert(r == 0);
+ if (verbose) printf("t1:begin\n");
+
+ DBC *c1;
+ r = db->cursor(db, t1, &c1, 0); assert(r == 0);
+
+ DB_TXN *t2;
+ r = env->txn_begin(env, null_txn, &t2, 0); assert(r == 0);
+ if (verbose) printf("t2:begin\n");
+
+ DBC *c2;
+ r = db->cursor(db, t2, &c2, 0); assert(r == 0);
+
+ DBT k1; memset(&k1, 0, sizeof k1);
+ DBT v1; memset(&v1, 0, sizeof v1);
+ r = c1->c_get(c1, &k1, &v1, DB_LAST);
+ if (verbose) printf("c1:last:%s\n", db_error(r));
+
+ r = c1->c_close(c1); assert(r == 0);
+
+ DBT k2; memset(&k2, 0, sizeof k2);
+ DBT v2; memset(&v2, 0, sizeof v2);
+ r = c2->c_get(c2, &k2, &v2, DB_LAST);
+ if (verbose) printf("c2:last:%s\n", db_error(r));
+
+ r = c2->c_close(c2); assert(r == 0);
+
+ int r1 = db_put(db, t1, htonl(nrows), htonl(nrows));
+ if (verbose) printf("t1:put:%s\n", db_error(r1));
+
+ int r2 = db_put(db, t2, htonl(nrows), htonl(nrows));
+ if (verbose) printf("t2:put:%s\n", db_error(r2));
+
+ if (r1 == 0) {
+ r = t1->commit(t1, 0);
+ if (verbose) printf("t1:commit:%s\n", db_error(r));
+ } else {
+ r = t1->abort(t1);
+ if (verbose) printf("t1:abort:%s\n", db_error(r));
+ }
+
+ if (r2 == 0) {
+ r = t2->commit(t2, 0);
+ if (verbose) printf("t2:commit:%s\n", db_error(r));
+ } else {
+ r = t2->abort(t2);
+ if (verbose) printf("t2:abort:%s\n", db_error(r));
+ }
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+/* t1 t2 l1 p1 l2 c1 p2 c2 */
+static void
+test_txn_cursor_last_2 (int nrows) {
+ if (verbose) printf("test_txn_cursor_last_2:%d\n", nrows);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *db;
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.txn.cursor.last.1.ft_handle";
+
+ /* create the dup database file */
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOCK|DB_INIT_LOG|DB_THREAD|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); assert(r == 0);
+    db->set_errfile(db, stderr); // Send engine error messages to stderr
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE+DB_AUTO_COMMIT, 0666); assert(r == 0);
+ int i;
+ for (i=0; i<nrows; i++) {
+ int k = htonl(i);
+ int v = htonl(i);
+ DBT key, val;
+ r = db->put(db, null_txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+ }
+
+ DB_TXN *t1;
+ r = env->txn_begin(env, null_txn, &t1, 0); assert(r == 0);
+ if (verbose) printf("t1:begin\n");
+
+ DBC *c1;
+ r = db->cursor(db, t1, &c1, 0); assert(r == 0);
+
+ DB_TXN *t2;
+ r = env->txn_begin(env, null_txn, &t2, 0); assert(r == 0);
+ if (verbose) printf("t2:begin\n");
+
+ DBC *c2;
+ r = db->cursor(db, t2, &c2, 0); assert(r == 0);
+
+ DBT k1; memset(&k1, 0, sizeof k1);
+ DBT v1; memset(&v1, 0, sizeof v1);
+ r = c1->c_get(c1, &k1, &v1, DB_LAST);
+ if (verbose) printf("c1:last:%s\n", db_error(r));
+
+ r = c1->c_close(c1); assert(r == 0);
+
+ int r1 = db_put(db, t1, htonl(nrows), htonl(nrows));
+ if (verbose) printf("t1:put:%s\n", db_error(r1));
+
+ DBT k2; memset(&k2, 0, sizeof k2);
+ DBT v2; memset(&v2, 0, sizeof v2);
+ r = c2->c_get(c2, &k2, &v2, DB_LAST);
+ if (verbose) printf("c2:last:%s\n", db_error(r));
+
+ r = c2->c_close(c2); assert(r == 0);
+
+ if (r1 == 0) {
+ r = t1->commit(t1, 0);
+ if (verbose) printf("t1:commit:%s\n", db_error(r));
+ } else {
+ r = t1->abort(t1);
+ if (verbose) printf("t1:abort:%s\n", db_error(r));
+ }
+
+ int r2 = db_put(db, t2, htonl(nrows), htonl(nrows));
+ if (verbose) printf("t2:put:%s\n", db_error(r2));
+
+ if (r2 == 0) {
+ r = t2->commit(t2, 0);
+ if (verbose) printf("t2:commit:%s\n", db_error(r));
+ } else {
+ r = t2->abort(t2);
+ if (verbose) printf("t2:abort:%s\n", db_error(r));
+ }
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ test_txn_cursor_last_1(0);
+ test_txn_cursor_last_1(1);
+ test_txn_cursor_last_2(0);
+ test_txn_cursor_last_2(1);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_nested1.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_nested1.cc
new file mode 100644
index 00000000..d83cd15d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_nested1.cc
@@ -0,0 +1,173 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+#include <ft/txn/xids.h>
+#define MAX_NEST MAX_NESTED_TRANSACTIONS
+
+
+/*********************
+ *
+ * Purpose of this test is to exercise nested transactions in a basic way:
+ * Create MAX nested transactions, inserting a value at each level, verify:
+ *
+ * for i = 1 to MAX
+ * - txnid = begin()
+ * - txns[i] = txnid
+ * - insert, query
+ *
+ * for i = 1 to MAX
+ *     - txnid = txns[MAX - i + 1]   (unwind from innermost to outermost)
+ * - commit or abort(txnid), query
+ *
+ */
+
+static DB *db;
+static DB_ENV *env;
+
+static void
+setup_db (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, int_dbt_cmp); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_TXN | DB_PRIVATE | DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ {
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+}
+
+
+static void
+close_db (void) {
+ int r;
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+static void
+test_txn_nesting (int depth) {
+ int r;
+ if (verbose) { fprintf(stderr, "%s (%s):%d [depth = %d]\n", __FILE__, __FUNCTION__, __LINE__, depth); fflush(stderr); }
+
+ DBT key, val, observed_val;
+ dbt_init(&observed_val, NULL, 0);
+ int i;
+
+ DB_TXN * txns[depth];
+ DB_TXN * parent = NULL;
+
+ int vals[depth];
+
+ int mykey = 42;
+ dbt_init(&key, &mykey, sizeof mykey);
+
+
+ for (i = 0; i < depth; i++){
+ DB_TXN * this_txn;
+
+ if (verbose)
+ printf("Begin txn at level %d\n", i);
+ vals[i] = i;
+ dbt_init(&val, &vals[i], sizeof i);
+ r = env->txn_begin(env, parent, &this_txn, 0); CKERR(r);
+ txns[i] = this_txn;
+ parent = this_txn; // will be parent in next iteration
+ r = db->put(db, this_txn, &key, &val, 0); CKERR(r);
+
+ r = db->get(db, this_txn, &key, &observed_val, 0); CKERR(r);
+ assert(int_dbt_cmp(db, &val, &observed_val) == 0);
+ }
+
+ int which_val = depth-1;
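+    // which_val tracks the nesting level whose value should currently be
+    // visible: a commit promotes the innermost value to its parent, while an
+    // abort exposes the parent's own value again.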
+ for (i = depth-1; i >= 0; i--) {
+ //Query, verify the correct value is stored.
+ //Close (abort/commit) innermost transaction
+
+ if (verbose)
+ printf("Commit txn at level %d\n", i);
+
+ dbt_init(&observed_val, NULL, 0);
+ r = db->get(db, txns[i], &key, &observed_val, 0); CKERR(r);
+ dbt_init(&val, &vals[which_val], sizeof i);
+ assert(int_dbt_cmp(db, &val, &observed_val) == 0);
+
+ if (i % 2) {
+ r = txns[i]->commit(txns[i], DB_TXN_NOSYNC); CKERR(r);
+ //which_val does not change (it gets promoted)
+ }
+ else {
+ r = txns[i]->abort(txns[i]); CKERR(r);
+ which_val = i - 1;
+ }
+ txns[i] = NULL;
+ }
+ //Query, verify the correct value is stored.
+ r = db->get(db, NULL, &key, &observed_val, 0);
+ if (which_val == -1) CKERR2(r, DB_NOTFOUND);
+ else {
+ CKERR(r);
+ dbt_init(&val, &vals[which_val], sizeof i);
+ assert(int_dbt_cmp(db, &val, &observed_val) == 0);
+ }
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ setup_db();
+ test_txn_nesting(MAX_NEST);
+ close_db();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_nested2.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_nested2.cc
new file mode 100644
index 00000000..ffb9cb67
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_nested2.cc
@@ -0,0 +1,245 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+
+#include "src/tests/test.h"
+
+#include <ft/txn/xids.h>
+
+#define MAX_NEST MAX_TRANSACTION_RECORDS
+#define MAX_SIZE MAX_TRANSACTION_RECORDS
+
+uint8_t valbufs[MAX_NEST][MAX_SIZE];
+DBT vals [MAX_NEST];
+uint8_t keybuf [MAX_SIZE];
+DBT key;
+DB_TXN *txns [MAX_NEST];
+DB_TXN *txn_query;
+int which_expected;
+
+static void
+fillrandom(uint8_t buf[MAX_SIZE], uint32_t length) {
+ assert(length < MAX_SIZE);
+ uint32_t i;
+ for (i = 0; i < length; i++) {
+ buf[i] = random() & 0xFF;
+ }
+}
+
+static void
+initialize_values (void) {
+ int nest_level;
+ for (nest_level = 0; nest_level < MAX_NEST; nest_level++) {
+ fillrandom(valbufs[nest_level], nest_level);
+ dbt_init(&vals[nest_level], &valbufs[nest_level][0], nest_level);
+ }
+ uint32_t len = random() % MAX_SIZE;
+ fillrandom(keybuf, len);
+ dbt_init(&key, &keybuf[0], len);
+}
+
+
+/*********************
+ *
+ * Purpose of this test is to verify that nested transactions support the full
+ * range of possible values (one distinct value per nesting level).
+for test = 1 to MAX
+ create empty db
+ for nesting_level = 1 to MAX
+ - begin txn
+ - insert a value/len unique to this txn
+ - query
+ abort txn number test (that level and everything nested inside it)    // for test = MAX don't abort anything
+ commit txn 1 (outermost)                                              // for test = 1 don't commit anything
+ query // only query that really matters
+ */
+
+static DB *db;
+static DB_ENV *env;
+
+static void
+setup_db (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_TXN | DB_PRIVATE | DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ {
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+}
+
+
+static void
+close_db (void) {
+ int r;
+ r = txn_query->commit(txn_query, 0);
+ CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+static void
+verify_val(void) {
+ int r;
+ DBT observed_val;
+ dbt_init(&observed_val, NULL, 0);
+ r = db->get(db, txn_query, &key, &observed_val, 0);
+ if (which_expected==-1)
+ CKERR2(r, DB_NOTFOUND);
+ else {
+ CKERR(r);
+ assert(observed_val.size == vals[which_expected].size);
+ assert(memcmp(observed_val.data, vals[which_expected].data, vals[which_expected].size) == 0);
+ }
+}
+
+static void
+initialize_db(void) {
+ int r;
+ r = env->txn_begin(env, NULL, &txn_query, DB_READ_UNCOMMITTED);
+ CKERR(r);
+ which_expected = -1;
+ verify_val();
+ //Put in a 'committed value'
+ r = db->put(db, NULL, &key, &vals[0], 0);
+ CKERR(r);
+ txns[0] = NULL;
+
+ int i;
+ which_expected = 0;
+ for (i = 1; i < MAX_NEST; i++) {
+ r = env->txn_begin(env, txns[i-1], &txns[i], 0);
+ CKERR(r);
+ verify_val();
+ r = db->put(db, txns[i], &key, &vals[i], 0);
+ CKERR(r);
+ which_expected = i;
+ verify_val();
+ }
+}
+
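+// 'shortcut' variant: resolve the stack with at most two explicit calls (one
+// abort, one commit), relying on the engine to implicitly resolve the still-open
+// nested transactions; the 'slow' variant below resolves every level explicitly.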
+static void
+test_txn_nested_shortcut (int abort_at_depth) {
+ int r;
+ if (verbose) { fprintf(stderr, "%s (%s):%d [abortdepth = %d]\n", __FILE__, __FUNCTION__, __LINE__, abort_at_depth); fflush(stderr); }
+
+ setup_db();
+ initialize_db();
+
+ which_expected = MAX_NEST-1;
+ verify_val();
+
+ assert(abort_at_depth > 0); //Cannot abort 'committed' txn.
+ assert(abort_at_depth <= MAX_NEST); //must be in range
+ if (abort_at_depth < MAX_NEST) {
+ //MAX_NEST means no abort
+ DB_TXN *abort_txn = txns[abort_at_depth];
+ r = abort_txn->abort(abort_txn);
+ CKERR(r);
+ which_expected = abort_at_depth - 1;
+ verify_val();
+ }
+ if (abort_at_depth > 1) {
+ //abort_at_depth 1 means abort the whole thing (nothing left to commit)
+ DB_TXN *commit_txn = txns[1];
+ r = commit_txn->commit(commit_txn, DB_TXN_NOSYNC);
+ CKERR(r);
+ verify_val();
+ }
+ close_db();
+}
+
+static void
+test_txn_nested_slow (int abort_at_depth) {
+ int r;
+ if (verbose) { fprintf(stderr, "%s (%s):%d [abortdepth = %d]\n", __FILE__, __FUNCTION__, __LINE__, abort_at_depth); fflush(stderr); }
+
+ setup_db();
+ initialize_db();
+
+ which_expected = MAX_NEST-1;
+ verify_val();
+
+ assert(abort_at_depth > 0); //Cannot abort 'committed' txn.
+ assert(abort_at_depth <= MAX_NEST); //must be in range
+ //MAX_NEST means no abort
+ int nest;
+ for (nest = MAX_NEST - 1; nest >= abort_at_depth; nest--) {
+ DB_TXN *abort_txn = txns[nest];
+ r = abort_txn->abort(abort_txn);
+ CKERR(r);
+ which_expected = nest - 1;
+ verify_val();
+ }
+ //which_expected does not change anymore
+ for (nest = abort_at_depth-1; nest > 0; nest--) {
+ DB_TXN *commit_txn = txns[nest];
+ r = commit_txn->commit(commit_txn, DB_TXN_NOSYNC);
+ CKERR(r);
+ verify_val();
+ }
+ close_db();
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ initialize_values();
+ int i;
+ for (i = 1; i <= MAX_NEST; i++) {
+ test_txn_nested_shortcut(i);
+ test_txn_nested_slow(i);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_nested3.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_nested3.cc
new file mode 100644
index 00000000..8794c05c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_nested3.cc
@@ -0,0 +1,281 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+#include <ft/txn/xids.h>
+#define MAX_NEST MAX_TRANSACTION_RECORDS
+#define MAX_SIZE MAX_TRANSACTION_RECORDS
+
+/*********************
+ *
+ * Purpose of this test is to verify that nested transactions support the full
+ * range of possible values, using randomized insert/delete/no-op operations at
+ * each nesting level.
+create empty db
+for test = 1 to MAX
+ for nesting level 0
+ - randomly insert or not
+ for nesting_level = 1 to MAX
+ - begin txn
+ - randomly one of (insert, delete, do nothing)
+ - if insert, use a value/len unique to this txn
+ - query to verify
+ for nesting level = MAX to 1
+ - randomly abort or commit each transaction
+ - query to verify
+delete db
+ */
+
+
+enum { TYPE_DELETE = 1, TYPE_INSERT, TYPE_PLACEHOLDER };
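+// TYPE_PLACEHOLDER marks a nesting level whose transaction touched nothing; a
+// query at that level must observe the value of the nearest non-placeholder
+// ancestor (see verify_val below).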
+
+uint8_t valbufs[MAX_NEST][MAX_SIZE];
+DBT vals [MAX_NEST];
+uint8_t keybuf [MAX_SIZE];
+DBT key;
+uint8_t types [MAX_NEST];
+DB_TXN *txns [MAX_NEST];
+DB_TXN *txn_query;
+int which_expected;
+
+static void
+fillrandom(uint8_t buf[MAX_SIZE], uint32_t length) {
+ assert(length < MAX_SIZE);
+ uint32_t i;
+ for (i = 0; i < length; i++) {
+ buf[i] = random() & 0xFF;
+ }
+}
+
+static void
+initialize_values (void) {
+ int nest_level;
+ for (nest_level = 0; nest_level < MAX_NEST; nest_level++) {
+ fillrandom(valbufs[nest_level], nest_level);
+ dbt_init(&vals[nest_level], &valbufs[nest_level][0], nest_level);
+ }
+ uint32_t len = random() % MAX_SIZE;
+ fillrandom(keybuf, len);
+ dbt_init(&key, &keybuf[0], len);
+}
+
+
+static DB *db;
+static DB_ENV *env;
+
+static void
+setup_db (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_TXN | DB_PRIVATE | DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ {
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ r = env->txn_begin(env, NULL, &txn_query, DB_READ_UNCOMMITTED);
+ CKERR(r);
+}
+
+
+static void
+close_db (void) {
+ int r;
+ r = txn_query->commit(txn_query, 0);
+ CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+static void
+verify_val(uint8_t nest_level) {
+ assert(nest_level < MAX_NEST);
+ if (types[nest_level] == TYPE_PLACEHOLDER) {
+ assert(nest_level > 0);
+ return verify_val(nest_level - 1);
+ }
+ int r;
+ DBT observed_val;
+ dbt_init(&observed_val, NULL, 0);
+ r = db->get(db, txn_query, &key, &observed_val, 0);
+ if (types[nest_level] == TYPE_INSERT) {
+ CKERR(r);
+ assert(observed_val.size == vals[nest_level].size);
+ assert(memcmp(observed_val.data, vals[nest_level].data, vals[nest_level].size) == 0);
+ }
+ else {
+ assert(types[nest_level] == TYPE_DELETE);
+ CKERR2(r, DB_NOTFOUND);
+ }
+}
+
+static uint8_t
+randomize_no_placeholder_type(void) {
+ int r;
+ r = random() % 2;
+ switch (r) {
+ case 0:
+ return TYPE_INSERT;
+ case 1:
+ return TYPE_DELETE;
+ default:
+ assert(false);
+ return 0;
+ }
+}
+
+static uint8_t
+randomize_type(void) {
+ int r;
+ do {
+ r = random() % 4;
+ } while (r >= 3); //Generate uniformly random 0-2
+ switch (r) {
+ case 0:
+ return TYPE_INSERT;
+ case 1:
+ return TYPE_DELETE;
+ case 2:
+ return TYPE_PLACEHOLDER;
+ default:
+ assert(false);
+ return 0;
+ }
+}
+
+static void
+start_txn_and_maybe_insert_or_delete(uint8_t nest) {
+ int r;
+ if (nest == 0) {
+ types[nest] = randomize_no_placeholder_type();
+ assert(types[nest] != TYPE_PLACEHOLDER);
+ //Committed entry is autocommitted by not providing the txn
+ txns[nest] = NULL;
+ }
+ else {
+ types[nest] = randomize_type();
+ r = env->txn_begin(env, txns[nest-1], &txns[nest], 0);
+ CKERR(r);
+ }
+ switch (types[nest]) {
+ case TYPE_INSERT:
+ r = db->put(db, txns[nest], &key, &vals[nest], 0);
+ CKERR(r);
+ break;
+ case TYPE_DELETE:
+ r = db->del(db, txns[nest], &key, DB_DELETE_ANY);
+ CKERR(r);
+ break;
+ case TYPE_PLACEHOLDER:
+ //Do Nothing.
+ break;
+ default:
+ assert(false);
+ }
+ verify_val(nest);
+}
+
+static void
+initialize_db(void) {
+ types[0] = TYPE_DELETE; //Not yet inserted
+ verify_val(0);
+ int i;
+ for (i = 0; i < MAX_NEST; i++) {
+ start_txn_and_maybe_insert_or_delete(i);
+ }
+}
+
+static void
+test_txn_nested_jumble (int iteration) {
+ int r;
+ if (verbose) { fprintf(stderr, "%s (%s):%d [iteration # %d]\n", __FILE__, __FUNCTION__, __LINE__, iteration); fflush(stderr); }
+
+ initialize_db();
+
+    //Unwind the stack as in test_txn_nested2: randomly abort or commit each level, innermost first.
+ int index_of_expected_value = MAX_NEST - 1;
+ int nest_level;
+ for (nest_level = MAX_NEST - 1; nest_level > 0; nest_level--) {
+ int do_abort = random() & 0x1;
+ if (do_abort) {
+ r = txns[nest_level]->abort(txns[nest_level]);
+ CKERR(r);
+ index_of_expected_value = nest_level - 1;
+ }
+ else {
+ r = txns[nest_level]->commit(txns[nest_level], DB_TXN_NOSYNC);
+ CKERR(r);
+ //index of expected value unchanged
+ }
+ txns[nest_level] = NULL;
+ verify_val(index_of_expected_value);
+ }
+ //Clean out dictionary
+
+ types[0] = TYPE_DELETE;
+ r = db->del(db, NULL, &key, DB_DELETE_ANY);
+ CKERR(r);
+ verify_val(0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ initialize_values();
+ int i;
+ setup_db();
+ for (i = 0; i < 64; i++) {
+ test_txn_nested_jumble(i);
+ }
+ close_db();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_nested4.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_nested4.cc
new file mode 100644
index 00000000..fa4488ab
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_nested4.cc
@@ -0,0 +1,367 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+#include <ft/txn/xids.h>
+#define MAX_NEST MAX_TRANSACTION_RECORDS
+#define MAX_SIZE MAX_TRANSACTION_RECORDS
+
+/*********************
+ *
+ * Purpose of this test is to verify nested transactions, including support for implicit promotion
+ * in the presence of placeholders and branched trees of transactions.
+ *
+create empty db
+for test = 1 to MAX
+ for nesting level 0
+ - randomly insert or not
+ for nesting_level = 1 to MAX
+ - begin txn
+ - randomly perform four operations, each of which is one of (insert, delete, do nothing)
+ - if insert, use a value/len unique to this txn
+ - query to verify
+ for nesting level = MAX to 1
+ - randomly abort or commit each transaction or
+ - insert or delete at same level (followed by either abort/commit)
+ - branch (add more child txns similar to above)
+ - query to verify
+delete db
+ *
+ */
+
+
+enum { TYPE_DELETE = 1, TYPE_INSERT, TYPE_PLACEHOLDER };
+
+uint8_t valbufs[MAX_NEST][MAX_SIZE];
+DBT vals [MAX_NEST];
+uint8_t keybuf [MAX_SIZE];
+DBT key;
+uint8_t types [MAX_NEST];
+uint8_t currval[MAX_NEST];
+DB_TXN *txns [MAX_NEST];
+DB_TXN *txn_query;
+DB_TXN *patient_txn;
+int which_expected;
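+// For nesting level i, types[i] records whether an insert or a delete should be
+// visible once level i is the innermost live transaction, and currval[i] records
+// which value buffer that insert used; placeholders copy both from the parent.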
+
+static void
+fillrandom(uint8_t buf[MAX_SIZE], uint32_t length) {
+ assert(length < MAX_SIZE);
+ uint32_t i;
+ for (i = 0; i < length; i++) {
+ buf[i] = random() & 0xFF;
+ }
+}
+
+static void
+initialize_values (void) {
+ int nest_level;
+ for (nest_level = 0; nest_level < MAX_NEST; nest_level++) {
+ fillrandom(valbufs[nest_level], nest_level);
+ dbt_init(&vals[nest_level], &valbufs[nest_level][0], nest_level);
+ }
+ uint32_t len = random() % MAX_SIZE;
+ fillrandom(keybuf, len);
+ dbt_init(&key, &keybuf[0], len);
+}
+
+
+static DB *db;
+static DB_ENV *env;
+
+static void
+setup_db (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_TXN | DB_PRIVATE | DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ {
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ r = env->txn_begin(env, NULL, &txn_query, DB_READ_UNCOMMITTED);
+ CKERR(r);
+}
+
+
+static void
+close_db (void) {
+ int r;
+ r = txn_query->commit(txn_query, 0);
+ CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+static void
+verify_val(uint8_t nest_level) {
+ assert(nest_level < MAX_NEST);
+ if (nest_level>0) assert(txns[nest_level]);
+ assert(types[nest_level] != TYPE_PLACEHOLDER);
+ int r;
+ DBT observed_val;
+ dbt_init(&observed_val, NULL, 0);
+ r = db->get(db, txn_query, &key, &observed_val, 0);
+ if (types[nest_level] == TYPE_INSERT) {
+ CKERR(r);
+ int idx = currval[nest_level];
+ assert(observed_val.size == vals[idx].size);
+ assert(memcmp(observed_val.data, vals[idx].data, vals[idx].size) == 0);
+ }
+ else {
+ assert(types[nest_level] == TYPE_DELETE);
+ CKERR2(r, DB_NOTFOUND);
+ }
+}
+
+static uint8_t
+randomize_no_placeholder_type(void) {
+ int r;
+ r = random() % 2;
+ switch (r) {
+ case 0:
+ return TYPE_INSERT;
+ case 1:
+ return TYPE_DELETE;
+ default:
+ assert(false);
+ return 0;
+ }
+}
+
+static uint8_t
+randomize_type(void) {
+ int r;
+ r = random() % 4;
+ switch (r) {
+ case 0:
+ return TYPE_INSERT;
+ case 1:
+ return TYPE_DELETE;
+ case 2:
+ case 3:
+ return TYPE_PLACEHOLDER;
+ default:
+ assert(false);
+ return 0;
+ }
+}
+
+static void
+maybe_insert_or_delete(uint8_t nest, int type) {
+ int r;
+ if (nest>0) assert(txns[nest]);
+ types[nest] = type;
+ currval[nest] = nest;
+ switch (types[nest]) {
+ case TYPE_INSERT:
+ r = db->put(db, txns[nest], &key, &vals[nest], 0);
+ CKERR(r);
+ break;
+ case TYPE_DELETE:
+ r = db->del(db, txns[nest], &key, DB_DELETE_ANY);
+ CKERR(r);
+ break;
+ case TYPE_PLACEHOLDER:
+ types[nest] = types[nest - 1];
+ currval[nest] = currval[nest-1];
+ break;
+ default:
+ assert(false);
+ }
+ verify_val(nest);
+}
+
+static void
+start_txn_and_maybe_insert_or_delete(uint8_t nest) {
+ int iteration;
+ int r;
+ for (iteration = 0; iteration < 4; iteration++) {
+ bool skip = false;
+ if (nest == 0) {
+ types[nest] = randomize_no_placeholder_type();
+ assert(types[nest] != TYPE_PLACEHOLDER);
+ //Committed entry is autocommitted by not providing the txn
+ txns[nest] = NULL;
+ }
+ else {
+ if (iteration == 0) {
+ types[nest] = randomize_type();
+ r = env->txn_begin(env, txns[nest-1], &txns[nest], 0);
+ CKERR(r);
+ if (types[nest] == TYPE_PLACEHOLDER) skip = true;
+ }
+ else {
+ types[nest] = randomize_no_placeholder_type();
+ assert(types[nest] != TYPE_PLACEHOLDER);
+ }
+ }
+ maybe_insert_or_delete(nest, types[nest]);
+ assert(types[nest] != TYPE_PLACEHOLDER);
+ if (skip) break;
+ }
+}
+
+static void
+initialize_db(void) {
+ types[0] = TYPE_DELETE; //Not yet inserted
+ verify_val(0);
+ int i;
+ for (i = 0; i < MAX_NEST; i++) {
+ start_txn_and_maybe_insert_or_delete(i);
+ }
+}
+
+static void
+test_txn_nested_jumble (int iteration) {
+ int r;
+ if (verbose) { fprintf(stderr, "%s (%s):%d [iteration # %d]\n", __FILE__, __FUNCTION__, __LINE__, iteration); fflush(stderr); }
+
+ initialize_db();
+ r = env->txn_begin(env, NULL, &patient_txn, 0);
+ CKERR(r);
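+    // patient_txn is begun now but left idle; once the original stack is fully
+    // resolved it becomes the root of a fresh subtree of nested transactions.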
+
+ int index_of_expected_value = MAX_NEST - 1;
+ int nest_level = MAX_NEST - 1;
+ int min_allowed_branch_level = MAX_NEST - 2;
+futz_with_stack:
+ while (nest_level > 0) {
+ int operation = random() % 4;
+ switch (operation) {
+ case 0:
+ //abort
+ r = txns[nest_level]->abort(txns[nest_level]);
+ CKERR(r);
+ index_of_expected_value = nest_level - 1;
+ txns[nest_level] = NULL;
+ nest_level--;
+ verify_val(index_of_expected_value);
+ break;
+ case 1:
+ //commit
+ r = txns[nest_level]->commit(txns[nest_level], DB_TXN_NOSYNC);
+ CKERR(r);
+ currval[nest_level-1] = currval[index_of_expected_value];
+ types[nest_level-1] = types[index_of_expected_value];
+ index_of_expected_value = nest_level - 1;
+ txns[nest_level] = NULL;
+ nest_level--;
+ verify_val(index_of_expected_value);
+ break;
+ case 2:;
+ //do more work with this guy
+ int type;
+ type = randomize_no_placeholder_type();
+ maybe_insert_or_delete(nest_level, type);
+ index_of_expected_value = nest_level;
+ continue; //transaction is still alive
+ case 3:
+ if (min_allowed_branch_level >= nest_level) {
+ //start new subtree
+ int max = nest_level + 4;
+ if (MAX_NEST - 1 < max) {
+ max = MAX_NEST - 1;
+ assert(max > nest_level);
+ }
+ int branch_level;
+ for (branch_level = nest_level + 1; branch_level <= max; branch_level++) {
+ start_txn_and_maybe_insert_or_delete(branch_level);
+ }
+ nest_level = max;
+ min_allowed_branch_level--;
+ index_of_expected_value = nest_level;
+ }
+ continue; //transaction is still alive
+ default:
+ assert(false);
+ }
+ }
+ //All transactions that have touched this key are finished.
+ assert(nest_level == 0);
+ if (min_allowed_branch_level >= 0) {
+ //start new subtree
+ int max = 4;
+ assert(patient_txn);
+ txns[1] = patient_txn;
+ patient_txn = NULL;
+ maybe_insert_or_delete(1, randomize_no_placeholder_type());
+ int branch_level;
+ for (branch_level = 2; branch_level <= max; branch_level++) {
+ start_txn_and_maybe_insert_or_delete(branch_level);
+ }
+ nest_level = max;
+ min_allowed_branch_level = -1;
+ index_of_expected_value = nest_level;
+ goto futz_with_stack;
+ }
+
+ //Clean out dictionary
+
+ types[0] = TYPE_DELETE;
+ r = db->del(db, NULL, &key, DB_DELETE_ANY);
+ CKERR(r);
+ verify_val(0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ initialize_values();
+ int i;
+ setup_db();
+ for (i = 0; i < 64; i++) {
+ test_txn_nested_jumble(i);
+ }
+ close_db();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_nested5.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_nested5.cc
new file mode 100644
index 00000000..0374482e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_nested5.cc
@@ -0,0 +1,386 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+#include <ft/txn/xids.h>
+#define MAX_NEST MAX_TRANSACTION_RECORDS
+#define MAX_SIZE (MAX_TRANSACTION_RECORDS + 1)
+
+/*********************
+ *
+ * Purpose of this test is to verify insert ignore (DB_NOOVERWRITE_NO_ERROR) with nested transactions,
+ * including support for implicit promotion
+ * in the presence of placeholders and branched trees of transactions.
+ *
+create empty db
+for test = 1 to MAX
+ for nesting level 0
+ - randomly insert or not
+ for nesting_level = 1 to MAX
+ - begin txn
+ - randomly perform four operations, each of which is one of (insert, delete, do nothing)
+ - if insert, use a value/len unique to this txn
+ - query to verify
+ for nesting level = MAX to 1
+ - randomly abort or commit each transaction or
+ - insert or delete at same level (followed by either abort/commit)
+ - branch (add more child txns similar to above)
+ - query to verify
+delete db
+ *
+ */
+
+
+enum { TYPE_DELETE = 1, TYPE_INSERT, TYPE_PLACEHOLDER };
+
+bool top_is_delete;
+uint8_t junkvalbuf[MAX_SIZE];
+DBT junkval;
+uint8_t valbufs[MAX_NEST][MAX_SIZE];
+DBT vals [MAX_NEST];
+uint8_t keybuf [MAX_SIZE];
+DBT key;
+uint8_t types [MAX_NEST];
+uint8_t currval[MAX_NEST];
+DB_TXN *txns [MAX_NEST];
+DB_TXN *txn_query;
+DB_TXN *patient_txn;
+int which_expected;
+
+static void
+fillrandom(uint8_t buf[MAX_SIZE], uint32_t length) {
+ assert(length < MAX_SIZE);
+ uint32_t i;
+ for (i = 0; i < length; i++) {
+ buf[i] = random() & 0xFF;
+ }
+}
+
+static void
+initialize_values (void) {
+ int nest_level;
+ for (nest_level = 0; nest_level < MAX_NEST; nest_level++) {
+ fillrandom(valbufs[nest_level], nest_level);
+ dbt_init(&vals[nest_level], &valbufs[nest_level][0], nest_level);
+ }
+ uint32_t len = random() % MAX_SIZE;
+ fillrandom(keybuf, len);
+ dbt_init(&key, &keybuf[0], len);
+
+ fillrandom(junkvalbuf, MAX_SIZE-1);
+ dbt_init(&junkval, &junkvalbuf[0], MAX_SIZE-1);
+}
+
+
+static DB *db;
+static DB_ENV *env;
+
+static void
+setup_db (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_TXN | DB_PRIVATE | DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ {
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+ r = env->txn_begin(env, NULL, &txn_query, DB_READ_UNCOMMITTED);
+ CKERR(r);
+}
+
+
+static void
+close_db (void) {
+ int r;
+ r = txn_query->commit(txn_query, 0);
+ CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+}
+
+static void
+verify_val(uint8_t nest_level) {
+ assert(nest_level < MAX_NEST);
+ if (nest_level>0) assert(txns[nest_level]);
+ assert(types[nest_level] != TYPE_PLACEHOLDER);
+ int r;
+ DBT observed_val;
+ dbt_init(&observed_val, NULL, 0);
+ r = db->get(db, txn_query, &key, &observed_val, 0);
+ if (types[nest_level] == TYPE_INSERT) {
+ CKERR(r);
+ int idx = currval[nest_level];
+ assert(observed_val.size == vals[idx].size);
+ assert(memcmp(observed_val.data, vals[idx].data, vals[idx].size) == 0);
+ top_is_delete = false;
+ }
+ else {
+ assert(types[nest_level] == TYPE_DELETE);
+ CKERR2(r, DB_NOTFOUND);
+ top_is_delete = true;
+ }
+}
+
+static uint8_t
+randomize_no_placeholder_type(void) {
+ int r;
+ r = random() % 2;
+ switch (r) {
+ case 0:
+ return TYPE_INSERT;
+ case 1:
+ return TYPE_DELETE;
+ default:
+ assert(false);
+ return 0;
+ }
+}
+
+static uint8_t
+randomize_type(void) {
+ int r;
+ r = random() % 4;
+ switch (r) {
+ case 0:
+ return TYPE_INSERT;
+ case 1:
+ return TYPE_DELETE;
+ case 2:
+ case 3:
+ return TYPE_PLACEHOLDER;
+ default:
+ assert(false);
+ return 0;
+ }
+}
+
+static void
+maybe_insert_or_delete(uint8_t nest, int type) {
+ int r;
+ if (nest>0) assert(txns[nest]);
+ types[nest] = type;
+ currval[nest] = nest;
+ switch (types[nest]) {
+ case TYPE_INSERT:
+ if (top_is_delete) {
+ r = db->put(db, txns[nest], &key, &vals[nest], DB_NOOVERWRITE_NO_ERROR);
+ CKERR(r);
+ }
+ else {
+ r = db->put(db, txns[nest], &key, &vals[nest], 0);
+ CKERR(r);
+ }
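+        // The key now exists, so this insert-ignore put of junkval must be
+        // silently dropped; verify_val() below confirms the real value survived.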
+ r = db->put(db, txns[nest], &key, &junkval, DB_NOOVERWRITE_NO_ERROR);
+ CKERR(r);
+ top_is_delete = false;
+ break;
+ case TYPE_DELETE:
+ r = db->del(db, txns[nest], &key, DB_DELETE_ANY);
+ CKERR(r);
+ top_is_delete = true;
+ break;
+ case TYPE_PLACEHOLDER:
+ types[nest] = types[nest - 1];
+ currval[nest] = currval[nest-1];
+ break;
+ default:
+ assert(false);
+ }
+ verify_val(nest);
+}
+
+static void
+start_txn_and_maybe_insert_or_delete(uint8_t nest) {
+ int iteration;
+ int r;
+ for (iteration = 0; iteration < 4; iteration++) {
+ bool skip = false;
+ if (nest == 0) {
+ types[nest] = randomize_no_placeholder_type();
+ assert(types[nest] != TYPE_PLACEHOLDER);
+ //Committed entry is autocommitted by not providing the txn
+ txns[nest] = NULL;
+ }
+ else {
+ if (iteration == 0) {
+ types[nest] = randomize_type();
+ r = env->txn_begin(env, txns[nest-1], &txns[nest], 0);
+ CKERR(r);
+ if (types[nest] == TYPE_PLACEHOLDER) skip = true;
+ }
+ else {
+ types[nest] = randomize_no_placeholder_type();
+ assert(types[nest] != TYPE_PLACEHOLDER);
+ }
+ }
+ maybe_insert_or_delete(nest, types[nest]);
+ assert(types[nest] != TYPE_PLACEHOLDER);
+ if (skip) break;
+ }
+}
+
+static void
+initialize_db(void) {
+ types[0] = TYPE_DELETE; //Not yet inserted
+ verify_val(0);
+ int i;
+ for (i = 0; i < MAX_NEST; i++) {
+ start_txn_and_maybe_insert_or_delete(i);
+ }
+}
+
+static void
+test_txn_nested_jumble (int iteration) {
+ int r;
+ if (verbose) { fprintf(stderr, "%s (%s):%d [iteration # %d]\n", __FILE__, __FUNCTION__, __LINE__, iteration); fflush(stderr); }
+
+ initialize_db();
+ r = env->txn_begin(env, NULL, &patient_txn, 0);
+ CKERR(r);
+
+ int index_of_expected_value = MAX_NEST - 1;
+ int nest_level = MAX_NEST - 1;
+ int min_allowed_branch_level = MAX_NEST - 2;
+futz_with_stack:
+ while (nest_level > 0) {
+ int operation = random() % 4;
+ switch (operation) {
+ case 0:
+ //abort
+ r = txns[nest_level]->abort(txns[nest_level]);
+ CKERR(r);
+ index_of_expected_value = nest_level - 1;
+ txns[nest_level] = NULL;
+ nest_level--;
+ verify_val(index_of_expected_value);
+ break;
+ case 1:
+ //commit
+ r = txns[nest_level]->commit(txns[nest_level], DB_TXN_NOSYNC);
+ CKERR(r);
+ currval[nest_level-1] = currval[index_of_expected_value];
+ types[nest_level-1] = types[index_of_expected_value];
+ index_of_expected_value = nest_level - 1;
+ txns[nest_level] = NULL;
+ nest_level--;
+ verify_val(index_of_expected_value);
+ break;
+ case 2:;
+ //do more work with this guy
+ int type;
+ type = randomize_no_placeholder_type();
+ maybe_insert_or_delete(nest_level, type);
+ index_of_expected_value = nest_level;
+ continue; //transaction is still alive
+ case 3:
+ if (min_allowed_branch_level >= nest_level) {
+ //start new subtree
+ int max = nest_level + 4;
+ if (MAX_NEST - 1 < max) {
+ max = MAX_NEST - 1;
+ assert(max > nest_level);
+ }
+ int branch_level;
+ for (branch_level = nest_level + 1; branch_level <= max; branch_level++) {
+ start_txn_and_maybe_insert_or_delete(branch_level);
+ }
+ nest_level = max;
+ min_allowed_branch_level--;
+ index_of_expected_value = nest_level;
+ }
+ continue; //transaction is still alive
+ default:
+ assert(false);
+ }
+ }
+ //All transactions that have touched this key are finished.
+ assert(nest_level == 0);
+ if (min_allowed_branch_level >= 0) {
+ //start new subtree
+ int max = 4;
+ assert(patient_txn);
+ txns[1] = patient_txn;
+ patient_txn = NULL;
+ maybe_insert_or_delete(1, randomize_no_placeholder_type());
+ int branch_level;
+ for (branch_level = 2; branch_level <= max; branch_level++) {
+ start_txn_and_maybe_insert_or_delete(branch_level);
+ }
+ nest_level = max;
+ min_allowed_branch_level = -1;
+ index_of_expected_value = nest_level;
+ goto futz_with_stack;
+ }
+
+ //Clean out dictionary
+
+ types[0] = TYPE_DELETE;
+ r = db->del(db, NULL, &key, DB_DELETE_ANY);
+ CKERR(r);
+ verify_val(0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ initialize_values();
+ int i;
+ setup_db();
+ for (i = 0; i < 64; i++) {
+ test_txn_nested_jumble(i);
+ }
+ close_db();
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort.cc
new file mode 100644
index 00000000..a9c9f95a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort.cc
@@ -0,0 +1,128 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static int
+db_put (DB *db, DB_TXN *txn, int k, int v) {
+ DBT key, val;
+ return db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), DB_NOOVERWRITE);
+}
+
+static const char *db_error(int error) {
+ static char errorbuf[32];
+ switch (error) {
+ case DB_NOTFOUND: return "DB_NOTFOUND";
+ case DB_LOCK_DEADLOCK: return "DB_LOCK_DEADLOCK";
+ case DB_LOCK_NOTGRANTED: return "DB_LOCK_NOTGRANTED";
+ case DB_KEYEXIST: return "DB_KEYEXIST";
+ default:
+ sprintf(errorbuf, "%d", error);
+ return errorbuf;
+ }
+}
+
+static void
+test_txn_nested(int do_commit) {
+ if (verbose) printf("test_txn_nested:%d\n", do_commit);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *db;
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.txn.nested.abort.ft_handle";
+
+ /* create the dup database file */
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+    r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOCK|DB_INIT_LOG|DB_THREAD|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); assert(r == 0);
+    db->set_errfile(db, stderr); // Send engine error messages to stderr
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE+DB_AUTO_COMMIT, 0666); assert(r == 0);
+
+ DB_TXN *t1;
+ r = env->txn_begin(env, null_txn, &t1, 0); assert(r == 0);
+ if (verbose) printf("t1:begin\n");
+
+ DB_TXN *t2;
+ r = env->txn_begin(env, t1, &t2, 0); assert(r == 0);
+ if (verbose) printf("t2:begin\n");
+
+ r = db_put(db, t2, htonl(1), htonl(1));
+ if (verbose) printf("t1:put:%s\n", db_error(r));
+
+ if (do_commit) {
+ r = t2->commit(t2, 0);
+ if (verbose) printf("t2:commit:%s\n", db_error(r));
+ } else {
+ r = t2->abort(t2);
+ if (verbose) printf("t2:abort:%s\n", db_error(r));
+ }
+
+ r = db->close(db, 0); assert(r == 0);
+
+ r = t1->commit(t1, 0);
+ if (verbose) printf("t1:commit:%s\n", db_error(r));
+
+ r = env->close(env, 0); assert(r == 0);
+}
+
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ test_txn_nested(0);
+ test_txn_nested(1);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort2.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort2.cc
new file mode 100644
index 00000000..8e0105fc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort2.cc
@@ -0,0 +1,117 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+static void
+test_txn_abort (void) {
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ int i;
+ DB_ENV *env;
+ DBT key, val;
+ DB_TXN* txn_all = NULL;
+ DB_TXN* txn_stmt = NULL;
+ DB_TXN* txn_sp = NULL;
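+    // txn_all plays the role of the overall (multi-statement) transaction,
+    // txn_stmt a per-statement child, and txn_sp a savepoint level in between.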
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_TXN | DB_PRIVATE | DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+
+
+ r = env->txn_begin(env, 0, &txn_all, 0); CKERR(r);
+
+ r = env->txn_begin(env, txn_all, &txn_stmt, 0); CKERR(r);
+ i = 1;
+ r = db->put(db, txn_stmt, dbt_init(&key, &i, sizeof i), dbt_init(&val, &i, sizeof i), 0);
+ CKERR(r);
+    r = txn_stmt->commit(txn_stmt, DB_TXN_NOSYNC);
+    CKERR(r);
+    txn_stmt = NULL;
+
+ r = env->txn_begin(env, txn_all, &txn_sp, 0); CKERR(r);
+
+ r = env->txn_begin(env, txn_sp, &txn_stmt, 0); CKERR(r);
+ i = 2;
+ r = db->put(db, txn_stmt, dbt_init(&key, &i, sizeof i), dbt_init(&val, &i, sizeof i), 0);
+ CKERR(r);
+    r = txn_stmt->commit(txn_stmt, DB_TXN_NOSYNC);
+    CKERR(r);
+    txn_stmt = NULL;
+
+
+ r = txn_all->abort(txn_all);
+ CKERR(r);
+
+
+ /* walk the db, should be empty */
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ memset(&key, 0, sizeof key);
+ memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_FIRST);
+ CKERR2(r, DB_NOTFOUND);
+ r = cursor->c_close(cursor); CKERR(r);
+    r = txn->commit(txn, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test_txn_abort();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort3.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort3.cc
new file mode 100644
index 00000000..4da6964a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort3.cc
@@ -0,0 +1,123 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
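+// test that an insert followed by a delete of the same key, each done in a
+// committed nested child txn, leaves the table empty once the parent aborts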
+static void
+test_txn_abort (void) {
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ int i;
+ DB_ENV *env;
+ DBT key, val;
+ DB_TXN* txn_all = NULL;
+ DB_TXN* txn_stmt = NULL;
+ DB_TXN* txn_sp = NULL;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_TXN | DB_PRIVATE | DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB *db = NULL;
+ {
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+
+
+ r = env->txn_begin(env, 0, &txn_all, 0); CKERR(r);
+
+ r = env->txn_begin(env, txn_all, &txn_stmt, 0); CKERR(r);
+ i = 1;
+ r = db->put(db, txn_stmt, dbt_init(&key, &i, sizeof i), dbt_init(&val, &i, sizeof i), 0);
+ CKERR(r);
+    r = txn_stmt->commit(txn_stmt, DB_TXN_NOSYNC); CKERR(r);
+ txn_stmt = NULL;
+
+ r = env->txn_begin(env, txn_all, &txn_sp, 0); CKERR(r);
+
+ r = env->txn_begin(env, txn_sp, &txn_stmt, 0); CKERR(r);
+ r = db->del(db, txn_stmt, dbt_init(&key, &i, sizeof i), 0);
+ CKERR(r);
+
+
+    r = txn_stmt->commit(txn_stmt, DB_TXN_NOSYNC); CKERR(r);
+ txn_stmt = NULL;
+
+
+ r = txn_all->abort(txn_all);
+ CKERR(r);
+
+
+ {
+ /* walk the db, should be empty */
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ memset(&key, 0, sizeof key);
+ memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_FIRST);
+ CKERR2(r, DB_NOTFOUND);
+ r = cursor->c_close(cursor); CKERR(r);
+        r = txn->commit(txn, 0); CKERR(r);
+ }
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test_txn_abort();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort4.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort4.cc
new file mode 100644
index 00000000..51e858fb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_nested_abort4.cc
@@ -0,0 +1,148 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdio.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
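+// test that aborting the parent undoes both children: the first child
+// inserts key 1, the second either inserts or deletes key secondnum
+// (depending on the test case); after the parent aborts the table must be
+// empty either way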
+static DB *db;
+static DB_ENV *env;
+
+static void
+setup_db (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL | DB_INIT_LOG | DB_INIT_LOCK | DB_INIT_TXN | DB_PRIVATE | DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ {
+ DB_TXN *txn = 0;
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, txn, "test.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = txn->commit(txn, 0); CKERR(r);
+ }
+}
+
+#if 0
+static void
+close_db (void) {
+ int r;
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+}
+#endif
+
+static void
+test_txn_abort (int insert, int secondnum) {
+ if (verbose) { fprintf(stderr, "%s (%s):%d [%d,%d]\n", __FILE__, __FUNCTION__, __LINE__, insert, secondnum); fflush(stderr); }
+ setup_db();
+
+ DBT key, val;
+ int r;
+
+
+ DB_TXN *parent = NULL, *child = NULL;
+
+ int i = 1;
+ r = env->txn_begin(env, 0, &parent, 0); CKERR(r);
+
+ //Insert something as a child
+ r = env->txn_begin(env, parent, &child, 0); CKERR(r);
+ i = 1;
+ r = db->put(db, child, dbt_init(&key, &i, sizeof i), dbt_init(&val, &i, sizeof i), 0);
+ CKERR(r);
+    r = child->commit(child, DB_TXN_NOSYNC); CKERR(r);
+ child = NULL;
+
+
+    // insert or delete key secondnum as a child, depending on the test case
+ r = env->txn_begin(env, parent, &child, 0); CKERR(r);
+ i = secondnum;
+ if (insert) {
+ r = db->put(db, child, dbt_init(&key, &i, sizeof i), dbt_init(&val, &i, sizeof i), 0);
+ CKERR(r);
+ }
+ else { // delete
+ r = db->del(db, child, dbt_init(&key, &i, sizeof i), DB_DELETE_ANY);
+ CKERR(r);
+ }
+    r = child->commit(child, DB_TXN_NOSYNC); CKERR(r);
+ child = NULL;
+
+ r = parent->abort(parent);
+ CKERR(r);
+ parent = NULL;
+
+
+ {
+ DB_TXN *txn = NULL;
+ /* walk the db, should be empty */
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ DBC *cursor;
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ memset(&key, 0, sizeof key);
+ memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_FIRST);
+ CKERR2(r, DB_NOTFOUND);
+ r = cursor->c_close(cursor); CKERR(r);
+        r = txn->commit(txn, 0); CKERR(r);
+ }
+ r=db->close(db, 0); CKERR(r);
+ r=env->close(env, 0); CKERR(r);
+
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ test_txn_abort(1, 0);
+ test_txn_abort(0, 0);
+ test_txn_abort(1, 1);
+ test_txn_abort(0, 1);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_read_committed_always.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_read_committed_always.cc
new file mode 100644
index 00000000..42e0e8cd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_read_committed_always.cc
@@ -0,0 +1,121 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/**
+ * Test that read committed always isolation works.
+ *
+ * Read committed always means 'always read the outermost committed value'. This is less isolated
+ * than 'read committed', which MySQL defines as 'snapshot isolation per sub-statement (child txn)'.
+ */
+
+#include <portability/toku_random.h>
+
+#include "test.h"
+
+static void test_simple_committed_read(DB_ENV *env) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644); CKERR(r);
+
+ char valbuf[64];
+ DBT john, christian, val;
+ dbt_init(&john, "john", sizeof("john"));
+ dbt_init(&christian, "christian", sizeof("christian"));
+ dbt_init(&val, valbuf, sizeof(valbuf));
+
+ // start with just john
+ r = db->put(db, NULL, &john, &john, 0); CKERR(r);
+
+ // begin an outer txn with read-committed-always isolation
+ DB_TXN *outer_txn;
+ r = env->txn_begin(env, NULL, &outer_txn, DB_READ_COMMITTED_ALWAYS); CKERR(r);
+
+ // outer txn sees john
+ r = db->get(db, outer_txn, &john, &val, 0); CKERR(r);
+
+ // outer txn does not yet see christian
+ r = db->get(db, outer_txn, &christian, &val, 0); CKERR2(r, DB_NOTFOUND);
+
+ // insert christian in another txn (NULL means generate an auto-commit txn)
+ r = db->put(db, NULL, &christian, &christian, 0); CKERR(r);
+
+ // outer txn does not see christian, because it is provisional
+ // and our copied snapshot says it is not committed
+ r = db->get(db, outer_txn, &christian, &val, 0); CKERR2(r, DB_NOTFOUND);
+
+ // insert christian in another txn (again), thereby autocommitting last put
+ r = db->put(db, NULL, &christian, &christian, 0); CKERR(r);
+
+ // outer txn sees christian, because we now have a committed version
+ r = db->get(db, outer_txn, &christian, &val, 0); CKERR(r);
+
+ // delete john in another txn
+ r = db->del(db, NULL, &john, 0); CKERR(r);
+
+ // outer txn no longer sees john
+ r = db->get(db, outer_txn, &john, &val, 0); CKERR2(r, DB_NOTFOUND);
+
+ r = outer_txn->commit(outer_txn, 0); CKERR(r);
+
+ r = db->close(db, 0); CKERR(r);
+ r = env->dbremove(env, NULL, "db", NULL, 0); CKERR(r);
+}
+
+int test_main(int argc, char * const argv[]) {
+ default_parse_args(argc, argv);
+
+ int r;
+ const int envflags = DB_INIT_MPOOL | DB_CREATE | DB_THREAD |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE;
+
+ // startup
+ DB_ENV *env;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); CKERR(r);
+ r = db_env_create(&env, 0); CKERR(r);
+    r = env->open(env, TOKU_TEST_FILENAME, envflags, 0755); CKERR(r);
+
+ test_simple_committed_read(env);
+
+ // cleanup
+ r = env->close(env, 0); CKERR(r);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/test_txn_recover3.cc b/storage/tokudb/PerconaFT/src/tests/test_txn_recover3.cc
new file mode 100644
index 00000000..46124e17
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_txn_recover3.cc
@@ -0,0 +1,140 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
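+// create an env and two dictionaries, commit nrows rows into each, take a
+// checkpoint and archive the logs, then close everything and reopen the env
+// with DB_RECOVER to exercise recovery over the resulting log state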
+static void
+test_txn_recover3 (int nrows) {
+    if (verbose) printf("test_txn_recover3:%d\n", nrows);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ char dirname[TOKU_PATH_MAX+1];
+ toku_os_mkdir(toku_path_join(dirname, 2, TOKU_TEST_FILENAME, "t.tokudb"), S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env;
+ DB *mdb, *sdb;
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "t.tokudb/main.ft_handle";
+ const char * const sname = "t.tokudb/status.ft_handle";
+
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOCK|DB_INIT_LOG |DB_THREAD |DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); CKERR(r);
+
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOCK|DB_INIT_LOG |DB_THREAD |DB_PRIVATE | DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r = db_create(&mdb, env, 0); assert(r == 0);
+ mdb->set_errfile(mdb,stderr); // Turn off those annoying errors
+ r = mdb->open(mdb, null_txn, fname, NULL, DB_BTREE, DB_CREATE+DB_THREAD+DB_AUTO_COMMIT, 0666); assert(r == 0);
+ r = mdb->close(mdb, 0); assert(r == 0);
+
+ r = db_create(&sdb, env, 0); assert(r == 0);
+ sdb->set_errfile(sdb,stderr); // Turn off those annoying errors
+ r = sdb->open(sdb, null_txn, sname, NULL, DB_BTREE, DB_CREATE+DB_THREAD+DB_AUTO_COMMIT, 0666); assert(r == 0);
+ r = sdb->close(sdb, 0); assert(r == 0);
+
+ r = db_create(&mdb, env, 0); assert(r == 0);
+ mdb->set_errfile(mdb,stderr); // Turn off those annoying errors
+ r = mdb->open(mdb, null_txn, fname, NULL, DB_BTREE, DB_CREATE+DB_THREAD+DB_AUTO_COMMIT, 0666); assert(r == 0);
+
+ r = db_create(&sdb, env, 0); assert(r == 0);
+ sdb->set_errfile(sdb,stderr); // Turn off those annoying errors
+ r = sdb->open(sdb, null_txn, sname, NULL, DB_BTREE, DB_CREATE+DB_THREAD+DB_AUTO_COMMIT, 0666); assert(r == 0);
+
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, null_txn, &txn, 0); assert(r == 0);
+
+ int i;
+ for (i=0; i<nrows; i++) {
+ int k = htonl(i);
+ int v = htonl(i);
+ DBT key, val;
+ r = mdb->put(mdb, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+ r = sdb->put(sdb, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ assert(r == 0);
+ }
+
+ r = txn->commit(txn, 0); assert(r == 0);
+
+ r = mdb->close(mdb, 0); assert(r == 0);
+ r = sdb->close(sdb, 0); assert(r == 0);
+
+ r = env->txn_checkpoint(env, 0, 0, 0); assert(r == 0);
+
+ char **names;
+ r = env->log_archive(env, &names, 0); assert(r == 0);
+ if (names) {
+ for (i=0; names[i]; i++)
+ printf("%d:%s\n", i, names[i]);
+ toku_free(names);
+ }
+
+ r = env->close(env, 0); assert(r == 0);
+
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOCK|DB_INIT_LOG |DB_THREAD |DB_PRIVATE | DB_RECOVER, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ test_txn_recover3(1);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_unused_memory_crash.cc b/storage/tokudb/PerconaFT/src/tests/test_unused_memory_crash.cc
new file mode 100644
index 00000000..f273a02a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_unused_memory_crash.cc
@@ -0,0 +1,137 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+static void
+expect_cursor_get (DBC *cursor, int k, int v, int op) {
+ int kk, vv;
+ DBT key, val;
+ int r = cursor->c_get(cursor, dbt_init_malloc(&key), dbt_init_malloc(&val), op);
+ assert(r == 0);
+ assert(key.size == sizeof kk); memcpy(&kk, key.data, key.size); assert(kk == k); toku_free(key.data);
+ assert(val.size == sizeof vv); memcpy(&vv, val.data, val.size); assert(vv == v); toku_free(val.data);
+}
+
+static DBC *
+new_cursor (DB *db, int k, int v, int op) {
+ DBC *cursor;
+ int r;
+ r = db->cursor(db, 0, &cursor, 0); assert(r == 0);
+ expect_cursor_get(cursor, k, v, op);
+ return cursor;
+}
+
+static int
+db_put (DB *db, int k, int v) {
+ DBT key, val;
+ int r = db->put(db, 0, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0);
+ return r;
+}
+
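+// pin one cursor on the first key and one on the last, then insert the keys
+// in between (forward or reverse) to force node splits, and verify via
+// DB_CURRENT that both pinned cursors still return their original rows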
+static void
+test_cursor_nonleaf_expand (int n, int reverse) {
+ if (verbose) printf("test_cursor_nonleaf_expand:%d %d\n", n, reverse);
+
+ DB_TXN * const null_txn = 0;
+ const char * const fname = "test.insert.ft_handle";
+ int r;
+
+ // toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ // r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->set_pagesize(db, 4096); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ r = db_put(db, htonl(0), 0); assert(r == 0);
+ DBC *cursor0 = new_cursor(db, htonl(0), 0, DB_FIRST); assert(cursor0);
+ r = db_put(db, htonl(n), n); assert(r == 0);
+ DBC *cursorn = new_cursor(db, htonl(n), n, DB_LAST); assert(cursorn);
+
+ int i;
+ if (reverse) {
+ for (i=n-1; i > 0; i--) {
+ r = db_put(db, htonl(i), i); assert(r == 0);
+ }
+ } else {
+ for (i=1; i < n; i++) {
+ r = db_put(db, htonl(i), i); assert(r == 0);
+ }
+ }
+
+ expect_cursor_get(cursor0, htonl(0), 0, DB_CURRENT);
+ expect_cursor_get(cursorn, htonl(n), n, DB_CURRENT);
+
+ r = cursor0->c_close(cursor0); assert(r == 0);
+ r = cursorn->c_close(cursorn); assert(r == 0);
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+ parse_args(argc, argv);
+
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ int i;
+ for (i=1; i<=65536; i *= 2) {
+ test_cursor_nonleaf_expand(i, 0);
+ test_cursor_nonleaf_expand(i, 1);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_abort_works.cc b/storage/tokudb/PerconaFT/src/tests/test_update_abort_works.cc
new file mode 100644
index 00000000..70194ea4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_abort_works.cc
@@ -0,0 +1,191 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an aborted update doesn't affect later reads
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const int to_update[] = { 0, 1, 1, 1, 0, 0, 1, 0, 1, 0 };
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
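+// for key i the test inserts _v(i), then (for the rows flagged in to_update)
+// applies an update with extra _e(i); a successful update must leave
+// _u(_v(i), _e(i)) behind, an aborted one must leave _v(i) untouched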
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ if (to_update[i] == 1) {
+            e = _e(i);
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void chk_updated(const unsigned int k, const unsigned int v) {
+ if (to_update[k]) {
+ assert(v == _u(_v(k), _e(k)));
+ } else {
+ assert(v == _v(k));
+ }
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v)) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ r = db->get(db, txn, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ check_val(i, *vp);
+ }
+ return r;
+}
+
+
+static void run_test(bool do_close) {
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_1, txn_11, 0, {
+ { int chk_r = do_verify_results(txn_11, db, chk_original); CKERR(chk_r); }
+ });
+ });
+
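+    // run the updates inside txn_2 and then abort it: a child of txn_2 must
+    // already see the updated values, while txn_3 below must see the original
+    // values again after the abort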
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_2, txn_21, 0, {
+ { int chk_r = do_verify_results(txn_21, db, chk_updated); CKERR(chk_r); }
+ });
+ });
+
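+    // optionally close and reopen the DB handle so the final verification
+    // below also runs against a freshly opened handle, not just the one the
+    // aborted updates went through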
+ if (do_close) {
+ { int chk_r = db->close(db,0); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ }
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, chk_original); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+}
+
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ run_test(false);
+ run_test(true);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_abort_works.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_abort_works.cc
new file mode 100644
index 00000000..0d538844
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_abort_works.cc
@@ -0,0 +1,182 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that aborting an update broadcast works correctly
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const unsigned int NUM_KEYS = 100;
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ e = _e(*k);
+ v = _u(*ov, e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db, uint32_t flags) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, flags); CKERR(r);
+ return r;
+}
+
+static void chk_updated(const unsigned int k, const unsigned int v) {
+ assert(v == _u(_v(k), _e(k)));
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v)) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ check_val(i, *vp);
+ }
+ return r;
+}
+
+static void run_test(bool is_resetting) {
+ DB *db;
+ uint32_t update_flags = is_resetting ? DB_IS_RESETTING_OP : 0;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_1, txn_11, 0, {
+ { int chk_r = do_verify_results(txn_11, db, chk_original); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db, update_flags); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_2, txn_21, 0, {
+ { int chk_r = do_verify_results(txn_21, db, chk_updated); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, chk_original); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_4, 0, {
+ { int chk_r = do_updates(txn_4, db, update_flags); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_4, txn_41, 0, {
+ { int chk_r = do_verify_results(txn_41, db, chk_updated); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_5, 0, {
+ { int chk_r = do_verify_results(txn_5, db, chk_updated); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test(true);
+ run_test(false);
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_calls_back.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_calls_back.cc
new file mode 100644
index 00000000..468747a1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_calls_back.cc
@@ -0,0 +1,136 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update broadcast calls the update function exactly once per row
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+int updates_called[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+// the commands are: byte 1 is "nop" "add" or "del". Byte 2 is the amount to add.
+enum cmd { CNOP, CADD, CDEL };
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *UU(old_val), const DBT *UU(extra),
+ void UU((*set_val)(const DBT *new_val,
+ void *set_extra)),
+ void *UU(set_extra)) {
+ unsigned int *k;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(updates_called[*k] == 0);
+ updates_called[*k] = 1;
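+    // set_val() is deliberately never called, so the stored rows are left
+    // unchanged; this test only verifies that the broadcast invokes the
+    // callback exactly once per key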
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static void run_test(bool is_resetting) {
+ DB *db;
+ uint32_t update_flags = is_resetting ? DB_IS_RESETTING_OP : 0;
+ for (unsigned int i = 0;
+ i < (sizeof(updates_called) / sizeof(updates_called[0])); ++i) {
+ updates_called[i] = 0;
+ }
+
+ {
+ DB_TXN* txna = NULL;
+ { int chk_r = env->txn_begin(env, NULL, &txna, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txna, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ {
+ DBT key, val;
+ unsigned int i;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, "a", 2);
+ for (i = 0; i < (sizeof(updates_called) / sizeof(updates_called[0])); ++i) {
+ { int chk_r = db->put(db, txna, keyp, valp, 0); CKERR(chk_r); }
+ }
+ }
+
+ { int chk_r = txna->commit(txna, 0); CKERR(chk_r); }
+ }
+
+ {
+ DB_TXN *txnb = NULL;
+ { int chk_r = env->txn_begin(env, NULL, &txnb, 0); CKERR(chk_r); }
+
+ {
+ DBT nullextra;
+ DBT *nullextrap = dbt_init(&nullextra, NULL, 0);
+ { int chk_r = db->update_broadcast(db, txnb, nullextrap, update_flags); CKERR(chk_r); }
+ }
+
+ { int chk_r = txnb->commit(txnb, 0); CKERR(chk_r); }
+ }
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ for (unsigned int i = 0;
+ i < (sizeof(updates_called) / sizeof(updates_called[0])); ++i) {
+ assert(updates_called[i]);
+ }
+
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test(true);
+ run_test(false);
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_can_delete_elements.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_can_delete_elements.cc
new file mode 100644
index 00000000..76fbbbfc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_can_delete_elements.cc
@@ -0,0 +1,166 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that update broadcast can delete (all) elements
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const unsigned int NUM_KEYS = 100;
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+
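+    // the test relies on set_val(NULL, ...) deleting the row rather than
+    // storing an empty value, so every key should be gone after the broadcast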
+ set_val(NULL, set_extra);
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db, uint32_t flags) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, flags); CKERR(r);
+ return r;
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v), bool already_deleted) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn, keyp, valp, 0);
+ if (already_deleted) {
+ CKERR2(r, DB_NOTFOUND);
+ r = 0;
+ } else {
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ check_val(i, *vp);
+ }
+ }
+ return r;
+}
+
+static void run_test(bool is_resetting) {
+ DB *db;
+ uint32_t update_flags = is_resetting ? DB_IS_RESETTING_OP : 0;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_1, txn_11, 0, {
+ { int chk_r = do_verify_results(txn_11, db, chk_original, false); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db, update_flags); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_2, txn_21, 0, {
+ { int chk_r = do_verify_results(txn_21, db, chk_original, true); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, chk_original, false); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test(true);
+ run_test(false);
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_changes_values.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_changes_values.cc
new file mode 100644
index 00000000..130f023a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_changes_values.cc
@@ -0,0 +1,154 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update broadcast can change (all) values
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const unsigned int NUM_KEYS = 100;
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ e = _e(*k);
+ v = _u(*ov, e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db, uint32_t flags) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, flags); CKERR(r);
+ return r;
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ assert(*vp == _u(_v(i), _e(i)));
+ }
+ return r;
+}
+
+static void run_test(bool is_resetting) {
+ DB *db;
+ uint32_t update_flags = is_resetting ? DB_IS_RESETTING_OP : 0;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db, update_flags); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test(true);
+ run_test(false);
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_indexer.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_indexer.cc
new file mode 100644
index 00000000..1ee699e1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_indexer.cc
@@ -0,0 +1,225 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that a hot indexer can be built after an update broadcast, and that
+// snapshot txns started before the broadcast can no longer open cursors
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+
+static int update_fun(DB *UU(db),
+ const DBT *UU(key),
+ const DBT *UU(old_val), const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ set_val(extra, set_extra);
+ return 0;
+}
+
+
+static int generate_row_for_del(
+ DB *UU(dest_db),
+ DB *UU(src_db),
+ DBT_ARRAY *dest_key_arrays,
+ const DBT *UU(src_key),
+ const DBT *UU(src_val)
+ )
+{
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ dest_key->size=0;
+ return 0;
+}
+
+static int generate_row_for_put(
+ DB *UU(dest_db),
+ DB *UU(src_db),
+ DBT_ARRAY *dest_key_arrays,
+ DBT_ARRAY *dest_val_arrays,
+ const DBT *UU(src_key),
+ const DBT *UU(src_val)
+ )
+{
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ dest_key->flags = 0;
+ dest_val->flags = 0;
+
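+    // the hot indexer runs after the DB_IS_RESETTING_OP broadcast, so the
+    // only value it should ever hand us is the broadcast value (100), never
+    // one of the older committed versions (1, 2 or 3)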
+ uint8_t src_val_data;
+ assert(src_val->size == 1);
+ src_val_data = *(uint8_t *)src_val->data;
+ assert(src_val_data == 100);
+ dest_key->size=0;
+ dest_val->size=0;
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->set_generate_row_callback_for_put(env,generate_row_for_put); CKERR(chk_r); }
+ { int chk_r = env->set_generate_row_callback_for_del(env,generate_row_for_del); CKERR(chk_r); }
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static void run_test(void) {
+ DB* db = NULL;
+ DB* hot_index_db = NULL;
+ DB_INDEXER* indexer = NULL;
+ DBT key, val;
+ uint32_t mult_db_flags = 0;
+ uint8_t key_data = 0;
+ uint8_t val_data = 0;
+ DB_TXN* txn_read1 = NULL;
+ DB_TXN* txn_read2 = NULL;
+ DB_TXN* txn_read3 = NULL;
+
+
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_create, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+
+
+ dbt_init(&key,&key_data,sizeof(uint8_t));
+ dbt_init(&val,&val_data,sizeof(uint8_t));
+
+ val_data = 1;
+ IN_TXN_COMMIT(env, NULL, txn_put1, 0, {
+ { int chk_r = db->put(db, txn_put1, &key, &val, 0); CKERR(chk_r); }
+ });
+ { int chk_r = env->txn_begin(env, NULL, &txn_read1, DB_TXN_SNAPSHOT); CKERR(chk_r); }
+
+ val_data = 2;
+ IN_TXN_COMMIT(env, NULL, txn_put2, 0, {
+ { int chk_r = db->put(db, txn_put2, &key, &val, 0); CKERR(chk_r); }
+ });
+ { int chk_r = env->txn_begin(env, NULL, &txn_read2, DB_TXN_SNAPSHOT); CKERR(chk_r); }
+
+ val_data = 3;
+ IN_TXN_COMMIT(env, NULL, txn_put3, 0, {
+ { int chk_r = db->put(db, txn_put3, &key, &val, 0); CKERR(chk_r); }
+ });
+ { int chk_r = env->txn_begin(env, NULL, &txn_read3, DB_TXN_SNAPSHOT); CKERR(chk_r); }
+
+ //
+ // at this point, we should have a leafentry with 3 committed values.
+ //
+
+
+ //
+ // now do an update broadcast that will set the val to something bigger
+ //
+ val_data = 100;
+ IN_TXN_COMMIT(env, NULL, txn_broadcast, 0, {
+ { int chk_r = db->update_broadcast(db, txn_broadcast, &val, DB_IS_RESETTING_OP); CKERR(chk_r); }
+ });
+
+ //
+ // now create an indexer
+ //
+ IN_TXN_COMMIT(env, NULL, txn_indexer, 0, {
+ // create DB
+ { int chk_r = db_create(&hot_index_db, env, 0); CKERR(chk_r); }
+ { int chk_r = hot_index_db->open(hot_index_db, txn_indexer, "bar.db", NULL, DB_BTREE, DB_CREATE|DB_IS_HOT_INDEX, 0666); CKERR(chk_r); }
+ { int chk_r = env->create_indexer(
+ env,
+ txn_indexer,
+ &indexer,
+ db,
+ 1,
+ &hot_index_db,
+ &mult_db_flags,
+ 0
+ ); CKERR(chk_r); }
+ { int chk_r = indexer->build(indexer); CKERR(chk_r); }
+ { int chk_r = indexer->close(indexer); CKERR(chk_r); }
+ });
+
+ //verify that txn_read1,2,3 cannot open a cursor on db
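+    // they began before the DB_IS_RESETTING_OP broadcast, so their snapshots
+    // predate the dictionary reset and each cursor open is expected to fail
+    // with TOKUDB_MVCC_DICTIONARY_TOO_NEW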
+ DBC* cursor = NULL;
+ { int chk_r = db->cursor(db, txn_read1, &cursor, 0); CKERR2(chk_r, TOKUDB_MVCC_DICTIONARY_TOO_NEW); }
+ { int chk_r = db->cursor(db, txn_read2, &cursor, 0); CKERR2(chk_r, TOKUDB_MVCC_DICTIONARY_TOO_NEW); }
+ { int chk_r = db->cursor(db, txn_read3, &cursor, 0); CKERR2(chk_r, TOKUDB_MVCC_DICTIONARY_TOO_NEW); }
+ IN_TXN_COMMIT(env, NULL, txn_read_succ, 0, {
+ { int chk_r = db->cursor(db, txn_read_succ, &cursor, 0); CKERR(chk_r); }
+ { int chk_r = cursor->c_close(cursor); CKERR(chk_r); }
+ cursor = NULL;
+ });
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, 0, 0666); CKERR(chk_r); }
+ { int chk_r = db->cursor(db, txn_read1, &cursor, 0); CKERR2(chk_r, TOKUDB_MVCC_DICTIONARY_TOO_NEW); }
+ { int chk_r = db->cursor(db, txn_read2, &cursor, 0); CKERR2(chk_r, TOKUDB_MVCC_DICTIONARY_TOO_NEW); }
+ { int chk_r = db->cursor(db, txn_read3, &cursor, 0); CKERR2(chk_r, TOKUDB_MVCC_DICTIONARY_TOO_NEW); }
+ IN_TXN_COMMIT(env, NULL, txn_read_succ, 0, {
+ { int chk_r = db->cursor(db, txn_read_succ, &cursor, 0); CKERR(chk_r); }
+ { int chk_r = cursor->c_close(cursor); CKERR(chk_r); }
+ cursor = NULL;
+ });
+
+ // commit the read transactions
+ { int chk_r = txn_read1->commit(txn_read1, 0); CKERR(chk_r); }
+ { int chk_r = txn_read2->commit(txn_read2, 0); CKERR(chk_r); }
+ { int chk_r = txn_read3->commit(txn_read3, 0); CKERR(chk_r); }
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ { int chk_r = hot_index_db->close(hot_index_db, 0); CKERR(chk_r); }
+
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_loader.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_loader.cc
new file mode 100644
index 00000000..ea80be6a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_loader.cc
@@ -0,0 +1,178 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that a loader can be created and used on a dictionary after an
+// update broadcast, and that a later update still works
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+
+static int update_fun(DB *UU(db),
+ const DBT *UU(key),
+ const DBT *UU(old_val), const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ set_val(extra, set_extra);
+ return 0;
+}
+
+
+static int generate_row_for_del(
+ DB *UU(dest_db),
+ DB *UU(src_db),
+ DBT_ARRAY *dest_key_arrays,
+ const DBT *UU(src_key),
+ const DBT *UU(src_val)
+ )
+{
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ dest_key->flags = 0;
+ dest_key->size=0;
+ return 0;
+}
+
+static int generate_row_for_put(
+ DB *UU(dest_db),
+ DB *UU(src_db),
+ DBT_ARRAY *dest_key_arrays,
+ DBT_ARRAY *dest_val_arrays,
+ const DBT *UU(src_key),
+ const DBT *UU(src_val)
+ )
+{
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = &dest_val_arrays->dbts[0];
+ dest_key->flags = 0;
+ dest_val->flags = 0;
+
+ uint8_t src_val_data;
+ assert(src_val->size == 1);
+ src_val_data = *(uint8_t *)src_val->data;
+ assert(src_val_data == 100);
+ dest_key->size=0;
+ dest_val->size=0;
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ { int chk_r = env->set_generate_row_callback_for_put(env,generate_row_for_put); CKERR(chk_r); }
+ { int chk_r = env->set_generate_row_callback_for_del(env,generate_row_for_del); CKERR(chk_r); }
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static void run_test(void) {
+ DB* db = NULL;
+ DB_LOADER* loader = NULL;
+ DBT key, val;
+ uint32_t mult_db_flags = 0;
+ uint32_t mult_dbt_flags = DB_DBT_REALLOC;
+ uint8_t key_data = 0;
+ uint8_t val_data = 0;
+
+
+ IN_TXN_COMMIT(env, NULL, txn_create, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_create, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+
+
+ dbt_init(&key,&key_data,sizeof(uint8_t));
+ dbt_init(&val,&val_data,sizeof(uint8_t));
+
+ val_data = 1;
+
+
+ //
+ // now do an update broadcast that will set the val to something bigger
+ //
+ val_data = 100;
+ IN_TXN_COMMIT(env, NULL, txn_broadcast, 0, {
+ { int chk_r = db->update_broadcast(db, txn_broadcast, &val, DB_IS_RESETTING_OP); CKERR(chk_r); }
+ });
+
+ //
+ // now create a loader
+ //
+ IN_TXN_COMMIT(env, NULL, txn_loader, 0, {
+ // create DB
+ { int chk_r = env->create_loader(
+ env,
+ txn_loader,
+ &loader,
+ db,
+ 1,
+ &db,
+ &mult_db_flags,
+ &mult_dbt_flags,
+ 0
+ ); CKERR(chk_r); }
+ { int chk_r = loader->put(loader, &key, &val); CKERR(chk_r); }
+ { int chk_r = loader->close(loader); CKERR(chk_r); }
+ });
+
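+ // a point update on the same dictionary still works after the loader has closed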
+ IN_TXN_COMMIT(env, NULL, txn_update, 0, {
+ { int chk_r = db->update(db, txn_update, &key, &val, 0); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_nested_updates.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_nested_updates.cc
new file mode 100644
index 00000000..113832ff
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_nested_updates.cc
@@ -0,0 +1,165 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that a transaction can do an update broadcast, then create a child
+// transaction which also does an update broadcast
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const unsigned int NUM_KEYS = 100;
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ e = _e(*k);
+ v = _u(*ov, e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db, uint32_t flags) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, flags);
+ return r;
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, bool updated_twice) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (updated_twice) {
+ assert(*vp == _u(_u(_v(i), _e(i)), _e(i)));
+ } else {
+ assert(*vp == _u(_v(i), _e(i)));
+ }
+ }
+ return r;
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db, 0); CKERR(chk_r); }
+ { int chk_r = do_verify_results(txn_2, db, false); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_2, txn_21, 0, {
+ { int chk_r = do_updates(txn_21, db, 0); CKERR(chk_r); }
+ { int chk_r = do_verify_results(txn_21, db, true); CKERR(chk_r); }
+ });
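+ // a resetting broadcast inside a child transaction is rejected with EINVAL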
+ IN_TXN_COMMIT(env, txn_2, txn_22, 0, {
+ { int chk_r = do_updates(txn_22, db, DB_IS_RESETTING_OP); CKERR2(chk_r, EINVAL); }
+ });
+
+ { int chk_r = do_verify_results(txn_2, db, true); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, true); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_previously_deleted.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_previously_deleted.cc
new file mode 100644
index 00000000..c6d03427
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_previously_deleted.cc
@@ -0,0 +1,196 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update broadcast on a dictionary with previously deleted
+// keys does not invoke update_fun for those keys
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const int to_delete[] = { 1, 1, 0, 0, 1, 0, 0, 0, 1, 0 };
+const int to_update[] = { 0, 1, 1, 1, 0, 0, 1, 0, 1, 0 };
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(extra->size == 0);
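+ // keys deleted before the broadcast must never reach update_fun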
+ if (to_delete[*k]) {
+ assert(0);
+ } else {
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ v = _u(*ov, _e(*k));
+ }
+
+ if (to_update[*k]) {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_deletes(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key;
+ unsigned int i;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ for (i = 0; i < (sizeof(to_delete) / sizeof(to_delete[0])); ++i) {
+ if (to_delete[i]) {
+ r = db->del(db, txn, keyp, DB_DELETE_ANY); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db, uint32_t flags) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, flags); CKERR(r);
+ return r;
+}
+
+static void chk_updated(const unsigned int k, const unsigned int v) {
+ if (to_update[k]) {
+ assert(v == _u(_v(k), _e(k)));
+ } else {
+ assert(v == _v(k));
+ }
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v)) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ r = db->get(db, txn, keyp, valp, 0);
+ if (to_delete[i]) {
+ CKERR2(r, DB_NOTFOUND);
+ r = 0;
+ } else {
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ check_val(i, *vp);
+ }
+ }
+ return r;
+}
+
+static void run_test(bool is_resetting) {
+ DB *db;
+ uint32_t update_flags = is_resetting ? DB_IS_RESETTING_OP : 0;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+
+ { int chk_r = do_deletes(txn_1, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_1, txn_11, 0, {
+ { int chk_r = do_verify_results(txn_11, db, chk_original); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db, update_flags); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, chk_updated); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test(true);
+ run_test(false);
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_stress.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_stress.cc
new file mode 100644
index 00000000..eebbd7fc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_stress.cc
@@ -0,0 +1,177 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// stress test for update broadcast. 10M keys (8-byte key/value pairs) should
+// produce 2, maybe 3 levels of tree, making sure flushes work
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const unsigned int NUM_KEYS = 10000000;
+const unsigned int MAGIC_EXTRA = 0x4ac0ffee;
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v + 2 * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ assert(*e == MAGIC_EXTRA);
+ v = _u(*ov, _e(*k));
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int
+int_cmp(DB *UU(db), const DBT *a, const DBT *b) {
+ unsigned int *ap, *bp;
+ assert(a->size == sizeof(*ap));
+ CAST_FROM_VOIDP(ap, a->data);
+ assert(b->size == sizeof(*bp));
+ CAST_FROM_VOIDP(bp, b->data);
+ return (*ap > *bp) - (*ap < *bp);
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->set_default_bt_compare(env, int_cmp); CKERR(chk_r); }
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ DBT extra;
+ unsigned int e = MAGIC_EXTRA;
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ int r = db->update_broadcast(db, txn, extrap, 0); CKERR(r);
+ return r;
+}
+
+static void chk_updated(const unsigned int k, const unsigned int v) {
+ assert(v == _u(_v(k), _e(k)));
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v)) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ check_val(i, *vp);
+ }
+ return r;
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
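+ // run the broadcast in a transaction that aborts; a child transaction sees
+ // the updated values before the abort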
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_2, txn_21, 0, {
+ { int chk_r = do_verify_results(txn_21, db, chk_updated); CKERR(chk_r); }
+ });
+ });
+
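+ // after the abort, every key must still have its original value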
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, chk_original); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_update_fun_has_choices.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_update_fun_has_choices.cc
new file mode 100644
index 00000000..24ed5210
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_update_fun_has_choices.cc
@@ -0,0 +1,176 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update broadcast can change and delete different values,
+// or do nothing
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const unsigned int NUM_KEYS = 1000;
+
+static inline bool should_insert(const unsigned int i) { return i % 2 == 0; }
+static inline bool should_update(const unsigned int i) { return i % 3 == 0; }
+static inline bool should_delete(const unsigned int i) { return (i % 5 == 0) && (i % 3 != 0); }
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(should_insert(*k));
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+ if (should_update(*k)) {
+ e = _e(*k);
+ v = _u(*ov, e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+ } else if (should_delete(*k)) {
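+ // passing NULL to set_val deletes the row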
+ set_val(NULL, set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ if (should_insert(i)) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db, uint32_t flags) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, flags); CKERR(r);
+ return r;
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn, keyp, valp, 0);
+ if (!should_insert(i) || should_delete(i)) {
+ CKERR2(r, DB_NOTFOUND);
+ r = 0;
+ } else if (should_insert(i)) {
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (should_update(i)) {
+ assert(*vp == _u(_v(i), _e(i)));
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ }
+ return r;
+}
+
+static void run_test(bool is_resetting) {
+ DB *db;
+ uint32_t update_flags = is_resetting ? DB_IS_RESETTING_OP : 0;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db, update_flags); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test(true);
+ run_test(false);
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_with_empty_table.cc b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_with_empty_table.cc
new file mode 100644
index 00000000..e46b5398
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_broadcast_with_empty_table.cc
@@ -0,0 +1,107 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that update broadcast does nothing if the table is empty
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+static int update_fun(DB *UU(db),
+ const DBT *UU(key),
+ const DBT *UU(old_val), const DBT *UU(extra),
+ void UU((*set_val)(const DBT *new_val,
+ void *set_extra)),
+ void *UU(set_extra)) {
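+ // the table is always empty in this test, so update_fun must never be called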
+ assert(0); return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_updates(DB_TXN *txn, DB *db, uint32_t flags) {
+ DBT extra;
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ int r = db->update_broadcast(db, txn, extrap, flags); CKERR(r);
+ return r;
+}
+
+static void run_test(bool is_resetting, bool prelock) {
+ DB *db;
+ uint32_t update_flags = is_resetting ? DB_IS_RESETTING_OP : 0;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ if (prelock) {
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db->pre_acquire_table_lock(db, txn_2); CKERR(chk_r); }
+ });
+ }
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db, update_flags); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ run_test(true,true);
+ run_test(false,true);
+ run_test(true,false);
+ run_test(false,false);
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_calls_back.cc b/storage/tokudb/PerconaFT/src/tests/test_update_calls_back.cc
new file mode 100644
index 00000000..57664062
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_calls_back.cc
@@ -0,0 +1,136 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update calls back into the update function
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const int to_update[] = { 0, 1, 1, 1, 0, 0, 1, 0, 1, 0 };
+ int updates_called[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+// the commands are: byte 1 is "nop" "add" or "del". Byte 2 is the amount to add.
+enum cmd { CNOP, CADD, CDEL };
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *UU(old_val), const DBT *UU(extra),
+ void UU((*set_val)(const DBT *new_val,
+ void *set_extra)),
+ void *UU(set_extra)) {
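+ // record that update_fun was called for this key; set_val is never called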
+ unsigned int *k;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(to_update[*k] == 1);
+ assert(updates_called[*k] == 0);
+ updates_called[*k] = 1;
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ {
+ DB_TXN* txna = NULL;
+ { int chk_r = env->txn_begin(env, NULL, &txna, 0); CKERR(chk_r); }
+
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txna, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ {
+ DBT key, val;
+ unsigned int i;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, "a", 2);
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ { int chk_r = db->put(db, txna, keyp, valp, 0); CKERR(chk_r); }
+ }
+ }
+
+ { int chk_r = txna->commit(txna, 0); CKERR(chk_r); }
+ }
+
+ {
+ DB_TXN *txnb = NULL;
+ { int chk_r = env->txn_begin(env, NULL, &txnb, 0); CKERR(chk_r); }
+
+ {
+ DBT key, nullextra;
+ unsigned int i;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *nullextrap = dbt_init(&nullextra, NULL, 0);
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ if (to_update[i] == 1) {
+ { int chk_r = db->update(db, txnb, keyp, nullextrap, 0); CKERR(chk_r); }
+ }
+ }
+ }
+
+ { int chk_r = txnb->commit(txnb, 0); CKERR(chk_r); }
+ }
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ for (unsigned int i = 0;
+ i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ assert(to_update[i] == updates_called[i]);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_can_delete_elements.cc b/storage/tokudb/PerconaFT/src/tests/test_update_can_delete_elements.cc
new file mode 100644
index 00000000..389aec32
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_can_delete_elements.cc
@@ -0,0 +1,168 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update can delete some elements
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const int to_delete[] = { 0, 1, 1, 1, 0, 0, 1, 0, 1, 0 };
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == 0);
+
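+ // calling set_val with a NULL DBT deletes the element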
+ set_val(NULL, set_extra);
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < (sizeof(to_delete) / sizeof(to_delete[0])); ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ for (i = 0; i < (sizeof(to_delete) / sizeof(to_delete[0])); ++i) {
+ if (to_delete[i] == 1) {
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v), bool already_deleted) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < (sizeof(to_delete) / sizeof(to_delete[0])); ++i) {
+ r = db->get(db, txn, keyp, valp, 0);
+ if (already_deleted && to_delete[i]) {
+ CKERR2(r, DB_NOTFOUND);
+ r = 0;
+ } else {
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ check_val(i, *vp);
+ }
+ }
+ return r;
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_1, txn_11, 0, {
+ { int chk_r = do_verify_results(txn_11, db, chk_original, false); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_2, txn_21, 0, {
+ { int chk_r = do_verify_results(txn_21, db, chk_original, true); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, chk_original, false); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_changes_values.cc b/storage/tokudb/PerconaFT/src/tests/test_update_changes_values.cc
new file mode 100644
index 00000000..5bcebf74
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_changes_values.cc
@@ -0,0 +1,162 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update changes some values
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const int to_update[] = { 0, 1, 1, 1, 0, 0, 1, 0, 1, 0 };
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
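+ // keyp and valp point at i and v, so each iteration reuses the same DBTs with fresh contents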
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ if (to_update[i] == 1) {
+ e = _e(i); // E I O
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ r = db->get(db, txn, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (to_update[i]) {
+ assert(*vp == _u(_v(i), _e(i)));
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ return r;
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_nested_updates.cc b/storage/tokudb/PerconaFT/src/tests/test_update_nested_updates.cc
new file mode 100644
index 00000000..6f1017d8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_nested_updates.cc
@@ -0,0 +1,174 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that a transaction can update, then create a child which also updates
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const int to_update[] = { 0, 1, 1, 1, 0, 0, 1, 0, 1, 0 };
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ if (to_update[i] == 1) {
+ e = _e(i); // E I O
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, bool updated_twice) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ r = db->get(db, txn, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ if (to_update[i]) {
+ if (updated_twice) {
+ assert(*vp == _u(_u(_v(i), _e(i)), _e(i)));
+ } else {
+ assert(*vp == _u(_v(i), _e(i)));
+ }
+ } else {
+ assert(*vp == _v(i));
+ }
+ }
+ return r;
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ { int chk_r = do_verify_results(txn_2, db, false); CKERR(chk_r); }
+
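+ // the child transaction updates the same keys again on top of the parent's updates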
+ IN_TXN_COMMIT(env, txn_2, txn_21, 0, {
+ { int chk_r = do_updates(txn_21, db); CKERR(chk_r); }
+ { int chk_r = do_verify_results(txn_21, db, true); CKERR(chk_r); }
+ });
+
+ { int chk_r = do_verify_results(txn_2, db, true); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, true); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_nonexistent_keys.cc b/storage/tokudb/PerconaFT/src/tests/test_update_nonexistent_keys.cc
new file mode 100644
index 00000000..79fc0d14
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_nonexistent_keys.cc
@@ -0,0 +1,193 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update, if called on a nonexistent key, will call back
+// into update_fun with the right arguments and allow it to set a
+// new value
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const int to_insert[] = { 0, 0, 1, 1, 1, 0, 0, 1, 1, 1 };
+const int to_update[] = { 0, 1, 1, 1, 0, 0, 1, 0, 1, 0 };
+
+static inline bool should_insert(const unsigned int i) { return to_insert[i]; }
+static inline bool should_update(const unsigned int i) { return to_update[i]; }
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ if (!should_insert(*k)) {
+ assert(old_val == NULL);
+ v = _u(_v(*k), *e);
+ } else {
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ v = _u(*ov, *e);
+ }
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ if (should_insert(i)) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ const DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ const DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ if (should_update(i)) {
+ e = _e(i); // E I O
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void chk_updated(const unsigned int k, const unsigned int v) {
+ if (should_update(k)) {
+ assert(v == _u(_v(k), _e(k)));
+ } else {
+ assert(v == _v(k));
+ }
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v), bool after_update) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < (sizeof(to_insert) / sizeof(to_insert[0])); ++i) {
+ r = db->get(db, txn, keyp, valp, 0);
+ if (should_insert(i) || (after_update && should_update(i))) {
+ CKERR(r);
+ assert(val.size == sizeof(v));
+ v = *(unsigned int *) val.data;
+
+ check_val(i, v);
+ } else {
+ CKERR2(r, DB_NOTFOUND);
+ r = 0;
+ }
+ }
+ return r;
+}
+
+int test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+
+ IN_TXN_ABORT(env, txn_1, txn_11, 0, {
+ { int chk_r = do_verify_results(txn_11, db, chk_original, false); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, chk_updated, true); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_previously_deleted.cc b/storage/tokudb/PerconaFT/src/tests/test_update_previously_deleted.cc
new file mode 100644
index 00000000..11c4f536
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_previously_deleted.cc
@@ -0,0 +1,202 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update called on previously deleted keys works the same as
+// with nonexistent keys
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const int to_delete[] = { 1, 1, 0, 0, 1, 0, 0, 0, 1, 0 };
+const int to_update[] = { 0, 1, 1, 1, 0, 0, 1, 0, 1, 0 };
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
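+
+// Worked example (illustrative only), based on the helpers and tables above:
+//   key 1: deleted then updated -> update_fun sees old_val == NULL and writes
+//          _u(_v(1), _e(1)) = 9 * 9 * 5 = 405, so the key reappears.
+//   key 2: kept and updated -> the old value _v(2) = 8 becomes _u(8, 6) = 384.
+//   key 0: deleted and not updated -> stays DB_NOTFOUND after the updates.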
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ if (to_delete[*k]) {
+ assert(old_val == NULL);
+ v = _u(_v(*k), *e);
+ } else {
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ v = _u(*ov, *e);
+ }
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_deletes(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key;
+ unsigned int i;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ for (i = 0; i < (sizeof(to_delete) / sizeof(to_delete[0])); ++i) {
+ if (to_delete[i]) {
+ r = db->del(db, txn, keyp, DB_DELETE_ANY); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ if (to_update[i] == 1) {
+ e = _e(i); // E I O
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void chk_updated(const unsigned int k, const unsigned int v) {
+ if (to_update[k]) {
+ assert(v == _u(_v(k), _e(k)));
+ } else {
+ assert(v == _v(k));
+ }
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v), bool already_updated) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ r = db->get(db, txn, keyp, valp, 0);
+ if (to_delete[i] && !(already_updated && to_update[i])) {
+ CKERR2(r, DB_NOTFOUND);
+ r = 0;
+ } else {
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ check_val(i, *vp);
+ }
+ }
+ return r;
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+
+ { int chk_r = do_deletes(txn_1, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_1, txn_11, 0, {
+ { int chk_r = do_verify_results(txn_11, db, chk_original, false); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, chk_updated, true); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_stress.cc b/storage/tokudb/PerconaFT/src/tests/test_update_stress.cc
new file mode 100644
index 00000000..75b4219f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_stress.cc
@@ -0,0 +1,187 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// stress test for update. 10M 8-byte keys should give a tree that is 2,
+// maybe 3 levels deep, which makes sure flushes work
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const unsigned int NUM_KEYS = 10000000;
+
+static inline bool should_update(const unsigned int i) { return i % 3 == 0; }
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v + 2 * e; }
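+
+// Worked example (illustrative only): key 3 is updated (3 % 3 == 0), so its
+// value goes from _v(3) = 7 to _u(7, _e(3)) = 7 + 2 * 7 = 21, while key 4 is
+// left at _v(4) = 6.  For i > 10, _v(i) wraps around as unsigned arithmetic,
+// but the verifiers below recompute the expected value with the same helpers,
+// so the comparison stays consistent.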
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static int
+int_cmp(DB *UU(db), const DBT *a, const DBT *b) {
+ unsigned int *ap, *bp;
+ assert(a->size == sizeof(*ap));
+ CAST_FROM_VOIDP(ap, a->data);
+ assert(b->size == sizeof(*bp));
+ CAST_FROM_VOIDP(bp, b->data);
+ return (*ap > *bp) - (*ap < *bp);
+}
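+
+// int_cmp orders the keys numerically; setup() below installs it with
+// env->set_default_bt_compare().  The expression (*ap > *bp) - (*ap < *bp)
+// yields -1, 0 or +1.  For example (illustrative), int_cmp puts 1 before 256,
+// whereas a bytewise comparison of the little-endian encodings
+// {01 00 00 00} and {00 01 00 00} would put 256 first.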
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->set_default_bt_compare(env, int_cmp); CKERR(chk_r); }
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < NUM_KEYS; ++i) {
+ if (should_update(i)) {
+ e = _e(i); // E I O
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void chk_updated(const unsigned int k, const unsigned int v) {
+ if (should_update(k)) {
+ assert(v == _u(_v(k), _e(k)));
+ } else {
+ assert(v == _v(k));
+ }
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v)) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < NUM_KEYS; ++i) {
+ r = db->get(db, txn, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ check_val(i, *vp);
+ }
+ return r;
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+ });
+
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_2, txn_21, 0, {
+ { int chk_r = do_verify_results(txn_21, db, chk_updated); CKERR(chk_r); }
+ });
+ });
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = do_verify_results(txn_3, db, chk_original); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_txn_snapshot_works_concurrently.cc b/storage/tokudb/PerconaFT/src/tests/test_update_txn_snapshot_works_concurrently.cc
new file mode 100644
index 00000000..1985bdae
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_txn_snapshot_works_concurrently.cc
@@ -0,0 +1,183 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update doesn't infringe on the values read by another txn
+// started with TXN_SNAPSHOT
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const int to_update[] = { 0, 1, 1, 1, 0, 0, 1, 0, 1, 0 };
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+static inline unsigned int _e(const unsigned int i) { return i + 4; }
+static inline unsigned int _u(const unsigned int v, const unsigned int e) { return v * v * e; }
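+
+// Worked example (illustrative only) of the snapshot behavior checked below:
+// txn_2 is begun with DB_TXN_SNAPSHOT before txn_3 runs the updates, so for
+// key 1 txn_2 keeps reading _v(1) = 9 while txn_3 reads its own update
+// _u(9, _e(1)) = 9 * 9 * 5 = 405.  After both txns abort, txn_4 sees the
+// original values again.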
+
+static int update_fun(DB *UU(db),
+ const DBT *key,
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ unsigned int *k, *ov, *e, v;
+ assert(key->size == sizeof(*k));
+ CAST_FROM_VOIDP(k, key->data);
+ assert(old_val->size == sizeof(*ov));
+ CAST_FROM_VOIDP(ov, old_val->data);
+ assert(extra->size == sizeof(*e));
+ CAST_FROM_VOIDP(e, extra->data);
+ v = _u(*ov, *e);
+
+ {
+ DBT newval;
+ set_val(dbt_init(&newval, &v, sizeof(v)), set_extra);
+ }
+
+ return 0;
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i, e;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, &e, sizeof(e));
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ if (to_update[i] == 1) {
+ e = _e(i); // E I O
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void chk_updated(const unsigned int k, const unsigned int v) {
+ if (to_update[k]) {
+ assert(v == _u(_v(k), _e(k)));
+ } else {
+ assert(v == _v(k));
+ }
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v)) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < (sizeof(to_update) / sizeof(to_update[0])); ++i) {
+ r = db->get(db, txn, keyp, valp, 0); CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ check_val(i, *vp);
+ }
+ return r;
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_1, txn_11, 0, {
+ { int chk_r = do_verify_results(txn_11, db, chk_original); CKERR(chk_r); }
+ });
+ });
+
+ {
+ DB_TXN *txn_2, *txn_3;
+ { int chk_r = env->txn_begin(env, NULL, &txn_2, DB_TXN_SNAPSHOT); CKERR(chk_r); }
+ { int chk_r = do_verify_results(txn_2, db, chk_original); CKERR(chk_r); }
+ { int chk_r = env->txn_begin(env, NULL, &txn_3, 0); CKERR(chk_r); }
+ { int chk_r = do_updates(txn_3, db); CKERR(chk_r); }
+ { int chk_r = do_verify_results(txn_2, db, chk_original); CKERR(chk_r); }
+ { int chk_r = do_verify_results(txn_3, db, chk_updated); CKERR(chk_r); }
+ { int chk_r = txn_2->abort(txn_2); CKERR(chk_r); }
+ { int chk_r = txn_3->abort(txn_3); CKERR(chk_r); }
+ }
+
+ IN_TXN_COMMIT(env, NULL, txn_4, 0, {
+ { int chk_r = do_verify_results(txn_4, db, chk_original); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_txn_snapshot_works_correctly_with_deletes.cc b/storage/tokudb/PerconaFT/src/tests/test_update_txn_snapshot_works_correctly_with_deletes.cc
new file mode 100644
index 00000000..9cbda2ab
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_txn_snapshot_works_correctly_with_deletes.cc
@@ -0,0 +1,168 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that an update doesn't infringe on other txns started with
+// TXN_SNAPSHOT, when the update deletes elements
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+const int to_delete[] = { 0, 1, 1, 1, 0, 0, 1, 0, 1, 0 };
+
+static inline unsigned int _v(const unsigned int i) { return 10 - i; }
+
+static int update_fun(DB *UU(db),
+ const DBT *UU(key),
+ const DBT *UU(old_val), const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ assert(extra->size == 0);
+
+ set_val(NULL, set_extra);
+
+ return 0;
+}
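+
+// Passing NULL to set_val() makes the update delete the key instead of
+// storing a value.  The checks below rely on that: after do_updates(), the
+// updating txn sees DB_NOTFOUND for every to_delete[] key, while a snapshot
+// txn begun earlier still reads the original _v(k) values.  Illustrative call,
+// as issued by do_updates() below:
+//
+//     DBT key, extra;
+//     dbt_init(&key, &i, sizeof(i));
+//     dbt_init(&extra, NULL, 0);            // zero-length extra
+//     db->update(db, txn, &key, &extra, 0); // update_fun -> set_val(NULL, ...)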
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_inserts(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, v;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, &v, sizeof(v));
+ for (i = 0; i < (sizeof(to_delete) / sizeof(to_delete[0])); ++i) {
+ v = _v(i);
+ r = db->put(db, txn, keyp, valp, 0); CKERR(r);
+ }
+ return r;
+}
+
+static int do_updates(DB_TXN *txn, DB *db) {
+ int r = 0;
+ DBT key, extra;
+ unsigned int i;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *extrap = dbt_init(&extra, NULL, 0);
+ for (i = 0; i < (sizeof(to_delete) / sizeof(to_delete[0])); ++i) {
+ if (to_delete[i] == 1) {
+ r = db->update(db, txn, keyp, extrap, 0); CKERR(r);
+ }
+ }
+ return r;
+}
+
+static void chk_original(const unsigned int k, const unsigned int v) {
+ assert(v == _v(k));
+}
+
+static int do_verify_results(DB_TXN *txn, DB *db, void (*check_val)(const unsigned int k, const unsigned int v), bool already_deleted) {
+ int r = 0;
+ DBT key, val;
+ unsigned int i, *vp;
+ DBT *keyp = dbt_init(&key, &i, sizeof(i));
+ DBT *valp = dbt_init(&val, NULL, 0);
+ for (i = 0; i < (sizeof(to_delete) / sizeof(to_delete[0])); ++i) {
+ r = db->get(db, txn, keyp, valp, 0);
+ if (already_deleted && to_delete[i]) {
+ CKERR2(r, DB_NOTFOUND);
+ r = 0;
+ } else {
+ CKERR(r);
+ assert(val.size == sizeof(*vp));
+ CAST_FROM_VOIDP(vp, val.data);
+ check_val(i, *vp);
+ }
+ }
+ return r;
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ setup();
+
+ DB *db;
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+
+ { int chk_r = do_inserts(txn_1, db); CKERR(chk_r); }
+
+ IN_TXN_COMMIT(env, txn_1, txn_11, 0, {
+ { int chk_r = do_verify_results(txn_11, db, chk_original, false); CKERR(chk_r); }
+ });
+ });
+
+ {
+ DB_TXN *txn_2, *txn_3;
+ { int chk_r = env->txn_begin(env, NULL, &txn_2, DB_TXN_SNAPSHOT); CKERR(chk_r); }
+ { int chk_r = do_verify_results(txn_2, db, chk_original, false); CKERR(chk_r); }
+ { int chk_r = env->txn_begin(env, NULL, &txn_3, 0); CKERR(chk_r); }
+ { int chk_r = do_updates(txn_3, db); CKERR(chk_r); }
+ { int chk_r = do_verify_results(txn_2, db, chk_original, false); CKERR(chk_r); }
+ { int chk_r = do_verify_results(txn_3, db, chk_original, true); CKERR(chk_r); }
+ { int chk_r = txn_2->abort(txn_2); CKERR(chk_r); }
+ { int chk_r = txn_3->abort(txn_3); CKERR(chk_r); }
+ }
+
+ IN_TXN_COMMIT(env, NULL, txn_4, 0, {
+ { int chk_r = do_verify_results(txn_4, db, chk_original, false); CKERR(chk_r); }
+ });
+
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+
+ cleanup();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_update_with_empty_table.cc b/storage/tokudb/PerconaFT/src/tests/test_update_with_empty_table.cc
new file mode 100644
index 00000000..411c3c4f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_update_with_empty_table.cc
@@ -0,0 +1,142 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test updates on an empty table: the update callback still runs (with no old
+// value), and its effect persists only if the transaction commits
+
+#include "test.h"
+
+const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
+
+DB_ENV *env;
+
+static int update_fun(DB *UU(db),
+ const DBT *UU(key),
+ const DBT *UU(old_val), const DBT *UU(extra),
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ set_val(extra,set_extra);
+ return 0;
+}
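+
+// update_fun here ignores old_val and stores the extra DBT as the new value,
+// so an update applied to the empty table behaves like an insert of the pair
+// (101, 10101) built in do_updates() below.  run_test() checks both outcomes:
+// the row is visible through a cursor after a committed update, and absent
+// (DB_NOTFOUND) after an aborted one.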
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+ { int chk_r = db_env_create(&env, 0); CKERR(chk_r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, update_fun);
+ { int chk_r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); }
+}
+
+static void cleanup (void) {
+ { int chk_r = env->close(env, 0); CKERR(chk_r); }
+}
+
+static int do_updates(DB_TXN *txn, DB *db, uint32_t flags) {
+ DBT key, val;
+ uint32_t k = 101;
+ uint32_t v = 10101;
+ dbt_init(&key, &k, sizeof(k));
+ dbt_init(&val, &v, sizeof(v));
+
+ int r = db->update(db, txn, &key, &val, flags); CKERR(r);
+ return r;
+}
+
+static void run_test(bool prelock, bool commit) {
+ DB *db;
+ uint32_t update_flags = 0;
+ setup();
+
+ IN_TXN_COMMIT(env, NULL, txn_1, 0, {
+ { int chk_r = db_create(&db, env, 0); CKERR(chk_r); }
+ { int chk_r = db->open(db, txn_1, "foo.db", NULL, DB_BTREE, DB_CREATE, 0666); CKERR(chk_r); }
+ });
+ if (prelock) {
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = db->pre_acquire_table_lock(db, txn_2); CKERR(chk_r); }
+ });
+ }
+
+ if (commit) {
+ IN_TXN_COMMIT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db, update_flags); CKERR(chk_r); }
+ });
+ DBC *cursor = NULL;
+ DBT key, val;
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db->cursor(db, txn_3, &cursor, 0); CKERR(chk_r); }
+ { int chk_r = cursor->c_get(cursor, &key, &val, DB_NEXT); CKERR(chk_r); }
+ assert(key.size == sizeof(uint32_t));
+ assert(val.size == sizeof(uint32_t));
+ assert(*(uint32_t *)(key.data) == 101);
+ assert(*(uint32_t *)(val.data) == 10101);
+ { int chk_r = cursor->c_close(cursor); CKERR(chk_r); }
+ });
+ }
+ else {
+ IN_TXN_ABORT(env, NULL, txn_2, 0, {
+ { int chk_r = do_updates(txn_2, db, update_flags); CKERR(chk_r); }
+ });
+ DBC *cursor = NULL;
+ DBT key, val;
+ memset(&key, 0, sizeof(key));
+ memset(&val, 0, sizeof(val));
+
+ IN_TXN_COMMIT(env, NULL, txn_3, 0, {
+ { int chk_r = db->cursor(db, txn_3, &cursor, 0); CKERR(chk_r); }
+ { int chk_r = cursor->c_get(cursor, &key, &val, DB_NEXT); CKERR2(chk_r, DB_NOTFOUND); }
+ { int chk_r = cursor->c_close(cursor); CKERR(chk_r); }
+ });
+ }
+ { int chk_r = db->close(db, 0); CKERR(chk_r); }
+ cleanup();
+}
+
+int test_main(int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ run_test(true,true);
+ run_test(false,true);
+ run_test(true,false);
+ run_test(false,false);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_updates_single_key.cc b/storage/tokudb/PerconaFT/src/tests/test_updates_single_key.cc
new file mode 100644
index 00000000..3c657a9b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_updates_single_key.cc
@@ -0,0 +1,101 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <db.h>
+
+
+//
+// This test ensures that we can do many updates to a single key when the dictionary
+// is just that key.
+//
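+// The loop in run_test() repeats this pattern roughly a million times
+// (illustrative excerpt of the calls below):
+//
+//     r = env->txn_begin(env, NULL, &txn, 0);
+//     r = db->put(db, txn, dbt_init(&key, &k, sizeof k),
+//                 dbt_init(&val, &v, sizeof v), DB_PRELOCKED_WRITE);
+//     txn->commit(txn, DB_TXN_NOSYNC);
+//
+// Every commit leaves another version of key 1 behind, so the test only stays
+// fast if stale versions are garbage collected (the #5700 concern mentioned in
+// the loop) rather than piling up in a single leaf entry.
+//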
+static void
+run_test (void) {
+
+ DB_ENV * env;
+ DB *db;
+ const char * const fname = "test.updates_single_key.ft_handle";
+ int r;
+
+ r = db_env_create(&env, 0); assert(r == 0);
+ env->set_errfile(env, stderr);
+ // no need to run with logging, so DB_INIT_LOG not passed in
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_THREAD, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0); assert(r == 0);
+ db->set_errfile(db,stderr); // Turn off those annoying errors
+ r = db->open(db, NULL, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ int i;
+ for (i=0; i<1000000; i++) {
+ int k = 1;
+ int v = i;
+ DBT key, val;
+ DB_TXN* txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ // want this test to go as fast as possible, so no need to use the lock tree
+ // we just care to see that #5700 is behaving better, that some garbage collection is happening
+ r = db->put(db, txn, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), DB_PRELOCKED_WRITE);
+ txn->commit(txn, DB_TXN_NOSYNC);
+ CKERR(r);
+ }
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ run_test();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_weakxaction.cc b/storage/tokudb/PerconaFT/src/tests/test_weakxaction.cc
new file mode 100644
index 00000000..1ddb1838
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_weakxaction.cc
@@ -0,0 +1,101 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+/* Find out about weak transactions.
+ * User A does a transaction.
+ * User B does something without a transaction, and it conflicts.
+ */
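+
+// Illustrative shape of the conflict provoked in test_autotxn() below:
+//
+//     r = db->put(db, x1, &k1, &v1, 0);               // x1 locks "hello"
+//     r = db->get(db, x2 /* == NULL */, &k2, &v2, 0); // no txn handle; must not
+//                                                     // see the uncommitted write,
+//                                                     // so it is expected to fail
+//                                                     // with DB_LOCK_DEADLOCK or
+//                                                     // DB_LOCK_NOTGRANTED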
+
+
+#include <db.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <sys/stat.h>
+
+static void
+test_autotxn (uint32_t env_flags, uint32_t db_flags) {
+ DB_ENV *env;
+ DB *db;
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ r = db_env_create (&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->set_flags(env, env_flags, 1); CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL |
+ DB_INIT_LOG | DB_INIT_TXN | DB_INIT_LOCK, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ {
+ DB_TXN *x = NULL;
+ if (env_flags==0 && db_flags==0) {
+ r = env->txn_begin(env, 0, &x, 0); CKERR(r);
+ }
+ r = db->open(db, x, "numbers.db", 0, DB_BTREE, DB_CREATE | db_flags, 0);
+ if (env_flags==0 && db_flags==0) {
+ r = x->commit(x, 0); CKERR(r);
+ }
+ CKERR(r);
+ }
+
+ DB_TXN *x1, *x2 = NULL;
+ r = env->txn_begin(env, 0, &x1, DB_TXN_NOWAIT); CKERR(r);
+ DBT k1,k2,v1,v2;
+ dbt_init(&k1, "hello", sizeof "hello");
+ dbt_init(&k2, "hello", sizeof "hello");
+ dbt_init(&v1, "there", sizeof "there");
+ dbt_init(&v2, NULL, 0);
+ memset(&v1, 0, sizeof(DBT));
+ memset(&v2, 0, sizeof(DBT));
+ r = db->put(db, x1, &k1, &v1, 0); CKERR(r);
+ r = db->get(db, x2, &k2, &v2, 0); assert(r==DB_LOCK_DEADLOCK || r==DB_LOCK_NOTGRANTED);
+ r = x1->commit(x1, 0); CKERR(r);
+ r = db->close(db, 0); CKERR(r);
+ r = env->close(env, 0); assert(r==0);
+}
+
+int
+test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+ test_autotxn(DB_AUTO_COMMIT, DB_AUTO_COMMIT);
+ test_autotxn(0, DB_AUTO_COMMIT);
+ test_autotxn(DB_AUTO_COMMIT, 0);
+ test_autotxn(0, 0);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/test_zero_length_keys.cc b/storage/tokudb/PerconaFT/src/tests/test_zero_length_keys.cc
new file mode 100644
index 00000000..d98fea8e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/test_zero_length_keys.cc
@@ -0,0 +1,188 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <memory.h>
+#include <sys/stat.h>
+#include <db.h>
+
+static void
+walk (DB *db) {
+ int r;
+ DB_TXN * const null_txn = 0;
+
+ DBC *cursor;
+ r = db->cursor(db, null_txn, &cursor, 0); assert(r == 0);
+
+ DBT key; memset(&key, 0, sizeof key); key.flags = DB_DBT_REALLOC;
+ DBT val; memset(&val, 0, sizeof val); val.flags = DB_DBT_REALLOC;
+ int i;
+ for (i=0; ; i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ if (verbose) printf("%d %u %u\n", i, key.size, val.size);
+ if (i == 0) assert(key.size == 0);
+ }
+ assert(i != 0);
+ r = cursor->c_close(cursor); assert(r == 0);
+
+ if (key.data) toku_free(key.data);
+ if (val.data) toku_free(val.data);
+}
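+
+// walk() scans with DB_NEXT, so rows come back in key order; with the default
+// comparator a zero-length key sorts before every non-empty key, which is why
+// the i == 0 iteration asserts key.size == 0.  Both DBTs use DB_DBT_REALLOC,
+// so the library reuses one buffer per DBT and the test frees each once at
+// the end.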
+
+static void
+test_insert_zero_length (int n, int dup_mode, const char *fname) {
+ if (verbose) printf("test_insert_zero_length:%d %d\n", n, dup_mode);
+
+ DB_TXN * const null_txn = 0;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->set_flags(db, dup_mode); assert(r == 0);
+ r = db->set_pagesize(db, 4096); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ int i;
+ for (i=0; i<n; i++) {
+ char k[n]; memset(k, i, n);
+ char v[n]; memset(v, i, n);
+ DBT key;
+ DBT val;
+ r = db->put(db, null_txn, dbt_init(&key, &k, i), dbt_init(&val, &v, i), 0);
+ if (r != 0) {
+ if (verbose) printf("db->put %d %d = %d\n", n, n, r);
+ assert(r == 0);
+ }
+ if (i == 0) {
+ dbt_init(&key, &k, i);
+ memset(&val, 0, sizeof val);
+ r = db->get(db, null_txn, &key, &val, 0);
+ assert(r == 0 && val.data == 0 && val.size == 0);
+
+ r = db->get(db, null_txn, &key, dbt_init_malloc(&val), 0);
+ assert(r == 0 && val.data != 0 && val.size == 0);
+ toku_free(val.data);
+
+ memset(&key, 0, sizeof key);
+ memset(&val, 0, sizeof val);
+ r = db->get(db, null_txn, &key, &val, 0);
+ assert(r == 0 && val.data == 0 && val.size == 0);
+
+ r = db->get(db, null_txn, &key, dbt_init_malloc(&val), 0);
+ assert(r == 0 && val.data != 0 && val.size == 0);
+ toku_free(val.data);
+ }
+ }
+
+ walk(db);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+static void
+test_insert_zero_length_keys (int n, int dup_mode, const char *fname) {
+ if (verbose) printf("test_insert_zero_length_keys:%d %d\n", n, dup_mode);
+
+ DB_TXN * const null_txn = 0;
+ int r;
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ /* create the dup database file */
+ DB_ENV *env;
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL, 0); assert(r == 0);
+
+ DB *db;
+ r = db_create(&db, env, 0); assert(r == 0);
+ r = db->set_flags(db, dup_mode); assert(r == 0);
+ r = db->set_pagesize(db, 4096); assert(r == 0);
+ r = db->open(db, null_txn, fname, "main", DB_BTREE, DB_CREATE, 0666); assert(r == 0);
+
+ int i;
+ for (i=0; i<n; i++) {
+ char k[n]; memset(k, i, n);
+ char v[n]; memset(v, i, n);
+ DBT key;
+ DBT val;
+ r = db->put(db, null_txn, dbt_init(&key, &k, 0), dbt_init(&val, &v, i), 0);
+ if (r != 0) {
+ if (verbose) printf("db->put %d %d = %d\n", n, n, r);
+ assert(r == 0);
+ }
+ }
+
+ walk(db);
+
+ r = db->close(db, 0); assert(r == 0);
+ r = env->close(env, 0); assert(r == 0);
+}
+
+int
+test_main(int argc, char *const argv[]) {
+
+ parse_args(argc, argv);
+
+#define TFILE __FILE__ ".tktrace"
+ unlink(TFILE);
+ SET_TRACE_FILE(TFILE);
+
+ test_insert_zero_length(32, 0, "test0");
+ test_insert_zero_length_keys(32, 0, "test0keys");
+
+ CLOSE_TRACE_FILE();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/threaded_stress_test_helpers.h b/storage/tokudb/PerconaFT/src/tests/threaded_stress_test_helpers.h
new file mode 100644
index 00000000..1d8833ad
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/threaded_stress_test_helpers.h
@@ -0,0 +1,2915 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// The Way Things Work:
+//
+// Threaded stress tests have the following properties:
+// - One or more DBs
+// - One or more threads performing some number of operations per txn.
+// - Correctness tests use signed 4 byte keys and signed 4 byte values. They expect
+// a table with all zeroes before running.
+// - Performance tests should use 8 byte keys and 8+ byte values, where the values
+// are some mixture of random uncompressible garbage and zeroes, depending how
+// compressible we want the data. These tests want the table to be populated
+// with keys in the range [0, table_size - 1] unless disperse_keys is true,
+//     in which case the keys are scrambled across the integer key space.
+
+#include "toku_config.h"
+#include "test.h"
+
+#include <stdio.h>
+#include <math.h>
+#include <locale.h>
+
+#include <db.h>
+#include <memory.h>
+#include <toku_race_tools.h>
+
+#include <portability/toku_atomic.h>
+#include <portability/toku_pthread.h>
+#include <portability/toku_random.h>
+#include <portability/toku_time.h>
+
+#include <src/ydb-internal.h>
+
+#include <util/dbt.h>
+
+#include <util/rwlock.h>
+#include <util/kibbutz.h>
+
+static const size_t min_val_size = sizeof(int32_t);
+static const size_t min_key_size = sizeof(int32_t);
+
+volatile bool run_test; // should be volatile since we are communicating through this variable.
+
+typedef struct arg *ARG;
+typedef int (*operation_t)(DB_TXN *txn, ARG arg, void *operation_extra, void *stats_extra);
+
+// TODO: Properly define these in db.h so we don't have to copy them here
+typedef int (*test_update_callback_f)(DB *, const DBT *key, const DBT *old_val, const DBT *extra, void (*set_val)(const DBT *new_val, void *set_extra), void *set_extra);
+typedef int (*test_generate_row_for_put_callback)(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, DBT_ARRAY *dest_vals, const DBT *src_key, const DBT *src_data);
+typedef int (*test_generate_row_for_del_callback)(DB *dest_db, DB *src_db, DBT_ARRAY *dest_keys, const DBT *src_key, const DBT *src_data);
+
+enum stress_lock_type {
+ STRESS_LOCK_NONE = 0,
+ STRESS_LOCK_SHARED,
+ STRESS_LOCK_EXCL
+};
+
+struct env_args {
+ int fanout;
+ int node_size;
+ int basement_node_size;
+ int rollback_node_size;
+ int checkpointing_period;
+ int cleaner_period;
+ int cleaner_iterations;
+ int sync_period;
+ uint64_t lk_max_memory;
+ uint64_t cachetable_size;
+ uint32_t num_bucket_mutexes;
+ const char *envdir;
+ test_update_callback_f update_function; // update callback function
+ test_generate_row_for_put_callback generate_put_callback;
+ test_generate_row_for_del_callback generate_del_callback;
+};
+
+enum perf_output_format {
+ HUMAN = 0,
+ CSV,
+ TSV,
+ NUM_OUTPUT_FORMATS
+};
+
+struct cli_args {
+ int num_elements; // number of elements per DB
+ int num_DBs; // number of DBs
+ int num_seconds; // how long test should run
+ int join_timeout; // how long to wait for threads to join before assuming deadlocks
+ bool only_create; // true if want to only create DBs but not run stress
+ bool only_stress; // true if DBs are already created and want to only run stress
+ int update_broadcast_period_ms; // specific to test_stress3
+ int num_ptquery_threads; // number of threads to run point queries
+ bool do_test_and_crash; // true if we should crash after running stress test. For recovery tests.
+ bool do_recover; // true if we should run recover
+ int num_update_threads; // number of threads running updates
+ int num_put_threads; // number of threads running puts
+ int range_query_limit; // how many rows to look at for range queries
+ bool serial_insert;
+ bool interleave; // for insert benchmarks, whether to interleave separate threads' puts (or segregate them)
+ bool crash_on_operation_failure;
+ bool print_performance;
+ bool print_thread_performance;
+ bool print_iteration_performance;
+ enum perf_output_format perf_output_format;
+ enum toku_compression_method compression_method; // the compression method to use on newly created DBs
+ int performance_period;
+ uint32_t txn_size; // specifies number of updates/puts/whatevers per txn
+    uint32_t key_size; // number of bytes in keys. Must be at least 4
+ uint32_t val_size; // number of bytes in vals. Must be at least 4
+ double compressibility; // the row values should compress down to this fraction
+    struct env_args env_args; // specifies DB environment parameters
+ bool single_txn;
+ bool warm_cache; // warm caches before running stress_table
+ bool blackhole; // all message injects are no-ops. helps measure txn/logging/locktree overhead.
+ bool nolocktree; // use this flag to avoid the locktree on insertions
+ bool unique_checks; // use uniqueness checking during insert. makes it slow.
+ uint32_t sync_period; // background log fsync period
+ bool nolog; // do not log. useful for testing in memory performance.
+ bool nocrashstatus; // do not print engine status upon crash
+ bool prelock_updates; // update threads perform serial updates on a prelocked range
+    bool disperse_keys; // spread the keys out during a load (by reversing the bits in the loop index) to make a wide tree that random inserts can be spread across
+ bool memcmp_keys; // pack keys big endian and use the builtin key comparison function in the fractal tree
+ bool direct_io; // use direct I/O
+    const char *print_engine_status; // print engine status rows matching a simple regex "a|b|c", i.e. strings where a, b, or c is a substring.
+};
+
+struct arg {
+ DB **dbp; // array of DBs
+ DB_ENV* env; // environment used
+ bool bounded_element_range; // true if elements in dictionary are bounded
+ // by num_elements, that is, all keys in each
+ // DB are in [0, num_elements)
+ // false otherwise
+ int sleep_ms; // number of milliseconds to sleep between operations
+ uint32_t txn_flags; // isolation level for txn running operation
+ operation_t operation; // function that is the operation to be run
+ void* operation_extra; // extra parameter passed to operation
+ enum stress_lock_type lock_type; // states if operation must be exclusive, shared, or does not require locking
+ struct random_data *random_data; // state for random_r
+ int thread_idx;
+ int num_threads;
+ struct cli_args *cli;
+ bool do_prepare;
+ bool prelock_updates;
+ bool track_thread_performance;
+ bool wrap_in_parent;
+};
+
+static void arg_init(struct arg *arg, DB **dbp, DB_ENV *env, struct cli_args *cli_args) {
+ arg->cli = cli_args;
+ arg->dbp = dbp;
+ arg->env = env;
+ arg->bounded_element_range = true;
+ arg->sleep_ms = 0;
+ arg->lock_type = STRESS_LOCK_NONE;
+ arg->txn_flags = DB_TXN_SNAPSHOT;
+ arg->operation_extra = nullptr;
+ arg->do_prepare = false;
+ arg->prelock_updates = false;
+ arg->track_thread_performance = true;
+ arg->wrap_in_parent = false;
+}
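+
+// Minimal sketch (illustrative only, not compiled) of how a test built on this
+// header wires an operation up; ptquery_op and the surrounding variable names
+// are made up for the example:
+//
+//     static int ptquery_op(DB_TXN *txn, ARG arg, void *op_extra, void *stats_extra) {
+//         // do one unit of work, then bump counters via increment_counter()
+//         return 0;
+//     }
+//
+//     struct arg myargs[num_threads];
+//     for (int i = 0; i < num_threads; i++) {
+//         arg_init(&myargs[i], dbp, env, cli_args);
+//         myargs[i].operation = ptquery_op;
+//     }
+//
+// The filled array is then handed to the thread driver defined further down in
+// this header, which typically runs one worker per struct arg and uses the
+// run_test flag above to stop them.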
+
+enum operation_type {
+ OPERATION = 0,
+ PUTS,
+ PTQUERIES,
+ NUM_OPERATION_TYPES
+};
+
+const char *operation_names[] = {
+ "ops",
+ "puts",
+ "ptqueries",
+ nullptr
+};
+
+static void increment_counter(void *extra, enum operation_type type, uint64_t inc) {
+ invariant(type != OPERATION);
+ int t = (int) type;
+ invariant(extra);
+ invariant(t >= 0 && t < (int) NUM_OPERATION_TYPES);
+ uint64_t *CAST_FROM_VOIDP(counters, extra);
+ counters[t] += inc;
+}
+
+struct perf_formatter {
+ void (*header)(const struct cli_args *cli_args, const int num_threads);
+ void (*iteration)(const struct cli_args *cli_args, const int current_time, uint64_t last_counters[][(int) NUM_OPERATION_TYPES], uint64_t *counters[], const int num_threads);
+ void (*totals)(const struct cli_args *cli_args, uint64_t *counters[], const int num_threads);
+};
+
+static inline int
+seconds_in_this_iteration(const int current_time, const int performance_period)
+{
+ const int iteration = (current_time + performance_period - 1) / performance_period;
+ return current_time - ((iteration - 1) * performance_period);
+}
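+
+// Example (illustrative): with performance_period = 5 and current_time = 7,
+// iteration = (7 + 4) / 5 = 2 and the function returns 7 - 5 = 2, i.e. two
+// seconds of the current reporting interval have elapsed.  When current_time
+// is an exact multiple of the period, it returns the full period.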
+
+static void
+human_print_perf_header(const struct cli_args *UU(cli_args), const int UU(num_threads)) {}
+
+static void
+human_print_perf_iteration(const struct cli_args *cli_args, const int current_time, uint64_t last_counters[][(int) NUM_OPERATION_TYPES], uint64_t *counters[], const int num_threads)
+{
+ const int secondsthisiter = seconds_in_this_iteration(current_time, cli_args->performance_period);
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ uint64_t period_total = 0;
+ printf("%4d %s", current_time, operation_names[op]);
+ for (int i = strlen(operation_names[op]); i < 12; ++i) {
+ printf(" ");
+ }
+ for (int t = 0; t < num_threads; ++t) {
+ const uint64_t last = last_counters[t][op];
+ const uint64_t current = counters[t][op];
+ const uint64_t this_iter = current - last;
+ if (cli_args->print_thread_performance) {
+ const double persecond = (double) this_iter / secondsthisiter;
+ printf("\t%'12" PRIu64 " (%'12.1lf/s)", this_iter, persecond);
+ }
+ period_total += this_iter;
+ last_counters[t][op] = current;
+ }
+ const double totalpersecond = (double) period_total / secondsthisiter;
+ printf("\tTotal %'12" PRIu64 " (%'12.1lf/s)\n", period_total, totalpersecond);
+ }
+ fflush(stdout);
+}
+
+static void
+human_print_perf_totals(const struct cli_args *cli_args, uint64_t *counters[], const int num_threads)
+{
+ if (cli_args->print_iteration_performance) {
+ printf("\n");
+ }
+ printf("Overall performance:\n");
+ uint64_t overall_totals[(int) NUM_OPERATION_TYPES];
+ ZERO_ARRAY(overall_totals);
+ for (int t = 0; t < num_threads; ++t) {
+ if (cli_args->print_thread_performance) {
+ printf("Thread %4d: ", t + 1);
+ }
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ const uint64_t current = counters[t][op];
+ if (cli_args->print_thread_performance) {
+ const double persecond = (double) current / cli_args->num_seconds;
+ printf("\t%s\t%'12" PRIu64 " (%'12.1lf/s)", operation_names[op], current, persecond);
+ }
+ overall_totals[op] += current;
+ }
+ if (cli_args->print_thread_performance) {
+ printf("\n");
+ }
+ }
+ printf("All threads: ");
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ const double totalpersecond = (double) overall_totals[op] / cli_args->num_seconds;
+ printf("\t%s\t%'12" PRIu64 " (%'12.1lf/s)", operation_names[op], overall_totals[op], totalpersecond);
+ }
+ printf("\n");
+}
+
+static void
+csv_print_perf_header(const struct cli_args *cli_args, const int num_threads)
+{
+ printf("seconds");
+ if (cli_args->print_thread_performance) {
+ for (int t = 1; t <= num_threads; ++t) {
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ printf(",\"Thread %d %s\",\"Thread %d %s/s\"", t, operation_names[op], t, operation_names[op]);
+ }
+ }
+ }
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ printf(",\"Total %s\",\"Total %s/s\"", operation_names[op], operation_names[op]);
+ }
+ printf("\n");
+}
+
+static void
+csv_print_perf_iteration(const struct cli_args *cli_args, const int current_time, uint64_t last_counters[][(int) NUM_OPERATION_TYPES], uint64_t *counters[], const int num_threads)
+{
+ const int secondsthisiter = seconds_in_this_iteration(current_time, cli_args->performance_period);
+ printf("%d", current_time);
+ uint64_t period_totals[(int) NUM_OPERATION_TYPES];
+ ZERO_ARRAY(period_totals);
+ for (int t = 0; t < num_threads; ++t) {
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ const uint64_t last = last_counters[t][op];
+ const uint64_t current = counters[t][op];
+ const uint64_t this_iter = current - last;
+ if (cli_args->print_thread_performance) {
+ const double persecond = (double) this_iter / secondsthisiter;
+ printf(",%" PRIu64 ",%.1lf", this_iter, persecond);
+ }
+ period_totals[op] += this_iter;
+ last_counters[t][op] = current;
+ }
+ }
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ const double totalpersecond = (double) period_totals[op] / secondsthisiter;
+ printf(",%" PRIu64 ",%.1lf", period_totals[op], totalpersecond);
+ }
+ printf("\n");
+ fflush(stdout);
+}
+
+static void
+csv_print_perf_totals(const struct cli_args *cli_args, uint64_t *counters[], const int num_threads) {
+ printf("overall");
+ uint64_t overall_totals[(int) NUM_OPERATION_TYPES];
+ ZERO_ARRAY(overall_totals);
+ for (int t = 0; t < num_threads; ++t) {
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ const uint64_t current = counters[t][op];
+ if (cli_args->print_thread_performance) {
+ const double persecond = (double) current / cli_args->num_seconds;
+ printf(",%" PRIu64 ",%.1lf", current, persecond);
+ }
+ overall_totals[op] += current;
+ }
+ }
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ const double totalpersecond = (double) overall_totals[op] / cli_args->num_seconds;
+ printf(",%" PRIu64 ",%.1lf", overall_totals[op], totalpersecond);
+ }
+ printf("\n");
+}
+
+static void
+tsv_print_perf_header(const struct cli_args *cli_args, const int num_threads)
+{
+ printf("\"seconds\"");
+ if (cli_args->print_thread_performance) {
+ for (int t = 1; t <= num_threads; ++t) {
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ printf("\t\"Thread %d %s\"\t\"Thread %d %s/s\"", t, operation_names[op], t, operation_names[op]);
+ }
+ }
+ }
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ printf("\t\"Total %s\"\t\"Total %s/s\"", operation_names[op], operation_names[op]);
+ }
+ printf("\n");
+}
+
+static void
+tsv_print_perf_iteration(const struct cli_args *cli_args, const int current_time, uint64_t last_counters[][(int) NUM_OPERATION_TYPES], uint64_t *counters[], const int num_threads)
+{
+ const int secondsthisiter = seconds_in_this_iteration(current_time, cli_args->performance_period);
+ printf("%d", current_time);
+ uint64_t period_totals[(int) NUM_OPERATION_TYPES];
+ ZERO_ARRAY(period_totals);
+ for (int t = 0; t < num_threads; ++t) {
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ const uint64_t last = last_counters[t][op];
+ const uint64_t current = counters[t][op];
+ const uint64_t this_iter = current - last;
+ if (cli_args->print_thread_performance) {
+ const double persecond = (double) this_iter / secondsthisiter;
+ printf("\t%" PRIu64 "\t%.1lf", this_iter, persecond);
+ }
+ period_totals[op] += this_iter;
+ last_counters[t][op] = current;
+ }
+ }
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ const double totalpersecond = (double) period_totals[op] / secondsthisiter;
+ printf("\t%" PRIu64 "\t%.1lf", period_totals[op], totalpersecond);
+ }
+ printf("\n");
+ fflush(stdout);
+}
+
+static void
+tsv_print_perf_totals(const struct cli_args *cli_args, uint64_t *counters[], const int num_threads) {
+ printf("\"overall\"");
+ uint64_t overall_totals[(int) NUM_OPERATION_TYPES];
+ ZERO_ARRAY(overall_totals);
+ for (int t = 0; t < num_threads; ++t) {
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ const uint64_t current = counters[t][op];
+ if (cli_args->print_thread_performance) {
+ const double persecond = (double) current / cli_args->num_seconds;
+ printf("\t%" PRIu64 "\t%.1lf", current, persecond);
+ }
+ overall_totals[op] += current;
+ }
+ }
+ for (int op = 0; op < (int) NUM_OPERATION_TYPES; ++op) {
+ const double totalpersecond = (double) overall_totals[op] / cli_args->num_seconds;
+ printf("\t%" PRIu64 "\t%.1lf", overall_totals[op], totalpersecond);
+ }
+ printf("\n");
+}
+
+const struct perf_formatter perf_formatters[] = {
+ { /* HUMAN */
+ .header = human_print_perf_header,
+ .iteration = human_print_perf_iteration,
+ .totals = human_print_perf_totals
+ },
+ { /* CSV */
+ .header = csv_print_perf_header,
+ .iteration = csv_print_perf_iteration,
+ .totals = csv_print_perf_totals
+ },
+ { /* TSV */
+ .header = tsv_print_perf_header,
+ .iteration = tsv_print_perf_iteration,
+ .totals = tsv_print_perf_totals
+ },
+};
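+// Usage sketch (illustrative only; variable names are placeholders): callers
+// index this table by the configured output format and invoke the three
+// callbacks, roughly as the timer/worker code below does:
+//   const struct perf_formatter *fmt = &perf_formatters[cli_args->perf_output_format];
+//   fmt->header(cli_args, num_threads);                                 // once, before the run
+//   fmt->iteration(cli_args, t, last_counters, counters, num_threads);  // every performance period
+//   fmt->totals(cli_args, counters, num_threads);                       // once, at the end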
+
+static int get_env_open_flags(struct cli_args *args) {
+ int flags = DB_INIT_LOCK|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE;
+ flags |= args->nolog ? 0 : DB_INIT_LOG;
+ return flags;
+}
+
+static int get_put_flags(struct cli_args *args) {
+ int flags = 0;
+ flags |= args->nolocktree ? DB_PRELOCKED_WRITE : 0;
+ flags |= args->unique_checks ? DB_NOOVERWRITE : 0;
+ return flags;
+}
+
+static int get_commit_flags(struct cli_args *args) {
+ int flags = 0;
+ flags |= args->env_args.sync_period > 0 ? DB_TXN_NOSYNC : 0;
+ return flags;
+}
+
+struct worker_extra {
+ struct arg *thread_arg;
+ toku_mutex_t *operation_lock_mutex;
+ struct st_rwlock *operation_lock;
+ uint64_t *counters;
+ int64_t pad[4]; // pad to 64 bytes
+};
+
+static void lock_worker_op(struct worker_extra* we) {
+ ARG arg = we->thread_arg;
+ if (arg->lock_type != STRESS_LOCK_NONE) {
+ toku_mutex_lock(we->operation_lock_mutex);
+ if (arg->lock_type == STRESS_LOCK_SHARED) {
+ rwlock_read_lock(we->operation_lock, we->operation_lock_mutex);
+ } else if (arg->lock_type == STRESS_LOCK_EXCL) {
+ rwlock_write_lock(we->operation_lock, we->operation_lock_mutex);
+ } else {
+ abort();
+ }
+ toku_mutex_unlock(we->operation_lock_mutex);
+ }
+}
+
+static void unlock_worker_op(struct worker_extra* we) {
+ ARG arg = we->thread_arg;
+ if (arg->lock_type != STRESS_LOCK_NONE) {
+ toku_mutex_lock(we->operation_lock_mutex);
+ if (arg->lock_type == STRESS_LOCK_SHARED) {
+ rwlock_read_unlock(we->operation_lock);
+ } else if (arg->lock_type == STRESS_LOCK_EXCL) {
+ rwlock_write_unlock(we->operation_lock);
+ } else {
+ abort();
+ }
+ toku_mutex_unlock(we->operation_lock_mutex);
+ }
+}
+
+static void *worker(void *arg_v) {
+ int r;
+ struct worker_extra* CAST_FROM_VOIDP(we, arg_v);
+ ARG arg = we->thread_arg;
+ struct random_data random_data;
+ ZERO_STRUCT(random_data);
+ char *XCALLOC_N(8, random_buf);
+ r = myinitstate_r(random(), random_buf, 8, &random_data);
+ assert_zero(r);
+ arg->random_data = &random_data;
+ DB_ENV *env = arg->env;
+ DB_TXN *txn = nullptr;
+ DB_TXN *ptxn = nullptr;
+ if (verbose) {
+ toku_pthread_t self = toku_pthread_self();
+ uintptr_t intself = (uintptr_t) self;
+ printf("%lu starting %p\n", (unsigned long) intself, arg->operation);
+ }
+ if (arg->cli->single_txn) {
+ r = env->txn_begin(env, 0, &txn, arg->txn_flags); CKERR(r);
+ } else if (arg->wrap_in_parent) {
+ r = env->txn_begin(env, 0, &ptxn, arg->txn_flags); CKERR(r);
+ }
+ while (run_test) {
+ lock_worker_op(we);
+ if (!arg->cli->single_txn) {
+ r = env->txn_begin(env, ptxn, &txn, arg->txn_flags); CKERR(r);
+ }
+ r = arg->operation(txn, arg, arg->operation_extra, we->counters);
+ if (r==0 && !arg->cli->single_txn && arg->do_prepare) {
+ uint8_t gid[DB_GID_SIZE];
+ memset(gid, 0, DB_GID_SIZE);
+ uint64_t gid_val = txn->id64(txn);
+ uint64_t *gid_count_p = cast_to_typeof(gid_count_p) gid; // make gcc --happy about -Wstrict-aliasing
+ *gid_count_p = gid_val;
+ int rr = txn->prepare(txn, gid, 0);
+ assert_zero(rr);
+ }
+ if (r == 0) {
+ if (!arg->cli->single_txn) {
+ int flags = get_commit_flags(arg->cli);
+ int chk_r = txn->commit(txn, flags); CKERR(chk_r);
+ }
+ } else {
+ if (arg->cli->crash_on_operation_failure) {
+ CKERR(r);
+ } else {
+ if (!arg->cli->single_txn) {
+ { int chk_r = txn->abort(txn); CKERR(chk_r); }
+ }
+ }
+ }
+ unlock_worker_op(we);
+ if (arg->track_thread_performance) {
+ we->counters[OPERATION]++;
+ }
+ if (arg->sleep_ms) {
+ usleep(arg->sleep_ms * 1000);
+ }
+ }
+ if (arg->cli->single_txn) {
+ int flags = get_commit_flags(arg->cli);
+ int chk_r = txn->commit(txn, flags); CKERR(chk_r);
+ } else if (arg->wrap_in_parent) {
+ int flags = get_commit_flags(arg->cli);
+ int chk_r = ptxn->commit(ptxn, flags); CKERR(chk_r);
+ }
+ if (verbose) {
+ toku_pthread_t self = toku_pthread_self();
+ uintptr_t intself = (uintptr_t) self;
+ printf("%lu returning\n", (unsigned long) intself);
+ }
+ toku_free(random_buf);
+ return arg;
+}
+
+struct scan_cb_extra {
+ bool fast;
+ int curr_sum;
+ int num_elements;
+};
+
+struct scan_op_extra {
+ bool fast;
+ bool fwd;
+ bool prefetch;
+};
+
+static int
+scan_cb(const DBT *key, const DBT *val, void *arg_v) {
+ struct scan_cb_extra *CAST_FROM_VOIDP(cb_extra, arg_v);
+ assert(key);
+ assert(val);
+ assert(cb_extra);
+ assert(val->size >= sizeof(int));
+ cb_extra->curr_sum += *(int *) val->data;
+ cb_extra->num_elements++;
+ return cb_extra->fast ? TOKUDB_CURSOR_CONTINUE : 0;
+}
+
+static int scan_op_and_maybe_check_sum(
+ DB* db,
+ DB_TXN *txn,
+ struct scan_op_extra* sce,
+ bool check_sum
+ )
+{
+ int r = 0;
+ DBC* cursor = nullptr;
+
+ struct scan_cb_extra e = {
+ .fast = sce->fast,
+ .curr_sum = 0,
+ .num_elements = 0,
+ };
+
+ { int chk_r = db->cursor(db, txn, &cursor, 0); CKERR(chk_r); }
+ if (sce->prefetch) {
+ r = cursor->c_set_bounds(cursor, db->dbt_neg_infty(), db->dbt_pos_infty(), true, 0);
+ assert(r == 0);
+ }
+ while (r != DB_NOTFOUND) {
+ if (sce->fwd) {
+ r = cursor->c_getf_next(cursor, 0, scan_cb, &e);
+ }
+ else {
+ r = cursor->c_getf_prev(cursor, 0, scan_cb, &e);
+ }
+ assert(r==0 || r==DB_NOTFOUND);
+ if (!run_test) {
+ // terminate early because this op takes a while under drd.
+ // don't check the sum if we do this.
+ check_sum = false;
+ break;
+ }
+ }
+ { int chk_r = cursor->c_close(cursor); CKERR(chk_r); }
+ if (r == DB_NOTFOUND) {
+ r = 0;
+ }
+ if (check_sum && e.curr_sum) {
+ printf("e.curr_sum: %" PRId32 " e.num_elements: %" PRId32 " \n", e.curr_sum, e.num_elements);
+ abort();
+ }
+ return r;
+}
+
+static int generate_row_for_put(
+ DB *dest_db,
+ DB *src_db,
+ DBT_ARRAY *dest_keys,
+ DBT_ARRAY *dest_vals,
+ const DBT *src_key,
+ const DBT *src_val
+ )
+{
+ invariant(!src_db || src_db != dest_db);
+ invariant(src_key->size >= sizeof(unsigned int));
+
+ // Consistent pseudo random source. The checksum-of-key/val/db seed below is
+ // kept for reference but disabled; the active code seeds from the first four
+ // bytes of the key.
+
+/*
+ struct x1764 l;
+ x1764_init(&l);
+ x1764_add(&l, src_key->data, src_key->size);
+ x1764_add(&l, src_val->data, src_val->size);
+ x1764_add(&l, &dest_db, sizeof(dest_db)); //make it depend on which db
+ unsigned int seed = x1764_finish(&l);
+ */
+ unsigned int seed = *(unsigned int*)src_key->data;
+
+ struct random_data random_data;
+ ZERO_STRUCT(random_data);
+ char random_buf[8];
+ {
+ int r = myinitstate_r(seed, random_buf, 8, &random_data);
+ assert_zero(r);
+ }
+
+ uint8_t num_outputs = 0;
+ while (myrandom_r(&random_data) % 2) {
+ num_outputs++;
+ if (num_outputs > 8) {
+ break;
+ }
+ }
+
+ toku_dbt_array_resize(dest_keys, num_outputs);
+ toku_dbt_array_resize(dest_vals, num_outputs);
+ int sum = 0;
+ for (uint8_t i = 0; i < num_outputs; i++) {
+ DBT *dest_key = &dest_keys->dbts[i];
+ DBT *dest_val = &dest_vals->dbts[i];
+
+ invariant(dest_key->flags == DB_DBT_REALLOC);
+ invariant(dest_val->flags == DB_DBT_REALLOC);
+
+ if (dest_key->ulen < src_key->size) {
+ dest_key->data = toku_xrealloc(dest_key->data, src_key->size);
+ dest_key->ulen = src_key->size;
+ }
+ dest_key->size = src_key->size;
+ if (dest_val->ulen < src_val->size) {
+ dest_val->data = toku_xrealloc(dest_val->data, src_val->size);
+ dest_val->ulen = src_val->size;
+ }
+ dest_val->size = src_val->size;
+ memcpy(dest_key->data, src_key->data, src_key->size);
+ ((uint8_t*)dest_key->data)[src_key->size-1] = i; //Have different keys for each entry.
+
+ memcpy(dest_val->data, src_val->data, src_val->size);
+ invariant(dest_val->size >= sizeof(int));
+ int number;
+ if (i == num_outputs - 1) {
+ // Make sum add to 0
+ number = -sum;
+ } else {
+ // Keep track of sum
+ number = myrandom_r(&random_data);
+ }
+ sum += number;
+ *(int *) dest_val->data = number;
+ }
+ invariant(sum == 0);
+ return 0;
+}
+
+// How Keys Work:
+//
+// Keys are either
+// - 4 byte little endian non-negative integers
+// - 8 byte little endian non-negative integers
+// - 8 byte little endian non-negative integers, padded with zeroes.
+//
+// The comparison function treats the key as a 4 byte
+// int if the key size is exactly 4, and it treats
+// the key as an 8 byte int if the key size is 8 or more.
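+//
+// Example layout (editor's illustration, little-endian host, memcmp_keys off):
+// key 5 with key_size == 4 is stored as the bytes 05 00 00 00; with
+// key_size == 12 it is stored as 05 00 00 00 00 00 00 00 followed by four
+// zero padding bytes.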
+
+static int64_t random_bounded_key(struct random_data *random_data, ARG arg) {
+// Effect: Returns a random key in the table, possibly bounded by the number of elements.
+ int64_t key = myrandom_r(random_data);
+ if (arg->bounded_element_range && arg->cli->num_elements > 0) {
+ key = key % arg->cli->num_elements;
+ }
+ return key;
+}
+
+static int64_t breverse(int64_t v)
+// Effect: return the bits in v, reversed
+// Notes: implementation taken from http://graphics.stanford.edu/~seander/bithacks.html#BitReverseObvious
+// Rationale: just a hack to spread out the keys during loading, doesn't need to be fast but does need to be correct.
+{
+ uint64_t k = v; // k will hold the reversed bits of v; start with the LSB of v
+ int s = sizeof(v) * CHAR_BIT - 1; // extra shift needed at end
+
+ for (v >>= 1; v; v >>= 1) {
+ k <<= 1;
+ k |= v & 1;
+ s--;
+ }
+ k <<= s; // shift when v's highest bits are zero
+ int64_t r = k;
+ return r & ~(1ULL << 63);
+}
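+// Worked examples (editor's note, not exhaustive): breverse(2) == 1LL << 62 and
+// breverse(4) == 1LL << 61, so small consecutive keys land far apart in the key
+// space. Because bit 63 is cleared at the end, breverse(0) and breverse(1) both
+// map to 0.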
+
+static void
+fill_key_buf(int64_t key, uint8_t *data, struct cli_args *args) {
+// Effect: Fill data with a specific little-endian integer, 4 or 8 bytes long
+// depending on args->key_size, possibly padded with zeroes.
+// Requires: data points to a buffer of at least args->key_size bytes
+ if (args->disperse_keys) {
+ key = breverse(key);
+ }
+ invariant(key >= 0);
+ if (args->key_size == sizeof(int)) {
+ const int key32 = args->memcmp_keys ? toku_htonl(key) : key;
+ memcpy(data, &key32, sizeof(key32));
+ } else {
+ invariant(args->key_size >= sizeof(key));
+ const int64_t key64 = args->memcmp_keys ? toku_htonl(key) : key;
+ memcpy(data, &key64, sizeof(key64));
+ memset(data + sizeof(key64), 0, args->key_size - sizeof(key64));
+ }
+}
+
+static void
+fill_key_buf_random(struct random_data *random_data, uint8_t *data, ARG arg) {
+// Effect: Fill data with a random, little-endian, 4 or 8 byte integer, possibly
+// bounded by the size of the table, and padded with zeroes until key_size.
+// Requires, Notes: see fill_key_buf()
+ int64_t key = random_bounded_key(random_data, arg);
+ fill_key_buf(key, data, arg->cli);
+}
+
+// How Vals Work:
+//
+// Values are either
+// - 4 byte little endian integers
+// - 4 byte little endian integers, padded with zeroes
+// - X bytes random values, Y bytes zeroes, where X and Y
+// are derived from the desired compressibility;
+//
+// Correctness tests use integer values, perf tests use random bytes.
+// Both support padding out values > 4 bytes with zeroes.
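+//
+// Worked example (editor's illustration): with val_size == 8 and
+// compressibility == 0.5, (1 - 0.5) * 8 == 4, so fill_val_buf_random() writes
+// 4 random bytes followed by 4 zero bytes.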
+
+static void
+fill_val_buf(int64_t val, uint8_t *data, uint32_t val_size) {
+// Effect, Requires, Notes: see fill_key_buf().
+ if (val_size == sizeof(int)) {
+ const int val32 = val;
+ memcpy(data, &val32, sizeof(val32));
+ } else {
+ invariant(val_size >= sizeof(val));
+ memcpy(data, &val, sizeof(val));
+ memset(data + sizeof(val), 0, val_size - sizeof(val));
+ }
+}
+
+// Fill the array with compressibility*size zero bytes.
+// 0.0 <= compressibility <= 1.0
+// Compressibility is the fraction of the value that will be zeros (i.e. the
+// approximate fraction that will be compressed away). The rest is random data.
+static void
+fill_val_buf_random(struct random_data *random_data, uint8_t *data, struct cli_args *args) {
+ invariant(args->val_size >= min_val_size);
+ // Note: compressibility is in [0,1] and gives the fraction that should be zeros.
+ // The zero tail is written explicitly below, so the buffer need not be pre-zeroed.
+
+ // Fill in the random bytes
+ uint32_t num_random_bytes = (1 - args->compressibility) * args->val_size;
+ if (num_random_bytes > 0) {
+ uint32_t filled;
+ for (filled = 0; filled + sizeof(uint64_t) <= num_random_bytes; filled += sizeof(uint64_t)) {
+ *((uint64_t *) &data[filled]) = myrandom_r(random_data);
+ }
+ if (filled != num_random_bytes) {
+ uint64_t last8 = myrandom_r(random_data);
+ memcpy(&data[filled], &last8, num_random_bytes - filled);
+ }
+ }
+
+ // Fill in the zero bytes
+ if (num_random_bytes < args->val_size) {
+ memset(data + num_random_bytes, 0, args->val_size - num_random_bytes);
+ }
+}
+
+static int random_put_in_db(DB *db, DB_TXN *txn, ARG arg, bool ignore_errors, void *stats_extra) {
+ int r = 0;
+ uint8_t keybuf[arg->cli->key_size];
+ uint8_t valbuf[arg->cli->val_size];
+
+ DBT key, val;
+ dbt_init(&key, keybuf, sizeof keybuf);
+ dbt_init(&val, valbuf, sizeof valbuf);
+ const int put_flags = get_put_flags(arg->cli);
+
+ uint64_t puts_to_increment = 0;
+ for (uint32_t i = 0; i < arg->cli->txn_size; ++i) {
+ fill_key_buf_random(arg->random_data, keybuf, arg);
+ fill_val_buf_random(arg->random_data, valbuf, arg->cli);
+ r = db->put(db, txn, &key, &val, put_flags);
+ if (!ignore_errors && r != 0) {
+ goto cleanup;
+ }
+ puts_to_increment++;
+ if (puts_to_increment == 100) {
+ increment_counter(stats_extra, PUTS, puts_to_increment);
+ puts_to_increment = 0;
+ }
+ }
+
+cleanup:
+ increment_counter(stats_extra, PUTS, puts_to_increment);
+ return r;
+}
+
+static int UU() random_put_op(DB_TXN *txn, ARG arg, void *UU(operation_extra), void *stats_extra) {
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+ return random_put_in_db(db, txn, arg, false, stats_extra);
+}
+
+static int UU() random_put_op_singledb(DB_TXN *txn, ARG arg, void *UU(operation_extra), void *stats_extra) {
+ int db_index = arg->thread_idx%arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+ return random_put_in_db(db, txn, arg, false, stats_extra);
+}
+
+struct serial_put_extra {
+ uint64_t current;
+};
+
+static int UU() serial_put_op(DB_TXN *txn, ARG arg, void *operation_extra, void *stats_extra) {
+ struct serial_put_extra *CAST_FROM_VOIDP(extra, operation_extra);
+
+ int db_index = arg->thread_idx % arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+
+ int r = 0;
+ uint8_t keybuf[arg->cli->key_size];
+ uint8_t valbuf[arg->cli->val_size];
+
+ DBT key, val;
+ dbt_init(&key, keybuf, sizeof keybuf);
+ dbt_init(&val, valbuf, sizeof valbuf);
+ const int put_flags = get_put_flags(arg->cli);
+
+ uint64_t puts_to_increment = 0;
+ for (uint64_t i = 0; i < arg->cli->txn_size; ++i) {
+ // TODO: Change perf_insert to pass a single serial_put_op_extra
+ // to each insertion thread so they share the current key,
+ // and use a sync fetch-and-add here. This way you can measure
+ // the true performance of multiple threads appending unique
+ // keys to the end of a tree.
+ uint64_t k = extra->current++;
+ fill_key_buf(k, keybuf, arg->cli);
+ fill_val_buf_random(arg->random_data, valbuf, arg->cli);
+ r = db->put(db, txn, &key, &val, put_flags);
+ if (r != 0) {
+ goto cleanup;
+ }
+ puts_to_increment++;
+ if (puts_to_increment == 100) {
+ increment_counter(stats_extra, PUTS, puts_to_increment);
+ puts_to_increment = 0;
+ }
+ }
+
+cleanup:
+ increment_counter(stats_extra, PUTS, puts_to_increment);
+ return r;
+}
+
+struct loader_op_extra {
+ struct scan_op_extra soe;
+ int num_dbs;
+};
+
+static int UU() loader_op(DB_TXN* txn, ARG arg, void* operation_extra, void *UU(stats_extra)) {
+ struct loader_op_extra* CAST_FROM_VOIDP(extra, operation_extra);
+ invariant(extra->num_dbs >= 1);
+ DB_ENV* env = arg->env;
+ int r;
+ for (int num = 0; num < 2; num++) {
+ DB *dbs_load[extra->num_dbs];
+ uint32_t db_flags[extra->num_dbs];
+ uint32_t dbt_flags[extra->num_dbs];
+ for (int i = 0; i < extra->num_dbs; ++i) {
+ db_flags[i] = 0;
+ dbt_flags[i] = 0;
+ r = db_create(&dbs_load[i], env, 0);
+ assert(r == 0);
+ char fname[100];
+ sprintf(fname, "loader-db-%d", i);
+ // TODO: Need to call before_db_open_hook() and after_db_open_hook()
+ r = dbs_load[i]->open(dbs_load[i], txn, fname, nullptr, DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+ }
+ DB_LOADER *loader;
+ uint32_t loader_flags = (num == 0) ? 0 : LOADER_COMPRESS_INTERMEDIATES;
+ r = env->create_loader(env, txn, &loader, dbs_load[0], extra->num_dbs, dbs_load, db_flags, dbt_flags, loader_flags);
+ CKERR(r);
+
+ DBT key, val;
+ uint8_t keybuf[arg->cli->key_size];
+ uint8_t valbuf[arg->cli->val_size];
+ dbt_init(&key, keybuf, sizeof keybuf);
+ dbt_init(&val, valbuf, sizeof valbuf);
+
+ int sum = 0;
+ const int num_elements = 1000;
+ for (int i = 0; i < num_elements; i++) {
+ fill_key_buf(i, keybuf, arg->cli);
+ fill_val_buf_random(arg->random_data, valbuf, arg->cli);
+
+ assert(val.size >= sizeof(int));
+ if (i == num_elements - 1) {
+ // Make sum add to 0
+ *(int *) val.data = -sum;
+ } else {
+ // Keep track of sum
+ sum += *(int *) val.data;
+ }
+ r = loader->put(loader, &key, &val); CKERR(r);
+ }
+
+ r = loader->close(loader); CKERR(r);
+
+ for (int i = 0; i < extra->num_dbs; ++i) {
+ r = scan_op_and_maybe_check_sum(dbs_load[i], txn, &extra->soe, true); CKERR(r);
+ r = dbs_load[i]->close(dbs_load[i], 0); CKERR(r);
+ char fname[100];
+ sprintf(fname, "loader-db-%d", i);
+ r = env->dbremove(env, txn, fname, nullptr, 0); CKERR(r);
+ }
+ }
+ return 0;
+}
+
+static int UU() keyrange_op(DB_TXN *txn, ARG arg, void* UU(operation_extra), void *UU(stats_extra)) {
+ // Pick a random DB, do a keyrange operation.
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+
+ int r = 0;
+ uint8_t keybuf[arg->cli->key_size];
+
+ DBT key;
+ dbt_init(&key, keybuf, sizeof keybuf);
+ fill_key_buf_random(arg->random_data, keybuf, arg);
+
+ uint64_t less,equal,greater;
+ int is_exact;
+ r = db->key_range64(db, txn, &key, &less, &equal, &greater, &is_exact);
+ assert(r == 0);
+ return r;
+}
+
+static int UU() frag_op(DB_TXN *UU(txn), ARG arg, void* UU(operation_extra), void *UU(stats_extra)) {
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB *db = arg->dbp[db_index];
+
+ TOKU_DB_FRAGMENTATION_S frag;
+ int r = db->get_fragmentation(db, &frag);
+ invariant_zero(r);
+ return r;
+}
+
+static void UU() get_key_after_bytes_callback(const DBT *UU(end_key), uint64_t UU(skipped), void *UU(extra)) {
+ // nothing
+}
+
+static int UU() get_key_after_bytes_op(DB_TXN *txn, ARG arg, void* UU(operation_extra), void *UU(stats_extra)) {
+ // Pick a random DB, do a get_key_after_bytes operation.
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+
+ int r = 0;
+ uint8_t keybuf[arg->cli->key_size];
+
+ DBT start_key, end_key;
+ dbt_init(&start_key, keybuf, sizeof keybuf);
+ fill_key_buf_random(arg->random_data, keybuf, arg);
+ uint64_t skip_len = myrandom_r(arg->random_data) % (2<<30);
+ dbt_init(&end_key, nullptr, 0);
+
+ r = db->get_key_after_bytes(db, txn, &start_key, skip_len, get_key_after_bytes_callback, nullptr, 0);
+ return r;
+}
+
+static int verify_progress_callback(void *UU(extra), float UU(progress)) {
+ if (!run_test) {
+ return -1;
+ }
+ return 0;
+}
+
+static int UU() verify_op(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
+ int r = 0;
+ for (int i = 0; i < arg->cli->num_DBs && run_test; i++) {
+ DB* db = arg->dbp[i];
+ r = db->verify_with_progress(db, verify_progress_callback, nullptr, 1, 0);
+ if (!run_test) {
+ r = 0;
+ }
+ CKERR(r);
+ }
+ return r;
+}
+
+static int UU() scan_op(DB_TXN *txn, ARG arg, void* operation_extra, void *UU(stats_extra)) {
+ struct scan_op_extra* CAST_FROM_VOIDP(extra, operation_extra);
+ for (int i = 0; run_test && i < arg->cli->num_DBs; i++) {
+ int r = scan_op_and_maybe_check_sum(arg->dbp[i], txn, extra, true);
+ assert_zero(r);
+ }
+ return 0;
+}
+
+static int UU() scan_op_no_check(DB_TXN *txn, ARG arg, void* operation_extra, void *UU(stats_extra)) {
+ struct scan_op_extra* CAST_FROM_VOIDP(extra, operation_extra);
+ for (int i = 0; run_test && i < arg->cli->num_DBs; i++) {
+ int r = scan_op_and_maybe_check_sum(arg->dbp[i], txn, extra, false);
+ assert_zero(r);
+ }
+ return 0;
+}
+
+struct scan_op_worker_info {
+ DB *db;
+ DB_TXN *txn;
+ void *extra;
+};
+
+static void scan_op_worker(void *arg) {
+ struct scan_op_worker_info *CAST_FROM_VOIDP(info, arg);
+ struct scan_op_extra *CAST_FROM_VOIDP(extra, info->extra);
+ int r = scan_op_and_maybe_check_sum(
+ info->db,
+ info->txn,
+ extra,
+ false
+ );
+ assert_zero(r);
+ toku_free(info);
+}
+
+static int UU() scan_op_no_check_parallel(DB_TXN *txn, ARG arg, void* operation_extra, void *UU(stats_extra)) {
+ const int num_cores = toku_os_get_number_processors();
+ const int num_workers = arg->cli->num_DBs < num_cores ? arg->cli->num_DBs : num_cores;
+ KIBBUTZ kibbutz = NULL;
+ int r = toku_kibbutz_create(num_workers, &kibbutz);
+ assert(r == 0);
+ for (int i = 0; run_test && i < arg->cli->num_DBs; i++) {
+ struct scan_op_worker_info *XCALLOC(info);
+ info->db = arg->dbp[i];
+ info->txn = txn;
+ info->extra = operation_extra;
+ toku_kibbutz_enq(kibbutz, scan_op_worker, info);
+ }
+ toku_kibbutz_destroy(kibbutz);
+ return 0;
+}
+
+static int dbt_do_nothing (DBT const *UU(key), DBT const *UU(row), void *UU(context)) {
+ return 0;
+}
+
+static int UU() ptquery_and_maybe_check_op(DB* db, DB_TXN *txn, ARG arg, bool check) {
+ int r = 0;
+ uint8_t keybuf[arg->cli->key_size];
+ DBT key, val;
+ dbt_init(&key, keybuf, sizeof keybuf);
+ dbt_init(&val, nullptr, 0);
+ fill_key_buf_random(arg->random_data, keybuf, arg);
+
+ r = db->getf_set(
+ db,
+ txn,
+ 0,
+ &key,
+ dbt_do_nothing,
+ nullptr
+ );
+ if (check) {
+ assert(r != DB_NOTFOUND);
+ }
+ r = 0;
+ return r;
+}
+
+static int UU() ptquery_op(DB_TXN *txn, ARG arg, void* UU(operation_extra), void *stats_extra) {
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+ int r = ptquery_and_maybe_check_op(db, txn, arg, true);
+ if (!r) {
+ increment_counter(stats_extra, PTQUERIES, 1);
+ }
+ return r;
+}
+
+static int UU() ptquery_op_no_check(DB_TXN *txn, ARG arg, void* UU(operation_extra), void *stats_extra) {
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+ int r = ptquery_and_maybe_check_op(db, txn, arg, false);
+ if (!r) {
+ increment_counter(stats_extra, PTQUERIES, 1);
+ }
+ return r;
+}
+
+typedef void (*rangequery_row_cb)(DB *db, const DBT *key, const DBT *val, void *extra);
+struct rangequery_cb_extra {
+ int rows_read;
+
+ // Call cb(db, key, value, cb_extra) on up to $limit rows.
+ const int limit;
+ const rangequery_row_cb cb;
+ DB *const db;
+ void *const cb_extra;
+};
+
+static int rangequery_cb(const DBT *key, const DBT *value, void *extra) {
+ struct rangequery_cb_extra *CAST_FROM_VOIDP(info, extra);
+ if (info->cb != nullptr) {
+ info->cb(info->db, key, value, info->cb_extra);
+ }
+ if (++info->rows_read >= info->limit) {
+ return 0;
+ } else {
+ return TOKUDB_CURSOR_CONTINUE;
+ }
+}
+
+static void rangequery_db(DB *db, DB_TXN *txn, ARG arg, rangequery_row_cb cb, void *cb_extra) {
+ const int limit = arg->cli->range_query_limit;
+
+ int r;
+ DBC *cursor;
+ DBT start_key, end_key;
+ uint8_t start_keybuf[arg->cli->key_size];
+ uint8_t end_keybuf[arg->cli->key_size];
+ dbt_init(&start_key, start_keybuf, sizeof start_keybuf);
+ dbt_init(&end_key, end_keybuf, sizeof end_keybuf);
+ const uint64_t start_k = random_bounded_key(arg->random_data, arg);
+ fill_key_buf(start_k, start_keybuf, arg->cli);
+ fill_key_buf(start_k + limit, end_keybuf, arg->cli);
+
+ r = db->cursor(db, txn, &cursor, 0); CKERR(r);
+ r = cursor->c_set_bounds(cursor, &start_key, &end_key, true, 0); CKERR(r);
+
+ struct rangequery_cb_extra extra = {
+ .rows_read = 0,
+ .limit = limit,
+ .cb = cb,
+ .db = db,
+ .cb_extra = cb_extra,
+ };
+ r = cursor->c_getf_set(cursor, 0, &start_key, rangequery_cb, &extra);
+ while (r == 0 && extra.rows_read < extra.limit && run_test) {
+ r = cursor->c_getf_next(cursor, 0, rangequery_cb, &extra);
+ }
+
+ r = cursor->c_close(cursor); CKERR(r);
+}
+
+static int UU() rangequery_op(DB_TXN *txn, ARG arg, void *UU(operation_extra), void *stats_extra) {
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB *db = arg->dbp[db_index];
+ rangequery_db(db, txn, arg, nullptr, nullptr);
+ increment_counter(stats_extra, PTQUERIES, 1);
+ return 0;
+}
+
+static int UU() cursor_create_close_op(DB_TXN *txn, ARG arg, void* UU(operation_extra), void *UU(stats_extra)) {
+ int db_index = arg->cli->num_DBs > 1 ? myrandom_r(arg->random_data)%arg->cli->num_DBs : 0;
+ DB* db = arg->dbp[db_index];
+ DBC* cursor = nullptr;
+ int r = db->cursor(db, txn, &cursor, 0); assert(r == 0);
+ r = cursor->c_close(cursor); assert(r == 0);
+ return 0;
+}
+
+#define MAX_RANDOM_VAL 10000
+
+enum update_type {
+ UPDATE_ADD_DIFF,
+ UPDATE_NEGATE,
+ UPDATE_WITH_HISTORY
+};
+
+struct update_op_extra {
+ enum update_type type;
+ int pad_bytes;
+ union {
+ struct {
+ int diff;
+ } d;
+ struct {
+ int expected;
+ int new_val;
+ } h;
+ } u;
+};
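+
+// Note (illustrative sketch): the update operations below hand this struct to
+// update_op_callback() through the value DBT of DB->update(), e.g.
+//   dbt_init(&val, &extra, sizeof extra);
+//   db->update(db, txn, &key, &val, 0);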
+
+struct update_op_args {
+ int *update_history_buffer;
+ int update_pad_frequency;
+};
+
+static struct update_op_args UU() get_update_op_args(struct cli_args* cli_args, int* update_history_buffer) {
+ struct update_op_args uoe;
+ uoe.update_history_buffer = update_history_buffer;
+ uoe.update_pad_frequency = cli_args->num_elements/100; // arbitrary
+ return uoe;
+}
+
+static uint64_t update_count = 0;
+
+static int update_op_callback(DB *UU(db), const DBT *UU(key),
+ const DBT *old_val,
+ const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra)
+{
+ int old_int_val = 0;
+ if (old_val) {
+ old_int_val = *(int *) old_val->data;
+ }
+ assert(extra->size == sizeof(struct update_op_extra));
+ struct update_op_extra *CAST_FROM_VOIDP(e, extra->data);
+
+ int new_int_val;
+ switch (e->type) {
+ case UPDATE_ADD_DIFF:
+ new_int_val = old_int_val + e->u.d.diff;
+ break;
+ case UPDATE_NEGATE:
+ new_int_val = -old_int_val;
+ break;
+ case UPDATE_WITH_HISTORY:
+ assert(old_int_val == e->u.h.expected);
+ new_int_val = e->u.h.new_val;
+ break;
+ default:
+ abort();
+ }
+
+ uint32_t val_size = sizeof(int) + e->pad_bytes;
+ uint8_t valbuf[val_size];
+ fill_val_buf(new_int_val, valbuf, val_size);
+
+ DBT new_val;
+ dbt_init(&new_val, valbuf, val_size);
+ set_val(&new_val, set_extra);
+ return 0;
+}
+
+static int UU() update_op2(DB_TXN* txn, ARG arg, void* UU(operation_extra), void *UU(stats_extra)) {
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+
+ int r = 0;
+ DBT key, val;
+ uint8_t keybuf[arg->cli->key_size];
+
+ toku_sync_fetch_and_add(&update_count, 1);
+ struct update_op_extra extra;
+ ZERO_STRUCT(extra);
+ extra.type = UPDATE_ADD_DIFF;
+ extra.pad_bytes = 0;
+ int curr_val_sum = 0;
+
+ dbt_init(&key, keybuf, sizeof keybuf);
+ dbt_init(&val, &extra, sizeof extra);
+
+ for (uint32_t i = 0; i < arg->cli->txn_size; i++) {
+ fill_key_buf_random(arg->random_data, keybuf, arg);
+ extra.u.d.diff = 1;
+ curr_val_sum += extra.u.d.diff;
+ r = db->update(
+ db,
+ txn,
+ &key,
+ &val,
+ 0
+ );
+ if (r != 0) {
+ return r;
+ }
+ int *rkp = (int *) keybuf;
+ int rand_key = *rkp;
+ invariant(rand_key != (arg->cli->num_elements - rand_key));
+ rand_key = arg->cli->num_elements - rand_key; // mirror the key so the -1 update hits a different row
+ fill_key_buf(rand_key, keybuf, arg->cli);
+ extra.u.d.diff = -1;
+ r = db->update(
+ db,
+ txn,
+ &key,
+ &val,
+ 0
+ );
+ if (r != 0) {
+ return r;
+ }
+ }
+ return r;
+}
+
+static int pre_acquire_write_lock(DB *db, DB_TXN *txn,
+ const DBT *left_key, const DBT *right_key) {
+ int r;
+ DBC *cursor;
+
+ r = db->cursor(db, txn, &cursor, DB_RMW);
+ CKERR(r);
+ int cursor_r = cursor->c_set_bounds(cursor, left_key, right_key, true, 0);
+ r = cursor->c_close(cursor);
+ CKERR(r);
+
+ return cursor_r;
+}
+
+// take the given db and do an update on it
+static int
+UU() update_op_db(DB *db, DB_TXN *txn, ARG arg, void* operation_extra, void *UU(stats_extra)) {
+ uint64_t old_update_count = toku_sync_fetch_and_add(&update_count, 1);
+ struct update_op_args* CAST_FROM_VOIDP(op_args, operation_extra);
+ struct update_op_extra extra;
+ ZERO_STRUCT(extra);
+ extra.type = UPDATE_ADD_DIFF;
+ extra.pad_bytes = 0;
+ if (op_args->update_pad_frequency) {
+ if (old_update_count % (2*op_args->update_pad_frequency) == old_update_count%op_args->update_pad_frequency) {
+ extra.pad_bytes = 100;
+ }
+ }
+
+ int r = 0;
+ DBT key, val;
+ uint8_t keybuf[arg->cli->key_size];
+ int update_key;
+ int curr_val_sum = 0;
+ const int update_flags = arg->cli->prelock_updates ? DB_PRELOCKED_WRITE : 0;
+
+ for (uint32_t i = 0; i < arg->cli->txn_size; i++) {
+ if (arg->prelock_updates) {
+ if (i == 0) {
+ update_key = random_bounded_key(arg->random_data, arg);
+
+ const int max_key_in_table = arg->cli->num_elements - 1;
+ const bool range_wraps = (update_key + (int) arg->cli->txn_size - 1) > max_key_in_table;
+ int left_key, right_key;
+ DBT left_key_dbt, right_key_dbt;
+
+ // acquire the range starting at the random key, plus txn_size - 1
+ // elements, but lock no further than the end of the table. if the
+ // range wraps around to the beginning we will handle it below.
+ left_key = update_key;
+ right_key = range_wraps ? max_key_in_table : (left_key + arg->cli->txn_size - 1);
+ r = pre_acquire_write_lock(
+ db,
+ txn,
+ dbt_init(&left_key_dbt, &left_key, sizeof update_key),
+ dbt_init(&right_key_dbt, &right_key, sizeof right_key)
+ );
+ if (r != 0) {
+ return r;
+ }
+
+ // check if the right end point wrapped around to the beginning
+ // if so, lock from 0 to the right key, modded by table size.
+ if (range_wraps) {
+ right_key = (left_key + arg->cli->txn_size - 1) - max_key_in_table;
+ invariant(right_key > 0);
+ left_key = 0;
+ r = pre_acquire_write_lock(
+ db,
+ txn,
+ dbt_init(&left_key_dbt, &left_key, sizeof update_key),
+ dbt_init(&right_key_dbt, &right_key, sizeof right_key)
+ );
+ if (r != 0) {
+ return r;
+ }
+ }
+ } else {
+ update_key++;
+ if (arg->bounded_element_range) {
+ update_key = update_key % arg->cli->num_elements;
+ }
+ }
+ fill_key_buf(update_key, keybuf, arg->cli);
+ } else {
+ // just do a usual, random point update without locking first
+ fill_key_buf_random(arg->random_data, keybuf, arg);
+ }
+
+
+ // the last update keeps the table's sum as zero
+ // every other update except the last applies a random delta
+ if (i == arg->cli->txn_size - 1) {
+ extra.u.d.diff = -curr_val_sum;
+ } else {
+ extra.u.d.diff = myrandom_r(arg->random_data) % MAX_RANDOM_VAL;
+ // just make every other value random
+ if (i%2 == 0) {
+ extra.u.d.diff = -extra.u.d.diff;
+ }
+ curr_val_sum += extra.u.d.diff;
+ }
+
+ dbt_init(&key, keybuf, sizeof keybuf);
+ dbt_init(&val, &extra, sizeof extra);
+
+ // do the update
+ r = db->update(
+ db,
+ txn,
+ &key,
+ &val,
+ update_flags
+ );
+ if (r != 0) {
+ return r;
+ }
+ }
+
+ return r;
+}
+
+// choose a random DB and do an update on it
+static int
+UU() update_op(DB_TXN *txn, ARG arg, void* operation_extra, void *stats_extra) {
+ int db_index = myrandom_r(arg->random_data) % arg->cli->num_DBs;
+ DB *db = arg->dbp[db_index];
+ return update_op_db(db, txn, arg, operation_extra, stats_extra);
+}
+
+static int UU() update_with_history_op(DB_TXN *txn, ARG arg, void* operation_extra, void *UU(stats_extra)) {
+ struct update_op_args* CAST_FROM_VOIDP(op_args, operation_extra);
+ assert(arg->bounded_element_range);
+ assert(op_args->update_history_buffer);
+
+ int r = 0;
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+
+ struct update_op_extra extra;
+ ZERO_STRUCT(extra);
+ extra.type = UPDATE_WITH_HISTORY;
+ uint64_t old_update_count = toku_sync_fetch_and_add(&update_count, 1);
+ extra.pad_bytes = 0;
+ if (op_args->update_pad_frequency) {
+ if (old_update_count % (2*op_args->update_pad_frequency) != old_update_count%op_args->update_pad_frequency) {
+ extra.pad_bytes = 500;
+ }
+ }
+
+ DBT key, val;
+ uint8_t keybuf[arg->cli->key_size];
+ int rand_key;
+ int curr_val_sum = 0;
+
+ dbt_init(&key, keybuf, sizeof keybuf);
+ dbt_init(&val, &extra, sizeof extra);
+
+ for (uint32_t i = 0; i < arg->cli->txn_size; i++) {
+ fill_key_buf_random(arg->random_data, keybuf, arg);
+ int *rkp = (int *) keybuf;
+ rand_key = *rkp;
+ invariant(rand_key < arg->cli->num_elements);
+ if (i < arg->cli->txn_size - 1) {
+ extra.u.h.new_val = myrandom_r(arg->random_data) % MAX_RANDOM_VAL;
+ // just make every other value random
+ if (i % 2 == 0) {
+ extra.u.h.new_val = -extra.u.h.new_val;
+ }
+ curr_val_sum += extra.u.h.new_val;
+ } else {
+ // the last update should ensure the sum stays zero
+ extra.u.h.new_val = -curr_val_sum;
+ }
+ extra.u.h.expected = op_args->update_history_buffer[rand_key];
+ op_args->update_history_buffer[rand_key] = extra.u.h.new_val;
+ r = db->update(
+ db,
+ txn,
+ &key,
+ &val,
+ 0
+ );
+ if (r != 0) {
+ return r;
+ }
+ }
+
+ return r;
+}
+
+static int UU() update_broadcast_op(DB_TXN *txn, ARG arg, void* UU(operation_extra), void *UU(stats_extra)) {
+ struct update_op_extra extra;
+ ZERO_STRUCT(extra);
+ int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs;
+ DB* db = arg->dbp[db_index];
+ extra.type = UPDATE_NEGATE;
+ extra.pad_bytes = 0;
+ DBT val;
+ int r = db->update_broadcast(db, txn, dbt_init(&val, &extra, sizeof extra), 0);
+ CKERR(r);
+ return r;
+}
+
+static int hot_progress_callback(void *UU(extra), float UU(progress)) {
+ return run_test ? 0 : 1;
+}
+
+static int UU() hot_op(DB_TXN *UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
+ int r;
+ for (int i = 0; run_test && i < arg->cli->num_DBs; i++) {
+ DB* db = arg->dbp[i];
+ uint64_t loops_run;
+ r = db->hot_optimize(db, NULL, NULL, hot_progress_callback, nullptr, &loops_run);
+ if (run_test) {
+ CKERR(r);
+ }
+ }
+ return 0;
+}
+
+static void
+get_ith_table_name(char *buf, size_t len, int i) {
+ snprintf(buf, len, "main%d", i);
+}
+
+DB_TXN * const null_txn = 0;
+
+// For each line of engine status output, look for lines that contain substrings
+// that match any of the strings in the pattern string. The pattern string contains
+// 0 or more strings separated by the '|' character, kind of like a regex.
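+// Example (illustrative): a pattern of "cachetable|txn" prints every status
+// row whose text contains either "cachetable" or "txn".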
+static void print_matching_engine_status_rows(DB_ENV *env, const char *pattern) {
+ uint64_t num_rows;
+ env->get_engine_status_num_rows(env, &num_rows);
+ uint64_t buf_size = num_rows * 128;
+ const char *row;
+ char *row_r;
+
+ char *pattern_copy = toku_xstrdup(pattern);
+ int num_patterns = 1;
+ for (char *p = pattern_copy; *p != '\0'; p++) {
+ if (*p == '|') {
+ *p = '\0';
+ num_patterns++;
+ }
+ }
+
+ char *XMALLOC_N(buf_size, buf);
+ int r = env->get_engine_status_text(env, buf, buf_size);
+ invariant_zero(r);
+
+ for (row = strtok_r(buf, "\n", &row_r); row != nullptr; row = strtok_r(nullptr, "\n", &row_r)) {
+ const char *p = pattern_copy;
+ for (int i = 0; i < num_patterns; i++, p += strlen(p) + 1) {
+ if (strstr(row, p) != nullptr) {
+ fprintf(stderr, "%s\n", row);
+ }
+ }
+ }
+
+ toku_free(pattern_copy);
+ toku_free(buf);
+ fflush(stderr);
+}
+
+// TODO: stuff like this should be in a generalized header somewhere
+static inline int
+intmin(const int a, const int b)
+{
+ if (a < b) {
+ return a;
+ }
+ return b;
+}
+
+struct test_time_extra {
+ DB_ENV *env;
+ int num_seconds;
+ bool crash_at_end;
+ struct worker_extra *wes;
+ int num_wes;
+ struct cli_args *cli_args;
+};
+
+static void *test_time(void *arg) {
+ struct test_time_extra* CAST_FROM_VOIDP(tte, arg);
+ DB_ENV *env = tte->env;
+ int num_seconds = tte->num_seconds;
+ const struct perf_formatter *perf_formatter = &perf_formatters[tte->cli_args->perf_output_format];
+
+ //
+ // if num_seconds is set to 0, run indefinitely
+ //
+ if (num_seconds == 0) {
+ num_seconds = INT32_MAX;
+ }
+ uint64_t last_counter_values[tte->num_wes][(int) NUM_OPERATION_TYPES];
+ ZERO_ARRAY(last_counter_values);
+ uint64_t *counters[tte->num_wes];
+ for (int t = 0; t < tte->num_wes; ++t) {
+ counters[t] = tte->wes[t].counters;
+ }
+ if (verbose) {
+ printf("Sleeping for %d seconds\n", num_seconds);
+ }
+ for (int i = 0; i < num_seconds; ) {
+ struct timeval tv[2];
+ const int sleeptime = intmin(tte->cli_args->performance_period, num_seconds - i);
+ int r = gettimeofday(&tv[0], nullptr);
+ assert_zero(r);
+ usleep(sleeptime*1000*1000);
+ r = gettimeofday(&tv[1], nullptr);
+ assert_zero(r);
+ int actual_sleeptime = tv[1].tv_sec - tv[0].tv_sec;
+ if (abs(actual_sleeptime - sleeptime) <= 1) {
+ // Close enough, no need to alarm the user, and we didn't check nsec.
+ i += sleeptime;
+ } else {
+ if (verbose) {
+ printf("tried to sleep %d secs, actually slept %d secs\n", sleeptime, actual_sleeptime);
+ }
+ i += actual_sleeptime;
+ }
+ if (tte->cli_args->print_performance && tte->cli_args->print_iteration_performance) {
+ perf_formatter->iteration(tte->cli_args, i, last_counter_values, counters, tte->num_wes);
+ }
+ if (tte->cli_args->print_engine_status != nullptr) {
+ print_matching_engine_status_rows(env, tte->cli_args->print_engine_status);
+ }
+ }
+
+ if (verbose) {
+ printf("should now end test\n");
+ }
+ toku_sync_bool_compare_and_swap(&run_test, true, false); // make this atomic to make valgrind --tool=drd happy.
+ if (verbose) {
+ printf("run_test %d\n", run_test);
+ }
+ if (tte->crash_at_end) {
+ toku_hard_crash_on_purpose();
+ }
+ return arg;
+}
+
+struct sleep_and_crash_extra {
+ toku_mutex_t mutex;
+ toku_cond_t cond;
+ int seconds;
+ bool is_setup;
+ bool threads_have_joined;
+};
+
+static void *sleep_and_crash(void *extra) {
+ sleep_and_crash_extra *e = static_cast<sleep_and_crash_extra *>(extra);
+ toku_mutex_lock(&e->mutex);
+ struct timeval tv;
+ toku_timespec_t ts;
+ gettimeofday(&tv, nullptr);
+ ts.tv_sec = tv.tv_sec + e->seconds;
+ ts.tv_nsec = 0;
+ e->is_setup = true;
+ if (verbose) {
+ printf("Waiting %d seconds for other threads to join.\n", e->seconds);
+ fflush(stdout);
+ }
+ int r = toku_cond_timedwait(&e->cond, &e->mutex, &ts);
+ toku_mutex_assert_locked(&e->mutex);
+ if (r == ETIMEDOUT) {
+ invariant(!e->threads_have_joined);
+ if (verbose) {
+ printf("Some thread didn't join on time, crashing.\n");
+ fflush(stdout);
+ }
+ toku_crash_and_dump_core_on_purpose();
+ } else {
+ assert(r == 0);
+ assert(e->threads_have_joined);
+ if (verbose) {
+ printf("Other threads joined on time, exiting cleanly.\n");
+ }
+ }
+ toku_mutex_unlock(&e->mutex);
+ return nullptr;
+}
+
+static int run_workers(
+ struct arg *thread_args,
+ int num_threads,
+ uint32_t num_seconds,
+ bool crash_at_end,
+ struct cli_args* cli_args
+ )
+{
+ int r;
+ const struct perf_formatter *perf_formatter =
+ &perf_formatters[cli_args->perf_output_format];
+ toku_mutex_t mutex = ZERO_MUTEX_INITIALIZER;
+ toku_mutex_init(toku_uninstrumented, &mutex, nullptr);
+ struct st_rwlock rwlock;
+ rwlock_init(toku_uninstrumented, &rwlock);
+ toku_pthread_t tids[num_threads];
+ toku_pthread_t time_tid;
+ if (cli_args->print_performance) {
+ perf_formatter->header(cli_args, num_threads);
+ }
+ // allocate worker_extra's on cache line boundaries
+ struct worker_extra *XMALLOC_N_ALIGNED(64, num_threads, worker_extra);
+ struct test_time_extra tte;
+ tte.env = thread_args[0].env;
+ tte.num_seconds = num_seconds;
+ tte.crash_at_end = crash_at_end;
+ tte.wes = worker_extra;
+ tte.num_wes = num_threads;
+ tte.cli_args = cli_args;
+ run_test = true;
+ for (int i = 0; i < num_threads; ++i) {
+ thread_args[i].thread_idx = i;
+ thread_args[i].num_threads = num_threads;
+ worker_extra[i].thread_arg = &thread_args[i];
+ worker_extra[i].operation_lock = &rwlock;
+ worker_extra[i].operation_lock_mutex = &mutex;
+ XCALLOC_N((int)NUM_OPERATION_TYPES, worker_extra[i].counters);
+ TOKU_DRD_IGNORE_VAR(worker_extra[i].counters);
+ {
+ int chk_r = toku_pthread_create(toku_uninstrumented,
+ &tids[i],
+ nullptr,
+ worker,
+ &worker_extra[i]);
+ CKERR(chk_r);
+ }
+ if (verbose)
+ printf("%lu created\n", (unsigned long)tids[i]);
+ }
+ {
+ int chk_r = toku_pthread_create(
+ toku_uninstrumented, &time_tid, nullptr, test_time, &tte);
+ CKERR(chk_r);
+ }
+ if (verbose)
+ printf("%lu created\n", (unsigned long)time_tid);
+
+ void *ret;
+ r = toku_pthread_join(time_tid, &ret); assert_zero(r);
+ if (verbose) printf("%lu joined\n", (unsigned long) time_tid);
+
+ {
+ // Set an alarm that will kill us if it takes too long to join all the
+ // threads (i.e. there is some runaway thread).
+ struct sleep_and_crash_extra sac_extra;
+ ZERO_STRUCT(sac_extra);
+ toku_mutex_init(toku_uninstrumented, &sac_extra.mutex, nullptr);
+ toku_cond_init(toku_uninstrumented, &sac_extra.cond, nullptr);
+ sac_extra.seconds = cli_args->join_timeout;
+ sac_extra.is_setup = false;
+ sac_extra.threads_have_joined = false;
+
+ toku_mutex_lock(&sac_extra.mutex);
+ toku_pthread_t sac_thread;
+ r = toku_pthread_create(toku_uninstrumented,
+ &sac_thread,
+ nullptr,
+ sleep_and_crash,
+ &sac_extra);
+ assert_zero(r);
+ // Wait for sleep_and_crash thread to get set up, spinning is ok, this
+ // should be quick.
+ while (!sac_extra.is_setup) {
+ toku_mutex_unlock(&sac_extra.mutex);
+ r = toku_pthread_yield();
+ assert_zero(r);
+ toku_mutex_lock(&sac_extra.mutex);
+ }
+ toku_mutex_unlock(&sac_extra.mutex);
+
+ // Timeout thread has started, join everyone
+ for (int i = 0; i < num_threads; ++i) {
+ r = toku_pthread_join(tids[i], &ret); assert_zero(r);
+ if (verbose)
+ printf("%lu joined\n", (unsigned long) tids[i]);
+ }
+
+ // Signal timeout thread not to crash.
+ toku_mutex_lock(&sac_extra.mutex);
+ sac_extra.threads_have_joined = true;
+ toku_cond_signal(&sac_extra.cond);
+ toku_mutex_unlock(&sac_extra.mutex);
+ r = toku_pthread_join(sac_thread, nullptr);
+ assert_zero(r);
+ toku_cond_destroy(&sac_extra.cond);
+ toku_mutex_destroy(&sac_extra.mutex);
+ }
+
+ if (cli_args->print_performance) {
+ uint64_t *counters[num_threads];
+ for (int i = 0; i < num_threads; ++i) {
+ counters[i] = worker_extra[i].counters;
+ }
+ perf_formatter->totals(cli_args, counters, num_threads);
+ }
+
+ for (int i = 0; i < num_threads; ++i) {
+ toku_free(worker_extra[i].counters);
+ }
+ if (verbose)
+ printf("ending test, pthreads have joined\n");
+ rwlock_destroy(&rwlock);
+ toku_mutex_destroy(&mutex);
+ toku_free(worker_extra);
+ return r;
+}
+
+// Pre-open hook
+static void do_nothing_before_db_open(DB *UU(db), int UU(idx)) { }
+// Requires: DB is created (allocated) but not opened. idx is the index
+// into the DBs array.
+static void (*before_db_open_hook)(DB *db, int idx) = do_nothing_before_db_open;
+
+// Post-open hook
+typedef void (*reopen_db_fn)(DB *db, int idx, struct cli_args *cli_args);
+static DB *do_nothing_after_db_open(DB_ENV *UU(env), DB *db, int UU(idx), reopen_db_fn UU(reopen), struct cli_args *UU(cli_args)) { return db; }
+// Requires: DB is opened and is the 'idx' db in the DBs array.
+// Note: Reopen function may be used to open a db if the given one was closed.
+// Returns: An opened db.
+static DB *(*after_db_open_hook)(DB_ENV *env, DB *db, int idx, reopen_db_fn reopen, struct cli_args *cli_args) = do_nothing_after_db_open;
+
+static void open_db_for_create(DB *db, int idx, struct cli_args *cli_args) {
+ int r;
+ char name[30];
+ memset(name, 0, sizeof(name));
+ get_ith_table_name(name, sizeof(name), idx);
+ r = db->set_flags(db, 0); CKERR(r);
+ r = db->set_fanout(db, cli_args->env_args.fanout); CKERR(r);
+ r = db->set_pagesize(db, cli_args->env_args.node_size); CKERR(r);
+ r = db->set_readpagesize(db, cli_args->env_args.basement_node_size); CKERR(r);
+ r = db->set_compression_method(db, cli_args->compression_method); CKERR(r);
+ const int flags = DB_CREATE | (cli_args->blackhole ? DB_BLACKHOLE : 0);
+ r = db->open(db, null_txn, name, nullptr, DB_BTREE, flags, 0666); CKERR(r);
+}
+
+static void open_db(DB *db, int idx, struct cli_args *cli_args) {
+ int r;
+ char name[30];
+ memset(name, 0, sizeof(name));
+ get_ith_table_name(name, sizeof(name), idx);
+ const int flags = DB_CREATE | (cli_args->blackhole ? DB_BLACKHOLE : 0);
+ r = db->open(db, null_txn, name, nullptr, DB_BTREE, flags, 0666); CKERR(r);
+ r = db->change_fanout(db, cli_args->env_args.fanout); CKERR(r); // change fanout until fanout is persistent
+}
+
+static int create_tables(DB_ENV **env_res, DB **db_res, int num_DBs,
+ int (*bt_compare)(DB *, const DBT *, const DBT *),
+ struct cli_args *cli_args
+) {
+ int r;
+ struct env_args env_args = cli_args->env_args;
+
+ char rmcmd[32 + strlen(env_args.envdir)]; sprintf(rmcmd, "rm -rf %s", env_args.envdir);
+ r = system(rmcmd);
+ CKERR(r);
+ r = toku_os_mkdir(env_args.envdir, S_IRWXU+S_IRWXG+S_IRWXO); assert(r==0);
+
+ DB_ENV *env;
+ db_env_set_num_bucket_mutexes(env_args.num_bucket_mutexes);
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->set_redzone(env, 0); CKERR(r);
+ if (!cli_args->memcmp_keys) {
+ r = env->set_default_bt_compare(env, bt_compare); CKERR(r);
+ }
+ r = env->set_lk_max_memory(env, env_args.lk_max_memory); CKERR(r);
+ r = env->set_cachesize(env, env_args.cachetable_size / (1 << 30), env_args.cachetable_size % (1 << 30), 1); CKERR(r);
+ r = env->set_lg_bsize(env, env_args.rollback_node_size); CKERR(r);
+ if (env_args.generate_put_callback) {
+ r = env->set_generate_row_callback_for_put(env, env_args.generate_put_callback);
+ CKERR(r);
+ }
+ else {
+ r = env->set_generate_row_callback_for_put(env, generate_row_for_put);
+ CKERR(r);
+ }
+ if (env_args.generate_del_callback) {
+ r = env->set_generate_row_callback_for_del(env, env_args.generate_del_callback);
+ CKERR(r);
+ }
+ int env_flags = get_env_open_flags(cli_args);
+ r = env->open(env, env_args.envdir, env_flags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ r = env->checkpointing_set_period(env, env_args.checkpointing_period); CKERR(r);
+ r = env->cleaner_set_period(env, env_args.cleaner_period); CKERR(r);
+ r = env->cleaner_set_iterations(env, env_args.cleaner_iterations); CKERR(r);
+ env->change_fsync_log_period(env, env_args.sync_period);
+ *env_res = env;
+
+ for (int i = 0; i < num_DBs; i++) {
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ before_db_open_hook(db, i);
+ open_db_for_create(db, i, cli_args);
+ db_res[i] = after_db_open_hook(env, db, i, open_db_for_create, cli_args);
+ }
+ return r;
+}
+
+static void report_overall_fill_table_progress(struct cli_args *args, int num_rows) {
+ // to keep the output sane, we'd like to prevent two threads
+ // from printing the same performance report twice.
+ static bool reporting;
+
+ // when was the first time measurement taken?
+ static uint64_t t0;
+ static int rows_inserted;
+
+ // when was the last report? what was its progress?
+ static uint64_t last_report;
+ static double last_progress;
+ if (t0 == 0) {
+ t0 = toku_current_time_microsec();
+ last_report = t0;
+ }
+
+ uint64_t rows_so_far = toku_sync_add_and_fetch(&rows_inserted, num_rows);
+ double progress = rows_so_far / (args->num_elements * args->num_DBs * 1.0);
+ if (progress > (last_progress + .01)) {
+ uint64_t t1 = toku_current_time_microsec();
+ const uint64_t minimum_report_period = 5 * 1000000;
+ if (t1 > last_report + minimum_report_period
+ && toku_sync_bool_compare_and_swap(&reporting, 0, 1) == 0) {
+ double inserts_per_sec = (rows_so_far*1000000) / ((t1 - t0) * 1.0);
+ printf("fill tables: %ld%% complete, %.2lf rows/sec\n",
+ (long)(progress * 100), inserts_per_sec);
+ last_progress = progress;
+ last_report = t1;
+ reporting = false;
+ }
+ }
+}
+
+static void fill_single_table(DB_ENV *env, DB *db, struct cli_args *args, bool fill_with_zeroes) {
+ const int min_size_for_loader = 1 * 1000 * 1000;
+ const int puts_per_txn = 10 * 1000;
+
+ int r = 0;
+ DB_TXN *txn = nullptr;
+ DB_LOADER *loader = nullptr;
+ struct random_data random_data;
+ char random_buf[8];
+ memset(&random_data, 0, sizeof(random_data));
+ memset(random_buf, 0, 8);
+ r = myinitstate_r(random(), random_buf, 8, &random_data); CKERR(r);
+
+ uint8_t keybuf[args->key_size], valbuf[args->val_size];
+ memset(keybuf, 0, sizeof keybuf);
+ memset(valbuf, 0, sizeof valbuf);
+ DBT key, val;
+ dbt_init(&key, keybuf, args->key_size);
+ dbt_init(&val, valbuf, args->val_size);
+
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ if (args->num_elements >= min_size_for_loader) {
+ uint32_t db_flags = DB_PRELOCKED_WRITE;
+ uint32_t dbt_flags = 0;
+ r = env->create_loader(env, txn, &loader, db, 1, &db, &db_flags, &dbt_flags, 0); CKERR(r);
+ }
+
+ for (int i = 0; i < args->num_elements; i++) {
+ fill_key_buf(i, keybuf, args);
+
+ // Correctness tests map every key to zeroes. Perf tests fill
+ // values with random bytes, based on compressibility.
+ if (fill_with_zeroes) {
+ fill_val_buf(0, valbuf, args->val_size);
+ } else {
+ fill_val_buf_random(&random_data, valbuf, args);
+ }
+
+ r = loader ? loader->put(loader, &key, &val) :
+ db->put(db, txn, &key, &val, DB_PRELOCKED_WRITE);
+ CKERR(r);
+
+ if (i > 0 && i % puts_per_txn == 0) {
+ if (verbose) {
+ report_overall_fill_table_progress(args, puts_per_txn);
+ }
+ // commit and begin a new txn if we're not using the loader
+ if (loader == nullptr) {
+ r = txn->commit(txn, 0); CKERR(r);
+ r = env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ }
+ }
+ }
+
+ if (loader) {
+ r = loader->close(loader); CKERR(r);
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+}
+
+struct fill_table_worker_info {
+ struct cli_args *args;
+ DB_ENV *env;
+ DB *db;
+ bool fill_with_zeroes;
+};
+
+static void fill_table_worker(void *arg) {
+ struct fill_table_worker_info *CAST_FROM_VOIDP(info, arg);
+ fill_single_table(info->env, info->db, info->args, info->fill_with_zeroes);
+ toku_free(info);
+}
+
+static int fill_tables_default(DB_ENV *env, DB **dbs, struct cli_args *args, bool fill_with_zeroes) {
+ const int num_cores = toku_os_get_number_processors();
+ // Use at most cores / 2 worker threads, since we want some other cores to
+ // be used for internal engine work (i.e. flushes, loader threads, etc.).
+ const int max_num_workers = (num_cores + 1) / 2;
+ const int num_workers = args->num_DBs < max_num_workers ? args->num_DBs : max_num_workers;
+ KIBBUTZ kibbutz = NULL;
+ int r = toku_kibbutz_create(num_workers, &kibbutz);
+ assert(r == 0);
+ for (int i = 0; i < args->num_DBs; i++) {
+ struct fill_table_worker_info *XCALLOC(info);
+ info->env = env;
+ info->db = dbs[i];
+ info->args = args;
+ info->fill_with_zeroes = fill_with_zeroes;
+ toku_kibbutz_enq(kibbutz, fill_table_worker, info);
+ }
+ toku_kibbutz_destroy(kibbutz);
+ return 0;
+}
+
+// fill_tables() is called when the tables are first created.
+// set this function if you want custom table contents.
+static int (*fill_tables)(DB_ENV *env, DB **dbs, struct cli_args *args, bool fill_with_zeroes) = fill_tables_default;
+
+static void do_xa_recovery(DB_ENV* env) {
+ DB_PREPLIST preplist[1];
+ long num_recovered= 0;
+ int r = 0;
+ r = env->txn_recover(env, preplist, 1, &num_recovered, DB_NEXT);
+ while(r==0 && num_recovered > 0) {
+ DB_TXN* recovered_txn = preplist[0].txn;
+ if (verbose) {
+ printf("recovering transaction with id %" PRIu64 " \n", recovered_txn->id64(recovered_txn));
+ }
+ if (random() % 2 == 0) {
+ int rr = recovered_txn->commit(recovered_txn, 0);
+ CKERR(rr);
+ }
+ else {
+ int rr = recovered_txn->abort(recovered_txn);
+ CKERR(rr);
+ }
+ r = env->txn_recover(env, preplist, 1, &num_recovered, DB_NEXT);
+ }
+}
+
+static int open_tables(DB_ENV **env_res, DB **db_res, int num_DBs,
+ int (*bt_compare)(DB *, const DBT *, const DBT *),
+ struct cli_args *cli_args) {
+ int r;
+ struct env_args env_args = cli_args->env_args;
+
+ DB_ENV *env;
+ db_env_set_num_bucket_mutexes(env_args.num_bucket_mutexes);
+ r = db_env_create(&env, 0); assert(r == 0);
+ r = env->set_redzone(env, 0); CKERR(r);
+ if (!cli_args->memcmp_keys) {
+ r = env->set_default_bt_compare(env, bt_compare); CKERR(r);
+ }
+ r = env->set_lk_max_memory(env, env_args.lk_max_memory); CKERR(r);
+ env->set_update(env, env_args.update_function);
+ r = env->set_cachesize(env, env_args.cachetable_size / (1 << 30), env_args.cachetable_size % (1 << 30), 1); CKERR(r);
+ r = env->set_lg_bsize(env, env_args.rollback_node_size); CKERR(r);
+ if (env_args.generate_put_callback) {
+ r = env->set_generate_row_callback_for_put(env, env_args.generate_put_callback);
+ CKERR(r);
+ }
+ else {
+ r = env->set_generate_row_callback_for_put(env, generate_row_for_put);
+ CKERR(r);
+ }
+ if (env_args.generate_del_callback) {
+ r = env->set_generate_row_callback_for_del(env, env_args.generate_del_callback);
+ CKERR(r);
+ }
+ int env_flags = get_env_open_flags(cli_args);
+ r = env->open(env, env_args.envdir, DB_RECOVER | env_flags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ do_xa_recovery(env);
+ r = env->checkpointing_set_period(env, env_args.checkpointing_period); CKERR(r);
+ r = env->cleaner_set_period(env, env_args.cleaner_period); CKERR(r);
+ r = env->cleaner_set_iterations(env, env_args.cleaner_iterations); CKERR(r);
+ env->change_fsync_log_period(env, env_args.sync_period);
+ *env_res = env;
+
+ for (int i = 0; i < num_DBs; i++) {
+ DB *db;
+ r = db_create(&db, env, 0); CKERR(r);
+ before_db_open_hook(db, i);
+ open_db(db, i, cli_args);
+ db_res[i] = after_db_open_hook(env, db, i, open_db, cli_args);
+ }
+ return r;
+}
+
+static int close_tables(DB_ENV *env, DB** dbs, int num_DBs) {
+ int r;
+ for (int i = 0; i < num_DBs; i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ }
+ r = env->close(env, 0); CKERR(r);
+ return r;
+}
+
+static const struct env_args DEFAULT_ENV_ARGS = {
+ .fanout = 16,
+ .node_size = 4096,
+ .basement_node_size = 1024,
+ .rollback_node_size = 4096,
+ .checkpointing_period = 10,
+ .cleaner_period = 1,
+ .cleaner_iterations = 1,
+ .sync_period = 0,
+ .lk_max_memory = 1L * 1024 * 1024 * 1024,
+ .cachetable_size = 300000,
+ .num_bucket_mutexes = 1024,
+ .envdir = nullptr,
+ .update_function = update_op_callback,
+ .generate_put_callback = nullptr,
+ .generate_del_callback = nullptr,
+};
+
+static const struct env_args DEFAULT_PERF_ENV_ARGS = {
+ .fanout = 16,
+ .node_size = 4*1024*1024,
+ .basement_node_size = 128*1024,
+ .rollback_node_size = 4*1024*1024,
+ .checkpointing_period = 60,
+ .cleaner_period = 1,
+ .cleaner_iterations = 5,
+ .sync_period = 0,
+ .lk_max_memory = 1L * 1024 * 1024 * 1024,
+ .cachetable_size = 1<<30,
+ .num_bucket_mutexes = 1024 * 1024,
+ .envdir = nullptr,
+ .update_function = nullptr,
+ .generate_put_callback = nullptr,
+ .generate_del_callback = nullptr,
+};
+
+static struct cli_args UU() get_default_args(void) {
+ struct cli_args DEFAULT_ARGS = {
+ .num_elements = 150000,
+ .num_DBs = 1,
+ .num_seconds = 180,
+ .join_timeout = 3600,
+ .only_create = false,
+ .only_stress = false,
+ .update_broadcast_period_ms = 2000,
+ .num_ptquery_threads = 1,
+ .do_test_and_crash = false,
+ .do_recover = false,
+ .num_update_threads = 1,
+ .num_put_threads = 1,
+ .range_query_limit = 100,
+ .serial_insert = false,
+ .interleave = false,
+ .crash_on_operation_failure = true,
+ .print_performance = false,
+ .print_thread_performance = true,
+ .print_iteration_performance = true,
+ .perf_output_format = HUMAN,
+ .compression_method = TOKU_DEFAULT_COMPRESSION_METHOD,
+ .performance_period = 1,
+ .txn_size = 1000,
+ .key_size = min_key_size,
+ .val_size = min_val_size,
+ .compressibility = 1.0,
+ .env_args = DEFAULT_ENV_ARGS,
+ .single_txn = false,
+ .warm_cache = false,
+ .blackhole = false,
+ .nolocktree = false,
+ .unique_checks = false,
+ .sync_period = 0,
+ .nolog = false,
+ .nocrashstatus = false,
+ .prelock_updates = false,
+ .disperse_keys = false,
+ .memcmp_keys = false,
+ .direct_io = false,
+ };
+ DEFAULT_ARGS.env_args.envdir = TOKU_TEST_FILENAME;
+ return DEFAULT_ARGS;
+}
+
+static struct cli_args UU() get_default_args_for_perf(void) {
+ struct cli_args args = get_default_args();
+ args.num_elements = 1000000; //default of 1M
+ args.env_args = DEFAULT_PERF_ENV_ARGS;
+ args.env_args.envdir = TOKU_TEST_FILENAME;
+ return args;
+}
+
+union val_type {
+ int32_t i32;
+ int64_t i64;
+ uint32_t u32;
+ uint64_t u64;
+ bool b;
+ double d;
+ const char *s;
+};
+
+struct arg_type;
+
+typedef bool (*match_fun)(struct arg_type *type, char *const argv[]);
+typedef int (*parse_fun)(struct arg_type *type, int *extra_args_consumed, int argc, char *const argv[]);
+typedef void (*help_fun)(struct arg_type *type, int width_name, int width_type);
+
+struct type_description {
+ const char *type_name;
+ const match_fun matches;
+ const parse_fun parse;
+ const help_fun help;
+};
+
+struct arg_type {
+ const char *name;
+ struct type_description *description;
+ union val_type default_val;
+ void *target;
+ const char *help_suffix;
+ union val_type min;
+ union val_type max;
+};
+
+#define DEFINE_NUMERIC_HELP(typename, format, member, MIN, MAX) \
+static inline void \
+help_##typename(struct arg_type *type, int width_name, int width_type) { \
+ invariant(!strncmp("--", type->name, strlen("--"))); \
+ fprintf(stderr, "\t%-*s %-*s ", width_name, type->name, width_type, type->description->type_name); \
+ fprintf(stderr, "(default %" format "%s", type->default_val.member, type->help_suffix); \
+ if (type->min.member != MIN) { \
+ fprintf(stderr, ", min %" format "%s", type->min.member, type->help_suffix); \
+ } \
+ if (type->max.member != MAX) { \
+ fprintf(stderr, ", max %" format "%s", type->max.member, type->help_suffix); \
+ } \
+ fprintf(stderr, ")\n"); \
+}
+
+DEFINE_NUMERIC_HELP(int32, PRId32, i32, INT32_MIN, INT32_MAX)
+DEFINE_NUMERIC_HELP(int64, PRId64, i64, INT64_MIN, INT64_MAX)
+DEFINE_NUMERIC_HELP(uint32, PRIu32, u32, 0, UINT32_MAX)
+DEFINE_NUMERIC_HELP(uint64, PRIu64, u64, 0, UINT64_MAX)
+DEFINE_NUMERIC_HELP(double, ".2lf", d, -HUGE_VAL, HUGE_VAL)
+static inline void
+help_bool(struct arg_type *type, int width_name, int width_type) {
+ invariant(strncmp("--", type->name, strlen("--")));
+ const char *default_value = type->default_val.b ? "yes" : "no";
+ fprintf(stderr, "\t--[no-]%-*s %-*s (default %s)\n",
+ width_name - (int)strlen("--[no-]"), type->name,
+ width_type, type->description->type_name,
+ default_value);
+}
+
+static inline void
+help_string(struct arg_type *type, int width_name, int width_type) {
+ invariant(!strncmp("--", type->name, strlen("--")));
+ const char *default_value = type->default_val.s ? type->default_val.s : "";
+ fprintf(stderr, "\t%-*s %-*s (default '%s')\n",
+ width_name, type->name,
+ width_type, type->description->type_name,
+ default_value);
+}
+
+static inline bool
+match_name(struct arg_type *type, char *const argv[]) {
+ invariant(!strncmp("--", type->name, strlen("--")));
+ return !strcmp(argv[1], type->name);
+}
+
+static inline bool
+match_bool(struct arg_type *type, char *const argv[]) {
+ invariant(strncmp("--", type->name, strlen("--")));
+ const char *string = argv[1];
+ if (strncmp(string, "--", strlen("--"))) {
+ return false;
+ }
+ string += strlen("--");
+ if (!strncmp(string, "no-", strlen("no-"))) {
+ string += strlen("no-");
+ }
+ return !strcmp(string, type->name);
+}
+
+static inline int
+parse_bool(struct arg_type *type, int *extra_args_consumed, int UU(argc), char *const argv[]) {
+ const char *string = argv[1];
+ if (!strncmp(string, "--no-", strlen("--no-"))) {
+ *((bool *)type->target) = false;
+ }
+ else {
+ *((bool *)type->target) = true;
+ }
+ *extra_args_consumed = 0;
+ return 0;
+}
+
+static inline int
+parse_string(struct arg_type *type, int *extra_args_consumed, int argc, char *const argv[]) {
+    if (argc < 3) {
+ return EINVAL;
+ }
+ *((const char **)type->target) = argv[2];
+ *extra_args_consumed = 1;
+ return 0;
+}
+
+static inline int
+parse_uint64(struct arg_type *type, int *extra_args_consumed, int argc, char *const argv[]) {
+ // Already verified name.
+
+    if (argc < 3) {
+ return EINVAL;
+ }
+ if (*argv[2] == '\0') {
+ return EINVAL;
+ }
+
+ char *endptr;
+ unsigned long long int result = strtoull(argv[2], &endptr, 0);
+ if (*endptr != '\0') {
+ return EINVAL;
+ }
+ if (result < type->min.u64 || result > type->max.u64) {
+ return ERANGE;
+ }
+ *((uint64_t*)type->target) = result;
+ *extra_args_consumed = 1;
+ return 0;
+}
+
+static inline int
+parse_int64(struct arg_type *type, int *extra_args_consumed, int argc, char *const argv[]) {
+ // Already verified name.
+
+    if (argc < 3) {
+ return EINVAL;
+ }
+ if (*argv[2] == '\0') {
+ return EINVAL;
+ }
+
+ char *endptr;
+ long long int result = strtoll(argv[2], &endptr, 0);
+ if (*endptr != '\0') {
+ return EINVAL;
+ }
+ if (result < type->min.i64 || result > type->max.i64) {
+ return ERANGE;
+ }
+ *((int64_t*)type->target) = result;
+ *extra_args_consumed = 1;
+ return 0;
+}
+
+static inline int
+parse_uint32(struct arg_type *type, int *extra_args_consumed, int argc, char *const argv[]) {
+ // Already verified name.
+
+    if (argc < 3) {
+ return EINVAL;
+ }
+ if (*argv[2] == '\0') {
+ return EINVAL;
+ }
+
+ char *endptr;
+ unsigned long int result = strtoul(argv[2], &endptr, 0);
+ if (*endptr != '\0') {
+ return EINVAL;
+ }
+ if (result < type->min.u32 || result > type->max.u32) {
+ return ERANGE;
+ }
+    *((uint32_t*)type->target) = result;
+ *extra_args_consumed = 1;
+ return 0;
+}
+
+static inline int
+parse_int32(struct arg_type *type, int *extra_args_consumed, int argc, char *const argv[]) {
+ // Already verified name.
+
+    if (argc < 3) {
+ return EINVAL;
+ }
+ if (*argv[2] == '\0') {
+ return EINVAL;
+ }
+
+ char *endptr;
+ long int result = strtol(argv[2], &endptr, 0);
+ if (*endptr != '\0') {
+ return EINVAL;
+ }
+ if (result < type->min.i32 || result > type->max.i32) {
+ return ERANGE;
+ }
+ *((int32_t*)type->target) = result;
+ *extra_args_consumed = 1;
+ return 0;
+}
+
+static inline int
+parse_double(struct arg_type *type, int *extra_args_consumed, int argc, char *const argv[]) {
+ // Already verified name.
+
+    if (argc < 3) {
+ return EINVAL;
+ }
+ if (*argv[2] == '\0') {
+ return EINVAL;
+ }
+
+ char *endptr;
+ double result = strtod(argv[2], &endptr);
+ if (*endptr != '\0') {
+ return EINVAL;
+ }
+ if (result < type->min.d || result > type->max.d) {
+ return ERANGE;
+ }
+ *((double*)type->target) = result;
+ *extra_args_consumed = 1;
+ return 0;
+}
+
+// Common case (match_name).
+#define DECLARE_TYPE_DESCRIPTION(typename) \
+ struct type_description type_##typename = { \
+ .type_name = #typename, \
+ .matches = match_name, \
+ .parse = parse_##typename, \
+ .help = help_##typename \
+ }
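+// For instance, DECLARE_TYPE_DESCRIPTION(int32) defines type_int32, which matches options
+// by name and dispatches to parse_int32/help_int32.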
+DECLARE_TYPE_DESCRIPTION(int32);
+DECLARE_TYPE_DESCRIPTION(uint32);
+DECLARE_TYPE_DESCRIPTION(int64);
+DECLARE_TYPE_DESCRIPTION(uint64);
+DECLARE_TYPE_DESCRIPTION(double);
+DECLARE_TYPE_DESCRIPTION(string);
+
+// Bools use their own match function so they are declared manually.
+struct type_description type_bool = {
+ .type_name = "bool",
+ .matches = match_bool,
+ .parse = parse_bool,
+ .help = help_bool
+};
+
+#define ARG_MATCHES(type, rest...) type->description->matches(type, rest)
+#define ARG_PARSE(type, rest...) type->description->parse(type, rest)
+#define ARG_HELP(type, rest...) type->description->help(type, rest)
+
+static inline void
+do_usage(const char *argv0, int n, struct arg_type types[/*n*/]) {
+ fprintf(stderr, "Usage:\n");
+ fprintf(stderr, "\t%s [-h|--help]\n", argv0);
+ fprintf(stderr, "\t%s [OPTIONS]\n", argv0);
+ fprintf(stderr, "\n");
+ fprintf(stderr, "OPTIONS are among:\n");
+ fprintf(stderr, "\t-q|--quiet\n");
+ fprintf(stderr, "\t-v|--verbose\n");
+ for (int i = 0; i < n; i++) {
+ struct arg_type *type = &types[i];
+ ARG_HELP(type, 35, 6);
+ }
+}
+
+static inline void parse_stress_test_args (int argc, char *const argv[], struct cli_args *args) {
+ struct cli_args default_args = *args;
+ const char *argv0=argv[0];
+
+#define MAKE_ARG(name_string, type, member, variable, suffix, min_val, max_val) { \
+ .name=(name_string), \
+ .description=&(type), \
+ .default_val={.member=default_args.variable}, \
+ .target=&(args->variable), \
+ .help_suffix=(suffix), \
+ .min={.member=min_val}, \
+ .max={.member=max_val}, \
+}
+#define MAKE_LOCAL_ARG(name_string, type, member, default, variable, suffix, min_val, max_val) { \
+ .name=(name_string), \
+ .description=&(type), \
+ .default_val={.member=default}, \
+ .target=&(variable), \
+ .help_suffix=(suffix), \
+ .min={.member=min_val}, \
+ .max={.member=max_val}, \
+}
+#define UINT32_ARG(name_string, variable, suffix) \
+ MAKE_ARG(name_string, type_uint32, u32, variable, suffix, 0, UINT32_MAX)
+#define UINT32_ARG_R(name_string, variable, suffix, min, max) \
+ MAKE_ARG(name_string, type_uint32, u32, variable, suffix, min, max)
+#define UINT64_ARG(name_string, variable, suffix) \
+ MAKE_ARG(name_string, type_uint64, u64, variable, suffix, 0, UINT64_MAX)
+#define INT32_ARG_NONNEG(name_string, variable, suffix) \
+ MAKE_ARG(name_string, type_int32, i32, variable, suffix, 0, INT32_MAX)
+#define INT32_ARG_R(name_string, variable, suffix, min, max) \
+ MAKE_ARG(name_string, type_int32, i32, variable, suffix, min, max)
+#define DOUBLE_ARG_R(name_string, variable, suffix, min, max) \
+ MAKE_ARG(name_string, type_double, d, variable, suffix, min, max)
+#define BOOL_ARG(name_string, variable) \
+ MAKE_ARG(name_string, type_bool, b, variable, "", false, false)
+#define STRING_ARG(name_string, variable) \
+ MAKE_ARG(name_string, type_string, s, variable, "", "", "")
+#define LOCAL_STRING_ARG(name_string, variable, default) \
+ MAKE_LOCAL_ARG(name_string, type_string, s, default, variable, "", "", "")
+
+ const char *perf_format_s = nullptr;
+ const char *compression_method_s = nullptr;
+ const char *print_engine_status_s = nullptr;
+ struct arg_type arg_types[] = {
+ INT32_ARG_NONNEG("--num_elements", num_elements, ""),
+ INT32_ARG_NONNEG("--num_DBs", num_DBs, ""),
+ INT32_ARG_NONNEG("--num_seconds", num_seconds, "s"),
+ INT32_ARG_NONNEG("--fanout", env_args.fanout, ""),
+ INT32_ARG_NONNEG("--node_size", env_args.node_size, " bytes"),
+ INT32_ARG_NONNEG("--basement_node_size", env_args.basement_node_size, " bytes"),
+ INT32_ARG_NONNEG("--rollback_node_size", env_args.rollback_node_size, " bytes"),
+ INT32_ARG_NONNEG("--checkpointing_period", env_args.checkpointing_period, "s"),
+ INT32_ARG_NONNEG("--cleaner_period", env_args.cleaner_period, "s"),
+ INT32_ARG_NONNEG("--cleaner_iterations", env_args.cleaner_iterations, ""),
+ INT32_ARG_NONNEG("--sync_period", env_args.sync_period, "ms"),
+ INT32_ARG_NONNEG("--update_broadcast_period", update_broadcast_period_ms, "ms"),
+ INT32_ARG_NONNEG("--num_ptquery_threads", num_ptquery_threads, " threads"),
+ INT32_ARG_NONNEG("--num_put_threads", num_put_threads, " threads"),
+ INT32_ARG_NONNEG("--num_update_threads", num_update_threads, " threads"),
+ INT32_ARG_NONNEG("--range_query_limit", range_query_limit, " rows"),
+
+ UINT32_ARG("--txn_size", txn_size, " rows"),
+ UINT32_ARG("--num_bucket_mutexes", env_args.num_bucket_mutexes, " mutexes"),
+
+ INT32_ARG_R("--join_timeout", join_timeout, "s", 1, INT32_MAX),
+ INT32_ARG_R("--performance_period", performance_period, "s", 1, INT32_MAX),
+
+ UINT64_ARG("--cachetable_size", env_args.cachetable_size, " bytes"),
+ UINT64_ARG("--lk_max_memory", env_args.lk_max_memory, " bytes"),
+
+ DOUBLE_ARG_R("--compressibility", compressibility, "", 0.0, 1.0),
+
+        //TODO: when outputting help, skip min/max values that are just the min/max of the data range.
+ UINT32_ARG_R("--key_size", key_size, " bytes", min_key_size, UINT32_MAX),
+ UINT32_ARG_R("--val_size", val_size, " bytes", min_val_size, UINT32_MAX),
+
+ BOOL_ARG("serial_insert", serial_insert),
+ BOOL_ARG("interleave", interleave),
+ BOOL_ARG("crash_on_operation_failure", crash_on_operation_failure),
+ BOOL_ARG("single_txn", single_txn),
+ BOOL_ARG("warm_cache", warm_cache),
+ BOOL_ARG("print_performance", print_performance),
+ BOOL_ARG("print_thread_performance", print_thread_performance),
+ BOOL_ARG("print_iteration_performance", print_iteration_performance),
+ BOOL_ARG("only_create", only_create),
+ BOOL_ARG("only_stress", only_stress),
+ BOOL_ARG("test", do_test_and_crash),
+ BOOL_ARG("recover", do_recover),
+ BOOL_ARG("blackhole", blackhole),
+ BOOL_ARG("nolocktree", nolocktree),
+ BOOL_ARG("unique_checks", unique_checks),
+ BOOL_ARG("nolog", nolog),
+ BOOL_ARG("nocrashstatus", nocrashstatus),
+ BOOL_ARG("prelock_updates", prelock_updates),
+ BOOL_ARG("disperse_keys", disperse_keys),
+ BOOL_ARG("memcmp_keys", memcmp_keys),
+ BOOL_ARG("direct_io", direct_io),
+
+ STRING_ARG("--envdir", env_args.envdir),
+
+ LOCAL_STRING_ARG("--perf_format", perf_format_s, "human"),
+ LOCAL_STRING_ARG("--compression_method", compression_method_s, "quicklz"),
+ LOCAL_STRING_ARG("--print_engine_status", print_engine_status_s, nullptr),
+ //TODO(add --quiet, -v, -h)
+ };
+#undef UINT32_ARG
+#undef UINT32_ARG_R
+#undef UINT64_ARG
+#undef DOUBLE_ARG_R
+#undef BOOL_ARG
+#undef STRING_ARG
+#undef MAKE_ARG
+
+ int num_arg_types = sizeof(arg_types) / sizeof(arg_types[0]);
+
+ int resultcode = 0;
+ while (argc > 1) {
+ if (!strcmp(argv[1], "-v") || !strcmp(argv[1], "--verbose")) {
+ verbose++;
+ argv++;
+ argc--;
+ }
+ else if (!strcmp(argv[1], "-q") || !strcmp(argv[1], "--quiet")) {
+ verbose = 0;
+ argv++;
+ argc--;
+ }
+ else if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
+ fprintf(stderr, "HELP INVOKED\n");
+ do_usage(argv0, num_arg_types, arg_types);
+ exit(0);
+ }
+ else {
+ bool found = false;
+ for (int i = 0; i < num_arg_types; i++) {
+ struct arg_type *type = &arg_types[i];
+ if (ARG_MATCHES(type, argv)) {
+ int extra_args_consumed;
+ resultcode = ARG_PARSE(type, &extra_args_consumed, argc, argv);
+ if (resultcode) {
+ fprintf(stderr, "ERROR PARSING [%s]\n", argv[1]);
+ do_usage(argv0, num_arg_types, arg_types);
+ exit(resultcode);
+ }
+ found = true;
+ argv += extra_args_consumed + 1;
+ argc -= extra_args_consumed + 1;
+ break;
+ }
+ }
+ if (!found) {
+ fprintf(stderr, "COULD NOT PARSE [%s]\n", argv[1]);
+ do_usage(argv0, num_arg_types, arg_types);
+ exit(EINVAL);
+ }
+ }
+ }
+ args->print_engine_status = print_engine_status_s;
+ if (compression_method_s != nullptr) {
+ if (strcmp(compression_method_s, "quicklz") == 0) {
+ args->compression_method = TOKU_QUICKLZ_METHOD;
+ } else if (strcmp(compression_method_s, "zlib") == 0) {
+ args->compression_method = TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD;
+ } else if (strcmp(compression_method_s, "lzma") == 0) {
+ args->compression_method = TOKU_LZMA_METHOD;
+ } else if (strcmp(compression_method_s, "snappy") == 0) {
+ args->compression_method = TOKU_SNAPPY_METHOD;
+ } else if (strcmp(compression_method_s, "none") == 0) {
+ args->compression_method = TOKU_NO_COMPRESSION;
+ } else {
+ fprintf(stderr, "valid values for --compression_method are \"quicklz\", \"zlib\", \"lzma\", \"snappy\", and \"none\"\n");
+ do_usage(argv0, num_arg_types, arg_types);
+ exit(EINVAL);
+ }
+ }
+ if (perf_format_s != nullptr) {
+ if (!strcmp(perf_format_s, "human")) {
+ args->perf_output_format = HUMAN;
+ } else if (!strcmp(perf_format_s, "csv")) {
+ args->perf_output_format = CSV;
+ } else if (!strcmp(perf_format_s, "tsv")) {
+ args->perf_output_format = TSV;
+ } else {
+ fprintf(stderr, "valid values for --perf_format are \"human\", \"csv\", and \"tsv\"\n");
+ do_usage(argv0, num_arg_types, arg_types);
+ exit(EINVAL);
+ }
+ }
+ if (args->only_create && args->only_stress) {
+ fprintf(stderr, "used --only_stress and --only_create\n");
+ do_usage(argv0, num_arg_types, arg_types);
+ exit(EINVAL);
+ }
+}
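+// Example invocation (hypothetical binary name and values):
+//     ./stress_test.tdb --num_elements 1000000 --num_seconds 60 --num_ptquery_threads 4 \
+//         --compression_method zlib --print_performance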
+
+static void
+stress_table(DB_ENV *, DB **, struct cli_args *);
+
+static int
+stress_dbt_cmp_legacy(const DBT *a, const DBT *b) {
+ int x = *(int *) a->data;
+ int y = *(int *) b->data;
+ if (x < y) {
+ return -1;
+ } else if (x > y) {
+ return +1;
+ } else {
+ return 0;
+ }
+}
+
+static int
+stress_dbt_cmp(const DBT *a, const DBT *b) {
+    // Keys are compared only by their first 8 bytes,
+    // interpreted as a little-endian 64-bit integer.
+    // The rest of the key is just padding.
+ uint64_t x = *(uint64_t *) a->data;
+ uint64_t y = *(uint64_t *) b->data;
+ if (x < y) {
+ return -1;
+ } else if (x > y) {
+ return +1;
+ } else {
+ return 0;
+ }
+}
+
+static int
+stress_cmp(DB *db, const DBT *a, const DBT *b) {
+ assert(db && a && b);
+ assert(a->size == b->size);
+
+ if (a->size == sizeof(int)) {
+ // Legacy comparison: keys must be >= 4 bytes
+ return stress_dbt_cmp_legacy(a, b);
+ } else {
+ // Modern comparison: keys must be >= 8 bytes
+ invariant(a->size >= sizeof(uint64_t));
+ return stress_dbt_cmp(a, b);
+ }
+}
+
+static void
+do_warm_cache(DB_ENV *env, DB **dbs, struct cli_args *args)
+{
+ struct scan_op_extra soe;
+ soe.fast = true;
+ soe.fwd = true;
+ soe.prefetch = true;
+ struct arg scan_arg;
+ arg_init(&scan_arg, dbs, env, args);
+ scan_arg.operation_extra = &soe;
+ scan_arg.operation = scan_op_no_check;
+ scan_arg.lock_type = STRESS_LOCK_NONE;
+ DB_TXN* txn = nullptr;
+ // don't take serializable read locks when scanning.
+ int r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT); CKERR(r);
+ // make sure the scan doesn't terminate early
+ run_test = true;
+ // warm up each DB in parallel
+ scan_op_no_check_parallel(txn, &scan_arg, &soe, nullptr);
+ r = txn->commit(txn,0); CKERR(r);
+}
+
+static void
+UU() stress_recover(struct cli_args *args) {
+ DB_ENV* env = nullptr;
+ DB* dbs[args->num_DBs];
+ memset(dbs, 0, sizeof(dbs));
+ { int chk_r = open_tables(&env,
+ dbs,
+ args->num_DBs,
+ stress_cmp,
+ args); CKERR(chk_r); }
+
+ DB_TXN* txn = nullptr;
+ struct arg recover_args;
+ arg_init(&recover_args, dbs, env, args);
+ int r = env->txn_begin(env, 0, &txn, recover_args.txn_flags);
+ CKERR(r);
+ struct scan_op_extra soe = {
+ .fast = true,
+ .fwd = true,
+ .prefetch = false
+ };
+ // make sure the scan doesn't terminate early
+ run_test = true;
+ r = scan_op(txn, &recover_args, &soe, nullptr);
+ CKERR(r);
+ { int chk_r = txn->commit(txn,0); CKERR(chk_r); }
+ { int chk_r = close_tables(env, dbs, args->num_DBs); CKERR(chk_r); }
+}
+
+static void
+open_and_stress_tables(struct cli_args *args, bool fill_with_zeroes, int (*cmp)(DB *, const DBT *, const DBT *))
+{
+ if ((args->key_size < 8 && args->key_size != 4) ||
+ (args->val_size < 8 && args->val_size != 4)) {
+ fprintf(stderr, "The only valid key/val sizes are 4, 8, and > 8.\n");
+ return;
+ }
+
+ setlocale(LC_NUMERIC, "en_US.UTF-8");
+ DB_ENV* env = nullptr;
+ DB* dbs[args->num_DBs];
+ memset(dbs, 0, sizeof(dbs));
+ db_env_enable_engine_status(args->nocrashstatus ? false : true);
+ db_env_set_direct_io(args->direct_io ? true : false);
+ if (!args->only_stress) {
+ create_tables(
+ &env,
+ dbs,
+ args->num_DBs,
+ cmp,
+ args
+ );
+ { int chk_r = fill_tables(env, dbs, args, fill_with_zeroes); CKERR(chk_r); }
+ { int chk_r = close_tables(env, dbs, args->num_DBs); CKERR(chk_r); }
+ }
+ if (!args->only_create) {
+ { int chk_r = open_tables(&env,
+ dbs,
+ args->num_DBs,
+ cmp,
+ args); CKERR(chk_r); }
+ if (args->warm_cache) {
+ do_warm_cache(env, dbs, args);
+ }
+ stress_table(env, dbs, args);
+ { int chk_r = close_tables(env, dbs, args->num_DBs); CKERR(chk_r); }
+ }
+}
+
+static void
+UU() stress_test_main(struct cli_args *args) {
+ // Begin the test with fixed size values equal to zero.
+ // This is important for correctness testing.
+ open_and_stress_tables(args, true, stress_cmp);
+}
+
+static void
+UU() perf_test_main(struct cli_args *args) {
+ // Do not begin the test by creating a table of all zeroes.
+ // We want to control the row size and its compressibility.
+ open_and_stress_tables(args, false, stress_cmp);
+}
+
+static void
+UU() perf_test_main_with_cmp(struct cli_args *args, int (*cmp)(DB *, const DBT *, const DBT *)) {
+ // Do not begin the test by creating a table of all zeroes.
+ // We want to control the row size and its compressibility.
+ open_and_stress_tables(args, false, cmp);
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/time_create_db.cc b/storage/tokudb/PerconaFT/src/tests/time_create_db.cc
new file mode 100644
index 00000000..f8f56e85
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/time_create_db.cc
@@ -0,0 +1,122 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <vector>
+#include <db.h>
+#include "toku_time.h"
+
+static void open_dbs(DB_ENV *env, int max_dbs) {
+ std::vector<DB *> dbs;
+
+ uint64_t t_start = toku_current_time_microsec();
+    // open DBs
+ {
+ uint64_t t0 = toku_current_time_microsec();
+ for (int i = 1; i <= max_dbs; i++) {
+ int r;
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+ char db_name[32];
+ sprintf(db_name, "db%d", i);
+ r = db->open(db, NULL, db_name, NULL, DB_BTREE, DB_CREATE, 0666);
+ assert(r == 0);
+ dbs.push_back(db);
+ if ((i % 100) == 0) {
+ uint64_t t = toku_current_time_microsec();
+ fprintf(stderr, "open %d %" PRIu64 "\n", i, t - t0);
+ t0 = t;
+ }
+ }
+ }
+ uint64_t t_end = toku_current_time_microsec();
+ fprintf(stderr, "%" PRIu64 "\n", t_end - t_start);
+
+    // close DBs
+ {
+ uint64_t t0 = toku_current_time_microsec();
+ int i = 1;
+ for (std::vector<DB *>::iterator dbi = dbs.begin(); dbi != dbs.end(); dbi++, i++) {
+ DB *db = *dbi;
+ int r = db->close(db, 0);
+ assert(r == 0);
+ if ((i % 100) == 0) {
+ uint64_t t = toku_current_time_microsec();
+ printf("close %d %" PRIu64 "\n", i, t - t0);
+ t0 = t;
+ }
+ }
+ }
+}
+
+int test_main (int argc, char * const argv[]) {
+ int r;
+ int max_dbs = 1;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(argv[i], "-q") == 0) {
+ if (verbose > 0) verbose--;
+ continue;
+ }
+ max_dbs = atoi(argv[i]);
+ continue;
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+ env->set_errfile(env, stderr);
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK+DB_INIT_MPOOL+DB_INIT_TXN+DB_INIT_LOG + DB_CREATE + DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ open_dbs(env, max_dbs);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/transactional_fileops.cc b/storage/tokudb/PerconaFT/src/tests/transactional_fileops.cc
new file mode 100644
index 00000000..858a1d70
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/transactional_fileops.cc
@@ -0,0 +1,468 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* The purpose of this test is to verify the correct behavior of transactional file
+ * operations. File operations to be tested (and expected results) are:
+ * - open
+ * - create (dictionary is created only if transaction is committed)
+ * - rename (dictionary is renamed only if transaction is committed)
+ * - delete (dictionary is deleted only if transaction is committed)
+ *
+ * The following subtests are here:
+ *
+ * test_fileops_1:
+ * Verify that operations appear effective within a transaction,
+ * but are truly effective only if the transaction is committed.
+ *
+ * test_fileops_2:
+ * Verify that attempting to open, remove or rename a dictionary that
+ * is marked for removal or renaming by another transaction in
+ * progress results in a DB_LOCK_NOTGRANTED error code.
+ *
+ * test_fileops_3:
+ * Verify that the correct error codes are returned when attempting
+ * miscellaneous operations that should fail.
+ *
+ *
+ * Future work (possible enhancements to this test, if desired):
+ * - verify correct behavior with "subdb" names (e.g. foo/bar)
+ * - beyond verifying that a dictionary exists, open it, read one entry, and verify that the entry is correct
+ *   (especially useful for a renamed dictionary)
+ * (especially useful for renamed dictionary)
+ * - perform repeatedly in multiple threads
+ *
+ */
+
+
+#include "test.h"
+#include <db.h>
+
+static DB_ENV *env;
+static FILE *error_file = NULL;
+
+static void
+setup (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ if (verbose==0) {
+ char errfname[TOKU_PATH_MAX+1];
+ error_file = fopen(toku_path_join(errfname, 2, TOKU_TEST_FILENAME, "stderr"), "w"); assert(error_file);
+ }
+ else error_file = stderr;
+
+ r=db_env_create(&env, 0); CKERR(r);
+ env->set_dir_per_db(env, true);
+ env->set_errfile(env, error_file ? error_file : stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+}
+
+
+
+static void
+test_shutdown(void) {
+ int r;
+ r=env->close(env, 0); CKERR(r);
+ if (verbose==0) {
+ fclose(error_file);
+ error_file = NULL;
+ }
+}
+
+
+// create dictionaries a.db, b.db, c.db, d.db
+static void
+create_abcd(void) {
+ int r;
+ DB_TXN * txn;
+ DB * db_a;
+ DB * db_b;
+ DB * db_c;
+ DB * db_d;
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db_create(&db_a, env, 0); CKERR(r);
+ r=db_create(&db_b, env, 0); CKERR(r);
+ r=db_create(&db_c, env, 0); CKERR(r);
+ r=db_create(&db_d, env, 0); CKERR(r);
+
+ r=db_a->open(db_a, txn, "a.db", 0, DB_BTREE, DB_CREATE, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_b->open(db_b, txn, "b.db", 0, DB_BTREE, DB_CREATE, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_c->open(db_c, txn, "c.db", 0, DB_BTREE, DB_CREATE, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_d->open(db_d, txn, "d.db", 0, DB_BTREE, DB_CREATE, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+
+ r=db_a->close(db_a, 0); CKERR(r);
+ r=db_b->close(db_b, 0); CKERR(r);
+ r=db_c->close(db_c, 0); CKERR(r);
+
+ r=txn->commit(txn, 0); CKERR(r);
+
+ r=db_d->close(db_d, 0); CKERR(r); //Should work whether close is before or after commit. Do one after.
+}
+
+
+
+// delete b
+// rename c to c2
+// create x
+static void
+perform_ops(DB_TXN * txn) {
+ int r;
+ DB * db_x;
+
+ r = env->dbremove(env, txn, "b.db", NULL, 0); CKERR(r);
+
+ r = env->dbrename(env, txn, "c.db", NULL, "c2.db", 0); CKERR(r);
+
+ r=db_create(&db_x, env, 0); CKERR(r);
+ r=db_x->open(db_x, txn, "x.db", 0, DB_BTREE, DB_CREATE, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_x->close(db_x, 0); CKERR(r); // abort requires db be closed first
+}
+
+
+// verify that:
+// dictionaries a.db, b.db, c.db, d.db exist
+// dictionaries x.db and c2.db do not exist
+static void
+verify_abcd(void) {
+ int r;
+ DB_TXN * txn;
+ DB * db_a;
+ DB * db_b;
+ DB * db_c;
+ DB * db_d;
+ DB * db_x;
+ DB * db_c2;
+
+ r=env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ r=db_create(&db_a, env, 0); CKERR(r);
+ r=db_create(&db_b, env, 0); CKERR(r);
+ r=db_create(&db_c, env, 0); CKERR(r);
+ r=db_create(&db_d, env, 0); CKERR(r);
+ r=db_create(&db_x, env, 0); CKERR(r);
+ r=db_create(&db_c2, env, 0); CKERR(r);
+
+ // should exist:
+ r=db_a->open(db_a, txn, "a.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_b->open(db_b, txn, "b.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_c->open(db_c, txn, "c.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_d->open(db_d, txn, "d.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+
+ // should not exist:
+ r=db_x->open(db_x, txn, "x.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR2(r, ENOENT);
+ r=db_c2->open(db_c2, txn, "c2.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR2(r, ENOENT);
+
+ r=db_a->close(db_a, 0); CKERR(r);
+ r=db_b->close(db_b, 0); CKERR(r);
+ r=db_c->close(db_c, 0); CKERR(r);
+ r=db_d->close(db_d, 0); CKERR(r);
+ r=db_x->close(db_x, 0); CKERR(r);
+ r=db_c2->close(db_c2, 0); CKERR(r);
+
+ r=txn->commit(txn, 0); CKERR(r);
+}
+
+
+// verify that:
+// dictionary a.db exists
+// dictionaries b.db, c.db do not exist
+// dictionary c2.db exists
+// dictionary d.db exists
+// dictionary x.db exists
+static void
+verify_ac2dx(DB_TXN * parent_txn) {
+ int r;
+ DB_TXN * txn;
+ DB * db_a;
+ DB * db_b;
+ DB * db_c;
+ DB * db_d;
+ DB * db_x;
+ DB * db_c2;
+
+ r=env->txn_begin(env, parent_txn, &txn, 0); CKERR(r);
+ r=db_create(&db_a, env, 0); CKERR(r);
+ r=db_create(&db_b, env, 0); CKERR(r);
+ r=db_create(&db_c, env, 0); CKERR(r);
+ r=db_create(&db_d, env, 0); CKERR(r);
+ r=db_create(&db_x, env, 0); CKERR(r);
+ r=db_create(&db_c2, env, 0); CKERR(r);
+
+ // should exist:
+ r=db_a->open(db_a, txn, "a.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_c2->open(db_c2, txn, "c2.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_d->open(db_d, txn, "d.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_x->open(db_x, txn, "x.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+
+ // should not exist:
+ r=db_b->open(db_b, txn, "b.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR2(r, ENOENT);
+ r=db_c->open(db_c, txn, "c.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR2(r, ENOENT);
+
+ r=db_a->close(db_a, 0); CKERR(r);
+ r=db_b->close(db_b, 0); CKERR(r);
+ r=db_c->close(db_c, 0); CKERR(r);
+ r=db_d->close(db_d, 0); CKERR(r);
+ r=db_x->close(db_x, 0); CKERR(r);
+ r=db_c2->close(db_c2, 0); CKERR(r);
+
+ r=txn->commit(txn, 0); CKERR(r);
+}
+
+
+static void
+test_fileops_1(void) {
+ int r;
+ DB_TXN *txn;
+
+ create_abcd();
+ verify_abcd();
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ perform_ops(txn);
+ verify_ac2dx(txn); // verify that operations appear effective within this txn
+ r=txn->abort(txn); CKERR(r);
+
+ // verify that aborted transaction changed nothing
+ verify_abcd();
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ perform_ops(txn);
+ verify_ac2dx(txn); // verify that operations appear effective within this txn
+ r=txn->commit(txn, 0); CKERR(r);
+
+ // verify that committed transaction actually changed db
+ verify_ac2dx(NULL);
+}
+
+
+
+static void
+verify_locked_open(const char * name) {
+ int r;
+ DB_TXN * txn;
+ DB * db;
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, txn, name, 0, DB_BTREE, DB_CREATE, S_IRWXU|S_IRWXG|S_IRWXO);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r=db->close(db, 0); CKERR(r); // always safe to close
+ r=txn->abort(txn); CKERR(r);
+}
+
+static void
+verify_locked_remove(const char * name) {
+ int r;
+ DB_TXN * txn;
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = env->dbremove(env, txn, name, NULL, 0);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r=txn->abort(txn); CKERR(r);
+}
+
+static void
+verify_locked_rename(const char * oldname, const char * newname) {
+ int r;
+ DB_TXN * txn;
+
+ r=env->txn_begin(env, 0, &txn, 0); CKERR(r);
+ r = env->dbrename(env, txn, oldname, NULL, newname, 0);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r=txn->abort(txn); CKERR(r);
+}
+
+
+// Purpose of test_fileops_2() is to verify correct operation of
+// directory range locks. It should not be possible to open or
+// rename or remove a dictionary that is marked for removal or
+// rename by another open transaction.
+static void
+test_fileops_2(void) {
+ int r;
+ DB_TXN * txn_a;
+
+ verify_ac2dx(NULL); // should still exist
+
+ // begin txn_a
+ // remove a
+ // create e
+ // rename x->x2
+ // rename c2->c3
+ // open x2, c3, should succeed
+ // close x2, c3
+ {
+ DB * db_e;
+ DB * db_c3;
+ DB * db_x2;
+
+ r=env->txn_begin(env, 0, &txn_a, 0); CKERR(r);
+ r=db_create(&db_e, env, 0); CKERR(r);
+ r=db_create(&db_x2, env, 0); CKERR(r);
+ r=db_create(&db_c3, env, 0); CKERR(r);
+
+ r = env->dbremove(env, txn_a, "a.db", NULL, 0); CKERR(r);
+ r=db_e->open(db_e, txn_a, "e.db", 0, DB_BTREE, DB_CREATE, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r = env->dbrename(env, txn_a, "x.db", NULL, "x2.db", 0); CKERR(r);
+ r = env->dbrename(env, txn_a, "c2.db", NULL, "c3.db", 0); CKERR(r);
+
+ r=db_x2->open(db_x2, txn_a, "x2.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db_c3->open(db_c3, txn_a, "c3.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+
+ r=db_e->close(db_e, 0); CKERR(r); // abort requires db be closed first
+ r=db_x2->close(db_x2, 0); CKERR(r); // abort requires db be closed first
+ r=db_c3->close(db_c3, 0); CKERR(r); // abort requires db be closed first
+
+ }
+
+ // within another transaction:
+ // open a, should fail DB_LOCK_NOTGRANTED
+ // open e, should fail DB_LOCK_NOTGRANTED
+ // open x, should fail DB_LOCK_NOTGRANTED
+ // open x2, should fail DB_LOCK_NOTGRANTED
+ // open c2, should fail DB_LOCK_NOTGRANTED
+ // open c3, should fail DB_LOCK_NOTGRANTED
+ // remove a, e, x, x2, c2, c3 DB_LOCK_NOTGRANTED
+ // rename a, e, x, x2, c2, c3 DB_LOCK_NOTGRANTED
+
+ verify_locked_open("a.db");
+ verify_locked_open("e.db");
+ verify_locked_open("x.db");
+ verify_locked_open("x2.db");
+ verify_locked_open("c2.db");
+ verify_locked_open("c3.db");
+
+ verify_locked_remove("a.db");
+ verify_locked_remove("e.db");
+ verify_locked_remove("x.db");
+ verify_locked_remove("x2.db");
+ verify_locked_remove("c2.db");
+ verify_locked_remove("c3.db");
+
+ verify_locked_rename("a.db", "z.db");
+ verify_locked_rename("e.db", "z.db");
+ verify_locked_rename("x.db", "z.db");
+ verify_locked_rename("x2.db", "z.db");
+ verify_locked_rename("c2.db", "z.db");
+ verify_locked_rename("c3.db", "z.db");
+
+ verify_locked_rename("d.db", "a.db");
+ verify_locked_rename("d.db", "e.db");
+ verify_locked_rename("d.db", "x.db");
+ verify_locked_rename("d.db", "x2.db");
+ verify_locked_rename("d.db", "c2.db");
+ verify_locked_rename("d.db", "c3.db");
+
+
+ r=txn_a->abort(txn_a); CKERR(r);
+
+}
+
+
+static void
+test_fileops_3(void) {
+ // verify cannot remove an open db
+
+ int r;
+ DB_TXN * txn_a;
+ DB_TXN * txn_b;
+ DB * db_d;
+
+ r=env->txn_begin(env, 0, &txn_a, 0); CKERR(r);
+ r=db_create(&db_d, env, 0); CKERR(r);
+ r=db_d->open(db_d, txn_a, "d.db", 0, DB_BTREE, 0, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+
+ // Verify correct error return codes when trying to
+ // remove or rename an open dictionary
+ r=env->txn_begin(env, 0, &txn_b, 0); CKERR(r);
+ r = env->dbremove(env, txn_b, "d.db", NULL, 0);
+ CKERR2(r, EINVAL);
+ r = env->dbrename(env, txn_b, "d.db", NULL, "z.db", 0);
+ CKERR2(r, EINVAL);
+ r = env->dbrename(env, txn_b, "a.db", NULL, "d.db", 0);
+ CKERR2(r, EINVAL);
+ r=db_d->close(db_d, 0); CKERR(r);
+ r=txn_b->abort(txn_b); CKERR(r);
+
+
+ // verify correct error return codes when trying to
+ // remove or rename a non-existent dictionary
+ r = env->dbremove(env, txn_a, "nonexistent.db", NULL, 0);
+ CKERR2(r, ENOENT);
+ r = env->dbrename(env, txn_a, "nonexistent.db", NULL, "z.db", 0);
+ CKERR2(r, ENOENT);
+
+ // verify correct error return code when trying to
+ // rename a dictionary to a name that already exists
+ r = env->dbrename(env, txn_a, "a.db", NULL, "d.db", 0);
+ CKERR2(r, EEXIST);
+
+ // verify correct error return code when trying to
+ // rename a dictionary to a name that is beyond the limit
+ // of the operating system.
+ char longname[FILENAME_MAX+11];
+ memset(longname, 'b', FILENAME_MAX+7);
+ memcpy(longname+FILENAME_MAX+7, ".db", 4);
+ r = env->dbrename(env, txn_a, "a.db", NULL, longname, 0);
+ CKERR2(r, ENAMETOOLONG);
+ r=txn_a->abort(txn_a); CKERR(r);
+}
+
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ if (verbose >= 2) {
+ printf("Immediately after setup:\n");
+ print_engine_status(env);
+ }
+ test_fileops_1();
+ if (verbose >= 2) {
+ printf("After test_1:\n");
+ print_engine_status(env);
+ }
+ test_fileops_2();
+ test_fileops_3();
+ if (verbose >= 2) {
+ printf("After test_2 and test_3:\n");
+ print_engine_status(env);
+ }
+ test_shutdown();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc b/storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc
new file mode 100644
index 00000000..469e78f4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/txn_manager_handle_snapshot_atomicity.cc
@@ -0,0 +1,217 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+//In response to the read-committed crash bug seen with sysbench, this test was created to
+//check the atomicity of the txn manager when it takes a snapshot for a child txn.
+//The test is expected to fail without the read-committed fix.
+
+#include "test.h"
+#include "toku_pthread.h"
+#include "ydb.h"
+struct test_sync {
+ int state;
+ toku_mutex_t lock;
+ toku_cond_t cv;
+};
+
+static void test_sync_init(struct test_sync *UU(sync)) {
+#if TOKU_DEBUG_TXN_SYNC
+ sync->state = 0;
+ toku_mutex_init(toku_uninstrumented, &sync->lock, nullptr);
+ toku_cond_init(toku_uninstrumented, &sync->cv, nullptr);
+#endif
+}
+
+static void test_sync_destroy(struct test_sync *UU(sync)) {
+#if TOKU_DEBUG_TXN_SYNC
+ toku_mutex_destroy(&sync->lock);
+ toku_cond_destroy(&sync->cv);
+#endif
+}
+
+static void test_sync_sleep(struct test_sync *UU(sync), int UU(new_state)) {
+#if TOKU_DEBUG_TXN_SYNC
+ toku_mutex_lock(&sync->lock);
+ while (sync->state != new_state) {
+ toku_cond_wait(&sync->cv, &sync->lock);
+ }
+ toku_mutex_unlock(&sync->lock);
+#endif
+}
+
+static void test_sync_next_state(struct test_sync *UU(sync)) {
+#if TOKU_DEBUG_TXN_SYNC
+ toku_mutex_lock(&sync->lock);
+ sync->state++;
+ toku_cond_broadcast(&sync->cv);
+ toku_mutex_unlock(&sync->lock);
+#endif
+}
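+// The sync helpers implement a simple barrier protocol: test_sync_next_state() bumps a shared
+// counter, and test_sync_sleep(n) blocks until that counter reaches n (only active when
+// TOKU_DEBUG_TXN_SYNC is defined).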
+
+
+struct start_txn_arg {
+ DB_ENV *env;
+ DB *db;
+ DB_TXN * parent;
+};
+
+static struct test_sync sync_s;
+
+static void test_callback(pthread_t self_tid, void * extra) {
+ pthread_t **p = (pthread_t **) extra;
+ pthread_t tid_1 = *p[0];
+ pthread_t tid_2 = *p[1];
+ assert(pthread_equal(self_tid, tid_2));
+ printf("%s: the thread[%" PRIu64 "] is going to wait...\n", __func__, reinterpret_cast<uint64_t>(tid_1));
+ test_sync_next_state(&sync_s);
+ sleep(3);
+ //test_sync_sleep(&sync_s,3);
+    //Using the test_sync_sleep/test_sync_next_state pair would synchronize the threads more
+    //precisely; however, after the fix it might cause a deadlock. Simply sleeping is enough
+    //for a proof-of-concept test.
+ printf("%s: the thread[%" PRIu64 "] is resuming...\n", __func__, reinterpret_cast<uint64_t>(tid_1));
+ return;
+}
+
+static void * start_txn2(void * extra) {
+ struct start_txn_arg * args = (struct start_txn_arg *) extra;
+ DB_ENV * env = args -> env;
+ DB * db = args->db;
+ DB_TXN * parent = args->parent;
+ test_sync_sleep(&sync_s, 1);
+ printf("start %s [thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ DB_TXN *txn;
+ int r = env->txn_begin(env, parent, &txn, DB_READ_COMMITTED);
+ assert(r == 0);
+ //do some random things...
+ DBT key, data;
+ dbt_init(&key, "hello", 6);
+ dbt_init(&data, "world", 6);
+ db->put(db, txn, &key, &data, 0);
+ db->get(db, txn, &key, &data, 0);
+
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+ printf("%s done[thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ return extra;
+}
+
+static void * start_txn1(void * extra) {
+ struct start_txn_arg * args = (struct start_txn_arg *) extra;
+ DB_ENV * env = args -> env;
+ DB * db = args->db;
+ printf("start %s: [thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ DB_TXN *txn;
+ int r = env->txn_begin(env, NULL, &txn, DB_READ_COMMITTED);
+ assert(r == 0);
+ printf("%s: txn began by [thread %" PRIu64 "], will wait\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ test_sync_next_state(&sync_s);
+ test_sync_sleep(&sync_s,2);
+ printf("%s: [thread %" PRIu64 "] resumed\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ //do some random things...
+ DBT key, data;
+ dbt_init(&key, "hello", 6);
+ dbt_init(&data, "world", 6);
+ db->put(db, txn, &key, &data, 0);
+ db->get(db, txn, &key, &data, 0);
+ r = txn->commit(txn, 0);
+ assert(r == 0);
+ printf("%s: done[thread %" PRIu64 "]\n", __func__, reinterpret_cast<uint64_t>(pthread_self()));
+ //test_sync_next_state(&sync_s);
+ return extra;
+}
+
+int test_main (int UU(argc), char * const UU(argv[])) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ assert(r == 0);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB *db = NULL;
+ r = db_create(&db, env, 0);
+ assert(r == 0);
+
+ r = db->open(db, NULL, "testit", NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ assert(r == 0);
+
+ DB_TXN * parent = NULL;
+ r = env->txn_begin(env, 0, &parent, DB_READ_COMMITTED);
+ assert(r == 0);
+
+ ZERO_STRUCT(sync_s);
+ test_sync_init(&sync_s);
+
+ pthread_t tid_1 = 0;
+ pthread_t tid_2 = 0;
+ pthread_t* callback_extra[2] = {&tid_1, &tid_2};
+ toku_set_test_txn_sync_callback(test_callback, callback_extra);
+
+ struct start_txn_arg args = {env, db, parent};
+
+ r = pthread_create(&tid_1, NULL, start_txn1, &args);
+ assert(r==0);
+
+ r= pthread_create(&tid_2, NULL, start_txn2, &args);
+ assert(r==0);
+
+ void * ret;
+ r = pthread_join(tid_1, &ret);
+ assert(r == 0);
+ r = pthread_join(tid_2, &ret);
+ assert(r == 0);
+
+ r = parent->commit(parent, 0);
+ assert(r ==0);
+
+ test_sync_destroy(&sync_s);
+ r = db->close(db, 0);
+ assert(r == 0);
+
+ r = env->close(env, 0);
+ assert(r == 0);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/update-multiple-data-diagonal.cc b/storage/tokudb/PerconaFT/src/tests/update-multiple-data-diagonal.cc
new file mode 100644
index 00000000..632942df
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/update-multiple-data-diagonal.cc
@@ -0,0 +1,343 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Verify update_multiple when, for each row i, we change the data in column (i % ndbs)
+// from x to x+1 (a diagonal update pattern).
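+//
+// For example, with ndbs = 2 (an assumption for illustration): row i's primary key is
+// htonl(2*i), and its value holds one int per DB. The update bumps only column (i % ndbs)
+// from get_key(i, col) to get_new_key(i, col), i.e. to the next odd encoded value.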
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(2*(i + dbnum));
+}
+
+static int
+get_new_key(int i, int dbnum) {
+ return htonl(2*(i + dbnum) + 1);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
+static void
+get_new_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if ((i % ndbs) == dbnum)
+ v[dbnum] = get_new_key(i, dbnum);
+ else
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = NULL;
+ if (dest_val_arrays) {
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ dest_val = &dest_val_arrays->dbts[0];
+ }
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_val->size / sizeof (int));
+
+ int *pri_key = (int *) src_key->data;
+ int *pri_data = (int *) src_val->data;
+
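+    // The primary DB (dbnum 0) keys on the original primary key; secondary DB n keys on
+    // column n of the primary row's value.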
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->size = sizeof (int);
+ dest_key->data = dbnum == 0 ? &pri_key[dbnum] : &pri_data[dbnum];
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->size = sizeof (int);
+ dest_key->data = toku_realloc(dest_key->data, dest_key->size);
+ memcpy(dest_key->data, dbnum == 0 ? &pri_key[dbnum] : &pri_data[dbnum], dest_key->size);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (dest_val) {
+ switch (dest_val->flags) {
+ case 0:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = src_val->data;
+ } else
+ dest_val->size = 0;
+ break;
+ case DB_DBT_REALLOC:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = toku_realloc(dest_val->data, dest_val->size);
+ memcpy(dest_val->data, src_val->data, dest_val->size);
+ } else
+ dest_val->size = 0;
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_key_arrays, NULL, src_key, src_data);
+}
+
+#if 0
+static void
+verify_locked(DB_ENV *env, DB *db, int k) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); assert(r == DB_LOCK_NOTGRANTED);
+ r = txn->abort(txn); assert_zero(r);
+}
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ }
+ assert_zero(i);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+#endif
+
+static void
+verify_seq(DB_ENV *env, DB *db, int dbnum, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k;
+ int expectk;
+ if (dbnum == 0 || (i % ndbs) != dbnum)
+ expectk = get_key(i, dbnum);
+ else
+ expectk = get_new_key(i, dbnum);
+
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == expectk);
+
+ if (dbnum == 0) {
+ assert(val.size == ndbs * sizeof (int));
+ int v[ndbs]; get_new_data(v, i, ndbs);
+ assert(memcmp(val.data, v, val.size) == 0);
+ } else
+ assert(val.size == 0);
+ }
+    assert(i == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+update_diagonal(DB_ENV *env, DB *db[], int ndbs, int nrows) {
+ assert(ndbs > 0);
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ for (int i = 0; i < nrows; i++) {
+
+ // update the data i % ndbs col from x to x+1
+
+ int k = get_key(i, 0);
+ DBT old_key; dbt_init(&old_key, &k, sizeof k);
+ DBT new_key = old_key;
+
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT old_data; dbt_init(&old_data, &v[0], sizeof v);
+
+ int newv[ndbs]; get_new_data(newv, i, ndbs);
+ DBT new_data; dbt_init(&new_data, &newv[0], sizeof newv);
+
+ int ndbts = 2 * ndbs;
+ DBT keys[ndbts]; memset(keys, 0, sizeof keys);
+ DBT vals[ndbts]; memset(vals, 0, sizeof vals);
+ uint32_t flags_array[ndbs]; memset(flags_array, 0, sizeof(flags_array));
+
+ r = env_update_multiple_test_no_array(env, ndbs > 0 ? db[0] : NULL, txn, &old_key, &old_data, &new_key, &new_data, ndbs, db, flags_array, ndbts, keys, ndbts, vals);
+ assert_zero(r);
+ }
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_primary(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v[0], sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_secondary(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, dbnum);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0); assert_zero(r);
+
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if (dbnum == 0)
+ populate_primary(env, db[dbnum], ndbs, nrows);
+ else
+ populate_secondary(env, db[dbnum], dbnum, nrows);
+ }
+
+ update_diagonal(env, db, ndbs, nrows);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_seq(env, db[dbnum], dbnum, ndbs, nrows);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db[dbnum]->close(db[dbnum], 0); assert_zero(r);
+ }
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int ndbs = 2;
+ int nrows = 2;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test(ndbs, nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/update-multiple-key0.cc b/storage/tokudb/PerconaFT/src/tests/update-multiple-key0.cc
new file mode 100644
index 00000000..b9ae33b7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/update-multiple-key0.cc
@@ -0,0 +1,327 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that update_multiple works when we change only key0
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(i + dbnum);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
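+// row generator: the descriptor of dest_db holds its db index; db 0 passes the
+// primary key/value through, secondary db N uses column N of the primary value
+// as its key and stores an empty value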
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = NULL;
+ if (dest_val_arrays) {
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ dest_val = &dest_val_arrays->dbts[0];
+ }
+
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_val->size / sizeof (int));
+
+ int *pri_key = (int *) src_key->data;
+ int *pri_data = (int *) src_val->data;
+
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->size = sizeof (int);
+ dest_key->data = dbnum == 0 ? &pri_key[dbnum] : &pri_data[dbnum];
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->size = sizeof (int);
+ dest_key->data = toku_realloc(dest_key->data, dest_key->size);
+ memcpy(dest_key->data, dbnum == 0 ? &pri_key[dbnum] : &pri_data[dbnum], dest_key->size);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (dest_val) {
+ switch (dest_val->flags) {
+ case 0:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = src_val->data;
+ } else
+ dest_val->size = 0;
+ break;
+ case DB_DBT_REALLOC:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = toku_realloc(dest_val->data, dest_val->size);
+ memcpy(dest_val->data, src_val->data, dest_val->size);
+ } else
+ dest_val->size = 0;
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_key_arrays, NULL, src_key, src_data);
+}
+
+static void
+verify_locked(DB_ENV *env, DB *db, int k) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); assert(r == DB_LOCK_NOTGRANTED);
+ r = txn->abort(txn); assert_zero(r);
+}
+
+#if 0
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ }
+ assert_zero(i);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+#endif
+
+static void
+verify_seq(DB_ENV *env, DB *db, int dbnum, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k;
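+ // update_key0 moved every primary key forward by nrows; the secondary keys are unchanged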
+ int expectk = dbnum == 0 ? get_key(i + nrows, dbnum) : get_key(i, dbnum);
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == expectk);
+
+ if (dbnum == 0) {
+ assert(val.size == ndbs * sizeof (int));
+ int v[ndbs]; get_data(v, i, ndbs);
+ assert(memcmp(val.data, v, val.size) == 0);
+ } else
+ assert(val.size == 0);
+ }
+ assert(i == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+update_key0(DB_ENV *env, DB *db[], int ndbs, int nrows) {
+ assert(ndbs > 0);
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ for (int i = 0; i < nrows; i++) {
+
+ // update where new key0 = old key0 + nrows
+
+ int k = get_key(i, 0);
+ DBT old_key; dbt_init(&old_key, &k, sizeof k);
+ int newk = get_key(i + nrows, 0);
+ DBT new_key; dbt_init(&new_key, &newk, sizeof newk);
+
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT old_data; dbt_init(&old_data, &v[0], sizeof v);
+ DBT new_data = old_data;
+
+ int ndbts = 2 * ndbs;
+ DBT keys[ndbts]; memset(keys, 0, sizeof keys);
+ DBT vals[ndbts]; memset(vals, 0, sizeof vals);
+ uint32_t flags_array[ndbs]; memset(flags_array, 0, sizeof(flags_array));
+
+ r = env_update_multiple_test_no_array(env, ndbs > 0 ? db[0] : NULL, txn, &old_key, &old_data, &new_key, &new_data, ndbs, db, flags_array, ndbts, keys, ndbts, vals);
+ assert_zero(r);
+
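+ // both the old and the new key0 should still be write-locked by this txn, so a
+ // delete attempted from a fresh txn must fail with DB_LOCK_NOTGRANTED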
+ verify_locked(env, db[0], k);
+ verify_locked(env, db[0], newk);
+ }
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_primary(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v[0], sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_secondary(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, dbnum);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0); assert_zero(r);
+
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if (dbnum == 0)
+ populate_primary(env, db[dbnum], ndbs, nrows);
+ else
+ populate_secondary(env, db[dbnum], dbnum, nrows);
+ }
+
+ update_key0(env, db, ndbs, nrows);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_seq(env, db[dbnum], dbnum, ndbs, nrows);
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db[dbnum]->close(db[dbnum], 0); assert_zero(r);
+ }
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int ndbs = 2;
+ int nrows = 2;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test(ndbs, nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/update-multiple-nochange.cc b/storage/tokudb/PerconaFT/src/tests/update-multiple-nochange.cc
new file mode 100644
index 00000000..5bfac519
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/update-multiple-nochange.cc
@@ -0,0 +1,319 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that update_multiple works when the new row equals the old row
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(i + dbnum);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = NULL;
+ if (dest_val_arrays) {
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ dest_val = &dest_val_arrays->dbts[0];
+ }
+
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_val->size / sizeof (int));
+
+ int *pri_data = (int *) src_val->data;
+
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->size = sizeof (int);
+ dest_key->data = &pri_data[dbnum];
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->size = sizeof (int);
+ dest_key->data = toku_realloc(dest_key->data, dest_key->size);
+ memcpy(dest_key->data, &pri_data[dbnum], dest_key->size);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (dest_val) {
+ switch (dest_val->flags) {
+ case 0:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = src_val->data;
+ } else
+ dest_val->size = 0;
+ break;
+ case DB_DBT_REALLOC:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = toku_realloc(dest_val->data, dest_val->size);
+ memcpy(dest_val->data, src_val->data, dest_val->size);
+ } else
+ dest_val->size = 0;
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_key_arrays, NULL, src_key, src_data);
+}
+
+#if 0
+static void
+verify_locked(DB_ENV *env, DB *db, int k) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); assert(r == DB_LOCK_NOTGRANTED);
+ r = txn->abort(txn); assert_zero(r);
+}
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ }
+ assert_zero(i);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+#endif
+
+static void
+verify_seq(DB_ENV *env, DB *db, int dbnum, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == get_key(i, dbnum));
+
+ if (dbnum == 0) {
+ assert(val.size == ndbs * sizeof (int));
+ int v[ndbs]; get_data(v, i, ndbs);
+ assert(memcmp(val.data, v, val.size) == 0);
+ } else
+ assert(val.size == 0);
+ }
+ assert(i == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
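+// issue update_multiple with new row == old row for every row, then check that
+// every db still contains exactly the originally populated rows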
+static void
+verify(DB_ENV *env, DB *db[], int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ for (int i = 0; i < nrows; i++) {
+
+ // update where new row = old row
+
+ int k = get_key(i, 0);
+ DBT old_key; dbt_init(&old_key, &k, sizeof k);
+ DBT new_key = old_key;
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT old_data; dbt_init(&old_data, &v[0], sizeof v);
+ DBT new_data = old_data;
+
+ int ndbts = 2 * ndbs;
+ DBT keys[ndbts]; memset(keys, 0, sizeof keys);
+ DBT vals[ndbts]; memset(vals, 0, sizeof vals);
+ uint32_t flags_array[ndbs]; memset(flags_array, 0, sizeof(flags_array));
+
+ r = env_update_multiple_test_no_array(env, ndbs > 0 ? db[0] : NULL, txn, &old_key, &old_data, &new_key, &new_data, ndbs, db, flags_array, ndbts, keys, ndbts, vals);
+ assert_zero(r);
+ }
+ r = txn->commit(txn, 0); assert_zero(r);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_seq(env, db[dbnum], dbnum, ndbs, nrows);
+}
+
+static void
+populate_primary(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v[0], sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_secondary(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, dbnum);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0); assert_zero(r);
+
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if (dbnum == 0)
+ populate_primary(env, db[dbnum], ndbs, nrows);
+ else
+ populate_secondary(env, db[dbnum], dbnum, nrows);
+ }
+
+ verify(env, db, ndbs, nrows);
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db[dbnum]->close(db[dbnum], 0); assert_zero(r);
+ }
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int ndbs = 2;
+ int nrows = 2;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test(ndbs, nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/update-multiple-with-indexer-array.cc b/storage/tokudb/PerconaFT/src/tests/update-multiple-with-indexer-array.cc
new file mode 100644
index 00000000..c79bca04
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/update-multiple-with-indexer-array.cc
@@ -0,0 +1,459 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify update_multiple with a variable number of secondary keys per row (DBT_ARRAY callbacks) while a hot indexer builds the last db
+
+static const int MAX_KEYS = 3;
+
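+// Primary value layout: v[0] = 0 (old row) or 1 (new row), v[1] = ndbs, then
+// MAX_KEYS old secondary keys per secondary db followed by MAX_KEYS new
+// secondary keys per secondary db (see fill_old_data/fill_new_data).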
+static int
+array_size(int ndbs) {
+ return 1 + // 0 for old, 1 for new
+ 1 + // ndbs
+ 2 * MAX_KEYS * (ndbs-1);
+}
+static int
+get_num_new_keys(int i, int dbnum) {
+ if (dbnum == 0) return 1;
+ if (i & (1<<4)) {
+ dbnum++; // Shift every once in a while.
+ }
+ return (i + dbnum) % MAX_KEYS; // 0, 1, or 2
+}
+
+static int
+get_old_num_keys(int i, int dbnum) {
+ if (dbnum == 0) return 1;
+ return (i + dbnum) % MAX_KEYS; // 0, 1, or 2
+}
+
+static int
+get_total_secondary_rows(int num_primary) {
+ assert(num_primary % MAX_KEYS == 0);
+ return num_primary / MAX_KEYS * (0 + 1 + 2);
+}
+
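+// Secondary key encoding: the row (2*i) goes in the top 16 bits, the db number
+// in bits 8..15, and the key slot in the low byte (new keys set bit 0); htonl
+// keeps memcmp order consistent with numeric order, and htonl(-1) marks "no key
+// for this slot".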
+static int
+get_old_key(int i, int dbnum, int which) {
+ assert(i < INT16_MAX / 2);
+ assert(which >= 0);
+ assert(which < 4);
+ assert(dbnum < 16);
+ if (dbnum == 0) {
+ assert(which == 0);
+ return htonl(2*i);
+ }
+ if (which >= get_old_num_keys(i, dbnum)) {
+ return htonl(-1);
+ }
+ return htonl(((2*i+0) << 16) + (dbnum<<8) + (which<<1));
+}
+
+static int
+get_new_key(int i, int dbnum, int which) {
+ assert(which >= 0);
+ assert(which < 4);
+ assert(dbnum < 16);
+
+ if (dbnum == 0) {
+ assert(which == 0);
+ return htonl(2*i);
+ }
+ if (which >= get_num_new_keys(i, dbnum)) {
+ return htonl(-1);
+ }
+ if ((i+dbnum+which) & (1<<5)) {
+ return htonl(((2*i+0) << 16) + (dbnum<<8) + (which<<1)); // no change from original
+ }
+ return htonl(((2*i+0) << 16) + (dbnum<<8) + (which<<1) + 1);
+}
+
+static void
+fill_data_2_and_later(int *v, int i, int ndbs) {
+ int index = 2;
+ for (int dbnum = 1; dbnum < ndbs; dbnum++) {
+ for (int which = 0; which < MAX_KEYS; ++which) {
+ v[index++] = get_old_key(i, dbnum, which);
+ }
+ }
+ for (int dbnum = 1; dbnum < ndbs; dbnum++) {
+ for (int which = 0; which < MAX_KEYS; ++which) {
+ v[index++] = get_new_key(i, dbnum, which);
+ }
+ }
+}
+
+
+static void
+fill_old_data(int *v, int i, int ndbs) {
+ v[0] = 0;
+ v[1] = ndbs;
+ fill_data_2_and_later(v, i, ndbs);
+}
+
+static void
+fill_new_data(int *v, int i, int ndbs) {
+ v[0] = 1;
+ v[1] = ndbs;
+ fill_data_2_and_later(v, i, ndbs);
+}
+
+
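+// Row generator for the secondary dbs: decode the packed primary value, emit a
+// variable number of keys (0..MAX_KEYS-1) for this db through the DBT_ARRAY, and
+// cross-check each generated key against the copy stored in the primary value.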
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ (void)src_val;
+ assert(src_db != dest_db);
+ assert(src_db);
+ int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum > 0);
+
+ int pri_key = *(int *) src_key->data;
+ int* pri_val = (int*) src_val->data;
+
+ bool is_new = pri_val[0] == 1;
+ int i = (ntohl(pri_key)) / 2;
+
+ int num_keys = is_new ? get_num_new_keys(i, dbnum) : get_old_num_keys(i, dbnum);
+
+ toku_dbt_array_resize(dest_key_arrays, num_keys);
+
+ if (dest_val_arrays) {
+ toku_dbt_array_resize(dest_val_arrays, num_keys);
+ }
+
+ int ndbs = pri_val[1];
+ int index = 2 + (dbnum-1)*MAX_KEYS;
+ if (is_new) {
+ index += MAX_KEYS*(ndbs-1);
+ }
+
+ assert(src_val->size % sizeof(int) == 0);
+ assert((int)src_val->size / 4 >= index + num_keys);
+
+
+ for (int which = 0; which < num_keys; which++) {
+ DBT *dest_key = &dest_key_arrays->dbts[which];
+ DBT *dest_val = NULL;
+
+ assert(dest_key->flags == DB_DBT_REALLOC);
+ if (dest_key->ulen < sizeof(int)) {
+ dest_key->data = toku_xrealloc(dest_key->data, sizeof(int));
+ dest_key->ulen = sizeof(int);
+ }
+ dest_key->size = sizeof(int);
+ if (dest_val_arrays) {
+ dest_val = &dest_val_arrays->dbts[which];
+ assert(dest_val->flags == DB_DBT_REALLOC);
+ dest_val->size = 0;
+ }
+ int new_key = is_new ? get_new_key(i, dbnum, which) : get_old_key(i, dbnum, which);
+ assert(new_key == pri_val[index + which]);
+ *(int*)dest_key->data = new_key;
+ }
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_key_arrays, NULL, src_key, src_data);
+}
+
+static void
+do_updates(DB_ENV *env, DB *db[], int ndbs, int nrows) {
+ assert(ndbs > 0);
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ int narrays = 2 * ndbs;
+ DBT_ARRAY keys[narrays];
+ DBT_ARRAY vals[narrays];
+ for (int i = 0; i < narrays; i++) {
+ toku_dbt_array_init(&keys[i], 1);
+ toku_dbt_array_init(&vals[i], 1);
+ }
+
+ for (int i = 0; i < nrows; i++) {
+
+ // replace the old row for i (with its old secondary keys) by the new row (with its new secondary keys)
+
+ int old_k = get_old_key(i, 0, 0);
+ DBT old_key; dbt_init(&old_key, &old_k, sizeof old_k);
+ int new_k = get_new_key(i, 0, 0);
+ DBT new_key; dbt_init(&new_key, &new_k, sizeof new_k);
+
+ int v[array_size(ndbs)]; fill_old_data(v, i, ndbs);
+ DBT old_data; dbt_init(&old_data, &v[0], sizeof v);
+
+ int newv[array_size(ndbs)]; fill_new_data(newv, i, ndbs);
+ DBT new_data; dbt_init(&new_data, &newv[0], sizeof newv);
+
+ uint32_t flags_array[ndbs]; memset(flags_array, 0, sizeof(flags_array));
+
+ r = env->update_multiple(env, db[0], txn, &old_key, &old_data, &new_key, &new_data, ndbs, db, flags_array, narrays, keys, narrays, vals);
+ assert_zero(r);
+ }
+ for (int i = 0; i < narrays; i++) {
+ toku_dbt_array_destroy(&keys[i]);
+ toku_dbt_array_destroy(&vals[i]);
+ }
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_primary(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_old_key(i, 0, 0);
+ int v[array_size(ndbs)];
+ fill_old_data(v, i, ndbs);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v[0], sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_secondary(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ for (int which = 0; which < MAX_KEYS; which++) {
+ int k = get_old_key(i, dbnum, which);
+ if (k >= 0) {
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+ }
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+verify_pri_seq(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ const int dbnum = 0;
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k;
+ int expectk = get_new_key(i, dbnum, 0);
+
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == expectk);
+
+ int num_keys = array_size(ndbs);
+ assert(val.size == num_keys*sizeof(int));
+ int v[num_keys]; fill_new_data(v, i, ndbs);
+ assert(memcmp(val.data, v, val.size) == 0);
+ }
+ assert(i == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
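+// walk the secondary in cursor order: row i should contribute exactly
+// get_num_new_keys(i, dbnum) keys, and the total must equal
+// get_total_secondary_rows(nrows)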
+static void
+verify_sec_seq(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ assert(dbnum > 0);
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ int rows_found = 0;
+
+ for (i = 0; ; i++) {
+ int num_keys = get_num_new_keys(i, dbnum);
+ for (int which = 0; which < num_keys; ++which) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0) {
+ CKERR2(r, DB_NOTFOUND);
+ goto done;
+ }
+ rows_found++;
+ int k;
+ int expectk = get_new_key(i, dbnum, which);
+
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ int got_i = (ntohl(k) >> 16) / 2;
+ if (got_i < i) {
+ // Will fail. Too many old i's
+ assert(k == expectk);
+ } else if (got_i > i) {
+ // Will fail. Too few in previous i.
+ assert(k == expectk);
+ }
+
+ if (k != expectk && which < get_old_num_keys(i, dbnum) && k == get_old_key(i, dbnum, which)) {
+ // Will fail, never got updated.
+ assert(k == expectk);
+ }
+ assert(k == expectk);
+ assert(val.size == 0);
+ }
+ }
+done:
+ assert(rows_found == get_total_secondary_rows(nrows));
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0); assert_zero(r);
+
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ populate_primary(env, db[0], ndbs, nrows);
+ for (int dbnum = 1; dbnum < ndbs-1; dbnum++) {
+ populate_secondary(env, db[dbnum], dbnum, nrows);
+ }
+
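+ // db[ndbs-1] is left empty: create a hot indexer on it, run the updates while
+ // the indexer exists, then build it, so the new index must reflect the updates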
+ DB_TXN *indexer_txn = NULL;
+ r = env->txn_begin(env, NULL, &indexer_txn, 0); assert_zero(r);
+
+ DB_INDEXER *indexer = NULL;
+ uint32_t db_flags = 0;
+ assert(ndbs > 2);
+ r = env->create_indexer(env, indexer_txn, &indexer, db[0], 1, &db[ndbs-1], &db_flags, 0); assert_zero(r);
+
+ do_updates(env, db, ndbs, nrows);
+
+ r = indexer->build(indexer); assert_zero(r);
+ r = indexer->close(indexer); assert_zero(r);
+
+ r = indexer_txn->commit(indexer_txn, 0); assert_zero(r);
+
+ verify_pri_seq(env, db[0], ndbs, nrows);
+ for (int dbnum = 1; dbnum < ndbs; dbnum++)
+ verify_sec_seq(env, db[dbnum], dbnum, nrows);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db[dbnum]->close(db[dbnum], 0); assert_zero(r);
+ }
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int ndbs = 10;
+ int nrows = MAX_KEYS*(1<<5)*4;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+ while (nrows % (MAX_KEYS*(1<<5)) != 0) {
+ nrows++;
+ }
+ // need the primary, at least one secondary to update, and one secondary to build with the indexer
+ while (ndbs < 3) {
+ ndbs++;
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test(ndbs, nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/update-multiple-with-indexer.cc b/storage/tokudb/PerconaFT/src/tests/update-multiple-with-indexer.cc
new file mode 100644
index 00000000..b1a95a1e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/update-multiple-with-indexer.cc
@@ -0,0 +1,358 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// verify that update_multiple works when we change the data in row[i] col[i % ndbs] from x to x+1, with a hot indexer building the last db
+
+static int
+get_key(int i, int dbnum) {
+ return htonl(2*(i + dbnum));
+}
+
+static int
+get_new_key(int i, int dbnum) {
+ return htonl(2*(i + dbnum) + 1);
+}
+
+static void
+get_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
+static void
+get_new_data(int *v, int i, int ndbs) {
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ if ((i % ndbs) == dbnum)
+ v[dbnum] = get_new_key(i, dbnum);
+ else
+ v[dbnum] = get_key(i, dbnum);
+ }
+}
+
+static int
+put_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, DBT_ARRAY *dest_val_arrays, const DBT *src_key, const DBT *src_val) {
+ toku_dbt_array_resize(dest_key_arrays, 1);
+ DBT *dest_key = &dest_key_arrays->dbts[0];
+ DBT *dest_val = NULL;
+ if (dest_val_arrays) {
+ toku_dbt_array_resize(dest_val_arrays, 1);
+ dest_val = &dest_val_arrays->dbts[0];
+ }
+
+ (void) dest_db; (void) src_db; (void) dest_key; (void) dest_val; (void) src_key; (void) src_val;
+
+ unsigned int dbnum;
+ assert(dest_db->descriptor->dbt.size == sizeof dbnum);
+ memcpy(&dbnum, dest_db->descriptor->dbt.data, sizeof dbnum);
+ assert(dbnum < src_val->size / sizeof (int));
+
+ int *pri_key = (int *) src_key->data;
+ int *pri_data = (int *) src_val->data;
+
+ switch (dest_key->flags) {
+ case 0:
+ dest_key->size = sizeof (int);
+ dest_key->data = dbnum == 0 ? &pri_key[dbnum] : &pri_data[dbnum];
+ break;
+ case DB_DBT_REALLOC:
+ dest_key->size = sizeof (int);
+ dest_key->data = toku_realloc(dest_key->data, dest_key->size);
+ memcpy(dest_key->data, dbnum == 0 ? &pri_key[dbnum] : &pri_data[dbnum], dest_key->size);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (dest_val) {
+ switch (dest_val->flags) {
+ case 0:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = src_val->data;
+ } else
+ dest_val->size = 0;
+ break;
+ case DB_DBT_REALLOC:
+ if (dbnum == 0) {
+ dest_val->size = src_val->size;
+ dest_val->data = toku_realloc(dest_val->data, dest_val->size);
+ memcpy(dest_val->data, src_val->data, dest_val->size);
+ } else
+ dest_val->size = 0;
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ return 0;
+}
+
+static int
+del_callback(DB *dest_db, DB *src_db, DBT_ARRAY *dest_key_arrays, const DBT *src_key, const DBT *src_data) {
+ return put_callback(dest_db, src_db, dest_key_arrays, NULL, src_key, src_data);
+}
+
+#if 0
+static void
+verify_locked(DB_ENV *env, DB *db, int k) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ r = db->del(db, txn, &key, DB_DELETE_ANY); assert(r == DB_LOCK_NOTGRANTED);
+ r = txn->abort(txn); assert_zero(r);
+}
+
+static void
+verify_empty(DB_ENV *env, DB *db) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ }
+ assert_zero(i);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+#endif
+
+static void
+verify_seq(DB_ENV *env, DB *db, int dbnum, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ DBC *cursor = NULL;
+ r = db->cursor(db, txn, &cursor, 0); assert_zero(r);
+ int i;
+ for (i = 0; ; i++) {
+ DBT key; memset(&key, 0, sizeof key);
+ DBT val; memset(&val, 0, sizeof val);
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k;
+ int expectk;
+ if (dbnum == 0 || (i % ndbs) != dbnum)
+ expectk = get_key(i, dbnum);
+ else
+ expectk = get_new_key(i, dbnum);
+
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == expectk);
+
+ if (dbnum == 0) {
+ assert(val.size == ndbs * sizeof (int));
+ int v[ndbs]; get_new_data(v, i, ndbs);
+ assert(memcmp(val.data, v, val.size) == 0);
+ } else
+ assert(val.size == 0);
+ }
+ assert(i == nrows);
+ r = cursor->c_close(cursor); assert_zero(r);
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+update_diagonal(DB_ENV *env, DB *db[], int ndbs, int nrows) {
+ assert(ndbs > 0);
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+ for (int i = 0; i < nrows; i++) {
+
+ // update the data i % ndbs col from x to x+1
+
+ int k = get_key(i, 0);
+ DBT old_key; dbt_init(&old_key, &k, sizeof k);
+ DBT new_key = old_key;
+
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT old_data; dbt_init(&old_data, &v[0], sizeof v);
+
+ int newv[ndbs]; get_new_data(newv, i, ndbs);
+ DBT new_data; dbt_init(&new_data, &newv[0], sizeof newv);
+
+ int ndbts = 2 * ndbs;
+ DBT keys[ndbts]; memset(keys, 0, sizeof keys);
+ DBT vals[ndbts]; memset(vals, 0, sizeof vals);
+ uint32_t flags_array[ndbs]; memset(flags_array, 0, sizeof(flags_array));
+
+ r = env_update_multiple_test_no_array(env, ndbs > 0 ? db[0] : NULL, txn, &old_key, &old_data, &new_key, &new_data, ndbs, db, flags_array, ndbts, keys, ndbts, vals);
+ assert_zero(r);
+ }
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_primary(DB_ENV *env, DB *db, int ndbs, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, 0);
+ int v[ndbs]; get_data(v, i, ndbs);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, &v[0], sizeof v);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+populate_secondary(DB_ENV *env, DB *db, int dbnum, int nrows) {
+ int r;
+ DB_TXN *txn = NULL;
+ r = env->txn_begin(env, NULL, &txn, 0); assert_zero(r);
+
+ // populate
+ for (int i = 0; i < nrows; i++) {
+ int k = get_key(i, dbnum);
+ DBT key; dbt_init(&key, &k, sizeof k);
+ DBT val; dbt_init(&val, NULL, 0);
+ r = db->put(db, txn, &key, &val, 0); assert_zero(r);
+ }
+
+ r = txn->commit(txn, 0); assert_zero(r);
+}
+
+static void
+run_test(int ndbs, int nrows) {
+ int r;
+ DB_ENV *env = NULL;
+ r = db_env_create(&env, 0); assert_zero(r);
+
+ r = env->set_generate_row_callback_for_put(env, put_callback); assert_zero(r);
+ r = env->set_generate_row_callback_for_del(env, del_callback); assert_zero(r);
+
+ r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ DB *db[ndbs];
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db_create(&db[dbnum], env, 0); assert_zero(r);
+
+ DBT dbt_dbnum; dbt_init(&dbt_dbnum, &dbnum, sizeof dbnum);
+
+ char dbname[32]; sprintf(dbname, "%d.tdb", dbnum);
+ r = db[dbnum]->open(db[dbnum], NULL, dbname, NULL, DB_BTREE, DB_AUTO_COMMIT+DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = db[dbnum]->change_descriptor(db[dbnum], txn_desc, &dbt_dbnum, 0); CKERR(chk_r); }
+ });
+ }
+
+ for (int dbnum = 0; dbnum < ndbs-1; dbnum++) {
+ if (dbnum == 0)
+ populate_primary(env, db[dbnum], ndbs, nrows);
+ else
+ populate_secondary(env, db[dbnum], dbnum, nrows);
+ }
+
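+ // db[ndbs-1] starts empty; the indexer is created before update_diagonal and
+ // built afterwards, so the new index has to pick up the updated rows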
+ DB_TXN *indexer_txn = NULL;
+ r = env->txn_begin(env, NULL, &indexer_txn, 0); assert_zero(r);
+
+ DB_INDEXER *indexer = NULL;
+ uint32_t db_flags = 0;
+ r = env->create_indexer(env, indexer_txn, &indexer, db[0], 1, &db[ndbs-1], &db_flags, 0); assert_zero(r);
+
+ update_diagonal(env, db, ndbs, nrows);
+
+ r = indexer->build(indexer); assert_zero(r);
+ r = indexer->close(indexer); assert_zero(r);
+
+ r = indexer_txn->commit(indexer_txn, 0); assert_zero(r);
+
+ for (int dbnum = 0; dbnum < ndbs; dbnum++)
+ verify_seq(env, db[dbnum], dbnum, ndbs, nrows);
+ for (int dbnum = 0; dbnum < ndbs; dbnum++) {
+ r = db[dbnum]->close(db[dbnum], 0); assert_zero(r);
+ }
+
+ r = env->close(env, 0); assert_zero(r);
+}
+
+int
+test_main(int argc, char * const argv[]) {
+ int r;
+ int ndbs = 2;
+ int nrows = 2;
+
+ // parse_args(argc, argv);
+ for (int i = 1; i < argc; i++) {
+ char * const arg = argv[i];
+ if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ }
+ if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) {
+ ndbs = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "--nrows") == 0 && i+1 < argc) {
+ nrows = atoi(argv[++i]);
+ continue;
+ }
+ }
+
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); assert_zero(r);
+
+ run_test(ndbs, nrows);
+
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/src/tests/update.cc b/storage/tokudb/PerconaFT/src/tests/update.cc
new file mode 100644
index 00000000..e5696605
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/update.cc
@@ -0,0 +1,91 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* test the update functionality. */
+
+#include "test.h"
+
+DB_ENV *env;
+
+// the command is two bytes: extra[0] is "nop", "add" or "del"; extra[1] is the amount to add.
+enum cmd { CNOP, CADD, CDEL };
+
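+// update callback: extra->data[0] selects the command and extra->data[1] is the
+// amount for CADD; set_val(NULL, ...) deletes the row, set_val(&new_val, ...)
+// replaces it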
+static int increment_update (DB *db __attribute__((__unused__)),
+ const DBT *key __attribute__((__unused__)),
+ const DBT *old_val, const DBT *extra,
+ void (*set_val)(const DBT *new_val,
+ void *set_extra),
+ void *set_extra) {
+ assert (extra->size==2);
+ assert (old_val->size==4);
+ unsigned char *CAST_FROM_VOIDP(extra_data, extra->data);
+ switch ((enum cmd)(extra_data[0])) {
+ case CNOP:
+ return 0;
+ case CADD: {
+ unsigned int data = *(unsigned int*)old_val->data;
+ data += extra_data[1];
+ DBT new_val = {.data=&data, .size=4, .ulen=0, .flags=0};
+ set_val(&new_val, set_extra);
+ return 0;
+ }
+ case CDEL:
+ set_val(NULL, set_extra);
+ return 0;
+ }
+ assert(0); return 0; // enumeration failed.
+}
+
+static void setup (void) {
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ { int r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); }
+ { int r=db_env_create(&env, 0); CKERR(r); }
+ env->set_errfile(env, stderr);
+ env->set_update(env, increment_update);
+}
+
+static void cleanup (void) {
+ { int r = env->close(env, 0); CKERR(r); }
+}
+
+int test_main (int argc __attribute__((__unused__)), char *const argv[] __attribute__((__unused__))) {
+
+ setup();
+ cleanup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/upgrade-test-1.cc b/storage/tokudb/PerconaFT/src/tests/upgrade-test-1.cc
new file mode 100644
index 00000000..099aa0b6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/upgrade-test-1.cc
@@ -0,0 +1,263 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+
+#define kv_pair_funcs 1 // pull in kv_pair generators from test.h
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+
+#include "test_kv_gen.h"
+
+/*
+ */
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+int NUM_DBS=5;
+int NUM_ROWS=100000;
+int SRC_VERSION = 4;
+int littlenode = 0;
+int flat = 0;
+
+#define OLDDATADIR "../../../../tokudb.data/"
+
+char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+char *db_v5_dir = "dir.preload-db.c.tdb";
+char *db_v4_dir = OLDDATADIR "env_preload.4.2.0.cleanshutdown";
+char *db_v4_dir_node4k = OLDDATADIR "env_preload.4.2.0.node4k.cleanshutdown";
+char *db_v4_dir_flat = OLDDATADIR "env_preload.4.2.0.flat.cleanshutdown";
+
+// HACK: Newer versions of the database/ft to use with this old
+// upgrade test code.
+char *db_v6_dir = OLDDATADIR "env_preload.5.0.8.cleanshutdown";
+char *db_v6_dir_node4k = OLDDATADIR "env_preload.5.0.8.node4k.cleanshutdown";
+char *db_v6_dir_flat = OLDDATADIR "env_preload.5.0.8.flat.cleanshutdown";
+
+char *db_v7_dir = OLDDATADIR "env_preload.5.2.7.cleanshutdown";
+char *db_v7_dir_node4k = OLDDATADIR "env_preload.5.2.7.node4k.cleanshutdown";
+char *db_v7_dir_flat = OLDDATADIR "env_preload.5.2.7.flat.cleanshutdown";
+
+
+// should put this in test.h:
+static __attribute__((__unused__)) int
+char_dbt_cmp (const DBT *a, const DBT *b) {
+ int rval = 0;
+ assert(a && b);
+ if (a->size < b->size) rval = -1;
+ else if (a->size > b->size) rval = 1;
+ else if (a->size) { // if both strings are of size zero, return 0
+ rval = strcmp((char*)a->data, (char*)b->data);
+ }
+ return rval;
+}
+
+
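+// open the dbs that were copied from an old-version data directory, check that
+// the upgraded descriptor still matches the original, then verify every row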
+static void upgrade_test_1(DB **dbs) {
+ int r;
+ // open the DBS
+ {
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ r = char_dbt_cmp(&desc, &(dbs[i]->descriptor->dbt));
+ CKERR(r); // verify that upgraded descriptor is same as original
+ }
+ }
+
+ // read and verify all rows
+ {
+ if ( verbose ) {printf("checking");fflush(stdout);}
+ check_results(env, dbs, NUM_DBS, NUM_ROWS);
+ if ( verbose) {printf("\ndone\n");fflush(stdout);}
+ }
+ // close
+ {
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ }
+}
+
+static void setup(void) {
+ int r;
+ int len = 256;
+ char syscmd[len];
+ char * src_db_dir;
+
+ if ( SRC_VERSION == 4 ) {
+ if (flat)
+ src_db_dir = db_v4_dir_flat;
+ else if (littlenode)
+ src_db_dir = db_v4_dir_node4k;
+ else
+ src_db_dir = db_v4_dir;
+ }
+ else if ( SRC_VERSION == 5 ) {
+ src_db_dir = db_v5_dir;
+ }
+ else if (SRC_VERSION == 6) {
+ if (flat) {
+ src_db_dir = db_v6_dir_flat;
+ } else if (littlenode) {
+ src_db_dir = db_v6_dir_node4k;
+ } else {
+ src_db_dir = db_v6_dir;
+ }
+ }
+ else if (SRC_VERSION == 7) {
+ if (flat) {
+ src_db_dir = db_v7_dir_flat;
+ } else if (littlenode) {
+ src_db_dir = db_v7_dir_node4k;
+ } else {
+ src_db_dir = db_v7_dir;
+ }
+ }
+ else {
+ fprintf(stderr, "unsupported PerconaFT version %d to upgrade\n", SRC_VERSION);
+ assert(0);
+ }
+
+ r = snprintf(syscmd, len, "rm -rf %s", env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+
+ r = snprintf(syscmd, len, "cp -r %s %s", src_db_dir, env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+ generate_permute_tables();
+
+}
+
+static void run_test(void)
+{
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ if (littlenode) {
+ r = env->set_cachesize(env, 0, 512*1024, 1); CKERR(r);
+ }
+ r = env->set_redzone(env, 0); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 1); CKERR(r);
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+
+ // --------------------------
+ upgrade_test_1(dbs);
+ // --------------------------
+
+ if (verbose >= 2)
+ print_engine_status(env);
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ if (SRC_VERSION == 4) {
+ littlenode = 1; // 4k nodes, small cache
+ }
+ setup();
+ run_test(); // read, upgrade, write back to disk
+ run_test(); // read and verify
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: %s [-h] [-v] [-q] [-d <num_dbs>] [-r <num_rows>] [-V <src_version>] [-f]\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-d")==0) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-V")==0) {
+ argc--; argv++;
+ SRC_VERSION = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-f")==0) {
+ flat = 1;
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/upgrade-test-2.cc b/storage/tokudb/PerconaFT/src/tests/upgrade-test-2.cc
new file mode 100644
index 00000000..0a0d5d08
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/upgrade-test-2.cc
@@ -0,0 +1,244 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+
+#include "test_kv_gen.h"
+
+/*
+ * Purpose of this test is to verify that dictionaries created with an older
+ * PerconaFT version can be upgraded, have their descriptors changed (twice,
+ * across a close/reopen), and still return the expected rows.
+ */
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+int NUM_DBS=5;
+int NUM_ROWS=100000;
+int CHECK_RESULTS=0;
+int SRC_VERSION = 4;
+int littlenode = 0;
+
+#define OLDDATADIR "../../../../tokudb.data/"
+
+char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+char *db_v5_dir = "dir.preload-db.c.tdb";
+char *db_v4_dir = OLDDATADIR "env_preload.4.2.0.cleanshutdown";
+char *db_v4_dir_node4k = OLDDATADIR "env_preload.4.2.0.node4k.cleanshutdown";
+
+
+static void upgrade_test_2(DB **dbs) {
+ int r = 0;
+ // open the DBS
+ {
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+ }
+ // close
+ {
+ for(int i=0;i<NUM_DBS;i++) {
+            r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ }
+ // open
+ {
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+ }
+
+ // read and verify all rows
+ {
+ if ( verbose ) {printf("checking");fflush(stdout);}
+ check_results(env, dbs, NUM_DBS, NUM_ROWS);
+ if ( verbose) {printf("\ndone\n");fflush(stdout);}
+ }
+ // close
+ {
+ for(int i=0;i<NUM_DBS;i++) {
+            r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ }
+}
+
+
+static void setup(void) {
+ int r;
+ int len = 256;
+ char syscmd[len];
+ char * src_db_dir;
+
+ if ( SRC_VERSION == 4 ) {
+ if (littlenode)
+ src_db_dir = db_v4_dir_node4k;
+ else
+ src_db_dir = db_v4_dir;
+ }
+ else if ( SRC_VERSION == 5 ) {
+ src_db_dir = db_v5_dir;
+ }
+ else {
+ fprintf(stderr, "unsupported PerconaFT version %d to upgrade\n", SRC_VERSION);
+ assert(0);
+ }
+
+ r = snprintf(syscmd, len, "rm -rf %s", env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+
+ r = snprintf(syscmd, len, "cp -r %s %s", src_db_dir, env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+ generate_permute_tables();
+
+}
+
+static void run_test(int checkpoint_period)
+{
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ if (littlenode) {
+ r = env->set_cachesize(env, 0, 512*1024, 1); CKERR(r);
+ }
+ r = env->set_redzone(env, 0); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, checkpoint_period); CKERR(r);
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+
+ // --------------------------
+ upgrade_test_2(dbs);
+ // --------------------------
+
+ if (verbose >= 2)
+ print_engine_status(env);
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ if (SRC_VERSION == 4) {
+ littlenode = 1; // 4k nodes, small cache
+ }
+ setup();
+ run_test(1);
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -c -d <num_dbs> -r <num_rows> %s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-d")==0) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-c")==0) {
+ CHECK_RESULTS = 1;
+ } else if (strcmp(argv[0], "-V")==0) {
+ argc--; argv++;
+ SRC_VERSION = atoi(argv[0]);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/upgrade-test-3.cc b/storage/tokudb/PerconaFT/src/tests/upgrade-test-3.cc
new file mode 100644
index 00000000..b80e600d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/upgrade-test-3.cc
@@ -0,0 +1,260 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Purpose of this test is to verify that dictionaries created with 4.2.0
+// can be properly truncated with PerconaFT version 5.x or later.
+
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+
+#include "test_kv_gen.h"
+
+/*
+ */
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+int NUM_DBS=5;
+int NUM_ROWS=100000;
+int CHECK_RESULTS=0;
+int SRC_VERSION = 4;
+int littlenode = 0;
+
+#define OLDDATADIR "../../../../tokudb.data/"
+
+char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+char *db_v5_dir = "dir.preload-db.c.tdb";
+char *db_v4_dir = OLDDATADIR "env_preload.4.2.0.cleanshutdown";
+char *db_v4_dir_node4k = OLDDATADIR "env_preload.4.2.0.node4k.cleanshutdown";
+
+
+static void upgrade_test_3(DB **dbs) {
+ int r = 0;
+ char name[MAX_NAME*2];
+
+ // truncate, verify, close, open, verify again
+ DBC *cursor;
+ DB_TXN * txn;
+ DBT desc;
+ int idx[MAX_DBS];
+
+ dbt_init(&desc, "foo", sizeof("foo"));
+
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+
+ r = env->txn_begin(env, NULL, &txn, DB_SERIALIZABLE);
+ CKERR(r);
+
+ // truncate the tree
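+        // row_count is an output parameter; it is ignored here, since the cursor
+        // walk below verifies that the tree is empty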
+ uint32_t row_count = 0;
+ r = dbs[i]->truncate(dbs[i], 0, &row_count, 0); assert(r == 0);
+
+ // walk the tree - expect 0 rows
+ int rowcount = 0;
+ r = dbs[i]->cursor(dbs[i], txn, &cursor, 0);
+ CKERR(r);
+ while (1) {
+ DBT key, val;
+ r = cursor->c_get(cursor, dbt_init(&key, 0, 0), dbt_init(&val, 0, 0), DB_NEXT);
+ if (r == DB_NOTFOUND) break;
+ rowcount++;
+ }
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ assert(rowcount == 0);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = dbs[i]->close(dbs[i], 0); assert(r == 0);
+
+ r = db_create(&dbs[i], env, 0); assert(r == 0);
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+
+ // open new txn and walk the tree again - expect 0 rows
+
+ r = env->txn_begin(env, NULL, &txn, DB_SERIALIZABLE);
+ CKERR(r);
+
+ rowcount = 0;
+ r = dbs[i]->cursor(dbs[i], txn, &cursor, 0); assert(r == 0);
+ while (1) {
+ DBT key, val;
+ r = cursor->c_get(cursor, dbt_init(&key, 0, 0), dbt_init(&val, 0, 0), DB_NEXT);
+ if (r == DB_NOTFOUND) break;
+ rowcount++;
+ }
+ r = cursor->c_close(cursor); assert(r == 0);
+ assert(rowcount == 0);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = dbs[i]->close(dbs[i], 0);
+ CKERR(r);
+
+ dbs[i] = NULL;
+ }
+
+}
+
+
+static void setup(void) {
+ int r;
+ int len = 256;
+ char syscmd[len];
+ char * src_db_dir;
+
+ if ( SRC_VERSION == 4 ) {
+ if (littlenode)
+ src_db_dir = db_v4_dir_node4k;
+ else
+ src_db_dir = db_v4_dir;
+ }
+ else if ( SRC_VERSION == 5 ) {
+ src_db_dir = db_v5_dir;
+ }
+ else {
+ fprintf(stderr, "unsupported PerconaFT version %d to upgrade\n", SRC_VERSION);
+ assert(0);
+ }
+
+ r = snprintf(syscmd, len, "rm -rf %s", env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+
+ r = snprintf(syscmd, len, "cp -r %s %s", src_db_dir, env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+
+}
+
+static void run_test(int checkpoint_period)
+{
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ if (littlenode) {
+ r = env->set_cachesize(env, 0, 512*1024, 1); CKERR(r);
+ }
+ r = env->set_redzone(env, 0); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, checkpoint_period); CKERR(r);
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+
+ // --------------------------
+ upgrade_test_3(dbs);
+ // --------------------------
+
+ if (verbose >= 2)
+ print_engine_status(env);
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ if (SRC_VERSION == 4) {
+ littlenode = 1; // 4k nodes, small cache
+ }
+ setup();
+ run_test(1);
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -d <num_dbs> -V <version> %s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-d")==0) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-V")==0) {
+ argc--; argv++;
+ SRC_VERSION = atoi(argv[0]);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/upgrade-test-4.cc b/storage/tokudb/PerconaFT/src/tests/upgrade-test-4.cc
new file mode 100644
index 00000000..79627952
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/upgrade-test-4.cc
@@ -0,0 +1,364 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+
+#include "test_kv_gen.h"
+
+/****************************************************************************************
+ *
+ * The test sequence is run up to four times: the outer loop selects default or
+ * small node and cachetable sizes (the small sizes are only used for version-4
+ * source data), and the inner loop selects the operation, insert or delete.
+ *
+ * open dbs
+ * read and verify first n rows of primary, a few interspersed rows of secondaries (n is very small so only a few nodes of secondaries are upgraded, even with prefetch)
+ * close dbs (dictionaries now partially upgraded)
+ * open dbs
+ * read and verify a few more rows of primary, a few more interspersed rows of secondaries
+ * close dbs (some more nodes now upgraded)
+ * open dbs
+ * if (insert test)
+ * insert at end of primary and interspersed in secondary dictionaries
+ * else (delete test)
+ * delete from beginning of primary and interspersed in secondary dictionaries
+ * close dbs
+ * open dbs
+ * verify all rows (including newly inserted ones)
+ * close dbs
+ *
+ */
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+int NUM_DBS=5;
+int NUM_ROWS=100000;
+int CHECK_RESULTS=0;
+int SRC_VERSION = 4;
+int littlenode = 0;
+
+#define OLDDATADIR "../../../../tokudb.data/"
+
+char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+char *db_v5_dir = "dir.preload-db.c.tdb";
+char *db_v4_dir = OLDDATADIR "env_preload.4.2.0.cleanshutdown";
+char *db_v4_dir_node4k = OLDDATADIR "env_preload.4.2.0.node4k.cleanshutdown";
+
+
+enum {ROWS_PER_TRANSACTION=10000};
+
+static int idx[MAX_DBS];
+
+typedef enum {insert_op, delete_op} test_type;
+
+static void
+open_dbs(DB **dbs) {
+ int r;
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+}
+
+
+static void
+close_dbs(DB **dbs) {
+ for(int i=0;i<NUM_DBS;i++) {
+ int r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+}
+
+
+static void upgrade_test_4(DB **dbs, test_type test_to_do) {
+ int r;
+ int n = 4; // number of rows to check to partially upgrade dictionary
+ char * msg;
+    if (test_to_do == insert_op)
+        msg = "insert";
+    else if (test_to_do == delete_op)
+        msg = "delete";
+    else assert(0);
+
+
+ // open the DBS
+ open_dbs(dbs);
+
+ // check first few rows of primary, some (pseudo)random rows of secondaries
+ {
+ check_results(env, dbs, NUM_DBS, n);
+ if (verbose)
+ printf("First %d rows checked, now close and reopen\n", n);
+ }
+
+ // close and reopen
+ close_dbs(dbs);
+ open_dbs(dbs);
+
+ // check first few rows of primary, some (pseudo)random rows of secondaries
+ {
+ n *= 2;
+ check_results(env, dbs, NUM_DBS, n);
+ if (verbose)
+ printf("\nFirst %d rows checked, now %s some rows\n", n, msg);
+ }
+
+ // close and reopen
+ close_dbs(dbs);
+ open_dbs(dbs);
+
+ // insert or delete some rows
+ DB_TXN *txn;
+ DBT skey, sval;
+ DBT key, val;
+ dbt_init_realloc(&key);
+ dbt_init_realloc(&val);
+
+ unsigned int k, v;
+ if ( verbose ) {
+ printf("%s some rows\n", msg);
+ fflush(stdout);
+ }
+ int num_rows_to_modify, base;
+    if (test_to_do == insert_op) {
+ num_rows_to_modify = NUM_ROWS;
+ base = NUM_ROWS; // insert after existing rows in primary
+ }
+    else if (test_to_do == delete_op) {
+ num_rows_to_modify = 2*n;
+ base = 0; // delete some rows from primary
+ }
+ else assert(0);
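+    // apply the modifications in batches of at most ROWS_PER_TRANSACTION rows per transaction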
+ int outer_loop_num = ( num_rows_to_modify <= ROWS_PER_TRANSACTION ) ? 1 : (num_rows_to_modify / ROWS_PER_TRANSACTION);
+ for(int x=0;x<outer_loop_num;x++) {
+ r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
+ for(int i=1; (i<=ROWS_PER_TRANSACTION && i<=num_rows_to_modify); i++) {
+ k = i + (x*ROWS_PER_TRANSACTION) + base;
+ v = generate_val(k, 0);
+ dbt_init(&skey, &k, sizeof(unsigned int));
+ dbt_init(&sval, &v, sizeof(unsigned int));
+
+ for(int db = 0;db < NUM_DBS;db++) {
+ put_multiple_generate(dbs[db], // dest_db
+ NULL, // src_db, ignored
+ &key, &val, //
+ &skey, &sval, // src_key, src_val
+ NULL); // extra, ignored
+                if (test_to_do == insert_op) {
+ r = dbs[db]->put(dbs[db], txn, &key, &val, 0);
+ CKERR(r);
+ }
+                else if (test_to_do == delete_op) {
+ r = dbs[db]->del(dbs[db], txn, &key, 0);
+ CKERR(r);
+ }
+ else assert(0);
+
+ if (key.flags == 0) { dbt_init_realloc(&key); }
+ if (val.flags == 0) { dbt_init_realloc(&val); }
+ }
+ }
+ r = txn->commit(txn, 0); CKERR(r);
+ if ( verbose ) {printf(".");fflush(stdout);}
+ }
+ if ( key.flags ) { toku_free(key.data); key.data = NULL; }
+    if ( val.flags ) { toku_free(val.data); val.data = NULL; }
+
+ // close
+ close_dbs(dbs);
+
+ // open
+ open_dbs(dbs);
+
+ // read and verify all rows
+ {
+ if ( verbose ) {printf("\nchecking");fflush(stdout);}
+        if (test_to_do == insert_op)
+ check_results(env, dbs, NUM_DBS, NUM_ROWS * 2);
+        else if (test_to_do == delete_op)
+ check_results_after_row_n(env, dbs, NUM_DBS, NUM_ROWS, num_rows_to_modify);
+ else assert(0);
+ if ( verbose) {printf("\ndone\n");fflush(stdout);}
+ }
+ // close
+ {
+ for(int i=0;i<NUM_DBS;i++) {
+ r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ }
+}
+
+static void setup(void) {
+ int r;
+ int len = 256;
+ char syscmd[len];
+ char * src_db_dir;
+
+ if ( SRC_VERSION == 4 ) {
+ if (littlenode)
+ src_db_dir = db_v4_dir_node4k;
+ else
+ src_db_dir = db_v4_dir;
+ }
+ else if ( SRC_VERSION == 5 ) {
+ src_db_dir = db_v5_dir;
+ }
+ else {
+ fprintf(stderr, "unsupported PerconaFT version %d to upgrade\n", SRC_VERSION);
+ assert(0);
+ }
+
+ r = snprintf(syscmd, len, "rm -rf %s", env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+
+ r = snprintf(syscmd, len, "cp -r %s %s", src_db_dir, env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+ generate_permute_tables();
+
+}
+
+static void run_test(test_type test_to_do)
+{
+ int r;
+
+ r = db_env_create(&env, 0); CKERR(r);
+ if (littlenode) {
+ r = env->set_cachesize(env, 0, 512*1024, 1); CKERR(r);
+ }
+ r = env->set_redzone(env, 0); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 5); CKERR(r);
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+
+ // --------------------------
+ upgrade_test_4(dbs, test_to_do);
+ // --------------------------
+
+ if (verbose >= 2)
+ print_engine_status(env);
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ littlenode = 0;
+ setup();
+    run_test(insert_op);
+    setup();
+    run_test(delete_op);
+ if (SRC_VERSION == 4) {
+ if (verbose)
+ printf("Now repeat test with small nodes and small cache.\n");
+ littlenode = 1; // 4k nodes, small cache
+ setup();
+        run_test(insert_op);
+        setup();
+        run_test(delete_op);
+ }
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -c -d <num_dbs> -r <num_rows> %s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-d")==0) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-c")==0) {
+ CHECK_RESULTS = 1;
+ } else if (strcmp(argv[0], "-V")==0) {
+ argc--; argv++;
+ SRC_VERSION = atoi(argv[0]);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/upgrade-test-5.cc b/storage/tokudb/PerconaFT/src/tests/upgrade-test-5.cc
new file mode 100644
index 00000000..555a5c19
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/upgrade-test-5.cc
@@ -0,0 +1,245 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#define kv_pair_funcs 1 // pull in kv_pair generators from test.h
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+
+#include "test_kv_gen.h"
+
+/*
+ * Purpose of this test is to verify that a dictionary created by an older
+ * version (4.2.0) using nested transactions can be read and upgraded correctly.
+ */
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+int NUM_DBS=1;
+int NUM_ROWS=100000;
+int SRC_VERSION = 4;
+
+#define MAXDEPTH 64
+#define OLDDATADIR "../../../../tokudb.data/"
+
+char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+char *db_v5_dir = "dir.preload-db-nested.c.tdb";
+char *db_v4_dir = OLDDATADIR "env_preload.4.2.0.nested.cleanshutdown";
+
+
+static void
+check_results_nested(DB ** dbs, const uint num_rows) {
+ int num_dbs = 1; // maybe someday increase
+ for(int j=0;j<num_dbs;j++){
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ int r;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ DBC *cursor;
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
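+        // keys that are exact multiples of MAXDEPTH are expected to be absent
+        // (presumably their inserts were aborted by the nested-transaction workload
+        // that generated this data); for the other keys the value carries an offset
+        // of (depth - 1) on top of generate_val()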
+ for(uint i=0;i<num_rows;i++) {
+ if (i % MAXDEPTH) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ CKERR(r);
+ uint observed_k = *(unsigned int*)key.data;
+ uint observed_v = *(unsigned int*)val.data;
+ uint expected_k = i;
+ uint generated_value = generate_val(i, 0);
+ uint expected_v = generated_value + (i%MAXDEPTH - 1);
+ if (verbose >= 3)
+ printf("expected key %d, observed key %d, expected val %d, observed val %d\n",
+ expected_k, observed_k, expected_v, observed_v);
+ // test that we have the expected keys and values
+ assert(observed_k == expected_k);
+ assert(observed_v == expected_v);
+ }
+ dbt_init(&key, NULL, sizeof(unsigned int));
+ dbt_init(&val, NULL, sizeof(unsigned int));
+ if ( verbose && (i%10000 == 0)) {printf("."); fflush(stdout);}
+ }
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+ if ( verbose ) {printf("ok");fflush(stdout);}
+}
+
+
+static void upgrade_test_1(DB **dbs) {
+ int r;
+ // open the DBS
+ {
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+ }
+
+ // read and verify all rows
+ {
+ if ( verbose ) {printf("checking");fflush(stdout);}
+ check_results_nested(&dbs[0], NUM_ROWS);
+ if ( verbose) {printf("\ndone\n");fflush(stdout);}
+ }
+ // close
+ {
+ for(int i=0;i<NUM_DBS;i++) {
+            r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ }
+}
+
+static void setup(void) {
+ int r;
+ int len = 256;
+ char syscmd[len];
+ char * src_db_dir;
+
+ if ( SRC_VERSION == 4 ) {
+ src_db_dir = db_v4_dir;
+ }
+ else if ( SRC_VERSION == 5 ) {
+ src_db_dir = db_v5_dir;
+ }
+ else {
+ fprintf(stderr, "unsupported PerconaFT version %d to upgrade\n", SRC_VERSION);
+ assert(0);
+ }
+
+ r = snprintf(syscmd, len, "rm -rf %s", env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+
+ r = snprintf(syscmd, len, "cp -r %s %s", src_db_dir, env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+ generate_permute_tables();
+
+}
+
+static void run_test(void)
+{
+ int r;
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ r = env->set_cachesize(env, 0, 512*1024, 1); CKERR(r);
+ r = env->set_redzone(env, 0); CKERR(r);
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 1); CKERR(r);
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+
+ // --------------------------
+ upgrade_test_1(dbs);
+ // --------------------------
+
+ if (verbose >= 2)
+ print_engine_status(env);
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+
+}
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+ setup();
+ run_test(); // read, upgrade, write back to disk
+ run_test(); // read and verify
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -r <num_rows> %s\n", cmd);
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-V")==0) {
+ argc--; argv++;
+ SRC_VERSION = atoi(argv[0]);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/upgrade-test-6.cc b/storage/tokudb/PerconaFT/src/tests/upgrade-test-6.cc
new file mode 100644
index 00000000..d3e0154c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/upgrade-test-6.cc
@@ -0,0 +1,416 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Purpose of this test is to verify that a dictionary created by the 4.2.0
+// loader can be properly read with 5.0.
+// This file was derived from the 4.2.0 version of loader-stress-test.c,
+// which was used to create the dictionary.
+// This test only reads (and upgrades) the dictionary; it does not load it.
+
+// Need to use malloc for the malloc instrumentation tests
+#define TOKU_ALLOW_DEPRECATED
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+
+DB_ENV *env;
+enum {MAX_NAME=128};
+enum {MAX_DBS=256};
+int NUM_DBS=1;
+int NUM_ROWS=250000;
+int CHECK_RESULTS=0;
+int USE_PUTS=0;
+enum { old_default_cachesize=1024 }; // MB
+int CACHESIZE=old_default_cachesize;
+int ALLOW_DUPS=0;
+enum {MAGIC=311};
+char *datadir = NULL;
+bool check_est = true; // do check the estimates by default
+bool footprint_print = false; // print memory footprint info
+
+
+// Code for showing memory footprint information.
+pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;
+size_t hiwater;
+size_t water;
+size_t hiwater_start;
+static long long mcount = 0, fcount=0;
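+// water tracks the number of currently malloc'd bytes and hiwater its peak;
+// mcount/fcount are printed at shutdown but are not updated by the wrappers below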
+
+
+size_t malloc_usable_size(void *p);
+
+static void my_free(void*p) {
+ if (p) {
+ water-=malloc_usable_size(p);
+ }
+ free(p);
+}
+
+static void *my_malloc(size_t size) {
+ void *r = malloc(size);
+ if (r) {
+ water += malloc_usable_size(r);
+ if (water>hiwater) hiwater=water;
+ }
+ return r;
+}
+
+static void *my_realloc(void *p, size_t size) {
+ size_t old_usable = p ? malloc_usable_size(p) : 0;
+ void *r = realloc(p, size);
+ if (r) {
+ water -= old_usable;
+ water += malloc_usable_size(r);
+ }
+ return r;
+}
+
+//
+// Functions to create unique key/value pairs, row generators, checkers, ... for each of NUM_DBS
+//
+
+// a is the bit-wise permute table. For DB[i], permute bits as described in a[i] using 'twiddle32'
+// inv is the inverse bit-wise permute of a[]. To get the original value from a twiddled value, twiddle32 (again) with inv[]
+int a[MAX_DBS][32];
+int inv[MAX_DBS][32];
+
+
+// rotate right and left functions
+static inline unsigned int rotr32(const unsigned int x, const unsigned int num) {
+ const unsigned int n = num % 32;
+ return (x >> n) | ( x << (32 - n));
+}
+static inline unsigned int rotl32(const unsigned int x, const unsigned int num) {
+ const unsigned int n = num % 32;
+ return (x << n) | ( x >> (32 - n));
+}
+
+static void generate_permute_tables(void) {
+ int i, j, tmp;
+ for(int db=0;db<MAX_DBS;db++) {
+ for(i=0;i<32;i++) {
+ a[db][i] = i;
+ }
+ for(i=0;i<32;i++) {
+ j = random() % (i + 1);
+ tmp = a[db][j];
+ a[db][j] = a[db][i];
+ a[db][i] = tmp;
+ }
+// if(db < NUM_DBS){ printf("a[%d] = ", db); for(i=0;i<32;i++) { printf("%2d ", a[db][i]); } printf("\n");}
+ for(i=0;i<32;i++) {
+ inv[db][a[db][i]] = i;
+ }
+ }
+}
+
+// permute bits of x based on inverse permute table bitmap
+static unsigned int inv_twiddle32(unsigned int x, int db)
+{
+ unsigned int b = 0;
+ for(int i=0;i<32;i++) {
+ b |= (( x >> i ) & 1) << inv[db][i];
+ }
+ return b;
+}
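+// Note: inv[db][a[db][i]] == i, so inv_twiddle32() undoes the bit permutation
+// (bit i -> a[db][i]) presumably applied by the 4.2.0 loader run that built this
+// data; check_results() uses it to recover the primary key from a secondary key.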
+
+
+static unsigned int pkey_for_val(int key, int i) {
+ return rotr32(key, i) - MAGIC;
+}
+
+
+static void check_results(DB **dbs)
+{
+ for(int j=0;j<NUM_DBS;j++){
+ DBT key, val;
+ unsigned int k=0, v=0;
+ dbt_init(&key, &k, sizeof(unsigned int));
+ dbt_init(&val, &v, sizeof(unsigned int));
+ int r;
+ unsigned int pkey_for_db_key;
+
+ DB_TXN *txn;
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+
+ DBC *cursor;
+ r = dbs[j]->cursor(dbs[j], txn, &cursor, 0);
+ CKERR(r);
+ for(int i=0;i<NUM_ROWS;i++) {
+ r = cursor->c_get(cursor, &key, &val, DB_NEXT);
+ CKERR(r);
+ k = *(unsigned int*)key.data;
+ pkey_for_db_key = (j == 0) ? k : inv_twiddle32(k, j);
+ v = *(unsigned int*)val.data;
+ // test that we have the expected keys and values
+ assert((unsigned int)pkey_for_db_key == (unsigned int)pkey_for_val(v, j));
+// printf(" DB[%d] key = %10u, val = %10u, pkey_for_db_key = %10u, pkey_for_val=%10d\n", j, v, k, pkey_for_db_key, pkey_for_val(v, j));
+ }
+ if (verbose) {
+ printf(".");
+ fflush(stdout);
+ }
+ r = cursor->c_close(cursor);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+ if (verbose)
+ printf("\nCheck OK\n");
+}
+
+static void *expect_poll_void = &expect_poll_void;
+
+static struct progress_info {
+ double time;
+ double progress;
+} *progress_infos=NULL;
+static int progress_infos_count=0;
+
+static void test_loader(DB **dbs)
+{
+ int r;
+ DB_TXN *txn;
+
+
+ // this is the essential part of the upgrade test
+ check_results(dbs);
+
+ for (int i=0; i<NUM_DBS; i++) {
+ r = env->txn_begin(env, NULL, &txn, 0);
+ CKERR(r);
+ DB_BTREE_STAT64 stats;
+ r = dbs[i]->stat64(dbs[i], txn, &stats);
+ CKERR(r);
+ if (verbose)
+ printf("n_keys=%" PRIu64 " n_data=%" PRIu64 " dsize=%" PRIu64 " fsize=%" PRIu64 "\n",
+ stats.bt_nkeys, stats.bt_ndata, stats.bt_dsize, stats.bt_fsize);
+ assert(stats.bt_nkeys == (uint64_t)NUM_ROWS);
+ assert(stats.bt_ndata == (uint64_t)NUM_ROWS);
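+        // each row is a 4-byte key plus a 4-byte value, hence the dsize expectation below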
+ assert(stats.bt_dsize == ((uint64_t)NUM_ROWS) * 2 * sizeof(unsigned int));
+ r = txn->commit(txn, 0);
+ CKERR(r);
+ }
+}
+
+
+char *free_me = NULL;
+char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+char *tmp_subdir = "tmp.subdir";
+
+#define OLDDATADIR "../../../../tokudb.data/"
+char *db_v4_dir = OLDDATADIR "env_preload.4.2.0.loader250kd1.cleanshutdown";
+
+
+static void setup(void) {
+ int r;
+ int len = 256;
+ char syscmd[len];
+ char * src_db_dir;
+
+ src_db_dir = db_v4_dir;
+
+ r = snprintf(syscmd, len, "rm -rf %s", env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+
+ r = snprintf(syscmd, len, "cp -r %s %s", src_db_dir, env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+}
+
+
+
+
+static void run_test(void)
+{
+ int r;
+
+ setup();
+ {
+        int len = strlen(env_dir) + strlen(tmp_subdir) + 10;
+ char tmpdir[len];
+ r = snprintf(tmpdir, len, "%s/%s", env_dir, tmp_subdir);
+ assert(r<len);
+ r = db_env_create(&env, 0); CKERR(r);
+ r = env->set_tmp_dir(env, tmp_subdir); CKERR(r);
+ }
+ r = env->set_default_bt_compare(env, uint_dbt_cmp); CKERR(r);
+ if ( verbose ) printf("CACHESIZE = %d MB\n", CACHESIZE);
+ r = env->set_cachesize(env, CACHESIZE / 1024, (CACHESIZE % 1024)*1024*1024, 1); CKERR(r);
+ if (datadir) {
+ r = env->set_data_dir(env, datadir); CKERR(r);
+ }
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+ env->set_errfile(env, stderr);
+ r = env->checkpointing_set_period(env, 60); CKERR(r);
+
+ DBT desc;
+ dbt_init(&desc, "foo", sizeof("foo"));
+ char name[MAX_NAME*2];
+
+ DB **dbs = (DB**)toku_malloc(sizeof(DB*) * NUM_DBS);
+ assert(dbs != NULL);
+ int idx[MAX_DBS];
+ for(int i=0;i<NUM_DBS;i++) {
+ idx[i] = i;
+ r = db_create(&dbs[i], env, 0); CKERR(r);
+ dbs[i]->app_private = &idx[i];
+ snprintf(name, sizeof(name), "db_%04x", i);
+ r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r);
+ IN_TXN_COMMIT(env, NULL, txn_desc, 0, {
+ { int chk_r = dbs[i]->change_descriptor(dbs[i], txn_desc, &desc, 0); CKERR(chk_r); }
+ });
+ }
+
+ generate_permute_tables();
+
+ // -------------------------- //
+ test_loader(dbs);
+ // -------------------------- //
+
+ for(int i=0;i<NUM_DBS;i++) {
+        r = dbs[i]->close(dbs[i], 0); CKERR(r);
+ dbs[i] = NULL;
+ }
+ if (verbose >= 2)
+ print_engine_status(env);
+ r = env->close(env, 0); CKERR(r);
+ toku_free(dbs);
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+
+ run_test();
+ if (free_me) toku_free(free_me);
+
+ if (progress_infos) {
+ if (verbose>=2) {
+ double ratio=progress_infos[progress_infos_count-1].time/progress_infos[progress_infos_count-1].progress;
+ printf("Progress ratios:\n");
+ for (int i=0; i<progress_infos_count; i++) {
+ printf(" %5.3f\n", (progress_infos[i].time/progress_infos[i].progress)/ratio);
+ }
+ }
+ toku_free(progress_infos);
+ }
+ if (footprint_print) {
+ printf("%s:%d Hiwater=%ld water=%ld (extra hiwater=%ldM) mcount=%lld fcount=%lld\n", __FILE__, __LINE__, hiwater, water, (hiwater-hiwater_start)/(1024*1024), mcount, fcount);
+ extern void malloc_stats(void);
+ malloc_stats();
+ }
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+
+ // Must look for "-f" right away before we malloc anything.
+ for (int i=1; i<argc; i++) {
+
+ if (strcmp(argv[i], "-f")) {
+ db_env_set_func_malloc(my_malloc);
+ db_env_set_func_realloc(my_realloc);
+ db_env_set_func_free(my_free);
+ }
+ }
+
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ CACHESIZE = (toku_os_get_phys_memory_size() / (1024*1024))/2; //MB
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -d <num_dbs> -r <num_rows> [-m <megabytes>] [-M]\n%s\n", cmd);
+ fprintf(stderr, " where -d <num_dbs> is the number of dictionaries to build (primary & secondary). (Default=%d)\n", NUM_DBS);
+ fprintf(stderr, " -m <m> use m MB of memory for the cachetable (default is %d MB)\n", CACHESIZE);
+ fprintf(stderr, " -M use %d MB of memory for the cachetable\n", old_default_cachesize);
+ fprintf(stderr, " -f print memory footprint information at various points in the load\n");
+ exit(resultcode);
+ } else if (strcmp(argv[0], "-d")==0) {
+ argc--; argv++;
+ NUM_DBS = atoi(argv[0]);
+ if ( NUM_DBS > MAX_DBS ) {
+ fprintf(stderr, "max value for -d field is %d\n", MAX_DBS);
+ resultcode=1;
+ goto do_usage;
+ }
+ } else if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-f")==0) {
+ footprint_print = true;
+ } else if (strcmp(argv[0], "-r")==0) {
+ argc--; argv++;
+ NUM_ROWS = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-m")==0) {
+ argc--; argv++;
+ CACHESIZE = atoi(argv[0]);
+ } else if (strcmp(argv[0], "-M")==0) {
+ CACHESIZE = old_default_cachesize;
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/upgrade-test-7.cc b/storage/tokudb/PerconaFT/src/tests/upgrade-test-7.cc
new file mode 100644
index 00000000..53955cd2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/upgrade-test-7.cc
@@ -0,0 +1,144 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Purpose of this test is to verify that an environment created by PerconaFT 3.1.0
+// is properly rejected by the upgrade logic of PerconaFT 5.x and later.
+
+#include "test.h"
+#include "toku_pthread.h"
+#include <db.h>
+#include <sys/stat.h>
+#include "ydb-internal.h"
+
+DB_ENV *env;
+
+
+char *free_me = NULL;
+char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+char *tmp_subdir = "tmp.subdir";
+
+#define OLDDATADIR "../../../../tokudb.data/"
+char *db_v3_dir = OLDDATADIR "env_preload.3.1.0.simple.cleanshutdown";
+
+
+static void setup(void) {
+ int r;
+ int len = 256;
+ char syscmd[len];
+ char * src_db_dir;
+
+ src_db_dir = db_v3_dir;
+
+ r = snprintf(syscmd, len, "rm -rf %s", env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+
+ r = snprintf(syscmd, len, "cp -r %s %s", src_db_dir, env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+}
+
+
+
+
+static void run_test(void)
+{
+ int r;
+
+ setup();
+ {
+        int len = strlen(env_dir) + strlen(tmp_subdir) + 10;
+ char tmpdir[len];
+ r = snprintf(tmpdir, len, "%s/%s", env_dir, tmp_subdir);
+ assert(r<len);
+ r = db_env_create(&env, 0); CKERR(r);
+ //
+    // NOTE: If tmp_dir is set, an attempt to open a database created with 3.x fails with the error message:
+ // Couldn't start tokudb because some other tokudb process is using the same directory [dir.upgrade-test-7.c.tdb/tmp.subdir] for [temp]
+ //
+ // r = env->set_tmp_dir(env, tmp_subdir); CKERR(r);
+ //
+ }
+
+ int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE;
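+    // a 3.1.0 environment is too old to upgrade in place, so open() must fail
+    // with TOKUDB_DICTIONARY_TOO_OLD instead of attempting the upgrade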
+ r = env->open(env, env_dir, envflags, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR2(r, TOKUDB_DICTIONARY_TOO_OLD);
+
+ r = env->close(env, 0); CKERR(r);
+}
+
+
+// ------------ infrastructure ----------
+static void do_args(int argc, char * const argv[]);
+
+int test_main(int argc, char * const *argv) {
+ do_args(argc, argv);
+
+ run_test();
+
+ return 0;
+}
+
+static void do_args(int argc, char * const argv[]) {
+
+ int resultcode;
+ char *cmd = argv[0];
+ argc--; argv++;
+
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ if (verbose<0) verbose=0;
+ } else if (strcmp(argv[0], "-h")==0) {
+ resultcode=0;
+ do_usage:
+ fprintf(stderr, "Usage: -h -v -q\n%s\n", cmd);
+ exit(resultcode);
+ } else {
+ fprintf(stderr, "Unknown arg: %s\n", argv[0]);
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/upgrade_simple.cc b/storage/tokudb/PerconaFT/src/tests/upgrade_simple.cc
new file mode 100644
index 00000000..0fcf40a7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/upgrade_simple.cc
@@ -0,0 +1,160 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+
+/* Purpose of this test is to verify the simplest part of the upgrade logic.
+ * It starts from very simple prebuilt 4.x environments (4.1.1 and 4.2.0),
+ * each in one of two states:
+ *  - after a clean shutdown
+ *  - without a clean shutdown
+ *
+ * These environments are used to exercise the upgrade logic of 5.x:
+ * a clean 4.2.0 environment must upgrade; older or dirty environments must be rejected.
+ */
+ */
+
+
+#include "test.h"
+#include <db.h>
+
+static DB_ENV *env;
+
+#define FLAGS_NOLOG DB_INIT_LOCK|DB_INIT_MPOOL|DB_CREATE|DB_PRIVATE
+#define FLAGS_LOG FLAGS_NOLOG|DB_INIT_TXN|DB_INIT_LOG
+
+static int mode = S_IRWXU+S_IRWXG+S_IRWXO;
+
+static void test_shutdown(void);
+
+#define OLDDATADIR "../../../../tokudb.data/"
+
+static char *env_dir = TOKU_TEST_FILENAME; // the default env_dir.
+
+static char * dir_v41_clean = OLDDATADIR "env_simple.4.1.1.cleanshutdown";
+static char * dir_v42_clean = OLDDATADIR "env_simple.4.2.0.cleanshutdown";
+static char * dir_v42_dirty = OLDDATADIR "env_simple.4.2.0.dirtyshutdown";
+static char * dir_v41_dirty_multilogfile = OLDDATADIR "env_preload.4.1.1.multilog.dirtyshutdown";
+static char * dir_v42_dirty_multilogfile = OLDDATADIR "env_preload.4.2.0.multilog.dirtyshutdown";
+
+
+static void
+setup (uint32_t flags, bool clean, bool too_old, char * src_db_dir) {
+ int r;
+ int len = 256;
+ char syscmd[len];
+
+ if (env)
+ test_shutdown();
+
+ r = snprintf(syscmd, len, "rm -rf %s", env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+
+ r = snprintf(syscmd, len, "cp -r %s %s", src_db_dir, env_dir);
+ assert(r<len);
+ r = system(syscmd);
+ CKERR(r);
+
+ r=db_env_create(&env, 0);
+ CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, flags, mode);
+ if (clean)
+ CKERR(r);
+ else {
+ if (too_old)
+ CKERR2(r, TOKUDB_DICTIONARY_TOO_OLD);
+ else
+ CKERR2(r, TOKUDB_UPGRADE_FAILURE);
+ }
+}
+
+
+
+static void
+test_shutdown(void) {
+ int r;
+ r=env->close(env, 0); CKERR(r);
+ env = NULL;
+}
+
+
+static void
+test_env_startup(void) {
+ uint32_t flags;
+
+ flags = FLAGS_LOG;
+
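+    // expectations encoded in the setup() calls below: a cleanly shut down 4.2.0 env
+    // upgrades successfully, 4.1.x envs are rejected as too old, and 4.2.0 envs that
+    // were not shut down cleanly fail the upgrade with TOKUDB_UPGRADE_FAILURE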
+ setup(flags, true, false, dir_v42_clean);
+ print_engine_status(env);
+ test_shutdown();
+
+ setup(flags, false, true, dir_v41_clean);
+ print_engine_status(env);
+ test_shutdown();
+
+ setup(flags, false, false, dir_v42_dirty);
+ if (verbose) {
+ printf("\n\nEngine status after aborted env->open() will have some garbage values:\n");
+ }
+ print_engine_status(env);
+ test_shutdown();
+
+ setup(flags, false, true, dir_v41_dirty_multilogfile);
+ if (verbose) {
+ printf("\n\nEngine status after aborted env->open() will have some garbage values:\n");
+ }
+ print_engine_status(env);
+ test_shutdown();
+
+ setup(flags, false, false, dir_v42_dirty_multilogfile);
+ if (verbose) {
+ printf("\n\nEngine status after aborted env->open() will have some garbage values:\n");
+ }
+ print_engine_status(env);
+ test_shutdown();
+}
+
+
+int
+test_main (int argc, char * const argv[]) {
+ parse_args(argc, argv);
+ test_env_startup();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/xa-bigtxn-discard-abort.cc b/storage/tokudb/PerconaFT/src/tests/xa-bigtxn-discard-abort.cc
new file mode 100644
index 00000000..9f1b904d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/xa-bigtxn-discard-abort.cc
@@ -0,0 +1,209 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Verify that an abort of a prepared txn in recovery discards the rows that it inserted.
+// A checkpoint is taken after the rows are inserted and before the txn prepare.
+
+const int test_nrows = 1000000;
+
+static void create_foo(DB_ENV *env, DB_TXN *txn) {
+ int r;
+ DB *db = nullptr;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void populate_foo(DB_ENV *env, DB_TXN *txn) {
+ int r;
+ DB *db = nullptr;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, 0);
+ CKERR(r);
+
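+    // keys are stored big-endian (htonl) so that the environment's default
+    // memcmp-style ordering matches numeric order; check_foo() relies on this
+    // when it scans with DB_NEXT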
+ for (int i = 0; i < test_nrows; i++) {
+ int k = htonl(i);
+ DBT key = { .data = &k, .size = sizeof k }; DBT val = { .data = &i, .size = sizeof i };
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ }
+
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void check_foo(DB_ENV *env, DB_TXN *txn) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, 0);
+ CKERR(r);
+
+ DBC *c = nullptr;
+ r = db->cursor(db, txn, &c, 0);
+ CKERR(r);
+
+ DBT key = {}; key.flags = DB_DBT_REALLOC;
+ DBT val = {}; val.flags = DB_DBT_REALLOC;
+ int i;
+ for (i = 0; 1; i++) {
+ r = c->c_get(c, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k, v;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == (int) htonl(i));
+ assert(val.size == sizeof v);
+ memcpy(&v, val.data, val.size);
+ assert(v == i);
+ }
+ assert(i == 0); // no rows found
+ toku_free(key.data);
+ toku_free(val.data);
+
+ r = c->c_close(c);
+ CKERR(r);
+
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void create_prepared_txn(void) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_TXN *txn = nullptr;
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ create_foo(env, txn);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ populate_foo(env, txn);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
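+    // build an XA xid: formatID 0x1234 with an 8-byte gtrid followed by a 9-byte bqual;
+    // the 17 data bytes are filled in so the xid is well formed for later recovery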
+ TOKU_XA_XID xid = { 0x1234, 8, 9 };
+ for (int i = 0; i < 8+9; i++) {
+ xid.data[i] = i;
+ }
+ r = txn->xa_prepare(txn, &xid, 0);
+ CKERR(r);
+
+ // discard the txn so that we can close the env and run xa recovery later
+ r = txn->discard(txn, 0);
+ CKERR(r);
+
+ r = env->close(env, TOKUFT_DIRTY_SHUTDOWN);
+ CKERR(r);
+}
+
+static void run_xa_recovery(void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // get prepared xid
+ long count;
+ TOKU_XA_XID xid;
+ r = env->txn_xa_recover(env, &xid, 1, &count, DB_FIRST);
+ CKERR(r);
+
+ // abort it
+ DB_TXN *txn = nullptr;
+ r = env->get_txn_from_xid(env, &xid, &txn);
+ CKERR(r);
+ r = txn->abort(txn);
+ CKERR(r);
+
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ check_foo(env, txn);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+
+ // init the env directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // run the test
+ create_prepared_txn();
+ run_xa_recovery();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/xa-bigtxn-discard-commit.cc b/storage/tokudb/PerconaFT/src/tests/xa-bigtxn-discard-commit.cc
new file mode 100644
index 00000000..ecbfa18b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/xa-bigtxn-discard-commit.cc
@@ -0,0 +1,206 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Verify that a commit of a prepared txn in recovery retains the rows that it inserted.
+// A checkpoint is taken after the rows are inserted and before the txn prepare.
+
+const int test_nrows = 1000000;
+
+static void create_foo(DB_ENV *env, DB_TXN *txn) {
+ int r;
+ DB *db = nullptr;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void populate_foo(DB_ENV *env, DB_TXN *txn) {
+ int r;
+ DB *db = nullptr;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, 0);
+ CKERR(r);
+
+ for (int i = 0; i < test_nrows; i++) {
+ int k = htonl(i);
+ DBT key = { .data = &k, .size = sizeof k }; DBT val = { .data = &i, .size = sizeof i };
+ r = db->put(db, txn, &key, &val, 0);
+ CKERR(r);
+ }
+
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void check_foo(DB_ENV *env, DB_TXN *txn) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, 0, 0);
+ CKERR(r);
+
+ DBC *c = nullptr;
+ r = db->cursor(db, txn, &c, 0);
+ CKERR(r);
+
+ DBT key = {}; key.flags = DB_DBT_REALLOC;
+ DBT val = {}; val.flags = DB_DBT_REALLOC;
+ int i;
+ for (i = 0; 1; i++) {
+ r = c->c_get(c, &key, &val, DB_NEXT);
+ if (r != 0)
+ break;
+ int k, v;
+ assert(key.size == sizeof k);
+ memcpy(&k, key.data, key.size);
+ assert(k == (int) htonl(i));
+ assert(val.size == sizeof v);
+ memcpy(&v, val.data, val.size);
+ assert(v == i);
+ }
+ assert(i == test_nrows);
+ toku_free(key.data);
+ toku_free(val.data);
+
+ r = c->c_close(c);
+ CKERR(r);
+
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void create_prepared_txn(void) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_TXN *txn = nullptr;
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ create_foo(env, txn);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ populate_foo(env, txn);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
+ TOKU_XA_XID xid = { 0x1234, 8, 9 };
+ for (int i = 0; i < 8+9; i++) {
+ xid.data[i] = i;
+ }
+ r = txn->xa_prepare(txn, &xid, 0);
+ CKERR(r);
+
+ // discard the txn so that we can close the env and run xa recovery later
+ r = txn->discard(txn, 0);
+ CKERR(r);
+
+ r = env->close(env, TOKUFT_DIRTY_SHUTDOWN);
+ CKERR(r);
+}
+
+static void run_xa_recovery(void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // get prepared xid
+ long count;
+ TOKU_XA_XID xid;
+ r = env->txn_xa_recover(env, &xid, 1, &count, DB_FIRST);
+ CKERR(r);
+
+ // commit it
+ DB_TXN *txn = nullptr;
+ r = env->get_txn_from_xid(env, &xid, &txn);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ check_foo(env, txn);
+
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+
+ // init the env directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // run the test
+ create_prepared_txn();
+ run_xa_recovery();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/xa-dirty-commit.cc b/storage/tokudb/PerconaFT/src/tests/xa-dirty-commit.cc
new file mode 100644
index 00000000..f198202c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/xa-dirty-commit.cc
@@ -0,0 +1,141 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Verify that a commit of a prepared txn in recovery retains a db that was created by it.
+// The rollback file is dirty when the environment is closed.
+
+static void create_foo(DB_ENV *env, DB_TXN *txn) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void check_foo(DB_ENV *env) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, 0);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void create_prepared_txn(void) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_TXN *txn = nullptr;
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ create_foo(env, txn);
+
+ TOKU_XA_XID xid = { 0x1234, 8, 9 };
+ for (int i = 0; i < 8+9; i++) {
+ xid.data[i] = i;
+ }
+ r = txn->xa_prepare(txn, &xid, 0);
+ CKERR(r);
+
+ // discard the txn so that we can close the env and run xa recovery later
+ r = txn->discard(txn, 0);
+ CKERR(r);
+
+ r = env->close(env, TOKUFT_DIRTY_SHUTDOWN);
+ CKERR(r);
+}
+
+static void run_xa_recovery(void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // get prepared xid
+ long count;
+ TOKU_XA_XID xid;
+ r = env->txn_xa_recover(env, &xid, 1, &count, DB_FIRST);
+ CKERR(r);
+
+ // commit it
+ DB_TXN *txn = nullptr;
+ r = env->get_txn_from_xid(env, &xid, &txn);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ check_foo(env);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+
+ // init the env directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // run the test
+ create_prepared_txn();
+ run_xa_recovery();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/xa-dirty-rollback.cc b/storage/tokudb/PerconaFT/src/tests/xa-dirty-rollback.cc
new file mode 100644
index 00000000..e23dcb50
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/xa-dirty-rollback.cc
@@ -0,0 +1,141 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Verify that an abort of a prepared txn in recovery deletes a db created by it.
+// The rollback file is dirty when the environment is closed.
+
+static void create_foo(DB_ENV *env, DB_TXN *txn) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void check_foo(DB_ENV *env) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, 0);
+ CKERR2(r, ENOENT);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void create_prepared_txn(void) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_TXN *txn = nullptr;
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ create_foo(env, txn);
+
+ TOKU_XA_XID xid = { 0x1234, 8, 9 };
+ for (int i = 0; i < 8+9; i++) {
+ xid.data[i] = i;
+ }
+ r = txn->xa_prepare(txn, &xid, 0);
+ CKERR(r);
+
+ // discard the txn so that we can close the env and run xa recovery later
+ r = txn->discard(txn, 0);
+ CKERR(r);
+
+ r = env->close(env, TOKUFT_DIRTY_SHUTDOWN);
+ CKERR(r);
+}
+
+static void run_xa_recovery(void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // get prepared xid
+ long count;
+ TOKU_XA_XID xid;
+ r = env->txn_xa_recover(env, &xid, 1, &count, DB_FIRST);
+ CKERR(r);
+
+ // abort it
+ DB_TXN *txn = nullptr;
+ r = env->get_txn_from_xid(env, &xid, &txn);
+ CKERR(r);
+ r = txn->abort(txn);
+ CKERR(r);
+
+ check_foo(env);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+
+ // init the env directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // run the test
+ create_prepared_txn();
+ run_xa_recovery();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/xa-txn-discard-abort.cc b/storage/tokudb/PerconaFT/src/tests/xa-txn-discard-abort.cc
new file mode 100644
index 00000000..3496ef49
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/xa-txn-discard-abort.cc
@@ -0,0 +1,143 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Verify that an abort of a prepared txn in recovery removes a db created by it.
+// A checkpoint is taken between the db creation and the txn prepare.
+
+static void create_foo(DB_ENV *env, DB_TXN *txn) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void check_foo(DB_ENV *env) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, 0);
+ CKERR2(r, ENOENT);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void create_prepared_txn(void) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_TXN *txn = nullptr;
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ create_foo(env, txn);
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
+ TOKU_XA_XID xid = { 0x1234, 8, 9 };
+ for (int i = 0; i < 8+9; i++) {
+ xid.data[i] = i;
+ }
+ r = txn->xa_prepare(txn, &xid, 0);
+ CKERR(r);
+
+ // discard the txn so that we can close the env and run xa recovery later
+ r = txn->discard(txn, 0);
+ CKERR(r);
+
+ r = env->close(env, TOKUFT_DIRTY_SHUTDOWN);
+ CKERR(r);
+}
+
+static void run_xa_recovery(void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // get prepared xid
+ long count;
+ TOKU_XA_XID xid;
+ r = env->txn_xa_recover(env, &xid, 1, &count, DB_FIRST);
+ CKERR(r);
+
+ // abort it
+ DB_TXN *txn = nullptr;
+ r = env->get_txn_from_xid(env, &xid, &txn);
+ CKERR(r);
+ r = txn->abort(txn);
+ CKERR(r);
+
+ check_foo(env);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+
+ // init the env directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // run the test
+ create_prepared_txn();
+ run_xa_recovery();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/xa-txn-discard-commit.cc b/storage/tokudb/PerconaFT/src/tests/xa-txn-discard-commit.cc
new file mode 100644
index 00000000..1d0f63c3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/xa-txn-discard-commit.cc
@@ -0,0 +1,144 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+// Verify that a commit of a prepared txn in recovery retains a db created by it.
+// A checkpoint is taken between the db creation and the txn prepare.
+
+static void create_foo(DB_ENV *env, DB_TXN *txn) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void check_foo(DB_ENV *env) {
+ int r;
+ DB *db;
+ r = db_create(&db, env, 0);
+ CKERR(r);
+ r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, 0);
+ CKERR(r);
+ r = db->close(db, 0);
+ CKERR(r);
+}
+
+static void create_prepared_txn(void) {
+ int r;
+
+ DB_ENV *env = nullptr;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ DB_TXN *txn = nullptr;
+ r = env->txn_begin(env, nullptr, &txn, 0);
+ CKERR(r);
+
+ create_foo(env, txn);
+
+ r = env->txn_checkpoint(env, 0, 0, 0);
+ CKERR(r);
+
+ TOKU_XA_XID xid = { 0x1234, 8, 9 };
+ for (int i = 0; i < 8+9; i++) {
+ xid.data[i] = i;
+ }
+ r = txn->xa_prepare(txn, &xid, 0);
+ CKERR(r);
+
+ // discard the txn so that we can close the env and run xa recovery later
+ r = txn->discard(txn, 0);
+ CKERR(r);
+
+ r = env->close(env, TOKUFT_DIRTY_SHUTDOWN);
+ CKERR(r);
+}
+
+static void run_xa_recovery(void) {
+ int r;
+
+ DB_ENV *env;
+ r = db_env_create(&env, 0);
+ CKERR(r);
+ r = env->open(env, TOKU_TEST_FILENAME,
+ DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER,
+ S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // get prepared xid
+ long count;
+ TOKU_XA_XID xid;
+ r = env->txn_xa_recover(env, &xid, 1, &count, DB_FIRST);
+ CKERR(r);
+
+ // commit it
+ DB_TXN *txn = nullptr;
+ r = env->get_txn_from_xid(env, &xid, &txn);
+ CKERR(r);
+ r = txn->commit(txn, 0);
+ CKERR(r);
+
+ check_foo(env);
+
+ r = env->close(env, 0);
+ CKERR(r);
+}
+
+int test_main (int argc, char *const argv[]) {
+ default_parse_args(argc, argv);
+
+ // init the env directory
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO);
+ CKERR(r);
+
+ // run the test
+ create_prepared_txn();
+ run_xa_recovery();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/tests/zombie_db.cc b/storage/tokudb/PerconaFT/src/tests/zombie_db.cc
new file mode 100644
index 00000000..12534887
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/tests/zombie_db.cc
@@ -0,0 +1,158 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Purpose of this test is to verify correct behavior of
+ * zombie dbs.
+ *
+ * A db is destroyed when it is closed by the user and there are no txns using it.
+ * If a transaction creates a db and then commits, that leaves an open db with
+ * no transaction associated with it. If another transaction then uses the db
+ * and closes it, that leaves a zombie db: the db is closed, but it
+ * cannot be destroyed because a transaction is still associated with it
+ * (not the transaction that created it).
+ *
+ * Outline of this test:
+ *
+ * begin txn_a
+ * create db for new dictionary "foo"
+ * commit txn_a
+ * => leaves open db with no txn
+ * (releases range lock on "foo" dname in directory)
+ *
+ * begin txn_b
+ * insert into db
+ * close db
+ * => leaves zombie db, held open by txn_b
+ *
+ *
+ * create txn_c
+ *
+ * test1:
+ * try to delete dictionary (env->dbremove(foo))
+ *    should return DB_LOCK_NOTGRANTED because txn_b is holding a range lock on some part of
+ * the dictionary ("foo") referred to by db
+ *
+ * test2:
+ * try to rename dictionary (env->dbrename(foo->bar))
+ *    should return DB_LOCK_NOTGRANTED because txn_b is holding a range lock on some part of
+ * the dictionary ("foo") referred to by db
+ *
+ */
+
+#include "test.h"
+#include <db.h>
+
+static DB_ENV *env;
+static DB * db;
+
+static void
+setup (void) {
+ int r;
+ toku_os_recursive_delete(TOKU_TEST_FILENAME);
+ r=toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+
+ r=db_env_create(&env, 0); CKERR(r);
+ env->set_errfile(env, stderr);
+ r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
+}
+
+
+
+static void
+test_shutdown(void) {
+ int r;
+ r=env->close(env, 0); CKERR(r);
+}
+
+static void
+test_zombie_db(void) {
+ int r;
+ DBT key, val;
+ DB_TXN * txn_b;
+
+ r=env->txn_begin(env, 0, &txn_b, 0); CKERR(r);
+
+ {
+ DB_TXN * txn_a;
+ dbt_init(&key, "key1", 4);
+ dbt_init(&val, "val1", 4);
+
+ r=env->txn_begin(env, 0, &txn_a, 0); CKERR(r);
+ r=db_create(&db, env, 0); CKERR(r);
+ r=db->open(db, txn_a, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU|S_IRWXG|S_IRWXO); CKERR(r);
+ r=db->put(db, txn_a, &key, &val, 0); CKERR(r);
+ r=txn_a->commit(txn_a, 0); CKERR(r);
+ }
+
+ // db is now open with no associated txn
+
+ {
+ dbt_init(&key, "key2", 4);
+ dbt_init(&val, "val2", 4);
+
+ r = db->put(db, txn_b, &key, &val, 0); CKERR(r);
+ r=db->close(db, 0); CKERR(r);
+ }
+
+ // db is now closed, but cannot be destroyed until txn_b closes
+ // db is now a zombie
+
+ {
+ DB_TXN * txn_c;
+
+ r=env->txn_begin(env, 0, &txn_c, 0); CKERR(r);
+ r = env->dbremove(env, txn_c, "foo.db", NULL, 0);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r = env->dbrename(env, txn_c, "foo.db", NULL, "bar.db", 0);
+ CKERR2(r, DB_LOCK_NOTGRANTED);
+ r=txn_c->commit(txn_c, 0); CKERR(r);
+ }
+
+ r=txn_b->commit(txn_b, 0); CKERR(r);
+
+ // db should now be destroyed
+}
+
+int
+test_main (int argc, char *const argv[]) {
+ parse_args(argc, argv);
+ setup();
+ test_zombie_db();
+ test_shutdown();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/src/toku_patent.cc b/storage/tokudb/PerconaFT/src/toku_patent.cc
new file mode 100644
index 00000000..c2b01c64
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/toku_patent.cc
@@ -0,0 +1,66 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+const char *toku_patent_string = "\
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.\n\
+\n\
+ PerconaFT is free software: you can redistribute it and/or modify\n\
+ it under the terms of the GNU General Public License, version 2,\n\
+ as published by the Free Software Foundation.\n\
+\n\
+ PerconaFT is distributed in the hope that it will be useful,\n\
+ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
+ GNU General Public License for more details.\n\
+\n\
+ You should have received a copy of the GNU General Public License\n\
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.\n\
+\n\
+----------------------------------------\n\
+\n\
+ PerconaFT is free software: you can redistribute it and/or modify\n\
+ it under the terms of the GNU Affero General Public License, version 3,\n\
+ as published by the Free Software Foundation.\n\
+\n\
+ PerconaFT is distributed in the hope that it will be useful,\n\
+ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
+ GNU Affero General Public License for more details.\n\
+\n\
+ You should have received a copy of the GNU Affero General Public License\n\
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.\n\\";
diff --git a/storage/tokudb/PerconaFT/src/ydb-internal.h b/storage/tokudb/PerconaFT/src/ydb-internal.h
new file mode 100644
index 00000000..db204109
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb-internal.h
@@ -0,0 +1,283 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+#include <limits.h>
+
+#include <ft/cachetable/cachetable.h>
+#include <ft/cursor.h>
+#include <ft/comparator.h>
+#include <ft/logger/logger.h>
+#include <ft/txn/txn.h>
+
+#include <util/growable_array.h>
+#include <util/minicron.h>
+#include <util/omt.h>
+
+#include <locktree/locktree.h>
+#include <locktree/range_buffer.h>
+
+#include <toku_list.h>
+
+struct __toku_db_internal {
+ int opened;
+ uint32_t open_flags;
+ int open_mode;
+ FT_HANDLE ft_handle;
+ DICTIONARY_ID dict_id; // unique identifier used by locktree logic
+ toku::locktree *lt;
+ struct simple_dbt skey, sval; // static key and value
+ bool key_compare_was_set; // true if a comparison function was provided before call to db->open() (if false, use environment's comparison function).
+ char *dname; // dname is constant for this handle (handle must be closed before file is renamed)
+ DB_INDEXER *indexer;
+};
+
+int toku_db_set_indexer(DB *db, DB_INDEXER *indexer);
+DB_INDEXER *toku_db_get_indexer(DB *db);
+
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1
+typedef void (*toku_env_errcall_t)(const char *, char *);
+#elif DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3
+typedef void (*toku_env_errcall_t)(const DB_ENV *, const char *, const char *);
+#else
+#error
+#endif
+
+struct __toku_db_env_internal {
+ int is_panicked; // if nonzero, then it is an error number
+ char *panic_string;
+ uint32_t open_flags;
+ int open_mode;
+ toku_env_errcall_t errcall;
+ void *errfile;
+ const char *errpfx;
+ char *dir; /* A malloc'd copy of the directory. */
+ char *tmp_dir;
+ char *lg_dir;
+ char *data_dir;
+ int (*bt_compare) (DB *, const DBT *, const DBT *);
+ int (*update_function)(DB *, const DBT *key, const DBT *old_val, const DBT *extra, void (*set_val)(const DBT *new_val, void *set_extra), void *set_extra);
+ generate_row_for_put_func generate_row_for_put;
+ generate_row_for_del_func generate_row_for_del;
+
+ unsigned long cachetable_size;
+ unsigned long client_pool_threads;
+ unsigned long cachetable_pool_threads;
+ unsigned long checkpoint_pool_threads;
+ CACHETABLE cachetable;
+ TOKULOGGER logger;
+ toku::locktree_manager ltm;
+ lock_timeout_callback lock_wait_timeout_callback; // Called when a lock request times out waiting for a lock.
+ lock_wait_callback lock_wait_needed_callback; // Called when a lock request requires a wait.
+
+ DB *directory; // Maps dnames to inames
+ DB *persistent_environment; // Stores environment settings, can be used for upgrade
+ toku::omt<DB *> *open_dbs_by_dname; // Stores open db handles, sorted first by dname and then by numerical value of pointer to the db (arbitrarily assigned memory location)
+ toku::omt<DB *> *open_dbs_by_dict_id; // Stores open db handles, sorted by dictionary id and then by numerical value of pointer to the db (arbitrarily assigned memory location)
+ toku_pthread_rwlock_t open_dbs_rwlock; // rwlock that protects the OMT of open dbs.
+
+ char *real_data_dir; // data dir used when the env is opened (relative to cwd, or absolute with leading /)
+ char *real_log_dir; // log dir used when the env is opened (relative to cwd, or absolute with leading /)
+ char *real_tmp_dir; // tmp dir used for temporary files (relative to cwd, or absolute with leading /)
+
+ fs_redzone_state fs_state;
+ uint64_t fs_seq; // how many times has fs_poller run?
+ uint64_t last_seq_entered_red;
+ uint64_t last_seq_entered_yellow;
+ int redzone; // percent of total fs space that marks boundary between yellow and red zones
+ int enospc_redzone_ctr; // number of operations rejected by enospc prevention (red zone)
+ int fs_poll_time; // Time in seconds between statfs calls
+ struct minicron fs_poller; // Poll the file systems
+ bool fs_poller_is_init;
+ uint32_t fsync_log_period_ms;
+ bool fsync_log_cron_is_init;
+ struct minicron fsync_log_cron; // fsync recovery log
+ int envdir_lockfd;
+ int datadir_lockfd;
+ int logdir_lockfd;
+ int tmpdir_lockfd;
+ bool check_thp; // if set check if transparent huge pages are disabled
+ bool dir_per_db;
+ uint64_t (*get_loader_memory_size_callback)(void);
+ uint64_t default_lock_timeout_msec;
+ uint64_t (*get_lock_timeout_callback)(uint64_t default_lock_timeout_msec);
+ uint64_t default_killed_time_msec;
+ uint64_t (*get_killed_time_callback)(uint64_t default_killed_time_msec);
+ int (*killed_callback)(void);
+};
+
+// test-only environment function for running lock escalation
+static inline void toku_env_run_lock_escalation_for_test(DB_ENV *env) {
+ toku::locktree_manager *mgr = &env->i->ltm;
+ mgr->run_escalation_for_test();
+}
+
+// Common error handling macros and panic detection
+#define MAYBE_RETURN_ERROR(cond, status) if (cond) return status;
+#define HANDLE_PANICKED_ENV(env) if (toku_env_is_panicked(env)) { sleep(1); return EINVAL; }
+#define HANDLE_PANICKED_DB(db) HANDLE_PANICKED_ENV(db->dbenv)
+
+// Only commit/abort/prelock (which are used by handlerton) are allowed when a child exists.
+#define HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn) \
+ MAYBE_RETURN_ERROR(((txn) && db_txn_struct_i(txn)->child), \
+ toku_ydb_do_error((env), \
+ EINVAL, \
+ "%s: Transaction cannot do work when child exists\n", __FUNCTION__))
+
+#define HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn) \
+ HANDLE_ILLEGAL_WORKING_PARENT_TXN((db)->dbenv, txn)
+
+#define HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c) \
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN((c)->dbp, dbc_struct_i(c)->txn)
+
+// Bail out if we get unknown flags
+#define HANDLE_EXTRA_FLAGS(env, flags_to_function, allowed_flags) \
+ MAYBE_RETURN_ERROR((env) && ((flags_to_function) & ~(allowed_flags)), \
+ toku_ydb_do_error((env), \
+ EINVAL, \
+ "Unknown flags (%" PRIu32 ") in " __FILE__ ":%s(): %d\n", (flags_to_function) & ~(allowed_flags), __FUNCTION__, __LINE__))
+
+int toku_ydb_check_avail_fs_space(DB_ENV *env);
+
+void toku_ydb_error_all_cases(const DB_ENV * env,
+ int error,
+ bool include_stderrstring,
+ bool use_stderr_if_nothing_else,
+ const char *fmt, va_list ap)
+ __attribute__((format (printf, 5, 0)))
+ __attribute__((__visibility__("default"))); // this is needed by the C++ interface.
+
+int toku_ydb_do_error (const DB_ENV *dbenv, int error, const char *string, ...)
+ __attribute__((__format__(__printf__, 3, 4)));
+
+/* Environment related errors */
+int toku_env_is_panicked(DB_ENV *dbenv);
+void toku_env_err(const DB_ENV * env, int error, const char *fmt, ...)
+ __attribute__((__format__(__printf__, 3, 4)));
+
+typedef enum __toku_isolation_level {
+ TOKU_ISO_SERIALIZABLE=0,
+ TOKU_ISO_SNAPSHOT=1,
+ TOKU_ISO_READ_COMMITTED=2,
+ TOKU_ISO_READ_UNCOMMITTED=3,
+ TOKU_ISO_READ_COMMITTED_ALWAYS=4
+} TOKU_ISOLATION;
+
+// needed in ydb_db.c
+#define DB_ISOLATION_FLAGS (DB_READ_COMMITTED | DB_READ_COMMITTED_ALWAYS | DB_READ_UNCOMMITTED | DB_TXN_SNAPSHOT | DB_SERIALIZABLE | DB_INHERIT_ISOLATION)
+
+struct txn_lock_range {
+ DBT left;
+ DBT right;
+};
+
+struct txn_lt_key_ranges {
+ toku::locktree *lt;
+ toku::range_buffer *buffer;
+};
+
+struct __toku_db_txn_internal {
+ struct tokutxn *tokutxn;
+ uint32_t flags;
+ TOKU_ISOLATION iso;
+ DB_TXN *child;
+ toku_mutex_t txn_mutex;
+
+ // maps a locktree to a buffer of key ranges that are locked.
+ // it is protected by the txn_mutex, so hot indexing and a client
+ // thread can concurrently operate on this txn.
+ toku::omt<txn_lt_key_ranges> lt_map;
+};
+
+struct __toku_db_txn_external {
+ struct __toku_db_txn external_part;
+ struct __toku_db_txn_internal internal_part;
+};
+#define db_txn_struct_i(x) (&((struct __toku_db_txn_external *)x)->internal_part)
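+// db_txn_struct_i maps a public DB_TXN pointer to its internal part by casting to the
+// external/internal wrapper struct defined above.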
+
+struct __toku_dbc_internal {
+ struct ft_cursor ftcursor;
+ DB_TXN *txn;
+ TOKU_ISOLATION iso;
+ struct simple_dbt skey_s,sval_s;
+ struct simple_dbt *skey,*sval;
+
+ // if the rmw flag is asserted, cursor operations (like set) grab write
+ // locks instead of read locks
+ // the rmw flag is set when the cursor is created with the DB_RMW flag set
+ bool rmw;
+ bool locking_read;
+};
+
+static_assert(
+ sizeof(__toku_dbc_internal) <= sizeof(((DBC *)nullptr)->_internal),
+ "__toku_dbc_internal doesn't fit in the internal portion of a DBC");
+
+static inline __toku_dbc_internal *dbc_struct_i(DBC *c) {
+ union dbc_union {
+ __toku_dbc_internal *dbc_internal;
+ char *buf;
+ } u;
+ u.buf = c->_internal;
+ return u.dbc_internal;
+}
+
+static inline struct ft_cursor *dbc_ftcursor(DBC *c) {
+ return &dbc_struct_i(c)->ftcursor;
+}
+
+static inline int
+env_opened(DB_ENV *env) {
+ return env->i->cachetable != 0;
+}
+
+static inline bool
+txn_is_read_only(DB_TXN* txn) {
+ if (txn && (db_txn_struct_i(txn)->flags & DB_TXN_READ_ONLY)) {
+ return true;
+ }
+ return false;
+}
+
+#define HANDLE_READ_ONLY_TXN(txn) if(txn_is_read_only(txn)) return EINVAL;
+
+void env_panic(DB_ENV * env, int cause, const char * msg);
+void env_note_db_opened(DB_ENV *env, DB *db);
+void env_note_db_closed(DB_ENV *env, DB *db);
diff --git a/storage/tokudb/PerconaFT/src/ydb.cc b/storage/tokudb/PerconaFT/src/ydb.cc
new file mode 100644
index 00000000..1378c05b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb.cc
@@ -0,0 +1,3510 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+extern const char *toku_patent_string;
+const char *toku_copyright_string = "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.";
+
+#include <my_global.h>
+
+extern int writing_rollback;
+
+#include <db.h>
+#include <errno.h>
+#include <string.h>
+
+#include "portability/memory.h"
+#include "portability/toku_assert.h"
+#include "portability/toku_portability.h"
+#include "portability/toku_pthread.h"
+#include "portability/toku_stdlib.h"
+
+#include "ft/ft-flusher.h"
+#include "ft/cachetable/cachetable.h"
+#include "ft/cachetable/checkpoint.h"
+#include "ft/logger/log.h"
+#include "ft/loader/loader.h"
+#include "ft/log_header.h"
+#include "ft/ft.h"
+#include "ft/txn/txn_manager.h"
+#include "src/ydb.h"
+#include "src/ydb-internal.h"
+#include "src/ydb_cursor.h"
+#include "src/ydb_row_lock.h"
+#include "src/ydb_env_func.h"
+#include "src/ydb_db.h"
+#include "src/ydb_write.h"
+#include "src/ydb_txn.h"
+#include "src/loader.h"
+#include "src/indexer.h"
+#include "util/status.h"
+#include "util/context.h"
+
+#include <functional>
+
+// Include ydb_lib.cc here so that its constructor/destructor gets put into
+// ydb.o, to make sure they don't get erased at link time (when linking to
+// a static libtokufractaltree.a that was compiled with gcc). See #5094.
+#include "ydb_lib.cc"
+
+#ifdef TOKUTRACE
+ #define DB_ENV_CREATE_FUN db_env_create_toku10
+ #define DB_CREATE_FUN db_create_toku10
+#else
+ #define DB_ENV_CREATE_FUN db_env_create
+ #define DB_CREATE_FUN db_create
+ int toku_set_trace_file (const char *fname __attribute__((__unused__))) { return 0; }
+ int toku_close_trace_file (void) { return 0; }
+#endif
+
+extern uint force_recovery;
+
+// Set when env is panicked, never cleared.
+static int env_is_panicked = 0;
+
+void
+env_panic(DB_ENV * env, int cause, const char * msg) {
+ if (cause == 0)
+ cause = -1; // if unknown cause, at least guarantee panic
+ if (msg == NULL)
+ msg = "Unknown cause in env_panic\n";
+ env_is_panicked = cause;
+ env->i->is_panicked = cause;
+ env->i->panic_string = toku_strdup(msg);
+}
+
+static int env_get_engine_status_num_rows (DB_ENV * UU(env), uint64_t * num_rowsp);
+
+/********************************************************************************
+ * Status is intended for display to humans to help understand system behavior.
+ * It does not need to be perfectly thread-safe.
+ */
+
+typedef enum {
+ YDB_LAYER_TIME_CREATION = 0, /* timestamp of environment creation, read from persistent environment */
+ YDB_LAYER_TIME_STARTUP, /* timestamp of system startup */
+ YDB_LAYER_TIME_NOW, /* timestamp of engine status query */
+ YDB_LAYER_NUM_DB_OPEN,
+ YDB_LAYER_NUM_DB_CLOSE,
+ YDB_LAYER_NUM_OPEN_DBS,
+ YDB_LAYER_MAX_OPEN_DBS,
+ YDB_LAYER_FSYNC_LOG_PERIOD,
+#if 0
+ YDB_LAYER_ORIGINAL_ENV_VERSION, /* version of original environment, read from persistent environment */
+ YDB_LAYER_STARTUP_ENV_VERSION, /* version of environment at this startup, read from persistent environment (curr_env_ver_key) */
+ YDB_LAYER_LAST_LSN_OF_V13, /* read from persistent environment */
+ YDB_LAYER_UPGRADE_V14_TIME, /* timestamp of upgrade to version 14, read from persistent environment */
+ YDB_LAYER_UPGRADE_V14_FOOTPRINT, /* footprint of upgrade to version 14, read from persistent environment */
+#endif
+ YDB_LAYER_STATUS_NUM_ROWS /* number of rows in this status array */
+} ydb_layer_status_entry;
+
+typedef struct {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[YDB_LAYER_STATUS_NUM_ROWS];
+} YDB_LAYER_STATUS_S, *YDB_LAYER_STATUS;
+
+static YDB_LAYER_STATUS_S ydb_layer_status;
+#define STATUS_VALUE(x) ydb_layer_status.status[x].value.num
+
+#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ydb_layer_status, k, c, t, l, inc)
+
+static void
+ydb_layer_status_init (void) {
+ // Note, this function initializes the keyname, type, and legend fields.
+ // Value fields are initialized to zero by the compiler.
+
+ STATUS_INIT(YDB_LAYER_TIME_CREATION, nullptr, UNIXTIME, "time of environment creation", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_TIME_STARTUP, nullptr, UNIXTIME, "time of engine startup", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_TIME_NOW, nullptr, UNIXTIME, "time now", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_DB_OPEN, DB_OPENS, UINT64, "db opens", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_DB_CLOSE, DB_CLOSES, UINT64, "db closes", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_OPEN_DBS, DB_OPEN_CURRENT, UINT64, "num open dbs now", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ STATUS_INIT(YDB_LAYER_MAX_OPEN_DBS, DB_OPEN_MAX, UINT64, "max open dbs", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ STATUS_INIT(YDB_LAYER_FSYNC_LOG_PERIOD, nullptr, UINT64, "period, in ms, that recovery log is automatically fsynced", TOKU_ENGINE_STATUS);
+
+ STATUS_VALUE(YDB_LAYER_TIME_STARTUP) = time(NULL);
+ ydb_layer_status.initialized = true;
+}
+#undef STATUS_INIT
+
+static void
+ydb_layer_get_status(DB_ENV* env, YDB_LAYER_STATUS statp) {
+ STATUS_VALUE(YDB_LAYER_TIME_NOW) = time(NULL);
+ STATUS_VALUE(YDB_LAYER_FSYNC_LOG_PERIOD) = toku_minicron_get_period_in_ms_unlocked(&env->i->fsync_log_cron);
+ *statp = ydb_layer_status;
+}
+
+/********************************************************************************
+ * End of ydb_layer local status section.
+ */
+
+static DB_ENV * volatile most_recent_env; // most recently opened env, used for engine status on crash. Note there are likely to be races on this if multiple threads create and close environments in parallel. We declare it volatile since that at least helps make sure the compiler doesn't optimize away certain code (e.g., if, while debugging, you write code that spins on most_recent_env, you'd like the compiler not to optimize it away.)
+
+static int env_get_iname(DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt);
+static int toku_maybe_get_engine_status_text (char* buff, int buffsize); // for use by toku_assert
+static int toku_maybe_err_engine_status (void);
+static void toku_maybe_set_env_panic(int code, const char * msg); // for use by toku_assert
+
+int
+toku_ydb_init(void) {
+ int r = 0;
+ //Lower level must be initialized first.
+ r = toku_ft_layer_init();
+ return r;
+}
+
+// Do not clean up resources if env is panicked, just exit ugly
+void
+toku_ydb_destroy(void) {
+ if (!ydb_layer_status.initialized)
+ return;
+ if (env_is_panicked == 0) {
+ toku_ft_layer_destroy();
+ }
+ ydb_layer_status.initialized = false;
+}
+
+static int
+ydb_getf_do_nothing(DBT const* UU(key), DBT const* UU(val), void* UU(extra)) {
+ return 0;
+}
+
+/* env methods */
+
+static void
+env_fs_report_in_yellow(DB_ENV *UU(env)) {
+ char tbuf[26];
+ time_t tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT file system space is low\n", ctime_r(&tnow, tbuf)); fflush(stderr);
+}
+
+static void
+env_fs_report_in_red(DB_ENV *UU(env)) {
+ char tbuf[26];
+ time_t tnow = time(NULL);
+ fprintf(stderr, "%.24s PerconaFT file system space is really low and access is restricted\n", ctime_r(&tnow, tbuf)); fflush(stderr);
+}
+
+static inline uint64_t
+env_fs_redzone(DB_ENV *env, uint64_t total) {
+ return total * env->i->redzone / 100;
+}
+
+#define ZONEREPORTLIMIT 12
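+// A report is issued on entering a zone only if at least ZONEREPORTLIMIT poller iterations
+// have passed since that zone was last entered, or shortly after startup.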
+// Check the available space in the file systems used by tokuft and erect barriers when available space gets low.
+static int
+env_fs_poller(void *arg) {
+ if(force_recovery == 6) {
+ return 0;
+ }
+ DB_ENV *env = (DB_ENV *) arg;
+ int r;
+
+ int in_yellow; // set true to issue warning to user
+ int in_red; // set true to prevent certain operations (returning ENOSPC)
+
+ // get the fs sizes for the home dir
+ uint64_t avail_size = 0, total_size = 0;
+ r = toku_get_filesystem_sizes(env->i->dir, &avail_size, NULL, &total_size);
+ assert(r == 0);
+ in_yellow = (avail_size < 2 * env_fs_redzone(env, total_size));
+ in_red = (avail_size < env_fs_redzone(env, total_size));
+
+ // get the fs sizes for the data dir if different than the home dir
+ if (strcmp(env->i->dir, env->i->real_data_dir) != 0) {
+ r = toku_get_filesystem_sizes(env->i->real_data_dir, &avail_size, NULL, &total_size);
+ assert(r == 0);
+ in_yellow += (avail_size < 2 * env_fs_redzone(env, total_size));
+ in_red += (avail_size < env_fs_redzone(env, total_size));
+ }
+
+ // get the fs sizes for the log dir if different than the home dir and data dir
+ if (strcmp(env->i->dir, env->i->real_log_dir) != 0 && strcmp(env->i->real_data_dir, env->i->real_log_dir) != 0) {
+ r = toku_get_filesystem_sizes(env->i->real_log_dir, &avail_size, NULL, &total_size);
+ assert(r == 0);
+ in_yellow += (avail_size < 2 * env_fs_redzone(env, total_size));
+ in_red += (avail_size < env_fs_redzone(env, total_size));
+ }
+
+ env->i->fs_seq++; // how many times through this polling loop?
+ uint64_t now = env->i->fs_seq;
+
+ // Don't issue report if we have not been out of this fs_state for a while, unless we're at system startup
+ switch (env->i->fs_state) {
+ case FS_RED:
+ if (!in_red) {
+ if (in_yellow) {
+ env->i->fs_state = FS_YELLOW;
+ } else {
+ env->i->fs_state = FS_GREEN;
+ }
+ }
+ break;
+ case FS_YELLOW:
+ if (in_red) {
+ if ((now - env->i->last_seq_entered_red > ZONEREPORTLIMIT) || (now < ZONEREPORTLIMIT))
+ env_fs_report_in_red(env);
+ env->i->fs_state = FS_RED;
+ env->i->last_seq_entered_red = now;
+ } else if (!in_yellow) {
+ env->i->fs_state = FS_GREEN;
+ }
+ break;
+ case FS_GREEN:
+ if (in_red) {
+ if ((now - env->i->last_seq_entered_red > ZONEREPORTLIMIT) || (now < ZONEREPORTLIMIT))
+ env_fs_report_in_red(env);
+ env->i->fs_state = FS_RED;
+ env->i->last_seq_entered_red = now;
+ } else if (in_yellow) {
+ if ((now - env->i->last_seq_entered_yellow > ZONEREPORTLIMIT) || (now < ZONEREPORTLIMIT))
+ env_fs_report_in_yellow(env);
+ env->i->fs_state = FS_YELLOW;
+ env->i->last_seq_entered_yellow = now;
+ }
+ break;
+ default:
+ assert(0);
+ }
+ return 0;
+}
+#undef ZONEREPORTLIMIT
+
+static void
+env_fs_init(DB_ENV *env) {
+ env->i->fs_state = FS_GREEN;
+ env->i->fs_poll_time = 5; // seconds
+ env->i->redzone = 5; // percent of total space
+ env->i->fs_poller_is_init = false;
+}
+
+// Initialize the minicron that polls file system space
+static int
+env_fs_init_minicron(DB_ENV *env) {
+ if(force_recovery == 6) {
+ return 0;
+ }
+ int r = toku_minicron_setup(&env->i->fs_poller, env->i->fs_poll_time*1000, env_fs_poller, env);
+ if (r == 0)
+ env->i->fs_poller_is_init = true;
+ return r;
+}
+
+// Destroy the file system space minicron
+static void
+env_fs_destroy(DB_ENV *env) {
+ if (env->i->fs_poller_is_init) {
+ int r = toku_minicron_shutdown(&env->i->fs_poller);
+ assert(r == 0);
+ env->i->fs_poller_is_init = false;
+ }
+}
+
+static int
+env_fsync_log_on_minicron(void *arg) {
+ DB_ENV *env = (DB_ENV *) arg;
+ int r = env->log_flush(env, 0);
+ assert(r == 0);
+ return 0;
+}
+
+static void
+env_fsync_log_init(DB_ENV *env) {
+ env->i->fsync_log_period_ms = 0;
+ env->i->fsync_log_cron_is_init = false;
+}
+
+static void UU()
+env_change_fsync_log_period(DB_ENV* env, uint32_t period_ms) {
+ env->i->fsync_log_period_ms = period_ms;
+ if (env->i->fsync_log_cron_is_init) {
+ toku_minicron_change_period(&env->i->fsync_log_cron, period_ms);
+ }
+}
+
+static int
+env_fsync_log_cron_init(DB_ENV *env) {
+ int r = toku_minicron_setup(&env->i->fsync_log_cron, env->i->fsync_log_period_ms, env_fsync_log_on_minicron, env);
+ if (r == 0)
+ env->i->fsync_log_cron_is_init = true;
+ return r;
+}
+
+static void
+env_fsync_log_cron_destroy(DB_ENV *env) {
+ if (env->i->fsync_log_cron_is_init) {
+ int r = toku_minicron_shutdown(&env->i->fsync_log_cron);
+ assert(r == 0);
+ env->i->fsync_log_cron_is_init = false;
+ }
+}
+
+static void
+env_setup_real_dir(DB_ENV *env, char **real_dir, const char *nominal_dir) {
+ toku_free(*real_dir);
+ *real_dir = NULL;
+
+ assert(env->i->dir);
+ if (nominal_dir)
+ *real_dir = toku_construct_full_name(2, env->i->dir, nominal_dir);
+ else
+ *real_dir = toku_strdup(env->i->dir);
+}
+
+static void
+env_setup_real_data_dir(DB_ENV *env) {
+ env_setup_real_dir(env, &env->i->real_data_dir, env->i->data_dir);
+}
+
+static void
+env_setup_real_log_dir(DB_ENV *env) {
+ env_setup_real_dir(env, &env->i->real_log_dir, env->i->lg_dir);
+}
+
+static void
+env_setup_real_tmp_dir(DB_ENV *env) {
+ env_setup_real_dir(env, &env->i->real_tmp_dir, env->i->tmp_dir);
+}
+
+static void keep_cachetable_callback (DB_ENV *env, CACHETABLE cachetable)
+{
+ env->i->cachetable = cachetable;
+}
+
+static int
+ydb_do_recovery (DB_ENV *env) {
+ assert(env->i->real_log_dir);
+ int r = tokuft_recover(env,
+ toku_keep_prepared_txn_callback,
+ keep_cachetable_callback,
+ env->i->logger,
+ env->i->dir, env->i->real_log_dir, env->i->bt_compare,
+ env->i->update_function,
+ env->i->generate_row_for_put, env->i->generate_row_for_del,
+ env->i->cachetable_size);
+ return r;
+}
+
+static int
+needs_recovery (DB_ENV *env) {
+ assert(env->i->real_log_dir);
+ int recovery_needed = tokuft_needs_recovery(env->i->real_log_dir, true);
+ return recovery_needed ? DB_RUNRECOVERY : 0;
+}
+
+static int toku_env_txn_checkpoint(DB_ENV * env, uint32_t kbyte, uint32_t min, uint32_t flags);
+
+// Keys used in persistent environment dictionary:
+// Following keys added in version 12
+static const char * orig_env_ver_key = "original_version";
+static const char * curr_env_ver_key = "current_version";
+// Following keys added in version 14, add more keys for future versions
+static const char * creation_time_key = "creation_time";
+
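+// The following helpers format keys such as "upgrade_v<version>_time" into static buffers,
+// so each returned pointer is only valid until the next call and the helpers are not thread-safe.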
+static char * get_upgrade_time_key(int version) {
+ static char upgrade_time_key[sizeof("upgrade_v_time") + 12];
+ {
+ int n;
+ n = snprintf(upgrade_time_key, sizeof(upgrade_time_key), "upgrade_v%d_time", version);
+ assert(n >= 0 && n < (int)sizeof(upgrade_time_key));
+ }
+ return &upgrade_time_key[0];
+}
+
+static char * get_upgrade_footprint_key(int version) {
+ static char upgrade_footprint_key[sizeof("upgrade_v_footprint") + 12];
+ {
+ int n;
+ n = snprintf(upgrade_footprint_key, sizeof(upgrade_footprint_key), "upgrade_v%d_footprint", version);
+ assert(n >= 0 && n < (int)sizeof(upgrade_footprint_key));
+ }
+ return &upgrade_footprint_key[0];
+}
+
+static char * get_upgrade_last_lsn_key(int version) {
+ static char upgrade_last_lsn_key[sizeof("upgrade_v_last_lsn") + 12];
+ {
+ int n;
+ n = snprintf(upgrade_last_lsn_key, sizeof(upgrade_last_lsn_key), "upgrade_v%d_last_lsn", version);
+ assert(n >= 0 && n < (int)sizeof(upgrade_last_lsn_key));
+ }
+ return &upgrade_last_lsn_key[0];
+}
+
+// Values read from (or written into) persistent environment,
+// kept here for read-only access from engine status.
+// Note, persistent_upgrade_status info is separate in part to simplify its exclusion from engine status until relevant.
+typedef enum {
+ PERSISTENT_UPGRADE_ORIGINAL_ENV_VERSION = 0,
+ PERSISTENT_UPGRADE_STORED_ENV_VERSION_AT_STARTUP, // read from curr_env_ver_key, prev version as of this startup
+ PERSISTENT_UPGRADE_LAST_LSN_OF_V13,
+ PERSISTENT_UPGRADE_V14_TIME,
+ PERSISTENT_UPGRADE_V14_FOOTPRINT,
+ PERSISTENT_UPGRADE_STATUS_NUM_ROWS
+} persistent_upgrade_status_entry;
+
+typedef struct {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[PERSISTENT_UPGRADE_STATUS_NUM_ROWS];
+} PERSISTENT_UPGRADE_STATUS_S, *PERSISTENT_UPGRADE_STATUS;
+
+static PERSISTENT_UPGRADE_STATUS_S persistent_upgrade_status;
+
+#define PERSISTENT_UPGRADE_STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(persistent_upgrade_status, k, c, t, "upgrade: " l, inc)
+
+static void
+persistent_upgrade_status_init (void) {
+ // Note, this function initializes the keyname, type, and legend fields.
+ // Value fields are initialized to zero by compiler.
+
+ PERSISTENT_UPGRADE_STATUS_INIT(PERSISTENT_UPGRADE_ORIGINAL_ENV_VERSION, nullptr, UINT64, "original version (at time of environment creation)", TOKU_ENGINE_STATUS);
+ PERSISTENT_UPGRADE_STATUS_INIT(PERSISTENT_UPGRADE_STORED_ENV_VERSION_AT_STARTUP, nullptr, UINT64, "version at time of startup", TOKU_ENGINE_STATUS);
+ PERSISTENT_UPGRADE_STATUS_INIT(PERSISTENT_UPGRADE_LAST_LSN_OF_V13, nullptr, UINT64, "last LSN of version 13", TOKU_ENGINE_STATUS);
+ PERSISTENT_UPGRADE_STATUS_INIT(PERSISTENT_UPGRADE_V14_TIME, nullptr, UNIXTIME, "time of upgrade to version 14", TOKU_ENGINE_STATUS);
+ PERSISTENT_UPGRADE_STATUS_INIT(PERSISTENT_UPGRADE_V14_FOOTPRINT, nullptr, UINT64, "footprint from version 13 to 14", TOKU_ENGINE_STATUS);
+ persistent_upgrade_status.initialized = true;
+}
+
+#define PERSISTENT_UPGRADE_STATUS_VALUE(x) persistent_upgrade_status.status[x].value.num
+
+// Requires: persistent environment dictionary is already open.
+// Input arg is lsn of clean shutdown of previous version,
+// or ZERO_LSN if no upgrade or if crash between log upgrade and here.
+// NOTE: To maintain compatibility with previous versions, do not change the
+// format of any information stored in the persistent environment dictionary.
+// For example, some values are stored as 32 bits, even though they are immediately
+// converted to 64 bits when read. Do not change them to be stored as 64 bits.
+//
+static int
+maybe_upgrade_persistent_environment_dictionary(DB_ENV * env, DB_TXN * txn, LSN last_lsn_of_clean_shutdown_read_from_log) {
+ int r;
+ DBT key, val;
+ DB *persistent_environment = env->i->persistent_environment;
+
+ if (!persistent_upgrade_status.initialized)
+ persistent_upgrade_status_init();
+
+ toku_fill_dbt(&key, curr_env_ver_key, strlen(curr_env_ver_key));
+ toku_init_dbt(&val);
+ r = toku_db_get(persistent_environment, txn, &key, &val, 0);
+ assert(r == 0);
+ uint32_t stored_env_version = toku_dtoh32(*(uint32_t*)val.data);
+ PERSISTENT_UPGRADE_STATUS_VALUE(PERSISTENT_UPGRADE_STORED_ENV_VERSION_AT_STARTUP) = stored_env_version;
+ if (stored_env_version > FT_LAYOUT_VERSION)
+ r = TOKUDB_DICTIONARY_TOO_NEW;
+ else if (stored_env_version < FT_LAYOUT_MIN_SUPPORTED_VERSION)
+ r = TOKUDB_DICTIONARY_TOO_OLD;
+ else if (stored_env_version < FT_LAYOUT_VERSION) {
+ const uint32_t curr_env_ver_d = toku_htod32(FT_LAYOUT_VERSION);
+ toku_fill_dbt(&key, curr_env_ver_key, strlen(curr_env_ver_key));
+ toku_fill_dbt(&val, &curr_env_ver_d, sizeof(curr_env_ver_d));
+ r = toku_db_put(persistent_environment, txn, &key, &val, 0, false);
+ assert_zero(r);
+
+ time_t upgrade_time_d = toku_htod64(time(NULL));
+ uint64_t upgrade_footprint_d = toku_htod64(toku_log_upgrade_get_footprint());
+ uint64_t upgrade_last_lsn_d = toku_htod64(last_lsn_of_clean_shutdown_read_from_log.lsn);
+ for (int version = stored_env_version+1; version <= FT_LAYOUT_VERSION; version++) {
+ uint32_t put_flag = DB_NOOVERWRITE;
+ if (version <= FT_LAYOUT_VERSION_19) {
+ // See #5902.
+ // To prevent a crash (and any higher-complexity code) we simply
+ // do not overwrite anything that already exists.
+ // It is not necessarily an error for these keys to already exist for versions <= 19.
+ // For versions > 19 a pre-existing key IS an error, so DB_NOOVERWRITE stays in effect.
+ put_flag = DB_NOOVERWRITE_NO_ERROR;
+ }
+
+ char* upgrade_time_key = get_upgrade_time_key(version);
+ toku_fill_dbt(&key, upgrade_time_key, strlen(upgrade_time_key));
+ toku_fill_dbt(&val, &upgrade_time_d, sizeof(upgrade_time_d));
+ r = toku_db_put(persistent_environment, txn, &key, &val, put_flag, false);
+ assert_zero(r);
+
+ char* upgrade_footprint_key = get_upgrade_footprint_key(version);
+ toku_fill_dbt(&key, upgrade_footprint_key, strlen(upgrade_footprint_key));
+ toku_fill_dbt(&val, &upgrade_footprint_d, sizeof(upgrade_footprint_d));
+ r = toku_db_put(persistent_environment, txn, &key, &val, put_flag, false);
+ assert_zero(r);
+
+ char* upgrade_last_lsn_key = get_upgrade_last_lsn_key(version);
+ toku_fill_dbt(&key, upgrade_last_lsn_key, strlen(upgrade_last_lsn_key));
+ toku_fill_dbt(&val, &upgrade_last_lsn_d, sizeof(upgrade_last_lsn_d));
+ r = toku_db_put(persistent_environment, txn, &key, &val, put_flag, false);
+ assert_zero(r);
+ }
+
+ }
+ return r;
+}
+
+// Capture contents of persistent_environment dictionary so that it can be read by engine status
+static void
+capture_persistent_env_contents (DB_ENV * env, DB_TXN * txn) {
+ int r;
+ DBT key, val;
+ DB *persistent_environment = env->i->persistent_environment;
+
+ toku_fill_dbt(&key, curr_env_ver_key, strlen(curr_env_ver_key));
+ toku_init_dbt(&val);
+ r = toku_db_get(persistent_environment, txn, &key, &val, 0);
+ assert_zero(r);
+ uint32_t curr_env_version = toku_dtoh32(*(uint32_t*)val.data);
+ assert(curr_env_version == FT_LAYOUT_VERSION);
+
+ toku_fill_dbt(&key, orig_env_ver_key, strlen(orig_env_ver_key));
+ toku_init_dbt(&val);
+ r = toku_db_get(persistent_environment, txn, &key, &val, 0);
+ assert_zero(r);
+ uint64_t persistent_original_env_version = toku_dtoh32(*(uint32_t*)val.data);
+ PERSISTENT_UPGRADE_STATUS_VALUE(PERSISTENT_UPGRADE_ORIGINAL_ENV_VERSION) = persistent_original_env_version;
+ assert(persistent_original_env_version <= curr_env_version);
+
+ // make no assertions about timestamps, clock may have been reset
+ if (persistent_original_env_version >= FT_LAYOUT_VERSION_14) {
+ toku_fill_dbt(&key, creation_time_key, strlen(creation_time_key));
+ toku_init_dbt(&val);
+ r = toku_db_get(persistent_environment, txn, &key, &val, 0);
+ assert_zero(r);
+ STATUS_VALUE(YDB_LAYER_TIME_CREATION) = toku_dtoh64((*(time_t*)val.data));
+ }
+
+ if (persistent_original_env_version != curr_env_version) {
+ // an upgrade was performed at some time, capture info about the upgrade
+
+ char * last_lsn_key = get_upgrade_last_lsn_key(curr_env_version);
+ toku_fill_dbt(&key, last_lsn_key, strlen(last_lsn_key));
+ toku_init_dbt(&val);
+ r = toku_db_get(persistent_environment, txn, &key, &val, 0);
+ assert_zero(r);
+ PERSISTENT_UPGRADE_STATUS_VALUE(PERSISTENT_UPGRADE_LAST_LSN_OF_V13) = toku_dtoh64(*(uint64_t*)val.data);
+
+ char * time_key = get_upgrade_time_key(curr_env_version);
+ toku_fill_dbt(&key, time_key, strlen(time_key));
+ toku_init_dbt(&val);
+ r = toku_db_get(persistent_environment, txn, &key, &val, 0);
+ assert_zero(r);
+ PERSISTENT_UPGRADE_STATUS_VALUE(PERSISTENT_UPGRADE_V14_TIME) = toku_dtoh64(*(time_t*)val.data);
+
+ char * footprint_key = get_upgrade_footprint_key(curr_env_version);
+ toku_fill_dbt(&key, footprint_key, strlen(footprint_key));
+ toku_init_dbt(&val);
+ r = toku_db_get(persistent_environment, txn, &key, &val, 0);
+ assert_zero(r);
+ PERSISTENT_UPGRADE_STATUS_VALUE(PERSISTENT_UPGRADE_V14_FOOTPRINT) = toku_dtoh64(*(uint64_t*)val.data);
+ }
+
+}
+
+// return 0 if log exists or ENOENT if log does not exist
+static int
+ydb_recover_log_exists(DB_ENV *env) {
+ int r = tokuft_recover_log_exists(env->i->real_log_dir);
+ return r;
+}
+
+// Validate that all required files are present; no side effects.
+// Return 0 if all is well, ENOENT if some files are present but at least one is
+// missing, or another non-zero value if some other error occurs.
+// Set *valid_newenv if creating a new environment (all files missing).
+// (Note: if the special dictionaries exist, then they were created transactionally
+// and the log should exist.)
+static int validate_env(DB_ENV *env,
+ bool *valid_newenv,
+ bool need_rollback_cachefile) {
+ int r;
+ bool expect_newenv = false; // set true if we expect to create a new env
+ toku_struct_stat buf;
+ char *path = NULL;
+
+ // Test for persistent environment
+ path = toku_construct_full_name(
+ 2, env->i->dir, toku_product_name_strings.environmentdictionary);
+ assert(path);
+ r = toku_stat(path, &buf, toku_uninstrumented);
+ if (r == 0) {
+ expect_newenv = false; // persistent info exists
+ } else {
+ int stat_errno = get_error_errno();
+ if (stat_errno == ENOENT) {
+ expect_newenv = true;
+ r = 0;
+ } else {
+ r = toku_ydb_do_error(
+ env,
+ stat_errno,
+ "Unable to access persistent environment [%s] in [%s]\n",
+ toku_product_name_strings.environmentdictionary,
+ env->i->dir);
+ assert(r);
+ }
+ }
+ toku_free(path);
+
+ // Test for existence of rollback cachefile if it is expected to exist
+ if (r == 0 && need_rollback_cachefile) {
+ path = toku_construct_full_name(
+ 2, env->i->dir, toku_product_name_strings.rollback_cachefile);
+ assert(path);
+ r = toku_stat(path, &buf, toku_uninstrumented);
+ if (r == 0) {
+ if (expect_newenv) // rollback cachefile exists, but persistent env
+ // is missing
+ r = toku_ydb_do_error(
+ env,
+ ENOENT,
+ "Persistent environment is missing while looking for "
+ "rollback cachefile [%s] in [%s]\n",
+ toku_product_name_strings.rollback_cachefile, env->i->dir);
+ } else {
+ int stat_errno = get_error_errno();
+ if (stat_errno == ENOENT) {
+ if (!expect_newenv) // rollback cachefile is missing but
+ // persistent env exists
+ r = toku_ydb_do_error(
+ env,
+ ENOENT,
+ "rollback cachefile [%s] is missing from [%s]\n",
+ toku_product_name_strings.rollback_cachefile,
+ env->i->dir);
+ else
+ r = 0; // both rollback cachefile and persistent env are
+ // missing
+ } else {
+ r = toku_ydb_do_error(
+ env,
+ stat_errno,
+ "Unable to access rollback cachefile [%s] in [%s]\n",
+ toku_product_name_strings.rollback_cachefile,
+ env->i->dir);
+ assert(r);
+ }
+ }
+ toku_free(path);
+ }
+
+ // Test for fileops directory
+ if (r == 0 && force_recovery != 6) {
+ path = toku_construct_full_name(
+ 2, env->i->dir, toku_product_name_strings.fileopsdirectory);
+ assert(path);
+ r = toku_stat(path, &buf, toku_uninstrumented);
+ if (r == 0) {
+ if (expect_newenv) // fileops directory exists, but persistent env
+ // is missing
+ r = toku_ydb_do_error(
+ env,
+ ENOENT,
+ "Persistent environment is missing while looking for "
+ "fileops directory [%s] in [%s]\n",
+ toku_product_name_strings.fileopsdirectory,
+ env->i->dir);
+ } else {
+ int stat_errno = get_error_errno();
+ if (stat_errno == ENOENT) {
+ if (!expect_newenv) // fileops directory is missing but
+ // persistent env exists
+ r = toku_ydb_do_error(
+ env,
+ ENOENT,
+ "Fileops directory [%s] is missing from [%s]\n",
+ toku_product_name_strings.fileopsdirectory,
+ env->i->dir);
+ else
+ r = 0; // both fileops directory and persistent env are
+ // missing
+ } else {
+ r = toku_ydb_do_error(
+ env,
+ stat_errno,
+ "Unable to access fileops directory [%s] in [%s]\n",
+ toku_product_name_strings.fileopsdirectory,
+ env->i->dir);
+ assert(r);
+ }
+ }
+ toku_free(path);
+ }
+
+ // Test for recovery log
+ if ((r == 0) && (env->i->open_flags & DB_INIT_LOG) && force_recovery != 6) {
+ // if using transactions, test for existence of log
+ r = ydb_recover_log_exists(env); // return 0 or ENOENT
+ if (expect_newenv && (r != ENOENT))
+ r = toku_ydb_do_error(env,
+ ENOENT,
+ "Persistent environment information is "
+ "missing (but log exists) while looking for "
+ "recovery log files in [%s]\n",
+ env->i->real_log_dir);
+ else if (!expect_newenv && r == ENOENT)
+ r = toku_ydb_do_error(env,
+ ENOENT,
+ "Recovery log is missing (persistent "
+ "environment information is present) while "
+ "looking for recovery log files in [%s]\n",
+ env->i->real_log_dir);
+ else
+ r = 0;
+ }
+
+ if (r == 0)
+ *valid_newenv = expect_newenv;
+ else
+ *valid_newenv = false;
+ return r;
+}
+
+// The version of the environment (on disk) is the version of the recovery log.
+// If the recovery log is of the current version, then there is no upgrade to be done.
+// If the recovery log is of an old version, then replacing it with a new recovery log
+// of the current version is how the upgrade is done.
+// Note, the upgrade procedure takes a checkpoint, so we must release the ydb lock.
+static int
+ydb_maybe_upgrade_env (DB_ENV *env, LSN * last_lsn_of_clean_shutdown_read_from_log, bool * upgrade_in_progress) {
+ int r = 0;
+ if (env->i->open_flags & DB_INIT_TXN && env->i->open_flags & DB_INIT_LOG) {
+ r = toku_maybe_upgrade_log(env->i->dir, env->i->real_log_dir, last_lsn_of_clean_shutdown_read_from_log, upgrade_in_progress);
+ }
+ return r;
+}
+
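+// Release the single-process lock files held on the environment, data, log,
+// and tmp directories.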
+static void
+unlock_single_process(DB_ENV *env) {
+ int r;
+ r = toku_single_process_unlock(&env->i->envdir_lockfd);
+ lazy_assert_zero(r);
+ r = toku_single_process_unlock(&env->i->datadir_lockfd);
+ lazy_assert_zero(r);
+ r = toku_single_process_unlock(&env->i->logdir_lockfd);
+ lazy_assert_zero(r);
+ r = toku_single_process_unlock(&env->i->tmpdir_lockfd);
+ lazy_assert_zero(r);
+}
+
+// Open the environment.
+// If this is a new environment, then create the necessary files.
+// Return 0 on success, ENOENT if any of the expected necessary files are missing.
+// (The set of necessary files is defined in the function validate_env() above.)
+static int
+env_open(DB_ENV * env, const char *home, uint32_t flags, int mode) {
+
+ if(force_recovery == 6) {
+ {
+ const int len = strlen(toku_product_name_strings.rollback_cachefile);
+ toku_product_name_strings.rollback_cachefile[len] = '2';
+ toku_product_name_strings.rollback_cachefile[len+1] = 0;
+ }
+
+ {
+ const int len = strlen(toku_product_name_strings.single_process_lock);
+ toku_product_name_strings.single_process_lock[len] = '2';
+ toku_product_name_strings.single_process_lock[len+1] = 0;
+ }
+
+ {
+ const int len = strlen(toku_product_name_strings.environmentdictionary);
+ toku_product_name_strings.environmentdictionary[len] = '2';
+ toku_product_name_strings.environmentdictionary[len+1] = 0;
+ }
+ }
+
+ HANDLE_PANICKED_ENV(env);
+ int r;
+ bool newenv; // true iff creating a new environment
+ uint32_t unused_flags=flags;
+ CHECKPOINTER cp;
+ DB_TXN *txn = NULL;
+
+ if (env_opened(env)) {
+ r = toku_ydb_do_error(env, EINVAL, "The environment is already open\n");
+ goto cleanup;
+ }
+
+ if (env->get_check_thp(env) && toku_os_huge_pages_enabled()) {
+ r = toku_ydb_do_error(env, TOKUDB_HUGE_PAGES_ENABLED,
+ "Huge pages are enabled, disable them before continuing\n");
+ goto cleanup;
+ }
+
+ most_recent_env = NULL;
+
+ assert(sizeof(time_t) == sizeof(uint64_t));
+
+ HANDLE_EXTRA_FLAGS(env, flags,
+ DB_CREATE|DB_PRIVATE|DB_INIT_LOG|DB_INIT_TXN|DB_RECOVER|DB_INIT_MPOOL|DB_INIT_LOCK|DB_THREAD);
+
+ // DB_CREATE means create the env if it does not exist, and PerconaFT requires it because
+ // PerconaFT requires DB_PRIVATE.
+ if ((flags & DB_PRIVATE) && !(flags & DB_CREATE)) {
+ r = toku_ydb_do_error(env, ENOENT, "DB_PRIVATE requires DB_CREATE (seems gratuitous to us, but that's BDB's behavior)\n");
+ goto cleanup;
+ }
+
+ if (!(flags & DB_PRIVATE)) {
+ r = toku_ydb_do_error(env, ENOENT, "PerconaFT requires DB_PRIVATE\n");
+ goto cleanup;
+ }
+
+ if ((flags & DB_INIT_LOG) && !(flags & DB_INIT_TXN)) {
+ r = toku_ydb_do_error(env, EINVAL, "PerconaFT requires transactions for logging\n");
+ goto cleanup;
+ }
+
+ if (!home) home = ".";
+
+ // Verify that the home exists.
+ toku_struct_stat buf;
+ r = toku_stat(home, &buf, toku_uninstrumented);
+ if (r != 0) {
+ int e = get_error_errno();
+ r = toku_ydb_do_error(
+ env, e, "Error from toku_stat(\"%s\",...)\n", home);
+ goto cleanup;
+ }
+ unused_flags &= ~DB_PRIVATE;
+
+ if (env->i->dir) {
+ toku_free(env->i->dir);
+ }
+ env->i->dir = toku_strdup(home);
+ if (env->i->dir == 0) {
+ r = toku_ydb_do_error(env, ENOMEM, "Out of memory\n");
+ goto cleanup;
+ }
+ env->i->open_flags = flags;
+ env->i->open_mode = mode;
+
+ // Instrumentation probe start
+ TOKU_PROBE_START(toku_instr_probe_1);
+
+ env_setup_real_data_dir(env);
+ env_setup_real_log_dir(env);
+ env_setup_real_tmp_dir(env);
+
+ // Instrumentation probe stop
+ toku_instr_probe_1->stop();
+
+ r = toku_single_process_lock(
+ env->i->dir, "environment", &env->i->envdir_lockfd);
+ if (r != 0)
+ goto cleanup;
+ r = toku_single_process_lock(
+ env->i->real_data_dir, "data", &env->i->datadir_lockfd);
+ if (r!=0) goto cleanup;
+ r = toku_single_process_lock(env->i->real_log_dir, "logs", &env->i->logdir_lockfd);
+ if (r!=0) goto cleanup;
+ r = toku_single_process_lock(env->i->real_tmp_dir, "temp", &env->i->tmpdir_lockfd);
+ if (r!=0) goto cleanup;
+
+ bool need_rollback_cachefile;
+ need_rollback_cachefile = false;
+ if (flags & (DB_INIT_TXN | DB_INIT_LOG) && force_recovery != 6) {
+ need_rollback_cachefile = true;
+ }
+
+ ydb_layer_status_init(); // do this before possibly upgrading, so upgrade work is counted in status counters
+
+ LSN last_lsn_of_clean_shutdown_read_from_log;
+ last_lsn_of_clean_shutdown_read_from_log = ZERO_LSN;
+ bool upgrade_in_progress;
+ upgrade_in_progress = false;
+ r = ydb_maybe_upgrade_env(env, &last_lsn_of_clean_shutdown_read_from_log, &upgrade_in_progress);
+ if (r!=0) goto cleanup;
+
+ if (upgrade_in_progress || force_recovery == 6) {
+ // Delete old rollback file. There was a clean shutdown, so it has nothing useful,
+ // and there is no value in upgrading it. It is simpler to just create a new one.
+ char* rollback_filename = toku_construct_full_name(2, env->i->dir, toku_product_name_strings.rollback_cachefile);
+ assert(rollback_filename);
+ r = unlink(rollback_filename);
+ if (r != 0) {
+ assert(get_error_errno() == ENOENT);
+ }
+ toku_free(rollback_filename);
+ need_rollback_cachefile = false; // we're not expecting it to exist now
+ }
+
+ r = validate_env(env, &newenv, need_rollback_cachefile); // make sure that environment is either new or complete
+ if (r != 0) goto cleanup;
+
+ unused_flags &= ~DB_INIT_TXN & ~DB_INIT_LOG;
+
+ if(force_recovery == 6) {
+ flags |= DB_INIT_LOG | DB_INIT_TXN;
+ }
+
+ // do recovery only if there exists a log and recovery is requested
+ // otherwise, a log is created when the logger is opened later
+ if (!newenv && force_recovery == 0) {
+ if (flags & DB_INIT_LOG) {
+ // the log does exist
+ if (flags & DB_RECOVER) {
+ r = ydb_do_recovery(env);
+ if (r != 0) goto cleanup;
+ } else {
+ // the log is required to have clean shutdown if recovery is not requested
+ r = needs_recovery(env);
+ if (r != 0) goto cleanup;
+ }
+ }
+ }
+
+ toku_loader_cleanup_temp_files(env);
+
+ if (flags & (DB_INIT_TXN | DB_INIT_LOG)) {
+ assert(env->i->logger);
+ toku_logger_write_log_files(env->i->logger, (bool)((flags & DB_INIT_LOG) != 0));
+ if (!toku_logger_is_open(env->i->logger)) {
+ r = toku_logger_open(env->i->real_log_dir, env->i->logger);
+ if (r!=0) {
+ toku_ydb_do_error(env, r, "Could not open logger\n");
+ }
+ }
+ } else {
+ r = toku_logger_close(&env->i->logger); // if no logging system, then kill the logger
+ assert_zero(r);
+ }
+
+ unused_flags &= ~DB_INIT_MPOOL; // we always init an mpool.
+ unused_flags &= ~DB_CREATE; // we always do DB_CREATE
+ unused_flags &= ~DB_INIT_LOCK; // we check this later (e.g. in db->open)
+ unused_flags &= ~DB_RECOVER;
+
+// This is probably correct, but it will be a pain...
+// if ((flags & DB_THREAD)==0) {
+// r = toku_ydb_do_error(env, EINVAL, "PerconaFT requires DB_THREAD");
+// goto cleanup;
+// }
+ unused_flags &= ~DB_THREAD;
+
+ if (unused_flags!=0) {
+ r = toku_ydb_do_error(env, EINVAL, "Extra flags not understood by tokuft: %u\n", unused_flags);
+ goto cleanup;
+ }
+
+ if (env->i->cachetable==NULL) {
+ // If we ran recovery then the cachetable should be set here.
+ r = toku_cachetable_create_ex(&env->i->cachetable, env->i->cachetable_size,
+ env->i->client_pool_threads,
+ env->i->cachetable_pool_threads,
+ env->i->checkpoint_pool_threads,
+ ZERO_LSN, env->i->logger);
+ if (r != 0) {
+ r = toku_ydb_do_error(env, r, "Can't create a cachetable\n");
+ goto cleanup;
+ }
+ }
+
+ toku_cachetable_set_env_dir(env->i->cachetable, env->i->dir);
+
+ int using_txns;
+ using_txns = env->i->open_flags & DB_INIT_TXN;
+ if (env->i->logger) {
+ // if this is a newborn env or if this is an upgrade, then create a brand new rollback file
+ assert (using_txns);
+ toku_logger_set_cachetable(env->i->logger, env->i->cachetable);
+ if (!toku_logger_rollback_is_open(env->i->logger)) {
+ bool create_new_rollback_file = newenv | upgrade_in_progress | (force_recovery == 6);
+ r = toku_logger_open_rollback(env->i->logger, env->i->cachetable, create_new_rollback_file);
+ if (r != 0) {
+ r = toku_ydb_do_error(env, r, "Can't open rollback\n");
+ goto cleanup;
+ }
+ }
+ }
+
+ if (using_txns) {
+ r = toku_txn_begin(env, 0, &txn, 0);
+ assert_zero(r);
+ }
+
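+ // Open (or create) the persistent environment dictionary. For a new
+ // environment, record the original/current layout version and the creation
+ // time; for an existing one, upgrade its contents if the layout version
+ // changed, then capture its contents for engine status.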
+ {
+ r = toku_db_create(&env->i->persistent_environment, env, 0);
+ assert_zero(r);
+ r = toku_db_use_builtin_key_cmp(env->i->persistent_environment);
+ assert_zero(r);
+ writing_rollback++;
+ r = toku_db_open_iname(env->i->persistent_environment, txn, toku_product_name_strings.environmentdictionary, DB_CREATE, mode);
+ if (r != 0) {
+ r = toku_ydb_do_error(env, r, "Can't open persistent env\n");
+ goto cleanup;
+ }
+ if (newenv) {
+ // create new persistent_environment
+ DBT key, val;
+ uint32_t persistent_original_env_version = FT_LAYOUT_VERSION;
+ const uint32_t environment_version = toku_htod32(persistent_original_env_version);
+
+ toku_fill_dbt(&key, orig_env_ver_key, strlen(orig_env_ver_key));
+ toku_fill_dbt(&val, &environment_version, sizeof(environment_version));
+ r = toku_db_put(env->i->persistent_environment, txn, &key, &val, 0, false);
+ assert_zero(r);
+
+ toku_fill_dbt(&key, curr_env_ver_key, strlen(curr_env_ver_key));
+ toku_fill_dbt(&val, &environment_version, sizeof(environment_version));
+ r = toku_db_put(env->i->persistent_environment, txn, &key, &val, 0, false);
+ assert_zero(r);
+
+ time_t creation_time_d = toku_htod64(time(NULL));
+ toku_fill_dbt(&key, creation_time_key, strlen(creation_time_key));
+ toku_fill_dbt(&val, &creation_time_d, sizeof(creation_time_d));
+ r = toku_db_put(env->i->persistent_environment, txn, &key, &val, 0, false);
+ assert_zero(r);
+ }
+ else {
+ r = maybe_upgrade_persistent_environment_dictionary(env, txn, last_lsn_of_clean_shutdown_read_from_log);
+ assert_zero(r);
+ }
+ capture_persistent_env_contents(env, txn);
+ writing_rollback--;
+ }
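+ // Open (or create) the directory dictionary, which maps dictionary names
+ // (dnames) to internal file names (inames).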
+ {
+ r = toku_db_create(&env->i->directory, env, 0);
+ assert_zero(r);
+ r = toku_db_use_builtin_key_cmp(env->i->directory);
+ assert_zero(r);
+ r = toku_db_open_iname(env->i->directory, txn, toku_product_name_strings.fileopsdirectory, DB_CREATE, mode);
+ if (r != 0) {
+ r = toku_ydb_do_error(env, r, "Can't open %s\n", toku_product_name_strings.fileopsdirectory);
+ goto cleanup;
+ }
+ }
+ if (using_txns) {
+ r = locked_txn_commit(txn, 0);
+ assert_zero(r);
+ txn = NULL;
+ }
+ cp = toku_cachetable_get_checkpointer(env->i->cachetable);
+ if (!force_recovery) {
+ r = toku_checkpoint(cp, env->i->logger, NULL, NULL, NULL, NULL, STARTUP_CHECKPOINT);
+ }
+ writing_rollback--;
+ env_fs_poller(env); // get the file system state at startup
+ r = env_fs_init_minicron(env);
+ if (r != 0) {
+ r = toku_ydb_do_error(env, r, "Can't create fs minicron\n");
+ goto cleanup;
+ }
+ r = env_fsync_log_cron_init(env);
+ if (r != 0) {
+ r = toku_ydb_do_error(env, r, "Can't create fsync log minicron\n");
+ goto cleanup;
+ }
+cleanup:
+ if (r!=0) {
+ if (txn) {
+ locked_txn_abort(txn);
+ }
+ if (env && env->i) {
+ unlock_single_process(env);
+ }
+ }
+ if (r == 0) {
+ set_errno(0); // tabula rasa. If there's a crash after env was successfully opened, no misleading errno will have been left around by this code.
+ most_recent_env = env;
+ uint64_t num_rows;
+ env_get_engine_status_num_rows(env, &num_rows);
+ toku_assert_set_fpointers(toku_maybe_get_engine_status_text, toku_maybe_err_engine_status, toku_maybe_set_env_panic, num_rows);
+ }
+ return r;
+}
+
+static int
+env_close(DB_ENV * env, uint32_t flags) {
+ int r = 0;
+ const char * err_msg = NULL;
+ bool clean_shutdown = true;
+
+ if (flags & TOKUFT_DIRTY_SHUTDOWN) {
+ clean_shutdown = false;
+ flags &= ~TOKUFT_DIRTY_SHUTDOWN;
+ }
+
+ most_recent_env = NULL; // Set most_recent_env to NULL so that we don't have a dangling pointer (and if there's an error, the toku assert code would try to look at the env.)
+
+ // if panicked, or if any open transactions, or any open dbs, then do nothing.
+
+ if (toku_env_is_panicked(env)) {
+ goto panic_and_quit_early;
+ }
+ if (env->i->logger && toku_logger_txns_exist(env->i->logger)) {
+ err_msg = "Cannot close environment due to open transactions\n";
+ r = toku_ydb_do_error(env, EINVAL, "%s", err_msg);
+ goto panic_and_quit_early;
+ }
+ if (env->i->open_dbs_by_dname) { //Verify that there are no open dbs.
+ if (env->i->open_dbs_by_dname->size() > 0) {
+ err_msg = "Cannot close environment due to open DBs\n";
+ r = toku_ydb_do_error(env, EINVAL, "%s", err_msg);
+ goto panic_and_quit_early;
+ }
+ }
+ if (env->i->persistent_environment) {
+ r = toku_db_close(env->i->persistent_environment);
+ if (r) {
+ err_msg = "Cannot close persistent environment dictionary (DB->close error)\n";
+ toku_ydb_do_error(env, r, "%s", err_msg);
+ goto panic_and_quit_early;
+ }
+ }
+ if (env->i->directory) {
+ r = toku_db_close(env->i->directory);
+ if (r) {
+ err_msg = "Cannot close Directory dictionary (DB->close error)\n";
+ toku_ydb_do_error(env, r, "%s", err_msg);
+ goto panic_and_quit_early;
+ }
+ }
+ env_fsync_log_cron_destroy(env);
+ if (env->i->cachetable) {
+ toku_cachetable_prepare_close(env->i->cachetable);
+ toku_cachetable_minicron_shutdown(env->i->cachetable);
+ if (env->i->logger) {
+ CHECKPOINTER cp = nullptr;
+ if (clean_shutdown) {
+ cp = toku_cachetable_get_checkpointer(env->i->cachetable);
+ r = toku_checkpoint(cp, env->i->logger, NULL, NULL, NULL, NULL, SHUTDOWN_CHECKPOINT);
+ if (r) {
+ err_msg = "Cannot close environment (error during checkpoint)\n";
+ toku_ydb_do_error(env, r, "%s", err_msg);
+ goto panic_and_quit_early;
+ }
+ }
+ toku_logger_close_rollback_check_empty(env->i->logger, clean_shutdown);
+ if (clean_shutdown) {
+ //Do a second checkpoint now that the rollback cachefile is closed.
+ r = toku_checkpoint(cp, env->i->logger, NULL, NULL, NULL, NULL, SHUTDOWN_CHECKPOINT);
+ if (r) {
+ err_msg = "Cannot close environment (error during checkpoint)\n";
+ toku_ydb_do_error(env, r, "%s", err_msg);
+ goto panic_and_quit_early;
+ }
+ toku_logger_shutdown(env->i->logger);
+ }
+ }
+ toku_cachetable_close(&env->i->cachetable);
+ }
+ if (env->i->logger) {
+ r = toku_logger_close(&env->i->logger);
+ if (r) {
+ err_msg = "Cannot close environment (logger close error)\n";
+ env->i->logger = NULL;
+ toku_ydb_do_error(env, r, "%s", err_msg);
+ goto panic_and_quit_early;
+ }
+ }
+ // Even if nothing else went wrong, if we were panicked then raise an error.
+ // If something else went wrong, that error was raised above.
+ if (toku_env_is_panicked(env)) {
+ goto panic_and_quit_early;
+ } else {
+ assert(env->i->panic_string == 0);
+ }
+
+ env_fs_destroy(env);
+ env->i->ltm.destroy();
+ if (env->i->data_dir)
+ toku_free(env->i->data_dir);
+ if (env->i->lg_dir)
+ toku_free(env->i->lg_dir);
+ if (env->i->tmp_dir)
+ toku_free(env->i->tmp_dir);
+ if (env->i->real_data_dir)
+ toku_free(env->i->real_data_dir);
+ if (env->i->real_log_dir)
+ toku_free(env->i->real_log_dir);
+ if (env->i->real_tmp_dir)
+ toku_free(env->i->real_tmp_dir);
+ if (env->i->open_dbs_by_dname) {
+ env->i->open_dbs_by_dname->destroy();
+ toku_free(env->i->open_dbs_by_dname);
+ }
+ if (env->i->open_dbs_by_dict_id) {
+ env->i->open_dbs_by_dict_id->destroy();
+ toku_free(env->i->open_dbs_by_dict_id);
+ }
+ if (env->i->dir)
+ toku_free(env->i->dir);
+ toku_pthread_rwlock_destroy(&env->i->open_dbs_rwlock);
+
+ // Immediately before freeing internal environment unlock the directories
+ unlock_single_process(env);
+ toku_free(env->i);
+ toku_free(env);
+ toku_sync_fetch_and_add(&tokuft_num_envs, -1);
+ if (flags != 0) {
+ r = EINVAL;
+ }
+ return r;
+
+panic_and_quit_early:
+ //release lock files.
+ unlock_single_process(env);
+ //r is the panic error
+ if (toku_env_is_panicked(env)) {
+ char *panic_string = env->i->panic_string;
+ r = toku_ydb_do_error(env, toku_env_is_panicked(env), "Cannot close environment due to previous error: %s\n", panic_string);
+ }
+ else {
+ env_panic(env, r, err_msg);
+ }
+ return r;
+}
+
+static int
+env_log_archive(DB_ENV * env, char **list[], uint32_t flags) {
+ return toku_logger_log_archive(env->i->logger, list, flags);
+}
+
+static int
+env_log_flush(DB_ENV * env, const DB_LSN * lsn __attribute__((__unused__))) {
+ HANDLE_PANICKED_ENV(env);
+ // do nothing if no logger
+ if (env->i->logger) {
+ // The lsn argument is ignored; we just flush everything. MySQL passes lsn == 0, which means flush everything.
+ // For anyone else using the log, it is correct to flush too much, so we are OK.
+ toku_logger_fsync(env->i->logger);
+ }
+ return 0;
+}
+
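+// Set the cachetable size. Only a single cache is supported (ncache must be 1);
+// gbytes and bytes are combined into a single 64-bit byte count.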
+static int
+env_set_cachesize(DB_ENV * env, uint32_t gbytes, uint32_t bytes, int ncache) {
+ HANDLE_PANICKED_ENV(env);
+ if (ncache != 1) {
+ return EINVAL;
+ }
+ uint64_t cs64 = ((uint64_t) gbytes << 30) + bytes;
+ unsigned long cs = cs64;
+ if (cs64 > cs) {
+ return EINVAL;
+ }
+ env->i->cachetable_size = cs;
+ return 0;
+}
+
+static int
+env_set_client_pool_threads(DB_ENV * env, uint32_t threads) {
+ HANDLE_PANICKED_ENV(env);
+ env->i->client_pool_threads = threads;
+ return 0;
+}
+
+static int
+env_set_cachetable_pool_threads(DB_ENV * env, uint32_t threads) {
+ HANDLE_PANICKED_ENV(env);
+ env->i->cachetable_pool_threads = threads;
+ return 0;
+}
+
+static int
+env_set_checkpoint_pool_threads(DB_ENV * env, uint32_t threads) {
+ HANDLE_PANICKED_ENV(env);
+ env->i->checkpoint_pool_threads = threads;
+ return 0;
+}
+
+static void
+env_set_check_thp(DB_ENV * env, bool new_val) {
+ assert(env);
+ env->i->check_thp = new_val;
+}
+
+static bool
+env_get_check_thp(DB_ENV * env) {
+ assert(env);
+ return env->i->check_thp;
+}
+
+static bool env_set_dir_per_db(DB_ENV *env, bool new_val) {
+ HANDLE_PANICKED_ENV(env);
+ bool r = env->i->dir_per_db;
+ env->i->dir_per_db = new_val;
+ return r;
+}
+
+static bool env_get_dir_per_db(DB_ENV *env) {
+ HANDLE_PANICKED_ENV(env);
+ return env->i->dir_per_db;
+}
+
+static const char *env_get_data_dir(DB_ENV *env) {
+ return env->i->real_data_dir;
+}
+
+static int env_dirtool_attach(DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname,
+ const char *iname) {
+ int r;
+ DBT dname_dbt;
+ DBT iname_dbt;
+
+ HANDLE_PANICKED_ENV(env);
+ if (!env_opened(env)) {
+ return EINVAL;
+ }
+ HANDLE_READ_ONLY_TXN(txn);
+ toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1);
+ toku_fill_dbt(&iname_dbt, iname, strlen(iname) + 1);
+
+ r = toku_db_put(env->i->directory,
+ txn,
+ &dname_dbt,
+ &iname_dbt,
+ 0,
+ true);
+ return r;
+}
+
+static int env_dirtool_detach(DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname) {
+ int r;
+ DBT dname_dbt;
+ DBT old_iname_dbt;
+
+ HANDLE_PANICKED_ENV(env);
+ if (!env_opened(env)) {
+ return EINVAL;
+ }
+ HANDLE_READ_ONLY_TXN(txn);
+
+ toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1);
+ toku_init_dbt_flags(&old_iname_dbt, DB_DBT_REALLOC);
+
+ r = toku_db_get(env->i->directory,
+ txn,
+ &dname_dbt,
+ &old_iname_dbt,
+ DB_SERIALIZABLE); // allocates memory for iname
+ if (r == DB_NOTFOUND)
+ return EEXIST;
+ toku_free(old_iname_dbt.data);
+
+ r = toku_db_del(env->i->directory, txn, &dname_dbt, DB_DELETE_ANY, true);
+
+ return r;
+}
+
+static int env_dirtool_move(DB_ENV *env,
+ DB_TXN *txn,
+ const char *old_dname,
+ const char *new_dname) {
+ int r;
+ DBT old_dname_dbt;
+ DBT new_dname_dbt;
+ DBT iname_dbt;
+
+ HANDLE_PANICKED_ENV(env);
+ if (!env_opened(env)) {
+ return EINVAL;
+ }
+ HANDLE_READ_ONLY_TXN(txn);
+
+ toku_fill_dbt(&old_dname_dbt, old_dname, strlen(old_dname) + 1);
+ toku_fill_dbt(&new_dname_dbt, new_dname, strlen(new_dname) + 1);
+ toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC);
+
+ r = toku_db_get(env->i->directory,
+ txn,
+ &old_dname_dbt,
+ &iname_dbt,
+ DB_SERIALIZABLE); // allocates memory for iname
+ if (r == DB_NOTFOUND)
+ return EEXIST;
+
+ r = toku_db_del(
+ env->i->directory, txn, &old_dname_dbt, DB_DELETE_ANY, true);
+ if (r != 0)
+ goto exit;
+
+ r = toku_db_put(
+ env->i->directory, txn, &new_dname_dbt, &iname_dbt, 0, true);
+
+exit:
+ toku_free(iname_dbt.data);
+ return r;
+}
+
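+// Run an environment operation under the multi-operation client lock so that
+// no checkpoint can begin while it runs. If the environment uses transactions,
+// the operation runs in a child transaction that is committed on success and
+// aborted on failure.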
+static int locked_env_op(DB_ENV *env,
+ DB_TXN *txn,
+ std::function<int(DB_TXN *)> f) {
+ int ret, r;
+ HANDLE_READ_ONLY_TXN(txn);
+ HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
+
+ DB_TXN *child_txn = NULL;
+ int using_txns = env->i->open_flags & DB_INIT_TXN;
+ if (using_txns) {
+ ret = toku_txn_begin(env, txn, &child_txn, 0);
+ lazy_assert_zero(ret);
+ }
+
+ // cannot begin a checkpoint
+ toku_multi_operation_client_lock();
+ r = f(child_txn);
+ toku_multi_operation_client_unlock();
+
+ if (using_txns) {
+ if (r == 0) {
+ ret = locked_txn_commit(child_txn, 0);
+ lazy_assert_zero(ret);
+ } else {
+ ret = locked_txn_abort(child_txn);
+ lazy_assert_zero(ret);
+ }
+ }
+ return r;
+
+}
+
+static int locked_env_dirtool_attach(DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname,
+ const char *iname) {
+ auto f = std::bind(
+ env_dirtool_attach, env, std::placeholders::_1, dname, iname);
+ return locked_env_op(env, txn, f);
+}
+
+static int locked_env_dirtool_detach(DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname) {
+ auto f = std::bind(
+ env_dirtool_detach, env, std::placeholders::_1, dname);
+ return locked_env_op(env, txn, f);
+}
+
+static int locked_env_dirtool_move(DB_ENV *env,
+ DB_TXN *txn,
+ const char *old_dname,
+ const char *new_dname) {
+ auto f = std::bind(
+ env_dirtool_move, env, std::placeholders::_1, old_dname, new_dname);
+ return locked_env_op(env, txn, f);
+}
+
+static int env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags);
+
+static int
+locked_env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags) {
+ int ret, r;
+ HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
+ HANDLE_READ_ONLY_TXN(txn);
+
+ DB_TXN *child_txn = NULL;
+ int using_txns = env->i->open_flags & DB_INIT_TXN;
+ if (using_txns) {
+ ret = toku_txn_begin(env, txn, &child_txn, 0);
+ lazy_assert_zero(ret);
+ }
+
+ // cannot begin a checkpoint
+ toku_multi_operation_client_lock();
+ r = env_dbremove(env, child_txn, fname, dbname, flags);
+ toku_multi_operation_client_unlock();
+
+ if (using_txns) {
+ if (r == 0) {
+ ret = locked_txn_commit(child_txn, 0);
+ lazy_assert_zero(ret);
+ } else {
+ ret = locked_txn_abort(child_txn);
+ lazy_assert_zero(ret);
+ }
+ }
+ return r;
+}
+
+static int env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, const char *newname, uint32_t flags);
+
+static int
+locked_env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, const char *newname, uint32_t flags) {
+ int ret, r;
+ HANDLE_READ_ONLY_TXN(txn);
+ HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
+
+ DB_TXN *child_txn = NULL;
+ int using_txns = env->i->open_flags & DB_INIT_TXN;
+ if (using_txns) {
+ ret = toku_txn_begin(env, txn, &child_txn, 0);
+ lazy_assert_zero(ret);
+ }
+
+ // cannot begin a checkpoint
+ toku_multi_operation_client_lock();
+ r = env_dbrename(env, child_txn, fname, dbname, newname, flags);
+ toku_multi_operation_client_unlock();
+
+ if (using_txns) {
+ if (r == 0) {
+ ret = locked_txn_commit(child_txn, 0);
+ lazy_assert_zero(ret);
+ } else {
+ ret = locked_txn_abort(child_txn);
+ lazy_assert_zero(ret);
+ }
+ }
+ return r;
+}
+
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3
+
+static int
+env_get_cachesize(DB_ENV * env, uint32_t *gbytes, uint32_t *bytes, int *ncache) {
+ HANDLE_PANICKED_ENV(env);
+ *gbytes = env->i->cachetable_size >> 30;
+ *bytes = env->i->cachetable_size & ((1<<30)-1);
+ *ncache = 1;
+ return 0;
+}
+
+#endif
+
+static int
+env_set_data_dir(DB_ENV * env, const char *dir) {
+ HANDLE_PANICKED_ENV(env);
+ int r;
+
+ if (env_opened(env) || !dir) {
+ r = toku_ydb_do_error(env, EINVAL, "You cannot set the data dir after opening the env\n");
+ }
+ else if (env->i->data_dir)
+ r = toku_ydb_do_error(env, EINVAL, "You cannot set the data dir more than once.\n");
+ else {
+ env->i->data_dir = toku_strdup(dir);
+ if (env->i->data_dir==NULL) {
+ assert(get_error_errno() == ENOMEM);
+ r = toku_ydb_do_error(env, ENOMEM, "Out of memory\n");
+ }
+ else r = 0;
+ }
+ return r;
+}
+
+static void
+env_set_errcall(DB_ENV * env, toku_env_errcall_t errcall) {
+ env->i->errcall = errcall;
+}
+
+static void
+env_set_errfile(DB_ENV*env, FILE*errfile) {
+ env->i->errfile = errfile;
+}
+
+static void
+env_set_errpfx(DB_ENV * env, const char *errpfx) {
+ env->i->errpfx = errpfx;
+}
+
+static int
+env_set_flags(DB_ENV * env, uint32_t flags, int onoff) {
+ HANDLE_PANICKED_ENV(env);
+
+ uint32_t change = 0;
+ if (flags & DB_AUTO_COMMIT) {
+ change |= DB_AUTO_COMMIT;
+ flags &= ~DB_AUTO_COMMIT;
+ }
+ if (flags != 0 && onoff) {
+ return toku_ydb_do_error(env, EINVAL, "PerconaFT does not (yet) support any nonzero ENV flags other than DB_AUTO_COMMIT\n");
+ }
+ if (onoff) env->i->open_flags |= change;
+ else env->i->open_flags &= ~change;
+ return 0;
+}
+
+static int
+env_set_lg_bsize(DB_ENV * env, uint32_t bsize) {
+ HANDLE_PANICKED_ENV(env);
+ return toku_logger_set_lg_bsize(env->i->logger, bsize);
+}
+
+static int
+env_set_lg_dir(DB_ENV * env, const char *dir) {
+ HANDLE_PANICKED_ENV(env);
+ if (env_opened(env)) {
+ return toku_ydb_do_error(env, EINVAL, "Cannot set log dir after opening the env\n");
+ }
+
+ if (env->i->lg_dir) toku_free(env->i->lg_dir);
+ if (dir) {
+ env->i->lg_dir = toku_strdup(dir);
+ if (!env->i->lg_dir) {
+ return toku_ydb_do_error(env, ENOMEM, "Out of memory\n");
+ }
+ }
+ else env->i->lg_dir = NULL;
+ return 0;
+}
+
+static int
+env_set_lg_max(DB_ENV * env, uint32_t lg_max) {
+ HANDLE_PANICKED_ENV(env);
+ return toku_logger_set_lg_max(env->i->logger, lg_max);
+}
+
+static int
+env_get_lg_max(DB_ENV * env, uint32_t *lg_maxp) {
+ HANDLE_PANICKED_ENV(env);
+ return toku_logger_get_lg_max(env->i->logger, lg_maxp);
+}
+
+static int
+env_set_lk_detect(DB_ENV * env, uint32_t UU(detect)) {
+ HANDLE_PANICKED_ENV(env);
+ return toku_ydb_do_error(env, EINVAL, "PerconaFT does not (yet) support set_lk_detect\n");
+}
+
+static int
+env_set_lk_max_memory(DB_ENV *env, uint64_t lock_memory_limit) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (env_opened(env)) {
+ r = EINVAL;
+ } else {
+ r = env->i->ltm.set_max_lock_memory(lock_memory_limit);
+ }
+ return r;
+}
+
+static int
+env_get_lk_max_memory(DB_ENV *env, uint64_t *lk_maxp) {
+ HANDLE_PANICKED_ENV(env);
+ uint32_t max_lock_memory = env->i->ltm.get_max_lock_memory();
+ *lk_maxp = max_lock_memory;
+ return 0;
+}
+
+//void toku__env_set_noticecall (DB_ENV *env, void (*noticecall)(DB_ENV *, db_notices)) {
+// env->i->noticecall = noticecall;
+//}
+
+static int
+env_set_tmp_dir(DB_ENV * env, const char *tmp_dir) {
+ HANDLE_PANICKED_ENV(env);
+ if (env_opened(env)) {
+ return toku_ydb_do_error(env, EINVAL, "Cannot set the tmp dir after opening an env\n");
+ }
+ if (!tmp_dir) {
+ return toku_ydb_do_error(env, EINVAL, "Tmp dir must be non-null\n");
+ }
+ if (env->i->tmp_dir)
+ toku_free(env->i->tmp_dir);
+ env->i->tmp_dir = toku_strdup(tmp_dir);
+ return env->i->tmp_dir ? 0 : ENOMEM;
+}
+
+static int
+env_set_verbose(DB_ENV * env, uint32_t UU(which), int UU(onoff)) {
+ HANDLE_PANICKED_ENV(env);
+ return 1;
+}
+
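+// Take a checkpoint on behalf of the client. The kbyte, min, and flags
+// arguments are accepted for BDB API compatibility but are ignored.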
+static int
+toku_env_txn_checkpoint(DB_ENV * env, uint32_t kbyte __attribute__((__unused__)), uint32_t min __attribute__((__unused__)), uint32_t flags __attribute__((__unused__))) {
+ CHECKPOINTER cp = toku_cachetable_get_checkpointer(env->i->cachetable);
+ int r = toku_checkpoint(cp, env->i->logger,
+ checkpoint_callback_f, checkpoint_callback_extra,
+ checkpoint_callback2_f, checkpoint_callback2_extra,
+ CLIENT_CHECKPOINT);
+ if (r) {
+ // Panicking the whole environment may be overkill, but I'm not sure what else to do.
+ env_panic(env, r, "checkpoint error\n");
+ toku_ydb_do_error(env, r, "Checkpoint\n");
+ }
+ return r;
+}
+
+static int
+env_txn_stat(DB_ENV * env, DB_TXN_STAT ** UU(statp), uint32_t UU(flags)) {
+ HANDLE_PANICKED_ENV(env);
+ return 1;
+}
+
+//
+// We can assume the client calls this function right after recovery
+// to return a list of prepared transactions to the user. When called,
+// we can assume that no other work is being done in the system,
+// as we are in the state of being after recovery,
+// but before client operations should commence
+//
+static int
+env_txn_xa_recover (DB_ENV *env, TOKU_XA_XID xids[/*count*/], long count, /*out*/ long *retp, uint32_t flags) {
+ struct tokulogger_preplist *MALLOC_N(count,preps);
+ int r = toku_logger_recover_txn(env->i->logger, preps, count, retp, flags);
+ if (r==0) {
+ assert(*retp<=count);
+ for (int i=0; i<*retp; i++) {
+ xids[i] = preps[i].xid;
+ }
+ }
+ toku_free(preps);
+ return r;
+}
+
+//
+// We can assume the client calls this function right after recovery
+// to return a list of prepared transactions to the user. When called,
+// we can assume that no other work is being done in the system,
+// as we are in the state of being after recovery,
+// but before client operations should commence
+//
+static int
+env_txn_recover (DB_ENV *env, DB_PREPLIST preplist[/*count*/], long count, /*out*/ long *retp, uint32_t flags) {
+ struct tokulogger_preplist *MALLOC_N(count,preps);
+ int r = toku_logger_recover_txn(env->i->logger, preps, count, retp, flags);
+ if (r==0) {
+ assert(*retp<=count);
+ for (int i=0; i<*retp; i++) {
+ preplist[i].txn = preps[i].txn;
+ memcpy(preplist[i].gid, preps[i].xid.data, preps[i].xid.gtrid_length + preps[i].xid.bqual_length);
+ }
+ }
+ toku_free(preps);
+ return r;
+}
+
+static int
+env_get_txn_from_xid (DB_ENV *env, /*in*/ TOKU_XA_XID *xid, /*out*/ DB_TXN **txnp) {
+ return toku_txn_manager_get_root_txn_from_xid(toku_logger_get_txn_manager(env->i->logger), xid, txnp);
+}
+
+static int
+env_checkpointing_set_period(DB_ENV * env, uint32_t seconds) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) {
+ r = EINVAL;
+ } else {
+ toku_set_checkpoint_period(env->i->cachetable, seconds);
+ }
+ return r;
+}
+
+static int
+env_cleaner_set_period(DB_ENV * env, uint32_t seconds) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) {
+ r = EINVAL;
+ } else {
+ toku_set_cleaner_period(env->i->cachetable, seconds);
+ }
+ return r;
+}
+
+static int
+env_cleaner_set_iterations(DB_ENV * env, uint32_t iterations) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) {
+ r = EINVAL;
+ } else {
+ toku_set_cleaner_iterations(env->i->cachetable, iterations);
+ }
+ return r;
+}
+
+static int
+env_create_loader(DB_ENV *env,
+ DB_TXN *txn,
+ DB_LOADER **blp,
+ DB *src_db,
+ int N,
+ DB *dbs[],
+ uint32_t db_flags[/*N*/],
+ uint32_t dbt_flags[/*N*/],
+ uint32_t loader_flags) {
+ int r = toku_loader_create_loader(env, txn, blp, src_db, N, dbs, db_flags, dbt_flags, loader_flags, true);
+ return r;
+}
+
+static int
+env_checkpointing_get_period(DB_ENV * env, uint32_t *seconds) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) r = EINVAL;
+ else
+ *seconds = toku_get_checkpoint_period_unlocked(env->i->cachetable);
+ return r;
+}
+
+static int
+env_cleaner_get_period(DB_ENV * env, uint32_t *seconds) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) r = EINVAL;
+ else
+ *seconds = toku_get_cleaner_period_unlocked(env->i->cachetable);
+ return r;
+}
+
+static int
+env_cleaner_get_iterations(DB_ENV * env, uint32_t *iterations) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) r = EINVAL;
+ else
+ *iterations = toku_get_cleaner_iterations(env->i->cachetable);
+ return r;
+}
+
+static int
+env_evictor_set_enable_partial_eviction(DB_ENV* env, bool enabled) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) r = EINVAL;
+ else toku_set_enable_partial_eviction(env->i->cachetable, enabled);
+ return r;
+}
+
+static int
+env_evictor_get_enable_partial_eviction(DB_ENV* env, bool *enabled) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) r = EINVAL;
+ else *enabled = toku_get_enable_partial_eviction(env->i->cachetable);
+ return r;
+}
+
+static int
+env_checkpointing_postpone(DB_ENV * env) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) r = EINVAL;
+ else toku_checkpoint_safe_client_lock();
+ return r;
+}
+
+static int
+env_checkpointing_resume(DB_ENV * env) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) r = EINVAL;
+ else toku_checkpoint_safe_client_unlock();
+ return r;
+}
+
+static int
+env_checkpointing_begin_atomic_operation(DB_ENV * env) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) r = EINVAL;
+ else toku_multi_operation_client_lock();
+ return r;
+}
+
+static int
+env_checkpointing_end_atomic_operation(DB_ENV * env) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (!env_opened(env)) r = EINVAL;
+ else toku_multi_operation_client_unlock();
+ return r;
+}
+
+static int
+env_set_default_bt_compare(DB_ENV * env, int (*bt_compare) (DB *, const DBT *, const DBT *)) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (env_opened(env)) r = EINVAL;
+ else {
+ env->i->bt_compare = bt_compare;
+ }
+ return r;
+}
+
+static void
+env_set_update (DB_ENV *env, int (*update_function)(DB *, const DBT *key, const DBT *old_val, const DBT *extra, void (*set_val)(const DBT *new_val, void *set_extra), void *set_extra)) {
+ env->i->update_function = update_function;
+}
+
+static int
+env_set_generate_row_callback_for_put(DB_ENV *env, generate_row_for_put_func generate_row_for_put) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (env_opened(env)) r = EINVAL;
+ else {
+ env->i->generate_row_for_put = generate_row_for_put;
+ }
+ return r;
+}
+
+static int
+env_set_generate_row_callback_for_del(DB_ENV *env, generate_row_for_del_func generate_row_for_del) {
+ HANDLE_PANICKED_ENV(env);
+ int r = 0;
+ if (env_opened(env)) r = EINVAL;
+ else {
+ env->i->generate_row_for_del = generate_row_for_del;
+ }
+ return r;
+}
+static int
+env_set_redzone(DB_ENV *env, int redzone) {
+ HANDLE_PANICKED_ENV(env);
+ int r;
+ if (env_opened(env))
+ r = EINVAL;
+ else {
+ env->i->redzone = redzone;
+ r = 0;
+ }
+ return r;
+}
+
+static int env_get_lock_timeout(DB_ENV *env, uint64_t *lock_timeout_msec) {
+ uint64_t t = env->i->default_lock_timeout_msec;
+ if (env->i->get_lock_timeout_callback)
+ t = env->i->get_lock_timeout_callback(t);
+ *lock_timeout_msec = t;
+ return 0;
+}
+
+static int env_set_lock_timeout(DB_ENV *env, uint64_t default_lock_timeout_msec, uint64_t (*get_lock_timeout_callback)(uint64_t default_lock_timeout_msec)) {
+ env->i->default_lock_timeout_msec = default_lock_timeout_msec;
+ env->i->get_lock_timeout_callback = get_lock_timeout_callback;
+ return 0;
+}
+
+static int
+env_set_lock_timeout_callback(DB_ENV *env, lock_timeout_callback callback) {
+ env->i->lock_wait_timeout_callback = callback;
+ return 0;
+}
+
+static int
+env_set_lock_wait_callback(DB_ENV *env, lock_wait_callback callback) {
+ env->i->lock_wait_needed_callback = callback;
+ return 0;
+}
+
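+// Format *timer with ctime_r() into buf (which must hold at least 26 bytes)
+// and strip any trailing newline or carriage-return characters.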
+static void
+format_time(const time_t *timer, char *buf) {
+ ctime_r(timer, buf);
+ size_t len = strlen(buf);
+ assert(len < 26);
+ char end;
+
+ assert(len>=1);
+ end = buf[len-1];
+ while (end == '\n' || end == '\r') {
+ buf[len-1] = '\0';
+ len--;
+ assert(len>=1);
+ end = buf[len-1];
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+// Local definition of status information from portability layer, which should not include db.h.
+// Local status structs are used to concentrate file system information collected from various places
+// and memory information collected from memory.c.
+//
+typedef enum {
+ FS_ENOSPC_REDZONE_STATE = 0, // possible values are enumerated by fs_redzone_state
+ FS_ENOSPC_THREADS_BLOCKED, // how many threads currently blocked on ENOSPC
+ FS_ENOSPC_REDZONE_CTR, // number of operations rejected by enospc prevention (red zone)
+ FS_ENOSPC_MOST_RECENT, // most recent time that file system was completely full
+ FS_ENOSPC_COUNT, // total number of times ENOSPC was returned from an attempt to write
+ FS_FSYNC_TIME,
+ FS_FSYNC_COUNT,
+ FS_LONG_FSYNC_TIME,
+ FS_LONG_FSYNC_COUNT,
+ FS_STATUS_NUM_ROWS, // must be last
+} fs_status_entry;
+
+typedef struct {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[FS_STATUS_NUM_ROWS];
+} FS_STATUS_S, *FS_STATUS;
+
+static FS_STATUS_S fsstat;
+
+#define FS_STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(fsstat, k, c, t, "filesystem: " l, inc)
+
+static void
+fs_status_init(void) {
+ FS_STATUS_INIT(FS_ENOSPC_REDZONE_STATE, nullptr, FS_STATE, "ENOSPC redzone state", TOKU_ENGINE_STATUS);
+ FS_STATUS_INIT(FS_ENOSPC_THREADS_BLOCKED, FILESYSTEM_THREADS_BLOCKED_BY_FULL_DISK, UINT64, "threads currently blocked by full disk", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ FS_STATUS_INIT(FS_ENOSPC_REDZONE_CTR, nullptr, UINT64, "number of operations rejected by enospc prevention (red zone)", TOKU_ENGINE_STATUS);
+ FS_STATUS_INIT(FS_ENOSPC_MOST_RECENT, nullptr, UNIXTIME, "most recent disk full", TOKU_ENGINE_STATUS);
+ FS_STATUS_INIT(FS_ENOSPC_COUNT, nullptr, UINT64, "number of write operations that returned ENOSPC", TOKU_ENGINE_STATUS);
+ FS_STATUS_INIT(FS_FSYNC_TIME, FILESYSTEM_FSYNC_TIME, UINT64, "fsync time", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ FS_STATUS_INIT(FS_FSYNC_COUNT, FILESYSTEM_FSYNC_NUM, UINT64, "fsync count", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ FS_STATUS_INIT(FS_LONG_FSYNC_TIME, FILESYSTEM_LONG_FSYNC_TIME, UINT64, "long fsync time", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ FS_STATUS_INIT(FS_LONG_FSYNC_COUNT, FILESYSTEM_LONG_FSYNC_NUM, UINT64, "long fsync count", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
+ fsstat.initialized = true;
+}
+#undef FS_STATUS_INIT
+
+#define FS_STATUS_VALUE(x) fsstat.status[x].value.num
+
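+// Fill the local file system status rows (ENOSPC redzone state, blocked
+// threads, fsync counters) and report the current redzone state to the caller.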
+static void
+fs_get_status(DB_ENV * env, fs_redzone_state * redzone_state) {
+ if (!fsstat.initialized)
+ fs_status_init();
+
+ time_t enospc_most_recent_timestamp;
+ uint64_t enospc_threads_blocked, enospc_total;
+ toku_fs_get_write_info(&enospc_most_recent_timestamp, &enospc_threads_blocked, &enospc_total);
+ if (enospc_threads_blocked)
+ FS_STATUS_VALUE(FS_ENOSPC_REDZONE_STATE) = FS_BLOCKED;
+ else
+ FS_STATUS_VALUE(FS_ENOSPC_REDZONE_STATE) = env->i->fs_state;
+ *redzone_state = (fs_redzone_state) FS_STATUS_VALUE(FS_ENOSPC_REDZONE_STATE);
+ FS_STATUS_VALUE(FS_ENOSPC_THREADS_BLOCKED) = enospc_threads_blocked;
+ FS_STATUS_VALUE(FS_ENOSPC_REDZONE_CTR) = env->i->enospc_redzone_ctr;
+ FS_STATUS_VALUE(FS_ENOSPC_MOST_RECENT) = enospc_most_recent_timestamp;
+ FS_STATUS_VALUE(FS_ENOSPC_COUNT) = enospc_total;
+
+ uint64_t fsync_count, fsync_time, long_fsync_threshold, long_fsync_count, long_fsync_time;
+ toku_get_fsync_times(&fsync_count, &fsync_time, &long_fsync_threshold, &long_fsync_count, &long_fsync_time);
+ FS_STATUS_VALUE(FS_FSYNC_COUNT) = fsync_count;
+ FS_STATUS_VALUE(FS_FSYNC_TIME) = fsync_time;
+ FS_STATUS_VALUE(FS_LONG_FSYNC_COUNT) = long_fsync_count;
+ FS_STATUS_VALUE(FS_LONG_FSYNC_TIME) = long_fsync_time;
+}
+#undef FS_STATUS_VALUE
+
+// Local status struct used to get information from memory.c
+typedef enum {
+ MEMORY_MALLOC_COUNT = 0,
+ MEMORY_FREE_COUNT,
+ MEMORY_REALLOC_COUNT,
+ MEMORY_MALLOC_FAIL,
+ MEMORY_REALLOC_FAIL,
+ MEMORY_REQUESTED,
+ MEMORY_USED,
+ MEMORY_FREED,
+ MEMORY_MAX_REQUESTED_SIZE,
+ MEMORY_LAST_FAILED_SIZE,
+ MEMORY_MAX_IN_USE,
+ MEMORY_MALLOCATOR_VERSION,
+ MEMORY_MMAP_THRESHOLD,
+ MEMORY_STATUS_NUM_ROWS
+} memory_status_entry;
+
+typedef struct {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[MEMORY_STATUS_NUM_ROWS];
+} MEMORY_STATUS_S, *MEMORY_STATUS;
+
+static MEMORY_STATUS_S memory_status;
+
+#define STATUS_INIT(k,c,t,l) TOKUFT_STATUS_INIT(memory_status, k, c, t, "memory: " l, TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS)
+
+static void
+memory_status_init(void) {
+ // Note, this function initializes the keyname, type, and legend fields.
+ // Value fields are initialized to zero by compiler.
+ STATUS_INIT(MEMORY_MALLOC_COUNT, MEMORY_MALLOC_COUNT, UINT64, "number of malloc operations");
+ STATUS_INIT(MEMORY_FREE_COUNT, MEMORY_FREE_COUNT, UINT64, "number of free operations");
+ STATUS_INIT(MEMORY_REALLOC_COUNT, MEMORY_REALLOC_COUNT, UINT64, "number of realloc operations");
+ STATUS_INIT(MEMORY_MALLOC_FAIL, MEMORY_MALLOC_FAIL, UINT64, "number of malloc operations that failed");
+ STATUS_INIT(MEMORY_REALLOC_FAIL, MEMORY_REALLOC_FAIL, UINT64, "number of realloc operations that failed" );
+ STATUS_INIT(MEMORY_REQUESTED, MEMORY_REQUESTED, UINT64, "number of bytes requested");
+ STATUS_INIT(MEMORY_USED, MEMORY_USED, UINT64, "number of bytes used (requested + overhead)");
+ STATUS_INIT(MEMORY_FREED, MEMORY_FREED, UINT64, "number of bytes freed");
+ STATUS_INIT(MEMORY_MAX_REQUESTED_SIZE, MEMORY_MAX_REQUESTED_SIZE, UINT64, "largest attempted allocation size");
+ STATUS_INIT(MEMORY_LAST_FAILED_SIZE, MEMORY_LAST_FAILED_SIZE, UINT64, "size of the last failed allocation attempt");
+ STATUS_INIT(MEMORY_MAX_IN_USE, MEM_ESTIMATED_MAXIMUM_MEMORY_FOOTPRINT, UINT64, "estimated maximum memory footprint");
+ STATUS_INIT(MEMORY_MALLOCATOR_VERSION, MEMORY_MALLOCATOR_VERSION, CHARSTR, "mallocator version");
+ STATUS_INIT(MEMORY_MMAP_THRESHOLD, MEMORY_MMAP_THRESHOLD, UINT64, "mmap threshold");
+ memory_status.initialized = true;
+}
+#undef STATUS_INIT
+
+#define MEMORY_STATUS_VALUE(x) memory_status.status[x].value.num
+
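+// Copy the portability layer's memory allocation counters into the local
+// memory status rows used by engine status.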
+static void
+memory_get_status(void) {
+ if (!memory_status.initialized)
+ memory_status_init();
+ LOCAL_MEMORY_STATUS_S local_memstat;
+ toku_memory_get_status(&local_memstat);
+ MEMORY_STATUS_VALUE(MEMORY_MALLOC_COUNT) = local_memstat.malloc_count;
+ MEMORY_STATUS_VALUE(MEMORY_FREE_COUNT) = local_memstat.free_count;
+ MEMORY_STATUS_VALUE(MEMORY_REALLOC_COUNT) = local_memstat.realloc_count;
+ MEMORY_STATUS_VALUE(MEMORY_MALLOC_FAIL) = local_memstat.malloc_fail;
+ MEMORY_STATUS_VALUE(MEMORY_REALLOC_FAIL) = local_memstat.realloc_fail;
+ MEMORY_STATUS_VALUE(MEMORY_REQUESTED) = local_memstat.requested;
+ MEMORY_STATUS_VALUE(MEMORY_USED) = local_memstat.used;
+ MEMORY_STATUS_VALUE(MEMORY_FREED) = local_memstat.freed;
+ MEMORY_STATUS_VALUE(MEMORY_MAX_IN_USE) = local_memstat.max_in_use;
+ MEMORY_STATUS_VALUE(MEMORY_MMAP_THRESHOLD) = local_memstat.mmap_threshold;
+ memory_status.status[MEMORY_MALLOCATOR_VERSION].value.str = local_memstat.mallocator_version;
+}
+#undef MEMORY_STATUS_VALUE
+
+// how many rows are in engine status?
+static int
+env_get_engine_status_num_rows (DB_ENV * UU(env), uint64_t * num_rowsp) {
+ uint64_t num_rows = 0;
+ num_rows += YDB_LAYER_STATUS_NUM_ROWS;
+ num_rows += YDB_C_LAYER_STATUS_NUM_ROWS;
+ num_rows += YDB_WRITE_LAYER_STATUS_NUM_ROWS;
+ num_rows += LE_STATUS_S::LE_STATUS_NUM_ROWS;
+ num_rows += CHECKPOINT_STATUS_S::CP_STATUS_NUM_ROWS;
+ num_rows += CACHETABLE_STATUS_S::CT_STATUS_NUM_ROWS;
+ num_rows += LTM_STATUS_S::LTM_STATUS_NUM_ROWS;
+ num_rows += FT_STATUS_S::FT_STATUS_NUM_ROWS;
+ num_rows += FT_FLUSHER_STATUS_S::FT_FLUSHER_STATUS_NUM_ROWS;
+ num_rows += FT_HOT_STATUS_S::FT_HOT_STATUS_NUM_ROWS;
+ num_rows += TXN_STATUS_S::TXN_STATUS_NUM_ROWS;
+ num_rows += LOGGER_STATUS_S::LOGGER_STATUS_NUM_ROWS;
+ num_rows += MEMORY_STATUS_NUM_ROWS;
+ num_rows += FS_STATUS_NUM_ROWS;
+ num_rows += INDEXER_STATUS_NUM_ROWS;
+ num_rows += LOADER_STATUS_NUM_ROWS;
+ num_rows += CTX_STATUS_NUM_ROWS;
+#if 0
+ // enable when upgrade is supported
+ num_rows += FT_UPGRADE_STATUS_NUM_ROWS;
+ num_rows += PERSISTENT_UPGRADE_STATUS_NUM_ROWS;
+#endif
+ *num_rowsp = num_rows;
+ return 0;
+}
+
+// Do not take ydb lock or any other lock around or in this function.
+// If the engine is blocked because some thread is holding a lock, this function
+// can help diagnose the problem.
+// This function only collects information, and it does not matter if something gets garbled
+// because of a race condition.
+// Note, engine status is still collected even if the environment or logger is panicked
+static int
+env_get_engine_status (DB_ENV * env, TOKU_ENGINE_STATUS_ROW engstat, uint64_t maxrows, uint64_t *num_rows, fs_redzone_state* redzone_state, uint64_t * env_panicp, char * env_panic_string_buf, int env_panic_string_length, toku_engine_status_include_type include_flags) {
+ int r;
+
+ if (env_panic_string_buf) {
+ if (env && env->i && env->i->is_panicked && env->i->panic_string) {
+ strncpy(env_panic_string_buf, env->i->panic_string, env_panic_string_length);
+ env_panic_string_buf[env_panic_string_length - 1] = '\0'; // just in case
+ }
+ else
+ *env_panic_string_buf = '\0';
+ }
+
+ if ( !(env) ||
+ !(env->i) ||
+ !(env_opened(env)) ||
+ !num_rows ||
+ !include_flags)
+ r = EINVAL;
+ else {
+ r = 0;
+ uint64_t row = 0; // which row to fill next
+ *env_panicp = env->i->is_panicked;
+
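+ // For each subsystem below, copy its status rows into engstat[], filtered by include_flags, stopping once maxrows rows have been filled.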
+ {
+ YDB_LAYER_STATUS_S ydb_stat;
+ ydb_layer_get_status(env, &ydb_stat);
+ for (int i = 0; i < YDB_LAYER_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (ydb_stat.status[i].include & include_flags) {
+ engstat[row++] = ydb_stat.status[i];
+ }
+ }
+ }
+ {
+ YDB_C_LAYER_STATUS_S ydb_c_stat;
+ ydb_c_layer_get_status(&ydb_c_stat);
+ for (int i = 0; i < YDB_C_LAYER_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (ydb_c_stat.status[i].include & include_flags) {
+ engstat[row++] = ydb_c_stat.status[i];
+ }
+ }
+ }
+ {
+ YDB_WRITE_LAYER_STATUS_S ydb_write_stat;
+ ydb_write_layer_get_status(&ydb_write_stat);
+ for (int i = 0; i < YDB_WRITE_LAYER_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (ydb_write_stat.status[i].include & include_flags) {
+ engstat[row++] = ydb_write_stat.status[i];
+ }
+ }
+ }
+ {
+ LE_STATUS_S lestat; // Rice's vampire
+ toku_le_get_status(&lestat);
+ for (int i = 0; i < LE_STATUS_S::LE_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (lestat.status[i].include & include_flags) {
+ engstat[row++] = lestat.status[i];
+ }
+ }
+ }
+ {
+ CHECKPOINT_STATUS_S cpstat;
+ toku_checkpoint_get_status(env->i->cachetable, &cpstat);
+ for (int i = 0; i < CHECKPOINT_STATUS_S::CP_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (cpstat.status[i].include & include_flags) {
+ engstat[row++] = cpstat.status[i];
+ }
+ }
+ }
+ {
+ CACHETABLE_STATUS_S ctstat;
+ toku_cachetable_get_status(env->i->cachetable, &ctstat);
+ for (int i = 0; i < CACHETABLE_STATUS_S::CT_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (ctstat.status[i].include & include_flags) {
+ engstat[row++] = ctstat.status[i];
+ }
+ }
+ }
+ {
+ LTM_STATUS_S ltmstat;
+ env->i->ltm.get_status(&ltmstat);
+ for (int i = 0; i < LTM_STATUS_S::LTM_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (ltmstat.status[i].include & include_flags) {
+ engstat[row++] = ltmstat.status[i];
+ }
+ }
+ }
+ {
+ FT_STATUS_S ftstat;
+ toku_ft_get_status(&ftstat);
+ for (int i = 0; i < FT_STATUS_S::FT_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (ftstat.status[i].include & include_flags) {
+ engstat[row++] = ftstat.status[i];
+ }
+ }
+ }
+ {
+ FT_FLUSHER_STATUS_S flusherstat;
+ toku_ft_flusher_get_status(&flusherstat);
+ for (int i = 0; i < FT_FLUSHER_STATUS_S::FT_FLUSHER_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (flusherstat.status[i].include & include_flags) {
+ engstat[row++] = flusherstat.status[i];
+ }
+ }
+ }
+ {
+ FT_HOT_STATUS_S hotstat;
+ toku_ft_hot_get_status(&hotstat);
+ for (int i = 0; i < FT_HOT_STATUS_S::FT_HOT_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (hotstat.status[i].include & include_flags) {
+ engstat[row++] = hotstat.status[i];
+ }
+ }
+ }
+ {
+ TXN_STATUS_S txnstat;
+ toku_txn_get_status(&txnstat);
+ for (int i = 0; i < TXN_STATUS_S::TXN_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (txnstat.status[i].include & include_flags) {
+ engstat[row++] = txnstat.status[i];
+ }
+ }
+ }
+ {
+ LOGGER_STATUS_S loggerstat;
+ toku_logger_get_status(env->i->logger, &loggerstat);
+ for (int i = 0; i < LOGGER_STATUS_S::LOGGER_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (loggerstat.status[i].include & include_flags) {
+ engstat[row++] = loggerstat.status[i];
+ }
+ }
+ }
+
+ {
+ INDEXER_STATUS_S indexerstat;
+ toku_indexer_get_status(&indexerstat);
+ for (int i = 0; i < INDEXER_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (indexerstat.status[i].include & include_flags) {
+ engstat[row++] = indexerstat.status[i];
+ }
+ }
+ }
+ {
+ LOADER_STATUS_S loaderstat;
+ toku_loader_get_status(&loaderstat);
+ for (int i = 0; i < LOADER_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (loaderstat.status[i].include & include_flags) {
+ engstat[row++] = loaderstat.status[i];
+ }
+ }
+ }
+
+ {
+ // memory_status is local to this file
+ memory_get_status();
+ for (int i = 0; i < MEMORY_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (memory_status.status[i].include & include_flags) {
+ engstat[row++] = memory_status.status[i];
+ }
+ }
+ }
+ {
+ // Note, fs_get_status() and the fsstat structure are local to this file because they
+ // are used to concentrate file system information collected from various places.
+ fs_get_status(env, redzone_state);
+ for (int i = 0; i < FS_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (fsstat.status[i].include & include_flags) {
+ engstat[row++] = fsstat.status[i];
+ }
+ }
+ }
+ {
+ struct context_status ctxstatus;
+ toku_context_get_status(&ctxstatus);
+ for (int i = 0; i < CTX_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (ctxstatus.status[i].include & include_flags) {
+ engstat[row++] = ctxstatus.status[i];
+ }
+ }
+ }
+#if 0
+ // enable when upgrade is supported
+ {
+ for (int i = 0; i < PERSISTENT_UPGRADE_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (persistent_upgrade_status.status[i].include & include_flags) {
+ engstat[row++] = persistent_upgrade_status.status[i];
+ }
+ }
+ FT_UPGRADE_STATUS_S ft_upgradestat;
+ toku_ft_upgrade_get_status(&ft_upgradestat);
+ for (int i = 0; i < FT_UPGRADE_STATUS_NUM_ROWS && row < maxrows; i++) {
+ if (ft_upgradestat.status[i].include & include_flags) {
+ engstat[row++] = ft_upgradestat.status[i];
+ }
+ }
+
+ }
+#endif
+ if (r==0) {
+ *num_rows = row;
+ }
+ }
+ return r;
+}
+
+// Fill buff with text description of engine status up to bufsiz bytes.
+// Intended for use by test programs that do not have the handlerton available,
+// and for use by toku_assert logic to print diagnostic info on crash.
+static int
+env_get_engine_status_text(DB_ENV * env, char * buff, int bufsiz) {
+ uint32_t stringsize = 1024;
+ uint64_t panic;
+ char panicstring[stringsize];
+ int n = 0; // number of characters printed so far
+ uint64_t num_rows;
+ uint64_t max_rows;
+ fs_redzone_state redzone_state;
+
+ n = snprintf(buff, bufsiz - n, "BUILD_ID = %d\n", BUILD_ID);
+
+ (void) env_get_engine_status_num_rows (env, &max_rows);
+ TOKU_ENGINE_STATUS_ROW_S mystat[max_rows];
+ int r = env->get_engine_status (env, mystat, max_rows, &num_rows, &redzone_state, &panic, panicstring, stringsize, TOKU_ENGINE_STATUS);
+
+ if (r) {
+ n += snprintf(buff + n, bufsiz - n, "Engine status not available: ");
+ if (!env) {
+ n += snprintf(buff + n, bufsiz - n, "no environment\n");
+ }
+ else if (!(env->i)) {
+ n += snprintf(buff + n, bufsiz - n, "environment internal struct is null\n");
+ }
+ else if (!env_opened(env)) {
+ n += snprintf(buff + n, bufsiz - n, "environment is not open\n");
+ }
+ }
+ else {
+ if (panic) {
+ n += snprintf(buff + n, bufsiz - n, "Env panic code: %" PRIu64 "\n", panic);
+ if (strlen(panicstring)) {
+ invariant(strlen(panicstring) <= stringsize);
+ n += snprintf(buff + n, bufsiz - n, "Env panic string: %s\n", panicstring);
+ }
+ }
+
+ for (uint64_t row = 0; row < num_rows; row++) {
+ n += snprintf(buff + n, bufsiz - n, "%s: ", mystat[row].legend);
+ switch (mystat[row].type) {
+ case FS_STATE:
+ n += snprintf(buff + n, bufsiz - n, "%" PRIu64 "\n", mystat[row].value.num);
+ break;
+ case UINT64:
+ n += snprintf(buff + n, bufsiz - n, "%" PRIu64 "\n", mystat[row].value.num);
+ break;
+ case CHARSTR:
+ n += snprintf(buff + n, bufsiz - n, "%s\n", mystat[row].value.str);
+ break;
+ case UNIXTIME:
+ {
+ char tbuf[26];
+ format_time((time_t*)&mystat[row].value.num, tbuf);
+ n += snprintf(buff + n, bufsiz - n, "%s\n", tbuf);
+ }
+ break;
+ case TOKUTIME:
+ {
+ double t = tokutime_to_seconds(mystat[row].value.num);
+ n += snprintf(buff + n, bufsiz - n, "%.6f\n", t);
+ }
+ break;
+ case PARCOUNT:
+ {
+ uint64_t v = read_partitioned_counter(mystat[row].value.parcount);
+ n += snprintf(buff + n, bufsiz - n, "%" PRIu64 "\n", v);
+ }
+ break;
+ default:
+ n += snprintf(buff + n, bufsiz - n, "UNKNOWN STATUS TYPE: %d\n", mystat[row].type);
+ break;
+ }
+ }
+ }
+
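+ // If the formatted output did not fit, overwrite the tail of the buffer with a "BUFFER TOO SMALL" marker.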
+ if (n > bufsiz) {
+ const char * errmsg = "BUFFER TOO SMALL\n";
+ int len = strlen(errmsg) + 1;
+ (void) snprintf(buff + (bufsiz - 1) - len, len, "%s", errmsg);
+ }
+
+ return r;
+}
+
+// prints engine status using toku_env_err line-by-line
+static int
+env_err_engine_status(DB_ENV * env) {
+ uint32_t stringsize = 1024;
+ uint64_t panic;
+ char panicstring[stringsize];
+ uint64_t num_rows;
+ uint64_t max_rows;
+ fs_redzone_state redzone_state;
+
+ toku_env_err(env, 0, "BUILD_ID = %d", BUILD_ID);
+
+ (void) env_get_engine_status_num_rows (env, &max_rows);
+ TOKU_ENGINE_STATUS_ROW_S mystat[max_rows];
+ int r = env->get_engine_status (env, mystat, max_rows, &num_rows, &redzone_state, &panic, panicstring, stringsize, TOKU_ENGINE_STATUS);
+
+ if (r) {
+ toku_env_err(env, 0, "Engine status not available: ");
+ if (!env) {
+ toku_env_err(env, 0, "no environment");
+ }
+ else if (!(env->i)) {
+ toku_env_err(env, 0, "environment internal struct is null");
+ }
+ else if (!env_opened(env)) {
+ toku_env_err(env, 0, "environment is not open");
+ }
+ }
+ else {
+ if (panic) {
+ toku_env_err(env, 0, "Env panic code: %" PRIu64, panic);
+ if (strlen(panicstring)) {
+ invariant(strlen(panicstring) <= stringsize);
+ toku_env_err(env, 0, "Env panic string: %s", panicstring);
+ }
+ }
+
+ for (uint64_t row = 0; row < num_rows; row++) {
+ switch (mystat[row].type) {
+ case FS_STATE:
+ toku_env_err(env, 0, "%s: %" PRIu64, mystat[row].legend, mystat[row].value.num);
+ break;
+ case UINT64:
+ toku_env_err(env, 0, "%s: %" PRIu64, mystat[row].legend, mystat[row].value.num);
+ break;
+ case CHARSTR:
+ toku_env_err(env, 0, "%s: %s", mystat[row].legend, mystat[row].value.str);
+ break;
+ case UNIXTIME:
+ {
+ char tbuf[26];
+ format_time((time_t*)&mystat[row].value.num, tbuf);
+ toku_env_err(env, 0, "%s: %s", mystat[row].legend, tbuf);
+ }
+ break;
+ case TOKUTIME:
+ {
+ double t = tokutime_to_seconds(mystat[row].value.num);
+ toku_env_err(env, 0, "%s: %.6f", mystat[row].legend, t);
+ }
+ break;
+ case PARCOUNT:
+ {
+ uint64_t v = read_partitioned_counter(mystat[row].value.parcount);
+ toku_env_err(env, 0, "%s: %" PRIu64, mystat[row].legend, v);
+ }
+ break;
+ default:
+ toku_env_err(env, 0, "%s: UNKNOWN STATUS TYPE: %d", mystat[row].legend, mystat[row].type);
+ break;
+ }
+ }
+ }
+
+ return r;
+}
+
+// intended for use by toku_assert logic, when env is not known
+static int
+toku_maybe_get_engine_status_text (char * buff, int buffsize) {
+ DB_ENV * env = most_recent_env;
+ int r;
+ if (engine_status_enable && env != NULL) {
+ r = env_get_engine_status_text(env, buff, buffsize);
+ }
+ else {
+ r = EOPNOTSUPP;
+ snprintf(buff, buffsize, "Engine status not available: disabled by user. This should only happen in test programs.\n");
+ }
+ return r;
+}
+
+static int
+toku_maybe_err_engine_status (void) {
+ DB_ENV * env = most_recent_env;
+ int r;
+ if (engine_status_enable && env != NULL) {
+ r = env_err_engine_status(env);
+ }
+ else {
+ r = EOPNOTSUPP;
+ }
+ return r;
+}
+
+// Set panic code and panic string if not already panicked,
+// intended for use by toku_assert when about to abort().
+static void
+toku_maybe_set_env_panic(int code, const char * msg) {
+ if (code == 0)
+ code = -1;
+ if (msg == NULL)
+ msg = "Unknown cause from abort (failed assert)\n";
+ env_is_panicked = code; // disable library destructor no matter what
+ DB_ENV * env = most_recent_env;
+ if (env &&
+ env->i &&
+ (env->i->is_panicked == 0)) {
+ env_panic(env, code, msg);
+ }
+}
+
+// handlerton's call to fractal tree layer on failed assert in handlerton
+static int
+env_crash(DB_ENV * UU(db_env), const char* msg, const char * fun, const char* file, int line, int caller_errno) {
+ toku_do_assert_fail(msg, fun, file, line, caller_errno);
+ return -1; // placate compiler
+}
+
+static int
+env_get_cursor_for_persistent_environment(DB_ENV* env, DB_TXN* txn, DBC** c) {
+ if (!env_opened(env)) {
+ return EINVAL;
+ }
+ return toku_db_cursor(env->i->persistent_environment, txn, c, 0);
+}
+
+static int
+env_get_cursor_for_directory(DB_ENV* env, DB_TXN* txn, DBC** c) {
+ if (!env_opened(env)) {
+ return EINVAL;
+ }
+ return toku_db_cursor(env->i->directory, txn, c, 0);
+}
+
+static DB *
+env_get_db_for_directory(DB_ENV* env) {
+ if (!env_opened(env)) {
+ return NULL;
+ }
+ return env->i->directory;
+}
+
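+ // Carries the environment and the user's callback through the locktree manager's pending-lock-request iterator.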
+struct ltm_iterate_requests_callback_extra {
+ ltm_iterate_requests_callback_extra(DB_ENV *e,
+ iterate_requests_callback cb,
+ void *ex) :
+ env(e), callback(cb), extra(ex) {
+ }
+ DB_ENV *env;
+ iterate_requests_callback callback;
+ void *extra;
+};
+
+static int
+find_db_by_dict_id(DB *const &db, const DICTIONARY_ID &dict_id_find) {
+ DICTIONARY_ID dict_id = db->i->dict_id;
+ if (dict_id.dictid < dict_id_find.dictid) {
+ return -1;
+ } else if (dict_id.dictid > dict_id_find.dictid) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
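+ // Look up an open DB handle by dictionary id; callers in this file hold open_dbs_rwlock around this lookup.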
+static DB *
+locked_get_db_by_dict_id(DB_ENV *env, DICTIONARY_ID dict_id) {
+ DB *db;
+ int r = env->i->open_dbs_by_dict_id->find_zero<DICTIONARY_ID, find_db_by_dict_id>(dict_id, &db, nullptr);
+ return r == 0 ? db : nullptr;
+}
+
+static int ltm_iterate_requests_callback(DICTIONARY_ID dict_id, TXNID txnid,
+ const DBT *left_key,
+ const DBT *right_key,
+ TXNID blocking_txnid,
+ uint64_t start_time,
+ void *extra) {
+ ltm_iterate_requests_callback_extra *info =
+ reinterpret_cast<ltm_iterate_requests_callback_extra *>(extra);
+
+ toku_pthread_rwlock_rdlock(&info->env->i->open_dbs_rwlock);
+ int r = 0;
+ DB *db = locked_get_db_by_dict_id(info->env, dict_id);
+ if (db != nullptr) {
+ r = info->callback(db, txnid, left_key, right_key,
+ blocking_txnid, start_time, info->extra);
+ }
+ toku_pthread_rwlock_rdunlock(&info->env->i->open_dbs_rwlock);
+ return r;
+}
+
+static int
+env_iterate_pending_lock_requests(DB_ENV *env,
+ iterate_requests_callback callback,
+ void *extra) {
+ if (!env_opened(env)) {
+ return EINVAL;
+ }
+
+ toku::locktree_manager *mgr = &env->i->ltm;
+ ltm_iterate_requests_callback_extra e(env, callback, extra);
+ return mgr->iterate_pending_lock_requests(ltm_iterate_requests_callback, &e);
+}
+
+// for the lifetime of this object:
+// - open_dbs_rwlock must be read locked (or better)
+// - txn_mutex must be held
+struct iter_txn_row_locks_callback_extra {
+ iter_txn_row_locks_callback_extra(DB_ENV *e, toku::omt<txn_lt_key_ranges> *m) :
+ env(e), current_db(nullptr), which_lt(0), lt_map(m) {
+ if (lt_map->size() > 0) {
+ set_iterator_and_current_db();
+ }
+ }
+
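+ // Position the range-buffer iterator at the current locktree's ranges and resolve the corresponding DB handle.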
+ void set_iterator_and_current_db() {
+ txn_lt_key_ranges ranges;
+ const int r = lt_map->fetch(which_lt, &ranges);
+ invariant_zero(r);
+ current_db = locked_get_db_by_dict_id(env, ranges.lt->get_dict_id());
+ iter = toku::range_buffer::iterator(ranges.buffer);
+ }
+
+ DB_ENV *env;
+ DB *current_db;
+ size_t which_lt;
+ toku::omt<txn_lt_key_ranges> *lt_map;
+ toku::range_buffer::iterator iter;
+ toku::range_buffer::iterator::record rec;
+};
+
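+ // Walks the transaction's per-locktree range buffers in order, reporting one locked key range per call; DB_NOTFOUND means all ranges have been reported.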
+static int iter_txn_row_locks_callback(DB **db, DBT *left_key, DBT *right_key, void *extra) {
+ iter_txn_row_locks_callback_extra *info =
+ reinterpret_cast<iter_txn_row_locks_callback_extra *>(extra);
+
+ while (info->which_lt < info->lt_map->size()) {
+ const bool more = info->iter.current(&info->rec);
+ if (more) {
+ *db = info->current_db;
+ // The caller should interpret data/size == 0 to mean infinity.
+ // Therefore, when we copyref pos/neg infinity into left/right_key,
+ // the caller knows what we're talking about.
+ toku_copyref_dbt(left_key, *info->rec.get_left_key());
+ toku_copyref_dbt(right_key, *info->rec.get_right_key());
+ info->iter.next();
+ return 0;
+ } else {
+ info->which_lt++;
+ if (info->which_lt < info->lt_map->size()) {
+ info->set_iterator_and_current_db();
+ }
+ }
+ }
+ return DB_NOTFOUND;
+}
+
+struct iter_txns_callback_extra {
+ iter_txns_callback_extra(DB_ENV *e, iterate_transactions_callback cb, void *ex) :
+ env(e), callback(cb), extra(ex) {
+ }
+ DB_ENV *env;
+ iterate_transactions_callback callback;
+ void *extra;
+};
+
+static int iter_txns_callback(TOKUTXN txn, void *extra) {
+ int r = 0;
+ iter_txns_callback_extra *info =
+ reinterpret_cast<iter_txns_callback_extra *>(extra);
+ DB_TXN *dbtxn = toku_txn_get_container_db_txn(txn);
+ invariant_notnull(dbtxn);
+ struct __toku_db_txn_internal *db_txn_internal __attribute__((__unused__)) = db_txn_struct_i(dbtxn);
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(db_txn_internal, sizeof *db_txn_internal);
+ if (db_txn_struct_i(dbtxn)->tokutxn == txn) { // make sure that the dbtxn is fully initialized
+ toku_mutex_lock(&db_txn_struct_i(dbtxn)->txn_mutex);
+ toku_pthread_rwlock_rdlock(&info->env->i->open_dbs_rwlock);
+
+ iter_txn_row_locks_callback_extra e(info->env, &db_txn_struct_i(dbtxn)->lt_map);
+ r = info->callback(dbtxn, iter_txn_row_locks_callback, &e, info->extra);
+
+ toku_pthread_rwlock_rdunlock(&info->env->i->open_dbs_rwlock);
+ toku_mutex_unlock(&db_txn_struct_i(dbtxn)->txn_mutex);
+ }
+ TOKU_VALGRIND_HG_ENABLE_CHECKING(db_txn_internal, sizeof *db_txn_internal);
+
+ return r;
+}
+
+static int
+env_iterate_live_transactions(DB_ENV *env,
+ iterate_transactions_callback callback,
+ void *extra) {
+ if (!env_opened(env)) {
+ return EINVAL;
+ }
+
+ TXN_MANAGER txn_manager = toku_logger_get_txn_manager(env->i->logger);
+ iter_txns_callback_extra e(env, callback, extra);
+ return toku_txn_manager_iter_over_live_root_txns(txn_manager, iter_txns_callback, &e);
+}
+
+static void env_set_loader_memory_size(DB_ENV *env, uint64_t (*get_loader_memory_size_callback)(void)) {
+ env->i->get_loader_memory_size_callback = get_loader_memory_size_callback;
+}
+
+static uint64_t env_get_loader_memory_size(DB_ENV *env) {
+ uint64_t memory_size = 0;
+ if (env->i->get_loader_memory_size_callback)
+ memory_size = env->i->get_loader_memory_size_callback();
+ return memory_size;
+}
+
+static void env_set_killed_callback(DB_ENV *env, uint64_t default_killed_time_msec, uint64_t (*get_killed_time_callback)(uint64_t default_killed_time_msec), int (*killed_callback)(void)) {
+ env->i->default_killed_time_msec = default_killed_time_msec;
+ env->i->get_killed_time_callback = get_killed_time_callback;
+ env->i->killed_callback = killed_callback;
+}
+
+static void env_kill_waiter(DB_ENV *env, void *extra) {
+ env->i->ltm.kill_waiter(extra);
+}
+
+static void env_do_backtrace(DB_ENV *env) {
+ if (env->i->errcall) {
+ db_env_do_backtrace_errfunc((toku_env_err_func) toku_env_err, (const void *) env);
+ }
+ if (env->i->errfile) {
+ db_env_do_backtrace((FILE *) env->i->errfile);
+ } else {
+ db_env_do_backtrace(stderr);
+ }
+}
+
+static int
+toku_env_create(DB_ENV ** envp, uint32_t flags) {
+ int r = ENOSYS;
+ DB_ENV* result = NULL;
+
+ if (flags!=0) { r = EINVAL; goto cleanup; }
+ MALLOC(result);
+ if (result == 0) { r = ENOMEM; goto cleanup; }
+ memset(result, 0, sizeof *result);
+
+ // locked methods
+ result->err = (void (*)(const DB_ENV * env, int error, const char *fmt, ...)) toku_env_err;
+#define SENV(name) result->name = locked_env_ ## name
+ SENV(dbremove);
+ SENV(dbrename);
+ SENV(dirtool_attach);
+ SENV(dirtool_detach);
+ SENV(dirtool_move);
+ //SENV(set_noticecall);
+#undef SENV
+#define USENV(name) result->name = env_ ## name
+ // methods with locking done internally
+ USENV(put_multiple);
+ USENV(del_multiple);
+ USENV(update_multiple);
+ // unlocked methods
+ USENV(open);
+ USENV(close);
+ USENV(set_default_bt_compare);
+ USENV(set_update);
+ USENV(set_generate_row_callback_for_put);
+ USENV(set_generate_row_callback_for_del);
+ USENV(set_lg_bsize);
+ USENV(set_lg_dir);
+ USENV(set_lg_max);
+ USENV(get_lg_max);
+ USENV(set_lk_max_memory);
+ USENV(get_lk_max_memory);
+ USENV(get_iname);
+ USENV(set_errcall);
+ USENV(set_errfile);
+ USENV(set_errpfx);
+ USENV(set_data_dir);
+ USENV(checkpointing_set_period);
+ USENV(checkpointing_get_period);
+ USENV(cleaner_set_period);
+ USENV(cleaner_get_period);
+ USENV(cleaner_set_iterations);
+ USENV(cleaner_get_iterations);
+ USENV(evictor_set_enable_partial_eviction);
+ USENV(evictor_get_enable_partial_eviction);
+ USENV(set_cachesize);
+ USENV(set_client_pool_threads);
+ USENV(set_cachetable_pool_threads);
+ USENV(set_checkpoint_pool_threads);
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3
+ USENV(get_cachesize);
+#endif
+#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR <= 4
+ USENV(set_lk_max);
+#endif
+ USENV(set_lk_detect);
+ USENV(set_flags);
+ USENV(set_tmp_dir);
+ USENV(set_verbose);
+ USENV(txn_recover);
+ USENV(txn_xa_recover);
+ USENV(get_txn_from_xid);
+ USENV(txn_stat);
+ USENV(get_lock_timeout);
+ USENV(set_lock_timeout);
+ USENV(set_lock_timeout_callback);
+ USENV(set_lock_wait_callback);
+ USENV(set_redzone);
+ USENV(log_flush);
+ USENV(log_archive);
+ USENV(create_loader);
+ USENV(get_cursor_for_persistent_environment);
+ USENV(get_cursor_for_directory);
+ USENV(get_db_for_directory);
+ USENV(iterate_pending_lock_requests);
+ USENV(iterate_live_transactions);
+ USENV(change_fsync_log_period);
+ USENV(set_loader_memory_size);
+ USENV(get_loader_memory_size);
+ USENV(set_killed_callback);
+ USENV(do_backtrace);
+ USENV(set_check_thp);
+ USENV(get_check_thp);
+ USENV(set_dir_per_db);
+ USENV(get_dir_per_db);
+ USENV(get_data_dir);
+ USENV(kill_waiter);
+#undef USENV
+
+ // unlocked methods
+ result->create_indexer = toku_indexer_create_indexer;
+ result->txn_checkpoint = toku_env_txn_checkpoint;
+ result->checkpointing_postpone = env_checkpointing_postpone;
+ result->checkpointing_resume = env_checkpointing_resume;
+ result->checkpointing_begin_atomic_operation = env_checkpointing_begin_atomic_operation;
+ result->checkpointing_end_atomic_operation = env_checkpointing_end_atomic_operation;
+ result->get_engine_status_num_rows = env_get_engine_status_num_rows;
+ result->get_engine_status = env_get_engine_status;
+ result->get_engine_status_text = env_get_engine_status_text;
+ result->crash = env_crash; // handlerton's call to fractal tree layer on failed assert
+ result->txn_begin = toku_txn_begin;
+
+ MALLOC(result->i);
+ if (result->i == 0) { r = ENOMEM; goto cleanup; }
+ memset(result->i, 0, sizeof *result->i);
+ result->i->envdir_lockfd = -1;
+ result->i->datadir_lockfd = -1;
+ result->i->logdir_lockfd = -1;
+ result->i->tmpdir_lockfd = -1;
+ env_fs_init(result);
+ env_fsync_log_init(result);
+
+ result->i->check_thp = true;
+
+ result->i->bt_compare = toku_builtin_compare_fun;
+
+ r = toku_logger_create(&result->i->logger);
+ invariant_zero(r);
+ invariant_notnull(result->i->logger);
+
+ // Create the locktree manager, passing in the create/destroy/escalate callbacks.
+ // The extra parameter for escalation is simply a pointer to this environment.
+ // The escalate callback will need it to translate txnids to DB_TXNs
+ result->i->ltm.create(toku_db_lt_on_create_callback, toku_db_lt_on_destroy_callback, toku_db_txn_escalate_callback, result);
+
+ XMALLOC(result->i->open_dbs_by_dname);
+ result->i->open_dbs_by_dname->create();
+ XMALLOC(result->i->open_dbs_by_dict_id);
+ result->i->open_dbs_by_dict_id->create();
+ toku_pthread_rwlock_init(
+ *result_i_open_dbs_rwlock_key, &result->i->open_dbs_rwlock, nullptr);
+
+ *envp = result;
+ r = 0;
+ toku_sync_fetch_and_add(&tokuft_num_envs, 1);
+cleanup:
+ if (r!=0) {
+ if (result) {
+ toku_free(result->i);
+ toku_free(result);
+ }
+ }
+ return r;
+}
+
+int
+DB_ENV_CREATE_FUN (DB_ENV ** envp, uint32_t flags) {
+ int r = toku_env_create(envp, flags);
+ return r;
+}
+
+// return 0 if v and dbv refer to same db (including same dname)
+// return <0 if v is earlier in omt than dbv
+// return >0 if v is later in omt than dbv
+static int
+find_db_by_db_dname(DB *const &db, DB *const &dbfind) {
+ int cmp;
+ const char *dname = db->i->dname;
+ const char *dnamefind = dbfind->i->dname;
+ cmp = strcmp(dname, dnamefind);
+ if (cmp != 0) return cmp;
+ if (db < dbfind) return -1;
+ if (db > dbfind) return 1;
+ return 0;
+}
+
+static int
+find_db_by_db_dict_id(DB *const &db, DB *const &dbfind) {
+ DICTIONARY_ID dict_id = db->i->dict_id;
+ DICTIONARY_ID dict_id_find = dbfind->i->dict_id;
+ if (dict_id.dictid < dict_id_find.dictid) {
+ return -1;
+ } else if (dict_id.dictid > dict_id_find.dictid) {
+ return 1;
+ } else if (db < dbfind) {
+ return -1;
+ } else if (db > dbfind) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+// Tell env that there is a new db handle (with non-unique dname in db->i->dname)
+void
+env_note_db_opened(DB_ENV *env, DB *db) {
+ toku_pthread_rwlock_wrlock(&env->i->open_dbs_rwlock);
+ assert(db->i->dname); // internal (non-user) dictionary has no dname
+
+ int r;
+ uint32_t idx;
+
+ r = env->i->open_dbs_by_dname->find_zero<DB *, find_db_by_db_dname>(db, nullptr, &idx);
+ assert(r == DB_NOTFOUND);
+ r = env->i->open_dbs_by_dname->insert_at(db, idx);
+ assert_zero(r);
+ r = env->i->open_dbs_by_dict_id->find_zero<DB *, find_db_by_db_dict_id>(db, nullptr, &idx);
+ assert(r == DB_NOTFOUND);
+ r = env->i->open_dbs_by_dict_id->insert_at(db, idx);
+ assert_zero(r);
+
+ STATUS_VALUE(YDB_LAYER_NUM_OPEN_DBS) = env->i->open_dbs_by_dname->size();
+ STATUS_VALUE(YDB_LAYER_NUM_DB_OPEN)++;
+ if (STATUS_VALUE(YDB_LAYER_NUM_OPEN_DBS) > STATUS_VALUE(YDB_LAYER_MAX_OPEN_DBS)) {
+ STATUS_VALUE(YDB_LAYER_MAX_OPEN_DBS) = STATUS_VALUE(YDB_LAYER_NUM_OPEN_DBS);
+ }
+ toku_pthread_rwlock_wrunlock(&env->i->open_dbs_rwlock);
+}
+
+// Effect: Tell the DB_ENV that the DB is no longer in use by the user of the API. The DB may still be in use by the fractal tree internals.
+void
+env_note_db_closed(DB_ENV *env, DB *db) {
+ toku_pthread_rwlock_wrlock(&env->i->open_dbs_rwlock);
+ assert(db->i->dname); // internal (non-user) dictionary has no dname
+ assert(env->i->open_dbs_by_dname->size() > 0);
+ assert(env->i->open_dbs_by_dict_id->size() > 0);
+
+ int r;
+ uint32_t idx;
+
+ r = env->i->open_dbs_by_dname->find_zero<DB *, find_db_by_db_dname>(db, nullptr, &idx);
+ assert_zero(r);
+ r = env->i->open_dbs_by_dname->delete_at(idx);
+ assert_zero(r);
+ r = env->i->open_dbs_by_dict_id->find_zero<DB *, find_db_by_db_dict_id>(db, nullptr, &idx);
+ assert_zero(r);
+ r = env->i->open_dbs_by_dict_id->delete_at(idx);
+ assert_zero(r);
+
+ STATUS_VALUE(YDB_LAYER_NUM_DB_CLOSE)++;
+ STATUS_VALUE(YDB_LAYER_NUM_OPEN_DBS) = env->i->open_dbs_by_dname->size();
+ toku_pthread_rwlock_wrunlock(&env->i->open_dbs_rwlock);
+}
+
+static int
+find_open_db_by_dname(DB *const &db, const char *const &dnamefind) {
+ return strcmp(db->i->dname, dnamefind);
+}
+
+// return true if there is any db open with the given dname
+static bool
+env_is_db_with_dname_open(DB_ENV *env, const char *dname) {
+ DB *db;
+ toku_pthread_rwlock_rdlock(&env->i->open_dbs_rwlock);
+ int r = env->i->open_dbs_by_dname->find_zero<const char *, find_open_db_by_dname>(dname, &db, nullptr);
+ if (r == 0) {
+ invariant(strcmp(dname, db->i->dname) == 0);
+ } else {
+ invariant(r == DB_NOTFOUND);
+ }
+ toku_pthread_rwlock_rdunlock(&env->i->open_dbs_rwlock);
+ return r == 0 ? true : false;
+}
+
+//We do not (yet?) support deleting subdbs by deleting the enclosing 'fname'
+static int
+env_dbremove_subdb(DB_ENV * env, DB_TXN * txn, const char *fname, const char *dbname, int32_t flags) {
+ int r;
+ if (!fname || !dbname) r = EINVAL;
+ else {
+ char subdb_full_name[strlen(fname) + sizeof("/") + strlen(dbname)];
+ int bytes = snprintf(subdb_full_name, sizeof(subdb_full_name), "%s/%s", fname, dbname);
+ assert(bytes==(int)sizeof(subdb_full_name)-1);
+ const char *null_subdbname = NULL;
+ r = env_dbremove(env, txn, subdb_full_name, null_subdbname, flags);
+ }
+ return r;
+}
+
+// see if we can acquire a table lock for the given dname.
+// requires: write lock on dname in the directory. dictionary
+// open, close, and begin checkpoint cannot occur.
+// returns: zero if we could open, lock, and close a dictionary
+// with the given dname, errno otherwise.
+static int
+can_acquire_table_lock(DB_ENV *env, DB_TXN *txn, const char *iname_in_env) {
+ int r;
+ DB *db;
+
+ r = toku_db_create(&db, env, 0);
+ assert_zero(r);
+ r = toku_db_open_iname(db, txn, iname_in_env, 0, 0);
+ if(r) {
+ if (r == ENAMETOOLONG)
+ toku_ydb_do_error(env, r, "File name too long!\n");
+ goto exit;
+ }
+ r = toku_db_pre_acquire_table_lock(db, txn);
+ if (r) {
+ r = DB_LOCK_NOTGRANTED;
+ }
+exit:
+ if(db) {
+ int r2 = toku_db_close(db);
+ assert_zero(r2);
+ }
+ return r;
+}
+
+static int
+env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags) {
+ int r;
+ HANDLE_PANICKED_ENV(env);
+ if (!env_opened(env) || flags != 0) {
+ return EINVAL;
+ }
+ HANDLE_READ_ONLY_TXN(txn);
+ if (dbname != NULL) {
+ // env_dbremove_subdb() converts (fname, dbname) to dname
+ return env_dbremove_subdb(env, txn, fname, dbname, flags);
+ }
+
+ const char * dname = fname;
+ assert(dbname == NULL);
+
+ // We check for an open db here as a "fast path" to error.
+ // We'll need to check again below to be sure.
+ if (env_is_db_with_dname_open(env, dname)) {
+ return toku_ydb_do_error(env, EINVAL, "Cannot remove dictionary with an open handle.\n");
+ }
+
+ DBT dname_dbt;
+ DBT iname_dbt;
+ toku_fill_dbt(&dname_dbt, dname, strlen(dname)+1);
+ toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC);
+
+ // get iname
+ r = toku_db_get(env->i->directory, txn, &dname_dbt, &iname_dbt, DB_SERIALIZABLE); // allocates memory for iname
+ char *iname = (char *) iname_dbt.data;
+ DB *db = NULL;
+ if (r != 0) {
+ if (r == DB_NOTFOUND) {
+ r = ENOENT;
+ }
+ goto exit;
+ }
+ // remove (dname,iname) from directory
+ r = toku_db_del(env->i->directory, txn, &dname_dbt, DB_DELETE_ANY, true);
+ if (r != 0) {
+ goto exit;
+ }
+ r = toku_db_create(&db, env, 0);
+ lazy_assert_zero(r);
+ r = toku_db_open_iname(db, txn, iname, 0, 0);
+ if (txn && r) {
+ if (r == EMFILE || r == ENFILE)
+ r = toku_ydb_do_error(env, r, "toku dbremove failed because open file limit reached\n");
+ else if (r != ENOENT)
+ r = toku_ydb_do_error(env, r, "toku dbremove failed\n");
+ else
+ r = 0;
+ goto exit;
+ }
+ if (txn) {
+ // Now that we have a writelock on dname, verify that there are still no handles open. (to prevent race conditions)
+ if (env_is_db_with_dname_open(env, dname)) {
+ r = toku_ydb_do_error(env, EINVAL, "Cannot remove dictionary with an open handle.\n");
+ goto exit;
+ }
+ // we know a live db handle does not exist.
+ //
+ // use the internally opened db to try and get a table lock
+ //
+ // if we can't get it, then some txn needs the ft and we
+ // should return lock not granted.
+ //
+ // otherwise, we're okay in marking this ft as remove on
+ // commit. no new handles can open for this dictionary
+ // because the txn has directory write locks on the dname
+ r = toku_db_pre_acquire_table_lock(db, txn);
+ if (r != 0) {
+ r = DB_LOCK_NOTGRANTED;
+ goto exit;
+ }
+ // The ft will be unlinked when the txn commits
+ toku_ft_unlink_on_commit(db->i->ft_handle, db_txn_struct_i(txn)->tokutxn);
+ }
+ else {
+ // unlink the ft without a txn
+ toku_ft_unlink(db->i->ft_handle);
+ }
+
+exit:
+ if (db) {
+ int ret = toku_db_close(db);
+ assert(ret == 0);
+ }
+ if (iname) {
+ toku_free(iname);
+ }
+ return r;
+}
+
+static int
+env_dbrename_subdb(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, const char *newname, uint32_t flags) {
+ int r;
+ if (!fname || !dbname || !newname) r = EINVAL;
+ else {
+ char subdb_full_name[strlen(fname) + sizeof("/") + strlen(dbname)];
+ {
+ int bytes = snprintf(subdb_full_name, sizeof(subdb_full_name), "%s/%s", fname, dbname);
+ assert(bytes==(int)sizeof(subdb_full_name)-1);
+ }
+ char new_full_name[strlen(fname) + sizeof("/") + strlen(newname)];
+ {
+ int bytes = snprintf(new_full_name, sizeof(new_full_name), "%s/%s", fname, newname);
+ assert(bytes==(int)sizeof(new_full_name)-1);
+ }
+ const char *null_subdbname = NULL;
+ r = env_dbrename(env, txn, subdb_full_name, null_subdbname, new_full_name, flags);
+ }
+ return r;
+}
+
+static int
+env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, const char *newname, uint32_t flags) {
+ int r;
+ HANDLE_PANICKED_ENV(env);
+ if (!env_opened(env) || flags != 0) {
+ return EINVAL;
+ }
+ HANDLE_READ_ONLY_TXN(txn);
+ if (dbname != NULL) {
+ // env_dbrename_subdb() converts (fname, dbname) to dname and (fname, newname) to newdname
+ return env_dbrename_subdb(env, txn, fname, dbname, newname, flags);
+ }
+
+ const char * dname = fname;
+ assert(dbname == NULL);
+
+ // We check for open dnames for the old and new name as a "fast path" to error.
+ // We will need to check these again later.
+ if (env_is_db_with_dname_open(env, dname)) {
+ return toku_ydb_do_error(env, EINVAL, "Cannot rename dictionary with an open handle.\n");
+ }
+ if (env_is_db_with_dname_open(env, newname)) {
+ return toku_ydb_do_error(env, EINVAL, "Cannot rename dictionary; Dictionary with target name has an open handle.\n");
+ }
+
+ DBT old_dname_dbt;
+ DBT new_dname_dbt;
+ DBT iname_dbt;
+ toku_fill_dbt(&old_dname_dbt, dname, strlen(dname)+1);
+ toku_fill_dbt(&new_dname_dbt, newname, strlen(newname)+1);
+ toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC);
+
+ // get iname
+ r = toku_db_get(env->i->directory, txn, &old_dname_dbt, &iname_dbt, DB_SERIALIZABLE); // allocates memory for iname
+ char *iname = (char *) iname_dbt.data;
+ if (r == DB_NOTFOUND) {
+ r = ENOENT;
+ } else if (r == 0) {
+ // verify that newname does not already exist
+ r = db_getf_set(env->i->directory, txn, DB_SERIALIZABLE, &new_dname_dbt, ydb_getf_do_nothing, NULL);
+ if (r == 0) {
+ r = EEXIST;
+ }
+ else if (r == DB_NOTFOUND) {
+ DBT new_iname_dbt;
+ // Do not rename ft file if 'dir_per_db' option is not set
+ auto new_iname =
+ env->get_dir_per_db(env)
+ ? generate_iname_for_rename_or_open(
+ env, txn, newname, false)
+ : std::unique_ptr<char[], decltype(&toku_free)>(
+ toku_strdup(iname), &toku_free);
+ toku_fill_dbt(
+ &new_iname_dbt, new_iname.get(), strlen(new_iname.get()) + 1);
+
+ // remove old (dname,iname) and insert (newname,iname) in directory
+ r = toku_db_del(env->i->directory, txn, &old_dname_dbt, DB_DELETE_ANY, true);
+ if (r != 0) { goto exit; }
+
+ // Do not rename ft file if 'dir_per_db' option is not set
+ if (env->get_dir_per_db(env))
+ r = toku_ft_rename_iname(txn,
+ env->get_data_dir(env),
+ iname,
+ new_iname.get(),
+ env->i->cachetable);
+
+ r = toku_db_put(env->i->directory,
+ txn,
+ &new_dname_dbt,
+ &new_iname_dbt,
+ 0,
+ true);
+ if (r != 0) { goto exit; }
+
+ //Now that we have writelocks on both dnames, verify that there are still no handles open. (to prevent race conditions)
+ if (env_is_db_with_dname_open(env, dname)) {
+ r = toku_ydb_do_error(env, EINVAL, "Cannot rename dictionary with an open handle.\n");
+ goto exit;
+ }
+ if (env_is_db_with_dname_open(env, newname)) {
+ r = toku_ydb_do_error(env, EINVAL, "Cannot rename dictionary; Dictionary with target name has an open handle.\n");
+ goto exit;
+ }
+
+ // we know a live db handle does not exist.
+ //
+ // use the internally opened db to try and get a table lock
+ //
+ // if we can't get it, then some txn needs the ft and we
+ // should return lock not granted.
+ //
+ // otherwise, we're okay in marking this ft as remove on
+ // commit. no new handles can open for this dictionary
+ // because the txn has directory write locks on the dname
+ if (txn) {
+ r = can_acquire_table_lock(env, txn, new_iname.get());
+ }
+ // We don't do anything at the ft or cachetable layer for rename.
+ // We just update entries in the environment's directory.
+ }
+ }
+
+exit:
+ if (iname) {
+ toku_free(iname);
+ }
+ return r;
+}
+
+int
+DB_CREATE_FUN (DB ** db, DB_ENV * env, uint32_t flags) {
+ int r = toku_db_create(db, env, flags);
+ return r;
+}
+
+/* need db_strerror_r for multiple threads */
+
+const char *
+db_strerror(int error) {
+ char *errorstr;
+ if (error >= 0) {
+ errorstr = strerror(error);
+ if (errorstr)
+ return errorstr;
+ }
+
+ switch (error) {
+ case DB_BADFORMAT:
+ return "Database Bad Format (probably a corrupted database)";
+ case DB_NOTFOUND:
+ return "Not found";
+ case TOKUDB_OUT_OF_LOCKS:
+ return "Out of locks";
+ case TOKUDB_DICTIONARY_TOO_OLD:
+ return "Dictionary too old for this version of PerconaFT";
+ case TOKUDB_DICTIONARY_TOO_NEW:
+ return "Dictionary too new for this version of PerconaFT";
+ case TOKUDB_CANCELED:
+ return "User cancelled operation";
+ case TOKUDB_NO_DATA:
+ return "Ran out of data (not EOF)";
+ case TOKUDB_HUGE_PAGES_ENABLED:
+ return "Transparent huge pages are enabled but PerconaFT's memory allocator will oversubscribe main memory with transparent huge pages. This check can be disabled by setting the environment variable TOKU_HUGE_PAGES_OK.";
+ }
+
+ static char unknown_result[100]; // Race condition if two threads call this at the same time. However even in a bad case, it should be some sort of null-terminated string.
+ errorstr = unknown_result;
+ snprintf(errorstr, sizeof unknown_result, "Unknown error code: %d", error);
+ return errorstr;
+}
+
+const char *
+db_version(int *major, int *minor, int *patch) {
+ if (major)
+ *major = DB_VERSION_MAJOR;
+ if (minor)
+ *minor = DB_VERSION_MINOR;
+ if (patch)
+ *patch = DB_VERSION_PATCH;
+ return toku_product_name_strings.db_version;
+}
+
+// HACK: To ensure toku_pthread_yield gets included in the .so
+// non-static would require a prototype in a header
+// static (since unused) would give a warning
+// static + unused would not actually help toku_pthread_yield get in the .so
+// static + used avoids all the warnings and makes sure toku_pthread_yield is in the .so
+static void __attribute__((__used__))
+include_toku_pthread_yield (void) {
+ toku_pthread_yield();
+}
+
+// For test purposes only, translate dname to iname
+// YDB lock is NOT held when this function is called,
+// as it is called by user
+static int
+env_get_iname(DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) {
+ DB *directory = env->i->directory;
+ int r = autotxn_db_get(directory, NULL, dname_dbt, iname_dbt, DB_SERIALIZABLE|DB_PRELOCKED); // allocates memory for iname
+ return r;
+}
+
+// TODO 2216: Patch out this (dangerous) function when loader is working and
+// we don't need to test the low-level redirect anymore.
+// for use by test programs only, just a wrapper around ft call:
+int
+toku_test_db_redirect_dictionary(DB * db, const char * dname_of_new_file, DB_TXN *dbtxn) {
+ int r;
+ DBT dname_dbt;
+ DBT iname_dbt;
+ char * new_iname_in_env;
+
+ FT_HANDLE ft_handle = db->i->ft_handle;
+ TOKUTXN tokutxn = db_txn_struct_i(dbtxn)->tokutxn;
+
+ toku_fill_dbt(&dname_dbt, dname_of_new_file, strlen(dname_of_new_file)+1);
+ toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC);
+ r = toku_db_get(db->dbenv->i->directory, dbtxn, &dname_dbt, &iname_dbt, DB_SERIALIZABLE); // allocates memory for iname
+ assert_zero(r);
+ new_iname_in_env = (char *) iname_dbt.data;
+
+ toku_multi_operation_client_lock(); //Must hold MO lock for dictionary_redirect.
+ r = toku_dictionary_redirect(new_iname_in_env, ft_handle, tokutxn);
+ toku_multi_operation_client_unlock();
+
+ toku_free(new_iname_in_env);
+ return r;
+}
+
+// Test-only function
+uint64_t
+toku_test_get_latest_lsn(DB_ENV *env) {
+ LSN rval = ZERO_LSN;
+ if (env && env->i->logger) {
+ rval = toku_logger_last_lsn(env->i->logger);
+ }
+ return rval.lsn;
+}
+
+void toku_set_test_txn_sync_callback(void (* cb) (pthread_t, void *), void * extra) {
+ set_test_txn_sync_callback(cb, extra);
+}
+
+int
+toku_test_get_checkpointing_user_data_status (void) {
+ return toku_cachetable_get_checkpointing_user_data_status();
+}
+
+#undef STATUS_VALUE
+#undef PERSISTENT_UPGRADE_STATUS_VALUE
+
+#include <toku_race_tools.h>
+void __attribute__((constructor)) toku_ydb_helgrind_ignore(void);
+void
+toku_ydb_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&ydb_layer_status, sizeof ydb_layer_status);
+}
diff --git a/storage/tokudb/PerconaFT/src/ydb.h b/storage/tokudb/PerconaFT/src/ydb.h
new file mode 100644
index 00000000..facbfdc9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb.h
@@ -0,0 +1,63 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// Initialize the ydb library globals.
+// Called when the ydb library is loaded.
+int toku_ydb_init(void);
+
+// Called when the ydb library is unloaded.
+void toku_ydb_destroy(void);
+
+// db_env_create for the trace library
+int db_env_create_toku10(DB_ENV **, uint32_t) __attribute__((__visibility__("default")));
+
+// db_create for the trace library
+int db_create_toku10(DB **, DB_ENV *, uint32_t) __attribute__((__visibility__("default")));
+
+// test only function
+extern "C" int toku_test_db_redirect_dictionary(DB * db, const char * dname_of_new_file, DB_TXN *dbtxn) __attribute__((__visibility__("default")));
+
+extern "C" uint64_t toku_test_get_latest_lsn(DB_ENV *env) __attribute__((__visibility__("default")));
+
+// test-only function
+extern "C" int toku_test_get_checkpointing_user_data_status(void) __attribute__((__visibility__("default")));
+
+// test-only function
+extern "C" void toku_set_test_txn_sync_callback(void (* ) (pthread_t, void *), void * extra) __attribute__((__visibility__("default")));
diff --git a/storage/tokudb/PerconaFT/src/ydb_cursor.cc b/storage/tokudb/PerconaFT/src/ydb_cursor.cc
new file mode 100644
index 00000000..1f4f00b7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_cursor.cc
@@ -0,0 +1,900 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <db.h>
+#include "toku_assert.h"
+#include "ydb-internal.h"
+#include "ydb_cursor.h"
+#include "ydb_row_lock.h"
+#include "ft/cursor.h"
+
+static YDB_C_LAYER_STATUS_S ydb_c_layer_status;
+#ifdef STATUS_VALUE
+#undef STATUS_VALUE
+#endif
+#define STATUS_VALUE(x) ydb_c_layer_status.status[x].value.num
+
+#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ydb_c_layer_status, k, c, t, l, inc)
+
+static void
+ydb_c_layer_status_init (void) {
+ // Note, this function would initialize the keyname, type, and legend fields, but this layer currently defines no status rows.
+ // Value fields are initialized to zero by the compiler.
+ ydb_c_layer_status.initialized = true;
+}
+#undef STATUS_INIT
+
+void
+ydb_c_layer_get_status(YDB_C_LAYER_STATUS statp) {
+ if (!ydb_c_layer_status.initialized)
+ ydb_c_layer_status_init();
+ *statp = ydb_c_layer_status;
+}
+
+//Get the main portion of a cursor flag (excluding the bitwise or'd components).
+static int
+get_main_cursor_flag(uint32_t flags) {
+ return flags & DB_OPFLAGS_MASK;
+}
+
+static int
+get_nonmain_cursor_flags(uint32_t flags) {
+ return flags & ~(DB_OPFLAGS_MASK);
+}
+
+static inline bool
+c_uninitialized(DBC *c) {
+ return toku_ft_cursor_uninitialized(dbc_ftcursor(c));
+}
+
+typedef struct query_context_wrapped_t {
+ DBT *key;
+ DBT *val;
+ struct simple_dbt *skey;
+ struct simple_dbt *sval;
+} *QUERY_CONTEXT_WRAPPED, QUERY_CONTEXT_WRAPPED_S;
+
+static inline void
+query_context_wrapped_init(QUERY_CONTEXT_WRAPPED context, DBC *c, DBT *key, DBT *val) {
+ context->key = key;
+ context->val = val;
+ context->skey = dbc_struct_i(c)->skey;
+ context->sval = dbc_struct_i(c)->sval;
+}
+
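+ // Copy the found key and value into the caller's DBTs via toku_dbt_set, backed by the cursor's skey/sval buffers.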
+static int
+c_get_wrapper_callback(DBT const *key, DBT const *val, void *extra) {
+ QUERY_CONTEXT_WRAPPED context = (QUERY_CONTEXT_WRAPPED) extra;
+ int r = toku_dbt_set(key->size, key->data, context->key, context->skey);
+ if (r == 0) {
+ r = toku_dbt_set(val->size, val->data, context->val, context->sval);
+ }
+ return r;
+}
+
+static inline uint32_t get_cursor_prelocked_flags(uint32_t flags, DBC *dbc) {
+ uint32_t lock_flags = flags & (DB_PRELOCKED | DB_PRELOCKED_WRITE);
+
+ // DB_READ_UNCOMMITTED and DB_READ_COMMITTED transactions 'own' all read
+ // locks for user-data dictionaries.
+ if (dbc_struct_i(dbc)->iso != TOKU_ISO_SERIALIZABLE &&
+ !(dbc_struct_i(dbc)->iso == TOKU_ISO_SNAPSHOT &&
+ dbc_struct_i(dbc)->locking_read)) {
+ lock_flags |= DB_PRELOCKED;
+ }
+ return lock_flags;
+}
+
+// The query context structs below carry the user-level callback (f, f_extra) given to ydb-layer functions such as c_getf_first.
+
+typedef struct query_context_base_t {
+ FT_CURSOR c;
+ DB_TXN *txn;
+ DB *db;
+ YDB_CALLBACK_FUNCTION f;
+ void *f_extra;
+ int r_user_callback;
+ bool do_locking;
+ bool is_write_op;
+ toku::lock_request request;
+} *QUERY_CONTEXT_BASE, QUERY_CONTEXT_BASE_S;
+
+typedef struct query_context_t {
+ QUERY_CONTEXT_BASE_S base;
+} *QUERY_CONTEXT, QUERY_CONTEXT_S;
+
+typedef struct query_context_with_input_t {
+ QUERY_CONTEXT_BASE_S base;
+ DBT *input_key;
+ DBT *input_val;
+} *QUERY_CONTEXT_WITH_INPUT, QUERY_CONTEXT_WITH_INPUT_S;
+
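+ // Initialize the shared query context: capture the cursor, txn, db, and user callback, and decide whether range locking is needed.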
+static void
+query_context_base_init(QUERY_CONTEXT_BASE context, DBC *c, uint32_t flag, bool is_write_op, YDB_CALLBACK_FUNCTION f, void *extra) {
+ context->c = dbc_ftcursor(c);
+ context->txn = dbc_struct_i(c)->txn;
+ context->db = c->dbp;
+ context->f = f;
+ context->f_extra = extra;
+ context->is_write_op = is_write_op;
+ uint32_t lock_flags = get_cursor_prelocked_flags(flag, c);
+ if (context->is_write_op) {
+ lock_flags &= DB_PRELOCKED_WRITE; // Only care about whether already locked for write
+ }
+ context->do_locking = (context->db->i->lt != nullptr && !(lock_flags & (DB_PRELOCKED | DB_PRELOCKED_WRITE)));
+ context->r_user_callback = 0;
+ context->request.create();
+}
+
+static toku::lock_request::type
+query_context_determine_lock_type(QUERY_CONTEXT_BASE context) {
+ return context->is_write_op ?
+ toku::lock_request::type::WRITE : toku::lock_request::type::READ;
+}
+
+static void
+query_context_base_destroy(QUERY_CONTEXT_BASE context) {
+ context->request.destroy();
+}
+
+static void
+query_context_init_read(QUERY_CONTEXT context, DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) {
+ const bool is_write = false;
+ query_context_base_init(&context->base, c, flag, is_write, f, extra);
+}
+
+static void
+query_context_init_write(QUERY_CONTEXT context, DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) {
+ const bool is_write = true;
+ query_context_base_init(&context->base, c, flag, is_write, f, extra);
+}
+
+static void
+query_context_with_input_init(QUERY_CONTEXT_WITH_INPUT context, DBC *c, uint32_t flag, DBT *key, DBT *val, YDB_CALLBACK_FUNCTION f, void *extra) {
+ // grab write locks if the DB_RMW flag is set or the cursor was created with the DB_RMW flag
+ const bool is_write = ((flag & DB_RMW) != 0) || dbc_struct_i(c)->rmw;
+ query_context_base_init(&context->base, c, flag, is_write, f, extra);
+ context->input_key = key;
+ context->input_val = val;
+}
+
+static int c_getf_first_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool);
+
+static void
+c_query_context_init(QUERY_CONTEXT context, DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) {
+ bool is_write_op = false;
+ // grab write locks if the DB_RMW flag is set or the cursor was created with the DB_RMW flag
+ if ((flag & DB_RMW) || dbc_struct_i(c)->rmw) {
+ is_write_op = true;
+ }
+ if (is_write_op) {
+ query_context_init_write(context, c, flag, f, extra);
+ } else {
+ query_context_init_read(context, c, flag, f, extra);
+ }
+}
+
+static void
+c_query_context_destroy(QUERY_CONTEXT context) {
+ query_context_base_destroy(&context->base);
+}
+
+static int
+c_getf_first(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) {
+ HANDLE_PANICKED_DB(c->dbp);
+ HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c);
+ int r = 0;
+ QUERY_CONTEXT_S context; //Describes the context of this query.
+ c_query_context_init(&context, c, flag, f, extra);
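+ // Retry the cursor operation: if the range lock was not immediately granted, wait for it and try again.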
+ while (r == 0) {
+ //toku_ft_cursor_first will call c_getf_first_callback(..., context) (if query is successful)
+ r = toku_ft_cursor_first(dbc_ftcursor(c), c_getf_first_callback, &context);
+ if (r == DB_LOCK_NOTGRANTED) {
+ r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request);
+ } else {
+ break;
+ }
+ }
+ c_query_context_destroy(&context);
+ return r;
+}
+
+//result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc.)
+static int
+c_getf_first_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) {
+ QUERY_CONTEXT super_context = (QUERY_CONTEXT) extra;
+ QUERY_CONTEXT_BASE context = &super_context->base;
+
+ int r;
+ DBT found_key = { .data = (void *) key, .size = keylen };
+
+ if (context->do_locking) {
+ const DBT *left_key = toku_dbt_negative_infinity();
+ const DBT *right_key = key != NULL ? &found_key : toku_dbt_positive_infinity();
+ r = toku_db_start_range_lock(context->db, context->txn, left_key, right_key,
+ query_context_determine_lock_type(context), &context->request);
+ } else {
+ r = 0;
+ }
+
+ //Call application-layer callback if found and locks were successfully obtained.
+ if (r==0 && key!=NULL && !lock_only) {
+ DBT found_val = { .data = (void *) val, .size = vallen };
+ context->r_user_callback = context->f(&found_key, &found_val, context->f_extra);
+ r = context->r_user_callback;
+ }
+
+ //Give ft-layer an error (if any) to return from toku_ft_cursor_first
+ return r;
+}
+
+static int c_getf_last_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool);
+
+static int
+c_getf_last(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) {
+ HANDLE_PANICKED_DB(c->dbp);
+ HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c);
+ int r = 0;
+ QUERY_CONTEXT_S context; //Describes the context of this query.
+ c_query_context_init(&context, c, flag, f, extra);
+ while (r == 0) {
+ //toku_ft_cursor_last will call c_getf_last_callback(..., context) (if query is successful)
+ r = toku_ft_cursor_last(dbc_ftcursor(c), c_getf_last_callback, &context);
+ if (r == DB_LOCK_NOTGRANTED) {
+ r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request);
+ } else {
+ break;
+ }
+ }
+ c_query_context_destroy(&context);
+ return r;
+}
+
+//result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc.)
+static int
+c_getf_last_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) {
+ QUERY_CONTEXT super_context = (QUERY_CONTEXT) extra;
+ QUERY_CONTEXT_BASE context = &super_context->base;
+
+ int r;
+ DBT found_key = { .data = (void *) key, .size = keylen };
+
+ if (context->do_locking) {
+ const DBT *left_key = key != NULL ? &found_key : toku_dbt_negative_infinity();
+ const DBT *right_key = toku_dbt_positive_infinity();
+ r = toku_db_start_range_lock(context->db, context->txn, left_key, right_key,
+ query_context_determine_lock_type(context), &context->request);
+ } else {
+ r = 0;
+ }
+
+ //Call application-layer callback if found and locks were successfully obtained.
+ if (r==0 && key!=NULL && !lock_only) {
+ DBT found_val = { .data = (void *) val, .size = vallen };
+ context->r_user_callback = context->f(&found_key, &found_val, context->f_extra);
+ r = context->r_user_callback;
+ }
+
+ //Give ft-layer an error (if any) to return from toku_ft_cursor_last
+ return r;
+}
+
+static int c_getf_next_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool);
+
+static int
+c_getf_next(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) {
+ int r;
+ HANDLE_PANICKED_DB(c->dbp);
+ HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c);
+ if (c_uninitialized(c)) {
+ r = c_getf_first(c, flag, f, extra);
+ } else {
+ r = 0;
+ QUERY_CONTEXT_S context; //Describes the context of this query.
+ c_query_context_init(&context, c, flag, f, extra);
+ while (r == 0) {
+ //toku_ft_cursor_next will call c_getf_next_callback(..., context) (if query is successful)
+ r = toku_ft_cursor_next(dbc_ftcursor(c), c_getf_next_callback, &context);
+ if (r == DB_LOCK_NOTGRANTED) {
+ r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request);
+ } else {
+ break;
+ }
+ }
+ c_query_context_destroy(&context);
+ }
+ return r;
+}
+
+//result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc.)
+static int
+c_getf_next_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) {
+ QUERY_CONTEXT super_context = (QUERY_CONTEXT) extra;
+ QUERY_CONTEXT_BASE context = &super_context->base;
+
+ int r;
+
+ DBT found_key = { .data = (void *) key, .size = keylen };
+
+ if (context->do_locking) {
+ const DBT *prevkey, *prevval;
+ toku_ft_cursor_peek(context->c, &prevkey, &prevval);
+ const DBT *left_key = prevkey;
+ const DBT *right_key = key != NULL ? &found_key : toku_dbt_positive_infinity();
+ r = toku_db_start_range_lock(context->db, context->txn, left_key, right_key,
+ query_context_determine_lock_type(context), &context->request);
+ } else {
+ r = 0;
+ }
+
+ //Call application-layer callback if found and locks were successfully obtained.
+ if (r==0 && key!=NULL && !lock_only) {
+ DBT found_val = { .data = (void *) val, .size = vallen };
+ context->r_user_callback = context->f(&found_key, &found_val, context->f_extra);
+ r = context->r_user_callback;
+ }
+
+ //Give ft-layer an error (if any) to return from toku_ft_cursor_next
+ return r;
+}
+
+static int c_getf_prev_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool);
+
+static int
+c_getf_prev(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) {
+ int r;
+ HANDLE_PANICKED_DB(c->dbp);
+ HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c);
+ if (c_uninitialized(c)) {
+ r = c_getf_last(c, flag, f, extra);
+ } else {
+ r = 0;
+ QUERY_CONTEXT_S context; //Describes the context of this query.
+ c_query_context_init(&context, c, flag, f, extra);
+ while (r == 0) {
+ //toku_ft_cursor_prev will call c_getf_prev_callback(..., context) (if query is successful)
+ r = toku_ft_cursor_prev(dbc_ftcursor(c), c_getf_prev_callback, &context);
+ if (r == DB_LOCK_NOTGRANTED) {
+ r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request);
+ } else {
+ break;
+ }
+ }
+ c_query_context_destroy(&context);
+ }
+ return r;
+}
+
+// result is the result of the query (i.e. 0 means found, DB_NOTFOUND means not found, etc.)
+static int
+c_getf_prev_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) {
+ QUERY_CONTEXT super_context = (QUERY_CONTEXT) extra;
+ QUERY_CONTEXT_BASE context = &super_context->base;
+
+ int r;
+ DBT found_key = { .data = (void *) key, .size = keylen };
+
+ if (context->do_locking) {
+ const DBT *prevkey, *prevval;
+ toku_ft_cursor_peek(context->c, &prevkey, &prevval);
+ const DBT *left_key = key != NULL ? &found_key : toku_dbt_negative_infinity();
+ const DBT *right_key = prevkey;
+ r = toku_db_start_range_lock(context->db, context->txn, left_key, right_key,
+ query_context_determine_lock_type(context), &context->request);
+ } else {
+ r = 0;
+ }
+
+ //Call application-layer callback if found and locks were successfully obtained.
+ if (r==0 && key!=NULL && !lock_only) {
+ DBT found_val = { .data = (void *) val, .size = vallen };
+ context->r_user_callback = context->f(&found_key, &found_val, context->f_extra);
+ r = context->r_user_callback;
+ }
+
+ //Give ft-layer an error (if any) to return from toku_ft_cursor_prev
+ return r;
+}
+
+static int c_getf_current_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool);
+
+static int
+c_getf_current(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) {
+ HANDLE_PANICKED_DB(c->dbp);
+ HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c);
+
+ QUERY_CONTEXT_S context; //Describes the context of this query.
+ c_query_context_init(&context, c, flag, f, extra);
+ //toku_ft_cursor_current will call c_getf_current_callback(..., context) (if query is successful)
+ int r = toku_ft_cursor_current(dbc_ftcursor(c), DB_CURRENT, c_getf_current_callback, &context);
+ c_query_context_destroy(&context);
+ return r;
+}
+
+// result is the result of the query (i.e. 0 means found, DB_NOTFOUND means not found, etc.)
+static int
+c_getf_current_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) {
+ QUERY_CONTEXT super_context = (QUERY_CONTEXT) extra;
+ QUERY_CONTEXT_BASE context = &super_context->base;
+
+ int r;
+
+ //Call application-layer callback if found.
+ if (key!=NULL && !lock_only) {
+ DBT found_key = { .data = (void *) key, .size = keylen };
+ DBT found_val = { .data = (void *) val, .size = vallen };
+ context->r_user_callback = context->f(&found_key, &found_val, context->f_extra);
+ r = context->r_user_callback;
+ } else {
+ r = 0;
+ }
+
+ //Give ft-layer an error (if any) to return from toku_ft_cursor_current
+ return r;
+}
+
+static int c_getf_set_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool);
+
+int
+toku_c_getf_set(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra) {
+ HANDLE_PANICKED_DB(c->dbp);
+ HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c);
+
+ int r = 0;
+ QUERY_CONTEXT_WITH_INPUT_S context; //Describes the context of this query.
+ query_context_with_input_init(&context, c, flag, key, NULL, f, extra);
+ while (r == 0) {
+ //toku_ft_cursor_set will call c_getf_set_callback(..., context) (if query is successful)
+ r = toku_ft_cursor_set(dbc_ftcursor(c), key, c_getf_set_callback, &context);
+ if (r == DB_LOCK_NOTGRANTED) {
+ r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request);
+ } else {
+ break;
+ }
+ }
+ query_context_base_destroy(&context.base);
+ return r;
+}
+
+// result is the result of the query (i.e. 0 means found, DB_NOTFOUND means not found, etc.)
+static int
+c_getf_set_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) {
+ QUERY_CONTEXT_WITH_INPUT super_context = (QUERY_CONTEXT_WITH_INPUT) extra;
+ QUERY_CONTEXT_BASE context = &super_context->base;
+
+ int r;
+
+    //Lock:
+    //  a point lock on input_key, i.e. the key range [input_key, input_key]
+    //  (values are not part of the lock)
+ if (context->do_locking) {
+ r = toku_db_start_range_lock(context->db, context->txn, super_context->input_key, super_context->input_key,
+ query_context_determine_lock_type(context), &context->request);
+ } else {
+ r = 0;
+ }
+
+ //Call application-layer callback if found and locks were successfully obtained.
+ if (r==0 && key!=NULL && !lock_only) {
+ DBT found_key = { .data = (void *) key, .size = keylen };
+ DBT found_val = { .data = (void *) val, .size = vallen };
+ context->r_user_callback = context->f(&found_key, &found_val, context->f_extra);
+ r = context->r_user_callback;
+ }
+
+ //Give ft-layer an error (if any) to return from toku_ft_cursor_set
+ return r;
+}
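+
+// Illustrative only: a hedged sketch of how an application-layer caller might
+// drive toku_c_getf_set().  The callback shape matches how context->f is
+// invoked above (found key, found val, caller extra); my_row and copy_row_cb
+// are hypothetical names, not part of this file.
+//
+//     static int copy_row_cb(const DBT *key, const DBT *val, void *extra) {
+//         struct my_row *row = (struct my_row *) extra;
+//         // copy key->data/key->size and val->data/val->size into *row
+//         return 0;  // a nonzero value is handed back through r_user_callback
+//     }
+//
+//     DBT k; toku_fill_dbt(&k, "akey", 5);
+//     struct my_row row;
+//     int r = toku_c_getf_set(cursor, 0, &k, copy_row_cb, &row);
+//     // r == 0 if found, DB_NOTFOUND if "akey" is absent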
+
+static int c_getf_set_range_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool);
+
+static int
+c_getf_set_range(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra) {
+ HANDLE_PANICKED_DB(c->dbp);
+ HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c);
+
+ int r = 0;
+ QUERY_CONTEXT_WITH_INPUT_S context; //Describes the context of this query.
+ query_context_with_input_init(&context, c, flag, key, NULL, f, extra);
+ while (r == 0) {
+ //toku_ft_cursor_set_range will call c_getf_set_range_callback(..., context) (if query is successful)
+ r = toku_ft_cursor_set_range(dbc_ftcursor(c), key, nullptr, c_getf_set_range_callback, &context);
+ if (r == DB_LOCK_NOTGRANTED) {
+ r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request);
+ } else {
+ break;
+ }
+ }
+ query_context_base_destroy(&context.base);
+ return r;
+}
+
+// result is the result of the query (i.e. 0 means found, DB_NOTFOUND means not found, etc.)
+static int
+c_getf_set_range_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) {
+ QUERY_CONTEXT_WITH_INPUT super_context = (QUERY_CONTEXT_WITH_INPUT) extra;
+ QUERY_CONTEXT_BASE context = &super_context->base;
+
+ int r;
+ DBT found_key = { .data = (void *) key, .size = keylen };
+
+    //Lock:
+    //  the key range [input_key, found ? found_key : +infinity]
+    //  (values are not part of the lock)
+ if (context->do_locking) {
+ const DBT *left_key = super_context->input_key;
+ const DBT *right_key = key != NULL ? &found_key : toku_dbt_positive_infinity();
+ r = toku_db_start_range_lock(context->db, context->txn, left_key, right_key,
+ query_context_determine_lock_type(context), &context->request);
+ } else {
+ r = 0;
+ }
+
+ //Call application-layer callback if found and locks were successfully obtained.
+ if (r==0 && key!=NULL && !lock_only) {
+ DBT found_val = { .data = (void *) val, .size = vallen };
+ context->r_user_callback = context->f(&found_key, &found_val, context->f_extra);
+ r = context->r_user_callback;
+ }
+
+ //Give ft-layer an error (if any) to return from toku_ft_cursor_set_range
+ return r;
+}
+
+static int
+c_getf_set_range_with_bound(DBC *c, uint32_t flag, DBT *key, DBT *key_bound, YDB_CALLBACK_FUNCTION f, void *extra) {
+ HANDLE_PANICKED_DB(c->dbp);
+ HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c);
+
+ int r = 0;
+ QUERY_CONTEXT_WITH_INPUT_S context; //Describes the context of this query.
+ query_context_with_input_init(&context, c, flag, key, NULL, f, extra);
+ while (r == 0) {
+ //toku_ft_cursor_set_range will call c_getf_set_range_callback(..., context) (if query is successful)
+ r = toku_ft_cursor_set_range(dbc_ftcursor(c), key, key_bound, c_getf_set_range_callback, &context);
+ if (r == DB_LOCK_NOTGRANTED) {
+ r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request);
+ } else {
+ break;
+ }
+ }
+ query_context_base_destroy(&context.base);
+ return r;
+}
+
+static int c_getf_set_range_reverse_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool);
+
+static int
+c_getf_set_range_reverse(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra) {
+ HANDLE_PANICKED_DB(c->dbp);
+ HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c);
+
+ int r = 0;
+ QUERY_CONTEXT_WITH_INPUT_S context; //Describes the context of this query.
+ query_context_with_input_init(&context, c, flag, key, NULL, f, extra);
+ while (r == 0) {
+ //toku_ft_cursor_set_range_reverse will call c_getf_set_range_reverse_callback(..., context) (if query is successful)
+ r = toku_ft_cursor_set_range_reverse(dbc_ftcursor(c), key, c_getf_set_range_reverse_callback, &context);
+ if (r == DB_LOCK_NOTGRANTED) {
+ r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request);
+ } else {
+ break;
+ }
+ }
+ query_context_base_destroy(&context.base);
+ return r;
+}
+
+// result is the result of the query (i.e. 0 means found, DB_NOTFOUND means not found, etc.)
+static int
+c_getf_set_range_reverse_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) {
+ QUERY_CONTEXT_WITH_INPUT super_context = (QUERY_CONTEXT_WITH_INPUT) extra;
+ QUERY_CONTEXT_BASE context = &super_context->base;
+
+ int r;
+ DBT found_key = { .data = (void *) key, .size = keylen };
+
+    //Lock:
+    //  the key range [found ? found_key : -infinity, input_key]
+    //  (values are not part of the lock)
+ if (context->do_locking) {
+ const DBT *left_key = key != NULL ? &found_key : toku_dbt_negative_infinity();
+ const DBT *right_key = super_context->input_key;
+ r = toku_db_start_range_lock(context->db, context->txn, left_key, right_key,
+ query_context_determine_lock_type(context), &context->request);
+ } else {
+ r = 0;
+ }
+
+ //Call application-layer callback if found and locks were successfully obtained.
+ if (r==0 && key!=NULL && !lock_only) {
+ DBT found_val = { .data = (void *) val, .size = vallen };
+ context->r_user_callback = context->f(&found_key, &found_val, context->f_extra);
+ r = context->r_user_callback;
+ }
+
+ //Give ft-layer an error (if any) to return from toku_ft_cursor_set_range_reverse
+ return r;
+}
+
+
+int toku_c_close_internal(DBC *c) {
+ toku_ft_cursor_destroy(dbc_ftcursor(c));
+ toku_sdbt_cleanup(&dbc_struct_i(c)->skey_s);
+ toku_sdbt_cleanup(&dbc_struct_i(c)->sval_s);
+ return 0;
+}
+
+// Close a cursor.
+int toku_c_close(DBC *c) {
+ toku_c_close_internal(c);
+ toku_free(c);
+ return 0;
+}
+
+static int c_set_bounds(DBC *dbc,
+ const DBT *left_key,
+ const DBT *right_key,
+ bool pre_acquire,
+ int out_of_range_error) {
+ if (out_of_range_error != DB_NOTFOUND &&
+ out_of_range_error != TOKUDB_OUT_OF_RANGE && out_of_range_error != 0) {
+ return toku_ydb_do_error(dbc->dbp->dbenv,
+ EINVAL,
+ "Invalid out_of_range_error [%d] for %s\n",
+ out_of_range_error,
+ __FUNCTION__);
+ }
+ if (left_key == toku_dbt_negative_infinity() &&
+ right_key == toku_dbt_positive_infinity()) {
+ out_of_range_error = 0;
+ }
+ DB *db = dbc->dbp;
+ DB_TXN *txn = dbc_struct_i(dbc)->txn;
+ HANDLE_PANICKED_DB(db);
+ toku_ft_cursor_set_range_lock(dbc_ftcursor(dbc),
+ left_key,
+ right_key,
+ (left_key == toku_dbt_negative_infinity()),
+ (right_key == toku_dbt_positive_infinity()),
+ out_of_range_error);
+ if (!db->i->lt || !txn || !pre_acquire)
+ return 0;
+ // READ_UNCOMMITTED and READ_COMMITTED transactions do not need read locks.
+ if (!dbc_struct_i(dbc)->rmw &&
+ dbc_struct_i(dbc)->iso != TOKU_ISO_SERIALIZABLE &&
+ !(dbc_struct_i(dbc)->iso == TOKU_ISO_SNAPSHOT &&
+ dbc_struct_i(dbc)->locking_read))
+ return 0;
+
+ toku::lock_request::type lock_type = dbc_struct_i(dbc)->rmw
+ ? toku::lock_request::type::WRITE
+ : toku::lock_request::type::READ;
+ int r = toku_db_get_range_lock(db, txn, left_key, right_key, lock_type);
+ return r;
+}
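+
+// A hedged usage sketch (caller side, not part of this file): a cursor that
+// will only scan [low, high] can install bounds and optionally pre-acquire
+// the whole range lock up front instead of locking row by row.  low and high
+// are hypothetical DBTs prepared by the caller.
+//
+//     int r = dbc->c_set_bounds(dbc, &low, &high,
+//                               true /*pre_acquire*/, TOKUDB_OUT_OF_RANGE);
+//     // subsequent cursor reads outside [low, high] return TOKUDB_OUT_OF_RANGE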
+
+static void
+c_remove_restriction(DBC *dbc) {
+ toku_ft_cursor_remove_restriction(dbc_ftcursor(dbc));
+}
+
+static void c_set_txn(DBC *dbc, DB_TXN *txn) {
+ dbc_struct_i(dbc)->txn = txn;
+ dbc_ftcursor(dbc)->ttxn = db_txn_struct_i(txn)->tokutxn;
+}
+
+static void
+c_set_check_interrupt_callback(DBC* dbc, bool (*interrupt_callback)(void*, uint64_t), void *extra) {
+ toku_ft_cursor_set_check_interrupt_cb(dbc_ftcursor(dbc), interrupt_callback, extra);
+}
+
+int
+toku_c_get(DBC* c, DBT* key, DBT* val, uint32_t flag) {
+ HANDLE_PANICKED_DB(c->dbp);
+ HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c);
+
+ uint32_t main_flag = get_main_cursor_flag(flag);
+ uint32_t remaining_flags = get_nonmain_cursor_flags(flag);
+ int r;
+ QUERY_CONTEXT_WRAPPED_S context;
+ //Passing in NULL for a key or val means that it is NOT an output.
+ // Both key and val are output:
+ // query_context_wrapped_init(&context, c, key, val);
+ // Val is output, key is not:
+ // query_context_wrapped_init(&context, c, NULL, val);
+ // Neither key nor val are output:
+ // query_context_wrapped_init(&context, c, NULL, NULL);
+ switch (main_flag) {
+ case (DB_FIRST):
+ query_context_wrapped_init(&context, c, key, val);
+ r = c_getf_first(c, remaining_flags, c_get_wrapper_callback, &context);
+ break;
+ case (DB_LAST):
+ query_context_wrapped_init(&context, c, key, val);
+ r = c_getf_last(c, remaining_flags, c_get_wrapper_callback, &context);
+ break;
+ case (DB_NEXT):
+ query_context_wrapped_init(&context, c, key, val);
+ r = c_getf_next(c, remaining_flags, c_get_wrapper_callback, &context);
+ break;
+ case (DB_PREV):
+ query_context_wrapped_init(&context, c, key, val);
+ r = c_getf_prev(c, remaining_flags, c_get_wrapper_callback, &context);
+ break;
+#ifdef DB_PREV_DUP
+ case (DB_PREV_DUP):
+ query_context_wrapped_init(&context, c, key, val);
+ r = toku_c_getf_prev_dup(c, remaining_flags, c_get_wrapper_callback, &context);
+ break;
+#endif
+ case (DB_CURRENT):
+ query_context_wrapped_init(&context, c, key, val);
+ r = c_getf_current(c, remaining_flags, c_get_wrapper_callback, &context);
+ break;
+ case (DB_SET):
+ query_context_wrapped_init(&context, c, NULL, val);
+ r = toku_c_getf_set(c, remaining_flags, key, c_get_wrapper_callback, &context);
+ break;
+ case (DB_SET_RANGE):
+ query_context_wrapped_init(&context, c, key, val);
+ r = c_getf_set_range(c, remaining_flags, key, c_get_wrapper_callback, &context);
+ break;
+ case (DB_SET_RANGE_REVERSE):
+ query_context_wrapped_init(&context, c, key, val);
+ r = c_getf_set_range_reverse(c, remaining_flags, key, c_get_wrapper_callback, &context);
+ break;
+ default:
+ r = EINVAL;
+ break;
+ }
+ return r;
+}
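+
+// A hedged sketch of the classic (non-callback) interface above; "cursor" and
+// the key bytes are illustrative.  For DB_SET the key is input only (note the
+// NULL key output passed to query_context_wrapped_init above), so only the
+// value DBT needs an ownership flag such as DB_DBT_MALLOC.
+//
+//     DBT k; toku_fill_dbt(&k, "akey", 5);
+//     DBT v; toku_init_dbt_flags(&v, DB_DBT_MALLOC);
+//     int r = toku_c_get(cursor, &k, &v, DB_SET);   // 0, DB_NOTFOUND, ...
+//     if (r == 0) { /* consume v.data / v.size, then toku_free(v.data) */ }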
+
+int toku_db_cursor_internal(DB *db,
+ DB_TXN *txn,
+ DBC *c,
+ uint32_t flags,
+ int is_temporary_cursor) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+ DB_ENV *env = db->dbenv;
+
+ if (flags &
+ ~(DB_SERIALIZABLE | DB_INHERIT_ISOLATION | DB_LOCKING_READ | DB_RMW |
+ DBC_DISABLE_PREFETCHING)) {
+ return toku_ydb_do_error(
+ env, EINVAL, "Invalid flags set for toku_db_cursor\n");
+ }
+
+#define SCRS(name) c->name = name
+ SCRS(c_getf_first);
+ SCRS(c_getf_last);
+ SCRS(c_getf_next);
+ SCRS(c_getf_prev);
+ SCRS(c_getf_current);
+ SCRS(c_getf_set_range);
+ SCRS(c_getf_set_range_reverse);
+ SCRS(c_getf_set_range_with_bound);
+ SCRS(c_set_bounds);
+ SCRS(c_remove_restriction);
+ SCRS(c_set_txn);
+ SCRS(c_set_check_interrupt_callback);
+#undef SCRS
+
+ c->c_get = toku_c_get;
+ c->c_getf_set = toku_c_getf_set;
+ c->c_close = toku_c_close;
+
+ c->dbp = db;
+
+ dbc_struct_i(c)->txn = txn;
+ dbc_struct_i(c)->skey_s = (struct simple_dbt){0, 0};
+ dbc_struct_i(c)->sval_s = (struct simple_dbt){0, 0};
+ if (is_temporary_cursor) {
+ dbc_struct_i(c)->skey = &db->i->skey;
+ dbc_struct_i(c)->sval = &db->i->sval;
+ } else {
+ dbc_struct_i(c)->skey = &dbc_struct_i(c)->skey_s;
+ dbc_struct_i(c)->sval = &dbc_struct_i(c)->sval_s;
+ }
+ if (flags & DB_SERIALIZABLE) {
+ dbc_struct_i(c)->iso = TOKU_ISO_SERIALIZABLE;
+ } else {
+ dbc_struct_i(c)->iso =
+ txn ? db_txn_struct_i(txn)->iso : TOKU_ISO_SERIALIZABLE;
+ }
+ dbc_struct_i(c)->rmw = (flags & DB_RMW) != 0;
+ dbc_struct_i(c)->locking_read = (flags & DB_LOCKING_READ) != 0;
+ enum cursor_read_type read_type =
+ C_READ_ANY; // default, used in serializable and read uncommitted
+ if (txn) {
+ if (dbc_struct_i(c)->iso == TOKU_ISO_READ_COMMITTED ||
+ dbc_struct_i(c)->iso == TOKU_ISO_SNAPSHOT) {
+ read_type = C_READ_SNAPSHOT;
+ } else if (dbc_struct_i(c)->iso == TOKU_ISO_READ_COMMITTED_ALWAYS) {
+ read_type = C_READ_COMMITTED;
+ }
+ }
+ int r = toku_ft_cursor_create(db->i->ft_handle,
+ dbc_ftcursor(c),
+ txn ? db_txn_struct_i(txn)->tokutxn : NULL,
+ read_type,
+ ((flags & DBC_DISABLE_PREFETCHING) != 0),
+ is_temporary_cursor != 0);
+ if (r != 0) {
+ invariant(r == TOKUDB_MVCC_DICTIONARY_TOO_NEW);
+ }
+ return r;
+}
+
+static inline int
+autotxn_db_cursor(DB *db, DB_TXN *txn, DBC *c, uint32_t flags) {
+ if (!txn && (db->dbenv->i->open_flags & DB_INIT_TXN)) {
+ return toku_ydb_do_error(db->dbenv, EINVAL,
+ "Cursors in a transaction environment must have transactions.\n");
+ }
+ return toku_db_cursor_internal(db, txn, c, flags, 0);
+}
+
+// Create a cursor on a db.
+int toku_db_cursor(DB *db, DB_TXN *txn, DBC **c, uint32_t flags) {
+ DBC *XMALLOC(cursor);
+ int r = autotxn_db_cursor(db, txn, cursor, flags);
+ if (r == 0) {
+ *c = cursor;
+ } else {
+ toku_free(cursor);
+ }
+ return r;
+}
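+
+// A hedged end-to-end sketch of the cursor lifecycle exposed here ("db",
+// "txn", copy_row_cb and row are illustrative; see the callback sketch
+// earlier in this file):
+//
+//     DBC *c = NULL;
+//     int r = toku_db_cursor(db, txn, &c, 0);   // flags may include DB_SERIALIZABLE,
+//                                               // DB_RMW, DBC_DISABLE_PREFETCHING
+//     if (r == 0) {
+//         while (r == 0)
+//             r = c->c_getf_next(c, 0, copy_row_cb, &row);
+//         if (r == DB_NOTFOUND)
+//             r = 0;                            // normal end of scan
+//         toku_c_close(c);
+//     }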
+
+#undef STATUS_VALUE
+
+#include <toku_race_tools.h>
+void __attribute__((constructor)) toku_ydb_cursor_helgrind_ignore(void);
+void
+toku_ydb_cursor_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&ydb_c_layer_status, sizeof ydb_c_layer_status);
+}
diff --git a/storage/tokudb/PerconaFT/src/ydb_cursor.h b/storage/tokudb/PerconaFT/src/ydb_cursor.h
new file mode 100644
index 00000000..232f2670
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_cursor.h
@@ -0,0 +1,61 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// This file defines the public interface to the ydb library
+
+typedef enum {
+ YDB_C_LAYER_STATUS_NUM_ROWS = 0 /* number of rows in this status array */
+} ydb_c_lock_layer_status_entry;
+
+typedef struct {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[YDB_C_LAYER_STATUS_NUM_ROWS];
+} YDB_C_LAYER_STATUS_S, *YDB_C_LAYER_STATUS;
+
+void ydb_c_layer_get_status(YDB_C_LAYER_STATUS statp);
+
+int toku_c_get(DBC * c, DBT * key, DBT * data, uint32_t flag);
+int toku_c_getf_set(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra);
+
+int toku_db_cursor(DB *db, DB_TXN *txn, DBC **c, uint32_t flags);
+int toku_db_cursor_internal(DB *db, DB_TXN * txn, DBC *c, uint32_t flags, int is_temporary_cursor);
+
+int toku_c_close(DBC *c);
+int toku_c_close_internal(DBC *c);
diff --git a/storage/tokudb/PerconaFT/src/ydb_db.cc b/storage/tokudb/PerconaFT/src/ydb_db.cc
new file mode 100644
index 00000000..5707415b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_db.cc
@@ -0,0 +1,1284 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <ctype.h>
+
+#include <db.h>
+#include <locktree/locktree.h>
+#include <ft/ft.h>
+#include <ft/ft-flusher.h>
+#include <ft/cachetable/checkpoint.h>
+
+#include "ydb_cursor.h"
+#include "ydb_row_lock.h"
+#include "ydb_db.h"
+#include "ydb_write.h"
+#include "ydb-internal.h"
+#include "ydb_load.h"
+#include "indexer.h"
+#include <portability/toku_atomic.h>
+#include <util/status.h>
+#include <ft/le-cursor.h>
+
+static YDB_DB_LAYER_STATUS_S ydb_db_layer_status;
+#ifdef STATUS_VALUE
+#undef STATUS_VALUE
+#endif
+#define STATUS_VALUE(x) ydb_db_layer_status.status[x].value.num
+
+#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ydb_db_layer_status, k, c, t, l, inc)
+
+static void
+ydb_db_layer_status_init (void) {
+    // Note: this function initializes the keyname, type, and legend fields.
+    // Value fields are initialized to zero by the compiler.
+
+ STATUS_INIT(YDB_LAYER_DIRECTORY_WRITE_LOCKS, nullptr, UINT64, "directory write locks", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_DIRECTORY_WRITE_LOCKS_FAIL, nullptr, UINT64, "directory write locks fail", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_LOGSUPPRESS, nullptr, UINT64, "log suppress", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_LOGSUPPRESS_FAIL, nullptr, UINT64, "log suppress fail", TOKU_ENGINE_STATUS);
+ ydb_db_layer_status.initialized = true;
+}
+#undef STATUS_INIT
+
+void
+ydb_db_layer_get_status(YDB_DB_LAYER_STATUS statp) {
+ if (!ydb_db_layer_status.initialized)
+ ydb_db_layer_status_init();
+ *statp = ydb_db_layer_status;
+}
+
+void create_iname_hint(DB_ENV *env, const char *dname, char *hint) {
+ //Requires: size of hint array must be > strlen(dname)
+ //Copy alphanumeric characters only.
+ //Replace strings of non-alphanumeric characters with a single underscore.
+ if (env->get_dir_per_db(env) && !toku_os_is_absolute_name(dname)) {
+ assert(dname);
+ if (*dname == '.')
+ ++dname;
+ if (*dname == '/')
+ ++dname;
+ bool underscored = false;
+ bool dbdir_is_parsed = false;
+        // Do not change the first '/' because it is the
+        // delimiter that splits the name into the database
+        // dir and the table dir.
+ while (*dname) {
+ if (isalnum(*dname) || (*dname == '/' && !dbdir_is_parsed)) {
+ char c = *dname++;
+ *hint++ = c;
+ if (c == '/')
+ dbdir_is_parsed = true;
+ underscored = false;
+ } else if (!dbdir_is_parsed) {
+ char c = *dname++;
+ *hint++ = c;
+ } else {
+ if (!underscored)
+ *hint++ = '_';
+ dname++;
+ underscored = true;
+ }
+ }
+ *hint = '\0';
+ } else {
+ bool underscored = false;
+ while (*dname) {
+ if (isalnum(*dname)) {
+ char c = *dname++;
+ *hint++ = c;
+ underscored = false;
+ }
+ else {
+ if (!underscored)
+ *hint++ = '_';
+ dname++;
+ underscored = true;
+ }
+ }
+ *hint = '\0';
+ }
+}
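+
+// For illustration (derived from the loops above; the dnames are made up):
+//   dir-per-db on, relative dname "./mydb/t1-part"  ->  hint "mydb/t1_part"
+//     (the leading "." and "/" are skipped, the first "/" is kept as the
+//      db-dir/table-dir delimiter, later non-alphanumeric runs collapse to "_")
+//   otherwise, dname "./mydb/t1-part"               ->  hint "_mydb_t1_part"
+//     (every non-alphanumeric run, including "./" and "/", becomes one "_")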
+
+// n < 0 means to ignore mark and ignore n
+// n >= 0 means to include mark ("_B_" or "_P_") with hex value of n in iname
+// (intended for use by loader, which will create many inames using one txnid).
+char *create_iname(DB_ENV *env,
+ uint64_t id1,
+ uint64_t id2,
+ char *hint,
+ const char *mark,
+ int n) {
+ int bytes;
+ char inamebase[strlen(hint) +
+ 8 + // hex file format version
+ 24 + // hex id (normally the txnid's parent and child)
+ 8 + // hex value of n if non-neg
+ sizeof("_B___.") + // extra pieces
+ strlen(toku_product_name)];
+ if (n < 0)
+ bytes = snprintf(inamebase, sizeof(inamebase),
+ "%s_%" PRIx64 "_%" PRIx64 "_%" PRIx32 ".%s",
+ hint, id1, id2, FT_LAYOUT_VERSION, toku_product_name);
+ else {
+ invariant(strlen(mark) == 1);
+ bytes = snprintf(inamebase, sizeof(inamebase),
+ "%s_%" PRIx64 "_%" PRIx64 "_%" PRIx32 "_%s_%" PRIx32 ".%s",
+ hint, id1, id2, FT_LAYOUT_VERSION, mark, n, toku_product_name);
+ }
+ assert(bytes>0);
+ assert(bytes<=(int)sizeof(inamebase)-1);
+ char *rval;
+ if (env->i->data_dir)
+ rval = toku_construct_full_name(2, env->i->data_dir, inamebase);
+ else
+ rval = toku_construct_full_name(1, inamebase);
+ assert(rval);
+ return rval;
+}
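+
+// For illustration (hex ids and the product name are made up; the layout
+// version is whatever FT_LAYOUT_VERSION is in this build):
+//   n <  0: "<hint>_<id1>_<id2>_<layout-version>.<product>"
+//           e.g. "mydb/t1_123_0_1d.tokudb"
+//   n >= 0: "<hint>_<id1>_<id2>_<layout-version>_<mark>_<n>.<product>"
+//           e.g. "mydb/t1_123_0_1d_B_0.tokudb" for the first loader file
+// If env->i->data_dir is set, toku_construct_full_name() prefixes the iname
+// with that directory.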
+
+static uint64_t nontransactional_open_id = 0;
+
+std::unique_ptr<char[], decltype(&toku_free)> generate_iname_for_rename_or_open(
+ DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname,
+ bool is_open) {
+ std::unique_ptr<char[], decltype(&toku_free)> result(nullptr, &toku_free);
+ char hint[strlen(dname) + 1];
+ uint64_t id1 = 0;
+ uint64_t id2 = 0;
+
+ if (txn) {
+ id1 = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn).parent_id64;
+ id2 = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn).child_id64;
+ } else if (is_open)
+ id1 = toku_sync_fetch_and_add(&nontransactional_open_id, 1);
+
+ create_iname_hint(env, dname, hint);
+
+ result.reset(create_iname(env, id1, id2, hint, NULL, -1));
+
+ return result;
+}
+
+static int toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode);
+
+// Effect: Do the work required of DB->close().
+// requires: the multi_operation client lock is held.
+int
+toku_db_close(DB * db) {
+ int r = 0;
+ if (db_opened(db) && db->i->dname) {
+ // internal (non-user) dictionary has no dname
+ env_note_db_closed(db->dbenv, db); // tell env that this db is no longer in use by the user of this api (user-closed, may still be in use by fractal tree internals)
+ }
+ // close the ft handle, and possibly close the locktree
+ toku_ft_handle_close(db->i->ft_handle);
+ if (db->i->lt) {
+ db->dbenv->i->ltm.release_lt(db->i->lt);
+ }
+ toku_sdbt_cleanup(&db->i->skey);
+ toku_sdbt_cleanup(&db->i->sval);
+ if (db->i->dname) {
+ toku_free(db->i->dname);
+ }
+ toku_free(db->i);
+ toku_free(db);
+ return r;
+}
+
+///////////
+//db_getf_XXX is equivalent to c_getf_XXX, without a persistent cursor
+
+int
+db_getf_set(DB *db, DB_TXN *txn, uint32_t flags, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+ DBC c;
+ uint32_t create_flags = flags & (DB_ISOLATION_FLAGS | DB_RMW);
+ flags &= ~DB_ISOLATION_FLAGS;
+ int r = toku_db_cursor_internal(db, txn, &c, create_flags | DBC_DISABLE_PREFETCHING, 1);
+ if (r==0) {
+ r = toku_c_getf_set(&c, flags, key, f, extra);
+ int r2 = toku_c_close_internal(&c);
+ if (r==0) r = r2;
+ }
+ return r;
+}
+
+static inline int
+db_thread_need_flags(DBT *dbt) {
+ return (dbt->flags & (DB_DBT_MALLOC+DB_DBT_REALLOC+DB_DBT_USERMEM)) == 0;
+}
+
+int
+toku_db_get (DB * db, DB_TXN * txn, DBT * key, DBT * data, uint32_t flags) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+ int r;
+ uint32_t iso_flags = flags & DB_ISOLATION_FLAGS;
+
+ if ((db->i->open_flags & DB_THREAD) && db_thread_need_flags(data))
+ return EINVAL;
+
+ uint32_t lock_flags = flags & (DB_PRELOCKED | DB_PRELOCKED_WRITE);
+ flags &= ~lock_flags;
+ flags &= ~DB_ISOLATION_FLAGS;
+    // DB_GET_BOTH is no longer supported (#2862).
+ if (flags != 0) return EINVAL;
+
+ DBC dbc;
+ r = toku_db_cursor_internal(db, txn, &dbc, iso_flags | DBC_DISABLE_PREFETCHING, 1);
+ if (r!=0) return r;
+ uint32_t c_get_flags = DB_SET;
+ r = toku_c_get(&dbc, key, data, c_get_flags | lock_flags);
+ int r2 = toku_c_close_internal(&dbc);
+ return r ? r : r2;
+}
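+
+// A hedged caller-side sketch of the DB_THREAD rule enforced above: when the
+// DB was opened with DB_THREAD, the data DBT must carry one of the ownership
+// flags, otherwise toku_db_get() returns EINVAL ("db", "txn" and the key
+// bytes are illustrative).
+//
+//     DBT k; toku_fill_dbt(&k, "akey", 5);
+//     DBT v; toku_init_dbt_flags(&v, DB_DBT_MALLOC);   // or DB_DBT_REALLOC / DB_DBT_USERMEM
+//     int r = toku_db_get(db, txn, &k, &v, 0);
+//     if (r == 0) toku_free(v.data);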
+
+static int
+db_open_subdb(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) {
+ int r;
+ if (!fname || !dbname) r = EINVAL;
+ else {
+ char subdb_full_name[strlen(fname) + sizeof("/") + strlen(dbname)];
+ int bytes = snprintf(subdb_full_name, sizeof(subdb_full_name), "%s/%s", fname, dbname);
+ assert(bytes==(int)sizeof(subdb_full_name)-1);
+ const char *null_subdbname = NULL;
+ r = toku_db_open(db, txn, subdb_full_name, null_subdbname, dbtype, flags, mode);
+ }
+ return r;
+}
+
+// inames are created here.
+// algorithm:
+// begin txn
+// convert dname to iname (possibly creating new iname)
+// open file (toku_ft_handle_open() will handle logging)
+// close txn
+// if created a new iname, take full range lock
+// Requires: no checkpoint may take place during this function, which is enforced by holding the multi_operation_client_lock.
+static int
+toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_READ_ONLY_TXN(txn);
+ if (dbname != NULL) {
+ return db_open_subdb(db, txn, fname, dbname, dbtype, flags, mode);
+ }
+
+ // at this point fname is the dname
+ //This code ONLY supports single-db files.
+ assert(dbname == NULL);
+ const char * dname = fname; // db_open_subdb() converts (fname, dbname) to dname
+
+ ////////////////////////////// do some level of parameter checking.
+ uint32_t unused_flags = flags;
+ int r;
+ if (dbtype!=DB_BTREE && dbtype!=DB_UNKNOWN) return EINVAL;
+ int is_db_excl = flags & DB_EXCL; unused_flags&=~DB_EXCL;
+ int is_db_create = flags & DB_CREATE; unused_flags&=~DB_CREATE;
+ int is_db_hot_index = flags & DB_IS_HOT_INDEX; unused_flags&=~DB_IS_HOT_INDEX;
+
+ //We support READ_UNCOMMITTED and READ_COMMITTED whether or not the flag is provided.
+ unused_flags&=~DB_READ_UNCOMMITTED;
+ unused_flags&=~DB_READ_COMMITTED;
+ unused_flags&=~DB_SERIALIZABLE;
+
+ // DB_THREAD is implicitly supported and DB_BLACKHOLE is supported at the ft-layer
+ unused_flags &= ~DB_THREAD;
+ unused_flags &= ~DB_BLACKHOLE;
+ unused_flags &= ~DB_RDONLY;
+
+ // check for unknown or conflicting flags
+ if (unused_flags) return EINVAL; // unknown flags
+ if (is_db_excl && !is_db_create) return EINVAL;
+ if (dbtype==DB_UNKNOWN && is_db_excl) return EINVAL;
+
+ if (db_opened(db)) {
+ // it was already open
+ return EINVAL;
+ }
+ //////////////////////////////
+
+ // convert dname to iname
+ // - look up dname, get iname
+ // - if dname does not exist, create iname and make entry in directory
+ DBT dname_dbt; // holds dname
+ DBT iname_dbt; // holds iname_in_env
+ toku_fill_dbt(&dname_dbt, dname, strlen(dname)+1);
+ toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC);
+ r = toku_db_get(db->dbenv->i->directory, txn, &dname_dbt, &iname_dbt, DB_SERIALIZABLE); // allocates memory for iname
+ std::unique_ptr<char[], decltype(&toku_free)> iname(
+ static_cast<char *>(iname_dbt.data), &toku_free);
+ if (r == DB_NOTFOUND && !is_db_create) {
+ r = ENOENT;
+ } else if (r==0 && is_db_excl) {
+ r = EEXIST;
+ } else if (r == DB_NOTFOUND) {
+ iname = generate_iname_for_rename_or_open(db->dbenv, txn, dname, true);
+ toku_fill_dbt(&iname_dbt, iname.get(), strlen(iname.get()) + 1);
+        //
+        // put_flags is 0 (no overwrite check) purely for performance, to avoid an
+        // unnecessary query. If we are creating a hot index (per #3166) we add
+        // DB_PRELOCKED_WRITE so that the write lock on the directory is not grabbed;
+        // the directory read lock was already grabbed in toku_db_get above.
+        //
+ uint32_t put_flags = 0 | ((is_db_hot_index) ? DB_PRELOCKED_WRITE : 0);
+ r = toku_db_put(db->dbenv->i->directory, txn, &dname_dbt, &iname_dbt, put_flags, true);
+ }
+
+ // we now have an iname
+ if (r == 0) {
+ r = toku_db_open_iname(db, txn, iname.get(), flags, mode);
+ if (r == 0) {
+ db->i->dname = toku_xstrdup(dname);
+ env_note_db_opened(db->dbenv, db); // tell env that a new db handle is open (using dname)
+ }
+ }
+
+ return r;
+}
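+
+// A hedged usage sketch of the dname-level open implemented above, through the
+// public method installed by toku_db_create() later in this file ("env",
+// "txn" and the dname are illustrative).  With DB_CREATE, a missing dname gets
+// a freshly generated iname recorded in the directory dictionary.
+//
+//     DB *db = NULL;
+//     int r = toku_db_create(&db, env, 0);
+//     if (r == 0)
+//         r = db->open(db, txn, "mydb/t1", NULL, DB_BTREE, DB_CREATE, 0644);
+//     // ... use db ..., then db->close(db, 0)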
+
+// set the descriptor and cmp_descriptor to the
+// descriptors from the given ft, updating the
+// locktree's descriptor pointer if necessary
+static void
+db_set_descriptors(DB *db, FT_HANDLE ft_handle) {
+ const toku::comparator &cmp = toku_ft_get_comparator(ft_handle);
+ db->descriptor = toku_ft_get_descriptor(ft_handle);
+ db->cmp_descriptor = toku_ft_get_cmp_descriptor(ft_handle);
+ invariant(db->cmp_descriptor == cmp.get_descriptor());
+ if (db->i->lt) {
+ db->i->lt->set_comparator(cmp);
+ }
+}
+
+// callback that sets the descriptors when
+// a dictionary is redirected at the ft layer
+static void
+db_on_redirect_callback(FT_HANDLE ft_handle, void* extra) {
+ DB *db = (DB *) extra;
+ db_set_descriptors(db, ft_handle);
+}
+
+// when a locktree is created, clone a ft handle and store it
+// as userdata so we can close it later.
+int toku_db_lt_on_create_callback(toku::locktree *lt, void *extra) {
+ int r;
+ struct lt_on_create_callback_extra *info = (struct lt_on_create_callback_extra *) extra;
+ TOKUTXN ttxn = info->txn ? db_txn_struct_i(info->txn)->tokutxn : NULL;
+ FT_HANDLE ft_handle = info->ft_handle;
+
+ FT_HANDLE cloned_ft_handle;
+ r = toku_ft_handle_clone(&cloned_ft_handle, ft_handle, ttxn, info->open_rw);
+ if (r == 0) {
+ assert(lt->get_userdata() == NULL);
+ lt->set_userdata(cloned_ft_handle);
+ }
+ return r;
+}
+
+// when a locktree is about to be destroyed,
+// close the ft handle stored as userdata.
+void toku_db_lt_on_destroy_callback(toku::locktree *lt) {
+ FT_HANDLE ft_handle = (FT_HANDLE) lt->get_userdata();
+ assert(ft_handle);
+ toku_ft_handle_close(ft_handle);
+}
+
+// Instruct db to use the default (built-in) key comparison function
+// by setting the flag bits in the db and ft structs
+int toku_db_use_builtin_key_cmp(DB *db) {
+ HANDLE_PANICKED_DB(db);
+ int r = 0;
+ if (db_opened(db)) {
+ r = toku_ydb_do_error(db->dbenv, EINVAL, "Comparison functions cannot be set after DB open.\n");
+ } else if (db->i->key_compare_was_set) {
+ r = toku_ydb_do_error(db->dbenv, EINVAL, "Key comparison function already set.\n");
+ } else {
+ uint32_t tflags;
+ toku_ft_get_flags(db->i->ft_handle, &tflags);
+
+ tflags |= TOKU_DB_KEYCMP_BUILTIN;
+ toku_ft_set_flags(db->i->ft_handle, tflags);
+ db->i->key_compare_was_set = true;
+ }
+ return r;
+}
+
+int toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, uint32_t flags, int mode) {
+ //Set comparison functions if not yet set.
+ HANDLE_READ_ONLY_TXN(txn);
+ if (!db->i->key_compare_was_set && db->dbenv->i->bt_compare) {
+ toku_ft_set_bt_compare(db->i->ft_handle, db->dbenv->i->bt_compare);
+ db->i->key_compare_was_set = true;
+ }
+ if (db->dbenv->i->update_function) {
+ toku_ft_set_update(db->i->ft_handle,db->dbenv->i->update_function);
+ }
+ toku_ft_set_redirect_callback(
+ db->i->ft_handle,
+ db_on_redirect_callback,
+ db
+ );
+ bool need_locktree = (bool)((db->dbenv->i->open_flags & DB_INIT_LOCK) &&
+ (db->dbenv->i->open_flags & DB_INIT_TXN));
+
+ int is_db_excl = flags & DB_EXCL; flags&=~DB_EXCL;
+ int is_db_create = flags & DB_CREATE; flags&=~DB_CREATE;
+ //We support READ_UNCOMMITTED and READ_COMMITTED whether or not the flag is provided.
+ flags&=~DB_READ_UNCOMMITTED;
+ flags&=~DB_READ_COMMITTED;
+ flags&=~DB_SERIALIZABLE;
+ flags&=~DB_IS_HOT_INDEX;
+ flags&=~DB_RDONLY;
+ // unknown or conflicting flags are bad
+ int unknown_flags = flags & ~DB_THREAD;
+ unknown_flags &= ~DB_BLACKHOLE;
+ if (unknown_flags || (is_db_excl && !is_db_create)) {
+ return EINVAL;
+ }
+
+ if (db_opened(db)) {
+ return EINVAL; /* It was already open. */
+ }
+
+ db->i->open_flags = flags;
+ db->i->open_mode = mode;
+
+ bool open_rw = mode & (S_IWUSR | S_IWOTH | S_IWGRP);
+ FT_HANDLE ft_handle = db->i->ft_handle;
+ int r = toku_ft_handle_open(ft_handle, iname_in_env,
+ is_db_create, is_db_excl,
+ db->dbenv->i->cachetable,
+ txn ? db_txn_struct_i(txn)->tokutxn : nullptr, open_rw);
+ if (r != 0) {
+ goto out;
+ }
+
+ // if the dictionary was opened as a blackhole, mark the
+ // fractal tree as blackhole too.
+ if (flags & DB_BLACKHOLE) {
+ toku_ft_set_blackhole(ft_handle);
+ }
+
+ db->i->opened = 1;
+
+ // now that the handle has successfully opened, a valid descriptor
+ // is in the ft. we need to set the db's descriptor pointers
+ db_set_descriptors(db, ft_handle);
+
+ if (need_locktree) {
+ db->i->dict_id = toku_ft_get_dictionary_id(db->i->ft_handle);
+ struct lt_on_create_callback_extra on_create_extra = {
+ .txn = txn,
+ .ft_handle = db->i->ft_handle,
+ .open_rw = false
+ };
+ db->i->lt = db->dbenv->i->ltm.get_lt(db->i->dict_id,
+ toku_ft_get_comparator(db->i->ft_handle),
+ &on_create_extra);
+ if (db->i->lt == nullptr) {
+ r = errno;
+ if (r == 0) {
+ r = EINVAL;
+ }
+ goto out;
+ }
+ }
+ r = 0;
+
+out:
+ if (r != 0) {
+ db->i->dict_id = DICTIONARY_ID_NONE;
+ db->i->opened = 0;
+ if (db->i->lt) {
+ db->dbenv->i->ltm.release_lt(db->i->lt);
+ db->i->lt = nullptr;
+ }
+ }
+ return r;
+}
+
+// Return the maximum key and val size in
+// *max_key_size and *max_val_size respectively.
+static void
+toku_db_get_max_row_size(DB * UU(db), uint32_t * max_key_size, uint32_t * max_val_size) {
+ *max_key_size = 0;
+ *max_val_size = 0;
+ toku_ft_get_maximum_advised_key_value_lengths(max_key_size, max_val_size);
+}
+
+int toku_db_pre_acquire_fileops_lock(DB *db, DB_TXN *txn) {
+ // bad hack because some environment dictionaries do not have a dname
+ char *dname = db->i->dname;
+ if (!dname)
+ return 0;
+
+ DBT key_in_directory = { .data = dname, .size = (uint32_t) strlen(dname)+1 };
+ //Left end of range == right end of range (point lock)
+ int r = toku_db_get_range_lock(db->dbenv->i->directory, txn,
+ &key_in_directory, &key_in_directory,
+ toku::lock_request::type::WRITE);
+ if (r == 0)
+ STATUS_VALUE(YDB_LAYER_DIRECTORY_WRITE_LOCKS)++; // accountability
+ else
+ STATUS_VALUE(YDB_LAYER_DIRECTORY_WRITE_LOCKS_FAIL)++; // accountability
+ return r;
+}
+
+//
+// This function is used both to set an initial descriptor of a DB and to
+// change a descriptor. (only way to set a descriptor of a DB)
+//
+// Requires:
+// - The caller must not call put_multiple, del_multiple, or update_multiple concurrently
+// - The caller must not have a hot index running concurrently on db
+//  - If the caller has passed DB_UPDATE_CMP_DESCRIPTOR as a flag, then it must call this function
+//    ONLY immediately after creating the dictionary and before doing any actual work on the dictionary.
+//
+static int
+toku_db_change_descriptor(DB *db, DB_TXN* txn, const DBT* descriptor, uint32_t flags) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_READ_ONLY_TXN(txn);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+ int r = 0;
+ TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL;
+ bool is_db_hot_index = ((flags & DB_IS_HOT_INDEX) != 0);
+ bool update_cmp_descriptor = ((flags & DB_UPDATE_CMP_DESCRIPTOR) != 0);
+
+ DBT old_descriptor_dbt;
+ toku_init_dbt(&old_descriptor_dbt);
+
+ if (!db_opened(db) || !descriptor || (descriptor->size>0 && !descriptor->data)){
+ r = EINVAL;
+ goto cleanup;
+ }
+ // For a hot index, this is an initial descriptor.
+ // We do not support (yet) hcad with hot index concurrently on a single table, which
+ // would require changing a descriptor for a hot index.
+ if (!is_db_hot_index) {
+ r = toku_db_pre_acquire_table_lock(db, txn);
+ if (r != 0) { goto cleanup; }
+ }
+
+ toku_clone_dbt(&old_descriptor_dbt, db->descriptor->dbt);
+ toku_ft_change_descriptor(db->i->ft_handle, &old_descriptor_dbt, descriptor,
+ true, ttxn, update_cmp_descriptor);
+
+cleanup:
+ toku_destroy_dbt(&old_descriptor_dbt);
+ return r;
+}
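+
+// A hedged sketch of driving a descriptor change through the autotxn/locked
+// wrappers installed below (desc_bytes/desc_len are illustrative; the
+// descriptor contents are entirely application-defined):
+//
+//     DBT desc; toku_fill_dbt(&desc, desc_bytes, desc_len);
+//     int r = db->change_descriptor(db, txn, &desc, 0);
+//     // or, only immediately after creating the dictionary and before any
+//     // other work on it:
+//     //     db->change_descriptor(db, txn, &desc, DB_UPDATE_CMP_DESCRIPTOR);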
+
+static int
+toku_db_set_flags(DB *db, uint32_t flags) {
+ HANDLE_PANICKED_DB(db);
+
+ /* the following matches BDB */
+ if (db_opened(db) && flags != 0) return EINVAL;
+
+ return 0;
+}
+
+static int
+toku_db_get_flags(DB *db, uint32_t *pflags) {
+ HANDLE_PANICKED_DB(db);
+ if (!pflags) return EINVAL;
+ *pflags = 0;
+ return 0;
+}
+
+static int
+toku_db_change_pagesize(DB *db, uint32_t pagesize) {
+ HANDLE_PANICKED_DB(db);
+ if (!db_opened(db)) return EINVAL;
+ toku_ft_handle_set_nodesize(db->i->ft_handle, pagesize);
+ return 0;
+}
+
+static int
+toku_db_set_pagesize(DB *db, uint32_t pagesize) {
+ HANDLE_PANICKED_DB(db);
+ if (db_opened(db)) return EINVAL;
+ toku_ft_handle_set_nodesize(db->i->ft_handle, pagesize);
+ return 0;
+}
+
+static int
+toku_db_get_pagesize(DB *db, uint32_t *pagesize_ptr) {
+ HANDLE_PANICKED_DB(db);
+ toku_ft_handle_get_nodesize(db->i->ft_handle, pagesize_ptr);
+ return 0;
+}
+
+static int
+toku_db_change_readpagesize(DB *db, uint32_t readpagesize) {
+ HANDLE_PANICKED_DB(db);
+ if (!db_opened(db)) return EINVAL;
+ toku_ft_handle_set_basementnodesize(db->i->ft_handle, readpagesize);
+ return 0;
+}
+
+static int
+toku_db_set_readpagesize(DB *db, uint32_t readpagesize) {
+ HANDLE_PANICKED_DB(db);
+ if (db_opened(db)) return EINVAL;
+ toku_ft_handle_set_basementnodesize(db->i->ft_handle, readpagesize);
+ return 0;
+}
+
+static int
+toku_db_get_readpagesize(DB *db, uint32_t *readpagesize_ptr) {
+ HANDLE_PANICKED_DB(db);
+ toku_ft_handle_get_basementnodesize(db->i->ft_handle, readpagesize_ptr);
+ return 0;
+}
+
+static int
+toku_db_change_compression_method(DB *db, enum toku_compression_method compression_method) {
+ HANDLE_PANICKED_DB(db);
+ if (!db_opened(db)) return EINVAL;
+ toku_ft_handle_set_compression_method(db->i->ft_handle, compression_method);
+ return 0;
+}
+
+static int
+toku_db_set_compression_method(DB *db, enum toku_compression_method compression_method) {
+ HANDLE_PANICKED_DB(db);
+ if (db_opened(db)) return EINVAL;
+ toku_ft_handle_set_compression_method(db->i->ft_handle, compression_method);
+ return 0;
+}
+
+static int
+toku_db_get_compression_method(DB *db, enum toku_compression_method *compression_method_ptr) {
+ HANDLE_PANICKED_DB(db);
+ toku_ft_handle_get_compression_method(db->i->ft_handle, compression_method_ptr);
+ return 0;
+}
+
+static int
+toku_db_change_fanout(DB *db, unsigned int fanout) {
+ HANDLE_PANICKED_DB(db);
+ if (!db_opened(db)) return EINVAL;
+ toku_ft_handle_set_fanout(db->i->ft_handle, fanout);
+ return 0;
+}
+
+static int
+toku_db_set_fanout(DB *db, unsigned int fanout) {
+ HANDLE_PANICKED_DB(db);
+ if (db_opened(db)) return EINVAL;
+ toku_ft_handle_set_fanout(db->i->ft_handle, fanout);
+ return 0;
+}
+
+static int
+toku_db_get_fanout(DB *db, unsigned int *fanout) {
+ HANDLE_PANICKED_DB(db);
+ toku_ft_handle_get_fanout(db->i->ft_handle, fanout);
+ return 0;
+}
+
+static int
+toku_db_set_memcmp_magic(DB *db, uint8_t magic) {
+ HANDLE_PANICKED_DB(db);
+ if (db_opened(db)) {
+ return EINVAL;
+ }
+ return toku_ft_handle_set_memcmp_magic(db->i->ft_handle, magic);
+}
+
+static int
+toku_db_get_fractal_tree_info64(DB *db, uint64_t *num_blocks_allocated, uint64_t *num_blocks_in_use, uint64_t *size_allocated, uint64_t *size_in_use) {
+ HANDLE_PANICKED_DB(db);
+ struct ftinfo64 ftinfo;
+ toku_ft_handle_get_fractal_tree_info64(db->i->ft_handle, &ftinfo);
+ *num_blocks_allocated = ftinfo.num_blocks_allocated;
+ *num_blocks_in_use = ftinfo.num_blocks_in_use;
+ *size_allocated = ftinfo.size_allocated;
+ *size_in_use = ftinfo.size_in_use;
+ return 0;
+}
+
+static int
+toku_db_iterate_fractal_tree_block_map(DB *db, int (*iter)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*), void *iter_extra) {
+ HANDLE_PANICKED_DB(db);
+ return toku_ft_handle_iterate_fractal_tree_block_map(db->i->ft_handle, iter, iter_extra);
+}
+
+static int
+toku_db_stat64(DB * db, DB_TXN *txn, DB_BTREE_STAT64 *s) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+ struct ftstat64_s ftstat;
+ TOKUTXN tokutxn = NULL;
+ if (txn != NULL) {
+ tokutxn = db_txn_struct_i(txn)->tokutxn;
+ }
+ toku_ft_handle_stat64(db->i->ft_handle, tokutxn, &ftstat);
+ s->bt_nkeys = ftstat.nkeys;
+ s->bt_ndata = ftstat.ndata;
+ s->bt_dsize = ftstat.dsize;
+ s->bt_fsize = ftstat.fsize;
+ s->bt_create_time_sec = ftstat.create_time_sec;
+ s->bt_modify_time_sec = ftstat.modify_time_sec;
+ s->bt_verify_time_sec = ftstat.verify_time_sec;
+ return 0;
+}
+
+static const char *
+toku_db_get_dname(DB *db) {
+ if (!db_opened(db)) {
+ return nullptr;
+ }
+ if (db->i->dname == nullptr) {
+ return "";
+ }
+ return db->i->dname;
+}
+
+static int
+toku_db_keys_range64(DB* db, DB_TXN* txn __attribute__((__unused__)), DBT* keyleft, DBT* keyright, uint64_t* less, uint64_t* left, uint64_t* between, uint64_t *right, uint64_t *greater, bool* middle_3_exact) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+
+ // note that we ignore the txn param. It would be more complicated to support it.
+ // TODO(yoni): Maybe add support for txns later? How would we do this? ydb lock comment about db_keyrange64 is obsolete.
+ toku_ft_keysrange(db->i->ft_handle, keyleft, keyright, less, left, between, right, greater, middle_3_exact);
+ return 0;
+}
+
+static int
+toku_db_key_range64(DB* db, DB_TXN* txn, DBT* key, uint64_t* less_p, uint64_t* equal_p, uint64_t* greater_p, int* is_exact) {
+ uint64_t less, equal_left, middle, equal_right, greater;
+ bool ignore;
+ int r = toku_db_keys_range64(db, txn, key, NULL, &less, &equal_left, &middle, &equal_right, &greater, &ignore);
+ if (r == 0) {
+ *less_p = less;
+ *equal_p = equal_left;
+ *greater_p = middle;
+ paranoid_invariant_zero(greater); // no keys are greater than positive infinity
+ paranoid_invariant_zero(equal_right); // no keys are equal to positive infinity
+ // toku_ft_keysrange does not know when all 3 are exact, so set is_exact to false
+ *is_exact = false;
+ }
+ return 0;
+}
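+
+// For intuition only (these are estimates, not exact counts): if the
+// dictionary holds keys {10, 20, 30, 40} and key = 30, a successful call
+// reports roughly *less_p = 2 (10 and 20), *equal_p = 1 (30) and
+// *greater_p = 1 (40), and *is_exact is always false because the underlying
+// keysrange estimate does not certify exactness for all three buckets.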
+
+static int toku_db_get_key_after_bytes(DB *db, DB_TXN *txn, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *end_key, uint64_t actually_skipped, void *extra), void *cb_extra, uint32_t UU(flags)) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+ return toku_ft_get_key_after_bytes(db->i->ft_handle, start_key, skip_len, callback, cb_extra);
+}
+
+// needed by loader.c
+int
+toku_db_pre_acquire_table_lock(DB *db, DB_TXN *txn) {
+ HANDLE_PANICKED_DB(db);
+ if (!db->i->lt || !txn) return 0;
+ int r;
+ r = toku_db_get_range_lock(db, txn,
+ toku_dbt_negative_infinity(), toku_dbt_positive_infinity(),
+ toku::lock_request::type::WRITE);
+ return r;
+}
+
+static int
+locked_db_close(DB * db, uint32_t UU(flags)) {
+ // cannot begin a checkpoint
+ toku_multi_operation_client_lock();
+ int r = toku_db_close(db);
+ toku_multi_operation_client_unlock();
+ return r;
+}
+
+int
+autotxn_db_get(DB* db, DB_TXN* txn, DBT* key, DBT* data, uint32_t flags) {
+ bool changed; int r;
+ r = toku_db_construct_autotxn(db, &txn, &changed, false);
+ if (r!=0) return r;
+ r = toku_db_get(db, txn, key, data, flags);
+ return toku_db_destruct_autotxn(txn, r, changed);
+}
+
+static inline int
+autotxn_db_getf_set (DB *db, DB_TXN *txn, uint32_t flags, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra) {
+ bool changed; int r;
+ r = toku_db_construct_autotxn(db, &txn, &changed, false);
+ if (r!=0) return r;
+ r = db_getf_set(db, txn, flags, key, f, extra);
+ return toku_db_destruct_autotxn(txn, r, changed);
+}
+
+static int
+locked_db_open(DB *db, DB_TXN *txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) {
+ int ret, r;
+ HANDLE_READ_ONLY_TXN(txn);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+
+ //
+    // Note that this function opens a db inside a transaction. If that
+    // transaction is going to abort, the user is responsible for closing
+    // the DB before the abort; not doing so results in undefined behavior.
+ //
+ DB_ENV *env = db->dbenv;
+ DB_TXN *child_txn = NULL;
+ int using_txns = env->i->open_flags & DB_INIT_TXN;
+ if (using_txns) {
+ ret = toku_txn_begin(env, txn, &child_txn, DB_TXN_NOSYNC);
+ invariant_zero(ret);
+ }
+
+ // cannot begin a checkpoint
+ toku_multi_operation_client_lock();
+ r = toku_db_open(db, child_txn, fname, dbname, dbtype, flags & ~DB_AUTO_COMMIT, mode);
+ toku_multi_operation_client_unlock();
+
+ if (using_txns) {
+ if (r == 0) {
+ ret = locked_txn_commit(child_txn, DB_TXN_NOSYNC);
+ invariant_zero(ret);
+ } else {
+ ret = locked_txn_abort(child_txn);
+ invariant_zero(ret);
+ }
+ }
+ return r;
+}
+
+static int
+locked_db_change_descriptor(DB *db, DB_TXN *txn, const DBT *descriptor, uint32_t flags) {
+ // cannot begin a checkpoint
+ toku_multi_operation_client_lock();
+ int r = toku_db_change_descriptor(db, txn, descriptor, flags);
+ toku_multi_operation_client_unlock();
+ return r;
+}
+
+static int
+autotxn_db_change_descriptor(DB *db, DB_TXN *txn, const DBT *descriptor, uint32_t flags) {
+ bool changed; int r;
+ r = toku_db_construct_autotxn(db, &txn, &changed, false);
+ if (r != 0) { return r; }
+ r = locked_db_change_descriptor(db, txn, descriptor, flags);
+ return toku_db_destruct_autotxn(txn, r, changed);
+}
+
+static void
+toku_db_set_errfile (DB *db, FILE *errfile) {
+ db->dbenv->set_errfile(db->dbenv, errfile);
+}
+
+// TODO 2216 delete this
+static int
+toku_db_fd(DB * UU(db), int * UU(fdp)) {
+ return 0;
+}
+
+static const DBT* toku_db_dbt_pos_infty(void) __attribute__((pure));
+static const DBT*
+toku_db_dbt_pos_infty(void) {
+ return toku_dbt_positive_infinity();
+}
+
+static const DBT* toku_db_dbt_neg_infty(void) __attribute__((pure));
+static const DBT*
+toku_db_dbt_neg_infty(void) {
+ return toku_dbt_negative_infinity();
+}
+
+static int
+toku_db_optimize(DB *db) {
+ HANDLE_PANICKED_DB(db);
+ toku_ft_optimize(db->i->ft_handle);
+ return 0;
+}
+
+static int
+toku_db_hot_optimize(DB *db, DBT* left, DBT* right,
+ int (*progress_callback)(void *extra, float progress),
+ void *progress_extra, uint64_t* loops_run)
+{
+ HANDLE_PANICKED_DB(db);
+    int r = toku_ft_hot_optimize(db->i->ft_handle, left, right,
+                                 progress_callback,
+                                 progress_extra, loops_run);
+
+ return r;
+}
+
+static int
+locked_db_optimize(DB *db) {
+ // need to protect from checkpointing because
+ // toku_db_optimize does a message injection
+ toku_multi_operation_client_lock(); //Cannot begin checkpoint
+ int r = toku_db_optimize(db);
+ toku_multi_operation_client_unlock();
+ return r;
+}
+
+
+struct last_key_extra {
+ YDB_CALLBACK_FUNCTION func;
+ void* extra;
+};
+
+static int
+db_get_last_key_callback(uint32_t keylen, const void *key, uint32_t vallen UU(), const void *val UU(), void *extra, bool lock_only) {
+ if (!lock_only) {
+ DBT keydbt;
+ toku_fill_dbt(&keydbt, key, keylen);
+ struct last_key_extra * CAST_FROM_VOIDP(info, extra);
+ info->func(&keydbt, NULL, info->extra);
+ }
+ return 0;
+}
+
+static int
+toku_db_get_last_key(DB * db, DB_TXN *txn, YDB_CALLBACK_FUNCTION func, void* extra) {
+ int r;
+ LE_CURSOR cursor = nullptr;
+ struct last_key_extra last_extra = { .func = func, .extra = extra };
+
+ r = toku_le_cursor_create(&cursor, db->i->ft_handle, db_txn_struct_i(txn)->tokutxn);
+ if (r != 0) { goto cleanup; }
+
+ // Goes in reverse order. First key returned is last in dictionary.
+ r = toku_le_cursor_next(cursor, db_get_last_key_callback, &last_extra);
+ if (r != 0) { goto cleanup; }
+
+cleanup:
+ if (cursor) {
+ toku_le_cursor_close(cursor);
+ }
+ return r;
+}
+
+static int
+autotxn_db_get_last_key(DB* db, YDB_CALLBACK_FUNCTION func, void* extra) {
+ bool changed; int r;
+ DB_TXN *txn = nullptr;
+    // The cursor used inside requires a transaction, but this is _not_ a transactional function,
+    // so create a transaction in this wrapper and close it afterward.
+ r = toku_db_construct_autotxn(db, &txn, &changed, false);
+ if (r!=0) return r;
+ r = toku_db_get_last_key(db, txn, func, extra);
+ return toku_db_destruct_autotxn(txn, r, changed);
+}
+
+static int
+toku_db_get_fragmentation(DB * db, TOKU_DB_FRAGMENTATION report) {
+ HANDLE_PANICKED_DB(db);
+ int r;
+ if (!db_opened(db))
+ r = toku_ydb_do_error(db->dbenv, EINVAL, "Fragmentation report available only on open DBs.\n");
+ else
+ r = toku_ft_get_fragmentation(db->i->ft_handle, report);
+ return r;
+}
+
+int
+toku_db_set_indexer(DB *db, DB_INDEXER * indexer) {
+ int r = 0;
+ if ( db->i->indexer != NULL && indexer != NULL ) {
+ // you are trying to overwrite a valid indexer
+ r = EINVAL;
+ }
+ else {
+ db->i->indexer = indexer;
+ }
+ return r;
+}
+
+DB_INDEXER *
+toku_db_get_indexer(DB *db) {
+ return db->i->indexer;
+}
+
+static void
+db_get_indexer(DB *db, DB_INDEXER **indexer_ptr) {
+ *indexer_ptr = toku_db_get_indexer(db);
+}
+
+struct ydb_verify_context {
+ int (*progress_callback)(void *extra, float progress);
+ void *progress_extra;
+};
+
+static int
+ydb_verify_progress_callback(void *extra, float progress) {
+ struct ydb_verify_context *context = (struct ydb_verify_context *) extra;
+ int r = 0;
+ if (context->progress_callback) {
+ r = context->progress_callback(context->progress_extra, progress);
+ }
+ return r;
+}
+
+static int
+toku_db_verify_with_progress(DB *db, int (*progress_callback)(void *extra, float progress), void *progress_extra, int verbose, int keep_going) {
+ struct ydb_verify_context context = { progress_callback, progress_extra };
+ int r = toku_verify_ft_with_progress(db->i->ft_handle, ydb_verify_progress_callback, &context, verbose, keep_going);
+ return r;
+}
+
+
+static int
+toku_db_recount_rows(DB* db, int (*progress_callback)(uint64_t count,
+ uint64_t deleted,
+ void* progress_extra),
+ void* progress_extra) {
+
+ HANDLE_PANICKED_DB(db);
+    int r = toku_ft_recount_rows(db->i->ft_handle,
+                                 progress_callback,
+                                 progress_extra);
+    return r;
+}
+
+
+int toku_setup_db_internal (DB **dbp, DB_ENV *env, uint32_t flags, FT_HANDLE ft_handle, bool is_open) {
+ if (flags || env == NULL)
+ return EINVAL;
+
+ if (!env_opened(env))
+ return EINVAL;
+
+ DB *MALLOC(result);
+ if (result == 0) {
+ return ENOMEM;
+ }
+ memset(result, 0, sizeof *result);
+ result->dbenv = env;
+ MALLOC(result->i);
+ if (result->i == 0) {
+ toku_free(result);
+ return ENOMEM;
+ }
+ memset(result->i, 0, sizeof *result->i);
+ result->i->ft_handle = ft_handle;
+ result->i->opened = is_open;
+ *dbp = result;
+ return 0;
+}
+
+int
+toku_db_create(DB ** db, DB_ENV * env, uint32_t flags) {
+ if (flags || env == NULL)
+ return EINVAL;
+
+ if (!env_opened(env))
+ return EINVAL;
+
+
+ FT_HANDLE ft_handle;
+ toku_ft_handle_create(&ft_handle);
+
+ int r = toku_setup_db_internal(db, env, flags, ft_handle, false);
+ if (r != 0) return r;
+
+ DB *result=*db;
+ // methods that grab the ydb lock
+#define SDB(name) result->name = locked_db_ ## name
+ SDB(close);
+ SDB(open);
+ SDB(optimize);
+#undef SDB
+ // methods that do not take the ydb lock
+#define USDB(name) result->name = toku_db_ ## name
+ USDB(set_errfile);
+ USDB(set_pagesize);
+ USDB(get_pagesize);
+ USDB(change_pagesize);
+ USDB(set_readpagesize);
+ USDB(get_readpagesize);
+ USDB(change_readpagesize);
+ USDB(set_compression_method);
+ USDB(get_compression_method);
+ USDB(change_compression_method);
+ USDB(set_fanout);
+ USDB(get_fanout);
+ USDB(set_memcmp_magic);
+ USDB(change_fanout);
+ USDB(set_flags);
+ USDB(get_flags);
+ USDB(fd);
+ USDB(get_max_row_size);
+ USDB(set_indexer);
+ USDB(pre_acquire_table_lock);
+ USDB(pre_acquire_fileops_lock);
+ USDB(key_range64);
+ USDB(keys_range64);
+ USDB(get_key_after_bytes);
+ USDB(hot_optimize);
+ USDB(stat64);
+ USDB(get_fractal_tree_info64);
+ USDB(iterate_fractal_tree_block_map);
+ USDB(get_dname);
+ USDB(verify_with_progress);
+ USDB(cursor);
+ USDB(dbt_pos_infty);
+ USDB(dbt_neg_infty);
+ USDB(get_fragmentation);
+ USDB(recount_rows);
+#undef USDB
+ result->get_indexer = db_get_indexer;
+ result->del = autotxn_db_del;
+ result->put = autotxn_db_put;
+ result->update = autotxn_db_update;
+ result->update_broadcast = autotxn_db_update_broadcast;
+ result->change_descriptor = autotxn_db_change_descriptor;
+ result->get_last_key = autotxn_db_get_last_key;
+
+ // unlocked methods
+ result->get = autotxn_db_get;
+ result->getf_set = autotxn_db_getf_set;
+
+ result->i->dict_id = DICTIONARY_ID_NONE;
+ result->i->opened = 0;
+ result->i->open_flags = 0;
+ result->i->open_mode = 0;
+ result->i->indexer = NULL;
+ *db = result;
+ return 0;
+}
+
+// When the loader is created, it makes this call (toku_env_load_inames).
+// For each dictionary to be loaded, replace old iname in directory
+// with a newly generated iname. This will also take a write lock
+// on the directory entries. The write lock will be released when
+// the transaction of the loader is completed.
+// If the transaction commits, the new inames are in place.
+// If the transaction aborts, the old inames will be restored.
+// The new inames are returned to the caller.
+// It is the caller's responsibility to free them.
+// If "mark_as_loader" is true, then include a mark in the iname
+// to indicate that the file is created by the ft loader.
+// Return 0 on success (could fail if write lock not available).
+static int
+load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new_inames_in_env[/*N*/], LSN *load_lsn, bool mark_as_loader) {
+ int rval = 0;
+ int i;
+
+ TXNID_PAIR xid = TXNID_PAIR_NONE;
+ DBT dname_dbt; // holds dname
+ DBT iname_dbt; // holds new iname
+
+ const char *mark;
+
+ if (mark_as_loader) {
+ mark = "B";
+ } else {
+ mark = "P";
+ }
+
+ for (i=0; i<N; i++) {
+ new_inames_in_env[i] = NULL;
+ }
+
+ if (txn) {
+ xid = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn);
+ }
+ for (i = 0; i < N; i++) {
+ char * dname = dbs[i]->i->dname;
+ toku_fill_dbt(&dname_dbt, dname, strlen(dname)+1);
+
+ // now create new iname
+ char hint[strlen(dname) + 1];
+ create_iname_hint(env, dname, hint);
+
+ // allocates memory for iname_in_env
+ const char *new_iname =
+ create_iname(env, xid.parent_id64, xid.child_id64, hint, mark, i);
+ new_inames_in_env[i] = new_iname;
+
+ // iname_in_env goes in directory
+ toku_fill_dbt(&iname_dbt, new_iname, strlen(new_iname) + 1);
+ rval = toku_db_put(env->i->directory, txn, &dname_dbt, &iname_dbt, 0, true);
+ if (rval) break;
+ }
+
+ // Generate load log entries.
+ if (!rval && txn) {
+ TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+ int do_fsync = 0;
+ LSN *get_lsn = NULL;
+ for (i = 0; i < N; i++) {
+ FT_HANDLE ft_handle = dbs[i]->i->ft_handle;
+ //Fsync is necessary for the last one only.
+ if (i==N-1) {
+ do_fsync = 1; //We only need a single fsync of logs.
+ get_lsn = load_lsn; //Set pointer to capture the last lsn.
+ }
+ toku_ft_load(ft_handle, ttxn, new_inames_in_env[i], do_fsync, get_lsn);
+ }
+ }
+ return rval;
+}
+
+int
+locked_load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], char * new_inames_in_env[/*N*/], LSN *load_lsn, bool mark_as_loader) {
+ int r;
+ HANDLE_READ_ONLY_TXN(txn);
+
+ // cannot begin a checkpoint
+ toku_multi_operation_client_lock();
+ r = load_inames(env, txn, N, dbs, (const char **) new_inames_in_env, load_lsn, mark_as_loader);
+ toku_multi_operation_client_unlock();
+
+ return r;
+
+}
+
+#undef STATUS_VALUE
+
+#include <toku_race_tools.h>
+void __attribute__((constructor)) toku_ydb_db_helgrind_ignore(void);
+void
+toku_ydb_db_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&ydb_db_layer_status, sizeof ydb_db_layer_status);
+}
diff --git a/storage/tokudb/PerconaFT/src/ydb_db.h b/storage/tokudb/PerconaFT/src/ydb_db.h
new file mode 100644
index 00000000..c260e9d0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_db.h
@@ -0,0 +1,137 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <ft/ft.h>
+
+#include "ydb-internal.h"
+#include "ydb_txn.h"
+
+#include <memory>
+
+typedef enum {
+ YDB_LAYER_DIRECTORY_WRITE_LOCKS = 0, /* total directory write locks taken */
+ YDB_LAYER_DIRECTORY_WRITE_LOCKS_FAIL, /* total directory write locks unable to be taken */
+ YDB_LAYER_LOGSUPPRESS, /* number of times logs are suppressed for empty table (2440) */
+ YDB_LAYER_LOGSUPPRESS_FAIL, /* number of times unable to suppress logs for empty table (2440) */
+ YDB_DB_LAYER_STATUS_NUM_ROWS /* number of rows in this status array */
+} ydb_db_lock_layer_status_entry;
+
+typedef struct {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[YDB_DB_LAYER_STATUS_NUM_ROWS];
+} YDB_DB_LAYER_STATUS_S, *YDB_DB_LAYER_STATUS;
+
+void ydb_db_layer_get_status(YDB_DB_LAYER_STATUS statp);
+
+//
+// export the following locktree create/destroy callbacks so
+// the environment can pass them to the locktree manager.
+//
+struct lt_on_create_callback_extra {
+ DB_TXN *txn;
+ FT_HANDLE ft_handle;
+ bool open_rw;
+};
+int toku_db_lt_on_create_callback(toku::locktree *lt, void *extra);
+void toku_db_lt_on_destroy_callback(toku::locktree *lt);
+
+/* db methods */
+static inline int db_opened(DB *db) {
+ return db->i->opened != 0;
+}
+
+static inline const toku::comparator &toku_db_get_comparator(DB *db) {
+ return toku_ft_get_comparator(db->i->ft_handle);
+}
+
+int toku_db_use_builtin_key_cmp(DB *db);
+int toku_db_pre_acquire_fileops_lock(DB *db, DB_TXN *txn);
+int toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname, uint32_t flags, int mode);
+int toku_db_pre_acquire_table_lock(DB *db, DB_TXN *txn);
+int toku_db_get (DB * db, DB_TXN * txn, DBT * key, DBT * data, uint32_t flags);
+int toku_db_create(DB ** db, DB_ENV * env, uint32_t flags);
+int toku_db_close(DB * db);
+int toku_setup_db_internal (DB **dbp, DB_ENV *env, uint32_t flags, FT_HANDLE ft_handle, bool is_open);
+int db_getf_set(DB *db, DB_TXN *txn, uint32_t flags, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra);
+int autotxn_db_get(DB* db, DB_TXN* txn, DBT* key, DBT* data, uint32_t flags);
+
+//TODO: DB_AUTO_COMMIT.
+//TODO: Nowait only conditionally?
+//TODO: NOSYNC change to SYNC if DB_ENV has something in set_flags
+static inline int
+toku_db_construct_autotxn(DB* db, DB_TXN **txn, bool* changed, bool force_auto_commit) {
+ assert(db && txn && changed);
+ DB_ENV* env = db->dbenv;
+ if (*txn || !(env->i->open_flags & DB_INIT_TXN)) {
+ *changed = false;
+ return 0;
+ }
+ bool nosync = (bool)(!force_auto_commit && !(env->i->open_flags & DB_AUTO_COMMIT));
+ uint32_t txn_flags = DB_TXN_NOWAIT | (nosync ? DB_TXN_NOSYNC : 0);
+ int r = toku_txn_begin(env, NULL, txn, txn_flags);
+ if (r!=0) return r;
+ *changed = true;
+ return 0;
+}
+
+static inline int
+toku_db_destruct_autotxn(DB_TXN *txn, int r, bool changed) {
+ if (!changed) return r;
+ if (r==0) {
+ r = locked_txn_commit(txn, 0);
+ }
+ else {
+ locked_txn_abort(txn);
+ }
+ return r;
+}
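+
+// Illustrative sketch, not part of the original source: the two helpers above
+// are typically paired to run an operation in its own transaction when the
+// caller did not supply one ("do_db_work" is a hypothetical placeholder):
+//
+//     DB_TXN *txn = txn_from_caller;   // may be NULL
+//     bool changed;
+//     int r = toku_db_construct_autotxn(db, &txn, &changed, false);
+//     if (r == 0) {
+//         r = do_db_work(db, txn);                      // hypothetical work
+//         r = toku_db_destruct_autotxn(txn, r, changed);
+//     }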
+
+void create_iname_hint(DB_ENV *env, const char *dname, char *hint);
+char *create_iname(DB_ENV *env,
+ uint64_t id1,
+ uint64_t id2,
+ char *hint,
+ const char *mark,
+ int n);
+std::unique_ptr<char[], decltype(&toku_free)> generate_iname_for_rename_or_open(
+ DB_ENV *env,
+ DB_TXN *txn,
+ const char *dname,
+ bool is_open);
diff --git a/storage/tokudb/PerconaFT/src/ydb_env_func.cc b/storage/tokudb/PerconaFT/src/ydb_env_func.cc
new file mode 100644
index 00000000..aa8f9063
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_env_func.cc
@@ -0,0 +1,185 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <my_global.h>
+#include <toku_portability.h>
+
+#include <memory.h>
+#include <db.h>
+
+#include <ft/cachetable/checkpoint.h>
+#include <ft/ft.h>
+#include <ft/ft-ops.h>
+#include <ft/ft-flusher.h>
+#include <ft/logger/recover.h>
+#include <ft/loader/loader.h>
+
+#include "ydb_env_func.h"
+
+// For test purposes only.
+// These callbacks are never used in production code, only as a way to test the system
+// (for example, by causing crashes at predictable times).
+void (*checkpoint_callback_f)(void*) = NULL;
+void * checkpoint_callback_extra = NULL;
+void (*checkpoint_callback2_f)(void*) = NULL;
+void * checkpoint_callback2_extra = NULL;
+
+bool engine_status_enable = true; // if false, suppress engine status output on failed assert, for test programs only
+
+void db_env_set_direct_io (bool direct_io_on) {
+ toku_ft_set_direct_io(direct_io_on);
+}
+
+void db_env_set_compress_buffers_before_eviction (bool compress_buffers) {
+ toku_ft_set_compress_buffers_before_eviction(compress_buffers);
+}
+
+void db_env_set_func_fsync (int (*fsync_function)(int)) {
+ toku_set_func_fsync(fsync_function);
+}
+
+void db_env_set_func_pwrite (ssize_t (*pwrite_function)(int, const void *, size_t, toku_off_t)) {
+ toku_set_func_pwrite(pwrite_function);
+}
+
+void db_env_set_func_full_pwrite (ssize_t (*pwrite_function)(int, const void *, size_t, toku_off_t)) {
+ toku_set_func_full_pwrite(pwrite_function);
+}
+
+void db_env_set_func_write (ssize_t (*write_function)(int, const void *, size_t)) {
+ toku_set_func_write(write_function);
+}
+
+void db_env_set_func_full_write (ssize_t (*write_function)(int, const void *, size_t)) {
+ toku_set_func_full_write(write_function);
+}
+
+void db_env_set_func_fdopen (FILE * (*fdopen_function)(int, const char *)) {
+ toku_set_func_fdopen(fdopen_function);
+}
+
+void db_env_set_func_fopen (FILE * (*fopen_function)(const char *, const char *)) {
+ toku_set_func_fopen(fopen_function);
+}
+
+void db_env_set_func_open (int (*open_function)(const char *, int, int)) {
+ toku_set_func_open(open_function);
+}
+
+void db_env_set_func_fclose (int (*fclose_function)(FILE*)) {
+ toku_set_func_fclose(fclose_function);
+}
+
+void db_env_set_func_pread (ssize_t (*fun)(int, void *, size_t, off_t)) {
+ toku_set_func_pread(fun);
+}
+
+void db_env_set_func_loader_fwrite (size_t (*fwrite_fun)(const void*,size_t,size_t,FILE*)) {
+ toku_set_func_fwrite(fwrite_fun);
+}
+
+void db_env_set_func_malloc (void *(*f)(size_t)) {
+ toku_set_func_malloc(f);
+}
+
+void db_env_set_func_realloc (void *(*f)(void*, size_t)) {
+ toku_set_func_realloc(f);
+}
+
+void db_env_set_func_free (void (*f)(void*)) {
+ toku_set_func_free(f);
+}
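+
+// Illustrative sketch, not part of the original source: a test program might
+// install a wrapper through one of the hooks above to inject I/O failures at
+// a chosen point ("should_fail_now" is a hypothetical test helper):
+//
+//     static ssize_t test_pwrite(int fd, const void *buf, size_t n, toku_off_t off) {
+//         if (should_fail_now()) { errno = EIO; return -1; }
+//         return pwrite(fd, buf, n, off);
+//     }
+//     ...
+//     db_env_set_func_pwrite(test_pwrite);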
+
+// For test purposes only.
+// With this interface, all checkpoint users get the same callbacks and the same extras.
+void
+db_env_set_checkpoint_callback (void (*callback_f)(void*), void* extra) {
+ toku_checkpoint_safe_client_lock();
+ checkpoint_callback_f = callback_f;
+ checkpoint_callback_extra = extra;
+ toku_checkpoint_safe_client_unlock();
+}
+
+void
+db_env_set_checkpoint_callback2 (void (*callback_f)(void*), void* extra) {
+ toku_checkpoint_safe_client_lock();
+ checkpoint_callback2_f = callback_f;
+ checkpoint_callback2_extra = extra;
+ toku_checkpoint_safe_client_unlock();
+}
+
+void
+db_env_set_recover_callback (void (*callback_f)(void*), void* extra) {
+ toku_recover_set_callback(callback_f, extra);
+}
+
+void
+db_env_set_recover_callback2 (void (*callback_f)(void*), void* extra) {
+ toku_recover_set_callback2(callback_f, extra);
+}
+
+void
+db_env_set_flusher_thread_callback(void (*callback_f)(int, void*), void* extra) {
+ toku_flusher_thread_set_callback(callback_f, extra);
+}
+
+void
+db_env_set_loader_size_factor (uint32_t factor) {
+ toku_ft_loader_set_size_factor(factor);
+}
+
+void
+db_env_set_mvcc_garbage_collection_verification(uint32_t verification_mode) {
+ garbage_collection_debug = (verification_mode != 0);
+}
+
+// Purpose: allow test programs that expect to fail to suppress engine status output on failed assert.
+void
+db_env_enable_engine_status(bool enable) {
+ engine_status_enable = enable;
+}
+
+void
+db_env_set_num_bucket_mutexes(uint32_t num_mutexes) {
+ toku_pair_list_set_lock_size(num_mutexes);
+}
+
+void db_env_try_gdb_stack_trace(const char *gdb_path) {
+ toku_try_gdb_stack_trace(gdb_path);
+}
+
diff --git a/storage/tokudb/PerconaFT/src/ydb_env_func.h b/storage/tokudb/PerconaFT/src/ydb_env_func.h
new file mode 100644
index 00000000..e863c2d4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_env_func.h
@@ -0,0 +1,52 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+extern void (*checkpoint_callback_f)(void*);
+extern void * checkpoint_callback_extra;
+extern void (*checkpoint_callback2_f)(void*);
+extern void * checkpoint_callback2_extra;
+
+extern bool engine_status_enable;
+
+// Called to use dlmalloc functions.
+void setup_dlmalloc(void) __attribute__((__visibility__("default")));
+
+// Test-only function
+void toku_env_increase_last_xid(DB_ENV *env, uint64_t increment);
diff --git a/storage/tokudb/PerconaFT/src/ydb_lib.cc b/storage/tokudb/PerconaFT/src/ydb_lib.cc
new file mode 100644
index 00000000..4775f601
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_lib.cc
@@ -0,0 +1,57 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdio.h>
+#include <toku_stdint.h>
+#include <toku_portability.h>
+#include <db.h>
+#include "ydb.h"
+#include <toku_assert.h>
+
+#if 0 && defined(__GNUC__)
+
+static void __attribute__((constructor)) libtokuft_init(void) {
+ int r = toku_ydb_init();
+ assert(r==0);
+}
+
+static void __attribute__((destructor)) libtokuft_destroy(void) {
+ toku_ydb_destroy();
+}
+
+#endif // __GNUC__
diff --git a/storage/tokudb/PerconaFT/src/ydb_load.h b/storage/tokudb/PerconaFT/src/ydb_load.h
new file mode 100644
index 00000000..aa253ab4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_load.h
@@ -0,0 +1,62 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+/* ydb functions used by loader
+ */
+
+// When the loader is created, it makes this call.
+// For each dictionary to be loaded, replace old iname in directory
+// with a newly generated iname. This will also take a write lock
+// on the directory entries. The write lock will be released when
+// the transaction of the loader is completed.
+// If the transaction commits, the new inames are in place.
+// If the transaction aborts, the old inames will be restored.
+// The new inames are returned to the caller.
+// It is the caller's responsibility to free them.
+// If "mark_as_loader" is true, then include a mark in the iname
+// to indicate that the file is created by the ft loader.
+// Return 0 on success (could fail if write lock not available).
+int locked_load_inames(DB_ENV * env,
+ DB_TXN * txn,
+ int N,
+ DB * dbs[/*N*/],
+ char * new_inames_in_env[/*N*/], /* out */
+ LSN *load_lsn,
+ bool mark_as_loader);
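+
+// Illustrative sketch, not part of the original source: per the comment above,
+// the caller owns the returned iname strings and must eventually free them:
+//
+//     char *new_inames[N];
+//     int r = locked_load_inames(env, txn, N, dbs, new_inames, &load_lsn, true);
+//     // ... once the inames are no longer needed ...
+//     for (int i = 0; i < N; i++) {
+//         if (new_inames[i]) toku_free(new_inames[i]);
+//     }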
diff --git a/storage/tokudb/PerconaFT/src/ydb_row_lock.cc b/storage/tokudb/PerconaFT/src/ydb_row_lock.cc
new file mode 100644
index 00000000..1d2f4e98
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_row_lock.cc
@@ -0,0 +1,295 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+
+#include <locktree/lock_request.h>
+
+#include "ydb-internal.h"
+#include "ydb_txn.h"
+#include "ydb_row_lock.h"
+
+/*
+ Used for partial implementation of nested transactions.
+ Work is done by children as normal, but all locking is done by the
+ root of the nested txn tree.
+ This may hold extra locks, and will not work as expected when
+ a node has two non-completed txns at any time.
+*/
+static DB_TXN *txn_oldest_ancester(DB_TXN* txn) {
+ while (txn && txn->parent) {
+ txn = txn->parent;
+ }
+ return txn;
+}
+
+int find_key_ranges_by_lt(const txn_lt_key_ranges &ranges,
+ const toku::locktree *const &find_lt);
+int find_key_ranges_by_lt(const txn_lt_key_ranges &ranges,
+ const toku::locktree *const &find_lt) {
+ return ranges.lt->compare(find_lt);
+}
+
+static void db_txn_note_row_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *right_key) {
+ const toku::locktree *lt = db->i->lt;
+
+ toku_mutex_lock(&db_txn_struct_i(txn)->txn_mutex);
+
+ uint32_t idx;
+ txn_lt_key_ranges ranges;
+ toku::omt<txn_lt_key_ranges> *map = &db_txn_struct_i(txn)->lt_map;
+
+    // if this txn has not already referenced this
+ // locktree, then add it to this txn's locktree map
+ int r = map->find_zero<const toku::locktree *, find_key_ranges_by_lt>(lt, &ranges, &idx);
+ if (r == DB_NOTFOUND) {
+ ranges.lt = db->i->lt;
+ XMALLOC(ranges.buffer);
+ ranges.buffer->create();
+ map->insert_at(ranges, idx);
+
+ // let the manager know we're referencing this lt
+ toku::locktree_manager *ltm = &txn->mgrp->i->ltm;
+ ltm->reference_lt(ranges.lt);
+ } else {
+ invariant_zero(r);
+ }
+
+ // add a new lock range to this txn's row lock buffer
+ size_t old_mem_size = ranges.buffer->total_memory_size();
+ ranges.buffer->append(left_key, right_key);
+ size_t new_mem_size = ranges.buffer->total_memory_size();
+ invariant(new_mem_size > old_mem_size);
+ lt->get_manager()->note_mem_used(new_mem_size - old_mem_size);
+
+ toku_mutex_unlock(&db_txn_struct_i(txn)->txn_mutex);
+}
+
+void toku_db_txn_escalate_callback(TXNID txnid, const toku::locktree *lt, const toku::range_buffer &buffer, void *extra) {
+ DB_ENV *CAST_FROM_VOIDP(env, extra);
+
+ // Get the TOKUTXN and DB_TXN for this txnid from the environment's txn manager.
+ // Only the parent id is used in the search.
+ TOKUTXN ttxn;
+ TXNID_PAIR txnid_pair = { .parent_id64 = txnid, .child_id64 = 0 };
+ TXN_MANAGER txn_manager = toku_logger_get_txn_manager(env->i->logger);
+
+ toku_txn_manager_suspend(txn_manager);
+ toku_txn_manager_id2txn_unlocked(txn_manager, txnid_pair, &ttxn);
+
+ // We are still holding the txn manager lock. If we couldn't find the txn,
+ // then we lost a race with a committing transaction that got removed
+ // from the txn manager before it released its locktree locks. In this
+    // case we do nothing - that transaction has released or is just about to
+    // release its locks and be gone, so there's no point in updating its lt_map
+ // with the new escalated ranges. It will go about releasing the old
+ // locks it thinks it had, and will succeed as if nothing happened.
+ //
+ // If we did find the transaction, then it has not yet been removed
+ // from the manager and therefore has not yet released its locks.
+ // We must try to replace the range buffer associated with this locktree,
+    // if it exists. This is important; otherwise, it can grow out of
+ // control (ticket 5961).
+
+ if (ttxn != nullptr) {
+ DB_TXN *txn = toku_txn_get_container_db_txn(ttxn);
+
+ // One subtle point is that if the transaction is still live, it is impossible
+ // to deadlock on the txn mutex, even though we are holding the locktree's root
+ // mutex and release locks takes them in the opposite order.
+ //
+ // Proof: releasing locks takes the txn mutex and then acquires the locktree's
+ // root mutex, escalation takes the root mutex and possibly takes the txn mutex.
+ // releasing locks implies the txn is not live, and a non-live txn implies we
+ // will not need to take the txn mutex, so the deadlock is avoided.
+ toku_mutex_lock(&db_txn_struct_i(txn)->txn_mutex);
+
+ uint32_t idx;
+ txn_lt_key_ranges ranges;
+ toku::omt<txn_lt_key_ranges> *map = &db_txn_struct_i(txn)->lt_map;
+ int r = map->find_zero<const toku::locktree *, find_key_ranges_by_lt>(lt, &ranges, &idx);
+ if (r == 0) {
+ // Destroy the old range buffer, create a new one, and insert the new ranges.
+ //
+ // We could theoretically steal the memory from the caller instead of copying
+ // it, but it's simpler to have a callback API that doesn't transfer memory ownership.
+ lt->get_manager()->note_mem_released(ranges.buffer->total_memory_size());
+ ranges.buffer->destroy();
+ ranges.buffer->create();
+ toku::range_buffer::iterator iter(&buffer);
+ toku::range_buffer::iterator::record rec;
+ while (iter.current(&rec)) {
+ ranges.buffer->append(rec.get_left_key(), rec.get_right_key());
+ iter.next();
+ }
+ lt->get_manager()->note_mem_used(ranges.buffer->total_memory_size());
+ } else {
+ // In rare cases, we may not find the associated locktree, because we are
+ // racing with the transaction trying to add this locktree to the lt map
+ // after acquiring its first lock. The escalated lock set must be the single
+ // lock that this txnid just acquired. Do nothing here and let the txn
+ // take care of adding this locktree and range to its lt map as usual.
+ invariant(buffer.get_num_ranges() == 1);
+ }
+
+ toku_mutex_unlock(&db_txn_struct_i(txn)->txn_mutex);
+ }
+
+ toku_txn_manager_resume(txn_manager);
+}
+
+// Get a range lock.
+// Return when the range lock is acquired or the default lock tree timeout has expired.
+int toku_db_get_range_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *right_key,
+ toku::lock_request::type lock_type) {
+ toku::lock_request request;
+ request.create();
+ int r = toku_db_start_range_lock(db, txn, left_key, right_key, lock_type, &request);
+ if (r == DB_LOCK_NOTGRANTED) {
+ toku_debug_sync(db_txn_struct_i(txn)->tokutxn,
+ "toku_range_lock_before_wait");
+ r = toku_db_wait_range_lock(db, txn, &request);
+ if (r == DB_LOCK_NOTGRANTED)
+ toku_debug_sync(db_txn_struct_i(txn)->tokutxn,
+ "toku_range_lock_not_granted_after_wait");
+ }
+ else if (r == 0) {
+ toku_debug_sync(db_txn_struct_i(txn)->tokutxn,
+ "toku_range_lock_granted_immediately");
+ }
+
+ request.destroy();
+ return r;
+}
+
+// Setup and start an asynchronous lock request.
+int toku_db_start_range_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *right_key,
+ toku::lock_request::type lock_type, toku::lock_request *request) {
+ uint64_t client_id;
+ void *client_extra;
+ DB_TXN *txn_anc = txn_oldest_ancester(txn);
+ TXNID txn_anc_id = txn_anc->id64(txn_anc);
+ txn->get_client_id(txn, &client_id, &client_extra);
+ request->set(db->i->lt, txn_anc_id, left_key, right_key, lock_type,
+ toku_is_big_txn(txn_anc), client_extra);
+
+ const int r = request->start();
+ if (r == 0) {
+ db_txn_note_row_lock(db, txn_anc, left_key, right_key);
+ } else if (r == DB_LOCK_DEADLOCK) {
+ lock_timeout_callback callback = txn->mgrp->i->lock_wait_timeout_callback;
+ if (callback != nullptr) {
+ callback(db, txn_anc_id, left_key, right_key,
+ request->get_conflicting_txnid());
+ }
+ }
+ return r;
+}
+
+// Complete a lock request by waiting until the request is ready
+// and then storing the acquired lock if successful.
+int toku_db_wait_range_lock(DB *db, DB_TXN *txn, toku::lock_request *request) {
+ DB_TXN *txn_anc = txn_oldest_ancester(txn);
+ const DBT *left_key = request->get_left_key();
+ const DBT *right_key = request->get_right_key();
+ DB_ENV *env = db->dbenv;
+ uint64_t wait_time_msec = env->i->default_lock_timeout_msec;
+ if (env->i->get_lock_timeout_callback)
+ wait_time_msec = env->i->get_lock_timeout_callback(wait_time_msec);
+ uint64_t killed_time_msec = env->i->default_killed_time_msec;
+ if (env->i->get_killed_time_callback)
+ killed_time_msec = env->i->get_killed_time_callback(killed_time_msec);
+ const int r = request->wait(wait_time_msec, killed_time_msec, env->i->killed_callback,
+ env->i->lock_wait_needed_callback);
+ if (r == 0) {
+ db_txn_note_row_lock(db, txn_anc, left_key, right_key);
+ } else if (r == DB_LOCK_NOTGRANTED) {
+ lock_timeout_callback callback = txn->mgrp->i->lock_wait_timeout_callback;
+ if (callback != nullptr) {
+ callback(db, txn_anc->id64(txn_anc), left_key, right_key,
+ request->get_conflicting_txnid());
+ }
+ }
+ return r;
+}
+
+int toku_db_get_point_write_lock(DB *db, DB_TXN *txn, const DBT *key) {
+ return toku_db_get_range_lock(db, txn, key, key, toku::lock_request::type::WRITE);
+}
+
+// acquire a point write lock on the key for a given txn.
+// this does not block the calling thread.
+void toku_db_grab_write_lock (DB *db, DBT *key, TOKUTXN tokutxn) {
+ uint64_t client_id;
+ void *client_extra;
+ DB_TXN *txn = toku_txn_get_container_db_txn(tokutxn);
+ DB_TXN *txn_anc = txn_oldest_ancester(txn);
+ TXNID txn_anc_id = txn_anc->id64(txn_anc);
+
+ // This lock request must succeed, so we do not want to wait
+ toku::lock_request request;
+ request.create();
+ txn->get_client_id(txn, &client_id, &client_extra);
+ request.set(db->i->lt, txn_anc_id, key, key,
+ toku::lock_request::type::WRITE, toku_is_big_txn(txn_anc),
+ client_extra);
+ int r = request.start();
+ invariant_zero(r);
+ db_txn_note_row_lock(db, txn_anc, key, key);
+ request.destroy();
+}
+
+void toku_db_release_lt_key_ranges(DB_TXN *txn, txn_lt_key_ranges *ranges) {
+ toku::locktree *lt = ranges->lt;
+ TXNID txnid = txn->id64(txn);
+
+ // release all of the locks this txn has ever successfully
+ // acquired and stored in the range buffer for this locktree
+ lt->release_locks(txnid, ranges->buffer);
+ lt->get_manager()->note_mem_released(ranges->buffer->total_memory_size());
+ ranges->buffer->destroy();
+ toku_free(ranges->buffer);
+
+ // all of our locks have been released, so first try to wake up
+ // pending lock requests, then release our reference on the lt
+ toku::lock_request::retry_all_lock_requests(lt, txn->mgrp->i->lock_wait_needed_callback);
+
+ // Release our reference on this locktree
+ toku::locktree_manager *ltm = &txn->mgrp->i->ltm;
+ ltm->release_lt(lt);
+}
diff --git a/storage/tokudb/PerconaFT/src/ydb_row_lock.h b/storage/tokudb/PerconaFT/src/ydb_row_lock.h
new file mode 100644
index 00000000..e5258a9a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_row_lock.h
@@ -0,0 +1,61 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <ydb-internal.h>
+
+#include <locktree/lock_request.h>
+
+// Expose the escalate callback to ydb.cc,
+// so it can pass the function pointer to the locktree
+void toku_db_txn_escalate_callback(TXNID txnid, const toku::locktree *lt, const toku::range_buffer &buffer, void *extra);
+
+int toku_db_get_range_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *right_key,
+ toku::lock_request::type lock_type);
+
+int toku_db_start_range_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *right_key,
+ toku::lock_request::type lock_type, toku::lock_request *lock_request);
+
+int toku_db_wait_range_lock(DB *db, DB_TXN *txn, toku::lock_request *lock_request);
+
+int toku_db_get_point_write_lock(DB *db, DB_TXN *txn, const DBT *key);
+
+void toku_db_grab_write_lock(DB *db, DBT *key, TOKUTXN tokutxn);
+
+void toku_db_release_lt_key_ranges(DB_TXN *txn, txn_lt_key_ranges *ranges);
diff --git a/storage/tokudb/PerconaFT/src/ydb_txn.cc b/storage/tokudb/PerconaFT/src/ydb_txn.cc
new file mode 100644
index 00000000..dd5fb3b8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_txn.cc
@@ -0,0 +1,624 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+
+#include <portability/toku_race_tools.h>
+#include <portability/toku_atomic.h>
+
+#include <ft/cachetable/checkpoint.h>
+#include <ft/log_header.h>
+#include <ft/txn/txn_manager.h>
+
+
+#include "ydb-internal.h"
+#include "ydb_txn.h"
+#include "ydb_row_lock.h"
+
+static uint64_t toku_txn_id64(DB_TXN * txn) {
+ HANDLE_PANICKED_ENV(txn->mgrp);
+ return toku_txn_get_root_id(db_txn_struct_i(txn)->tokutxn);
+}
+
+static void toku_txn_release_locks(DB_TXN *txn) {
+ // Prevent access to the locktree map while releasing.
+ // It is possible for lock escalation to attempt to
+ // modify this data structure while the txn commits.
+ toku_mutex_lock(&db_txn_struct_i(txn)->txn_mutex);
+
+ size_t num_ranges = db_txn_struct_i(txn)->lt_map.size();
+ for (size_t i = 0; i < num_ranges; i++) {
+ txn_lt_key_ranges ranges;
+ int r = db_txn_struct_i(txn)->lt_map.fetch(i, &ranges);
+ invariant_zero(r);
+ toku_db_release_lt_key_ranges(txn, &ranges);
+ }
+
+ toku_mutex_unlock(&db_txn_struct_i(txn)->txn_mutex);
+}
+
+static void toku_txn_destroy(DB_TXN *txn) {
+ db_txn_struct_i(txn)->lt_map.destroy();
+ toku_txn_destroy_txn(db_txn_struct_i(txn)->tokutxn);
+ toku_mutex_destroy(&db_txn_struct_i(txn)->txn_mutex);
+ toku_free(txn);
+}
+
+static int toku_txn_commit(DB_TXN * txn, uint32_t flags,
+ TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra,
+ bool release_mo_lock, bool low_priority) {
+ HANDLE_PANICKED_ENV(txn->mgrp);
+ //Recursively kill off children
+ if (db_txn_struct_i(txn)->child) {
+ //commit of child sets the child pointer to NULL
+ int r_child = toku_txn_commit(db_txn_struct_i(txn)->child, flags, NULL, NULL, false, false);
+ if (r_child !=0 && !toku_env_is_panicked(txn->mgrp)) {
+ env_panic(txn->mgrp, r_child, "Recursive child commit failed during parent commit.\n");
+ }
+ //In a panicked env, the child may not be removed from the list.
+ HANDLE_PANICKED_ENV(txn->mgrp);
+ }
+ assert(!db_txn_struct_i(txn)->child);
+ //Remove from parent
+ if (txn->parent) {
+ assert(db_txn_struct_i(txn->parent)->child == txn);
+ db_txn_struct_i(txn->parent)->child=NULL;
+ }
+ if (flags & DB_TXN_SYNC) {
+ toku_txn_force_fsync_on_commit(db_txn_struct_i(txn)->tokutxn);
+ flags &= ~DB_TXN_SYNC;
+ }
+ int nosync = (flags & DB_TXN_NOSYNC)!=0 || (db_txn_struct_i(txn)->flags&DB_TXN_NOSYNC);
+ flags &= ~DB_TXN_NOSYNC;
+
+ int r;
+ if (flags!=0) {
+ // frees the tokutxn
+ r = toku_txn_abort_txn(db_txn_struct_i(txn)->tokutxn, poll, poll_extra);
+ } else {
+ // frees the tokutxn
+ r = toku_txn_commit_txn(db_txn_struct_i(txn)->tokutxn, nosync,
+ poll, poll_extra);
+ }
+ if (r!=0 && !toku_env_is_panicked(txn->mgrp)) {
+ env_panic(txn->mgrp, r, "Error during commit.\n");
+ }
+ //If panicked, we're done.
+ HANDLE_PANICKED_ENV(txn->mgrp);
+ assert_zero(r);
+
+ TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+ TOKULOGGER logger = txn->mgrp->i->logger;
+ LSN do_fsync_lsn;
+ bool do_fsync;
+ toku_txn_get_fsync_info(ttxn, &do_fsync, &do_fsync_lsn);
+ // remove the txn from the list of live transactions, and then
+ // release the lock tree locks. MVCC requires that toku_txn_complete_txn
+ // get called first, otherwise we have bugs, such as #4145 and #4153
+ toku_txn_complete_txn(ttxn);
+ toku_txn_release_locks(txn);
+ // this lock must be released after toku_txn_complete_txn and toku_txn_release_locks because
+    // this lock must be held until the references to the open FTs are released.
+    // Begin checkpoint logs these associations, so we must protect
+    // the changing of these associations against checkpointing.
+ if (release_mo_lock) {
+ if (low_priority) {
+ toku_low_priority_multi_operation_client_unlock();
+ } else {
+ toku_multi_operation_client_unlock();
+ }
+ }
+ toku_txn_maybe_fsync_log(logger, do_fsync_lsn, do_fsync);
+ if (flags!=0) {
+ r = EINVAL;
+ goto cleanup;
+ }
+cleanup:
+ toku_txn_destroy(txn);
+ return r;
+}
+
+static int toku_txn_abort(DB_TXN * txn,
+ TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra) {
+ HANDLE_PANICKED_ENV(txn->mgrp);
+ //Recursively kill off children (abort or commit are both correct, commit is cheaper)
+ if (db_txn_struct_i(txn)->child) {
+ //commit of child sets the child pointer to NULL
+ int r_child = toku_txn_commit(db_txn_struct_i(txn)->child, DB_TXN_NOSYNC, NULL, NULL, false, false);
+ if (r_child !=0 && !toku_env_is_panicked(txn->mgrp)) {
+ env_panic(txn->mgrp, r_child, "Recursive child commit failed during parent abort.\n");
+ }
+ //In a panicked env, the child may not be removed from the list.
+ HANDLE_PANICKED_ENV(txn->mgrp);
+ }
+ assert(!db_txn_struct_i(txn)->child);
+ //Remove from parent
+ if (txn->parent) {
+ assert(db_txn_struct_i(txn->parent)->child == txn);
+ db_txn_struct_i(txn->parent)->child=NULL;
+ }
+
+ int r = toku_txn_abort_txn(db_txn_struct_i(txn)->tokutxn, poll, poll_extra);
+ if (r!=0 && !toku_env_is_panicked(txn->mgrp)) {
+ env_panic(txn->mgrp, r, "Error during abort.\n");
+ }
+ HANDLE_PANICKED_ENV(txn->mgrp);
+ assert_zero(r);
+ toku_txn_complete_txn(db_txn_struct_i(txn)->tokutxn);
+ toku_txn_release_locks(txn);
+ toku_txn_destroy(txn);
+ return r;
+}
+
+static int toku_txn_xa_prepare (DB_TXN *txn, TOKU_XA_XID *xid, uint32_t flags) {
+ int r = 0;
+ if (!txn) {
+ r = EINVAL;
+ goto exit;
+ }
+ if (txn->parent) {
+ r = 0; // make this a NO-OP, MySQL calls this
+ goto exit;
+ }
+ HANDLE_PANICKED_ENV(txn->mgrp);
+ // Take the mo lock as soon as a non-readonly txn is found
+ bool holds_mo_lock;
+ holds_mo_lock = false;
+ if (!toku_txn_is_read_only(db_txn_struct_i(txn)->tokutxn)) {
+ // A readonly transaction does no logging, and therefore does not
+ // need the MO lock.
+ toku_multi_operation_client_lock();
+ holds_mo_lock = true;
+ }
+ //Recursively commit any children.
+ if (db_txn_struct_i(txn)->child) {
+ //commit of child sets the child pointer to NULL
+
+ // toku_txn_commit will take the mo_lock if not held and a non-readonly txn is found.
+ int r_child = toku_txn_commit(db_txn_struct_i(txn)->child, 0, NULL, NULL, false, false);
+ if (r_child !=0 && !toku_env_is_panicked(txn->mgrp)) {
+ env_panic(txn->mgrp, r_child, "Recursive child commit failed during parent commit.\n");
+ }
+ //In a panicked env, the child may not be removed from the list.
+ HANDLE_PANICKED_ENV(txn->mgrp);
+ }
+ assert(!db_txn_struct_i(txn)->child);
+ int nosync;
+ nosync = (flags & DB_TXN_NOSYNC)!=0 || (db_txn_struct_i(txn)->flags&DB_TXN_NOSYNC);
+ TOKUTXN ttxn;
+ ttxn = db_txn_struct_i(txn)->tokutxn;
+ toku_txn_prepare_txn(ttxn, xid, nosync);
+ TOKULOGGER logger;
+ logger = txn->mgrp->i->logger;
+ LSN do_fsync_lsn;
+ bool do_fsync;
+ toku_txn_get_fsync_info(ttxn, &do_fsync, &do_fsync_lsn);
+ // release the multi operation lock before fsyncing the log
+ if (holds_mo_lock) {
+ toku_multi_operation_client_unlock();
+ }
+ toku_txn_maybe_fsync_log(logger, do_fsync_lsn, do_fsync);
+exit:
+ return r;
+}
+
+// requires: must hold the multi operation lock. it is
+// released in toku_txn_xa_prepare before the fsync.
+static int toku_txn_prepare (DB_TXN *txn, uint8_t gid[DB_GID_SIZE], uint32_t flags) {
+ TOKU_XA_XID xid;
+ TOKU_ANNOTATE_NEW_MEMORY(&xid, sizeof(xid));
+ xid.formatID=0x756b6f54; // "Toku"
+ xid.gtrid_length=DB_GID_SIZE/2; // The maximum allowed gtrid length is 64. See the XA spec in source:/import/opengroup.org/C193.pdf page 20.
+ xid.bqual_length=DB_GID_SIZE/2; // The maximum allowed bqual length is 64.
+ memcpy(xid.data, gid, DB_GID_SIZE);
+ return toku_txn_xa_prepare(txn, &xid, flags);
+}
+
+static int toku_txn_txn_stat (DB_TXN *txn, struct txn_stat **txn_stat) {
+ XMALLOC(*txn_stat);
+ return toku_logger_txn_rollback_stats(db_txn_struct_i(txn)->tokutxn, *txn_stat);
+}
+
+static int locked_txn_txn_stat (DB_TXN *txn, struct txn_stat **txn_stat) {
+ int r = toku_txn_txn_stat(txn, txn_stat);
+ return r;
+}
+
+static int locked_txn_commit_with_progress(DB_TXN *txn, uint32_t flags,
+ TXN_PROGRESS_POLL_FUNCTION poll, void* poll_extra) {
+ bool holds_mo_lock = false;
+ bool low_priority = false;
+ TOKUTXN tokutxn = db_txn_struct_i(txn)->tokutxn;
+ if (!toku_txn_is_read_only(tokutxn)) {
+ // A readonly transaction does no logging, and therefore does not need the MO lock.
+ holds_mo_lock = true;
+ if (toku_is_big_tokutxn(tokutxn)) {
+ low_priority = true;
+ toku_low_priority_multi_operation_client_lock();
+ } else {
+ toku_multi_operation_client_lock();
+ }
+ }
+ // cannot begin a checkpoint.
+ // the multi operation lock is taken the first time we
+ // see a non-readonly txn in the recursive commit.
+    // It is released in the first-level toku_txn_commit (if taken);
+    // this way, we don't hold it while we fsync the log.
+ int r = toku_txn_commit(txn, flags, poll, poll_extra, holds_mo_lock, low_priority);
+ return r;
+}
+
+static int locked_txn_abort_with_progress(DB_TXN *txn,
+ TXN_PROGRESS_POLL_FUNCTION poll, void* poll_extra) {
+ // cannot begin a checkpoint
+ // the multi operation lock is taken the first time we
+ // see a non-readonly txn in the abort (or recursive commit).
+    // It is released here so we don't have to hold additional state.
+ bool holds_mo_lock = false;
+ bool low_priority = false;
+ TOKUTXN tokutxn = db_txn_struct_i(txn)->tokutxn;
+ if (!toku_txn_is_read_only(tokutxn)) {
+ // A readonly transaction does no logging, and therefore does not need the MO lock.
+ holds_mo_lock = true;
+ if (toku_is_big_tokutxn(tokutxn)) {
+ low_priority = true;
+ toku_low_priority_multi_operation_client_lock();
+ } else {
+ toku_multi_operation_client_lock();
+ }
+ }
+ int r = toku_txn_abort(txn, poll, poll_extra);
+ if (holds_mo_lock) {
+ if (low_priority) {
+ toku_low_priority_multi_operation_client_unlock();
+ } else {
+ toku_multi_operation_client_unlock();
+ }
+ }
+ return r;
+}
+
+int locked_txn_commit(DB_TXN *txn, uint32_t flags) {
+ int r = locked_txn_commit_with_progress(txn, flags, NULL, NULL);
+ return r;
+}
+
+int locked_txn_abort(DB_TXN *txn) {
+ int r = locked_txn_abort_with_progress(txn, NULL, NULL);
+ return r;
+}
+
+static void locked_txn_set_client_id(DB_TXN *txn, uint64_t client_id, void *client_extra) {
+ toku_txn_set_client_id(db_txn_struct_i(txn)->tokutxn, client_id, client_extra);
+}
+
+static void locked_txn_get_client_id(DB_TXN *txn, uint64_t *client_id, void **client_extra) {
+ toku_txn_get_client_id(db_txn_struct_i(txn)->tokutxn, client_id, client_extra);
+}
+
+static int toku_txn_discard(DB_TXN *txn, uint32_t flags) {
+ // check parameters
+ if (flags != 0)
+ return EINVAL;
+ TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+ if (toku_txn_get_state(ttxn) != TOKUTXN_PREPARING)
+ return EINVAL;
+
+ bool low_priority;
+ if (toku_is_big_tokutxn(ttxn)) {
+ low_priority = true;
+ toku_low_priority_multi_operation_client_lock();
+ } else {
+ low_priority = false;
+ toku_multi_operation_client_lock();
+ }
+
+ // discard
+ toku_txn_discard_txn(ttxn);
+
+ // complete
+ toku_txn_complete_txn(ttxn);
+
+ // release locks
+ toku_txn_release_locks(txn);
+
+ if (low_priority) {
+ toku_low_priority_multi_operation_client_unlock();
+ } else {
+ toku_multi_operation_client_unlock();
+ }
+
+ // destroy
+ toku_txn_destroy(txn);
+
+ return 0;
+}
+
+static bool toku_txn_is_prepared(DB_TXN *txn) {
+ TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+ return toku_txn_get_state(ttxn) == TOKUTXN_PREPARING;
+}
+
+static DB_TXN *toku_txn_get_child(DB_TXN *txn) {
+ return db_txn_struct_i(txn)->child;
+}
+
+static uint64_t toku_txn_get_start_time(DB_TXN *txn) {
+ TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+ return toku_txn_get_start_time(ttxn);
+}
+
+static inline void txn_func_init(DB_TXN *txn) {
+#define STXN(name) txn->name = locked_txn_ ## name
+ STXN(abort);
+ STXN(commit);
+ STXN(abort_with_progress);
+ STXN(commit_with_progress);
+ STXN(txn_stat);
+ STXN(set_client_id);
+ STXN(get_client_id);
+#undef STXN
+#define SUTXN(name) txn->name = toku_txn_ ## name
+ SUTXN(prepare);
+ SUTXN(xa_prepare);
+ SUTXN(discard);
+#undef SUTXN
+ txn->id64 = toku_txn_id64;
+ txn->is_prepared = toku_txn_is_prepared;
+ txn->get_child = toku_txn_get_child;
+ txn->get_start_time = toku_txn_get_start_time;
+}
+
+//
+// Creates a transaction for the user
+// In our system, as far as the user is concerned, the rules are as follows:
+// - one cannot operate on a transaction if a child exists, with the exception of commit/abort
+// - one cannot operate on a transaction simultaneously in two separate threads
+// (the reason for this is that some operations may create a child transaction
+// as part of the function, such as env->dbremove and env->dbrename, and if
+//    transactions could be operated on simultaneously in different threads, the first
+// rule above is violated)
+// - if a parent transaction is committed/aborted, the child transactions are recursively
+// committed
+//
+int toku_txn_begin(DB_ENV *env, DB_TXN * stxn, DB_TXN ** txn, uint32_t flags) {
+ HANDLE_PANICKED_ENV(env);
+ HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, stxn); //Cannot create child while child already exists.
+ if (!toku_logger_is_open(env->i->logger))
+ return toku_ydb_do_error(env, EINVAL, "Environment does not have logging enabled\n");
+ if (!(env->i->open_flags & DB_INIT_TXN))
+ return toku_ydb_do_error(env, EINVAL, "Environment does not have transactions enabled\n");
+
+ uint32_t txn_flags = 0;
+ txn_flags |= DB_TXN_NOWAIT; //We do not support blocking locks. RFP remove this?
+
+ // handle whether txn is declared as read only
+ bool parent_txn_declared_read_only =
+ stxn &&
+ (db_txn_struct_i(stxn)->flags & DB_TXN_READ_ONLY);
+ bool txn_declared_read_only = false;
+ if (flags & DB_TXN_READ_ONLY) {
+ txn_declared_read_only = true;
+ txn_flags |= DB_TXN_READ_ONLY;
+ flags &= ~(DB_TXN_READ_ONLY);
+ }
+ if (txn_declared_read_only && stxn &&
+ !parent_txn_declared_read_only
+ )
+ {
+ return toku_ydb_do_error(
+ env,
+ EINVAL,
+ "Current transaction set as read only, but parent transaction is not\n"
+ );
+ }
+ if (parent_txn_declared_read_only)
+ {
+ // don't require child transaction to also set transaction as read only
+ // if parent has already done so
+ txn_flags |= DB_TXN_READ_ONLY;
+ txn_declared_read_only = true;
+ }
+
+
+ TOKU_ISOLATION child_isolation = TOKU_ISO_SERIALIZABLE;
+ uint32_t iso_flags = flags & DB_ISOLATION_FLAGS;
+ if (!(iso_flags == 0 ||
+ iso_flags == DB_TXN_SNAPSHOT ||
+ iso_flags == DB_READ_COMMITTED ||
+ iso_flags == DB_READ_COMMITTED_ALWAYS ||
+ iso_flags == DB_READ_UNCOMMITTED ||
+ iso_flags == DB_SERIALIZABLE ||
+ iso_flags == DB_INHERIT_ISOLATION)
+ )
+ {
+ return toku_ydb_do_error(
+ env,
+ EINVAL,
+ "Invalid isolation flags set\n"
+ );
+ }
+ flags &= ~iso_flags;
+
+ switch (iso_flags) {
+ case (DB_INHERIT_ISOLATION):
+ if (stxn) {
+ child_isolation = db_txn_struct_i(stxn)->iso;
+ }
+ else {
+ return toku_ydb_do_error(
+ env,
+ EINVAL,
+ "Cannot set DB_INHERIT_ISOLATION when no parent exists\n"
+ );
+ }
+ break;
+ case (DB_READ_COMMITTED):
+ child_isolation = TOKU_ISO_READ_COMMITTED;
+ break;
+ case (DB_READ_COMMITTED_ALWAYS):
+ child_isolation = TOKU_ISO_READ_COMMITTED_ALWAYS;
+ break;
+ case (DB_READ_UNCOMMITTED):
+ child_isolation = TOKU_ISO_READ_UNCOMMITTED;
+ break;
+ case (DB_TXN_SNAPSHOT):
+ child_isolation = TOKU_ISO_SNAPSHOT;
+ break;
+ case (DB_SERIALIZABLE):
+ child_isolation = TOKU_ISO_SERIALIZABLE;
+ break;
+ case (0):
+ child_isolation = stxn ? db_txn_struct_i(stxn)->iso : TOKU_ISO_SERIALIZABLE;
+ break;
+ default:
+ assert(false); // error path is above, so this should not happen
+ break;
+ }
+ if (stxn && child_isolation != db_txn_struct_i(stxn)->iso) {
+ return toku_ydb_do_error(
+ env,
+ EINVAL,
+ "Cannot set isolation level of transaction to something different \
+ isolation level\n"
+ );
+ }
+
+ if (flags&DB_TXN_NOWAIT) {
+ txn_flags |= DB_TXN_NOWAIT;
+ flags &= ~DB_TXN_NOWAIT;
+ }
+ if (flags&DB_TXN_NOSYNC) {
+ txn_flags |= DB_TXN_NOSYNC;
+ flags &= ~DB_TXN_NOSYNC;
+ }
+ if (flags!=0) return toku_ydb_do_error(env, EINVAL, "Invalid flags passed to DB_ENV->txn_begin\n");
+
+ struct __toku_db_txn_external *XCALLOC(eresult); // so the internal stuff is stuck on the end.
+ DB_TXN *result = &eresult->external_part;
+
+ result->mgrp = env;
+ txn_func_init(result);
+
+ result->parent = stxn;
+ db_txn_struct_i(result)->flags = txn_flags;
+ db_txn_struct_i(result)->iso = child_isolation;
+ db_txn_struct_i(result)->lt_map.create_no_array();
+
+ toku_mutex_init(*db_txn_struct_i_txn_mutex_key,
+ &db_txn_struct_i(result)->txn_mutex,
+ nullptr);
+
+ TXN_SNAPSHOT_TYPE snapshot_type;
+ switch (db_txn_struct_i(result)->iso) {
+ case(TOKU_ISO_SNAPSHOT):
+ {
+ snapshot_type = TXN_SNAPSHOT_ROOT;
+ break;
+ }
+ case(TOKU_ISO_READ_COMMITTED):
+ {
+ snapshot_type = TXN_SNAPSHOT_CHILD;
+ break;
+ }
+ case(TOKU_ISO_READ_COMMITTED_ALWAYS) :
+ {
+ snapshot_type = TXN_COPIES_SNAPSHOT;
+ break;
+ }
+ default:
+ {
+ snapshot_type = TXN_SNAPSHOT_NONE;
+ break;
+ }
+ }
+ int r = toku_txn_begin_with_xid(
+ stxn ? db_txn_struct_i(stxn)->tokutxn : 0,
+ &db_txn_struct_i(result)->tokutxn,
+ env->i->logger,
+ TXNID_PAIR_NONE,
+ snapshot_type,
+ result,
+ false, // for_recovery
+ txn_declared_read_only // read_only
+ );
+ if (r != 0) {
+ toku_free(result);
+ return r;
+ }
+
+ //Add to the list of children for the parent.
+ if (result->parent) {
+ assert(!db_txn_struct_i(result->parent)->child);
+ db_txn_struct_i(result->parent)->child = result;
+ }
+
+ *txn = result;
+ return 0;
+}
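+
+// Illustrative sketch, not part of the original source: a typical top-level
+// caller begins a snapshot transaction and commits it through the locked
+// wrapper, which takes the multi operation lock internally:
+//
+//     DB_TXN *txn = NULL;
+//     int r = toku_txn_begin(env, NULL, &txn, DB_TXN_SNAPSHOT);
+//     if (r == 0) {
+//         // ... reads and writes under txn ...
+//         r = locked_txn_commit(txn, 0);
+//     }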
+
+void toku_keep_prepared_txn_callback (DB_ENV *env, TOKUTXN tokutxn) {
+ struct __toku_db_txn_external *XCALLOC(eresult);
+ DB_TXN *result = &eresult->external_part;
+ result->mgrp = env;
+ txn_func_init(result);
+
+ result->parent = NULL;
+
+ db_txn_struct_i(result)->tokutxn = tokutxn;
+ db_txn_struct_i(result)->lt_map.create();
+
+ toku_txn_set_container_db_txn(tokutxn, result);
+
+ toku_mutex_init(*db_txn_struct_i_txn_mutex_key,
+ &db_txn_struct_i(result)->txn_mutex,
+ nullptr);
+}
+
+// Test-only function
+void toku_increase_last_xid(DB_ENV *env, uint64_t increment) {
+ toku_txn_manager_increase_last_xid(toku_logger_get_txn_manager(env->i->logger), increment);
+}
+
+bool toku_is_big_txn(DB_TXN *txn) {
+ return toku_is_big_tokutxn(db_txn_struct_i(txn)->tokutxn);
+}
+
+bool toku_is_big_tokutxn(TOKUTXN tokutxn) {
+ return toku_txn_has_spilled_rollback(tokutxn);
+}
diff --git a/storage/tokudb/PerconaFT/src/ydb_txn.h b/storage/tokudb/PerconaFT/src/ydb_txn.h
new file mode 100644
index 00000000..6f4fabf3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_txn.h
@@ -0,0 +1,59 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// begin, commit, and abort use the multi operation lock
+// internally to synchronize with begin checkpoint. callers
+// should not hold the multi operation lock.
+
+int toku_txn_begin(DB_ENV *env, DB_TXN * stxn, DB_TXN ** txn, uint32_t flags);
+
+void toku_txn_note_db_row_lock(DB_TXN *txn, DB *db, const DBT *left_key, const DBT *right_key);
+
+int locked_txn_commit(DB_TXN *txn, uint32_t flags);
+
+int locked_txn_abort(DB_TXN *txn);
+
+void toku_keep_prepared_txn_callback(DB_ENV *env, TOKUTXN tokutxn);
+
+bool toku_is_big_txn(DB_TXN *txn);
+bool toku_is_big_tokutxn(TOKUTXN tokutxn);
+
+// Test-only function
+extern "C" void toku_increase_last_xid(DB_ENV *env, uint64_t increment) __attribute__((__visibility__("default")));
diff --git a/storage/tokudb/PerconaFT/src/ydb_write.cc b/storage/tokudb/PerconaFT/src/ydb_write.cc
new file mode 100644
index 00000000..8cd7e220
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_write.cc
@@ -0,0 +1,1136 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+#include "ydb-internal.h"
+#include "indexer.h"
+#include <ft/log_header.h>
+#include <ft/cachetable/checkpoint.h>
+#include "ydb_row_lock.h"
+#include "ydb_write.h"
+#include "ydb_db.h"
+#include <portability/toku_atomic.h>
+#include <util/status.h>
+
+static YDB_WRITE_LAYER_STATUS_S ydb_write_layer_status;
+#ifdef STATUS_VALUE
+#undef STATUS_VALUE
+#endif
+#define STATUS_VALUE(x) ydb_write_layer_status.status[x].value.num
+
+#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ydb_write_layer_status, k, c, t, l, inc)
+
+static void
+ydb_write_layer_status_init (void) {
+    // Note: this function initializes the keyname, type, and legend fields.
+    // Value fields are initialized to zero by the compiler.
+ STATUS_INIT(YDB_LAYER_NUM_INSERTS, nullptr, UINT64, "dictionary inserts", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_INSERTS_FAIL, nullptr, UINT64, "dictionary inserts fail", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_DELETES, nullptr, UINT64, "dictionary deletes", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_DELETES_FAIL, nullptr, UINT64, "dictionary deletes fail", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_UPDATES, nullptr, UINT64, "dictionary updates", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_UPDATES_FAIL, nullptr, UINT64, "dictionary updates fail", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_UPDATES_BROADCAST, nullptr, UINT64, "dictionary broadcast updates", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_UPDATES_BROADCAST_FAIL, nullptr, UINT64, "dictionary broadcast updates fail", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_MULTI_INSERTS, nullptr, UINT64, "dictionary multi inserts", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_MULTI_INSERTS_FAIL, nullptr, UINT64, "dictionary multi inserts fail", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_MULTI_DELETES, nullptr, UINT64, "dictionary multi deletes", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_MULTI_DELETES_FAIL, nullptr, UINT64, "dictionary multi deletes fail", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_MULTI_UPDATES, nullptr, UINT64, "dictionary updates multi", TOKU_ENGINE_STATUS);
+ STATUS_INIT(YDB_LAYER_NUM_MULTI_UPDATES_FAIL, nullptr, UINT64, "dictionary updates multi fail", TOKU_ENGINE_STATUS);
+ ydb_write_layer_status.initialized = true;
+}
+#undef STATUS_INIT
+
+void
+ydb_write_layer_get_status(YDB_WRITE_LAYER_STATUS statp) {
+ if (!ydb_write_layer_status.initialized)
+ ydb_write_layer_status_init();
+ *statp = ydb_write_layer_status;
+}
+
+
+static inline uint32_t
+get_prelocked_flags(uint32_t flags) {
+ uint32_t lock_flags = flags & (DB_PRELOCKED | DB_PRELOCKED_WRITE);
+ return lock_flags;
+}
+
+// these next two static functions are defined
+// both here and in ydb.c. We should find a good
+// place for them.
+static int
+ydb_getf_do_nothing(DBT const* UU(key), DBT const* UU(val), void* UU(extra)) {
+ return 0;
+}
+
+// Check if the available file system space is less than the reserve
+// Returns ENOSPC if not enough space, otherwise 0
+static inline int
+env_check_avail_fs_space(DB_ENV *env) {
+ int r = env->i->fs_state == FS_RED ? ENOSPC : 0;
+ if (r) {
+ env->i->enospc_redzone_ctr++;
+ }
+ return r;
+}
+
+// Return 0 if the proposed pair does not violate the size constraints of the DB
+// (insertion is legal).
+// Return nonzero otherwise.
+static int
+db_put_check_size_constraints(DB *db, const DBT *key, const DBT *val) {
+ int r = 0;
+ unsigned int klimit, vlimit;
+
+ toku_ft_get_maximum_advised_key_value_lengths(&klimit, &vlimit);
+ if (key->size > klimit) {
+ r = toku_ydb_do_error(db->dbenv, EINVAL,
+ "The largest key allowed is %u bytes", klimit);
+ } else if (val->size > vlimit) {
+ r = toku_ydb_do_error(db->dbenv, EINVAL,
+ "The largest value allowed is %u bytes", vlimit);
+ }
+ return r;
+}
+
+//Return 0 if insert is legal
+static int
+db_put_check_overwrite_constraint(DB *db, DB_TXN *txn, DBT *key,
+ uint32_t lock_flags, uint32_t overwrite_flag) {
+ int r;
+
+ if (overwrite_flag == 0) { // 0 (yesoverwrite) does not impose constraints.
+ r = 0;
+ } else if (overwrite_flag == DB_NOOVERWRITE) {
+ // Check if (key,anything) exists in dictionary.
+ // If exists, fail. Otherwise, do insert.
+ // The DB_RMW flag causes the cursor to grab a write lock instead of a read lock on the key if it exists.
+ r = db_getf_set(db, txn, lock_flags|DB_SERIALIZABLE|DB_RMW, key, ydb_getf_do_nothing, NULL);
+ if (r == DB_NOTFOUND)
+ r = 0;
+ else if (r == 0)
+ r = DB_KEYEXIST;
+ //Any other error is passed through.
+ } else if (overwrite_flag == DB_NOOVERWRITE_NO_ERROR) {
+ r = 0;
+ } else {
+ //Other flags are not (yet) supported.
+ r = EINVAL;
+ }
+ return r;
+}
+
+
+int
+toku_db_del(DB *db, DB_TXN *txn, DBT *key, uint32_t flags, bool holds_mo_lock) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+ HANDLE_READ_ONLY_TXN(txn);
+
+ uint32_t unchecked_flags = flags;
+ //DB_DELETE_ANY means delete regardless of whether it exists in the db.
+ bool error_if_missing = (bool)(!(flags&DB_DELETE_ANY));
+ unchecked_flags &= ~DB_DELETE_ANY;
+ uint32_t lock_flags = get_prelocked_flags(flags);
+ unchecked_flags &= ~lock_flags;
+ bool do_locking = (bool)(db->i->lt && !(lock_flags&DB_PRELOCKED_WRITE));
+
+ int r = 0;
+ if (unchecked_flags!=0) {
+ r = EINVAL;
+ }
+
+ if (r == 0 && error_if_missing) {
+ //Check if the key exists in the db.
+ r = db_getf_set(db, txn, lock_flags|DB_SERIALIZABLE|DB_RMW, key, ydb_getf_do_nothing, NULL);
+ }
+ if (r == 0 && do_locking) {
+ //Do locking if necessary.
+ r = toku_db_get_point_write_lock(db, txn, key);
+ }
+ if (r == 0) {
+ //Do the actual deleting.
+ if (!holds_mo_lock) toku_multi_operation_client_lock();
+ toku_ft_delete(db->i->ft_handle, key, txn ? db_txn_struct_i(txn)->tokutxn : 0);
+ if (!holds_mo_lock) toku_multi_operation_client_unlock();
+ }
+
+ if (r == 0) {
+ STATUS_VALUE(YDB_LAYER_NUM_DELETES)++; // accountability
+ }
+ else {
+ STATUS_VALUE(YDB_LAYER_NUM_DELETES_FAIL)++; // accountability
+ }
+ return r;
+}
+
+static int
+db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, int flags, bool do_log) {
+ int r = 0;
+ bool unique = false;
+ enum ft_msg_type type = FT_INSERT;
+ if (flags == DB_NOOVERWRITE) {
+ unique = true;
+ } else if (flags == DB_NOOVERWRITE_NO_ERROR) {
+ type = FT_INSERT_NO_OVERWRITE;
+ } else if (flags != 0) {
+ // All other non-zero flags are unsupported
+ r = EINVAL;
+ }
+ if (r == 0) {
+ TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : nullptr;
+ if (unique) {
+ r = toku_ft_insert_unique(db->i->ft_handle, key, val, ttxn, do_log);
+ } else {
+ toku_ft_maybe_insert(db->i->ft_handle, key, val, ttxn, false, ZERO_LSN, do_log, type);
+ }
+ invariant(r == DB_KEYEXIST || r == 0);
+ }
+ return r;
+}
+
+int
+toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_mo_lock) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+ HANDLE_READ_ONLY_TXN(txn);
+ int r = 0;
+
+ uint32_t lock_flags = get_prelocked_flags(flags);
+ flags &= ~lock_flags;
+
+ r = db_put_check_size_constraints(db, key, val);
+
+ //Do locking if necessary.
+ bool do_locking = (bool)(db->i->lt && !(lock_flags&DB_PRELOCKED_WRITE));
+ if (r == 0 && do_locking) {
+ r = toku_db_get_point_write_lock(db, txn, key);
+ }
+ if (r == 0) {
+ //Insert into the ft.
+ if (!holds_mo_lock) toku_multi_operation_client_lock();
+ r = db_put(db, txn, key, val, flags, true);
+ if (!holds_mo_lock) toku_multi_operation_client_unlock();
+ }
+
+ if (r == 0) {
+ // helgrind flags a race on this status update. we increment it atomically to satisfy helgrind.
+ // STATUS_VALUE(YDB_LAYER_NUM_INSERTS)++; // accountability
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(YDB_LAYER_NUM_INSERTS), 1);
+ } else {
+ // STATUS_VALUE(YDB_LAYER_NUM_INSERTS_FAIL)++; // accountability
+ (void) toku_sync_fetch_and_add(&STATUS_VALUE(YDB_LAYER_NUM_INSERTS_FAIL), 1);
+ }
+
+ return r;
+}
+
+static int
+toku_db_update(DB *db, DB_TXN *txn,
+ const DBT *key,
+ const DBT *update_function_extra,
+ uint32_t flags) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+ HANDLE_READ_ONLY_TXN(txn);
+ int r = 0;
+
+ uint32_t lock_flags = get_prelocked_flags(flags);
+ flags &= ~lock_flags;
+
+ r = db_put_check_size_constraints(db, key, update_function_extra);
+ if (r != 0) { goto cleanup; }
+
+ bool do_locking;
+ do_locking = (db->i->lt && !(lock_flags & DB_PRELOCKED_WRITE));
+ if (do_locking) {
+ r = toku_db_get_point_write_lock(db, txn, key);
+ if (r != 0) { goto cleanup; }
+ }
+
+ TOKUTXN ttxn;
+ ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL;
+ toku_multi_operation_client_lock();
+ toku_ft_maybe_update(db->i->ft_handle, key, update_function_extra, ttxn,
+ false, ZERO_LSN, true);
+ toku_multi_operation_client_unlock();
+
+cleanup:
+ if (r == 0)
+ STATUS_VALUE(YDB_LAYER_NUM_UPDATES)++; // accountability
+ else
+ STATUS_VALUE(YDB_LAYER_NUM_UPDATES_FAIL)++; // accountability
+ return r;
+}
+
+
+// DB_IS_RESETTING_OP is true if the dictionary should be considered as if created by this transaction.
+// For example, it will be true if toku_db_update_broadcast() is used to implement a schema change (such
+// as adding a column), and will be false if used simply to update all the rows of a table (such as
+// incrementing a field).
+static int
+toku_db_update_broadcast(DB *db, DB_TXN *txn,
+ const DBT *update_function_extra,
+ uint32_t flags) {
+ HANDLE_PANICKED_DB(db);
+ HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
+ HANDLE_READ_ONLY_TXN(txn);
+ int r = 0;
+
+ uint32_t lock_flags = get_prelocked_flags(flags);
+ flags &= ~lock_flags;
+ uint32_t is_resetting_op_flag = flags & DB_IS_RESETTING_OP;
+ flags &= is_resetting_op_flag;
+ bool is_resetting_op = (is_resetting_op_flag != 0);
+
+
+ if (is_resetting_op) {
+ if (txn->parent != NULL) {
+ r = EINVAL; // cannot have a parent if you are a resetting op
+ goto cleanup;
+ }
+ r = toku_db_pre_acquire_fileops_lock(db, txn);
+ if (r != 0) { goto cleanup; }
+ }
+ {
+ DBT null_key;
+ toku_init_dbt(&null_key);
+ r = db_put_check_size_constraints(db, &null_key, update_function_extra);
+ if (r != 0) { goto cleanup; }
+ }
+
+ bool do_locking;
+ do_locking = (db->i->lt && !(lock_flags & DB_PRELOCKED_WRITE));
+ if (do_locking) {
+ r = toku_db_pre_acquire_table_lock(db, txn);
+ if (r != 0) { goto cleanup; }
+ }
+
+ TOKUTXN ttxn;
+ ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL;
+ toku_multi_operation_client_lock();
+ toku_ft_maybe_update_broadcast(db->i->ft_handle, update_function_extra, ttxn,
+ false, ZERO_LSN, true, is_resetting_op);
+ toku_multi_operation_client_unlock();
+
+cleanup:
+ if (r == 0)
+ STATUS_VALUE(YDB_LAYER_NUM_UPDATES_BROADCAST)++; // accountability
+ else
+ STATUS_VALUE(YDB_LAYER_NUM_UPDATES_BROADCAST_FAIL)++; // accountability
+ return r;
+}
+
+static void
+log_del_single(DB_TXN *txn, FT_HANDLE ft_handle, const DBT *key) {
+ TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+ toku_ft_log_del(ttxn, ft_handle, key);
+}
+
+static uint32_t
+sum_size(uint32_t num_arrays, DBT_ARRAY keys[], uint32_t overhead) {
+ uint32_t sum = 0;
+ for (uint32_t i = 0; i < num_arrays; i++) {
+ for (uint32_t j = 0; j < keys[i].size; j++) {
+ sum += keys[i].dbts[j].size + overhead;
+ }
+ }
+ return sum;
+}
+
+static void
+log_del_multiple(DB_TXN *txn, DB *src_db, const DBT *key, const DBT *val, uint32_t num_dbs, FT_HANDLE fts[], DBT_ARRAY keys[]) {
+ if (num_dbs > 0) {
+ TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+ FT_HANDLE src_ft = src_db ? src_db->i->ft_handle : NULL;
+ uint32_t del_multiple_size = key->size + val->size + num_dbs*sizeof (uint32_t) + toku_log_enq_delete_multiple_overhead;
+ uint32_t del_single_sizes = sum_size(num_dbs, keys, toku_log_enq_delete_any_overhead);
+ if (del_single_sizes < del_multiple_size) {
+ for (uint32_t i = 0; i < num_dbs; i++) {
+ for (uint32_t j = 0; j < keys[i].size; j++) {
+ log_del_single(txn, fts[i], &keys[i].dbts[j]);
+ }
+ }
+ } else {
+ toku_ft_log_del_multiple(ttxn, src_ft, fts, num_dbs, key, val);
+ }
+ }
+}
+
+static uint32_t
+lookup_src_db(uint32_t num_dbs, DB *db_array[], DB *src_db) {
+ uint32_t which_db;
+ for (which_db = 0; which_db < num_dbs; which_db++)
+ if (db_array[which_db] == src_db)
+ break;
+ return which_db;
+}
+
+static int
+do_del_multiple(DB_TXN *txn, uint32_t num_dbs, DB *db_array[], DBT_ARRAY keys[], DB *src_db, const DBT *src_key, bool indexer_shortcut) {
+ int r = 0;
+ TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+ for (uint32_t which_db = 0; r == 0 && which_db < num_dbs; which_db++) {
+ DB *db = db_array[which_db];
+
+ paranoid_invariant(keys[which_db].size <= keys[which_db].capacity);
+
+        // if the db is being indexed by an indexer, then insert a delete message into the db if the src key is to the left of or equal to the
+        // indexer's cursor. we have to get the src_db from the indexer and find it in the db_array.
+ int do_delete = true;
+ DB_INDEXER *indexer = toku_db_get_indexer(db);
+ if (indexer && !indexer_shortcut) { // if this db is the index under construction
+ DB *indexer_src_db = toku_indexer_get_src_db(indexer);
+ invariant(indexer_src_db != NULL);
+ const DBT *indexer_src_key;
+ if (src_db == indexer_src_db)
+ indexer_src_key = src_key;
+ else {
+ uint32_t which_src_db = lookup_src_db(num_dbs, db_array, indexer_src_db);
+ invariant(which_src_db < num_dbs);
+ // The indexer src db must have exactly one item or we don't know how to continue.
+ invariant(keys[which_src_db].size == 1);
+ indexer_src_key = &keys[which_src_db].dbts[0];
+ }
+ do_delete = toku_indexer_should_insert_key(indexer, indexer_src_key);
+ toku_indexer_update_estimate(indexer);
+ }
+ if (do_delete) {
+ for (uint32_t i = 0; i < keys[which_db].size; i++) {
+ toku_ft_maybe_delete(db->i->ft_handle, &keys[which_db].dbts[i], ttxn, false, ZERO_LSN, false);
+ }
+ }
+ }
+ return r;
+}
+
+//
+// If a hot index is in progress, gets the indexer.
+// Also verifies that there is at most one hot index
+// in progress; if it finds more than one, returns EINVAL.
+//
+static int
+get_indexer_if_exists(
+ uint32_t num_dbs,
+ DB **db_array,
+ DB *src_db,
+ DB_INDEXER** indexerp,
+ bool *src_db_is_indexer_src
+ )
+{
+ int r = 0;
+ DB_INDEXER* first_indexer = NULL;
+ for (uint32_t i = 0; i < num_dbs; i++) {
+ DB_INDEXER* indexer = toku_db_get_indexer(db_array[i]);
+ if (indexer) {
+ if (!first_indexer) {
+ first_indexer = indexer;
+ }
+ else if (first_indexer != indexer) {
+ r = EINVAL;
+ }
+ }
+ }
+ if (r == 0) {
+ if (first_indexer) {
+ DB* indexer_src_db = toku_indexer_get_src_db(first_indexer);
+ // we should just make this an invariant
+ if (src_db == indexer_src_db) {
+ *src_db_is_indexer_src = true;
+ }
+ }
+ *indexerp = first_indexer;
+ }
+ return r;
+}
+
+int
+env_del_multiple(
+ DB_ENV *env,
+ DB *src_db,
+ DB_TXN *txn,
+ const DBT *src_key,
+ const DBT *src_val,
+ uint32_t num_dbs,
+ DB **db_array,
+ DBT_ARRAY *keys,
+ uint32_t *flags_array)
+{
+ int r;
+ DBT_ARRAY del_keys[num_dbs];
+ DB_INDEXER* indexer = NULL;
+
+ HANDLE_PANICKED_ENV(env);
+ HANDLE_READ_ONLY_TXN(txn);
+
+ uint32_t lock_flags[num_dbs];
+ uint32_t remaining_flags[num_dbs];
+ FT_HANDLE fts[num_dbs];
+ bool indexer_lock_taken = false;
+ bool src_same = false;
+ bool indexer_shortcut = false;
+ if (!txn) {
+ r = EINVAL;
+ goto cleanup;
+ }
+ if (!env->i->generate_row_for_del) {
+ r = EINVAL;
+ goto cleanup;
+ }
+
+ HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
+ r = get_indexer_if_exists(num_dbs, db_array, src_db, &indexer, &src_same);
+ if (r) {
+ goto cleanup;
+ }
+
+ for (uint32_t which_db = 0; which_db < num_dbs; which_db++) {
+ DB *db = db_array[which_db];
+ lock_flags[which_db] = get_prelocked_flags(flags_array[which_db]);
+ remaining_flags[which_db] = flags_array[which_db] & ~lock_flags[which_db];
+
+ if (db == src_db) {
+ del_keys[which_db].size = 1;
+ del_keys[which_db].capacity = 1;
+ del_keys[which_db].dbts = const_cast<DBT*>(src_key);
+ }
+ else {
+ //Generate the key
+ r = env->i->generate_row_for_del(db, src_db, &keys[which_db], src_key, src_val);
+ if (r != 0) goto cleanup;
+ del_keys[which_db] = keys[which_db];
+ paranoid_invariant(del_keys[which_db].size <= del_keys[which_db].capacity);
+ }
+
+ if (remaining_flags[which_db] & ~DB_DELETE_ANY) {
+ r = EINVAL;
+ goto cleanup;
+ }
+ bool error_if_missing = (bool)(!(remaining_flags[which_db]&DB_DELETE_ANY));
+ for (uint32_t which_key = 0; which_key < del_keys[which_db].size; which_key++) {
+ DBT *del_key = &del_keys[which_db].dbts[which_key];
+ if (error_if_missing) {
+ //Check if the key exists in the db.
+ //Grabs a write lock
+ r = db_getf_set(db, txn, lock_flags[which_db]|DB_SERIALIZABLE|DB_RMW, del_key, ydb_getf_do_nothing, NULL);
+ if (r != 0) goto cleanup;
+ } else if (db->i->lt && !(lock_flags[which_db] & DB_PRELOCKED_WRITE)) { //Do locking if necessary.
+ //Needs locking
+ r = toku_db_get_point_write_lock(db, txn, del_key);
+ if (r != 0) goto cleanup;
+ }
+ }
+ fts[which_db] = db->i->ft_handle;
+ }
+
+ if (indexer) {
+ // do a cheap check
+ if (src_same) {
+ bool may_insert = toku_indexer_may_insert(indexer, src_key);
+ if (!may_insert) {
+ toku_indexer_lock(indexer);
+ indexer_lock_taken = true;
+ }
+ else {
+ indexer_shortcut = true;
+ }
+ }
+ }
+ toku_multi_operation_client_lock();
+ log_del_multiple(txn, src_db, src_key, src_val, num_dbs, fts, del_keys);
+ r = do_del_multiple(txn, num_dbs, db_array, del_keys, src_db, src_key, indexer_shortcut);
+ toku_multi_operation_client_unlock();
+ if (indexer_lock_taken) {
+ toku_indexer_unlock(indexer);
+ }
+
+cleanup:
+ if (r == 0)
+ STATUS_VALUE(YDB_LAYER_NUM_MULTI_DELETES) += num_dbs; // accountability
+ else
+ STATUS_VALUE(YDB_LAYER_NUM_MULTI_DELETES_FAIL) += num_dbs; // accountability
+ return r;
+}
+
+static void
+log_put_multiple(DB_TXN *txn, DB *src_db, const DBT *src_key, const DBT *src_val, uint32_t num_dbs, FT_HANDLE fts[]) {
+ if (num_dbs > 0) {
+ TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+ FT_HANDLE src_ft = src_db ? src_db->i->ft_handle : NULL;
+ toku_ft_log_put_multiple(ttxn, src_ft, fts, num_dbs, src_key, src_val);
+ }
+}
+
+// Requires: if remaining_flags is non-null, this function performs any required uniqueness checks;
+// otherwise, the caller is responsible for them.
+static int
+do_put_multiple(DB_TXN *txn, uint32_t num_dbs, DB *db_array[], DBT_ARRAY keys[], DBT_ARRAY vals[], uint32_t *remaining_flags, DB *src_db, const DBT *src_key, bool indexer_shortcut) {
+ int r = 0;
+ for (uint32_t which_db = 0; which_db < num_dbs; which_db++) {
+ DB *db = db_array[which_db];
+
+ invariant(keys[which_db].size == vals[which_db].size);
+ paranoid_invariant(keys[which_db].size <= keys[which_db].capacity);
+ paranoid_invariant(vals[which_db].size <= vals[which_db].capacity);
+
+ if (keys[which_db].size > 0) {
+ bool do_put = true;
+ DB_INDEXER *indexer = toku_db_get_indexer(db);
+ if (indexer && !indexer_shortcut) { // if this db is the index under construction
+ DB *indexer_src_db = toku_indexer_get_src_db(indexer);
+ invariant(indexer_src_db != NULL);
+ const DBT *indexer_src_key;
+ if (src_db == indexer_src_db)
+ indexer_src_key = src_key;
+ else {
+ uint32_t which_src_db = lookup_src_db(num_dbs, db_array, indexer_src_db);
+ invariant(which_src_db < num_dbs);
+ // The indexer src db must have exactly one item or we don't know how to continue.
+ invariant(keys[which_src_db].size == 1);
+ indexer_src_key = &keys[which_src_db].dbts[0];
+ }
+ do_put = toku_indexer_should_insert_key(indexer, indexer_src_key);
+ toku_indexer_update_estimate(indexer);
+ }
+ if (do_put) {
+ for (uint32_t i = 0; i < keys[which_db].size; i++) {
+ int flags = 0;
+ if (remaining_flags != nullptr) {
+ flags = remaining_flags[which_db];
+ invariant(!(flags & DB_NOOVERWRITE_NO_ERROR));
+ }
+ r = db_put(db, txn, &keys[which_db].dbts[i], &vals[which_db].dbts[i], flags, false);
+ if (r != 0) {
+ goto done;
+ }
+ }
+ }
+ }
+ }
+done:
+ return r;
+}
+
+static int
+env_put_multiple_internal(
+ DB_ENV *env,
+ DB *src_db,
+ DB_TXN *txn,
+ const DBT *src_key,
+ const DBT *src_val,
+ uint32_t num_dbs,
+ DB **db_array,
+ DBT_ARRAY *keys,
+ DBT_ARRAY *vals,
+ uint32_t *flags_array)
+{
+ int r;
+ DBT_ARRAY put_keys[num_dbs];
+ DBT_ARRAY put_vals[num_dbs];
+ DB_INDEXER* indexer = NULL;
+
+ HANDLE_PANICKED_ENV(env);
+ HANDLE_READ_ONLY_TXN(txn);
+
+ uint32_t lock_flags[num_dbs];
+ uint32_t remaining_flags[num_dbs];
+ FT_HANDLE fts[num_dbs];
+ bool indexer_shortcut = false;
+ bool indexer_lock_taken = false;
+ bool src_same = false;
+
+ if (!txn || !num_dbs) {
+ r = EINVAL;
+ goto cleanup;
+ }
+ if (!env->i->generate_row_for_put) {
+ r = EINVAL;
+ goto cleanup;
+ }
+
+ HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
+ r = get_indexer_if_exists(num_dbs, db_array, src_db, &indexer, &src_same);
+ if (r) {
+ goto cleanup;
+ }
+
+ for (uint32_t which_db = 0; which_db < num_dbs; which_db++) {
+ DB *db = db_array[which_db];
+
+ lock_flags[which_db] = get_prelocked_flags(flags_array[which_db]);
+ remaining_flags[which_db] = flags_array[which_db] & ~lock_flags[which_db];
+
+ //Generate the row
+ if (db == src_db) {
+ put_keys[which_db].size = put_keys[which_db].capacity = 1;
+ put_keys[which_db].dbts = const_cast<DBT*>(src_key);
+
+ put_vals[which_db].size = put_vals[which_db].capacity = 1;
+ put_vals[which_db].dbts = const_cast<DBT*>(src_val);
+ }
+ else {
+ r = env->i->generate_row_for_put(db, src_db, &keys[which_db], &vals[which_db], src_key, src_val);
+ if (r != 0) goto cleanup;
+
+ paranoid_invariant(keys[which_db].size <= keys[which_db].capacity);
+ paranoid_invariant(vals[which_db].size <= vals[which_db].capacity);
+ paranoid_invariant(keys[which_db].size == vals[which_db].size);
+
+ put_keys[which_db] = keys[which_db];
+ put_vals[which_db] = vals[which_db];
+ }
+ for (uint32_t i = 0; i < put_keys[which_db].size; i++) {
+ DBT &put_key = put_keys[which_db].dbts[i];
+ DBT &put_val = put_vals[which_db].dbts[i];
+
+ // check size constraints
+ r = db_put_check_size_constraints(db, &put_key, &put_val);
+ if (r != 0) goto cleanup;
+
+ if (remaining_flags[which_db] == DB_NOOVERWRITE_NO_ERROR) {
+                //put_multiple does not support DB_NOOVERWRITE_NO_ERROR, since we would
+                //have to log the flag in the put_multiple log entry.
+ r = EINVAL; goto cleanup;
+ }
+
+ //Do locking if necessary.
+ if (db->i->lt && !(lock_flags[which_db] & DB_PRELOCKED_WRITE)) {
+ //Needs locking
+ r = toku_db_get_point_write_lock(db, txn, &put_key);
+ if (r != 0) goto cleanup;
+ }
+ }
+ fts[which_db] = db->i->ft_handle;
+ }
+
+ if (indexer) {
+ // do a cheap check
+ if (src_same) {
+ bool may_insert = toku_indexer_may_insert(indexer, src_key);
+ if (!may_insert) {
+ toku_indexer_lock(indexer);
+ indexer_lock_taken = true;
+ }
+ else {
+ indexer_shortcut = true;
+ }
+ }
+ }
+ toku_multi_operation_client_lock();
+ r = do_put_multiple(txn, num_dbs, db_array, put_keys, put_vals, remaining_flags, src_db, src_key, indexer_shortcut);
+ if (r == 0) {
+ log_put_multiple(txn, src_db, src_key, src_val, num_dbs, fts);
+ }
+ toku_multi_operation_client_unlock();
+ if (indexer_lock_taken) {
+ toku_indexer_unlock(indexer);
+ }
+
+cleanup:
+ if (r == 0)
+ STATUS_VALUE(YDB_LAYER_NUM_MULTI_INSERTS) += num_dbs; // accountability
+ else
+ STATUS_VALUE(YDB_LAYER_NUM_MULTI_INSERTS_FAIL) += num_dbs; // accountability
+ return r;
+}
+
+static void swap_dbts(DBT *a, DBT *b) {
+ DBT c;
+ c = *a;
+ *a = *b;
+ *b = c;
+}
+
+//TODO: 26 Add comment in API description about.. new val.size being generated as '0' REQUIRES old_val.size == 0
+//
+int
+env_update_multiple(DB_ENV *env, DB *src_db, DB_TXN *txn,
+ DBT *old_src_key, DBT *old_src_data,
+ DBT *new_src_key, DBT *new_src_data,
+ uint32_t num_dbs, DB **db_array, uint32_t* flags_array,
+ uint32_t num_keys, DBT_ARRAY keys[],
+ uint32_t num_vals, DBT_ARRAY vals[]) {
+ int r = 0;
+
+ HANDLE_PANICKED_ENV(env);
+ DB_INDEXER* indexer = NULL;
+ bool indexer_shortcut = false;
+ bool indexer_lock_taken = false;
+ bool src_same = false;
+ HANDLE_READ_ONLY_TXN(txn);
+ DBT_ARRAY old_key_arrays[num_dbs];
+ DBT_ARRAY new_key_arrays[num_dbs];
+ DBT_ARRAY new_val_arrays[num_dbs];
+
+ if (!txn) {
+ r = EINVAL;
+ goto cleanup;
+ }
+ if (!env->i->generate_row_for_put) {
+ r = EINVAL;
+ goto cleanup;
+ }
+
+ if (num_dbs + num_dbs > num_keys || num_dbs > num_vals) {
+ r = ENOMEM; goto cleanup;
+ }
+
+ HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
+ r = get_indexer_if_exists(num_dbs, db_array, src_db, &indexer, &src_same);
+ if (r) {
+ goto cleanup;
+ }
+
+ {
+ uint32_t n_del_dbs = 0;
+ DB *del_dbs[num_dbs];
+ FT_HANDLE del_fts[num_dbs];
+ DBT_ARRAY del_key_arrays[num_dbs];
+
+ uint32_t n_put_dbs = 0;
+ DB *put_dbs[num_dbs];
+ FT_HANDLE put_fts[num_dbs];
+ DBT_ARRAY put_key_arrays[num_dbs];
+ DBT_ARRAY put_val_arrays[num_dbs];
+
+ uint32_t lock_flags[num_dbs];
+ uint32_t remaining_flags[num_dbs];
+
+ for (uint32_t which_db = 0; which_db < num_dbs; which_db++) {
+ DB *db = db_array[which_db];
+
+ lock_flags[which_db] = get_prelocked_flags(flags_array[which_db]);
+ remaining_flags[which_db] = flags_array[which_db] & ~lock_flags[which_db];
+
+ if (db == src_db) {
+ // Copy the old keys
+ old_key_arrays[which_db].size = old_key_arrays[which_db].capacity = 1;
+ old_key_arrays[which_db].dbts = old_src_key;
+
+ // Copy the new keys and vals
+ new_key_arrays[which_db].size = new_key_arrays[which_db].capacity = 1;
+ new_key_arrays[which_db].dbts = new_src_key;
+
+ new_val_arrays[which_db].size = new_val_arrays[which_db].capacity = 1;
+ new_val_arrays[which_db].dbts = new_src_data;
+ } else {
+ // keys[0..num_dbs-1] are the new keys
+ // keys[num_dbs..2*num_dbs-1] are the old keys
+ // vals[0..num_dbs-1] are the new vals
+
+ // Generate the old keys
+ r = env->i->generate_row_for_put(db, src_db, &keys[which_db + num_dbs], NULL, old_src_key, old_src_data);
+ if (r != 0) goto cleanup;
+
+ paranoid_invariant(keys[which_db+num_dbs].size <= keys[which_db+num_dbs].capacity);
+ old_key_arrays[which_db] = keys[which_db+num_dbs];
+
+ // Generate the new keys and vals
+ r = env->i->generate_row_for_put(db, src_db, &keys[which_db], &vals[which_db], new_src_key, new_src_data);
+ if (r != 0) goto cleanup;
+
+ paranoid_invariant(keys[which_db].size <= keys[which_db].capacity);
+ paranoid_invariant(vals[which_db].size <= vals[which_db].capacity);
+ paranoid_invariant(keys[which_db].size == vals[which_db].size);
+
+ new_key_arrays[which_db] = keys[which_db];
+ new_val_arrays[which_db] = vals[which_db];
+ }
+ DBT_ARRAY &old_keys = old_key_arrays[which_db];
+ DBT_ARRAY &new_keys = new_key_arrays[which_db];
+ DBT_ARRAY &new_vals = new_val_arrays[which_db];
+
+ uint32_t num_skip = 0;
+ uint32_t num_del = 0;
+ uint32_t num_put = 0;
+ // Next index in old_keys to look at
+ uint32_t idx_old = 0;
+ // Next index in new_keys/new_vals to look at
+ uint32_t idx_new = 0;
+ uint32_t idx_old_used = 0;
+ uint32_t idx_new_used = 0;
+ while (idx_old < old_keys.size || idx_new < new_keys.size) {
+ // Check for old key, both, new key
+ DBT *curr_old_key = &old_keys.dbts[idx_old];
+ DBT *curr_new_key = &new_keys.dbts[idx_new];
+ DBT *curr_new_val = &new_vals.dbts[idx_new];
+
+ bool locked_new_key = false;
+ int cmp;
+ if (idx_new == new_keys.size) {
+ cmp = -1;
+ } else if (idx_old == old_keys.size) {
+ cmp = +1;
+ } else {
+ const toku::comparator &cmpfn = toku_db_get_comparator(db);
+ cmp = cmpfn(curr_old_key, curr_new_key);
+ }
+
+ bool do_del = false;
+ bool do_put = false;
+ bool do_skip = false;
+ if (cmp > 0) { // New key does not exist in old array
+ //Check overwrite constraints only in the case where the keys are not equal
+ //(new key is alone/not equal to old key)
+                    // If the keys are equal, then we do not care whether the flag is DB_NOOVERWRITE or 0
+ r = db_put_check_overwrite_constraint(db, txn,
+ curr_new_key,
+ lock_flags[which_db], remaining_flags[which_db]);
+ if (r != 0) goto cleanup;
+ if (remaining_flags[which_db] == DB_NOOVERWRITE) {
+ locked_new_key = true;
+ }
+
+ if (remaining_flags[which_db] == DB_NOOVERWRITE_NO_ERROR) {
+                        //update_multiple does not support DB_NOOVERWRITE_NO_ERROR, since we would
+                        //have to log the flag in the put_multiple log entry.
+ r = EINVAL; goto cleanup;
+ }
+ do_put = true;
+ } else if (cmp < 0) {
+ // lock old key only when it does not exist in new array
+ // otherwise locking new key takes care of this
+ if (db->i->lt && !(lock_flags[which_db] & DB_PRELOCKED_WRITE)) {
+ r = toku_db_get_point_write_lock(db, txn, curr_old_key);
+ if (r != 0) goto cleanup;
+ }
+ do_del = true;
+ } else {
+ do_put = curr_new_val->size > 0 ||
+ curr_old_key->size != curr_new_key->size ||
+ memcmp(curr_old_key->data, curr_new_key->data, curr_old_key->size);
+ do_skip = !do_put;
+ }
+                // Check put size constraints and insert the new key only if the keys are unequal (byte for byte) or there is a val.
+                // We treat any new val with size > 0 as different from the old val; this saves us from generating the old val.
+                // e.g. if (!key_eq || !(dbt_cmp(&vals[which_db], &vals[which_db + num_dbs]) == 0)) { /* ... */ }
+ if (do_put) {
+ r = db_put_check_size_constraints(db, curr_new_key, curr_new_val);
+ if (r != 0) goto cleanup;
+
+ // lock new key unless already locked
+ if (db->i->lt && !(lock_flags[which_db] & DB_PRELOCKED_WRITE) && !locked_new_key) {
+ r = toku_db_get_point_write_lock(db, txn, curr_new_key);
+ if (r != 0) goto cleanup;
+ }
+ }
+
+ // TODO: 26 Add comments explaining squish and why not just use another stack array
+                // Add more comments to better explain this if / else-if / else chain
+ if (do_skip) {
+ paranoid_invariant(cmp == 0);
+ paranoid_invariant(!do_put);
+ paranoid_invariant(!do_del);
+
+ num_skip++;
+ idx_old++;
+ idx_new++;
+ } else if (do_put) {
+ paranoid_invariant(cmp >= 0);
+ paranoid_invariant(!do_skip);
+ paranoid_invariant(!do_del);
+
+ num_put++;
+ if (idx_new != idx_new_used) {
+ swap_dbts(&new_keys.dbts[idx_new_used], &new_keys.dbts[idx_new]);
+ swap_dbts(&new_vals.dbts[idx_new_used], &new_vals.dbts[idx_new]);
+ }
+ idx_new++;
+ idx_new_used++;
+ if (cmp == 0) {
+ idx_old++;
+ }
+ } else {
+ invariant(do_del);
+ paranoid_invariant(cmp < 0);
+ paranoid_invariant(!do_skip);
+ paranoid_invariant(!do_put);
+
+ num_del++;
+ if (idx_old != idx_old_used) {
+ swap_dbts(&old_keys.dbts[idx_old_used], &old_keys.dbts[idx_old]);
+ }
+ idx_old++;
+ idx_old_used++;
+ }
+ }
+ old_keys.size = idx_old_used;
+ new_keys.size = idx_new_used;
+ new_vals.size = idx_new_used;
+
+ if (num_del > 0) {
+ del_dbs[n_del_dbs] = db;
+ del_fts[n_del_dbs] = db->i->ft_handle;
+ del_key_arrays[n_del_dbs] = old_keys;
+ n_del_dbs++;
+ }
+            // If we put nothing but delete some (though not all) keys, we still need the log_put_multiple to happen.
+ // Include this db in the put_dbs so we do log_put_multiple.
+ // do_put_multiple will be a no-op for this db.
+ if (num_put > 0 || (num_del > 0 && num_skip > 0)) {
+ put_dbs[n_put_dbs] = db;
+ put_fts[n_put_dbs] = db->i->ft_handle;
+ put_key_arrays[n_put_dbs] = new_keys;
+ put_val_arrays[n_put_dbs] = new_vals;
+ n_put_dbs++;
+ }
+ }
+ if (indexer) {
+ // do a cheap check
+ if (src_same) {
+ bool may_insert =
+ toku_indexer_may_insert(indexer, old_src_key) &&
+ toku_indexer_may_insert(indexer, new_src_key);
+ if (!may_insert) {
+ toku_indexer_lock(indexer);
+ indexer_lock_taken = true;
+ }
+ else {
+ indexer_shortcut = true;
+ }
+ }
+ }
+ toku_multi_operation_client_lock();
+ if (r == 0 && n_del_dbs > 0) {
+ log_del_multiple(txn, src_db, old_src_key, old_src_data, n_del_dbs, del_fts, del_key_arrays);
+ r = do_del_multiple(txn, n_del_dbs, del_dbs, del_key_arrays, src_db, old_src_key, indexer_shortcut);
+ }
+
+ if (r == 0 && n_put_dbs > 0) {
+ // We sometimes skip some keys for del/put during runtime, but during recovery
+ // we (may) delete ALL the keys for a given DB. Therefore we must put ALL the keys during
+ // recovery so we don't end up losing data.
+ // So unlike env->put_multiple, we ONLY log a 'put_multiple' log entry.
+ log_put_multiple(txn, src_db, new_src_key, new_src_data, n_put_dbs, put_fts);
+ r = do_put_multiple(txn, n_put_dbs, put_dbs, put_key_arrays, put_val_arrays, nullptr, src_db, new_src_key, indexer_shortcut);
+ }
+ toku_multi_operation_client_unlock();
+ if (indexer_lock_taken) {
+ toku_indexer_unlock(indexer);
+ }
+ }
+
+cleanup:
+ if (r == 0)
+ STATUS_VALUE(YDB_LAYER_NUM_MULTI_UPDATES) += num_dbs; // accountability
+ else
+ STATUS_VALUE(YDB_LAYER_NUM_MULTI_UPDATES_FAIL) += num_dbs; // accountability
+ return r;
+}
+
+int
+autotxn_db_del(DB* db, DB_TXN* txn, DBT* key, uint32_t flags) {
+ bool changed; int r;
+ r = toku_db_construct_autotxn(db, &txn, &changed, false);
+ if (r!=0) return r;
+ r = toku_db_del(db, txn, key, flags, false);
+ return toku_db_destruct_autotxn(txn, r, changed);
+}
+
+int
+autotxn_db_put(DB* db, DB_TXN* txn, DBT* key, DBT* data, uint32_t flags) {
+ //{ unsigned i; printf("put %p keylen=%d key={", db, key->size); for(i=0; i<key->size; i++) printf("%d,", ((char*)key->data)[i]); printf("} datalen=%d data={", data->size); for(i=0; i<data->size; i++) printf("%d,", ((char*)data->data)[i]); printf("}\n"); }
+ bool changed; int r;
+ r = env_check_avail_fs_space(db->dbenv);
+ if (r != 0) { goto cleanup; }
+ r = toku_db_construct_autotxn(db, &txn, &changed, false);
+ if (r!=0) {
+ goto cleanup;
+ }
+ r = toku_db_put(db, txn, key, data, flags, false);
+ r = toku_db_destruct_autotxn(txn, r, changed);
+cleanup:
+ return r;
+}
+
+int
+autotxn_db_update(DB *db, DB_TXN *txn,
+ const DBT *key,
+ const DBT *update_function_extra,
+ uint32_t flags) {
+ bool changed; int r;
+ r = env_check_avail_fs_space(db->dbenv);
+ if (r != 0) { goto cleanup; }
+ r = toku_db_construct_autotxn(db, &txn, &changed, false);
+ if (r != 0) { return r; }
+ r = toku_db_update(db, txn, key, update_function_extra, flags);
+ r = toku_db_destruct_autotxn(txn, r, changed);
+cleanup:
+ return r;
+}
+
+int
+autotxn_db_update_broadcast(DB *db, DB_TXN *txn,
+ const DBT *update_function_extra,
+ uint32_t flags) {
+ bool changed; int r;
+ r = env_check_avail_fs_space(db->dbenv);
+ if (r != 0) { goto cleanup; }
+ r = toku_db_construct_autotxn(db, &txn, &changed, false);
+ if (r != 0) { return r; }
+ r = toku_db_update_broadcast(db, txn, update_function_extra, flags);
+ r = toku_db_destruct_autotxn(txn, r, changed);
+cleanup:
+ return r;
+}
+
+int
+env_put_multiple(DB_ENV *env, DB *src_db, DB_TXN *txn, const DBT *src_key, const DBT *src_val, uint32_t num_dbs, DB **db_array, DBT_ARRAY *keys, DBT_ARRAY *vals, uint32_t *flags_array) {
+ int r = env_check_avail_fs_space(env);
+ if (r == 0) {
+ r = env_put_multiple_internal(env, src_db, txn, src_key, src_val, num_dbs, db_array, keys, vals, flags_array);
+ }
+ return r;
+}
+
+int
+toku_ydb_check_avail_fs_space(DB_ENV *env) {
+ int rval = env_check_avail_fs_space(env);
+ return rval;
+}
+#undef STATUS_VALUE
+
+#include <toku_race_tools.h>
+void __attribute__((constructor)) toku_ydb_write_helgrind_ignore(void);
+void
+toku_ydb_write_helgrind_ignore(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&ydb_write_layer_status, sizeof ydb_write_layer_status);
+}
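
For reference, a minimal sketch of the single-dictionary write path implemented above (DB->put and DB->del route through the autotxn wrappers into toku_db_put/toku_db_del). The opened handles env and db are assumed; key and value contents are placeholders.

#include <db.h>
#include <string.h>
#include <assert.h>

static void sketch_single_put_del(DB_ENV *env, DB *db) {
    DB_TXN *txn = NULL;
    int r = env->txn_begin(env, NULL, &txn, 0);
    assert(r == 0);

    char k[] = "key";
    char v[] = "val";
    DBT key, val;
    memset(&key, 0, sizeof key); key.data = k; key.size = sizeof k;
    memset(&val, 0, sizeof val); val.data = v; val.size = sizeof v;

    // DB_NOOVERWRITE makes db_put() take the unique-insert path and fail with
    // DB_KEYEXIST if the key is already present.
    r = db->put(db, txn, &key, &val, DB_NOOVERWRITE);
    assert(r == 0 || r == DB_KEYEXIST);

    // DB_DELETE_ANY skips the existence check performed by toku_db_del().
    r = db->del(db, txn, &key, DB_DELETE_ANY);
    assert(r == 0);

    r = txn->commit(txn, 0);
    assert(r == 0);
}
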
diff --git a/storage/tokudb/PerconaFT/src/ydb_write.h b/storage/tokudb/PerconaFT/src/ydb_write.h
new file mode 100644
index 00000000..502b60c0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/src/ydb_write.h
@@ -0,0 +1,104 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+typedef enum {
+ YDB_LAYER_NUM_INSERTS = 0,
+ YDB_LAYER_NUM_INSERTS_FAIL,
+ YDB_LAYER_NUM_DELETES,
+ YDB_LAYER_NUM_DELETES_FAIL,
+ YDB_LAYER_NUM_UPDATES,
+ YDB_LAYER_NUM_UPDATES_FAIL,
+ YDB_LAYER_NUM_UPDATES_BROADCAST,
+ YDB_LAYER_NUM_UPDATES_BROADCAST_FAIL,
+ YDB_LAYER_NUM_MULTI_INSERTS,
+ YDB_LAYER_NUM_MULTI_INSERTS_FAIL,
+ YDB_LAYER_NUM_MULTI_DELETES,
+ YDB_LAYER_NUM_MULTI_DELETES_FAIL,
+ YDB_LAYER_NUM_MULTI_UPDATES,
+ YDB_LAYER_NUM_MULTI_UPDATES_FAIL,
+ YDB_WRITE_LAYER_STATUS_NUM_ROWS /* number of rows in this status array */
+} ydb_write_lock_layer_status_entry;
+
+typedef struct {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[YDB_WRITE_LAYER_STATUS_NUM_ROWS];
+} YDB_WRITE_LAYER_STATUS_S, *YDB_WRITE_LAYER_STATUS;
+
+void ydb_write_layer_get_status(YDB_WRITE_LAYER_STATUS statp);
+
+int toku_db_del(DB *db, DB_TXN *txn, DBT *key, uint32_t flags, bool holds_mo_lock);
+int toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_mo_lock);
+int autotxn_db_del(DB* db, DB_TXN* txn, DBT* key, uint32_t flags);
+int autotxn_db_put(DB* db, DB_TXN* txn, DBT* key, DBT* data, uint32_t flags);
+int autotxn_db_update(DB *db, DB_TXN *txn, const DBT *key, const DBT *update_function_extra, uint32_t flags);
+int autotxn_db_update_broadcast(DB *db, DB_TXN *txn, const DBT *update_function_extra, uint32_t flags);
+int env_put_multiple(
+ DB_ENV *env,
+ DB *src_db,
+ DB_TXN *txn,
+ const DBT *src_key, const DBT *src_val,
+ uint32_t num_dbs,
+ DB **db_array,
+ DBT_ARRAY *keys, DBT_ARRAY *vals,
+ uint32_t *flags_array
+ );
+int env_del_multiple(
+ DB_ENV *env,
+ DB *src_db,
+ DB_TXN *txn,
+ const DBT *src_key,
+ const DBT *src_val,
+ uint32_t num_dbs,
+ DB **db_array,
+ DBT_ARRAY *keys,
+ uint32_t *flags_array
+ );
+int env_update_multiple(
+ DB_ENV *env,
+ DB *src_db,
+ DB_TXN *txn,
+ DBT *old_src_key, DBT *old_src_data,
+ DBT *new_src_key, DBT *new_src_data,
+ uint32_t num_dbs,
+ DB **db_array,
+ uint32_t* flags_array,
+ uint32_t num_keys, DBT_ARRAY keys[],
+ uint32_t num_vals, DBT_ARRAY vals[]
+ );
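
For reference, a minimal sketch of the env->put_multiple() entry point declared above, in the degenerate case where the single target dictionary is also the source dictionary (so the scratch DBT_ARRAYs are never filled by a row-generation callback). The handles env, db, and txn are assumed to be set up already, and the environment is assumed to have a generate-row callback registered, since put_multiple rejects environments without one.

#include <db.h>
#include <string.h>

static int sketch_put_multiple(DB_ENV *env, DB *db, DB_TXN *txn,
                               DBT *src_key, DBT *src_val) {
    DB *db_array[1] = { db };
    uint32_t flags_array[1] = { 0 };   // 0 == plain overwrite semantics

    // Scratch space for generated rows; unused here because the only target
    // is the source dictionary itself.
    DBT_ARRAY keys[1];
    DBT_ARRAY vals[1];
    memset(keys, 0, sizeof keys);
    memset(vals, 0, sizeof vals);

    return env->put_multiple(env, db, txn, src_key, src_val,
                             1, db_array, keys, vals, flags_array);
}
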
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/AUTHORS b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/AUTHORS
new file mode 100644
index 00000000..4858b377
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/AUTHORS
@@ -0,0 +1 @@
+opensource@google.com
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/CMakeLists.txt b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/CMakeLists.txt
new file mode 100644
index 00000000..c241f791
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/CMakeLists.txt
@@ -0,0 +1,24 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8.9)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+set(snappy_srcs
+ snappy
+ snappy-c
+ snappy-sinksource
+ snappy-stubs-internal
+ )
+
+add_definitions("-Wno-sign-compare -Wno-unused-function -Wno-unused-parameter -fvisibility=hidden -fPIC")
+
+## make the library, it's going to go into libtokudb.so so it needs
+## to be PIC
+add_library(snappy STATIC ${snappy_srcs})
+set_target_properties(snappy PROPERTIES POSITION_INDEPENDENT_CODE ON)
+install(
+ TARGETS snappy
+ DESTINATION lib
+ )
+install(
+ FILES snappy.h snappy-stubs-public.h
+ DESTINATION include
+ )
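
For reference, the vendored snappy library exposes a small C++ API in snappy.h; a caller might exercise a compress/uncompress round trip roughly as sketched below. The buffer contents are placeholders.

#include <snappy.h>
#include <string>
#include <cassert>

static void sketch_snappy_roundtrip() {
    const std::string original(1000, 'x');   // highly compressible input

    std::string compressed;
    snappy::Compress(original.data(), original.size(), &compressed);

    std::string restored;
    bool ok = snappy::Uncompress(compressed.data(), compressed.size(), &restored);
    assert(ok && restored == original);
}
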
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/COPYING b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/COPYING
new file mode 100644
index 00000000..4816c430
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/COPYING
@@ -0,0 +1,54 @@
+Copyright 2011, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+===
+
+Some of the benchmark data in util/zippy/testdata is licensed differently:
+
+ - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and
+ is licensed under the Creative Commons Attribution 3.0 license
+ (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/
+ for more information.
+
+ - kppkn.gtb is taken from the Gaviota chess tablebase set, and
+ is licensed under the MIT License. See
+ https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1
+ for more information.
+
+ - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper
+ “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA
+ Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro,
+ which is licensed under the CC-BY license. See
+    http://www.ploscompbiol.org/static/license for more information.
+
+ - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project
+ Gutenberg. The first three have expired copyrights and are in the public
+ domain; the latter does not have expired copyright, but is still in the
+ public domain according to the license information
+ (http://www.gutenberg.org/ebooks/53).
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/ChangeLog b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/ChangeLog
new file mode 100644
index 00000000..edd46dd7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/ChangeLog
@@ -0,0 +1,1916 @@
+------------------------------------------------------------------------
+r83 | snappy.mirrorbot@gmail.com | 2014-02-19 11:31:49 +0100 (Wed, 19 Feb 2014) | 9 lines
+
+Fix public issue 82: Stop distributing benchmark data files that have
+unclear or unsuitable licensing.
+
+In general, we replace the files we can with liberally licensed data,
+and remove all the others (in particular all the parts of the Canterbury
+corpus that are not clearly in the public domain). The replacements
+do not always have the exact same characteristics as the original ones,
+but they are more than good enough to be useful for benchmarking.
+
+------------------------------------------------------------------------
+r82 | snappy.mirrorbot@gmail.com | 2013-10-25 15:31:27 +0200 (Fri, 25 Oct 2013) | 8 lines
+
+Add support for padding in the Snappy framed format.
+
+This is specifically motivated by DICOM's demands that embedded data
+must be of an even number of bytes, but could in principle be used for
+any sort of padding/alignment needed.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r81 | snappy.mirrorbot@gmail.com | 2013-10-15 17:21:31 +0200 (Tue, 15 Oct 2013) | 4 lines
+
+Release Snappy 1.1.1.
+
+R=jeff
+
+------------------------------------------------------------------------
+r80 | snappy.mirrorbot@gmail.com | 2013-08-13 14:55:00 +0200 (Tue, 13 Aug 2013) | 6 lines
+
+Add autoconf tests for size_t and ssize_t. Sort-of resolves public issue 79;
+it would solve the problem if MSVC typically used autoconf. However, it gives
+a natural place (config.h) to put the typedef even for MSVC.
+
+R=jsbell
+
+------------------------------------------------------------------------
+r79 | snappy.mirrorbot@gmail.com | 2013-07-29 13:06:44 +0200 (Mon, 29 Jul 2013) | 14 lines
+
+When we compare the number of bytes produced with the offset for a
+backreference, make the signedness of the bytes produced clear,
+by sticking it into a size_t. This avoids a signed/unsigned compare
+warning from MSVC (public issue 71), and also is slightly clearer.
+
+Since the line is now so long the explanatory comment about the -1u
+trick has to go somewhere else anyway, I used the opportunity to
+explain it in slightly more detail.
+
+This is a purely stylistic change; the emitted assembler from GCC
+is identical.
+
+R=jeff
+
+------------------------------------------------------------------------
+r78 | snappy.mirrorbot@gmail.com | 2013-06-30 21:24:03 +0200 (Sun, 30 Jun 2013) | 111 lines
+
+In the fast path for decompressing literals, instead of checking
+whether there's 16 bytes free and then checking right afterwards
+(when having subtracted the literal size) that there are now
+5 bytes free, just check once for 21 bytes. This skips a compare
+and a branch; although it is easily predictable, it is still
+a few cycles on a fast path that we would like to get rid of.
+
+Benchmarking this yields very confusing results. On open-source
+GCC 4.8.1 on Haswell, we get exactly the expected results; the
+benchmarks where we hit the fast path for literals (in particular
+the two HTML benchmarks and the protobuf benchmark) give very nice
+speedups, and the others are not really affected.
+
+However, benchmarks with Google's GCC branch on other hardware
+is much less clear. It seems that we have a weak loss in some cases
+(and the win for the “typical” win cases are not nearly as clear),
+but that it depends on microarchitecture and plain luck in how we run
+the benchmark. Looking at the generated assembler, it seems that
+the removal of the if causes other large-scale changes in how the
+function is laid out, which makes it likely that this is just bad luck.
+
+Thus, we should keep this change, even though its exact current impact is
+unclear; it's a sensible change per se, and dropping it on the basis of
+microoptimization for a given compiler (or even branch of a compiler)
+would seem like a bad strategy in the long run.
+
+Microbenchmark results (all in 64-bit, opt mode):
+
+ Nehalem, Google GCC:
+
+ Benchmark Base (ns) New (ns) Improvement
+ ------------------------------------------------------------------------------
+ BM_UFlat/0 76747 75591 1.3GB/s html +1.5%
+ BM_UFlat/1 765756 757040 886.3MB/s urls +1.2%
+ BM_UFlat/2 10867 10893 10.9GB/s jpg -0.2%
+ BM_UFlat/3 124 131 1.4GB/s jpg_200 -5.3%
+ BM_UFlat/4 31663 31596 2.8GB/s pdf +0.2%
+ BM_UFlat/5 314162 308176 1.2GB/s html4 +1.9%
+ BM_UFlat/6 29668 29746 790.6MB/s cp -0.3%
+ BM_UFlat/7 12958 13386 796.4MB/s c -3.2%
+ BM_UFlat/8 3596 3682 966.0MB/s lsp -2.3%
+ BM_UFlat/9 1019193 1033493 953.3MB/s xls -1.4%
+ BM_UFlat/10 239 247 775.3MB/s xls_200 -3.2%
+ BM_UFlat/11 236411 240271 606.9MB/s txt1 -1.6%
+ BM_UFlat/12 206639 209768 571.2MB/s txt2 -1.5%
+ BM_UFlat/13 627803 635722 641.4MB/s txt3 -1.2%
+ BM_UFlat/14 845932 857816 538.2MB/s txt4 -1.4%
+ BM_UFlat/15 402107 391670 1.2GB/s bin +2.7%
+ BM_UFlat/16 283 279 683.6MB/s bin_200 +1.4%
+ BM_UFlat/17 46070 46815 781.5MB/s sum -1.6%
+ BM_UFlat/18 5053 5163 782.0MB/s man -2.1%
+ BM_UFlat/19 79721 76581 1.4GB/s pb +4.1%
+ BM_UFlat/20 251158 252330 697.5MB/s gaviota -0.5%
+ Sum of all benchmarks 4966150 4980396 -0.3%
+
+
+ Sandy Bridge, Google GCC:
+
+ Benchmark Base (ns) New (ns) Improvement
+ ------------------------------------------------------------------------------
+ BM_UFlat/0 42850 42182 2.3GB/s html +1.6%
+ BM_UFlat/1 525660 515816 1.3GB/s urls +1.9%
+ BM_UFlat/2 7173 7283 16.3GB/s jpg -1.5%
+ BM_UFlat/3 92 91 2.1GB/s jpg_200 +1.1%
+ BM_UFlat/4 15147 14872 5.9GB/s pdf +1.8%
+ BM_UFlat/5 199936 192116 2.0GB/s html4 +4.1%
+ BM_UFlat/6 12796 12443 1.8GB/s cp +2.8%
+ BM_UFlat/7 6588 6400 1.6GB/s c +2.9%
+ BM_UFlat/8 2010 1951 1.8GB/s lsp +3.0%
+ BM_UFlat/9 761124 763049 1.3GB/s xls -0.3%
+ BM_UFlat/10 186 189 1016.1MB/s xls_200 -1.6%
+ BM_UFlat/11 159354 158460 918.6MB/s txt1 +0.6%
+ BM_UFlat/12 139732 139950 856.1MB/s txt2 -0.2%
+ BM_UFlat/13 429917 425027 961.7MB/s txt3 +1.2%
+ BM_UFlat/14 585255 587324 785.8MB/s txt4 -0.4%
+ BM_UFlat/15 276186 266173 1.8GB/s bin +3.8%
+ BM_UFlat/16 205 207 925.5MB/s bin_200 -1.0%
+ BM_UFlat/17 24925 24935 1.4GB/s sum -0.0%
+ BM_UFlat/18 2632 2576 1.5GB/s man +2.2%
+ BM_UFlat/19 40546 39108 2.8GB/s pb +3.7%
+ BM_UFlat/20 175803 168209 1048.9MB/s gaviota +4.5%
+ Sum of all benchmarks 3408117 3368361 +1.2%
+
+
+ Haswell, upstream GCC 4.8.1:
+
+ Benchmark Base (ns) New (ns) Improvement
+ ------------------------------------------------------------------------------
+ BM_UFlat/0 46308 40641 2.3GB/s html +13.9%
+ BM_UFlat/1 513385 514706 1.3GB/s urls -0.3%
+ BM_UFlat/2 6197 6151 19.2GB/s jpg +0.7%
+ BM_UFlat/3 61 61 3.0GB/s jpg_200 +0.0%
+ BM_UFlat/4 13551 13429 6.5GB/s pdf +0.9%
+ BM_UFlat/5 198317 190243 2.0GB/s html4 +4.2%
+ BM_UFlat/6 14768 12560 1.8GB/s cp +17.6%
+ BM_UFlat/7 6453 6447 1.6GB/s c +0.1%
+ BM_UFlat/8 1991 1980 1.8GB/s lsp +0.6%
+ BM_UFlat/9 766947 770424 1.2GB/s xls -0.5%
+ BM_UFlat/10 170 169 1.1GB/s xls_200 +0.6%
+ BM_UFlat/11 164350 163554 888.7MB/s txt1 +0.5%
+ BM_UFlat/12 145444 143830 832.1MB/s txt2 +1.1%
+ BM_UFlat/13 437849 438413 929.2MB/s txt3 -0.1%
+ BM_UFlat/14 603587 605309 759.8MB/s txt4 -0.3%
+ BM_UFlat/15 249799 248067 1.9GB/s bin +0.7%
+ BM_UFlat/16 191 188 1011.4MB/s bin_200 +1.6%
+ BM_UFlat/17 26064 24778 1.4GB/s sum +5.2%
+ BM_UFlat/18 2620 2601 1.5GB/s man +0.7%
+ BM_UFlat/19 44551 37373 3.0GB/s pb +19.2%
+ BM_UFlat/20 165408 164584 1.0GB/s gaviota +0.5%
+ Sum of all benchmarks 3408011 3385508 +0.7%
+
+------------------------------------------------------------------------
+r77 | snappy.mirrorbot@gmail.com | 2013-06-14 23:42:26 +0200 (Fri, 14 Jun 2013) | 92 lines
+
+Make the two IncrementalCopy* functions take in an ssize_t instead of a len,
+in order to avoid having to do 32-to-64-bit signed conversions on a hot path
+during decompression. (Also fixes some MSVC warnings, mentioned in public
+issue 75, but more of those remain.) They cannot be size_t because we expect
+them to go negative and test for that.
+
+This saves a few movzwl instructions, yielding ~2% speedup in decompression.
+
+
+Sandy Bridge:
+
+Benchmark Base (ns) New (ns) Improvement
+-------------------------------------------------------------------------------------------------
+BM_UFlat/0 48009 41283 2.3GB/s html +16.3%
+BM_UFlat/1 531274 513419 1.3GB/s urls +3.5%
+BM_UFlat/2 7378 7062 16.8GB/s jpg +4.5%
+BM_UFlat/3 92 92 2.0GB/s jpg_200 +0.0%
+BM_UFlat/4 15057 14974 5.9GB/s pdf +0.6%
+BM_UFlat/5 204323 193140 2.0GB/s html4 +5.8%
+BM_UFlat/6 13282 12611 1.8GB/s cp +5.3%
+BM_UFlat/7 6511 6504 1.6GB/s c +0.1%
+BM_UFlat/8 2014 2030 1.7GB/s lsp -0.8%
+BM_UFlat/9 775909 768336 1.3GB/s xls +1.0%
+BM_UFlat/10 182 184 1043.2MB/s xls_200 -1.1%
+BM_UFlat/11 167352 161630 901.2MB/s txt1 +3.5%
+BM_UFlat/12 147393 142246 842.8MB/s txt2 +3.6%
+BM_UFlat/13 449960 432853 944.4MB/s txt3 +4.0%
+BM_UFlat/14 620497 594845 775.9MB/s txt4 +4.3%
+BM_UFlat/15 265610 267356 1.8GB/s bin -0.7%
+BM_UFlat/16 206 205 932.7MB/s bin_200 +0.5%
+BM_UFlat/17 25561 24730 1.4GB/s sum +3.4%
+BM_UFlat/18 2620 2644 1.5GB/s man -0.9%
+BM_UFlat/19 45766 38589 2.9GB/s pb +18.6%
+BM_UFlat/20 171107 169832 1039.5MB/s gaviota +0.8%
+Sum of all benchmarks 3500103 3394565 +3.1%
+
+
+Westmere:
+
+Benchmark Base (ns) New (ns) Improvement
+-------------------------------------------------------------------------------------------------
+BM_UFlat/0 72624 71526 1.3GB/s html +1.5%
+BM_UFlat/1 735821 722917 930.8MB/s urls +1.8%
+BM_UFlat/2 10450 10172 11.7GB/s jpg +2.7%
+BM_UFlat/3 117 117 1.6GB/s jpg_200 +0.0%
+BM_UFlat/4 29817 29648 3.0GB/s pdf +0.6%
+BM_UFlat/5 297126 293073 1.3GB/s html4 +1.4%
+BM_UFlat/6 28252 27994 842.0MB/s cp +0.9%
+BM_UFlat/7 12672 12391 862.1MB/s c +2.3%
+BM_UFlat/8 3507 3425 1040.9MB/s lsp +2.4%
+BM_UFlat/9 1004268 969395 1018.0MB/s xls +3.6%
+BM_UFlat/10 233 227 844.8MB/s xls_200 +2.6%
+BM_UFlat/11 230054 224981 647.8MB/s txt1 +2.3%
+BM_UFlat/12 201229 196447 610.5MB/s txt2 +2.4%
+BM_UFlat/13 609547 596761 685.3MB/s txt3 +2.1%
+BM_UFlat/14 824362 804821 573.8MB/s txt4 +2.4%
+BM_UFlat/15 371095 374899 1.3GB/s bin -1.0%
+BM_UFlat/16 267 267 717.8MB/s bin_200 +0.0%
+BM_UFlat/17 44623 43828 835.9MB/s sum +1.8%
+BM_UFlat/18 5077 4815 841.0MB/s man +5.4%
+BM_UFlat/19 74964 73210 1.5GB/s pb +2.4%
+BM_UFlat/20 237987 236745 746.0MB/s gaviota +0.5%
+Sum of all benchmarks 4794092 4697659 +2.1%
+
+
+Istanbul:
+
+Benchmark Base (ns) New (ns) Improvement
+-------------------------------------------------------------------------------------------------
+BM_UFlat/0 98614 96376 1020.4MB/s html +2.3%
+BM_UFlat/1 963740 953241 707.2MB/s urls +1.1%
+BM_UFlat/2 25042 24769 4.8GB/s jpg +1.1%
+BM_UFlat/3 180 180 1065.6MB/s jpg_200 +0.0%
+BM_UFlat/4 45942 45403 1.9GB/s pdf +1.2%
+BM_UFlat/5 400135 390226 1008.2MB/s html4 +2.5%
+BM_UFlat/6 37768 37392 631.9MB/s cp +1.0%
+BM_UFlat/7 18585 18200 588.2MB/s c +2.1%
+BM_UFlat/8 5751 5690 627.7MB/s lsp +1.1%
+BM_UFlat/9 1543154 1542209 641.4MB/s xls +0.1%
+BM_UFlat/10 381 388 494.6MB/s xls_200 -1.8%
+BM_UFlat/11 339715 331973 440.1MB/s txt1 +2.3%
+BM_UFlat/12 294807 289418 415.4MB/s txt2 +1.9%
+BM_UFlat/13 906160 884094 463.3MB/s txt3 +2.5%
+BM_UFlat/14 1224221 1198435 386.1MB/s txt4 +2.2%
+BM_UFlat/15 516277 502923 979.5MB/s bin +2.7%
+BM_UFlat/16 405 402 477.2MB/s bin_200 +0.7%
+BM_UFlat/17 61640 60621 605.6MB/s sum +1.7%
+BM_UFlat/18 7326 7383 549.5MB/s man -0.8%
+BM_UFlat/19 94720 92653 1.2GB/s pb +2.2%
+BM_UFlat/20 360435 346687 510.6MB/s gaviota +4.0%
+Sum of all benchmarks 6944998 6828663 +1.7%
+
+------------------------------------------------------------------------
+r76 | snappy.mirrorbot@gmail.com | 2013-06-13 18:19:52 +0200 (Thu, 13 Jun 2013) | 9 lines
+
+Add support for uncompressing to iovecs (scatter I/O).
+Windows does not have struct iovec defined anywhere,
+so we define our own version that's equal to what UNIX
+typically has.
+
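+The UNIX definition being mirrored is just the usual two-field structure,
+along these lines (sketch; see POSIX <sys/uio.h> for the canonical version):
+
+    #include <stddef.h>   // size_t
+
+    struct iovec {
+      void*  iov_base;   // starting address of this buffer
+      size_t iov_len;    // number of bytes in this buffer
+    };
+
+The uncompress-to-iovec entry points then take a pointer to an array of these
+plus an element count, and scatter the output across the buffers in order.
+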
+The bulk of this patch was contributed by Mohit Aron.
+
+R=jeff
+
+------------------------------------------------------------------------
+r75 | snappy.mirrorbot@gmail.com | 2013-06-12 21:51:15 +0200 (Wed, 12 Jun 2013) | 4 lines
+
+Some code reorganization needed for an internal change.
+
+R=fikes
+
+------------------------------------------------------------------------
+r74 | snappy.mirrorbot@gmail.com | 2013-04-09 17:33:30 +0200 (Tue, 09 Apr 2013) | 4 lines
+
+Supports truncated test data in zippy benchmark.
+
+R=sesse
+
+------------------------------------------------------------------------
+r73 | snappy.mirrorbot@gmail.com | 2013-02-05 15:36:15 +0100 (Tue, 05 Feb 2013) | 4 lines
+
+Release Snappy 1.1.0.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r72 | snappy.mirrorbot@gmail.com | 2013-02-05 15:30:05 +0100 (Tue, 05 Feb 2013) | 9 lines
+
+Make ./snappy_unittest pass without "srcdir" being defined.
+
+Previously, snappy_unittests would read from an absolute path /testdata/..;
+convert it to use a relative path instead.
+
+Patch from Marc-Antoine Ruel.
+
+R=maruel
+
+------------------------------------------------------------------------
+r71 | snappy.mirrorbot@gmail.com | 2013-01-18 13:16:36 +0100 (Fri, 18 Jan 2013) | 287 lines
+
+Increase the Zippy block size from 32 kB to 64 kB, winning ~3% density
+while being effectively performance neutral.
+
+The longer story about density is that we win 3-6% density on the benchmarks
+where this has any effect at all; many of the benchmarks (cp, c, lsp, man)
+are smaller than 32 kB and thus will have no effect. Binary data also seems
+to win little or nothing; of course, the already-compressed data wins nothing.
+The protobuf benchmark wins as much as ~18% depending on architecture,
+but I wouldn't be too sure that this is representative of protobuf data in
+general.
+
+As for performance, we lose a tiny amount since we get more tags (e.g., a long
+literal might be broken up into literal-copy-literal), but we win it back with
+less clearing of the hash table, and more opportunities to skip incompressible
+data (e.g. in the jpg benchmark). Decompression seems to get ever so slightly
+slower, again due to more tags. The total net change is about as close to zero
+as we can get, so the end effect seems to be simply more density and no
+real performance change.
+
+The comment about not changing kBlockSize, scary as it is, is not really
+relevant, since we're never going to have a block-level decompressor without
+explicitly marked blocks. Replace it with something more appropriate.
+
+This affects the framing format, but it's okay to change it since it basically
+has no users yet.
+
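+In code terms the change amounts to bumping one constant, roughly (the names
+here follow the Snappy source only approximately):
+
+    #include <stddef.h>
+
+    // Before: 32 kB blocks.
+    //   static const int kBlockLog = 15;
+    // After: 64 kB blocks, as described above.
+    static const int kBlockLog = 16;
+    static const size_t kBlockSize = 1 << kBlockLog;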
+
+Density (note that cp, c, lsp and man are all smaller than 32 kB):
+
+ Benchmark Description Base (%) New (%) Improvement
+ --------------------------------------------------------------
+ ZFlat/0 html 22.57 22.31 +5.6%
+ ZFlat/1 urls 50.89 47.77 +6.5%
+ ZFlat/2 jpg 99.88 99.87 +0.0%
+ ZFlat/3 pdf 82.13 82.07 +0.1%
+ ZFlat/4 html4 23.55 22.51 +4.6%
+ ZFlat/5 cp 48.12 48.12 +0.0%
+ ZFlat/6 c 42.40 42.40 +0.0%
+ ZFlat/7 lsp 48.37 48.37 +0.0%
+ ZFlat/8 xls 41.34 41.23 +0.3%
+ ZFlat/9 txt1 59.81 57.87 +3.4%
+ ZFlat/10 txt2 64.07 61.93 +3.5%
+ ZFlat/11 txt3 57.11 54.92 +4.0%
+ ZFlat/12 txt4 68.35 66.22 +3.2%
+ ZFlat/13 bin 18.21 18.11 +0.6%
+ ZFlat/14 sum 51.88 48.96 +6.0%
+ ZFlat/15 man 59.36 59.36 +0.0%
+ ZFlat/16 pb 23.15 19.64 +17.9%
+ ZFlat/17 gaviota 38.27 37.72 +1.5%
+ Geometric mean 45.51 44.15 +3.1%
+
+
+Microbenchmarks (64-bit, opt):
+
+Westmere 2.8 GHz:
+
+ Benchmark Base (ns) New (ns) Improvement
+ -------------------------------------------------------------------------------------------------
+ BM_UFlat/0 75342 75027 1.3GB/s html +0.4%
+ BM_UFlat/1 723767 744269 899.6MB/s urls -2.8%
+ BM_UFlat/2 10072 10072 11.7GB/s jpg +0.0%
+ BM_UFlat/3 30747 30388 2.9GB/s pdf +1.2%
+ BM_UFlat/4 307353 306063 1.2GB/s html4 +0.4%
+ BM_UFlat/5 28593 28743 816.3MB/s cp -0.5%
+ BM_UFlat/6 12958 12998 818.1MB/s c -0.3%
+ BM_UFlat/7 3700 3792 935.8MB/s lsp -2.4%
+ BM_UFlat/8 999685 999905 982.1MB/s xls -0.0%
+ BM_UFlat/9 232954 230079 630.4MB/s txt1 +1.2%
+ BM_UFlat/10 200785 201468 592.6MB/s txt2 -0.3%
+ BM_UFlat/11 617267 610968 666.1MB/s txt3 +1.0%
+ BM_UFlat/12 821595 822475 558.7MB/s txt4 -0.1%
+ BM_UFlat/13 377097 377632 1.3GB/s bin -0.1%
+ BM_UFlat/14 45476 45260 805.8MB/s sum +0.5%
+ BM_UFlat/15 4985 5003 805.7MB/s man -0.4%
+ BM_UFlat/16 80813 77494 1.4GB/s pb +4.3%
+ BM_UFlat/17 251792 241553 727.7MB/s gaviota +4.2%
+ BM_UValidate/0 40343 40354 2.4GB/s html -0.0%
+ BM_UValidate/1 426890 451574 1.4GB/s urls -5.5%
+ BM_UValidate/2 187 179 661.9GB/s jpg +4.5%
+ BM_UValidate/3 13783 13827 6.4GB/s pdf -0.3%
+ BM_UValidate/4 162393 163335 2.3GB/s html4 -0.6%
+ BM_UDataBuffer/0 93756 93302 1046.7MB/s html +0.5%
+ BM_UDataBuffer/1 886714 916292 730.7MB/s urls -3.2%
+ BM_UDataBuffer/2 15861 16401 7.2GB/s jpg -3.3%
+ BM_UDataBuffer/3 38934 39224 2.2GB/s pdf -0.7%
+ BM_UDataBuffer/4 381008 379428 1029.5MB/s html4 +0.4%
+ BM_UCord/0 92528 91098 1072.0MB/s html +1.6%
+ BM_UCord/1 858421 885287 756.3MB/s urls -3.0%
+ BM_UCord/2 13140 13464 8.8GB/s jpg -2.4%
+ BM_UCord/3 39012 37773 2.3GB/s pdf +3.3%
+ BM_UCord/4 376869 371267 1052.1MB/s html4 +1.5%
+ BM_UCordString/0 75810 75303 1.3GB/s html +0.7%
+ BM_UCordString/1 735290 753841 888.2MB/s urls -2.5%
+ BM_UCordString/2 11945 13113 9.0GB/s jpg -8.9%
+ BM_UCordString/3 33901 32562 2.7GB/s pdf +4.1%
+ BM_UCordString/4 310985 309390 1.2GB/s html4 +0.5%
+ BM_UCordValidate/0 40952 40450 2.4GB/s html +1.2%
+ BM_UCordValidate/1 433842 456531 1.4GB/s urls -5.0%
+ BM_UCordValidate/2 1179 1173 100.8GB/s jpg +0.5%
+ BM_UCordValidate/3 14481 14392 6.1GB/s pdf +0.6%
+ BM_UCordValidate/4 164364 164151 2.3GB/s html4 +0.1%
+ BM_ZFlat/0 160610 156601 623.6MB/s html (22.31 %) +2.6%
+ BM_ZFlat/1 1995238 1993582 335.9MB/s urls (47.77 %) +0.1%
+ BM_ZFlat/2 30133 24983 4.7GB/s jpg (99.87 %) +20.6%
+ BM_ZFlat/3 74453 73128 1.2GB/s pdf (82.07 %) +1.8%
+ BM_ZFlat/4 647674 633729 616.4MB/s html4 (22.51 %) +2.2%
+ BM_ZFlat/5 76259 76090 308.4MB/s cp (48.12 %) +0.2%
+ BM_ZFlat/6 31106 31084 342.1MB/s c (42.40 %) +0.1%
+ BM_ZFlat/7 10507 10443 339.8MB/s lsp (48.37 %) +0.6%
+ BM_ZFlat/8 1811047 1793325 547.6MB/s xls (41.23 %) +1.0%
+ BM_ZFlat/9 597903 581793 249.3MB/s txt1 (57.87 %) +2.8%
+ BM_ZFlat/10 525320 514522 232.0MB/s txt2 (61.93 %) +2.1%
+ BM_ZFlat/11 1596591 1551636 262.3MB/s txt3 (54.92 %) +2.9%
+ BM_ZFlat/12 2134523 2094033 219.5MB/s txt4 (66.22 %) +1.9%
+ BM_ZFlat/13 593024 587869 832.6MB/s bin (18.11 %) +0.9%
+ BM_ZFlat/14 114746 110666 329.5MB/s sum (48.96 %) +3.7%
+ BM_ZFlat/15 14376 14485 278.3MB/s man (59.36 %) -0.8%
+ BM_ZFlat/16 167908 150070 753.6MB/s pb (19.64 %) +11.9%
+ BM_ZFlat/17 460228 442253 397.5MB/s gaviota (37.72 %) +4.1%
+ BM_ZCord/0 164896 160241 609.4MB/s html +2.9%
+ BM_ZCord/1 2070239 2043492 327.7MB/s urls +1.3%
+ BM_ZCord/2 54402 47002 2.5GB/s jpg +15.7%
+ BM_ZCord/3 85871 83832 1073.1MB/s pdf +2.4%
+ BM_ZCord/4 664078 648825 602.0MB/s html4 +2.4%
+ BM_ZDataBuffer/0 174874 172549 566.0MB/s html +1.3%
+ BM_ZDataBuffer/1 2134410 2139173 313.0MB/s urls -0.2%
+ BM_ZDataBuffer/2 71911 69551 1.7GB/s jpg +3.4%
+ BM_ZDataBuffer/3 98236 99727 902.1MB/s pdf -1.5%
+ BM_ZDataBuffer/4 710776 699104 558.8MB/s html4 +1.7%
+ Sum of all benchmarks 27358908 27200688 +0.6%
+
+
+Sandy Bridge 2.6 GHz:
+
+ Benchmark Base (ns) New (ns) Improvement
+ -------------------------------------------------------------------------------------------------
+ BM_UFlat/0 49356 49018 1.9GB/s html +0.7%
+ BM_UFlat/1 516764 531955 1.2GB/s urls -2.9%
+ BM_UFlat/2 6982 7304 16.2GB/s jpg -4.4%
+ BM_UFlat/3 15285 15598 5.6GB/s pdf -2.0%
+ BM_UFlat/4 206557 206669 1.8GB/s html4 -0.1%
+ BM_UFlat/5 13681 13567 1.7GB/s cp +0.8%
+ BM_UFlat/6 6571 6592 1.6GB/s c -0.3%
+ BM_UFlat/7 2008 1994 1.7GB/s lsp +0.7%
+ BM_UFlat/8 775700 773286 1.2GB/s xls +0.3%
+ BM_UFlat/9 165578 164480 881.8MB/s txt1 +0.7%
+ BM_UFlat/10 143707 144139 828.2MB/s txt2 -0.3%
+ BM_UFlat/11 443026 436281 932.8MB/s txt3 +1.5%
+ BM_UFlat/12 603129 595856 771.2MB/s txt4 +1.2%
+ BM_UFlat/13 271682 270450 1.8GB/s bin +0.5%
+ BM_UFlat/14 26200 25666 1.4GB/s sum +2.1%
+ BM_UFlat/15 2620 2608 1.5GB/s man +0.5%
+ BM_UFlat/16 48908 47756 2.3GB/s pb +2.4%
+ BM_UFlat/17 174638 170346 1031.9MB/s gaviota +2.5%
+ BM_UValidate/0 31922 31898 3.0GB/s html +0.1%
+ BM_UValidate/1 341265 363554 1.8GB/s urls -6.1%
+ BM_UValidate/2 160 151 782.8GB/s jpg +6.0%
+ BM_UValidate/3 10402 10380 8.5GB/s pdf +0.2%
+ BM_UValidate/4 129490 130587 2.9GB/s html4 -0.8%
+ BM_UDataBuffer/0 59383 58736 1.6GB/s html +1.1%
+ BM_UDataBuffer/1 619222 637786 1049.8MB/s urls -2.9%
+ BM_UDataBuffer/2 10775 11941 9.9GB/s jpg -9.8%
+ BM_UDataBuffer/3 18002 17930 4.9GB/s pdf +0.4%
+ BM_UDataBuffer/4 259182 259306 1.5GB/s html4 -0.0%
+ BM_UCord/0 59379 57814 1.6GB/s html +2.7%
+ BM_UCord/1 598456 615162 1088.4MB/s urls -2.7%
+ BM_UCord/2 8519 8628 13.7GB/s jpg -1.3%
+ BM_UCord/3 18123 17537 5.0GB/s pdf +3.3%
+ BM_UCord/4 252375 252331 1.5GB/s html4 +0.0%
+ BM_UCordString/0 49494 49790 1.9GB/s html -0.6%
+ BM_UCordString/1 524659 541803 1.2GB/s urls -3.2%
+ BM_UCordString/2 8206 8354 14.2GB/s jpg -1.8%
+ BM_UCordString/3 17235 16537 5.3GB/s pdf +4.2%
+ BM_UCordString/4 210188 211072 1.8GB/s html4 -0.4%
+ BM_UCordValidate/0 31956 31587 3.0GB/s html +1.2%
+ BM_UCordValidate/1 340828 362141 1.8GB/s urls -5.9%
+ BM_UCordValidate/2 783 744 158.9GB/s jpg +5.2%
+ BM_UCordValidate/3 10543 10462 8.4GB/s pdf +0.8%
+ BM_UCordValidate/4 130150 129789 2.9GB/s html4 +0.3%
+ BM_ZFlat/0 113873 111200 878.2MB/s html (22.31 %) +2.4%
+ BM_ZFlat/1 1473023 1489858 449.4MB/s urls (47.77 %) -1.1%
+ BM_ZFlat/2 23569 19486 6.1GB/s jpg (99.87 %) +21.0%
+ BM_ZFlat/3 49178 48046 1.8GB/s pdf (82.07 %) +2.4%
+ BM_ZFlat/4 475063 469394 832.2MB/s html4 (22.51 %) +1.2%
+ BM_ZFlat/5 46910 46816 501.2MB/s cp (48.12 %) +0.2%
+ BM_ZFlat/6 16883 16916 628.6MB/s c (42.40 %) -0.2%
+ BM_ZFlat/7 5381 5447 651.5MB/s lsp (48.37 %) -1.2%
+ BM_ZFlat/8 1466870 1473861 666.3MB/s xls (41.23 %) -0.5%
+ BM_ZFlat/9 468006 464101 312.5MB/s txt1 (57.87 %) +0.8%
+ BM_ZFlat/10 408157 408957 291.9MB/s txt2 (61.93 %) -0.2%
+ BM_ZFlat/11 1253348 1232910 330.1MB/s txt3 (54.92 %) +1.7%
+ BM_ZFlat/12 1702373 1702977 269.8MB/s txt4 (66.22 %) -0.0%
+ BM_ZFlat/13 439792 438557 1116.0MB/s bin (18.11 %) +0.3%
+ BM_ZFlat/14 80766 78851 462.5MB/s sum (48.96 %) +2.4%
+ BM_ZFlat/15 7420 7542 534.5MB/s man (59.36 %) -1.6%
+ BM_ZFlat/16 112043 100126 1.1GB/s pb (19.64 %) +11.9%
+ BM_ZFlat/17 368877 357703 491.4MB/s gaviota (37.72 %) +3.1%
+ BM_ZCord/0 116402 113564 859.9MB/s html +2.5%
+ BM_ZCord/1 1507156 1519911 440.5MB/s urls -0.8%
+ BM_ZCord/2 39860 33686 3.5GB/s jpg +18.3%
+ BM_ZCord/3 56211 54694 1.6GB/s pdf +2.8%
+ BM_ZCord/4 485594 479212 815.1MB/s html4 +1.3%
+ BM_ZDataBuffer/0 123185 121572 803.3MB/s html +1.3%
+ BM_ZDataBuffer/1 1569111 1589380 421.3MB/s urls -1.3%
+ BM_ZDataBuffer/2 53143 49556 2.4GB/s jpg +7.2%
+ BM_ZDataBuffer/3 65725 66826 1.3GB/s pdf -1.6%
+ BM_ZDataBuffer/4 517871 514750 758.9MB/s html4 +0.6%
+ Sum of all benchmarks 20258879 20315484 -0.3%
+
+
+AMD Istanbul 2.4 GHz:
+
+ Benchmark Base (ns) New (ns) Improvement
+ -------------------------------------------------------------------------------------------------
+ BM_UFlat/0 97120 96585 1011.1MB/s html +0.6%
+ BM_UFlat/1 917473 948016 706.3MB/s urls -3.2%
+ BM_UFlat/2 21496 23938 4.9GB/s jpg -10.2%
+ BM_UFlat/3 44751 45639 1.9GB/s pdf -1.9%
+ BM_UFlat/4 391950 391413 998.0MB/s html4 +0.1%
+ BM_UFlat/5 37366 37201 630.7MB/s cp +0.4%
+ BM_UFlat/6 18350 18318 580.5MB/s c +0.2%
+ BM_UFlat/7 5672 5661 626.9MB/s lsp +0.2%
+ BM_UFlat/8 1533390 1529441 642.1MB/s xls +0.3%
+ BM_UFlat/9 335477 336553 431.0MB/s txt1 -0.3%
+ BM_UFlat/10 285140 292080 408.7MB/s txt2 -2.4%
+ BM_UFlat/11 888507 894758 454.9MB/s txt3 -0.7%
+ BM_UFlat/12 1187643 1210928 379.5MB/s txt4 -1.9%
+ BM_UFlat/13 493717 507447 964.5MB/s bin -2.7%
+ BM_UFlat/14 61740 60870 599.1MB/s sum +1.4%
+ BM_UFlat/15 7211 7187 560.9MB/s man +0.3%
+ BM_UFlat/16 97435 93100 1.2GB/s pb +4.7%
+ BM_UFlat/17 362662 356395 493.2MB/s gaviota +1.8%
+ BM_UValidate/0 47475 47118 2.0GB/s html +0.8%
+ BM_UValidate/1 501304 529741 1.2GB/s urls -5.4%
+ BM_UValidate/2 276 243 486.2GB/s jpg +13.6%
+ BM_UValidate/3 16361 16261 5.4GB/s pdf +0.6%
+ BM_UValidate/4 190741 190353 2.0GB/s html4 +0.2%
+ BM_UDataBuffer/0 111080 109771 889.6MB/s html +1.2%
+ BM_UDataBuffer/1 1051035 1085999 616.5MB/s urls -3.2%
+ BM_UDataBuffer/2 25801 25463 4.6GB/s jpg +1.3%
+ BM_UDataBuffer/3 50493 49946 1.8GB/s pdf +1.1%
+ BM_UDataBuffer/4 447258 444138 879.5MB/s html4 +0.7%
+ BM_UCord/0 109350 107909 905.0MB/s html +1.3%
+ BM_UCord/1 1023396 1054964 634.7MB/s urls -3.0%
+ BM_UCord/2 25292 24371 4.9GB/s jpg +3.8%
+ BM_UCord/3 48955 49736 1.8GB/s pdf -1.6%
+ BM_UCord/4 440452 437331 893.2MB/s html4 +0.7%
+ BM_UCordString/0 98511 98031 996.2MB/s html +0.5%
+ BM_UCordString/1 933230 963495 694.9MB/s urls -3.1%
+ BM_UCordString/2 23311 24076 4.9GB/s jpg -3.2%
+ BM_UCordString/3 45568 46196 1.9GB/s pdf -1.4%
+ BM_UCordString/4 397791 396934 984.1MB/s html4 +0.2%
+ BM_UCordValidate/0 47537 46921 2.0GB/s html +1.3%
+ BM_UCordValidate/1 505071 532716 1.2GB/s urls -5.2%
+ BM_UCordValidate/2 1663 1621 72.9GB/s jpg +2.6%
+ BM_UCordValidate/3 16890 16926 5.2GB/s pdf -0.2%
+ BM_UCordValidate/4 192365 191984 2.0GB/s html4 +0.2%
+ BM_ZFlat/0 184708 179103 545.3MB/s html (22.31 %) +3.1%
+ BM_ZFlat/1 2293864 2302950 290.7MB/s urls (47.77 %) -0.4%
+ BM_ZFlat/2 52852 47618 2.5GB/s jpg (99.87 %) +11.0%
+ BM_ZFlat/3 100766 96179 935.3MB/s pdf (82.07 %) +4.8%
+ BM_ZFlat/4 741220 727977 536.6MB/s html4 (22.51 %) +1.8%
+ BM_ZFlat/5 85402 85418 274.7MB/s cp (48.12 %) -0.0%
+ BM_ZFlat/6 36558 36494 291.4MB/s c (42.40 %) +0.2%
+ BM_ZFlat/7 12706 12507 283.7MB/s lsp (48.37 %) +1.6%
+ BM_ZFlat/8 2336823 2335688 420.5MB/s xls (41.23 %) +0.0%
+ BM_ZFlat/9 701804 681153 212.9MB/s txt1 (57.87 %) +3.0%
+ BM_ZFlat/10 606700 597194 199.9MB/s txt2 (61.93 %) +1.6%
+ BM_ZFlat/11 1852283 1803238 225.7MB/s txt3 (54.92 %) +2.7%
+ BM_ZFlat/12 2475527 2443354 188.1MB/s txt4 (66.22 %) +1.3%
+ BM_ZFlat/13 694497 696654 702.6MB/s bin (18.11 %) -0.3%
+ BM_ZFlat/14 136929 129855 280.8MB/s sum (48.96 %) +5.4%
+ BM_ZFlat/15 17172 17124 235.4MB/s man (59.36 %) +0.3%
+ BM_ZFlat/16 190364 171763 658.4MB/s pb (19.64 %) +10.8%
+ BM_ZFlat/17 567285 555190 316.6MB/s gaviota (37.72 %) +2.2%
+ BM_ZCord/0 193490 187031 522.1MB/s html +3.5%
+ BM_ZCord/1 2427537 2415315 277.2MB/s urls +0.5%
+ BM_ZCord/2 85378 81412 1.5GB/s jpg +4.9%
+ BM_ZCord/3 121898 119419 753.3MB/s pdf +2.1%
+ BM_ZCord/4 779564 762961 512.0MB/s html4 +2.2%
+ BM_ZDataBuffer/0 213820 207272 471.1MB/s html +3.2%
+ BM_ZDataBuffer/1 2589010 2586495 258.9MB/s urls +0.1%
+ BM_ZDataBuffer/2 121871 118885 1018.4MB/s jpg +2.5%
+ BM_ZDataBuffer/3 145382 145986 616.2MB/s pdf -0.4%
+ BM_ZDataBuffer/4 868117 852754 458.1MB/s html4 +1.8%
+ Sum of all benchmarks 33771833 33744763 +0.1%
+
+------------------------------------------------------------------------
+r70 | snappy.mirrorbot@gmail.com | 2013-01-06 20:21:26 +0100 (Sun, 06 Jan 2013) | 6 lines
+
+Adjust the Snappy open-source distribution for the changes in Google's
+internal file API.
+
+R=sanjay
+
+
+------------------------------------------------------------------------
+r69 | snappy.mirrorbot@gmail.com | 2013-01-04 12:54:20 +0100 (Fri, 04 Jan 2013) | 15 lines
+
+Change a few ORs to additions where they don't matter. This helps the compiler
+use the LEA instruction more efficiently, since e.g. a + (b << 2) can be encoded
+as one instruction. Even more importantly, it can constant-fold the
+COPY_* enums together with the shifted negative constants, which also saves
+some instructions. (We don't need it for LITERAL, since it happens to be 0.)
+
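+The transformation itself is tiny; at the tag-emission sites it is essentially
+(sketch, not the exact emitter code):
+
+    #include <stdint.h>
+
+    enum { LITERAL = 0, COPY_1_BYTE_OFFSET = 1, COPY_2_BYTE_OFFSET = 2 };
+
+    // The low two bits hold the opcode, the upper bits hold (len - 1).
+    // Because the bit ranges do not overlap, '+' and '|' produce the same
+    // value, but with '+' the compiler can emit a single LEA and fold the
+    // enum constant together with the -1 from (len - 1).
+    static inline uint8_t EmitCopyTagSketch(int len) {
+      return COPY_2_BYTE_OFFSET + ((len - 1) << 2);   // was: ... | ((len - 1) << 2)
+    }
+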
+I am unsure why the compiler couldn't do this itself, but the theory is that
+it cannot prove that len-1 and len-4 cannot underflow/wrap, and thus can't
+do the optimization safely.
+
+The gains are small but measurable; 0.5-1.0% over the BM_Z* benchmarks
+(measured on Westmere, Sandy Bridge and Istanbul).
+
+R=sanjay
+
+------------------------------------------------------------------------
+r68 | snappy.mirrorbot@gmail.com | 2012-10-08 13:37:16 +0200 (Mon, 08 Oct 2012) | 5 lines
+
+Stop giving -Werror to automake, due to an incompatibility between current
+versions of libtool and automake on non-GNU platforms (e.g. Mac OS X).
+
+R=sanjay
+
+------------------------------------------------------------------------
+r67 | snappy.mirrorbot@gmail.com | 2012-08-17 15:54:47 +0200 (Fri, 17 Aug 2012) | 5 lines
+
+Fix public issue 66: Document GetUncompressedLength better, in particular that
+it leaves the source in a state that's not appropriate for RawUncompress.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r66 | snappy.mirrorbot@gmail.com | 2012-07-31 13:44:44 +0200 (Tue, 31 Jul 2012) | 5 lines
+
+Fix public issue 64: Check for <sys/time.h> at configure time,
+since MSVC seemingly does not have it.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r65 | snappy.mirrorbot@gmail.com | 2012-07-04 11:34:48 +0200 (Wed, 04 Jul 2012) | 10 lines
+
+Handle the case where gettimeofday() goes backwards or returns the same value
+twice; it could cause division by zero in the unit test framework.
+(We already had one fix for this in place, but it was incomplete.)
+
+This could in theory happen on any system, since there are few guarantees
+about gettimeofday(), but seems to only happen in practice on GNU/Hurd, where
+gettimeofday() is cached and only updated every so often.
+
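+The guard itself is just defensive clamping of the measured interval, along
+these lines (sketch only):
+
+    #include <stdint.h>
+
+    // If the clock stood still or went backwards between two samples,
+    // pretend at least one microsecond passed so that later divisions by
+    // the elapsed time cannot divide by zero.
+    static inline int64_t SafeElapsedUsec(int64_t start_usec, int64_t stop_usec) {
+      int64_t elapsed = stop_usec - start_usec;
+      return elapsed > 0 ? elapsed : 1;
+    }
+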
+R=sanjay
+
+------------------------------------------------------------------------
+r64 | snappy.mirrorbot@gmail.com | 2012-07-04 11:28:33 +0200 (Wed, 04 Jul 2012) | 6 lines
+
+Mark ARMv4 as not supporting unaligned accesses (not just ARMv5 and ARMv6);
+apparently Debian still targets these by default, giving us segfaults on
+armel.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r63 | snappy.mirrorbot@gmail.com | 2012-05-22 11:46:05 +0200 (Tue, 22 May 2012) | 5 lines
+
+Fix public bug #62: Remove an extraneous comma at the end of an enum list,
+causing compile errors when embedded in Mozilla on OpenBSD.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r62 | snappy.mirrorbot@gmail.com | 2012-05-22 11:32:50 +0200 (Tue, 22 May 2012) | 8 lines
+
+Snappy library no longer depends on iostream.
+
+Achieved by moving logging macro definitions to a test-only
+header file, and by changing non-test code to use assert,
+fprintf, and abort instead of LOG/CHECK macros.
+
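+The non-test replacement for the old stream-based checks boils down to
+something like this (sketch; the macro name is made up, and the real
+test-only header keeps richer variants):
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    // Abort with a plain fprintf instead of streaming to std::cerr, so the
+    // library itself no longer needs <iostream>.
+    #define SNAPPY_CHECK_SKETCH(cond)                                      \
+      do {                                                                 \
+        if (!(cond)) {                                                     \
+          fprintf(stderr, "%s:%d: check failed: %s\n", __FILE__, __LINE__, \
+                  #cond);                                                  \
+          abort();                                                         \
+        }                                                                  \
+      } while (0)
+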
+R=sesse
+
+------------------------------------------------------------------------
+r61 | snappy.mirrorbot@gmail.com | 2012-02-24 16:46:37 +0100 (Fri, 24 Feb 2012) | 4 lines
+
+Release Snappy 1.0.5.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r60 | snappy.mirrorbot@gmail.com | 2012-02-23 18:00:36 +0100 (Thu, 23 Feb 2012) | 57 lines
+
+For 32-bit platforms, do not try to accelerate multiple neighboring
+32-bit loads with a 64-bit load during compression (it's not a win).
+
+The main target for this optimization is ARM, but 32-bit x86 gets
+a small gain, too, although there is noise in the microbenchmarks.
+It's a no-op for 64-bit x86. It does not affect decompression.
+
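+Conceptually the change is a word-size switch in the helper that serves
+several neighbouring 4-byte reads; roughly (sketch, little-endian assumed for
+brevity, and the function name is illustrative):
+
+    #include <stdint.h>
+    #include <string.h>
+
+    static inline uint32_t Load32(const void* p) { uint32_t v; memcpy(&v, p, 4); return v; }
+    static inline uint64_t Load64(const void* p) { uint64_t v; memcpy(&v, p, 8); return v; }
+
+    #if defined(__x86_64__) || defined(_M_X64) || defined(__aarch64__)
+    // 64-bit targets: one 8-byte load can feed two adjacent 4-byte reads.
+    static inline uint32_t GetUint32AtOffsetSketch(const char* ip, int offset) {
+      return static_cast<uint32_t>(Load64(ip) >> (8 * offset));
+    }
+    #else
+    // 32-bit targets: the 8-byte load is split in two anyway, so just do
+    // the 4-byte load directly.
+    static inline uint32_t GetUint32AtOffsetSketch(const char* ip, int offset) {
+      return Load32(ip + offset);
+    }
+    #endif
+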
+Microbenchmark results on a Cortex-A9 1GHz, using g++ 4.6.2 (from
+Ubuntu/Linaro), -O2 -DNDEBUG -Wa,-march=armv7a -mtune=cortex-a9
+-mthumb-interwork, minimum 1000 iterations:
+
+ Benchmark Time(ns) CPU(ns) Iterations
+ ---------------------------------------------------
+ BM_ZFlat/0 1158277 1160000 1000 84.2MB/s html (23.57 %) [ +4.3%]
+ BM_ZFlat/1 14861782 14860000 1000 45.1MB/s urls (50.89 %) [ +1.1%]
+ BM_ZFlat/2 393595 390000 1000 310.5MB/s jpg (99.88 %) [ +0.0%]
+ BM_ZFlat/3 650583 650000 1000 138.4MB/s pdf (82.13 %) [ +3.1%]
+ BM_ZFlat/4 4661480 4660000 1000 83.8MB/s html4 (23.55 %) [ +4.3%]
+ BM_ZFlat/5 491973 490000 1000 47.9MB/s cp (48.12 %) [ +2.0%]
+ BM_ZFlat/6 193575 192678 1038 55.2MB/s c (42.40 %) [ +9.0%]
+ BM_ZFlat/7 62343 62754 3187 56.5MB/s lsp (48.37 %) [ +2.6%]
+ BM_ZFlat/8 17708468 17710000 1000 55.5MB/s xls (41.34 %) [ -0.3%]
+ BM_ZFlat/9 3755345 3760000 1000 38.6MB/s txt1 (59.81 %) [ +8.2%]
+ BM_ZFlat/10 3324217 3320000 1000 36.0MB/s txt2 (64.07 %) [ +4.2%]
+ BM_ZFlat/11 10139932 10140000 1000 40.1MB/s txt3 (57.11 %) [ +6.4%]
+ BM_ZFlat/12 13532109 13530000 1000 34.0MB/s txt4 (68.35 %) [ +5.0%]
+ BM_ZFlat/13 4690847 4690000 1000 104.4MB/s bin (18.21 %) [ +4.1%]
+ BM_ZFlat/14 830682 830000 1000 43.9MB/s sum (51.88 %) [ +1.2%]
+ BM_ZFlat/15 84784 85011 2235 47.4MB/s man (59.36 %) [ +1.1%]
+ BM_ZFlat/16 1293254 1290000 1000 87.7MB/s pb (23.15 %) [ +2.3%]
+ BM_ZFlat/17 2775155 2780000 1000 63.2MB/s gaviota (38.27 %) [+12.2%]
+
+Core i7 in 32-bit mode (only one run and 100 iterations, though, so noisy):
+
+ Benchmark Time(ns) CPU(ns) Iterations
+ ---------------------------------------------------
+ BM_ZFlat/0 227582 223464 3043 437.0MB/s html (23.57 %) [ +7.4%]
+ BM_ZFlat/1 2982430 2918455 233 229.4MB/s urls (50.89 %) [ +2.9%]
+ BM_ZFlat/2 46967 46658 15217 2.5GB/s jpg (99.88 %) [ +0.0%]
+ BM_ZFlat/3 115298 114864 5833 783.2MB/s pdf (82.13 %) [ +1.5%]
+ BM_ZFlat/4 913440 899743 778 434.2MB/s html4 (23.55 %) [ +0.3%]
+ BM_ZFlat/5 110302 108571 7000 216.1MB/s cp (48.12 %) [ +0.0%]
+ BM_ZFlat/6 44409 43372 15909 245.2MB/s c (42.40 %) [ +0.8%]
+ BM_ZFlat/7 15713 15643 46667 226.9MB/s lsp (48.37 %) [ +2.7%]
+ BM_ZFlat/8 2625539 2602230 269 377.4MB/s xls (41.34 %) [ +1.4%]
+ BM_ZFlat/9 808884 811429 875 178.8MB/s txt1 (59.81 %) [ -3.9%]
+ BM_ZFlat/10 709532 700000 1000 170.5MB/s txt2 (64.07 %) [ +0.0%]
+ BM_ZFlat/11 2177682 2162162 333 188.2MB/s txt3 (57.11 %) [ -1.4%]
+ BM_ZFlat/12 2849640 2840000 250 161.8MB/s txt4 (68.35 %) [ -1.4%]
+ BM_ZFlat/13 849760 835476 778 585.8MB/s bin (18.21 %) [ +1.2%]
+ BM_ZFlat/14 165940 164571 4375 221.6MB/s sum (51.88 %) [ +1.4%]
+ BM_ZFlat/15 20939 20571 35000 196.0MB/s man (59.36 %) [ +2.1%]
+ BM_ZFlat/16 239209 236544 2917 478.1MB/s pb (23.15 %) [ +4.2%]
+ BM_ZFlat/17 616206 610000 1000 288.2MB/s gaviota (38.27 %) [ -1.6%]
+
+R=sanjay
+
+------------------------------------------------------------------------
+r59 | snappy.mirrorbot@gmail.com | 2012-02-21 18:02:17 +0100 (Tue, 21 Feb 2012) | 107 lines
+
+Enable the use of unaligned loads and stores for ARM-based architectures
+where they are available (ARMv7 and higher). This gives a significant
+speed boost on ARM, both for compression and decompression.
+It should not affect x86 at all.
+
+There are more changes possible to speed up ARM, but it might not be
+that easy to do without hurting x86 or making the code uglier.
+Also, we do not try to use NEON yet.
+
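+The gating is conceptually just an architecture check around the
+unaligned-access helpers, roughly like this (sketch; it mirrors the
+cast-based style of the code at the time, and the predefine list is
+abbreviated):
+
+    #include <stdint.h>
+    #include <string.h>
+
+    #if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__) || \
+        defined(__i386__) || defined(__x86_64__)
+    // ARMv7 and newer tolerate unaligned 32-bit accesses, like x86.
+    static inline uint32_t UnalignedLoad32Sketch(const void* p) {
+      return *reinterpret_cast<const uint32_t*>(p);
+    }
+    #else
+    // Older ARM cores must assemble the value from byte loads.
+    static inline uint32_t UnalignedLoad32Sketch(const void* p) {
+      uint32_t v;
+      memcpy(&v, p, sizeof(v));
+      return v;
+    }
+    #endif
+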
+Microbenchmark results on a Cortex-A9 1GHz, using g++ 4.6.2 (from Ubuntu/Linaro),
+-O2 -DNDEBUG -Wa,-march=armv7a -mtune=cortex-a9 -mthumb-interwork:
+
+Benchmark Time(ns) CPU(ns) Iterations
+---------------------------------------------------
+BM_UFlat/0 524806 529100 378 184.6MB/s html [+33.6%]
+BM_UFlat/1 5139790 5200000 100 128.8MB/s urls [+28.8%]
+BM_UFlat/2 86540 84166 1901 1.4GB/s jpg [ +0.6%]
+BM_UFlat/3 215351 210176 904 428.0MB/s pdf [+29.8%]
+BM_UFlat/4 2144490 2100000 100 186.0MB/s html4 [+33.3%]
+BM_UFlat/5 194482 190000 1000 123.5MB/s cp [+36.2%]
+BM_UFlat/6 91843 90175 2107 117.9MB/s c [+38.6%]
+BM_UFlat/7 28535 28426 6684 124.8MB/s lsp [+34.7%]
+BM_UFlat/8 9206600 9200000 100 106.7MB/s xls [+42.4%]
+BM_UFlat/9 1865273 1886792 106 76.9MB/s txt1 [+32.5%]
+BM_UFlat/10 1576809 1587301 126 75.2MB/s txt2 [+32.3%]
+BM_UFlat/11 4968450 4900000 100 83.1MB/s txt3 [+32.7%]
+BM_UFlat/12 6673970 6700000 100 68.6MB/s txt4 [+32.8%]
+BM_UFlat/13 2391470 2400000 100 203.9MB/s bin [+29.2%]
+BM_UFlat/14 334601 344827 522 105.8MB/s sum [+30.6%]
+BM_UFlat/15 37404 38080 5252 105.9MB/s man [+33.8%]
+BM_UFlat/16 535470 540540 370 209.2MB/s pb [+31.2%]
+BM_UFlat/17 1875245 1886792 106 93.2MB/s gaviota [+37.8%]
+BM_UValidate/0 178425 179533 1114 543.9MB/s html [ +2.7%]
+BM_UValidate/1 2100450 2000000 100 334.8MB/s urls [ +5.0%]
+BM_UValidate/2 1039 1044 172413 113.3GB/s jpg [ +3.4%]
+BM_UValidate/3 59423 59470 3363 1.5GB/s pdf [ +7.8%]
+BM_UValidate/4 760716 766283 261 509.8MB/s html4 [ +6.5%]
+BM_ZFlat/0 1204632 1204819 166 81.1MB/s html (23.57 %) [+32.8%]
+BM_ZFlat/1 15656190 15600000 100 42.9MB/s urls (50.89 %) [+27.6%]
+BM_ZFlat/2 403336 410677 487 294.8MB/s jpg (99.88 %) [+16.5%]
+BM_ZFlat/3 664073 671140 298 134.0MB/s pdf (82.13 %) [+28.4%]
+BM_ZFlat/4 4961940 4900000 100 79.7MB/s html4 (23.55 %) [+30.6%]
+BM_ZFlat/5 500664 501253 399 46.8MB/s cp (48.12 %) [+33.4%]
+BM_ZFlat/6 217276 215982 926 49.2MB/s c (42.40 %) [+25.0%]
+BM_ZFlat/7 64122 65487 3054 54.2MB/s lsp (48.37 %) [+36.1%]
+BM_ZFlat/8 18045730 18000000 100 54.6MB/s xls (41.34 %) [+34.4%]
+BM_ZFlat/9 4051530 4000000 100 36.3MB/s txt1 (59.81 %) [+25.0%]
+BM_ZFlat/10 3451800 3500000 100 34.1MB/s txt2 (64.07 %) [+25.7%]
+BM_ZFlat/11 11052340 11100000 100 36.7MB/s txt3 (57.11 %) [+24.3%]
+BM_ZFlat/12 14538690 14600000 100 31.5MB/s txt4 (68.35 %) [+24.7%]
+BM_ZFlat/13 5041850 5000000 100 97.9MB/s bin (18.21 %) [+32.0%]
+BM_ZFlat/14 908840 909090 220 40.1MB/s sum (51.88 %) [+22.2%]
+BM_ZFlat/15 86921 86206 1972 46.8MB/s man (59.36 %) [+42.2%]
+BM_ZFlat/16 1312315 1315789 152 86.0MB/s pb (23.15 %) [+34.5%]
+BM_ZFlat/17 3173120 3200000 100 54.9MB/s gaviota (38.27%) [+28.1%]
+
+
+The move from 64-bit to 32-bit operations for the copies also affected 32-bit x86;
+positive on the decompression side, and slightly negative on the compression side
+(unless that is noise; I only ran once):
+
+Benchmark Time(ns) CPU(ns) Iterations
+-----------------------------------------------------
+BM_UFlat/0 86279 86140 7778 1.1GB/s html [ +7.5%]
+BM_UFlat/1 839265 822622 778 813.9MB/s urls [ +9.4%]
+BM_UFlat/2 9180 9143 87500 12.9GB/s jpg [ +1.2%]
+BM_UFlat/3 35080 35000 20000 2.5GB/s pdf [+10.1%]
+BM_UFlat/4 350318 345000 2000 1.1GB/s html4 [ +7.0%]
+BM_UFlat/5 33808 33472 21212 701.0MB/s cp [ +9.0%]
+BM_UFlat/6 15201 15214 46667 698.9MB/s c [+14.9%]
+BM_UFlat/7 4652 4651 159091 762.9MB/s lsp [ +7.5%]
+BM_UFlat/8 1285551 1282528 538 765.7MB/s xls [+10.7%]
+BM_UFlat/9 282510 281690 2414 514.9MB/s txt1 [+13.6%]
+BM_UFlat/10 243494 239286 2800 498.9MB/s txt2 [+14.4%]
+BM_UFlat/11 743625 740000 1000 550.0MB/s txt3 [+14.3%]
+BM_UFlat/12 999441 989717 778 464.3MB/s txt4 [+16.1%]
+BM_UFlat/13 412402 410076 1707 1.2GB/s bin [ +7.3%]
+BM_UFlat/14 54876 54000 10000 675.3MB/s sum [+13.0%]
+BM_UFlat/15 6146 6100 100000 660.8MB/s man [+14.8%]
+BM_UFlat/16 90496 90286 8750 1.2GB/s pb [ +4.0%]
+BM_UFlat/17 292650 292000 2500 602.0MB/s gaviota [+18.1%]
+BM_UValidate/0 49620 49699 14286 1.9GB/s html [ +0.0%]
+BM_UValidate/1 501371 500000 1000 1.3GB/s urls [ +0.0%]
+BM_UValidate/2 232 227 3043478 521.5GB/s jpg [ +1.3%]
+BM_UValidate/3 17250 17143 43750 5.1GB/s pdf [ -1.3%]
+BM_UValidate/4 198643 200000 3500 1.9GB/s html4 [ -0.9%]
+BM_ZFlat/0 227128 229415 3182 425.7MB/s html (23.57 %) [ -1.4%]
+BM_ZFlat/1 2970089 2960000 250 226.2MB/s urls (50.89 %) [ -1.9%]
+BM_ZFlat/2 45683 44999 15556 2.6GB/s jpg (99.88 %) [ +2.2%]
+BM_ZFlat/3 114661 113136 6364 795.1MB/s pdf (82.13 %) [ -1.5%]
+BM_ZFlat/4 919702 914286 875 427.2MB/s html4 (23.55%) [ -1.3%]
+BM_ZFlat/5 108189 108422 6364 216.4MB/s cp (48.12 %) [ -1.2%]
+BM_ZFlat/6 44525 44000 15909 241.7MB/s c (42.40 %) [ -2.9%]
+BM_ZFlat/7 15973 15857 46667 223.8MB/s lsp (48.37 %) [ +0.0%]
+BM_ZFlat/8 2677888 2639405 269 372.1MB/s xls (41.34 %) [ -1.4%]
+BM_ZFlat/9 800715 780000 1000 186.0MB/s txt1 (59.81 %) [ -0.4%]
+BM_ZFlat/10 700089 700000 1000 170.5MB/s txt2 (64.07 %) [ -2.9%]
+BM_ZFlat/11 2159356 2138365 318 190.3MB/s txt3 (57.11 %) [ -0.3%]
+BM_ZFlat/12 2796143 2779923 259 165.3MB/s txt4 (68.35 %) [ -1.4%]
+BM_ZFlat/13 856458 835476 778 585.8MB/s bin (18.21 %) [ -0.1%]
+BM_ZFlat/14 166908 166857 4375 218.6MB/s sum (51.88 %) [ -1.4%]
+BM_ZFlat/15 21181 20857 35000 193.3MB/s man (59.36 %) [ -0.8%]
+BM_ZFlat/16 244009 239973 2917 471.3MB/s pb (23.15 %) [ -1.4%]
+BM_ZFlat/17 596362 590000 1000 297.9MB/s gaviota (38.27%) [ +0.0%]
+
+R=sanjay
+
+------------------------------------------------------------------------
+r58 | snappy.mirrorbot@gmail.com | 2012-02-11 23:11:22 +0100 (Sat, 11 Feb 2012) | 9 lines
+
+Lower the size allocated in the "corrupted input" unit test from 256 MB
+to 2 MB. This fixes issues with running the unit test on platforms with
+little RAM (e.g. some ARM boards).
+
+Also, reactivate the 2 MB test for 64-bit platforms; there's no good
+reason why it shouldn't be.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r57 | snappy.mirrorbot@gmail.com | 2012-01-08 18:55:48 +0100 (Sun, 08 Jan 2012) | 2 lines
+
+Minor refactoring to accommodate changes in Google's internal code tree.
+
+------------------------------------------------------------------------
+r56 | snappy.mirrorbot@gmail.com | 2012-01-04 14:10:46 +0100 (Wed, 04 Jan 2012) | 19 lines
+
+Fix public issue 57: Fix most warnings with -Wall, mostly signed/unsigned
+warnings. There are still some in the unit test, but the main .cc file should
+be clean. We haven't enabled -Wall for the default build, since the unit test
+is still not clean.
+
+This also fixes a real bug in the open-source implementation of
+ReadFileToStringOrDie(); it would not detect errors correctly.
+
+I had to go through some pains to avoid performance loss as the types
+were changed; I think there might still be some with 32-bit if and only if LFS
+is enabled (i.e., size_t is 64-bit), but for regular 32-bit and 64-bit I can't
+see any losses, and I've diffed the generated GCC assembler between the old and
+new code without seeing any significant changes. If anything, it's ever so
+slightly faster.
+
+This may or may not enable compression of very large blocks (>2^32 bytes)
+when size_t is 64-bit, but I haven't checked, and it is still not a supported
+case.
+
+------------------------------------------------------------------------
+r55 | snappy.mirrorbot@gmail.com | 2012-01-04 11:46:39 +0100 (Wed, 04 Jan 2012) | 6 lines
+
+Add a framing format description. We do not have any implementation of this at
+the current point, but there seems to be enough of a general interest in the
+topic (cf. public bug #34).
+
+R=csilvers,sanjay
+
+------------------------------------------------------------------------
+r54 | snappy.mirrorbot@gmail.com | 2011-12-05 22:27:26 +0100 (Mon, 05 Dec 2011) | 81 lines
+
+Speed up decompression by moving the refill check to the end of the loop.
+
+This seems to work because in most of the branches, the compiler can evaluate
+“ip_limit_ - ip” in a more efficient way than reloading ip_limit_ from memory
+(either by already having the entire expression in a register, or reconstructing
+it from “avail”, or something else). Memory loads, even from L1, are seemingly
+costly in the big picture at the current decompression speeds.
+
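+Structurally, the decode loop goes from "check, then decode" to "decode,
+then check"; in outline (a control-flow sketch only, with hypothetical
+stand-ins for the real tag decoder and reader refill):
+
+    bool DecodeOneTag(const char** ip);                          // declaration only
+    bool RefillIfNeeded(const char** ip, const char** ip_limit); // declaration only
+
+    void DecompressLoopSketch(const char* ip, const char* ip_limit) {
+      for (;;) {
+        // Decode first; the common case never needs the limit comparison.
+        if (!DecodeOneTag(&ip)) return;                 // done or corrupt input
+        // Only at the bottom of the loop do we look at ip_limit and refill.
+        if (ip >= ip_limit && !RefillIfNeeded(&ip, &ip_limit)) return;
+      }
+    }
+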
+Microbenchmarks (64-bit, opt mode):
+
+Westmere (Intel Core i7):
+
+ Benchmark Time(ns) CPU(ns) Iterations
+ --------------------------------------------
+ BM_UFlat/0 74492 74491 187894 1.3GB/s html [ +5.9%]
+ BM_UFlat/1 712268 712263 19644 940.0MB/s urls [ +3.8%]
+ BM_UFlat/2 10591 10590 1000000 11.2GB/s jpg [ -6.8%]
+ BM_UFlat/3 29643 29643 469915 3.0GB/s pdf [ +7.9%]
+ BM_UFlat/4 304669 304667 45930 1.3GB/s html4 [ +4.8%]
+ BM_UFlat/5 28508 28507 490077 823.1MB/s cp [ +4.0%]
+ BM_UFlat/6 12415 12415 1000000 856.5MB/s c [ +8.6%]
+ BM_UFlat/7 3415 3415 4084723 1039.0MB/s lsp [+18.0%]
+ BM_UFlat/8 979569 979563 14261 1002.5MB/s xls [ +5.8%]
+ BM_UFlat/9 230150 230148 60934 630.2MB/s txt1 [ +5.2%]
+ BM_UFlat/10 197167 197166 71135 605.5MB/s txt2 [ +4.7%]
+ BM_UFlat/11 607394 607390 23041 670.1MB/s txt3 [ +5.6%]
+ BM_UFlat/12 808502 808496 17316 568.4MB/s txt4 [ +5.0%]
+ BM_UFlat/13 372791 372788 37564 1.3GB/s bin [ +3.3%]
+ BM_UFlat/14 44541 44541 313969 818.8MB/s sum [ +5.7%]
+ BM_UFlat/15 4833 4833 2898697 834.1MB/s man [ +4.8%]
+ BM_UFlat/16 79855 79855 175356 1.4GB/s pb [ +4.8%]
+ BM_UFlat/17 245845 245843 56838 715.0MB/s gaviota [ +5.8%]
+
+Clovertown (Intel Core 2):
+
+ Benchmark Time(ns) CPU(ns) Iterations
+ --------------------------------------------
+ BM_UFlat/0 107911 107890 100000 905.1MB/s html [ +2.2%]
+ BM_UFlat/1 1011237 1011041 10000 662.3MB/s urls [ +2.5%]
+ BM_UFlat/2 26775 26770 523089 4.4GB/s jpg [ +0.0%]
+ BM_UFlat/3 48103 48095 290618 1.8GB/s pdf [ +3.4%]
+ BM_UFlat/4 437724 437644 31937 892.6MB/s html4 [ +2.1%]
+ BM_UFlat/5 39607 39600 358284 592.5MB/s cp [ +2.4%]
+ BM_UFlat/6 18227 18224 768191 583.5MB/s c [ +2.7%]
+ BM_UFlat/7 5171 5170 2709437 686.4MB/s lsp [ +3.9%]
+ BM_UFlat/8 1560291 1559989 8970 629.5MB/s xls [ +3.6%]
+ BM_UFlat/9 335401 335343 41731 432.5MB/s txt1 [ +3.0%]
+ BM_UFlat/10 287014 286963 48758 416.0MB/s txt2 [ +2.8%]
+ BM_UFlat/11 888522 888356 15752 458.1MB/s txt3 [ +2.9%]
+ BM_UFlat/12 1186600 1186378 10000 387.3MB/s txt4 [ +3.1%]
+ BM_UFlat/13 572295 572188 24468 855.4MB/s bin [ +2.1%]
+ BM_UFlat/14 64060 64049 218401 569.4MB/s sum [ +4.1%]
+ BM_UFlat/15 7264 7263 1916168 555.0MB/s man [ +1.4%]
+ BM_UFlat/16 108853 108836 100000 1039.1MB/s pb [ +1.7%]
+ BM_UFlat/17 364289 364223 38419 482.6MB/s gaviota [ +4.9%]
+
+Barcelona (AMD Opteron):
+
+ Benchmark Time(ns) CPU(ns) Iterations
+ --------------------------------------------
+ BM_UFlat/0 103900 103871 100000 940.2MB/s html [ +8.3%]
+ BM_UFlat/1 1000435 1000107 10000 669.5MB/s urls [ +6.6%]
+ BM_UFlat/2 24659 24652 567362 4.8GB/s jpg [ +0.1%]
+ BM_UFlat/3 48206 48193 291121 1.8GB/s pdf [ +5.0%]
+ BM_UFlat/4 421980 421850 33174 926.0MB/s html4 [ +7.3%]
+ BM_UFlat/5 40368 40357 346994 581.4MB/s cp [ +8.7%]
+ BM_UFlat/6 19836 19830 708695 536.2MB/s c [ +8.0%]
+ BM_UFlat/7 6100 6098 2292774 581.9MB/s lsp [ +9.0%]
+ BM_UFlat/8 1693093 1692514 8261 580.2MB/s xls [ +8.0%]
+ BM_UFlat/9 365991 365886 38225 396.4MB/s txt1 [ +7.1%]
+ BM_UFlat/10 311330 311238 44950 383.6MB/s txt2 [ +7.6%]
+ BM_UFlat/11 975037 974737 14376 417.5MB/s txt3 [ +6.9%]
+ BM_UFlat/12 1303558 1303175 10000 352.6MB/s txt4 [ +7.3%]
+ BM_UFlat/13 517448 517290 27144 946.2MB/s bin [ +5.5%]
+ BM_UFlat/14 66537 66518 210352 548.3MB/s sum [ +7.5%]
+ BM_UFlat/15 7976 7974 1760383 505.6MB/s man [ +5.6%]
+ BM_UFlat/16 103121 103092 100000 1097.0MB/s pb [ +8.7%]
+ BM_UFlat/17 391431 391314 35733 449.2MB/s gaviota [ +6.5%]
+
+R=sanjay
+
+------------------------------------------------------------------------
+r53 | snappy.mirrorbot@gmail.com | 2011-11-23 12:14:17 +0100 (Wed, 23 Nov 2011) | 88 lines
+
+Speed up decompression by making the fast path for literals faster.
+
+We do the fast-path step as soon as possible; in fact, as soon as we know the
+literal length. Since we usually hit the fast path, we can then skip the checks
+for long literals and available input space (beyond what the fast path check
+already does).
+
+Note that this changes the decompression Writer API; however, it does not
+change the ABI, since writers are always templatized and as such never
+cross compilation units. The new API is slightly more general, in that it
+doesn't hard-code the value 16. Note that we also take care to check
+for len <= 16 first, since the other two checks almost always succeed
+(so we don't want to waste time checking for them until we have to).
+
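+The shape of the new fast path is roughly the following (sketch; the real
+writer API threads the available space through differently, and the caller
+still advances the output pointer by the true length):
+
+    #include <string.h>
+
+    static inline bool TryFastAppendSketch(char* op, size_t space_left,
+                                           const char* ip, size_t avail,
+                                           size_t len) {
+      // Check len <= 16 first -- the other two conditions almost always hold.
+      if (len <= 16 && avail >= 16 && space_left >= 16) {
+        memcpy(op, ip, 16);   // may copy a few bytes beyond len; slack allows it
+        return true;
+      }
+      return false;           // fall back to the fully checked slow path
+    }
+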
+The improvements are most marked on Nehalem, but are generally positive
+on other platforms as well. All microbenchmarks are 64-bit, opt.
+
+Clovertown (Core 2):
+
+ Benchmark Time(ns) CPU(ns) Iterations
+ --------------------------------------------
+ BM_UFlat/0 110226 110224 100000 886.0MB/s html [ +1.5%]
+ BM_UFlat/1 1036523 1036508 10000 646.0MB/s urls [ -0.8%]
+ BM_UFlat/2 26775 26775 522570 4.4GB/s jpg [ +0.0%]
+ BM_UFlat/3 49738 49737 280974 1.8GB/s pdf [ +0.3%]
+ BM_UFlat/4 446790 446792 31334 874.3MB/s html4 [ +0.8%]
+ BM_UFlat/5 40561 40562 350424 578.5MB/s cp [ +1.3%]
+ BM_UFlat/6 18722 18722 746903 568.0MB/s c [ +1.4%]
+ BM_UFlat/7 5373 5373 2608632 660.5MB/s lsp [ +8.3%]
+ BM_UFlat/8 1615716 1615718 8670 607.8MB/s xls [ +2.0%]
+ BM_UFlat/9 345278 345281 40481 420.1MB/s txt1 [ +1.4%]
+ BM_UFlat/10 294855 294855 47452 404.9MB/s txt2 [ +1.6%]
+ BM_UFlat/11 914263 914263 15316 445.2MB/s txt3 [ +1.1%]
+ BM_UFlat/12 1222694 1222691 10000 375.8MB/s txt4 [ +1.4%]
+ BM_UFlat/13 584495 584489 23954 837.4MB/s bin [ -0.6%]
+ BM_UFlat/14 66662 66662 210123 547.1MB/s sum [ +1.2%]
+ BM_UFlat/15 7368 7368 1881856 547.1MB/s man [ +4.0%]
+ BM_UFlat/16 110727 110726 100000 1021.4MB/s pb [ +2.3%]
+ BM_UFlat/17 382138 382141 36616 460.0MB/s gaviota [ -0.7%]
+
+Westmere (Core i7):
+
+ Benchmark Time(ns) CPU(ns) Iterations
+ --------------------------------------------
+ BM_UFlat/0 78861 78853 177703 1.2GB/s html [ +2.1%]
+ BM_UFlat/1 739560 739491 18912 905.4MB/s urls [ +3.4%]
+ BM_UFlat/2 9867 9866 1419014 12.0GB/s jpg [ +3.4%]
+ BM_UFlat/3 31989 31986 438385 2.7GB/s pdf [ +0.2%]
+ BM_UFlat/4 319406 319380 43771 1.2GB/s html4 [ +1.9%]
+ BM_UFlat/5 29639 29636 472862 791.7MB/s cp [ +5.2%]
+ BM_UFlat/6 13478 13477 1000000 789.0MB/s c [ +2.3%]
+ BM_UFlat/7 4030 4029 3475364 880.7MB/s lsp [ +8.7%]
+ BM_UFlat/8 1036585 1036492 10000 947.5MB/s xls [ +6.9%]
+ BM_UFlat/9 242127 242105 57838 599.1MB/s txt1 [ +3.0%]
+ BM_UFlat/10 206499 206480 67595 578.2MB/s txt2 [ +3.4%]
+ BM_UFlat/11 641635 641570 21811 634.4MB/s txt3 [ +2.4%]
+ BM_UFlat/12 848847 848769 16443 541.4MB/s txt4 [ +3.1%]
+ BM_UFlat/13 384968 384938 36366 1.2GB/s bin [ +0.3%]
+ BM_UFlat/14 47106 47101 297770 774.3MB/s sum [ +4.4%]
+ BM_UFlat/15 5063 5063 2772202 796.2MB/s man [ +7.7%]
+ BM_UFlat/16 83663 83656 167697 1.3GB/s pb [ +1.8%]
+ BM_UFlat/17 260224 260198 53823 675.6MB/s gaviota [ -0.5%]
+
+Barcelona (Opteron):
+
+ Benchmark Time(ns) CPU(ns) Iterations
+ --------------------------------------------
+ BM_UFlat/0 112490 112457 100000 868.4MB/s html [ -0.4%]
+ BM_UFlat/1 1066719 1066339 10000 627.9MB/s urls [ +1.0%]
+ BM_UFlat/2 24679 24672 563802 4.8GB/s jpg [ +0.7%]
+ BM_UFlat/3 50603 50589 277285 1.7GB/s pdf [ +2.6%]
+ BM_UFlat/4 452982 452849 30900 862.6MB/s html4 [ -0.2%]
+ BM_UFlat/5 43860 43848 319554 535.1MB/s cp [ +1.2%]
+ BM_UFlat/6 21419 21413 653573 496.6MB/s c [ +1.0%]
+ BM_UFlat/7 6646 6645 2105405 534.1MB/s lsp [ +0.3%]
+ BM_UFlat/8 1828487 1827886 7658 537.3MB/s xls [ +2.6%]
+ BM_UFlat/9 391824 391714 35708 370.3MB/s txt1 [ +2.2%]
+ BM_UFlat/10 334913 334816 41885 356.6MB/s txt2 [ +1.7%]
+ BM_UFlat/11 1042062 1041674 10000 390.7MB/s txt3 [ +1.1%]
+ BM_UFlat/12 1398902 1398456 10000 328.6MB/s txt4 [ +1.7%]
+ BM_UFlat/13 545706 545530 25669 897.2MB/s bin [ -0.4%]
+ BM_UFlat/14 71512 71505 196035 510.0MB/s sum [ +1.4%]
+ BM_UFlat/15 8422 8421 1665036 478.7MB/s man [ +2.6%]
+ BM_UFlat/16 112053 112048 100000 1009.3MB/s pb [ -0.4%]
+ BM_UFlat/17 416723 416713 33612 421.8MB/s gaviota [ -2.0%]
+
+R=sanjay
+
+------------------------------------------------------------------------
+r52 | snappy.mirrorbot@gmail.com | 2011-11-08 15:46:39 +0100 (Tue, 08 Nov 2011) | 5 lines
+
+Fix public issue #53: Update the README to the API we actually open-sourced
+with.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r51 | snappy.mirrorbot@gmail.com | 2011-10-05 14:27:12 +0200 (Wed, 05 Oct 2011) | 5 lines
+
+In the format description, use a clearer example to emphasize that varints are
+stored in little-endian. Patch from Christian von Roques.
+
+R=csilvers
+
+------------------------------------------------------------------------
+r50 | snappy.mirrorbot@gmail.com | 2011-09-15 21:34:06 +0200 (Thu, 15 Sep 2011) | 4 lines
+
+Release Snappy 1.0.4.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r49 | snappy.mirrorbot@gmail.com | 2011-09-15 11:50:05 +0200 (Thu, 15 Sep 2011) | 5 lines
+
+Fix public issue #50: Include generic byteswap macros.
+Also include Solaris 10 and FreeBSD versions.
+
+R=csilvers
+
+------------------------------------------------------------------------
+r48 | snappy.mirrorbot@gmail.com | 2011-08-10 20:57:27 +0200 (Wed, 10 Aug 2011) | 5 lines
+
+Partially fix public issue 50: Remove an extra comma from the end of some
+enum declarations, as it seems the Sun compiler does not like it.
+
+Based on patch by Travis Vitek.
+
+------------------------------------------------------------------------
+r47 | snappy.mirrorbot@gmail.com | 2011-08-10 20:44:16 +0200 (Wed, 10 Aug 2011) | 4 lines
+
+Use the right #ifdef test for sys/mman.h.
+
+Based on patch by Travis Vitek.
+
+------------------------------------------------------------------------
+r46 | snappy.mirrorbot@gmail.com | 2011-08-10 03:22:09 +0200 (Wed, 10 Aug 2011) | 6 lines
+
+Fix public issue #47: Small comment cleanups in the unit test.
+
+Originally based on a patch by Patrick Pelletier.
+
+R=sanjay
+
+------------------------------------------------------------------------
+r45 | snappy.mirrorbot@gmail.com | 2011-08-10 03:14:43 +0200 (Wed, 10 Aug 2011) | 8 lines
+
+Fix public issue #46: Format description said "3-byte offset"
+instead of "4-byte offset" for the longest copies.
+
+Also fix an inconsistency in the heading for section 2.2.3.
+Both patches by Patrick Pelletier.
+
+R=csilvers
+
+------------------------------------------------------------------------
+r44 | snappy.mirrorbot@gmail.com | 2011-06-28 13:40:25 +0200 (Tue, 28 Jun 2011) | 8 lines
+
+Fix public issue #44: Make the definition and declaration of CompressFragment
+identical, even regarding cv-qualifiers.
+
+This is required to work around a bug in the Solaris Studio C++ compiler
+(it does not properly disregard cv-qualifiers when doing name mangling).
+
+R=sanjay
+
+------------------------------------------------------------------------
+r43 | snappy.mirrorbot@gmail.com | 2011-06-04 12:19:05 +0200 (Sat, 04 Jun 2011) | 7 lines
+
+Correct an inaccuracy in the Snappy format description.
+(I stumbled into this when changing the way we decompress literals.)
+
+R=csilvers
+
+Revision created by MOE tool push_codebase.
+
+------------------------------------------------------------------------
+r42 | snappy.mirrorbot@gmail.com | 2011-06-03 22:53:06 +0200 (Fri, 03 Jun 2011) | 50 lines
+
+Speed up decompression by removing a fast-path attempt.
+
+Whenever we try to enter a copy fast-path, there is a certain cost in checking
+that all the preconditions are in place, but it's normally offset by the fact
+that we can usually take the cheaper path. However, in a certain path we've
+already established that "avail < literal_length", which usually means that
+either the available space is small, or the literal is big. Both will disqualify
+us from taking the fast path, and thus we take the hit from the precondition
+checking without gaining much from having a fast path. Thus, simply don't try
+the fast path in this situation -- we're already on a slow path anyway
+(one where we need to refill more data from the reader).
+
+I'm a bit surprised at how much this gained; it could be that this path is
+more common than I thought, or that the simpler structure somehow makes the
+compiler happier. I haven't looked at the assembler, but it's a win across
+the board on both Core 2, Core i7 and Opteron, at least for the cases we
+typically care about. The gains seem to be the largest on Core i7, though.
+Results from my Core i7 workstation:
+
+
+ Benchmark Time(ns) CPU(ns) Iterations
+ ---------------------------------------------------
+ BM_UFlat/0 73337 73091 190996 1.3GB/s html [ +1.7%]
+ BM_UFlat/1 696379 693501 20173 965.5MB/s urls [ +2.7%]
+ BM_UFlat/2 9765 9734 1472135 12.1GB/s jpg [ +0.7%]
+ BM_UFlat/3 29720 29621 472973 3.0GB/s pdf [ +1.8%]
+ BM_UFlat/4 294636 293834 47782 1.3GB/s html4 [ +2.3%]
+ BM_UFlat/5 28399 28320 494700 828.5MB/s cp [ +3.5%]
+ BM_UFlat/6 12795 12760 1000000 833.3MB/s c [ +1.2%]
+ BM_UFlat/7 3984 3973 3526448 893.2MB/s lsp [ +5.7%]
+ BM_UFlat/8 991996 989322 14141 992.6MB/s xls [ +3.3%]
+ BM_UFlat/9 228620 227835 61404 636.6MB/s txt1 [ +4.0%]
+ BM_UFlat/10 197114 196494 72165 607.5MB/s txt2 [ +3.5%]
+ BM_UFlat/11 605240 603437 23217 674.4MB/s txt3 [ +3.7%]
+ BM_UFlat/12 804157 802016 17456 573.0MB/s txt4 [ +3.9%]
+ BM_UFlat/13 347860 346998 40346 1.4GB/s bin [ +1.2%]
+ BM_UFlat/14 44684 44559 315315 818.4MB/s sum [ +2.3%]
+ BM_UFlat/15 5120 5106 2739726 789.4MB/s man [ +3.3%]
+ BM_UFlat/16 76591 76355 183486 1.4GB/s pb [ +2.8%]
+ BM_UFlat/17 238564 237828 58824 739.1MB/s gaviota [ +1.6%]
+ BM_UValidate/0 42194 42060 333333 2.3GB/s html [ -0.1%]
+ BM_UValidate/1 433182 432005 32407 1.5GB/s urls [ -0.1%]
+ BM_UValidate/2 197 196 71428571 603.3GB/s jpg [ +0.5%]
+ BM_UValidate/3 14494 14462 972222 6.1GB/s pdf [ +0.5%]
+ BM_UValidate/4 168444 167836 83832 2.3GB/s html4 [ +0.1%]
+
+R=jeff
+
+Revision created by MOE tool push_codebase.
+
+------------------------------------------------------------------------
+r41 | snappy.mirrorbot@gmail.com | 2011-06-03 22:47:14 +0200 (Fri, 03 Jun 2011) | 43 lines
+
+Speed up decompression by not needing a lookup table for literal items.
+
+Looking up into and decoding the values from char_table has long shown up as a
+hotspot in the decompressor. While it turns out that it's hard to make a more
+efficient decoder for the copy ops, the literals are simple enough that we can
+decode them without needing a table lookup. (This means that 1/4 of the table
+is now unused, although that in itself doesn't buy us anything.)
+
+The gains are small, but definitely present; some tests win as much as 10%,
+but 1-4% is more typical. These results are from Core i7, in 64-bit mode;
+Core 2 and Opteron show similar results. (I've run with more iterations
+than usual to make sure the smaller gains don't drown entirely in noise.)
+
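+Decoding a literal needs no table because its length is carried directly in
+the tag byte; in essence (sketch of the common case only):
+
+    #include <stdint.h>
+    #include <stddef.h>
+
+    // The two low bits of a tag give the element type; for a literal (type
+    // 00) the upper six bits encode (length - 1), except for values >= 60,
+    // which mean the length follows in the next 1-4 bytes.
+    static inline bool DecodeShortLiteralSketch(uint8_t tag, size_t* len) {
+      if ((tag & 0x3) != 0) return false;   // copy op: still goes via char_table
+      size_t encoded = tag >> 2;
+      if (encoded >= 60) return false;      // long literal: handled separately
+      *len = encoded + 1;
+      return true;
+    }
+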
+ Benchmark Time(ns) CPU(ns) Iterations
+ ---------------------------------------------------
+ BM_UFlat/0 74665 74428 182055 1.3GB/s html [ +3.1%]
+ BM_UFlat/1 714106 711997 19663 940.4MB/s urls [ +4.4%]
+ BM_UFlat/2 9820 9789 1427115 12.1GB/s jpg [ -1.2%]
+ BM_UFlat/3 30461 30380 465116 2.9GB/s pdf [ +0.8%]
+ BM_UFlat/4 301445 300568 46512 1.3GB/s html4 [ +2.2%]
+ BM_UFlat/5 29338 29263 479452 801.8MB/s cp [ +1.6%]
+ BM_UFlat/6 13004 12970 1000000 819.9MB/s c [ +2.1%]
+ BM_UFlat/7 4180 4168 3349282 851.4MB/s lsp [ +1.3%]
+ BM_UFlat/8 1026149 1024000 10000 959.0MB/s xls [+10.7%]
+ BM_UFlat/9 237441 236830 59072 612.4MB/s txt1 [ +0.3%]
+ BM_UFlat/10 203966 203298 69307 587.2MB/s txt2 [ +0.8%]
+ BM_UFlat/11 627230 625000 22400 651.2MB/s txt3 [ +0.7%]
+ BM_UFlat/12 836188 833979 16787 551.0MB/s txt4 [ +1.3%]
+ BM_UFlat/13 351904 350750 39886 1.4GB/s bin [ +3.8%]
+ BM_UFlat/14 45685 45562 308370 800.4MB/s sum [ +5.9%]
+ BM_UFlat/15 5286 5270 2656546 764.9MB/s man [ +1.5%]
+ BM_UFlat/16 78774 78544 178117 1.4GB/s pb [ +4.3%]
+ BM_UFlat/17 242270 241345 58091 728.3MB/s gaviota [ +1.2%]
+ BM_UValidate/0 42149 42000 333333 2.3GB/s html [ -3.0%]
+ BM_UValidate/1 432741 431303 32483 1.5GB/s urls [ +7.8%]
+ BM_UValidate/2 198 197 71428571 600.7GB/s jpg [+16.8%]
+ BM_UValidate/3 14560 14521 965517 6.1GB/s pdf [ -4.1%]
+ BM_UValidate/4 169065 168671 83832 2.3GB/s html4 [ -2.9%]
+
+R=jeff
+
+Revision created by MOE tool push_codebase.
+
+------------------------------------------------------------------------
+r40 | snappy.mirrorbot@gmail.com | 2011-06-03 00:57:41 +0200 (Fri, 03 Jun 2011) | 2 lines
+
+Release Snappy 1.0.3.
+
+------------------------------------------------------------------------
+r39 | snappy.mirrorbot@gmail.com | 2011-06-02 20:06:54 +0200 (Thu, 02 Jun 2011) | 11 lines
+
+Remove an unneeded goto in the decompressor; it turns out that the
+state of ip_ after decompression (or attempted decompression) is
+completely irrelevant, so we don't need the trailer.
+
+Performance is, as expected, mostly flat -- there's a curious ~3-5%
+loss in the "lsp" test, but that test case is so short it is hard to say
+anything definitive about why (most likely, it's some sort of
+unrelated effect).
+
+R=jeff
+
+------------------------------------------------------------------------
+r38 | snappy.mirrorbot@gmail.com | 2011-06-02 19:59:40 +0200 (Thu, 02 Jun 2011) | 52 lines
+
+Speed up decompression by caching ip_.
+
+It is seemingly hard for the compiler to understand that ip_, the current input
+pointer into the compressed data stream, cannot alias anything else, and
+thus using it directly will incur memory traffic as it cannot be kept in a
+register. The code already knew about this and cached it into a local
+variable, but since Step() only decoded one tag, it had to move ip_ back into
+place between every tag. This seems to have cost us a significant amount of
+performance, so Step() is changed into a function that decodes as much as it
+can before saving ip_ back and returning. (Note that Step() was already
+inlined, so it is not the manual inlining that buys the performance here.)
+
+The wins are about 3-6% for Core 2, 6-13% on Core i7 and 5-12% on Opteron
+(for plain array-to-array decompression, in 64-bit opt mode).
+
+There is a tiny difference in the behavior here; if an invalid literal is
+encountered (i.e., the writer refuses the Append() operation), ip_ will now
+point to the byte past the tag byte, instead of where the literal was
+originally thought to end. However, we don't use ip_ for anything after
+DecompressAllTags() has returned, so this should not change external behavior
+in any way.
+
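+The pattern is the classic "copy the member into a local for the duration of
+the loop"; schematically (a structural sketch only, not the real
+DecompressAllTags):
+
+    struct DecompressorSketch {
+      const char* ip_;   // current position in the compressed stream
+
+      void DecompressAllTags() {
+        const char* ip = ip_;        // cache the member in a register
+        for (;;) {
+          // ... decode as many tags as possible, advancing only the local ip ...
+          break;                     // placeholder for the real loop body/exit
+        }
+        ip_ = ip;                    // write the member back once, at the end
+      }
+    };
+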
+Microbenchmark results for Core i7, 64-bit (Opteron results are similar):
+
+Benchmark Time(ns) CPU(ns) Iterations
+---------------------------------------------------
+BM_UFlat/0 79134 79110 8835 1.2GB/s html [ +6.2%]
+BM_UFlat/1 786126 786096 891 851.8MB/s urls [+10.0%]
+BM_UFlat/2 9948 9948 69125 11.9GB/s jpg [ -1.3%]
+BM_UFlat/3 31999 31998 21898 2.7GB/s pdf [ +6.5%]
+BM_UFlat/4 318909 318829 2204 1.2GB/s html4 [ +6.5%]
+BM_UFlat/5 31384 31390 22363 747.5MB/s cp [ +9.2%]
+BM_UFlat/6 14037 14034 49858 757.7MB/s c [+10.6%]
+BM_UFlat/7 4612 4612 151395 769.5MB/s lsp [ +9.5%]
+BM_UFlat/8 1203174 1203007 582 816.3MB/s xls [+19.3%]
+BM_UFlat/9 253869 253955 2757 571.1MB/s txt1 [+11.4%]
+BM_UFlat/10 219292 219290 3194 544.4MB/s txt2 [+12.1%]
+BM_UFlat/11 672135 672131 1000 605.5MB/s txt3 [+11.2%]
+BM_UFlat/12 902512 902492 776 509.2MB/s txt4 [+12.5%]
+BM_UFlat/13 372110 371998 1881 1.3GB/s bin [ +5.8%]
+BM_UFlat/14 50407 50407 10000 723.5MB/s sum [+13.5%]
+BM_UFlat/15 5699 5701 100000 707.2MB/s man [+12.4%]
+BM_UFlat/16 83448 83424 8383 1.3GB/s pb [ +5.7%]
+BM_UFlat/17 256958 256963 2723 684.1MB/s gaviota [ +7.9%]
+BM_UValidate/0 42795 42796 16351 2.2GB/s html [+25.8%]
+BM_UValidate/1 490672 490622 1427 1.3GB/s urls [+22.7%]
+BM_UValidate/2 237 237 2950297 499.0GB/s jpg [+24.9%]
+BM_UValidate/3 14610 14611 47901 6.0GB/s pdf [+26.8%]
+BM_UValidate/4 171973 171990 4071 2.2GB/s html4 [+25.7%]
+
+
+
+------------------------------------------------------------------------
+r37 | snappy.mirrorbot@gmail.com | 2011-05-17 10:48:25 +0200 (Tue, 17 May 2011) | 10 lines
+
+
+Fix the numbering of the headlines in the Snappy format description.
+
+R=csilvers
+DELTA=4 (0 added, 0 deleted, 4 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1906
+
+------------------------------------------------------------------------
+r36 | snappy.mirrorbot@gmail.com | 2011-05-16 10:59:18 +0200 (Mon, 16 May 2011) | 12 lines
+
+
+Fix public issue #32: Add compressed format documentation for Snappy.
+This text is new, but an earlier version from Zeev Tarantov was used
+as reference.
+
+R=csilvers
+DELTA=112 (111 added, 0 deleted, 1 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1867
+
+------------------------------------------------------------------------
+r35 | snappy.mirrorbot@gmail.com | 2011-05-09 23:29:02 +0200 (Mon, 09 May 2011) | 12 lines
+
+
+Fix public issue #39: Pick out the median runs based on CPU time,
+not real time. Also, use nth_element instead of sort, since we
+only need one element.
+
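+Since only the median is needed, a partial sort suffices; the standard-library
+call in question is used roughly like this (sketch; assumes at least one
+measured run):
+
+    #include <algorithm>
+    #include <vector>
+
+    double MedianCpuTimeSketch(std::vector<double> cpu_times) {
+      std::vector<double>::iterator mid = cpu_times.begin() + cpu_times.size() / 2;
+      std::nth_element(cpu_times.begin(), mid, cpu_times.end());
+      return *mid;
+    }
+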
+R=csilvers
+DELTA=5 (3 added, 0 deleted, 2 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1799
+
+------------------------------------------------------------------------
+r34 | snappy.mirrorbot@gmail.com | 2011-05-09 23:28:45 +0200 (Mon, 09 May 2011) | 19 lines
+
+
+Fix public issue #38: Make the microbenchmark framework properly
+handle cases where gettimeofday() can return the same
+result twice (as sometimes on GNU/Hurd) or go backwards
+(as when the user adjusts the clock). We avoid a division-by-zero,
+and put a lower bound on the number of iterations -- the same
+amount as we use to calibrate.
+
+We should probably use CLOCK_MONOTONIC for platforms that support
+it, to be robust against clock adjustments; we already use Windows'
+monotonic timers. However, that's for a later changelist.
+
+R=csilvers
+DELTA=7 (5 added, 0 deleted, 2 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1798
+
+------------------------------------------------------------------------
+r33 | snappy.mirrorbot@gmail.com | 2011-05-04 01:22:52 +0200 (Wed, 04 May 2011) | 11 lines
+
+
+Fix public issue #37: Only link snappy_unittest against -lz and other autodetected
+libraries, not libsnappy.so (which doesn't need any such dependency).
+
+R=csilvers
+DELTA=20 (14 added, 0 deleted, 6 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1710
+
+------------------------------------------------------------------------
+r32 | snappy.mirrorbot@gmail.com | 2011-05-04 01:22:33 +0200 (Wed, 04 May 2011) | 11 lines
+
+
+Release Snappy 1.0.2, to get the license change and various other fixes into
+a release.
+
+R=csilvers
+DELTA=239 (236 added, 0 deleted, 3 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1709
+
+------------------------------------------------------------------------
+r31 | snappy.mirrorbot@gmail.com | 2011-04-26 14:34:55 +0200 (Tue, 26 Apr 2011) | 15 lines
+
+
+Fix public issue #30: Stop using gettimeofday() altogether on Win32,
+as MSVC doesn't include it. Replace with QueryPerformanceCounter(),
+which is monotonic and probably reasonably high-resolution.
+(Some machines have traditionally had bugs in QPC, but they should
+be relatively rare these days, and there isn't really a much better
+alternative that I know of.)
+
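+On Windows the replacement pattern is the usual QueryPerformanceCounter /
+QueryPerformanceFrequency pair, roughly (sketch; error handling omitted):
+
+    #include <windows.h>
+
+    // Monotonic wall-clock seconds, standing in for gettimeofday().
+    double MonotonicSecondsSketch() {
+      LARGE_INTEGER freq, now;
+      QueryPerformanceFrequency(&freq);   // ticks per second
+      QueryPerformanceCounter(&now);      // current tick count
+      return static_cast<double>(now.QuadPart) /
+             static_cast<double>(freq.QuadPart);
+    }
+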
+R=csilvers
+DELTA=74 (55 added, 19 deleted, 0 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1556
+
+------------------------------------------------------------------------
+r30 | snappy.mirrorbot@gmail.com | 2011-04-26 14:34:37 +0200 (Tue, 26 Apr 2011) | 11 lines
+
+
+Fix public issue #31: Don't reset PATH in autogen.sh; instead, do the trickery
+we need for our own build system internally.
+
+R=csilvers
+DELTA=16 (13 added, 1 deleted, 2 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1555
+
+------------------------------------------------------------------------
+r29 | snappy.mirrorbot@gmail.com | 2011-04-16 00:55:56 +0200 (Sat, 16 Apr 2011) | 12 lines
+
+
+When including <windows.h>, define WIN32_LEAN_AND_MEAN first,
+so we won't pull in macro definitions of things like min() and max(),
+which can conflict with <algorithm>.
+
+R=csilvers
+DELTA=1 (1 added, 0 deleted, 0 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1485
+
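The include pattern this entry refers to, as a sketch; the macro must be defined before the first inclusion of <windows.h>:

    /* Define before <windows.h> so, as the entry above notes, the header
       does not pull in macros such as min()/max() that clash with
       <algorithm>. */
    #ifdef _WIN32
    #define WIN32_LEAN_AND_MEAN
    #include <windows.h>
    #endif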
+------------------------------------------------------------------------
+r28 | snappy.mirrorbot@gmail.com | 2011-04-11 11:07:01 +0200 (Mon, 11 Apr 2011) | 15 lines
+
+
+Fix public issue #29: Write CPU timing code for Windows, based on GetProcessTimes()
+instead of getrusage().
+
+I thought I'd already committed this patch, so that the 1.0.1 release already
+would have a Windows-compatible snappy_unittest, but I'd seemingly deleted it
+instead, so this is a reconstruction.
+
+R=csilvers
+DELTA=43 (39 added, 3 deleted, 1 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1295
+
+------------------------------------------------------------------------
+r27 | snappy.mirrorbot@gmail.com | 2011-04-08 11:51:53 +0200 (Fri, 08 Apr 2011) | 22 lines
+
+
+Include C bindings of Snappy, contributed by Martin Gieseking.
+
+I've made a few changes since Martin's version; mostly style nits, but also
+a semantic change -- most functions that return bool in the C++ version now
+return an enum, to better match typical C (and zlib) semantics.
+
+I've kept the copyright notice, since Martin is obviously the author here;
+he has signed the contributor license agreement, though, so this should not
+hinder Google's use in the future.
+
+We'll need to update the libtool version number to match the added interface,
+but per http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
+I'm going to wait until public release.
+
+R=csilvers
+DELTA=238 (233 added, 0 deleted, 5 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1294
+
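A hedged usage sketch of the C bindings this entry introduces. The function names and the snappy_status values (SNAPPY_OK, SNAPPY_INVALID_INPUT, SNAPPY_BUFFER_TOO_SMALL) are what snappy-c.h declares; the example itself is illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "snappy-c.h"

    int main(void) {
        const char *input = "hello hello hello hello";
        size_t input_len = strlen(input);
        size_t compressed_len = snappy_max_compressed_length(input_len);
        char *compressed = malloc(compressed_len);

        /* Errors come back as an enum, not a bool, matching zlib-style C APIs. */
        if (snappy_compress(input, input_len, compressed, &compressed_len) != SNAPPY_OK) {
            fprintf(stderr, "snappy_compress failed\n");
            free(compressed);
            return 1;
        }
        printf("compressed %zu -> %zu bytes\n", input_len, compressed_len);
        free(compressed);
        return 0;
    }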
+------------------------------------------------------------------------
+r26 | snappy.mirrorbot@gmail.com | 2011-04-07 18:36:43 +0200 (Thu, 07 Apr 2011) | 13 lines
+
+
+Replace geo.protodata with a newer version.
+
+The data compresses/decompresses slightly faster than the old data, and has
+similar density.
+
+R=lookingbill
+DELTA=1 (0 added, 0 deleted, 1 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1288
+
+------------------------------------------------------------------------
+r25 | snappy.mirrorbot@gmail.com | 2011-03-30 22:27:53 +0200 (Wed, 30 Mar 2011) | 12 lines
+
+
+Fix public issue #27: Add HAVE_CONFIG_H tests around the config.h
+inclusion in snappy-stubs-internal.h, which eases compiling outside the
+automake/autoconf framework.
+
+R=csilvers
+DELTA=5 (4 added, 1 deleted, 0 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1152
+
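The guard in question is the stock autoconf idiom; a sketch of the pattern rather than the exact snappy-stubs-internal.h hunk:

    /* Only pull in config.h when the build system actually generated one,
       so the header also compiles outside automake/autoconf. */
    #ifdef HAVE_CONFIG_H
    #include "config.h"
    #endif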
+------------------------------------------------------------------------
+r24 | snappy.mirrorbot@gmail.com | 2011-03-30 22:27:39 +0200 (Wed, 30 Mar 2011) | 13 lines
+
+
+Fix public issue #26: Take memory allocation and reallocation entirely out of the
+Measure() loop. This gives all algorithms a small speed boost, except Snappy which
+already didn't do reallocation (so the measurements were slightly biased in its
+favor).
+
+R=csilvers
+DELTA=92 (69 added, 9 deleted, 14 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1151
+
+------------------------------------------------------------------------
+r23 | snappy.mirrorbot@gmail.com | 2011-03-30 22:25:09 +0200 (Wed, 30 Mar 2011) | 18 lines
+
+
+Renamed "namespace zippy" to "namespace snappy" to reduce
+the differences from the opensource code. Will make it easier
+in the future to mix-and-match third-party code that uses
+snappy with Google code.
+
+Currently, csearch shows that the only external user of
+"namespace zippy" is some bigtable code that accesses
+a TEST variable, which is temporarily kept in the zippy
+namespace.
+
+R=sesse
+DELTA=123 (18 added, 3 deleted, 102 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1150
+
+------------------------------------------------------------------------
+r22 | snappy.mirrorbot@gmail.com | 2011-03-29 00:17:04 +0200 (Tue, 29 Mar 2011) | 11 lines
+
+
+Put back the final few lines of what was truncated during the
+license header change.
+
+R=csilvers
+DELTA=5 (4 added, 0 deleted, 1 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1094
+
+------------------------------------------------------------------------
+r21 | snappy.mirrorbot@gmail.com | 2011-03-26 03:34:34 +0100 (Sat, 26 Mar 2011) | 20 lines
+
+
+Change on 2011-03-25 19:18:00-07:00 by sesse
+
+ Replace the Apache 2.0 license header by the BSD-type license header;
+ somehow a lot of the files were missed in the last round.
+
+ R=dannyb,csilvers
+ DELTA=147 (74 added, 2 deleted, 71 changed)
+
+Change on 2011-03-25 19:25:07-07:00 by sesse
+
+ Unbreak the build; the relicensing removed a bit too much (only comments
+	were intended, but I also accidentally removed some of the top lines of
+ the actual source).
+
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1072
+
+------------------------------------------------------------------------
+r20 | snappy.mirrorbot@gmail.com | 2011-03-25 17:14:41 +0100 (Fri, 25 Mar 2011) | 10 lines
+
+
+Change Snappy from the Apache 2.0 to a BSD-type license.
+
+R=dannyb
+DELTA=328 (80 added, 184 deleted, 64 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1061
+
+------------------------------------------------------------------------
+r19 | snappy.mirrorbot@gmail.com | 2011-03-25 01:39:01 +0100 (Fri, 25 Mar 2011) | 11 lines
+
+
+Release Snappy 1.0.1, to soup up all the various small changes
+that have been made since release.
+
+R=csilvers
+DELTA=266 (260 added, 0 deleted, 6 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1057
+
+------------------------------------------------------------------------
+r18 | snappy.mirrorbot@gmail.com | 2011-03-24 20:15:54 +0100 (Thu, 24 Mar 2011) | 11 lines
+
+
+Fix a microbenchmark crash on mingw32; seemingly %lld is not universally
+supported on Windows, and %I64d is recommended instead.
+
+R=csilvers
+DELTA=6 (5 added, 0 deleted, 1 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1034
+
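A sketch of the kind of workaround this entry describes -- choosing a 64-bit printf specifier the Windows C runtime understands; the macro name here is made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #if defined(_WIN32)
    #define INT64_FMT "%I64d"   /* older MSVCRT/mingw32 runtimes lack %lld */
    #else
    #define INT64_FMT "%lld"
    #endif

    static void print_count(int64_t n) {
        printf("count = " INT64_FMT "\n", (long long)n);
    }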
+------------------------------------------------------------------------
+r17 | snappy.mirrorbot@gmail.com | 2011-03-24 20:15:27 +0100 (Thu, 24 Mar 2011) | 13 lines
+
+
+Fix public issue #19: Fix unit test when Google Test is installed but the
+gflags package isn't (Google Test is not properly initialized).
+
+Patch by Martin Gieseking.
+
+R=csilvers
+DELTA=2 (1 added, 0 deleted, 1 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1033
+
+------------------------------------------------------------------------
+r16 | snappy.mirrorbot@gmail.com | 2011-03-24 20:13:57 +0100 (Thu, 24 Mar 2011) | 15 lines
+
+
+Make the unit test work on systems without mmap(). This is required for,
+among others, Windows support. For Windows specifically, we could have used
+CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
+to compiling, and is of course also relevant for embedded systems with no MMU.
+
+(Part 2/2)
+
+R=csilvers
+DELTA=15 (12 added, 3 deleted, 0 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1032
+
+------------------------------------------------------------------------
+r15 | snappy.mirrorbot@gmail.com | 2011-03-24 20:12:27 +0100 (Thu, 24 Mar 2011) | 15 lines
+
+
+Make the unit test work on systems without mmap(). This is required for,
+among others, Windows support. For Windows specifically, we could have used
+CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer
+to compiling, and is of course also relevant for embedded systems with no MMU.
+
+(Part 1/2)
+
+R=csilvers
+DELTA=9 (8 added, 0 deleted, 1 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1031
+
+------------------------------------------------------------------------
+r14 | snappy.mirrorbot@gmail.com | 2011-03-24 00:17:36 +0100 (Thu, 24 Mar 2011) | 14 lines
+
+
+Fix public issue #12: Don't keep autogenerated auto* files in Subversion;
+it causes problems with others sending patches, etc.
+
+We can't get this 100% hermetic anyhow, due to files like lt~obsolete.m4,
+so we can just as well go cleanly in the other direction.
+
+R=csilvers
+DELTA=21038 (0 added, 21036 deleted, 2 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=1012
+
+------------------------------------------------------------------------
+r13 | snappy.mirrorbot@gmail.com | 2011-03-23 18:50:49 +0100 (Wed, 23 Mar 2011) | 11 lines
+
+
+Fix public issue tracker bug #3: Call AC_SUBST([LIBTOOL_DEPS]), or the rule
+to rebuild libtool in Makefile.am won't work.
+
+R=csilvers
+DELTA=1 (1 added, 0 deleted, 0 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=997
+
+------------------------------------------------------------------------
+r12 | snappy.mirrorbot@gmail.com | 2011-03-23 12:16:39 +0100 (Wed, 23 Mar 2011) | 11 lines
+
+
+Fix public issue #10: Don't add GTEST_CPPFLAGS to snappy_unittest_CXXFLAGS;
+it's not needed (CPPFLAGS are always included when compiling).
+
+R=csilvers
+DELTA=1 (0 added, 1 deleted, 0 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=994
+
+------------------------------------------------------------------------
+r11 | snappy.mirrorbot@gmail.com | 2011-03-23 12:16:18 +0100 (Wed, 23 Mar 2011) | 11 lines
+
+
+Fix public issue #9: Add -Wall -Werror to automake flags.
+(This concerns automake itself, not the C++ compiler.)
+
+R=csilvers
+DELTA=4 (3 added, 0 deleted, 1 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=993
+
+------------------------------------------------------------------------
+r10 | snappy.mirrorbot@gmail.com | 2011-03-23 12:13:37 +0100 (Wed, 23 Mar 2011) | 10 lines
+
+
+Fix a typo in the Snappy README file.
+
+R=csilvers
+DELTA=1 (0 added, 0 deleted, 1 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=992
+
+------------------------------------------------------------------------
+r9 | snappy.mirrorbot@gmail.com | 2011-03-23 12:13:13 +0100 (Wed, 23 Mar 2011) | 11 lines
+
+
+Fix public issue #6: Add a --with-gflags option for disabling gflags autodetection
+and using a manually given setting (use/don't use) instead.
+
+R=csilvers
+DELTA=16 (13 added, 0 deleted, 3 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=991
+
+------------------------------------------------------------------------
+r8 | snappy.mirrorbot@gmail.com | 2011-03-23 12:12:44 +0100 (Wed, 23 Mar 2011) | 12 lines
+
+
+Fix public issue #5: Replace the EXTRA_LIBSNAPPY_LDFLAGS setup with something
+slightly more standard, that also doesn't leak libtool command-line into
+configure.ac.
+
+R=csilvers
+DELTA=7 (0 added, 4 deleted, 3 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=990
+
+------------------------------------------------------------------------
+r7 | snappy.mirrorbot@gmail.com | 2011-03-23 12:12:22 +0100 (Wed, 23 Mar 2011) | 10 lines
+
+
+Fix public issue #4: Properly quote all macro arguments in configure.ac.
+
+R=csilvers
+DELTA=16 (0 added, 0 deleted, 16 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=989
+
+------------------------------------------------------------------------
+r6 | snappy.mirrorbot@gmail.com | 2011-03-23 12:11:54 +0100 (Wed, 23 Mar 2011) | 11 lines
+
+
+Fix public issue #7: Don't use internal variables named ac_*, as those belong
+to autoconf's namespace.
+
+R=csilvers
+DELTA=6 (0 added, 0 deleted, 6 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=988
+
+------------------------------------------------------------------------
+r5 | snappy.mirrorbot@gmail.com | 2011-03-23 12:11:09 +0100 (Wed, 23 Mar 2011) | 10 lines
+
+
+Add missing licensing headers to a few files. (Part 2/2.)
+
+R=csilvers
+DELTA=12 (12 added, 0 deleted, 0 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=987
+
+------------------------------------------------------------------------
+r4 | snappy.mirrorbot@gmail.com | 2011-03-23 12:10:39 +0100 (Wed, 23 Mar 2011) | 10 lines
+
+
+Add missing licensing headers to a few files. (Part 1/2.)
+
+R=csilvers
+DELTA=24 (24 added, 0 deleted, 0 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=986
+
+------------------------------------------------------------------------
+r3 | snappy.mirrorbot@gmail.com | 2011-03-23 12:10:04 +0100 (Wed, 23 Mar 2011) | 11 lines
+
+
+Use the correct license file for the Apache 2.0 license;
+spotted by Florian Weimer.
+
+R=csilvers
+DELTA=202 (174 added, 0 deleted, 28 changed)
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=985
+
+------------------------------------------------------------------------
+r2 | snappy.mirrorbot@gmail.com | 2011-03-18 18:14:15 +0100 (Fri, 18 Mar 2011) | 6 lines
+
+
+
+
+Revision created by MOE tool push_codebase.
+MOE_MIGRATION=
+
+------------------------------------------------------------------------
+r1 | sesse@google.com | 2011-03-18 18:13:52 +0100 (Fri, 18 Mar 2011) | 2 lines
+
+Create trunk directory.
+
+------------------------------------------------------------------------
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/INSTALL b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/INSTALL
new file mode 100644
index 00000000..a1e89e18
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/INSTALL
@@ -0,0 +1,370 @@
+Installation Instructions
+*************************
+
+Copyright (C) 1994-1996, 1999-2002, 2004-2011 Free Software Foundation,
+Inc.
+
+ Copying and distribution of this file, with or without modification,
+are permitted in any medium without royalty provided the copyright
+notice and this notice are preserved. This file is offered as-is,
+without warranty of any kind.
+
+Basic Installation
+==================
+
+ Briefly, the shell commands `./configure; make; make install' should
+configure, build, and install this package. The following
+more-detailed instructions are generic; see the `README' file for
+instructions specific to this package. Some packages provide this
+`INSTALL' file but do not implement all of the features documented
+below. The lack of an optional feature in a given package is not
+necessarily a bug. More recommendations for GNU packages can be found
+in *note Makefile Conventions: (standards)Makefile Conventions.
+
+ The `configure' shell script attempts to guess correct values for
+various system-dependent variables used during compilation. It uses
+those values to create a `Makefile' in each directory of the package.
+It may also create one or more `.h' files containing system-dependent
+definitions. Finally, it creates a shell script `config.status' that
+you can run in the future to recreate the current configuration, and a
+file `config.log' containing compiler output (useful mainly for
+debugging `configure').
+
+ It can also use an optional file (typically called `config.cache'
+and enabled with `--cache-file=config.cache' or simply `-C') that saves
+the results of its tests to speed up reconfiguring. Caching is
+disabled by default to prevent problems with accidental use of stale
+cache files.
+
+ If you need to do unusual things to compile the package, please try
+to figure out how `configure' could check whether to do them, and mail
+diffs or instructions to the address given in the `README' so they can
+be considered for the next release. If you are using the cache, and at
+some point `config.cache' contains results you don't want to keep, you
+may remove or edit it.
+
+ The file `configure.ac' (or `configure.in') is used to create
+`configure' by a program called `autoconf'. You need `configure.ac' if
+you want to change it or regenerate `configure' using a newer version
+of `autoconf'.
+
+ The simplest way to compile this package is:
+
+ 1. `cd' to the directory containing the package's source code and type
+ `./configure' to configure the package for your system.
+
+ Running `configure' might take a while. While running, it prints
+ some messages telling which features it is checking for.
+
+ 2. Type `make' to compile the package.
+
+ 3. Optionally, type `make check' to run any self-tests that come with
+ the package, generally using the just-built uninstalled binaries.
+
+ 4. Type `make install' to install the programs and any data files and
+ documentation. When installing into a prefix owned by root, it is
+ recommended that the package be configured and built as a regular
+ user, and only the `make install' phase executed with root
+ privileges.
+
+ 5. Optionally, type `make installcheck' to repeat any self-tests, but
+ this time using the binaries in their final installed location.
+ This target does not install anything. Running this target as a
+ regular user, particularly if the prior `make install' required
+ root privileges, verifies that the installation completed
+ correctly.
+
+ 6. You can remove the program binaries and object files from the
+ source code directory by typing `make clean'. To also remove the
+ files that `configure' created (so you can compile the package for
+ a different kind of computer), type `make distclean'. There is
+ also a `make maintainer-clean' target, but that is intended mainly
+ for the package's developers. If you use it, you may have to get
+ all sorts of other programs in order to regenerate files that came
+ with the distribution.
+
+ 7. Often, you can also type `make uninstall' to remove the installed
+ files again. In practice, not all packages have tested that
+ uninstallation works correctly, even though it is required by the
+ GNU Coding Standards.
+
+ 8. Some packages, particularly those that use Automake, provide `make
+ distcheck', which can by used by developers to test that all other
+ targets like `make install' and `make uninstall' work correctly.
+ This target is generally not run by end users.
+
+Compilers and Options
+=====================
+
+ Some systems require unusual options for compilation or linking that
+the `configure' script does not know about. Run `./configure --help'
+for details on some of the pertinent environment variables.
+
+ You can give `configure' initial values for configuration parameters
+by setting variables in the command line or in the environment. Here
+is an example:
+
+ ./configure CC=c99 CFLAGS=-g LIBS=-lposix
+
+ *Note Defining Variables::, for more details.
+
+Compiling For Multiple Architectures
+====================================
+
+ You can compile the package for more than one kind of computer at the
+same time, by placing the object files for each architecture in their
+own directory. To do this, you can use GNU `make'. `cd' to the
+directory where you want the object files and executables to go and run
+the `configure' script. `configure' automatically checks for the
+source code in the directory that `configure' is in and in `..'. This
+is known as a "VPATH" build.
+
+ With a non-GNU `make', it is safer to compile the package for one
+architecture at a time in the source code directory. After you have
+installed the package for one architecture, use `make distclean' before
+reconfiguring for another architecture.
+
+ On MacOS X 10.5 and later systems, you can create libraries and
+executables that work on multiple system types--known as "fat" or
+"universal" binaries--by specifying multiple `-arch' options to the
+compiler but only a single `-arch' option to the preprocessor. Like
+this:
+
+ ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
+ CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
+ CPP="gcc -E" CXXCPP="g++ -E"
+
+ This is not guaranteed to produce working output in all cases, you
+may have to build one architecture at a time and combine the results
+using the `lipo' tool if you have problems.
+
+Installation Names
+==================
+
+ By default, `make install' installs the package's commands under
+`/usr/local/bin', include files under `/usr/local/include', etc. You
+can specify an installation prefix other than `/usr/local' by giving
+`configure' the option `--prefix=PREFIX', where PREFIX must be an
+absolute file name.
+
+ You can specify separate installation prefixes for
+architecture-specific files and architecture-independent files. If you
+pass the option `--exec-prefix=PREFIX' to `configure', the package uses
+PREFIX as the prefix for installing programs and libraries.
+Documentation and other data files still use the regular prefix.
+
+ In addition, if you use an unusual directory layout you can give
+options like `--bindir=DIR' to specify different values for particular
+kinds of files. Run `configure --help' for a list of the directories
+you can set and what kinds of files go in them. In general, the
+default for these options is expressed in terms of `${prefix}', so that
+specifying just `--prefix' will affect all of the other directory
+specifications that were not explicitly provided.
+
+ The most portable way to affect installation locations is to pass the
+correct locations to `configure'; however, many packages provide one or
+both of the following shortcuts of passing variable assignments to the
+`make install' command line to change installation locations without
+having to reconfigure or recompile.
+
+ The first method involves providing an override variable for each
+affected directory. For example, `make install
+prefix=/alternate/directory' will choose an alternate location for all
+directory configuration variables that were expressed in terms of
+`${prefix}'. Any directories that were specified during `configure',
+but not in terms of `${prefix}', must each be overridden at install
+time for the entire installation to be relocated. The approach of
+makefile variable overrides for each directory variable is required by
+the GNU Coding Standards, and ideally causes no recompilation.
+However, some platforms have known limitations with the semantics of
+shared libraries that end up requiring recompilation when using this
+method, particularly noticeable in packages that use GNU Libtool.
+
+ The second method involves providing the `DESTDIR' variable. For
+example, `make install DESTDIR=/alternate/directory' will prepend
+`/alternate/directory' before all installation names. The approach of
+`DESTDIR' overrides is not required by the GNU Coding Standards, and
+does not work on platforms that have drive letters. On the other hand,
+it does better at avoiding recompilation issues, and works well even
+when some directory options were not specified in terms of `${prefix}'
+at `configure' time.
+
+Optional Features
+=================
+
+ If the package supports it, you can cause programs to be installed
+with an extra prefix or suffix on their names by giving `configure' the
+option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
+
+ Some packages pay attention to `--enable-FEATURE' options to
+`configure', where FEATURE indicates an optional part of the package.
+They may also pay attention to `--with-PACKAGE' options, where PACKAGE
+is something like `gnu-as' or `x' (for the X Window System). The
+`README' should mention any `--enable-' and `--with-' options that the
+package recognizes.
+
+ For packages that use the X Window System, `configure' can usually
+find the X include and library files automatically, but if it doesn't,
+you can use the `configure' options `--x-includes=DIR' and
+`--x-libraries=DIR' to specify their locations.
+
+ Some packages offer the ability to configure how verbose the
+execution of `make' will be. For these packages, running `./configure
+--enable-silent-rules' sets the default to minimal output, which can be
+overridden with `make V=1'; while running `./configure
+--disable-silent-rules' sets the default to verbose, which can be
+overridden with `make V=0'.
+
+Particular systems
+==================
+
+ On HP-UX, the default C compiler is not ANSI C compatible. If GNU
+CC is not installed, it is recommended to use the following options in
+order to use an ANSI C compiler:
+
+ ./configure CC="cc -Ae -D_XOPEN_SOURCE=500"
+
+and if that doesn't work, install pre-built binaries of GCC for HP-UX.
+
+ HP-UX `make' updates targets which have the same time stamps as
+their prerequisites, which makes it generally unusable when shipped
+generated files such as `configure' are involved. Use GNU `make'
+instead.
+
+ On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot
+parse its `<wchar.h>' header file. The option `-nodtk' can be used as
+a workaround. If GNU CC is not installed, it is therefore recommended
+to try
+
+ ./configure CC="cc"
+
+and if that doesn't work, try
+
+ ./configure CC="cc -nodtk"
+
+ On Solaris, don't put `/usr/ucb' early in your `PATH'. This
+directory contains several dysfunctional programs; working variants of
+these programs are available in `/usr/bin'. So, if you need `/usr/ucb'
+in your `PATH', put it _after_ `/usr/bin'.
+
+ On Haiku, software installed for all users goes in `/boot/common',
+not `/usr/local'. It is recommended to use the following options:
+
+ ./configure --prefix=/boot/common
+
+Specifying the System Type
+==========================
+
+ There may be some features `configure' cannot figure out
+automatically, but needs to determine by the type of machine the package
+will run on. Usually, assuming the package is built to be run on the
+_same_ architectures, `configure' can figure that out, but if it prints
+a message saying it cannot guess the machine type, give it the
+`--build=TYPE' option. TYPE can either be a short name for the system
+type, such as `sun4', or a canonical name which has the form:
+
+ CPU-COMPANY-SYSTEM
+
+where SYSTEM can have one of these forms:
+
+ OS
+ KERNEL-OS
+
+ See the file `config.sub' for the possible values of each field. If
+`config.sub' isn't included in this package, then this package doesn't
+need to know the machine type.
+
+ If you are _building_ compiler tools for cross-compiling, you should
+use the option `--target=TYPE' to select the type of system they will
+produce code for.
+
+ If you want to _use_ a cross compiler, that generates code for a
+platform different from the build platform, you should specify the
+"host" platform (i.e., that on which the generated programs will
+eventually be run) with `--host=TYPE'.
+
+Sharing Defaults
+================
+
+ If you want to set default values for `configure' scripts to share,
+you can create a site shell script called `config.site' that gives
+default values for variables like `CC', `cache_file', and `prefix'.
+`configure' looks for `PREFIX/share/config.site' if it exists, then
+`PREFIX/etc/config.site' if it exists. Or, you can set the
+`CONFIG_SITE' environment variable to the location of the site script.
+A warning: not all `configure' scripts look for a site script.
+
+Defining Variables
+==================
+
+ Variables not defined in a site shell script can be set in the
+environment passed to `configure'. However, some packages may run
+configure again during the build, and the customized values of these
+variables may be lost. In order to avoid this problem, you should set
+them in the `configure' command line, using `VAR=value'. For example:
+
+ ./configure CC=/usr/local2/bin/gcc
+
+causes the specified `gcc' to be used as the C compiler (unless it is
+overridden in the site shell script).
+
+Unfortunately, this technique does not work for `CONFIG_SHELL' due to
+an Autoconf bug. Until the bug is fixed you can use this workaround:
+
+ CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash
+
+`configure' Invocation
+======================
+
+ `configure' recognizes the following options to control how it
+operates.
+
+`--help'
+`-h'
+ Print a summary of all of the options to `configure', and exit.
+
+`--help=short'
+`--help=recursive'
+ Print a summary of the options unique to this package's
+ `configure', and exit. The `short' variant lists options used
+ only in the top level, while the `recursive' variant lists options
+ also present in any nested packages.
+
+`--version'
+`-V'
+ Print the version of Autoconf used to generate the `configure'
+ script, and exit.
+
+`--cache-file=FILE'
+ Enable the cache: use and save the results of the tests in FILE,
+ traditionally `config.cache'. FILE defaults to `/dev/null' to
+ disable caching.
+
+`--config-cache'
+`-C'
+ Alias for `--cache-file=config.cache'.
+
+`--quiet'
+`--silent'
+`-q'
+ Do not print messages saying which checks are being made. To
+ suppress all normal output, redirect it to `/dev/null' (any error
+ messages will still be shown).
+
+`--srcdir=DIR'
+ Look for the package's source code in directory DIR. Usually
+ `configure' can determine that directory automatically.
+
+`--prefix=DIR'
+ Use DIR as the installation prefix. *note Installation Names::
+ for more details, including other options available for fine-tuning
+ the installation locations.
+
+`--no-create'
+`-n'
+ Run the configure checks, but stop before creating any output
+ files.
+
+`configure' also accepts some other, not widely useful, options. Run
+`configure --help' for more details.
+
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/Makefile.am b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/Makefile.am
new file mode 100644
index 00000000..735bc12e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/Makefile.am
@@ -0,0 +1,23 @@
+ACLOCAL_AMFLAGS = -I m4
+
+# Library.
+lib_LTLIBRARIES = libsnappy.la
+libsnappy_la_SOURCES = snappy.cc snappy-sinksource.cc snappy-stubs-internal.cc snappy-c.cc
+libsnappy_la_LDFLAGS = -version-info $(SNAPPY_LTVERSION)
+
+include_HEADERS = snappy.h snappy-sinksource.h snappy-stubs-public.h snappy-c.h
+noinst_HEADERS = snappy-internal.h snappy-stubs-internal.h snappy-test.h
+
+# Unit tests and benchmarks.
+snappy_unittest_CPPFLAGS = $(gflags_CFLAGS) $(GTEST_CPPFLAGS)
+snappy_unittest_SOURCES = snappy_unittest.cc snappy-test.cc
+snappy_unittest_LDFLAGS = $(GTEST_LDFLAGS)
+snappy_unittest_LDADD = libsnappy.la $(UNITTEST_LIBS) $(gflags_LIBS) $(GTEST_LIBS)
+TESTS = snappy_unittest
+noinst_PROGRAMS = $(TESTS)
+
+EXTRA_DIST = autogen.sh testdata/alice29.txt testdata/asyoulik.txt testdata/baddata1.snappy testdata/baddata2.snappy testdata/baddata3.snappy testdata/geo.protodata testdata/fireworks.jpeg testdata/html testdata/html_x_4 testdata/kppkn.gtb testdata/lcet10.txt testdata/paper-100k.pdf testdata/plrabn12.txt testdata/urls.10K
+dist_doc_DATA = ChangeLog COPYING INSTALL NEWS README format_description.txt framing_format.txt
+
+libtool: $(LIBTOOL_DEPS)
+ $(SHELL) ./config.status --recheck
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/Makefile.in b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/Makefile.in
new file mode 100644
index 00000000..31003adf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/Makefile.in
@@ -0,0 +1,957 @@
+# Makefile.in generated by automake 1.11.3 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+
+
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+TESTS = snappy_unittest$(EXEEXT)
+noinst_PROGRAMS = $(am__EXEEXT_1)
+subdir = .
+DIST_COMMON = README $(am__configure_deps) $(dist_doc_DATA) \
+ $(include_HEADERS) $(noinst_HEADERS) $(srcdir)/Makefile.am \
+ $(srcdir)/Makefile.in $(srcdir)/config.h.in \
+ $(srcdir)/snappy-stubs-public.h.in $(top_srcdir)/configure \
+ AUTHORS COPYING ChangeLog INSTALL NEWS config.guess config.sub \
+ depcomp install-sh ltmain.sh missing
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/gtest.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
+ configure.lineno config.status.lineno
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = config.h
+CONFIG_CLEAN_FILES = snappy-stubs-public.h
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(docdir)" \
+ "$(DESTDIR)$(includedir)"
+LTLIBRARIES = $(lib_LTLIBRARIES)
+libsnappy_la_LIBADD =
+am_libsnappy_la_OBJECTS = snappy.lo snappy-sinksource.lo \
+ snappy-stubs-internal.lo snappy-c.lo
+libsnappy_la_OBJECTS = $(am_libsnappy_la_OBJECTS)
+libsnappy_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
+ $(CXXFLAGS) $(libsnappy_la_LDFLAGS) $(LDFLAGS) -o $@
+am__EXEEXT_1 = snappy_unittest$(EXEEXT)
+PROGRAMS = $(noinst_PROGRAMS)
+am_snappy_unittest_OBJECTS = \
+ snappy_unittest-snappy_unittest.$(OBJEXT) \
+ snappy_unittest-snappy-test.$(OBJEXT)
+snappy_unittest_OBJECTS = $(am_snappy_unittest_OBJECTS)
+am__DEPENDENCIES_1 =
+snappy_unittest_DEPENDENCIES = libsnappy.la $(am__DEPENDENCIES_1) \
+ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
+snappy_unittest_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
+ $(CXXFLAGS) $(snappy_unittest_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+CXXLD = $(CXX)
+CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = $(libsnappy_la_SOURCES) $(snappy_unittest_SOURCES)
+DIST_SOURCES = $(libsnappy_la_SOURCES) $(snappy_unittest_SOURCES)
+DATA = $(dist_doc_DATA)
+HEADERS = $(include_HEADERS) $(noinst_HEADERS)
+ETAGS = etags
+CTAGS = ctags
+am__tty_colors = \
+red=; grn=; lgn=; blu=; std=
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+am__remove_distdir = \
+ if test -d "$(distdir)"; then \
+ find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
+ && rm -rf "$(distdir)" \
+ || { sleep 5 && rm -rf "$(distdir)"; }; \
+ else :; fi
+DIST_ARCHIVES = $(distdir).tar.gz
+GZIP_ENV = --best
+distuninstallcheck_listfiles = find . -type f -print
+am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \
+ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$'
+distcleancheck_listfiles = find . -type f -print
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GREP = @GREP@
+GTEST_CONFIG = @GTEST_CONFIG@
+GTEST_CPPFLAGS = @GTEST_CPPFLAGS@
+GTEST_CXXFLAGS = @GTEST_CXXFLAGS@
+GTEST_LDFLAGS = @GTEST_LDFLAGS@
+GTEST_LIBS = @GTEST_LIBS@
+GTEST_VERSION = @GTEST_VERSION@
+HAVE_GTEST = @HAVE_GTEST@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIBTOOL_DEPS = @LIBTOOL_DEPS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+RANLIB = @RANLIB@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SNAPPY_LTVERSION = @SNAPPY_LTVERSION@
+SNAPPY_MAJOR = @SNAPPY_MAJOR@
+SNAPPY_MINOR = @SNAPPY_MINOR@
+SNAPPY_PATCHLEVEL = @SNAPPY_PATCHLEVEL@
+STRIP = @STRIP@
+UNITTEST_LIBS = @UNITTEST_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_cv_have_stddef_h = @ac_cv_have_stddef_h@
+ac_cv_have_stdint_h = @ac_cv_have_stdint_h@
+ac_cv_have_sys_uio_h = @ac_cv_have_sys_uio_h@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+gflags_CFLAGS = @gflags_CFLAGS@
+gflags_LIBS = @gflags_LIBS@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+ACLOCAL_AMFLAGS = -I m4
+
+# Library.
+lib_LTLIBRARIES = libsnappy.la
+libsnappy_la_SOURCES = snappy.cc snappy-sinksource.cc snappy-stubs-internal.cc snappy-c.cc
+libsnappy_la_LDFLAGS = -version-info $(SNAPPY_LTVERSION)
+include_HEADERS = snappy.h snappy-sinksource.h snappy-stubs-public.h snappy-c.h
+noinst_HEADERS = snappy-internal.h snappy-stubs-internal.h snappy-test.h
+
+# Unit tests and benchmarks.
+snappy_unittest_CPPFLAGS = $(gflags_CFLAGS) $(GTEST_CPPFLAGS)
+snappy_unittest_SOURCES = snappy_unittest.cc snappy-test.cc
+snappy_unittest_LDFLAGS = $(GTEST_LDFLAGS)
+snappy_unittest_LDADD = libsnappy.la $(UNITTEST_LIBS) $(gflags_LIBS) $(GTEST_LIBS)
+EXTRA_DIST = autogen.sh testdata/alice29.txt testdata/asyoulik.txt testdata/baddata1.snappy testdata/baddata2.snappy testdata/baddata3.snappy testdata/geo.protodata testdata/fireworks.jpeg testdata/html testdata/html_x_4 testdata/kppkn.gtb testdata/lcet10.txt testdata/paper-100k.pdf testdata/plrabn12.txt testdata/urls.10K
+dist_doc_DATA = ChangeLog COPYING INSTALL NEWS README format_description.txt framing_format.txt
+all: config.h
+ $(MAKE) $(AM_MAKEFLAGS) all-am
+
+.SUFFIXES:
+.SUFFIXES: .cc .lo .o .obj
+am--refresh: Makefile
+ @:
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ echo ' cd $(srcdir) && $(AUTOMAKE) --gnu'; \
+ $(am__cd) $(srcdir) && $(AUTOMAKE) --gnu \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ echo ' $(SHELL) ./config.status'; \
+ $(SHELL) ./config.status;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ $(SHELL) ./config.status --recheck
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ $(am__cd) $(srcdir) && $(AUTOCONF)
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
+$(am__aclocal_m4_deps):
+
+config.h: stamp-h1
+ @if test ! -f $@; then rm -f stamp-h1; else :; fi
+ @if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi
+
+stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
+ @rm -f stamp-h1
+ cd $(top_builddir) && $(SHELL) ./config.status config.h
+$(srcdir)/config.h.in: $(am__configure_deps)
+ ($(am__cd) $(top_srcdir) && $(AUTOHEADER))
+ rm -f stamp-h1
+ touch $@
+
+distclean-hdr:
+ -rm -f config.h stamp-h1
+snappy-stubs-public.h: $(top_builddir)/config.status $(srcdir)/snappy-stubs-public.h.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+install-libLTLIBRARIES: $(lib_LTLIBRARIES)
+ @$(NORMAL_INSTALL)
+ test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)"
+ @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
+ list2=; for p in $$list; do \
+ if test -f $$p; then \
+ list2="$$list2 $$p"; \
+ else :; fi; \
+ done; \
+ test -z "$$list2" || { \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
+ }
+
+uninstall-libLTLIBRARIES:
+ @$(NORMAL_UNINSTALL)
+ @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
+ for p in $$list; do \
+ $(am__strip_dir) \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \
+ done
+
+clean-libLTLIBRARIES:
+ -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
+ @list='$(lib_LTLIBRARIES)'; for p in $$list; do \
+ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+ test "$$dir" != "$$p" || dir=.; \
+ echo "rm -f \"$${dir}/so_locations\""; \
+ rm -f "$${dir}/so_locations"; \
+ done
+libsnappy.la: $(libsnappy_la_OBJECTS) $(libsnappy_la_DEPENDENCIES) $(EXTRA_libsnappy_la_DEPENDENCIES)
+ $(libsnappy_la_LINK) -rpath $(libdir) $(libsnappy_la_OBJECTS) $(libsnappy_la_LIBADD) $(LIBS)
+
+clean-noinstPROGRAMS:
+ @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \
+ echo " rm -f" $$list; \
+ rm -f $$list || exit $$?; \
+ test -n "$(EXEEXT)" || exit 0; \
+ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+ echo " rm -f" $$list; \
+ rm -f $$list
+snappy_unittest$(EXEEXT): $(snappy_unittest_OBJECTS) $(snappy_unittest_DEPENDENCIES) $(EXTRA_snappy_unittest_DEPENDENCIES)
+ @rm -f snappy_unittest$(EXEEXT)
+ $(snappy_unittest_LINK) $(snappy_unittest_OBJECTS) $(snappy_unittest_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/snappy-c.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/snappy-sinksource.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/snappy-stubs-internal.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/snappy.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/snappy_unittest-snappy-test.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/snappy_unittest-snappy_unittest.Po@am__quote@
+
+.cc.o:
+@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $<
+
+.cc.obj:
+@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.cc.lo:
+@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $<
+
+snappy_unittest-snappy_unittest.o: snappy_unittest.cc
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(snappy_unittest_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT snappy_unittest-snappy_unittest.o -MD -MP -MF $(DEPDIR)/snappy_unittest-snappy_unittest.Tpo -c -o snappy_unittest-snappy_unittest.o `test -f 'snappy_unittest.cc' || echo '$(srcdir)/'`snappy_unittest.cc
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/snappy_unittest-snappy_unittest.Tpo $(DEPDIR)/snappy_unittest-snappy_unittest.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='snappy_unittest.cc' object='snappy_unittest-snappy_unittest.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(snappy_unittest_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o snappy_unittest-snappy_unittest.o `test -f 'snappy_unittest.cc' || echo '$(srcdir)/'`snappy_unittest.cc
+
+snappy_unittest-snappy_unittest.obj: snappy_unittest.cc
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(snappy_unittest_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT snappy_unittest-snappy_unittest.obj -MD -MP -MF $(DEPDIR)/snappy_unittest-snappy_unittest.Tpo -c -o snappy_unittest-snappy_unittest.obj `if test -f 'snappy_unittest.cc'; then $(CYGPATH_W) 'snappy_unittest.cc'; else $(CYGPATH_W) '$(srcdir)/snappy_unittest.cc'; fi`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/snappy_unittest-snappy_unittest.Tpo $(DEPDIR)/snappy_unittest-snappy_unittest.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='snappy_unittest.cc' object='snappy_unittest-snappy_unittest.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(snappy_unittest_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o snappy_unittest-snappy_unittest.obj `if test -f 'snappy_unittest.cc'; then $(CYGPATH_W) 'snappy_unittest.cc'; else $(CYGPATH_W) '$(srcdir)/snappy_unittest.cc'; fi`
+
+snappy_unittest-snappy-test.o: snappy-test.cc
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(snappy_unittest_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT snappy_unittest-snappy-test.o -MD -MP -MF $(DEPDIR)/snappy_unittest-snappy-test.Tpo -c -o snappy_unittest-snappy-test.o `test -f 'snappy-test.cc' || echo '$(srcdir)/'`snappy-test.cc
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/snappy_unittest-snappy-test.Tpo $(DEPDIR)/snappy_unittest-snappy-test.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='snappy-test.cc' object='snappy_unittest-snappy-test.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(snappy_unittest_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o snappy_unittest-snappy-test.o `test -f 'snappy-test.cc' || echo '$(srcdir)/'`snappy-test.cc
+
+snappy_unittest-snappy-test.obj: snappy-test.cc
+@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(snappy_unittest_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT snappy_unittest-snappy-test.obj -MD -MP -MF $(DEPDIR)/snappy_unittest-snappy-test.Tpo -c -o snappy_unittest-snappy-test.obj `if test -f 'snappy-test.cc'; then $(CYGPATH_W) 'snappy-test.cc'; else $(CYGPATH_W) '$(srcdir)/snappy-test.cc'; fi`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/snappy_unittest-snappy-test.Tpo $(DEPDIR)/snappy_unittest-snappy-test.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='snappy-test.cc' object='snappy_unittest-snappy-test.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(snappy_unittest_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o snappy_unittest-snappy-test.obj `if test -f 'snappy-test.cc'; then $(CYGPATH_W) 'snappy-test.cc'; else $(CYGPATH_W) '$(srcdir)/snappy-test.cc'; fi`
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+distclean-libtool:
+ -rm -f libtool config.lt
+install-dist_docDATA: $(dist_doc_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(docdir)" || $(MKDIR_P) "$(DESTDIR)$(docdir)"
+ @list='$(dist_doc_DATA)'; test -n "$(docdir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(docdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(docdir)" || exit $$?; \
+ done
+
+uninstall-dist_docDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_doc_DATA)'; test -n "$(docdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(docdir)'; $(am__uninstall_files_from_dir)
+install-includeHEADERS: $(include_HEADERS)
+ @$(NORMAL_INSTALL)
+ test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)"
+ @list='$(include_HEADERS)'; test -n "$(includedir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(includedir)'"; \
+ $(INSTALL_HEADER) $$files "$(DESTDIR)$(includedir)" || exit $$?; \
+ done
+
+uninstall-includeHEADERS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(include_HEADERS)'; test -n "$(includedir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir)
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+check-TESTS: $(TESTS)
+ @failed=0; all=0; xfail=0; xpass=0; skip=0; \
+ srcdir=$(srcdir); export srcdir; \
+ list=' $(TESTS) '; \
+ $(am__tty_colors); \
+ if test -n "$$list"; then \
+ for tst in $$list; do \
+ if test -f ./$$tst; then dir=./; \
+ elif test -f $$tst; then dir=; \
+ else dir="$(srcdir)/"; fi; \
+ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \
+ all=`expr $$all + 1`; \
+ case " $(XFAIL_TESTS) " in \
+ *[\ \ ]$$tst[\ \ ]*) \
+ xpass=`expr $$xpass + 1`; \
+ failed=`expr $$failed + 1`; \
+ col=$$red; res=XPASS; \
+ ;; \
+ *) \
+ col=$$grn; res=PASS; \
+ ;; \
+ esac; \
+ elif test $$? -ne 77; then \
+ all=`expr $$all + 1`; \
+ case " $(XFAIL_TESTS) " in \
+ *[\ \ ]$$tst[\ \ ]*) \
+ xfail=`expr $$xfail + 1`; \
+ col=$$lgn; res=XFAIL; \
+ ;; \
+ *) \
+ failed=`expr $$failed + 1`; \
+ col=$$red; res=FAIL; \
+ ;; \
+ esac; \
+ else \
+ skip=`expr $$skip + 1`; \
+ col=$$blu; res=SKIP; \
+ fi; \
+ echo "$${col}$$res$${std}: $$tst"; \
+ done; \
+ if test "$$all" -eq 1; then \
+ tests="test"; \
+ All=""; \
+ else \
+ tests="tests"; \
+ All="All "; \
+ fi; \
+ if test "$$failed" -eq 0; then \
+ if test "$$xfail" -eq 0; then \
+ banner="$$All$$all $$tests passed"; \
+ else \
+ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \
+ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \
+ fi; \
+ else \
+ if test "$$xpass" -eq 0; then \
+ banner="$$failed of $$all $$tests failed"; \
+ else \
+ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \
+ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \
+ fi; \
+ fi; \
+ dashes="$$banner"; \
+ skipped=""; \
+ if test "$$skip" -ne 0; then \
+ if test "$$skip" -eq 1; then \
+ skipped="($$skip test was not run)"; \
+ else \
+ skipped="($$skip tests were not run)"; \
+ fi; \
+ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \
+ dashes="$$skipped"; \
+ fi; \
+ report=""; \
+ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \
+ report="Please report to $(PACKAGE_BUGREPORT)"; \
+ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \
+ dashes="$$report"; \
+ fi; \
+ dashes=`echo "$$dashes" | sed s/./=/g`; \
+ if test "$$failed" -eq 0; then \
+ col="$$grn"; \
+ else \
+ col="$$red"; \
+ fi; \
+ echo "$${col}$$dashes$${std}"; \
+ echo "$${col}$$banner$${std}"; \
+ test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \
+ test -z "$$report" || echo "$${col}$$report$${std}"; \
+ echo "$${col}$$dashes$${std}"; \
+ test "$$failed" -eq 0; \
+ else :; fi
+
+distdir: $(DISTFILES)
+ $(am__remove_distdir)
+ test -d "$(distdir)" || mkdir "$(distdir)"
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ -test -n "$(am__skip_mode_fix)" \
+ || find "$(distdir)" -type d ! -perm -755 \
+ -exec chmod u+rwx,go+rx {} \; -o \
+ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
+ || chmod -R a+r "$(distdir)"
+dist-gzip: distdir
+ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+ $(am__remove_distdir)
+
+dist-bzip2: distdir
+ tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
+ $(am__remove_distdir)
+
+dist-lzip: distdir
+ tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz
+ $(am__remove_distdir)
+
+dist-lzma: distdir
+ tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma
+ $(am__remove_distdir)
+
+dist-xz: distdir
+ tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz
+ $(am__remove_distdir)
+
+dist-tarZ: distdir
+ tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
+ $(am__remove_distdir)
+
+dist-shar: distdir
+ shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
+ $(am__remove_distdir)
+
+dist-zip: distdir
+ -rm -f $(distdir).zip
+ zip -rq $(distdir).zip $(distdir)
+ $(am__remove_distdir)
+
+dist dist-all: distdir
+ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+ $(am__remove_distdir)
+
+# This target untars the dist file and tries a VPATH configuration. Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+ case '$(DIST_ARCHIVES)' in \
+ *.tar.gz*) \
+ GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\
+ *.tar.bz2*) \
+ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
+ *.tar.lzma*) \
+ lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\
+ *.tar.lz*) \
+ lzip -dc $(distdir).tar.lz | $(am__untar) ;;\
+ *.tar.xz*) \
+ xz -dc $(distdir).tar.xz | $(am__untar) ;;\
+ *.tar.Z*) \
+ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
+ *.shar.gz*) \
+ GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\
+ *.zip*) \
+ unzip $(distdir).zip ;;\
+ esac
+ chmod -R a-w $(distdir); chmod a+w $(distdir)
+ mkdir $(distdir)/_build
+ mkdir $(distdir)/_inst
+ chmod a-w $(distdir)
+ test -d $(distdir)/_build || exit 0; \
+ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
+ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
+ && am__cwd=`pwd` \
+ && $(am__cd) $(distdir)/_build \
+ && ../configure --srcdir=.. --prefix="$$dc_install_base" \
+ $(AM_DISTCHECK_CONFIGURE_FLAGS) \
+ $(DISTCHECK_CONFIGURE_FLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) dvi \
+ && $(MAKE) $(AM_MAKEFLAGS) check \
+ && $(MAKE) $(AM_MAKEFLAGS) install \
+ && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+ && $(MAKE) $(AM_MAKEFLAGS) uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
+ distuninstallcheck \
+ && chmod -R a-w "$$dc_install_base" \
+ && ({ \
+ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
+ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
+ } || { rm -rf "$$dc_destdir"; exit 1; }) \
+ && rm -rf "$$dc_destdir" \
+ && $(MAKE) $(AM_MAKEFLAGS) dist \
+ && rm -rf $(DIST_ARCHIVES) \
+ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
+ && cd "$$am__cwd" \
+ || exit 1
+ $(am__remove_distdir)
+ @(echo "$(distdir) archives ready for distribution: "; \
+ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
+ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
+distuninstallcheck:
+ @test -n '$(distuninstallcheck_dir)' || { \
+ echo 'ERROR: trying to run $@ with an empty' \
+ '$$(distuninstallcheck_dir)' >&2; \
+ exit 1; \
+ }; \
+ $(am__cd) '$(distuninstallcheck_dir)' || { \
+ echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \
+ exit 1; \
+ }; \
+ test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left after uninstall:" ; \
+ if test -n "$(DESTDIR)"; then \
+ echo " (check DESTDIR support)"; \
+ fi ; \
+ $(distuninstallcheck_listfiles) ; \
+ exit 1; } >&2
+distcleancheck: distclean
+ @if test '$(srcdir)' = . ; then \
+ echo "ERROR: distcleancheck can only run from a VPATH build" ; \
+ exit 1 ; \
+ fi
+ @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left in build directory after distclean:" ; \
+ $(distcleancheck_listfiles) ; \
+ exit 1; } >&2
+check-am: all-am
+ $(MAKE) $(AM_MAKEFLAGS) check-TESTS
+check: check-am
+all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(DATA) $(HEADERS) \
+ config.h
+installdirs:
+ for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(docdir)" "$(DESTDIR)$(includedir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \
+ clean-noinstPROGRAMS mostlyclean-am
+
+distclean: distclean-am
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-hdr distclean-libtool distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-dist_docDATA install-includeHEADERS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-libLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -rf $(top_srcdir)/autom4te.cache
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_docDATA uninstall-includeHEADERS \
+ uninstall-libLTLIBRARIES
+
+.MAKE: all check-am install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am am--refresh check check-TESTS check-am \
+ clean clean-generic clean-libLTLIBRARIES clean-libtool \
+ clean-noinstPROGRAMS ctags dist dist-all dist-bzip2 dist-gzip \
+ dist-lzip dist-lzma dist-shar dist-tarZ dist-xz dist-zip \
+ distcheck distclean distclean-compile distclean-generic \
+ distclean-hdr distclean-libtool distclean-tags distcleancheck \
+ distdir distuninstallcheck dvi dvi-am html html-am info \
+ info-am install install-am install-data install-data-am \
+ install-dist_docDATA install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am \
+ install-includeHEADERS install-info install-info-am \
+ install-libLTLIBRARIES install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ tags uninstall uninstall-am uninstall-dist_docDATA \
+ uninstall-includeHEADERS uninstall-libLTLIBRARIES
+
+
+libtool: $(LIBTOOL_DEPS)
+ $(SHELL) ./config.status --recheck
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/NEWS b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/NEWS
new file mode 100644
index 00000000..27a5b176
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/NEWS
@@ -0,0 +1,128 @@
+Snappy v1.1.2, February 28th 2014:
+
+This is a maintenance release with no changes to the actual library
+source code.
+
+ * Stop distributing benchmark data files that have unclear
+ or unsuitable licensing.
+
+ * Add support for padding chunks in the framing format.
+
+
+Snappy v1.1.1, October 15th 2013:
+
+ * Add support for uncompressing to iovecs (scatter I/O).
+ The bulk of this patch was contributed by Mohit Aron.
+
+ * Speed up decompression by ~2%; much more so (~13-20%) on
+ a few benchmarks on given compilers and CPUs.
+
+ * Fix a few issues with MSVC compilation.
+
+ * Support truncated test data in the benchmark.
+
+
+Snappy v1.1.0, January 18th 2013:
+
+ * Snappy now uses 64 kB block size instead of 32 kB. On average,
+ this means it compresses about 3% denser (more so for some
+ inputs), at the same or better speeds.
+
+ * libsnappy no longer depends on iostream.
+
+ * Some small performance improvements in compression on x86
+ (0.5–1%).
+
+ * Various portability fixes for ARM-based platforms, for MSVC,
+ and for GNU/Hurd.
+
+
+Snappy v1.0.5, February 24th 2012:
+
+ * More speed improvements. Exactly how big will depend on
+ the architecture:
+
+ - 3–10% faster decompression for the base case (x86-64).
+
+ - ARMv7 and higher can now use unaligned accesses,
+ and will see about 30% faster decompression and
+ 20–40% faster compression.
+
+ - 32-bit platforms (ARM and 32-bit x86) will see 2–5%
+ faster compression.
+
+ These are all cumulative (e.g., ARM gets all three speedups).
+
+ * Fixed an issue where the unit test would crash on system
+ with less than 256 MB address space available,
+ e.g. some embedded platforms.
+
+ * Added a framing format description, for use over e.g. HTTP,
+ or for a command-line compressor. We do not have any
+ implementations of this at the current point, but there seems
+ to be enough of a general interest in the topic.
+ Also make the format description slightly clearer.
+
+ * Remove some compile-time warnings in -Wall
+ (mostly signed/unsigned comparisons), for easier embedding
+ into projects that use -Wall -Werror.
+
+
+Snappy v1.0.4, September 15th 2011:
+
+ * Speeded up the decompressor somewhat; typically about 2–8%
+ for Core i7, in 64-bit mode (comparable for Opteron).
+ Somewhat more for some tests, almost no gain for others.
+
+ * Make Snappy compile on certain platforms it didn't before
+ (Solaris with SunPro C++, HP-UX, AIX).
+
+ * Correct some minor errors in the format description.
+
+
+Snappy v1.0.3, June 2nd 2011:
+
+ * Speeded up the decompressor somewhat; about 3-6% for Core 2,
+ 6-13% for Core i7, and 5-12% for Opteron (all in 64-bit mode).
+
+ * Added compressed format documentation. This text is new,
+ but an earlier version from Zeev Tarantov was used as reference.
+
+ * Only link snappy_unittest against -lz and other autodetected
+ libraries, not libsnappy.so (which doesn't need any such dependency).
+
+ * Fixed some display issues in the microbenchmarks, one of which would
+ frequently make the test crash on GNU/Hurd.
+
+
+Snappy v1.0.2, April 29th 2011:
+
+ * Relicense to a BSD-type license.
+
+ * Added C bindings, contributed by Martin Gieseking.
+
+ * More Win32 fixes, in particular for MSVC.
+
+ * Replace geo.protodata with a newer version.
+
+ * Fix timing inaccuracies in the unit test when comparing Snappy
+ to other algorithms.
+
+
+Snappy v1.0.1, March 25th 2011:
+
+This is a maintenance release, mostly containing minor fixes.
+There is no new functionality. The most important fixes include:
+
+ * The COPYING file and all licensing headers now correctly state that
+ Snappy is licensed under the Apache 2.0 license.
+
+ * snappy_unittest should now compile natively under Windows,
+ as well as on embedded systems with no mmap().
+
+ * Various autotools nits have been fixed.
+
+
+Snappy v1.0, March 17th 2011:
+
+ * Initial version.
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/README b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/README
new file mode 100644
index 00000000..3bc8888f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/README
@@ -0,0 +1,135 @@
+Snappy, a fast compressor/decompressor.
+
+
+Introduction
+============
+
+Snappy is a compression/decompression library. It does not aim for maximum
+compression, or compatibility with any other compression library; instead,
+it aims for very high speeds and reasonable compression. For instance,
+compared to the fastest mode of zlib, Snappy is an order of magnitude faster
+for most inputs, but the resulting compressed files are anywhere from 20% to
+100% bigger. (For more information, see "Performance", below.)
+
+Snappy has the following properties:
+
+ * Fast: Compression speeds at 250 MB/sec and beyond, with no assembler code.
+ See "Performance" below.
+ * Stable: Over the last few years, Snappy has compressed and decompressed
+ petabytes of data in Google's production environment. The Snappy bitstream
+ format is stable and will not change between versions.
+ * Robust: The Snappy decompressor is designed not to crash in the face of
+ corrupted or malicious input.
+ * Free and open source software: Snappy is licensed under a BSD-type license.
+ For more information, see the included COPYING file.
+
+Snappy has previously been called "Zippy" in some Google presentations
+and the like.
+
+
+Performance
+===========
+
+Snappy is intended to be fast. On a single core of a Core i7 processor
+in 64-bit mode, it compresses at about 250 MB/sec or more and decompresses at
+about 500 MB/sec or more. (These numbers are for the slowest inputs in our
+benchmark suite; others are much faster.) In our tests, Snappy usually
+is faster than algorithms in the same class (e.g. LZO, LZF, FastLZ, QuickLZ,
+etc.) while achieving comparable compression ratios.
+
+Typical compression ratios (based on the benchmark suite) are about 1.5-1.7x
+for plain text, about 2-4x for HTML, and of course 1.0x for JPEGs, PNGs and
+other already-compressed data. Similar numbers for zlib in its fastest mode
+are 2.6-2.8x, 3-7x and 1.0x, respectively. More sophisticated algorithms are
+capable of achieving yet higher compression rates, although usually at the
+expense of speed. Of course, compression ratio will vary significantly with
+the input.
+
+Although Snappy should be fairly portable, it is primarily optimized
+for 64-bit x86-compatible processors, and may run slower in other environments.
+In particular:
+
+ - Snappy uses 64-bit operations in several places to process more data at
+ once than would otherwise be possible.
+ - Snappy assumes unaligned 32- and 64-bit loads and stores are cheap.
+ On some platforms, these must be emulated with single-byte loads
+ and stores, which is much slower.
+ - Snappy assumes little-endian throughout, and needs to byte-swap data in
+ several places if running on a big-endian platform.
+
+Experience has shown that even heavily tuned code can be improved.
+Performance optimizations, whether for 64-bit x86 or other platforms,
+are of course most welcome; see "Contact", below.
+
+
+Usage
+=====
+
+Note that Snappy, both the implementation and the main interface,
+is written in C++. However, several third-party bindings to other languages
+are available; see the Google Code page at http://code.google.com/p/snappy/
+for more information. Also, if you want to use Snappy from C code, you can
+use the included C bindings in snappy-c.h.
+
+To use Snappy from your own C++ program, include the file "snappy.h" from
+your calling file, and link against the compiled library.
+
+There are many ways to call Snappy, but the simplest possible is
+
+ snappy::Compress(input.data(), input.size(), &output);
+
+and similarly
+
+ snappy::Uncompress(input.data(), input.size(), &output);
+
+where "input" and "output" are both instances of std::string.
+
+There are other interfaces that are more flexible in various ways, including
+support for custom (non-array) input sources. See the header file for more
+information.
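+
+As an illustration only (this example is not part of the upstream README), the
+following is a minimal round-trip sketch built on the two calls above, plus the
+IsValidCompressedBuffer check from the same header. It assumes snappy.h and
+libsnappy are installed on the system; the file name roundtrip.cc is just a
+placeholder.
+
+    // roundtrip.cc -- compress a string and verify it decompresses intact.
+    // Build sketch: g++ roundtrip.cc -lsnappy
+    #include <snappy.h>
+
+    #include <iostream>
+    #include <string>
+
+    int main() {
+      const std::string input(1000, 'x');  // trivially compressible sample
+
+      std::string compressed;
+      snappy::Compress(input.data(), input.size(), &compressed);
+
+      // Optional sanity check before decompressing untrusted data.
+      if (!snappy::IsValidCompressedBuffer(compressed.data(),
+                                           compressed.size())) {
+        std::cerr << "corrupt buffer" << std::endl;
+        return 1;
+      }
+
+      std::string restored;
+      if (!snappy::Uncompress(compressed.data(), compressed.size(),
+                              &restored)) {
+        std::cerr << "decompression failed" << std::endl;
+        return 1;
+      }
+
+      std::cout << input.size() << " -> " << compressed.size()
+                << " bytes, round-trip "
+                << (restored == input ? "ok" : "MISMATCH") << std::endl;
+      return 0;
+    }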
+
+
+Tests and benchmarks
+====================
+
+When you compile Snappy, snappy_unittest is compiled in addition to the
+library itself. You do not need it to use the compressor from your own library,
+but it contains several useful components for Snappy development.
+
+First of all, it contains unit tests, verifying correctness on your machine in
+various scenarios. If you want to change or optimize Snappy, please run the
+tests to verify you have not broken anything. Note that if you have the
+Google Test library installed, unit test behavior (especially failures) will be
+significantly more user-friendly. You can find Google Test at
+
+ http://code.google.com/p/googletest/
+
+You probably also want the gflags library for handling of command-line flags;
+you can find it at
+
+ http://code.google.com/p/google-gflags/
+
+In addition to the unit tests, snappy contains microbenchmarks used to
+tune compression and decompression performance. These are automatically run
+before the unit tests, but you can disable them using the flag
+--run_microbenchmarks=false if you have gflags installed (otherwise you will
+need to edit the source).
+
+Finally, snappy can benchmark Snappy against a few other compression libraries
+(zlib, LZO, LZF, FastLZ and QuickLZ), if they were detected at configure time.
+To benchmark using a given file, give the compression algorithm you want to test
+Snappy against (e.g. --zlib) and then a list of one or more file names on the
+command line. The testdata/ directory contains the files used by the
+microbenchmark, which should provide a reasonably balanced starting point for
+benchmarking. (Note that baddata[1-3].snappy are not intended as benchmarks; they
+are used to verify correctness in the presence of corrupted data in the unit
+test.)
+
+
+Contact
+=======
+
+Snappy is distributed through Google Code. For the latest version, a bug tracker,
+and other information, see
+
+ http://code.google.com/p/snappy/
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/aclocal.m4 b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/aclocal.m4
new file mode 100644
index 00000000..10f8e705
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/aclocal.m4
@@ -0,0 +1,9738 @@
+# generated automatically by aclocal 1.11.3 -*- Autoconf -*-
+
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+# 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation,
+# Inc.
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+m4_ifndef([AC_AUTOCONF_VERSION],
+ [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.68],,
+[m4_warning([this file was generated for autoconf 2.68.
+You have another version of autoconf. It may work, but is not guaranteed to.
+If you have problems, you may need to regenerate the build system entirely.
+To do so, use the procedure documented by the package, typically `autoreconf'.])])
+
+# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
+#
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
+# 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# Written by Gordon Matzigkeit, 1996
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+m4_define([_LT_COPYING], [dnl
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
+# 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# Written by Gordon Matzigkeit, 1996
+#
+# This file is part of GNU Libtool.
+#
+# GNU Libtool is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Libtool; see the file COPYING. If not, a copy
+# can be downloaded from http://www.gnu.org/licenses/gpl.html, or
+# obtained by writing to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA.
+])
+
+# serial 57 LT_INIT
+
+
+# LT_PREREQ(VERSION)
+# ------------------
+# Complain and exit if this libtool version is less than VERSION.
+m4_defun([LT_PREREQ],
+[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1,
+ [m4_default([$3],
+ [m4_fatal([Libtool version $1 or higher is required],
+ 63)])],
+ [$2])])
+
+
+# _LT_CHECK_BUILDDIR
+# ------------------
+# Complain if the absolute build directory name contains unusual characters
+m4_defun([_LT_CHECK_BUILDDIR],
+[case `pwd` in
+ *\ * | *\ *)
+ AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;;
+esac
+])
+
+
+# LT_INIT([OPTIONS])
+# ------------------
+AC_DEFUN([LT_INIT],
+[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT
+AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
+AC_BEFORE([$0], [LT_LANG])dnl
+AC_BEFORE([$0], [LT_OUTPUT])dnl
+AC_BEFORE([$0], [LTDL_INIT])dnl
+m4_require([_LT_CHECK_BUILDDIR])dnl
+
+dnl Autoconf doesn't catch unexpanded LT_ macros by default:
+m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl
+m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl
+dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4
+dnl unless we require an AC_DEFUNed macro:
+AC_REQUIRE([LTOPTIONS_VERSION])dnl
+AC_REQUIRE([LTSUGAR_VERSION])dnl
+AC_REQUIRE([LTVERSION_VERSION])dnl
+AC_REQUIRE([LTOBSOLETE_VERSION])dnl
+m4_require([_LT_PROG_LTMAIN])dnl
+
+_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}])
+
+dnl Parse OPTIONS
+_LT_SET_OPTIONS([$0], [$1])
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ltmain"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+AC_SUBST(LIBTOOL)dnl
+
+_LT_SETUP
+
+# Only expand once:
+m4_define([LT_INIT])
+])# LT_INIT
+
+# Old names:
+AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT])
+AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_PROG_LIBTOOL], [])
+dnl AC_DEFUN([AM_PROG_LIBTOOL], [])
+
+
+# _LT_CC_BASENAME(CC)
+# -------------------
+# Calculate cc_basename. Skip known compiler wrappers and cross-prefix.
+m4_defun([_LT_CC_BASENAME],
+[for cc_temp in $1""; do
+ case $cc_temp in
+ compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;;
+ distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;;
+ \-*) ;;
+ *) break;;
+ esac
+done
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+])
+
+
+# _LT_FILEUTILS_DEFAULTS
+# ----------------------
+# It is okay to use these file commands and assume they have been set
+# sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'.
+m4_defun([_LT_FILEUTILS_DEFAULTS],
+[: ${CP="cp -f"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+])# _LT_FILEUTILS_DEFAULTS
+
+
+# _LT_SETUP
+# ---------
+m4_defun([_LT_SETUP],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl
+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl
+
+_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl
+dnl
+_LT_DECL([], [host_alias], [0], [The host system])dnl
+_LT_DECL([], [host], [0])dnl
+_LT_DECL([], [host_os], [0])dnl
+dnl
+_LT_DECL([], [build_alias], [0], [The build system])dnl
+_LT_DECL([], [build], [0])dnl
+_LT_DECL([], [build_os], [0])dnl
+dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([LT_PATH_LD])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+dnl
+AC_REQUIRE([AC_PROG_LN_S])dnl
+test -z "$LN_S" && LN_S="ln -s"
+_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl
+dnl
+AC_REQUIRE([LT_CMD_MAX_LEN])dnl
+_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl
+_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl
+dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl
+m4_require([_LT_CMD_RELOAD])dnl
+m4_require([_LT_CHECK_MAGIC_METHOD])dnl
+m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl
+m4_require([_LT_CMD_OLD_ARCHIVE])dnl
+m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+m4_require([_LT_WITH_SYSROOT])dnl
+
+_LT_CONFIG_LIBTOOL_INIT([
+# See if we are running on zsh, and set the options which allow our
+# commands through without removal of \ escapes INIT.
+if test -n "\${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+fi
+])
+if test -n "${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+fi
+
+_LT_CHECK_OBJDIR
+
+m4_require([_LT_TAG_COMPILER])dnl
+
+case $host_os in
+aix3*)
+ # AIX sometimes has problems with the GCC collect2 program. For some
+ # reason, if we set the COLLECT_NAMES environment variable, the problems
+ # vanish in a puff of smoke.
+ if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+ fi
+ ;;
+esac
+
+# Global variables:
+ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a `.a' archive for static linking (except MSVC,
+# which needs '.lib').
+libext=a
+
+with_gnu_ld="$lt_cv_prog_gnu_ld"
+
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+
+# Set sane defaults for various variables
+test -z "$CC" && CC=cc
+test -z "$LTCC" && LTCC=$CC
+test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
+test -z "$LD" && LD=ld
+test -z "$ac_objext" && ac_objext=o
+
+_LT_CC_BASENAME([$compiler])
+
+# Only perform the check for file, if the check method requires it
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+case $deplibs_check_method in
+file_magic*)
+ if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+ _LT_PATH_MAGIC
+ fi
+ ;;
+esac
+
+# Use C for the default configuration in the libtool script
+LT_SUPPORTED_TAG([CC])
+_LT_LANG_C_CONFIG
+_LT_LANG_DEFAULT_CONFIG
+_LT_CONFIG_COMMANDS
+])# _LT_SETUP
+
+
+# _LT_PREPARE_SED_QUOTE_VARS
+# --------------------------
+# Define a few sed substitution that help us do robust quoting.
+m4_defun([_LT_PREPARE_SED_QUOTE_VARS],
+[# Backslashify metacharacters that are still active within
+# double-quoted strings.
+sed_quote_subst='s/\([["`$\\]]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\([["`\\]]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Sed substitution to delay expansion of an escaped single quote.
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+
+# Sed substitution to avoid accidental globbing in evaled expressions
+no_glob_subst='s/\*/\\\*/g'
+])
+
+# _LT_PROG_LTMAIN
+# ---------------
+# Note that this code is called both from `configure', and `config.status'
+# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably,
+# `config.status' has no value for ac_aux_dir unless we are using Automake,
+# so we pass a copy along to make sure it has a sensible value anyway.
+m4_defun([_LT_PROG_LTMAIN],
+[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl
+_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir'])
+ltmain="$ac_aux_dir/ltmain.sh"
+])# _LT_PROG_LTMAIN
+
+
+
+# So that we can recreate a full libtool script including additional
+# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS
+# in macros and then make a single call at the end using the `libtool'
+# label.
+
+
+# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS])
+# ----------------------------------------
+# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later.
+m4_define([_LT_CONFIG_LIBTOOL_INIT],
+[m4_ifval([$1],
+ [m4_append([_LT_OUTPUT_LIBTOOL_INIT],
+ [$1
+])])])
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_INIT])
+
+
+# _LT_CONFIG_LIBTOOL([COMMANDS])
+# ------------------------------
+# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later.
+m4_define([_LT_CONFIG_LIBTOOL],
+[m4_ifval([$1],
+ [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS],
+ [$1
+])])])
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS])
+
+
+# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS])
+# -----------------------------------------------------
+m4_defun([_LT_CONFIG_SAVE_COMMANDS],
+[_LT_CONFIG_LIBTOOL([$1])
+_LT_CONFIG_LIBTOOL_INIT([$2])
+])
+
+
+# _LT_FORMAT_COMMENT([COMMENT])
+# -----------------------------
+# Add leading comment marks to the start of each line, and a trailing
+# full-stop to the whole comment if one is not present already.
+m4_define([_LT_FORMAT_COMMENT],
+[m4_ifval([$1], [
+m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])],
+ [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.])
+)])
+
+
+
+
+
+# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?])
+# -------------------------------------------------------------------
+# CONFIGNAME is the name given to the value in the libtool script.
+# VARNAME is the (base) name used in the configure script.
+# VALUE may be 0, 1 or 2 for a computed quote escaped value based on
+# VARNAME. Any other value will be used directly.
+m4_define([_LT_DECL],
+[lt_if_append_uniq([lt_decl_varnames], [$2], [, ],
+ [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name],
+ [m4_ifval([$1], [$1], [$2])])
+ lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3])
+ m4_ifval([$4],
+ [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])])
+ lt_dict_add_subkey([lt_decl_dict], [$2],
+ [tagged?], [m4_ifval([$5], [yes], [no])])])
+])
+
+
+# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION])
+# --------------------------------------------------------
+m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])])
+
+
+# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...])
+# ------------------------------------------------
+m4_define([lt_decl_tag_varnames],
+[_lt_decl_filter([tagged?], [yes], $@)])
+
+
+# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..])
+# ---------------------------------------------------------
+m4_define([_lt_decl_filter],
+[m4_case([$#],
+ [0], [m4_fatal([$0: too few arguments: $#])],
+ [1], [m4_fatal([$0: too few arguments: $#: $1])],
+ [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)],
+ [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)],
+ [lt_dict_filter([lt_decl_dict], $@)])[]dnl
+])
+
+
+# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...])
+# --------------------------------------------------
+m4_define([lt_decl_quote_varnames],
+[_lt_decl_filter([value], [1], $@)])
+
+
+# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...])
+# ---------------------------------------------------
+m4_define([lt_decl_dquote_varnames],
+[_lt_decl_filter([value], [2], $@)])
+
+
+# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...])
+# ---------------------------------------------------
+m4_define([lt_decl_varnames_tagged],
+[m4_assert([$# <= 2])dnl
+_$0(m4_quote(m4_default([$1], [[, ]])),
+ m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]),
+ m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))])
+m4_define([_lt_decl_varnames_tagged],
+[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])])
+
+
+# lt_decl_all_varnames([SEPARATOR], [VARNAME1...])
+# ------------------------------------------------
+m4_define([lt_decl_all_varnames],
+[_$0(m4_quote(m4_default([$1], [[, ]])),
+ m4_if([$2], [],
+ m4_quote(lt_decl_varnames),
+ m4_quote(m4_shift($@))))[]dnl
+])
+m4_define([_lt_decl_all_varnames],
+[lt_join($@, lt_decl_varnames_tagged([$1],
+ lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl
+])
+
+
+# _LT_CONFIG_STATUS_DECLARE([VARNAME])
+# ------------------------------------
+# Quote a variable value, and forward it to `config.status' so that its
+# declaration there will have the same value as in `configure'. VARNAME
+# must have a single quote delimited value for this to work.
+m4_define([_LT_CONFIG_STATUS_DECLARE],
+[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`'])
+
+
+# _LT_CONFIG_STATUS_DECLARATIONS
+# ------------------------------
+# We delimit libtool config variables with single quotes, so when
+# we write them to config.status, we have to be sure to quote all
+# embedded single quotes properly. In configure, this macro expands
+# each variable declared with _LT_DECL (and _LT_TAGDECL) into:
+#
+# <var>='`$ECHO "$<var>" | $SED "$delay_single_quote_subst"`'
+m4_defun([_LT_CONFIG_STATUS_DECLARATIONS],
+[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames),
+ [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])])
+
+
+# _LT_LIBTOOL_TAGS
+# ----------------
+# Output comment and list of tags supported by the script
+m4_defun([_LT_LIBTOOL_TAGS],
+[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl
+available_tags="_LT_TAGS"dnl
+])
+
+
+# _LT_LIBTOOL_DECLARE(VARNAME, [TAG])
+# -----------------------------------
+# Extract the dictionary values for VARNAME (optionally with TAG) and
+# expand to a commented shell variable setting:
+#
+# # Some comment about what VAR is for.
+# visible_name=$lt_internal_name
+m4_define([_LT_LIBTOOL_DECLARE],
+[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1],
+ [description])))[]dnl
+m4_pushdef([_libtool_name],
+ m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl
+m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])),
+ [0], [_libtool_name=[$]$1],
+ [1], [_libtool_name=$lt_[]$1],
+ [2], [_libtool_name=$lt_[]$1],
+ [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl
+m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl
+])
+
+
+# _LT_LIBTOOL_CONFIG_VARS
+# -----------------------
+# Produce commented declarations of non-tagged libtool config variables
+# suitable for insertion in the LIBTOOL CONFIG section of the `libtool'
+# script. Tagged libtool config variables (even for the LIBTOOL CONFIG
+# section) are produced by _LT_LIBTOOL_TAG_VARS.
+m4_defun([_LT_LIBTOOL_CONFIG_VARS],
+[m4_foreach([_lt_var],
+ m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)),
+ [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])])
+
+
+# _LT_LIBTOOL_TAG_VARS(TAG)
+# -------------------------
+m4_define([_LT_LIBTOOL_TAG_VARS],
+[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames),
+ [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])])
+
+
+# _LT_TAGVAR(VARNAME, [TAGNAME])
+# ------------------------------
+m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])])
+
+
+# _LT_CONFIG_COMMANDS
+# -------------------
+# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of
+# variables for single and double quote escaping we saved from calls
+# to _LT_DECL, we can put quote-escaped variable declarations
+# into `config.status', and then the shell code to quote-escape them in
+# for loops in `config.status'. Finally, any additional code accumulated
+# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded.
+m4_defun([_LT_CONFIG_COMMANDS],
+[AC_PROVIDE_IFELSE([LT_OUTPUT],
+ dnl If the libtool generation code has been placed in $CONFIG_LT,
+ dnl instead of duplicating it all over again into config.status,
+ dnl then we will have config.status run $CONFIG_LT later, so it
+ dnl needs to know what name is stored there:
+ [AC_CONFIG_COMMANDS([libtool],
+ [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])],
+ dnl If the libtool generation code is destined for config.status,
+ dnl expand the accumulated commands and init code now:
+ [AC_CONFIG_COMMANDS([libtool],
+ [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])])
+])#_LT_CONFIG_COMMANDS
+
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT],
+[
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+sed_quote_subst='$sed_quote_subst'
+double_quote_subst='$double_quote_subst'
+delay_variable_subst='$delay_variable_subst'
+_LT_CONFIG_STATUS_DECLARATIONS
+LTCC='$LTCC'
+LTCFLAGS='$LTCFLAGS'
+compiler='$compiler_DEFAULT'
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+ eval 'cat <<_LTECHO_EOF
+\$[]1
+_LTECHO_EOF'
+}
+
+# Quote evaled strings.
+for var in lt_decl_all_varnames([[ \
+]], lt_decl_quote_varnames); do
+ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+ *[[\\\\\\\`\\"\\\$]]*)
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
+ ;;
+ *)
+ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+ ;;
+ esac
+done
+
+# Double-quote double-evaled strings.
+for var in lt_decl_all_varnames([[ \
+]], lt_decl_dquote_varnames); do
+ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+ *[[\\\\\\\`\\"\\\$]]*)
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
+ ;;
+ *)
+ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+ ;;
+ esac
+done
+
+_LT_OUTPUT_LIBTOOL_INIT
+])
+
+# _LT_GENERATED_FILE_INIT(FILE, [COMMENT])
+# ------------------------------------
+# Generate a child script FILE with all initialization necessary to
+# reuse the environment learned by the parent script, and make the
+# file executable. If COMMENT is supplied, it is inserted after the
+# `#!' sequence but before initialization text begins. After this
+# macro, additional text can be appended to FILE to form the body of
+# the child script. The macro ends with non-zero status if the
+# file could not be fully written (such as if the disk is full).
+m4_ifdef([AS_INIT_GENERATED],
+[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])],
+[m4_defun([_LT_GENERATED_FILE_INIT],
+[m4_require([AS_PREPARE])]dnl
+[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl
+[lt_write_fail=0
+cat >$1 <<_ASEOF || lt_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+$2
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$1 <<\_ASEOF || lt_write_fail=1
+AS_SHELL_SANITIZE
+_AS_PREPARE
+exec AS_MESSAGE_FD>&1
+_ASEOF
+test $lt_write_fail = 0 && chmod +x $1[]dnl
+m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT
+
+# LT_OUTPUT
+# ---------
+# This macro allows early generation of the libtool script (before
+# AC_OUTPUT is called), in case it is used in configure for compilation
+# tests.
+AC_DEFUN([LT_OUTPUT],
+[: ${CONFIG_LT=./config.lt}
+AC_MSG_NOTICE([creating $CONFIG_LT])
+_LT_GENERATED_FILE_INIT(["$CONFIG_LT"],
+[# Run this file to recreate a libtool stub with the current configuration.])
+
+cat >>"$CONFIG_LT" <<\_LTEOF
+lt_cl_silent=false
+exec AS_MESSAGE_LOG_FD>>config.log
+{
+ echo
+ AS_BOX([Running $as_me.])
+} >&AS_MESSAGE_LOG_FD
+
+lt_cl_help="\
+\`$as_me' creates a local libtool stub from the current configuration,
+for use in further configure time tests before the real libtool is
+generated.
+
+Usage: $[0] [[OPTIONS]]
+
+ -h, --help print this help, then exit
+ -V, --version print version number, then exit
+ -q, --quiet do not print progress messages
+ -d, --debug don't remove temporary files
+
+Report bugs to <bug-libtool@gnu.org>."
+
+lt_cl_version="\
+m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl
+m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION])
+configured by $[0], generated by m4_PACKAGE_STRING.
+
+Copyright (C) 2011 Free Software Foundation, Inc.
+This config.lt script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+while test $[#] != 0
+do
+ case $[1] in
+ --version | --v* | -V )
+ echo "$lt_cl_version"; exit 0 ;;
+ --help | --h* | -h )
+ echo "$lt_cl_help"; exit 0 ;;
+ --debug | --d* | -d )
+ debug=: ;;
+ --quiet | --q* | --silent | --s* | -q )
+ lt_cl_silent=: ;;
+
+ -*) AC_MSG_ERROR([unrecognized option: $[1]
+Try \`$[0] --help' for more information.]) ;;
+
+ *) AC_MSG_ERROR([unrecognized argument: $[1]
+Try \`$[0] --help' for more information.]) ;;
+ esac
+ shift
+done
+
+if $lt_cl_silent; then
+ exec AS_MESSAGE_FD>/dev/null
+fi
+_LTEOF
+
+cat >>"$CONFIG_LT" <<_LTEOF
+_LT_OUTPUT_LIBTOOL_COMMANDS_INIT
+_LTEOF
+
+cat >>"$CONFIG_LT" <<\_LTEOF
+AC_MSG_NOTICE([creating $ofile])
+_LT_OUTPUT_LIBTOOL_COMMANDS
+AS_EXIT(0)
+_LTEOF
+chmod +x "$CONFIG_LT"
+
+# configure is writing to config.log, but config.lt does its own redirection,
+# appending to config.log, which fails on DOS, as config.log is still kept
+# open by configure. Here we exec the FD to /dev/null, effectively closing
+# config.log, so it can be properly (re)opened and appended to by config.lt.
+lt_cl_success=:
+test "$silent" = yes &&
+ lt_config_lt_args="$lt_config_lt_args --quiet"
+exec AS_MESSAGE_LOG_FD>/dev/null
+$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false
+exec AS_MESSAGE_LOG_FD>>config.log
+$lt_cl_success || AS_EXIT(1)
+])# LT_OUTPUT
+
+
+# _LT_CONFIG(TAG)
+# ---------------
+# If TAG is the built-in tag, create an initial libtool script with a
+# default configuration from the untagged config vars. Otherwise add code
+# to config.status for appending the configuration named by TAG from the
+# matching tagged config vars.
+m4_defun([_LT_CONFIG],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+_LT_CONFIG_SAVE_COMMANDS([
+ m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl
+ m4_if(_LT_TAG, [C], [
+ # See if we are running on zsh, and set the options which allow our
+ # commands through without removal of \ escapes.
+ if test -n "${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+ fi
+
+ cfgfile="${ofile}T"
+ trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+ $RM "$cfgfile"
+
+ cat <<_LT_EOF >> "$cfgfile"
+#! $SHELL
+
+# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+#
+_LT_COPYING
+_LT_LIBTOOL_TAGS
+
+# ### BEGIN LIBTOOL CONFIG
+_LT_LIBTOOL_CONFIG_VARS
+_LT_LIBTOOL_TAG_VARS
+# ### END LIBTOOL CONFIG
+
+_LT_EOF
+
+ case $host_os in
+ aix3*)
+ cat <<\_LT_EOF >> "$cfgfile"
+# AIX sometimes has problems with the GCC collect2 program. For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+fi
+_LT_EOF
+ ;;
+ esac
+
+ _LT_PROG_LTMAIN
+
+ # We use sed instead of cat because bash on DJGPP gets confused if
+  # it finds mixed CR/LF and LF-only lines. Since sed operates in
+ # text mode, it properly converts lines to CR/LF. This bash problem
+ # is reportedly fixed, but why not run on old versions too?
+ sed '$q' "$ltmain" >> "$cfgfile" \
+ || (rm -f "$cfgfile"; exit 1)
+
+ _LT_PROG_REPLACE_SHELLFNS
+
+ mv -f "$cfgfile" "$ofile" ||
+ (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
+ chmod +x "$ofile"
+],
+[cat <<_LT_EOF >> "$ofile"
+
+dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded
+dnl in a comment (ie after a #).
+# ### BEGIN LIBTOOL TAG CONFIG: $1
+_LT_LIBTOOL_TAG_VARS(_LT_TAG)
+# ### END LIBTOOL TAG CONFIG: $1
+_LT_EOF
+])dnl /m4_if
+],
+[m4_if([$1], [], [
+ PACKAGE='$PACKAGE'
+ VERSION='$VERSION'
+ TIMESTAMP='$TIMESTAMP'
+ RM='$RM'
+ ofile='$ofile'], [])
+])dnl /_LT_CONFIG_SAVE_COMMANDS
+])# _LT_CONFIG
+
+
+# LT_SUPPORTED_TAG(TAG)
+# ---------------------
+# Trace this macro to discover what tags are supported by the libtool
+# --tag option, using:
+# autoconf --trace 'LT_SUPPORTED_TAG:$1'
+AC_DEFUN([LT_SUPPORTED_TAG], [])
+
+
+# C support is built-in for now
+m4_define([_LT_LANG_C_enabled], [])
+m4_define([_LT_TAGS], [])
+
+
+# LT_LANG(LANG)
+# -------------
+# Enable libtool support for the given language if not already enabled.
+AC_DEFUN([LT_LANG],
+[AC_BEFORE([$0], [LT_OUTPUT])dnl
+m4_case([$1],
+ [C], [_LT_LANG(C)],
+ [C++], [_LT_LANG(CXX)],
+ [Go], [_LT_LANG(GO)],
+ [Java], [_LT_LANG(GCJ)],
+ [Fortran 77], [_LT_LANG(F77)],
+ [Fortran], [_LT_LANG(FC)],
+ [Windows Resource], [_LT_LANG(RC)],
+ [m4_ifdef([_LT_LANG_]$1[_CONFIG],
+ [_LT_LANG($1)],
+ [m4_fatal([$0: unsupported language: "$1"])])])dnl
+])# LT_LANG
+
+
+# _LT_LANG(LANGNAME)
+# ------------------
+m4_defun([_LT_LANG],
+[m4_ifdef([_LT_LANG_]$1[_enabled], [],
+ [LT_SUPPORTED_TAG([$1])dnl
+ m4_append([_LT_TAGS], [$1 ])dnl
+ m4_define([_LT_LANG_]$1[_enabled], [])dnl
+ _LT_LANG_$1_CONFIG($1)])dnl
+])# _LT_LANG
+
+
+m4_ifndef([AC_PROG_GO], [
+# NOTE: This macro has been submitted for inclusion into #
+# GNU Autoconf as AC_PROG_GO. When it is available in #
+# a released version of Autoconf we should remove this #
+# macro and use it instead. #
+m4_defun([AC_PROG_GO],
+[AC_LANG_PUSH(Go)dnl
+AC_ARG_VAR([GOC], [Go compiler command])dnl
+AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl
+_AC_ARG_VAR_LDFLAGS()dnl
+AC_CHECK_TOOL(GOC, gccgo)
+if test -z "$GOC"; then
+ if test -n "$ac_tool_prefix"; then
+ AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo])
+ fi
+fi
+if test -z "$GOC"; then
+ AC_CHECK_PROG(GOC, gccgo, gccgo, false)
+fi
+])#m4_defun
+])#m4_ifndef
+
+
+# _LT_LANG_DEFAULT_CONFIG
+# -----------------------
+m4_defun([_LT_LANG_DEFAULT_CONFIG],
+[AC_PROVIDE_IFELSE([AC_PROG_CXX],
+ [LT_LANG(CXX)],
+ [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])])
+
+AC_PROVIDE_IFELSE([AC_PROG_F77],
+ [LT_LANG(F77)],
+ [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])])
+
+AC_PROVIDE_IFELSE([AC_PROG_FC],
+ [LT_LANG(FC)],
+ [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])])
+
+dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal
+dnl pulling things in needlessly.
+AC_PROVIDE_IFELSE([AC_PROG_GCJ],
+ [LT_LANG(GCJ)],
+ [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],
+ [LT_LANG(GCJ)],
+ [AC_PROVIDE_IFELSE([LT_PROG_GCJ],
+ [LT_LANG(GCJ)],
+ [m4_ifdef([AC_PROG_GCJ],
+ [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])])
+ m4_ifdef([A][M_PROG_GCJ],
+ [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])])
+ m4_ifdef([LT_PROG_GCJ],
+ [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])])
+
+AC_PROVIDE_IFELSE([AC_PROG_GO],
+ [LT_LANG(GO)],
+ [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])])
+
+AC_PROVIDE_IFELSE([LT_PROG_RC],
+ [LT_LANG(RC)],
+ [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])])
+])# _LT_LANG_DEFAULT_CONFIG
+
+# Obsolete macros:
+AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)])
+AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)])
+AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)])
+AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)])
+AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_CXX], [])
+dnl AC_DEFUN([AC_LIBTOOL_F77], [])
+dnl AC_DEFUN([AC_LIBTOOL_FC], [])
+dnl AC_DEFUN([AC_LIBTOOL_GCJ], [])
+dnl AC_DEFUN([AC_LIBTOOL_RC], [])
+
+
+# _LT_TAG_COMPILER
+# ----------------
+m4_defun([_LT_TAG_COMPILER],
+[AC_REQUIRE([AC_PROG_CC])dnl
+
+_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl
+_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl
+_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl
+_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+])# _LT_TAG_COMPILER
+
+
+# _LT_COMPILER_BOILERPLATE
+# ------------------------
+# Check for compiler boilerplate output or warnings with
+# the simple compiler test code.
+m4_defun([_LT_COMPILER_BOILERPLATE],
+[m4_require([_LT_DECL_SED])dnl
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+])# _LT_COMPILER_BOILERPLATE
+
+
+# _LT_LINKER_BOILERPLATE
+# ----------------------
+# Check for linker boilerplate output or warnings with
+# the simple link test code.
+m4_defun([_LT_LINKER_BOILERPLATE],
+[m4_require([_LT_DECL_SED])dnl
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+])# _LT_LINKER_BOILERPLATE
+
+# _LT_REQUIRED_DARWIN_CHECKS
+# -------------------------
+m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
+ case $host_os in
+ rhapsody* | darwin*)
+ AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:])
+ AC_CHECK_TOOL([NMEDIT], [nmedit], [:])
+ AC_CHECK_TOOL([LIPO], [lipo], [:])
+ AC_CHECK_TOOL([OTOOL], [otool], [:])
+ AC_CHECK_TOOL([OTOOL64], [otool64], [:])
+ _LT_DECL([], [DSYMUTIL], [1],
+ [Tool to manipulate archived DWARF debug symbol files on Mac OS X])
+ _LT_DECL([], [NMEDIT], [1],
+ [Tool to change global to local symbols on Mac OS X])
+ _LT_DECL([], [LIPO], [1],
+ [Tool to manipulate fat objects and archives on Mac OS X])
+ _LT_DECL([], [OTOOL], [1],
+ [ldd/readelf like tool for Mach-O binaries on Mac OS X])
+ _LT_DECL([], [OTOOL64], [1],
+ [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4])
+
+ AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod],
+ [lt_cv_apple_cc_single_mod=no
+ if test -z "${LT_MULTI_MODULE}"; then
+ # By default we will add the -single_module flag. You can override
+ # by either setting the environment variable LT_MULTI_MODULE
+ # non-empty at configure time, or by adding -multi_module to the
+ # link flags.
+ rm -rf libconftest.dylib*
+ echo "int foo(void){return 1;}" > conftest.c
+ echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD
+ $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
+ _lt_result=$?
+ # If there is a non-empty error log, and "single_module"
+ # appears in it, assume the flag caused a linker warning
+ if test -s conftest.err && $GREP single_module conftest.err; then
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ # Otherwise, if the output was created with a 0 exit code from
+ # the compiler, it worked.
+ elif test -f libconftest.dylib && test $_lt_result -eq 0; then
+ lt_cv_apple_cc_single_mod=yes
+ else
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ fi
+ rm -rf libconftest.dylib*
+ rm -f conftest.*
+ fi])
+
+ AC_CACHE_CHECK([for -exported_symbols_list linker flag],
+ [lt_cv_ld_exported_symbols_list],
+ [lt_cv_ld_exported_symbols_list=no
+ save_LDFLAGS=$LDFLAGS
+ echo "_main" > conftest.sym
+ LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+ [lt_cv_ld_exported_symbols_list=yes],
+ [lt_cv_ld_exported_symbols_list=no])
+ LDFLAGS="$save_LDFLAGS"
+ ])
+
+ AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load],
+ [lt_cv_ld_force_load=no
+ cat > conftest.c << _LT_EOF
+int forced_loaded() { return 2;}
+_LT_EOF
+ echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD
+ $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD
+ echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD
+ $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD
+ echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD
+ $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD
+ cat > conftest.c << _LT_EOF
+int main() { return 0;}
+_LT_EOF
+ echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD
+ $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
+ _lt_result=$?
+ if test -s conftest.err && $GREP force_load conftest.err; then
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then
+ lt_cv_ld_force_load=yes
+ else
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ fi
+ rm -f conftest.err libconftest.a conftest conftest.c
+ rm -rf conftest.dSYM
+ ])
+ case $host_os in
+ rhapsody* | darwin1.[[012]])
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;;
+ darwin1.*)
+ _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+ darwin*) # darwin 5.x on
+ # if running on 10.5 or later, the deployment target defaults
+ # to the OS version, if on x86, and 10.4, the deployment
+ # target defaults to 10.4. Don't you love it?
+ case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in
+ 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*)
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+ 10.[[012]]*)
+ _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+ 10.*)
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+ esac
+ ;;
+ esac
+ if test "$lt_cv_apple_cc_single_mod" = "yes"; then
+ _lt_dar_single_mod='$single_module'
+ fi
+ if test "$lt_cv_ld_exported_symbols_list" = "yes"; then
+ _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym'
+ else
+ _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}'
+ fi
+ if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then
+ _lt_dsymutil='~$DSYMUTIL $lib || :'
+ else
+ _lt_dsymutil=
+ fi
+ ;;
+ esac
+])
+
+
+# _LT_DARWIN_LINKER_FEATURES([TAG])
+# ---------------------------------
+# Checks for linker and compiler features on darwin
+m4_defun([_LT_DARWIN_LINKER_FEATURES],
+[
+ m4_require([_LT_REQUIRED_DARWIN_CHECKS])
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_automatic, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+ if test "$lt_cv_ld_force_load" = "yes"; then
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+ m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes],
+ [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes])
+ else
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=''
+ fi
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined"
+ case $cc_basename in
+ ifort*) _lt_dar_can_shared=yes ;;
+ *) _lt_dar_can_shared=$GCC ;;
+ esac
+ if test "$_lt_dar_can_shared" = "yes"; then
+ output_verbose_link_cmd=func_echo_all
+ _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
+ _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
+ _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
+ _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
+ m4_if([$1], [CXX],
+[ if test "$lt_cv_apple_cc_single_mod" != "yes"; then
+ _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}"
+ _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}"
+ fi
+],[])
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+])
+
+# _LT_SYS_MODULE_PATH_AIX([TAGNAME])
+# ----------------------------------
+# Links a minimal program and checks the executable
+# for the system default hardcoded library path. In most cases,
+# this is /usr/lib:/lib, but when the MPI compilers are used
+# the locations of the communication and MPI libs are included too.
+# If we don't find anything, use the default library path according
+# to the aix ld manual.
+# Store the results from the different compilers for each TAGNAME.
+# Allow overriding them for all tags through lt_cv_aix_libpath.
+m4_defun([_LT_SYS_MODULE_PATH_AIX],
+[m4_require([_LT_DECL_SED])dnl
+if test "${lt_cv_aix_libpath+set}" = set; then
+ aix_libpath=$lt_cv_aix_libpath
+else
+ AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM],[
+ lt_aix_libpath_sed='[
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\([^ ]*\) *$/\1/
+ p
+ }
+ }]'
+ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ # Check for a 64-bit object if we didn't find anything.
+ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
+ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi],[])
+ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
+ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib"
+ fi
+ ])
+ aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])
+fi
+])# _LT_SYS_MODULE_PATH_AIX
+
+
+# _LT_SHELL_INIT(ARG)
+# -------------------
+m4_define([_LT_SHELL_INIT],
+[m4_divert_text([M4SH-INIT], [$1
+])])# _LT_SHELL_INIT
+
+
+
+# _LT_PROG_ECHO_BACKSLASH
+# -----------------------
+# Find how we can fake an echo command that does not interpret backslash.
+# In particular, with Autoconf 2.60 or later we add some code to the start
+# of the generated configure script which will find a shell with a builtin
+# printf (which we can use as an echo command).
+m4_defun([_LT_PROG_ECHO_BACKSLASH],
+[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+
+AC_MSG_CHECKING([how to print strings])
+# Test print first, because it will be a builtin if present.
+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
+ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
+ ECHO='print -r --'
+elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
+ ECHO='printf %s\n'
+else
+ # Use this function as a fallback that always works.
+ func_fallback_echo ()
+ {
+ eval 'cat <<_LTECHO_EOF
+$[]1
+_LTECHO_EOF'
+ }
+ ECHO='func_fallback_echo'
+fi
+
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+ $ECHO "$*"
+}
+
+case "$ECHO" in
+ printf*) AC_MSG_RESULT([printf]) ;;
+ print*) AC_MSG_RESULT([print -r]) ;;
+ *) AC_MSG_RESULT([cat]) ;;
+esac
+
+m4_ifdef([_AS_DETECT_SUGGESTED],
+[_AS_DETECT_SUGGESTED([
+ test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || (
+ ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+ PATH=/empty FPATH=/empty; export PATH FPATH
+ test "X`printf %s $ECHO`" = "X$ECHO" \
+ || test "X`print -r -- $ECHO`" = "X$ECHO" )])])
+
+_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts])
+_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes])
+])# _LT_PROG_ECHO_BACKSLASH
+
+
+# _LT_WITH_SYSROOT
+# ----------------
+AC_DEFUN([_LT_WITH_SYSROOT],
+[AC_MSG_CHECKING([for sysroot])
+AC_ARG_WITH([sysroot],
+[ --with-sysroot[=DIR] Search for dependent libraries within DIR
+ (or the compiler's sysroot if not specified).],
+[], [with_sysroot=no])
+
+dnl lt_sysroot will always be passed unquoted. We quote it here
+dnl in case the user passed a directory name.
+lt_sysroot=
+case ${with_sysroot} in #(
+ yes)
+ if test "$GCC" = yes; then
+ lt_sysroot=`$CC --print-sysroot 2>/dev/null`
+ fi
+ ;; #(
+ /*)
+ lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"`
+ ;; #(
+ no|'')
+ ;; #(
+ *)
+ AC_MSG_RESULT([${with_sysroot}])
+ AC_MSG_ERROR([The sysroot must be an absolute path.])
+ ;;
+esac
+
+ AC_MSG_RESULT([${lt_sysroot:-no}])
+_LT_DECL([], [lt_sysroot], [0], [The root in which to search for ]dnl
+[dependent libraries, and in which our libraries should be installed.])])
+
+# _LT_ENABLE_LOCK
+# ---------------
+m4_defun([_LT_ENABLE_LOCK],
+[AC_ARG_ENABLE([libtool-lock],
+ [AS_HELP_STRING([--disable-libtool-lock],
+ [avoid locking (might break parallel builds)])])
+test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+ia64-*-hpux*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *ELF-32*)
+ HPUX_IA64_MODE="32"
+ ;;
+ *ELF-64*)
+ HPUX_IA64_MODE="64"
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+*-*-irix6*)
+ # Find out which ABI we are using.
+ echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ if test "$lt_cv_prog_gnu_ld" = yes; then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -melf32bsmip"
+ ;;
+ *N32*)
+ LD="${LD-ld} -melf32bmipn32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -melf64bmip"
+ ;;
+ esac
+ else
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -32"
+ ;;
+ *N32*)
+ LD="${LD-ld} -n32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -64"
+ ;;
+ esac
+ fi
+ fi
+ rm -rf conftest*
+ ;;
+
+x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \
+s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ case `/usr/bin/file conftest.o` in
+ *32-bit*)
+ case $host in
+ x86_64-*kfreebsd*-gnu)
+ LD="${LD-ld} -m elf_i386_fbsd"
+ ;;
+ x86_64-*linux*)
+ LD="${LD-ld} -m elf_i386"
+ ;;
+ ppc64-*linux*|powerpc64-*linux*)
+ LD="${LD-ld} -m elf32ppclinux"
+ ;;
+ s390x-*linux*)
+ LD="${LD-ld} -m elf_s390"
+ ;;
+ sparc64-*linux*)
+ LD="${LD-ld} -m elf32_sparc"
+ ;;
+ esac
+ ;;
+ *64-bit*)
+ case $host in
+ x86_64-*kfreebsd*-gnu)
+ LD="${LD-ld} -m elf_x86_64_fbsd"
+ ;;
+ x86_64-*linux*)
+ LD="${LD-ld} -m elf_x86_64"
+ ;;
+ ppc*-*linux*|powerpc*-*linux*)
+ LD="${LD-ld} -m elf64ppc"
+ ;;
+ s390*-*linux*|s390*-*tpf*)
+ LD="${LD-ld} -m elf64_s390"
+ ;;
+ sparc*-*linux*)
+ LD="${LD-ld} -m elf64_sparc"
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+
+*-*-sco3.2v5*)
+ # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -belf"
+ AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf,
+ [AC_LANG_PUSH(C)
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no])
+ AC_LANG_POP])
+ if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+ CFLAGS="$SAVE_CFLAGS"
+ fi
+ ;;
+*-*solaris*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ case `/usr/bin/file conftest.o` in
+ *64-bit*)
+ case $lt_cv_prog_gnu_ld in
+ yes*)
+ case $host in
+ i?86-*-solaris*)
+ LD="${LD-ld} -m elf_x86_64"
+ ;;
+ sparc*-*-solaris*)
+ LD="${LD-ld} -m elf64_sparc"
+ ;;
+ esac
+ # GNU ld 2.21 introduced _sol2 emulations. Use them if available.
+ if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
+ LD="${LD-ld}_sol2"
+ fi
+ ;;
+ *)
+ if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
+ LD="${LD-ld} -64"
+ fi
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+esac
+
+need_locks="$enable_libtool_lock"
+])# _LT_ENABLE_LOCK
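+
+# Illustrative note, not from upstream libtool: the ABI probes above key off
+# the output of /usr/bin/file on a freshly compiled test object.  For example,
+# on an x86_64-*linux* host where CFLAGS contains -m32, file reports a 32-bit
+# ELF object, so the case statement selects the matching linker emulation:
+#
+#   LD="ld -m elf_i386"
+#
+# The -m32 scenario is just one example; the emulation names come from the
+# case statements above.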
+
+
+# _LT_PROG_AR
+# -----------
+m4_defun([_LT_PROG_AR],
+[AC_CHECK_TOOLS(AR, [ar], false)
+: ${AR=ar}
+: ${AR_FLAGS=cru}
+_LT_DECL([], [AR], [1], [The archiver])
+_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive])
+
+AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file],
+ [lt_cv_ar_at_file=no
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM],
+ [echo conftest.$ac_objext > conftest.lst
+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD'
+ AC_TRY_EVAL([lt_ar_try])
+ if test "$ac_status" -eq 0; then
+ # Ensure the archiver fails upon bogus file names.
+ rm -f conftest.$ac_objext libconftest.a
+ AC_TRY_EVAL([lt_ar_try])
+ if test "$ac_status" -ne 0; then
+ lt_cv_ar_at_file=@
+ fi
+ fi
+ rm -f conftest.* libconftest.a
+ ])
+ ])
+
+if test "x$lt_cv_ar_at_file" = xno; then
+ archiver_list_spec=
+else
+ archiver_list_spec=$lt_cv_ar_at_file
+fi
+_LT_DECL([], [archiver_list_spec], [1],
+ [How to feed a file listing to the archiver])
+])# _LT_PROG_AR
+
+
+# _LT_CMD_OLD_ARCHIVE
+# -------------------
+m4_defun([_LT_CMD_OLD_ARCHIVE],
+[_LT_PROG_AR
+
+AC_CHECK_TOOL(STRIP, strip, :)
+test -z "$STRIP" && STRIP=:
+_LT_DECL([], [STRIP], [1], [A symbol stripping program])
+
+AC_CHECK_TOOL(RANLIB, ranlib, :)
+test -z "$RANLIB" && RANLIB=:
+_LT_DECL([], [RANLIB], [1],
+ [Commands used to install an old-style archive])
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+ case $host_os in
+ openbsd*)
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
+ ;;
+ *)
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
+ ;;
+ esac
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
+fi
+
+case $host_os in
+ darwin*)
+ lock_old_archive_extraction=yes ;;
+ *)
+ lock_old_archive_extraction=no ;;
+esac
+_LT_DECL([], [old_postinstall_cmds], [2])
+_LT_DECL([], [old_postuninstall_cmds], [2])
+_LT_TAGDECL([], [old_archive_cmds], [2],
+ [Commands used to build an old-style archive])
+_LT_DECL([], [lock_old_archive_extraction], [0],
+ [Whether to use a lock for old archive extraction])
+])# _LT_CMD_OLD_ARCHIVE
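+
+# Illustrative note, not from upstream libtool: with the defaults chosen above
+# (AR=ar, AR_FLAGS=cru, RANLIB=ranlib), old_archive_cmds expands to the usual
+# static-archive sequence, with '~' acting as libtool's command separator:
+#
+#   ar cru libfoo.a foo.o bar.o
+#   ranlib libfoo.a
+#
+# The library and object names are placeholders; only the command shape comes
+# from the template above.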
+
+
+# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
+# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE])
+# ----------------------------------------------------------------
+# Check whether the given compiler option works
+AC_DEFUN([_LT_COMPILER_OPTION],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_SED])dnl
+AC_CACHE_CHECK([$1], [$2],
+ [$2=no
+ m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4])
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+ lt_compiler_flag="$3"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ # The option is referenced via a variable to avoid confusing sed.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+ (eval "$lt_compile" 2>conftest.err)
+ ac_status=$?
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+ if (exit $ac_status) && test -s "$ac_outfile"; then
+    # The compiler can only warn and ignore the option if not recognized.
+ # So say no if there are warnings other than the usual output.
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+ $2=yes
+ fi
+ fi
+ $RM conftest*
+])
+
+if test x"[$]$2" = xyes; then
+ m4_if([$5], , :, [$5])
+else
+ m4_if([$6], , :, [$6])
+fi
+])# _LT_COMPILER_OPTION
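+
+# Illustrative note, not from upstream libtool: the sed pipeline above splices
+# the probe flag into $ac_compile without letting sed see the flag's value.
+# Assuming the usual autoconf setting
+#
+#   ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+#
+# the first substitution appends the flag after the last *FLAGS word, giving
+#
+#   $CC -c $CFLAGS $CPPFLAGS $lt_compiler_flag conftest.$ac_ext >&5
+#
+# and the literal $lt_compiler_flag is only expanded when the resulting
+# command is run through eval.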
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], [])
+
+
+# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
+# [ACTION-SUCCESS], [ACTION-FAILURE])
+# ----------------------------------------------------
+# Check whether the given linker option works
+AC_DEFUN([_LT_LINKER_OPTION],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_SED])dnl
+AC_CACHE_CHECK([$1], [$2],
+ [$2=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $3"
+ echo "$lt_simple_link_test_code" > conftest.$ac_ext
+ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+ # The linker can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ # Append any errors to the config.log.
+ cat conftest.err 1>&AS_MESSAGE_LOG_FD
+ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if diff conftest.exp conftest.er2 >/dev/null; then
+ $2=yes
+ fi
+ else
+ $2=yes
+ fi
+ fi
+ $RM -r conftest*
+ LDFLAGS="$save_LDFLAGS"
+])
+
+if test x"[$]$2" = xyes; then
+ m4_if([$4], , :, [$4])
+else
+ m4_if([$5], , :, [$5])
+fi
+])# _LT_LINKER_OPTION
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], [])
+
+
+# LT_CMD_MAX_LEN
+# --------------
+AC_DEFUN([LT_CMD_MAX_LEN],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+# find the maximum length of command line arguments
+AC_MSG_CHECKING([the maximum length of command line arguments])
+AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
+ i=0
+ teststring="ABCD"
+
+ case $build_os in
+ msdosdjgpp*)
+ # On DJGPP, this test can blow up pretty badly due to problems in libc
+ # (any single argument exceeding 2000 bytes causes a buffer overrun
+ # during glob expansion). Even if it were fixed, the result of this
+ # check would be larger than it should be.
+ lt_cv_sys_max_cmd_len=12288; # 12K is about right
+ ;;
+
+ gnu*)
+ # Under GNU Hurd, this test is not required because there is
+ # no limit to the length of command line arguments.
+ # Libtool will interpret -1 as no limit whatsoever
+ lt_cv_sys_max_cmd_len=-1;
+ ;;
+
+ cygwin* | mingw* | cegcc*)
+ # On Win9x/ME, this test blows up -- it succeeds, but takes
+ # about 5 minutes as the teststring grows exponentially.
+ # Worse, since 9x/ME are not pre-emptively multitasking,
+ # you end up with a "frozen" computer, even though with patience
+ # the test eventually succeeds (with a max line length of 256k).
+    # Instead, let's just punt: use the minimum line length reported by
+ # all of the supported platforms: 8192 (on NT/2K/XP).
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ mint*)
+ # On MiNT this can take a long time and run out of memory.
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ amigaos*)
+ # On AmigaOS with pdksh, this test takes hours, literally.
+ # So we just punt and use a minimum line length of 8192.
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ netbsd* | freebsd* | openbsd* | darwin* | dragonfly*)
+ # This has been around since 386BSD, at least. Likely further.
+ if test -x /sbin/sysctl; then
+ lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+ elif test -x /usr/sbin/sysctl; then
+ lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
+ else
+ lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs
+ fi
+ # And add a safety zone
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ ;;
+
+ interix*)
+    # We know the value is 262144 and hardcode it with a safety zone (like BSD).
+ lt_cv_sys_max_cmd_len=196608
+ ;;
+
+ os2*)
+ # The test takes a long time on OS/2.
+ lt_cv_sys_max_cmd_len=8192
+ ;;
+
+ osf*)
+ # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+ # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+    # nice to cause kernel panics, so let's avoid the loop below.
+ # First set a reasonable default.
+ lt_cv_sys_max_cmd_len=16384
+ #
+ if test -x /sbin/sysconfig; then
+ case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+ *1*) lt_cv_sys_max_cmd_len=-1 ;;
+ esac
+ fi
+ ;;
+ sco3.2v5*)
+ lt_cv_sys_max_cmd_len=102400
+ ;;
+ sysv5* | sco5v6* | sysv4.2uw2*)
+ kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+ if test -n "$kargmax"; then
+ lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'`
+ else
+ lt_cv_sys_max_cmd_len=32768
+ fi
+ ;;
+ *)
+ lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+ if test -n "$lt_cv_sys_max_cmd_len"; then
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ else
+ # Make teststring a little bigger before we do anything with it.
+      # A 1K string should be a reasonable start.
+ for i in 1 2 3 4 5 6 7 8 ; do
+ teststring=$teststring$teststring
+ done
+ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+ # If test is not a shell built-in, we'll probably end up computing a
+ # maximum length that is only half of the actual maximum length, but
+ # we can't tell.
+ while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \
+ = "X$teststring$teststring"; } >/dev/null 2>&1 &&
+ test $i != 17 # 1/2 MB should be enough
+ do
+ i=`expr $i + 1`
+ teststring=$teststring$teststring
+ done
+ # Only check the string length outside the loop.
+ lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+ teststring=
+ # Add a significant safety factor because C++ compilers can tack on
+ # massive amounts of additional arguments before passing them to the
+ # linker. It appears as though 1/2 is a usable value.
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+ fi
+ ;;
+ esac
+])
+if test -n "$lt_cv_sys_max_cmd_len" ; then
+ AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
+else
+ AC_MSG_RESULT(none)
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+_LT_DECL([], [max_cmd_len], [0],
+ [What is the maximum length of a command?])
+])# LT_CMD_MAX_LEN
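+
+# Illustrative note, not from upstream libtool: on the BSD-family branch the
+# limit is read straight from the kernel and then reduced to three quarters
+# as a safety zone.  For instance, on a system where
+#
+#   /sbin/sysctl -n kern.argmax
+#
+# prints 262144, the cached value becomes 262144 / 4 * 3 = 196608, the same
+# figure hard-coded for interix above.  The generic fallback instead doubles
+# a test string until echo can no longer reproduce it and then halves the
+# measured length as its safety factor.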
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])
+
+
+# _LT_HEADER_DLFCN
+# ----------------
+m4_defun([_LT_HEADER_DLFCN],
+[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
+])# _LT_HEADER_DLFCN
+
+
+# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ----------------------------------------------------------------
+m4_defun([_LT_TRY_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test "$cross_compiling" = yes; then :
+ [$4]
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+[#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW on the command line if we
+   find out it does not work on some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+ correspondingly for the symbols needed. */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else
+ {
+ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else puts (dlerror ());
+ }
+ /* dlclose (self); */
+ }
+ else
+ puts (dlerror ());
+
+ return status;
+}]
+_LT_EOF
+ if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) $1 ;;
+ x$lt_dlneed_uscore) $2 ;;
+ x$lt_dlunknown|x*) $3 ;;
+ esac
+ else :
+ # compilation failed
+ $3
+ fi
+fi
+rm -fr conftest*
+])# _LT_TRY_DLOPEN_SELF
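+
+# Illustrative note, not from upstream libtool: the embedded test program
+# reports its result through its exit status.  With the values assigned
+# above, running it by hand would look roughly like:
+#
+#   ./conftest; echo $?   # 1 -> ACTION-IF-TRUE ($1): plain "fnord" resolved
+#                         # 2 -> ACTION-IF-TRUE-W-USCORE ($2): "_fnord" needed
+#                         # 0 -> ACTION-IF-FALSE ($3): could not dlopen self
+#
+# which is the mapping the case statement performs on lt_status.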
+
+
+# LT_SYS_DLOPEN_SELF
+# ------------------
+AC_DEFUN([LT_SYS_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test "x$enable_dlopen" != xyes; then
+ enable_dlopen=unknown
+ enable_dlopen_self=unknown
+ enable_dlopen_self_static=unknown
+else
+ lt_cv_dlopen=no
+ lt_cv_dlopen_libs=
+
+ case $host_os in
+ beos*)
+ lt_cv_dlopen="load_add_on"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ;;
+
+ mingw* | pw32* | cegcc*)
+ lt_cv_dlopen="LoadLibrary"
+ lt_cv_dlopen_libs=
+ ;;
+
+ cygwin*)
+ lt_cv_dlopen="dlopen"
+ lt_cv_dlopen_libs=
+ ;;
+
+ darwin*)
+ # if libdl is installed we need to link against it
+ AC_CHECK_LIB([dl], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[
+ lt_cv_dlopen="dyld"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ])
+ ;;
+
+ *)
+ AC_CHECK_FUNC([shl_load],
+ [lt_cv_dlopen="shl_load"],
+ [AC_CHECK_LIB([dld], [shl_load],
+ [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"],
+ [AC_CHECK_FUNC([dlopen],
+ [lt_cv_dlopen="dlopen"],
+ [AC_CHECK_LIB([dl], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],
+ [AC_CHECK_LIB([svld], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"],
+ [AC_CHECK_LIB([dld], [dld_link],
+ [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"])
+ ])
+ ])
+ ])
+ ])
+ ])
+ ;;
+ esac
+
+ if test "x$lt_cv_dlopen" != xno; then
+ enable_dlopen=yes
+ else
+ enable_dlopen=no
+ fi
+
+ case $lt_cv_dlopen in
+ dlopen)
+ save_CPPFLAGS="$CPPFLAGS"
+ test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+ save_LDFLAGS="$LDFLAGS"
+ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+ save_LIBS="$LIBS"
+ LIBS="$lt_cv_dlopen_libs $LIBS"
+
+ AC_CACHE_CHECK([whether a program can dlopen itself],
+ lt_cv_dlopen_self, [dnl
+ _LT_TRY_DLOPEN_SELF(
+ lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
+ lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)
+ ])
+
+ if test "x$lt_cv_dlopen_self" = xyes; then
+ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+ AC_CACHE_CHECK([whether a statically linked program can dlopen itself],
+ lt_cv_dlopen_self_static, [dnl
+ _LT_TRY_DLOPEN_SELF(
+ lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes,
+ lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross)
+ ])
+ fi
+
+ CPPFLAGS="$save_CPPFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+ LIBS="$save_LIBS"
+ ;;
+ esac
+
+ case $lt_cv_dlopen_self in
+ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+ *) enable_dlopen_self=unknown ;;
+ esac
+
+ case $lt_cv_dlopen_self_static in
+ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+ *) enable_dlopen_self_static=unknown ;;
+ esac
+fi
+_LT_DECL([dlopen_support], [enable_dlopen], [0],
+ [Whether dlopen is supported])
+_LT_DECL([dlopen_self], [enable_dlopen_self], [0],
+ [Whether dlopen of programs is supported])
+_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0],
+ [Whether dlopen of statically linked programs is supported])
+])# LT_SYS_DLOPEN_SELF
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], [])
+
+
+# _LT_COMPILER_C_O([TAGNAME])
+# ---------------------------
+# Check to see if options -c and -o are simultaneously supported by the compiler.
+# This macro does not hard code the compiler like AC_PROG_CC_C_O.
+m4_defun([_LT_COMPILER_C_O],
+[m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
+ [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)],
+ [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no
+ $RM -r conftest 2>/dev/null
+ mkdir conftest
+ cd conftest
+ mkdir out
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ lt_compiler_flag="-o out/conftest2.$ac_objext"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+ (eval "$lt_compile" 2>out/conftest.err)
+ ac_status=$?
+ cat out/conftest.err >&AS_MESSAGE_LOG_FD
+ echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+ if (exit $ac_status) && test -s out/conftest2.$ac_objext
+ then
+      # The compiler can only warn and ignore the option if not recognized.
+      # So say no if there are warnings.
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+ _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
+ fi
+ fi
+ chmod u+w . 2>&AS_MESSAGE_LOG_FD
+ $RM conftest*
+ # SGI C++ compiler will create directory out/ii_files/ for
+ # template instantiation
+ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+ $RM out/* && rmdir out
+ cd ..
+ $RM -r conftest
+ $RM conftest*
+])
+_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1],
+ [Does compiler simultaneously support -c and -o options?])
+])# _LT_COMPILER_C_O
+
+
+# _LT_COMPILER_FILE_LOCKS([TAGNAME])
+# ----------------------------------
+# Check to see if we can do hard links to lock some files if needed
+m4_defun([_LT_COMPILER_FILE_LOCKS],
+[m4_require([_LT_ENABLE_LOCK])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+_LT_COMPILER_C_O([$1])
+
+hard_links="nottested"
+if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then
+ # do not overwrite the value of need_locks provided by the user
+ AC_MSG_CHECKING([if we can lock with hard links])
+ hard_links=yes
+ $RM conftest*
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ touch conftest.a
+ ln conftest.a conftest.b 2>&5 || hard_links=no
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ AC_MSG_RESULT([$hard_links])
+ if test "$hard_links" = no; then
+ AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe])
+ need_locks=warn
+ fi
+else
+ need_locks=no
+fi
+_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?])
+])# _LT_COMPILER_FILE_LOCKS
+
+
+# _LT_CHECK_OBJDIR
+# ----------------
+m4_defun([_LT_CHECK_OBJDIR],
+[AC_CACHE_CHECK([for objdir], [lt_cv_objdir],
+[rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+ lt_cv_objdir=.libs
+else
+ # MS-DOS does not allow filenames that begin with a dot.
+ lt_cv_objdir=_libs
+fi
+rmdir .libs 2>/dev/null])
+objdir=$lt_cv_objdir
+_LT_DECL([], [objdir], [0],
+ [The name of the directory that contains temporary libtool files])dnl
+m4_pattern_allow([LT_OBJDIR])dnl
+AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/",
+ [Define to the sub-directory in which libtool stores uninstalled libraries.])
+])# _LT_CHECK_OBJDIR
+
+
+# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME])
+# --------------------------------------
+# Check hardcoding attributes.
+m4_defun([_LT_LINKER_HARDCODE_LIBPATH],
+[AC_MSG_CHECKING([how to hardcode library paths into programs])
+_LT_TAGVAR(hardcode_action, $1)=
+if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" ||
+ test -n "$_LT_TAGVAR(runpath_var, $1)" ||
+ test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then
+
+ # We can hardcode non-existent directories.
+ if test "$_LT_TAGVAR(hardcode_direct, $1)" != no &&
+ # If the only mechanism to avoid hardcoding is shlibpath_var, we
+ # have to relink, otherwise we might link with an installed library
+ # when we should be linking with a yet-to-be-installed one
+ ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no &&
+ test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then
+ # Linking always hardcodes the temporary library directory.
+ _LT_TAGVAR(hardcode_action, $1)=relink
+ else
+ # We can link without hardcoding, and we can hardcode nonexisting dirs.
+ _LT_TAGVAR(hardcode_action, $1)=immediate
+ fi
+else
+ # We cannot hardcode anything, or else we can only hardcode existing
+ # directories.
+ _LT_TAGVAR(hardcode_action, $1)=unsupported
+fi
+AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)])
+
+if test "$_LT_TAGVAR(hardcode_action, $1)" = relink ||
+ test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
+_LT_TAGDECL([], [hardcode_action], [0],
+ [How to hardcode a shared library path into an executable])
+])# _LT_LINKER_HARDCODE_LIBPATH
+
+
+# _LT_CMD_STRIPLIB
+# ----------------
+m4_defun([_LT_CMD_STRIPLIB],
+[m4_require([_LT_DECL_EGREP])
+striplib=
+old_striplib=
+AC_MSG_CHECKING([whether stripping libraries is possible])
+if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+ test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+ test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+ AC_MSG_RESULT([yes])
+else
+# FIXME - insert some real tests, host_os isn't really good enough
+ case $host_os in
+ darwin*)
+ if test -n "$STRIP" ; then
+ striplib="$STRIP -x"
+ old_striplib="$STRIP -S"
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ fi
+ ;;
+ *)
+ AC_MSG_RESULT([no])
+ ;;
+ esac
+fi
+_LT_DECL([], [old_striplib], [1], [Commands to strip libraries])
+_LT_DECL([], [striplib], [1])
+])# _LT_CMD_STRIPLIB
+
+
+# _LT_SYS_DYNAMIC_LINKER([TAG])
+# -----------------------------
+# PORTME Fill in your ld.so characteristics
+m4_defun([_LT_SYS_DYNAMIC_LINKER],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_OBJDUMP])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+AC_MSG_CHECKING([dynamic linker characteristics])
+m4_if([$1],
+ [], [
+if test "$GCC" = yes; then
+ case $host_os in
+ darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
+ *) lt_awk_arg="/^libraries:/" ;;
+ esac
+ case $host_os in
+ mingw* | cegcc*) lt_sed_strip_eq="s,=\([[A-Za-z]]:\),\1,g" ;;
+ *) lt_sed_strip_eq="s,=/,/,g" ;;
+ esac
+ lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+ case $lt_search_path_spec in
+ *\;*)
+ # if the path contains ";" then we assume it to be the separator
+ # otherwise default to the standard path separator (i.e. ":") - it is
+ # assumed that no part of a normal pathname contains ";" but that should
+    # be okay in the real world where ";" in dirpaths is itself problematic.
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
+ ;;
+ *)
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
+ ;;
+ esac
+  # Ok, now that we have the path separated by spaces, we can step through it
+  # and add the multilib dir if necessary.
+ lt_tmp_lt_search_path_spec=
+ lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+ for lt_sys_path in $lt_search_path_spec; do
+ if test -d "$lt_sys_path/$lt_multi_os_dir"; then
+ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir"
+ else
+ test -d "$lt_sys_path" && \
+ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
+ fi
+ done
+ lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
+BEGIN {RS=" "; FS="/|\n";} {
+ lt_foo="";
+ lt_count=0;
+ for (lt_i = NF; lt_i > 0; lt_i--) {
+ if ($lt_i != "" && $lt_i != ".") {
+ if ($lt_i == "..") {
+ lt_count++;
+ } else {
+ if (lt_count == 0) {
+ lt_foo="/" $lt_i lt_foo;
+ } else {
+ lt_count--;
+ }
+ }
+ }
+ }
+ if (lt_foo != "") { lt_freq[[lt_foo]]++; }
+ if (lt_freq[[lt_foo]] == 1) { print lt_foo; }
+}'`
+ # AWK program above erroneously prepends '/' to C:/dos/paths
+ # for these hosts.
+ case $host_os in
+ mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
+ $SED 's,/\([[A-Za-z]]:\),\1,g'` ;;
+ esac
+ sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
+else
+ sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+fi])
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=".so"
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+case $host_os in
+aix3*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+ shlibpath_var=LIBPATH
+
+ # AIX 3 has no versioning support, so we append a major version to the name.
+ soname_spec='${libname}${release}${shared_ext}$major'
+ ;;
+
+aix[[4-9]]*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ hardcode_into_libs=yes
+ if test "$host_cpu" = ia64; then
+ # AIX 5 supports IA64
+ library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ else
+ # With GCC up to 2.95.x, collect2 would create an import file
+      # for dependent libraries. The import file would start with
+ # the line `#! .'. This would cause the generated library to
+ # depend on `.', always an invalid library. This was fixed in
+ # development snapshots of GCC prior to 3.0.
+ case $host_os in
+ aix4 | aix4.[[01]] | aix4.[[01]].*)
+ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ echo ' yes '
+ echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+ :
+ else
+ can_build_shared=no
+ fi
+ ;;
+ esac
+      # AIX (on Power*) has no versioning support, so currently we cannot hardcode the
+      # correct soname into the executable. Probably we can add versioning support to
+      # collect2, so additional links may be useful in the future.
+ if test "$aix_use_runtimelinking" = yes; then
+ # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+ # instead of lib<name>.a to let people know that these are not
+ # typical AIX shared libraries.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ else
+ # We preserve .a as extension for shared libraries through AIX4.2
+ # and later when we are not doing run time linking.
+ library_names_spec='${libname}${release}.a $libname.a'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ fi
+ shlibpath_var=LIBPATH
+ fi
+ ;;
+
+amigaos*)
+ case $host_cpu in
+ powerpc)
+ # Since July 2007 AmigaOS4 officially supports .so libraries.
+      # When compiling the executable, add -use-dynld -Lsobjs: to the compile line.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ ;;
+ m68k)
+ library_names_spec='$libname.ixlibrary $libname.a'
+ # Create ${libname}_ixlibrary.a entries in /sys/libs.
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+ ;;
+ esac
+ ;;
+
+beos*)
+ library_names_spec='${libname}${shared_ext}'
+ dynamic_linker="$host_os ld.so"
+ shlibpath_var=LIBRARY_PATH
+ ;;
+
+bsdi[[45]]*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+ # the default ld.so.conf also contains /usr/contrib/lib and
+ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+ # libtool to hard-code these into programs
+ ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+ version_type=windows
+ shrext_cmds=".dll"
+ need_version=no
+ need_lib_prefix=no
+
+ case $GCC,$cc_basename in
+ yes,*)
+ # gcc
+ library_names_spec='$libname.dll.a'
+ # DLL is installed to $(libdir)/../bin by postinstall_cmds
+ postinstall_cmds='base_file=`basename \${file}`~
+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog $dir/$dlname \$dldir/$dlname~
+ chmod a+x \$dldir/$dlname~
+ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+ fi'
+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $RM \$dlpath'
+ shlibpath_overrides_runpath=yes
+
+ case $host_os in
+ cygwin*)
+ # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+m4_if([$1], [],[
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"])
+ ;;
+ mingw* | cegcc*)
+ # MinGW DLLs use traditional 'lib' prefix
+ soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+ ;;
+ pw32*)
+ # pw32 DLLs use 'pw' prefix rather than 'lib'
+ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+ ;;
+ esac
+ dynamic_linker='Win32 ld.exe'
+ ;;
+
+ *,cl*)
+ # Native MSVC
+ libname_spec='$name'
+ soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+ library_names_spec='${libname}.dll.lib'
+
+ case $build_os in
+ mingw*)
+ sys_lib_search_path_spec=
+ lt_save_ifs=$IFS
+ IFS=';'
+ for lt_path in $LIB
+ do
+ IFS=$lt_save_ifs
+ # Let DOS variable expansion print the short 8.3 style file name.
+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+ done
+ IFS=$lt_save_ifs
+ # Convert to MSYS style.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'`
+ ;;
+ cygwin*)
+ # Convert to unix form, then to dos form, then back to unix form
+ # but this time dos style (no spaces!) so that the unix form looks
+ # like /cygdrive/c/PROGRA~1:/cygdr...
+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ ;;
+ *)
+ sys_lib_search_path_spec="$LIB"
+ if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then
+ # It is most probably a Windows format PATH.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+ else
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ fi
+ # FIXME: find the short name or the path components, as spaces are
+ # common. (e.g. "Program Files" -> "PROGRA~1")
+ ;;
+ esac
+
+ # DLL is installed to $(libdir)/../bin by postinstall_cmds
+ postinstall_cmds='base_file=`basename \${file}`~
+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog $dir/$dlname \$dldir/$dlname'
+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $RM \$dlpath'
+ shlibpath_overrides_runpath=yes
+ dynamic_linker='Win32 link.exe'
+ ;;
+
+ *)
+ # Assume MSVC wrapper
+ library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib'
+ dynamic_linker='Win32 ld.exe'
+ ;;
+ esac
+ # FIXME: first we should search . and the directory the executable is in
+ shlibpath_var=PATH
+ ;;
+
+darwin* | rhapsody*)
+ dynamic_linker="$host_os dyld"
+ version_type=darwin
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
+ soname_spec='${libname}${release}${major}$shared_ext'
+ shlibpath_overrides_runpath=yes
+ shlibpath_var=DYLD_LIBRARY_PATH
+ shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+m4_if([$1], [],[
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"])
+ sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+ ;;
+
+dgux*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+freebsd* | dragonfly*)
+ # DragonFly does not have aout. When/if they implement a new
+ # versioning mechanism, adjust this.
+ if test -x /usr/bin/objformat; then
+ objformat=`/usr/bin/objformat`
+ else
+ case $host_os in
+ freebsd[[23]].*) objformat=aout ;;
+ *) objformat=elf ;;
+ esac
+ fi
+ version_type=freebsd-$objformat
+ case $version_type in
+ freebsd-elf*)
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+ need_version=no
+ need_lib_prefix=no
+ ;;
+ freebsd-*)
+ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+ need_version=yes
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_os in
+ freebsd2.*)
+ shlibpath_overrides_runpath=yes
+ ;;
+ freebsd3.[[01]]* | freebsdelf3.[[01]]*)
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+ freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \
+ freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1)
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+ *) # from 4.6 on, and DragonFly
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+ esac
+ ;;
+
+gnu*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+haiku*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ dynamic_linker="$host_os runtime_loader"
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+ hardcode_into_libs=yes
+ ;;
+
+hpux9* | hpux10* | hpux11*)
+ # Give a soname corresponding to the major version so that dld.sl refuses to
+ # link against other versions.
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ case $host_cpu in
+ ia64*)
+ shrext_cmds='.so'
+ hardcode_into_libs=yes
+ dynamic_linker="$host_os dld.so"
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ if test "X$HPUX_IA64_MODE" = X32; then
+ sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+ else
+ sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+ fi
+ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+ ;;
+ hppa*64*)
+ shrext_cmds='.sl'
+ hardcode_into_libs=yes
+ dynamic_linker="$host_os dld.sl"
+ shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+ ;;
+ *)
+ shrext_cmds='.sl'
+ dynamic_linker="$host_os dld.sl"
+ shlibpath_var=SHLIB_PATH
+ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ ;;
+ esac
+ # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+ postinstall_cmds='chmod 555 $lib'
+ # or fails outright, so override atomically:
+ install_override_mode=555
+ ;;
+
+interix[[3-9]]*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+irix5* | irix6* | nonstopux*)
+ case $host_os in
+ nonstopux*) version_type=nonstopux ;;
+ *)
+ if test "$lt_cv_prog_gnu_ld" = yes; then
+ version_type=linux # correct to gnu/linux during the next big refactor
+ else
+ version_type=irix
+ fi ;;
+ esac
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}${shared_ext}$major'
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+ case $host_os in
+ irix5* | nonstopux*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in # libtool.m4 will add one of these switches to LD
+ *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+ libsuff= shlibsuff= libmagic=32-bit;;
+ *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+ libsuff=32 shlibsuff=N32 libmagic=N32;;
+ *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+ libsuff=64 shlibsuff=64 libmagic=64-bit;;
+ *) libsuff= shlibsuff= libmagic=never-match;;
+ esac
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+ hardcode_into_libs=yes
+ ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+ dynamic_linker=no
+ ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+
+ # Some binutils ld are patched to set DT_RUNPATH
+ AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath],
+ [lt_cv_shlibpath_overrides_runpath=no
+ save_LDFLAGS=$LDFLAGS
+ save_libdir=$libdir
+ eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \
+ LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\""
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+ [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null],
+ [lt_cv_shlibpath_overrides_runpath=yes])])
+ LDFLAGS=$save_LDFLAGS
+ libdir=$save_libdir
+ ])
+ shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+
+ # This implies no fast_install, which is unacceptable.
+ # Some rework will be needed to allow for fast_install
+ # before this can be enabled.
+ hardcode_into_libs=yes
+
+ # Append ld.so.conf contents to the search path
+ if test -f /etc/ld.so.conf; then
+ lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+ sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+ fi
+
+ # We used to test for /lib/ld.so.1 and disable shared libraries on
+ # powerpc, because MkLinux only supported shared libraries with the
+  # GNU dynamic linker.  Since that check was broken with cross compilers,
+  # since most powerpc-linux boxes support dynamic linking these days, and
+  # since people can always --disable-shared, the test was removed, and we
+  # assume the GNU/Linux dynamic linker is in use.
+ dynamic_linker='GNU/Linux ld.so'
+ ;;
+
+netbsdelf*-gnu)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ dynamic_linker='NetBSD ld.elf_so'
+ ;;
+
+netbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ dynamic_linker='NetBSD (a.out) ld.so'
+ else
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ dynamic_linker='NetBSD ld.elf_so'
+ fi
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+
+newsos6)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+*nto* | *qnx*)
+ version_type=qnx
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ dynamic_linker='ldqnx.so'
+ ;;
+
+openbsd*)
+ version_type=sunos
+ sys_lib_dlsearch_path_spec="/usr/lib"
+ need_lib_prefix=no
+ # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
+ case $host_os in
+ openbsd3.3 | openbsd3.3.*) need_version=yes ;;
+ *) need_version=no ;;
+ esac
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ case $host_os in
+ openbsd2.[[89]] | openbsd2.[[89]].*)
+ shlibpath_overrides_runpath=no
+ ;;
+ *)
+ shlibpath_overrides_runpath=yes
+ ;;
+ esac
+ else
+ shlibpath_overrides_runpath=yes
+ fi
+ ;;
+
+os2*)
+ libname_spec='$name'
+ shrext_cmds=".dll"
+ need_lib_prefix=no
+ library_names_spec='$libname${shared_ext} $libname.a'
+ dynamic_linker='OS/2 ld.exe'
+ shlibpath_var=LIBPATH
+ ;;
+
+osf3* | osf4* | osf5*)
+ version_type=osf
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}${shared_ext}$major'
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+ ;;
+
+rdos*)
+ dynamic_linker=no
+ ;;
+
+solaris*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ # ldd complains unless libraries are executable
+ postinstall_cmds='chmod +x $lib'
+ ;;
+
+sunos4*)
+ version_type=sunos
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ fi
+ need_version=yes
+ ;;
+
+sysv4 | sysv4.3*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_vendor in
+ sni)
+ shlibpath_overrides_runpath=no
+ need_lib_prefix=no
+ runpath_var=LD_RUN_PATH
+ ;;
+ siemens)
+ need_lib_prefix=no
+ ;;
+ motorola)
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+ ;;
+ esac
+ ;;
+
+sysv4*MP*)
+ if test -d /usr/nec ;then
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
+ soname_spec='$libname${shared_ext}.$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ fi
+ ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ version_type=freebsd-elf
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ if test "$with_gnu_ld" = yes; then
+ sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+ else
+ sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+ case $host_os in
+ sco3.2v5*)
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+ ;;
+ esac
+ fi
+ sys_lib_dlsearch_path_spec='/usr/lib'
+ ;;
+
+tpf*)
+ # TPF is a cross-target only. Preferred cross-host = GNU/Linux.
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+uts4*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+*)
+ dynamic_linker=no
+ ;;
+esac
+AC_MSG_RESULT([$dynamic_linker])
+test "$dynamic_linker" = no && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
+ sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+fi
+if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
+ sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+fi
+
+_LT_DECL([], [variables_saved_for_relink], [1],
+ [Variables whose values should be saved in libtool wrapper scripts and
+ restored at link time])
+_LT_DECL([], [need_lib_prefix], [0],
+ [Do we need the "lib" prefix for modules?])
+_LT_DECL([], [need_version], [0], [Do we need a version for libraries?])
+_LT_DECL([], [version_type], [0], [Library versioning type])
+_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable])
+_LT_DECL([], [shlibpath_var], [0],[Shared library path variable])
+_LT_DECL([], [shlibpath_overrides_runpath], [0],
+ [Is shlibpath searched before the hard-coded library search path?])
+_LT_DECL([], [libname_spec], [1], [Format of library name prefix])
+_LT_DECL([], [library_names_spec], [1],
+ [[List of archive names. First name is the real one, the rest are links.
+ The last name is the one that the linker finds with -lNAME]])
+_LT_DECL([], [soname_spec], [1],
+ [[The coded name of the library, if different from the real name]])
+_LT_DECL([], [install_override_mode], [1],
+ [Permission mode override for installation of shared libraries])
+_LT_DECL([], [postinstall_cmds], [2],
+ [Command to use after installation of a shared archive])
+_LT_DECL([], [postuninstall_cmds], [2],
+ [Command to use after uninstallation of a shared archive])
+_LT_DECL([], [finish_cmds], [2],
+ [Commands used to finish a libtool library installation in a directory])
+_LT_DECL([], [finish_eval], [1],
+ [[As "finish_cmds", except a single script fragment to be evaled but
+ not shown]])
+_LT_DECL([], [hardcode_into_libs], [0],
+ [Whether we should hardcode library paths into libraries])
+_LT_DECL([], [sys_lib_search_path_spec], [2],
+ [Compile-time system search path for libraries])
+_LT_DECL([], [sys_lib_dlsearch_path_spec], [2],
+ [Run-time system search path for libraries])
+])# _LT_SYS_DYNAMIC_LINKER
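+
+# As a rough illustration of what this macro records: on a typical
+# GNU/Linux host (values assumed from the linux* case handled earlier in
+# this macro), building a hypothetical libfoo with $major=1,
+# $versuffix=1.2.3 and an empty $release expands library_names_spec
+#   '${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+# into roughly
+#   libfoo.so.1.2.3    (the real file)
+#   libfoo.so.1        (the soname, per soname_spec; what ld.so resolves)
+#   libfoo.so          (what `-lfoo' resolves at link time)
+# with shlibpath_var=LD_LIBRARY_PATH naming the runtime search-path variable.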
+
+
+# _LT_PATH_TOOL_PREFIX(TOOL)
+# --------------------------
+# find a file program which can recognize a shared library
+AC_DEFUN([_LT_PATH_TOOL_PREFIX],
+[m4_require([_LT_DECL_EGREP])dnl
+AC_MSG_CHECKING([for $1])
+AC_CACHE_VAL(lt_cv_path_MAGIC_CMD,
+[case $MAGIC_CMD in
+[[\\/*] | ?:[\\/]*])
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+*)
+ lt_save_MAGIC_CMD="$MAGIC_CMD"
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+dnl $ac_dummy forces splitting on constant user-supplied paths.
+dnl POSIX.2 word splitting is done only on the output of word expansions,
+dnl not every word. This closes a longstanding sh security hole.
+ ac_dummy="m4_if([$2], , $PATH, [$2])"
+ for ac_dir in $ac_dummy; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$1; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/$1"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ $EGREP "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$lt_save_ifs"
+ MAGIC_CMD="$lt_save_MAGIC_CMD"
+ ;;
+esac])
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ AC_MSG_RESULT($MAGIC_CMD)
+else
+ AC_MSG_RESULT(no)
+fi
+_LT_DECL([], [MAGIC_CMD], [0],
+ [Used to examine libraries when file_magic_cmd begins with "file"])dnl
+])# _LT_PATH_TOOL_PREFIX
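+
+# Usage sketch: _LT_PATH_MAGIC below invokes this macro as
+#   _LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH)
+# so on a native build MAGIC_CMD typically ends up as something like
+# /usr/bin/file (path assumed), which is then run over
+# $file_magic_test_file whenever deplibs_check_method is a "file_magic"
+# pattern.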
+
+# Old name:
+AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], [])
+
+
+# _LT_PATH_MAGIC
+# --------------
+# find a file program which can recognize a shared library
+m4_defun([_LT_PATH_MAGIC],
+[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH)
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+ if test -n "$ac_tool_prefix"; then
+ _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH)
+ else
+ MAGIC_CMD=:
+ fi
+fi
+])# _LT_PATH_MAGIC
+
+
+# LT_PATH_LD
+# ----------
+# find the pathname to the GNU or non-GNU linker
+AC_DEFUN([LT_PATH_LD],
+[AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_PROG_ECHO_BACKSLASH])dnl
+
+AC_ARG_WITH([gnu-ld],
+ [AS_HELP_STRING([--with-gnu-ld],
+ [assume the C compiler uses GNU ld @<:@default=no@:>@])],
+ [test "$withval" = no || with_gnu_ld=yes],
+ [with_gnu_ld=no])dnl
+
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ AC_MSG_CHECKING([for ld used by $CC])
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [[\\/]]* | ?:[[\\/]]*)
+ re_direlt='/[[^/]][[^/]]*/\.\./'
+ # Canonicalize the pathname of ld
+ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ AC_MSG_CHECKING([for GNU ld])
+else
+ AC_MSG_CHECKING([for non-GNU ld])
+fi
+AC_CACHE_VAL(lt_cv_path_LD,
+[if test -z "$LD"; then
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some variants of GNU ld only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break
+ ;;
+ *)
+ test "$with_gnu_ld" != yes && break
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi])
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ AC_MSG_RESULT($LD)
+else
+ AC_MSG_RESULT(no)
+fi
+test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
+_LT_PATH_LD_GNU
+AC_SUBST([LD])
+
+_LT_TAGDECL([], [LD], [1], [The linker used to build libraries])
+])# LT_PATH_LD
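+
+# Usage sketch (hypothetical project; LT_INIT normally pulls this macro in
+# indirectly, so calling it by hand is rarely needed):
+#   AC_INIT([demo], [1.0])
+#   AC_PROG_CC
+#   LT_PATH_LD
+# afterwards $LD names the selected linker, and the user can steer the
+# choice at configure time with `./configure --with-gnu-ld'.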
+
+# Old names:
+AU_ALIAS([AM_PROG_LD], [LT_PATH_LD])
+AU_ALIAS([AC_PROG_LD], [LT_PATH_LD])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_PROG_LD], [])
+dnl AC_DEFUN([AC_PROG_LD], [])
+
+
+# _LT_PATH_LD_GNU
+# ---------------
+m4_defun([_LT_PATH_LD_GNU],
+[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld,
+[# I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ lt_cv_prog_gnu_ld=yes
+ ;;
+*)
+ lt_cv_prog_gnu_ld=no
+ ;;
+esac])
+with_gnu_ld=$lt_cv_prog_gnu_ld
+])# _LT_PATH_LD_GNU
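+
+# For example (output illustrative):
+#   $ $LD -v
+#   GNU ld (GNU Binutils) 2.30
+# matches the *GNU* pattern above and sets lt_cv_prog_gnu_ld=yes, whereas a
+# vendor linker such as the Solaris or AIX ld leaves it at no.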
+
+
+# _LT_CMD_RELOAD
+# --------------
+# find reload flag for linker
+# -- PORTME Some linkers may need a different reload flag.
+m4_defun([_LT_CMD_RELOAD],
+[AC_CACHE_CHECK([for $LD option to reload object files],
+ lt_cv_ld_reload_flag,
+ [lt_cv_ld_reload_flag='-r'])
+reload_flag=$lt_cv_ld_reload_flag
+case $reload_flag in
+"" | " "*) ;;
+*) reload_flag=" $reload_flag" ;;
+esac
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+ if test "$GCC" != yes; then
+ reload_cmds=false
+ fi
+ ;;
+ darwin*)
+ if test "$GCC" = yes; then
+ reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
+ else
+ reload_cmds='$LD$reload_flag -o $output$reload_objs'
+ fi
+ ;;
+esac
+_LT_TAGDECL([], [reload_flag], [1], [How to create reloadable object files])dnl
+_LT_TAGDECL([], [reload_cmds], [2])dnl
+])# _LT_CMD_RELOAD
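+
+# Worked example: with the default reload_flag='-r', the template
+#   reload_cmds='$LD$reload_flag -o $output$reload_objs'
+# expands to roughly (file names hypothetical)
+#   ld -r -o libfoo_la-merged.o foo1.o foo2.o
+# i.e. one relocatable object built from several inputs; on Darwin with GCC
+# the compiler driver is used instead (roughly `gcc -nostdlib -Wl,-r ...'),
+# as set above.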
+
+
+# _LT_CHECK_MAGIC_METHOD
+# ----------------------
+# how to check for library dependencies
+# -- PORTME fill in with the dynamic library characteristics
+m4_defun([_LT_CHECK_MAGIC_METHOD],
+[m4_require([_LT_DECL_EGREP])
+m4_require([_LT_DECL_OBJDUMP])
+AC_CACHE_CHECK([how to recognize dependent libraries],
+lt_cv_deplibs_check_method,
+[lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# 'unknown' -- same as none, but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by making test program.
+# 'file_magic [[regex]]' -- check by looking for files in library path
+# which responds to the $file_magic_cmd with a given extended regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
+
+case $host_os in
+aix[[4-9]]*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+beos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+bsdi[[45]]*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ ;;
+
+cygwin*)
+ # func_win32_libid is a shell function defined in ltmain.sh
+ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+ lt_cv_file_magic_cmd='func_win32_libid'
+ ;;
+
+mingw* | pw32*)
+ # Base MSYS/MinGW do not provide the 'file' command needed by
+ # func_win32_libid shell function, so use a weaker test based on 'objdump',
+ # unless we find 'file', for example because we are cross-compiling.
+ # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin.
+ if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then
+ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+ lt_cv_file_magic_cmd='func_win32_libid'
+ else
+ # Keep this pattern in sync with the one in func_win32_libid.
+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ fi
+ ;;
+
+cegcc*)
+ # use the weaker test based on 'objdump'. See mingw*.
+ lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ ;;
+
+darwin* | rhapsody*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+freebsd* | dragonfly*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+ case $host_cpu in
+ i*86 )
+ # Not sure whether the presence of OpenBSD here was a mistake.
+ # Let's accept both of them until this is cleared up.
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ ;;
+ esac
+ else
+ lt_cv_deplibs_check_method=pass_all
+ fi
+ ;;
+
+gnu*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+haiku*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+hpux10.20* | hpux11*)
+ lt_cv_file_magic_cmd=/usr/bin/file
+ case $host_cpu in
+ ia64*)
+ lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64'
+ lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
+ ;;
+ hppa*64*)
+ [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]']
+ lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
+ ;;
+ *)
+ lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library'
+ lt_cv_file_magic_test_file=/usr/lib/libc.sl
+ ;;
+ esac
+ ;;
+
+interix[[3-9]]*)
+ # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$'
+ ;;
+
+irix5* | irix6* | nonstopux*)
+ case $LD in
+ *-32|*"-32 ") libmagic=32-bit;;
+ *-n32|*"-n32 ") libmagic=N32;;
+ *-64|*"-64 ") libmagic=64-bit;;
+ *) libmagic=never-match;;
+ esac
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+netbsd* | netbsdelf*-gnu)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$'
+ fi
+ ;;
+
+newos6*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libnls.so
+ ;;
+
+*nto* | *qnx*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+openbsd*)
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
+ fi
+ ;;
+
+osf3* | osf4* | osf5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+rdos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+solaris*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv4 | sysv4.3*)
+ case $host_vendor in
+ motorola)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]'
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+ ;;
+ ncr)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ sequent)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )'
+ ;;
+ sni)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib"
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+ siemens)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ pc)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ esac
+ ;;
+
+tpf*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+esac
+])
+
+file_magic_glob=
+want_nocaseglob=no
+if test "$build" = "$host"; then
+ case $host_os in
+ mingw* | pw32*)
+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
+ want_nocaseglob=yes
+ else
+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"`
+ fi
+ ;;
+ esac
+fi
+
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+_LT_DECL([], [deplibs_check_method], [1],
+ [Method to check whether dependent libraries are shared objects])
+_LT_DECL([], [file_magic_cmd], [1],
+ [Command to use when deplibs_check_method = "file_magic"])
+_LT_DECL([], [file_magic_glob], [1],
+ [How to find potential files when deplibs_check_method = "file_magic"])
+_LT_DECL([], [want_nocaseglob], [1],
+ [Find potential files using nocaseglob when deplibs_check_method = "file_magic"])
+])# _LT_CHECK_MAGIC_METHOD
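+
+# Illustration of the "file_magic" method, using the bsdi entry above and
+# with the doubled m4 quote brackets reduced to single ones:
+#   lt_cv_file_magic_cmd='/usr/bin/file -L'
+#   lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
+# a dependency is then accepted only if, roughly,
+#   /usr/bin/file -L /shlib/libc.so | $EGREP 'ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
+# prints a matching line; 'pass_all' skips the check entirely, and
+# 'match_pattern' tests the file name rather than the file(1) output.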
+
+
+# LT_PATH_NM
+# ----------
+# find the pathname to a BSD- or MS-compatible name lister
+AC_DEFUN([LT_PATH_NM],
+[AC_REQUIRE([AC_PROG_CC])dnl
+AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM,
+[if test -n "$NM"; then
+ # Let the user override the test.
+ lt_cv_path_NM="$NM"
+else
+ lt_nm_to_check="${ac_tool_prefix}nm"
+ if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+ lt_nm_to_check="$lt_nm_to_check nm"
+ fi
+ for lt_tmp_nm in $lt_nm_to_check; do
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ tmp_nm="$ac_dir/$lt_tmp_nm"
+ if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ # nm: unknown option "B" ignored
+ # Tru64's nm complains that /dev/null is an invalid object file
+ case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in
+ */dev/null* | *'Invalid file or object type'*)
+ lt_cv_path_NM="$tmp_nm -B"
+ break
+ ;;
+ *)
+ case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in
+ */dev/null*)
+ lt_cv_path_NM="$tmp_nm -p"
+ break
+ ;;
+ *)
+ lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+ continue # so that we can try to find one that supports BSD flags
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+ done
+ : ${lt_cv_path_NM=no}
+fi])
+if test "$lt_cv_path_NM" != "no"; then
+ NM="$lt_cv_path_NM"
+else
+ # Didn't find any BSD compatible name lister, look for dumpbin.
+ if test -n "$DUMPBIN"; then :
+ # Let the user override the test.
+ else
+ AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :)
+ case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in
+ *COFF*)
+ DUMPBIN="$DUMPBIN -symbols"
+ ;;
+ *)
+ DUMPBIN=:
+ ;;
+ esac
+ fi
+ AC_SUBST([DUMPBIN])
+ if test "$DUMPBIN" != ":"; then
+ NM="$DUMPBIN"
+ fi
+fi
+test -z "$NM" && NM=nm
+AC_SUBST([NM])
+_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl
+
+AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface],
+ [lt_cv_nm_interface="BSD nm"
+ echo "int some_variable = 0;" > conftest.$ac_ext
+ (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD)
+ (eval "$ac_compile" 2>conftest.err)
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD)
+ (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD)
+ cat conftest.out >&AS_MESSAGE_LOG_FD
+ if $GREP 'External.*some_variable' conftest.out > /dev/null; then
+ lt_cv_nm_interface="MS dumpbin"
+ fi
+ rm -f conftest*])
+])# LT_PATH_NM
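+
+# Example of the probe above (paths illustrative): a BSD-compatible nm
+# swallows the -B flag, so the first line of
+#   /usr/bin/nm -B /dev/null
+# mentions /dev/null (usually a "no symbols" complaint) and lt_cv_path_NM
+# becomes "/usr/bin/nm -B"; System V style tools get "-p" instead, and if
+# only MS dumpbin is found, NM is set to "dumpbin -symbols" with
+# lt_cv_nm_interface reported as "MS dumpbin" by the check further down.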
+
+# Old names:
+AU_ALIAS([AM_PROG_NM], [LT_PATH_NM])
+AU_ALIAS([AC_PROG_NM], [LT_PATH_NM])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_PROG_NM], [])
+dnl AC_DEFUN([AC_PROG_NM], [])
+
+# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
+# --------------------------------
+# how to determine the name of the shared library
+# associated with a specific link library.
+# -- PORTME fill in with the dynamic library characteristics
+m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB],
+[m4_require([_LT_DECL_EGREP])
+m4_require([_LT_DECL_OBJDUMP])
+m4_require([_LT_DECL_DLLTOOL])
+AC_CACHE_CHECK([how to associate runtime and link libraries],
+lt_cv_sharedlib_from_linklib_cmd,
+[lt_cv_sharedlib_from_linklib_cmd='unknown'
+
+case $host_os in
+cygwin* | mingw* | pw32* | cegcc*)
+ # two different shell functions defined in ltmain.sh
+ # decide which to use based on capabilities of $DLLTOOL
+ case `$DLLTOOL --help 2>&1` in
+ *--identify-strict*)
+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+ ;;
+ *)
+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+ ;;
+ esac
+ ;;
+*)
+ # fallback: assume linklib IS sharedlib
+ lt_cv_sharedlib_from_linklib_cmd="$ECHO"
+ ;;
+esac
+])
+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+
+_LT_DECL([], [sharedlib_from_linklib_cmd], [1],
+ [Command to associate shared and link libraries])
+])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
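+
+# Sketch: on Cygwin/MinGW this maps an import library to the DLL it refers
+# to, e.g. (names hypothetical)
+#   $sharedlib_from_linklib_cmd libfoo.dll.a   =>   cygfoo-1.dll
+# using `$DLLTOOL --identify-strict' when the installed dlltool supports it;
+# on every other host the command is just $ECHO, i.e. the link library is
+# already the shared library itself.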
+
+
+# _LT_PATH_MANIFEST_TOOL
+# ----------------------
+# locate the manifest tool
+m4_defun([_LT_PATH_MANIFEST_TOOL],
+[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :)
+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool],
+ [lt_cv_path_mainfest_tool=no
+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD
+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+ lt_cv_path_mainfest_tool=yes
+ fi
+ rm -f conftest*])
+if test "x$lt_cv_path_mainfest_tool" != xyes; then
+ MANIFEST_TOOL=:
+fi
+_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl
+])# _LT_PATH_MANIFEST_TOOL
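+
+# The probe above amounts to (conftest files as in the macro)
+#   $MANIFEST_TOOL '-?' > conftest.out 2>conftest.err
+#   $GREP 'Manifest Tool' conftest.out
+# which succeeds for a banner like the one Microsoft's mt.exe prints, but
+# not for an unrelated `mt' such as the tape-control utility, in which case
+# MANIFEST_TOOL is reset to ':'.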
+
+
+# LT_LIB_M
+# --------
+# check for math library
+AC_DEFUN([LT_LIB_M],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+LIBM=
+case $host in
+*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*)
+  # These systems don't have libm, or don't need it
+ ;;
+*-ncr-sysv4.3*)
+ AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw")
+ AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm")
+ ;;
+*)
+ AC_CHECK_LIB(m, cos, LIBM="-lm")
+ ;;
+esac
+AC_SUBST([LIBM])
+])# LT_LIB_M
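+
+# Usage sketch (hypothetical project):
+#   configure.ac:  LT_LIB_M
+#   Makefile.am:   demo_LDADD = $(LIBM)
+# On GNU/Linux this typically substitutes LIBM as "-lm"; on Darwin, Cygwin,
+# Haiku and the other systems listed above it stays empty.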
+
+# Old name:
+AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_CHECK_LIBM], [])
+
+
+# _LT_COMPILER_NO_RTTI([TAGNAME])
+# -------------------------------
+m4_defun([_LT_COMPILER_NO_RTTI],
+[m4_require([_LT_TAG_COMPILER])dnl
+
+_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+
+if test "$GCC" = yes; then
+ case $cc_basename in
+ nvcc*)
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;;
+ esac
+
+ _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions],
+ lt_cv_prog_compiler_rtti_exceptions,
+ [-fno-rtti -fno-exceptions], [],
+ [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"])
+fi
+_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1],
+ [Compiler flag to turn off builtin functions])
+])# _LT_COMPILER_NO_RTTI
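+
+# For a GCC-compatible $compiler that accepts the probe above, the tag
+# variable typically ends up as
+#   lt_prog_compiler_no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions'
+# (with an -Xcompiler prefix for nvcc); it is later appended to CFLAGS when
+# compiling the conftest objects fed to the global-symbol pipe below.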
+
+
+# _LT_CMD_GLOBAL_SYMBOLS
+# ----------------------
+m4_defun([_LT_CMD_GLOBAL_SYMBOLS],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_PROG_AWK])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+AC_REQUIRE([LT_PATH_LD])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+AC_MSG_CHECKING([command to parse $NM output from $compiler object])
+AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe],
+[
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix. What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[[BCDEGRST]]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)'
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+ symcode='[[BCDT]]'
+ ;;
+cygwin* | mingw* | pw32* | cegcc*)
+ symcode='[[ABCDGISTW]]'
+ ;;
+hpux*)
+ if test "$host_cpu" = ia64; then
+ symcode='[[ABCDEGRST]]'
+ fi
+ ;;
+irix* | nonstopux*)
+ symcode='[[BCDEGRST]]'
+ ;;
+osf*)
+ symcode='[[BCDEGQRST]]'
+ ;;
+solaris*)
+ symcode='[[BDRT]]'
+ ;;
+sco3.2v5*)
+ symcode='[[DT]]'
+ ;;
+sysv4.2uw2*)
+ symcode='[[DT]]'
+ ;;
+sysv5* | sco5v6* | unixware* | OpenUNIX*)
+ symcode='[[ABDT]]'
+ ;;
+sysv4)
+ symcode='[[DFNSTU]]'
+ ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+case `$NM -V 2>&1` in
+*GNU* | *'with BFD'*)
+ symcode='[[ABCDGIRSTW]]' ;;
+esac
+
+# Transform an extracted symbol line into a proper C declaration.
+# Some systems (esp. on ia64) link data and code symbols differently,
+# so use this general approach.
+lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'"
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'"
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $build_os in
+mingw*)
+ opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+ ;;
+esac
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+ # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+ symxfrm="\\1 $ac_symprfx\\2 \\2"
+
+ # Write the raw and C identifiers.
+ if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+ # Fake it for dumpbin and say T for any non-static function
+ # and D for any global variable.
+ # Also find C++ and __fastcall symbols from MSVC++,
+ # which start with @ or ?.
+ lt_cv_sys_global_symbol_pipe="$AWK ['"\
+" {last_section=section; section=\$ 3};"\
+" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
+" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
+" \$ 0!~/External *\|/{next};"\
+" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
+" {if(hide[section]) next};"\
+" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\
+" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\
+" s[1]~/^[@?]/{print s[1], s[1]; next};"\
+" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\
+" ' prfx=^$ac_symprfx]"
+ else
+ lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+ fi
+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'"
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+
+ rm -f conftest*
+ cat > conftest.$ac_ext <<_LT_EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(void);
+void nm_test_func(void){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+_LT_EOF
+
+ if AC_TRY_EVAL(ac_compile); then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+ if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<_LT_EOF > conftest.$ac_ext
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+ relocations are performed -- see ld's documentation on pseudo-relocs. */
+# define LT@&t@_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data. */
+# define LT@&t@_DLSYM_CONST
+#else
+# define LT@&t@_DLSYM_CONST const
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_LT_EOF
+ # Now generate the symbol file.
+ eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
+
+ cat <<_LT_EOF >> conftest.$ac_ext
+
+/* The mapping between symbol names and symbols. */
+LT@&t@_DLSYM_CONST struct {
+ const char *name;
+ void *address;
+}
+lt__PROGRAM__LTX_preloaded_symbols[[]] =
+{
+ { "@PROGRAM@", (void *) 0 },
+_LT_EOF
+ $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+ cat <<\_LT_EOF >> conftest.$ac_ext
+ {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt__PROGRAM__LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+_LT_EOF
+ # Now try linking the two files.
+ mv conftest.$ac_objext conftstm.$ac_objext
+ lt_globsym_save_LIBS=$LIBS
+ lt_globsym_save_CFLAGS=$CFLAGS
+ LIBS="conftstm.$ac_objext"
+ CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)"
+ if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then
+ pipe_works=yes
+ fi
+ LIBS=$lt_globsym_save_LIBS
+ CFLAGS=$lt_globsym_save_CFLAGS
+ else
+ echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD
+ fi
+ else
+ echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD
+ fi
+ else
+ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD
+ fi
+ else
+ echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD
+ cat conftest.$ac_ext >&5
+ fi
+ rm -rf conftest* conftst*
+
+ # Do not use the global_symbol_pipe unless it works.
+ if test "$pipe_works" = yes; then
+ break
+ else
+ lt_cv_sys_global_symbol_pipe=
+ fi
+done
+])
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+ lt_cv_sys_global_symbol_to_cdecl=
+fi
+if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
+ AC_MSG_RESULT(failed)
+else
+ AC_MSG_RESULT(ok)
+fi
+
+# Response file support.
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+ nm_file_list_spec='@'
+elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then
+ nm_file_list_spec='@'
+fi
+
+_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1],
+ [Take the output of nm and produce a listing of raw symbols and C names])
+_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1],
+ [Transform the output of nm in a proper C declaration])
+_LT_DECL([global_symbol_to_c_name_address],
+ [lt_cv_sys_global_symbol_to_c_name_address], [1],
+ [Transform the output of nm in a C name address pair])
+_LT_DECL([global_symbol_to_c_name_address_lib_prefix],
+ [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1],
+ [Transform the output of nm in a C name address pair when lib prefix is needed])
+_LT_DECL([], [nm_file_list_spec], [1],
+ [Specify filename containing input files for $NM])
+]) # _LT_CMD_GLOBAL_SYMBOLS
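+
+# Worked example of the pipe (BSD nm interface, empty symbol prefix): for
+# the conftest program above, `$NM conftest.o | $lt_cv_sys_global_symbol_pipe'
+# emits "<type> <raw symbol> <C name>" lines, roughly
+#   T nm_test_func nm_test_func
+#   B nm_test_var nm_test_var      (or C, if the compiler still uses commons)
+# which global_symbol_to_cdecl rewrites as
+#   extern int nm_test_func();
+#   extern char nm_test_var;
+# and global_symbol_to_c_name_address as
+#   {"nm_test_func", (void *) &nm_test_func},
+#   {"nm_test_var", (void *) &nm_test_var},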
+
+
+# _LT_COMPILER_PIC([TAGNAME])
+# ---------------------------
+m4_defun([_LT_COMPILER_PIC],
+[m4_require([_LT_TAG_COMPILER])dnl
+_LT_TAGVAR(lt_prog_compiler_wl, $1)=
+_LT_TAGVAR(lt_prog_compiler_pic, $1)=
+_LT_TAGVAR(lt_prog_compiler_static, $1)=
+
+m4_if([$1], [CXX], [
+ # C++ specific cases for pic, static, wl, etc.
+ if test "$GXX" = yes; then
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+
+ case $host_os in
+ aix*)
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ m68k)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ esac
+ ;;
+
+ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+ mingw* | cygwin* | os2* | pw32* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ # Although the cygwin gcc ignores -fPIC, still need this for old-style
+ # (--disable-auto-import) libraries
+ m4_if([$1], [GCJ], [],
+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+ ;;
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+ ;;
+ *djgpp*)
+ # DJGPP does not support shared libraries at all
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+ ;;
+ haiku*)
+ # PIC is the default for Haiku.
+ # The "-static" flag exists, but is broken.
+ _LT_TAGVAR(lt_prog_compiler_static, $1)=
+ ;;
+ interix[[3-9]]*)
+ # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+ # Instead, we relocate shared libraries at runtime.
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
+ fi
+ ;;
+ hpux*)
+ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+ # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag
+ # sets the default TLS model and affects inlining.
+ case $host_cpu in
+ hppa*64*)
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ esac
+ ;;
+ *qnx* | *nto*)
+ # QNX uses GNU C++, but need to define -shared option too, otherwise
+ # it will coredump.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ esac
+ else
+ case $host_os in
+ aix[[4-9]]*)
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ else
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+ chorus*)
+ case $cc_basename in
+ cxch68*)
+ # Green Hills C++ Compiler
+ # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
+ ;;
+ esac
+ ;;
+ mingw* | cygwin* | os2* | pw32* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ m4_if([$1], [GCJ], [],
+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+ ;;
+ dgux*)
+ case $cc_basename in
+ ec++*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ ;;
+ ghcx*)
+ # Green Hills C++ Compiler
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ freebsd* | dragonfly*)
+ # FreeBSD uses GNU C++
+ ;;
+ hpux9* | hpux10* | hpux11*)
+ case $cc_basename in
+ CC*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive'
+ if test "$host_cpu" != ia64; then
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+ fi
+ ;;
+ aCC*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive'
+ case $host_cpu in
+ hppa*64*|ia64*)
+ # +Z the default
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+ ;;
+ esac
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ interix*)
+ # This is c89, which is MS Visual C++ (no shared libs)
+ # Anyone wants to do a port?
+ ;;
+ irix5* | irix6* | nonstopux*)
+ case $cc_basename in
+ CC*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ # CC pic flag -KPIC is the default.
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ case $cc_basename in
+ KCC*)
+ # KAI C++ Compiler
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ ecpc* )
+ # old Intel C++ for x86_64 which still supported -KPIC.
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+ ;;
+ icpc* )
+ # Intel C++, used to be incompatible with GCC.
+ # ICC 10 doesn't accept -KPIC any more.
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+ ;;
+ pgCC* | pgcpp*)
+ # Portland Group C++ compiler
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ cxx*)
+ # Compaq C++
+ # Make sure the PIC flag is empty. It appears that all Alpha
+ # Linux and Compaq Tru64 Unix objects are PIC.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+ xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*)
+ # IBM XL 8.0, 9.0 on PPC and BlueGene
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C++ 5.9
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+ lynxos*)
+ ;;
+ m88k*)
+ ;;
+ mvs*)
+ case $cc_basename in
+ cxx*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ netbsd* | netbsdelf*-gnu)
+ ;;
+ *qnx* | *nto*)
+ # QNX uses GNU C++, but need to define -shared option too, otherwise
+ # it will coredump.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+ ;;
+ osf3* | osf4* | osf5*)
+ case $cc_basename in
+ KCC*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
+ ;;
+ RCC*)
+ # Rational C++ 2.4.1
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+ ;;
+ cxx*)
+ # Digital/Compaq C++
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ # Make sure the PIC flag is empty. It appears that all Alpha
+ # Linux and Compaq Tru64 Unix objects are PIC.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ psos*)
+ ;;
+ solaris*)
+ case $cc_basename in
+ CC* | sunCC*)
+ # Sun C++ 4.2, 5.x and Centerline C++
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+ ;;
+ gcx*)
+ # Green Hills C++ Compiler
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ sunos4*)
+ case $cc_basename in
+ CC*)
+ # Sun C++ 4.x
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ lcc*)
+ # Lucid
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+ case $cc_basename in
+ CC*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ esac
+ ;;
+ tandem*)
+ case $cc_basename in
+ NCC*)
+ # NonStop-UX NCC 3.20
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ vxworks*)
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+ ;;
+ esac
+ fi
+],
+[
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+
+ case $host_os in
+ aix*)
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ m68k)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ esac
+ ;;
+
+ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+
+ mingw* | cygwin* | pw32* | os2* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ # Although the cygwin gcc ignores -fPIC, still need this for old-style
+ # (--disable-auto-import) libraries
+ m4_if([$1], [GCJ], [],
+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+ ;;
+
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+ ;;
+
+ haiku*)
+ # PIC is the default for Haiku.
+ # The "-static" flag exists, but is broken.
+ _LT_TAGVAR(lt_prog_compiler_static, $1)=
+ ;;
+
+ hpux*)
+ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+ # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag
+ # sets the default TLS model and affects inlining.
+ case $host_cpu in
+ hppa*64*)
+ # +Z the default
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ esac
+ ;;
+
+ interix[[3-9]]*)
+ # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+ # Instead, we relocate shared libraries at runtime.
+ ;;
+
+ msdosdjgpp*)
+ # Just because we use GCC doesn't mean we suddenly get shared libraries
+ # on systems that don't support them.
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+ enable_shared=no
+ ;;
+
+ *nto* | *qnx*)
+ # QNX uses GNU C++, but need to define -shared option too, otherwise
+ # it will coredump.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
+ fi
+ ;;
+
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ esac
+
+ case $cc_basename in
+ nvcc*) # Cuda Compiler Driver 2.2
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker '
+ if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)"
+ fi
+ ;;
+ esac
+ else
+ # PORTME Check for flag to pass linker flags through the system compiler.
+ case $host_os in
+ aix*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ else
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+
+ mingw* | cygwin* | pw32* | os2* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ m4_if([$1], [GCJ], [],
+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
+ # not for PA HP-UX.
+ case $host_cpu in
+ hppa*64*|ia64*)
+ # +Z the default
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+ ;;
+ esac
+ # Is there a better lt_prog_compiler_static that works with the bundled CC?
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive'
+ ;;
+
+ irix5* | irix6* | nonstopux*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ # PIC (with -KPIC) is the default.
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+
+ linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ case $cc_basename in
+ # old Intel for x86_64 which still supported -KPIC.
+ ecc*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+ ;;
+ # icc used to be incompatible with GCC.
+ # ICC 10 doesn't accept -KPIC any more.
+ icc* | ifort*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+ ;;
+ # Lahey Fortran 8.1.
+ lf95*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='--static'
+ ;;
+ nagfor*)
+ # NAG Fortran compiler
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
+ # Portland Group compilers (*not* the Pentium gcc compiler,
+ # which looks to be a dead project)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ ccc*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ # All Alpha code is PIC.
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+ xl* | bgxl* | bgf* | mpixl*)
+ # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*)
+ # Sun Fortran 8.3 passes all unrecognized flags to the linker
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)=''
+ ;;
+ *Sun\ F* | *Sun*Fortran*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+ ;;
+ *Sun\ C*)
+ # Sun C 5.9
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ ;;
+ *Intel*\ [[CF]]*Compiler*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+ ;;
+ *Portland\ Group*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+
+ newsos6)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+
+ *nto* | *qnx*)
+ # QNX uses GNU C++, but need to define -shared option too, otherwise
+ # it will coredump.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+ ;;
+
+ osf3* | osf4* | osf5*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ # All OSF/1 code is PIC.
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+
+ rdos*)
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+
+ solaris*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ case $cc_basename in
+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';;
+ esac
+ ;;
+
+ sunos4*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+
+ sysv4 | sysv4.2uw2* | sysv4.3*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+
+ sysv4*MP*)
+      if test -d /usr/nec; then
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ fi
+ ;;
+
+ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+
+ unicos*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+ ;;
+
+ uts4*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+
+ *)
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+ ;;
+ esac
+ fi
+])
+case $host_os in
+ # For platforms which do not support PIC, -DPIC is meaningless:
+ *djgpp*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])"
+ ;;
+esac
+
+AC_CACHE_CHECK([for $compiler option to produce PIC],
+ [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)],
+ [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)])
+_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
+ _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works],
+ [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)],
+ [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [],
+ [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in
+ "" | " "*) ;;
+ *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;;
+ esac],
+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)=
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no])
+fi
+_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1],
+ [Additional compiler flags for building library objects])
+
+_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1],
+ [How to pass a linker flag through the compiler])
+#
+# Check to make sure the static flag actually works.
+#
+wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\"
+_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works],
+ _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1),
+ $lt_tmp_static_flag,
+ [],
+ [_LT_TAGVAR(lt_prog_compiler_static, $1)=])
+_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1],
+ [Compiler flag to prevent dynamic linking])
+])# _LT_COMPILER_PIC
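+
+# Typical outcome for GCC on an ELF host such as GNU/Linux (default C tag),
+# assuming the PIC and static probes above succeed:
+#   lt_prog_compiler_wl='-Wl,'
+#   lt_prog_compiler_pic=' -fPIC -DPIC'
+#   lt_prog_compiler_static='-static'
+# so PIC objects are compiled with `-fPIC -DPIC' and linker flags are passed
+# through the driver as `-Wl,<flag>'; vendor compilers get the per-OS flags
+# listed above (-KPIC, -qpic, +Z, ...), and on platforms such as DJGPP,
+# where PIC is meaningless, the flag is cleared entirely.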
+
+
+# _LT_LINKER_SHLIBS([TAGNAME])
+# ----------------------------
+# See if the linker supports building shared libraries.
+m4_defun([_LT_LINKER_SHLIBS],
+[AC_REQUIRE([LT_PATH_LD])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+m4_require([_LT_PATH_MANIFEST_TOOL])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
+m4_if([$1], [CXX], [
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+ _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
+ case $host_os in
+ aix[[4-9]]*)
+ # If we're using GNU nm, then we don't want the "-C" option.
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm
+ # Also, AIX nm treats weak defined symbols like other global defined
+ # symbols, whereas GNU nm marks them as "W".
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ else
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ fi
+ ;;
+ pw32*)
+ _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds"
+ ;;
+ cygwin* | mingw* | cegcc*)
+ case $cc_basename in
+ cl*)
+ _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+ ;;
+ *)
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+ _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
+ ;;
+ esac
+ ;;
+ linux* | k*bsd*-gnu | gnu*)
+ _LT_TAGVAR(link_all_deplibs, $1)=no
+ ;;
+ *)
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+ ;;
+ esac
+], [
+ runpath_var=
+ _LT_TAGVAR(allow_undefined_flag, $1)=
+ _LT_TAGVAR(always_export_symbols, $1)=no
+ _LT_TAGVAR(archive_cmds, $1)=
+ _LT_TAGVAR(archive_expsym_cmds, $1)=
+ _LT_TAGVAR(compiler_needs_object, $1)=no
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)=
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+ _LT_TAGVAR(hardcode_automatic, $1)=no
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=
+ _LT_TAGVAR(hardcode_minus_L, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+ _LT_TAGVAR(inherit_rpath, $1)=no
+ _LT_TAGVAR(link_all_deplibs, $1)=unknown
+ _LT_TAGVAR(module_cmds, $1)=
+ _LT_TAGVAR(module_expsym_cmds, $1)=
+ _LT_TAGVAR(old_archive_from_new_cmds, $1)=
+ _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)=
+ _LT_TAGVAR(thread_safe_flag_spec, $1)=
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ # include_expsyms should be a list of space-separated symbols to be *always*
+ # included in the symbol list
+ _LT_TAGVAR(include_expsyms, $1)=
+ # exclude_expsyms can be an extended regexp of symbols to exclude
+ # it will be wrapped by ` (' and `)$', so one must not match beginning or
+ # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+ # as well as any symbol that contains `d'.
+ _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
+  # Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+ # platforms (ab)use it in PIC code, but their linkers get confused if
+ # the symbol is explicitly referenced. Since portable code cannot
+ # rely on this symbol name, it's probably fine to never include it in
+ # preloaded symbol tables.
+ # Exclude shared library initialization/finalization symbols.
+dnl Note also adjust exclude_expsyms for C++ above.
+ extract_expsyms_cmds=
+
+ case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+ # FIXME: the MSVC++ port hasn't been tested in a loooong time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$GCC" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+ interix*)
+ # we just hope/assume this is gcc and not c89 (= MSVC++)
+ with_gnu_ld=yes
+ ;;
+ openbsd*)
+ with_gnu_ld=no
+ ;;
+ linux* | k*bsd*-gnu | gnu*)
+ _LT_TAGVAR(link_all_deplibs, $1)=no
+ ;;
+ esac
+
+ _LT_TAGVAR(ld_shlibs, $1)=yes
+
+ # On some targets, GNU ld is compatible enough with the native linker
+ # that we're better off using the native interface for both.
+ lt_use_gnu_ld_interface=no
+ if test "$with_gnu_ld" = yes; then
+ case $host_os in
+ aix*)
+ # The AIX port of GNU ld has always aspired to compatibility
+ # with the native linker. However, as the warning in the GNU ld
+ # block says, versions before 2.19.5* couldn't really create working
+ # shared libraries, regardless of the interface used.
+ case `$LD -v 2>&1` in
+ *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
+ *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;;
+ *\ \(GNU\ Binutils\)\ [[3-9]]*) ;;
+ *)
+ lt_use_gnu_ld_interface=yes
+ ;;
+ esac
+ ;;
+ *)
+ lt_use_gnu_ld_interface=yes
+ ;;
+ esac
+ fi
+
+ if test "$lt_use_gnu_ld_interface" = yes; then
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ wlarc='${wl}'
+
+ # Set some defaults for GNU ld with shared library support. These
+ # are reset later if shared libraries are not supported. Putting them
+ # here allows them to be overridden if necessary.
+ runpath_var=LD_RUN_PATH
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ # ancient GNU ld didn't support --whole-archive et al.
+ if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+ _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ fi
+ supports_anon_versioning=no
+ case `$LD -v 2>&1` in
+ *GNU\ gold*) supports_anon_versioning=yes ;;
+ *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11
+ *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+ *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+ *\ 2.11.*) ;; # other 2.11 versions
+ *) supports_anon_versioning=yes ;;
+ esac
+
+ # See if GNU ld supports shared libraries.
+ case $host_os in
+ aix[[3-9]]*)
+ # On AIX/PPC, the GNU linker is very broken
+ if test "$host_cpu" != ia64; then
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.19, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support. If you
+*** really care for shared libraries, you may want to install binutils
+*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
+*** You will then need to restart the configuration process.
+
+_LT_EOF
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)=''
+ ;;
+ m68k)
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ ;;
+ esac
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+ # as there is no search path for DLLs.
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols'
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=no
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+ _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
+
+ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is; otherwise, prepend...
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname.def;
+ else
+ echo EXPORTS > $output_objdir/$soname.def;
+ cat $export_symbols >> $output_objdir/$soname.def;
+ fi~
+ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ haiku*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ ;;
+
+ interix[[3-9]]*)
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+ # Instead, shared libraries are loaded at an image base (0x10000000 by
+ # default) and relocated if they conflict, which is a slow, very memory-
+ # consuming and fragmenting process. To avoid this, we pick a random,
+ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+ # time. Moving up from 0x10000000 also allows more sbrk(2) space.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ ;;
+
+ gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
+ tmp_diet=no
+ if test "$host_os" = linux-dietlibc; then
+ case $cc_basename in
+ diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn)
+ esac
+ fi
+ if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+ && test "$tmp_diet" = no
+ then
+ tmp_addflag=' $pic_flag'
+ tmp_sharedflag='-shared'
+ case $cc_basename,$host_cpu in
+ pgcc*) # Portland Group C compiler
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ tmp_addflag=' $pic_flag'
+ ;;
+ pgf77* | pgf90* | pgf95* | pgfortran*)
+ # Portland Group f77 and f90 compilers
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ tmp_addflag=' $pic_flag -Mnomain' ;;
+ ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64
+ tmp_addflag=' -i_dynamic' ;;
+ efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64
+ tmp_addflag=' -i_dynamic -nofor_main' ;;
+ ifc* | ifort*) # Intel Fortran compiler
+ tmp_addflag=' -nofor_main' ;;
+ lf95*) # Lahey Fortran 8.1
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ tmp_sharedflag='--shared' ;;
+ xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+ tmp_sharedflag='-qmkshrobj'
+ tmp_addflag= ;;
+ nvcc*) # Cuda Compiler Driver 2.2
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(compiler_needs_object, $1)=yes
+ ;;
+ esac
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*) # Sun C 5.9
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(compiler_needs_object, $1)=yes
+ tmp_sharedflag='-G' ;;
+ *Sun\ F*) # Sun Fortran 8.3
+ tmp_sharedflag='-G' ;;
+ esac
+ _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+
+ if test "x$supports_anon_versioning" = xyes; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+ fi
+
+ case $cc_basename in
+ xlf* | bgf* | bgxlf* | mpixlf*)
+ # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
+ if test "x$supports_anon_versioning" = xyes; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+ fi
+ ;;
+ esac
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ netbsd* | netbsdelf*-gnu)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+ wlarc=
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ fi
+ ;;
+
+ solaris*)
+ if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems. Therefore, libtool
+*** is disabling shared libraries support. We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+ elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+ case `$LD -v 2>&1` in
+ *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*)
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not
+*** reliably create shared libraries on SCO systems. Therefore, libtool
+*** is disabling shared libraries support. We urge you to upgrade GNU
+*** binutils to release 2.16.91.0.3 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+ ;;
+ *)
+ # For security reasons, it is highly recommended that you always
+ # use absolute paths for naming shared libraries, and exclude the
+ # DT_RUNPATH tag from executables and libraries. But doing so
+ # requires that you compile everything twice, which is a pain.
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+ ;;
+
+ sunos4*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ wlarc=
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ *)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+
+ if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then
+ runpath_var=
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)=
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ fi
+ else
+ # PORTME fill in a description of your system's linker (not GNU ld)
+ case $host_os in
+ aix3*)
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=yes
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ _LT_TAGVAR(hardcode_direct, $1)=unsupported
+ fi
+ ;;
+
+ aix[[4-9]]*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ # If we're using GNU nm, then we don't want the "-C" option.
+ # -C means demangle to AIX nm, but means don't demangle to GNU nm.
+ # Also, AIX nm treats weak defined symbols like other global
+ # defined symbols, whereas GNU nm marks them as "W".
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ else
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ fi
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
+ for ld_flag in $LDFLAGS; do
+ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ aix_use_runtimelinking=yes
+ break
+ fi
+ done
+ ;;
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ _LT_TAGVAR(archive_cmds, $1)=''
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ _LT_TAGVAR(file_list_spec, $1)='${wl}-f,'
+
+ if test "$GCC" = yes; then
+ case $host_os in aix4.[[012]]|aix4.[[012]].*)
+ # We only want to do this on AIX 4.2 and lower, the check
+ # below for broken collect2 doesn't work under 4.3+
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" &&
+ strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ :
+ else
+ # We have old collect2
+ _LT_TAGVAR(hardcode_direct, $1)=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_minus_L
+ # to unsupported forces relinking
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=
+ fi
+ ;;
+ esac
+ shared_flag='-shared'
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag="$shared_flag "'${wl}-G'
+ fi
+ _LT_TAGVAR(link_all_deplibs, $1)=no
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+ # chokes on -Wl,-G. The following line is correct:
+ shared_flag='-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+ fi
+
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall'
+ # It seems that -bexpall does not export symbols beginning with
+ # underscore (_), so it is better to generate a list of symbols to export.
+ _LT_TAGVAR(always_export_symbols, $1)=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ _LT_TAGVAR(allow_undefined_flag, $1)='-berok'
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ _LT_SYS_MODULE_PATH_AIX([$1])
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib'
+ _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+ else
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ _LT_SYS_MODULE_PATH_AIX([$1])
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok'
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok'
+ if test "$with_gnu_ld" = yes; then
+ # We only use this code for GNU lds that support --whole-archive.
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ else
+ # Exported symbols can be pulled into shared objects from archives
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+ fi
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+ # This is similar to how AIX traditionally builds its shared libraries.
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+ fi
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)=''
+ ;;
+ m68k)
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ ;;
+ esac
+ ;;
+
+ bsdi[[45]]*)
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ case $cc_basename in
+ cl*)
+ # Native MSVC
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=yes
+ _LT_TAGVAR(file_list_spec, $1)='@'
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+ else
+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+ fi~
+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+ linknames='
+ # The linker will not automatically build a static lib if we build a DLL.
+ # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+ _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols'
+ # Don't use ranlib
+ _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
+ _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
+ lt_tool_outputfile="@TOOL_OUTPUT@"~
+ case $lt_outputfile in
+ *.exe|*.EXE) ;;
+ *)
+ lt_outputfile="$lt_outputfile.exe"
+ lt_tool_outputfile="$lt_tool_outputfile.exe"
+ ;;
+ esac~
+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+ $RM "$lt_outputfile.manifest";
+ fi'
+ ;;
+ *)
+ # Assume MSVC wrapper
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+ # FIXME: Should let the user specify the lib program.
+ _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs'
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+ ;;
+ esac
+ ;;
+
+ darwin* | rhapsody*)
+ _LT_DARWIN_LINKER_FEATURES($1)
+ ;;
+
+ dgux*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+ # support. Future versions do this automatically, but an explicit c++rt0.o
+ # does not break anything, and helps significantly (at the cost of a little
+ # extra space).
+ freebsd2.2*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+ freebsd2.*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+ freebsd* | dragonfly*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ hpux9*)
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ fi
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ ;;
+
+ hpux10*)
+ if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ if test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ fi
+ ;;
+
+ hpux11*)
+ if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+ case $host_cpu in
+ hppa*64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ ia64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ else
+ case $host_cpu in
+ hppa*64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ ia64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ m4_if($1, [], [
+ # Older versions of the 11.00 compiler do not understand -b yet
+ # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
+ _LT_LINKER_OPTION([if $CC understands -b],
+ _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b],
+ [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'],
+ [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])],
+ [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'])
+ ;;
+ esac
+ fi
+ if test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ case $host_cpu in
+ hppa*64*|ia64*)
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+ *)
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ ;;
+ esac
+ fi
+ ;;
+
+ irix5* | irix6* | nonstopux*)
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ # Try to use the -exported_symbol ld option, if it does not
+ # work, assume that -exports_file does not work either and
+ # implicitly export all symbols.
+ # This should be the same for all languages, so no per-tag cache variable.
+ AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol],
+ [lt_cv_irix_exported_symbol],
+ [save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
+ AC_LINK_IFELSE(
+ [AC_LANG_SOURCE(
+ [AC_LANG_CASE([C], [[int foo (void) { return 0; }]],
+ [C++], [[int foo (void) { return 0; }]],
+ [Fortran 77], [[
+ subroutine foo
+ end]],
+ [Fortran], [[
+ subroutine foo
+ end]])])],
+ [lt_cv_irix_exported_symbol=yes],
+ [lt_cv_irix_exported_symbol=no])
+ LDFLAGS="$save_LDFLAGS"])
+ if test "$lt_cv_irix_exported_symbol" = yes; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
+ fi
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
+ fi
+ _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(inherit_rpath, $1)=yes
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ ;;
+
+ netbsd* | netbsdelf*-gnu)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF
+ fi
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ newsos6)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ *nto* | *qnx*)
+ ;;
+
+ openbsd*)
+ if test -f /usr/libexec/ld.so; then
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ else
+ case $host_os in
+ openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ ;;
+ esac
+ fi
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ os2*)
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+ _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+ ;;
+
+ osf3*)
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ ;;
+
+ osf4* | osf5*) # as osf3* with the addition of -msym flag
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ else
+ _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
+
+ # Both c and cxx compiler support -rpath directly
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+ fi
+ _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ ;;
+
+ solaris*)
+ _LT_TAGVAR(no_undefined_flag, $1)=' -z defs'
+ if test "$GCC" = yes; then
+ wlarc='${wl}'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ else
+ case `$CC -V 2>&1` in
+ *"Compilers 5.0"*)
+ wlarc=''
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+ ;;
+ *)
+ wlarc='${wl}'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ ;;
+ esac
+ fi
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ case $host_os in
+ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+ *)
+ # The compiler driver will combine and reorder linker options,
+ # but understands `-z linker_flag'. GCC discards it without `$wl',
+ # but is careful enough not to reorder.
+ # Supported since Solaris 2.6 (maybe 2.5.1?)
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+ else
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
+ fi
+ ;;
+ esac
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ ;;
+
+ sunos4*)
+ if test "x$host_vendor" = xsequent; then
+ # Use $CC to link under sequent, because it throws in some extra .o
+ # files that make .init and .fini sections work.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ sysv4)
+ case $host_vendor in
+ sni)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true???
+ ;;
+ siemens)
+ ## LD is ld; it makes a PLAMLIB
+ ## CC just makes a GrossModule.
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs'
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ ;;
+ motorola)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_direct, $1)=no # Motorola manual says yes, but my tests say they lie
+ ;;
+ esac
+ runpath_var='LD_RUN_PATH'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ sysv4.3*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ _LT_TAGVAR(ld_shlibs, $1)=yes
+ fi
+ ;;
+
+ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ runpath_var='LD_RUN_PATH'
+
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6*)
+ # Note: We can NOT use -z defs as we might desire, because we do not
+ # link with -lc, and that would cause any symbols used from libc to
+ # always be unresolved, which means just about no library would
+ # ever link correctly. If we're not using GNU ld we use -z text
+ # though, which does catch some bad symbols but isn't as heavy-handed
+ # as -z defs.
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+ _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs'
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport'
+ runpath_var='LD_RUN_PATH'
+
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ ;;
+
+ uts4*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ *)
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+
+ if test x$host_vendor = xsni; then
+ case $host in
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym'
+ ;;
+ esac
+ fi
+ fi
+])
+AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
+test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no
+
+_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld
+
+_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl
+_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl
+_LT_DECL([], [extract_expsyms_cmds], [2],
+ [The commands to extract the exported symbol list from a shared archive])
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in
+x|xyes)
+ # Assume -lc should be added
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+
+ if test "$enable_shared" = yes && test "$GCC" = yes; then
+ case $_LT_TAGVAR(archive_cmds, $1) in
+ *'~'*)
+ # FIXME: we may have to deal with multi-command sequences.
+ ;;
+ '$CC '*)
+ # Test whether the compiler implicitly links with -lc since on some
+ # systems, -lgcc has to come before -lc. If gcc already passes -lc
+ # to ld, don't add -lc before -lgcc.
+ AC_CACHE_CHECK([whether -lc should be explicitly linked in],
+ [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1),
+ [$RM conftest*
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ if AC_TRY_EVAL(ac_compile) 2>conftest.err; then
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1)
+ pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1)
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1)
+ _LT_TAGVAR(allow_undefined_flag, $1)=
+ if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1)
+ then
+ lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ else
+ lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+ fi
+ _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi
+ $RM conftest*
+ ])
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)
+ ;;
+ esac
+ fi
+ ;;
+esac
+
+_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0],
+ [Whether or not to add -lc for building shared libraries])
+_LT_TAGDECL([allow_libtool_libs_with_static_runtimes],
+ [enable_shared_with_static_runtimes], [0],
+ [Whether or not to disallow shared libs when runtime libs are static])
+_LT_TAGDECL([], [export_dynamic_flag_spec], [1],
+ [Compiler flag to allow reflexive dlopens])
+_LT_TAGDECL([], [whole_archive_flag_spec], [1],
+ [Compiler flag to generate shared objects directly from archives])
+_LT_TAGDECL([], [compiler_needs_object], [1],
+ [Whether the compiler copes with passing no objects directly])
+_LT_TAGDECL([], [old_archive_from_new_cmds], [2],
+ [Create an old-style archive from a shared archive])
+_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2],
+ [Create a temporary old-style archive to link instead of a shared archive])
+_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive])
+_LT_TAGDECL([], [archive_expsym_cmds], [2])
+_LT_TAGDECL([], [module_cmds], [2],
+ [Commands used to build a loadable module if different from building
+ a shared archive.])
+_LT_TAGDECL([], [module_expsym_cmds], [2])
+_LT_TAGDECL([], [with_gnu_ld], [1],
+ [Whether we are building with GNU ld or not])
+_LT_TAGDECL([], [allow_undefined_flag], [1],
+ [Flag that allows shared libraries with undefined symbols to be built])
+_LT_TAGDECL([], [no_undefined_flag], [1],
+ [Flag that enforces no undefined symbols])
+_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1],
+ [Flag to hardcode $libdir into a binary during linking.
+ This must work even if $libdir does not exist])
+_LT_TAGDECL([], [hardcode_libdir_separator], [1],
+ [Whether we need a single "-rpath" flag with a separated argument])
+_LT_TAGDECL([], [hardcode_direct], [0],
+ [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes
+ DIR into the resulting binary])
+_LT_TAGDECL([], [hardcode_direct_absolute], [0],
+ [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes
+ DIR into the resulting binary and the resulting library dependency is
+ "absolute", i.e impossible to change by setting ${shlibpath_var} if the
+ library is relocated])
+_LT_TAGDECL([], [hardcode_minus_L], [0],
+ [Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+ into the resulting binary])
+_LT_TAGDECL([], [hardcode_shlibpath_var], [0],
+ [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+ into the resulting binary])
+_LT_TAGDECL([], [hardcode_automatic], [0],
+ [Set to "yes" if building a shared library automatically hardcodes DIR
+ into the library and all subsequent libraries and executables linked
+ against it])
+_LT_TAGDECL([], [inherit_rpath], [0],
+ [Set to yes if linker adds runtime paths of dependent libraries
+ to runtime path list])
+_LT_TAGDECL([], [link_all_deplibs], [0],
+ [Whether libtool must link a program against all its dependency libraries])
+_LT_TAGDECL([], [always_export_symbols], [0],
+ [Set to "yes" if exported symbols are required])
+_LT_TAGDECL([], [export_symbols_cmds], [2],
+ [The commands to list exported symbols])
+_LT_TAGDECL([], [exclude_expsyms], [1],
+ [Symbols that should not be listed in the preloaded symbols])
+_LT_TAGDECL([], [include_expsyms], [1],
+ [Symbols that must always be exported])
+_LT_TAGDECL([], [prelink_cmds], [2],
+ [Commands necessary for linking programs (against libraries) with templates])
+_LT_TAGDECL([], [postlink_cmds], [2],
+ [Commands necessary for finishing linking programs])
+_LT_TAGDECL([], [file_list_spec], [1],
+ [Specify filename containing input files])
+dnl FIXME: Not yet implemented
+dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1],
+dnl [Compiler flag to generate thread safe objects])
+])# _LT_LINKER_SHLIBS
+
+
+# _LT_LANG_C_CONFIG([TAG])
+# ------------------------
+# Ensure that the configuration variables for a C compiler are suitably
+# defined. These variables are subsequently used by _LT_CONFIG to write
+# the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_C_CONFIG],
+[m4_require([_LT_DECL_EGREP])dnl
+lt_save_CC="$CC"
+AC_LANG_PUSH(C)
+
+# Source file extension for C test sources.
+ac_ext=c
+
+# Object file extension for compiled C test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="int some_variable = 0;"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='int main(){return(0);}'
+
+_LT_TAG_COMPILER
+# Save the default compiler, since it gets overwritten when the other
+# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
+compiler_DEFAULT=$CC
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+if test -n "$compiler"; then
+ _LT_COMPILER_NO_RTTI($1)
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_SYS_DYNAMIC_LINKER($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+ LT_SYS_DLOPEN_SELF
+ _LT_CMD_STRIPLIB
+
+ # Report which library types will actually be built
+ AC_MSG_CHECKING([if libtool supports shared libraries])
+ AC_MSG_RESULT([$can_build_shared])
+
+ AC_MSG_CHECKING([whether to build shared libraries])
+ test "$can_build_shared" = "no" && enable_shared=no
+
+ # On AIX, shared libraries and static libraries use the same namespace, and
+ # are all built from PIC.
+ case $host_os in
+ aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+
+ aix[[4-9]]*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+ esac
+ AC_MSG_RESULT([$enable_shared])
+
+ AC_MSG_CHECKING([whether to build static libraries])
+ # Make sure either enable_shared or enable_static is yes.
+ test "$enable_shared" = yes || enable_static=yes
+ AC_MSG_RESULT([$enable_static])
+
+ _LT_CONFIG($1)
+fi
+AC_LANG_POP
+CC="$lt_save_CC"
+])# _LT_LANG_C_CONFIG
+
+
+# _LT_LANG_CXX_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for a C++ compiler are suitably
+# defined. These variables are subsequently used by _LT_CONFIG to write
+# the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_CXX_CONFIG],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_PATH_MANIFEST_TOOL])dnl
+if test -n "$CXX" && ( test "X$CXX" != "Xno" &&
+ ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) ||
+ (test "X$CXX" != "Xg++"))) ; then
+ AC_PROG_CXXCPP
+else
+ _lt_caught_CXX_error=yes
+fi
+
+AC_LANG_PUSH(C++)
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(compiler_needs_object, $1)=no
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for C++ test sources.
+ac_ext=cpp
+
+# Object file extension for compiled C++ test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the CXX compiler isn't working. Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_caught_CXX_error" != yes; then
+ # Code to be used in simple compile tests
+ lt_simple_compile_test_code="int some_variable = 0;"
+
+ # Code to be used in simple link tests
+ lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }'
+
+ # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+ _LT_TAG_COMPILER
+
+ # save warnings/boilerplate of simple test code
+ _LT_COMPILER_BOILERPLATE
+ _LT_LINKER_BOILERPLATE
+
+ # Allow CC to be a program name with arguments.
+ lt_save_CC=$CC
+ lt_save_CFLAGS=$CFLAGS
+ lt_save_LD=$LD
+ lt_save_GCC=$GCC
+ GCC=$GXX
+ lt_save_with_gnu_ld=$with_gnu_ld
+ lt_save_path_LD=$lt_cv_path_LD
+ if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+ lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+ else
+ $as_unset lt_cv_prog_gnu_ld
+ fi
+ if test -n "${lt_cv_path_LDCXX+set}"; then
+ lt_cv_path_LD=$lt_cv_path_LDCXX
+ else
+ $as_unset lt_cv_path_LD
+ fi
+ test -z "${LDCXX+set}" || LD=$LDCXX
+ CC=${CXX-"c++"}
+ CFLAGS=$CXXFLAGS
+ compiler=$CC
+ _LT_TAGVAR(compiler, $1)=$CC
+ _LT_CC_BASENAME([$compiler])
+
+ if test -n "$compiler"; then
+ # We don't want -fno-exception when compiling C++ code, so set the
+ # no_builtin_flag separately
+ if test "$GXX" = yes; then
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin'
+ else
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+ fi
+
+ if test "$GXX" = yes; then
+ # Set up default GNU C++ configuration
+
+ LT_PATH_LD
+
+ # Check if GNU C++ uses GNU ld as the underlying linker, since the
+ # archiving commands below assume that GNU ld is being used.
+ if test "$with_gnu_ld" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+ # investigate it a little bit more. (MM)
+ wlarc='${wl}'
+
+ # ancient GNU ld didn't support --whole-archive et al.
+ if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+ $GREP 'no-whole-archive' > /dev/null; then
+ _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ fi
+ else
+ with_gnu_ld=no
+ wlarc=
+
+ # A generic and very simple default shared library creation
+ # command for GNU C++ for the case where it uses the native
+ # linker, instead of GNU ld. If possible, this setting should be
+ # overridden to take advantage of the native linker features on
+ # the platform it is being used on.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+ fi
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+ else
+ GXX=no
+ with_gnu_ld=no
+ wlarc=
+ fi
+
+ # PORTME: fill in a description of your system's C++ link characteristics
+ AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
+ _LT_TAGVAR(ld_shlibs, $1)=yes
+ case $host_os in
+ aix3*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ aix[[4-9]]*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
+ for ld_flag in $LDFLAGS; do
+ case $ld_flag in
+ *-brtl*)
+ aix_use_runtimelinking=yes
+ break
+ ;;
+ esac
+ done
+ ;;
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ _LT_TAGVAR(archive_cmds, $1)=''
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ _LT_TAGVAR(file_list_spec, $1)='${wl}-f,'
+
+ if test "$GXX" = yes; then
+ case $host_os in aix4.[[012]]|aix4.[[012]].*)
+          # We only want to do this on AIX 4.2 and lower; the check
+ # below for broken collect2 doesn't work under 4.3+
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" &&
+ strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ :
+ else
+ # We have old collect2
+ _LT_TAGVAR(hardcode_direct, $1)=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_minus_L
+ # to unsupported forces relinking
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=
+ fi
+ esac
+ shared_flag='-shared'
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag="$shared_flag "'${wl}-G'
+ fi
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+ # chokes on -Wl,-G. The following line is correct:
+ shared_flag='-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+ fi
+
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall'
+ # It seems that -bexpall does not export symbols beginning with
+ # underscore (_), so it is better to generate a list of symbols to
+ # export.
+ _LT_TAGVAR(always_export_symbols, $1)=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ _LT_TAGVAR(allow_undefined_flag, $1)='-berok'
+ # Determine the default libpath from the value encoded in an empty
+ # executable.
+ _LT_SYS_MODULE_PATH_AIX([$1])
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib'
+ _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+ else
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ _LT_SYS_MODULE_PATH_AIX([$1])
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok'
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok'
+ if test "$with_gnu_ld" = yes; then
+ # We only use this code for GNU lds that support --whole-archive.
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ else
+ # Exported symbols can be pulled into shared objects from archives
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+ fi
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+ # This is similar to how AIX traditionally builds its shared
+ # libraries.
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+ fi
+ fi
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ chorus*)
+ case $cc_basename in
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ case $GXX,$cc_basename in
+ ,cl* | no,cl*)
+ # Native MSVC
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=yes
+ _LT_TAGVAR(file_list_spec, $1)='@'
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+ else
+ $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+ fi~
+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+ linknames='
+ # The linker will not automatically build a static lib if we build a DLL.
+ # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+ # Don't use ranlib
+ _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
+ _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
+ lt_tool_outputfile="@TOOL_OUTPUT@"~
+ case $lt_outputfile in
+ *.exe|*.EXE) ;;
+ *)
+ lt_outputfile="$lt_outputfile.exe"
+ lt_tool_outputfile="$lt_tool_outputfile.exe"
+ ;;
+ esac~
+ func_to_tool_file "$lt_outputfile"~
+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+ $RM "$lt_outputfile.manifest";
+ fi'
+ ;;
+ *)
+ # g++
+ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+ # as there is no search path for DLLs.
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols'
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=no
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+
+ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is; otherwise, prepend...
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname.def;
+ else
+ echo EXPORTS > $output_objdir/$soname.def;
+ cat $export_symbols >> $output_objdir/$soname.def;
+ fi~
+ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+ ;;
+ darwin* | rhapsody*)
+ _LT_DARWIN_LINKER_FEATURES($1)
+ ;;
+
+ dgux*)
+ case $cc_basename in
+ ec++*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ ghcx*)
+ # Green Hills C++ Compiler
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+ ;;
+
+ freebsd2.*)
+ # C++ shared libraries reported to be fairly broken before
+ # switch to ELF
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ freebsd-elf*)
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ ;;
+
+ freebsd* | dragonfly*)
+ # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
+ # conventions
+ _LT_TAGVAR(ld_shlibs, $1)=yes
+ ;;
+
+ gnu*)
+ ;;
+
+ haiku*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ ;;
+
+ hpux9*)
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
+ # but as the default
+ # location of the library.
+
+ case $cc_basename in
+ CC*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ aCC*)
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+ ;;
+ *)
+ if test "$GXX" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ else
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+ ;;
+
+ hpux10*|hpux11*)
+ if test $with_gnu_ld = no; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ case $host_cpu in
+ hppa*64*|ia64*)
+ ;;
+ *)
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ ;;
+ esac
+ fi
+ case $host_cpu in
+ hppa*64*|ia64*)
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+ *)
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
+ # but as the default
+ # location of the library.
+ ;;
+ esac
+
+ case $cc_basename in
+ CC*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ aCC*)
+ case $host_cpu in
+ hppa*64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ ia64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ esac
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+ ;;
+ *)
+ if test "$GXX" = yes; then
+ if test $with_gnu_ld = no; then
+ case $host_cpu in
+ hppa*64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ ia64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ esac
+ fi
+ else
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+ ;;
+
+ interix[[3-9]]*)
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+ # Instead, shared libraries are loaded at an image base (0x10000000 by
+    # default) and relocated if they conflict, which is a slow, very
+    # memory-consuming and fragmenting process. To avoid this, we pick a random,
+ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+ # time. Moving up from 0x10000000 also allows more sbrk(2) space.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ ;;
+ irix5* | irix6*)
+ case $cc_basename in
+ CC*)
+ # SGI C++
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+
+ # Archives containing C++ object files must be created using
+ # "CC -ar", where "CC" is the IRIX C++ compiler. This is
+ # necessary to make sure instantiated templates are included
+ # in the archive.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs'
+ ;;
+ *)
+ if test "$GXX" = yes; then
+ if test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib'
+ fi
+ fi
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ ;;
+ esac
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(inherit_rpath, $1)=yes
+ ;;
+
+ linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ case $cc_basename in
+ KCC*)
+ # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+ # KCC will only create a shared library if the output file
+ # ends with ".so" (or ".sl" for HP-UX), so rename the library
+ # to its proper name (with version) after linking.
+ _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib'
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+
+ # Archives containing C++ object files must be created using
+ # "CC -Bstatic", where "CC" is the KAI C++ compiler.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs'
+ ;;
+ icpc* | ecpc* )
+ # Intel C++
+ with_gnu_ld=yes
+ # version 8.0 and above of icpc choke on multiply defined symbols
+ # if we add $predep_objects and $postdep_objects, however 7.1 and
+ # earlier do not add the objects themselves.
+ case `$CC -V 2>&1` in
+ *"Version 7."*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ ;;
+ *) # Version 8.0 or newer
+ tmp_idyn=
+ case $host_cpu in
+ ia64*) tmp_idyn=' -i_dynamic';;
+ esac
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ ;;
+ esac
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ ;;
+ pgCC* | pgcpp*)
+ # Portland Group C++ compiler
+ case `$CC -V` in
+ *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*)
+ _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
+ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
+ _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
+ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
+ $RANLIB $oldlib'
+ _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ ;;
+ *) # Version 6 and above use weak symbols
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ ;;
+ esac
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ ;;
+ cxx*)
+ # Compaq C++
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols'
+
+ runpath_var=LD_RUN_PATH
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
+ ;;
+ xl* | mpixl* | bgxl*)
+ # IBM XL 8.0 on PPC, with GNU ld
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ if test "x$supports_anon_versioning" = xyes; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+ fi
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C++ 5.9
+ _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(compiler_needs_object, $1)=yes
+
+ # Not sure whether something based on
+ # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
+ # would be better.
+ output_verbose_link_cmd='func_echo_all'
+
+ # Archives containing C++ object files must be created using
+ # "CC -xar", where "CC" is the Sun C++ compiler. This is
+ # necessary to make sure instantiated templates are included
+ # in the archive.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+
+ lynxos*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ m88k*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ mvs*)
+ case $cc_basename in
+ cxx*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
+ wlarc=
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ fi
+ # Workaround some broken pre-1.5 toolchains
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"'
+ ;;
+
+ *nto* | *qnx*)
+ _LT_TAGVAR(ld_shlibs, $1)=yes
+ ;;
+
+ openbsd2*)
+ # C++ shared libraries are fairly broken
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ openbsd*)
+ if test -f /usr/libexec/ld.so; then
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ fi
+ output_verbose_link_cmd=func_echo_all
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ osf3* | osf4* | osf5*)
+ case $cc_basename in
+ KCC*)
+ # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+ # KCC will only create a shared library if the output file
+ # ends with ".so" (or ".sl" for HP-UX), so rename the library
+ # to its proper name (with version) after linking.
+ _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ # Archives containing C++ object files must be created using
+ # the KAI C++ compiler.
+ case $host in
+ osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;;
+ *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;;
+ esac
+ ;;
+ RCC*)
+ # Rational C++ 2.4.1
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ cxx*)
+ case $host in
+ osf3*)
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ ;;
+ *)
+ _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
+ echo "-hidden">> $lib.exp~
+ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~
+ $RM $lib.exp'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+ ;;
+ esac
+
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+ ;;
+ *)
+ if test "$GXX" = yes && test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+ case $host in
+ osf3*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ ;;
+ esac
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+ else
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+ ;;
+
+ psos*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ sunos4*)
+ case $cc_basename in
+ CC*)
+ # Sun C++ 4.x
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ lcc*)
+ # Lucid
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+ ;;
+
+ solaris*)
+ case $cc_basename in
+ CC* | sunCC*)
+ # Sun C++ 4.2, 5.x and Centerline C++
+ _LT_TAGVAR(archive_cmds_need_lc,$1)=yes
+ _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ case $host_os in
+ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+ *)
+ # The compiler driver will combine and reorder linker options,
+ # but understands `-z linker_flag'.
+ # Supported since Solaris 2.6 (maybe 2.5.1?)
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
+ ;;
+ esac
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+
+ output_verbose_link_cmd='func_echo_all'
+
+ # Archives containing C++ object files must be created using
+ # "CC -xar", where "CC" is the Sun C++ compiler. This is
+ # necessary to make sure instantiated templates are included
+ # in the archive.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
+ ;;
+ gcx*)
+ # Green Hills C++ Compiler
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+
+ # The C++ compiler must be used to create the archive.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
+ ;;
+ *)
+ # GNU C++ compiler with Solaris linker
+ if test "$GXX" = yes && test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs'
+ if $CC --version | $GREP -v '^2\.7' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+ else
+ # g++ 2.7 appears to require `-G' NOT `-shared' on this
+ # platform.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+ fi
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir'
+ case $host_os in
+ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+ *)
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+ ;;
+ esac
+ fi
+ ;;
+ esac
+ ;;
+
+ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ runpath_var='LD_RUN_PATH'
+
+ case $cc_basename in
+ CC*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6*)
+ # Note: We can NOT use -z defs as we might desire, because we do not
+ # link with -lc, and that would cause any symbols used from libc to
+ # always be unresolved, which means just about no library would
+ # ever link correctly. If we're not using GNU ld we use -z text
+ # though, which does catch some bad symbols but isn't as heavy-handed
+ # as -z defs.
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+ _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs'
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport'
+ runpath_var='LD_RUN_PATH'
+
+ case $cc_basename in
+ CC*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~
+ '"$_LT_TAGVAR(old_archive_cmds, $1)"
+ _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~
+ '"$_LT_TAGVAR(reload_cmds, $1)"
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ ;;
+
+ tandem*)
+ case $cc_basename in
+ NCC*)
+ # NonStop-UX NCC 3.20
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+ ;;
+
+ vxworks*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+
+ AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
+ test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no
+
+ _LT_TAGVAR(GCC, $1)="$GXX"
+ _LT_TAGVAR(LD, $1)="$LD"
+
+ ## CAVEAT EMPTOR:
+ ## There is no encapsulation within the following macros, do not change
+ ## the running order or otherwise move them around unless you know exactly
+ ## what you are doing...
+ _LT_SYS_HIDDEN_LIBDEPS($1)
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_SYS_DYNAMIC_LINKER($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+
+ _LT_CONFIG($1)
+ fi # test -n "$compiler"
+
+ CC=$lt_save_CC
+ CFLAGS=$lt_save_CFLAGS
+ LDCXX=$LD
+ LD=$lt_save_LD
+ GCC=$lt_save_GCC
+ with_gnu_ld=$lt_save_with_gnu_ld
+ lt_cv_path_LDCXX=$lt_cv_path_LD
+ lt_cv_path_LD=$lt_save_path_LD
+ lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
+ lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
+fi # test "$_lt_caught_CXX_error" != yes
+
+AC_LANG_POP
+])# _LT_LANG_CXX_CONFIG
+
+
+# _LT_FUNC_STRIPNAME_CNF
+# ----------------------
+# func_stripname_cnf prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
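+# For example (illustrative only), 'func_stripname_cnf lib .la libfoo.la'
+# leaves 'foo' in func_stripname_result.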
+#
+# This function is identical to the (non-XSI) version of func_stripname,
+# except this one can be used by m4 code that may be executed by configure,
+# rather than the libtool script.
+m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl
+AC_REQUIRE([_LT_DECL_SED])
+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])
+func_stripname_cnf ()
+{
+ case ${2} in
+ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;;
+ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;;
+ esac
+} # func_stripname_cnf
+])# _LT_FUNC_STRIPNAME_CNF
+
+# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME])
+# ---------------------------------
+# Figure out "hidden" library dependencies from verbose
+# compiler output when linking a shared library.
+# Parse the compiler output and extract the necessary
+# objects, libraries and library flags.
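+# (On GNU/Linux with g++, for instance, this typically ends up recording the
+# C++ runtime libraries and crt startup objects; the exact lists are
+# platform- and compiler-specific.)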
+m4_defun([_LT_SYS_HIDDEN_LIBDEPS],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl
+# Dependencies to place before and after the object being linked:
+_LT_TAGVAR(predep_objects, $1)=
+_LT_TAGVAR(postdep_objects, $1)=
+_LT_TAGVAR(predeps, $1)=
+_LT_TAGVAR(postdeps, $1)=
+_LT_TAGVAR(compiler_lib_search_path, $1)=
+
+dnl we can't use the lt_simple_compile_test_code here,
+dnl because it contains code intended for an executable,
+dnl not a library. It's possible we should let each
+dnl tag define a new lt_????_link_test_code variable,
+dnl but it's only used here...
+m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF
+int a;
+void foo (void) { a = 0; }
+_LT_EOF
+], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF
+class Foo
+{
+public:
+ Foo (void) { a = 0; }
+private:
+ int a;
+};
+_LT_EOF
+], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF
+ subroutine foo
+ implicit none
+ integer*4 a
+ a=0
+ return
+ end
+_LT_EOF
+], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF
+ subroutine foo
+ implicit none
+ integer a
+ a=0
+ return
+ end
+_LT_EOF
+], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF
+public class foo {
+ private int a;
+ public void bar (void) {
+ a = 0;
+ }
+};
+_LT_EOF
+], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF
+package foo
+func foo() {
+}
+_LT_EOF
+])
+
+_lt_libdeps_save_CFLAGS=$CFLAGS
+case "$CC $CFLAGS " in #(
+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+esac
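+# (LTO is switched off for this probe so that the verbose link output parsed
+# below lists ordinary objects and libraries rather than LTO intermediates.)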
+
+dnl Parse the compiler output and extract the necessary
+dnl objects, libraries and library flags.
+if AC_TRY_EVAL(ac_compile); then
+ # Parse the compiler output and extract the necessary
+ # objects, libraries and library flags.
+
+ # Sentinel used to keep track of whether or not we are before
+ # the conftest object file.
+ pre_test_object_deps_done=no
+
+ for p in `eval "$output_verbose_link_cmd"`; do
+ case ${prev}${p} in
+
+ -L* | -R* | -l*)
+ # Some compilers place space between "-{L,R}" and the path.
+ # Remove the space.
+ if test $p = "-L" ||
+ test $p = "-R"; then
+ prev=$p
+ continue
+ fi
+
+ # Expand the sysroot to ease extracting the directories later.
+ if test -z "$prev"; then
+ case $p in
+ -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+ -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+ -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+ esac
+ fi
+ case $p in
+ =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+ esac
+ if test "$pre_test_object_deps_done" = no; then
+ case ${prev} in
+ -L | -R)
+ # Internal compiler library paths should come after those
+	      # provided by the user.  The postdeps already come after the
+	      # user-supplied libs so there is no need to process them.
+ if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then
+ _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}"
+ else
+ _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}"
+ fi
+ ;;
+ # The "-l" case would never come before the object being
+ # linked, so don't bother handling this case.
+ esac
+ else
+ if test -z "$_LT_TAGVAR(postdeps, $1)"; then
+ _LT_TAGVAR(postdeps, $1)="${prev}${p}"
+ else
+ _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}"
+ fi
+ fi
+ prev=
+ ;;
+
+ *.lto.$objext) ;; # Ignore GCC LTO objects
+ *.$objext)
+ # This assumes that the test object file only shows up
+ # once in the compiler output.
+ if test "$p" = "conftest.$objext"; then
+ pre_test_object_deps_done=yes
+ continue
+ fi
+
+ if test "$pre_test_object_deps_done" = no; then
+ if test -z "$_LT_TAGVAR(predep_objects, $1)"; then
+ _LT_TAGVAR(predep_objects, $1)="$p"
+ else
+ _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p"
+ fi
+ else
+ if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then
+ _LT_TAGVAR(postdep_objects, $1)="$p"
+ else
+ _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p"
+ fi
+ fi
+ ;;
+
+ *) ;; # Ignore the rest.
+
+ esac
+ done
+
+ # Clean up.
+ rm -f a.out a.exe
+else
+ echo "libtool.m4: error: problem compiling $1 test program"
+fi
+
+$RM -f conftest.$objext
+CFLAGS=$_lt_libdeps_save_CFLAGS
+
+# PORTME: override above test on systems where it is broken
+m4_if([$1], [CXX],
+[case $host_os in
+interix[[3-9]]*)
+ # Interix 3.5 installs completely hosed .la files for C++, so rather than
+ # hack all around it, let's just trust "g++" to DTRT.
+ _LT_TAGVAR(predep_objects,$1)=
+ _LT_TAGVAR(postdep_objects,$1)=
+ _LT_TAGVAR(postdeps,$1)=
+ ;;
+
+linux*)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C++ 5.9
+
+ # The more standards-conforming stlport4 library is
+ # incompatible with the Cstd library. Avoid specifying
+ # it if it's in CXXFLAGS. Ignore libCrun as
+ # -library=stlport4 depends on it.
+ case " $CXX $CXXFLAGS " in
+ *" -library=stlport4 "*)
+ solaris_use_stlport4=yes
+ ;;
+ esac
+
+ if test "$solaris_use_stlport4" != yes; then
+ _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun'
+ fi
+ ;;
+ esac
+ ;;
+
+solaris*)
+ case $cc_basename in
+ CC* | sunCC*)
+ # The more standards-conforming stlport4 library is
+ # incompatible with the Cstd library. Avoid specifying
+ # it if it's in CXXFLAGS. Ignore libCrun as
+ # -library=stlport4 depends on it.
+ case " $CXX $CXXFLAGS " in
+ *" -library=stlport4 "*)
+ solaris_use_stlport4=yes
+ ;;
+ esac
+
+ # Adding this requires a known-good setup of shared libraries for
+ # Sun compiler versions before 5.6, else PIC objects from an old
+ # archive will be linked into the output, leading to subtle bugs.
+ if test "$solaris_use_stlport4" != yes; then
+ _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun'
+ fi
+ ;;
+ esac
+ ;;
+esac
+])
+
+case " $_LT_TAGVAR(postdeps, $1) " in
+*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;;
+esac
+ _LT_TAGVAR(compiler_lib_search_dirs, $1)=
+if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then
+ _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'`
+fi
+_LT_TAGDECL([], [compiler_lib_search_dirs], [1],
+ [The directories searched by this compiler when creating a shared library])
+_LT_TAGDECL([], [predep_objects], [1],
+ [Dependencies to place before and after the objects being linked to
+ create a shared library])
+_LT_TAGDECL([], [postdep_objects], [1])
+_LT_TAGDECL([], [predeps], [1])
+_LT_TAGDECL([], [postdeps], [1])
+_LT_TAGDECL([], [compiler_lib_search_path], [1],
+ [The library search path used internally by the compiler when linking
+ a shared library])
+])# _LT_SYS_HIDDEN_LIBDEPS
+
+
+# _LT_LANG_F77_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for a Fortran 77 compiler are
+# suitably defined. These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_F77_CONFIG],
+[AC_LANG_PUSH(Fortran 77)
+if test -z "$F77" || test "X$F77" = "Xno"; then
+ _lt_disable_F77=yes
+fi
+
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for f77 test sources.
+ac_ext=f
+
+# Object file extension for compiled f77 test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the F77 compiler isn't working. Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_disable_F77" != yes; then
+ # Code to be used in simple compile tests
+ lt_simple_compile_test_code="\
+ subroutine t
+ return
+ end
+"
+
+ # Code to be used in simple link tests
+ lt_simple_link_test_code="\
+ program t
+ end
+"
+
+ # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+ _LT_TAG_COMPILER
+
+ # save warnings/boilerplate of simple test code
+ _LT_COMPILER_BOILERPLATE
+ _LT_LINKER_BOILERPLATE
+
+ # Allow CC to be a program name with arguments.
+ lt_save_CC="$CC"
+ lt_save_GCC=$GCC
+ lt_save_CFLAGS=$CFLAGS
+ CC=${F77-"f77"}
+ CFLAGS=$FFLAGS
+ compiler=$CC
+ _LT_TAGVAR(compiler, $1)=$CC
+ _LT_CC_BASENAME([$compiler])
+ GCC=$G77
+ if test -n "$compiler"; then
+ AC_MSG_CHECKING([if libtool supports shared libraries])
+ AC_MSG_RESULT([$can_build_shared])
+
+ AC_MSG_CHECKING([whether to build shared libraries])
+ test "$can_build_shared" = "no" && enable_shared=no
+
+ # On AIX, shared libraries and static libraries use the same namespace, and
+ # are all built from PIC.
+ case $host_os in
+ aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+ aix[[4-9]]*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+ esac
+ AC_MSG_RESULT([$enable_shared])
+
+ AC_MSG_CHECKING([whether to build static libraries])
+ # Make sure either enable_shared or enable_static is yes.
+ test "$enable_shared" = yes || enable_static=yes
+ AC_MSG_RESULT([$enable_static])
+
+ _LT_TAGVAR(GCC, $1)="$G77"
+ _LT_TAGVAR(LD, $1)="$LD"
+
+ ## CAVEAT EMPTOR:
+ ## There is no encapsulation within the following macros, do not change
+ ## the running order or otherwise move them around unless you know exactly
+ ## what you are doing...
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_SYS_DYNAMIC_LINKER($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+
+ _LT_CONFIG($1)
+ fi # test -n "$compiler"
+
+ GCC=$lt_save_GCC
+ CC="$lt_save_CC"
+ CFLAGS="$lt_save_CFLAGS"
+fi # test "$_lt_disable_F77" != yes
+
+AC_LANG_POP
+])# _LT_LANG_F77_CONFIG
+
+
+# _LT_LANG_FC_CONFIG([TAG])
+# -------------------------
+# Ensure that the configuration variables for a Fortran compiler are
+# suitably defined. These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_FC_CONFIG],
+[AC_LANG_PUSH(Fortran)
+
+if test -z "$FC" || test "X$FC" = "Xno"; then
+ _lt_disable_FC=yes
+fi
+
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for fc test sources.
+ac_ext=${ac_fc_srcext-f}
+
+# Object file extension for compiled fc test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the FC compiler isn't working. Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_disable_FC" != yes; then
+ # Code to be used in simple compile tests
+ lt_simple_compile_test_code="\
+ subroutine t
+ return
+ end
+"
+
+ # Code to be used in simple link tests
+ lt_simple_link_test_code="\
+ program t
+ end
+"
+
+ # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+ _LT_TAG_COMPILER
+
+ # save warnings/boilerplate of simple test code
+ _LT_COMPILER_BOILERPLATE
+ _LT_LINKER_BOILERPLATE
+
+ # Allow CC to be a program name with arguments.
+ lt_save_CC="$CC"
+ lt_save_GCC=$GCC
+ lt_save_CFLAGS=$CFLAGS
+ CC=${FC-"f95"}
+ CFLAGS=$FCFLAGS
+ compiler=$CC
+ GCC=$ac_cv_fc_compiler_gnu
+
+ _LT_TAGVAR(compiler, $1)=$CC
+ _LT_CC_BASENAME([$compiler])
+
+ if test -n "$compiler"; then
+ AC_MSG_CHECKING([if libtool supports shared libraries])
+ AC_MSG_RESULT([$can_build_shared])
+
+ AC_MSG_CHECKING([whether to build shared libraries])
+ test "$can_build_shared" = "no" && enable_shared=no
+
+ # On AIX, shared libraries and static libraries use the same namespace, and
+ # are all built from PIC.
+ case $host_os in
+ aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+ aix[[4-9]]*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+ esac
+ AC_MSG_RESULT([$enable_shared])
+
+ AC_MSG_CHECKING([whether to build static libraries])
+ # Make sure either enable_shared or enable_static is yes.
+ test "$enable_shared" = yes || enable_static=yes
+ AC_MSG_RESULT([$enable_static])
+
+ _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu"
+ _LT_TAGVAR(LD, $1)="$LD"
+
+ ## CAVEAT EMPTOR:
+ ## There is no encapsulation within the following macros, do not change
+ ## the running order or otherwise move them around unless you know exactly
+ ## what you are doing...
+ _LT_SYS_HIDDEN_LIBDEPS($1)
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_SYS_DYNAMIC_LINKER($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+
+ _LT_CONFIG($1)
+ fi # test -n "$compiler"
+
+ GCC=$lt_save_GCC
+ CC=$lt_save_CC
+ CFLAGS=$lt_save_CFLAGS
+fi # test "$_lt_disable_FC" != yes
+
+AC_LANG_POP
+])# _LT_LANG_FC_CONFIG
+
+
+# _LT_LANG_GCJ_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for the GNU Java Compiler compiler
+# are suitably defined. These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_GCJ_CONFIG],
+[AC_REQUIRE([LT_PROG_GCJ])dnl
+AC_LANG_SAVE
+
+# Source file extension for Java test sources.
+ac_ext=java
+
+# Object file extension for compiled Java test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="class foo {}"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }'
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC=$CC
+lt_save_CFLAGS=$CFLAGS
+lt_save_GCC=$GCC
+GCC=yes
+CC=${GCJ-"gcj"}
+CFLAGS=$GCJFLAGS
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_TAGVAR(LD, $1)="$LD"
+_LT_CC_BASENAME([$compiler])
+
+# GCJ appeared only after GCC started implicitly linking in libc, so there
+# is never a need to link against it explicitly.
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+
+if test -n "$compiler"; then
+ _LT_COMPILER_NO_RTTI($1)
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+
+ _LT_CONFIG($1)
+fi
+
+AC_LANG_RESTORE
+
+GCC=$lt_save_GCC
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
+])# _LT_LANG_GCJ_CONFIG
+
+
+# _LT_LANG_GO_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for the GNU Go compiler
+# are suitably defined. These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_GO_CONFIG],
+[AC_REQUIRE([LT_PROG_GO])dnl
+AC_LANG_SAVE
+
+# Source file extension for Go test sources.
+ac_ext=go
+
+# Object file extension for compiled Go test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="package main; func main() { }"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='package main; func main() { }'
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC=$CC
+lt_save_CFLAGS=$CFLAGS
+lt_save_GCC=$GCC
+GCC=yes
+CC=${GOC-"gccgo"}
+CFLAGS=$GOFLAGS
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_TAGVAR(LD, $1)="$LD"
+_LT_CC_BASENAME([$compiler])
+
+# Go did not exist in the era when GCC did not implicitly link libc in.
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+
+if test -n "$compiler"; then
+ _LT_COMPILER_NO_RTTI($1)
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+
+ _LT_CONFIG($1)
+fi
+
+AC_LANG_RESTORE
+
+GCC=$lt_save_GCC
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
+])# _LT_LANG_GO_CONFIG
+
+
+# _LT_LANG_RC_CONFIG([TAG])
+# -------------------------
+# Ensure that the configuration variables for the Windows resource compiler
+# are suitably defined. These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_RC_CONFIG],
+[AC_REQUIRE([LT_PROG_RC])dnl
+AC_LANG_SAVE
+
+# Source file extension for RC test sources.
+ac_ext=rc
+
+# Object file extension for compiled RC test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }'
+
+# Code to be used in simple link tests
+lt_simple_link_test_code="$lt_simple_compile_test_code"
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC="$CC"
+lt_save_CFLAGS=$CFLAGS
+lt_save_GCC=$GCC
+GCC=
+CC=${RC-"windres"}
+CFLAGS=
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_CC_BASENAME([$compiler])
+_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
+
+if test -n "$compiler"; then
+ :
+ _LT_CONFIG($1)
+fi
+
+GCC=$lt_save_GCC
+AC_LANG_RESTORE
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
+])# _LT_LANG_RC_CONFIG
+
+
+# LT_PROG_GCJ
+# -----------
+AC_DEFUN([LT_PROG_GCJ],
+[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ],
+ [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ],
+ [AC_CHECK_TOOL(GCJ, gcj,)
+ test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2"
+ AC_SUBST(GCJFLAGS)])])[]dnl
+])
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_GCJ], [])
+
+
+# LT_PROG_GO
+# ----------
+AC_DEFUN([LT_PROG_GO],
+[AC_CHECK_TOOL(GOC, gccgo,)
+])
+
+
+# LT_PROG_RC
+# ----------
+AC_DEFUN([LT_PROG_RC],
+[AC_CHECK_TOOL(RC, windres,)
+])
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_RC], [])
+
+
+# _LT_DECL_EGREP
+# --------------
+# If we don't have a new enough Autoconf to choose the best grep
+# available, choose the first one in the user's PATH.
+m4_defun([_LT_DECL_EGREP],
+[AC_REQUIRE([AC_PROG_EGREP])dnl
+AC_REQUIRE([AC_PROG_FGREP])dnl
+test -z "$GREP" && GREP=grep
+_LT_DECL([], [GREP], [1], [A grep program that handles long lines])
+_LT_DECL([], [EGREP], [1], [An ERE matcher])
+_LT_DECL([], [FGREP], [1], [A literal string matcher])
+dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too
+AC_SUBST([GREP])
+])
+
+
+# _LT_DECL_OBJDUMP
+# ----------------
+# If we don't have a new enough Autoconf to choose the best objdump
+# available, choose the first one in the user's PATH.
+m4_defun([_LT_DECL_OBJDUMP],
+[AC_CHECK_TOOL(OBJDUMP, objdump, false)
+test -z "$OBJDUMP" && OBJDUMP=objdump
+_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper])
+AC_SUBST([OBJDUMP])
+])
+
+# _LT_DECL_DLLTOOL
+# ----------------
+# Ensure DLLTOOL variable is set.
+m4_defun([_LT_DECL_DLLTOOL],
+[AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+_LT_DECL([], [DLLTOOL], [1], [DLL creation program])
+AC_SUBST([DLLTOOL])
+])
+
+# _LT_DECL_SED
+# ------------
+# Check for a fully-functional sed program that truncates
+# as few characters as possible. Prefer GNU sed if found.
+m4_defun([_LT_DECL_SED],
+[AC_PROG_SED
+test -z "$SED" && SED=sed
+Xsed="$SED -e 1s/^X//"
+_LT_DECL([], [SED], [1], [A sed program that does not truncate output])
+_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"],
+ [Sed that helps us avoid accidentally triggering echo(1) options like -n])
+])# _LT_DECL_SED
+
+m4_ifndef([AC_PROG_SED], [
+# NOTE: This macro has been submitted for inclusion into #
+# GNU Autoconf as AC_PROG_SED. When it is available in #
+# a released version of Autoconf we should remove this #
+# macro and use it instead. #
+
+m4_defun([AC_PROG_SED],
+[AC_MSG_CHECKING([for a sed that does not truncate output])
+AC_CACHE_VAL(lt_cv_path_SED,
+[# Loop through the user's path and test for sed and gsed.
+# Then use that list of sed's as ones to test for truncation.
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for lt_ac_prog in sed gsed; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then
+ lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext"
+ fi
+ done
+ done
+done
+IFS=$as_save_IFS
+lt_ac_max=0
+lt_ac_count=0
+# Add /usr/xpg4/bin/sed as it is typically found on Solaris
+# along with /bin/sed that truncates output.
+for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do
+ test ! -f $lt_ac_sed && continue
+ cat /dev/null > conftest.in
+ lt_ac_count=0
+ echo $ECHO_N "0123456789$ECHO_C" >conftest.in
+ # Check for GNU sed and select it if it is found.
+ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then
+ lt_cv_path_SED=$lt_ac_sed
+ break
+ fi
+ while true; do
+ cat conftest.in conftest.in >conftest.tmp
+ mv conftest.tmp conftest.in
+ cp conftest.in conftest.nl
+ echo >>conftest.nl
+ $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break
+ cmp -s conftest.out conftest.nl || break
+ # 10000 chars as input seems more than enough
+ test $lt_ac_count -gt 10 && break
+ lt_ac_count=`expr $lt_ac_count + 1`
+ if test $lt_ac_count -gt $lt_ac_max; then
+ lt_ac_max=$lt_ac_count
+ lt_cv_path_SED=$lt_ac_sed
+ fi
+ done
+done
+])
+SED=$lt_cv_path_SED
+AC_SUBST([SED])
+AC_MSG_RESULT([$SED])
+])#AC_PROG_SED
+])#m4_ifndef
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_SED], [])
+
+
+# _LT_CHECK_SHELL_FEATURES
+# ------------------------
+# Find out whether the shell is Bourne or XSI compatible,
+# or has some other useful features.
+m4_defun([_LT_CHECK_SHELL_FEATURES],
+[AC_MSG_CHECKING([whether the shell understands some XSI constructs])
+# Try some XSI features
+xsi_shell=no
+( _lt_dummy="a/b/c"
+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \
+ = c,a/b,b/c, \
+ && eval 'test $(( 1 + 1 )) -eq 2 \
+ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \
+ && xsi_shell=yes
+AC_MSG_RESULT([$xsi_shell])
+_LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell'])
+
+AC_MSG_CHECKING([whether the shell understands "+="])
+lt_shell_append=no
+( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \
+ >/dev/null 2>&1 \
+ && lt_shell_append=yes
+AC_MSG_RESULT([$lt_shell_append])
+_LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append'])
+
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ lt_unset=unset
+else
+ lt_unset=false
+fi
+_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl
+
+# test EBCDIC or ASCII
+case `echo X|tr X '\101'` in
+ A) # ASCII based system
+ # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
+ lt_SP2NL='tr \040 \012'
+ lt_NL2SP='tr \015\012 \040\040'
+ ;;
+ *) # EBCDIC based system
+ lt_SP2NL='tr \100 \n'
+ lt_NL2SP='tr \r\n \100\100'
+ ;;
+esac
+_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl
+_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl
+])# _LT_CHECK_SHELL_FEATURES
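+
+# For reference, a minimal sketch (assuming a POSIX-ish shell) of the
+# constructs probed above: XSI parameter expansion, arithmetic, and `+=':
+#
+#   _lt_dummy="a/b/c"
+#   echo "${_lt_dummy##*/}"    # -> c     (basename via `##' expansion)
+#   echo "${_lt_dummy%/*}"     # -> a/b   (dirname via `%' expansion)
+#   echo $(( 1 + 1 ))          # -> 2     (arithmetic expansion)
+#   foo=bar; foo+=baz          # foo is `barbaz' where `+=' is understood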
+
+
+# _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY)
+# ------------------------------------------------------
+# In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and
+# '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY.
+m4_defun([_LT_PROG_FUNCTION_REPLACE],
+[dnl {
+sed -e '/^$1 ()$/,/^} # $1 /c\
+$1 ()\
+{\
+m4_bpatsubsts([$2], [$], [\\], [^\([ ]\)], [\\\1])
+} # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+])
+
+
+# _LT_PROG_REPLACE_SHELLFNS
+# -------------------------
+# Replace existing portable implementations of several shell functions with
+# equivalent extended shell implementations where those features are available.
+m4_defun([_LT_PROG_REPLACE_SHELLFNS],
+[if test x"$xsi_shell" = xyes; then
+ _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl
+ case ${1} in
+ */*) func_dirname_result="${1%/*}${2}" ;;
+ * ) func_dirname_result="${3}" ;;
+ esac])
+
+ _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl
+ func_basename_result="${1##*/}"])
+
+ _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl
+ case ${1} in
+ */*) func_dirname_result="${1%/*}${2}" ;;
+ * ) func_dirname_result="${3}" ;;
+ esac
+ func_basename_result="${1##*/}"])
+
+ _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl
+ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are
+ # positional parameters, so assign one to ordinary parameter first.
+ func_stripname_result=${3}
+ func_stripname_result=${func_stripname_result#"${1}"}
+ func_stripname_result=${func_stripname_result%"${2}"}])
+
+ _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl
+ func_split_long_opt_name=${1%%=*}
+ func_split_long_opt_arg=${1#*=}])
+
+ _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl
+ func_split_short_opt_arg=${1#??}
+ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}])
+
+ _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl
+ case ${1} in
+ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;
+ *) func_lo2o_result=${1} ;;
+ esac])
+
+ _LT_PROG_FUNCTION_REPLACE([func_xform], [ func_xform_result=${1%.*}.lo])
+
+ _LT_PROG_FUNCTION_REPLACE([func_arith], [ func_arith_result=$(( $[*] ))])
+
+ _LT_PROG_FUNCTION_REPLACE([func_len], [ func_len_result=${#1}])
+fi
+
+if test x"$lt_shell_append" = xyes; then
+ _LT_PROG_FUNCTION_REPLACE([func_append], [ eval "${1}+=\\${2}"])
+
+ _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl
+ func_quote_for_eval "${2}"
+dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \
+ eval "${1}+=\\\\ \\$func_quote_for_eval_result"])
+
+ # Save a `func_append' function call where possible by direct use of '+='
+ sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+ test 0 -eq $? || _lt_function_replace_fail=:
+else
+ # Save a `func_append' function call even when '+=' is not available
+ sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+ test 0 -eq $? || _lt_function_replace_fail=:
+fi
+
+if test x"$_lt_function_replace_fail" = x":"; then
+ AC_MSG_WARN([Unable to substitute extended shell functions in $ofile])
+fi
+])
+
+# _LT_PATH_CONVERSION_FUNCTIONS
+# -----------------------------
+# Determine which file name conversion functions should be used by
+# func_to_host_file (and, implicitly, by func_to_host_path). These are needed
+# for certain cross-compile configurations and native mingw.
+m4_defun([_LT_PATH_CONVERSION_FUNCTIONS],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_MSG_CHECKING([how to convert $build file names to $host format])
+AC_CACHE_VAL(lt_cv_to_host_file_cmd,
+[case $host in
+ *-*-mingw* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
+ ;;
+ *-*-cygwin* )
+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
+ ;;
+ * ) # otherwise, assume *nix
+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
+ ;;
+ esac
+ ;;
+ *-*-cygwin* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
+ ;;
+ *-*-cygwin* )
+ lt_cv_to_host_file_cmd=func_convert_file_noop
+ ;;
+ * ) # otherwise, assume *nix
+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
+ ;;
+ esac
+ ;;
+ * ) # unhandled hosts (and "normal" native builds)
+ lt_cv_to_host_file_cmd=func_convert_file_noop
+ ;;
+esac
+])
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+AC_MSG_RESULT([$lt_cv_to_host_file_cmd])
+_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd],
+ [0], [convert $build file names to $host format])dnl
+
+AC_MSG_CHECKING([how to convert $build file names to toolchain format])
+AC_CACHE_VAL(lt_cv_to_tool_file_cmd,
+[#assume ordinary cross tools, or native build.
+lt_cv_to_tool_file_cmd=func_convert_file_noop
+case $host in
+ *-*-mingw* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
+ ;;
+ esac
+ ;;
+esac
+])
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+AC_MSG_RESULT([$lt_cv_to_tool_file_cmd])
+_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd],
+ [0], [convert $build files to toolchain format])dnl
+])# _LT_PATH_CONVERSION_FUNCTIONS
+
+# Helper functions for option handling. -*- Autoconf -*-
+#
+# Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# Written by Gary V. Vaughan, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 7 ltoptions.m4
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])])
+
+
+# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME)
+# ------------------------------------------
+m4_define([_LT_MANGLE_OPTION],
+[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])])
+
+
+# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME)
+# ---------------------------------------
+# Set option OPTION-NAME for macro MACRO-NAME, and if there is a
+# matching handler defined, dispatch to it. Other OPTION-NAMEs are
+# saved as a flag.
+m4_define([_LT_SET_OPTION],
+[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl
+m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]),
+ _LT_MANGLE_DEFUN([$1], [$2]),
+ [m4_warning([Unknown $1 option `$2'])])[]dnl
+])
+
+
+# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET])
+# ------------------------------------------------------------
+# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+m4_define([_LT_IF_OPTION],
+[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])])
+
+
+# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET)
+# -------------------------------------------------------
+# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME
+# are set.
+m4_define([_LT_UNLESS_OPTIONS],
+[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
+ [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option),
+ [m4_define([$0_found])])])[]dnl
+m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3
+])[]dnl
+])
+
+
+# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST)
+# ----------------------------------------
+# OPTION-LIST is a space-separated list of Libtool options associated
+# with MACRO-NAME. If any OPTION has a matching handler declared with
+# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about
+# the unknown option and exit.
+m4_defun([_LT_SET_OPTIONS],
+[# Set options
+m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
+ [_LT_SET_OPTION([$1], _LT_Option)])
+
+m4_if([$1],[LT_INIT],[
+ dnl
+ dnl Simply set some default values (i.e off) if boolean options were not
+ dnl specified:
+ _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no
+ ])
+ _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no
+ ])
+ dnl
+ dnl If no reference was made to various pairs of opposing options, then
+ dnl we run the default mode handler for the pair. For example, if neither
+ dnl `shared' nor `disable-shared' was passed, we enable building of shared
+ dnl archives by default:
+ _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED])
+ _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC])
+ _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC])
+ _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install],
+ [_LT_ENABLE_FAST_INSTALL])
+ ])
+])# _LT_SET_OPTIONS
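+
+# For reference, a minimal configure.ac sketch of how options reach the
+# dispatch above (hypothetical combination of documented LT_INIT options):
+#
+#   LT_INIT([dlopen win32-dll disable-static])
+#
+# Each word is handed to _LT_SET_OPTION, which runs the handler registered
+# with LT_OPTION_DEFINE (e.g. `dlopen' sets enable_dlopen=yes); pairs that
+# are never mentioned fall back to the _LT_UNLESS_OPTIONS defaults.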
+
+
+
+# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME)
+# -----------------------------------------
+m4_define([_LT_MANGLE_DEFUN],
+[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])])
+
+
+# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE)
+# -----------------------------------------------
+m4_define([LT_OPTION_DEFINE],
+[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl
+])# LT_OPTION_DEFINE
+
+
+# dlopen
+# ------
+LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes
+])
+
+AU_DEFUN([AC_LIBTOOL_DLOPEN],
+[_LT_SET_OPTION([LT_INIT], [dlopen])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the `dlopen' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], [])
+
+
+# win32-dll
+# ---------
+# Declare package support for building win32 dll's.
+LT_OPTION_DEFINE([LT_INIT], [win32-dll],
+[enable_win32_dll=yes
+
+case $host in
+*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*)
+ AC_CHECK_TOOL(AS, as, false)
+ AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+ AC_CHECK_TOOL(OBJDUMP, objdump, false)
+ ;;
+esac
+
+test -z "$AS" && AS=as
+_LT_DECL([], [AS], [1], [Assembler program])dnl
+
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl
+
+test -z "$OBJDUMP" && OBJDUMP=objdump
+_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl
+])# win32-dll
+
+AU_DEFUN([AC_LIBTOOL_WIN32_DLL],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+_LT_SET_OPTION([LT_INIT], [win32-dll])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the `win32-dll' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], [])
+
+
+# _LT_ENABLE_SHARED([DEFAULT])
+# ----------------------------
+# implement the --enable-shared flag, and support the `shared' and
+# `disable-shared' LT_INIT options.
+# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'.
+m4_define([_LT_ENABLE_SHARED],
+[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([shared],
+ [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@],
+ [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])],
+ [p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_shared=yes ;;
+ no) enable_shared=no ;;
+ *)
+ enable_shared=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_shared=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac],
+ [enable_shared=]_LT_ENABLE_SHARED_DEFAULT)
+
+ _LT_DECL([build_libtool_libs], [enable_shared], [0],
+ [Whether or not to build shared libraries])
+])# _LT_ENABLE_SHARED
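+
+# Usage sketch: besides plain --enable-shared / --disable-shared, the PKGS
+# form enables shared libraries only for the named packages (hypothetical
+# package names shown):
+#
+#   ./configure --enable-shared=foo,bar
+#
+# enable_shared becomes `yes' only when $PACKAGE matches one of the listed
+# names; the list may be separated by commas, spaces or $PATH_SEPARATOR.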
+
+LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])])
+
+# Old names:
+AC_DEFUN([AC_ENABLE_SHARED],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared])
+])
+
+AC_DEFUN([AC_DISABLE_SHARED],
+[_LT_SET_OPTION([LT_INIT], [disable-shared])
+])
+
+AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)])
+AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_ENABLE_SHARED], [])
+dnl AC_DEFUN([AM_DISABLE_SHARED], [])
+
+
+
+# _LT_ENABLE_STATIC([DEFAULT])
+# ----------------------------
+# implement the --enable-static flag, and support the `static' and
+# `disable-static' LT_INIT options.
+# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'.
+m4_define([_LT_ENABLE_STATIC],
+[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([static],
+ [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@],
+ [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])],
+ [p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_static=yes ;;
+ no) enable_static=no ;;
+ *)
+ enable_static=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_static=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac],
+ [enable_static=]_LT_ENABLE_STATIC_DEFAULT)
+
+ _LT_DECL([build_old_libs], [enable_static], [0],
+ [Whether or not to build static libraries])
+])# _LT_ENABLE_STATIC
+
+LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])])
+
+# Old names:
+AC_DEFUN([AC_ENABLE_STATIC],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static])
+])
+
+AC_DEFUN([AC_DISABLE_STATIC],
+[_LT_SET_OPTION([LT_INIT], [disable-static])
+])
+
+AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)])
+AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_ENABLE_STATIC], [])
+dnl AC_DEFUN([AM_DISABLE_STATIC], [])
+
+
+
+# _LT_ENABLE_FAST_INSTALL([DEFAULT])
+# ----------------------------------
+# implement the --enable-fast-install flag, and support the `fast-install'
+# and `disable-fast-install' LT_INIT options.
+# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'.
+m4_define([_LT_ENABLE_FAST_INSTALL],
+[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([fast-install],
+ [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@],
+ [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])],
+ [p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_fast_install=yes ;;
+ no) enable_fast_install=no ;;
+ *)
+ enable_fast_install=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_fast_install=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac],
+ [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT)
+
+_LT_DECL([fast_install], [enable_fast_install], [0],
+ [Whether or not to optimize for fast installation])dnl
+])# _LT_ENABLE_FAST_INSTALL
+
+LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])])
+
+# Old names:
+AU_DEFUN([AC_ENABLE_FAST_INSTALL],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you put
+the `fast-install' option into LT_INIT's first parameter.])
+])
+
+AU_DEFUN([AC_DISABLE_FAST_INSTALL],
+[_LT_SET_OPTION([LT_INIT], [disable-fast-install])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you put
+the `disable-fast-install' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], [])
+dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], [])
+
+
+# _LT_WITH_PIC([MODE])
+# --------------------
+# implement the --with-pic flag, and support the `pic-only' and `no-pic'
+# LT_INIT options.
+# MODE is either `yes' or `no'. If omitted, it defaults to `both'.
+m4_define([_LT_WITH_PIC],
+[AC_ARG_WITH([pic],
+ [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@],
+ [try to use only PIC/non-PIC objects @<:@default=use both@:>@])],
+ [lt_p=${PACKAGE-default}
+ case $withval in
+ yes|no) pic_mode=$withval ;;
+ *)
+ pic_mode=default
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for lt_pkg in $withval; do
+ IFS="$lt_save_ifs"
+ if test "X$lt_pkg" = "X$lt_p"; then
+ pic_mode=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac],
+ [pic_mode=default])
+
+test -z "$pic_mode" && pic_mode=m4_default([$1], [default])
+
+_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl
+])# _LT_WITH_PIC
+
+LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])])
+LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])])
+
+# Old name:
+AU_DEFUN([AC_LIBTOOL_PICMODE],
+[_LT_SET_OPTION([LT_INIT], [pic-only])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the `pic-only' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_PICMODE], [])
+
+
+m4_define([_LTDL_MODE], [])
+LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive],
+ [m4_define([_LTDL_MODE], [nonrecursive])])
+LT_OPTION_DEFINE([LTDL_INIT], [recursive],
+ [m4_define([_LTDL_MODE], [recursive])])
+LT_OPTION_DEFINE([LTDL_INIT], [subproject],
+ [m4_define([_LTDL_MODE], [subproject])])
+
+m4_define([_LTDL_TYPE], [])
+LT_OPTION_DEFINE([LTDL_INIT], [installable],
+ [m4_define([_LTDL_TYPE], [installable])])
+LT_OPTION_DEFINE([LTDL_INIT], [convenience],
+ [m4_define([_LTDL_TYPE], [convenience])])
+
+# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*-
+#
+# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
+# Written by Gary V. Vaughan, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 6 ltsugar.m4
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])])
+
+
+# lt_join(SEP, ARG1, [ARG2...])
+# -----------------------------
+# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their
+# associated separator.
+# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier
+# versions in m4sugar had bugs.
+m4_define([lt_join],
+[m4_if([$#], [1], [],
+ [$#], [2], [[$2]],
+ [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])])
+m4_define([_lt_join],
+[m4_if([$#$2], [2], [],
+ [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])])
+
+
+# lt_car(LIST)
+# lt_cdr(LIST)
+# ------------
+# Manipulate m4 lists.
+# These macros are necessary as long as we still need to support
+# Autoconf-2.59, which quotes differently.
+m4_define([lt_car], [[$1]])
+m4_define([lt_cdr],
+[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])],
+ [$#], 1, [],
+ [m4_dquote(m4_shift($@))])])
+m4_define([lt_unquote], $1)
+
+
+# lt_append(MACRO-NAME, STRING, [SEPARATOR])
+# ------------------------------------------
+# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'.
+# Note that neither SEPARATOR nor STRING are expanded; they are appended
+# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked).
+# No SEPARATOR is output if MACRO-NAME was previously undefined (as opposed
+# to defined but empty).
+#
+# This macro is needed until we can rely on Autoconf 2.62, since earlier
+# versions of m4sugar mistakenly expanded SEPARATOR but not STRING.
+m4_define([lt_append],
+[m4_define([$1],
+ m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])])
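+
+# Expansion sketch (hypothetical macro name `my_list'):
+#
+#   lt_append([my_list], [one])          # my_list -> `one'
+#   lt_append([my_list], [two], [, ])    # my_list -> `one, two'
+#
+# The separator is only emitted once the macro already holds something.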
+
+
+
+# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...])
+# ----------------------------------------------------------
+# Produce a SEP delimited list of all paired combinations of elements of
+# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list
+# has the form PREFIXmINFIXSUFFIXn.
+# Needed until we can rely on m4_combine added in Autoconf 2.62.
+m4_define([lt_combine],
+[m4_if(m4_eval([$# > 3]), [1],
+ [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl
+[[m4_foreach([_Lt_prefix], [$2],
+ [m4_foreach([_Lt_suffix],
+ ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[,
+ [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])])
+
+
+# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ])
+# -----------------------------------------------------------------------
+# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited
+# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ.
+m4_define([lt_if_append_uniq],
+[m4_ifdef([$1],
+ [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1],
+ [lt_append([$1], [$2], [$3])$4],
+ [$5])],
+ [lt_append([$1], [$2], [$3])$4])])
+
+
+# lt_dict_add(DICT, KEY, VALUE)
+# -----------------------------
+m4_define([lt_dict_add],
+[m4_define([$1($2)], [$3])])
+
+
+# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE)
+# --------------------------------------------
+m4_define([lt_dict_add_subkey],
+[m4_define([$1($2:$3)], [$4])])
+
+
+# lt_dict_fetch(DICT, KEY, [SUBKEY])
+# ----------------------------------
+m4_define([lt_dict_fetch],
+[m4_ifval([$3],
+ m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]),
+ m4_ifdef([$1($2)], [m4_defn([$1($2)])]))])
+
+
+# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE])
+# -----------------------------------------------------------------
+m4_define([lt_if_dict_fetch],
+[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4],
+ [$5],
+ [$6])])
+
+
+# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...])
+# --------------------------------------------------------------
+m4_define([lt_dict_filter],
+[m4_if([$5], [], [],
+ [lt_join(m4_quote(m4_default([$4], [[, ]])),
+ lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]),
+ [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl
+])
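+
+# Usage sketch for the lt_dict_* helpers (hypothetical dictionary `colours'):
+#
+#   lt_dict_add([colours], [sky], [blue])
+#   lt_dict_add_subkey([colours], [sky], [night], [black])
+#   lt_dict_fetch([colours], [sky])             => blue
+#   lt_dict_fetch([colours], [sky], [night])    => black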
+
+# ltversion.m4 -- version numbers -*- Autoconf -*-
+#
+# Copyright (C) 2004 Free Software Foundation, Inc.
+# Written by Scott James Remnant, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# @configure_input@
+
+# serial 3337 ltversion.m4
+# This file is part of GNU Libtool
+
+m4_define([LT_PACKAGE_VERSION], [2.4.2])
+m4_define([LT_PACKAGE_REVISION], [1.3337])
+
+AC_DEFUN([LTVERSION_VERSION],
+[macro_version='2.4.2'
+macro_revision='1.3337'
+_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?])
+_LT_DECL(, macro_revision, 0)
+])
+
+# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*-
+#
+# Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc.
+# Written by Scott James Remnant, 2004.
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 5 lt~obsolete.m4
+
+# These exist entirely to fool aclocal when bootstrapping libtool.
+#
+# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN)
+# which have later been changed to m4_define as they aren't part of the
+# exported API, or moved to Autoconf or Automake where they belong.
+#
+# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN
+# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us
+# using a macro with the same name in our local m4/libtool.m4 it'll
+# pull the old libtool.m4 in (it doesn't see our shiny new m4_define
+# and doesn't know about Autoconf macros at all.)
+#
+# So we provide this file, which has a silly filename so it's always
+# included after everything else. This provides aclocal with the
+# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything
+# because those macros already exist, or will be overwritten later.
+# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6.
+#
+# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here.
+# Yes, that means every name once taken will need to remain here until
+# we give up compatibility with versions before 1.7, at which point
+# we need to keep only those names which we still refer to.
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])])
+
+m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])])
+m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])])
+m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])])
+m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])])
+m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])])
+m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])])
+m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])])
+m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])])
+m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])])
+m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])])
+m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])])
+m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])])
+m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])])
+m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])])
+m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])])
+m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])])
+m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])])
+m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])])
+m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])])
+m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])])
+m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])])
+m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])])
+m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])])
+m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])])
+m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])])
+m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])])
+m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])])
+m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])])
+m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])])
+m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])])
+m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])])
+m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])])
+m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])])
+m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])])
+m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])])
+m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])])
+m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])])
+m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])])
+m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])])
+m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])])
+m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])])
+m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])])
+m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])])
+m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])])
+m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])])
+m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])])
+m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])])
+m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])])
+m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])])
+m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])])
+m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])])
+m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])])
+m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])])
+m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])])
+m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])])
+m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])])
+m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])])
+
+# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
+# serial 1 (pkg-config-0.24)
+#
+# Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# PKG_PROG_PKG_CONFIG([MIN-VERSION])
+# ----------------------------------
+AC_DEFUN([PKG_PROG_PKG_CONFIG],
+[m4_pattern_forbid([^_?PKG_[A-Z_]+$])
+m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$])
+m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$])
+AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility])
+AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path])
+AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path])
+
+if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then
+ AC_PATH_TOOL([PKG_CONFIG], [pkg-config])
+fi
+if test -n "$PKG_CONFIG"; then
+ _pkg_min_version=m4_default([$1], [0.9.0])
+ AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version])
+ if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ PKG_CONFIG=""
+ fi
+fi[]dnl
+])# PKG_PROG_PKG_CONFIG
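+
+# Usage sketch (configure.ac): require at least pkg-config 0.24; if the
+# installed pkg-config is older, PKG_CONFIG is cleared and later checks
+# report "untried":
+#
+#   PKG_PROG_PKG_CONFIG([0.24])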
+
+# PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
+#
+# Check to see whether a particular set of modules exists. Similar
+# to PKG_CHECK_MODULES(), but does not set variables or print errors.
+#
+# Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG])
+# only at the first occurrence in configure.ac, so if the first place
+# it's called might be skipped (such as if it is within an "if"), you
+# have to call PKG_PROG_PKG_CONFIG explicitly.
+# --------------------------------------------------------------
+AC_DEFUN([PKG_CHECK_EXISTS],
+[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
+if test -n "$PKG_CONFIG" && \
+ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then
+ m4_default([$2], [:])
+m4_ifvaln([$3], [else
+ $3])dnl
+fi])
+
+# _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES])
+# ---------------------------------------------
+m4_define([_PKG_CONFIG],
+[if test -n "$$1"; then
+ pkg_cv_[]$1="$$1"
+ elif test -n "$PKG_CONFIG"; then
+ PKG_CHECK_EXISTS([$3],
+ [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes ],
+ [pkg_failed=yes])
+ else
+ pkg_failed=untried
+fi[]dnl
+])# _PKG_CONFIG
+
+# _PKG_SHORT_ERRORS_SUPPORTED
+# -----------------------------
+AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED],
+[AC_REQUIRE([PKG_PROG_PKG_CONFIG])
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi[]dnl
+])# _PKG_SHORT_ERRORS_SUPPORTED
+
+
+# PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
+# [ACTION-IF-NOT-FOUND])
+#
+#
+# Note that if there is a possibility the first call to
+# PKG_CHECK_MODULES might not happen, you should be sure to include an
+# explicit call to PKG_PROG_PKG_CONFIG in your configure.ac
+#
+#
+# --------------------------------------------------------------
+AC_DEFUN([PKG_CHECK_MODULES],
+[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
+AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl
+AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl
+
+pkg_failed=no
+AC_MSG_CHECKING([for $1])
+
+_PKG_CONFIG([$1][_CFLAGS], [cflags], [$2])
+_PKG_CONFIG([$1][_LIBS], [libs], [$2])
+
+m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS
+and $1[]_LIBS to avoid the need to call pkg-config.
+See the pkg-config man page for more details.])
+
+if test $pkg_failed = yes; then
+ AC_MSG_RESULT([no])
+ _PKG_SHORT_ERRORS_SUPPORTED
+ if test $_pkg_short_errors_supported = yes; then
+ $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1`
+ else
+ $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD
+
+ m4_default([$4], [AC_MSG_ERROR(
+[Package requirements ($2) were not met:
+
+$$1_PKG_ERRORS
+
+Consider adjusting the PKG_CONFIG_PATH environment variable if you
+installed software in a non-standard prefix.
+
+_PKG_TEXT])[]dnl
+ ])
+elif test $pkg_failed = untried; then
+ AC_MSG_RESULT([no])
+ m4_default([$4], [AC_MSG_FAILURE(
+[The pkg-config script could not be found or is too old. Make sure it
+is in your PATH or set the PKG_CONFIG environment variable to the full
+path to pkg-config.
+
+_PKG_TEXT
+
+To get pkg-config, see <http://pkg-config.freedesktop.org/>.])[]dnl
+ ])
+else
+ $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS
+ $1[]_LIBS=$pkg_cv_[]$1[]_LIBS
+ AC_MSG_RESULT([yes])
+ $3
+fi[]dnl
+])# PKG_CHECK_MODULES
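+
+# A minimal configure.ac sketch (glib-2.0 used purely as an illustrative
+# module); on success GLIB_CFLAGS and GLIB_LIBS are set and AC_SUBSTed via
+# AC_ARG_VAR:
+#
+#   PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.32])
+#   PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.32],
+#                     [have_glib=yes], [have_glib=no])
+#
+# The second form replaces the default "hard error" ACTION-IF-NOT-FOUND.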
+
+# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008, 2011 Free Software
+# Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 1
+
+# AM_AUTOMAKE_VERSION(VERSION)
+# ----------------------------
+# Automake X.Y traces this macro to ensure aclocal.m4 has been
+# generated from the m4 files accompanying Automake X.Y.
+# (This private macro should not be called outside this file.)
+AC_DEFUN([AM_AUTOMAKE_VERSION],
+[am__api_version='1.11'
+dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
+dnl require some minimum version. Point them to the right macro.
+m4_if([$1], [1.11.3], [],
+ [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
+])
+
+# _AM_AUTOCONF_VERSION(VERSION)
+# -----------------------------
+# aclocal traces this macro to find the Autoconf version.
+# This is a private macro too. Using m4_define simplifies
+# the logic in aclocal, which can simply ignore this definition.
+m4_define([_AM_AUTOCONF_VERSION], [])
+
+# AM_SET_CURRENT_AUTOMAKE_VERSION
+# -------------------------------
+# Call AM_AUTOMAKE_VERSION and _AM_AUTOCONF_VERSION so they can be traced.
+# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
+AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
+[AM_AUTOMAKE_VERSION([1.11.3])dnl
+m4_ifndef([AC_AUTOCONF_VERSION],
+ [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
+
+# AM_AUX_DIR_EXPAND -*- Autoconf -*-
+
+# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 1
+
+# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets
+# $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to
+# `$srcdir', `$srcdir/..', or `$srcdir/../..'.
+#
+# Of course, Automake must honor this variable whenever it calls a
+# tool from the auxiliary directory. The problem is that $srcdir (and
+# therefore $ac_aux_dir as well) can be either absolute or relative,
+# depending on how configure is run. This is pretty annoying, since
+# it makes $ac_aux_dir quite unusable in subdirectories: in the top
+# source directory, any form will work fine, but in subdirectories a
+# relative path needs to be adjusted first.
+#
+# $ac_aux_dir/missing
+# fails when called from a subdirectory if $ac_aux_dir is relative
+# $top_srcdir/$ac_aux_dir/missing
+# fails if $ac_aux_dir is absolute,
+# fails when called from a subdirectory in a VPATH build with
+# a relative $ac_aux_dir
+#
+# The reason for the latter failure is that $top_srcdir and $ac_aux_dir
+# are both prefixed by $srcdir. In an in-source build this is usually
+# harmless because $srcdir is `.', but things will break when you
+# start a VPATH build or use an absolute $srcdir.
+#
+# So we could use something similar to $top_srcdir/$ac_aux_dir/missing,
+# iff we strip the leading $srcdir from $ac_aux_dir. That would be:
+# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"`
+# and then we would define $MISSING as
+# MISSING="\${SHELL} $am_aux_dir/missing"
+# This will work as long as MISSING is not called from configure, because
+# unfortunately $(top_srcdir) has no meaning in configure.
+# However there are other variables, like CC, which are often used in
+# configure, and could therefore not use this "fixed" $ac_aux_dir.
+#
+# Another solution, used here, is to always expand $ac_aux_dir to an
+# absolute path. The drawback is that using absolute paths prevents a
+# configured tree from being moved without reconfiguration.
+
+AC_DEFUN([AM_AUX_DIR_EXPAND],
+[dnl Rely on autoconf to set up CDPATH properly.
+AC_PREREQ([2.50])dnl
+# expand $ac_aux_dir to an absolute path
+am_aux_dir=`cd $ac_aux_dir && pwd`
+])
+
+# AM_CONDITIONAL -*- Autoconf -*-
+
+# Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 9
+
+# AM_CONDITIONAL(NAME, SHELL-CONDITION)
+# -------------------------------------
+# Define a conditional.
+AC_DEFUN([AM_CONDITIONAL],
+[AC_PREREQ(2.52)dnl
+ ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])],
+ [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
+AC_SUBST([$1_TRUE])dnl
+AC_SUBST([$1_FALSE])dnl
+_AM_SUBST_NOTMAKE([$1_TRUE])dnl
+_AM_SUBST_NOTMAKE([$1_FALSE])dnl
+m4_define([_AM_COND_VALUE_$1], [$2])dnl
+if $2; then
+ $1_TRUE=
+ $1_FALSE='#'
+else
+ $1_TRUE='#'
+ $1_FALSE=
+fi
+AC_CONFIG_COMMANDS_PRE(
+[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then
+ AC_MSG_ERROR([[conditional "$1" was never defined.
+Usually this means the macro was only invoked conditionally.]])
+fi])])
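+
+# Usage sketch (hypothetical conditional name): in configure.ac
+#
+#   AM_CONDITIONAL([ENABLE_FOO], [test "x$enable_foo" = xyes])
+#
+# and in Makefile.am
+#
+#   if ENABLE_FOO
+#   bin_PROGRAMS += foo
+#   endif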
+
+# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009,
+# 2010, 2011 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 12
+
+# There are a few dirty hacks below to avoid letting `AC_PROG_CC' be
+# written in clear, in which case automake, when reading aclocal.m4,
+# will think it sees a *use*, and therefore will trigger all its
+# C support machinery. Also note that it means that autoscan, seeing
+# CC etc. in the Makefile, will ask for an AC_PROG_CC use...
+
+
+# _AM_DEPENDENCIES(NAME)
+# ----------------------
+# See how the compiler implements dependency checking.
+# NAME is "CC", "CXX", "GCJ", or "OBJC".
+# We try a few techniques and use that to set a single cache variable.
+#
+# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was
+# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular
+# dependency, and given that the user is not expected to run this macro,
+# just rely on AC_PROG_CC.
+AC_DEFUN([_AM_DEPENDENCIES],
+[AC_REQUIRE([AM_SET_DEPDIR])dnl
+AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl
+AC_REQUIRE([AM_MAKE_INCLUDE])dnl
+AC_REQUIRE([AM_DEP_TRACK])dnl
+
+ifelse([$1], CC, [depcc="$CC" am_compiler_list=],
+ [$1], CXX, [depcc="$CXX" am_compiler_list=],
+ [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'],
+ [$1], UPC, [depcc="$UPC" am_compiler_list=],
+ [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'],
+ [depcc="$$1" am_compiler_list=])
+
+AC_CACHE_CHECK([dependency style of $depcc],
+ [am_cv_$1_dependencies_compiler_type],
+[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named `D' -- because `-MD' means `put the output
+ # in D'.
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_$1_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp`
+ fi
+ am__universal=false
+ m4_case([$1], [CC],
+ [case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac],
+ [CXX],
+ [case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac])
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+ # Solaris 8's {/usr,}/bin/sh.
+ touch sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle `-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # after this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok `-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_$1_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_$1_dependencies_compiler_type=none
+fi
+])
+AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type])
+AM_CONDITIONAL([am__fastdep$1], [
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_$1_dependencies_compiler_type" = gcc3])
+])
+
+
+# AM_SET_DEPDIR
+# -------------
+# Choose a directory name for dependency files.
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES
+AC_DEFUN([AM_SET_DEPDIR],
+[AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl
+])
+
+
+# AM_DEP_TRACK
+# ------------
+AC_DEFUN([AM_DEP_TRACK],
+[AC_ARG_ENABLE(dependency-tracking,
+[ --disable-dependency-tracking speeds up one-time build
+ --enable-dependency-tracking do not reject slow dependency extractors])
+if test "x$enable_dependency_tracking" != xno; then
+ am_depcomp="$ac_aux_dir/depcomp"
+ AMDEPBACKSLASH='\'
+ am__nodep='_no'
+fi
+AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno])
+AC_SUBST([AMDEPBACKSLASH])dnl
+_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl
+AC_SUBST([am__nodep])dnl
+_AM_SUBST_NOTMAKE([am__nodep])dnl
+])
+
+# Generate code to set up dependency tracking. -*- Autoconf -*-
+
+# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+#serial 5
+
+# _AM_OUTPUT_DEPENDENCY_COMMANDS
+# ------------------------------
+AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
+[{
+ # Autoconf 2.62 quotes --file arguments for eval, but not when files
+ # are listed without --file. Let's play safe and only enable the eval
+ # if we detect the quoting.
+ case $CONFIG_FILES in
+ *\'*) eval set x "$CONFIG_FILES" ;;
+ *) set x $CONFIG_FILES ;;
+ esac
+ shift
+ for mf
+ do
+ # Strip MF so we end up with the name of the file.
+ mf=`echo "$mf" | sed -e 's/:.*$//'`
+ # Check whether this is an Automake generated Makefile or not.
+ # We used to match only the files named `Makefile.in', but
+ # some people rename them; so instead we look at the file content.
+ # Grep'ing the first line is not enough: some people post-process
+ # each Makefile.in and add a new line on top of each file to say so.
+ # Grep'ing the whole file is not good either: AIX grep has a line
+ # limit of 2048, but all seds we know of understand at least 4000.
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+ dirpart=`AS_DIRNAME("$mf")`
+ else
+ continue
+ fi
+ # Extract the definition of DEPDIR, am__include, and am__quote
+ # from the Makefile without running `make'.
+ DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+ test -z "$DEPDIR" && continue
+ am__include=`sed -n 's/^am__include = //p' < "$mf"`
+ test -z "am__include" && continue
+ am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+ # When using ansi2knr, U may be empty or an underscore; expand it
+ U=`sed -n 's/^U = //p' < "$mf"`
+ # Find all dependency output files, they are included files with
+ # $(DEPDIR) in their names. We invoke sed twice because it is the
+ # simplest approach to changing $(DEPDIR) to its actual value in the
+ # expansion.
+ for file in `sed -n "
+ s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+ # Make sure the directory exists.
+ test -f "$dirpart/$file" && continue
+ fdir=`AS_DIRNAME(["$file"])`
+ AS_MKDIR_P([$dirpart/$fdir])
+ # echo "creating $dirpart/$file"
+ echo '# dummy' > "$dirpart/$file"
+ done
+ done
+}
+])# _AM_OUTPUT_DEPENDENCY_COMMANDS
+
+
+# AM_OUTPUT_DEPENDENCY_COMMANDS
+# -----------------------------
+# This macro should only be invoked once -- use via AC_REQUIRE.
+#
+# This code is only required when automatic dependency tracking
+# is enabled. FIXME. This creates each `.P' file that we will
+# need in order to bootstrap the dependency handling code.
+AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
+[AC_CONFIG_COMMANDS([depfiles],
+ [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS],
+ [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"])
+])
+
+# Do all the work for Automake. -*- Autoconf -*-
+
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+# 2005, 2006, 2008, 2009 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 16
+
+# This macro actually does too much. Some checks are only needed if
+# your package does certain things. But this isn't really a big deal.
+
+# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
+# AM_INIT_AUTOMAKE([OPTIONS])
+# -----------------------------------------------
+# The call with PACKAGE and VERSION arguments is the old style
+# call (pre autoconf-2.50), which is being phased out. PACKAGE
+# and VERSION should now be passed to AC_INIT and removed from
+# the call to AM_INIT_AUTOMAKE.
+# We support both call styles for the transition. After
+# the next Automake release, Autoconf can make the AC_INIT
+# arguments mandatory, and then we can depend on a new Autoconf
+# release and drop the old call support.
+AC_DEFUN([AM_INIT_AUTOMAKE],
+[AC_PREREQ([2.62])dnl
+dnl Autoconf wants to disallow AM_ names. We explicitly allow
+dnl the ones we care about.
+m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
+AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl
+AC_REQUIRE([AC_PROG_INSTALL])dnl
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+ # is not polluted with repeated "-I."
+ AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl
+ # test to see if srcdir already configured
+ if test -f $srcdir/config.status; then
+ AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
+ fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+ if (cygpath --version) >/dev/null 2>/dev/null; then
+ CYGPATH_W='cygpath -w'
+ else
+ CYGPATH_W=echo
+ fi
+fi
+AC_SUBST([CYGPATH_W])
+
+# Define the identity of the package.
+dnl Distinguish between old-style and new-style calls.
+m4_ifval([$2],
+[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
+ AC_SUBST([PACKAGE], [$1])dnl
+ AC_SUBST([VERSION], [$2])],
+[_AM_SET_OPTIONS([$1])dnl
+dnl Diagnose old-style AC_INIT with new-style AM_INIT_AUTOMAKE.
+m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,,
+ [m4_fatal([AC_INIT should be called with package and version arguments])])dnl
+ AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
+ AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl
+
+_AM_IF_OPTION([no-define],,
+[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package])
+ AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl
+
+# Some tools Automake needs.
+AC_REQUIRE([AM_SANITY_CHECK])dnl
+AC_REQUIRE([AC_ARG_PROGRAM])dnl
+AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version})
+AM_MISSING_PROG(AUTOCONF, autoconf)
+AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version})
+AM_MISSING_PROG(AUTOHEADER, autoheader)
+AM_MISSING_PROG(MAKEINFO, makeinfo)
+AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
+AC_REQUIRE([AM_PROG_MKDIR_P])dnl
+# We need awk for the "check" target. The system "awk" is bad on
+# some platforms.
+AC_REQUIRE([AC_PROG_AWK])dnl
+AC_REQUIRE([AC_PROG_MAKE_SET])dnl
+AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])],
+ [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])],
+ [_AM_PROG_TAR([v7])])])
+_AM_IF_OPTION([no-dependencies],,
+[AC_PROVIDE_IFELSE([AC_PROG_CC],
+ [_AM_DEPENDENCIES(CC)],
+ [define([AC_PROG_CC],
+ defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_CXX],
+ [_AM_DEPENDENCIES(CXX)],
+ [define([AC_PROG_CXX],
+ defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_OBJC],
+ [_AM_DEPENDENCIES(OBJC)],
+ [define([AC_PROG_OBJC],
+ defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl
+])
+_AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl
+dnl The `parallel-tests' driver may need to know about EXEEXT, so add the
+dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro
+dnl is hooked onto _AC_COMPILER_EXEEXT early, see below.
+AC_CONFIG_COMMANDS_PRE(dnl
+[m4_provide_if([_AM_COMPILER_EXEEXT],
+ [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
+])
+
+dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
+dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
+dnl mangled by Autoconf and run in a shell conditional statement.
+m4_define([_AC_COMPILER_EXEEXT],
+m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
+
+
+# When config.status generates a header, we must update the stamp-h file.
+# This file resides in the same directory as the config header
+# that is generated. The stamp files are numbered to have different names.
+
+# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the
+# loop where config.status creates the headers, so we can generate
+# our stamp files there.
+AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK],
+[# Compute $1's index in $config_headers.
+_am_arg=$1
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
+
+# Copyright (C) 2001, 2003, 2005, 2008, 2011 Free Software Foundation,
+# Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 1
+
+# AM_PROG_INSTALL_SH
+# ------------------
+# Define $install_sh.
+AC_DEFUN([AM_PROG_INSTALL_SH],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+if test x"${install_sh}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+ *)
+ install_sh="\${SHELL} $am_aux_dir/install-sh"
+ esac
+fi
+AC_SUBST(install_sh)])
+
+# Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 2
+
+# Check whether the underlying file-system supports filenames
+# with a leading dot. For instance MS-DOS doesn't.
+AC_DEFUN([AM_SET_LEADING_DOT],
+[rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+ am__leading_dot=.
+else
+ am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+AC_SUBST([am__leading_dot])])
+
+# Check to see how 'make' treats includes. -*- Autoconf -*-
+
+# Copyright (C) 2001, 2002, 2003, 2005, 2009 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 4
+
+# AM_MAKE_INCLUDE()
+# -----------------
+# Check to see how make treats includes.
+AC_DEFUN([AM_MAKE_INCLUDE],
+[am_make=${MAKE-make}
+cat > confinc << 'END'
+am__doit:
+ @echo this is the am__doit target
+.PHONY: am__doit
+END
+# If we don't find an include directive, just comment out the code.
+AC_MSG_CHECKING([for style of include used by $am_make])
+am__include="#"
+am__quote=
+_am_result=none
+# First try GNU make style include.
+echo "include confinc" > confmf
+# Ignore all kinds of additional output from `make'.
+case `$am_make -s -f confmf 2> /dev/null` in #(
+*the\ am__doit\ target*)
+ am__include=include
+ am__quote=
+ _am_result=GNU
+ ;;
+esac
+# Now try BSD make style include.
+if test "$am__include" = "#"; then
+ echo '.include "confinc"' > confmf
+ case `$am_make -s -f confmf 2> /dev/null` in #(
+ *the\ am__doit\ target*)
+ am__include=.include
+ am__quote="\""
+ _am_result=BSD
+ ;;
+ esac
+fi
+AC_SUBST([am__include])
+AC_SUBST([am__quote])
+AC_MSG_RESULT([$_am_result])
+rm -f confinc confmf
+])
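For reference, a sketch of what each detected style means for a generated Makefile; the file name is hypothetical, but the shape matches the lines that _AM_OUTPUT_DEPENDENCY_COMMANDS greps for above:

    # GNU make style (am__include=include, am__quote empty):
    include ./$(DEPDIR)/foo.Po
    # BSD make style (am__include=.include, am__quote="):
    .include "./$(DEPDIR)/foo.Po"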
+
+# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
+
+# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 6
+
+# AM_MISSING_PROG(NAME, PROGRAM)
+# ------------------------------
+AC_DEFUN([AM_MISSING_PROG],
+[AC_REQUIRE([AM_MISSING_HAS_RUN])
+$1=${$1-"${am_missing_run}$2"}
+AC_SUBST($1)])
+
+
+# AM_MISSING_HAS_RUN
+# ------------------
+# Define MISSING if not defined so far and test if it supports --run.
+# If it does, set am_missing_run to use it, otherwise, to nothing.
+AC_DEFUN([AM_MISSING_HAS_RUN],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([missing])dnl
+if test x"${MISSING+set}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
+ *)
+ MISSING="\${SHELL} $am_aux_dir/missing" ;;
+ esac
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --run true"; then
+ am_missing_run="$MISSING --run "
+else
+ am_missing_run=
+ AC_MSG_WARN([`missing' script is too old or missing])
+fi
+])
+
+# Copyright (C) 2003, 2004, 2005, 2006, 2011 Free Software Foundation,
+# Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 1
+
+# AM_PROG_MKDIR_P
+# ---------------
+# Check for `mkdir -p'.
+AC_DEFUN([AM_PROG_MKDIR_P],
+[AC_PREREQ([2.60])dnl
+AC_REQUIRE([AC_PROG_MKDIR_P])dnl
+dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P,
+dnl while keeping a definition of mkdir_p for backward compatibility.
+dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile.
+dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of
+dnl Makefile.ins that do not define MKDIR_P, so we do our own
+dnl adjustment using top_builddir (which is defined more often than
+dnl MKDIR_P).
+AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl
+case $mkdir_p in
+ [[\\/$]]* | ?:[[\\/]]*) ;;
+ */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
+esac
+])
+
+# Helper functions for option handling. -*- Autoconf -*-
+
+# Copyright (C) 2001, 2002, 2003, 2005, 2008, 2010 Free Software
+# Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 5
+
+# _AM_MANGLE_OPTION(NAME)
+# -----------------------
+AC_DEFUN([_AM_MANGLE_OPTION],
+[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])])
+
+# _AM_SET_OPTION(NAME)
+# --------------------
+# Set option NAME. Presently that only means defining a flag for this option.
+AC_DEFUN([_AM_SET_OPTION],
+[m4_define(_AM_MANGLE_OPTION([$1]), 1)])
+
+# _AM_SET_OPTIONS(OPTIONS)
+# ------------------------
+# OPTIONS is a space-separated list of Automake options.
+AC_DEFUN([_AM_SET_OPTIONS],
+[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])])
+
+# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET])
+# -------------------------------------------
+# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+AC_DEFUN([_AM_IF_OPTION],
+[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
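A hedged sketch of how these option helpers are driven; the option names are examples only, but the call chain mirrors AM_INIT_AUTOMAKE above:

    _AM_SET_OPTIONS([foreign no-define])          dnl one _AM_SET_OPTION per word
    _AM_IF_OPTION([no-define],
      [],                                         dnl option set: skip the AC_DEFINEs
      [AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package])])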
+
+# Check to make sure that the build environment is sane. -*- Autoconf -*-
+
+# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 5
+
+# AM_SANITY_CHECK
+# ---------------
+AC_DEFUN([AM_SANITY_CHECK],
+[AC_MSG_CHECKING([whether build environment is sane])
+# Just in case
+sleep 1
+echo timestamp > conftest.file
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name. Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+ *[[\\\"\#\$\&\'\`$am_lf]]*)
+ AC_MSG_ERROR([unsafe absolute working directory name]);;
+esac
+case $srcdir in
+ *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*)
+ AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);;
+esac
+
+# Do `set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$[*]" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ rm -f conftest.file
+ if test "$[*]" != "X $srcdir/configure conftest.file" \
+ && test "$[*]" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
+alias in your environment])
+ fi
+
+ test "$[2]" = conftest.file
+ )
+then
+ # Ok.
+ :
+else
+ AC_MSG_ERROR([newly created file is older than distributed files!
+Check your system clock])
+fi
+AC_MSG_RESULT(yes)])
+
+# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 1
+
+# AM_PROG_INSTALL_STRIP
+# ---------------------
+# One issue with vendor `install' (even GNU) is that you can't
+# specify the program used to strip binaries. This is especially
+# annoying in cross-compiling environments, where the build's strip
+# is unlikely to handle the host's binaries.
+# Fortunately install-sh will honor a STRIPPROG variable, so we
+# always use install-sh in `make install-strip', and initialize
+# STRIPPROG with the value of the STRIP variable (set by the user).
+AC_DEFUN([AM_PROG_INSTALL_STRIP],
+[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+# Installed binaries are usually stripped using `strip' when the user
+# runs `make install-strip'. However `strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the `STRIP' environment variable to overrule this program.
+dnl Don't test for $cross_compiling = yes, because it might be `maybe'.
+if test "$cross_compiling" != no; then
+ AC_CHECK_TOOL([STRIP], [strip], :)
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+AC_SUBST([INSTALL_STRIP_PROGRAM])])
+
+# Copyright (C) 2006, 2008, 2010 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 3
+
+# _AM_SUBST_NOTMAKE(VARIABLE)
+# ---------------------------
+# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in.
+# This macro is traced by Automake.
+AC_DEFUN([_AM_SUBST_NOTMAKE])
+
+# AM_SUBST_NOTMAKE(VARIABLE)
+# --------------------------
+# Public sister of _AM_SUBST_NOTMAKE.
+AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
+
+# Check how to create a tarball. -*- Autoconf -*-
+
+# Copyright (C) 2004, 2005, 2012 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 2
+
+# _AM_PROG_TAR(FORMAT)
+# --------------------
+# Check how to create a tarball in format FORMAT.
+# FORMAT should be one of `v7', `ustar', or `pax'.
+#
+# Substitute a variable $(am__tar) that is a command
+# writing to stdout a FORMAT-tarball containing the directory
+# $tardir.
+# tardir=directory && $(am__tar) > result.tar
+#
+# Substitute a variable $(am__untar) that extracts such
+# a tarball read from stdin.
+# $(am__untar) < result.tar
+AC_DEFUN([_AM_PROG_TAR],
+[# Always define AMTAR for backward compatibility. Yes, it's still used
+# in the wild :-( We should find a proper way to deprecate it ...
+AC_SUBST([AMTAR], ['$${TAR-tar}'])
+m4_if([$1], [v7],
+ [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
+ [m4_case([$1], [ustar],, [pax],,
+ [m4_fatal([Unknown tar format])])
+AC_MSG_CHECKING([how to create a $1 tar archive])
+# Loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
+_am_tools=${am_cv_prog_tar_$1-$_am_tools}
+# Do not fold the above two lines into one, because Tru64 sh and
+# Solaris sh will not grok spaces in the rhs of `-'.
+for _am_tool in $_am_tools
+do
+ case $_am_tool in
+ gnutar)
+ for _am_tar in tar gnutar gtar;
+ do
+ AM_RUN_LOG([$_am_tar --version]) && break
+ done
+ am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
+ am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
+ am__untar="$_am_tar -xf -"
+ ;;
+ plaintar)
+ # Must skip GNU tar: if it does not support --format= it doesn't create
+ # a ustar tarball either.
+ (tar --version) >/dev/null 2>&1 && continue
+ am__tar='tar chf - "$$tardir"'
+ am__tar_='tar chf - "$tardir"'
+ am__untar='tar xf -'
+ ;;
+ pax)
+ am__tar='pax -L -x $1 -w "$$tardir"'
+ am__tar_='pax -L -x $1 -w "$tardir"'
+ am__untar='pax -r'
+ ;;
+ cpio)
+ am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
+ am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
+ am__untar='cpio -i -H $1 -d'
+ ;;
+ none)
+ am__tar=false
+ am__tar_=false
+ am__untar=false
+ ;;
+ esac
+
+ # If the value was cached, stop now. We just wanted to have am__tar
+ # and am__untar set.
+ test -n "${am_cv_prog_tar_$1}" && break
+
+ # tar/untar a dummy directory, and stop if the command works
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ echo GrepMe > conftest.dir/file
+ AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+ rm -rf conftest.dir
+ if test -s conftest.tar; then
+ AM_RUN_LOG([$am__untar <conftest.tar])
+ grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+ fi
+done
+rm -rf conftest.dir
+
+AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
+AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+AC_SUBST([am__tar])
+AC_SUBST([am__untar])
+]) # _AM_PROG_TAR
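The contract described in the header comment of _AM_PROG_TAR, restated as a hypothetical Makefile recipe (tab-indented; $(distdir) and the rule name are illustrative, not taken from this diff):

    dist-sketch:
    	tardir=$(distdir) && $(am__tar) > $(distdir).tar
    	$(am__untar) < $(distdir).tar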
+
+m4_include([m4/gtest.m4])
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/autogen.sh b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/autogen.sh
new file mode 100644
index 00000000..9d0ebe93
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/autogen.sh
@@ -0,0 +1,7 @@
+#! /bin/sh -e
+rm -rf autom4te.cache
+aclocal -I m4
+autoheader
+libtoolize --copy
+automake --add-missing --copy
+autoconf
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.guess b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.guess
new file mode 100644
index 00000000..d622a44e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.guess
@@ -0,0 +1,1530 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012 Free Software Foundation, Inc.
+
+timestamp='2012-02-10'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Originally written by Per Bothner. Please send patches (context
+# diff format) to <config-patches@gnu.org> and include a ChangeLog
+# entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub. If it succeeds, it prints the system name on stdout, and
+# exits with 0. Otherwise, it exits with 1.
+#
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > $dummy.c ;
+ for c in cc gcc c89 c99 ; do
+ if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ELF__
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # The OS release
+ # Debian GNU/NetBSD machines have a different userland, and
+ # thus, need a distinct triplet. However, they do not need
+ # kernel version information, so it can be replaced with a
+ # suitable tag, in the style of linux-gnu.
+ case "${UNAME_VERSION}" in
+ Debian*)
+ release='-gnu'
+ ;;
+ *)
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ ;;
+ esac
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit ;;
+ *:OpenBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+ exit ;;
+ *:SolidBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:MirBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ alpha:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+ case "$ALPHA_CPU_TYPE" in
+ "EV4 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "EV4.5 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "LCA4 (21066/21068)")
+ UNAME_MACHINE="alpha" ;;
+ "EV5 (21164)")
+ UNAME_MACHINE="alphaev5" ;;
+ "EV5.6 (21164A)")
+ UNAME_MACHINE="alphaev56" ;;
+ "EV5.6 (21164PC)")
+ UNAME_MACHINE="alphapca56" ;;
+ "EV5.7 (21164PC)")
+ UNAME_MACHINE="alphapca57" ;;
+ "EV6 (21264)")
+ UNAME_MACHINE="alphaev6" ;;
+ "EV6.7 (21264A)")
+ UNAME_MACHINE="alphaev67" ;;
+ "EV6.8CB (21264C)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8AL (21264B)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8CX (21264D)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.9A (21264/EV69A)")
+ UNAME_MACHINE="alphaev69" ;;
+ "EV7 (21364)")
+ UNAME_MACHINE="alphaev7" ;;
+ "EV7.9 (21364A)")
+ UNAME_MACHINE="alphaev79" ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+ exitcode=$?
+ trap '' 0
+ exit $exitcode ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit ;;
+ arm:riscos:*:*|arm:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit ;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit ;;
+ DRS?6000:unix:4.0:6*)
+ echo sparc-icl-nx6
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7; exit ;;
+ esac ;;
+ s390x:SunOS:*:*)
+ echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux${UNAME_RELEASE}
+ exit ;;
+ i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+ eval $set_cc_for_build
+ SUN_ARCH="i386"
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH="x86_64"
+ fi
+ fi
+ echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually everything (everything which is not
+ # "atarist" or "atariste" at least should have a processor
+ # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
+ # to the lowercase version "mint" (or "freemint"). Finally
+ # the system name "TOS" denotes a system which is actually not
+ # MiNT. But MiNT is downward compatible to TOS, so this should
+ # be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten${UNAME_RELEASE}
+ exit ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c &&
+ dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`$dummy $dummyarg` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[4567])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if [ ${HP_ARCH} = "hppa2.0w" ]
+ then
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
+ then
+ HP_ARCH="hppa2.0w"
+ else
+ HP_ARCH="hppa64"
+ fi
+ fi
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:FreeBSD:*:*)
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ case ${UNAME_PROCESSOR} in
+ amd64)
+ echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ *)
+ echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ esac
+ exit ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit ;;
+ *:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit ;;
+ i*:MSYS*:*)
+ echo ${UNAME_MACHINE}-pc-msys
+ exit ;;
+ i*:windows32*:*)
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
+ exit ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit ;;
+ *:Interix*:*)
+ case ${UNAME_MACHINE} in
+ x86)
+ echo i586-pc-interix${UNAME_RELEASE}
+ exit ;;
+ authenticamd | genuineintel | EM64T)
+ echo x86_64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ IA64)
+ echo ia64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ esac ;;
+ [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+ echo i${UNAME_MACHINE}-pc-mks
+ exit ;;
+ 8664:Windows_NT:*)
+ echo x86_64-pc-mks
+ exit ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we change
+ # UNAME_MACHINE based on the output of uname instead of i386?
+ echo i586-pc-interix
+ exit ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
+ exit ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit ;;
+ aarch64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ exit ;;
+ arm*:Linux:*:*)
+ eval $set_cc_for_build
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ else
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-gnueabihf
+ fi
+ fi
+ exit ;;
+ avr32*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ cris:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-gnu
+ exit ;;
+ crisv32:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-gnu
+ exit ;;
+ frv:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ hexagon:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ i*86:Linux:*:*)
+ LIBC=gnu
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #ifdef __dietlibc__
+ LIBC=dietlibc
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
+ echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+ exit ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ m32r*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ mips:Linux:*:* | mips64:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=${UNAME_MACHINE}el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=${UNAME_MACHINE}
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+ ;;
+ or32:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-gnu
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-gnu
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-gnu ;;
+ PA8*) echo hppa2.0-unknown-linux-gnu ;;
+ *) echo hppa-unknown-linux-gnu ;;
+ esac
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-gnu
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-gnu
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux
+ exit ;;
+ sh64*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ tile*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ vax:Linux:*:*)
+ echo ${UNAME_MACHINE}-dec-linux-gnu
+ exit ;;
+ x86_64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ xtensa*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configury will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo ${UNAME_MACHINE}-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-7:SUPER-UX:*:*)
+ echo sx7-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8:SUPER-UX:*:*)
+ echo sx8-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8R:SUPER-UX:*:*)
+ echo sx8r-nec-superux${UNAME_RELEASE}
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ case $UNAME_PROCESSOR in
+ i386)
+ eval $set_cc_for_build
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ UNAME_PROCESSOR="x86_64"
+ fi
+ fi ;;
+ unknown) UNAME_PROCESSOR=powerpc ;;
+ esac
+ echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+ exit ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = "x86"; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NEO-?:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSE-?:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSR-?:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux${UNAME_RELEASE}
+ exit ;;
+ *:DragonFly:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "${UNAME_MACHINE}" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+ exit ;;
+ i*86:rdos:*:*)
+ echo ${UNAME_MACHINE}-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo ${UNAME_MACHINE}-pc-aros
+ exit ;;
+ x86_64:VMkernel:*:*)
+ echo ${UNAME_MACHINE}-unknown-esx
+ exit ;;
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+ printf ("arm-acorn-riscix\n"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+ printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+# include <sys/param.h>
+# if defined (BSD)
+# if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+# else
+# if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# endif
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# else
+ printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+ case `getsysinfo -f cpu_type` in
+ c1*)
+ echo c1-convex-bsd
+ exit ;;
+ c2*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ c34*)
+ echo c34-convex-bsd
+ exit ;;
+ c38*)
+ echo c38-convex-bsd
+ exit ;;
+ c4*)
+ echo c4-convex-bsd
+ exit ;;
+ esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+and
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
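
The config.guess script that ends here takes no arguments: it keys off `uname` output (and, as the tail above shows, falls back to compiling a small C probe and to vendor-specific checks such as the Apollo and Convex paths) and prints one canonical triplet on stdout, or the "unable to guess system type" report with exit status 1. A minimal usage sketch, assuming a POSIX shell on an x86_64 GNU/Linux build host; the printed triplet varies by platform:

    $ sh config.guess
    x86_64-unknown-linux-gnu    # illustrative output; other hosts print their own triplet
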
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.h.in b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.h.in
new file mode 100644
index 00000000..09b86df4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.h.in
@@ -0,0 +1,124 @@
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+/* Define to 1 if the compiler supports __builtin_ctz and friends. */
+#undef HAVE_BUILTIN_CTZ
+
+/* Define to 1 if the compiler supports __builtin_expect. */
+#undef HAVE_BUILTIN_EXPECT
+
+/* Define to 1 if you have the <byteswap.h> header file. */
+#undef HAVE_BYTESWAP_H
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#undef HAVE_DLFCN_H
+
+/* Use the gflags package for command-line parsing. */
+#undef HAVE_GFLAGS
+
+/* Defined when Google Test is available. */
+#undef HAVE_GTEST
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the `fastlz' library (-lfastlz). */
+#undef HAVE_LIBFASTLZ
+
+/* Define to 1 if you have the `lzf' library (-llzf). */
+#undef HAVE_LIBLZF
+
+/* Define to 1 if you have the `lzo2' library (-llzo2). */
+#undef HAVE_LIBLZO2
+
+/* Define to 1 if you have the `quicklz' library (-lquicklz). */
+#undef HAVE_LIBQUICKLZ
+
+/* Define to 1 if you have the `z' library (-lz). */
+#undef HAVE_LIBZ
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* Define to 1 if you have the <stddef.h> header file. */
+#undef HAVE_STDDEF_H
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the <sys/byteswap.h> header file. */
+#undef HAVE_SYS_BYTESWAP_H
+
+/* Define to 1 if you have the <sys/endian.h> header file. */
+#undef HAVE_SYS_ENDIAN_H
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#undef HAVE_SYS_MMAN_H
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+#undef HAVE_SYS_RESOURCE_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#undef HAVE_SYS_TIME_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Define to 1 if you have the <windows.h> header file. */
+#undef HAVE_WINDOWS_H
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#undef LT_OBJDIR
+
+/* Name of package */
+#undef PACKAGE
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* Version number of package */
+#undef VERSION
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel and VAX). */
+#if defined __BIG_ENDIAN__
+# define WORDS_BIGENDIAN 1
+#elif ! defined __LITTLE_ENDIAN__
+# undef WORDS_BIGENDIAN
+#endif
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+#undef size_t
+
+/* Define to `int' if <sys/types.h> does not define. */
+#undef ssize_t
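
The config.h.in template above is consumed by the generated configure script: each `#undef` line is rewritten to a real `#define` (or left commented out) in config.h according to the results of the feature tests. A hypothetical check, assuming the snappy-1.1.2 source directory and a typical Linux host where <stdint.h> and <sys/mman.h> are present:

    $ ./configure >config.log 2>&1
    $ grep -E 'HAVE_STDINT_H|HAVE_SYS_MMAN_H' config.h
    #define HAVE_STDINT_H 1
    #define HAVE_SYS_MMAN_H 1

On a system lacking one of these headers, the corresponding line instead appears as a commented-out `/* #undef HAVE_... */`.
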
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.sub b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.sub
new file mode 100644
index 00000000..c894da45
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/config.sub
@@ -0,0 +1,1773 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012 Free Software Foundation, Inc.
+
+timestamp='2012-02-10'
+
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine. It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted GNU ChangeLog entry.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit ;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+ linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ knetbsd*-gnu* | netbsd*-gnu* | \
+ kopensolaris*-gnu* | \
+ storm-chaos* | os2-emx* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ android-linux)
+ os=-linux-android
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis | -knuth | -cray | -microblaze)
+ os=
+ basic_machine=$1
+ ;;
+ -bluegene*)
+ os=-cnk
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco6)
+ os=-sco5v6
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | aarch64 | aarch64_be \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | am33_2.0 \
+ | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
+ | be32 | be64 \
+ | bfin \
+ | c4x | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | epiphany \
+ | fido | fr30 | frv \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | hexagon \
+ | i370 | i860 | i960 | ia64 \
+ | ip2k | iq2000 \
+ | le32 | le64 \
+ | lm32 \
+ | m32c | m32r | m32rle | m68000 | m68k | m88k \
+ | maxq | mb | microblaze | mcore | mep | metag \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64octeon | mips64octeonel \
+ | mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa32r2 | mipsisa32r2el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipsisa64sr71k | mipsisa64sr71kel \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | moxie \
+ | mt \
+ | msp430 \
+ | nds32 | nds32le | nds32be \
+ | nios | nios2 \
+ | ns16k | ns32k \
+ | open8 \
+ | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle \
+ | pyramid \
+ | rl78 | rx \
+ | score \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+ | spu \
+ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+ | ubicom32 \
+ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+ | we32k \
+ | x86 | xc16x | xstormy16 | xtensa \
+ | z8k | z80)
+ basic_machine=$basic_machine-unknown
+ ;;
+ c54x)
+ basic_machine=tic54x-unknown
+ ;;
+ c55x)
+ basic_machine=tic55x-unknown
+ ;;
+ c6x)
+ basic_machine=tic6x-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ ;;
+ ms1)
+ basic_machine=mt-unknown
+ ;;
+
+ strongarm | thumb | xscale)
+ basic_machine=arm-unknown
+ ;;
+ xgate)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ xscaleeb)
+ basic_machine=armeb-unknown
+ ;;
+
+ xscaleel)
+ basic_machine=armel-unknown
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | aarch64-* | aarch64_be-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* | avr32-* \
+ | be32-* | be64-* \
+ | bfin-* | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c4x-* \
+ | clipper-* | craynv-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | elxsi-* \
+ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | hexagon-* \
+ | i*86-* | i860-* | i960-* | ia64-* \
+ | ip2k-* | iq2000-* \
+ | le32-* | le64-* \
+ | lm32-* \
+ | m32c-* | m32r-* | m32rle-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64octeon-* | mips64octeonel-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64r5900-* | mips64r5900el-* \
+ | mips64vr-* | mips64vrel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mips64vr5900-* | mips64vr5900el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa32r2-* | mipsisa32r2el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64r2-* | mipsisa64r2el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipstx39-* | mipstx39el-* \
+ | mmix-* \
+ | mt-* \
+ | msp430-* \
+ | nds32-* | nds32le-* | nds32be-* \
+ | nios-* | nios2-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | open8-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+ | pyramid-* \
+ | rl78-* | romp-* | rs6000-* | rx-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+ | sparclite-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+ | tahoe-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tile*-* \
+ | tron-* \
+ | ubicom32-* \
+ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+ | vax-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xc16x-* | xps100-* \
+ | xstormy16-* | xtensa*-* \
+ | ymp-* \
+ | z8k-* | z80-*)
+ ;;
+ # Recognize the basic CPU types without company name, with glob match.
+ xtensa*)
+ basic_machine=$basic_machine-unknown
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ abacus)
+ basic_machine=abacus-unknown
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amd64)
+ basic_machine=x86_64-pc
+ ;;
+ amd64-*)
+ basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aros)
+ basic_machine=i386-pc
+ os=-aros
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=-linux
+ ;;
+ blackfin-*)
+ basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ bluegene*)
+ basic_machine=powerpc-ibm
+ os=-cnk
+ ;;
+ c54x-*)
+ basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c55x-*)
+ basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c6x-*)
+ basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=-cegcc
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ craynv)
+ basic_machine=craynv-cray
+ os=-unicosmp
+ ;;
+ cr16 | cr16-*)
+ basic_machine=cr16-unknown
+ os=-elf
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ crisv32 | crisv32-* | etraxfs*)
+ basic_machine=crisv32-axis
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ crx)
+ basic_machine=crx-unknown
+ os=-elf
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dicos)
+ basic_machine=i686-pc
+ os=-dicos
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ os=-msdosdjgpp
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+ i*86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=-linux
+ ;;
+ m68knommu-*)
+ basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ microblaze)
+ basic_machine=microblaze-xilinx
+ ;;
+ mingw32)
+ basic_machine=i386-pc
+ os=-mingw32
+ ;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ os=-mingw32ce
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ ms1-*)
+ basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+ ;;
+ msys)
+ basic_machine=i386-pc
+ os=-msys
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ nacl)
+ basic_machine=le32-unknown
+ os=-nacl
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ neo-tandem)
+ basic_machine=neo-tandem
+ ;;
+ nse-tandem)
+ basic_machine=nse-tandem
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ openrisc | openrisc-*)
+ basic_machine=or32-unknown
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=-os400
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=-linux
+ ;;
+ parisc-*)
+ basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pc98)
+ basic_machine=i386-pc
+ ;;
+ pc98-*)
+ basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon | athlon_*)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2 | pentiumiii | pentium3)
+ basic_machine=i686-pc
+ ;;
+ pentium4)
+ basic_machine=i786-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium4-*)
+ basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc | ppcbe) basic_machine=powerpc-unknown
+ ;;
+ ppc-* | ppcbe-*)
+ basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rdos)
+ basic_machine=i386-pc
+ os=-rdos
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sb1)
+ basic_machine=mipsisa64sb1-unknown
+ ;;
+ sb1el)
+ basic_machine=mipsisa64sb1el-unknown
+ ;;
+ sde)
+ basic_machine=mipsisa32-sde
+ os=-elf
+ ;;
+ sei)
+ basic_machine=mips-sei
+ os=-seiux
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sh5el)
+ basic_machine=sh5le-unknown
+ ;;
+ sh64)
+ basic_machine=sh64-unknown
+ ;;
+ sparclite-wrs | simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ strongarm-* | thumb-*)
+ basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ tile*)
+ basic_machine=$basic_machine-unknown
+ os=-linux-gnu
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=-tpf
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ xbox)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ xscale-* | xscalee[bl]-*)
+ basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ z80-*-coff)
+ basic_machine=z80-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ mmix)
+ basic_machine=mmix-knuth
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp10)
+ # there are many clones, so DEC is not a safe bet
+ basic_machine=pdp10-unknown
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+ basic_machine=sh-unknown
+ ;;
+ sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -auroraux)
+ os=-auroraux
+ ;;
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+ # The portable systems comes first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+ | -sym* | -kopensolaris* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* | -aros* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+ | -openbsd* | -solidbsd* \
+ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* | -cegcc* \
+ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-uclibc* \
+ | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto-qnx*)
+ ;;
+ -nto*)
+ os=`echo $os | sed -e 's|nto|nto-qnx|'`
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ -linux-dietlibc)
+ os=-linux-dietlibc
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -os400*)
+ os=-os400
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -syllable*)
+ os=-syllable
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -tpf*)
+ os=-tpf
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -aros*)
+ os=-aros
+ ;;
+ -kaos*)
+ os=-kaos
+ ;;
+ -zvmoe)
+ os=-zvmoe
+ ;;
+ -dicos*)
+ os=-dicos
+ ;;
+ -nacl*)
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine or put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ score-*)
+ os=-elf
+ ;;
+ spu-*)
+ os=-elf
+ ;;
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ c4x-* | tic4x-*)
+ os=-coff
+ ;;
+ tic54x-*)
+ os=-coff
+ ;;
+ tic55x-*)
+ os=-coff
+ ;;
+ tic6x-*)
+ os=-coff
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mep-*)
+ os=-elf
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-haiku)
+ os=-haiku
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-knuth)
+ os=-mmixware
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -cnk*|-aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -os400*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -tpf*)
+ vendor=ibm
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
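
As its own comments describe, config.sub takes a single (possibly aliased) configuration name, expands machine and OS aliases, fills in default vendors and default operating systems, and echoes the canonical CPU-VENDOR-OS form, or prints an error on stderr and exits 1 for unrecognized input. A short sketch, assuming a POSIX shell; the expansions below follow directly from the tables above (i*86 maps to a -pc vendor, -linux becomes -linux-gnu, and decstation-3100 picks up the mips-dec machine with the -ultrix4.2 default OS):

    $ sh config.sub i686-linux
    i686-pc-linux-gnu
    $ sh config.sub decstation-3100
    mips-dec-ultrix4.2
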
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/configure b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/configure
new file mode 100755
index 00000000..5149d101
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/configure
@@ -0,0 +1,18851 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.68 for snappy 1.1.2.
+#
+#
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software
+# Foundation, Inc.
+#
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+if test "x$CONFIG_SHELL" = x; then
+ as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '\${1+\"\$@\"}'='\"\$@\"'
+ setopt NO_GLOB_SUBST
+else
+ case \`(set -o) 2>/dev/null\` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+"
+ as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
+
+exitcode=0
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
+
+else
+ exitcode=1; echo positional parameters were not saved.
+fi
+test x\$exitcode = x0 || exit 1"
+ as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+ as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+ eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+ test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
+
+ test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || (
+ ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
+ ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
+ PATH=/empty FPATH=/empty; export PATH FPATH
+ test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\
+ || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1
+test \$(( 1 + 1 )) = 2 || exit 1"
+ if (eval "$as_required") 2>/dev/null; then :
+ as_have_required=yes
+else
+ as_have_required=no
+fi
+ if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
+
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ as_found=:
+ case $as_dir in #(
+ /*)
+ for as_base in sh bash ksh sh5; do
+ # Try only shells that exist, to save several forks.
+ as_shell=$as_dir/$as_base
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ CONFIG_SHELL=$as_shell as_have_required=yes
+ if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
+ break 2
+fi
+fi
+ done;;
+ esac
+ as_found=false
+done
+$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+ { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
+ CONFIG_SHELL=$SHELL as_have_required=yes
+fi; }
+IFS=$as_save_IFS
+
+
+ if test "x$CONFIG_SHELL" != x; then :
+ # We cannot yet assume a decent shell, so we have to provide a
+ # neutralization value for shells without unset; and this also
+ # works around shells that cannot unset nonexistent variables.
+ # Preserve -v and -x to the replacement shell.
+ BASH_ENV=/dev/null
+ ENV=/dev/null
+ (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+ export CONFIG_SHELL
+ case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+ esac
+ exec "$CONFIG_SHELL" $as_opts "$as_myself" ${1+"$@"}
+fi
+
+ if test x$as_have_required = xno; then :
+ $as_echo "$0: This script requires a shell more modern than all"
+ $as_echo "$0: the shells that I found on your system."
+ if test x${ZSH_VERSION+set} = xset ; then
+ $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+ $as_echo "$0: be upgraded to zsh 4.3.4 or later."
+ else
+ $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
+$0: including any error possibly output before this
+$0: message. Then install a modern shell, or manually run
+$0: the script under such a shell if you do have one."
+ fi
+ exit 1
+fi
+fi
+fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
+
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+ as_lineno_1=$LINENO as_lineno_1a=$LINENO
+ as_lineno_2=$LINENO as_lineno_2a=$LINENO
+ eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+ test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+ # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+
+  # Don't try to exec as it changes $[0], causing all sorts of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -p'
+ fi
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in #(
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+
+# Identity of this package.
+PACKAGE_NAME='snappy'
+PACKAGE_TARNAME='snappy'
+PACKAGE_VERSION='1.1.2'
+PACKAGE_STRING='snappy 1.1.2'
+PACKAGE_BUGREPORT=''
+PACKAGE_URL=''
+
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='am__EXEEXT_FALSE
+am__EXEEXT_TRUE
+LTLIBOBJS
+LIBOBJS
+SNAPPY_LTVERSION
+SNAPPY_PATCHLEVEL
+SNAPPY_MINOR
+SNAPPY_MAJOR
+ac_cv_have_sys_uio_h
+ac_cv_have_stddef_h
+ac_cv_have_stdint_h
+UNITTEST_LIBS
+gflags_LIBS
+gflags_CFLAGS
+PKG_CONFIG_LIBDIR
+PKG_CONFIG_PATH
+PKG_CONFIG
+HAVE_GTEST_FALSE
+HAVE_GTEST_TRUE
+HAVE_GTEST
+GTEST_VERSION
+GTEST_LIBS
+GTEST_LDFLAGS
+GTEST_CXXFLAGS
+GTEST_CPPFLAGS
+GTEST_CONFIG
+CXXCPP
+am__fastdepCXX_FALSE
+am__fastdepCXX_TRUE
+CXXDEPMODE
+ac_ct_CXX
+CXXFLAGS
+CXX
+LIBTOOL_DEPS
+CPP
+OTOOL64
+OTOOL
+LIPO
+NMEDIT
+DSYMUTIL
+MANIFEST_TOOL
+RANLIB
+ac_ct_AR
+AR
+DLLTOOL
+OBJDUMP
+LN_S
+NM
+ac_ct_DUMPBIN
+DUMPBIN
+LD
+FGREP
+EGREP
+GREP
+SED
+am__fastdepCC_FALSE
+am__fastdepCC_TRUE
+CCDEPMODE
+am__nodep
+AMDEPBACKSLASH
+AMDEP_FALSE
+AMDEP_TRUE
+am__quote
+am__include
+DEPDIR
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+host_os
+host_vendor
+host_cpu
+host
+build_os
+build_vendor
+build_cpu
+build
+LIBTOOL
+am__untar
+am__tar
+AMTAR
+am__leading_dot
+SET_MAKE
+AWK
+mkdir_p
+MKDIR_P
+INSTALL_STRIP_PROGRAM
+STRIP
+install_sh
+MAKEINFO
+AUTOHEADER
+AUTOMAKE
+AUTOCONF
+ACLOCAL
+VERSION
+PACKAGE
+CYGPATH_W
+am__isrc
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_shared
+enable_static
+with_pic
+enable_fast_install
+enable_dependency_tracking
+with_gnu_ld
+with_sysroot
+enable_libtool_lock
+enable_gtest
+with_gflags
+'
+ ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CPP
+CXX
+CXXFLAGS
+CCC
+CXXCPP
+GTEST_CONFIG
+GTEST_CPPFLAGS
+GTEST_CXXFLAGS
+GTEST_LDFLAGS
+GTEST_LIBS
+GTEST_VERSION
+PKG_CONFIG
+PKG_CONFIG_PATH
+PKG_CONFIG_LIBDIR
+gflags_CFLAGS
+gflags_LIBS'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underscores.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval $ac_prev=\$ac_option
+ ac_prev=
+ continue
+ fi
+
+ case $ac_option in
+ *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *=) ac_optarg= ;;
+ *) ac_optarg=yes ;;
+ esac
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_dashdash$ac_option in
+ --)
+ ac_dashdash=yes ;;
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=*)
+ datadir=$ac_optarg ;;
+
+ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+ | --dataroo | --dataro | --datar)
+ ac_prev=datarootdir ;;
+ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+ datarootdir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=no ;;
+
+ -docdir | --docdir | --docdi | --doc | --do)
+ ac_prev=docdir ;;
+ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+ docdir=$ac_optarg ;;
+
+ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+ ac_prev=dvidir ;;
+ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+ dvidir=$ac_optarg ;;
+
+ -enable-* | --enable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid feature name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=\$ac_optarg ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+ ac_prev=htmldir ;;
+ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+ | --ht=*)
+ htmldir=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localedir | --localedir | --localedi | --localed | --locale)
+ ac_prev=localedir ;;
+ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+ localedir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst | --locals)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+ ac_prev=pdfdir ;;
+ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+ pdfdir=$ac_optarg ;;
+
+ -psdir | --psdir | --psdi | --psd | --ps)
+ ac_prev=psdir ;;
+ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+ psdir=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=\$ac_optarg ;;
+
+ -without-* | --without-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ as_fn_error $? "invalid package name: $ac_useropt"
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=no ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ case $ac_envvar in #(
+ '' | [0-9]* | *[!_$as_cr_alnum]* )
+ as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
+ esac
+ eval $ac_envvar=\$ac_optarg
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ as_fn_error $? "missing argument to $ac_option"
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+ case $enable_option_checking in
+ no) ;;
+ fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+ *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+ esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ datadir sysconfdir sharedstatedir localstatedir includedir \
+ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+ libdir localedir mandir
+do
+ eval ac_val=\$$ac_var
+ # Remove trailing slashes.
+ case $ac_val in
+ */ )
+ ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+ eval $ac_var=\$ac_val;;
+ esac
+ # Be sure to have absolute directory names.
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) continue;;
+ NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+ esac
+ as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host.
+ If a cross compiler is detected then cross compile mode will be used" >&2
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+ as_fn_error $? "working directory cannot be determined"
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+ as_fn_error $? "pwd does not report name of working directory"
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then the parent directory.
+ ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_myself" : 'X\(//\)[^/]' \| \
+ X"$as_myself" : 'X\(//\)$' \| \
+ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r "$srcdir/$ac_unique_file"; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+ as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+ cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
+ pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+ srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+ eval ac_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_env_${ac_var}_value=\$${ac_var}
+ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures snappy 1.1.2 to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking ...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/snappy]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
+_ACEOF
+
+ cat <<\_ACEOF
+
+Program names:
+ --program-prefix=PREFIX prepend PREFIX to installed program names
+ --program-suffix=SUFFIX append SUFFIX to installed program names
+ --program-transform-name=PROGRAM run sed PROGRAM on installed program names
+
+System types:
+ --build=BUILD configure for building on BUILD [guessed]
+ --host=HOST cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+ case $ac_init_help in
+ short | recursive ) echo "Configuration of snappy 1.1.2:";;
+ esac
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-option-checking ignore unrecognized --enable/--with options
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-shared[=PKGS] build shared libraries [default=yes]
+ --enable-static[=PKGS] build static libraries [default=yes]
+ --enable-fast-install[=PKGS]
+ optimize for fast installation [default=yes]
+ --disable-dependency-tracking speeds up one-time build
+ --enable-dependency-tracking do not reject slow dependency extractors
+ --disable-libtool-lock avoid locking (might break parallel builds)
+ --enable-gtest Enable tests using the Google C++ Testing Framework.
+ (Default is enabled.)
+
+Optional Packages:
+ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
+ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+ --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use
+ both]
+ --with-gnu-ld assume the C compiler uses GNU ld [default=no]
+ --with-sysroot=DIR Search for dependent libraries within DIR
+ (or the compiler's sysroot if not specified).
+ --with-gflags use Google Flags package to enhance the unit test
+ [default=check]
+
+Some influential environment variables:
+ CC C compiler command
+ CFLAGS C compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ LIBS libraries to pass to the linker, e.g. -l<library>
+ CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+ you have headers in a nonstandard directory <include dir>
+ CPP C preprocessor
+ CXX C++ compiler command
+ CXXFLAGS C++ compiler flags
+ CXXCPP C++ preprocessor
+ GTEST_CONFIG
+ The exact path of Google Test's 'gtest-config' script.
+ GTEST_CPPFLAGS
+ C-like preprocessor flags for Google Test.
+ GTEST_CXXFLAGS
+ C++ compile flags for Google Test.
+ GTEST_LDFLAGS
+ Linker path and option flags for Google Test.
+ GTEST_LIBS Library linking flags for Google Test.
+ GTEST_VERSION
+ The version of Google Test available.
+ PKG_CONFIG path to pkg-config utility
+ PKG_CONFIG_PATH
+ directories to add to pkg-config's search path
+ PKG_CONFIG_LIBDIR
+ path overriding pkg-config's built-in search path
+ gflags_CFLAGS
+ C compiler flags for gflags, overriding pkg-config
+ gflags_LIBS linker flags for gflags, overriding pkg-config
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to the package provider.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d "$ac_dir" ||
+ { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+ continue
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+ cd "$ac_dir" || { ac_status=$?; continue; }
+  # Check for the sub-package's configure script.
+ if test -f "$ac_srcdir/configure.gnu"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+ elif test -f "$ac_srcdir/configure"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure" --help=recursive
+ else
+ $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi || ac_status=$?
+ cd "$ac_pwd" || { ac_status=$?; break; }
+ done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+ cat <<\_ACEOF
+snappy configure 1.1.2
+generated by GNU Autoconf 2.68
+
+Copyright (C) 2010 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_compile
+
+# ac_fn_c_try_link LINENO
+# -----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_link ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext conftest$ac_exeext
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+ # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+ # interfere with the next link command; also delete a directory that is
+ # left behind by Apple's compiler. We do this before executing the actions.
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_link
+
+# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists and can be compiled using the include files in
+# INCLUDES, setting the cache variable VAR accordingly.
+ac_fn_c_check_header_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_compile
+
+# ac_fn_c_try_cpp LINENO
+# ----------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_cpp ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } > conftest.i && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_cpp
+
+# ac_fn_c_try_run LINENO
+# ----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
+# that executables *can* be run.
+ac_fn_c_try_run ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=$ac_status
+fi
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_run
+
+# ac_fn_c_check_func LINENO FUNC VAR
+# ----------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_c_check_func ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $2 (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_func
+
+# ac_fn_cxx_try_compile LINENO
+# ----------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_compile
+
+# ac_fn_cxx_try_cpp LINENO
+# ------------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_cpp ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } > conftest.i && {
+ test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_cpp
+
+# ac_fn_cxx_try_link LINENO
+# -------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_link ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext conftest$ac_exeext
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+ # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+ # interfere with the next link command; also delete a directory that is
+ # left behind by Apple's compiler. We do this before executing the actions.
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_link
+
+# ac_fn_cxx_try_run LINENO
+# ------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
+# that executables *can* be run.
+ac_fn_cxx_try_run ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=$ac_status
+fi
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_run
+
+# ac_fn_cxx_check_type LINENO TYPE VAR INCLUDES
+# ---------------------------------------------
+# Tests whether TYPE exists after having included INCLUDES, setting cache
+# variable VAR accordingly.
+ac_fn_cxx_check_type ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=no"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof ($2))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof (($2)))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+ eval "$3=yes"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_cxx_check_type
+
+# ac_fn_cxx_check_header_mongrel LINENO HEADER VAR INCLUDES
+# ---------------------------------------------------------
+# Tests whether HEADER exists, giving a warning if it cannot be compiled using
+# the include files in INCLUDES and setting the cache variable VAR
+# accordingly.
+ac_fn_cxx_check_header_mongrel ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if eval \${$3+:} false; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
+$as_echo_n "checking $2 usability... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_header_compiler=yes
+else
+ ac_header_compiler=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
+$as_echo_n "checking $2 presence... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <$2>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+ ac_header_preproc=yes
+else
+ ac_header_preproc=no
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in #((
+ yes:no: )
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+ ;;
+esac
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=\$ac_header_compiler"
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_cxx_check_header_mongrel
+
+# ac_fn_cxx_check_func LINENO FUNC VAR
+# ------------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_cxx_check_func ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $2 (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_cxx_check_func
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by snappy $as_me 1.1.2, which was
+generated by GNU Autoconf 2.68. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ $as_echo "PATH: $as_dir"
+ done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *\'*)
+ ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
+ 2)
+ as_fn_append ac_configure_args1 " '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ as_fn_append ac_configure_args " '$ac_arg'"
+ ;;
+ esac
+ done
+done
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log. We remove comments because the quotes in them would
+# cause problems or look ugly anyway.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ $as_echo "## ---------------- ##
+## Cache variables. ##
+## ---------------- ##"
+ echo
+ # The following way of writing the cache mishandles newlines in values,
+(
+ for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+ (set) 2>&1 |
+ case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ sed -n \
+ "s/'\''/'\''\\\\'\'''\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+ ;; #(
+ *)
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+)
+ echo
+
+ $as_echo "## ----------------- ##
+## Output variables. ##
+## ----------------- ##"
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ $as_echo "## ------------------- ##
+## File substitutions. ##
+## ------------------- ##"
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ $as_echo "## ----------- ##
+## confdefs.h. ##
+## ----------- ##"
+ echo
+ cat confdefs.h
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ $as_echo "$as_me: caught signal $ac_signal"
+ $as_echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core *.core core.conftest.* &&
+ rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+' 0
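+# Route HUP (1), INT (2), PIPE (13) and TERM (15) through as_fn_exit so the
+# exit trap above still gets a chance to finish writing config.log.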
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+$as_echo "/* confdefs.h */" > confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_URL "$PACKAGE_URL"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
+if test -n "$CONFIG_SITE"; then
+ # We do not want a PATH search for config.site.
+ case $CONFIG_SITE in #((
+ -*) ac_site_file1=./$CONFIG_SITE;;
+ */*) ac_site_file1=$CONFIG_SITE;;
+ *) ac_site_file1=./$CONFIG_SITE;;
+ esac
+elif test "x$prefix" != xNONE; then
+ ac_site_file1=$prefix/share/config.site
+ ac_site_file2=$prefix/etc/config.site
+else
+ ac_site_file1=$ac_default_prefix/share/config.site
+ ac_site_file2=$ac_default_prefix/etc/config.site
+fi
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+do
+ test "x$ac_site_file" = xNONE && continue
+ if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file" \
+ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special files
+ # actually), so we avoid doing that. DJGPP emulates it as a regular file.
+ if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+$as_echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+$as_echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val=\$ac_cv_env_${ac_var}_value
+ eval ac_new_val=\$ac_env_${ac_var}_value
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ # differences in whitespace do not lead to failure.
+ ac_old_val_w=`echo x $ac_old_val`
+ ac_new_val_w=`echo x $ac_new_val`
+ if test "$ac_old_val_w" != "$ac_new_val_w"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ ac_cache_corrupted=:
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+ eval $ac_var=\$ac_old_val
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5
+$as_echo "$as_me: former value: \`$ac_old_val'" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5
+$as_echo "$as_me: current value: \`$ac_new_val'" >&2;}
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) as_fn_append ac_configure_args " '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
+fi
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
+
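+# Default test language is C: ac_compile and ac_link are the command templates
+# every later conftest probe uses (C++ checks appear further down in this script).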
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+
+# These are flags passed to automake (though they look like gcc flags!)
+am__api_version='1.11'
+
+ac_aux_dir=
+for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+# Reject install programs that cannot install multiple files.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
+$as_echo_n "checking for a BSD-compatible install... " >&6; }
+if test -z "$INSTALL"; then
+if ${ac_cv_path_install+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in #((
+ ./ | .// | /[cC]/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ rm -rf conftest.one conftest.two conftest.dir
+ echo one > conftest.one
+ echo two > conftest.two
+ mkdir conftest.dir
+ if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
+ test -s conftest.one && test -s conftest.two &&
+ test -s conftest.dir/conftest.one &&
+ test -s conftest.dir/conftest.two
+ then
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+
+ done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
+$as_echo "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
+$as_echo_n "checking whether build environment is sane... " >&6; }
+# Just in case
+sleep 1
+echo timestamp > conftest.file
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name. Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+ *[\\\"\#\$\&\'\`$am_lf]*)
+ as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;;
+esac
+case $srcdir in
+ *[\\\"\#\$\&\'\`$am_lf\ \ ]*)
+ as_fn_error $? "unsafe srcdir value: \`$srcdir'" "$LINENO" 5;;
+esac
+
+# Do `set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$*" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ rm -f conftest.file
+ if test "$*" != "X $srcdir/configure conftest.file" \
+ && test "$*" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ as_fn_error $? "ls -t appears to fail. Make sure there is not a broken
+alias in your environment" "$LINENO" 5
+ fi
+
+ test "$2" = conftest.file
+ )
+then
+ # Ok.
+ :
+else
+ as_fn_error $? "newly created file is older than distributed files!
+Check your system clock" "$LINENO" 5
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+test "$program_prefix" != NONE &&
+ program_transform_name="s&^&$program_prefix&;$program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+ program_transform_name="s&\$&$program_suffix&;$program_transform_name"
+# Double any \ or $.
+# By default was `s,x,x', remove it if useless.
+ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
+program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"`
+
+# expand $ac_aux_dir to an absolute path
+am_aux_dir=`cd $ac_aux_dir && pwd`
+
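+# Wrap missing developer tools (aclocal, automake, autoheader, makeinfo, ...)
+# with the 'missing' helper script so a rebuild without them degrades
+# gracefully; quote the path if $am_aux_dir contains whitespace.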
+if test x"${MISSING+set}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
+ *)
+ MISSING="\${SHELL} $am_aux_dir/missing" ;;
+ esac
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --run true"; then
+ am_missing_run="$MISSING --run "
+else
+ am_missing_run=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`missing' script is too old or missing" >&5
+$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;}
+fi
+
+if test x"${install_sh}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+ *)
+ install_sh="\${SHELL} $am_aux_dir/install-sh"
+ esac
+fi
+
+# Installed binaries are usually stripped using `strip' when the user
+# runs `make install-strip'. However `strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the `STRIP' environment variable to overrule this program.
+if test "$cross_compiling" != no; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_STRIP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$STRIP"; then
+ ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+$as_echo "$STRIP" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+ ac_ct_STRIP=$STRIP
+ # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_STRIP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_STRIP"; then
+ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_STRIP="strip"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+$as_echo "$ac_ct_STRIP" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_STRIP" = x; then
+ STRIP=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ STRIP=$ac_ct_STRIP
+ fi
+else
+ STRIP="$ac_cv_prog_STRIP"
+fi
+
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5
+$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
+if test -z "$MKDIR_P"; then
+ if ${ac_cv_path_mkdir+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in mkdir gmkdir; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue
+ case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
+ 'mkdir (GNU coreutils) '* | \
+ 'mkdir (coreutils) '* | \
+ 'mkdir (fileutils) '4.1*)
+ ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext
+ break 3;;
+ esac
+ done
+ done
+ done
+IFS=$as_save_IFS
+
+fi
+
+ test -d ./--version && rmdir ./--version
+ if test "${ac_cv_path_mkdir+set}" = set; then
+ MKDIR_P="$ac_cv_path_mkdir -p"
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for MKDIR_P within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ MKDIR_P="$ac_install_sh -d"
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
+$as_echo "$MKDIR_P" >&6; }
+
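+# If MKDIR_P is a relative path (contains a slash but is not absolute),
+# prefix it with $(top_builddir) so it still works from sub-Makefiles.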
+mkdir_p="$MKDIR_P"
+case $mkdir_p in
+ [\\/$]* | ?:[\\/]*) ;;
+ */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
+esac
+
+for ac_prog in gawk mawk nawk awk
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_AWK+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$AWK"; then
+ ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_AWK="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
+$as_echo "$AWK" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$AWK" && break
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
+set x ${MAKE-make}
+ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
+if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.make <<\_ACEOF
+SHELL = /bin/sh
+all:
+ @echo '@@@%%%=$(MAKE)=@@@%%%'
+_ACEOF
+# GNU make sometimes prints "make[1]: Entering ...", which would confuse us.
+case `${MAKE-make} -f conftest.make 2>/dev/null` in
+ *@@@%%%=?*=@@@%%%*)
+ eval ac_cv_prog_make_${ac_make}_set=yes;;
+ *)
+ eval ac_cv_prog_make_${ac_make}_set=no;;
+esac
+rm -f conftest.make
+fi
+if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ SET_MAKE=
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ SET_MAKE="MAKE=${MAKE-make}"
+fi
+
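+# Probe whether this filesystem accepts directory names beginning with a dot;
+# the result (am__leading_dot) decides between '.deps' and '_deps' below.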
+rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+ am__leading_dot=.
+else
+ am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+ # is not polluted with repeated "-I."
+ am__isrc=' -I$(srcdir)'
+ # test to see if srcdir already configured
+ if test -f $srcdir/config.status; then
+ as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5
+ fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+ if (cygpath --version) >/dev/null 2>/dev/null; then
+ CYGPATH_W='cygpath -w'
+ else
+ CYGPATH_W=echo
+ fi
+fi
+
+
+# Define the identity of the package.
+ PACKAGE='snappy'
+ VERSION='1.1.2'
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE "$PACKAGE"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define VERSION "$VERSION"
+_ACEOF
+
+# Some tools Automake needs.
+
+ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"}
+
+
+AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"}
+
+
+AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"}
+
+
+AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
+
+
+MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
+
+# We need awk for the "check" target. The system "awk" is bad on
+# some platforms.
+# Always define AMTAR for backward compatibility. Yes, it's still used
+# in the wild :-( We should find a proper way to deprecate it ...
+AMTAR='$${TAR-tar}'
+
+am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
+
+
+
+
+
+
+case `pwd` in
+ *\ * | *\ *)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5
+$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;;
+esac
+
+
+
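+# Serial numbers of the libtool (LT_INIT) macros that produced the libtool
+# support code which follows.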
+macro_version='2.4.2'
+macro_revision='1.3337'
+
+
+
+
+
+
+
+
+
+
+
+
+
+ltmain="$ac_aux_dir/ltmain.sh"
+
+# Make sure we can run config.sub.
+$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
+ as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
+$as_echo_n "checking build system type... " >&6; }
+if ${ac_cv_build+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_build_alias=$build_alias
+test "x$ac_build_alias" = x &&
+ ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
+test "x$ac_build_alias" = x &&
+ as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
+ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
+$as_echo "$ac_cv_build" >&6; }
+case $ac_cv_build in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;;
+esac
+build=$ac_cv_build
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_build
+shift
+build_cpu=$1
+build_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+build_os=$*
+IFS=$ac_save_IFS
+case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
+$as_echo_n "checking host system type... " >&6; }
+if ${ac_cv_host+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "x$host_alias" = x; then
+ ac_cv_host=$ac_cv_build
+else
+ ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
+$as_echo "$ac_cv_host" >&6; }
+case $ac_cv_host in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;;
+esac
+host=$ac_cv_host
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_host
+shift
+host_cpu=$1
+host_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+host_os=$*
+IFS=$ac_save_IFS
+case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
+
+
+# Backslashify metacharacters that are still active within
+# double-quoted strings.
+sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\(["`\\]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Sed substitution to delay expansion of an escaped single quote.
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+
+# Sed substitution to avoid accidental globbing in evaled expressions
+no_glob_subst='s/\*/\\\*/g'
+
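+# Build a long string of backslashes, then probe 'print -r', 'printf %s' and a
+# here-document fallback for a command that reproduces it unmangled; the
+# winner is reported below as "print -r", "printf" or "cat".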
+ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5
+$as_echo_n "checking how to print strings... " >&6; }
+# Test print first, because it will be a builtin if present.
+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
+ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
+ ECHO='print -r --'
+elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
+ ECHO='printf %s\n'
+else
+ # Use this function as a fallback that always works.
+ func_fallback_echo ()
+ {
+ eval 'cat <<_LTECHO_EOF
+$1
+_LTECHO_EOF'
+ }
+ ECHO='func_fallback_echo'
+fi
+
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+ $ECHO ""
+}
+
+case "$ECHO" in
+ printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5
+$as_echo "printf" >&6; } ;;
+ print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5
+$as_echo "print -r" >&6; } ;;
+ *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5
+$as_echo "cat" >&6; } ;;
+esac
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+DEPDIR="${am__leading_dot}deps"
+
+ac_config_commands="$ac_config_commands depfiles"
+
+
+am_make=${MAKE-make}
+cat > confinc << 'END'
+am__doit:
+ @echo this is the am__doit target
+.PHONY: am__doit
+END
+# If we don't find an include directive, just comment out the code.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5
+$as_echo_n "checking for style of include used by $am_make... " >&6; }
+am__include="#"
+am__quote=
+_am_result=none
+# First try GNU make style include.
+echo "include confinc" > confmf
+# Ignore all kinds of additional output from `make'.
+case `$am_make -s -f confmf 2> /dev/null` in #(
+*the\ am__doit\ target*)
+ am__include=include
+ am__quote=
+ _am_result=GNU
+ ;;
+esac
+# Now try BSD make style include.
+if test "$am__include" = "#"; then
+ echo '.include "confinc"' > confmf
+ case `$am_make -s -f confmf 2> /dev/null` in #(
+ *the\ am__doit\ target*)
+ am__include=.include
+ am__quote="\""
+ _am_result=BSD
+ ;;
+ esac
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5
+$as_echo "$_am_result" >&6; }
+rm -f confinc confmf
+
+# Check whether --enable-dependency-tracking was given.
+if test "${enable_dependency_tracking+set}" = set; then :
+ enableval=$enable_dependency_tracking;
+fi
+
+if test "x$enable_dependency_tracking" != xno; then
+ am_depcomp="$ac_aux_dir/depcomp"
+ AMDEPBACKSLASH='\'
+ am__nodep='_no'
+fi
+ if test "x$enable_dependency_tracking" != xno; then
+ AMDEP_TRUE=
+ AMDEP_FALSE='#'
+else
+ AMDEP_TRUE='#'
+ AMDEP_FALSE=
+fi
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ fi
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl.exe
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl.exe
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CC" && break
+done
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+fi
+
+fi
+
+
+test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5; }
+
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+ { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ sed '10a\
+... rest of stderr output deleted ...
+ 10q' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ fi
+ rm -f conftest.er1 conftest.err
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+done
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers and get a first idea
+# of exeext.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
+$as_echo_n "checking whether the C compiler works... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+ esac
+done
+rm -f $ac_rmfiles
+
+if { { ac_try="$ac_link_default"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link_default") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile. We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ then :; else
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ fi
+ # We set ac_cv_exeext here because the later test for it is not
+ # safe: cross compilers may not add the suffix if given an `-o'
+ # argument, so we may need to know it at that point already.
+ # Even if this section looks crufty: it has the advantage of
+ # actually working.
+ break;;
+ * )
+ break;;
+ esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+ ac_file=''
+fi
+if test -z "$ac_file"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+$as_echo_n "checking for C compiler default output file name... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+ac_exeext=$ac_cv_exeext
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+ { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ if { ac_try='./conftest$ac_cv_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if ${ac_cv_objext+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ for ac_file in conftest.o conftest.obj conftest.*; do
+ test -f "$ac_file" || continue;
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if ${ac_cv_c_compiler_gnu+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GCC=yes
+else
+ GCC=
+fi
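+# Remember whether the user set CFLAGS; if not, the -g probe below picks a
+# default of "-g -O2" for GCC, or "-g"/"-O2"/empty depending on what works.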
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if ${ac_cv_prog_cc_g+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+else
+ CFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_c89=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+ xno)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+if test "x$ac_cv_prog_cc_c89" != xno; then :
+
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+depcc="$CC" am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CC_dependencies_compiler_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named `D' -- because `-MD' means `put the output
+ # in D'.
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CC_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+ case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+ # Solaris 8's {/usr,}/bin/sh.
+ touch sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle `-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # after this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok `-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CC_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CC_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; }
+CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
+ am__fastdepCC_TRUE=
+ am__fastdepCC_FALSE='#'
+else
+ am__fastdepCC_TRUE='#'
+ am__fastdepCC_FALSE=
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5
+$as_echo_n "checking for a sed that does not truncate output... " >&6; }
+if ${ac_cv_path_SED+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/
+ for ac_i in 1 2 3 4 5 6 7; do
+ ac_script="$ac_script$as_nl$ac_script"
+ done
+ echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed
+ { ac_script=; unset ac_script;}
+ if test -z "$SED"; then
+ ac_path_SED_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in sed gsed; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_SED="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_SED" && $as_test_x "$ac_path_SED"; } || continue
+# Check for GNU ac_path_SED and select it if it is found.
+ # Check for GNU $ac_path_SED
+case `"$ac_path_SED" --version 2>&1` in
+*GNU*)
+ ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo '' >> "conftest.nl"
+ "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_SED_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_SED="$ac_path_SED"
+ ac_path_SED_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_SED_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_SED"; then
+ as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5
+ fi
+else
+ ac_cv_path_SED=$SED
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5
+$as_echo "$ac_cv_path_SED" >&6; }
+ SED="$ac_cv_path_SED"
+ rm -f conftest.sed
+
+test -z "$SED" && SED=sed
+Xsed="$SED -e 1s/^X//"
+
+
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if ${ac_cv_path_GREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$GREP"; then
+ ac_path_GREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
+# Check for GNU ac_path_GREP and select it if it is found.
+ # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'GREP' >> "conftest.nl"
+ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_GREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_GREP="$ac_path_GREP"
+ ac_path_GREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_GREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_GREP"; then
+ as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+$as_echo_n "checking for egrep... " >&6; }
+if ${ac_cv_path_EGREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+ then ac_cv_path_EGREP="$GREP -E"
+ else
+ if test -z "$EGREP"; then
+ ac_path_EGREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in egrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+ # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'EGREP' >> "conftest.nl"
+ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_EGREP="$ac_path_EGREP"
+ ac_path_EGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_EGREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_EGREP"; then
+ as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_EGREP=$EGREP
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+$as_echo "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5
+$as_echo_n "checking for fgrep... " >&6; }
+if ${ac_cv_path_FGREP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1
+ then ac_cv_path_FGREP="$GREP -F"
+ else
+ if test -z "$FGREP"; then
+ ac_path_FGREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in fgrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_FGREP" && $as_test_x "$ac_path_FGREP"; } || continue
+# Check for GNU ac_path_FGREP and select it if it is found.
+ # Check for GNU $ac_path_FGREP
+case `"$ac_path_FGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'FGREP' >> "conftest.nl"
+ "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ as_fn_arith $ac_count + 1 && ac_count=$as_val
+ if test $ac_count -gt ${ac_path_FGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_FGREP="$ac_path_FGREP"
+ ac_path_FGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_FGREP_found && break 3
+ done
+ done
+ done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_FGREP"; then
+ as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ fi
+else
+ ac_cv_path_FGREP=$FGREP
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5
+$as_echo "$ac_cv_path_FGREP" >&6; }
+ FGREP="$ac_cv_path_FGREP"
+
+
+test -z "$GREP" && GREP=grep
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then :
+ withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [\\/]* | ?:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the pathname of ld
+ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$LD"; then
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some variants of GNU ld only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break
+ ;;
+ *)
+ test "$with_gnu_ld" != yes && break
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if ${lt_cv_prog_gnu_ld+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ lt_cv_prog_gnu_ld=yes
+ ;;
+*)
+ lt_cv_prog_gnu_ld=no
+ ;;
+esac
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5
+$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; }
+if ${lt_cv_path_NM+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$NM"; then
+ # Let the user override the test.
+ lt_cv_path_NM="$NM"
+else
+ lt_nm_to_check="${ac_tool_prefix}nm"
+ if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+ lt_nm_to_check="$lt_nm_to_check nm"
+ fi
+ for lt_tmp_nm in $lt_nm_to_check; do
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ tmp_nm="$ac_dir/$lt_tmp_nm"
+ if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ # nm: unknown option "B" ignored
+ # Tru64's nm complains that /dev/null is an invalid object file
+ case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in
+ */dev/null* | *'Invalid file or object type'*)
+ lt_cv_path_NM="$tmp_nm -B"
+ break
+ ;;
+ *)
+ case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in
+ */dev/null*)
+ lt_cv_path_NM="$tmp_nm -p"
+ break
+ ;;
+ *)
+ lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+ continue # so that we can try to find one that supports BSD flags
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+ done
+ : ${lt_cv_path_NM=no}
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5
+$as_echo "$lt_cv_path_NM" >&6; }
+if test "$lt_cv_path_NM" != "no"; then
+ NM="$lt_cv_path_NM"
+else
+ # Didn't find any BSD compatible name lister, look for dumpbin.
+ if test -n "$DUMPBIN"; then :
+ # Let the user override the test.
+ else
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in dumpbin "link -dump"
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_DUMPBIN+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$DUMPBIN"; then
+ ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+DUMPBIN=$ac_cv_prog_DUMPBIN
+if test -n "$DUMPBIN"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5
+$as_echo "$DUMPBIN" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$DUMPBIN" && break
+ done
+fi
+if test -z "$DUMPBIN"; then
+ ac_ct_DUMPBIN=$DUMPBIN
+ for ac_prog in dumpbin "link -dump"
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_DUMPBIN"; then
+ ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_DUMPBIN="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN
+if test -n "$ac_ct_DUMPBIN"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5
+$as_echo "$ac_ct_DUMPBIN" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_DUMPBIN" && break
+done
+
+ if test "x$ac_ct_DUMPBIN" = x; then
+ DUMPBIN=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ DUMPBIN=$ac_ct_DUMPBIN
+ fi
+fi
+
+ case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in
+ *COFF*)
+ DUMPBIN="$DUMPBIN -symbols"
+ ;;
+ *)
+ DUMPBIN=:
+ ;;
+ esac
+ fi
+
+ if test "$DUMPBIN" != ":"; then
+ NM="$DUMPBIN"
+ fi
+fi
+test -z "$NM" && NM=nm
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5
+$as_echo_n "checking the name lister ($NM) interface... " >&6; }
+if ${lt_cv_nm_interface+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_nm_interface="BSD nm"
+ echo "int some_variable = 0;" > conftest.$ac_ext
+ (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5)
+ (eval "$ac_compile" 2>conftest.err)
+ cat conftest.err >&5
+ (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
+ (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
+ cat conftest.err >&5
+ (eval echo "\"\$as_me:$LINENO: output\"" >&5)
+ cat conftest.out >&5
+ if $GREP 'External.*some_variable' conftest.out > /dev/null; then
+ lt_cv_nm_interface="MS dumpbin"
+ fi
+ rm -f conftest*
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5
+$as_echo "$lt_cv_nm_interface" >&6; }
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5
+$as_echo_n "checking whether ln -s works... " >&6; }
+LN_S=$as_ln_s
+if test "$LN_S" = "ln -s"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5
+$as_echo "no, using $LN_S" >&6; }
+fi
+
+# find the maximum length of command line arguments
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5
+$as_echo_n "checking the maximum length of command line arguments... " >&6; }
+if ${lt_cv_sys_max_cmd_len+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ i=0
+ teststring="ABCD"
+
+ case $build_os in
+ msdosdjgpp*)
+ # On DJGPP, this test can blow up pretty badly due to problems in libc
+ # (any single argument exceeding 2000 bytes causes a buffer overrun
+ # during glob expansion). Even if it were fixed, the result of this
+ # check would be larger than it should be.
+ lt_cv_sys_max_cmd_len=12288; # 12K is about right
+ ;;
+
+ gnu*)
+ # Under GNU Hurd, this test is not required because there is
+ # no limit to the length of command line arguments.
+ # Libtool will interpret -1 as no limit whatsoever
+ lt_cv_sys_max_cmd_len=-1;
+ ;;
+
+ cygwin* | mingw* | cegcc*)
+ # On Win9x/ME, this test blows up -- it succeeds, but takes
+ # about 5 minutes as the teststring grows exponentially.
+ # Worse, since 9x/ME are not pre-emptively multitasking,
+ # you end up with a "frozen" computer, even though with patience
+ # the test eventually succeeds (with a max line length of 256k).
+ # Instead, let's just punt: use the minimum linelength reported by
+ # all of the supported platforms: 8192 (on NT/2K/XP).
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ mint*)
+ # On MiNT this can take a long time and run out of memory.
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ amigaos*)
+ # On AmigaOS with pdksh, this test takes hours, literally.
+ # So we just punt and use a minimum line length of 8192.
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ netbsd* | freebsd* | openbsd* | darwin* | dragonfly*)
+ # This has been around since 386BSD, at least. Likely further.
+ if test -x /sbin/sysctl; then
+ lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+ elif test -x /usr/sbin/sysctl; then
+ lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
+ else
+ lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs
+ fi
+ # And add a safety zone
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ ;;
+
+ interix*)
+ # We know the value 262144 and hardcode it with a safety zone (like BSD)
+ lt_cv_sys_max_cmd_len=196608
+ ;;
+
+ os2*)
+ # The test takes a long time on OS/2.
+ lt_cv_sys_max_cmd_len=8192
+ ;;
+
+ osf*)
+ # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+ # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+ # nice to cause kernel panics so lets avoid the loop below.
+ # First set a reasonable default.
+ lt_cv_sys_max_cmd_len=16384
+ #
+ if test -x /sbin/sysconfig; then
+ case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+ *1*) lt_cv_sys_max_cmd_len=-1 ;;
+ esac
+ fi
+ ;;
+ sco3.2v5*)
+ lt_cv_sys_max_cmd_len=102400
+ ;;
+ sysv5* | sco5v6* | sysv4.2uw2*)
+ kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+ if test -n "$kargmax"; then
+ lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'`
+ else
+ lt_cv_sys_max_cmd_len=32768
+ fi
+ ;;
+ *)
+ lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+ if test -n "$lt_cv_sys_max_cmd_len"; then
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ else
+ # Make teststring a little bigger before we do anything with it.
+ # a 1K string should be a reasonable start.
+ for i in 1 2 3 4 5 6 7 8 ; do
+ teststring=$teststring$teststring
+ done
+ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+ # If test is not a shell built-in, we'll probably end up computing a
+ # maximum length that is only half of the actual maximum length, but
+ # we can't tell.
+ while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \
+ = "X$teststring$teststring"; } >/dev/null 2>&1 &&
+ test $i != 17 # 1/2 MB should be enough
+ do
+ i=`expr $i + 1`
+ teststring=$teststring$teststring
+ done
+ # Only check the string length outside the loop.
+ lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+ teststring=
+ # Add a significant safety factor because C++ compilers can tack on
+ # massive amounts of additional arguments before passing them to the
+ # linker. It appears as though 1/2 is a usable value.
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+ fi
+ ;;
+ esac
+
+fi
+
+if test -n "$lt_cv_sys_max_cmd_len" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5
+$as_echo "$lt_cv_sys_max_cmd_len" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5
+$as_echo "none" >&6; }
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+
+
+
+
+
+
+: ${CP="cp -f"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5
+$as_echo_n "checking whether the shell understands some XSI constructs... " >&6; }
+# Try some XSI features
+xsi_shell=no
+( _lt_dummy="a/b/c"
+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \
+ = c,a/b,b/c, \
+ && eval 'test $(( 1 + 1 )) -eq 2 \
+ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \
+ && xsi_shell=yes
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5
+$as_echo "$xsi_shell" >&6; }
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5
+$as_echo_n "checking whether the shell understands \"+=\"... " >&6; }
+lt_shell_append=no
+( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \
+ >/dev/null 2>&1 \
+ && lt_shell_append=yes
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5
+$as_echo "$lt_shell_append" >&6; }
+
+
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ lt_unset=unset
+else
+ lt_unset=false
+fi
+
+
+
+
+
+# test EBCDIC or ASCII
+case `echo X|tr X '\101'` in
+ A) # ASCII based system
+ # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
+ lt_SP2NL='tr \040 \012'
+ lt_NL2SP='tr \015\012 \040\040'
+ ;;
+ *) # EBCDIC based system
+ lt_SP2NL='tr \100 \n'
+ lt_NL2SP='tr \r\n \100\100'
+ ;;
+esac
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5
+$as_echo_n "checking how to convert $build file names to $host format... " >&6; }
+if ${lt_cv_to_host_file_cmd+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $host in
+ *-*-mingw* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
+ ;;
+ *-*-cygwin* )
+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
+ ;;
+ * ) # otherwise, assume *nix
+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
+ ;;
+ esac
+ ;;
+ *-*-cygwin* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
+ ;;
+ *-*-cygwin* )
+ lt_cv_to_host_file_cmd=func_convert_file_noop
+ ;;
+ * ) # otherwise, assume *nix
+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
+ ;;
+ esac
+ ;;
+ * ) # unhandled hosts (and "normal" native builds)
+ lt_cv_to_host_file_cmd=func_convert_file_noop
+ ;;
+esac
+
+fi
+
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5
+$as_echo "$lt_cv_to_host_file_cmd" >&6; }
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5
+$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; }
+if ${lt_cv_to_tool_file_cmd+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ #assume ordinary cross tools, or native build.
+lt_cv_to_tool_file_cmd=func_convert_file_noop
+case $host in
+ *-*-mingw* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
+ ;;
+ esac
+ ;;
+esac
+
+fi
+
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5
+$as_echo "$lt_cv_to_tool_file_cmd" >&6; }
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5
+$as_echo_n "checking for $LD option to reload object files... " >&6; }
+if ${lt_cv_ld_reload_flag+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_ld_reload_flag='-r'
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5
+$as_echo "$lt_cv_ld_reload_flag" >&6; }
+reload_flag=$lt_cv_ld_reload_flag
+case $reload_flag in
+"" | " "*) ;;
+*) reload_flag=" $reload_flag" ;;
+esac
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+ if test "$GCC" != yes; then
+ reload_cmds=false
+ fi
+ ;;
+ darwin*)
+ if test "$GCC" = yes; then
+ reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
+ else
+ reload_cmds='$LD$reload_flag -o $output$reload_objs'
+ fi
+ ;;
+esac
+
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args.
+set dummy ${ac_tool_prefix}objdump; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_OBJDUMP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$OBJDUMP"; then
+ ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+OBJDUMP=$ac_cv_prog_OBJDUMP
+if test -n "$OBJDUMP"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5
+$as_echo "$OBJDUMP" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OBJDUMP"; then
+ ac_ct_OBJDUMP=$OBJDUMP
+ # Extract the first word of "objdump", so it can be a program name with args.
+set dummy objdump; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_OBJDUMP"; then
+ ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_OBJDUMP="objdump"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP
+if test -n "$ac_ct_OBJDUMP"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5
+$as_echo "$ac_ct_OBJDUMP" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_OBJDUMP" = x; then
+ OBJDUMP="false"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ OBJDUMP=$ac_ct_OBJDUMP
+ fi
+else
+ OBJDUMP="$ac_cv_prog_OBJDUMP"
+fi
+
+test -z "$OBJDUMP" && OBJDUMP=objdump
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5
+$as_echo_n "checking how to recognize dependent libraries... " >&6; }
+if ${lt_cv_deplibs_check_method+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# `unknown' -- same as none, but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by making test program.
+# 'file_magic [[regex]]' -- check by looking for files in library path
+# which responds to the $file_magic_cmd with a given extended regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
+
+case $host_os in
+aix[4-9]*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+beos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+bsdi[45]*)
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ ;;
+
+cygwin*)
+ # func_win32_libid is a shell function defined in ltmain.sh
+ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+ lt_cv_file_magic_cmd='func_win32_libid'
+ ;;
+
+mingw* | pw32*)
+ # Base MSYS/MinGW do not provide the 'file' command needed by
+ # func_win32_libid shell function, so use a weaker test based on 'objdump',
+ # unless we find 'file', for example because we are cross-compiling.
+ # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin.
+ if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then
+ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+ lt_cv_file_magic_cmd='func_win32_libid'
+ else
+ # Keep this pattern in sync with the one in func_win32_libid.
+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ fi
+ ;;
+
+cegcc*)
+ # use the weaker test based on 'objdump'. See mingw*.
+ lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ ;;
+
+darwin* | rhapsody*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+freebsd* | dragonfly*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+ case $host_cpu in
+ i*86 )
+ # Not sure whether the presence of OpenBSD here was a mistake.
+ # Let's accept both of them until this is cleared up.
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ ;;
+ esac
+ else
+ lt_cv_deplibs_check_method=pass_all
+ fi
+ ;;
+
+gnu*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+haiku*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+hpux10.20* | hpux11*)
+ lt_cv_file_magic_cmd=/usr/bin/file
+ case $host_cpu in
+ ia64*)
+ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64'
+ lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
+ ;;
+ hppa*64*)
+ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'
+ lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
+ ;;
+ *)
+ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library'
+ lt_cv_file_magic_test_file=/usr/lib/libc.sl
+ ;;
+ esac
+ ;;
+
+interix[3-9]*)
+ # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here
+ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$'
+ ;;
+
+irix5* | irix6* | nonstopux*)
+ case $LD in
+ *-32|*"-32 ") libmagic=32-bit;;
+ *-n32|*"-n32 ") libmagic=N32;;
+ *-64|*"-64 ") libmagic=64-bit;;
+ *) libmagic=never-match;;
+ esac
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+netbsd* | netbsdelf*-gnu)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$'
+ fi
+ ;;
+
+newos6*)
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libnls.so
+ ;;
+
+*nto* | *qnx*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+openbsd*)
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
+ fi
+ ;;
+
+osf3* | osf4* | osf5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+rdos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+solaris*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv4 | sysv4.3*)
+ case $host_vendor in
+ motorola)
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]'
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+ ;;
+ ncr)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ sequent)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )'
+ ;;
+ sni)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib"
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+ siemens)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ pc)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ esac
+ ;;
+
+tpf*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+esac
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5
+$as_echo "$lt_cv_deplibs_check_method" >&6; }
+
+file_magic_glob=
+want_nocaseglob=no
+if test "$build" = "$host"; then
+ case $host_os in
+ mingw* | pw32*)
+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
+ want_nocaseglob=yes
+ else
+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"`
+ fi
+ ;;
+ esac
+fi
+
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args.
+set dummy ${ac_tool_prefix}dlltool; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_DLLTOOL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$DLLTOOL"; then
+ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+DLLTOOL=$ac_cv_prog_DLLTOOL
+if test -n "$DLLTOOL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5
+$as_echo "$DLLTOOL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_DLLTOOL"; then
+ ac_ct_DLLTOOL=$DLLTOOL
+ # Extract the first word of "dlltool", so it can be a program name with args.
+set dummy dlltool; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_DLLTOOL"; then
+ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_DLLTOOL="dlltool"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL
+if test -n "$ac_ct_DLLTOOL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5
+$as_echo "$ac_ct_DLLTOOL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_DLLTOOL" = x; then
+ DLLTOOL="false"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ DLLTOOL=$ac_ct_DLLTOOL
+ fi
+else
+ DLLTOOL="$ac_cv_prog_DLLTOOL"
+fi
+
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5
+$as_echo_n "checking how to associate runtime and link libraries... " >&6; }
+if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_sharedlib_from_linklib_cmd='unknown'
+
+case $host_os in
+cygwin* | mingw* | pw32* | cegcc*)
+ # two different shell functions defined in ltmain.sh
+ # decide which to use based on capabilities of $DLLTOOL
+ case `$DLLTOOL --help 2>&1` in
+ *--identify-strict*)
+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+ ;;
+ *)
+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+ ;;
+ esac
+ ;;
+*)
+ # fallback: assume linklib IS sharedlib
+ lt_cv_sharedlib_from_linklib_cmd="$ECHO"
+ ;;
+esac
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5
+$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; }
+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ for ac_prog in ar
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_AR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$AR"; then
+ ac_cv_prog_AR="$AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_AR="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+AR=$ac_cv_prog_AR
+if test -n "$AR"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
+$as_echo "$AR" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$AR" && break
+ done
+fi
+if test -z "$AR"; then
+ ac_ct_AR=$AR
+ for ac_prog in ar
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_AR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_AR"; then
+ ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_AR="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_AR=$ac_cv_prog_ac_ct_AR
+if test -n "$ac_ct_AR"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5
+$as_echo "$ac_ct_AR" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_AR" && break
+done
+
+ if test "x$ac_ct_AR" = x; then
+ AR="false"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ AR=$ac_ct_AR
+ fi
+fi
+
+: ${AR=ar}
+: ${AR_FLAGS=cru}
+
+
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5
+$as_echo_n "checking for archiver @FILE support... " >&6; }
+if ${lt_cv_ar_at_file+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_ar_at_file=no
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ echo conftest.$ac_objext > conftest.lst
+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5'
+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
+ (eval $lt_ar_try) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ if test "$ac_status" -eq 0; then
+ # Ensure the archiver fails upon bogus file names.
+ rm -f conftest.$ac_objext libconftest.a
+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
+ (eval $lt_ar_try) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ if test "$ac_status" -ne 0; then
+ lt_cv_ar_at_file=@
+ fi
+ fi
+ rm -f conftest.* libconftest.a
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5
+$as_echo "$lt_cv_ar_at_file" >&6; }
+
+if test "x$lt_cv_ar_at_file" = xno; then
+ archiver_list_spec=
+else
+ archiver_list_spec=$lt_cv_ar_at_file
+fi
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_STRIP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$STRIP"; then
+ ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+$as_echo "$STRIP" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+ ac_ct_STRIP=$STRIP
+ # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_STRIP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_STRIP"; then
+ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_STRIP="strip"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+$as_echo "$ac_ct_STRIP" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_STRIP" = x; then
+ STRIP=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ STRIP=$ac_ct_STRIP
+ fi
+else
+ STRIP="$ac_cv_prog_STRIP"
+fi
+
+test -z "$STRIP" && STRIP=:
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_RANLIB+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$RANLIB"; then
+ ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+RANLIB=$ac_cv_prog_RANLIB
+if test -n "$RANLIB"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5
+$as_echo "$RANLIB" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_RANLIB"; then
+ ac_ct_RANLIB=$RANLIB
+ # Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_RANLIB+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_RANLIB"; then
+ ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_RANLIB="ranlib"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+if test -n "$ac_ct_RANLIB"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
+$as_echo "$ac_ct_RANLIB" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_RANLIB" = x; then
+ RANLIB=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ RANLIB=$ac_ct_RANLIB
+ fi
+else
+ RANLIB="$ac_cv_prog_RANLIB"
+fi
+
+test -z "$RANLIB" && RANLIB=:
+
+
+
+
+
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+ case $host_os in
+ openbsd*)
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
+ ;;
+ *)
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
+ ;;
+ esac
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
+fi
+
+case $host_os in
+ darwin*)
+ lock_old_archive_extraction=yes ;;
+ *)
+ lock_old_archive_extraction=no ;;
+esac
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5
+$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; }
+if ${lt_cv_sys_global_symbol_pipe+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix. What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[BCDEGRST]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([_A-Za-z][_A-Za-z0-9]*\)'
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+ symcode='[BCDT]'
+ ;;
+cygwin* | mingw* | pw32* | cegcc*)
+ symcode='[ABCDGISTW]'
+ ;;
+hpux*)
+ if test "$host_cpu" = ia64; then
+ symcode='[ABCDEGRST]'
+ fi
+ ;;
+irix* | nonstopux*)
+ symcode='[BCDEGRST]'
+ ;;
+osf*)
+ symcode='[BCDEGQRST]'
+ ;;
+solaris*)
+ symcode='[BDRT]'
+ ;;
+sco3.2v5*)
+ symcode='[DT]'
+ ;;
+sysv4.2uw2*)
+ symcode='[DT]'
+ ;;
+sysv5* | sco5v6* | unixware* | OpenUNIX*)
+ symcode='[ABDT]'
+ ;;
+sysv4)
+ symcode='[DFNSTU]'
+ ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+case `$NM -V 2>&1` in
+*GNU* | *'with BFD'*)
+ symcode='[ABCDGIRSTW]' ;;
+esac
+
+# Transform an extracted symbol line into a proper C declaration.
+# Some systems (esp. on ia64) link data and code symbols differently,
+# so use this general approach.
+lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'"
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'"
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $build_os in
+mingw*)
+ opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+ ;;
+esac
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+ # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+ symxfrm="\\1 $ac_symprfx\\2 \\2"
+
+ # Write the raw and C identifiers.
+ if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+ # Fake it for dumpbin and say T for any non-static function
+ # and D for any global variable.
+ # Also find C++ and __fastcall symbols from MSVC++,
+ # which start with @ or ?.
+ lt_cv_sys_global_symbol_pipe="$AWK '"\
+" {last_section=section; section=\$ 3};"\
+" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
+" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
+" \$ 0!~/External *\|/{next};"\
+" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
+" {if(hide[section]) next};"\
+" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\
+" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\
+" s[1]~/^[@?]/{print s[1], s[1]; next};"\
+" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\
+" ' prfx=^$ac_symprfx"
+ else
+ lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+ fi
+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'"
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+
+ rm -f conftest*
+ cat > conftest.$ac_ext <<_LT_EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(void);
+void nm_test_func(void){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+_LT_EOF
+
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5
+ (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && test -s "$nlist"; then
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+ if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<_LT_EOF > conftest.$ac_ext
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+ relocations are performed -- see ld's documentation on pseudo-relocs. */
+# define LT_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data. */
+# define LT_DLSYM_CONST
+#else
+# define LT_DLSYM_CONST const
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_LT_EOF
+ # Now generate the symbol file.
+ eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
+
+ cat <<_LT_EOF >> conftest.$ac_ext
+
+/* The mapping between symbol names and symbols. */
+LT_DLSYM_CONST struct {
+ const char *name;
+ void *address;
+}
+lt__PROGRAM__LTX_preloaded_symbols[] =
+{
+ { "@PROGRAM@", (void *) 0 },
+_LT_EOF
+ $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+ cat <<\_LT_EOF >> conftest.$ac_ext
+ {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt__PROGRAM__LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+_LT_EOF
+ # Now try linking the two files.
+ mv conftest.$ac_objext conftstm.$ac_objext
+ lt_globsym_save_LIBS=$LIBS
+ lt_globsym_save_CFLAGS=$CFLAGS
+ LIBS="conftstm.$ac_objext"
+ CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag"
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && test -s conftest${ac_exeext}; then
+ pipe_works=yes
+ fi
+ LIBS=$lt_globsym_save_LIBS
+ CFLAGS=$lt_globsym_save_CFLAGS
+ else
+ echo "cannot find nm_test_func in $nlist" >&5
+ fi
+ else
+ echo "cannot find nm_test_var in $nlist" >&5
+ fi
+ else
+ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5
+ fi
+ else
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ fi
+ rm -rf conftest* conftst*
+
+ # Do not use the global_symbol_pipe unless it works.
+ if test "$pipe_works" = yes; then
+ break
+ else
+ lt_cv_sys_global_symbol_pipe=
+ fi
+done
+
+fi
+
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+ lt_cv_sys_global_symbol_to_cdecl=
+fi
+if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5
+$as_echo "failed" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5
+$as_echo "ok" >&6; }
+fi
+
+# Response file support.
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+ nm_file_list_spec='@'
+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then
+ nm_file_list_spec='@'
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5
+$as_echo_n "checking for sysroot... " >&6; }
+
+# Check whether --with-sysroot was given.
+if test "${with_sysroot+set}" = set; then :
+ withval=$with_sysroot;
+else
+ with_sysroot=no
+fi
+
+
+lt_sysroot=
+case ${with_sysroot} in #(
+ yes)
+ if test "$GCC" = yes; then
+ lt_sysroot=`$CC --print-sysroot 2>/dev/null`
+ fi
+ ;; #(
+ /*)
+ lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"`
+ ;; #(
+ no|'')
+ ;; #(
+ *)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_sysroot}" >&5
+$as_echo "${with_sysroot}" >&6; }
+ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5
+ ;;
+esac
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5
+$as_echo "${lt_sysroot:-no}" >&6; }
+
+
+
+
+
+# Check whether --enable-libtool-lock was given.
+if test "${enable_libtool_lock+set}" = set; then :
+ enableval=$enable_libtool_lock;
+fi
+
+test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+ia64-*-hpux*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *ELF-32*)
+ HPUX_IA64_MODE="32"
+ ;;
+ *ELF-64*)
+ HPUX_IA64_MODE="64"
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+*-*-irix6*)
+ # Find out which ABI we are using.
+ echo '#line '$LINENO' "configure"' > conftest.$ac_ext
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ if test "$lt_cv_prog_gnu_ld" = yes; then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -melf32bsmip"
+ ;;
+ *N32*)
+ LD="${LD-ld} -melf32bmipn32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -melf64bmip"
+ ;;
+ esac
+ else
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -32"
+ ;;
+ *N32*)
+ LD="${LD-ld} -n32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -64"
+ ;;
+ esac
+ fi
+ fi
+ rm -rf conftest*
+ ;;
+
+x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \
+s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ case `/usr/bin/file conftest.o` in
+ *32-bit*)
+ case $host in
+ x86_64-*kfreebsd*-gnu)
+ LD="${LD-ld} -m elf_i386_fbsd"
+ ;;
+ x86_64-*linux*)
+ LD="${LD-ld} -m elf_i386"
+ ;;
+ ppc64-*linux*|powerpc64-*linux*)
+ LD="${LD-ld} -m elf32ppclinux"
+ ;;
+ s390x-*linux*)
+ LD="${LD-ld} -m elf_s390"
+ ;;
+ sparc64-*linux*)
+ LD="${LD-ld} -m elf32_sparc"
+ ;;
+ esac
+ ;;
+ *64-bit*)
+ case $host in
+ x86_64-*kfreebsd*-gnu)
+ LD="${LD-ld} -m elf_x86_64_fbsd"
+ ;;
+ x86_64-*linux*)
+ LD="${LD-ld} -m elf_x86_64"
+ ;;
+ ppc*-*linux*|powerpc*-*linux*)
+ LD="${LD-ld} -m elf64ppc"
+ ;;
+ s390*-*linux*|s390*-*tpf*)
+ LD="${LD-ld} -m elf64_s390"
+ ;;
+ sparc*-*linux*)
+ LD="${LD-ld} -m elf64_sparc"
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+
+*-*-sco3.2v5*)
+ # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -belf"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5
+$as_echo_n "checking whether the C compiler needs -belf... " >&6; }
+if ${lt_cv_cc_needs_belf+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ lt_cv_cc_needs_belf=yes
+else
+ lt_cv_cc_needs_belf=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5
+$as_echo "$lt_cv_cc_needs_belf" >&6; }
+ if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+ CFLAGS="$SAVE_CFLAGS"
+ fi
+ ;;
+*-*solaris*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ case `/usr/bin/file conftest.o` in
+ *64-bit*)
+ case $lt_cv_prog_gnu_ld in
+ yes*)
+ case $host in
+ i?86-*-solaris*)
+ LD="${LD-ld} -m elf_x86_64"
+ ;;
+ sparc*-*-solaris*)
+ LD="${LD-ld} -m elf64_sparc"
+ ;;
+ esac
+ # GNU ld 2.21 introduced _sol2 emulations. Use them if available.
+ if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
+ LD="${LD-ld}_sol2"
+ fi
+ ;;
+ *)
+ if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
+ LD="${LD-ld} -64"
+ fi
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+esac
+
+need_locks="$enable_libtool_lock"
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args.
+set dummy ${ac_tool_prefix}mt; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_MANIFEST_TOOL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$MANIFEST_TOOL"; then
+ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL
+if test -n "$MANIFEST_TOOL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5
+$as_echo "$MANIFEST_TOOL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then
+ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL
+ # Extract the first word of "mt", so it can be a program name with args.
+set dummy mt; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_MANIFEST_TOOL"; then
+ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL
+if test -n "$ac_ct_MANIFEST_TOOL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5
+$as_echo "$ac_ct_MANIFEST_TOOL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_MANIFEST_TOOL" = x; then
+ MANIFEST_TOOL=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL
+ fi
+else
+ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL"
+fi
+
+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5
+$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; }
+if ${lt_cv_path_mainfest_tool+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_path_mainfest_tool=no
+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5
+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+ cat conftest.err >&5
+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+ lt_cv_path_mainfest_tool=yes
+ fi
+ rm -f conftest*
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5
+$as_echo "$lt_cv_path_mainfest_tool" >&6; }
+if test "x$lt_cv_path_mainfest_tool" != xyes; then
+ MANIFEST_TOOL=:
+fi
+
+
+
+
+
+
+ case $host_os in
+ rhapsody* | darwin*)
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args.
+set dummy ${ac_tool_prefix}dsymutil; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_DSYMUTIL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$DSYMUTIL"; then
+ ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+DSYMUTIL=$ac_cv_prog_DSYMUTIL
+if test -n "$DSYMUTIL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5
+$as_echo "$DSYMUTIL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_DSYMUTIL"; then
+ ac_ct_DSYMUTIL=$DSYMUTIL
+ # Extract the first word of "dsymutil", so it can be a program name with args.
+set dummy dsymutil; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_DSYMUTIL"; then
+ ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_DSYMUTIL="dsymutil"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL
+if test -n "$ac_ct_DSYMUTIL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5
+$as_echo "$ac_ct_DSYMUTIL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_DSYMUTIL" = x; then
+ DSYMUTIL=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ DSYMUTIL=$ac_ct_DSYMUTIL
+ fi
+else
+ DSYMUTIL="$ac_cv_prog_DSYMUTIL"
+fi
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args.
+set dummy ${ac_tool_prefix}nmedit; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_NMEDIT+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$NMEDIT"; then
+ ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+NMEDIT=$ac_cv_prog_NMEDIT
+if test -n "$NMEDIT"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5
+$as_echo "$NMEDIT" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_NMEDIT"; then
+ ac_ct_NMEDIT=$NMEDIT
+ # Extract the first word of "nmedit", so it can be a program name with args.
+set dummy nmedit; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_NMEDIT"; then
+ ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_NMEDIT="nmedit"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT
+if test -n "$ac_ct_NMEDIT"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5
+$as_echo "$ac_ct_NMEDIT" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_NMEDIT" = x; then
+ NMEDIT=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ NMEDIT=$ac_ct_NMEDIT
+ fi
+else
+ NMEDIT="$ac_cv_prog_NMEDIT"
+fi
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args.
+set dummy ${ac_tool_prefix}lipo; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_LIPO+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$LIPO"; then
+ ac_cv_prog_LIPO="$LIPO" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_LIPO="${ac_tool_prefix}lipo"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+LIPO=$ac_cv_prog_LIPO
+if test -n "$LIPO"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5
+$as_echo "$LIPO" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_LIPO"; then
+ ac_ct_LIPO=$LIPO
+ # Extract the first word of "lipo", so it can be a program name with args.
+set dummy lipo; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_LIPO+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_LIPO"; then
+ ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_LIPO="lipo"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO
+if test -n "$ac_ct_LIPO"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5
+$as_echo "$ac_ct_LIPO" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_LIPO" = x; then
+ LIPO=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ LIPO=$ac_ct_LIPO
+ fi
+else
+ LIPO="$ac_cv_prog_LIPO"
+fi
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args.
+set dummy ${ac_tool_prefix}otool; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_OTOOL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$OTOOL"; then
+ ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_OTOOL="${ac_tool_prefix}otool"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+OTOOL=$ac_cv_prog_OTOOL
+if test -n "$OTOOL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5
+$as_echo "$OTOOL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OTOOL"; then
+ ac_ct_OTOOL=$OTOOL
+ # Extract the first word of "otool", so it can be a program name with args.
+set dummy otool; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_OTOOL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_OTOOL"; then
+ ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_OTOOL="otool"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL
+if test -n "$ac_ct_OTOOL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5
+$as_echo "$ac_ct_OTOOL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_OTOOL" = x; then
+ OTOOL=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ OTOOL=$ac_ct_OTOOL
+ fi
+else
+ OTOOL="$ac_cv_prog_OTOOL"
+fi
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args.
+set dummy ${ac_tool_prefix}otool64; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_OTOOL64+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$OTOOL64"; then
+ ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+OTOOL64=$ac_cv_prog_OTOOL64
+if test -n "$OTOOL64"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5
+$as_echo "$OTOOL64" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OTOOL64"; then
+ ac_ct_OTOOL64=$OTOOL64
+ # Extract the first word of "otool64", so it can be a program name with args.
+set dummy otool64; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_OTOOL64"; then
+ ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_OTOOL64="otool64"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64
+if test -n "$ac_ct_OTOOL64"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5
+$as_echo "$ac_ct_OTOOL64" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_OTOOL64" = x; then
+ OTOOL64=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ OTOOL64=$ac_ct_OTOOL64
+ fi
+else
+ OTOOL64="$ac_cv_prog_OTOOL64"
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5
+$as_echo_n "checking for -single_module linker flag... " >&6; }
+if ${lt_cv_apple_cc_single_mod+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_apple_cc_single_mod=no
+ if test -z "${LT_MULTI_MODULE}"; then
+ # By default we will add the -single_module flag. You can override
+ # by either setting the environment variable LT_MULTI_MODULE
+ # non-empty at configure time, or by adding -multi_module to the
+ # link flags.
+ rm -rf libconftest.dylib*
+ echo "int foo(void){return 1;}" > conftest.c
+ echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+-dynamiclib -Wl,-single_module conftest.c" >&5
+ $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
+ _lt_result=$?
+ # If there is a non-empty error log, and "single_module"
+ # appears in it, assume the flag caused a linker warning
+ if test -s conftest.err && $GREP single_module conftest.err; then
+ cat conftest.err >&5
+ # Otherwise, if the output was created with a 0 exit code from
+ # the compiler, it worked.
+ elif test -f libconftest.dylib && test $_lt_result -eq 0; then
+ lt_cv_apple_cc_single_mod=yes
+ else
+ cat conftest.err >&5
+ fi
+ rm -rf libconftest.dylib*
+ rm -f conftest.*
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5
+$as_echo "$lt_cv_apple_cc_single_mod" >&6; }
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5
+$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; }
+if ${lt_cv_ld_exported_symbols_list+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_ld_exported_symbols_list=no
+ save_LDFLAGS=$LDFLAGS
+ echo "_main" > conftest.sym
+ LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ lt_cv_ld_exported_symbols_list=yes
+else
+ lt_cv_ld_exported_symbols_list=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5
+$as_echo "$lt_cv_ld_exported_symbols_list" >&6; }
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5
+$as_echo_n "checking for -force_load linker flag... " >&6; }
+if ${lt_cv_ld_force_load+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_ld_force_load=no
+ cat > conftest.c << _LT_EOF
+int forced_loaded() { return 2;}
+_LT_EOF
+ echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5
+ $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5
+ echo "$AR cru libconftest.a conftest.o" >&5
+ $AR cru libconftest.a conftest.o 2>&5
+ echo "$RANLIB libconftest.a" >&5
+ $RANLIB libconftest.a 2>&5
+ cat > conftest.c << _LT_EOF
+int main() { return 0;}
+_LT_EOF
+ echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5
+ $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
+ _lt_result=$?
+ if test -s conftest.err && $GREP force_load conftest.err; then
+ cat conftest.err >&5
+ elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then
+ lt_cv_ld_force_load=yes
+ else
+ cat conftest.err >&5
+ fi
+ rm -f conftest.err libconftest.a conftest conftest.c
+ rm -rf conftest.dSYM
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5
+$as_echo "$lt_cv_ld_force_load" >&6; }
+ case $host_os in
+ rhapsody* | darwin1.[012])
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;;
+ darwin1.*)
+ _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+ darwin*) # darwin 5.x on
+      # If running on 10.5 or later, the deployment target defaults
+      # to the OS version if on x86; on 10.4, the deployment
+      # target defaults to 10.4. Don't you love it?
+ case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in
+ 10.0,*86*-darwin8*|10.0,*-darwin[91]*)
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+ 10.[012]*)
+ _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+ 10.*)
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+ esac
+ ;;
+ esac
+ if test "$lt_cv_apple_cc_single_mod" = "yes"; then
+ _lt_dar_single_mod='$single_module'
+ fi
+ if test "$lt_cv_ld_exported_symbols_list" = "yes"; then
+ _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym'
+ else
+ _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}'
+ fi
+ if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then
+ _lt_dsymutil='~$DSYMUTIL $lib || :'
+ else
+ _lt_dsymutil=
+ fi
+ ;;
+ esac
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
+$as_echo_n "checking how to run the C preprocessor... " >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+ CPP=
+fi
+if test -z "$CPP"; then
+ if ${ac_cv_prog_CPP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # Double quotes because CPP needs to be expanded
+ for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+ break
+fi
+
+ done
+ ac_cv_prog_CPP=$CPP
+
+fi
+ CPP=$ac_cv_prog_CPP
+else
+ ac_cv_prog_CPP=$CPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
+$as_echo "$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if ${ac_cv_header_stdc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_header_stdc=yes
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then :
+ :
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ return 2;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+
+else
+ ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+$as_echo "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do :
+ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
+"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+for ac_header in dlfcn.h
+do :
+ ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default
+"
+if test "x$ac_cv_header_dlfcn_h" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_DLFCN_H 1
+_ACEOF
+
+fi
+
+done
+
+
+
+
+
+# Set options
+
+
+
+ enable_dlopen=no
+
+
+ enable_win32_dll=no
+
+
+ # Check whether --enable-shared was given.
+if test "${enable_shared+set}" = set; then :
+ enableval=$enable_shared; p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_shared=yes ;;
+ no) enable_shared=no ;;
+ *)
+ enable_shared=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_shared=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac
+else
+ enable_shared=yes
+fi
+
+
+
+
+
+
+
+
+
+ # Check whether --enable-static was given.
+if test "${enable_static+set}" = set; then :
+ enableval=$enable_static; p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_static=yes ;;
+ no) enable_static=no ;;
+ *)
+ enable_static=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_static=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac
+else
+ enable_static=yes
+fi
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-pic was given.
+if test "${with_pic+set}" = set; then :
+ withval=$with_pic; lt_p=${PACKAGE-default}
+ case $withval in
+ yes|no) pic_mode=$withval ;;
+ *)
+ pic_mode=default
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for lt_pkg in $withval; do
+ IFS="$lt_save_ifs"
+ if test "X$lt_pkg" = "X$lt_p"; then
+ pic_mode=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac
+else
+ pic_mode=default
+fi
+
+
+test -z "$pic_mode" && pic_mode=default
+
+
+
+
+
+
+
+ # Check whether --enable-fast-install was given.
+if test "${enable_fast_install+set}" = set; then :
+ enableval=$enable_fast_install; p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_fast_install=yes ;;
+ no) enable_fast_install=no ;;
+ *)
+ enable_fast_install=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_fast_install=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac
+else
+ enable_fast_install=yes
+fi
+
+
+
+
+
+
+
+
+
+
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ltmain"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+test -z "$LN_S" && LN_S="ln -s"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+if test -n "${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5
+$as_echo_n "checking for objdir... " >&6; }
+if ${lt_cv_objdir+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+ lt_cv_objdir=.libs
+else
+ # MS-DOS does not allow filenames that begin with a dot.
+ lt_cv_objdir=_libs
+fi
+rmdir .libs 2>/dev/null
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5
+$as_echo "$lt_cv_objdir" >&6; }
+objdir=$lt_cv_objdir
+
+
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define LT_OBJDIR "$lt_cv_objdir/"
+_ACEOF
+
+
+
+
+case $host_os in
+aix3*)
+ # AIX sometimes has problems with the GCC collect2 program. For some
+ # reason, if we set the COLLECT_NAMES environment variable, the problems
+ # vanish in a puff of smoke.
+ if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+ fi
+ ;;
+esac
+
+# Global variables:
+ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a `.a' archive for static linking (except MSVC,
+# which needs '.lib').
+libext=a
+
+with_gnu_ld="$lt_cv_prog_gnu_ld"
+
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+
+# Set sane defaults for various variables
+test -z "$CC" && CC=cc
+test -z "$LTCC" && LTCC=$CC
+test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
+test -z "$LD" && LD=ld
+test -z "$ac_objext" && ac_objext=o
+
+for cc_temp in $compiler""; do
+ case $cc_temp in
+ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+ \-*) ;;
+ *) break;;
+ esac
+done
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+
+
+# Only perform the check for file, if the check method requires it
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+case $deplibs_check_method in
+file_magic*)
+ if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5
+$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; }
+if ${lt_cv_path_MAGIC_CMD+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $MAGIC_CMD in
+[\\/*] | ?:[\\/]*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+*)
+ lt_save_MAGIC_CMD="$MAGIC_CMD"
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
+ for ac_dir in $ac_dummy; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/${ac_tool_prefix}file; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ $EGREP "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$lt_save_ifs"
+ MAGIC_CMD="$lt_save_MAGIC_CMD"
+ ;;
+esac
+fi
+
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
+$as_echo "$MAGIC_CMD" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+
+
+
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+ if test -n "$ac_tool_prefix"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5
+$as_echo_n "checking for file... " >&6; }
+if ${lt_cv_path_MAGIC_CMD+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $MAGIC_CMD in
+[\\/*] | ?:[\\/]*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+*)
+ lt_save_MAGIC_CMD="$MAGIC_CMD"
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
+ for ac_dir in $ac_dummy; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/file; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/file"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ $EGREP "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$lt_save_ifs"
+ MAGIC_CMD="$lt_save_MAGIC_CMD"
+ ;;
+esac
+fi
+
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
+$as_echo "$MAGIC_CMD" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ else
+ MAGIC_CMD=:
+ fi
+fi
+
+ fi
+ ;;
+esac
+
+# Use C for the default configuration in the libtool script
+
+lt_save_CC="$CC"
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+# Source file extension for C test sources.
+ac_ext=c
+
+# Object file extension for compiled C test sources.
+objext=o
+objext=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="int some_variable = 0;"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='int main(){return(0);}'
+
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+# Save the default compiler, since it gets overwritten when the other
+# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
+compiler_DEFAULT=$CC
+
+# save warnings/boilerplate of simple test code
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+
+
+if test -n "$compiler"; then
+
+lt_prog_compiler_no_builtin_flag=
+
+if test "$GCC" = yes; then
+ case $cc_basename in
+ nvcc*)
+ lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;;
+ *)
+ lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;;
+ esac
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
+$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; }
+if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_rtti_exceptions=no
+ ac_outfile=conftest.$ac_objext
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+ lt_compiler_flag="-fno-rtti -fno-exceptions"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ # The option is referenced via a variable to avoid confusing sed.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>conftest.err)
+ ac_status=$?
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s "$ac_outfile"; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings other than the usual output.
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_rtti_exceptions=yes
+ fi
+ fi
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5
+$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; }
+
+if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then
+ lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions"
+else
+ :
+fi
+
+fi
+
+
+
+
+
+
+ lt_prog_compiler_wl=
+lt_prog_compiler_pic=
+lt_prog_compiler_static=
+
+
+ if test "$GCC" = yes; then
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_static='-static'
+
+ case $host_os in
+ aix*)
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ lt_prog_compiler_static='-Bstatic'
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ lt_prog_compiler_pic='-fPIC'
+ ;;
+ m68k)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ esac
+ ;;
+
+ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+
+ mingw* | cygwin* | pw32* | os2* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ # Although the cygwin gcc ignores -fPIC, still need this for old-style
+ # (--disable-auto-import) libraries
+ lt_prog_compiler_pic='-DDLL_EXPORT'
+ ;;
+
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ lt_prog_compiler_pic='-fno-common'
+ ;;
+
+ haiku*)
+ # PIC is the default for Haiku.
+ # The "-static" flag exists, but is broken.
+ lt_prog_compiler_static=
+ ;;
+
+ hpux*)
+ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+ # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag
+ # sets the default TLS model and affects inlining.
+ case $host_cpu in
+ hppa*64*)
+ # +Z the default
+ ;;
+ *)
+ lt_prog_compiler_pic='-fPIC'
+ ;;
+ esac
+ ;;
+
+ interix[3-9]*)
+ # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+ # Instead, we relocate shared libraries at runtime.
+ ;;
+
+ msdosdjgpp*)
+ # Just because we use GCC doesn't mean we suddenly get shared libraries
+ # on systems that don't support them.
+ lt_prog_compiler_can_build_shared=no
+ enable_shared=no
+ ;;
+
+ *nto* | *qnx*)
+      # QNX uses GNU C++, but we need to define the -shared option too,
+      # otherwise it will coredump.
+ lt_prog_compiler_pic='-fPIC -shared'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ lt_prog_compiler_pic=-Kconform_pic
+ fi
+ ;;
+
+ *)
+ lt_prog_compiler_pic='-fPIC'
+ ;;
+ esac
+
+ case $cc_basename in
+ nvcc*) # Cuda Compiler Driver 2.2
+ lt_prog_compiler_wl='-Xlinker '
+ if test -n "$lt_prog_compiler_pic"; then
+ lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic"
+ fi
+ ;;
+ esac
+ else
+ # PORTME Check for flag to pass linker flags through the system compiler.
+ case $host_os in
+ aix*)
+ lt_prog_compiler_wl='-Wl,'
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ lt_prog_compiler_static='-Bstatic'
+ else
+ lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+
+ mingw* | cygwin* | pw32* | os2* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_prog_compiler_pic='-DDLL_EXPORT'
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ lt_prog_compiler_wl='-Wl,'
+ # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
+ # not for PA HP-UX.
+ case $host_cpu in
+ hppa*64*|ia64*)
+ # +Z the default
+ ;;
+ *)
+ lt_prog_compiler_pic='+Z'
+ ;;
+ esac
+ # Is there a better lt_prog_compiler_static that works with the bundled CC?
+ lt_prog_compiler_static='${wl}-a ${wl}archive'
+ ;;
+
+ irix5* | irix6* | nonstopux*)
+ lt_prog_compiler_wl='-Wl,'
+ # PIC (with -KPIC) is the default.
+ lt_prog_compiler_static='-non_shared'
+ ;;
+
+ linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ case $cc_basename in
+ # old Intel for x86_64 which still supported -KPIC.
+ ecc*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-static'
+ ;;
+ # icc used to be incompatible with GCC.
+ # ICC 10 doesn't accept -KPIC any more.
+ icc* | ifort*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-fPIC'
+ lt_prog_compiler_static='-static'
+ ;;
+ # Lahey Fortran 8.1.
+ lf95*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='--shared'
+ lt_prog_compiler_static='--static'
+ ;;
+ nagfor*)
+ # NAG Fortran compiler
+ lt_prog_compiler_wl='-Wl,-Wl,,'
+ lt_prog_compiler_pic='-PIC'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+ pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
+ # Portland Group compilers (*not* the Pentium gcc compiler,
+ # which looks to be a dead project)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-fpic'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+ ccc*)
+ lt_prog_compiler_wl='-Wl,'
+ # All Alpha code is PIC.
+ lt_prog_compiler_static='-non_shared'
+ ;;
+ xl* | bgxl* | bgf* | mpixl*)
+ # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-qpic'
+ lt_prog_compiler_static='-qstaticlink'
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*)
+ # Sun Fortran 8.3 passes all unrecognized flags to the linker
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ lt_prog_compiler_wl=''
+ ;;
+ *Sun\ F* | *Sun*Fortran*)
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ lt_prog_compiler_wl='-Qoption ld '
+ ;;
+ *Sun\ C*)
+ # Sun C 5.9
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ lt_prog_compiler_wl='-Wl,'
+ ;;
+ *Intel*\ [CF]*Compiler*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-fPIC'
+ lt_prog_compiler_static='-static'
+ ;;
+ *Portland\ Group*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-fpic'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+
+ newsos6)
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+
+ *nto* | *qnx*)
+      # QNX uses GNU C++, but we need to define the -shared option too,
+      # otherwise it will core dump.
+ lt_prog_compiler_pic='-fPIC -shared'
+ ;;
+
+ osf3* | osf4* | osf5*)
+ lt_prog_compiler_wl='-Wl,'
+ # All OSF/1 code is PIC.
+ lt_prog_compiler_static='-non_shared'
+ ;;
+
+ rdos*)
+ lt_prog_compiler_static='-non_shared'
+ ;;
+
+ solaris*)
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ case $cc_basename in
+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
+ lt_prog_compiler_wl='-Qoption ld ';;
+ *)
+ lt_prog_compiler_wl='-Wl,';;
+ esac
+ ;;
+
+ sunos4*)
+ lt_prog_compiler_wl='-Qoption ld '
+ lt_prog_compiler_pic='-PIC'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+
+ sysv4 | sysv4.2uw2* | sysv4.3*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec ;then
+ lt_prog_compiler_pic='-Kconform_pic'
+ lt_prog_compiler_static='-Bstatic'
+ fi
+ ;;
+
+ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+
+ unicos*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_can_build_shared=no
+ ;;
+
+ uts4*)
+ lt_prog_compiler_pic='-pic'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+
+ *)
+ lt_prog_compiler_can_build_shared=no
+ ;;
+ esac
+ fi
+
+case $host_os in
+ # For platforms which do not support PIC, -DPIC is meaningless:
+ *djgpp*)
+ lt_prog_compiler_pic=
+ ;;
+ *)
+ lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC"
+ ;;
+esac
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
+$as_echo_n "checking for $compiler option to produce PIC... " >&6; }
+if ${lt_cv_prog_compiler_pic+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5
+$as_echo "$lt_cv_prog_compiler_pic" >&6; }
+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$lt_prog_compiler_pic"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5
+$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; }
+if ${lt_cv_prog_compiler_pic_works+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_pic_works=no
+ ac_outfile=conftest.$ac_objext
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+ lt_compiler_flag="$lt_prog_compiler_pic -DPIC"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ # The option is referenced via a variable to avoid confusing sed.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>conftest.err)
+ ac_status=$?
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s "$ac_outfile"; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings other than the usual output.
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_pic_works=yes
+ fi
+ fi
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5
+$as_echo "$lt_cv_prog_compiler_pic_works" >&6; }
+
+if test x"$lt_cv_prog_compiler_pic_works" = xyes; then
+ case $lt_prog_compiler_pic in
+ "" | " "*) ;;
+ *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;;
+ esac
+else
+ lt_prog_compiler_pic=
+ lt_prog_compiler_can_build_shared=no
+fi
+
+fi
+
+
+
+
+
+
+
+
+
+
+
+#
+# Check to make sure the static flag actually works.
+#
+wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
+$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
+if ${lt_cv_prog_compiler_static_works+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_static_works=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
+ echo "$lt_simple_link_test_code" > conftest.$ac_ext
+ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+ # The linker can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ # Append any errors to the config.log.
+ cat conftest.err 1>&5
+ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if diff conftest.exp conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_static_works=yes
+ fi
+ else
+ lt_cv_prog_compiler_static_works=yes
+ fi
+ fi
+ $RM -r conftest*
+ LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5
+$as_echo "$lt_cv_prog_compiler_static_works" >&6; }
+
+if test x"$lt_cv_prog_compiler_static_works" = xyes; then
+ :
+else
+ lt_prog_compiler_static=
+fi
+
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if ${lt_cv_prog_compiler_c_o+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_c_o=no
+ $RM -r conftest 2>/dev/null
+ mkdir conftest
+ cd conftest
+ mkdir out
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ lt_compiler_flag="-o out/conftest2.$ac_objext"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>out/conftest.err)
+ ac_status=$?
+ cat out/conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s out/conftest2.$ac_objext
+ then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_c_o=yes
+ fi
+ fi
+ chmod u+w . 2>&5
+ $RM conftest*
+ # SGI C++ compiler will create directory out/ii_files/ for
+ # template instantiation
+ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+ $RM out/* && rmdir out
+ cd ..
+ $RM -r conftest
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
+$as_echo "$lt_cv_prog_compiler_c_o" >&6; }
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if ${lt_cv_prog_compiler_c_o+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_c_o=no
+ $RM -r conftest 2>/dev/null
+ mkdir conftest
+ cd conftest
+ mkdir out
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ lt_compiler_flag="-o out/conftest2.$ac_objext"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>out/conftest.err)
+ ac_status=$?
+ cat out/conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s out/conftest2.$ac_objext
+ then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_c_o=yes
+ fi
+ fi
+ chmod u+w . 2>&5
+ $RM conftest*
+ # SGI C++ compiler will create directory out/ii_files/ for
+ # template instantiation
+ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+ $RM out/* && rmdir out
+ cd ..
+ $RM -r conftest
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
+$as_echo "$lt_cv_prog_compiler_c_o" >&6; }
+
+
+
+
+hard_links="nottested"
+if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then
+ # do not overwrite the value of need_locks provided by the user
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
+$as_echo_n "checking if we can lock with hard links... " >&6; }
+ hard_links=yes
+ $RM conftest*
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ touch conftest.a
+ ln conftest.a conftest.b 2>&5 || hard_links=no
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
+$as_echo "$hard_links" >&6; }
+ if test "$hard_links" = no; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5
+$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;}
+ need_locks=warn
+ fi
+else
+ need_locks=no
+fi
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+
+ runpath_var=
+ allow_undefined_flag=
+ always_export_symbols=no
+ archive_cmds=
+ archive_expsym_cmds=
+ compiler_needs_object=no
+ enable_shared_with_static_runtimes=no
+ export_dynamic_flag_spec=
+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+ hardcode_automatic=no
+ hardcode_direct=no
+ hardcode_direct_absolute=no
+ hardcode_libdir_flag_spec=
+ hardcode_libdir_separator=
+ hardcode_minus_L=no
+ hardcode_shlibpath_var=unsupported
+ inherit_rpath=no
+ link_all_deplibs=unknown
+ module_cmds=
+ module_expsym_cmds=
+ old_archive_from_new_cmds=
+ old_archive_from_expsyms_cmds=
+ thread_safe_flag_spec=
+ whole_archive_flag_spec=
+ # include_expsyms should be a list of space-separated symbols to be *always*
+ # included in the symbol list
+ include_expsyms=
+ # exclude_expsyms can be an extended regexp of symbols to exclude
+ # it will be wrapped by ` (' and `)$', so one must not match beginning or
+ # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+ # as well as any symbol that contains `d'.
+ exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+ # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out
+ # platforms (ab)use it in PIC code, but their linkers get confused if
+ # the symbol is explicitly referenced. Since portable code cannot
+ # rely on this symbol name, it's probably fine to never include it in
+ # preloaded symbol tables.
+ # Exclude shared library initialization/finalization symbols.
+ extract_expsyms_cmds=
+
+ case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+ # FIXME: the MSVC++ port hasn't been tested in a loooong time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$GCC" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+ interix*)
+ # we just hope/assume this is gcc and not c89 (= MSVC++)
+ with_gnu_ld=yes
+ ;;
+ openbsd*)
+ with_gnu_ld=no
+ ;;
+ linux* | k*bsd*-gnu | gnu*)
+ link_all_deplibs=no
+ ;;
+ esac
+
+ ld_shlibs=yes
+
+ # On some targets, GNU ld is compatible enough with the native linker
+ # that we're better off using the native interface for both.
+ lt_use_gnu_ld_interface=no
+ if test "$with_gnu_ld" = yes; then
+ case $host_os in
+ aix*)
+ # The AIX port of GNU ld has always aspired to compatibility
+ # with the native linker. However, as the warning in the GNU ld
+ # block says, versions before 2.19.5* couldn't really create working
+ # shared libraries, regardless of the interface used.
+ case `$LD -v 2>&1` in
+ *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
+ *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;;
+ *\ \(GNU\ Binutils\)\ [3-9]*) ;;
+ *)
+ lt_use_gnu_ld_interface=yes
+ ;;
+ esac
+ ;;
+ *)
+ lt_use_gnu_ld_interface=yes
+ ;;
+ esac
+ fi
+
+ if test "$lt_use_gnu_ld_interface" = yes; then
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ wlarc='${wl}'
+
+ # Set some defaults for GNU ld with shared library support. These
+ # are reset later if shared libraries are not supported. Putting them
+ # here allows them to be overridden if necessary.
+ runpath_var=LD_RUN_PATH
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ export_dynamic_flag_spec='${wl}--export-dynamic'
+    # ancient GNU ld didn't support --whole-archive et al.
+ if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+ whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ whole_archive_flag_spec=
+ fi
+ supports_anon_versioning=no
+ case `$LD -v 2>&1` in
+ *GNU\ gold*) supports_anon_versioning=yes ;;
+ *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11
+ *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+ *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+ *\ 2.11.*) ;; # other 2.11 versions
+ *) supports_anon_versioning=yes ;;
+ esac
+
+ # See if GNU ld supports shared libraries.
+ case $host_os in
+ aix[3-9]*)
+ # On AIX/PPC, the GNU linker is very broken
+ if test "$host_cpu" != ia64; then
+ ld_shlibs=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.19, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support. If you
+*** really care for shared libraries, you may want to install binutils
+*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
+*** You will then need to restart the configuration process.
+
+_LT_EOF
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds=''
+ ;;
+ m68k)
+ archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ ;;
+ esac
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ allow_undefined_flag=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless,
+ # as there is no search path for DLLs.
+ hardcode_libdir_flag_spec='-L$libdir'
+ export_dynamic_flag_spec='${wl}--export-all-symbols'
+ allow_undefined_flag=unsupported
+ always_export_symbols=no
+ enable_shared_with_static_runtimes=yes
+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
+ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
+
+ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is; otherwise, prepend...
+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname.def;
+ else
+ echo EXPORTS > $output_objdir/$soname.def;
+ cat $export_symbols >> $output_objdir/$soname.def;
+ fi~
+ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ haiku*)
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ link_all_deplibs=yes
+ ;;
+
+ interix[3-9]*)
+ hardcode_direct=no
+ hardcode_shlibpath_var=no
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec='${wl}-E'
+ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+ # Instead, shared libraries are loaded at an image base (0x10000000 by
+      # default) and relocated if they conflict, which is a slow, very memory-
+      # consuming and fragmenting process.  To avoid this, we pick a random,
+ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+ # time. Moving up from 0x10000000 also allows more sbrk(2) space.
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ ;;
+
+ gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
+ tmp_diet=no
+ if test "$host_os" = linux-dietlibc; then
+ case $cc_basename in
+ diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn)
+ esac
+ fi
+ if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+ && test "$tmp_diet" = no
+ then
+ tmp_addflag=' $pic_flag'
+ tmp_sharedflag='-shared'
+ case $cc_basename,$host_cpu in
+ pgcc*) # Portland Group C compiler
+ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ tmp_addflag=' $pic_flag'
+ ;;
+ pgf77* | pgf90* | pgf95* | pgfortran*)
+ # Portland Group f77 and f90 compilers
+ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ tmp_addflag=' $pic_flag -Mnomain' ;;
+ ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64
+ tmp_addflag=' -i_dynamic' ;;
+ efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64
+ tmp_addflag=' -i_dynamic -nofor_main' ;;
+ ifc* | ifort*) # Intel Fortran compiler
+ tmp_addflag=' -nofor_main' ;;
+ lf95*) # Lahey Fortran 8.1
+ whole_archive_flag_spec=
+ tmp_sharedflag='--shared' ;;
+ xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+ tmp_sharedflag='-qmkshrobj'
+ tmp_addflag= ;;
+ nvcc*) # Cuda Compiler Driver 2.2
+ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ compiler_needs_object=yes
+ ;;
+ esac
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*) # Sun C 5.9
+ whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ compiler_needs_object=yes
+ tmp_sharedflag='-G' ;;
+ *Sun\ F*) # Sun Fortran 8.3
+ tmp_sharedflag='-G' ;;
+ esac
+ archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+
+ if test "x$supports_anon_versioning" = xyes; then
+ archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+ fi
+
+ case $cc_basename in
+ xlf* | bgf* | bgxlf* | mpixlf*)
+ # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+ whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
+ if test "x$supports_anon_versioning" = xyes; then
+ archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+ fi
+ ;;
+ esac
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ netbsd* | netbsdelf*-gnu)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+ wlarc=
+ else
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ fi
+ ;;
+
+ solaris*)
+ if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
+ ld_shlibs=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems. Therefore, libtool
+*** is disabling shared libraries support. We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+ elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+ case `$LD -v 2>&1` in
+ *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*)
+ ld_shlibs=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not
+*** reliably create shared libraries on SCO systems. Therefore, libtool
+*** is disabling shared libraries support. We urge you to upgrade GNU
+*** binutils to release 2.16.91.0.3 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+ ;;
+ *)
+ # For security reasons, it is highly recommended that you always
+ # use absolute paths for naming shared libraries, and exclude the
+ # DT_RUNPATH tag from executables and libraries. But doing so
+ # requires that you compile everything twice, which is a pain.
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+ ;;
+
+ sunos4*)
+ archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ wlarc=
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+
+ if test "$ld_shlibs" = no; then
+ runpath_var=
+ hardcode_libdir_flag_spec=
+ export_dynamic_flag_spec=
+ whole_archive_flag_spec=
+ fi
+ else
+ # PORTME fill in a description of your system's linker (not GNU ld)
+ case $host_os in
+ aix3*)
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+ archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ hardcode_minus_L=yes
+ if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ hardcode_direct=unsupported
+ fi
+ ;;
+
+ aix[4-9]*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ # If we're using GNU nm, then we don't want the "-C" option.
+	# To AIX nm, -C means demangle, but to GNU nm it means don't demangle.
+ # Also, AIX nm treats weak defined symbols like other global
+ # defined symbols, whereas GNU nm marks them as "W".
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+ export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ else
+ export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ fi
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+ for ld_flag in $LDFLAGS; do
+ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ aix_use_runtimelinking=yes
+ break
+ fi
+ done
+ ;;
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ archive_cmds=''
+ hardcode_direct=yes
+ hardcode_direct_absolute=yes
+ hardcode_libdir_separator=':'
+ link_all_deplibs=yes
+ file_list_spec='${wl}-f,'
+
+ if test "$GCC" = yes; then
+ case $host_os in aix4.[012]|aix4.[012].*)
+ # We only want to do this on AIX 4.2 and lower, the check
+ # below for broken collect2 doesn't work under 4.3+
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" &&
+ strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ :
+ else
+ # We have old collect2
+ hardcode_direct=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_minus_L
+ # to unsupported forces relinking
+ hardcode_minus_L=yes
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_libdir_separator=
+ fi
+ ;;
+ esac
+ shared_flag='-shared'
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag="$shared_flag "'${wl}-G'
+ fi
+ link_all_deplibs=no
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+ # chokes on -Wl,-G. The following line is correct:
+ shared_flag='-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+ fi
+
+ export_dynamic_flag_spec='${wl}-bexpall'
+ # It seems that -bexpall does not export symbols beginning with
+ # underscore (_), so it is better to generate a list of symbols to export.
+ always_export_symbols=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag='-berok'
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ if test "${lt_cv_aix_libpath+set}" = set; then
+ aix_libpath=$lt_cv_aix_libpath
+else
+ if ${lt_cv_aix_libpath_+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+
+ lt_aix_libpath_sed='
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\([^ ]*\) *$/\1/
+ p
+ }
+ }'
+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ # Check for a 64-bit object if we didn't find anything.
+ if test -z "$lt_cv_aix_libpath_"; then
+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ if test -z "$lt_cv_aix_libpath_"; then
+ lt_cv_aix_libpath_="/usr/lib:/lib"
+ fi
+
+fi
+
+ aix_libpath=$lt_cv_aix_libpath_
+fi
+
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+ archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
+ allow_undefined_flag="-z nodefs"
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+ else
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ if test "${lt_cv_aix_libpath+set}" = set; then
+ aix_libpath=$lt_cv_aix_libpath
+else
+ if ${lt_cv_aix_libpath_+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+
+ lt_aix_libpath_sed='
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\([^ ]*\) *$/\1/
+ p
+ }
+ }'
+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ # Check for a 64-bit object if we didn't find anything.
+ if test -z "$lt_cv_aix_libpath_"; then
+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ if test -z "$lt_cv_aix_libpath_"; then
+ lt_cv_aix_libpath_="/usr/lib:/lib"
+ fi
+
+fi
+
+ aix_libpath=$lt_cv_aix_libpath_
+fi
+
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ no_undefined_flag=' ${wl}-bernotok'
+ allow_undefined_flag=' ${wl}-berok'
+ if test "$with_gnu_ld" = yes; then
+ # We only use this code for GNU lds that support --whole-archive.
+ whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ else
+ # Exported symbols can be pulled into shared objects from archives
+ whole_archive_flag_spec='$convenience'
+ fi
+ archive_cmds_need_lc=yes
+ # This is similar to how AIX traditionally builds its shared libraries.
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+ fi
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds=''
+ ;;
+ m68k)
+ archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ ;;
+ esac
+ ;;
+
+ bsdi[45]*)
+ export_dynamic_flag_spec=-rdynamic
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ case $cc_basename in
+ cl*)
+ # Native MSVC
+ hardcode_libdir_flag_spec=' '
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+ file_list_spec='@'
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+ else
+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+ fi~
+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+ linknames='
+ # The linker will not automatically build a static lib if we build a DLL.
+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true'
+ enable_shared_with_static_runtimes=yes
+ exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols'
+ # Don't use ranlib
+ old_postinstall_cmds='chmod 644 $oldlib'
+ postlink_cmds='lt_outputfile="@OUTPUT@"~
+ lt_tool_outputfile="@TOOL_OUTPUT@"~
+ case $lt_outputfile in
+ *.exe|*.EXE) ;;
+ *)
+ lt_outputfile="$lt_outputfile.exe"
+ lt_tool_outputfile="$lt_tool_outputfile.exe"
+ ;;
+ esac~
+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+ $RM "$lt_outputfile.manifest";
+ fi'
+ ;;
+ *)
+ # Assume MSVC wrapper
+ hardcode_libdir_flag_spec=' '
+ allow_undefined_flag=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ old_archive_from_new_cmds='true'
+ # FIXME: Should let the user specify the lib program.
+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs'
+ enable_shared_with_static_runtimes=yes
+ ;;
+ esac
+ ;;
+
+ darwin* | rhapsody*)
+
+
+ archive_cmds_need_lc=no
+ hardcode_direct=no
+ hardcode_automatic=yes
+ hardcode_shlibpath_var=unsupported
+ if test "$lt_cv_ld_force_load" = "yes"; then
+ whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+
+ else
+ whole_archive_flag_spec=''
+ fi
+ link_all_deplibs=yes
+ allow_undefined_flag="$_lt_dar_allow_undefined"
+ case $cc_basename in
+ ifort*) _lt_dar_can_shared=yes ;;
+ *) _lt_dar_can_shared=$GCC ;;
+ esac
+ if test "$_lt_dar_can_shared" = "yes"; then
+ output_verbose_link_cmd=func_echo_all
+ archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
+ module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
+ archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
+ module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
+
+ else
+ ld_shlibs=no
+ fi
+
+ ;;
+
+ dgux*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+ # support. Future versions do this automatically, but an explicit c++rt0.o
+ # does not break anything, and helps significantly (at the cost of a little
+ # extra space).
+ freebsd2.2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+ freebsd2.*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+ freebsd* | dragonfly*)
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ hpux9*)
+ if test "$GCC" = yes; then
+ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ else
+ archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ export_dynamic_flag_spec='${wl}-E'
+ ;;
+
+ hpux10*)
+ if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ if test "$with_gnu_ld" = no; then
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ hardcode_direct_absolute=yes
+ export_dynamic_flag_spec='${wl}-E'
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ fi
+ ;;
+
+ hpux11*)
+ if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+ case $host_cpu in
+ hppa*64*)
+ archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ ia64*)
+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ else
+ case $host_cpu in
+ hppa*64*)
+ archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ ia64*)
+ archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+
+ # Older versions of the 11.00 compiler do not understand -b yet
+ # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5
+$as_echo_n "checking if $CC understands -b... " >&6; }
+if ${lt_cv_prog_compiler__b+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler__b=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS -b"
+ echo "$lt_simple_link_test_code" > conftest.$ac_ext
+ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+ # The linker can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ # Append any errors to the config.log.
+ cat conftest.err 1>&5
+ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if diff conftest.exp conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler__b=yes
+ fi
+ else
+ lt_cv_prog_compiler__b=yes
+ fi
+ fi
+ $RM -r conftest*
+ LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5
+$as_echo "$lt_cv_prog_compiler__b" >&6; }
+
+if test x"$lt_cv_prog_compiler__b" = xyes; then
+ archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+else
+ archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+fi
+
+ ;;
+ esac
+ fi
+ if test "$with_gnu_ld" = no; then
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+
+ case $host_cpu in
+ hppa*64*|ia64*)
+ hardcode_direct=no
+ hardcode_shlibpath_var=no
+ ;;
+ *)
+ hardcode_direct=yes
+ hardcode_direct_absolute=yes
+ export_dynamic_flag_spec='${wl}-E'
+
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ ;;
+ esac
+ fi
+ ;;
+
+ irix5* | irix6* | nonstopux*)
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+	# Try to use the -exported_symbol ld option; if it does not
+ # work, assume that -exports_file does not work either and
+ # implicitly export all symbols.
+ # This should be the same for all languages, so no per-tag cache variable.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5
+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; }
+if ${lt_cv_irix_exported_symbol+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+int foo (void) { return 0; }
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ lt_cv_irix_exported_symbol=yes
+else
+ lt_cv_irix_exported_symbol=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ LDFLAGS="$save_LDFLAGS"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5
+$as_echo "$lt_cv_irix_exported_symbol" >&6; }
+ if test "$lt_cv_irix_exported_symbol" = yes; then
+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
+ fi
+ else
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
+ fi
+ archive_cmds_need_lc='no'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ inherit_rpath=yes
+ link_all_deplibs=yes
+ ;;
+
+ netbsd* | netbsdelf*-gnu)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out
+ else
+ archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF
+ fi
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ newsos6)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_shlibpath_var=no
+ ;;
+
+ *nto* | *qnx*)
+ ;;
+
+ openbsd*)
+ if test -f /usr/libexec/ld.so; then
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ hardcode_direct_absolute=yes
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec='${wl}-E'
+ else
+ case $host_os in
+ openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ ;;
+ *)
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ ;;
+ esac
+ fi
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ os2*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ allow_undefined_flag=unsupported
+ archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+ old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+ ;;
+
+ osf3*)
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ archive_cmds_need_lc='no'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+
+ osf4* | osf5*) # as osf3* with the addition of -msym flag
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
+
+ # Both c and cxx compiler support -rpath directly
+ hardcode_libdir_flag_spec='-rpath $libdir'
+ fi
+ archive_cmds_need_lc='no'
+ hardcode_libdir_separator=:
+ ;;
+
+ solaris*)
+ no_undefined_flag=' -z defs'
+ if test "$GCC" = yes; then
+ wlarc='${wl}'
+ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ else
+ case `$CC -V 2>&1` in
+ *"Compilers 5.0"*)
+ wlarc=''
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+ ;;
+ *)
+ wlarc='${wl}'
+ archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ ;;
+ esac
+ fi
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_shlibpath_var=no
+ case $host_os in
+ solaris2.[0-5] | solaris2.[0-5].*) ;;
+ *)
+ # The compiler driver will combine and reorder linker options,
+ # but understands `-z linker_flag'. GCC discards it without `$wl',
+ # but is careful enough not to reorder.
+ # Supported since Solaris 2.6 (maybe 2.5.1?)
+ if test "$GCC" = yes; then
+ whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+ else
+ whole_archive_flag_spec='-z allextract$convenience -z defaultextract'
+ fi
+ ;;
+ esac
+ link_all_deplibs=yes
+ ;;
+
+ sunos4*)
+ if test "x$host_vendor" = xsequent; then
+ # Use $CC to link under sequent, because it throws in some extra .o
+ # files that make .init and .fini sections work.
+ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4)
+ case $host_vendor in
+ sni)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes # is this really true???
+ ;;
+ siemens)
+        ## LD is ld; it makes a PLAMLIB
+ ## CC just makes a GrossModule.
+ archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+ reload_cmds='$CC -r -o $output$reload_objs'
+ hardcode_direct=no
+ ;;
+ motorola)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=no #Motorola manual says yes, but my tests say they lie
+ ;;
+ esac
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4.3*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ export_dynamic_flag_spec='-Bexport'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ ld_shlibs=yes
+ fi
+ ;;
+
+ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
+ no_undefined_flag='${wl}-z,text'
+ archive_cmds_need_lc=no
+ hardcode_shlibpath_var=no
+ runpath_var='LD_RUN_PATH'
+
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6*)
+ # Note: We can NOT use -z defs as we might desire, because we do not
+ # link with -lc, and that would cause any symbols used from libc to
+ # always be unresolved, which means just about no library would
+ # ever link correctly. If we're not using GNU ld we use -z text
+ # though, which does catch some bad symbols but isn't as heavy-handed
+ # as -z defs.
+ no_undefined_flag='${wl}-z,text'
+ allow_undefined_flag='${wl}-z,nodefs'
+ archive_cmds_need_lc=no
+ hardcode_shlibpath_var=no
+ hardcode_libdir_flag_spec='${wl}-R,$libdir'
+ hardcode_libdir_separator=':'
+ link_all_deplibs=yes
+ export_dynamic_flag_spec='${wl}-Bexport'
+ runpath_var='LD_RUN_PATH'
+
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ ;;
+
+ uts4*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+
+ if test x$host_vendor = xsni; then
+ case $host in
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ export_dynamic_flag_spec='${wl}-Blargedynsym'
+ ;;
+ esac
+ fi
+ fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5
+$as_echo "$ld_shlibs" >&6; }
+test "$ld_shlibs" = no && can_build_shared=no
+
+with_gnu_ld=$with_gnu_ld
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$archive_cmds_need_lc" in
+x|xyes)
+ # Assume -lc should be added
+ archive_cmds_need_lc=yes
+
+ if test "$enable_shared" = yes && test "$GCC" = yes; then
+ case $archive_cmds in
+ *'~'*)
+ # FIXME: we may have to deal with multi-command sequences.
+ ;;
+ '$CC '*)
+ # Test whether the compiler implicitly links with -lc since on some
+ # systems, -lgcc has to come before -lc. If gcc already passes -lc
+ # to ld, don't add -lc before -lgcc.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
+$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; }
+if ${lt_cv_archive_cmds_need_lc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ $RM conftest*
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } 2>conftest.err; then
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$lt_prog_compiler_wl
+ pic_flag=$lt_prog_compiler_pic
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ lt_save_allow_undefined_flag=$allow_undefined_flag
+ allow_undefined_flag=
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
+ (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ then
+ lt_cv_archive_cmds_need_lc=no
+ else
+ lt_cv_archive_cmds_need_lc=yes
+ fi
+ allow_undefined_flag=$lt_save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5
+$as_echo "$lt_cv_archive_cmds_need_lc" >&6; }
+ archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc
+ ;;
+ esac
+ fi
+ ;;
+esac
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
+$as_echo_n "checking dynamic linker characteristics... " >&6; }
+
+if test "$GCC" = yes; then
+ case $host_os in
+ darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
+ *) lt_awk_arg="/^libraries:/" ;;
+ esac
+ case $host_os in
+ mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;;
+ *) lt_sed_strip_eq="s,=/,/,g" ;;
+ esac
+ lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+ case $lt_search_path_spec in
+ *\;*)
+ # if the path contains ";" then we assume it to be the separator
+ # otherwise default to the standard path separator (i.e. ":") - it is
+ # assumed that no part of a normal pathname contains ";" but that should
+ # be okay in the real world where ";" in dirpaths is itself problematic.
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
+ ;;
+ *)
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
+ ;;
+ esac
+ # OK, now that we have the path, separated by spaces, we can step through it
+ # and add multilib dir if necessary.
+ lt_tmp_lt_search_path_spec=
+ lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+ for lt_sys_path in $lt_search_path_spec; do
+ if test -d "$lt_sys_path/$lt_multi_os_dir"; then
+ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir"
+ else
+ test -d "$lt_sys_path" && \
+ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
+ fi
+ done
+ lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
+BEGIN {RS=" "; FS="/|\n";} {
+ lt_foo="";
+ lt_count=0;
+ for (lt_i = NF; lt_i > 0; lt_i--) {
+ if ($lt_i != "" && $lt_i != ".") {
+ if ($lt_i == "..") {
+ lt_count++;
+ } else {
+ if (lt_count == 0) {
+ lt_foo="/" $lt_i lt_foo;
+ } else {
+ lt_count--;
+ }
+ }
+ }
+ }
+ if (lt_foo != "") { lt_freq[lt_foo]++; }
+ if (lt_freq[lt_foo] == 1) { print lt_foo; }
+}'`
+ # AWK program above erroneously prepends '/' to C:/dos/paths
+ # for these hosts.
+ case $host_os in
+ mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
+ $SED 's,/\([A-Za-z]:\),\1,g'` ;;
+ esac
+ sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
+else
+ sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+fi
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=".so"
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+case $host_os in
+aix3*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+ shlibpath_var=LIBPATH
+
+ # AIX 3 has no versioning support, so we append a major version to the name.
+ soname_spec='${libname}${release}${shared_ext}$major'
+ ;;
+
+aix[4-9]*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ hardcode_into_libs=yes
+ if test "$host_cpu" = ia64; then
+ # AIX 5 supports IA64
+ library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ else
+ # With GCC up to 2.95.x, collect2 would create an import file
+ # for dependence libraries. The import file would start with
+ # the line `#! .'. This would cause the generated library to
+ # depend on `.', always an invalid library. This was fixed in
+ # development snapshots of GCC prior to 3.0.
+ case $host_os in
+ aix4 | aix4.[01] | aix4.[01].*)
+ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ echo ' yes '
+ echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+ :
+ else
+ can_build_shared=no
+ fi
+ ;;
+ esac
+ # AIX (on Power*) has no versioning support, so we currently cannot hardcode the
+ # correct soname into the executable. We can probably add versioning support to
+ # collect2, so additional links can be useful in the future.
+ if test "$aix_use_runtimelinking" = yes; then
+ # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+ # instead of lib<name>.a to let people know that these are not
+ # typical AIX shared libraries.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ else
+ # We preserve .a as extension for shared libraries through AIX4.2
+ # and later when we are not doing run time linking.
+ library_names_spec='${libname}${release}.a $libname.a'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ fi
+ shlibpath_var=LIBPATH
+ fi
+ ;;
+
+amigaos*)
+ case $host_cpu in
+ powerpc)
+ # Since July 2007 AmigaOS4 officially supports .so libraries.
+ # When compiling the executable, add -use-dynld -Lsobjs: to the compile line.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ ;;
+ m68k)
+ library_names_spec='$libname.ixlibrary $libname.a'
+ # Create ${libname}_ixlibrary.a entries in /sys/libs.
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+ ;;
+ esac
+ ;;
+
+beos*)
+ library_names_spec='${libname}${shared_ext}'
+ dynamic_linker="$host_os ld.so"
+ shlibpath_var=LIBRARY_PATH
+ ;;
+
+bsdi[45]*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+ # the default ld.so.conf also contains /usr/contrib/lib and
+ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+ # libtool to hard-code these into programs
+ ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+ version_type=windows
+ shrext_cmds=".dll"
+ need_version=no
+ need_lib_prefix=no
+
+ case $GCC,$cc_basename in
+ yes,*)
+ # gcc
+ library_names_spec='$libname.dll.a'
+ # DLL is installed to $(libdir)/../bin by postinstall_cmds
+ postinstall_cmds='base_file=`basename \${file}`~
+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog $dir/$dlname \$dldir/$dlname~
+ chmod a+x \$dldir/$dlname~
+ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+ fi'
+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $RM \$dlpath'
+ shlibpath_overrides_runpath=yes
+
+ case $host_os in
+ cygwin*)
+ # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"
+ ;;
+ mingw* | cegcc*)
+ # MinGW DLLs use traditional 'lib' prefix
+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ ;;
+ pw32*)
+ # pw32 DLLs use 'pw' prefix rather than 'lib'
+ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ ;;
+ esac
+ dynamic_linker='Win32 ld.exe'
+ ;;
+
+ *,cl*)
+ # Native MSVC
+ libname_spec='$name'
+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ library_names_spec='${libname}.dll.lib'
+
+ case $build_os in
+ mingw*)
+ sys_lib_search_path_spec=
+ lt_save_ifs=$IFS
+ IFS=';'
+ for lt_path in $LIB
+ do
+ IFS=$lt_save_ifs
+ # Let DOS variable expansion print the short 8.3 style file name.
+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+ done
+ IFS=$lt_save_ifs
+ # Convert to MSYS style.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
+ ;;
+ cygwin*)
+ # Convert to unix form, then to dos form, then back to unix form
+ # but this time dos style (no spaces!) so that the unix form looks
+ # like /cygdrive/c/PROGRA~1:/cygdr...
+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ ;;
+ *)
+ sys_lib_search_path_spec="$LIB"
+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+ # It is most probably a Windows format PATH.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+ else
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ fi
+ # FIXME: find the short name or the path components, as spaces are
+ # common. (e.g. "Program Files" -> "PROGRA~1")
+ ;;
+ esac
+
+ # DLL is installed to $(libdir)/../bin by postinstall_cmds
+ postinstall_cmds='base_file=`basename \${file}`~
+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog $dir/$dlname \$dldir/$dlname'
+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $RM \$dlpath'
+ shlibpath_overrides_runpath=yes
+ dynamic_linker='Win32 link.exe'
+ ;;
+
+ *)
+ # Assume MSVC wrapper
+ library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib'
+ dynamic_linker='Win32 ld.exe'
+ ;;
+ esac
+ # FIXME: first we should search . and the directory the executable is in
+ shlibpath_var=PATH
+ ;;
+
+darwin* | rhapsody*)
+ dynamic_linker="$host_os dyld"
+ version_type=darwin
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
+ soname_spec='${libname}${release}${major}$shared_ext'
+ shlibpath_overrides_runpath=yes
+ shlibpath_var=DYLD_LIBRARY_PATH
+ shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"
+ sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+ ;;
+
+dgux*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+freebsd* | dragonfly*)
+ # DragonFly does not have aout. When/if they implement a new
+ # versioning mechanism, adjust this.
+ if test -x /usr/bin/objformat; then
+ objformat=`/usr/bin/objformat`
+ else
+ case $host_os in
+ freebsd[23].*) objformat=aout ;;
+ *) objformat=elf ;;
+ esac
+ fi
+ version_type=freebsd-$objformat
+ case $version_type in
+ freebsd-elf*)
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+ need_version=no
+ need_lib_prefix=no
+ ;;
+ freebsd-*)
+ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+ need_version=yes
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_os in
+ freebsd2.*)
+ shlibpath_overrides_runpath=yes
+ ;;
+ freebsd3.[01]* | freebsdelf3.[01]*)
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+ freebsd3.[2-9]* | freebsdelf3.[2-9]* | \
+ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1)
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+ *) # from 4.6 on, and DragonFly
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+ esac
+ ;;
+
+gnu*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+haiku*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ dynamic_linker="$host_os runtime_loader"
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+ hardcode_into_libs=yes
+ ;;
+
+hpux9* | hpux10* | hpux11*)
+ # Give a soname corresponding to the major version so that dld.sl refuses to
+ # link against other versions.
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ case $host_cpu in
+ ia64*)
+ shrext_cmds='.so'
+ hardcode_into_libs=yes
+ dynamic_linker="$host_os dld.so"
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ if test "X$HPUX_IA64_MODE" = X32; then
+ sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+ else
+ sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+ fi
+ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+ ;;
+ hppa*64*)
+ shrext_cmds='.sl'
+ hardcode_into_libs=yes
+ dynamic_linker="$host_os dld.sl"
+ shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+ ;;
+ *)
+ shrext_cmds='.sl'
+ dynamic_linker="$host_os dld.sl"
+ shlibpath_var=SHLIB_PATH
+ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ ;;
+ esac
+ # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+ postinstall_cmds='chmod 555 $lib'
+ # or fails outright, so override atomically:
+ install_override_mode=555
+ ;;
+
+interix[3-9]*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+irix5* | irix6* | nonstopux*)
+ case $host_os in
+ nonstopux*) version_type=nonstopux ;;
+ *)
+ if test "$lt_cv_prog_gnu_ld" = yes; then
+ version_type=linux # correct to gnu/linux during the next big refactor
+ else
+ version_type=irix
+ fi ;;
+ esac
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}${shared_ext}$major'
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+ case $host_os in
+ irix5* | nonstopux*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in # libtool.m4 will add one of these switches to LD
+ *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+ libsuff= shlibsuff= libmagic=32-bit;;
+ *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+ libsuff=32 shlibsuff=N32 libmagic=N32;;
+ *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+ libsuff=64 shlibsuff=64 libmagic=64-bit;;
+ *) libsuff= shlibsuff= libmagic=never-match;;
+ esac
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+ hardcode_into_libs=yes
+ ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+ dynamic_linker=no
+ ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+
+ # Some binutils ld are patched to set DT_RUNPATH
+ if ${lt_cv_shlibpath_overrides_runpath+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_shlibpath_overrides_runpath=no
+ save_LDFLAGS=$LDFLAGS
+ save_libdir=$libdir
+ eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \
+ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then :
+ lt_cv_shlibpath_overrides_runpath=yes
+fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ LDFLAGS=$save_LDFLAGS
+ libdir=$save_libdir
+
+fi
+
+ shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+
+ # This implies no fast_install, which is unacceptable.
+ # Some rework will be needed to allow for fast_install
+ # before this can be enabled.
+ hardcode_into_libs=yes
+
+ # Append ld.so.conf contents to the search path
+ if test -f /etc/ld.so.conf; then
+ lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+ sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+ fi
+
+ # We used to test for /lib/ld.so.1 and disable shared libraries on
+ # powerpc, because MkLinux only supported shared libraries with the
+ # GNU dynamic linker. That check was broken with cross compilers,
+ # most powerpc-linux boxes support dynamic linking these days, and
+ # people can always --disable-shared, so the test was removed and we
+ # now assume the GNU/Linux dynamic linker is in use.
+ dynamic_linker='GNU/Linux ld.so'
+ ;;
+
+netbsdelf*-gnu)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ dynamic_linker='NetBSD ld.elf_so'
+ ;;
+
+netbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ dynamic_linker='NetBSD (a.out) ld.so'
+ else
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ dynamic_linker='NetBSD ld.elf_so'
+ fi
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+
+newsos6)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+*nto* | *qnx*)
+ version_type=qnx
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ dynamic_linker='ldqnx.so'
+ ;;
+
+openbsd*)
+ version_type=sunos
+ sys_lib_dlsearch_path_spec="/usr/lib"
+ need_lib_prefix=no
+ # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
+ case $host_os in
+ openbsd3.3 | openbsd3.3.*) need_version=yes ;;
+ *) need_version=no ;;
+ esac
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ case $host_os in
+ openbsd2.[89] | openbsd2.[89].*)
+ shlibpath_overrides_runpath=no
+ ;;
+ *)
+ shlibpath_overrides_runpath=yes
+ ;;
+ esac
+ else
+ shlibpath_overrides_runpath=yes
+ fi
+ ;;
+
+os2*)
+ libname_spec='$name'
+ shrext_cmds=".dll"
+ need_lib_prefix=no
+ library_names_spec='$libname${shared_ext} $libname.a'
+ dynamic_linker='OS/2 ld.exe'
+ shlibpath_var=LIBPATH
+ ;;
+
+osf3* | osf4* | osf5*)
+ version_type=osf
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}${shared_ext}$major'
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+ ;;
+
+rdos*)
+ dynamic_linker=no
+ ;;
+
+solaris*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ # ldd complains unless libraries are executable
+ postinstall_cmds='chmod +x $lib'
+ ;;
+
+sunos4*)
+ version_type=sunos
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ fi
+ need_version=yes
+ ;;
+
+sysv4 | sysv4.3*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_vendor in
+ sni)
+ shlibpath_overrides_runpath=no
+ need_lib_prefix=no
+ runpath_var=LD_RUN_PATH
+ ;;
+ siemens)
+ need_lib_prefix=no
+ ;;
+ motorola)
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+ ;;
+ esac
+ ;;
+
+sysv4*MP*)
+ if test -d /usr/nec; then
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
+ soname_spec='$libname${shared_ext}.$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ fi
+ ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ version_type=freebsd-elf
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ if test "$with_gnu_ld" = yes; then
+ sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+ else
+ sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+ case $host_os in
+ sco3.2v5*)
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+ ;;
+ esac
+ fi
+ sys_lib_dlsearch_path_spec='/usr/lib'
+ ;;
+
+tpf*)
+ # TPF is a cross-target only. Preferred cross-host = GNU/Linux.
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+uts4*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+*)
+ dynamic_linker=no
+ ;;
+esac
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
+$as_echo "$dynamic_linker" >&6; }
+test "$dynamic_linker" = no && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
+ sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+fi
+if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
+ sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
+$as_echo_n "checking how to hardcode library paths into programs... " >&6; }
+hardcode_action=
+if test -n "$hardcode_libdir_flag_spec" ||
+ test -n "$runpath_var" ||
+ test "X$hardcode_automatic" = "Xyes" ; then
+
+ # We can hardcode non-existent directories.
+ if test "$hardcode_direct" != no &&
+ # If the only mechanism to avoid hardcoding is shlibpath_var, we
+ # have to relink, otherwise we might link with an installed library
+ # when we should be linking with a yet-to-be-installed one
+ ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no &&
+ test "$hardcode_minus_L" != no; then
+ # Linking always hardcodes the temporary library directory.
+ hardcode_action=relink
+ else
+ # We can link without hardcoding, and we can hardcode nonexisting dirs.
+ hardcode_action=immediate
+ fi
+else
+ # We cannot hardcode anything, or else we can only hardcode existing
+ # directories.
+ hardcode_action=unsupported
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5
+$as_echo "$hardcode_action" >&6; }
+
+if test "$hardcode_action" = relink ||
+ test "$inherit_rpath" = yes; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
+
+
+
+
+
+
+ if test "x$enable_dlopen" != xyes; then
+ enable_dlopen=unknown
+ enable_dlopen_self=unknown
+ enable_dlopen_self_static=unknown
+else
+ lt_cv_dlopen=no
+ lt_cv_dlopen_libs=
+
+ case $host_os in
+ beos*)
+ lt_cv_dlopen="load_add_on"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ;;
+
+ mingw* | pw32* | cegcc*)
+ lt_cv_dlopen="LoadLibrary"
+ lt_cv_dlopen_libs=
+ ;;
+
+ cygwin*)
+ lt_cv_dlopen="dlopen"
+ lt_cv_dlopen_libs=
+ ;;
+
+ darwin*)
+ # if libdl is installed we need to link against it
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
+$as_echo_n "checking for dlopen in -ldl... " >&6; }
+if ${ac_cv_lib_dl_dlopen+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldl $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen ();
+int
+main ()
+{
+return dlopen ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_dl_dlopen=yes
+else
+ ac_cv_lib_dl_dlopen=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
+$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
+if test "x$ac_cv_lib_dl_dlopen" = xyes; then :
+ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
+else
+
+ lt_cv_dlopen="dyld"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+
+fi
+
+ ;;
+
+ *)
+ ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load"
+if test "x$ac_cv_func_shl_load" = xyes; then :
+ lt_cv_dlopen="shl_load"
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5
+$as_echo_n "checking for shl_load in -ldld... " >&6; }
+if ${ac_cv_lib_dld_shl_load+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldld $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char shl_load ();
+int
+main ()
+{
+return shl_load ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_dld_shl_load=yes
+else
+ ac_cv_lib_dld_shl_load=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5
+$as_echo "$ac_cv_lib_dld_shl_load" >&6; }
+if test "x$ac_cv_lib_dld_shl_load" = xyes; then :
+ lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"
+else
+ ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen"
+if test "x$ac_cv_func_dlopen" = xyes; then :
+ lt_cv_dlopen="dlopen"
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
+$as_echo_n "checking for dlopen in -ldl... " >&6; }
+if ${ac_cv_lib_dl_dlopen+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldl $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen ();
+int
+main ()
+{
+return dlopen ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_dl_dlopen=yes
+else
+ ac_cv_lib_dl_dlopen=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
+$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
+if test "x$ac_cv_lib_dl_dlopen" = xyes; then :
+ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5
+$as_echo_n "checking for dlopen in -lsvld... " >&6; }
+if ${ac_cv_lib_svld_dlopen+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lsvld $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen ();
+int
+main ()
+{
+return dlopen ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_svld_dlopen=yes
+else
+ ac_cv_lib_svld_dlopen=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5
+$as_echo "$ac_cv_lib_svld_dlopen" >&6; }
+if test "x$ac_cv_lib_svld_dlopen" = xyes; then :
+ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5
+$as_echo_n "checking for dld_link in -ldld... " >&6; }
+if ${ac_cv_lib_dld_dld_link+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldld $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dld_link ();
+int
+main ()
+{
+return dld_link ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_dld_dld_link=yes
+else
+ ac_cv_lib_dld_dld_link=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5
+$as_echo "$ac_cv_lib_dld_dld_link" >&6; }
+if test "x$ac_cv_lib_dld_dld_link" = xyes; then :
+ lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+ ;;
+ esac
+
+ if test "x$lt_cv_dlopen" != xno; then
+ enable_dlopen=yes
+ else
+ enable_dlopen=no
+ fi
+
+ case $lt_cv_dlopen in
+ dlopen)
+ save_CPPFLAGS="$CPPFLAGS"
+ test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+ save_LDFLAGS="$LDFLAGS"
+ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+ save_LIBS="$LIBS"
+ LIBS="$lt_cv_dlopen_libs $LIBS"
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5
+$as_echo_n "checking whether a program can dlopen itself... " >&6; }
+if ${lt_cv_dlopen_self+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ lt_cv_dlopen_self=cross
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+ find out it does not work on some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+ correspondingly for the symbols needed. */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else
+ {
+ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else puts (dlerror ());
+ }
+ /* dlclose (self); */
+ }
+ else
+ puts (dlerror ());
+
+ return status;
+}
+_LT_EOF
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) >&5 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;;
+ x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;;
+ x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;;
+ esac
+ else :
+ # compilation failed
+ lt_cv_dlopen_self=no
+ fi
+fi
+rm -fr conftest*
+
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5
+$as_echo "$lt_cv_dlopen_self" >&6; }
+
+ if test "x$lt_cv_dlopen_self" = xyes; then
+ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5
+$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; }
+if ${lt_cv_dlopen_self_static+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ lt_cv_dlopen_self_static=cross
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+ find out it does not work on some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+ correspondingly for the symbols needed. */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else
+ {
+ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else puts (dlerror ());
+ }
+ /* dlclose (self); */
+ }
+ else
+ puts (dlerror ());
+
+ return status;
+}
+_LT_EOF
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) >&5 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;;
+ x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;;
+ x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;;
+ esac
+ else :
+ # compilation failed
+ lt_cv_dlopen_self_static=no
+ fi
+fi
+rm -fr conftest*
+
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5
+$as_echo "$lt_cv_dlopen_self_static" >&6; }
+ fi
+
+ CPPFLAGS="$save_CPPFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+ LIBS="$save_LIBS"
+ ;;
+ esac
+
+ case $lt_cv_dlopen_self in
+ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+ *) enable_dlopen_self=unknown ;;
+ esac
+
+ case $lt_cv_dlopen_self_static in
+ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+ *) enable_dlopen_self_static=unknown ;;
+ esac
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+striplib=
+old_striplib=
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5
+$as_echo_n "checking whether stripping libraries is possible... " >&6; }
+if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+ test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+ test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+# FIXME - insert some real tests, host_os isn't really good enough
+ case $host_os in
+ darwin*)
+ if test -n "$STRIP" ; then
+ striplib="$STRIP -x"
+ old_striplib="$STRIP -S"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ fi
+ ;;
+ *)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ ;;
+ esac
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+ # Report which library types will actually be built
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5
+$as_echo_n "checking if libtool supports shared libraries... " >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5
+$as_echo "$can_build_shared" >&6; }
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5
+$as_echo_n "checking whether to build shared libraries... " >&6; }
+ test "$can_build_shared" = "no" && enable_shared=no
+
+ # On AIX, shared libraries and static libraries use the same namespace, and
+ # are all built from PIC.
+ case $host_os in
+ aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+
+ aix[4-9]*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+ esac
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5
+$as_echo "$enable_shared" >&6; }
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5
+$as_echo_n "checking whether to build static libraries... " >&6; }
+ # Make sure either enable_shared or enable_static is yes.
+ test "$enable_shared" = yes || enable_static=yes
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5
+$as_echo "$enable_static" >&6; }
+
+
+
+
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+CC="$lt_save_CC"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ac_config_commands="$ac_config_commands libtool"
+
+
+
+
+# Only expand once:
+
+
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -z "$CXX"; then
+ if test -n "$CCC"; then
+ CXX=$CCC
+ else
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CXX"; then
+ ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
+$as_echo "$CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CXX" && break
+ done
+fi
+if test -z "$CXX"; then
+ ac_ct_CXX=$CXX
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CXX"; then
+ ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CXX="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+$as_echo "$ac_ct_CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CXX" && break
+done
+
+ if test "x$ac_ct_CXX" = x; then
+ CXX="g++"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CXX=$ac_ct_CXX
+ fi
+fi
+
+ fi
+fi
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+ { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ sed '10a\
+... rest of stderr output deleted ...
+ 10q' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ fi
+ rm -f conftest.er1 conftest.err
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5
+$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
+if ${ac_cv_cxx_compiler_gnu+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
+$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GXX=yes
+else
+ GXX=
+fi
+ac_test_CXXFLAGS=${CXXFLAGS+set}
+ac_save_CXXFLAGS=$CXXFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
+$as_echo_n "checking whether $CXX accepts -g... " >&6; }
+if ${ac_cv_prog_cxx_g+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+ ac_cxx_werror_flag=yes
+ ac_cv_prog_cxx_g=no
+ CXXFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_prog_cxx_g=yes
+else
+ CXXFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+ CXXFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_prog_cxx_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
+$as_echo "$ac_cv_prog_cxx_g" >&6; }
+if test "$ac_test_CXXFLAGS" = set; then
+ CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+ if test "$GXX" = yes; then
+ CXXFLAGS="-g -O2"
+ else
+ CXXFLAGS="-g"
+ fi
+else
+ if test "$GXX" = yes; then
+ CXXFLAGS="-O2"
+ else
+ CXXFLAGS=
+ fi
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+depcc="$CXX" am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CXX_dependencies_compiler_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named `D' -- because `-MD' means `put the output
+ # in D'.
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CXX_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+ case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac
+
+ for depmode in $am_compiler_list; do
+ # Set up a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+ # Solaris 8's {/usr,}/bin/sh.
+ touch sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle `-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # after this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok `-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options; it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CXX_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CXX_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; }
+CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then
+ am__fastdepCXX_TRUE=
+ am__fastdepCXX_FALSE='#'
+else
+ am__fastdepCXX_TRUE='#'
+ am__fastdepCXX_FALSE=
+fi
+
+
+
+
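+# func_stripname_cnf PREFIX SUFFIX NAME
+# Strips PREFIX from the front and SUFFIX from the end of NAME via sed,
+# leaving the result in $func_stripname_result.  A SUFFIX beginning with
+# a dot is escaped so sed matches the dot literally.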
+func_stripname_cnf ()
+{
+ case ${2} in
+ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;;
+ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;;
+ esac
+} # func_stripname_cnf
+
+ if test -n "$CXX" && ( test "X$CXX" != "Xno" &&
+ ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) ||
+ (test "X$CXX" != "Xg++"))) ; then
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5
+$as_echo_n "checking how to run the C++ preprocessor... " >&6; }
+if test -z "$CXXCPP"; then
+ if ${ac_cv_prog_CXXCPP+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # Double quotes because CXXCPP needs to be expanded
+ for CXXCPP in "$CXX -E" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+ break
+fi
+
+ done
+ ac_cv_prog_CXXCPP=$CXXCPP
+
+fi
+ CXXCPP=$ac_cv_prog_CXXCPP
+else
+ ac_cv_prog_CXXCPP=$CXXCPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5
+$as_echo "$CXXCPP" >&6; }
+ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+
+else
+ # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+ # Broken: success on invalid input.
+continue
+else
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+else
+ _lt_caught_CXX_error=yes
+fi
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+archive_cmds_need_lc_CXX=no
+allow_undefined_flag_CXX=
+always_export_symbols_CXX=no
+archive_expsym_cmds_CXX=
+compiler_needs_object_CXX=no
+export_dynamic_flag_spec_CXX=
+hardcode_direct_CXX=no
+hardcode_direct_absolute_CXX=no
+hardcode_libdir_flag_spec_CXX=
+hardcode_libdir_separator_CXX=
+hardcode_minus_L_CXX=no
+hardcode_shlibpath_var_CXX=unsupported
+hardcode_automatic_CXX=no
+inherit_rpath_CXX=no
+module_cmds_CXX=
+module_expsym_cmds_CXX=
+link_all_deplibs_CXX=unknown
+old_archive_cmds_CXX=$old_archive_cmds
+reload_flag_CXX=$reload_flag
+reload_cmds_CXX=$reload_cmds
+no_undefined_flag_CXX=
+whole_archive_flag_spec_CXX=
+enable_shared_with_static_runtimes_CXX=no
+
+# Source file extension for C++ test sources.
+ac_ext=cpp
+
+# Object file extension for compiled C++ test sources.
+objext=o
+objext_CXX=$objext
+
+# No sense in running all these tests if we already determined that
+# the CXX compiler isn't working. Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_caught_CXX_error" != yes; then
+ # Code to be used in simple compile tests
+ lt_simple_compile_test_code="int some_variable = 0;"
+
+ # Code to be used in simple link tests
+ lt_simple_link_test_code='int main(int, char *[]) { return(0); }'
+
+ # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+
+ # save warnings/boilerplate of simple test code
+ ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+
+ ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+
+
+ # Allow CC to be a program name with arguments.
+ lt_save_CC=$CC
+ lt_save_CFLAGS=$CFLAGS
+ lt_save_LD=$LD
+ lt_save_GCC=$GCC
+ GCC=$GXX
+ lt_save_with_gnu_ld=$with_gnu_ld
+ lt_save_path_LD=$lt_cv_path_LD
+ if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+ lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+ else
+ $as_unset lt_cv_prog_gnu_ld
+ fi
+ if test -n "${lt_cv_path_LDCXX+set}"; then
+ lt_cv_path_LD=$lt_cv_path_LDCXX
+ else
+ $as_unset lt_cv_path_LD
+ fi
+ test -z "${LDCXX+set}" || LD=$LDCXX
+ CC=${CXX-"c++"}
+ CFLAGS=$CXXFLAGS
+ compiler=$CC
+ compiler_CXX=$CC
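+ # Walk the words of $compiler, skipping wrapper programs (compile,
+ # ccache, distcc, purify) and leading options, so that cc_basename
+ # below reflects the real compiler name.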
+ for cc_temp in $compiler""; do
+ case $cc_temp in
+ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+ \-*) ;;
+ *) break;;
+ esac
+done
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+
+
+ if test -n "$compiler"; then
+ # We don't want -fno-exception when compiling C++ code, so set the
+ # no_builtin_flag separately
+ if test "$GXX" = yes; then
+ lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin'
+ else
+ lt_prog_compiler_no_builtin_flag_CXX=
+ fi
+
+ if test "$GXX" = yes; then
+ # Set up default GNU C++ configuration
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then :
+ withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [\\/]* | ?:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the pathname of ld
+ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$LD"; then
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some variants of GNU ld only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break
+ ;;
+ *)
+ test "$with_gnu_ld" != yes && break
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if ${lt_cv_prog_gnu_ld+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ lt_cv_prog_gnu_ld=yes
+ ;;
+*)
+ lt_cv_prog_gnu_ld=no
+ ;;
+esac
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+
+
+
+
+
+ # Check if GNU C++ uses GNU ld as the underlying linker, since the
+ # archiving commands below assume that GNU ld is being used.
+ if test "$with_gnu_ld" = yes; then
+ archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+
+ hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+ export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+ # investigate it a little bit more. (MM)
+ wlarc='${wl}'
+
+ # ancient GNU ld didn't support --whole-archive et al.
+ if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+ $GREP 'no-whole-archive' > /dev/null; then
+ whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ whole_archive_flag_spec_CXX=
+ fi
+ else
+ with_gnu_ld=no
+ wlarc=
+
+ # A generic and very simple default shared library creation
+ # command for GNU C++ for the case where it uses the native
+ # linker instead of GNU ld. If possible, this setting should be
+ # overridden to take advantage of the native linker features on
+ # the platform it is being used on.
+ archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+ fi
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+ else
+ GXX=no
+ with_gnu_ld=no
+ wlarc=
+ fi
+
+ # PORTME: fill in a description of your system's C++ link characteristics
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+ ld_shlibs_CXX=yes
+ case $host_os in
+ aix3*)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ aix[4-9]*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+ for ld_flag in $LDFLAGS; do
+ case $ld_flag in
+ *-brtl*)
+ aix_use_runtimelinking=yes
+ break
+ ;;
+ esac
+ done
+ ;;
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ archive_cmds_CXX=''
+ hardcode_direct_CXX=yes
+ hardcode_direct_absolute_CXX=yes
+ hardcode_libdir_separator_CXX=':'
+ link_all_deplibs_CXX=yes
+ file_list_spec_CXX='${wl}-f,'
+
+ if test "$GXX" = yes; then
+ case $host_os in aix4.[012]|aix4.[012].*)
+ # We only want to do this on AIX 4.2 and lower, the check
+ # below for broken collect2 doesn't work under 4.3+
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" &&
+ strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ :
+ else
+ # We have old collect2
+ hardcode_direct_CXX=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_minus_L
+ # to unsupported forces relinking
+ hardcode_minus_L_CXX=yes
+ hardcode_libdir_flag_spec_CXX='-L$libdir'
+ hardcode_libdir_separator_CXX=
+ fi
+ esac
+ shared_flag='-shared'
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag="$shared_flag "'${wl}-G'
+ fi
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+ # chokes on -Wl,-G. The following line is correct:
+ shared_flag='-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+ fi
+
+ export_dynamic_flag_spec_CXX='${wl}-bexpall'
+ # It seems that -bexpall does not export symbols beginning with
+ # underscore (_), so it is better to generate a list of symbols to
+ # export.
+ always_export_symbols_CXX=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag_CXX='-berok'
+ # Determine the default libpath from the value encoded in an empty
+ # executable.
+ if test "${lt_cv_aix_libpath+set}" = set; then
+ aix_libpath=$lt_cv_aix_libpath
+else
+ if ${lt_cv_aix_libpath__CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
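+ # The sed script below keeps the index-0 entry of the "Import File
+ # Strings" section of dump -H output, which is the libpath recorded
+ # in the empty test executable.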
+
+ lt_aix_libpath_sed='
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\([^ ]*\) *$/\1/
+ p
+ }
+ }'
+ lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ # Check for a 64-bit object if we didn't find anything.
+ if test -z "$lt_cv_aix_libpath__CXX"; then
+ lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ if test -z "$lt_cv_aix_libpath__CXX"; then
+ lt_cv_aix_libpath__CXX="/usr/lib:/lib"
+ fi
+
+fi
+
+ aix_libpath=$lt_cv_aix_libpath__CXX
+fi
+
+ hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath"
+
+ archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib'
+ allow_undefined_flag_CXX="-z nodefs"
+ archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+ else
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ if test "${lt_cv_aix_libpath+set}" = set; then
+ aix_libpath=$lt_cv_aix_libpath
+else
+ if ${lt_cv_aix_libpath__CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+
+ lt_aix_libpath_sed='
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\([^ ]*\) *$/\1/
+ p
+ }
+ }'
+ lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ # Check for a 64-bit object if we didn't find anything.
+ if test -z "$lt_cv_aix_libpath__CXX"; then
+ lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ if test -z "$lt_cv_aix_libpath__CXX"; then
+ lt_cv_aix_libpath__CXX="/usr/lib:/lib"
+ fi
+
+fi
+
+ aix_libpath=$lt_cv_aix_libpath__CXX
+fi
+
+ hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath"
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ no_undefined_flag_CXX=' ${wl}-bernotok'
+ allow_undefined_flag_CXX=' ${wl}-berok'
+ if test "$with_gnu_ld" = yes; then
+ # We only use this code for GNU lds that support --whole-archive.
+ whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ else
+ # Exported symbols can be pulled into shared objects from archives
+ whole_archive_flag_spec_CXX='$convenience'
+ fi
+ archive_cmds_need_lc_CXX=yes
+ # This is similar to how AIX traditionally builds its shared
+ # libraries.
+ archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+ fi
+ fi
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ allow_undefined_flag_CXX=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ ld_shlibs_CXX=no
+ fi
+ ;;
+
+ chorus*)
+ case $cc_basename in
+ *)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ esac
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ case $GXX,$cc_basename in
+ ,cl* | no,cl*)
+ # Native MSVC
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec_CXX=' '
+ allow_undefined_flag_CXX=unsupported
+ always_export_symbols_CXX=yes
+ file_list_spec_CXX='@'
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+ else
+ $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+ fi~
+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+ linknames='
+ # The linker will not automatically build a static lib if we build a DLL.
+ # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true'
+ enable_shared_with_static_runtimes_CXX=yes
+ # Don't use ranlib
+ old_postinstall_cmds_CXX='chmod 644 $oldlib'
+ postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~
+ lt_tool_outputfile="@TOOL_OUTPUT@"~
+ case $lt_outputfile in
+ *.exe|*.EXE) ;;
+ *)
+ lt_outputfile="$lt_outputfile.exe"
+ lt_tool_outputfile="$lt_tool_outputfile.exe"
+ ;;
+ esac~
+ func_to_tool_file "$lt_outputfile"~
+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+ $RM "$lt_outputfile.manifest";
+ fi'
+ ;;
+ *)
+ # g++
+ # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless,
+ # as there is no search path for DLLs.
+ hardcode_libdir_flag_spec_CXX='-L$libdir'
+ export_dynamic_flag_spec_CXX='${wl}--export-all-symbols'
+ allow_undefined_flag_CXX=unsupported
+ always_export_symbols_CXX=no
+ enable_shared_with_static_runtimes_CXX=yes
+
+ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+ archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is; otherwise, prepend...
+ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname.def;
+ else
+ echo EXPORTS > $output_objdir/$soname.def;
+ cat $export_symbols >> $output_objdir/$soname.def;
+ fi~
+ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ else
+ ld_shlibs_CXX=no
+ fi
+ ;;
+ esac
+ ;;
+ darwin* | rhapsody*)
+
+
+ archive_cmds_need_lc_CXX=no
+ hardcode_direct_CXX=no
+ hardcode_automatic_CXX=yes
+ hardcode_shlibpath_var_CXX=unsupported
+ if test "$lt_cv_ld_force_load" = "yes"; then
+ whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+
+ else
+ whole_archive_flag_spec_CXX=''
+ fi
+ link_all_deplibs_CXX=yes
+ allow_undefined_flag_CXX="$_lt_dar_allow_undefined"
+ case $cc_basename in
+ ifort*) _lt_dar_can_shared=yes ;;
+ *) _lt_dar_can_shared=$GCC ;;
+ esac
+ if test "$_lt_dar_can_shared" = "yes"; then
+ output_verbose_link_cmd=func_echo_all
+ archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
+ module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
+ archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
+ module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
+ if test "$lt_cv_apple_cc_single_mod" != "yes"; then
+ archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}"
+ archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}"
+ fi
+
+ else
+ ld_shlibs_CXX=no
+ fi
+
+ ;;
+
+ dgux*)
+ case $cc_basename in
+ ec++*)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ ghcx*)
+ # Green Hills C++ Compiler
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ esac
+ ;;
+
+ freebsd2.*)
+ # C++ shared libraries were reported to be fairly broken before the
+ # switch to ELF.
+ ld_shlibs_CXX=no
+ ;;
+
+ freebsd-elf*)
+ archive_cmds_need_lc_CXX=no
+ ;;
+
+ freebsd* | dragonfly*)
+ # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
+ # conventions
+ ld_shlibs_CXX=yes
+ ;;
+
+ gnu*)
+ ;;
+
+ haiku*)
+ archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ link_all_deplibs_CXX=yes
+ ;;
+
+ hpux9*)
+ hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator_CXX=:
+ export_dynamic_flag_spec_CXX='${wl}-E'
+ hardcode_direct_CXX=yes
+ hardcode_minus_L_CXX=yes # Not in the search PATH,
+ # but as the default
+ # location of the library.
+
+ case $cc_basename in
+ CC*)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ aCC*)
+ archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+ ;;
+ *)
+ if test "$GXX" = yes; then
+ archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ else
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ fi
+ ;;
+ esac
+ ;;
+
+ hpux10*|hpux11*)
+ if test $with_gnu_ld = no; then
+ hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator_CXX=:
+
+ case $host_cpu in
+ hppa*64*|ia64*)
+ ;;
+ *)
+ export_dynamic_flag_spec_CXX='${wl}-E'
+ ;;
+ esac
+ fi
+ case $host_cpu in
+ hppa*64*|ia64*)
+ hardcode_direct_CXX=no
+ hardcode_shlibpath_var_CXX=no
+ ;;
+ *)
+ hardcode_direct_CXX=yes
+ hardcode_direct_absolute_CXX=yes
+ hardcode_minus_L_CXX=yes # Not in the search PATH,
+ # but as the default
+ # location of the library.
+ ;;
+ esac
+
+ case $cc_basename in
+ CC*)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ aCC*)
+ case $host_cpu in
+ hppa*64*)
+ archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ ia64*)
+ archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ *)
+ archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ esac
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+ ;;
+ *)
+ if test "$GXX" = yes; then
+ if test $with_gnu_ld = no; then
+ case $host_cpu in
+ hppa*64*)
+ archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ ia64*)
+ archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ *)
+ archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ esac
+ fi
+ else
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ fi
+ ;;
+ esac
+ ;;
+
+ interix[3-9]*)
+ hardcode_direct_CXX=no
+ hardcode_shlibpath_var_CXX=no
+ hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec_CXX='${wl}-E'
+ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+ # Instead, shared libraries are loaded at an image base (0x10000000 by
+ # default) and relocated if they conflict, which is a slow, very
+ # memory-consuming and fragmenting process. To avoid this, we pick a random,
+ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+ # time. Moving up from 0x10000000 also allows more sbrk(2) space.
+ archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ ;;
+ irix5* | irix6*)
+ case $cc_basename in
+ CC*)
+ # SGI C++
+ archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+
+ # Archives containing C++ object files must be created using
+ # "CC -ar", where "CC" is the IRIX C++ compiler. This is
+ # necessary to make sure instantiated templates are included
+ # in the archive.
+ old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs'
+ ;;
+ *)
+ if test "$GXX" = yes; then
+ if test "$with_gnu_ld" = no; then
+ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib'
+ fi
+ fi
+ link_all_deplibs_CXX=yes
+ ;;
+ esac
+ hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator_CXX=:
+ inherit_rpath_CXX=yes
+ ;;
+
+ linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ case $cc_basename in
+ KCC*)
+ # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+ # KCC will only create a shared library if the output file
+ # ends with ".so" (or ".sl" for HP-UX), so rename the library
+ # to its proper name (with version) after linking.
+ archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+ archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib'
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+
+ hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+
+ # Archives containing C++ object files must be created using
+ # "CC -Bstatic", where "CC" is the KAI C++ compiler.
+ old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs'
+ ;;
+ icpc* | ecpc* )
+ # Intel C++
+ with_gnu_ld=yes
+ # version 8.0 and above of icpc choke on multiply defined symbols
+ # if we add $predep_objects and $postdep_objects, however 7.1 and
+ # earlier do not add the objects themselves.
+ case `$CC -V 2>&1` in
+ *"Version 7."*)
+ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ ;;
+ *) # Version 8.0 or newer
+ tmp_idyn=
+ case $host_cpu in
+ ia64*) tmp_idyn=' -i_dynamic';;
+ esac
+ archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ ;;
+ esac
+ archive_cmds_need_lc_CXX=no
+ hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+ whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ ;;
+ pgCC* | pgcpp*)
+ # Portland Group C++ compiler
+ case `$CC -V` in
+ *pgCC\ [1-5].* | *pgcpp\ [1-5].*)
+ prelink_cmds_CXX='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
+ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
+ old_archive_cmds_CXX='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
+ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
+ $RANLIB $oldlib'
+ archive_cmds_CXX='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+ archive_expsym_cmds_CXX='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ ;;
+ *) # Version 6 and above use weak symbols
+ archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+ archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ ;;
+ esac
+
+ hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir'
+ export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+ whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ ;;
+ cxx*)
+ # Compaq C++
+ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols'
+
+ runpath_var=LD_RUN_PATH
+ hardcode_libdir_flag_spec_CXX='-rpath $libdir'
+ hardcode_libdir_separator_CXX=:
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
+ ;;
+ xl* | mpixl* | bgxl*)
+ # IBM XL 8.0 on PPC, with GNU ld
+ hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+ export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+ archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ if test "x$supports_anon_versioning" = xyes; then
+ archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+ fi
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C++ 5.9
+ no_undefined_flag_CXX=' -zdefs'
+ archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols'
+ hardcode_libdir_flag_spec_CXX='-R$libdir'
+ whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ compiler_needs_object_CXX=yes
+
+ # Not sure whether something based on
+ # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
+ # would be better.
+ output_verbose_link_cmd='func_echo_all'
+
+ # Archives containing C++ object files must be created using
+ # "CC -xar", where "CC" is the Sun C++ compiler. This is
+ # necessary to make sure instantiated templates are included
+ # in the archive.
+ old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs'
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+
+ lynxos*)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+
+ m88k*)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+
+ mvs*)
+ case $cc_basename in
+ cxx*)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ esac
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
+ wlarc=
+ hardcode_libdir_flag_spec_CXX='-R$libdir'
+ hardcode_direct_CXX=yes
+ hardcode_shlibpath_var_CXX=no
+ fi
+ # Workaround some broken pre-1.5 toolchains
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"'
+ ;;
+
+ *nto* | *qnx*)
+ ld_shlibs_CXX=yes
+ ;;
+
+ openbsd2*)
+ # C++ shared libraries are fairly broken
+ ld_shlibs_CXX=no
+ ;;
+
+ openbsd*)
+ if test -f /usr/libexec/ld.so; then
+ hardcode_direct_CXX=yes
+ hardcode_shlibpath_var_CXX=no
+ hardcode_direct_absolute_CXX=yes
+ archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+ hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib'
+ export_dynamic_flag_spec_CXX='${wl}-E'
+ whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ fi
+ output_verbose_link_cmd=func_echo_all
+ else
+ ld_shlibs_CXX=no
+ fi
+ ;;
+
+ osf3* | osf4* | osf5*)
+ case $cc_basename in
+ KCC*)
+ # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+ # KCC will only create a shared library if the output file
+ # ends with ".so" (or ".sl" for HP-UX), so rename the library
+ # to its proper name (with version) after linking.
+ archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+
+ hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+ hardcode_libdir_separator_CXX=:
+
+ # Archives containing C++ object files must be created using
+ # the KAI C++ compiler.
+ case $host in
+ osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;;
+ *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;;
+ esac
+ ;;
+ RCC*)
+ # Rational C++ 2.4.1
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ cxx*)
+ case $host in
+ osf3*)
+ allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+ ;;
+ *)
+ allow_undefined_flag_CXX=' -expect_unresolved \*'
+ archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
+ echo "-hidden">> $lib.exp~
+ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~
+ $RM $lib.exp'
+ hardcode_libdir_flag_spec_CXX='-rpath $libdir'
+ ;;
+ esac
+
+ hardcode_libdir_separator_CXX=:
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+ ;;
+ *)
+ if test "$GXX" = yes && test "$with_gnu_ld" = no; then
+ allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*'
+ case $host in
+ osf3*)
+ archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ ;;
+ *)
+ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ ;;
+ esac
+
+ hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator_CXX=:
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+ else
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ fi
+ ;;
+ esac
+ ;;
+
+ psos*)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+
+ sunos4*)
+ case $cc_basename in
+ CC*)
+ # Sun C++ 4.x
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ lcc*)
+ # Lucid
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ esac
+ ;;
+
+ solaris*)
+ case $cc_basename in
+ CC* | sunCC*)
+ # Sun C++ 4.2, 5.x and Centerline C++
+ archive_cmds_need_lc_CXX=yes
+ no_undefined_flag_CXX=' -zdefs'
+ archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+ hardcode_libdir_flag_spec_CXX='-R$libdir'
+ hardcode_shlibpath_var_CXX=no
+ case $host_os in
+ solaris2.[0-5] | solaris2.[0-5].*) ;;
+ *)
+ # The compiler driver will combine and reorder linker options,
+ # but understands `-z linker_flag'.
+ # Supported since Solaris 2.6 (maybe 2.5.1?)
+ whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract'
+ ;;
+ esac
+ link_all_deplibs_CXX=yes
+
+ output_verbose_link_cmd='func_echo_all'
+
+ # Archives containing C++ object files must be created using
+ # "CC -xar", where "CC" is the Sun C++ compiler. This is
+ # necessary to make sure instantiated templates are included
+ # in the archive.
+ old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs'
+ ;;
+ gcx*)
+ # Green Hills C++ Compiler
+ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+
+ # The C++ compiler must be used to create the archive.
+ old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
+ ;;
+ *)
+ # GNU C++ compiler with Solaris linker
+ if test "$GXX" = yes && test "$with_gnu_ld" = no; then
+ no_undefined_flag_CXX=' ${wl}-z ${wl}defs'
+ if $CC --version | $GREP -v '^2\.7' > /dev/null; then
+ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+ archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+ else
+ # g++ 2.7 appears to require `-G' NOT `-shared' on this
+ # platform.
+ archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+ archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+ fi
+
+ hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir'
+ case $host_os in
+ solaris2.[0-5] | solaris2.[0-5].*) ;;
+ *)
+ whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+ ;;
+ esac
+ fi
+ ;;
+ esac
+ ;;
+
+ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
+ no_undefined_flag_CXX='${wl}-z,text'
+ archive_cmds_need_lc_CXX=no
+ hardcode_shlibpath_var_CXX=no
+ runpath_var='LD_RUN_PATH'
+
+ case $cc_basename in
+ CC*)
+ archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6*)
+ # Note: We can NOT use -z defs as we might desire, because we do not
+ # link with -lc, and that would cause any symbols used from libc to
+ # always be unresolved, which means just about no library would
+ # ever link correctly. If we're not using GNU ld we use -z text
+ # though, which does catch some bad symbols but isn't as heavy-handed
+ # as -z defs.
+ no_undefined_flag_CXX='${wl}-z,text'
+ allow_undefined_flag_CXX='${wl}-z,nodefs'
+ archive_cmds_need_lc_CXX=no
+ hardcode_shlibpath_var_CXX=no
+ hardcode_libdir_flag_spec_CXX='${wl}-R,$libdir'
+ hardcode_libdir_separator_CXX=':'
+ link_all_deplibs_CXX=yes
+ export_dynamic_flag_spec_CXX='${wl}-Bexport'
+ runpath_var='LD_RUN_PATH'
+
+ case $cc_basename in
+ CC*)
+ archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~
+ '"$old_archive_cmds_CXX"
+ reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~
+ '"$reload_cmds_CXX"
+ ;;
+ *)
+ archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ ;;
+
+ tandem*)
+ case $cc_basename in
+ NCC*)
+ # NonStop-UX NCC 3.20
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ esac
+ ;;
+
+ vxworks*)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+
+ *)
+ # FIXME: insert proper C++ library support
+ ld_shlibs_CXX=no
+ ;;
+ esac
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
+$as_echo "$ld_shlibs_CXX" >&6; }
+ test "$ld_shlibs_CXX" = no && can_build_shared=no
+
+ GCC_CXX="$GXX"
+ LD_CXX="$LD"
+
+ ## CAVEAT EMPTOR:
+ ## There is no encapsulation within the following macros; do not change
+ ## the running order or otherwise move them around unless you know exactly
+ ## what you are doing...
+ # Dependencies to place before and after the object being linked:
+predep_objects_CXX=
+postdep_objects_CXX=
+predeps_CXX=
+postdeps_CXX=
+compiler_lib_search_path_CXX=
+
+cat > conftest.$ac_ext <<_LT_EOF
+class Foo
+{
+public:
+ Foo (void) { a = 0; }
+private:
+ int a;
+};
+_LT_EOF
+
+
+_lt_libdeps_save_CFLAGS=$CFLAGS
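+# Disable LTO-style flags for this probe only; the saved CFLAGS value is
+# restored once the dependency scan below is done.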
+case "$CC $CFLAGS " in #(
+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+esac
+
+if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ # Parse the compiler output and extract the necessary
+ # objects, libraries and library flags.
+
+ # Sentinel used to keep track of whether or not we are before
+ # the conftest object file.
+ pre_test_object_deps_done=no
+
+ for p in `eval "$output_verbose_link_cmd"`; do
+ case ${prev}${p} in
+
+ -L* | -R* | -l*)
+ # Some compilers place space between "-{L,R}" and the path.
+ # Remove the space.
+ if test $p = "-L" ||
+ test $p = "-R"; then
+ prev=$p
+ continue
+ fi
+
+ # Expand the sysroot to ease extracting the directories later.
+ if test -z "$prev"; then
+ case $p in
+ -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+ -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+ -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+ esac
+ fi
+ case $p in
+ =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+ esac
+ if test "$pre_test_object_deps_done" = no; then
+ case ${prev} in
+ -L | -R)
+ # Internal compiler library paths should come after those
+ # provided the user. The postdeps already come after the
+ # user supplied libs so there is no need to process them.
+ if test -z "$compiler_lib_search_path_CXX"; then
+ compiler_lib_search_path_CXX="${prev}${p}"
+ else
+ compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}"
+ fi
+ ;;
+ # The "-l" case would never come before the object being
+ # linked, so don't bother handling this case.
+ esac
+ else
+ if test -z "$postdeps_CXX"; then
+ postdeps_CXX="${prev}${p}"
+ else
+ postdeps_CXX="${postdeps_CXX} ${prev}${p}"
+ fi
+ fi
+ prev=
+ ;;
+
+ *.lto.$objext) ;; # Ignore GCC LTO objects
+ *.$objext)
+ # This assumes that the test object file only shows up
+ # once in the compiler output.
+ if test "$p" = "conftest.$objext"; then
+ pre_test_object_deps_done=yes
+ continue
+ fi
+
+ if test "$pre_test_object_deps_done" = no; then
+ if test -z "$predep_objects_CXX"; then
+ predep_objects_CXX="$p"
+ else
+ predep_objects_CXX="$predep_objects_CXX $p"
+ fi
+ else
+ if test -z "$postdep_objects_CXX"; then
+ postdep_objects_CXX="$p"
+ else
+ postdep_objects_CXX="$postdep_objects_CXX $p"
+ fi
+ fi
+ ;;
+
+ *) ;; # Ignore the rest.
+
+ esac
+ done
+
+ # Clean up.
+ rm -f a.out a.exe
+else
+ echo "libtool.m4: error: problem compiling CXX test program"
+fi
+
+$RM -f conftest.$objext
+CFLAGS=$_lt_libdeps_save_CFLAGS
+
+# PORTME: override above test on systems where it is broken
+case $host_os in
+interix[3-9]*)
+ # Interix 3.5 installs completely hosed .la files for C++, so rather than
+ # hack all around it, let's just trust "g++" to DTRT.
+ predep_objects_CXX=
+ postdep_objects_CXX=
+ postdeps_CXX=
+ ;;
+
+linux*)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C++ 5.9
+
+ # The more standards-conforming stlport4 library is
+ # incompatible with the Cstd library. Avoid specifying
+ # it if it's in CXXFLAGS. Ignore libCrun as
+ # -library=stlport4 depends on it.
+ case " $CXX $CXXFLAGS " in
+ *" -library=stlport4 "*)
+ solaris_use_stlport4=yes
+ ;;
+ esac
+
+ if test "$solaris_use_stlport4" != yes; then
+ postdeps_CXX='-library=Cstd -library=Crun'
+ fi
+ ;;
+ esac
+ ;;
+
+solaris*)
+ case $cc_basename in
+ CC* | sunCC*)
+ # The more standards-conforming stlport4 library is
+ # incompatible with the Cstd library. Avoid specifying
+ # it if it's in CXXFLAGS. Ignore libCrun as
+ # -library=stlport4 depends on it.
+ case " $CXX $CXXFLAGS " in
+ *" -library=stlport4 "*)
+ solaris_use_stlport4=yes
+ ;;
+ esac
+
+ # Adding this requires a known-good setup of shared libraries for
+ # Sun compiler versions before 5.6, else PIC objects from an old
+ # archive will be linked into the output, leading to subtle bugs.
+ if test "$solaris_use_stlport4" != yes; then
+ postdeps_CXX='-library=Cstd -library=Crun'
+ fi
+ ;;
+ esac
+ ;;
+esac
+
+
+case " $postdeps_CXX " in
+*" -lc "*) archive_cmds_need_lc_CXX=no ;;
+esac
+ compiler_lib_search_dirs_CXX=
+if test -n "${compiler_lib_search_path_CXX}"; then
+ compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'`
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ lt_prog_compiler_wl_CXX=
+lt_prog_compiler_pic_CXX=
+lt_prog_compiler_static_CXX=
+
+
+ # C++ specific cases for pic, static, wl, etc.
+ if test "$GXX" = yes; then
+ lt_prog_compiler_wl_CXX='-Wl,'
+ lt_prog_compiler_static_CXX='-static'
+
+ case $host_os in
+ aix*)
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ lt_prog_compiler_static_CXX='-Bstatic'
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ lt_prog_compiler_pic_CXX='-fPIC'
+ ;;
+ m68k)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ esac
+ ;;
+
+ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+ mingw* | cygwin* | os2* | pw32* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ # Although the cygwin gcc ignores -fPIC, still need this for old-style
+ # (--disable-auto-import) libraries
+ lt_prog_compiler_pic_CXX='-DDLL_EXPORT'
+ ;;
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ lt_prog_compiler_pic_CXX='-fno-common'
+ ;;
+ *djgpp*)
+ # DJGPP does not support shared libraries at all
+ lt_prog_compiler_pic_CXX=
+ ;;
+ haiku*)
+ # PIC is the default for Haiku.
+ # The "-static" flag exists, but is broken.
+ lt_prog_compiler_static_CXX=
+ ;;
+ interix[3-9]*)
+ # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+ # Instead, we relocate shared libraries at runtime.
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ lt_prog_compiler_pic_CXX=-Kconform_pic
+ fi
+ ;;
+ hpux*)
+ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+ # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag
+ # sets the default TLS model and affects inlining.
+ case $host_cpu in
+ hppa*64*)
+ ;;
+ *)
+ lt_prog_compiler_pic_CXX='-fPIC'
+ ;;
+ esac
+ ;;
+ *qnx* | *nto*)
+ # QNX uses GNU C++, but need to define -shared option too, otherwise
+ # it will coredump.
+ lt_prog_compiler_pic_CXX='-fPIC -shared'
+ ;;
+ *)
+ lt_prog_compiler_pic_CXX='-fPIC'
+ ;;
+ esac
+ else
+ case $host_os in
+ aix[4-9]*)
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ lt_prog_compiler_static_CXX='-Bstatic'
+ else
+ lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+ chorus*)
+ case $cc_basename in
+ cxch68*)
+ # Green Hills C++ Compiler
+ # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
+ ;;
+ esac
+ ;;
+ mingw* | cygwin* | os2* | pw32* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_prog_compiler_pic_CXX='-DDLL_EXPORT'
+ ;;
+ dgux*)
+ case $cc_basename in
+ ec++*)
+ lt_prog_compiler_pic_CXX='-KPIC'
+ ;;
+ ghcx*)
+ # Green Hills C++ Compiler
+ lt_prog_compiler_pic_CXX='-pic'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ freebsd* | dragonfly*)
+ # FreeBSD uses GNU C++
+ ;;
+ hpux9* | hpux10* | hpux11*)
+ case $cc_basename in
+ CC*)
+ lt_prog_compiler_wl_CXX='-Wl,'
+ lt_prog_compiler_static_CXX='${wl}-a ${wl}archive'
+ if test "$host_cpu" != ia64; then
+ lt_prog_compiler_pic_CXX='+Z'
+ fi
+ ;;
+ aCC*)
+ lt_prog_compiler_wl_CXX='-Wl,'
+ lt_prog_compiler_static_CXX='${wl}-a ${wl}archive'
+ case $host_cpu in
+ hppa*64*|ia64*)
+ # +Z the default
+ ;;
+ *)
+ lt_prog_compiler_pic_CXX='+Z'
+ ;;
+ esac
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ interix*)
+ # This is c89, which is MS Visual C++ (no shared libs)
+ # Anyone wants to do a port?
+ ;;
+ irix5* | irix6* | nonstopux*)
+ case $cc_basename in
+ CC*)
+ lt_prog_compiler_wl_CXX='-Wl,'
+ lt_prog_compiler_static_CXX='-non_shared'
+ # CC pic flag -KPIC is the default.
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ case $cc_basename in
+ KCC*)
+ # KAI C++ Compiler
+ lt_prog_compiler_wl_CXX='--backend -Wl,'
+ lt_prog_compiler_pic_CXX='-fPIC'
+ ;;
+ ecpc* )
+ # old Intel C++ for x86_64 which still supported -KPIC.
+ lt_prog_compiler_wl_CXX='-Wl,'
+ lt_prog_compiler_pic_CXX='-KPIC'
+ lt_prog_compiler_static_CXX='-static'
+ ;;
+ icpc* )
+ # Intel C++, used to be incompatible with GCC.
+ # ICC 10 doesn't accept -KPIC any more.
+ lt_prog_compiler_wl_CXX='-Wl,'
+ lt_prog_compiler_pic_CXX='-fPIC'
+ lt_prog_compiler_static_CXX='-static'
+ ;;
+ pgCC* | pgcpp*)
+ # Portland Group C++ compiler
+ lt_prog_compiler_wl_CXX='-Wl,'
+ lt_prog_compiler_pic_CXX='-fpic'
+ lt_prog_compiler_static_CXX='-Bstatic'
+ ;;
+ cxx*)
+ # Compaq C++
+ # Make sure the PIC flag is empty. It appears that all Alpha
+ # Linux and Compaq Tru64 Unix objects are PIC.
+ lt_prog_compiler_pic_CXX=
+ lt_prog_compiler_static_CXX='-non_shared'
+ ;;
+ xlc* | xlC* | bgxl[cC]* | mpixl[cC]*)
+ # IBM XL 8.0, 9.0 on PPC and BlueGene
+ lt_prog_compiler_wl_CXX='-Wl,'
+ lt_prog_compiler_pic_CXX='-qpic'
+ lt_prog_compiler_static_CXX='-qstaticlink'
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C++ 5.9
+ lt_prog_compiler_pic_CXX='-KPIC'
+ lt_prog_compiler_static_CXX='-Bstatic'
+ lt_prog_compiler_wl_CXX='-Qoption ld '
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+ lynxos*)
+ ;;
+ m88k*)
+ ;;
+ mvs*)
+ case $cc_basename in
+ cxx*)
+ lt_prog_compiler_pic_CXX='-W c,exportall'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ netbsd* | netbsdelf*-gnu)
+ ;;
+ *qnx* | *nto*)
+ # QNX uses GNU C++, but need to define -shared option too, otherwise
+ # it will coredump.
+ lt_prog_compiler_pic_CXX='-fPIC -shared'
+ ;;
+ osf3* | osf4* | osf5*)
+ case $cc_basename in
+ KCC*)
+ lt_prog_compiler_wl_CXX='--backend -Wl,'
+ ;;
+ RCC*)
+ # Rational C++ 2.4.1
+ lt_prog_compiler_pic_CXX='-pic'
+ ;;
+ cxx*)
+ # Digital/Compaq C++
+ lt_prog_compiler_wl_CXX='-Wl,'
+ # Make sure the PIC flag is empty. It appears that all Alpha
+ # Linux and Compaq Tru64 Unix objects are PIC.
+ lt_prog_compiler_pic_CXX=
+ lt_prog_compiler_static_CXX='-non_shared'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ psos*)
+ ;;
+ solaris*)
+ case $cc_basename in
+ CC* | sunCC*)
+ # Sun C++ 4.2, 5.x and Centerline C++
+ lt_prog_compiler_pic_CXX='-KPIC'
+ lt_prog_compiler_static_CXX='-Bstatic'
+ lt_prog_compiler_wl_CXX='-Qoption ld '
+ ;;
+ gcx*)
+ # Green Hills C++ Compiler
+ lt_prog_compiler_pic_CXX='-PIC'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ sunos4*)
+ case $cc_basename in
+ CC*)
+ # Sun C++ 4.x
+ lt_prog_compiler_pic_CXX='-pic'
+ lt_prog_compiler_static_CXX='-Bstatic'
+ ;;
+ lcc*)
+ # Lucid
+ lt_prog_compiler_pic_CXX='-pic'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+ case $cc_basename in
+ CC*)
+ lt_prog_compiler_wl_CXX='-Wl,'
+ lt_prog_compiler_pic_CXX='-KPIC'
+ lt_prog_compiler_static_CXX='-Bstatic'
+ ;;
+ esac
+ ;;
+ tandem*)
+ case $cc_basename in
+ NCC*)
+ # NonStop-UX NCC 3.20
+ lt_prog_compiler_pic_CXX='-KPIC'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ vxworks*)
+ ;;
+ *)
+ lt_prog_compiler_can_build_shared_CXX=no
+ ;;
+ esac
+ fi
+
+case $host_os in
+ # For platforms which do not support PIC, -DPIC is meaningless:
+ *djgpp*)
+ lt_prog_compiler_pic_CXX=
+ ;;
+ *)
+ lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC"
+ ;;
+esac
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
+$as_echo_n "checking for $compiler option to produce PIC... " >&6; }
+if ${lt_cv_prog_compiler_pic_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5
+$as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; }
+lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$lt_prog_compiler_pic_CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5
+$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; }
+if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_pic_works_CXX=no
+ ac_outfile=conftest.$ac_objext
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+ lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ # The option is referenced via a variable to avoid confusing sed.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>conftest.err)
+ ac_status=$?
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s "$ac_outfile"; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings other than the usual output.
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_pic_works_CXX=yes
+ fi
+ fi
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5
+$as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; }
+
+if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then
+ case $lt_prog_compiler_pic_CXX in
+ "" | " "*) ;;
+ *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;;
+ esac
+else
+ lt_prog_compiler_pic_CXX=
+ lt_prog_compiler_can_build_shared_CXX=no
+fi
+
+fi
+
+
+
+
+
+#
+# Check to make sure the static flag actually works.
+#
+wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
+$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
+if ${lt_cv_prog_compiler_static_works_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_static_works_CXX=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
+ echo "$lt_simple_link_test_code" > conftest.$ac_ext
+ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+ # The linker can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ # Append any errors to the config.log.
+ cat conftest.err 1>&5
+ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if diff conftest.exp conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_static_works_CXX=yes
+ fi
+ else
+ lt_cv_prog_compiler_static_works_CXX=yes
+ fi
+ fi
+ $RM -r conftest*
+ LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5
+$as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; }
+
+if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then
+ :
+else
+ lt_prog_compiler_static_CXX=
+fi
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if ${lt_cv_prog_compiler_c_o_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_c_o_CXX=no
+ $RM -r conftest 2>/dev/null
+ mkdir conftest
+ cd conftest
+ mkdir out
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ lt_compiler_flag="-o out/conftest2.$ac_objext"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>out/conftest.err)
+ ac_status=$?
+ cat out/conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s out/conftest2.$ac_objext
+ then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_c_o_CXX=yes
+ fi
+ fi
+ chmod u+w . 2>&5
+ $RM conftest*
+ # SGI C++ compiler will create directory out/ii_files/ for
+ # template instantiation
+ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+ $RM out/* && rmdir out
+ cd ..
+ $RM -r conftest
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
+$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; }
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if ${lt_cv_prog_compiler_c_o_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_c_o_CXX=no
+ $RM -r conftest 2>/dev/null
+ mkdir conftest
+ cd conftest
+ mkdir out
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ lt_compiler_flag="-o out/conftest2.$ac_objext"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>out/conftest.err)
+ ac_status=$?
+ cat out/conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s out/conftest2.$ac_objext
+ then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_c_o_CXX=yes
+ fi
+ fi
+ chmod u+w . 2>&5
+ $RM conftest*
+ # SGI C++ compiler will create directory out/ii_files/ for
+ # template instantiation
+ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+ $RM out/* && rmdir out
+ cd ..
+ $RM -r conftest
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
+$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; }
+
+
+
+
+hard_links="nottested"
+if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then
+ # do not overwrite the value of need_locks provided by the user
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
+$as_echo_n "checking if we can lock with hard links... " >&6; }
+ hard_links=yes
+ $RM conftest*
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ touch conftest.a
+ ln conftest.a conftest.b 2>&5 || hard_links=no
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
+$as_echo "$hard_links" >&6; }
+ if test "$hard_links" = no; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5
+$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;}
+ need_locks=warn
+ fi
+else
+ need_locks=no
+fi
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+
+ export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+ exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+ case $host_os in
+ aix[4-9]*)
+ # If we're using GNU nm, then we don't want the "-C" option.
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm
+ # Also, AIX nm treats weak defined symbols like other global defined
+ # symbols, whereas GNU nm marks them as "W".
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+ export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ else
+ export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ fi
+ ;;
+ pw32*)
+ export_symbols_cmds_CXX="$ltdll_cmds"
+ ;;
+ cygwin* | mingw* | cegcc*)
+ case $cc_basename in
+ cl*)
+ exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+ ;;
+ *)
+ export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
+ exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
+ ;;
+ esac
+ ;;
+ linux* | k*bsd*-gnu | gnu*)
+ link_all_deplibs_CXX=no
+ ;;
+ *)
+ export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+ ;;
+ esac
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
+$as_echo "$ld_shlibs_CXX" >&6; }
+test "$ld_shlibs_CXX" = no && can_build_shared=no
+
+with_gnu_ld_CXX=$with_gnu_ld
+
+
+
+
+
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$archive_cmds_need_lc_CXX" in
+x|xyes)
+ # Assume -lc should be added
+ archive_cmds_need_lc_CXX=yes
+
+ if test "$enable_shared" = yes && test "$GCC" = yes; then
+ case $archive_cmds_CXX in
+ *'~'*)
+ # FIXME: we may have to deal with multi-command sequences.
+ ;;
+ '$CC '*)
+ # Test whether the compiler implicitly links with -lc since on some
+ # systems, -lgcc has to come before -lc. If gcc already passes -lc
+ # to ld, don't add -lc before -lgcc.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
+$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; }
+if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ $RM conftest*
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } 2>conftest.err; then
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$lt_prog_compiler_wl_CXX
+ pic_flag=$lt_prog_compiler_pic_CXX
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ lt_save_allow_undefined_flag=$allow_undefined_flag_CXX
+ allow_undefined_flag_CXX=
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
+ (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ then
+ lt_cv_archive_cmds_need_lc_CXX=no
+ else
+ lt_cv_archive_cmds_need_lc_CXX=yes
+ fi
+ allow_undefined_flag_CXX=$lt_save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5
+$as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; }
+ archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX
+ ;;
+ esac
+ fi
+ ;;
+esac
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
+$as_echo_n "checking dynamic linker characteristics... " >&6; }
+
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=".so"
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+case $host_os in
+aix3*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+ shlibpath_var=LIBPATH
+
+ # AIX 3 has no versioning support, so we append a major version to the name.
+ soname_spec='${libname}${release}${shared_ext}$major'
+ ;;
+
+aix[4-9]*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ hardcode_into_libs=yes
+ if test "$host_cpu" = ia64; then
+ # AIX 5 supports IA64
+ library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ else
+ # With GCC up to 2.95.x, collect2 would create an import file
+ # for dependence libraries. The import file would start with
+ # the line `#! .'. This would cause the generated library to
+ # depend on `.', always an invalid library. This was fixed in
+ # development snapshots of GCC prior to 3.0.
+ case $host_os in
+ aix4 | aix4.[01] | aix4.[01].*)
+ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ echo ' yes '
+ echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+ :
+ else
+ can_build_shared=no
+ fi
+ ;;
+ esac
+ # AIX (on Power*) has no versioning support, so currently we can not hardcode correct
+ # soname into executable. Probably we can add versioning support to
+ # collect2, so additional links can be useful in future.
+ if test "$aix_use_runtimelinking" = yes; then
+ # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+ # instead of lib<name>.a to let people know that these are not
+ # typical AIX shared libraries.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ else
+ # We preserve .a as extension for shared libraries through AIX4.2
+ # and later when we are not doing run time linking.
+ library_names_spec='${libname}${release}.a $libname.a'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ fi
+ shlibpath_var=LIBPATH
+ fi
+ ;;
+
+amigaos*)
+ case $host_cpu in
+ powerpc)
+ # Since July 2007 AmigaOS4 officially supports .so libraries.
+ # When compiling the executable, add -use-dynld -Lsobjs: to the compileline.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ ;;
+ m68k)
+ library_names_spec='$libname.ixlibrary $libname.a'
+ # Create ${libname}_ixlibrary.a entries in /sys/libs.
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+ ;;
+ esac
+ ;;
+
+beos*)
+ library_names_spec='${libname}${shared_ext}'
+ dynamic_linker="$host_os ld.so"
+ shlibpath_var=LIBRARY_PATH
+ ;;
+
+bsdi[45]*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+ # the default ld.so.conf also contains /usr/contrib/lib and
+ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+ # libtool to hard-code these into programs
+ ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+ version_type=windows
+ shrext_cmds=".dll"
+ need_version=no
+ need_lib_prefix=no
+
+ case $GCC,$cc_basename in
+ yes,*)
+ # gcc
+ library_names_spec='$libname.dll.a'
+ # DLL is installed to $(libdir)/../bin by postinstall_cmds
+ postinstall_cmds='base_file=`basename \${file}`~
+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog $dir/$dlname \$dldir/$dlname~
+ chmod a+x \$dldir/$dlname~
+ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+ fi'
+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $RM \$dlpath'
+ shlibpath_overrides_runpath=yes
+
+ case $host_os in
+ cygwin*)
+ # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+
+ ;;
+ mingw* | cegcc*)
+ # MinGW DLLs use traditional 'lib' prefix
+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ ;;
+ pw32*)
+ # pw32 DLLs use 'pw' prefix rather than 'lib'
+ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ ;;
+ esac
+ dynamic_linker='Win32 ld.exe'
+ ;;
+
+ *,cl*)
+ # Native MSVC
+ libname_spec='$name'
+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ library_names_spec='${libname}.dll.lib'
+
+ case $build_os in
+ mingw*)
+ sys_lib_search_path_spec=
+ lt_save_ifs=$IFS
+ IFS=';'
+ for lt_path in $LIB
+ do
+ IFS=$lt_save_ifs
+ # Let DOS variable expansion print the short 8.3 style file name.
+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+ done
+ IFS=$lt_save_ifs
+ # Convert to MSYS style.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
+ ;;
+ cygwin*)
+ # Convert to unix form, then to dos form, then back to unix form
+ # but this time dos style (no spaces!) so that the unix form looks
+ # like /cygdrive/c/PROGRA~1:/cygdr...
+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ ;;
+ *)
+ sys_lib_search_path_spec="$LIB"
+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+ # It is most probably a Windows format PATH.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+ else
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ fi
+ # FIXME: find the short name or the path components, as spaces are
+ # common. (e.g. "Program Files" -> "PROGRA~1")
+ ;;
+ esac
+
+ # DLL is installed to $(libdir)/../bin by postinstall_cmds
+ postinstall_cmds='base_file=`basename \${file}`~
+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog $dir/$dlname \$dldir/$dlname'
+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $RM \$dlpath'
+ shlibpath_overrides_runpath=yes
+ dynamic_linker='Win32 link.exe'
+ ;;
+
+ *)
+ # Assume MSVC wrapper
+ library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib'
+ dynamic_linker='Win32 ld.exe'
+ ;;
+ esac
+ # FIXME: first we should search . and the directory the executable is in
+ shlibpath_var=PATH
+ ;;
+
+darwin* | rhapsody*)
+ dynamic_linker="$host_os dyld"
+ version_type=darwin
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
+ soname_spec='${libname}${release}${major}$shared_ext'
+ shlibpath_overrides_runpath=yes
+ shlibpath_var=DYLD_LIBRARY_PATH
+ shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+
+ sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+ ;;
+
+dgux*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+freebsd* | dragonfly*)
+ # DragonFly does not have aout. When/if they implement a new
+ # versioning mechanism, adjust this.
+ if test -x /usr/bin/objformat; then
+ objformat=`/usr/bin/objformat`
+ else
+ case $host_os in
+ freebsd[23].*) objformat=aout ;;
+ *) objformat=elf ;;
+ esac
+ fi
+ version_type=freebsd-$objformat
+ case $version_type in
+ freebsd-elf*)
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+ need_version=no
+ need_lib_prefix=no
+ ;;
+ freebsd-*)
+ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+ need_version=yes
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_os in
+ freebsd2.*)
+ shlibpath_overrides_runpath=yes
+ ;;
+ freebsd3.[01]* | freebsdelf3.[01]*)
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+ freebsd3.[2-9]* | freebsdelf3.[2-9]* | \
+ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1)
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+ *) # from 4.6 on, and DragonFly
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+ esac
+ ;;
+
+gnu*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+haiku*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ dynamic_linker="$host_os runtime_loader"
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+ hardcode_into_libs=yes
+ ;;
+
+hpux9* | hpux10* | hpux11*)
+ # Give a soname corresponding to the major version so that dld.sl refuses to
+ # link against other versions.
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ case $host_cpu in
+ ia64*)
+ shrext_cmds='.so'
+ hardcode_into_libs=yes
+ dynamic_linker="$host_os dld.so"
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ if test "X$HPUX_IA64_MODE" = X32; then
+ sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+ else
+ sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+ fi
+ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+ ;;
+ hppa*64*)
+ shrext_cmds='.sl'
+ hardcode_into_libs=yes
+ dynamic_linker="$host_os dld.sl"
+ shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+ ;;
+ *)
+ shrext_cmds='.sl'
+ dynamic_linker="$host_os dld.sl"
+ shlibpath_var=SHLIB_PATH
+ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ ;;
+ esac
+ # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+ postinstall_cmds='chmod 555 $lib'
+ # or fails outright, so override atomically:
+ install_override_mode=555
+ ;;
+
+interix[3-9]*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+irix5* | irix6* | nonstopux*)
+ case $host_os in
+ nonstopux*) version_type=nonstopux ;;
+ *)
+ if test "$lt_cv_prog_gnu_ld" = yes; then
+ version_type=linux # correct to gnu/linux during the next big refactor
+ else
+ version_type=irix
+ fi ;;
+ esac
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}${shared_ext}$major'
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+ case $host_os in
+ irix5* | nonstopux*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in # libtool.m4 will add one of these switches to LD
+ *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+ libsuff= shlibsuff= libmagic=32-bit;;
+ *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+ libsuff=32 shlibsuff=N32 libmagic=N32;;
+ *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+ libsuff=64 shlibsuff=64 libmagic=64-bit;;
+ *) libsuff= shlibsuff= libmagic=never-match;;
+ esac
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+ hardcode_into_libs=yes
+ ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+ dynamic_linker=no
+ ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+
+ # Some binutils ld are patched to set DT_RUNPATH
+ if ${lt_cv_shlibpath_overrides_runpath+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_shlibpath_overrides_runpath=no
+ save_LDFLAGS=$LDFLAGS
+ save_libdir=$libdir
+ eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \
+ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+ if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then :
+ lt_cv_shlibpath_overrides_runpath=yes
+fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ LDFLAGS=$save_LDFLAGS
+ libdir=$save_libdir
+
+fi
+
+ shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+
+ # This implies no fast_install, which is unacceptable.
+ # Some rework will be needed to allow for fast_install
+ # before this can be enabled.
+ hardcode_into_libs=yes
+
+ # Append ld.so.conf contents to the search path
+ if test -f /etc/ld.so.conf; then
+ lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+ sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+ fi
+
+ # We used to test for /lib/ld.so.1 and disable shared libraries on
+ # powerpc, because MkLinux only supported shared libraries with the
+ # GNU dynamic linker. Since this was broken with cross compilers,
+ # most powerpc-linux boxes support dynamic linking these days and
+ # people can always --disable-shared, the test was removed, and we
+ # assume the GNU/Linux dynamic linker is in use.
+ dynamic_linker='GNU/Linux ld.so'
+ ;;
+
+netbsdelf*-gnu)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ dynamic_linker='NetBSD ld.elf_so'
+ ;;
+
+netbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ dynamic_linker='NetBSD (a.out) ld.so'
+ else
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ dynamic_linker='NetBSD ld.elf_so'
+ fi
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+
+newsos6)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+*nto* | *qnx*)
+ version_type=qnx
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ dynamic_linker='ldqnx.so'
+ ;;
+
+openbsd*)
+ version_type=sunos
+ sys_lib_dlsearch_path_spec="/usr/lib"
+ need_lib_prefix=no
+ # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
+ case $host_os in
+ openbsd3.3 | openbsd3.3.*) need_version=yes ;;
+ *) need_version=no ;;
+ esac
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ case $host_os in
+ openbsd2.[89] | openbsd2.[89].*)
+ shlibpath_overrides_runpath=no
+ ;;
+ *)
+ shlibpath_overrides_runpath=yes
+ ;;
+ esac
+ else
+ shlibpath_overrides_runpath=yes
+ fi
+ ;;
+
+os2*)
+ libname_spec='$name'
+ shrext_cmds=".dll"
+ need_lib_prefix=no
+ library_names_spec='$libname${shared_ext} $libname.a'
+ dynamic_linker='OS/2 ld.exe'
+ shlibpath_var=LIBPATH
+ ;;
+
+osf3* | osf4* | osf5*)
+ version_type=osf
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}${shared_ext}$major'
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+ ;;
+
+rdos*)
+ dynamic_linker=no
+ ;;
+
+solaris*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ # ldd complains unless libraries are executable
+ postinstall_cmds='chmod +x $lib'
+ ;;
+
+sunos4*)
+ version_type=sunos
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ fi
+ need_version=yes
+ ;;
+
+sysv4 | sysv4.3*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_vendor in
+ sni)
+ shlibpath_overrides_runpath=no
+ need_lib_prefix=no
+ runpath_var=LD_RUN_PATH
+ ;;
+ siemens)
+ need_lib_prefix=no
+ ;;
+ motorola)
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+ ;;
+ esac
+ ;;
+
+sysv4*MP*)
+ if test -d /usr/nec ;then
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
+ soname_spec='$libname${shared_ext}.$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ fi
+ ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ version_type=freebsd-elf
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ if test "$with_gnu_ld" = yes; then
+ sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+ else
+ sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+ case $host_os in
+ sco3.2v5*)
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+ ;;
+ esac
+ fi
+ sys_lib_dlsearch_path_spec='/usr/lib'
+ ;;
+
+tpf*)
+ # TPF is a cross-target only. Preferred cross-host = GNU/Linux.
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+uts4*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+*)
+ dynamic_linker=no
+ ;;
+esac
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
+$as_echo "$dynamic_linker" >&6; }
+test "$dynamic_linker" = no && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
+ sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+fi
+if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
+ sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
+$as_echo_n "checking how to hardcode library paths into programs... " >&6; }
+hardcode_action_CXX=
+if test -n "$hardcode_libdir_flag_spec_CXX" ||
+ test -n "$runpath_var_CXX" ||
+ test "X$hardcode_automatic_CXX" = "Xyes" ; then
+
+ # We can hardcode non-existent directories.
+ if test "$hardcode_direct_CXX" != no &&
+ # If the only mechanism to avoid hardcoding is shlibpath_var, we
+ # have to relink, otherwise we might link with an installed library
+ # when we should be linking with a yet-to-be-installed one
+ ## test "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" != no &&
+ test "$hardcode_minus_L_CXX" != no; then
+ # Linking always hardcodes the temporary library directory.
+ hardcode_action_CXX=relink
+ else
+ # We can link without hardcoding, and we can hardcode nonexisting dirs.
+ hardcode_action_CXX=immediate
+ fi
+else
+ # We cannot hardcode anything, or else we can only hardcode existing
+ # directories.
+ hardcode_action_CXX=unsupported
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5
+$as_echo "$hardcode_action_CXX" >&6; }
+
+if test "$hardcode_action_CXX" = relink ||
+ test "$inherit_rpath_CXX" = yes; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
+
+
+
+
+
+
+
+ fi # test -n "$compiler"
+
+ CC=$lt_save_CC
+ CFLAGS=$lt_save_CFLAGS
+ LDCXX=$LD
+ LD=$lt_save_LD
+ GCC=$lt_save_GCC
+ with_gnu_ld=$lt_save_with_gnu_ld
+ lt_cv_path_LDCXX=$lt_cv_path_LD
+ lt_cv_path_LD=$lt_save_path_LD
+ lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
+ lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
+fi # test "$_lt_caught_CXX_error" != yes
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5
+$as_echo_n "checking whether byte ordering is bigendian... " >&6; }
+if ${ac_cv_c_bigendian+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_bigendian=unknown
+ # See if we're dealing with a universal compiler.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifndef __APPLE_CC__
+ not a universal capable compiler
+ #endif
+ typedef int dummy;
+
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+ # Check for potential -arch flags. It is not universal unless
+ # there are at least two -arch flags with different values.
+ ac_arch=
+ ac_prev=
+ for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do
+ if test -n "$ac_prev"; then
+ case $ac_word in
+ i?86 | x86_64 | ppc | ppc64)
+ if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then
+ ac_arch=$ac_word
+ else
+ ac_cv_c_bigendian=universal
+ break
+ fi
+ ;;
+ esac
+ ac_prev=
+ elif test "x$ac_word" = "x-arch"; then
+ ac_prev=arch
+ fi
+ done
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ if test $ac_cv_c_bigendian = unknown; then
+ # See if sys/param.h defines the BYTE_ORDER macro.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+ #include <sys/param.h>
+
+int
+main ()
+{
+#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \
+ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \
+ && LITTLE_ENDIAN)
+ bogus endian macros
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ # It does; now see whether it defined to BIG_ENDIAN or not.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+ #include <sys/param.h>
+
+int
+main ()
+{
+#if BYTE_ORDER != BIG_ENDIAN
+ not big endian
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_c_bigendian=yes
+else
+ ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ if test $ac_cv_c_bigendian = unknown; then
+ # See if <limits.h> defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris).
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <limits.h>
+
+int
+main ()
+{
+#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN)
+ bogus endian macros
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ # It does; now see whether it defined to _BIG_ENDIAN or not.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <limits.h>
+
+int
+main ()
+{
+#ifndef _BIG_ENDIAN
+ not big endian
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_c_bigendian=yes
+else
+ ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ if test $ac_cv_c_bigendian = unknown; then
+ # Compile a test program.
+ if test "$cross_compiling" = yes; then :
+ # Try to guess by grepping values from an object file.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+short int ascii_mm[] =
+ { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
+ short int ascii_ii[] =
+ { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
+ int use_ascii (int i) {
+ return ascii_mm[i] + ascii_ii[i];
+ }
+ short int ebcdic_ii[] =
+ { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
+ short int ebcdic_mm[] =
+ { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
+ int use_ebcdic (int i) {
+ return ebcdic_mm[i] + ebcdic_ii[i];
+ }
+ extern int foo;
+
+int
+main ()
+{
+return use_ascii (foo) == use_ebcdic (foo);
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then
+ ac_cv_c_bigendian=yes
+ fi
+ if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then
+ if test "$ac_cv_c_bigendian" = unknown; then
+ ac_cv_c_bigendian=no
+ else
+ # finding both strings is unlikely to happen, but who knows?
+ ac_cv_c_bigendian=unknown
+ fi
+ fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+
+ /* Are we little or big endian? From Harbison&Steele. */
+ union
+ {
+ long int l;
+ char c[sizeof (long int)];
+ } u;
+ u.l = 1;
+ return u.c[sizeof (long int) - 1] == 1;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_run "$LINENO"; then :
+ ac_cv_c_bigendian=no
+else
+ ac_cv_c_bigendian=yes
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5
+$as_echo "$ac_cv_c_bigendian" >&6; }
+ case $ac_cv_c_bigendian in #(
+ yes)
+ $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h
+;; #(
+ no)
+ ;; #(
+ universal)
+
+$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h
+
+ ;; #(
+ *)
+ as_fn_error $? "unknown endianness
+ presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;;
+ esac
+
+ac_fn_cxx_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"
+if test "x$ac_cv_type_size_t" = xyes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define size_t unsigned int
+_ACEOF
+
+fi
+
+ac_fn_cxx_check_type "$LINENO" "ssize_t" "ac_cv_type_ssize_t" "$ac_includes_default"
+if test "x$ac_cv_type_ssize_t" = xyes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define ssize_t int
+_ACEOF
+
+fi
+
+
+for ac_header in stdint.h stddef.h sys/mman.h sys/resource.h windows.h byteswap.h sys/byteswap.h sys/endian.h sys/time.h
+do :
+ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_cxx_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+# Don't use AC_FUNC_MMAP, as it checks for mappings of already-mapped memory,
+# which we don't need (and does not exist on Windows).
+ac_fn_cxx_check_func "$LINENO" "mmap" "ac_cv_func_mmap"
+if test "x$ac_cv_func_mmap" = xyes; then :
+
+fi
+
+
+
+# Check whether --enable-gtest was given.
+if test "${enable_gtest+set}" = set; then :
+ enableval=$enable_gtest;
+else
+ enable_gtest=
+fi
+
+
+
+
+
+
+
+HAVE_GTEST="no"
+if test "x${enable_gtest}" != "xno"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for 'gtest-config'" >&5
+$as_echo_n "checking for 'gtest-config'... " >&6; }
+ if test "x${enable_gtest}" = "xyes"; then :
+ if test -x "${enable_gtest}/scripts/gtest-config"; then :
+ GTEST_CONFIG="${enable_gtest}/scripts/gtest-config"
+else
+ GTEST_CONFIG="${enable_gtest}/bin/gtest-config"
+fi
+ if test -x "${GTEST_CONFIG}"; then :
+
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ as_fn_error $? "Unable to locate either a built or installed Google Test.
+The specific location '${enable_gtest}' was provided for a built or installed
+Google Test, but no 'gtest-config' script could be found at this location." "$LINENO" 5
+
+fi
+else
+ # Extract the first word of "gtest-config", so it can be a program name with args.
+set dummy gtest-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_GTEST_CONFIG+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $GTEST_CONFIG in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_GTEST_CONFIG="$GTEST_CONFIG" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_path_GTEST_CONFIG="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+GTEST_CONFIG=$ac_cv_path_GTEST_CONFIG
+if test -n "$GTEST_CONFIG"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GTEST_CONFIG" >&5
+$as_echo "$GTEST_CONFIG" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+ if test -x "${GTEST_CONFIG}"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${GTEST_CONFIG}" >&5
+$as_echo "${GTEST_CONFIG}" >&6; }
+ _gtest_min_version="--min-version=0"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Google Test" >&5
+$as_echo_n "checking for Google Test... " >&6; }
+ if ${GTEST_CONFIG} ${_gtest_min_version}; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ HAVE_GTEST='yes'
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+ if test "x${HAVE_GTEST}" = "xyes"; then :
+ GTEST_CPPFLAGS=`${GTEST_CONFIG} --cppflags`
+ GTEST_CXXFLAGS=`${GTEST_CONFIG} --cxxflags`
+ GTEST_LDFLAGS=`${GTEST_CONFIG} --ldflags`
+ GTEST_LIBS=`${GTEST_CONFIG} --libs`
+ GTEST_VERSION=`${GTEST_CONFIG} --version`
+
+$as_echo "#define HAVE_GTEST 1" >>confdefs.h
+
+else
+ if test "x${enable_gtest}" = "xyes"; then :
+ as_fn_error $? "Google Test was enabled, but no viable version could be found." "$LINENO" 5
+
+fi
+fi
+fi
+
+ if test "x$HAVE_GTEST" = "xyes"; then
+ HAVE_GTEST_TRUE=
+ HAVE_GTEST_FALSE='#'
+else
+ HAVE_GTEST_TRUE='#'
+ HAVE_GTEST_FALSE=
+fi
+
+if test "x$HAVE_GTEST" = "xyes"; then :
+ true
+else
+ true # Ignore; we can live without it.
+fi
+
+
+
+# Check whether --with-gflags was given.
+if test "${with_gflags+set}" = set; then :
+ withval=$with_gflags;
+else
+ with_gflags=check
+fi
+
+
+if test "x$with_gflags" != "xno"; then
+
+
+
+
+
+
+
+if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args.
+set dummy ${ac_tool_prefix}pkg-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_PKG_CONFIG+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $PKG_CONFIG in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+PKG_CONFIG=$ac_cv_path_PKG_CONFIG
+if test -n "$PKG_CONFIG"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5
+$as_echo "$PKG_CONFIG" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_path_PKG_CONFIG"; then
+ ac_pt_PKG_CONFIG=$PKG_CONFIG
+ # Extract the first word of "pkg-config", so it can be a program name with args.
+set dummy pkg-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $ac_pt_PKG_CONFIG in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG
+if test -n "$ac_pt_PKG_CONFIG"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5
+$as_echo "$ac_pt_PKG_CONFIG" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_pt_PKG_CONFIG" = x; then
+ PKG_CONFIG=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ PKG_CONFIG=$ac_pt_PKG_CONFIG
+ fi
+else
+ PKG_CONFIG="$ac_cv_path_PKG_CONFIG"
+fi
+
+fi
+if test -n "$PKG_CONFIG"; then
+ _pkg_min_version=0.9.0
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5
+$as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; }
+ if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ PKG_CONFIG=""
+ fi
+fi
+
+pkg_failed=no
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for gflags" >&5
+$as_echo_n "checking for gflags... " >&6; }
+
+if test -n "$gflags_CFLAGS"; then
+ pkg_cv_gflags_CFLAGS="$gflags_CFLAGS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libgflags\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libgflags") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_gflags_CFLAGS=`$PKG_CONFIG --cflags "libgflags" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+if test -n "$gflags_LIBS"; then
+ pkg_cv_gflags_LIBS="$gflags_LIBS"
+ elif test -n "$PKG_CONFIG"; then
+ if test -n "$PKG_CONFIG" && \
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libgflags\""; } >&5
+ ($PKG_CONFIG --exists --print-errors "libgflags") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ pkg_cv_gflags_LIBS=`$PKG_CONFIG --libs "libgflags" 2>/dev/null`
+ test "x$?" != "x0" && pkg_failed=yes
+else
+ pkg_failed=yes
+fi
+ else
+ pkg_failed=untried
+fi
+
+
+
+if test $pkg_failed = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
+ _pkg_short_errors_supported=yes
+else
+ _pkg_short_errors_supported=no
+fi
+ if test $_pkg_short_errors_supported = yes; then
+ gflags_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libgflags" 2>&1`
+ else
+ gflags_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libgflags" 2>&1`
+ fi
+ # Put the nasty error message in config.log where it belongs
+ echo "$gflags_PKG_ERRORS" >&5
+
+ if test "x$with_gflags" != "xcheck"; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "--with-gflags was given, but test for gflags failed
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+elif test $pkg_failed = untried; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ if test "x$with_gflags" != "xcheck"; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "--with-gflags was given, but test for gflags failed
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+else
+ gflags_CFLAGS=$pkg_cv_gflags_CFLAGS
+ gflags_LIBS=$pkg_cv_gflags_LIBS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+$as_echo "#define HAVE_GFLAGS 1" >>confdefs.h
+
+fi
+fi
+
+# See if we have __builtin_expect.
+# TODO: Use AC_CACHE.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the compiler supports __builtin_expect" >&5
+$as_echo_n "checking if the compiler supports __builtin_expect... " >&6; }
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ return __builtin_expect(1, 1) ? 1 : 0
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+ snappy_have_builtin_expect=yes
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+else
+
+ snappy_have_builtin_expect=no
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+if test x$snappy_have_builtin_expect = xyes ; then
+
+$as_echo "#define HAVE_BUILTIN_EXPECT 1" >>confdefs.h
+
+fi
+
+# See if we have working count-trailing-zeros intrinsics.
+# TODO: Use AC_CACHE.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the compiler supports __builtin_ctzll" >&5
+$as_echo_n "checking if the compiler supports __builtin_ctzll... " >&6; }
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ return (__builtin_ctzll(0x100000000LL) == 32) ? 1 : 0
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+ snappy_have_builtin_ctz=yes
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+else
+
+ snappy_have_builtin_ctz=no
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+if test x$snappy_have_builtin_ctz = xyes ; then
+
+$as_echo "#define HAVE_BUILTIN_CTZ 1" >>confdefs.h
+
+fi
+
+# Other compression libraries; the unit test can use these for comparison
+# if they are available. If they are not found, just ignore.
+UNITTEST_LIBS=""
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for zlibVersion in -lz" >&5
+$as_echo_n "checking for zlibVersion in -lz... " >&6; }
+if ${ac_cv_lib_z_zlibVersion+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lz $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char zlibVersion ();
+int
+main ()
+{
+return zlibVersion ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+ ac_cv_lib_z_zlibVersion=yes
+else
+ ac_cv_lib_z_zlibVersion=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_zlibVersion" >&5
+$as_echo "$ac_cv_lib_z_zlibVersion" >&6; }
+if test "x$ac_cv_lib_z_zlibVersion" = xyes; then :
+
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBZ 1
+_ACEOF
+
+ UNITTEST_LIBS="-lz $UNITTEST_LIBS"
+
+else
+ true
+
+fi
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for lzo1x_1_15_compress in -llzo2" >&5
+$as_echo_n "checking for lzo1x_1_15_compress in -llzo2... " >&6; }
+if ${ac_cv_lib_lzo2_lzo1x_1_15_compress+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-llzo2 $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char lzo1x_1_15_compress ();
+int
+main ()
+{
+return lzo1x_1_15_compress ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+ ac_cv_lib_lzo2_lzo1x_1_15_compress=yes
+else
+ ac_cv_lib_lzo2_lzo1x_1_15_compress=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lzo2_lzo1x_1_15_compress" >&5
+$as_echo "$ac_cv_lib_lzo2_lzo1x_1_15_compress" >&6; }
+if test "x$ac_cv_lib_lzo2_lzo1x_1_15_compress" = xyes; then :
+
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBLZO2 1
+_ACEOF
+
+ UNITTEST_LIBS="-llzo2 $UNITTEST_LIBS"
+
+else
+ true
+
+fi
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for lzf_compress in -llzf" >&5
+$as_echo_n "checking for lzf_compress in -llzf... " >&6; }
+if ${ac_cv_lib_lzf_lzf_compress+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-llzf $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char lzf_compress ();
+int
+main ()
+{
+return lzf_compress ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+ ac_cv_lib_lzf_lzf_compress=yes
+else
+ ac_cv_lib_lzf_lzf_compress=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lzf_lzf_compress" >&5
+$as_echo "$ac_cv_lib_lzf_lzf_compress" >&6; }
+if test "x$ac_cv_lib_lzf_lzf_compress" = xyes; then :
+
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBLZF 1
+_ACEOF
+
+ UNITTEST_LIBS="-llzf $UNITTEST_LIBS"
+
+else
+ true
+
+fi
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for fastlz_compress in -lfastlz" >&5
+$as_echo_n "checking for fastlz_compress in -lfastlz... " >&6; }
+if ${ac_cv_lib_fastlz_fastlz_compress+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lfastlz $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char fastlz_compress ();
+int
+main ()
+{
+return fastlz_compress ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+ ac_cv_lib_fastlz_fastlz_compress=yes
+else
+ ac_cv_lib_fastlz_fastlz_compress=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_fastlz_fastlz_compress" >&5
+$as_echo "$ac_cv_lib_fastlz_fastlz_compress" >&6; }
+if test "x$ac_cv_lib_fastlz_fastlz_compress" = xyes; then :
+
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBFASTLZ 1
+_ACEOF
+
+ UNITTEST_LIBS="-lfastlz $UNITTEST_LIBS"
+
+else
+ true
+
+fi
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for qlz_compress in -lquicklz" >&5
+$as_echo_n "checking for qlz_compress in -lquicklz... " >&6; }
+if ${ac_cv_lib_quicklz_qlz_compress+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lquicklz $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char qlz_compress ();
+int
+main ()
+{
+return qlz_compress ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+ ac_cv_lib_quicklz_qlz_compress=yes
+else
+ ac_cv_lib_quicklz_qlz_compress=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_quicklz_qlz_compress" >&5
+$as_echo "$ac_cv_lib_quicklz_qlz_compress" >&6; }
+if test "x$ac_cv_lib_quicklz_qlz_compress" = xyes; then :
+
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBQUICKLZ 1
+_ACEOF
+
+ UNITTEST_LIBS="-lquicklz $UNITTEST_LIBS"
+
+else
+ true
+
+fi
+
+
+
+
+# These are used by snappy-stubs-public.h.in.
+if test "$ac_cv_header_stdint_h" = "yes"; then
+ ac_cv_have_stdint_h=1
+
+else
+ ac_cv_have_stdint_h=0
+
+fi
+if test "$ac_cv_header_stddef_h" = "yes"; then
+ ac_cv_have_stddef_h=1
+
+else
+ ac_cv_have_stddef_h=0
+
+fi
+if test "$ac_cv_header_sys_uio_h" = "yes"; then
+ ac_cv_have_sys_uio_h=1
+
+else
+ ac_cv_have_sys_uio_h=0
+
+fi
+
+# Export the version to snappy-stubs-public.h.
+SNAPPY_MAJOR="1"
+SNAPPY_MINOR="1"
+SNAPPY_PATCHLEVEL="2"
+
+
+
+
+SNAPPY_LTVERSION=3:1:2
+
+
+ac_config_headers="$ac_config_headers config.h"
+
+ac_config_files="$ac_config_files Makefile snappy-stubs-public.h"
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) { eval $ac_var=; unset $ac_var;} ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes: double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \.
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ if test "x$cache_file" != "x/dev/null"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+$as_echo "$as_me: updating cache $cache_file" >&6;}
+ if test ! -f "$cache_file" || test -h "$cache_file"; then
+ cat confcache >"$cache_file"
+ else
+ case $cache_file in #(
+ */* | ?:*)
+ mv -f confcache "$cache_file"$$ &&
+ mv -f "$cache_file"$$ "$cache_file" ;; #(
+ *)
+ mv -f confcache "$cache_file" ;;
+ esac
+ fi
+ fi
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+U=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+ ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
+ # will be set to the directory where LIBOBJS objects are built.
+ as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+ if test -n "$EXEEXT"; then
+ am__EXEEXT_TRUE=
+ am__EXEEXT_FALSE='#'
+else
+ am__EXEEXT_TRUE='#'
+ am__EXEEXT_FALSE=
+fi
+
+if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then
+ as_fn_error $? "conditional \"AMDEP\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
+ as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then
+ as_fn_error $? "conditional \"am__fastdepCXX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+
+if test -z "${HAVE_GTEST_TRUE}" && test -z "${HAVE_GTEST_FALSE}"; then
+ as_fn_error $? "conditional \"HAVE_GTEST\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+
+: "${CONFIG_STATUS=./config.status}"
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in #(
+ *posix*) :
+ set -o posix ;; #(
+ *) :
+ ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='print -r --'
+ as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in #(
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there. '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+ fi
+ $as_echo "$as_me: error: $2" >&2
+ as_fn_exit $as_status
+} # as_fn_error
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+ return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+ set +e
+ as_fn_set_status $1
+ exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+ { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+ eval 'as_fn_append ()
+ {
+ eval $1+=\$2
+ }'
+else
+ as_fn_append ()
+ {
+ eval $1=\$$1\$2
+ }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+ eval 'as_fn_arith ()
+ {
+ as_val=$(( $* ))
+ }'
+else
+ as_fn_arith ()
+ {
+ as_val=`expr "$@" || test $? -eq 1`
+ }
+fi # as_fn_arith
+
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+ case `echo 'xy\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ xy) ECHO_C='\c';;
+ *) echo `echo ksh88 bug on AIX 6.1` > /dev/null
+ ECHO_T=' ';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -p'
+ fi
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || eval $as_mkdir_p || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p='mkdir -p "$as_dir"'
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in #(
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by snappy $as_me 1.1.2, which was
+generated by GNU Autoconf 2.68. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+config_commands="$ac_config_commands"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration. Unless the files
+and actions are specified as TAGs, all are instantiated by default.
+
+Usage: $0 [OPTION]... [TAG]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number and configuration settings, then exit
+ --config print configuration, then exit
+ -q, --quiet, --silent
+ do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Configuration commands:
+$config_commands
+
+Report bugs to the package provider."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
+ac_cs_version="\\
+snappy config.status 1.1.2
+configured by $0, generated by GNU Autoconf 2.68,
+ with options \\"\$ac_cs_config\\"
+
+Copyright (C) 2010 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+MKDIR_P='$MKDIR_P'
+AWK='$AWK'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=?*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ --*=)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ $as_echo "$ac_cs_version"; exit ;;
+ --config | --confi | --conf | --con | --co | --c )
+ $as_echo "$ac_cs_config"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ '') as_fn_error $? "missing file argument" ;;
+ esac
+ as_fn_append CONFIG_FILES " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ as_fn_append CONFIG_HEADERS " '$ac_optarg'"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ as_fn_error $? "ambiguous option: \`$1'
+Try \`$0 --help' for more information.";;
+ --help | --hel | -h )
+ $as_echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) as_fn_error $? "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
+
+ *) as_fn_append ac_config_targets " $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+ set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ shift
+ \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+ CONFIG_SHELL='$SHELL'
+ export CONFIG_SHELL
+ exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ $as_echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#
+# INIT-COMMANDS
+#
+AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
+
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+sed_quote_subst='$sed_quote_subst'
+double_quote_subst='$double_quote_subst'
+delay_variable_subst='$delay_variable_subst'
+macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`'
+macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`'
+enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`'
+enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`'
+pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`'
+enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`'
+SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`'
+ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`'
+PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`'
+host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`'
+host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`'
+host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`'
+build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`'
+build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`'
+build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`'
+SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`'
+Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`'
+GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`'
+EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`'
+FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`'
+LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`'
+NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`'
+LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`'
+max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`'
+ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`'
+exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`'
+lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`'
+lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`'
+lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`'
+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`'
+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`'
+reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`'
+reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`'
+OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`'
+deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`'
+file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`'
+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`'
+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`'
+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`'
+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`'
+AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`'
+AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`'
+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`'
+STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`'
+RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`'
+old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`'
+old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`'
+old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`'
+lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`'
+CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`'
+CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`'
+compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`'
+GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`'
+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`'
+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`'
+objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`'
+MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`'
+lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`'
+need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`'
+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`'
+DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`'
+NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`'
+LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`'
+OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`'
+OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`'
+libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`'
+shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`'
+extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`'
+archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`'
+enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`'
+export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`'
+whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`'
+compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`'
+old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`'
+old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`'
+archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`'
+archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`'
+module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`'
+module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`'
+with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`'
+allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`'
+no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`'
+hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`'
+hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`'
+hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`'
+hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`'
+inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`'
+link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`'
+always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`'
+export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`'
+exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`'
+include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`'
+prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`'
+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`'
+file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`'
+variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`'
+need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`'
+need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`'
+version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`'
+runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`'
+shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`'
+shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`'
+libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`'
+library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`'
+soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`'
+install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`'
+postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`'
+postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`'
+finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`'
+finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`'
+hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`'
+sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`'
+sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`'
+hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`'
+enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`'
+enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`'
+enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`'
+old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`'
+striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`'
+predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`'
+postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`'
+predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`'
+postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`'
+LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`'
+reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`'
+reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`'
+GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`'
+lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`'
+archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`'
+enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`'
+export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
+whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`'
+old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`'
+allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`'
+no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`'
+inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`'
+link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`'
+always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`'
+export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`'
+include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`'
+prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`'
+predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`'
+postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`'
+predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`'
+postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`'
+
+LTCC='$LTCC'
+LTCFLAGS='$LTCFLAGS'
+compiler='$compiler_DEFAULT'
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+ eval 'cat <<_LTECHO_EOF
+\$1
+_LTECHO_EOF'
+}
+
+# Quote evaled strings.
+for var in SHELL \
+ECHO \
+PATH_SEPARATOR \
+SED \
+GREP \
+EGREP \
+FGREP \
+LD \
+NM \
+LN_S \
+lt_SP2NL \
+lt_NL2SP \
+reload_flag \
+OBJDUMP \
+deplibs_check_method \
+file_magic_cmd \
+file_magic_glob \
+want_nocaseglob \
+DLLTOOL \
+sharedlib_from_linklib_cmd \
+AR \
+AR_FLAGS \
+archiver_list_spec \
+STRIP \
+RANLIB \
+CC \
+CFLAGS \
+compiler \
+lt_cv_sys_global_symbol_pipe \
+lt_cv_sys_global_symbol_to_cdecl \
+lt_cv_sys_global_symbol_to_c_name_address \
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \
+nm_file_list_spec \
+lt_prog_compiler_no_builtin_flag \
+lt_prog_compiler_pic \
+lt_prog_compiler_wl \
+lt_prog_compiler_static \
+lt_cv_prog_compiler_c_o \
+need_locks \
+MANIFEST_TOOL \
+DSYMUTIL \
+NMEDIT \
+LIPO \
+OTOOL \
+OTOOL64 \
+shrext_cmds \
+export_dynamic_flag_spec \
+whole_archive_flag_spec \
+compiler_needs_object \
+with_gnu_ld \
+allow_undefined_flag \
+no_undefined_flag \
+hardcode_libdir_flag_spec \
+hardcode_libdir_separator \
+exclude_expsyms \
+include_expsyms \
+file_list_spec \
+variables_saved_for_relink \
+libname_spec \
+library_names_spec \
+soname_spec \
+install_override_mode \
+finish_eval \
+old_striplib \
+striplib \
+compiler_lib_search_dirs \
+predep_objects \
+postdep_objects \
+predeps \
+postdeps \
+compiler_lib_search_path \
+LD_CXX \
+reload_flag_CXX \
+compiler_CXX \
+lt_prog_compiler_no_builtin_flag_CXX \
+lt_prog_compiler_pic_CXX \
+lt_prog_compiler_wl_CXX \
+lt_prog_compiler_static_CXX \
+lt_cv_prog_compiler_c_o_CXX \
+export_dynamic_flag_spec_CXX \
+whole_archive_flag_spec_CXX \
+compiler_needs_object_CXX \
+with_gnu_ld_CXX \
+allow_undefined_flag_CXX \
+no_undefined_flag_CXX \
+hardcode_libdir_flag_spec_CXX \
+hardcode_libdir_separator_CXX \
+exclude_expsyms_CXX \
+include_expsyms_CXX \
+file_list_spec_CXX \
+compiler_lib_search_dirs_CXX \
+predep_objects_CXX \
+postdep_objects_CXX \
+predeps_CXX \
+postdeps_CXX \
+compiler_lib_search_path_CXX; do
+ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+ *[\\\\\\\`\\"\\\$]*)
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
+ ;;
+ *)
+ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+ ;;
+ esac
+done
+
+# Double-quote double-evaled strings.
+for var in reload_cmds \
+old_postinstall_cmds \
+old_postuninstall_cmds \
+old_archive_cmds \
+extract_expsyms_cmds \
+old_archive_from_new_cmds \
+old_archive_from_expsyms_cmds \
+archive_cmds \
+archive_expsym_cmds \
+module_cmds \
+module_expsym_cmds \
+export_symbols_cmds \
+prelink_cmds \
+postlink_cmds \
+postinstall_cmds \
+postuninstall_cmds \
+finish_cmds \
+sys_lib_search_path_spec \
+sys_lib_dlsearch_path_spec \
+reload_cmds_CXX \
+old_archive_cmds_CXX \
+old_archive_from_new_cmds_CXX \
+old_archive_from_expsyms_cmds_CXX \
+archive_cmds_CXX \
+archive_expsym_cmds_CXX \
+module_cmds_CXX \
+module_expsym_cmds_CXX \
+export_symbols_cmds_CXX \
+prelink_cmds_CXX \
+postlink_cmds_CXX; do
+ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+ *[\\\\\\\`\\"\\\$]*)
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
+ ;;
+ *)
+ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+ ;;
+ esac
+done
+
+ac_aux_dir='$ac_aux_dir'
+xsi_shell='$xsi_shell'
+lt_shell_append='$lt_shell_append'
+
+# See if we are running on zsh, and set the options which allow our
+# commands through without removal of \ escapes.
+if test -n "\${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+fi
+
+
+ PACKAGE='$PACKAGE'
+ VERSION='$VERSION'
+ TIMESTAMP='$TIMESTAMP'
+ RM='$RM'
+ ofile='$ofile'
+
+
+
+
+
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
+ "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;;
+ "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+ "snappy-stubs-public.h") CONFIG_FILES="$CONFIG_FILES snappy-stubs-public.h" ;;
+
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+ test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp= ac_tmp=
+ trap 'exit_status=$?
+ : "${ac_tmp:=$tmp}"
+ { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
+' 0
+ trap 'as_fn_exit 1' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=`echo X | tr X '\015'`
+# On cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+ eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+ ac_cs_awk_cr='\\r'
+else
+ ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+ echo "cat >conf$$subs.awk <<_ACEOF" &&
+ echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+ echo "_ACEOF"
+} >conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+ . ./conf$$subs.sh ||
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+
+ ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+ if test $ac_delim_n = $ac_delim_num; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+rm -f conf$$subs.sh
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+ N
+ s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
+ for (key in S) S_is_set[key] = 1
+ FS = ""
+
+}
+{
+ line = $ 0
+ nfields = split(line, field, "@")
+ substed = 0
+ len = length(field[1])
+ for (i = 2; i < nfields; i++) {
+ key = field[i]
+ keylen = length(key)
+ if (S_is_set[key]) {
+ value = S[key]
+ line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+ len += length(value) + length(field[++i])
+ substed = 1
+ } else
+ len += 1 + keylen
+ }
+
+ print line
+}
+
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+ cat
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
+ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
+_ACEOF
+
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{
+h
+s///
+s/^/:/
+s/[ ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
+s/:*$//
+x
+s/\(=[ ]*\).*/\1/
+G
+s/\n//
+s/^[^=]*=[ ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+ ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
+ if test -z "$ac_tt"; then
+ break
+ elif $ac_last_try; then
+ as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any. Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[ ]*#[ ]*define[ ][ ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ for (key in D) D_is_set[key] = 1
+ FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+ line = \$ 0
+ split(line, arg, " ")
+ if (arg[1] == "#") {
+ defundef = arg[2]
+ mac1 = arg[3]
+ } else {
+ defundef = substr(arg[1], 2)
+ mac1 = arg[2]
+ }
+ split(mac1, mac2, "(") #)
+ macro = mac2[1]
+ prefix = substr(line, 1, index(line, defundef) - 1)
+ if (D_is_set[macro]) {
+ # Preserve the white space surrounding the "#".
+ print prefix "define", macro P[macro] D[macro]
+ next
+ } else {
+ # Replace #undef with comments. This is necessary, for example,
+ # in the case of _POSIX_SOURCE, which is predefined and required
+ # on some systems where configure will not decide to define it.
+ if (defundef == "undef") {
+ print "/*", prefix defundef, macro, "*/"
+ next
+ }
+ }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
+fi # test -n "$CONFIG_HEADERS"
+
+
+eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS"
+shift
+for ac_tag
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$ac_tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+ esac
+ case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+ as_fn_append ac_file_inputs " '$ac_f'"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input='Generated from '`
+ $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ `' by configure.'
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
+ fi
+ # Neutralize special characters interpreted by sed in replacement strings.
+ case $configure_input in #(
+ *\&* | *\|* | *\\* )
+ ac_sed_conf_input=`$as_echo "$configure_input" |
+ sed 's/[\\\\&|]/\\\\&/g'`;; #(
+ *) ac_sed_conf_input=$configure_input;;
+ esac
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$ac_tmp/stdin" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ as_dir="$ac_dir"; as_fn_mkdir_p
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+ ac_MKDIR_P=$MKDIR_P
+ case $MKDIR_P in
+ [\\/$]* | ?:[\\/]* ) ;;
+ */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
+ esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+ac_sed_dataroot='
+/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ ac_datarootdir_hack='
+ s&@datadir@&$datadir&g
+ s&@docdir@&$docdir&g
+ s&@infodir@&$infodir&g
+ s&@localedir@&$localedir&g
+ s&@mandir@&$mandir&g
+ s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+s&@MKDIR_P@&$ac_MKDIR_P&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \
+ "$ac_tmp/out"`; test -z "$ac_out"; } &&
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined" >&2;}
+
+ rm -f "$ac_tmp/stdin"
+ case $ac_file in
+ -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+ *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
+ esac \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+ if test x"$ac_file" != x-; then
+ {
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
+ } >"$ac_tmp/config.h" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
+$as_echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f "$ac_file"
+ mv "$ac_tmp/config.h" "$ac_file" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ fi
+ else
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
+ || as_fn_error $? "could not create -" "$LINENO" 5
+ fi
+# Compute "$ac_file"'s index in $config_headers.
+_am_arg="$ac_file"
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" ||
+$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$_am_arg" : 'X\(//\)[^/]' \| \
+ X"$_am_arg" : 'X\(//\)$' \| \
+ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$_am_arg" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`/stamp-h$_am_stamp_count
+ ;;
+
+ :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
+$as_echo "$as_me: executing $ac_file commands" >&6;}
+ ;;
+ esac
+
+
+ case $ac_file$ac_mode in
+ "depfiles":C) test x"$AMDEP_TRUE" != x"" || {
+ # Autoconf 2.62 quotes --file arguments for eval, but not when files
+ # are listed without --file. Let's play safe and only enable the eval
+ # if we detect the quoting.
+ case $CONFIG_FILES in
+ *\'*) eval set x "$CONFIG_FILES" ;;
+ *) set x $CONFIG_FILES ;;
+ esac
+ shift
+ for mf
+ do
+ # Strip MF so we end up with the name of the file.
+ mf=`echo "$mf" | sed -e 's/:.*$//'`
+ # Check whether this is an Automake generated Makefile or not.
+ # We used to match only the files named `Makefile.in', but
+ # some people rename them; so instead we look at the file content.
+ # Grep'ing the first line is not enough: some people post-process
+ # each Makefile.in and add a new line on top of each file to say so.
+ # Grep'ing the whole file is not good either: AIX grep has a line
+    # limit of 2048, but all sed's we know understand at least 4000.
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+ dirpart=`$as_dirname -- "$mf" ||
+$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$mf" : 'X\(//\)[^/]' \| \
+ X"$mf" : 'X\(//\)$' \| \
+ X"$mf" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$mf" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ else
+ continue
+ fi
+ # Extract the definition of DEPDIR, am__include, and am__quote
+ # from the Makefile without running `make'.
+ DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+ test -z "$DEPDIR" && continue
+ am__include=`sed -n 's/^am__include = //p' < "$mf"`
+    test -z "$am__include" && continue
+ am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+ # When using ansi2knr, U may be empty or an underscore; expand it
+ U=`sed -n 's/^U = //p' < "$mf"`
+ # Find all dependency output files, they are included files with
+ # $(DEPDIR) in their names. We invoke sed twice because it is the
+ # simplest approach to changing $(DEPDIR) to its actual value in the
+ # expansion.
+ for file in `sed -n "
+ s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+ # Make sure the directory exists.
+ test -f "$dirpart/$file" && continue
+ fdir=`$as_dirname -- "$file" ||
+$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$file" : 'X\(//\)[^/]' \| \
+ X"$file" : 'X\(//\)$' \| \
+ X"$file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ as_dir=$dirpart/$fdir; as_fn_mkdir_p
+ # echo "creating $dirpart/$file"
+ echo '# dummy' > "$dirpart/$file"
+ done
+ done
+}
+ ;;
+ "libtool":C)
+
+ # See if we are running on zsh, and set the options which allow our
+ # commands through without removal of \ escapes.
+ if test -n "${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+ fi
+
+ cfgfile="${ofile}T"
+ trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+ $RM "$cfgfile"
+
+ cat <<_LT_EOF >> "$cfgfile"
+#! $SHELL
+
+# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+#
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
+# 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# Written by Gordon Matzigkeit, 1996
+#
+# This file is part of GNU Libtool.
+#
+# GNU Libtool is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Libtool; see the file COPYING. If not, a copy
+# can be downloaded from http://www.gnu.org/licenses/gpl.html, or
+# obtained by writing to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA.
+
+
+# The names of the tagged configurations supported by this script.
+available_tags="CXX "
+
+# ### BEGIN LIBTOOL CONFIG
+
+# Which release of libtool.m4 was used?
+macro_version=$macro_version
+macro_revision=$macro_revision
+
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
+
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
+
+# What type of objects to build.
+pic_mode=$pic_mode
+
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
+
+# Shell to use when invoking shell scripts.
+SHELL=$lt_SHELL
+
+# An echo program that protects backslashes.
+ECHO=$lt_ECHO
+
+# The PATH separator for the build system.
+PATH_SEPARATOR=$lt_PATH_SEPARATOR
+
+# The host system.
+host_alias=$host_alias
+host=$host
+host_os=$host_os
+
+# The build system.
+build_alias=$build_alias
+build=$build
+build_os=$build_os
+
+# A sed program that does not truncate output.
+SED=$lt_SED
+
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="\$SED -e 1s/^X//"
+
+# A grep program that handles long lines.
+GREP=$lt_GREP
+
+# An ERE matcher.
+EGREP=$lt_EGREP
+
+# A literal string matcher.
+FGREP=$lt_FGREP
+
+# A BSD- or MS-compatible name lister.
+NM=$lt_NM
+
+# Whether we need soft or hard links.
+LN_S=$lt_LN_S
+
+# What is the maximum length of a command?
+max_cmd_len=$max_cmd_len
+
+# Object file suffix (normally "o").
+objext=$ac_objext
+
+# Executable file suffix (normally "").
+exeext=$exeext
+
+# whether the shell understands "unset".
+lt_unset=$lt_unset
+
+# turn spaces into newlines.
+SP2NL=$lt_lt_SP2NL
+
+# turn newlines into spaces.
+NL2SP=$lt_lt_NL2SP
+
+# convert \$build file names to \$host format.
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+
+# convert \$build files to toolchain format.
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+
+# An object symbol dumper.
+OBJDUMP=$lt_OBJDUMP
+
+# Method to check whether dependent libraries are shared objects.
+deplibs_check_method=$lt_deplibs_check_method
+
+# Command to use when deplibs_check_method = "file_magic".
+file_magic_cmd=$lt_file_magic_cmd
+
+# How to find potential files when deplibs_check_method = "file_magic".
+file_magic_glob=$lt_file_magic_glob
+
+# Find potential files using nocaseglob when deplibs_check_method = "file_magic".
+want_nocaseglob=$lt_want_nocaseglob
+
+# DLL creation program.
+DLLTOOL=$lt_DLLTOOL
+
+# Command to associate shared and link libraries.
+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd
+
+# The archiver.
+AR=$lt_AR
+
+# Flags to create an archive.
+AR_FLAGS=$lt_AR_FLAGS
+
+# How to feed a file listing to the archiver.
+archiver_list_spec=$lt_archiver_list_spec
+
+# A symbol stripping program.
+STRIP=$lt_STRIP
+
+# Commands used to install an old-style archive.
+RANLIB=$lt_RANLIB
+old_postinstall_cmds=$lt_old_postinstall_cmds
+old_postuninstall_cmds=$lt_old_postuninstall_cmds
+
+# Whether to use a lock for old archive extraction.
+lock_old_archive_extraction=$lock_old_archive_extraction
+
+# A C compiler.
+LTCC=$lt_CC
+
+# LTCC compiler flags.
+LTCFLAGS=$lt_CFLAGS
+
+# Take the output of nm and produce a listing of raw symbols and C names.
+global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe
+
+# Transform the output of nm in a proper C declaration.
+global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl
+
+# Transform the output of nm in a C name address pair.
+global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address
+
+# Transform the output of nm in a C name address pair when lib prefix is needed.
+global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix
+
+# Specify filename containing input files for \$NM.
+nm_file_list_spec=$lt_nm_file_list_spec
+
+# The root where to search for dependent libraries, and in which our libraries should be installed.
+lt_sysroot=$lt_sysroot
+
+# The name of the directory that contains temporary libtool files.
+objdir=$objdir
+
+# Used to examine libraries when file_magic_cmd begins with "file".
+MAGIC_CMD=$MAGIC_CMD
+
+# Must we lock files when doing compilation?
+need_locks=$lt_need_locks
+
+# Manifest tool.
+MANIFEST_TOOL=$lt_MANIFEST_TOOL
+
+# Tool to manipulate archived DWARF debug symbol files on Mac OS X.
+DSYMUTIL=$lt_DSYMUTIL
+
+# Tool to change global to local symbols on Mac OS X.
+NMEDIT=$lt_NMEDIT
+
+# Tool to manipulate fat objects and archives on Mac OS X.
+LIPO=$lt_LIPO
+
+# ldd/readelf like tool for Mach-O binaries on Mac OS X.
+OTOOL=$lt_OTOOL
+
+# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4.
+OTOOL64=$lt_OTOOL64
+
+# Old archive suffix (normally "a").
+libext=$libext
+
+# Shared library suffix (normally ".so").
+shrext_cmds=$lt_shrext_cmds
+
+# The commands to extract the exported symbol list from a shared archive.
+extract_expsyms_cmds=$lt_extract_expsyms_cmds
+
+# Variables whose values should be saved in libtool wrapper scripts and
+# restored at link time.
+variables_saved_for_relink=$lt_variables_saved_for_relink
+
+# Do we need the "lib" prefix for modules?
+need_lib_prefix=$need_lib_prefix
+
+# Do we need a version for libraries?
+need_version=$need_version
+
+# Library versioning type.
+version_type=$version_type
+
+# Shared library runtime path variable.
+runpath_var=$runpath_var
+
+# Shared library path variable.
+shlibpath_var=$shlibpath_var
+
+# Is shlibpath searched before the hard-coded library search path?
+shlibpath_overrides_runpath=$shlibpath_overrides_runpath
+
+# Format of library name prefix.
+libname_spec=$lt_libname_spec
+
+# List of archive names. First name is the real one, the rest are links.
+# The last name is the one that the linker finds with -lNAME
+library_names_spec=$lt_library_names_spec
+
+# The coded name of the library, if different from the real name.
+soname_spec=$lt_soname_spec
+
+# Permission mode override for installation of shared libraries.
+install_override_mode=$lt_install_override_mode
+
+# Command to use after installation of a shared archive.
+postinstall_cmds=$lt_postinstall_cmds
+
+# Command to use after uninstallation of a shared archive.
+postuninstall_cmds=$lt_postuninstall_cmds
+
+# Commands used to finish a libtool library installation in a directory.
+finish_cmds=$lt_finish_cmds
+
+# As "finish_cmds", except a single script fragment to be evaled but
+# not shown.
+finish_eval=$lt_finish_eval
+
+# Whether we should hardcode library paths into libraries.
+hardcode_into_libs=$hardcode_into_libs
+
+# Compile-time system search path for libraries.
+sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
+
+# Run-time system search path for libraries.
+sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec
+
+# Whether dlopen is supported.
+dlopen_support=$enable_dlopen
+
+# Whether dlopen of programs is supported.
+dlopen_self=$enable_dlopen_self
+
+# Whether dlopen of statically linked programs is supported.
+dlopen_self_static=$enable_dlopen_self_static
+
+# Commands to strip libraries.
+old_striplib=$lt_old_striplib
+striplib=$lt_striplib
+
+
+# The linker used to build libraries.
+LD=$lt_LD
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag
+reload_cmds=$lt_reload_cmds
+
+# Commands used to build an old-style archive.
+old_archive_cmds=$lt_old_archive_cmds
+
+# A language specific compiler.
+CC=$lt_compiler
+
+# Is the compiler the GNU compiler?
+with_gcc=$GCC
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_lt_prog_compiler_pic
+
+# How to pass a linker flag through the compiler.
+wl=$lt_lt_prog_compiler_wl
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_lt_prog_compiler_static
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$archive_cmds_need_lc
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object=$lt_compiler_needs_object
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds
+
+# Commands used to build a shared archive.
+archive_cmds=$lt_archive_cmds
+archive_expsym_cmds=$lt_archive_expsym_cmds
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds=$lt_module_cmds
+module_expsym_cmds=$lt_module_expsym_cmds
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld=$lt_with_gnu_ld
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=$hardcode_direct
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute",i.e impossible to change by setting \${shlibpath_var} if the
+# library is relocated.
+hardcode_direct_absolute=$hardcode_direct_absolute
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=$hardcode_minus_L
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=$hardcode_automatic
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=$inherit_rpath
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=$always_export_symbols
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=$lt_prelink_cmds
+
+# Commands necessary for finishing linking programs.
+postlink_cmds=$lt_postlink_cmds
+
+# Specify filename containing input files.
+file_list_spec=$lt_file_list_spec
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action
+
+# The directories searched by this compiler when creating a shared library.
+compiler_lib_search_dirs=$lt_compiler_lib_search_dirs
+
+# Dependencies to place before and after the objects being linked to
+# create a shared library.
+predep_objects=$lt_predep_objects
+postdep_objects=$lt_postdep_objects
+predeps=$lt_predeps
+postdeps=$lt_postdeps
+
+# The library search path used internally by the compiler when linking
+# a shared library.
+compiler_lib_search_path=$lt_compiler_lib_search_path
+
+# ### END LIBTOOL CONFIG
+
+_LT_EOF
+
+ case $host_os in
+ aix3*)
+ cat <<\_LT_EOF >> "$cfgfile"
+# AIX sometimes has problems with the GCC collect2 program. For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+fi
+_LT_EOF
+ ;;
+ esac
+
+
+ltmain="$ac_aux_dir/ltmain.sh"
+
+
+ # We use sed instead of cat because bash on DJGPP gets confused if
+  # it finds mixed CR/LF and LF-only lines.  Since sed operates in
+ # text mode, it properly converts lines to CR/LF. This bash problem
+ # is reportedly fixed, but why not run on old versions too?
+ sed '$q' "$ltmain" >> "$cfgfile" \
+ || (rm -f "$cfgfile"; exit 1)
+
+ if test x"$xsi_shell" = xyes; then
+ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\
+func_dirname ()\
+{\
+\ case ${1} in\
+\ */*) func_dirname_result="${1%/*}${2}" ;;\
+\ * ) func_dirname_result="${3}" ;;\
+\ esac\
+} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_basename ()$/,/^} # func_basename /c\
+func_basename ()\
+{\
+\ func_basename_result="${1##*/}"\
+} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\
+func_dirname_and_basename ()\
+{\
+\ case ${1} in\
+\ */*) func_dirname_result="${1%/*}${2}" ;;\
+\ * ) func_dirname_result="${3}" ;;\
+\ esac\
+\ func_basename_result="${1##*/}"\
+} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\
+func_stripname ()\
+{\
+\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\
+\ # positional parameters, so assign one to ordinary parameter first.\
+\ func_stripname_result=${3}\
+\ func_stripname_result=${func_stripname_result#"${1}"}\
+\ func_stripname_result=${func_stripname_result%"${2}"}\
+} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\
+func_split_long_opt ()\
+{\
+\ func_split_long_opt_name=${1%%=*}\
+\ func_split_long_opt_arg=${1#*=}\
+} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\
+func_split_short_opt ()\
+{\
+\ func_split_short_opt_arg=${1#??}\
+\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\
+} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\
+func_lo2o ()\
+{\
+\ case ${1} in\
+\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\
+\ *) func_lo2o_result=${1} ;;\
+\ esac\
+} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_xform ()$/,/^} # func_xform /c\
+func_xform ()\
+{\
+ func_xform_result=${1%.*}.lo\
+} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_arith ()$/,/^} # func_arith /c\
+func_arith ()\
+{\
+ func_arith_result=$(( $* ))\
+} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_len ()$/,/^} # func_len /c\
+func_len ()\
+{\
+ func_len_result=${#1}\
+} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+fi
+
+if test x"$lt_shell_append" = xyes; then
+ sed -e '/^func_append ()$/,/^} # func_append /c\
+func_append ()\
+{\
+ eval "${1}+=\\${2}"\
+} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\
+func_append_quoted ()\
+{\
+\ func_quote_for_eval "${2}"\
+\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\
+} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ # Save a `func_append' function call where possible by direct use of '+='
+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+ test 0 -eq $? || _lt_function_replace_fail=:
+else
+ # Save a `func_append' function call even when '+=' is not available
+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+ test 0 -eq $? || _lt_function_replace_fail=:
+fi
+
+if test x"$_lt_function_replace_fail" = x":"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5
+$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;}
+fi
+
+
+ mv -f "$cfgfile" "$ofile" ||
+ (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
+ chmod +x "$ofile"
+
+
+ cat <<_LT_EOF >> "$ofile"
+
+# ### BEGIN LIBTOOL TAG CONFIG: CXX
+
+# The linker used to build libraries.
+LD=$lt_LD_CXX
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag_CXX
+reload_cmds=$lt_reload_cmds_CXX
+
+# Commands used to build an old-style archive.
+old_archive_cmds=$lt_old_archive_cmds_CXX
+
+# A language specific compiler.
+CC=$lt_compiler_CXX
+
+# Is the compiler the GNU compiler?
+with_gcc=$GCC_CXX
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_lt_prog_compiler_pic_CXX
+
+# How to pass a linker flag through the compiler.
+wl=$lt_lt_prog_compiler_wl_CXX
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_lt_prog_compiler_static_CXX
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$archive_cmds_need_lc_CXX
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object=$lt_compiler_needs_object_CXX
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX
+
+# Commands used to build a shared archive.
+archive_cmds=$lt_archive_cmds_CXX
+archive_expsym_cmds=$lt_archive_expsym_cmds_CXX
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds=$lt_module_cmds_CXX
+module_expsym_cmds=$lt_module_expsym_cmds_CXX
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld=$lt_with_gnu_ld_CXX
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag_CXX
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag_CXX
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=$hardcode_direct_CXX
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute",i.e impossible to change by setting \${shlibpath_var} if the
+# library is relocated.
+hardcode_direct_absolute=$hardcode_direct_absolute_CXX
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=$hardcode_minus_L_CXX
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=$hardcode_automatic_CXX
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=$inherit_rpath_CXX
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs_CXX
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=$always_export_symbols_CXX
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds_CXX
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms_CXX
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms_CXX
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=$lt_prelink_cmds_CXX
+
+# Commands necessary for finishing linking programs.
+postlink_cmds=$lt_postlink_cmds_CXX
+
+# Specify filename containing input files.
+file_list_spec=$lt_file_list_spec_CXX
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action_CXX
+
+# The directories searched by this compiler when creating a shared library.
+compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX
+
+# Dependencies to place before and after the objects being linked to
+# create a shared library.
+predep_objects=$lt_predep_objects_CXX
+postdep_objects=$lt_postdep_objects_CXX
+predeps=$lt_predeps_CXX
+postdeps=$lt_postdeps_CXX
+
+# The library search path used internally by the compiler when linking
+# a shared library.
+compiler_lib_search_path=$lt_compiler_lib_search_path_CXX
+
+# ### END LIBTOOL TAG CONFIG: CXX
+_LT_EOF
+
+ ;;
+
+ esac
+done # for ac_tag
+
+
+as_fn_exit 0
+_ACEOF
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+ as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/configure.ac b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/configure.ac
new file mode 100644
index 00000000..3164b09e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/configure.ac
@@ -0,0 +1,133 @@
+m4_define([snappy_major], [1])
+m4_define([snappy_minor], [1])
+m4_define([snappy_patchlevel], [2])
+
+# Libtool shared library interface versions (current:revision:age)
+# Update this value for every release! (A:B:C will map to foo.so.(A-C).C.B)
+# http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
+m4_define([snappy_ltversion], [3:1:2])
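+# Editorial illustration, not part of upstream snappy: with 3:1:2 the
+# (A-C).C.B rule above gives 1.2.1, so on a typical ELF system the shared
+# object would be installed as libsnappy.so.1.2.1 with soname libsnappy.so.1.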
+
+AC_INIT([snappy], [snappy_major.snappy_minor.snappy_patchlevel])
+AC_CONFIG_MACRO_DIR([m4])
+
+# These are flags passed to automake (though they look like gcc flags!)
+AM_INIT_AUTOMAKE([-Wall])
+
+LT_INIT
+AC_SUBST([LIBTOOL_DEPS])
+AC_PROG_CXX
+AC_LANG([C++])
+AC_C_BIGENDIAN
+AC_TYPE_SIZE_T
+AC_TYPE_SSIZE_T
+AC_CHECK_HEADERS([stdint.h stddef.h sys/mman.h sys/resource.h windows.h byteswap.h sys/byteswap.h sys/endian.h sys/time.h])
+
+# Don't use AC_FUNC_MMAP, as it checks for mappings of already-mapped memory,
+# which we don't need (and does not exist on Windows).
+AC_CHECK_FUNC([mmap])
+
+GTEST_LIB_CHECK([], [true], [true # Ignore; we can live without it.])
+
+AC_ARG_WITH([gflags],
+ [AS_HELP_STRING(
+ [--with-gflags],
+ [use Google Flags package to enhance the unit test @<:@default=check@:>@])],
+ [],
+ [with_gflags=check])
+
+if test "x$with_gflags" != "xno"; then
+ PKG_CHECK_MODULES(
+ [gflags],
+ [libgflags],
+ [AC_DEFINE([HAVE_GFLAGS], [1], [Use the gflags package for command-line parsing.])],
+ [if test "x$with_gflags" != "xcheck"; then
+ AC_MSG_FAILURE([--with-gflags was given, but test for gflags failed])
+ fi])
+fi
+
+# See if we have __builtin_expect.
+# TODO: Use AC_CACHE.
+AC_MSG_CHECKING([if the compiler supports __builtin_expect])
+
+AC_TRY_COMPILE(, [
+ return __builtin_expect(1, 1) ? 1 : 0
+], [
+ snappy_have_builtin_expect=yes
+ AC_MSG_RESULT([yes])
+], [
+ snappy_have_builtin_expect=no
+ AC_MSG_RESULT([no])
+])
+if test x$snappy_have_builtin_expect = xyes ; then
+ AC_DEFINE([HAVE_BUILTIN_EXPECT], [1], [Define to 1 if the compiler supports __builtin_expect.])
+fi
+
+# See if we have working count-trailing-zeros intrinsics.
+# TODO: Use AC_CACHE.
+AC_MSG_CHECKING([if the compiler supports __builtin_ctzll])
+
+AC_TRY_COMPILE(, [
+ return (__builtin_ctzll(0x100000000LL) == 32) ? 1 : 0
+], [
+ snappy_have_builtin_ctz=yes
+ AC_MSG_RESULT([yes])
+], [
+ snappy_have_builtin_ctz=no
+ AC_MSG_RESULT([no])
+])
+if test x$snappy_have_builtin_ctz = xyes ; then
+ AC_DEFINE([HAVE_BUILTIN_CTZ], [1], [Define to 1 if the compiler supports __builtin_ctz and friends.])
+fi
+
+# Other compression libraries; the unit test can use these for comparison
+# if they are available. If they are not found, just ignore.
+UNITTEST_LIBS=""
+AC_DEFUN([CHECK_EXT_COMPRESSION_LIB], [
+ AH_CHECK_LIB([$1])
+ AC_CHECK_LIB(
+ [$1],
+ [$2],
+ [
+ AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIB$1))
+ UNITTEST_LIBS="-l$1 $UNITTEST_LIBS"
+ ],
+ [true]
+ )
+])
+CHECK_EXT_COMPRESSION_LIB([z], [zlibVersion])
+CHECK_EXT_COMPRESSION_LIB([lzo2], [lzo1x_1_15_compress])
+CHECK_EXT_COMPRESSION_LIB([lzf], [lzf_compress])
+CHECK_EXT_COMPRESSION_LIB([fastlz], [fastlz_compress])
+CHECK_EXT_COMPRESSION_LIB([quicklz], [qlz_compress])
+AC_SUBST([UNITTEST_LIBS])
+
+# These are used by snappy-stubs-public.h.in.
+if test "$ac_cv_header_stdint_h" = "yes"; then
+ AC_SUBST([ac_cv_have_stdint_h], [1])
+else
+ AC_SUBST([ac_cv_have_stdint_h], [0])
+fi
+if test "$ac_cv_header_stddef_h" = "yes"; then
+ AC_SUBST([ac_cv_have_stddef_h], [1])
+else
+ AC_SUBST([ac_cv_have_stddef_h], [0])
+fi
+if test "$ac_cv_header_sys_uio_h" = "yes"; then
+ AC_SUBST([ac_cv_have_sys_uio_h], [1])
+else
+ AC_SUBST([ac_cv_have_sys_uio_h], [0])
+fi
+
+# Export the version to snappy-stubs-public.h.
+SNAPPY_MAJOR="snappy_major"
+SNAPPY_MINOR="snappy_minor"
+SNAPPY_PATCHLEVEL="snappy_patchlevel"
+
+AC_SUBST([SNAPPY_MAJOR])
+AC_SUBST([SNAPPY_MINOR])
+AC_SUBST([SNAPPY_PATCHLEVEL])
+AC_SUBST([SNAPPY_LTVERSION], snappy_ltversion)
+
+AC_CONFIG_HEADERS([config.h])
+AC_CONFIG_FILES([Makefile snappy-stubs-public.h])
+AC_OUTPUT
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/depcomp b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/depcomp
new file mode 100644
index 00000000..bd0ac089
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/depcomp
@@ -0,0 +1,688 @@
+#! /bin/sh
+# depcomp - compile a program generating dependencies as side-effects
+
+scriptversion=2011-12-04.11; # UTC
+
+# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009, 2010,
+# 2011 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Alexandre Oliva <oliva@dcc.unicamp.br>.
+
+case $1 in
+ '')
+ echo "$0: No command. Try \`$0 --help' for more information." 1>&2
+ exit 1;
+ ;;
+ -h | --h*)
+ cat <<\EOF
+Usage: depcomp [--help] [--version] PROGRAM [ARGS]
+
+Run PROGRAM ARGS to compile a file, generating dependencies
+as side-effects.
+
+Environment variables:
+ depmode Dependency tracking mode.
+  source      Source file read by `PROGRAM ARGS'.
+  object      Object file output by `PROGRAM ARGS'.
+ DEPDIR directory where to store dependencies.
+ depfile Dependency file to output.
+ tmpdepfile Temporary file to use when outputting dependencies.
+ libtool Whether libtool is used (yes/no).
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+ exit $?
+ ;;
+ -v | --v*)
+ echo "depcomp $scriptversion"
+ exit $?
+ ;;
+esac
+
+if test -z "$depmode" || test -z "$source" || test -z "$object"; then
+ echo "depcomp: Variables source, object and depmode must be set" 1>&2
+ exit 1
+fi
+
+# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po.
+depfile=${depfile-`echo "$object" |
+ sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`}
+tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
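+# (Editorial illustration, not in upstream depcomp: with object=sub/bar.o the
+# defaults above become depfile=sub/.deps/bar.Po and tmpdepfile=sub/.deps/bar.TPo.)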
+
+rm -f "$tmpdepfile"
+
+# Some modes work just like other modes, but use different flags. We
+# parameterize here, but still list the modes in the big case below,
+# to make depend.m4 easier to write. Note that we *cannot* use a case
+# here, because this file can only contain one case statement.
+if test "$depmode" = hp; then
+ # HP compiler uses -M and no extra arg.
+ gccflag=-M
+ depmode=gcc
+fi
+
+if test "$depmode" = dashXmstdout; then
+ # This is just like dashmstdout with a different argument.
+ dashmflag=-xM
+ depmode=dashmstdout
+fi
+
+cygpath_u="cygpath -u -f -"
+if test "$depmode" = msvcmsys; then
+ # This is just like msvisualcpp but w/o cygpath translation.
+ # Just convert the backslash-escaped backslashes to single forward
+ # slashes to satisfy depend.m4
+ cygpath_u='sed s,\\\\,/,g'
+ depmode=msvisualcpp
+fi
+
+if test "$depmode" = msvc7msys; then
+ # This is just like msvc7 but w/o cygpath translation.
+ # Just convert the backslash-escaped backslashes to single forward
+ # slashes to satisfy depend.m4
+ cygpath_u='sed s,\\\\,/,g'
+ depmode=msvc7
+fi
+
+case "$depmode" in
+gcc3)
+## gcc 3 implements dependency tracking that does exactly what
+## we want. Yay! Note: for some reason libtool 1.4 doesn't like
+## it if -MD -MP comes after the -MF stuff. Hmm.
+## Unfortunately, FreeBSD c89 acceptance of flags depends upon
+## the command line argument order; so add the flags where they
+## appear in depend2.am. Note that the slowdown incurred here
+## affects only configure: in makefiles, %FASTDEP% shortcuts this.
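+## As a purely illustrative example (hypothetical file names), a command such
+## as "gcc -o sub/foo.o -c sub/foo.c" is rebuilt by the loop below roughly as
+##   gcc -o sub/foo.o -MT sub/foo.o -MD -MP -MF sub/.deps/foo.TPo -c sub/foo.c
+## so one invocation both compiles and writes the dependency file.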
+ for arg
+ do
+ case $arg in
+ -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;;
+ *) set fnord "$@" "$arg" ;;
+ esac
+ shift # fnord
+ shift # $arg
+ done
+ "$@"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ mv "$tmpdepfile" "$depfile"
+ ;;
+
+gcc)
+## There are various ways to get dependency output from gcc. Here's
+## why we pick this rather obscure method:
+## - Don't want to use -MD because we'd like the dependencies to end
+## up in a subdir. Having to rename by hand is ugly.
+## (We might end up doing this anyway to support other compilers.)
+## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like
+## -MM, not -M (despite what the docs say).
+## - Using -M directly means running the compiler twice (even worse
+## than renaming).
+ if test -z "$gccflag"; then
+ gccflag=-MD,
+ fi
+ "$@" -Wp,"$gccflag$tmpdepfile"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
+## The second -e expression handles DOS-style file names with drive letters.
+ sed -e 's/^[^:]*: / /' \
+ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
+## This next piece of magic avoids the `deleted header file' problem.
+## The problem is that when a header file which appears in a .P file
+## is deleted, the dependency causes make to die (because there is
+## typically no way to rebuild the header). We avoid this by adding
+## dummy dependencies for each header file. Too bad gcc doesn't do
+## this for us directly.
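+## With hypothetical names, the resulting depfile then looks roughly like
+##   sub/foo.o : \
+##    sub/foo.c sub/foo.h
+##   sub/foo.c :
+##   sub/foo.h :
+## so deleting sub/foo.h no longer breaks make.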
+ tr ' ' '
+' < "$tmpdepfile" |
+## Some versions of gcc put a space before the `:'. On the theory
+## that the space means something, we add a space to the output as
+## well. hp depmode also adds that space, but also prefixes the VPATH
+## to the object. Take care to not repeat it in the output.
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \
+ | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+hp)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+sgi)
+ if test "$libtool" = yes; then
+ "$@" "-Wp,-MDupdate,$tmpdepfile"
+ else
+ "$@" -MDupdate "$tmpdepfile"
+ fi
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+
+ if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files
+ echo "$object : \\" > "$depfile"
+
+ # Clip off the initial element (the dependent). Don't try to be
+ # clever and replace this with sed code, as IRIX sed won't handle
+ # lines with more than a fixed number of characters (4096 in
+ # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines;
+ # the IRIX cc adds comments like `#:fec' to the end of the
+ # dependency line.
+ tr ' ' '
+' < "$tmpdepfile" \
+ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \
+ tr '
+' ' ' >> "$depfile"
+ echo >> "$depfile"
+
+ # The second pass generates a dummy entry for each header file.
+ tr ' ' '
+' < "$tmpdepfile" \
+ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
+ >> "$depfile"
+ else
+ # The sourcefile does not contain any dependencies, so just
+ # store a dummy comment line, to avoid errors with the Makefile
+ # "include basename.Plo" scheme.
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+aix)
+ # The C for AIX Compiler uses -M and outputs the dependencies
+ # in a .u file. In older versions, this file always lives in the
+ # current directory. Also, the AIX compiler puts `$object:' at the
+ # start of each line; $object doesn't have directory information.
+ # Version 6 uses the directory in both cases.
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+ test "x$dir" = "x$object" && dir=
+ base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+ if test "$libtool" = yes; then
+ tmpdepfile1=$dir$base.u
+ tmpdepfile2=$base.u
+ tmpdepfile3=$dir.libs/$base.u
+ "$@" -Wc,-M
+ else
+ tmpdepfile1=$dir$base.u
+ tmpdepfile2=$dir$base.u
+ tmpdepfile3=$dir$base.u
+ "$@" -M
+ fi
+ stat=$?
+
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ if test -f "$tmpdepfile"; then
+ # Each line is of the form `foo.o: dependent.h'.
+ # Do two passes, one to just change these to
+ # `$object: dependent.h' and one to simply `dependent.h:'.
+ sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
+ # That's a tab and a space in the [].
+ sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
+ else
+ # The sourcefile does not contain any dependencies, so just
+ # store a dummy comment line, to avoid errors with the Makefile
+ # "include basename.Plo" scheme.
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+icc)
+ # Intel's C compiler understands `-MD -MF file'. However on
+ # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c
+ # ICC 7.0 will fill foo.d with something like
+ # foo.o: sub/foo.c
+ # foo.o: sub/foo.h
+ # which is wrong. We want:
+ # sub/foo.o: sub/foo.c
+ # sub/foo.o: sub/foo.h
+ # sub/foo.c:
+ # sub/foo.h:
+ # ICC 7.1 will output
+ # foo.o: sub/foo.c sub/foo.h
+ # and will wrap long lines using \ :
+ # foo.o: sub/foo.c ... \
+ # sub/foo.h ... \
+ # ...
+
+ "$@" -MD -MF "$tmpdepfile"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ # Each line is of the form `foo.o: dependent.h',
+ # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'.
+ # Do two passes, one to just change these to
+ # `$object: dependent.h' and one to simply `dependent.h:'.
+ sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile"
+ # Some versions of the HPUX 10.20 sed can't process this invocation
+ # correctly. Breaking it into two sed invocations is a workaround.
+ sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" |
+ sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+hp2)
+ # The "hp" stanza above does not work with aCC (C++) and HP's ia64
+ # compilers, which have integrated preprocessors. The correct option
+ # to use with these is +Maked; it writes dependencies to a file named
+ # 'foo.d', which lands next to the object file, wherever that
+ # happens to be.
+ # Much of this is similar to the tru64 case; see comments there.
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+ test "x$dir" = "x$object" && dir=
+ base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+ if test "$libtool" = yes; then
+ tmpdepfile1=$dir$base.d
+ tmpdepfile2=$dir.libs/$base.d
+ "$@" -Wc,+Maked
+ else
+ tmpdepfile1=$dir$base.d
+ tmpdepfile2=$dir$base.d
+ "$@" +Maked
+ fi
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile1" "$tmpdepfile2"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ if test -f "$tmpdepfile"; then
+ sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile"
+ # Add `dependent.h:' lines.
+ sed -ne '2,${
+ s/^ *//
+ s/ \\*$//
+ s/$/:/
+ p
+ }' "$tmpdepfile" >> "$depfile"
+ else
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile" "$tmpdepfile2"
+ ;;
+
+tru64)
+ # The Tru64 compiler uses -MD to generate dependencies as a side
+ # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'.
+ # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
+ # dependencies in `foo.d' instead, so we check for that too.
+ # Subdirectories are respected.
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+ test "x$dir" = "x$object" && dir=
+ base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+
+ if test "$libtool" = yes; then
+ # With Tru64 cc, shared objects can also be used to make a
+ # static library. This mechanism is used in libtool 1.4 series to
+ # handle both shared and static libraries in a single compilation.
+ # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d.
+ #
+ # With libtool 1.5 this exception was removed, and libtool now
+ # generates 2 separate objects for the 2 libraries. These two
+ # compilations output dependencies in $dir.libs/$base.o.d and
+ # in $dir$base.o.d. We have to check for both files, because
+ # one of the two compilations can be disabled. We should prefer
+ # $dir$base.o.d over $dir.libs/$base.o.d because the latter is
+ # automatically cleaned when .libs/ is deleted, while ignoring
+ # the former would cause a distcleancheck panic.
+ tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4
+ tmpdepfile2=$dir$base.o.d # libtool 1.5
+ tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5
+ tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504
+ "$@" -Wc,-MD
+ else
+ tmpdepfile1=$dir$base.o.d
+ tmpdepfile2=$dir$base.d
+ tmpdepfile3=$dir$base.d
+ tmpdepfile4=$dir$base.d
+ "$@" -MD
+ fi
+
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ if test -f "$tmpdepfile"; then
+ sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
+ # That's a tab and a space in the [].
+ sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
+ else
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+msvc7)
+ if test "$libtool" = yes; then
+ showIncludes=-Wc,-showIncludes
+ else
+ showIncludes=-showIncludes
+ fi
+ "$@" $showIncludes > "$tmpdepfile"
+ stat=$?
+ grep -v '^Note: including file: ' "$tmpdepfile"
+ if test "$stat" = 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ # The first sed program below extracts the file names and escapes
+ # backslashes for cygpath. The second sed program outputs the file
+ # name when reading, but also accumulates all include files in the
+ # hold buffer in order to output them again at the end. This only
+ # works with sed implementations that can handle large buffers.
+ sed < "$tmpdepfile" -n '
+/^Note: including file: *\(.*\)/ {
+ s//\1/
+ s/\\/\\\\/g
+ p
+}' | $cygpath_u | sort -u | sed -n '
+s/ /\\ /g
+s/\(.*\)/ \1 \\/p
+s/.\(.*\) \\/\1:/
+H
+$ {
+ s/.*/ /
+ G
+ p
+}' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+msvc7msys)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+#nosideeffect)
+ # This comment above is used by automake to tell side-effect
+ # dependency tracking mechanisms from slower ones.
+
+dashmstdout)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout, regardless of -o.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ # Remove `-o $object'.
+ IFS=" "
+ for arg
+ do
+ case $arg in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift # fnord
+ shift # $arg
+ ;;
+ esac
+ done
+
+ test -z "$dashmflag" && dashmflag=-M
+ # Require at least two characters before searching for `:'
+ # in the target name. This is to cope with DOS-style filenames:
+ # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise.
+ "$@" $dashmflag |
+ sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile"
+ rm -f "$depfile"
+ cat < "$tmpdepfile" > "$depfile"
+ tr ' ' '
+' < "$tmpdepfile" | \
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+dashXmstdout)
+ # This case only exists to satisfy depend.m4. It is never actually
+ # run, as this mode is specially recognized in the preamble.
+ exit 1
+ ;;
+
+makedepend)
+ "$@" || exit $?
+ # Remove any Libtool call
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+ # X makedepend
+ shift
+ cleared=no eat=no
+ for arg
+ do
+ case $cleared in
+ no)
+ set ""; shift
+ cleared=yes ;;
+ esac
+ if test $eat = yes; then
+ eat=no
+ continue
+ fi
+ case "$arg" in
+ -D*|-I*)
+ set fnord "$@" "$arg"; shift ;;
+ # Strip any option that makedepend may not understand. Remove
+ # the object too, otherwise makedepend will parse it as a source file.
+ -arch)
+ eat=yes ;;
+ -*|$object)
+ ;;
+ *)
+ set fnord "$@" "$arg"; shift ;;
+ esac
+ done
+ obj_suffix=`echo "$object" | sed 's/^.*\././'`
+ touch "$tmpdepfile"
+ ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
+ rm -f "$depfile"
+ # makedepend may prepend the VPATH from the source file name to the object.
+ # No need to regex-escape $object, excess matching of '.' is harmless.
+ sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile"
+ sed '1,2d' "$tmpdepfile" | tr ' ' '
+' | \
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile" "$tmpdepfile".bak
+ ;;
+
+cpp)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ # Remove `-o $object'.
+ IFS=" "
+ for arg
+ do
+ case $arg in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift # fnord
+ shift # $arg
+ ;;
+ esac
+ done
+
+ "$@" -E |
+ sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' |
+ sed '$ s: \\$::' > "$tmpdepfile"
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ cat < "$tmpdepfile" >> "$depfile"
+ sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+msvisualcpp)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ IFS=" "
+ for arg
+ do
+ case "$arg" in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
+ set fnord "$@"
+ shift
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift
+ shift
+ ;;
+ esac
+ done
+ "$@" -E 2>/dev/null |
+ sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile"
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile"
+ echo " " >> "$depfile"
+ sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+msvcmsys)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+none)
+ exec "$@"
+ ;;
+
+*)
+ echo "Unknown depmode $depmode" 1>&2
+ exit 1
+ ;;
+esac
+
+exit 0
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/format_description.txt b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/format_description.txt
new file mode 100644
index 00000000..20db66c1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/format_description.txt
@@ -0,0 +1,110 @@
+Snappy compressed format description
+Last revised: 2011-10-05
+
+
+This is not a formal specification, but should suffice to explain most
+relevant parts of how the Snappy format works. It is originally based on
+text by Zeev Tarantov.
+
+Snappy is an LZ77-type compressor with a fixed, byte-oriented encoding.
+There is no entropy encoder backend nor framing layer -- the latter is
+assumed to be handled by other parts of the system.
+
+This document only describes the format, not how the Snappy compressor or
+decompressor actually works. The correctness of the decompressor should not
+depend on implementation details of the compressor, and vice versa.
+
+
+1. Preamble
+
+The stream starts with the uncompressed length (up to a maximum of 2^32 - 1),
+stored as a little-endian varint. Varints consist of a series of bytes,
+where the lower 7 bits are data and the upper bit is set iff there are
+more bytes to be read. In other words, an uncompressed length of 64 would
+be stored as 0x40, and an uncompressed length of 2097150 (0x1FFFFE)
+would be stored as 0xFE 0xFF 0x7F.
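+
+As an informal illustration only (this helper is not part of the format),
+the length could be decoded with C code along these lines:
+
+  /* Read a little-endian varint; returns the number of bytes consumed,
+     or 0 if the input is truncated. */
+  size_t read_varint32(const unsigned char *p, size_t n, unsigned int *out) {
+    unsigned int result = 0;
+    size_t i;
+    for (i = 0; i < n && i < 5; i++) {
+      result |= (unsigned int)(p[i] & 0x7f) << (7 * i);
+      if ((p[i] & 0x80) == 0) {   /* upper bit clear: last byte */
+        *out = result;
+        return i + 1;
+      }
+    }
+    return 0;
+  }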
+
+
+2. The compressed stream itself
+
+There are two types of elements in a Snappy stream: Literals and
+copies (backreferences). There is no restriction on the order of elements,
+except that the stream naturally cannot start with a copy. (Having
+two literals in a row is never optimal from a compression point of
+view, but nevertheless fully permitted.) Each element starts with a tag byte,
+and the lower two bits of this tag byte signal what type of element will
+follow:
+
+ 00: Literal
+ 01: Copy with 1-byte offset
+ 10: Copy with 2-byte offset
+ 11: Copy with 4-byte offset
+
+The interpretation of the upper six bits is element-dependent.
+
+
+2.1. Literals (00)
+
+Literals are uncompressed data stored directly in the byte stream.
+The literal length is stored differently depending on the length
+of the literal:
+
+ - For literals up to and including 60 bytes in length, the upper
+ six bits of the tag byte contain (len-1). The literal follows
+ immediately thereafter in the bytestream.
+ - For longer literals, the (len-1) value is stored after the tag byte,
+ little-endian. The upper six bits of the tag byte describe how
+ many bytes are used for the length; 60, 61, 62 or 63 for
+ 1-4 bytes, respectively. The literal itself follows after the
+ length.
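+
+Purely as an illustration (the helper below is hypothetical, not part of
+the specification), a literal header could be decoded like this in C:
+
+  /* p points at a literal tag byte (lower two bits 00). Stores the literal
+     length and returns the number of header bytes (tag plus length bytes). */
+  size_t read_literal_header(const unsigned char *p, unsigned int *len) {
+    unsigned int upper = p[0] >> 2;      /* upper six bits of the tag byte */
+    if (upper < 60) {                    /* short literal: length in the tag */
+      *len = upper + 1;
+      return 1;
+    } else {                             /* 60..63: 1-4 length bytes follow */
+      unsigned int nbytes = upper - 59, v = 0, i;
+      for (i = 0; i < nbytes; i++)
+        v |= (unsigned int)p[1 + i] << (8 * i);   /* little-endian */
+      *len = v + 1;
+      return 1 + nbytes;
+    }
+  }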
+
+
+2.2. Copies
+
+Copies are references back into previous decompressed data, telling
+the decompressor to reuse data it has previously decoded.
+They encode two values: The _offset_, saying how many bytes back
+from the current position to read, and the _length_, how many bytes
+to copy. Offsets of zero can be encoded, but are not legal;
+similarly, it is possible to encode backreferences that would
+go past the end of the block (offset > current decompressed position),
+which is also nonsensical and thus not allowed.
+
+As in most LZ77-based compressors, the length can be larger than the offset,
+yielding a form of run-length encoding (RLE). For instance,
+"xababab" could be encoded as
+
+ <literal: "xab"> <copy: offset=2 length=4>
+
+Note that since the current Snappy compressor works in 32 kB
+blocks and does not do matching across blocks, it will never produce
+a bitstream with offsets larger than about 32768. However, the
+decompressor should not rely on this, as it may change in the future.
+
+There are several different kinds of copy elements, depending on
+the number of bytes to be copied (length), and how far back the
+data to be copied is (offset).
+
+
+2.2.1. Copy with 1-byte offset (01)
+
+These elements can encode lengths between [4..11] bytes and offsets
+between [0..2047] bytes. (len-4) occupies three bits and is stored
+in bits [2..4] of the tag byte. The offset occupies 11 bits, of which the
+upper three are stored in the upper three bits ([5..7]) of the tag byte,
+and the lower eight are stored in a byte following the tag byte.
+
+
+2.2.2. Copy with 2-byte offset (10)
+
+These elements can encode lengths between [1..64] and offsets from
+[0..65535]. (len-1) occupies six bits and is stored in the upper
+six bits ([2..7]) of the tag byte. The offset is stored as a
+little-endian 16-bit integer in the two bytes following the tag byte.
+
+
+2.2.3. Copy with 4-byte offset (11)
+
+These are like the copies with 2-byte offsets (see previous subsection),
+except that the offset is stored as a 32-bit integer instead of a
+16-bit integer (and thus will occupy four bytes).
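+
+To tie the three copy layouts together, here is an informal C sketch (a
+hypothetical helper, not part of the specification) that extracts the length
+and offset of a copy element:
+
+  /* p points at the tag byte of a copy (tag & 3 is 1, 2 or 3). Returns the
+     number of bytes consumed by the tag plus the offset bytes. */
+  size_t read_copy(const unsigned char *p,
+                   unsigned int *length, unsigned int *offset) {
+    switch (p[0] & 3) {
+    case 1:  /* 1-byte offset: len-4 in bits [2..4], offset bits 8..10 in
+                bits [5..7] of the tag, low eight bits in the next byte */
+      *length = 4 + ((p[0] >> 2) & 0x7);
+      *offset = ((unsigned int)(p[0] >> 5) << 8) | p[1];
+      return 2;
+    case 2:  /* 2-byte offset: len-1 in the upper six bits of the tag */
+      *length = 1 + (p[0] >> 2);
+      *offset = p[1] | ((unsigned int)p[2] << 8);
+      return 3;
+    default: /* case 3: 4-byte offset, same length encoding as case 2 */
+      *length = 1 + (p[0] >> 2);
+      *offset = p[1] | ((unsigned int)p[2] << 8) |
+                ((unsigned int)p[3] << 16) | ((unsigned int)p[4] << 24);
+      return 5;
+    }
+  }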
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/framing_format.txt b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/framing_format.txt
new file mode 100644
index 00000000..9764e83d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/framing_format.txt
@@ -0,0 +1,135 @@
+Snappy framing format description
+Last revised: 2013-10-25
+
+This document describes a framing format for Snappy, allowing compression to
+files or streams that can then more easily be decompressed without having
+to hold the entire stream in memory. It also provides data checksums to
+help verify integrity. It does not provide metadata checksums, so it does
+not protect against e.g. all forms of truncation.
+
+Implementation of the framing format is optional for Snappy compressors and
+decompressors; it is not part of the Snappy core specification.
+
+
+1. General structure
+
+The file consists solely of chunks, lying back-to-back with no padding
+in between. Each chunk consists first of a single byte of chunk identifier,
+then a three-byte little-endian length of the chunk in bytes (from 0 to
+16777215, inclusive), and then the data, if any. The four bytes of chunk
+header are not counted in the data length.
+
+The different chunk types are listed below. The first chunk must always
+be the stream identifier chunk (see section 4.1, below). The stream
+ends when the file ends -- there is no explicit end-of-file marker.
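+
+As an informal illustration (the helper below is hypothetical and not part
+of this specification), the four-byte chunk header described above could be
+parsed like this in C:
+
+  /* Parse the chunk header at p; the chunk data (len bytes) follows
+     immediately after these four bytes. */
+  void parse_chunk_header(const unsigned char *p,
+                          unsigned char *type, unsigned int *len) {
+    *type = p[0];
+    *len = (unsigned int)p[1] |
+           ((unsigned int)p[2] << 8) |
+           ((unsigned int)p[3] << 16);   /* three-byte little-endian length */
+  }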
+
+
+2. File type identification
+
+The following identifiers for this format are recommended where appropriate.
+However, note that none have been registered officially, so this is only to
+be taken as a guideline. We use "Snappy framed" to distinguish between this
+format and raw Snappy data.
+
+ File extension: .sz
+ MIME type: application/x-snappy-framed
+ HTTP Content-Encoding: x-snappy-framed
+
+
+3. Checksum format
+
+Some chunks have data protected by a checksum (the ones that do will say so
+explicitly). The checksums are always masked CRC-32Cs.
+
+A description of CRC-32C can be found in RFC 3720, section 12.1, with
+examples in section B.4.
+
+Checksums are not stored directly, but masked, as checksumming data and
+then its own checksum can be problematic. The masking is the same as used
+in Apache Hadoop: Rotate the checksum by 15 bits, then add the constant
+0xa282ead8 (using wraparound as normal for unsigned integers). This is
+equivalent to the following C code:
+
+ uint32_t mask_checksum(uint32_t x) {
+ return ((x >> 15) | (x << 17)) + 0xa282ead8;
+ }
+
+Note that the masking is reversible.
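+Informally, and mirroring the snippet above, the inverse is:
+
+  uint32_t unmask_checksum(uint32_t x) {
+    x -= 0xa282ead8;
+    return (x >> 17) | (x << 15);
+  }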
+
+The checksum is always stored as a four-byte integer, in little-endian order.
+
+
+4. Chunk types
+
+The currently supported chunk types are described below. The list may
+be extended in the future.
+
+
+4.1. Stream identifier (chunk type 0xff)
+
+The stream identifier is always the first element in the stream.
+It is exactly six bytes long and contains "sNaPpY" in ASCII. This means that
+a valid Snappy framed stream always starts with the bytes
+
+ 0xff 0x06 0x00 0x00 0x73 0x4e 0x61 0x50 0x70 0x59
+
+The stream identifier chunk can come multiple times in the stream besides
+the first; if such a chunk shows up, it should simply be ignored, assuming
+it has the right length and contents. This allows for easy concatenation of
+compressed files without the need for re-framing.
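+
+As a hypothetical illustration, a reader might verify the start of a stream
+like this (using memcmp from <string.h>):
+
+  static const unsigned char kStreamHeader[10] =
+      { 0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59 };
+
+  int check_stream_header(const unsigned char *p, size_t n) {
+    return n >= 10 && memcmp(p, kStreamHeader, 10) == 0;
+  }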
+
+
+4.2. Compressed data (chunk type 0x00)
+
+Compressed data chunks contain a normal Snappy compressed bitstream;
+see the compressed format specification. The compressed data is preceded by
+the CRC-32C (see section 3) of the _uncompressed_ data.
+
+Note that the data portion of the chunk, i.e., the compressed contents,
+can be at most 16777211 bytes (2^24 - 1, minus the checksum).
+However, we place an additional restriction that the uncompressed data
+in a chunk must be no longer than 65536 bytes. This allows consumers to
+easily use small fixed-size buffers.
+
+
+4.3. Uncompressed data (chunk type 0x01)
+
+Uncompressed data chunks allow a compressor to send uncompressed,
+raw data; this is useful if, for instance, uncompressible or
+near-incompressible data is detected, and faster decompression is desired.
+
+As in the compressed chunks, the data is preceded by its own masked
+CRC-32C (see section 3).
+
+An uncompressed data chunk, like compressed data chunks, should contain
+no more than 65536 data bytes, so the maximum legal chunk length with the
+checksum is 65540.
+
+
+4.4. Padding (chunk type 0xfe)
+
+Padding chunks allow a compressor to increase the size of the data stream
+so that it complies with external demands, e.g. that the total number of
+bytes is a multiple of some value.
+
+All bytes of the padding chunk, except the chunk byte itself and the length,
+should be zero, but decompressors must not try to interpret or verify the
+padding data in any way.
+
+
+4.5. Reserved unskippable chunks (chunk types 0x02-0x7f)
+
+These are reserved for future expansion. A decoder that sees such a chunk
+should immediately return an error, as it must assume it cannot decode the
+stream correctly.
+
+Future versions of this specification may define meanings for these chunks.
+
+
+4.6. Reserved skippable chunks (chunk types 0x80-0xfd)
+
+These are also reserved for future expansion, but unlike the chunks
+described in 4.5, a decoder seeing these must skip them and continue
+decoding.
+
+Future versions of this specification may define meanings for these chunks.
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/install-sh b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/install-sh
new file mode 100644
index 00000000..a9244eb0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/install-sh
@@ -0,0 +1,527 @@
+#!/bin/sh
+# install - install a program, script, or datafile
+
+scriptversion=2011-01-19.21; # UTC
+
+# This originates from X11R5 (mit/util/scripts/install.sh), which was
+# later released in X11R6 (xc/config/util/install.sh) with the
+# following copyright and license.
+#
+# Copyright (C) 1994 X Consortium
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
+# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+# Except as contained in this notice, the name of the X Consortium shall not
+# be used in advertising or otherwise to promote the sale, use or other deal-
+# ings in this Software without prior written authorization from the X Consor-
+# tium.
+#
+#
+# FSF changes to this file are in the public domain.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.
+
+nl='
+'
+IFS=" "" $nl"
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit=${DOITPROG-}
+if test -z "$doit"; then
+ doit_exec=exec
+else
+ doit_exec=$doit
+fi
+
+# Put in absolute file names if you don't have them in your path;
+# or use environment vars.
+
+chgrpprog=${CHGRPPROG-chgrp}
+chmodprog=${CHMODPROG-chmod}
+chownprog=${CHOWNPROG-chown}
+cmpprog=${CMPPROG-cmp}
+cpprog=${CPPROG-cp}
+mkdirprog=${MKDIRPROG-mkdir}
+mvprog=${MVPROG-mv}
+rmprog=${RMPROG-rm}
+stripprog=${STRIPPROG-strip}
+
+posix_glob='?'
+initialize_posix_glob='
+ test "$posix_glob" != "?" || {
+ if (set -f) 2>/dev/null; then
+ posix_glob=
+ else
+ posix_glob=:
+ fi
+ }
+'
+
+posix_mkdir=
+
+# Desired mode of installed file.
+mode=0755
+
+chgrpcmd=
+chmodcmd=$chmodprog
+chowncmd=
+mvcmd=$mvprog
+rmcmd="$rmprog -f"
+stripcmd=
+
+src=
+dst=
+dir_arg=
+dst_arg=
+
+copy_on_change=false
+no_target_directory=
+
+usage="\
+Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
+ or: $0 [OPTION]... SRCFILES... DIRECTORY
+ or: $0 [OPTION]... -t DIRECTORY SRCFILES...
+ or: $0 [OPTION]... -d DIRECTORIES...
+
+In the 1st form, copy SRCFILE to DSTFILE.
+In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
+In the 4th, create DIRECTORIES.
+
+Options:
+ --help display this help and exit.
+ --version display version info and exit.
+
+ -c (ignored)
+ -C install only if different (preserve the last data modification time)
+ -d create directories instead of installing files.
+ -g GROUP $chgrpprog installed files to GROUP.
+ -m MODE $chmodprog installed files to MODE.
+ -o USER $chownprog installed files to USER.
+ -s $stripprog installed files.
+ -t DIRECTORY install into DIRECTORY.
+ -T report an error if DSTFILE is a directory.
+
+Environment variables override the default commands:
+ CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
+ RMPROG STRIPPROG
+"
+
+while test $# -ne 0; do
+ case $1 in
+ -c) ;;
+
+ -C) copy_on_change=true;;
+
+ -d) dir_arg=true;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift;;
+
+ --help) echo "$usage"; exit $?;;
+
+ -m) mode=$2
+ case $mode in
+ *' '* | *' '* | *'
+'* | *'*'* | *'?'* | *'['*)
+ echo "$0: invalid mode: $mode" >&2
+ exit 1;;
+ esac
+ shift;;
+
+ -o) chowncmd="$chownprog $2"
+ shift;;
+
+ -s) stripcmd=$stripprog;;
+
+ -t) dst_arg=$2
+ # Protect names problematic for `test' and other utilities.
+ case $dst_arg in
+ -* | [=\(\)!]) dst_arg=./$dst_arg;;
+ esac
+ shift;;
+
+ -T) no_target_directory=true;;
+
+ --version) echo "$0 $scriptversion"; exit $?;;
+
+ --) shift
+ break;;
+
+ -*) echo "$0: invalid option: $1" >&2
+ exit 1;;
+
+ *) break;;
+ esac
+ shift
+done
+
+if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
+ # When -d is used, all remaining arguments are directories to create.
+ # When -t is used, the destination is already specified.
+ # Otherwise, the last argument is the destination. Remove it from $@.
+ for arg
+ do
+ if test -n "$dst_arg"; then
+ # $@ is not empty: it contains at least $arg.
+ set fnord "$@" "$dst_arg"
+ shift # fnord
+ fi
+ shift # arg
+ dst_arg=$arg
+ # Protect names problematic for `test' and other utilities.
+ case $dst_arg in
+ -* | [=\(\)!]) dst_arg=./$dst_arg;;
+ esac
+ done
+fi
+
+if test $# -eq 0; then
+ if test -z "$dir_arg"; then
+ echo "$0: no input file specified." >&2
+ exit 1
+ fi
+ # It's OK to call `install-sh -d' without argument.
+ # This can happen when creating conditional directories.
+ exit 0
+fi
+
+if test -z "$dir_arg"; then
+ do_exit='(exit $ret); exit $ret'
+ trap "ret=129; $do_exit" 1
+ trap "ret=130; $do_exit" 2
+ trap "ret=141; $do_exit" 13
+ trap "ret=143; $do_exit" 15
+
+ # Set umask so as not to create temps with too-generous modes.
+ # However, 'strip' requires both read and write access to temps.
+ case $mode in
+ # Optimize common cases.
+ *644) cp_umask=133;;
+ *755) cp_umask=22;;
+
+ *[0-7])
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw='% 200'
+ fi
+ cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
+ *)
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw=,u+rw
+ fi
+ cp_umask=$mode$u_plus_rw;;
+ esac
+fi
+
+for src
+do
+ # Protect names problematic for `test' and other utilities.
+ case $src in
+ -* | [=\(\)!]) src=./$src;;
+ esac
+
+ if test -n "$dir_arg"; then
+ dst=$src
+ dstdir=$dst
+ test -d "$dstdir"
+ dstdir_status=$?
+ else
+
+ # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
+ # might cause directories to be created, which would be especially bad
+ # if $src (and thus $dsttmp) contains '*'.
+ if test ! -f "$src" && test ! -d "$src"; then
+ echo "$0: $src does not exist." >&2
+ exit 1
+ fi
+
+ if test -z "$dst_arg"; then
+ echo "$0: no destination specified." >&2
+ exit 1
+ fi
+ dst=$dst_arg
+
+ # If destination is a directory, append the input filename; won't work
+ # if double slashes aren't ignored.
+ if test -d "$dst"; then
+ if test -n "$no_target_directory"; then
+ echo "$0: $dst_arg: Is a directory" >&2
+ exit 1
+ fi
+ dstdir=$dst
+ dst=$dstdir/`basename "$src"`
+ dstdir_status=0
+ else
+ # Prefer dirname, but fall back on a substitute if dirname fails.
+ dstdir=`
+ (dirname "$dst") 2>/dev/null ||
+ expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$dst" : 'X\(//\)[^/]' \| \
+ X"$dst" : 'X\(//\)$' \| \
+ X"$dst" : 'X\(/\)' \| . 2>/dev/null ||
+ echo X"$dst" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'
+ `
+
+ test -d "$dstdir"
+ dstdir_status=$?
+ fi
+ fi
+
+ obsolete_mkdir_used=false
+
+ if test $dstdir_status != 0; then
+ case $posix_mkdir in
+ '')
+ # Create intermediate dirs using mode 755 as modified by the umask.
+ # This is like FreeBSD 'install' as of 1997-10-28.
+ umask=`umask`
+ case $stripcmd.$umask in
+ # Optimize common cases.
+ *[2367][2367]) mkdir_umask=$umask;;
+ .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
+
+ *[0-7])
+ mkdir_umask=`expr $umask + 22 \
+ - $umask % 100 % 40 + $umask % 20 \
+ - $umask % 10 % 4 + $umask % 2
+ `;;
+ *) mkdir_umask=$umask,go-w;;
+ esac
+
+ # With -d, create the new directory with the user-specified mode.
+ # Otherwise, rely on $mkdir_umask.
+ if test -n "$dir_arg"; then
+ mkdir_mode=-m$mode
+ else
+ mkdir_mode=
+ fi
+
+ posix_mkdir=false
+ case $umask in
+ *[123567][0-7][0-7])
+ # POSIX mkdir -p sets u+wx bits regardless of umask, which
+ # is incompatible with FreeBSD 'install' when (umask & 300) != 0.
+ ;;
+ *)
+ tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
+ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
+
+ if (umask $mkdir_umask &&
+ exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
+ then
+ if test -z "$dir_arg" || {
+ # Check for POSIX incompatibilities with -m.
+ # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
+ # other-writeable bit of parent directory when it shouldn't.
+ # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
+ ls_ld_tmpdir=`ls -ld "$tmpdir"`
+ case $ls_ld_tmpdir in
+ d????-?r-*) different_mode=700;;
+ d????-?--*) different_mode=755;;
+ *) false;;
+ esac &&
+ $mkdirprog -m$different_mode -p -- "$tmpdir" && {
+ ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
+ test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
+ }
+ }
+ then posix_mkdir=:
+ fi
+ rmdir "$tmpdir/d" "$tmpdir"
+ else
+ # Remove any dirs left behind by ancient mkdir implementations.
+ rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
+ fi
+ trap '' 0;;
+ esac;;
+ esac
+
+ if
+ $posix_mkdir && (
+ umask $mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
+ )
+ then :
+ else
+
+ # The umask is ridiculous, or mkdir does not conform to POSIX,
+ # or it failed possibly due to a race condition. Create the
+ # directory the slow way, step by step, checking for races as we go.
+
+ case $dstdir in
+ /*) prefix='/';;
+ [-=\(\)!]*) prefix='./';;
+ *) prefix='';;
+ esac
+
+ eval "$initialize_posix_glob"
+
+ oIFS=$IFS
+ IFS=/
+ $posix_glob set -f
+ set fnord $dstdir
+ shift
+ $posix_glob set +f
+ IFS=$oIFS
+
+ prefixes=
+
+ for d
+ do
+ test X"$d" = X && continue
+
+ prefix=$prefix$d
+ if test -d "$prefix"; then
+ prefixes=
+ else
+ if $posix_mkdir; then
+	    (umask $mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
+ # Don't fail if two instances are running concurrently.
+ test -d "$prefix" || exit 1
+ else
+ case $prefix in
+ *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) qprefix=$prefix;;
+ esac
+ prefixes="$prefixes '$qprefix'"
+ fi
+ fi
+ prefix=$prefix/
+ done
+
+ if test -n "$prefixes"; then
+ # Don't fail if two instances are running concurrently.
+ (umask $mkdir_umask &&
+ eval "\$doit_exec \$mkdirprog $prefixes") ||
+ test -d "$dstdir" || exit 1
+ obsolete_mkdir_used=true
+ fi
+ fi
+ fi
+
+ if test -n "$dir_arg"; then
+ { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
+ { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
+ test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
+ else
+
+ # Make a couple of temp file names in the proper directory.
+ dsttmp=$dstdir/_inst.$$_
+ rmtmp=$dstdir/_rm.$$_
+
+ # Trap to clean up those temp files at exit.
+ trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
+
+ # Copy the file name to the temp name.
+ (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
+
+ # and set any options; do chmod last to preserve setuid bits.
+ #
+ # If any of these fail, we abort the whole thing. If we want to
+ # ignore errors from any of these, just make sure not to ignore
+ # errors from the above "$doit $cpprog $src $dsttmp" command.
+ #
+ { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
+ { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
+ { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
+
+ # If -C, don't bother to copy if it wouldn't change the file.
+ if $copy_on_change &&
+ old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` &&
+ new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` &&
+
+ eval "$initialize_posix_glob" &&
+ $posix_glob set -f &&
+ set X $old && old=:$2:$4:$5:$6 &&
+ set X $new && new=:$2:$4:$5:$6 &&
+ $posix_glob set +f &&
+
+ test "$old" = "$new" &&
+ $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
+ then
+ rm -f "$dsttmp"
+ else
+ # Rename the file to the real destination.
+ $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
+
+ # The rename failed, perhaps because mv can't rename something else
+ # to itself, or perhaps because mv is so ancient that it does not
+ # support -f.
+ {
+ # Now remove or move aside any old file at destination location.
+ # We try this two ways since rm can't unlink itself on some
+ # systems and the destination file might be busy for other
+ # reasons. In this case, the final cleanup might fail but the new
+ # file should still install successfully.
+ {
+ test ! -f "$dst" ||
+ $doit $rmcmd -f "$dst" 2>/dev/null ||
+ { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
+ { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
+ } ||
+ { echo "$0: cannot unlink or rename $dst" >&2
+ (exit 1); exit 1
+ }
+ } &&
+
+ # Now rename the file to the real destination.
+ $doit $mvcmd "$dsttmp" "$dst"
+ }
+ fi || exit 1
+
+ trap '' 0
+ fi
+done
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/ltmain.sh b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/ltmain.sh
new file mode 100644
index 00000000..9385c3e6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/ltmain.sh
@@ -0,0 +1,9661 @@
+
+# libtool (GNU libtool) 2.4.2
+# Written by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006,
+# 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions. There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# GNU Libtool is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Libtool; see the file COPYING. If not, a copy
+# can be downloaded from http://www.gnu.org/licenses/gpl.html,
+# or obtained by writing to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA.
+
+# Usage: $progname [OPTION]... [MODE-ARG]...
+#
+# Provide generalized library-building support services.
+#
+# --config show all configuration variables
+# --debug enable verbose shell tracing
+# -n, --dry-run display commands without modifying any files
+# --features display basic configuration information and exit
+# --mode=MODE use operation mode MODE
+# --preserve-dup-deps don't remove duplicate dependency libraries
+# --quiet, --silent don't print informational messages
+# --no-quiet, --no-silent
+# print informational messages (default)
+# --no-warn don't display warning messages
+# --tag=TAG use configuration variables from tag TAG
+# -v, --verbose print more informational messages than default
+# --no-verbose don't print the extra informational messages
+# --version print version information
+# -h, --help, --help-all print short, long, or detailed help message
+#
+# MODE must be one of the following:
+#
+# clean remove files from the build directory
+# compile compile a source file into a libtool object
+# execute automatically set library path, then run a program
+# finish complete the installation of libtool libraries
+# install install libraries or executables
+# link create a library or an executable
+# uninstall remove libraries from an installed directory
+#
+# MODE-ARGS vary depending on the MODE. When passed as first option,
+# `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that.
+# Try `$progname --help --mode=MODE' for a more detailed description of MODE.
+#
+# When reporting a bug, please describe a test case to reproduce it and
+# include the following information:
+#
+# host-triplet: $host
+# shell: $SHELL
+# compiler: $LTCC
+# compiler flags: $LTCFLAGS
+# linker: $LD (gnu? $with_gnu_ld)
+# $progname: (GNU libtool) 2.4.2 Debian-2.4.2-1ubuntu1
+# automake: $automake_version
+# autoconf: $autoconf_version
+#
+# Report bugs to <bug-libtool@gnu.org>.
+# GNU libtool home page: <http://www.gnu.org/software/libtool/>.
+# General help using GNU software: <http://www.gnu.org/gethelp/>.
+
+PROGRAM=libtool
+PACKAGE=libtool
+VERSION="2.4.2 Debian-2.4.2-1ubuntu1"
+TIMESTAMP=""
+package_revision=1.3337
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+  # Zsh 3.x and 4.x perform word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+ eval 'cat <<_LTECHO_EOF
+$1
+_LTECHO_EOF'
+}
+
+# NLS nuisances: We save the old values to restore during execute mode.
+lt_user_locale=
+lt_safe_locale=
+for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
+do
+ eval "if test \"\${$lt_var+set}\" = set; then
+ save_$lt_var=\$$lt_var
+ $lt_var=C
+ export $lt_var
+ lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\"
+ lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\"
+ fi"
+done
+LC_ALL=C
+LANGUAGE=C
+export LANGUAGE LC_ALL
+
+$lt_unset CDPATH
+
+
+# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
+# is ksh but when the shell is invoked as "sh" and the current value of
+# the _XPG environment variable is not equal to 1 (one), the special
+# positional parameter $0, within a function call, is the name of the
+# function.
+progpath="$0"
+
+
+
+: ${CP="cp -f"}
+test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'}
+: ${MAKE="make"}
+: ${MKDIR="mkdir"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+: ${SHELL="${CONFIG_SHELL-/bin/sh}"}
+: ${Xsed="$SED -e 1s/^X//"}
+
+# Global variables:
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing.
+EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake.
+
+exit_status=$EXIT_SUCCESS
+
+# Make sure IFS has a sensible default
+lt_nl='
+'
+IFS=" $lt_nl"
+
+dirname="s,/[^/]*$,,"
+basename="s,^.*/,,"
+
+# func_dirname file append nondir_replacement
+# Compute the dirname of FILE. If nonempty, add APPEND to the result,
+# otherwise set result to NONDIR_REPLACEMENT.
+func_dirname ()
+{
+ func_dirname_result=`$ECHO "${1}" | $SED "$dirname"`
+ if test "X$func_dirname_result" = "X${1}"; then
+ func_dirname_result="${3}"
+ else
+ func_dirname_result="$func_dirname_result${2}"
+ fi
+} # func_dirname may be replaced by extended shell implementation
+
+
+# func_basename file
+func_basename ()
+{
+ func_basename_result=`$ECHO "${1}" | $SED "$basename"`
+} # func_basename may be replaced by extended shell implementation
+
+
+# func_dirname_and_basename file append nondir_replacement
+# perform func_basename and func_dirname in a single function
+# call:
+# dirname: Compute the dirname of FILE. If nonempty,
+# add APPEND to the result, otherwise set result
+# to NONDIR_REPLACEMENT.
+# value returned in "$func_dirname_result"
+# basename: Compute filename of FILE.
+#                       value returned in "$func_basename_result"
+# Implementation must be kept synchronized with func_dirname
+# and func_basename. For efficiency, we do not delegate to
+# those functions but instead duplicate the functionality here.
+func_dirname_and_basename ()
+{
+ # Extract subdirectory from the argument.
+ func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"`
+ if test "X$func_dirname_result" = "X${1}"; then
+ func_dirname_result="${3}"
+ else
+ func_dirname_result="$func_dirname_result${2}"
+ fi
+ func_basename_result=`$ECHO "${1}" | $SED -e "$basename"`
+} # func_dirname_and_basename may be replaced by extended shell implementation
+
+
+# func_stripname prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+# func_strip_suffix prefix name
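+# For example (hypothetical values), `func_stripname 'lib' '.a' 'libfoo.a'`
+# leaves "foo" in $func_stripname_result.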
+func_stripname ()
+{
+ case ${2} in
+ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;;
+ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;;
+ esac
+} # func_stripname may be replaced by extended shell implementation
+
+
+# These SED scripts presuppose an absolute path with a trailing slash.
+pathcar='s,^/\([^/]*\).*$,\1,'
+pathcdr='s,^/[^/]*,,'
+removedotparts=':dotsl
+ s@/\./@/@g
+ t dotsl
+ s,/\.$,/,'
+collapseslashes='s@/\{1,\}@/@g'
+finalslash='s,/*$,/,'
+
+# func_normal_abspath PATH
+# Remove doubled-up and trailing slashes, "." path components,
+# and cancel out any ".." path components in PATH after making
+# it an absolute path.
+# value returned in "$func_normal_abspath_result"
+func_normal_abspath ()
+{
+ # Start from root dir and reassemble the path.
+ func_normal_abspath_result=
+ func_normal_abspath_tpath=$1
+ func_normal_abspath_altnamespace=
+ case $func_normal_abspath_tpath in
+ "")
+ # Empty path, that just means $cwd.
+ func_stripname '' '/' "`pwd`"
+ func_normal_abspath_result=$func_stripname_result
+ return
+ ;;
+ # The next three entries are used to spot a run of precisely
+ # two leading slashes without using negated character classes;
+ # we take advantage of case's first-match behaviour.
+ ///*)
+ # Unusual form of absolute path, do nothing.
+ ;;
+ //*)
+ # Not necessarily an ordinary path; POSIX reserves leading '//'
+ # and for example Cygwin uses it to access remote file shares
+ # over CIFS/SMB, so we conserve a leading double slash if found.
+ func_normal_abspath_altnamespace=/
+ ;;
+ /*)
+ # Absolute path, do nothing.
+ ;;
+ *)
+ # Relative path, prepend $cwd.
+ func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath
+ ;;
+ esac
+ # Cancel out all the simple stuff to save iterations. We also want
+ # the path to end with a slash for ease of parsing, so make sure
+ # there is one (and only one) here.
+ func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \
+ -e "$removedotparts" -e "$collapseslashes" -e "$finalslash"`
+ while :; do
+ # Processed it all yet?
+ if test "$func_normal_abspath_tpath" = / ; then
+ # If we ascended to the root using ".." the result may be empty now.
+ if test -z "$func_normal_abspath_result" ; then
+ func_normal_abspath_result=/
+ fi
+ break
+ fi
+ func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \
+ -e "$pathcar"`
+ func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \
+ -e "$pathcdr"`
+ # Figure out what to do with it
+ case $func_normal_abspath_tcomponent in
+ "")
+ # Trailing empty path component, ignore it.
+ ;;
+ ..)
+ # Parent dir; strip last assembled component from result.
+ func_dirname "$func_normal_abspath_result"
+ func_normal_abspath_result=$func_dirname_result
+ ;;
+ *)
+ # Actual path component, append it.
+ func_normal_abspath_result=$func_normal_abspath_result/$func_normal_abspath_tcomponent
+ ;;
+ esac
+ done
+ # Restore leading double-slash if one was found on entry.
+ func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result
+}
+
+# func_relative_path SRCDIR DSTDIR
+# generates a relative path from SRCDIR to DSTDIR, with a trailing
+# slash if non-empty, suitable for immediately appending a filename
+# without needing to append a separator.
+# value returned in "$func_relative_path_result"
+func_relative_path ()
+{
+ func_relative_path_result=
+ func_normal_abspath "$1"
+ func_relative_path_tlibdir=$func_normal_abspath_result
+ func_normal_abspath "$2"
+ func_relative_path_tbindir=$func_normal_abspath_result
+
+ # Ascend the tree starting from libdir
+ while :; do
+ # check if we have found a prefix of bindir
+ case $func_relative_path_tbindir in
+ $func_relative_path_tlibdir)
+ # found an exact match
+ func_relative_path_tcancelled=
+ break
+ ;;
+ $func_relative_path_tlibdir*)
+ # found a matching prefix
+ func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir"
+ func_relative_path_tcancelled=$func_stripname_result
+ if test -z "$func_relative_path_result"; then
+ func_relative_path_result=.
+ fi
+ break
+ ;;
+ *)
+ func_dirname $func_relative_path_tlibdir
+ func_relative_path_tlibdir=${func_dirname_result}
+ if test "x$func_relative_path_tlibdir" = x ; then
+ # Have to descend all the way to the root!
+ func_relative_path_result=../$func_relative_path_result
+ func_relative_path_tcancelled=$func_relative_path_tbindir
+ break
+ fi
+ func_relative_path_result=../$func_relative_path_result
+ ;;
+ esac
+ done
+
+ # Now calculate path; take care to avoid doubling-up slashes.
+ func_stripname '' '/' "$func_relative_path_result"
+ func_relative_path_result=$func_stripname_result
+ func_stripname '/' '/' "$func_relative_path_tcancelled"
+ if test "x$func_stripname_result" != x ; then
+ func_relative_path_result=${func_relative_path_result}/${func_stripname_result}
+ fi
+
+ # Normalisation. If bindir is libdir, return empty string,
+ # else relative path ending with a slash; either way, target
+ # file name can be directly appended.
+ if test ! -z "$func_relative_path_result"; then
+ func_stripname './' '' "$func_relative_path_result/"
+ func_relative_path_result=$func_stripname_result
+ fi
+}
+
+# The name of this program:
+func_dirname_and_basename "$progpath"
+progname=$func_basename_result
+
+# Make sure we have an absolute path for reexecution:
+case $progpath in
+ [\\/]*|[A-Za-z]:\\*) ;;
+ *[\\/]*)
+ progdir=$func_dirname_result
+ progdir=`cd "$progdir" && pwd`
+ progpath="$progdir/$progname"
+ ;;
+ *)
+ save_IFS="$IFS"
+ IFS=${PATH_SEPARATOR-:}
+ for progdir in $PATH; do
+ IFS="$save_IFS"
+ test -x "$progdir/$progname" && break
+ done
+ IFS="$save_IFS"
+ test -n "$progdir" || progdir=`pwd`
+ progpath="$progdir/$progname"
+ ;;
+esac
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed="${SED}"' -e 1s/^X//'
+sed_quote_subst='s/\([`"$\\]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\(["`\\]\)/\\\1/g'
+
+# Sed substitution that turns a string into a regex matching for the
+# string literally.
+sed_make_literal_regex='s,[].[^$\\*\/],\\&,g'
+
+# Sed substitution that converts a w32 file name or path
+# which contains forward slashes, into one that contains
+# (escaped) backslashes. A very naive implementation.
+lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g'
+
+# Re-`\' parameter expansions in output of double_quote_subst that were
+# `\'-ed in input to the same. If an odd number of `\' preceded a '$'
+# in input to double_quote_subst, that '$' was protected from expansion.
+# Since each input `\' is now two `\'s, look for any number of runs of
+# four `\'s followed by two `\'s and then a '$'; backslash-escape that '$'.
+bs='\\'
+bs2='\\\\'
+bs4='\\\\\\\\'
+dollar='\$'
+sed_double_backslash="\
+ s/$bs4/&\\
+/g
+ s/^$bs2$dollar/$bs&/
+ s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g
+ s/\n//g"
+
+# Standard options:
+opt_dry_run=false
+opt_help=false
+opt_quiet=false
+opt_verbose=false
+opt_warning=:
+
+# func_echo arg...
+# Echo program name prefixed message, along with the current mode
+# name if it has been set.
+func_echo ()
+{
+ $ECHO "$progname: ${opt_mode+$opt_mode: }$*"
+}
+
+# func_verbose arg...
+# Echo program name prefixed message in verbose mode only.
+func_verbose ()
+{
+ $opt_verbose && func_echo ${1+"$@"}
+
+ # A bug in bash halts the script if the last line of a function
+ # fails when set -e is in force, so we need another command to
+ # work around that:
+ :
+}
+
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+ $ECHO "$*"
+}
+
+# func_error arg...
+# Echo program name prefixed message to standard error.
+func_error ()
+{
+ $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2
+}
+
+# func_warning arg...
+# Echo program name prefixed warning message to standard error.
+func_warning ()
+{
+ $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2
+
+ # bash bug again:
+ :
+}
+
+# func_fatal_error arg...
+# Echo program name prefixed message to standard error, and exit.
+func_fatal_error ()
+{
+ func_error ${1+"$@"}
+ exit $EXIT_FAILURE
+}
+
+# func_fatal_help arg...
+# Echo program name prefixed message to standard error, followed by
+# a help hint, and exit.
+func_fatal_help ()
+{
+ func_error ${1+"$@"}
+ func_fatal_error "$help"
+}
+help="Try \`$progname --help' for more information." ## default
+
+
+# func_grep expression filename
+# Check whether EXPRESSION matches any line of FILENAME, without output.
+func_grep ()
+{
+ $GREP "$1" "$2" >/dev/null 2>&1
+}
+
+
+# func_mkdir_p directory-path
+# Make sure the entire path to DIRECTORY-PATH is available.
+func_mkdir_p ()
+{
+ my_directory_path="$1"
+ my_dir_list=
+
+ if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then
+
+ # Protect directory names starting with `-'
+ case $my_directory_path in
+ -*) my_directory_path="./$my_directory_path" ;;
+ esac
+
+ # While some portion of DIR does not yet exist...
+ while test ! -d "$my_directory_path"; do
+ # ...make a list in topmost first order. Use a colon-delimited
+ # list in case some portion of the path contains whitespace.
+ my_dir_list="$my_directory_path:$my_dir_list"
+
+ # If the last portion added has no slash in it, the list is done
+ case $my_directory_path in */*) ;; *) break ;; esac
+
+ # ...otherwise throw away the child directory and loop
+ my_directory_path=`$ECHO "$my_directory_path" | $SED -e "$dirname"`
+ done
+ my_dir_list=`$ECHO "$my_dir_list" | $SED 's,:*$,,'`
+
+ save_mkdir_p_IFS="$IFS"; IFS=':'
+ for my_dir in $my_dir_list; do
+ IFS="$save_mkdir_p_IFS"
+ # mkdir can fail with a `File exists' error if two processes
+ # try to create one of the directories concurrently. Don't
+ # stop in that case!
+ $MKDIR "$my_dir" 2>/dev/null || :
+ done
+ IFS="$save_mkdir_p_IFS"
+
+ # Bail out if we (or some other process) failed to create a directory.
+ test -d "$my_directory_path" || \
+ func_fatal_error "Failed to create \`$1'"
+ fi
+}
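+# For example, `func_mkdir_p build/.libs/sub' creates `build', `build/.libs'
+# and `build/.libs/sub' as needed, tolerating directories created
+# concurrently by other processes (illustrative example).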
+
+
+# func_mktempdir [string]
+# Make a temporary directory that won't clash with other running
+# libtool processes, and avoid race conditions if possible. If
+# given, STRING is the basename for that directory.
+func_mktempdir ()
+{
+ my_template="${TMPDIR-/tmp}/${1-$progname}"
+
+ if test "$opt_dry_run" = ":"; then
+ # Return a directory name, but don't create it in dry-run mode
+ my_tmpdir="${my_template}-$$"
+ else
+
+ # If mktemp works, use that first and foremost
+ my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null`
+
+ if test ! -d "$my_tmpdir"; then
+ # Failing that, at least try and use $RANDOM to avoid a race
+ my_tmpdir="${my_template}-${RANDOM-0}$$"
+
+ save_mktempdir_umask=`umask`
+ umask 0077
+ $MKDIR "$my_tmpdir"
+ umask $save_mktempdir_umask
+ fi
+
+ # If we're not in dry-run mode, bomb out on failure
+ test -d "$my_tmpdir" || \
+ func_fatal_error "cannot create temporary directory \`$my_tmpdir'"
+ fi
+
+ $ECHO "$my_tmpdir"
+}
+
+
+# func_quote_for_eval arg
+# Aesthetically quote ARG to be evaled later.
+# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT
+# is double-quoted, suitable for a subsequent eval, whereas
+# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters
+# which are still active within double quotes backslashified.
+func_quote_for_eval ()
+{
+ case $1 in
+ *[\\\`\"\$]*)
+ func_quote_for_eval_unquoted_result=`$ECHO "$1" | $SED "$sed_quote_subst"` ;;
+ *)
+ func_quote_for_eval_unquoted_result="$1" ;;
+ esac
+
+ case $func_quote_for_eval_unquoted_result in
+ # Double-quote args containing shell metacharacters to delay
+ # word splitting, command substitution, and variable
+ # expansion for a subsequent eval.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\""
+ ;;
+ *)
+ func_quote_for_eval_result="$func_quote_for_eval_unquoted_result"
+ esac
+}
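+# For example, `func_quote_for_eval 'a b$c'' would typically leave
+# func_quote_for_eval_unquoted_result as `a b\$c' and
+# func_quote_for_eval_result as `"a b\$c"' (illustrative example).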
+
+
+# func_quote_for_expand arg
+# Aesthetically quote ARG to be evaled later; same as above,
+# but do not quote variable references.
+func_quote_for_expand ()
+{
+ case $1 in
+ *[\\\`\"]*)
+ my_arg=`$ECHO "$1" | $SED \
+ -e "$double_quote_subst" -e "$sed_double_backslash"` ;;
+ *)
+ my_arg="$1" ;;
+ esac
+
+ case $my_arg in
+ # Double-quote args containing shell metacharacters to delay
+ # word splitting and command substitution for a subsequent eval.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ my_arg="\"$my_arg\""
+ ;;
+ esac
+
+ func_quote_for_expand_result="$my_arg"
+}
+
+
+# func_show_eval cmd [fail_exp]
+# Unless opt_silent is true, output CMD. Then, if opt_dry_run is
+# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP
+# is given, then evaluate it.
+func_show_eval ()
+{
+ my_cmd="$1"
+ my_fail_exp="${2-:}"
+
+ ${opt_silent-false} || {
+ func_quote_for_expand "$my_cmd"
+ eval "func_echo $func_quote_for_expand_result"
+ }
+
+ if ${opt_dry_run-false}; then :; else
+ eval "$my_cmd"
+ my_status=$?
+ if test "$my_status" -eq 0; then :; else
+ eval "(exit $my_status); $my_fail_exp"
+ fi
+ fi
+}
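+# For example, `func_show_eval '$RM conftest.o' 'exit 1'' echoes the command
+# unless opt_silent is true, runs it unless opt_dry_run is true, and
+# evaluates `exit 1' if the removal fails (illustrative example).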
+
+
+# func_show_eval_locale cmd [fail_exp]
+# Unless opt_silent is true, output CMD. Then, if opt_dry_run is
+# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP
+# is given, then evaluate it. Use the saved locale for evaluation.
+func_show_eval_locale ()
+{
+ my_cmd="$1"
+ my_fail_exp="${2-:}"
+
+ ${opt_silent-false} || {
+ func_quote_for_expand "$my_cmd"
+ eval "func_echo $func_quote_for_expand_result"
+ }
+
+ if ${opt_dry_run-false}; then :; else
+ eval "$lt_user_locale
+ $my_cmd"
+ my_status=$?
+ eval "$lt_safe_locale"
+ if test "$my_status" -eq 0; then :; else
+ eval "(exit $my_status); $my_fail_exp"
+ fi
+ fi
+}
+
+# func_tr_sh
+# Turn $1 into a string suitable for a shell variable name.
+# Result is stored in $func_tr_sh_result. All characters
+# not in the set a-zA-Z0-9_ are replaced with '_'. Further,
+# if $1 begins with a digit, a '_' is prepended as well.
+func_tr_sh ()
+{
+ case $1 in
+ [0-9]* | *[!a-zA-Z0-9_]*)
+ func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'`
+ ;;
+ * )
+ func_tr_sh_result=$1
+ ;;
+ esac
+}
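+# For example, `func_tr_sh 9bit-lib' sets func_tr_sh_result to `_9bit_lib'
+# (illustrative example).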
+
+
+# func_version
+# Echo version message to standard output and exit.
+func_version ()
+{
+ $opt_debug
+
+ $SED -n '/(C)/!b go
+ :more
+ /\./!{
+ N
+ s/\n# / /
+ b more
+ }
+ :go
+ /^# '$PROGRAM' (GNU /,/# warranty; / {
+ s/^# //
+ s/^# *$//
+ s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/
+ p
+ }' < "$progpath"
+ exit $?
+}
+
+# func_usage
+# Echo short help message to standard output and exit.
+func_usage ()
+{
+ $opt_debug
+
+ $SED -n '/^# Usage:/,/^# *.*--help/ {
+ s/^# //
+ s/^# *$//
+ s/\$progname/'$progname'/
+ p
+ }' < "$progpath"
+ echo
+ $ECHO "run \`$progname --help | more' for full usage"
+ exit $?
+}
+
+# func_help [NOEXIT]
+# Echo long help message to standard output and exit,
+# unless 'noexit' is passed as argument.
+func_help ()
+{
+ $opt_debug
+
+ $SED -n '/^# Usage:/,/# Report bugs to/ {
+ :print
+ s/^# //
+ s/^# *$//
+ s*\$progname*'$progname'*
+ s*\$host*'"$host"'*
+ s*\$SHELL*'"$SHELL"'*
+ s*\$LTCC*'"$LTCC"'*
+ s*\$LTCFLAGS*'"$LTCFLAGS"'*
+ s*\$LD*'"$LD"'*
+ s/\$with_gnu_ld/'"$with_gnu_ld"'/
+ s/\$automake_version/'"`(${AUTOMAKE-automake} --version) 2>/dev/null |$SED 1q`"'/
+ s/\$autoconf_version/'"`(${AUTOCONF-autoconf} --version) 2>/dev/null |$SED 1q`"'/
+ p
+ d
+ }
+ /^# .* home page:/b print
+ /^# General help using/b print
+ ' < "$progpath"
+ ret=$?
+ if test -z "$1"; then
+ exit $ret
+ fi
+}
+
+# func_missing_arg argname
+# Echo program name prefixed message to standard error and set global
+# exit_cmd.
+func_missing_arg ()
+{
+ $opt_debug
+
+ func_error "missing argument for $1."
+ exit_cmd=exit
+}
+
+
+# func_split_short_opt shortopt
+# Set func_split_short_opt_name and func_split_short_opt_arg shell
+# variables after splitting SHORTOPT after the 2nd character.
+func_split_short_opt ()
+{
+ my_sed_short_opt='1s/^\(..\).*$/\1/;q'
+ my_sed_short_rest='1s/^..\(.*\)$/\1/;q'
+
+ func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"`
+ func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"`
+} # func_split_short_opt may be replaced by extended shell implementation
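+# For example, `func_split_short_opt -nv' yields func_split_short_opt_name=-n
+# and func_split_short_opt_arg=v; the option parser below re-queues the
+# remainder as `-v' (illustrative example).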
+
+
+# func_split_long_opt longopt
+# Set func_split_long_opt_name and func_split_long_opt_arg shell
+# variables after splitting LONGOPT at the `=' sign.
+func_split_long_opt ()
+{
+ my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q'
+ my_sed_long_arg='1s/^--[^=]*=//'
+
+ func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"`
+ func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"`
+} # func_split_long_opt may be replaced by extended shell implementation
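+# For example, `func_split_long_opt --mode=link' yields
+# func_split_long_opt_name=--mode and func_split_long_opt_arg=link
+# (illustrative example).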
+
+exit_cmd=:
+
+
+
+
+
+magic="%%%MAGIC variable%%%"
+magic_exe="%%%MAGIC EXE variable%%%"
+
+# Global variables.
+nonopt=
+preserve_args=
+lo2o="s/\\.lo\$/.${objext}/"
+o2lo="s/\\.${objext}\$/.lo/"
+extracted_archives=
+extracted_serial=0
+
+# If this variable is set in any of the actions, the command in it
+# will be execed at the end. This prevents here-documents from being
+# left over by shells.
+exec_cmd=
+
+# func_append var value
+# Append VALUE to the end of shell variable VAR.
+func_append ()
+{
+ eval "${1}=\$${1}\${2}"
+} # func_append may be replaced by extended shell implementation
+
+# func_append_quoted var value
+# Quote VALUE and append to the end of shell variable VAR, separated
+# by a space.
+func_append_quoted ()
+{
+ func_quote_for_eval "${2}"
+ eval "${1}=\$${1}\\ \$func_quote_for_eval_result"
+} # func_append_quoted may be replaced by extended shell implementation
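+# For example, with FLAGS=-g, `func_append_quoted FLAGS "a b"' leaves FLAGS
+# with the value `-g "a b"', the quoting coming from func_quote_for_eval
+# (illustrative example).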
+
+
+# func_arith arithmetic-term...
+func_arith ()
+{
+ func_arith_result=`expr "${@}"`
+} # func_arith may be replaced by extended shell implementation
+
+
+# func_len string
+# STRING may not start with a hyphen.
+func_len ()
+{
+ func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len`
+} # func_len may be replaced by extended shell implementation
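+# For example, `func_arith 2 + 3' sets func_arith_result to 5, and
+# `func_len abc' sets func_len_result to 3 (illustrative examples).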
+
+
+# func_lo2o object
+func_lo2o ()
+{
+ func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"`
+} # func_lo2o may be replaced by extended shell implementation
+
+
+# func_xform libobj-or-source
+func_xform ()
+{
+ func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'`
+} # func_xform may be replaced by extended shell implementation
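+# For example, with objext=o, `func_lo2o foo.lo' yields `foo.o', and
+# `func_xform src/bar.c' yields `src/bar.lo' (illustrative examples).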
+
+
+# func_fatal_configuration arg...
+# Echo program name prefixed message to standard error, followed by
+# a configuration failure hint, and exit.
+func_fatal_configuration ()
+{
+ func_error ${1+"$@"}
+ func_error "See the $PACKAGE documentation for more information."
+ func_fatal_error "Fatal configuration error."
+}
+
+
+# func_config
+# Display the configuration for all the tags in this script.
+func_config ()
+{
+ re_begincf='^# ### BEGIN LIBTOOL'
+ re_endcf='^# ### END LIBTOOL'
+
+ # Default configuration.
+ $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath"
+
+ # Now print the configurations for the tags.
+ for tagname in $taglist; do
+ $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath"
+ done
+
+ exit $?
+}
+
+# func_features
+# Display the features supported by this script.
+func_features ()
+{
+ echo "host: $host"
+ if test "$build_libtool_libs" = yes; then
+ echo "enable shared libraries"
+ else
+ echo "disable shared libraries"
+ fi
+ if test "$build_old_libs" = yes; then
+ echo "enable static libraries"
+ else
+ echo "disable static libraries"
+ fi
+
+ exit $?
+}
+
+# func_enable_tag tagname
+# Verify that TAGNAME is valid, and either flag an error and exit, or
+# enable the TAGNAME tag. We also add TAGNAME to the global $taglist
+# variable here.
+func_enable_tag ()
+{
+ # Global variable:
+ tagname="$1"
+
+ re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$"
+ re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$"
+ sed_extractcf="/$re_begincf/,/$re_endcf/p"
+
+ # Validate tagname.
+ case $tagname in
+ *[!-_A-Za-z0-9,/]*)
+ func_fatal_error "invalid tag name: $tagname"
+ ;;
+ esac
+
+ # Don't test for the "default" C tag, as we know it's
+ # there but not specially marked.
+ case $tagname in
+ CC) ;;
+ *)
+ if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then
+ taglist="$taglist $tagname"
+
+ # Evaluate the configuration. Be careful to quote the path
+ # and the sed script, to avoid splitting on whitespace, but
+ # also don't use non-portable quotes within backquotes within
+ # quotes; we have to do it in 2 steps:
+ extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"`
+ eval "$extractedcf"
+ else
+ func_error "ignoring unknown tag $tagname"
+ fi
+ ;;
+ esac
+}
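+# For example, `--tag CXX' (handled by the option parser below) adds CXX to
+# $taglist and evaluates the `# ### BEGIN LIBTOOL TAG CONFIG: CXX' section
+# embedded later in this script, if present (illustrative example).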
+
+# func_check_version_match
+# Ensure that we are using m4 macros and a libtool script from the same
+# release of libtool.
+func_check_version_match ()
+{
+ if test "$package_revision" != "$macro_revision"; then
+ if test "$VERSION" != "$macro_version"; then
+ if test -z "$macro_version"; then
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from an older release.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+ else
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from $PACKAGE $macro_version.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+ fi
+ else
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision,
+$progname: but the definition of this LT_INIT comes from revision $macro_revision.
+$progname: You should recreate aclocal.m4 with macros from revision $package_revision
+$progname: of $PACKAGE $VERSION and run autoconf again.
+_LT_EOF
+ fi
+
+ exit $EXIT_MISMATCH
+ fi
+}
+
+
+# Shorthand for --mode=foo, only valid as the first argument
+case $1 in
+clean|clea|cle|cl)
+ shift; set dummy --mode clean ${1+"$@"}; shift
+ ;;
+compile|compil|compi|comp|com|co|c)
+ shift; set dummy --mode compile ${1+"$@"}; shift
+ ;;
+execute|execut|execu|exec|exe|ex|e)
+ shift; set dummy --mode execute ${1+"$@"}; shift
+ ;;
+finish|finis|fini|fin|fi|f)
+ shift; set dummy --mode finish ${1+"$@"}; shift
+ ;;
+install|instal|insta|inst|ins|in|i)
+ shift; set dummy --mode install ${1+"$@"}; shift
+ ;;
+link|lin|li|l)
+ shift; set dummy --mode link ${1+"$@"}; shift
+ ;;
+uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u)
+ shift; set dummy --mode uninstall ${1+"$@"}; shift
+ ;;
+esac
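+# For example, `$progname compile gcc -c foo.c' is rewritten here to
+# `$progname --mode compile gcc -c foo.c' before option parsing begins
+# (illustrative example).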
+
+
+
+# Option defaults:
+opt_debug=:
+opt_dry_run=false
+opt_config=false
+opt_preserve_dup_deps=false
+opt_features=false
+opt_finish=false
+opt_help=false
+opt_help_all=false
+opt_silent=:
+opt_warning=:
+opt_verbose=:
+opt_silent=false
+opt_verbose=false
+
+
+# Parse options once, thoroughly. This comes as soon as possible in the
+# script to make things like `--version' happen as quickly as we can.
+{
+ # this just eases exit handling
+ while test $# -gt 0; do
+ opt="$1"
+ shift
+ case $opt in
+ --debug|-x) opt_debug='set -x'
+ func_echo "enabling shell trace mode"
+ $opt_debug
+ ;;
+ --dry-run|--dryrun|-n)
+ opt_dry_run=:
+ ;;
+ --config)
+ opt_config=:
+func_config
+ ;;
+ --dlopen|-dlopen)
+ optarg="$1"
+ opt_dlopen="${opt_dlopen+$opt_dlopen
+}$optarg"
+ shift
+ ;;
+ --preserve-dup-deps)
+ opt_preserve_dup_deps=:
+ ;;
+ --features)
+ opt_features=:
+func_features
+ ;;
+ --finish)
+ opt_finish=:
+set dummy --mode finish ${1+"$@"}; shift
+ ;;
+ --help)
+ opt_help=:
+ ;;
+ --help-all)
+ opt_help_all=:
+opt_help=': help-all'
+ ;;
+ --mode)
+ test $# = 0 && func_missing_arg $opt && break
+ optarg="$1"
+ opt_mode="$optarg"
+case $optarg in
+ # Valid mode arguments:
+ clean|compile|execute|finish|install|link|relink|uninstall) ;;
+
+ # Catch anything else as an error
+ *) func_error "invalid argument for $opt"
+ exit_cmd=exit
+ break
+ ;;
+esac
+ shift
+ ;;
+ --no-silent|--no-quiet)
+ opt_silent=false
+func_append preserve_args " $opt"
+ ;;
+ --no-warning|--no-warn)
+ opt_warning=false
+func_append preserve_args " $opt"
+ ;;
+ --no-verbose)
+ opt_verbose=false
+func_append preserve_args " $opt"
+ ;;
+ --silent|--quiet)
+ opt_silent=:
+func_append preserve_args " $opt"
+ opt_verbose=false
+ ;;
+ --verbose|-v)
+ opt_verbose=:
+func_append preserve_args " $opt"
+opt_silent=false
+ ;;
+ --tag)
+ test $# = 0 && func_missing_arg $opt && break
+ optarg="$1"
+ opt_tag="$optarg"
+func_append preserve_args " $opt $optarg"
+func_enable_tag "$optarg"
+ shift
+ ;;
+
+ -\?|-h) func_usage ;;
+ --help) func_help ;;
+ --version) func_version ;;
+
+ # Separate optargs to long options:
+ --*=*)
+ func_split_long_opt "$opt"
+ set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"}
+ shift
+ ;;
+
+ # Separate non-argument short options:
+ -\?*|-h*|-n*|-v*)
+ func_split_short_opt "$opt"
+ set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"}
+ shift
+ ;;
+
+ --) break ;;
+ -*) func_fatal_help "unrecognized option \`$opt'" ;;
+ *) set dummy "$opt" ${1+"$@"}; shift; break ;;
+ esac
+ done
+
+ # Validate options:
+
+ # save first non-option argument
+ if test "$#" -gt 0; then
+ nonopt="$opt"
+ shift
+ fi
+
+ # preserve --debug
+ test "$opt_debug" = : || func_append preserve_args " --debug"
+
+ case $host in
+ *cygwin* | *mingw* | *pw32* | *cegcc*)
+ # don't eliminate duplications in $postdeps and $predeps
+ opt_duplicate_compiler_generated_deps=:
+ ;;
+ *)
+ opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps
+ ;;
+ esac
+
+ $opt_help || {
+ # Sanity checks first:
+ func_check_version_match
+
+ if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
+ func_fatal_configuration "not configured to build any kind of library"
+ fi
+
+ # Darwin sucks
+ eval std_shrext=\"$shrext_cmds\"
+
+ # Only execute mode is allowed to have -dlopen flags.
+ if test -n "$opt_dlopen" && test "$opt_mode" != execute; then
+ func_error "unrecognized option \`-dlopen'"
+ $ECHO "$help" 1>&2
+ exit $EXIT_FAILURE
+ fi
+
+ # Change the help message to a mode-specific one.
+ generic_help="$help"
+ help="Try \`$progname --help --mode=$opt_mode' for more information."
+ }
+
+
+ # Bail if the options were screwed
+ $exit_cmd $EXIT_FAILURE
+}
+
+
+
+
+## ----------- ##
+## Main. ##
+## ----------- ##
+
+# func_lalib_p file
+# True iff FILE is a libtool `.la' library or `.lo' object file.
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_lalib_p ()
+{
+ test -f "$1" &&
+ $SED -e 4q "$1" 2>/dev/null \
+ | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1
+}
+
+# func_lalib_unsafe_p file
+# True iff FILE is a libtool `.la' library or `.lo' object file.
+# This function implements the same check as func_lalib_p without
+# resorting to external programs. To this end, it redirects stdin and
+# closes it afterwards, without saving the original file descriptor.
+# As a safety measure, use it only where a negative result would be
+# fatal anyway. Works if `file' does not exist.
+func_lalib_unsafe_p ()
+{
+ lalib_p=no
+ if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then
+ for lalib_p_l in 1 2 3 4
+ do
+ read lalib_p_line
+ case "$lalib_p_line" in
+ \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;;
+ esac
+ done
+ exec 0<&5 5<&-
+ fi
+ test "$lalib_p" = yes
+}
+
+# func_ltwrapper_script_p file
+# True iff FILE is a libtool wrapper script
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_script_p ()
+{
+ func_lalib_p "$1"
+}
+
+# func_ltwrapper_executable_p file
+# True iff FILE is a libtool wrapper executable
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_executable_p ()
+{
+ func_ltwrapper_exec_suffix=
+ case $1 in
+ *.exe) ;;
+ *) func_ltwrapper_exec_suffix=.exe ;;
+ esac
+ $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1
+}
+
+# func_ltwrapper_scriptname file
+# Assumes FILE is an ltwrapper_executable; uses FILE to determine the
+# appropriate filename for a temporary ltwrapper_script.
+func_ltwrapper_scriptname ()
+{
+ func_dirname_and_basename "$1" "" "."
+ func_stripname '' '.exe' "$func_basename_result"
+ func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper"
+}
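+# For example, for FILE `./bin/app.exe' with objdir=.libs, the result would
+# be `./bin/.libs/app_ltshwrapper' (illustrative example; the objdir value
+# comes from the libtool configuration).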
+
+# func_ltwrapper_p file
+# True iff FILE is a libtool wrapper script or wrapper executable
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_p ()
+{
+ func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1"
+}
+
+
+# func_execute_cmds commands fail_cmd
+# Execute tilde-delimited COMMANDS.
+# If FAIL_CMD is given, eval that upon failure.
+# FAIL_CMD may read-access the current command in variable CMD!
+func_execute_cmds ()
+{
+ $opt_debug
+ save_ifs=$IFS; IFS='~'
+ for cmd in $1; do
+ IFS=$save_ifs
+ eval cmd=\"$cmd\"
+ func_show_eval "$cmd" "${2-:}"
+ done
+ IFS=$save_ifs
+}
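+# For example, `func_execute_cmds 'echo one~echo two' 'exit 1'' shows and
+# runs `echo one' and then `echo two', evaluating `exit 1' if either fails
+# (illustrative example).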
+
+
+# func_source file
+# Source FILE, adding directory component if necessary.
+# Note that it is not necessary on cygwin/mingw to append a dot to
+# FILE even if both FILE and FILE.exe exist: automatic-append-.exe
+# behavior happens only for exec(3), not for open(2)! Also, sourcing
+# `FILE.' does not work on cygwin managed mounts.
+func_source ()
+{
+ $opt_debug
+ case $1 in
+ */* | *\\*) . "$1" ;;
+ *) . "./$1" ;;
+ esac
+}
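+# For example, `func_source libfoo.la' sources `./libfoo.la', while
+# `func_source .libs/libfoo.la' sources the file as given
+# (illustrative example).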
+
+
+# func_resolve_sysroot PATH
+# Replace a leading = in PATH with a sysroot. Store the result into
+# func_resolve_sysroot_result
+func_resolve_sysroot ()
+{
+ func_resolve_sysroot_result=$1
+ case $func_resolve_sysroot_result in
+ =*)
+ func_stripname '=' '' "$func_resolve_sysroot_result"
+ func_resolve_sysroot_result=$lt_sysroot$func_stripname_result
+ ;;
+ esac
+}
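+# For example, with lt_sysroot=/opt/sysroot (hypothetical), resolving
+# `=/usr/lib/libm.la' gives `/opt/sysroot/usr/lib/libm.la'; a path without
+# a leading `=' is returned unchanged (illustrative example).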
+
+# func_replace_sysroot PATH
+# If PATH begins with the sysroot, replace it with = and
+# store the result into func_replace_sysroot_result.
+func_replace_sysroot ()
+{
+ case "$lt_sysroot:$1" in
+ ?*:"$lt_sysroot"*)
+ func_stripname "$lt_sysroot" '' "$1"
+ func_replace_sysroot_result="=$func_stripname_result"
+ ;;
+ *)
+ # Including no sysroot.
+ func_replace_sysroot_result=$1
+ ;;
+ esac
+}
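+# Conversely, with the same hypothetical sysroot, `func_replace_sysroot
+# /opt/sysroot/usr/lib' stores `=/usr/lib' in func_replace_sysroot_result
+# (illustrative example).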
+
+# func_infer_tag arg
+# Infer tagged configuration to use if any are available and
+# if one wasn't chosen via the "--tag" command line option.
+# Only attempt this if the compiler in the base compile
+# command doesn't match the default compiler.
+# arg is usually of the form 'gcc ...'
+func_infer_tag ()
+{
+ $opt_debug
+ if test -n "$available_tags" && test -z "$tagname"; then
+ CC_quoted=
+ for arg in $CC; do
+ func_append_quoted CC_quoted "$arg"
+ done
+ CC_expanded=`func_echo_all $CC`
+ CC_quoted_expanded=`func_echo_all $CC_quoted`
+ case $@ in
+ # Blanks in the command may have been stripped by the calling shell,
+ # but not from the CC environment variable when configure was run.
+ " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
+ " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;;
+ # Blanks at the start of $base_compile will cause this to fail
+ # if we don't check for them as well.
+ *)
+ for z in $available_tags; do
+ if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
+ # Evaluate the configuration.
+ eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
+ CC_quoted=
+ for arg in $CC; do
+ # Double-quote args containing other shell metacharacters.
+ func_append_quoted CC_quoted "$arg"
+ done
+ CC_expanded=`func_echo_all $CC`
+ CC_quoted_expanded=`func_echo_all $CC_quoted`
+ case "$@ " in
+ " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
+ " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*)
+ # The compiler in the base compile command matches
+ # the one in the tagged configuration.
+ # Assume this is the tagged configuration we want.
+ tagname=$z
+ break
+ ;;
+ esac
+ fi
+ done
+ # If $tagname still isn't set, then no tagged configuration
+ # was found and let the user know that the "--tag" command
+ # line option must be used.
+ if test -z "$tagname"; then
+ func_echo "unable to infer tagged configuration"
+ func_fatal_error "specify a tag with \`--tag'"
+# else
+# func_verbose "using $tagname tagged configuration"
+ fi
+ ;;
+ esac
+ fi
+}
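+# For example, if the base compile command begins with `g++' while $CC is
+# `gcc', the loop above would typically select a tag (such as CXX) whose
+# configured compiler matches, or abort asking for an explicit `--tag'
+# (illustrative example).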
+
+
+
+# func_write_libtool_object output_name pic_name nonpic_name
+# Create a libtool object file (analogous to a ".la" file),
+# but don't create it if we're doing a dry run.
+func_write_libtool_object ()
+{
+ write_libobj=${1}
+ if test "$build_libtool_libs" = yes; then
+ write_lobj=\'${2}\'
+ else
+ write_lobj=none
+ fi
+
+ if test "$build_old_libs" = yes; then
+ write_oldobj=\'${3}\'
+ else
+ write_oldobj=none
+ fi
+
+ $opt_dry_run || {
+ cat >${write_libobj}T <<EOF
+# $write_libobj - a libtool object file
+# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object=$write_lobj
+
+# Name of the non-PIC object
+non_pic_object=$write_oldobj
+
+EOF
+ $MV "${write_libobj}T" "${write_libobj}"
+ }
+}
+
+
+##################################################
+# FILE NAME AND PATH CONVERSION HELPER FUNCTIONS #
+##################################################
+
+# func_convert_core_file_wine_to_w32 ARG
+# Helper function used by file name conversion functions when $build is *nix,
+# and $host is mingw, cygwin, or some other w32 environment. Relies on a
+# correctly configured wine environment available, with the winepath program
+# in $build's $PATH.
+#
+# ARG is the $build file name to be converted to w32 format.
+# Result is available in $func_convert_core_file_wine_to_w32_result, and will
+# be empty on error (or when ARG is empty)
+func_convert_core_file_wine_to_w32 ()
+{
+ $opt_debug
+ func_convert_core_file_wine_to_w32_result="$1"
+ if test -n "$1"; then
+ # Unfortunately, winepath does not exit with a non-zero error code, so we
+ # are forced to check the contents of stdout. On the other hand, if the
+ # command is not found, the shell will set an exit code of 127 and print
+ # *an error message* to stdout. So we must check for both error code of
+ # zero AND non-empty stdout, which explains the odd construction:
+ func_convert_core_file_wine_to_w32_tmp=`winepath -w "$1" 2>/dev/null`
+ if test "$?" -eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then
+ func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" |
+ $SED -e "$lt_sed_naive_backslashify"`
+ else
+ func_convert_core_file_wine_to_w32_result=
+ fi
+ fi
+}
+# end: func_convert_core_file_wine_to_w32
+
+
+# func_convert_core_path_wine_to_w32 ARG
+# Helper function used by path conversion functions when $build is *nix, and
+# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly
+# configured wine environment available, with the winepath program in $build's
+# $PATH. Assumes ARG has no leading or trailing path separator characters.
+#
+# ARG is path to be converted from $build format to win32.
+# Result is available in $func_convert_core_path_wine_to_w32_result.
+# Unconvertible file (directory) names in ARG are skipped; if no directory names
+# are convertible, then the result may be empty.
+func_convert_core_path_wine_to_w32 ()
+{
+ $opt_debug
+ # unfortunately, winepath doesn't convert paths, only file names
+ func_convert_core_path_wine_to_w32_result=""
+ if test -n "$1"; then
+ oldIFS=$IFS
+ IFS=:
+ for func_convert_core_path_wine_to_w32_f in $1; do
+ IFS=$oldIFS
+ func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f"
+ if test -n "$func_convert_core_file_wine_to_w32_result" ; then
+ if test -z "$func_convert_core_path_wine_to_w32_result"; then
+ func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result"
+ else
+ func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result"
+ fi
+ fi
+ done
+ IFS=$oldIFS
+ fi
+}
+# end: func_convert_core_path_wine_to_w32
+
+
+# func_cygpath ARGS...
+# Wrapper around calling the cygpath program via LT_CYGPATH. This is used
+# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2)
+# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or
+# (2), returns the Cygwin file name or path in func_cygpath_result (input
+# file name or path is assumed to be in w32 format, as previously converted
+# from $build's *nix or MSYS format). In case (3), returns the w32 file name
+# or path in func_cygpath_result (input file name or path is assumed to be in
+# Cygwin format). Returns an empty string on error.
+#
+# ARGS are passed to cygpath, with the last one being the file name or path to
+# be converted.
+#
+# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH
+# environment variable; do not put it in $PATH.
+func_cygpath ()
+{
+ $opt_debug
+ if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then
+ func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null`
+ if test "$?" -ne 0; then
+ # on failure, ensure result is empty
+ func_cygpath_result=
+ fi
+ else
+ func_cygpath_result=
+ func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'"
+ fi
+}
+#end: func_cygpath
+
+
+# func_convert_core_msys_to_w32 ARG
+# Convert file name or path ARG from MSYS format to w32 format. Return
+# result in func_convert_core_msys_to_w32_result.
+func_convert_core_msys_to_w32 ()
+{
+ $opt_debug
+ # awkward: cmd appends spaces to result
+ func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null |
+ $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"`
+}
+#end: func_convert_core_msys_to_w32
+
+
+# func_convert_file_check ARG1 ARG2
+# Verify that ARG1 (a file name in $build format) was converted to $host
+# format in ARG2. Otherwise, emit an error message, but continue (resetting
+# func_to_host_file_result to ARG1).
+func_convert_file_check ()
+{
+ $opt_debug
+ if test -z "$2" && test -n "$1" ; then
+ func_error "Could not determine host file name corresponding to"
+ func_error " \`$1'"
+ func_error "Continuing, but uninstalled executables may not work."
+ # Fallback:
+ func_to_host_file_result="$1"
+ fi
+}
+# end func_convert_file_check
+
+
+# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH
+# Verify that FROM_PATH (a path in $build format) was converted to $host
+# format in TO_PATH. Otherwise, emit an error message, but continue, resetting
+# func_to_host_path_result to a simplistic fallback value (see below).
+func_convert_path_check ()
+{
+ $opt_debug
+ if test -z "$4" && test -n "$3"; then
+ func_error "Could not determine the host path corresponding to"
+ func_error " \`$3'"
+ func_error "Continuing, but uninstalled executables may not work."
+ # Fallback. This is a deliberately simplistic "conversion" and
+ # should not be "improved". See libtool.info.
+ if test "x$1" != "x$2"; then
+ lt_replace_pathsep_chars="s|$1|$2|g"
+ func_to_host_path_result=`echo "$3" |
+ $SED -e "$lt_replace_pathsep_chars"`
+ else
+ func_to_host_path_result="$3"
+ fi
+ fi
+}
+# end func_convert_path_check
+
+
+# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG
+# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT
+# and appending REPL if ORIG matches BACKPAT.
+func_convert_path_front_back_pathsep ()
+{
+ $opt_debug
+ case $4 in
+ $1 ) func_to_host_path_result="$3$func_to_host_path_result"
+ ;;
+ esac
+ case $4 in
+ $2 ) func_append func_to_host_path_result "$3"
+ ;;
+ esac
+}
+# end func_convert_path_front_back_pathsep
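+# For example, the path conversion functions below call this as
+# `func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"'; an ARG such as
+# `:/a:/b:' thus has its leading and trailing `:' restored as `;' around the
+# converted result (illustrative example).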
+
+
+##################################################
+# $build to $host FILE NAME CONVERSION FUNCTIONS #
+##################################################
+# invoked via `$to_host_file_cmd ARG'
+#
+# In each case, ARG is the path to be converted from $build to $host format.
+# Result will be available in $func_to_host_file_result.
+
+
+# func_to_host_file ARG
+# Converts the file name ARG from $build format to $host format. Return result
+# in func_to_host_file_result.
+func_to_host_file ()
+{
+ $opt_debug
+ $to_host_file_cmd "$1"
+}
+# end func_to_host_file
+
+
+# func_to_tool_file ARG LAZY
+# converts the file name ARG from $build format to toolchain format. Return
+# result in func_to_tool_file_result. If the conversion in use is listed
+# in (the comma separated) LAZY, no conversion takes place.
+func_to_tool_file ()
+{
+ $opt_debug
+ case ,$2, in
+ *,"$to_tool_file_cmd",*)
+ func_to_tool_file_result=$1
+ ;;
+ *)
+ $to_tool_file_cmd "$1"
+ func_to_tool_file_result=$func_to_host_file_result
+ ;;
+ esac
+}
+# end func_to_tool_file
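+# For example, `func_to_tool_file /tmp/x.c func_convert_file_noop' skips
+# conversion when $to_tool_file_cmd is func_convert_file_noop, and otherwise
+# applies the configured conversion command (illustrative example).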
+
+
+# func_convert_file_noop ARG
+# Copy ARG to func_to_host_file_result.
+func_convert_file_noop ()
+{
+ func_to_host_file_result="$1"
+}
+# end func_convert_file_noop
+
+
+# func_convert_file_msys_to_w32 ARG
+# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper. Returns result in
+# func_to_host_file_result.
+func_convert_file_msys_to_w32 ()
+{
+ $opt_debug
+ func_to_host_file_result="$1"
+ if test -n "$1"; then
+ func_convert_core_msys_to_w32 "$1"
+ func_to_host_file_result="$func_convert_core_msys_to_w32_result"
+ fi
+ func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_msys_to_w32
+
+
+# func_convert_file_cygwin_to_w32 ARG
+# Convert file name ARG from Cygwin to w32 format. Returns result in
+# func_to_host_file_result.
+func_convert_file_cygwin_to_w32 ()
+{
+ $opt_debug
+ func_to_host_file_result="$1"
+ if test -n "$1"; then
+ # because $build is cygwin, we call "the" cygpath in $PATH; no need to use
+ # LT_CYGPATH in this case.
+ func_to_host_file_result=`cygpath -m "$1"`
+ fi
+ func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_cygwin_to_w32
+
+
+# func_convert_file_nix_to_w32 ARG
+# Convert file name ARG from *nix to w32 format. Requires a wine environment
+# and a working winepath. Returns result in func_to_host_file_result.
+func_convert_file_nix_to_w32 ()
+{
+ $opt_debug
+ func_to_host_file_result="$1"
+ if test -n "$1"; then
+ func_convert_core_file_wine_to_w32 "$1"
+ func_to_host_file_result="$func_convert_core_file_wine_to_w32_result"
+ fi
+ func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_nix_to_w32
+
+
+# func_convert_file_msys_to_cygwin ARG
+# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set.
+# Returns result in func_to_host_file_result.
+func_convert_file_msys_to_cygwin ()
+{
+ $opt_debug
+ func_to_host_file_result="$1"
+ if test -n "$1"; then
+ func_convert_core_msys_to_w32 "$1"
+ func_cygpath -u "$func_convert_core_msys_to_w32_result"
+ func_to_host_file_result="$func_cygpath_result"
+ fi
+ func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_msys_to_cygwin
+
+
+# func_convert_file_nix_to_cygwin ARG
+# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed
+# in a wine environment, working winepath, and LT_CYGPATH set. Returns result
+# in func_to_host_file_result.
+func_convert_file_nix_to_cygwin ()
+{
+ $opt_debug
+ func_to_host_file_result="$1"
+ if test -n "$1"; then
+ # convert from *nix to w32, then use cygpath to convert from w32 to cygwin.
+ func_convert_core_file_wine_to_w32 "$1"
+ func_cygpath -u "$func_convert_core_file_wine_to_w32_result"
+ func_to_host_file_result="$func_cygpath_result"
+ fi
+ func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_nix_to_cygwin
+
+
+#############################################
+# $build to $host PATH CONVERSION FUNCTIONS #
+#############################################
+# invoked via `$to_host_path_cmd ARG'
+#
+# In each case, ARG is the path to be converted from $build to $host format.
+# The result will be available in $func_to_host_path_result.
+#
+# Path separators are also converted from $build format to $host format. If
+# ARG begins or ends with a path separator character, it is preserved (but
+# converted to $host format) on output.
+#
+# All path conversion functions are named using the following convention:
+# file name conversion function : func_convert_file_X_to_Y ()
+# path conversion function : func_convert_path_X_to_Y ()
+# where, for any given $build/$host combination, the 'X_to_Y' value is the
+# same. If conversion functions are added for new $build/$host combinations,
+# the two new functions must follow this pattern, or func_init_to_host_path_cmd
+# will break.
+
+
+# func_init_to_host_path_cmd
+# Ensures that function "pointer" variable $to_host_path_cmd is set to the
+# appropriate value, based on the value of $to_host_file_cmd.
+to_host_path_cmd=
+func_init_to_host_path_cmd ()
+{
+ $opt_debug
+ if test -z "$to_host_path_cmd"; then
+ func_stripname 'func_convert_file_' '' "$to_host_file_cmd"
+ to_host_path_cmd="func_convert_path_${func_stripname_result}"
+ fi
+}
+
+
+# func_to_host_path ARG
+# Converts the path ARG from $build format to $host format. Return result
+# in func_to_host_path_result.
+func_to_host_path ()
+{
+ $opt_debug
+ func_init_to_host_path_cmd
+ $to_host_path_cmd "$1"
+}
+# end func_to_host_path
+
+
+# func_convert_path_noop ARG
+# Copy ARG to func_to_host_path_result.
+func_convert_path_noop ()
+{
+ func_to_host_path_result="$1"
+}
+# end func_convert_path_noop
+
+
+# func_convert_path_msys_to_w32 ARG
+# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper. Returns result in
+# func_to_host_path_result.
+func_convert_path_msys_to_w32 ()
+{
+ $opt_debug
+ func_to_host_path_result="$1"
+ if test -n "$1"; then
+ # Remove leading and trailing path separator characters from ARG. MSYS
+ # behavior is inconsistent here; cygpath turns them into '.;' and ';.';
+ # and winepath ignores them completely.
+ func_stripname : : "$1"
+ func_to_host_path_tmp1=$func_stripname_result
+ func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+ func_to_host_path_result="$func_convert_core_msys_to_w32_result"
+ func_convert_path_check : ";" \
+ "$func_to_host_path_tmp1" "$func_to_host_path_result"
+ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+ fi
+}
+# end func_convert_path_msys_to_w32
+
+
+# func_convert_path_cygwin_to_w32 ARG
+# Convert path ARG from Cygwin to w32 format. Returns result in
+# func_to_host_path_result.
+func_convert_path_cygwin_to_w32 ()
+{
+ $opt_debug
+ func_to_host_path_result="$1"
+ if test -n "$1"; then
+ # See func_convert_path_msys_to_w32:
+ func_stripname : : "$1"
+ func_to_host_path_tmp1=$func_stripname_result
+ func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"`
+ func_convert_path_check : ";" \
+ "$func_to_host_path_tmp1" "$func_to_host_path_result"
+ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+ fi
+}
+# end func_convert_path_cygwin_to_w32
+
+
+# func_convert_path_nix_to_w32 ARG
+# Convert path ARG from *nix to w32 format. Requires a wine environment and
+# a working winepath. Returns result in func_to_host_path_result.
+func_convert_path_nix_to_w32 ()
+{
+ $opt_debug
+ func_to_host_path_result="$1"
+ if test -n "$1"; then
+ # See func_convert_path_msys_to_w32:
+ func_stripname : : "$1"
+ func_to_host_path_tmp1=$func_stripname_result
+ func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+ func_to_host_path_result="$func_convert_core_path_wine_to_w32_result"
+ func_convert_path_check : ";" \
+ "$func_to_host_path_tmp1" "$func_to_host_path_result"
+ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+ fi
+}
+# end func_convert_path_nix_to_w32
+
+
+# func_convert_path_msys_to_cygwin ARG
+# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set.
+# Returns result in func_to_host_path_result.
+func_convert_path_msys_to_cygwin ()
+{
+ $opt_debug
+ func_to_host_path_result="$1"
+ if test -n "$1"; then
+ # See func_convert_path_msys_to_w32:
+ func_stripname : : "$1"
+ func_to_host_path_tmp1=$func_stripname_result
+ func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+ func_cygpath -u -p "$func_convert_core_msys_to_w32_result"
+ func_to_host_path_result="$func_cygpath_result"
+ func_convert_path_check : : \
+ "$func_to_host_path_tmp1" "$func_to_host_path_result"
+ func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+ fi
+}
+# end func_convert_path_msys_to_cygwin
+
+
+# func_convert_path_nix_to_cygwin ARG
+# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in
+# a wine environment, a working winepath, and LT_CYGPATH set. Returns result
+# in func_to_host_path_result.
+func_convert_path_nix_to_cygwin ()
+{
+ $opt_debug
+ func_to_host_path_result="$1"
+ if test -n "$1"; then
+ # Remove leading and trailing path separator characters from
+ # ARG. msys behavior is inconsistent here, cygpath turns them
+ # into '.;' and ';.', and winepath ignores them completely.
+ func_stripname : : "$1"
+ func_to_host_path_tmp1=$func_stripname_result
+ func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+ func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result"
+ func_to_host_path_result="$func_cygpath_result"
+ func_convert_path_check : : \
+ "$func_to_host_path_tmp1" "$func_to_host_path_result"
+ func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+ fi
+}
+# end func_convert_path_nix_to_cygwin
+
+
+# func_mode_compile arg...
+func_mode_compile ()
+{
+ $opt_debug
+ # Get the compilation command and the source file.
+ base_compile=
+ srcfile="$nonopt" # always keep a non-empty value in "srcfile"
+ suppress_opt=yes
+ suppress_output=
+ arg_mode=normal
+ libobj=
+ later=
+ pie_flag=
+
+ for arg
+ do
+ case $arg_mode in
+ arg )
+ # do not "continue". Instead, add this to base_compile
+ lastarg="$arg"
+ arg_mode=normal
+ ;;
+
+ target )
+ libobj="$arg"
+ arg_mode=normal
+ continue
+ ;;
+
+ normal )
+ # Accept any command-line options.
+ case $arg in
+ -o)
+ test -n "$libobj" && \
+ func_fatal_error "you cannot specify \`-o' more than once"
+ arg_mode=target
+ continue
+ ;;
+
+ -pie | -fpie | -fPIE)
+ func_append pie_flag " $arg"
+ continue
+ ;;
+
+ -shared | -static | -prefer-pic | -prefer-non-pic)
+ func_append later " $arg"
+ continue
+ ;;
+
+ -no-suppress)
+ suppress_opt=no
+ continue
+ ;;
+
+ -Xcompiler)
+ arg_mode=arg # the next one goes into the "base_compile" arg list
+ continue # The current "srcfile" will either be retained or
+ ;; # replaced later. I would guess that would be a bug.
+
+ -Wc,*)
+ func_stripname '-Wc,' '' "$arg"
+ args=$func_stripname_result
+ lastarg=
+ save_ifs="$IFS"; IFS=','
+ for arg in $args; do
+ IFS="$save_ifs"
+ func_append_quoted lastarg "$arg"
+ done
+ IFS="$save_ifs"
+ func_stripname ' ' '' "$lastarg"
+ lastarg=$func_stripname_result
+
+ # Add the arguments to base_compile.
+ func_append base_compile " $lastarg"
+ continue
+ ;;
+
+ *)
+ # Accept the current argument as the source file.
+ # The previous "srcfile" becomes the current argument.
+ #
+ lastarg="$srcfile"
+ srcfile="$arg"
+ ;;
+ esac # case $arg
+ ;;
+ esac # case $arg_mode
+
+ # Aesthetically quote the previous argument.
+ func_append_quoted base_compile "$lastarg"
+ done # for arg
+
+ case $arg_mode in
+ arg)
+ func_fatal_error "you must specify an argument for -Xcompile"
+ ;;
+ target)
+ func_fatal_error "you must specify a target with \`-o'"
+ ;;
+ *)
+ # Get the name of the library object.
+ test -z "$libobj" && {
+ func_basename "$srcfile"
+ libobj="$func_basename_result"
+ }
+ ;;
+ esac
+
+ # Recognize several different file suffixes.
+ # If the user specifies -o file.o, it is replaced with file.lo
+ case $libobj in
+ *.[cCFSifmso] | \
+ *.ada | *.adb | *.ads | *.asm | \
+ *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
+ *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup)
+ func_xform "$libobj"
+ libobj=$func_xform_result
+ ;;
+ esac
+
+ case $libobj in
+ *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;;
+ *)
+ func_fatal_error "cannot determine name of library object from \`$libobj'"
+ ;;
+ esac
+
+ func_infer_tag $base_compile
+
+ for arg in $later; do
+ case $arg in
+ -shared)
+ test "$build_libtool_libs" != yes && \
+ func_fatal_configuration "can not build a shared library"
+ build_old_libs=no
+ continue
+ ;;
+
+ -static)
+ build_libtool_libs=no
+ build_old_libs=yes
+ continue
+ ;;
+
+ -prefer-pic)
+ pic_mode=yes
+ continue
+ ;;
+
+ -prefer-non-pic)
+ pic_mode=no
+ continue
+ ;;
+ esac
+ done
+
+ func_quote_for_eval "$libobj"
+ test "X$libobj" != "X$func_quote_for_eval_result" \
+ && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \
+ && func_warning "libobj name \`$libobj' may not contain shell special characters."
+ func_dirname_and_basename "$obj" "/" ""
+ objname="$func_basename_result"
+ xdir="$func_dirname_result"
+ lobj=${xdir}$objdir/$objname
+
+ test -z "$base_compile" && \
+ func_fatal_help "you must specify a compilation command"
+
+ # Delete any leftover library objects.
+ if test "$build_old_libs" = yes; then
+ removelist="$obj $lobj $libobj ${libobj}T"
+ else
+ removelist="$lobj $libobj ${libobj}T"
+ fi
+
+ # On Cygwin there's no "real" PIC flag so we must build both object types
+ case $host_os in
+ cygwin* | mingw* | pw32* | os2* | cegcc*)
+ pic_mode=default
+ ;;
+ esac
+ if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then
+ # non-PIC code in shared libraries is not supported
+ pic_mode=default
+ fi
+
+ # Calculate the filename of the output object if compiler does
+ # not support -o with -c
+ if test "$compiler_c_o" = no; then
+ output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.${objext}
+ lockfile="$output_obj.lock"
+ else
+ output_obj=
+ need_locks=no
+ lockfile=
+ fi
+
+ # Lock this critical section if it is needed
+ # We use this script file to make the link, it avoids creating a new file
+ if test "$need_locks" = yes; then
+ until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
+ func_echo "Waiting for $lockfile to be removed"
+ sleep 2
+ done
+ elif test "$need_locks" = warn; then
+ if test -f "$lockfile"; then
+ $ECHO "\
+*** ERROR, $lockfile exists and contains:
+`cat $lockfile 2>/dev/null`
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $opt_dry_run || $RM $removelist
+ exit $EXIT_FAILURE
+ fi
+ func_append removelist " $output_obj"
+ $ECHO "$srcfile" > "$lockfile"
+ fi
+
+ $opt_dry_run || $RM $removelist
+ func_append removelist " $lockfile"
+ trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15
+
+ func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
+ srcfile=$func_to_tool_file_result
+ func_quote_for_eval "$srcfile"
+ qsrcfile=$func_quote_for_eval_result
+
+ # Only build a PIC object if we are building libtool libraries.
+ if test "$build_libtool_libs" = yes; then
+ # Without this assignment, base_compile gets emptied.
+ fbsd_hideous_sh_bug=$base_compile
+
+ if test "$pic_mode" != no; then
+ command="$base_compile $qsrcfile $pic_flag"
+ else
+ # Don't build PIC code
+ command="$base_compile $qsrcfile"
+ fi
+
+ func_mkdir_p "$xdir$objdir"
+
+ if test -z "$output_obj"; then
+ # Place PIC objects in $objdir
+ func_append command " -o $lobj"
+ fi
+
+ func_show_eval_locale "$command" \
+ 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE'
+
+ if test "$need_locks" = warn &&
+ test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
+ $ECHO "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $opt_dry_run || $RM $removelist
+ exit $EXIT_FAILURE
+ fi
+
+ # Just move the object if needed, then go on to compile the next one
+ if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then
+ func_show_eval '$MV "$output_obj" "$lobj"' \
+ 'error=$?; $opt_dry_run || $RM $removelist; exit $error'
+ fi
+
+ # Allow error messages only from the first compilation.
+ if test "$suppress_opt" = yes; then
+ suppress_output=' >/dev/null 2>&1'
+ fi
+ fi
+
+ # Only build a position-dependent object if we build old libraries.
+ if test "$build_old_libs" = yes; then
+ if test "$pic_mode" != yes; then
+ # Don't build PIC code
+ command="$base_compile $qsrcfile$pie_flag"
+ else
+ command="$base_compile $qsrcfile $pic_flag"
+ fi
+ if test "$compiler_c_o" = yes; then
+ func_append command " -o $obj"
+ fi
+
+ # Suppress compiler output if we already did a PIC compilation.
+ func_append command "$suppress_output"
+ func_show_eval_locale "$command" \
+ '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE'
+
+ if test "$need_locks" = warn &&
+ test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
+ $ECHO "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $opt_dry_run || $RM $removelist
+ exit $EXIT_FAILURE
+ fi
+
+ # Just move the object if needed
+ if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then
+ func_show_eval '$MV "$output_obj" "$obj"' \
+ 'error=$?; $opt_dry_run || $RM $removelist; exit $error'
+ fi
+ fi
+
+ $opt_dry_run || {
+ func_write_libtool_object "$libobj" "$objdir/$objname" "$objname"
+
+ # Unlock the critical section if it was locked
+ if test "$need_locks" != no; then
+ removelist=$lockfile
+ $RM "$lockfile"
+ fi
+ }
+
+ exit $EXIT_SUCCESS
+}
+
+$opt_help || {
+ test "$opt_mode" = compile && func_mode_compile ${1+"$@"}
+}
+
+func_mode_help ()
+{
+ # We need to display help for each of the modes.
+ case $opt_mode in
+ "")
+ # Generic help is extracted from the usage comments
+ # at the start of this file.
+ func_help
+ ;;
+
+ clean)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
+
+Remove files from the build directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, object or program, all the files associated
+with it are deleted. Otherwise, only FILE itself is deleted using RM."
+ ;;
+
+ compile)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
+
+Compile a source file into a libtool library object.
+
+This mode accepts the following additional options:
+
+ -o OUTPUT-FILE set the output file name to OUTPUT-FILE
+ -no-suppress do not suppress compiler output for multiple passes
+ -prefer-pic try to build PIC objects only
+ -prefer-non-pic try to build non-PIC objects only
+ -shared do not build a \`.o' file suitable for static linking
+ -static only build a \`.o' file suitable for static linking
+ -Wc,FLAG pass FLAG directly to the compiler
+
+COMPILE-COMMAND is a command to be used in creating a \`standard' object file
+from the given SOURCEFILE.
+
+The output file name is determined by removing the directory component from
+SOURCEFILE, then substituting the C source code suffix \`.c' with the
+library object suffix, \`.lo'."
+ ;;
+
+ execute)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]...
+
+Automatically set library path, then run a program.
+
+This mode accepts the following additional options:
+
+ -dlopen FILE add the directory containing FILE to the library path
+
+This mode sets the library path environment variable according to \`-dlopen'
+flags.
+
+If any of the ARGS are libtool executable wrappers, then they are translated
+into their corresponding uninstalled binary, and any of their required library
+directories are added to the library path.
+
+Then, COMMAND is executed, with ARGS as arguments."
+ ;;
+
+ finish)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=finish [LIBDIR]...
+
+Complete the installation of libtool libraries.
+
+Each LIBDIR is a directory that contains libtool libraries.
+
+The commands that this mode executes may require superuser privileges. Use
+the \`--dry-run' option if you just want to see what would be executed."
+ ;;
+
+ install)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND...
+
+Install executables or libraries.
+
+INSTALL-COMMAND is the installation command. The first component should be
+either the \`install' or \`cp' program.
+
+The following components of INSTALL-COMMAND are treated specially:
+
+ -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation
+
+The rest of the components are interpreted as arguments to that command (only
+BSD-compatible install options are recognized)."
+ ;;
+
+ link)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=link LINK-COMMAND...
+
+Link object files or libraries together to form another library, or to
+create an executable program.
+
+LINK-COMMAND is a command using the C compiler that you would use to create
+a program from several object files.
+
+The following components of LINK-COMMAND are treated specially:
+
+ -all-static do not do any dynamic linking at all
+ -avoid-version do not add a version suffix if possible
+ -bindir BINDIR specify path to binaries directory (for systems where
+ libraries must be found in the PATH setting at runtime)
+ -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
+ -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
+ -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
+ -export-symbols SYMFILE
+ try to export only the symbols listed in SYMFILE
+ -export-symbols-regex REGEX
+ try to export only the symbols matching REGEX
+ -LLIBDIR search LIBDIR for required installed libraries
+ -lNAME OUTPUT-FILE requires the installed library libNAME
+ -module build a library that can be dlopened
+ -no-fast-install disable the fast-install mode
+ -no-install link a not-installable executable
+ -no-undefined declare that a library does not refer to external symbols
+ -o OUTPUT-FILE create OUTPUT-FILE from the specified objects
+ -objectlist FILE use a list of object files found in FILE to specify objects
+ -precious-files-regex REGEX
+ don't remove output files matching REGEX
+ -release RELEASE specify package release information
+ -rpath LIBDIR the created library will eventually be installed in LIBDIR
+ -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
+ -shared only do dynamic linking of libtool libraries
+ -shrext SUFFIX override the standard shared library file extension
+ -static do not do any dynamic linking of uninstalled libtool libraries
+ -static-libtool-libs
+ do not do any dynamic linking of libtool libraries
+ -version-info CURRENT[:REVISION[:AGE]]
+ specify library version info [each variable defaults to 0]
+ -weak LIBNAME declare that the target provides the LIBNAME interface
+ -Wc,FLAG
+ -Xcompiler FLAG pass linker-specific FLAG directly to the compiler
+ -Wl,FLAG
+ -Xlinker FLAG pass linker-specific FLAG directly to the linker
+ -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC)
+
+All other options (arguments beginning with \`-') are ignored.
+
+Every other argument is treated as a filename. Files ending in \`.la' are
+treated as uninstalled libtool libraries, other files are standard or library
+object files.
+
+If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
+only library objects (\`.lo' files) may be specified, and \`-rpath' is
+required, except when creating a convenience library.
+
+If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
+using \`ar' and \`ranlib', or on Windows using \`lib'.
+
+If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
+is created, otherwise an executable program is created."
+ ;;
+
+ uninstall)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
+
+Remove libraries from an installation directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, all the files associated with it are deleted.
+Otherwise, only FILE itself is deleted using RM."
+ ;;
+
+ *)
+ func_fatal_help "invalid operation mode \`$opt_mode'"
+ ;;
+ esac
+
+ echo
+ $ECHO "Try \`$progname --help' for more information about other modes."
+}
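+# Example (illustrative only; names and paths below are placeholders, not part
+# of the upstream help text): a typical link-mode invocation for an installable
+# shared library looks roughly like
+#   libtool --mode=link cc -o libfoo.la foo.lo bar.lo -rpath /usr/local/lib -version-info 1:0:0
+# Dropping `-rpath' would produce a convenience library instead.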
+
+# Now that we've collected a possible --mode arg, show help if necessary
+if $opt_help; then
+ if test "$opt_help" = :; then
+ func_mode_help
+ else
+ {
+ func_help noexit
+ for opt_mode in compile link execute install finish uninstall clean; do
+ func_mode_help
+ done
+ } | sed -n '1p; 2,$s/^Usage:/ or: /p'
+ {
+ func_help noexit
+ for opt_mode in compile link execute install finish uninstall clean; do
+ echo
+ func_mode_help
+ done
+ } |
+ sed '1d
+ /^When reporting/,/^Report/{
+ H
+ d
+ }
+ $x
+ /information about other modes/d
+ /more detailed .*MODE/d
+ s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/'
+ fi
+ exit $?
+fi
+
+
+# func_mode_execute arg...
+func_mode_execute ()
+{
+ $opt_debug
+ # The first argument is the command name.
+ cmd="$nonopt"
+ test -z "$cmd" && \
+ func_fatal_help "you must specify a COMMAND"
+
+ # Handle -dlopen flags immediately.
+ for file in $opt_dlopen; do
+ test -f "$file" \
+ || func_fatal_help "\`$file' is not a file"
+
+ dir=
+ case $file in
+ *.la)
+ func_resolve_sysroot "$file"
+ file=$func_resolve_sysroot_result
+
+ # Check to see that this really is a libtool archive.
+ func_lalib_unsafe_p "$file" \
+ || func_fatal_help "\`$lib' is not a valid libtool archive"
+
+ # Read the libtool library.
+ dlname=
+ library_names=
+ func_source "$file"
+
+ # Skip this library if it cannot be dlopened.
+ if test -z "$dlname"; then
+ # Warn if it was a shared library.
+ test -n "$library_names" && \
+ func_warning "\`$file' was not linked with \`-export-dynamic'"
+ continue
+ fi
+
+ func_dirname "$file" "" "."
+ dir="$func_dirname_result"
+
+ if test -f "$dir/$objdir/$dlname"; then
+ func_append dir "/$objdir"
+ else
+ if test ! -f "$dir/$dlname"; then
+ func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'"
+ fi
+ fi
+ ;;
+
+ *.lo)
+ # Just add the directory containing the .lo file.
+ func_dirname "$file" "" "."
+ dir="$func_dirname_result"
+ ;;
+
+ *)
+ func_warning "\`-dlopen' is ignored for non-libtool libraries and objects"
+ continue
+ ;;
+ esac
+
+ # Get the absolute pathname.
+ absdir=`cd "$dir" && pwd`
+ test -n "$absdir" && dir="$absdir"
+
+ # Now add the directory to shlibpath_var.
+ if eval "test -z \"\$$shlibpath_var\""; then
+ eval "$shlibpath_var=\"\$dir\""
+ else
+ eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
+ fi
+ done
+
+ # This variable tells wrapper scripts just to set shlibpath_var
+ # rather than running their programs.
+ libtool_execute_magic="$magic"
+
+ # Check if any of the arguments is a wrapper script.
+ args=
+ for file
+ do
+ case $file in
+ -* | *.la | *.lo ) ;;
+ *)
+ # Do a test to see if this is really a libtool program.
+ if func_ltwrapper_script_p "$file"; then
+ func_source "$file"
+ # Transform arg to wrapped name.
+ file="$progdir/$program"
+ elif func_ltwrapper_executable_p "$file"; then
+ func_ltwrapper_scriptname "$file"
+ func_source "$func_ltwrapper_scriptname_result"
+ # Transform arg to wrapped name.
+ file="$progdir/$program"
+ fi
+ ;;
+ esac
+ # Quote arguments (to preserve shell metacharacters).
+ func_append_quoted args "$file"
+ done
+
+ if test "X$opt_dry_run" = Xfalse; then
+ if test -n "$shlibpath_var"; then
+ # Export the shlibpath_var.
+ eval "export $shlibpath_var"
+ fi
+
+ # Restore saved environment variables
+ for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
+ do
+ eval "if test \"\${save_$lt_var+set}\" = set; then
+ $lt_var=\$save_$lt_var; export $lt_var
+ else
+ $lt_unset $lt_var
+ fi"
+ done
+
+ # Now prepare to actually exec the command.
+ exec_cmd="\$cmd$args"
+ else
+ # Display what would be done.
+ if test -n "$shlibpath_var"; then
+ eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\""
+ echo "export $shlibpath_var"
+ fi
+ $ECHO "$cmd$args"
+ exit $EXIT_SUCCESS
+ fi
+}
+
+test "$opt_mode" = execute && func_mode_execute ${1+"$@"}
+
+
+# func_mode_finish arg...
+func_mode_finish ()
+{
+ $opt_debug
+ libs=
+ libdirs=
+ admincmds=
+
+ for opt in "$nonopt" ${1+"$@"}
+ do
+ if test -d "$opt"; then
+ func_append libdirs " $opt"
+
+ elif test -f "$opt"; then
+ if func_lalib_unsafe_p "$opt"; then
+ func_append libs " $opt"
+ else
+ func_warning "\`$opt' is not a valid libtool archive"
+ fi
+
+ else
+ func_fatal_error "invalid argument \`$opt'"
+ fi
+ done
+
+ if test -n "$libs"; then
+ if test -n "$lt_sysroot"; then
+ sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"`
+ sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;"
+ else
+ sysroot_cmd=
+ fi
+
+ # Remove sysroot references
+ if $opt_dry_run; then
+ for lib in $libs; do
+ echo "removing references to $lt_sysroot and \`=' prefixes from $lib"
+ done
+ else
+ tmpdir=`func_mktempdir`
+ for lib in $libs; do
+ sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \
+ > $tmpdir/tmp-la
+ mv -f $tmpdir/tmp-la $lib
+ done
+ ${RM}r "$tmpdir"
+ fi
+ fi
+
+ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+ for libdir in $libdirs; do
+ if test -n "$finish_cmds"; then
+ # Do each command in the finish commands.
+ func_execute_cmds "$finish_cmds" 'admincmds="$admincmds
+'"$cmd"'"'
+ fi
+ if test -n "$finish_eval"; then
+ # Do the single finish_eval.
+ eval cmds=\"$finish_eval\"
+ $opt_dry_run || eval "$cmds" || func_append admincmds "
+ $cmds"
+ fi
+ done
+ fi
+
+ # Exit here if they wanted silent mode.
+ $opt_silent && exit $EXIT_SUCCESS
+
+ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+ echo "----------------------------------------------------------------------"
+ echo "Libraries have been installed in:"
+ for libdir in $libdirs; do
+ $ECHO " $libdir"
+ done
+ echo
+ echo "If you ever happen to want to link against installed libraries"
+ echo "in a given directory, LIBDIR, you must either use libtool, and"
+ echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
+ echo "flag during linking and do at least one of the following:"
+ if test -n "$shlibpath_var"; then
+ echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
+ echo " during execution"
+ fi
+ if test -n "$runpath_var"; then
+ echo " - add LIBDIR to the \`$runpath_var' environment variable"
+ echo " during linking"
+ fi
+ if test -n "$hardcode_libdir_flag_spec"; then
+ libdir=LIBDIR
+ eval flag=\"$hardcode_libdir_flag_spec\"
+
+ $ECHO " - use the \`$flag' linker flag"
+ fi
+ if test -n "$admincmds"; then
+ $ECHO " - have your system administrator run these commands:$admincmds"
+ fi
+ if test -f /etc/ld.so.conf; then
+ echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
+ fi
+ echo
+
+ echo "See any operating system documentation about shared libraries for"
+ case $host in
+ solaris2.[6789]|solaris2.1[0-9])
+ echo "more information, such as the ld(1), crle(1) and ld.so(8) manual"
+ echo "pages."
+ ;;
+ *)
+ echo "more information, such as the ld(1) and ld.so(8) manual pages."
+ ;;
+ esac
+ echo "----------------------------------------------------------------------"
+ fi
+ exit $EXIT_SUCCESS
+}
+
+test "$opt_mode" = finish && func_mode_finish ${1+"$@"}
+
+
+# func_mode_install arg...
+func_mode_install ()
+{
+ $opt_debug
+ # There may be an optional sh(1) argument at the beginning of
+ # install_prog (especially on Windows NT).
+ if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
+ # Allow the use of GNU shtool's install command.
+ case $nonopt in *shtool*) :;; *) false;; esac; then
+ # Aesthetically quote it.
+ func_quote_for_eval "$nonopt"
+ install_prog="$func_quote_for_eval_result "
+ arg=$1
+ shift
+ else
+ install_prog=
+ arg=$nonopt
+ fi
+
+ # The real first argument should be the name of the installation program.
+ # Aesthetically quote it.
+ func_quote_for_eval "$arg"
+ func_append install_prog "$func_quote_for_eval_result"
+ install_shared_prog=$install_prog
+ case " $install_prog " in
+ *[\\\ /]cp\ *) install_cp=: ;;
+ *) install_cp=false ;;
+ esac
+
+ # We need to accept at least all the BSD install flags.
+ dest=
+ files=
+ opts=
+ prev=
+ install_type=
+ isdir=no
+ stripme=
+ no_mode=:
+ for arg
+ do
+ arg2=
+ if test -n "$dest"; then
+ func_append files " $dest"
+ dest=$arg
+ continue
+ fi
+
+ case $arg in
+ -d) isdir=yes ;;
+ -f)
+ if $install_cp; then :; else
+ prev=$arg
+ fi
+ ;;
+ -g | -m | -o)
+ prev=$arg
+ ;;
+ -s)
+ stripme=" -s"
+ continue
+ ;;
+ -*)
+ ;;
+ *)
+ # If the previous option needed an argument, then skip it.
+ if test -n "$prev"; then
+ if test "x$prev" = x-m && test -n "$install_override_mode"; then
+ arg2=$install_override_mode
+ no_mode=false
+ fi
+ prev=
+ else
+ dest=$arg
+ continue
+ fi
+ ;;
+ esac
+
+ # Aesthetically quote the argument.
+ func_quote_for_eval "$arg"
+ func_append install_prog " $func_quote_for_eval_result"
+ if test -n "$arg2"; then
+ func_quote_for_eval "$arg2"
+ fi
+ func_append install_shared_prog " $func_quote_for_eval_result"
+ done
+
+ test -z "$install_prog" && \
+ func_fatal_help "you must specify an install program"
+
+ test -n "$prev" && \
+ func_fatal_help "the \`$prev' option requires an argument"
+
+ if test -n "$install_override_mode" && $no_mode; then
+ if $install_cp; then :; else
+ func_quote_for_eval "$install_override_mode"
+ func_append install_shared_prog " -m $func_quote_for_eval_result"
+ fi
+ fi
+
+ if test -z "$files"; then
+ if test -z "$dest"; then
+ func_fatal_help "no file or destination specified"
+ else
+ func_fatal_help "you must specify a destination"
+ fi
+ fi
+
+ # Strip any trailing slash from the destination.
+ func_stripname '' '/' "$dest"
+ dest=$func_stripname_result
+
+ # Check to see that the destination is a directory.
+ test -d "$dest" && isdir=yes
+ if test "$isdir" = yes; then
+ destdir="$dest"
+ destname=
+ else
+ func_dirname_and_basename "$dest" "" "."
+ destdir="$func_dirname_result"
+ destname="$func_basename_result"
+
+ # Not a directory, so check to see that there is only one file specified.
+ set dummy $files; shift
+ test "$#" -gt 1 && \
+ func_fatal_help "\`$dest' is not a directory"
+ fi
+ case $destdir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ for file in $files; do
+ case $file in
+ *.lo) ;;
+ *)
+ func_fatal_help "\`$destdir' must be an absolute directory name"
+ ;;
+ esac
+ done
+ ;;
+ esac
+
+ # This variable tells wrapper scripts just to set variables rather
+ # than running their programs.
+ libtool_install_magic="$magic"
+
+ staticlibs=
+ future_libdirs=
+ current_libdirs=
+ for file in $files; do
+
+ # Do each installation.
+ case $file in
+ *.$libext)
+ # Do the static libraries later.
+ func_append staticlibs " $file"
+ ;;
+
+ *.la)
+ func_resolve_sysroot "$file"
+ file=$func_resolve_sysroot_result
+
+ # Check to see that this really is a libtool archive.
+ func_lalib_unsafe_p "$file" \
+ || func_fatal_help "\`$file' is not a valid libtool archive"
+
+ library_names=
+ old_library=
+ relink_command=
+ func_source "$file"
+
+ # Add the libdir to current_libdirs if it is the destination.
+ if test "X$destdir" = "X$libdir"; then
+ case "$current_libdirs " in
+ *" $libdir "*) ;;
+ *) func_append current_libdirs " $libdir" ;;
+ esac
+ else
+ # Note the libdir as a future libdir.
+ case "$future_libdirs " in
+ *" $libdir "*) ;;
+ *) func_append future_libdirs " $libdir" ;;
+ esac
+ fi
+
+ func_dirname "$file" "/" ""
+ dir="$func_dirname_result"
+ func_append dir "$objdir"
+
+ if test -n "$relink_command"; then
+ # Determine the prefix the user has applied to our future dir.
+ inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"`
+
+ # Don't allow the user to place us outside of our expected
+ # location because this prevents finding dependent libraries that
+ # are installed to the same prefix.
+ # At present, this check doesn't affect Windows DLLs that
+ # are installed into $libdir/../bin (currently, that works fine)
+ # but it's something to keep an eye on.
+ test "$inst_prefix_dir" = "$destdir" && \
+ func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir"
+
+ if test -n "$inst_prefix_dir"; then
+ # Stick the inst_prefix_dir data into the link command.
+ relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
+ else
+ relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"`
+ fi
+
+ func_warning "relinking \`$file'"
+ func_show_eval "$relink_command" \
+ 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"'
+ fi
+
+ # See the names of the shared library.
+ set dummy $library_names; shift
+ if test -n "$1"; then
+ realname="$1"
+ shift
+
+ srcname="$realname"
+ test -n "$relink_command" && srcname="$realname"T
+
+ # Install the shared library and build the symlinks.
+ func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \
+ 'exit $?'
+ tstripme="$stripme"
+ case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+ case $realname in
+ *.dll.a)
+ tstripme=""
+ ;;
+ esac
+ ;;
+ esac
+ if test -n "$tstripme" && test -n "$striplib"; then
+ func_show_eval "$striplib $destdir/$realname" 'exit $?'
+ fi
+
+ if test "$#" -gt 0; then
+ # Delete the old symlinks, and create new ones.
+ # Try `ln -sf' first, because the `ln' binary might depend on
+ # the symlink we replace! Solaris /bin/ln does not understand -f,
+ # so we also need to try rm && ln -s.
+ for linkname
+ do
+ test "$linkname" != "$realname" \
+ && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })"
+ done
+ fi
+
+ # Do each command in the postinstall commands.
+ lib="$destdir/$realname"
+ func_execute_cmds "$postinstall_cmds" 'exit $?'
+ fi
+
+ # Install the pseudo-library for information purposes.
+ func_basename "$file"
+ name="$func_basename_result"
+ instname="$dir/$name"i
+ func_show_eval "$install_prog $instname $destdir/$name" 'exit $?'
+
+ # Maybe install the static library, too.
+ test -n "$old_library" && func_append staticlibs " $dir/$old_library"
+ ;;
+
+ *.lo)
+ # Install (i.e. copy) a libtool object.
+
+ # Figure out destination file name, if it wasn't already specified.
+ if test -n "$destname"; then
+ destfile="$destdir/$destname"
+ else
+ func_basename "$file"
+ destfile="$func_basename_result"
+ destfile="$destdir/$destfile"
+ fi
+
+ # Deduce the name of the destination old-style object file.
+ case $destfile in
+ *.lo)
+ func_lo2o "$destfile"
+ staticdest=$func_lo2o_result
+ ;;
+ *.$objext)
+ staticdest="$destfile"
+ destfile=
+ ;;
+ *)
+ func_fatal_help "cannot copy a libtool object to \`$destfile'"
+ ;;
+ esac
+
+ # Install the libtool object if requested.
+ test -n "$destfile" && \
+ func_show_eval "$install_prog $file $destfile" 'exit $?'
+
+ # Install the old object if enabled.
+ if test "$build_old_libs" = yes; then
+ # Deduce the name of the old-style object file.
+ func_lo2o "$file"
+ staticobj=$func_lo2o_result
+ func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?'
+ fi
+ exit $EXIT_SUCCESS
+ ;;
+
+ *)
+ # Figure out destination file name, if it wasn't already specified.
+ if test -n "$destname"; then
+ destfile="$destdir/$destname"
+ else
+ func_basename "$file"
+ destfile="$func_basename_result"
+ destfile="$destdir/$destfile"
+ fi
+
+ # If the file is missing, and there is a .exe on the end, strip it
+ # because it is most likely a libtool script we actually want to
+ # install
+ stripped_ext=""
+ case $file in
+ *.exe)
+ if test ! -f "$file"; then
+ func_stripname '' '.exe' "$file"
+ file=$func_stripname_result
+ stripped_ext=".exe"
+ fi
+ ;;
+ esac
+
+ # Do a test to see if this is really a libtool program.
+ case $host in
+ *cygwin* | *mingw*)
+ if func_ltwrapper_executable_p "$file"; then
+ func_ltwrapper_scriptname "$file"
+ wrapper=$func_ltwrapper_scriptname_result
+ else
+ func_stripname '' '.exe' "$file"
+ wrapper=$func_stripname_result
+ fi
+ ;;
+ *)
+ wrapper=$file
+ ;;
+ esac
+ if func_ltwrapper_script_p "$wrapper"; then
+ notinst_deplibs=
+ relink_command=
+
+ func_source "$wrapper"
+
+ # Check the variables that should have been set.
+ test -z "$generated_by_libtool_version" && \
+ func_fatal_error "invalid libtool wrapper script \`$wrapper'"
+
+ finalize=yes
+ for lib in $notinst_deplibs; do
+ # Check to see that each library is installed.
+ libdir=
+ if test -f "$lib"; then
+ func_source "$lib"
+ fi
+ libfile="$libdir/"`$ECHO "$lib" | $SED 's%^.*/%%g'` ### testsuite: skip nested quoting test
+ if test -n "$libdir" && test ! -f "$libfile"; then
+ func_warning "\`$lib' has not been installed in \`$libdir'"
+ finalize=no
+ fi
+ done
+
+ relink_command=
+ func_source "$wrapper"
+
+ outputname=
+ if test "$fast_install" = no && test -n "$relink_command"; then
+ $opt_dry_run || {
+ if test "$finalize" = yes; then
+ tmpdir=`func_mktempdir`
+ func_basename "$file$stripped_ext"
+ file="$func_basename_result"
+ outputname="$tmpdir/$file"
+ # Replace the output file specification.
+ relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'`
+
+ $opt_silent || {
+ func_quote_for_expand "$relink_command"
+ eval "func_echo $func_quote_for_expand_result"
+ }
+ if eval "$relink_command"; then :
+ else
+ func_error "error: relink \`$file' with the above command before installing it"
+ $opt_dry_run || ${RM}r "$tmpdir"
+ continue
+ fi
+ file="$outputname"
+ else
+ func_warning "cannot relink \`$file'"
+ fi
+ }
+ else
+ # Install the binary that we compiled earlier.
+ file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"`
+ fi
+ fi
+
+ # remove .exe since cygwin /usr/bin/install will append another
+ # one anyway
+ case $install_prog,$host in
+ */usr/bin/install*,*cygwin*)
+ case $file:$destfile in
+ *.exe:*.exe)
+ # this is ok
+ ;;
+ *.exe:*)
+ destfile=$destfile.exe
+ ;;
+ *:*.exe)
+ func_stripname '' '.exe' "$destfile"
+ destfile=$func_stripname_result
+ ;;
+ esac
+ ;;
+ esac
+ func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?'
+ $opt_dry_run || if test -n "$outputname"; then
+ ${RM}r "$tmpdir"
+ fi
+ ;;
+ esac
+ done
+
+ for file in $staticlibs; do
+ func_basename "$file"
+ name="$func_basename_result"
+
+ # Set up the ranlib parameters.
+ oldlib="$destdir/$name"
+ func_to_tool_file "$oldlib" func_convert_file_msys_to_w32
+ tool_oldlib=$func_to_tool_file_result
+
+ func_show_eval "$install_prog \$file \$oldlib" 'exit $?'
+
+ if test -n "$stripme" && test -n "$old_striplib"; then
+ func_show_eval "$old_striplib $tool_oldlib" 'exit $?'
+ fi
+
+ # Do each command in the postinstall commands.
+ func_execute_cmds "$old_postinstall_cmds" 'exit $?'
+ done
+
+ test -n "$future_libdirs" && \
+ func_warning "remember to run \`$progname --finish$future_libdirs'"
+
+ if test -n "$current_libdirs"; then
+ # Maybe just do a dry run.
+ $opt_dry_run && current_libdirs=" -n$current_libdirs"
+ exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs'
+ else
+ exit $EXIT_SUCCESS
+ fi
+}
+
+test "$opt_mode" = install && func_mode_install ${1+"$@"}
+
+
+# func_generate_dlsyms outputname originator pic_p
+# Extract symbols from dlprefiles and create ${outputname}S.o with
+# a dlpreopen symbol table.
+func_generate_dlsyms ()
+{
+ $opt_debug
+ my_outputname="$1"
+ my_originator="$2"
+ my_pic_p="${3-no}"
+ my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'`
+ my_dlsyms=
+
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ if test -n "$NM" && test -n "$global_symbol_pipe"; then
+ my_dlsyms="${my_outputname}S.c"
+ else
+ func_error "not configured to extract global symbols from dlpreopened files"
+ fi
+ fi
+
+ if test -n "$my_dlsyms"; then
+ case $my_dlsyms in
+ "") ;;
+ *.c)
+ # Discover the nlist of each of the dlfiles.
+ nlist="$output_objdir/${my_outputname}.nm"
+
+ func_show_eval "$RM $nlist ${nlist}S ${nlist}T"
+
+ # Parse the name list into a source file.
+ func_verbose "creating $output_objdir/$my_dlsyms"
+
+ $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\
+/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */
+/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4))
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#endif
+
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+ relocations are performed -- see ld's documentation on pseudo-relocs. */
+# define LT_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data. */
+# define LT_DLSYM_CONST
+#else
+# define LT_DLSYM_CONST const
+#endif
+
+/* External symbol declarations for the compiler. */\
+"
+
+ if test "$dlself" = yes; then
+ func_verbose "generating symbol list for \`$output'"
+
+ $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist"
+
+ # Add our own program objects to the symbol list.
+ progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP`
+ for progfile in $progfiles; do
+ func_to_tool_file "$progfile" func_convert_file_msys_to_w32
+ func_verbose "extracting global C symbols from \`$func_to_tool_file_result'"
+ $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -n "$exclude_expsyms"; then
+ $opt_dry_run || {
+ eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
+ eval '$MV "$nlist"T "$nlist"'
+ }
+ fi
+
+ if test -n "$export_symbols_regex"; then
+ $opt_dry_run || {
+ eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
+ eval '$MV "$nlist"T "$nlist"'
+ }
+ fi
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ export_symbols="$output_objdir/$outputname.exp"
+ $opt_dry_run || {
+ $RM $export_symbols
+ eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+ eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"'
+ ;;
+ esac
+ }
+ else
+ $opt_dry_run || {
+ eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
+ eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
+ eval '$MV "$nlist"T "$nlist"'
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+ eval 'cat "$nlist" >> "$output_objdir/$outputname.def"'
+ ;;
+ esac
+ }
+ fi
+ fi
+
+ for dlprefile in $dlprefiles; do
+ func_verbose "extracting global C symbols from \`$dlprefile'"
+ func_basename "$dlprefile"
+ name="$func_basename_result"
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ # if an import library, we need to obtain dlname
+ if func_win32_import_lib_p "$dlprefile"; then
+ func_tr_sh "$dlprefile"
+ eval "curr_lafile=\$libfile_$func_tr_sh_result"
+ dlprefile_dlbasename=""
+ if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then
+ # Use subshell, to avoid clobbering current variable values
+ dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"`
+ if test -n "$dlprefile_dlname" ; then
+ func_basename "$dlprefile_dlname"
+ dlprefile_dlbasename="$func_basename_result"
+ else
+ # no lafile. user explicitly requested -dlpreopen <import library>.
+ $sharedlib_from_linklib_cmd "$dlprefile"
+ dlprefile_dlbasename=$sharedlib_from_linklib_result
+ fi
+ fi
+ $opt_dry_run || {
+ if test -n "$dlprefile_dlbasename" ; then
+ eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"'
+ else
+ func_warning "Could not compute DLL name from $name"
+ eval '$ECHO ": $name " >> "$nlist"'
+ fi
+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe |
+ $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'"
+ }
+ else # not an import lib
+ $opt_dry_run || {
+ eval '$ECHO ": $name " >> "$nlist"'
+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+ }
+ fi
+ ;;
+ *)
+ $opt_dry_run || {
+ eval '$ECHO ": $name " >> "$nlist"'
+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+ }
+ ;;
+ esac
+ done
+
+ $opt_dry_run || {
+ # Make sure we have at least an empty file.
+ test -f "$nlist" || : > "$nlist"
+
+ if test -n "$exclude_expsyms"; then
+ $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
+ $MV "$nlist"T "$nlist"
+ fi
+
+ # Try sorting and uniquifying the output.
+ if $GREP -v "^: " < "$nlist" |
+ if sort -k 3 </dev/null >/dev/null 2>&1; then
+ sort -k 3
+ else
+ sort +2
+ fi |
+ uniq > "$nlist"S; then
+ :
+ else
+ $GREP -v "^: " < "$nlist" > "$nlist"S
+ fi
+
+ if test -f "$nlist"S; then
+ eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"'
+ else
+ echo '/* NONE */' >> "$output_objdir/$my_dlsyms"
+ fi
+
+ echo >> "$output_objdir/$my_dlsyms" "\
+
+/* The mapping between symbol names and symbols. */
+typedef struct {
+ const char *name;
+ void *address;
+} lt_dlsymlist;
+extern LT_DLSYM_CONST lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[];
+LT_DLSYM_CONST lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[] =
+{\
+ { \"$my_originator\", (void *) 0 },"
+
+ case $need_lib_prefix in
+ no)
+ eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms"
+ ;;
+ *)
+ eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms"
+ ;;
+ esac
+ echo >> "$output_objdir/$my_dlsyms" "\
+ {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt_${my_prefix}_LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif\
+"
+ } # !$opt_dry_run
+
+ pic_flag_for_symtable=
+ case "$compile_command " in
+ *" -static "*) ;;
+ *)
+ case $host in
+ # compiling the symbol table file with pic_flag works around
+ # a FreeBSD bug that causes programs to crash when -lm is
+ # linked before any other PIC object. But we must not use
+ # pic_flag when linking with -static. The problem exists in
+ # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
+ *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
+ pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;;
+ *-*-hpux*)
+ pic_flag_for_symtable=" $pic_flag" ;;
+ *)
+ if test "X$my_pic_p" != Xno; then
+ pic_flag_for_symtable=" $pic_flag"
+ fi
+ ;;
+ esac
+ ;;
+ esac
+ symtab_cflags=
+ for arg in $LTCFLAGS; do
+ case $arg in
+ -pie | -fpie | -fPIE) ;;
+ *) func_append symtab_cflags " $arg" ;;
+ esac
+ done
+
+ # Now compile the dynamic symbol file.
+ func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?'
+
+ # Clean up the generated files.
+ func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"'
+
+ # Transform the symbol file into the correct name.
+ symfileobj="$output_objdir/${my_outputname}S.$objext"
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ if test -f "$output_objdir/$my_outputname.def"; then
+ compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+ finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+ else
+ compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+ finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+ fi
+ ;;
+ *)
+ compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+ finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+ ;;
+ esac
+ ;;
+ *)
+ func_fatal_error "unknown suffix for \`$my_dlsyms'"
+ ;;
+ esac
+ else
+ # We keep going just in case the user didn't refer to
+ # lt_preloaded_symbols. The linker will fail if global_symbol_pipe
+ # really was required.
+
+ # Nullify the symbol file.
+ compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"`
+ finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"`
+ fi
+}
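+# Note (illustrative; assumes the usual libltdl conventions): the generated
+# ${my_outputname}S.c defines lt_<prefix>_LTX_preloaded_symbols[], the table
+# that libltdl's dlpreopen support consults for preloaded modules; client code
+# typically calls LTDL_SET_PRELOADED_SYMBOLS() before lt_dlinit().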
+
+# func_win32_libid arg
+# return the library type of file 'arg'
+#
+# Need a lot of goo to handle *both* DLLs and import libs
+# Has to be a shell function in order to 'eat' the argument
+# that is supplied when $file_magic_command is called.
+# Despite the name, also deal with 64 bit binaries.
+func_win32_libid ()
+{
+ $opt_debug
+ win32_libid_type="unknown"
+ win32_fileres=`file -L $1 2>/dev/null`
+ case $win32_fileres in
+ *ar\ archive\ import\ library*) # definitely import
+ win32_libid_type="x86 archive import"
+ ;;
+ *ar\ archive*) # could be an import, or static
+ # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD.
+ if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null |
+ $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then
+ func_to_tool_file "$1" func_convert_file_msys_to_w32
+ win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" |
+ $SED -n -e '
+ 1,100{
+ / I /{
+ s,.*,import,
+ p
+ q
+ }
+ }'`
+ case $win32_nmres in
+ import*) win32_libid_type="x86 archive import";;
+ *) win32_libid_type="x86 archive static";;
+ esac
+ fi
+ ;;
+ *DLL*)
+ win32_libid_type="x86 DLL"
+ ;;
+ *executable*) # but shell scripts are "executable" too...
+ case $win32_fileres in
+ *MS\ Windows\ PE\ Intel*)
+ win32_libid_type="x86 DLL"
+ ;;
+ esac
+ ;;
+ esac
+ $ECHO "$win32_libid_type"
+}
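+# Usage sketch (illustrative; the path is a placeholder): this function is the
+# usual $file_magic_command for the win32 deplibs check, e.g.
+#   libid=`func_win32_libid /mingw/lib/libfoo.dll.a`   # e.g. "x86 archive import"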
+
+# func_cygming_dll_for_implib ARG
+#
+# Platform-specific function to extract the
+# name of the DLL associated with the specified
+# import library ARG.
+# Invoked by eval'ing the libtool variable
+# $sharedlib_from_linklib_cmd
+# Result is available in the variable
+# $sharedlib_from_linklib_result
+func_cygming_dll_for_implib ()
+{
+ $opt_debug
+ sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"`
+}
+
+# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs
+#
+# This is the core of a fallback implementation of a
+# platform-specific function to extract the name of the
+# DLL associated with the specified import library LIBNAME.
+#
+# SECTION_NAME is either .idata$6 or .idata$7, depending
+# on the platform and compiler that created the implib.
+#
+# Echoes the name of the DLL associated with the
+# specified import library.
+func_cygming_dll_for_implib_fallback_core ()
+{
+ $opt_debug
+ match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"`
+ $OBJDUMP -s --section "$1" "$2" 2>/dev/null |
+ $SED '/^Contents of section '"$match_literal"':/{
+ # Place marker at beginning of archive member dllname section
+ s/.*/====MARK====/
+ p
+ d
+ }
+ # These lines can sometimes be longer than 43 characters, but
+ # are always uninteresting
+ /:[ ]*file format pe[i]\{,1\}-/d
+ /^In archive [^:]*:/d
+ # Ensure marker is printed
+ /^====MARK====/p
+ # Remove all lines with less than 43 characters
+ /^.\{43\}/!d
+ # From remaining lines, remove first 43 characters
+ s/^.\{43\}//' |
+ $SED -n '
+ # Join marker and all lines until next marker into a single line
+ /^====MARK====/ b para
+ H
+ $ b para
+ b
+ :para
+ x
+ s/\n//g
+ # Remove the marker
+ s/^====MARK====//
+ # Remove trailing dots and whitespace
+ s/[\. \t]*$//
+ # Print
+ /./p' |
+ # we now have a list, one entry per line, of the stringified
+ # contents of the appropriate section of all members of the
+ # archive which possess that section. Heuristic: eliminate
+ # all those which have a first or second character that is
+ # a '.' (that is, objdump's representation of an unprintable
+ # character). This should work for all archives with fewer than
+ # 0x302f exports -- but will fail for DLLs whose name actually
+ # begins with a literal '.' or a single character followed by
+ # a '.'.
+ #
+ # Of those that remain, print the first one.
+ $SED -e '/^\./d;/^.\./d;q'
+}
+
+# func_cygming_gnu_implib_p ARG
+# This predicate returns with zero status (TRUE) if
+# ARG is a GNU/binutils-style import library. Returns
+# with nonzero status (FALSE) otherwise.
+func_cygming_gnu_implib_p ()
+{
+ $opt_debug
+ func_to_tool_file "$1" func_convert_file_msys_to_w32
+ func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'`
+ test -n "$func_cygming_gnu_implib_tmp"
+}
+
+# func_cygming_ms_implib_p ARG
+# This predicate returns with zero status (TRUE) if
+# ARG is an MS-style import library. Returns
+# with nonzero status (FALSE) otherwise.
+func_cygming_ms_implib_p ()
+{
+ $opt_debug
+ func_to_tool_file "$1" func_convert_file_msys_to_w32
+ func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'`
+ test -n "$func_cygming_ms_implib_tmp"
+}
+
+# func_cygming_dll_for_implib_fallback ARG
+# Platform-specific function to extract the
+# name of the DLL associated with the specified
+# import library ARG.
+#
+# This fallback implementation is for use when $DLLTOOL
+# does not support the --identify-strict option.
+# Invoked by eval'ing the libtool variable
+# $sharedlib_from_linklib_cmd
+# Result is available in the variable
+# $sharedlib_from_linklib_result
+func_cygming_dll_for_implib_fallback ()
+{
+ $opt_debug
+ if func_cygming_gnu_implib_p "$1" ; then
+ # binutils import library
+ sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"`
+ elif func_cygming_ms_implib_p "$1" ; then
+ # ms-generated import library
+ sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"`
+ else
+ # unknown
+ sharedlib_from_linklib_result=""
+ fi
+}
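+# Usage sketch (illustrative): callers reach these helpers through the libtool
+# variable $sharedlib_from_linklib_cmd, e.g.
+#   $sharedlib_from_linklib_cmd libfoo.dll.a
+#   $ECHO "$sharedlib_from_linklib_result"   # DLL name, or empty if unknown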
+
+
+# func_extract_an_archive dir oldlib
+func_extract_an_archive ()
+{
+ $opt_debug
+ f_ex_an_ar_dir="$1"; shift
+ f_ex_an_ar_oldlib="$1"
+ if test "$lock_old_archive_extraction" = yes; then
+ lockfile=$f_ex_an_ar_oldlib.lock
+ until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
+ func_echo "Waiting for $lockfile to be removed"
+ sleep 2
+ done
+ fi
+ func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \
+ 'stat=$?; rm -f "$lockfile"; exit $stat'
+ if test "$lock_old_archive_extraction" = yes; then
+ $opt_dry_run || rm -f "$lockfile"
+ fi
+ if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then
+ :
+ else
+ func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib"
+ fi
+}
+
+
+# func_extract_archives gentop oldlib ...
+func_extract_archives ()
+{
+ $opt_debug
+ my_gentop="$1"; shift
+ my_oldlibs=${1+"$@"}
+ my_oldobjs=""
+ my_xlib=""
+ my_xabs=""
+ my_xdir=""
+
+ for my_xlib in $my_oldlibs; do
+ # Extract the objects.
+ case $my_xlib in
+ [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;;
+ *) my_xabs=`pwd`"/$my_xlib" ;;
+ esac
+ func_basename "$my_xlib"
+ my_xlib="$func_basename_result"
+ my_xlib_u=$my_xlib
+ while :; do
+ case " $extracted_archives " in
+ *" $my_xlib_u "*)
+ func_arith $extracted_serial + 1
+ extracted_serial=$func_arith_result
+ my_xlib_u=lt$extracted_serial-$my_xlib ;;
+ *) break ;;
+ esac
+ done
+ extracted_archives="$extracted_archives $my_xlib_u"
+ my_xdir="$my_gentop/$my_xlib_u"
+
+ func_mkdir_p "$my_xdir"
+
+ case $host in
+ *-darwin*)
+ func_verbose "Extracting $my_xabs"
+ # Do not bother doing anything if just a dry run
+ $opt_dry_run || {
+ darwin_orig_dir=`pwd`
+ cd $my_xdir || exit $?
+ darwin_archive=$my_xabs
+ darwin_curdir=`pwd`
+ darwin_base_archive=`basename "$darwin_archive"`
+ darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true`
+ if test -n "$darwin_arches"; then
+ darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'`
+ darwin_arch=
+ func_verbose "$darwin_base_archive has multiple architectures $darwin_arches"
+ for darwin_arch in $darwin_arches ; do
+ func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}"
+ $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}"
+ cd "unfat-$$/${darwin_base_archive}-${darwin_arch}"
+ func_extract_an_archive "`pwd`" "${darwin_base_archive}"
+ cd "$darwin_curdir"
+ $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}"
+ done # $darwin_arches
+ ## Okay now we've a bunch of thin objects, gotta fatten them up :)
+ darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u`
+ darwin_file=
+ darwin_files=
+ for darwin_file in $darwin_filelist; do
+ darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP`
+ $LIPO -create -output "$darwin_file" $darwin_files
+ done # $darwin_filelist
+ $RM -rf unfat-$$
+ cd "$darwin_orig_dir"
+ else
+ cd $darwin_orig_dir
+ func_extract_an_archive "$my_xdir" "$my_xabs"
+ fi # $darwin_arches
+ } # !$opt_dry_run
+ ;;
+ *)
+ func_extract_an_archive "$my_xdir" "$my_xabs"
+ ;;
+ esac
+ my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP`
+ done
+
+ func_extract_archives_result="$my_oldobjs"
+}
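+# Usage sketch (illustrative; variable names are placeholders): link mode uses
+# this to flatten convenience archives into their member objects, roughly
+#   func_extract_archives "$gentop" libconv.a
+#   oldobjs="$oldobjs $func_extract_archives_result"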
+
+
+# func_emit_wrapper [arg=no]
+#
+# Emit a libtool wrapper script on stdout.
+# Don't directly open a file because we may want to
+# incorporate the script contents within a cygwin/mingw
+# wrapper executable. Must ONLY be called from within
+# func_mode_link because it depends on a number of variables
+# set therein.
+#
+# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR
+# variable will take. If 'yes', then the emitted script
+# will assume that the directory in which it is stored is
+# the $objdir directory. This is a cygwin/mingw-specific
+# behavior.
+func_emit_wrapper ()
+{
+ func_emit_wrapper_arg1=${1-no}
+
+ $ECHO "\
+#! $SHELL
+
+# $output - temporary wrapper script for $objdir/$outputname
+# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+#
+# The $output program cannot be directly executed until all the libtool
+# libraries that it depends on are installed.
+#
+# This wrapper script should never be moved out of the build directory.
+# If it is, it will not operate correctly.
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+sed_quote_subst='$sed_quote_subst'
+
+# Be Bourne compatible
+if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '\${1+\"\$@\"}'='\"\$@\"'
+ setopt NO_GLOB_SUBST
+else
+ case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+relink_command=\"$relink_command\"
+
+# This environment variable determines our operation mode.
+if test \"\$libtool_install_magic\" = \"$magic\"; then
+ # install mode needs the following variables:
+ generated_by_libtool_version='$macro_version'
+ notinst_deplibs='$notinst_deplibs'
+else
+ # When we are sourced in execute mode, \$file and \$ECHO are already set.
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then
+ file=\"\$0\""
+
+ qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"`
+ $ECHO "\
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+ eval 'cat <<_LTECHO_EOF
+\$1
+_LTECHO_EOF'
+}
+ ECHO=\"$qECHO\"
+ fi
+
+# Very basic option parsing. These options are (a) specific to
+# the libtool wrapper, (b) are identical between the wrapper
+# /script/ and the wrapper /executable/ which is used only on
+# windows platforms, and (c) all begin with the string "--lt-"
+# (application programs are unlikely to have options which match
+# this pattern).
+#
+# There are only two supported options: --lt-debug and
+# --lt-dump-script. There is, deliberately, no --lt-help.
+#
+# The first argument to this parsing function should be the
+# script's $0 value, followed by "$@".
+lt_option_debug=
+func_parse_lt_options ()
+{
+ lt_script_arg0=\$0
+ shift
+ for lt_opt
+ do
+ case \"\$lt_opt\" in
+ --lt-debug) lt_option_debug=1 ;;
+ --lt-dump-script)
+ lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\`
+ test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=.
+ lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\`
+ cat \"\$lt_dump_D/\$lt_dump_F\"
+ exit 0
+ ;;
+ --lt-*)
+ \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2
+ exit 1
+ ;;
+ esac
+ done
+
+ # Print the debug banner immediately:
+ if test -n \"\$lt_option_debug\"; then
+ echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2
+ fi
+}
+
+# Used when --lt-debug. Prints its arguments to stdout
+# (redirection is the responsibility of the caller)
+func_lt_dump_args ()
+{
+ lt_dump_args_N=1;
+ for lt_arg
+ do
+ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\"
+ lt_dump_args_N=\`expr \$lt_dump_args_N + 1\`
+ done
+}
+
+# Core function for launching the target application
+func_exec_program_core ()
+{
+"
+ case $host in
+ # Backslashes separate directories on plain windows
+ *-*-mingw | *-*-os2* | *-cegcc*)
+ $ECHO "\
+ if test -n \"\$lt_option_debug\"; then
+ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2
+ func_lt_dump_args \${1+\"\$@\"} 1>&2
+ fi
+ exec \"\$progdir\\\\\$program\" \${1+\"\$@\"}
+"
+ ;;
+
+ *)
+ $ECHO "\
+ if test -n \"\$lt_option_debug\"; then
+ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2
+ func_lt_dump_args \${1+\"\$@\"} 1>&2
+ fi
+ exec \"\$progdir/\$program\" \${1+\"\$@\"}
+"
+ ;;
+ esac
+ $ECHO "\
+ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2
+ exit 1
+}
+
+# A function to encapsulate launching the target application
+# Strips options in the --lt-* namespace from \$@ and
+# launches target application with the remaining arguments.
+func_exec_program ()
+{
+ case \" \$* \" in
+ *\\ --lt-*)
+ for lt_wr_arg
+ do
+ case \$lt_wr_arg in
+ --lt-*) ;;
+ *) set x \"\$@\" \"\$lt_wr_arg\"; shift;;
+ esac
+ shift
+ done ;;
+ esac
+ func_exec_program_core \${1+\"\$@\"}
+}
+
+ # Parse options
+ func_parse_lt_options \"\$0\" \${1+\"\$@\"}
+
+ # Find the directory that this script lives in.
+ thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\`
+ test \"x\$thisdir\" = \"x\$file\" && thisdir=.
+
+ # Follow symbolic links until we get to the real thisdir.
+ file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\`
+ while test -n \"\$file\"; do
+ destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\`
+
+ # If there was a directory component, then change thisdir.
+ if test \"x\$destdir\" != \"x\$file\"; then
+ case \"\$destdir\" in
+ [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
+ *) thisdir=\"\$thisdir/\$destdir\" ;;
+ esac
+ fi
+
+ file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\`
+ file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\`
+ done
+
+ # Usually 'no', except on cygwin/mingw when embedded into
+ # the cwrapper.
+ WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1
+ if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then
+ # special case for '.'
+ if test \"\$thisdir\" = \".\"; then
+ thisdir=\`pwd\`
+ fi
+ # remove .libs from thisdir
+ case \"\$thisdir\" in
+ *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;;
+ $objdir ) thisdir=. ;;
+ esac
+ fi
+
+ # Try to get the absolute directory name.
+ absdir=\`cd \"\$thisdir\" && pwd\`
+ test -n \"\$absdir\" && thisdir=\"\$absdir\"
+"
+
+ if test "$fast_install" = yes; then
+ $ECHO "\
+ program=lt-'$outputname'$exeext
+ progdir=\"\$thisdir/$objdir\"
+
+ if test ! -f \"\$progdir/\$program\" ||
+ { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\
+ test \"X\$file\" != \"X\$progdir/\$program\"; }; then
+
+ file=\"\$\$-\$program\"
+
+ if test ! -d \"\$progdir\"; then
+ $MKDIR \"\$progdir\"
+ else
+ $RM \"\$progdir/\$file\"
+ fi"
+
+ $ECHO "\
+
+ # relink executable if necessary
+ if test -n \"\$relink_command\"; then
+ if relink_command_output=\`eval \$relink_command 2>&1\`; then :
+ else
+ $ECHO \"\$relink_command_output\" >&2
+ $RM \"\$progdir/\$file\"
+ exit 1
+ fi
+ fi
+
+ $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
+ { $RM \"\$progdir/\$program\";
+ $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; }
+ $RM \"\$progdir/\$file\"
+ fi"
+ else
+ $ECHO "\
+ program='$outputname'
+ progdir=\"\$thisdir/$objdir\"
+"
+ fi
+
+ $ECHO "\
+
+ if test -f \"\$progdir/\$program\"; then"
+
+ # Fix the DLL search path if we need to. Do this before prepending
+ # to shlibpath, because on Windows, both are PATH and uninstalled
+ # libraries must come first.
+ if test -n "$dllsearchpath"; then
+ $ECHO "\
+ # Add the dll search path components to the executable PATH
+ PATH=$dllsearchpath:\$PATH
+"
+ fi
+
+ # Export our shlibpath_var if we have one.
+ if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+ $ECHO "\
+ # Add our own library path to $shlibpath_var
+ $shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
+
+ # Some systems cannot cope with colon-terminated $shlibpath_var
+ # The second colon is a workaround for a bug in BeOS R4 sed
+ $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\`
+
+ export $shlibpath_var
+"
+ fi
+
+ $ECHO "\
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then
+ # Run the actual program with our arguments.
+ func_exec_program \${1+\"\$@\"}
+ fi
+ else
+ # The program doesn't exist.
+ \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2
+ \$ECHO \"This script is just a wrapper for \$program.\" 1>&2
+ \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2
+ exit 1
+ fi
+fi\
+"
+}
+
+
+# func_emit_cwrapperexe_src
+# emit the source code for a wrapper executable on stdout
+# Must ONLY be called from within func_mode_link because
+# it depends on a number of variables set therein.
+func_emit_cwrapperexe_src ()
+{
+ cat <<EOF
+
+/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
+ Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+
+ The $output program cannot be directly executed until all the libtool
+ libraries that it depends on are installed.
+
+ This wrapper executable should never be moved out of the build directory.
+ If it is, it will not operate correctly.
+*/
+EOF
+ cat <<"EOF"
+#ifdef _MSC_VER
+# define _CRT_SECURE_NO_DEPRECATE 1
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _MSC_VER
+# include <direct.h>
+# include <process.h>
+# include <io.h>
+#else
+# include <unistd.h>
+# include <stdint.h>
+# ifdef __CYGWIN__
+# include <io.h>
+# endif
+#endif
+#include <malloc.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+/* declarations of non-ANSI functions */
+#if defined(__MINGW32__)
+# ifdef __STRICT_ANSI__
+int _putenv (const char *);
+# endif
+#elif defined(__CYGWIN__)
+# ifdef __STRICT_ANSI__
+char *realpath (const char *, char *);
+int putenv (char *);
+int setenv (const char *, const char *, int);
+# endif
+/* #elif defined (other platforms) ... */
+#endif
+
+/* portability defines, excluding path handling macros */
+#if defined(_MSC_VER)
+# define setmode _setmode
+# define stat _stat
+# define chmod _chmod
+# define getcwd _getcwd
+# define putenv _putenv
+# define S_IXUSR _S_IEXEC
+# ifndef _INTPTR_T_DEFINED
+# define _INTPTR_T_DEFINED
+# define intptr_t int
+# endif
+#elif defined(__MINGW32__)
+# define setmode _setmode
+# define stat _stat
+# define chmod _chmod
+# define getcwd _getcwd
+# define putenv _putenv
+#elif defined(__CYGWIN__)
+# define HAVE_SETENV
+# define FOPEN_WB "wb"
+/* #elif defined (other platforms) ... */
+#endif
+
+#if defined(PATH_MAX)
+# define LT_PATHMAX PATH_MAX
+#elif defined(MAXPATHLEN)
+# define LT_PATHMAX MAXPATHLEN
+#else
+# define LT_PATHMAX 1024
+#endif
+
+#ifndef S_IXOTH
+# define S_IXOTH 0
+#endif
+#ifndef S_IXGRP
+# define S_IXGRP 0
+#endif
+
+/* path handling portability macros */
+#ifndef DIR_SEPARATOR
+# define DIR_SEPARATOR '/'
+# define PATH_SEPARATOR ':'
+#endif
+
+#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \
+ defined (__OS2__)
+# define HAVE_DOS_BASED_FILE_SYSTEM
+# define FOPEN_WB "wb"
+# ifndef DIR_SEPARATOR_2
+# define DIR_SEPARATOR_2 '\\'
+# endif
+# ifndef PATH_SEPARATOR_2
+# define PATH_SEPARATOR_2 ';'
+# endif
+#endif
+
+#ifndef DIR_SEPARATOR_2
+# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
+#else /* DIR_SEPARATOR_2 */
+# define IS_DIR_SEPARATOR(ch) \
+ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
+#endif /* DIR_SEPARATOR_2 */
+
+#ifndef PATH_SEPARATOR_2
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR)
+#else /* PATH_SEPARATOR_2 */
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2)
+#endif /* PATH_SEPARATOR_2 */
+
+#ifndef FOPEN_WB
+# define FOPEN_WB "w"
+#endif
+#ifndef _O_BINARY
+# define _O_BINARY 0
+#endif
+
+#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type)))
+#define XFREE(stale) do { \
+ if (stale) { free ((void *) stale); stale = 0; } \
+} while (0)
+
+#if defined(LT_DEBUGWRAPPER)
+static int lt_debug = 1;
+#else
+static int lt_debug = 0;
+#endif
+
+const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */
+
+void *xmalloc (size_t num);
+char *xstrdup (const char *string);
+const char *base_name (const char *name);
+char *find_executable (const char *wrapper);
+char *chase_symlinks (const char *pathspec);
+int make_executable (const char *path);
+int check_executable (const char *path);
+char *strendzap (char *str, const char *pat);
+void lt_debugprintf (const char *file, int line, const char *fmt, ...);
+void lt_fatal (const char *file, int line, const char *message, ...);
+static const char *nonnull (const char *s);
+static const char *nonempty (const char *s);
+void lt_setenv (const char *name, const char *value);
+char *lt_extend_str (const char *orig_value, const char *add, int to_end);
+void lt_update_exe_path (const char *name, const char *value);
+void lt_update_lib_path (const char *name, const char *value);
+char **prepare_spawn (char **argv);
+void lt_dump_script (FILE *f);
+EOF
+
+ cat <<EOF
+volatile const char * MAGIC_EXE = "$magic_exe";
+const char * LIB_PATH_VARNAME = "$shlibpath_var";
+EOF
+
+ if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+ func_to_host_path "$temp_rpath"
+ cat <<EOF
+const char * LIB_PATH_VALUE = "$func_to_host_path_result";
+EOF
+ else
+ cat <<"EOF"
+const char * LIB_PATH_VALUE = "";
+EOF
+ fi
+
+ if test -n "$dllsearchpath"; then
+ func_to_host_path "$dllsearchpath:"
+ cat <<EOF
+const char * EXE_PATH_VARNAME = "PATH";
+const char * EXE_PATH_VALUE = "$func_to_host_path_result";
+EOF
+ else
+ cat <<"EOF"
+const char * EXE_PATH_VARNAME = "";
+const char * EXE_PATH_VALUE = "";
+EOF
+ fi
+
+ if test "$fast_install" = yes; then
+ cat <<EOF
+const char * TARGET_PROGRAM_NAME = "lt-$outputname"; /* hopefully, no .exe */
+EOF
+ else
+ cat <<EOF
+const char * TARGET_PROGRAM_NAME = "$outputname"; /* hopefully, no .exe */
+EOF
+ fi
+
+
+ cat <<"EOF"
+
+#define LTWRAPPER_OPTION_PREFIX "--lt-"
+
+static const char *ltwrapper_option_prefix = LTWRAPPER_OPTION_PREFIX;
+static const char *dumpscript_opt = LTWRAPPER_OPTION_PREFIX "dump-script";
+static const char *debug_opt = LTWRAPPER_OPTION_PREFIX "debug";
+
+int
+main (int argc, char *argv[])
+{
+ char **newargz;
+ int newargc;
+ char *tmp_pathspec;
+ char *actual_cwrapper_path;
+ char *actual_cwrapper_name;
+ char *target_name;
+ char *lt_argv_zero;
+ intptr_t rval = 127;
+
+ int i;
+
+ program_name = (char *) xstrdup (base_name (argv[0]));
+ newargz = XMALLOC (char *, argc + 1);
+
+ /* very simple arg parsing; don't want to rely on getopt
+ * also, copy all non-cwrapper options to newargz, except
+ * argz[0], which is handled differently
+ */
+ newargc=0;
+ for (i = 1; i < argc; i++)
+ {
+ if (strcmp (argv[i], dumpscript_opt) == 0)
+ {
+EOF
+ case "$host" in
+ *mingw* | *cygwin* )
+ # make stdout use "unix" line endings
+ echo " setmode(1,_O_BINARY);"
+ ;;
+ esac
+
+ cat <<"EOF"
+ lt_dump_script (stdout);
+ return 0;
+ }
+ if (strcmp (argv[i], debug_opt) == 0)
+ {
+ lt_debug = 1;
+ continue;
+ }
+ if (strcmp (argv[i], ltwrapper_option_prefix) == 0)
+ {
+ /* however, if there is an option in the LTWRAPPER_OPTION_PREFIX
+ namespace, but it is not one of the ones we know about and
+ have already dealt with, above (including dump-script), then
+ report an error. Otherwise, targets might begin to believe
+ they are allowed to use options in the LTWRAPPER_OPTION_PREFIX
+ namespace. The first time any user complains about this, we'll
+ need to make LTWRAPPER_OPTION_PREFIX a configure-time option
+ or a configure.ac-settable value.
+ */
+ lt_fatal (__FILE__, __LINE__,
+ "unrecognized %s option: '%s'",
+ ltwrapper_option_prefix, argv[i]);
+ }
+ /* otherwise ... */
+ newargz[++newargc] = xstrdup (argv[i]);
+ }
+ newargz[++newargc] = NULL;
+
+EOF
+ cat <<EOF
+ /* The GNU banner must be the first non-error debug message */
+ lt_debugprintf (__FILE__, __LINE__, "libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\n");
+EOF
+ cat <<"EOF"
+ lt_debugprintf (__FILE__, __LINE__, "(main) argv[0]: %s\n", argv[0]);
+ lt_debugprintf (__FILE__, __LINE__, "(main) program_name: %s\n", program_name);
+
+ tmp_pathspec = find_executable (argv[0]);
+ if (tmp_pathspec == NULL)
+ lt_fatal (__FILE__, __LINE__, "couldn't find %s", argv[0]);
+ lt_debugprintf (__FILE__, __LINE__,
+ "(main) found exe (before symlink chase) at: %s\n",
+ tmp_pathspec);
+
+ actual_cwrapper_path = chase_symlinks (tmp_pathspec);
+ lt_debugprintf (__FILE__, __LINE__,
+ "(main) found exe (after symlink chase) at: %s\n",
+ actual_cwrapper_path);
+ XFREE (tmp_pathspec);
+
+ actual_cwrapper_name = xstrdup (base_name (actual_cwrapper_path));
+ strendzap (actual_cwrapper_path, actual_cwrapper_name);
+
+ /* wrapper name transforms */
+ strendzap (actual_cwrapper_name, ".exe");
+ tmp_pathspec = lt_extend_str (actual_cwrapper_name, ".exe", 1);
+ XFREE (actual_cwrapper_name);
+ actual_cwrapper_name = tmp_pathspec;
+ tmp_pathspec = 0;
+
+ /* target_name transforms -- use actual target program name; might have lt- prefix */
+ target_name = xstrdup (base_name (TARGET_PROGRAM_NAME));
+ strendzap (target_name, ".exe");
+ tmp_pathspec = lt_extend_str (target_name, ".exe", 1);
+ XFREE (target_name);
+ target_name = tmp_pathspec;
+ tmp_pathspec = 0;
+
+ lt_debugprintf (__FILE__, __LINE__,
+ "(main) libtool target name: %s\n",
+ target_name);
+EOF
+
+ cat <<EOF
+ newargz[0] =
+ XMALLOC (char, (strlen (actual_cwrapper_path) +
+ strlen ("$objdir") + 1 + strlen (actual_cwrapper_name) + 1));
+ strcpy (newargz[0], actual_cwrapper_path);
+ strcat (newargz[0], "$objdir");
+ strcat (newargz[0], "/");
+EOF
+
+ cat <<"EOF"
+ /* stop here, and copy so we don't have to do this twice */
+ tmp_pathspec = xstrdup (newargz[0]);
+
+ /* do NOT want the lt- prefix here, so use actual_cwrapper_name */
+ strcat (newargz[0], actual_cwrapper_name);
+
+ /* DO want the lt- prefix here if it exists, so use target_name */
+ lt_argv_zero = lt_extend_str (tmp_pathspec, target_name, 1);
+ XFREE (tmp_pathspec);
+ tmp_pathspec = NULL;
+EOF
+
+ case $host_os in
+ mingw*)
+ cat <<"EOF"
+ {
+ char* p;
+ while ((p = strchr (newargz[0], '\\')) != NULL)
+ {
+ *p = '/';
+ }
+ while ((p = strchr (lt_argv_zero, '\\')) != NULL)
+ {
+ *p = '/';
+ }
+ }
+EOF
+ ;;
+ esac
+
+ cat <<"EOF"
+ XFREE (target_name);
+ XFREE (actual_cwrapper_path);
+ XFREE (actual_cwrapper_name);
+
+ lt_setenv ("BIN_SH", "xpg4"); /* for Tru64 */
+ lt_setenv ("DUALCASE", "1"); /* for MSK sh */
+ /* Update the DLL searchpath. EXE_PATH_VALUE ($dllsearchpath) must
+ be prepended before (that is, appear after) LIB_PATH_VALUE ($temp_rpath)
+ because on Windows, both *_VARNAMEs are PATH but uninstalled
+ libraries must come first. */
+ lt_update_exe_path (EXE_PATH_VARNAME, EXE_PATH_VALUE);
+ lt_update_lib_path (LIB_PATH_VARNAME, LIB_PATH_VALUE);
+
+ lt_debugprintf (__FILE__, __LINE__, "(main) lt_argv_zero: %s\n",
+ nonnull (lt_argv_zero));
+ for (i = 0; i < newargc; i++)
+ {
+ lt_debugprintf (__FILE__, __LINE__, "(main) newargz[%d]: %s\n",
+ i, nonnull (newargz[i]));
+ }
+
+EOF
+
+ case $host_os in
+ mingw*)
+ cat <<"EOF"
+ /* execv doesn't actually work on mingw as expected on unix */
+ newargz = prepare_spawn (newargz);
+ rval = _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz);
+ if (rval == -1)
+ {
+ /* failed to start process */
+ lt_debugprintf (__FILE__, __LINE__,
+ "(main) failed to launch target \"%s\": %s\n",
+ lt_argv_zero, nonnull (strerror (errno)));
+ return 127;
+ }
+ return rval;
+EOF
+ ;;
+ *)
+ cat <<"EOF"
+ execv (lt_argv_zero, newargz);
+ return rval; /* =127, but avoids unused variable warning */
+EOF
+ ;;
+ esac
+
+ cat <<"EOF"
+}
+
+void *
+xmalloc (size_t num)
+{
+ void *p = (void *) malloc (num);
+ if (!p)
+ lt_fatal (__FILE__, __LINE__, "memory exhausted");
+
+ return p;
+}
+
+char *
+xstrdup (const char *string)
+{
+ return string ? strcpy ((char *) xmalloc (strlen (string) + 1),
+ string) : NULL;
+}
+
+const char *
+base_name (const char *name)
+{
+ const char *base;
+
+#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
+ /* Skip over the disk name in MSDOS pathnames. */
+ if (isalpha ((unsigned char) name[0]) && name[1] == ':')
+ name += 2;
+#endif
+
+ for (base = name; *name; name++)
+ if (IS_DIR_SEPARATOR (*name))
+ base = name + 1;
+ return base;
+}
+
+int
+check_executable (const char *path)
+{
+ struct stat st;
+
+ lt_debugprintf (__FILE__, __LINE__, "(check_executable): %s\n",
+ nonempty (path));
+ if ((!path) || (!*path))
+ return 0;
+
+ if ((stat (path, &st) >= 0)
+ && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)))
+ return 1;
+ else
+ return 0;
+}
+
+int
+make_executable (const char *path)
+{
+ int rval = 0;
+ struct stat st;
+
+ lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n",
+ nonempty (path));
+ if ((!path) || (!*path))
+ return 0;
+
+ if (stat (path, &st) >= 0)
+ {
+ rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR);
+ }
+ return rval;
+}
+
+/* Searches for the full path of the wrapper. Returns
+   newly allocated full path name if found, NULL otherwise.
+ Does not chase symlinks, even on platforms that support them.
+*/
+char *
+find_executable (const char *wrapper)
+{
+ int has_slash = 0;
+ const char *p;
+ const char *p_next;
+ /* static buffer for getcwd */
+ char tmp[LT_PATHMAX + 1];
+ int tmp_len;
+ char *concat_name;
+
+ lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n",
+ nonempty (wrapper));
+
+ if ((wrapper == NULL) || (*wrapper == '\0'))
+ return NULL;
+
+ /* Absolute path? */
+#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
+ if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':')
+ {
+ concat_name = xstrdup (wrapper);
+ if (check_executable (concat_name))
+ return concat_name;
+ XFREE (concat_name);
+ }
+ else
+ {
+#endif
+ if (IS_DIR_SEPARATOR (wrapper[0]))
+ {
+ concat_name = xstrdup (wrapper);
+ if (check_executable (concat_name))
+ return concat_name;
+ XFREE (concat_name);
+ }
+#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
+ }
+#endif
+
+ for (p = wrapper; *p; p++)
+ if (*p == '/')
+ {
+ has_slash = 1;
+ break;
+ }
+ if (!has_slash)
+ {
+ /* no slashes; search PATH */
+ const char *path = getenv ("PATH");
+ if (path != NULL)
+ {
+ for (p = path; *p; p = p_next)
+ {
+ const char *q;
+ size_t p_len;
+ for (q = p; *q; q++)
+ if (IS_PATH_SEPARATOR (*q))
+ break;
+ p_len = q - p;
+ p_next = (*q == '\0' ? q : q + 1);
+ if (p_len == 0)
+ {
+ /* empty path: current directory */
+ if (getcwd (tmp, LT_PATHMAX) == NULL)
+ lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
+ nonnull (strerror (errno)));
+ tmp_len = strlen (tmp);
+ concat_name =
+ XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
+ memcpy (concat_name, tmp, tmp_len);
+ concat_name[tmp_len] = '/';
+ strcpy (concat_name + tmp_len + 1, wrapper);
+ }
+ else
+ {
+ concat_name =
+ XMALLOC (char, p_len + 1 + strlen (wrapper) + 1);
+ memcpy (concat_name, p, p_len);
+ concat_name[p_len] = '/';
+ strcpy (concat_name + p_len + 1, wrapper);
+ }
+ if (check_executable (concat_name))
+ return concat_name;
+ XFREE (concat_name);
+ }
+ }
+ /* not found in PATH; assume curdir */
+ }
+ /* Relative path | not found in path: prepend cwd */
+ if (getcwd (tmp, LT_PATHMAX) == NULL)
+ lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
+ nonnull (strerror (errno)));
+ tmp_len = strlen (tmp);
+ concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
+ memcpy (concat_name, tmp, tmp_len);
+ concat_name[tmp_len] = '/';
+ strcpy (concat_name + tmp_len + 1, wrapper);
+
+ if (check_executable (concat_name))
+ return concat_name;
+ XFREE (concat_name);
+ return NULL;
+}
+
+char *
+chase_symlinks (const char *pathspec)
+{
+#ifndef S_ISLNK
+ return xstrdup (pathspec);
+#else
+ char buf[LT_PATHMAX];
+ struct stat s;
+ char *tmp_pathspec = xstrdup (pathspec);
+ char *p;
+ int has_symlinks = 0;
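+  /* Walk from the full path up through its parent directories, lstat()ing
+     each prefix; if any component is a symlink, resolve the original
+     pathspec with realpath() below instead. */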
+ while (strlen (tmp_pathspec) && !has_symlinks)
+ {
+ lt_debugprintf (__FILE__, __LINE__,
+ "checking path component for symlinks: %s\n",
+ tmp_pathspec);
+ if (lstat (tmp_pathspec, &s) == 0)
+ {
+ if (S_ISLNK (s.st_mode) != 0)
+ {
+ has_symlinks = 1;
+ break;
+ }
+
+ /* search backwards for last DIR_SEPARATOR */
+ p = tmp_pathspec + strlen (tmp_pathspec) - 1;
+ while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
+ p--;
+ if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
+ {
+ /* no more DIR_SEPARATORS left */
+ break;
+ }
+ *p = '\0';
+ }
+ else
+ {
+ lt_fatal (__FILE__, __LINE__,
+ "error accessing file \"%s\": %s",
+ tmp_pathspec, nonnull (strerror (errno)));
+ }
+ }
+ XFREE (tmp_pathspec);
+
+ if (!has_symlinks)
+ {
+ return xstrdup (pathspec);
+ }
+
+ tmp_pathspec = realpath (pathspec, buf);
+ if (tmp_pathspec == 0)
+ {
+ lt_fatal (__FILE__, __LINE__,
+ "could not follow symlinks for %s", pathspec);
+ }
+ return xstrdup (tmp_pathspec);
+#endif
+}
+
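+/* If STR ends with PAT, chop that suffix off in place. */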
+char *
+strendzap (char *str, const char *pat)
+{
+ size_t len, patlen;
+
+ assert (str != NULL);
+ assert (pat != NULL);
+
+ len = strlen (str);
+ patlen = strlen (pat);
+
+ if (patlen <= len)
+ {
+ str += len - patlen;
+ if (strcmp (str, pat) == 0)
+ *str = '\0';
+ }
+ return str;
+}
+
+void
+lt_debugprintf (const char *file, int line, const char *fmt, ...)
+{
+ va_list args;
+ if (lt_debug)
+ {
+ (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line);
+ va_start (args, fmt);
+ (void) vfprintf (stderr, fmt, args);
+ va_end (args);
+ }
+}
+
+static void
+lt_error_core (int exit_status, const char *file,
+ int line, const char *mode,
+ const char *message, va_list ap)
+{
+ fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode);
+ vfprintf (stderr, message, ap);
+ fprintf (stderr, ".\n");
+
+ if (exit_status >= 0)
+ exit (exit_status);
+}
+
+void
+lt_fatal (const char *file, int line, const char *message, ...)
+{
+ va_list ap;
+ va_start (ap, message);
+ lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap);
+ va_end (ap);
+}
+
+static const char *
+nonnull (const char *s)
+{
+ return s ? s : "(null)";
+}
+
+static const char *
+nonempty (const char *s)
+{
+ return (s && !*s) ? "(empty)" : nonnull (s);
+}
+
+void
+lt_setenv (const char *name, const char *value)
+{
+ lt_debugprintf (__FILE__, __LINE__,
+ "(lt_setenv) setting '%s' to '%s'\n",
+ nonnull (name), nonnull (value));
+ {
+#ifdef HAVE_SETENV
+ /* always make a copy, for consistency with !HAVE_SETENV */
+ char *str = xstrdup (value);
+ setenv (name, str, 1);
+#else
+ int len = strlen (name) + 1 + strlen (value) + 1;
+ char *str = XMALLOC (char, len);
+ sprintf (str, "%s=%s", name, value);
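+    /* putenv keeps its argument, so str is only freed if putenv fails */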
+ if (putenv (str) != EXIT_SUCCESS)
+ {
+ XFREE (str);
+ }
+#endif
+ }
+}
+
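+/* Return a newly allocated string consisting of ORIG_VALUE with ADD
+   appended (TO_END nonzero) or prepended (TO_END zero); if ORIG_VALUE is
+   null or empty, return a copy of ADD. */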
+char *
+lt_extend_str (const char *orig_value, const char *add, int to_end)
+{
+ char *new_value;
+ if (orig_value && *orig_value)
+ {
+ int orig_value_len = strlen (orig_value);
+ int add_len = strlen (add);
+ new_value = XMALLOC (char, add_len + orig_value_len + 1);
+ if (to_end)
+ {
+ strcpy (new_value, orig_value);
+ strcpy (new_value + orig_value_len, add);
+ }
+ else
+ {
+ strcpy (new_value, add);
+ strcpy (new_value + add_len, orig_value);
+ }
+ }
+ else
+ {
+ new_value = xstrdup (add);
+ }
+ return new_value;
+}
+
+void
+lt_update_exe_path (const char *name, const char *value)
+{
+ lt_debugprintf (__FILE__, __LINE__,
+ "(lt_update_exe_path) modifying '%s' by prepending '%s'\n",
+ nonnull (name), nonnull (value));
+
+ if (name && *name && value && *value)
+ {
+ char *new_value = lt_extend_str (getenv (name), value, 0);
+ /* some systems can't cope with a ':'-terminated path #' */
+ int len = strlen (new_value);
+ while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1]))
+ {
+ new_value[len-1] = '\0';
+ }
+ lt_setenv (name, new_value);
+ XFREE (new_value);
+ }
+}
+
+void
+lt_update_lib_path (const char *name, const char *value)
+{
+ lt_debugprintf (__FILE__, __LINE__,
+ "(lt_update_lib_path) modifying '%s' by prepending '%s'\n",
+ nonnull (name), nonnull (value));
+
+ if (name && *name && value && *value)
+ {
+ char *new_value = lt_extend_str (getenv (name), value, 0);
+ lt_setenv (name, new_value);
+ XFREE (new_value);
+ }
+}
+
+EOF
+ case $host_os in
+ mingw*)
+ cat <<"EOF"
+
+/* Prepares an argument vector before calling spawn().
+ Note that spawn() does not by itself call the command interpreter
+ (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") :
+ ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&v);
+ v.dwPlatformId == VER_PLATFORM_WIN32_NT;
+ }) ? "cmd.exe" : "command.com").
+ Instead it simply concatenates the arguments, separated by ' ', and calls
+ CreateProcess(). We must quote the arguments since Win32 CreateProcess()
+ interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a
+ special way:
+ - Space and tab are interpreted as delimiters. They are not treated as
+ delimiters if they are surrounded by double quotes: "...".
+ - Unescaped double quotes are removed from the input. Their only effect is
+ that within double quotes, space and tab are treated like normal
+ characters.
+ - Backslashes not followed by double quotes are not special.
+ - But 2*n+1 backslashes followed by a double quote become
+ n backslashes followed by a double quote (n >= 0):
+ \" -> "
+ \\\" -> \"
+ \\\\\" -> \\"
+ */
+#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
+#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
+char **
+prepare_spawn (char **argv)
+{
+ size_t argc;
+ char **new_argv;
+ size_t i;
+
+ /* Count number of arguments. */
+ for (argc = 0; argv[argc] != NULL; argc++)
+ ;
+
+ /* Allocate new argument vector. */
+ new_argv = XMALLOC (char *, argc + 1);
+
+ /* Put quoted arguments into the new argument vector. */
+ for (i = 0; i < argc; i++)
+ {
+ const char *string = argv[i];
+
+ if (string[0] == '\0')
+ new_argv[i] = xstrdup ("\"\"");
+ else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL)
+ {
+ int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL);
+ size_t length;
+ unsigned int backslashes;
+ const char *s;
+ char *quoted_string;
+ char *p;
+
+ length = 0;
+ backslashes = 0;
+ if (quote_around)
+ length++;
+ for (s = string; *s != '\0'; s++)
+ {
+ char c = *s;
+ if (c == '"')
+ length += backslashes + 1;
+ length++;
+ if (c == '\\')
+ backslashes++;
+ else
+ backslashes = 0;
+ }
+ if (quote_around)
+ length += backslashes + 1;
+
+ quoted_string = XMALLOC (char, length + 1);
+
+ p = quoted_string;
+ backslashes = 0;
+ if (quote_around)
+ *p++ = '"';
+ for (s = string; *s != '\0'; s++)
+ {
+ char c = *s;
+ if (c == '"')
+ {
+ unsigned int j;
+ for (j = backslashes + 1; j > 0; j--)
+ *p++ = '\\';
+ }
+ *p++ = c;
+ if (c == '\\')
+ backslashes++;
+ else
+ backslashes = 0;
+ }
+ if (quote_around)
+ {
+ unsigned int j;
+ for (j = backslashes; j > 0; j--)
+ *p++ = '\\';
+ *p++ = '"';
+ }
+ *p = '\0';
+
+ new_argv[i] = quoted_string;
+ }
+ else
+ new_argv[i] = (char *) string;
+ }
+ new_argv[argc] = NULL;
+
+ return new_argv;
+}
+EOF
+ ;;
+ esac
+
+ cat <<"EOF"
+void lt_dump_script (FILE* f)
+{
+EOF
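+  # Turn the wrapper script emitted by func_emit_wrapper into the body of
+  # lt_dump_script: fold lines longer than 79 characters, escape backslashes
+  # and double quotes, and emit one fputs ("...\n", f); call per chunk.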
+ func_emit_wrapper yes |
+ $SED -n -e '
+s/^\(.\{79\}\)\(..*\)/\1\
+\2/
+h
+s/\([\\"]\)/\\\1/g
+s/$/\\n/
+s/\([^\n]*\).*/ fputs ("\1", f);/p
+g
+D'
+ cat <<"EOF"
+}
+EOF
+}
+# end: func_emit_cwrapperexe_src
+
+# func_win32_import_lib_p ARG
+# True if ARG is an import lib, as indicated by $file_magic_cmd
+func_win32_import_lib_p ()
+{
+ $opt_debug
+ case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in
+ *import*) : ;;
+ *) false ;;
+ esac
+}
+
+# func_mode_link arg...
+func_mode_link ()
+{
+ $opt_debug
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+ # It is impossible to link a dll without this setting, and
+ # we shouldn't force the makefile maintainer to figure out
+ # which system we are compiling for in order to pass an extra
+ # flag for every libtool invocation.
+ # allow_undefined=no
+
+ # FIXME: Unfortunately, there are problems with the above when trying
+ # to make a dll which has undefined symbols, in which case not
+ # even a static library is built. For now, we need to specify
+ # -no-undefined on the libtool link line when we can be certain
+ # that all symbols are satisfied, otherwise we get a static library.
+ allow_undefined=yes
+ ;;
+ *)
+ allow_undefined=yes
+ ;;
+ esac
+ libtool_args=$nonopt
+ base_compile="$nonopt $@"
+ compile_command=$nonopt
+ finalize_command=$nonopt
+
+ compile_rpath=
+ finalize_rpath=
+ compile_shlibpath=
+ finalize_shlibpath=
+ convenience=
+ old_convenience=
+ deplibs=
+ old_deplibs=
+ compiler_flags=
+ linker_flags=
+ dllsearchpath=
+ lib_search_path=`pwd`
+ inst_prefix_dir=
+ new_inherited_linker_flags=
+
+ avoid_version=no
+ bindir=
+ dlfiles=
+ dlprefiles=
+ dlself=no
+ export_dynamic=no
+ export_symbols=
+ export_symbols_regex=
+ generated=
+ libobjs=
+ ltlibs=
+ module=no
+ no_install=no
+ objs=
+ non_pic_objects=
+ precious_files_regex=
+ prefer_static_libs=no
+ preload=no
+ prev=
+ prevarg=
+ release=
+ rpath=
+ xrpath=
+ perm_rpath=
+ temp_rpath=
+ thread_safe=no
+ vinfo=
+ vinfo_number=no
+ weak_libs=
+ single_module="${wl}-single_module"
+ func_infer_tag $base_compile
+
+ # We need to know -static, to get the right output filenames.
+ for arg
+ do
+ case $arg in
+ -shared)
+ test "$build_libtool_libs" != yes && \
+ func_fatal_configuration "can not build a shared library"
+ build_old_libs=no
+ break
+ ;;
+ -all-static | -static | -static-libtool-libs)
+ case $arg in
+ -all-static)
+ if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
+ func_warning "complete static linking is impossible in this configuration"
+ fi
+ if test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ prefer_static_libs=yes
+ ;;
+ -static)
+ if test -z "$pic_flag" && test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ prefer_static_libs=built
+ ;;
+ -static-libtool-libs)
+ if test -z "$pic_flag" && test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ prefer_static_libs=yes
+ ;;
+ esac
+ build_libtool_libs=no
+ build_old_libs=yes
+ break
+ ;;
+ esac
+ done
+
+ # See if our shared archives depend on static archives.
+ test -n "$old_archive_from_new_cmds" && build_old_libs=yes
+
+ # Go through the arguments, transforming them on the way.
+ while test "$#" -gt 0; do
+ arg="$1"
+ shift
+ func_quote_for_eval "$arg"
+ qarg=$func_quote_for_eval_unquoted_result
+ func_append libtool_args " $func_quote_for_eval_result"
+
+ # If the previous option needs an argument, assign it.
+ if test -n "$prev"; then
+ case $prev in
+ output)
+ func_append compile_command " @OUTPUT@"
+ func_append finalize_command " @OUTPUT@"
+ ;;
+ esac
+
+ case $prev in
+ bindir)
+ bindir="$arg"
+ prev=
+ continue
+ ;;
+ dlfiles|dlprefiles)
+ if test "$preload" = no; then
+ # Add the symbol object into the linking commands.
+ func_append compile_command " @SYMFILE@"
+ func_append finalize_command " @SYMFILE@"
+ preload=yes
+ fi
+ case $arg in
+ *.la | *.lo) ;; # We handle these cases below.
+ force)
+ if test "$dlself" = no; then
+ dlself=needless
+ export_dynamic=yes
+ fi
+ prev=
+ continue
+ ;;
+ self)
+ if test "$prev" = dlprefiles; then
+ dlself=yes
+ elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
+ dlself=yes
+ else
+ dlself=needless
+ export_dynamic=yes
+ fi
+ prev=
+ continue
+ ;;
+ *)
+ if test "$prev" = dlfiles; then
+ func_append dlfiles " $arg"
+ else
+ func_append dlprefiles " $arg"
+ fi
+ prev=
+ continue
+ ;;
+ esac
+ ;;
+ expsyms)
+ export_symbols="$arg"
+ test -f "$arg" \
+ || func_fatal_error "symbol file \`$arg' does not exist"
+ prev=
+ continue
+ ;;
+ expsyms_regex)
+ export_symbols_regex="$arg"
+ prev=
+ continue
+ ;;
+ framework)
+ case $host in
+ *-*-darwin*)
+ case "$deplibs " in
+ *" $qarg.ltframework "*) ;;
+ *) func_append deplibs " $qarg.ltframework" # this is fixed later
+ ;;
+ esac
+ ;;
+ esac
+ prev=
+ continue
+ ;;
+ inst_prefix)
+ inst_prefix_dir="$arg"
+ prev=
+ continue
+ ;;
+ objectlist)
+ if test -f "$arg"; then
+ save_arg=$arg
+ moreargs=
+ for fil in `cat "$save_arg"`
+ do
+# func_append moreargs " $fil"
+ arg=$fil
+ # A libtool-controlled object.
+
+ # Check to see that this really is a libtool object.
+ if func_lalib_unsafe_p "$arg"; then
+ pic_object=
+ non_pic_object=
+
+ # Read the .lo file
+ func_source "$arg"
+
+ if test -z "$pic_object" ||
+ test -z "$non_pic_object" ||
+ test "$pic_object" = none &&
+ test "$non_pic_object" = none; then
+ func_fatal_error "cannot find name of object for \`$arg'"
+ fi
+
+ # Extract subdirectory from the argument.
+ func_dirname "$arg" "/" ""
+ xdir="$func_dirname_result"
+
+ if test "$pic_object" != none; then
+ # Prepend the subdirectory the object is found in.
+ pic_object="$xdir$pic_object"
+
+ if test "$prev" = dlfiles; then
+ if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
+ func_append dlfiles " $pic_object"
+ prev=
+ continue
+ else
+ # If libtool objects are unsupported, then we need to preload.
+ prev=dlprefiles
+ fi
+ fi
+
+ # CHECK ME: I think I busted this. -Ossama
+ if test "$prev" = dlprefiles; then
+ # Preload the old-style object.
+ func_append dlprefiles " $pic_object"
+ prev=
+ fi
+
+ # A PIC object.
+ func_append libobjs " $pic_object"
+ arg="$pic_object"
+ fi
+
+ # Non-PIC object.
+ if test "$non_pic_object" != none; then
+ # Prepend the subdirectory the object is found in.
+ non_pic_object="$xdir$non_pic_object"
+
+ # A standard non-PIC object
+ func_append non_pic_objects " $non_pic_object"
+ if test -z "$pic_object" || test "$pic_object" = none ; then
+ arg="$non_pic_object"
+ fi
+ else
+ # If the PIC object exists, use it instead.
+ # $xdir was prepended to $pic_object above.
+ non_pic_object="$pic_object"
+ func_append non_pic_objects " $non_pic_object"
+ fi
+ else
+ # Only an error if not doing a dry-run.
+ if $opt_dry_run; then
+ # Extract subdirectory from the argument.
+ func_dirname "$arg" "/" ""
+ xdir="$func_dirname_result"
+
+ func_lo2o "$arg"
+ pic_object=$xdir$objdir/$func_lo2o_result
+ non_pic_object=$xdir$func_lo2o_result
+ func_append libobjs " $pic_object"
+ func_append non_pic_objects " $non_pic_object"
+ else
+ func_fatal_error "\`$arg' is not a valid libtool object"
+ fi
+ fi
+ done
+ else
+ func_fatal_error "link input file \`$arg' does not exist"
+ fi
+ arg=$save_arg
+ prev=
+ continue
+ ;;
+ precious_regex)
+ precious_files_regex="$arg"
+ prev=
+ continue
+ ;;
+ release)
+ release="-$arg"
+ prev=
+ continue
+ ;;
+ rpath | xrpath)
+ # We need an absolute path.
+ case $arg in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ func_fatal_error "only absolute run-paths are allowed"
+ ;;
+ esac
+ if test "$prev" = rpath; then
+ case "$rpath " in
+ *" $arg "*) ;;
+ *) func_append rpath " $arg" ;;
+ esac
+ else
+ case "$xrpath " in
+ *" $arg "*) ;;
+ *) func_append xrpath " $arg" ;;
+ esac
+ fi
+ prev=
+ continue
+ ;;
+ shrext)
+ shrext_cmds="$arg"
+ prev=
+ continue
+ ;;
+ weak)
+ func_append weak_libs " $arg"
+ prev=
+ continue
+ ;;
+ xcclinker)
+ func_append linker_flags " $qarg"
+ func_append compiler_flags " $qarg"
+ prev=
+ func_append compile_command " $qarg"
+ func_append finalize_command " $qarg"
+ continue
+ ;;
+ xcompiler)
+ func_append compiler_flags " $qarg"
+ prev=
+ func_append compile_command " $qarg"
+ func_append finalize_command " $qarg"
+ continue
+ ;;
+ xlinker)
+ func_append linker_flags " $qarg"
+ func_append compiler_flags " $wl$qarg"
+ prev=
+ func_append compile_command " $wl$qarg"
+ func_append finalize_command " $wl$qarg"
+ continue
+ ;;
+ *)
+ eval "$prev=\"\$arg\""
+ prev=
+ continue
+ ;;
+ esac
+ fi # test -n "$prev"
+
+ prevarg="$arg"
+
+ case $arg in
+ -all-static)
+ if test -n "$link_static_flag"; then
+ # See comment for -static flag below, for more details.
+ func_append compile_command " $link_static_flag"
+ func_append finalize_command " $link_static_flag"
+ fi
+ continue
+ ;;
+
+ -allow-undefined)
+ # FIXME: remove this flag sometime in the future.
+ func_fatal_error "\`-allow-undefined' must not be used because it is the default"
+ ;;
+
+ -avoid-version)
+ avoid_version=yes
+ continue
+ ;;
+
+ -bindir)
+ prev=bindir
+ continue
+ ;;
+
+ -dlopen)
+ prev=dlfiles
+ continue
+ ;;
+
+ -dlpreopen)
+ prev=dlprefiles
+ continue
+ ;;
+
+ -export-dynamic)
+ export_dynamic=yes
+ continue
+ ;;
+
+ -export-symbols | -export-symbols-regex)
+ if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+	func_fatal_error "more than one -export-symbols argument is not allowed"
+ fi
+ if test "X$arg" = "X-export-symbols"; then
+ prev=expsyms
+ else
+ prev=expsyms_regex
+ fi
+ continue
+ ;;
+
+ -framework)
+ prev=framework
+ continue
+ ;;
+
+ -inst-prefix-dir)
+ prev=inst_prefix
+ continue
+ ;;
+
+ # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
+ # so, if we see these flags be careful not to treat them like -L
+ -L[A-Z][A-Z]*:*)
+ case $with_gcc/$host in
+ no/*-*-irix* | /*-*-irix*)
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ ;;
+ esac
+ continue
+ ;;
+
+ -L*)
+ func_stripname "-L" '' "$arg"
+ if test -z "$func_stripname_result"; then
+ if test "$#" -gt 0; then
+ func_fatal_error "require no space between \`-L' and \`$1'"
+ else
+ func_fatal_error "need path for \`-L' option"
+ fi
+ fi
+ func_resolve_sysroot "$func_stripname_result"
+ dir=$func_resolve_sysroot_result
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ test -z "$absdir" && \
+ func_fatal_error "cannot determine absolute directory name of \`$dir'"
+ dir="$absdir"
+ ;;
+ esac
+ case "$deplibs " in
+ *" -L$dir "* | *" $arg "*)
+ # Will only happen for absolute or sysroot arguments
+ ;;
+ *)
+ # Preserve sysroot, but never include relative directories
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;;
+ *) func_append deplibs " -L$dir" ;;
+ esac
+ func_append lib_search_path " $dir"
+ ;;
+ esac
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
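+	# DLLs live next to the executables, so also add the sibling .../bin
+	# directory of each -L.../lib directory to the DLL search path.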
+ testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'`
+ case :$dllsearchpath: in
+ *":$dir:"*) ;;
+ ::) dllsearchpath=$dir;;
+ *) func_append dllsearchpath ":$dir";;
+ esac
+ case :$dllsearchpath: in
+ *":$testbindir:"*) ;;
+ ::) dllsearchpath=$testbindir;;
+ *) func_append dllsearchpath ":$testbindir";;
+ esac
+ ;;
+ esac
+ continue
+ ;;
+
+ -l*)
+ if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*)
+ # These systems don't actually have a C or math library (as such)
+ continue
+ ;;
+ *-*-os2*)
+ # These systems don't actually have a C library (as such)
+ test "X$arg" = "X-lc" && continue
+ ;;
+ *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+ # Do not include libc due to us having libc/libc_r.
+ test "X$arg" = "X-lc" && continue
+ ;;
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # Rhapsody C and math libraries are in the System framework
+ func_append deplibs " System.ltframework"
+ continue
+ ;;
+ *-*-sco3.2v5* | *-*-sco5v6*)
+ # Causes problems with __ctype
+ test "X$arg" = "X-lc" && continue
+ ;;
+ *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+ # Compiler inserts libc in the correct place for threads to work
+ test "X$arg" = "X-lc" && continue
+ ;;
+ esac
+ elif test "X$arg" = "X-lc_r"; then
+ case $host in
+ *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+ # Do not include libc_r directly, use -pthread flag.
+ continue
+ ;;
+ esac
+ fi
+ func_append deplibs " $arg"
+ continue
+ ;;
+
+ -module)
+ module=yes
+ continue
+ ;;
+
+ # Tru64 UNIX uses -model [arg] to determine the layout of C++
+ # classes, name mangling, and exception handling.
+ # Darwin uses the -arch flag to determine output architecture.
+ -model|-arch|-isysroot|--sysroot)
+ func_append compiler_flags " $arg"
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ prev=xcompiler
+ continue
+ ;;
+
+ -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
+ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*)
+ func_append compiler_flags " $arg"
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ case "$new_inherited_linker_flags " in
+ *" $arg "*) ;;
+ * ) func_append new_inherited_linker_flags " $arg" ;;
+ esac
+ continue
+ ;;
+
+ -multi_module)
+ single_module="${wl}-multi_module"
+ continue
+ ;;
+
+ -no-fast-install)
+ fast_install=no
+ continue
+ ;;
+
+ -no-install)
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*)
+ # The PATH hackery in wrapper scripts is required on Windows
+ # and Darwin in order for the loader to find any dlls it needs.
+ func_warning "\`-no-install' is ignored for $host"
+ func_warning "assuming \`-no-fast-install' instead"
+ fast_install=no
+ ;;
+ *) no_install=yes ;;
+ esac
+ continue
+ ;;
+
+ -no-undefined)
+ allow_undefined=no
+ continue
+ ;;
+
+ -objectlist)
+ prev=objectlist
+ continue
+ ;;
+
+ -o) prev=output ;;
+
+ -precious-files-regex)
+ prev=precious_regex
+ continue
+ ;;
+
+ -release)
+ prev=release
+ continue
+ ;;
+
+ -rpath)
+ prev=rpath
+ continue
+ ;;
+
+ -R)
+ prev=xrpath
+ continue
+ ;;
+
+ -R*)
+ func_stripname '-R' '' "$arg"
+ dir=$func_stripname_result
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ =*)
+ func_stripname '=' '' "$dir"
+ dir=$lt_sysroot$func_stripname_result
+ ;;
+ *)
+ func_fatal_error "only absolute run-paths are allowed"
+ ;;
+ esac
+ case "$xrpath " in
+ *" $dir "*) ;;
+ *) func_append xrpath " $dir" ;;
+ esac
+ continue
+ ;;
+
+ -shared)
+ # The effects of -shared are defined in a previous loop.
+ continue
+ ;;
+
+ -shrext)
+ prev=shrext
+ continue
+ ;;
+
+ -static | -static-libtool-libs)
+ # The effects of -static are defined in a previous loop.
+ # We used to do the same as -all-static on platforms that
+ # didn't have a PIC flag, but the assumption that the effects
+ # would be equivalent was wrong. It would break on at least
+ # Digital Unix and AIX.
+ continue
+ ;;
+
+ -thread-safe)
+ thread_safe=yes
+ continue
+ ;;
+
+ -version-info)
+ prev=vinfo
+ continue
+ ;;
+
+ -version-number)
+ prev=vinfo
+ vinfo_number=yes
+ continue
+ ;;
+
+ -weak)
+ prev=weak
+ continue
+ ;;
+
+ -Wc,*)
+ func_stripname '-Wc,' '' "$arg"
+ args=$func_stripname_result
+ arg=
+ save_ifs="$IFS"; IFS=','
+ for flag in $args; do
+ IFS="$save_ifs"
+ func_quote_for_eval "$flag"
+ func_append arg " $func_quote_for_eval_result"
+ func_append compiler_flags " $func_quote_for_eval_result"
+ done
+ IFS="$save_ifs"
+ func_stripname ' ' '' "$arg"
+ arg=$func_stripname_result
+ ;;
+
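+      # Split the comma-separated -Wl, list: each flag goes to $linker_flags
+      # as-is and to the compiler driver prefixed with $wl.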
+ -Wl,*)
+ func_stripname '-Wl,' '' "$arg"
+ args=$func_stripname_result
+ arg=
+ save_ifs="$IFS"; IFS=','
+ for flag in $args; do
+ IFS="$save_ifs"
+ func_quote_for_eval "$flag"
+ func_append arg " $wl$func_quote_for_eval_result"
+ func_append compiler_flags " $wl$func_quote_for_eval_result"
+ func_append linker_flags " $func_quote_for_eval_result"
+ done
+ IFS="$save_ifs"
+ func_stripname ' ' '' "$arg"
+ arg=$func_stripname_result
+ ;;
+
+ -Xcompiler)
+ prev=xcompiler
+ continue
+ ;;
+
+ -Xlinker)
+ prev=xlinker
+ continue
+ ;;
+
+ -XCClinker)
+ prev=xcclinker
+ continue
+ ;;
+
+ # -msg_* for osf cc
+ -msg_*)
+ func_quote_for_eval "$arg"
+ arg="$func_quote_for_eval_result"
+ ;;
+
+ # Flags to be passed through unchanged, with rationale:
+ # -64, -mips[0-9] enable 64-bit mode for the SGI compiler
+ # -r[0-9][0-9]* specify processor for the SGI compiler
+ # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler
+ # +DA*, +DD* enable 64-bit mode for the HP compiler
+ # -q* compiler args for the IBM compiler
+ # -m*, -t[45]*, -txscale* architecture-specific flags for GCC
+ # -F/path path to uninstalled frameworks, gcc on darwin
+ # -p, -pg, --coverage, -fprofile-* profiling flags for GCC
+ # @file GCC response files
+ # -tp=* Portland pgcc target processor selection
+ # --sysroot=* for sysroot support
+ # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization
+ -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \
+ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \
+ -O*|-flto*|-fwhopr*|-fuse-linker-plugin)
+ func_quote_for_eval "$arg"
+ arg="$func_quote_for_eval_result"
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ func_append compiler_flags " $arg"
+ continue
+ ;;
+
+ # Some other compiler flag.
+ -* | +*)
+ func_quote_for_eval "$arg"
+ arg="$func_quote_for_eval_result"
+ ;;
+
+ *.$objext)
+ # A standard object.
+ func_append objs " $arg"
+ ;;
+
+ *.lo)
+ # A libtool-controlled object.
+
+ # Check to see that this really is a libtool object.
+ if func_lalib_unsafe_p "$arg"; then
+ pic_object=
+ non_pic_object=
+
+ # Read the .lo file
+ func_source "$arg"
+
+ if test -z "$pic_object" ||
+ test -z "$non_pic_object" ||
+ test "$pic_object" = none &&
+ test "$non_pic_object" = none; then
+ func_fatal_error "cannot find name of object for \`$arg'"
+ fi
+
+ # Extract subdirectory from the argument.
+ func_dirname "$arg" "/" ""
+ xdir="$func_dirname_result"
+
+ if test "$pic_object" != none; then
+ # Prepend the subdirectory the object is found in.
+ pic_object="$xdir$pic_object"
+
+ if test "$prev" = dlfiles; then
+ if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
+ func_append dlfiles " $pic_object"
+ prev=
+ continue
+ else
+ # If libtool objects are unsupported, then we need to preload.
+ prev=dlprefiles
+ fi
+ fi
+
+ # CHECK ME: I think I busted this. -Ossama
+ if test "$prev" = dlprefiles; then
+ # Preload the old-style object.
+ func_append dlprefiles " $pic_object"
+ prev=
+ fi
+
+ # A PIC object.
+ func_append libobjs " $pic_object"
+ arg="$pic_object"
+ fi
+
+ # Non-PIC object.
+ if test "$non_pic_object" != none; then
+ # Prepend the subdirectory the object is found in.
+ non_pic_object="$xdir$non_pic_object"
+
+ # A standard non-PIC object
+ func_append non_pic_objects " $non_pic_object"
+ if test -z "$pic_object" || test "$pic_object" = none ; then
+ arg="$non_pic_object"
+ fi
+ else
+ # If the PIC object exists, use it instead.
+ # $xdir was prepended to $pic_object above.
+ non_pic_object="$pic_object"
+ func_append non_pic_objects " $non_pic_object"
+ fi
+ else
+ # Only an error if not doing a dry-run.
+ if $opt_dry_run; then
+ # Extract subdirectory from the argument.
+ func_dirname "$arg" "/" ""
+ xdir="$func_dirname_result"
+
+ func_lo2o "$arg"
+ pic_object=$xdir$objdir/$func_lo2o_result
+ non_pic_object=$xdir$func_lo2o_result
+ func_append libobjs " $pic_object"
+ func_append non_pic_objects " $non_pic_object"
+ else
+ func_fatal_error "\`$arg' is not a valid libtool object"
+ fi
+ fi
+ ;;
+
+ *.$libext)
+ # An archive.
+ func_append deplibs " $arg"
+ func_append old_deplibs " $arg"
+ continue
+ ;;
+
+ *.la)
+ # A libtool-controlled library.
+
+ func_resolve_sysroot "$arg"
+ if test "$prev" = dlfiles; then
+ # This library was specified with -dlopen.
+ func_append dlfiles " $func_resolve_sysroot_result"
+ prev=
+ elif test "$prev" = dlprefiles; then
+ # The library was specified with -dlpreopen.
+ func_append dlprefiles " $func_resolve_sysroot_result"
+ prev=
+ else
+ func_append deplibs " $func_resolve_sysroot_result"
+ fi
+ continue
+ ;;
+
+ # Some other compiler argument.
+ *)
+ # Unknown arguments in both finalize_command and compile_command need
+ # to be aesthetically quoted because they are evaled later.
+ func_quote_for_eval "$arg"
+ arg="$func_quote_for_eval_result"
+ ;;
+ esac # arg
+
+ # Now actually substitute the argument into the commands.
+ if test -n "$arg"; then
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ fi
+ done # argument parsing loop
+
+ test -n "$prev" && \
+ func_fatal_help "the \`$prevarg' option requires an argument"
+
+ if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
+ eval arg=\"$export_dynamic_flag_spec\"
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ fi
+
+ oldlibs=
+ # calculate the name of the file, without its directory
+ func_basename "$output"
+ outputname="$func_basename_result"
+ libobjs_save="$libobjs"
+
+ if test -n "$shlibpath_var"; then
+ # get the directories listed in $shlibpath_var
+ eval shlib_search_path=\`\$ECHO \"\${$shlibpath_var}\" \| \$SED \'s/:/ /g\'\`
+ else
+ shlib_search_path=
+ fi
+ eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
+ eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
+
+ func_dirname "$output" "/" ""
+ output_objdir="$func_dirname_result$objdir"
+ func_to_tool_file "$output_objdir/"
+ tool_output_objdir=$func_to_tool_file_result
+ # Create the object directory.
+ func_mkdir_p "$output_objdir"
+
+ # Determine the type of output
+ case $output in
+ "")
+ func_fatal_help "you must specify an output file"
+ ;;
+ *.$libext) linkmode=oldlib ;;
+ *.lo | *.$objext) linkmode=obj ;;
+ *.la) linkmode=lib ;;
+ *) linkmode=prog ;; # Anything else should be a program.
+ esac
+
+ specialdeplibs=
+
+ libs=
+ # Find all interdependent deplibs by searching for libraries
+ # that are linked more than once (e.g. -la -lb -la)
+ for deplib in $deplibs; do
+ if $opt_preserve_dup_deps ; then
+ case "$libs " in
+ *" $deplib "*) func_append specialdeplibs " $deplib" ;;
+ esac
+ fi
+ func_append libs " $deplib"
+ done
+
+ if test "$linkmode" = lib; then
+ libs="$predeps $libs $compiler_lib_search_path $postdeps"
+
+ # Compute libraries that are listed more than once in $predeps
+ # $postdeps and mark them as special (i.e., whose duplicates are
+ # not to be eliminated).
+ pre_post_deps=
+ if $opt_duplicate_compiler_generated_deps; then
+ for pre_post_dep in $predeps $postdeps; do
+ case "$pre_post_deps " in
+ *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;;
+ esac
+ func_append pre_post_deps " $pre_post_dep"
+ done
+ fi
+ pre_post_deps=
+ fi
+
+ deplibs=
+ newdependency_libs=
+ newlib_search_path=
+ need_relink=no # whether we're linking any uninstalled libtool libraries
+ notinst_deplibs= # not-installed libtool libraries
+ notinst_path= # paths that contain not-installed libtool libraries
+
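+  # Each output type is handled in one or more passes over its deplibs
+  # (roughly: conv expands convenience libraries, scan collects search paths
+  # for programs, dlopen/dlpreopen process -dlopen/-dlpreopen files, and
+  # link computes the final link line).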
+ case $linkmode in
+ lib)
+ passes="conv dlpreopen link"
+ for file in $dlfiles $dlprefiles; do
+ case $file in
+ *.la) ;;
+ *)
+ func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file"
+ ;;
+ esac
+ done
+ ;;
+ prog)
+ compile_deplibs=
+ finalize_deplibs=
+ alldeplibs=no
+ newdlfiles=
+ newdlprefiles=
+ passes="conv scan dlopen dlpreopen link"
+ ;;
+ *) passes="conv"
+ ;;
+ esac
+
+ for pass in $passes; do
+ # The preopen pass in lib mode reverses $deplibs; put it back here
+ # so that -L comes before libs that need it for instance...
+ if test "$linkmode,$pass" = "lib,link"; then
+ ## FIXME: Find the place where the list is rebuilt in the wrong
+ ## order, and fix it there properly
+ tmp_deplibs=
+ for deplib in $deplibs; do
+ tmp_deplibs="$deplib $tmp_deplibs"
+ done
+ deplibs="$tmp_deplibs"
+ fi
+
+ if test "$linkmode,$pass" = "lib,link" ||
+ test "$linkmode,$pass" = "prog,scan"; then
+ libs="$deplibs"
+ deplibs=
+ fi
+ if test "$linkmode" = prog; then
+ case $pass in
+ dlopen) libs="$dlfiles" ;;
+ dlpreopen) libs="$dlprefiles" ;;
+ link)
+ libs="$deplibs %DEPLIBS%"
+ test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs"
+ ;;
+ esac
+ fi
+ if test "$linkmode,$pass" = "lib,dlpreopen"; then
+ # Collect and forward deplibs of preopened libtool libs
+ for lib in $dlprefiles; do
+ # Ignore non-libtool-libs
+ dependency_libs=
+ func_resolve_sysroot "$lib"
+ case $lib in
+ *.la) func_source "$func_resolve_sysroot_result" ;;
+ esac
+
+ # Collect preopened libtool deplibs, except any this library
+ # has declared as weak libs
+ for deplib in $dependency_libs; do
+ func_basename "$deplib"
+ deplib_base=$func_basename_result
+ case " $weak_libs " in
+ *" $deplib_base "*) ;;
+ *) func_append deplibs " $deplib" ;;
+ esac
+ done
+ done
+ libs="$dlprefiles"
+ fi
+ if test "$pass" = dlopen; then
+ # Collect dlpreopened libraries
+ save_deplibs="$deplibs"
+ deplibs=
+ fi
+
+ for deplib in $libs; do
+ lib=
+ found=no
+ case $deplib in
+ -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
+ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*)
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ func_append compiler_flags " $deplib"
+ if test "$linkmode" = lib ; then
+ case "$new_inherited_linker_flags " in
+ *" $deplib "*) ;;
+ * ) func_append new_inherited_linker_flags " $deplib" ;;
+ esac
+ fi
+ fi
+ continue
+ ;;
+ -l*)
+ if test "$linkmode" != lib && test "$linkmode" != prog; then
+ func_warning "\`-l' is ignored for archives/objects"
+ continue
+ fi
+ func_stripname '-l' '' "$deplib"
+ name=$func_stripname_result
+ if test "$linkmode" = lib; then
+ searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path"
+ else
+ searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path"
+ fi
+ for searchdir in $searchdirs; do
+ for search_ext in .la $std_shrext .so .a; do
+ # Search the libtool library
+ lib="$searchdir/lib${name}${search_ext}"
+ if test -f "$lib"; then
+ if test "$search_ext" = ".la"; then
+ found=yes
+ else
+ found=no
+ fi
+ break 2
+ fi
+ done
+ done
+ if test "$found" != yes; then
+ # deplib doesn't seem to be a libtool library
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ deplibs="$deplib $deplibs"
+ test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
+ fi
+ continue
+ else # deplib is a libtool library
+ # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib,
+ # We need to do some special things here, and not later.
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ case " $predeps $postdeps " in
+ *" $deplib "*)
+ if func_lalib_p "$lib"; then
+ library_names=
+ old_library=
+ func_source "$lib"
+ for l in $old_library $library_names; do
+ ll="$l"
+ done
+ if test "X$ll" = "X$old_library" ; then # only static version available
+ found=no
+ func_dirname "$lib" "" "."
+ ladir="$func_dirname_result"
+ lib=$ladir/$old_library
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ deplibs="$deplib $deplibs"
+ test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
+ fi
+ continue
+ fi
+ fi
+ ;;
+ *) ;;
+ esac
+ fi
+ fi
+ ;; # -l
+ *.ltframework)
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ deplibs="$deplib $deplibs"
+ if test "$linkmode" = lib ; then
+ case "$new_inherited_linker_flags " in
+ *" $deplib "*) ;;
+ * ) func_append new_inherited_linker_flags " $deplib" ;;
+ esac
+ fi
+ fi
+ continue
+ ;;
+ -L*)
+ case $linkmode in
+ lib)
+ deplibs="$deplib $deplibs"
+ test "$pass" = conv && continue
+ newdependency_libs="$deplib $newdependency_libs"
+ func_stripname '-L' '' "$deplib"
+ func_resolve_sysroot "$func_stripname_result"
+ func_append newlib_search_path " $func_resolve_sysroot_result"
+ ;;
+ prog)
+ if test "$pass" = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ if test "$pass" = scan; then
+ deplibs="$deplib $deplibs"
+ else
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ fi
+ func_stripname '-L' '' "$deplib"
+ func_resolve_sysroot "$func_stripname_result"
+ func_append newlib_search_path " $func_resolve_sysroot_result"
+ ;;
+ *)
+ func_warning "\`-L' is ignored for archives/objects"
+ ;;
+ esac # linkmode
+ continue
+ ;; # -L
+ -R*)
+ if test "$pass" = link; then
+ func_stripname '-R' '' "$deplib"
+ func_resolve_sysroot "$func_stripname_result"
+ dir=$func_resolve_sysroot_result
+ # Make sure the xrpath contains only unique directories.
+ case "$xrpath " in
+ *" $dir "*) ;;
+ *) func_append xrpath " $dir" ;;
+ esac
+ fi
+ deplibs="$deplib $deplibs"
+ continue
+ ;;
+ *.la)
+ func_resolve_sysroot "$deplib"
+ lib=$func_resolve_sysroot_result
+ ;;
+ *.$libext)
+ if test "$pass" = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ case $linkmode in
+ lib)
+ # Linking convenience modules into shared libraries is allowed,
+ # but linking other static libraries is non-portable.
+ case " $dlpreconveniencelibs " in
+ *" $deplib "*) ;;
+ *)
+ valid_a_lib=no
+ case $deplibs_check_method in
+ match_pattern*)
+ set dummy $deplibs_check_method; shift
+ match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+ if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \
+ | $EGREP "$match_pattern_regex" > /dev/null; then
+ valid_a_lib=yes
+ fi
+ ;;
+ pass_all)
+ valid_a_lib=yes
+ ;;
+ esac
+ if test "$valid_a_lib" != yes; then
+ echo
+ $ECHO "*** Warning: Trying to link with static lib archive $deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have"
+	        echo "*** because the file extension .$libext of this argument makes me believe"
+ echo "*** that it is just a static archive that I should not use here."
+ else
+ echo
+ $ECHO "*** Warning: Linking the shared library $output against the"
+ $ECHO "*** static library $deplib is not portable!"
+ deplibs="$deplib $deplibs"
+ fi
+ ;;
+ esac
+ continue
+ ;;
+ prog)
+ if test "$pass" != link; then
+ deplibs="$deplib $deplibs"
+ else
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ fi
+ continue
+ ;;
+ esac # linkmode
+ ;; # *.$libext
+ *.lo | *.$objext)
+ if test "$pass" = conv; then
+ deplibs="$deplib $deplibs"
+ elif test "$linkmode" = prog; then
+ if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
+ # If there is no dlopen support or we're linking statically,
+ # we need to preload.
+ func_append newdlprefiles " $deplib"
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ func_append newdlfiles " $deplib"
+ fi
+ fi
+ continue
+ ;;
+ %DEPLIBS%)
+ alldeplibs=yes
+ continue
+ ;;
+ esac # case $deplib
+
+ if test "$found" = yes || test -f "$lib"; then :
+ else
+ func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'"
+ fi
+
+ # Check to see that this really is a libtool archive.
+ func_lalib_unsafe_p "$lib" \
+ || func_fatal_error "\`$lib' is not a valid libtool archive"
+
+ func_dirname "$lib" "" "."
+ ladir="$func_dirname_result"
+
+ dlname=
+ dlopen=
+ dlpreopen=
+ libdir=
+ library_names=
+ old_library=
+ inherited_linker_flags=
+ # If the library was installed with an old release of libtool,
+ # it will not redefine variables installed, or shouldnotlink
+ installed=yes
+ shouldnotlink=no
+ avoidtemprpath=
+
+
+ # Read the .la file
+ func_source "$lib"
+
+ # Convert "-framework foo" to "foo.ltframework"
+ if test -n "$inherited_linker_flags"; then
+ tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'`
+ for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do
+ case " $new_inherited_linker_flags " in
+ *" $tmp_inherited_linker_flag "*) ;;
+ *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";;
+ esac
+ done
+ fi
+ dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+ if test "$linkmode,$pass" = "lib,link" ||
+ test "$linkmode,$pass" = "prog,scan" ||
+ { test "$linkmode" != prog && test "$linkmode" != lib; }; then
+ test -n "$dlopen" && func_append dlfiles " $dlopen"
+ test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen"
+ fi
+
+ if test "$pass" = conv; then
+ # Only check for convenience libraries
+ deplibs="$lib $deplibs"
+ if test -z "$libdir"; then
+ if test -z "$old_library"; then
+ func_fatal_error "cannot find name of link library for \`$lib'"
+ fi
+ # It is a libtool convenience library, so add in its objects.
+ func_append convenience " $ladir/$objdir/$old_library"
+ func_append old_convenience " $ladir/$objdir/$old_library"
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ deplibs="$deplib $deplibs"
+ if $opt_preserve_dup_deps ; then
+ case "$tmp_libs " in
+ *" $deplib "*) func_append specialdeplibs " $deplib" ;;
+ esac
+ fi
+ func_append tmp_libs " $deplib"
+ done
+ elif test "$linkmode" != prog && test "$linkmode" != lib; then
+ func_fatal_error "\`$lib' is not a convenience library"
+ fi
+ continue
+ fi # $pass = conv
+
+
+ # Get the name of the library we link against.
+ linklib=
+ if test -n "$old_library" &&
+ { test "$prefer_static_libs" = yes ||
+ test "$prefer_static_libs,$installed" = "built,no"; }; then
+ linklib=$old_library
+ else
+ for l in $old_library $library_names; do
+ linklib="$l"
+ done
+ fi
+ if test -z "$linklib"; then
+ func_fatal_error "cannot find name of link library for \`$lib'"
+ fi
+
+ # This library was specified with -dlopen.
+ if test "$pass" = dlopen; then
+ if test -z "$libdir"; then
+ func_fatal_error "cannot -dlopen a convenience library: \`$lib'"
+ fi
+ if test -z "$dlname" ||
+ test "$dlopen_support" != yes ||
+ test "$build_libtool_libs" = no; then
+ # If there is no dlname, no dlopen support or we're linking
+ # statically, we need to preload. We also need to preload any
+ # dependent libraries so libltdl's deplib preloader doesn't
+ # bomb out in the load deplibs phase.
+ func_append dlprefiles " $lib $dependency_libs"
+ else
+ func_append newdlfiles " $lib"
+ fi
+ continue
+ fi # $pass = dlopen
+
+ # We need an absolute path.
+ case $ladir in
+ [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
+ *)
+ abs_ladir=`cd "$ladir" && pwd`
+ if test -z "$abs_ladir"; then
+ func_warning "cannot determine absolute directory name of \`$ladir'"
+ func_warning "passing it literally to the linker, although it might fail"
+ abs_ladir="$ladir"
+ fi
+ ;;
+ esac
+ func_basename "$lib"
+ laname="$func_basename_result"
+
+ # Find the relevant object directory and library name.
+ if test "X$installed" = Xyes; then
+ if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+ func_warning "library \`$lib' was moved."
+ dir="$ladir"
+ absdir="$abs_ladir"
+ libdir="$abs_ladir"
+ else
+ dir="$lt_sysroot$libdir"
+ absdir="$lt_sysroot$libdir"
+ fi
+ test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes
+ else
+ if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+ dir="$ladir"
+ absdir="$abs_ladir"
+ # Remove this search path later
+ func_append notinst_path " $abs_ladir"
+ else
+ dir="$ladir/$objdir"
+ absdir="$abs_ladir/$objdir"
+ # Remove this search path later
+ func_append notinst_path " $abs_ladir"
+ fi
+ fi # $installed = yes
+ func_stripname 'lib' '.la' "$laname"
+ name=$func_stripname_result
+
+ # This library was specified with -dlpreopen.
+ if test "$pass" = dlpreopen; then
+ if test -z "$libdir" && test "$linkmode" = prog; then
+ func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'"
+ fi
+ case "$host" in
+ # special handling for platforms with PE-DLLs.
+ *cygwin* | *mingw* | *cegcc* )
+ # Linker will automatically link against shared library if both
+ # static and shared are present. Therefore, ensure we extract
+ # symbols from the import library if a shared library is present
+ # (otherwise, the dlopen module name will be incorrect). We do
+ # this by putting the import library name into $newdlprefiles.
+ # We recover the dlopen module name by 'saving' the la file
+ # name in a special purpose variable, and (later) extracting the
+ # dlname from the la file.
+ if test -n "$dlname"; then
+ func_tr_sh "$dir/$linklib"
+ eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname"
+ func_append newdlprefiles " $dir/$linklib"
+ else
+ func_append newdlprefiles " $dir/$old_library"
+ # Keep a list of preopened convenience libraries to check
+ # that they are being used correctly in the link pass.
+ test -z "$libdir" && \
+ func_append dlpreconveniencelibs " $dir/$old_library"
+ fi
+ ;;
+ * )
+ # Prefer using a static library (so that no silly _DYNAMIC symbols
+ # are required to link).
+ if test -n "$old_library"; then
+ func_append newdlprefiles " $dir/$old_library"
+ # Keep a list of preopened convenience libraries to check
+ # that they are being used correctly in the link pass.
+ test -z "$libdir" && \
+ func_append dlpreconveniencelibs " $dir/$old_library"
+ # Otherwise, use the dlname, so that lt_dlopen finds it.
+ elif test -n "$dlname"; then
+ func_append newdlprefiles " $dir/$dlname"
+ else
+ func_append newdlprefiles " $dir/$linklib"
+ fi
+ ;;
+ esac
+ fi # $pass = dlpreopen
+
+ if test -z "$libdir"; then
+ # Link the convenience library
+ if test "$linkmode" = lib; then
+ deplibs="$dir/$old_library $deplibs"
+ elif test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$dir/$old_library $compile_deplibs"
+ finalize_deplibs="$dir/$old_library $finalize_deplibs"
+ else
+ deplibs="$lib $deplibs" # used for prog,scan pass
+ fi
+ continue
+ fi
+
+
+ if test "$linkmode" = prog && test "$pass" != link; then
+ func_append newlib_search_path " $ladir"
+ deplibs="$lib $deplibs"
+
+ linkalldeplibs=no
+ if test "$link_all_deplibs" != no || test -z "$library_names" ||
+ test "$build_libtool_libs" = no; then
+ linkalldeplibs=yes
+ fi
+
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ case $deplib in
+ -L*) func_stripname '-L' '' "$deplib"
+ func_resolve_sysroot "$func_stripname_result"
+ func_append newlib_search_path " $func_resolve_sysroot_result"
+ ;;
+ esac
+ # Need to link against all dependency_libs?
+ if test "$linkalldeplibs" = yes; then
+ deplibs="$deplib $deplibs"
+ else
+ # Need to hardcode shared library paths
+ # or/and link against static libraries
+ newdependency_libs="$deplib $newdependency_libs"
+ fi
+ if $opt_preserve_dup_deps ; then
+ case "$tmp_libs " in
+ *" $deplib "*) func_append specialdeplibs " $deplib" ;;
+ esac
+ fi
+ func_append tmp_libs " $deplib"
+ done # for deplib
+ continue
+ fi # $linkmode = prog...
+
+ if test "$linkmode,$pass" = "prog,link"; then
+ if test -n "$library_names" &&
+ { { test "$prefer_static_libs" = no ||
+ test "$prefer_static_libs,$installed" = "built,yes"; } ||
+ test -z "$old_library"; }; then
+ # We need to hardcode the library path
+ if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then
+ # Make sure the rpath contains only unique directories.
+ case "$temp_rpath:" in
+ *"$absdir:"*) ;;
+ *) func_append temp_rpath "$absdir:" ;;
+ esac
+ fi
+
+ # Hardcode the library path.
+ # Skip directories that are in the system default run-time
+ # search path.
+ case " $sys_lib_dlsearch_path " in
+ *" $absdir "*) ;;
+ *)
+ case "$compile_rpath " in
+ *" $absdir "*) ;;
+ *) func_append compile_rpath " $absdir" ;;
+ esac
+ ;;
+ esac
+ case " $sys_lib_dlsearch_path " in
+ *" $libdir "*) ;;
+ *)
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) func_append finalize_rpath " $libdir" ;;
+ esac
+ ;;
+ esac
+ fi # $linkmode,$pass = prog,link...
+
+ if test "$alldeplibs" = yes &&
+ { test "$deplibs_check_method" = pass_all ||
+ { test "$build_libtool_libs" = yes &&
+ test -n "$library_names"; }; }; then
+ # We only need to search for static libraries
+ continue
+ fi
+ fi
+
+ link_static=no # Whether the deplib will be linked statically
+ use_static_libs=$prefer_static_libs
+ if test "$use_static_libs" = built && test "$installed" = yes; then
+ use_static_libs=no
+ fi
+ if test -n "$library_names" &&
+ { test "$use_static_libs" = no || test -z "$old_library"; }; then
+ case $host in
+ *cygwin* | *mingw* | *cegcc*)
+ # No point in relinking DLLs because paths are not encoded
+ func_append notinst_deplibs " $lib"
+ need_relink=no
+ ;;
+ *)
+ if test "$installed" = no; then
+ func_append notinst_deplibs " $lib"
+ need_relink=yes
+ fi
+ ;;
+ esac
+ # This is a shared library
+
+ # Warn about portability, can't link against -module's on some
+ # systems (darwin). Don't bleat about dlopened modules though!
+ dlopenmodule=""
+ for dlpremoduletest in $dlprefiles; do
+ if test "X$dlpremoduletest" = "X$lib"; then
+ dlopenmodule="$dlpremoduletest"
+ break
+ fi
+ done
+ if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then
+ echo
+ if test "$linkmode" = prog; then
+ $ECHO "*** Warning: Linking the executable $output against the loadable module"
+ else
+ $ECHO "*** Warning: Linking the shared library $output against the loadable module"
+ fi
+ $ECHO "*** $linklib is not portable!"
+ fi
+ if test "$linkmode" = lib &&
+ test "$hardcode_into_libs" = yes; then
+ # Hardcode the library path.
+ # Skip directories that are in the system default run-time
+ # search path.
+ case " $sys_lib_dlsearch_path " in
+ *" $absdir "*) ;;
+ *)
+ case "$compile_rpath " in
+ *" $absdir "*) ;;
+ *) func_append compile_rpath " $absdir" ;;
+ esac
+ ;;
+ esac
+ case " $sys_lib_dlsearch_path " in
+ *" $libdir "*) ;;
+ *)
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) func_append finalize_rpath " $libdir" ;;
+ esac
+ ;;
+ esac
+ fi
+
+ if test -n "$old_archive_from_expsyms_cmds"; then
+ # figure out the soname
+ set dummy $library_names
+ shift
+ realname="$1"
+ shift
+ libname=`eval "\\$ECHO \"$libname_spec\""`
+ # use dlname if we got it. it's perfectly good, no?
+ if test -n "$dlname"; then
+ soname="$dlname"
+ elif test -n "$soname_spec"; then
+ # bleh windows
+ case $host in
+ *cygwin* | mingw* | *cegcc*)
+ func_arith $current - $age
+ major=$func_arith_result
+ versuffix="-$major"
+ ;;
+ esac
+ eval soname=\"$soname_spec\"
+ else
+ soname="$realname"
+ fi
+
+ # Make a new name for the extract_expsyms_cmds to use
+ soroot="$soname"
+ func_basename "$soroot"
+ soname="$func_basename_result"
+ func_stripname 'lib' '.dll' "$soname"
+ newlib=libimp-$func_stripname_result.a
+
+ # If the library has no export list, then create one now
+ if test -f "$output_objdir/$soname-def"; then :
+ else
+ func_verbose "extracting exported symbol list from \`$soname'"
+ func_execute_cmds "$extract_expsyms_cmds" 'exit $?'
+ fi
+
+ # Create $newlib
+ if test -f "$output_objdir/$newlib"; then :; else
+ func_verbose "generating import library for \`$soname'"
+ func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?'
+ fi
+ # make sure the library variables are pointing to the new library
+ dir=$output_objdir
+ linklib=$newlib
+ fi # test -n "$old_archive_from_expsyms_cmds"
+
+ if test "$linkmode" = prog || test "$opt_mode" != relink; then
+ add_shlibpath=
+ add_dir=
+ add=
+ lib_linked=yes
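+	  # Decide how to put this shared library on the link line: directly
+	  # by file name, via -L/-l, or via the run-time shlibpath variable,
+	  # depending on how this platform hardcodes library paths.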
+ case $hardcode_action in
+ immediate | unsupported)
+ if test "$hardcode_direct" = no; then
+ add="$dir/$linklib"
+ case $host in
+ *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;;
+ *-*-sysv4*uw2*) add_dir="-L$dir" ;;
+ *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \
+ *-*-unixware7*) add_dir="-L$dir" ;;
+ *-*-darwin* )
+ # if the lib is a (non-dlopened) module then we can not
+ # link against it, someone is ignoring the earlier warnings
+ if /usr/bin/file -L $add 2> /dev/null |
+ $GREP ": [^:]* bundle" >/dev/null ; then
+ if test "X$dlopenmodule" != "X$lib"; then
+ $ECHO "*** Warning: lib $linklib is a module, not a shared library"
+ if test -z "$old_library" ; then
+ echo
+ echo "*** And there doesn't seem to be a static archive available"
+ echo "*** The link will probably fail, sorry"
+ else
+ add="$dir/$old_library"
+ fi
+ elif test -n "$old_library"; then
+ add="$dir/$old_library"
+ fi
+ fi
+ esac
+ elif test "$hardcode_minus_L" = no; then
+ case $host in
+ *-*-sunos*) add_shlibpath="$dir" ;;
+ esac
+ add_dir="-L$dir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = no; then
+ add_shlibpath="$dir"
+ add="-l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+ relink)
+ if test "$hardcode_direct" = yes &&
+ test "$hardcode_direct_absolute" = no; then
+ add="$dir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ add_dir="-L$absdir"
+ # Try looking first in the location we're being installed to.
+ if test -n "$inst_prefix_dir"; then
+ case $libdir in
+ [\\/]*)
+ func_append add_dir " -L$inst_prefix_dir$libdir"
+ ;;
+ esac
+ fi
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ add_shlibpath="$dir"
+ add="-l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+ *) lib_linked=no ;;
+ esac
+
+ if test "$lib_linked" != yes; then
+ func_fatal_configuration "unsupported hardcode properties"
+ fi
+
+ if test -n "$add_shlibpath"; then
+ case :$compile_shlibpath: in
+ *":$add_shlibpath:"*) ;;
+ *) func_append compile_shlibpath "$add_shlibpath:" ;;
+ esac
+ fi
+ if test "$linkmode" = prog; then
+ test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
+ test -n "$add" && compile_deplibs="$add $compile_deplibs"
+ else
+ test -n "$add_dir" && deplibs="$add_dir $deplibs"
+ test -n "$add" && deplibs="$add $deplibs"
+ if test "$hardcode_direct" != yes &&
+ test "$hardcode_minus_L" != yes &&
+ test "$hardcode_shlibpath_var" = yes; then
+ case :$finalize_shlibpath: in
+ *":$libdir:"*) ;;
+ *) func_append finalize_shlibpath "$libdir:" ;;
+ esac
+ fi
+ fi
+ fi
+
+ if test "$linkmode" = prog || test "$opt_mode" = relink; then
+ add_shlibpath=
+ add_dir=
+ add=
+ # Finalize command for both is simple: just hardcode it.
+ if test "$hardcode_direct" = yes &&
+ test "$hardcode_direct_absolute" = no; then
+ add="$libdir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ add_dir="-L$libdir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ case :$finalize_shlibpath: in
+ *":$libdir:"*) ;;
+ *) func_append finalize_shlibpath "$libdir:" ;;
+ esac
+ add="-l$name"
+ elif test "$hardcode_automatic" = yes; then
+ if test -n "$inst_prefix_dir" &&
+ test -f "$inst_prefix_dir$libdir/$linklib" ; then
+ add="$inst_prefix_dir$libdir/$linklib"
+ else
+ add="$libdir/$linklib"
+ fi
+ else
+ # We cannot seem to hardcode it; guess we'll fake it.
+ add_dir="-L$libdir"
+ # Try looking first in the location we're being installed to.
+ if test -n "$inst_prefix_dir"; then
+ case $libdir in
+ [\\/]*)
+ func_append add_dir " -L$inst_prefix_dir$libdir"
+ ;;
+ esac
+ fi
+ add="-l$name"
+ fi
+
+ if test "$linkmode" = prog; then
+ test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
+ test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
+ else
+ test -n "$add_dir" && deplibs="$add_dir $deplibs"
+ test -n "$add" && deplibs="$add $deplibs"
+ fi
+ fi
+ elif test "$linkmode" = prog; then
+ # Here we assume that one of hardcode_direct or hardcode_minus_L
+ # is not unsupported. This is valid on all known static and
+ # shared platforms.
+ if test "$hardcode_direct" != unsupported; then
+ test -n "$old_library" && linklib="$old_library"
+ compile_deplibs="$dir/$linklib $compile_deplibs"
+ finalize_deplibs="$dir/$linklib $finalize_deplibs"
+ else
+ compile_deplibs="-l$name -L$dir $compile_deplibs"
+ finalize_deplibs="-l$name -L$dir $finalize_deplibs"
+ fi
+ elif test "$build_libtool_libs" = yes; then
+ # Not a shared library
+ if test "$deplibs_check_method" != pass_all; then
+ # We're trying to link a shared library against a static one
+ # but the system doesn't support it.
+
+ # Just print a warning and add the library to dependency_libs so
+ # that the program can be linked against the static library.
+ echo
+ $ECHO "*** Warning: This system can not link to static lib archive $lib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ if test "$module" = yes; then
+ echo "*** But as you try to build a module library, libtool will still create "
+ echo "*** a static module, that should work as long as the dlopening application"
+ echo "*** is linked with the -dlopen flag to resolve symbols at runtime."
+ if test -z "$global_symbol_pipe"; then
+ echo
+ echo "*** However, this would only work if libtool was able to extract symbol"
+ echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ echo "*** not find such a program. So, this module is probably useless."
+ echo "*** \`nm' from GNU binutils and a full rebuild may help."
+ fi
+ if test "$build_old_libs" = no; then
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ fi
+ else
+ deplibs="$dir/$old_library $deplibs"
+ link_static=yes
+ fi
+ fi # link shared/static library?
+
+ if test "$linkmode" = lib; then
+ if test -n "$dependency_libs" &&
+ { test "$hardcode_into_libs" != yes ||
+ test "$build_old_libs" = yes ||
+ test "$link_static" = yes; }; then
+ # Extract -R from dependency_libs
+ temp_deplibs=
+ for libdir in $dependency_libs; do
+ case $libdir in
+ -R*) func_stripname '-R' '' "$libdir"
+ temp_xrpath=$func_stripname_result
+ case " $xrpath " in
+ *" $temp_xrpath "*) ;;
+ *) func_append xrpath " $temp_xrpath";;
+ esac;;
+ *) func_append temp_deplibs " $libdir";;
+ esac
+ done
+ dependency_libs="$temp_deplibs"
+ fi
+
+ func_append newlib_search_path " $absdir"
+ # Link against this library
+ test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
+ # ... and its dependency_libs
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ newdependency_libs="$deplib $newdependency_libs"
+ case $deplib in
+ -L*) func_stripname '-L' '' "$deplib"
+ func_resolve_sysroot "$func_stripname_result";;
+ *) func_resolve_sysroot "$deplib" ;;
+ esac
+ if $opt_preserve_dup_deps ; then
+ case "$tmp_libs " in
+ *" $func_resolve_sysroot_result "*)
+ func_append specialdeplibs " $func_resolve_sysroot_result" ;;
+ esac
+ fi
+ func_append tmp_libs " $func_resolve_sysroot_result"
+ done
+
+ if test "$link_all_deplibs" != no; then
+ # Add the search paths of all dependency libraries
+ for deplib in $dependency_libs; do
+ path=
+ case $deplib in
+ -L*) path="$deplib" ;;
+ *.la)
+ func_resolve_sysroot "$deplib"
+ deplib=$func_resolve_sysroot_result
+ func_dirname "$deplib" "" "."
+ dir=$func_dirname_result
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ if test -z "$absdir"; then
+ func_warning "cannot determine absolute directory name of \`$dir'"
+ absdir="$dir"
+ fi
+ ;;
+ esac
+ if $GREP "^installed=no" $deplib > /dev/null; then
+ case $host in
+ *-*-darwin*)
+ depdepl=
+ eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib`
+ if test -n "$deplibrary_names" ; then
+ for tmp in $deplibrary_names ; do
+ depdepl=$tmp
+ done
+ if test -f "$absdir/$objdir/$depdepl" ; then
+ depdepl="$absdir/$objdir/$depdepl"
+ darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'`
+ if test -z "$darwin_install_name"; then
+ darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'`
+ fi
+ func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}"
+ func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}"
+ path=
+ fi
+ fi
+ ;;
+ *)
+ path="-L$absdir/$objdir"
+ ;;
+ esac
+ else
+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+ test -z "$libdir" && \
+ func_fatal_error "\`$deplib' is not a valid libtool archive"
+ test "$absdir" != "$libdir" && \
+ func_warning "\`$deplib' seems to be moved"
+
+ path="-L$absdir"
+ fi
+ ;;
+ esac
+ case " $deplibs " in
+ *" $path "*) ;;
+ *) deplibs="$path $deplibs" ;;
+ esac
+ done
+ fi # link_all_deplibs != no
+ fi # linkmode = lib
+ done # for deplib in $libs
+ if test "$pass" = link; then
+ if test "$linkmode" = "prog"; then
+ compile_deplibs="$new_inherited_linker_flags $compile_deplibs"
+ finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs"
+ else
+ compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+ fi
+ fi
+ dependency_libs="$newdependency_libs"
+ if test "$pass" = dlpreopen; then
+ # Link the dlpreopened libraries before other libraries
+ for deplib in $save_deplibs; do
+ deplibs="$deplib $deplibs"
+ done
+ fi
+ if test "$pass" != dlopen; then
+ if test "$pass" != conv; then
+ # Make sure lib_search_path contains only unique directories.
+ lib_search_path=
+ for dir in $newlib_search_path; do
+ case "$lib_search_path " in
+ *" $dir "*) ;;
+ *) func_append lib_search_path " $dir" ;;
+ esac
+ done
+ newlib_search_path=
+ fi
+
+ if test "$linkmode,$pass" != "prog,link"; then
+ vars="deplibs"
+ else
+ vars="compile_deplibs finalize_deplibs"
+ fi
+ for var in $vars dependency_libs; do
+ # Add libraries to $var in reverse order
+ eval tmp_libs=\"\$$var\"
+ new_libs=
+ for deplib in $tmp_libs; do
+ # FIXME: Pedantically, this is the right thing to do, so
+ # that some nasty dependency loop isn't accidentally
+ # broken:
+ #new_libs="$deplib $new_libs"
+ # Pragmatically, this seems to cause very few problems in
+ # practice:
+ case $deplib in
+ -L*) new_libs="$deplib $new_libs" ;;
+ -R*) ;;
+ *)
+ # And here is the reason: when a library appears more
+ # than once as an explicit dependence of a library, or
+ # is implicitly linked in more than once by the
+ # compiler, it is considered special, and multiple
+ # occurrences thereof are not removed. Compare this
+ # with having the same library being listed as a
+ # dependency of multiple other libraries: in this case,
+ # we know (pedantically, we assume) the library does not
+ # need to be listed more than once, so we keep only the
+ # last copy. This is not always right, but it is rare
+ # enough that we require users that really mean to play
+ # such unportable linking tricks to link the library
+ # using -Wl,-lname, so that libtool does not consider it
+ # for duplicate removal.
+ case " $specialdeplibs " in
+ *" $deplib "*) new_libs="$deplib $new_libs" ;;
+ *)
+ case " $new_libs " in
+ *" $deplib "*) ;;
+ *) new_libs="$deplib $new_libs" ;;
+ esac
+ ;;
+ esac
+ ;;
+ esac
+ done
+ tmp_libs=
+ for deplib in $new_libs; do
+ case $deplib in
+ -L*)
+ case " $tmp_libs " in
+ *" $deplib "*) ;;
+ *) func_append tmp_libs " $deplib" ;;
+ esac
+ ;;
+ *) func_append tmp_libs " $deplib" ;;
+ esac
+ done
+ eval $var=\"$tmp_libs\"
+ done # for var
+ fi
+ # Last step: remove runtime libs from dependency_libs
+ # (they stay in deplibs)
+ tmp_libs=
+ for i in $dependency_libs ; do
+ case " $predeps $postdeps $compiler_lib_search_path " in
+ *" $i "*)
+ i=""
+ ;;
+ esac
+ if test -n "$i" ; then
+ func_append tmp_libs " $i"
+ fi
+ done
+ dependency_libs=$tmp_libs
+ done # for pass
+ if test "$linkmode" = prog; then
+ dlfiles="$newdlfiles"
+ fi
+ if test "$linkmode" = prog || test "$linkmode" = lib; then
+ dlprefiles="$newdlprefiles"
+ fi
+
+ case $linkmode in
+ oldlib)
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ func_warning "\`-dlopen' is ignored for archives"
+ fi
+
+ case " $deplibs" in
+ *\ -l* | *\ -L*)
+ func_warning "\`-l' and \`-L' are ignored for archives" ;;
+ esac
+
+ test -n "$rpath" && \
+ func_warning "\`-rpath' is ignored for archives"
+
+ test -n "$xrpath" && \
+ func_warning "\`-R' is ignored for archives"
+
+ test -n "$vinfo" && \
+ func_warning "\`-version-info/-version-number' is ignored for archives"
+
+ test -n "$release" && \
+ func_warning "\`-release' is ignored for archives"
+
+ test -n "$export_symbols$export_symbols_regex" && \
+ func_warning "\`-export-symbols' is ignored for archives"
+
+ # Now set the variables for building old libraries.
+ build_libtool_libs=no
+ oldlibs="$output"
+ func_append objs "$old_deplibs"
+ ;;
+
+ lib)
+ # Make sure we only generate libraries of the form `libNAME.la'.
+ case $outputname in
+ lib*)
+ func_stripname 'lib' '.la' "$outputname"
+ name=$func_stripname_result
+ eval shared_ext=\"$shrext_cmds\"
+ eval libname=\"$libname_spec\"
+ ;;
+ *)
+ test "$module" = no && \
+ func_fatal_help "libtool library \`$output' must begin with \`lib'"
+
+ if test "$need_lib_prefix" != no; then
+ # Add the "lib" prefix for modules if required
+ func_stripname '' '.la' "$outputname"
+ name=$func_stripname_result
+ eval shared_ext=\"$shrext_cmds\"
+ eval libname=\"$libname_spec\"
+ else
+ func_stripname '' '.la' "$outputname"
+ libname=$func_stripname_result
+ fi
+ ;;
+ esac
+
+ if test -n "$objs"; then
+ if test "$deplibs_check_method" != pass_all; then
+ func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs"
+ else
+ echo
+ $ECHO "*** Warning: Linking the shared library $output against the non-libtool"
+ $ECHO "*** objects $objs is not portable!"
+ func_append libobjs " $objs"
+ fi
+ fi
+
+ test "$dlself" != no && \
+ func_warning "\`-dlopen self' is ignored for libtool libraries"
+
+ set dummy $rpath
+ shift
+ test "$#" -gt 1 && \
+ func_warning "ignoring multiple \`-rpath's for a libtool library"
+
+ install_libdir="$1"
+
+ oldlibs=
+ if test -z "$rpath"; then
+ if test "$build_libtool_libs" = yes; then
+ # Building a libtool convenience library.
+ # Some compilers have problems with a `.al' extension, so
+ # convenience libraries should have the same extension that an
+ # archive normally would.
+ oldlibs="$output_objdir/$libname.$libext $oldlibs"
+ build_libtool_libs=convenience
+ build_old_libs=yes
+ fi
+
+ test -n "$vinfo" && \
+ func_warning "\`-version-info/-version-number' is ignored for convenience libraries"
+
+ test -n "$release" && \
+ func_warning "\`-release' is ignored for convenience libraries"
+ else
+
+ # Parse the version information argument.
+ save_ifs="$IFS"; IFS=':'
+ set dummy $vinfo 0 0 0
+ shift
+ IFS="$save_ifs"
+
+ test -n "$7" && \
+ func_fatal_help "too many parameters to \`-version-info'"
+
+ # convert absolute version numbers to libtool ages
+ # this retains compatibility with .la files and attempts
+ # to make the code below a bit more comprehensible
+
+ case $vinfo_number in
+ yes)
+ number_major="$1"
+ number_minor="$2"
+ number_revision="$3"
+ #
+ # There are really only two kinds -- those that
+ # use the current revision as the major version
+ # and those that subtract age and use age as
+ # a minor version. But, then there is irix
+ # which has an extra 1 added just for fun
+ #
+ case $version_type in
+ # correct linux to gnu/linux during the next big refactor
+ darwin|linux|osf|windows|none)
+ func_arith $number_major + $number_minor
+ current=$func_arith_result
+ age="$number_minor"
+ revision="$number_revision"
+ ;;
+ freebsd-aout|freebsd-elf|qnx|sunos)
+ current="$number_major"
+ revision="$number_minor"
+ age="0"
+ ;;
+ irix|nonstopux)
+ func_arith $number_major + $number_minor
+ current=$func_arith_result
+ age="$number_minor"
+ revision="$number_minor"
+ lt_irix_increment=no
+ ;;
+ *)
+ func_fatal_configuration "$modename: unknown library version type \`$version_type'"
+ ;;
+ esac
+ ;;
+ no)
+ current="$1"
+ revision="$2"
+ age="$3"
+ ;;
+ esac
+
+ # Check that each of the things are valid numbers.
+ case $current in
+ 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+ *)
+ func_error "CURRENT \`$current' must be a nonnegative integer"
+ func_fatal_error "\`$vinfo' is not valid version information"
+ ;;
+ esac
+
+ case $revision in
+ 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+ *)
+ func_error "REVISION \`$revision' must be a nonnegative integer"
+ func_fatal_error "\`$vinfo' is not valid version information"
+ ;;
+ esac
+
+ case $age in
+ 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+ *)
+ func_error "AGE \`$age' must be a nonnegative integer"
+ func_fatal_error "\`$vinfo' is not valid version information"
+ ;;
+ esac
+
+ if test "$age" -gt "$current"; then
+ func_error "AGE \`$age' is greater than the current interface number \`$current'"
+ func_fatal_error "\`$vinfo' is not valid version information"
+ fi
+
+ # Calculate the version variables.
+ major=
+ versuffix=
+ verstring=
+ case $version_type in
+ none) ;;
+
+ darwin)
+ # Like Linux, but with the current version available in
+ # verstring for coding it into the library header
+ func_arith $current - $age
+ major=.$func_arith_result
+ versuffix="$major.$age.$revision"
+ # Darwin ld doesn't like 0 for these options...
+ func_arith $current + 1
+ minor_current=$func_arith_result
+ xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision"
+ verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
+ ;;
+
+ freebsd-aout)
+ major=".$current"
+ versuffix=".$current.$revision";
+ ;;
+
+ freebsd-elf)
+ major=".$current"
+ versuffix=".$current"
+ ;;
+
+ irix | nonstopux)
+ if test "X$lt_irix_increment" = "Xno"; then
+ func_arith $current - $age
+ else
+ func_arith $current - $age + 1
+ fi
+ major=$func_arith_result
+
+ case $version_type in
+ nonstopux) verstring_prefix=nonstopux ;;
+ *) verstring_prefix=sgi ;;
+ esac
+ verstring="$verstring_prefix$major.$revision"
+
+ # Add in all the interfaces that we are compatible with.
+ loop=$revision
+ while test "$loop" -ne 0; do
+ func_arith $revision - $loop
+ iface=$func_arith_result
+ func_arith $loop - 1
+ loop=$func_arith_result
+ verstring="$verstring_prefix$major.$iface:$verstring"
+ done
+
+ # Before this point, $major must not contain `.'.
+ major=.$major
+ versuffix="$major.$revision"
+ ;;
+
+ linux) # correct to gnu/linux during the next big refactor
+ func_arith $current - $age
+ major=.$func_arith_result
+ versuffix="$major.$age.$revision"
+ ;;
+
+ osf)
+ func_arith $current - $age
+ major=.$func_arith_result
+ versuffix=".$current.$age.$revision"
+ verstring="$current.$age.$revision"
+
+ # Add in all the interfaces that we are compatible with.
+ loop=$age
+ while test "$loop" -ne 0; do
+ func_arith $current - $loop
+ iface=$func_arith_result
+ func_arith $loop - 1
+ loop=$func_arith_result
+ verstring="$verstring:${iface}.0"
+ done
+
+ # Make executables depend on our current version.
+ func_append verstring ":${current}.0"
+ ;;
+
+ qnx)
+ major=".$current"
+ versuffix=".$current"
+ ;;
+
+ sunos)
+ major=".$current"
+ versuffix=".$current.$revision"
+ ;;
+
+ windows)
+ # Use '-' rather than '.', since we only want one
+ # extension on DOS 8.3 filesystems.
+ func_arith $current - $age
+ major=$func_arith_result
+ versuffix="-$major"
+ ;;
+
+ *)
+ func_fatal_configuration "unknown library version type \`$version_type'"
+ ;;
+ esac
+
+ # Clear the version info if we defaulted, and they specified a release.
+ if test -z "$vinfo" && test -n "$release"; then
+ major=
+ case $version_type in
+ darwin)
+ # we can't check for "0.0" in archive_cmds due to quoting
+ # problems, so we reset it completely
+ verstring=
+ ;;
+ *)
+ verstring="0.0"
+ ;;
+ esac
+ if test "$need_version" = no; then
+ versuffix=
+ else
+ versuffix=".0.0"
+ fi
+ fi
+
+ # Remove version info from name if versioning should be avoided
+ if test "$avoid_version" = yes && test "$need_version" = no; then
+ major=
+ versuffix=
+ verstring=""
+ fi
+
+ # Check to see if the archive will have undefined symbols.
+ if test "$allow_undefined" = yes; then
+ if test "$allow_undefined_flag" = unsupported; then
+ func_warning "undefined symbols not allowed in $host shared libraries"
+ build_libtool_libs=no
+ build_old_libs=yes
+ fi
+ else
+ # Don't allow undefined symbols.
+ allow_undefined_flag="$no_undefined_flag"
+ fi
+
+ fi
+
+ func_generate_dlsyms "$libname" "$libname" "yes"
+ func_append libobjs " $symfileobj"
+ test "X$libobjs" = "X " && libobjs=
+
+ if test "$opt_mode" != relink; then
+ # Remove our outputs, but don't remove object files since they
+ # may have been created when compiling PIC objects.
+ removelist=
+ tempremovelist=`$ECHO "$output_objdir/*"`
+ for p in $tempremovelist; do
+ case $p in
+ *.$objext | *.gcno)
+ ;;
+ $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*)
+ if test "X$precious_files_regex" != "X"; then
+ if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1
+ then
+ continue
+ fi
+ fi
+ func_append removelist " $p"
+ ;;
+ *) ;;
+ esac
+ done
+ test -n "$removelist" && \
+ func_show_eval "${RM}r \$removelist"
+ fi
+
+ # Now set the variables for building old libraries.
+ if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
+ func_append oldlibs " $output_objdir/$libname.$libext"
+
+ # Transform .lo files to .o files.
+ oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP`
+ fi
+
+ # Eliminate all temporary directories.
+ #for path in $notinst_path; do
+ # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"`
+ # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"`
+ # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"`
+ #done
+
+ if test -n "$xrpath"; then
+ # If the user specified any rpath flags, then add them.
+ temp_xrpath=
+ for libdir in $xrpath; do
+ func_replace_sysroot "$libdir"
+ func_append temp_xrpath " -R$func_replace_sysroot_result"
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) func_append finalize_rpath " $libdir" ;;
+ esac
+ done
+ if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then
+ dependency_libs="$temp_xrpath $dependency_libs"
+ fi
+ fi
+
+ # Make sure dlfiles contains only unique files that won't be dlpreopened
+ old_dlfiles="$dlfiles"
+ dlfiles=
+ for lib in $old_dlfiles; do
+ case " $dlprefiles $dlfiles " in
+ *" $lib "*) ;;
+ *) func_append dlfiles " $lib" ;;
+ esac
+ done
+
+ # Make sure dlprefiles contains only unique files
+ old_dlprefiles="$dlprefiles"
+ dlprefiles=
+ for lib in $old_dlprefiles; do
+ case "$dlprefiles " in
+ *" $lib "*) ;;
+ *) func_append dlprefiles " $lib" ;;
+ esac
+ done
+
+ if test "$build_libtool_libs" = yes; then
+ if test -n "$rpath"; then
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*)
+ # these systems don't actually have a c library (as such)!
+ ;;
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # Rhapsody C library is in the System framework
+ func_append deplibs " System.ltframework"
+ ;;
+ *-*-netbsd*)
+ # Don't link with libc until the a.out ld.so is fixed.
+ ;;
+ *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+ # Do not include libc due to us having libc/libc_r.
+ ;;
+ *-*-sco3.2v5* | *-*-sco5v6*)
+ # Causes problems with __ctype
+ ;;
+ *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+ # Compiler inserts libc in the correct place for threads to work
+ ;;
+ *)
+ # Add libc to deplibs on all other systems if necessary.
+ if test "$build_libtool_need_lc" = "yes"; then
+ func_append deplibs " -lc"
+ fi
+ ;;
+ esac
+ fi
+
+ # Transform deplibs into only deplibs that can be linked in shared.
+ name_save=$name
+ libname_save=$libname
+ release_save=$release
+ versuffix_save=$versuffix
+ major_save=$major
+ # I'm not sure if I'm treating the release correctly. I think
+ # release should show up in the -l (ie -lgmp5) so we don't want to
+ # add it in twice. Is that correct?
+ release=""
+ versuffix=""
+ major=""
+ newdeplibs=
+ droppeddeps=no
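+ # $deplibs_check_method selects how to decide which dependencies are
+ # safe to record for shared linking: pass everything through, link a
+ # test program and inspect it with ldd, match file magic output,
+ # match file names against a pattern, or drop them all.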
+ case $deplibs_check_method in
+ pass_all)
+ # Don't check for shared/static. Everything works.
+ # This might be a little naive. We might want to check
+ # whether the library exists or not. But this is on
+ # osf3 & osf4 and I'm not really sure... Just
+ # implementing what was already the behavior.
+ newdeplibs=$deplibs
+ ;;
+ test_compile)
+ # This code stresses the "libraries are programs" paradigm to its
+ # limits. Maybe even breaks it. We compile a program, linking it
+ # against the deplibs as a proxy for the library. Then we can check
+ # whether they linked in statically or dynamically with ldd.
+ $opt_dry_run || $RM conftest.c
+ cat > conftest.c <<EOF
+ int main() { return 0; }
+EOF
+ $opt_dry_run || $RM conftest
+ if $LTCC $LTCFLAGS -o conftest conftest.c $deplibs; then
+ ldd_output=`ldd conftest`
+ for i in $deplibs; do
+ case $i in
+ -l*)
+ func_stripname -l '' "$i"
+ name=$func_stripname_result
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ case " $predeps $postdeps " in
+ *" $i "*)
+ func_append newdeplibs " $i"
+ i=""
+ ;;
+ esac
+ fi
+ if test -n "$i" ; then
+ libname=`eval "\\$ECHO \"$libname_spec\""`
+ deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
+ set dummy $deplib_matches; shift
+ deplib_match=$1
+ if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+ func_append newdeplibs " $i"
+ else
+ droppeddeps=yes
+ echo
+ $ECHO "*** Warning: dynamic linker does not accept needed library $i."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which I believe you do not have"
+ echo "*** because a test_compile did reveal that the linker did not use it for"
+ echo "*** its dynamic dependency list that programs get resolved with at runtime."
+ fi
+ fi
+ ;;
+ *)
+ func_append newdeplibs " $i"
+ ;;
+ esac
+ done
+ else
+ # Error occurred in the first compile. Let's try to salvage
+ # the situation: Compile a separate program for each library.
+ for i in $deplibs; do
+ case $i in
+ -l*)
+ func_stripname -l '' "$i"
+ name=$func_stripname_result
+ $opt_dry_run || $RM conftest
+ if $LTCC $LTCFLAGS -o conftest conftest.c $i; then
+ ldd_output=`ldd conftest`
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ case " $predeps $postdeps " in
+ *" $i "*)
+ func_append newdeplibs " $i"
+ i=""
+ ;;
+ esac
+ fi
+ if test -n "$i" ; then
+ libname=`eval "\\$ECHO \"$libname_spec\""`
+ deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
+ set dummy $deplib_matches; shift
+ deplib_match=$1
+ if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+ func_append newdeplibs " $i"
+ else
+ droppeddeps=yes
+ echo
+ $ECHO "*** Warning: dynamic linker does not accept needed library $i."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have"
+ echo "*** because a test_compile did reveal that the linker did not use this one"
+ echo "*** as a dynamic dependency that programs can get resolved with at runtime."
+ fi
+ fi
+ else
+ droppeddeps=yes
+ echo
+ $ECHO "*** Warning! Library $i is needed by this library but I was not able to"
+ echo "*** make it link in! You will probably need to install it or some"
+ echo "*** library that it depends on before this library will be fully"
+ echo "*** functional. Installing it before continuing would be even better."
+ fi
+ ;;
+ *)
+ func_append newdeplibs " $i"
+ ;;
+ esac
+ done
+ fi
+ ;;
+ file_magic*)
+ set dummy $deplibs_check_method; shift
+ file_magic_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
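+ # For each -l dependency, search the library search paths for a real
+ # file whose $file_magic_cmd output matches the regex above; only a
+ # match keeps the dependency in newdeplibs for shared linking.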
+ for a_deplib in $deplibs; do
+ case $a_deplib in
+ -l*)
+ func_stripname -l '' "$a_deplib"
+ name=$func_stripname_result
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ case " $predeps $postdeps " in
+ *" $a_deplib "*)
+ func_append newdeplibs " $a_deplib"
+ a_deplib=""
+ ;;
+ esac
+ fi
+ if test -n "$a_deplib" ; then
+ libname=`eval "\\$ECHO \"$libname_spec\""`
+ if test -n "$file_magic_glob"; then
+ libnameglob=`func_echo_all "$libname" | $SED -e $file_magic_glob`
+ else
+ libnameglob=$libname
+ fi
+ test "$want_nocaseglob" = yes && nocaseglob=`shopt -p nocaseglob`
+ for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ if test "$want_nocaseglob" = yes; then
+ shopt -s nocaseglob
+ potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+ $nocaseglob
+ else
+ potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+ fi
+ for potent_lib in $potential_libs; do
+ # Follow soft links.
+ if ls -lLd "$potent_lib" 2>/dev/null |
+ $GREP " -> " >/dev/null; then
+ continue
+ fi
+ # The statement above tries to avoid entering an
+ # endless loop below, in case of cyclic links.
+ # We might still enter an endless loop, since a link
+ # loop can be closed while we follow links,
+ # but so what?
+ potlib="$potent_lib"
+ while test -h "$potlib" 2>/dev/null; do
+ potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'`
+ case $potliblink in
+ [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
+ *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";;
+ esac
+ done
+ if eval $file_magic_cmd \"\$potlib\" 2>/dev/null |
+ $SED -e 10q |
+ $EGREP "$file_magic_regex" > /dev/null; then
+ func_append newdeplibs " $a_deplib"
+ a_deplib=""
+ break 2
+ fi
+ done
+ done
+ fi
+ if test -n "$a_deplib" ; then
+ droppeddeps=yes
+ echo
+ $ECHO "*** Warning: linker path does not have real file for library $a_deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have"
+ echo "*** because I did check the linker path looking for a file starting"
+ if test -z "$potlib" ; then
+ $ECHO "*** with $libname but no candidates were found. (...for file magic test)"
+ else
+ $ECHO "*** with $libname and none of the candidates passed a file format test"
+ $ECHO "*** using a file magic. Last file checked: $potlib"
+ fi
+ fi
+ ;;
+ *)
+ # Add a -L argument.
+ func_append newdeplibs " $a_deplib"
+ ;;
+ esac
+ done # Gone through all deplibs.
+ ;;
+ match_pattern*)
+ set dummy $deplibs_check_method; shift
+ match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+ for a_deplib in $deplibs; do
+ case $a_deplib in
+ -l*)
+ func_stripname -l '' "$a_deplib"
+ name=$func_stripname_result
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ case " $predeps $postdeps " in
+ *" $a_deplib "*)
+ func_append newdeplibs " $a_deplib"
+ a_deplib=""
+ ;;
+ esac
+ fi
+ if test -n "$a_deplib" ; then
+ libname=`eval "\\$ECHO \"$libname_spec\""`
+ for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+ for potent_lib in $potential_libs; do
+ potlib="$potent_lib" # see symlink-check above in file_magic test
+ if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \
+ $EGREP "$match_pattern_regex" > /dev/null; then
+ func_append newdeplibs " $a_deplib"
+ a_deplib=""
+ break 2
+ fi
+ done
+ done
+ fi
+ if test -n "$a_deplib" ; then
+ droppeddeps=yes
+ echo
+ $ECHO "*** Warning: linker path does not have real file for library $a_deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have"
+ echo "*** because I did check the linker path looking for a file starting"
+ if test -z "$potlib" ; then
+ $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)"
+ else
+ $ECHO "*** with $libname and none of the candidates passed a file format test"
+ $ECHO "*** using a regex pattern. Last file checked: $potlib"
+ fi
+ fi
+ ;;
+ *)
+ # Add a -L argument.
+ func_append newdeplibs " $a_deplib"
+ ;;
+ esac
+ done # Gone through all deplibs.
+ ;;
+ none | unknown | *)
+ newdeplibs=""
+ tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'`
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ for i in $predeps $postdeps ; do
+ # can't use Xsed below, because $i might contain '/'
+ tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s,$i,,"`
+ done
+ fi
+ case $tmp_deplibs in
+ *[!\ \ ]*)
+ echo
+ if test "X$deplibs_check_method" = "Xnone"; then
+ echo "*** Warning: inter-library dependencies are not supported in this platform."
+ else
+ echo "*** Warning: inter-library dependencies are not known to be supported."
+ fi
+ echo "*** All declared inter-library dependencies are being dropped."
+ droppeddeps=yes
+ ;;
+ esac
+ ;;
+ esac
+ versuffix=$versuffix_save
+ major=$major_save
+ release=$release_save
+ libname=$libname_save
+ name=$name_save
+
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # On Rhapsody replace the C library with the System framework
+ newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'`
+ ;;
+ esac
+
+ if test "$droppeddeps" = yes; then
+ if test "$module" = yes; then
+ echo
+ echo "*** Warning: libtool could not satisfy all declared inter-library"
+ $ECHO "*** dependencies of module $libname. Therefore, libtool will create"
+ echo "*** a static module, that should work as long as the dlopening"
+ echo "*** application is linked with the -dlopen flag."
+ if test -z "$global_symbol_pipe"; then
+ echo
+ echo "*** However, this would only work if libtool was able to extract symbol"
+ echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ echo "*** not find such a program. So, this module is probably useless."
+ echo "*** \`nm' from GNU binutils and a full rebuild may help."
+ fi
+ if test "$build_old_libs" = no; then
+ oldlibs="$output_objdir/$libname.$libext"
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ else
+ echo "*** The inter-library dependencies that have been dropped here will be"
+ echo "*** automatically added whenever a program is linked with this library"
+ echo "*** or is declared to -dlopen it."
+
+ if test "$allow_undefined" = no; then
+ echo
+ echo "*** Since this library must not contain undefined symbols,"
+ echo "*** because either the platform does not support them or"
+ echo "*** it was explicitly requested with -no-undefined,"
+ echo "*** libtool will only create a static version of it."
+ if test "$build_old_libs" = no; then
+ oldlibs="$output_objdir/$libname.$libext"
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ fi
+ fi
+ fi
+ # Done checking deplibs!
+ deplibs=$newdeplibs
+ fi
+ # Time to change all our "foo.ltframework" stuff back to "-framework foo"
+ case $host in
+ *-*-darwin*)
+ newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+ new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+ deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+ ;;
+ esac
+
+ # move library search paths that coincide with paths to not yet
+ # installed libraries to the beginning of the library search list
+ new_libs=
+ for path in $notinst_path; do
+ case " $new_libs " in
+ *" -L$path/$objdir "*) ;;
+ *)
+ case " $deplibs " in
+ *" -L$path/$objdir "*)
+ func_append new_libs " -L$path/$objdir" ;;
+ esac
+ ;;
+ esac
+ done
+ for deplib in $deplibs; do
+ case $deplib in
+ -L*)
+ case " $new_libs " in
+ *" $deplib "*) ;;
+ *) func_append new_libs " $deplib" ;;
+ esac
+ ;;
+ *) func_append new_libs " $deplib" ;;
+ esac
+ done
+ deplibs="$new_libs"
+
+ # All the library-specific variables (install_libdir is set above).
+ library_names=
+ old_library=
+ dlname=
+
+ # Test again, we may have decided not to build it any more
+ if test "$build_libtool_libs" = yes; then
+ # Remove ${wl} instances when linking with ld.
+ # FIXME: should test the right _cmds variable.
+ case $archive_cmds in
+ *\$LD\ *) wl= ;;
+ esac
+ if test "$hardcode_into_libs" = yes; then
+ # Hardcode the library paths
+ hardcode_libdirs=
+ dep_rpath=
+ rpath="$finalize_rpath"
+ test "$opt_mode" != relink && rpath="$compile_rpath$rpath"
+ for libdir in $rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ func_replace_sysroot "$libdir"
+ libdir=$func_replace_sysroot_result
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ func_append dep_rpath " $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$perm_rpath " in
+ *" $libdir "*) ;;
+ *) func_append perm_rpath " $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval "dep_rpath=\"$hardcode_libdir_flag_spec\""
+ fi
+ if test -n "$runpath_var" && test -n "$perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $perm_rpath; do
+ func_append rpath "$dir:"
+ done
+ eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
+ fi
+ test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
+ fi
+
+ shlibpath="$finalize_shlibpath"
+ test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
+ if test -n "$shlibpath"; then
+ eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
+ fi
+
+ # Get the real and link names of the library.
+ eval shared_ext=\"$shrext_cmds\"
+ eval library_names=\"$library_names_spec\"
+ set dummy $library_names
+ shift
+ realname="$1"
+ shift
+
+ if test -n "$soname_spec"; then
+ eval soname=\"$soname_spec\"
+ else
+ soname="$realname"
+ fi
+ if test -z "$dlname"; then
+ dlname=$soname
+ fi
+
+ lib="$output_objdir/$realname"
+ linknames=
+ for link
+ do
+ func_append linknames " $link"
+ done
+
+ # Use standard objects if they are pic
+ test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP`
+ test "X$libobjs" = "X " && libobjs=
+
+ delfiles=
+ if test -n "$export_symbols" && test -n "$include_expsyms"; then
+ $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp"
+ export_symbols="$output_objdir/$libname.uexp"
+ func_append delfiles " $export_symbols"
+ fi
+
+ orig_export_symbols=
+ case $host_os in
+ cygwin* | mingw* | cegcc*)
+ if test -n "$export_symbols" && test -z "$export_symbols_regex"; then
+ # exporting using user supplied symfile
+ if test "x`$SED 1q $export_symbols`" != xEXPORTS; then
+ # and it's NOT already a .def file. Must figure out
+ # which of the given symbols are data symbols and tag
+ # them as such. So, trigger use of export_symbols_cmds.
+ # export_symbols gets reassigned inside the "prepare
+ # the list of exported symbols" if statement, so the
+ # include_expsyms logic still works.
+ orig_export_symbols="$export_symbols"
+ export_symbols=
+ always_export_symbols=yes
+ fi
+ fi
+ ;;
+ esac
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
+ func_verbose "generating symbol list for \`$libname.la'"
+ export_symbols="$output_objdir/$libname.exp"
+ $opt_dry_run || $RM $export_symbols
+ cmds=$export_symbols_cmds
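+ # $export_symbols_cmds is a '~'-separated list of commands: run each
+ # one directly when it fits in $max_cmd_len, fall back to an $NM input
+ # file list when supported, or defer the export list to the piecewise
+ # (reloadable object) link below.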
+ save_ifs="$IFS"; IFS='~'
+ for cmd1 in $cmds; do
+ IFS="$save_ifs"
+ # Take the normal branch if the nm_file_list_spec branch
+ # doesn't work or if tool conversion is not needed.
+ case $nm_file_list_spec~$to_tool_file_cmd in
+ *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*)
+ try_normal_branch=yes
+ eval cmd=\"$cmd1\"
+ func_len " $cmd"
+ len=$func_len_result
+ ;;
+ *)
+ try_normal_branch=no
+ ;;
+ esac
+ if test "$try_normal_branch" = yes \
+ && { test "$len" -lt "$max_cmd_len" \
+ || test "$max_cmd_len" -le -1; }
+ then
+ func_show_eval "$cmd" 'exit $?'
+ skipped_export=false
+ elif test -n "$nm_file_list_spec"; then
+ func_basename "$output"
+ output_la=$func_basename_result
+ save_libobjs=$libobjs
+ save_output=$output
+ output=${output_objdir}/${output_la}.nm
+ func_to_tool_file "$output"
+ libobjs=$nm_file_list_spec$func_to_tool_file_result
+ func_append delfiles " $output"
+ func_verbose "creating $NM input file list: $output"
+ for obj in $save_libobjs; do
+ func_to_tool_file "$obj"
+ $ECHO "$func_to_tool_file_result"
+ done > "$output"
+ eval cmd=\"$cmd1\"
+ func_show_eval "$cmd" 'exit $?'
+ output=$save_output
+ libobjs=$save_libobjs
+ skipped_export=false
+ else
+ # The command line is too long to execute in one step.
+ func_verbose "using reloadable object file for export list..."
+ skipped_export=:
+ # Break out early, otherwise skipped_export may be
+ # set to false by a later but shorter cmd.
+ break
+ fi
+ done
+ IFS="$save_ifs"
+ if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then
+ func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+ func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
+ fi
+ fi
+ fi
+
+ if test -n "$export_symbols" && test -n "$include_expsyms"; then
+ tmp_export_symbols="$export_symbols"
+ test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols"
+ $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
+ fi
+
+ if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then
+ # The given export_symbols file has to be filtered, so filter it.
+ func_verbose "filter symbol list for \`$libname.la' to tag DATA exports"
+ # FIXME: $output_objdir/$libname.filter potentially contains lots of
+ # 's' commands which not all seds can handle. GNU sed should be fine
+ # though. Also, the filter scales superlinearly with the number of
+ # global variables. join(1) would be nice here, but unfortunately
+ # isn't a blessed tool.
+ $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
+ func_append delfiles " $export_symbols $output_objdir/$libname.filter"
+ export_symbols=$output_objdir/$libname.def
+ $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
+ fi
+
+ tmp_deplibs=
+ for test_deplib in $deplibs; do
+ case " $convenience " in
+ *" $test_deplib "*) ;;
+ *)
+ func_append tmp_deplibs " $test_deplib"
+ ;;
+ esac
+ done
+ deplibs="$tmp_deplibs"
+
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec" &&
+ test "$compiler_needs_object" = yes &&
+ test -z "$libobjs"; then
+ # extract the archives, so we have objects to list.
+ # TODO: could optimize this to just extract one archive.
+ whole_archive_flag_spec=
+ fi
+ if test -n "$whole_archive_flag_spec"; then
+ save_libobjs=$libobjs
+ eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+ test "X$libobjs" = "X " && libobjs=
+ else
+ gentop="$output_objdir/${outputname}x"
+ func_append generated " $gentop"
+
+ func_extract_archives $gentop $convenience
+ func_append libobjs " $func_extract_archives_result"
+ test "X$libobjs" = "X " && libobjs=
+ fi
+ fi
+
+ if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
+ eval flag=\"$thread_safe_flag_spec\"
+ func_append linker_flags " $flag"
+ fi
+
+ # Make a backup of the uninstalled library when relinking
+ if test "$opt_mode" = relink; then
+ $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $?
+ fi
+
+ # Do each of the archive commands.
+ if test "$module" = yes && test -n "$module_cmds" ; then
+ if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
+ eval test_cmds=\"$module_expsym_cmds\"
+ cmds=$module_expsym_cmds
+ else
+ eval test_cmds=\"$module_cmds\"
+ cmds=$module_cmds
+ fi
+ else
+ if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+ eval test_cmds=\"$archive_expsym_cmds\"
+ cmds=$archive_expsym_cmds
+ else
+ eval test_cmds=\"$archive_cmds\"
+ cmds=$archive_cmds
+ fi
+ fi
+
+ if test "X$skipped_export" != "X:" &&
+ func_len " $test_cmds" &&
+ len=$func_len_result &&
+ test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+ :
+ else
+ # The command line is too long to link in one step; link piecewise
+ # or, if using GNU ld and skipped_export is not :, use a linker
+ # script.
+
+ # Save the value of $output and $libobjs because we want to
+ # use them later. If we have whole_archive_flag_spec, we
+ # want to use save_libobjs as it was before
+ # whole_archive_flag_spec was expanded, because we can't
+ # assume the linker understands whole_archive_flag_spec.
+ # This may have to be revisited, in case too many
+ # convenience libraries get linked in and end up exceeding
+ # the spec.
+ if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then
+ save_libobjs=$libobjs
+ fi
+ save_output=$output
+ func_basename "$output"
+ output_la=$func_basename_result
+
+ # Clear the reloadable object creation command queue and
+ # initialize k to one.
+ test_cmds=
+ concat_cmds=
+ objlist=
+ last_robj=
+ k=1
+
+ if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then
+ output=${output_objdir}/${output_la}.lnkscript
+ func_verbose "creating GNU ld script: $output"
+ echo 'INPUT (' > $output
+ for obj in $save_libobjs
+ do
+ func_to_tool_file "$obj"
+ $ECHO "$func_to_tool_file_result" >> $output
+ done
+ echo ')' >> $output
+ func_append delfiles " $output"
+ func_to_tool_file "$output"
+ output=$func_to_tool_file_result
+ elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then
+ output=${output_objdir}/${output_la}.lnk
+ func_verbose "creating linker input file list: $output"
+ : > $output
+ set x $save_libobjs
+ shift
+ firstobj=
+ if test "$compiler_needs_object" = yes; then
+ firstobj="$1 "
+ shift
+ fi
+ for obj
+ do
+ func_to_tool_file "$obj"
+ $ECHO "$func_to_tool_file_result" >> $output
+ done
+ func_append delfiles " $output"
+ func_to_tool_file "$output"
+ output=$firstobj\"$file_list_spec$func_to_tool_file_result\"
+ else
+ if test -n "$save_libobjs"; then
+ func_verbose "creating reloadable object files..."
+ output=$output_objdir/$output_la-${k}.$objext
+ eval test_cmds=\"$reload_cmds\"
+ func_len " $test_cmds"
+ len0=$func_len_result
+ len=$len0
+
+ # Loop over the list of objects to be linked.
+ for obj in $save_libobjs
+ do
+ func_len " $obj"
+ func_arith $len + $func_len_result
+ len=$func_arith_result
+ if test "X$objlist" = X ||
+ test "$len" -lt "$max_cmd_len"; then
+ func_append objlist " $obj"
+ else
+ # The command $test_cmds is almost too long; add a
+ # command to the queue.
+ if test "$k" -eq 1 ; then
+ # The first file doesn't have a previous command to add.
+ reload_objs=$objlist
+ eval concat_cmds=\"$reload_cmds\"
+ else
+ # All subsequent reloadable object files will link in
+ # the last one created.
+ reload_objs="$objlist $last_robj"
+ eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\"
+ fi
+ last_robj=$output_objdir/$output_la-${k}.$objext
+ func_arith $k + 1
+ k=$func_arith_result
+ output=$output_objdir/$output_la-${k}.$objext
+ objlist=" $obj"
+ func_len " $last_robj"
+ func_arith $len0 + $func_len_result
+ len=$func_arith_result
+ fi
+ done
+ # Handle the remaining objects by creating one last
+ # reloadable object file. All subsequent reloadable object
+ # files will link in the last one created.
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+ reload_objs="$objlist $last_robj"
+ eval concat_cmds=\"\${concat_cmds}$reload_cmds\"
+ if test -n "$last_robj"; then
+ eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\"
+ fi
+ func_append delfiles " $output"
+
+ else
+ output=
+ fi
+
+ if ${skipped_export-false}; then
+ func_verbose "generating symbol list for \`$libname.la'"
+ export_symbols="$output_objdir/$libname.exp"
+ $opt_dry_run || $RM $export_symbols
+ libobjs=$output
+ # Append the command to create the export file.
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+ eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\"
+ if test -n "$last_robj"; then
+ eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\"
+ fi
+ fi
+
+ test -n "$save_libobjs" &&
+ func_verbose "creating a temporary reloadable object file: $output"
+
+ # Loop through the commands generated above and execute them.
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $concat_cmds; do
+ IFS="$save_ifs"
+ $opt_silent || {
+ func_quote_for_expand "$cmd"
+ eval "func_echo $func_quote_for_expand_result"
+ }
+ $opt_dry_run || eval "$cmd" || {
+ lt_exit=$?
+
+ # Restore the uninstalled library and exit
+ if test "$opt_mode" = relink; then
+ ( cd "$output_objdir" && \
+ $RM "${realname}T" && \
+ $MV "${realname}U" "$realname" )
+ fi
+
+ exit $lt_exit
+ }
+ done
+ IFS="$save_ifs"
+
+ if test -n "$export_symbols_regex" && ${skipped_export-false}; then
+ func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+ func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
+ fi
+ fi
+
+ if ${skipped_export-false}; then
+ if test -n "$export_symbols" && test -n "$include_expsyms"; then
+ tmp_export_symbols="$export_symbols"
+ test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols"
+ $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
+ fi
+
+ if test -n "$orig_export_symbols"; then
+ # The given export_symbols file has to be filtered, so filter it.
+ func_verbose "filter symbol list for \`$libname.la' to tag DATA exports"
+ # FIXME: $output_objdir/$libname.filter potentially contains lots of
+ # 's' commands which not all seds can handle. GNU sed should be fine
+ # though. Also, the filter scales superlinearly with the number of
+ # global variables. join(1) would be nice here, but unfortunately
+ # isn't a blessed tool.
+ $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
+ func_append delfiles " $export_symbols $output_objdir/$libname.filter"
+ export_symbols=$output_objdir/$libname.def
+ $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
+ fi
+ fi
+
+ libobjs=$output
+ # Restore the value of output.
+ output=$save_output
+
+ if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then
+ eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+ test "X$libobjs" = "X " && libobjs=
+ fi
+ # Expand the library linking commands again to reset the
+ # value of $libobjs for piecewise linking.
+
+ # Do each of the archive commands.
+ if test "$module" = yes && test -n "$module_cmds" ; then
+ if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
+ cmds=$module_expsym_cmds
+ else
+ cmds=$module_cmds
+ fi
+ else
+ if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+ cmds=$archive_expsym_cmds
+ else
+ cmds=$archive_cmds
+ fi
+ fi
+ fi
+
+ if test -n "$delfiles"; then
+ # Append the command to remove temporary files to $cmds.
+ eval cmds=\"\$cmds~\$RM $delfiles\"
+ fi
+
+ # Add any objects from preloaded convenience libraries
+ if test -n "$dlprefiles"; then
+ gentop="$output_objdir/${outputname}x"
+ func_append generated " $gentop"
+
+ func_extract_archives $gentop $dlprefiles
+ func_append libobjs " $func_extract_archives_result"
+ test "X$libobjs" = "X " && libobjs=
+ fi
+
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ eval cmd=\"$cmd\"
+ $opt_silent || {
+ func_quote_for_expand "$cmd"
+ eval "func_echo $func_quote_for_expand_result"
+ }
+ $opt_dry_run || eval "$cmd" || {
+ lt_exit=$?
+
+ # Restore the uninstalled library and exit
+ if test "$opt_mode" = relink; then
+ ( cd "$output_objdir" && \
+ $RM "${realname}T" && \
+ $MV "${realname}U" "$realname" )
+ fi
+
+ exit $lt_exit
+ }
+ done
+ IFS="$save_ifs"
+
+ # Restore the uninstalled library and exit
+ if test "$opt_mode" = relink; then
+ $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $?
+
+ if test -n "$convenience"; then
+ if test -z "$whole_archive_flag_spec"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+ fi
+
+ exit $EXIT_SUCCESS
+ fi
+
+ # Create links to the real library.
+ for linkname in $linknames; do
+ if test "$realname" != "$linkname"; then
+ func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?'
+ fi
+ done
+
+ # If -module or -export-dynamic was specified, set the dlname.
+ if test "$module" = yes || test "$export_dynamic" = yes; then
+ # On all known operating systems, these are identical.
+ dlname="$soname"
+ fi
+ fi
+ ;;
+
+ obj)
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ func_warning "\`-dlopen' is ignored for objects"
+ fi
+
+ case " $deplibs" in
+ *\ -l* | *\ -L*)
+ func_warning "\`-l' and \`-L' are ignored for objects" ;;
+ esac
+
+ test -n "$rpath" && \
+ func_warning "\`-rpath' is ignored for objects"
+
+ test -n "$xrpath" && \
+ func_warning "\`-R' is ignored for objects"
+
+ test -n "$vinfo" && \
+ func_warning "\`-version-info' is ignored for objects"
+
+ test -n "$release" && \
+ func_warning "\`-release' is ignored for objects"
+
+ case $output in
+ *.lo)
+ test -n "$objs$old_deplibs" && \
+ func_fatal_error "cannot build library object \`$output' from non-libtool objects"
+
+ libobj=$output
+ func_lo2o "$libobj"
+ obj=$func_lo2o_result
+ ;;
+ *)
+ libobj=
+ obj="$output"
+ ;;
+ esac
+
+ # Delete the old objects.
+ $opt_dry_run || $RM $obj $libobj
+
+ # Objects from convenience libraries. This assumes
+ # single-version convenience libraries. Whenever we create
+ # different ones for PIC/non-PIC, we'll have to duplicate
+ # the extraction.
+ reload_conv_objs=
+ gentop=
+ # reload_cmds runs $LD directly, so let us get rid of
+ # -Wl from whole_archive_flag_spec and hope we can get by with
+ # turning comma into space.
+ wl=
+
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec"; then
+ eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\"
+ reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'`
+ else
+ gentop="$output_objdir/${obj}x"
+ func_append generated " $gentop"
+
+ func_extract_archives $gentop $convenience
+ reload_conv_objs="$reload_objs $func_extract_archives_result"
+ fi
+ fi
+
+ # If we're not building shared, we need to use non_pic_objs
+ test "$build_libtool_libs" != yes && libobjs="$non_pic_objects"
+
+ # Create the old-style object.
+ reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
+
+ output="$obj"
+ func_execute_cmds "$reload_cmds" 'exit $?'
+
+ # Exit if we aren't doing a library object file.
+ if test -z "$libobj"; then
+ if test -n "$gentop"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+
+ exit $EXIT_SUCCESS
+ fi
+
+ if test "$build_libtool_libs" != yes; then
+ if test -n "$gentop"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+
+ # Create an invalid libtool object if no PIC, so that we don't
+ # accidentally link it into a program.
+ # $show "echo timestamp > $libobj"
+ # $opt_dry_run || eval "echo timestamp > $libobj" || exit $?
+ exit $EXIT_SUCCESS
+ fi
+
+ if test -n "$pic_flag" || test "$pic_mode" != default; then
+ # Only do commands if we really have different PIC objects.
+ reload_objs="$libobjs $reload_conv_objs"
+ output="$libobj"
+ func_execute_cmds "$reload_cmds" 'exit $?'
+ fi
+
+ if test -n "$gentop"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+
+ exit $EXIT_SUCCESS
+ ;;
+
+ prog)
+ case $host in
+ *cygwin*) func_stripname '' '.exe' "$output"
+ output=$func_stripname_result.exe;;
+ esac
+ test -n "$vinfo" && \
+ func_warning "\`-version-info' is ignored for programs"
+
+ test -n "$release" && \
+ func_warning "\`-release' is ignored for programs"
+
+ test "$preload" = yes \
+ && test "$dlopen_support" = unknown \
+ && test "$dlopen_self" = unknown \
+ && test "$dlopen_self_static" = unknown && \
+ func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support."
+
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # On Rhapsody replace the C library with the System framework
+ compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'`
+ finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'`
+ ;;
+ esac
+
+ case $host in
+ *-*-darwin*)
+ # Don't allow lazy linking; it breaks C++ global constructors,
+ # but it is supposedly fixed on 10.4 or later (yay!).
+ if test "$tagname" = CXX ; then
+ case ${MACOSX_DEPLOYMENT_TARGET-10.0} in
+ 10.[0123])
+ func_append compile_command " ${wl}-bind_at_load"
+ func_append finalize_command " ${wl}-bind_at_load"
+ ;;
+ esac
+ fi
+ # Time to change all our "foo.ltframework" stuff back to "-framework foo"
+ compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+ finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+ ;;
+ esac
+
+
+ # move library search paths that coincide with paths to not yet
+ # installed libraries to the beginning of the library search list
+ new_libs=
+ for path in $notinst_path; do
+ case " $new_libs " in
+ *" -L$path/$objdir "*) ;;
+ *)
+ case " $compile_deplibs " in
+ *" -L$path/$objdir "*)
+ func_append new_libs " -L$path/$objdir" ;;
+ esac
+ ;;
+ esac
+ done
+ for deplib in $compile_deplibs; do
+ case $deplib in
+ -L*)
+ case " $new_libs " in
+ *" $deplib "*) ;;
+ *) func_append new_libs " $deplib" ;;
+ esac
+ ;;
+ *) func_append new_libs " $deplib" ;;
+ esac
+ done
+ compile_deplibs="$new_libs"
+
+
+ func_append compile_command " $compile_deplibs"
+ func_append finalize_command " $finalize_deplibs"
+
+ if test -n "$rpath$xrpath"; then
+ # If the user specified any rpath flags, then add them.
+ for libdir in $rpath $xrpath; do
+ # This is the magic to use -rpath.
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) func_append finalize_rpath " $libdir" ;;
+ esac
+ done
+ fi
+
+ # Now hardcode the library paths
+ rpath=
+ hardcode_libdirs=
+ for libdir in $compile_rpath $finalize_rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ func_append rpath " $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$perm_rpath " in
+ *" $libdir "*) ;;
+ *) func_append perm_rpath " $libdir" ;;
+ esac
+ fi
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+ testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'`
+ case :$dllsearchpath: in
+ *":$libdir:"*) ;;
+ ::) dllsearchpath=$libdir;;
+ *) func_append dllsearchpath ":$libdir";;
+ esac
+ case :$dllsearchpath: in
+ *":$testbindir:"*) ;;
+ ::) dllsearchpath=$testbindir;;
+ *) func_append dllsearchpath ":$testbindir";;
+ esac
+ ;;
+ esac
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval rpath=\" $hardcode_libdir_flag_spec\"
+ fi
+ compile_rpath="$rpath"
+
+ rpath=
+ hardcode_libdirs=
+ for libdir in $finalize_rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ func_append rpath " $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$finalize_perm_rpath " in
+ *" $libdir "*) ;;
+ *) func_append finalize_perm_rpath " $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval rpath=\" $hardcode_libdir_flag_spec\"
+ fi
+ finalize_rpath="$rpath"
+
+ if test -n "$libobjs" && test "$build_old_libs" = yes; then
+ # Transform all the library objects into standard objects.
+ compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP`
+ finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP`
+ fi
+
+ func_generate_dlsyms "$outputname" "@PROGRAM@" "no"
+
+ # template prelinking step
+ if test -n "$prelink_cmds"; then
+ func_execute_cmds "$prelink_cmds" 'exit $?'
+ fi
+
+ wrappers_required=yes
+ case $host in
+ *cegcc* | *mingw32ce*)
+ # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway.
+ wrappers_required=no
+ ;;
+ *cygwin* | *mingw* )
+ if test "$build_libtool_libs" != yes; then
+ wrappers_required=no
+ fi
+ ;;
+ *)
+ if test "$need_relink" = no || test "$build_libtool_libs" != yes; then
+ wrappers_required=no
+ fi
+ ;;
+ esac
+ if test "$wrappers_required" = no; then
+ # Replace the output file specification.
+ compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'`
+ link_command="$compile_command$compile_rpath"
+
+ # We have no uninstalled library dependencies, so finalize right now.
+ exit_status=0
+ func_show_eval "$link_command" 'exit_status=$?'
+
+ if test -n "$postlink_cmds"; then
+ func_to_tool_file "$output"
+ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+ func_execute_cmds "$postlink_cmds" 'exit $?'
+ fi
+
+ # Delete the generated files.
+ if test -f "$output_objdir/${outputname}S.${objext}"; then
+ func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"'
+ fi
+
+ exit $exit_status
+ fi
+
+ if test -n "$compile_shlibpath$finalize_shlibpath"; then
+ compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
+ fi
+ if test -n "$finalize_shlibpath"; then
+ finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
+ fi
+
+ compile_var=
+ finalize_var=
+ if test -n "$runpath_var"; then
+ if test -n "$perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $perm_rpath; do
+ func_append rpath "$dir:"
+ done
+ compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
+ fi
+ if test -n "$finalize_perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $finalize_perm_rpath; do
+ func_append rpath "$dir:"
+ done
+ finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
+ fi
+ fi
+
+ if test "$no_install" = yes; then
+ # We don't need to create a wrapper script.
+ link_command="$compile_var$compile_command$compile_rpath"
+ # Replace the output file specification.
+ link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'`
+ # Delete the old output file.
+ $opt_dry_run || $RM $output
+ # Link the executable and exit
+ func_show_eval "$link_command" 'exit $?'
+
+ if test -n "$postlink_cmds"; then
+ func_to_tool_file "$output"
+ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+ func_execute_cmds "$postlink_cmds" 'exit $?'
+ fi
+
+ exit $EXIT_SUCCESS
+ fi
+
+ if test "$hardcode_action" = relink; then
+ # Fast installation is not supported
+ link_command="$compile_var$compile_command$compile_rpath"
+ relink_command="$finalize_var$finalize_command$finalize_rpath"
+
+ func_warning "this platform does not like uninstalled shared libraries"
+ func_warning "\`$output' will be relinked during installation"
+ else
+ if test "$fast_install" != no; then
+ link_command="$finalize_var$compile_command$finalize_rpath"
+ if test "$fast_install" = yes; then
+ relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'`
+ else
+ # fast_install is set to needless
+ relink_command=
+ fi
+ else
+ link_command="$compile_var$compile_command$compile_rpath"
+ relink_command="$finalize_var$finalize_command$finalize_rpath"
+ fi
+ fi
+
+ # Replace the output file specification.
+ link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
+
+ # Delete the old output files.
+ $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname
+
+ func_show_eval "$link_command" 'exit $?'
+
+ if test -n "$postlink_cmds"; then
+ func_to_tool_file "$output_objdir/$outputname"
+ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+ func_execute_cmds "$postlink_cmds" 'exit $?'
+ fi
+
+ # Now create the wrapper script.
+ func_verbose "creating $output"
+
+ # Quote the relink command for shipping.
+ if test -n "$relink_command"; then
+ # Preserve any variables that may affect compiler behavior
+ for var in $variables_saved_for_relink; do
+ if eval test -z \"\${$var+set}\"; then
+ relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
+ elif eval var_value=\$$var; test -z "$var_value"; then
+ relink_command="$var=; export $var; $relink_command"
+ else
+ func_quote_for_eval "$var_value"
+ relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
+ fi
+ done
+ relink_command="(cd `pwd`; $relink_command)"
+ relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"`
+ fi
+
+ # Only actually do things if not in dry run mode.
+ $opt_dry_run || {
+ # win32 will think the script is a binary if it has
+ # a .exe suffix, so we strip it off here.
+ case $output in
+ *.exe) func_stripname '' '.exe' "$output"
+ output=$func_stripname_result ;;
+ esac
+ # test for cygwin because mv fails w/o .exe extensions
+ case $host in
+ *cygwin*)
+ exeext=.exe
+ func_stripname '' '.exe' "$outputname"
+ outputname=$func_stripname_result ;;
+ *) exeext= ;;
+ esac
+ case $host in
+ *cygwin* | *mingw* )
+ func_dirname_and_basename "$output" "" "."
+ output_name=$func_basename_result
+ output_path=$func_dirname_result
+ cwrappersource="$output_path/$objdir/lt-$output_name.c"
+ cwrapper="$output_path/$output_name.exe"
+ $RM $cwrappersource $cwrapper
+ trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
+
+ func_emit_cwrapperexe_src > $cwrappersource
+
+ # The wrapper executable is built using the $host compiler,
+ # because it contains $host paths and files. If cross-
+ # compiling, it, like the target executable, must be
+ # executed on the $host or under an emulation environment.
+ $opt_dry_run || {
+ $LTCC $LTCFLAGS -o $cwrapper $cwrappersource
+ $STRIP $cwrapper
+ }
+
+ # Now, create the wrapper script for func_source use:
+ func_ltwrapper_scriptname $cwrapper
+ $RM $func_ltwrapper_scriptname_result
+ trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15
+ $opt_dry_run || {
+ # note: this script will not be executed, so do not chmod.
+ if test "x$build" = "x$host" ; then
+ $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result
+ else
+ func_emit_wrapper no > $func_ltwrapper_scriptname_result
+ fi
+ }
+ ;;
+ * )
+ $RM $output
+ trap "$RM $output; exit $EXIT_FAILURE" 1 2 15
+
+ func_emit_wrapper no > $output
+ chmod +x $output
+ ;;
+ esac
+ }
+ exit $EXIT_SUCCESS
+ ;;
+ esac
+
+ # See if we need to build an old-fashioned archive.
+ for oldlib in $oldlibs; do
+
+ if test "$build_libtool_libs" = convenience; then
+ oldobjs="$libobjs_save $symfileobj"
+ addlibs="$convenience"
+ build_libtool_libs=no
+ else
+ if test "$build_libtool_libs" = module; then
+ oldobjs="$libobjs_save"
+ build_libtool_libs=no
+ else
+ oldobjs="$old_deplibs $non_pic_objects"
+ if test "$preload" = yes && test -f "$symfileobj"; then
+ func_append oldobjs " $symfileobj"
+ fi
+ fi
+ addlibs="$old_convenience"
+ fi
+
+ if test -n "$addlibs"; then
+ gentop="$output_objdir/${outputname}x"
+ func_append generated " $gentop"
+
+ func_extract_archives $gentop $addlibs
+ func_append oldobjs " $func_extract_archives_result"
+ fi
+
+ # Do each command in the archive commands.
+ if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
+ cmds=$old_archive_from_new_cmds
+ else
+
+ # Add any objects from preloaded convenience libraries
+ if test -n "$dlprefiles"; then
+ gentop="$output_objdir/${outputname}x"
+ func_append generated " $gentop"
+
+ func_extract_archives $gentop $dlprefiles
+ func_append oldobjs " $func_extract_archives_result"
+ fi
+
+ # POSIX demands no paths to be encoded in archives. We have
+ # to avoid creating archives with duplicate basenames if we
+ # might have to extract them afterwards, e.g., when creating a
+ # static archive out of a convenience library, or when linking
+ # the entirety of a libtool archive into another (currently
+ # not supported by libtool).
+ if (for obj in $oldobjs
+ do
+ func_basename "$obj"
+ $ECHO "$func_basename_result"
+ done | sort | sort -uc >/dev/null 2>&1); then
+ :
+ else
+ echo "copying selected object files to avoid basename conflicts..."
+ gentop="$output_objdir/${outputname}x"
+ func_append generated " $gentop"
+ func_mkdir_p "$gentop"
+ save_oldobjs=$oldobjs
+ oldobjs=
+ counter=1
+ for obj in $save_oldobjs
+ do
+ func_basename "$obj"
+ objbase="$func_basename_result"
+ case " $oldobjs " in
+ " ") oldobjs=$obj ;;
+ *[\ /]"$objbase "*)
+ while :; do
+ # Make sure we don't pick an alternate name that also
+ # overlaps.
+ newobj=lt$counter-$objbase
+ func_arith $counter + 1
+ counter=$func_arith_result
+ case " $oldobjs " in
+ *[\ /]"$newobj "*) ;;
+ *) if test ! -f "$gentop/$newobj"; then break; fi ;;
+ esac
+ done
+ func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj"
+ func_append oldobjs " $gentop/$newobj"
+ ;;
+ *) func_append oldobjs " $obj" ;;
+ esac
+ done
+ fi
+ func_to_tool_file "$oldlib" func_convert_file_msys_to_w32
+ tool_oldlib=$func_to_tool_file_result
+ eval cmds=\"$old_archive_cmds\"
+
+ func_len " $cmds"
+ len=$func_len_result
+ if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+ cmds=$old_archive_cmds
+ elif test -n "$archiver_list_spec"; then
+ func_verbose "using command file archive linking..."
+ for obj in $oldobjs
+ do
+ func_to_tool_file "$obj"
+ $ECHO "$func_to_tool_file_result"
+ done > $output_objdir/$libname.libcmd
+ func_to_tool_file "$output_objdir/$libname.libcmd"
+ oldobjs=" $archiver_list_spec$func_to_tool_file_result"
+ cmds=$old_archive_cmds
+ else
+ # the command line is too long to link in one step, link in parts
+ func_verbose "using piecewise archive linking..."
+ save_RANLIB=$RANLIB
+ RANLIB=:
+ objlist=
+ concat_cmds=
+ save_oldobjs=$oldobjs
+ oldobjs=
+ # Is there a better way of finding the last object in the list?
+ for obj in $save_oldobjs
+ do
+ last_oldobj=$obj
+ done
+ eval test_cmds=\"$old_archive_cmds\"
+ func_len " $test_cmds"
+ len0=$func_len_result
+ len=$len0
+ for obj in $save_oldobjs
+ do
+ func_len " $obj"
+ func_arith $len + $func_len_result
+ len=$func_arith_result
+ func_append objlist " $obj"
+ if test "$len" -lt "$max_cmd_len"; then
+ :
+ else
+ # the above command should be used before it gets too long
+ oldobjs=$objlist
+ if test "$obj" = "$last_oldobj" ; then
+ RANLIB=$save_RANLIB
+ fi
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+ eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\"
+ objlist=
+ len=$len0
+ fi
+ done
+ RANLIB=$save_RANLIB
+ oldobjs=$objlist
+ if test "X$oldobjs" = "X" ; then
+ eval cmds=\"\$concat_cmds\"
+ else
+ eval cmds=\"\$concat_cmds~\$old_archive_cmds\"
+ fi
+ fi
+ fi
+ func_execute_cmds "$cmds" 'exit $?'
+ done
+
+ test -n "$generated" && \
+ func_show_eval "${RM}r$generated"
+
+ # Now create the libtool archive.
+ case $output in
+ *.la)
+ old_library=
+ test "$build_old_libs" = yes && old_library="$libname.$libext"
+ func_verbose "creating $output"
+
+ # Preserve any variables that may affect compiler behavior
+ for var in $variables_saved_for_relink; do
+ if eval test -z \"\${$var+set}\"; then
+ relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
+ elif eval var_value=\$$var; test -z "$var_value"; then
+ relink_command="$var=; export $var; $relink_command"
+ else
+ func_quote_for_eval "$var_value"
+ relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
+ fi
+ done
+ # Quote the link command for shipping.
+ relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)"
+ relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"`
+ if test "$hardcode_automatic" = yes ; then
+ relink_command=
+ fi
+
+ # Only create the output if not a dry run.
+ $opt_dry_run || {
+ for installed in no yes; do
+ if test "$installed" = yes; then
+ if test -z "$install_libdir"; then
+ break
+ fi
+ output="$output_objdir/$outputname"i
+ # Replace all uninstalled libtool libraries with the installed ones
+ newdependency_libs=
+ for deplib in $dependency_libs; do
+ case $deplib in
+ *.la)
+ func_basename "$deplib"
+ name="$func_basename_result"
+ func_resolve_sysroot "$deplib"
+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result`
+ test -z "$libdir" && \
+ func_fatal_error "\`$deplib' is not a valid libtool archive"
+ func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name"
+ ;;
+ -L*)
+ func_stripname -L '' "$deplib"
+ func_replace_sysroot "$func_stripname_result"
+ func_append newdependency_libs " -L$func_replace_sysroot_result"
+ ;;
+ -R*)
+ func_stripname -R '' "$deplib"
+ func_replace_sysroot "$func_stripname_result"
+ func_append newdependency_libs " -R$func_replace_sysroot_result"
+ ;;
+ *) func_append newdependency_libs " $deplib" ;;
+ esac
+ done
+ dependency_libs="$newdependency_libs"
+ newdlfiles=
+
+ for lib in $dlfiles; do
+ case $lib in
+ *.la)
+ func_basename "$lib"
+ name="$func_basename_result"
+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+ test -z "$libdir" && \
+ func_fatal_error "\`$lib' is not a valid libtool archive"
+ func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name"
+ ;;
+ *) func_append newdlfiles " $lib" ;;
+ esac
+ done
+ dlfiles="$newdlfiles"
+ newdlprefiles=
+ for lib in $dlprefiles; do
+ case $lib in
+ *.la)
+ # Only pass preopened files to the pseudo-archive (for
+ # eventual linking with the app. that links it) if we
+ # didn't already link the preopened objects directly into
+ # the library:
+ func_basename "$lib"
+ name="$func_basename_result"
+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+ test -z "$libdir" && \
+ func_fatal_error "\`$lib' is not a valid libtool archive"
+ func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name"
+ ;;
+ esac
+ done
+ dlprefiles="$newdlprefiles"
+ else
+ newdlfiles=
+ for lib in $dlfiles; do
+ case $lib in
+ [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
+ *) abs=`pwd`"/$lib" ;;
+ esac
+ func_append newdlfiles " $abs"
+ done
+ dlfiles="$newdlfiles"
+ newdlprefiles=
+ for lib in $dlprefiles; do
+ case $lib in
+ [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
+ *) abs=`pwd`"/$lib" ;;
+ esac
+ func_append newdlprefiles " $abs"
+ done
+ dlprefiles="$newdlprefiles"
+ fi
+ $RM $output
+ # place dlname in correct position for cygwin
+ # In fact, it would be nice if we could use this code for all target
+ # systems that can't hard-code library paths into their executables
+ # and that have no shared library path variable independent of PATH,
+ # but it turns out we can't easily determine that from inspecting
+ # libtool variables, so we have to hard-code the OSs to which it
+ # applies here; at the moment, that means platforms that use the PE
+ # object format with DLL files. See the long comment at the top of
+ # tests/bindir.at for full details.
+ tdlname=$dlname
+ case $host,$output,$installed,$module,$dlname in
+ *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll)
+ # If a -bindir argument was supplied, place the dll there.
+ if test "x$bindir" != x ;
+ then
+ func_relative_path "$install_libdir" "$bindir"
+ tdlname=$func_relative_path_result$dlname
+ else
+ # Otherwise fall back on heuristic.
+ tdlname=../bin/$dlname
+ fi
+ ;;
+ esac
+ $ECHO > $output "\
+# $outputname - a libtool library file
+# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='$tdlname'
+
+# Names of this library.
+library_names='$library_names'
+
+# The name of the static archive.
+old_library='$old_library'
+
+# Linker flags that can not go in dependency_libs.
+inherited_linker_flags='$new_inherited_linker_flags'
+
+# Libraries that this one depends upon.
+dependency_libs='$dependency_libs'
+
+# Names of additional weak libraries provided by this library
+weak_library_names='$weak_libs'
+
+# Version information for $libname.
+current=$current
+age=$age
+revision=$revision
+
+# Is this an already installed library?
+installed=$installed
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=$module
+
+# Files to dlopen/dlpreopen
+dlopen='$dlfiles'
+dlpreopen='$dlprefiles'
+
+# Directory that this library needs to be installed in:
+libdir='$install_libdir'"
+ if test "$installed" = no && test "$need_relink" = yes; then
+ $ECHO >> $output "\
+relink_command=\"$relink_command\""
+ fi
+ done
+ }
+
+ # Do a symbolic link so that the libtool archive can be found in
+ # LD_LIBRARY_PATH before the program is installed.
+ func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?'
+ ;;
+ esac
+ exit $EXIT_SUCCESS
+}
+
+{ test "$opt_mode" = link || test "$opt_mode" = relink; } &&
+ func_mode_link ${1+"$@"}
+
+
+# func_mode_uninstall arg...
+func_mode_uninstall ()
+{
+ $opt_debug
+ RM="$nonopt"
+ files=
+ rmforce=
+ exit_status=0
+
+ # This variable tells wrapper scripts just to set variables rather
+ # than running their programs.
+ libtool_install_magic="$magic"
+
+ for arg
+ do
+ case $arg in
+ -f) func_append RM " $arg"; rmforce=yes ;;
+ -*) func_append RM " $arg" ;;
+ *) func_append files " $arg" ;;
+ esac
+ done
+
+ test -z "$RM" && \
+ func_fatal_help "you must specify an RM program"
+
+ rmdirs=
+
+ for file in $files; do
+ func_dirname "$file" "" "."
+ dir="$func_dirname_result"
+ if test "X$dir" = X.; then
+ odir="$objdir"
+ else
+ odir="$dir/$objdir"
+ fi
+ func_basename "$file"
+ name="$func_basename_result"
+ test "$opt_mode" = uninstall && odir="$dir"
+
+ # Remember odir for removal later, being careful to avoid duplicates
+ if test "$opt_mode" = clean; then
+ case " $rmdirs " in
+ *" $odir "*) ;;
+ *) func_append rmdirs " $odir" ;;
+ esac
+ fi
+
+ # Don't error if the file doesn't exist and rm -f was used.
+ if { test -L "$file"; } >/dev/null 2>&1 ||
+ { test -h "$file"; } >/dev/null 2>&1 ||
+ test -f "$file"; then
+ :
+ elif test -d "$file"; then
+ exit_status=1
+ continue
+ elif test "$rmforce" = yes; then
+ continue
+ fi
+
+ rmfiles="$file"
+
+ case $name in
+ *.la)
+ # Possibly a libtool archive, so verify it.
+ if func_lalib_p "$file"; then
+ func_source $dir/$name
+
+ # Delete the libtool libraries and symlinks.
+ for n in $library_names; do
+ func_append rmfiles " $odir/$n"
+ done
+ test -n "$old_library" && func_append rmfiles " $odir/$old_library"
+
+ case "$opt_mode" in
+ clean)
+ case " $library_names " in
+ *" $dlname "*) ;;
+ *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;;
+ esac
+ test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i"
+ ;;
+ uninstall)
+ if test -n "$library_names"; then
+ # Do each command in the postuninstall commands.
+ func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1'
+ fi
+
+ if test -n "$old_library"; then
+ # Do each command in the old_postuninstall commands.
+ func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1'
+ fi
+ # FIXME: should reinstall the best remaining shared library.
+ ;;
+ esac
+ fi
+ ;;
+
+ *.lo)
+ # Possibly a libtool object, so verify it.
+ if func_lalib_p "$file"; then
+
+ # Read the .lo file
+ func_source $dir/$name
+
+ # Add PIC object to the list of files to remove.
+ if test -n "$pic_object" &&
+ test "$pic_object" != none; then
+ func_append rmfiles " $dir/$pic_object"
+ fi
+
+ # Add non-PIC object to the list of files to remove.
+ if test -n "$non_pic_object" &&
+ test "$non_pic_object" != none; then
+ func_append rmfiles " $dir/$non_pic_object"
+ fi
+ fi
+ ;;
+
+ *)
+ if test "$opt_mode" = clean ; then
+ noexename=$name
+ case $file in
+ *.exe)
+ func_stripname '' '.exe' "$file"
+ file=$func_stripname_result
+ func_stripname '' '.exe' "$name"
+ noexename=$func_stripname_result
+ # $file with .exe has already been added to rmfiles,
+ # add $file without .exe
+ func_append rmfiles " $file"
+ ;;
+ esac
+ # Do a test to see if this is a libtool program.
+ if func_ltwrapper_p "$file"; then
+ if func_ltwrapper_executable_p "$file"; then
+ func_ltwrapper_scriptname "$file"
+ relink_command=
+ func_source $func_ltwrapper_scriptname_result
+ func_append rmfiles " $func_ltwrapper_scriptname_result"
+ else
+ relink_command=
+ func_source $dir/$noexename
+ fi
+
+ # note $name still contains .exe if it was in $file originally
+ # as does the version of $file that was added into $rmfiles
+ func_append rmfiles " $odir/$name $odir/${name}S.${objext}"
+ if test "$fast_install" = yes && test -n "$relink_command"; then
+ func_append rmfiles " $odir/lt-$name"
+ fi
+ if test "X$noexename" != "X$name" ; then
+ func_append rmfiles " $odir/lt-${noexename}.c"
+ fi
+ fi
+ fi
+ ;;
+ esac
+ func_show_eval "$RM $rmfiles" 'exit_status=1'
+ done
+
+ # Try to remove the ${objdir}s in the directories where we deleted files
+ for dir in $rmdirs; do
+ if test -d "$dir"; then
+ func_show_eval "rmdir $dir >/dev/null 2>&1"
+ fi
+ done
+
+ exit $exit_status
+}
+
+{ test "$opt_mode" = uninstall || test "$opt_mode" = clean; } &&
+ func_mode_uninstall ${1+"$@"}
+
+test -z "$opt_mode" && {
+ help="$generic_help"
+ func_fatal_help "you must specify a MODE"
+}
+
+test -z "$exec_cmd" && \
+ func_fatal_help "invalid operation mode \`$opt_mode'"
+
+if test -n "$exec_cmd"; then
+ eval exec "$exec_cmd"
+ exit $EXIT_FAILURE
+fi
+
+exit $exit_status
+
+
+# The TAGs below are defined such that we never get into a situation
+# in which we disable both kinds of libraries. Given conflicting
+# choices, we go for a static library, that is the most portable,
+# since we can't tell whether shared libraries were disabled because
+# the user asked for that or because the platform doesn't support
+# them. This is particularly important on AIX, because we don't
+# support having both static and shared libraries enabled at the same
+# time on that platform, so we default to a shared-only configuration.
+# If a disable-shared tag is given, we'll fall back to a static-only
+# configuration. But we'll never go from static-only to shared-only.
+
+# ### BEGIN LIBTOOL TAG CONFIG: disable-shared
+build_libtool_libs=no
+build_old_libs=yes
+# ### END LIBTOOL TAG CONFIG: disable-shared
+
+# ### BEGIN LIBTOOL TAG CONFIG: disable-static
+build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac`
+# ### END LIBTOOL TAG CONFIG: disable-static
+
+# Local Variables:
+# mode:shell-script
+# sh-indentation:2
+# End:
+# vi:sw=2
+
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/m4/gtest.m4 b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/m4/gtest.m4
new file mode 100644
index 00000000..98e61f96
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/m4/gtest.m4
@@ -0,0 +1,74 @@
+dnl GTEST_LIB_CHECK([minimum version [,
+dnl action if found [,action if not found]]])
+dnl
+dnl Check for the presence of the Google Test library, optionally at a minimum
+dnl version, and indicate a viable version with the HAVE_GTEST flag. It defines
+dnl standard variables for substitution including GTEST_CPPFLAGS,
+dnl GTEST_CXXFLAGS, GTEST_LDFLAGS, and GTEST_LIBS. It also defines
+dnl GTEST_VERSION as the version of Google Test found. Finally, it provides
+dnl optional custom action slots in the event GTEST is found or not.
+AC_DEFUN([GTEST_LIB_CHECK],
+[
+dnl Provide a flag to enable or disable Google Test usage.
+AC_ARG_ENABLE([gtest],
+ [AS_HELP_STRING([--enable-gtest],
+ [Enable tests using the Google C++ Testing Framework.
+ (Default is enabled.)])],
+ [],
+ [enable_gtest=])
+AC_ARG_VAR([GTEST_CONFIG],
+ [The exact path of Google Test's 'gtest-config' script.])
+AC_ARG_VAR([GTEST_CPPFLAGS],
+ [C-like preprocessor flags for Google Test.])
+AC_ARG_VAR([GTEST_CXXFLAGS],
+ [C++ compile flags for Google Test.])
+AC_ARG_VAR([GTEST_LDFLAGS],
+ [Linker path and option flags for Google Test.])
+AC_ARG_VAR([GTEST_LIBS],
+ [Library linking flags for Google Test.])
+AC_ARG_VAR([GTEST_VERSION],
+ [The version of Google Test available.])
+HAVE_GTEST="no"
+AS_IF([test "x${enable_gtest}" != "xno"],
+ [AC_MSG_CHECKING([for 'gtest-config'])
+ AS_IF([test "x${enable_gtest}" = "xyes"],
+ [AS_IF([test -x "${enable_gtest}/scripts/gtest-config"],
+ [GTEST_CONFIG="${enable_gtest}/scripts/gtest-config"],
+ [GTEST_CONFIG="${enable_gtest}/bin/gtest-config"])
+ AS_IF([test -x "${GTEST_CONFIG}"], [],
+ [AC_MSG_RESULT([no])
+ AC_MSG_ERROR([dnl
+Unable to locate either a built or installed Google Test.
+The specific location '${enable_gtest}' was provided for a built or installed
+Google Test, but no 'gtest-config' script could be found at this location.])
+ ])],
+ [AC_PATH_PROG([GTEST_CONFIG], [gtest-config])])
+ AS_IF([test -x "${GTEST_CONFIG}"],
+ [AC_MSG_RESULT([${GTEST_CONFIG}])
+ m4_ifval([$1],
+ [_gtest_min_version="--min-version=$1"
+ AC_MSG_CHECKING([for Google Test version >= $1])],
+ [_gtest_min_version="--min-version=0"
+ AC_MSG_CHECKING([for Google Test])])
+ AS_IF([${GTEST_CONFIG} ${_gtest_min_version}],
+ [AC_MSG_RESULT([yes])
+ HAVE_GTEST='yes'],
+ [AC_MSG_RESULT([no])])],
+ [AC_MSG_RESULT([no])])
+ AS_IF([test "x${HAVE_GTEST}" = "xyes"],
+ [GTEST_CPPFLAGS=`${GTEST_CONFIG} --cppflags`
+ GTEST_CXXFLAGS=`${GTEST_CONFIG} --cxxflags`
+ GTEST_LDFLAGS=`${GTEST_CONFIG} --ldflags`
+ GTEST_LIBS=`${GTEST_CONFIG} --libs`
+ GTEST_VERSION=`${GTEST_CONFIG} --version`
+ AC_DEFINE([HAVE_GTEST],[1],[Defined when Google Test is available.])],
+ [AS_IF([test "x${enable_gtest}" = "xyes"],
+ [AC_MSG_ERROR([dnl
+Google Test was enabled, but no viable version could be found.])
+ ])])])
+AC_SUBST([HAVE_GTEST])
+AM_CONDITIONAL([HAVE_GTEST],[test "x$HAVE_GTEST" = "xyes"])
+AS_IF([test "x$HAVE_GTEST" = "xyes"],
+ [m4_ifval([$2], [$2])],
+ [m4_ifval([$3], [$3])])
+])
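+dnl
+dnl Illustrative usage sketch (an addition, not part of the upstream macro):
+dnl a configure.ac might invoke the check as below; the minimum version and
+dnl the UNIT_TESTS conditional name are hypothetical.
+dnl
+dnl   GTEST_LIB_CHECK([1.6.0],
+dnl     [AM_CONDITIONAL([UNIT_TESTS], [true])],
+dnl     [AM_CONDITIONAL([UNIT_TESTS], [false])])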
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/missing b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/missing
new file mode 100644
index 00000000..86a8fc31
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/missing
@@ -0,0 +1,331 @@
+#! /bin/sh
+# Common stub for a few missing GNU programs while installing.
+
+scriptversion=2012-01-06.13; # UTC
+
+# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006,
+# 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+# Originally by François Pinard <pinard@iro.umontreal.ca>, 1996.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+if test $# -eq 0; then
+ echo 1>&2 "Try \`$0 --help' for more information"
+ exit 1
+fi
+
+run=:
+sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p'
+sed_minuso='s/.* -o \([^ ]*\).*/\1/p'
+
+# In the cases where this matters, `missing' is being run in the
+# srcdir already.
+if test -f configure.ac; then
+ configure_ac=configure.ac
+else
+ configure_ac=configure.in
+fi
+
+msg="missing on your system"
+
+case $1 in
+--run)
+ # Try to run requested program, and just exit if it succeeds.
+ run=
+ shift
+ "$@" && exit 0
+ # Exit code 63 means version mismatch. This often happens
+ # when the user tries to use an ancient version of a tool on
+ # a file that requires a minimum version. In this case we
+ # should proceed as if the program had been absent, or
+ # if --run hadn't been passed.
+ if test $? = 63; then
+ run=:
+ msg="probably too old"
+ fi
+ ;;
+
+ -h|--h|--he|--hel|--help)
+ echo "\
+$0 [OPTION]... PROGRAM [ARGUMENT]...
+
+Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an
+error status if there is no known handling for PROGRAM.
+
+Options:
+ -h, --help display this help and exit
+ -v, --version output version information and exit
+ --run try to run the given command, and emulate it if it fails
+
+Supported PROGRAM values:
+ aclocal touch file \`aclocal.m4'
+ autoconf touch file \`configure'
+ autoheader touch file \`config.h.in'
+ autom4te touch the output file, or create a stub one
+ automake touch all \`Makefile.in' files
+ bison create \`y.tab.[ch]', if possible, from existing .[ch]
+ flex create \`lex.yy.c', if possible, from existing .c
+ help2man touch the output file
+ lex create \`lex.yy.c', if possible, from existing .c
+ makeinfo touch the output file
+ yacc create \`y.tab.[ch]', if possible, from existing .[ch]
+
+Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and
+\`g' are ignored when checking the name.
+
+Send bug reports to <bug-automake@gnu.org>."
+ exit $?
+ ;;
+
+ -v|--v|--ve|--ver|--vers|--versi|--versio|--version)
+ echo "missing $scriptversion (GNU Automake)"
+ exit $?
+ ;;
+
+ -*)
+ echo 1>&2 "$0: Unknown \`$1' option"
+ echo 1>&2 "Try \`$0 --help' for more information"
+ exit 1
+ ;;
+
+esac
+
+# normalize program name to check for.
+program=`echo "$1" | sed '
+ s/^gnu-//; t
+ s/^gnu//; t
+ s/^g//; t'`
+
+# Now exit if we have it, but it failed. Also exit now if we
+# don't have it and --version was passed (most likely to detect
+# the program). This is about non-GNU programs, so use $1 not
+# $program.
+case $1 in
+ lex*|yacc*)
+ # Not GNU programs, they don't have --version.
+ ;;
+
+ *)
+ if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
+ # We have it, but it failed.
+ exit 1
+ elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
+ # Could not run --version or --help. This is probably someone
+ # running `$TOOL --version' or `$TOOL --help' to check whether
+ # $TOOL exists and not knowing $TOOL uses missing.
+ exit 1
+ fi
+ ;;
+esac
+
+# If it does not exist, or fails to run (possibly an outdated version),
+# try to emulate it.
+case $program in
+ aclocal*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`acinclude.m4' or \`${configure_ac}'. You might want
+ to install the \`Automake' and \`Perl' packages. Grab them from
+ any GNU archive site."
+ touch aclocal.m4
+ ;;
+
+ autoconf*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`${configure_ac}'. You might want to install the
+ \`Autoconf' and \`GNU m4' packages. Grab them from any GNU
+ archive site."
+ touch configure
+ ;;
+
+ autoheader*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`acconfig.h' or \`${configure_ac}'. You might want
+ to install the \`Autoconf' and \`GNU m4' packages. Grab them
+ from any GNU archive site."
+ files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}`
+ test -z "$files" && files="config.h"
+ touch_files=
+ for f in $files; do
+ case $f in
+ *:*) touch_files="$touch_files "`echo "$f" |
+ sed -e 's/^[^:]*://' -e 's/:.*//'`;;
+ *) touch_files="$touch_files $f.in";;
+ esac
+ done
+ touch $touch_files
+ ;;
+
+ automake*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'.
+ You might want to install the \`Automake' and \`Perl' packages.
+ Grab them from any GNU archive site."
+ find . -type f -name Makefile.am -print |
+ sed 's/\.am$/.in/' |
+ while read f; do touch "$f"; done
+ ;;
+
+ autom4te*)
+ echo 1>&2 "\
+WARNING: \`$1' is needed, but is $msg.
+ You might have modified some files without having the
+ proper tools for further handling them.
+ You can get \`$1' as part of \`Autoconf' from any GNU
+ archive site."
+
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -f "$file"; then
+ touch $file
+ else
+ test -z "$file" || exec >$file
+ echo "#! /bin/sh"
+ echo "# Created by GNU Automake missing as a replacement of"
+ echo "# $ $@"
+ echo "exit 0"
+ chmod +x $file
+ exit 1
+ fi
+ ;;
+
+ bison*|yacc*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a \`.y' file. You may need the \`Bison' package
+ in order for those modifications to take effect. You can get
+ \`Bison' from any GNU archive site."
+ rm -f y.tab.c y.tab.h
+ if test $# -ne 1; then
+ eval LASTARG=\${$#}
+ case $LASTARG in
+ *.y)
+ SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" y.tab.c
+ fi
+ SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" y.tab.h
+ fi
+ ;;
+ esac
+ fi
+ if test ! -f y.tab.h; then
+ echo >y.tab.h
+ fi
+ if test ! -f y.tab.c; then
+ echo 'main() { return 0; }' >y.tab.c
+ fi
+ ;;
+
+ lex*|flex*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a \`.l' file. You may need the \`Flex' package
+ in order for those modifications to take effect. You can get
+ \`Flex' from any GNU archive site."
+ rm -f lex.yy.c
+ if test $# -ne 1; then
+ eval LASTARG=\${$#}
+ case $LASTARG in
+ *.l)
+ SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" lex.yy.c
+ fi
+ ;;
+ esac
+ fi
+ if test ! -f lex.yy.c; then
+ echo 'main() { return 0; }' >lex.yy.c
+ fi
+ ;;
+
+ help2man*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a dependency of a manual page. You may need the
+ \`Help2man' package in order for those modifications to take
+ effect. You can get \`Help2man' from any GNU archive site."
+
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -f "$file"; then
+ touch $file
+ else
+ test -z "$file" || exec >$file
+ echo ".ab help2man is required to generate this page"
+ exit $?
+ fi
+ ;;
+
+ makeinfo*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a \`.texi' or \`.texinfo' file, or any other file
+ indirectly affecting the aspect of the manual. The spurious
+ call might also be the consequence of using a buggy \`make' (AIX,
+ DU, IRIX). You might want to install the \`Texinfo' package or
+ the \`GNU make' package. Grab either from any GNU archive site."
+ # The file to touch is that specified with -o ...
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -z "$file"; then
+ # ... or it is the one specified with @setfilename ...
+ infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'`
+ file=`sed -n '
+ /^@setfilename/{
+ s/.* \([^ ]*\) *$/\1/
+ p
+ q
+ }' $infile`
+ # ... or it is derived from the source name (dir/f.texi becomes f.info)
+ test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info
+ fi
+ # If the file does not exist, the user really needs makeinfo;
+ # let's fail without touching anything.
+ test -f $file || exit 1
+ touch $file
+ ;;
+
+ *)
+ echo 1>&2 "\
+WARNING: \`$1' is needed, and is $msg.
+ You might have modified some files without having the
+ proper tools for further handling them. Check the \`README' file,
+ it often tells you about the needed prerequisites for installing
+ this package. You may also peek at any GNU archive site, in case
+ some other package would contain this missing \`$1' program."
+ exit 1
+ ;;
+esac
+
+exit 0
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-c.cc b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-c.cc
new file mode 100644
index 00000000..473a0b09
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-c.cc
@@ -0,0 +1,90 @@
+// Copyright 2011 Martin Gieseking <martin.gieseking@uos.de>.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "snappy.h"
+#include "snappy-c.h"
+
+extern "C" {
+
+snappy_status snappy_compress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t *compressed_length) {
+ if (*compressed_length < snappy_max_compressed_length(input_length)) {
+ return SNAPPY_BUFFER_TOO_SMALL;
+ }
+ snappy::RawCompress(input, input_length, compressed, compressed_length);
+ return SNAPPY_OK;
+}
+
+snappy_status snappy_uncompress(const char* compressed,
+ size_t compressed_length,
+ char* uncompressed,
+ size_t* uncompressed_length) {
+ size_t real_uncompressed_length;
+ if (!snappy::GetUncompressedLength(compressed,
+ compressed_length,
+ &real_uncompressed_length)) {
+ return SNAPPY_INVALID_INPUT;
+ }
+ if (*uncompressed_length < real_uncompressed_length) {
+ return SNAPPY_BUFFER_TOO_SMALL;
+ }
+ if (!snappy::RawUncompress(compressed, compressed_length, uncompressed)) {
+ return SNAPPY_INVALID_INPUT;
+ }
+ *uncompressed_length = real_uncompressed_length;
+ return SNAPPY_OK;
+}
+
+size_t snappy_max_compressed_length(size_t source_length) {
+ return snappy::MaxCompressedLength(source_length);
+}
+
+snappy_status snappy_uncompressed_length(const char *compressed,
+ size_t compressed_length,
+ size_t *result) {
+ if (snappy::GetUncompressedLength(compressed,
+ compressed_length,
+ result)) {
+ return SNAPPY_OK;
+ } else {
+ return SNAPPY_INVALID_INPUT;
+ }
+}
+
+snappy_status snappy_validate_compressed_buffer(const char *compressed,
+ size_t compressed_length) {
+ if (snappy::IsValidCompressedBuffer(compressed, compressed_length)) {
+ return SNAPPY_OK;
+ } else {
+ return SNAPPY_INVALID_INPUT;
+ }
+}
+
+} // extern "C"
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-c.h b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-c.h
new file mode 100644
index 00000000..c6c2a860
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-c.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2011 Martin Gieseking <martin.gieseking@uos.de>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Plain C interface (a wrapper around the C++ implementation).
+ */
+
+#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_
+#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+
+/*
+ * Return values; see the documentation for each function to know
+ * what each can return.
+ */
+typedef enum {
+ SNAPPY_OK = 0,
+ SNAPPY_INVALID_INPUT = 1,
+ SNAPPY_BUFFER_TOO_SMALL = 2
+} snappy_status;
+
+/*
+ * Takes the data stored in "input[0..input_length-1]" and stores
+ * it in the array pointed to by "compressed".
+ *
+ * <compressed_length> signals the space available in "compressed".
+ * If it is not at least equal to "snappy_max_compressed_length(input_length)",
+ * SNAPPY_BUFFER_TOO_SMALL is returned. After successful compression,
+ * <compressed_length> contains the true length of the compressed output,
+ * and SNAPPY_OK is returned.
+ *
+ * Example:
+ * size_t output_length = snappy_max_compressed_length(input_length);
+ * char* output = (char*)malloc(output_length);
+ * if (snappy_compress(input, input_length, output, &output_length)
+ * == SNAPPY_OK) {
+ * ... Process(output, output_length) ...
+ * }
+ * free(output);
+ */
+snappy_status snappy_compress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length);
+
+/*
+ * Given data in "compressed[0..compressed_length-1]" generated by
+ * calling the snappy_compress routine, this routine stores
+ * the uncompressed data to
+ * uncompressed[0..uncompressed_length-1].
+ * Returns failure (a value not equal to SNAPPY_OK) if the message
+ * is corrupted and could not be decompressed.
+ *
+ * <uncompressed_length> signals the space available in "uncompressed".
+ * If it is not at least equal to the value returned by
+ * snappy_uncompressed_length for this stream, SNAPPY_BUFFER_TOO_SMALL
+ * is returned. After successful decompression, <uncompressed_length>
+ * contains the true length of the decompressed output.
+ *
+ * Example:
+ * size_t output_length;
+ * if (snappy_uncompressed_length(input, input_length, &output_length)
+ * != SNAPPY_OK) {
+ * ... fail ...
+ * }
+ * char* output = (char*)malloc(output_length);
+ * if (snappy_uncompress(input, input_length, output, &output_length)
+ * == SNAPPY_OK) {
+ * ... Process(output, output_length) ...
+ * }
+ * free(output);
+ */
+snappy_status snappy_uncompress(const char* compressed,
+ size_t compressed_length,
+ char* uncompressed,
+ size_t* uncompressed_length);
+
+/*
+ * Returns the maximal size of the compressed representation of
+ * input data that is "source_length" bytes in length.
+ */
+size_t snappy_max_compressed_length(size_t source_length);
+
+/*
+ * REQUIRES: "compressed[]" was produced by snappy_compress()
+ * Returns SNAPPY_OK and stores the length of the uncompressed data in
+ * *result normally. Returns SNAPPY_INVALID_INPUT on parsing error.
+ * This operation takes O(1) time.
+ */
+snappy_status snappy_uncompressed_length(const char* compressed,
+ size_t compressed_length,
+ size_t* result);
+
+/*
+ * Check if the contents of "compressed[]" can be uncompressed successfully.
+ * Does not return the uncompressed data; returns SNAPPY_OK if the buffer
+ * can be uncompressed, or SNAPPY_INVALID_INPUT if it cannot.
+ * Takes time proportional to compressed_length, but is usually at least a
+ * factor of four faster than actual decompression.
+ */
+snappy_status snappy_validate_compressed_buffer(const char* compressed,
+ size_t compressed_length);
+
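+/*
+ * Illustrative usage sketch (an addition, not part of the upstream header):
+ * validate a buffer cheaply before committing to a full decompression.
+ * The names "buf", "buf_len", "output" and "output_length" are hypothetical.
+ *
+ *   if (snappy_validate_compressed_buffer(buf, buf_len) == SNAPPY_OK) {
+ *     ... snappy_uncompress(buf, buf_len, output, &output_length) ...
+ *   }
+ */
+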
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /* UTIL_SNAPPY_OPENSOURCE_SNAPPY_C_H_ */
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-internal.h b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-internal.h
new file mode 100644
index 00000000..c99d3313
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-internal.h
@@ -0,0 +1,150 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Internals shared between the Snappy implementation and its unittest.
+
+#ifndef UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+#define UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+
+#include "snappy-stubs-internal.h"
+
+namespace snappy {
+namespace internal {
+
+class WorkingMemory {
+ public:
+ WorkingMemory() : large_table_(NULL) { }
+ ~WorkingMemory() { delete[] large_table_; }
+
+ // Allocates and clears a hash table using memory in "*this",
+ // stores the number of buckets in "*table_size" and returns a pointer to
+ // the base of the hash table.
+ uint16* GetHashTable(size_t input_size, int* table_size);
+
+ private:
+ uint16 small_table_[1<<10]; // 2KB
+ uint16* large_table_; // Allocated only when needed
+
+ DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
+};
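+
+// Illustrative sketch (an addition, not part of the upstream header) of how
+// a compressor is expected to use WorkingMemory; names are hypothetical.
+//
+//   WorkingMemory wmem;
+//   int table_size;
+//   uint16* table = wmem.GetHashTable(input_size, &table_size);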
+
+// Flat array compression that does not emit the "uncompressed length"
+// prefix. Compresses "input" string to the "*op" buffer.
+//
+// REQUIRES: "input_length <= kBlockSize"
+// REQUIRES: "op" points to an array of memory that is at least
+// "MaxCompressedLength(input_length)" in size.
+// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+// REQUIRES: "table_size" is a power of two
+//
+// Returns an "end" pointer into "op" buffer.
+// "end - op" is the compressed size of "input".
+char* CompressFragment(const char* input,
+ size_t input_length,
+ char* op,
+ uint16* table,
+ const int table_size);
+
+// Return the largest n such that
+//
+// s1[0,n-1] == s2[0,n-1]
+// and n <= (s2_limit - s2).
+//
+// Does not read *s2_limit or beyond.
+// Does not read *(s1 + (s2_limit - s2)) or beyond.
+// Requires that s2_limit >= s2.
+//
+// Separate implementation for x86_64, for speed. Uses the fact that
+// x86_64 is little endian.
+#if defined(ARCH_K8)
+static inline int FindMatchLength(const char* s1,
+ const char* s2,
+ const char* s2_limit) {
+ assert(s2_limit >= s2);
+ int matched = 0;
+
+ // Find out how long the match is. We loop over the data 64 bits at a
+ // time until we find a 64-bit block that doesn't match; then we find
+ // the first non-matching bit and use that to calculate the total
+ // length of the match.
+ while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
+ if (PREDICT_FALSE(UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
+ s2 += 8;
+ matched += 8;
+ } else {
+ // On current (mid-2008) Opteron models there is a 3% more
+ // efficient code sequence to find the first non-matching byte.
+ // However, what follows is ~10% better on Intel Core 2 and newer,
+ // and we expect AMD's bsf instruction to improve.
+ uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
+ int matching_bits = Bits::FindLSBSetNonZero64(x);
+ matched += matching_bits >> 3;
+ return matched;
+ }
+ }
+ while (PREDICT_TRUE(s2 < s2_limit)) {
+ if (PREDICT_TRUE(s1[matched] == *s2)) {
+ ++s2;
+ ++matched;
+ } else {
+ return matched;
+ }
+ }
+ return matched;
+}
+#else
+static inline int FindMatchLength(const char* s1,
+ const char* s2,
+ const char* s2_limit) {
+ // Implementation based on the x86-64 version, above.
+ assert(s2_limit >= s2);
+ int matched = 0;
+
+ while (s2 <= s2_limit - 4 &&
+ UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
+ s2 += 4;
+ matched += 4;
+ }
+ if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
+ uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+ int matching_bits = Bits::FindLSBSetNonZero(x);
+ matched += matching_bits >> 3;
+ } else {
+ while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+ ++s2;
+ ++matched;
+ }
+ }
+ return matched;
+}
+#endif
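+
+// Worked example (illustrative only, not from the upstream source): with
+// s1 = "abcdX", s2 = "abcdY" and s2_limit = s2 + 5, the first four bytes
+// match and the fifth differs, so FindMatchLength(s1, s2, s2_limit)
+// returns 4.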
+
+} // end namespace internal
+} // end namespace snappy
+
+#endif // UTIL_SNAPPY_SNAPPY_INTERNAL_H_
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-sinksource.cc b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-sinksource.cc
new file mode 100644
index 00000000..5844552c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-sinksource.cc
@@ -0,0 +1,71 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <string.h>
+
+#include "snappy-sinksource.h"
+
+namespace snappy {
+
+Source::~Source() { }
+
+Sink::~Sink() { }
+
+char* Sink::GetAppendBuffer(size_t length, char* scratch) {
+ return scratch;
+}
+
+ByteArraySource::~ByteArraySource() { }
+
+size_t ByteArraySource::Available() const { return left_; }
+
+const char* ByteArraySource::Peek(size_t* len) {
+ *len = left_;
+ return ptr_;
+}
+
+void ByteArraySource::Skip(size_t n) {
+ left_ -= n;
+ ptr_ += n;
+}
+
+UncheckedByteArraySink::~UncheckedByteArraySink() { }
+
+void UncheckedByteArraySink::Append(const char* data, size_t n) {
+ // Do no copying if the caller filled in the result of GetAppendBuffer()
+ if (data != dest_) {
+ memcpy(dest_, data, n);
+ }
+ dest_ += n;
+}
+
+char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
+ return dest_;
+}
+
+}
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-sinksource.h b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-sinksource.h
new file mode 100644
index 00000000..faabfa1e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-sinksource.h
@@ -0,0 +1,137 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+#define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+
+#include <stddef.h>
+
+
+namespace snappy {
+
+// A Sink is an interface that consumes a sequence of bytes.
+class Sink {
+ public:
+ Sink() { }
+ virtual ~Sink();
+
+ // Append "bytes[0,n-1]" to this.
+ virtual void Append(const char* bytes, size_t n) = 0;
+
+ // Returns a writable buffer of the specified length for appending.
+ // May return a pointer to the caller-owned scratch buffer which
+ // must have at least the indicated length. The returned buffer is
+ // only valid until the next operation on this Sink.
+ //
+ // After writing at most "length" bytes, call Append() with the
+ // pointer returned from this function and the number of bytes
+ // written. Many Append() implementations will avoid copying
+ // bytes if this function returned an internal buffer.
+ //
+ // If a non-scratch buffer is returned, the caller may only pass a
+ // prefix of it to Append(). That is, it is not correct to pass an
+ // interior pointer of the returned array to Append().
+ //
+ // The default implementation always returns the scratch buffer.
+ virtual char* GetAppendBuffer(size_t length, char* scratch);
+
+
+ private:
+ // No copying
+ Sink(const Sink&);
+ void operator=(const Sink&);
+};
+
+// A Source is an interface that yields a sequence of bytes
+class Source {
+ public:
+ Source() { }
+ virtual ~Source();
+
+ // Return the number of bytes left to read from the source
+ virtual size_t Available() const = 0;
+
+ // Peek at the next flat region of the source. Does not reposition
+ // the source. The returned region is empty iff Available()==0.
+ //
+  // Returns a pointer to the beginning of the region and stores its
+ // length in *len.
+ //
+ // The returned region is valid until the next call to Skip() or
+ // until this object is destroyed, whichever occurs first.
+ //
+ // The returned region may be larger than Available() (for example
+ // if this ByteSource is a view on a substring of a larger source).
+ // The caller is responsible for ensuring that it only reads the
+ // Available() bytes.
+ virtual const char* Peek(size_t* len) = 0;
+
+ // Skip the next n bytes. Invalidates any buffer returned by
+ // a previous call to Peek().
+ // REQUIRES: Available() >= n
+ virtual void Skip(size_t n) = 0;
+
+ private:
+ // No copying
+ Source(const Source&);
+ void operator=(const Source&);
+};
+
+// A Source implementation that yields the contents of a flat array
+class ByteArraySource : public Source {
+ public:
+ ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
+ virtual ~ByteArraySource();
+ virtual size_t Available() const;
+ virtual const char* Peek(size_t* len);
+ virtual void Skip(size_t n);
+ private:
+ const char* ptr_;
+ size_t left_;
+};
+
+// A Sink implementation that writes to a flat array without any bound checks.
+class UncheckedByteArraySink : public Sink {
+ public:
+ explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
+ virtual ~UncheckedByteArraySink();
+ virtual void Append(const char* data, size_t n);
+ virtual char* GetAppendBuffer(size_t len, char* scratch);
+
+ // Return the current output pointer so that a caller can see how
+ // many bytes were produced.
+ // Note: this is not a Sink method.
+ char* CurrentDestination() const { return dest_; }
+ private:
+ char* dest_;
+};
+
+
+}
+
+#endif // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
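To illustrate the Sink/Source contracts documented above, here is a minimal sketch: a hypothetical StdStringSink that copies into a std::string, driven by a ByteArraySource. The class name, the standalone main(), and the sample data are assumptions, not part of the library.

    #include <string>
    #include "snappy-sinksource.h"

    // Hypothetical sink: always copies in Append(); it relies on the
    // default GetAppendBuffer(), which just returns the scratch buffer.
    class StdStringSink : public snappy::Sink {
     public:
      explicit StdStringSink(std::string* dest) : dest_(dest) {}
      virtual void Append(const char* bytes, size_t n) {
        dest_->append(bytes, n);
      }
     private:
      std::string* dest_;
    };

    int main() {
      const char data[] = "hello";
      snappy::ByteArraySource src(data, 5);
      std::string out;
      StdStringSink sink(&out);
      while (src.Available() > 0) {
        size_t len = 0;
        const char* p = src.Peek(&len);  // flat region, valid until Skip()
        sink.Append(p, len);
        src.Skip(len);                   // REQUIRES: Available() >= len
      }
      return out == "hello" ? 0 : 1;
    }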
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-internal.cc b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-internal.cc
new file mode 100644
index 00000000..6ed33437
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-internal.cc
@@ -0,0 +1,42 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <algorithm>
+#include <string>
+
+#include "snappy-stubs-internal.h"
+
+namespace snappy {
+
+void Varint::Append32(string* s, uint32 value) {
+ char buf[Varint::kMax32];
+ const char* p = Varint::Encode32(buf, value);
+ s->append(buf, p - buf);
+}
+
+} // namespace snappy
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-internal.h b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-internal.h
new file mode 100644
index 00000000..12393b62
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-internal.h
@@ -0,0 +1,491 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Various stubs for the open-source version of Snappy.
+
+#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <string>
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#include "snappy-stubs-public.h"
+
+#if defined(__x86_64__)
+
+// Enable 64-bit optimized versions of some routines.
+#define ARCH_K8 1
+
+#endif
+
+// Needed by OS X, among others.
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+// Pull in std::min, std::ostream, and the like. This is safe because this
+// header file is never used from any public header files.
+using namespace std;
+
+// The size of an array, if known at compile-time.
+// Will give unexpected results if used on a pointer.
+// We undefine it first, since some compilers already have a definition.
+#ifdef ARRAYSIZE
+#undef ARRAYSIZE
+#endif
+#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+
+// Static prediction hints.
+#ifdef HAVE_BUILTIN_EXPECT
+#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#else
+#define PREDICT_FALSE(x) x
+#define PREDICT_TRUE(x) x
+#endif
+
+// This is only used for recomputing the tag byte table used during
+// decompression; for simplicity we just remove it from the open-source
+// version (anyone who wants to regenerate it can just do the call
+// themselves within main()).
+#define DEFINE_bool(flag_name, default_value, description) \
+ bool FLAGS_ ## flag_name = default_value
+#define DECLARE_bool(flag_name) \
+ extern bool FLAGS_ ## flag_name
+
+namespace snappy {
+
+static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
+static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
+
+// Potentially unaligned loads and stores.
+
+// x86 and PowerPC can simply do these loads and stores native.
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
+
+#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+
+#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+
+// ARMv7 and newer support native unaligned accesses, but only of 16-bit
+// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
+// do an unaligned read and rotate the words around a bit, or do the reads very
+// slowly (trip through kernel mode). There's no simple #define that says just
+// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
+// sub-architectures.
+//
+// This is a mess, but there's not much we can do about it.
+
+#elif defined(__arm__) && \
+ !defined(__ARM_ARCH_4__) && \
+ !defined(__ARM_ARCH_4T__) && \
+ !defined(__ARM_ARCH_5__) && \
+ !defined(__ARM_ARCH_5T__) && \
+ !defined(__ARM_ARCH_5TE__) && \
+ !defined(__ARM_ARCH_5TEJ__) && \
+ !defined(__ARM_ARCH_6__) && \
+ !defined(__ARM_ARCH_6J__) && \
+ !defined(__ARM_ARCH_6K__) && \
+ !defined(__ARM_ARCH_6Z__) && \
+ !defined(__ARM_ARCH_6ZK__) && \
+ !defined(__ARM_ARCH_6T2__)
+
+#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+
+#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+
+// TODO(user): NEON supports unaligned 64-bit loads and stores.
+// See if that would be more efficient on platforms supporting it,
+// at least for copies.
+
+inline uint64 UNALIGNED_LOAD64(const void *p) {
+ uint64 t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline void UNALIGNED_STORE64(void *p, uint64 v) {
+ memcpy(p, &v, sizeof v);
+}
+
+#else
+
+// These functions are provided for architectures that don't support
+// unaligned loads and stores.
+
+inline uint16 UNALIGNED_LOAD16(const void *p) {
+ uint16 t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline uint32 UNALIGNED_LOAD32(const void *p) {
+ uint32 t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline uint64 UNALIGNED_LOAD64(const void *p) {
+ uint64 t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline void UNALIGNED_STORE16(void *p, uint16 v) {
+ memcpy(p, &v, sizeof v);
+}
+
+inline void UNALIGNED_STORE32(void *p, uint32 v) {
+ memcpy(p, &v, sizeof v);
+}
+
+inline void UNALIGNED_STORE64(void *p, uint64 v) {
+ memcpy(p, &v, sizeof v);
+}
+
+#endif
+
+// This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
+// on some platforms, in particular ARM.
+inline void UnalignedCopy64(const void *src, void *dst) {
+ if (sizeof(void *) == 8) {
+ UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
+ } else {
+ const char *src_char = reinterpret_cast<const char *>(src);
+ char *dst_char = reinterpret_cast<char *>(dst);
+
+ UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
+ UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
+ }
+}
+
+// The following guarantees declaration of the byte swap functions.
+#ifdef WORDS_BIGENDIAN
+
+#ifdef HAVE_SYS_BYTEORDER_H
+#include <sys/byteorder.h>
+#endif
+
+#ifdef HAVE_SYS_ENDIAN_H
+#include <sys/endian.h>
+#endif
+
+#ifdef _MSC_VER
+#include <stdlib.h>
+#define bswap_16(x) _byteswap_ushort(x)
+#define bswap_32(x) _byteswap_ulong(x)
+#define bswap_64(x) _byteswap_uint64(x)
+
+#elif defined(__APPLE__)
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#define bswap_16(x) OSSwapInt16(x)
+#define bswap_32(x) OSSwapInt32(x)
+#define bswap_64(x) OSSwapInt64(x)
+
+#elif defined(HAVE_BYTESWAP_H)
+#include <byteswap.h>
+
+#elif defined(bswap32)
+// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
+#define bswap_16(x) bswap16(x)
+#define bswap_32(x) bswap32(x)
+#define bswap_64(x) bswap64(x)
+
+#elif defined(BSWAP_64)
+// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
+#define bswap_16(x) BSWAP_16(x)
+#define bswap_32(x) BSWAP_32(x)
+#define bswap_64(x) BSWAP_64(x)
+
+#else
+
+inline uint16 bswap_16(uint16 x) {
+ return (x << 8) | (x >> 8);
+}
+
+inline uint32 bswap_32(uint32 x) {
+ x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
+ return (x >> 16) | (x << 16);
+}
+
+inline uint64 bswap_64(uint64 x) {
+ x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
+ x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
+ return (x >> 32) | (x << 32);
+}
+
+#endif
+
+#endif // WORDS_BIGENDIAN
+
+// Convert to little-endian storage, opposite of network format.
+// Convert x from host to little endian: x = LittleEndian.FromHost(x);
+// convert x from little endian to host: x = LittleEndian.ToHost(x);
+//
+// Store values into unaligned memory converting to little endian order:
+// LittleEndian.Store16(p, x);
+//
+// Load unaligned values stored in little endian converting to host order:
+// x = LittleEndian.Load16(p);
+class LittleEndian {
+ public:
+ // Conversion functions.
+#ifdef WORDS_BIGENDIAN
+
+ static uint16 FromHost16(uint16 x) { return bswap_16(x); }
+ static uint16 ToHost16(uint16 x) { return bswap_16(x); }
+
+ static uint32 FromHost32(uint32 x) { return bswap_32(x); }
+ static uint32 ToHost32(uint32 x) { return bswap_32(x); }
+
+ static bool IsLittleEndian() { return false; }
+
+#else // !defined(WORDS_BIGENDIAN)
+
+ static uint16 FromHost16(uint16 x) { return x; }
+ static uint16 ToHost16(uint16 x) { return x; }
+
+ static uint32 FromHost32(uint32 x) { return x; }
+ static uint32 ToHost32(uint32 x) { return x; }
+
+ static bool IsLittleEndian() { return true; }
+
+#endif // !defined(WORDS_BIGENDIAN)
+
+ // Functions to do unaligned loads and stores in little-endian order.
+ static uint16 Load16(const void *p) {
+ return ToHost16(UNALIGNED_LOAD16(p));
+ }
+
+ static void Store16(void *p, uint16 v) {
+ UNALIGNED_STORE16(p, FromHost16(v));
+ }
+
+ static uint32 Load32(const void *p) {
+ return ToHost32(UNALIGNED_LOAD32(p));
+ }
+
+ static void Store32(void *p, uint32 v) {
+ UNALIGNED_STORE32(p, FromHost32(v));
+ }
+};
+
+// Some bit-manipulation functions.
+class Bits {
+ public:
+ // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
+ static int Log2Floor(uint32 n);
+
+  // Return the index of the least-significant set bit, 0-indexed. Returns an
+ // undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
+ // that it's 0-indexed.
+ static int FindLSBSetNonZero(uint32 n);
+ static int FindLSBSetNonZero64(uint64 n);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Bits);
+};
+
+#ifdef HAVE_BUILTIN_CTZ
+
+inline int Bits::Log2Floor(uint32 n) {
+ return n == 0 ? -1 : 31 ^ __builtin_clz(n);
+}
+
+inline int Bits::FindLSBSetNonZero(uint32 n) {
+ return __builtin_ctz(n);
+}
+
+inline int Bits::FindLSBSetNonZero64(uint64 n) {
+ return __builtin_ctzll(n);
+}
+
+#else // Portable versions.
+
+inline int Bits::Log2Floor(uint32 n) {
+ if (n == 0)
+ return -1;
+ int log = 0;
+ uint32 value = n;
+ for (int i = 4; i >= 0; --i) {
+ int shift = (1 << i);
+ uint32 x = value >> shift;
+ if (x != 0) {
+ value = x;
+ log += shift;
+ }
+ }
+ assert(value == 1);
+ return log;
+}
+
+inline int Bits::FindLSBSetNonZero(uint32 n) {
+ int rc = 31;
+ for (int i = 4, shift = 1 << 4; i >= 0; --i) {
+ const uint32 x = n << shift;
+ if (x != 0) {
+ n = x;
+ rc -= shift;
+ }
+ shift >>= 1;
+ }
+ return rc;
+}
+
+// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
+inline int Bits::FindLSBSetNonZero64(uint64 n) {
+ const uint32 bottombits = static_cast<uint32>(n);
+ if (bottombits == 0) {
+ // Bottom bits are zero, so scan in top bits
+ return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+ } else {
+ return FindLSBSetNonZero(bottombits);
+ }
+}
+
+#endif // End portable versions.
+
+// Variable-length integer encoding.
+class Varint {
+ public:
+ // Maximum lengths of varint encoding of uint32.
+ static const int kMax32 = 5;
+
+ // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
+ // Never reads a character at or beyond limit. If a valid/terminated varint32
+ // was found in the range, stores it in *OUTPUT and returns a pointer just
+ // past the last byte of the varint32. Else returns NULL. On success,
+ // "result <= limit".
+ static const char* Parse32WithLimit(const char* ptr, const char* limit,
+ uint32* OUTPUT);
+
+ // REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
+ // EFFECTS Encodes "v" into "ptr" and returns a pointer to the
+ // byte just past the last encoded byte.
+ static char* Encode32(char* ptr, uint32 v);
+
+ // EFFECTS Appends the varint representation of "value" to "*s".
+ static void Append32(string* s, uint32 value);
+};
+
+inline const char* Varint::Parse32WithLimit(const char* p,
+ const char* l,
+ uint32* OUTPUT) {
+ const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
+ const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
+ uint32 b, result;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result = b & 127; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 7; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
+ return NULL; // Value is too long to be a varint32
+ done:
+ *OUTPUT = result;
+ return reinterpret_cast<const char*>(ptr);
+}
+
+inline char* Varint::Encode32(char* sptr, uint32 v) {
+ // Operate on characters as unsigneds
+ unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
+ static const int B = 128;
+ if (v < (1<<7)) {
+ *(ptr++) = v;
+ } else if (v < (1<<14)) {
+ *(ptr++) = v | B;
+ *(ptr++) = v>>7;
+ } else if (v < (1<<21)) {
+ *(ptr++) = v | B;
+ *(ptr++) = (v>>7) | B;
+ *(ptr++) = v>>14;
+ } else if (v < (1<<28)) {
+ *(ptr++) = v | B;
+ *(ptr++) = (v>>7) | B;
+ *(ptr++) = (v>>14) | B;
+ *(ptr++) = v>>21;
+ } else {
+ *(ptr++) = v | B;
+ *(ptr++) = (v>>7) | B;
+ *(ptr++) = (v>>14) | B;
+ *(ptr++) = (v>>21) | B;
+ *(ptr++) = v>>28;
+ }
+ return reinterpret_cast<char*>(ptr);
+}
+
+// If you know the internal layout of the std::string in use, you can
+// replace this function with one that resizes the string without
+// filling the new space with zeros (if applicable) --
+// it will be non-portable but faster.
+inline void STLStringResizeUninitialized(string* s, size_t new_size) {
+ s->resize(new_size);
+}
+
+// Return a mutable char* pointing to a string's internal buffer,
+// which may not be null-terminated. Writing through this pointer will
+// modify the string.
+//
+// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
+// next call to a string method that invalidates iterators.
+//
+// As of 2006-04, there is no standard-blessed way of getting a
+// mutable reference to a string's internal buffer. However, issue 530
+// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
+// proposes this as the method. It will officially be part of the standard
+// for C++0x. This should already work on all current implementations.
+inline char* string_as_array(string* str) {
+ return str->empty() ? NULL : &*str->begin();
+}
+
+} // namespace snappy
+
+#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
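As a quick illustration of the LittleEndian and Varint helpers declared above, a small round-trip sketch; the toy values and the standalone main() are assumptions on the editor's part, not library code.

    #include <cassert>
    #include "snappy-stubs-internal.h"

    int main() {
      // Unaligned little-endian store/load round trip.
      char word[4];
      snappy::LittleEndian::Store32(word, 0x11223344u);
      assert(snappy::LittleEndian::Load32(word) == 0x11223344u);

      // Varint round trip: 300 encodes as the two bytes 0xAC 0x02.
      char buf[snappy::Varint::kMax32];
      char* end = snappy::Varint::Encode32(buf, 300);
      assert(end - buf == 2);
      snappy::uint32 decoded = 0;
      const char* p = snappy::Varint::Parse32WithLimit(buf, end, &decoded);
      assert(p == end && decoded == 300);
      return 0;
    }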
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-public.h b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-public.h
new file mode 100644
index 00000000..6b41bbe9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-public.h
@@ -0,0 +1,98 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: sesse@google.com (Steinar H. Gunderson)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Various type stubs for the open-source version of Snappy.
+//
+// This file cannot include config.h, as it is included from snappy.h,
+// which is a public header. Instead, snappy-stubs-public.h is generated
+// from snappy-stubs-public.h.in at configure time.
+
+#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+
+#if 1
+#include <stdint.h>
+#endif
+
+#if 1
+#include <stddef.h>
+#endif
+
+#if 0
+#include <sys/uio.h>
+#endif
+
+#define SNAPPY_MAJOR 1
+#define SNAPPY_MINOR 1
+#define SNAPPY_PATCHLEVEL 2
+#define SNAPPY_VERSION \
+ ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
+
+#include <string>
+
+namespace snappy {
+
+#if 1
+typedef int8_t int8;
+typedef uint8_t uint8;
+typedef int16_t int16;
+typedef uint16_t uint16;
+typedef int32_t int32;
+typedef uint32_t uint32;
+typedef int64_t int64;
+typedef uint64_t uint64;
+#else
+typedef signed char int8;
+typedef unsigned char uint8;
+typedef short int16;
+typedef unsigned short uint16;
+typedef int int32;
+typedef unsigned int uint32;
+typedef long long int64;
+typedef unsigned long long uint64;
+#endif
+
+typedef std::string string;
+
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
+
+#if !0
+// Windows does not have an iovec type, yet the concept is universally useful.
+// It is simple to define it ourselves, so we put it inside our own namespace.
+struct iovec {
+ void* iov_base;
+ size_t iov_len;
+};
+#endif
+
+} // namespace snappy
+
+#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-public.h.in b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-public.h.in
new file mode 100644
index 00000000..6c181a1c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-stubs-public.h.in
@@ -0,0 +1,98 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: sesse@google.com (Steinar H. Gunderson)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Various type stubs for the open-source version of Snappy.
+//
+// This file cannot include config.h, as it is included from snappy.h,
+// which is a public header. Instead, snappy-stubs-public.h is generated
+// from snappy-stubs-public.h.in at configure time.
+
+#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+
+#if @ac_cv_have_stdint_h@
+#include <stdint.h>
+#endif
+
+#if @ac_cv_have_stddef_h@
+#include <stddef.h>
+#endif
+
+#if @ac_cv_have_sys_uio_h@
+#include <sys/uio.h>
+#endif
+
+#define SNAPPY_MAJOR @SNAPPY_MAJOR@
+#define SNAPPY_MINOR @SNAPPY_MINOR@
+#define SNAPPY_PATCHLEVEL @SNAPPY_PATCHLEVEL@
+#define SNAPPY_VERSION \
+ ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
+
+#include <string>
+
+namespace snappy {
+
+#if @ac_cv_have_stdint_h@
+typedef int8_t int8;
+typedef uint8_t uint8;
+typedef int16_t int16;
+typedef uint16_t uint16;
+typedef int32_t int32;
+typedef uint32_t uint32;
+typedef int64_t int64;
+typedef uint64_t uint64;
+#else
+typedef signed char int8;
+typedef unsigned char uint8;
+typedef short int16;
+typedef unsigned short uint16;
+typedef int int32;
+typedef unsigned int uint32;
+typedef long long int64;
+typedef unsigned long long uint64;
+#endif
+
+typedef std::string string;
+
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
+
+#if !@ac_cv_have_sys_uio_h@
+// Windows does not have an iovec type, yet the concept is universally useful.
+// It is simple to define it ourselves, so we put it inside our own namespace.
+struct iovec {
+ void* iov_base;
+ size_t iov_len;
+};
+#endif
+
+} // namespace snappy
+
+#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-test.cc b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-test.cc
new file mode 100644
index 00000000..46194109
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-test.cc
@@ -0,0 +1,606 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Various stubs for the unit tests for the open-source version of Snappy.
+
+#include "snappy-test.h"
+
+#ifdef HAVE_WINDOWS_H
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#endif
+
+#include <algorithm>
+
+DEFINE_bool(run_microbenchmarks, true,
+ "Run microbenchmarks before doing anything else.");
+
+namespace snappy {
+
+string ReadTestDataFile(const string& base, size_t size_limit) {
+ string contents;
+ const char* srcdir = getenv("srcdir"); // This is set by Automake.
+ string prefix;
+ if (srcdir) {
+ prefix = string(srcdir) + "/";
+ }
+ file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults()
+ ).CheckSuccess();
+ if (size_limit > 0) {
+ contents = contents.substr(0, size_limit);
+ }
+ return contents;
+}
+
+string ReadTestDataFile(const string& base) {
+ return ReadTestDataFile(base, 0);
+}
+
+string StringPrintf(const char* format, ...) {
+ char buf[4096];
+ va_list ap;
+ va_start(ap, format);
+ vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ return buf;
+}
+
+bool benchmark_running = false;
+int64 benchmark_real_time_us = 0;
+int64 benchmark_cpu_time_us = 0;
+string *benchmark_label = NULL;
+int64 benchmark_bytes_processed = 0;
+
+void ResetBenchmarkTiming() {
+ benchmark_real_time_us = 0;
+ benchmark_cpu_time_us = 0;
+}
+
+#ifdef WIN32
+LARGE_INTEGER benchmark_start_real;
+FILETIME benchmark_start_cpu;
+#else // WIN32
+struct timeval benchmark_start_real;
+struct rusage benchmark_start_cpu;
+#endif // WIN32
+
+void StartBenchmarkTiming() {
+#ifdef WIN32
+ QueryPerformanceCounter(&benchmark_start_real);
+ FILETIME dummy;
+ CHECK(GetProcessTimes(
+ GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
+#else
+ gettimeofday(&benchmark_start_real, NULL);
+ if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
+ perror("getrusage(RUSAGE_SELF)");
+ exit(1);
+ }
+#endif
+ benchmark_running = true;
+}
+
+void StopBenchmarkTiming() {
+ if (!benchmark_running) {
+ return;
+ }
+
+#ifdef WIN32
+ LARGE_INTEGER benchmark_stop_real;
+ LARGE_INTEGER benchmark_frequency;
+ QueryPerformanceCounter(&benchmark_stop_real);
+ QueryPerformanceFrequency(&benchmark_frequency);
+
+ double elapsed_real = static_cast<double>(
+ benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
+ benchmark_frequency.QuadPart;
+ benchmark_real_time_us += elapsed_real * 1e6 + 0.5;
+
+ FILETIME benchmark_stop_cpu, dummy;
+ CHECK(GetProcessTimes(
+ GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));
+
+ ULARGE_INTEGER start_ulargeint;
+ start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
+ start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;
+
+ ULARGE_INTEGER stop_ulargeint;
+ stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
+ stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;
+
+ benchmark_cpu_time_us +=
+ (stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
+#else // WIN32
+ struct timeval benchmark_stop_real;
+ gettimeofday(&benchmark_stop_real, NULL);
+ benchmark_real_time_us +=
+ 1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
+ benchmark_real_time_us +=
+ (benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);
+
+ struct rusage benchmark_stop_cpu;
+ if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
+ perror("getrusage(RUSAGE_SELF)");
+ exit(1);
+ }
+ benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
+ benchmark_start_cpu.ru_utime.tv_sec);
+ benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
+ benchmark_start_cpu.ru_utime.tv_usec);
+#endif // WIN32
+
+ benchmark_running = false;
+}
+
+void SetBenchmarkLabel(const string& str) {
+ if (benchmark_label) {
+ delete benchmark_label;
+ }
+ benchmark_label = new string(str);
+}
+
+void SetBenchmarkBytesProcessed(int64 bytes) {
+ benchmark_bytes_processed = bytes;
+}
+
+struct BenchmarkRun {
+ int64 real_time_us;
+ int64 cpu_time_us;
+};
+
+struct BenchmarkCompareCPUTime {
+ bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
+ return a.cpu_time_us < b.cpu_time_us;
+ }
+};
+
+void Benchmark::Run() {
+ for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
+ // Run a few iterations first to find out approximately how fast
+ // the benchmark is.
+ const int kCalibrateIterations = 100;
+ ResetBenchmarkTiming();
+ StartBenchmarkTiming();
+ (*function_)(kCalibrateIterations, test_case_num);
+ StopBenchmarkTiming();
+
+ // Let each test case run for about 200ms, but at least as many
+ // as we used to calibrate.
+ // Run five times and pick the median.
+ const int kNumRuns = 5;
+ const int kMedianPos = kNumRuns / 2;
+ int num_iterations = 0;
+ if (benchmark_real_time_us > 0) {
+ num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
+ }
+ num_iterations = max(num_iterations, kCalibrateIterations);
+ BenchmarkRun benchmark_runs[kNumRuns];
+
+ for (int run = 0; run < kNumRuns; ++run) {
+ ResetBenchmarkTiming();
+ StartBenchmarkTiming();
+ (*function_)(num_iterations, test_case_num);
+ StopBenchmarkTiming();
+
+ benchmark_runs[run].real_time_us = benchmark_real_time_us;
+ benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
+ }
+
+ string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
+ string human_readable_speed;
+
+ nth_element(benchmark_runs,
+ benchmark_runs + kMedianPos,
+ benchmark_runs + kNumRuns,
+ BenchmarkCompareCPUTime());
+ int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
+ int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
+ if (cpu_time_us <= 0) {
+ human_readable_speed = "?";
+ } else {
+ int64 bytes_per_second =
+ benchmark_bytes_processed * 1000000 / cpu_time_us;
+ if (bytes_per_second < 1024) {
+ human_readable_speed = StringPrintf("%dB/s", bytes_per_second);
+ } else if (bytes_per_second < 1024 * 1024) {
+ human_readable_speed = StringPrintf(
+ "%.1fkB/s", bytes_per_second / 1024.0f);
+ } else if (bytes_per_second < 1024 * 1024 * 1024) {
+ human_readable_speed = StringPrintf(
+ "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
+ } else {
+ human_readable_speed = StringPrintf(
+ "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
+ }
+ }
+
+ fprintf(stderr,
+#ifdef WIN32
+ "%-18s %10I64d %10I64d %10d %s %s\n",
+#else
+ "%-18s %10lld %10lld %10d %s %s\n",
+#endif
+ heading.c_str(),
+ static_cast<long long>(real_time_us * 1000 / num_iterations),
+ static_cast<long long>(cpu_time_us * 1000 / num_iterations),
+ num_iterations,
+ human_readable_speed.c_str(),
+ benchmark_label->c_str());
+ }
+}
+
+#ifdef HAVE_LIBZ
+
+ZLib::ZLib()
+ : comp_init_(false),
+ uncomp_init_(false) {
+ Reinit();
+}
+
+ZLib::~ZLib() {
+ if (comp_init_) { deflateEnd(&comp_stream_); }
+ if (uncomp_init_) { inflateEnd(&uncomp_stream_); }
+}
+
+void ZLib::Reinit() {
+ compression_level_ = Z_DEFAULT_COMPRESSION;
+ window_bits_ = MAX_WBITS;
+ mem_level_ = 8; // DEF_MEM_LEVEL
+ if (comp_init_) {
+ deflateEnd(&comp_stream_);
+ comp_init_ = false;
+ }
+ if (uncomp_init_) {
+ inflateEnd(&uncomp_stream_);
+ uncomp_init_ = false;
+ }
+ first_chunk_ = true;
+}
+
+void ZLib::Reset() {
+ first_chunk_ = true;
+}
+
+// --------- COMPRESS MODE
+
+// Initialization method to be called if we hit an error while
+// compressing. On hitting an error, call this method before returning
+// the error.
+void ZLib::CompressErrorInit() {
+ deflateEnd(&comp_stream_);
+ comp_init_ = false;
+ Reset();
+}
+
+int ZLib::DeflateInit() {
+ return deflateInit2(&comp_stream_,
+ compression_level_,
+ Z_DEFLATED,
+ window_bits_,
+ mem_level_,
+ Z_DEFAULT_STRATEGY);
+}
+
+int ZLib::CompressInit(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen) {
+ int err;
+
+ comp_stream_.next_in = (Bytef*)source;
+ comp_stream_.avail_in = (uInt)*sourceLen;
+ if ((uLong)comp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
+ comp_stream_.next_out = dest;
+ comp_stream_.avail_out = (uInt)*destLen;
+ if ((uLong)comp_stream_.avail_out != *destLen) return Z_BUF_ERROR;
+
+ if ( !first_chunk_ ) // only need to set up stream the first time through
+ return Z_OK;
+
+ if (comp_init_) { // we've already initted it
+ err = deflateReset(&comp_stream_);
+ if (err != Z_OK) {
+ LOG(WARNING) << "ERROR: Can't reset compress object; creating a new one";
+ deflateEnd(&comp_stream_);
+ comp_init_ = false;
+ }
+ }
+ if (!comp_init_) { // first use
+ comp_stream_.zalloc = (alloc_func)0;
+ comp_stream_.zfree = (free_func)0;
+ comp_stream_.opaque = (voidpf)0;
+ err = DeflateInit();
+ if (err != Z_OK) return err;
+ comp_init_ = true;
+ }
+ return Z_OK;
+}
+
+// In a perfect world we'd always have the full buffer to compress
+// when the time came, and we could just call Compress(). Alas, we
+// want to do chunked compression on our webserver. In this
+// application, we compress the header, send it off, then compress the
+// results, send them off, then compress the footer. Thus we need to
+// use the chunked compression features of zlib.
+int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen,
+ int flush_mode) { // Z_FULL_FLUSH or Z_FINISH
+ int err;
+
+ if ( (err=CompressInit(dest, destLen, source, sourceLen)) != Z_OK )
+ return err;
+
+ // This is used to figure out how many bytes we wrote *this chunk*
+ int compressed_size = comp_stream_.total_out;
+
+ // Some setup happens only for the first chunk we compress in a run
+ if ( first_chunk_ ) {
+ first_chunk_ = false;
+ }
+
+ // flush_mode is Z_FINISH for all mode, Z_SYNC_FLUSH for incremental
+ // compression.
+ err = deflate(&comp_stream_, flush_mode);
+
+ *sourceLen = comp_stream_.avail_in;
+
+ if ((err == Z_STREAM_END || err == Z_OK)
+ && comp_stream_.avail_in == 0
+ && comp_stream_.avail_out != 0 ) {
+ // we processed everything ok and the output buffer was large enough.
+ ;
+ } else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
+ return Z_BUF_ERROR; // should never happen
+ } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
+ // an error happened
+ CompressErrorInit();
+ return err;
+ } else if (comp_stream_.avail_out == 0) { // not enough space
+ err = Z_BUF_ERROR;
+ }
+
+ assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
+ if (err == Z_STREAM_END)
+ err = Z_OK;
+
+ // update the crc and other metadata
+ compressed_size = comp_stream_.total_out - compressed_size; // delta
+ *destLen = compressed_size;
+
+ return err;
+}
+
+int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen,
+ int flush_mode) { // Z_FULL_FLUSH or Z_FINISH
+ const int ret =
+ CompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
+ if (ret == Z_BUF_ERROR)
+ CompressErrorInit();
+ return ret;
+}
+
+// This routine only initializes the compression stream once. Thereafter, it
+// just does a deflateReset on the stream, which should be faster.
+int ZLib::Compress(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen) {
+ int err;
+ if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
+ Z_FINISH)) != Z_OK )
+ return err;
+ Reset(); // reset for next call to Compress
+
+ return Z_OK;
+}
+
+
+// --------- UNCOMPRESS MODE
+
+int ZLib::InflateInit() {
+ return inflateInit2(&uncomp_stream_, MAX_WBITS);
+}
+
+// Initialization method to be called if we hit an error while
+// uncompressing. On hitting an error, call this method before
+// returning the error.
+void ZLib::UncompressErrorInit() {
+ inflateEnd(&uncomp_stream_);
+ uncomp_init_ = false;
+ Reset();
+}
+
+int ZLib::UncompressInit(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen) {
+ int err;
+
+ uncomp_stream_.next_in = (Bytef*)source;
+ uncomp_stream_.avail_in = (uInt)*sourceLen;
+ // Check for source > 64K on 16-bit machine:
+ if ((uLong)uncomp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
+
+ uncomp_stream_.next_out = dest;
+ uncomp_stream_.avail_out = (uInt)*destLen;
+ if ((uLong)uncomp_stream_.avail_out != *destLen) return Z_BUF_ERROR;
+
+ if ( !first_chunk_ ) // only need to set up stream the first time through
+ return Z_OK;
+
+ if (uncomp_init_) { // we've already initted it
+ err = inflateReset(&uncomp_stream_);
+ if (err != Z_OK) {
+ LOG(WARNING)
+ << "ERROR: Can't reset uncompress object; creating a new one";
+ UncompressErrorInit();
+ }
+ }
+ if (!uncomp_init_) {
+ uncomp_stream_.zalloc = (alloc_func)0;
+ uncomp_stream_.zfree = (free_func)0;
+ uncomp_stream_.opaque = (voidpf)0;
+ err = InflateInit();
+ if (err != Z_OK) return err;
+ uncomp_init_ = true;
+ }
+ return Z_OK;
+}
+
+// If you compressed your data a chunk at a time, with CompressChunk,
+// you can uncompress it a chunk at a time with UncompressChunk.
+// The only difference between chunked and unchunked uncompression
+// is the flush mode we use: Z_SYNC_FLUSH (chunked) or Z_FINISH (unchunked).
+int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen,
+ int flush_mode) { // Z_SYNC_FLUSH or Z_FINISH
+ int err = Z_OK;
+
+ if ( (err=UncompressInit(dest, destLen, source, sourceLen)) != Z_OK ) {
+ LOG(WARNING) << "UncompressInit: Error: " << err << " SourceLen: "
+ << *sourceLen;
+ return err;
+ }
+
+ // This is used to figure out how many output bytes we wrote *this chunk*:
+ const uLong old_total_out = uncomp_stream_.total_out;
+
+ // This is used to figure out how many input bytes we read *this chunk*:
+ const uLong old_total_in = uncomp_stream_.total_in;
+
+ // Some setup happens only for the first chunk we compress in a run
+ if ( first_chunk_ ) {
+ first_chunk_ = false; // so we don't do this again
+
+ // For the first chunk *only* (to avoid infinite troubles), we let
+ // there be no actual data to uncompress. This sometimes triggers
+ // when the input is only the gzip header, say.
+ if ( *sourceLen == 0 ) {
+ *destLen = 0;
+ return Z_OK;
+ }
+ }
+
+ // We'll uncompress as much as we can. If we end OK great, otherwise
+ // if we get an error that seems to be the gzip footer, we store the
+ // gzip footer and return OK, otherwise we return the error.
+
+ // flush_mode is Z_SYNC_FLUSH for chunked mode, Z_FINISH for all mode.
+ err = inflate(&uncomp_stream_, flush_mode);
+
+ // Figure out how many bytes of the input zlib slurped up:
+ const uLong bytes_read = uncomp_stream_.total_in - old_total_in;
+ CHECK_LE(source + bytes_read, source + *sourceLen);
+ *sourceLen = uncomp_stream_.avail_in;
+
+ if ((err == Z_STREAM_END || err == Z_OK) // everything went ok
+ && uncomp_stream_.avail_in == 0) { // and we read it all
+ ;
+ } else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
+ LOG(WARNING)
+ << "UncompressChunkOrAll: Received some extra data, bytes total: "
+ << uncomp_stream_.avail_in << " bytes: "
+ << string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
+ min(int(uncomp_stream_.avail_in), 20));
+ UncompressErrorInit();
+ return Z_DATA_ERROR; // what's the extra data for?
+ } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
+ // an error happened
+ LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
+ << " avail_out: " << uncomp_stream_.avail_out;
+ UncompressErrorInit();
+ return err;
+ } else if (uncomp_stream_.avail_out == 0) {
+ err = Z_BUF_ERROR;
+ }
+
+ assert(err == Z_OK || err == Z_BUF_ERROR || err == Z_STREAM_END);
+ if (err == Z_STREAM_END)
+ err = Z_OK;
+
+ *destLen = uncomp_stream_.total_out - old_total_out; // size for this call
+
+ return err;
+}
+
+int ZLib::UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen,
+ int flush_mode) { // Z_SYNC_FLUSH or Z_FINISH
+ const int ret =
+ UncompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
+ if (ret == Z_BUF_ERROR)
+ UncompressErrorInit();
+ return ret;
+}
+
+int ZLib::UncompressAtMost(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen) {
+ return UncompressAtMostOrAll(dest, destLen, source, sourceLen, Z_SYNC_FLUSH);
+}
+
+// We make sure we've uncompressed everything, that is, the current
+// uncompress stream is at a compressed-buffer-EOF boundary. In gzip
+// mode, we also check the gzip footer to make sure we pass the gzip
+// consistency checks. We RETURN true iff both types of checks pass.
+bool ZLib::UncompressChunkDone() {
+ assert(!first_chunk_ && uncomp_init_);
+ // Make sure we're at the end-of-compressed-data point. This means
+ // if we call inflate with Z_FINISH we won't consume any input or
+ // write any output
+ Bytef dummyin, dummyout;
+ uLongf dummylen = 0;
+ if ( UncompressChunkOrAll(&dummyout, &dummylen, &dummyin, 0, Z_FINISH)
+ != Z_OK ) {
+ return false;
+ }
+
+ // Make sure that when we exit, we can start a new round of chunks later
+ Reset();
+
+ return true;
+}
+
+// Uncompresses the source buffer into the destination buffer.
+// The destination buffer must be long enough to hold the entire
+// decompressed contents.
+//
+// We only initialize the uncomp_stream once. Thereafter, we use
+// inflateReset, which should be faster.
+//
+// Returns Z_OK on success, otherwise, it returns a zlib error code.
+int ZLib::Uncompress(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen) {
+ int err;
+ if ( (err=UncompressChunkOrAll(dest, destLen, source, sourceLen,
+ Z_FINISH)) != Z_OK ) {
+ Reset(); // let us try to compress again
+ return err;
+ }
+ if ( !UncompressChunkDone() ) // calls Reset()
+ return Z_DATA_ERROR;
+ return Z_OK; // stream_end is ok
+}
+
+#endif // HAVE_LIBZ
+
+} // namespace snappy
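The ZLib wrapper above is only compiled when HAVE_LIBZ is defined; under that assumption, a whole-buffer round trip looks roughly like the sketch below. The buffer sizes, sample text, and standalone main() are illustrative, not part of the test harness.

    #include <cassert>
    #include <cstring>
    #include "snappy-test.h"  // assumes HAVE_LIBZ and zlib headers are available

    int main() {
      const char input[] = "hello hello hello hello";
      const uLong source_len = sizeof(input);

      snappy::ZLib z;
      Bytef compressed[256];  // >= ZLib::MinCompressbufSize(source_len)
      uLongf comp_len = sizeof(compressed);
      assert(z.Compress(compressed, &comp_len,
                        reinterpret_cast<const Bytef*>(input),
                        source_len) == Z_OK);

      Bytef restored[256];
      uLongf rest_len = sizeof(restored);
      assert(z.Uncompress(restored, &rest_len, compressed, comp_len) == Z_OK);
      assert(rest_len == source_len &&
             memcmp(restored, input, source_len) == 0);
      return 0;
    }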
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-test.h b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-test.h
new file mode 100644
index 00000000..0f18bf14
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy-test.h
@@ -0,0 +1,582 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Various stubs for the unit tests for the open-source version of Snappy.
+
+#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
+#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
+
+#include <iostream>
+#include <string>
+
+#include "snappy-stubs-internal.h"
+
+#include <stdio.h>
+#include <stdarg.h>
+
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#ifdef HAVE_SYS_RESOURCE_H
+#include <sys/resource.h>
+#endif
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#ifdef HAVE_WINDOWS_H
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#endif
+
+#include <string>
+
+#ifdef HAVE_GTEST
+
+#include <gtest/gtest.h>
+#undef TYPED_TEST
+#define TYPED_TEST TEST
+#define INIT_GTEST(argc, argv) ::testing::InitGoogleTest(argc, *argv)
+
+#else
+
+// Stubs for if the user doesn't have Google Test installed.
+
+#define TEST(test_case, test_subcase) \
+ void Test_ ## test_case ## _ ## test_subcase()
+#define INIT_GTEST(argc, argv)
+
+#define TYPED_TEST TEST
+#define EXPECT_EQ CHECK_EQ
+#define EXPECT_NE CHECK_NE
+#define EXPECT_FALSE(cond) CHECK(!(cond))
+
+#endif
+
+#ifdef HAVE_GFLAGS
+
+#include <gflags/gflags.h>
+
+// This is tricky; both gflags and Google Test want to look at the command line
+// arguments. Google Test seems to be the most happy with unknown arguments,
+// though, so we call it first and hope for the best.
+#define InitGoogle(argv0, argc, argv, remove_flags) \
+ INIT_GTEST(argc, argv); \
+ google::ParseCommandLineFlags(argc, argv, remove_flags);
+
+#else
+
+// If we don't have the gflags package installed, these can only be
+// changed at compile time.
+#define DEFINE_int32(flag_name, default_value, description) \
+ static int FLAGS_ ## flag_name = default_value;
+
+#define InitGoogle(argv0, argc, argv, remove_flags) \
+ INIT_GTEST(argc, argv)
+
+#endif
+
+#ifdef HAVE_LIBZ
+#include "zlib.h"
+#endif
+
+#ifdef HAVE_LIBLZO2
+#include "lzo/lzo1x.h"
+#endif
+
+#ifdef HAVE_LIBLZF
+extern "C" {
+#include "lzf.h"
+}
+#endif
+
+#ifdef HAVE_LIBFASTLZ
+#include "fastlz.h"
+#endif
+
+#ifdef HAVE_LIBQUICKLZ
+#include "quicklz.h"
+#endif
+
+namespace {
+
+namespace File {
+ void Init() { }
+} // namespace File
+
+namespace file {
+  int Defaults() { return 0; }
+
+ class DummyStatus {
+ public:
+ void CheckSuccess() { }
+ };
+
+ DummyStatus GetContents(const string& filename, string* data, int unused) {
+ FILE* fp = fopen(filename.c_str(), "rb");
+ if (fp == NULL) {
+ perror(filename.c_str());
+ exit(1);
+ }
+
+ data->clear();
+ while (!feof(fp)) {
+ char buf[4096];
+ size_t ret = fread(buf, 1, 4096, fp);
+ if (ret == 0 && ferror(fp)) {
+ perror("fread");
+ exit(1);
+ }
+ data->append(string(buf, ret));
+ }
+
+ fclose(fp);
+    return DummyStatus();
+  }
+
+ DummyStatus SetContents(const string& filename,
+ const string& str,
+ int unused) {
+ FILE* fp = fopen(filename.c_str(), "wb");
+ if (fp == NULL) {
+ perror(filename.c_str());
+ exit(1);
+ }
+
+ int ret = fwrite(str.data(), str.size(), 1, fp);
+ if (ret != 1) {
+ perror("fwrite");
+ exit(1);
+ }
+
+ fclose(fp);
+    return DummyStatus();
+  }
+} // namespace file
+
+} // namespace
+
+namespace snappy {
+
+#define FLAGS_test_random_seed 301
+typedef string TypeParam;
+
+void Test_CorruptedTest_VerifyCorrupted();
+void Test_Snappy_SimpleTests();
+void Test_Snappy_MaxBlowup();
+void Test_Snappy_RandomData();
+void Test_Snappy_FourByteOffset();
+void Test_SnappyCorruption_TruncatedVarint();
+void Test_SnappyCorruption_UnterminatedVarint();
+void Test_Snappy_ReadPastEndOfBuffer();
+void Test_Snappy_FindMatchLength();
+void Test_Snappy_FindMatchLengthRandom();
+
+string ReadTestDataFile(const string& base, size_t size_limit);
+
+string ReadTestDataFile(const string& base);
+
+// A sprintf() variant that returns a std::string.
+// Not safe for general use due to truncation issues.
+string StringPrintf(const char* format, ...);
+
+// A simple, non-cryptographically-secure random generator.
+class ACMRandom {
+ public:
+ explicit ACMRandom(uint32 seed) : seed_(seed) {}
+
+ int32 Next();
+
+ int32 Uniform(int32 n) {
+ return Next() % n;
+ }
+ uint8 Rand8() {
+ return static_cast<uint8>((Next() >> 1) & 0x000000ff);
+ }
+ bool OneIn(int X) { return Uniform(X) == 0; }
+
+ // Skewed: pick "base" uniformly from range [0,max_log] and then
+ // return "base" random bits. The effect is to pick a number in the
+ // range [0,2^max_log-1] with bias towards smaller numbers.
+ int32 Skewed(int max_log);
+
+ private:
+ static const uint32 M = 2147483647L; // 2^31-1
+ uint32 seed_;
+};
+
+inline int32 ACMRandom::Next() {
+ static const uint64 A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
+ // We are computing
+ // seed_ = (seed_ * A) % M, where M = 2^31-1
+ //
+ // seed_ must not be zero or M, or else all subsequent computed values
+ // will be zero or M respectively. For all other values, seed_ will end
+ // up cycling through every number in [1,M-1]
+ uint64 product = seed_ * A;
+
+ // Compute (product % M) using the fact that ((x << 31) % M) == x.
+ seed_ = (product >> 31) + (product & M);
+ // The first reduction may overflow by 1 bit, so we may need to repeat.
+ // mod == M is not possible; using > allows the faster sign-bit-based test.
+ if (seed_ > M) {
+ seed_ -= M;
+ }
+ return seed_;
+}
+
+inline int32 ACMRandom::Skewed(int max_log) {
+ const int32 base = (Next() - 1) % (max_log+1);
+ return (Next() - 1) & ((1u << base)-1);
+}
+
+// A wall-time clock. This stub is not super-accurate, nor resistant to the
+// system time changing.
+class CycleTimer {
+ public:
+ CycleTimer() : real_time_us_(0) {}
+
+ void Start() {
+#ifdef WIN32
+ QueryPerformanceCounter(&start_);
+#else
+ gettimeofday(&start_, NULL);
+#endif
+ }
+
+ void Stop() {
+#ifdef WIN32
+ LARGE_INTEGER stop;
+ LARGE_INTEGER frequency;
+ QueryPerformanceCounter(&stop);
+ QueryPerformanceFrequency(&frequency);
+
+ double elapsed = static_cast<double>(stop.QuadPart - start_.QuadPart) /
+ frequency.QuadPart;
+ real_time_us_ += elapsed * 1e6 + 0.5;
+#else
+ struct timeval stop;
+ gettimeofday(&stop, NULL);
+
+ real_time_us_ += 1000000 * (stop.tv_sec - start_.tv_sec);
+ real_time_us_ += (stop.tv_usec - start_.tv_usec);
+#endif
+ }
+
+ double Get() {
+ return real_time_us_ * 1e-6;
+ }
+
+ private:
+ int64 real_time_us_;
+#ifdef WIN32
+ LARGE_INTEGER start_;
+#else
+ struct timeval start_;
+#endif
+};
+
+// Minimalistic microbenchmark framework.
+
+typedef void (*BenchmarkFunction)(int, int);
+
+class Benchmark {
+ public:
+ Benchmark(const string& name, BenchmarkFunction function) :
+ name_(name), function_(function) {}
+
+ Benchmark* DenseRange(int start, int stop) {
+ start_ = start;
+ stop_ = stop;
+ return this;
+ }
+
+ void Run();
+
+ private:
+ const string name_;
+ const BenchmarkFunction function_;
+ int start_, stop_;
+};
+#define BENCHMARK(benchmark_name) \
+ Benchmark* Benchmark_ ## benchmark_name = \
+ (new Benchmark(#benchmark_name, benchmark_name))
+
+extern Benchmark* Benchmark_BM_UFlat;
+extern Benchmark* Benchmark_BM_UIOVec;
+extern Benchmark* Benchmark_BM_UValidate;
+extern Benchmark* Benchmark_BM_ZFlat;
+
+void ResetBenchmarkTiming();
+void StartBenchmarkTiming();
+void StopBenchmarkTiming();
+void SetBenchmarkLabel(const string& str);
+void SetBenchmarkBytesProcessed(int64 bytes);
+
+#ifdef HAVE_LIBZ
+
+// Object-oriented wrapper around zlib.
+class ZLib {
+ public:
+ ZLib();
+ ~ZLib();
+
+ // Wipe a ZLib object to a virgin state. This differs from Reset()
+ // in that it also breaks any state.
+ void Reinit();
+
+ // Call this to make a zlib buffer as good as new. Here's the only
+ // case where they differ:
+ // CompressChunk(a); CompressChunk(b); CompressChunkDone(); vs
+ // CompressChunk(a); Reset(); CompressChunk(b); CompressChunkDone();
+ // You'll want to use Reset(), then, when you interrupt a compress
+ // (or uncompress) in the middle of a chunk and want to start over.
+ void Reset();
+
+ // According to the zlib manual, when you Compress, the destination
+ // buffer must have size at least src + .1%*src + 12. This function
+ // helps you calculate that. Augment this to account for a potential
+ // gzip header and footer, plus a few bytes of slack.
+ static int MinCompressbufSize(int uncompress_size) {
+ return uncompress_size + uncompress_size/1000 + 40;
+ }
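+
+ // Worked example (illustrative only): for a 1,000,000-byte input this
+ // returns 1,000,000 + 1,000 + 40 = 1,001,040 bytes, i.e. the documented
+ // zlib bound of src + 0.1%*src + 12 rounded up, with room left over for a
+ // gzip header/footer and a few bytes of slack.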
+
+ // Compresses the source buffer into the destination buffer.
+ // sourceLen is the byte length of the source buffer.
+ // Upon entry, destLen is the total size of the destination buffer,
+ // which must be of size at least MinCompressbufSize(sourceLen).
+ // Upon exit, destLen is the actual size of the compressed buffer.
+ //
+ // This function can be used to compress a whole file at once if the
+ // input file is mmap'ed.
+ //
+ // Returns Z_OK if success, Z_MEM_ERROR if there was not
+ // enough memory, Z_BUF_ERROR if there was not enough room in the
+ // output buffer. Note that if the output buffer is exactly the same
+ // size as the compressed result, we still return Z_BUF_ERROR.
+ // (check CL#1936076)
+ int Compress(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen);
+
+ // Uncompresses the source buffer into the destination buffer.
+ // The destination buffer must be long enough to hold the entire
+ // decompressed contents.
+ //
+ // Returns Z_OK on success, otherwise, it returns a zlib error code.
+ int Uncompress(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen);
+
+ // Uncompress data one chunk at a time -- ie you can call this
+ // more than once. To get this to work you need to call per-chunk
+ // and "done" routines.
+ //
+ // Returns Z_OK if success, Z_MEM_ERROR if there was not
+ // enough memory, Z_BUF_ERROR if there was not enough room in the
+ // output buffer.
+
+ int UncompressAtMost(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen);
+
+ // Checks gzip footer information, as needed. Mostly this just
+ // makes sure the checksums match. Whenever you call this, it
+ // will assume the last 8 bytes from the previous UncompressChunk
+ // call are the footer. Returns true iff everything looks ok.
+ bool UncompressChunkDone();
+
+ private:
+ int InflateInit(); // sets up the zlib inflate structure
+ int DeflateInit(); // sets up the zlib deflate structure
+
+ // These init the zlib data structures for compressing/uncompressing
+ int CompressInit(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen);
+ int UncompressInit(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen);
+ // Initialization method to be called if we hit an error while
+ // uncompressing. On hitting an error, call this method before
+ // returning the error.
+ void UncompressErrorInit();
+
+ // Helper function for Compress
+ int CompressChunkOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen,
+ int flush_mode);
+ int CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen,
+ int flush_mode);
+
+ // Likewise for UncompressAndUncompressChunk
+ int UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen,
+ int flush_mode);
+
+ int UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen,
+ int flush_mode);
+
+ // Initialization method to be called if we hit an error while
+ // compressing. On hitting an error, call this method before
+ // returning the error.
+ void CompressErrorInit();
+
+ int compression_level_; // compression level
+ int window_bits_; // log base 2 of the window size used in compression
+ int mem_level_; // specifies the amount of memory to be used by
+ // compressor (1-9)
+ z_stream comp_stream_; // Zlib stream data structure
+ bool comp_init_; // True if we have initialized comp_stream_
+ z_stream uncomp_stream_; // Zlib stream data structure
+ bool uncomp_init_; // True if we have initialized uncomp_stream_
+
+ // These are used only with chunked compression.
+ bool first_chunk_; // true if we need to emit headers with this chunk
+};
+
+#endif // HAVE_LIBZ
+
+} // namespace snappy
+
+DECLARE_bool(run_microbenchmarks);
+
+static void RunSpecifiedBenchmarks() {
+ if (!FLAGS_run_microbenchmarks) {
+ return;
+ }
+
+ fprintf(stderr, "Running microbenchmarks.\n");
+#ifndef NDEBUG
+ fprintf(stderr, "WARNING: Compiled with assertions enabled, will be slow.\n");
+#endif
+#ifndef __OPTIMIZE__
+ fprintf(stderr, "WARNING: Compiled without optimization, will be slow.\n");
+#endif
+ fprintf(stderr, "Benchmark Time(ns) CPU(ns) Iterations\n");
+ fprintf(stderr, "---------------------------------------------------\n");
+
+ snappy::Benchmark_BM_UFlat->Run();
+ snappy::Benchmark_BM_UIOVec->Run();
+ snappy::Benchmark_BM_UValidate->Run();
+ snappy::Benchmark_BM_ZFlat->Run();
+
+ fprintf(stderr, "\n");
+}
+
+#ifndef HAVE_GTEST
+
+static inline int RUN_ALL_TESTS() {
+ fprintf(stderr, "Running correctness tests.\n");
+ snappy::Test_CorruptedTest_VerifyCorrupted();
+ snappy::Test_Snappy_SimpleTests();
+ snappy::Test_Snappy_MaxBlowup();
+ snappy::Test_Snappy_RandomData();
+ snappy::Test_Snappy_FourByteOffset();
+ snappy::Test_SnappyCorruption_TruncatedVarint();
+ snappy::Test_SnappyCorruption_UnterminatedVarint();
+ snappy::Test_Snappy_ReadPastEndOfBuffer();
+ snappy::Test_Snappy_FindMatchLength();
+ snappy::Test_Snappy_FindMatchLengthRandom();
+ fprintf(stderr, "All tests passed.\n");
+
+ return 0;
+}
+
+#endif // HAVE_GTEST
+
+// For main().
+namespace snappy {
+
+static void CompressFile(const char* fname);
+static void UncompressFile(const char* fname);
+static void MeasureFile(const char* fname);
+
+// Logging.
+
+#define LOG(level) LogMessage()
+#define VLOG(level) true ? (void)0 : \
+ snappy::LogMessageVoidify() & snappy::LogMessage()
+
+class LogMessage {
+ public:
+ LogMessage() { }
+ ~LogMessage() {
+ cerr << endl;
+ }
+
+ LogMessage& operator<<(const std::string& msg) {
+ cerr << msg;
+ return *this;
+ }
+ LogMessage& operator<<(int x) {
+ cerr << x;
+ return *this;
+ }
+};
+
+// Asserts, both versions activated in debug mode only,
+// and ones that are always active.
+
+#define CRASH_UNLESS(condition) \
+ PREDICT_TRUE(condition) ? (void)0 : \
+ snappy::LogMessageVoidify() & snappy::LogMessageCrash()
+
+class LogMessageCrash : public LogMessage {
+ public:
+ LogMessageCrash() { }
+ ~LogMessageCrash() {
+ cerr << endl;
+ abort();
+ }
+};
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros. This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+
+class LogMessageVoidify {
+ public:
+ LogMessageVoidify() { }
+ // This has to be an operator with a precedence lower than << but
+ // higher than ?:
+ void operator&(const LogMessage&) { }
+};
+
+#define CHECK(cond) CRASH_UNLESS(cond)
+#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
+#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
+#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
+#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
+#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
+#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
+
+} // namespace snappy
+
+using snappy::CompressFile;
+using snappy::UncompressFile;
+using snappy::MeasureFile;
+
+#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy.cc b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy.cc
new file mode 100644
index 00000000..f8d0d23d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy.cc
@@ -0,0 +1,1306 @@
+// Copyright 2005 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "snappy.h"
+#include "snappy-internal.h"
+#include "snappy-sinksource.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+
+namespace snappy {
+
+// Any hash function will produce a valid compressed bitstream, but a good
+// hash function reduces the number of collisions and thus yields better
+// compression for compressible input, and more speed for incompressible
+// input. Of course, it doesn't hurt if the hash function is reasonably fast
+// either, as it gets called a lot.
+static inline uint32 HashBytes(uint32 bytes, int shift) {
+ uint32 kMul = 0x1e35a7bd;
+ return (bytes * kMul) >> shift;
+}
+static inline uint32 Hash(const char* p, int shift) {
+ return HashBytes(UNALIGNED_LOAD32(p), shift);
+}
+
+size_t MaxCompressedLength(size_t source_len) {
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ return 32 + source_len + source_len/6;
+}
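+
+// Illustrative check of the bound above (not used by the code): a 1200-byte
+// incompressible input gets an estimate of 32 + 1200 + 1200/6 = 1432 bytes,
+// comfortably above the worst observed blowup of 7 output bytes for every
+// 6 input bytes plus the small constant overhead.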
+
+enum {
+ LITERAL = 0,
+ COPY_1_BYTE_OFFSET = 1, // 3 bit length + 3 bits of offset in opcode
+ COPY_2_BYTE_OFFSET = 2,
+ COPY_4_BYTE_OFFSET = 3
+};
+static const int kMaximumTagLength = 5; // COPY_4_BYTE_OFFSET plus the actual offset.
+
+// Copy "len" bytes from "src" to "op", one byte at a time. Used for
+// handling COPY operations where the input and output regions may
+// overlap. For example, suppose:
+// src == "ab"
+// op == src + 2
+// len == 20
+// After IncrementalCopy(src, op, len), the result will have
+// eleven copies of "ab"
+// ababababababababababab
+// Note that this does not match the semantics of either memcpy()
+// or memmove().
+static inline void IncrementalCopy(const char* src, char* op, ssize_t len) {
+ assert(len > 0);
+ do {
+ *op++ = *src++;
+ } while (--len > 0);
+}
+
+// Equivalent to IncrementalCopy except that it can write up to ten extra
+// bytes after the end of the copy, and that it is faster.
+//
+// The main part of this loop is a simple copy of eight bytes at a time until
+// we've copied (at least) the requested amount of bytes. However, if op and
+// src are less than eight bytes apart (indicating a repeating pattern of
+// length < 8), we first need to expand the pattern in order to get the correct
+// results. For instance, if the buffer looks like this, with the eight-byte
+// <src> and <op> patterns marked as intervals:
+//
+// abxxxxxxxxxxxx
+// [------] src
+// [------] op
+//
+// a single eight-byte copy from <src> to <op> will repeat the pattern once,
+// after which we can move <op> two bytes without moving <src>:
+//
+// ababxxxxxxxxxx
+// [------] src
+// [------] op
+//
+// and repeat the exercise until the two no longer overlap.
+//
+// This allows us to do very well in the special case of one single byte
+// repeated many times, without taking a big hit for more general cases.
+//
+// The worst case of extra writing past the end of the match occurs when
+// op - src == 1 and len == 1; the last copy will read from byte positions
+// [0..7] and write to [4..11], whereas it was only supposed to write to
+// position 1. Thus, ten excess bytes.
+
+namespace {
+
+const int kMaxIncrementCopyOverflow = 10;
+
+inline void IncrementalCopyFastPath(const char* src, char* op, ssize_t len) {
+ while (op - src < 8) {
+ UnalignedCopy64(src, op);
+ len -= op - src;
+ op += op - src;
+ }
+ while (len > 0) {
+ UnalignedCopy64(src, op);
+ src += 8;
+ op += 8;
+ len -= 8;
+ }
+}
+
+} // namespace
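+
+// Illustrative trace of IncrementalCopyFastPath (exposition only): with
+// op - src == 2 and len == 20 over a buffer starting "ab", the first
+// UnalignedCopy64 doubles the repeating pattern (the gap becomes 4), the
+// second doubles it again (gap 8), and the plain eight-byte loop then
+// writes the remaining 14 bytes in two chunks, overrunning by at most
+// kMaxIncrementCopyOverflow bytes as described above.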
+
+static inline char* EmitLiteral(char* op,
+ const char* literal,
+ int len,
+ bool allow_fast_path) {
+ int n = len - 1; // Zero-length literals are disallowed
+ if (n < 60) {
+ // Fits in tag byte
+ *op++ = LITERAL | (n << 2);
+
+ // The vast majority of copies are below 16 bytes, for which a
+ // call to memcpy is overkill. This fast path can sometimes
+ // copy up to 15 bytes too much, but that is okay in the
+ // main loop, since we have a bit to go on for both sides:
+ //
+ // - The input will always have kInputMarginBytes = 15 extra
+ // available bytes, as long as we're in the main loop, and
+ // if not, allow_fast_path = false.
+ // - The output will always have 32 spare bytes (see
+ // MaxCompressedLength).
+ if (allow_fast_path && len <= 16) {
+ UnalignedCopy64(literal, op);
+ UnalignedCopy64(literal + 8, op + 8);
+ return op + len;
+ }
+ } else {
+ // Encode in upcoming bytes
+ char* base = op;
+ int count = 0;
+ op++;
+ while (n > 0) {
+ *op++ = n & 0xff;
+ n >>= 8;
+ count++;
+ }
+ assert(count >= 1);
+ assert(count <= 4);
+ *base = LITERAL | ((59+count) << 2);
+ }
+ memcpy(op, literal, len);
+ return op + len;
+}
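+
+// Illustrative encodings (exposition only, not called by the library): a
+// 10-byte literal fits the short form, so its tag is LITERAL | ((10-1) << 2)
+// = 0x24, followed by the 10 literal bytes. A 300-byte literal takes the
+// long form: n = 299 needs two extra length bytes, so the tag is
+// LITERAL | ((59+2) << 2) = 0xf4, followed by 0x2b 0x01 (299 little-endian)
+// and then the literal bytes themselves.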
+
+static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
+ assert(len <= 64);
+ assert(len >= 4);
+ assert(offset < 65536);
+
+ if ((len < 12) && (offset < 2048)) {
+ size_t len_minus_4 = len - 4;
+ assert(len_minus_4 < 8); // Must fit in 3 bits
+ *op++ = COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8) << 5);
+ *op++ = offset & 0xff;
+ } else {
+ *op++ = COPY_2_BYTE_OFFSET + ((len-1) << 2);
+ LittleEndian::Store16(op, offset);
+ op += 2;
+ }
+ return op;
+}
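+
+// Illustrative encodings (exposition only): a 7-byte copy at offset 300 uses
+// the one-byte-offset form, tag COPY_1_BYTE_OFFSET + ((7-4) << 2) +
+// ((300 >> 8) << 5) = 0x2d followed by the low offset byte 0x2c; a 7-byte
+// copy at offset 3000 falls back to the two-byte-offset form, tag
+// COPY_2_BYTE_OFFSET + ((7-1) << 2) = 0x1a followed by 0xb8 0x0b
+// (3000 little-endian).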
+
+static inline char* EmitCopy(char* op, size_t offset, int len) {
+ // Emit 64 byte copies but make sure to keep at least four bytes reserved
+ while (len >= 68) {
+ op = EmitCopyLessThan64(op, offset, 64);
+ len -= 64;
+ }
+
+ // Emit an extra 60 byte copy if we have too much data to fit in one copy
+ if (len > 64) {
+ op = EmitCopyLessThan64(op, offset, 60);
+ len -= 60;
+ }
+
+ // Emit remainder
+ op = EmitCopyLessThan64(op, offset, len);
+ return op;
+}
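+
+// Why the 68/64/60 split above matters (illustrative note): EmitCopyLessThan64
+// requires len >= 4, so a 65-byte match must not be emitted as 64 + 1. The
+// first loop only peels off 64 bytes while at least 68 remain, and the 60-byte
+// step turns an awkward remainder such as 65 into 60 + 5, keeping every
+// emitted copy inside the legal [4, 64] range.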
+
+
+bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
+ uint32 v = 0;
+ const char* limit = start + n;
+ if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
+ *result = v;
+ return true;
+ } else {
+ return false;
+ }
+}
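+
+// Illustrative preamble layout (exposition only): the uncompressed length is
+// stored as a little-endian base-128 varint, seven payload bits per byte with
+// the high bit marking continuation. A length of 2097150 (0x1ffffe) is
+// therefore encoded as the three bytes 0xfe 0xff 0x7f, which is what
+// Varint::Parse32WithLimit decodes here.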
+
+namespace internal {
+uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
+ // Use smaller hash table when input.size() is smaller, since we
+ // fill the table, incurring O(hash table size) overhead for
+ // compression, and if the input is short, we won't need that
+ // many hash table entries anyway.
+ assert(kMaxHashTableSize >= 256);
+ size_t htsize = 256;
+ while (htsize < kMaxHashTableSize && htsize < input_size) {
+ htsize <<= 1;
+ }
+
+ uint16* table;
+ if (htsize <= ARRAYSIZE(small_table_)) {
+ table = small_table_;
+ } else {
+ if (large_table_ == NULL) {
+ large_table_ = new uint16[kMaxHashTableSize];
+ }
+ table = large_table_;
+ }
+
+ *table_size = htsize;
+ memset(table, 0, htsize * sizeof(*table));
+ return table;
+}
+} // end namespace internal
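+
+// Illustrative sizing (exposition only): for a 1000-byte fragment the loop in
+// GetHashTable doubles htsize from 256 to 512 and then 1024, the first power
+// of two at or above the input size (subject to the kMaxHashTableSize cap),
+// so a short input only pays for clearing a 2 KB table of 1024 uint16
+// entries rather than the full-size table.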
+
+// For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
+// equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
+// empirically found that overlapping loads such as
+// UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
+// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
+//
+// We have different versions for 64- and 32-bit; ideally we would avoid the
+// two functions and just inline the UNALIGNED_LOAD64 call into
+// GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
+// enough to avoid loading the value multiple times then. For 64-bit, the load
+// is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
+// done at GetUint32AtOffset() time.
+
+#ifdef ARCH_K8
+
+typedef uint64 EightBytesReference;
+
+static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+ return UNALIGNED_LOAD64(ptr);
+}
+
+static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
+ assert(offset >= 0);
+ assert(offset <= 4);
+ return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
+}
+
+#else
+
+typedef const char* EightBytesReference;
+
+static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+ return ptr;
+}
+
+static inline uint32 GetUint32AtOffset(const char* v, int offset) {
+ assert(offset >= 0);
+ assert(offset <= 4);
+ return UNALIGNED_LOAD32(v + offset);
+}
+
+#endif
+
+// Flat array compression that does not emit the "uncompressed length"
+// prefix. Compresses "input" string to the "*op" buffer.
+//
+// REQUIRES: "input" is at most "kBlockSize" bytes long.
+// REQUIRES: "op" points to an array of memory that is at least
+// "MaxCompressedLength(input.size())" in size.
+// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+// REQUIRES: "table_size" is a power of two
+//
+// Returns an "end" pointer into "op" buffer.
+// "end - op" is the compressed size of "input".
+namespace internal {
+char* CompressFragment(const char* input,
+ size_t input_size,
+ char* op,
+ uint16* table,
+ const int table_size) {
+ // "ip" is the input pointer, and "op" is the output pointer.
+ const char* ip = input;
+ assert(input_size <= kBlockSize);
+ assert((table_size & (table_size - 1)) == 0); // table must be power of two
+ const int shift = 32 - Bits::Log2Floor(table_size);
+ assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
+ const char* ip_end = input + input_size;
+ const char* base_ip = ip;
+ // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
+ // [next_emit, ip_end) after the main loop.
+ const char* next_emit = ip;
+
+ const size_t kInputMarginBytes = 15;
+ if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
+ const char* ip_limit = input + input_size - kInputMarginBytes;
+
+ for (uint32 next_hash = Hash(++ip, shift); ; ) {
+ assert(next_emit < ip);
+ // The body of this loop calls EmitLiteral once and then EmitCopy one or
+ // more times. (The exception is that when we're close to exhausting
+ // the input we goto emit_remainder.)
+ //
+ // In the first iteration of this loop we're just starting, so
+ // there's nothing to copy, so calling EmitLiteral once is
+ // necessary. And we only start a new iteration when the
+ // current iteration has determined that a call to EmitLiteral will
+ // precede the next call to EmitCopy (if any).
+ //
+ // Step 1: Scan forward in the input looking for a 4-byte-long match.
+ // If we get close to exhausting the input then goto emit_remainder.
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned, look at every third byte, etc.. When a match is found,
+ // immediately go back to looking at every byte. This is a small loss
+ // (~5% performance, ~0.1% density) for compressible data due to more
+ // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
+ // win since the compressor quickly "realizes" the data is incompressible
+ // and doesn't bother looking for matches everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since the
+ // last match; dividing it by 32 (ie. right-shifting by five) gives the
+ // number of bytes to move ahead for each iteration.
+ uint32 skip = 32;
+
+ const char* next_ip = ip;
+ const char* candidate;
+ do {
+ ip = next_ip;
+ uint32 hash = next_hash;
+ assert(hash == Hash(ip, shift));
+ uint32 bytes_between_hash_lookups = skip++ >> 5;
+ next_ip = ip + bytes_between_hash_lookups;
+ if (PREDICT_FALSE(next_ip > ip_limit)) {
+ goto emit_remainder;
+ }
+ next_hash = Hash(next_ip, shift);
+ candidate = base_ip + table[hash];
+ assert(candidate >= base_ip);
+ assert(candidate < ip);
+
+ table[hash] = ip - base_ip;
+ } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
+ UNALIGNED_LOAD32(candidate)));
+
+ // Step 2: A 4-byte match has been found. We'll later see if more
+ // than 4 bytes match. But, prior to the match, input
+ // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
+ assert(next_emit + 16 <= ip_end);
+ op = EmitLiteral(op, next_emit, ip - next_emit, true);
+
+ // Step 3: Call EmitCopy, and then see if another EmitCopy could
+ // be our next move. Repeat until we find no match for the
+ // input immediately after what was consumed by the last EmitCopy call.
+ //
+ // If we exit this loop normally then we need to call EmitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can exit
+ // this loop via goto if we get close to exhausting the input.
+ EightBytesReference input_bytes;
+ uint32 candidate_bytes = 0;
+
+ do {
+ // We have a 4-byte match at ip, and no need to emit any
+ // "literal bytes" prior to ip.
+ const char* base = ip;
+ int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
+ ip += matched;
+ size_t offset = base - candidate;
+ assert(0 == memcmp(base, candidate, matched));
+ op = EmitCopy(op, offset, matched);
+ // We could immediately start working at ip now, but to improve
+ // compression we first update table[Hash(ip - 1, ...)].
+ const char* insert_tail = ip - 1;
+ next_emit = ip;
+ if (PREDICT_FALSE(ip >= ip_limit)) {
+ goto emit_remainder;
+ }
+ input_bytes = GetEightBytesAt(insert_tail);
+ uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
+ table[prev_hash] = ip - base_ip - 1;
+ uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
+ candidate = base_ip + table[cur_hash];
+ candidate_bytes = UNALIGNED_LOAD32(candidate);
+ table[cur_hash] = ip - base_ip;
+ } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
+
+ next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
+ ++ip;
+ }
+ }
+
+ emit_remainder:
+ // Emit the remaining bytes as a literal
+ if (next_emit < ip_end) {
+ op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
+ }
+
+ return op;
+}
+} // end namespace internal
+
+// Signature of output types needed by decompression code.
+// The decompression code is templatized on a type that obeys this
+// signature so that we do not pay virtual function call overhead in
+// the middle of a tight decompression loop.
+//
+// class DecompressionWriter {
+// public:
+// // Called before decompression
+// void SetExpectedLength(size_t length);
+//
+// // Called after decompression
+// bool CheckLength() const;
+//
+// // Called repeatedly during decompression
+// bool Append(const char* ip, size_t length);
+// bool AppendFromSelf(uint32 offset, size_t length);
+//
+// // The rules for how TryFastAppend differs from Append are somewhat
+// // convoluted:
+// //
+// // - TryFastAppend is allowed to decline (return false) at any
+// // time, for any reason -- just "return false" would be
+// // a perfectly legal implementation of TryFastAppend.
+// // The intention is for TryFastAppend to allow a fast path
+// // in the common case of a small append.
+// // - TryFastAppend is allowed to read up to <available> bytes
+// // from the input buffer, whereas Append is allowed to read
+// // <length>. However, if it returns true, it must leave
+// // at least five (kMaximumTagLength) bytes in the input buffer
+// // afterwards, so that there is always enough space to read the
+// // next tag without checking for a refill.
+// // - TryFastAppend must always decline (return false)
+// // if <length> is 61 or more, as in this case the literal length is not
+// // decoded fully. In practice, this should not be a big problem,
+// // as it is unlikely that one would implement a fast path accepting
+// // this much data.
+// //
+// bool TryFastAppend(const char* ip, size_t available, size_t length);
+// };
+
+// -----------------------------------------------------------------------
+// Lookup table for decompression code. Generated by ComputeTable() below.
+// -----------------------------------------------------------------------
+
+// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
+static const uint32 wordmask[] = {
+ 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
+};
+
+// Data stored per entry in lookup table:
+// Range Bits-used Description
+// ------------------------------------
+// 1..64 0..7 Literal/copy length encoded in opcode byte
+// 0..7 8..10 Copy offset encoded in opcode byte / 256
+// 0..4 11..13 Extra bytes after opcode
+//
+// We use eight bits for the length even though 7 would have sufficed,
+// for efficiency reasons:
+// (1) Extracting a byte is faster than a bit-field
+// (2) It properly aligns copy offset so we do not need a <<8
+static const uint16 char_table[256] = {
+ 0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
+ 0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
+ 0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
+ 0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
+ 0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
+ 0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
+ 0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
+ 0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
+ 0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
+ 0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
+ 0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
+ 0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
+ 0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
+ 0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
+ 0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
+ 0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
+ 0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
+ 0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
+ 0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
+ 0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
+ 0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
+ 0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
+ 0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
+ 0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
+ 0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
+ 0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
+ 0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
+ 0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
+ 0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
+ 0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
+ 0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
+ 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
+};
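+
+// Reading two entries as an illustration (not needed by the code):
+// char_table[0x04] == 0x0002 decodes opcode 0x04 = LITERAL | (1 << 2) as a
+// 2-byte literal (bits 0..7) with no embedded offset (bits 8..10) and no
+// extra bytes (bits 11..13); char_table[0x01] == 0x0804 decodes opcode 0x01
+// as a COPY_1_BYTE_OFFSET of length 4 (low byte 0x04) with one extra offset
+// byte to read (0x0800 >> 11 == 1).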
+
+// In debug mode, allow optional computation of the table at startup.
+// Also, check that the decompression table is correct.
+#ifndef NDEBUG
+DEFINE_bool(snappy_dump_decompression_table, false,
+ "If true, we print the decompression table at startup.");
+
+static uint16 MakeEntry(unsigned int extra,
+ unsigned int len,
+ unsigned int copy_offset) {
+ // Check that all of the fields fit within the allocated space
+ assert(extra == (extra & 0x7)); // At most 3 bits
+ assert(copy_offset == (copy_offset & 0x7)); // At most 3 bits
+ assert(len == (len & 0x7f)); // At most 7 bits
+ return len | (copy_offset << 8) | (extra << 11);
+}
+
+static void ComputeTable() {
+ uint16 dst[256];
+
+ // Place invalid entries in all places to detect missing initialization
+ int assigned = 0;
+ for (int i = 0; i < 256; i++) {
+ dst[i] = 0xffff;
+ }
+
+ // Small LITERAL entries. We store (len-1) in the top 6 bits.
+ for (unsigned int len = 1; len <= 60; len++) {
+ dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
+ assigned++;
+ }
+
+ // Large LITERAL entries. We use 60..63 in the high 6 bits to
+ // encode the number of bytes of length info that follow the opcode.
+ for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
+ // We set the length field in the lookup table to 1 because extra
+ // bytes encode len-1.
+ dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
+ assigned++;
+ }
+
+ // COPY_1_BYTE_OFFSET.
+ //
+ // The tag byte in the compressed data stores len-4 in 3 bits, and
+ // offset/256 in 5 bits. offset%256 is stored in the next byte.
+ //
+ // This format is used for length in range [4..11] and offset in
+ // range [0..2047]
+ for (unsigned int len = 4; len < 12; len++) {
+ for (unsigned int offset = 0; offset < 2048; offset += 256) {
+ dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
+ MakeEntry(1, len, offset>>8);
+ assigned++;
+ }
+ }
+
+ // COPY_2_BYTE_OFFSET.
+ // Tag contains len-1 in top 6 bits, and offset in next two bytes.
+ for (unsigned int len = 1; len <= 64; len++) {
+ dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
+ assigned++;
+ }
+
+ // COPY_4_BYTE_OFFSET.
+ // Tag contains len-1 in top 6 bits, and offset in next four bytes.
+ for (unsigned int len = 1; len <= 64; len++) {
+ dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
+ assigned++;
+ }
+
+ // Check that each entry was initialized exactly once.
+ if (assigned != 256) {
+ fprintf(stderr, "ComputeTable: assigned only %d of 256\n", assigned);
+ abort();
+ }
+ for (int i = 0; i < 256; i++) {
+ if (dst[i] == 0xffff) {
+ fprintf(stderr, "ComputeTable: did not assign byte %d\n", i);
+ abort();
+ }
+ }
+
+ if (FLAGS_snappy_dump_decompression_table) {
+ printf("static const uint16 char_table[256] = {\n ");
+ for (int i = 0; i < 256; i++) {
+ printf("0x%04x%s",
+ dst[i],
+ ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n " : ", ")));
+ }
+ printf("};\n");
+ }
+
+ // Check that computed table matched recorded table
+ for (int i = 0; i < 256; i++) {
+ if (dst[i] != char_table[i]) {
+ fprintf(stderr, "ComputeTable: byte %d: computed (%x), expect (%x)\n",
+ i, static_cast<int>(dst[i]), static_cast<int>(char_table[i]));
+ abort();
+ }
+ }
+}
+#endif /* !NDEBUG */
+
+// Helper class for decompression
+class SnappyDecompressor {
+ private:
+ Source* reader_; // Underlying source of bytes to decompress
+ const char* ip_; // Points to next buffered byte
+ const char* ip_limit_; // Points just past buffered bytes
+ uint32 peeked_; // Bytes peeked from reader (need to skip)
+ bool eof_; // Hit end of input without an error?
+ char scratch_[kMaximumTagLength]; // See RefillTag().
+
+ // Ensure that all of the tag metadata for the next tag is available
+ // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
+ // if (ip_limit_ - ip_ < 5).
+ //
+ // Returns true on success, false on error or end of input.
+ bool RefillTag();
+
+ public:
+ explicit SnappyDecompressor(Source* reader)
+ : reader_(reader),
+ ip_(NULL),
+ ip_limit_(NULL),
+ peeked_(0),
+ eof_(false) {
+ }
+
+ ~SnappyDecompressor() {
+ // Advance past any bytes we peeked at from the reader
+ reader_->Skip(peeked_);
+ }
+
+ // Returns true iff we have hit the end of the input without an error.
+ bool eof() const {
+ return eof_;
+ }
+
+ // Read the uncompressed length stored at the start of the compressed data.
+ // On success, stores the length in *result and returns true.
+ // On failure, returns false.
+ bool ReadUncompressedLength(uint32* result) {
+ assert(ip_ == NULL); // Must not have read anything yet
+ // Length is encoded in 1..5 bytes
+ *result = 0;
+ uint32 shift = 0;
+ while (true) {
+ if (shift >= 32) return false;
+ size_t n;
+ const char* ip = reader_->Peek(&n);
+ if (n == 0) return false;
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+ reader_->Skip(1);
+ *result |= static_cast<uint32>(c & 0x7f) << shift;
+ if (c < 128) {
+ break;
+ }
+ shift += 7;
+ }
+ return true;
+ }
+
+ // Process all items found in the input, writing them through *writer.
+ // Stops on error, end of input, or when the writer rejects an append.
+ template <class Writer>
+ void DecompressAllTags(Writer* writer) {
+ const char* ip = ip_;
+
+ // We could have put this refill fragment only at the beginning of the loop.
+ // However, duplicating it at the end of each branch gives the compiler more
+ // scope to optimize the <ip_limit_ - ip> expression based on the local
+ // context, which overall increases speed.
+ #define MAYBE_REFILL() \
+ if (ip_limit_ - ip < kMaximumTagLength) { \
+ ip_ = ip; \
+ if (!RefillTag()) return; \
+ ip = ip_; \
+ }
+
+ MAYBE_REFILL();
+ for ( ;; ) {
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
+
+ if ((c & 0x3) == LITERAL) {
+ size_t literal_length = (c >> 2) + 1u;
+ if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
+ assert(literal_length < 61);
+ ip += literal_length;
+ // NOTE(user): There is no MAYBE_REFILL() here, as TryFastAppend()
+ // will not return true unless there's already at least five spare
+ // bytes in addition to the literal.
+ continue;
+ }
+ if (PREDICT_FALSE(literal_length >= 61)) {
+ // Long literal.
+ const size_t literal_length_length = literal_length - 60;
+ literal_length =
+ (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
+ ip += literal_length_length;
+ }
+
+ size_t avail = ip_limit_ - ip;
+ while (avail < literal_length) {
+ if (!writer->Append(ip, avail)) return;
+ literal_length -= avail;
+ reader_->Skip(peeked_);
+ size_t n;
+ ip = reader_->Peek(&n);
+ avail = n;
+ peeked_ = avail;
+ if (avail == 0) return; // Premature end of input
+ ip_limit_ = ip + avail;
+ }
+ if (!writer->Append(ip, literal_length)) {
+ return;
+ }
+ ip += literal_length;
+ MAYBE_REFILL();
+ } else {
+ const uint32 entry = char_table[c];
+ const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
+ const uint32 length = entry & 0xff;
+ ip += entry >> 11;
+
+ // copy_offset/256 is encoded in bits 8..10. By just fetching
+ // those bits, we get copy_offset (since the bit-field starts at
+ // bit 8).
+ const uint32 copy_offset = entry & 0x700;
+ if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
+ return;
+ }
+ MAYBE_REFILL();
+ }
+ }
+
+#undef MAYBE_REFILL
+ }
+};
+
+bool SnappyDecompressor::RefillTag() {
+ const char* ip = ip_;
+ if (ip == ip_limit_) {
+ // Fetch a new fragment from the reader
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ size_t n;
+ ip = reader_->Peek(&n);
+ peeked_ = n;
+ if (n == 0) {
+ eof_ = true;
+ return false;
+ }
+ ip_limit_ = ip + n;
+ }
+
+ // Read the tag character
+ assert(ip < ip_limit_);
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+ const uint32 entry = char_table[c];
+ const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
+ assert(needed <= sizeof(scratch_));
+
+ // Read more bytes from reader if needed
+ uint32 nbuf = ip_limit_ - ip;
+ if (nbuf < needed) {
+ // Stitch together bytes from ip and reader to form the word
+ // contents. We store the needed bytes in "scratch_". They
+ // will be consumed immediately by the caller since we do not
+ // read more than we need.
+ memmove(scratch_, ip, nbuf);
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ peeked_ = 0;
+ while (nbuf < needed) {
+ size_t length;
+ const char* src = reader_->Peek(&length);
+ if (length == 0) return false;
+ uint32 to_add = min<uint32>(needed - nbuf, length);
+ memcpy(scratch_ + nbuf, src, to_add);
+ nbuf += to_add;
+ reader_->Skip(to_add);
+ }
+ assert(nbuf == needed);
+ ip_ = scratch_;
+ ip_limit_ = scratch_ + needed;
+ } else if (nbuf < kMaximumTagLength) {
+ // Have enough bytes, but move into scratch_ so that we do not
+ // read past end of input
+ memmove(scratch_, ip, nbuf);
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ peeked_ = 0;
+ ip_ = scratch_;
+ ip_limit_ = scratch_ + nbuf;
+ } else {
+ // Pass pointer to buffer returned by reader_.
+ ip_ = ip;
+ }
+ return true;
+}
+
+template <typename Writer>
+static bool InternalUncompress(Source* r, Writer* writer) {
+ // Read the uncompressed length from the front of the compressed input
+ SnappyDecompressor decompressor(r);
+ uint32 uncompressed_len = 0;
+ if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
+ return InternalUncompressAllTags(&decompressor, writer, uncompressed_len);
+}
+
+template <typename Writer>
+static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
+ Writer* writer,
+ uint32 uncompressed_len) {
+ writer->SetExpectedLength(uncompressed_len);
+
+ // Process the entire input
+ decompressor->DecompressAllTags(writer);
+ return (decompressor->eof() && writer->CheckLength());
+}
+
+bool GetUncompressedLength(Source* source, uint32* result) {
+ SnappyDecompressor decompressor(source);
+ return decompressor.ReadUncompressedLength(result);
+}
+
+size_t Compress(Source* reader, Sink* writer) {
+ size_t written = 0;
+ size_t N = reader->Available();
+ char ulength[Varint::kMax32];
+ char* p = Varint::Encode32(ulength, N);
+ writer->Append(ulength, p-ulength);
+ written += (p - ulength);
+
+ internal::WorkingMemory wmem;
+ char* scratch = NULL;
+ char* scratch_output = NULL;
+
+ while (N > 0) {
+ // Get next block to compress (without copying if possible)
+ size_t fragment_size;
+ const char* fragment = reader->Peek(&fragment_size);
+ assert(fragment_size != 0); // premature end of input
+ const size_t num_to_read = min(N, kBlockSize);
+ size_t bytes_read = fragment_size;
+
+ size_t pending_advance = 0;
+ if (bytes_read >= num_to_read) {
+ // Buffer returned by reader is large enough
+ pending_advance = num_to_read;
+ fragment_size = num_to_read;
+ } else {
+ // Read into scratch buffer
+ if (scratch == NULL) {
+ // If this is the last iteration, we want to allocate N bytes
+ // of space, otherwise the max possible kBlockSize space.
+ // num_to_read contains exactly the correct value
+ scratch = new char[num_to_read];
+ }
+ memcpy(scratch, fragment, bytes_read);
+ reader->Skip(bytes_read);
+
+ while (bytes_read < num_to_read) {
+ fragment = reader->Peek(&fragment_size);
+ size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
+ memcpy(scratch + bytes_read, fragment, n);
+ bytes_read += n;
+ reader->Skip(n);
+ }
+ assert(bytes_read == num_to_read);
+ fragment = scratch;
+ fragment_size = num_to_read;
+ }
+ assert(fragment_size == num_to_read);
+
+ // Get encoding table for compression
+ int table_size;
+ uint16* table = wmem.GetHashTable(num_to_read, &table_size);
+
+ // Compress input_fragment and append to dest
+ const int max_output = MaxCompressedLength(num_to_read);
+
+ // Need a scratch buffer for the output, in case the byte sink doesn't
+ // have room for us directly.
+ if (scratch_output == NULL) {
+ scratch_output = new char[max_output];
+ } else {
+ // Since we encode kBlockSize regions followed by a region
+ // which is <= kBlockSize in length, a previously allocated
+ // scratch_output[] region is big enough for this iteration.
+ }
+ char* dest = writer->GetAppendBuffer(max_output, scratch_output);
+ char* end = internal::CompressFragment(fragment, fragment_size,
+ dest, table, table_size);
+ writer->Append(dest, end - dest);
+ written += (end - dest);
+
+ N -= num_to_read;
+ reader->Skip(pending_advance);
+ }
+
+ delete[] scratch;
+ delete[] scratch_output;
+
+ return written;
+}
+
+// -----------------------------------------------------------------------
+// IOVec interfaces
+// -----------------------------------------------------------------------
+
+// A type that writes to an iovec.
+// Note that this is not a "ByteSink", but a type that matches the
+// Writer template argument to SnappyDecompressor::DecompressAllTags().
+class SnappyIOVecWriter {
+ private:
+ const struct iovec* output_iov_;
+ const size_t output_iov_count_;
+
+ // We are currently writing into output_iov_[curr_iov_index_].
+ int curr_iov_index_;
+
+ // Bytes written to output_iov_[curr_iov_index_] so far.
+ size_t curr_iov_written_;
+
+ // Total bytes decompressed into output_iov_ so far.
+ size_t total_written_;
+
+ // Maximum number of bytes that will be decompressed into output_iov_.
+ size_t output_limit_;
+
+ inline char* GetIOVecPointer(int index, size_t offset) {
+ return reinterpret_cast<char*>(output_iov_[index].iov_base) +
+ offset;
+ }
+
+ public:
+ // Does not take ownership of iov. iov must be valid during the
+ // entire lifetime of the SnappyIOVecWriter.
+ inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
+ : output_iov_(iov),
+ output_iov_count_(iov_count),
+ curr_iov_index_(0),
+ curr_iov_written_(0),
+ total_written_(0),
+ output_limit_(-1) {
+ }
+
+ inline void SetExpectedLength(size_t len) {
+ output_limit_ = len;
+ }
+
+ inline bool CheckLength() const {
+ return total_written_ == output_limit_;
+ }
+
+ inline bool Append(const char* ip, size_t len) {
+ if (total_written_ + len > output_limit_) {
+ return false;
+ }
+
+ while (len > 0) {
+ assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
+ if (curr_iov_written_ >= output_iov_[curr_iov_index_].iov_len) {
+ // This iovec is full. Go to the next one.
+ if (curr_iov_index_ + 1 >= output_iov_count_) {
+ return false;
+ }
+ curr_iov_written_ = 0;
+ ++curr_iov_index_;
+ }
+
+ const size_t to_write = std::min(
+ len, output_iov_[curr_iov_index_].iov_len - curr_iov_written_);
+ memcpy(GetIOVecPointer(curr_iov_index_, curr_iov_written_),
+ ip,
+ to_write);
+ curr_iov_written_ += to_write;
+ total_written_ += to_write;
+ ip += to_write;
+ len -= to_write;
+ }
+
+ return true;
+ }
+
+ inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+ const size_t space_left = output_limit_ - total_written_;
+ if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
+ output_iov_[curr_iov_index_].iov_len - curr_iov_written_ >= 16) {
+ // Fast path, used for the majority (about 95%) of invocations.
+ char* ptr = GetIOVecPointer(curr_iov_index_, curr_iov_written_);
+ UnalignedCopy64(ip, ptr);
+ UnalignedCopy64(ip + 8, ptr + 8);
+ curr_iov_written_ += len;
+ total_written_ += len;
+ return true;
+ }
+
+ return false;
+ }
+
+ inline bool AppendFromSelf(size_t offset, size_t len) {
+ if (offset > total_written_ || offset == 0) {
+ return false;
+ }
+ const size_t space_left = output_limit_ - total_written_;
+ if (len > space_left) {
+ return false;
+ }
+
+ // Locate the iovec from which we need to start the copy.
+ int from_iov_index = curr_iov_index_;
+ size_t from_iov_offset = curr_iov_written_;
+ while (offset > 0) {
+ if (from_iov_offset >= offset) {
+ from_iov_offset -= offset;
+ break;
+ }
+
+ offset -= from_iov_offset;
+ --from_iov_index;
+ assert(from_iov_index >= 0);
+ from_iov_offset = output_iov_[from_iov_index].iov_len;
+ }
+
+ // Copy <len> bytes starting from the iovec pointed to by from_iov_index to
+ // the current iovec.
+ while (len > 0) {
+ assert(from_iov_index <= curr_iov_index_);
+ if (from_iov_index != curr_iov_index_) {
+ const size_t to_copy = std::min(
+ output_iov_[from_iov_index].iov_len - from_iov_offset,
+ len);
+ Append(GetIOVecPointer(from_iov_index, from_iov_offset), to_copy);
+ len -= to_copy;
+ if (len > 0) {
+ ++from_iov_index;
+ from_iov_offset = 0;
+ }
+ } else {
+ assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
+ size_t to_copy = std::min(output_iov_[curr_iov_index_].iov_len -
+ curr_iov_written_,
+ len);
+ if (to_copy == 0) {
+ // This iovec is full. Go to the next one.
+ if (curr_iov_index_ + 1 >= output_iov_count_) {
+ return false;
+ }
+ ++curr_iov_index_;
+ curr_iov_written_ = 0;
+ continue;
+ }
+ if (to_copy > len) {
+ to_copy = len;
+ }
+ IncrementalCopy(GetIOVecPointer(from_iov_index, from_iov_offset),
+ GetIOVecPointer(curr_iov_index_, curr_iov_written_),
+ to_copy);
+ curr_iov_written_ += to_copy;
+ from_iov_offset += to_copy;
+ total_written_ += to_copy;
+ len -= to_copy;
+ }
+ }
+
+ return true;
+ }
+
+};
+
+bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
+ const struct iovec* iov, size_t iov_cnt) {
+ ByteArraySource reader(compressed, compressed_length);
+ return RawUncompressToIOVec(&reader, iov, iov_cnt);
+}
+
+bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
+ size_t iov_cnt) {
+ SnappyIOVecWriter output(iov, iov_cnt);
+ return InternalUncompress(compressed, &output);
+}
+
+// -----------------------------------------------------------------------
+// Flat array interfaces
+// -----------------------------------------------------------------------
+
+// A type that writes to a flat array.
+// Note that this is not a "ByteSink", but a type that matches the
+// Writer template argument to SnappyDecompressor::DecompressAllTags().
+class SnappyArrayWriter {
+ private:
+ char* base_;
+ char* op_;
+ char* op_limit_;
+
+ public:
+ inline explicit SnappyArrayWriter(char* dst)
+ : base_(dst),
+ op_(dst) {
+ }
+
+ inline void SetExpectedLength(size_t len) {
+ op_limit_ = op_ + len;
+ }
+
+ inline bool CheckLength() const {
+ return op_ == op_limit_;
+ }
+
+ inline bool Append(const char* ip, size_t len) {
+ char* op = op_;
+ const size_t space_left = op_limit_ - op;
+ if (space_left < len) {
+ return false;
+ }
+ memcpy(op, ip, len);
+ op_ = op + len;
+ return true;
+ }
+
+ inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+ char* op = op_;
+ const size_t space_left = op_limit_ - op;
+ if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
+ // Fast path, used for the majority (about 95%) of invocations.
+ UnalignedCopy64(ip, op);
+ UnalignedCopy64(ip + 8, op + 8);
+ op_ = op + len;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ inline bool AppendFromSelf(size_t offset, size_t len) {
+ char* op = op_;
+ const size_t space_left = op_limit_ - op;
+
+ // Check if we try to append from before the start of the buffer.
+ // Normally this would just be a check for "produced < offset",
+ // but "produced <= offset - 1u" is equivalent for every case
+ // except the one where offset==0, where the right side will wrap around
+ // to a very big number. This is convenient, as offset==0 is another
+ // invalid case that we also want to catch, so that we do not go
+ // into an infinite loop.
+ assert(op >= base_);
+ size_t produced = op - base_;
+ if (produced <= offset - 1u) {
+ return false;
+ }
+ if (len <= 16 && offset >= 8 && space_left >= 16) {
+ // Fast path, used for the majority (70-80%) of dynamic invocations.
+ UnalignedCopy64(op - offset, op);
+ UnalignedCopy64(op - offset + 8, op + 8);
+ } else {
+ if (space_left >= len + kMaxIncrementCopyOverflow) {
+ IncrementalCopyFastPath(op - offset, op, len);
+ } else {
+ if (space_left < len) {
+ return false;
+ }
+ IncrementalCopy(op - offset, op, len);
+ }
+ }
+
+ op_ = op + len;
+ return true;
+ }
+};
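+
+// Illustrative check of the "offset - 1u" trick above (exposition only): if
+// nothing has been produced yet and a corrupt stream requests a copy at
+// offset 0, "produced <= offset - 1u" compares 0 against the maximum size_t
+// value and rejects the copy, so a single unsigned comparison covers both
+// the offset == 0 case and the offset > produced case.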
+
+bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
+ ByteArraySource reader(compressed, n);
+ return RawUncompress(&reader, uncompressed);
+}
+
+bool RawUncompress(Source* compressed, char* uncompressed) {
+ SnappyArrayWriter output(uncompressed);
+ return InternalUncompress(compressed, &output);
+}
+
+bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
+ size_t ulength;
+ if (!GetUncompressedLength(compressed, n, &ulength)) {
+ return false;
+ }
+ // On 32-bit builds: max_size() < kuint32max. Check for that instead
+ // of crashing (e.g., consider externally specified compressed data).
+ if (ulength > uncompressed->max_size()) {
+ return false;
+ }
+ STLStringResizeUninitialized(uncompressed, ulength);
+ return RawUncompress(compressed, n, string_as_array(uncompressed));
+}
+
+
+// A Writer that drops everything on the floor and just does validation
+class SnappyDecompressionValidator {
+ private:
+ size_t expected_;
+ size_t produced_;
+
+ public:
+ inline SnappyDecompressionValidator() : produced_(0) { }
+ inline void SetExpectedLength(size_t len) {
+ expected_ = len;
+ }
+ inline bool CheckLength() const {
+ return expected_ == produced_;
+ }
+ inline bool Append(const char* ip, size_t len) {
+ produced_ += len;
+ return produced_ <= expected_;
+ }
+ inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
+ return false;
+ }
+ inline bool AppendFromSelf(size_t offset, size_t len) {
+ // See SnappyArrayWriter::AppendFromSelf for an explanation of
+ // the "offset - 1u" trick.
+ if (produced_ <= offset - 1u) return false;
+ produced_ += len;
+ return produced_ <= expected_;
+ }
+};
+
+bool IsValidCompressedBuffer(const char* compressed, size_t n) {
+ ByteArraySource reader(compressed, n);
+ SnappyDecompressionValidator writer;
+ return InternalUncompress(&reader, &writer);
+}
+
+void RawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length) {
+ ByteArraySource reader(input, input_length);
+ UncheckedByteArraySink writer(compressed);
+ Compress(&reader, &writer);
+
+ // Compute how many bytes were added
+ *compressed_length = (writer.CurrentDestination() - compressed);
+}
+
+size_t Compress(const char* input, size_t input_length, string* compressed) {
+ // Pre-grow the buffer to the max length of the compressed output
+ compressed->resize(MaxCompressedLength(input_length));
+
+ size_t compressed_length;
+ RawCompress(input, input_length, string_as_array(compressed),
+ &compressed_length);
+ compressed->resize(compressed_length);
+ return compressed_length;
+}
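+
+// A minimal round-trip sketch using the string interfaces above (exposition
+// only; LoadSomeData() is a hypothetical stand-in for the caller's input):
+//
+//   std::string original = LoadSomeData();
+//   std::string packed, unpacked;
+//   snappy::Compress(original.data(), original.size(), &packed);
+//   if (!snappy::Uncompress(packed.data(), packed.size(), &unpacked)) {
+//     // corrupt or truncated input
+//   }
+//   assert(unpacked == original);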
+
+
+} // end namespace snappy
+
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy.h b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy.h
new file mode 100644
index 00000000..e879e794
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy.h
@@ -0,0 +1,184 @@
+// Copyright 2005 and onwards Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// A light-weight compression algorithm. It is designed for speed of
+// compression and decompression, rather than for the utmost in space
+// savings.
+//
+// For getting better compression ratios when you are compressing data
+// with long repeated sequences or compressing data that is similar to
+// other data, while still compressing fast, you might look at first
+// using BMDiff and then compressing the output of BMDiff with
+// Snappy.
+
+#ifndef UTIL_SNAPPY_SNAPPY_H__
+#define UTIL_SNAPPY_SNAPPY_H__
+
+#include <stddef.h>
+#include <string>
+
+#include "snappy-stubs-public.h"
+
+namespace snappy {
+ class Source;
+ class Sink;
+
+ // ------------------------------------------------------------------------
+ // Generic compression/decompression routines.
+ // ------------------------------------------------------------------------
+
+ // Compress the bytes read from "*source" and append to "*sink". Return the
+ // number of bytes written.
+ size_t Compress(Source* source, Sink* sink);
+
+ // Find the uncompressed length of the given stream, as given by the header.
+ // Note that the true length could deviate from this; the stream could e.g.
+ // be truncated.
+ //
+ // Also note that this leaves "*source" in a state that is unsuitable for
+ // further operations, such as RawUncompress(). You will need to rewind
+ // or recreate the source yourself before attempting any further calls.
+ bool GetUncompressedLength(Source* source, uint32* result);
+
+ // ------------------------------------------------------------------------
+ // Higher-level string based routines (should be sufficient for most users)
+ // ------------------------------------------------------------------------
+
+ // Sets "*output" to the compressed version of "input[0,input_length-1]".
+ // Original contents of *output are lost.
+ //
+ // REQUIRES: "input[]" is not an alias of "*output".
+ size_t Compress(const char* input, size_t input_length, string* output);
+
+ // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
+ // Original contents of "*uncompressed" are lost.
+ //
+ // REQUIRES: "compressed[]" is not an alias of "*uncompressed".
+ //
+ // returns false if the message is corrupted and could not be decompressed
+ bool Uncompress(const char* compressed, size_t compressed_length,
+ string* uncompressed);
+
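+  // A minimal round-trip sketch using the two string-based routines above
+  // (illustrative only; "original", "packed" and "restored" are placeholder
+  // names, not part of this API):
+  //    string original = ..., packed, restored;
+  //    snappy::Compress(original.data(), original.size(), &packed);
+  //    if (snappy::Uncompress(packed.data(), packed.size(), &restored))
+  //      assert(restored == original);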
+
+ // ------------------------------------------------------------------------
+ // Lower-level character array based routines. May be useful for
+ // efficiency reasons in certain circumstances.
+ // ------------------------------------------------------------------------
+
+ // REQUIRES: "compressed" must point to an area of memory that is at
+ // least "MaxCompressedLength(input_length)" bytes in length.
+ //
+  // Takes the data stored in "input[0..input_length-1]" and stores
+ // it in the array pointed to by "compressed".
+ //
+ // "*compressed_length" is set to the length of the compressed output.
+ //
+ // Example:
+ // char* output = new char[snappy::MaxCompressedLength(input_length)];
+ // size_t output_length;
+ // RawCompress(input, input_length, output, &output_length);
+ // ... Process(output, output_length) ...
+ // delete [] output;
+ void RawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length);
+
+ // Given data in "compressed[0..compressed_length-1]" generated by
+ // calling the Snappy::Compress routine, this routine
+ // stores the uncompressed data to
+ // uncompressed[0..GetUncompressedLength(compressed)-1]
+  // returns false if the message is corrupted and could not be decompressed
+ bool RawUncompress(const char* compressed, size_t compressed_length,
+ char* uncompressed);
+
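+  // A matching decompression sketch (illustrative only; assumes "compressed"
+  // and "compressed_length" hold output from RawCompress above, and uses
+  // GetUncompressedLength declared later in this header):
+  //    size_t ulen;
+  //    if (GetUncompressedLength(compressed, compressed_length, &ulen)) {
+  //      char* uncompressed = new char[ulen];
+  //      RawUncompress(compressed, compressed_length, uncompressed);
+  //      ... Process(uncompressed, ulen) ...
+  //      delete [] uncompressed;
+  //    }
+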
+ // Given data from the byte source 'compressed' generated by calling
+ // the Snappy::Compress routine, this routine stores the uncompressed
+ // data to
+ // uncompressed[0..GetUncompressedLength(compressed,compressed_length)-1]
+  // returns false if the message is corrupted and could not be decompressed
+ bool RawUncompress(Source* compressed, char* uncompressed);
+
+ // Given data in "compressed[0..compressed_length-1]" generated by
+ // calling the Snappy::Compress routine, this routine
+ // stores the uncompressed data to the iovec "iov". The number of physical
+ // buffers in "iov" is given by iov_cnt and their cumulative size
+ // must be at least GetUncompressedLength(compressed). The individual buffers
+ // in "iov" must not overlap with each other.
+ //
+  // returns false if the message is corrupted and could not be decompressed
+ bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
+ const struct iovec* iov, size_t iov_cnt);
+
+ // Given data from the byte source 'compressed' generated by calling
+ // the Snappy::Compress routine, this routine stores the uncompressed
+ // data to the iovec "iov". The number of physical
+ // buffers in "iov" is given by iov_cnt and their cumulative size
+ // must be at least GetUncompressedLength(compressed). The individual buffers
+ // in "iov" must not overlap with each other.
+ //
+  // returns false if the message is corrupted and could not be decompressed
+ bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
+ size_t iov_cnt);
+
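+  // Rough usage sketch for the iovec overloads above (illustrative only;
+  // the single-entry iovec and the names "comp"/"comp_len" are assumptions
+  // made for this example):
+  //    size_t ulen;
+  //    if (GetUncompressedLength(comp, comp_len, &ulen)) {
+  //      struct iovec iov;
+  //      iov.iov_base = new char[ulen];
+  //      iov.iov_len = ulen;
+  //      RawUncompressToIOVec(comp, comp_len, &iov, 1);
+  //    }
+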
+ // Returns the maximal size of the compressed representation of
+  // input data that is "source_bytes" bytes in length.
+ size_t MaxCompressedLength(size_t source_bytes);
+
+ // REQUIRES: "compressed[]" was produced by RawCompress() or Compress()
+ // Returns true and stores the length of the uncompressed data in
+ // *result normally. Returns false on parsing error.
+ // This operation takes O(1) time.
+ bool GetUncompressedLength(const char* compressed, size_t compressed_length,
+ size_t* result);
+
+ // Returns true iff the contents of "compressed[]" can be uncompressed
+ // successfully. Does not return the uncompressed data. Takes
+ // time proportional to compressed_length, but is usually at least
+ // a factor of four faster than actual decompression.
+ bool IsValidCompressedBuffer(const char* compressed,
+ size_t compressed_length);
+
+ // The size of a compression block. Note that many parts of the compression
+ // code assumes that kBlockSize <= 65536; in particular, the hash table
+ // can only store 16-bit offsets, and EmitCopy() also assumes the offset
+ // is 65535 bytes or less. Note also that if you change this, it will
+ // affect the framing format (see framing_format.txt).
+ //
+ // Note that there might be older data around that is compressed with larger
+ // block sizes, so the decompression code should not rely on the
+ // non-existence of long backreferences.
+ static const int kBlockLog = 16;
+ static const size_t kBlockSize = 1 << kBlockLog;
+
+ static const int kMaxHashTableBits = 14;
+ static const size_t kMaxHashTableSize = 1 << kMaxHashTableBits;
+} // end namespace snappy
+
+
+#endif // UTIL_SNAPPY_SNAPPY_H__
diff --git a/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy_unittest.cc b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy_unittest.cc
new file mode 100644
index 00000000..be7bba6b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/snappy-1.1.2/snappy_unittest.cc
@@ -0,0 +1,1355 @@
+// Copyright 2005 and onwards Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <math.h>
+#include <stdlib.h>
+
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "snappy.h"
+#include "snappy-internal.h"
+#include "snappy-test.h"
+#include "snappy-sinksource.h"
+
+DEFINE_int32(start_len, -1,
+ "Starting prefix size for testing (-1: just full file contents)");
+DEFINE_int32(end_len, -1,
+ "Starting prefix size for testing (-1: just full file contents)");
+DEFINE_int32(bytes, 10485760,
+ "How many bytes to compress/uncompress per file for timing");
+
+DEFINE_bool(zlib, false,
+ "Run zlib compression (http://www.zlib.net)");
+DEFINE_bool(lzo, false,
+ "Run LZO compression (http://www.oberhumer.com/opensource/lzo/)");
+DEFINE_bool(quicklz, false,
+ "Run quickLZ compression (http://www.quicklz.com/)");
+DEFINE_bool(liblzf, false,
+ "Run libLZF compression "
+ "(http://www.goof.com/pcg/marc/liblzf.html)");
+DEFINE_bool(fastlz, false,
+ "Run FastLZ compression (http://www.fastlz.org/");
+DEFINE_bool(snappy, true, "Run snappy compression");
+
+
+DEFINE_bool(write_compressed, false,
+ "Write compressed versions of each file to <file>.comp");
+DEFINE_bool(write_uncompressed, false,
+ "Write uncompressed versions of each file to <file>.uncomp");
+
+namespace snappy {
+
+
+#ifdef HAVE_FUNC_MMAP
+
+// To test against code that reads beyond its input, this class copies a
+// string to a newly allocated group of pages, the last of which
+// is made unreadable via mprotect. Note that we need to allocate the
+// memory with mmap(), as POSIX allows mprotect() only on memory allocated
+// with mmap(), and some malloc/posix_memalign implementations expect to
+// be able to read previously allocated memory while doing heap allocations.
+class DataEndingAtUnreadablePage {
+ public:
+ explicit DataEndingAtUnreadablePage(const string& s) {
+ const size_t page_size = getpagesize();
+ const size_t size = s.size();
+ // Round up space for string to a multiple of page_size.
+ size_t space_for_string = (size + page_size - 1) & ~(page_size - 1);
+ alloc_size_ = space_for_string + page_size;
+ mem_ = mmap(NULL, alloc_size_,
+ PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ CHECK_NE(MAP_FAILED, mem_);
+ protected_page_ = reinterpret_cast<char*>(mem_) + space_for_string;
+ char* dst = protected_page_ - size;
+ memcpy(dst, s.data(), size);
+ data_ = dst;
+ size_ = size;
+ // Make guard page unreadable.
+ CHECK_EQ(0, mprotect(protected_page_, page_size, PROT_NONE));
+ }
+
+ ~DataEndingAtUnreadablePage() {
+ // Undo the mprotect.
+ CHECK_EQ(0, mprotect(protected_page_, getpagesize(), PROT_READ|PROT_WRITE));
+ CHECK_EQ(0, munmap(mem_, alloc_size_));
+ }
+
+ const char* data() const { return data_; }
+ size_t size() const { return size_; }
+
+ private:
+ size_t alloc_size_;
+ void* mem_;
+ char* protected_page_;
+ const char* data_;
+ size_t size_;
+};
+
+#else // HAVE_FUNC_MMAP
+
+// Fallback for systems without mmap.
+typedef string DataEndingAtUnreadablePage;
+
+#endif
+
+enum CompressorType {
+ ZLIB, LZO, LIBLZF, QUICKLZ, FASTLZ, SNAPPY
+};
+
+const char* names[] = {
+ "ZLIB", "LZO", "LIBLZF", "QUICKLZ", "FASTLZ", "SNAPPY"
+};
+
+static size_t MinimumRequiredOutputSpace(size_t input_size,
+ CompressorType comp) {
+ switch (comp) {
+#ifdef ZLIB_VERSION
+ case ZLIB:
+ return ZLib::MinCompressbufSize(input_size);
+#endif // ZLIB_VERSION
+
+#ifdef LZO_VERSION
+ case LZO:
+ return input_size + input_size/64 + 16 + 3;
+#endif // LZO_VERSION
+
+#ifdef LZF_VERSION
+ case LIBLZF:
+ return input_size;
+#endif // LZF_VERSION
+
+#ifdef QLZ_VERSION_MAJOR
+ case QUICKLZ:
+ return input_size + 36000; // 36000 is used for scratch.
+#endif // QLZ_VERSION_MAJOR
+
+#ifdef FASTLZ_VERSION
+ case FASTLZ:
+ return max(static_cast<int>(ceil(input_size * 1.05)), 66);
+#endif // FASTLZ_VERSION
+
+ case SNAPPY:
+ return snappy::MaxCompressedLength(input_size);
+
+ default:
+ LOG(FATAL) << "Unknown compression type number " << comp;
+ }
+}
+
+// Returns true if we successfully compressed, false otherwise.
+//
+// If compressed_is_preallocated is set, do not resize the compressed buffer.
+// This is typically what you want for a benchmark, in order to not spend
+// time in the memory allocator. If you do set this flag, however,
+// "compressed" must be preinitialized to at least MinCompressbufSize(comp)
+// number of bytes, and may contain junk bytes at the end after return.
+static bool Compress(const char* input, size_t input_size, CompressorType comp,
+ string* compressed, bool compressed_is_preallocated) {
+ if (!compressed_is_preallocated) {
+ compressed->resize(MinimumRequiredOutputSpace(input_size, comp));
+ }
+
+ switch (comp) {
+#ifdef ZLIB_VERSION
+ case ZLIB: {
+ ZLib zlib;
+ uLongf destlen = compressed->size();
+ int ret = zlib.Compress(
+ reinterpret_cast<Bytef*>(string_as_array(compressed)),
+ &destlen,
+ reinterpret_cast<const Bytef*>(input),
+ input_size);
+ CHECK_EQ(Z_OK, ret);
+ if (!compressed_is_preallocated) {
+ compressed->resize(destlen);
+ }
+ return true;
+ }
+#endif // ZLIB_VERSION
+
+#ifdef LZO_VERSION
+ case LZO: {
+ unsigned char* mem = new unsigned char[LZO1X_1_15_MEM_COMPRESS];
+ lzo_uint destlen;
+ int ret = lzo1x_1_15_compress(
+ reinterpret_cast<const uint8*>(input),
+ input_size,
+ reinterpret_cast<uint8*>(string_as_array(compressed)),
+ &destlen,
+ mem);
+ CHECK_EQ(LZO_E_OK, ret);
+ delete[] mem;
+ if (!compressed_is_preallocated) {
+ compressed->resize(destlen);
+ }
+ break;
+ }
+#endif // LZO_VERSION
+
+#ifdef LZF_VERSION
+ case LIBLZF: {
+ int destlen = lzf_compress(input,
+ input_size,
+ string_as_array(compressed),
+ input_size);
+ if (destlen == 0) {
+ // lzf *can* cause lots of blowup when compressing, so they
+ // recommend to limit outsize to insize, and just not compress
+ // if it's bigger. Ideally, we'd just swap input and output.
+ compressed->assign(input, input_size);
+ destlen = input_size;
+ }
+ if (!compressed_is_preallocated) {
+ compressed->resize(destlen);
+ }
+ break;
+ }
+#endif // LZF_VERSION
+
+#ifdef QLZ_VERSION_MAJOR
+ case QUICKLZ: {
+ qlz_state_compress *state_compress = new qlz_state_compress;
+ int destlen = qlz_compress(input,
+ string_as_array(compressed),
+ input_size,
+ state_compress);
+ delete state_compress;
+ CHECK_NE(0, destlen);
+ if (!compressed_is_preallocated) {
+ compressed->resize(destlen);
+ }
+ break;
+ }
+#endif // QLZ_VERSION_MAJOR
+
+#ifdef FASTLZ_VERSION
+ case FASTLZ: {
+ // Use level 1 compression since we mostly care about speed.
+ int destlen = fastlz_compress_level(
+ 1,
+ input,
+ input_size,
+ string_as_array(compressed));
+ if (!compressed_is_preallocated) {
+ compressed->resize(destlen);
+ }
+ CHECK_NE(destlen, 0);
+ break;
+ }
+#endif // FASTLZ_VERSION
+
+ case SNAPPY: {
+ size_t destlen;
+ snappy::RawCompress(input, input_size,
+ string_as_array(compressed),
+ &destlen);
+ CHECK_LE(destlen, snappy::MaxCompressedLength(input_size));
+ if (!compressed_is_preallocated) {
+ compressed->resize(destlen);
+ }
+ break;
+ }
+
+
+ default: {
+ return false; // the asked-for library wasn't compiled in
+ }
+ }
+ return true;
+}
+
+static bool Uncompress(const string& compressed, CompressorType comp,
+ int size, string* output) {
+ switch (comp) {
+#ifdef ZLIB_VERSION
+ case ZLIB: {
+ output->resize(size);
+ ZLib zlib;
+ uLongf destlen = output->size();
+ int ret = zlib.Uncompress(
+ reinterpret_cast<Bytef*>(string_as_array(output)),
+ &destlen,
+ reinterpret_cast<const Bytef*>(compressed.data()),
+ compressed.size());
+ CHECK_EQ(Z_OK, ret);
+ CHECK_EQ(static_cast<uLongf>(size), destlen);
+ break;
+ }
+#endif // ZLIB_VERSION
+
+#ifdef LZO_VERSION
+ case LZO: {
+ output->resize(size);
+ lzo_uint destlen;
+ int ret = lzo1x_decompress(
+ reinterpret_cast<const uint8*>(compressed.data()),
+ compressed.size(),
+ reinterpret_cast<uint8*>(string_as_array(output)),
+ &destlen,
+ NULL);
+ CHECK_EQ(LZO_E_OK, ret);
+ CHECK_EQ(static_cast<lzo_uint>(size), destlen);
+ break;
+ }
+#endif // LZO_VERSION
+
+#ifdef LZF_VERSION
+ case LIBLZF: {
+ output->resize(size);
+ int destlen = lzf_decompress(compressed.data(),
+ compressed.size(),
+ string_as_array(output),
+ output->size());
+ if (destlen == 0) {
+ // This error probably means we had decided not to compress,
+ // and thus have stored input in output directly.
+ output->assign(compressed.data(), compressed.size());
+ destlen = compressed.size();
+ }
+ CHECK_EQ(destlen, size);
+ break;
+ }
+#endif // LZF_VERSION
+
+#ifdef QLZ_VERSION_MAJOR
+ case QUICKLZ: {
+ output->resize(size);
+ qlz_state_decompress *state_decompress = new qlz_state_decompress;
+ int destlen = qlz_decompress(compressed.data(),
+ string_as_array(output),
+ state_decompress);
+ delete state_decompress;
+ CHECK_EQ(destlen, size);
+ break;
+ }
+#endif // QLZ_VERSION_MAJOR
+
+#ifdef FASTLZ_VERSION
+ case FASTLZ: {
+ output->resize(size);
+ int destlen = fastlz_decompress(compressed.data(),
+ compressed.length(),
+ string_as_array(output),
+ size);
+ CHECK_EQ(destlen, size);
+ break;
+ }
+#endif // FASTLZ_VERSION
+
+ case SNAPPY: {
+ snappy::RawUncompress(compressed.data(), compressed.size(),
+ string_as_array(output));
+ break;
+ }
+
+
+ default: {
+ return false; // the asked-for library wasn't compiled in
+ }
+ }
+ return true;
+}
+
+static void Measure(const char* data,
+ size_t length,
+ CompressorType comp,
+ int repeats,
+ int block_size) {
+  // Run tests a few times and pick the median running times
+ static const int kRuns = 5;
+ double ctime[kRuns];
+ double utime[kRuns];
+ int compressed_size = 0;
+
+ {
+ // Chop the input into blocks
+ int num_blocks = (length + block_size - 1) / block_size;
+ vector<const char*> input(num_blocks);
+ vector<size_t> input_length(num_blocks);
+ vector<string> compressed(num_blocks);
+ vector<string> output(num_blocks);
+ for (int b = 0; b < num_blocks; b++) {
+ int input_start = b * block_size;
+ int input_limit = min<int>((b+1)*block_size, length);
+ input[b] = data+input_start;
+ input_length[b] = input_limit-input_start;
+
+ // Pre-grow the output buffer so we don't measure string append time.
+ compressed[b].resize(MinimumRequiredOutputSpace(block_size, comp));
+ }
+
+ // First, try one trial compression to make sure the code is compiled in
+ if (!Compress(input[0], input_length[0], comp, &compressed[0], true)) {
+ LOG(WARNING) << "Skipping " << names[comp] << ": "
+ << "library not compiled in";
+ return;
+ }
+
+ for (int run = 0; run < kRuns; run++) {
+ CycleTimer ctimer, utimer;
+
+ for (int b = 0; b < num_blocks; b++) {
+ // Pre-grow the output buffer so we don't measure string append time.
+ compressed[b].resize(MinimumRequiredOutputSpace(block_size, comp));
+ }
+
+ ctimer.Start();
+ for (int b = 0; b < num_blocks; b++)
+ for (int i = 0; i < repeats; i++)
+ Compress(input[b], input_length[b], comp, &compressed[b], true);
+ ctimer.Stop();
+
+ // Compress once more, with resizing, so we don't leave junk
+ // at the end that will confuse the decompressor.
+ for (int b = 0; b < num_blocks; b++) {
+ Compress(input[b], input_length[b], comp, &compressed[b], false);
+ }
+
+ for (int b = 0; b < num_blocks; b++) {
+ output[b].resize(input_length[b]);
+ }
+
+ utimer.Start();
+ for (int i = 0; i < repeats; i++)
+ for (int b = 0; b < num_blocks; b++)
+ Uncompress(compressed[b], comp, input_length[b], &output[b]);
+ utimer.Stop();
+
+ ctime[run] = ctimer.Get();
+ utime[run] = utimer.Get();
+ }
+
+ compressed_size = 0;
+ for (int i = 0; i < compressed.size(); i++) {
+ compressed_size += compressed[i].size();
+ }
+ }
+
+ sort(ctime, ctime + kRuns);
+ sort(utime, utime + kRuns);
+ const int med = kRuns/2;
+
+ float comp_rate = (length / ctime[med]) * repeats / 1048576.0;
+ float uncomp_rate = (length / utime[med]) * repeats / 1048576.0;
+ string x = names[comp];
+ x += ":";
+ string urate = (uncomp_rate >= 0)
+ ? StringPrintf("%.1f", uncomp_rate)
+ : string("?");
+ printf("%-7s [b %dM] bytes %6d -> %6d %4.1f%% "
+ "comp %5.1f MB/s uncomp %5s MB/s\n",
+ x.c_str(),
+ block_size/(1<<20),
+ static_cast<int>(length), static_cast<uint32>(compressed_size),
+ (compressed_size * 100.0) / max<int>(1, length),
+ comp_rate,
+ urate.c_str());
+}
+
+
+static int VerifyString(const string& input) {
+ string compressed;
+ DataEndingAtUnreadablePage i(input);
+ const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+ CHECK_EQ(written, compressed.size());
+ CHECK_LE(compressed.size(),
+ snappy::MaxCompressedLength(input.size()));
+ CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+
+ string uncompressed;
+ DataEndingAtUnreadablePage c(compressed);
+ CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
+ CHECK_EQ(uncompressed, input);
+ return uncompressed.size();
+}
+
+
+static void VerifyIOVec(const string& input) {
+ string compressed;
+ DataEndingAtUnreadablePage i(input);
+ const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+ CHECK_EQ(written, compressed.size());
+ CHECK_LE(compressed.size(),
+ snappy::MaxCompressedLength(input.size()));
+ CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+
+ // Try uncompressing into an iovec containing a random number of entries
+ // ranging from 1 to 10.
+ char* buf = new char[input.size()];
+ ACMRandom rnd(input.size());
+ int num = rnd.Next() % 10 + 1;
+ if (input.size() < num) {
+ num = input.size();
+ }
+ struct iovec* iov = new iovec[num];
+ int used_so_far = 0;
+ for (int i = 0; i < num; ++i) {
+ iov[i].iov_base = buf + used_so_far;
+ if (i == num - 1) {
+ iov[i].iov_len = input.size() - used_so_far;
+ } else {
+ // Randomly choose to insert a 0 byte entry.
+ if (rnd.OneIn(5)) {
+ iov[i].iov_len = 0;
+ } else {
+ iov[i].iov_len = rnd.Uniform(input.size());
+ }
+ }
+ used_so_far += iov[i].iov_len;
+ }
+ CHECK(snappy::RawUncompressToIOVec(
+ compressed.data(), compressed.size(), iov, num));
+ CHECK(!memcmp(buf, input.data(), input.size()));
+ delete[] iov;
+ delete[] buf;
+}
+
+// Test that data compressed by a compressor that does not
+// obey block sizes is uncompressed properly.
+static void VerifyNonBlockedCompression(const string& input) {
+ if (input.length() > snappy::kBlockSize) {
+ // We cannot test larger blocks than the maximum block size, obviously.
+ return;
+ }
+
+ string prefix;
+ Varint::Append32(&prefix, input.size());
+
+ // Setup compression table
+ snappy::internal::WorkingMemory wmem;
+ int table_size;
+ uint16* table = wmem.GetHashTable(input.size(), &table_size);
+
+ // Compress entire input in one shot
+ string compressed;
+ compressed += prefix;
+ compressed.resize(prefix.size()+snappy::MaxCompressedLength(input.size()));
+ char* dest = string_as_array(&compressed) + prefix.size();
+ char* end = snappy::internal::CompressFragment(input.data(), input.size(),
+ dest, table, table_size);
+ compressed.resize(end - compressed.data());
+
+ // Uncompress into string
+ string uncomp_str;
+ CHECK(snappy::Uncompress(compressed.data(), compressed.size(), &uncomp_str));
+ CHECK_EQ(uncomp_str, input);
+
+}
+
+// Expand the input so that it is at least K times as big as block size
+static string Expand(const string& input) {
+ static const int K = 3;
+ string data = input;
+ while (data.size() < K * snappy::kBlockSize) {
+ data += input;
+ }
+ return data;
+}
+
+static int Verify(const string& input) {
+ VLOG(1) << "Verifying input of size " << input.size();
+
+ // Compress using string based routines
+ const int result = VerifyString(input);
+
+
+ VerifyNonBlockedCompression(input);
+ VerifyIOVec(input);
+ if (!input.empty()) {
+ const string expanded = Expand(input);
+ VerifyNonBlockedCompression(expanded);
+    VerifyIOVec(expanded);
+ }
+
+
+ return result;
+}
+
+// This test checks to ensure that snappy doesn't coredump if it gets
+// corrupted data.
+
+static bool IsValidCompressedBuffer(const string& c) {
+ return snappy::IsValidCompressedBuffer(c.data(), c.size());
+}
+static bool Uncompress(const string& c, string* u) {
+ return snappy::Uncompress(c.data(), c.size(), u);
+}
+
+TYPED_TEST(CorruptedTest, VerifyCorrupted) {
+ string source = "making sure we don't crash with corrupted input";
+ VLOG(1) << source;
+ string dest;
+ TypeParam uncmp;
+ snappy::Compress(source.data(), source.size(), &dest);
+
+ // Mess around with the data. It's hard to simulate all possible
+ // corruptions; this is just one example ...
+ CHECK_GT(dest.size(), 3);
+ dest[1]--;
+ dest[3]++;
+ // this really ought to fail.
+ CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
+ CHECK(!Uncompress(TypeParam(dest), &uncmp));
+
+ // This is testing for a security bug - a buffer that decompresses to 100k
+ // but we lie in the snappy header and only reserve 0 bytes of memory :)
+ source.resize(100000);
+ for (int i = 0; i < source.length(); ++i) {
+ source[i] = 'A';
+ }
+ snappy::Compress(source.data(), source.size(), &dest);
+ dest[0] = dest[1] = dest[2] = dest[3] = 0;
+ CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
+ CHECK(!Uncompress(TypeParam(dest), &uncmp));
+
+ if (sizeof(void *) == 4) {
+ // Another security check; check a crazy big length can't DoS us with an
+ // over-allocation.
+ // Currently this is done only for 32-bit builds. On 64-bit builds,
+ // where 3 GB might be an acceptable allocation size, Uncompress()
+ // attempts to decompress, and sometimes causes the test to run out of
+ // memory.
+ dest[0] = dest[1] = dest[2] = dest[3] = 0xff;
+ // This decodes to a really large size, i.e., about 3 GB.
+ dest[4] = 'k';
+ CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
+ CHECK(!Uncompress(TypeParam(dest), &uncmp));
+ } else {
+ LOG(WARNING) << "Crazy decompression lengths not checked on 64-bit build";
+ }
+
+ // This decodes to about 2 MB; much smaller, but should still fail.
+ dest[0] = dest[1] = dest[2] = 0xff;
+ dest[3] = 0x00;
+ CHECK(!IsValidCompressedBuffer(TypeParam(dest)));
+ CHECK(!Uncompress(TypeParam(dest), &uncmp));
+
+ // try reading stuff in from a bad file.
+ for (int i = 1; i <= 3; ++i) {
+ string data = ReadTestDataFile(StringPrintf("baddata%d.snappy", i).c_str(),
+ 0);
+ string uncmp;
+ // check that we don't return a crazy length
+ size_t ulen;
+ CHECK(!snappy::GetUncompressedLength(data.data(), data.size(), &ulen)
+ || (ulen < (1<<20)));
+ uint32 ulen2;
+ snappy::ByteArraySource source(data.data(), data.size());
+ CHECK(!snappy::GetUncompressedLength(&source, &ulen2) ||
+ (ulen2 < (1<<20)));
+ CHECK(!IsValidCompressedBuffer(TypeParam(data)));
+ CHECK(!Uncompress(TypeParam(data), &uncmp));
+ }
+}
+
+// Helper routines to construct arbitrary compressed strings.
+// These mirror the compression code in snappy.cc, but are copied
+// here so that we can bypass some limitations in how snappy.cc
+// invokes these routines.  (A short hand-encoding sketch follows
+// AppendCopy below.)
+static void AppendLiteral(string* dst, const string& literal) {
+ if (literal.empty()) return;
+ int n = literal.size() - 1;
+ if (n < 60) {
+ // Fit length in tag byte
+ dst->push_back(0 | (n << 2));
+ } else {
+ // Encode in upcoming bytes
+ char number[4];
+ int count = 0;
+ while (n > 0) {
+ number[count++] = n & 0xff;
+ n >>= 8;
+ }
+ dst->push_back(0 | ((59+count) << 2));
+ *dst += string(number, count);
+ }
+ *dst += literal;
+}
+
+static void AppendCopy(string* dst, int offset, int length) {
+ while (length > 0) {
+ // Figure out how much to copy in one shot
+ int to_copy;
+ if (length >= 68) {
+ to_copy = 64;
+ } else if (length > 64) {
+ to_copy = 60;
+ } else {
+ to_copy = length;
+ }
+ length -= to_copy;
+
+ if ((to_copy >= 4) && (to_copy < 12) && (offset < 2048)) {
+ assert(to_copy-4 < 8); // Must fit in 3 bits
+ dst->push_back(1 | ((to_copy-4) << 2) | ((offset >> 8) << 5));
+ dst->push_back(offset & 0xff);
+ } else if (offset < 65536) {
+ dst->push_back(2 | ((to_copy-1) << 2));
+ dst->push_back(offset & 0xff);
+ dst->push_back(offset >> 8);
+ } else {
+ dst->push_back(3 | ((to_copy-1) << 2));
+ dst->push_back(offset & 0xff);
+ dst->push_back((offset >> 8) & 0xff);
+ dst->push_back((offset >> 16) & 0xff);
+ dst->push_back((offset >> 24) & 0xff);
+ }
+ }
+}
+
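+// A hand-encoding sketch using the two helpers above (illustrative only; the
+// concrete values are assumptions chosen for this example, not taken from a
+// particular test):
+//    string s;
+//    Varint::Append32(&s, 6);   // uncompressed length is 6 bytes
+//    AppendLiteral(&s, "abc");  // literal tag byte followed by "abc"
+//    AppendCopy(&s, 3, 3);      // copy 3 bytes from 3 bytes back -> "abc"
+//    // "s" should now uncompress to "abcabc".
+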
+TEST(Snappy, SimpleTests) {
+ Verify("");
+ Verify("a");
+ Verify("ab");
+ Verify("abc");
+
+ Verify("aaaaaaa" + string(16, 'b') + string("aaaaa") + "abc");
+ Verify("aaaaaaa" + string(256, 'b') + string("aaaaa") + "abc");
+ Verify("aaaaaaa" + string(2047, 'b') + string("aaaaa") + "abc");
+ Verify("aaaaaaa" + string(65536, 'b') + string("aaaaa") + "abc");
+ Verify("abcaaaaaaa" + string(65536, 'b') + string("aaaaa") + "abc");
+}
+
+// Verify max blowup (lots of four-byte copies)
+TEST(Snappy, MaxBlowup) {
+ string input;
+ for (int i = 0; i < 20000; i++) {
+ ACMRandom rnd(i);
+ uint32 bytes = static_cast<uint32>(rnd.Next());
+ input.append(reinterpret_cast<char*>(&bytes), sizeof(bytes));
+ }
+ for (int i = 19999; i >= 0; i--) {
+ ACMRandom rnd(i);
+ uint32 bytes = static_cast<uint32>(rnd.Next());
+ input.append(reinterpret_cast<char*>(&bytes), sizeof(bytes));
+ }
+ Verify(input);
+}
+
+TEST(Snappy, RandomData) {
+ ACMRandom rnd(FLAGS_test_random_seed);
+
+ const int num_ops = 20000;
+ for (int i = 0; i < num_ops; i++) {
+ if ((i % 1000) == 0) {
+ VLOG(0) << "Random op " << i << " of " << num_ops;
+ }
+
+ string x;
+ int len = rnd.Uniform(4096);
+ if (i < 100) {
+ len = 65536 + rnd.Uniform(65536);
+ }
+ while (x.size() < len) {
+ int run_len = 1;
+ if (rnd.OneIn(10)) {
+ run_len = rnd.Skewed(8);
+ }
+ char c = (i < 100) ? rnd.Uniform(256) : rnd.Skewed(3);
+ while (run_len-- > 0 && x.size() < len) {
+ x += c;
+ }
+ }
+
+ Verify(x);
+ }
+}
+
+TEST(Snappy, FourByteOffset) {
+ // The new compressor cannot generate four-byte offsets since
+ // it chops up the input into 32KB pieces. So we hand-emit the
+ // copy manually.
+
+ // The two fragments that make up the input string.
+ string fragment1 = "012345689abcdefghijklmnopqrstuvwxyz";
+ string fragment2 = "some other string";
+
+ // How many times each fragment is emitted.
+ const int n1 = 2;
+ const int n2 = 100000 / fragment2.size();
+ const int length = n1 * fragment1.size() + n2 * fragment2.size();
+
+ string compressed;
+ Varint::Append32(&compressed, length);
+
+ AppendLiteral(&compressed, fragment1);
+ string src = fragment1;
+ for (int i = 0; i < n2; i++) {
+ AppendLiteral(&compressed, fragment2);
+ src += fragment2;
+ }
+ AppendCopy(&compressed, src.size(), fragment1.size());
+ src += fragment1;
+ CHECK_EQ(length, src.size());
+
+ string uncompressed;
+ CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ CHECK(snappy::Uncompress(compressed.data(), compressed.size(),
+ &uncompressed));
+ CHECK_EQ(uncompressed, src);
+}
+
+TEST(Snappy, IOVecEdgeCases) {
+ // Test some tricky edge cases in the iovec output that are not necessarily
+ // exercised by random tests.
+
+ // Our output blocks look like this initially (the last iovec is bigger
+ // than depicted):
+ // [ ] [ ] [ ] [ ] [ ]
+ static const int kLengths[] = { 2, 1, 4, 8, 128 };
+
+ struct iovec iov[ARRAYSIZE(kLengths)];
+ for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+ iov[i].iov_base = new char[kLengths[i]];
+ iov[i].iov_len = kLengths[i];
+ }
+
+ string compressed;
+ Varint::Append32(&compressed, 22);
+
+ // A literal whose output crosses three blocks.
+ // [ab] [c] [123 ] [ ] [ ]
+ AppendLiteral(&compressed, "abc123");
+
+ // A copy whose output crosses two blocks (source and destination
+ // segments marked).
+ // [ab] [c] [1231] [23 ] [ ]
+ // ^--^ --
+ AppendCopy(&compressed, 3, 3);
+
+ // A copy where the input is, at first, in the block before the output:
+ //
+ // [ab] [c] [1231] [231231 ] [ ]
+ // ^--- ^---
+ // Then during the copy, the pointers move such that the input and
+ // output pointers are in the same block:
+ //
+ // [ab] [c] [1231] [23123123] [ ]
+ // ^- ^-
+ // And then they move again, so that the output pointer is no longer
+ // in the same block as the input pointer:
+ // [ab] [c] [1231] [23123123] [123 ]
+ // ^-- ^--
+ AppendCopy(&compressed, 6, 9);
+
+ // Finally, a copy where the input is from several blocks back,
+ // and it also crosses three blocks:
+ //
+ // [ab] [c] [1231] [23123123] [123b ]
+ // ^ ^
+ // [ab] [c] [1231] [23123123] [123bc ]
+ // ^ ^
+ // [ab] [c] [1231] [23123123] [123bc12 ]
+ // ^- ^-
+ AppendCopy(&compressed, 17, 4);
+
+ CHECK(snappy::RawUncompressToIOVec(
+ compressed.data(), compressed.size(), iov, ARRAYSIZE(iov)));
+ CHECK_EQ(0, memcmp(iov[0].iov_base, "ab", 2));
+ CHECK_EQ(0, memcmp(iov[1].iov_base, "c", 1));
+ CHECK_EQ(0, memcmp(iov[2].iov_base, "1231", 4));
+ CHECK_EQ(0, memcmp(iov[3].iov_base, "23123123", 8));
+ CHECK_EQ(0, memcmp(iov[4].iov_base, "123bc12", 7));
+
+ for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+ delete[] reinterpret_cast<char *>(iov[i].iov_base);
+ }
+}
+
+TEST(Snappy, IOVecLiteralOverflow) {
+ static const int kLengths[] = { 3, 4 };
+
+ struct iovec iov[ARRAYSIZE(kLengths)];
+ for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+ iov[i].iov_base = new char[kLengths[i]];
+ iov[i].iov_len = kLengths[i];
+ }
+
+ string compressed;
+ Varint::Append32(&compressed, 8);
+
+ AppendLiteral(&compressed, "12345678");
+
+ CHECK(!snappy::RawUncompressToIOVec(
+ compressed.data(), compressed.size(), iov, ARRAYSIZE(iov)));
+
+ for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+ delete[] reinterpret_cast<char *>(iov[i].iov_base);
+ }
+}
+
+TEST(Snappy, IOVecCopyOverflow) {
+ static const int kLengths[] = { 3, 4 };
+
+ struct iovec iov[ARRAYSIZE(kLengths)];
+ for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+ iov[i].iov_base = new char[kLengths[i]];
+ iov[i].iov_len = kLengths[i];
+ }
+
+ string compressed;
+ Varint::Append32(&compressed, 8);
+
+ AppendLiteral(&compressed, "123");
+ AppendCopy(&compressed, 3, 5);
+
+ CHECK(!snappy::RawUncompressToIOVec(
+ compressed.data(), compressed.size(), iov, ARRAYSIZE(iov)));
+
+ for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+ delete[] reinterpret_cast<char *>(iov[i].iov_base);
+ }
+}
+
+
+static bool CheckUncompressedLength(const string& compressed,
+ size_t* ulength) {
+ const bool result1 = snappy::GetUncompressedLength(compressed.data(),
+ compressed.size(),
+ ulength);
+
+ snappy::ByteArraySource source(compressed.data(), compressed.size());
+ uint32 length;
+ const bool result2 = snappy::GetUncompressedLength(&source, &length);
+ CHECK_EQ(result1, result2);
+ return result1;
+}
+
+TEST(SnappyCorruption, TruncatedVarint) {
+ string compressed, uncompressed;
+ size_t ulength;
+ compressed.push_back('\xf0');
+ CHECK(!CheckUncompressedLength(compressed, &ulength));
+ CHECK(!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ CHECK(!snappy::Uncompress(compressed.data(), compressed.size(),
+ &uncompressed));
+}
+
+TEST(SnappyCorruption, UnterminatedVarint) {
+ string compressed, uncompressed;
+ size_t ulength;
+ compressed.push_back(128);
+ compressed.push_back(128);
+ compressed.push_back(128);
+ compressed.push_back(128);
+ compressed.push_back(128);
+ compressed.push_back(10);
+ CHECK(!CheckUncompressedLength(compressed, &ulength));
+ CHECK(!snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+ CHECK(!snappy::Uncompress(compressed.data(), compressed.size(),
+ &uncompressed));
+}
+
+TEST(Snappy, ReadPastEndOfBuffer) {
+ // Check that we do not read past end of input
+
+ // Make a compressed string that ends with a single-byte literal
+ string compressed;
+ Varint::Append32(&compressed, 1);
+ AppendLiteral(&compressed, "x");
+
+ string uncompressed;
+ DataEndingAtUnreadablePage c(compressed);
+ CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
+ CHECK_EQ(uncompressed, string("x"));
+}
+
+// Check for an infinite loop caused by a copy with offset==0
+TEST(Snappy, ZeroOffsetCopy) {
+ const char* compressed = "\x40\x12\x00\x00";
+ // \x40 Length (must be > kMaxIncrementCopyOverflow)
+ // \x12\x00\x00 Copy with offset==0, length==5
+ char uncompressed[100];
+ EXPECT_FALSE(snappy::RawUncompress(compressed, 4, uncompressed));
+}
+
+TEST(Snappy, ZeroOffsetCopyValidation) {
+ const char* compressed = "\x05\x12\x00\x00";
+ // \x05 Length
+ // \x12\x00\x00 Copy with offset==0, length==5
+ EXPECT_FALSE(snappy::IsValidCompressedBuffer(compressed, 4));
+}
+
+
+namespace {
+
+int TestFindMatchLength(const char* s1, const char *s2, unsigned length) {
+ return snappy::internal::FindMatchLength(s1, s2, s2 + length);
+}
+
+} // namespace
+
+TEST(Snappy, FindMatchLength) {
+ // Exercise all different code paths through the function.
+ // 64-bit version:
+
+ // Hit s1_limit in 64-bit loop, hit s1_limit in single-character loop.
+ EXPECT_EQ(6, TestFindMatchLength("012345", "012345", 6));
+ EXPECT_EQ(11, TestFindMatchLength("01234567abc", "01234567abc", 11));
+
+ // Hit s1_limit in 64-bit loop, find a non-match in single-character loop.
+ EXPECT_EQ(9, TestFindMatchLength("01234567abc", "01234567axc", 9));
+
+ // Same, but edge cases.
+ EXPECT_EQ(11, TestFindMatchLength("01234567abc!", "01234567abc!", 11));
+ EXPECT_EQ(11, TestFindMatchLength("01234567abc!", "01234567abc?", 11));
+
+ // Find non-match at once in first loop.
+ EXPECT_EQ(0, TestFindMatchLength("01234567xxxxxxxx", "?1234567xxxxxxxx", 16));
+ EXPECT_EQ(1, TestFindMatchLength("01234567xxxxxxxx", "0?234567xxxxxxxx", 16));
+ EXPECT_EQ(4, TestFindMatchLength("01234567xxxxxxxx", "01237654xxxxxxxx", 16));
+ EXPECT_EQ(7, TestFindMatchLength("01234567xxxxxxxx", "0123456?xxxxxxxx", 16));
+
+ // Find non-match in first loop after one block.
+ EXPECT_EQ(8, TestFindMatchLength("abcdefgh01234567xxxxxxxx",
+ "abcdefgh?1234567xxxxxxxx", 24));
+ EXPECT_EQ(9, TestFindMatchLength("abcdefgh01234567xxxxxxxx",
+ "abcdefgh0?234567xxxxxxxx", 24));
+ EXPECT_EQ(12, TestFindMatchLength("abcdefgh01234567xxxxxxxx",
+ "abcdefgh01237654xxxxxxxx", 24));
+ EXPECT_EQ(15, TestFindMatchLength("abcdefgh01234567xxxxxxxx",
+ "abcdefgh0123456?xxxxxxxx", 24));
+
+ // 32-bit version:
+
+ // Short matches.
+ EXPECT_EQ(0, TestFindMatchLength("01234567", "?1234567", 8));
+ EXPECT_EQ(1, TestFindMatchLength("01234567", "0?234567", 8));
+ EXPECT_EQ(2, TestFindMatchLength("01234567", "01?34567", 8));
+ EXPECT_EQ(3, TestFindMatchLength("01234567", "012?4567", 8));
+ EXPECT_EQ(4, TestFindMatchLength("01234567", "0123?567", 8));
+ EXPECT_EQ(5, TestFindMatchLength("01234567", "01234?67", 8));
+ EXPECT_EQ(6, TestFindMatchLength("01234567", "012345?7", 8));
+ EXPECT_EQ(7, TestFindMatchLength("01234567", "0123456?", 8));
+ EXPECT_EQ(7, TestFindMatchLength("01234567", "0123456?", 7));
+ EXPECT_EQ(7, TestFindMatchLength("01234567!", "0123456??", 7));
+
+ // Hit s1_limit in 32-bit loop, hit s1_limit in single-character loop.
+ EXPECT_EQ(10, TestFindMatchLength("xxxxxxabcd", "xxxxxxabcd", 10));
+ EXPECT_EQ(10, TestFindMatchLength("xxxxxxabcd?", "xxxxxxabcd?", 10));
+ EXPECT_EQ(13, TestFindMatchLength("xxxxxxabcdef", "xxxxxxabcdef", 13));
+
+ // Same, but edge cases.
+ EXPECT_EQ(12, TestFindMatchLength("xxxxxx0123abc!", "xxxxxx0123abc!", 12));
+ EXPECT_EQ(12, TestFindMatchLength("xxxxxx0123abc!", "xxxxxx0123abc?", 12));
+
+ // Hit s1_limit in 32-bit loop, find a non-match in single-character loop.
+ EXPECT_EQ(11, TestFindMatchLength("xxxxxx0123abc", "xxxxxx0123axc", 13));
+
+ // Find non-match at once in first loop.
+ EXPECT_EQ(6, TestFindMatchLength("xxxxxx0123xxxxxxxx",
+ "xxxxxx?123xxxxxxxx", 18));
+ EXPECT_EQ(7, TestFindMatchLength("xxxxxx0123xxxxxxxx",
+ "xxxxxx0?23xxxxxxxx", 18));
+ EXPECT_EQ(8, TestFindMatchLength("xxxxxx0123xxxxxxxx",
+ "xxxxxx0132xxxxxxxx", 18));
+ EXPECT_EQ(9, TestFindMatchLength("xxxxxx0123xxxxxxxx",
+ "xxxxxx012?xxxxxxxx", 18));
+
+ // Same, but edge cases.
+ EXPECT_EQ(6, TestFindMatchLength("xxxxxx0123", "xxxxxx?123", 10));
+ EXPECT_EQ(7, TestFindMatchLength("xxxxxx0123", "xxxxxx0?23", 10));
+ EXPECT_EQ(8, TestFindMatchLength("xxxxxx0123", "xxxxxx0132", 10));
+ EXPECT_EQ(9, TestFindMatchLength("xxxxxx0123", "xxxxxx012?", 10));
+
+ // Find non-match in first loop after one block.
+ EXPECT_EQ(10, TestFindMatchLength("xxxxxxabcd0123xx",
+ "xxxxxxabcd?123xx", 16));
+ EXPECT_EQ(11, TestFindMatchLength("xxxxxxabcd0123xx",
+ "xxxxxxabcd0?23xx", 16));
+ EXPECT_EQ(12, TestFindMatchLength("xxxxxxabcd0123xx",
+ "xxxxxxabcd0132xx", 16));
+ EXPECT_EQ(13, TestFindMatchLength("xxxxxxabcd0123xx",
+ "xxxxxxabcd012?xx", 16));
+
+ // Same, but edge cases.
+ EXPECT_EQ(10, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd?123", 14));
+ EXPECT_EQ(11, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd0?23", 14));
+ EXPECT_EQ(12, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd0132", 14));
+ EXPECT_EQ(13, TestFindMatchLength("xxxxxxabcd0123", "xxxxxxabcd012?", 14));
+}
+
+TEST(Snappy, FindMatchLengthRandom) {
+ const int kNumTrials = 10000;
+ const int kTypicalLength = 10;
+ ACMRandom rnd(FLAGS_test_random_seed);
+
+ for (int i = 0; i < kNumTrials; i++) {
+ string s, t;
+ char a = rnd.Rand8();
+ char b = rnd.Rand8();
+ while (!rnd.OneIn(kTypicalLength)) {
+ s.push_back(rnd.OneIn(2) ? a : b);
+ t.push_back(rnd.OneIn(2) ? a : b);
+ }
+ DataEndingAtUnreadablePage u(s);
+ DataEndingAtUnreadablePage v(t);
+ int matched = snappy::internal::FindMatchLength(
+ u.data(), v.data(), v.data() + t.size());
+ if (matched == t.size()) {
+ EXPECT_EQ(s, t);
+ } else {
+ EXPECT_NE(s[matched], t[matched]);
+ for (int j = 0; j < matched; j++) {
+ EXPECT_EQ(s[j], t[j]);
+ }
+ }
+ }
+}
+
+
+static void CompressFile(const char* fname) {
+ string fullinput;
+ file::GetContents(fname, &fullinput, file::Defaults()).CheckSuccess();
+
+ string compressed;
+ Compress(fullinput.data(), fullinput.size(), SNAPPY, &compressed, false);
+
+ file::SetContents(string(fname).append(".comp"), compressed, file::Defaults())
+ .CheckSuccess();
+}
+
+static void UncompressFile(const char* fname) {
+ string fullinput;
+ file::GetContents(fname, &fullinput, file::Defaults()).CheckSuccess();
+
+ size_t uncompLength;
+ CHECK(CheckUncompressedLength(fullinput, &uncompLength));
+
+ string uncompressed;
+ uncompressed.resize(uncompLength);
+ CHECK(snappy::Uncompress(fullinput.data(), fullinput.size(), &uncompressed));
+
+ file::SetContents(string(fname).append(".uncomp"), uncompressed,
+ file::Defaults()).CheckSuccess();
+}
+
+static void MeasureFile(const char* fname) {
+ string fullinput;
+ file::GetContents(fname, &fullinput, file::Defaults()).CheckSuccess();
+ printf("%-40s :\n", fname);
+
+ int start_len = (FLAGS_start_len < 0) ? fullinput.size() : FLAGS_start_len;
+ int end_len = fullinput.size();
+ if (FLAGS_end_len >= 0) {
+ end_len = min<int>(fullinput.size(), FLAGS_end_len);
+ }
+ for (int len = start_len; len <= end_len; len++) {
+ const char* const input = fullinput.data();
+ int repeats = (FLAGS_bytes + len) / (len + 1);
+ if (FLAGS_zlib) Measure(input, len, ZLIB, repeats, 1024<<10);
+ if (FLAGS_lzo) Measure(input, len, LZO, repeats, 1024<<10);
+ if (FLAGS_liblzf) Measure(input, len, LIBLZF, repeats, 1024<<10);
+ if (FLAGS_quicklz) Measure(input, len, QUICKLZ, repeats, 1024<<10);
+ if (FLAGS_fastlz) Measure(input, len, FASTLZ, repeats, 1024<<10);
+ if (FLAGS_snappy) Measure(input, len, SNAPPY, repeats, 4096<<10);
+
+ // For block-size based measurements
+ if (0 && FLAGS_snappy) {
+ Measure(input, len, SNAPPY, repeats, 8<<10);
+ Measure(input, len, SNAPPY, repeats, 16<<10);
+ Measure(input, len, SNAPPY, repeats, 32<<10);
+ Measure(input, len, SNAPPY, repeats, 64<<10);
+ Measure(input, len, SNAPPY, repeats, 256<<10);
+ Measure(input, len, SNAPPY, repeats, 1024<<10);
+ }
+ }
+}
+
+static struct {
+ const char* label;
+ const char* filename;
+ size_t size_limit;
+} files[] = {
+ { "html", "html", 0 },
+ { "urls", "urls.10K", 0 },
+ { "jpg", "fireworks.jpeg", 0 },
+ { "jpg_200", "fireworks.jpeg", 200 },
+ { "pdf", "paper-100k.pdf", 0 },
+ { "html4", "html_x_4", 0 },
+ { "txt1", "alice29.txt", 0 },
+ { "txt2", "asyoulik.txt", 0 },
+ { "txt3", "lcet10.txt", 0 },
+ { "txt4", "plrabn12.txt", 0 },
+ { "pb", "geo.protodata", 0 },
+ { "gaviota", "kppkn.gtb", 0 },
+};
+
+static void BM_UFlat(int iters, int arg) {
+ StopBenchmarkTiming();
+
+ // Pick file to process based on "arg"
+ CHECK_GE(arg, 0);
+ CHECK_LT(arg, ARRAYSIZE(files));
+ string contents = ReadTestDataFile(files[arg].filename,
+ files[arg].size_limit);
+
+ string zcontents;
+ snappy::Compress(contents.data(), contents.size(), &zcontents);
+ char* dst = new char[contents.size()];
+
+ SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+ static_cast<int64>(contents.size()));
+ SetBenchmarkLabel(files[arg].label);
+ StartBenchmarkTiming();
+ while (iters-- > 0) {
+ CHECK(snappy::RawUncompress(zcontents.data(), zcontents.size(), dst));
+ }
+ StopBenchmarkTiming();
+
+ delete[] dst;
+}
+BENCHMARK(BM_UFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
+
+static void BM_UValidate(int iters, int arg) {
+ StopBenchmarkTiming();
+
+ // Pick file to process based on "arg"
+ CHECK_GE(arg, 0);
+ CHECK_LT(arg, ARRAYSIZE(files));
+ string contents = ReadTestDataFile(files[arg].filename,
+ files[arg].size_limit);
+
+ string zcontents;
+ snappy::Compress(contents.data(), contents.size(), &zcontents);
+
+ SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+ static_cast<int64>(contents.size()));
+ SetBenchmarkLabel(files[arg].label);
+ StartBenchmarkTiming();
+ while (iters-- > 0) {
+ CHECK(snappy::IsValidCompressedBuffer(zcontents.data(), zcontents.size()));
+ }
+ StopBenchmarkTiming();
+}
+BENCHMARK(BM_UValidate)->DenseRange(0, 4);
+
+static void BM_UIOVec(int iters, int arg) {
+ StopBenchmarkTiming();
+
+ // Pick file to process based on "arg"
+ CHECK_GE(arg, 0);
+ CHECK_LT(arg, ARRAYSIZE(files));
+ string contents = ReadTestDataFile(files[arg].filename,
+ files[arg].size_limit);
+
+ string zcontents;
+ snappy::Compress(contents.data(), contents.size(), &zcontents);
+
+ // Uncompress into an iovec containing ten entries.
+ const int kNumEntries = 10;
+ struct iovec iov[kNumEntries];
+ char *dst = new char[contents.size()];
+ int used_so_far = 0;
+ for (int i = 0; i < kNumEntries; ++i) {
+ iov[i].iov_base = dst + used_so_far;
+ if (used_so_far == contents.size()) {
+ iov[i].iov_len = 0;
+ continue;
+ }
+
+ if (i == kNumEntries - 1) {
+ iov[i].iov_len = contents.size() - used_so_far;
+ } else {
+ iov[i].iov_len = contents.size() / kNumEntries;
+ }
+ used_so_far += iov[i].iov_len;
+ }
+
+ SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+ static_cast<int64>(contents.size()));
+ SetBenchmarkLabel(files[arg].label);
+ StartBenchmarkTiming();
+ while (iters-- > 0) {
+ CHECK(snappy::RawUncompressToIOVec(zcontents.data(), zcontents.size(), iov,
+ kNumEntries));
+ }
+ StopBenchmarkTiming();
+
+ delete[] dst;
+}
+BENCHMARK(BM_UIOVec)->DenseRange(0, 4);
+
+
+static void BM_ZFlat(int iters, int arg) {
+ StopBenchmarkTiming();
+
+ // Pick file to process based on "arg"
+ CHECK_GE(arg, 0);
+ CHECK_LT(arg, ARRAYSIZE(files));
+ string contents = ReadTestDataFile(files[arg].filename,
+ files[arg].size_limit);
+
+ char* dst = new char[snappy::MaxCompressedLength(contents.size())];
+
+ SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
+ static_cast<int64>(contents.size()));
+ StartBenchmarkTiming();
+
+ size_t zsize = 0;
+ while (iters-- > 0) {
+ snappy::RawCompress(contents.data(), contents.size(), dst, &zsize);
+ }
+ StopBenchmarkTiming();
+ const double compression_ratio =
+ static_cast<double>(zsize) / std::max<size_t>(1, contents.size());
+ SetBenchmarkLabel(StringPrintf("%s (%.2f %%)",
+ files[arg].label, 100.0 * compression_ratio));
+ VLOG(0) << StringPrintf("compression for %s: %zd -> %zd bytes",
+ files[arg].label, contents.size(), zsize);
+ delete[] dst;
+}
+BENCHMARK(BM_ZFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
+
+
+} // namespace snappy
+
+
+int main(int argc, char** argv) {
+ InitGoogle(argv[0], &argc, &argv, true);
+ RunSpecifiedBenchmarks();
+
+
+ if (argc >= 2) {
+ for (int arg = 1; arg < argc; arg++) {
+ if (FLAGS_write_compressed) {
+ CompressFile(argv[arg]);
+ } else if (FLAGS_write_uncompressed) {
+ UncompressFile(argv[arg]);
+ } else {
+ MeasureFile(argv[arg]);
+ }
+ }
+ return 0;
+ }
+
+ return RUN_ALL_TESTS();
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/ABOUT-NLS b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/ABOUT-NLS
new file mode 100644
index 00000000..ec20977e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/ABOUT-NLS
@@ -0,0 +1,1101 @@
+1 Notes on the Free Translation Project
+***************************************
+
+Free software is going international! The Free Translation Project is
+a way to get maintainers of free software, translators, and users all
+together, so that free software will gradually become able to speak many
+languages. A few packages already provide translations for their
+messages.
+
+ If you found this `ABOUT-NLS' file inside a distribution, you may
+assume that the distributed package does use GNU `gettext' internally,
+itself available at your nearest GNU archive site. But you do _not_
+need to install GNU `gettext' prior to configuring, installing or using
+this package with messages translated.
+
+ Installers will find here some useful hints. These notes also
+explain how users should proceed for getting the programs to use the
+available translations. They tell how people wanting to contribute and
+work on translations can contact the appropriate team.
+
+ When reporting bugs in the `intl/' directory or bugs which may be
+related to internationalization, you should tell about the version of
+`gettext' which is used. The information can be found in the
+`intl/VERSION' file, in internationalized packages.
+
+1.1 Quick configuration advice
+==============================
+
+If you want to exploit the full power of internationalization, you
+should configure it using
+
+ ./configure --with-included-gettext
+
+to force usage of internationalizing routines provided within this
+package, despite the existence of internationalizing capabilities in the
+operating system where this package is being installed. So far, only
+the `gettext' implementation in the GNU C library version 2 provides as
+many features (such as locale alias, message inheritance, automatic
+charset conversion or plural form handling) as the implementation here.
+It is also not possible to offer this additional functionality on top
+of a `catgets' implementation. Future versions of GNU `gettext' will
+very likely convey even more functionality. So it might be a good idea
+to change to GNU `gettext' as soon as possible.
+
+ So you need _not_ provide this option if you are using GNU libc 2 or
+you have installed a recent copy of the GNU gettext package with the
+included `libintl'.
+
+1.2 INSTALL Matters
+===================
+
+Some packages are "localizable" when properly installed; the programs
+they contain can be made to speak your own native language. Most such
+packages use GNU `gettext'.  Other packages have their own approaches
+to internationalization, predating GNU `gettext'.
+
+ By default, this package will be installed to allow translation of
+messages. It will automatically detect whether the system already
+provides the GNU `gettext' functions. If not, the included GNU
+`gettext' library will be used. This library is wholly contained
+within this package, usually in the `intl/' subdirectory, so prior
+installation of the GNU `gettext' package is _not_ required.
+Installers may use special options at configuration time for changing
+the default behaviour. The commands:
+
+ ./configure --with-included-gettext
+ ./configure --disable-nls
+
+will, respectively, bypass any pre-existing `gettext' to use the
+internationalizing routines provided within this package, or else,
+_totally_ disable translation of messages.
+
+ When you already have GNU `gettext' installed on your system and run
+configure without an option for your new package, `configure' will
+probably detect the previously built and installed `libintl.a' file and
+will decide to use this. This might not be desirable. You should use
+the more recent version of the GNU `gettext' library. I.e. if the file
+`intl/VERSION' shows that the library which comes with this package is
+more recent, you should use
+
+ ./configure --with-included-gettext
+
+to prevent auto-detection.
+
+ The configuration process will not test for the `catgets' function
+and therefore it will not be used. The reason is that even an
+emulation of `gettext' on top of `catgets' could not provide all the
+extensions of the GNU `gettext' library.
+
+ Internationalized packages usually have many `po/LL.po' files, where
+LL gives an ISO 639 two-letter code identifying the language. Unless
+translations have been forbidden at `configure' time by using the
+`--disable-nls' switch, all available translations are installed
+together with the package. However, the environment variable `LINGUAS'
+may be set, prior to configuration, to limit the installed set.
+`LINGUAS' should then contain a space separated list of two-letter
+codes, stating which languages are allowed.
+
+1.3 Using This Package
+======================
+
+As a user, if your language has been installed for this package, you
+only have to set the `LANG' environment variable to the appropriate
+`LL_CC' combination. Here `LL' is an ISO 639 two-letter language code,
+and `CC' is an ISO 3166 two-letter country code. For example, let's
+suppose that you speak German and live in Germany. At the shell
+prompt, merely execute `setenv LANG de_DE' (in `csh'),
+`export LANG; LANG=de_DE' (in `sh') or `export LANG=de_DE' (in `bash').
+This can be done from your `.login' or `.profile' file, once and for
+all.
+
+ You might think that the country code specification is redundant.
+But in fact, some languages have dialects in different countries. For
+example, `de_AT' is used for Austria, and `pt_BR' for Brazil. The
+country code serves to distinguish the dialects.
+
+ The locale naming convention of `LL_CC', with `LL' denoting the
+language and `CC' denoting the country, is the one used on systems based
+on GNU libc. On other systems, some variations of this scheme are
+used, such as `LL' or `LL_CC.ENCODING'. You can get the list of
+locales supported by your system for your language by running the
+command `locale -a | grep '^LL''.
+
+ Not all programs have translations for all languages. By default, an
+English message is shown in place of a nonexistent translation. If you
+understand other languages, you can set up a priority list of languages.
+This is done through a different environment variable, called
+`LANGUAGE'. GNU `gettext' gives preference to `LANGUAGE' over `LANG'
+for the purpose of message handling, but you still need to have `LANG'
+set to the primary language; this is required by other parts of the
+system libraries.  For example, some Swedish users who would rather
+read translations in German than in English when Swedish is not
+available set `LANGUAGE' to `sv:de' while leaving `LANG' set to `sv_SE'.
+
+ Special advice for Norwegian users: The language code for Norwegian
+bokma*l changed from `no' to `nb' recently (in 2003). During the
+transition period, while some message catalogs for this language are
+installed under `nb' and some older ones under `no', it's recommended
+for Norwegian users to set `LANGUAGE' to `nb:no' so that both newer and
+older translations are used.
+
+ In the `LANGUAGE' environment variable, but not in the `LANG'
+environment variable, `LL_CC' combinations can be abbreviated as `LL'
+to denote the language's main dialect. For example, `de' is equivalent
+to `de_DE' (German as spoken in Germany), and `pt' to `pt_PT'
+(Portuguese as spoken in Portugal) in this context.
+
+1.4 Translating Teams
+=====================
+
+For the Free Translation Project to be a success, we need interested
+people who like their own language and write it well, and who are also
+able to synergize with other translators speaking the same language.
+Each translation team has its own mailing list. The up-to-date list of
+teams can be found at the Free Translation Project's homepage,
+`http://www.iro.umontreal.ca/contrib/po/HTML/', in the "National teams"
+area.
+
+ If you'd like to volunteer to _work_ at translating messages, you
+should become a member of the translating team for your own language.
+The subscribing address is _not_ the same as the list itself, it has
+`-request' appended. For example, speakers of Swedish can send a
+message to `sv-request@li.org', having this message body:
+
+ subscribe
+
+ Keep in mind that team members are expected to participate
+_actively_ in translations, or at solving translational difficulties,
+rather than merely lurking around. If your team does not exist yet and
+you want to start one, or if you are unsure about what to do or how to
+get started, please write to `translation@iro.umontreal.ca' to reach the
+coordinator for all translator teams.
+
+ The English team is special. It works at improving and uniformizing
+the terminology in use. Proven linguistic skills are praised more than
+programming skills, here.
+
+1.5 Available Packages
+======================
+
+Languages are not equally supported in all packages. The following
+matrix shows the current state of internationalization, as of October
+2006. The matrix shows, in regard of each package, for which languages
+PO files have been submitted to translation coordination, with a
+translation percentage of at least 50%.
+
+ Ready PO files af am ar az be bg bs ca cs cy da de el en en_GB eo
+ +----------------------------------------------------+
+ GNUnet | [] |
+ a2ps | [] [] [] [] [] |
+ aegis | () |
+ ant-phone | () |
+ anubis | [] |
+ ap-utils | |
+ aspell | [] [] [] [] [] |
+ bash | [] [] [] |
+ batchelor | [] |
+ bfd | |
+ bibshelf | [] |
+ binutils | [] |
+ bison | [] [] |
+ bison-runtime | |
+ bluez-pin | [] [] [] [] [] |
+ cflow | [] |
+ clisp | [] [] |
+ console-tools | [] [] |
+ coreutils | [] [] [] |
+ cpio | |
+ cpplib | [] [] [] |
+ cryptonit | [] |
+ darkstat | [] () [] |
+ dialog | [] [] [] [] [] [] |
+ diffutils | [] [] [] [] [] [] |
+ doodle | [] |
+ e2fsprogs | [] [] |
+ enscript | [] [] [] [] |
+ error | [] [] [] [] |
+ fetchmail | [] [] () [] |
+ fileutils | [] [] |
+ findutils | [] [] [] |
+ flex | [] [] [] |
+ fslint | [] |
+ gas | |
+ gawk | [] [] [] |
+ gbiff | [] |
+ gcal | [] |
+ gcc | [] |
+ gettext-examples | [] [] [] [] [] |
+ gettext-runtime | [] [] [] [] [] |
+ gettext-tools | [] [] |
+ gimp-print | [] [] [] [] |
+ gip | [] |
+ gliv | [] |
+ glunarclock | [] |
+ gmult | [] [] |
+ gnubiff | () |
+ gnucash | () () [] |
+ gnucash-glossary | [] () |
+ gnuedu | |
+ gnulib | [] [] [] [] [] [] |
+ gnunet-gtk | |
+ gnutls | |
+ gpe-aerial | [] [] |
+ gpe-beam | [] [] |
+ gpe-calendar | |
+ gpe-clock | [] [] |
+ gpe-conf | [] [] |
+ gpe-contacts | |
+ gpe-edit | [] |
+ gpe-filemanager | |
+ gpe-go | [] |
+ gpe-login | [] [] |
+ gpe-ownerinfo | [] [] |
+ gpe-package | |
+ gpe-sketchbook | [] [] |
+ gpe-su | [] [] |
+ gpe-taskmanager | [] [] |
+ gpe-timesheet | [] |
+ gpe-today | [] [] |
+ gpe-todo | |
+ gphoto2 | [] [] [] [] |
+ gprof | [] [] |
+ gpsdrive | () () |
+ gramadoir | [] [] |
+ grep | [] [] [] [] [] [] |
+ gretl | |
+ gsasl | |
+ gss | |
+ gst-plugins | [] [] [] [] |
+ gst-plugins-base | [] [] [] |
+ gst-plugins-good | [] [] [] [] [] [] [] |
+ gstreamer | [] [] [] [] [] [] [] |
+ gtick | () |
+ gtkam | [] [] [] |
+ gtkorphan | [] [] |
+ gtkspell | [] [] [] [] |
+ gutenprint | [] |
+ hello | [] [] [] [] [] |
+ id-utils | [] [] |
+ impost | |
+ indent | [] [] [] |
+ iso_3166 | [] [] |
+ iso_3166_2 | |
+ iso_4217 | [] |
+ iso_639 | [] [] |
+ jpilot | [] |
+ jtag | |
+ jwhois | |
+ kbd | [] [] [] [] |
+ keytouch | |
+ keytouch-editor | |
+ keytouch-keyboa... | |
+ latrine | () |
+ ld | [] |
+ leafpad | [] [] [] [] [] |
+ libc | [] [] [] [] [] |
+ libexif | [] |
+ libextractor | [] |
+ libgpewidget | [] [] [] |
+ libgpg-error | [] |
+ libgphoto2 | [] [] |
+ libgphoto2_port | [] [] |
+ libgsasl | |
+ libiconv | [] [] |
+ libidn | [] [] |
+ lifelines | [] () |
+ lilypond | [] |
+ lingoteach | |
+ lynx | [] [] [] [] |
+ m4 | [] [] [] [] |
+ mailutils | [] |
+ make | [] [] |
+ man-db | [] () [] [] |
+ minicom | [] [] [] |
+ mysecretdiary | [] [] |
+ nano | [] [] [] |
+ nano_1_0 | [] () [] [] |
+ opcodes | [] |
+ parted | |
+ pilot-qof | [] |
+ psmisc | [] |
+ pwdutils | |
+ python | |
+ qof | |
+ radius | [] |
+ recode | [] [] [] [] [] [] |
+ rpm | [] [] |
+ screem | |
+ scrollkeeper | [] [] [] [] [] [] [] [] |
+ sed | [] [] [] |
+ sh-utils | [] [] |
+ shared-mime-info | [] [] [] [] |
+ sharutils | [] [] [] [] [] [] |
+ shishi | |
+ silky | |
+ skencil | [] () |
+ sketch | [] () |
+ solfege | |
+ soundtracker | [] [] |
+ sp | [] |
+ stardict | [] |
+ system-tools-ba... | [] [] [] [] [] [] [] [] [] |
+ tar | [] |
+ texinfo | [] [] [] |
+ textutils | [] [] [] |
+ tin | () () |
+ tp-robot | [] |
+ tuxpaint | [] [] [] [] [] |
+ unicode-han-tra... | |
+ unicode-transla... | |
+ util-linux | [] [] [] [] |
+ vorbis-tools | [] [] [] [] |
+ wastesedge | () |
+ wdiff | [] [] [] [] |
+ wget | [] [] |
+ xchat | [] [] [] [] [] [] |
+ xkeyboard-config | |
+ xpad | [] [] |
+ +----------------------------------------------------+
+ af am ar az be bg bs ca cs cy da de el en en_GB eo
+ 10 0 1 2 9 22 1 42 41 2 60 95 16 1 17 16
+
+ es et eu fa fi fr ga gl gu he hi hr hu id is it
+ +--------------------------------------------------+
+ GNUnet | |
+ a2ps | [] [] [] () |
+ aegis | |
+ ant-phone | [] |
+ anubis | [] |
+ ap-utils | [] [] |
+ aspell | [] [] [] |
+ bash | [] [] [] |
+ batchelor | [] [] |
+ bfd | [] |
+ bibshelf | [] [] [] |
+ binutils | [] [] [] |
+ bison | [] [] [] [] [] [] |
+ bison-runtime | [] [] [] [] [] |
+ bluez-pin | [] [] [] [] [] |
+ cflow | [] |
+ clisp | [] [] |
+ console-tools | |
+ coreutils | [] [] [] [] [] [] |
+ cpio | [] [] [] |
+ cpplib | [] [] |
+ cryptonit | [] |
+ darkstat | [] () [] [] [] |
+ dialog | [] [] [] [] [] [] [] [] |
+ diffutils | [] [] [] [] [] [] [] [] [] |
+ doodle | [] [] |
+ e2fsprogs | [] [] [] |
+ enscript | [] [] [] |
+ error | [] [] [] [] [] |
+ fetchmail | [] |
+ fileutils | [] [] [] [] [] [] |
+ findutils | [] [] [] [] |
+ flex | [] [] [] |
+ fslint | [] |
+ gas | [] [] |
+ gawk | [] [] [] [] |
+ gbiff | [] |
+ gcal | [] [] |
+ gcc | [] |
+ gettext-examples | [] [] [] [] [] [] |
+ gettext-runtime | [] [] [] [] [] [] |
+ gettext-tools | [] [] [] |
+ gimp-print | [] [] |
+ gip | [] [] [] |
+ gliv | () |
+ glunarclock | [] [] [] |
+ gmult | [] [] [] |
+ gnubiff | () () |
+ gnucash | () () () |
+ gnucash-glossary | [] [] |
+ gnuedu | [] |
+ gnulib | [] [] [] [] [] [] [] [] |
+ gnunet-gtk | |
+ gnutls | |
+ gpe-aerial | [] [] |
+ gpe-beam | [] [] |
+ gpe-calendar | |
+ gpe-clock | [] [] [] [] |
+ gpe-conf | [] |
+ gpe-contacts | [] [] |
+ gpe-edit | [] [] [] [] |
+ gpe-filemanager | [] |
+ gpe-go | [] [] [] |
+ gpe-login | [] [] [] |
+ gpe-ownerinfo | [] [] [] [] [] |
+ gpe-package | [] |
+ gpe-sketchbook | [] [] |
+ gpe-su | [] [] [] [] |
+ gpe-taskmanager | [] [] [] |
+ gpe-timesheet | [] [] [] [] |
+ gpe-today | [] [] [] [] |
+ gpe-todo | [] |
+ gphoto2 | [] [] [] [] [] |
+ gprof | [] [] [] [] |
+ gpsdrive | () () [] () |
+ gramadoir | [] [] |
+ grep | [] [] [] [] [] [] [] [] [] [] [] [] |
+ gretl | [] [] [] |
+ gsasl | [] [] |
+ gss | [] |
+ gst-plugins | [] [] [] |
+ gst-plugins-base | [] [] |
+ gst-plugins-good | [] [] [] |
+ gstreamer | [] [] [] |
+ gtick | [] |
+ gtkam | [] [] [] [] |
+ gtkorphan | [] [] |
+ gtkspell | [] [] [] [] [] [] |
+ gutenprint | [] |
+ hello | [] [] [] [] [] [] [] [] [] [] [] [] [] |
+ id-utils | [] [] [] [] [] |
+ impost | [] [] |
+ indent | [] [] [] [] [] [] [] [] [] [] |
+ iso_3166 | [] [] [] |
+ iso_3166_2 | [] |
+ iso_4217 | [] [] [] [] |
+ iso_639 | [] [] [] [] [] |
+ jpilot | [] [] |
+ jtag | [] |
+ jwhois | [] [] [] [] [] |
+ kbd | [] [] |
+ keytouch | [] |
+ keytouch-editor | [] |
+ keytouch-keyboa... | [] |
+ latrine | [] [] [] |
+ ld | [] [] |
+ leafpad | [] [] [] [] [] [] |
+ libc | [] [] [] [] [] |
+ libexif | [] |
+ libextractor | [] |
+ libgpewidget | [] [] [] [] [] |
+ libgpg-error | |
+ libgphoto2 | [] [] [] |
+ libgphoto2_port | [] [] |
+ libgsasl | [] [] |
+ libiconv | [] [] |
+ libidn | [] [] |
+ lifelines | () |
+ lilypond | [] |
+ lingoteach | [] [] [] |
+ lynx | [] [] [] |
+ m4 | [] [] [] [] |
+ mailutils | [] [] |
+ make | [] [] [] [] [] [] [] [] |
+ man-db | () |
+ minicom | [] [] [] [] |
+ mysecretdiary | [] [] [] |
+ nano | [] [] [] [] [] [] |
+ nano_1_0 | [] [] [] [] [] |
+ opcodes | [] [] [] [] |
+ parted | [] [] [] [] |
+ pilot-qof | |
+ psmisc | [] [] [] |
+ pwdutils | |
+ python | |
+ qof | [] |
+ radius | [] [] |
+ recode | [] [] [] [] [] [] [] [] |
+ rpm | [] [] |
+ screem | |
+ scrollkeeper | [] [] [] |
+ sed | [] [] [] [] [] |
+ sh-utils | [] [] [] [] [] [] [] |
+ shared-mime-info | [] [] [] [] [] [] |
+ sharutils | [] [] [] [] [] [] [] [] |
+ shishi | |
+ silky | [] |
+ skencil | [] [] |
+ sketch | [] [] |
+ solfege | [] |
+ soundtracker | [] [] [] |
+ sp | [] |
+ stardict | [] |
+ system-tools-ba... | [] [] [] [] [] [] [] [] |
+ tar | [] [] [] [] [] [] [] |
+ texinfo | [] [] |
+ textutils | [] [] [] [] [] |
+ tin | [] () |
+ tp-robot | [] [] [] [] |
+ tuxpaint | [] [] |
+ unicode-han-tra... | |
+ unicode-transla... | [] [] |
+ util-linux | [] [] [] [] [] [] [] |
+ vorbis-tools | [] [] |
+ wastesedge | () |
+ wdiff | [] [] [] [] [] [] [] [] |
+ wget | [] [] [] [] [] [] [] [] |
+ xchat | [] [] [] [] [] [] [] [] |
+ xkeyboard-config | [] [] [] [] |
+ xpad | [] [] [] |
+ +--------------------------------------------------+
+ es et eu fa fi fr ga gl gu he hi hr hu id is it
+ 88 22 14 2 40 115 61 14 1 8 1 6 59 31 0 52
+
+ ja ko ku ky lg lt lv mk mn ms mt nb ne nl nn no
+ +-------------------------------------------------+
+ GNUnet | |
+ a2ps | () [] [] () |
+ aegis | () |
+ ant-phone | [] |
+ anubis | [] [] [] |
+ ap-utils | [] |
+ aspell | [] [] |
+ bash | [] |
+ batchelor | [] [] |
+ bfd | |
+ bibshelf | [] |
+ binutils | |
+ bison | [] [] [] |
+ bison-runtime | [] [] [] |
+ bluez-pin | [] [] [] |
+ cflow | |
+ clisp | [] |
+ console-tools | |
+ coreutils | [] |
+ cpio | |
+ cpplib | [] |
+ cryptonit | [] |
+ darkstat | [] [] |
+ dialog | [] [] |
+ diffutils | [] [] [] |
+ doodle | |
+ e2fsprogs | [] |
+ enscript | [] |
+ error | [] |
+ fetchmail | [] [] |
+ fileutils | [] [] |
+ findutils | [] |
+ flex | [] [] |
+ fslint | [] [] |
+ gas | |
+ gawk | [] [] |
+ gbiff | [] |
+ gcal | |
+ gcc | |
+ gettext-examples | [] [] |
+ gettext-runtime | [] [] [] |
+ gettext-tools | [] [] |
+ gimp-print | [] [] |
+ gip | [] [] |
+ gliv | [] |
+ glunarclock | [] [] |
+ gmult | [] [] |
+ gnubiff | |
+ gnucash | () () |
+ gnucash-glossary | [] |
+ gnuedu | |
+ gnulib | [] [] [] [] |
+ gnunet-gtk | |
+ gnutls | |
+ gpe-aerial | [] |
+ gpe-beam | [] |
+ gpe-calendar | [] |
+ gpe-clock | [] [] [] |
+ gpe-conf | [] [] |
+ gpe-contacts | [] |
+ gpe-edit | [] [] [] |
+ gpe-filemanager | [] [] |
+ gpe-go | [] [] [] |
+ gpe-login | [] [] [] |
+ gpe-ownerinfo | [] [] |
+ gpe-package | [] [] |
+ gpe-sketchbook | [] [] |
+ gpe-su | [] [] [] |
+ gpe-taskmanager | [] [] [] [] |
+ gpe-timesheet | [] |
+ gpe-today | [] [] |
+ gpe-todo | [] |
+ gphoto2 | [] [] |
+ gprof | |
+ gpsdrive | () () () |
+ gramadoir | () |
+ grep | [] [] [] [] |
+ gretl | |
+ gsasl | [] |
+ gss | |
+ gst-plugins | [] |
+ gst-plugins-base | |
+ gst-plugins-good | [] |
+ gstreamer | [] |
+ gtick | |
+ gtkam | [] |
+ gtkorphan | [] |
+ gtkspell | [] [] |
+ gutenprint | |
+ hello | [] [] [] [] [] [] |
+ id-utils | [] |
+ impost | |
+ indent | [] [] |
+ iso_3166 | [] |
+ iso_3166_2 | [] |
+ iso_4217 | [] [] [] |
+ iso_639 | [] [] |
+ jpilot | () () () |
+ jtag | |
+ jwhois | [] |
+ kbd | [] |
+ keytouch | [] |
+ keytouch-editor | |
+ keytouch-keyboa... | |
+ latrine | [] |
+ ld | |
+ leafpad | [] [] |
+ libc | [] [] [] [] [] |
+ libexif | |
+ libextractor | |
+ libgpewidget | [] |
+ libgpg-error | |
+ libgphoto2 | [] |
+ libgphoto2_port | [] |
+ libgsasl | [] |
+ libiconv | |
+ libidn | [] [] |
+ lifelines | [] |
+ lilypond | |
+ lingoteach | [] |
+ lynx | [] [] |
+ m4 | [] [] |
+ mailutils | |
+ make | [] [] [] |
+ man-db | () |
+ minicom | [] |
+ mysecretdiary | [] |
+ nano | [] [] [] |
+ nano_1_0 | [] [] [] |
+ opcodes | [] |
+ parted | [] [] |
+ pilot-qof | |
+ psmisc | [] [] [] |
+ pwdutils | |
+ python | |
+ qof | |
+ radius | |
+ recode | [] |
+ rpm | [] [] |
+ screem | [] |
+ scrollkeeper | [] [] [] [] |
+ sed | [] [] |
+ sh-utils | [] [] |
+ shared-mime-info | [] [] [] [] [] |
+ sharutils | [] [] |
+ shishi | |
+ silky | [] |
+ skencil | |
+ sketch | |
+ solfege | |
+ soundtracker | |
+ sp | () |
+ stardict | [] [] |
+ system-tools-ba... | [] [] [] [] |
+ tar | [] [] [] |
+ texinfo | [] [] [] |
+ textutils | [] [] [] |
+ tin | |
+ tp-robot | [] |
+ tuxpaint | [] |
+ unicode-han-tra... | |
+ unicode-transla... | |
+ util-linux | [] [] |
+ vorbis-tools | [] |
+ wastesedge | [] |
+ wdiff | [] [] |
+ wget | [] [] |
+ xchat | [] [] [] [] |
+ xkeyboard-config | [] |
+ xpad | [] [] [] |
+ +-------------------------------------------------+
+ ja ko ku ky lg lt lv mk mn ms mt nb ne nl nn no
+ 52 24 2 2 1 3 0 2 3 21 0 15 1 97 5 1
+
+ nso or pa pl pt pt_BR rm ro ru rw sk sl sq sr sv ta
+ +------------------------------------------------------+
+ GNUnet | |
+ a2ps | () [] [] [] [] [] [] |
+ aegis | () () |
+ ant-phone | [] [] |
+ anubis | [] [] [] |
+ ap-utils | () |
+ aspell | [] [] |
+ bash | [] [] [] |
+ batchelor | [] [] |
+ bfd | |
+ bibshelf | [] |
+ binutils | [] [] |
+ bison | [] [] [] [] [] |
+ bison-runtime | [] [] [] [] |
+ bluez-pin | [] [] [] [] [] [] [] [] [] |
+ cflow | [] |
+ clisp | [] |
+ console-tools | [] |
+ coreutils | [] [] [] [] |
+ cpio | [] [] [] |
+ cpplib | [] |
+ cryptonit | [] [] |
+ darkstat | [] [] [] [] [] [] |
+ dialog | [] [] [] [] [] [] [] [] [] |
+ diffutils | [] [] [] [] [] [] |
+ doodle | [] [] |
+ e2fsprogs | [] [] |
+ enscript | [] [] [] [] [] |
+ error | [] [] [] [] |
+ fetchmail | [] [] [] |
+ fileutils | [] [] [] [] [] |
+ findutils | [] [] [] [] [] [] |
+ flex | [] [] [] [] [] |
+ fslint | [] [] [] [] |
+ gas | |
+ gawk | [] [] [] [] |
+ gbiff | [] |
+ gcal | [] |
+ gcc | [] |
+ gettext-examples | [] [] [] [] [] [] [] [] |
+ gettext-runtime | [] [] [] [] [] [] [] [] |
+ gettext-tools | [] [] [] [] [] [] [] |
+ gimp-print | [] [] |
+ gip | [] [] [] [] |
+ gliv | [] [] [] [] |
+ glunarclock | [] [] [] [] [] [] |
+ gmult | [] [] [] [] |
+ gnubiff | () |
+ gnucash | () [] |
+ gnucash-glossary | [] [] [] |
+ gnuedu | |
+ gnulib | [] [] [] [] [] |
+ gnunet-gtk | [] |
+ gnutls | [] [] |
+ gpe-aerial | [] [] [] [] [] [] [] |
+ gpe-beam | [] [] [] [] [] [] [] |
+ gpe-calendar | [] |
+ gpe-clock | [] [] [] [] [] [] [] [] |
+ gpe-conf | [] [] [] [] [] [] [] |
+ gpe-contacts | [] [] [] [] [] |
+ gpe-edit | [] [] [] [] [] [] [] [] |
+ gpe-filemanager | [] [] |
+ gpe-go | [] [] [] [] [] [] |
+ gpe-login | [] [] [] [] [] [] [] [] |
+ gpe-ownerinfo | [] [] [] [] [] [] [] [] |
+ gpe-package | [] [] |
+ gpe-sketchbook | [] [] [] [] [] [] [] [] |
+ gpe-su | [] [] [] [] [] [] [] [] |
+ gpe-taskmanager | [] [] [] [] [] [] [] [] |
+ gpe-timesheet | [] [] [] [] [] [] [] [] |
+ gpe-today | [] [] [] [] [] [] [] [] |
+ gpe-todo | [] [] [] [] |
+ gphoto2 | [] [] [] [] [] |
+ gprof | [] [] [] |
+ gpsdrive | [] [] [] |
+ gramadoir | [] [] |
+ grep | [] [] [] [] [] [] [] [] |
+ gretl | [] |
+ gsasl | [] [] [] |
+ gss | [] [] [] |
+ gst-plugins | [] [] [] [] |
+ gst-plugins-base | [] |
+ gst-plugins-good | [] [] [] [] |
+ gstreamer | [] [] [] |
+ gtick | [] |
+ gtkam | [] [] [] [] |
+ gtkorphan | [] |
+ gtkspell | [] [] [] [] [] [] [] [] |
+ gutenprint | [] |
+ hello | [] [] [] [] [] [] [] [] |
+ id-utils | [] [] [] [] |
+ impost | [] |
+ indent | [] [] [] [] [] [] |
+ iso_3166 | [] [] [] [] [] [] |
+ iso_3166_2 | |
+ iso_4217 | [] [] [] [] |
+ iso_639 | [] [] [] [] |
+ jpilot | |
+ jtag | [] |
+ jwhois | [] [] [] [] |
+ kbd | [] [] [] |
+ keytouch | [] |
+ keytouch-editor | [] |
+ keytouch-keyboa... | [] |
+ latrine | [] [] |
+ ld | [] |
+ leafpad | [] [] [] [] [] [] |
+ libc | [] [] [] [] [] |
+ libexif | [] |
+ libextractor | [] [] |
+ libgpewidget | [] [] [] [] [] [] [] |
+ libgpg-error | [] [] |
+ libgphoto2 | [] |
+ libgphoto2_port | [] [] [] |
+ libgsasl | [] [] [] [] |
+ libiconv | [] [] |
+ libidn | [] [] () |
+ lifelines | [] [] |
+ lilypond | |
+ lingoteach | [] |
+ lynx | [] [] [] |
+ m4 | [] [] [] [] [] |
+ mailutils | [] [] [] [] |
+ make | [] [] [] [] |
+ man-db | [] [] |
+ minicom | [] [] [] [] [] |
+ mysecretdiary | [] [] [] [] |
+ nano | [] [] [] |
+ nano_1_0 | [] [] [] [] |
+ opcodes | [] [] |
+ parted | [] |
+ pilot-qof | [] |
+ psmisc | [] [] |
+ pwdutils | [] [] |
+ python | |
+ qof | [] [] |
+ radius | [] [] |
+ recode | [] [] [] [] [] [] [] |
+ rpm | [] [] [] [] |
+ screem | |
+ scrollkeeper | [] [] [] [] [] [] [] |
+ sed | [] [] [] [] [] [] [] [] [] |
+ sh-utils | [] [] [] |
+ shared-mime-info | [] [] [] [] [] |
+ sharutils | [] [] [] [] |
+ shishi | [] |
+ silky | [] |
+ skencil | [] [] [] |
+ sketch | [] [] [] |
+ solfege | [] |
+ soundtracker | [] [] |
+ sp | |
+ stardict | [] [] [] |
+ system-tools-ba... | [] [] [] [] [] [] [] [] [] |
+ tar | [] [] [] [] [] |
+ texinfo | [] [] [] [] |
+ textutils | [] [] [] |
+ tin | () |
+ tp-robot | [] |
+ tuxpaint | [] [] [] [] [] |
+ unicode-han-tra... | |
+ unicode-transla... | |
+ util-linux | [] [] [] [] |
+ vorbis-tools | [] [] |
+ wastesedge | |
+ wdiff | [] [] [] [] [] [] |
+ wget | [] [] [] [] |
+ xchat | [] [] [] [] [] [] [] |
+ xkeyboard-config | [] [] |
+ xpad | [] [] [] |
+ +------------------------------------------------------+
+ nso or pa pl pt pt_BR rm ro ru rw sk sl sq sr sv ta
+ 0 2 3 58 30 54 5 73 72 4 40 46 11 50 128 2
+
+ tg th tk tr uk ven vi wa xh zh_CN zh_HK zh_TW zu
+ +---------------------------------------------------+
+ GNUnet | [] | 2
+ a2ps | [] [] [] | 19
+ aegis | | 0
+ ant-phone | [] [] | 6
+ anubis | [] [] [] | 11
+ ap-utils | () [] | 4
+ aspell | [] [] [] | 15
+ bash | [] | 11
+ batchelor | [] [] | 9
+ bfd | | 1
+ bibshelf | [] | 7
+ binutils | [] [] [] | 9
+ bison | [] [] [] | 19
+ bison-runtime | [] [] [] | 15
+ bluez-pin | [] [] [] [] [] [] | 28
+ cflow | [] [] | 5
+ clisp | | 6
+ console-tools | [] [] | 5
+ coreutils | [] [] | 16
+ cpio | [] [] [] | 9
+ cpplib | [] [] [] [] | 11
+ cryptonit | | 5
+ darkstat | [] () () | 15
+ dialog | [] [] [] [] [] | 30
+ diffutils | [] [] [] [] | 28
+ doodle | [] | 6
+ e2fsprogs | [] [] | 10
+ enscript | [] [] [] | 16
+ error | [] [] [] [] | 18
+ fetchmail | [] [] | 12
+ fileutils | [] [] [] | 18
+ findutils | [] [] [] | 17
+ flex | [] [] | 15
+ fslint | [] | 9
+ gas | [] | 3
+ gawk | [] [] | 15
+ gbiff | [] | 5
+ gcal | [] | 5
+ gcc | [] [] [] | 6
+ gettext-examples | [] [] [] [] [] [] | 27
+ gettext-runtime | [] [] [] [] [] [] | 28
+ gettext-tools | [] [] [] [] [] | 19
+ gimp-print | [] [] | 12
+ gip | [] [] | 12
+ gliv | [] [] | 8
+ glunarclock | [] [] [] | 15
+ gmult | [] [] [] [] | 15
+ gnubiff | [] | 1
+ gnucash | () | 2
+ gnucash-glossary | [] [] | 9
+ gnuedu | [] | 2
+ gnulib | [] [] [] [] [] | 28
+ gnunet-gtk | | 1
+ gnutls | | 2
+ gpe-aerial | [] [] | 14
+ gpe-beam | [] [] | 14
+ gpe-calendar | [] | 3
+ gpe-clock | [] [] [] [] | 21
+ gpe-conf | [] [] | 14
+ gpe-contacts | [] [] | 10
+ gpe-edit | [] [] [] [] | 20
+ gpe-filemanager | [] | 6
+ gpe-go | [] [] | 15
+ gpe-login | [] [] [] [] [] | 21
+ gpe-ownerinfo | [] [] [] [] | 21
+ gpe-package | [] | 6
+ gpe-sketchbook | [] [] | 16
+ gpe-su | [] [] [] | 20
+ gpe-taskmanager | [] [] [] | 20
+ gpe-timesheet | [] [] [] [] | 18
+ gpe-today | [] [] [] [] [] | 21
+ gpe-todo | [] | 7
+ gphoto2 | [] [] [] [] | 20
+ gprof | [] [] | 11
+ gpsdrive | | 4
+ gramadoir | [] | 7
+ grep | [] [] [] [] | 34
+ gretl | | 4
+ gsasl | [] [] | 8
+ gss | [] | 5
+ gst-plugins | [] [] [] | 15
+ gst-plugins-base | [] [] [] | 9
+ gst-plugins-good | [] [] [] [] [] | 20
+ gstreamer | [] [] [] | 17
+ gtick | [] | 3
+ gtkam | [] | 13
+ gtkorphan | [] | 7
+ gtkspell | [] [] [] [] [] [] | 26
+ gutenprint | | 3
+ hello | [] [] [] [] [] | 37
+ id-utils | [] [] | 14
+ impost | [] | 4
+ indent | [] [] [] [] | 25
+ iso_3166 | [] [] [] [] | 16
+ iso_3166_2 | | 2
+ iso_4217 | [] [] | 14
+ iso_639 | [] | 14
+ jpilot | [] [] [] [] | 7
+ jtag | [] | 3
+ jwhois | [] [] [] | 13
+ kbd | [] [] | 12
+ keytouch | [] | 4
+ keytouch-editor | | 2
+ keytouch-keyboa... | [] | 3
+ latrine | [] [] | 8
+ ld | [] [] [] [] | 8
+ leafpad | [] [] [] [] | 23
+ libc | [] [] [] | 23
+ libexif | [] | 4
+ libextractor | [] | 5
+ libgpewidget | [] [] [] | 19
+ libgpg-error | [] | 4
+ libgphoto2 | [] | 8
+ libgphoto2_port | [] [] [] | 11
+ libgsasl | [] | 8
+ libiconv | [] | 7
+ libidn | [] [] | 10
+ lifelines | | 4
+ lilypond | | 2
+ lingoteach | [] | 6
+ lynx | [] [] [] | 15
+ m4 | [] [] [] | 18
+ mailutils | [] | 8
+ make | [] [] [] | 20
+ man-db | [] | 6
+ minicom | [] | 14
+ mysecretdiary | [] [] | 12
+ nano | [] [] | 17
+ nano_1_0 | [] [] [] | 18
+ opcodes | [] [] | 10
+ parted | [] [] [] | 10
+ pilot-qof | [] | 3
+ psmisc | [] | 10
+ pwdutils | [] | 3
+ python | | 0
+ qof | [] | 4
+ radius | [] | 6
+ recode | [] [] [] | 25
+ rpm | [] [] [] [] | 14
+ screem | [] | 2
+ scrollkeeper | [] [] [] [] | 26
+ sed | [] [] [] | 22
+ sh-utils | [] | 15
+ shared-mime-info | [] [] [] [] | 24
+ sharutils | [] [] [] | 23
+ shishi | | 1
+ silky | [] | 4
+ skencil | [] | 7
+ sketch | | 6
+ solfege | | 2
+ soundtracker | [] [] | 9
+ sp | [] | 3
+ stardict | [] [] [] [] | 11
+ system-tools-ba... | [] [] [] [] [] [] [] | 37
+ tar | [] [] [] [] | 20
+ texinfo | [] [] [] | 15
+ textutils | [] [] [] | 17
+ tin | | 1
+ tp-robot | [] [] [] | 10
+ tuxpaint | [] [] [] | 16
+ unicode-han-tra... | | 0
+ unicode-transla... | | 2
+ util-linux | [] [] [] | 20
+ vorbis-tools | [] [] | 11
+ wastesedge | | 1
+ wdiff | [] [] | 22
+ wget | [] [] [] | 19
+ xchat | [] [] [] [] | 29
+ xkeyboard-config | [] [] [] [] | 11
+ xpad | [] [] [] | 14
+ +---------------------------------------------------+
+ 77 teams tg th tk tr uk ven vi wa xh zh_CN zh_HK zh_TW zu
+ 170 domains 0 1 1 77 39 0 136 10 1 48 5 54 0 2028
+
+ Some counters in the preceding matrix are higher than the number of
+visible blocks would suggest. This is because a few extra PO files are
+used for implementing regional variants of languages, or language
+dialects.
+
+ For a PO file in the matrix above to be effective, the package to
+which it applies should also have been internationalized and
+distributed as such by its maintainer. There might be an observable
+lag between the mere existence of a PO file and its wide availability
+in a distribution.
+
+   If October 2006 seems old, you may fetch a more recent copy of
+this `ABOUT-NLS' file from most GNU archive sites. The most up-to-date
+matrix with full percentage details can be found at
+`http://www.iro.umontreal.ca/contrib/po/HTML/matrix.html'.
+
+1.6 Using `gettext' in new packages
+===================================
+
+If you are writing a freely available program and want to
+internationalize it you are welcome to use GNU `gettext' in your
+package. Of course you have to respect the GNU Library General Public
+License which covers the use of the GNU `gettext' library. This means
+in particular that even non-free programs can use `libintl' as a shared
+library, whereas only free software can use `libintl' as a static
+library or use modified versions of `libintl'.
+
+   Once the sources are changed appropriately and the setup can handle
+the use of `gettext', the only thing missing is the translations. The
+Free Translation Project is also available for packages which are not
+developed inside the GNU project. Therefore the information given above
+also applies to every other Free Software Project. Contact
+`translation@iro.umontreal.ca' to make the `.pot' files available to
+the translation teams.
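+
+   As a rough illustration of what "changing the sources
+appropriately" usually involves (this sketch is not part of the
+original text), user-visible strings are wrapped in `gettext' calls,
+conventionally through a short `_()' macro, and plural forms go
+through `ngettext' so translators can supply language-specific plural
+rules. Strings marked this way can later be extracted into the `.pot'
+file handed to the translation teams. The helper name and messages
+below are made up for the example; the initialization is the same as
+in the earlier sketch.
+
+     #include <libintl.h>
+     #include <locale.h>
+     #include <stdio.h>
+
+     #define _(msgid) gettext (msgid)
+
+     /* Hypothetical helper reporting how many files were processed.  */
+     static void
+     report_processed (unsigned long n)
+     {
+       /* ngettext selects the plural form appropriate for the locale.  */
+       printf (ngettext ("Processed %lu file.\n",
+                         "Processed %lu files.\n", n), n);
+     }
+
+     int
+     main (void)
+     {
+       /* Same initialization as in the earlier sketch.  */
+       setlocale (LC_ALL, "");
+       bindtextdomain ("hello", "/usr/share/locale");
+       textdomain ("hello");
+
+       printf ("%s", _("Scanning the input directory...\n"));
+       report_processed (3);
+       return 0;
+     }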
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/AUTHORS b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/AUTHORS
new file mode 100644
index 00000000..63a9815b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/AUTHORS
@@ -0,0 +1,27 @@
+
+Authors of XZ Utils
+===================
+
+ XZ Utils is developed and maintained by Lasse Collin
+ <lasse.collin@tukaani.org>.
+
+ Major parts of liblzma are based on code written by Igor Pavlov,
+ specifically the LZMA SDK <http://7-zip.org/sdk.html>. Without
+ this code, XZ Utils wouldn't exist.
+
+ The SHA-256 implementation in liblzma is based on the code found from
+ 7-Zip <http://7-zip.org/>, which has a modified version of the SHA-256
+ code found from Crypto++ <http://www.cryptopp.com/>. The SHA-256 code
+ in Crypto++ was written by Kevin Springle and Wei Dai.
+
+ Some scripts have been adapted from gzip. The original versions
+ were written by Jean-loup Gailly, Charles Levert, and Paul Eggert.
+    Andrew Dudman helped adapt these scripts and their man pages for
+ XZ Utils.
+
+ The GNU Autotools based build system contains files from many authors,
+    which I'm not trying to list here.
+
+ Several people have contributed fixes or reported bugs. Most of them
+ are mentioned in the file THANKS.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING
new file mode 100644
index 00000000..43c90d05
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING
@@ -0,0 +1,65 @@
+
+XZ Utils Licensing
+==================
+
+ Different licenses apply to different files in this package. Here
+ is a rough summary of which licenses apply to which parts of this
+ package (but check the individual files to be sure!):
+
+ - liblzma is in the public domain.
+
+ - xz, xzdec, and lzmadec command line tools are in the public
+ domain unless GNU getopt_long had to be compiled and linked
+ in from the lib directory. The getopt_long code is under
+ GNU LGPLv2.1+.
+
+ - The scripts to grep, diff, and view compressed files have been
+ adapted from gzip. These scripts and their documentation are
+ under GNU GPLv2+.
+
+ - All the documentation in the doc directory and most of the
+ XZ Utils specific documentation files in other directories
+ are in the public domain.
+
+ - Translated messages are in the public domain.
+
+ - The build system contains public domain files, and files that
+ are under GNU GPLv2+ or GNU GPLv3+. None of these files end up
+ in the binaries being built.
+
+ - Test files and test code in the tests directory, and debugging
+ utilities in the debug directory are in the public domain.
+
+ - The extra directory may contain public domain files, and files
+ that are under various free software licenses.
+
+ You can do whatever you want with the files that have been put into
+ the public domain. If you find public domain legally problematic,
+ take the previous sentence as a license grant. If you still find
+ the lack of copyright legally problematic, you have too many
+ lawyers.
+
+ As usual, this software is provided "as is", without any warranty.
+
+ If you copy significant amounts of public domain code from XZ Utils
+ into your project, acknowledging this somewhere in your software is
+ polite (especially if it is proprietary, non-free software), but
+ naturally it is not legally required. Here is an example of a good
+ notice to put into "about box" or into documentation:
+
+ This software includes code from XZ Utils <http://tukaani.org/xz/>.
+
+ The following license texts are included in the following files:
+ - COPYING.LGPLv2.1: GNU Lesser General Public License version 2.1
+ - COPYING.GPLv2: GNU General Public License version 2
+ - COPYING.GPLv3: GNU General Public License version 3
+
+ Note that the toolchain (compiler, linker etc.) may add some code
+ pieces that are copyrighted. Thus, it is possible that e.g. liblzma
+ binary wouldn't actually be in the public domain in its entirety
+ even though it contains no copyrighted code from the XZ Utils source
+ package.
+
+ If you have questions, don't hesitate to ask the author(s) for more
+ information.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.GPLv2 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.GPLv2
new file mode 100644
index 00000000..6e475df5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.GPLv2
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.GPLv3 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.GPLv3
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.GPLv3
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.LGPLv2.1 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.LGPLv2.1
new file mode 100644
index 00000000..dbe030dd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/COPYING.LGPLv2.1
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/ChangeLog b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/ChangeLog
new file mode 100644
index 00000000..15102ef6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/ChangeLog
@@ -0,0 +1,7041 @@
+commit 682efdc1f9492fdd76c9ce82e7c00ca0768067e8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 18:36:59 +0300
+
+ "make dist" fixes
+
+ Makefile.am | 13 ++++++-------
+ 1 files changed, 6 insertions(+), 7 deletions(-)
+
+commit c8c184db1c95bf70f78256ec6237845a57f342af
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 17:08:33 +0300
+
+ Update xz man page date.
+
+ src/xz/xz.1 | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 9756fce565e98b8fa5fe6ead296d84e7601ec254
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 17:00:22 +0300
+
+ Fix the debug directory.
+
+ 6a2eb54092fc625d59921a607ff68cd1a90aa898 and
+ 71f18e8a066a01dda0c8e5508b135ef104e43e4c required
+ some changes that weren't applied in debug.
+
+ debug/Makefile.am | 5 +++--
+ debug/full_flush.c | 1 +
+ debug/known_sizes.c | 1 +
+ debug/memusage.c | 1 +
+ debug/sync_flush.c | 1 +
+ 5 files changed, 7 insertions(+), 2 deletions(-)
+
+commit 77007a7fb20187fcf3d1dd9839c79ace2d63f2ea
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 16:36:40 +0300
+
+ Add missing files to EXTRA_DIST.
+
+ Makefile.am | 11 +++++++----
+ 1 files changed, 7 insertions(+), 4 deletions(-)
+
+commit 04dcbfdeb921e5f361a4487134e91e23fffbe09d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 16:21:22 +0300
+
+ Bumped version to 4.999.9beta.
+
+ src/liblzma/api/lzma/version.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit fd7618611a22f42a6913bc8d518c9bbc9252d6b4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 16:17:47 +0300
+
+ Updated THANKS.
+
+ THANKS | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+commit c29e76c0f910fca0a90a50b78d337f6c32623e9d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 16:12:52 +0300
+
+ .xz file format specification 1.0.4 (probably).
+
+ Thanks to Christian von Roques, Peter Lawler,
+ and Jim Meyering for the fixes.
+
+ doc/xz-file-format.txt | 26 +++++++++++++++-----------
+ 1 files changed, 15 insertions(+), 11 deletions(-)
+
+commit 696d7ee3953beaf4f0ed18e78917ccf300431966
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 15:43:54 +0300
+
+ Require GNU libtool 2.2.
+
+ configure.ac | 13 +++----------
+ 1 files changed, 3 insertions(+), 10 deletions(-)
+
+commit 4c3558aa8305a8f8b6c43b8569eb539717ca9e8d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 15:34:45 +0300
+
+ Add "dos" to EXTRA_DIST.
+
+ Makefile.am | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 35b29e4424ced5a3ababf132283e519080c7b298
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 15:23:27 +0300
+
+ Updated TODO.
+
+ TODO | 6 ++++++
+ 1 files changed, 6 insertions(+), 0 deletions(-)
+
+commit 23414377192c21f3f34c84cdfe0ef0fbd06a1dea
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 15:17:00 +0300
+
+ Some xz man page improvements.
+
+ src/xz/xz.1 | 78 ++++++++++++++++++++++++++++++++++++++++++++++------------
+ 1 files changed, 62 insertions(+), 16 deletions(-)
+
+commit 371b04e19fc9051dbaeec51ec0badec6a1f0699d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 10:41:01 +0300
+
+ Removed doc/bugs.txt.
+
+ doc/bugs.txt | 46 ----------------------------------------------
+ 1 files changed, 0 insertions(+), 46 deletions(-)
+
+commit d88c4072b36d3a76f839185799fb1d91037a1b81
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 10:40:25 +0300
+
+ Updated README.
+
+ It now includes bug reporting instructions/tips.
+
+ README | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
+ 1 files changed, 55 insertions(+), 10 deletions(-)
+
+commit 92e536d8b8d33a6b12d0802bcd7be4437046f13e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 10:21:18 +0300
+
+ Fix a typo in FAQ.
+
+ Thanks to Jim Meyering.
+
+ (From now on, I try to always remember to put
+ the relevant thanks to commit messages.)
+
+ doc/faq.txt | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 3e2ba8b58585743e59251e69ad2783eb08357079
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-27 10:13:46 +0300
+
+ Updates to liblzma API headers.
+
+ Added lzma_nothrow for every function. It adds
+ throw() when the header is used in C++ code.
+
+ Some lzma_attrs were added or removed.
+
+ Lots of comments were improved.
+
+ src/liblzma/api/lzma.h | 20 +++++++
+ src/liblzma/api/lzma/base.h | 48 +++++++++--------
+ src/liblzma/api/lzma/block.h | 38 +++++++-------
+ src/liblzma/api/lzma/check.h | 22 ++++----
+ src/liblzma/api/lzma/container.h | 36 +++++++------
+ src/liblzma/api/lzma/filter.h | 81 ++++++++++++++++++-----------
+ src/liblzma/api/lzma/index.h | 97 +++++++++++++++++++++--------------
+ src/liblzma/api/lzma/index_hash.h | 14 +++--
+ src/liblzma/api/lzma/lzma.h | 87 ++++++++++++++++---------------
+ src/liblzma/api/lzma/stream_flags.h | 12 ++--
+ src/liblzma/api/lzma/version.h | 10 ++--
+ src/liblzma/api/lzma/vli.h | 7 ++-
+ 12 files changed, 275 insertions(+), 197 deletions(-)
+
+commit 8e8ebc17c535a1f8846718059b48417409c37050
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-18 00:30:09 +0300
+
+ Install faq.txt.
+
+ Makefile.am | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit b198e770a146e4a41f91a93f0b233713f2515848
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-18 00:26:48 +0300
+
+ Updated faq.txt.
+
+ Some questions worth answering were removed, because I
+ currently don't have good up to date answers to them.
+
+ doc/faq.txt | 239 ++++++++++++++++++-----------------------------------------
+ 1 files changed, 73 insertions(+), 166 deletions(-)
+
+commit fe111a25cd788d31b581996e4533910388a7f0a9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-17 22:45:50 +0300
+
+ Some xz man changes.
+
+ src/xz/xz.1 | 88 +++++++++++++++++++++++++++++------------------------------
+ 1 files changed, 43 insertions(+), 45 deletions(-)
+
+commit 10242a21e9abda0c5c6a03501703cc40b8a699a5
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-16 22:15:42 +0300
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 3ce1916c83041113b9cad9ead5c97a527cf8aa1d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-16 22:15:13 +0300
+
+ Fix data corruption in LZ/LZMA2 encoder.
+
+ Thanks to Jonathan Stott for the bug report.
+
+ src/liblzma/lz/lz_encoder.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 66da129c8ec33dd66acc92f113f7c1ca740ca81a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-13 15:15:37 +0300
+
+ Updated INSTALL and PACKAGERS to match the changes
+ made in --enable-dynamic.
+
+ INSTALL | 20 ++++++++++++++++----
+ PACKAGERS | 11 ++++++-----
+ 2 files changed, 22 insertions(+), 9 deletions(-)
+
+commit 8238c4b2402f952c4e492e5b778aa272e57b6705
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-13 15:03:46 +0300
+
+ Link lzmainfo against shared liblzma by default.
+
+ src/lzmainfo/Makefile.am | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+commit 71f18e8a066a01dda0c8e5508b135ef104e43e4c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-13 15:00:21 +0300
+
+ Make --enable-dynamic a tristate option.
+
+ Some programs will by default be linked against static
+ liblzma and some against shared liblzma. --enable-dynamic
+ now allows overriding the default in both directions
+ (all dynamic or all static) even when building both
+ shared and static liblzma.
+
+ This is quite messy compared to how simple a thing it is
+ supposed to be. The complexity is mostly due to Windows support.
+
+ configure.ac | 77 +++++++++++++++++++++++++++++++++++++++++++---------------
+ 1 files changed, 57 insertions(+), 20 deletions(-)
+
+commit 5aa4678b2342dcfc1d2b31aa9fa4f39c539e4b61
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-13 12:56:47 +0300
+
+ Fix xz Makefile.am for the man page.
+
+ install-exec-hook -> install-data-hook
+
+ src/xz/Makefile.am | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+commit e51b4e49e800bd84e6d589dca2964d3985e88139
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-13 12:55:45 +0300
+
+ Add lzmainfo for backward compatibility with LZMA Utils.
+
+ lzmainfo now links against static liblzma. In contrast
+ to other command line tools in XZ Utils, linking lzmainfo
+ against static liblzma by default is dumb. This will be
+ fixed once I have fixed some related issues in configure.ac.
+
+ configure.ac | 1 +
+ src/Makefile.am | 2 +-
+ src/lzmainfo/Makefile.am | 29 ++++++
+ src/lzmainfo/lzmainfo.1 | 55 +++++++++++
+ src/lzmainfo/lzmainfo.c | 242 ++++++++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 328 insertions(+), 1 deletions(-)
+
+commit a4165d0584376d948c213ec93c6065d24ff6a5e7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-13 12:42:36 +0300
+
+ Sync some error messages from xz to xzdec.
+
+ Make xz error message translation usable outside
+ xz (at least in upcoming lzmainfo).
+
+ src/xz/main.c | 4 ++--
+ src/xzdec/xzdec.c | 6 +++---
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+commit df636eb4e066b4e154ce8e66e82c87ba1db652a6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-13 09:37:21 +0300
+
+ Add xz man page to manfiles in toplevel Makefile.am.
+
+ Makefile.am | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 180bdf58ea5bb07941e0a99b304d9aa832198748
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-13 09:37:01 +0300
+
+ Fix first line of xz man page.
+
+ src/xz/xz.1 | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit e1ce2291e759b50ebfcf7cbbcc04cd098f1705a4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-10 11:22:31 +0300
+
+ Added a rough version of the xz man page.
+
+ src/xz/Makefile.am | 15 +
+ src/xz/xz.1 | 1206 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 1221 insertions(+), 0 deletions(-)
+
+commit e71903fc6101f1c039d702e335b08aad1e1b4100
+Author: Jonathan Nieder <jrnieder@gmail.com>
+Date: 2009-08-09 13:41:20 -0500
+
+ “xzdiff a.xz b.xz” always fails
+
+ Attempts to compare two compressed files result in no output and
+ exit status 2.
+
+ Instead of going to standard output, ‘diff’ output is being
+ captured in the xz_status variable along with the exit status from
+ the decompression commands. Later, when this variable is examined
+ for nonzero status codes, numerals from dates in the ‘diff’ output
+ make it appear as though decompression failed.
+
+ So let the ‘diff’ output leak to standard output with another file
+ descriptor. (This trick is used in all similar contexts elsewhere
+ in xzdiff and in the analogous context in gzip’s zdiff script.)
+
+ src/scripts/xzdiff.in | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 1d314b81aa5b0c4530638ffabd4e0edb52e5362c
+Author: Jonathan Nieder <jrnieder@gmail.com>
+Date: 2009-08-09 13:22:12 -0500
+
+ xzless: Support compressed standard input
+
+ It can be somewhat confusing that
+
+ less < some_file.txt
+
+ works fine, whereas
+
+ xzless < some_file.txt.xz
+
+ does not. Since version 429, ‘less’ allows a filter specified in
+ the LESSOPEN environment variable to preprocess its input even if
+ it comes from standard input, if $LESSOPEN begins with ‘|-’. So
+ set $LESSOPEN to take advantage of this feature.
+
+ Check less’s version at runtime so xzless can continue to work
+ with older versions.
+
+ src/scripts/xzless.in | 8 +++++++-
+ 1 files changed, 7 insertions(+), 1 deletions(-)
+
+commit a7f5d2fe4826ac68839d00059f05004fb81d5c69
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-09 20:57:46 +0300
+
+ GPLv2+ not GPLv2 for Doxyfile.in is probably OK.
+
+ Doxyfile.in | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit b735cde20cc14857136ae65a0e5d336ed7ddc862
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-02 00:27:29 +0300
+
+ Added a copyright notice to Doxyfile.in since it contains
+ lots of comments from Doxygen.
+
+ It seems that the Doxygen authors' intent is to not apply
+ their copyright on generated files, but since it doesn't
+ matter for XZ Utils at all, better safe than sorry.
+
+ Doxyfile.in | 3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+commit 0fd157cc008446adfc8f91394f5503868025a642
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-02 00:11:37 +0300
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit b198da96ff9ac8c89b466b4d196c5f3fe1c7904f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-08-02 00:10:22 +0300
+
+ Updated TODO.
+
+ TODO | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+commit 669413bb2db954bbfde3c4542fddbbab53891eb4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-30 12:25:55 +0300
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit dbbd8fb870ae789d96497911006c869d37148c15
+Author: Jonathan Nieder <jrnieder@gmail.com>
+Date: 2009-07-28 17:37:24 -0500
+
+ xzdiff: add missing ;; to case statement
+
+ src/scripts/xzdiff.in | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+commit adbad2d16cb5909f85d4a429011005613ea62ffe
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-24 13:15:06 +0300
+
+ Added history.txt to doc_DATA.
+
+ Makefile.am | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit e0236f12569eb36f9b81ce7a1e52e0f73698ac27
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-24 12:00:40 +0300
+
+ Updated .gitignore files.
+
+ .gitignore | 36 +++++++++++++++++++-----------------
+ po/.gitignore | 3 +++
+ 2 files changed, 22 insertions(+), 17 deletions(-)
+
+commit 2f34fb269265e3aba43a2a9c734020a45268826d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-24 11:34:02 +0300
+
+ Minor improvements to COPYING.
+
+ COPYING | 11 ++++++++---
+ 1 files changed, 8 insertions(+), 3 deletions(-)
+
+commit 0db1befcfbc120377df4b89923762f16d25f548a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-23 19:10:55 +0300
+
+ Fix incorrect usage of getopt_long(), which caused
+ invalid memory access if XZ_OPT was defined.
+
+ src/xz/args.c | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+commit 8f8ec942d6d21ada2096eaf063411bc8bc7e2d48
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-20 15:43:32 +0300
+
+ Avoid internal error with --format=xz --lzma1.
+
+ src/xz/coder.c | 16 ++++++++++++----
+ 1 files changed, 12 insertions(+), 4 deletions(-)
+
+commit 99f9e879a6a8bb54a65da99c12e0f390216c152a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-19 13:14:20 +0300
+
+ Major documentation update.
+
+ Installation and packaging instructions were added.
+ README and other generic docs were revised.
+
+ Some of the documentation files are now installed to $docdir.
+
+ AUTHORS | 35 ++++--
+ ChangeLog | 7 +-
+ INSTALL | 327 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ INSTALL.generic | 302 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ Makefile.am | 11 ++
+ PACKAGERS | 278 ++++++++++++++++++++++++++++++++++++++++++++++
+ README | 263 +++++++++++++++++++-------------------------
+ THANKS | 17 ++--
+ 8 files changed, 1070 insertions(+), 170 deletions(-)
+
+commit ef4cf1851de89022cba5674784f1a8f6343c15b0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-19 11:09:31 +0300
+
+ Added missing author notice to xzless.in.
+
+ src/scripts/xzless.in | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 4c9c989d45b188667799a7a1d6c728ed43f7bf77
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-18 18:54:55 +0300
+
+ Use AC_CONFIG_AUX_DIR to clean up the toplevel directory
+ a little.
+
+ Fixed a related bug in the toplevel Makefile.am.
+
+ Added the build-aux directory to .gitignore.
+
+ .gitignore | 1 +
+ Makefile.am | 1 -
+ configure.ac | 3 ++-
+ 3 files changed, 3 insertions(+), 2 deletions(-)
+
+commit 366e436090a7a87215e9bf0e3ddcd55f05b50587
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-18 14:34:08 +0300
+
+ Updated the totally outdated TODO file.
+
+ TODO | 117 +++++++++++++++--------------------------------------------------
+ 1 files changed, 27 insertions(+), 90 deletions(-)
+
+commit 64e498c89d8b9966e8663f43bf64d47c26c55c62
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-18 11:26:39 +0300
+
+ Added public domain notice into a few files.
+
+ src/common/common_w32res.rc | 9 ++++++++-
+ src/liblzma/liblzma.pc.in | 7 +++++++
+ src/liblzma/liblzma_w32res.rc | 7 +++++++
+ src/xz/xz_w32res.rc | 7 +++++++
+ src/xzdec/xzdec_w32res.rc | 7 +++++++
+ 5 files changed, 36 insertions(+), 1 deletions(-)
+
+commit a35755c5de808df027675688855d1b621a4fb428
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-14 21:10:36 +0300
+
+ Allow extra commas in filter-specific options on the xz command line.
+
+ This may slightly ease writing scripts that construct
+ filter-specific option strings dynamically.
+
+ src/xz/options.c | 7 +++++++
+ 1 files changed, 7 insertions(+), 0 deletions(-)
+
+commit 98f3cac1ad31191c5160a7e48398bf85141e941c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-14 18:04:31 +0300
+
+ Accept --lzma2=preset=6e where "e" is equivalent to --extreme
+ when no custom chain is in use.
+
+ src/xz/options.c | 80 +++++++++++++++++++++++++++++++++++++++--------------
+ 1 files changed, 59 insertions(+), 21 deletions(-)
+
+commit d873a09e956363e54bf58c577c8f7e487b6fb464
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-12 19:08:30 +0300
+
+ Add dist-hook to create ChangeLog from the commit log,
+ and to convert the man pages to PDF and plain text, which
+ may be convenient to those who cannot render man pages.
+
+ Makefile.am | 31 +++++++++++++++++++++++++++++++
+ 1 files changed, 31 insertions(+), 0 deletions(-)
+
+commit cd69a5a6c16c289f6f8e2823b03c72289472270f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-10 11:39:38 +0300
+
+ BCJ filters: Reject invalid start offsets with LZMA_OPTIONS_ERROR.
+
+ This is a quick and slightly dirty fix to make the code
+ conform to the latest file format specification. Without
+ this patch, it's possible to make corrupt files by
+ specifying a start offset that is not a multiple of the
+ filter's alignment. A custom start offset is almost never
+ used, so this was only a minor bug.
+
+ The xz command line tool doesn't validate the start offset,
+ so one will get a somewhat unclear error message when trying to use
+ an invalid start offset.
+
+ src/liblzma/simple/arm.c | 2 +-
+ src/liblzma/simple/armthumb.c | 2 +-
+ src/liblzma/simple/ia64.c | 2 +-
+ src/liblzma/simple/powerpc.c | 2 +-
+ src/liblzma/simple/simple_coder.c | 5 ++++-
+ src/liblzma/simple/simple_private.h | 3 ++-
+ src/liblzma/simple/sparc.c | 2 +-
+ src/liblzma/simple/x86.c | 2 +-
+ 8 files changed, 12 insertions(+), 8 deletions(-)
+
+commit eed9953732b801f6c97317fb3160445a8754180b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-10 11:33:21 +0300
+
+ Look for full command names instead of substrings
+ like "un", "cat", and "lz" when determining if
+ xz is run as unxz, xzcat, lzma, unlzma, or lzcat.
+
+ This is to ensure that if xz is renamed (e.g. via
+ --program-transform-name), it doesn't so easily
+ work in the wrong mode.
+
+ src/xz/args.c | 22 +++++++++++++---------
+ 1 files changed, 13 insertions(+), 9 deletions(-)
+
+commit 6f62fa88f4ff7ba78565c314c0e6e71c498fa658
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-08 23:06:46 +0300
+
+ Updated THANKS.
+
+ THANKS | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+commit 1754b7e03e2aa7e2e0196807fe8b0f3f5a637b0e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-08 23:05:29 +0300
+
+ Portability improvement to version.sh.
+
+ version.sh | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+commit 3bdb53792c0e3e3febe9370e56eda5b08f89410f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-08 22:50:16 +0300
+
+ Remove --force from xzdec.
+
+ It was ignored for compatibility with xz, but now that
+ --decompress --stdout --force copies unrecognized files
+ as is to stdout, simply ignoring --force in xzdec would
+ be wrong. xzdec will not support copying unrecognized
+ data as is to stdout, so it cannot support --force.
+
+ src/xzdec/xzdec.1 | 5 -----
+ src/xzdec/xzdec.c | 5 +----
+ 2 files changed, 1 insertions(+), 9 deletions(-)
+
+commit 5f16ef4abf220028a9ddbcb138217597a9455f62
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-06 10:36:04 +0300
+
+ Use sed instead of $(SED) so that we don't need to
+ use AC_PROG_SED. We don't do anything fancy with sed,
+ so this should work OK. libtool 2.2 sets SED but 1.5
+ doesn't, so $(SED) happened to work when using libtool 2.2.
+
+ src/liblzma/Makefile.am | 2 +-
+ src/scripts/Makefile.am | 28 ++++++++++++++--------------
+ src/xz/Makefile.am | 6 +++---
+ src/xzdec/Makefile.am | 6 +++---
+ 4 files changed, 21 insertions(+), 21 deletions(-)
+
+commit 96e4b257e101d72072d43e144897d92920270669
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-05 22:25:17 +0300
+
+ Major update to the xzgrep and other scripts based on
+ the latest versions found in the gzip CVS repository.
+
+ configure will try to find a POSIX shell to be used by
+ the scripts. This should ease portability on systems
+ which have pre-POSIX /bin/sh.
+
+ xzgrep and xzdiff support .xz, .lzma, .gz, and .bz2 files.
+ xzmore and xzless support only .xz and .lzma files.
+
+ The name of the xz executable used in these scripts is
+ now correct even if --program-transform-name has been used.
+
+ configure.ac | 14 ++++
+ m4/posix-shell.m4 | 63 +++++++++++++++
+ src/scripts/Makefile.am | 24 +++---
+ src/scripts/xzdiff | 67 ----------------
+ src/scripts/xzdiff.1 | 58 +++++++++-----
+ src/scripts/xzdiff.in | 172 +++++++++++++++++++++++++++++++++++++++++
+ src/scripts/xzgrep | 123 -----------------------------
+ src/scripts/xzgrep.1 | 85 ++++++++++++--------
+ src/scripts/xzgrep.in | 196 +++++++++++++++++++++++++++++++++++++++++++++++
+ src/scripts/xzless.1 | 66 ++++++++++++++++
+ src/scripts/xzless.in | 51 ++++++++++++
+ src/scripts/xzmore | 74 ------------------
+ src/scripts/xzmore.1 | 64 ++++++---------
+ src/scripts/xzmore.in | 78 +++++++++++++++++++
+ 14 files changed, 766 insertions(+), 369 deletions(-)
+
+commit 25cc7a6e8c2506a0d80084a4c1c67d33e7439100
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-05 19:26:53 +0300
+
+ Use @PACKAGE_HOMEPAGE@ in liblzma.pc.in.
+
+ src/liblzma/liblzma.pc.in | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 18c10c30d2833f394cd7bce0e6a821044b15832f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-04 00:40:44 +0300
+
+ Make "xz --decompress --stdout --force" copy unrecognized
+ files as is to standard output.
+
+ This feature is needed to be more compatible with gzip's
+ behavior. This was more complicated to implement than it
+ sounds, because of the way liblzma is able to return errors with
+ files of only a few bytes in size. xz now has its own file
+ type detection code and no longer uses lzma_auto_decoder().
+
+ src/xz/coder.c | 213 ++++++++++++++++++++++++++++++++++++++++++++++---------
+ 1 files changed, 178 insertions(+), 35 deletions(-)
+
+commit 0a289c01ac821ea9c4250aa906b0ae3cfa953633
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-02 14:30:38 +0300
+
+ Define PACKAGE_HOMEPAGE in configure.ac and use it in
+ xz and xzdec.
+
+ Use also PACKAGE_NAME instead of hardcoding "XZ Utils".
+
+ configure.ac | 5 +++++
+ src/xz/message.c | 4 ++--
+ src/xzdec/xzdec.c | 4 ++--
+ 3 files changed, 9 insertions(+), 4 deletions(-)
+
+commit 5cc99db5bae8633f85559e5cdaef4cd905a4ee9c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-07-01 12:21:24 +0300
+
+ Avoid visibility related compiler warnings on Windows.
+
+ configure.ac | 20 +++++++++-----------
+ 1 files changed, 9 insertions(+), 11 deletions(-)
+
+commit 7653d1cf48080e63b189ed9d58dea0e82b6b1c5e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-30 17:14:39 +0300
+
+ Use static liblzma by default also for tests.
+
+ tests/Makefile.am | 5 ++++-
+ 1 files changed, 4 insertions(+), 1 deletions(-)
+
+commit f42ee981668b545ab6d06c6072e262c29605273c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-30 17:09:57 +0300
+
+ Build system fixes
+
+ Don't use libtool convenience libraries to avoid recently
+ discovered long-standing subtle but somewhat severe bugs
+ in libtool (at least 1.5.22 and 2.2.6 are affected). It
+ was found when porting XZ Utils to Windows
+ <http://lists.gnu.org/archive/html/libtool/2009-06/msg00070.html>
+ but the problem is significant also e.g. on GNU/Linux.
+
+ Unless --disable-shared is passed to configure, a static
+ library built from a set of convenience libraries will
+ contain PIC objects. That is, while libtool builds non-PIC
+ objects too, only PIC objects will be used from the
+ convenience libraries. On 32-bit x86 (tested on mobile XP2400+),
+ using PIC instead of non-PIC makes the decompressor 10 % slower
+ with the default CFLAGS.
+
+ So while xz was linked against static liblzma by default,
+ it got the slower PIC objects unless --disable-shared was
+ used. I tend to develop and benchmark with --disable-shared
+ due to the faster build time, so I hadn't noticed the problem
+ in benchmarks earlier.
+
+ This commit also adds support for building Windows resources
+ into liblzma and executables.
+
+ configure.ac | 34 ++++++++++-----
+ src/liblzma/Makefile.am | 79 +++++++++++++++++++++++++++-------
+ src/liblzma/check/Makefile.am | 47 ---------------------
+ src/liblzma/check/Makefile.inc | 51 ++++++++++++++++++++++
+ src/liblzma/common/Makefile.am | 78 ----------------------------------
+ src/liblzma/common/Makefile.inc | 67 +++++++++++++++++++++++++++++
+ src/liblzma/common/common.h | 16 ++++---
+ src/liblzma/delta/Makefile.am | 28 ------------
+ src/liblzma/delta/Makefile.inc | 23 ++++++++++
+ src/liblzma/lz/Makefile.am | 29 -------------
+ src/liblzma/lz/Makefile.inc | 21 +++++++++
+ src/liblzma/lzma/Makefile.am | 51 ----------------------
+ src/liblzma/lzma/Makefile.inc | 43 +++++++++++++++++++
+ src/liblzma/rangecoder/Makefile.am | 26 -----------
+ src/liblzma/rangecoder/Makefile.inc | 21 +++++++++
+ src/liblzma/simple/Makefile.am | 51 ----------------------
+ src/liblzma/simple/Makefile.inc | 47 +++++++++++++++++++++
+ src/liblzma/subblock/Makefile.am | 26 -----------
+ src/liblzma/subblock/Makefile.inc | 20 +++++++++
+ src/xz/Makefile.am | 11 ++++-
+ src/xzdec/Makefile.am | 20 ++++++++-
+ 21 files changed, 417 insertions(+), 372 deletions(-)
+
+commit 89dac1db6f168d7469cfbc4432651d4724c5c0de
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-29 22:19:51 +0300
+
+ Added a comment about "autoconf -fi" to autogen.sh.
+
+ autogen.sh | 3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+commit 6e685aae4594bc0af1b5032e01bb37d0edaa3ebd
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-28 10:04:24 +0300
+
+ Add -no-undefined to get shared liblzma on Windows.
+
+ src/liblzma/Makefile.am | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 73f560ee5fa064992b76688d9472baf139432540
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-27 22:57:15 +0300
+
+ Make physmem() work on Cygwin 1.5 and older.
+
+ src/common/physmem.h | 77 ++++++++++++++++++++++++++------------------------
+ 1 files changed, 40 insertions(+), 37 deletions(-)
+
+commit 7ff0004fbce24ae72eddfe392828ffd7d4639ed1
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-27 17:28:01 +0300
+
+ Moved the Windows resource files outside the windows directory
+ to prepare for building them with Autotools.
+
+ src/common/common_w32res.rc | 46 +++++++++++++++++++++++++++++++++++++++++
+ src/liblzma/liblzma_w32res.rc | 5 ++++
+ src/xz/xz_w32res.rc | 5 ++++
+ src/xzdec/lzmadec_w32res.rc | 5 ++++
+ src/xzdec/xzdec_w32res.rc | 5 ++++
+ windows/Makefile | 35 +++++++++++++++++--------------
+ windows/common.rc | 46 -----------------------------------------
+ windows/liblzma.rc | 5 ----
+ windows/lzmadec.rc | 5 ----
+ windows/xz.rc | 5 ----
+ windows/xzdec.rc | 5 ----
+ 11 files changed, 85 insertions(+), 82 deletions(-)
+
+commit 449c634674f35336a4815d398172e447659a135e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-27 13:05:03 +0300
+
+ Added missing $(EXEEXT).
+
+ src/xz/Makefile.am | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 792db79f27ad9ab1fb977e23be65c7761f545752
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-27 12:32:40 +0300
+
+ Create correct symlinks even when
+ --program-{prefix,suffix,transform} is passed to configure.
+
+ src/scripts/Makefile.am | 80 +++++++++++++++++++++++++++--------------------
+ src/xz/Makefile.am | 21 +++++++-----
+ src/xzdec/Makefile.am | 9 +++--
+ 3 files changed, 65 insertions(+), 45 deletions(-)
+
+commit 0adc72feb84f5b903f6ad9d3f759b1c326fafc6b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-27 10:02:24 +0300
+
+ Silence a compiler warning on DOS-like systems.
+
+ src/xz/file_io.c | 5 ++++-
+ 1 files changed, 4 insertions(+), 1 deletions(-)
+
+commit ad12edc95254ede3f0cb8dec8645e8789e984c4f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-27 09:35:15 +0300
+
+ Updated the filenames in POTFILES.in too.
+
+ po/POTFILES.in | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+commit b2b1f867532732fe9969131f8713bdd6b0731763
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-27 00:43:06 +0300
+
+ Hopefully improved portability of the assembler code in
+ Autotools-based builds on Windows.
+
+ src/liblzma/check/crc32_x86.S | 8 +++++++-
+ src/liblzma/check/crc64_x86.S | 8 +++++++-
+ 2 files changed, 14 insertions(+), 2 deletions(-)
+
+commit c393055947247627a09b6a6b8f20aa0c32f9be16
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-26 21:17:29 +0300
+
+ Updated THANKS (most of today's commits are based on
+ Charles Wilson's patches).
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit da0af22e4b4139b8a10710945f8b245b3a77c97d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-26 21:00:35 +0300
+
+ Updated comments to match renamed files.
+
+ src/xz/coder.c | 2 +-
+ src/xz/coder.h | 2 +-
+ src/xz/file_io.c | 2 +-
+ src/xz/file_io.h | 2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+commit 65014fd211dfbd4be48685998cb5a12aaa29c8d2
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-26 20:49:54 +0300
+
+ Rename process.[hc] to coder.[hc] and io.[hc] to file_io.[hc]
+ to avoid problems on systems with system headers with those
+ names.
+
+ dos/Makefile | 4 +-
+ src/xz/Makefile.am | 8 +-
+ src/xz/coder.c | 488 +++++++++++++++++++++++++++++++++++
+ src/xz/coder.h | 57 ++++
+ src/xz/file_io.c | 716 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ src/xz/file_io.h | 86 +++++++
+ src/xz/io.c | 716 ----------------------------------------------------
+ src/xz/io.h | 86 -------
+ src/xz/private.h | 4 +-
+ src/xz/process.c | 488 -----------------------------------
+ src/xz/process.h | 57 ----
+ windows/Makefile | 4 +-
+ 12 files changed, 1357 insertions(+), 1357 deletions(-)
+
+commit 5e1257466dcb66f1d7a3f71814a5ad885cba43e8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-26 20:43:36 +0300
+
+ Rename process_file() to coder_run().
+
+ src/xz/main.c | 6 +++---
+ src/xz/process.c | 6 +++---
+ src/xz/process.h | 5 ++---
+ 3 files changed, 8 insertions(+), 9 deletions(-)
+
+commit cad62551c5fa9865dbe0841a0b3bc729c4fbe8fc
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-26 20:36:45 +0300
+
+ Ugly hack to make it possible to use the thousand separator
+ format character with snprintf() on POSIX systems but not
+ on non-POSIX systems and still keep xgettext working.
+
+ dos/Makefile | 16 +++-------------
+ src/xz/message.c | 17 +++++++++--------
+ src/xz/process.c | 30 +++++++++++++++---------------
+ src/xz/util.c | 34 ++++++++++++++++++++++++++++++++++
+ src/xz/util.h | 20 ++++++++++++++++++++
+ windows/Makefile | 13 +++----------
+ 6 files changed, 84 insertions(+), 46 deletions(-)
+
+commit fe378d47074b16c52b00fe184d119287c68ce2e7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-26 15:40:40 +0300
+
+ Added missing source files to windows/Makefile.
+
+ windows/Makefile | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+commit 390a6408563067613b29de895cb40e4d0386d62c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-26 15:37:53 +0300
+
+ Basic support for building with Cygwin and MinGW using
+ the Autotools-based build system. It's not good yet; more
+ fixes will follow.
+
+ configure.ac | 7 +++++++
+ src/liblzma/api/lzma.h | 7 +++++--
+ src/liblzma/check/crc32_x86.S | 7 ++++---
+ src/liblzma/check/crc64_x86.S | 7 ++++---
+ src/liblzma/common/common.h | 2 +-
+ windows/Makefile | 16 +++++++---------
+ 6 files changed, 28 insertions(+), 18 deletions(-)
+
+commit 1c9360b7d1197457aaad2f8888b99f1149861579
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-26 14:47:31 +0300
+
+ Fix @variables@ to $(variables) in Makefile.am files.
+ Fix the ordering of libgnu.a and LTLIBINTL on the linker
+ command line and add the missing LTLIBINTL to tests/Makefile.am.
+
+ debug/Makefile.am | 12 ++++++------
+ src/liblzma/check/Makefile.am | 4 ++--
+ src/liblzma/common/Makefile.am | 16 ++++++++--------
+ src/liblzma/delta/Makefile.am | 4 ++--
+ src/liblzma/lz/Makefile.am | 6 +++---
+ src/liblzma/lzma/Makefile.am | 8 ++++----
+ src/liblzma/rangecoder/Makefile.am | 4 ++--
+ src/liblzma/simple/Makefile.am | 4 ++--
+ src/liblzma/subblock/Makefile.am | 4 ++--
+ src/xz/Makefile.am | 21 +++++++++++----------
+ src/xzdec/Makefile.am | 19 ++++++++++---------
+ tests/Makefile.am | 12 +++++++-----
+ 12 files changed, 59 insertions(+), 55 deletions(-)
+
+commit d45615c555e250209ebb55aa3649abe790f1eeac
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-26 14:20:02 +0300
+
+ Allow specifying the autotool versions explicitly in autogen.sh.
+
+ autogen.sh | 12 ++++++------
+ 1 files changed, 6 insertions(+), 6 deletions(-)
+
+commit eaf8367368a329afa48785380f9dca6b681f3397
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-26 14:18:32 +0300
+
+ Add version.sh to EXTRA_DIST.
+
+ Makefile.am | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+commit b317b218e2d383dd27a700094c0de4510540ea18
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-24 20:14:10 +0300
+
+ Support HW_PHYSMEM64
+
+ src/common/physmem.h | 30 +++++++++++++++++++-----------
+ 1 files changed, 19 insertions(+), 11 deletions(-)
+
+commit ae82dde5d9cc60c80cc89601b6c51cc1611d48e7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-24 13:01:59 +0300
+
+ Cast a char argument to isspace() to unsigned char.
+
+ src/xz/args.c | 11 +++++++++--
+ 1 files changed, 9 insertions(+), 2 deletions(-)
+
+commit 1735d31ea347210e914df038eeea4b2626e76e42
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-05 13:46:26 +0300
+
+ A few more spelling fixes. Released the .xz spec 1.0.3.
+
+ doc/xz-file-format.txt | 12 +++++++-----
+ 1 files changed, 7 insertions(+), 5 deletions(-)
+
+commit 8ed156ce894966103e895aa08f2a9fb912f6fad5
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-04 23:42:12 +0300
+
+ Added xzdec man page.
+
+ src/xzdec/Makefile.am | 11 +++
+ src/xzdec/xzdec.1 | 173 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 184 insertions(+), 0 deletions(-)
+
+commit f6df39afaa84f71439507178a49b2a5dda6e824c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-04 23:26:47 +0300
+
+ Harmonized xzdec --memory with xz --memory and made
+ minor cleanups.
+
+ src/xzdec/xzdec.c | 74 ++++++++++++++++++++++++++++++++++++++---------------
+ 1 files changed, 53 insertions(+), 21 deletions(-)
+
+commit 1774f27c61ce294a56712ca2f4785f90a62441bc
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-04 22:59:55 +0300
+
+ Fix purporse -> purpose. Thanks to Andrew Dudman.
+ Released .xz spec 1.0.2 due to this fix too.
+
+ THANKS | 1 +
+ doc/xz-file-format.txt | 8 +++++---
+ src/liblzma/liblzma.pc.in | 2 +-
+ windows/Makefile | 2 +-
+ 4 files changed, 8 insertions(+), 5 deletions(-)
+
+commit cb613455642f48fb51059e22018615f64c59b70f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-06-01 14:53:57 +0300
+
+ The .xz file format version 1.0.1
+
+ doc/xz-file-format.txt | 29 ++++++++++++++++++++++-------
+ 1 files changed, 22 insertions(+), 7 deletions(-)
+
+commit 083c23c680ff844846d177cfc58bb7a874e7e6b9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-26 14:48:48 +0300
+
+ Make the raw value of the Check field available to applications
+ via the lzma_block structure.
+
+ This changes the ABI but doesn't break the API.
+
+ src/liblzma/api/lzma/block.h | 17 ++++++++++++++
+ src/liblzma/common/block_buffer_encoder.c | 1 +
+ src/liblzma/common/block_decoder.c | 34 ++++++++++++----------------
+ src/liblzma/common/block_encoder.c | 21 +++++++----------
+ 4 files changed, 42 insertions(+), 31 deletions(-)
+
+commit b4f5c814090dc07d4350453576305e41eb9c998d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-23 16:57:21 +0300
+
+ Remove undocumented alternative option names --bcj, --ppc,
+ and --itanium.
+
+ src/xz/args.c | 3 ---
+ 1 files changed, 0 insertions(+), 3 deletions(-)
+
+commit b1edee2cdc7ef4411b1a21c07094ec763f071281
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-23 15:12:23 +0300
+
+ Add support for specifying the BCJ filter start offset
+ in the xz command line tool.
+
+ src/xz/args.c | 36 +++++++++++++++++++++---------------
+ src/xz/message.c | 14 ++++++++------
+ src/xz/options.c | 40 ++++++++++++++++++++++++++++++++++++++++
+ src/xz/options.h | 7 +++++++
+ 4 files changed, 76 insertions(+), 21 deletions(-)
+
+commit 72aa0e9c5f4289f10ef5bf240a9448d3017f1ceb
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-23 14:51:09 +0300
+
+ Updated THANKS.
+
+ THANKS | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+commit dcedb6998cefeca6597dd1219328a3abf5acf66d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-22 16:40:50 +0300
+
+ Added support for --quiet and --no-warn to xzdec.
+ Cleaned up the --help message a little.
+
+ src/xzdec/xzdec.c | 76 ++++++++++++++++++++++++++++++++++-------------------
+ 1 files changed, 49 insertions(+), 27 deletions(-)
+
+commit 5f735dae80aa629853f4831d7b84ec1c614979eb
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-22 15:11:52 +0300
+
+ Use the 40 % of RAM memory usage limit in xzdec too.
+
+ Update the memory usage info text in --help to match
+ the text in xz --long-help.
+
+ src/xzdec/xzdec.c | 10 +++++-----
+ 1 files changed, 5 insertions(+), 5 deletions(-)
+
+commit b60376249e0c586910c4121fab4f791820cc1289
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-22 14:43:00 +0300
+
+ Add --no-warn.
+
+ src/xz/args.c | 8 +++++++-
+ src/xz/main.c | 17 +++++++++++++++++
+ src/xz/main.h | 6 ++++++
+ src/xz/message.c | 4 ++++
+ 4 files changed, 34 insertions(+), 1 deletions(-)
+
+commit b4f92f522d4b854c0adb7c38be7531e1a6a7b008
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-22 14:27:40 +0300
+
+ Fix a comment.
+
+ src/xz/main.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 4dd21d23f22569285ae706b58b0e5904b8db1839
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-22 14:21:20 +0300
+
+ Remove the --info option, which was an alias for --list.
+
+ src/xz/args.c | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+commit 8836139b63ce774bdd62abf17ab69b290e08229e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-22 12:27:43 +0300
+
+ If xz is run as lzma, unlzma, or lzcat, simply imply
+ --format=lzma. This means that xz emulating lzma
+ doesn't decompress .xz files, while before this
+ commit it did. The new way is slightly simpler in
+ code and especially in upcoming documentation.
+
+ src/xz/args.c | 17 ++++++-----------
+ 1 files changed, 6 insertions(+), 11 deletions(-)
+
+commit b0063023f8adb06ea735ec4af5c6f5b7bdb8e84d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-22 11:29:50 +0300
+
+ Make the default memory usage limit 40 % of RAM for both
+ compressing and decompressing. This should be OK now that
+ xz automatically scales down the compression settings if
+ they would exceed the memory usage limit (earlier, the limit
+ for compression was increased to 90 % because a low limit broke
+ scripts that used "xz -9" on systems with low RAM).
+
+ Support specifying the memory usage limit as a percentage
+ of RAM (e.g. --memory=50%).
+
+ Support --threads=0 to reset the thread limit to the default
+ value (number of available CPU cores). Use UINT32_MAX instead
+ of SIZE_MAX as the maximum in args.c. hardware.c was already
+ expecting uint32_t value.
+
+ Cleaned up the output of --help and --long-help.
+
+ src/xz/args.c | 28 +++++++++++++----
+ src/xz/hardware.c | 86 ++++++++++++++++++++++------------------------------
+ src/xz/hardware.h | 10 +++---
+ src/xz/message.c | 28 ++++++++---------
+ src/xz/process.c | 18 ++++-------
+ 5 files changed, 82 insertions(+), 88 deletions(-)
+
+commit 071b825b23911a69dd1cd2f8cda004ef8a781fae
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-21 17:22:01 +0300
+
+ Support the special value "max" where xz and xzdec accept an integer.
+ Don't round the memory usage limit in xzdec --help to avoid
+ an integer overflow and to not give the wrong impression that
+ the limit is high enough when it may not actually be.
+
+ src/xz/util.c | 4 ++++
+ src/xzdec/xzdec.c | 6 +++++-
+ 2 files changed, 9 insertions(+), 1 deletions(-)
+
+commit 03ca67fd37dd43fa7f590de340899cd497c10802
+Author: ABCD <en.abcd@gmail.com>
+Date: 2009-05-20 17:31:18 -0400
+
+ Install lzdiff, lzgrep, and lzmore as symlinks
+
+ This adds lzdiff, lzgrep, and lzmore to the list of symlinks to install.
+ It also installs symlinks for the manual pages and removes the new
+ symlinks on uninstall.
+
+ src/scripts/Makefile.am | 16 ++++++++++++++--
+ 1 files changed, 14 insertions(+), 2 deletions(-)
+
+commit a6f43e64128a6da5cd641de1e1e527433b3e5638
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-02 16:10:14 +0300
+
+ Use a GCC-specific #pragma instead of GCC-specific
+ -Wno-uninitialized to silence a bogus warning.
+
+ configure.ac | 13 -------------
+ src/liblzma/check/Makefile.am | 5 -----
+ src/liblzma/check/sha256.c | 5 +++++
+ 3 files changed, 5 insertions(+), 18 deletions(-)
+
+commit f6ce63ebdb45a857c8949960c83c9580ae888951
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-02 14:46:50 +0300
+
+ Removed --disable-encoder and --disable-decoder. Use the values
+ given to --enable-encoders and --enable-decoders to determine
+ if any encoder or decoder support is wanted.
+
+ configure.ac | 48 ++++++++----------------------------------------
+ 1 files changed, 8 insertions(+), 40 deletions(-)
+
+commit be06858d5cf8ba46557395035d821dc332f3f830
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-01 11:28:52 +0300
+
+ Remove docs that are too outdated to be updated
+ (a rewrite will be better).
+
+ doc/liblzma-advanced.txt | 324 ----------------------------------------------
+ doc/liblzma-hacking.txt | 112 ----------------
+ doc/liblzma-intro.txt | 194 ---------------------------
+ doc/liblzma-security.txt | 219 -------------------------------
+ doc/lzma-intro.txt | 107 ---------------
+ 5 files changed, 0 insertions(+), 956 deletions(-)
+
+commit 0255401e57c96af87c6b159eca28974e79430a82
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-01 11:21:46 +0300
+
+ Added documentation about the legacy .lzma file format.
+
+ doc/lzma-file-format.txt | 166 ++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 166 insertions(+), 0 deletions(-)
+
+commit 1496ff437c46f38303e0e94c511ca604b3a11f85
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-05-01 11:20:23 +0300
+
+ Renamed the file format specification to xz-file-format.txt,
+ which is the filename used on the WWW.
+
+ doc/file-format.txt | 1127 ------------------------------------------------
+ doc/xz-file-format.txt | 1127 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 1127 insertions(+), 1127 deletions(-)
+
+commit 21c6b94373d239d7e86bd480fcd558e30391712f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-04-28 23:08:32 +0300
+
+ Fixed a crash in liblzma.
+
+ liblzma tries to avoid useless free()/malloc() pairs in
+ initialization when multiple files are handled using the
+ same lzma_stream. This didn't work with filter chains
+ due to comparison of wrong pointers in lzma_next_coder_init(),
+ making liblzma think that no memory reallocation is needed
+ even when it actually is.
+
+ An easy way to trigger this bug is to decompress two files with
+ a single xz command. The first file should have e.g. x86+LZMA2
+ as the filter chain, and the second file just LZMA2.
+
+ src/liblzma/common/alone_decoder.c | 2 +-
+ src/liblzma/common/alone_encoder.c | 4 ++--
+ src/liblzma/common/auto_decoder.c | 2 +-
+ src/liblzma/common/block_decoder.c | 2 +-
+ src/liblzma/common/block_encoder.c | 2 +-
+ src/liblzma/common/common.h | 4 ++--
+ src/liblzma/common/easy_encoder.c | 2 +-
+ src/liblzma/common/index_decoder.c | 2 +-
+ src/liblzma/common/index_encoder.c | 2 +-
+ src/liblzma/common/stream_decoder.c | 2 +-
+ src/liblzma/common/stream_encoder.c | 2 +-
+ 11 files changed, 13 insertions(+), 13 deletions(-)
+
+commit e518d167aa5958e469982f4fb3a24b9b6a2b5d1c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-04-15 14:13:38 +0300
+
+ Fix uint32_t -> size_t in ARM and ARM-Thumb filters.
+
+ On a 64-bit system it would have gone into an infinite
+ loop if a single input buffer was over 4 GiB (unlikely).
+
+ src/liblzma/simple/arm.c | 2 +-
+ src/liblzma/simple/armthumb.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+commit 31decdce041581e57c0d8a407d4795b114ef27ca
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-04-14 11:48:46 +0300
+
+ Minor fixes to test files' README.
+
+ tests/files/README | 17 +++++++++--------
+ 1 files changed, 9 insertions(+), 8 deletions(-)
+
+commit 4787d654434891c7df5b43959b0d2873718f06e0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-04-13 16:36:41 +0300
+
+ Updated history.txt.
+
+ doc/history.txt | 123 +++++++++++++++++++++++++++++-------------------------
+ 1 files changed, 66 insertions(+), 57 deletions(-)
+
+commit 2f0bc9cd40f709152a0177c8e585c0757e9af9c9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-04-13 14:49:48 +0300
+
+ Quick & dirty update to support xz in diff/grep/more scripts.
+
+ src/scripts/Makefile.am | 38 +++++++++-----
+ src/scripts/lzdiff | 67 -------------------------
+ src/scripts/lzdiff.1 | 51 -------------------
+ src/scripts/lzgrep | 123 -----------------------------------------------
+ src/scripts/lzgrep.1 | 61 -----------------------
+ src/scripts/lzmore | 74 ----------------------------
+ src/scripts/lzmore.1 | 55 ---------------------
+ src/scripts/xzdiff | 67 +++++++++++++++++++++++++
+ src/scripts/xzdiff.1 | 58 ++++++++++++++++++++++
+ src/scripts/xzgrep | 123 +++++++++++++++++++++++++++++++++++++++++++++++
+ src/scripts/xzgrep.1 | 77 +++++++++++++++++++++++++++++
+ src/scripts/xzmore | 74 ++++++++++++++++++++++++++++
+ src/scripts/xzmore.1 | 66 +++++++++++++++++++++++++
+ 13 files changed, 489 insertions(+), 445 deletions(-)
+
+commit 02ddf09bc3079b3e17297729b9e43f14d407b8fc
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-04-13 11:27:40 +0300
+
+ Put the interesting parts of XZ Utils into the public domain.
+ Some minor documentation cleanups were made at the same time.
+
+ COPYING | 67 ++++++++++++++++++------
+ ChangeLog | 2 +-
+ Doxyfile.in | 8 ++-
+ Makefile.am | 13 +----
+ autogen.sh | 9 +++
+ configure.ac | 13 +----
+ debug/Makefile.am | 13 +----
+ debug/crc32.c | 13 +----
+ debug/full_flush.c | 13 +----
+ debug/hex2bin.c | 7 +--
+ debug/known_sizes.c | 13 +----
+ debug/memusage.c | 13 +----
+ debug/repeat.c | 13 +----
+ debug/sync_flush.c | 13 +----
+ doc/faq.txt | 38 +------------
+ lib/Makefile.am | 18 +++---
+ src/Makefile.am | 13 +----
+ src/common/bswap.h | 7 +--
+ src/common/cpucores.h | 7 +--
+ src/common/integer.h | 7 +--
+ src/common/mythread.h | 4 +-
+ src/common/open_stdxxx.h | 7 +--
+ src/common/physmem.h | 7 +--
+ src/common/sysdefs.h | 13 +----
+ src/liblzma/Makefile.am | 13 +----
+ src/liblzma/api/Makefile.am | 13 +----
+ src/liblzma/api/lzma.h | 38 ++++++++-----
+ src/liblzma/api/lzma/base.h | 20 +++----
+ src/liblzma/api/lzma/bcj.h | 18 +++----
+ src/liblzma/api/lzma/block.h | 18 +++----
+ src/liblzma/api/lzma/check.h | 18 +++----
+ src/liblzma/api/lzma/container.h | 18 +++----
+ src/liblzma/api/lzma/delta.h | 18 +++----
+ src/liblzma/api/lzma/filter.h | 18 +++----
+ src/liblzma/api/lzma/index.h | 18 +++----
+ src/liblzma/api/lzma/index_hash.h | 22 +++-----
+ src/liblzma/api/lzma/lzma.h | 18 +++----
+ src/liblzma/api/lzma/stream_flags.h | 18 +++----
+ src/liblzma/api/lzma/subblock.h | 18 +++----
+ src/liblzma/api/lzma/version.h | 18 +++----
+ src/liblzma/api/lzma/vli.h | 42 +++++++--------
+ src/liblzma/check/Makefile.am | 8 +--
+ src/liblzma/check/check.c | 7 +--
+ src/liblzma/check/check.h | 7 +--
+ src/liblzma/check/crc32_fast.c | 30 ++++------
+ src/liblzma/check/crc32_small.c | 7 +--
+ src/liblzma/check/crc32_table.c | 7 +--
+ src/liblzma/check/crc32_tablegen.c | 7 +--
+ src/liblzma/check/crc32_x86.S | 21 +++++---
+ src/liblzma/check/crc64_fast.c | 20 +++----
+ src/liblzma/check/crc64_small.c | 7 +--
+ src/liblzma/check/crc64_table.c | 7 +--
+ src/liblzma/check/crc64_tablegen.c | 7 +--
+ src/liblzma/check/crc64_x86.S | 14 +++--
+ src/liblzma/check/crc_macros.h | 9 ++--
+ src/liblzma/check/sha256.c | 23 +++++---
+ src/liblzma/common/Makefile.am | 13 +----
+ src/liblzma/common/alone_decoder.c | 13 +----
+ src/liblzma/common/alone_decoder.h | 13 +----
+ src/liblzma/common/alone_encoder.c | 13 +----
+ src/liblzma/common/auto_decoder.c | 13 +----
+ src/liblzma/common/block_buffer_decoder.c | 13 +----
+ src/liblzma/common/block_buffer_encoder.c | 13 +----
+ src/liblzma/common/block_decoder.c | 13 +----
+ src/liblzma/common/block_decoder.h | 13 +----
+ src/liblzma/common/block_encoder.c | 13 +----
+ src/liblzma/common/block_encoder.h | 13 +----
+ src/liblzma/common/block_header_decoder.c | 13 +----
+ src/liblzma/common/block_header_encoder.c | 13 +----
+ src/liblzma/common/block_util.c | 13 +----
+ src/liblzma/common/bsr.h | 7 +--
+ src/liblzma/common/chunk_size.c | 13 +----
+ src/liblzma/common/common.c | 13 +----
+ src/liblzma/common/common.h | 13 +----
+ src/liblzma/common/easy_buffer_encoder.c | 13 +----
+ src/liblzma/common/easy_decoder_memusage.c | 13 +----
+ src/liblzma/common/easy_encoder.c | 13 +----
+ src/liblzma/common/easy_encoder_memusage.c | 13 +----
+ src/liblzma/common/easy_preset.c | 13 +----
+ src/liblzma/common/easy_preset.h | 13 +----
+ src/liblzma/common/filter_buffer_decoder.c | 13 +----
+ src/liblzma/common/filter_buffer_encoder.c | 13 +----
+ src/liblzma/common/filter_common.c | 13 +----
+ src/liblzma/common/filter_common.h | 13 +----
+ src/liblzma/common/filter_decoder.c | 13 +----
+ src/liblzma/common/filter_decoder.h | 13 +----
+ src/liblzma/common/filter_encoder.c | 13 +----
+ src/liblzma/common/filter_encoder.h | 13 +----
+ src/liblzma/common/filter_flags_decoder.c | 13 +----
+ src/liblzma/common/filter_flags_encoder.c | 13 +----
+ src/liblzma/common/index.c | 13 +----
+ src/liblzma/common/index.h | 13 +----
+ src/liblzma/common/index_decoder.c | 13 +----
+ src/liblzma/common/index_encoder.c | 13 +----
+ src/liblzma/common/index_encoder.h | 13 +----
+ src/liblzma/common/index_hash.c | 13 +----
+ src/liblzma/common/stream_buffer_decoder.c | 13 +----
+ src/liblzma/common/stream_buffer_encoder.c | 13 +----
+ src/liblzma/common/stream_decoder.c | 13 +----
+ src/liblzma/common/stream_decoder.h | 13 +----
+ src/liblzma/common/stream_encoder.c | 13 +----
+ src/liblzma/common/stream_encoder.h | 13 +----
+ src/liblzma/common/stream_flags_common.c | 13 +----
+ src/liblzma/common/stream_flags_common.h | 13 +----
+ src/liblzma/common/stream_flags_decoder.c | 13 +----
+ src/liblzma/common/stream_flags_encoder.c | 13 +----
+ src/liblzma/common/vli_decoder.c | 13 +----
+ src/liblzma/common/vli_encoder.c | 13 +----
+ src/liblzma/common/vli_size.c | 13 +----
+ src/liblzma/delta/Makefile.am | 13 +----
+ src/liblzma/delta/delta_common.c | 13 +----
+ src/liblzma/delta/delta_common.h | 13 +----
+ src/liblzma/delta/delta_decoder.c | 13 +----
+ src/liblzma/delta/delta_decoder.h | 13 +----
+ src/liblzma/delta/delta_encoder.c | 13 +----
+ src/liblzma/delta/delta_encoder.h | 13 +----
+ src/liblzma/delta/delta_private.h | 13 +----
+ src/liblzma/lz/Makefile.am | 13 +----
+ src/liblzma/lz/lz_decoder.c | 17 ++----
+ src/liblzma/lz/lz_decoder.h | 17 ++----
+ src/liblzma/lz/lz_encoder.c | 17 ++----
+ src/liblzma/lz/lz_encoder.h | 17 ++----
+ src/liblzma/lz/lz_encoder_hash.h | 13 +----
+ src/liblzma/lz/lz_encoder_mf.c | 17 ++----
+ src/liblzma/lzma/Makefile.am | 13 +----
+ src/liblzma/lzma/fastpos.h | 17 ++----
+ src/liblzma/lzma/fastpos_tablegen.c | 17 ++----
+ src/liblzma/lzma/lzma2_decoder.c | 17 ++----
+ src/liblzma/lzma/lzma2_decoder.h | 17 ++----
+ src/liblzma/lzma/lzma2_encoder.c | 17 ++----
+ src/liblzma/lzma/lzma2_encoder.h | 17 ++----
+ src/liblzma/lzma/lzma_common.h | 17 ++----
+ src/liblzma/lzma/lzma_decoder.c | 17 ++----
+ src/liblzma/lzma/lzma_decoder.h | 17 ++----
+ src/liblzma/lzma/lzma_encoder.c | 17 ++----
+ src/liblzma/lzma/lzma_encoder.h | 17 ++----
+ src/liblzma/lzma/lzma_encoder_optimum_fast.c | 13 +----
+ src/liblzma/lzma/lzma_encoder_optimum_normal.c | 13 +----
+ src/liblzma/lzma/lzma_encoder_presets.c | 13 +----
+ src/liblzma/lzma/lzma_encoder_private.h | 17 ++----
+ src/liblzma/rangecoder/Makefile.am | 13 +----
+ src/liblzma/rangecoder/price.h | 13 +----
+ src/liblzma/rangecoder/price_tablegen.c | 16 ++----
+ src/liblzma/rangecoder/range_common.h | 17 ++----
+ src/liblzma/rangecoder/range_decoder.h | 17 ++----
+ src/liblzma/rangecoder/range_encoder.h | 17 ++----
+ src/liblzma/simple/Makefile.am | 13 +----
+ src/liblzma/simple/arm.c | 17 ++----
+ src/liblzma/simple/armthumb.c | 17 ++----
+ src/liblzma/simple/ia64.c | 17 ++----
+ src/liblzma/simple/powerpc.c | 17 ++----
+ src/liblzma/simple/simple_coder.c | 13 +----
+ src/liblzma/simple/simple_coder.h | 14 +----
+ src/liblzma/simple/simple_decoder.c | 13 +----
+ src/liblzma/simple/simple_decoder.h | 13 +----
+ src/liblzma/simple/simple_encoder.c | 13 +----
+ src/liblzma/simple/simple_encoder.h | 13 +----
+ src/liblzma/simple/simple_private.h | 13 +----
+ src/liblzma/simple/sparc.c | 17 ++----
+ src/liblzma/simple/x86.c | 17 ++----
+ src/liblzma/subblock/Makefile.am | 13 +----
+ src/liblzma/subblock/subblock_decoder.c | 13 +----
+ src/liblzma/subblock/subblock_decoder.h | 13 +----
+ src/liblzma/subblock/subblock_decoder_helper.c | 13 +----
+ src/liblzma/subblock/subblock_decoder_helper.h | 13 +----
+ src/liblzma/subblock/subblock_encoder.c | 13 +----
+ src/liblzma/subblock/subblock_encoder.h | 13 +----
+ src/scripts/Makefile.am | 7 +++
+ src/xz/Makefile.am | 13 +----
+ src/xz/args.c | 13 +----
+ src/xz/args.h | 13 +----
+ src/xz/hardware.c | 13 +----
+ src/xz/hardware.h | 13 +----
+ src/xz/io.c | 13 +----
+ src/xz/io.h | 13 +----
+ src/xz/list.c | 13 +----
+ src/xz/main.c | 13 +----
+ src/xz/main.h | 13 +----
+ src/xz/message.c | 13 +----
+ src/xz/message.h | 13 +----
+ src/xz/options.c | 13 +----
+ src/xz/options.h | 13 +----
+ src/xz/private.h | 13 +----
+ src/xz/process.c | 13 +----
+ src/xz/process.h | 13 +----
+ src/xz/signals.c | 13 +----
+ src/xz/signals.h | 13 +----
+ src/xz/suffix.c | 13 +----
+ src/xz/suffix.h | 13 +----
+ src/xz/util.c | 13 +----
+ src/xz/util.h | 13 +----
+ src/xzdec/Makefile.am | 13 +----
+ src/xzdec/xzdec.c | 13 +----
+ tests/Makefile.am | 13 +----
+ tests/bcj_test.c | 7 +--
+ tests/create_compress_files.c | 13 +----
+ tests/test_block.c | 13 +----
+ tests/test_block_header.c | 13 +----
+ tests/test_check.c | 13 +----
+ tests/test_compress.sh | 13 +----
+ tests/test_files.sh | 13 +----
+ tests/test_filter_flags.c | 13 +----
+ tests/test_index.c | 13 +----
+ tests/test_stream_flags.c | 13 +----
+ tests/tests.h | 13 +----
+ windows/common.rc | 2 +-
+ 206 files changed, 866 insertions(+), 2021 deletions(-)
+
+commit e79c42d854657ae7f75613bd80c1a35ff7c525cb
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-04-10 11:17:02 +0300
+
+ Fix off-by-one in LZ decoder.
+
+ Fortunately, this bug had no security risk other than accepting
+ some corrupt files as valid.
+
+ src/liblzma/lz/lz_decoder.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 94eb9ad46f1fded6d8369cf3d38bb9754c1375af
+Author: Pavel Roskin <proski@gnu.org>
+Date: 2009-03-31 12:15:01 -0400
+
+ Fix minor typos in README
+
+ README | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+commit 9bab5336ebd765ec4e12252f416eefdf04eba750
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-03-31 21:52:51 +0300
+
+ Add a note and work-around instructions to README about
+ problems detecting a C99 compiler when some standard
+ headers are missing.
+
+ README | 11 +++++++++++
+ 1 files changed, 11 insertions(+), 0 deletions(-)
+
+commit a0497ff7a06f9350349264fe9b52dfefc6d53ead
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-03-18 16:54:38 +0200
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 390e69887fc5e0a108eb41203bed9acd100a3d76
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-03-18 16:51:41 +0200
+
+ Fix wrong macro names in lc_cpucores.m4 and cpucores.h.
+ Thanks to Bert Wesarg.
+
+ m4/lc_cpucores.m4 | 4 ++--
+ src/common/cpucores.h | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+commit 0df9299e2478c2a0c62c05b1ae14a85a353e20d6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-03-01 09:03:08 +0200
+
+ Test for Linux-specific sysinfo() only on Linux systems.
+ Some other systems have sysinfo() with different semantics.
+
+ m4/lc_physmem.m4 | 28 +++++++++++++++++++---------
+ 1 files changed, 19 insertions(+), 9 deletions(-)
+
+commit cf751edfde3ad6e088dc18e0522d31ae38405933
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-03-01 09:00:06 +0200
+
+ Added AC_CONFIG_MACRO_DIR to configure.ac.
+
+ configure.ac | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 63df14c57dee7c461717784287056688482a7eb9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-03-01 08:58:41 +0200
+
+ Fix the Autoconf test for getopt_long replacement.
+ It was broken by e114502b2bc371e4a45449832cb69be036360722.
+
+ m4/getopt.m4 | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+commit fd6a380f4eda4f00be5f2aa8d222992cd74a714f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-22 19:07:54 +0200
+
+ Add a rough explanation of --extreme to the output of --help.
+
+ src/xz/message.c | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+commit 68bf7ac2984d3627369a240ef0491934d53f7899
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-22 18:52:49 +0200
+
+ Fixes to progress message handling in xz:
+
+ - Don't use Windows-specific code on Windows. The old code
+ required at least Windows 2000. Now it should work on
+ Windows 98 and later, and maybe on Windows 95 too.
+
+ - Use less precision when showing estimated remaining time.
+
+ - Fix some small design issues.
+
+ src/xz/message.c | 483 ++++++++++++++++++++++++++++++++++--------------------
+ src/xz/message.h | 28 ++-
+ src/xz/process.c | 53 +++---
+ 3 files changed, 351 insertions(+), 213 deletions(-)
+
+commit 47c2e21f82242f50f18713a27d644c2c94ab3fea
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-18 13:00:10 +0200
+
+ Added files missing from the previous commit.
+
+ src/liblzma/api/lzma/container.h | 33 +++++++++++++++++++++++++++++++++
+ src/liblzma/common/Makefile.am | 7 ++++++-
+ 2 files changed, 39 insertions(+), 1 deletions(-)
+
+commit 489a3dbaa0465f04400804e956a1cfbbee3654a2
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-17 10:43:00 +0200
+
+ Added lzma_easy_buffer_encode(). Split easy.c into small
+ pieces to avoid unneeded dependencies making statically
+ linked applications bigger than needed.
+
+ dos/Makefile | 6 +-
+ src/liblzma/common/easy.c | 128 ----------------------------
+ src/liblzma/common/easy_buffer_encoder.c | 34 ++++++++
+ src/liblzma/common/easy_decoder_memusage.c | 31 +++++++
+ src/liblzma/common/easy_encoder.c | 87 +++++++++++++++++++
+ src/liblzma/common/easy_encoder_memusage.c | 31 +++++++
+ src/liblzma/common/easy_preset.c | 34 ++++++++
+ src/liblzma/common/easy_preset.h | 39 +++++++++
+ windows/Makefile | 6 +-
+ 9 files changed, 266 insertions(+), 130 deletions(-)
+
+commit 7494816ab08d82f4d6409788825930c4e43cfd0d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-15 15:48:45 +0200
+
+ Make physmem.h work on old Windows versions.
+ Thanks to Hongbo Ni for the original patch.
+
+ src/common/physmem.h | 31 +++++++++++++++++++++++++++----
+ 1 files changed, 27 insertions(+), 4 deletions(-)
+
+commit 11ae4ae35fd70182c713f2d914b7cb1143bc76f0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-14 20:44:52 +0200
+
+ Fix microsecond vs. nanosecond confusion in my_time().
+
+ src/xz/message.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 3084d662d2646ab7eb58daf0dc32cf3f9a74eec7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-14 00:45:29 +0200
+
+ Cleanups to the code that detects the amount of RAM and
+ the number of CPU cores. Added support for using sysinfo()
+ on Linux systems whose libc lacks appropriate sysconf()
+ support (at least dietlibc). The Autoconf macros were
+ split into separate files, and CPU core count detection
+ was moved from hardware.c to cpucores.h. The core count
+ isn't used for anything real for now, so a problematic
+ part in process.c was commented out.
+
+ configure.ac | 89 +-----------------------------------------------
+ m4/lc_cpucores.m4 | 57 +++++++++++++++++++++++++++++++
+ m4/lc_physmem.m4 | 74 ++++++++++++++++++++++++++++++++++++++++
+ src/common/cpucores.h | 52 ++++++++++++++++++++++++++++
+ src/common/physmem.h | 21 +++++++----
+ src/xz/args.c | 4 +-
+ src/xz/hardware.c | 50 ++++++++++++---------------
+ src/xz/hardware.h | 11 ++++--
+ src/xz/message.c | 5 ++-
+ src/xz/process.c | 2 +
+ 10 files changed, 235 insertions(+), 130 deletions(-)
+
+commit 9c62371eab2706c46b1072f5935e28cb4cd9dca8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-13 18:23:50 +0200
+
+ Initial port to DOS using DJGPP.
+
+ dos/Makefile | 261 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ dos/README | 113 +++++++++++++++++++++++++
+ dos/config.h | 150 +++++++++++++++++++++++++++++++++
+ 3 files changed, 524 insertions(+), 0 deletions(-)
+
+commit 0dae8b7751d09e9c5a482d5519daaee4800ce203
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-13 18:02:05 +0200
+
+ Windows port: Take advantage of the version number macros.
+ Now the version number is not duplicated in the
+ Windows-specific files anymore.
+
+ windows/Makefile | 2 +-
+ windows/common.rc | 16 ++++++++--------
+ windows/config.h | 15 ---------------
+ 3 files changed, 9 insertions(+), 24 deletions(-)
+
+commit fdbc0cfa71f7d660855098a609175ba384259529
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-13 18:00:03 +0200
+
+ Changed how the version number is specified in various places.
+ Now configure.ac will get the version number directly from
+ src/liblzma/api/lzma/version.h. The intent is to reduce the
+ number of places where the version number is duplicated. In
+ future, support for displaying Git commit ID may be added too.
+
+ configure.ac | 3 +-
+ src/liblzma/api/lzma/version.h | 70 ++++++++++++++++++++++++++++++++++++++--
+ src/liblzma/common/common.c | 2 +-
+ src/xz/message.c | 2 +-
+ src/xzdec/xzdec.c | 2 +-
+ version.sh | 23 +++++++++++++
+ 6 files changed, 95 insertions(+), 7 deletions(-)
+
+commit 1d924e584b146136989f48c13fff2632896efb3d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-13 17:30:30 +0200
+
+ Fix handling of integrity check type in the xz command line tool.
+
+ src/xz/args.c | 9 ++++++++-
+ src/xz/process.c | 4 ++++
+ 2 files changed, 12 insertions(+), 1 deletions(-)
+
+commit 96c46df7deb231ea68a03d8d1da9de4c774e36d8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-13 17:29:02 +0200
+
+ Improve support for DOS-like systems.
+ Here DOS-like means DOS, Windows, and OS/2.
+
+ src/common/physmem.h | 12 ++++++++++++
+ src/common/sysdefs.h | 4 ++++
+ src/liblzma/check/crc32_x86.S | 6 +++---
+ src/liblzma/check/crc64_x86.S | 6 +++---
+ src/xz/args.c | 13 +++++--------
+ src/xz/io.c | 39 +++++++++++++++++++++++++--------------
+ src/xz/main.c | 23 +++++++++++++++++++++++
+ src/xz/message.c | 3 +++
+ src/xz/suffix.c | 5 +++++
+ src/xzdec/xzdec.c | 5 +++--
+ 10 files changed, 86 insertions(+), 30 deletions(-)
+
+commit b6a30ee8c2de60ecd722cd05223e4ba72f822e33
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-11 20:02:32 +0200
+
+ Remove dead directories from .gitignore.
+
+ .gitignore | 2 --
+ 1 files changed, 0 insertions(+), 2 deletions(-)
+
+commit 1ec5b0027911d94cb6f98892cbc690f818d8a861
+Author: Jim Meyering <jim@meyering.net>
+Date: 2009-02-11 14:45:14 +0100
+
+ .gitignore vs. Makefiles
+
+ How about this for those of us who do srcdir builds?
+
+ .gitignore | 22 ++++++++++++++++++++++
+ 1 files changed, 22 insertions(+), 0 deletions(-)
+
+commit 154f5aec2de201c674841de4fcc9804c2a87af07
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-10 21:48:35 +0200
+
+ Removed Makefile from .gitignore since not all Makefiles
+ in the repository are generated by Autotools. People
+ should do test builds in a separate build directory anyway.
+
+ .gitignore | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+commit e605c2663691b0a4c307786aa368d124ea081daa
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-10 21:48:05 +0200
+
+ Added resource files for the Windows build.
+
+ windows/Makefile | 37 ++++++++++++++++++++++---------------
+ windows/common.rc | 46 ++++++++++++++++++++++++++++++++++++++++++++++
+ windows/liblzma.rc | 5 +++++
+ windows/lzmadec.rc | 5 +++++
+ windows/xz.rc | 5 +++++
+ windows/xzdec.rc | 5 +++++
+ 6 files changed, 88 insertions(+), 15 deletions(-)
+
+commit a3bbbe05d32b1f7ea9eb98805df4dda2e811b476
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-09 14:54:31 +0200
+
+ Let the user specify custom CFLAGS on the make command
+ line. Previously custom CFLAGS worked only when they were
+ passed to configure.
+
+ configure.ac | 58 +++++++++++++++++++++-------------------
+ src/liblzma/check/Makefile.am | 2 +-
+ 2 files changed, 31 insertions(+), 29 deletions(-)
+
+commit 53f7598998b1860a69c51243b5d2e34623c6bf60
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-08 21:35:11 +0200
+
+ Fix aliasing issue in physmem.h.
+
+ src/common/physmem.h | 19 ++++++++++---------
+ 1 files changed, 10 insertions(+), 9 deletions(-)
+
+commit 0e27028d74c5c7a8e036ae2a9b8cecb0ac79d3a6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-08 18:24:50 +0200
+
+ Add a separate internal function to initialize the CRC32
+ table, which is used also by LZ encoder. This was needed
+ because calling lzma_crc32() and ignoring the result is
+ a no-op due to lzma_attr_pure.
+
+ src/liblzma/check/check.h | 1 +
+ src/liblzma/check/crc32_small.c | 10 +++++++++-
+ src/liblzma/lz/lz_encoder.c | 4 ++--
+ 3 files changed, 12 insertions(+), 3 deletions(-)
+
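To clarify why an ignored lzma_crc32() call is a no-op: a function marked with GCC's pure attribute has no observable side effects, so the compiler is free to drop a call whose result is unused. A small illustration with hypothetical names (not actual liblzma code):

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical example, not liblzma code. Pure: the result depends only
     * on the arguments and readable memory, with no side effects the
     * compiler has to preserve. */
    __attribute__((__pure__))
    uint32_t checksum(const uint8_t *buf, size_t size);

    void warm_up(const uint8_t *buf, size_t size)
    {
        /* Intended as "initialize the internal table as a side effect",
         * but the unused pure call may be optimized away entirely --
         * hence the separate, explicit table-init function. */
        checksum(buf, size);
    }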
+commit ae1ad9af54210c9a2be336b1316532da5071516c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-08 18:17:05 +0200
+
+ Make "xz --force" write to the terminal, as the error
+ message suggests.
+
+ src/xz/main.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 79e25eded48d2fe33f31441ab7a034f902e335f8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-08 10:37:50 +0200
+
+ Support both slash and backslash as path component
+ separator on Windows when parsing argv[0].
+
+ src/xz/args.c | 9 +++++++++
+ 1 files changed, 9 insertions(+), 0 deletions(-)
+
+commit bc7c7109cc4410055a888c1c70cbd1c9445c4361
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-07 23:18:13 +0200
+
+ Omit the rm command from autogen.sh; it was wrong and,
+ even if corrected, would be useless nowadays.
+
+ autogen.sh | 28 ----------------------------
+ 1 files changed, 0 insertions(+), 28 deletions(-)
+
+commit edfc2031e56f8a2ccda063f02936b3a848d88723
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-07 21:41:52 +0200
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 880c3309386aac58fc4f3d7ca99bd31bcb1526a3
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-07 21:17:07 +0200
+
+ Make it easy to choose if command line tools should be
+ linked statically or dynamically against liblzma. The
+ default is still to use static liblzma, but it can now
+ be changed by passing --enable-dynamic to configure.
+ Thanks to Mike Frysinger for the original patch.
+
+ Fixed a few minor bugs in configure.ac.
+
+ configure.ac | 39 +++++++++++++++++++++++++++++++++++++++
+ src/xz/Makefile.am | 8 +++-----
+ src/xzdec/Makefile.am | 5 +++--
+ 3 files changed, 45 insertions(+), 7 deletions(-)
+
+commit 3f86532407e4ace3debb62be16035e009b56ca36
+Author: Mike Frysinger <vapier@gentoo.org>
+Date: 2009-02-06 23:38:39 -0500
+
+ add gitignore files
+
+ Signed-off-by: Mike Frysinger <vapier@gentoo.org>
+
+ .gitignore | 32 ++++++++++++++++++++++++++++++++
+ m4/.gitignore | 35 +++++++++++++++++++++++++++++++++++
+ po/.gitignore | 12 ++++++++++++
+ 3 files changed, 79 insertions(+), 0 deletions(-)
+
+commit bd7ca1dad5c146b6217799ffaa230c32d207a3e5
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-07 17:07:52 +0200
+
+ Assume 32 MiB of RAM on unsupported operating systems, as
+ the comment in hardware.c already said.
+
+ src/xz/hardware.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit d0ab8c1c73ae712adb0d26fbb9da762d99a63618
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-07 16:26:58 +0200
+
+ MinGW support: Don't build fastpos_tablegen.c as part of
+ liblzma. Build both static and dynamic liblzma, and also
+ static and dynamic versions of the command line tools.
+
+ windows/Makefile | 92 ++++++++++++++++++++++++++++++++++++++++-------------
+ windows/README | 10 ------
+ 2 files changed, 69 insertions(+), 33 deletions(-)
+
+commit bfd91198e44a52bd9bfe3cd6dcae5edab7c6eb45
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-07 15:55:47 +0200
+
+ Support LZMA_API_STATIC in assembler files to
+ avoid __declspec(dllexport) equivalent.
+
+ src/liblzma/check/crc32_x86.S | 4 ++++
+ src/liblzma/check/crc64_x86.S | 2 ++
+ 2 files changed, 6 insertions(+), 0 deletions(-)
+
+commit 3306cf3883492720b3c34baa02f4eb4227d91c73
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-07 11:11:50 +0200
+
+ Introduced the LZMA_API_STATIC macro, which applications
+ need to #define when linking against static liblzma on
+ platforms like Windows. Most developers don't need to
+ care about LZMA_API_STATIC at all.
+
+ src/liblzma/api/lzma.h | 31 ++++++++++++++++++++++++-------
+ src/liblzma/common/common.h | 2 +-
+ 2 files changed, 25 insertions(+), 8 deletions(-)
+
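For applications that do link against static liblzma on Windows, the usage described in the commit above amounts to defining the macro before including the public header; this is a sketch based on the documented behavior of later liblzma releases, and the macro should be harmless on platforms where it has no effect:

    /* Sketch: tell lzma.h not to declare the API with __declspec(dllimport),
     * since we are linking the static library. */
    #define LZMA_API_STATIC
    #include <lzma.h>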
+commit b719e63c5f4c91d2d5e2ea585d4c055ec3767d0b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-06 16:55:45 +0200
+
+ Another grammar fix
+
+ README | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit fe5434f940f75fec3611cf9d9edf78c4da8ac760
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-06 12:30:23 +0200
+
+ Grammar fix in README.
+
+ README | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 3dfa58a9eedf5a0e566452b078801c9cbcf7a245
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-06 10:06:32 +0200
+
+ Some MSYS installations (e.g. MsysGit) don't include
+ install.exe, so don't rely on it.
+
+ windows/Makefile | 12 +++++++-----
+ windows/README | 11 ++++++-----
+ 2 files changed, 13 insertions(+), 10 deletions(-)
+
+commit 975d8fd72a5148d46b2e1745f7a211cf1dfd9d31
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-06 09:13:15 +0200
+
+ Recreated the BCJ test files for x86 and SPARC. The old files
+ were linked with crt*.o, which are copyrighted, and thus the
+ old test files were not in the public domain as a whole. They
+ are freely distributable though, but it is better to be careful
+ and avoid including any copyrighted pieces in the test files.
+ The new files are just compiled and assembled object files,
+ and thus don't contain any copyrighted code.
+
+ tests/bcj_test.c | 2 +-
+ tests/compress_prepared_bcj_sparc | Bin 6804 -> 1240 bytes
+ tests/compress_prepared_bcj_x86 | Bin 4649 -> 1388 bytes
+ tests/files/good-1-sparc-lzma2.xz | Bin 2296 -> 612 bytes
+ tests/files/good-1-x86-lzma2.xz | Bin 1936 -> 716 bytes
+ 5 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 094b1b09a531f0d201ec81f2b07346a995fd80b9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-05 21:21:27 +0200
+
+ Add the "windows" directory to EXTRA_DIST.
+
+ Makefile.am | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit e1c3412eec7acec7ca3b32c9c828f3147dc65b49
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-05 09:17:51 +0200
+
+ Added initial experimental makefile for use with MinGW.
+
+ windows/Makefile | 253 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ windows/README | 164 +++++++++++++++++++++++++++++++++++
+ windows/config.h | 180 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 597 insertions(+), 0 deletions(-)
+
+commit 75905a9afc0ee89954ede7d08af70d1148bf0fd9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-05 09:12:57 +0200
+
+ Various code cleanups to the xz command line tool.
+ It now builds with MinGW.
+
+ src/common/physmem.h | 13 ++++
+ src/xz/Makefile.am | 2 +
+ src/xz/args.h | 8 --
+ src/xz/hardware.h | 10 +---
+ src/xz/io.c | 93 +++++++++++++++++++-------
+ src/xz/io.h | 12 +--
+ src/xz/main.c | 132 ++-----------------------------------
+ src/xz/main.h | 22 ------
+ src/xz/message.c | 65 ++++++++++++++++--
+ src/xz/message.h | 6 --
+ src/xz/options.h | 8 --
+ src/xz/private.h | 18 ++++--
+ src/xz/process.h | 10 +---
+ src/xz/signals.c | 180 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ src/xz/signals.h | 51 ++++++++++++++
+ src/xz/suffix.h | 5 --
+ src/xz/util.c | 5 +-
+ src/xz/util.h | 5 --
+ 18 files changed, 399 insertions(+), 246 deletions(-)
+
+commit d0c0b9e94e0af59d1d8f7f4829695d6efe19ccfe
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-03 12:15:17 +0200
+
+ Another utime() fix.
+
+ src/xz/io.c | 9 +++++----
+ 1 files changed, 5 insertions(+), 4 deletions(-)
+
+commit ccf92a29e8c7234284f1568c1ec0fd7cb98356ca
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-03 10:41:11 +0200
+
+ Fix wrong filename argument for utime() and utimes().
+ This doesn't affect most systems, since most systems
+ have better functions available.
+
+ src/xz/io.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+commit 99c1c2abfae2e87f3c17e929783e6d1bb7a3f302
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-02 21:19:01 +0200
+
+ Updated the x86 assembler code:
+ - Use call/ret pair to get instruction pointer for PIC.
+ - Use PIC only if PIC or __PIC__ is #defined.
+ - The code should work on MinGW and Darwin in addition
+ to GNU/Linux and Solaris.
+
+ configure.ac | 6 ---
+ src/liblzma/check/crc32_x86.S | 84 +++++++++++++++++++++++++++++++++++------
+ src/liblzma/check/crc64_x86.S | 82 +++++++++++++++++++++++++++++++++++-----
+ 3 files changed, 144 insertions(+), 28 deletions(-)
+
+commit 22a0c6dd940b78cdac2f4a4b4b0e7cc0ac15021f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-02 20:14:03 +0200
+
+ Modify the LZMA_API macro so that it works on Windows with
+ compilers other than MinGW. This may hurt the readability
+ of the API headers slightly, but I don't know any
+ better way to do this.
+
+ src/liblzma/api/lzma.h | 6 ++--
+ src/liblzma/api/lzma/base.h | 11 ++++---
+ src/liblzma/api/lzma/block.h | 22 +++++++-------
+ src/liblzma/api/lzma/check.h | 10 +++---
+ src/liblzma/api/lzma/container.h | 22 +++++++-------
+ src/liblzma/api/lzma/filter.h | 28 ++++++++++----------
+ src/liblzma/api/lzma/index.h | 40 ++++++++++++++--------------
+ src/liblzma/api/lzma/index_hash.h | 10 +++---
+ src/liblzma/api/lzma/lzma.h | 6 ++--
+ src/liblzma/api/lzma/stream_flags.h | 10 +++---
+ src/liblzma/api/lzma/version.h | 4 +-
+ src/liblzma/api/lzma/vli.h | 6 ++--
+ src/liblzma/check/check.c | 4 +-
+ src/liblzma/check/crc32_fast.c | 2 +-
+ src/liblzma/check/crc32_small.c | 2 +-
+ src/liblzma/check/crc64_fast.c | 2 +-
+ src/liblzma/check/crc64_small.c | 2 +-
+ src/liblzma/common/alone_decoder.c | 2 +-
+ src/liblzma/common/alone_encoder.c | 2 +-
+ src/liblzma/common/auto_decoder.c | 2 +-
+ src/liblzma/common/block_buffer_decoder.c | 2 +-
+ src/liblzma/common/block_buffer_encoder.c | 4 +-
+ src/liblzma/common/block_decoder.c | 2 +-
+ src/liblzma/common/block_encoder.c | 2 +-
+ src/liblzma/common/block_header_decoder.c | 2 +-
+ src/liblzma/common/block_header_encoder.c | 4 +-
+ src/liblzma/common/block_util.c | 6 ++--
+ src/liblzma/common/chunk_size.c | 2 +-
+ src/liblzma/common/common.c | 16 +++++-----
+ src/liblzma/common/common.h | 2 +-
+ src/liblzma/common/easy.c | 6 ++--
+ src/liblzma/common/filter_buffer_decoder.c | 2 +-
+ src/liblzma/common/filter_buffer_encoder.c | 2 +-
+ src/liblzma/common/filter_decoder.c | 8 +++---
+ src/liblzma/common/filter_encoder.c | 12 ++++----
+ src/liblzma/common/filter_flags_decoder.c | 2 +-
+ src/liblzma/common/filter_flags_encoder.c | 4 +-
+ src/liblzma/common/index.c | 32 +++++++++++-----------
+ src/liblzma/common/index_decoder.c | 4 +-
+ src/liblzma/common/index_encoder.c | 4 +-
+ src/liblzma/common/index_hash.c | 10 +++---
+ src/liblzma/common/stream_buffer_decoder.c | 2 +-
+ src/liblzma/common/stream_buffer_encoder.c | 4 +-
+ src/liblzma/common/stream_decoder.c | 2 +-
+ src/liblzma/common/stream_encoder.c | 2 +-
+ src/liblzma/common/stream_flags_common.c | 2 +-
+ src/liblzma/common/stream_flags_decoder.c | 4 +-
+ src/liblzma/common/stream_flags_encoder.c | 4 +-
+ src/liblzma/common/vli_decoder.c | 2 +-
+ src/liblzma/common/vli_encoder.c | 2 +-
+ src/liblzma/common/vli_size.c | 2 +-
+ src/liblzma/lz/lz_encoder.c | 2 +-
+ src/liblzma/lzma/lzma_encoder.c | 2 +-
+ src/liblzma/lzma/lzma_encoder_presets.c | 2 +-
+ 54 files changed, 177 insertions(+), 176 deletions(-)
+
+commit 8dd7b6052e18621e2e6c62f40f762ee88bd3eb65
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-01 22:40:35 +0200
+
+ Fix a bug in lzma_block_buffer_decode(), although this
+ function should be rewritten anyway.
+
+ src/liblzma/common/block_buffer_decoder.c | 8 ++++----
+ 1 files changed, 4 insertions(+), 4 deletions(-)
+
+commit 55fd41431e61fb8178858283d636b6781e33e847
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-01 22:39:07 +0200
+
+ Added initial version of raw buffer-to-buffer coding
+ functions, and cleaned up filter.h API header a little.
+ May be very buggy, not tested yet.
+
+ src/liblzma/api/lzma/filter.h | 84 +++++++++++++++++++------
+ src/liblzma/common/Makefile.am | 2 +
+ src/liblzma/common/filter_buffer_decoder.c | 94 ++++++++++++++++++++++++++++
+ src/liblzma/common/filter_buffer_encoder.c | 61 ++++++++++++++++++
+ 4 files changed, 221 insertions(+), 20 deletions(-)
+
+commit 3e54ecee5cad30a5ca361a88a99230407abc0699
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-01 00:11:20 +0200
+
+ Fix missing newlines in xzdec.c.
+
+ src/xzdec/xzdec.c | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+commit d64ca34f1b6f34e86adefc7f735b4eff8e6d4a35
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-02-01 00:10:07 +0200
+
+ Use __cdecl also for function pointers in the liblzma API
+ on Windows.
+
+ src/liblzma/api/lzma.h | 18 +++++++++++-------
+ src/liblzma/api/lzma/base.h | 4 ++--
+ src/liblzma/common/common.h | 18 +++++++-----------
+ 3 files changed, 20 insertions(+), 20 deletions(-)
+
+commit 6a2eb54092fc625d59921a607ff68cd1a90aa898
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-31 11:01:48 +0200
+
+ Add LZMA_API to liblzma API headers. It's useful at least
+ on Windows. sysdefs.h no longer #includes lzma.h, so lzma.h
+ has to be #included separately where needed.
+
+ src/common/sysdefs.h | 2 -
+ src/liblzma/api/lzma.h | 17 ++++++++++++
+ src/liblzma/api/lzma/base.h | 10 +++---
+ src/liblzma/api/lzma/block.h | 25 ++++++++++--------
+ src/liblzma/api/lzma/check.h | 12 +++++---
+ src/liblzma/api/lzma/container.h | 23 +++++++++--------
+ src/liblzma/api/lzma/filter.h | 24 +++++++++---------
+ src/liblzma/api/lzma/index.h | 47 ++++++++++++++++++++---------------
+ src/liblzma/api/lzma/index_hash.h | 11 ++++---
+ src/liblzma/api/lzma/lzma.h | 8 +++--
+ src/liblzma/api/lzma/stream_flags.h | 10 +++---
+ src/liblzma/api/lzma/version.h | 4 +-
+ src/liblzma/api/lzma/vli.h | 10 +++---
+ src/liblzma/common/common.h | 13 ++++++++-
+ src/xz/private.h | 1 +
+ src/xzdec/xzdec.c | 1 +
+ tests/tests.h | 1 +
+ 17 files changed, 131 insertions(+), 88 deletions(-)
+
+commit d9993fcb4dfc1f93abaf31ae23b3ef1f3123892b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-31 10:13:09 +0200
+
+ Use _WIN32 instead of WIN32 in xzdec.c to test if compiling on Windows.
+
+ src/xzdec/xzdec.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+commit 2dbdc5befb33c3703e4609809101047c67caf343
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-31 10:02:52 +0200
+
+ Fix two lines in lzma.h on which the # wasn't at the
+ beginning of the line.
+
+ src/liblzma/api/lzma.h | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+commit 4ab760109106dc04f39dd81c97d50f528d1b51c1
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-31 09:55:05 +0200
+
+ Add support for using liblzma headers in MSVC, which has no
+ stdint.h or inttypes.h.
+
+ src/liblzma/api/lzma.h | 70 +++++++++++++++++++++++++++++++----------------
+ 1 files changed, 46 insertions(+), 24 deletions(-)
+
+commit b2172cf823d3be34cb0246cb4cb32d105e2a34c9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-31 08:49:54 +0200
+
+ Fix # -> ## in a macro in lzma.h.
+
+ src/liblzma/api/lzma.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 1aae8698746d3c87a93f8398cdde2de9ba1f7208
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-30 18:50:16 +0200
+
+ Updated README.
+
+ README | 30 ++++++++++++++++++++----------
+ 1 files changed, 20 insertions(+), 10 deletions(-)
+
+commit f54bcf6f80d585236bc03ce49f7c73e1abaa17eb
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-30 00:29:58 +0200
+
+ Remove dangling crc64_init.c.
+
+ src/liblzma/check/crc64_init.c | 55 ----------------------------------------
+ 1 files changed, 0 insertions(+), 55 deletions(-)
+
+commit 982da7ed314398420c38bf154a8f759d5f18b480
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-28 17:16:38 +0200
+
+ The .xz file format specification version 1.0.0 is now
+ officially released. The format has been technically the same
+ since 2008-11-19, but now that it is frozen, people can start
+ using it without fear that the format will break.
+
+ doc/file-format.txt | 84 +++++++++++++++++++++++++++++---------------------
+ 1 files changed, 49 insertions(+), 35 deletions(-)
+
+commit c4683a660b4372156bdaf92f0cdc54a58f95ee6f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-28 08:45:59 +0200
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 3241317093595db9f79104faafe93cb989c9f858
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-28 08:43:26 +0200
+
+ Fix uninitialized variables in alone_decoder.c. This bug was
+ triggered by the previous commit, since these variables were
+ not used by anything before preset dictionary support was added.
+
+ src/liblzma/common/alone_decoder.c | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+commit f76e39cf930f888d460b443d18f977ebedea8b2a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-27 18:36:05 +0200
+
+ Added initial support for preset dictionary for raw LZMA1
+ and LZMA2. It is not supported by the .xz format or the xz
+ command line tool yet.
+
+ src/liblzma/lz/lz_decoder.c | 35 +++++++++++++++++++++++++----------
+ src/liblzma/lz/lz_decoder.h | 9 ++++++++-
+ src/liblzma/lz/lz_encoder.c | 18 ++++++++++++++++--
+ src/liblzma/lzma/lzma2_decoder.c | 9 ++++++---
+ src/liblzma/lzma/lzma2_encoder.c | 12 +++++++-----
+ src/liblzma/lzma/lzma_decoder.c | 10 ++++++----
+ src/liblzma/lzma/lzma_decoder.h | 2 +-
+ src/liblzma/lzma/lzma_encoder.c | 9 ++++++++-
+ 8 files changed, 77 insertions(+), 27 deletions(-)
+
+commit 449b8c832b26c3633f3bec60095e57d2d3ada1f3
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-26 20:09:17 +0200
+
+ Regenerate the CRC tables without trailing blanks.
+
+ src/liblzma/check/crc32_table_be.h | 1008 ++++++++++++++++++------------------
+ src/liblzma/check/crc32_table_le.h | 1008 ++++++++++++++++++------------------
+ src/liblzma/check/crc64_table_be.h | 1016 ++++++++++++++++++------------------
+ src/liblzma/check/crc64_table_le.h | 1016 ++++++++++++++++++------------------
+ 4 files changed, 2024 insertions(+), 2024 deletions(-)
+
+commit 850f7400428dc9c5fd08a2f35a5bd2c9e45aede2
+Author: Jim Meyering <meyering@redhat.com>
+Date: 2009-01-19 21:37:16 +0100
+
+ remove trailing blanks from all but .xz files
+
+ debug/known_sizes.c | 2 +-
+ extra/scanlzma/scanlzma.c | 5 ++---
+ src/liblzma/check/crc32_tablegen.c | 2 +-
+ src/liblzma/check/crc64_tablegen.c | 2 +-
+ src/scripts/lzdiff.1 | 4 ++--
+ src/scripts/lzmore.1 | 6 +++---
+ tests/test_compress.sh | 4 ++--
+ 7 files changed, 12 insertions(+), 13 deletions(-)
+
+commit 667481f1aad34e1ed15738e7913a9c7e256b4cf5
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-26 14:34:10 +0200
+
+ Add lzma_block_buffer_decode().
+
+ src/liblzma/api/lzma/block.h | 41 ++++++++++++++
+ src/liblzma/common/Makefile.am | 1 +
+ src/liblzma/common/block_buffer_decoder.c | 87 +++++++++++++++++++++++++++++
+ 3 files changed, 129 insertions(+), 0 deletions(-)
+
+commit 5fb34d8324d3e7e0061df25d0086b64c8726b19d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-26 14:33:28 +0200
+
+ Add more sanity checks to lzma_stream_buffer_decode().
+
+ src/liblzma/common/stream_buffer_decoder.c | 7 +++++++
+ 1 files changed, 7 insertions(+), 0 deletions(-)
+
+commit c129748675a5daa8838df92bde32cc04f6ce61ba
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-26 14:33:13 +0200
+
+ Avoid hardcoded constant in easy.c.
+
+ src/liblzma/common/easy.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 1859d22d75e072463db74c25bc3f5a7992e5fdf6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-26 13:06:49 +0200
+
+ Tiny bit better sanity check in block_util.c
+
+ src/liblzma/common/block_util.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 2c5fe958e4bbe9b147b10c255955dfe2827fb8e7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-25 01:35:56 +0200
+
+ Fix a dumb bug in Block decoder, which made it return
+ LZMA_DATA_ERROR with valid data. The bug was added in
+ e114502b2bc371e4a45449832cb69be036360722.
+
+ src/liblzma/common/block_decoder.c | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+commit c81f13ff29271de7293f8af3d81848b1dcae3d19
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-23 22:27:50 +0200
+
+ Added lzma_stream_buffer_decode() and made minor cleanups.
+
+ src/liblzma/api/lzma/block.h | 3 +-
+ src/liblzma/api/lzma/container.h | 51 +++++++++++++++-
+ src/liblzma/common/Makefile.am | 1 +
+ src/liblzma/common/stream_buffer_decoder.c | 91 ++++++++++++++++++++++++++++
+ 4 files changed, 144 insertions(+), 2 deletions(-)
+
+commit 0b3318661ce749550b8531dfd469639a08930391
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-22 12:53:33 +0200
+
+ Fix a comment.
+
+ src/liblzma/common/common.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 9ec80355a7212a0a2f8c89d98e51b1d8b4e34eec
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-20 16:37:27 +0200
+
+ Add some single-call buffer-to-buffer coding functions.
+
+ src/liblzma/api/lzma/block.h | 57 +++++
+ src/liblzma/api/lzma/container.h | 56 +++++
+ src/liblzma/api/lzma/index.h | 70 ++++++-
+ src/liblzma/common/Makefile.am | 2 +
+ src/liblzma/common/block_buffer_encoder.c | 305 ++++++++++++++++++++++++++++
+ src/liblzma/common/index_decoder.c | 83 +++++++--
+ src/liblzma/common/index_encoder.c | 59 +++++-
+ src/liblzma/common/stream_buffer_encoder.c | 138 +++++++++++++
+ tests/test_index.c | 24 +++
+ 9 files changed, 768 insertions(+), 26 deletions(-)
+
+commit d8b58d099340f8f4007b24b211ee41a7210c061c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-20 13:45:41 +0200
+
+ Block encoder cleanups
+
+ src/liblzma/common/block_encoder.c | 28 +++++++---------------------
+ src/liblzma/common/block_encoder.h | 25 +++++++++++++++++++++++++
+ 2 files changed, 32 insertions(+), 21 deletions(-)
+
+commit 0c09810cb3635cb575cb54e694d41523e7d0a335
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-20 10:35:15 +0200
+
+ Use LZMA_PROG_ERROR in lzma_code() as documented in base.h.
+
+ src/liblzma/common/common.c | 24 ++++++++----------------
+ 1 files changed, 8 insertions(+), 16 deletions(-)
+
+commit 2f1a8e8eb898f6c036cde55d153ad348bfab3c00
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-19 22:53:18 +0200
+
+ Fix handling of non-fatal errors in lzma_code().
+
+ src/liblzma/common/common.c | 9 ++++++++-
+ 1 files changed, 8 insertions(+), 1 deletions(-)
+
+commit 4810b6bc25087be872960b9dd1d11ff07735dc88
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-19 14:00:33 +0200
+
+ Move some LZMA2 constants to lzma2_encoder.h so that they
+ can be used outside lzma2_encoder.c.
+
+ src/liblzma/lzma/lzma2_encoder.c | 13 -------------
+ src/liblzma/lzma/lzma2_encoder.h | 14 ++++++++++++++
+ src/liblzma/lzma/lzma_encoder.c | 3 ++-
+ 3 files changed, 16 insertions(+), 14 deletions(-)
+
+commit 00be5d2e09f9c7a6a8563465ad8b8042866817a4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-19 13:52:36 +0200
+
+ Remove dead code.
+
+ src/liblzma/lzma/lzma_encoder.h | 8 --------
+ 1 files changed, 0 insertions(+), 8 deletions(-)
+
+commit 128586213f77c9bd82b7e9a62927f6d0c3769d85
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-17 14:24:25 +0200
+
+ Beta was supposed to be API stable, but I had forgotten to rename
+ lzma_memlimit_encoder and lzma_memlimit_decoder to
+ lzma_raw_encoder_memlimit and lzma_raw_decoder_memlimit. :-(
+ Now it is fixed. Hopefully it doesn't cause too much trouble
+ for those who already thought the API was stable.
+
+ src/liblzma/api/lzma/filter.h | 4 ++--
+ src/liblzma/common/easy.c | 4 ++--
+ src/liblzma/common/filter_common.c | 2 +-
+ src/liblzma/common/filter_common.h | 2 +-
+ src/liblzma/common/filter_decoder.c | 4 ++--
+ src/liblzma/common/filter_encoder.c | 4 ++--
+ src/liblzma/common/stream_decoder.c | 2 +-
+ src/xz/process.c | 6 +++---
+ 8 files changed, 14 insertions(+), 14 deletions(-)
+
+commit b056379490be5c584c264a967f0540041a163a1e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-15 14:29:22 +0200
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit dc8f3be06d54ef6e6cfb5134dd3d25edd08cef89
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-15 14:27:32 +0200
+
+ Fixed a bug in 7z2lzma.bash to make it work with .7z files
+ that use something other than 2^n as the dictionary size.
+ Thanks to Dan Shechter for the bug report.
+
+ extra/7z2lzma/7z2lzma.bash | 47 ++++++++++++++++++++++---------------------
+ 1 files changed, 24 insertions(+), 23 deletions(-)
+
+commit 8286a60b8f4bd5accfbc9d229d2204bac31994f2
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2009-01-07 18:41:15 +0200
+
+ Use pthread_sigmask() instead of sigprocmask() when pthreads
+ are enabled.
+
+ src/common/mythread.h | 6 ++++++
+ src/xz/main.c | 4 ++--
+ src/xz/private.h | 1 +
+ 3 files changed, 9 insertions(+), 2 deletions(-)
+
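The practical difference behind the change above: sigprocmask() has unspecified behavior in multithreaded processes, while pthread_sigmask() is defined per thread. A minimal, generic POSIX sketch (not xz code) of blocking signals the thread-safe way:

    #include <pthread.h>
    #include <signal.h>

    /* Generic example, not xz code. Block SIGINT and SIGTERM for the
     * calling thread; the previous mask is saved into *old so it can
     * be restored later. */
    static void block_termination_signals(sigset_t *old)
    {
        sigset_t set;
        sigemptyset(&set);
        sigaddset(&set, SIGINT);
        sigaddset(&set, SIGTERM);
        pthread_sigmask(SIG_BLOCK, &set, old);
    }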
+commit 4fd43cb3a906f6da2943f69239ee984c4787c9a9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-31 20:01:00 +0200
+
+ Bumped version to 4.999.8beta right after the release
+ of 4.999.7beta.
+
+ configure.ac | 2 +-
+ src/liblzma/api/lzma/version.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+commit 061748f5932719643cda73383db715167d543c22
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-31 18:59:02 +0200
+
+ Disable Subblock filter from test_compress.sh since it is
+ disabled by default in configure.ac.
+
+ tests/test_compress.sh | 22 +++++++++++++---------
+ 1 files changed, 13 insertions(+), 9 deletions(-)
+
+commit 9c45658ddc8bd4a7819ef8547d3e7ccf73203e78
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-31 17:44:20 +0200
+
+ Disable both the Subblock encoder and decoder by default,
+ since they are not finished and may also have security issues.
+
+ configure.ac | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit b59f1e98f50694cf6a8f1b342fd878feebdb2f88
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-31 17:42:50 +0200
+
+ Update some files in debug directory.
+
+ debug/full_flush.c | 2 --
+ debug/memusage.c | 2 --
+ debug/sync_flush.c | 2 --
+ 3 files changed, 0 insertions(+), 6 deletions(-)
+
+commit d1d17a40d33a9682424ca37282813492f2cba6d0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-31 17:41:46 +0200
+
+ Prepare for 4.999.7beta release.
+
+ AUTHORS | 4 ++--
+ README | 34 +++++++++++++++++-----------------
+ configure.ac | 4 ++--
+ src/liblzma/api/lzma/version.h | 2 +-
+ 4 files changed, 22 insertions(+), 22 deletions(-)
+
+commit 88d3e6b0b18e24142b6d3b41dc1b84b00c49fef3
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-31 17:15:03 +0200
+
+ Cleaned up some comments in the API headers.
+
+ src/liblzma/api/lzma/check.h | 23 +++++++++++------------
+ src/liblzma/api/lzma/container.h | 2 +-
+ src/liblzma/api/lzma/version.h | 4 +++-
+ 3 files changed, 15 insertions(+), 14 deletions(-)
+
+commit 322ecf93c961e45a1da8c4a794a7fdacefcd7f40
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-31 16:29:39 +0200
+
+ Renamed lzma_options_simple to lzma_options_bcj in the API.
+ The internal implementation is still using the name "simple".
+ It may need some cleanups, so I'll look at it later.
+
+ src/liblzma/api/Makefile.am | 2 +-
+ src/liblzma/api/lzma.h | 2 +-
+ src/liblzma/api/lzma/bcj.h | 94 +++++++++++++++++++++++++++++++++++
+ src/liblzma/api/lzma/simple.h | 94 -----------------------------------
+ src/liblzma/simple/simple_coder.c | 2 +-
+ src/liblzma/simple/simple_decoder.c | 4 +-
+ src/liblzma/simple/simple_encoder.c | 4 +-
+ tests/test_filter_flags.c | 8 ++--
+ 8 files changed, 105 insertions(+), 105 deletions(-)
+
+commit 7eea8bec3abfed883efba66264a1452a1c04f6b0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-31 00:57:27 +0200
+
+ Fixed missing quoting in configure.ac.
+
+ configure.ac | 38 +++++++++++++++++++-------------------
+ 1 files changed, 19 insertions(+), 19 deletions(-)
+
+commit 28e75f7086dbe9501d926c370375c69dfb1236ce
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-31 00:48:23 +0200
+
+ Updated src/liblzma/Makefile.am to use liblzma.pc.in, which
+ should have been in the previous commit.
+
+ src/liblzma/Makefile.am | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+commit 7ed9d943b31d3ee9c5fb2387e84a241ba33afe90
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-31 00:30:49 +0200
+
+ Remove lzma_init() and other init functions from liblzma API.
+ Half of developers were already forgetting to use these
+ functions, which could have caused total breakage in some future
+ liblzma version or even now if --enable-small was used. Now
+ liblzma uses pthread_once() to do the initializations unless
+ it has been built with --disable-threads, which makes these
+ initializations thread-unsafe.
+
+ When --enable-small isn't used, liblzma currently gets needlessly
+ linked against libpthread (on systems that have it). While it is
+ stupid for now, liblzma will need threads in future anyway, so
+ this stupidity will be temporary only.
+
+ When --enable-small is used, different code for CRC32 and CRC64 is
+ now used than without --enable-small. This made the resulting
+ binary slightly smaller, but the main reason was to clean it up
+ and to handle the lack of lzma_init_check().
+
+ The pkg-config file lzma.pc was renamed to liblzma.pc. I'm not
+ sure if it works correctly and portably for static linking
+ (Libs.private includes -pthread or other operating system
+ specific flags). Hopefully someone complains if it is bad.
+
+ lzma_rc_prices[] is now included as a precomputed array even
+ with --enable-small. It's just 128 bytes now that it uses uint8_t
+ instead of uint32_t. Smaller array seemed to be at least as fast
+ as the more bloated uint32_t array on x86; hopefully it's not bad
+ on other architectures.
+
+ configure.ac | 29 ++++++++--
+ src/common/mythread.h | 34 +++++++++++
+ src/liblzma/api/Makefile.am | 1 -
+ src/liblzma/api/lzma.h | 1 -
+ src/liblzma/api/lzma/init.h | 85 ----------------------------
+ src/liblzma/check/Makefile.am | 29 +++------
+ src/liblzma/check/check.c | 10 ++--
+ src/liblzma/check/check.h | 25 +++-----
+ src/liblzma/check/check_init.c | 37 ------------
+ src/liblzma/check/crc32.c | 88 -----------------------------
+ src/liblzma/check/crc32_fast.c | 88 +++++++++++++++++++++++++++++
+ src/liblzma/check/crc32_init.c | 55 ------------------
+ src/liblzma/check/crc32_small.c | 54 ++++++++++++++++++
+ src/liblzma/check/crc32_tablegen.c | 55 ++++++++++++++++--
+ src/liblzma/check/crc64.c | 75 ------------------------
+ src/liblzma/check/crc64_fast.c | 75 ++++++++++++++++++++++++
+ src/liblzma/check/crc64_small.c | 54 ++++++++++++++++++
+ src/liblzma/check/crc64_tablegen.c | 55 ++++++++++++++++--
+ src/liblzma/common/Makefile.am | 3 -
+ src/liblzma/common/common.h | 1 +
+ src/liblzma/common/init.c | 39 -------------
+ src/liblzma/common/init_decoder.c | 31 ----------
+ src/liblzma/common/init_encoder.c | 40 -------------
+ src/liblzma/liblzma.pc.in | 12 ++++
+ src/liblzma/lz/lz_encoder.c | 6 ++
+ src/liblzma/lzma.pc.in | 11 ----
+ src/liblzma/rangecoder/Makefile.am | 8 +--
+ src/liblzma/rangecoder/price.h | 16 +-----
+ src/liblzma/rangecoder/price_table.c | 2 +-
+ src/liblzma/rangecoder/price_table_init.c | 55 ------------------
+ src/liblzma/rangecoder/price_tablegen.c | 51 ++++++++++++++--
+ src/xz/Makefile.am | 5 +-
+ src/xz/main.c | 3 -
+ src/xzdec/xzdec.c | 3 -
+ tests/test_block_header.c | 1 -
+ tests/test_check.c | 2 -
+ tests/test_filter_flags.c | 2 -
+ tests/test_index.c | 2 -
+ tests/test_stream_flags.c | 2 -
+ tests/tests.h | 2 +-
+ 40 files changed, 519 insertions(+), 628 deletions(-)
+
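The pthread_once() approach mentioned above follows the standard pattern for one-time lazy initialization; a generic sketch (not the liblzma internals):

    #include <pthread.h>

    static pthread_once_t init_once = PTHREAD_ONCE_INIT;

    /* Generic sketch. Build lookup tables, etc. Runs exactly once even
     * if many threads race to call ensure_init() concurrently. */
    static void do_init(void)
    {
    }

    static void ensure_init(void)
    {
        pthread_once(&init_once, do_init);
    }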
+commit 5cda29b5665004fc0f21d0c41d78022a6a559ab2
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-27 19:40:31 +0200
+
+ Use 28 MiB as memory usage limit for encoding in test_compress.sh.
+
+ tests/test_compress.sh | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 050eb14d29e2537c014662e83599fd8a77f13c45
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-27 19:32:20 +0200
+
+ Revert a change made in 3b34851de1eaf358cf9268922fa0eeed8278d680
+ that was related to LZMA_MODE_FAST. The original code is slightly
+ faster although it compresses slightly worse. But since it is fast
+ mode, it is better to select the faster version.
+
+ src/liblzma/lzma/lzma_encoder_optimum_fast.c | 23 ++++++++---------------
+ 1 files changed, 8 insertions(+), 15 deletions(-)
+
+commit 4820f10d0f173864f6a2ea7479663b509ac53358
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-27 19:30:19 +0200
+
+ Some xz command line tool improvements.
+
+ src/xz/args.c | 23 ++++-----
+ src/xz/message.c | 4 +-
+ src/xz/options.c | 2 +-
+ src/xz/process.c | 133 ++++++++++++++++++++++++++++++++++++++++++-----------
+ src/xz/process.h | 3 +
+ 5 files changed, 121 insertions(+), 44 deletions(-)
+
+commit e33194e79d8f5ce07cb4aca909b324ae75098f7e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-27 19:27:49 +0200
+
+ Bunch of liblzma tweaks, including some API changes.
+ The API and ABI should now be very close to stable,
+ although the code behind it isn't yet.
+
+ src/liblzma/api/lzma.h | 8 ++--
+ src/liblzma/api/lzma/block.h | 63 +++++++++++++++++++++++-
+ src/liblzma/api/lzma/container.h | 76 ++++++++++++++--------------
+ src/liblzma/api/lzma/lzma.h | 41 +++++++++-------
+ src/liblzma/common/alone_decoder.c | 36 +++++++-------
+ src/liblzma/common/alone_encoder.c | 22 +++-----
+ src/liblzma/common/auto_decoder.c | 2 +-
+ src/liblzma/common/block_decoder.c | 54 ++++++++++----------
+ src/liblzma/common/block_decoder.h | 4 +-
+ src/liblzma/common/block_encoder.c | 37 ++++++++------
+ src/liblzma/common/block_encoder.h | 4 +-
+ src/liblzma/common/block_header_decoder.c | 41 ++++++++-------
+ src/liblzma/common/block_header_encoder.c | 51 +++++++++----------
+ src/liblzma/common/block_util.c | 3 +-
+ src/liblzma/common/easy.c | 45 ++++++-----------
+ src/liblzma/common/stream_decoder.c | 3 +-
+ src/liblzma/common/stream_decoder.h | 2 +-
+ src/liblzma/common/stream_encoder.c | 3 +-
+ src/liblzma/common/stream_encoder.h | 2 +-
+ src/liblzma/common/stream_flags_decoder.c | 2 +-
+ src/liblzma/common/stream_flags_encoder.c | 2 +-
+ src/liblzma/lzma/lzma_encoder.c | 2 +-
+ src/liblzma/lzma/lzma_encoder_presets.c | 53 ++++++--------------
+ 23 files changed, 294 insertions(+), 262 deletions(-)
+
+commit 4d00652e75dd2736aedc3a3a8baff3dd0ea38074
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-18 13:42:52 +0200
+
+ Updated Makefile.am that was missing from the previous commit.
+
+ src/liblzma/common/Makefile.am | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+commit 634636fa56ccee6e744f78b0abed76c8940f2f8f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-17 21:49:53 +0200
+
+ Remove the alignment functions for now. Maybe they will
+ be added back in some form later, but the current version
+ wasn't modular, so it would need fixing anyway.
+
+ src/liblzma/api/Makefile.am | 1 -
+ src/liblzma/api/lzma.h | 1 -
+ src/liblzma/api/lzma/alignment.h | 60 --------------------
+ src/liblzma/common/alignment.c | 114 --------------------------------------
+ 4 files changed, 0 insertions(+), 176 deletions(-)
+
+commit 4fed98417d1687f5eccccb42a133fde3ec81216a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-17 20:11:23 +0200
+
+ xz message handling improvements
+
+ src/xz/message.c | 125 +++++++++++++++++++++++++++++++++++++++++++++++++-----
+ src/xz/message.h | 7 +++-
+ src/xz/process.c | 28 +++++++++++-
+ 3 files changed, 146 insertions(+), 14 deletions(-)
+
+commit 653e457e3756ef35e5d1b2be3523b3e4b1e9ee4d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-15 23:26:43 +0200
+
+ Fix a dumb bug in .lzma decoder which was introduced in
+ the previous commit. (Probably the previous commit has
+ other bugs too, it wasn't tested.)
+
+ src/liblzma/common/alone_decoder.c | 29 ++++++++++++++---------------
+ 1 files changed, 14 insertions(+), 15 deletions(-)
+
+commit 671a5adf1e844bfdd6fd327016c3c28694493158
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-15 19:39:13 +0200
+
+ Bunch of liblzma API cleanups and fixes.
+
+ src/liblzma/api/lzma.h | 122 +++++++++++---------
+ src/liblzma/api/lzma/base.h | 174 +++++++++++++++++++----------
+ src/liblzma/api/lzma/block.h | 211 +++++++++++++++++++++++------------
+ src/liblzma/api/lzma/check.h | 28 +++--
+ src/liblzma/api/lzma/container.h | 155 +++++++++++++-------------
+ src/liblzma/api/lzma/delta.h | 12 +-
+ src/liblzma/api/lzma/filter.h | 27 +++--
+ src/liblzma/api/lzma/index.h | 97 ++++++++++++++---
+ src/liblzma/api/lzma/index_hash.h | 26 +++-
+ src/liblzma/api/lzma/init.h | 2 +-
+ src/liblzma/api/lzma/lzma.h | 12 +-
+ src/liblzma/api/lzma/simple.h | 4 +-
+ src/liblzma/api/lzma/stream_flags.h | 46 +++++---
+ src/liblzma/api/lzma/version.h | 6 +-
+ src/liblzma/api/lzma/vli.h | 17 +--
+ src/liblzma/common/alone_decoder.c | 47 ++++++---
+ src/liblzma/common/auto_decoder.c | 29 +++++
+ src/liblzma/common/block_util.c | 52 +++++----
+ src/liblzma/common/common.c | 58 ++++++++++
+ src/liblzma/common/common.h | 9 ++-
+ src/liblzma/common/easy.c | 33 ++++--
+ src/liblzma/common/filter_common.c | 2 +-
+ src/liblzma/common/index.c | 11 ++
+ src/liblzma/common/index_decoder.c | 46 +++++++-
+ src/liblzma/common/stream_decoder.c | 47 +++++++-
+ src/liblzma/lzma/lzma2_encoder.c | 6 +-
+ tests/test_index.c | 10 +-
+ 27 files changed, 863 insertions(+), 426 deletions(-)
+
+commit 17781c2c20fd77029cb32e77792889f2f211d69d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-15 14:26:52 +0200
+
+ The LZMA2 decoder fix introduced a bug to the LZ decoder,
+ which made it return too early after a dictionary
+ reset. This fixes it.
+
+ src/liblzma/lz/lz_decoder.c | 33 +++++++++++++++++++++++----------
+ 1 files changed, 23 insertions(+), 10 deletions(-)
+
+commit f9f2d1e74398500724041f7fb3c38db35ad8c8d8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-15 11:20:22 +0200
+
+ Added two new test files.
+
+ tests/files/README | 7 +++++++
+ tests/files/bad-1-lzma2-8.xz | Bin 0 -> 464 bytes
+ tests/files/good-1-lzma2-4.xz | Bin 0 -> 464 bytes
+ 3 files changed, 7 insertions(+), 0 deletions(-)
+
+commit ff7fb2c605bccc411069e07b9f11fb957aea2ddf
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-15 10:01:59 +0200
+
+ Fix data corruption in LZMA2 decoder.
+
+ src/liblzma/lz/lz_decoder.c | 17 ++++++++++++++++-
+ src/liblzma/lz/lz_decoder.h | 8 +++++---
+ src/liblzma/lzma/lzma2_decoder.c | 15 +++++++++++----
+ 3 files changed, 32 insertions(+), 8 deletions(-)
+
+commit 1ceebcf7e1bd30b95125f0ad67a09fdb6215d613
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-13 00:54:11 +0200
+
+ Name the package "xz" in configure.ac.
+
+ configure.ac | 11 ++++-------
+ 1 files changed, 4 insertions(+), 7 deletions(-)
+
+commit a94bf00d0af9b423851905b031be5a645a657820
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-12 22:43:21 +0200
+
+ Some adjustments to GCC warning flags. The important change
+ is the removal of -pedantic. It messes up -Werror (which I
+ really want to keep so that I don't miss any warnings) with
+ printf format strings that are in POSIX but not in C99.
+
+ configure.ac | 8 +++++---
+ 1 files changed, 5 insertions(+), 3 deletions(-)
+
+commit 8582d392baacd2cdac07ca60041f8c661323676d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-10 01:31:00 +0200
+
+ Remove obsolete comment.
+
+ src/xz/message.c | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+commit b1ae6dd731ea3636c3c2bfc7aefa71457d3328f1
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-10 01:27:15 +0200
+
+ Use "decompression" consistently in --long-help.
+
+ src/xz/message.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 1ea9e7f15afd5d3981e2432710e932320597bca9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-10 01:23:58 +0200
+
+ Added preset=NUM to --lzma1 and --lzma2. This makes it easy
+ to take a preset as a template and modify it a little.
+
+ src/xz/message.c | 1 +
+ src/xz/options.c | 8 ++++++++
+ 2 files changed, 9 insertions(+), 0 deletions(-)
+
+commit bceb3918dbb21f34976bfdd4c171a81319de71f7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-09 17:43:31 +0200
+
+ Put the file format specification into the public domain.
+ Same will be done to the actual code later.
+
+ doc/file-format.txt | 24 +++++++++---------------
+ 1 files changed, 9 insertions(+), 15 deletions(-)
+
+commit 6efa2d80d46a38861016f41f0eb6fa2ec9260fe6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-09 17:41:49 +0200
+
+ Make the memusage functions of the LZMA1 and LZMA2 encoders
+ validate the filter options. Add missing validation to the
+ LZMA2 encoder when options are changed in the middle
+ of encoding.
+
+ src/liblzma/lzma/lzma2_encoder.c | 5 +++-
+ src/liblzma/lzma/lzma_encoder.c | 44 +++++++++++++++++++++++++------------
+ src/liblzma/lzma/lzma_encoder.h | 2 +-
+ 3 files changed, 35 insertions(+), 16 deletions(-)
+
+commit f20a03206b71ff01b827bb7a932411d6a6a4e06a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-09 10:36:24 +0200
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit ef7890d56453dca1aeb2e12db29b7e418d93dde4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-01 23:04:12 +0200
+
+ In command line tool, take advantage of memusage calculation's
+ ability to also validate the filter chain and options (not
+ implemented yet for all filters).
+
+ src/xz/process.c | 8 ++++----
+ 1 files changed, 4 insertions(+), 4 deletions(-)
+
+commit ccd57afa09e332d664d6d6a7498702791ea5f659
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-01 22:59:28 +0200
+
+ Validate the filter chain before checking filter-specific
+ memory usage.
+
+ src/liblzma/common/filter_common.c | 14 ++++++++++----
+ 1 files changed, 10 insertions(+), 4 deletions(-)
+
+commit c596fda40b62fe1683d0ac34d0c673dcaae2aa15
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-01 22:58:22 +0200
+
+ Make the memusage functions of the LZMA1 and LZMA2 decoders
+ validate the filter options.
+
+ src/liblzma/lzma/lzma2_decoder.c | 7 ++-----
+ src/liblzma/lzma/lzma_decoder.c | 14 ++++++++++----
+ src/liblzma/lzma/lzma_decoder.h | 5 +++++
+ 3 files changed, 17 insertions(+), 9 deletions(-)
+
+commit c58f469be5bb9b0bdab825c6687445fd553f4f3a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-01 22:55:18 +0200
+
+ Added the changes for Delta filter that should have been
+ part of 656ec87882ee74b192c4ea4a233a235eca7b04d4.
+
+ src/liblzma/common/filter_decoder.c | 2 +-
+ src/liblzma/common/filter_encoder.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+commit cd708015202dbf7585b84a8781462a20c42a324b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-01 22:50:28 +0200
+
+ LZMA2 decoder cleanups. Make it require new LZMA properties
+ also in the first LZMA chunk after a dictionary reset in
+ an uncompressed chunk.
+
+ src/liblzma/lzma/lzma2_decoder.c | 95 ++++++++++++++++---------------------
+ 1 files changed, 41 insertions(+), 54 deletions(-)
+
+commit 656ec87882ee74b192c4ea4a233a235eca7b04d4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-12-01 16:30:11 +0200
+
+ Added lzma_delta_coder_memusage() which also validates
+ the options.
+
+ src/liblzma/delta/Makefile.am | 3 +-
+ src/liblzma/delta/delta_common.c | 28 +++++++++++++++++------
+ src/liblzma/delta/delta_common.h | 19 +---------------
+ src/liblzma/delta/delta_decoder.c | 2 +-
+ src/liblzma/delta/delta_decoder.h | 2 +-
+ src/liblzma/delta/delta_encoder.c | 14 +++--------
+ src/liblzma/delta/delta_encoder.h | 2 +-
+ src/liblzma/delta/delta_private.h | 44 +++++++++++++++++++++++++++++++++++++
+ 8 files changed, 75 insertions(+), 39 deletions(-)
+
+commit 691a9155b7a28882baf37e9d1e969e32e91dbc7a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-29 10:03:49 +0200
+
+ Automake includes the m4 directory, so don't add it in
+ Makefile.am separately.
+
+ Updated THANKS.
+
+ Makefile.am | 1 -
+ THANKS | 1 +
+ 2 files changed, 1 insertions(+), 1 deletions(-)
+
+commit c7007ddf06ac2b0e018d71d281c21b99f16e7ae0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-28 12:00:48 +0200
+
+ Tested using the COLUMNS environment variable to avoid a broken
+ progress indicator, but since COLUMNS isn't usually available,
+ the code was left commented out.
+
+ src/xz/message.c | 14 +++++++++-----
+ 1 files changed, 9 insertions(+), 5 deletions(-)
+
+commit ae65dcfde27014e4d811e1a1308aa5d0fe8debbd
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-27 19:28:59 +0200
+
+ Cleanups to message.c.
+
+ src/xz/message.c | 47 ++++++++++++++++++-----------------------------
+ 1 files changed, 18 insertions(+), 29 deletions(-)
+
+commit a8368b75cdcd5427299001cc42839287f27b244d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-25 02:37:47 +0200
+
+ Remove the nowadays unneeded memory-limiting malloc() wrapper.
+
+ src/liblzma/api/Makefile.am | 1 -
+ src/liblzma/api/lzma.h | 1 -
+ src/liblzma/api/lzma/memlimit.h | 207 -------------------------
+ src/liblzma/common/Makefile.am | 1 -
+ src/liblzma/common/memory_limiter.c | 288 -----------------------------------
+ tests/Makefile.am | 2 -
+ tests/test_memlimit.c | 114 --------------
+ 7 files changed, 0 insertions(+), 614 deletions(-)
+
+commit 69472ee5f055a2bb6f28106f0923e1461fd1d080
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-23 15:09:03 +0200
+
+ VLI encoder and decoder cleanups. Made encoder return
+ LZMA_PROG_ERROR in single-call mode if there's no output
+ space.
+
+ src/liblzma/common/vli_decoder.c | 15 +++++++++------
+ src/liblzma/common/vli_encoder.c | 31 ++++++++++++++++++++++++-------
+ 2 files changed, 33 insertions(+), 13 deletions(-)
+
+commit 4249c8c15a08f55b51b7012e6aaafce3aa9eb650
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-22 17:44:33 +0200
+
+ Typo fix
+
+ src/xz/process.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 6d1d6f4598d121253dbe1084c6866b66e95c361b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-20 22:59:10 +0200
+
+ Support NetBSD's errno for O_NOFOLLOW.
+
+ src/xz/io.c | 8 ++++++++
+ 1 files changed, 8 insertions(+), 0 deletions(-)
+
+commit f901a290eef67b8ea4720ccdf5f46edf775ed9d7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-20 18:05:52 +0200
+
+ Build xzdec and lzmadec from xzdec.c. xzdec supports only .xz
+ files and lzmadec only .lzma files.
+
+ src/xzdec/Makefile.am | 7 +-
+ src/xzdec/xzdec.c | 311 +++++++++++++++++++++----------------------------
+ 2 files changed, 140 insertions(+), 178 deletions(-)
+
+commit 86a0ed8f01c8ed44721223f885e679c71b7bb94c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-20 11:01:29 +0200
+
+ Minor cleanups to xzdec.
+
+ src/xzdec/xzdec.c | 20 ++++++++++----------
+ 1 files changed, 10 insertions(+), 10 deletions(-)
+
+commit 54f716ba8905d09752dcd1519455a40bd21d5317
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-19 23:55:22 +0200
+
+ Added missing check for uint16_t.
+
+ configure.ac | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 1880a3927b23f265f63b2adb86fbdb81ea09eb06
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-19 23:52:24 +0200
+
+ Renamed lzma to xz and lzmadec to xzdec. We create symlinks
+ lzma, unlzma, and lzcat in "make install" for backwards
+ compatibility with LZMA Utils 4.32.x; I'm not sure if this
+ should be the default though.
+
+ configure.ac | 4 +-
+ po/POTFILES.in | 21 +-
+ src/Makefile.am | 2 +-
+ src/lzma/Makefile.am | 72 ----
+ src/lzma/args.c | 500 --------------------------
+ src/lzma/args.h | 56 ---
+ src/lzma/hardware.c | 122 -------
+ src/lzma/hardware.h | 45 ---
+ src/lzma/io.c | 658 ----------------------------------
+ src/lzma/io.h | 97 -----
+ src/lzma/list.c | 477 -------------------------
+ src/lzma/main.c | 402 ---------------------
+ src/lzma/main.h | 60 ----
+ src/lzma/message.c | 892 -----------------------------------------------
+ src/lzma/message.h | 132 -------
+ src/lzma/options.c | 352 -------------------
+ src/lzma/options.h | 46 ---
+ src/lzma/private.h | 52 ---
+ src/lzma/process.c | 391 ---------------------
+ src/lzma/process.h | 70 ----
+ src/lzma/suffix.c | 213 -----------
+ src/lzma/suffix.h | 40 ---
+ src/lzma/util.c | 199 -----------
+ src/lzma/util.h | 71 ----
+ src/lzmadec/Makefile.am | 29 --
+ src/lzmadec/lzmadec.c | 492 --------------------------
+ src/xz/Makefile.am | 74 ++++
+ src/xz/args.c | 500 ++++++++++++++++++++++++++
+ src/xz/args.h | 56 +++
+ src/xz/hardware.c | 122 +++++++
+ src/xz/hardware.h | 45 +++
+ src/xz/io.c | 658 ++++++++++++++++++++++++++++++++++
+ src/xz/io.h | 97 +++++
+ src/xz/list.c | 477 +++++++++++++++++++++++++
+ src/xz/main.c | 402 +++++++++++++++++++++
+ src/xz/main.h | 60 ++++
+ src/xz/message.c | 892 +++++++++++++++++++++++++++++++++++++++++++++++
+ src/xz/message.h | 132 +++++++
+ src/xz/options.c | 352 +++++++++++++++++++
+ src/xz/options.h | 46 +++
+ src/xz/private.h | 52 +++
+ src/xz/process.c | 391 +++++++++++++++++++++
+ src/xz/process.h | 70 ++++
+ src/xz/suffix.c | 213 +++++++++++
+ src/xz/suffix.h | 40 +++
+ src/xz/util.c | 199 +++++++++++
+ src/xz/util.h | 71 ++++
+ src/xzdec/Makefile.am | 29 ++
+ src/xzdec/xzdec.c | 492 ++++++++++++++++++++++++++
+ tests/test_compress.sh | 29 +-
+ tests/test_files.sh | 4 +-
+ 51 files changed, 5498 insertions(+), 5500 deletions(-)
+
+commit e114502b2bc371e4a45449832cb69be036360722
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-11-19 20:46:52 +0200
+
+ Oh well, big messy commit again. Some highlights:
+ - Updated to the latest, probably final file format version.
+ - Command line tool reworked to not use threads anymore.
+ Threading will probably go into liblzma anyway.
+ - Memory usage limit is now about 30 % for uncompression
+ and about 90 % for compression.
+ - Progress indicator with --verbose
+ - Simplified --help and full --long-help
+ - Upgraded to the last LGPLv2.1+ getopt_long from gnulib.
+ - Some bug fixes
+
+ THANKS | 1 +
+ configure.ac | 48 +-
+ debug/full_flush.c | 6 +-
+ debug/known_sizes.c | 2 +-
+ debug/memusage.c | 2 +-
+ debug/sync_flush.c | 10 +-
+ doc/file-format.txt | 260 +++++----
+ lib/Makefile.am | 10 +-
+ lib/getopt.c | 14 +-
+ lib/getopt.in.h | 226 +++++++
+ lib/getopt1.c | 8 +-
+ lib/getopt_.h | 226 -------
+ lib/gettext.h | 240 -------
+ m4/getopt.m4 | 64 +--
+ src/common/bswap.h | 15 +-
+ src/common/physmem.h | 4 +
+ src/common/sysdefs.h | 12 +-
+ src/liblzma/api/lzma/block.h | 47 +-
+ src/liblzma/api/lzma/filter.h | 8 +
+ src/liblzma/api/lzma/index.h | 20 +-
+ src/liblzma/api/lzma/index_hash.h | 4 +-
+ src/liblzma/common/block_decoder.c | 59 +-
+ src/liblzma/common/block_encoder.c | 41 +-
+ src/liblzma/common/block_header_decoder.c | 31 +-
+ src/liblzma/common/block_header_encoder.c | 69 +--
+ src/liblzma/common/block_util.c | 45 +-
+ src/liblzma/common/common.h | 8 -
+ src/liblzma/common/filter_common.c | 4 +-
+ src/liblzma/common/index.c | 259 ++++----
+ src/liblzma/common/index.h | 33 +-
+ src/liblzma/common/index_decoder.c | 31 +-
+ src/liblzma/common/index_encoder.c | 16 +-
+ src/liblzma/common/index_hash.c | 68 +-
+ src/liblzma/common/stream_decoder.c | 9 +-
+ src/liblzma/common/stream_encoder.c | 6 +-
+ src/liblzma/lz/lz_decoder.h | 4 +-
+ src/liblzma/subblock/subblock_decoder.c | 3 +-
+ src/lzma/Makefile.am | 9 +-
+ src/lzma/alloc.c | 106 ---
+ src/lzma/alloc.h | 42 --
+ src/lzma/args.c | 531 +++++++---------
+ src/lzma/args.h | 42 +-
+ src/lzma/error.c | 162 -----
+ src/lzma/error.h | 67 --
+ src/lzma/hardware.c | 75 ++-
+ src/lzma/hardware.h | 16 +-
+ src/lzma/help.c | 170 -----
+ src/lzma/help.h | 32 -
+ src/lzma/io.c | 757 +++++++++++-----------
+ src/lzma/io.h | 51 ++-
+ src/lzma/main.c | 392 ++++++++----
+ src/lzma/main.h | 60 ++
+ src/lzma/message.c | 892 ++++++++++++++++++++++++++
+ src/lzma/message.h | 132 ++++
+ src/lzma/options.c | 42 +-
+ src/lzma/options.h | 6 +-
+ src/lzma/private.h | 28 +-
+ src/lzma/process.c | 525 +++++++--------
+ src/lzma/process.h | 40 ++
+ src/lzma/suffix.c | 52 +-
+ src/lzma/suffix.h | 17 +-
+ src/lzma/util.c | 100 ++--
+ src/lzma/util.h | 43 ++-
+ src/lzmadec/lzmadec.c | 36 +-
+ tests/files/README | 12 +-
+ tests/files/bad-1-block_header-1.xz | Bin 64 -> 64 bytes
+ tests/files/bad-1-block_header-2.xz | Bin 64 -> 64 bytes
+ tests/files/bad-1-block_header-3.xz | Bin 68 -> 68 bytes
+ tests/files/bad-1-block_header-4.xz | Bin 72 -> 76 bytes
+ tests/files/bad-1-block_header-5.xz | Bin 0 -> 72 bytes
+ tests/files/bad-1-check-crc32.xz | Bin 68 -> 68 bytes
+ tests/files/bad-1-check-crc64.xz | Bin 72 -> 72 bytes
+ tests/files/bad-1-check-sha256.xz | Bin 96 -> 96 bytes
+ tests/files/bad-1-lzma2-1.xz | Bin 64 -> 64 bytes
+ tests/files/bad-1-lzma2-2.xz | Bin 424 -> 424 bytes
+ tests/files/bad-1-lzma2-3.xz | Bin 424 -> 424 bytes
+ tests/files/bad-1-lzma2-4.xz | Bin 408 -> 408 bytes
+ tests/files/bad-1-lzma2-5.xz | Bin 408 -> 408 bytes
+ tests/files/bad-1-lzma2-6.xz | Bin 68 -> 68 bytes
+ tests/files/bad-1-lzma2-7.xz | Bin 408 -> 408 bytes
+ tests/files/bad-1-stream_flags-1.xz | Bin 68 -> 68 bytes
+ tests/files/bad-1-stream_flags-2.xz | Bin 68 -> 68 bytes
+ tests/files/bad-1-stream_flags-3.xz | Bin 68 -> 68 bytes
+ tests/files/bad-1-vli-1.xz | Bin 72 -> 72 bytes
+ tests/files/bad-1-vli-2.xz | Bin 72 -> 76 bytes
+ tests/files/bad-2-compressed_data_padding.xz | Bin 92 -> 92 bytes
+ tests/files/bad-2-index-1.xz | Bin 92 -> 92 bytes
+ tests/files/bad-2-index-2.xz | Bin 92 -> 92 bytes
+ tests/files/bad-2-index-3.xz | Bin 92 -> 92 bytes
+ tests/files/bad-2-index-4.xz | Bin 92 -> 92 bytes
+ tests/files/bad-2-index-5.xz | Bin 0 -> 92 bytes
+ tests/files/good-1-3delta-lzma2.xz | Bin 528 -> 528 bytes
+ tests/files/good-1-block_header-1.xz | Bin 72 -> 72 bytes
+ tests/files/good-1-block_header-2.xz | Bin 68 -> 68 bytes
+ tests/files/good-1-block_header-3.xz | Bin 68 -> 68 bytes
+ tests/files/good-1-check-crc32.xz | Bin 68 -> 68 bytes
+ tests/files/good-1-check-crc64.xz | Bin 72 -> 72 bytes
+ tests/files/good-1-check-none.xz | Bin 64 -> 64 bytes
+ tests/files/good-1-check-sha256.xz | Bin 96 -> 96 bytes
+ tests/files/good-1-delta-lzma2.tiff.xz | Bin 51312 -> 51316 bytes
+ tests/files/good-1-lzma2-1.xz | Bin 424 -> 424 bytes
+ tests/files/good-1-lzma2-2.xz | Bin 424 -> 424 bytes
+ tests/files/good-1-lzma2-3.xz | Bin 408 -> 408 bytes
+ tests/files/good-1-sparc-lzma2.xz | Bin 2292 -> 2296 bytes
+ tests/files/good-1-x86-lzma2.xz | Bin 1936 -> 1936 bytes
+ tests/files/good-2-lzma2.xz | Bin 92 -> 92 bytes
+ tests/files/unsupported-block_header.xz | Bin 68 -> 68 bytes
+ tests/files/unsupported-check.xz | Bin 68 -> 68 bytes
+ tests/files/unsupported-filter_flags-1.xz | Bin 68 -> 68 bytes
+ tests/files/unsupported-filter_flags-2.xz | Bin 68 -> 68 bytes
+ tests/files/unsupported-filter_flags-3.xz | Bin 68 -> 68 bytes
+ tests/test_block_header.c | 16 +-
+ tests/test_index.c | 42 +-
+ 113 files changed, 3462 insertions(+), 2946 deletions(-)
+
+commit 3c3905b53462ae235c9438d86a4dc51086410932
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-10-09 11:12:29 +0300
+
+ Fixed the test that should have been fixed as part
+ of 1e8e4fd1f3e50129b4541406ad765d2aa1233943.
+
+ tests/test_block_header.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 0f295bf7a3ece01f667caae318cc3e3424085886
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-10-07 16:42:18 +0300
+
+ Fixed some help messages.
+
+ src/lzma/help.c | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+commit 1e8e4fd1f3e50129b4541406ad765d2aa1233943
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-10-07 09:40:31 +0300
+
+ Made the preset numbering more logical in liblzma API.
+
+ src/liblzma/api/lzma/container.h | 20 ++++++++++----------
+ src/liblzma/api/lzma/lzma.h | 2 +-
+ src/liblzma/lzma/lzma_encoder_presets.c | 3 ++-
+ src/lzma/args.c | 8 ++++----
+ src/lzma/args.h | 2 +-
+ 5 files changed, 18 insertions(+), 17 deletions(-)
+
+commit 5e4df4c3c09c82bbbb1a916784e3dc717ca4ff81
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-10-03 19:36:09 +0300
+
+ Removed fi from po/LINGUAS.
+
+ po/LINGUAS | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+commit fcfb86c7770328cfffa2e83b176af9a1ba2d9128
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-10-03 07:06:48 +0300
+
+ Fixed suffix handling with --format=raw.
+
+ src/lzma/suffix.c | 28 +++++++++++++++++++---------
+ 1 files changed, 19 insertions(+), 9 deletions(-)
+
+commit bd137524f2f50e30ba054f42f1f6536cd3cee920
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-10-02 22:51:46 +0300
+
+ Initial changes to switch the suffix of the new format to .xz.
+ This also fixes a bug related to the --suffix option. Some
+ suffix issues with --format=raw were not fixed.
+
+ src/lzma/args.c | 67 ++++++++++++++++++++++++++++++-------------
+ src/lzma/args.h | 13 ++++----
+ src/lzma/help.c | 4 +-
+ src/lzma/process.c | 24 +++++++++------
+ src/lzma/suffix.c | 74 +++++++++++++++++++++++++++++++++++++++--------
+ tests/test_compress.sh | 3 +-
+ 6 files changed, 133 insertions(+), 52 deletions(-)
+
+commit 4c321a41c482821aa3c4d64cdf886a6ed904d844
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-30 17:43:55 +0300
+
+ Renamed the test files from .lzma suffix to .xz suffix.
+
+ tests/files/README | 128 ++++++++++++------------
+ tests/files/bad-0-backward_size.lzma | Bin 32 -> 0 bytes
+ tests/files/bad-0-backward_size.xz | Bin 0 -> 32 bytes
+ tests/files/bad-0-empty-truncated.lzma | Bin 31 -> 0 bytes
+ tests/files/bad-0-empty-truncated.xz | Bin 0 -> 31 bytes
+ tests/files/bad-0-footer_magic.lzma | Bin 32 -> 0 bytes
+ tests/files/bad-0-footer_magic.xz | Bin 0 -> 32 bytes
+ tests/files/bad-0-header_magic.lzma | Bin 32 -> 0 bytes
+ tests/files/bad-0-header_magic.xz | Bin 0 -> 32 bytes
+ tests/files/bad-0-nonempty_index.lzma | Bin 32 -> 0 bytes
+ tests/files/bad-0-nonempty_index.xz | Bin 0 -> 32 bytes
+ tests/files/bad-0cat-alone.lzma | Bin 55 -> 0 bytes
+ tests/files/bad-0cat-alone.xz | Bin 0 -> 55 bytes
+ tests/files/bad-0cat-header_magic.lzma | Bin 64 -> 0 bytes
+ tests/files/bad-0cat-header_magic.xz | Bin 0 -> 64 bytes
+ tests/files/bad-0catpad-empty.lzma | Bin 69 -> 0 bytes
+ tests/files/bad-0catpad-empty.xz | Bin 0 -> 69 bytes
+ tests/files/bad-0pad-empty.lzma | Bin 37 -> 0 bytes
+ tests/files/bad-0pad-empty.xz | Bin 0 -> 37 bytes
+ tests/files/bad-1-block_header-1.lzma | Bin 64 -> 0 bytes
+ tests/files/bad-1-block_header-1.xz | Bin 0 -> 64 bytes
+ tests/files/bad-1-block_header-2.lzma | Bin 64 -> 0 bytes
+ tests/files/bad-1-block_header-2.xz | Bin 0 -> 64 bytes
+ tests/files/bad-1-block_header-3.lzma | Bin 68 -> 0 bytes
+ tests/files/bad-1-block_header-3.xz | Bin 0 -> 68 bytes
+ tests/files/bad-1-block_header-4.lzma | Bin 72 -> 0 bytes
+ tests/files/bad-1-block_header-4.xz | Bin 0 -> 72 bytes
+ tests/files/bad-1-check-crc32.lzma | Bin 68 -> 0 bytes
+ tests/files/bad-1-check-crc32.xz | Bin 0 -> 68 bytes
+ tests/files/bad-1-check-crc64.lzma | Bin 72 -> 0 bytes
+ tests/files/bad-1-check-crc64.xz | Bin 0 -> 72 bytes
+ tests/files/bad-1-check-sha256.lzma | Bin 96 -> 0 bytes
+ tests/files/bad-1-check-sha256.xz | Bin 0 -> 96 bytes
+ tests/files/bad-1-lzma2-1.lzma | Bin 64 -> 0 bytes
+ tests/files/bad-1-lzma2-1.xz | Bin 0 -> 64 bytes
+ tests/files/bad-1-lzma2-2.lzma | Bin 424 -> 0 bytes
+ tests/files/bad-1-lzma2-2.xz | Bin 0 -> 424 bytes
+ tests/files/bad-1-lzma2-3.lzma | Bin 424 -> 0 bytes
+ tests/files/bad-1-lzma2-3.xz | Bin 0 -> 424 bytes
+ tests/files/bad-1-lzma2-4.lzma | Bin 408 -> 0 bytes
+ tests/files/bad-1-lzma2-4.xz | Bin 0 -> 408 bytes
+ tests/files/bad-1-lzma2-5.lzma | Bin 408 -> 0 bytes
+ tests/files/bad-1-lzma2-5.xz | Bin 0 -> 408 bytes
+ tests/files/bad-1-lzma2-6.lzma | Bin 68 -> 0 bytes
+ tests/files/bad-1-lzma2-6.xz | Bin 0 -> 68 bytes
+ tests/files/bad-1-lzma2-7.lzma | Bin 408 -> 0 bytes
+ tests/files/bad-1-lzma2-7.xz | Bin 0 -> 408 bytes
+ tests/files/bad-1-stream_flags-1.lzma | Bin 68 -> 0 bytes
+ tests/files/bad-1-stream_flags-1.xz | Bin 0 -> 68 bytes
+ tests/files/bad-1-stream_flags-2.lzma | Bin 68 -> 0 bytes
+ tests/files/bad-1-stream_flags-2.xz | Bin 0 -> 68 bytes
+ tests/files/bad-1-stream_flags-3.lzma | Bin 68 -> 0 bytes
+ tests/files/bad-1-stream_flags-3.xz | Bin 0 -> 68 bytes
+ tests/files/bad-1-vli-1.lzma | Bin 72 -> 0 bytes
+ tests/files/bad-1-vli-1.xz | Bin 0 -> 72 bytes
+ tests/files/bad-1-vli-2.lzma | Bin 72 -> 0 bytes
+ tests/files/bad-1-vli-2.xz | Bin 0 -> 72 bytes
+ tests/files/bad-2-compressed_data_padding.lzma | Bin 92 -> 0 bytes
+ tests/files/bad-2-compressed_data_padding.xz | Bin 0 -> 92 bytes
+ tests/files/bad-2-index-1.lzma | Bin 92 -> 0 bytes
+ tests/files/bad-2-index-1.xz | Bin 0 -> 92 bytes
+ tests/files/bad-2-index-2.lzma | Bin 92 -> 0 bytes
+ tests/files/bad-2-index-2.xz | Bin 0 -> 92 bytes
+ tests/files/bad-2-index-3.lzma | Bin 92 -> 0 bytes
+ tests/files/bad-2-index-3.xz | Bin 0 -> 92 bytes
+ tests/files/bad-2-index-4.lzma | Bin 92 -> 0 bytes
+ tests/files/bad-2-index-4.xz | Bin 0 -> 92 bytes
+ tests/files/good-0-empty.lzma | Bin 32 -> 0 bytes
+ tests/files/good-0-empty.xz | Bin 0 -> 32 bytes
+ tests/files/good-0cat-empty.lzma | Bin 64 -> 0 bytes
+ tests/files/good-0cat-empty.xz | Bin 0 -> 64 bytes
+ tests/files/good-0catpad-empty.lzma | Bin 68 -> 0 bytes
+ tests/files/good-0catpad-empty.xz | Bin 0 -> 68 bytes
+ tests/files/good-0pad-empty.lzma | Bin 36 -> 0 bytes
+ tests/files/good-0pad-empty.xz | Bin 0 -> 36 bytes
+ tests/files/good-1-3delta-lzma2.lzma | Bin 528 -> 0 bytes
+ tests/files/good-1-3delta-lzma2.xz | Bin 0 -> 528 bytes
+ tests/files/good-1-block_header-1.lzma | Bin 72 -> 0 bytes
+ tests/files/good-1-block_header-1.xz | Bin 0 -> 72 bytes
+ tests/files/good-1-block_header-2.lzma | Bin 68 -> 0 bytes
+ tests/files/good-1-block_header-2.xz | Bin 0 -> 68 bytes
+ tests/files/good-1-block_header-3.lzma | Bin 68 -> 0 bytes
+ tests/files/good-1-block_header-3.xz | Bin 0 -> 68 bytes
+ tests/files/good-1-check-crc32.lzma | Bin 68 -> 0 bytes
+ tests/files/good-1-check-crc32.xz | Bin 0 -> 68 bytes
+ tests/files/good-1-check-crc64.lzma | Bin 72 -> 0 bytes
+ tests/files/good-1-check-crc64.xz | Bin 0 -> 72 bytes
+ tests/files/good-1-check-none.lzma | Bin 64 -> 0 bytes
+ tests/files/good-1-check-none.xz | Bin 0 -> 64 bytes
+ tests/files/good-1-check-sha256.lzma | Bin 96 -> 0 bytes
+ tests/files/good-1-check-sha256.xz | Bin 0 -> 96 bytes
+ tests/files/good-1-delta-lzma2.tiff.lzma | Bin 51312 -> 0 bytes
+ tests/files/good-1-delta-lzma2.tiff.xz | Bin 0 -> 51312 bytes
+ tests/files/good-1-lzma2-1.lzma | Bin 424 -> 0 bytes
+ tests/files/good-1-lzma2-1.xz | Bin 0 -> 424 bytes
+ tests/files/good-1-lzma2-2.lzma | Bin 424 -> 0 bytes
+ tests/files/good-1-lzma2-2.xz | Bin 0 -> 424 bytes
+ tests/files/good-1-lzma2-3.lzma | Bin 408 -> 0 bytes
+ tests/files/good-1-lzma2-3.xz | Bin 0 -> 408 bytes
+ tests/files/good-1-sparc-lzma2.lzma | Bin 2292 -> 0 bytes
+ tests/files/good-1-sparc-lzma2.xz | Bin 0 -> 2292 bytes
+ tests/files/good-1-x86-lzma2.lzma | Bin 1936 -> 0 bytes
+ tests/files/good-1-x86-lzma2.xz | Bin 0 -> 1936 bytes
+ tests/files/good-2-lzma2.lzma | Bin 92 -> 0 bytes
+ tests/files/good-2-lzma2.xz | Bin 0 -> 92 bytes
+ tests/files/unsupported-block_header.lzma | Bin 68 -> 0 bytes
+ tests/files/unsupported-block_header.xz | Bin 0 -> 68 bytes
+ tests/files/unsupported-check.lzma | Bin 68 -> 0 bytes
+ tests/files/unsupported-check.xz | Bin 0 -> 68 bytes
+ tests/files/unsupported-filter_flags-1.lzma | Bin 68 -> 0 bytes
+ tests/files/unsupported-filter_flags-1.xz | Bin 0 -> 68 bytes
+ tests/files/unsupported-filter_flags-2.lzma | Bin 68 -> 0 bytes
+ tests/files/unsupported-filter_flags-2.xz | Bin 0 -> 68 bytes
+ tests/files/unsupported-filter_flags-3.lzma | Bin 68 -> 0 bytes
+ tests/files/unsupported-filter_flags-3.xz | Bin 0 -> 68 bytes
+ tests/test_files.sh | 6 +-
+ 116 files changed, 66 insertions(+), 68 deletions(-)
+
+commit 8e60c889a2816a63013a35c99ce26bf28f5b78eb
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-30 13:57:44 +0300
+
+ Fixed Stream decoder to actually use the first_stream variable.
+
+ src/liblzma/common/stream_decoder.c | 5 +++++
+ 1 files changed, 5 insertions(+), 0 deletions(-)
+
+commit 3bdbc12c054d1961133ee19802af7dd3c3494543
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-30 13:56:57 +0300
+
+ Added one more test file.
+
+ tests/files/README | 15 +++++++++++----
+ tests/files/bad-0cat-header_magic.lzma | Bin 0 -> 64 bytes
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+commit a6639022fdc536e5659b070a465221b4cf7c51fa
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-30 13:34:07 +0300
+
+ Fixed uninitialized variable in Stream decoder.
+
+ src/liblzma/common/stream_decoder.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit ed3709000a3f17ecefab29b2235d7e2221b00003
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-30 13:27:28 +0300
+
+ Added two test files.
+
+ tests/files/README | 6 ++++++
+ tests/files/bad-0-footer_magic.lzma | Bin 0 -> 32 bytes
+ tests/files/bad-0-header_magic.lzma | Bin 0 -> 32 bytes
+ 3 files changed, 6 insertions(+), 0 deletions(-)
+
+commit ea560b0ea80525752bdcd0074d24f8dc170bbe29
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-27 23:49:24 +0300
+
+ Fix conflicting Subblock helper filter's ID.
+
+ src/liblzma/common/common.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit ad97483b6e55142fd8d5c041db057017a891cd95
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-27 23:37:13 +0300
+
+ Changed magic bytes to match the updated spec. Filename
+ suffix wasn't changed yet.
+
+ src/liblzma/common/auto_decoder.c | 4 ++--
+ src/liblzma/common/stream_flags_common.c | 2 +-
+ tests/files/bad-0-backward_size.lzma | Bin 32 -> 32 bytes
+ tests/files/bad-0-empty-truncated.lzma | Bin 31 -> 31 bytes
+ tests/files/bad-0-nonempty_index.lzma | Bin 32 -> 32 bytes
+ tests/files/bad-0cat-alone.lzma | Bin 55 -> 55 bytes
+ tests/files/bad-0catpad-empty.lzma | Bin 69 -> 69 bytes
+ tests/files/bad-0pad-empty.lzma | Bin 37 -> 37 bytes
+ tests/files/bad-1-block_header-1.lzma | Bin 64 -> 64 bytes
+ tests/files/bad-1-block_header-2.lzma | Bin 64 -> 64 bytes
+ tests/files/bad-1-block_header-3.lzma | Bin 68 -> 68 bytes
+ tests/files/bad-1-block_header-4.lzma | Bin 72 -> 72 bytes
+ tests/files/bad-1-check-crc32.lzma | Bin 68 -> 68 bytes
+ tests/files/bad-1-check-crc64.lzma | Bin 72 -> 72 bytes
+ tests/files/bad-1-check-sha256.lzma | Bin 96 -> 96 bytes
+ tests/files/bad-1-lzma2-1.lzma | Bin 64 -> 64 bytes
+ tests/files/bad-1-lzma2-2.lzma | Bin 424 -> 424 bytes
+ tests/files/bad-1-lzma2-3.lzma | Bin 424 -> 424 bytes
+ tests/files/bad-1-lzma2-4.lzma | Bin 408 -> 408 bytes
+ tests/files/bad-1-lzma2-5.lzma | Bin 408 -> 408 bytes
+ tests/files/bad-1-lzma2-6.lzma | Bin 68 -> 68 bytes
+ tests/files/bad-1-lzma2-7.lzma | Bin 408 -> 408 bytes
+ tests/files/bad-1-stream_flags-1.lzma | Bin 68 -> 68 bytes
+ tests/files/bad-1-stream_flags-2.lzma | Bin 68 -> 68 bytes
+ tests/files/bad-1-stream_flags-3.lzma | Bin 68 -> 68 bytes
+ tests/files/bad-1-vli-1.lzma | Bin 72 -> 72 bytes
+ tests/files/bad-1-vli-2.lzma | Bin 72 -> 72 bytes
+ tests/files/bad-2-compressed_data_padding.lzma | Bin 92 -> 92 bytes
+ tests/files/bad-2-index-1.lzma | Bin 92 -> 92 bytes
+ tests/files/bad-2-index-2.lzma | Bin 92 -> 92 bytes
+ tests/files/bad-2-index-3.lzma | Bin 92 -> 92 bytes
+ tests/files/bad-2-index-4.lzma | Bin 92 -> 92 bytes
+ tests/files/good-0-empty.lzma | Bin 32 -> 32 bytes
+ tests/files/good-0cat-empty.lzma | Bin 64 -> 64 bytes
+ tests/files/good-0catpad-empty.lzma | Bin 68 -> 68 bytes
+ tests/files/good-0pad-empty.lzma | Bin 36 -> 36 bytes
+ tests/files/good-1-3delta-lzma2.lzma | Bin 528 -> 528 bytes
+ tests/files/good-1-block_header-1.lzma | Bin 72 -> 72 bytes
+ tests/files/good-1-block_header-2.lzma | Bin 68 -> 68 bytes
+ tests/files/good-1-block_header-3.lzma | Bin 68 -> 68 bytes
+ tests/files/good-1-check-crc32.lzma | Bin 68 -> 68 bytes
+ tests/files/good-1-check-crc64.lzma | Bin 72 -> 72 bytes
+ tests/files/good-1-check-none.lzma | Bin 64 -> 64 bytes
+ tests/files/good-1-check-sha256.lzma | Bin 96 -> 96 bytes
+ tests/files/good-1-delta-lzma2.tiff.lzma | Bin 51312 -> 51312 bytes
+ tests/files/good-1-lzma2-1.lzma | Bin 424 -> 424 bytes
+ tests/files/good-1-lzma2-2.lzma | Bin 424 -> 424 bytes
+ tests/files/good-1-lzma2-3.lzma | Bin 408 -> 408 bytes
+ tests/files/good-1-sparc-lzma2.lzma | Bin 2292 -> 2292 bytes
+ tests/files/good-1-x86-lzma2.lzma | Bin 1936 -> 1936 bytes
+ tests/files/good-2-lzma2.lzma | Bin 92 -> 92 bytes
+ tests/files/unsupported-block_header.lzma | Bin 68 -> 68 bytes
+ tests/files/unsupported-check.lzma | Bin 68 -> 68 bytes
+ tests/files/unsupported-filter_flags-1.lzma | Bin 68 -> 68 bytes
+ tests/files/unsupported-filter_flags-2.lzma | Bin 68 -> 68 bytes
+ tests/files/unsupported-filter_flags-3.lzma | Bin 68 -> 68 bytes
+ 56 files changed, 3 insertions(+), 3 deletions(-)
+
+commit 7a57069167e9e63394e2b095ee3a63253fcb51c7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-27 23:16:09 +0300
+
+ Remove po/fi.po since I'm not keeping it updated for now.
+
+ po/fi.po | 446 --------------------------------------------------------------
+ 1 files changed, 0 insertions(+), 446 deletions(-)
+
+commit 018ae09df8f2fee5a7374f307df4cb42fad0b81e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-27 23:13:54 +0300
+
+ Fix also test_compress.sh.
+
+ tests/test_compress.sh | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+commit 3a62a5fb85d2eebd8666e64ed5d364d095062858
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-27 23:01:15 +0300
+
+ Fixed compilation of test_filter_flags.c, which was broken by
+ 1dcecfb09b55157b8653d747963069c8bed74f04.
+
+ tests/test_filter_flags.c | 16 ++++++++--------
+ 1 files changed, 8 insertions(+), 8 deletions(-)
+
+commit c6ca26eef7cd07eba449035514e2b8f9ac3111c0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-27 19:11:02 +0300
+
+ Updated file format specification. It changes the suffix
+ of the new format to .xz and removes the recently added
+ LZMA filter.
+
+ doc/file-format.txt | 125 +++++++++++++--------------------------------------
+ 1 files changed, 32 insertions(+), 93 deletions(-)
+
+commit 1dcecfb09b55157b8653d747963069c8bed74f04
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-27 19:09:21 +0300
+
+ Some API changes, bug fixes, cleanups etc.
+
+ configure.ac | 18 +-
+ debug/full_flush.c | 7 +-
+ debug/known_sizes.c | 6 +-
+ debug/memusage.c | 22 ++--
+ debug/sync_flush.c | 18 +-
+ src/liblzma/Makefile.am | 2 +-
+ src/liblzma/api/lzma/delta.h | 8 +-
+ src/liblzma/api/lzma/lzma.h | 230 +++++++++++++++---------
+ src/liblzma/common/alignment.c | 7 +-
+ src/liblzma/common/alone_decoder.c | 11 +-
+ src/liblzma/common/alone_encoder.c | 9 +-
+ src/liblzma/common/chunk_size.c | 2 +-
+ src/liblzma/common/easy.c | 20 ++-
+ src/liblzma/common/filter_common.c | 4 +-
+ src/liblzma/common/filter_decoder.c | 4 +-
+ src/liblzma/common/filter_encoder.c | 4 +-
+ src/liblzma/common/init_encoder.c | 2 +-
+ src/liblzma/delta/delta_common.c | 12 +-
+ src/liblzma/delta/delta_common.h | 2 +-
+ src/liblzma/delta/delta_decoder.c | 2 +-
+ src/liblzma/delta/delta_encoder.c | 6 +-
+ src/liblzma/lz/lz_encoder.c | 30 ++--
+ src/liblzma/lz/lz_encoder.h | 26 ++--
+ src/liblzma/lz/lz_encoder_mf.c | 30 ++--
+ src/liblzma/lzma/Makefile.am | 4 +-
+ src/liblzma/lzma/lzma2_decoder.c | 10 +-
+ src/liblzma/lzma/lzma2_encoder.c | 27 +--
+ src/liblzma/lzma/lzma_common.h | 26 +--
+ src/liblzma/lzma/lzma_decoder.c | 37 ++--
+ src/liblzma/lzma/lzma_encoder.c | 51 +++---
+ src/liblzma/lzma/lzma_encoder_optimum_fast.c | 10 +-
+ src/liblzma/lzma/lzma_encoder_optimum_normal.c | 20 +-
+ src/liblzma/lzma/lzma_encoder_presets.c | 50 ++++--
+ src/liblzma/rangecoder/Makefile.am | 4 +-
+ src/liblzma/subblock/subblock_decoder.c | 2 +-
+ src/lzma/args.c | 33 +++--
+ src/lzma/help.c | 17 +-
+ src/lzma/options.c | 92 ++++++----
+ tests/test_block_header.c | 9 +-
+ tests/test_compress.sh | 4 +-
+ tests/test_filter_flags.c | 2 +-
+ 41 files changed, 482 insertions(+), 398 deletions(-)
+
+commit 5cc5064cae603b649c64c40125c7dd365de54c9d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-27 11:28:49 +0300
+
+ Added 7z2lzma.bash.
+
+ extra/7z2lzma/7z2lzma.bash | 114 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 114 insertions(+), 0 deletions(-)
+
+commit f147666a5cd15542d4e427da58629f4a71cc38e1
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-17 22:11:39 +0300
+
+ Miscellaneous LZ and LZMA encoder cleanups
+
+ src/liblzma/api/lzma/lzma.h | 14 ------
+ src/liblzma/lz/lz_encoder.c | 8 +++-
+ src/liblzma/lzma/Makefile.am | 1 -
+ src/liblzma/lzma/lzma_encoder.c | 64 +++++++++++-------------------
+ src/liblzma/lzma/lzma_encoder_features.c | 59 ---------------------------
+ 5 files changed, 29 insertions(+), 117 deletions(-)
+
+commit 13d68b069849e19c33822cd8996cd6447890abb1
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-13 13:54:00 +0300
+
+ LZ decoder cleanup
+
+ src/liblzma/lz/lz_decoder.c | 5 ++---
+ 1 files changed, 2 insertions(+), 3 deletions(-)
+
+commit 13a74b78e37f16c9096ba5fe1859cc04eaa2f9f7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-13 12:10:43 +0300
+
+ Renamed constants:
+ - LZMA_VLI_VALUE_MAX -> LZMA_VLI_MAX
+ - LZMA_VLI_VALUE_UNKNOWN -> LZMA_VLI_UNKNOWN
+ - LZMA_HEADER_ERROR -> LZMA_OPTIONS_ERROR
+
+ debug/full_flush.c | 2 +-
+ debug/known_sizes.c | 2 +-
+ debug/sync_flush.c | 2 +-
+ src/liblzma/api/lzma/alignment.h | 2 +-
+ src/liblzma/api/lzma/base.h | 4 +-
+ src/liblzma/api/lzma/block.h | 28 +++++++++++++-------------
+ src/liblzma/api/lzma/container.h | 12 +++++-----
+ src/liblzma/api/lzma/filter.h | 28 +++++++++++++-------------
+ src/liblzma/api/lzma/index.h | 2 +-
+ src/liblzma/api/lzma/lzma.h | 4 +-
+ src/liblzma/api/lzma/simple.h | 2 +-
+ src/liblzma/api/lzma/stream_flags.h | 20 +++++++++---------
+ src/liblzma/api/lzma/vli.h | 16 +++++++-------
+ src/liblzma/common/alignment.c | 6 ++--
+ src/liblzma/common/alone_decoder.c | 2 +-
+ src/liblzma/common/auto_decoder.c | 2 +-
+ src/liblzma/common/block_decoder.c | 12 +++++-----
+ src/liblzma/common/block_encoder.c | 6 ++--
+ src/liblzma/common/block_header_decoder.c | 16 +++++++-------
+ src/liblzma/common/block_header_encoder.c | 24 +++++++++++-----------
+ src/liblzma/common/block_util.c | 8 +++---
+ src/liblzma/common/chunk_size.c | 2 +-
+ src/liblzma/common/easy.c | 4 +-
+ src/liblzma/common/filter_common.c | 22 ++++++++++----------
+ src/liblzma/common/filter_decoder.c | 6 ++--
+ src/liblzma/common/filter_encoder.c | 14 ++++++------
+ src/liblzma/common/index.c | 24 ++++++++++------------
+ src/liblzma/common/index.h | 2 +-
+ src/liblzma/common/index_hash.c | 13 +++++------
+ src/liblzma/common/stream_decoder.c | 4 +-
+ src/liblzma/common/stream_encoder.c | 4 +-
+ src/liblzma/common/stream_flags_common.c | 6 ++--
+ src/liblzma/common/stream_flags_decoder.c | 6 ++--
+ src/liblzma/common/stream_flags_encoder.c | 4 +-
+ src/liblzma/common/vli_encoder.c | 2 +-
+ src/liblzma/common/vli_size.c | 2 +-
+ src/liblzma/delta/delta_common.c | 2 +-
+ src/liblzma/delta/delta_decoder.c | 2 +-
+ src/liblzma/delta/delta_encoder.c | 2 +-
+ src/liblzma/lz/lz_decoder.c | 2 +-
+ src/liblzma/lz/lz_encoder.c | 2 +-
+ src/liblzma/lzma/lzma2_decoder.c | 6 ++--
+ src/liblzma/lzma/lzma_decoder.c | 14 ++++++------
+ src/liblzma/lzma/lzma_encoder.c | 8 +++---
+ src/liblzma/simple/simple_coder.c | 2 +-
+ src/liblzma/simple/simple_decoder.c | 2 +-
+ src/liblzma/subblock/subblock_decoder.c | 6 ++--
+ src/liblzma/subblock/subblock_encoder.c | 17 +++++++--------
+ src/lzma/args.c | 2 +-
+ src/lzma/error.c | 2 +-
+ src/lzma/list.c | 6 ++--
+ src/lzmadec/lzmadec.c | 2 +-
+ tests/test_block.c | 8 +++---
+ tests/test_block_header.c | 30 ++++++++++++++--------------
+ tests/test_filter_flags.c | 2 +-
+ tests/test_index.c | 2 +-
+ tests/test_stream_flags.c | 8 +++---
+ tests/tests.h | 2 +-
+ 58 files changed, 220 insertions(+), 224 deletions(-)
+
+commit 320601b2c7b08fc7da9da18d5bf7c3c1a189b080
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-12 22:41:40 +0300
+
+ Improved the Stream Flags handling API.
+
+ src/liblzma/api/lzma/stream_flags.h | 84 +++++++++++++++++++++++++++--
+ src/liblzma/common/stream_decoder.c | 5 +-
+ src/liblzma/common/stream_encoder.c | 2 +
+ src/liblzma/common/stream_flags_common.c | 28 +++++++---
+ src/liblzma/common/stream_flags_common.h | 9 +++
+ src/liblzma/common/stream_flags_decoder.c | 3 +-
+ src/liblzma/common/stream_flags_encoder.c | 10 +++-
+ tests/test_stream_flags.c | 8 ++-
+ 8 files changed, 129 insertions(+), 20 deletions(-)
+
+commit ec490da5228263b25bf786bb23d1008468f55b30
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-11 23:10:44 +0300
+
+ Simplified debug/known_sizes.c to match the relaxed
+ requirements of Block encoder.
+
+ debug/known_sizes.c | 14 +++++---------
+ 1 files changed, 5 insertions(+), 9 deletions(-)
+
+commit 16e8b98f2659347edfa74afdbbb9e73311153cb9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-11 23:09:24 +0300
+
+ Remove a check from Block encoder that should have already
+ been removed in 2ba01bfa755e47ff6af84a978e3c8d63d7d2775e.
+
+ src/liblzma/common/block_encoder.c | 5 -----
+ 1 files changed, 0 insertions(+), 5 deletions(-)
+
+commit 5a710c3805bdf6d7e3c92e954e4e4565b27bcb13
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-11 20:02:38 +0300
+
+ Remove bogus #includes.
+
+ src/liblzma/common/Makefile.am | 1 -
+ src/liblzma/common/stream_decoder.c | 3 --
+ src/liblzma/common/stream_encoder.c | 1 -
+ src/liblzma/common/stream_flags_decoder.h | 31 -----------------------------
+ 4 files changed, 0 insertions(+), 36 deletions(-)
+
+commit 01892b2ca5f69bed0ea746e04b604030d57806bb
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-11 10:49:14 +0300
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 962f2231d49409fe6852e44ffe8c5dbabb04bc7d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-11 10:48:12 +0300
+
+ Fix a compiler error on big endian systems that don't
+ support unaligned memory access.
+
+ src/common/integer.h | 32 ++++++++++++++++++--------------
+ 1 files changed, 18 insertions(+), 14 deletions(-)
+
+commit fa3ab0df8ae7a8a1ad55b52266dc0fd387458671
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-11 10:46:14 +0300
+
+ Silence a compiler warning.
+
+ src/lzma/process.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 9373e81e18822db4972819442ea4c2cb9955470b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-10 19:16:32 +0300
+
+ Bumped version to 4.999.6alpha.
+
+ configure.ac | 2 +-
+ src/liblzma/api/lzma/version.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+commit cb072b7c8442ba68bb0c62c0abbbe939794887a3
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-10 17:02:00 +0300
+
+ Check for LZMA_FILTER_RESERVED_START in filter_flags_encoder.c.
+ Use LZMA_PROG_ERROR instead of LZMA_HEADER_ERROR if the Filter ID
+ is in the reserved range. This allows the Block Header encoder to
+ detect disallowed Filter IDs, which is good for the Stream encoder.
+
+ src/liblzma/common/filter_flags_encoder.c | 7 ++++---
+ 1 files changed, 4 insertions(+), 3 deletions(-)
+
+commit 123ab0acec435c9e9866a99e30482116cfbd9ba5
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-10 16:44:32 +0300
+
+ Filter handling cleanups
+
+ src/liblzma/api/lzma/filter.h | 133 +++++++++++++++++++++++++++--------
+ src/liblzma/common/filter_common.h | 3 +
+ src/liblzma/common/filter_decoder.c | 80 +++++++--------------
+ src/liblzma/common/filter_decoder.h | 5 --
+ src/liblzma/common/filter_encoder.c | 82 +++++++---------------
+ src/liblzma/common/filter_encoder.h | 4 -
+ 6 files changed, 156 insertions(+), 151 deletions(-)
+
+commit 9cfcd0c4f2f865d8fbbb46ea28344a9be0dd8ad1
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-10 00:33:00 +0300
+
+ Comments
+
+ src/liblzma/common/stream_encoder.c | 6 +++++-
+ 1 files changed, 5 insertions(+), 1 deletions(-)
+
+commit 2ba01bfa755e47ff6af84a978e3c8d63d7d2775e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-10 00:27:02 +0300
+
+ Cleaned up Block encoder and moved the no longer shared
+ code from block_private.h to block_decoder.c. Now the Block
+ encoder doesn't need compressed_size and uncompressed_size
+ from lzma_block structure to be initialized.
+
+ src/liblzma/api/lzma/block.h | 3 -
+ src/liblzma/common/Makefile.am | 1 -
+ src/liblzma/common/block_decoder.c | 23 +++++++++-
+ src/liblzma/common/block_encoder.c | 92 +++++++++++++++++-------------------
+ src/liblzma/common/block_private.h | 47 ------------------
+ 5 files changed, 66 insertions(+), 100 deletions(-)
+
+commit 07efcb5a6bc5d7018798ebd728586f84183e7d64
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-07 10:23:13 +0300
+
+ Changed Filter ID of LZMA to 0x20.
+
+ doc/file-format.txt | 4 ++--
+ src/liblzma/api/lzma/lzma.h | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+commit 32fe5fa541e82c08e054086279079ae5016bd8d8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-06 23:42:50 +0300
+
+ Comments
+
+ src/liblzma/api/lzma/base.h | 81 +++++++++++++++++++++++++-------------
+ src/liblzma/api/lzma/container.h | 6 ++-
+ src/liblzma/lz/lz_encoder.c | 3 +-
+ src/liblzma/lz/lz_encoder.h | 12 +++---
+ src/liblzma/lz/lz_encoder_mf.c | 2 +-
+ 5 files changed, 65 insertions(+), 39 deletions(-)
+
+commit 0a31ed9d5e3cde4feb094b66f3a8b2c074605d84
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-06 15:14:30 +0300
+
+ Some API cleanups
+
+ src/liblzma/api/lzma/base.h | 314 ++++++++++++++++++++++-------------
+ src/liblzma/api/lzma/check.h | 10 +
+ src/liblzma/api/lzma/container.h | 40 +++--
+ src/liblzma/common/auto_decoder.c | 18 +-
+ src/liblzma/common/common.c | 7 +
+ src/liblzma/common/common.h | 18 +-
+ src/liblzma/common/easy.c | 2 +-
+ src/liblzma/common/stream_decoder.c | 31 ++--
+ src/lzma/process.c | 2 +-
+ src/lzmadec/lzmadec.c | 6 +-
+ tests/tests.h | 72 ++------
+ 11 files changed, 301 insertions(+), 219 deletions(-)
+
+commit da98df54400998be2a6c3876f9655a3c51b93c10
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-04 11:53:06 +0300
+
+ Added support for raw encoding and decoding to the command
+ line tool, and made various cleanups. --lzma was renamed to
+ --lzma1 to prevent people from accidentally using LZMA when
+ they want LZMA2.
+
+ src/lzma/args.c | 17 +++++++++--------
+ src/lzma/args.h | 1 +
+ src/lzma/help.c | 24 ++++++------------------
+ src/lzma/process.c | 42 ++++++++++++++++++++++++++++++++++--------
+ 4 files changed, 50 insertions(+), 34 deletions(-)
+
+commit 2496aee8a7741a8a0d42987db41ff2cf1a4bdabd
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-04 10:39:15 +0300
+
+ Don't allow LZMA_SYNC_FLUSH with decoders anymore. There's
+ simply nothing that would use it. Allow LZMA_FINISH to the
+ decoders, which will usually ignore it (auto decoder and
+ Stream decoder being exceptions).
+
+ src/liblzma/common/alone_decoder.c | 1 -
+ src/liblzma/common/block_decoder.c | 2 +-
+ src/liblzma/common/filter_decoder.c | 2 +-
+ 3 files changed, 2 insertions(+), 3 deletions(-)
+
+commit bea301c26d5d52675e11e0236faec0492af98f60
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-03 17:06:25 +0300
+
+ Minor updates to the file format specification.
+
+ doc/file-format.txt | 105 +++++++++++++++++++++++++++++++++++++++++----------
+ 1 files changed, 85 insertions(+), 20 deletions(-)
+
+commit 9c75b089b4a9e0edcf4cf7970a4383768707d6c8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-02 19:33:32 +0300
+
+ Command line tool fixes
+
+ src/lzma/process.c | 21 +++++++++++++--------
+ 1 files changed, 13 insertions(+), 8 deletions(-)
+
+commit bab0590504b5aeff460ab4ca8c964dd7c1bad9e4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-02 19:31:42 +0300
+
+ Auto decoder cleanup
+
+ src/liblzma/common/auto_decoder.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 689602336d126a46b60d791a67decab65e1e81f5
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-02 19:12:12 +0300
+
+ Updated auto decoder to handle LZMA_CONCATENATED when decoding
+ LZMA_Alone files. Decoding of concatenated LZMA_Alone files is
+ intentionally not supported, so it is better to put this in the
+ auto decoder than in the LZMA_Alone decoder.
+
+ src/liblzma/common/auto_decoder.c | 87 ++++++++++++++++++++++++++++++-------
+ 1 files changed, 71 insertions(+), 16 deletions(-)
+
+commit 80c4158f19904026433eb6f5d5ca98a0ecd4f66c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-02 14:56:52 +0300
+
+ Stream decoder cleanups
+
+ src/liblzma/common/stream_decoder.c | 57 +++++++++++++++++++---------------
+ 1 files changed, 32 insertions(+), 25 deletions(-)
+
+commit fc681657450ce57be1fe08f7a15d31dcc705e514
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-09-02 11:45:39 +0300
+
+ Some fixes to LZ encoder.
+
+ src/liblzma/lz/lz_encoder.c | 56 +++++++++++++++++++----
+ src/liblzma/lz/lz_encoder.h | 18 ++++---
+ src/liblzma/lz/lz_encoder_mf.c | 95 ++++++++++++++++------------------------
+ 3 files changed, 94 insertions(+), 75 deletions(-)
+
+commit ede675f9ac1ca82a7d7c290324adba672118bc8d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-08-31 11:47:01 +0300
+
+ Fix wrong pointer calculation in LZMA encoder.
+
+ src/liblzma/lzma/lzma_encoder.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+commit 3b34851de1eaf358cf9268922fa0eeed8278d680
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-08-28 22:53:15 +0300
+
+ Sort of garbage collection commit. :-| Many things are still
+ broken. The API has changed a lot and will still change a
+ little more here and there. The command line tool doesn't
+ have all the required changes to reflect the API changes, so
+ it's easy to get "internal error" or trigger assertions.
+
+ configure.ac | 356 +++---
+ debug/Makefile.am | 5 +-
+ debug/crc32.c | 45 +
+ debug/full_flush.c | 14 +-
+ debug/hex2bin.c | 54 +
+ debug/known_sizes.c | 135 ++
+ debug/memusage.c | 8 +-
+ debug/sync_flush.c | 20 +-
+ src/common/integer.h | 26 +-
+ src/common/sysdefs.h | 42 +-
+ src/liblzma/Makefile.am | 17 +-
+ src/liblzma/api/Makefile.am | 6 +-
+ src/liblzma/api/lzma.h | 161 ++-
+ src/liblzma/api/lzma/alignment.h | 6 +-
+ src/liblzma/api/lzma/alone.h | 52 -
+ src/liblzma/api/lzma/auto.h | 36 -
+ src/liblzma/api/lzma/base.h | 61 +-
+ src/liblzma/api/lzma/block.h | 38 +-
+ src/liblzma/api/lzma/check.h | 41 +-
+ src/liblzma/api/lzma/container.h | 252 ++++
+ src/liblzma/api/lzma/delta.h | 36 +-
+ src/liblzma/api/lzma/easy.h | 121 --
+ src/liblzma/api/lzma/filter.h | 74 +-
+ src/liblzma/api/lzma/index.h | 40 +-
+ src/liblzma/api/lzma/index_hash.h | 12 +-
+ src/liblzma/api/lzma/lzma.h | 222 ++--
+ src/liblzma/api/lzma/memlimit.h | 15 +-
+ src/liblzma/api/lzma/raw.h | 60 -
+ src/liblzma/api/lzma/simple.h | 2 +-
+ src/liblzma/api/lzma/stream.h | 53 -
+ src/liblzma/api/lzma/stream_flags.h | 17 +-
+ src/liblzma/api/lzma/subblock.h | 4 +-
+ src/liblzma/api/lzma/version.h | 10 +-
+ src/liblzma/api/lzma/vli.h | 131 +--
+ src/liblzma/check/check.c | 128 +--
+ src/liblzma/check/check.h | 67 +-
+ src/liblzma/check/sha256.c | 29 +-
+ src/liblzma/common/Makefile.am | 51 +-
+ src/liblzma/common/alignment.c | 4 +-
+ src/liblzma/common/allocator.c | 58 -
+ src/liblzma/common/alone_decoder.c | 49 +-
+ src/liblzma/common/alone_decoder.h | 9 +-
+ src/liblzma/common/alone_encoder.c | 13 +-
+ src/liblzma/common/auto_decoder.c | 38 +-
+ src/liblzma/common/block_decoder.c | 67 +-
+ src/liblzma/common/block_decoder.h | 2 +-
+ src/liblzma/common/block_encoder.c | 42 +-
+ src/liblzma/common/block_encoder.h | 2 +-
+ src/liblzma/common/block_header_decoder.c | 6 +-
+ src/liblzma/common/block_header_encoder.c | 9 +-
+ src/liblzma/common/block_util.c | 10 +-
+ src/liblzma/common/code.c | 203 ---
+ src/liblzma/common/common.c | 298 +++++
+ src/liblzma/common/common.h | 237 ++--
+ src/liblzma/common/delta_common.c | 66 -
+ src/liblzma/common/delta_common.h | 44 -
+ src/liblzma/common/delta_decoder.c | 61 -
+ src/liblzma/common/delta_decoder.h | 28 -
+ src/liblzma/common/delta_encoder.c | 98 --
+ src/liblzma/common/delta_encoder.h | 28 -
+ src/liblzma/common/easy.c | 18 +-
+ src/liblzma/common/features.c | 66 -
+ src/liblzma/common/filter_common.c | 262 ++++
+ src/liblzma/common/filter_common.h | 52 +
+ src/liblzma/common/filter_decoder.c | 236 ++++
+ src/liblzma/common/filter_decoder.h | 35 +
+ src/liblzma/common/filter_encoder.c | 308 +++++
+ src/liblzma/common/filter_encoder.h | 38 +
+ src/liblzma/common/filter_flags_decoder.c | 185 +---
+ src/liblzma/common/filter_flags_encoder.c | 261 +----
+ src/liblzma/common/index_decoder.c | 14 +-
+ src/liblzma/common/index_encoder.c | 16 +-
+ src/liblzma/common/index_hash.c | 8 +-
+ src/liblzma/common/init_encoder.c | 2 +-
+ src/liblzma/common/memory_usage.c | 112 --
+ src/liblzma/common/next_coder.c | 65 -
+ src/liblzma/common/raw_common.c | 127 --
+ src/liblzma/common/raw_common.h | 30 -
+ src/liblzma/common/raw_decoder.c | 116 --
+ src/liblzma/common/raw_decoder.h | 29 -
+ src/liblzma/common/raw_encoder.c | 111 --
+ src/liblzma/common/raw_encoder.h | 29 -
+ src/liblzma/common/stream_common.c | 23 -
+ src/liblzma/common/stream_common.h | 31 -
+ src/liblzma/common/stream_decoder.c | 238 +++-
+ src/liblzma/common/stream_decoder.h | 4 +-
+ src/liblzma/common/stream_encoder.c | 35 +-
+ src/liblzma/common/stream_encoder.h | 2 +-
+ src/liblzma/common/stream_flags_common.c | 40 +
+ src/liblzma/common/stream_flags_common.h | 31 +
+ src/liblzma/common/stream_flags_decoder.c | 2 +-
+ src/liblzma/common/stream_flags_encoder.c | 2 +-
+ src/liblzma/common/stream_flags_equal.c | 36 -
+ src/liblzma/common/version.c | 25 -
+ src/liblzma/common/vli_decoder.c | 29 +-
+ src/liblzma/common/vli_encoder.c | 23 +-
+ src/liblzma/common/vli_size.c | 37 +
+ src/liblzma/delta/Makefile.am | 34 +
+ src/liblzma/delta/delta_common.c | 66 +
+ src/liblzma/delta/delta_common.h | 44 +
+ src/liblzma/delta/delta_decoder.c | 82 ++
+ src/liblzma/delta/delta_decoder.h | 32 +
+ src/liblzma/delta/delta_encoder.c | 119 ++
+ src/liblzma/delta/delta_encoder.h | 30 +
+ src/liblzma/lz/Makefile.am | 35 +-
+ src/liblzma/lz/bt2.c | 27 -
+ src/liblzma/lz/bt2.h | 31 -
+ src/liblzma/lz/bt3.c | 29 -
+ src/liblzma/lz/bt3.h | 31 -
+ src/liblzma/lz/bt4.c | 30 -
+ src/liblzma/lz/bt4.h | 31 -
+ src/liblzma/lz/hc3.c | 30 -
+ src/liblzma/lz/hc3.h | 31 -
+ src/liblzma/lz/hc4.c | 31 -
+ src/liblzma/lz/hc4.h | 31 -
+ src/liblzma/lz/lz_decoder.c | 547 +++------
+ src/liblzma/lz/lz_decoder.h | 308 +++---
+ src/liblzma/lz/lz_encoder.c | 780 ++++++------
+ src/liblzma/lz/lz_encoder.h | 334 ++++--
+ src/liblzma/lz/lz_encoder_hash.h | 104 ++
+ src/liblzma/lz/lz_encoder_mf.c | 780 ++++++++++++
+ src/liblzma/lz/lz_encoder_private.h | 40 -
+ src/liblzma/lz/match_c.h | 412 ------
+ src/liblzma/lz/match_h.h | 69 -
+ src/liblzma/lzma/Makefile.am | 37 +-
+ src/liblzma/lzma/fastpos.h | 8 +-
+ src/liblzma/lzma/lzma2_decoder.c | 318 +++++
+ src/liblzma/lzma/lzma2_decoder.h | 35 +
+ src/liblzma/lzma/lzma2_encoder.c | 406 ++++++
+ src/liblzma/lzma/lzma2_encoder.h | 34 +
+ src/liblzma/lzma/lzma_common.h | 208 +++-
+ src/liblzma/lzma/lzma_decoder.c | 1306 ++++++++++++--------
+ src/liblzma/lzma/lzma_decoder.h | 21 +-
+ src/liblzma/lzma/lzma_encoder.c | 576 +++++++--
+ src/liblzma/lzma/lzma_encoder.h | 38 +-
+ src/liblzma/lzma/lzma_encoder_features.c | 2 +-
+ src/liblzma/lzma/lzma_encoder_getoptimum.c | 925 --------------
+ src/liblzma/lzma/lzma_encoder_getoptimumfast.c | 201 ---
+ src/liblzma/lzma/lzma_encoder_init.c | 228 ----
+ src/liblzma/lzma/lzma_encoder_optimum_fast.c | 193 +++
+ src/liblzma/lzma/lzma_encoder_optimum_normal.c | 875 +++++++++++++
+ src/liblzma/lzma/lzma_encoder_presets.c | 52 +-
+ src/liblzma/lzma/lzma_encoder_private.h | 174 +--
+ src/liblzma/lzma/lzma_literal.c | 51 -
+ src/liblzma/lzma/lzma_literal.h | 71 --
+ src/liblzma/rangecoder/Makefile.am | 10 +-
+ src/liblzma/rangecoder/price.h | 111 ++
+ src/liblzma/rangecoder/price_table.c | 84 +-
+ src/liblzma/rangecoder/price_table_gen.c | 55 -
+ src/liblzma/rangecoder/price_table_init.c | 33 +-
+ src/liblzma/rangecoder/price_tablegen.c | 56 +
+ src/liblzma/rangecoder/range_common.h | 17 +-
+ src/liblzma/rangecoder/range_decoder.h | 209 ++--
+ src/liblzma/rangecoder/range_encoder.h | 92 +--
+ src/liblzma/simple/Makefile.am | 12 +
+ src/liblzma/simple/simple_coder.c | 8 +-
+ src/liblzma/simple/simple_decoder.c | 47 +
+ src/liblzma/simple/simple_decoder.h | 29 +
+ src/liblzma/simple/simple_encoder.c | 45 +
+ src/liblzma/simple/simple_encoder.h | 30 +
+ src/liblzma/subblock/Makefile.am | 4 +-
+ src/liblzma/subblock/subblock_decoder.c | 20 +-
+ src/liblzma/subblock/subblock_decoder_helper.c | 2 +-
+ src/liblzma/subblock/subblock_encoder.c | 28 +-
+ src/lzma/args.c | 35 +-
+ src/lzma/args.h | 4 +-
+ src/lzma/options.c | 14 +-
+ src/lzma/process.c | 88 +-
+ src/lzmadec/lzmadec.c | 157 +--
+ tests/Makefile.am | 1 +
+ tests/files/README | 303 ++---
+ tests/files/bad-0-backward_size.lzma | Bin 0 -> 32 bytes
+ tests/files/bad-0-empty-truncated.lzma | Bin 0 -> 31 bytes
+ tests/files/bad-0-nonempty_index.lzma | Bin 0 -> 32 bytes
+ tests/files/bad-0cat-alone.lzma | Bin 0 -> 55 bytes
+ tests/files/bad-0catpad-empty.lzma | Bin 0 -> 69 bytes
+ tests/files/bad-0pad-empty.lzma | Bin 0 -> 37 bytes
+ tests/files/bad-1-block_header-1.lzma | Bin 0 -> 64 bytes
+ tests/files/bad-1-block_header-2.lzma | Bin 0 -> 64 bytes
+ tests/files/bad-1-block_header-3.lzma | Bin 0 -> 68 bytes
+ tests/files/bad-1-block_header-4.lzma | Bin 0 -> 72 bytes
+ tests/files/bad-1-check-crc32.lzma | Bin 0 -> 68 bytes
+ tests/files/bad-1-check-crc64.lzma | Bin 0 -> 72 bytes
+ tests/files/bad-1-check-sha256.lzma | Bin 0 -> 96 bytes
+ tests/files/bad-1-lzma2-1.lzma | Bin 0 -> 64 bytes
+ tests/files/bad-1-lzma2-2.lzma | Bin 0 -> 424 bytes
+ tests/files/bad-1-lzma2-3.lzma | Bin 0 -> 424 bytes
+ tests/files/bad-1-lzma2-4.lzma | Bin 0 -> 408 bytes
+ tests/files/bad-1-lzma2-5.lzma | Bin 0 -> 408 bytes
+ tests/files/bad-1-lzma2-6.lzma | Bin 0 -> 68 bytes
+ tests/files/bad-1-lzma2-7.lzma | Bin 0 -> 408 bytes
+ tests/files/bad-1-stream_flags-1.lzma | Bin 0 -> 68 bytes
+ tests/files/bad-1-stream_flags-2.lzma | Bin 0 -> 68 bytes
+ tests/files/bad-1-stream_flags-3.lzma | Bin 0 -> 68 bytes
+ tests/files/bad-1-vli-1.lzma | Bin 0 -> 72 bytes
+ tests/files/bad-1-vli-2.lzma | Bin 0 -> 72 bytes
+ tests/files/bad-2-compressed_data_padding.lzma | Bin 0 -> 92 bytes
+ tests/files/bad-2-index-1.lzma | Bin 0 -> 92 bytes
+ tests/files/bad-2-index-2.lzma | Bin 0 -> 92 bytes
+ tests/files/bad-2-index-3.lzma | Bin 0 -> 92 bytes
+ tests/files/bad-2-index-4.lzma | Bin 0 -> 92 bytes
+ tests/files/bad-cat-single-none-pad_garbage_1.lzma | Bin 65 -> 0 bytes
+ tests/files/bad-cat-single-none-pad_garbage_2.lzma | Bin 65 -> 0 bytes
+ tests/files/bad-cat-single-none-pad_garbage_3.lzma | Bin 65 -> 0 bytes
+ tests/files/bad-multi-none-1.lzma | Bin 54 -> 0 bytes
+ tests/files/bad-multi-none-2.lzma | Bin 53 -> 0 bytes
+ tests/files/bad-multi-none-3.lzma | Bin 53 -> 0 bytes
+ tests/files/bad-multi-none-block_1.lzma | Bin 66 -> 0 bytes
+ tests/files/bad-multi-none-block_2.lzma | Bin 66 -> 0 bytes
+ tests/files/bad-multi-none-block_3.lzma | Bin 58 -> 0 bytes
+ tests/files/bad-multi-none-extra_1.lzma | Bin 54 -> 0 bytes
+ tests/files/bad-multi-none-extra_2.lzma | Bin 54 -> 0 bytes
+ tests/files/bad-multi-none-extra_3.lzma | Bin 55 -> 0 bytes
+ tests/files/bad-multi-none-header_1.lzma | Bin 57 -> 0 bytes
+ tests/files/bad-multi-none-header_2.lzma | Bin 61 -> 0 bytes
+ tests/files/bad-multi-none-header_3.lzma | Bin 59 -> 0 bytes
+ tests/files/bad-multi-none-header_4.lzma | Bin 59 -> 0 bytes
+ tests/files/bad-multi-none-header_5.lzma | Bin 58 -> 0 bytes
+ tests/files/bad-multi-none-header_6.lzma | Bin 59 -> 0 bytes
+ tests/files/bad-multi-none-header_7.lzma | Bin 59 -> 0 bytes
+ tests/files/bad-multi-none-index_1.lzma | Bin 51 -> 0 bytes
+ tests/files/bad-multi-none-index_2.lzma | Bin 49 -> 0 bytes
+ tests/files/bad-multi-none-index_3.lzma | Bin 51 -> 0 bytes
+ tests/files/bad-multi-none-index_4.lzma | Bin 51 -> 0 bytes
+ tests/files/bad-single-data_after_eopm_1.lzma | Bin 55 -> 0 bytes
+ tests/files/bad-single-data_after_eopm_2.lzma | Bin 56 -> 0 bytes
+ tests/files/bad-single-lzma-flush_beginning.lzma | Bin 53 -> 0 bytes
+ tests/files/bad-single-lzma-flush_twice.lzma | Bin 63 -> 0 bytes
+ tests/files/bad-single-none-empty.lzma | Bin 19 -> 0 bytes
+ .../files/bad-single-none-footer_filter_flags.lzma | Bin 30 -> 0 bytes
+ tests/files/bad-single-none-too_long_vli.lzma | Bin 39 -> 0 bytes
+ tests/files/bad-single-none-truncated.lzma | Bin 29 -> 0 bytes
+ tests/files/bad-single-subblock-padding_loop.lzma | Bin 43 -> 0 bytes
+ tests/files/bad-single-subblock1023-slow.lzma | Bin 7886 -> 0 bytes
+ tests/files/bad-single-subblock_subblock.lzma | Bin 26 -> 0 bytes
+ tests/files/good-0-empty.lzma | Bin 0 -> 32 bytes
+ tests/files/good-0cat-empty.lzma | Bin 0 -> 64 bytes
+ tests/files/good-0catpad-empty.lzma | Bin 0 -> 68 bytes
+ tests/files/good-0pad-empty.lzma | Bin 0 -> 36 bytes
+ tests/files/good-1-3delta-lzma2.lzma | Bin 0 -> 528 bytes
+ tests/files/good-1-block_header-1.lzma | Bin 0 -> 72 bytes
+ tests/files/good-1-block_header-2.lzma | Bin 0 -> 68 bytes
+ tests/files/good-1-block_header-3.lzma | Bin 0 -> 68 bytes
+ tests/files/good-1-check-crc32.lzma | Bin 0 -> 68 bytes
+ tests/files/good-1-check-crc64.lzma | Bin 0 -> 72 bytes
+ tests/files/good-1-check-none.lzma | Bin 0 -> 64 bytes
+ tests/files/good-1-check-sha256.lzma | Bin 0 -> 96 bytes
+ tests/files/good-1-delta-lzma2.tiff.lzma | Bin 0 -> 51312 bytes
+ tests/files/good-1-lzma2-1.lzma | Bin 0 -> 424 bytes
+ tests/files/good-1-lzma2-2.lzma | Bin 0 -> 424 bytes
+ tests/files/good-1-lzma2-3.lzma | Bin 0 -> 408 bytes
+ tests/files/good-1-sparc-lzma2.lzma | Bin 0 -> 2292 bytes
+ tests/files/good-1-x86-lzma2.lzma | Bin 0 -> 1936 bytes
+ tests/files/good-2-lzma2.lzma | Bin 0 -> 92 bytes
+ tests/files/good-cat-single-none-pad.lzma | Bin 64 -> 0 bytes
+ tests/files/good-multi-none-1.lzma | Bin 75 -> 0 bytes
+ tests/files/good-multi-none-2.lzma | Bin 53 -> 0 bytes
+ tests/files/good-multi-none-block_1.lzma | Bin 66 -> 0 bytes
+ tests/files/good-multi-none-block_2.lzma | Bin 58 -> 0 bytes
+ tests/files/good-multi-none-extra_1.lzma | Bin 51 -> 0 bytes
+ tests/files/good-multi-none-extra_2.lzma | Bin 79 -> 0 bytes
+ tests/files/good-multi-none-extra_3.lzma | Bin 55 -> 0 bytes
+ tests/files/good-multi-none-header_1.lzma | Bin 58 -> 0 bytes
+ tests/files/good-multi-none-header_2.lzma | Bin 66 -> 0 bytes
+ tests/files/good-multi-none-header_3.lzma | Bin 59 -> 0 bytes
+ tests/files/good-single-delta-lzma.tiff.lzma | Bin 51409 -> 0 bytes
+ tests/files/good-single-lzma-empty.lzma | Bin 21 -> 0 bytes
+ tests/files/good-single-lzma-flush_1.lzma | Bin 48 -> 0 bytes
+ tests/files/good-single-lzma-flush_2.lzma | Bin 63 -> 0 bytes
+ tests/files/good-single-lzma.lzma | Bin 44 -> 0 bytes
+ tests/files/good-single-none-empty_1.lzma | Bin 18 -> 0 bytes
+ tests/files/good-single-none-empty_2.lzma | Bin 26 -> 0 bytes
+ tests/files/good-single-none-empty_3.lzma | Bin 19 -> 0 bytes
+ tests/files/good-single-none-pad.lzma | Bin 32 -> 0 bytes
+ tests/files/good-single-none.lzma | Bin 30 -> 0 bytes
+ tests/files/good-single-sparc-lzma.lzma | Bin 2263 -> 0 bytes
+ tests/files/good-single-subblock-lzma.lzma | Bin 50 -> 0 bytes
+ tests/files/good-single-subblock_implicit.lzma | Bin 35 -> 0 bytes
+ tests/files/good-single-subblock_rle.lzma | Bin 118 -> 0 bytes
+ tests/files/good-single-x86-lzma.lzma | Bin 1909 -> 0 bytes
+ tests/files/malicious-multi-metadata-64PiB.lzma | Bin 51 -> 0 bytes
+ tests/files/malicious-single-subblock-256MiB.lzma | Bin 30 -> 0 bytes
+ tests/files/malicious-single-subblock-64PiB.lzma | Bin 45 -> 0 bytes
+ tests/files/malicious-single-subblock31-slow.lzma | Bin 1233 -> 0 bytes
+ tests/files/unsupported-block_header.lzma | Bin 0 -> 68 bytes
+ tests/files/unsupported-check.lzma | Bin 0 -> 68 bytes
+ tests/files/unsupported-filter_flags-1.lzma | Bin 0 -> 68 bytes
+ tests/files/unsupported-filter_flags-2.lzma | Bin 0 -> 68 bytes
+ tests/files/unsupported-filter_flags-3.lzma | Bin 0 -> 68 bytes
+ tests/test_block_header.c | 28 +-
+ tests/test_compress.sh | 4 +-
+ tests/test_filter_flags.c | 51 +-
+ tests/test_stream_flags.c | 4 +-
+ tests/tests.h | 8 +
+ 294 files changed, 9768 insertions(+), 8195 deletions(-)
+
+commit 57b9a145a527f0716822615e5ed536d33aebd3fc
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-20 17:16:32 +0300
+
+ Fix test_filter_flags to match the new restriction of lc+lp.
+
+ tests/test_filter_flags.c | 3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+commit eaafc4367c77ec1d910e16d11b4da293969d97a3
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-20 16:19:54 +0300
+
+ Remove some redundant code from LZMA encoder.
+
+ src/liblzma/lzma/lzma_encoder.c | 15 +--------------
+ 1 files changed, 1 insertions(+), 14 deletions(-)
+
+commit 0809c46534fa5664fe35d9e98d95e87312ed130e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-19 16:35:08 +0300
+
+ Add limit of lc + lp <= 4. Now we can allocate the
+ literal coder as part of the main LZMA encoder or
+ decoder structure.
+
+ Make the LZMA decoder rely on the current internal API
+ to free the allocated memory in case an error occurs.
+
+ src/liblzma/api/lzma/lzma.h | 10 +++++-
+ src/liblzma/lzma/lzma_decoder.c | 57 +++++++-----------------------
+ src/liblzma/lzma/lzma_encoder_init.c | 13 +++----
+ src/liblzma/lzma/lzma_encoder_private.h | 2 +-
+ src/liblzma/lzma/lzma_literal.c | 39 ++++-----------------
+ src/liblzma/lzma/lzma_literal.h | 13 +++----
+ 6 files changed, 43 insertions(+), 91 deletions(-)
+
+commit d25ab1b96178f06a0e724f58e3cd68300b2b1275
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-18 21:45:19 +0300
+
+ Comments
+
+ src/liblzma/lzma/lzma_encoder.c | 7 ++-----
+ 1 files changed, 2 insertions(+), 5 deletions(-)
+
+commit 6368a2fa5901c75864be5171dd57a50af7adbb41
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-18 19:19:02 +0300
+
+ Delete old code that was supposed to be already deleted
+ from test_block_header.c.
+
+ tests/test_block_header.c | 30 ------------------------------
+ 1 files changed, 0 insertions(+), 30 deletions(-)
+
+commit 7d17818cec8597f847b0a2537fde991bbc3d9e96
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-18 18:02:10 +0300
+
+ Update the code to mostly match the new simpler file format
+ specification. Simplify things by removing most of the
+ support for known uncompressed size in most places.
+ There are some miscellaneous changes here and there too.
+
+ The API of liblzma has seen many changes, and some more
+ will follow soon. While most of the code has been
+ updated, some things are not fixed (the command line tool
+ will choke on an invalid filter chain, if nothing else).
+
+ Subblock filter is somewhat broken for now. It will be
+ updated once the encoded format of the Subblock filter
+ has been decided.
+
+ configure.ac | 41 +-
+ debug/full_flush.c | 16 +-
+ debug/sync_flush.c | 15 +-
+ src/common/bswap.h | 44 ++
+ src/common/integer.h | 167 +++++
+ src/liblzma/api/Makefile.am | 5 +-
+ src/liblzma/api/lzma.h | 9 +-
+ src/liblzma/api/lzma/alone.h | 32 +-
+ src/liblzma/api/lzma/auto.h | 7 +-
+ src/liblzma/api/lzma/base.h | 15 +
+ src/liblzma/api/lzma/block.h | 304 +++-------
+ src/liblzma/api/lzma/check.h | 18 +-
+ src/liblzma/api/lzma/copy.h | 29 -
+ src/liblzma/api/lzma/easy.h | 61 +--
+ src/liblzma/api/lzma/extra.h | 114 ----
+ src/liblzma/api/lzma/filter.h | 5 +-
+ src/liblzma/api/lzma/index.h | 204 +++++-
+ src/liblzma/api/lzma/index_hash.h | 94 +++
+ src/liblzma/api/lzma/info.h | 315 ---------
+ src/liblzma/api/lzma/lzma.h | 2 +-
+ src/liblzma/api/lzma/metadata.h | 100 ---
+ src/liblzma/api/lzma/raw.h | 20 +-
+ src/liblzma/api/lzma/stream.h | 157 +----
+ src/liblzma/api/lzma/stream_flags.h | 146 ++---
+ src/liblzma/api/lzma/version.h | 2 +-
+ src/liblzma/api/lzma/vli.h | 83 ++--
+ src/liblzma/check/Makefile.am | 1 -
+ src/liblzma/check/check.c | 55 ++-
+ src/liblzma/check/check.h | 47 +-
+ src/liblzma/check/check_byteswap.h | 43 --
+ src/liblzma/check/crc32_init.c | 2 +-
+ src/liblzma/check/crc64_init.c | 2 +-
+ src/liblzma/check/crc_macros.h | 2 +-
+ src/liblzma/check/sha256.c | 53 +-
+ src/liblzma/common/Makefile.am | 31 +-
+ src/liblzma/common/alignment.c | 5 +-
+ src/liblzma/common/alone_decoder.c | 77 ++-
+ src/liblzma/common/alone_encoder.c | 99 ++--
+ src/liblzma/common/auto_decoder.c | 18 +-
+ src/liblzma/common/block_decoder.c | 298 +++-------
+ src/liblzma/common/block_encoder.c | 228 ++-----
+ src/liblzma/common/block_header_decoder.c | 400 +++----------
+ src/liblzma/common/block_header_encoder.c | 207 +++----
+ src/liblzma/common/block_private.h | 51 +--
+ src/liblzma/common/block_util.c | 73 +++
+ src/liblzma/common/common.h | 44 +-
+ src/liblzma/common/copy_coder.c | 144 -----
+ src/liblzma/common/copy_coder.h | 31 -
+ src/liblzma/common/delta_common.c | 4 -
+ src/liblzma/common/delta_common.h | 4 -
+ src/liblzma/common/delta_decoder.c | 55 +--
+ src/liblzma/common/delta_encoder.c | 7 +-
+ src/liblzma/common/easy.c | 122 ++++
+ src/liblzma/common/easy_common.c | 54 --
+ src/liblzma/common/easy_common.h | 28 -
+ src/liblzma/common/easy_multi.c | 103 ---
+ src/liblzma/common/easy_single.c | 37 --
+ src/liblzma/common/extra.c | 34 -
+ src/liblzma/common/features.c | 4 -
+ src/liblzma/common/filter_flags_decoder.c | 384 +++--------
+ src/liblzma/common/filter_flags_encoder.c | 120 +---
+ src/liblzma/common/index.c | 773 ++++++++++++++++++++---
+ src/liblzma/common/index.h | 67 ++
+ src/liblzma/common/index_decoder.c | 252 ++++++++
+ src/liblzma/common/index_encoder.c | 222 +++++++
+ src/liblzma/common/index_encoder.h | 30 +
+ src/liblzma/common/index_hash.c | 340 ++++++++++
+ src/liblzma/common/info.c | 814 ------------------------
+ src/liblzma/common/memory_usage.c | 1 -
+ src/liblzma/common/metadata_decoder.c | 578 -----------------
+ src/liblzma/common/metadata_decoder.h | 31 -
+ src/liblzma/common/metadata_encoder.c | 435 -------------
+ src/liblzma/common/metadata_encoder.h | 30 -
+ src/liblzma/common/raw_common.c | 178 ++----
+ src/liblzma/common/raw_common.h | 5 +-
+ src/liblzma/common/raw_decoder.c | 19 +-
+ src/liblzma/common/raw_decoder.h | 3 +-
+ src/liblzma/common/raw_encoder.c | 101 +---
+ src/liblzma/common/raw_encoder.h | 3 +-
+ src/liblzma/common/stream_common.h | 3 +
+ src/liblzma/common/stream_decoder.c | 458 ++++----------
+ src/liblzma/common/stream_decoder.h | 28 +
+ src/liblzma/common/stream_encoder.c | 282 ++++++++
+ src/liblzma/common/stream_encoder.h | 30 +
+ src/liblzma/common/stream_encoder_multi.c | 445 -------------
+ src/liblzma/common/stream_encoder_multi.h | 26 -
+ src/liblzma/common/stream_encoder_single.c | 219 -------
+ src/liblzma/common/stream_flags_decoder.c | 260 ++-------
+ src/liblzma/common/stream_flags_encoder.c | 56 +-
+ src/liblzma/common/stream_flags_equal.c | 36 +
+ src/liblzma/common/vli_decoder.c | 68 ++-
+ src/liblzma/common/vli_encoder.c | 59 +-
+ src/liblzma/common/vli_reverse_decoder.c | 55 --
+ src/liblzma/lz/lz_decoder.c | 6 +-
+ src/liblzma/lz/lz_decoder.h | 10 +-
+ src/liblzma/lzma/lzma_decoder.c | 13 +-
+ src/liblzma/lzma/lzma_decoder.h | 10 +-
+ src/liblzma/simple/simple_coder.c | 29 +-
+ src/liblzma/simple/simple_private.h | 4 -
+ src/liblzma/subblock/subblock_decoder.c | 106 +---
+ src/liblzma/subblock/subblock_decoder_helper.c | 5 +-
+ src/liblzma/subblock/subblock_encoder.c | 8 +-
+ src/lzma/args.c | 22 +-
+ src/lzma/args.h | 2 -
+ src/lzma/error.c | 6 +
+ src/lzma/process.c | 26 +-
+ src/lzmadec/lzmadec.c | 8 +-
+ tests/Makefile.am | 5 +-
+ tests/test_block_header.c | 411 +++++--------
+ tests/test_compress.sh | 65 +--
+ tests/test_filter_flags.c | 116 ++---
+ tests/test_index.c | 504 ++++++++++++++-
+ tests/test_info.c | 717 ---------------------
+ tests/test_stream_flags.c | 134 ++--
+ tests/tests.h | 14 +-
+ 115 files changed, 4845 insertions(+), 8155 deletions(-)
+
+commit bf6348d1a3ff09fdc06940468f318f75ffa6af11
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-17 15:03:46 +0300
+
+ Update the file format specification draft. The new one is
+ a lot simpler than the previous versions, but it also means
+ that the existing code will change a lot.
+
+ doc/file-format.txt | 1794 +++++++++++++++------------------------------------
+ 1 files changed, 508 insertions(+), 1286 deletions(-)
+
+commit 803194ddd26f01ff60ba4e9924c6087a56b29827
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-11 21:42:47 +0300
+
+ Fix uninitialized variable in LZMA encoder. This was
+ introduced in 369f72fd656f537a9a8e06f13e6d0d4c242be22f.
+
+ src/liblzma/lzma/lzma_encoder_init.c | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+commit 0ea98e52ba87453497b1355c51f13bad55c8924a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-11 15:08:44 +0300
+
+ Improve command line integer parsing a little in lzma and
+ lzmadec to make them accept KiB in addition to Ki etc.
+ Also fix the memory usage information in lzmadec --help.
+
+ src/lzma/util.c | 23 ++++++++++++++---------
+ src/lzmadec/lzmadec.c | 31 ++++++++++++++++++-------------
+ 2 files changed, 32 insertions(+), 22 deletions(-)
+
+commit 436fa5fae96d4e35759aed33066060f09ee8c6ef
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-10 20:36:12 +0300
+
+ s/decompressed/compressed/ in the command line tool's
+ error message.
+
+ src/lzma/main.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 369f72fd656f537a9a8e06f13e6d0d4c242be22f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-06-01 12:48:17 +0300
+
+ Fix a buffer overflow in the LZMA encoder. It was due to my
+ misunderstanding of the code. There's no tiny fix for this
+ problem, so I also cleaned up the code in general.
+
+ This reduces the speed of the encoder by 2-5 % in the fastest
+ compression mode ("lzma -1"). High compression modes should
+ have no noticeable performance difference.
+
+ This commit breaks things (especially LZMA_SYNC_FLUSH) but I
+ will fix them once the new format and LZMA2 have been roughly
+ implemented. Plain LZMA won't support LZMA_SYNC_FLUSH at all
+ and won't be supported in the new .lzma format. This may
+ still change, but this is what it looks like now.
+
+ Support for known uncompressed size (that is, LZMA or LZMA2
+ without EOPM) is likely to go away. This means there will
+ be API changes.
+
+ src/liblzma/lz/lz_encoder.c | 113 +-----
+ src/liblzma/lz/lz_encoder.h | 18 +-
+ src/liblzma/lzma/lzma_encoder.c | 551 +++++++++++-------------
+ src/liblzma/lzma/lzma_encoder_getoptimum.c | 59 ++-
+ src/liblzma/lzma/lzma_encoder_getoptimumfast.c | 4 +-
+ src/liblzma/lzma/lzma_encoder_init.c | 9 +-
+ src/liblzma/lzma/lzma_encoder_private.h | 15 +-
+ src/liblzma/rangecoder/range_encoder.h | 383 +++++++++--------
+ 8 files changed, 532 insertions(+), 620 deletions(-)
+
+commit e55e0e873ce2511325749d415ae547d62ab5f00d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-05-30 11:53:41 +0300
+
+ Typo fixes from meyering.
+
+ doc/faq.txt | 4 ++--
+ doc/liblzma-advanced.txt | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+commit ed6664146fcbe9cc4a3b23b31632182ed812ea93
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-05-11 14:24:42 +0300
+
+ Remove support for pre-C89 libc versions that lack memcpy,
+ memmove, and memset.
+
+ configure.ac | 2 +-
+ src/common/sysdefs.h | 15 ++-------------
+ src/liblzma/common/allocator.c | 2 +-
+ 3 files changed, 4 insertions(+), 15 deletions(-)
+
+commit b09464bf9ae694afc2d1dc26188ac4e2e8af0a63
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-05-11 14:17:21 +0300
+
+ Improved C99 compiler detection in configure.ac. It will
+ pass -std=gnu99 instead of -std=c99 to GCC now, but -pedantic
+ should still give warnings about GNU extensions like before
+ except with some special keywords like asm().
+
+ configure.ac | 24 ++++++++++++------------
+ 1 files changed, 12 insertions(+), 12 deletions(-)
+
+commit 11de5d5267f7a0a7f0a4d34eec147e65eaf9f9cf
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-05-06 15:15:07 +0300
+
+ Bunch of grammar fixes from meyering.
+
+ doc/liblzma-security.txt | 8 ++++----
+ src/liblzma/api/lzma/memlimit.h | 6 +++---
+ src/lzma/help.c | 2 +-
+ tests/files/README | 2 +-
+ 4 files changed, 9 insertions(+), 9 deletions(-)
+
+commit dc192b6343ae36276c85fcf7ef6006147816eadc
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-05-06 13:41:05 +0300
+
+ Typo fix
+
+ src/liblzma/api/lzma/init.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 944b62b93239b27b338d117f2668c0e95849659b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-05-04 22:29:27 +0300
+
+ Don't print an error message on broken pipe unless --verbose
+ is used.
+
+ src/lzma/io.c | 15 ++++++++++++++-
+ 1 files changed, 14 insertions(+), 1 deletions(-)
+
+commit 8e074349e47ea6832b8fdf9244e581d453733433
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-30 22:16:17 +0300
+
+ Fix a crash with --format=alone if other filters than LZMA
+ are specified on the command line.
+
+ src/lzma/args.c | 9 +++++++++
+ 1 files changed, 9 insertions(+), 0 deletions(-)
+
+commit 2f361ac19b7fd3abcd362de4d470e6a9eb495b73
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-28 17:08:27 +0300
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 3be21fb12f4cec2cf07799e8960382f4cb375369
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-28 17:06:34 +0300
+
+ Fixed wrong spelling "limitter" to "limiter". This affects
+ liblzma's API.
+
+ doc/liblzma-security.txt | 14 +-
+ src/liblzma/api/lzma/base.h | 4 +-
+ src/liblzma/api/lzma/memlimit.h | 10 +-
+ src/liblzma/api/lzma/stream.h | 4 +-
+ src/liblzma/common/Makefile.am | 2 +-
+ src/liblzma/common/memory_limiter.c | 288 ++++++++++++++++++++++++++++++++++
+ src/liblzma/common/memory_limitter.c | 288 ----------------------------------
+ src/lzma/list.c | 6 +-
+ src/lzmadec/lzmadec.c | 12 +-
+ tests/test_memlimit.c | 4 +-
+ 10 files changed, 316 insertions(+), 316 deletions(-)
+
+commit beeb81060821dfec4e7898e0d44b7900dcb2215e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-25 15:39:50 +0300
+
+ Prevent LZ encoder from hanging with known uncompressed
+ size. The "fix" breaks LZMA_SYNC_FLUSH at end of stream
+ with known uncompressed size, but since it currently seems
+ likely that support for encoding with known uncompressed
+ size will go away anyway, I'm not fixing this problem now.
+
+ src/liblzma/lz/lz_encoder.c | 9 +++++++--
+ 1 files changed, 7 insertions(+), 2 deletions(-)
+
+commit c324325f9f13cdeb92153c5d00962341ba070ca2
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-25 13:58:56 +0300
+
+ Removed src/liblzma/common/sysdefs.h symlink, which was
+ annoying, because "make dist" put two copies of sysdefs.h
+ into the tarball instead of the symlink.
+
+ src/liblzma/check/crc32_table.c | 2 +-
+ src/liblzma/check/crc64_table.c | 2 +-
+ src/liblzma/common/Makefile.am | 1 -
+ src/liblzma/common/common.h | 2 +-
+ src/liblzma/common/sysdefs.h | 1 -
+ 5 files changed, 3 insertions(+), 5 deletions(-)
+
+commit d3ba30243c75c13d094de1793f9c58acdbacc692
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-25 13:41:29 +0300
+
+ Added memusage.c to debug directory.
+
+ debug/Makefile.am | 3 +-
+ debug/memusage.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 57 insertions(+), 1 deletions(-)
+
+commit 8f804c29aa8471ccd6438ddca254092b8869ca52
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-25 13:32:35 +0300
+
+ Bumped version number to 4.999.3alpha. It will become 5.0.0
+ once we have a stable release (won't be very soon). The
+ version number is no longer related to the version of LZMA SDK.
+
+ Made some small Automake-related changes to toplevel
+ Makefile.am and configure.ac.
+
+ Makefile.am | 7 +++++--
+ README | 29 +++++++++++++++++++++++++++++
+ configure.ac | 4 ++--
+ src/liblzma/api/lzma/version.h | 22 ++++++++++------------
+ 4 files changed, 46 insertions(+), 16 deletions(-)
+
+commit c99037ea10f121cbacf60c37a36c29768ae53447
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-24 20:25:39 +0300
+
+ Fix a memory leak by calling free(extra->data) in
+ lzma_extra_free().
+
+ src/liblzma/common/extra.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 22ba3b0b5043fa481903482ce85015fe775939e5
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-24 20:23:05 +0300
+
+ Make unlzma and lzcat symlinks.
+
+ src/lzma/Makefile.am | 12 ++++++++++++
+ 1 files changed, 12 insertions(+), 0 deletions(-)
+
+commit 17c36422d4cbc2c70d5c83ec389406f92cd9e85e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-24 20:20:27 +0300
+
+ Fixed a bug in command line option parsing.
+
+ src/lzma/options.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 283f939974c32c47f05d495e8dea455ec646ed64
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-24 20:19:20 +0300
+
+ Added two assert()s.
+
+ src/liblzma/lzma/lzma_encoder.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+commit eb348a60b6e19a7c093f892434f23c4756973ffd
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-24 19:22:53 +0300
+
+ Switch to uint16_t as the type of range coder probabilities.
+
+ src/liblzma/rangecoder/range_common.h | 25 +++++++++++++++++++------
+ 1 files changed, 19 insertions(+), 6 deletions(-)
+
+commit 6c5306e312bcfd254cf654f88c04e34ba786df3d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-24 18:39:57 +0300
+
+ Fix wrong return type (uint32_t -> bool).
+
+ src/liblzma/lz/lz_encoder.c | 2 +-
+ src/liblzma/lz/lz_encoder.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+commit 712cfe3ebfd24df24d8896b1315c53c3bc4369c8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-24 18:38:00 +0300
+
+ Fix data corruption in LZ encoder with LZMA_SYNC_FLUSH.
+
+ src/liblzma/lz/lz_encoder.c | 16 ++++++++++++++++
+ src/liblzma/lz/lz_encoder.h | 4 ++++
+ src/liblzma/lz/match_c.h | 23 ++++++++++++++++++-----
+ 3 files changed, 38 insertions(+), 5 deletions(-)
+
+commit bc04486e368d20b3027cde625267762aae063965
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-24 17:33:01 +0300
+
+ Fix fastpos problem in Makefile.am when built with --enable-small.
+
+ src/liblzma/lzma/Makefile.am | 5 ++++-
+ 1 files changed, 4 insertions(+), 1 deletions(-)
+
+commit 7ab493924e0ed590a5121a15ee54038d238880d3
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-04-24 17:30:51 +0300
+
+ Use 64-bit integer as range encoder's cache size. This fixes a
+ theoretical data corruption, which should be very hard to trigger
+ even intentionally.
+
+ src/liblzma/rangecoder/range_encoder.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 641998c3e1ecc8b598fe0eb051fab8b9535c291b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-24 16:38:40 +0200
+
+ Replaced the range decoder optimization that used arithmetic
+ right shift with an equally fast version that doesn't need
+ arithmetic right shift. Removed the related check from
+ configure.ac.
+
+ configure.ac | 1 -
+ m4/ax_c_arithmetic_rshift.m4 | 36 ---------------------
+ src/liblzma/rangecoder/range_decoder.h | 53 +++++++++----------------------
+ 3 files changed, 16 insertions(+), 74 deletions(-)
+
+commit ad999efd279d95f1e7ac555b14170e8e9020488c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-22 14:39:34 +0200
+
+ Take advantage of arithmetic right shift in range decoder.
+
+ src/liblzma/rangecoder/range_decoder.h | 52 ++++++++++++++++++++++---------
+ 1 files changed, 37 insertions(+), 15 deletions(-)
+
+commit 03e0e8a0d7228b6ff1f0af39e2c040a4e425973d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-22 14:18:29 +0200
+
+ Added autoconf check to detect if we can use arithmetic
+ right shift for optimizations.
+
+ configure.ac | 1 +
+ m4/ax_c_arithmetic_rshift.m4 | 36 ++++++++++++++++++++++++++++++++++++
+ 2 files changed, 37 insertions(+), 0 deletions(-)
+
+commit 7521bbdc83acab834594a22bec50c8e1bd836298
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-22 01:26:36 +0200
+
+ Update a comment to use the variable name rep_len_decoder.
+
+ (And BTW, the previous commit actually did change the
+ program logic slightly.)
+
+ src/liblzma/lzma/lzma_decoder.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 63b74d000eedaebb8485f623e56864ff5ab71064
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-22 00:57:33 +0200
+
+ Demystified the "state" variable in LZMA code. Use the
+ word literal instead of char for better consistency.
+ There are still some names with _char instead of _literal
+ in lzma_optimum; these may be changed later.
+
+ Renamed length coder variables.
+
+ This commit doesn't change the program logic.
+
+ src/liblzma/lzma/lzma_common.h | 69 +++++++++++++++++++++-------
+ src/liblzma/lzma/lzma_decoder.c | 47 ++++++++++---------
+ src/liblzma/lzma/lzma_encoder.c | 14 +++---
+ src/liblzma/lzma/lzma_encoder_getoptimum.c | 34 +++++++-------
+ src/liblzma/lzma/lzma_encoder_init.c | 5 +-
+ src/liblzma/lzma/lzma_encoder_private.h | 8 ++--
+ 6 files changed, 107 insertions(+), 70 deletions(-)
+
+commit e6eb0a26757e851cef62b9440319a8e73b015cb9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-14 23:16:11 +0200
+
+ Fix data corruption in LZMA encoder. Note that this bug was
+ specific to liblzma and was *not* present in LZMA SDK.
+
+ src/liblzma/lzma/lzma_encoder.c | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+commit 7d516f5129e4373a6d57249d7f608c634c66bf12
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-14 21:32:37 +0200
+
+ Fix a comment in an API header.
+
+ src/liblzma/api/lzma/lzma.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 748d6e4274921a350bd0a317380309717441ef9c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-12 23:14:50 +0200
+
+ Make lzma_stream.next_in const. Let's see if anyone complains.
+
+ src/liblzma/api/lzma/base.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit bfde3b24a5ae25ce53c854762b6148952386b025
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-11 15:35:34 +0200
+
+ Apply a minor speed optimization to LZMA decoder.
+
+ src/liblzma/lzma/lzma_decoder.c | 85 ++++++++++++++++++++-------------------
+ 1 files changed, 43 insertions(+), 42 deletions(-)
+
+commit f310c50286d9e4e9c6170bb65348c9bb430a65b4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-11 15:17:16 +0200
+
+ Initialize the last byte of the dictionary to zero so that
+ lz_get_byte(lz, 0) returns zero. This was broken by
+ 1a3b21859818e4d8e89a1da99699233c1bfd197d.
+
+ src/liblzma/lz/lz_decoder.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 5ead36cf7f823093672a4e43c3180b38c9abbaff
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-10 15:57:55 +0200
+
+ Really fix the price count initialization.
+
+ src/liblzma/lzma/lzma_encoder_init.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+commit d4d7feb83d1a1ded8f662a82e21e053841ca726c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-10 13:47:17 +0200
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 0541c5ea63ef3c0ff85eeddb0a420e56b0c65258
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-10 13:46:48 +0200
+
+ Initialize align_price_count and match_price_count in
+ lzma_encoder_init.c. While we don't call
+ fill_distances_prices() and fill_align_prices() in
+ lzma_lzma_encoder_init(), we still need to initialize
+ these two variables so that the fill functions get
+ called in lzma_encoder_getoptimum.c at the beginning
+ of a stream.
+
+ src/liblzma/lzma/lzma_encoder_init.c | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+commit 596fa1fac72823e4ef5bc26bb53f9090445bf748
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-10 13:44:29 +0200
+
+ Always initialize lz->temp_size in lz_decoder.c. temp_size did
+ get initialized as a side-effect after allocating a new decoder,
+ but not when the decoder was reused.
+
+ src/liblzma/lz/lz_decoder.c | 11 ++++++-----
+ 1 files changed, 6 insertions(+), 5 deletions(-)
+
+commit 45e43e169527e7a98a8c8a821d37bf25822b764d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-03-10 13:41:25 +0200
+
+ Don't fill allocated memory with 0xFD when debugging is
+ enabled. It hides errors from Valgrind.
+
+ src/liblzma/common/allocator.c | 7 ++++---
+ 1 files changed, 4 insertions(+), 3 deletions(-)
+
+commit c0e19e0662205f81a86da8903cdc325d50635870
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-02-28 10:24:31 +0200
+
+ Remove two redundant validity checks from the LZMA decoder.
+ These are already checked elsewhere, so omitting them
+ gives a (very) tiny speed-up.
+
+ src/liblzma/lzma/lzma_decoder.c | 23 ++++-------------------
+ 1 files changed, 4 insertions(+), 19 deletions(-)
+
+commit de7485806284d1614095ae8cb2ebbb5d74c9ac45
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-02-06 13:25:32 +0200
+
+ Tiny clean up to file-format.txt.
+
+ doc/file-format.txt | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+commit 1a3b21859818e4d8e89a1da99699233c1bfd197d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-02-02 14:51:06 +0200
+
+ Don't memzero() the history buffer when initializing the LZ
+ decoder. There's no danger of an information leak here, so
+ it isn't required. Doing memzero() takes a lot of time
+ with large dictionaries, which could make it easier to
+ construct a DoS attack that consumes too much CPU time.
+
+ src/liblzma/lz/lz_decoder.c | 7 +++----
+ 1 files changed, 3 insertions(+), 4 deletions(-)
+
+commit 7e796e312bf644ea95aea0ff85480f47cfa30fc0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-02-01 08:39:26 +0200
+
+ Do uncompressed size validation in the raw encoder. This way
+ it gets done not only for the raw encoder, but also for the
+ Block and LZMA_Alone encoders.
+
+ src/liblzma/common/raw_encoder.c | 90 ++++++++++++++++++++++++++++++-------
+ 1 files changed, 73 insertions(+), 17 deletions(-)
+
+commit 7dd48578a3853e0cfab9f1830bc30927173ec4bc
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-02-01 08:32:05 +0200
+
+ Avoid unneeded function call in raw_common.c.
+
+ src/liblzma/common/raw_common.c | 20 +++++++++++---------
+ 1 files changed, 11 insertions(+), 9 deletions(-)
+
+commit b596fac963c3ff96f615d4d9b427a213ec341211
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-26 21:42:38 +0200
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit e9f6e9c075ad93141a568d94f7d4eb0f2edbd6c2
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-26 21:40:23 +0200
+
+ Added note.GNU-stack to x86 assembler files. It is needed
+ when using a non-executable stack.
+
+ src/liblzma/check/crc32_x86.S | 9 +++++++++
+ src/liblzma/check/crc64_x86.S | 9 +++++++++
+ 2 files changed, 18 insertions(+), 0 deletions(-)
+
+commit 4c7ad179c78f97f68ad548cb40a9dfa6871655ae
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-26 19:12:50 +0200
+
+ Added api/lzma/easy.h. I had forgotten to add this to the
+ git repo. Thanks to Stephan Kulow.
+
+ src/liblzma/api/lzma/easy.h | 174 +++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 174 insertions(+), 0 deletions(-)
+
+commit 288b232f54c3692cd36f471d4042f51daf3ea79f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-26 11:09:17 +0200
+
+ Added more test files.
+
+ tests/files/README | 11 +++++++++++
+ tests/files/bad-multi-none-header_7.lzma | Bin 0 -> 59 bytes
+ tests/files/good-single-sparc-lzma.lzma | Bin 0 -> 2263 bytes
+ tests/files/good-single-x86-lzma.lzma | Bin 0 -> 1909 bytes
+ 4 files changed, 11 insertions(+), 0 deletions(-)
+
+commit c467b0defccf233d0c79234407bc38d7d09574d3
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-26 10:47:55 +0200
+
+ Added more test files.
+
+ tests/files/README | 6 ++++++
+ tests/files/bad-multi-none-block_3.lzma | Bin 0 -> 58 bytes
+ tests/files/good-multi-none-block_2.lzma | Bin 0 -> 58 bytes
+ 3 files changed, 6 insertions(+), 0 deletions(-)
+
+commit f9842f712732c482f2def9f24437851e57dd83f8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-26 00:25:34 +0200
+
+ Return LZMA_HEADER_ERROR if LZMA_SYNC_FLUSH is used with any
+ of the so-called simple filters. If there is demand, limited
+ support for LZMA_SYNC_FLUSH may be added in the future.
+
+ After this commit, using LZMA_SYNC_FLUSH shouldn't cause
+ undefined behavior in any situation.
+
+ src/liblzma/api/lzma/simple.h | 9 +++++++++
+ src/liblzma/simple/simple_coder.c | 8 ++++++++
+ 2 files changed, 17 insertions(+), 0 deletions(-)
+
+commit e988ea1d1a286dd0f27af0657f9665d5cd8573aa
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-25 23:50:35 +0200
+
+ Added more Multi-Block test files. Improved some
+ descriptions in the test files' README.
+
+ tests/files/README | 34 ++++++++++++++++++++++++-----
+ tests/files/bad-multi-none-block_1.lzma | Bin 0 -> 66 bytes
+ tests/files/bad-multi-none-block_2.lzma | Bin 0 -> 66 bytes
+ tests/files/good-multi-none-block_1.lzma | Bin 0 -> 66 bytes
+ 4 files changed, 28 insertions(+), 6 deletions(-)
+
+commit 4441e004185cd4c61bda184010eca5924c9dec87
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-25 23:12:36 +0200
+
+ Combine the lzma_options_block validation needed by both the
+ Block encoder and decoder, and put the shared things into
+ block_private.h. Improved the checks a little so that
+ they may detect a too big Compressed Size at initialization
+ time if lzma_options_block.total_size or .total_limit is
+ known.
+
+ Allow encoding and decoding Blocks with combinations of
+ fields that are not allowed by the file format specification.
+ Doing this requires that the application passes such a
+ combination in lzma_options_lzma; liblzma doesn't do that,
+ but it's not impossible that someone could find them useful
+ in some custom file format.
+
+ src/liblzma/common/block_decoder.c | 37 +++++++++++----------------
+ src/liblzma/common/block_encoder.c | 32 ++++------------------
+ src/liblzma/common/block_private.h | 50 ++++++++++++++++++++++++++++++++++++
+ 3 files changed, 71 insertions(+), 48 deletions(-)
+
+commit bf4200c818fcf9102e56328d39cde91bfa13cfb6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-25 19:21:22 +0200
+
+ Added test_memlimit.c.
+
+ tests/Makefile.am | 2 +
+ tests/test_memlimit.c | 114 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 116 insertions(+), 0 deletions(-)
+
+commit 7b8fc7e6b501a32a36636dac79ecb57099269005
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-25 19:20:28 +0200
+
+ Improved the memory limitter:
+ - Added lzma_memlimit_max() and lzma_memlimit_reached()
+ API functions.
+ - Added simple estimation of malloc()'s memory usage
+ overhead.
+ - Fixed integer overflow detection in lzma_memlimit_alloc().
+ - Made some white space cleanups and added more comments.
+
+ The description of lzma_memlimit_max() in memlimit.h is bad
+ and should be improved.
+
+ src/liblzma/api/lzma/memlimit.h | 35 ++++++++++++
+ src/liblzma/common/memory_limitter.c | 97 +++++++++++++++++++++++++++++-----
+ 2 files changed, 118 insertions(+), 14 deletions(-)
+
+commit e0c3d0043da2f670cfdb1abbb3223d5a594ad8db
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-25 13:55:52 +0200
+
+ Use more parentheses in the succeed() macro in tests/tests.h.
+
+ tests/tests.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 1fd76d488179580d37f31ee11948f4932aed31fd
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-24 14:49:34 +0200
+
+ Added more Multi-Block Stream test files.
+
+ tests/files/README | 23 +++++++++++++++++++++++
+ tests/files/bad-multi-none-header_2.lzma | Bin 0 -> 61 bytes
+ tests/files/bad-multi-none-header_3.lzma | Bin 0 -> 59 bytes
+ tests/files/bad-multi-none-header_4.lzma | Bin 0 -> 59 bytes
+ tests/files/bad-multi-none-header_5.lzma | Bin 0 -> 58 bytes
+ tests/files/bad-multi-none-header_6.lzma | Bin 0 -> 59 bytes
+ tests/files/good-multi-none-header_3.lzma | Bin 0 -> 59 bytes
+ 7 files changed, 23 insertions(+), 0 deletions(-)
+
+commit 6e27b1098a28f4ce09bfa6df68ad94182dfc2936
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-24 00:46:05 +0200
+
+ Added bunch of test files containing Multi-Block Streams.
+
+ tests/files/README | 53 +++++++++++++++++++++++++++++
+ tests/files/bad-multi-none-1.lzma | Bin 0 -> 54 bytes
+ tests/files/bad-multi-none-2.lzma | Bin 0 -> 53 bytes
+ tests/files/bad-multi-none-3.lzma | Bin 0 -> 53 bytes
+ tests/files/bad-multi-none-extra_1.lzma | Bin 0 -> 54 bytes
+ tests/files/bad-multi-none-extra_2.lzma | Bin 0 -> 54 bytes
+ tests/files/bad-multi-none-extra_3.lzma | Bin 0 -> 55 bytes
+ tests/files/bad-multi-none-header_1.lzma | Bin 0 -> 57 bytes
+ tests/files/bad-multi-none-index_1.lzma | Bin 0 -> 51 bytes
+ tests/files/bad-multi-none-index_2.lzma | Bin 0 -> 49 bytes
+ tests/files/bad-multi-none-index_3.lzma | Bin 0 -> 51 bytes
+ tests/files/bad-multi-none-index_4.lzma | Bin 0 -> 51 bytes
+ tests/files/good-multi-none-1.lzma | Bin 0 -> 75 bytes
+ tests/files/good-multi-none-2.lzma | Bin 0 -> 53 bytes
+ tests/files/good-multi-none-extra_1.lzma | Bin 0 -> 51 bytes
+ tests/files/good-multi-none-extra_2.lzma | Bin 0 -> 79 bytes
+ tests/files/good-multi-none-extra_3.lzma | Bin 0 -> 55 bytes
+ tests/files/good-multi-none-header_1.lzma | Bin 0 -> 58 bytes
+ tests/files/good-multi-none-header_2.lzma | Bin 0 -> 66 bytes
+ 19 files changed, 53 insertions(+), 0 deletions(-)
+
+commit db9df0a9609c01a00a227329fb96e983971040f5
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 23:43:00 +0200
+
+ Fix decoding of empty Metadata Blocks that don't have
+ even the Metadata Flags field. Earlier the code allowed
+ such files; now they are prohibited as the file format
+ specification requires.
+
+ src/liblzma/common/metadata_decoder.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+commit 765f0b05f6e95ed9194fb90819cee189ebbac36b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 23:38:18 +0200
+
+ Fix a bug related to 99e12af4e2b866c011fe0106cd1e0bfdcc8fe9c6.
+ lzma_metadata.header_metadata_size was not properly set to
+ zero if the Metadata had only the Metadata Flags field.
+
+ src/liblzma/common/metadata_decoder.c | 13 +++++++------
+ 1 files changed, 7 insertions(+), 6 deletions(-)
+
+commit 3a7cc5c3dec7b078941f961b0393b86c418883b6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 23:35:49 +0200
+
+ Fix decoding of Extra Records that have empty Data.
+
+ src/liblzma/common/metadata_decoder.c | 13 ++++++++++++-
+ 1 files changed, 12 insertions(+), 1 deletions(-)
+
+commit e5fdec93e273855c1bcc2579b83cfb481a9a1492
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 22:02:38 +0200
+
+ Add the trailing '\0' to lzma_extra.data as the API header
+ already documents.
+
+ src/liblzma/common/metadata_decoder.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+commit ed40dc5a2c28a8dfccab8c165b3780738eeef93e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 21:21:21 +0200
+
+ Added debug/full_flush.c.
+
+ debug/Makefile.am | 3 +-
+ debug/full_flush.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 107 insertions(+), 1 deletions(-)
+
+commit ae0cd09a666a1682da8fc09487322227679e218d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 21:05:33 +0200
+
+ Return LZMA_STREAM_END instead of LZMA_OK if
+ LZMA_SYNC_FLUSH or LZMA_FULL_FLUSH is used when
+ there's no unfinished Block open.
+
+ src/liblzma/common/stream_encoder_multi.c | 6 +++++-
+ 1 files changed, 5 insertions(+), 1 deletions(-)
+
+commit 0e80ded13dfceb98f9494cbb5381a95eb44d03db
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 20:05:01 +0200
+
+ Added bad-single-none-footer_filter_flags.lzma and
+ bad-single-none-too_long_vli.lzma.
+
+ tests/files/README | 5 +++++
+ .../files/bad-single-none-footer_filter_flags.lzma | Bin 0 -> 30 bytes
+ tests/files/bad-single-none-too_long_vli.lzma | Bin 0 -> 39 bytes
+ 3 files changed, 5 insertions(+), 0 deletions(-)
+
+commit 8c8eb14055d8dd536b1b1c58fb284d34bb8ed1dd
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 13:42:35 +0200
+
+ Fixed a typo.
+
+ src/liblzma/subblock/subblock_decoder_helper.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 980f65a9a10160c4d105767871e3002b9aaba3e0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 13:40:45 +0200
+
+ Fix a memory leak in the Subblock encoder.
+
+ src/liblzma/subblock/subblock_encoder.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 99e12af4e2b866c011fe0106cd1e0bfdcc8fe9c6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 13:36:07 +0200
+
+ Fix Size of Header Metadata Block handling. Now
+ lzma_metadata.header_metadata_size == LZMA_VLI_VALUE_UNKNOWN
+ is not allowed at all. To indicate missing Header Metadata
+ Block, header_metadata_size must be set to zero. This is
+ what Metadata decoder does after this patch too.
+
+ Note that other missing fields in lzma_metadata are still
+ indicated with LZMA_VLI_VALUE_UNKNOWN. This isn't as
+ illogical as it sounds at first, because missing Size of
+ Header Metadata Block means that Header Metadata Block is
+ not present in the Stream. With other Metadata fields,
+ a missing field means only that the value is unknown.
+
+ src/liblzma/common/info.c | 13 ++++---------
+ src/liblzma/common/metadata_decoder.c | 6 ++++++
+ src/liblzma/common/metadata_encoder.c | 11 +++++------
+ tests/test_info.c | 4 ++--
+ 4 files changed, 17 insertions(+), 17 deletions(-)
+
+commit 58b78ab20c1bcced45cf71ae6684868fc90b4b81
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 13:15:55 +0200
+
+ Fix a memory leak in metadata_decoder.c.
+
+ src/liblzma/common/metadata_decoder.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 4d8cdbdab44400fd98f0f18a0f701e27cd1acdae
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 13:13:58 +0200
+
+ Fix the fix 863028cb7ad6d8d0455fa69348f56b376d7b908f, which
+ just moved the problem. Now it's really fixed.
+
+ src/liblzma/common/info.c | 5 ++++-
+ 1 files changed, 4 insertions(+), 1 deletions(-)
+
+commit 67321de963ccf69410b3868b8e31534fe18a90de
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 00:21:04 +0200
+
+ Take advantage of return_if_error() macro in
+ lzma_info_metadata_set() in info.c.
+
+ src/liblzma/common/info.c | 24 ++++++++----------------
+ 1 files changed, 8 insertions(+), 16 deletions(-)
+
+commit 863028cb7ad6d8d0455fa69348f56b376d7b908f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-23 00:18:32 +0200
+
+ Fixed a dangling pointer that caused invalid free().
+
+ src/liblzma/common/info.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit cf49f42a6bd40143f54a6b10d6e605599e958c0b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-22 22:49:24 +0200
+
+ Added lzma_easy_* functions. These should make using
+ liblzma as easy as using zlib, because the easy API
+ doesn't require developers to know any fancy LZMA options.
+
+ Note that Multi-Block Stream encoding is currently broken.
+ The easy API should be OK; the bug(s) are elsewhere.
+
+ src/liblzma/api/Makefile.am | 1 +
+ src/liblzma/api/lzma.h | 1 +
+ src/liblzma/common/Makefile.am | 5 ++
+ src/liblzma/common/easy_common.c | 54 +++++++++++++++
+ src/liblzma/common/easy_common.h | 28 ++++++++
+ src/liblzma/common/easy_multi.c | 103 +++++++++++++++++++++++++++++
+ src/liblzma/common/easy_single.c | 37 ++++++++++
+ src/liblzma/common/stream_encoder_multi.c | 3 +-
+ src/liblzma/common/stream_encoder_multi.h | 26 +++++++
+ 9 files changed, 256 insertions(+), 2 deletions(-)
+
+commit 1747b85a43abc1c3f152dbd349be2ef4089ecf6a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-22 21:16:22 +0200
+
+ Fix Multi-Block Stream encoder's EOPM usage.
+
+ src/liblzma/common/stream_encoder_multi.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 0ed6f1adcea540fb9593ca115d36de537f7f0dc6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-22 00:15:11 +0200
+
+ Made lzma_extra pointers const in lzma_options_stream.
+
+ src/liblzma/api/lzma/stream.h | 4 ++--
+ src/liblzma/common/stream_encoder_multi.c | 8 ++++++--
+ 2 files changed, 8 insertions(+), 4 deletions(-)
+
+commit 305afa38f64c75af8e81c4167e2d8fa8d85b53a4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-20 20:15:21 +0200
+
+ Updated debug/sync_flush.c.
+
+ debug/sync_flush.c | 26 ++++++++++++++++++++++++--
+ 1 files changed, 24 insertions(+), 2 deletions(-)
+
+commit d53e9b77054cfade6a643e77d085273a348b189c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-20 20:14:26 +0200
+
+ Added debug/repeat.c.
+
+ debug/Makefile.am | 1 +
+ debug/repeat.c | 43 +++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 44 insertions(+), 0 deletions(-)
+
+commit 107259e306bcfc2336a0fb870fb58034c28faa52
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-20 20:12:58 +0200
+
+ Fix alignment handling bugs in Subblock encoder.
+
+ This leaves one known alignment bug unfixed: if the repeat count
+ doesn't fit into a 28-bit integer, the encoder has to split
+ it into multiple Subblocks with Subblock Type `Repeating Data'.
+ The extra Subblocks may have wrong alignment. Correct alignment
+ is restored after the split Repeating Data has been completely
+ written out.
+
+ Since the encoder doesn't even try to fix the alignment unless
+ the size of Data is at least 4 bytes, to trigger this bug you
+ need at least 4 GiB of repeating data with a sequence length of
+ 4 or more bytes. Since the worst thing done by this bug is
+ misaligned data (no data corruption), this bug simply isn't
+ worth fixing, because a proper fix isn't simple.
+
+ src/liblzma/subblock/subblock_encoder.c | 170 +++++++++++++++++++++---------
+ 1 files changed, 119 insertions(+), 51 deletions(-)
+
+commit e141fe18950400faaa3503ff88ac20eacd73e88c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-19 21:16:33 +0200
+
+ Implemented LZMA_SYNC_FLUSH support in the Subblock encoder.
+ The API for handling Subfilters was changed to make it
+ consistent with LZMA_SYNC_FLUSH.
+
+ A few sanity checks were added for Subfilter handling. Some
+ small bugs were fixed. More comments were added.
+
+ src/liblzma/api/lzma/subblock.h | 29 ++--
+ src/liblzma/subblock/subblock_encoder.c | 263 +++++++++++++++++++++++--------
+ 2 files changed, 214 insertions(+), 78 deletions(-)
+
+commit 23c227a864a3b69f38c6a74306161d4e6918d1cc
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-19 15:19:21 +0200
+
+ Revised the Delta filter implementation. The initialization
+ function is still shared between encoder and decoder, but the
+ actual coding is in separate files for encoder and decoder.
+
+ There are now separate functions for the actual delta
+ calculation depending on whether Delta is the last filter in the
+ chain or not. If it is the last, the new code copies the
+ data from input to output buffer and does the delta
+ calculation at the same time. The old code first copied the
+ data, then did the delta in the target buffer, which required
+ reading through the data twice.
+
+ Support for LZMA_SYNC_FLUSH was added to the Delta encoder.
+ This doesn't change anything in the file format.
+
+ src/liblzma/common/Makefile.am | 14 +++-
+ src/liblzma/common/delta_coder.c | 189 ------------------------------------
+ src/liblzma/common/delta_coder.h | 31 ------
+ src/liblzma/common/delta_common.c | 70 +++++++++++++
+ src/liblzma/common/delta_common.h | 48 +++++++++
+ src/liblzma/common/delta_decoder.c | 102 +++++++++++++++++++
+ src/liblzma/common/delta_decoder.h | 28 ++++++
+ src/liblzma/common/delta_encoder.c | 97 ++++++++++++++++++
+ src/liblzma/common/delta_encoder.h | 28 ++++++
+ src/liblzma/common/raw_decoder.c | 2 +-
+ src/liblzma/common/raw_encoder.c | 2 +-
+ 11 files changed, 387 insertions(+), 224 deletions(-)
+
+commit 61dc82f3e306b25ce3cd3d529df9ec7a0ec04b73
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-18 20:18:08 +0200
+
+ Added the debug directory and the first debug tool
+ (sync_flush). These tools are not built unless the
+ user runs "make" in the debug directory.
+
+ Makefile.am | 1 +
+ configure.ac | 1 +
+ debug/Makefile.am | 30 +++++++++++++
+ debug/README | 17 ++++++++
+ debug/sync_flush.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 165 insertions(+), 0 deletions(-)
+
+commit 0ae3208db94585eb8294b97ded387de0a3a07646
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-18 20:13:00 +0200
+
+ Added test files to test usage of flush marker in LZMA.
+
+ tests/files/README | 12 ++++++++++++
+ tests/files/bad-single-lzma-flush_beginning.lzma | Bin 0 -> 53 bytes
+ tests/files/bad-single-lzma-flush_twice.lzma | Bin 0 -> 63 bytes
+ tests/files/good-single-lzma-flush_1.lzma | Bin 0 -> 48 bytes
+ tests/files/good-single-lzma-flush_2.lzma | Bin 0 -> 63 bytes
+ 5 files changed, 12 insertions(+), 0 deletions(-)
+
+commit ab5feaf1fcc146ef9fd39360c53c290bec39524e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-18 20:02:52 +0200
+
+ Fix LZMA_SYNC_FLUSH handling in LZ and LZMA encoders.
+ That code is now almost completely in LZ coder, where
+ it can be shared with other LZ77-based algorithms in
+ future.
+
+ src/liblzma/lz/lz_encoder.c | 34 ++++++++++++++++++++++++++--------
+ src/liblzma/lz/lz_encoder.h | 1 +
+ src/liblzma/lzma/lzma_encoder.c | 27 ++-------------------------
+ 3 files changed, 29 insertions(+), 33 deletions(-)
+
+commit 079c4f7fc26b3d0b33d9ae7536697b45f3b73585
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-18 17:21:24 +0200
+
+ Don't add -g to CFLAGS when --enable-debug is specified.
+ It's the job of the user to put that in CFLAGS.
+
+ configure.ac | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+commit 61d1784d8f1761d979a6da6e223e279ca33815e6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-18 14:17:37 +0200
+
+ Set stdin and stdout to binary mode on Windows. This patch is
+ a forward port of b7b22fcb979a16d3a47c8001f058c9f7d4416068
+ from lzma-utils-legacy.git. I don't know if the new code base
+ builds on Windows, but this is a start.
+
+ src/lzmadec/lzmadec.c | 9 +++++++++
+ 1 files changed, 9 insertions(+), 0 deletions(-)
+
+commit c9cba976913e55ff9aac8a8133cc94416c7c1c9c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-18 00:50:29 +0200
+
+ Added test_compress.sh and bunch of files needed by it.
+ This new set of tests compress and decompress several
+ test files with many different compression options.
+ This set of tests will be extended later.
+
+ tests/Makefile.am | 30 ++++--
+ tests/bcj_test.c | 66 +++++++++++++
+ tests/compress_prepared_bcj_sparc | Bin 0 -> 6804 bytes
+ tests/compress_prepared_bcj_x86 | Bin 0 -> 4649 bytes
+ tests/create_compress_files.c | 164 +++++++++++++++++++++++++++++++++
+ tests/test_compress.sh | 183 +++++++++++++++++++++++++++++++++++++
+ 6 files changed, 433 insertions(+), 10 deletions(-)
+
+commit 33be3c0e24d8f43376ccf71cc77d53671e792f07
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-17 18:56:53 +0200
+
+ Subblock decoder: Don't exit the main loop in decode_buffer()
+ too early if we hit End of Input while decoding a Subblock of
+ type Repeating Data. To keep the loop termination condition
+ elegant, the order of enumerations in coder->sequence was
+ changed.
+
+ To keep the case-labels in roughly the same order as the
+ enumerations in coder->sequence, large chunks of code were
+ moved around. This made the diff big and ugly compared to
+ the amount of the actual changes made.
+
+ src/liblzma/subblock/subblock_decoder.c | 272 ++++++++++++++++---------------
+ 1 files changed, 139 insertions(+), 133 deletions(-)
+
+commit b254bd97b1cdb68d127523d91ca9e054ed89c4fd
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-17 17:39:42 +0200
+
+ Fix a wrong (too small) size of the argument unfiltered_max
+ in ia64_coder_init(). It triggered an assert() in
+ simple_coder.c, and could have caused a buffer overflow.
+
+ This error was probably a copy-paste mistake, since most
+ of the simple filters use unfiltered_max = 4.
+
+ src/liblzma/simple/ia64.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 8f5794c8f1a30e8e3b524b415bbe81af2e04c64a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-17 17:27:45 +0200
+
+ Added --delta to the output of "lzma --help".
+
+ src/lzma/help.c | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+commit f88590e0014b38d40465937c19f25f05f16c79ae
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-17 13:14:20 +0200
+
+ Fix the Subblock decoder: if the Subblock filter was used with known
+ Uncompressed Size, and the last output byte was from RLE,
+ the code didn't stop decoding as it should have done.
+
+ src/liblzma/subblock/subblock_decoder.c | 6 ++++++
+ 1 files changed, 6 insertions(+), 0 deletions(-)
+
+commit bc0b945ca376e333077644d2f7fd54c2848aab8a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-16 16:33:37 +0200
+
+ Tiny non-technical edits to file-format.txt.
+
+ doc/file-format.txt | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+commit 7599bb7064ccf007f054595dedda7927af868252
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-16 14:48:04 +0200
+
+ Plugged a memory leak in stream_decoder.c.
+
+ src/liblzma/common/stream_decoder.c | 20 ++++++++++++++++++++
+ 1 files changed, 20 insertions(+), 0 deletions(-)
+
+commit 0b581539311f3712946e81e747839f8fb5f441a7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-16 14:47:27 +0200
+
+ Added memory leak detection to lzmadec.c.
+
+ src/lzmadec/lzmadec.c | 3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+commit 5b5b13c7bb8fde6331064d21f3ebde41072480c4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-16 14:46:50 +0200
+
+ Added lzma_memlimit_count().
+
+ src/liblzma/api/lzma/memlimit.h | 10 ++++++++++
+ src/liblzma/common/memory_limitter.c | 19 +++++++++++++++++++
+ 2 files changed, 29 insertions(+), 0 deletions(-)
+
+commit 19389f2b82ec54fd4c847a18f16482e7be4c9887
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-16 14:31:44 +0200
+
+ Added ARRAY_SIZE(array) macro.
+
+ src/common/sysdefs.h | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+commit 9bc33a54cbf83952130adbcb1be32c6882485416
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-16 13:27:03 +0200
+
+ Make Uncompressed Size validation more strict
+ in alone_decoder.c.
+
+ src/liblzma/common/alone_decoder.c | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+commit 01d71d60b79027e1ce3eb9c79ae5191e1407c883
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 17:46:59 +0200
+
+ Free the allocated memory in lzmadec if debugging is
+ enabled. This should make it possible to detect possible
+ memory leaks with Valgrind.
+
+ src/lzmadec/lzmadec.c | 7 +++++++
+ 1 files changed, 7 insertions(+), 0 deletions(-)
+
+commit 8235e6e5b2878f76633afcda9a334640db503ef5
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 16:25:38 +0200
+
+ Fix memory leaks from test_block_header.c.
+
+ tests/test_block_header.c | 19 +++++++++++++++++--
+ 1 files changed, 17 insertions(+), 2 deletions(-)
+
+commit f10fc6a69d40b6d5c9cfbf8d3746f49869c2e2f6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 14:23:35 +0200
+
+ Use fastpos.h when encoding LZMA dictionary size in
+ Filter Flags encoder.
+
+ src/liblzma/common/filter_flags_encoder.c | 40 +++++++++++++---------------
+ 1 files changed, 19 insertions(+), 21 deletions(-)
+
+commit e5728142a2048979f5c0c2149ce71ae952a092e1
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 14:02:22 +0200
+
+ Revised the fastpos code. It now uses the slightly faster
+ table-based version from LZMA SDK 4.57. This should be
+ fast on most systems.
+
+ A simpler and smaller alternative version is also provided.
+ On some CPUs this can be even a little faster than the
+ default table-based version (see comments in fastpos.h),
+ but on most systems the table-based code is faster.
+
+ src/liblzma/common/init_encoder.c | 3 -
+ src/liblzma/lzma/Makefile.am | 4 +
+ src/liblzma/lzma/fastpos.h | 156 +++++++++
+ src/liblzma/lzma/fastpos_table.c | 519 ++++++++++++++++++++++++++++
+ src/liblzma/lzma/fastpos_tablegen.c | 63 ++++
+ src/liblzma/lzma/lzma_common.h | 3 +-
+ src/liblzma/lzma/lzma_encoder.c | 1 +
+ src/liblzma/lzma/lzma_encoder_getoptimum.c | 1 +
+ src/liblzma/lzma/lzma_encoder_init.c | 22 --
+ src/liblzma/lzma/lzma_encoder_private.h | 21 --
+ 10 files changed, 746 insertions(+), 47 deletions(-)
+
+commit 10437b5b567f6a025ff16c45a572e417a0a9cc26
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 13:32:13 +0200
+
+ Added bsr.h.
+
+ src/liblzma/common/Makefile.am | 1 +
+ src/liblzma/common/bsr.h | 61 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 62 insertions(+), 0 deletions(-)
+
+commit f3c88e8b8d8dd57f4bba5f0921eebf276437c244
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 13:29:14 +0200
+
+ Fixed assembler detection in configure.ac, and added
+ detection for x86_64.
+
+ configure.ac | 32 ++++++++++++++++----------------
+ 1 files changed, 16 insertions(+), 16 deletions(-)
+
+commit 54ec204f58287f50d3976288295da4188a19192b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 12:20:41 +0200
+
+ Omit invalid space from printf() format string
+ in price_table_gen.c.
+
+ src/liblzma/rangecoder/price_table_gen.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 01b4b19f49f00e17a0f9cb8754c672ac0847b6e1
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 09:54:34 +0200
+
+ Removed a few unused macros from lzma_common.h.
+
+ src/liblzma/lzma/lzma_common.h | 8 ++------
+ 1 files changed, 2 insertions(+), 6 deletions(-)
+
+commit 19bd7f3cf25e4ff8487ef7098ca4a7b58681961d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 08:37:42 +0200
+
+ Fix a typo in lzma_encoder.c.
+
+ src/liblzma/lzma/lzma_encoder.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 9f9b1983013048f2142e8bc7e240149d2687bedc
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 08:36:25 +0200
+
+ Convert bittree_get_price() and bittree_reverse_get_price()
+ from macros to inline functions.
+
+ src/liblzma/lzma/lzma_encoder.c | 19 +++----
+ src/liblzma/lzma/lzma_encoder_getoptimum.c | 16 ++----
+ src/liblzma/rangecoder/range_encoder.h | 76 ++++++++++++++++------------
+ 3 files changed, 56 insertions(+), 55 deletions(-)
+
+commit 78e85cb1a7667c54853670d2eb09d754bcbda87d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 07:44:59 +0200
+
+ Fix CRC code in case --enable-small is used.
+
+ src/liblzma/check/crc32_init.c | 2 +-
+ src/liblzma/check/crc64_init.c | 2 +-
+ src/liblzma/common/init_decoder.c | 2 --
+ src/liblzma/common/init_encoder.c | 2 --
+ tests/test_check.c | 2 ++
+ 5 files changed, 4 insertions(+), 6 deletions(-)
+
+commit 949d4346e2d75bcd9dcb66c394d8d851d8db3aa0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 07:41:39 +0200
+
+ Fix typo in test_index.c.
+
+ tests/test_index.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit d13d693155c176fc9e9ad5c50d48ccba27c2d9c6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-15 07:40:21 +0200
+
+ Added precomputed range coder probability price table.
+
+ src/liblzma/common/init_encoder.c | 5 +-
+ src/liblzma/rangecoder/Makefile.am | 9 +++-
+ src/liblzma/rangecoder/price_table.c | 70 +++++++++++++++++++++++++++++
+ src/liblzma/rangecoder/price_table_gen.c | 55 ++++++++++++++++++++++
+ src/liblzma/rangecoder/price_table_init.c | 48 ++++++++++++++++++++
+ src/liblzma/rangecoder/range_common.h | 4 +-
+ src/liblzma/rangecoder/range_encoder.c | 46 -------------------
+ src/liblzma/rangecoder/range_encoder.h | 21 ++++-----
+ 8 files changed, 197 insertions(+), 61 deletions(-)
+
+commit 362dc3843b373c1007a50a4719f378981f18ae03
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-14 13:42:43 +0200
+
+ Remove RC_BUFFER_SIZE from lzma_encoder_private.h
+ and replace it with a sanity check.
+
+ src/liblzma/lzma/lzma_encoder_private.h | 6 ++++--
+ 1 files changed, 4 insertions(+), 2 deletions(-)
+
+commit e22b37968d153683fec61ad37b6b160cb7ca4ddc
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-14 13:39:54 +0200
+
+ Major changes to LZ encoder, LZMA encoder, and range encoder.
+ These changes implement support for LZMA_SYNC_FLUSH in LZMA
+ encoder, and move the temporary buffer needed by range encoder
+ from lzma_range_encoder structure to lzma_lz_encoder.
+
+ src/liblzma/lz/lz_encoder.c | 138 +++++++++++++++++++++++++++-----
+ src/liblzma/lz/lz_encoder.h | 17 +++-
+ src/liblzma/lzma/lzma_encoder.c | 74 ++++++++++-------
+ src/liblzma/rangecoder/range_encoder.h | 117 ++++++++-------------------
+ 4 files changed, 206 insertions(+), 140 deletions(-)
+
+commit b59ef3973781f892c0a72b5e5934194567100be5
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-14 13:34:29 +0200
+
+ Added one assert() to process.c of the command line tool.
+
+ src/lzma/process.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 9547e734a00ddb64c851fa3f116e4f9e7d763ea7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-14 12:09:52 +0200
+
+ Don't use coder->lz.stream_end_was_reached in assertions
+ in match_c.h.
+
+ src/liblzma/lz/match_c.h | 2 --
+ 1 files changed, 0 insertions(+), 2 deletions(-)
+
+commit 3e09e1c05871f3757f759b801890ccccc9286608
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-14 12:08:02 +0200
+
+ In lzma_read_match_distances(), don't use
+ coder->lz.stream_end_was_reached. That variable
+ will be removed, and the check isn't required anyway.
+ Rearrange the check so that it doesn't make one
+ think that there could be an integer overflow.
+
+ src/liblzma/lzma/lzma_encoder_private.h | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+commit a670fec8021e5962429689c194148a04c3418872
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-14 11:56:41 +0200
+
+ Small LZMA_SYNC_FLUSH fixes to Block and Single-Stream encoders.
+
+ src/liblzma/common/block_encoder.c | 4 ++--
+ src/liblzma/common/stream_encoder_single.c | 1 +
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+commit 3599dba9570a6972a16b6398d6c838e9b420e985
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-14 11:54:56 +0200
+
+ More fixes to LZMA decoder's flush marker handling.
+
+ src/liblzma/lzma/lzma_decoder.c | 52 ++++++++++++++++++++++----------------
+ 1 files changed, 30 insertions(+), 22 deletions(-)
+
+commit f73c2ab6079ed5675a42b39d584a567befbd4624
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-10 17:13:42 +0200
+
+ Eliminate lzma_lz_encoder.must_move_pos. It's needed
+ only in one place, which isn't performance critical.
+
+ src/liblzma/lz/lz_encoder.c | 6 ++----
+ src/liblzma/lz/lz_encoder.h | 4 ----
+ 2 files changed, 2 insertions(+), 8 deletions(-)
+
+commit 382808514a42b2f4b4a64515e2dfb3fc1bc48ecd
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-09 20:05:57 +0200
+
+ Define HAVE_ASM_X86 when x86 assembler optimizations are
+ used. This #define will be useful for inline assembly.
+
+ configure.ac | 5 ++++-
+ 1 files changed, 4 insertions(+), 1 deletions(-)
+
+commit 0e70fbe4032351aab13a1cd8e5deced105c0b276
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-09 12:06:46 +0200
+
+ Added good-single-none-empty_3.lzma and
+ bad-single-none-empty.lzma.
+
+ tests/files/README | 6 ++++++
+ tests/files/bad-single-none-empty.lzma | Bin 0 -> 19 bytes
+ tests/files/good-single-none-empty_3.lzma | Bin 0 -> 19 bytes
+ 3 files changed, 6 insertions(+), 0 deletions(-)
+
+commit 379fbbe84d922c7cc00afa65c6f0c095da596b19
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-08 23:11:59 +0200
+
+ Take advantage of return_if_error() in block_decoder.c.
+
+ src/liblzma/common/block_decoder.c | 23 +++++++----------------
+ 1 files changed, 7 insertions(+), 16 deletions(-)
+
+commit 97d5fa82077e57815dfad995dc393c2809a78539
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-08 23:10:57 +0200
+
+ Updated tests/files/README.
+
+ tests/files/README | 15 +++++++++------
+ 1 files changed, 9 insertions(+), 6 deletions(-)
+
+commit 3bb9bb310936cba6a743b4f06739a397dec7c28f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-08 23:05:40 +0200
+
+ Added test files with empty Compressed Data.
+
+ tests/files/README | 6 ++++++
+ tests/files/good-single-lzma-empty.lzma | Bin 0 -> 21 bytes
+ tests/files/good-single-none-empty_1.lzma | Bin 0 -> 18 bytes
+ tests/files/good-single-none-empty_2.lzma | Bin 0 -> 26 bytes
+ 4 files changed, 6 insertions(+), 0 deletions(-)
+
+commit 7054c5f5888ac6a7178cd43dc9583ce6c7e78c9f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-08 22:58:42 +0200
+
+ Fix decoding of Blocks that have only Block Header.
+
+ src/liblzma/common/block_decoder.c | 37 +++++++++++++----------------------
+ 1 files changed, 14 insertions(+), 23 deletions(-)
+
+commit 753e4d95cd1cf29c632dfe1a670af7c67aeffbf4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-08 22:27:46 +0200
+
+ Added good-single-subblock_implicit.lzma.
+
+ tests/files/README | 2 ++
+ tests/files/good-single-subblock_implicit.lzma | Bin 0 -> 35 bytes
+ 2 files changed, 2 insertions(+), 0 deletions(-)
+
+commit faeac7b7aca75f86afed1e7cc06279d9d497c627
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-08 18:50:30 +0200
+
+ Disable CRC32 from Block Headers when --check=none
+ has been specified.
+
+ src/lzma/process.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit a751126dbb656767ed4666cf0e5d3e17349d93d1
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-08 13:36:29 +0200
+
+ Fixed encoding of empty files. Arguments to is_size_valid()
+ were in wrong order in block_encoder.c.
+
+ src/liblzma/common/block_encoder.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 9080267603b1006c4867c823307dca9df8be0d20
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-08 13:35:36 +0200
+
+ Added a few test files.
+
+ tests/files/README | 21 +++++++++++++++++--
+ tests/files/bad-cat-single-none-pad_garbage_1.lzma | Bin 0 -> 65 bytes
+ tests/files/bad-cat-single-none-pad_garbage_2.lzma | Bin 0 -> 65 bytes
+ tests/files/bad-cat-single-none-pad_garbage_3.lzma | Bin 0 -> 65 bytes
+ tests/files/bad-single-data_after_eopm.lzma | Bin 55 -> 0 bytes
+ tests/files/bad-single-data_after_eopm_1.lzma | Bin 0 -> 55 bytes
+ tests/files/bad-single-none-truncated.lzma | Bin 0 -> 29 bytes
+ 7 files changed, 18 insertions(+), 3 deletions(-)
+
+commit b4943ccf73b64fc93a90a23474509c316f55eb2b
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-08 12:29:58 +0200
+
+ Avoid using ! in test_files.sh, because that doesn't work
+ with some ancient /bin/sh versions.
+
+ tests/test_files.sh | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+commit e2417b2b9134f3f65e14b61e23cd3644d8954353
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-08 00:48:30 +0200
+
+ More pre-C99 inttypes.h compatibility fixes. Now the code
+ should work even if the system has no inttypes.h.
+
+ src/common/physmem.h | 11 -----------
+ src/liblzma/check/crc32_init.c | 5 +----
+ src/liblzma/check/crc32_tablegen.c | 7 ++-----
+ src/liblzma/check/crc64_init.c | 5 +----
+ src/liblzma/check/crc64_tablegen.c | 7 ++-----
+ 5 files changed, 6 insertions(+), 29 deletions(-)
+
+commit 5d227e51c23639423f4ade06aabb54e131f8505e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-07 23:25:32 +0200
+
+ Updated fi.po although it's currently pretty much crap.
+
+ po/fi.po | 12 ++++++------
+ 1 files changed, 6 insertions(+), 6 deletions(-)
+
+commit c7189d981a1b27c63da0c1ee80d9b5cd8ce1733d
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-07 23:14:25 +0200
+
+ Test for $GCC = yes instead of whether it is non-empty. This
+ way it is possible to use ac_cv_c_compiler_gnu=no to
+ force configure to think it is using non-GNU C compiler.
+
+ configure.ac | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 3dbbea82b74bb841c995ad332a3aeca613015e10
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-07 21:49:41 +0200
+
+ Added test_files.sh to tests/Makefile.am so it gets
+ included in the tarball with "make dist".
+
+ tests/Makefile.am | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 2fd2d181543feab1b4003f3ac6e85625fbee04f0
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-07 18:22:24 +0200
+
+ Cosmetic edit to test_files.sh.
+
+ tests/test_files.sh | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+commit 9a71d573100a990ceb30ce0bec6a9a15d795605f
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-07 18:09:44 +0200
+
+ Added tests/files/README.
+
+ tests/files/README | 108 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 108 insertions(+), 0 deletions(-)
+
+commit 47f48fe9936ed72617a60fbd015df7e0e47a1e43
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-07 14:20:57 +0200
+
+ Tell in COPYING that everything in tests/files is
+ public domain.
+
+ COPYING | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+commit 3502b3e1d00251d3c8dda96079440705c28d8225
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-07 14:19:05 +0200
+
+ Cleaned up the tests/files directory.
+
+ tests/files/bad-single-subblock-padding_loop.lzma | Bin 0 -> 43 bytes
+ tests/files/bad-single-subblock1023-slow.lzma | Bin 0 -> 7886 bytes
+ tests/files/malicious-single-subblock-loop.lzma | Bin 43 -> 0 bytes
+ tests/files/malicious-single-subblock-lzma.lzma | Bin 505 -> 0 bytes
+ .../files/malicious-single-subblock1023-slow.lzma | Bin 7886 -> 0 bytes
+ 5 files changed, 0 insertions(+), 0 deletions(-)
+
+commit 908b2ac604b9940369d7fe8a45e9eb6da5d2a24c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-07 13:49:19 +0200
+
+ Added test_files.sh to test decoding of the files in
+ the tests/files directory. It doesn't test the malicious
+ files yet.
+
+ tests/Makefile.am | 4 +++-
+ tests/test_files.sh | 40 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 43 insertions(+), 1 deletions(-)
+
+commit ecb2a6548f5978022a8fa931719dc575f5fd3bf6
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-07 11:23:13 +0200
+
+ Updated README regarding the assembler optimizations.
+
+ README | 10 +++++-----
+ 1 files changed, 5 insertions(+), 5 deletions(-)
+
+commit eacb8050438d3e6146c86eb9732d3fb1ef1825cb
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-07 10:58:00 +0200
+
+ Updated THANKS.
+
+ THANKS | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+commit 1239649f96132b18e3b7e2dd152ecf53a195caa8
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-06 21:47:17 +0200
+
+ Cosmetic changes to configure.ac.
+
+ configure.ac | 14 ++++++--------
+ 1 files changed, 6 insertions(+), 8 deletions(-)
+
+commit 88ee301ec2e4506a30ec7ac9aaa2288e2dcadd0e
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-06 19:46:38 +0200
+
+ Automatically disable assembler code on Darwin x86.
+ Darwin has a different ABI than GNU+Linux and Solaris,
+ thus the assembler code doesn't assemble on Darwin.
+
+ configure.ac | 17 +++++++++++++++--
+ 1 files changed, 15 insertions(+), 2 deletions(-)
+
+commit c15a7abf66e3a70792f7444115e484c7981c8284
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-06 19:45:27 +0200
+
+ With printf(), use PRIu64 with a cast to uint64_t instead
+ of %zu, because some pre-C99 libc versions don't support %zu.
+
+ src/lzma/help.c | 13 +++++++------
+ src/lzmadec/lzmadec.c | 6 ++++--
+ 2 files changed, 11 insertions(+), 8 deletions(-)
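+
+ A minimal sketch (not from the xz sources) of the printf()
+ pattern this commit describes; the variable name here is
+ illustrative only:
+
+ #include <inttypes.h>
+ #include <stdio.h>
+
+ int main(void)
+ {
+ /* A size_t value, e.g. a byte count from the coder. */
+ size_t in_size = 123;
+ /* Cast to uint64_t and print with PRIu64 instead of
+ relying on %zu, which some pre-C99 libc versions
+ don't support. */
+ printf("%" PRIu64 " bytes\n", (uint64_t)(in_size));
+ return 0;
+ }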
+
+commit 4e7e54c4c522ab2f6a7abb92cefc4f707e9568fb
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-06 16:27:41 +0200
+
+ Introduced compatibility with systems that have pre-C99
+ or no inttypes.h. This is useful when the compiler has
+ good enough support for C99, but libc headers don't.
+
+ Changed liblzma API so that sys/types.h and inttypes.h
+ have to be #included before #including lzma.h. On systems
+ that don't have C99 inttypes.h, it is up to the
+ application to provide the required types and macros
+ before #including lzma.h.
+
+ If lzma.h defined the missing types and macros, it could
+ conflict with third-party applications whose configure
+ has detected that the types are missing and defined them
+ in config.h already. An alternative would have been
+ introducing lzma_uint32 and similar types, but that would
+ just be an extra pain on modern systems.
+
+ configure.ac | 13 +++++++-
+ doc/liblzma-intro.txt | 10 +++++-
+ src/common/sysdefs.h | 59 +++++++++++++++++++++++++++++++++++-
+ src/liblzma/api/lzma.h | 40 ++++++++++++++++--------
+ src/liblzma/check/crc32_table.c | 4 +--
+ src/liblzma/check/crc32_table_be.h | 2 -
+ src/liblzma/check/crc32_table_le.h | 2 -
+ src/liblzma/check/crc32_tablegen.c | 1 -
+ src/liblzma/check/crc64_table.c | 4 +--
+ src/liblzma/check/crc64_table_be.h | 2 -
+ src/liblzma/check/crc64_table_le.h | 2 -
+ src/liblzma/check/crc64_tablegen.c | 1 -
+ src/lzma/private.h | 1 -
+ 13 files changed, 106 insertions(+), 35 deletions(-)
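+
+ A minimal sketch of the include order the new policy expects
+ from applications (only the header names come from the commit
+ above; the comment is illustrative):
+
+ /* sys/types.h and inttypes.h must come before lzma.h. */
+ #include <sys/types.h>
+ #include <inttypes.h>
+ #include <lzma.h>
+
+ /* On a system without a C99 inttypes.h, the application has
+ to define the fixed-width integer types and PRI* macros
+ itself (for example in its config.h) before this point. */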
+
+commit a71864f77dfb76b5d78a270641539947c312583a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-05 19:57:00 +0200
+
+ Fix typo in comment (INT64_MAX -> UINT64_MAX).
+
+ src/liblzma/api/lzma/vli.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 072927905a3b66281c6311b4b351caa501d8b73a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-05 19:42:04 +0200
+
+ Rearranged testing of GCC-specific flags.
+
+ configure.ac | 33 +++++++++++++++++++++++----------
+ 1 files changed, 23 insertions(+), 10 deletions(-)
+
+commit d160ee32598c6d1cd9054ef019e8c9331208b188
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-05 01:20:24 +0200
+
+ Another bug fix for flush marker detection.
+
+ src/liblzma/lzma/lzma_decoder.c | 10 +++++++++-
+ 1 files changed, 9 insertions(+), 1 deletions(-)
+
+commit fc67f79f607cbfa78c6f47a69dec098d8659b162
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-04 21:37:01 +0200
+
+ Fix stupid bugs in flush marker detection.
+
+ src/liblzma/lzma/lzma_decoder.c | 7 ++++---
+ 1 files changed, 4 insertions(+), 3 deletions(-)
+
+commit 0029cbbabe87d491fc046a55a629a6d556010baa
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-04 21:30:33 +0200
+
+ Added support for flush marker, which will be in files
+ that use LZMA_SYNC_FLUSH with encoder (not implemented
+ yet). This is a new feature in the raw LZMA format,
+ which isn't supported by old decoders. This shouldn't
+ be a problem in practice, since lzma_alone_encoder()
+ will not allow LZMA_SYNC_FLUSH, and thus not allow
+ creating files not decodable with old decoders.
+
+ Made lzma_decoder.c require a tab width of 4 characters
+ if one wants to fit the code in 80 columns. This makes
+ the code easier to read.
+
+ src/liblzma/lzma/lzma_common.h | 4 +
+ src/liblzma/lzma/lzma_decoder.c | 217 ++++++++++++++++++---------------------
+ 2 files changed, 104 insertions(+), 117 deletions(-)
+
+commit bbfd1f6ab058a7e661545205befcb7f70c5685ab
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2008-01-04 20:45:05 +0200
+
+ Moved range decoder initialization (reading the first
+ five input bytes) from LZMA decoder to range decoder
+ header. Did the same for decoding of direct bits.
+
+ src/liblzma/lzma/lzma_decoder.c | 42 ++-------------
+ src/liblzma/rangecoder/range_decoder.h | 87 ++++++++++++++++++++++----------
+ 2 files changed, 66 insertions(+), 63 deletions(-)
+
+commit 5db745cd2a74f6ed2e52f5c716c08ed0daf17ebc
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-14 11:15:21 +0200
+
+ Added a note to README that --disable-assembler
+ must be used on Darwin.
+
+ README | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+commit 44b333d4615b5aabc557a0e1b6bb0096da3fae24
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-14 10:07:10 +0200
+
+ Use the filename suffix .S instead of .s for assembler files
+ so that the preprocessor removes the /* */ style comments,
+ which are not supported by some non-GNU assemblers (Solaris)
+ that otherwise work with this code.
+
+ src/liblzma/check/Makefile.am | 4 +-
+ src/liblzma/check/crc32_x86.S | 217 +++++++++++++++++++++++++++++++++++++++++
+ src/liblzma/check/crc32_x86.s | 217 -----------------------------------------
+ src/liblzma/check/crc64_x86.S | 203 ++++++++++++++++++++++++++++++++++++++
+ src/liblzma/check/crc64_x86.s | 203 --------------------------------------
+ 5 files changed, 422 insertions(+), 422 deletions(-)
+
+commit ec1c82b2e82f395f6e8e19ac212a639644330cd7
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-14 09:59:05 +0200
+
+ Fixed wrong symbol name in crc64_x86.s.
+
+ src/liblzma/check/crc64_x86.s | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 2881570df6803eed2fe550af34574e8e61794804
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-14 09:53:24 +0200
+
+ Use .globl instead of .global in x86 assembler code for
+ better portability. Still needs fixing the commenting.
+
+ src/liblzma/check/crc32_x86.s | 2 +-
+ src/liblzma/check/crc64_x86.s | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+commit 698470b8f33fc0e5f27dafa93b39b6dd5dde5a66
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-13 20:14:37 +0200
+
+ Fixed a few short options that take an argument.
+ short_opts[] was missing colons to indicate
+ a required argument. Thanks to Fabio Pedretti for
+ the bug report.
+
+ src/lzma/args.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
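+
+ A minimal sketch (not the actual lzma option table) of the
+ getopt() detail this fix is about; the option letters are
+ hypothetical:
+
+ #include <unistd.h>
+
+ /* A colon after a letter marks an option that requires an
+ argument: here -o and -S take arguments, -v does not.
+ Without the colons, getopt() would treat -o and -S as
+ plain flags and never set optarg for them. */
+ static const char short_opts[] = "o:S:v";
+
+ int parse_args(int argc, char **argv)
+ {
+ int c;
+ while ((c = getopt(argc, argv, short_opts)) != -1) {
+ /* handle 'o', 'S', 'v'; optarg holds the argument */
+ }
+ return 0;
+ }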
+
+commit 918bcb0e0728d2d976621e9f35b56f224f11d989
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-11 17:08:04 +0200
+
+ Removed uncompressed size tracking from Delta encoder too.
+
+ src/liblzma/common/delta_coder.c | 21 +++------------------
+ 1 files changed, 3 insertions(+), 18 deletions(-)
+
+commit 3e16d51dd645667b05ff826665b1fc353aa41cd9
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-11 16:49:19 +0200
+
+ Remove uncompressed size tracking from the filter encoders.
+ It's not strictly needed there, and just complicates the
+ code. LZ encoder never even had this feature.
+
+ The primary reason to have uncompressed size tracking in
+ filter encoders was validating that the application
+ doesn't give a different amount of input than it had
+ promised. A side effect was to validate the internal
+ workings of liblzma.
+
+ Uncompressed size tracking is still present in the Block
+ encoder. Maybe it should be added to LZMA_Alone and raw
+ encoders too. It's simpler to have one coder just to
+ validate the uncompressed size instead of having it
+ in every filter.
+
+ src/liblzma/common/copy_coder.c | 25 +------------------
+ src/liblzma/simple/simple_coder.c | 29 +++--------------------
+ src/liblzma/subblock/subblock_encoder.c | 38 +++++--------------------------
+ 3 files changed, 12 insertions(+), 80 deletions(-)
+
+commit 5286723e0d1ac386d5b07f08d78e61becf895a5a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-11 14:10:53 +0200
+
+ Get rid of no-NLS gnulib. I don't know how to get it
+ working with Automake. People who want smaller lzmadec
+ should use --disable-nls on non-GNU systems.
+
+ lib/Makefile.am | 10 +---------
+ src/lzma/Makefile.am | 2 +-
+ src/lzmadec/Makefile.am | 4 +++-
+ 3 files changed, 5 insertions(+), 11 deletions(-)
+
+commit ce8b036a6c7a43b290356b673d953f6d76b2be64
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-11 14:09:35 +0200
+
+ Fixed a typo in tests/Makefile.am which prevented
+ building the tests if gnulib was needed.
+
+ tests/Makefile.am | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit 7c1ad41eb611ed89e5bb8792a3beb533b7aa59f4
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-11 11:18:58 +0200
+
+ Fixed wrong type of flags_size in Subblock encoder.
+
+ src/liblzma/subblock/subblock_encoder.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+commit ce64df716243fdc40359090d1f6541f3a4f5f21a
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-10 20:44:16 +0200
+
+ Bumped version number to 4.42.3alpha.
+
+ configure.ac | 2 +-
+ src/liblzma/api/lzma/version.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+commit b499a0403ea5c41d6a25b40275eb6c57643052ce
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-10 15:02:50 +0200
+
+ Disabled some unneeded warnings and made "make dist" work.
+
+ Makefile.am | 9 +++------
+ configure.ac | 9 ++++++---
+ po/fi.po | 47 ++++++++++++++++++++++++-----------------------
+ 3 files changed, 33 insertions(+), 32 deletions(-)
+
+commit 2ab8adb5165a0b77114a7eb21f9ff1e6a266f172
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-09 21:43:15 +0200
+
+ Added LZMA_SYNC_FLUSH support to the Copy filter.
+
+ src/liblzma/common/copy_coder.c | 92 ++++++++++++++++++++++++---------------
+ 1 files changed, 57 insertions(+), 35 deletions(-)
+
+commit 329c272d501e88793dda5540358d55c12428d194
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-09 17:14:07 +0200
+
+ Added missing LZMA_API to the C versions of the CRC functions.
+ The x86 assembler versions were already OK.
+
+ src/liblzma/check/crc32.c | 2 +-
+ src/liblzma/check/crc64.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+commit c90daf86ce683fa8cf80491d624ffb158dfbd9d7
+Author: Jim Meyering <meyering@redhat.com>
+Date: 2007-12-09 15:34:25 +0100
+
+ * tests/test_block_header.c (test3): Remove duplicate initializer.
+
+ autogen.sh | 2 +-
+ tests/test_block_header.c | 1 -
+ 2 files changed, 1 insertions(+), 2 deletions(-)
+
+commit 07ac881779a8477f2c1ab112b91a129e24aa743c
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-09 17:06:45 +0200
+
+ Take advantage of return_if_error() macro in more places.
+ Cleaned Subblock filter's initialization code too.
+
+ src/liblzma/common/block_decoder.c | 22 +++-------
+ src/liblzma/common/delta_coder.c | 8 +---
+ src/liblzma/common/stream_decoder.c | 17 +++-----
+ src/liblzma/common/stream_encoder_multi.c | 68 +++++++++------------------
+ src/liblzma/common/stream_encoder_single.c | 8 +--
+ src/liblzma/subblock/subblock_decoder.c | 33 +++++---------
+ src/liblzma/subblock/subblock_encoder.c | 45 +++++-------------
+ 7 files changed, 63 insertions(+), 138 deletions(-)
+
+commit 41338717964f510ee61d70b25bd4c502ec9f77cf
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-09 12:13:01 +0200
+
+ Added a bunch of .lzma test files.
+
+ tests/files/bad-single-data_after_eopm.lzma | Bin 0 -> 55 bytes
+ tests/files/bad-single-data_after_eopm_2.lzma | Bin 0 -> 56 bytes
+ tests/files/bad-single-subblock_subblock.lzma | Bin 0 -> 26 bytes
+ tests/files/good-cat-single-none-pad.lzma | Bin 0 -> 64 bytes
+ tests/files/good-single-delta-lzma.tiff.lzma | Bin 0 -> 51409 bytes
+ tests/files/good-single-lzma.lzma | Bin 0 -> 44 bytes
+ tests/files/good-single-none-pad.lzma | Bin 0 -> 32 bytes
+ tests/files/good-single-none.lzma | Bin 0 -> 30 bytes
+ tests/files/good-single-subblock-lzma.lzma | Bin 0 -> 50 bytes
+ tests/files/good-single-subblock_rle.lzma | Bin 0 -> 118 bytes
+ tests/files/malicious-multi-metadata-64PiB.lzma | Bin 0 -> 51 bytes
+ tests/files/malicious-single-subblock-256MiB.lzma | Bin 0 -> 30 bytes
+ tests/files/malicious-single-subblock-64PiB.lzma | Bin 0 -> 45 bytes
+ tests/files/malicious-single-subblock-loop.lzma | Bin 0 -> 43 bytes
+ tests/files/malicious-single-subblock-lzma.lzma | Bin 0 -> 505 bytes
+ .../files/malicious-single-subblock1023-slow.lzma | Bin 0 -> 7886 bytes
+ tests/files/malicious-single-subblock31-slow.lzma | Bin 0 -> 1233 bytes
+ 17 files changed, 0 insertions(+), 0 deletions(-)
+
+commit ff946ceb7975d4f11950afd33f6315b4d20d1a03
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-09 11:24:48 +0200
+
+ Re-enabled the security checks in Subblock decoder
+ that were disabled for debugging reasons.
+
+ src/liblzma/subblock/subblock_decoder.c | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+commit 2bf36d22d2c24ac3f488e63b35564fa2f6dab8d1
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-09 11:03:28 +0200
+
+ Fixed the tests to build with -Werror.
+
+ tests/test_block_header.c | 2 +-
+ tests/test_check.c | 2 +-
+ tests/test_filter_flags.c | 2 +-
+ tests/test_index.c | 14 +++++++-------
+ tests/test_info.c | 2 +-
+ tests/test_stream_flags.c | 2 +-
+ 6 files changed, 12 insertions(+), 12 deletions(-)
+
+commit 5d018dc03549c1ee4958364712fb0c94e1bf2741
+Author: Lasse Collin <lasse.collin@tukaani.org>
+Date: 2007-12-09 00:42:33 +0200
+
+ Imported to git.
+
+ AUTHORS | 18 +
+ COPYING | 24 +
+ COPYING.GPLv2 | 339 +++++
+ COPYING.GPLv3 | 674 +++++++++
+ COPYING.LGPLv2.1 | 504 +++++++
+ ChangeLog | 2 +
+ Doxyfile.in | 1229 ++++++++++++++++
+ Makefile.am | 38 +
+ README | 151 ++
+ THANKS | 23 +
+ TODO | 109 ++
+ autogen.sh | 38 +
+ configure.ac | 611 ++++++++
+ doc/bugs.txt | 46 +
+ doc/faq.txt | 247 ++++
+ doc/file-format.txt | 1861 ++++++++++++++++++++++++
+ doc/history.txt | 140 ++
+ doc/liblzma-advanced.txt | 324 ++++
+ doc/liblzma-hacking.txt | 112 ++
+ doc/liblzma-intro.txt | 188 +++
+ doc/liblzma-security.txt | 219 +++
+ doc/lzma-intro.txt | 107 ++
+ extra/scanlzma/scanlzma.c | 85 ++
+ lib/Makefile.am | 40 +
+ lib/getopt.c | 1191 +++++++++++++++
+ lib/getopt1.c | 171 +++
+ lib/getopt_.h | 226 +++
+ lib/getopt_int.h | 131 ++
+ lib/gettext.h | 240 +++
+ m4/acx_pthread.m4 | 279 ++++
+ m4/getopt.m4 | 83 ++
+ po/LINGUAS | 1 +
+ po/Makevars | 46 +
+ po/POTFILES.in | 13 +
+ po/fi.po | 445 ++++++
+ src/Makefile.am | 16 +
+ src/common/open_stdxxx.h | 50 +
+ src/common/physmem.h | 77 +
+ src/common/sysdefs.h | 100 ++
+ src/liblzma/Makefile.am | 47 +
+ src/liblzma/api/Makefile.am | 39 +
+ src/liblzma/api/lzma.h | 122 ++
+ src/liblzma/api/lzma/alignment.h | 60 +
+ src/liblzma/api/lzma/alone.h | 82 ++
+ src/liblzma/api/lzma/auto.h | 41 +
+ src/liblzma/api/lzma/base.h | 410 ++++++
+ src/liblzma/api/lzma/block.h | 409 ++++++
+ src/liblzma/api/lzma/check.h | 128 ++
+ src/liblzma/api/lzma/copy.h | 29 +
+ src/liblzma/api/lzma/delta.h | 49 +
+ src/liblzma/api/lzma/extra.h | 114 ++
+ src/liblzma/api/lzma/filter.h | 166 +++
+ src/liblzma/api/lzma/index.h | 84 ++
+ src/liblzma/api/lzma/info.h | 315 ++++
+ src/liblzma/api/lzma/init.h | 85 ++
+ src/liblzma/api/lzma/lzma.h | 312 ++++
+ src/liblzma/api/lzma/memlimit.h | 157 ++
+ src/liblzma/api/lzma/metadata.h | 100 ++
+ src/liblzma/api/lzma/raw.h | 72 +
+ src/liblzma/api/lzma/simple.h | 85 ++
+ src/liblzma/api/lzma/stream.h | 178 +++
+ src/liblzma/api/lzma/stream_flags.h | 142 ++
+ src/liblzma/api/lzma/subblock.h | 197 +++
+ src/liblzma/api/lzma/version.h | 59 +
+ src/liblzma/api/lzma/vli.h | 244 ++++
+ src/liblzma/check/Makefile.am | 64 +
+ src/liblzma/check/check.c | 160 ++
+ src/liblzma/check/check.h | 102 ++
+ src/liblzma/check/check_byteswap.h | 43 +
+ src/liblzma/check/check_init.c | 37 +
+ src/liblzma/check/crc32.c | 88 ++
+ src/liblzma/check/crc32_init.c | 58 +
+ src/liblzma/check/crc32_table.c | 22 +
+ src/liblzma/check/crc32_table_be.h | 527 +++++++
+ src/liblzma/check/crc32_table_le.h | 527 +++++++
+ src/liblzma/check/crc32_tablegen.c | 55 +
+ src/liblzma/check/crc32_x86.s | 217 +++
+ src/liblzma/check/crc64.c | 75 +
+ src/liblzma/check/crc64_init.c | 58 +
+ src/liblzma/check/crc64_table.c | 22 +
+ src/liblzma/check/crc64_table_be.h | 523 +++++++
+ src/liblzma/check/crc64_table_le.h | 523 +++++++
+ src/liblzma/check/crc64_tablegen.c | 56 +
+ src/liblzma/check/crc64_x86.s | 203 +++
+ src/liblzma/check/crc_macros.h | 33 +
+ src/liblzma/check/sha256.c | 203 +++
+ src/liblzma/common/Makefile.am | 94 ++
+ src/liblzma/common/alignment.c | 118 ++
+ src/liblzma/common/allocator.c | 57 +
+ src/liblzma/common/alone_decoder.c | 197 +++
+ src/liblzma/common/alone_decoder.h | 24 +
+ src/liblzma/common/alone_encoder.c | 167 +++
+ src/liblzma/common/auto_decoder.c | 113 ++
+ src/liblzma/common/block_decoder.c | 405 +++++
+ src/liblzma/common/block_decoder.h | 29 +
+ src/liblzma/common/block_encoder.c | 375 +++++
+ src/liblzma/common/block_encoder.h | 29 +
+ src/liblzma/common/block_header_decoder.c | 373 +++++
+ src/liblzma/common/block_header_encoder.c | 211 +++
+ src/liblzma/common/block_private.h | 46 +
+ src/liblzma/common/chunk_size.c | 74 +
+ src/liblzma/common/code.c | 203 +++
+ src/liblzma/common/common.h | 271 ++++
+ src/liblzma/common/copy_coder.c | 143 ++
+ src/liblzma/common/copy_coder.h | 31 +
+ src/liblzma/common/delta_coder.c | 210 +++
+ src/liblzma/common/delta_coder.h | 31 +
+ src/liblzma/common/extra.c | 33 +
+ src/liblzma/common/features.c | 70 +
+ src/liblzma/common/filter_flags_decoder.c | 382 +++++
+ src/liblzma/common/filter_flags_encoder.c | 359 +++++
+ src/liblzma/common/index.c | 140 ++
+ src/liblzma/common/info.c | 823 +++++++++++
+ src/liblzma/common/init.c | 39 +
+ src/liblzma/common/init_decoder.c | 33 +
+ src/liblzma/common/init_encoder.c | 44 +
+ src/liblzma/common/memory_limitter.c | 200 +++
+ src/liblzma/common/memory_usage.c | 113 ++
+ src/liblzma/common/metadata_decoder.c | 555 +++++++
+ src/liblzma/common/metadata_decoder.h | 31 +
+ src/liblzma/common/metadata_encoder.c | 436 ++++++
+ src/liblzma/common/metadata_encoder.h | 30 +
+ src/liblzma/common/next_coder.c | 65 +
+ src/liblzma/common/raw_common.c | 175 +++
+ src/liblzma/common/raw_common.h | 31 +
+ src/liblzma/common/raw_decoder.c | 127 ++
+ src/liblzma/common/raw_decoder.h | 30 +
+ src/liblzma/common/raw_encoder.c | 124 ++
+ src/liblzma/common/raw_encoder.h | 30 +
+ src/liblzma/common/stream_common.c | 23 +
+ src/liblzma/common/stream_common.h | 28 +
+ src/liblzma/common/stream_decoder.c | 454 ++++++
+ src/liblzma/common/stream_encoder_multi.c | 460 ++++++
+ src/liblzma/common/stream_encoder_single.c | 220 +++
+ src/liblzma/common/stream_flags_decoder.c | 258 ++++
+ src/liblzma/common/stream_flags_decoder.h | 31 +
+ src/liblzma/common/stream_flags_encoder.c | 75 +
+ src/liblzma/common/sysdefs.h | 1 +
+ src/liblzma/common/version.c | 25 +
+ src/liblzma/common/vli_decoder.c | 69 +
+ src/liblzma/common/vli_encoder.c | 81 +
+ src/liblzma/common/vli_reverse_decoder.c | 55 +
+ src/liblzma/lz/Makefile.am | 63 +
+ src/liblzma/lz/bt2.c | 27 +
+ src/liblzma/lz/bt2.h | 31 +
+ src/liblzma/lz/bt3.c | 29 +
+ src/liblzma/lz/bt3.h | 31 +
+ src/liblzma/lz/bt4.c | 30 +
+ src/liblzma/lz/bt4.h | 31 +
+ src/liblzma/lz/hc3.c | 30 +
+ src/liblzma/lz/hc3.h | 31 +
+ src/liblzma/lz/hc4.c | 31 +
+ src/liblzma/lz/hc4.h | 31 +
+ src/liblzma/lz/lz_decoder.c | 462 ++++++
+ src/liblzma/lz/lz_decoder.h | 214 +++
+ src/liblzma/lz/lz_encoder.c | 481 ++++++
+ src/liblzma/lz/lz_encoder.h | 161 ++
+ src/liblzma/lz/lz_encoder_private.h | 40 +
+ src/liblzma/lz/match_c.h | 401 +++++
+ src/liblzma/lz/match_h.h | 69 +
+ src/liblzma/lzma.pc.in | 11 +
+ src/liblzma/lzma/Makefile.am | 43 +
+ src/liblzma/lzma/lzma_common.h | 128 ++
+ src/liblzma/lzma/lzma_decoder.c | 844 +++++++++++
+ src/liblzma/lzma/lzma_decoder.h | 41 +
+ src/liblzma/lzma/lzma_encoder.c | 413 ++++++
+ src/liblzma/lzma/lzma_encoder.h | 35 +
+ src/liblzma/lzma/lzma_encoder_features.c | 59 +
+ src/liblzma/lzma/lzma_encoder_getoptimum.c | 893 ++++++++++++
+ src/liblzma/lzma/lzma_encoder_getoptimumfast.c | 201 +++
+ src/liblzma/lzma/lzma_encoder_init.c | 245 ++++
+ src/liblzma/lzma/lzma_encoder_presets.c | 34 +
+ src/liblzma/lzma/lzma_encoder_private.h | 225 +++
+ src/liblzma/lzma/lzma_literal.c | 74 +
+ src/liblzma/lzma/lzma_literal.h | 74 +
+ src/liblzma/rangecoder/Makefile.am | 28 +
+ src/liblzma/rangecoder/range_common.h | 68 +
+ src/liblzma/rangecoder/range_decoder.h | 189 +++
+ src/liblzma/rangecoder/range_encoder.c | 46 +
+ src/liblzma/rangecoder/range_encoder.h | 317 ++++
+ src/liblzma/simple/Makefile.am | 46 +
+ src/liblzma/simple/arm.c | 76 +
+ src/liblzma/simple/armthumb.c | 81 +
+ src/liblzma/simple/ia64.c | 117 ++
+ src/liblzma/simple/powerpc.c | 80 +
+ src/liblzma/simple/simple_coder.c | 306 ++++
+ src/liblzma/simple/simple_coder.h | 68 +
+ src/liblzma/simple/simple_private.h | 86 ++
+ src/liblzma/simple/sparc.c | 88 ++
+ src/liblzma/simple/x86.c | 161 ++
+ src/liblzma/subblock/Makefile.am | 33 +
+ src/liblzma/subblock/subblock_decoder.c | 681 +++++++++
+ src/liblzma/subblock/subblock_decoder.h | 29 +
+ src/liblzma/subblock/subblock_decoder_helper.c | 80 +
+ src/liblzma/subblock/subblock_decoder_helper.h | 36 +
+ src/liblzma/subblock/subblock_encoder.c | 841 +++++++++++
+ src/liblzma/subblock/subblock_encoder.h | 28 +
+ src/lzma/Makefile.am | 63 +
+ src/lzma/alloc.c | 106 ++
+ src/lzma/alloc.h | 42 +
+ src/lzma/args.c | 566 +++++++
+ src/lzma/args.h | 64 +
+ src/lzma/error.c | 156 ++
+ src/lzma/error.h | 67 +
+ src/lzma/hardware.c | 99 ++
+ src/lzma/hardware.h | 31 +
+ src/lzma/help.c | 178 +++
+ src/lzma/help.h | 32 +
+ src/lzma/io.c | 664 +++++++++
+ src/lzma/io.h | 60 +
+ src/lzma/list.c | 477 ++++++
+ src/lzma/main.c | 254 ++++
+ src/lzma/options.c | 346 +++++
+ src/lzma/options.h | 46 +
+ src/lzma/private.h | 55 +
+ src/lzma/process.c | 458 ++++++
+ src/lzma/process.h | 30 +
+ src/lzma/suffix.c | 145 ++
+ src/lzma/suffix.h | 25 +
+ src/lzma/util.c | 182 +++
+ src/lzma/util.h | 32 +
+ src/lzmadec/Makefile.am | 27 +
+ src/lzmadec/lzmadec.c | 515 +++++++
+ src/scripts/Makefile.am | 24 +
+ src/scripts/lzdiff | 67 +
+ src/scripts/lzdiff.1 | 51 +
+ src/scripts/lzgrep | 123 ++
+ src/scripts/lzgrep.1 | 61 +
+ src/scripts/lzmore | 74 +
+ src/scripts/lzmore.1 | 55 +
+ tests/Makefile.am | 43 +
+ tests/test_block.c | 59 +
+ tests/test_block_header.c | 352 +++++
+ tests/test_check.c | 90 ++
+ tests/test_filter_flags.c | 326 +++++
+ tests/test_index.c | 43 +
+ tests/test_info.c | 717 +++++++++
+ tests/test_stream_flags.c | 191 +++
+ tests/tests.h | 148 ++
+ 239 files changed, 42513 insertions(+), 0 deletions(-)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Doxyfile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Doxyfile.in
new file mode 100644
index 00000000..fa3c1e1d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Doxyfile.in
@@ -0,0 +1,1234 @@
+# Doxyfile 1.4.7
+
+# Copyright (C) 1997-2007 by Dimitri van Heesch
+# License: GNU GPLv2+
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = "@PACKAGE_NAME@"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = "@PACKAGE_VERSION@"
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = doc
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish,
+# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese,
+# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish,
+# Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# This tag can be used to specify the encoding used in the generated output.
+# The encoding is not always determined by the language that is chosen,
+# but also whether or not the output is meant for Windows or non-Windows users.
+# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES
+# forces the Windows encoding (this is the default for the Windows binary),
+# whereas setting the tag to NO uses a Unix-style encoding (the default for
+# all platforms other than Windows).
+
+USE_WINDOWS_ENCODING = NO
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = YES
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names, like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+
+DETAILS_AT_TOP = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = NO
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from the
+# version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = @top_srcdir@/src
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py
+
+FILE_PATTERNS = *.h *.c
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = YES
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default)
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default)
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code. Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED = LZMA_API(type)=type \
+ LZMA_API_IMPORT \
+ LZMA_API_CALL=
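+
+# Purely as an illustration (not part of this configuration), hypothetical
+# entries using the name=definition form and the := operator described
+# above could look like this:
+#
+# PREDEFINED = FOO_EXPORT= FOO_VERSION=123 FOO_ASSERT(x):=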
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a call dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a caller dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_WIDTH = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that a graph may be further truncated if the graph's
+# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH
+# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default),
+# the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, which results in a white background.
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/INSTALL b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/INSTALL
new file mode 100644
index 00000000..ec757202
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/INSTALL
@@ -0,0 +1,339 @@
+
+XZ Utils Installation
+=====================
+
+ 0. Preface
+ 1. Supported platforms
+ 1.1. Compilers
+ 1.2. Platform-specific notes
+ 1.2.1. Darwin (Mac OS X)
+ 1.2.2. Tru64
+ 1.2.3. Windows
+ 1.2.4. DOS
+ 1.2.5. OS/2
+ 1.3. Adding support for new platforms
+ 2. configure options
+ 3. xzgrep and other scripts
+ 3.1. Dependencies
+ 3.2. PATH
+ 4. Troubleshooting
+ 4.1. "No C99 compiler was found."
+ 4.2. "No POSIX conforming shell (sh) was found."
+ 4.3. configure works but build fails at crc32_x86.S
+
+
+0. Preface
+----------
+
+ If you aren't familiar with building packages that use GNU Autotools,
+ see the file INSTALL.generic for generic instructions before reading
+ further.
+
+ If you are going to build a package for distribution, see also the
+ file PACKAGERS. It contains information that should help make the
+ binary packages as good as possible, but the information isn't very
+ interesting to those making local builds for private use or for use
+ in special situations like embedded systems.
+
+
+1. Supported platforms
+----------------------
+
+ XZ Utils are developed on GNU/Linux, but they should work on many
+ POSIX-like operating systems like *BSDs and Solaris, and even on
+ a few non-POSIX operating systems.
+
+
+1.1. Compilers
+
+ A C99 compiler is required to compile XZ Utils. If you use GCC, you
+ need at least version 3.x.x. GCC version 2.xx.x doesn't support some
+ C99 features used in XZ Utils source code, thus GCC 2 won't compile
+ XZ Utils.
+
+ XZ Utils takes advantage of some GNU C extensions when building
+ with GCC. Because these extensions are used only when building
+ with GCC, it should be possible to use any C99 compiler.
+
+
+1.2. Platform-specific notes
+
+1.2.1. Darwin (Mac OS X)
+
+ You may need --disable-assembler if building universal binaries on
+ Darwin. This is because different files are built when the assembler is
+ enabled, and there's no way to make it work with a universal build.
+ If you want to keep the assembler code, consider building one
+ architecture at a time, and then combining the results to create
+ universal binaries (see lipo(1)).
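+
+ A rough sketch of that per-architecture approach (the directory names
+ and library paths below are only illustrative):
+
+    mkdir build-x86_64 build-ppc
+    (cd build-x86_64 && ../configure CC="gcc -arch x86_64" && make)
+    (cd build-ppc && ../configure CC="gcc -arch ppc" && make)
+    lipo -create build-x86_64/liblzma.dylib build-ppc/liblzma.dylib \
+        -output liblzma.dylib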
+
+
+1.2.2. Tru64
+
+ If you try to use the native C compiler on Tru64 (passing CC=cc to
+ configure), it is possible that the configure script will complain
+ that no C99 compiler was found even when the native compiler supports
+ C99. You can safely override the test for C99 compiler by passing
+ ac_cv_prog_cc_c99= as the argument to the configure script.
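+
+ For example, selecting the native compiler and overriding the test
+ could look like this:
+
+    ./configure CC=cc ac_cv_prog_cc_c99=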
+
+
+1.2.3. Windows
+
+ Building XZ Utils on Windows is supported under MinGW and Cygwin.
+ If the Autotools based build gives you trouble with MinGW, you may
+ want to try the alternative method found in the "windows" directory.
+
+ MSVC doesn't support C99, thus it is not possible to use MSVC to
+ compile XZ Utils. However, it is possible to use liblzma.dll from
+ MSVC once liblzma.dll has been built with MinGW. The required
+ import library for MSVC can be created from liblzma.def using the
+ "lib" command shipped in MSVC:
+
+ lib /def:liblzma.def /out:liblzma.lib /machine:ix86
+
+ On x86-64, the /machine argument naturally has to be changed:
+
+ lib /def:liblzma.def /out:liblzma.lib /machine:x64
+
+
+1.2.4. DOS
+
+ There is an experimental Makefile in the "dos" directory to build
+ XZ Utils on DOS using DJGPP. Support for long file names (LFN) is
+ needed.
+
+ The GNU Autotools based build hasn't been tried on DOS.
+
+
+1.2.5. OS/2
+
+ You will need to pass --disable-assembler to configure when building
+ on OS/2.
+
+
+1.3. Adding support for new platforms
+
+ If you have written patches to make XZ Utils work on a previously
+ unsupported platform, please send the patches to me! I will consider
+ including them in the official version. It's nice to minimize the
+ need for third-party patching.
+
+ One exception: Don't request or send patches to change the whole
+ source package to C89. I find C99 substantially nicer to write and
+ maintain. However, the public library headers must be in C89 to
+ avoid frustrating those who maintain programs, which are strictly
+ in C89 or C++.
+
+
+2. configure options
+--------------------
+
+ In most cases, the defaults are what you want. Most of the options
+ below are useful only when building a size-optimized version of
+ liblzma or command line tools.
+
+ --enable-encoders=LIST
+ --disable-encoders
+ Specify a comma-separated LIST of filter encoders to
+ build. See "./configure --help" for the exact list of
+ available filter encoders. The default is to build all
+ supported encoders.
+
+ If LIST is empty or --disable-encoders is used, no filter
+ encoders will be built and also the code shared between
+ encoders will be omitted.
+
+ Disabling encoders will remove some symbols from the
+ liblzma ABI, so this option should be used only when it
+ is known to not cause problems.
+
+ --enable-decoders=LIST
+ --disable-decoders
+ This is like --enable-encoders but for decoders. The
+ default is to build all supported decoders.
+
+ --enable-match-finders=LIST
+ liblzma includes two categories of match finders:
+ hash chains and binary trees. Hash chains (hc3 and hc4)
+ are quite fast but they don't provide the best compression
+ ratio. Binary trees (bt2, bt3 and bt4) give excellent
+ compression ratio, but they are slower and need more
+ memory than hash chains.
+
+ You need to enable at least one match finder to build the
+ LZMA1 or LZMA2 filter encoders. Usually hash chains are
+ used only in the fast mode, while binary trees are used
+ when the best compression ratio is wanted.
+
+ The default is to build all the match finders if LZMA1
+ or LZMA2 filter encoders are being built.
+
+ --enable-checks=LIST
+ liblzma supports multiple integrity checks. CRC32 is
+ mandatory, and cannot be omitted. See "./configure --help"
+ for the exact list of available integrity check types.
+
+ liblzma and the command line tools can decompress files
+ which use an unsupported integrity check type, but naturally
+ the file integrity cannot be verified in that case.
+
+ Disabling integrity checks may remove some symbols from
+ the liblzma ABI, so this option should be used only when
+ it is known to not cause problems.
+
+ --disable-assembler
+ liblzma includes some assembler optimizations. Currently
+ there is only assembler code for CRC32 and CRC64 for
+ 32-bit x86.
+
+ All the assembler code in liblzma is position-independent
+ code, which is suitable for use in shared libraries and
+ position-independent executables. So far only i386
+ instructions are used, but the code is optimized for i686
+ class CPUs. If you are compiling liblzma exclusively for
+ pre-i686 systems, you may want to disable the assembler
+ code.
+
+ --enable-unaligned-access
+ Allow liblzma to use unaligned memory access for 16-bit
+ and 32-bit loads and stores. This should be enabled only
+ when the hardware supports this, i.e. when unaligned
+ access is fast. Some operating system kernels emulate
+ unaligned access, which is extremely slow. This option
+ shouldn't be used on systems that rely on such emulation.
+
+ Unaligned access is enabled by default on x86, x86-64,
+ and big endian PowerPC.
+
+ --enable-small
+ Reduce the size of liblzma by selecting smaller but
+ semantically equivalent versions of some functions, and
+ omit precomputed lookup tables. This option tends to
+ make liblzma slightly slower.
+
+ Note that while omitting the precomputed tables makes
+ liblzma smaller on disk, the tables are still needed at
+ run time, and need to be computed at startup. This also
+ means that the RAM holding the tables won't be shared
+ between applications linked against shared liblzma.
+
+ --disable-threads
+ Disable threading support. This makes some things
+ thread-unsafe, meaning that if a multithreaded application
+ calls liblzma functions from more than one thread,
+ something bad may happen.
+
+ Use this option if threading support causes you trouble,
+ or if you know that you will use liblzma only from
+ single-threaded applications and want to avoid dependency
+ on libpthread.
+
+ --enable-dynamic=TYPE
+ Specify how command line tools should be linked against
+ liblzma. Possible TYPES:
+
+ yes All command line tools are linked against
+ shared liblzma (if shared liblzma was built).
+ This is equivalent to --enable-dynamic (i.e.
+ no =TYPE).
+
+ mixed Some tools are linked against static liblzma
+ and some against shared liblzma. This is the
+ default and recommended way.
+
+ no All command line tools are linked against
+ static liblzma (if static liblzma was built).
+ This is equivalent to --disable-dynamic.
+
+ This option is mostly useful for packagers, if the distro
+ policy requires linking against shared libraries. See the
+ file PACKAGERS for more information about pros and cons
+ of this option.
+
+ --enable-debug
+ This enables the assert() macro and possibly some other
+ run-time consistency checks. It makes the code slower, so
+ you normally don't want to have this enabled.
+
+ --enable-werror
+ If building with GCC, make all compiler warnings errors
+ that abort the compilation. This may help catch bugs,
+ and should work on most systems. This has no effect on the
+ resulting binaries.
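+
+ To illustrate how a few of the options above can be combined, a
+ size-reduced build without threading might be configured roughly like
+ this (the encoder, decoder, and check names are examples only; see
+ "./configure --help" for the real lists):
+
+    ./configure --disable-threads --enable-small \
+        --enable-encoders=lzma2,delta \
+        --enable-decoders=lzma2,delta \
+        --enable-checks=crc32,crc64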
+
+
+3. xzgrep and other scripts
+---------------------------
+
+3.1. Dependencies
+
+ POSIX shell (sh) and a bunch of other standard POSIX tools are required
+ to run the scripts. The configure script tries to find a POSIX
+ compliant sh, but if it fails, you can force the shell by passing
+ gl_cv_posix_shell=/path/to/posix-sh as an argument to the configure
+ script.
+
+ Some of the scripts also require mktemp. The original mktemp can be
+ found at <http://www.mktemp.org/>. On GNU systems, most will use the
+ mktemp program from GNU coreutils instead of the original implementation.
+ Both mktemp versions are fine for XZ Utils (and practically for
+ everything else too).
+
+
+3.2. PATH
+
+ The scripts assume that the required tools (standard POSIX utilities,
+ mktemp, and xz) are in PATH; the scripts don't set the PATH themselves.
+ Some people like this while some think this is a bug. Those in the
+ latter group can easily patch the scripts before running the configure
+ script by taking advantage of a placeholder line in the scripts.
+
+ For example, to make the scripts prefix /usr/bin:/bin to PATH:
+
+ perl -pi -e 's|^#SET_PATH.*$|PATH=/usr/bin:/bin:\$PATH|' \
+ src/scripts/xz*.in
+
+
+4. Troubleshooting
+------------------
+
+4.1. "No C99 compiler was found."
+
+ You need a C99 compiler to build XZ Utils. If the configure script
+ cannot find a C99 compiler and you think you have such a compiler
+ installed, set the compiler command by passing CC=/path/to/c99 as
+ an argument to the configure script.
+
+ If you get this error even when you think your compiler supports C99,
+ you can override the test by passing ac_cv_prog_cc_c99= as an argument
+ to the configure script. The test for C99 compiler is not perfect (and
+ it is not as easy to make it perfect as it sounds), so sometimes this
+ may be needed. You will get a compile error if your compiler doesn't
+ support enough C99.
+
+
+4.2. "No POSIX conforming shell (sh) was found."
+
+ xzgrep and other scripts need a shell that (roughly) conforms
+ to POSIX. The configure script tries to find such a shell. If
+ it fails, you can force the shell to be used by passing
+ gl_cv_posix_shell=/path/to/posix-sh as an argument to the configure
+ script.
+
+
+4.3. configure works but build fails at crc32_x86.S
+
+ The easy fix is to pass --disable-assembler to the configure script.
+
+ The configure script determines if assembler code can be used by
+ looking at the configure triplet; there is currently no check if
+ the assembler code can actually be built. The x86 assembler
+ code should work on x86 GNU/Linux, *BSDs, Solaris, Darwin, MinGW,
+ Cygwin, and DJGPP. On other x86 systems, there may be problems and
+ the assembler code may need to be disabled with the configure option.
+
+ If you get this error when building for x86-64, you have specified the
+ wrong architecture, or the configure script has misguessed it. Pass the
+ correct configure triplet using the --build=CPU-COMPANY-SYSTEM option
+ (see INSTALL.generic).
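+
+ For example, if the intended target is 64-bit x86 GNU/Linux, a triplet
+ along these lines (illustrative) avoids the 32-bit assembler code:
+
+    ./configure --build=x86_64-unknown-linux-gnu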
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/INSTALL.generic b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/INSTALL.generic
new file mode 100644
index 00000000..2550dab7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/INSTALL.generic
@@ -0,0 +1,302 @@
+Installation Instructions
+*************************
+
+Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005,
+2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+
+ This file is free documentation; the Free Software Foundation gives
+unlimited permission to copy, distribute and modify it.
+
+Basic Installation
+==================
+
+ Briefly, the shell commands `./configure; make; make install' should
+configure, build, and install this package. The following
+more-detailed instructions are generic; see the `README' file for
+instructions specific to this package.
+
+ The `configure' shell script attempts to guess correct values for
+various system-dependent variables used during compilation. It uses
+those values to create a `Makefile' in each directory of the package.
+It may also create one or more `.h' files containing system-dependent
+definitions. Finally, it creates a shell script `config.status' that
+you can run in the future to recreate the current configuration, and a
+file `config.log' containing compiler output (useful mainly for
+debugging `configure').
+
+ It can also use an optional file (typically called `config.cache'
+and enabled with `--cache-file=config.cache' or simply `-C') that saves
+the results of its tests to speed up reconfiguring. Caching is
+disabled by default to prevent problems with accidental use of stale
+cache files.
+
+ If you need to do unusual things to compile the package, please try
+to figure out how `configure' could check whether to do them, and mail
+diffs or instructions to the address given in the `README' so they can
+be considered for the next release. If you are using the cache, and at
+some point `config.cache' contains results you don't want to keep, you
+may remove or edit it.
+
+ The file `configure.ac' (or `configure.in') is used to create
+`configure' by a program called `autoconf'. You need `configure.ac' if
+you want to change it or regenerate `configure' using a newer version
+of `autoconf'.
+
+The simplest way to compile this package is:
+
+ 1. `cd' to the directory containing the package's source code and type
+ `./configure' to configure the package for your system.
+
+ Running `configure' might take a while. While running, it prints
+ some messages telling which features it is checking for.
+
+ 2. Type `make' to compile the package.
+
+ 3. Optionally, type `make check' to run any self-tests that come with
+ the package.
+
+ 4. Type `make install' to install the programs and any data files and
+ documentation.
+
+ 5. You can remove the program binaries and object files from the
+ source code directory by typing `make clean'. To also remove the
+ files that `configure' created (so you can compile the package for
+ a different kind of computer), type `make distclean'. There is
+ also a `make maintainer-clean' target, but that is intended mainly
+ for the package's developers. If you use it, you may have to get
+ all sorts of other programs in order to regenerate files that came
+ with the distribution.
+
+ 6. Often, you can also type `make uninstall' to remove the installed
+ files again.
+
+Compilers and Options
+=====================
+
+ Some systems require unusual options for compilation or linking that
+the `configure' script does not know about. Run `./configure --help'
+for details on some of the pertinent environment variables.
+
+ You can give `configure' initial values for configuration parameters
+by setting variables in the command line or in the environment. Here
+is an example:
+
+ ./configure CC=c99 CFLAGS=-g LIBS=-lposix
+
+ *Note Defining Variables::, for more details.
+
+Compiling For Multiple Architectures
+====================================
+
+ You can compile the package for more than one kind of computer at the
+same time, by placing the object files for each architecture in their
+own directory. To do this, you can use GNU `make'. `cd' to the
+directory where you want the object files and executables to go and run
+the `configure' script. `configure' automatically checks for the
+source code in the directory that `configure' is in and in `..'.
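+
+   For instance, an out-of-tree build with GNU `make' might look roughly
+like this (the directory name is illustrative):
+
+     mkdir build && cd build
+     ../configure
+     make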
+
+ With a non-GNU `make', it is safer to compile the package for one
+architecture at a time in the source code directory. After you have
+installed the package for one architecture, use `make distclean' before
+reconfiguring for another architecture.
+
+ On MacOS X 10.5 and later systems, you can create libraries and
+executables that work on multiple system types--known as "fat" or
+"universal" binaries--by specifying multiple `-arch' options to the
+compiler but only a single `-arch' option to the preprocessor. Like
+this:
+
+ ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
+ CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
+ CPP="gcc -E" CXXCPP="g++ -E"
+
+ This is not guaranteed to produce working output in all cases; you
+may have to build one architecture at a time and combine the results
+using the `lipo' tool if you have problems.
+
+Installation Names
+==================
+
+ By default, `make install' installs the package's commands under
+`/usr/local/bin', include files under `/usr/local/include', etc. You
+can specify an installation prefix other than `/usr/local' by giving
+`configure' the option `--prefix=PREFIX'.
+
+ You can specify separate installation prefixes for
+architecture-specific files and architecture-independent files. If you
+pass the option `--exec-prefix=PREFIX' to `configure', the package uses
+PREFIX as the prefix for installing programs and libraries.
+Documentation and other data files still use the regular prefix.
+
+ In addition, if you use an unusual directory layout you can give
+options like `--bindir=DIR' to specify different values for particular
+kinds of files. Run `configure --help' for a list of the directories
+you can set and what kinds of files go in them.
+
+ If the package supports it, you can cause programs to be installed
+with an extra prefix or suffix on their names by giving `configure' the
+option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
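+
+   As a sketch (the paths and suffix here are purely illustrative), a
+non-default layout could be requested like this:
+
+     ./configure --prefix=$HOME/sw --bindir=$HOME/sw/tools \
+                 --program-suffix=-test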
+
+Optional Features
+=================
+
+ Some packages pay attention to `--enable-FEATURE' options to
+`configure', where FEATURE indicates an optional part of the package.
+They may also pay attention to `--with-PACKAGE' options, where PACKAGE
+is something like `gnu-as' or `x' (for the X Window System). The
+`README' should mention any `--enable-' and `--with-' options that the
+package recognizes.
+
+ For packages that use the X Window System, `configure' can usually
+find the X include and library files automatically, but if it doesn't,
+you can use the `configure' options `--x-includes=DIR' and
+`--x-libraries=DIR' to specify their locations.
+
+Particular systems
+==================
+
+ On HP-UX, the default C compiler is not ANSI C compatible. If GNU
+CC is not installed, it is recommended to use the following options in
+order to use an ANSI C compiler:
+
+ ./configure CC="cc -Ae -D_XOPEN_SOURCE=500"
+
+and if that doesn't work, install pre-built binaries of GCC for HP-UX.
+
+ On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot
+parse its `<wchar.h>' header file. The option `-nodtk' can be used as
+a workaround. If GNU CC is not installed, it is therefore recommended
+to try
+
+ ./configure CC="cc"
+
+and if that doesn't work, try
+
+ ./configure CC="cc -nodtk"
+
+ On Solaris, don't put `/usr/ucb' early in your `PATH'. This
+directory contains several dysfunctional programs; working variants of
+these programs are available in `/usr/bin'. So, if you need `/usr/ucb'
+in your `PATH', put it _after_ `/usr/bin'.
+
+ On Haiku, software installed for all users goes in `/boot/common',
+not `/usr/local'. It is recommended to use the following options:
+
+ ./configure --prefix=/boot/common
+
+Specifying the System Type
+==========================
+
+ There may be some features `configure' cannot figure out
+automatically, but needs to determine by the type of machine the package
+will run on. Usually, assuming the package is built to be run on the
+_same_ architectures, `configure' can figure that out, but if it prints
+a message saying it cannot guess the machine type, give it the
+`--build=TYPE' option. TYPE can either be a short name for the system
+type, such as `sun4', or a canonical name which has the form:
+
+ CPU-COMPANY-SYSTEM
+
+where SYSTEM can have one of these forms:
+
+ OS
+ KERNEL-OS
+
+ See the file `config.sub' for the possible values of each field. If
+`config.sub' isn't included in this package, then this package doesn't
+need to know the machine type.
+
+ If you are _building_ compiler tools for cross-compiling, you should
+use the option `--target=TYPE' to select the type of system they will
+produce code for.
+
+ If you want to _use_ a cross compiler, that generates code for a
+platform different from the build platform, you should specify the
+"host" platform (i.e., that on which the generated programs will
+eventually be run) with `--host=TYPE'.
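+
+   For example (the triplets below are illustrative), a cross build for
+an ARM GNU/Linux host from an x86-64 build machine might be configured
+as:
+
+     ./configure --build=x86_64-pc-linux-gnu --host=arm-linux-gnueabihf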
+
+Sharing Defaults
+================
+
+ If you want to set default values for `configure' scripts to share,
+you can create a site shell script called `config.site' that gives
+default values for variables like `CC', `cache_file', and `prefix'.
+`configure' looks for `PREFIX/share/config.site' if it exists, then
+`PREFIX/etc/config.site' if it exists. Or, you can set the
+`CONFIG_SITE' environment variable to the location of the site script.
+A warning: not all `configure' scripts look for a site script.
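+
+   A minimal `config.site' might contain nothing more than a few
+assignments such as these (the values are illustrative):
+
+     CC=gcc
+     CFLAGS="-O2 -g"
+     prefix=/opt/local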
+
+Defining Variables
+==================
+
+ Variables not defined in a site shell script can be set in the
+environment passed to `configure'. However, some packages may run
+configure again during the build, and the customized values of these
+variables may be lost. In order to avoid this problem, you should set
+them in the `configure' command line, using `VAR=value'. For example:
+
+ ./configure CC=/usr/local2/bin/gcc
+
+causes the specified `gcc' to be used as the C compiler (unless it is
+overridden in the site shell script).
+
+Unfortunately, this technique does not work for `CONFIG_SHELL' due to
+an Autoconf bug. Until the bug is fixed you can use this workaround:
+
+ CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash
+
+`configure' Invocation
+======================
+
+ `configure' recognizes the following options to control how it
+operates.
+
+`--help'
+`-h'
+ Print a summary of all of the options to `configure', and exit.
+
+`--help=short'
+`--help=recursive'
+ Print a summary of the options unique to this package's
+ `configure', and exit. The `short' variant lists options used
+ only in the top level, while the `recursive' variant lists options
+ also present in any nested packages.
+
+`--version'
+`-V'
+ Print the version of Autoconf used to generate the `configure'
+ script, and exit.
+
+`--cache-file=FILE'
+ Enable the cache: use and save the results of the tests in FILE,
+ traditionally `config.cache'. FILE defaults to `/dev/null' to
+ disable caching.
+
+`--config-cache'
+`-C'
+ Alias for `--cache-file=config.cache'.
+
+`--quiet'
+`--silent'
+`-q'
+ Do not print messages saying which checks are being made. To
+ suppress all normal output, redirect it to `/dev/null' (any error
+ messages will still be shown).
+
+`--srcdir=DIR'
+ Look for the package's source code in directory DIR. Usually
+ `configure' can determine that directory automatically.
+
+`--prefix=DIR'
+ Use DIR as the installation prefix. *Note Installation Names::
+ for more details, including other options available for fine-tuning
+ the installation locations.
+
+`--no-create'
+`-n'
+ Run the configure checks, but stop before creating any output
+ files.
+
+`configure' also accepts some other, not widely useful, options. Run
+`configure --help' for more details.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Makefile.am
new file mode 100644
index 00000000..79cf1954
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Makefile.am
@@ -0,0 +1,80 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+DIST_SUBDIRS = lib src po tests debug
+SUBDIRS =
+
+if COND_GNULIB
+SUBDIRS += lib
+endif
+
+SUBDIRS += src po tests
+
+dist_doc_DATA = \
+ AUTHORS \
+ COPYING \
+ COPYING.GPLv2 \
+ NEWS \
+ README \
+ THANKS \
+ TODO \
+ doc/faq.txt \
+ doc/history.txt \
+ doc/xz-file-format.txt \
+ doc/lzma-file-format.txt
+
+EXTRA_DIST = \
+ extra \
+ dos \
+ windows \
+ autogen.sh \
+ version.sh \
+ Doxyfile.in \
+ COPYING.GPLv2 \
+ COPYING.GPLv3 \
+ COPYING.LGPLv2.1 \
+ INSTALL.generic \
+ PACKAGERS
+
+ACLOCAL_AMFLAGS = -I m4
+
+# List of man pages to convert to PDF and plain text in the dist-hook target.
+manfiles = \
+ src/xz/xz.1 \
+ src/xzdec/xzdec.1 \
+ src/scripts/xzdiff.1 \
+ src/scripts/xzgrep.1 \
+ src/scripts/xzless.1 \
+ src/scripts/xzmore.1
+
+# Create ChangeLog from output of "git log --date=iso --stat".
+# Convert the man pages to PDF and plain text (ASCII only) formats.
+dist-hook:
+ if test -d "$(srcdir)/.git" && type git > /dev/null 2>&1; then \
+ ( cd "$(srcdir)" && git log --date=iso --stat ) \
+ > "$(distdir)/ChangeLog"; \
+ fi
+ if type groff > /dev/null 2>&1 && type ps2pdf > /dev/null 2>&1; then \
+ dest="$(distdir)/doc/man" && \
+ $(MKDIR_P) "$$dest/pdf-a4" "$$dest/pdf-letter" "$$dest/txt" && \
+ for FILE in $(manfiles); do \
+ BASE=`basename $$FILE .1` && \
+ groff -man -t -Tps -P-pa4 < "$(srcdir)/$$FILE" \
+ | ps2pdf - - \
+ > "$$dest/pdf-a4/$$BASE-a4.pdf" && \
+ groff -man -t -Tps -P-pletter < "$(srcdir)/$$FILE" \
+ | ps2pdf - - \
+ > "$$dest/pdf-letter/$$BASE-letter.pdf" && \
+ groff -man -t -Tascii < "$(srcdir)/$$FILE" \
+ | col -bx > "$$dest/txt/$$BASE.txt"; \
+ done; \
+ fi
+
+# This works with GNU tar and gives a cleaner package than normal 'make dist'.
+mydist:
+ TAR_OPTIONS='--owner=0 --group=0 --numeric-owner --mode=u+rw,go+r-w' \
+ $(MAKE) dist-gzip
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Makefile.in
new file mode 100644
index 00000000..eef55e61
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/Makefile.in
@@ -0,0 +1,887 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+@COND_GNULIB_TRUE@am__append_1 = lib
+subdir = .
+DIST_COMMON = README $(am__configure_deps) $(dist_doc_DATA) \
+ $(srcdir)/Doxyfile.in $(srcdir)/Makefile.am \
+ $(srcdir)/Makefile.in $(srcdir)/config.h.in \
+ $(top_srcdir)/configure ABOUT-NLS AUTHORS COPYING ChangeLog \
+ INSTALL NEWS THANKS TODO build-aux/compile \
+ build-aux/config.guess build-aux/config.rpath \
+ build-aux/config.sub build-aux/depcomp build-aux/install-sh \
+ build-aux/ltmain.sh build-aux/missing
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
+ configure.lineno config.status.lineno
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = config.h
+CONFIG_CLEAN_FILES = Doxyfile
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
+ html-recursive info-recursive install-data-recursive \
+ install-dvi-recursive install-exec-recursive \
+ install-html-recursive install-info-recursive \
+ install-pdf-recursive install-ps-recursive install-recursive \
+ installcheck-recursive installdirs-recursive pdf-recursive \
+ ps-recursive uninstall-recursive
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__installdirs = "$(DESTDIR)$(docdir)"
+DATA = $(dist_doc_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
+ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \
+ distdir dist dist-all distcheck
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+am__remove_distdir = \
+ { test ! -d "$(distdir)" \
+ || { find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
+ && rm -fr "$(distdir)"; }; }
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+DIST_ARCHIVES = $(distdir).tar.gz
+GZIP_ENV = --best
+distuninstallcheck_listfiles = find . -type f -print
+distcleancheck_listfiles = find . -type f -print
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+DIST_SUBDIRS = lib src po tests debug
+SUBDIRS = $(am__append_1) src po tests
+dist_doc_DATA = \
+ AUTHORS \
+ COPYING \
+ COPYING.GPLv2 \
+ NEWS \
+ README \
+ THANKS \
+ TODO \
+ doc/faq.txt \
+ doc/history.txt \
+ doc/xz-file-format.txt \
+ doc/lzma-file-format.txt
+
+EXTRA_DIST = \
+ extra \
+ dos \
+ windows \
+ autogen.sh \
+ version.sh \
+ Doxyfile.in \
+ COPYING.GPLv2 \
+ COPYING.GPLv3 \
+ COPYING.LGPLv2.1 \
+ INSTALL.generic \
+ PACKAGERS
+
+ACLOCAL_AMFLAGS = -I m4
+
+# List of man pages to convert to PDF and plain text in the dist-hook target.
+manfiles = \
+ src/xz/xz.1 \
+ src/xzdec/xzdec.1 \
+ src/scripts/xzdiff.1 \
+ src/scripts/xzgrep.1 \
+ src/scripts/xzless.1 \
+ src/scripts/xzmore.1
+
+all: config.h
+ $(MAKE) $(AM_MAKEFLAGS) all-recursive
+
+.SUFFIXES:
+am--refresh:
+ @:
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \
+ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ echo ' $(SHELL) ./config.status'; \
+ $(SHELL) ./config.status;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ $(SHELL) ./config.status --recheck
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ $(am__cd) $(srcdir) && $(AUTOCONF)
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
+$(am__aclocal_m4_deps):
+
+config.h: stamp-h1
+ @if test ! -f $@; then \
+ rm -f stamp-h1; \
+ $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \
+ else :; fi
+
+stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
+ @rm -f stamp-h1
+ cd $(top_builddir) && $(SHELL) ./config.status config.h
+$(srcdir)/config.h.in: $(am__configure_deps)
+ ($(am__cd) $(top_srcdir) && $(AUTOHEADER))
+ rm -f stamp-h1
+ touch $@
+
+distclean-hdr:
+ -rm -f config.h stamp-h1
+Doxyfile: $(top_builddir)/config.status $(srcdir)/Doxyfile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+distclean-libtool:
+ -rm -f libtool config.lt
+install-dist_docDATA: $(dist_doc_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(docdir)" || $(MKDIR_P) "$(DESTDIR)$(docdir)"
+ @list='$(dist_doc_DATA)'; test -n "$(docdir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(docdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(docdir)" || exit $$?; \
+ done
+
+uninstall-dist_docDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(dist_doc_DATA)'; test -n "$(docdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(docdir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(docdir)" && rm -f $$files
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+# (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+$(RECURSIVE_TARGETS):
+ @failcom='exit 1'; \
+ for f in x $$MAKEFLAGS; do \
+ case $$f in \
+ *=* | --[!k]*);; \
+ *k*) failcom='fail=yes';; \
+ esac; \
+ done; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+$(RECURSIVE_CLEAN_TARGETS):
+ @failcom='exit 1'; \
+ for f in x $$MAKEFLAGS; do \
+ case $$f in \
+ *=* | --[!k]*);; \
+ *k*) failcom='fail=yes';; \
+ esac; \
+ done; \
+ dot_seen=no; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ rev=''; for subdir in $$list; do \
+ if test "$$subdir" = "."; then :; else \
+ rev="$$subdir $$rev"; \
+ fi; \
+ done; \
+ rev="$$rev ."; \
+ target=`echo $@ | sed s/-recursive//`; \
+ for subdir in $$rev; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done && test -z "$$fail"
+tags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
+ done
+ctags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
+ done
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ $(am__remove_distdir)
+ test -d "$(distdir)" || mkdir "$(distdir)"
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$(top_distdir)" distdir="$(distdir)" \
+ dist-hook
+ -test -n "$(am__skip_mode_fix)" \
+ || find "$(distdir)" -type d ! -perm -777 -exec chmod a+rwx {} \; -o \
+ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
+ || chmod -R a+r "$(distdir)"
+ @if test -z "$(am__skip_length_check)" && find "$(distdir)" -type f -print | \
+ grep '^...................................................................................................' 1>&2; then \
+ echo 'error: the above filenames are too long' 1>&2; \
+ exit 1; \
+ else :; fi
+dist-gzip: distdir
+ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+ $(am__remove_distdir)
+
+dist-bzip2: distdir
+ tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2
+ $(am__remove_distdir)
+
+dist-lzma: distdir
+ tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma
+ $(am__remove_distdir)
+
+dist-xz: distdir
+ tardir=$(distdir) && $(am__tar) | xz -c >$(distdir).tar.xz
+ $(am__remove_distdir)
+
+dist-tarZ: distdir
+ tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
+ $(am__remove_distdir)
+
+dist-shar: distdir
+ shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
+ $(am__remove_distdir)
+
+dist-zip: distdir
+ -rm -f $(distdir).zip
+ zip -rq $(distdir).zip $(distdir)
+ $(am__remove_distdir)
+
+dist dist-all: distdir
+ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+ $(am__remove_distdir)
+
+# This target untars the dist file and tries a VPATH configuration. Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+ case '$(DIST_ARCHIVES)' in \
+ *.tar.gz*) \
+ GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\
+ *.tar.bz2*) \
+ bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\
+ *.tar.lzma*) \
+ unlzma -c $(distdir).tar.lzma | $(am__untar) ;;\
+ *.tar.xz*) \
+ xz -dc $(distdir).tar.xz | $(am__untar) ;;\
+ *.tar.Z*) \
+ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
+ *.shar.gz*) \
+ GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\
+ *.zip*) \
+ unzip $(distdir).zip ;;\
+ esac
+ chmod -R a-w $(distdir); chmod a+w $(distdir)
+ mkdir $(distdir)/_build
+ mkdir $(distdir)/_inst
+ chmod a-w $(distdir)
+ test -d $(distdir)/_build || exit 0; \
+ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
+ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
+ && am__cwd=`pwd` \
+ && $(am__cd) $(distdir)/_build \
+ && ../configure --srcdir=.. --prefix="$$dc_install_base" \
+ $(DISTCHECK_CONFIGURE_FLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) dvi \
+ && $(MAKE) $(AM_MAKEFLAGS) check \
+ && $(MAKE) $(AM_MAKEFLAGS) install \
+ && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+ && $(MAKE) $(AM_MAKEFLAGS) uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
+ distuninstallcheck \
+ && chmod -R a-w "$$dc_install_base" \
+ && ({ \
+ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
+ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
+ } || { rm -rf "$$dc_destdir"; exit 1; }) \
+ && rm -rf "$$dc_destdir" \
+ && $(MAKE) $(AM_MAKEFLAGS) dist \
+ && rm -rf $(DIST_ARCHIVES) \
+ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
+ && cd "$$am__cwd" \
+ || exit 1
+ $(am__remove_distdir)
+ @(echo "$(distdir) archives ready for distribution: "; \
+ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
+ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
+distuninstallcheck:
+ @$(am__cd) '$(distuninstallcheck_dir)' \
+ && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \
+ || { echo "ERROR: files left after uninstall:" ; \
+ if test -n "$(DESTDIR)"; then \
+ echo " (check DESTDIR support)"; \
+ fi ; \
+ $(distuninstallcheck_listfiles) ; \
+ exit 1; } >&2
+distcleancheck: distclean
+ @if test '$(srcdir)' = . ; then \
+ echo "ERROR: distcleancheck can only run from a VPATH build" ; \
+ exit 1 ; \
+ fi
+ @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left in build directory after distclean:" ; \
+ $(distcleancheck_listfiles) ; \
+ exit 1; } >&2
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(DATA) config.h
+installdirs: installdirs-recursive
+installdirs-am:
+ for dir in "$(DESTDIR)$(docdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-hdr \
+ distclean-libtool distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am: install-dist_docDATA
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -rf $(top_srcdir)/autom4te.cache
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am: uninstall-dist_docDATA
+
+.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all \
+ ctags-recursive install-am install-strip tags-recursive
+
+.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
+ all all-am am--refresh check check-am clean clean-generic \
+ clean-libtool ctags ctags-recursive dist dist-all dist-bzip2 \
+ dist-gzip dist-hook dist-lzma dist-shar dist-tarZ dist-xz \
+ dist-zip distcheck distclean distclean-generic distclean-hdr \
+ distclean-libtool distclean-tags distcleancheck distdir \
+ distuninstallcheck dvi dvi-am html html-am info info-am \
+ install install-am install-data install-data-am \
+ install-dist_docDATA install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \
+ uninstall uninstall-am uninstall-dist_docDATA
+
+
+# Create ChangeLog from output of "git log --date=iso --stat".
+# Convert the man pages to PDF and plain text (ASCII only) formats.
+dist-hook:
+ if test -d "$(srcdir)/.git" && type git > /dev/null 2>&1; then \
+ ( cd "$(srcdir)" && git log --date=iso --stat ) \
+ > "$(distdir)/ChangeLog"; \
+ fi
+ if type groff > /dev/null 2>&1 && type ps2pdf > /dev/null 2>&1; then \
+ dest="$(distdir)/doc/man" && \
+ $(MKDIR_P) "$$dest/pdf-a4" "$$dest/pdf-letter" "$$dest/txt" && \
+ for FILE in $(manfiles); do \
+ BASE=`basename $$FILE .1` && \
+ groff -man -t -Tps -P-pa4 < "$(srcdir)/$$FILE" \
+ | ps2pdf - - \
+ > "$$dest/pdf-a4/$$BASE-a4.pdf" && \
+ groff -man -t -Tps -P-pletter < "$(srcdir)/$$FILE" \
+ | ps2pdf - - \
+ > "$$dest/pdf-letter/$$BASE-letter.pdf" && \
+ groff -man -t -Tascii < "$(srcdir)/$$FILE" \
+ | col -bx > "$$dest/txt/$$BASE.txt"; \
+ done; \
+ fi
+
+# This works with GNU tar and gives a cleaner package than the normal 'make dist'.
+mydist:
+ TAR_OPTIONS='--owner=0 --group=0 --numeric-owner --mode=u+rw,go+r-w' \
+ $(MAKE) dist-gzip
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/NEWS b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/NEWS
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/NEWS
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/PACKAGERS b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/PACKAGERS
new file mode 100644
index 00000000..c5d375ab
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/PACKAGERS
@@ -0,0 +1,279 @@
+
+Information to packagers of XZ Utils
+====================================
+
+ 0. Preface
+ 1. Package naming
+ 2. Package description
+ 3. License
+ 4. configure options
+ 4.1. Static vs. dynamic linking of liblzma
+ 4.2. Optimizing xzdec and lzmadec
+ 5. Additional documentation
+ 6. Extra files
+ 7. Installing XZ Utils and LZMA Utils in parallel
+ 8. Example
+
+
+0. Preface
+----------
+
+ This document is meant for people who create and maintain XZ Utils
+ packages for operating system distributions. The focus is on GNU/Linux
+ systems, but most things apply to other systems too.
+
+ While the standard "configure && make DESTDIR=$PKG install" should
+ give a pretty good package, there are some details which packagers
+ may want to tweak.
+
+ Packagers should also read the INSTALL file.
+
+
+1. Package naming
+-----------------
+
+ The preferred name for the XZ Utils package is "xz", because that's
+ the name of the upstream tarball. Naturally you may have good reasons
+ to use some other name; I won't get angry about it. ;-) It's just nice
+ to be able to point people to the correct package name without asking
+ what distro they have.
+
+ If your distro policy is to split things into small pieces, here is
+ one suggestion:
+
+ xz xz, xzdec, scripts (xzdiff, xzgrep, etc.), docs
+ xz-lzma lzma, unlzma, lzcat, lzgrep etc. symlinks and
+ lzmadec binary for compatibility with LZMA Utils
+ liblzma liblzma.so.*
+ liblzma-devel liblzma.so, liblzma.a, API headers
+
+
+2. Package description
+----------------------
+
+ Here is a suggestion which you may use as the package description.
+ If you can use only a one-line description, pick only the first line.
+ Naturally, feel free to use some other description if you find it
+ better, and maybe send it to me too.
+
+ Library and command line tools for XZ and LZMA compressed files
+
+ XZ Utils provide a general purpose data compression library
+ and command line tools. The native file format is the .xz
+ format, but also the legacy .lzma format is supported. The .xz
+ format supports multiple compression algorithms, of which LZMA2
+ is currently the primary algorithm. With typical files, XZ Utils
+ create about 30 % smaller files than gzip.
+
+ If you are splitting XZ Utils into multiple packages, here are some
+ suggestions for package descriptions:
+
+ xz:
+
+ Command line tools for XZ and LZMA compressed files
+
+ This package includes the xz compression tool and other command
+ line tools from XZ Utils. xz has command line syntax similar to
+ that of gzip. The native file format is the .xz format, but also
+ the legacy .lzma format is supported. The .xz format supports
+ multiple compression algorithms, of which LZMA2 is currently the
+ primary algorithm. With typical files, XZ Utils create about 30 %
+ smaller files than gzip.
+
+ Note that this package doesn't include the files needed for
+ LZMA Utils 4.32.x compatibility. Install also the xz-lzma
+ package to make XZ Utils emulate LZMA Utils 4.32.x.
+
+ xz-lzma:
+
+ LZMA Utils emulation with XZ Utils
+
+ This package includes executables and symlinks to make
+ XZ Utils emulate lzma, unlzma, lzcat, and other command
+ line tools found in the legacy LZMA Utils 4.32.x package.
+
+ liblzma:
+
+ Library for XZ and LZMA compressed files
+
+ liblzma is a general purpose data compression library with
+ an API similar to that of zlib. liblzma supports multiple
+ algorithms, of which LZMA2 is currently the primary algorithm.
+ The native file format is .xz, but also the legacy .lzma
+ format and raw streams (no headers at all) are supported.
+
+ This package includes the shared library.
+
+ liblzma-devel:
+
+ Library for XZ and LZMA compressed files
+
+ This package includes the API headers, static library, and
+ other development files related to liblzma.
+
+
+3. License
+----------
+
+ If the package manager supports a license field, you probably should
+ put GPLv2+ there (GNU GPL v2 or later). The interesting parts of
+ XZ Utils are in the public domain, but some less important files
+ that end up in the binary package are under GPLv2+. So it is simplest
+ to just say GPLv2+ if you cannot specify "public domain and GPLv2+".
+
+ If you split XZ Utils into multiple packages as described earlier
+ in this file, the liblzma and liblzma-devel packages will contain only
+ public domain code (from XZ Utils at least; compiler or linker may
+ add some third-party code, which may be copyrighted).
+
+
+4. configure options
+--------------------
+
+ Unless you are building a package for a distribution that is meant
+ only for embedded systems, don't use the following configure options:
+
+ --enable-debug
+ --enable-encoders (*)
+ --enable-decoders
+ --enable-match-finders
+ --enable-checks
+ --enable-small (*)
+ --disable-threads (*)
+
+ (*) These are OK when building xzdec and lzmadec as explained later.
+
+ You may use --enable-werror but be careful with it since it may break
+ the build due to some useless warning when the build environment
+ changes (like CPU architecture or compiler version).
+
+
+4.1. Static vs. dynamic linking of liblzma
+
+ The default is to link the most important command line tools against
+ static liblzma, and the less important tools against shared liblzma.
+ This can be changed by passing --enable-dynamic to configure, or by
+ not building static libraries at all by passing --disable-static
+ to configure. It is mildly recommended that you use the default, but
+ the configure options make it easy to do otherwise if the distro policy
+ so requires.
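+
+ For example, either of the following configure calls (flags as
+ described above; pick one according to your distro policy):
+
+     ./configure --enable-dynamic
+     ./configure --disable-static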
+
+ On 32-bit x86, linking against static liblzma can give a minor
+ speed improvement. Static libraries on x86 are usually compiled as
+ position-dependent code (non-PIC) and shared libraries are built as
+ position-independent code (PIC). PIC wastes one register, which can
+ make the code slightly slower compared to a non-PIC version. (Note
+ that this doesn't apply to x86-64.)
+
+ Linking against static liblzma avoids a dependency on liblzma shared
+ library, and makes it slightly easier to copy the command line tools
+ between systems (e.g. quick 'n' dirty emergency recovery of some
+ files). It also allows putting the command line tools in /bin while
+ leaving liblzma in /usr/lib (assuming that your distribution uses
+ such a file system hierarchy), if no other file in /bin would require
+ liblzma.
+
+ If you don't want to distribute static libraries but you still
+ want to link the command line tools against static liblzma, it is
+ probably easiest to build both static and shared liblzma, but after
+ "make DESTDIR=$PKG install" remove liblzma.a and modify liblzma.la
+ to not contain a reference to liblzma.a.
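+
+ As a minimal sketch (using the staging directory $PKG as in the
+ example in section 8; the library path is illustrative):
+
+     make DESTDIR=$PKG install
+     rm -f $PKG/usr/lib/liblzma.a
+     sed -i "s/^old_library=.*$/old_library=''/" $PKG/usr/lib/liblzma.la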
+
+
+4.2. Optimizing xzdec and lzmadec
+
+ xzdec and lzmadec are intended to be relatively small rather than
+ optimized for the best speed. Thus, it is a good idea to build
+ xzdec and lzmadec separately (a combined configure sketch follows
+ this list):
+
+ - Only decoder code is needed, so you can speed up the build
+ slightly by passing --disable-encoders to configure. This
+ shouldn't affect the final size of the executables though,
+ because the linker is able to omit the encoder code anyway.
+
+ - xzdec and lzmadec will never use multithreading capabilities of
+ liblzma. You can avoid dependency on libpthread by passing
+ --disable-threads to configure.
+
+ - There are and will be no translated messages for xzdec and
+ lzmadec, so it is fine to pass also --disable-nls to configure.
+
+ - To select a somewhat size-optimized variant of some things in
+ liblzma, pass --enable-small to configure.
+
+ - Tell the compiler to optimize for size instead of speed.
+ E.g. with GCC, put -Os into CFLAGS.
+
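+ Putting the points above together, a configure sketch for such a
+ build could look like this (the CFLAGS value is just an example):
+
+     ./configure --disable-encoders --disable-threads --disable-nls \
+                 --enable-small CFLAGS='-Os'
+     make -C src/liblzma
+     make -C src/xzdec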
+
+5. Additional documentation
+---------------------------
+
+ "make install" copies some additional documentation to $docdir
+ (--docdir in configure). These include a copy of the GNU GPL v2, which can
+ be replaced with a symlink if your distro ships with shared copies
+ of the common license texts.
+
+
+6. Extra files
+--------------
+
+ The "extra" directory contains some small extra tools or other files.
+ The exact set of extra files can vary between XZ Utils releases. The
+ extra files have only limited use or they are too dangerous to be
+ put directly into $bindir (7z2lzma.sh is a good example, since it can
+ silently create corrupt output if certain conditions are not met).
+
+ If you feel like it, you may copy the extra directory under the doc
+ directory (e.g. /usr/share/doc/xz/extra). Maybe some people will find
+ them useful. However, most people needing these tools probably are
+ able to find them from the source package too.
+
+ The "debug" directory contains some tools that are useful only when
+ hacking on XZ Utils. Don't package these tools.
+
+
+7. Installing XZ Utils and LZMA Utils in parallel
+-------------------------------------------------
+
+ XZ Utils and LZMA Utils 4.32.x can be installed in parallel by
+ omitting the compatibility symlinks (lzma, unlzma, lzcat, lzgrep etc.)
+ from the XZ Utils package. It's probably a good idea to still package
+ the symlinks in a separate package so that users may choose whether they
+ want to use XZ Utils or LZMA Utils for handling .lzma files.
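+
+ A sketch of the contents of such a compatibility package, using the
+ staging directory $PKG from the examples in this file (the link
+ targets below are illustrative; verify them against your build):
+
+     ln -sf xz     $PKG/usr/bin/lzma
+     ln -sf xz     $PKG/usr/bin/unlzma
+     ln -sf xz     $PKG/usr/bin/lzcat
+     ln -sf xzgrep $PKG/usr/bin/lzgrep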
+
+
+8. Example
+----------
+
+ Here is an example for i686 GNU/Linux that
+ - links xz against static liblzma;
+ - includes only shared liblzma in the final package;
+ - links xzdec and lzmadec against static liblzma while
+ avoiding libpthread dependency.
+
+ PKG=/tmp/xz-pkg
+ tar xf xz-x.y.z.tar.gz
+ cd xz-x.y.z
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ CFLAGS='-march=i686 -O2'
+ make
+ make DESTDIR=$PKG install-strip
+ rm -f $PKG/usr/lib/lib*.a
+ sed -i "s/^old_library=.*$/old_library=''/" $PKG/usr/lib/lib*.la
+ make clean
+ ./configure \
+ --prefix=/usr \
+ --sysconfdir=/etc \
+ --disable-shared \
+ --disable-nls \
+ --disable-encoders \
+ --enable-small \
+ --disable-threads \
+ CFLAGS='-march=i686 -Os'
+ make -C src/liblzma
+ make -C src/xzdec
+ make -C src/xzdec DESTDIR=$PKG install-strip
+ cp -a extra $PKG/usr/share/doc/xz
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/README b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/README
new file mode 100644
index 00000000..3544938e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/README
@@ -0,0 +1,217 @@
+
+XZ Utils
+========
+
+ 0. Overview
+ 1. Documentation
+ 1.1. Overall documentation
+ 1.2. Documentation for command line tools
+ 1.3. Documentation for liblzma
+ 2. Version numbering
+ 3. Reporting bugs
+ 4. Other implementations of the .xz format
+ 5. Contact information
+
+
+0. Overview
+-----------
+
+ XZ Utils provide a general purpose data compression library and
+ command line tools. The native file format is the .xz format, but
+ also the legacy .lzma format is supported. The .xz format supports
+ multiple compression algorithms, which are called "filters" in
+ the context of XZ Utils. The primary filter is currently LZMA2. With
+ typical files, XZ Utils create about 30 % smaller files than gzip.
+
+ To ease adapting support for the .xz format into existing applications
+ and scripts, the API of liblzma is somewhat similar to the API of the
+ popular zlib library. For the same reason, the command line tool xz
+ has command line syntax similar to that of gzip.
+
+ When aiming for the highest compression ratio, the LZMA2 encoder uses
+ a lot of CPU time and may use, depending on the settings, even
+ hundreds of megabytes of RAM. However, in fast modes, the LZMA2 encoder
+ competes with bzip2 in compression speed, RAM usage, and compression
+ ratio.
+
+ LZMA2 is reasonably fast to decompress. It is a little slower than
+ gzip, but a lot faster than bzip2. Being fast to decompress means
+ that the .xz format is especially nice when the same file will be
+ decompressed very many times (usually on different computers), which
+ is the case e.g. when distributing software packages. In such
+ situations, it's not too bad if the compression takes some time,
+ since that needs to be done only once to benefit many people.
+
+ With some file types, combining (or "chaining") LZMA2 with an
+ additional filter can improve compression ratio. A filter chain may
+ contain up to four filters, although usually only one or two are used.
+ For example, putting a BCJ (Branch/Call/Jump) filter before LZMA2
+ in the filter chain can improve compression ratio of executable files.
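+
+ With a reasonably recent xz, such a chain can be requested roughly
+ like this (the exact option spelling in this beta version may
+ differ):
+
+     xz --x86 --lzma2=preset=6 some-executable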
+
+ Since the .xz format allows adding new filter IDs, it is possible that
+ some day there will be a filter that is, for example, much faster to
+ compress than LZMA2 (but probably with worse compression ratio).
+ Similarly, it is possible that some day there will be a filter that
+ compresses better than LZMA2.
+
+ XZ Utils doesn't support multithreaded compression or decompression
+ yet. It has been planned though and taken into account when designing
+ the .xz file format.
+
+
+1. Documentation
+----------------
+
+1.1. Overall documentation
+
+ README This file
+
+ INSTALL.generic Generic install instructions for those not familiar
+ with packages using GNU Autotools
+ INSTALL Installation instructions specific to XZ Utils
+ PACKAGERS Information to packagers of XZ Utils
+
+ COPYING XZ Utils copyright and license information
+ COPYING.GPLv2 GNU General Public License version 2
+ COPYING.GPLv3 GNU General Public License version 3
+ COPYING.LGPLv2.1 GNU Lesser General Public License version 2.1
+
+ AUTHORS The main authors of XZ Utils
+ THANKS Incomplete list of people who have helped making
+ this software
+ NEWS User-visible changes between XZ Utils releases
+ ChangeLog Detailed list of changes (commit log)
+
+ Note that only some of the above files are included in binary
+ packages.
+
+
+1.2. Documentation for command line tools
+
+ The command line tools are documented as man pages. In source code
+ releases (and possibly also in some binary packages), the man pages
+ are also provided in plain text (ASCII only) and PDF formats in the
+ directory "doc/man" to make the man pages more accessible to those
+ whose operating system doesn't provide an easy way to view man pages.
+
+
+1.3. Documentation for liblzma
+
+ The liblzma API headers include short docs about each function
+ and data type as Doxygen tags. These docs should be quite OK as
+ a quick reference.
+
+ I have planned to write a bunch of very well documented example
+ programs, which (due to comments) should work as a tutorial to
+ various features of liblzma. No such example programs have been
+ written yet.
+
+ For now, if you have never used liblzma, libbzip2, or zlib, I
+ recommend learning the *basics* of the zlib API. Once you know that, it
+ should be easier to learn liblzma.
+
+ http://zlib.net/manual.html
+ http://zlib.net/zlib_how.html
+
+
+2. Version numbering
+--------------------
+
+ The version number format of XZ Utils is X.Y.ZS (a worked example
+ follows this list):
+
+ - X is the major version. When this is incremented, the library
+ API and ABI break.
+
+ - Y is the minor version. It is incremented when new features are
+ added without breaking the existing API or ABI. An even Y indicates
+ a stable release and an odd Y indicates an unstable release (alpha
+ or beta version).
+
+ - Z is the revision. This has different meaning for stable and
+ unstable releases:
+ * Stable: Z is incremented when bugs get fixed without adding
+ any new features.
+ * Unstable: Z is just a counter. API or ABI of features added
+ in earlier unstable releases having the same X.Y may break.
+
+ - S indicates stability of the release. It is missing from the
+ stable releases where Y is an even number. When Y is odd, S
+ is either "alpha" or "beta" to make it very clear that such
+ versions are not stable releases. The same X.Y.Z combination is
+ not used for more than one stability level, i.e. after X.Y.Zalpha,
+ the next version can be X.Y.(Z+1)beta but not X.Y.Zbeta.
+
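+ For example, in version 4.999.9beta: X=4, Y=999 (odd, so an unstable
+ release), Z=9, and S="beta".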
+
+3. Reporting bugs
+-----------------
+
+ Naturally it is easiest for me if you already know what causes the
+ unexpected behavior. Even better if you have a patch to propose.
+ However, quite often the reason for unexpected behavior is unknown,
+ so here are a few things to do before sending a bug report:
+
+ 1. Try to create a small example of how to reproduce the issue.
+
+ 2. Compile XZ Utils with debugging code using configure switches
+ --enable-debug and, if possible, --disable-shared. If you are
+ using GCC, use CFLAGS='-O0 -ggdb3'. Don't strip the resulting
+ binaries.
+
+ 3. Turn on core dumps. The exact command depends on your shell;
+ for example in GNU bash it is done with "ulimit -c unlimited",
+ and in tcsh with "limit coredumpsize unlimited".
+
+ 4. Try to reproduce the suspected bug. If you get an "assertion failed"
+ message, be sure to include the complete message in your bug
+ report. If the application leaves a coredump, get a backtrace
+ using gdb:
+ $ gdb /path/to/app-binary # Load the app to the debugger.
+ (gdb) core core # Open the coredump.
+ (gdb) bt # Print the backtrace. Copy & paste to bug report.
+ (gdb) quit # Quit gdb.
+
+ Report your bug via email or IRC (see Contact information below).
+ Don't send core dump files or any executables. If you have a small
+ example file(s) (total size less than 256 KiB), please include
+ it/them as an attachment. If you have bigger test files, put them
+ online somewhere and include a URL to the file(s) in the bug report.
+
+ Always include the exact version number of XZ Utils in the bug report.
+ If you are using a snapshot from the git repository, use "git describe"
+ to get the exact snapshot version. If you are using XZ Utils shipped
+ in an operating system distribution, mention the distribution name,
+ distribution version, and exact xz package version; if you cannot
+ repeat the bug with the code compiled from unpatched source code,
+ you probably need to report a bug to your distribution's bug tracking
+ system.
+
+
+4. Other implementations of the .xz format
+------------------------------------------
+
+ 7-Zip and the p7zip port of 7-Zip support the .xz format starting
+ from version 9.00alpha.
+
+ http://7-zip.org/
+ http://p7zip.sourceforge.net/
+
+ XZ Embedded is a limited implementation written for use in the Linux
+ kernel, but it is also suitable for other embedded use.
+
+ http://tukaani.org/xz/embedded.html
+
+
+5. Contact information
+----------------------
+
+ If you have questions, bug reports, patches etc. related to XZ Utils,
+ contact Lasse Collin <lasse.collin@tukaani.org> (in Finnish or English).
+ tukaani.org uses greylisting to reduce spam, thus when you send your
+ first email, it may get delayed by a few hours. In addition to that,
+ I'm sometimes slow at replying. If you haven't got a reply within two
+ weeks, assume that your email has got lost and resend it or use IRC.
+
+ You can also find me in #tukaani on Freenode; my nick is Larhzu.
+ The channel tends to be pretty quiet, so just ask your question and
+ someone may wake up.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/THANKS b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/THANKS
new file mode 100644
index 00000000..426a2ed0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/THANKS
@@ -0,0 +1,47 @@
+
+Thanks
+======
+
+Some people have helped more, some less, but nevertheless everyone's help
+has been important. :-) In alphabetical order:
+ - Mark Adler
+ - H. Peter Anvin
+ - Nelson H. F. Beebe
+ - Anders F. Björklund
+ - Emmanuel Blot
+ - David Burklund
+ - Andrew Dudman
+ - İsmail Dönmez
+ - Mike Frysinger
+ - Per Øyvind Karlsen
+ - Ville Koskinen
+ - Stephan Kulow
+ - Peter Lawler
+ - Hin-Tak Leung
+ - Andraž 'ruskie' Levstik
+ - Jim Meyering
+ - Hongbo Ni
+ - Jonathan Nieder
+ - Igor Pavlov
+ - Mikko Pouru
+ - Bernhard Reutner-Fischer
+ - Christian von Roques
+ - Alexandre Sauvé
+ - Andreas Schwab
+ - Dan Shechter
+ - Jonathan Stott
+ - Paul Townsend
+ - Mohammed Adnène Trojette
+ - Patrick J. Volkerding
+ - Bert Wesarg
+ - Ralf Wildenhues
+ - Charles Wilson
+ - Lars Wirzenius
+ - Pilorz Wojciech
+ - Andreas Zieringer
+
+Also thanks to all the people who have participated in the Tukaani project.
+
+I have probably forgotten to add some names to the above list. Sorry about
+that and thanks for your help.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/TODO b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/TODO
new file mode 100644
index 00000000..6912943b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/TODO
@@ -0,0 +1,56 @@
+
+XZ Utils To-Do List
+===================
+
+Known bugs
+----------
+
+ The test suite is too incomplete.
+
+ If the memory usage limit is less than about 13 MiB, xz is unable to
+ automatically scale down the compression settings enough even though
+ it would be possible by switching from BT2/BT3/BT4 match finder to
+ HC3/HC4.
+
+ The code to detect the number of CPU cores doesn't count hyperthreading
+ as multiple cores. In the context of xz, it probably should.
+ Hyperthreading is good at least with p7zip.
+
+ XZ Utils compress some files significantly worse than LZMA Utils.
+ This is due to faster compression presets used by XZ Utils, and
+ can be worked around by using "xz --extreme". However, the presets
+ need some tweaking and maybe this issue can be minimized without
+ making the typical case too much slower.
+
+
+Missing features
+----------------
+
+ "xz --list"
+
+ xz could create sparse files when decompressing. (Some prototyping
+ has been done.)
+
+ xz doesn't support copying extended attributes, access control
+ lists etc. from source to target file.
+
+ Multithreaded compression
+
+ Multithreaded decompression
+
+ Buffer-to-buffer coding could use less RAM (especially when
+ decompressing LZMA1 or LZMA2).
+
+ An I/O library has not been implemented yet. It will possibly be named libzzf.
+
+
+Documentation
+-------------
+
+ Some tutorial is needed for liblzma. I have planned to write some
+ extremely well commented example programs, which would work as
+ a tutorial. I suppose the Doxygen tags are quite OK as a quick
+ reference once one is familiar with the liblzma API.
+
+ Document the LZMA1 and LZMA2 algorithms.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/aclocal.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/aclocal.m4
new file mode 100644
index 00000000..09162f8a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/aclocal.m4
@@ -0,0 +1,1027 @@
+# generated automatically by aclocal 1.11 -*- Autoconf -*-
+
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+# 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+m4_ifndef([AC_AUTOCONF_VERSION],
+ [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.64],,
+[m4_warning([this file was generated for autoconf 2.64.
+You have another version of autoconf. It may work, but is not guaranteed to.
+If you have problems, you may need to regenerate the build system entirely.
+To do so, use the procedure documented by the package, typically `autoreconf'.])])
+
+# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_AUTOMAKE_VERSION(VERSION)
+# ----------------------------
+# Automake X.Y traces this macro to ensure aclocal.m4 has been
+# generated from the m4 files accompanying Automake X.Y.
+# (This private macro should not be called outside this file.)
+AC_DEFUN([AM_AUTOMAKE_VERSION],
+[am__api_version='1.11'
+dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
+dnl require some minimum version. Point them to the right macro.
+m4_if([$1], [1.11], [],
+ [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
+])
+
+# _AM_AUTOCONF_VERSION(VERSION)
+# -----------------------------
+# aclocal traces this macro to find the Autoconf version.
+# This is a private macro too. Using m4_define simplifies
+# the logic in aclocal, which can simply ignore this definition.
+m4_define([_AM_AUTOCONF_VERSION], [])
+
+# AM_SET_CURRENT_AUTOMAKE_VERSION
+# -------------------------------
+# Call AM_AUTOMAKE_VERSION and _AM_AUTOCONF_VERSION so they can be traced.
+# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
+AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
+[AM_AUTOMAKE_VERSION([1.11])dnl
+m4_ifndef([AC_AUTOCONF_VERSION],
+ [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
+
+# Figure out how to run the assembler. -*- Autoconf -*-
+
+# Copyright (C) 2001, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 5
+
+# AM_PROG_AS
+# ----------
+AC_DEFUN([AM_PROG_AS],
+[# By default we simply use the C compiler to build assembly code.
+AC_REQUIRE([AC_PROG_CC])
+test "${CCAS+set}" = set || CCAS=$CC
+test "${CCASFLAGS+set}" = set || CCASFLAGS=$CFLAGS
+AC_ARG_VAR([CCAS], [assembler compiler command (defaults to CC)])
+AC_ARG_VAR([CCASFLAGS], [assembler compiler flags (defaults to CFLAGS)])
+_AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl
+])
+
+# AM_AUX_DIR_EXPAND -*- Autoconf -*-
+
+# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets
+# $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to
+# `$srcdir', `$srcdir/..', or `$srcdir/../..'.
+#
+# Of course, Automake must honor this variable whenever it calls a
+# tool from the auxiliary directory. The problem is that $srcdir (and
+# therefore $ac_aux_dir as well) can be either absolute or relative,
+# depending on how configure is run. This is pretty annoying, since
+# it makes $ac_aux_dir quite unusable in subdirectories: in the top
+# source directory, any form will work fine, but in subdirectories a
+# relative path needs to be adjusted first.
+#
+# $ac_aux_dir/missing
+# fails when called from a subdirectory if $ac_aux_dir is relative
+# $top_srcdir/$ac_aux_dir/missing
+# fails if $ac_aux_dir is absolute,
+# fails when called from a subdirectory in a VPATH build with
+# a relative $ac_aux_dir
+#
+# The reason of the latter failure is that $top_srcdir and $ac_aux_dir
+# are both prefixed by $srcdir. In an in-source build this is usually
+# harmless because $srcdir is `.', but things will break when you
+# start a VPATH build or use an absolute $srcdir.
+#
+# So we could use something similar to $top_srcdir/$ac_aux_dir/missing,
+# iff we strip the leading $srcdir from $ac_aux_dir. That would be:
+# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"`
+# and then we would define $MISSING as
+# MISSING="\${SHELL} $am_aux_dir/missing"
+# This will work as long as MISSING is not called from configure, because
+# unfortunately $(top_srcdir) has no meaning in configure.
+# However there are other variables, like CC, which are often used in
+# configure, and could therefore not use this "fixed" $ac_aux_dir.
+#
+# Another solution, used here, is to always expand $ac_aux_dir to an
+# absolute PATH. The drawback is that using absolute paths prevents a
+# configured tree from being moved without reconfiguration.
+
+AC_DEFUN([AM_AUX_DIR_EXPAND],
+[dnl Rely on autoconf to set up CDPATH properly.
+AC_PREREQ([2.50])dnl
+# expand $ac_aux_dir to an absolute path
+am_aux_dir=`cd $ac_aux_dir && pwd`
+])
+
+# AM_CONDITIONAL -*- Autoconf -*-
+
+# Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 9
+
+# AM_CONDITIONAL(NAME, SHELL-CONDITION)
+# -------------------------------------
+# Define a conditional.
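+# For example, configure.ac might contain (the name and the shell test
+# here are only illustrative):
+#   AM_CONDITIONAL([ENABLE_DEBUG], [test "x$enable_debug" = xyes])
+# and Makefile.am can then use "if ENABLE_DEBUG ... else ... endif".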
+AC_DEFUN([AM_CONDITIONAL],
+[AC_PREREQ(2.52)dnl
+ ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])],
+ [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
+AC_SUBST([$1_TRUE])dnl
+AC_SUBST([$1_FALSE])dnl
+_AM_SUBST_NOTMAKE([$1_TRUE])dnl
+_AM_SUBST_NOTMAKE([$1_FALSE])dnl
+m4_define([_AM_COND_VALUE_$1], [$2])dnl
+if $2; then
+ $1_TRUE=
+ $1_FALSE='#'
+else
+ $1_TRUE='#'
+ $1_FALSE=
+fi
+AC_CONFIG_COMMANDS_PRE(
+[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then
+ AC_MSG_ERROR([[conditional "$1" was never defined.
+Usually this means the macro was only invoked conditionally.]])
+fi])])
+
+# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 10
+
+# There are a few dirty hacks below to avoid letting `AC_PROG_CC' be
+# written in clear, in which case automake, when reading aclocal.m4,
+# will think it sees a *use*, and therefore will trigger all its
+# C support machinery. Also note that it means that autoscan, seeing
+# CC etc. in the Makefile, will ask for an AC_PROG_CC use...
+
+
+# _AM_DEPENDENCIES(NAME)
+# ----------------------
+# See how the compiler implements dependency checking.
+# NAME is "CC", "CXX", "GCJ", or "OBJC".
+# We try a few techniques and use that to set a single cache variable.
+#
+# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was
+# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular
+# dependency, and given that the user is not expected to run this macro,
+# just rely on AC_PROG_CC.
+AC_DEFUN([_AM_DEPENDENCIES],
+[AC_REQUIRE([AM_SET_DEPDIR])dnl
+AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl
+AC_REQUIRE([AM_MAKE_INCLUDE])dnl
+AC_REQUIRE([AM_DEP_TRACK])dnl
+
+ifelse([$1], CC, [depcc="$CC" am_compiler_list=],
+ [$1], CXX, [depcc="$CXX" am_compiler_list=],
+ [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'],
+ [$1], UPC, [depcc="$UPC" am_compiler_list=],
+ [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'],
+ [depcc="$$1" am_compiler_list=])
+
+AC_CACHE_CHECK([dependency style of $depcc],
+ [am_cv_$1_dependencies_compiler_type],
+[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named `D' -- because `-MD' means `put the output
+ # in D'.
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_$1_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp`
+ fi
+ am__universal=false
+ m4_case([$1], [CC],
+ [case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac],
+ [CXX],
+ [case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac])
+
+ for depmode in $am_compiler_list; do
+ # Set up a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+ # Solaris 8's {/usr,}/bin/sh.
+ touch sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle `-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # after this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvisualcpp | msvcmsys)
+ # This compiler won't grok `-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_$1_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_$1_dependencies_compiler_type=none
+fi
+])
+AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type])
+AM_CONDITIONAL([am__fastdep$1], [
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_$1_dependencies_compiler_type" = gcc3])
+])
+
+
+# AM_SET_DEPDIR
+# -------------
+# Choose a directory name for dependency files.
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES
+AC_DEFUN([AM_SET_DEPDIR],
+[AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl
+])
+
+
+# AM_DEP_TRACK
+# ------------
+AC_DEFUN([AM_DEP_TRACK],
+[AC_ARG_ENABLE(dependency-tracking,
+[ --disable-dependency-tracking speeds up one-time build
+ --enable-dependency-tracking do not reject slow dependency extractors])
+if test "x$enable_dependency_tracking" != xno; then
+ am_depcomp="$ac_aux_dir/depcomp"
+ AMDEPBACKSLASH='\'
+fi
+AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno])
+AC_SUBST([AMDEPBACKSLASH])dnl
+_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl
+])
+
+# Generate code to set up dependency tracking. -*- Autoconf -*-
+
+# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+#serial 5
+
+# _AM_OUTPUT_DEPENDENCY_COMMANDS
+# ------------------------------
+AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
+[{
+ # Autoconf 2.62 quotes --file arguments for eval, but not when files
+ # are listed without --file. Let's play safe and only enable the eval
+ # if we detect the quoting.
+ case $CONFIG_FILES in
+ *\'*) eval set x "$CONFIG_FILES" ;;
+ *) set x $CONFIG_FILES ;;
+ esac
+ shift
+ for mf
+ do
+ # Strip MF so we end up with the name of the file.
+ mf=`echo "$mf" | sed -e 's/:.*$//'`
+ # Check whether this is an Automake generated Makefile or not.
+ # We used to match only the files named `Makefile.in', but
+ # some people rename them; so instead we look at the file content.
+ # Grep'ing the first line is not enough: some people post-process
+ # each Makefile.in and add a new line on top of each file to say so.
+ # Grep'ing the whole file is not good either: AIX grep has a line
+ # limit of 2048, but all seds we know understand at least 4000.
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+ dirpart=`AS_DIRNAME("$mf")`
+ else
+ continue
+ fi
+ # Extract the definition of DEPDIR, am__include, and am__quote
+ # from the Makefile without running `make'.
+ DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+ test -z "$DEPDIR" && continue
+ am__include=`sed -n 's/^am__include = //p' < "$mf"`
+ test -z "am__include" && continue
+ am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+ # When using ansi2knr, U may be empty or an underscore; expand it
+ U=`sed -n 's/^U = //p' < "$mf"`
+ # Find all dependency output files, they are included files with
+ # $(DEPDIR) in their names. We invoke sed twice because it is the
+ # simplest approach to changing $(DEPDIR) to its actual value in the
+ # expansion.
+ for file in `sed -n "
+ s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+ # Make sure the directory exists.
+ test -f "$dirpart/$file" && continue
+ fdir=`AS_DIRNAME(["$file"])`
+ AS_MKDIR_P([$dirpart/$fdir])
+ # echo "creating $dirpart/$file"
+ echo '# dummy' > "$dirpart/$file"
+ done
+ done
+}
+])# _AM_OUTPUT_DEPENDENCY_COMMANDS
+
+
+# AM_OUTPUT_DEPENDENCY_COMMANDS
+# -----------------------------
+# This macro should only be invoked once -- use via AC_REQUIRE.
+#
+# This code is only required when automatic dependency tracking
+# is enabled. FIXME. This creates each `.P' file that we will
+# need in order to bootstrap the dependency handling code.
+AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
+[AC_CONFIG_COMMANDS([depfiles],
+ [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS],
+ [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"])
+])
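Stripped of the Autoconf plumbing, the depfiles command above does nothing more than pre-create the files a generated Makefile is about to include. A minimal sketch of that step for a single Makefile (the file name and the GNU-make "include" syntax are assumptions made for illustration):

    #!/bin/sh
    # Pre-create every dependency file a generated Makefile includes, so the
    # first make run does not abort on a missing ./.deps/*.Po file.
    mf=Makefile                                   # illustrative file name
    DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
    test -n "$DEPDIR" || DEPDIR=.deps
    for file in `sed -n 's/^include \(.*(DEPDIR).*\)$/\1/p' < "$mf" \
                   | sed 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
      test -f "$file" && continue
      mkdir -p "`dirname "$file"`"
      echo '# dummy' > "$file"                    # stub until make rewrites it
    done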
+
+# Do all the work for Automake. -*- Autoconf -*-
+
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+# 2005, 2006, 2008, 2009 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 16
+
+# This macro actually does too much. Some checks are only needed if
+# your package does certain things. But this isn't really a big deal.
+
+# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
+# AM_INIT_AUTOMAKE([OPTIONS])
+# -----------------------------------------------
+# The call with PACKAGE and VERSION arguments is the old style
+# call (pre autoconf-2.50), which is being phased out. PACKAGE
+# and VERSION should now be passed to AC_INIT and removed from
+# the call to AM_INIT_AUTOMAKE.
+# We support both call styles for the transition. After
+# the next Automake release, Autoconf can make the AC_INIT
+# arguments mandatory, and then we can depend on a new Autoconf
+# release and drop the old call support.
+AC_DEFUN([AM_INIT_AUTOMAKE],
+[AC_PREREQ([2.62])dnl
+dnl Autoconf wants to disallow AM_ names. We explicitly allow
+dnl the ones we care about.
+m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
+AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl
+AC_REQUIRE([AC_PROG_INSTALL])dnl
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+ # is not polluted with repeated "-I."
+ AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl
+ # test to see if srcdir already configured
+ if test -f $srcdir/config.status; then
+ AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
+ fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+ if (cygpath --version) >/dev/null 2>/dev/null; then
+ CYGPATH_W='cygpath -w'
+ else
+ CYGPATH_W=echo
+ fi
+fi
+AC_SUBST([CYGPATH_W])
+
+# Define the identity of the package.
+dnl Distinguish between old-style and new-style calls.
+m4_ifval([$2],
+[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
+ AC_SUBST([PACKAGE], [$1])dnl
+ AC_SUBST([VERSION], [$2])],
+[_AM_SET_OPTIONS([$1])dnl
+dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT.
+m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,,
+ [m4_fatal([AC_INIT should be called with package and version arguments])])dnl
+ AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
+ AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl
+
+_AM_IF_OPTION([no-define],,
+[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package])
+ AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl
+
+# Some tools Automake needs.
+AC_REQUIRE([AM_SANITY_CHECK])dnl
+AC_REQUIRE([AC_ARG_PROGRAM])dnl
+AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version})
+AM_MISSING_PROG(AUTOCONF, autoconf)
+AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version})
+AM_MISSING_PROG(AUTOHEADER, autoheader)
+AM_MISSING_PROG(MAKEINFO, makeinfo)
+AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
+AC_REQUIRE([AM_PROG_MKDIR_P])dnl
+# We need awk for the "check" target. The system "awk" is bad on
+# some platforms.
+AC_REQUIRE([AC_PROG_AWK])dnl
+AC_REQUIRE([AC_PROG_MAKE_SET])dnl
+AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])],
+ [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])],
+ [_AM_PROG_TAR([v7])])])
+_AM_IF_OPTION([no-dependencies],,
+[AC_PROVIDE_IFELSE([AC_PROG_CC],
+ [_AM_DEPENDENCIES(CC)],
+ [define([AC_PROG_CC],
+ defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_CXX],
+ [_AM_DEPENDENCIES(CXX)],
+ [define([AC_PROG_CXX],
+ defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_OBJC],
+ [_AM_DEPENDENCIES(OBJC)],
+ [define([AC_PROG_OBJC],
+ defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl
+])
+_AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl
+dnl The `parallel-tests' driver may need to know about EXEEXT, so add the
+dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro
+dnl is hooked onto _AC_COMPILER_EXEEXT early, see below.
+AC_CONFIG_COMMANDS_PRE(dnl
+[m4_provide_if([_AM_COMPILER_EXEEXT],
+ [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
+])
+
+dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
+dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
+dnl mangled by Autoconf and run in a shell conditional statement.
+m4_define([_AC_COMPILER_EXEEXT],
+m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
+
+
+# When config.status generates a header, we must update the stamp-h file.
+# This file resides in the same directory as the config header
+# that is generated. The stamp files are numbered to have different names.
+
+# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the
+# loop where config.status creates the headers, so we can generate
+# our stamp files there.
+AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK],
+[# Compute $1's index in $config_headers.
+_am_arg=$1
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
+
+# Copyright (C) 2001, 2003, 2005, 2008 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_PROG_INSTALL_SH
+# ------------------
+# Define $install_sh.
+AC_DEFUN([AM_PROG_INSTALL_SH],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+if test x"${install_sh}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\	*)
+ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+ *)
+ install_sh="\${SHELL} $am_aux_dir/install-sh"
+ esac
+fi
+AC_SUBST(install_sh)])
+
+# Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 2
+
+# Check whether the underlying file-system supports filenames
+# with a leading dot. For instance MS-DOS doesn't.
+AC_DEFUN([AM_SET_LEADING_DOT],
+[rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+ am__leading_dot=.
+else
+ am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+AC_SUBST([am__leading_dot])])
+
+# Check to see how 'make' treats includes. -*- Autoconf -*-
+
+# Copyright (C) 2001, 2002, 2003, 2005, 2009 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 4
+
+# AM_MAKE_INCLUDE()
+# -----------------
+# Check to see how make treats includes.
+AC_DEFUN([AM_MAKE_INCLUDE],
+[am_make=${MAKE-make}
+cat > confinc << 'END'
+am__doit:
+ @echo this is the am__doit target
+.PHONY: am__doit
+END
+# If we don't find an include directive, just comment out the code.
+AC_MSG_CHECKING([for style of include used by $am_make])
+am__include="#"
+am__quote=
+_am_result=none
+# First try GNU make style include.
+echo "include confinc" > confmf
+# Ignore all kinds of additional output from `make'.
+case `$am_make -s -f confmf 2> /dev/null` in #(
+*the\ am__doit\ target*)
+ am__include=include
+ am__quote=
+ _am_result=GNU
+ ;;
+esac
+# Now try BSD make style include.
+if test "$am__include" = "#"; then
+ echo '.include "confinc"' > confmf
+ case `$am_make -s -f confmf 2> /dev/null` in #(
+ *the\ am__doit\ target*)
+ am__include=.include
+ am__quote="\""
+ _am_result=BSD
+ ;;
+ esac
+fi
+AC_SUBST([am__include])
+AC_SUBST([am__quote])
+AC_MSG_RESULT([$_am_result])
+rm -f confinc confmf
+])
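The same probe can be run by hand to see which include dialect a given make speaks. A sketch assuming only a POSIX shell and some make on PATH (printf is used so the recipe line gets its mandatory TAB):

    #!/bin/sh
    # Detect whether $MAKE understands GNU 'include file' or BSD
    # '.include "file"' by including a tiny makefile and looking for the
    # target's output.  The \t keeps the recipe line TAB-indented.
    am_make=${MAKE-make}
    printf 'am__doit:\n\t@echo this is the am__doit target\n.PHONY: am__doit\n' > confinc
    style=none
    echo 'include confinc' > confmf
    case `$am_make -s -f confmf 2>/dev/null` in
      *the\ am__doit\ target*) style=GNU ;;
    esac
    if test "$style" = none; then
      echo '.include "confinc"' > confmf
      case `$am_make -s -f confmf 2>/dev/null` in
        *the\ am__doit\ target*) style=BSD ;;
      esac
    fi
    echo "include style: $style"
    rm -f confinc confmf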
+
+# Copyright (C) 1999, 2000, 2001, 2003, 2004, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 6
+
+# AM_PROG_CC_C_O
+# --------------
+# Like AC_PROG_CC_C_O, but changed for automake.
+AC_DEFUN([AM_PROG_CC_C_O],
+[AC_REQUIRE([AC_PROG_CC_C_O])dnl
+AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([compile])dnl
+# FIXME: we rely on the cache variable name because
+# there is no other way.
+set dummy $CC
+am_cc=`echo $[2] | sed ['s/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/']`
+eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o
+if test "$am_t" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+dnl Make sure AC_PROG_CC is never called again, or it will override our
+dnl setting of CC.
+m4_define([AC_PROG_CC],
+ [m4_fatal([AC_PROG_CC cannot be called after AM_PROG_CC_C_O])])
+])
+
+# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
+
+# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 6
+
+# AM_MISSING_PROG(NAME, PROGRAM)
+# ------------------------------
+AC_DEFUN([AM_MISSING_PROG],
+[AC_REQUIRE([AM_MISSING_HAS_RUN])
+$1=${$1-"${am_missing_run}$2"}
+AC_SUBST($1)])
+
+
+# AM_MISSING_HAS_RUN
+# ------------------
+# Define MISSING if not defined so far and test if it supports --run.
+# If it does, set am_missing_run to use it, otherwise, to nothing.
+AC_DEFUN([AM_MISSING_HAS_RUN],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([missing])dnl
+if test x"${MISSING+set}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\	*)
+ MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
+ *)
+ MISSING="\${SHELL} $am_aux_dir/missing" ;;
+ esac
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --run true"; then
+ am_missing_run="$MISSING --run "
+else
+ am_missing_run=
+ AC_MSG_WARN([`missing' script is too old or missing])
+fi
+])
+
+# Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_PROG_MKDIR_P
+# ---------------
+# Check for `mkdir -p'.
+AC_DEFUN([AM_PROG_MKDIR_P],
+[AC_PREREQ([2.60])dnl
+AC_REQUIRE([AC_PROG_MKDIR_P])dnl
+dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P,
+dnl while keeping a definition of mkdir_p for backward compatibility.
+dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile.
+dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of
+dnl Makefile.ins that do not define MKDIR_P, so we do our own
+dnl adjustment using top_builddir (which is defined more often than
+dnl MKDIR_P).
+AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl
+case $mkdir_p in
+ [[\\/$]]* | ?:[[\\/]]*) ;;
+ */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
+esac
+])
+
+# Helper functions for option handling. -*- Autoconf -*-
+
+# Copyright (C) 2001, 2002, 2003, 2005, 2008 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 4
+
+# _AM_MANGLE_OPTION(NAME)
+# -----------------------
+AC_DEFUN([_AM_MANGLE_OPTION],
+[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])])
+
+# _AM_SET_OPTION(NAME)
+# ------------------------------
+# Set option NAME. Presently that only means defining a flag for this option.
+AC_DEFUN([_AM_SET_OPTION],
+[m4_define(_AM_MANGLE_OPTION([$1]), 1)])
+
+# _AM_SET_OPTIONS(OPTIONS)
+# ----------------------------------
+# OPTIONS is a space-separated list of Automake options.
+AC_DEFUN([_AM_SET_OPTIONS],
+[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])])
+
+# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET])
+# -------------------------------------------
+# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+AC_DEFUN([_AM_IF_OPTION],
+[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
+
+# Check to make sure that the build environment is sane. -*- Autoconf -*-
+
+# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 5
+
+# AM_SANITY_CHECK
+# ---------------
+AC_DEFUN([AM_SANITY_CHECK],
+[AC_MSG_CHECKING([whether build environment is sane])
+# Just in case
+sleep 1
+echo timestamp > conftest.file
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name. Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+ *[[\\\"\#\$\&\'\`$am_lf]]*)
+ AC_MSG_ERROR([unsafe absolute working directory name]);;
+esac
+case $srcdir in
+ *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*)
+ AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);;
+esac
+
+# Do `set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$[*]" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ rm -f conftest.file
+ if test "$[*]" != "X $srcdir/configure conftest.file" \
+ && test "$[*]" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
+alias in your environment])
+ fi
+
+ test "$[2]" = conftest.file
+ )
+then
+ # Ok.
+ :
+else
+ AC_MSG_ERROR([newly created file is older than distributed files!
+Check your system clock])
+fi
+AC_MSG_RESULT(yes)])
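The core of the sanity check is just an `ls -t` comparison between a freshly written file and the distributed configure script, which catches badly skewed clocks and file servers handing out future timestamps. A standalone sketch (using ./configure as the reference file is an illustrative assumption):

    #!/bin/sh
    # A file created right now must sort newer than the shipped configure
    # script; if it does not, the system clock (or the file server's) is off.
    sleep 1                            # guarantee a distinct timestamp
    echo timestamp > conftest.file
    set X `ls -Lt ./configure conftest.file 2>/dev/null`
    newest=$2
    rm -f conftest.file
    if test "$newest" = conftest.file; then
      echo "build environment looks sane"
    else
      echo "newly created file is older than distributed files; check your clock" >&2
      exit 1
    fi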
+
+# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_PROG_INSTALL_STRIP
+# ---------------------
+# One issue with vendor `install' (even GNU) is that you can't
+# specify the program used to strip binaries. This is especially
+# annoying in cross-compiling environments, where the build's strip
+# is unlikely to handle the host's binaries.
+# Fortunately install-sh will honor a STRIPPROG variable, so we
+# always use install-sh in `make install-strip', and initialize
+# STRIPPROG with the value of the STRIP variable (set by the user).
+AC_DEFUN([AM_PROG_INSTALL_STRIP],
+[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+# Installed binaries are usually stripped using `strip' when the user
+ # runs `make install-strip'. However `strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the `STRIP' environment variable to overrule this program.
+dnl Don't test for $cross_compiling = yes, because it might be `maybe'.
+if test "$cross_compiling" != no; then
+ AC_CHECK_TOOL([STRIP], [strip], :)
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+AC_SUBST([INSTALL_STRIP_PROGRAM])])
+
+# Copyright (C) 2006, 2008 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 2
+
+# _AM_SUBST_NOTMAKE(VARIABLE)
+# ---------------------------
+# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in.
+# This macro is traced by Automake.
+AC_DEFUN([_AM_SUBST_NOTMAKE])
+
+# AM_SUBST_NOTMAKE(VARIABLE)
+# ---------------------------
+# Public sister of _AM_SUBST_NOTMAKE.
+AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
+
+# Check how to create a tarball. -*- Autoconf -*-
+
+# Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 2
+
+# _AM_PROG_TAR(FORMAT)
+# --------------------
+# Check how to create a tarball in format FORMAT.
+# FORMAT should be one of `v7', `ustar', or `pax'.
+#
+# Substitute a variable $(am__tar) that is a command
+# writing to stdout a FORMAT-tarball containing the directory
+# $tardir.
+# tardir=directory && $(am__tar) > result.tar
+#
+ # Substitute a variable $(am__untar) that extracts such
+# a tarball read from stdin.
+# $(am__untar) < result.tar
+AC_DEFUN([_AM_PROG_TAR],
+[# Always define AMTAR for backward compatibility.
+AM_MISSING_PROG([AMTAR], [tar])
+m4_if([$1], [v7],
+ [am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'],
+ [m4_case([$1], [ustar],, [pax],,
+ [m4_fatal([Unknown tar format])])
+AC_MSG_CHECKING([how to create a $1 tar archive])
+# Loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
+_am_tools=${am_cv_prog_tar_$1-$_am_tools}
+ # Do not fold the above two lines into one, because Tru64 sh and
+# Solaris sh will not grok spaces in the rhs of `-'.
+for _am_tool in $_am_tools
+do
+ case $_am_tool in
+ gnutar)
+ for _am_tar in tar gnutar gtar;
+ do
+ AM_RUN_LOG([$_am_tar --version]) && break
+ done
+ am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
+ am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
+ am__untar="$_am_tar -xf -"
+ ;;
+ plaintar)
+ # Must skip GNU tar: if it does not support --format= it doesn't create
+ # a ustar tarball either.
+ (tar --version) >/dev/null 2>&1 && continue
+ am__tar='tar chf - "$$tardir"'
+ am__tar_='tar chf - "$tardir"'
+ am__untar='tar xf -'
+ ;;
+ pax)
+ am__tar='pax -L -x $1 -w "$$tardir"'
+ am__tar_='pax -L -x $1 -w "$tardir"'
+ am__untar='pax -r'
+ ;;
+ cpio)
+ am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
+ am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
+ am__untar='cpio -i -H $1 -d'
+ ;;
+ none)
+ am__tar=false
+ am__tar_=false
+ am__untar=false
+ ;;
+ esac
+
+ # If the value was cached, stop now. We just wanted to have am__tar
+ # and am__untar set.
+ test -n "${am_cv_prog_tar_$1}" && break
+
+ # tar/untar a dummy directory, and stop if the command works
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ echo GrepMe > conftest.dir/file
+ AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+ rm -rf conftest.dir
+ if test -s conftest.tar; then
+ AM_RUN_LOG([$am__untar <conftest.tar])
+ grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+ fi
+done
+rm -rf conftest.dir
+
+AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
+AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+AC_SUBST([am__tar])
+AC_SUBST([am__untar])
+]) # _AM_PROG_TAR
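The two substituted variables are meant to be eval'ed with $tardir pointing at the directory to pack. A usage sketch with the plain v7 values (the directory name is illustrative; note that the Makefile flavour of am__tar uses `$$tardir` where the shell flavour below uses `$tardir`):

    #!/bin/sh
    # How the am__tar / am__untar substitutions are consumed: pack $tardir
    # to stdout, then unpack the same archive from stdin.
    am__tar='tar chof - "$tardir"'     # v7-format value chosen above
    am__untar='tar xf -'

    mkdir -p dist-src
    echo hello > dist-src/file
    tardir=dist-src
    eval "$am__tar" > result.tar       # create the tarball
    rm -rf dist-src
    eval "$am__untar" < result.tar     # round-trip it back
    cat dist-src/file                  # prints: hello
    rm -rf dist-src result.tar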
+
+m4_include([m4/acx_pthread.m4])
+m4_include([m4/getopt.m4])
+m4_include([m4/gettext.m4])
+m4_include([m4/iconv.m4])
+m4_include([m4/lc_cpucores.m4])
+m4_include([m4/lc_physmem.m4])
+m4_include([m4/lib-ld.m4])
+m4_include([m4/lib-link.m4])
+m4_include([m4/lib-prefix.m4])
+m4_include([m4/libtool.m4])
+m4_include([m4/ltoptions.m4])
+m4_include([m4/ltsugar.m4])
+m4_include([m4/ltversion.m4])
+m4_include([m4/lt~obsolete.m4])
+m4_include([m4/nls.m4])
+m4_include([m4/po.m4])
+m4_include([m4/posix-shell.m4])
+m4_include([m4/progtest.m4])
+m4_include([m4/visibility.m4])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh
new file mode 100755
index 00000000..f0195eca
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+###############################################################################
+#
+# Author: Lasse Collin
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+###############################################################################
+
+# The result of using "autoreconf -fi" should be identical to using this
+# script. I'm leaving this script here just in case someone finds it useful.
+
+set -e -x
+
+${AUTOPOINT:-autopoint} -f
+${LIBTOOLIZE:-libtoolize} -c -f || glibtoolize -c -f
+${ACLOCAL:-aclocal} -I m4
+${AUTOCONF:-autoconf}
+${AUTOHEADER:-autoheader}
+${AUTOMAKE:-automake} -acf --foreign
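Each tool invoked by the script can be pinned through its environment variable, which is handy when several autotools versions are installed side by side. A short usage sketch (the version suffixes are purely illustrative):

    # Regenerate the build system with explicitly chosen tool versions:
    cd storage/tokudb/PerconaFT/third_party/xz-4.999.9beta
    ACLOCAL=aclocal-1.11 AUTOMAKE=automake-1.11 sh ./autogen.sh

    # Or, as the comment in the script notes, the one-shot equivalent:
    autoreconf -fi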
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile
new file mode 100755
index 00000000..ec64c622
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile
@@ -0,0 +1,143 @@
+#! /bin/sh
+# Wrapper for compilers which do not understand `-c -o'.
+
+scriptversion=2009-04-28.21; # UTC
+
+# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2009 Free Software
+# Foundation, Inc.
+# Written by Tom Tromey <tromey@cygnus.com>.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# This file is maintained in Automake, please report
+# bugs to <bug-automake@gnu.org> or send patches to
+# <automake-patches@gnu.org>.
+
+case $1 in
+ '')
+ echo "$0: No command. Try \`$0 --help' for more information." 1>&2
+ exit 1;
+ ;;
+ -h | --h*)
+ cat <<\EOF
+Usage: compile [--help] [--version] PROGRAM [ARGS]
+
+Wrapper for compilers which do not understand `-c -o'.
+Remove `-o dest.o' from ARGS, run PROGRAM with the remaining
+arguments, and rename the output as expected.
+
+If you are trying to build a whole package this is not the
+right script to run: please start by reading the file `INSTALL'.
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+ exit $?
+ ;;
+ -v | --v*)
+ echo "compile $scriptversion"
+ exit $?
+ ;;
+esac
+
+ofile=
+cfile=
+eat=
+
+for arg
+do
+ if test -n "$eat"; then
+ eat=
+ else
+ case $1 in
+ -o)
+ # configure might choose to run compile as `compile cc -o foo foo.c'.
+ # So we strip `-o arg' only if arg is an object.
+ eat=1
+ case $2 in
+ *.o | *.obj)
+ ofile=$2
+ ;;
+ *)
+ set x "$@" -o "$2"
+ shift
+ ;;
+ esac
+ ;;
+ *.c)
+ cfile=$1
+ set x "$@" "$1"
+ shift
+ ;;
+ *)
+ set x "$@" "$1"
+ shift
+ ;;
+ esac
+ fi
+ shift
+done
+
+if test -z "$ofile" || test -z "$cfile"; then
+ # If no `-o' option was seen then we might have been invoked from a
+ # pattern rule where we don't need one. That is ok -- this is a
+ # normal compilation that the losing compiler can handle. If no
+ # `.c' file was seen then we are probably linking. That is also
+ # ok.
+ exec "$@"
+fi
+
+# Name of file we expect compiler to create.
+cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'`
+
+# Create the lock directory.
+# Note: use `[/\\:.-]' here to ensure that we don't use the same name
+# that we are using for the .o file. Also, base the name on the expected
+# object file name, since that is what matters with a parallel build.
+lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d
+while true; do
+ if mkdir "$lockdir" >/dev/null 2>&1; then
+ break
+ fi
+ sleep 1
+done
+# FIXME: race condition here if user kills between mkdir and trap.
+trap "rmdir '$lockdir'; exit 1" 1 2 15
+
+# Run the compile.
+"$@"
+ret=$?
+
+if test -f "$cofile"; then
+ mv "$cofile" "$ofile"
+elif test -f "${cofile}bj"; then
+ mv "${cofile}bj" "$ofile"
+fi
+
+rmdir "$lockdir"
+exit $ret
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
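Taken together with AM_PROG_CC_C_O above: when the probed compiler cannot combine -c and -o, CC is rewritten to go through this wrapper, which strips the -o, compiles, and renames the resulting object. A usage sketch (compiler name and paths are illustrative):

    # Invoke the wrapper exactly as an Automake rule would:
    ./build-aux/compile cc -c -o sub/foo.o sub/foo.c
    # Internally this runs:   cc -c sub/foo.c      (object lands in ./foo.o)
    # and then renames it:    mv foo.o sub/foo.o
    # A lock directory keeps parallel compiles of the same basename apart.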
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess
new file mode 100755
index 00000000..7501b1be
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess
@@ -0,0 +1,1530 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012 Free Software Foundation, Inc.
+
+timestamp='2016-06-22'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Originally written by Per Bothner. Please send patches (context
+# diff format) to <config-patches@gnu.org> and include a ChangeLog
+# entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub. If it succeeds, it prints the system name on stdout, and
+# exits with 0. Otherwise, it exits with 1.
+#
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > $dummy.c ;
+ for c in cc gcc c89 c99 ; do
+ if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ELF__
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # The OS release
+ # Debian GNU/NetBSD machines have a different userland, and
+ # thus, need a distinct triplet. However, they do not need
+ # kernel version information, so it can be replaced with a
+ # suitable tag, in the style of linux-gnu.
+ case "${UNAME_VERSION}" in
+ Debian*)
+ release='-gnu'
+ ;;
+ *)
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ ;;
+ esac
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit ;;
+ *:OpenBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+ exit ;;
+ *:SolidBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:MirBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ alpha:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+ case "$ALPHA_CPU_TYPE" in
+ "EV4 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "EV4.5 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "LCA4 (21066/21068)")
+ UNAME_MACHINE="alpha" ;;
+ "EV5 (21164)")
+ UNAME_MACHINE="alphaev5" ;;
+ "EV5.6 (21164A)")
+ UNAME_MACHINE="alphaev56" ;;
+ "EV5.6 (21164PC)")
+ UNAME_MACHINE="alphapca56" ;;
+ "EV5.7 (21164PC)")
+ UNAME_MACHINE="alphapca57" ;;
+ "EV6 (21264)")
+ UNAME_MACHINE="alphaev6" ;;
+ "EV6.7 (21264A)")
+ UNAME_MACHINE="alphaev67" ;;
+ "EV6.8CB (21264C)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8AL (21264B)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8CX (21264D)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.9A (21264/EV69A)")
+ UNAME_MACHINE="alphaev69" ;;
+ "EV7 (21364)")
+ UNAME_MACHINE="alphaev7" ;;
+ "EV7.9 (21364A)")
+ UNAME_MACHINE="alphaev79" ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+ exitcode=$?
+ trap '' 0
+ exit $exitcode ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit ;;
+ arm:riscos:*:*|arm:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit ;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit ;;
+ DRS?6000:unix:4.0:6*)
+ echo sparc-icl-nx6
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7; exit ;;
+ esac ;;
+ s390x:SunOS:*:*)
+ echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux${UNAME_RELEASE}
+ exit ;;
+ i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+ eval $set_cc_for_build
+ SUN_ARCH="i386"
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH="x86_64"
+ fi
+ fi
+ echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually anything (anything which is not
+ # "atarist" or "atariste" should at least have a processor
+ # > m68000). The system name ranges from "MiNT" through "FreeMiNT"
+ # to the lowercase versions "mint" and "freemint". Finally,
+ # the system name "TOS" denotes a system which is actually not
+ # MiNT. But MiNT is downward compatible with TOS, so this should
+ # be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten${UNAME_RELEASE}
+ exit ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c &&
+ dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`$dummy $dummyarg` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[4567])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if [ ${HP_ARCH} = "hppa2.0w" ]
+ then
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
+ then
+ HP_ARCH="hppa2.0w"
+ else
+ HP_ARCH="hppa64"
+ fi
+ fi
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:FreeBSD:*:*)
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ case ${UNAME_PROCESSOR} in
+ amd64)
+ echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ *)
+ echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ esac
+ exit ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit ;;
+ *:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit ;;
+ i*:MSYS*:*)
+ echo ${UNAME_MACHINE}-pc-msys
+ exit ;;
+ i*:windows32*:*)
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
+ exit ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit ;;
+ *:Interix*:*)
+ case ${UNAME_MACHINE} in
+ x86)
+ echo i586-pc-interix${UNAME_RELEASE}
+ exit ;;
+ authenticamd | genuineintel | EM64T)
+ echo x86_64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ IA64)
+ echo ia64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ esac ;;
+ [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+ echo i${UNAME_MACHINE}-pc-mks
+ exit ;;
+ 8664:Windows_NT:*)
+ echo x86_64-pc-mks
+ exit ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+ # change UNAME_MACHINE based on the output of uname instead of i386?
+ echo i586-pc-interix
+ exit ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
+ exit ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit ;;
+ aarch64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ exit ;;
+ arm*:Linux:*:*)
+ eval $set_cc_for_build
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ else
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-gnueabihf
+ fi
+ fi
+ exit ;;
+ avr32*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ cris:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-gnu
+ exit ;;
+ crisv32:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-gnu
+ exit ;;
+ frv:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ hexagon:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ i*86:Linux:*:*)
+ LIBC=gnu
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #ifdef __dietlibc__
+ LIBC=dietlibc
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
+ echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+ exit ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ m32r*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ mips:Linux:*:* | mips64:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=${UNAME_MACHINE}el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=${UNAME_MACHINE}
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+ ;;
+ or32:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-gnu
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-gnu
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-gnu ;;
+ PA8*) echo hppa2.0-unknown-linux-gnu ;;
+ *) echo hppa-unknown-linux-gnu ;;
+ esac
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-gnu
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-gnu
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux
+ exit ;;
+ sh64*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ tile*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ vax:Linux:*:*)
+ echo ${UNAME_MACHINE}-dec-linux-gnu
+ exit ;;
+ x86_64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ xtensa*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configury will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo ${UNAME_MACHINE}-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-7:SUPER-UX:*:*)
+ echo sx7-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8:SUPER-UX:*:*)
+ echo sx8-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8R:SUPER-UX:*:*)
+ echo sx8r-nec-superux${UNAME_RELEASE}
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ case $UNAME_PROCESSOR in
+ i386)
+ eval $set_cc_for_build
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ UNAME_PROCESSOR="x86_64"
+ fi
+ fi ;;
+ unknown) UNAME_PROCESSOR=powerpc ;;
+ esac
+ echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+ exit ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = "x86"; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NEO-?:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSE-?:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSR-?:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux${UNAME_RELEASE}
+ exit ;;
+ *:DragonFly:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "${UNAME_MACHINE}" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+ exit ;;
+ i*86:rdos:*:*)
+ echo ${UNAME_MACHINE}-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo ${UNAME_MACHINE}-pc-aros
+ exit ;;
+ x86_64:VMkernel:*:*)
+ echo ${UNAME_MACHINE}-unknown-esx
+ exit ;;
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+ printf ("arm-acorn-riscix\n"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+ printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+# include <sys/param.h>
+# if defined (BSD)
+# if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+# else
+# if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# endif
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# else
+ printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+ case `getsysinfo -f cpu_type` in
+ c1*)
+ echo c1-convex-bsd
+ exit ;;
+ c2*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ c34*)
+ echo c34-convex-bsd
+ exit ;;
+ c38*)
+ echo c38-convex-bsd
+ exit ;;
+ c4*)
+ echo c4-convex-bsd
+ exit ;;
+ esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+and
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath
new file mode 100755
index 00000000..c492a93b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath
@@ -0,0 +1,614 @@
+#! /bin/sh
+# Output a system dependent set of variables, describing how to set the
+# run time search path of shared libraries in an executable.
+#
+# Copyright 1996-2006 Free Software Foundation, Inc.
+# Taken from GNU libtool, 2001
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+#
+# The first argument passed to this file is the canonical host specification,
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# The environment variables CC, GCC, LDFLAGS, LD, with_gnu_ld
+# should be set by the caller.
+#
+# The set of defined variables is at the end of this script.
+
+# Known limitations:
+# - On IRIX 6.5 with CC="cc", the run time search patch must not be longer
+# than 256 bytes, otherwise the compiler driver will dump core. The only
+# known workaround is to choose shorter directory names for the build
+# directory and/or the installation directory.
+
+# All known linkers require a `.a' archive for static linking (except MSVC,
+# which needs '.lib').
+libext=a
+shrext=.so
+
+host="$1"
+host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+
+# Code taken from libtool.m4's _LT_CC_BASENAME.
+
+for cc_temp in $CC""; do
+ case $cc_temp in
+ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+ \-*) ;;
+ *) break;;
+ esac
+done
+cc_basename=`echo "$cc_temp" | sed -e 's%^.*/%%'`
+
+# Code taken from libtool.m4's AC_LIBTOOL_PROG_COMPILER_PIC.
+
+wl=
+if test "$GCC" = yes; then
+ wl='-Wl,'
+else
+ case "$host_os" in
+ aix*)
+ wl='-Wl,'
+ ;;
+ darwin*)
+ case $cc_basename in
+ xlc*)
+ wl='-Wl,'
+ ;;
+ esac
+ ;;
+ mingw* | pw32* | os2*)
+ ;;
+ hpux9* | hpux10* | hpux11*)
+ wl='-Wl,'
+ ;;
+ irix5* | irix6* | nonstopux*)
+ wl='-Wl,'
+ ;;
+ newsos6)
+ ;;
+ linux*)
+ case $cc_basename in
+ icc* | ecc*)
+ wl='-Wl,'
+ ;;
+ pgcc | pgf77 | pgf90)
+ wl='-Wl,'
+ ;;
+ ccc*)
+ wl='-Wl,'
+ ;;
+ como)
+ wl='-lopt='
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ wl='-Wl,'
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+ osf3* | osf4* | osf5*)
+ wl='-Wl,'
+ ;;
+ sco3.2v5*)
+ ;;
+ solaris*)
+ wl='-Wl,'
+ ;;
+ sunos4*)
+ wl='-Qoption ld '
+ ;;
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ wl='-Wl,'
+ ;;
+ sysv4*MP*)
+ ;;
+ unicos*)
+ wl='-Wl,'
+ ;;
+ uts4*)
+ ;;
+ esac
+fi
+
+# Code taken from libtool.m4's AC_LIBTOOL_PROG_LD_SHLIBS.
+
+hardcode_libdir_flag_spec=
+hardcode_libdir_separator=
+hardcode_direct=no
+hardcode_minus_L=no
+
+case "$host_os" in
+ cygwin* | mingw* | pw32*)
+ # FIXME: the MSVC++ port hasn't been tested in a loooong time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$GCC" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+ interix*)
+ # we just hope/assume this is gcc and not c89 (= MSVC++)
+ with_gnu_ld=yes
+ ;;
+ openbsd*)
+ with_gnu_ld=no
+ ;;
+esac
+
+ld_shlibs=yes
+if test "$with_gnu_ld" = yes; then
+ # Set some defaults for GNU ld with shared library support. These
+ # are reset later if shared libraries are not supported. Putting them
+ # here allows them to be overridden if necessary.
+ # Unlike libtool, we use -rpath here, not --rpath, since the documented
+ # option of GNU ld is called -rpath, not --rpath.
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ case "$host_os" in
+ aix3* | aix4* | aix5*)
+ # On AIX/PPC, the GNU linker is very broken
+ if test "$host_cpu" != ia64; then
+ ld_shlibs=no
+ fi
+ ;;
+ amigaos*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ # Samuel A. Falvo II <kc5tja@dolphin.openprojects.net> reports
+ # that the semantics of dynamic libraries on AmigaOS, at least up
+ # to version 4, is to share data among multiple programs linked
+ # with the same dynamic library. Since this doesn't match the
+ # behavior of shared libraries on other platforms, we cannot use
+ # them.
+ ld_shlibs=no
+ ;;
+ beos*)
+ if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+ :
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ cygwin* | mingw* | pw32*)
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec='-L$libdir'
+ if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then
+ :
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ interix3*)
+ hardcode_direct=no
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ ;;
+ linux*)
+ if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+ :
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ netbsd*)
+ ;;
+ solaris*)
+ if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then
+ ld_shlibs=no
+ elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+ :
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+ case `$LD -v 2>&1` in
+ *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*)
+ ld_shlibs=no
+ ;;
+ *)
+ if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+ hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+ ;;
+ sunos4*)
+ hardcode_direct=yes
+ ;;
+ *)
+ if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+ :
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+ if test "$ld_shlibs" = no; then
+ hardcode_libdir_flag_spec=
+ fi
+else
+ case "$host_os" in
+ aix3*)
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ hardcode_minus_L=yes
+ if test "$GCC" = yes; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ hardcode_direct=unsupported
+ fi
+ ;;
+ aix4* | aix5*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ else
+ aix_use_runtimelinking=no
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[23]|aix4.[23].*|aix5*)
+ for ld_flag in $LDFLAGS; do
+ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ aix_use_runtimelinking=yes
+ break
+ fi
+ done
+ ;;
+ esac
+ fi
+ hardcode_direct=yes
+ hardcode_libdir_separator=':'
+ if test "$GCC" = yes; then
+ case $host_os in aix4.[012]|aix4.[012].*)
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" && \
+ strings "$collect2name" | grep resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ hardcode_direct=yes
+ else
+ # We have old collect2
+ hardcode_direct=unsupported
+ hardcode_minus_L=yes
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_libdir_separator=
+ fi
+ ;;
+ esac
+ fi
+ # Begin _LT_AC_SYS_LIBPATH_AIX.
+ echo 'int main () { return 0; }' > conftest.c
+ ${CC} ${LDFLAGS} conftest.c -o conftest
+ aix_libpath=`dump -H conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; }
+}'`
+ if test -z "$aix_libpath"; then
+ aix_libpath=`dump -HX64 conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; }
+}'`
+ fi
+ if test -z "$aix_libpath"; then
+ aix_libpath="/usr/lib:/lib"
+ fi
+ rm -f conftest.c conftest
+ # End _LT_AC_SYS_LIBPATH_AIX.
+ if test "$aix_use_runtimelinking" = yes; then
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+ else
+ if test "$host_cpu" = ia64; then
+ hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
+ else
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+ fi
+ fi
+ ;;
+ amigaos*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ # see comment about different semantics on the GNU ld section
+ ld_shlibs=no
+ ;;
+ bsdi[45]*)
+ ;;
+ cygwin* | mingw* | pw32*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec=' '
+ libext=lib
+ ;;
+ darwin* | rhapsody*)
+ hardcode_direct=no
+ if test "$GCC" = yes ; then
+ :
+ else
+ case $cc_basename in
+ xlc*)
+ ;;
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+ fi
+ ;;
+ dgux*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ ;;
+ freebsd1*)
+ ld_shlibs=no
+ ;;
+ freebsd2.2*)
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ ;;
+ freebsd2*)
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ ;;
+ freebsd* | kfreebsd*-gnu | dragonfly*)
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ ;;
+ hpux9*)
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ ;;
+ hpux10*)
+ if test "$with_gnu_ld" = no; then
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ fi
+ ;;
+ hpux11*)
+ if test "$with_gnu_ld" = no; then
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ case $host_cpu in
+ hppa*64*|ia64*)
+ hardcode_direct=no
+ ;;
+ *)
+ hardcode_direct=yes
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ ;;
+ esac
+ fi
+ ;;
+ irix5* | irix6* | nonstopux*)
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+ netbsd*)
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ ;;
+ newsos6)
+ hardcode_direct=yes
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+ openbsd*)
+ hardcode_direct=yes
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ else
+ case "$host_os" in
+ openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*)
+ hardcode_libdir_flag_spec='-R$libdir'
+ ;;
+ *)
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ ;;
+ esac
+ fi
+ ;;
+ os2*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ ;;
+ osf3*)
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+ osf4* | osf5*)
+ if test "$GCC" = yes; then
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ else
+ # Both cc and cxx compiler support -rpath directly
+ hardcode_libdir_flag_spec='-rpath $libdir'
+ fi
+ hardcode_libdir_separator=:
+ ;;
+ solaris*)
+ hardcode_libdir_flag_spec='-R$libdir'
+ ;;
+ sunos4*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ ;;
+ sysv4)
+ case $host_vendor in
+ sni)
+ hardcode_direct=yes # is this really true???
+ ;;
+ siemens)
+ hardcode_direct=no
+ ;;
+ motorola)
+ hardcode_direct=no #Motorola manual says yes, but my tests say they lie
+ ;;
+ esac
+ ;;
+ sysv4.3*)
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ ld_shlibs=yes
+ fi
+ ;;
+ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7*)
+ ;;
+ sysv5* | sco3.2v5* | sco5v6*)
+ hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`'
+ hardcode_libdir_separator=':'
+ ;;
+ uts4*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ ;;
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+fi
+
+# Check dynamic linker characteristics
+# Code taken from libtool.m4's AC_LIBTOOL_SYS_DYNAMIC_LINKER.
+libname_spec='lib$name'
+case "$host_os" in
+ aix3*)
+ ;;
+ aix4* | aix5*)
+ ;;
+ amigaos*)
+ ;;
+ beos*)
+ ;;
+ bsdi[45]*)
+ ;;
+ cygwin* | mingw* | pw32*)
+ shrext=.dll
+ ;;
+ darwin* | rhapsody*)
+ shrext=.dylib
+ ;;
+ dgux*)
+ ;;
+ freebsd1*)
+ ;;
+ kfreebsd*-gnu)
+ ;;
+ freebsd* | dragonfly*)
+ ;;
+ gnu*)
+ ;;
+ hpux9* | hpux10* | hpux11*)
+ case $host_cpu in
+ ia64*)
+ shrext=.so
+ ;;
+ hppa*64*)
+ shrext=.sl
+ ;;
+ *)
+ shrext=.sl
+ ;;
+ esac
+ ;;
+ interix3*)
+ ;;
+ irix5* | irix6* | nonstopux*)
+ case "$host_os" in
+ irix5* | nonstopux*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in
+ *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= ;;
+ *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 ;;
+ *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 ;;
+ *) libsuff= shlibsuff= ;;
+ esac
+ ;;
+ esac
+ ;;
+ linux*oldld* | linux*aout* | linux*coff*)
+ ;;
+ linux*)
+ ;;
+ knetbsd*-gnu)
+ ;;
+ netbsd*)
+ ;;
+ newsos6)
+ ;;
+ nto-qnx*)
+ ;;
+ openbsd*)
+ ;;
+ os2*)
+ libname_spec='$name'
+ shrext=.dll
+ ;;
+ osf3* | osf4* | osf5*)
+ ;;
+ solaris*)
+ ;;
+ sunos4*)
+ ;;
+ sysv4 | sysv4.3*)
+ ;;
+ sysv4*MP*)
+ ;;
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ ;;
+ uts4*)
+ ;;
+esac
+
+sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
+escaped_wl=`echo "X$wl" | sed -e 's/^X//' -e "$sed_quote_subst"`
+shlibext=`echo "$shrext" | sed -e 's,^\.,,'`
+escaped_hardcode_libdir_flag_spec=`echo "X$hardcode_libdir_flag_spec" | sed -e 's/^X//' -e "$sed_quote_subst"`
+
+LC_ALL=C sed -e 's/^\([a-zA-Z0-9_]*\)=/acl_cv_\1=/' <<EOF
+
+# How to pass a linker flag through the compiler.
+wl="$escaped_wl"
+
+# Static library suffix (normally "a").
+libext="$libext"
+
+# Shared library suffix (normally "so").
+shlibext="$shlibext"
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec="$escaped_hardcode_libdir_flag_spec"
+
+# Whether we need a single -rpath flag with a separated argument.
+hardcode_libdir_separator="$hardcode_libdir_separator"
+
+# Set to yes if using DIR/libNAME.so during linking hardcodes DIR into the
+# resulting binary.
+hardcode_direct="$hardcode_direct"
+
+# Set to yes if using the -LDIR flag during linking hardcodes DIR into the
+# resulting binary.
+hardcode_minus_L="$hardcode_minus_L"
+
+EOF
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub
new file mode 100755
index 00000000..8b9b9b53
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub
@@ -0,0 +1,1686 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
+# Free Software Foundation, Inc.
+
+timestamp='2009-04-17'
+
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine. It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+# 02110-1335 USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
+2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit ;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
+ uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
+ kopensolaris*-gnu* | \
+ storm-chaos* | os2-emx* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis | -knuth | -cray)
+ os=
+ basic_machine=$1
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco6)
+ os=-sco5v6
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | am33_2.0 \
+ | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
+ | bfin \
+ | c4x | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | fido | fr30 | frv \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | i370 | i860 | i960 | ia64 \
+ | ip2k | iq2000 \
+ | lm32 \
+ | m32c | m32r | m32rle | m68000 | m68k | m88k \
+ | maxq | mb | microblaze | mcore | mep | metag \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64octeon | mips64octeonel \
+ | mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa32r2 | mipsisa32r2el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipsisa64sr71k | mipsisa64sr71kel \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | moxie \
+ | mt \
+ | msp430 \
+ | nios | nios2 \
+ | ns16k | ns32k \
+ | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
+ | pyramid \
+ | score \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+ | spu | strongarm \
+ | tahoe | thumb | tic4x | tic80 | tron \
+ | v850 | v850e \
+ | we32k \
+ | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
+ | z8k | z80)
+ basic_machine=$basic_machine-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12)
+ # Motorola 68HC11/12.
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ ;;
+ ms1)
+ basic_machine=mt-unknown
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* | avr32-* \
+ | bfin-* | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
+ | clipper-* | craynv-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | elxsi-* \
+ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | i*86-* | i860-* | i960-* | ia64-* \
+ | ip2k-* | iq2000-* \
+ | lm32-* \
+ | m32c-* | m32r-* | m32rle-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64octeon-* | mips64octeonel-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64r5900-* | mips64r5900el-* \
+ | mips64vr-* | mips64vrel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mips64vr5900-* | mips64vr5900el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa32r2-* | mipsisa32r2el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64r2-* | mipsisa64r2el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipstx39-* | mipstx39el-* \
+ | mmix-* \
+ | mt-* \
+ | msp430-* \
+ | nios-* | nios2-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
+ | pyramid-* \
+ | romp-* | rs6000-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+ | sparclite-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
+ | tahoe-* | thumb-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* | tile-* \
+ | tron-* \
+ | v850-* | v850e-* | vax-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
+ | xstormy16-* | xtensa*-* \
+ | ymp-* \
+ | z8k-* | z80-*)
+ ;;
+ # Recognize the basic CPU types without company name, with glob match.
+ xtensa*)
+ basic_machine=$basic_machine-unknown
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ abacus)
+ basic_machine=abacus-unknown
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amd64)
+ basic_machine=x86_64-pc
+ ;;
+ amd64-*)
+ basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aros)
+ basic_machine=i386-pc
+ os=-aros
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=-linux
+ ;;
+ blackfin-*)
+ basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=-cegcc
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ craynv)
+ basic_machine=craynv-cray
+ os=-unicosmp
+ ;;
+ cr16)
+ basic_machine=cr16-unknown
+ os=-elf
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ crisv32 | crisv32-* | etraxfs*)
+ basic_machine=crisv32-axis
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ crx)
+ basic_machine=crx-unknown
+ os=-elf
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dicos)
+ basic_machine=i686-pc
+ os=-dicos
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ os=-msdosdjgpp
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+# I'm not sure what "Sysv32" means. Should this be sysv3.2?
+ i*86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=-linux
+ ;;
+ m68knommu-*)
+ basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ mingw32)
+ basic_machine=i386-pc
+ os=-mingw32
+ ;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ os=-mingw32ce
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ ms1-*)
+ basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ openrisc | openrisc-*)
+ basic_machine=or32-unknown
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=-os400
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=-linux
+ ;;
+ parisc-*)
+ basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pc98)
+ basic_machine=i386-pc
+ ;;
+ pc98-*)
+ basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon | athlon_*)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2 | pentiumiii | pentium3)
+ basic_machine=i686-pc
+ ;;
+ pentium4)
+ basic_machine=i786-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium4-*)
+ basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc) basic_machine=powerpc-unknown
+ ;;
+ ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rdos)
+ basic_machine=i386-pc
+ os=-rdos
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sb1)
+ basic_machine=mipsisa64sb1-unknown
+ ;;
+ sb1el)
+ basic_machine=mipsisa64sb1el-unknown
+ ;;
+ sde)
+ basic_machine=mipsisa32-sde
+ os=-elf
+ ;;
+ sei)
+ basic_machine=mips-sei
+ os=-seiux
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sh5el)
+ basic_machine=sh5le-unknown
+ ;;
+ sh64)
+ basic_machine=sh64-unknown
+ ;;
+ sparclite-wrs | simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ tic54x | c54x*)
+ basic_machine=tic54x-unknown
+ os=-coff
+ ;;
+ tic55x | c55x*)
+ basic_machine=tic55x-unknown
+ os=-coff
+ ;;
+ tic6x | c6x*)
+ basic_machine=tic6x-unknown
+ os=-coff
+ ;;
+ tile*)
+ basic_machine=tile-unknown
+ os=-linux-gnu
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=-tpf
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ xbox)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ z80-*-coff)
+ basic_machine=z80-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ mmix)
+ basic_machine=mmix-knuth
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp10)
+ # there are many clones, so DEC is not a safe bet
+ basic_machine=pdp10-unknown
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+ basic_machine=sh-unknown
+ ;;
+ sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+ # The portable systems comes first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+ | -kopensolaris* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* | -aros* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+ | -openbsd* | -solidbsd* \
+ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* | -cegcc* \
+ | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
+ | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto-qnx*)
+ ;;
+ -nto*)
+ os=`echo $os | sed -e 's|nto|nto-qnx|'`
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ -linux-dietlibc)
+ os=-linux-dietlibc
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -os400*)
+ os=-os400
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -syllable*)
+ os=-syllable
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -tpf*)
+ os=-tpf
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -aros*)
+ os=-aros
+ ;;
+ -kaos*)
+ os=-kaos
+ ;;
+ -zvmoe)
+ os=-zvmoe
+ ;;
+ -dicos*)
+ os=-dicos
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine, or put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ score-*)
+ os=-elf
+ ;;
+ spu-*)
+ os=-elf
+ ;;
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ c4x-* | tic4x-*)
+ os=-coff
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ # This also exists in the configure program, but was not the
+ # default.
+ # os=-sunos4
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mep-*)
+ os=-elf
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-haiku)
+ os=-haiku
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-knuth)
+ os=-mmixware
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -os400*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -tpf*)
+ vendor=ibm
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit
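+# Example (illustrative only): invoking this script as `config.sub vax'
+# takes the default-manufacturer path above (vax -> vax-dec), then the
+# default-OS table (vax-* -> -ultrix4.2), and prints `vax-dec-ultrix4.2'.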
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp
new file mode 100755
index 00000000..df8eea7e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp
@@ -0,0 +1,630 @@
+#! /bin/sh
+# depcomp - compile a program generating dependencies as side-effects
+
+scriptversion=2009-04-28.21; # UTC
+
+# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009 Free
+# Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Alexandre Oliva <oliva@dcc.unicamp.br>.
+
+case $1 in
+ '')
+ echo "$0: No command. Try \`$0 --help' for more information." 1>&2
+ exit 1;
+ ;;
+ -h | --h*)
+ cat <<\EOF
+Usage: depcomp [--help] [--version] PROGRAM [ARGS]
+
+Run PROGRAM ARGS to compile a file, generating dependencies
+as side-effects.
+
+Environment variables:
+  depmode     Dependency tracking mode.
+  source      Source file read by `PROGRAM ARGS'.
+  object      Object file output by `PROGRAM ARGS'.
+  DEPDIR      directory where to store dependencies.
+  depfile     Dependency file to output.
+  tmpdepfile  Temporary file to use when outputting dependencies.
+  libtool     Whether libtool is used (yes/no).
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+ exit $?
+ ;;
+ -v | --v*)
+ echo "depcomp $scriptversion"
+ exit $?
+ ;;
+esac
+
+if test -z "$depmode" || test -z "$source" || test -z "$object"; then
+ echo "depcomp: Variables source, object and depmode must be set" 1>&2
+ exit 1
+fi
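+# Example invocation (illustrative only; the compiler and file names are
+# hypothetical -- in a real build, automake's depend.m4 supplies them):
+#   depmode=gcc3 source=foo.c object=foo.o ./depcomp gcc -c -o foo.o foo.c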
+
+# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po.
+depfile=${depfile-`echo "$object" |
+ sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`}
+tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
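+# Example (illustrative only): with the defaults above, object=sub/bar.o
+# yields depfile=sub/.deps/bar.Po and tmpdepfile=sub/.deps/bar.TPo, and
+# object=sub/bar.obj also yields sub/.deps/bar.Po via the `Pobj' rule.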
+
+rm -f "$tmpdepfile"
+
+# Some modes work just like other modes, but use different flags. We
+# parameterize here, but still list the modes in the big case below,
+# to make depend.m4 easier to write. Note that we *cannot* use a case
+# here, because this file can only contain one case statement.
+if test "$depmode" = hp; then
+ # HP compiler uses -M and no extra arg.
+ gccflag=-M
+ depmode=gcc
+fi
+
+if test "$depmode" = dashXmstdout; then
+ # This is just like dashmstdout with a different argument.
+ dashmflag=-xM
+ depmode=dashmstdout
+fi
+
+cygpath_u="cygpath -u -f -"
+if test "$depmode" = msvcmsys; then
+ # This is just like msvisualcpp but w/o cygpath translation.
+ # Just convert the backslash-escaped backslashes to single forward
+ # slashes to satisfy depend.m4
+ cygpath_u="sed s,\\\\\\\\,/,g"
+ depmode=msvisualcpp
+fi
+
+case "$depmode" in
+gcc3)
+## gcc 3 implements dependency tracking that does exactly what
+## we want. Yay! Note: for some reason libtool 1.4 doesn't like
+## it if -MD -MP comes after the -MF stuff. Hmm.
+## Unfortunately, FreeBSD c89 acceptance of flags depends upon
+## the command line argument order; so add the flags where they
+## appear in depend2.am. Note that the slowdown incurred here
+## affects only configure: in makefiles, %FASTDEP% shortcuts this.
+ for arg
+ do
+ case $arg in
+ -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;;
+ *) set fnord "$@" "$arg" ;;
+ esac
+ shift # fnord
+ shift # $arg
+ done
+ "$@"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ mv "$tmpdepfile" "$depfile"
+ ;;
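+# Example (illustrative only; hypothetical command): the argument loop above
+# turns
+#   gcc -c -o foo.o foo.c
+# into
+#   gcc -MT foo.o -MD -MP -MF .deps/foo.TPo -c -o foo.o foo.c
+# assuming object=foo.o and tmpdepfile=.deps/foo.TPo.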
+
+gcc)
+## There are various ways to get dependency output from gcc. Here's
+## why we pick this rather obscure method:
+## - Don't want to use -MD because we'd like the dependencies to end
+## up in a subdir. Having to rename by hand is ugly.
+## (We might end up doing this anyway to support other compilers.)
+## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like
+## -MM, not -M (despite what the docs say).
+## - Using -M directly means running the compiler twice (even worse
+## than renaming).
+ if test -z "$gccflag"; then
+ gccflag=-MD,
+ fi
+ "$@" -Wp,"$gccflag$tmpdepfile"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
+## The second -e expression handles DOS-style file names with drive letters.
+ sed -e 's/^[^:]*: / /' \
+ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
+## This next piece of magic avoids the `deleted header file' problem.
+## The problem is that when a header file which appears in a .P file
+## is deleted, the dependency causes make to die (because there is
+## typically no way to rebuild the header). We avoid this by adding
+## dummy dependencies for each header file. Too bad gcc doesn't do
+## this for us directly.
+ tr ' ' '
+' < "$tmpdepfile" |
+## Some versions of gcc put a space before the `:'. On the theory
+## that the space means something, we add a space to the output as
+## well.
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
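+# Example (illustrative only): if $tmpdepfile contains `foo.o: foo.c foo.h',
+# the first sed above leaves ` foo.c foo.h' after the `$object : \' header,
+# and the tr/sed pipeline then appends the dummy targets `foo.c :' and
+# `foo.h :' so make does not die if a header is later deleted.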
+
+hp)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+sgi)
+ if test "$libtool" = yes; then
+ "$@" "-Wp,-MDupdate,$tmpdepfile"
+ else
+ "$@" -MDupdate "$tmpdepfile"
+ fi
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+
+  if test -f "$tmpdepfile"; then  # yes, the sourcefile depends on other files
+ echo "$object : \\" > "$depfile"
+
+ # Clip off the initial element (the dependent). Don't try to be
+ # clever and replace this with sed code, as IRIX sed won't handle
+ # lines with more than a fixed number of characters (4096 in
+ # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines;
+ # the IRIX cc adds comments like `#:fec' to the end of the
+ # dependency line.
+ tr ' ' '
+' < "$tmpdepfile" \
+ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \
+ tr '
+' ' ' >> "$depfile"
+ echo >> "$depfile"
+
+ # The second pass generates a dummy entry for each header file.
+ tr ' ' '
+' < "$tmpdepfile" \
+ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
+ >> "$depfile"
+ else
+ # The sourcefile does not contain any dependencies, so just
+ # store a dummy comment line, to avoid errors with the Makefile
+ # "include basename.Plo" scheme.
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+aix)
+ # The C for AIX Compiler uses -M and outputs the dependencies
+ # in a .u file. In older versions, this file always lives in the
+ # current directory. Also, the AIX compiler puts `$object:' at the
+ # start of each line; $object doesn't have directory information.
+ # Version 6 uses the directory in both cases.
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+ test "x$dir" = "x$object" && dir=
+ base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+ if test "$libtool" = yes; then
+ tmpdepfile1=$dir$base.u
+ tmpdepfile2=$base.u
+ tmpdepfile3=$dir.libs/$base.u
+ "$@" -Wc,-M
+ else
+ tmpdepfile1=$dir$base.u
+ tmpdepfile2=$dir$base.u
+ tmpdepfile3=$dir$base.u
+ "$@" -M
+ fi
+ stat=$?
+
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ if test -f "$tmpdepfile"; then
+ # Each line is of the form `foo.o: dependent.h'.
+ # Do two passes, one to just change these to
+ # `$object: dependent.h' and one to simply `dependent.h:'.
+ sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
+ # That's a tab and a space in the [].
+ sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
+ else
+ # The sourcefile does not contain any dependencies, so just
+ # store a dummy comment line, to avoid errors with the Makefile
+ # "include basename.Plo" scheme.
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
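+# Example (illustrative only): for a $tmpdepfile line `foo.o: sub/foo.h'
+# and object=sub/foo.o, the first pass above emits `sub/foo.o: sub/foo.h'
+# and the second pass emits the dummy target `sub/foo.h:'.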
+
+icc)
+ # Intel's C compiler understands `-MD -MF file'. However on
+ # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c
+ # ICC 7.0 will fill foo.d with something like
+ # foo.o: sub/foo.c
+ # foo.o: sub/foo.h
+ # which is wrong. We want:
+ # sub/foo.o: sub/foo.c
+ # sub/foo.o: sub/foo.h
+ # sub/foo.c:
+ # sub/foo.h:
+ # ICC 7.1 will output
+ # foo.o: sub/foo.c sub/foo.h
+ # and will wrap long lines using \ :
+ # foo.o: sub/foo.c ... \
+ # sub/foo.h ... \
+ # ...
+
+ "$@" -MD -MF "$tmpdepfile"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ # Each line is of the form `foo.o: dependent.h',
+ # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'.
+ # Do two passes, one to just change these to
+ # `$object: dependent.h' and one to simply `dependent.h:'.
+ sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile"
+ # Some versions of the HPUX 10.20 sed can't process this invocation
+ # correctly. Breaking it into two sed invocations is a workaround.
+ sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" |
+ sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+hp2)
+ # The "hp" stanza above does not work with aCC (C++) and HP's ia64
+ # compilers, which have integrated preprocessors. The correct option
+ # to use with these is +Maked; it writes dependencies to a file named
+ # 'foo.d', which lands next to the object file, wherever that
+ # happens to be.
+ # Much of this is similar to the tru64 case; see comments there.
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+ test "x$dir" = "x$object" && dir=
+ base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+ if test "$libtool" = yes; then
+ tmpdepfile1=$dir$base.d
+ tmpdepfile2=$dir.libs/$base.d
+ "$@" -Wc,+Maked
+ else
+ tmpdepfile1=$dir$base.d
+ tmpdepfile2=$dir$base.d
+ "$@" +Maked
+ fi
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile1" "$tmpdepfile2"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ if test -f "$tmpdepfile"; then
+ sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile"
+ # Add `dependent.h:' lines.
+ sed -ne '2,${
+ s/^ *//
+ s/ \\*$//
+ s/$/:/
+ p
+ }' "$tmpdepfile" >> "$depfile"
+ else
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile" "$tmpdepfile2"
+ ;;
+
+tru64)
+ # The Tru64 compiler uses -MD to generate dependencies as a side
+ # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'.
+ # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
+ # dependencies in `foo.d' instead, so we check for that too.
+ # Subdirectories are respected.
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+ test "x$dir" = "x$object" && dir=
+ base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+
+ if test "$libtool" = yes; then
+ # With Tru64 cc, shared objects can also be used to make a
+ # static library. This mechanism is used in libtool 1.4 series to
+ # handle both shared and static libraries in a single compilation.
+ # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d.
+ #
+ # With libtool 1.5 this exception was removed, and libtool now
+ # generates 2 separate objects for the 2 libraries. These two
+ # compilations output dependencies in $dir.libs/$base.o.d and
+ # in $dir$base.o.d. We have to check for both files, because
+ # one of the two compilations can be disabled. We should prefer
+ # $dir$base.o.d over $dir.libs/$base.o.d because the latter is
+ # automatically cleaned when .libs/ is deleted, while ignoring
+ # the former would cause a distcleancheck panic.
+ tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4
+ tmpdepfile2=$dir$base.o.d # libtool 1.5
+ tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5
+ tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504
+ "$@" -Wc,-MD
+ else
+ tmpdepfile1=$dir$base.o.d
+ tmpdepfile2=$dir$base.d
+ tmpdepfile3=$dir$base.d
+ tmpdepfile4=$dir$base.d
+ "$@" -MD
+ fi
+
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ if test -f "$tmpdepfile"; then
+ sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
+ # That's a tab and a space in the [].
+ sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
+ else
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+#nosideeffect)
+ # This comment above is used by automake to tell side-effect
+ # dependency tracking mechanisms from slower ones.
+
+dashmstdout)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout, regardless of -o.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ # Remove `-o $object'.
+ IFS=" "
+ for arg
+ do
+ case $arg in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift # fnord
+ shift # $arg
+ ;;
+ esac
+ done
+
+ test -z "$dashmflag" && dashmflag=-M
+ # Require at least two characters before searching for `:'
+ # in the target name. This is to cope with DOS-style filenames:
+ # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise.
+ "$@" $dashmflag |
+ sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile"
+ rm -f "$depfile"
+ cat < "$tmpdepfile" > "$depfile"
+ tr ' ' '
+' < "$tmpdepfile" | \
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+dashXmstdout)
+ # This case only exists to satisfy depend.m4. It is never actually
+ # run, as this mode is specially recognized in the preamble.
+ exit 1
+ ;;
+
+makedepend)
+ "$@" || exit $?
+ # Remove any Libtool call
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+ # X makedepend
+ shift
+ cleared=no eat=no
+ for arg
+ do
+ case $cleared in
+ no)
+ set ""; shift
+ cleared=yes ;;
+ esac
+ if test $eat = yes; then
+ eat=no
+ continue
+ fi
+ case "$arg" in
+ -D*|-I*)
+ set fnord "$@" "$arg"; shift ;;
+ # Strip any option that makedepend may not understand. Remove
+ # the object too, otherwise makedepend will parse it as a source file.
+ -arch)
+ eat=yes ;;
+ -*|$object)
+ ;;
+ *)
+ set fnord "$@" "$arg"; shift ;;
+ esac
+ done
+ obj_suffix=`echo "$object" | sed 's/^.*\././'`
+ touch "$tmpdepfile"
+ ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
+ rm -f "$depfile"
+ cat < "$tmpdepfile" > "$depfile"
+ sed '1,2d' "$tmpdepfile" | tr ' ' '
+' | \
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile" "$tmpdepfile".bak
+ ;;
+
+cpp)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ # Remove `-o $object'.
+ IFS=" "
+ for arg
+ do
+ case $arg in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift # fnord
+ shift # $arg
+ ;;
+ esac
+ done
+
+ "$@" -E |
+ sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' |
+ sed '$ s: \\$::' > "$tmpdepfile"
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ cat < "$tmpdepfile" >> "$depfile"
+ sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+msvisualcpp)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ IFS=" "
+ for arg
+ do
+ case "$arg" in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
+ set fnord "$@"
+ shift
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift
+ shift
+ ;;
+ esac
+ done
+ "$@" -E 2>/dev/null |
+ sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile"
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile"
+ echo " " >> "$depfile"
+ sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+msvcmsys)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+none)
+ exec "$@"
+ ;;
+
+*)
+ echo "Unknown depmode $depmode" 1>&2
+ exit 1
+ ;;
+esac
+
+exit 0
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh
new file mode 100755
index 00000000..6781b987
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh
@@ -0,0 +1,520 @@
+#!/bin/sh
+# install - install a program, script, or datafile
+
+scriptversion=2009-04-28.21; # UTC
+
+# This originates from X11R5 (mit/util/scripts/install.sh), which was
+# later released in X11R6 (xc/config/util/install.sh) with the
+# following copyright and license.
+#
+# Copyright (C) 1994 X Consortium
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
+# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+# Except as contained in this notice, the name of the X Consortium shall not
+# be used in advertising or otherwise to promote the sale, use or other deal-
+# ings in this Software without prior written authorization from the X Consor-
+# tium.
+#
+#
+# FSF changes to this file are in the public domain.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.
+
+nl='
+'
+IFS=" "" $nl"
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit=${DOITPROG-}
+if test -z "$doit"; then
+ doit_exec=exec
+else
+ doit_exec=$doit
+fi
+
+# Put in absolute file names if you don't have them in your path;
+# or use environment vars.
+
+chgrpprog=${CHGRPPROG-chgrp}
+chmodprog=${CHMODPROG-chmod}
+chownprog=${CHOWNPROG-chown}
+cmpprog=${CMPPROG-cmp}
+cpprog=${CPPROG-cp}
+mkdirprog=${MKDIRPROG-mkdir}
+mvprog=${MVPROG-mv}
+rmprog=${RMPROG-rm}
+stripprog=${STRIPPROG-strip}
+
+posix_glob='?'
+initialize_posix_glob='
+ test "$posix_glob" != "?" || {
+ if (set -f) 2>/dev/null; then
+ posix_glob=
+ else
+ posix_glob=:
+ fi
+ }
+'
+
+posix_mkdir=
+
+# Desired mode of installed file.
+mode=0755
+
+chgrpcmd=
+chmodcmd=$chmodprog
+chowncmd=
+mvcmd=$mvprog
+rmcmd="$rmprog -f"
+stripcmd=
+
+src=
+dst=
+dir_arg=
+dst_arg=
+
+copy_on_change=false
+no_target_directory=
+
+usage="\
+Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
+ or: $0 [OPTION]... SRCFILES... DIRECTORY
+ or: $0 [OPTION]... -t DIRECTORY SRCFILES...
+ or: $0 [OPTION]... -d DIRECTORIES...
+
+In the 1st form, copy SRCFILE to DSTFILE.
+In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
+In the 4th, create DIRECTORIES.
+
+Options:
+ --help display this help and exit.
+ --version display version info and exit.
+
+ -c (ignored)
+ -C install only if different (preserve the last data modification time)
+ -d create directories instead of installing files.
+ -g GROUP $chgrpprog installed files to GROUP.
+ -m MODE $chmodprog installed files to MODE.
+ -o USER $chownprog installed files to USER.
+ -s $stripprog installed files.
+ -t DIRECTORY install into DIRECTORY.
+ -T report an error if DSTFILE is a directory.
+
+Environment variables override the default commands:
+ CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
+ RMPROG STRIPPROG
+"
+
+while test $# -ne 0; do
+ case $1 in
+ -c) ;;
+
+ -C) copy_on_change=true;;
+
+ -d) dir_arg=true;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift;;
+
+ --help) echo "$usage"; exit $?;;
+
+ -m) mode=$2
+ case $mode in
+ *' '* | *' '* | *'
+'* | *'*'* | *'?'* | *'['*)
+ echo "$0: invalid mode: $mode" >&2
+ exit 1;;
+ esac
+ shift;;
+
+ -o) chowncmd="$chownprog $2"
+ shift;;
+
+ -s) stripcmd=$stripprog;;
+
+ -t) dst_arg=$2
+ shift;;
+
+ -T) no_target_directory=true;;
+
+ --version) echo "$0 $scriptversion"; exit $?;;
+
+ --) shift
+ break;;
+
+ -*) echo "$0: invalid option: $1" >&2
+ exit 1;;
+
+ *) break;;
+ esac
+ shift
+done
+
+if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
+ # When -d is used, all remaining arguments are directories to create.
+ # When -t is used, the destination is already specified.
+ # Otherwise, the last argument is the destination. Remove it from $@.
+ for arg
+ do
+ if test -n "$dst_arg"; then
+ # $@ is not empty: it contains at least $arg.
+ set fnord "$@" "$dst_arg"
+ shift # fnord
+ fi
+ shift # arg
+ dst_arg=$arg
+ done
+fi
+
+if test $# -eq 0; then
+ if test -z "$dir_arg"; then
+ echo "$0: no input file specified." >&2
+ exit 1
+ fi
+ # It's OK to call `install-sh -d' without argument.
+ # This can happen when creating conditional directories.
+ exit 0
+fi
+
+if test -z "$dir_arg"; then
+ trap '(exit $?); exit' 1 2 13 15
+
+ # Set umask so as not to create temps with too-generous modes.
+ # However, 'strip' requires both read and write access to temps.
+ case $mode in
+ # Optimize common cases.
+ *644) cp_umask=133;;
+ *755) cp_umask=22;;
+
+ *[0-7])
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw='% 200'
+ fi
+ cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
+ *)
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw=,u+rw
+ fi
+ cp_umask=$mode$u_plus_rw;;
+ esac
+fi
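+# Example (illustrative only): the default mode=0755 and a mode of 0644 hit
+# the fast cases above (cp_umask=22 and 133); a mode such as 0750 with no -s
+# takes the expr branch: 777 - 0750 % 1000 = 27, so cp_umask=27.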
+
+for src
+do
+ # Protect names starting with `-'.
+ case $src in
+ -*) src=./$src;;
+ esac
+
+ if test -n "$dir_arg"; then
+ dst=$src
+ dstdir=$dst
+ test -d "$dstdir"
+ dstdir_status=$?
+ else
+
+ # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
+ # might cause directories to be created, which would be especially bad
+ # if $src (and thus $dsttmp) contains '*'.
+ if test ! -f "$src" && test ! -d "$src"; then
+ echo "$0: $src does not exist." >&2
+ exit 1
+ fi
+
+ if test -z "$dst_arg"; then
+ echo "$0: no destination specified." >&2
+ exit 1
+ fi
+
+ dst=$dst_arg
+ # Protect names starting with `-'.
+ case $dst in
+ -*) dst=./$dst;;
+ esac
+
+ # If destination is a directory, append the input filename; won't work
+ # if double slashes aren't ignored.
+ if test -d "$dst"; then
+ if test -n "$no_target_directory"; then
+ echo "$0: $dst_arg: Is a directory" >&2
+ exit 1
+ fi
+ dstdir=$dst
+ dst=$dstdir/`basename "$src"`
+ dstdir_status=0
+ else
+ # Prefer dirname, but fall back on a substitute if dirname fails.
+ dstdir=`
+ (dirname "$dst") 2>/dev/null ||
+ expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$dst" : 'X\(//\)[^/]' \| \
+ X"$dst" : 'X\(//\)$' \| \
+ X"$dst" : 'X\(/\)' \| . 2>/dev/null ||
+ echo X"$dst" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'
+ `
+
+ test -d "$dstdir"
+ dstdir_status=$?
+ fi
+ fi
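+  # Example (illustrative only; hypothetical paths): for dst=/usr/local/bin/prog
+  # the dirname/expr/sed fallback above yields dstdir=/usr/local/bin, while a
+  # dst with no slash, such as dst=prog, yields dstdir=. instead.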
+
+ obsolete_mkdir_used=false
+
+ if test $dstdir_status != 0; then
+ case $posix_mkdir in
+ '')
+ # Create intermediate dirs using mode 755 as modified by the umask.
+ # This is like FreeBSD 'install' as of 1997-10-28.
+ umask=`umask`
+ case $stripcmd.$umask in
+ # Optimize common cases.
+ *[2367][2367]) mkdir_umask=$umask;;
+ .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
+
+ *[0-7])
+ mkdir_umask=`expr $umask + 22 \
+ - $umask % 100 % 40 + $umask % 20 \
+ - $umask % 10 % 4 + $umask % 2
+ `;;
+ *) mkdir_umask=$umask,go-w;;
+ esac
+
+ # With -d, create the new directory with the user-specified mode.
+ # Otherwise, rely on $mkdir_umask.
+ if test -n "$dir_arg"; then
+ mkdir_mode=-m$mode
+ else
+ mkdir_mode=
+ fi
+
+ posix_mkdir=false
+ case $umask in
+ *[123567][0-7][0-7])
+ # POSIX mkdir -p sets u+wx bits regardless of umask, which
+ # is incompatible with FreeBSD 'install' when (umask & 300) != 0.
+ ;;
+ *)
+ tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
+ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
+
+ if (umask $mkdir_umask &&
+ exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
+ then
+ if test -z "$dir_arg" || {
+ # Check for POSIX incompatibilities with -m.
+ # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
+ # other-writeable bit of parent directory when it shouldn't.
+ # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
+ ls_ld_tmpdir=`ls -ld "$tmpdir"`
+ case $ls_ld_tmpdir in
+ d????-?r-*) different_mode=700;;
+ d????-?--*) different_mode=755;;
+ *) false;;
+ esac &&
+ $mkdirprog -m$different_mode -p -- "$tmpdir" && {
+ ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
+ test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
+ }
+ }
+ then posix_mkdir=:
+ fi
+ rmdir "$tmpdir/d" "$tmpdir"
+ else
+ # Remove any dirs left behind by ancient mkdir implementations.
+ rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
+ fi
+ trap '' 0;;
+ esac;;
+ esac
+
+ if
+ $posix_mkdir && (
+ umask $mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
+ )
+ then :
+ else
+
+ # The umask is ridiculous, or mkdir does not conform to POSIX,
+ # or it failed possibly due to a race condition. Create the
+ # directory the slow way, step by step, checking for races as we go.
+
+ case $dstdir in
+ /*) prefix='/';;
+ -*) prefix='./';;
+ *) prefix='';;
+ esac
+
+ eval "$initialize_posix_glob"
+
+ oIFS=$IFS
+ IFS=/
+ $posix_glob set -f
+ set fnord $dstdir
+ shift
+ $posix_glob set +f
+ IFS=$oIFS
+
+ prefixes=
+
+ for d
+ do
+ test -z "$d" && continue
+
+ prefix=$prefix$d
+ if test -d "$prefix"; then
+ prefixes=
+ else
+ if $posix_mkdir; then
+	    (umask $mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
+ # Don't fail if two instances are running concurrently.
+ test -d "$prefix" || exit 1
+ else
+ case $prefix in
+ *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) qprefix=$prefix;;
+ esac
+ prefixes="$prefixes '$qprefix'"
+ fi
+ fi
+ prefix=$prefix/
+ done
+
+ if test -n "$prefixes"; then
+ # Don't fail if two instances are running concurrently.
+ (umask $mkdir_umask &&
+ eval "\$doit_exec \$mkdirprog $prefixes") ||
+ test -d "$dstdir" || exit 1
+ obsolete_mkdir_used=true
+ fi
+ fi
+ fi
+
+ if test -n "$dir_arg"; then
+ { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
+ { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
+ test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
+ else
+
+ # Make a couple of temp file names in the proper directory.
+ dsttmp=$dstdir/_inst.$$_
+ rmtmp=$dstdir/_rm.$$_
+
+ # Trap to clean up those temp files at exit.
+ trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
+
+ # Copy the file name to the temp name.
+ (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
+
+ # and set any options; do chmod last to preserve setuid bits.
+ #
+ # If any of these fail, we abort the whole thing. If we want to
+ # ignore errors from any of these, just make sure not to ignore
+ # errors from the above "$doit $cpprog $src $dsttmp" command.
+ #
+ { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
+ { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
+ { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
+
+ # If -C, don't bother to copy if it wouldn't change the file.
+ if $copy_on_change &&
+ old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` &&
+ new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` &&
+
+ eval "$initialize_posix_glob" &&
+ $posix_glob set -f &&
+ set X $old && old=:$2:$4:$5:$6 &&
+ set X $new && new=:$2:$4:$5:$6 &&
+ $posix_glob set +f &&
+
+ test "$old" = "$new" &&
+ $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
+ then
+ rm -f "$dsttmp"
+ else
+ # Rename the file to the real destination.
+ $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
+
+ # The rename failed, perhaps because mv can't rename something else
+ # to itself, or perhaps because mv is so ancient that it does not
+ # support -f.
+ {
+ # Now remove or move aside any old file at destination location.
+ # We try this two ways since rm can't unlink itself on some
+ # systems and the destination file might be busy for other
+ # reasons. In this case, the final cleanup might fail but the new
+ # file should still install successfully.
+ {
+ test ! -f "$dst" ||
+ $doit $rmcmd -f "$dst" 2>/dev/null ||
+ { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
+ { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
+ } ||
+ { echo "$0: cannot unlink or rename $dst" >&2
+ (exit 1); exit 1
+ }
+ } &&
+
+ # Now rename the file to the real destination.
+ $doit $mvcmd "$dsttmp" "$dst"
+ }
+ fi || exit 1
+
+ trap '' 0
+ fi
+done
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh
new file mode 100755
index 00000000..58923bb7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh
@@ -0,0 +1,8406 @@
+# Generated from ltmain.m4sh.
+
+# ltmain.sh (GNU libtool) 2.2.6
+# Written by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions. There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# GNU Libtool is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Libtool; see the file COPYING. If not, a copy
+# can be downloaded from http://www.gnu.org/licenses/gpl.html,
+# or obtained by writing to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA.
+
+# Usage: $progname [OPTION]... [MODE-ARG]...
+#
+# Provide generalized library-building support services.
+#
+# --config show all configuration variables
+# --debug enable verbose shell tracing
+# -n, --dry-run display commands without modifying any files
+# --features display basic configuration information and exit
+# --mode=MODE use operation mode MODE
+# --preserve-dup-deps don't remove duplicate dependency libraries
+# --quiet, --silent don't print informational messages
+# --tag=TAG use configuration variables from tag TAG
+# -v, --verbose print informational messages (default)
+# --version print version information
+# -h, --help print short or long help message
+#
+# MODE must be one of the following:
+#
+# clean remove files from the build directory
+# compile compile a source file into a libtool object
+# execute automatically set library path, then run a program
+# finish complete the installation of libtool libraries
+# install install libraries or executables
+# link create a library or an executable
+# uninstall remove libraries from an installed directory
+#
+# MODE-ARGS vary depending on the MODE.
+# Try `$progname --help --mode=MODE' for a more detailed description of MODE.
+#
+# When reporting a bug, please describe a test case to reproduce it and
+# include the following information:
+#
+# host-triplet: $host
+# shell: $SHELL
+# compiler: $LTCC
+# compiler flags: $LTCFLAGS
+# linker: $LD (gnu? $with_gnu_ld)
+# $progname: (GNU libtool) 2.2.6
+# automake: $automake_version
+# autoconf: $autoconf_version
+#
+# Report bugs to <bug-libtool@gnu.org>.
+
+PROGRAM=ltmain.sh
+PACKAGE=libtool
+VERSION=2.2.6
+TIMESTAMP=""
+package_revision=1.3012
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+# NLS nuisances: We save the old values to restore during execute mode.
+# Only set LANG and LC_ALL to C if already set.
+# These must not be set unconditionally because not all systems understand
+# e.g. LANG=C (notably SCO).
+lt_user_locale=
+lt_safe_locale=
+for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
+do
+ eval "if test \"\${$lt_var+set}\" = set; then
+ save_$lt_var=\$$lt_var
+ $lt_var=C
+ export $lt_var
+ lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\"
+ lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\"
+ fi"
+done
+
+$lt_unset CDPATH
+
+
+
+
+
+: ${CP="cp -f"}
+: ${ECHO="echo"}
+: ${EGREP="/usr/bin/grep -E"}
+: ${FGREP="/usr/bin/grep -F"}
+: ${GREP="/usr/bin/grep"}
+: ${LN_S="ln -s"}
+: ${MAKE="make"}
+: ${MKDIR="mkdir"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+: ${SED="/opt/local/bin/gsed"}
+: ${SHELL="${CONFIG_SHELL-/bin/sh}"}
+: ${Xsed="$SED -e 1s/^X//"}
+
+# Global variables:
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing.
+EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake.
+
+exit_status=$EXIT_SUCCESS
+
+# Make sure IFS has a sensible default
+lt_nl='
+'
+IFS=" $lt_nl"
+
+dirname="s,/[^/]*$,,"
+basename="s,^.*/,,"
+
+# func_dirname_and_basename file append nondir_replacement
+# perform func_basename and func_dirname in a single function
+# call:
+#   dirname:  Compute the dirname of FILE.  If nonempty,
+#             add APPEND to the result, otherwise set result
+#             to NONDIR_REPLACEMENT.
+#             value returned in "$func_dirname_result"
+#   basename: Compute filename of FILE.
+#             value returned in "$func_basename_result"
+# Implementation must be kept synchronized with func_dirname
+# and func_basename. For efficiency, we do not delegate to
+# those functions but instead duplicate the functionality here.
+func_dirname_and_basename ()
+{
+ # Extract subdirectory from the argument.
+ func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"`
+ if test "X$func_dirname_result" = "X${1}"; then
+ func_dirname_result="${3}"
+ else
+ func_dirname_result="$func_dirname_result${2}"
+ fi
+ func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"`
+}
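+# Example (illustrative only; hypothetical argument): calling
+#   func_dirname_and_basename "/usr/lib/libfoo.la" "/" "."
+# sets func_dirname_result=/usr/lib/ and func_basename_result=libfoo.la,
+# while an argument with no slash leaves func_dirname_result=".".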
+
+# Generated shell functions inserted here.
+
+# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
+# is ksh but when the shell is invoked as "sh" and the current value of
+# the _XPG environment variable is not equal to 1 (one), the special
+# positional parameter $0, within a function call, is the name of the
+# function.
+progpath="$0"
+
+# The name of this program:
+# In the unlikely event $progname began with a '-', it would play havoc with
+# func_echo (imagine progname=-n), so we prepend ./ in that case:
+func_dirname_and_basename "$progpath"
+progname=$func_basename_result
+case $progname in
+ -*) progname=./$progname ;;
+esac
+
+# Make sure we have an absolute path for reexecution:
+case $progpath in
+ [\\/]*|[A-Za-z]:\\*) ;;
+ *[\\/]*)
+ progdir=$func_dirname_result
+ progdir=`cd "$progdir" && pwd`
+ progpath="$progdir/$progname"
+ ;;
+ *)
+ save_IFS="$IFS"
+ IFS=:
+ for progdir in $PATH; do
+ IFS="$save_IFS"
+ test -x "$progdir/$progname" && break
+ done
+ IFS="$save_IFS"
+ test -n "$progdir" || progdir=`pwd`
+ progpath="$progdir/$progname"
+ ;;
+esac
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed="${SED}"' -e 1s/^X//'
+sed_quote_subst='s/\([`"$\\]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\(["`\\]\)/\\\1/g'
+
+# Re-`\' parameter expansions in output of double_quote_subst that were
+# `\'-ed in input to the same. If an odd number of `\' preceded a '$'
+# in input to double_quote_subst, that '$' was protected from expansion.
+# Since each input `\' is now two `\'s, look for any number of runs of
+# four `\'s followed by two `\'s and then a '$'; re-`\' that '$'.
+bs='\\'
+bs2='\\\\'
+bs4='\\\\\\\\'
+dollar='\$'
+sed_double_backslash="\
+ s/$bs4/&\\
+/g
+ s/^$bs2$dollar/$bs&/
+ s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g
+ s/\n//g"
+
+# Standard options:
+opt_dry_run=false
+opt_help=false
+opt_quiet=false
+opt_verbose=false
+opt_warning=:
+
+# func_echo arg...
+# Echo program name prefixed message, along with the current mode
+# name if it has been set yet.
+func_echo ()
+{
+ $ECHO "$progname${mode+: }$mode: $*"
+}
+
+# func_verbose arg...
+# Echo program name prefixed message in verbose mode only.
+func_verbose ()
+{
+ $opt_verbose && func_echo ${1+"$@"}
+
+ # A bug in bash halts the script if the last line of a function
+ # fails when set -e is in force, so we need another command to
+ # work around that:
+ :
+}
+
+# func_error arg...
+# Echo program name prefixed message to standard error.
+func_error ()
+{
+ $ECHO "$progname${mode+: }$mode: "${1+"$@"} 1>&2
+}
+
+# func_warning arg...
+# Echo program name prefixed warning message to standard error.
+func_warning ()
+{
+ $opt_warning && $ECHO "$progname${mode+: }$mode: warning: "${1+"$@"} 1>&2
+
+ # bash bug again:
+ :
+}
+
+# func_fatal_error arg...
+# Echo program name prefixed message to standard error, and exit.
+func_fatal_error ()
+{
+ func_error ${1+"$@"}
+ exit $EXIT_FAILURE
+}
+
+# func_fatal_help arg...
+# Echo program name prefixed message to standard error, followed by
+# a help hint, and exit.
+func_fatal_help ()
+{
+ func_error ${1+"$@"}
+ func_fatal_error "$help"
+}
+help="Try \`$progname --help' for more information." ## default
+
+
+# func_grep expression filename
+# Check whether EXPRESSION matches any line of FILENAME, without output.
+func_grep ()
+{
+ $GREP "$1" "$2" >/dev/null 2>&1
+}
+
+
+# func_mkdir_p directory-path
+# Make sure the entire path to DIRECTORY-PATH is available.
+func_mkdir_p ()
+{
+ my_directory_path="$1"
+ my_dir_list=
+
+ if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then
+
+ # Protect directory names starting with `-'
+ case $my_directory_path in
+ -*) my_directory_path="./$my_directory_path" ;;
+ esac
+
+ # While some portion of DIR does not yet exist...
+ while test ! -d "$my_directory_path"; do
+ # ...make a list in topmost first order. Use a colon delimited
+      # list in case some portion of the path contains whitespace.
+ my_dir_list="$my_directory_path:$my_dir_list"
+
+ # If the last portion added has no slash in it, the list is done
+ case $my_directory_path in */*) ;; *) break ;; esac
+
+ # ...otherwise throw away the child directory and loop
+ my_directory_path=`$ECHO "X$my_directory_path" | $Xsed -e "$dirname"`
+ done
+ my_dir_list=`$ECHO "X$my_dir_list" | $Xsed -e 's,:*$,,'`
+
+ save_mkdir_p_IFS="$IFS"; IFS=':'
+ for my_dir in $my_dir_list; do
+ IFS="$save_mkdir_p_IFS"
+      # mkdir can fail with a `File exists' error if two processes
+ # try to create one of the directories concurrently. Don't
+ # stop in that case!
+ $MKDIR "$my_dir" 2>/dev/null || :
+ done
+ IFS="$save_mkdir_p_IFS"
+
+ # Bail out if we (or some other process) failed to create a directory.
+ test -d "$my_directory_path" || \
+ func_fatal_error "Failed to create \`$1'"
+ fi
+}
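+# Example (illustrative only; hypothetical path): func_mkdir_p "a/b/c" with
+# none of the components existing builds the colon list `a:a/b:a/b/c' and
+# then creates the directories in that (topmost-first) order.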
+
+
+# func_mktempdir [string]
+# Make a temporary directory that won't clash with other running
+# libtool processes, and avoids race conditions if possible. If
+# given, STRING is the basename for that directory.
+func_mktempdir ()
+{
+ my_template="${TMPDIR-/tmp}/${1-$progname}"
+
+ if test "$opt_dry_run" = ":"; then
+ # Return a directory name, but don't create it in dry-run mode
+ my_tmpdir="${my_template}-$$"
+ else
+
+ # If mktemp works, use that first and foremost
+ my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null`
+
+ if test ! -d "$my_tmpdir"; then
+ # Failing that, at least try and use $RANDOM to avoid a race
+ my_tmpdir="${my_template}-${RANDOM-0}$$"
+
+ save_mktempdir_umask=`umask`
+ umask 0077
+ $MKDIR "$my_tmpdir"
+ umask $save_mktempdir_umask
+ fi
+
+ # If we're not in dry-run mode, bomb out on failure
+ test -d "$my_tmpdir" || \
+ func_fatal_error "cannot create temporary directory \`$my_tmpdir'"
+ fi
+
+ $ECHO "X$my_tmpdir" | $Xsed
+}
+
+
+# func_quote_for_eval arg
+# Aesthetically quote ARG to be evaled later.
+# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT
+# is double-quoted, suitable for a subsequent eval, whereas
+# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters
+# which are still active within double quotes backslashified.
+func_quote_for_eval ()
+{
+ case $1 in
+ *[\\\`\"\$]*)
+ func_quote_for_eval_unquoted_result=`$ECHO "X$1" | $Xsed -e "$sed_quote_subst"` ;;
+ *)
+ func_quote_for_eval_unquoted_result="$1" ;;
+ esac
+
+ case $func_quote_for_eval_unquoted_result in
+ # Double-quote args containing shell metacharacters to delay
+    # word splitting, command substitution and variable
+ # expansion for a subsequent eval.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\""
+ ;;
+ *)
+ func_quote_for_eval_result="$func_quote_for_eval_unquoted_result"
+ esac
+}
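+# Example (illustrative only): for the argument `a b$c',
+# func_quote_for_eval_unquoted_result becomes `a b\$c' and
+# func_quote_for_eval_result becomes `"a b\$c"', which survives a later
+# eval with the literal `$' intact.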
+
+
+# func_quote_for_expand arg
+# Aesthetically quote ARG to be evaled later; same as above,
+# but do not quote variable references.
+func_quote_for_expand ()
+{
+ case $1 in
+ *[\\\`\"]*)
+ my_arg=`$ECHO "X$1" | $Xsed \
+ -e "$double_quote_subst" -e "$sed_double_backslash"` ;;
+ *)
+ my_arg="$1" ;;
+ esac
+
+ case $my_arg in
+ # Double-quote args containing shell metacharacters to delay
+ # word splitting and command substitution for a subsequent eval.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ my_arg="\"$my_arg\""
+ ;;
+ esac
+
+ func_quote_for_expand_result="$my_arg"
+}
+
+
+# func_show_eval cmd [fail_exp]
+# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is
+# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP
+# is given, then evaluate it.
+func_show_eval ()
+{
+ my_cmd="$1"
+ my_fail_exp="${2-:}"
+
+ ${opt_silent-false} || {
+ func_quote_for_expand "$my_cmd"
+ eval "func_echo $func_quote_for_expand_result"
+ }
+
+ if ${opt_dry_run-false}; then :; else
+ eval "$my_cmd"
+ my_status=$?
+ if test "$my_status" -eq 0; then :; else
+ eval "(exit $my_status); $my_fail_exp"
+ fi
+ fi
+}
+
+
+# func_show_eval_locale cmd [fail_exp]
+# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is
+# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP
+# is given, then evaluate it. Use the saved locale for evaluation.
+func_show_eval_locale ()
+{
+ my_cmd="$1"
+ my_fail_exp="${2-:}"
+
+ ${opt_silent-false} || {
+ func_quote_for_expand "$my_cmd"
+ eval "func_echo $func_quote_for_expand_result"
+ }
+
+ if ${opt_dry_run-false}; then :; else
+ eval "$lt_user_locale
+ $my_cmd"
+ my_status=$?
+ eval "$lt_safe_locale"
+ if test "$my_status" -eq 0; then :; else
+ eval "(exit $my_status); $my_fail_exp"
+ fi
+ fi
+}
+
+
+
+
+
+# func_version
+# Echo version message to standard output and exit.
+func_version ()
+{
+ $SED -n '/^# '$PROGRAM' (GNU /,/# warranty; / {
+ s/^# //
+ s/^# *$//
+ s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/
+ p
+ }' < "$progpath"
+ exit $?
+}
+
+# func_usage
+# Echo short help message to standard output and exit.
+func_usage ()
+{
+ $SED -n '/^# Usage:/,/# -h/ {
+ s/^# //
+ s/^# *$//
+ s/\$progname/'$progname'/
+ p
+ }' < "$progpath"
+ $ECHO
+ $ECHO "run \`$progname --help | more' for full usage"
+ exit $?
+}
+
+# func_help
+# Echo long help message to standard output and exit.
+func_help ()
+{
+ $SED -n '/^# Usage:/,/# Report bugs to/ {
+ s/^# //
+ s/^# *$//
+ s*\$progname*'$progname'*
+ s*\$host*'"$host"'*
+ s*\$SHELL*'"$SHELL"'*
+ s*\$LTCC*'"$LTCC"'*
+ s*\$LTCFLAGS*'"$LTCFLAGS"'*
+ s*\$LD*'"$LD"'*
+ s/\$with_gnu_ld/'"$with_gnu_ld"'/
+ s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/
+ s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/
+ p
+ }' < "$progpath"
+ exit $?
+}
+
+# func_missing_arg argname
+# Echo program name prefixed message to standard error and set global
+# exit_cmd.
+func_missing_arg ()
+{
+ func_error "missing argument for $1"
+ exit_cmd=exit
+}
+
+exit_cmd=:
+
+
+
+
+
+# Check that we have a working $ECHO.
+if test "X$1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X$1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t'; then
+ # Yippee, $ECHO works!
+ :
+else
+ # Restart under the correct shell, and then maybe $ECHO will work.
+ exec $SHELL "$progpath" --no-reexec ${1+"$@"}
+fi
+
+if test "X$1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<EOF
+$*
+EOF
+ exit $EXIT_SUCCESS
+fi
+
+magic="%%%MAGIC variable%%%"
+magic_exe="%%%MAGIC EXE variable%%%"
+
+# Global variables.
+# $mode is unset
+nonopt=
+execute_dlfiles=
+preserve_args=
+lo2o="s/\\.lo\$/.${objext}/"
+o2lo="s/\\.${objext}\$/.lo/"
+extracted_archives=
+extracted_serial=0
+
+opt_dry_run=false
+opt_duplicate_deps=false
+opt_silent=false
+opt_debug=:
+
+# If this variable is set in any of the actions, the command in it
+# will be execed at the end. This prevents here-documents from being
+# left over by shells.
+exec_cmd=
+
+# func_fatal_configuration arg...
+# Echo program name prefixed message to standard error, followed by
+# a configuration failure hint, and exit.
+func_fatal_configuration ()
+{
+ func_error ${1+"$@"}
+ func_error "See the $PACKAGE documentation for more information."
+ func_fatal_error "Fatal configuration error."
+}
+
+
+# func_config
+# Display the configuration for all the tags in this script.
+func_config ()
+{
+ re_begincf='^# ### BEGIN LIBTOOL'
+ re_endcf='^# ### END LIBTOOL'
+
+ # Default configuration.
+ $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath"
+
+ # Now print the configurations for the tags.
+ for tagname in $taglist; do
+ $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath"
+ done
+
+ exit $?
+}
+
+# func_features
+# Display the features supported by this script.
+func_features ()
+{
+ $ECHO "host: $host"
+ if test "$build_libtool_libs" = yes; then
+ $ECHO "enable shared libraries"
+ else
+ $ECHO "disable shared libraries"
+ fi
+ if test "$build_old_libs" = yes; then
+ $ECHO "enable static libraries"
+ else
+ $ECHO "disable static libraries"
+ fi
+
+ exit $?
+}
+
+# func_enable_tag tagname
+# Verify that TAGNAME is valid, and either flag an error and exit, or
+# enable the TAGNAME tag. We also add TAGNAME to the global $taglist
+# variable here.
+func_enable_tag ()
+{
+ # Global variable:
+ tagname="$1"
+
+ re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$"
+ re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$"
+ sed_extractcf="/$re_begincf/,/$re_endcf/p"
+
+ # Validate tagname.
+ case $tagname in
+ *[!-_A-Za-z0-9,/]*)
+ func_fatal_error "invalid tag name: $tagname"
+ ;;
+ esac
+
+ # Don't test for the "default" C tag, as we know it's
+ # there but not specially marked.
+ case $tagname in
+ CC) ;;
+ *)
+ if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then
+ taglist="$taglist $tagname"
+
+ # Evaluate the configuration. Be careful to quote the path
+ # and the sed script, to avoid splitting on whitespace, but
+ # also don't use non-portable quotes within backquotes within
+ # quotes, so we have to do it in 2 steps:
+ extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"`
+ eval "$extractedcf"
+ else
+ func_error "ignoring unknown tag $tagname"
+ fi
+ ;;
+ esac
+}
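+
+# Illustrative note (not from upstream libtool): tagged configurations are
+# stored verbatim in this script between marker comment pairs such as
+#   # ### BEGIN LIBTOOL TAG CONFIG: CXX
+#   ...variable assignments...
+#   # ### END LIBTOOL TAG CONFIG: CXX
+# func_enable_tag simply extracts that region with sed and evals it.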
+
+# Parse options once, thoroughly. This comes as soon as possible in
+# the script to make things like `libtool --version' happen quickly.
+{
+
+ # Shorthand for --mode=foo, only valid as the first argument
+ case $1 in
+ clean|clea|cle|cl)
+ shift; set dummy --mode clean ${1+"$@"}; shift
+ ;;
+ compile|compil|compi|comp|com|co|c)
+ shift; set dummy --mode compile ${1+"$@"}; shift
+ ;;
+ execute|execut|execu|exec|exe|ex|e)
+ shift; set dummy --mode execute ${1+"$@"}; shift
+ ;;
+ finish|finis|fini|fin|fi|f)
+ shift; set dummy --mode finish ${1+"$@"}; shift
+ ;;
+ install|instal|insta|inst|ins|in|i)
+ shift; set dummy --mode install ${1+"$@"}; shift
+ ;;
+ link|lin|li|l)
+ shift; set dummy --mode link ${1+"$@"}; shift
+ ;;
+ uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u)
+ shift; set dummy --mode uninstall ${1+"$@"}; shift
+ ;;
+ esac
+
+ # Parse non-mode specific arguments:
+ while test "$#" -gt 0; do
+ opt="$1"
+ shift
+
+ case $opt in
+ --config) func_config ;;
+
+ --debug) preserve_args="$preserve_args $opt"
+ func_echo "enabling shell trace mode"
+ opt_debug='set -x'
+ $opt_debug
+ ;;
+
+ -dlopen) test "$#" -eq 0 && func_missing_arg "$opt" && break
+ execute_dlfiles="$execute_dlfiles $1"
+ shift
+ ;;
+
+ --dry-run | -n) opt_dry_run=: ;;
+ --features) func_features ;;
+ --finish) mode="finish" ;;
+
+ --mode) test "$#" -eq 0 && func_missing_arg "$opt" && break
+ case $1 in
+ # Valid mode arguments:
+ clean) ;;
+ compile) ;;
+ execute) ;;
+ finish) ;;
+ install) ;;
+ link) ;;
+ relink) ;;
+ uninstall) ;;
+
+ # Catch anything else as an error
+ *) func_error "invalid argument for $opt"
+ exit_cmd=exit
+ break
+ ;;
+ esac
+
+ mode="$1"
+ shift
+ ;;
+
+ --preserve-dup-deps)
+ opt_duplicate_deps=: ;;
+
+ --quiet|--silent) preserve_args="$preserve_args $opt"
+ opt_silent=:
+ ;;
+
+ --verbose| -v) preserve_args="$preserve_args $opt"
+ opt_silent=false
+ ;;
+
+ --tag) test "$#" -eq 0 && func_missing_arg "$opt" && break
+ preserve_args="$preserve_args $opt $1"
+ func_enable_tag "$1" # tagname is set here
+ shift
+ ;;
+
+ # Separate optargs to long options:
+ -dlopen=*|--mode=*|--tag=*)
+ func_opt_split "$opt"
+ set dummy "$func_opt_split_opt" "$func_opt_split_arg" ${1+"$@"}
+ shift
+ ;;
+
+ -\?|-h) func_usage ;;
+ --help) opt_help=: ;;
+ --version) func_version ;;
+
+ -*) func_fatal_help "unrecognized option \`$opt'" ;;
+
+ *) nonopt="$opt"
+ break
+ ;;
+ esac
+ done
+
+
+ case $host in
+ *cygwin* | *mingw* | *pw32* | *cegcc*)
+ # don't eliminate duplications in $postdeps and $predeps
+ opt_duplicate_compiler_generated_deps=:
+ ;;
+ *)
+ opt_duplicate_compiler_generated_deps=$opt_duplicate_deps
+ ;;
+ esac
+
+ # Having warned about all mis-specified options, bail out if
+ # anything was wrong.
+ $exit_cmd $EXIT_FAILURE
+}
+
+# func_check_version_match
+# Ensure that we are using m4 macros and a libtool script from the same
+# release of libtool.
+func_check_version_match ()
+{
+ if test "$package_revision" != "$macro_revision"; then
+ if test "$VERSION" != "$macro_version"; then
+ if test -z "$macro_version"; then
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from an older release.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+ else
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from $PACKAGE $macro_version.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+ fi
+ else
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision,
+$progname: but the definition of this LT_INIT comes from revision $macro_revision.
+$progname: You should recreate aclocal.m4 with macros from revision $package_revision
+$progname: of $PACKAGE $VERSION and run autoconf again.
+_LT_EOF
+ fi
+
+ exit $EXIT_MISMATCH
+ fi
+}
+
+
+## ----------- ##
+## Main. ##
+## ----------- ##
+
+$opt_help || {
+ # Sanity checks first:
+ func_check_version_match
+
+ if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
+ func_fatal_configuration "not configured to build any kind of library"
+ fi
+
+ test -z "$mode" && func_fatal_error "error: you must specify a MODE."
+
+
+ # Expand shrext_cmds to get the standard shared library extension
+ # (on some hosts, e.g. Darwin, it is a shell expression that must be evaled).
+ eval std_shrext=\"$shrext_cmds\"
+
+
+ # Only execute mode is allowed to have -dlopen flags.
+ if test -n "$execute_dlfiles" && test "$mode" != execute; then
+ func_error "unrecognized option \`-dlopen'"
+ $ECHO "$help" 1>&2
+ exit $EXIT_FAILURE
+ fi
+
+ # Change the help message to a mode-specific one.
+ generic_help="$help"
+ help="Try \`$progname --help --mode=$mode' for more information."
+}
+
+
+# func_lalib_p file
+# True iff FILE is a libtool `.la' library or `.lo' object file.
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_lalib_p ()
+{
+ test -f "$1" &&
+ $SED -e 4q "$1" 2>/dev/null \
+ | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1
+}
+
+# func_lalib_unsafe_p file
+# True iff FILE is a libtool `.la' library or `.lo' object file.
+# This function implements the same check as func_lalib_p without
+# resorting to external programs. To this end, it temporarily redirects
+# stdin (saving the original on file descriptor 5) and restores it afterwards.
+# As a safety measure, use it only where a negative result would be
+# fatal anyway. Works if `file' does not exist.
+func_lalib_unsafe_p ()
+{
+ lalib_p=no
+ if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then
+ for lalib_p_l in 1 2 3 4
+ do
+ read lalib_p_line
+ case "$lalib_p_line" in
+ \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;;
+ esac
+ done
+ exec 0<&5 5<&-
+ fi
+ test "$lalib_p" = yes
+}
+
+# func_ltwrapper_script_p file
+# True iff FILE is a libtool wrapper script
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_script_p ()
+{
+ func_lalib_p "$1"
+}
+
+# func_ltwrapper_executable_p file
+# True iff FILE is a libtool wrapper executable
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_executable_p ()
+{
+ func_ltwrapper_exec_suffix=
+ case $1 in
+ *.exe) ;;
+ *) func_ltwrapper_exec_suffix=.exe ;;
+ esac
+ $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1
+}
+
+# func_ltwrapper_scriptname file
+# Assumes FILE is an ltwrapper_executable, and uses it to determine
+# the appropriate filename for a temporary ltwrapper_script.
+func_ltwrapper_scriptname ()
+{
+ func_ltwrapper_scriptname_result=""
+ if func_ltwrapper_executable_p "$1"; then
+ func_dirname_and_basename "$1" "" "."
+ func_stripname '' '.exe' "$func_basename_result"
+ func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper"
+ fi
+}
+
+# func_ltwrapper_p file
+# True iff FILE is a libtool wrapper script or wrapper executable
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_p ()
+{
+ func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1"
+}
+
+
+# func_execute_cmds commands fail_cmd
+# Execute tilde-delimited COMMANDS.
+# If FAIL_CMD is given, eval that upon failure.
+# FAIL_CMD may read-access the current command in variable CMD!
+func_execute_cmds ()
+{
+ $opt_debug
+ save_ifs=$IFS; IFS='~'
+ for cmd in $1; do
+ IFS=$save_ifs
+ eval cmd=\"$cmd\"
+ func_show_eval "$cmd" "${2-:}"
+ done
+ IFS=$save_ifs
+}
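+
+# Illustrative note (not from upstream libtool): the COMMANDS argument is a
+# single string whose individual commands are separated by `~'. For example,
+# a hypothetical configuration value such as
+#   old_postinstall_cmds='chmod 644 $oldlib~$RANLIB $oldlib'
+# is split on `~', and each piece is expanded with eval and run through
+# func_show_eval above.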
+
+
+# func_source file
+# Source FILE, adding directory component if necessary.
+# Note that it is not necessary on cygwin/mingw to append a dot to
+# FILE even if both FILE and FILE.exe exist: automatic-append-.exe
+# behavior happens only for exec(3), not for open(2)! Also, sourcing
+# `FILE.' does not work on cygwin managed mounts.
+func_source ()
+{
+ $opt_debug
+ case $1 in
+ */* | *\\*) . "$1" ;;
+ *) . "./$1" ;;
+ esac
+}
+
+
+# func_infer_tag arg
+# Infer tagged configuration to use if any are available and
+# if one wasn't chosen via the "--tag" command line option.
+# Only attempt this if the compiler in the base compile
+# command doesn't match the default compiler.
+# arg is usually of the form 'gcc ...'
+func_infer_tag ()
+{
+ $opt_debug
+ if test -n "$available_tags" && test -z "$tagname"; then
+ CC_quoted=
+ for arg in $CC; do
+ func_quote_for_eval "$arg"
+ CC_quoted="$CC_quoted $func_quote_for_eval_result"
+ done
+ case $@ in
+ # Blanks in the command may have been stripped by the calling shell,
+ # but not from the CC environment variable when configure was run.
+ " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) ;;
+ # Blanks at the start of $base_compile will cause this to fail
+ # if we don't check for them as well.
+ *)
+ for z in $available_tags; do
+ if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
+ # Evaluate the configuration.
+ eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
+ CC_quoted=
+ for arg in $CC; do
+ # Double-quote args containing other shell metacharacters.
+ func_quote_for_eval "$arg"
+ CC_quoted="$CC_quoted $func_quote_for_eval_result"
+ done
+ case "$@ " in
+ " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*)
+ # The compiler in the base compile command matches
+ # the one in the tagged configuration.
+ # Assume this is the tagged configuration we want.
+ tagname=$z
+ break
+ ;;
+ esac
+ fi
+ done
+ # If $tagname still isn't set, then no tagged configuration
+ # was found, so let the user know that the "--tag" command
+ # line option must be used.
+ if test -z "$tagname"; then
+ func_echo "unable to infer tagged configuration"
+ func_fatal_error "specify a tag with \`--tag'"
+# else
+# func_verbose "using $tagname tagged configuration"
+ fi
+ ;;
+ esac
+ fi
+}
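+
+# Illustrative note (not from upstream libtool): tag inference only matters
+# when the compile command does not start with the configured $CC. A caller
+# can always bypass it by naming the tag explicitly, for example
+# (hypothetical invocation):
+#   libtool --tag=CXX --mode=compile g++ -c foo.cpp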
+
+
+
+# func_write_libtool_object output_name pic_name nonpic_name
+# Create a libtool object file (analogous to a ".la" file),
+# but don't create it if we're doing a dry run.
+func_write_libtool_object ()
+{
+ write_libobj=${1}
+ if test "$build_libtool_libs" = yes; then
+ write_lobj=\'${2}\'
+ else
+ write_lobj=none
+ fi
+
+ if test "$build_old_libs" = yes; then
+ write_oldobj=\'${3}\'
+ else
+ write_oldobj=none
+ fi
+
+ $opt_dry_run || {
+ cat >${write_libobj}T <<EOF
+# $write_libobj - a libtool object file
+# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object=$write_lobj
+
+# Name of the non-PIC object
+non_pic_object=$write_oldobj
+
+EOF
+ $MV "${write_libobj}T" "${write_libobj}"
+ }
+}
+
+# func_mode_compile arg...
+func_mode_compile ()
+{
+ $opt_debug
+ # Get the compilation command and the source file.
+ base_compile=
+ srcfile="$nonopt" # always keep a non-empty value in "srcfile"
+ suppress_opt=yes
+ suppress_output=
+ arg_mode=normal
+ libobj=
+ later=
+ pie_flag=
+
+ for arg
+ do
+ case $arg_mode in
+ arg )
+ # do not "continue". Instead, add this to base_compile
+ lastarg="$arg"
+ arg_mode=normal
+ ;;
+
+ target )
+ libobj="$arg"
+ arg_mode=normal
+ continue
+ ;;
+
+ normal )
+ # Accept any command-line options.
+ case $arg in
+ -o)
+ test -n "$libobj" && \
+ func_fatal_error "you cannot specify \`-o' more than once"
+ arg_mode=target
+ continue
+ ;;
+
+ -pie | -fpie | -fPIE)
+ pie_flag="$pie_flag $arg"
+ continue
+ ;;
+
+ -shared | -static | -prefer-pic | -prefer-non-pic)
+ later="$later $arg"
+ continue
+ ;;
+
+ -no-suppress)
+ suppress_opt=no
+ continue
+ ;;
+
+ -Xcompiler)
+ arg_mode=arg # the next one goes into the "base_compile" arg list
+ continue # The current "srcfile" will either be retained or
+ ;; # replaced later. I would guess that would be a bug.
+
+ -Wc,*)
+ func_stripname '-Wc,' '' "$arg"
+ args=$func_stripname_result
+ lastarg=
+ save_ifs="$IFS"; IFS=','
+ for arg in $args; do
+ IFS="$save_ifs"
+ func_quote_for_eval "$arg"
+ lastarg="$lastarg $func_quote_for_eval_result"
+ done
+ IFS="$save_ifs"
+ func_stripname ' ' '' "$lastarg"
+ lastarg=$func_stripname_result
+
+ # Add the arguments to base_compile.
+ base_compile="$base_compile $lastarg"
+ continue
+ ;;
+
+ *)
+ # Accept the current argument as the source file.
+ # The previous "srcfile" becomes the current argument.
+ #
+ lastarg="$srcfile"
+ srcfile="$arg"
+ ;;
+ esac # case $arg
+ ;;
+ esac # case $arg_mode
+
+ # Aesthetically quote the previous argument.
+ func_quote_for_eval "$lastarg"
+ base_compile="$base_compile $func_quote_for_eval_result"
+ done # for arg
+
+ case $arg_mode in
+ arg)
+ func_fatal_error "you must specify an argument for -Xcompile"
+ ;;
+ target)
+ func_fatal_error "you must specify a target with \`-o'"
+ ;;
+ *)
+ # Get the name of the library object.
+ test -z "$libobj" && {
+ func_basename "$srcfile"
+ libobj="$func_basename_result"
+ }
+ ;;
+ esac
+
+ # Recognize several different file suffixes.
+ # If the user specifies -o file.o, it is replaced with file.lo
+ case $libobj in
+ *.[cCFSifmso] | \
+ *.ada | *.adb | *.ads | *.asm | \
+ *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
+ *.[fF][09]? | *.for | *.java | *.obj | *.sx)
+ func_xform "$libobj"
+ libobj=$func_xform_result
+ ;;
+ esac
+
+ case $libobj in
+ *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;;
+ *)
+ func_fatal_error "cannot determine name of library object from \`$libobj'"
+ ;;
+ esac
+
+ func_infer_tag $base_compile
+
+ for arg in $later; do
+ case $arg in
+ -shared)
+ test "$build_libtool_libs" != yes && \
+ func_fatal_configuration "can not build a shared library"
+ build_old_libs=no
+ continue
+ ;;
+
+ -static)
+ build_libtool_libs=no
+ build_old_libs=yes
+ continue
+ ;;
+
+ -prefer-pic)
+ pic_mode=yes
+ continue
+ ;;
+
+ -prefer-non-pic)
+ pic_mode=no
+ continue
+ ;;
+ esac
+ done
+
+ func_quote_for_eval "$libobj"
+ test "X$libobj" != "X$func_quote_for_eval_result" \
+ && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \
+ && func_warning "libobj name \`$libobj' may not contain shell special characters."
+ func_dirname_and_basename "$obj" "/" ""
+ objname="$func_basename_result"
+ xdir="$func_dirname_result"
+ lobj=${xdir}$objdir/$objname
+
+ test -z "$base_compile" && \
+ func_fatal_help "you must specify a compilation command"
+
+ # Delete any leftover library objects.
+ if test "$build_old_libs" = yes; then
+ removelist="$obj $lobj $libobj ${libobj}T"
+ else
+ removelist="$lobj $libobj ${libobj}T"
+ fi
+
+ # On Cygwin there's no "real" PIC flag so we must build both object types
+ case $host_os in
+ cygwin* | mingw* | pw32* | os2* | cegcc*)
+ pic_mode=default
+ ;;
+ esac
+ if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then
+ # non-PIC code in shared libraries is not supported
+ pic_mode=default
+ fi
+
+ # Calculate the filename of the output object if the compiler does
+ # not support -o with -c
+ if test "$compiler_c_o" = no; then
+ output_obj=`$ECHO "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
+ lockfile="$output_obj.lock"
+ else
+ output_obj=
+ need_locks=no
+ lockfile=
+ fi
+
+ # Lock this critical section if it is needed
+ # We use this script file to make the link; this avoids creating a new file
+ if test "$need_locks" = yes; then
+ until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
+ func_echo "Waiting for $lockfile to be removed"
+ sleep 2
+ done
+ elif test "$need_locks" = warn; then
+ if test -f "$lockfile"; then
+ $ECHO "\
+*** ERROR, $lockfile exists and contains:
+`cat $lockfile 2>/dev/null`
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $opt_dry_run || $RM $removelist
+ exit $EXIT_FAILURE
+ fi
+ removelist="$removelist $output_obj"
+ $ECHO "$srcfile" > "$lockfile"
+ fi
+
+ $opt_dry_run || $RM $removelist
+ removelist="$removelist $lockfile"
+ trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15
+
+ if test -n "$fix_srcfile_path"; then
+ eval srcfile=\"$fix_srcfile_path\"
+ fi
+ func_quote_for_eval "$srcfile"
+ qsrcfile=$func_quote_for_eval_result
+
+ # Only build a PIC object if we are building libtool libraries.
+ if test "$build_libtool_libs" = yes; then
+ # Without this assignment, base_compile gets emptied.
+ fbsd_hideous_sh_bug=$base_compile
+
+ if test "$pic_mode" != no; then
+ command="$base_compile $qsrcfile $pic_flag"
+ else
+ # Don't build PIC code
+ command="$base_compile $qsrcfile"
+ fi
+
+ func_mkdir_p "$xdir$objdir"
+
+ if test -z "$output_obj"; then
+ # Place PIC objects in $objdir
+ command="$command -o $lobj"
+ fi
+
+ func_show_eval_locale "$command" \
+ 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE'
+
+ if test "$need_locks" = warn &&
+ test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
+ $ECHO "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $opt_dry_run || $RM $removelist
+ exit $EXIT_FAILURE
+ fi
+
+ # Just move the object if needed, then go on to compile the next one
+ if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then
+ func_show_eval '$MV "$output_obj" "$lobj"' \
+ 'error=$?; $opt_dry_run || $RM $removelist; exit $error'
+ fi
+
+ # Allow error messages only from the first compilation.
+ if test "$suppress_opt" = yes; then
+ suppress_output=' >/dev/null 2>&1'
+ fi
+ fi
+
+ # Only build a position-dependent object if we build old libraries.
+ if test "$build_old_libs" = yes; then
+ if test "$pic_mode" != yes; then
+ # Don't build PIC code
+ command="$base_compile $qsrcfile$pie_flag"
+ else
+ command="$base_compile $qsrcfile $pic_flag"
+ fi
+ if test "$compiler_c_o" = yes; then
+ command="$command -o $obj"
+ fi
+
+ # Suppress compiler output if we already did a PIC compilation.
+ command="$command$suppress_output"
+ func_show_eval_locale "$command" \
+ '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE'
+
+ if test "$need_locks" = warn &&
+ test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
+ $ECHO "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together. If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+ $opt_dry_run || $RM $removelist
+ exit $EXIT_FAILURE
+ fi
+
+ # Just move the object if needed
+ if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then
+ func_show_eval '$MV "$output_obj" "$obj"' \
+ 'error=$?; $opt_dry_run || $RM $removelist; exit $error'
+ fi
+ fi
+
+ $opt_dry_run || {
+ func_write_libtool_object "$libobj" "$objdir/$objname" "$objname"
+
+ # Unlock the critical section if it was locked
+ if test "$need_locks" != no; then
+ removelist=$lockfile
+ $RM "$lockfile"
+ fi
+ }
+
+ exit $EXIT_SUCCESS
+}
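+
+# Illustrative note (not from upstream libtool): a typical compile-mode
+# invocation (hypothetical file names) is
+#   libtool --mode=compile gcc -g -O -c foo.c
+# which writes foo.lo plus the PIC object under $objdir (usually .libs/)
+# and, when old libraries are enabled, a non-PIC foo.o next to it.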
+
+$opt_help || {
+test "$mode" = compile && func_mode_compile ${1+"$@"}
+}
+
+func_mode_help ()
+{
+ # We need to display help for each of the modes.
+ case $mode in
+ "")
+ # Generic help is extracted from the usage comments
+ # at the start of this file.
+ func_help
+ ;;
+
+ clean)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
+
+Remove files from the build directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, object or program, all the files associated
+with it are deleted. Otherwise, only FILE itself is deleted using RM."
+ ;;
+
+ compile)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
+
+Compile a source file into a libtool library object.
+
+This mode accepts the following additional options:
+
+ -o OUTPUT-FILE set the output file name to OUTPUT-FILE
+ -no-suppress do not suppress compiler output for multiple passes
+ -prefer-pic try to build PIC objects only
+ -prefer-non-pic try to build non-PIC objects only
+ -shared do not build a \`.o' file suitable for static linking
+ -static only build a \`.o' file suitable for static linking
+
+COMPILE-COMMAND is a command to be used in creating a \`standard' object file
+from the given SOURCEFILE.
+
+The output file name is determined by removing the directory component from
+SOURCEFILE, then substituting the C source code suffix \`.c' with the
+library object suffix, \`.lo'."
+ ;;
+
+ execute)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]...
+
+Automatically set library path, then run a program.
+
+This mode accepts the following additional options:
+
+ -dlopen FILE add the directory containing FILE to the library path
+
+This mode sets the library path environment variable according to \`-dlopen'
+flags.
+
+If any of the ARGS are libtool executable wrappers, then they are translated
+into their corresponding uninstalled binary, and any of their required library
+directories are added to the library path.
+
+Then, COMMAND is executed, with ARGS as arguments."
+ ;;
+
+ finish)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=finish [LIBDIR]...
+
+Complete the installation of libtool libraries.
+
+Each LIBDIR is a directory that contains libtool libraries.
+
+The commands that this mode executes may require superuser privileges. Use
+the \`--dry-run' option if you just want to see what would be executed."
+ ;;
+
+ install)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND...
+
+Install executables or libraries.
+
+INSTALL-COMMAND is the installation command. The first component should be
+either the \`install' or \`cp' program.
+
+The following components of INSTALL-COMMAND are treated specially:
+
+ -inst-prefix PREFIX-DIR Use PREFIX-DIR as a staging area for installation
+
+The rest of the components are interpreted as arguments to that command (only
+BSD-compatible install options are recognized)."
+ ;;
+
+ link)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=link LINK-COMMAND...
+
+Link object files or libraries together to form another library, or to
+create an executable program.
+
+LINK-COMMAND is a command using the C compiler that you would use to create
+a program from several object files.
+
+The following components of LINK-COMMAND are treated specially:
+
+ -all-static do not do any dynamic linking at all
+ -avoid-version do not add a version suffix if possible
+ -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
+ -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
+ -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
+ -export-symbols SYMFILE
+ try to export only the symbols listed in SYMFILE
+ -export-symbols-regex REGEX
+ try to export only the symbols matching REGEX
+ -LLIBDIR search LIBDIR for required installed libraries
+ -lNAME OUTPUT-FILE requires the installed library libNAME
+ -module build a library that can be dlopened
+ -no-fast-install disable the fast-install mode
+ -no-install link a not-installable executable
+ -no-undefined declare that a library does not refer to external symbols
+ -o OUTPUT-FILE create OUTPUT-FILE from the specified objects
+ -objectlist FILE Use a list of object files found in FILE to specify objects
+ -precious-files-regex REGEX
+ don't remove output files matching REGEX
+ -release RELEASE specify package release information
+ -rpath LIBDIR the created library will eventually be installed in LIBDIR
+ -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
+ -shared only do dynamic linking of libtool libraries
+ -shrext SUFFIX override the standard shared library file extension
+ -static do not do any dynamic linking of uninstalled libtool libraries
+ -static-libtool-libs
+ do not do any dynamic linking of libtool libraries
+ -version-info CURRENT[:REVISION[:AGE]]
+ specify library version info [each variable defaults to 0]
+ -weak LIBNAME declare that the target provides the LIBNAME interface
+
+All other options (arguments beginning with \`-') are ignored.
+
+Every other argument is treated as a filename. Files ending in \`.la' are
+treated as uninstalled libtool libraries, other files are standard or library
+object files.
+
+If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
+only library objects (\`.lo' files) may be specified, and \`-rpath' is
+required, except when creating a convenience library.
+
+If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
+using \`ar' and \`ranlib', or on Windows using \`lib'.
+
+If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
+is created, otherwise an executable program is created."
+ ;;
+
+ uninstall)
+ $ECHO \
+"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
+
+Remove libraries from an installation directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, all the files associated with it are deleted.
+Otherwise, only FILE itself is deleted using RM."
+ ;;
+
+ *)
+ func_fatal_help "invalid operation mode \`$mode'"
+ ;;
+ esac
+
+ $ECHO
+ $ECHO "Try \`$progname --help' for more information about other modes."
+
+ exit $?
+}
+
+ # Now that we've collected a possible --mode arg, show help if necessary
+ $opt_help && func_mode_help
+
+
+# func_mode_execute arg...
+func_mode_execute ()
+{
+ $opt_debug
+ # The first argument is the command name.
+ cmd="$nonopt"
+ test -z "$cmd" && \
+ func_fatal_help "you must specify a COMMAND"
+
+ # Handle -dlopen flags immediately.
+ for file in $execute_dlfiles; do
+ test -f "$file" \
+ || func_fatal_help "\`$file' is not a file"
+
+ dir=
+ case $file in
+ *.la)
+ # Check to see that this really is a libtool archive.
+ func_lalib_unsafe_p "$file" \
+ || func_fatal_help "\`$lib' is not a valid libtool archive"
+
+ # Read the libtool library.
+ dlname=
+ library_names=
+ func_source "$file"
+
+ # Skip this library if it cannot be dlopened.
+ if test -z "$dlname"; then
+ # Warn if it was a shared library.
+ test -n "$library_names" && \
+ func_warning "\`$file' was not linked with \`-export-dynamic'"
+ continue
+ fi
+
+ func_dirname "$file" "" "."
+ dir="$func_dirname_result"
+
+ if test -f "$dir/$objdir/$dlname"; then
+ dir="$dir/$objdir"
+ else
+ if test ! -f "$dir/$dlname"; then
+ func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'"
+ fi
+ fi
+ ;;
+
+ *.lo)
+ # Just add the directory containing the .lo file.
+ func_dirname "$file" "" "."
+ dir="$func_dirname_result"
+ ;;
+
+ *)
+ func_warning "\`-dlopen' is ignored for non-libtool libraries and objects"
+ continue
+ ;;
+ esac
+
+ # Get the absolute pathname.
+ absdir=`cd "$dir" && pwd`
+ test -n "$absdir" && dir="$absdir"
+
+ # Now add the directory to shlibpath_var.
+ if eval "test -z \"\$$shlibpath_var\""; then
+ eval "$shlibpath_var=\"\$dir\""
+ else
+ eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
+ fi
+ done
+
+ # This variable tells wrapper scripts just to set shlibpath_var
+ # rather than running their programs.
+ libtool_execute_magic="$magic"
+
+ # Check if any of the arguments is a wrapper script.
+ args=
+ for file
+ do
+ case $file in
+ -*) ;;
+ *)
+ # Do a test to see if this is really a libtool program.
+ if func_ltwrapper_script_p "$file"; then
+ func_source "$file"
+ # Transform arg to wrapped name.
+ file="$progdir/$program"
+ elif func_ltwrapper_executable_p "$file"; then
+ func_ltwrapper_scriptname "$file"
+ func_source "$func_ltwrapper_scriptname_result"
+ # Transform arg to wrapped name.
+ file="$progdir/$program"
+ fi
+ ;;
+ esac
+ # Quote arguments (to preserve shell metacharacters).
+ func_quote_for_eval "$file"
+ args="$args $func_quote_for_eval_result"
+ done
+
+ if test "X$opt_dry_run" = Xfalse; then
+ if test -n "$shlibpath_var"; then
+ # Export the shlibpath_var.
+ eval "export $shlibpath_var"
+ fi
+
+ # Restore saved environment variables
+ for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
+ do
+ eval "if test \"\${save_$lt_var+set}\" = set; then
+ $lt_var=\$save_$lt_var; export $lt_var
+ else
+ $lt_unset $lt_var
+ fi"
+ done
+
+ # Now prepare to actually exec the command.
+ exec_cmd="\$cmd$args"
+ else
+ # Display what would be done.
+ if test -n "$shlibpath_var"; then
+ eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\""
+ $ECHO "export $shlibpath_var"
+ fi
+ $ECHO "$cmd$args"
+ exit $EXIT_SUCCESS
+ fi
+}
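+
+# Illustrative note (not from upstream libtool): execute mode is typically
+# used to run uninstalled programs against uninstalled libraries, e.g.
+# (hypothetical invocation):
+#   libtool --mode=execute gdb ./myprog
+# which sets $shlibpath_var (LD_LIBRARY_PATH on most ELF systems) before
+# handing control to the real binary.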
+
+test "$mode" = execute && func_mode_execute ${1+"$@"}
+
+
+# func_mode_finish arg...
+func_mode_finish ()
+{
+ $opt_debug
+ libdirs="$nonopt"
+ admincmds=
+
+ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+ for dir
+ do
+ libdirs="$libdirs $dir"
+ done
+
+ for libdir in $libdirs; do
+ if test -n "$finish_cmds"; then
+ # Do each command in the finish commands.
+ func_execute_cmds "$finish_cmds" 'admincmds="$admincmds
+'"$cmd"'"'
+ fi
+ if test -n "$finish_eval"; then
+ # Do the single finish_eval.
+ eval cmds=\"$finish_eval\"
+ $opt_dry_run || eval "$cmds" || admincmds="$admincmds
+ $cmds"
+ fi
+ done
+ fi
+
+ # Exit here if they wanted silent mode.
+ $opt_silent && exit $EXIT_SUCCESS
+
+ $ECHO "X----------------------------------------------------------------------" | $Xsed
+ $ECHO "Libraries have been installed in:"
+ for libdir in $libdirs; do
+ $ECHO " $libdir"
+ done
+ $ECHO
+ $ECHO "If you ever happen to want to link against installed libraries"
+ $ECHO "in a given directory, LIBDIR, you must either use libtool, and"
+ $ECHO "specify the full pathname of the library, or use the \`-LLIBDIR'"
+ $ECHO "flag during linking and do at least one of the following:"
+ if test -n "$shlibpath_var"; then
+ $ECHO " - add LIBDIR to the \`$shlibpath_var' environment variable"
+ $ECHO " during execution"
+ fi
+ if test -n "$runpath_var"; then
+ $ECHO " - add LIBDIR to the \`$runpath_var' environment variable"
+ $ECHO " during linking"
+ fi
+ if test -n "$hardcode_libdir_flag_spec"; then
+ libdir=LIBDIR
+ eval flag=\"$hardcode_libdir_flag_spec\"
+
+ $ECHO " - use the \`$flag' linker flag"
+ fi
+ if test -n "$admincmds"; then
+ $ECHO " - have your system administrator run these commands:$admincmds"
+ fi
+ if test -f /etc/ld.so.conf; then
+ $ECHO " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
+ fi
+ $ECHO
+
+ $ECHO "See any operating system documentation about shared libraries for"
+ case $host in
+ solaris2.[6789]|solaris2.1[0-9])
+ $ECHO "more information, such as the ld(1), crle(1) and ld.so(8) manual"
+ $ECHO "pages."
+ ;;
+ *)
+ $ECHO "more information, such as the ld(1) and ld.so(8) manual pages."
+ ;;
+ esac
+ $ECHO "X----------------------------------------------------------------------" | $Xsed
+ exit $EXIT_SUCCESS
+}
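+
+# Illustrative note (not from upstream libtool): finish mode is run after
+# installing libraries, e.g. (hypothetical directory):
+#   libtool --mode=finish /usr/local/lib
+# so that finish_cmds (such as running ldconfig on GNU/Linux) can update
+# the system's shared library cache for each LIBDIR.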
+
+test "$mode" = finish && func_mode_finish ${1+"$@"}
+
+
+# func_mode_install arg...
+func_mode_install ()
+{
+ $opt_debug
+ # There may be an optional sh(1) argument at the beginning of
+ # install_prog (especially on Windows NT).
+ if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
+ # Allow the use of GNU shtool's install command.
+ $ECHO "X$nonopt" | $GREP shtool >/dev/null; then
+ # Aesthetically quote it.
+ func_quote_for_eval "$nonopt"
+ install_prog="$func_quote_for_eval_result "
+ arg=$1
+ shift
+ else
+ install_prog=
+ arg=$nonopt
+ fi
+
+ # The real first argument should be the name of the installation program.
+ # Aesthetically quote it.
+ func_quote_for_eval "$arg"
+ install_prog="$install_prog$func_quote_for_eval_result"
+
+ # We need to accept at least all the BSD install flags.
+ dest=
+ files=
+ opts=
+ prev=
+ install_type=
+ isdir=no
+ stripme=
+ for arg
+ do
+ if test -n "$dest"; then
+ files="$files $dest"
+ dest=$arg
+ continue
+ fi
+
+ case $arg in
+ -d) isdir=yes ;;
+ -f)
+ case " $install_prog " in
+ *[\\\ /]cp\ *) ;;
+ *) prev=$arg ;;
+ esac
+ ;;
+ -g | -m | -o)
+ prev=$arg
+ ;;
+ -s)
+ stripme=" -s"
+ continue
+ ;;
+ -*)
+ ;;
+ *)
+ # If the previous option needed an argument, then skip it.
+ if test -n "$prev"; then
+ prev=
+ else
+ dest=$arg
+ continue
+ fi
+ ;;
+ esac
+
+ # Aesthetically quote the argument.
+ func_quote_for_eval "$arg"
+ install_prog="$install_prog $func_quote_for_eval_result"
+ done
+
+ test -z "$install_prog" && \
+ func_fatal_help "you must specify an install program"
+
+ test -n "$prev" && \
+ func_fatal_help "the \`$prev' option requires an argument"
+
+ if test -z "$files"; then
+ if test -z "$dest"; then
+ func_fatal_help "no file or destination specified"
+ else
+ func_fatal_help "you must specify a destination"
+ fi
+ fi
+
+ # Strip any trailing slash from the destination.
+ func_stripname '' '/' "$dest"
+ dest=$func_stripname_result
+
+ # Check to see that the destination is a directory.
+ test -d "$dest" && isdir=yes
+ if test "$isdir" = yes; then
+ destdir="$dest"
+ destname=
+ else
+ func_dirname_and_basename "$dest" "" "."
+ destdir="$func_dirname_result"
+ destname="$func_basename_result"
+
+ # Not a directory, so check to see that there is only one file specified.
+ set dummy $files; shift
+ test "$#" -gt 1 && \
+ func_fatal_help "\`$dest' is not a directory"
+ fi
+ case $destdir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ for file in $files; do
+ case $file in
+ *.lo) ;;
+ *)
+ func_fatal_help "\`$destdir' must be an absolute directory name"
+ ;;
+ esac
+ done
+ ;;
+ esac
+
+ # This variable tells wrapper scripts just to set variables rather
+ # than running their programs.
+ libtool_install_magic="$magic"
+
+ staticlibs=
+ future_libdirs=
+ current_libdirs=
+ for file in $files; do
+
+ # Do each installation.
+ case $file in
+ *.$libext)
+ # Do the static libraries later.
+ staticlibs="$staticlibs $file"
+ ;;
+
+ *.la)
+ # Check to see that this really is a libtool archive.
+ func_lalib_unsafe_p "$file" \
+ || func_fatal_help "\`$file' is not a valid libtool archive"
+
+ library_names=
+ old_library=
+ relink_command=
+ func_source "$file"
+
+ # Add the libdir to current_libdirs if it is the destination.
+ if test "X$destdir" = "X$libdir"; then
+ case "$current_libdirs " in
+ *" $libdir "*) ;;
+ *) current_libdirs="$current_libdirs $libdir" ;;
+ esac
+ else
+ # Note the libdir as a future libdir.
+ case "$future_libdirs " in
+ *" $libdir "*) ;;
+ *) future_libdirs="$future_libdirs $libdir" ;;
+ esac
+ fi
+
+ func_dirname "$file" "/" ""
+ dir="$func_dirname_result"
+ dir="$dir$objdir"
+
+ if test -n "$relink_command"; then
+ # Determine the prefix the user has applied to our future dir.
+ inst_prefix_dir=`$ECHO "X$destdir" | $Xsed -e "s%$libdir\$%%"`
+
+ # Don't allow the user to place us outside of our expected
+ # location because this prevents finding dependent libraries that
+ # are installed to the same prefix.
+ # At present, this check doesn't affect windows .dll's that
+ # are installed into $libdir/../bin (currently, that works fine)
+ # but it's something to keep an eye on.
+ test "$inst_prefix_dir" = "$destdir" && \
+ func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir"
+
+ if test -n "$inst_prefix_dir"; then
+ # Stick the inst_prefix_dir data into the link command.
+ relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
+ else
+ relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%%"`
+ fi
+
+ func_warning "relinking \`$file'"
+ func_show_eval "$relink_command" \
+ 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"'
+ fi
+
+ # See the names of the shared library.
+ set dummy $library_names; shift
+ if test -n "$1"; then
+ realname="$1"
+ shift
+
+ srcname="$realname"
+ test -n "$relink_command" && srcname="$realname"T
+
+ # Install the shared library and build the symlinks.
+ func_show_eval "$install_prog $dir/$srcname $destdir/$realname" \
+ 'exit $?'
+ tstripme="$stripme"
+ case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+ case $realname in
+ *.dll.a)
+ tstripme=""
+ ;;
+ esac
+ ;;
+ esac
+ if test -n "$tstripme" && test -n "$striplib"; then
+ func_show_eval "$striplib $destdir/$realname" 'exit $?'
+ fi
+
+ if test "$#" -gt 0; then
+ # Delete the old symlinks, and create new ones.
+ # Try `ln -sf' first, because the `ln' binary might depend on
+ # the symlink we replace! Solaris /bin/ln does not understand -f,
+ # so we also need to try rm && ln -s.
+ for linkname
+ do
+ test "$linkname" != "$realname" \
+ && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })"
+ done
+ fi
+
+ # Do each command in the postinstall commands.
+ lib="$destdir/$realname"
+ func_execute_cmds "$postinstall_cmds" 'exit $?'
+ fi
+
+ # Install the pseudo-library for information purposes.
+ func_basename "$file"
+ name="$func_basename_result"
+ instname="$dir/$name"i
+ func_show_eval "$install_prog $instname $destdir/$name" 'exit $?'
+
+ # Maybe install the static library, too.
+ test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
+ ;;
+
+ *.lo)
+ # Install (i.e. copy) a libtool object.
+
+ # Figure out destination file name, if it wasn't already specified.
+ if test -n "$destname"; then
+ destfile="$destdir/$destname"
+ else
+ func_basename "$file"
+ destfile="$func_basename_result"
+ destfile="$destdir/$destfile"
+ fi
+
+ # Deduce the name of the destination old-style object file.
+ case $destfile in
+ *.lo)
+ func_lo2o "$destfile"
+ staticdest=$func_lo2o_result
+ ;;
+ *.$objext)
+ staticdest="$destfile"
+ destfile=
+ ;;
+ *)
+ func_fatal_help "cannot copy a libtool object to \`$destfile'"
+ ;;
+ esac
+
+ # Install the libtool object if requested.
+ test -n "$destfile" && \
+ func_show_eval "$install_prog $file $destfile" 'exit $?'
+
+ # Install the old object if enabled.
+ if test "$build_old_libs" = yes; then
+ # Deduce the name of the old-style object file.
+ func_lo2o "$file"
+ staticobj=$func_lo2o_result
+ func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?'
+ fi
+ exit $EXIT_SUCCESS
+ ;;
+
+ *)
+ # Figure out destination file name, if it wasn't already specified.
+ if test -n "$destname"; then
+ destfile="$destdir/$destname"
+ else
+ func_basename "$file"
+ destfile="$func_basename_result"
+ destfile="$destdir/$destfile"
+ fi
+
+ # If the file is missing, and there is a .exe on the end, strip it
+ # because it is most likely a libtool script we actually want to
+ # install
+ stripped_ext=""
+ case $file in
+ *.exe)
+ if test ! -f "$file"; then
+ func_stripname '' '.exe' "$file"
+ file=$func_stripname_result
+ stripped_ext=".exe"
+ fi
+ ;;
+ esac
+
+ # Do a test to see if this is really a libtool program.
+ case $host in
+ *cygwin* | *mingw*)
+ if func_ltwrapper_executable_p "$file"; then
+ func_ltwrapper_scriptname "$file"
+ wrapper=$func_ltwrapper_scriptname_result
+ else
+ func_stripname '' '.exe' "$file"
+ wrapper=$func_stripname_result
+ fi
+ ;;
+ *)
+ wrapper=$file
+ ;;
+ esac
+ if func_ltwrapper_script_p "$wrapper"; then
+ notinst_deplibs=
+ relink_command=
+
+ func_source "$wrapper"
+
+ # Check the variables that should have been set.
+ test -z "$generated_by_libtool_version" && \
+ func_fatal_error "invalid libtool wrapper script \`$wrapper'"
+
+ finalize=yes
+ for lib in $notinst_deplibs; do
+ # Check to see that each library is installed.
+ libdir=
+ if test -f "$lib"; then
+ func_source "$lib"
+ fi
+ libfile="$libdir/"`$ECHO "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
+ if test -n "$libdir" && test ! -f "$libfile"; then
+ func_warning "\`$lib' has not been installed in \`$libdir'"
+ finalize=no
+ fi
+ done
+
+ relink_command=
+ func_source "$wrapper"
+
+ outputname=
+ if test "$fast_install" = no && test -n "$relink_command"; then
+ $opt_dry_run || {
+ if test "$finalize" = yes; then
+ tmpdir=`func_mktempdir`
+ func_basename "$file$stripped_ext"
+ file="$func_basename_result"
+ outputname="$tmpdir/$file"
+ # Replace the output file specification.
+ relink_command=`$ECHO "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
+
+ $opt_silent || {
+ func_quote_for_expand "$relink_command"
+ eval "func_echo $func_quote_for_expand_result"
+ }
+ if eval "$relink_command"; then :
+ else
+ func_error "error: relink \`$file' with the above command before installing it"
+ $opt_dry_run || ${RM}r "$tmpdir"
+ continue
+ fi
+ file="$outputname"
+ else
+ func_warning "cannot relink \`$file'"
+ fi
+ }
+ else
+ # Install the binary that we compiled earlier.
+ file=`$ECHO "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
+ fi
+ fi
+
+ # remove .exe since cygwin /usr/bin/install will append another
+ # one anyway
+ case $install_prog,$host in
+ */usr/bin/install*,*cygwin*)
+ case $file:$destfile in
+ *.exe:*.exe)
+ # this is ok
+ ;;
+ *.exe:*)
+ destfile=$destfile.exe
+ ;;
+ *:*.exe)
+ func_stripname '' '.exe' "$destfile"
+ destfile=$func_stripname_result
+ ;;
+ esac
+ ;;
+ esac
+ func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?'
+ $opt_dry_run || if test -n "$outputname"; then
+ ${RM}r "$tmpdir"
+ fi
+ ;;
+ esac
+ done
+
+ for file in $staticlibs; do
+ func_basename "$file"
+ name="$func_basename_result"
+
+ # Set up the ranlib parameters.
+ oldlib="$destdir/$name"
+
+ func_show_eval "$install_prog \$file \$oldlib" 'exit $?'
+
+ if test -n "$stripme" && test -n "$old_striplib"; then
+ func_show_eval "$old_striplib $oldlib" 'exit $?'
+ fi
+
+ # Do each command in the postinstall commands.
+ func_execute_cmds "$old_postinstall_cmds" 'exit $?'
+ done
+
+ test -n "$future_libdirs" && \
+ func_warning "remember to run \`$progname --finish$future_libdirs'"
+
+ if test -n "$current_libdirs"; then
+ # Maybe just do a dry run.
+ $opt_dry_run && current_libdirs=" -n$current_libdirs"
+ exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs'
+ else
+ exit $EXIT_SUCCESS
+ fi
+}
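+
+# Illustrative note (not from upstream libtool): install mode wraps the real
+# install program, e.g. (hypothetical names and paths):
+#   libtool --mode=install install -c libfoo.la /usr/local/lib/libfoo.la
+# installing the shared and static libraries plus the .la file, and warning
+# if --finish still needs to be run for directories in $future_libdirs.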
+
+test "$mode" = install && func_mode_install ${1+"$@"}
+
+
+# func_generate_dlsyms outputname originator pic_p
+# Extract symbols from dlprefiles and create ${outputname}S.o with
+# a dlpreopen symbol table.
+func_generate_dlsyms ()
+{
+ $opt_debug
+ my_outputname="$1"
+ my_originator="$2"
+ my_pic_p="${3-no}"
+ my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'`
+ my_dlsyms=
+
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ if test -n "$NM" && test -n "$global_symbol_pipe"; then
+ my_dlsyms="${my_outputname}S.c"
+ else
+ func_error "not configured to extract global symbols from dlpreopened files"
+ fi
+ fi
+
+ if test -n "$my_dlsyms"; then
+ case $my_dlsyms in
+ "") ;;
+ *.c)
+ # Discover the nlist of each of the dlfiles.
+ nlist="$output_objdir/${my_outputname}.nm"
+
+ func_show_eval "$RM $nlist ${nlist}S ${nlist}T"
+
+ # Parse the name list into a source file.
+ func_verbose "creating $output_objdir/$my_dlsyms"
+
+ $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\
+/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */
+/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+/* External symbol declarations for the compiler. */\
+"
+
+ if test "$dlself" = yes; then
+ func_verbose "generating symbol list for \`$output'"
+
+ $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist"
+
+ # Add our own program objects to the symbol list.
+ progfiles=`$ECHO "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ for progfile in $progfiles; do
+ func_verbose "extracting global C symbols from \`$progfile'"
+ $opt_dry_run || eval "$NM $progfile | $global_symbol_pipe >> '$nlist'"
+ done
+
+ if test -n "$exclude_expsyms"; then
+ $opt_dry_run || {
+ eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
+ eval '$MV "$nlist"T "$nlist"'
+ }
+ fi
+
+ if test -n "$export_symbols_regex"; then
+ $opt_dry_run || {
+ eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
+ eval '$MV "$nlist"T "$nlist"'
+ }
+ fi
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ export_symbols="$output_objdir/$outputname.exp"
+ $opt_dry_run || {
+ $RM $export_symbols
+ eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+ eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"'
+ ;;
+ esac
+ }
+ else
+ $opt_dry_run || {
+ eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
+ eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
+ eval '$MV "$nlist"T "$nlist"'
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+ eval 'cat "$nlist" >> "$output_objdir/$outputname.def"'
+ ;;
+ esac
+ }
+ fi
+ fi
+
+ for dlprefile in $dlprefiles; do
+ func_verbose "extracting global C symbols from \`$dlprefile'"
+ func_basename "$dlprefile"
+ name="$func_basename_result"
+ $opt_dry_run || {
+ eval '$ECHO ": $name " >> "$nlist"'
+ eval "$NM $dlprefile 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+ }
+ done
+
+ $opt_dry_run || {
+ # Make sure we have at least an empty file.
+ test -f "$nlist" || : > "$nlist"
+
+ if test -n "$exclude_expsyms"; then
+ $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
+ $MV "$nlist"T "$nlist"
+ fi
+
+ # Try sorting and uniquifying the output.
+ if $GREP -v "^: " < "$nlist" |
+ if sort -k 3 </dev/null >/dev/null 2>&1; then
+ sort -k 3
+ else
+ sort +2
+ fi |
+ uniq > "$nlist"S; then
+ :
+ else
+ $GREP -v "^: " < "$nlist" > "$nlist"S
+ fi
+
+ if test -f "$nlist"S; then
+ eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"'
+ else
+ $ECHO '/* NONE */' >> "$output_objdir/$my_dlsyms"
+ fi
+
+ $ECHO >> "$output_objdir/$my_dlsyms" "\
+
+/* The mapping between symbol names and symbols. */
+typedef struct {
+ const char *name;
+ void *address;
+} lt_dlsymlist;
+"
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ $ECHO >> "$output_objdir/$my_dlsyms" "\
+/* DATA imports from DLLs on WIN32 can't be const, because
+ runtime relocations are performed -- see ld's documentation
+ on pseudo-relocs. */"
+ lt_dlsym_const= ;;
+ *osf5*)
+ echo >> "$output_objdir/$my_dlsyms" "\
+/* This system does not cope well with relocations in const data */"
+ lt_dlsym_const= ;;
+ *)
+ lt_dlsym_const=const ;;
+ esac
+
+ $ECHO >> "$output_objdir/$my_dlsyms" "\
+extern $lt_dlsym_const lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[];
+$lt_dlsym_const lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[] =
+{\
+ { \"$my_originator\", (void *) 0 },"
+
+ case $need_lib_prefix in
+ no)
+ eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms"
+ ;;
+ *)
+ eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms"
+ ;;
+ esac
+ $ECHO >> "$output_objdir/$my_dlsyms" "\
+ {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt_${my_prefix}_LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif\
+"
+ } # !$opt_dry_run
+
+ pic_flag_for_symtable=
+ case "$compile_command " in
+ *" -static "*) ;;
+ *)
+ case $host in
+ # compiling the symbol table file with pic_flag works around
+ # a FreeBSD bug that causes programs to crash when -lm is
+ # linked before any other PIC object. But we must not use
+ # pic_flag when linking with -static. The problem exists in
+ # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
+ *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
+ pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;;
+ *-*-hpux*)
+ pic_flag_for_symtable=" $pic_flag" ;;
+ *)
+ if test "X$my_pic_p" != Xno; then
+ pic_flag_for_symtable=" $pic_flag"
+ fi
+ ;;
+ esac
+ ;;
+ esac
+ symtab_cflags=
+ for arg in $LTCFLAGS; do
+ case $arg in
+ -pie | -fpie | -fPIE) ;;
+ *) symtab_cflags="$symtab_cflags $arg" ;;
+ esac
+ done
+
+ # Now compile the dynamic symbol file.
+ func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?'
+
+ # Clean up the generated files.
+ func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"'
+
+ # Transform the symbol file into the correct name.
+ symfileobj="$output_objdir/${my_outputname}S.$objext"
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ if test -f "$output_objdir/$my_outputname.def"; then
+ compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+ finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+ else
+ compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
+ finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
+ fi
+ ;;
+ *)
+ compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
+ finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
+ ;;
+ esac
+ ;;
+ *)
+ func_fatal_error "unknown suffix for \`$my_dlsyms'"
+ ;;
+ esac
+ else
+ # We keep going just in case the user didn't refer to
+ # lt_preloaded_symbols. The linker will fail if global_symbol_pipe
+ # really was required.
+
+ # Nullify the symbol file.
+ compile_command=`$ECHO "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
+ finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
+ fi
+}
+
+# func_win32_libid arg
+# return the library type of file 'arg'
+#
+# Need a lot of goo to handle *both* DLLs and import libs
+# Has to be a shell function in order to 'eat' the argument
+# that is supplied when $file_magic_command is called.
+func_win32_libid ()
+{
+ $opt_debug
+ win32_libid_type="unknown"
+ win32_fileres=`file -L $1 2>/dev/null`
+ case $win32_fileres in
+ *ar\ archive\ import\ library*) # definitely import
+ win32_libid_type="x86 archive import"
+ ;;
+ *ar\ archive*) # could be an import, or static
+ if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null |
+ $EGREP 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then
+ win32_nmres=`eval $NM -f posix -A $1 |
+ $SED -n -e '
+ 1,100{
+ / I /{
+ s,.*,import,
+ p
+ q
+ }
+ }'`
+ case $win32_nmres in
+ import*) win32_libid_type="x86 archive import";;
+ *) win32_libid_type="x86 archive static";;
+ esac
+ fi
+ ;;
+ *DLL*)
+ win32_libid_type="x86 DLL"
+ ;;
+ *executable*) # but shell scripts are "executable" too...
+ case $win32_fileres in
+ *MS\ Windows\ PE\ Intel*)
+ win32_libid_type="x86 DLL"
+ ;;
+ esac
+ ;;
+ esac
+ $ECHO "$win32_libid_type"
+}
+
+
+
+# func_extract_an_archive dir oldlib
+func_extract_an_archive ()
+{
+ $opt_debug
+ f_ex_an_ar_dir="$1"; shift
+ f_ex_an_ar_oldlib="$1"
+ func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" 'exit $?'
+ if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then
+ :
+ else
+ func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib"
+ fi
+}
+
+
+# func_extract_archives gentop oldlib ...
+func_extract_archives ()
+{
+ $opt_debug
+ my_gentop="$1"; shift
+ my_oldlibs=${1+"$@"}
+ my_oldobjs=""
+ my_xlib=""
+ my_xabs=""
+ my_xdir=""
+
+ for my_xlib in $my_oldlibs; do
+ # Extract the objects.
+ case $my_xlib in
+ [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;;
+ *) my_xabs=`pwd`"/$my_xlib" ;;
+ esac
+ func_basename "$my_xlib"
+ my_xlib="$func_basename_result"
+ my_xlib_u=$my_xlib
+ while :; do
+ case " $extracted_archives " in
+ *" $my_xlib_u "*)
+ func_arith $extracted_serial + 1
+ extracted_serial=$func_arith_result
+ my_xlib_u=lt$extracted_serial-$my_xlib ;;
+ *) break ;;
+ esac
+ done
+ extracted_archives="$extracted_archives $my_xlib_u"
+ my_xdir="$my_gentop/$my_xlib_u"
+
+ func_mkdir_p "$my_xdir"
+
+ case $host in
+ *-darwin*)
+ func_verbose "Extracting $my_xabs"
+ # Do not bother doing anything if just a dry run
+ $opt_dry_run || {
+ darwin_orig_dir=`pwd`
+ cd $my_xdir || exit $?
+ darwin_archive=$my_xabs
+ darwin_curdir=`pwd`
+ darwin_base_archive=`basename "$darwin_archive"`
+ darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true`
+ if test -n "$darwin_arches"; then
+ darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'`
+ darwin_arch=
+ func_verbose "$darwin_base_archive has multiple architectures $darwin_arches"
+ for darwin_arch in $darwin_arches ; do
+ func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}"
+ $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}"
+ cd "unfat-$$/${darwin_base_archive}-${darwin_arch}"
+ func_extract_an_archive "`pwd`" "${darwin_base_archive}"
+ cd "$darwin_curdir"
+ $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}"
+ done # $darwin_arches
+ ## Okay now we've a bunch of thin objects, gotta fatten them up :)
+ darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u`
+ darwin_file=
+ darwin_files=
+ for darwin_file in $darwin_filelist; do
+ darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP`
+ $LIPO -create -output "$darwin_file" $darwin_files
+ done # $darwin_filelist
+ $RM -rf unfat-$$
+ cd "$darwin_orig_dir"
+ else
+ cd $darwin_orig_dir
+ func_extract_an_archive "$my_xdir" "$my_xabs"
+ fi # $darwin_arches
+ } # !$opt_dry_run
+ ;;
+ *)
+ func_extract_an_archive "$my_xdir" "$my_xabs"
+ ;;
+ esac
+ my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
+ done
+
+ func_extract_archives_result="$my_oldobjs"
+}
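+
+# Illustrative note (not from upstream libtool): on Darwin, archives may be
+# "fat" (multi-architecture); the loop above uses lipo to split them into
+# per-architecture thin archives, extracts each one, and then recombines the
+# extracted objects with `lipo -create' so the caller sees ordinary .o files.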
+
+
+
+# func_emit_wrapper_part1 [arg=no]
+#
+# Emit the first part of a libtool wrapper script on stdout.
+# For more information, see the description associated with
+# func_emit_wrapper(), below.
+func_emit_wrapper_part1 ()
+{
+ func_emit_wrapper_part1_arg1=no
+ if test -n "$1" ; then
+ func_emit_wrapper_part1_arg1=$1
+ fi
+
+ $ECHO "\
+#! $SHELL
+
+# $output - temporary wrapper script for $objdir/$outputname
+# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+#
+# The $output program cannot be directly executed until all the libtool
+# libraries that it depends on are installed.
+#
+# This wrapper script should never be moved out of the build directory.
+# If it is, it will not operate correctly.
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='${SED} -e 1s/^X//'
+sed_quote_subst='$sed_quote_subst'
+
+# Be Bourne compatible
+if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x perform word splitting on \${1+\"\$@\"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '\${1+\"\$@\"}'='\"\$@\"'
+ setopt NO_GLOB_SUBST
+else
+ case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+relink_command=\"$relink_command\"
+
+# This environment variable determines our operation mode.
+if test \"\$libtool_install_magic\" = \"$magic\"; then
+ # install mode needs the following variables:
+ generated_by_libtool_version='$macro_version'
+ notinst_deplibs='$notinst_deplibs'
+else
+ # When we are sourced in execute mode, \$file and \$ECHO are already set.
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then
+ ECHO=\"$qecho\"
+ file=\"\$0\"
+ # Make sure echo works.
+ if test \"X\$1\" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+ elif test \"X\`{ \$ECHO '\t'; } 2>/dev/null\`\" = 'X\t'; then
+ # Yippee, \$ECHO works!
+ :
+ else
+ # Restart under the correct shell, and then maybe \$ECHO will work.
+ exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
+ fi
+ fi\
+"
+ $ECHO "\
+
+ # Find the directory that this script lives in.
+ thisdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
+ test \"x\$thisdir\" = \"x\$file\" && thisdir=.
+
+ # Follow symbolic links until we get to the real thisdir.
+ file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\`
+ while test -n \"\$file\"; do
+ destdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
+
+ # If there was a directory component, then change thisdir.
+ if test \"x\$destdir\" != \"x\$file\"; then
+ case \"\$destdir\" in
+ [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
+ *) thisdir=\"\$thisdir/\$destdir\" ;;
+ esac
+ fi
+
+ file=\`\$ECHO \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
+ file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\`
+ done
+"
+}
+# end: func_emit_wrapper_part1
+
+# func_emit_wrapper_part2 [arg=no]
+#
+# Emit the second part of a libtool wrapper script on stdout.
+# For more information, see the description associated with
+# func_emit_wrapper(), below.
+func_emit_wrapper_part2 ()
+{
+ func_emit_wrapper_part2_arg1=no
+ if test -n "$1" ; then
+ func_emit_wrapper_part2_arg1=$1
+ fi
+
+ $ECHO "\
+
+ # Usually 'no', except on cygwin/mingw when embedded into
+ # the cwrapper.
+ WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_part2_arg1
+ if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then
+ # special case for '.'
+ if test \"\$thisdir\" = \".\"; then
+ thisdir=\`pwd\`
+ fi
+ # remove .libs from thisdir
+ case \"\$thisdir\" in
+ *[\\\\/]$objdir ) thisdir=\`\$ECHO \"X\$thisdir\" | \$Xsed -e 's%[\\\\/][^\\\\/]*$%%'\` ;;
+ $objdir ) thisdir=. ;;
+ esac
+ fi
+
+ # Try to get the absolute directory name.
+ absdir=\`cd \"\$thisdir\" && pwd\`
+ test -n \"\$absdir\" && thisdir=\"\$absdir\"
+"
+
+ if test "$fast_install" = yes; then
+ $ECHO "\
+ program=lt-'$outputname'$exeext
+ progdir=\"\$thisdir/$objdir\"
+
+ if test ! -f \"\$progdir/\$program\" ||
+ { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\
+ test \"X\$file\" != \"X\$progdir/\$program\"; }; then
+
+ file=\"\$\$-\$program\"
+
+ if test ! -d \"\$progdir\"; then
+ $MKDIR \"\$progdir\"
+ else
+ $RM \"\$progdir/\$file\"
+ fi"
+
+ $ECHO "\
+
+ # relink executable if necessary
+ if test -n \"\$relink_command\"; then
+ if relink_command_output=\`eval \$relink_command 2>&1\`; then :
+ else
+ $ECHO \"\$relink_command_output\" >&2
+ $RM \"\$progdir/\$file\"
+ exit 1
+ fi
+ fi
+
+ $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
+ { $RM \"\$progdir/\$program\";
+ $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; }
+ $RM \"\$progdir/\$file\"
+ fi"
+ else
+ $ECHO "\
+ program='$outputname'
+ progdir=\"\$thisdir/$objdir\"
+"
+ fi
+
+ $ECHO "\
+
+ if test -f \"\$progdir/\$program\"; then"
+
+ # Export our shlibpath_var if we have one.
+ if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+ $ECHO "\
+ # Add our own library path to $shlibpath_var
+ $shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
+
+ # Some systems cannot cope with colon-terminated $shlibpath_var
+ # The second colon is a workaround for a bug in BeOS R4 sed
+ $shlibpath_var=\`\$ECHO \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
+
+ export $shlibpath_var
+"
+ fi
+
+ # fixup the dll searchpath if we need to.
+ if test -n "$dllsearchpath"; then
+ $ECHO "\
+ # Add the dll search path components to the executable PATH
+ PATH=$dllsearchpath:\$PATH
+"
+ fi
+
+ $ECHO "\
+ if test \"\$libtool_execute_magic\" != \"$magic\"; then
+ # Run the actual program with our arguments.
+"
+ case $host in
+ # Backslashes separate directories on plain windows
+ *-*-mingw | *-*-os2* | *-cegcc*)
+ $ECHO "\
+ exec \"\$progdir\\\\\$program\" \${1+\"\$@\"}
+"
+ ;;
+
+ *)
+ $ECHO "\
+ exec \"\$progdir/\$program\" \${1+\"\$@\"}
+"
+ ;;
+ esac
+ $ECHO "\
+ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2
+ exit 1
+ fi
+ else
+ # The program doesn't exist.
+ \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2
+ \$ECHO \"This script is just a wrapper for \$program.\" 1>&2
+ $ECHO \"See the $PACKAGE documentation for more information.\" 1>&2
+ exit 1
+ fi
+fi\
+"
+}
+# end: func_emit_wrapper_part2
+
+
+# func_emit_wrapper [arg=no]
+#
+# Emit a libtool wrapper script on stdout.
+# Don't directly open a file because we may want to
+# incorporate the script contents within a cygwin/mingw
+# wrapper executable. Must ONLY be called from within
+# func_mode_link because it depends on a number of variables
+# set therein.
+#
+# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR
+# variable will take. If 'yes', then the emitted script
+# will assume that the directory in which it is stored is
+# the $objdir directory. This is a cygwin/mingw-specific
+# behavior.
+func_emit_wrapper ()
+{
+ func_emit_wrapper_arg1=no
+ if test -n "$1" ; then
+ func_emit_wrapper_arg1=$1
+ fi
+
+ # split this up so that func_emit_cwrapperexe_src
+ # can call each part independently.
+ func_emit_wrapper_part1 "${func_emit_wrapper_arg1}"
+ func_emit_wrapper_part2 "${func_emit_wrapper_arg1}"
+}
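+
+# Illustrative sketch (an assumption, not part of the upstream script): the
+# caller in func_mode_link is expected to redirect this function's output
+# into the wrapper file and mark it executable, roughly:
+#
+#   func_emit_wrapper no > "$output"
+#   chmod +x "$output"
+#
+# The emitted script then locates and execs the real binary under $objdir.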
+
+
+# func_to_host_path arg
+#
+# Convert paths to host format when used with build tools.
+# Intended for use with "native" mingw (where libtool itself
+# is running under the msys shell), or in the following cross-
+# build environments:
+# $build $host
+# mingw (msys) mingw [e.g. native]
+# cygwin mingw
+# *nix + wine mingw
+# where wine is equipped with the `winepath' executable.
+# In the native mingw case, the (msys) shell automatically
+# converts paths for any non-msys applications it launches,
+# but that facility isn't available from inside the cwrapper.
+# Similar accommodations are necessary for $host mingw and
+# $build cygwin. Calling this function does no harm for other
+# $host/$build combinations not listed above.
+#
+# ARG is the path (on $build) that should be converted to
+# the proper representation for $host. The result is stored
+# in $func_to_host_path_result.
+func_to_host_path ()
+{
+ func_to_host_path_result="$1"
+ if test -n "$1" ; then
+ case $host in
+ *mingw* )
+ lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g'
+ case $build in
+ *mingw* ) # actually, msys
+ # awkward: cmd appends spaces to result
+ lt_sed_strip_trailing_spaces="s/[ ]*\$//"
+ func_to_host_path_tmp1=`( cmd //c echo "$1" |\
+ $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""`
+ func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\
+ $SED -e "$lt_sed_naive_backslashify"`
+ ;;
+ *cygwin* )
+ func_to_host_path_tmp1=`cygpath -w "$1"`
+ func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\
+ $SED -e "$lt_sed_naive_backslashify"`
+ ;;
+ * )
+ # Unfortunately, winepath does not exit with a non-zero
+ # error code, so we are forced to check the contents of
+ # stdout. On the other hand, if the command is not
+ # found, the shell will set an exit code of 127 and print
+ # *an error message* to stdout. So we must check for both
+ # error code of zero AND non-empty stdout, which explains
+ # the odd construction:
+ func_to_host_path_tmp1=`winepath -w "$1" 2>/dev/null`
+ if test "$?" -eq 0 && test -n "${func_to_host_path_tmp1}"; then
+ func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\
+ $SED -e "$lt_sed_naive_backslashify"`
+ else
+ # Allow warning below.
+ func_to_host_path_result=""
+ fi
+ ;;
+ esac
+ if test -z "$func_to_host_path_result" ; then
+ func_error "Could not determine host path corresponding to"
+ func_error " '$1'"
+ func_error "Continuing, but uninstalled executables may not work."
+ # Fallback:
+ func_to_host_path_result="$1"
+ fi
+ ;;
+ esac
+ fi
+}
+# end: func_to_host_path
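+
+# Illustrative usage (an assumption, not part of the upstream script):
+#
+#   func_to_host_path "/c/build/lib"
+#   echo "$func_to_host_path_result"
+#
+# For a mingw host this yields a Windows-style, backslash-separated form of
+# the path (suitable for embedding in the generated wrapper); for host/build
+# combinations not handled above, the input path is returned unchanged.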
+
+# func_to_host_pathlist arg
+#
+# Convert pathlists to host format when used with build tools.
+# See func_to_host_path(), above. This function supports the
+# following $build/$host combinations (but does no harm for
+# combinations not listed here):
+# $build $host
+# mingw (msys) mingw [e.g. native]
+# cygwin mingw
+# *nix + wine mingw
+#
+# Path separators are also converted from $build format to
+# $host format. If ARG begins or ends with a path separator
+# character, it is preserved (but converted to $host format)
+# on output.
+#
+# ARG is a pathlist (on $build) that should be converted to
+# the proper representation on $host. The result is stored
+# in $func_to_host_pathlist_result.
+func_to_host_pathlist ()
+{
+ func_to_host_pathlist_result="$1"
+ if test -n "$1" ; then
+ case $host in
+ *mingw* )
+ lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g'
+ # Remove leading and trailing path separator characters from
+ # ARG. msys behavior is inconsistent here; cygpath turns them
+ # into '.;' and ';.', and winepath ignores them completely.
+ func_to_host_pathlist_tmp2="$1"
+ # Once set for this call, this variable should not be
+ # reassigned. It is used in the fallback case.
+ func_to_host_pathlist_tmp1=`echo "$func_to_host_pathlist_tmp2" |\
+ $SED -e 's|^:*||' -e 's|:*$||'`
+ case $build in
+ *mingw* ) # Actually, msys.
+ # Awkward: cmd appends spaces to result.
+ lt_sed_strip_trailing_spaces="s/[ ]*\$//"
+ func_to_host_pathlist_tmp2=`( cmd //c echo "$func_to_host_pathlist_tmp1" |\
+ $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""`
+ func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\
+ $SED -e "$lt_sed_naive_backslashify"`
+ ;;
+ *cygwin* )
+ func_to_host_pathlist_tmp2=`cygpath -w -p "$func_to_host_pathlist_tmp1"`
+ func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\
+ $SED -e "$lt_sed_naive_backslashify"`
+ ;;
+ * )
+ # unfortunately, winepath doesn't convert pathlists
+ func_to_host_pathlist_result=""
+ func_to_host_pathlist_oldIFS=$IFS
+ IFS=:
+ for func_to_host_pathlist_f in $func_to_host_pathlist_tmp1 ; do
+ IFS=$func_to_host_pathlist_oldIFS
+ if test -n "$func_to_host_pathlist_f" ; then
+ func_to_host_path "$func_to_host_pathlist_f"
+ if test -n "$func_to_host_path_result" ; then
+ if test -z "$func_to_host_pathlist_result" ; then
+ func_to_host_pathlist_result="$func_to_host_path_result"
+ else
+ func_to_host_pathlist_result="$func_to_host_pathlist_result;$func_to_host_path_result"
+ fi
+ fi
+ fi
+ IFS=:
+ done
+ IFS=$func_to_host_pathlist_oldIFS
+ ;;
+ esac
+ if test -z "$func_to_host_pathlist_result" ; then
+ func_error "Could not determine the host path(s) corresponding to"
+ func_error " '$1'"
+ func_error "Continuing, but uninstalled executables may not work."
+ # Fallback. This may break if $1 contains DOS-style drive
+ # specifications. The fix is not to complicate the expression
+ # below, but for the user to provide a working wine installation
+ # with winepath so that path translation in the cross-to-mingw
+ # case works properly.
+ lt_replace_pathsep_nix_to_dos="s|:|;|g"
+ func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp1" |\
+ $SED -e "$lt_replace_pathsep_nix_to_dos"`
+ fi
+ # Now, add the leading and trailing path separators back
+ case "$1" in
+ :* ) func_to_host_pathlist_result=";$func_to_host_pathlist_result"
+ ;;
+ esac
+ case "$1" in
+ *: ) func_to_host_pathlist_result="$func_to_host_pathlist_result;"
+ ;;
+ esac
+ ;;
+ esac
+ fi
+}
+# end: func_to_host_pathlist
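+
+# Illustrative usage (an assumption, not part of the upstream script):
+#
+#   func_to_host_pathlist "/c/foo:/c/bar"
+#   echo "$func_to_host_pathlist_result"
+#
+# For a mingw host the ':' separators become ';' and each component is
+# converted as in func_to_host_path; a leading or trailing separator in the
+# input is preserved (in ';' form) in the result.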
+
+# func_emit_cwrapperexe_src
+# emit the source code for a wrapper executable on stdout
+# Must ONLY be called from within func_mode_link because
+# it depends on a number of variables set therein.
+func_emit_cwrapperexe_src ()
+{
+ cat <<EOF
+
+/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
+ Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+
+ The $output program cannot be directly executed until all the libtool
+ libraries that it depends on are installed.
+
+ This wrapper executable should never be moved out of the build directory.
+ If it is, it will not operate correctly.
+
+ Currently, it simply execs the wrapper *script* "$SHELL $output",
+ but could eventually absorb all of the script's functionality and
+ exec $objdir/$outputname directly.
+*/
+EOF
+ cat <<"EOF"
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _MSC_VER
+# include <direct.h>
+# include <process.h>
+# include <io.h>
+# define setmode _setmode
+#else
+# include <unistd.h>
+# include <stdint.h>
+# ifdef __CYGWIN__
+# include <io.h>
+# define HAVE_SETENV
+# ifdef __STRICT_ANSI__
+char *realpath (const char *, char *);
+int putenv (char *);
+int setenv (const char *, const char *, int);
+# endif
+# endif
+#endif
+#include <malloc.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+#if defined(PATH_MAX)
+# define LT_PATHMAX PATH_MAX
+#elif defined(MAXPATHLEN)
+# define LT_PATHMAX MAXPATHLEN
+#else
+# define LT_PATHMAX 1024
+#endif
+
+#ifndef S_IXOTH
+# define S_IXOTH 0
+#endif
+#ifndef S_IXGRP
+# define S_IXGRP 0
+#endif
+
+#ifdef _MSC_VER
+# define S_IXUSR _S_IEXEC
+# define stat _stat
+# ifndef _INTPTR_T_DEFINED
+# define intptr_t int
+# endif
+#endif
+
+#ifndef DIR_SEPARATOR
+# define DIR_SEPARATOR '/'
+# define PATH_SEPARATOR ':'
+#endif
+
+#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \
+ defined (__OS2__)
+# define HAVE_DOS_BASED_FILE_SYSTEM
+# define FOPEN_WB "wb"
+# ifndef DIR_SEPARATOR_2
+# define DIR_SEPARATOR_2 '\\'
+# endif
+# ifndef PATH_SEPARATOR_2
+# define PATH_SEPARATOR_2 ';'
+# endif
+#endif
+
+#ifndef DIR_SEPARATOR_2
+# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
+#else /* DIR_SEPARATOR_2 */
+# define IS_DIR_SEPARATOR(ch) \
+ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
+#endif /* DIR_SEPARATOR_2 */
+
+#ifndef PATH_SEPARATOR_2
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR)
+#else /* PATH_SEPARATOR_2 */
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2)
+#endif /* PATH_SEPARATOR_2 */
+
+#ifdef __CYGWIN__
+# define FOPEN_WB "wb"
+#endif
+
+#ifndef FOPEN_WB
+# define FOPEN_WB "w"
+#endif
+#ifndef _O_BINARY
+# define _O_BINARY 0
+#endif
+
+#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type)))
+#define XFREE(stale) do { \
+ if (stale) { free ((void *) stale); stale = 0; } \
+} while (0)
+
+#undef LTWRAPPER_DEBUGPRINTF
+#if defined DEBUGWRAPPER
+# define LTWRAPPER_DEBUGPRINTF(args) ltwrapper_debugprintf args
+static void
+ltwrapper_debugprintf (const char *fmt, ...)
+{
+ va_list args;
+ va_start (args, fmt);
+ (void) vfprintf (stderr, fmt, args);
+ va_end (args);
+}
+#else
+# define LTWRAPPER_DEBUGPRINTF(args)
+#endif
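+
+/* Illustrative use (an assumption, for documentation only): the double
+   parentheses let this C89-style macro forward a variable argument list,
+   e.g.
+
+     LTWRAPPER_DEBUGPRINTF (("(main) argv[0] : %s\n", argv[0]));
+
+   When DEBUGWRAPPER is not defined, the whole call expands to nothing. */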
+
+const char *program_name = NULL;
+
+void *xmalloc (size_t num);
+char *xstrdup (const char *string);
+const char *base_name (const char *name);
+char *find_executable (const char *wrapper);
+char *chase_symlinks (const char *pathspec);
+int make_executable (const char *path);
+int check_executable (const char *path);
+char *strendzap (char *str, const char *pat);
+void lt_fatal (const char *message, ...);
+void lt_setenv (const char *name, const char *value);
+char *lt_extend_str (const char *orig_value, const char *add, int to_end);
+void lt_opt_process_env_set (const char *arg);
+void lt_opt_process_env_prepend (const char *arg);
+void lt_opt_process_env_append (const char *arg);
+int lt_split_name_value (const char *arg, char** name, char** value);
+void lt_update_exe_path (const char *name, const char *value);
+void lt_update_lib_path (const char *name, const char *value);
+
+static const char *script_text_part1 =
+EOF
+
+ func_emit_wrapper_part1 yes |
+ $SED -e 's/\([\\"]\)/\\\1/g' \
+ -e 's/^/ "/' -e 's/$/\\n"/'
+ echo ";"
+ cat <<EOF
+
+static const char *script_text_part2 =
+EOF
+ func_emit_wrapper_part2 yes |
+ $SED -e 's/\([\\"]\)/\\\1/g' \
+ -e 's/^/ "/' -e 's/$/\\n"/'
+ echo ";"
+
+ cat <<EOF
+const char * MAGIC_EXE = "$magic_exe";
+const char * LIB_PATH_VARNAME = "$shlibpath_var";
+EOF
+
+ if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+ func_to_host_pathlist "$temp_rpath"
+ cat <<EOF
+const char * LIB_PATH_VALUE = "$func_to_host_pathlist_result";
+EOF
+ else
+ cat <<"EOF"
+const char * LIB_PATH_VALUE = "";
+EOF
+ fi
+
+ if test -n "$dllsearchpath"; then
+ func_to_host_pathlist "$dllsearchpath:"
+ cat <<EOF
+const char * EXE_PATH_VARNAME = "PATH";
+const char * EXE_PATH_VALUE = "$func_to_host_pathlist_result";
+EOF
+ else
+ cat <<"EOF"
+const char * EXE_PATH_VARNAME = "";
+const char * EXE_PATH_VALUE = "";
+EOF
+ fi
+
+ if test "$fast_install" = yes; then
+ cat <<EOF
+const char * TARGET_PROGRAM_NAME = "lt-$outputname"; /* hopefully, no .exe */
+EOF
+ else
+ cat <<EOF
+const char * TARGET_PROGRAM_NAME = "$outputname"; /* hopefully, no .exe */
+EOF
+ fi
+
+
+ cat <<"EOF"
+
+#define LTWRAPPER_OPTION_PREFIX "--lt-"
+#define LTWRAPPER_OPTION_PREFIX_LENGTH 5
+
+static const size_t opt_prefix_len = LTWRAPPER_OPTION_PREFIX_LENGTH;
+static const char *ltwrapper_option_prefix = LTWRAPPER_OPTION_PREFIX;
+
+static const char *dumpscript_opt = LTWRAPPER_OPTION_PREFIX "dump-script";
+
+static const size_t env_set_opt_len = LTWRAPPER_OPTION_PREFIX_LENGTH + 7;
+static const char *env_set_opt = LTWRAPPER_OPTION_PREFIX "env-set";
+ /* argument is putenv-style "foo=bar", value of foo is set to bar */
+
+static const size_t env_prepend_opt_len = LTWRAPPER_OPTION_PREFIX_LENGTH + 11;
+static const char *env_prepend_opt = LTWRAPPER_OPTION_PREFIX "env-prepend";
+ /* argument is putenv-style "foo=bar", new value of foo is bar${foo} */
+
+static const size_t env_append_opt_len = LTWRAPPER_OPTION_PREFIX_LENGTH + 10;
+static const char *env_append_opt = LTWRAPPER_OPTION_PREFIX "env-append";
+ /* argument is putenv-style "foo=bar", new value of foo is ${foo}bar */
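+
+/* Illustrative invocations (assumptions, for documentation only; the real
+   option handling is in main() below):
+
+     wrapper --lt-dump-script           print the embedded wrapper script
+     wrapper --lt-env-set FOO=bar       set FOO=bar before running the target
+     wrapper --lt-env-prepend PATH=/x:  new PATH is "/x:" followed by old PATH
+     wrapper --lt-env-append PATH=:/x   new PATH is old PATH followed by ":/x"
+*/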
+
+int
+main (int argc, char *argv[])
+{
+ char **newargz;
+ int newargc;
+ char *tmp_pathspec;
+ char *actual_cwrapper_path;
+ char *actual_cwrapper_name;
+ char *target_name;
+ char *lt_argv_zero;
+ intptr_t rval = 127;
+
+ int i;
+
+ program_name = (char *) xstrdup (base_name (argv[0]));
+ LTWRAPPER_DEBUGPRINTF (("(main) argv[0] : %s\n", argv[0]));
+ LTWRAPPER_DEBUGPRINTF (("(main) program_name : %s\n", program_name));
+
+ /* very simple arg parsing; don't want to rely on getopt */
+ for (i = 1; i < argc; i++)
+ {
+ if (strcmp (argv[i], dumpscript_opt) == 0)
+ {
+EOF
+ case "$host" in
+ *mingw* | *cygwin* )
+ # make stdout use "unix" line endings
+ echo " setmode(1,_O_BINARY);"
+ ;;
+ esac
+
+ cat <<"EOF"
+ printf ("%s", script_text_part1);
+ printf ("%s", script_text_part2);
+ return 0;
+ }
+ }
+
+ newargz = XMALLOC (char *, argc + 1);
+ tmp_pathspec = find_executable (argv[0]);
+ if (tmp_pathspec == NULL)
+ lt_fatal ("Couldn't find %s", argv[0]);
+ LTWRAPPER_DEBUGPRINTF (("(main) found exe (before symlink chase) at : %s\n",
+ tmp_pathspec));
+
+ actual_cwrapper_path = chase_symlinks (tmp_pathspec);
+ LTWRAPPER_DEBUGPRINTF (("(main) found exe (after symlink chase) at : %s\n",
+ actual_cwrapper_path));
+ XFREE (tmp_pathspec);
+
+ actual_cwrapper_name = xstrdup( base_name (actual_cwrapper_path));
+ strendzap (actual_cwrapper_path, actual_cwrapper_name);
+
+ /* wrapper name transforms */
+ strendzap (actual_cwrapper_name, ".exe");
+ tmp_pathspec = lt_extend_str (actual_cwrapper_name, ".exe", 1);
+ XFREE (actual_cwrapper_name);
+ actual_cwrapper_name = tmp_pathspec;
+ tmp_pathspec = 0;
+
+ /* target_name transforms -- use actual target program name; might have lt- prefix */
+ target_name = xstrdup (base_name (TARGET_PROGRAM_NAME));
+ strendzap (target_name, ".exe");
+ tmp_pathspec = lt_extend_str (target_name, ".exe", 1);
+ XFREE (target_name);
+ target_name = tmp_pathspec;
+ tmp_pathspec = 0;
+
+ LTWRAPPER_DEBUGPRINTF (("(main) libtool target name: %s\n",
+ target_name));
+EOF
+
+ cat <<EOF
+ newargz[0] =
+ XMALLOC (char, (strlen (actual_cwrapper_path) +
+ strlen ("$objdir") + 1 + strlen (actual_cwrapper_name) + 1));
+ strcpy (newargz[0], actual_cwrapper_path);
+ strcat (newargz[0], "$objdir");
+ strcat (newargz[0], "/");
+EOF
+
+ cat <<"EOF"
+ /* stop here, and copy so we don't have to do this twice */
+ tmp_pathspec = xstrdup (newargz[0]);
+
+ /* do NOT want the lt- prefix here, so use actual_cwrapper_name */
+ strcat (newargz[0], actual_cwrapper_name);
+
+ /* DO want the lt- prefix here if it exists, so use target_name */
+ lt_argv_zero = lt_extend_str (tmp_pathspec, target_name, 1);
+ XFREE (tmp_pathspec);
+ tmp_pathspec = NULL;
+EOF
+
+ case $host_os in
+ mingw*)
+ cat <<"EOF"
+ {
+ char* p;
+ while ((p = strchr (newargz[0], '\\')) != NULL)
+ {
+ *p = '/';
+ }
+ while ((p = strchr (lt_argv_zero, '\\')) != NULL)
+ {
+ *p = '/';
+ }
+ }
+EOF
+ ;;
+ esac
+
+ cat <<"EOF"
+ XFREE (target_name);
+ XFREE (actual_cwrapper_path);
+ XFREE (actual_cwrapper_name);
+
+ lt_setenv ("BIN_SH", "xpg4"); /* for Tru64 */
+ lt_setenv ("DUALCASE", "1"); /* for MSK sh */
+ lt_update_lib_path (LIB_PATH_VARNAME, LIB_PATH_VALUE);
+ lt_update_exe_path (EXE_PATH_VARNAME, EXE_PATH_VALUE);
+
+ newargc=0;
+ for (i = 1; i < argc; i++)
+ {
+ if (strncmp (argv[i], env_set_opt, env_set_opt_len) == 0)
+ {
+ if (argv[i][env_set_opt_len] == '=')
+ {
+ const char *p = argv[i] + env_set_opt_len + 1;
+ lt_opt_process_env_set (p);
+ }
+ else if (argv[i][env_set_opt_len] == '\0' && i + 1 < argc)
+ {
+ lt_opt_process_env_set (argv[++i]); /* don't copy */
+ }
+ else
+ lt_fatal ("%s missing required argument", env_set_opt);
+ continue;
+ }
+ if (strncmp (argv[i], env_prepend_opt, env_prepend_opt_len) == 0)
+ {
+ if (argv[i][env_prepend_opt_len] == '=')
+ {
+ const char *p = argv[i] + env_prepend_opt_len + 1;
+ lt_opt_process_env_prepend (p);
+ }
+ else if (argv[i][env_prepend_opt_len] == '\0' && i + 1 < argc)
+ {
+ lt_opt_process_env_prepend (argv[++i]); /* don't copy */
+ }
+ else
+ lt_fatal ("%s missing required argument", env_prepend_opt);
+ continue;
+ }
+ if (strncmp (argv[i], env_append_opt, env_append_opt_len) == 0)
+ {
+ if (argv[i][env_append_opt_len] == '=')
+ {
+ const char *p = argv[i] + env_append_opt_len + 1;
+ lt_opt_process_env_append (p);
+ }
+ else if (argv[i][env_append_opt_len] == '\0' && i + 1 < argc)
+ {
+ lt_opt_process_env_append (argv[++i]); /* don't copy */
+ }
+ else
+ lt_fatal ("%s missing required argument", env_append_opt);
+ continue;
+ }
+ if (strncmp (argv[i], ltwrapper_option_prefix, opt_prefix_len) == 0)
+ {
+ /* however, if there is an option in the LTWRAPPER_OPTION_PREFIX
+ namespace, but it is not one of the ones we know about and
+ have already dealt with, above (including dump-script), then
+ report an error. Otherwise, targets might begin to believe
+ they are allowed to use options in the LTWRAPPER_OPTION_PREFIX
+ namespace. The first time any user complains about this, we'll
+ need to make LTWRAPPER_OPTION_PREFIX a configure-time option
+ or a configure.ac-settable value.
+ */
+ lt_fatal ("Unrecognized option in %s namespace: '%s'",
+ ltwrapper_option_prefix, argv[i]);
+ }
+ /* otherwise ... */
+ newargz[++newargc] = xstrdup (argv[i]);
+ }
+ newargz[++newargc] = NULL;
+
+ LTWRAPPER_DEBUGPRINTF (("(main) lt_argv_zero : %s\n", (lt_argv_zero ? lt_argv_zero : "<NULL>")));
+ for (i = 0; i < newargc; i++)
+ {
+ LTWRAPPER_DEBUGPRINTF (("(main) newargz[%d] : %s\n", i, (newargz[i] ? newargz[i] : "<NULL>")));
+ }
+
+EOF
+
+ case $host_os in
+ mingw*)
+ cat <<"EOF"
+ /* execv doesn't actually work on mingw as expected on unix */
+ rval = _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz);
+ if (rval == -1)
+ {
+ /* failed to start process */
+ LTWRAPPER_DEBUGPRINTF (("(main) failed to launch target \"%s\": errno = %d\n", lt_argv_zero, errno));
+ return 127;
+ }
+ return rval;
+EOF
+ ;;
+ *)
+ cat <<"EOF"
+ execv (lt_argv_zero, newargz);
+ return rval; /* =127, but avoids unused variable warning */
+EOF
+ ;;
+ esac
+
+ cat <<"EOF"
+}
+
+void *
+xmalloc (size_t num)
+{
+ void *p = (void *) malloc (num);
+ if (!p)
+ lt_fatal ("Memory exhausted");
+
+ return p;
+}
+
+char *
+xstrdup (const char *string)
+{
+ return string ? strcpy ((char *) xmalloc (strlen (string) + 1),
+ string) : NULL;
+}
+
+const char *
+base_name (const char *name)
+{
+ const char *base;
+
+#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
+ /* Skip over the disk name in MSDOS pathnames. */
+ if (isalpha ((unsigned char) name[0]) && name[1] == ':')
+ name += 2;
+#endif
+
+ for (base = name; *name; name++)
+ if (IS_DIR_SEPARATOR (*name))
+ base = name + 1;
+ return base;
+}
+
+int
+check_executable (const char *path)
+{
+ struct stat st;
+
+ LTWRAPPER_DEBUGPRINTF (("(check_executable) : %s\n",
+ path ? (*path ? path : "EMPTY!") : "NULL!"));
+ if ((!path) || (!*path))
+ return 0;
+
+ if ((stat (path, &st) >= 0)
+ && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)))
+ return 1;
+ else
+ return 0;
+}
+
+int
+make_executable (const char *path)
+{
+ int rval = 0;
+ struct stat st;
+
+ LTWRAPPER_DEBUGPRINTF (("(make_executable) : %s\n",
+ path ? (*path ? path : "EMPTY!") : "NULL!"));
+ if ((!path) || (!*path))
+ return 0;
+
+ if (stat (path, &st) >= 0)
+ {
+ rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR);
+ }
+ return rval;
+}
+
+/* Searches for the full path of the wrapper. Returns
+ newly allocated full path name if found, NULL otherwise.
+ Does not chase symlinks, even on platforms that support them.
+*/
+char *
+find_executable (const char *wrapper)
+{
+ int has_slash = 0;
+ const char *p;
+ const char *p_next;
+ /* static buffer for getcwd */
+ char tmp[LT_PATHMAX + 1];
+ int tmp_len;
+ char *concat_name;
+
+ LTWRAPPER_DEBUGPRINTF (("(find_executable) : %s\n",
+ wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!"));
+
+ if ((wrapper == NULL) || (*wrapper == '\0'))
+ return NULL;
+
+ /* Absolute path? */
+#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
+ if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':')
+ {
+ concat_name = xstrdup (wrapper);
+ if (check_executable (concat_name))
+ return concat_name;
+ XFREE (concat_name);
+ }
+ else
+ {
+#endif
+ if (IS_DIR_SEPARATOR (wrapper[0]))
+ {
+ concat_name = xstrdup (wrapper);
+ if (check_executable (concat_name))
+ return concat_name;
+ XFREE (concat_name);
+ }
+#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
+ }
+#endif
+
+ for (p = wrapper; *p; p++)
+ if (*p == '/')
+ {
+ has_slash = 1;
+ break;
+ }
+ if (!has_slash)
+ {
+ /* no slashes; search PATH */
+ const char *path = getenv ("PATH");
+ if (path != NULL)
+ {
+ for (p = path; *p; p = p_next)
+ {
+ const char *q;
+ size_t p_len;
+ for (q = p; *q; q++)
+ if (IS_PATH_SEPARATOR (*q))
+ break;
+ p_len = q - p;
+ p_next = (*q == '\0' ? q : q + 1);
+ if (p_len == 0)
+ {
+ /* empty path: current directory */
+ if (getcwd (tmp, LT_PATHMAX) == NULL)
+ lt_fatal ("getcwd failed");
+ tmp_len = strlen (tmp);
+ concat_name =
+ XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
+ memcpy (concat_name, tmp, tmp_len);
+ concat_name[tmp_len] = '/';
+ strcpy (concat_name + tmp_len + 1, wrapper);
+ }
+ else
+ {
+ concat_name =
+ XMALLOC (char, p_len + 1 + strlen (wrapper) + 1);
+ memcpy (concat_name, p, p_len);
+ concat_name[p_len] = '/';
+ strcpy (concat_name + p_len + 1, wrapper);
+ }
+ if (check_executable (concat_name))
+ return concat_name;
+ XFREE (concat_name);
+ }
+ }
+ /* not found in PATH; assume curdir */
+ }
+ /* Relative path | not found in path: prepend cwd */
+ if (getcwd (tmp, LT_PATHMAX) == NULL)
+ lt_fatal ("getcwd failed");
+ tmp_len = strlen (tmp);
+ concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
+ memcpy (concat_name, tmp, tmp_len);
+ concat_name[tmp_len] = '/';
+ strcpy (concat_name + tmp_len + 1, wrapper);
+
+ if (check_executable (concat_name))
+ return concat_name;
+ XFREE (concat_name);
+ return NULL;
+}
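+
+/* Illustrative use (an assumption, for documentation only):
+
+     char *p = find_executable (argv[0]);
+
+   searches the PATH components (falling back to the current directory) and
+   returns a newly allocated path to an executable candidate, or NULL. */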
+
+char *
+chase_symlinks (const char *pathspec)
+{
+#ifndef S_ISLNK
+ return xstrdup (pathspec);
+#else
+ char buf[LT_PATHMAX];
+ struct stat s;
+ char *tmp_pathspec = xstrdup (pathspec);
+ char *p;
+ int has_symlinks = 0;
+ while (strlen (tmp_pathspec) && !has_symlinks)
+ {
+ LTWRAPPER_DEBUGPRINTF (("checking path component for symlinks: %s\n",
+ tmp_pathspec));
+ if (lstat (tmp_pathspec, &s) == 0)
+ {
+ if (S_ISLNK (s.st_mode) != 0)
+ {
+ has_symlinks = 1;
+ break;
+ }
+
+ /* search backwards for last DIR_SEPARATOR */
+ p = tmp_pathspec + strlen (tmp_pathspec) - 1;
+ while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
+ p--;
+ if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
+ {
+ /* no more DIR_SEPARATORS left */
+ break;
+ }
+ *p = '\0';
+ }
+ else
+ {
+ char *errstr = strerror (errno);
+ lt_fatal ("Error accessing file %s (%s)", tmp_pathspec, errstr);
+ }
+ }
+ XFREE (tmp_pathspec);
+
+ if (!has_symlinks)
+ {
+ return xstrdup (pathspec);
+ }
+
+ tmp_pathspec = realpath (pathspec, buf);
+ if (tmp_pathspec == 0)
+ {
+ lt_fatal ("Could not follow symlinks for %s", pathspec);
+ }
+ return xstrdup (tmp_pathspec);
+#endif
+}
+
+char *
+strendzap (char *str, const char *pat)
+{
+ size_t len, patlen;
+
+ assert (str != NULL);
+ assert (pat != NULL);
+
+ len = strlen (str);
+ patlen = strlen (pat);
+
+ if (patlen <= len)
+ {
+ str += len - patlen;
+ if (strcmp (str, pat) == 0)
+ *str = '\0';
+ }
+ return str;
+}
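+
+/* Illustrative behaviour (an assumption, for documentation only):
+
+     char buf[] = "wrapper.exe";
+     strendzap (buf, ".exe");      -- buf is now "wrapper"
+
+   The pattern is removed only when it is a suffix of str, and str is
+   modified in place. */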
+
+static void
+lt_error_core (int exit_status, const char *mode,
+ const char *message, va_list ap)
+{
+ fprintf (stderr, "%s: %s: ", program_name, mode);
+ vfprintf (stderr, message, ap);
+ fprintf (stderr, ".\n");
+
+ if (exit_status >= 0)
+ exit (exit_status);
+}
+
+void
+lt_fatal (const char *message, ...)
+{
+ va_list ap;
+ va_start (ap, message);
+ lt_error_core (EXIT_FAILURE, "FATAL", message, ap);
+ va_end (ap);
+}
+
+void
+lt_setenv (const char *name, const char *value)
+{
+ LTWRAPPER_DEBUGPRINTF (("(lt_setenv) setting '%s' to '%s'\n",
+ (name ? name : "<NULL>"),
+ (value ? value : "<NULL>")));
+ {
+#ifdef HAVE_SETENV
+ /* always make a copy, for consistency with !HAVE_SETENV */
+ char *str = xstrdup (value);
+ setenv (name, str, 1);
+#else
+ int len = strlen (name) + 1 + strlen (value) + 1;
+ char *str = XMALLOC (char, len);
+ sprintf (str, "%s=%s", name, value);
+ if (putenv (str) != EXIT_SUCCESS)
+ {
+ XFREE (str);
+ }
+#endif
+ }
+}
+
+char *
+lt_extend_str (const char *orig_value, const char *add, int to_end)
+{
+ char *new_value;
+ if (orig_value && *orig_value)
+ {
+ int orig_value_len = strlen (orig_value);
+ int add_len = strlen (add);
+ new_value = XMALLOC (char, add_len + orig_value_len + 1);
+ if (to_end)
+ {
+ strcpy (new_value, orig_value);
+ strcpy (new_value + orig_value_len, add);
+ }
+ else
+ {
+ strcpy (new_value, add);
+ strcpy (new_value + add_len, orig_value);
+ }
+ }
+ else
+ {
+ new_value = xstrdup (add);
+ }
+ return new_value;
+}
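+
+/* Illustrative behaviour (an assumption, for documentation only):
+
+     lt_extend_str ("bar", "foo", 0)   returns "foobar"   (add prepended)
+     lt_extend_str ("bar", "foo", 1)   returns "barfoo"   (add appended)
+
+   The result is always newly allocated; a NULL or empty orig_value simply
+   yields a copy of add. */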
+
+int
+lt_split_name_value (const char *arg, char** name, char** value)
+{
+ const char *p;
+ int len;
+ if (!arg || !*arg)
+ return 1;
+
+ p = strchr (arg, (int)'=');
+
+ if (!p)
+ return 1;
+
+ *value = xstrdup (++p);
+
+ len = strlen (arg) - strlen (*value);
+ *name = XMALLOC (char, len);
+ strncpy (*name, arg, len-1);
+ (*name)[len - 1] = '\0';
+
+ return 0;
+}
+
+void
+lt_opt_process_env_set (const char *arg)
+{
+ char *name = NULL;
+ char *value = NULL;
+
+ if (lt_split_name_value (arg, &name, &value) != 0)
+ {
+ XFREE (name);
+ XFREE (value);
+ lt_fatal ("bad argument for %s: '%s'", env_set_opt, arg);
+ }
+
+ lt_setenv (name, value);
+ XFREE (name);
+ XFREE (value);
+}
+
+void
+lt_opt_process_env_prepend (const char *arg)
+{
+ char *name = NULL;
+ char *value = NULL;
+ char *new_value = NULL;
+
+ if (lt_split_name_value (arg, &name, &value) != 0)
+ {
+ XFREE (name);
+ XFREE (value);
+ lt_fatal ("bad argument for %s: '%s'", env_prepend_opt, arg);
+ }
+
+ new_value = lt_extend_str (getenv (name), value, 0);
+ lt_setenv (name, new_value);
+ XFREE (new_value);
+ XFREE (name);
+ XFREE (value);
+}
+
+void
+lt_opt_process_env_append (const char *arg)
+{
+ char *name = NULL;
+ char *value = NULL;
+ char *new_value = NULL;
+
+ if (lt_split_name_value (arg, &name, &value) != 0)
+ {
+ XFREE (name);
+ XFREE (value);
+ lt_fatal ("bad argument for %s: '%s'", env_append_opt, arg);
+ }
+
+ new_value = lt_extend_str (getenv (name), value, 1);
+ lt_setenv (name, new_value);
+ XFREE (new_value);
+ XFREE (name);
+ XFREE (value);
+}
+
+void
+lt_update_exe_path (const char *name, const char *value)
+{
+ LTWRAPPER_DEBUGPRINTF (("(lt_update_exe_path) modifying '%s' by prepending '%s'\n",
+ (name ? name : "<NULL>"),
+ (value ? value : "<NULL>")));
+
+ if (name && *name && value && *value)
+ {
+ char *new_value = lt_extend_str (getenv (name), value, 0);
+ /* some systems can't cope with a ':'-terminated path #' */
+ int len = strlen (new_value);
+ while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1]))
+ {
+ new_value[len-1] = '\0';
+ }
+ lt_setenv (name, new_value);
+ XFREE (new_value);
+ }
+}
+
+void
+lt_update_lib_path (const char *name, const char *value)
+{
+ LTWRAPPER_DEBUGPRINTF (("(lt_update_lib_path) modifying '%s' by prepending '%s'\n",
+ (name ? name : "<NULL>"),
+ (value ? value : "<NULL>")));
+
+ if (name && *name && value && *value)
+ {
+ char *new_value = lt_extend_str (getenv (name), value, 0);
+ lt_setenv (name, new_value);
+ XFREE (new_value);
+ }
+}
+
+
+EOF
+}
+# end: func_emit_cwrapperexe_src
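+
+# Illustrative sketch (an assumption, not part of the upstream script): the
+# caller in func_mode_link is expected to write this C source to a file and
+# compile it into the wrapper executable, roughly:
+#
+#   func_emit_cwrapperexe_src > "$cwrappersource"
+#   $LTCC $LTCFLAGS -o "$cwrapper" "$cwrappersource"
+#
+# ($cwrapper, $LTCC and $LTCFLAGS are assumed names here.)  The resulting
+# executable embeds the script text from func_emit_wrapper_part1/part2 and
+# spawns the real program.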
+
+# func_mode_link arg...
+func_mode_link ()
+{
+ $opt_debug
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+ # It is impossible to link a dll without this setting, and
+ # we shouldn't force the makefile maintainer to figure out
+ # which system we are compiling for in order to pass an extra
+ # flag for every libtool invocation.
+ # allow_undefined=no
+
+ # FIXME: Unfortunately, there are problems with the above when trying
+ # to make a dll which has undefined symbols, in which case not
+ # even a static library is built. For now, we need to specify
+ # -no-undefined on the libtool link line when we can be certain
+ # that all symbols are satisfied, otherwise we get a static library.
+ allow_undefined=yes
+ ;;
+ *)
+ allow_undefined=yes
+ ;;
+ esac
+ libtool_args=$nonopt
+ base_compile="$nonopt $@"
+ compile_command=$nonopt
+ finalize_command=$nonopt
+
+ compile_rpath=
+ finalize_rpath=
+ compile_shlibpath=
+ finalize_shlibpath=
+ convenience=
+ old_convenience=
+ deplibs=
+ old_deplibs=
+ compiler_flags=
+ linker_flags=
+ dllsearchpath=
+ lib_search_path=`pwd`
+ inst_prefix_dir=
+ new_inherited_linker_flags=
+
+ avoid_version=no
+ dlfiles=
+ dlprefiles=
+ dlself=no
+ export_dynamic=no
+ export_symbols=
+ export_symbols_regex=
+ generated=
+ libobjs=
+ ltlibs=
+ module=no
+ no_install=no
+ objs=
+ non_pic_objects=
+ precious_files_regex=
+ prefer_static_libs=no
+ preload=no
+ prev=
+ prevarg=
+ release=
+ rpath=
+ xrpath=
+ perm_rpath=
+ temp_rpath=
+ thread_safe=no
+ vinfo=
+ vinfo_number=no
+ weak_libs=
+ single_module="${wl}-single_module"
+ func_infer_tag $base_compile
+
+ # We need to know -static, to get the right output filenames.
+ for arg
+ do
+ case $arg in
+ -shared)
+ test "$build_libtool_libs" != yes && \
+ func_fatal_configuration "can not build a shared library"
+ build_old_libs=no
+ break
+ ;;
+ -all-static | -static | -static-libtool-libs)
+ case $arg in
+ -all-static)
+ if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
+ func_warning "complete static linking is impossible in this configuration"
+ fi
+ if test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ prefer_static_libs=yes
+ ;;
+ -static)
+ if test -z "$pic_flag" && test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ prefer_static_libs=built
+ ;;
+ -static-libtool-libs)
+ if test -z "$pic_flag" && test -n "$link_static_flag"; then
+ dlopen_self=$dlopen_self_static
+ fi
+ prefer_static_libs=yes
+ ;;
+ esac
+ build_libtool_libs=no
+ build_old_libs=yes
+ break
+ ;;
+ esac
+ done
+
+ # See if our shared archives depend on static archives.
+ test -n "$old_archive_from_new_cmds" && build_old_libs=yes
+
+ # Go through the arguments, transforming them on the way.
+ while test "$#" -gt 0; do
+ arg="$1"
+ shift
+ func_quote_for_eval "$arg"
+ qarg=$func_quote_for_eval_unquoted_result
+ func_append libtool_args " $func_quote_for_eval_result"
+
+ # If the previous option needs an argument, assign it.
+ if test -n "$prev"; then
+ case $prev in
+ output)
+ func_append compile_command " @OUTPUT@"
+ func_append finalize_command " @OUTPUT@"
+ ;;
+ esac
+
+ case $prev in
+ dlfiles|dlprefiles)
+ if test "$preload" = no; then
+ # Add the symbol object into the linking commands.
+ func_append compile_command " @SYMFILE@"
+ func_append finalize_command " @SYMFILE@"
+ preload=yes
+ fi
+ case $arg in
+ *.la | *.lo) ;; # We handle these cases below.
+ force)
+ if test "$dlself" = no; then
+ dlself=needless
+ export_dynamic=yes
+ fi
+ prev=
+ continue
+ ;;
+ self)
+ if test "$prev" = dlprefiles; then
+ dlself=yes
+ elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
+ dlself=yes
+ else
+ dlself=needless
+ export_dynamic=yes
+ fi
+ prev=
+ continue
+ ;;
+ *)
+ if test "$prev" = dlfiles; then
+ dlfiles="$dlfiles $arg"
+ else
+ dlprefiles="$dlprefiles $arg"
+ fi
+ prev=
+ continue
+ ;;
+ esac
+ ;;
+ expsyms)
+ export_symbols="$arg"
+ test -f "$arg" \
+ || func_fatal_error "symbol file \`$arg' does not exist"
+ prev=
+ continue
+ ;;
+ expsyms_regex)
+ export_symbols_regex="$arg"
+ prev=
+ continue
+ ;;
+ framework)
+ case $host in
+ *-*-darwin*)
+ case "$deplibs " in
+ *" $qarg.ltframework "*) ;;
+ *) deplibs="$deplibs $qarg.ltframework" # this is fixed later
+ ;;
+ esac
+ ;;
+ esac
+ prev=
+ continue
+ ;;
+ inst_prefix)
+ inst_prefix_dir="$arg"
+ prev=
+ continue
+ ;;
+ objectlist)
+ if test -f "$arg"; then
+ save_arg=$arg
+ moreargs=
+ for fil in `cat "$save_arg"`
+ do
+# moreargs="$moreargs $fil"
+ arg=$fil
+ # A libtool-controlled object.
+
+ # Check to see that this really is a libtool object.
+ if func_lalib_unsafe_p "$arg"; then
+ pic_object=
+ non_pic_object=
+
+ # Read the .lo file
+ func_source "$arg"
+
+ if test -z "$pic_object" ||
+ test -z "$non_pic_object" ||
+ test "$pic_object" = none &&
+ test "$non_pic_object" = none; then
+ func_fatal_error "cannot find name of object for \`$arg'"
+ fi
+
+ # Extract subdirectory from the argument.
+ func_dirname "$arg" "/" ""
+ xdir="$func_dirname_result"
+
+ if test "$pic_object" != none; then
+ # Prepend the subdirectory the object is found in.
+ pic_object="$xdir$pic_object"
+
+ if test "$prev" = dlfiles; then
+ if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
+ dlfiles="$dlfiles $pic_object"
+ prev=
+ continue
+ else
+ # If libtool objects are unsupported, then we need to preload.
+ prev=dlprefiles
+ fi
+ fi
+
+ # CHECK ME: I think I busted this. -Ossama
+ if test "$prev" = dlprefiles; then
+ # Preload the old-style object.
+ dlprefiles="$dlprefiles $pic_object"
+ prev=
+ fi
+
+ # A PIC object.
+ func_append libobjs " $pic_object"
+ arg="$pic_object"
+ fi
+
+ # Non-PIC object.
+ if test "$non_pic_object" != none; then
+ # Prepend the subdirectory the object is found in.
+ non_pic_object="$xdir$non_pic_object"
+
+ # A standard non-PIC object
+ func_append non_pic_objects " $non_pic_object"
+ if test -z "$pic_object" || test "$pic_object" = none ; then
+ arg="$non_pic_object"
+ fi
+ else
+ # If the PIC object exists, use it instead.
+ # $xdir was prepended to $pic_object above.
+ non_pic_object="$pic_object"
+ func_append non_pic_objects " $non_pic_object"
+ fi
+ else
+ # Only an error if not doing a dry-run.
+ if $opt_dry_run; then
+ # Extract subdirectory from the argument.
+ func_dirname "$arg" "/" ""
+ xdir="$func_dirname_result"
+
+ func_lo2o "$arg"
+ pic_object=$xdir$objdir/$func_lo2o_result
+ non_pic_object=$xdir$func_lo2o_result
+ func_append libobjs " $pic_object"
+ func_append non_pic_objects " $non_pic_object"
+ else
+ func_fatal_error "\`$arg' is not a valid libtool object"
+ fi
+ fi
+ done
+ else
+ func_fatal_error "link input file \`$arg' does not exist"
+ fi
+ arg=$save_arg
+ prev=
+ continue
+ ;;
+ precious_regex)
+ precious_files_regex="$arg"
+ prev=
+ continue
+ ;;
+ release)
+ release="-$arg"
+ prev=
+ continue
+ ;;
+ rpath | xrpath)
+ # We need an absolute path.
+ case $arg in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ func_fatal_error "only absolute run-paths are allowed"
+ ;;
+ esac
+ if test "$prev" = rpath; then
+ case "$rpath " in
+ *" $arg "*) ;;
+ *) rpath="$rpath $arg" ;;
+ esac
+ else
+ case "$xrpath " in
+ *" $arg "*) ;;
+ *) xrpath="$xrpath $arg" ;;
+ esac
+ fi
+ prev=
+ continue
+ ;;
+ shrext)
+ shrext_cmds="$arg"
+ prev=
+ continue
+ ;;
+ weak)
+ weak_libs="$weak_libs $arg"
+ prev=
+ continue
+ ;;
+ xcclinker)
+ linker_flags="$linker_flags $qarg"
+ compiler_flags="$compiler_flags $qarg"
+ prev=
+ func_append compile_command " $qarg"
+ func_append finalize_command " $qarg"
+ continue
+ ;;
+ xcompiler)
+ compiler_flags="$compiler_flags $qarg"
+ prev=
+ func_append compile_command " $qarg"
+ func_append finalize_command " $qarg"
+ continue
+ ;;
+ xlinker)
+ linker_flags="$linker_flags $qarg"
+ compiler_flags="$compiler_flags $wl$qarg"
+ prev=
+ func_append compile_command " $wl$qarg"
+ func_append finalize_command " $wl$qarg"
+ continue
+ ;;
+ *)
+ eval "$prev=\"\$arg\""
+ prev=
+ continue
+ ;;
+ esac
+ fi # test -n "$prev"
+
+ prevarg="$arg"
+
+ case $arg in
+ -all-static)
+ if test -n "$link_static_flag"; then
+ # See comment for -static flag below, for more details.
+ func_append compile_command " $link_static_flag"
+ func_append finalize_command " $link_static_flag"
+ fi
+ continue
+ ;;
+
+ -allow-undefined)
+ # FIXME: remove this flag sometime in the future.
+ func_fatal_error "\`-allow-undefined' must not be used because it is the default"
+ ;;
+
+ -avoid-version)
+ avoid_version=yes
+ continue
+ ;;
+
+ -dlopen)
+ prev=dlfiles
+ continue
+ ;;
+
+ -dlpreopen)
+ prev=dlprefiles
+ continue
+ ;;
+
+ -export-dynamic)
+ export_dynamic=yes
+ continue
+ ;;
+
+ -export-symbols | -export-symbols-regex)
+ if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+ func_fatal_error "more than one -exported-symbols argument is not allowed"
+ fi
+ if test "X$arg" = "X-export-symbols"; then
+ prev=expsyms
+ else
+ prev=expsyms_regex
+ fi
+ continue
+ ;;
+
+ -framework)
+ prev=framework
+ continue
+ ;;
+
+ -inst-prefix-dir)
+ prev=inst_prefix
+ continue
+ ;;
+
+ # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
+ # so, if we see these flags be careful not to treat them like -L
+ -L[A-Z][A-Z]*:*)
+ case $with_gcc/$host in
+ no/*-*-irix* | /*-*-irix*)
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ ;;
+ esac
+ continue
+ ;;
+
+ -L*)
+ func_stripname '-L' '' "$arg"
+ dir=$func_stripname_result
+ if test -z "$dir"; then
+ if test "$#" -gt 0; then
+ func_fatal_error "require no space between \`-L' and \`$1'"
+ else
+ func_fatal_error "need path for \`-L' option"
+ fi
+ fi
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ test -z "$absdir" && \
+ func_fatal_error "cannot determine absolute directory name of \`$dir'"
+ dir="$absdir"
+ ;;
+ esac
+ case "$deplibs " in
+ *" -L$dir "*) ;;
+ *)
+ deplibs="$deplibs -L$dir"
+ lib_search_path="$lib_search_path $dir"
+ ;;
+ esac
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+ testbindir=`$ECHO "X$dir" | $Xsed -e 's*/lib$*/bin*'`
+ case :$dllsearchpath: in
+ *":$dir:"*) ;;
+ ::) dllsearchpath=$dir;;
+ *) dllsearchpath="$dllsearchpath:$dir";;
+ esac
+ case :$dllsearchpath: in
+ *":$testbindir:"*) ;;
+ ::) dllsearchpath=$testbindir;;
+ *) dllsearchpath="$dllsearchpath:$testbindir";;
+ esac
+ ;;
+ esac
+ continue
+ ;;
+
+ -l*)
+ if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc*)
+ # These systems don't actually have a C or math library (as such)
+ continue
+ ;;
+ *-*-os2*)
+ # These systems don't actually have a C library (as such)
+ test "X$arg" = "X-lc" && continue
+ ;;
+ *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+ # Do not include libc due to us having libc/libc_r.
+ test "X$arg" = "X-lc" && continue
+ ;;
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # Rhapsody C and math libraries are in the System framework
+ deplibs="$deplibs System.ltframework"
+ continue
+ ;;
+ *-*-sco3.2v5* | *-*-sco5v6*)
+ # Causes problems with __ctype
+ test "X$arg" = "X-lc" && continue
+ ;;
+ *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+ # Compiler inserts libc in the correct place for threads to work
+ test "X$arg" = "X-lc" && continue
+ ;;
+ esac
+ elif test "X$arg" = "X-lc_r"; then
+ case $host in
+ *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+ # Do not include libc_r directly, use -pthread flag.
+ continue
+ ;;
+ esac
+ fi
+ deplibs="$deplibs $arg"
+ continue
+ ;;
+
+ -module)
+ module=yes
+ continue
+ ;;
+
+ # Tru64 UNIX uses -model [arg] to determine the layout of C++
+ # classes, name mangling, and exception handling.
+ # Darwin uses the -arch flag to determine output architecture.
+ -model|-arch|-isysroot)
+ compiler_flags="$compiler_flags $arg"
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ prev=xcompiler
+ continue
+ ;;
+
+ -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads)
+ compiler_flags="$compiler_flags $arg"
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ case "$new_inherited_linker_flags " in
+ *" $arg "*) ;;
+ * ) new_inherited_linker_flags="$new_inherited_linker_flags $arg" ;;
+ esac
+ continue
+ ;;
+
+ -multi_module)
+ single_module="${wl}-multi_module"
+ continue
+ ;;
+
+ -no-fast-install)
+ fast_install=no
+ continue
+ ;;
+
+ -no-install)
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*)
+ # The PATH hackery in wrapper scripts is required on Windows
+ # and Darwin in order for the loader to find any dlls it needs.
+ func_warning "\`-no-install' is ignored for $host"
+ func_warning "assuming \`-no-fast-install' instead"
+ fast_install=no
+ ;;
+ *) no_install=yes ;;
+ esac
+ continue
+ ;;
+
+ -no-undefined)
+ allow_undefined=no
+ continue
+ ;;
+
+ -objectlist)
+ prev=objectlist
+ continue
+ ;;
+
+ -o) prev=output ;;
+
+ -precious-files-regex)
+ prev=precious_regex
+ continue
+ ;;
+
+ -release)
+ prev=release
+ continue
+ ;;
+
+ -rpath)
+ prev=rpath
+ continue
+ ;;
+
+ -R)
+ prev=xrpath
+ continue
+ ;;
+
+ -R*)
+ func_stripname '-R' '' "$arg"
+ dir=$func_stripname_result
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) ;;
+ *)
+ func_fatal_error "only absolute run-paths are allowed"
+ ;;
+ esac
+ case "$xrpath " in
+ *" $dir "*) ;;
+ *) xrpath="$xrpath $dir" ;;
+ esac
+ continue
+ ;;
+
+ -shared)
+ # The effects of -shared are defined in a previous loop.
+ continue
+ ;;
+
+ -shrext)
+ prev=shrext
+ continue
+ ;;
+
+ -static | -static-libtool-libs)
+ # The effects of -static are defined in a previous loop.
+ # We used to do the same as -all-static on platforms that
+ # didn't have a PIC flag, but the assumption that the effects
+ # would be equivalent was wrong. It would break on at least
+ # Digital Unix and AIX.
+ continue
+ ;;
+
+ -thread-safe)
+ thread_safe=yes
+ continue
+ ;;
+
+ -version-info)
+ prev=vinfo
+ continue
+ ;;
+
+ -version-number)
+ prev=vinfo
+ vinfo_number=yes
+ continue
+ ;;
+
+ -weak)
+ prev=weak
+ continue
+ ;;
+
+ -Wc,*)
+ func_stripname '-Wc,' '' "$arg"
+ args=$func_stripname_result
+ arg=
+ save_ifs="$IFS"; IFS=','
+ for flag in $args; do
+ IFS="$save_ifs"
+ func_quote_for_eval "$flag"
+ arg="$arg $wl$func_quote_for_eval_result"
+ compiler_flags="$compiler_flags $func_quote_for_eval_result"
+ done
+ IFS="$save_ifs"
+ func_stripname ' ' '' "$arg"
+ arg=$func_stripname_result
+ ;;
+
+ -Wl,*)
+ func_stripname '-Wl,' '' "$arg"
+ args=$func_stripname_result
+ arg=
+ save_ifs="$IFS"; IFS=','
+ for flag in $args; do
+ IFS="$save_ifs"
+ func_quote_for_eval "$flag"
+ arg="$arg $wl$func_quote_for_eval_result"
+ compiler_flags="$compiler_flags $wl$func_quote_for_eval_result"
+ linker_flags="$linker_flags $func_quote_for_eval_result"
+ done
+ IFS="$save_ifs"
+ func_stripname ' ' '' "$arg"
+ arg=$func_stripname_result
+ ;;
+
+ -Xcompiler)
+ prev=xcompiler
+ continue
+ ;;
+
+ -Xlinker)
+ prev=xlinker
+ continue
+ ;;
+
+ -XCClinker)
+ prev=xcclinker
+ continue
+ ;;
+
+ # -msg_* for osf cc
+ -msg_*)
+ func_quote_for_eval "$arg"
+ arg="$func_quote_for_eval_result"
+ ;;
+
+ # -64, -mips[0-9] enable 64-bit mode on the SGI compiler
+ # -r[0-9][0-9]* specifies the processor on the SGI compiler
+ # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler
+ # +DA*, +DD* enable 64-bit mode on the HP compiler
+ # -q* pass through compiler args for the IBM compiler
+ # -m*, -t[45]*, -txscale* pass through architecture-specific
+ # compiler args for GCC
+ # -F/path gives path to uninstalled frameworks, gcc on darwin
+ # -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC
+ # @file GCC response files
+ -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \
+ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*)
+ func_quote_for_eval "$arg"
+ arg="$func_quote_for_eval_result"
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ compiler_flags="$compiler_flags $arg"
+ continue
+ ;;
+
+ # Some other compiler flag.
+ -* | +*)
+ func_quote_for_eval "$arg"
+ arg="$func_quote_for_eval_result"
+ ;;
+
+ *.$objext)
+ # A standard object.
+ objs="$objs $arg"
+ ;;
+
+ *.lo)
+ # A libtool-controlled object.
+
+ # Check to see that this really is a libtool object.
+ if func_lalib_unsafe_p "$arg"; then
+ pic_object=
+ non_pic_object=
+
+ # Read the .lo file
+ func_source "$arg"
+
+ if test -z "$pic_object" ||
+ test -z "$non_pic_object" ||
+ test "$pic_object" = none &&
+ test "$non_pic_object" = none; then
+ func_fatal_error "cannot find name of object for \`$arg'"
+ fi
+
+ # Extract subdirectory from the argument.
+ func_dirname "$arg" "/" ""
+ xdir="$func_dirname_result"
+
+ if test "$pic_object" != none; then
+ # Prepend the subdirectory the object is found in.
+ pic_object="$xdir$pic_object"
+
+ if test "$prev" = dlfiles; then
+ if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
+ dlfiles="$dlfiles $pic_object"
+ prev=
+ continue
+ else
+ # If libtool objects are unsupported, then we need to preload.
+ prev=dlprefiles
+ fi
+ fi
+
+ # CHECK ME: I think I busted this. -Ossama
+ if test "$prev" = dlprefiles; then
+ # Preload the old-style object.
+ dlprefiles="$dlprefiles $pic_object"
+ prev=
+ fi
+
+ # A PIC object.
+ func_append libobjs " $pic_object"
+ arg="$pic_object"
+ fi
+
+ # Non-PIC object.
+ if test "$non_pic_object" != none; then
+ # Prepend the subdirectory the object is found in.
+ non_pic_object="$xdir$non_pic_object"
+
+ # A standard non-PIC object
+ func_append non_pic_objects " $non_pic_object"
+ if test -z "$pic_object" || test "$pic_object" = none ; then
+ arg="$non_pic_object"
+ fi
+ else
+ # If the PIC object exists, use it instead.
+ # $xdir was prepended to $pic_object above.
+ non_pic_object="$pic_object"
+ func_append non_pic_objects " $non_pic_object"
+ fi
+ else
+ # Only an error if not doing a dry-run.
+ if $opt_dry_run; then
+ # Extract subdirectory from the argument.
+ func_dirname "$arg" "/" ""
+ xdir="$func_dirname_result"
+
+ func_lo2o "$arg"
+ pic_object=$xdir$objdir/$func_lo2o_result
+ non_pic_object=$xdir$func_lo2o_result
+ func_append libobjs " $pic_object"
+ func_append non_pic_objects " $non_pic_object"
+ else
+ func_fatal_error "\`$arg' is not a valid libtool object"
+ fi
+ fi
+ ;;
+
+ *.$libext)
+ # An archive.
+ deplibs="$deplibs $arg"
+ old_deplibs="$old_deplibs $arg"
+ continue
+ ;;
+
+ *.la)
+ # A libtool-controlled library.
+
+ if test "$prev" = dlfiles; then
+ # This library was specified with -dlopen.
+ dlfiles="$dlfiles $arg"
+ prev=
+ elif test "$prev" = dlprefiles; then
+ # The library was specified with -dlpreopen.
+ dlprefiles="$dlprefiles $arg"
+ prev=
+ else
+ deplibs="$deplibs $arg"
+ fi
+ continue
+ ;;
+
+ # Some other compiler argument.
+ *)
+ # Unknown arguments in both finalize_command and compile_command need
+ # to be aesthetically quoted because they are evaled later.
+ func_quote_for_eval "$arg"
+ arg="$func_quote_for_eval_result"
+ ;;
+ esac # arg
+
+ # Now actually substitute the argument into the commands.
+ if test -n "$arg"; then
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ fi
+ done # argument parsing loop
+
+ test -n "$prev" && \
+ func_fatal_help "the \`$prevarg' option requires an argument"
+
+ if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
+ eval arg=\"$export_dynamic_flag_spec\"
+ func_append compile_command " $arg"
+ func_append finalize_command " $arg"
+ fi
+
+ oldlibs=
+ # calculate the name of the file, without its directory
+ func_basename "$output"
+ outputname="$func_basename_result"
+ libobjs_save="$libobjs"
+
+ if test -n "$shlibpath_var"; then
+ # get the directories listed in $shlibpath_var
+ eval shlib_search_path=\`\$ECHO \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
+ else
+ shlib_search_path=
+ fi
+ eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
+ eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
+
+ func_dirname "$output" "/" ""
+ output_objdir="$func_dirname_result$objdir"
+ # Create the object directory.
+ func_mkdir_p "$output_objdir"
+
+ # Determine the type of output
+ case $output in
+ "")
+ func_fatal_help "you must specify an output file"
+ ;;
+ *.$libext) linkmode=oldlib ;;
+ *.lo | *.$objext) linkmode=obj ;;
+ *.la) linkmode=lib ;;
+ *) linkmode=prog ;; # Anything else should be a program.
+ esac
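+
+ # For example (illustration only, assuming the usual objext=o, libext=a):
+ # '-o libfoo.la' gives linkmode=lib, '-o foo.lo' or '-o foo.o' gives
+ # linkmode=obj, '-o libfoo.a' gives linkmode=oldlib, and anything else
+ # (say '-o foo') is treated as a program.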
+
+ specialdeplibs=
+
+ libs=
+ # Find all interdependent deplibs by searching for libraries
+ # that are linked more than once (e.g. -la -lb -la)
+ for deplib in $deplibs; do
+ if $opt_duplicate_deps ; then
+ case "$libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ fi
+ libs="$libs $deplib"
+ done
+
+ if test "$linkmode" = lib; then
+ libs="$predeps $libs $compiler_lib_search_path $postdeps"
+
+ # Compute libraries that are listed more than once in $predeps
+ # $postdeps and mark them as special (i.e., whose duplicates are
+ # not to be eliminated).
+ pre_post_deps=
+ if $opt_duplicate_compiler_generated_deps; then
+ for pre_post_dep in $predeps $postdeps; do
+ case "$pre_post_deps " in
+ *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;;
+ esac
+ pre_post_deps="$pre_post_deps $pre_post_dep"
+ done
+ fi
+ pre_post_deps=
+ fi
+
+ deplibs=
+ newdependency_libs=
+ newlib_search_path=
+ need_relink=no # whether we're linking any uninstalled libtool libraries
+ notinst_deplibs= # not-installed libtool libraries
+ notinst_path= # paths that contain not-installed libtool libraries
+
+ case $linkmode in
+ lib)
+ passes="conv dlpreopen link"
+ for file in $dlfiles $dlprefiles; do
+ case $file in
+ *.la) ;;
+ *)
+ func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file"
+ ;;
+ esac
+ done
+ ;;
+ prog)
+ compile_deplibs=
+ finalize_deplibs=
+ alldeplibs=no
+ newdlfiles=
+ newdlprefiles=
+ passes="conv scan dlopen dlpreopen link"
+ ;;
+ *) passes="conv"
+ ;;
+ esac
+
+ for pass in $passes; do
+ # The preopen pass in lib mode reverses $deplibs; put it back here
+ # so that -L comes before libs that need it for instance...
+ if test "$linkmode,$pass" = "lib,link"; then
+ ## FIXME: Find the place where the list is rebuilt in the wrong
+ ## order, and fix it there properly
+ tmp_deplibs=
+ for deplib in $deplibs; do
+ tmp_deplibs="$deplib $tmp_deplibs"
+ done
+ deplibs="$tmp_deplibs"
+ fi
+
+ if test "$linkmode,$pass" = "lib,link" ||
+ test "$linkmode,$pass" = "prog,scan"; then
+ libs="$deplibs"
+ deplibs=
+ fi
+ if test "$linkmode" = prog; then
+ case $pass in
+ dlopen) libs="$dlfiles" ;;
+ dlpreopen) libs="$dlprefiles" ;;
+ link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
+ esac
+ fi
+ if test "$linkmode,$pass" = "lib,dlpreopen"; then
+ # Collect and forward deplibs of preopened libtool libs
+ for lib in $dlprefiles; do
+ # Ignore non-libtool-libs
+ dependency_libs=
+ case $lib in
+ *.la) func_source "$lib" ;;
+ esac
+
+ # Collect preopened libtool deplibs, except any this library
+ # has declared as weak libs
+ for deplib in $dependency_libs; do
+ deplib_base=`$ECHO "X$deplib" | $Xsed -e "$basename"`
+ case " $weak_libs " in
+ *" $deplib_base "*) ;;
+ *) deplibs="$deplibs $deplib" ;;
+ esac
+ done
+ done
+ libs="$dlprefiles"
+ fi
+ if test "$pass" = dlopen; then
+ # Collect dlpreopened libraries
+ save_deplibs="$deplibs"
+ deplibs=
+ fi
+
+ for deplib in $libs; do
+ lib=
+ found=no
+ case $deplib in
+ -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads)
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ compiler_flags="$compiler_flags $deplib"
+ if test "$linkmode" = lib ; then
+ case "$new_inherited_linker_flags " in
+ *" $deplib "*) ;;
+ * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;;
+ esac
+ fi
+ fi
+ continue
+ ;;
+ -l*)
+ if test "$linkmode" != lib && test "$linkmode" != prog; then
+ func_warning "\`-l' is ignored for archives/objects"
+ continue
+ fi
+ func_stripname '-l' '' "$deplib"
+ name=$func_stripname_result
+ if test "$linkmode" = lib; then
+ searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path"
+ else
+ searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path"
+ fi
+ for searchdir in $searchdirs; do
+ for search_ext in .la $std_shrext .so .a; do
+ # Search the libtool library
+ lib="$searchdir/lib${name}${search_ext}"
+ if test -f "$lib"; then
+ if test "$search_ext" = ".la"; then
+ found=yes
+ else
+ found=no
+ fi
+ break 2
+ fi
+ done
+ done
+ if test "$found" != yes; then
+ # deplib doesn't seem to be a libtool library
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ deplibs="$deplib $deplibs"
+ test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
+ fi
+ continue
+ else # deplib is a libtool library
+ # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib,
+ # We need to do some special things here, and not later.
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ case " $predeps $postdeps " in
+ *" $deplib "*)
+ if func_lalib_p "$lib"; then
+ library_names=
+ old_library=
+ func_source "$lib"
+ for l in $old_library $library_names; do
+ ll="$l"
+ done
+ if test "X$ll" = "X$old_library" ; then # only static version available
+ found=no
+ func_dirname "$lib" "" "."
+ ladir="$func_dirname_result"
+ lib=$ladir/$old_library
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ deplibs="$deplib $deplibs"
+ test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
+ fi
+ continue
+ fi
+ fi
+ ;;
+ *) ;;
+ esac
+ fi
+ fi
+ ;; # -l
+ *.ltframework)
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ deplibs="$deplib $deplibs"
+ if test "$linkmode" = lib ; then
+ case "$new_inherited_linker_flags " in
+ *" $deplib "*) ;;
+ * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;;
+ esac
+ fi
+ fi
+ continue
+ ;;
+ -L*)
+ case $linkmode in
+ lib)
+ deplibs="$deplib $deplibs"
+ test "$pass" = conv && continue
+ newdependency_libs="$deplib $newdependency_libs"
+ func_stripname '-L' '' "$deplib"
+ newlib_search_path="$newlib_search_path $func_stripname_result"
+ ;;
+ prog)
+ if test "$pass" = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ if test "$pass" = scan; then
+ deplibs="$deplib $deplibs"
+ else
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ fi
+ func_stripname '-L' '' "$deplib"
+ newlib_search_path="$newlib_search_path $func_stripname_result"
+ ;;
+ *)
+ func_warning "\`-L' is ignored for archives/objects"
+ ;;
+ esac # linkmode
+ continue
+ ;; # -L
+ -R*)
+ if test "$pass" = link; then
+ func_stripname '-R' '' "$deplib"
+ dir=$func_stripname_result
+ # Make sure the xrpath contains only unique directories.
+ case "$xrpath " in
+ *" $dir "*) ;;
+ *) xrpath="$xrpath $dir" ;;
+ esac
+ fi
+ deplibs="$deplib $deplibs"
+ continue
+ ;;
+ *.la) lib="$deplib" ;;
+ *.$libext)
+ if test "$pass" = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ case $linkmode in
+ lib)
+ # Linking convenience modules into shared libraries is allowed,
+ # but linking other static libraries is non-portable.
+ case " $dlpreconveniencelibs " in
+ *" $deplib "*) ;;
+ *)
+ valid_a_lib=no
+ case $deplibs_check_method in
+ match_pattern*)
+ set dummy $deplibs_check_method; shift
+ match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+ if eval "\$ECHO \"X$deplib\"" 2>/dev/null | $Xsed -e 10q \
+ | $EGREP "$match_pattern_regex" > /dev/null; then
+ valid_a_lib=yes
+ fi
+ ;;
+ pass_all)
+ valid_a_lib=yes
+ ;;
+ esac
+ if test "$valid_a_lib" != yes; then
+ $ECHO
+ $ECHO "*** Warning: Trying to link with static lib archive $deplib."
+ $ECHO "*** I have the capability to make that library automatically link in when"
+ $ECHO "*** you link to this library. But I can only do this if you have a"
+ $ECHO "*** shared version of the library, which you do not appear to have"
+	      $ECHO "*** because the file extension .$libext of this argument makes me believe"
+ $ECHO "*** that it is just a static archive that I should not use here."
+ else
+ $ECHO
+ $ECHO "*** Warning: Linking the shared library $output against the"
+ $ECHO "*** static library $deplib is not portable!"
+ deplibs="$deplib $deplibs"
+ fi
+ ;;
+ esac
+ continue
+ ;;
+ prog)
+ if test "$pass" != link; then
+ deplibs="$deplib $deplibs"
+ else
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ fi
+ continue
+ ;;
+ esac # linkmode
+ ;; # *.$libext
+ *.lo | *.$objext)
+ if test "$pass" = conv; then
+ deplibs="$deplib $deplibs"
+ elif test "$linkmode" = prog; then
+ if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
+ # If there is no dlopen support or we're linking statically,
+ # we need to preload.
+ newdlprefiles="$newdlprefiles $deplib"
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ newdlfiles="$newdlfiles $deplib"
+ fi
+ fi
+ continue
+ ;;
+ %DEPLIBS%)
+ alldeplibs=yes
+ continue
+ ;;
+ esac # case $deplib
+
+ if test "$found" = yes || test -f "$lib"; then :
+ else
+ func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'"
+ fi
+
+ # Check to see that this really is a libtool archive.
+ func_lalib_unsafe_p "$lib" \
+ || func_fatal_error "\`$lib' is not a valid libtool archive"
+
+ func_dirname "$lib" "" "."
+ ladir="$func_dirname_result"
+
+ dlname=
+ dlopen=
+ dlpreopen=
+ libdir=
+ library_names=
+ old_library=
+ inherited_linker_flags=
+	# If the library was installed with an old release of libtool, its
+	# .la file will not define the variables `installed' or `shouldnotlink',
+	# so provide defaults here.
+ installed=yes
+ shouldnotlink=no
+ avoidtemprpath=
+
+
+ # Read the .la file
+ func_source "$lib"
+
+ # Convert "-framework foo" to "foo.ltframework"
+ if test -n "$inherited_linker_flags"; then
+ tmp_inherited_linker_flags=`$ECHO "X$inherited_linker_flags" | $Xsed -e 's/-framework \([^ $]*\)/\1.ltframework/g'`
+ for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do
+ case " $new_inherited_linker_flags " in
+ *" $tmp_inherited_linker_flag "*) ;;
+ *) new_inherited_linker_flags="$new_inherited_linker_flags $tmp_inherited_linker_flag";;
+ esac
+ done
+ fi
+ dependency_libs=`$ECHO "X $dependency_libs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ if test "$linkmode,$pass" = "lib,link" ||
+ test "$linkmode,$pass" = "prog,scan" ||
+ { test "$linkmode" != prog && test "$linkmode" != lib; }; then
+ test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
+ test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
+ fi
+
+ if test "$pass" = conv; then
+ # Only check for convenience libraries
+ deplibs="$lib $deplibs"
+ if test -z "$libdir"; then
+ if test -z "$old_library"; then
+ func_fatal_error "cannot find name of link library for \`$lib'"
+ fi
+ # It is a libtool convenience library, so add in its objects.
+ convenience="$convenience $ladir/$objdir/$old_library"
+ old_convenience="$old_convenience $ladir/$objdir/$old_library"
+ elif test "$linkmode" != prog && test "$linkmode" != lib; then
+ func_fatal_error "\`$lib' is not a convenience library"
+ fi
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ deplibs="$deplib $deplibs"
+ if $opt_duplicate_deps ; then
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ fi
+ tmp_libs="$tmp_libs $deplib"
+ done
+ continue
+ fi # $pass = conv
+
+
+ # Get the name of the library we link against.
+ linklib=
+ for l in $old_library $library_names; do
+ linklib="$l"
+ done
+ if test -z "$linklib"; then
+ func_fatal_error "cannot find name of link library for \`$lib'"
+ fi
+
+ # This library was specified with -dlopen.
+ if test "$pass" = dlopen; then
+ if test -z "$libdir"; then
+ func_fatal_error "cannot -dlopen a convenience library: \`$lib'"
+ fi
+ if test -z "$dlname" ||
+ test "$dlopen_support" != yes ||
+ test "$build_libtool_libs" = no; then
+ # If there is no dlname, no dlopen support or we're linking
+ # statically, we need to preload. We also need to preload any
+ # dependent libraries so libltdl's deplib preloader doesn't
+ # bomb out in the load deplibs phase.
+ dlprefiles="$dlprefiles $lib $dependency_libs"
+ else
+ newdlfiles="$newdlfiles $lib"
+ fi
+ continue
+ fi # $pass = dlopen
+
+ # We need an absolute path.
+ case $ladir in
+ [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
+ *)
+ abs_ladir=`cd "$ladir" && pwd`
+ if test -z "$abs_ladir"; then
+ func_warning "cannot determine absolute directory name of \`$ladir'"
+ func_warning "passing it literally to the linker, although it might fail"
+ abs_ladir="$ladir"
+ fi
+ ;;
+ esac
+ func_basename "$lib"
+ laname="$func_basename_result"
+
+ # Find the relevant object directory and library name.
+ if test "X$installed" = Xyes; then
+ if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+ func_warning "library \`$lib' was moved."
+ dir="$ladir"
+ absdir="$abs_ladir"
+ libdir="$abs_ladir"
+ else
+ dir="$libdir"
+ absdir="$libdir"
+ fi
+ test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes
+ else
+ if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+ dir="$ladir"
+ absdir="$abs_ladir"
+ # Remove this search path later
+ notinst_path="$notinst_path $abs_ladir"
+ else
+ dir="$ladir/$objdir"
+ absdir="$abs_ladir/$objdir"
+ # Remove this search path later
+ notinst_path="$notinst_path $abs_ladir"
+ fi
+ fi # $installed = yes
+ func_stripname 'lib' '.la' "$laname"
+ name=$func_stripname_result
+
+ # This library was specified with -dlpreopen.
+ if test "$pass" = dlpreopen; then
+ if test -z "$libdir" && test "$linkmode" = prog; then
+ func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'"
+ fi
+ # Prefer using a static library (so that no silly _DYNAMIC symbols
+ # are required to link).
+ if test -n "$old_library"; then
+ newdlprefiles="$newdlprefiles $dir/$old_library"
+ # Keep a list of preopened convenience libraries to check
+ # that they are being used correctly in the link pass.
+ test -z "$libdir" && \
+ dlpreconveniencelibs="$dlpreconveniencelibs $dir/$old_library"
+ # Otherwise, use the dlname, so that lt_dlopen finds it.
+ elif test -n "$dlname"; then
+ newdlprefiles="$newdlprefiles $dir/$dlname"
+ else
+ newdlprefiles="$newdlprefiles $dir/$linklib"
+ fi
+ fi # $pass = dlpreopen
+
+ if test -z "$libdir"; then
+ # Link the convenience library
+ if test "$linkmode" = lib; then
+ deplibs="$dir/$old_library $deplibs"
+ elif test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$dir/$old_library $compile_deplibs"
+ finalize_deplibs="$dir/$old_library $finalize_deplibs"
+ else
+ deplibs="$lib $deplibs" # used for prog,scan pass
+ fi
+ continue
+ fi
+
+
+ if test "$linkmode" = prog && test "$pass" != link; then
+ newlib_search_path="$newlib_search_path $ladir"
+ deplibs="$lib $deplibs"
+
+ linkalldeplibs=no
+ if test "$link_all_deplibs" != no || test -z "$library_names" ||
+ test "$build_libtool_libs" = no; then
+ linkalldeplibs=yes
+ fi
+
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ case $deplib in
+ -L*) func_stripname '-L' '' "$deplib"
+ newlib_search_path="$newlib_search_path $func_stripname_result"
+ ;;
+ esac
+ # Need to link against all dependency_libs?
+ if test "$linkalldeplibs" = yes; then
+ deplibs="$deplib $deplibs"
+ else
+ # Need to hardcode shared library paths
+ # or/and link against static libraries
+ newdependency_libs="$deplib $newdependency_libs"
+ fi
+ if $opt_duplicate_deps ; then
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ fi
+ tmp_libs="$tmp_libs $deplib"
+ done # for deplib
+ continue
+ fi # $linkmode = prog...
+
+ if test "$linkmode,$pass" = "prog,link"; then
+ if test -n "$library_names" &&
+ { { test "$prefer_static_libs" = no ||
+ test "$prefer_static_libs,$installed" = "built,yes"; } ||
+ test -z "$old_library"; }; then
+ # We need to hardcode the library path
+ if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then
+ # Make sure the rpath contains only unique directories.
+ case "$temp_rpath:" in
+ *"$absdir:"*) ;;
+ *) temp_rpath="$temp_rpath$absdir:" ;;
+ esac
+ fi
+
+ # Hardcode the library path.
+ # Skip directories that are in the system default run-time
+ # search path.
+ case " $sys_lib_dlsearch_path " in
+ *" $absdir "*) ;;
+ *)
+ case "$compile_rpath " in
+ *" $absdir "*) ;;
+ *) compile_rpath="$compile_rpath $absdir"
+ esac
+ ;;
+ esac
+ case " $sys_lib_dlsearch_path " in
+ *" $libdir "*) ;;
+ *)
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir"
+ esac
+ ;;
+ esac
+ fi # $linkmode,$pass = prog,link...
+
+ if test "$alldeplibs" = yes &&
+ { test "$deplibs_check_method" = pass_all ||
+ { test "$build_libtool_libs" = yes &&
+ test -n "$library_names"; }; }; then
+ # We only need to search for static libraries
+ continue
+ fi
+ fi
+
+ link_static=no # Whether the deplib will be linked statically
+ use_static_libs=$prefer_static_libs
+ if test "$use_static_libs" = built && test "$installed" = yes; then
+ use_static_libs=no
+ fi
+ if test -n "$library_names" &&
+ { test "$use_static_libs" = no || test -z "$old_library"; }; then
+ case $host in
+ *cygwin* | *mingw* | *cegcc*)
+ # No point in relinking DLLs because paths are not encoded
+ notinst_deplibs="$notinst_deplibs $lib"
+ need_relink=no
+ ;;
+ *)
+ if test "$installed" = no; then
+ notinst_deplibs="$notinst_deplibs $lib"
+ need_relink=yes
+ fi
+ ;;
+ esac
+ # This is a shared library
+
+ # Warn about portability, can't link against -module's on some
+ # systems (darwin). Don't bleat about dlopened modules though!
+ dlopenmodule=""
+ for dlpremoduletest in $dlprefiles; do
+ if test "X$dlpremoduletest" = "X$lib"; then
+ dlopenmodule="$dlpremoduletest"
+ break
+ fi
+ done
+ if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then
+ $ECHO
+ if test "$linkmode" = prog; then
+ $ECHO "*** Warning: Linking the executable $output against the loadable module"
+ else
+ $ECHO "*** Warning: Linking the shared library $output against the loadable module"
+ fi
+ $ECHO "*** $linklib is not portable!"
+ fi
+ if test "$linkmode" = lib &&
+ test "$hardcode_into_libs" = yes; then
+ # Hardcode the library path.
+ # Skip directories that are in the system default run-time
+ # search path.
+ case " $sys_lib_dlsearch_path " in
+ *" $absdir "*) ;;
+ *)
+ case "$compile_rpath " in
+ *" $absdir "*) ;;
+ *) compile_rpath="$compile_rpath $absdir"
+ esac
+ ;;
+ esac
+ case " $sys_lib_dlsearch_path " in
+ *" $libdir "*) ;;
+ *)
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir"
+ esac
+ ;;
+ esac
+ fi
+
+ if test -n "$old_archive_from_expsyms_cmds"; then
+ # figure out the soname
+ set dummy $library_names
+ shift
+ realname="$1"
+ shift
+ libname=`eval "\\$ECHO \"$libname_spec\""`
+ # use dlname if we got it. it's perfectly good, no?
+ if test -n "$dlname"; then
+ soname="$dlname"
+ elif test -n "$soname_spec"; then
+ # bleh windows
+ case $host in
+ *cygwin* | mingw* | *cegcc*)
+ func_arith $current - $age
+ major=$func_arith_result
+ versuffix="-$major"
+ ;;
+ esac
+ eval soname=\"$soname_spec\"
+ else
+ soname="$realname"
+ fi
+
+ # Make a new name for the extract_expsyms_cmds to use
+ soroot="$soname"
+ func_basename "$soroot"
+ soname="$func_basename_result"
+ func_stripname 'lib' '.dll' "$soname"
+ newlib=libimp-$func_stripname_result.a
+
+ # If the library has no export list, then create one now
+ if test -f "$output_objdir/$soname-def"; then :
+ else
+ func_verbose "extracting exported symbol list from \`$soname'"
+ func_execute_cmds "$extract_expsyms_cmds" 'exit $?'
+ fi
+
+ # Create $newlib
+ if test -f "$output_objdir/$newlib"; then :; else
+ func_verbose "generating import library for \`$soname'"
+ func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?'
+ fi
+ # make sure the library variables are pointing to the new library
+ dir=$output_objdir
+ linklib=$newlib
+ fi # test -n "$old_archive_from_expsyms_cmds"
+
+ if test "$linkmode" = prog || test "$mode" != relink; then
+ add_shlibpath=
+ add_dir=
+ add=
+ lib_linked=yes
+ case $hardcode_action in
+ immediate | unsupported)
+ if test "$hardcode_direct" = no; then
+ add="$dir/$linklib"
+ case $host in
+ *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;;
+ *-*-sysv4*uw2*) add_dir="-L$dir" ;;
+ *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \
+ *-*-unixware7*) add_dir="-L$dir" ;;
+ *-*-darwin* )
+		    # if the lib is a (non-dlopened) module then we cannot
+		    # link against it; someone is ignoring the earlier warnings
+ if /usr/bin/file -L $add 2> /dev/null |
+ $GREP ": [^:]* bundle" >/dev/null ; then
+ if test "X$dlopenmodule" != "X$lib"; then
+ $ECHO "*** Warning: lib $linklib is a module, not a shared library"
+ if test -z "$old_library" ; then
+ $ECHO
+ $ECHO "*** And there doesn't seem to be a static archive available"
+ $ECHO "*** The link will probably fail, sorry"
+ else
+ add="$dir/$old_library"
+ fi
+ elif test -n "$old_library"; then
+ add="$dir/$old_library"
+ fi
+ fi
+ esac
+ elif test "$hardcode_minus_L" = no; then
+ case $host in
+ *-*-sunos*) add_shlibpath="$dir" ;;
+ esac
+ add_dir="-L$dir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = no; then
+ add_shlibpath="$dir"
+ add="-l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+ relink)
+ if test "$hardcode_direct" = yes &&
+ test "$hardcode_direct_absolute" = no; then
+ add="$dir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ add_dir="-L$dir"
+ # Try looking first in the location we're being installed to.
+ if test -n "$inst_prefix_dir"; then
+ case $libdir in
+ [\\/]*)
+ add_dir="$add_dir -L$inst_prefix_dir$libdir"
+ ;;
+ esac
+ fi
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ add_shlibpath="$dir"
+ add="-l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+ *) lib_linked=no ;;
+ esac
+
+ if test "$lib_linked" != yes; then
+ func_fatal_configuration "unsupported hardcode properties"
+ fi
+
+ if test -n "$add_shlibpath"; then
+ case :$compile_shlibpath: in
+ *":$add_shlibpath:"*) ;;
+ *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
+ esac
+ fi
+ if test "$linkmode" = prog; then
+ test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
+ test -n "$add" && compile_deplibs="$add $compile_deplibs"
+ else
+ test -n "$add_dir" && deplibs="$add_dir $deplibs"
+ test -n "$add" && deplibs="$add $deplibs"
+ if test "$hardcode_direct" != yes &&
+ test "$hardcode_minus_L" != yes &&
+ test "$hardcode_shlibpath_var" = yes; then
+ case :$finalize_shlibpath: in
+ *":$libdir:"*) ;;
+ *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
+ esac
+ fi
+ fi
+ fi
+
+ if test "$linkmode" = prog || test "$mode" = relink; then
+ add_shlibpath=
+ add_dir=
+ add=
+ # Finalize command for both is simple: just hardcode it.
+ if test "$hardcode_direct" = yes &&
+ test "$hardcode_direct_absolute" = no; then
+ add="$libdir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ add_dir="-L$libdir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ case :$finalize_shlibpath: in
+ *":$libdir:"*) ;;
+ *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
+ esac
+ add="-l$name"
+ elif test "$hardcode_automatic" = yes; then
+ if test -n "$inst_prefix_dir" &&
+ test -f "$inst_prefix_dir$libdir/$linklib" ; then
+ add="$inst_prefix_dir$libdir/$linklib"
+ else
+ add="$libdir/$linklib"
+ fi
+ else
+ # We cannot seem to hardcode it, guess we'll fake it.
+ add_dir="-L$libdir"
+ # Try looking first in the location we're being installed to.
+ if test -n "$inst_prefix_dir"; then
+ case $libdir in
+ [\\/]*)
+ add_dir="$add_dir -L$inst_prefix_dir$libdir"
+ ;;
+ esac
+ fi
+ add="-l$name"
+ fi
+
+ if test "$linkmode" = prog; then
+ test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
+ test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
+ else
+ test -n "$add_dir" && deplibs="$add_dir $deplibs"
+ test -n "$add" && deplibs="$add $deplibs"
+ fi
+ fi
+ elif test "$linkmode" = prog; then
+ # Here we assume that one of hardcode_direct or hardcode_minus_L
+ # is not unsupported. This is valid on all known static and
+ # shared platforms.
+ if test "$hardcode_direct" != unsupported; then
+ test -n "$old_library" && linklib="$old_library"
+ compile_deplibs="$dir/$linklib $compile_deplibs"
+ finalize_deplibs="$dir/$linklib $finalize_deplibs"
+ else
+ compile_deplibs="-l$name -L$dir $compile_deplibs"
+ finalize_deplibs="-l$name -L$dir $finalize_deplibs"
+ fi
+ elif test "$build_libtool_libs" = yes; then
+ # Not a shared library
+ if test "$deplibs_check_method" != pass_all; then
+	    # We're trying to link a shared library against a static one
+ # but the system doesn't support it.
+
+ # Just print a warning and add the library to dependency_libs so
+ # that the program can be linked against the static library.
+ $ECHO
+ $ECHO "*** Warning: This system can not link to static lib archive $lib."
+ $ECHO "*** I have the capability to make that library automatically link in when"
+ $ECHO "*** you link to this library. But I can only do this if you have a"
+ $ECHO "*** shared version of the library, which you do not appear to have."
+ if test "$module" = yes; then
+ $ECHO "*** But as you try to build a module library, libtool will still create "
+ $ECHO "*** a static module, that should work as long as the dlopening application"
+ $ECHO "*** is linked with the -dlopen flag to resolve symbols at runtime."
+ if test -z "$global_symbol_pipe"; then
+ $ECHO
+ $ECHO "*** However, this would only work if libtool was able to extract symbol"
+ $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ $ECHO "*** not find such a program. So, this module is probably useless."
+ $ECHO "*** \`nm' from GNU binutils and a full rebuild may help."
+ fi
+ if test "$build_old_libs" = no; then
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ fi
+ else
+ deplibs="$dir/$old_library $deplibs"
+ link_static=yes
+ fi
+ fi # link shared/static library?
+
+ if test "$linkmode" = lib; then
+ if test -n "$dependency_libs" &&
+ { test "$hardcode_into_libs" != yes ||
+ test "$build_old_libs" = yes ||
+ test "$link_static" = yes; }; then
+ # Extract -R from dependency_libs
+ temp_deplibs=
+ for libdir in $dependency_libs; do
+ case $libdir in
+ -R*) func_stripname '-R' '' "$libdir"
+ temp_xrpath=$func_stripname_result
+ case " $xrpath " in
+ *" $temp_xrpath "*) ;;
+ *) xrpath="$xrpath $temp_xrpath";;
+ esac;;
+ *) temp_deplibs="$temp_deplibs $libdir";;
+ esac
+ done
+ dependency_libs="$temp_deplibs"
+ fi
+
+ newlib_search_path="$newlib_search_path $absdir"
+ # Link against this library
+ test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
+ # ... and its dependency_libs
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ newdependency_libs="$deplib $newdependency_libs"
+ if $opt_duplicate_deps ; then
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ fi
+ tmp_libs="$tmp_libs $deplib"
+ done
+
+ if test "$link_all_deplibs" != no; then
+ # Add the search paths of all dependency libraries
+ for deplib in $dependency_libs; do
+ case $deplib in
+ -L*) path="$deplib" ;;
+ *.la)
+ func_dirname "$deplib" "" "."
+ dir="$func_dirname_result"
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ if test -z "$absdir"; then
+ func_warning "cannot determine absolute directory name of \`$dir'"
+ absdir="$dir"
+ fi
+ ;;
+ esac
+ if $GREP "^installed=no" $deplib > /dev/null; then
+ case $host in
+ *-*-darwin*)
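+		  # For a libtool library that is not yet installed, read its
+		  # library_names from the .la file, ask the built dylib for its
+		  # install_name via `otool -L', and pass the pair to the linker
+		  # as `-dylib_file install_name:actual_path' so the reference
+		  # can be satisfied from the build tree.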
+ depdepl=
+ eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib`
+ if test -n "$deplibrary_names" ; then
+ for tmp in $deplibrary_names ; do
+ depdepl=$tmp
+ done
+ if test -f "$absdir/$objdir/$depdepl" ; then
+ depdepl="$absdir/$objdir/$depdepl"
+ darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'`
+ if test -z "$darwin_install_name"; then
+ darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'`
+ fi
+ compiler_flags="$compiler_flags ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}"
+ linker_flags="$linker_flags -dylib_file ${darwin_install_name}:${depdepl}"
+ path=
+ fi
+ fi
+ ;;
+ *)
+ path="-L$absdir/$objdir"
+ ;;
+ esac
+ else
+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+ test -z "$libdir" && \
+ func_fatal_error "\`$deplib' is not a valid libtool archive"
+ test "$absdir" != "$libdir" && \
+ func_warning "\`$deplib' seems to be moved"
+
+ path="-L$absdir"
+ fi
+ ;;
+ esac
+ case " $deplibs " in
+ *" $path "*) ;;
+ *) deplibs="$path $deplibs" ;;
+ esac
+ done
+ fi # link_all_deplibs != no
+ fi # linkmode = lib
+ done # for deplib in $libs
+ if test "$pass" = link; then
+ if test "$linkmode" = "prog"; then
+ compile_deplibs="$new_inherited_linker_flags $compile_deplibs"
+ finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs"
+ else
+ compiler_flags="$compiler_flags "`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ fi
+ fi
+ dependency_libs="$newdependency_libs"
+ if test "$pass" = dlpreopen; then
+ # Link the dlpreopened libraries before other libraries
+ for deplib in $save_deplibs; do
+ deplibs="$deplib $deplibs"
+ done
+ fi
+ if test "$pass" != dlopen; then
+ if test "$pass" != conv; then
+ # Make sure lib_search_path contains only unique directories.
+ lib_search_path=
+ for dir in $newlib_search_path; do
+ case "$lib_search_path " in
+ *" $dir "*) ;;
+ *) lib_search_path="$lib_search_path $dir" ;;
+ esac
+ done
+ newlib_search_path=
+ fi
+
+ if test "$linkmode,$pass" != "prog,link"; then
+ vars="deplibs"
+ else
+ vars="compile_deplibs finalize_deplibs"
+ fi
+ for var in $vars dependency_libs; do
+ # Add libraries to $var in reverse order
+ eval tmp_libs=\"\$$var\"
+ new_libs=
+ for deplib in $tmp_libs; do
+ # FIXME: Pedantically, this is the right thing to do, so
+ # that some nasty dependency loop isn't accidentally
+ # broken:
+ #new_libs="$deplib $new_libs"
+ # Pragmatically, this seems to cause very few problems in
+ # practice:
+ case $deplib in
+ -L*) new_libs="$deplib $new_libs" ;;
+ -R*) ;;
+ *)
+ # And here is the reason: when a library appears more
+ # than once as an explicit dependence of a library, or
+ # is implicitly linked in more than once by the
+ # compiler, it is considered special, and multiple
+ # occurrences thereof are not removed. Compare this
+ # with having the same library being listed as a
+ # dependency of multiple other libraries: in this case,
+ # we know (pedantically, we assume) the library does not
+ # need to be listed more than once, so we keep only the
+ # last copy. This is not always right, but it is rare
+ # enough that we require users that really mean to play
+ # such unportable linking tricks to link the library
+ # using -Wl,-lname, so that libtool does not consider it
+ # for duplicate removal.
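+	      # An illustrative (hypothetical) example: if $var held
+	      #   "-lz -lfoo -lm -lfoo"
+	      # and -lfoo is not in specialdeplibs, the reordering in this loop
+	      # yields "-lm -lfoo -lz" (order reversed, one copy of -lfoo);
+	      # if -lfoo is special, it yields "-lfoo -lm -lfoo -lz" with
+	      # both copies preserved.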
+ case " $specialdeplibs " in
+ *" $deplib "*) new_libs="$deplib $new_libs" ;;
+ *)
+ case " $new_libs " in
+ *" $deplib "*) ;;
+ *) new_libs="$deplib $new_libs" ;;
+ esac
+ ;;
+ esac
+ ;;
+ esac
+ done
+ tmp_libs=
+ for deplib in $new_libs; do
+ case $deplib in
+ -L*)
+ case " $tmp_libs " in
+ *" $deplib "*) ;;
+ *) tmp_libs="$tmp_libs $deplib" ;;
+ esac
+ ;;
+ *) tmp_libs="$tmp_libs $deplib" ;;
+ esac
+ done
+ eval $var=\"$tmp_libs\"
+ done # for var
+ fi
+ # Last step: remove runtime libs from dependency_libs
+ # (they stay in deplibs)
+ tmp_libs=
+ for i in $dependency_libs ; do
+ case " $predeps $postdeps $compiler_lib_search_path " in
+ *" $i "*)
+ i=""
+ ;;
+ esac
+ if test -n "$i" ; then
+ tmp_libs="$tmp_libs $i"
+ fi
+ done
+ dependency_libs=$tmp_libs
+ done # for pass
+ if test "$linkmode" = prog; then
+ dlfiles="$newdlfiles"
+ fi
+ if test "$linkmode" = prog || test "$linkmode" = lib; then
+ dlprefiles="$newdlprefiles"
+ fi
+
+ case $linkmode in
+ oldlib)
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ func_warning "\`-dlopen' is ignored for archives"
+ fi
+
+ case " $deplibs" in
+ *\ -l* | *\ -L*)
+ func_warning "\`-l' and \`-L' are ignored for archives" ;;
+ esac
+
+ test -n "$rpath" && \
+ func_warning "\`-rpath' is ignored for archives"
+
+ test -n "$xrpath" && \
+ func_warning "\`-R' is ignored for archives"
+
+ test -n "$vinfo" && \
+ func_warning "\`-version-info/-version-number' is ignored for archives"
+
+ test -n "$release" && \
+ func_warning "\`-release' is ignored for archives"
+
+ test -n "$export_symbols$export_symbols_regex" && \
+ func_warning "\`-export-symbols' is ignored for archives"
+
+ # Now set the variables for building old libraries.
+ build_libtool_libs=no
+ oldlibs="$output"
+ objs="$objs$old_deplibs"
+ ;;
+
+ lib)
+ # Make sure we only generate libraries of the form `libNAME.la'.
+ case $outputname in
+ lib*)
+ func_stripname 'lib' '.la' "$outputname"
+ name=$func_stripname_result
+ eval shared_ext=\"$shrext_cmds\"
+ eval libname=\"$libname_spec\"
+ ;;
+ *)
+ test "$module" = no && \
+ func_fatal_help "libtool library \`$output' must begin with \`lib'"
+
+ if test "$need_lib_prefix" != no; then
+ # Add the "lib" prefix for modules if required
+ func_stripname '' '.la' "$outputname"
+ name=$func_stripname_result
+ eval shared_ext=\"$shrext_cmds\"
+ eval libname=\"$libname_spec\"
+ else
+ func_stripname '' '.la' "$outputname"
+ libname=$func_stripname_result
+ fi
+ ;;
+ esac
+
+ if test -n "$objs"; then
+ if test "$deplibs_check_method" != pass_all; then
+ func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs"
+ else
+ $ECHO
+ $ECHO "*** Warning: Linking the shared library $output against the non-libtool"
+ $ECHO "*** objects $objs is not portable!"
+ libobjs="$libobjs $objs"
+ fi
+ fi
+
+ test "$dlself" != no && \
+ func_warning "\`-dlopen self' is ignored for libtool libraries"
+
+ set dummy $rpath
+ shift
+ test "$#" -gt 1 && \
+ func_warning "ignoring multiple \`-rpath's for a libtool library"
+
+ install_libdir="$1"
+
+ oldlibs=
+ if test -z "$rpath"; then
+ if test "$build_libtool_libs" = yes; then
+ # Building a libtool convenience library.
+ # Some compilers have problems with a `.al' extension so
+	  # convenience libraries should have the same extension that an
+	  # archive normally would have.
+ oldlibs="$output_objdir/$libname.$libext $oldlibs"
+ build_libtool_libs=convenience
+ build_old_libs=yes
+ fi
+
+ test -n "$vinfo" && \
+ func_warning "\`-version-info/-version-number' is ignored for convenience libraries"
+
+ test -n "$release" && \
+ func_warning "\`-release' is ignored for convenience libraries"
+ else
+
+ # Parse the version information argument.
+ save_ifs="$IFS"; IFS=':'
+ set dummy $vinfo 0 0 0
+ shift
+ IFS="$save_ifs"
+
+ test -n "$7" && \
+ func_fatal_help "too many parameters to \`-version-info'"
+
+ # convert absolute version numbers to libtool ages
+ # this retains compatibility with .la files and attempts
+ # to make the code below a bit more comprehensible
+
+ case $vinfo_number in
+ yes)
+ number_major="$1"
+ number_minor="$2"
+ number_revision="$3"
+ #
+ # There are really only two kinds -- those that
+ # use the current revision as the major version
+ # and those that subtract age and use age as
+ # a minor version. But, then there is irix
+ # which has an extra 1 added just for fun
+ #
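+	  # As a worked example (hypothetical numbers): `-version-number 3:2:1'
+	  # gives number_major=3, number_minor=2, number_revision=1; on
+	  # darwin/linux/osf/windows this becomes current=5 (3+2), age=2,
+	  # revision=1, while on freebsd-aout/freebsd-elf/sunos it becomes
+	  # current=3, revision=2, age=0.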
+ case $version_type in
+ darwin|linux|osf|windows|none)
+ func_arith $number_major + $number_minor
+ current=$func_arith_result
+ age="$number_minor"
+ revision="$number_revision"
+ ;;
+ freebsd-aout|freebsd-elf|sunos)
+ current="$number_major"
+ revision="$number_minor"
+ age="0"
+ ;;
+ irix|nonstopux)
+ func_arith $number_major + $number_minor
+ current=$func_arith_result
+ age="$number_minor"
+ revision="$number_minor"
+ lt_irix_increment=no
+ ;;
+ esac
+ ;;
+ no)
+ current="$1"
+ revision="$2"
+ age="$3"
+ ;;
+ esac
+
+ # Check that each of the things are valid numbers.
+ case $current in
+ 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+ *)
+ func_error "CURRENT \`$current' must be a nonnegative integer"
+ func_fatal_error "\`$vinfo' is not valid version information"
+ ;;
+ esac
+
+ case $revision in
+ 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+ *)
+ func_error "REVISION \`$revision' must be a nonnegative integer"
+ func_fatal_error "\`$vinfo' is not valid version information"
+ ;;
+ esac
+
+ case $age in
+ 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+ *)
+ func_error "AGE \`$age' must be a nonnegative integer"
+ func_fatal_error "\`$vinfo' is not valid version information"
+ ;;
+ esac
+
+ if test "$age" -gt "$current"; then
+ func_error "AGE \`$age' is greater than the current interface number \`$current'"
+ func_fatal_error "\`$vinfo' is not valid version information"
+ fi
+
+ # Calculate the version variables.
+ major=
+ versuffix=
+ verstring=
+ case $version_type in
+ none) ;;
+
+ darwin)
+ # Like Linux, but with the current version available in
+ # verstring for coding it into the library header
+ func_arith $current - $age
+ major=.$func_arith_result
+ versuffix="$major.$age.$revision"
+ # Darwin ld doesn't like 0 for these options...
+ func_arith $current + 1
+ minor_current=$func_arith_result
+ xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision"
+ verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
+ ;;
+
+ freebsd-aout)
+ major=".$current"
+ versuffix=".$current.$revision";
+ ;;
+
+ freebsd-elf)
+ major=".$current"
+ versuffix=".$current"
+ ;;
+
+ irix | nonstopux)
+ if test "X$lt_irix_increment" = "Xno"; then
+ func_arith $current - $age
+ else
+ func_arith $current - $age + 1
+ fi
+ major=$func_arith_result
+
+ case $version_type in
+ nonstopux) verstring_prefix=nonstopux ;;
+ *) verstring_prefix=sgi ;;
+ esac
+ verstring="$verstring_prefix$major.$revision"
+
+ # Add in all the interfaces that we are compatible with.
+ loop=$revision
+ while test "$loop" -ne 0; do
+ func_arith $revision - $loop
+ iface=$func_arith_result
+ func_arith $loop - 1
+ loop=$func_arith_result
+ verstring="$verstring_prefix$major.$iface:$verstring"
+ done
+
+ # Before this point, $major must not contain `.'.
+ major=.$major
+ versuffix="$major.$revision"
+ ;;
+
+ linux)
+ func_arith $current - $age
+ major=.$func_arith_result
+ versuffix="$major.$age.$revision"
+ ;;
+
+ osf)
+ func_arith $current - $age
+ major=.$func_arith_result
+ versuffix=".$current.$age.$revision"
+ verstring="$current.$age.$revision"
+
+ # Add in all the interfaces that we are compatible with.
+ loop=$age
+ while test "$loop" -ne 0; do
+ func_arith $current - $loop
+ iface=$func_arith_result
+ func_arith $loop - 1
+ loop=$func_arith_result
+ verstring="$verstring:${iface}.0"
+ done
+
+ # Make executables depend on our current version.
+ verstring="$verstring:${current}.0"
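+	  # For instance (hypothetical values), current=5, age=2, revision=1
+	  # produces versuffix=".5.2.1" and verstring="5.2.1:3.0:4.0:5.0",
+	  # i.e. the library also advertises the older interfaces 3 and 4.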
+ ;;
+
+ qnx)
+ major=".$current"
+ versuffix=".$current"
+ ;;
+
+ sunos)
+ major=".$current"
+ versuffix=".$current.$revision"
+ ;;
+
+ windows)
+ # Use '-' rather than '.', since we only want one
+ # extension on DOS 8.3 filesystems.
+ func_arith $current - $age
+ major=$func_arith_result
+ versuffix="-$major"
+ ;;
+
+ *)
+ func_fatal_configuration "unknown library version type \`$version_type'"
+ ;;
+ esac
+
+ # Clear the version info if we defaulted, and they specified a release.
+ if test -z "$vinfo" && test -n "$release"; then
+ major=
+ case $version_type in
+ darwin)
+ # we can't check for "0.0" in archive_cmds due to quoting
+ # problems, so we reset it completely
+ verstring=
+ ;;
+ *)
+ verstring="0.0"
+ ;;
+ esac
+ if test "$need_version" = no; then
+ versuffix=
+ else
+ versuffix=".0.0"
+ fi
+ fi
+
+ # Remove version info from name if versioning should be avoided
+ if test "$avoid_version" = yes && test "$need_version" = no; then
+ major=
+ versuffix=
+ verstring=""
+ fi
+
+ # Check to see if the archive will have undefined symbols.
+ if test "$allow_undefined" = yes; then
+ if test "$allow_undefined_flag" = unsupported; then
+ func_warning "undefined symbols not allowed in $host shared libraries"
+ build_libtool_libs=no
+ build_old_libs=yes
+ fi
+ else
+ # Don't allow undefined symbols.
+ allow_undefined_flag="$no_undefined_flag"
+ fi
+
+ fi
+
+ func_generate_dlsyms "$libname" "$libname" "yes"
+ libobjs="$libobjs $symfileobj"
+ test "X$libobjs" = "X " && libobjs=
+
+ if test "$mode" != relink; then
+ # Remove our outputs, but don't remove object files since they
+ # may have been created when compiling PIC objects.
+ removelist=
+ tempremovelist=`$ECHO "$output_objdir/*"`
+ for p in $tempremovelist; do
+ case $p in
+ *.$objext | *.gcno)
+ ;;
+ $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*)
+ if test "X$precious_files_regex" != "X"; then
+ if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1
+ then
+ continue
+ fi
+ fi
+ removelist="$removelist $p"
+ ;;
+ *) ;;
+ esac
+ done
+ test -n "$removelist" && \
+ func_show_eval "${RM}r \$removelist"
+ fi
+
+ # Now set the variables for building old libraries.
+ if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
+ oldlibs="$oldlibs $output_objdir/$libname.$libext"
+
+ # Transform .lo files to .o files.
+ oldobjs="$objs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
+ fi
+
+ # Eliminate all temporary directories.
+ #for path in $notinst_path; do
+ # lib_search_path=`$ECHO "X$lib_search_path " | $Xsed -e "s% $path % %g"`
+ # deplibs=`$ECHO "X$deplibs " | $Xsed -e "s% -L$path % %g"`
+ # dependency_libs=`$ECHO "X$dependency_libs " | $Xsed -e "s% -L$path % %g"`
+ #done
+
+ if test -n "$xrpath"; then
+ # If the user specified any rpath flags, then add them.
+ temp_xrpath=
+ for libdir in $xrpath; do
+ temp_xrpath="$temp_xrpath -R$libdir"
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir" ;;
+ esac
+ done
+ if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then
+ dependency_libs="$temp_xrpath $dependency_libs"
+ fi
+ fi
+
+ # Make sure dlfiles contains only unique files that won't be dlpreopened
+ old_dlfiles="$dlfiles"
+ dlfiles=
+ for lib in $old_dlfiles; do
+ case " $dlprefiles $dlfiles " in
+ *" $lib "*) ;;
+ *) dlfiles="$dlfiles $lib" ;;
+ esac
+ done
+
+ # Make sure dlprefiles contains only unique files
+ old_dlprefiles="$dlprefiles"
+ dlprefiles=
+ for lib in $old_dlprefiles; do
+ case "$dlprefiles " in
+ *" $lib "*) ;;
+ *) dlprefiles="$dlprefiles $lib" ;;
+ esac
+ done
+
+ if test "$build_libtool_libs" = yes; then
+ if test -n "$rpath"; then
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc*)
+ # these systems don't actually have a c library (as such)!
+ ;;
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # Rhapsody C library is in the System framework
+ deplibs="$deplibs System.ltframework"
+ ;;
+ *-*-netbsd*)
+ # Don't link with libc until the a.out ld.so is fixed.
+ ;;
+ *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+ # Do not include libc due to us having libc/libc_r.
+ ;;
+ *-*-sco3.2v5* | *-*-sco5v6*)
+ # Causes problems with __ctype
+ ;;
+ *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+ # Compiler inserts libc in the correct place for threads to work
+ ;;
+ *)
+ # Add libc to deplibs on all other systems if necessary.
+ if test "$build_libtool_need_lc" = "yes"; then
+ deplibs="$deplibs -lc"
+ fi
+ ;;
+ esac
+ fi
+
+ # Transform deplibs into only deplibs that can be linked in shared.
+ name_save=$name
+ libname_save=$libname
+ release_save=$release
+ versuffix_save=$versuffix
+ major_save=$major
+	# I'm not sure if I'm treating the release correctly.  I think the
+	# release should show up in the -l name (i.e., -lgmp5), so we don't
+	# want to add it in twice.  Is that correct?
+ release=""
+ versuffix=""
+ major=""
+ newdeplibs=
+ droppeddeps=no
+ case $deplibs_check_method in
+ pass_all)
+ # Don't check for shared/static. Everything works.
+ # This might be a little naive. We might want to check
+ # whether the library exists or not. But this is on
+ # osf3 & osf4 and I'm not really sure... Just
+ # implementing what was already the behavior.
+ newdeplibs=$deplibs
+ ;;
+ test_compile)
+ # This code stresses the "libraries are programs" paradigm to its
+ # limits. Maybe even breaks it. We compile a program, linking it
+ # against the deplibs as a proxy for the library. Then we can check
+ # whether they linked in statically or dynamically with ldd.
+ $opt_dry_run || $RM conftest.c
+ cat > conftest.c <<EOF
+ int main() { return 0; }
+EOF
+ $opt_dry_run || $RM conftest
+ if $LTCC $LTCFLAGS -o conftest conftest.c $deplibs; then
+ ldd_output=`ldd conftest`
+ for i in $deplibs; do
+ case $i in
+ -l*)
+ func_stripname -l '' "$i"
+ name=$func_stripname_result
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ case " $predeps $postdeps " in
+ *" $i "*)
+ newdeplibs="$newdeplibs $i"
+ i=""
+ ;;
+ esac
+ fi
+ if test -n "$i" ; then
+ libname=`eval "\\$ECHO \"$libname_spec\""`
+ deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
+ set dummy $deplib_matches; shift
+ deplib_match=$1
+ if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+ newdeplibs="$newdeplibs $i"
+ else
+ droppeddeps=yes
+ $ECHO
+ $ECHO "*** Warning: dynamic linker does not accept needed library $i."
+ $ECHO "*** I have the capability to make that library automatically link in when"
+ $ECHO "*** you link to this library. But I can only do this if you have a"
+ $ECHO "*** shared version of the library, which I believe you do not have"
+ $ECHO "*** because a test_compile did reveal that the linker did not use it for"
+ $ECHO "*** its dynamic dependency list that programs get resolved with at runtime."
+ fi
+ fi
+ ;;
+ *)
+ newdeplibs="$newdeplibs $i"
+ ;;
+ esac
+ done
+ else
+ # Error occurred in the first compile. Let's try to salvage
+ # the situation: Compile a separate program for each library.
+ for i in $deplibs; do
+ case $i in
+ -l*)
+ func_stripname -l '' "$i"
+ name=$func_stripname_result
+ $opt_dry_run || $RM conftest
+ if $LTCC $LTCFLAGS -o conftest conftest.c $i; then
+ ldd_output=`ldd conftest`
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ case " $predeps $postdeps " in
+ *" $i "*)
+ newdeplibs="$newdeplibs $i"
+ i=""
+ ;;
+ esac
+ fi
+ if test -n "$i" ; then
+ libname=`eval "\\$ECHO \"$libname_spec\""`
+ deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
+ set dummy $deplib_matches; shift
+ deplib_match=$1
+ if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+ newdeplibs="$newdeplibs $i"
+ else
+ droppeddeps=yes
+ $ECHO
+ $ECHO "*** Warning: dynamic linker does not accept needed library $i."
+ $ECHO "*** I have the capability to make that library automatically link in when"
+ $ECHO "*** you link to this library. But I can only do this if you have a"
+ $ECHO "*** shared version of the library, which you do not appear to have"
+ $ECHO "*** because a test_compile did reveal that the linker did not use this one"
+ $ECHO "*** as a dynamic dependency that programs can get resolved with at runtime."
+ fi
+ fi
+ else
+ droppeddeps=yes
+ $ECHO
+ $ECHO "*** Warning! Library $i is needed by this library but I was not able to"
+ $ECHO "*** make it link in! You will probably need to install it or some"
+ $ECHO "*** library that it depends on before this library will be fully"
+ $ECHO "*** functional. Installing it before continuing would be even better."
+ fi
+ ;;
+ *)
+ newdeplibs="$newdeplibs $i"
+ ;;
+ esac
+ done
+ fi
+ ;;
+ file_magic*)
+ set dummy $deplibs_check_method; shift
+ file_magic_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+ for a_deplib in $deplibs; do
+ case $a_deplib in
+ -l*)
+ func_stripname -l '' "$a_deplib"
+ name=$func_stripname_result
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ case " $predeps $postdeps " in
+ *" $a_deplib "*)
+ newdeplibs="$newdeplibs $a_deplib"
+ a_deplib=""
+ ;;
+ esac
+ fi
+ if test -n "$a_deplib" ; then
+ libname=`eval "\\$ECHO \"$libname_spec\""`
+ for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+ for potent_lib in $potential_libs; do
+ # Follow soft links.
+ if ls -lLd "$potent_lib" 2>/dev/null |
+ $GREP " -> " >/dev/null; then
+ continue
+ fi
+ # The statement above tries to avoid entering an
+ # endless loop below, in case of cyclic links.
+ # We might still enter an endless loop, since a link
+ # loop can be closed while we follow links,
+ # but so what?
+ potlib="$potent_lib"
+ while test -h "$potlib" 2>/dev/null; do
+ potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'`
+ case $potliblink in
+ [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
+ *) potlib=`$ECHO "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
+ esac
+ done
+ if eval $file_magic_cmd \"\$potlib\" 2>/dev/null |
+ $SED -e 10q |
+ $EGREP "$file_magic_regex" > /dev/null; then
+ newdeplibs="$newdeplibs $a_deplib"
+ a_deplib=""
+ break 2
+ fi
+ done
+ done
+ fi
+ if test -n "$a_deplib" ; then
+ droppeddeps=yes
+ $ECHO
+ $ECHO "*** Warning: linker path does not have real file for library $a_deplib."
+ $ECHO "*** I have the capability to make that library automatically link in when"
+ $ECHO "*** you link to this library. But I can only do this if you have a"
+ $ECHO "*** shared version of the library, which you do not appear to have"
+ $ECHO "*** because I did check the linker path looking for a file starting"
+ if test -z "$potlib" ; then
+ $ECHO "*** with $libname but no candidates were found. (...for file magic test)"
+ else
+ $ECHO "*** with $libname and none of the candidates passed a file format test"
+ $ECHO "*** using a file magic. Last file checked: $potlib"
+ fi
+ fi
+ ;;
+ *)
+ # Add a -L argument.
+ newdeplibs="$newdeplibs $a_deplib"
+ ;;
+ esac
+ done # Gone through all deplibs.
+ ;;
+ match_pattern*)
+ set dummy $deplibs_check_method; shift
+ match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+ for a_deplib in $deplibs; do
+ case $a_deplib in
+ -l*)
+ func_stripname -l '' "$a_deplib"
+ name=$func_stripname_result
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ case " $predeps $postdeps " in
+ *" $a_deplib "*)
+ newdeplibs="$newdeplibs $a_deplib"
+ a_deplib=""
+ ;;
+ esac
+ fi
+ if test -n "$a_deplib" ; then
+ libname=`eval "\\$ECHO \"$libname_spec\""`
+ for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+ for potent_lib in $potential_libs; do
+ potlib="$potent_lib" # see symlink-check above in file_magic test
+ if eval "\$ECHO \"X$potent_lib\"" 2>/dev/null | $Xsed -e 10q | \
+ $EGREP "$match_pattern_regex" > /dev/null; then
+ newdeplibs="$newdeplibs $a_deplib"
+ a_deplib=""
+ break 2
+ fi
+ done
+ done
+ fi
+ if test -n "$a_deplib" ; then
+ droppeddeps=yes
+ $ECHO
+ $ECHO "*** Warning: linker path does not have real file for library $a_deplib."
+ $ECHO "*** I have the capability to make that library automatically link in when"
+ $ECHO "*** you link to this library. But I can only do this if you have a"
+ $ECHO "*** shared version of the library, which you do not appear to have"
+ $ECHO "*** because I did check the linker path looking for a file starting"
+ if test -z "$potlib" ; then
+ $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)"
+ else
+ $ECHO "*** with $libname and none of the candidates passed a file format test"
+ $ECHO "*** using a regex pattern. Last file checked: $potlib"
+ fi
+ fi
+ ;;
+ *)
+ # Add a -L argument.
+ newdeplibs="$newdeplibs $a_deplib"
+ ;;
+ esac
+ done # Gone through all deplibs.
+ ;;
+ none | unknown | *)
+ newdeplibs=""
+ tmp_deplibs=`$ECHO "X $deplibs" | $Xsed \
+ -e 's/ -lc$//' -e 's/ -[LR][^ ]*//g'`
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+ for i in $predeps $postdeps ; do
+ # can't use Xsed below, because $i might contain '/'
+ tmp_deplibs=`$ECHO "X $tmp_deplibs" | $Xsed -e "s,$i,,"`
+ done
+ fi
+ if $ECHO "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' |
+ $GREP . >/dev/null; then
+ $ECHO
+ if test "X$deplibs_check_method" = "Xnone"; then
+	    $ECHO "*** Warning: inter-library dependencies are not supported on this platform."
+ else
+ $ECHO "*** Warning: inter-library dependencies are not known to be supported."
+ fi
+ $ECHO "*** All declared inter-library dependencies are being dropped."
+ droppeddeps=yes
+ fi
+ ;;
+ esac
+ versuffix=$versuffix_save
+ major=$major_save
+ release=$release_save
+ libname=$libname_save
+ name=$name_save
+
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # On Rhapsody replace the C library with the System framework
+ newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's/ -lc / System.ltframework /'`
+ ;;
+ esac
+
+ if test "$droppeddeps" = yes; then
+ if test "$module" = yes; then
+ $ECHO
+ $ECHO "*** Warning: libtool could not satisfy all declared inter-library"
+ $ECHO "*** dependencies of module $libname. Therefore, libtool will create"
+ $ECHO "*** a static module, that should work as long as the dlopening"
+ $ECHO "*** application is linked with the -dlopen flag."
+ if test -z "$global_symbol_pipe"; then
+ $ECHO
+ $ECHO "*** However, this would only work if libtool was able to extract symbol"
+ $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ $ECHO "*** not find such a program. So, this module is probably useless."
+ $ECHO "*** \`nm' from GNU binutils and a full rebuild may help."
+ fi
+ if test "$build_old_libs" = no; then
+ oldlibs="$output_objdir/$libname.$libext"
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ else
+ $ECHO "*** The inter-library dependencies that have been dropped here will be"
+ $ECHO "*** automatically added whenever a program is linked with this library"
+ $ECHO "*** or is declared to -dlopen it."
+
+ if test "$allow_undefined" = no; then
+ $ECHO
+ $ECHO "*** Since this library must not contain undefined symbols,"
+ $ECHO "*** because either the platform does not support them or"
+ $ECHO "*** it was explicitly requested with -no-undefined,"
+ $ECHO "*** libtool will only create a static version of it."
+ if test "$build_old_libs" = no; then
+ oldlibs="$output_objdir/$libname.$libext"
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ fi
+ fi
+ fi
+ # Done checking deplibs!
+ deplibs=$newdeplibs
+ fi
+ # Time to change all our "foo.ltframework" stuff back to "-framework foo"
+ case $host in
+ *-*-darwin*)
+ newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ new_inherited_linker_flags=`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ deplibs=`$ECHO "X $deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ ;;
+ esac
+
+ # move library search paths that coincide with paths to not yet
+ # installed libraries to the beginning of the library search list
+ new_libs=
+ for path in $notinst_path; do
+ case " $new_libs " in
+ *" -L$path/$objdir "*) ;;
+ *)
+ case " $deplibs " in
+ *" -L$path/$objdir "*)
+ new_libs="$new_libs -L$path/$objdir" ;;
+ esac
+ ;;
+ esac
+ done
+ for deplib in $deplibs; do
+ case $deplib in
+ -L*)
+ case " $new_libs " in
+ *" $deplib "*) ;;
+ *) new_libs="$new_libs $deplib" ;;
+ esac
+ ;;
+ *) new_libs="$new_libs $deplib" ;;
+ esac
+ done
+ deplibs="$new_libs"
+
+ # All the library-specific variables (install_libdir is set above).
+ library_names=
+ old_library=
+ dlname=
+
+ # Test again, we may have decided not to build it any more
+ if test "$build_libtool_libs" = yes; then
+ if test "$hardcode_into_libs" = yes; then
+ # Hardcode the library paths
+ hardcode_libdirs=
+ dep_rpath=
+ rpath="$finalize_rpath"
+ test "$mode" != relink && rpath="$compile_rpath$rpath"
+ for libdir in $rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ dep_rpath="$dep_rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$perm_rpath " in
+ *" $libdir "*) ;;
+ *) perm_rpath="$perm_rpath $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ if test -n "$hardcode_libdir_flag_spec_ld"; then
+ eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\"
+ else
+ eval dep_rpath=\"$hardcode_libdir_flag_spec\"
+ fi
+ fi
+ if test -n "$runpath_var" && test -n "$perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
+ fi
+ test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
+ fi
+
+ shlibpath="$finalize_shlibpath"
+ test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
+ if test -n "$shlibpath"; then
+ eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
+ fi
+
+ # Get the real and link names of the library.
+ eval shared_ext=\"$shrext_cmds\"
+ eval library_names=\"$library_names_spec\"
+ set dummy $library_names
+ shift
+ realname="$1"
+ shift
+
+ if test -n "$soname_spec"; then
+ eval soname=\"$soname_spec\"
+ else
+ soname="$realname"
+ fi
+ if test -z "$dlname"; then
+ dlname=$soname
+ fi
+
+ lib="$output_objdir/$realname"
+ linknames=
+ for link
+ do
+ linknames="$linknames $link"
+ done
+
+ # Use standard objects if they are pic
+ test -z "$pic_flag" && libobjs=`$ECHO "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ test "X$libobjs" = "X " && libobjs=
+
+ delfiles=
+ if test -n "$export_symbols" && test -n "$include_expsyms"; then
+ $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp"
+ export_symbols="$output_objdir/$libname.uexp"
+ delfiles="$delfiles $export_symbols"
+ fi
+
+ orig_export_symbols=
+ case $host_os in
+ cygwin* | mingw* | cegcc*)
+ if test -n "$export_symbols" && test -z "$export_symbols_regex"; then
+ # exporting using user supplied symfile
+ if test "x`$SED 1q $export_symbols`" != xEXPORTS; then
+ # and it's NOT already a .def file. Must figure out
+ # which of the given symbols are data symbols and tag
+ # them as such. So, trigger use of export_symbols_cmds.
+ # export_symbols gets reassigned inside the "prepare
+ # the list of exported symbols" if statement, so the
+ # include_expsyms logic still works.
+ orig_export_symbols="$export_symbols"
+ export_symbols=
+ always_export_symbols=yes
+ fi
+ fi
+ ;;
+ esac
+
+ # Prepare the list of exported symbols
+ if test -z "$export_symbols"; then
+ if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
+ func_verbose "generating symbol list for \`$libname.la'"
+ export_symbols="$output_objdir/$libname.exp"
+ $opt_dry_run || $RM $export_symbols
+ cmds=$export_symbols_cmds
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ eval cmd=\"$cmd\"
+ func_len " $cmd"
+ len=$func_len_result
+ if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+ func_show_eval "$cmd" 'exit $?'
+ skipped_export=false
+ else
+ # The command line is too long to execute in one step.
+ func_verbose "using reloadable object file for export list..."
+ skipped_export=:
+ # Break out early, otherwise skipped_export may be
+ # set to false by a later but shorter cmd.
+ break
+ fi
+ done
+ IFS="$save_ifs"
+ if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then
+ func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+ func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
+ fi
+ fi
+ fi
+
+ if test -n "$export_symbols" && test -n "$include_expsyms"; then
+ tmp_export_symbols="$export_symbols"
+ test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols"
+ $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"'
+ fi
+
+ if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then
+	  # The given export_symbols file has to be filtered, so filter it.
+ func_verbose "filter symbol list for \`$libname.la' to tag DATA exports"
+ # FIXME: $output_objdir/$libname.filter potentially contains lots of
+ # 's' commands which not all seds can handle. GNU sed should be fine
+ # though. Also, the filter scales superlinearly with the number of
+ # global variables. join(1) would be nice here, but unfortunately
+ # isn't a blessed tool.
+ $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
+ delfiles="$delfiles $export_symbols $output_objdir/$libname.filter"
+ export_symbols=$output_objdir/$libname.def
+ $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
+ fi
+
+ tmp_deplibs=
+ for test_deplib in $deplibs; do
+ case " $convenience " in
+ *" $test_deplib "*) ;;
+ *)
+ tmp_deplibs="$tmp_deplibs $test_deplib"
+ ;;
+ esac
+ done
+ deplibs="$tmp_deplibs"
+
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec" &&
+ test "$compiler_needs_object" = yes &&
+ test -z "$libobjs"; then
+ # extract the archives, so we have objects to list.
+ # TODO: could optimize this to just extract one archive.
+ whole_archive_flag_spec=
+ fi
+ if test -n "$whole_archive_flag_spec"; then
+ save_libobjs=$libobjs
+ eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+ test "X$libobjs" = "X " && libobjs=
+ else
+ gentop="$output_objdir/${outputname}x"
+ generated="$generated $gentop"
+
+ func_extract_archives $gentop $convenience
+ libobjs="$libobjs $func_extract_archives_result"
+ test "X$libobjs" = "X " && libobjs=
+ fi
+ fi
+
+ if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
+ eval flag=\"$thread_safe_flag_spec\"
+ linker_flags="$linker_flags $flag"
+ fi
+
+ # Make a backup of the uninstalled library when relinking
+ if test "$mode" = relink; then
+ $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $?
+ fi
+
+ # Do each of the archive commands.
+ if test "$module" = yes && test -n "$module_cmds" ; then
+ if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
+ eval test_cmds=\"$module_expsym_cmds\"
+ cmds=$module_expsym_cmds
+ else
+ eval test_cmds=\"$module_cmds\"
+ cmds=$module_cmds
+ fi
+ else
+ if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+ eval test_cmds=\"$archive_expsym_cmds\"
+ cmds=$archive_expsym_cmds
+ else
+ eval test_cmds=\"$archive_cmds\"
+ cmds=$archive_cmds
+ fi
+ fi
+
+ if test "X$skipped_export" != "X:" &&
+ func_len " $test_cmds" &&
+ len=$func_len_result &&
+ test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+ :
+ else
+ # The command line is too long to link in one step, link piecewise
+ # or, if using GNU ld and skipped_export is not :, use a linker
+ # script.
+
+ # Save the value of $output and $libobjs because we want to
+ # use them later. If we have whole_archive_flag_spec, we
+ # want to use save_libobjs as it was before
+ # whole_archive_flag_spec was expanded, because we can't
+ # assume the linker understands whole_archive_flag_spec.
+ # This may have to be revisited, in case too many
+ # convenience libraries get linked in and end up exceeding
+ # the spec.
+ if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then
+ save_libobjs=$libobjs
+ fi
+ save_output=$output
+ output_la=`$ECHO "X$output" | $Xsed -e "$basename"`
+
+ # Clear the reloadable object creation command queue and
+ # initialize k to one.
+ test_cmds=
+ concat_cmds=
+ objlist=
+ last_robj=
+ k=1
+
+ if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then
+ output=${output_objdir}/${output_la}.lnkscript
+ func_verbose "creating GNU ld script: $output"
+ $ECHO 'INPUT (' > $output
+ for obj in $save_libobjs
+ do
+ $ECHO "$obj" >> $output
+ done
+ $ECHO ')' >> $output
+ delfiles="$delfiles $output"
+ elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then
+ output=${output_objdir}/${output_la}.lnk
+ func_verbose "creating linker input file list: $output"
+ : > $output
+ set x $save_libobjs
+ shift
+ firstobj=
+ if test "$compiler_needs_object" = yes; then
+ firstobj="$1 "
+ shift
+ fi
+ for obj
+ do
+ $ECHO "$obj" >> $output
+ done
+ delfiles="$delfiles $output"
+ output=$firstobj\"$file_list_spec$output\"
+ else
+ if test -n "$save_libobjs"; then
+ func_verbose "creating reloadable object files..."
+ output=$output_objdir/$output_la-${k}.$objext
+ eval test_cmds=\"$reload_cmds\"
+ func_len " $test_cmds"
+ len0=$func_len_result
+ len=$len0
+
+ # Loop over the list of objects to be linked.
+ for obj in $save_libobjs
+ do
+ func_len " $obj"
+ func_arith $len + $func_len_result
+ len=$func_arith_result
+ if test "X$objlist" = X ||
+ test "$len" -lt "$max_cmd_len"; then
+ func_append objlist " $obj"
+ else
+ # The command $test_cmds is almost too long, add a
+ # command to the queue.
+ if test "$k" -eq 1 ; then
+ # The first file doesn't have a previous command to add.
+ eval concat_cmds=\"$reload_cmds $objlist $last_robj\"
+ else
+ # All subsequent reloadable object files will link in
+ # the last one created.
+ eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj~\$RM $last_robj\"
+ fi
+ last_robj=$output_objdir/$output_la-${k}.$objext
+ func_arith $k + 1
+ k=$func_arith_result
+ output=$output_objdir/$output_la-${k}.$objext
+ objlist=$obj
+ func_len " $last_robj"
+ func_arith $len0 + $func_len_result
+ len=$func_arith_result
+ fi
+ done
+ # Handle the remaining objects by creating one last
+ # reloadable object file. All subsequent reloadable object
+ # files will link in the last one created.
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+ eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\"
+ if test -n "$last_robj"; then
+ eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\"
+ fi
+ delfiles="$delfiles $output"
+
+ else
+ output=
+ fi
+
+ if ${skipped_export-false}; then
+ func_verbose "generating symbol list for \`$libname.la'"
+ export_symbols="$output_objdir/$libname.exp"
+ $opt_dry_run || $RM $export_symbols
+ libobjs=$output
+ # Append the command to create the export file.
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+ eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\"
+ if test -n "$last_robj"; then
+ eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\"
+ fi
+ fi
+
+ test -n "$save_libobjs" &&
+ func_verbose "creating a temporary reloadable object file: $output"
+
+ # Loop through the commands generated above and execute them.
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $concat_cmds; do
+ IFS="$save_ifs"
+ $opt_silent || {
+ func_quote_for_expand "$cmd"
+ eval "func_echo $func_quote_for_expand_result"
+ }
+ $opt_dry_run || eval "$cmd" || {
+ lt_exit=$?
+
+ # Restore the uninstalled library and exit
+ if test "$mode" = relink; then
+ ( cd "$output_objdir" && \
+ $RM "${realname}T" && \
+ $MV "${realname}U" "$realname" )
+ fi
+
+ exit $lt_exit
+ }
+ done
+ IFS="$save_ifs"
+
+ if test -n "$export_symbols_regex" && ${skipped_export-false}; then
+ func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+ func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
+ fi
+ fi
+
+ if ${skipped_export-false}; then
+ if test -n "$export_symbols" && test -n "$include_expsyms"; then
+ tmp_export_symbols="$export_symbols"
+ test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols"
+ $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"'
+ fi
+
+ if test -n "$orig_export_symbols"; then
+	      # The given export_symbols file has to be filtered, so filter it.
+ func_verbose "filter symbol list for \`$libname.la' to tag DATA exports"
+ # FIXME: $output_objdir/$libname.filter potentially contains lots of
+ # 's' commands which not all seds can handle. GNU sed should be fine
+ # though. Also, the filter scales superlinearly with the number of
+ # global variables. join(1) would be nice here, but unfortunately
+ # isn't a blessed tool.
+ $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
+ delfiles="$delfiles $export_symbols $output_objdir/$libname.filter"
+ export_symbols=$output_objdir/$libname.def
+ $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
+ fi
+ fi
+
+ libobjs=$output
+ # Restore the value of output.
+ output=$save_output
+
+ if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then
+ eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+ test "X$libobjs" = "X " && libobjs=
+ fi
+ # Expand the library linking commands again to reset the
+ # value of $libobjs for piecewise linking.
+
+ # Do each of the archive commands.
+ if test "$module" = yes && test -n "$module_cmds" ; then
+ if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
+ cmds=$module_expsym_cmds
+ else
+ cmds=$module_cmds
+ fi
+ else
+ if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+ cmds=$archive_expsym_cmds
+ else
+ cmds=$archive_cmds
+ fi
+ fi
+ fi
+
+ if test -n "$delfiles"; then
+ # Append the command to remove temporary files to $cmds.
+ eval cmds=\"\$cmds~\$RM $delfiles\"
+ fi
+
+ # Add any objects from preloaded convenience libraries
+ if test -n "$dlprefiles"; then
+ gentop="$output_objdir/${outputname}x"
+ generated="$generated $gentop"
+
+ func_extract_archives $gentop $dlprefiles
+ libobjs="$libobjs $func_extract_archives_result"
+ test "X$libobjs" = "X " && libobjs=
+ fi
+
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ eval cmd=\"$cmd\"
+ $opt_silent || {
+ func_quote_for_expand "$cmd"
+ eval "func_echo $func_quote_for_expand_result"
+ }
+ $opt_dry_run || eval "$cmd" || {
+ lt_exit=$?
+
+ # Restore the uninstalled library and exit
+ if test "$mode" = relink; then
+ ( cd "$output_objdir" && \
+ $RM "${realname}T" && \
+ $MV "${realname}U" "$realname" )
+ fi
+
+ exit $lt_exit
+ }
+ done
+ IFS="$save_ifs"
+
+ # Restore the uninstalled library and exit
+ if test "$mode" = relink; then
+ $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $?
+
+ if test -n "$convenience"; then
+ if test -z "$whole_archive_flag_spec"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+ fi
+
+ exit $EXIT_SUCCESS
+ fi
+
+ # Create links to the real library.
+ for linkname in $linknames; do
+ if test "$realname" != "$linkname"; then
+ func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?'
+ fi
+ done
+
+ # If -module or -export-dynamic was specified, set the dlname.
+ if test "$module" = yes || test "$export_dynamic" = yes; then
+ # On all known operating systems, these are identical.
+ dlname="$soname"
+ fi
+ fi
+ ;;
+
+ obj)
+ if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+ func_warning "\`-dlopen' is ignored for objects"
+ fi
+
+ case " $deplibs" in
+ *\ -l* | *\ -L*)
+ func_warning "\`-l' and \`-L' are ignored for objects" ;;
+ esac
+
+ test -n "$rpath" && \
+ func_warning "\`-rpath' is ignored for objects"
+
+ test -n "$xrpath" && \
+ func_warning "\`-R' is ignored for objects"
+
+ test -n "$vinfo" && \
+ func_warning "\`-version-info' is ignored for objects"
+
+ test -n "$release" && \
+ func_warning "\`-release' is ignored for objects"
+
+ case $output in
+ *.lo)
+ test -n "$objs$old_deplibs" && \
+ func_fatal_error "cannot build library object \`$output' from non-libtool objects"
+
+ libobj=$output
+ func_lo2o "$libobj"
+ obj=$func_lo2o_result
+ ;;
+ *)
+ libobj=
+ obj="$output"
+ ;;
+ esac
+
+ # Delete the old objects.
+ $opt_dry_run || $RM $obj $libobj
+
+      # Objects from convenience libraries. This assumes
+      # single-version convenience libraries. Whenever we create
+      # different ones for PIC/non-PIC, we'll have to duplicate
+      # the extraction.
+ reload_conv_objs=
+ gentop=
+      # reload_cmds runs $LD directly, so let us get rid of
+      # -Wl from whole_archive_flag_spec and hope we can get by with
+      # turning commas into spaces.
+ wl=
+
+ if test -n "$convenience"; then
+ if test -n "$whole_archive_flag_spec"; then
+ eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\"
+ reload_conv_objs=$reload_objs\ `$ECHO "X$tmp_whole_archive_flags" | $Xsed -e 's|,| |g'`
+ else
+ gentop="$output_objdir/${obj}x"
+ generated="$generated $gentop"
+
+ func_extract_archives $gentop $convenience
+ reload_conv_objs="$reload_objs $func_extract_archives_result"
+ fi
+ fi
+
+ # Create the old-style object.
+ reload_objs="$objs$old_deplibs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
+
+ output="$obj"
+ func_execute_cmds "$reload_cmds" 'exit $?'
+
+ # Exit if we aren't doing a library object file.
+ if test -z "$libobj"; then
+ if test -n "$gentop"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+
+ exit $EXIT_SUCCESS
+ fi
+
+ if test "$build_libtool_libs" != yes; then
+ if test -n "$gentop"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+
+ # Create an invalid libtool object if no PIC, so that we don't
+ # accidentally link it into a program.
+ # $show "echo timestamp > $libobj"
+ # $opt_dry_run || eval "echo timestamp > $libobj" || exit $?
+ exit $EXIT_SUCCESS
+ fi
+
+ if test -n "$pic_flag" || test "$pic_mode" != default; then
+ # Only do commands if we really have different PIC objects.
+ reload_objs="$libobjs $reload_conv_objs"
+ output="$libobj"
+ func_execute_cmds "$reload_cmds" 'exit $?'
+ fi
+
+ if test -n "$gentop"; then
+ func_show_eval '${RM}r "$gentop"'
+ fi
+
+ exit $EXIT_SUCCESS
+ ;;
+
+ prog)
+ case $host in
+ *cygwin*) func_stripname '' '.exe' "$output"
+ output=$func_stripname_result.exe;;
+ esac
+ test -n "$vinfo" && \
+ func_warning "\`-version-info' is ignored for programs"
+
+ test -n "$release" && \
+ func_warning "\`-release' is ignored for programs"
+
+ test "$preload" = yes \
+ && test "$dlopen_support" = unknown \
+ && test "$dlopen_self" = unknown \
+ && test "$dlopen_self_static" = unknown && \
+ func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support."
+
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+	  # On Rhapsody replace the C library with the System framework
+ compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'`
+ finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'`
+ ;;
+ esac
+
+ case $host in
+ *-*-darwin*)
+	  # Don't allow lazy linking; it breaks C++ global constructors,
+	  # but it is supposedly fixed on 10.4 or later (yay!).
+ if test "$tagname" = CXX ; then
+ case ${MACOSX_DEPLOYMENT_TARGET-10.0} in
+ 10.[0123])
+ compile_command="$compile_command ${wl}-bind_at_load"
+ finalize_command="$finalize_command ${wl}-bind_at_load"
+ ;;
+ esac
+ fi
+ # Time to change all our "foo.ltframework" stuff back to "-framework foo"
+ compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ ;;
+ esac
+
+
+ # move library search paths that coincide with paths to not yet
+ # installed libraries to the beginning of the library search list
+ new_libs=
+ for path in $notinst_path; do
+ case " $new_libs " in
+ *" -L$path/$objdir "*) ;;
+ *)
+ case " $compile_deplibs " in
+ *" -L$path/$objdir "*)
+ new_libs="$new_libs -L$path/$objdir" ;;
+ esac
+ ;;
+ esac
+ done
+ for deplib in $compile_deplibs; do
+ case $deplib in
+ -L*)
+ case " $new_libs " in
+ *" $deplib "*) ;;
+ *) new_libs="$new_libs $deplib" ;;
+ esac
+ ;;
+ *) new_libs="$new_libs $deplib" ;;
+ esac
+ done
+ compile_deplibs="$new_libs"
+
+
+ compile_command="$compile_command $compile_deplibs"
+ finalize_command="$finalize_command $finalize_deplibs"
+
+ if test -n "$rpath$xrpath"; then
+ # If the user specified any rpath flags, then add them.
+ for libdir in $rpath $xrpath; do
+ # This is the magic to use -rpath.
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir" ;;
+ esac
+ done
+ fi
+
+ # Now hardcode the library paths
+ rpath=
+ hardcode_libdirs=
+ for libdir in $compile_rpath $finalize_rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ rpath="$rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$perm_rpath " in
+ *" $libdir "*) ;;
+ *) perm_rpath="$perm_rpath $libdir" ;;
+ esac
+ fi
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+ testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'`
+ case :$dllsearchpath: in
+ *":$libdir:"*) ;;
+ ::) dllsearchpath=$libdir;;
+ *) dllsearchpath="$dllsearchpath:$libdir";;
+ esac
+ case :$dllsearchpath: in
+ *":$testbindir:"*) ;;
+ ::) dllsearchpath=$testbindir;;
+ *) dllsearchpath="$dllsearchpath:$testbindir";;
+ esac
+ ;;
+ esac
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval rpath=\" $hardcode_libdir_flag_spec\"
+ fi
+ compile_rpath="$rpath"
+
+ rpath=
+ hardcode_libdirs=
+ for libdir in $finalize_rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ rpath="$rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$finalize_perm_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval rpath=\" $hardcode_libdir_flag_spec\"
+ fi
+ finalize_rpath="$rpath"
+
+ if test -n "$libobjs" && test "$build_old_libs" = yes; then
+ # Transform all the library objects into standard objects.
+ compile_command=`$ECHO "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ finalize_command=`$ECHO "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ fi
+
+ func_generate_dlsyms "$outputname" "@PROGRAM@" "no"
+
+ # template prelinking step
+ if test -n "$prelink_cmds"; then
+ func_execute_cmds "$prelink_cmds" 'exit $?'
+ fi
+
+ wrappers_required=yes
+ case $host in
+ *cygwin* | *mingw* )
+ if test "$build_libtool_libs" != yes; then
+ wrappers_required=no
+ fi
+ ;;
+ *cegcc)
+	# Disable wrappers for cegcc; we are cross-compiling anyway.
+ wrappers_required=no
+ ;;
+ *)
+ if test "$need_relink" = no || test "$build_libtool_libs" != yes; then
+ wrappers_required=no
+ fi
+ ;;
+ esac
+ if test "$wrappers_required" = no; then
+ # Replace the output file specification.
+ compile_command=`$ECHO "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
+ link_command="$compile_command$compile_rpath"
+
+ # We have no uninstalled library dependencies, so finalize right now.
+ exit_status=0
+ func_show_eval "$link_command" 'exit_status=$?'
+
+ # Delete the generated files.
+ if test -f "$output_objdir/${outputname}S.${objext}"; then
+ func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"'
+ fi
+
+ exit $exit_status
+ fi
+
+ if test -n "$compile_shlibpath$finalize_shlibpath"; then
+ compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
+ fi
+ if test -n "$finalize_shlibpath"; then
+ finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
+ fi
+
+ compile_var=
+ finalize_var=
+ if test -n "$runpath_var"; then
+ if test -n "$perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
+ fi
+ if test -n "$finalize_perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $finalize_perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
+ fi
+ fi
+
+ if test "$no_install" = yes; then
+ # We don't need to create a wrapper script.
+ link_command="$compile_var$compile_command$compile_rpath"
+ # Replace the output file specification.
+ link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
+ # Delete the old output file.
+ $opt_dry_run || $RM $output
+ # Link the executable and exit
+ func_show_eval "$link_command" 'exit $?'
+ exit $EXIT_SUCCESS
+ fi
+
+ if test "$hardcode_action" = relink; then
+ # Fast installation is not supported
+ link_command="$compile_var$compile_command$compile_rpath"
+ relink_command="$finalize_var$finalize_command$finalize_rpath"
+
+ func_warning "this platform does not like uninstalled shared libraries"
+ func_warning "\`$output' will be relinked during installation"
+ else
+ if test "$fast_install" != no; then
+ link_command="$finalize_var$compile_command$finalize_rpath"
+ if test "$fast_install" = yes; then
+ relink_command=`$ECHO "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
+ else
+ # fast_install is set to needless
+ relink_command=
+ fi
+ else
+ link_command="$compile_var$compile_command$compile_rpath"
+ relink_command="$finalize_var$finalize_command$finalize_rpath"
+ fi
+ fi
+
+ # Replace the output file specification.
+ link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
+
+ # Delete the old output files.
+ $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname
+
+ func_show_eval "$link_command" 'exit $?'
+
+ # Now create the wrapper script.
+ func_verbose "creating $output"
+
+ # Quote the relink command for shipping.
+ if test -n "$relink_command"; then
+ # Preserve any variables that may affect compiler behavior
+ for var in $variables_saved_for_relink; do
+ if eval test -z \"\${$var+set}\"; then
+ relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
+ elif eval var_value=\$$var; test -z "$var_value"; then
+ relink_command="$var=; export $var; $relink_command"
+ else
+ func_quote_for_eval "$var_value"
+ relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
+ fi
+ done
+ relink_command="(cd `pwd`; $relink_command)"
+ relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"`
+ fi
+
+ # Quote $ECHO for shipping.
+ if test "X$ECHO" = "X$SHELL $progpath --fallback-echo"; then
+ case $progpath in
+ [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";;
+ *) qecho="$SHELL `pwd`/$progpath --fallback-echo";;
+ esac
+ qecho=`$ECHO "X$qecho" | $Xsed -e "$sed_quote_subst"`
+ else
+ qecho=`$ECHO "X$ECHO" | $Xsed -e "$sed_quote_subst"`
+ fi
+
+ # Only actually do things if not in dry run mode.
+ $opt_dry_run || {
+ # win32 will think the script is a binary if it has
+ # a .exe suffix, so we strip it off here.
+ case $output in
+ *.exe) func_stripname '' '.exe' "$output"
+ output=$func_stripname_result ;;
+ esac
+ # test for cygwin because mv fails w/o .exe extensions
+ case $host in
+ *cygwin*)
+ exeext=.exe
+ func_stripname '' '.exe' "$outputname"
+ outputname=$func_stripname_result ;;
+ *) exeext= ;;
+ esac
+ case $host in
+ *cygwin* | *mingw* )
+ func_dirname_and_basename "$output" "" "."
+ output_name=$func_basename_result
+ output_path=$func_dirname_result
+ cwrappersource="$output_path/$objdir/lt-$output_name.c"
+ cwrapper="$output_path/$output_name.exe"
+ $RM $cwrappersource $cwrapper
+ trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
+
+ func_emit_cwrapperexe_src > $cwrappersource
+
+ # The wrapper executable is built using the $host compiler,
+ # because it contains $host paths and files. If cross-
+ # compiling, it, like the target executable, must be
+ # executed on the $host or under an emulation environment.
+ $opt_dry_run || {
+ $LTCC $LTCFLAGS -o $cwrapper $cwrappersource
+ $STRIP $cwrapper
+ }
+
+ # Now, create the wrapper script for func_source use:
+ func_ltwrapper_scriptname $cwrapper
+ $RM $func_ltwrapper_scriptname_result
+ trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15
+ $opt_dry_run || {
+ # note: this script will not be executed, so do not chmod.
+ if test "x$build" = "x$host" ; then
+ $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result
+ else
+ func_emit_wrapper no > $func_ltwrapper_scriptname_result
+ fi
+ }
+ ;;
+ * )
+ $RM $output
+ trap "$RM $output; exit $EXIT_FAILURE" 1 2 15
+
+ func_emit_wrapper no > $output
+ chmod +x $output
+ ;;
+ esac
+ }
+ exit $EXIT_SUCCESS
+ ;;
+ esac
+
+ # See if we need to build an old-fashioned archive.
+ for oldlib in $oldlibs; do
+
+ if test "$build_libtool_libs" = convenience; then
+ oldobjs="$libobjs_save $symfileobj"
+ addlibs="$convenience"
+ build_libtool_libs=no
+ else
+ if test "$build_libtool_libs" = module; then
+ oldobjs="$libobjs_save"
+ build_libtool_libs=no
+ else
+ oldobjs="$old_deplibs $non_pic_objects"
+ if test "$preload" = yes && test -f "$symfileobj"; then
+ oldobjs="$oldobjs $symfileobj"
+ fi
+ fi
+ addlibs="$old_convenience"
+ fi
+
+ if test -n "$addlibs"; then
+ gentop="$output_objdir/${outputname}x"
+ generated="$generated $gentop"
+
+ func_extract_archives $gentop $addlibs
+ oldobjs="$oldobjs $func_extract_archives_result"
+ fi
+
+ # Do each command in the archive commands.
+ if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
+ cmds=$old_archive_from_new_cmds
+ else
+
+ # Add any objects from preloaded convenience libraries
+ if test -n "$dlprefiles"; then
+ gentop="$output_objdir/${outputname}x"
+ generated="$generated $gentop"
+
+ func_extract_archives $gentop $dlprefiles
+ oldobjs="$oldobjs $func_extract_archives_result"
+ fi
+
+ # POSIX demands no paths to be encoded in archives. We have
+ # to avoid creating archives with duplicate basenames if we
+ # might have to extract them afterwards, e.g., when creating a
+ # static archive out of a convenience library, or when linking
+ # the entirety of a libtool archive into another (currently
+ # not supported by libtool).
+ if (for obj in $oldobjs
+ do
+ func_basename "$obj"
+ $ECHO "$func_basename_result"
+ done | sort | sort -uc >/dev/null 2>&1); then
+ :
+ else
+ $ECHO "copying selected object files to avoid basename conflicts..."
+ gentop="$output_objdir/${outputname}x"
+ generated="$generated $gentop"
+ func_mkdir_p "$gentop"
+ save_oldobjs=$oldobjs
+ oldobjs=
+ counter=1
+ for obj in $save_oldobjs
+ do
+ func_basename "$obj"
+ objbase="$func_basename_result"
+ case " $oldobjs " in
+ " ") oldobjs=$obj ;;
+ *[\ /]"$objbase "*)
+ while :; do
+ # Make sure we don't pick an alternate name that also
+ # overlaps.
+ newobj=lt$counter-$objbase
+ func_arith $counter + 1
+ counter=$func_arith_result
+ case " $oldobjs " in
+ *[\ /]"$newobj "*) ;;
+ *) if test ! -f "$gentop/$newobj"; then break; fi ;;
+ esac
+ done
+ func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj"
+ oldobjs="$oldobjs $gentop/$newobj"
+ ;;
+ *) oldobjs="$oldobjs $obj" ;;
+ esac
+ done
+ fi
+ eval cmds=\"$old_archive_cmds\"
+
+ func_len " $cmds"
+ len=$func_len_result
+ if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+ cmds=$old_archive_cmds
+ else
+	# The command line is too long to link in one step; link in parts.
+ func_verbose "using piecewise archive linking..."
+ save_RANLIB=$RANLIB
+ RANLIB=:
+ objlist=
+ concat_cmds=
+ save_oldobjs=$oldobjs
+ oldobjs=
+ # Is there a better way of finding the last object in the list?
+ for obj in $save_oldobjs
+ do
+ last_oldobj=$obj
+ done
+ eval test_cmds=\"$old_archive_cmds\"
+ func_len " $test_cmds"
+ len0=$func_len_result
+ len=$len0
+ for obj in $save_oldobjs
+ do
+ func_len " $obj"
+ func_arith $len + $func_len_result
+ len=$func_arith_result
+ func_append objlist " $obj"
+ if test "$len" -lt "$max_cmd_len"; then
+ :
+ else
+ # the above command should be used before it gets too long
+ oldobjs=$objlist
+ if test "$obj" = "$last_oldobj" ; then
+ RANLIB=$save_RANLIB
+ fi
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+ eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\"
+ objlist=
+ len=$len0
+ fi
+ done
+ RANLIB=$save_RANLIB
+ oldobjs=$objlist
+ if test "X$oldobjs" = "X" ; then
+ eval cmds=\"\$concat_cmds\"
+ else
+ eval cmds=\"\$concat_cmds~\$old_archive_cmds\"
+ fi
+ fi
+ fi
+ func_execute_cmds "$cmds" 'exit $?'
+ done
+
+ test -n "$generated" && \
+ func_show_eval "${RM}r$generated"
+
+ # Now create the libtool archive.
+ case $output in
+ *.la)
+ old_library=
+ test "$build_old_libs" = yes && old_library="$libname.$libext"
+ func_verbose "creating $output"
+
+ # Preserve any variables that may affect compiler behavior
+ for var in $variables_saved_for_relink; do
+ if eval test -z \"\${$var+set}\"; then
+ relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
+ elif eval var_value=\$$var; test -z "$var_value"; then
+ relink_command="$var=; export $var; $relink_command"
+ else
+ func_quote_for_eval "$var_value"
+ relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
+ fi
+ done
+ # Quote the link command for shipping.
+ relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)"
+ relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"`
+ if test "$hardcode_automatic" = yes ; then
+ relink_command=
+ fi
+
+ # Only create the output if not a dry run.
+ $opt_dry_run || {
+ for installed in no yes; do
+ if test "$installed" = yes; then
+ if test -z "$install_libdir"; then
+ break
+ fi
+ output="$output_objdir/$outputname"i
+ # Replace all uninstalled libtool libraries with the installed ones
+ newdependency_libs=
+ for deplib in $dependency_libs; do
+ case $deplib in
+ *.la)
+ func_basename "$deplib"
+ name="$func_basename_result"
+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+ test -z "$libdir" && \
+ func_fatal_error "\`$deplib' is not a valid libtool archive"
+ newdependency_libs="$newdependency_libs $libdir/$name"
+ ;;
+ *) newdependency_libs="$newdependency_libs $deplib" ;;
+ esac
+ done
+ dependency_libs="$newdependency_libs"
+ newdlfiles=
+
+ for lib in $dlfiles; do
+ case $lib in
+ *.la)
+ func_basename "$lib"
+ name="$func_basename_result"
+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+ test -z "$libdir" && \
+ func_fatal_error "\`$lib' is not a valid libtool archive"
+ newdlfiles="$newdlfiles $libdir/$name"
+ ;;
+ *) newdlfiles="$newdlfiles $lib" ;;
+ esac
+ done
+ dlfiles="$newdlfiles"
+ newdlprefiles=
+ for lib in $dlprefiles; do
+ case $lib in
+ *.la)
+ # Only pass preopened files to the pseudo-archive (for
+ # eventual linking with the app. that links it) if we
+ # didn't already link the preopened objects directly into
+ # the library:
+ func_basename "$lib"
+ name="$func_basename_result"
+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+ test -z "$libdir" && \
+ func_fatal_error "\`$lib' is not a valid libtool archive"
+ newdlprefiles="$newdlprefiles $libdir/$name"
+ ;;
+ esac
+ done
+ dlprefiles="$newdlprefiles"
+ else
+ newdlfiles=
+ for lib in $dlfiles; do
+ case $lib in
+ [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
+ *) abs=`pwd`"/$lib" ;;
+ esac
+ newdlfiles="$newdlfiles $abs"
+ done
+ dlfiles="$newdlfiles"
+ newdlprefiles=
+ for lib in $dlprefiles; do
+ case $lib in
+ [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
+ *) abs=`pwd`"/$lib" ;;
+ esac
+ newdlprefiles="$newdlprefiles $abs"
+ done
+ dlprefiles="$newdlprefiles"
+ fi
+ $RM $output
+ # place dlname in correct position for cygwin
+ tdlname=$dlname
+ case $host,$output,$installed,$module,$dlname in
+ *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
+ esac
+ $ECHO > $output "\
+# $outputname - a libtool library file
+# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='$tdlname'
+
+# Names of this library.
+library_names='$library_names'
+
+# The name of the static archive.
+old_library='$old_library'
+
+# Linker flags that cannot go in dependency_libs.
+inherited_linker_flags='$new_inherited_linker_flags'
+
+# Libraries that this one depends upon.
+dependency_libs='$dependency_libs'
+
+# Names of additional weak libraries provided by this library
+weak_library_names='$weak_libs'
+
+# Version information for $libname.
+current=$current
+age=$age
+revision=$revision
+
+# Is this an already installed library?
+installed=$installed
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=$module
+
+# Files to dlopen/dlpreopen
+dlopen='$dlfiles'
+dlpreopen='$dlprefiles'
+
+# Directory that this library needs to be installed in:
+libdir='$install_libdir'"
+ if test "$installed" = no && test "$need_relink" = yes; then
+ $ECHO >> $output "\
+relink_command=\"$relink_command\""
+ fi
+ done
+ }
+
+ # Do a symbolic link so that the libtool archive can be found in
+ # LD_LIBRARY_PATH before the program is installed.
+ func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?'
+ ;;
+ esac
+ exit $EXIT_SUCCESS
+}
+
+{ test "$mode" = link || test "$mode" = relink; } &&
+ func_mode_link ${1+"$@"}
+
+
+# func_mode_uninstall arg...
+func_mode_uninstall ()
+{
+ $opt_debug
+ RM="$nonopt"
+ files=
+ rmforce=
+ exit_status=0
+
+ # This variable tells wrapper scripts just to set variables rather
+ # than running their programs.
+ libtool_install_magic="$magic"
+
+ for arg
+ do
+ case $arg in
+ -f) RM="$RM $arg"; rmforce=yes ;;
+ -*) RM="$RM $arg" ;;
+ *) files="$files $arg" ;;
+ esac
+ done
+
+ test -z "$RM" && \
+ func_fatal_help "you must specify an RM program"
+
+ rmdirs=
+
+ origobjdir="$objdir"
+ for file in $files; do
+ func_dirname "$file" "" "."
+ dir="$func_dirname_result"
+ if test "X$dir" = X.; then
+ objdir="$origobjdir"
+ else
+ objdir="$dir/$origobjdir"
+ fi
+ func_basename "$file"
+ name="$func_basename_result"
+ test "$mode" = uninstall && objdir="$dir"
+
+ # Remember objdir for removal later, being careful to avoid duplicates
+ if test "$mode" = clean; then
+ case " $rmdirs " in
+ *" $objdir "*) ;;
+ *) rmdirs="$rmdirs $objdir" ;;
+ esac
+ fi
+
+ # Don't error if the file doesn't exist and rm -f was used.
+ if { test -L "$file"; } >/dev/null 2>&1 ||
+ { test -h "$file"; } >/dev/null 2>&1 ||
+ test -f "$file"; then
+ :
+ elif test -d "$file"; then
+ exit_status=1
+ continue
+ elif test "$rmforce" = yes; then
+ continue
+ fi
+
+ rmfiles="$file"
+
+ case $name in
+ *.la)
+ # Possibly a libtool archive, so verify it.
+ if func_lalib_p "$file"; then
+ func_source $dir/$name
+
+ # Delete the libtool libraries and symlinks.
+ for n in $library_names; do
+ rmfiles="$rmfiles $objdir/$n"
+ done
+ test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
+
+ case "$mode" in
+ clean)
+ case " $library_names " in
+	      # " " at the beginning catches an empty $dlname
+ *" $dlname "*) ;;
+ *) rmfiles="$rmfiles $objdir/$dlname" ;;
+ esac
+ test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
+ ;;
+ uninstall)
+ if test -n "$library_names"; then
+ # Do each command in the postuninstall commands.
+ func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1'
+ fi
+
+ if test -n "$old_library"; then
+ # Do each command in the old_postuninstall commands.
+ func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1'
+ fi
+ # FIXME: should reinstall the best remaining shared library.
+ ;;
+ esac
+ fi
+ ;;
+
+ *.lo)
+ # Possibly a libtool object, so verify it.
+ if func_lalib_p "$file"; then
+
+ # Read the .lo file
+ func_source $dir/$name
+
+ # Add PIC object to the list of files to remove.
+ if test -n "$pic_object" &&
+ test "$pic_object" != none; then
+ rmfiles="$rmfiles $dir/$pic_object"
+ fi
+
+ # Add non-PIC object to the list of files to remove.
+ if test -n "$non_pic_object" &&
+ test "$non_pic_object" != none; then
+ rmfiles="$rmfiles $dir/$non_pic_object"
+ fi
+ fi
+ ;;
+
+ *)
+ if test "$mode" = clean ; then
+ noexename=$name
+ case $file in
+ *.exe)
+ func_stripname '' '.exe' "$file"
+ file=$func_stripname_result
+ func_stripname '' '.exe' "$name"
+ noexename=$func_stripname_result
+ # $file with .exe has already been added to rmfiles,
+ # add $file without .exe
+ rmfiles="$rmfiles $file"
+ ;;
+ esac
+ # Do a test to see if this is a libtool program.
+ if func_ltwrapper_p "$file"; then
+ if func_ltwrapper_executable_p "$file"; then
+ func_ltwrapper_scriptname "$file"
+ relink_command=
+ func_source $func_ltwrapper_scriptname_result
+ rmfiles="$rmfiles $func_ltwrapper_scriptname_result"
+ else
+ relink_command=
+ func_source $dir/$noexename
+ fi
+
+	  # Note: $name still contains .exe if it was in $file originally,
+	  # as does the version of $file that was added into $rmfiles.
+ rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
+ if test "$fast_install" = yes && test -n "$relink_command"; then
+ rmfiles="$rmfiles $objdir/lt-$name"
+ fi
+ if test "X$noexename" != "X$name" ; then
+ rmfiles="$rmfiles $objdir/lt-${noexename}.c"
+ fi
+ fi
+ fi
+ ;;
+ esac
+ func_show_eval "$RM $rmfiles" 'exit_status=1'
+ done
+ objdir="$origobjdir"
+
+ # Try to remove the ${objdir}s in the directories where we deleted files
+ for dir in $rmdirs; do
+ if test -d "$dir"; then
+ func_show_eval "rmdir $dir >/dev/null 2>&1"
+ fi
+ done
+
+ exit $exit_status
+}
+
+{ test "$mode" = uninstall || test "$mode" = clean; } &&
+ func_mode_uninstall ${1+"$@"}
+
+test -z "$mode" && {
+ help="$generic_help"
+ func_fatal_help "you must specify a MODE"
+}
+
+test -z "$exec_cmd" && \
+ func_fatal_help "invalid operation mode \`$mode'"
+
+if test -n "$exec_cmd"; then
+ eval exec "$exec_cmd"
+ exit $EXIT_FAILURE
+fi
+
+exit $exit_status
+
+
+# The TAGs below are defined such that we never get into a situation
+# in which we disable both kinds of libraries. Given conflicting
+# choices, we go for a static library, which is the most portable,
+# since we can't tell whether shared libraries were disabled because
+# the user asked for that or because the platform doesn't support
+# them. This is particularly important on AIX, because we don't
+# support having both static and shared libraries enabled at the same
+# time on that platform, so we default to a shared-only configuration.
+# If a disable-shared tag is given, we'll fall back to a static-only
+# configuration. But we'll never go from static-only to shared-only.
+
+# ### BEGIN LIBTOOL TAG CONFIG: disable-shared
+build_libtool_libs=no
+build_old_libs=yes
+# ### END LIBTOOL TAG CONFIG: disable-shared
+
+# ### BEGIN LIBTOOL TAG CONFIG: disable-static
+build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac`
+# ### END LIBTOOL TAG CONFIG: disable-static
+
+# Local Variables:
+# mode:shell-script
+# sh-indentation:2
+# End:
+# vi:sw=2
+
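The link mode implemented above is exercised by invocations of the generated
`libtool' wrapper along the following lines (a minimal sketch; the file names,
flags and -rpath directory are illustrative, not taken from this tree):

    # Compile a source file into a libtool object (.lo), then link a library
    # and a program against it.
    ./libtool --mode=compile gcc -g -O -c foo.c
    ./libtool --mode=link gcc -g -O -o libfoo.la foo.lo -rpath /usr/local/lib
    ./libtool --mode=link gcc -g -O -o prog main.o libfoo.la

The first link call produces libfoo.la together with the shared and/or static
artifacts under .libs/; the second links a program against the uninstalled
library, which is what the wrapper-script and relink logic above supports.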
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing
new file mode 100755
index 00000000..28055d2a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing
@@ -0,0 +1,376 @@
+#! /bin/sh
+# Common stub for a few missing GNU programs while installing.
+
+scriptversion=2009-04-28.21; # UTC
+
+# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006,
+# 2008, 2009 Free Software Foundation, Inc.
+# Originally by François Pinard <pinard@iro.umontreal.ca>, 1996.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+if test $# -eq 0; then
+ echo 1>&2 "Try \`$0 --help' for more information"
+ exit 1
+fi
+
+run=:
+sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p'
+sed_minuso='s/.* -o \([^ ]*\).*/\1/p'
+
+# In the cases where this matters, `missing' is being run in the
+# srcdir already.
+if test -f configure.ac; then
+ configure_ac=configure.ac
+else
+ configure_ac=configure.in
+fi
+
+msg="missing on your system"
+
+case $1 in
+--run)
+ # Try to run requested program, and just exit if it succeeds.
+ run=
+ shift
+ "$@" && exit 0
+ # Exit code 63 means version mismatch. This often happens
+  # when the user tries to use an ancient version of a tool on
+  # a file that requires a minimum version. In this case we
+  # should proceed as if the program had been absent, or
+ # if --run hadn't been passed.
+ if test $? = 63; then
+ run=:
+ msg="probably too old"
+ fi
+ ;;
+
+ -h|--h|--he|--hel|--help)
+ echo "\
+$0 [OPTION]... PROGRAM [ARGUMENT]...
+
+Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an
+error status if there is no known handling for PROGRAM.
+
+Options:
+ -h, --help display this help and exit
+ -v, --version output version information and exit
+ --run try to run the given command, and emulate it if it fails
+
+Supported PROGRAM values:
+ aclocal touch file \`aclocal.m4'
+ autoconf touch file \`configure'
+ autoheader touch file \`config.h.in'
+ autom4te touch the output file, or create a stub one
+ automake touch all \`Makefile.in' files
+ bison create \`y.tab.[ch]', if possible, from existing .[ch]
+ flex create \`lex.yy.c', if possible, from existing .c
+ help2man touch the output file
+ lex create \`lex.yy.c', if possible, from existing .c
+ makeinfo touch the output file
+ tar try tar, gnutar, gtar, then tar without non-portable flags
+ yacc create \`y.tab.[ch]', if possible, from existing .[ch]
+
+Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and
+\`g' are ignored when checking the name.
+
+Send bug reports to <bug-automake@gnu.org>."
+ exit $?
+ ;;
+
+ -v|--v|--ve|--ver|--vers|--versi|--versio|--version)
+ echo "missing $scriptversion (GNU Automake)"
+ exit $?
+ ;;
+
+ -*)
+ echo 1>&2 "$0: Unknown \`$1' option"
+ echo 1>&2 "Try \`$0 --help' for more information"
+ exit 1
+ ;;
+
+esac
+
+# normalize program name to check for.
+program=`echo "$1" | sed '
+ s/^gnu-//; t
+ s/^gnu//; t
+ s/^g//; t'`
+
+# Now exit if we have it, but it failed. Also exit now if we
+# don't have it and --version was passed (most likely to detect
+# the program). This is about non-GNU programs, so use $1 not
+# $program.
+case $1 in
+ lex*|yacc*)
+ # Not GNU programs, they don't have --version.
+ ;;
+
+ tar*)
+ if test -n "$run"; then
+ echo 1>&2 "ERROR: \`tar' requires --run"
+ exit 1
+ elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
+ exit 1
+ fi
+ ;;
+
+ *)
+ if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
+ # We have it, but it failed.
+ exit 1
+ elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
+ # Could not run --version or --help. This is probably someone
+ # running `$TOOL --version' or `$TOOL --help' to check whether
+ # $TOOL exists and not knowing $TOOL uses missing.
+ exit 1
+ fi
+ ;;
+esac
+
+# If it does not exist, or fails to run (possibly an outdated version),
+# try to emulate it.
+case $program in
+ aclocal*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`acinclude.m4' or \`${configure_ac}'. You might want
+ to install the \`Automake' and \`Perl' packages. Grab them from
+ any GNU archive site."
+ touch aclocal.m4
+ ;;
+
+ autoconf*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`${configure_ac}'. You might want to install the
+ \`Autoconf' and \`GNU m4' packages. Grab them from any GNU
+ archive site."
+ touch configure
+ ;;
+
+ autoheader*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`acconfig.h' or \`${configure_ac}'. You might want
+ to install the \`Autoconf' and \`GNU m4' packages. Grab them
+ from any GNU archive site."
+ files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}`
+ test -z "$files" && files="config.h"
+ touch_files=
+ for f in $files; do
+ case $f in
+ *:*) touch_files="$touch_files "`echo "$f" |
+ sed -e 's/^[^:]*://' -e 's/:.*//'`;;
+ *) touch_files="$touch_files $f.in";;
+ esac
+ done
+ touch $touch_files
+ ;;
+
+ automake*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'.
+ You might want to install the \`Automake' and \`Perl' packages.
+ Grab them from any GNU archive site."
+ find . -type f -name Makefile.am -print |
+ sed 's/\.am$/.in/' |
+ while read f; do touch "$f"; done
+ ;;
+
+ autom4te*)
+ echo 1>&2 "\
+WARNING: \`$1' is needed, but is $msg.
+ You might have modified some files without having the
+ proper tools for further handling them.
+ You can get \`$1' as part of \`Autoconf' from any GNU
+ archive site."
+
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -f "$file"; then
+ touch $file
+ else
+ test -z "$file" || exec >$file
+ echo "#! /bin/sh"
+ echo "# Created by GNU Automake missing as a replacement of"
+ echo "# $ $@"
+ echo "exit 0"
+ chmod +x $file
+ exit 1
+ fi
+ ;;
+
+ bison*|yacc*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a \`.y' file. You may need the \`Bison' package
+ in order for those modifications to take effect. You can get
+ \`Bison' from any GNU archive site."
+ rm -f y.tab.c y.tab.h
+ if test $# -ne 1; then
+ eval LASTARG="\${$#}"
+ case $LASTARG in
+ *.y)
+ SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" y.tab.c
+ fi
+ SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" y.tab.h
+ fi
+ ;;
+ esac
+ fi
+ if test ! -f y.tab.h; then
+ echo >y.tab.h
+ fi
+ if test ! -f y.tab.c; then
+ echo 'main() { return 0; }' >y.tab.c
+ fi
+ ;;
+
+ lex*|flex*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a \`.l' file. You may need the \`Flex' package
+ in order for those modifications to take effect. You can get
+ \`Flex' from any GNU archive site."
+ rm -f lex.yy.c
+ if test $# -ne 1; then
+ eval LASTARG="\${$#}"
+ case $LASTARG in
+ *.l)
+ SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" lex.yy.c
+ fi
+ ;;
+ esac
+ fi
+ if test ! -f lex.yy.c; then
+ echo 'main() { return 0; }' >lex.yy.c
+ fi
+ ;;
+
+ help2man*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a dependency of a manual page. You may need the
+ \`Help2man' package in order for those modifications to take
+ effect. You can get \`Help2man' from any GNU archive site."
+
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -f "$file"; then
+ touch $file
+ else
+ test -z "$file" || exec >$file
+ echo ".ab help2man is required to generate this page"
+ exit $?
+ fi
+ ;;
+
+ makeinfo*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a \`.texi' or \`.texinfo' file, or any other file
+ indirectly affecting the aspect of the manual. The spurious
+ call might also be the consequence of using a buggy \`make' (AIX,
+ DU, IRIX). You might want to install the \`Texinfo' package or
+ the \`GNU make' package. Grab either from any GNU archive site."
+ # The file to touch is that specified with -o ...
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -z "$file"; then
+ # ... or it is the one specified with @setfilename ...
+ infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'`
+ file=`sed -n '
+ /^@setfilename/{
+ s/.* \([^ ]*\) *$/\1/
+ p
+ q
+ }' $infile`
+ # ... or it is derived from the source name (dir/f.texi becomes f.info)
+ test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info
+ fi
+ # If the file does not exist, the user really needs makeinfo;
+ # let's fail without touching anything.
+ test -f $file || exit 1
+ touch $file
+ ;;
+
+ tar*)
+ shift
+
+ # We have already tried tar in the generic part.
+ # Look for gnutar/gtar before invocation to avoid ugly error
+ # messages.
+ if (gnutar --version > /dev/null 2>&1); then
+ gnutar "$@" && exit 0
+ fi
+ if (gtar --version > /dev/null 2>&1); then
+ gtar "$@" && exit 0
+ fi
+ firstarg="$1"
+ if shift; then
+ case $firstarg in
+ *o*)
+ firstarg=`echo "$firstarg" | sed s/o//`
+ tar "$firstarg" "$@" && exit 0
+ ;;
+ esac
+ case $firstarg in
+ *h*)
+ firstarg=`echo "$firstarg" | sed s/h//`
+ tar "$firstarg" "$@" && exit 0
+ ;;
+ esac
+ fi
+
+ echo 1>&2 "\
+WARNING: I can't seem to run \`tar' with the given arguments.
+ You may want to install GNU tar or Free paxutils, or check the
+ command line arguments."
+ exit 1
+ ;;
+
+ *)
+ echo 1>&2 "\
+WARNING: \`$1' is needed, and is $msg.
+ You might have modified some files without having the
+ proper tools for further handling them. Check the \`README' file,
+ it often tells you about the needed prerequisites for installing
+ this package. You may also peek at any GNU archive site, in case
+ some other package would contain this missing \`$1' program."
+ exit 1
+ ;;
+esac
+
+exit 0
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
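The `missing' stub above is normally reached through the MISSING machinery that
automake substitutes into generated Makefiles; run by hand it behaves roughly
like this (tool and file names are illustrative):

    # With --run, try the real tool first and fall back to emulation only if
    # it is absent or too old (exit code 63).
    ${SHELL} build-aux/missing --run makeinfo -o doc/foo.info doc/foo.texi
    # Without --run, skip the real tool and go straight to the fallback,
    # e.g. touching the files the tool would have regenerated.
    ${SHELL} build-aux/missing automake-1.11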
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/config.h.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/config.h.in
new file mode 100644
index 00000000..0e31e37e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/config.h.in
@@ -0,0 +1,404 @@
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+/* Define if building universal (internal helper macro) */
+#undef AC_APPLE_UNIVERSAL_BUILD
+
+/* Define to 1 if translation of program messages to the user's native
+ language is requested. */
+#undef ENABLE_NLS
+
+/* Define to 1 if using x86 assembler optimizations. */
+#undef HAVE_ASM_X86
+
+/* Define to 1 if using x86_64 assembler optimizations. */
+#undef HAVE_ASM_X86_64
+
+/* Define to 1 if bswap_16 is available. */
+#undef HAVE_BSWAP_16
+
+/* Define to 1 if bswap_32 is available. */
+#undef HAVE_BSWAP_32
+
+/* Define to 1 if bswap_64 is available. */
+#undef HAVE_BSWAP_64
+
+/* Define to 1 if you have the <byteswap.h> header file. */
+#undef HAVE_BYTESWAP_H
+
+/* Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the
+ CoreFoundation framework. */
+#undef HAVE_CFLOCALECOPYCURRENT
+
+/* Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in
+ the CoreFoundation framework. */
+#undef HAVE_CFPREFERENCESCOPYAPPVALUE
+
+/* Define to 1 if crc32 integrity check is enabled. */
+#undef HAVE_CHECK_CRC32
+
+/* Define to 1 if crc64 integrity check is enabled. */
+#undef HAVE_CHECK_CRC64
+
+/* Define to 1 if sha256 integrity check is enabled. */
+#undef HAVE_CHECK_SHA256
+
+/* Define to 1 if the number of available CPU cores can be detected with
+ sysconf(_SC_NPROCESSORS_ONLN). */
+#undef HAVE_CPUCORES_SYSCONF
+
+/* Define to 1 if the number of available CPU cores can be detected with
+ sysctl(). */
+#undef HAVE_CPUCORES_SYSCTL
+
+/* Define if the GNU dcgettext() function is already present or preinstalled.
+ */
+#undef HAVE_DCGETTEXT
+
+/* Define to 1 if decoder components are enabled. */
+#undef HAVE_DECODER
+
+/* Define to 1 if arm decoder is enabled. */
+#undef HAVE_DECODER_ARM
+
+/* Define to 1 if armthumb decoder is enabled. */
+#undef HAVE_DECODER_ARMTHUMB
+
+/* Define to 1 if delta decoder is enabled. */
+#undef HAVE_DECODER_DELTA
+
+/* Define to 1 if ia64 decoder is enabled. */
+#undef HAVE_DECODER_IA64
+
+/* Define to 1 if lzma1 decoder is enabled. */
+#undef HAVE_DECODER_LZMA1
+
+/* Define to 1 if lzma2 decoder is enabled. */
+#undef HAVE_DECODER_LZMA2
+
+/* Define to 1 if powerpc decoder is enabled. */
+#undef HAVE_DECODER_POWERPC
+
+/* Define to 1 if sparc decoder is enabled. */
+#undef HAVE_DECODER_SPARC
+
+/* Define to 1 if subblock decoder is enabled. */
+#undef HAVE_DECODER_SUBBLOCK
+
+/* Define to 1 if x86 decoder is enabled. */
+#undef HAVE_DECODER_X86
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#undef HAVE_DLFCN_H
+
+/* Define to 1 if encoder components are enabled. */
+#undef HAVE_ENCODER
+
+/* Define to 1 if arm encoder is enabled. */
+#undef HAVE_ENCODER_ARM
+
+/* Define to 1 if armthumb encoder is enabled. */
+#undef HAVE_ENCODER_ARMTHUMB
+
+/* Define to 1 if delta encoder is enabled. */
+#undef HAVE_ENCODER_DELTA
+
+/* Define to 1 if ia64 encoder is enabled. */
+#undef HAVE_ENCODER_IA64
+
+/* Define to 1 if lzma1 encoder is enabled. */
+#undef HAVE_ENCODER_LZMA1
+
+/* Define to 1 if lzma2 encoder is enabled. */
+#undef HAVE_ENCODER_LZMA2
+
+/* Define to 1 if powerpc encoder is enabled. */
+#undef HAVE_ENCODER_POWERPC
+
+/* Define to 1 if sparc encoder is enabled. */
+#undef HAVE_ENCODER_SPARC
+
+/* Define to 1 if subblock encoder is enabled. */
+#undef HAVE_ENCODER_SUBBLOCK
+
+/* Define to 1 if x86 encoder is enabled. */
+#undef HAVE_ENCODER_X86
+
+/* Define to 1 if the system supports fast unaligned memory access. */
+#undef HAVE_FAST_UNALIGNED_ACCESS
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#undef HAVE_FCNTL_H
+
+/* Define to 1 if you have the `futimens' function. */
+#undef HAVE_FUTIMENS
+
+/* Define to 1 if you have the `futimes' function. */
+#undef HAVE_FUTIMES
+
+/* Define to 1 if you have the `futimesat' function. */
+#undef HAVE_FUTIMESAT
+
+/* Define to 1 if you have the <getopt.h> header file. */
+#undef HAVE_GETOPT_H
+
+/* Define to 1 if you have the `getopt_long' function. */
+#undef HAVE_GETOPT_LONG
+
+/* Define if the GNU gettext() function is already present or preinstalled. */
+#undef HAVE_GETTEXT
+
+/* Define if you have the iconv() function. */
+#undef HAVE_ICONV
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the <limits.h> header file. */
+#undef HAVE_LIMITS_H
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* Define to 1 to enable bt2 match finder. */
+#undef HAVE_MF_BT2
+
+/* Define to 1 to enable bt3 match finder. */
+#undef HAVE_MF_BT3
+
+/* Define to 1 to enable bt4 match finder. */
+#undef HAVE_MF_BT4
+
+/* Define to 1 to enable hc3 match finder. */
+#undef HAVE_MF_HC3
+
+/* Define to 1 to enable hc4 match finder. */
+#undef HAVE_MF_HC4
+
+/* Define to 1 if getopt.h declares extern int optreset. */
+#undef HAVE_OPTRESET
+
+/* Define to 1 if the amount of physical memory can be detected with
+ sysconf(_SC_PAGESIZE) and sysconf(_SC_PHYS_PAGES). */
+#undef HAVE_PHYSMEM_SYSCONF
+
+/* Define to 1 if the amount of physical memory can be detected with sysctl().
+ */
+#undef HAVE_PHYSMEM_SYSCTL
+
+/* Define to 1 if the amount of physical memory can be detected with Linux
+ sysinfo(). */
+#undef HAVE_PHYSMEM_SYSINFO
+
+/* Define if you have POSIX threads libraries and header files. */
+#undef HAVE_PTHREAD
+
+/* Define to 1 if optimizing for size. */
+#undef HAVE_SMALL
+
+/* Define to 1 if stdbool.h conforms to C99. */
+#undef HAVE_STDBOOL_H
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if `struct stat' is a member of `st_atimensec'. */
+#undef HAVE_STRUCT_STAT_ST_ATIMENSEC
+
+/* Define to 1 if `struct stat' is a member of `st_atimespec.tv_nsec'. */
+#undef HAVE_STRUCT_STAT_ST_ATIMESPEC_TV_NSEC
+
+/* Define to 1 if `struct stat' is a member of `st_atim.st__tim.tv_nsec'. */
+#undef HAVE_STRUCT_STAT_ST_ATIM_ST__TIM_TV_NSEC
+
+/* Define to 1 if `struct stat' is a member of `st_atim.tv_nsec'. */
+#undef HAVE_STRUCT_STAT_ST_ATIM_TV_NSEC
+
+/* Define to 1 if `struct stat' is a member of `st_uatime'. */
+#undef HAVE_STRUCT_STAT_ST_UATIME
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#undef HAVE_SYS_PARAM_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/sysctl.h> header file. */
+#undef HAVE_SYS_SYSCTL_H
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#undef HAVE_SYS_TIME_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if the system has the type `uintptr_t'. */
+#undef HAVE_UINTPTR_T
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Define to 1 if you have the `utime' function. */
+#undef HAVE_UTIME
+
+/* Define to 1 if you have the `utimes' function. */
+#undef HAVE_UTIMES
+
+/* Define to 1 or 0, depending whether the compiler supports simple visibility
+ declarations. */
+#undef HAVE_VISIBILITY
+
+/* Define to 1 if the system has the type `_Bool'. */
+#undef HAVE__BOOL
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#undef LT_OBJDIR
+
+/* Define to 1 to disable debugging code. */
+#undef NDEBUG
+
+/* Define to 1 if your C compiler doesn't accept -c and -o together. */
+#undef NO_MINUS_C_MINUS_O
+
+/* Name of package */
+#undef PACKAGE
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the URL of the home page of this package. */
+#undef PACKAGE_HOMEPAGE
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the home page for this package. */
+#undef PACKAGE_URL
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Define to necessary symbol if this constant uses a non-standard name on
+ your system. */
+#undef PTHREAD_CREATE_JOINABLE
+
+/* The size of `size_t', as computed by sizeof. */
+#undef SIZEOF_SIZE_T
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* Enable extensions on AIX 3, Interix. */
+#ifndef _ALL_SOURCE
+# undef _ALL_SOURCE
+#endif
+/* Enable GNU extensions on systems that have them. */
+#ifndef _GNU_SOURCE
+# undef _GNU_SOURCE
+#endif
+/* Enable threading extensions on Solaris. */
+#ifndef _POSIX_PTHREAD_SEMANTICS
+# undef _POSIX_PTHREAD_SEMANTICS
+#endif
+/* Enable extensions on HP NonStop. */
+#ifndef _TANDEM_SOURCE
+# undef _TANDEM_SOURCE
+#endif
+/* Enable general extensions on Solaris. */
+#ifndef __EXTENSIONS__
+# undef __EXTENSIONS__
+#endif
+
+
+/* Version number of package */
+#undef VERSION
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel). */
+#if defined AC_APPLE_UNIVERSAL_BUILD
+# if defined __BIG_ENDIAN__
+# define WORDS_BIGENDIAN 1
+# endif
+#else
+# ifndef WORDS_BIGENDIAN
+# undef WORDS_BIGENDIAN
+# endif
+#endif
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+#undef _FILE_OFFSET_BITS
+
+/* Define for large files, on AIX-style hosts. */
+#undef _LARGE_FILES
+
+/* Define to 1 if on MINIX. */
+#undef _MINIX
+
+/* Define to 2 if the system does not provide POSIX.1 features except with
+ this defined. */
+#undef _POSIX_1_SOURCE
+
+/* Define to 1 if you need to in order for `stat' and other things to work. */
+#undef _POSIX_SOURCE
+
+/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>,
+ <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
+ #define below would cause a syntax error. */
+#undef _UINT32_T
+
+/* Define for Solaris 2.5.1 so the uint64_t typedef from <sys/synch.h>,
+ <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
+ #define below would cause a syntax error. */
+#undef _UINT64_T
+
+/* Define for Solaris 2.5.1 so the uint8_t typedef from <sys/synch.h>,
+ <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
+ #define below would cause a syntax error. */
+#undef _UINT8_T
+
+/* Define to rpl_ if the getopt replacement functions and variables should be
+ used. */
+#undef __GETOPT_PREFIX
+
+/* Define to the type of a signed integer type of width exactly 32 bits if
+ such a type exists and the standard includes do not define it. */
+#undef int32_t
+
+/* Define to the type of a signed integer type of width exactly 64 bits if
+ such a type exists and the standard includes do not define it. */
+#undef int64_t
+
+/* Define to the type of an unsigned integer type of width exactly 16 bits if
+ such a type exists and the standard includes do not define it. */
+#undef uint16_t
+
+/* Define to the type of an unsigned integer type of width exactly 32 bits if
+ such a type exists and the standard includes do not define it. */
+#undef uint32_t
+
+/* Define to the type of an unsigned integer type of width exactly 64 bits if
+ such a type exists and the standard includes do not define it. */
+#undef uint64_t
+
+/* Define to the type of an unsigned integer type of width exactly 8 bits if
+ such a type exists and the standard includes do not define it. */
+#undef uint8_t
+
+/* Define to the type of an unsigned integer type wide enough to hold a
+ pointer, if such a type exists, and if the system does not define it. */
+#undef uintptr_t
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/configure b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/configure
new file mode 100755
index 00000000..d0f14a1f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/configure
@@ -0,0 +1,22982 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.63 for XZ Utils 4.999.9beta.
+#
+# Report bugs to <lasse.collin@tukaani.org>.
+#
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+# 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+
+
+# PATH needs CR
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ { (exit 1); exit 1; }
+fi
+
+# Work around bugs in pre-3.0 UWIN ksh.
+for as_var in ENV MAIL MAILPATH
+do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# CDPATH.
+$as_unset CDPATH
+
+
+if test "x$CONFIG_SHELL" = x; then
+ if (eval ":") 2>/dev/null; then
+ as_have_required=yes
+else
+ as_have_required=no
+fi
+
+ if test $as_have_required = yes && (eval ":
+(as_func_return () {
+ (exit \$1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test \$exitcode = 0) || { (exit 1); exit 1; }
+
+(
+ as_lineno_1=\$LINENO
+ as_lineno_2=\$LINENO
+ test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" &&
+ test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; }
+") 2> /dev/null; then
+ :
+else
+ as_candidate_shells=
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ case $as_dir in
+ /*)
+ for as_base in sh bash ksh sh5; do
+ as_candidate_shells="$as_candidate_shells $as_dir/$as_base"
+ done;;
+ esac
+done
+IFS=$as_save_IFS
+
+
+ for as_shell in $as_candidate_shells $SHELL; do
+ # Try only shells that exist, to save several forks.
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ { ("$as_shell") 2> /dev/null <<\_ASEOF
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+:
+_ASEOF
+}; then
+ CONFIG_SHELL=$as_shell
+ as_have_required=yes
+ if { "$as_shell" 2> /dev/null <<\_ASEOF
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+:
+(as_func_return () {
+ (exit $1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = "$1" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test $exitcode = 0) || { (exit 1); exit 1; }
+
+(
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; }
+
+_ASEOF
+}; then
+ break
+fi
+
+fi
+
+ done
+
+ if test "x$CONFIG_SHELL" != x; then
+ for as_var in BASH_ENV ENV
+ do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+ done
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
+fi
+
+
+ if test $as_have_required = no; then
+ echo This script requires a shell more modern than all the
+ echo shells that I found on your system. Please install a
+ echo modern shell, or manually run the script under such a
+ echo shell if you do have one.
+ { (exit 1); exit 1; }
+fi
+
+
+fi
+
+fi
+
+
+
+(eval "as_func_return () {
+ (exit \$1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test \$exitcode = 0") || {
+ echo No shell found that supports shell functions.
+ echo Please tell bug-autoconf@gnu.org about your system,
+ echo including any error possibly output before this message.
+ echo This can help us improve future autoconf versions.
+ echo Configuration will now proceed without shell functions.
+}
+
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line after each line using $LINENO; the second 'sed'
+ # does the real work. The second script uses 'N' to pair each
+ # line-number line with the line containing $LINENO, and appends
+ # trailing '-' during substitution so that $LINENO is not a special
+ # case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # scripts with optimization help from Paolo Bonzini. Blame Lee
+ # E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in
+-n*)
+ case `echo 'x\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ *) ECHO_C='\c';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -p'
+ fi
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+
+
+# Check that we are running under the correct shell.
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+case X$lt_ECHO in
+X*--fallback-echo)
+ # Remove one level of quotation (which was required for Make).
+ ECHO=`echo "$lt_ECHO" | sed 's,\\\\\$\\$0,'$0','`
+ ;;
+esac
+
+ECHO=${lt_ECHO-echo}
+if test "X$1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X$1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then
+ # Yippee, $ECHO works!
+ :
+else
+ # Restart under the correct shell.
+ exec $SHELL "$0" --no-reexec ${1+"$@"}
+fi
+
+if test "X$1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<_LT_EOF
+$*
+_LT_EOF
+ exit 0
+fi
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+if test -z "$lt_ECHO"; then
+ if test "X${echo_test_string+set}" != Xset; then
+ # find a string as large as possible, as long as the shell can cope with it
+ for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do
+ # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
+ if { echo_test_string=`eval $cmd`; } 2>/dev/null &&
+ { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null
+ then
+ break
+ fi
+ done
+ fi
+
+ if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ :
+ else
+ # The Solaris, AIX, and Digital Unix default echo programs unquote
+ # backslashes. This makes it impossible to quote backslashes using
+ # echo "$something" | sed 's/\\/\\\\/g'
+ #
+ # So, first we look for a working echo in the user's PATH.
+
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for dir in $PATH /usr/ucb; do
+ IFS="$lt_save_ifs"
+ if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
+ test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ ECHO="$dir/echo"
+ break
+ fi
+ done
+ IFS="$lt_save_ifs"
+
+ if test "X$ECHO" = Xecho; then
+ # We didn't find a better echo, so look for alternatives.
+ if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # This shell has a builtin print -r that does the trick.
+ ECHO='print -r'
+ elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } &&
+ test "X$CONFIG_SHELL" != X/bin/ksh; then
+ # If we have ksh, try running configure again with it.
+ ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh}
+ export ORIGINAL_CONFIG_SHELL
+ CONFIG_SHELL=/bin/ksh
+ export CONFIG_SHELL
+ exec $CONFIG_SHELL "$0" --no-reexec ${1+"$@"}
+ else
+ # Try using printf.
+ ECHO='printf %s\n'
+ if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # Cool, printf works
+ :
+ elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL
+ export CONFIG_SHELL
+ SHELL="$CONFIG_SHELL"
+ export SHELL
+ ECHO="$CONFIG_SHELL $0 --fallback-echo"
+ elif echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ ECHO="$CONFIG_SHELL $0 --fallback-echo"
+ else
+ # maybe with a smaller string...
+ prev=:
+
+ for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do
+ if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null
+ then
+ break
+ fi
+ prev="$cmd"
+ done
+
+ if test "$prev" != 'sed 50q "$0"'; then
+ echo_test_string=`eval $prev`
+ export echo_test_string
+ exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "$0" ${1+"$@"}
+ else
+ # Oops. We lost completely, so just stick with echo.
+ ECHO=echo
+ fi
+ fi
+ fi
+ fi
+ fi
+fi
+
+# Copy echo and quote the copy suitably for passing to libtool from
+# the Makefile, instead of quoting the original, which is used later.
+lt_ECHO=$ECHO
+if test "X$lt_ECHO" = "X$CONFIG_SHELL $0 --fallback-echo"; then
+ lt_ECHO="$CONFIG_SHELL \\\$\$0 --fallback-echo"
+fi
+
+
+
+
+exec 7<&0 </dev/null 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+# Identity of this package.
+PACKAGE_NAME='XZ Utils'
+PACKAGE_TARNAME='xz'
+PACKAGE_VERSION='4.999.9beta'
+PACKAGE_STRING='XZ Utils 4.999.9beta'
+PACKAGE_BUGREPORT='lasse.collin@tukaani.org'
+
+ac_unique_file="src/liblzma/common/common.h"
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+gt_needs=
+ac_subst_vars='am__EXEEXT_FALSE
+am__EXEEXT_TRUE
+LTLIBOBJS
+xz
+DYNAMIC_LDFLAGS
+DYNAMIC_CPPFLAGS
+STATIC_LDFLAGS
+STATIC_CPPFLAGS
+AM_CFLAGS
+COND_GNULIB_FALSE
+COND_GNULIB_TRUE
+HAVE_VISIBILITY
+CFLAG_VISIBILITY
+GETOPT_H
+LIBOBJS
+POSUB
+LTLIBINTL
+LIBINTL
+INTLLIBS
+LTLIBICONV
+LIBICONV
+INTL_MACOSX_LIBS
+MSGMERGE
+XGETTEXT_015
+XGETTEXT
+GMSGFMT_015
+MSGFMT_015
+GMSGFMT
+MSGFMT
+USE_NLS
+COND_SHARED_FALSE
+COND_SHARED_TRUE
+RC
+OTOOL64
+OTOOL
+LIPO
+NMEDIT
+DSYMUTIL
+lt_ECHO
+RANLIB
+AR
+NM
+ac_ct_DUMPBIN
+DUMPBIN
+LD
+FGREP
+SED
+LIBTOOL
+OBJDUMP
+DLLTOOL
+AS
+PTHREAD_CFLAGS
+PTHREAD_LIBS
+PTHREAD_CC
+acx_pthread_config
+EGREP
+GREP
+CPP
+am__fastdepCCAS_FALSE
+am__fastdepCCAS_TRUE
+CCASDEPMODE
+CCASFLAGS
+CCAS
+am__fastdepCC_FALSE
+am__fastdepCC_TRUE
+CCDEPMODE
+AMDEPBACKSLASH
+AMDEP_FALSE
+AMDEP_TRUE
+am__quote
+am__include
+DEPDIR
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+LN_S
+am__untar
+am__tar
+AMTAR
+am__leading_dot
+SET_MAKE
+AWK
+mkdir_p
+MKDIR_P
+INSTALL_STRIP_PROGRAM
+STRIP
+install_sh
+MAKEINFO
+AUTOHEADER
+AUTOMAKE
+AUTOCONF
+ACLOCAL
+VERSION
+PACKAGE
+CYGPATH_W
+am__isrc
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+PREFERABLY_POSIX_SHELL
+POSIX_SHELL
+COND_SMALL_FALSE
+COND_SMALL_TRUE
+COND_ASM_X86_64_FALSE
+COND_ASM_X86_64_TRUE
+COND_ASM_X86_FALSE
+COND_ASM_X86_TRUE
+COND_CHECK_SHA256_FALSE
+COND_CHECK_SHA256_TRUE
+COND_CHECK_CRC64_FALSE
+COND_CHECK_CRC64_TRUE
+COND_CHECK_CRC32_FALSE
+COND_CHECK_CRC32_TRUE
+COND_DECODER_LZ_FALSE
+COND_DECODER_LZ_TRUE
+COND_ENCODER_LZ_FALSE
+COND_ENCODER_LZ_TRUE
+COND_FILTER_LZ_FALSE
+COND_FILTER_LZ_TRUE
+COND_DECODER_SIMPLE_FALSE
+COND_DECODER_SIMPLE_TRUE
+COND_ENCODER_SIMPLE_FALSE
+COND_ENCODER_SIMPLE_TRUE
+COND_FILTER_SIMPLE_FALSE
+COND_FILTER_SIMPLE_TRUE
+COND_DECODER_SPARC_FALSE
+COND_DECODER_SPARC_TRUE
+COND_ENCODER_SPARC_FALSE
+COND_ENCODER_SPARC_TRUE
+COND_FILTER_SPARC_FALSE
+COND_FILTER_SPARC_TRUE
+COND_DECODER_ARMTHUMB_FALSE
+COND_DECODER_ARMTHUMB_TRUE
+COND_ENCODER_ARMTHUMB_FALSE
+COND_ENCODER_ARMTHUMB_TRUE
+COND_FILTER_ARMTHUMB_FALSE
+COND_FILTER_ARMTHUMB_TRUE
+COND_DECODER_ARM_FALSE
+COND_DECODER_ARM_TRUE
+COND_ENCODER_ARM_FALSE
+COND_ENCODER_ARM_TRUE
+COND_FILTER_ARM_FALSE
+COND_FILTER_ARM_TRUE
+COND_DECODER_IA64_FALSE
+COND_DECODER_IA64_TRUE
+COND_ENCODER_IA64_FALSE
+COND_ENCODER_IA64_TRUE
+COND_FILTER_IA64_FALSE
+COND_FILTER_IA64_TRUE
+COND_DECODER_POWERPC_FALSE
+COND_DECODER_POWERPC_TRUE
+COND_ENCODER_POWERPC_FALSE
+COND_ENCODER_POWERPC_TRUE
+COND_FILTER_POWERPC_FALSE
+COND_FILTER_POWERPC_TRUE
+COND_DECODER_X86_FALSE
+COND_DECODER_X86_TRUE
+COND_ENCODER_X86_FALSE
+COND_ENCODER_X86_TRUE
+COND_FILTER_X86_FALSE
+COND_FILTER_X86_TRUE
+COND_DECODER_DELTA_FALSE
+COND_DECODER_DELTA_TRUE
+COND_ENCODER_DELTA_FALSE
+COND_ENCODER_DELTA_TRUE
+COND_FILTER_DELTA_FALSE
+COND_FILTER_DELTA_TRUE
+COND_DECODER_SUBBLOCK_FALSE
+COND_DECODER_SUBBLOCK_TRUE
+COND_ENCODER_SUBBLOCK_FALSE
+COND_ENCODER_SUBBLOCK_TRUE
+COND_FILTER_SUBBLOCK_FALSE
+COND_FILTER_SUBBLOCK_TRUE
+COND_DECODER_LZMA2_FALSE
+COND_DECODER_LZMA2_TRUE
+COND_ENCODER_LZMA2_FALSE
+COND_ENCODER_LZMA2_TRUE
+COND_FILTER_LZMA2_FALSE
+COND_FILTER_LZMA2_TRUE
+COND_DECODER_LZMA1_FALSE
+COND_DECODER_LZMA1_TRUE
+COND_ENCODER_LZMA1_FALSE
+COND_ENCODER_LZMA1_TRUE
+COND_FILTER_LZMA1_FALSE
+COND_FILTER_LZMA1_TRUE
+COND_MAIN_DECODER_FALSE
+COND_MAIN_DECODER_TRUE
+COND_MAIN_ENCODER_FALSE
+COND_MAIN_ENCODER_TRUE
+COND_W32_FALSE
+COND_W32_TRUE
+host_os
+host_vendor
+host_cpu
+host
+build_os
+build_vendor
+build_cpu
+build
+PACKAGE_HOMEPAGE
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_debug
+enable_encoders
+enable_decoders
+enable_match_finders
+enable_checks
+enable_assembler
+enable_unaligned_access
+enable_small
+enable_threads
+enable_dynamic
+enable_dependency_tracking
+enable_shared
+enable_static
+with_pic
+enable_fast_install
+with_gnu_ld
+enable_libtool_lock
+enable_nls
+enable_rpath
+with_libiconv_prefix
+with_libintl_prefix
+enable_largefile
+enable_werror
+'
+ ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CCAS
+CCASFLAGS
+CPP'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval $ac_prev=\$ac_option
+ ac_prev=
+ continue
+ fi
+
+ case $ac_option in
+ *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *) ac_optarg=yes ;;
+ esac
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_dashdash$ac_option in
+ --)
+ ac_dashdash=yes ;;
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=*)
+ datadir=$ac_optarg ;;
+
+ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+ | --dataroo | --dataro | --datar)
+ ac_prev=datarootdir ;;
+ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+ datarootdir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2
+ { (exit 1); exit 1; }; }
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=no ;;
+
+ -docdir | --docdir | --docdi | --doc | --do)
+ ac_prev=docdir ;;
+ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+ docdir=$ac_optarg ;;
+
+ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+ ac_prev=dvidir ;;
+ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+ dvidir=$ac_optarg ;;
+
+ -enable-* | --enable-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2
+ { (exit 1); exit 1; }; }
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"enable_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval enable_$ac_useropt=\$ac_optarg ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+ ac_prev=htmldir ;;
+ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+ | --ht=*)
+ htmldir=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localedir | --localedir | --localedi | --localed | --locale)
+ ac_prev=localedir ;;
+ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+ localedir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst | --locals)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+ ac_prev=pdfdir ;;
+ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+ pdfdir=$ac_optarg ;;
+
+ -psdir | --psdir | --psdi | --psd | --ps)
+ ac_prev=psdir ;;
+ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+ psdir=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2
+ { (exit 1); exit 1; }; }
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=\$ac_optarg ;;
+
+ -without-* | --without-*)
+ ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+ { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2
+ { (exit 1); exit 1; }; }
+ ac_useropt_orig=$ac_useropt
+ ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+ case $ac_user_opts in
+ *"
+"with_$ac_useropt"
+"*) ;;
+ *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+ ac_unrecognized_sep=', ';;
+ esac
+ eval with_$ac_useropt=no ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) { $as_echo "$as_me: error: unrecognized option: $ac_option
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; }
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
+ { $as_echo "$as_me: error: invalid variable name: $ac_envvar" >&2
+ { (exit 1); exit 1; }; }
+ eval $ac_envvar=\$ac_optarg
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ { $as_echo "$as_me: error: missing argument to $ac_option" >&2
+ { (exit 1); exit 1; }; }
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+ case $enable_option_checking in
+ no) ;;
+ fatal) { $as_echo "$as_me: error: unrecognized options: $ac_unrecognized_opts" >&2
+ { (exit 1); exit 1; }; } ;;
+ *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+ esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ datadir sysconfdir sharedstatedir localstatedir includedir \
+ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+ libdir localedir mandir
+do
+ eval ac_val=\$$ac_var
+ # Remove trailing slashes.
+ case $ac_val in
+ */ )
+ ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+ eval $ac_var=\$ac_val;;
+ esac
+ # Be sure to have absolute directory names.
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) continue;;
+ NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+ esac
+ { $as_echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
+ { (exit 1); exit 1; }; }
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
+ If a cross compiler is detected then cross compile mode will be used." >&2
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+ { $as_echo "$as_me: error: working directory cannot be determined" >&2
+ { (exit 1); exit 1; }; }
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+ { $as_echo "$as_me: error: pwd does not report name of working directory" >&2
+ { (exit 1); exit 1; }; }
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then the parent directory.
+ ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_myself" : 'X\(//\)[^/]' \| \
+ X"$as_myself" : 'X\(//\)$' \| \
+ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r "$srcdir/$ac_unique_file"; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+ { $as_echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
+ { (exit 1); exit 1; }; }
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+ cd "$srcdir" && test -r "./$ac_unique_file" || { $as_echo "$as_me: error: $ac_msg" >&2
+ { (exit 1); exit 1; }; }
+ pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+ srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+ eval ac_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_env_${ac_var}_value=\$${ac_var}
+ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures XZ Utils 4.999.9beta to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/xz]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
+_ACEOF
+
+ cat <<\_ACEOF
+
+Program names:
+ --program-prefix=PREFIX prepend PREFIX to installed program names
+ --program-suffix=SUFFIX append SUFFIX to installed program names
+ --program-transform-name=PROGRAM run sed PROGRAM on installed program names
+
+System types:
+ --build=BUILD configure for building on BUILD [guessed]
+ --host=HOST cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+ case $ac_init_help in
+ short | recursive ) echo "Configuration of XZ Utils 4.999.9beta:";;
+ esac
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-option-checking ignore unrecognized --enable/--with options
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-debug Enable debugging code.
+ --enable-encoders=LIST Comma-separated list of encoders to build.
+ Default=all. Available encoders: lzma1 lzma2
+ subblock delta x86 powerpc ia64 arm armthumb sparc
+ --enable-decoders=LIST Comma-separated list of decoders to build.
+ Default=all. Available decoders are the same as
+ available encoders.
+ --enable-match-finders=LIST
+ Comma-separated list of match finders to build.
+ Default=all. At least one match finder is required
+ for encoding with the LZMA1 and LZMA2 filters.
+ Available match finders: hc3 hc4 bt2 bt3 bt4
+ --enable-checks=LIST Comma-separated list of integrity checks to build.
+ Default=all. Available integrity checks: crc32 crc64
+ sha256
+ --disable-assembler Do not use assembler optimizations even if such
+ exist for the architecture.
+ --enable-unaligned-access
+ Enable if the system supports *fast* unaligned
+ memory access with 16-bit and 32-bit integers. By
+ default, this is enabled only on x86, x86_64, and
+ big endian PowerPC.
+ --enable-small Make liblzma smaller and a little slower. This is
+ disabled by default to optimize for speed.
+ --disable-threads Disable threading support. This makes some things
+ thread-unsafe.
+ --enable-dynamic=TYPE Set how command line tools are linked against
+ liblzma. TYPE can be mixed, yes, or no. The default
+ is mixed.
+ --disable-dependency-tracking speeds up one-time build
+ --enable-dependency-tracking do not reject slow dependency extractors
+ --enable-shared[=PKGS] build shared libraries [default=yes]
+ --enable-static[=PKGS] build static libraries [default=yes]
+ --enable-fast-install[=PKGS]
+ optimize for fast installation [default=yes]
+ --disable-libtool-lock avoid locking (might break parallel builds)
+ --disable-nls do not use Native Language Support
+ --disable-rpath do not hardcode runtime library paths
+ --disable-largefile omit support for large files
+ --enable-werror Enable -Werror to abort compilation on all compiler
+ warnings.
+
+Optional Packages:
+ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
+ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+ --with-pic try to use only PIC/non-PIC objects [default=use
+ both]
+ --with-gnu-ld assume the C compiler uses GNU ld [default=no]
+ --with-gnu-ld assume the C compiler uses GNU ld default=no
+ --with-libiconv-prefix[=DIR] search for libiconv in DIR/include and DIR/lib
+ --without-libiconv-prefix don't search for libiconv in includedir and libdir
+ --with-libintl-prefix[=DIR] search for libintl in DIR/include and DIR/lib
+ --without-libintl-prefix don't search for libintl in includedir and libdir
+
+Some influential environment variables:
+ CC C compiler command
+ CFLAGS C compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ LIBS libraries to pass to the linker, e.g. -l<library>
+ CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I<include dir> if
+ you have headers in a nonstandard directory <include dir>
+ CCAS assembler compiler command (defaults to CC)
+ CCASFLAGS assembler compiler flags (defaults to CFLAGS)
+ CPP C preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to <lasse.collin@tukaani.org>.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d "$ac_dir" ||
+ { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+ continue
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+ cd "$ac_dir" || { ac_status=$?; continue; }
+ # Check for guested configure.
+ if test -f "$ac_srcdir/configure.gnu"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+ elif test -f "$ac_srcdir/configure"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure" --help=recursive
+ else
+ $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi || ac_status=$?
+ cd "$ac_pwd" || { ac_status=$?; break; }
+ done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+ cat <<\_ACEOF
+XZ Utils configure 4.999.9beta
+generated by GNU Autoconf 2.63
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit
+fi
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by XZ Utils $as_me 4.999.9beta, which was
+generated by GNU Autoconf 2.63. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ $as_echo "PATH: $as_dir"
+done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *\'*)
+ ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
+ 2)
+ ac_configure_args1="$ac_configure_args1 '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ ac_configure_args="$ac_configure_args '$ac_arg'"
+ ;;
+ esac
+ done
+done
+$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
+$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log. We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ cat <<\_ASBOX
+## ---------------- ##
+## Cache variables. ##
+## ---------------- ##
+_ASBOX
+ echo
+ # The following way of writing the cache mishandles newlines in values,
+(
+ for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) $as_unset $ac_var ;;
+ esac ;;
+ esac
+ done
+ (set) 2>&1 |
+ case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ sed -n \
+ "s/'\''/'\''\\\\'\'''\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+ ;; #(
+ *)
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+)
+ echo
+
+ cat <<\_ASBOX
+## ----------------- ##
+## Output variables. ##
+## ----------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ cat <<\_ASBOX
+## ------------------- ##
+## File substitutions. ##
+## ------------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ $as_echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ cat <<\_ASBOX
+## ----------- ##
+## confdefs.h. ##
+## ----------- ##
+_ASBOX
+ echo
+ cat confdefs.h
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ $as_echo "$as_me: caught signal $ac_signal"
+ $as_echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core *.core core.conftest.* &&
+ rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
+if test -n "$CONFIG_SITE"; then
+ ac_site_file1=$CONFIG_SITE
+elif test "x$prefix" != xNONE; then
+ ac_site_file1=$prefix/share/config.site
+ ac_site_file2=$prefix/etc/config.site
+else
+ ac_site_file1=$ac_default_prefix/share/config.site
+ ac_site_file2=$ac_default_prefix/etc/config.site
+fi
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+do
+ test "x$ac_site_file" = xNONE && continue
+ if test -r "$ac_site_file"; then
+ { $as_echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file"
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special
+ # files actually), so we avoid doing that.
+ if test -f "$cache_file"; then
+ { $as_echo "$as_me:$LINENO: loading cache $cache_file" >&5
+$as_echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { $as_echo "$as_me:$LINENO: creating cache $cache_file" >&5
+$as_echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
+
+gt_needs="$gt_needs "
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val=\$ac_cv_env_${ac_var}_value
+ eval ac_new_val=\$ac_env_${ac_var}_value
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { $as_echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { $as_echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ # differences in whitespace do not lead to failure.
+ ac_old_val_w=`echo x $ac_old_val`
+ ac_new_val_w=`echo x $ac_new_val`
+ if test "$ac_old_val_w" != "$ac_new_val_w"; then
+ { $as_echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ ac_cache_corrupted=:
+ else
+ { $as_echo "$as_me:$LINENO: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+ eval $ac_var=\$ac_old_val
+ fi
+ { $as_echo "$as_me:$LINENO: former value: \`$ac_old_val'" >&5
+$as_echo "$as_me: former value: \`$ac_old_val'" >&2;}
+ { $as_echo "$as_me:$LINENO: current value: \`$ac_new_val'" >&5
+$as_echo "$as_me: current value: \`$ac_new_val'" >&2;}
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ { $as_echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ { { $as_echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
+$as_echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+ac_aux_dir=
+for ac_dir in build-aux "$srcdir"/build-aux; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ { { $as_echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in build-aux \"$srcdir\"/build-aux" >&5
+$as_echo "$as_me: error: cannot find install-sh or install.sh in build-aux \"$srcdir\"/build-aux" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
+
+ac_config_headers="$ac_config_headers config.h"
+
+
+PACKAGE_HOMEPAGE=http://tukaani.org/xz/
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_HOMEPAGE "$PACKAGE_HOMEPAGE"
+_ACEOF
+
+
+
+echo
+echo "$PACKAGE_STRING"
+
+echo
+echo "System type:"
+# This is needed to know if assembler optimizations can be used.
+# Make sure we can run config.sub.
+$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
+ { { $as_echo "$as_me:$LINENO: error: cannot run $SHELL $ac_aux_dir/config.sub" >&5
+$as_echo "$as_me: error: cannot run $SHELL $ac_aux_dir/config.sub" >&2;}
+ { (exit 1); exit 1; }; }
+
+{ $as_echo "$as_me:$LINENO: checking build system type" >&5
+$as_echo_n "checking build system type... " >&6; }
+if test "${ac_cv_build+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_build_alias=$build_alias
+test "x$ac_build_alias" = x &&
+ ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
+test "x$ac_build_alias" = x &&
+ { { $as_echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5
+$as_echo "$as_me: error: cannot guess build type; you must specify one" >&2;}
+ { (exit 1); exit 1; }; }
+ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
+ { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&5
+$as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&2;}
+ { (exit 1); exit 1; }; }
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_build" >&5
+$as_echo "$ac_cv_build" >&6; }
+case $ac_cv_build in
+*-*-*) ;;
+*) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical build" >&5
+$as_echo "$as_me: error: invalid value of canonical build" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+build=$ac_cv_build
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_build
+shift
+build_cpu=$1
+build_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+build_os=$*
+IFS=$ac_save_IFS
+case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
+
+
+{ $as_echo "$as_me:$LINENO: checking host system type" >&5
+$as_echo_n "checking host system type... " >&6; }
+if test "${ac_cv_host+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test "x$host_alias" = x; then
+ ac_cv_host=$ac_cv_build
+else
+ ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
+ { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5
+$as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_host" >&5
+$as_echo "$ac_cv_host" >&6; }
+case $ac_cv_host in
+*-*-*) ;;
+*) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical host" >&5
+$as_echo "$as_me: error: invalid value of canonical host" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+host=$ac_cv_host
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_host
+shift
+host_cpu=$1
+host_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+host_os=$*
+IFS=$ac_save_IFS
+case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
+
+
+
+# We do some special things on Windows (32-bit or 64-bit) builds.
+case $host_os in
+ mingw* | cygwin*) is_w32=yes ;;
+ *) is_w32=no ;;
+esac
+ if test "$is_w32" = yes; then
+ COND_W32_TRUE=
+ COND_W32_FALSE='#'
+else
+ COND_W32_TRUE='#'
+ COND_W32_FALSE=
+fi
+
+
+
+echo
+echo "Configure options:"
+AM_CFLAGS=
+
+
+#############
+# Debugging #
+#############
+
+{ $as_echo "$as_me:$LINENO: checking if debugging code should be compiled" >&5
+$as_echo_n "checking if debugging code should be compiled... " >&6; }
+# Check whether --enable-debug was given.
+if test "${enable_debug+set}" = set; then
+ enableval=$enable_debug;
+else
+ enable_debug=no
+fi
+
+if test "x$enable_debug" = xyes; then
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+
+cat >>confdefs.h <<\_ACEOF
+#define NDEBUG 1
+_ACEOF
+
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+###########
+# Filters #
+###########
+
+
+
+
+enable_filter_lzma1=no
+enable_encoder_lzma1=no
+enable_decoder_lzma1=no
+enable_filter_lzma2=no
+enable_encoder_lzma2=no
+enable_decoder_lzma2=no
+enable_filter_subblock=no
+enable_encoder_subblock=no
+enable_decoder_subblock=no
+enable_filter_delta=no
+enable_encoder_delta=no
+enable_decoder_delta=no
+enable_filter_x86=no
+enable_encoder_x86=no
+enable_decoder_x86=no
+enable_filter_powerpc=no
+enable_encoder_powerpc=no
+enable_decoder_powerpc=no
+enable_filter_ia64=no
+enable_encoder_ia64=no
+enable_decoder_ia64=no
+enable_filter_arm=no
+enable_encoder_arm=no
+enable_decoder_arm=no
+enable_filter_armthumb=no
+enable_encoder_armthumb=no
+enable_decoder_armthumb=no
+enable_filter_sparc=no
+enable_encoder_sparc=no
+enable_decoder_sparc=no
+
+{ $as_echo "$as_me:$LINENO: checking which encoders to build" >&5
+$as_echo_n "checking which encoders to build... " >&6; }
+# Check whether --enable-encoders was given.
+if test "${enable_encoders+set}" = set; then
+ enableval=$enable_encoders;
+else
+ enable_encoders=lzma1,lzma2,subblock,delta,x86,powerpc,ia64,arm,armthumb,sparc
+fi
+
+enable_encoders=`echo "$enable_encoders" | sed 's/,subblock//; s/,/ /g'`
+if test "x$enable_encoders" = xno || test "x$enable_encoders" = x; then
+ { $as_echo "$as_me:$LINENO: result: (none)" >&5
+$as_echo "(none)" >&6; }
+else
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER 1
+_ACEOF
+
+ for arg in $enable_encoders
+ do
+ case $arg in
+ lzma1)
+ enable_filter_lzma1=yes
+ enable_encoder_lzma1=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER_LZMA1 1
+_ACEOF
+
+ ;;
+ lzma2)
+ enable_filter_lzma2=yes
+ enable_encoder_lzma2=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER_LZMA2 1
+_ACEOF
+
+ ;;
+ subblock)
+ enable_filter_subblock=yes
+ enable_encoder_subblock=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER_SUBBLOCK 1
+_ACEOF
+
+ ;;
+ delta)
+ enable_filter_delta=yes
+ enable_encoder_delta=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER_DELTA 1
+_ACEOF
+
+ ;;
+ x86)
+ enable_filter_x86=yes
+ enable_encoder_x86=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER_X86 1
+_ACEOF
+
+ ;;
+ powerpc)
+ enable_filter_powerpc=yes
+ enable_encoder_powerpc=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER_POWERPC 1
+_ACEOF
+
+ ;;
+ ia64)
+ enable_filter_ia64=yes
+ enable_encoder_ia64=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER_IA64 1
+_ACEOF
+
+ ;;
+ arm)
+ enable_filter_arm=yes
+ enable_encoder_arm=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER_ARM 1
+_ACEOF
+
+ ;;
+ armthumb)
+ enable_filter_armthumb=yes
+ enable_encoder_armthumb=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER_ARMTHUMB 1
+_ACEOF
+
+ ;;
+ sparc)
+ enable_filter_sparc=yes
+ enable_encoder_sparc=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ENCODER_SPARC 1
+_ACEOF
+
+ ;;
+ *)
+ { $as_echo "$as_me:$LINENO: result: " >&5
+$as_echo "" >&6; }
+ { { $as_echo "$as_me:$LINENO: error: unknown filter: $arg" >&5
+$as_echo "$as_me: error: unknown filter: $arg" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ esac
+ done
+ { $as_echo "$as_me:$LINENO: result: $enable_encoders" >&5
+$as_echo "$enable_encoders" >&6; }
+fi
+
+{ $as_echo "$as_me:$LINENO: checking which decoders to build" >&5
+$as_echo_n "checking which decoders to build... " >&6; }
+# Check whether --enable-decoders was given.
+if test "${enable_decoders+set}" = set; then
+ enableval=$enable_decoders;
+else
+ enable_decoders=lzma1,lzma2,subblock,delta,x86,powerpc,ia64,arm,armthumb,sparc
+fi
+
+enable_decoders=`echo "$enable_decoders" | sed 's/,subblock//; s/,/ /g'`
+if test "x$enable_decoders" = xno || test "x$enable_decoders" = x; then
+ { $as_echo "$as_me:$LINENO: result: (none)" >&5
+$as_echo "(none)" >&6; }
+else
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER 1
+_ACEOF
+
+ for arg in $enable_decoders
+ do
+ case $arg in
+ lzma1)
+ enable_filter_lzma1=yes
+ enable_decoder_lzma1=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER_LZMA1 1
+_ACEOF
+
+ ;;
+ lzma2)
+ enable_filter_lzma2=yes
+ enable_decoder_lzma2=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER_LZMA2 1
+_ACEOF
+
+ ;;
+ subblock)
+ enable_filter_subblock=yes
+ enable_decoder_subblock=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER_SUBBLOCK 1
+_ACEOF
+
+ ;;
+ delta)
+ enable_filter_delta=yes
+ enable_decoder_delta=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER_DELTA 1
+_ACEOF
+
+ ;;
+ x86)
+ enable_filter_x86=yes
+ enable_decoder_x86=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER_X86 1
+_ACEOF
+
+ ;;
+ powerpc)
+ enable_filter_powerpc=yes
+ enable_decoder_powerpc=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER_POWERPC 1
+_ACEOF
+
+ ;;
+ ia64)
+ enable_filter_ia64=yes
+ enable_decoder_ia64=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER_IA64 1
+_ACEOF
+
+ ;;
+ arm)
+ enable_filter_arm=yes
+ enable_decoder_arm=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER_ARM 1
+_ACEOF
+
+ ;;
+ armthumb)
+ enable_filter_armthumb=yes
+ enable_decoder_armthumb=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER_ARMTHUMB 1
+_ACEOF
+
+ ;;
+ sparc)
+ enable_filter_sparc=yes
+ enable_decoder_sparc=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DECODER_SPARC 1
+_ACEOF
+
+ ;;
+ *)
+ { $as_echo "$as_me:$LINENO: result: " >&5
+$as_echo "" >&6; }
+ { { $as_echo "$as_me:$LINENO: error: unknown filter: $arg" >&5
+$as_echo "$as_me: error: unknown filter: $arg" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ esac
+ done
+
+ # LZMA2 requires that LZMA1 is enabled.
+ test "x$enable_encoder_lzma2" = xyes && enable_encoder_lzma1=yes
+ test "x$enable_decoder_lzma2" = xyes && enable_decoder_lzma1=yes
+
+ { $as_echo "$as_me:$LINENO: result: $enable_decoders" >&5
+$as_echo "$enable_decoders" >&6; }
+fi
+
+if test "x$enable_encoder_lzma2$enable_encoder_lzma1" = xyesno \
+ || test "x$enable_decoder_lzma2$enable_decoder_lzma1" = xyesno; then
+ { { $as_echo "$as_me:$LINENO: error: LZMA2 requires that LZMA1 is also enabled." >&5
+$as_echo "$as_me: error: LZMA2 requires that LZMA1 is also enabled." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+ if test "x$enable_encoders" != xno && test "x$enable_encoders" != x; then
+ COND_MAIN_ENCODER_TRUE=
+ COND_MAIN_ENCODER_FALSE='#'
+else
+ COND_MAIN_ENCODER_TRUE='#'
+ COND_MAIN_ENCODER_FALSE=
+fi
+
+ if test "x$enable_decoders" != xno && test "x$enable_decoders" != x; then
+ COND_MAIN_DECODER_TRUE=
+ COND_MAIN_DECODER_FALSE='#'
+else
+ COND_MAIN_DECODER_TRUE='#'
+ COND_MAIN_DECODER_FALSE=
+fi
+
+
+ if test "x$enable_filter_lzma1" = xyes; then
+ COND_FILTER_LZMA1_TRUE=
+ COND_FILTER_LZMA1_FALSE='#'
+else
+ COND_FILTER_LZMA1_TRUE='#'
+ COND_FILTER_LZMA1_FALSE=
+fi
+
+ if test "x$enable_encoder_lzma1" = xyes; then
+ COND_ENCODER_LZMA1_TRUE=
+ COND_ENCODER_LZMA1_FALSE='#'
+else
+ COND_ENCODER_LZMA1_TRUE='#'
+ COND_ENCODER_LZMA1_FALSE=
+fi
+
+ if test "x$enable_decoder_lzma1" = xyes; then
+ COND_DECODER_LZMA1_TRUE=
+ COND_DECODER_LZMA1_FALSE='#'
+else
+ COND_DECODER_LZMA1_TRUE='#'
+ COND_DECODER_LZMA1_FALSE=
+fi
+
+ if test "x$enable_filter_lzma2" = xyes; then
+ COND_FILTER_LZMA2_TRUE=
+ COND_FILTER_LZMA2_FALSE='#'
+else
+ COND_FILTER_LZMA2_TRUE='#'
+ COND_FILTER_LZMA2_FALSE=
+fi
+
+ if test "x$enable_encoder_lzma2" = xyes; then
+ COND_ENCODER_LZMA2_TRUE=
+ COND_ENCODER_LZMA2_FALSE='#'
+else
+ COND_ENCODER_LZMA2_TRUE='#'
+ COND_ENCODER_LZMA2_FALSE=
+fi
+
+ if test "x$enable_decoder_lzma2" = xyes; then
+ COND_DECODER_LZMA2_TRUE=
+ COND_DECODER_LZMA2_FALSE='#'
+else
+ COND_DECODER_LZMA2_TRUE='#'
+ COND_DECODER_LZMA2_FALSE=
+fi
+
+ if test "x$enable_filter_subblock" = xyes; then
+ COND_FILTER_SUBBLOCK_TRUE=
+ COND_FILTER_SUBBLOCK_FALSE='#'
+else
+ COND_FILTER_SUBBLOCK_TRUE='#'
+ COND_FILTER_SUBBLOCK_FALSE=
+fi
+
+ if test "x$enable_encoder_subblock" = xyes; then
+ COND_ENCODER_SUBBLOCK_TRUE=
+ COND_ENCODER_SUBBLOCK_FALSE='#'
+else
+ COND_ENCODER_SUBBLOCK_TRUE='#'
+ COND_ENCODER_SUBBLOCK_FALSE=
+fi
+
+ if test "x$enable_decoder_subblock" = xyes; then
+ COND_DECODER_SUBBLOCK_TRUE=
+ COND_DECODER_SUBBLOCK_FALSE='#'
+else
+ COND_DECODER_SUBBLOCK_TRUE='#'
+ COND_DECODER_SUBBLOCK_FALSE=
+fi
+
+ if test "x$enable_filter_delta" = xyes; then
+ COND_FILTER_DELTA_TRUE=
+ COND_FILTER_DELTA_FALSE='#'
+else
+ COND_FILTER_DELTA_TRUE='#'
+ COND_FILTER_DELTA_FALSE=
+fi
+
+ if test "x$enable_encoder_delta" = xyes; then
+ COND_ENCODER_DELTA_TRUE=
+ COND_ENCODER_DELTA_FALSE='#'
+else
+ COND_ENCODER_DELTA_TRUE='#'
+ COND_ENCODER_DELTA_FALSE=
+fi
+
+ if test "x$enable_decoder_delta" = xyes; then
+ COND_DECODER_DELTA_TRUE=
+ COND_DECODER_DELTA_FALSE='#'
+else
+ COND_DECODER_DELTA_TRUE='#'
+ COND_DECODER_DELTA_FALSE=
+fi
+
+ if test "x$enable_filter_x86" = xyes; then
+ COND_FILTER_X86_TRUE=
+ COND_FILTER_X86_FALSE='#'
+else
+ COND_FILTER_X86_TRUE='#'
+ COND_FILTER_X86_FALSE=
+fi
+
+ if test "x$enable_encoder_x86" = xyes; then
+ COND_ENCODER_X86_TRUE=
+ COND_ENCODER_X86_FALSE='#'
+else
+ COND_ENCODER_X86_TRUE='#'
+ COND_ENCODER_X86_FALSE=
+fi
+
+ if test "x$enable_decoder_x86" = xyes; then
+ COND_DECODER_X86_TRUE=
+ COND_DECODER_X86_FALSE='#'
+else
+ COND_DECODER_X86_TRUE='#'
+ COND_DECODER_X86_FALSE=
+fi
+
+ if test "x$enable_filter_powerpc" = xyes; then
+ COND_FILTER_POWERPC_TRUE=
+ COND_FILTER_POWERPC_FALSE='#'
+else
+ COND_FILTER_POWERPC_TRUE='#'
+ COND_FILTER_POWERPC_FALSE=
+fi
+
+ if test "x$enable_encoder_powerpc" = xyes; then
+ COND_ENCODER_POWERPC_TRUE=
+ COND_ENCODER_POWERPC_FALSE='#'
+else
+ COND_ENCODER_POWERPC_TRUE='#'
+ COND_ENCODER_POWERPC_FALSE=
+fi
+
+ if test "x$enable_decoder_powerpc" = xyes; then
+ COND_DECODER_POWERPC_TRUE=
+ COND_DECODER_POWERPC_FALSE='#'
+else
+ COND_DECODER_POWERPC_TRUE='#'
+ COND_DECODER_POWERPC_FALSE=
+fi
+
+ if test "x$enable_filter_ia64" = xyes; then
+ COND_FILTER_IA64_TRUE=
+ COND_FILTER_IA64_FALSE='#'
+else
+ COND_FILTER_IA64_TRUE='#'
+ COND_FILTER_IA64_FALSE=
+fi
+
+ if test "x$enable_encoder_ia64" = xyes; then
+ COND_ENCODER_IA64_TRUE=
+ COND_ENCODER_IA64_FALSE='#'
+else
+ COND_ENCODER_IA64_TRUE='#'
+ COND_ENCODER_IA64_FALSE=
+fi
+
+ if test "x$enable_decoder_ia64" = xyes; then
+ COND_DECODER_IA64_TRUE=
+ COND_DECODER_IA64_FALSE='#'
+else
+ COND_DECODER_IA64_TRUE='#'
+ COND_DECODER_IA64_FALSE=
+fi
+
+ if test "x$enable_filter_arm" = xyes; then
+ COND_FILTER_ARM_TRUE=
+ COND_FILTER_ARM_FALSE='#'
+else
+ COND_FILTER_ARM_TRUE='#'
+ COND_FILTER_ARM_FALSE=
+fi
+
+ if test "x$enable_encoder_arm" = xyes; then
+ COND_ENCODER_ARM_TRUE=
+ COND_ENCODER_ARM_FALSE='#'
+else
+ COND_ENCODER_ARM_TRUE='#'
+ COND_ENCODER_ARM_FALSE=
+fi
+
+ if test "x$enable_decoder_arm" = xyes; then
+ COND_DECODER_ARM_TRUE=
+ COND_DECODER_ARM_FALSE='#'
+else
+ COND_DECODER_ARM_TRUE='#'
+ COND_DECODER_ARM_FALSE=
+fi
+
+ if test "x$enable_filter_armthumb" = xyes; then
+ COND_FILTER_ARMTHUMB_TRUE=
+ COND_FILTER_ARMTHUMB_FALSE='#'
+else
+ COND_FILTER_ARMTHUMB_TRUE='#'
+ COND_FILTER_ARMTHUMB_FALSE=
+fi
+
+ if test "x$enable_encoder_armthumb" = xyes; then
+ COND_ENCODER_ARMTHUMB_TRUE=
+ COND_ENCODER_ARMTHUMB_FALSE='#'
+else
+ COND_ENCODER_ARMTHUMB_TRUE='#'
+ COND_ENCODER_ARMTHUMB_FALSE=
+fi
+
+ if test "x$enable_decoder_armthumb" = xyes; then
+ COND_DECODER_ARMTHUMB_TRUE=
+ COND_DECODER_ARMTHUMB_FALSE='#'
+else
+ COND_DECODER_ARMTHUMB_TRUE='#'
+ COND_DECODER_ARMTHUMB_FALSE=
+fi
+
+ if test "x$enable_filter_sparc" = xyes; then
+ COND_FILTER_SPARC_TRUE=
+ COND_FILTER_SPARC_FALSE='#'
+else
+ COND_FILTER_SPARC_TRUE='#'
+ COND_FILTER_SPARC_FALSE=
+fi
+
+ if test "x$enable_encoder_sparc" = xyes; then
+ COND_ENCODER_SPARC_TRUE=
+ COND_ENCODER_SPARC_FALSE='#'
+else
+ COND_ENCODER_SPARC_TRUE='#'
+ COND_ENCODER_SPARC_FALSE=
+fi
+
+ if test "x$enable_decoder_sparc" = xyes; then
+ COND_DECODER_SPARC_TRUE=
+ COND_DECODER_SPARC_FALSE='#'
+else
+ COND_DECODER_SPARC_TRUE='#'
+ COND_DECODER_SPARC_FALSE=
+fi
+
+
+# The so called "simple filters" share common code.
+enable_filter_simple=no
+enable_encoder_simple=no
+enable_decoder_simple=no
+test "x$enable_filter_x86" = xyes && enable_filter_simple=yes
+test "x$enable_encoder_x86" = xyes && enable_encoder_simple=yes
+test "x$enable_decoder_x86" = xyes && enable_decoder_simple=yes
+test "x$enable_filter_powerpc" = xyes && enable_filter_simple=yes
+test "x$enable_encoder_powerpc" = xyes && enable_encoder_simple=yes
+test "x$enable_decoder_powerpc" = xyes && enable_decoder_simple=yes
+test "x$enable_filter_ia64" = xyes && enable_filter_simple=yes
+test "x$enable_encoder_ia64" = xyes && enable_encoder_simple=yes
+test "x$enable_decoder_ia64" = xyes && enable_decoder_simple=yes
+test "x$enable_filter_arm" = xyes && enable_filter_simple=yes
+test "x$enable_encoder_arm" = xyes && enable_encoder_simple=yes
+test "x$enable_decoder_arm" = xyes && enable_decoder_simple=yes
+test "x$enable_filter_armthumb" = xyes && enable_filter_simple=yes
+test "x$enable_encoder_armthumb" = xyes && enable_encoder_simple=yes
+test "x$enable_decoder_armthumb" = xyes && enable_decoder_simple=yes
+test "x$enable_filter_sparc" = xyes && enable_filter_simple=yes
+test "x$enable_encoder_sparc" = xyes && enable_encoder_simple=yes
+test "x$enable_decoder_sparc" = xyes && enable_decoder_simple=yes
+ if test "x$enable_filter_simple" = xyes; then
+ COND_FILTER_SIMPLE_TRUE=
+ COND_FILTER_SIMPLE_FALSE='#'
+else
+ COND_FILTER_SIMPLE_TRUE='#'
+ COND_FILTER_SIMPLE_FALSE=
+fi
+
+ if test "x$enable_encoder_simple" = xyes; then
+ COND_ENCODER_SIMPLE_TRUE=
+ COND_ENCODER_SIMPLE_FALSE='#'
+else
+ COND_ENCODER_SIMPLE_TRUE='#'
+ COND_ENCODER_SIMPLE_FALSE=
+fi
+
+ if test "x$enable_decoder_simple" = xyes; then
+ COND_DECODER_SIMPLE_TRUE=
+ COND_DECODER_SIMPLE_FALSE='#'
+else
+ COND_DECODER_SIMPLE_TRUE='#'
+ COND_DECODER_SIMPLE_FALSE=
+fi
+
+
+# LZ-based filters share common code.
+enable_filter_lz=no
+enable_encoder_lz=no
+enable_decoder_lz=no
+test "x$enable_filter_lzma1" = xyes && enable_filter_lz=yes
+test "x$enable_encoder_lzma1" = xyes && enable_encoder_lz=yes
+test "x$enable_decoder_lzma1" = xyes && enable_decoder_lz=yes
+test "x$enable_filter_lzma2" = xyes && enable_filter_lz=yes
+test "x$enable_encoder_lzma2" = xyes && enable_encoder_lz=yes
+test "x$enable_decoder_lzma2" = xyes && enable_decoder_lz=yes
+ if test "x$enable_filter_lz" = xyes; then
+ COND_FILTER_LZ_TRUE=
+ COND_FILTER_LZ_FALSE='#'
+else
+ COND_FILTER_LZ_TRUE='#'
+ COND_FILTER_LZ_FALSE=
+fi
+
+ if test "x$enable_encoder_lz" = xyes; then
+ COND_ENCODER_LZ_TRUE=
+ COND_ENCODER_LZ_FALSE='#'
+else
+ COND_ENCODER_LZ_TRUE='#'
+ COND_ENCODER_LZ_FALSE=
+fi
+
+ if test "x$enable_decoder_lz" = xyes; then
+ COND_DECODER_LZ_TRUE=
+ COND_DECODER_LZ_FALSE='#'
+else
+ COND_DECODER_LZ_TRUE='#'
+ COND_DECODER_LZ_FALSE=
+fi
+
+
+
+#################
+# Match finders #
+#################
+
+
+
+enable_match_finder_hc3=no
+enable_match_finder_hc4=no
+enable_match_finder_bt2=no
+enable_match_finder_bt3=no
+enable_match_finder_bt4=no
+
+
+{ $as_echo "$as_me:$LINENO: checking which match finders to build" >&5
+$as_echo_n "checking which match finders to build... " >&6; }
+# Check whether --enable-match-finders was given.
+if test "${enable_match_finders+set}" = set; then
+ enableval=$enable_match_finders;
+else
+ enable_match_finders=hc3,hc4,bt2,bt3,bt4
+fi
+
+enable_match_finders=`echo "$enable_match_finders" | sed 's/,/ /g'`
+if test "x$enable_encoder_lz" = xyes ; then
+ for arg in $enable_match_finders
+ do
+ case $arg in
+ hc3)
+ enable_match_finder_hc3=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_MF_HC3 1
+_ACEOF
+
+ ;;
+ hc4)
+ enable_match_finder_hc4=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_MF_HC4 1
+_ACEOF
+
+ ;;
+ bt2)
+ enable_match_finder_bt2=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_MF_BT2 1
+_ACEOF
+
+ ;;
+ bt3)
+ enable_match_finder_bt3=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_MF_BT3 1
+_ACEOF
+
+ ;;
+ bt4)
+ enable_match_finder_bt4=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_MF_BT4 1
+_ACEOF
+
+ ;;
+ *)
+ { $as_echo "$as_me:$LINENO: result: " >&5
+$as_echo "" >&6; }
+ { { $as_echo "$as_me:$LINENO: error: unknown match finder: $arg" >&5
+$as_echo "$as_me: error: unknown match finder: $arg" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ esac
+ done
+ { $as_echo "$as_me:$LINENO: result: $enable_match_finders" >&5
+$as_echo "$enable_match_finders" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: (none because not building any LZ-based encoder)" >&5
+$as_echo "(none because not building any LZ-based encoder)" >&6; }
+fi
+
+
+####################
+# Integrity checks #
+####################
+
+
+
+enable_check_lzma1=no
+enable_check_lzma2=no
+enable_check_subblock=no
+enable_check_delta=no
+enable_check_x86=no
+enable_check_powerpc=no
+enable_check_ia64=no
+enable_check_arm=no
+enable_check_armthumb=no
+enable_check_sparc=no
+
+{ $as_echo "$as_me:$LINENO: checking which integrity checks to build" >&5
+$as_echo_n "checking which integrity checks to build... " >&6; }
+# Check whether --enable-checks was given.
+if test "${enable_checks+set}" = set; then
+ enableval=$enable_checks;
+else
+ enable_checks=crc32,crc64,sha256
+fi
+
+enable_checks=`echo "$enable_checks" | sed 's/,/ /g'`
+if test "x$enable_checks" = xno || test "x$enable_checks" = x; then
+ { $as_echo "$as_me:$LINENO: result: (none)" >&5
+$as_echo "(none)" >&6; }
+else
+ for arg in $enable_checks
+ do
+ case $arg in
+ crc32)
+ enable_check_crc32=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_CHECK_CRC32 1
+_ACEOF
+
+ ;;
+ crc64)
+ enable_check_crc64=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_CHECK_CRC64 1
+_ACEOF
+
+ ;;
+ sha256)
+ enable_check_sha256=yes
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_CHECK_SHA256 1
+_ACEOF
+
+ ;;
+ *)
+ { $as_echo "$as_me:$LINENO: result: " >&5
+$as_echo "" >&6; }
+ { { $as_echo "$as_me:$LINENO: error: unknown integrity check: $arg" >&5
+$as_echo "$as_me: error: unknown integrity check: $arg" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ esac
+ done
+ { $as_echo "$as_me:$LINENO: result: $enable_checks" >&5
+$as_echo "$enable_checks" >&6; }
+fi
+if test "x$enable_checks_crc32" = xno ; then
+ { { $as_echo "$as_me:$LINENO: error: For now, the CRC32 check must always be enabled." >&5
+$as_echo "$as_me: error: For now, the CRC32 check must always be enabled." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+ if test "x$enable_check_crc32" = xyes; then
+ COND_CHECK_CRC32_TRUE=
+ COND_CHECK_CRC32_FALSE='#'
+else
+ COND_CHECK_CRC32_TRUE='#'
+ COND_CHECK_CRC32_FALSE=
+fi
+
+ if test "x$enable_check_crc64" = xyes; then
+ COND_CHECK_CRC64_TRUE=
+ COND_CHECK_CRC64_FALSE='#'
+else
+ COND_CHECK_CRC64_TRUE='#'
+ COND_CHECK_CRC64_FALSE=
+fi
+
+ if test "x$enable_check_sha256" = xyes; then
+ COND_CHECK_SHA256_TRUE=
+ COND_CHECK_SHA256_FALSE='#'
+else
+ COND_CHECK_SHA256_TRUE='#'
+ COND_CHECK_SHA256_FALSE=
+fi
+
+
+
+###########################
+# Assembler optimizations #
+###########################
+
+{ $as_echo "$as_me:$LINENO: checking if assembler optimizations should be used" >&5
+$as_echo_n "checking if assembler optimizations should be used... " >&6; }
+# Check whether --enable-assembler was given.
+if test "${enable_assembler+set}" = set; then
+ enableval=$enable_assembler;
+else
+ enable_assembler=yes
+fi
+
+if test "x$enable_assembler" = xyes; then
+ case $host_cpu in
+ i?86) enable_assembler=x86 ;;
+ x86_64) enable_assembler=x86_64 ;;
+ *) enable_assembler=no ;;
+ esac
+fi
+case $enable_assembler in
+ x86)
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ASM_X86 1
+_ACEOF
+
+ ;;
+ x86_64)
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ASM_X86_64 1
+_ACEOF
+
+ ;;
+ no)
+ ;;
+ *)
+ { $as_echo "$as_me:$LINENO: result: " >&5
+$as_echo "" >&6; }
+ { { $as_echo "$as_me:$LINENO: error: --enable-assembler accepts only \`yes', \`no', \`x86', or \`x86_64'." >&5
+$as_echo "$as_me: error: --enable-assembler accepts only \`yes', \`no', \`x86', or \`x86_64'." >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+esac
+{ $as_echo "$as_me:$LINENO: result: $enable_assembler" >&5
+$as_echo "$enable_assembler" >&6; }
+ if test "x$enable_assembler" = xx86; then
+ COND_ASM_X86_TRUE=
+ COND_ASM_X86_FALSE='#'
+else
+ COND_ASM_X86_TRUE='#'
+ COND_ASM_X86_FALSE=
+fi
+
+ if test "x$enable_assembler" = xx86_64; then
+ COND_ASM_X86_64_TRUE=
+ COND_ASM_X86_64_FALSE='#'
+else
+ COND_ASM_X86_64_TRUE='#'
+ COND_ASM_X86_64_FALSE=
+fi
+
+
+
+################################
+# Fast unaligned memory access #
+################################
+
+{ $as_echo "$as_me:$LINENO: checking if unaligned memory access should be used" >&5
+$as_echo_n "checking if unaligned memory access should be used... " >&6; }
+# Check whether --enable-unaligned-access was given.
+if test "${enable_unaligned_access+set}" = set; then
+ enableval=$enable_unaligned_access;
+else
+ enable_unaligned_access=auto
+fi
+
+if test "x$enable_unaligned_access" = xauto ; then
+ case $host_cpu in
+ i?86|x86_64|powerpc|powerpc64)
+ enable_unaligned_access=yes
+ ;;
+ *)
+ enable_unaligned_access=no
+ ;;
+ esac
+fi
+if test "x$enable_unaligned_access" = xyes ; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_FAST_UNALIGNED_ACCESS 1
+_ACEOF
+
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+#####################
+# Size optimization #
+#####################
+
+{ $as_echo "$as_me:$LINENO: checking if small size is preferred over speed" >&5
+$as_echo_n "checking if small size is preferred over speed... " >&6; }
+# Check whether --enable-small was given.
+if test "${enable_small+set}" = set; then
+ enableval=$enable_small;
+else
+ enable_small=no
+fi
+
+if test "x$enable_small" = xyes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_SMALL 1
+_ACEOF
+
+elif test "x$enable_small" != xno; then
+ { $as_echo "$as_me:$LINENO: result: " >&5
+$as_echo "" >&6; }
+ { { $as_echo "$as_me:$LINENO: error: --enable-small accepts only \`yes' or \`no'" >&5
+$as_echo "$as_me: error: --enable-small accepts only \`yes' or \`no'" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+{ $as_echo "$as_me:$LINENO: result: $enable_small" >&5
+$as_echo "$enable_small" >&6; }
+ if test "x$enable_small" = xyes; then
+ COND_SMALL_TRUE=
+ COND_SMALL_FALSE='#'
+else
+ COND_SMALL_TRUE='#'
+ COND_SMALL_FALSE=
+fi
+
+
+
+#############
+# Threading #
+#############
+
+{ $as_echo "$as_me:$LINENO: checking if threading support is wanted" >&5
+$as_echo_n "checking if threading support is wanted... " >&6; }
+# Check whether --enable-threads was given.
+if test "${enable_threads+set}" = set; then
+ enableval=$enable_threads;
+else
+ enable_threads=yes
+fi
+
+if test "x$enable_threads" != xyes && test "x$enable_threads" != xno; then
+ { $as_echo "$as_me:$LINENO: result: " >&5
+$as_echo "" >&6; }
+ { { $as_echo "$as_me:$LINENO: error: --enable-threads accepts only \`yes' or \`no'" >&5
+$as_echo "$as_me: error: --enable-threads accepts only \`yes' or \`no'" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+{ $as_echo "$as_me:$LINENO: result: $enable_threads" >&5
+$as_echo "$enable_threads" >&6; }
+# We use the actual result a little later.
+
+
+############################################
+# xz/xzdec/lzmadec linkage against liblzma #
+############################################
+
+# Link the xz, xzdec, and lzmadec command line tools against static liblzma
+# unless using --enable-dynamic. Using static liblzma gives a little bit
+# faster executable on x86, because no register is wasted for PIC. We also
+# have one dependency less, which allows users to more freely copy the xz
+# binary to other boxes. However, I wouldn't be surprised if distro
+# maintainers still prefer dynamic linking, so let's make it easy for them.
+
+{ $as_echo "$as_me:$LINENO: checking how programs should be linked against liblzma" >&5
+$as_echo_n "checking how programs should be linked against liblzma... " >&6; }
+# Check whether --enable-dynamic was given.
+if test "${enable_dynamic+set}" = set; then
+ enableval=$enable_dynamic;
+else
+ enable_dynamic=mixed
+fi
+
+case $enable_dynamic in
+ mixed)
+ { $as_echo "$as_me:$LINENO: result: mixed (some dynamically, some statically)" >&5
+$as_echo "mixed (some dynamically, some statically)" >&6; }
+ ;;
+ yes)
+ { $as_echo "$as_me:$LINENO: result: dynamically" >&5
+$as_echo "dynamically" >&6; }
+ ;;
+ no)
+ { $as_echo "$as_me:$LINENO: result: statically" >&5
+$as_echo "statically" >&6; }
+ ;;
+ *)
+ { $as_echo "$as_me:$LINENO: result: " >&5
+$as_echo "" >&6; }
+ { { $as_echo "$as_me:$LINENO: error: --enable-dynamic accepts only \`mixed', \`yes', or \`no'" >&5
+$as_echo "$as_me: error: --enable-dynamic accepts only \`mixed', \`yes', or \`no'" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+esac
+# We use the actual results later, because we don't know yet
+# if --disable-shared or --disable-static was used.
+
+
+###############################################################################
+# Checks for programs.
+###############################################################################
+
+echo
+
+ { $as_echo "$as_me:$LINENO: checking for a shell that conforms to POSIX" >&5
+$as_echo_n "checking for a shell that conforms to POSIX... " >&6; }
+if test "${gl_cv_posix_shell+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ gl_test_posix_shell_script='
+ func_return () {
+ (exit $1)
+ }
+ func_success () {
+ func_return 0
+ }
+ func_failure () {
+ func_return 1
+ }
+ func_ret_success () {
+ return 0
+ }
+ func_ret_failure () {
+ return 1
+ }
+ subshell_umask_sanity () {
+ (umask 22; (umask 0); test $(umask) -eq 22)
+ }
+ test "$(echo foo)" = foo &&
+ func_success &&
+ ! func_failure &&
+ func_ret_success &&
+ ! func_ret_failure &&
+ (set x && func_ret_success y && test x = "$1") &&
+ subshell_umask_sanity
+ '
+ for gl_cv_posix_shell in \
+ "$CONFIG_SHELL" "$SHELL" /bin/sh /bin/bash /bin/ksh /bin/sh5 no; do
+ case $gl_cv_posix_shell in
+ /*)
+ "$gl_cv_posix_shell" -c "$gl_test_posix_shell_script" 2>/dev/null \
+ && break;;
+ esac
+ done
+fi
+{ $as_echo "$as_me:$LINENO: result: $gl_cv_posix_shell" >&5
+$as_echo "$gl_cv_posix_shell" >&6; }
+
+ if test "$gl_cv_posix_shell" != no; then
+ POSIX_SHELL=$gl_cv_posix_shell
+ PREFERABLY_POSIX_SHELL=$POSIX_SHELL
+ else
+ POSIX_SHELL=
+ PREFERABLY_POSIX_SHELL=/bin/sh
+ fi
+
+
+
+if test -z "$POSIX_SHELL" ; then
+ { { $as_echo "$as_me:$LINENO: error: No POSIX conforming shell (sh) was found." >&5
+$as_echo "$as_me: error: No POSIX conforming shell (sh) was found." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+echo
+echo "Initializing Automake:"
+
+am__api_version='1.11'
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+# Reject install programs that cannot install multiple files.
+{ $as_echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5
+$as_echo_n "checking for a BSD-compatible install... " >&6; }
+if test -z "$INSTALL"; then
+if test "${ac_cv_path_install+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in
+  ./ | .// | /[cC]/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+  ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ rm -rf conftest.one conftest.two conftest.dir
+ echo one > conftest.one
+ echo two > conftest.two
+ mkdir conftest.dir
+ if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
+ test -s conftest.one && test -s conftest.two &&
+ test -s conftest.dir/conftest.one &&
+ test -s conftest.dir/conftest.two
+ then
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+
+done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $INSTALL" >&5
+$as_echo "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+{ $as_echo "$as_me:$LINENO: checking whether build environment is sane" >&5
+$as_echo_n "checking whether build environment is sane... " >&6; }
+# Just in case
+sleep 1
+echo timestamp > conftest.file
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name. Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+ *[\\\"\#\$\&\'\`$am_lf]*)
+ { { $as_echo "$as_me:$LINENO: error: unsafe absolute working directory name" >&5
+$as_echo "$as_me: error: unsafe absolute working directory name" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+case $srcdir in
+ *[\\\"\#\$\&\'\`$am_lf\ \ ]*)
+ { { $as_echo "$as_me:$LINENO: error: unsafe srcdir value: \`$srcdir'" >&5
+$as_echo "$as_me: error: unsafe srcdir value: \`$srcdir'" >&2;}
+ { (exit 1); exit 1; }; };;
+esac
+
+# Do `set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$*" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ rm -f conftest.file
+ if test "$*" != "X $srcdir/configure conftest.file" \
+ && test "$*" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ { { $as_echo "$as_me:$LINENO: error: ls -t appears to fail. Make sure there is not a broken
+alias in your environment" >&5
+$as_echo "$as_me: error: ls -t appears to fail. Make sure there is not a broken
+alias in your environment" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+ test "$2" = conftest.file
+ )
+then
+ # Ok.
+ :
+else
+ { { $as_echo "$as_me:$LINENO: error: newly created file is older than distributed files!
+Check your system clock" >&5
+$as_echo "$as_me: error: newly created file is older than distributed files!
+Check your system clock" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+{ $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+test "$program_prefix" != NONE &&
+ program_transform_name="s&^&$program_prefix&;$program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+ program_transform_name="s&\$&$program_suffix&;$program_transform_name"
+# Double any \ or $.
+# By default was `s,x,x', remove it if useless.
+ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
+program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"`
+
+# expand $ac_aux_dir to an absolute path
+am_aux_dir=`cd $ac_aux_dir && pwd`
+
+if test x"${MISSING+set}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
+ *)
+ MISSING="\${SHELL} $am_aux_dir/missing" ;;
+ esac
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --run true"; then
+ am_missing_run="$MISSING --run "
+else
+ am_missing_run=
+ { $as_echo "$as_me:$LINENO: WARNING: \`missing' script is too old or missing" >&5
+$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;}
+fi
+
+if test x"${install_sh}" != xset; then
+ case $am_aux_dir in
+ *\ * | *\ *)
+ install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+ *)
+ install_sh="\${SHELL} $am_aux_dir/install-sh"
+ esac
+fi
+
+# Installed binaries are usually stripped using `strip' when the user
+# runs `make install-strip'. However `strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the `STRIP' environment variable to overrule this program.
+if test "$cross_compiling" != no; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_STRIP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$STRIP"; then
+ ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+ { $as_echo "$as_me:$LINENO: result: $STRIP" >&5
+$as_echo "$STRIP" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+ ac_ct_STRIP=$STRIP
+ # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_STRIP"; then
+ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_STRIP="strip"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5
+$as_echo "$ac_ct_STRIP" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_STRIP" = x; then
+ STRIP=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ STRIP=$ac_ct_STRIP
+ fi
+else
+ STRIP="$ac_cv_prog_STRIP"
+fi
+
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+
+{ $as_echo "$as_me:$LINENO: checking for a thread-safe mkdir -p" >&5
+$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
+if test -z "$MKDIR_P"; then
+ if test "${ac_cv_path_mkdir+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in mkdir gmkdir; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue
+ case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
+ 'mkdir (GNU coreutils) '* | \
+ 'mkdir (coreutils) '* | \
+ 'mkdir (fileutils) '4.1*)
+ ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext
+ break 3;;
+ esac
+ done
+ done
+done
+IFS=$as_save_IFS
+
+fi
+
+ if test "${ac_cv_path_mkdir+set}" = set; then
+ MKDIR_P="$ac_cv_path_mkdir -p"
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for MKDIR_P within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ test -d ./--version && rmdir ./--version
+ MKDIR_P="$ac_install_sh -d"
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $MKDIR_P" >&5
+$as_echo "$MKDIR_P" >&6; }
+
+mkdir_p="$MKDIR_P"
+case $mkdir_p in
+ [\\/$]* | ?:[\\/]*) ;;
+ */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
+esac
+
+for ac_prog in gawk mawk nawk awk
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_AWK+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$AWK"; then
+ ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_AWK="$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+ { $as_echo "$as_me:$LINENO: result: $AWK" >&5
+$as_echo "$AWK" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$AWK" && break
+done
+
+{ $as_echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
+set x ${MAKE-make}
+ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
+if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.make <<\_ACEOF
+SHELL = /bin/sh
+all:
+ @echo '@@@%%%=$(MAKE)=@@@%%%'
+_ACEOF
+# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
+case `${MAKE-make} -f conftest.make 2>/dev/null` in
+ *@@@%%%=?*=@@@%%%*)
+ eval ac_cv_prog_make_${ac_make}_set=yes;;
+ *)
+ eval ac_cv_prog_make_${ac_make}_set=no;;
+esac
+rm -f conftest.make
+fi
+if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+ SET_MAKE=
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+ SET_MAKE="MAKE=${MAKE-make}"
+fi
+
+rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+ am__leading_dot=.
+else
+ am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+ # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+ # is not polluted with repeated "-I."
+ am__isrc=' -I$(srcdir)'
+ # test to see if srcdir already configured
+ if test -f $srcdir/config.status; then
+ { { $as_echo "$as_me:$LINENO: error: source directory already configured; run \"make distclean\" there first" >&5
+$as_echo "$as_me: error: source directory already configured; run \"make distclean\" there first" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+ if (cygpath --version) >/dev/null 2>/dev/null; then
+ CYGPATH_W='cygpath -w'
+ else
+ CYGPATH_W=echo
+ fi
+fi
+
+
+# Define the identity of the package.
+ PACKAGE='xz'
+ VERSION='4.999.9beta'
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE "$PACKAGE"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define VERSION "$VERSION"
+_ACEOF
+
+# Some tools Automake needs.
+
+ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"}
+
+
+AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"}
+
+
+AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"}
+
+
+AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
+
+
+MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
+
+# We need awk for the "check" target. The system "awk" is bad on
+# some platforms.
+# Always define AMTAR for backward compatibility.
+
+AMTAR=${AMTAR-"${am_missing_run}tar"}
+
+am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'
+
+
+
+
+
+{ $as_echo "$as_me:$LINENO: checking whether ln -s works" >&5
+$as_echo_n "checking whether ln -s works... " >&6; }
+LN_S=$as_ln_s
+if test "$LN_S" = "ln -s"; then
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no, using $LN_S" >&5
+$as_echo "no, using $LN_S" >&6; }
+fi
+
+
+DEPDIR="${am__leading_dot}deps"
+
+ac_config_commands="$ac_config_commands depfiles"
+
+
+am_make=${MAKE-make}
+cat > confinc << 'END'
+am__doit:
+ @echo this is the am__doit target
+.PHONY: am__doit
+END
+# If we don't find an include directive, just comment out the code.
+{ $as_echo "$as_me:$LINENO: checking for style of include used by $am_make" >&5
+$as_echo_n "checking for style of include used by $am_make... " >&6; }
+am__include="#"
+am__quote=
+_am_result=none
+# First try GNU make style include.
+echo "include confinc" > confmf
+# Ignore all kinds of additional output from `make'.
+case `$am_make -s -f confmf 2> /dev/null` in #(
+*the\ am__doit\ target*)
+ am__include=include
+ am__quote=
+ _am_result=GNU
+ ;;
+esac
+# Now try BSD make style include.
+if test "$am__include" = "#"; then
+ echo '.include "confinc"' > confmf
+ case `$am_make -s -f confmf 2> /dev/null` in #(
+ *the\ am__doit\ target*)
+ am__include=.include
+ am__quote="\""
+ _am_result=BSD
+ ;;
+ esac
+fi
+
+
+{ $as_echo "$as_me:$LINENO: result: $_am_result" >&5
+$as_echo "$_am_result" >&6; }
+rm -f confinc confmf
+
+# Check whether --enable-dependency-tracking was given.
+if test "${enable_dependency_tracking+set}" = set; then
+ enableval=$enable_dependency_tracking;
+fi
+
+if test "x$enable_dependency_tracking" != xno; then
+ am_depcomp="$ac_aux_dir/depcomp"
+ AMDEPBACKSLASH='\'
+fi
+ if test "x$enable_dependency_tracking" != xno; then
+ AMDEP_TRUE=
+ AMDEP_FALSE='#'
+else
+ AMDEP_TRUE='#'
+ AMDEP_FALSE=
+fi
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:$LINENO: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:$LINENO: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ fi
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:$LINENO: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl.exe
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { $as_echo "$as_me:$LINENO: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl.exe
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CC" && break
+done
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+fi
+
+fi
+
+
+test -z "$CC" && { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }; }
+
+# Provide some information about the compiler.
+$as_echo "$as_me:$LINENO: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+{ (ac_try="$ac_compiler --version >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compiler --version >&5") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -v >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compiler -v >&5") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -V >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compiler -V >&5") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers, and finding out an intuition
+# of exeext.
+{ $as_echo "$as_me:$LINENO: checking for C compiler default output file name" >&5
+$as_echo_n "checking for C compiler default output file name... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+ esac
+done
+rm -f $ac_rmfiles
+
+if { (ac_try="$ac_link_default"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link_default") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile. We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ then :; else
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ fi
+ # We set ac_cv_exeext here because the later test for it is not
+ # safe: cross compilers may not add the suffix if given an `-o'
+ # argument, so we may need to know it at that point already.
+ # Even if this section looks crufty: it has the advantage of
+ # actually working.
+ break;;
+ * )
+ break;;
+ esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+ ac_file=''
+fi
+
+{ $as_echo "$as_me:$LINENO: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+if test -z "$ac_file"; then
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: C compiler cannot create executables
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: C compiler cannot create executables
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }; }
+fi
+
+ac_exeext=$ac_cv_exeext
+
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:$LINENO: checking whether the C compiler works" >&5
+$as_echo_n "checking whether the C compiler works... " >&6; }
+# FIXME: These cross compiler hacks should be removed for Autoconf 3.0
+# If not cross compiling, check that we can run a simple program.
+if test "$cross_compiling" != yes; then
+ if { ac_try='./$ac_file'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }; }
+ fi
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:$LINENO: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+{ $as_echo "$as_me:$LINENO: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+{ $as_echo "$as_me:$LINENO: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }; }
+fi
+
+rm -f conftest$ac_cv_exeext
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+{ $as_echo "$as_me:$LINENO: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if test "${ac_cv_objext+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ for ac_file in conftest.o conftest.obj conftest.*; do
+ test -f "$ac_file" || continue;
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }; }
+fi
+
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if test "${ac_cv_c_compiler_gnu+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_compiler_gnu=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_compiler_gnu=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GCC=yes
+else
+ GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if test "${ac_cv_prog_cc_g+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_g=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ CFLAGS=""
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_g=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if test "${ac_cv_prog_cc_c89+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_c89=$ac_arg
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { $as_echo "$as_me:$LINENO: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+ xno)
+ { $as_echo "$as_me:$LINENO: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+depcc="$CC" am_compiler_list=
+
+{ $as_echo "$as_me:$LINENO: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if test "${am_cv_CC_dependencies_compiler_type+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named `D' -- because `-MD' means `put the output
+ # in D'.
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CC_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+ case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+ # Solaris 8's {/usr,}/bin/sh.
+ touch sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle `-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # after this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvisualcpp | msvcmsys)
+ # This compiler won't grok `-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CC_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CC_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $am_cv_CC_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; }
+CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
+ am__fastdepCC_TRUE=
+ am__fastdepCC_FALSE='#'
+else
+ am__fastdepCC_TRUE='#'
+ am__fastdepCC_FALSE=
+fi
+
+
+ { $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C99" >&5
+$as_echo_n "checking for $CC option to accept ISO C99... " >&6; }
+if test "${ac_cv_prog_cc_c99+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c99=no
+ac_save_CC=$CC
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <wchar.h>
+#include <stdio.h>
+
+// Check varargs macros. These examples are taken from C99 6.10.3.5.
+#define debug(...) fprintf (stderr, __VA_ARGS__)
+#define showlist(...) puts (#__VA_ARGS__)
+#define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__))
+static void
+test_varargs_macros (void)
+{
+ int x = 1234;
+ int y = 5678;
+ debug ("Flag");
+ debug ("X = %d\n", x);
+ showlist (The first, second, and third items.);
+ report (x>y, "x is %d but y is %d", x, y);
+}
+
+// Check long long types.
+#define BIG64 18446744073709551615ull
+#define BIG32 4294967295ul
+#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0)
+#if !BIG_OK
+ your preprocessor is broken;
+#endif
+#if BIG_OK
+#else
+ your preprocessor is broken;
+#endif
+static long long int bignum = -9223372036854775807LL;
+static unsigned long long int ubignum = BIG64;
+
+struct incomplete_array
+{
+ int datasize;
+ double data[];
+};
+
+struct named_init {
+ int number;
+ const wchar_t *name;
+ double average;
+};
+
+typedef const char *ccp;
+
+static inline int
+test_restrict (ccp restrict text)
+{
+ // See if C++-style comments work.
+ // Iterate through items via the restricted pointer.
+ // Also check for declarations in for loops.
+ for (unsigned int i = 0; *(text+i) != '\0'; ++i)
+ continue;
+ return 0;
+}
+
+// Check varargs and va_copy.
+static void
+test_varargs (const char *format, ...)
+{
+ va_list args;
+ va_start (args, format);
+ va_list args_copy;
+ va_copy (args_copy, args);
+
+ const char *str;
+ int number;
+ float fnumber;
+
+ while (*format)
+ {
+ switch (*format++)
+ {
+ case 's': // string
+ str = va_arg (args_copy, const char *);
+ break;
+ case 'd': // int
+ number = va_arg (args_copy, int);
+ break;
+ case 'f': // float
+ fnumber = va_arg (args_copy, double);
+ break;
+ default:
+ break;
+ }
+ }
+ va_end (args_copy);
+ va_end (args);
+}
+
+int
+main ()
+{
+
+ // Check bool.
+ _Bool success = false;
+
+ // Check restrict.
+ if (test_restrict ("String literal") == 0)
+ success = true;
+ char *restrict newvar = "Another string";
+
+ // Check varargs.
+ test_varargs ("s, d' f .", "string", 65, 34.234);
+ test_varargs_macros ();
+
+ // Check flexible array members.
+ struct incomplete_array *ia =
+ malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10));
+ ia->datasize = 10;
+ for (int i = 0; i < ia->datasize; ++i)
+ ia->data[i] = i * 1.234;
+
+ // Check named initializers.
+ struct named_init ni = {
+ .number = 34,
+ .name = L"Test wide string",
+ .average = 543.34343,
+ };
+
+ ni.number = 58;
+
+ int dynamic_array[ni.number];
+ dynamic_array[ni.number - 1] = 543;
+
+ // work around unused variable warnings
+ return (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == 'x'
+ || dynamic_array[ni.number - 1] != 543);
+
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -std=gnu99 -std=c99 -c99 -AC99 -xc99=all -qlanglvl=extc99
+do
+ CC="$ac_save_CC $ac_arg"
+ rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_prog_cc_c99=$ac_arg
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c99" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c99" in
+ x)
+ { $as_echo "$as_me:$LINENO: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+ xno)
+ { $as_echo "$as_me:$LINENO: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c99"
+ { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c99" >&5
+$as_echo "$ac_cv_prog_cc_c99" >&6; } ;;
+esac
+
+
+
+if test x$ac_cv_prog_cc_c99 = xno ; then
+ { { $as_echo "$as_me:$LINENO: error: No C99 compiler was found." >&5
+$as_echo "$as_me: error: No C99 compiler was found." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+if test "x$CC" != xcc; then
+ { $as_echo "$as_me:$LINENO: checking whether $CC and cc understand -c and -o together" >&5
+$as_echo_n "checking whether $CC and cc understand -c and -o together... " >&6; }
+else
+ { $as_echo "$as_me:$LINENO: checking whether cc understands -c and -o together" >&5
+$as_echo_n "checking whether cc understands -c and -o together... " >&6; }
+fi
+set dummy $CC; ac_cc=`$as_echo "$2" |
+ sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'`
+if { as_var=ac_cv_prog_cc_${ac_cc}_c_o; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+# Make sure it works both with $CC and with simple cc.
+# We do the test twice because some compilers refuse to overwrite an
+# existing .o file with -o, though they will create one.
+ac_try='$CC -c conftest.$ac_ext -o conftest2.$ac_objext >&5'
+rm -f conftest2.*
+if { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ test -f conftest2.$ac_objext && { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); };
+then
+ eval ac_cv_prog_cc_${ac_cc}_c_o=yes
+ if test "x$CC" != xcc; then
+ # Test first that cc exists at all.
+ if { ac_try='cc -c conftest.$ac_ext >&5'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_try='cc -c conftest.$ac_ext -o conftest2.$ac_objext >&5'
+ rm -f conftest2.*
+ if { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ test -f conftest2.$ac_objext && { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); };
+ then
+ # cc works too.
+ :
+ else
+ # cc exists but doesn't like -o.
+ eval ac_cv_prog_cc_${ac_cc}_c_o=no
+ fi
+ fi
+ fi
+else
+ eval ac_cv_prog_cc_${ac_cc}_c_o=no
+fi
+rm -f core conftest*
+
+fi
+if eval test \$ac_cv_prog_cc_${ac_cc}_c_o = yes; then
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define NO_MINUS_C_MINUS_O 1
+_ACEOF
+
+fi
+
+# FIXME: we rely on the cache variable name because
+# there is no other way.
+set dummy $CC
+am_cc=`echo $2 | sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'`
+eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o
+if test "$am_t" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+
+
+# By default we simply use the C compiler to build assembly code.
+
+test "${CCAS+set}" = set || CCAS=$CC
+test "${CCASFLAGS+set}" = set || CCASFLAGS=$CFLAGS
+
+
+
+depcc="$CCAS" am_compiler_list=
+
+{ $as_echo "$as_me:$LINENO: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if test "${am_cv_CCAS_dependencies_compiler_type+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named `D' -- because `-MD' means `put the output
+ # in D'.
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CCAS_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+ # Solaris 8's {/usr,}/bin/sh.
+ touch sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle `-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # after this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvisualcpp | msvcmsys)
+ # This compiler won't grok `-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CCAS_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CCAS_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $am_cv_CCAS_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CCAS_dependencies_compiler_type" >&6; }
+CCASDEPMODE=depmode=$am_cv_CCAS_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CCAS_dependencies_compiler_type" = gcc3; then
+ am__fastdepCCAS_TRUE=
+ am__fastdepCCAS_FALSE='#'
+else
+ am__fastdepCCAS_TRUE='#'
+ am__fastdepCCAS_FALSE=
+fi
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5
+$as_echo_n "checking how to run the C preprocessor... " >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+ CPP=
+fi
+if test -z "$CPP"; then
+ if test "${ac_cv_prog_CPP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ # Double quotes because CPP needs to be expanded
+ for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ # Broken: success on invalid input.
+continue
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ break
+fi
+
+ done
+ ac_cv_prog_CPP=$CPP
+
+fi
+ CPP=$ac_cv_prog_CPP
+else
+ ac_cv_prog_CPP=$CPP
+fi
+{ $as_echo "$as_me:$LINENO: result: $CPP" >&5
+$as_echo "$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ # Broken: success on invalid input.
+continue
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ :
+else
+ { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+{ $as_echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if test "${ac_cv_path_GREP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$GREP"; then
+ ac_path_GREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
+# Check for GNU ac_path_GREP and select it if it is found.
+ # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'GREP' >> "conftest.nl"
+ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ ac_count=`expr $ac_count + 1`
+ if test $ac_count -gt ${ac_path_GREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_GREP="$ac_path_GREP"
+ ac_path_GREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_GREP_found && break 3
+ done
+ done
+done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_GREP"; then
+ { { $as_echo "$as_me:$LINENO: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
+$as_echo "$as_me: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+else
+ ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ $as_echo "$as_me:$LINENO: checking for egrep" >&5
+$as_echo_n "checking for egrep... " >&6; }
+if test "${ac_cv_path_EGREP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+ then ac_cv_path_EGREP="$GREP -E"
+ else
+ if test -z "$EGREP"; then
+ ac_path_EGREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in egrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+ # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'EGREP' >> "conftest.nl"
+ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ ac_count=`expr $ac_count + 1`
+ if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_EGREP="$ac_path_EGREP"
+ ac_path_EGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_EGREP_found && break 3
+ done
+ done
+done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_EGREP"; then
+ { { $as_echo "$as_me:$LINENO: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
+$as_echo "$as_me: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+else
+ ac_cv_path_EGREP=$EGREP
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5
+$as_echo "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ $as_echo "$as_me:$LINENO: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if test "${ac_cv_header_stdc+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_header_stdc=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_header_stdc=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ return 2;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+ac_cv_header_stdc=no
+fi
+rm -rf conftest.dSYM
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+
+
+fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define STDC_HEADERS 1
+_ACEOF
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+
+
+
+
+
+
+
+
+
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do
+as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ eval "$as_ac_Header=yes"
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$as_ac_Header=no"
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+as_val=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ if test "x$as_val" = x""yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+
+ if test "${ac_cv_header_minix_config_h+set}" = set; then
+ { $as_echo "$as_me:$LINENO: checking for minix/config.h" >&5
+$as_echo_n "checking for minix/config.h... " >&6; }
+if test "${ac_cv_header_minix_config_h+set}" = set; then
+ $as_echo_n "(cached) " >&6
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_minix_config_h" >&5
+$as_echo "$ac_cv_header_minix_config_h" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:$LINENO: checking minix/config.h usability" >&5
+$as_echo_n "checking minix/config.h usability... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <minix/config.h>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_header_compiler=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_compiler=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:$LINENO: checking minix/config.h presence" >&5
+$as_echo_n "checking minix/config.h presence... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <minix/config.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ ac_header_preproc=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
+ yes:no: )
+ { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: minix/config.h: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: minix/config.h: proceeding with the compiler's result" >&2;}
+ ac_header_preproc=yes
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: minix/config.h: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: minix/config.h: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: minix/config.h: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: minix/config.h: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: proceeding with the preprocessor's result" >&5
+$as_echo "$as_me: WARNING: minix/config.h: proceeding with the preprocessor's result" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: minix/config.h: in the future, the compiler will take precedence" >&5
+$as_echo "$as_me: WARNING: minix/config.h: in the future, the compiler will take precedence" >&2;}
+ ( cat <<\_ASBOX
+## --------------------------------------- ##
+## Report this to lasse.collin@tukaani.org ##
+## --------------------------------------- ##
+_ASBOX
+ ) | sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+{ $as_echo "$as_me:$LINENO: checking for minix/config.h" >&5
+$as_echo_n "checking for minix/config.h... " >&6; }
+if test "${ac_cv_header_minix_config_h+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_header_minix_config_h=$ac_header_preproc
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_minix_config_h" >&5
+$as_echo "$ac_cv_header_minix_config_h" >&6; }
+
+fi
+if test "x$ac_cv_header_minix_config_h" = x""yes; then
+ MINIX=yes
+else
+ MINIX=
+fi
+
+
+ if test "$MINIX" = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define _POSIX_SOURCE 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define _POSIX_1_SOURCE 2
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define _MINIX 1
+_ACEOF
+
+ fi
+
+
+
+ { $as_echo "$as_me:$LINENO: checking whether it is safe to define __EXTENSIONS__" >&5
+$as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; }
+if test "${ac_cv_safe_to_define___extensions__+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+# define __EXTENSIONS__ 1
+ $ac_includes_default
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_safe_to_define___extensions__=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_safe_to_define___extensions__=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_safe_to_define___extensions__" >&5
+$as_echo "$ac_cv_safe_to_define___extensions__" >&6; }
+ test $ac_cv_safe_to_define___extensions__ = yes &&
+ cat >>confdefs.h <<\_ACEOF
+#define __EXTENSIONS__ 1
+_ACEOF
+
+ cat >>confdefs.h <<\_ACEOF
+#define _ALL_SOURCE 1
+_ACEOF
+
+ cat >>confdefs.h <<\_ACEOF
+#define _GNU_SOURCE 1
+_ACEOF
+
+ cat >>confdefs.h <<\_ACEOF
+#define _POSIX_PTHREAD_SEMANTICS 1
+_ACEOF
+
+ cat >>confdefs.h <<\_ACEOF
+#define _TANDEM_SOURCE 1
+_ACEOF
+
+
+
+if test "x$enable_threads" = xyes; then
+ echo
+ echo "Threading support:"
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+acx_pthread_ok=no
+
+# We used to check for pthread.h first, but this fails if pthread.h
+# requires special compiler flags (e.g. on Tru64 or Sequent).
+# It gets checked for in the link test anyway.
+
+# First of all, check if the user has set any of the PTHREAD_LIBS,
+# etcetera environment variables, and if threads linking works using
+# them:
+if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ save_LIBS="$LIBS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ { $as_echo "$as_me:$LINENO: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5
+$as_echo_n "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... " >&6; }
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char pthread_join ();
+int
+main ()
+{
+return pthread_join ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ acx_pthread_ok=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ { $as_echo "$as_me:$LINENO: result: $acx_pthread_ok" >&5
+$as_echo "$acx_pthread_ok" >&6; }
+ if test x"$acx_pthread_ok" = xno; then
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+ fi
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+fi
+
+# We must check for the threads library under a number of different
+# names; the ordering is very important because some systems
+# (e.g. DEC) have both -lpthread and -lpthreads, where one of the
+# libraries is broken (non-POSIX).
+
+# Create a list of thread flags to try. Items starting with a "-" are
+# C compiler flags, and other items are library names, except for "none"
+# which indicates that we try without any flags at all, and "pthread-config"
+# which is a program returning the flags for the Pth emulation library.
+
+acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
+
+# The ordering *is* (sometimes) important. Some notes on the
+# individual items follow:
+
+# pthreads: AIX (must check this before -lpthread)
+# none: in case threads are in libc; should be tried before -Kthread and
+# other compiler flags to prevent continual compiler warnings
+# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
+# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads)
+# -pthreads: Solaris/gcc
+# -mthreads: Mingw32/gcc, Lynx/gcc
+# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
+# doesn't hurt to check since this sometimes defines pthreads too;
+# also defines -D_REENTRANT)
+# ... -mt is also the pthreads flag for HP/aCC
+# pthread: Linux, etcetera
+# --thread-safe: KAI C++
+# pthread-config: use pthread-config program (for GNU Pth library)
+
+case "${host_cpu}-${host_os}" in
+ *solaris*)
+
+ # On Solaris (at least, for some versions), libc contains stubbed
+ # (non-functional) versions of the pthreads routines, so link-based
+ # tests will erroneously succeed. (We need to link with -pthreads/-mt/
+ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather
+ # a function called by this macro, so we could check for that, but
+ # who knows whether they'll stub that too in a future libc.) So,
+ # we'll just look for -pthreads and -lpthread first:
+
+ acx_pthread_flags="-pthreads pthread -mt -pthread $acx_pthread_flags"
+ ;;
+esac
+
+if test x"$acx_pthread_ok" = xno; then
+for flag in $acx_pthread_flags; do
+
+ case $flag in
+ none)
+ { $as_echo "$as_me:$LINENO: checking whether pthreads work without any flags" >&5
+$as_echo_n "checking whether pthreads work without any flags... " >&6; }
+ ;;
+
+ -*)
+ { $as_echo "$as_me:$LINENO: checking whether pthreads work with $flag" >&5
+$as_echo_n "checking whether pthreads work with $flag... " >&6; }
+ PTHREAD_CFLAGS="$flag"
+ ;;
+
+ pthread-config)
+ # Extract the first word of "pthread-config", so it can be a program name with args.
+set dummy pthread-config; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_acx_pthread_config+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$acx_pthread_config"; then
+ ac_cv_prog_acx_pthread_config="$acx_pthread_config" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_acx_pthread_config="yes"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_prog_acx_pthread_config" && ac_cv_prog_acx_pthread_config="no"
+fi
+fi
+acx_pthread_config=$ac_cv_prog_acx_pthread_config
+if test -n "$acx_pthread_config"; then
+ { $as_echo "$as_me:$LINENO: result: $acx_pthread_config" >&5
+$as_echo "$acx_pthread_config" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ if test x"$acx_pthread_config" = xno; then continue; fi
+ PTHREAD_CFLAGS="`pthread-config --cflags`"
+ PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
+ ;;
+
+ *)
+ { $as_echo "$as_me:$LINENO: checking for the pthreads library -l$flag" >&5
+$as_echo_n "checking for the pthreads library -l$flag... " >&6; }
+ PTHREAD_LIBS="-l$flag"
+ ;;
+ esac
+
+ save_LIBS="$LIBS"
+ save_CFLAGS="$CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+
+ # Check for various functions. We must include pthread.h,
+ # since some functions may be macros. (On the Sequent, we
+ # need a special flag -Kthread to make this header compile.)
+ # We check for pthread_join because it is in -lpthread on IRIX
+ # while pthread_create is in libc. We check for pthread_attr_init
+ # due to DEC craziness with -lpthreads. We check for
+ # pthread_cleanup_push because it is one of the few pthread
+ # functions on Solaris that doesn't have a non-functional libc stub.
+ # We try pthread_create on general principles.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <pthread.h>
+int
+main ()
+{
+pthread_t th; pthread_join(th, 0);
+ pthread_attr_init(0); pthread_cleanup_push(0, 0);
+ pthread_create(0,0,0,0); pthread_cleanup_pop(0);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ acx_pthread_ok=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ { $as_echo "$as_me:$LINENO: result: $acx_pthread_ok" >&5
+$as_echo "$acx_pthread_ok" >&6; }
+ if test "x$acx_pthread_ok" = xyes; then
+ break;
+ fi
+
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+done
+fi
+
+# Various other checks:
+if test "x$acx_pthread_ok" = xyes; then
+ save_LIBS="$LIBS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+
+ # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
+ { $as_echo "$as_me:$LINENO: checking for joinable pthread attribute" >&5
+$as_echo_n "checking for joinable pthread attribute... " >&6; }
+ attr_name=unknown
+ for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <pthread.h>
+int
+main ()
+{
+int attr=$attr; return attr;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ attr_name=$attr; break
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ done
+ { $as_echo "$as_me:$LINENO: result: $attr_name" >&5
+$as_echo "$attr_name" >&6; }
+ if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then
+
+cat >>confdefs.h <<_ACEOF
+#define PTHREAD_CREATE_JOINABLE $attr_name
+_ACEOF
+
+ fi
+
+ { $as_echo "$as_me:$LINENO: checking if more special flags are required for pthreads" >&5
+$as_echo_n "checking if more special flags are required for pthreads... " >&6; }
+ flag=no
+ case "${host_cpu}-${host_os}" in
+ *-aix* | *-freebsd* | *-darwin*) flag="-D_THREAD_SAFE";;
+ *solaris* | *-osf* | *-hpux*) flag="-D_REENTRANT";;
+ esac
+ { $as_echo "$as_me:$LINENO: result: ${flag}" >&5
+$as_echo "${flag}" >&6; }
+ if test "x$flag" != xno; then
+ PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS"
+ fi
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ # More AIX lossage: must compile with xlc_r or cc_r
+ if test x"$GCC" != xyes; then
+ for ac_prog in xlc_r cc_r
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_PTHREAD_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$PTHREAD_CC"; then
+ ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_PTHREAD_CC="$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+PTHREAD_CC=$ac_cv_prog_PTHREAD_CC
+if test -n "$PTHREAD_CC"; then
+ { $as_echo "$as_me:$LINENO: result: $PTHREAD_CC" >&5
+$as_echo "$PTHREAD_CC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$PTHREAD_CC" && break
+done
+test -n "$PTHREAD_CC" || PTHREAD_CC="${CC}"
+
+ else
+ PTHREAD_CC=$CC
+ fi
+else
+ PTHREAD_CC="$CC"
+fi
+
+
+
+
+
+# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
+if test x"$acx_pthread_ok" = xyes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_PTHREAD 1
+_ACEOF
+
+ :
+else
+ acx_pthread_ok=no
+
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+ LIBS="$LIBS $PTHREAD_LIBS"
+ AM_CFLAGS="$AM_CFLAGS $PTHREAD_CFLAGS"
+ CC="$PTHREAD_CC"
+fi
+
+echo
+echo "Initializing Libtool:"
+
+case `pwd` in
+ *\ * | *\ *)
+ { $as_echo "$as_me:$LINENO: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5
+$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;;
+esac
+
+
+
+macro_version='2.2.6'
+macro_revision='1.3012'
+
+
+
+
+
+
+
+
+
+
+
+
+
+ltmain="$ac_aux_dir/ltmain.sh"
+
+{ $as_echo "$as_me:$LINENO: checking for a sed that does not truncate output" >&5
+$as_echo_n "checking for a sed that does not truncate output... " >&6; }
+if test "${ac_cv_path_SED+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/
+ for ac_i in 1 2 3 4 5 6 7; do
+ ac_script="$ac_script$as_nl$ac_script"
+ done
+ echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed
+ $as_unset ac_script || ac_script=
+ if test -z "$SED"; then
+ ac_path_SED_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in sed gsed; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_SED="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_SED" && $as_test_x "$ac_path_SED"; } || continue
+# Check for GNU ac_path_SED and select it if it is found.
+ # Check for GNU $ac_path_SED
+case `"$ac_path_SED" --version 2>&1` in
+*GNU*)
+ ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo '' >> "conftest.nl"
+ "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ ac_count=`expr $ac_count + 1`
+ if test $ac_count -gt ${ac_path_SED_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_SED="$ac_path_SED"
+ ac_path_SED_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_SED_found && break 3
+ done
+ done
+done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_SED"; then
+ { { $as_echo "$as_me:$LINENO: error: no acceptable sed could be found in \$PATH" >&5
+$as_echo "$as_me: error: no acceptable sed could be found in \$PATH" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+else
+ ac_cv_path_SED=$SED
+fi
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_SED" >&5
+$as_echo "$ac_cv_path_SED" >&6; }
+ SED="$ac_cv_path_SED"
+ rm -f conftest.sed
+
+test -z "$SED" && SED=sed
+Xsed="$SED -e 1s/^X//"
+
+
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:$LINENO: checking for fgrep" >&5
+$as_echo_n "checking for fgrep... " >&6; }
+if test "${ac_cv_path_FGREP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1
+ then ac_cv_path_FGREP="$GREP -F"
+ else
+ if test -z "$FGREP"; then
+ ac_path_FGREP_found=false
+ # Loop through the user's path and test for each of PROGNAME-LIST
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in fgrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_FGREP" && $as_test_x "$ac_path_FGREP"; } || continue
+# Check for GNU ac_path_FGREP and select it if it is found.
+ # Check for GNU $ac_path_FGREP
+case `"$ac_path_FGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;;
+*)
+ ac_count=0
+ $as_echo_n 0123456789 >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ $as_echo 'FGREP' >> "conftest.nl"
+ "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ ac_count=`expr $ac_count + 1`
+ if test $ac_count -gt ${ac_path_FGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_FGREP="$ac_path_FGREP"
+ ac_path_FGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+ $ac_path_FGREP_found && break 3
+ done
+ done
+done
+IFS=$as_save_IFS
+ if test -z "$ac_cv_path_FGREP"; then
+ { { $as_echo "$as_me:$LINENO: error: no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
+$as_echo "$as_me: error: no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+else
+ ac_cv_path_FGREP=$FGREP
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_FGREP" >&5
+$as_echo "$ac_cv_path_FGREP" >&6; }
+ FGREP="$ac_cv_path_FGREP"
+
+
+test -z "$GREP" && GREP=grep
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then
+ withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ { $as_echo "$as_me:$LINENO: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [\\/]* | ?:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the pathname of ld
+ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ { $as_echo "$as_me:$LINENO: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+ { $as_echo "$as_me:$LINENO: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if test "${lt_cv_path_LD+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$LD"; then
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some variants of GNU ld only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break
+ ;;
+ *)
+ test "$with_gnu_ld" != yes && break
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ { $as_echo "$as_me:$LINENO: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && { { $as_echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5
+$as_echo "$as_me: error: no acceptable ld found in \$PATH" >&2;}
+ { (exit 1); exit 1; }; }
+{ $as_echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if test "${lt_cv_prog_gnu_ld+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ lt_cv_prog_gnu_ld=yes
+ ;;
+*)
+ lt_cv_prog_gnu_ld=no
+ ;;
+esac
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_gnu_ld" >&5
+$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:$LINENO: checking for BSD- or MS-compatible name lister (nm)" >&5
+$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; }
+if test "${lt_cv_path_NM+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$NM"; then
+ # Let the user override the test.
+ lt_cv_path_NM="$NM"
+else
+ lt_nm_to_check="${ac_tool_prefix}nm"
+ if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+ lt_nm_to_check="$lt_nm_to_check nm"
+ fi
+ for lt_tmp_nm in $lt_nm_to_check; do
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ tmp_nm="$ac_dir/$lt_tmp_nm"
+ if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ # nm: unknown option "B" ignored
+ # Tru64's nm complains that /dev/null is an invalid object file
+ case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in
+ */dev/null* | *'Invalid file or object type'*)
+ lt_cv_path_NM="$tmp_nm -B"
+ break
+ ;;
+ *)
+ case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in
+ */dev/null*)
+ lt_cv_path_NM="$tmp_nm -p"
+ break
+ ;;
+ *)
+ lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+ continue # so that we can try to find one that supports BSD flags
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+ done
+ : ${lt_cv_path_NM=no}
+fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_path_NM" >&5
+$as_echo "$lt_cv_path_NM" >&6; }
+if test "$lt_cv_path_NM" != "no"; then
+ NM="$lt_cv_path_NM"
+else
+ # Didn't find any BSD compatible name lister, look for dumpbin.
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in "dumpbin -symbols" "link -dump -symbols"
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_DUMPBIN+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$DUMPBIN"; then
+ ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+DUMPBIN=$ac_cv_prog_DUMPBIN
+if test -n "$DUMPBIN"; then
+ { $as_echo "$as_me:$LINENO: result: $DUMPBIN" >&5
+$as_echo "$DUMPBIN" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$DUMPBIN" && break
+ done
+fi
+if test -z "$DUMPBIN"; then
+ ac_ct_DUMPBIN=$DUMPBIN
+ for ac_prog in "dumpbin -symbols" "link -dump -symbols"
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_DUMPBIN+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_DUMPBIN"; then
+ ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_DUMPBIN="$ac_prog"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN
+if test -n "$ac_ct_DUMPBIN"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_DUMPBIN" >&5
+$as_echo "$ac_ct_DUMPBIN" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_DUMPBIN" && break
+done
+
+ if test "x$ac_ct_DUMPBIN" = x; then
+ DUMPBIN=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ DUMPBIN=$ac_ct_DUMPBIN
+ fi
+fi
+
+
+ if test "$DUMPBIN" != ":"; then
+ NM="$DUMPBIN"
+ fi
+fi
+test -z "$NM" && NM=nm
+
+
+
+
+
+
+{ $as_echo "$as_me:$LINENO: checking the name lister ($NM) interface" >&5
+$as_echo_n "checking the name lister ($NM) interface... " >&6; }
+if test "${lt_cv_nm_interface+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_nm_interface="BSD nm"
+ echo "int some_variable = 0;" > conftest.$ac_ext
+ (eval echo "\"\$as_me:7392: $ac_compile\"" >&5)
+ (eval "$ac_compile" 2>conftest.err)
+ cat conftest.err >&5
+ (eval echo "\"\$as_me:7395: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
+ (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
+ cat conftest.err >&5
+ (eval echo "\"\$as_me:7398: output\"" >&5)
+ cat conftest.out >&5
+ if $GREP 'External.*some_variable' conftest.out > /dev/null; then
+ lt_cv_nm_interface="MS dumpbin"
+ fi
+ rm -f conftest*
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_nm_interface" >&5
+$as_echo "$lt_cv_nm_interface" >&6; }
+
+# find the maximum length of command line arguments
+{ $as_echo "$as_me:$LINENO: checking the maximum length of command line arguments" >&5
+$as_echo_n "checking the maximum length of command line arguments... " >&6; }
+if test "${lt_cv_sys_max_cmd_len+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ i=0
+ teststring="ABCD"
+
+ case $build_os in
+ msdosdjgpp*)
+ # On DJGPP, this test can blow up pretty badly due to problems in libc
+ # (any single argument exceeding 2000 bytes causes a buffer overrun
+ # during glob expansion). Even if it were fixed, the result of this
+ # check would be larger than it should be.
+ lt_cv_sys_max_cmd_len=12288; # 12K is about right
+ ;;
+
+ gnu*)
+ # Under GNU Hurd, this test is not required because there is
+ # no limit to the length of command line arguments.
+ # Libtool will interpret -1 as no limit whatsoever
+ lt_cv_sys_max_cmd_len=-1;
+ ;;
+
+ cygwin* | mingw* | cegcc*)
+ # On Win9x/ME, this test blows up -- it succeeds, but takes
+ # about 5 minutes as the teststring grows exponentially.
+ # Worse, since 9x/ME are not pre-emptively multitasking,
+ # you end up with a "frozen" computer, even though with patience
+ # the test eventually succeeds (with a max line length of 256k).
+ # Instead, let's just punt: use the minimum linelength reported by
+ # all of the supported platforms: 8192 (on NT/2K/XP).
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ amigaos*)
+ # On AmigaOS with pdksh, this test takes hours, literally.
+ # So we just punt and use a minimum line length of 8192.
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ netbsd* | freebsd* | openbsd* | darwin* | dragonfly*)
+ # This has been around since 386BSD, at least. Likely further.
+ if test -x /sbin/sysctl; then
+ lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+ elif test -x /usr/sbin/sysctl; then
+ lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
+ else
+ lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs
+ fi
+ # And add a safety zone
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ ;;
+
+ interix*)
+ # We know the value 262144 and hardcode it with a safety zone (like BSD)
+ lt_cv_sys_max_cmd_len=196608
+ ;;
+
+ osf*)
+ # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+ # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+ # nice to cause kernel panics so lets avoid the loop below.
+ # First set a reasonable default.
+ lt_cv_sys_max_cmd_len=16384
+ #
+ if test -x /sbin/sysconfig; then
+ case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+ *1*) lt_cv_sys_max_cmd_len=-1 ;;
+ esac
+ fi
+ ;;
+ sco3.2v5*)
+ lt_cv_sys_max_cmd_len=102400
+ ;;
+ sysv5* | sco5v6* | sysv4.2uw2*)
+ kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+ if test -n "$kargmax"; then
+ lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'`
+ else
+ lt_cv_sys_max_cmd_len=32768
+ fi
+ ;;
+ *)
+ lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+ if test -n "$lt_cv_sys_max_cmd_len"; then
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ else
+ # Make teststring a little bigger before we do anything with it.
+ # a 1K string should be a reasonable start.
+ for i in 1 2 3 4 5 6 7 8 ; do
+ teststring=$teststring$teststring
+ done
+ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+ # If test is not a shell built-in, we'll probably end up computing a
+ # maximum length that is only half of the actual maximum length, but
+ # we can't tell.
+ while { test "X"`$SHELL $0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \
+ = "XX$teststring$teststring"; } >/dev/null 2>&1 &&
+ test $i != 17 # 1/2 MB should be enough
+ do
+ i=`expr $i + 1`
+ teststring=$teststring$teststring
+ done
+ # Only check the string length outside the loop.
+ lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+ teststring=
+ # Add a significant safety factor because C++ compilers can tack on
+ # massive amounts of additional arguments before passing them to the
+ # linker. It appears as though 1/2 is a usable value.
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+ fi
+ ;;
+ esac
+
+fi
+
+if test -n "$lt_cv_sys_max_cmd_len" ; then
+ { $as_echo "$as_me:$LINENO: result: $lt_cv_sys_max_cmd_len" >&5
+$as_echo "$lt_cv_sys_max_cmd_len" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: none" >&5
+$as_echo "none" >&6; }
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+
+
+
+
+
+
+: ${CP="cp -f"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+
+{ $as_echo "$as_me:$LINENO: checking whether the shell understands some XSI constructs" >&5
+$as_echo_n "checking whether the shell understands some XSI constructs... " >&6; }
+# Try some XSI features
+xsi_shell=no
+( _lt_dummy="a/b/c"
+ test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \
+ = c,a/b,, \
+ && eval 'test $(( 1 + 1 )) -eq 2 \
+ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \
+ && xsi_shell=yes
+{ $as_echo "$as_me:$LINENO: result: $xsi_shell" >&5
+$as_echo "$xsi_shell" >&6; }
+
+
+{ $as_echo "$as_me:$LINENO: checking whether the shell understands \"+=\"" >&5
+$as_echo_n "checking whether the shell understands \"+=\"... " >&6; }
+lt_shell_append=no
+( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \
+ >/dev/null 2>&1 \
+ && lt_shell_append=yes
+{ $as_echo "$as_me:$LINENO: result: $lt_shell_append" >&5
+$as_echo "$lt_shell_append" >&6; }
+
+
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ lt_unset=unset
+else
+ lt_unset=false
+fi
+
+
+
+
+
+# test EBCDIC or ASCII
+case `echo X|tr X '\101'` in
+ A) # ASCII based system
+ # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
+ lt_SP2NL='tr \040 \012'
+ lt_NL2SP='tr \015\012 \040\040'
+ ;;
+ *) # EBCDIC based system
+ lt_SP2NL='tr \100 \n'
+ lt_NL2SP='tr \r\n \100\100'
+ ;;
+esac
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:$LINENO: checking for $LD option to reload object files" >&5
+$as_echo_n "checking for $LD option to reload object files... " >&6; }
+if test "${lt_cv_ld_reload_flag+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_ld_reload_flag='-r'
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_ld_reload_flag" >&5
+$as_echo "$lt_cv_ld_reload_flag" >&6; }
+reload_flag=$lt_cv_ld_reload_flag
+case $reload_flag in
+"" | " "*) ;;
+*) reload_flag=" $reload_flag" ;;
+esac
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+case $host_os in
+ darwin*)
+ if test "$GCC" = yes; then
+ reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
+ else
+ reload_cmds='$LD$reload_flag -o $output$reload_objs'
+ fi
+ ;;
+esac
+
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args.
+set dummy ${ac_tool_prefix}objdump; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_OBJDUMP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$OBJDUMP"; then
+ ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+OBJDUMP=$ac_cv_prog_OBJDUMP
+if test -n "$OBJDUMP"; then
+ { $as_echo "$as_me:$LINENO: result: $OBJDUMP" >&5
+$as_echo "$OBJDUMP" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OBJDUMP"; then
+ ac_ct_OBJDUMP=$OBJDUMP
+ # Extract the first word of "objdump", so it can be a program name with args.
+set dummy objdump; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_OBJDUMP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_OBJDUMP"; then
+ ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_OBJDUMP="objdump"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP
+if test -n "$ac_ct_OBJDUMP"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_OBJDUMP" >&5
+$as_echo "$ac_ct_OBJDUMP" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_OBJDUMP" = x; then
+ OBJDUMP="false"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ OBJDUMP=$ac_ct_OBJDUMP
+ fi
+else
+ OBJDUMP="$ac_cv_prog_OBJDUMP"
+fi
+
+test -z "$OBJDUMP" && OBJDUMP=objdump
+
+
+
+
+
+
+{ $as_echo "$as_me:$LINENO: checking how to recognize dependent libraries" >&5
+$as_echo_n "checking how to recognize dependent libraries... " >&6; }
+if test "${lt_cv_deplibs_check_method+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# `unknown' -- same as none, but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by making test program.
+# 'file_magic [[regex]]' -- check by looking for files in library path
+# which responds to the $file_magic_cmd with a given extended regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
+
+case $host_os in
+aix[4-9]*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+beos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+bsdi[45]*)
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ ;;
+
+cygwin*)
+ # func_win32_libid is a shell function defined in ltmain.sh
+ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+ lt_cv_file_magic_cmd='func_win32_libid'
+ ;;
+
+mingw* | pw32*)
+ # Base MSYS/MinGW do not provide the 'file' command needed by
+ # func_win32_libid shell function, so use a weaker test based on 'objdump',
+ # unless we find 'file', for example because we are cross-compiling.
+ if ( file / ) >/dev/null 2>&1; then
+ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+ lt_cv_file_magic_cmd='func_win32_libid'
+ else
+ lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ fi
+ ;;
+
+cegcc)
+ # use the weaker test based on 'objdump'. See mingw*.
+ lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ ;;
+
+darwin* | rhapsody*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+freebsd* | dragonfly*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+ case $host_cpu in
+ i*86 )
+ # Not sure whether the presence of OpenBSD here was a mistake.
+ # Let's accept both of them until this is cleared up.
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ ;;
+ esac
+ else
+ lt_cv_deplibs_check_method=pass_all
+ fi
+ ;;
+
+gnu*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+hpux10.20* | hpux11*)
+ lt_cv_file_magic_cmd=/usr/bin/file
+ case $host_cpu in
+ ia64*)
+ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64'
+ lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
+ ;;
+ hppa*64*)
+ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]'
+ lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
+ ;;
+ *)
+ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9].[0-9]) shared library'
+ lt_cv_file_magic_test_file=/usr/lib/libc.sl
+ ;;
+ esac
+ ;;
+
+interix[3-9]*)
+ # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here
+ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$'
+ ;;
+
+irix5* | irix6* | nonstopux*)
+ case $LD in
+ *-32|*"-32 ") libmagic=32-bit;;
+ *-n32|*"-n32 ") libmagic=N32;;
+ *-64|*"-64 ") libmagic=64-bit;;
+ *) libmagic=never-match;;
+ esac
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+# This must be Linux ELF.
+linux* | k*bsd*-gnu)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+netbsd*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$'
+ fi
+ ;;
+
+newos6*)
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libnls.so
+ ;;
+
+*nto* | *qnx*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+openbsd*)
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
+ fi
+ ;;
+
+osf3* | osf4* | osf5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+rdos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+solaris*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv4 | sysv4.3*)
+ case $host_vendor in
+ motorola)
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]'
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+ ;;
+ ncr)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ sequent)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )'
+ ;;
+ sni)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib"
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+ siemens)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ pc)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ esac
+ ;;
+
+tpf*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+esac
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_deplibs_check_method" >&5
+$as_echo "$lt_cv_deplibs_check_method" >&6; }
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+
+
+
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ar; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_AR+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$AR"; then
+ ac_cv_prog_AR="$AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_AR="${ac_tool_prefix}ar"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+AR=$ac_cv_prog_AR
+if test -n "$AR"; then
+ { $as_echo "$as_me:$LINENO: result: $AR" >&5
+$as_echo "$AR" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_AR"; then
+ ac_ct_AR=$AR
+ # Extract the first word of "ar", so it can be a program name with args.
+set dummy ar; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_AR+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_AR"; then
+ ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_AR="ar"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_AR=$ac_cv_prog_ac_ct_AR
+if test -n "$ac_ct_AR"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_AR" >&5
+$as_echo "$ac_ct_AR" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_AR" = x; then
+ AR="false"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ AR=$ac_ct_AR
+ fi
+else
+ AR="$ac_cv_prog_AR"
+fi
+
+test -z "$AR" && AR=ar
+test -z "$AR_FLAGS" && AR_FLAGS=cru
+
+
+
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_STRIP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$STRIP"; then
+ ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+ { $as_echo "$as_me:$LINENO: result: $STRIP" >&5
+$as_echo "$STRIP" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+ ac_ct_STRIP=$STRIP
+ # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_STRIP"; then
+ ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_STRIP="strip"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_STRIP" >&5
+$as_echo "$ac_ct_STRIP" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_STRIP" = x; then
+ STRIP=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ STRIP=$ac_ct_STRIP
+ fi
+else
+ STRIP="$ac_cv_prog_STRIP"
+fi
+
+test -z "$STRIP" && STRIP=:
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_RANLIB+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$RANLIB"; then
+ ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+RANLIB=$ac_cv_prog_RANLIB
+if test -n "$RANLIB"; then
+ { $as_echo "$as_me:$LINENO: result: $RANLIB" >&5
+$as_echo "$RANLIB" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_RANLIB"; then
+ ac_ct_RANLIB=$RANLIB
+ # Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_RANLIB"; then
+ ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_RANLIB="ranlib"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+if test -n "$ac_ct_RANLIB"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5
+$as_echo "$ac_ct_RANLIB" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_RANLIB" = x; then
+ RANLIB=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ RANLIB=$ac_ct_RANLIB
+ fi
+else
+ RANLIB="$ac_cv_prog_RANLIB"
+fi
+
+test -z "$RANLIB" && RANLIB=:
+
+
+
+
+
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+ case $host_os in
+ openbsd*)
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib"
+ ;;
+ *)
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib"
+ ;;
+ esac
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
+fi
+
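+# With the defaults above (AR=ar, AR_FLAGS=cru) and ranlib available, the
+# old_archive_cmds chain amounts to roughly the following two steps for a
+# static library; the file names are placeholders:
+#
+#   ar cru libexample.a example.o
+#   ranlib libexample.a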
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+{ $as_echo "$as_me:$LINENO: checking command to parse $NM output from $compiler object" >&5
+$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; }
+if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix. What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[BCDEGRST]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([_A-Za-z][_A-Za-z0-9]*\)'
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+ symcode='[BCDT]'
+ ;;
+cygwin* | mingw* | pw32* | cegcc*)
+ symcode='[ABCDGISTW]'
+ ;;
+hpux*)
+ if test "$host_cpu" = ia64; then
+ symcode='[ABCDEGRST]'
+ fi
+ ;;
+irix* | nonstopux*)
+ symcode='[BCDEGRST]'
+ ;;
+osf*)
+ symcode='[BCDEGQRST]'
+ ;;
+solaris*)
+ symcode='[BDRT]'
+ ;;
+sco3.2v5*)
+ symcode='[DT]'
+ ;;
+sysv4.2uw2*)
+ symcode='[DT]'
+ ;;
+sysv5* | sco5v6* | unixware* | OpenUNIX*)
+ symcode='[ABDT]'
+ ;;
+sysv4)
+ symcode='[DFNSTU]'
+ ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+case `$NM -V 2>&1` in
+*GNU* | *'with BFD'*)
+ symcode='[ABCDGIRSTW]' ;;
+esac
+
+# Transform an extracted symbol line into a proper C declaration.
+# Some systems (esp. on ia64) link data and code symbols differently,
+# so use this general approach.
+lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'"
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'"
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $build_os in
+mingw*)
+ opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+ ;;
+esac
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+ # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+ symxfrm="\\1 $ac_symprfx\\2 \\2"
+
+ # Write the raw and C identifiers.
+ if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+ # Fake it for dumpbin and say T for any non-static function
+ # and D for any global variable.
+ # Also find C++ and __fastcall symbols from MSVC++,
+ # which start with @ or ?.
+ lt_cv_sys_global_symbol_pipe="$AWK '"\
+" {last_section=section; section=\$ 3};"\
+" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
+" \$ 0!~/External *\|/{next};"\
+" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
+" {if(hide[section]) next};"\
+" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\
+" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\
+" s[1]~/^[@?]/{print s[1], s[1]; next};"\
+" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\
+" ' prfx=^$ac_symprfx"
+ else
+ lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+ fi
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+
+ rm -f conftest*
+ cat > conftest.$ac_ext <<_LT_EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(void);
+void nm_test_func(void){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+_LT_EOF
+
+ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if { (eval echo "$as_me:$LINENO: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist\"") >&5
+ (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && test -s "$nlist"; then
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+ if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<_LT_EOF > conftest.$ac_ext
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_LT_EOF
+ # Now generate the symbol file.
+ eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
+
+ cat <<_LT_EOF >> conftest.$ac_ext
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ void *address;
+}
+lt__PROGRAM__LTX_preloaded_symbols[] =
+{
+ { "@PROGRAM@", (void *) 0 },
+_LT_EOF
+ $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+ cat <<\_LT_EOF >> conftest.$ac_ext
+ {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt__PROGRAM__LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+_LT_EOF
+ # Now try linking the two files.
+ mv conftest.$ac_objext conftstm.$ac_objext
+ lt_save_LIBS="$LIBS"
+ lt_save_CFLAGS="$CFLAGS"
+ LIBS="conftstm.$ac_objext"
+ CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag"
+ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && test -s conftest${ac_exeext}; then
+ pipe_works=yes
+ fi
+ LIBS="$lt_save_LIBS"
+ CFLAGS="$lt_save_CFLAGS"
+ else
+ echo "cannot find nm_test_func in $nlist" >&5
+ fi
+ else
+ echo "cannot find nm_test_var in $nlist" >&5
+ fi
+ else
+ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5
+ fi
+ else
+ echo "$progname: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ fi
+ rm -rf conftest* conftst*
+
+ # Do not use the global_symbol_pipe unless it works.
+ if test "$pipe_works" = yes; then
+ break
+ else
+ lt_cv_sys_global_symbol_pipe=
+ fi
+done
+
+fi
+
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+ lt_cv_sys_global_symbol_to_cdecl=
+fi
+if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
+ { $as_echo "$as_me:$LINENO: result: failed" >&5
+$as_echo "failed" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: ok" >&5
+$as_echo "ok" >&6; }
+fi
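+# A simplified illustration of the cached symbol pipe: for BSD-style nm
+# output such as "0000000000000000 T nm_test_func", the sed command built
+# above reduces each line to "<symbol code> <raw name> <C name>", e.g.
+# "T nm_test_func nm_test_func" (the raw name gains a leading underscore
+# when $ac_symprfx is "_"). Kept as a comment; the address and symbol are
+# only examples.
+#
+#   echo '0000000000000000 T nm_test_func' | \
+#     sed -n -e 's/^.*[ ]\([BCDEGRST][BCDEGRST]*\)[ ][ ]*\([_A-Za-z][_A-Za-z0-9]*\)$/\1 \2 \2/p'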
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Check whether --enable-libtool-lock was given.
+if test "${enable_libtool_lock+set}" = set; then
+ enableval=$enable_libtool_lock;
+fi
+
+test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+ia64-*-hpux*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *ELF-32*)
+ HPUX_IA64_MODE="32"
+ ;;
+ *ELF-64*)
+ HPUX_IA64_MODE="64"
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+*-*-irix6*)
+ # Find out which ABI we are using.
+ echo '#line 8589 "configure"' > conftest.$ac_ext
+ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ if test "$lt_cv_prog_gnu_ld" = yes; then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -melf32bsmip"
+ ;;
+ *N32*)
+ LD="${LD-ld} -melf32bmipn32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -melf64bmip"
+ ;;
+ esac
+ else
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -32"
+ ;;
+ *N32*)
+ LD="${LD-ld} -n32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -64"
+ ;;
+ esac
+ fi
+ fi
+ rm -rf conftest*
+ ;;
+
+x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \
+s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ case `/usr/bin/file conftest.o` in
+ *32-bit*)
+ case $host in
+ x86_64-*kfreebsd*-gnu)
+ LD="${LD-ld} -m elf_i386_fbsd"
+ ;;
+ x86_64-*linux*)
+ LD="${LD-ld} -m elf_i386"
+ ;;
+ ppc64-*linux*|powerpc64-*linux*)
+ LD="${LD-ld} -m elf32ppclinux"
+ ;;
+ s390x-*linux*)
+ LD="${LD-ld} -m elf_s390"
+ ;;
+ sparc64-*linux*)
+ LD="${LD-ld} -m elf32_sparc"
+ ;;
+ esac
+ ;;
+ *64-bit*)
+ case $host in
+ x86_64-*kfreebsd*-gnu)
+ LD="${LD-ld} -m elf_x86_64_fbsd"
+ ;;
+ x86_64-*linux*)
+ LD="${LD-ld} -m elf_x86_64"
+ ;;
+ ppc*-*linux*|powerpc*-*linux*)
+ LD="${LD-ld} -m elf64ppc"
+ ;;
+ s390*-*linux*|s390*-*tpf*)
+ LD="${LD-ld} -m elf64_s390"
+ ;;
+ sparc*-*linux*)
+ LD="${LD-ld} -m elf64_sparc"
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+
+*-*-sco3.2v5*)
+ # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -belf"
+ { $as_echo "$as_me:$LINENO: checking whether the C compiler needs -belf" >&5
+$as_echo_n "checking whether the C compiler needs -belf... " >&6; }
+if test "${lt_cv_cc_needs_belf+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ lt_cv_cc_needs_belf=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ lt_cv_cc_needs_belf=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_cc_needs_belf" >&5
+$as_echo "$lt_cv_cc_needs_belf" >&6; }
+ if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+ CFLAGS="$SAVE_CFLAGS"
+ fi
+ ;;
+sparc*-*solaris*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ case `/usr/bin/file conftest.o` in
+ *64-bit*)
+ case $lt_cv_prog_gnu_ld in
+ yes*) LD="${LD-ld} -m elf64_sparc" ;;
+ *)
+ if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
+ LD="${LD-ld} -64"
+ fi
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+esac
+
+need_locks="$enable_libtool_lock"
+
+
+ case $host_os in
+ rhapsody* | darwin*)
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args.
+set dummy ${ac_tool_prefix}dsymutil; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_DSYMUTIL+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$DSYMUTIL"; then
+ ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+DSYMUTIL=$ac_cv_prog_DSYMUTIL
+if test -n "$DSYMUTIL"; then
+ { $as_echo "$as_me:$LINENO: result: $DSYMUTIL" >&5
+$as_echo "$DSYMUTIL" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_DSYMUTIL"; then
+ ac_ct_DSYMUTIL=$DSYMUTIL
+ # Extract the first word of "dsymutil", so it can be a program name with args.
+set dummy dsymutil; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_DSYMUTIL+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_DSYMUTIL"; then
+ ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_DSYMUTIL="dsymutil"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL
+if test -n "$ac_ct_DSYMUTIL"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_DSYMUTIL" >&5
+$as_echo "$ac_ct_DSYMUTIL" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_DSYMUTIL" = x; then
+ DSYMUTIL=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ DSYMUTIL=$ac_ct_DSYMUTIL
+ fi
+else
+ DSYMUTIL="$ac_cv_prog_DSYMUTIL"
+fi
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args.
+set dummy ${ac_tool_prefix}nmedit; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_NMEDIT+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$NMEDIT"; then
+ ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+NMEDIT=$ac_cv_prog_NMEDIT
+if test -n "$NMEDIT"; then
+ { $as_echo "$as_me:$LINENO: result: $NMEDIT" >&5
+$as_echo "$NMEDIT" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_NMEDIT"; then
+ ac_ct_NMEDIT=$NMEDIT
+ # Extract the first word of "nmedit", so it can be a program name with args.
+set dummy nmedit; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_NMEDIT+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_NMEDIT"; then
+ ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_NMEDIT="nmedit"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT
+if test -n "$ac_ct_NMEDIT"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_NMEDIT" >&5
+$as_echo "$ac_ct_NMEDIT" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_NMEDIT" = x; then
+ NMEDIT=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ NMEDIT=$ac_ct_NMEDIT
+ fi
+else
+ NMEDIT="$ac_cv_prog_NMEDIT"
+fi
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args.
+set dummy ${ac_tool_prefix}lipo; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_LIPO+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$LIPO"; then
+ ac_cv_prog_LIPO="$LIPO" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_LIPO="${ac_tool_prefix}lipo"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+LIPO=$ac_cv_prog_LIPO
+if test -n "$LIPO"; then
+ { $as_echo "$as_me:$LINENO: result: $LIPO" >&5
+$as_echo "$LIPO" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_LIPO"; then
+ ac_ct_LIPO=$LIPO
+ # Extract the first word of "lipo", so it can be a program name with args.
+set dummy lipo; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_LIPO+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_LIPO"; then
+ ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_LIPO="lipo"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO
+if test -n "$ac_ct_LIPO"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_LIPO" >&5
+$as_echo "$ac_ct_LIPO" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_LIPO" = x; then
+ LIPO=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ LIPO=$ac_ct_LIPO
+ fi
+else
+ LIPO="$ac_cv_prog_LIPO"
+fi
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args.
+set dummy ${ac_tool_prefix}otool; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_OTOOL+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$OTOOL"; then
+ ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_OTOOL="${ac_tool_prefix}otool"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+OTOOL=$ac_cv_prog_OTOOL
+if test -n "$OTOOL"; then
+ { $as_echo "$as_me:$LINENO: result: $OTOOL" >&5
+$as_echo "$OTOOL" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OTOOL"; then
+ ac_ct_OTOOL=$OTOOL
+ # Extract the first word of "otool", so it can be a program name with args.
+set dummy otool; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_OTOOL+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_OTOOL"; then
+ ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_OTOOL="otool"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL
+if test -n "$ac_ct_OTOOL"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_OTOOL" >&5
+$as_echo "$ac_ct_OTOOL" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_OTOOL" = x; then
+ OTOOL=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ OTOOL=$ac_ct_OTOOL
+ fi
+else
+ OTOOL="$ac_cv_prog_OTOOL"
+fi
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args.
+set dummy ${ac_tool_prefix}otool64; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_OTOOL64+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$OTOOL64"; then
+ ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+OTOOL64=$ac_cv_prog_OTOOL64
+if test -n "$OTOOL64"; then
+ { $as_echo "$as_me:$LINENO: result: $OTOOL64" >&5
+$as_echo "$OTOOL64" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OTOOL64"; then
+ ac_ct_OTOOL64=$OTOOL64
+ # Extract the first word of "otool64", so it can be a program name with args.
+set dummy otool64; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_OTOOL64+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_OTOOL64"; then
+ ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_OTOOL64="otool64"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64
+if test -n "$ac_ct_OTOOL64"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_OTOOL64" >&5
+$as_echo "$ac_ct_OTOOL64" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_OTOOL64" = x; then
+ OTOOL64=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ OTOOL64=$ac_ct_OTOOL64
+ fi
+else
+ OTOOL64="$ac_cv_prog_OTOOL64"
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ { $as_echo "$as_me:$LINENO: checking for -single_module linker flag" >&5
+$as_echo_n "checking for -single_module linker flag... " >&6; }
+if test "${lt_cv_apple_cc_single_mod+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_apple_cc_single_mod=no
+ if test -z "${LT_MULTI_MODULE}"; then
+ # By default we will add the -single_module flag. You can override
+ # by either setting the environment variable LT_MULTI_MODULE
+ # non-empty at configure time, or by adding -multi_module to the
+ # link flags.
+ rm -rf libconftest.dylib*
+ echo "int foo(void){return 1;}" > conftest.c
+ echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+-dynamiclib -Wl,-single_module conftest.c" >&5
+ $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
+ _lt_result=$?
+ if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then
+ lt_cv_apple_cc_single_mod=yes
+ else
+ cat conftest.err >&5
+ fi
+ rm -rf libconftest.dylib*
+ rm -f conftest.*
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_apple_cc_single_mod" >&5
+$as_echo "$lt_cv_apple_cc_single_mod" >&6; }
+ { $as_echo "$as_me:$LINENO: checking for -exported_symbols_list linker flag" >&5
+$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; }
+if test "${lt_cv_ld_exported_symbols_list+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_ld_exported_symbols_list=no
+ save_LDFLAGS=$LDFLAGS
+ echo "_main" > conftest.sym
+ LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ lt_cv_ld_exported_symbols_list=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ lt_cv_ld_exported_symbols_list=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_ld_exported_symbols_list" >&5
+$as_echo "$lt_cv_ld_exported_symbols_list" >&6; }
+ case $host_os in
+ rhapsody* | darwin1.[012])
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;;
+ darwin1.*)
+ _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+ darwin*) # darwin 5.x on
+ # On Mac OS X 10.5 and later the deployment target defaults to the OS
+ # version; on x86 systems running 10.4 it defaults to 10.4. Don't you
+ # love it?
+ case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in
+ 10.0,*86*-darwin8*|10.0,*-darwin[91]*)
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+ 10.[012]*)
+ _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+ 10.*)
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+ esac
+ ;;
+ esac
+ if test "$lt_cv_apple_cc_single_mod" = "yes"; then
+ _lt_dar_single_mod='$single_module'
+ fi
+ if test "$lt_cv_ld_exported_symbols_list" = "yes"; then
+ _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym'
+ else
+ _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}'
+ fi
+ if test "$DSYMUTIL" != ":"; then
+ _lt_dsymutil='~$DSYMUTIL $lib || :'
+ else
+ _lt_dsymutil=
+ fi
+ ;;
+ esac
+
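+# A rough sketch of what the Darwin results above contribute to a shared
+# library link command (names are placeholders and the exact flags depend
+# on the outcomes of the checks):
+#
+#   $CC -dynamiclib -Wl,-single_module \
+#       -Wl,-exported_symbols_list,.libs/libfoo-symbols.expsym \
+#       -o .libs/libfoo.dylib foo.o
+#   dsymutil .libs/libfoo.dylib || :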
+
+for ac_header in dlfcn.h
+do
+as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ eval "$as_ac_Header=yes"
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$as_ac_Header=no"
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+as_val=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ if test "x$as_val" = x""yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+
+# Set options
+enable_win32_dll=yes
+
+case $host in
+*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-cegcc*)
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}as", so it can be a program name with args.
+set dummy ${ac_tool_prefix}as; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_AS+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$AS"; then
+ ac_cv_prog_AS="$AS" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_AS="${ac_tool_prefix}as"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+AS=$ac_cv_prog_AS
+if test -n "$AS"; then
+ { $as_echo "$as_me:$LINENO: result: $AS" >&5
+$as_echo "$AS" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_AS"; then
+ ac_ct_AS=$AS
+ # Extract the first word of "as", so it can be a program name with args.
+set dummy as; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_AS+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_AS"; then
+ ac_cv_prog_ac_ct_AS="$ac_ct_AS" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_AS="as"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_AS=$ac_cv_prog_ac_ct_AS
+if test -n "$ac_ct_AS"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_AS" >&5
+$as_echo "$ac_ct_AS" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_AS" = x; then
+ AS="false"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ AS=$ac_ct_AS
+ fi
+else
+ AS="$ac_cv_prog_AS"
+fi
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args.
+set dummy ${ac_tool_prefix}dlltool; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_DLLTOOL+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$DLLTOOL"; then
+ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+DLLTOOL=$ac_cv_prog_DLLTOOL
+if test -n "$DLLTOOL"; then
+ { $as_echo "$as_me:$LINENO: result: $DLLTOOL" >&5
+$as_echo "$DLLTOOL" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_DLLTOOL"; then
+ ac_ct_DLLTOOL=$DLLTOOL
+ # Extract the first word of "dlltool", so it can be a program name with args.
+set dummy dlltool; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_DLLTOOL+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_DLLTOOL"; then
+ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_DLLTOOL="dlltool"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL
+if test -n "$ac_ct_DLLTOOL"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_DLLTOOL" >&5
+$as_echo "$ac_ct_DLLTOOL" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_DLLTOOL" = x; then
+ DLLTOOL="false"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ DLLTOOL=$ac_ct_DLLTOOL
+ fi
+else
+ DLLTOOL="$ac_cv_prog_DLLTOOL"
+fi
+
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args.
+set dummy ${ac_tool_prefix}objdump; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_OBJDUMP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$OBJDUMP"; then
+ ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+OBJDUMP=$ac_cv_prog_OBJDUMP
+if test -n "$OBJDUMP"; then
+ { $as_echo "$as_me:$LINENO: result: $OBJDUMP" >&5
+$as_echo "$OBJDUMP" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OBJDUMP"; then
+ ac_ct_OBJDUMP=$OBJDUMP
+ # Extract the first word of "objdump", so it can be a program name with args.
+set dummy objdump; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_OBJDUMP+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_OBJDUMP"; then
+ ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_OBJDUMP="objdump"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP
+if test -n "$ac_ct_OBJDUMP"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_OBJDUMP" >&5
+$as_echo "$ac_ct_OBJDUMP" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_OBJDUMP" = x; then
+ OBJDUMP="false"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ OBJDUMP=$ac_ct_OBJDUMP
+ fi
+else
+ OBJDUMP="$ac_cv_prog_OBJDUMP"
+fi
+
+ ;;
+esac
+
+test -z "$AS" && AS=as
+
+
+
+
+
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+
+
+
+
+
+test -z "$OBJDUMP" && OBJDUMP=objdump
+
+
+
+
+
+
+
+ enable_dlopen=no
+
+
+
+ # Check whether --enable-shared was given.
+if test "${enable_shared+set}" = set; then
+ enableval=$enable_shared; p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_shared=yes ;;
+ no) enable_shared=no ;;
+ *)
+ enable_shared=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_shared=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac
+else
+ enable_shared=yes
+fi
+
+
+
+
+
+
+
+
+
+ # Check whether --enable-static was given.
+if test "${enable_static+set}" = set; then
+ enableval=$enable_static; p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_static=yes ;;
+ no) enable_static=no ;;
+ *)
+ enable_static=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_static=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac
+else
+ enable_static=yes
+fi
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-pic was given.
+if test "${with_pic+set}" = set; then
+ withval=$with_pic; pic_mode="$withval"
+else
+ pic_mode=default
+fi
+
+
+test -z "$pic_mode" && pic_mode=default
+
+
+
+
+
+
+
+ # Check whether --enable-fast-install was given.
+if test "${enable_fast_install+set}" = set; then
+ enableval=$enable_fast_install; p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_fast_install=yes ;;
+ no) enable_fast_install=no ;;
+ *)
+ enable_fast_install=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_fast_install=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac
+else
+ enable_fast_install=yes
+fi
+
+
+
+
+
+
+
+
+
+
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ltmain"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+test -z "$LN_S" && LN_S="ln -s"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+if test -n "${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+fi
+
+{ $as_echo "$as_me:$LINENO: checking for objdir" >&5
+$as_echo_n "checking for objdir... " >&6; }
+if test "${lt_cv_objdir+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+ lt_cv_objdir=.libs
+else
+ # MS-DOS does not allow filenames that begin with a dot.
+ lt_cv_objdir=_libs
+fi
+rmdir .libs 2>/dev/null
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_objdir" >&5
+$as_echo "$lt_cv_objdir" >&6; }
+objdir=$lt_cv_objdir
+
+
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define LT_OBJDIR "$lt_cv_objdir/"
+_ACEOF
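+# On most systems the check above selects ".libs", so the generated config
+# header ends up with the equivalent of:
+#
+#   #define LT_OBJDIR ".libs/"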
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+case $host_os in
+aix3*)
+ # AIX sometimes has problems with the GCC collect2 program. For some
+ # reason, if we set the COLLECT_NAMES environment variable, the problems
+ # vanish in a puff of smoke.
+ if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+ fi
+ ;;
+esac
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\(["`\\]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Sed substitution to delay expansion of an escaped single quote.
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+
+# Sed substitution to avoid accidental globbing in evaled expressions
+no_glob_subst='s/\*/\\\*/g'
+
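+# A small illustration of the quoting substitutions above (kept as a
+# comment; the sample string is arbitrary):
+#
+#   $ECHO 'path="$HOME/lib `pwd`"' | sed "$sed_quote_subst"
+#   # prints: path=\"\$HOME/lib \`pwd\`\"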
+# Global variables:
+ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a `.a' archive for static linking (except MSVC,
+# which needs '.lib').
+libext=a
+
+with_gnu_ld="$lt_cv_prog_gnu_ld"
+
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+
+# Set sane defaults for various variables
+test -z "$CC" && CC=cc
+test -z "$LTCC" && LTCC=$CC
+test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
+test -z "$LD" && LD=ld
+test -z "$ac_objext" && ac_objext=o
+
+for cc_temp in $compiler""; do
+ case $cc_temp in
+ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+ \-*) ;;
+ *) break;;
+ esac
+done
+cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"`
+
+
+# Only perform the check for file, if the check method requires it
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+case $deplibs_check_method in
+file_magic*)
+ if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+ { $as_echo "$as_me:$LINENO: checking for ${ac_tool_prefix}file" >&5
+$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; }
+if test "${lt_cv_path_MAGIC_CMD+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ case $MAGIC_CMD in
+[\\/*] | ?:[\\/]*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+*)
+ lt_save_MAGIC_CMD="$MAGIC_CMD"
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
+ for ac_dir in $ac_dummy; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/${ac_tool_prefix}file; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ $EGREP "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$lt_save_ifs"
+ MAGIC_CMD="$lt_save_MAGIC_CMD"
+ ;;
+esac
+fi
+
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ { $as_echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5
+$as_echo "$MAGIC_CMD" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+
+
+
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+ if test -n "$ac_tool_prefix"; then
+ { $as_echo "$as_me:$LINENO: checking for file" >&5
+$as_echo_n "checking for file... " >&6; }
+if test "${lt_cv_path_MAGIC_CMD+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ case $MAGIC_CMD in
+[\\/*] | ?:[\\/]*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+*)
+ lt_save_MAGIC_CMD="$MAGIC_CMD"
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
+ for ac_dir in $ac_dummy; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/file; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/file"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ $EGREP "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$lt_save_ifs"
+ MAGIC_CMD="$lt_save_MAGIC_CMD"
+ ;;
+esac
+fi
+
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ { $as_echo "$as_me:$LINENO: result: $MAGIC_CMD" >&5
+$as_echo "$MAGIC_CMD" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ else
+ MAGIC_CMD=:
+ fi
+fi
+
+ fi
+ ;;
+esac
+
+# Use C for the default configuration in the libtool script
+
+lt_save_CC="$CC"
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+# Source file extension for C test sources.
+ac_ext=c
+
+# Object file extension for compiled C test sources.
+objext=o
+objext=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="int some_variable = 0;"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='int main(){return(0);}'
+
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+# Save the default compiler, since it gets overwritten when the other
+# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
+compiler_DEFAULT=$CC
+
+# save warnings/boilerplate of simple test code
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+
+
+## CAVEAT EMPTOR:
+## There is no encapsulation within the following macros, do not change
+## the running order or otherwise move them around unless you know exactly
+## what you are doing...
+if test -n "$compiler"; then
+
+lt_prog_compiler_no_builtin_flag=
+
+if test "$GCC" = yes; then
+ lt_prog_compiler_no_builtin_flag=' -fno-builtin'
+
+ { $as_echo "$as_me:$LINENO: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
+$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; }
+if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_rtti_exceptions=no
+ ac_outfile=conftest.$ac_objext
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+ lt_compiler_flag="-fno-rtti -fno-exceptions"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ # The option is referenced via a variable to avoid confusing sed.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
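+   # (The single quotes above keep $lt_compiler_flag literal in the sed
+   # replacement, so it is only expanded when lt_compile is eval'd below; e.g.
+   # '$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' becomes
+   # '$CC -c $CFLAGS $CPPFLAGS $lt_compiler_flag conftest.$ac_ext >&5'.)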
+ (eval echo "\"\$as_me:10256: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>conftest.err)
+ ac_status=$?
+ cat conftest.err >&5
+ echo "$as_me:10260: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s "$ac_outfile"; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings other than the usual output.
+ $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_rtti_exceptions=yes
+ fi
+ fi
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_rtti_exceptions" >&5
+$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; }
+
+if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then
+ lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions"
+else
+ :
+fi
+
+fi
+
+
+
+
+
+
+ lt_prog_compiler_wl=
+lt_prog_compiler_pic=
+lt_prog_compiler_static=
+
+{ $as_echo "$as_me:$LINENO: checking for $compiler option to produce PIC" >&5
+$as_echo_n "checking for $compiler option to produce PIC... " >&6; }
+
+ if test "$GCC" = yes; then
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_static='-static'
+
+ case $host_os in
+ aix*)
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ lt_prog_compiler_static='-Bstatic'
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ lt_prog_compiler_pic='-fPIC'
+ ;;
+ m68k)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ esac
+ ;;
+
+ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+
+ mingw* | cygwin* | pw32* | os2* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+      # Although the cygwin gcc ignores -fPIC, we still need this for old-style
+ # (--disable-auto-import) libraries
+ lt_prog_compiler_pic='-DDLL_EXPORT'
+ ;;
+
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ lt_prog_compiler_pic='-fno-common'
+ ;;
+
+ hpux*)
+ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+ # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag
+ # sets the default TLS model and affects inlining.
+ case $host_cpu in
+ hppa*64*)
+ # +Z the default
+ ;;
+ *)
+ lt_prog_compiler_pic='-fPIC'
+ ;;
+ esac
+ ;;
+
+ interix[3-9]*)
+ # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+ # Instead, we relocate shared libraries at runtime.
+ ;;
+
+ msdosdjgpp*)
+ # Just because we use GCC doesn't mean we suddenly get shared libraries
+ # on systems that don't support them.
+ lt_prog_compiler_can_build_shared=no
+ enable_shared=no
+ ;;
+
+ *nto* | *qnx*)
+      # QNX uses GNU C++, but we need to define the -shared option too,
+      # otherwise it will coredump.
+ lt_prog_compiler_pic='-fPIC -shared'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ lt_prog_compiler_pic=-Kconform_pic
+ fi
+ ;;
+
+ *)
+ lt_prog_compiler_pic='-fPIC'
+ ;;
+ esac
+ else
+ # PORTME Check for flag to pass linker flags through the system compiler.
+ case $host_os in
+ aix*)
+ lt_prog_compiler_wl='-Wl,'
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ lt_prog_compiler_static='-Bstatic'
+ else
+ lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+
+ mingw* | cygwin* | pw32* | os2* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_prog_compiler_pic='-DDLL_EXPORT'
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ lt_prog_compiler_wl='-Wl,'
+ # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
+ # not for PA HP-UX.
+ case $host_cpu in
+ hppa*64*|ia64*)
+ # +Z the default
+ ;;
+ *)
+ lt_prog_compiler_pic='+Z'
+ ;;
+ esac
+ # Is there a better lt_prog_compiler_static that works with the bundled CC?
+ lt_prog_compiler_static='${wl}-a ${wl}archive'
+ ;;
+
+ irix5* | irix6* | nonstopux*)
+ lt_prog_compiler_wl='-Wl,'
+ # PIC (with -KPIC) is the default.
+ lt_prog_compiler_static='-non_shared'
+ ;;
+
+ linux* | k*bsd*-gnu)
+ case $cc_basename in
+ # old Intel for x86_64 which still supported -KPIC.
+ ecc*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-static'
+ ;;
+ # icc used to be incompatible with GCC.
+ # ICC 10 doesn't accept -KPIC any more.
+ icc* | ifort*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-fPIC'
+ lt_prog_compiler_static='-static'
+ ;;
+ # Lahey Fortran 8.1.
+ lf95*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='--shared'
+ lt_prog_compiler_static='--static'
+ ;;
+ pgcc* | pgf77* | pgf90* | pgf95*)
+ # Portland Group compilers (*not* the Pentium gcc compiler,
+ # which looks to be a dead project)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-fpic'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+ ccc*)
+ lt_prog_compiler_wl='-Wl,'
+ # All Alpha code is PIC.
+ lt_prog_compiler_static='-non_shared'
+ ;;
+ xl*)
+ # IBM XL C 8.0/Fortran 10.1 on PPC
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-qpic'
+ lt_prog_compiler_static='-qstaticlink'
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C 5.9
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ lt_prog_compiler_wl='-Wl,'
+ ;;
+ *Sun\ F*)
+ # Sun Fortran 8.3 passes all unrecognized flags to the linker
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ lt_prog_compiler_wl=''
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+
+ newsos6)
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+
+ *nto* | *qnx*)
+      # QNX uses GNU C++, but we need to define the -shared option too,
+      # otherwise it will coredump.
+ lt_prog_compiler_pic='-fPIC -shared'
+ ;;
+
+ osf3* | osf4* | osf5*)
+ lt_prog_compiler_wl='-Wl,'
+ # All OSF/1 code is PIC.
+ lt_prog_compiler_static='-non_shared'
+ ;;
+
+ rdos*)
+ lt_prog_compiler_static='-non_shared'
+ ;;
+
+ solaris*)
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ case $cc_basename in
+ f77* | f90* | f95*)
+ lt_prog_compiler_wl='-Qoption ld ';;
+ *)
+ lt_prog_compiler_wl='-Wl,';;
+ esac
+ ;;
+
+ sunos4*)
+ lt_prog_compiler_wl='-Qoption ld '
+ lt_prog_compiler_pic='-PIC'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+
+ sysv4 | sysv4.2uw2* | sysv4.3*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec ;then
+ lt_prog_compiler_pic='-Kconform_pic'
+ lt_prog_compiler_static='-Bstatic'
+ fi
+ ;;
+
+ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+
+ unicos*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_can_build_shared=no
+ ;;
+
+ uts4*)
+ lt_prog_compiler_pic='-pic'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+
+ *)
+ lt_prog_compiler_can_build_shared=no
+ ;;
+ esac
+ fi
+
+case $host_os in
+ # For platforms which do not support PIC, -DPIC is meaningless:
+ *djgpp*)
+ lt_prog_compiler_pic=
+ ;;
+ *)
+ lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC"
+ ;;
+esac
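+# (So with GCC on e.g. Linux the reported PIC flag ends up as "-fPIC -DPIC";
+# the -DPIC define lets source code detect that it is being built PIC.)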
+{ $as_echo "$as_me:$LINENO: result: $lt_prog_compiler_pic" >&5
+$as_echo "$lt_prog_compiler_pic" >&6; }
+
+
+
+
+
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$lt_prog_compiler_pic"; then
+ { $as_echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5
+$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; }
+if test "${lt_cv_prog_compiler_pic_works+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_pic_works=no
+ ac_outfile=conftest.$ac_objext
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+ lt_compiler_flag="$lt_prog_compiler_pic -DPIC"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ # The option is referenced via a variable to avoid confusing sed.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:10595: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>conftest.err)
+ ac_status=$?
+ cat conftest.err >&5
+ echo "$as_me:10599: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s "$ac_outfile"; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings other than the usual output.
+ $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_pic_works=yes
+ fi
+ fi
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_pic_works" >&5
+$as_echo "$lt_cv_prog_compiler_pic_works" >&6; }
+
+if test x"$lt_cv_prog_compiler_pic_works" = xyes; then
+ case $lt_prog_compiler_pic in
+ "" | " "*) ;;
+ *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;;
+ esac
+else
+ lt_prog_compiler_pic=
+ lt_prog_compiler_can_build_shared=no
+fi
+
+fi
+
+
+
+
+
+
+#
+# Check to make sure the static flag actually works.
+#
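+# (The wl= prefix on the next line temporarily sets wl for the eval, so that
+# any ${wl} references embedded in $lt_prog_compiler_static -- for example
+# '${wl}-a ${wl}archive' on HP-UX above -- expand to the linker pass-through
+# prefix before the flag is tested.)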
+wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\"
+{ $as_echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5
+$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
+if test "${lt_cv_prog_compiler_static_works+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_static_works=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
+ echo "$lt_simple_link_test_code" > conftest.$ac_ext
+ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+ # The linker can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ # Append any errors to the config.log.
+ cat conftest.err 1>&5
+ $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if diff conftest.exp conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_static_works=yes
+ fi
+ else
+ lt_cv_prog_compiler_static_works=yes
+ fi
+ fi
+ $RM -r conftest*
+ LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_static_works" >&5
+$as_echo "$lt_cv_prog_compiler_static_works" >&6; }
+
+if test x"$lt_cv_prog_compiler_static_works" = xyes; then
+ :
+else
+ lt_prog_compiler_static=
+fi
+
+
+
+
+
+
+
+ { $as_echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5
+$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if test "${lt_cv_prog_compiler_c_o+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_c_o=no
+ $RM -r conftest 2>/dev/null
+ mkdir conftest
+ cd conftest
+ mkdir out
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ lt_compiler_flag="-o out/conftest2.$ac_objext"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:10700: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>out/conftest.err)
+ ac_status=$?
+ cat out/conftest.err >&5
+ echo "$as_me:10704: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s out/conftest2.$ac_objext
+ then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp
+ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_c_o=yes
+ fi
+ fi
+ chmod u+w . 2>&5
+ $RM conftest*
+ # SGI C++ compiler will create directory out/ii_files/ for
+ # template instantiation
+ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+ $RM out/* && rmdir out
+ cd ..
+ $RM -r conftest
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5
+$as_echo "$lt_cv_prog_compiler_c_o" >&6; }
+
+
+
+
+
+
+ { $as_echo "$as_me:$LINENO: checking if $compiler supports -c -o file.$ac_objext" >&5
+$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if test "${lt_cv_prog_compiler_c_o+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_c_o=no
+ $RM -r conftest 2>/dev/null
+ mkdir conftest
+ cd conftest
+ mkdir out
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ lt_compiler_flag="-o out/conftest2.$ac_objext"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:10755: $lt_compile\"" >&5)
+ (eval "$lt_compile" 2>out/conftest.err)
+ ac_status=$?
+ cat out/conftest.err >&5
+ echo "$as_me:10759: \$? = $ac_status" >&5
+ if (exit $ac_status) && test -s out/conftest2.$ac_objext
+ then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp
+ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler_c_o=yes
+ fi
+ fi
+ chmod u+w . 2>&5
+ $RM conftest*
+ # SGI C++ compiler will create directory out/ii_files/ for
+ # template instantiation
+ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+ $RM out/* && rmdir out
+ cd ..
+ $RM -r conftest
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_c_o" >&5
+$as_echo "$lt_cv_prog_compiler_c_o" >&6; }
+
+
+
+
+hard_links="nottested"
+if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then
+ # do not overwrite the value of need_locks provided by the user
+ { $as_echo "$as_me:$LINENO: checking if we can lock with hard links" >&5
+$as_echo_n "checking if we can lock with hard links... " >&6; }
+ hard_links=yes
+ $RM conftest*
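+  # (The three ln calls below check that ln fails while conftest.a is missing,
+  # succeeds once it exists, and fails again when conftest.b is already
+  # present; only if all three behave that way are hard links kept usable
+  # for locking.)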
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ touch conftest.a
+ ln conftest.a conftest.b 2>&5 || hard_links=no
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ { $as_echo "$as_me:$LINENO: result: $hard_links" >&5
+$as_echo "$hard_links" >&6; }
+ if test "$hard_links" = no; then
+ { $as_echo "$as_me:$LINENO: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5
+$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;}
+ need_locks=warn
+ fi
+else
+ need_locks=no
+fi
+
+
+
+
+
+
+ { $as_echo "$as_me:$LINENO: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+
+ runpath_var=
+ allow_undefined_flag=
+ always_export_symbols=no
+ archive_cmds=
+ archive_expsym_cmds=
+ compiler_needs_object=no
+ enable_shared_with_static_runtimes=no
+ export_dynamic_flag_spec=
+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
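+  # (This default runs $NM over the objects, filters through the global symbol
+  # pipe, keeps only the last field -- the symbol name -- and writes the sorted,
+  # de-duplicated list to $export_symbols.)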
+ hardcode_automatic=no
+ hardcode_direct=no
+ hardcode_direct_absolute=no
+ hardcode_libdir_flag_spec=
+ hardcode_libdir_flag_spec_ld=
+ hardcode_libdir_separator=
+ hardcode_minus_L=no
+ hardcode_shlibpath_var=unsupported
+ inherit_rpath=no
+ link_all_deplibs=unknown
+ module_cmds=
+ module_expsym_cmds=
+ old_archive_from_new_cmds=
+ old_archive_from_expsyms_cmds=
+ thread_safe_flag_spec=
+ whole_archive_flag_spec=
+ # include_expsyms should be a list of space-separated symbols to be *always*
+ # included in the symbol list
+ include_expsyms=
+ # exclude_expsyms can be an extended regexp of symbols to exclude
+ # it will be wrapped by ` (' and `)$', so one must not match beginning or
+ # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+ # as well as any symbol that contains `d'.
+ exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+  # Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+ # platforms (ab)use it in PIC code, but their linkers get confused if
+ # the symbol is explicitly referenced. Since portable code cannot
+ # rely on this symbol name, it's probably fine to never include it in
+ # preloaded symbol tables.
+ # Exclude shared library initialization/finalization symbols.
+ extract_expsyms_cmds=
+
+ case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+    # FIXME: the MSVC++ port hasn't been tested in a long time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$GCC" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+ interix*)
+ # we just hope/assume this is gcc and not c89 (= MSVC++)
+ with_gnu_ld=yes
+ ;;
+ openbsd*)
+ with_gnu_ld=no
+ ;;
+ esac
+
+ ld_shlibs=yes
+ if test "$with_gnu_ld" = yes; then
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ wlarc='${wl}'
+
+ # Set some defaults for GNU ld with shared library support. These
+ # are reset later if shared libraries are not supported. Putting them
+ # here allows them to be overridden if necessary.
+ runpath_var=LD_RUN_PATH
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ export_dynamic_flag_spec='${wl}--export-dynamic'
+    # ancient GNU ld didn't support --whole-archive et al.
+ if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+ whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ whole_archive_flag_spec=
+ fi
+ supports_anon_versioning=no
+ case `$LD -v 2>&1` in
+ *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11
+ *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+ *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+ *\ 2.11.*) ;; # other 2.11 versions
+ *) supports_anon_versioning=yes ;;
+ esac
+
+ # See if GNU ld supports shared libraries.
+ case $host_os in
+ aix[3-9]*)
+ # On AIX/PPC, the GNU linker is very broken
+ if test "$host_cpu" != ia64; then
+ ld_shlibs=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.9.1, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support. If you
+*** really care for shared libraries, you may want to modify your PATH
+*** so that a non-GNU linker is found, and then restart.
+
+_LT_EOF
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds=''
+ ;;
+ m68k)
+ archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ ;;
+ esac
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ allow_undefined_flag=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless,
+ # as there is no search path for DLLs.
+ hardcode_libdir_flag_spec='-L$libdir'
+ allow_undefined_flag=unsupported
+ always_export_symbols=no
+ enable_shared_with_static_runtimes=yes
+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols'
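+    # (In the generated export list, symbols whose nm type code is B, C, D, G,
+    # R or S are tagged with DATA for the .def file, while A, I, T and W
+    # symbols are listed by name alone.)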
+
+ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is; otherwise, prepend...
+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname.def;
+ else
+ echo EXPORTS > $output_objdir/$soname.def;
+ cat $export_symbols >> $output_objdir/$soname.def;
+ fi~
+ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ interix[3-9]*)
+ hardcode_direct=no
+ hardcode_shlibpath_var=no
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec='${wl}-E'
+ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+ # Instead, shared libraries are loaded at an image base (0x10000000 by
+      # default) and relocated if they conflict, which is a slow, very
+      # memory-consuming and fragmenting process.  To avoid this, we pick a random,
+ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+ # time. Moving up from 0x10000000 also allows more sbrk(2) space.
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ ;;
+
+ gnu* | linux* | tpf* | k*bsd*-gnu)
+ tmp_diet=no
+ if test "$host_os" = linux-dietlibc; then
+ case $cc_basename in
+ diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn)
+ esac
+ fi
+ if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+ && test "$tmp_diet" = no
+ then
+ tmp_addflag=
+ tmp_sharedflag='-shared'
+ case $cc_basename,$host_cpu in
+ pgcc*) # Portland Group C compiler
+ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ tmp_addflag=' $pic_flag'
+ ;;
+ pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers
+ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ tmp_addflag=' $pic_flag -Mnomain' ;;
+ ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64
+ tmp_addflag=' -i_dynamic' ;;
+ efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64
+ tmp_addflag=' -i_dynamic -nofor_main' ;;
+ ifc* | ifort*) # Intel Fortran compiler
+ tmp_addflag=' -nofor_main' ;;
+ lf95*) # Lahey Fortran 8.1
+ whole_archive_flag_spec=
+ tmp_sharedflag='--shared' ;;
+ xl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+ tmp_sharedflag='-qmkshrobj'
+ tmp_addflag= ;;
+ esac
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*) # Sun C 5.9
+ whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ compiler_needs_object=yes
+ tmp_sharedflag='-G' ;;
+ *Sun\ F*) # Sun Fortran 8.3
+ tmp_sharedflag='-G' ;;
+ esac
+ archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+
+ if test "x$supports_anon_versioning" = xyes; then
+ archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+ fi
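+      # (With anonymous versioning support, the commands above also generate a
+      # linker version script of the form "{ global: sym1; sym2; local: *; };"
+      # from $export_symbols and pass it via ${wl}-version-script; the symbol
+      # names here are illustrative only.)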
+
+ case $cc_basename in
+ xlf*)
+ # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+ whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive'
+ hardcode_libdir_flag_spec=
+ hardcode_libdir_flag_spec_ld='-rpath $libdir'
+ archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib'
+ if test "x$supports_anon_versioning" = xyes; then
+ archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+ fi
+ ;;
+ esac
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+ wlarc=
+ else
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ fi
+ ;;
+
+ solaris*)
+ if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
+ ld_shlibs=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems. Therefore, libtool
+*** is disabling shared libraries support. We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+ elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+ case `$LD -v 2>&1` in
+ *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*)
+ ld_shlibs=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not
+*** reliably create shared libraries on SCO systems. Therefore, libtool
+*** is disabling shared libraries support. We urge you to upgrade GNU
+*** binutils to release 2.16.91.0.3 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+ ;;
+ *)
+ # For security reasons, it is highly recommended that you always
+ # use absolute paths for naming shared libraries, and exclude the
+ # DT_RUNPATH tag from executables and libraries. But doing so
+ # requires that you compile everything twice, which is a pain.
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+ ;;
+
+ sunos4*)
+ archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ wlarc=
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+
+ if test "$ld_shlibs" = no; then
+ runpath_var=
+ hardcode_libdir_flag_spec=
+ export_dynamic_flag_spec=
+ whole_archive_flag_spec=
+ fi
+ else
+ # PORTME fill in a description of your system's linker (not GNU ld)
+ case $host_os in
+ aix3*)
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+ archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ hardcode_minus_L=yes
+ if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ hardcode_direct=unsupported
+ fi
+ ;;
+
+ aix[4-9]*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ # If we're using GNU nm, then we don't want the "-C" option.
+        # With AIX nm, -C means demangle; with GNU nm, it means don't demangle.
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+ export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ else
+ export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ fi
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+ for ld_flag in $LDFLAGS; do
+ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ aix_use_runtimelinking=yes
+ break
+ fi
+ done
+ ;;
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ archive_cmds=''
+ hardcode_direct=yes
+ hardcode_direct_absolute=yes
+ hardcode_libdir_separator=':'
+ link_all_deplibs=yes
+ file_list_spec='${wl}-f,'
+
+ if test "$GCC" = yes; then
+ case $host_os in aix4.[012]|aix4.[012].*)
+ # We only want to do this on AIX 4.2 and lower, the check
+ # below for broken collect2 doesn't work under 4.3+
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" &&
+ strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ :
+ else
+ # We have old collect2
+ hardcode_direct=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_minus_L
+ # to unsupported forces relinking
+ hardcode_minus_L=yes
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_libdir_separator=
+ fi
+ ;;
+ esac
+ shared_flag='-shared'
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag="$shared_flag "'${wl}-G'
+ fi
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+ # chokes on -Wl,-G. The following line is correct:
+ shared_flag='-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+ fi
+
+ export_dynamic_flag_spec='${wl}-bexpall'
+ # It seems that -bexpall does not export symbols beginning with
+ # underscore (_), so it is better to generate a list of symbols to export.
+ always_export_symbols=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag='-berok'
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+
+lt_aix_libpath_sed='
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\(.*\)$/\1/
+ p
+ }
+ }'
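+# (This sed script selects, from the "Import File Strings" section of dump -H
+# output, the entry indexed 0, which holds the default library search path
+# recorded in the empty test executable.)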
+aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+# Check for a 64-bit object if we didn't find anything.
+if test -z "$aix_libpath"; then
+ aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+fi
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
+
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+ archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
+ allow_undefined_flag="-z nodefs"
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+ else
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+
+lt_aix_libpath_sed='
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\(.*\)$/\1/
+ p
+ }
+ }'
+aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+# Check for a 64-bit object if we didn't find anything.
+if test -z "$aix_libpath"; then
+ aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+fi
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
+
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ no_undefined_flag=' ${wl}-bernotok'
+ allow_undefined_flag=' ${wl}-berok'
+ # Exported symbols can be pulled into shared objects from archives
+ whole_archive_flag_spec='$convenience'
+ archive_cmds_need_lc=yes
+ # This is similar to how AIX traditionally builds its shared libraries.
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+ fi
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds=''
+ ;;
+ m68k)
+ archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ ;;
+ esac
+ ;;
+
+ bsdi[45]*)
+ export_dynamic_flag_spec=-rdynamic
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec=' '
+ allow_undefined_flag=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ old_archive_from_new_cmds='true'
+ # FIXME: Should let the user specify the lib program.
+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs'
+ fix_srcfile_path='`cygpath -w "$srcfile"`'
+ enable_shared_with_static_runtimes=yes
+ ;;
+
+ darwin* | rhapsody*)
+
+
+ archive_cmds_need_lc=no
+ hardcode_direct=no
+ hardcode_automatic=yes
+ hardcode_shlibpath_var=unsupported
+ whole_archive_flag_spec=''
+ link_all_deplibs=yes
+ allow_undefined_flag="$_lt_dar_allow_undefined"
+ case $cc_basename in
+ ifort*) _lt_dar_can_shared=yes ;;
+ *) _lt_dar_can_shared=$GCC ;;
+ esac
+ if test "$_lt_dar_can_shared" = "yes"; then
+ output_verbose_link_cmd=echo
+ archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
+ module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
+ archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
+ module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
+
+ else
+ ld_shlibs=no
+ fi
+
+ ;;
+
+ dgux*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ freebsd1*)
+ ld_shlibs=no
+ ;;
+
+ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+ # support. Future versions do this automatically, but an explicit c++rt0.o
+ # does not break anything, and helps significantly (at the cost of a little
+ # extra space).
+ freebsd2.2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+ freebsd2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+ freebsd* | dragonfly*)
+ archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ hpux9*)
+ if test "$GCC" = yes; then
+ archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ else
+ archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ export_dynamic_flag_spec='${wl}-E'
+ ;;
+
+ hpux10*)
+ if test "$GCC" = yes -a "$with_gnu_ld" = no; then
+ archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ if test "$with_gnu_ld" = no; then
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_flag_spec_ld='+b $libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ hardcode_direct_absolute=yes
+ export_dynamic_flag_spec='${wl}-E'
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ fi
+ ;;
+
+ hpux11*)
+ if test "$GCC" = yes -a "$with_gnu_ld" = no; then
+ case $host_cpu in
+ hppa*64*)
+ archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ ia64*)
+ archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ else
+ case $host_cpu in
+ hppa*64*)
+ archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ ia64*)
+ archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ fi
+ if test "$with_gnu_ld" = no; then
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+
+ case $host_cpu in
+ hppa*64*|ia64*)
+ hardcode_direct=no
+ hardcode_shlibpath_var=no
+ ;;
+ *)
+ hardcode_direct=yes
+ hardcode_direct_absolute=yes
+ export_dynamic_flag_spec='${wl}-E'
+
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ ;;
+ esac
+ fi
+ ;;
+
+ irix5* | irix6* | nonstopux*)
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+      # Try to use the -exported_symbol ld option; if it does not
+ # work, assume that -exports_file does not work either and
+ # implicitly export all symbols.
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
+ cat >conftest.$ac_ext <<_ACEOF
+int foo(void) {}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ LDFLAGS="$save_LDFLAGS"
+ else
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
+ fi
+ archive_cmds_need_lc='no'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ inherit_rpath=yes
+ link_all_deplibs=yes
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out
+ else
+ archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF
+ fi
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ newsos6)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_shlibpath_var=no
+ ;;
+
+ *nto* | *qnx*)
+ ;;
+
+ openbsd*)
+ if test -f /usr/libexec/ld.so; then
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ hardcode_direct_absolute=yes
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec='${wl}-E'
+ else
+ case $host_os in
+ openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ ;;
+ *)
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ ;;
+ esac
+ fi
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ os2*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ allow_undefined_flag=unsupported
+ archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+ old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+ ;;
+
+ osf3*)
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ archive_cmds_need_lc='no'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+
+ osf4* | osf5*) # as osf3* with the addition of -msym flag
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
+
+ # Both c and cxx compiler support -rpath directly
+ hardcode_libdir_flag_spec='-rpath $libdir'
+ fi
+ archive_cmds_need_lc='no'
+ hardcode_libdir_separator=:
+ ;;
+
+ solaris*)
+ no_undefined_flag=' -z defs'
+ if test "$GCC" = yes; then
+ wlarc='${wl}'
+ archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ else
+ case `$CC -V 2>&1` in
+ *"Compilers 5.0"*)
+ wlarc=''
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+ ;;
+ *)
+ wlarc='${wl}'
+ archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ ;;
+ esac
+ fi
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_shlibpath_var=no
+ case $host_os in
+ solaris2.[0-5] | solaris2.[0-5].*) ;;
+ *)
+ # The compiler driver will combine and reorder linker options,
+ # but understands `-z linker_flag'. GCC discards it without `$wl',
+ # but is careful enough not to reorder.
+ # Supported since Solaris 2.6 (maybe 2.5.1?)
+ if test "$GCC" = yes; then
+ whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+ else
+ whole_archive_flag_spec='-z allextract$convenience -z defaultextract'
+ fi
+ ;;
+ esac
+ link_all_deplibs=yes
+ ;;
+
+ sunos4*)
+ if test "x$host_vendor" = xsequent; then
+ # Use $CC to link under sequent, because it throws in some extra .o
+ # files that make .init and .fini sections work.
+ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4)
+ case $host_vendor in
+ sni)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes # is this really true???
+ ;;
+ siemens)
+ ## LD is ld it makes a PLAMLIB
+ ## CC just makes a GrossModule.
+ archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+ reload_cmds='$CC -r -o $output$reload_objs'
+ hardcode_direct=no
+ ;;
+ motorola)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=no #Motorola manual says yes, but my tests say they lie
+ ;;
+ esac
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4.3*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ export_dynamic_flag_spec='-Bexport'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ ld_shlibs=yes
+ fi
+ ;;
+
+ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
+ no_undefined_flag='${wl}-z,text'
+ archive_cmds_need_lc=no
+ hardcode_shlibpath_var=no
+ runpath_var='LD_RUN_PATH'
+
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6*)
+ # Note: We can NOT use -z defs as we might desire, because we do not
+ # link with -lc, and that would cause any symbols used from libc to
+ # always be unresolved, which means just about no library would
+ # ever link correctly. If we're not using GNU ld we use -z text
+ # though, which does catch some bad symbols but isn't as heavy-handed
+ # as -z defs.
+ no_undefined_flag='${wl}-z,text'
+ allow_undefined_flag='${wl}-z,nodefs'
+ archive_cmds_need_lc=no
+ hardcode_shlibpath_var=no
+ hardcode_libdir_flag_spec='${wl}-R,$libdir'
+ hardcode_libdir_separator=':'
+ link_all_deplibs=yes
+ export_dynamic_flag_spec='${wl}-Bexport'
+ runpath_var='LD_RUN_PATH'
+
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ ;;
+
+ uts4*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+
+ if test x$host_vendor = xsni; then
+ case $host in
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ export_dynamic_flag_spec='${wl}-Blargedynsym'
+ ;;
+ esac
+ fi
+ fi
+
+{ $as_echo "$as_me:$LINENO: result: $ld_shlibs" >&5
+$as_echo "$ld_shlibs" >&6; }
+test "$ld_shlibs" = no && can_build_shared=no
+
+with_gnu_ld=$with_gnu_ld
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$archive_cmds_need_lc" in
+x|xyes)
+ # Assume -lc should be added
+ archive_cmds_need_lc=yes
+
+ if test "$enable_shared" = yes && test "$GCC" = yes; then
+ case $archive_cmds in
+ *'~'*)
+ # FIXME: we may have to deal with multi-command sequences.
+ ;;
+ '$CC '*)
+ # Test whether the compiler implicitly links with -lc since on some
+ # systems, -lgcc has to come before -lc. If gcc already passes -lc
+ # to ld, don't add -lc before -lgcc.
+ { $as_echo "$as_me:$LINENO: checking whether -lc should be explicitly linked in" >&5
+$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; }
+ $RM conftest*
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } 2>conftest.err; then
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$lt_prog_compiler_wl
+ pic_flag=$lt_prog_compiler_pic
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ lt_save_allow_undefined_flag=$allow_undefined_flag
+ allow_undefined_flag=
+ if { (eval echo "$as_me:$LINENO: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\"") >&5
+ (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+ then
+ archive_cmds_need_lc=no
+ else
+ archive_cmds_need_lc=yes
+ fi
+ allow_undefined_flag=$lt_save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi
+ $RM conftest*
+ { $as_echo "$as_me:$LINENO: result: $archive_cmds_need_lc" >&5
+$as_echo "$archive_cmds_need_lc" >&6; }
+ ;;
+ esac
+ fi
+ ;;
+esac
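+
+# The case statement above decides whether -lc has to be named explicitly
+# when a shared library is created.  As a rough, stand-alone sketch of the
+# same probe (illustrative only, assuming a generic `cc' driver; not part
+# of the generated script):
+#
+#   echo 'int main (void) { return 0; }' > conftest.c
+#   cc -c -o conftest.o conftest.c
+#   # Link verbosely and see whether the driver already passes -lc to ld.
+#   if cc -shared -v -o libconftest.so conftest.o 2>&1 | grep ' -lc ' >/dev/null; then
+#     archive_cmds_need_lc=no    # the driver adds -lc by itself
+#   else
+#     archive_cmds_need_lc=yes   # -lc must be added explicitly
+#   fi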
+
+ { $as_echo "$as_me:$LINENO: checking dynamic linker characteristics" >&5
+$as_echo_n "checking dynamic linker characteristics... " >&6; }
+
+if test "$GCC" = yes; then
+ case $host_os in
+ darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
+ *) lt_awk_arg="/^libraries:/" ;;
+ esac
+ lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"`
+ if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then
+ # if the path contains ";" then we assume it to be the separator
+ # otherwise default to the standard path separator (i.e. ":") - it is
+ # assumed that no part of a normal pathname contains ";" but that should
+    # be okay in the real world where ";" in dirpaths is itself problematic.
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'`
+ else
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ fi
+ # Ok, now we have the path, separated by spaces, we can step through it
+ # and add multilib dir if necessary.
+ lt_tmp_lt_search_path_spec=
+ lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+ for lt_sys_path in $lt_search_path_spec; do
+ if test -d "$lt_sys_path/$lt_multi_os_dir"; then
+ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir"
+ else
+ test -d "$lt_sys_path" && \
+ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
+ fi
+ done
+ lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk '
+BEGIN {RS=" "; FS="/|\n";} {
+ lt_foo="";
+ lt_count=0;
+ for (lt_i = NF; lt_i > 0; lt_i--) {
+ if ($lt_i != "" && $lt_i != ".") {
+ if ($lt_i == "..") {
+ lt_count++;
+ } else {
+ if (lt_count == 0) {
+ lt_foo="/" $lt_i lt_foo;
+ } else {
+ lt_count--;
+ }
+ }
+ }
+ }
+ if (lt_foo != "") { lt_freq[lt_foo]++; }
+ if (lt_freq[lt_foo] == 1) { print lt_foo; }
+}'`
+ sys_lib_search_path_spec=`$ECHO $lt_search_path_spec`
+else
+ sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+fi
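+
+# The core of the GCC branch above, reduced to a one-liner (illustrative
+# only; a generic `gcc' on a typical Linux system is assumed):
+#
+#   gcc -print-search-dirs | sed -n 's/^libraries://p' | sed 's,=/,/,g' | tr ':' ' '
+#
+# i.e. ask the compiler for its library search directories and turn the
+# colon-separated list into space-separated form.  The script additionally
+# appends multilib directories and removes duplicate entries.
+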
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=".so"
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+case $host_os in
+aix3*)
+ version_type=linux
+ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+ shlibpath_var=LIBPATH
+
+ # AIX 3 has no versioning support, so we append a major version to the name.
+ soname_spec='${libname}${release}${shared_ext}$major'
+ ;;
+
+aix[4-9]*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ hardcode_into_libs=yes
+ if test "$host_cpu" = ia64; then
+ # AIX 5 supports IA64
+ library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ else
+ # With GCC up to 2.95.x, collect2 would create an import file
+ # for dependence libraries. The import file would start with
+ # the line `#! .'. This would cause the generated library to
+ # depend on `.', always an invalid library. This was fixed in
+ # development snapshots of GCC prior to 3.0.
+ case $host_os in
+ aix4 | aix4.[01] | aix4.[01].*)
+ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ echo ' yes '
+ echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+ :
+ else
+ can_build_shared=no
+ fi
+ ;;
+ esac
+    # AIX (on Power*) has no versioning support, so currently we cannot hardcode the
+    # correct soname into the executable.  Probably we can add versioning support to
+    # collect2, so that additional links can be useful in the future.
+ if test "$aix_use_runtimelinking" = yes; then
+ # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+ # instead of lib<name>.a to let people know that these are not
+ # typical AIX shared libraries.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ else
+ # We preserve .a as extension for shared libraries through AIX4.2
+ # and later when we are not doing run time linking.
+ library_names_spec='${libname}${release}.a $libname.a'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ fi
+ shlibpath_var=LIBPATH
+ fi
+ ;;
+
+amigaos*)
+ case $host_cpu in
+ powerpc)
+ # Since July 2007 AmigaOS4 officially supports .so libraries.
+    # When compiling the executable, add -use-dynld -Lsobjs: to the compile line.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ ;;
+ m68k)
+ library_names_spec='$libname.ixlibrary $libname.a'
+ # Create ${libname}_ixlibrary.a entries in /sys/libs.
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+ ;;
+ esac
+ ;;
+
+beos*)
+ library_names_spec='${libname}${shared_ext}'
+ dynamic_linker="$host_os ld.so"
+ shlibpath_var=LIBRARY_PATH
+ ;;
+
+bsdi[45]*)
+ version_type=linux
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+ # the default ld.so.conf also contains /usr/contrib/lib and
+ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+ # libtool to hard-code these into programs
+ ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+ version_type=windows
+ shrext_cmds=".dll"
+ need_version=no
+ need_lib_prefix=no
+
+ case $GCC,$host_os in
+ yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*)
+ library_names_spec='$libname.dll.a'
+ # DLL is installed to $(libdir)/../bin by postinstall_cmds
+ postinstall_cmds='base_file=`basename \${file}`~
+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog $dir/$dlname \$dldir/$dlname~
+ chmod a+x \$dldir/$dlname~
+ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+ fi'
+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $RM \$dlpath'
+ shlibpath_overrides_runpath=yes
+
+ case $host_os in
+ cygwin*)
+ # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib"
+ ;;
+ mingw* | cegcc*)
+ # MinGW DLLs use traditional 'lib' prefix
+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"`
+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+ # It is most probably a Windows format PATH printed by
+ # mingw gcc, but we are running on Cygwin. Gcc prints its search
+ # path with ; separators, and with drive letters. We can handle the
+ # drive letters (cygwin fileutils understands them), so leave them,
+ # especially as we might pass files found there to a mingw objdump,
+ # which wouldn't understand a cygwinified path. Ahh.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+ else
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ fi
+ ;;
+ pw32*)
+ # pw32 DLLs use 'pw' prefix rather than 'lib'
+ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ ;;
+ esac
+ ;;
+
+ *)
+ library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib'
+ ;;
+ esac
+ dynamic_linker='Win32 ld.exe'
+ # FIXME: first we should search . and the directory the executable is in
+ shlibpath_var=PATH
+ ;;
+
+darwin* | rhapsody*)
+ dynamic_linker="$host_os dyld"
+ version_type=darwin
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
+ soname_spec='${libname}${release}${major}$shared_ext'
+ shlibpath_overrides_runpath=yes
+ shlibpath_var=DYLD_LIBRARY_PATH
+ shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"
+ sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+ ;;
+
+dgux*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+freebsd1*)
+ dynamic_linker=no
+ ;;
+
+freebsd* | dragonfly*)
+ # DragonFly does not have aout. When/if they implement a new
+ # versioning mechanism, adjust this.
+ if test -x /usr/bin/objformat; then
+ objformat=`/usr/bin/objformat`
+ else
+ case $host_os in
+ freebsd[123]*) objformat=aout ;;
+ *) objformat=elf ;;
+ esac
+ fi
+ version_type=freebsd-$objformat
+ case $version_type in
+ freebsd-elf*)
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+ need_version=no
+ need_lib_prefix=no
+ ;;
+ freebsd-*)
+ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+ need_version=yes
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_os in
+ freebsd2*)
+ shlibpath_overrides_runpath=yes
+ ;;
+ freebsd3.[01]* | freebsdelf3.[01]*)
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+ freebsd3.[2-9]* | freebsdelf3.[2-9]* | \
+ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1)
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+ *) # from 4.6 on, and DragonFly
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+ esac
+ ;;
+
+gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ hardcode_into_libs=yes
+ ;;
+
+hpux9* | hpux10* | hpux11*)
+ # Give a soname corresponding to the major version so that dld.sl refuses to
+ # link against other versions.
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ case $host_cpu in
+ ia64*)
+ shrext_cmds='.so'
+ hardcode_into_libs=yes
+ dynamic_linker="$host_os dld.so"
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ if test "X$HPUX_IA64_MODE" = X32; then
+ sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+ else
+ sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+ fi
+ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+ ;;
+ hppa*64*)
+ shrext_cmds='.sl'
+ hardcode_into_libs=yes
+ dynamic_linker="$host_os dld.sl"
+ shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+ ;;
+ *)
+ shrext_cmds='.sl'
+ dynamic_linker="$host_os dld.sl"
+ shlibpath_var=SHLIB_PATH
+ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ ;;
+ esac
+ # HP-UX runs *really* slowly unless shared libraries are mode 555.
+ postinstall_cmds='chmod 555 $lib'
+ ;;
+
+interix[3-9]*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+irix5* | irix6* | nonstopux*)
+ case $host_os in
+ nonstopux*) version_type=nonstopux ;;
+ *)
+ if test "$lt_cv_prog_gnu_ld" = yes; then
+ version_type=linux
+ else
+ version_type=irix
+ fi ;;
+ esac
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}${shared_ext}$major'
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+ case $host_os in
+ irix5* | nonstopux*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in # libtool.m4 will add one of these switches to LD
+ *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+ libsuff= shlibsuff= libmagic=32-bit;;
+ *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+ libsuff=32 shlibsuff=N32 libmagic=N32;;
+ *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+ libsuff=64 shlibsuff=64 libmagic=64-bit;;
+ *) libsuff= shlibsuff= libmagic=never-match;;
+ esac
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+ hardcode_into_libs=yes
+ ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+ dynamic_linker=no
+ ;;
+
+# This must be Linux ELF.
+linux* | k*bsd*-gnu)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ # Some binutils ld are patched to set DT_RUNPATH
+ save_LDFLAGS=$LDFLAGS
+ save_libdir=$libdir
+ eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \
+ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\""
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then
+ shlibpath_overrides_runpath=yes
+fi
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ LDFLAGS=$save_LDFLAGS
+ libdir=$save_libdir
+
+ # This implies no fast_install, which is unacceptable.
+ # Some rework will be needed to allow for fast_install
+ # before this can be enabled.
+ hardcode_into_libs=yes
+
+ # Append ld.so.conf contents to the search path
+ if test -f /etc/ld.so.conf; then
+ lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '`
+ sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+ fi
+
+  # We used to test for /lib/ld.so.1 and disable shared libraries on
+  # powerpc, because MkLinux only supported shared libraries with the
+  # GNU dynamic linker.  That check broke cross compilation, most
+  # powerpc-linux boxes support dynamic linking these days, and people
+  # can always pass --disable-shared, so the test was removed and we now
+  # assume the GNU/Linux dynamic linker is in use.
+ dynamic_linker='GNU/Linux ld.so'
+ ;;
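+
+# A stand-alone sketch of the DT_RUNPATH probe used above (illustrative
+# only; generic `cc' and `objdump' assumed, /nonexistent is a made-up path):
+#
+#   echo 'int main (void) { return 0; }' > probe.c
+#   cc -o probe probe.c -Wl,-rpath,/nonexistent
+#   # If ld records the path as DT_RUNPATH, LD_LIBRARY_PATH takes precedence
+#   # over it at run time, hence shlibpath_overrides_runpath=yes.
+#   if objdump -p probe | grep 'RUNPATH.*/nonexistent' >/dev/null; then
+#     shlibpath_overrides_runpath=yes
+#   fi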
+
+netbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ dynamic_linker='NetBSD (a.out) ld.so'
+ else
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ dynamic_linker='NetBSD ld.elf_so'
+ fi
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+
+newsos6)
+ version_type=linux
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+*nto* | *qnx*)
+ version_type=qnx
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ dynamic_linker='ldqnx.so'
+ ;;
+
+openbsd*)
+ version_type=sunos
+ sys_lib_dlsearch_path_spec="/usr/lib"
+ need_lib_prefix=no
+ # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
+ case $host_os in
+ openbsd3.3 | openbsd3.3.*) need_version=yes ;;
+ *) need_version=no ;;
+ esac
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ case $host_os in
+ openbsd2.[89] | openbsd2.[89].*)
+ shlibpath_overrides_runpath=no
+ ;;
+ *)
+ shlibpath_overrides_runpath=yes
+ ;;
+ esac
+ else
+ shlibpath_overrides_runpath=yes
+ fi
+ ;;
+
+os2*)
+ libname_spec='$name'
+ shrext_cmds=".dll"
+ need_lib_prefix=no
+ library_names_spec='$libname${shared_ext} $libname.a'
+ dynamic_linker='OS/2 ld.exe'
+ shlibpath_var=LIBPATH
+ ;;
+
+osf3* | osf4* | osf5*)
+ version_type=osf
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}${shared_ext}$major'
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+ ;;
+
+rdos*)
+ dynamic_linker=no
+ ;;
+
+solaris*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ # ldd complains unless libraries are executable
+ postinstall_cmds='chmod +x $lib'
+ ;;
+
+sunos4*)
+ version_type=sunos
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ fi
+ need_version=yes
+ ;;
+
+sysv4 | sysv4.3*)
+ version_type=linux
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_vendor in
+ sni)
+ shlibpath_overrides_runpath=no
+ need_lib_prefix=no
+ runpath_var=LD_RUN_PATH
+ ;;
+ siemens)
+ need_lib_prefix=no
+ ;;
+ motorola)
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+ ;;
+ esac
+ ;;
+
+sysv4*MP*)
+  if test -d /usr/nec; then
+ version_type=linux
+ library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
+ soname_spec='$libname${shared_ext}.$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ fi
+ ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ version_type=freebsd-elf
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ if test "$with_gnu_ld" = yes; then
+ sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+ else
+ sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+ case $host_os in
+ sco3.2v5*)
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+ ;;
+ esac
+ fi
+ sys_lib_dlsearch_path_spec='/usr/lib'
+ ;;
+
+tpf*)
+ # TPF is a cross-target only. Preferred cross-host = GNU/Linux.
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+uts4*)
+ version_type=linux
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+*)
+ dynamic_linker=no
+ ;;
+esac
+{ $as_echo "$as_me:$LINENO: result: $dynamic_linker" >&5
+$as_echo "$dynamic_linker" >&6; }
+test "$dynamic_linker" = no && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
+ sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+fi
+if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
+ sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+fi
+
+
+ { $as_echo "$as_me:$LINENO: checking how to hardcode library paths into programs" >&5
+$as_echo_n "checking how to hardcode library paths into programs... " >&6; }
+hardcode_action=
+if test -n "$hardcode_libdir_flag_spec" ||
+ test -n "$runpath_var" ||
+ test "X$hardcode_automatic" = "Xyes" ; then
+
+ # We can hardcode non-existent directories.
+ if test "$hardcode_direct" != no &&
+ # If the only mechanism to avoid hardcoding is shlibpath_var, we
+ # have to relink, otherwise we might link with an installed library
+ # when we should be linking with a yet-to-be-installed one
+ ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no &&
+ test "$hardcode_minus_L" != no; then
+ # Linking always hardcodes the temporary library directory.
+ hardcode_action=relink
+ else
+ # We can link without hardcoding, and we can hardcode nonexisting dirs.
+ hardcode_action=immediate
+ fi
+else
+ # We cannot hardcode anything, or else we can only hardcode existing
+ # directories.
+ hardcode_action=unsupported
+fi
+{ $as_echo "$as_me:$LINENO: result: $hardcode_action" >&5
+$as_echo "$hardcode_action" >&6; }
+
+if test "$hardcode_action" = relink ||
+ test "$inherit_rpath" = yes; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
+
+
+
+
+
+
+ if test "x$enable_dlopen" != xyes; then
+ enable_dlopen=unknown
+ enable_dlopen_self=unknown
+ enable_dlopen_self_static=unknown
+else
+ lt_cv_dlopen=no
+ lt_cv_dlopen_libs=
+
+ case $host_os in
+ beos*)
+ lt_cv_dlopen="load_add_on"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ;;
+
+ mingw* | pw32* | cegcc*)
+ lt_cv_dlopen="LoadLibrary"
+ lt_cv_dlopen_libs=
+ ;;
+
+ cygwin*)
+ lt_cv_dlopen="dlopen"
+ lt_cv_dlopen_libs=
+ ;;
+
+ darwin*)
+ # if libdl is installed we need to link against it
+ { $as_echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5
+$as_echo_n "checking for dlopen in -ldl... " >&6; }
+if test "${ac_cv_lib_dl_dlopen+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldl $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen ();
+int
+main ()
+{
+return dlopen ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_lib_dl_dlopen=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_lib_dl_dlopen=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5
+$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
+if test "x$ac_cv_lib_dl_dlopen" = x""yes; then
+ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
+else
+
+ lt_cv_dlopen="dyld"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+
+fi
+
+ ;;
+
+ *)
+ { $as_echo "$as_me:$LINENO: checking for shl_load" >&5
+$as_echo_n "checking for shl_load... " >&6; }
+if test "${ac_cv_func_shl_load+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* Define shl_load to an innocuous variant, in case <limits.h> declares shl_load.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define shl_load innocuous_shl_load
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char shl_load (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef shl_load
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char shl_load ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_shl_load || defined __stub___shl_load
+choke me
+#endif
+
+int
+main ()
+{
+return shl_load ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_func_shl_load=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_func_shl_load=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_func_shl_load" >&5
+$as_echo "$ac_cv_func_shl_load" >&6; }
+if test "x$ac_cv_func_shl_load" = x""yes; then
+ lt_cv_dlopen="shl_load"
+else
+ { $as_echo "$as_me:$LINENO: checking for shl_load in -ldld" >&5
+$as_echo_n "checking for shl_load in -ldld... " >&6; }
+if test "${ac_cv_lib_dld_shl_load+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldld $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char shl_load ();
+int
+main ()
+{
+return shl_load ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_lib_dld_shl_load=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_lib_dld_shl_load=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5
+$as_echo "$ac_cv_lib_dld_shl_load" >&6; }
+if test "x$ac_cv_lib_dld_shl_load" = x""yes; then
+ lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"
+else
+ { $as_echo "$as_me:$LINENO: checking for dlopen" >&5
+$as_echo_n "checking for dlopen... " >&6; }
+if test "${ac_cv_func_dlopen+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* Define dlopen to an innocuous variant, in case <limits.h> declares dlopen.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define dlopen innocuous_dlopen
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char dlopen (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef dlopen
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_dlopen || defined __stub___dlopen
+choke me
+#endif
+
+int
+main ()
+{
+return dlopen ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_func_dlopen=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_func_dlopen=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_func_dlopen" >&5
+$as_echo "$ac_cv_func_dlopen" >&6; }
+if test "x$ac_cv_func_dlopen" = x""yes; then
+ lt_cv_dlopen="dlopen"
+else
+ { $as_echo "$as_me:$LINENO: checking for dlopen in -ldl" >&5
+$as_echo_n "checking for dlopen in -ldl... " >&6; }
+if test "${ac_cv_lib_dl_dlopen+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldl $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen ();
+int
+main ()
+{
+return dlopen ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_lib_dl_dlopen=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_lib_dl_dlopen=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5
+$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
+if test "x$ac_cv_lib_dl_dlopen" = x""yes; then
+ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
+else
+ { $as_echo "$as_me:$LINENO: checking for dlopen in -lsvld" >&5
+$as_echo_n "checking for dlopen in -lsvld... " >&6; }
+if test "${ac_cv_lib_svld_dlopen+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lsvld $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen ();
+int
+main ()
+{
+return dlopen ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_lib_svld_dlopen=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_lib_svld_dlopen=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_svld_dlopen" >&5
+$as_echo "$ac_cv_lib_svld_dlopen" >&6; }
+if test "x$ac_cv_lib_svld_dlopen" = x""yes; then
+ lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"
+else
+ { $as_echo "$as_me:$LINENO: checking for dld_link in -ldld" >&5
+$as_echo_n "checking for dld_link in -ldld... " >&6; }
+if test "${ac_cv_lib_dld_dld_link+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldld $LIBS"
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dld_link ();
+int
+main ()
+{
+return dld_link ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ ac_cv_lib_dld_dld_link=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_lib_dld_dld_link=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5
+$as_echo "$ac_cv_lib_dld_dld_link" >&6; }
+if test "x$ac_cv_lib_dld_dld_link" = x""yes; then
+ lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+ ;;
+ esac
+
+ if test "x$lt_cv_dlopen" != xno; then
+ enable_dlopen=yes
+ else
+ enable_dlopen=no
+ fi
+
+ case $lt_cv_dlopen in
+ dlopen)
+ save_CPPFLAGS="$CPPFLAGS"
+ test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+ save_LDFLAGS="$LDFLAGS"
+ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+ save_LIBS="$LIBS"
+ LIBS="$lt_cv_dlopen_libs $LIBS"
+
+ { $as_echo "$as_me:$LINENO: checking whether a program can dlopen itself" >&5
+$as_echo_n "checking whether a program can dlopen itself... " >&6; }
+if test "${lt_cv_dlopen_self+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ lt_cv_dlopen_self=cross
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+#line 13555 "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW on the command line if we
+   find out it does not work on some platforms. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+void fnord() { int i=42;}
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ /* dlclose (self); */
+ }
+ else
+ puts (dlerror ());
+
+ return status;
+}
+_LT_EOF
+ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) >&5 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;;
+ x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;;
+ x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;;
+ esac
+ else :
+ # compilation failed
+ lt_cv_dlopen_self=no
+ fi
+fi
+rm -fr conftest*
+
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_dlopen_self" >&5
+$as_echo "$lt_cv_dlopen_self" >&6; }
+
+ if test "x$lt_cv_dlopen_self" = xyes; then
+ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+ { $as_echo "$as_me:$LINENO: checking whether a statically linked program can dlopen itself" >&5
+$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; }
+if test "${lt_cv_dlopen_self_static+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ lt_cv_dlopen_self_static=cross
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+#line 13651 "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW on the command line if we
+   find out it does not work on some platforms. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+void fnord() { int i=42;}
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ /* dlclose (self); */
+ }
+ else
+ puts (dlerror ());
+
+ return status;
+}
+_LT_EOF
+ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
+ (eval $ac_link) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) >&5 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;;
+ x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;;
+ x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;;
+ esac
+ else :
+ # compilation failed
+ lt_cv_dlopen_self_static=no
+ fi
+fi
+rm -fr conftest*
+
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $lt_cv_dlopen_self_static" >&5
+$as_echo "$lt_cv_dlopen_self_static" >&6; }
+ fi
+
+ CPPFLAGS="$save_CPPFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+ LIBS="$save_LIBS"
+ ;;
+ esac
+
+ case $lt_cv_dlopen_self in
+ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+ *) enable_dlopen_self=unknown ;;
+ esac
+
+ case $lt_cv_dlopen_self_static in
+ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+ *) enable_dlopen_self_static=unknown ;;
+ esac
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+striplib=
+old_striplib=
+{ $as_echo "$as_me:$LINENO: checking whether stripping libraries is possible" >&5
+$as_echo_n "checking whether stripping libraries is possible... " >&6; }
+if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+ test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+ test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+# FIXME - insert some real tests, host_os isn't really good enough
+ case $host_os in
+ darwin*)
+ if test -n "$STRIP" ; then
+ striplib="$STRIP -x"
+ old_striplib="$STRIP -S"
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+ else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+ fi
+ ;;
+ *)
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+ ;;
+ esac
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+ # Report which library types will actually be built
+ { $as_echo "$as_me:$LINENO: checking if libtool supports shared libraries" >&5
+$as_echo_n "checking if libtool supports shared libraries... " >&6; }
+ { $as_echo "$as_me:$LINENO: result: $can_build_shared" >&5
+$as_echo "$can_build_shared" >&6; }
+
+ { $as_echo "$as_me:$LINENO: checking whether to build shared libraries" >&5
+$as_echo_n "checking whether to build shared libraries... " >&6; }
+ test "$can_build_shared" = "no" && enable_shared=no
+
+ # On AIX, shared libraries and static libraries use the same namespace, and
+ # are all built from PIC.
+ case $host_os in
+ aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+
+ aix[4-9]*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+ esac
+ { $as_echo "$as_me:$LINENO: result: $enable_shared" >&5
+$as_echo "$enable_shared" >&6; }
+
+ { $as_echo "$as_me:$LINENO: checking whether to build static libraries" >&5
+$as_echo_n "checking whether to build static libraries... " >&6; }
+ # Make sure either enable_shared or enable_static is yes.
+ test "$enable_shared" = yes || enable_static=yes
+ { $as_echo "$as_me:$LINENO: result: $enable_static" >&5
+$as_echo "$enable_static" >&6; }
+
+
+
+
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+CC="$lt_save_CC"
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ac_config_commands="$ac_config_commands libtool"
+
+
+
+
+# Only expand once:
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}windres", so it can be a program name with args.
+set dummy ${ac_tool_prefix}windres; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_RC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$RC"; then
+ ac_cv_prog_RC="$RC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_RC="${ac_tool_prefix}windres"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+RC=$ac_cv_prog_RC
+if test -n "$RC"; then
+ { $as_echo "$as_me:$LINENO: result: $RC" >&5
+$as_echo "$RC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_RC"; then
+ ac_ct_RC=$RC
+ # Extract the first word of "windres", so it can be a program name with args.
+set dummy windres; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_RC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_RC"; then
+ ac_cv_prog_ac_ct_RC="$ac_ct_RC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_RC="windres"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_RC=$ac_cv_prog_ac_ct_RC
+if test -n "$ac_ct_RC"; then
+ { $as_echo "$as_me:$LINENO: result: $ac_ct_RC" >&5
+$as_echo "$ac_ct_RC" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_RC" = x; then
+ RC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ RC=$ac_ct_RC
+ fi
+else
+ RC="$ac_cv_prog_RC"
+fi
+
+
+
+
+# Source file extension for RC test sources.
+ac_ext=rc
+
+# Object file extension for compiled RC test sources.
+objext=o
+objext_RC=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }'
+
+# Code to be used in simple link tests
+lt_simple_link_test_code="$lt_simple_compile_test_code"
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+
+# save warnings/boilerplate of simple test code
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+
+
+# Allow CC to be a program name with arguments.
+lt_save_CC="$CC"
+lt_save_GCC=$GCC
+GCC=
+CC=${RC-"windres"}
+compiler=$CC
+compiler_RC=$CC
+for cc_temp in $compiler""; do
+ case $cc_temp in
+ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+ \-*) ;;
+ *) break;;
+ esac
+done
+cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"`
+
+lt_cv_prog_compiler_c_o_RC=yes
+
+if test -n "$compiler"; then
+ :
+
+
+
+fi
+
+GCC=$lt_save_GCC
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+CC="$lt_save_CC"
+
+
+# This is a bit wrong since it is possible to request that only some libs
+# are built as shared. Using that feature isn't so common though, and this
+# breaks only on Windows (at least for now) if the user enables only some
+# libs as shared.
+ if test "x$enable_shared" != xno; then
+ COND_SHARED_TRUE=
+ COND_SHARED_FALSE='#'
+else
+ COND_SHARED_TRUE='#'
+ COND_SHARED_FALSE=
+fi
+
+
+
+###############################################################################
+# Checks for libraries.
+###############################################################################
+
+echo
+echo "Initializing gettext:"
+
+
+ { $as_echo "$as_me:$LINENO: checking whether NLS is requested" >&5
+$as_echo_n "checking whether NLS is requested... " >&6; }
+ # Check whether --enable-nls was given.
+if test "${enable_nls+set}" = set; then
+ enableval=$enable_nls; USE_NLS=$enableval
+else
+ USE_NLS=yes
+fi
+
+ { $as_echo "$as_me:$LINENO: result: $USE_NLS" >&5
+$as_echo "$USE_NLS" >&6; }
+
+
+
+
+
+
+# Prepare PATH_SEPARATOR.
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+# Find out how to test for executable files. Don't use a zero-byte file,
+# as systems may use methods other than mode bits to determine executability.
+cat >conf$$.file <<_ASEOF
+#! /bin/sh
+exit 0
+_ASEOF
+chmod +x conf$$.file
+if test -x conf$$.file >/dev/null 2>&1; then
+ ac_executable_p="test -x"
+else
+ ac_executable_p="test -f"
+fi
+rm -f conf$$.file
+
+# Extract the first word of "msgfmt", so it can be a program name with args.
+set dummy msgfmt; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_path_MSGFMT+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ case "$MSGFMT" in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_MSGFMT="$MSGFMT" # Let the user override the test with a path.
+ ;;
+ *)
+ ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$ac_save_IFS"
+ test -z "$ac_dir" && ac_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then
+ echo "$as_me: trying $ac_dir/$ac_word..." >&5
+ if $ac_dir/$ac_word --statistics /dev/null >&5 2>&1 &&
+ (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then
+ ac_cv_path_MSGFMT="$ac_dir/$ac_word$ac_exec_ext"
+ break 2
+ fi
+ fi
+ done
+ done
+ IFS="$ac_save_IFS"
+ test -z "$ac_cv_path_MSGFMT" && ac_cv_path_MSGFMT=":"
+ ;;
+esac
+fi
+MSGFMT="$ac_cv_path_MSGFMT"
+if test "$MSGFMT" != ":"; then
+ { $as_echo "$as_me:$LINENO: result: $MSGFMT" >&5
+$as_echo "$MSGFMT" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ # Extract the first word of "gmsgfmt", so it can be a program name with args.
+set dummy gmsgfmt; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_path_GMSGFMT+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ case $GMSGFMT in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_GMSGFMT="$GMSGFMT" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_path_GMSGFMT="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_path_GMSGFMT" && ac_cv_path_GMSGFMT="$MSGFMT"
+ ;;
+esac
+fi
+GMSGFMT=$ac_cv_path_GMSGFMT
+if test -n "$GMSGFMT"; then
+ { $as_echo "$as_me:$LINENO: result: $GMSGFMT" >&5
+$as_echo "$GMSGFMT" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+
+ case `$MSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in
+ '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) MSGFMT_015=: ;;
+ *) MSGFMT_015=$MSGFMT ;;
+ esac
+
+ case `$GMSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in
+ '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) GMSGFMT_015=: ;;
+ *) GMSGFMT_015=$GMSGFMT ;;
+ esac
+
+
+
+# Prepare PATH_SEPARATOR.
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+# Find out how to test for executable files. Don't use a zero-byte file,
+# as systems may use methods other than mode bits to determine executability.
+cat >conf$$.file <<_ASEOF
+#! /bin/sh
+exit 0
+_ASEOF
+chmod +x conf$$.file
+if test -x conf$$.file >/dev/null 2>&1; then
+ ac_executable_p="test -x"
+else
+ ac_executable_p="test -f"
+fi
+rm -f conf$$.file
+
+# Extract the first word of "xgettext", so it can be a program name with args.
+set dummy xgettext; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_path_XGETTEXT+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ case "$XGETTEXT" in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_XGETTEXT="$XGETTEXT" # Let the user override the test with a path.
+ ;;
+ *)
+ ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$ac_save_IFS"
+ test -z "$ac_dir" && ac_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then
+ echo "$as_me: trying $ac_dir/$ac_word..." >&5
+ if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&5 2>&1 &&
+ (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then
+ ac_cv_path_XGETTEXT="$ac_dir/$ac_word$ac_exec_ext"
+ break 2
+ fi
+ fi
+ done
+ done
+ IFS="$ac_save_IFS"
+ test -z "$ac_cv_path_XGETTEXT" && ac_cv_path_XGETTEXT=":"
+ ;;
+esac
+fi
+XGETTEXT="$ac_cv_path_XGETTEXT"
+if test "$XGETTEXT" != ":"; then
+ { $as_echo "$as_me:$LINENO: result: $XGETTEXT" >&5
+$as_echo "$XGETTEXT" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ rm -f messages.po
+
+ case `$XGETTEXT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in
+ '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) XGETTEXT_015=: ;;
+ *) XGETTEXT_015=$XGETTEXT ;;
+ esac
+
+
+
+# Prepare PATH_SEPARATOR.
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+# Find out how to test for executable files. Don't use a zero-byte file,
+# as systems may use methods other than mode bits to determine executability.
+cat >conf$$.file <<_ASEOF
+#! /bin/sh
+exit 0
+_ASEOF
+chmod +x conf$$.file
+if test -x conf$$.file >/dev/null 2>&1; then
+ ac_executable_p="test -x"
+else
+ ac_executable_p="test -f"
+fi
+rm -f conf$$.file
+
+# Extract the first word of "msgmerge", so it can be a program name with args.
+set dummy msgmerge; ac_word=$2
+{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_path_MSGMERGE+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ case "$MSGMERGE" in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_MSGMERGE="$MSGMERGE" # Let the user override the test with a path.
+ ;;
+ *)
+ ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$ac_save_IFS"
+ test -z "$ac_dir" && ac_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then
+ echo "$as_me: trying $ac_dir/$ac_word..." >&5
+ if $ac_dir/$ac_word --update -q /dev/null /dev/null >&5 2>&1; then
+ ac_cv_path_MSGMERGE="$ac_dir/$ac_word$ac_exec_ext"
+ break 2
+ fi
+ fi
+ done
+ done
+ IFS="$ac_save_IFS"
+ test -z "$ac_cv_path_MSGMERGE" && ac_cv_path_MSGMERGE=":"
+ ;;
+esac
+fi
+MSGMERGE="$ac_cv_path_MSGMERGE"
+if test "$MSGMERGE" != ":"; then
+ { $as_echo "$as_me:$LINENO: result: $MSGMERGE" >&5
+$as_echo "$MSGMERGE" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$localedir" || localedir='${datadir}/locale'
+
+
+ ac_config_commands="$ac_config_commands po-directories"
+
+
+
+ if test "X$prefix" = "XNONE"; then
+ acl_final_prefix="$ac_default_prefix"
+ else
+ acl_final_prefix="$prefix"
+ fi
+ if test "X$exec_prefix" = "XNONE"; then
+ acl_final_exec_prefix='${prefix}'
+ else
+ acl_final_exec_prefix="$exec_prefix"
+ fi
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ eval acl_final_exec_prefix=\"$acl_final_exec_prefix\"
+ prefix="$acl_save_prefix"
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then
+ withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi
+
+# Prepare PATH_SEPARATOR.
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ { $as_echo "$as_me:$LINENO: checking for ld used by GCC" >&5
+$as_echo_n "checking for ld used by GCC... " >&6; }
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [\\/]* | [A-Za-z]:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the path of ld
+ ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
+ while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ { $as_echo "$as_me:$LINENO: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+ { $as_echo "$as_me:$LINENO: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if test "${acl_cv_path_LD+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$LD"; then
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}"
+ for ac_dir in $PATH; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ acl_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some GNU ld's only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$acl_cv_path_LD" -v 2>&1 < /dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break ;;
+ *)
+ test "$with_gnu_ld" != yes && break ;;
+ esac
+ fi
+ done
+ IFS="$ac_save_ifs"
+else
+ acl_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$acl_cv_path_LD"
+if test -n "$LD"; then
+ { $as_echo "$as_me:$LINENO: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && { { $as_echo "$as_me:$LINENO: error: no acceptable ld found in \$PATH" >&5
+$as_echo "$as_me: error: no acceptable ld found in \$PATH" >&2;}
+ { (exit 1); exit 1; }; }
+{ $as_echo "$as_me:$LINENO: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if test "${acl_cv_prog_gnu_ld+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ # I'd rather use --version here, but apparently some GNU ld's only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ acl_cv_prog_gnu_ld=yes ;;
+*)
+ acl_cv_prog_gnu_ld=no ;;
+esac
+fi
+{ $as_echo "$as_me:$LINENO: result: $acl_cv_prog_gnu_ld" >&5
+$as_echo "$acl_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$acl_cv_prog_gnu_ld
+
+
+
+
+ { $as_echo "$as_me:$LINENO: checking for shared library run path origin" >&5
+$as_echo_n "checking for shared library run path origin... " >&6; }
+if test "${acl_cv_rpath+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+
+ CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \
+ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh
+ . ./conftest.sh
+ rm -f ./conftest.sh
+ acl_cv_rpath=done
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $acl_cv_rpath" >&5
+$as_echo "$acl_cv_rpath" >&6; }
+ wl="$acl_cv_wl"
+ libext="$acl_cv_libext"
+ shlibext="$acl_cv_shlibext"
+ hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec"
+ hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator"
+ hardcode_direct="$acl_cv_hardcode_direct"
+ hardcode_minus_L="$acl_cv_hardcode_minus_L"
+ # Check whether --enable-rpath was given.
+if test "${enable_rpath+set}" = set; then
+ enableval=$enable_rpath; :
+else
+ enable_rpath=yes
+fi
+
+
+
+ acl_libdirstem=lib
+ searchpath=`(LC_ALL=C $CC -print-search-dirs) 2>/dev/null | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'`
+ if test -n "$searchpath"; then
+ acl_save_IFS="${IFS= }"; IFS=":"
+ for searchdir in $searchpath; do
+ if test -d "$searchdir"; then
+ case "$searchdir" in
+ */lib64/ | */lib64 ) acl_libdirstem=lib64 ;;
+ *) searchdir=`cd "$searchdir" && pwd`
+ case "$searchdir" in
+ */lib64 ) acl_libdirstem=lib64 ;;
+ esac ;;
+ esac
+ fi
+ done
+ IFS="$acl_save_IFS"
+ fi
+
+
+
+
+
+
+
+
+ use_additional=yes
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+
+ eval additional_includedir=\"$includedir\"
+ eval additional_libdir=\"$libdir\"
+
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+
+# Check whether --with-libiconv-prefix was given.
+if test "${with_libiconv_prefix+set}" = set; then
+ withval=$with_libiconv_prefix;
+ if test "X$withval" = "Xno"; then
+ use_additional=no
+ else
+ if test "X$withval" = "X"; then
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+
+ eval additional_includedir=\"$includedir\"
+ eval additional_libdir=\"$libdir\"
+
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ else
+ additional_includedir="$withval/include"
+ additional_libdir="$withval/$acl_libdirstem"
+ fi
+ fi
+
+fi
+
+ LIBICONV=
+ LTLIBICONV=
+ INCICONV=
+ rpathdirs=
+ ltrpathdirs=
+ names_already_handled=
+ names_next_round='iconv '
+ while test -n "$names_next_round"; do
+ names_this_round="$names_next_round"
+ names_next_round=
+ for name in $names_this_round; do
+ already_handled=
+ for n in $names_already_handled; do
+ if test "$n" = "$name"; then
+ already_handled=yes
+ break
+ fi
+ done
+ if test -z "$already_handled"; then
+ names_already_handled="$names_already_handled $name"
+ uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'`
+ eval value=\"\$HAVE_LIB$uppername\"
+ if test -n "$value"; then
+ if test "$value" = yes; then
+ eval value=\"\$LIB$uppername\"
+ test -z "$value" || LIBICONV="${LIBICONV}${LIBICONV:+ }$value"
+ eval value=\"\$LTLIB$uppername\"
+ test -z "$value" || LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$value"
+ else
+ :
+ fi
+ else
+ found_dir=
+ found_la=
+ found_so=
+ found_a=
+ if test $use_additional = yes; then
+ if test -n "$shlibext" \
+ && { test -f "$additional_libdir/lib$name.$shlibext" \
+ || { test "$shlibext" = dll \
+ && test -f "$additional_libdir/lib$name.dll.a"; }; }; then
+ found_dir="$additional_libdir"
+ if test -f "$additional_libdir/lib$name.$shlibext"; then
+ found_so="$additional_libdir/lib$name.$shlibext"
+ else
+ found_so="$additional_libdir/lib$name.dll.a"
+ fi
+ if test -f "$additional_libdir/lib$name.la"; then
+ found_la="$additional_libdir/lib$name.la"
+ fi
+ else
+ if test -f "$additional_libdir/lib$name.$libext"; then
+ found_dir="$additional_libdir"
+ found_a="$additional_libdir/lib$name.$libext"
+ if test -f "$additional_libdir/lib$name.la"; then
+ found_la="$additional_libdir/lib$name.la"
+ fi
+ fi
+ fi
+ fi
+ if test "X$found_dir" = "X"; then
+ for x in $LDFLAGS $LTLIBICONV; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ case "$x" in
+ -L*)
+ dir=`echo "X$x" | sed -e 's/^X-L//'`
+ if test -n "$shlibext" \
+ && { test -f "$dir/lib$name.$shlibext" \
+ || { test "$shlibext" = dll \
+ && test -f "$dir/lib$name.dll.a"; }; }; then
+ found_dir="$dir"
+ if test -f "$dir/lib$name.$shlibext"; then
+ found_so="$dir/lib$name.$shlibext"
+ else
+ found_so="$dir/lib$name.dll.a"
+ fi
+ if test -f "$dir/lib$name.la"; then
+ found_la="$dir/lib$name.la"
+ fi
+ else
+ if test -f "$dir/lib$name.$libext"; then
+ found_dir="$dir"
+ found_a="$dir/lib$name.$libext"
+ if test -f "$dir/lib$name.la"; then
+ found_la="$dir/lib$name.la"
+ fi
+ fi
+ fi
+ ;;
+ esac
+ if test "X$found_dir" != "X"; then
+ break
+ fi
+ done
+ fi
+ if test "X$found_dir" != "X"; then
+ LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$found_dir -l$name"
+ if test "X$found_so" != "X"; then
+ if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then
+ LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so"
+ else
+ haveit=
+ for x in $ltrpathdirs; do
+ if test "X$x" = "X$found_dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ ltrpathdirs="$ltrpathdirs $found_dir"
+ fi
+ if test "$hardcode_direct" = yes; then
+ LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so"
+ else
+ if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then
+ LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so"
+ haveit=
+ for x in $rpathdirs; do
+ if test "X$x" = "X$found_dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ rpathdirs="$rpathdirs $found_dir"
+ fi
+ else
+ haveit=
+ for x in $LDFLAGS $LIBICONV; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ if test "X$x" = "X-L$found_dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir"
+ fi
+ if test "$hardcode_minus_L" != no; then
+ LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so"
+ else
+ LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name"
+ fi
+ fi
+ fi
+ fi
+ else
+ if test "X$found_a" != "X"; then
+ LIBICONV="${LIBICONV}${LIBICONV:+ }$found_a"
+ else
+ LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir -l$name"
+ fi
+ fi
+ additional_includedir=
+ case "$found_dir" in
+ */$acl_libdirstem | */$acl_libdirstem/)
+ basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'`
+ additional_includedir="$basedir/include"
+ ;;
+ esac
+ if test "X$additional_includedir" != "X"; then
+ if test "X$additional_includedir" != "X/usr/include"; then
+ haveit=
+ if test "X$additional_includedir" = "X/usr/local/include"; then
+ if test -n "$GCC"; then
+ case $host_os in
+ linux* | gnu* | k*bsd*-gnu) haveit=yes;;
+ esac
+ fi
+ fi
+ if test -z "$haveit"; then
+ for x in $CPPFLAGS $INCICONV; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ if test "X$x" = "X-I$additional_includedir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test -d "$additional_includedir"; then
+ INCICONV="${INCICONV}${INCICONV:+ }-I$additional_includedir"
+ fi
+ fi
+ fi
+ fi
+ fi
+ if test -n "$found_la"; then
+ save_libdir="$libdir"
+ case "$found_la" in
+ */* | *\\*) . "$found_la" ;;
+ *) . "./$found_la" ;;
+ esac
+ libdir="$save_libdir"
+ for dep in $dependency_libs; do
+ case "$dep" in
+ -L*)
+ additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'`
+ if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then
+ haveit=
+ if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then
+ if test -n "$GCC"; then
+ case $host_os in
+ linux* | gnu* | k*bsd*-gnu) haveit=yes;;
+ esac
+ fi
+ fi
+ if test -z "$haveit"; then
+ haveit=
+ for x in $LDFLAGS $LIBICONV; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ if test "X$x" = "X-L$additional_libdir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test -d "$additional_libdir"; then
+ LIBICONV="${LIBICONV}${LIBICONV:+ }-L$additional_libdir"
+ fi
+ fi
+ haveit=
+ for x in $LDFLAGS $LTLIBICONV; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ if test "X$x" = "X-L$additional_libdir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test -d "$additional_libdir"; then
+ LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$additional_libdir"
+ fi
+ fi
+ fi
+ fi
+ ;;
+ -R*)
+ dir=`echo "X$dep" | sed -e 's/^X-R//'`
+ if test "$enable_rpath" != no; then
+ haveit=
+ for x in $rpathdirs; do
+ if test "X$x" = "X$dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ rpathdirs="$rpathdirs $dir"
+ fi
+ haveit=
+ for x in $ltrpathdirs; do
+ if test "X$x" = "X$dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ ltrpathdirs="$ltrpathdirs $dir"
+ fi
+ fi
+ ;;
+ -l*)
+ names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'`
+ ;;
+ *.la)
+ names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'`
+ ;;
+ *)
+ LIBICONV="${LIBICONV}${LIBICONV:+ }$dep"
+ LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$dep"
+ ;;
+ esac
+ done
+ fi
+ else
+ LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name"
+ LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-l$name"
+ fi
+ fi
+ fi
+ done
+ done
+ if test "X$rpathdirs" != "X"; then
+ if test -n "$hardcode_libdir_separator"; then
+ alldirs=
+ for found_dir in $rpathdirs; do
+ alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir"
+ done
+ acl_save_libdir="$libdir"
+ libdir="$alldirs"
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ libdir="$acl_save_libdir"
+ LIBICONV="${LIBICONV}${LIBICONV:+ }$flag"
+ else
+ for found_dir in $rpathdirs; do
+ acl_save_libdir="$libdir"
+ libdir="$found_dir"
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ libdir="$acl_save_libdir"
+ LIBICONV="${LIBICONV}${LIBICONV:+ }$flag"
+ done
+ fi
+ fi
+ if test "X$ltrpathdirs" != "X"; then
+ for found_dir in $ltrpathdirs; do
+ LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-R$found_dir"
+ done
+ fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ { $as_echo "$as_me:$LINENO: checking for CFPreferencesCopyAppValue" >&5
+$as_echo_n "checking for CFPreferencesCopyAppValue... " >&6; }
+if test "${gt_cv_func_CFPreferencesCopyAppValue+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ gt_save_LIBS="$LIBS"
+ LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <CoreFoundation/CFPreferences.h>
+int
+main ()
+{
+CFPreferencesCopyAppValue(NULL, NULL)
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ gt_cv_func_CFPreferencesCopyAppValue=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ gt_cv_func_CFPreferencesCopyAppValue=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ LIBS="$gt_save_LIBS"
+fi
+{ $as_echo "$as_me:$LINENO: result: $gt_cv_func_CFPreferencesCopyAppValue" >&5
+$as_echo "$gt_cv_func_CFPreferencesCopyAppValue" >&6; }
+ if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_CFPREFERENCESCOPYAPPVALUE 1
+_ACEOF
+
+ fi
+ { $as_echo "$as_me:$LINENO: checking for CFLocaleCopyCurrent" >&5
+$as_echo_n "checking for CFLocaleCopyCurrent... " >&6; }
+if test "${gt_cv_func_CFLocaleCopyCurrent+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ gt_save_LIBS="$LIBS"
+ LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <CoreFoundation/CFLocale.h>
+int
+main ()
+{
+CFLocaleCopyCurrent();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ gt_cv_func_CFLocaleCopyCurrent=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ gt_cv_func_CFLocaleCopyCurrent=no
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ LIBS="$gt_save_LIBS"
+fi
+{ $as_echo "$as_me:$LINENO: result: $gt_cv_func_CFLocaleCopyCurrent" >&5
+$as_echo "$gt_cv_func_CFLocaleCopyCurrent" >&6; }
+ if test $gt_cv_func_CFLocaleCopyCurrent = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_CFLOCALECOPYCURRENT 1
+_ACEOF
+
+ fi
+ INTL_MACOSX_LIBS=
+ if test $gt_cv_func_CFPreferencesCopyAppValue = yes || test $gt_cv_func_CFLocaleCopyCurrent = yes; then
+ INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation"
+ fi
+
+
+
+
+
+
+ LIBINTL=
+ LTLIBINTL=
+ POSUB=
+
+ case " $gt_needs " in
+ *" need-formatstring-macros "*) gt_api_version=3 ;;
+ *" need-ngettext "*) gt_api_version=2 ;;
+ *) gt_api_version=1 ;;
+ esac
+ gt_func_gnugettext_libc="gt_cv_func_gnugettext${gt_api_version}_libc"
+ gt_func_gnugettext_libintl="gt_cv_func_gnugettext${gt_api_version}_libintl"
+
+ if test "$USE_NLS" = "yes"; then
+ gt_use_preinstalled_gnugettext=no
+
+
+ if test $gt_api_version -ge 3; then
+ gt_revision_test_code='
+#ifndef __GNU_GETTEXT_SUPPORTED_REVISION
+#define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1)
+#endif
+typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1];
+'
+ else
+ gt_revision_test_code=
+ fi
+ if test $gt_api_version -ge 2; then
+ gt_expression_test_code=' + * ngettext ("", "", 0)'
+ else
+ gt_expression_test_code=
+ fi
+
+ { $as_echo "$as_me:$LINENO: checking for GNU gettext in libc" >&5
+$as_echo_n "checking for GNU gettext in libc... " >&6; }
+if { as_var=$gt_func_gnugettext_libc; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <libintl.h>
+$gt_revision_test_code
+extern int _nl_msg_cat_cntr;
+extern int *_nl_domain_bindings;
+int
+main ()
+{
+bindtextdomain ("", "");
+return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_domain_bindings
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ eval "$gt_func_gnugettext_libc=yes"
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$gt_func_gnugettext_libc=no"
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+ac_res=`eval 'as_val=${'$gt_func_gnugettext_libc'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+
+ if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then
+
+
+
+
+
+ am_save_CPPFLAGS="$CPPFLAGS"
+
+ for element in $INCICONV; do
+ haveit=
+ for x in $CPPFLAGS; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ if test "X$x" = "X$element"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element"
+ fi
+ done
+
+
+ { $as_echo "$as_me:$LINENO: checking for iconv" >&5
+$as_echo_n "checking for iconv... " >&6; }
+if test "${am_cv_func_iconv+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+
+ am_cv_func_iconv="no, consider installing GNU libiconv"
+ am_cv_lib_iconv=no
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <iconv.h>
+int
+main ()
+{
+iconv_t cd = iconv_open("","");
+ iconv(cd,NULL,NULL,NULL,NULL);
+ iconv_close(cd);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ am_cv_func_iconv=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ if test "$am_cv_func_iconv" != yes; then
+ am_save_LIBS="$LIBS"
+ LIBS="$LIBS $LIBICONV"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <iconv.h>
+int
+main ()
+{
+iconv_t cd = iconv_open("","");
+ iconv(cd,NULL,NULL,NULL,NULL);
+ iconv_close(cd);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ am_cv_lib_iconv=yes
+ am_cv_func_iconv=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ LIBS="$am_save_LIBS"
+ fi
+
+fi
+{ $as_echo "$as_me:$LINENO: result: $am_cv_func_iconv" >&5
+$as_echo "$am_cv_func_iconv" >&6; }
+ if test "$am_cv_func_iconv" = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_ICONV 1
+_ACEOF
+
+ fi
+ if test "$am_cv_lib_iconv" = yes; then
+ { $as_echo "$as_me:$LINENO: checking how to link with libiconv" >&5
+$as_echo_n "checking how to link with libiconv... " >&6; }
+ { $as_echo "$as_me:$LINENO: result: $LIBICONV" >&5
+$as_echo "$LIBICONV" >&6; }
+ else
+ CPPFLAGS="$am_save_CPPFLAGS"
+ LIBICONV=
+ LTLIBICONV=
+ fi
+
+
+
+
+
+
+
+ use_additional=yes
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+
+ eval additional_includedir=\"$includedir\"
+ eval additional_libdir=\"$libdir\"
+
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+
+# Check whether --with-libintl-prefix was given.
+if test "${with_libintl_prefix+set}" = set; then
+ withval=$with_libintl_prefix;
+ if test "X$withval" = "Xno"; then
+ use_additional=no
+ else
+ if test "X$withval" = "X"; then
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+
+ eval additional_includedir=\"$includedir\"
+ eval additional_libdir=\"$libdir\"
+
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ else
+ additional_includedir="$withval/include"
+ additional_libdir="$withval/$acl_libdirstem"
+ fi
+ fi
+
+fi
+
+ LIBINTL=
+ LTLIBINTL=
+ INCINTL=
+ rpathdirs=
+ ltrpathdirs=
+ names_already_handled=
+ names_next_round='intl '
+ while test -n "$names_next_round"; do
+ names_this_round="$names_next_round"
+ names_next_round=
+ for name in $names_this_round; do
+ already_handled=
+ for n in $names_already_handled; do
+ if test "$n" = "$name"; then
+ already_handled=yes
+ break
+ fi
+ done
+ if test -z "$already_handled"; then
+ names_already_handled="$names_already_handled $name"
+ uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'`
+ eval value=\"\$HAVE_LIB$uppername\"
+ if test -n "$value"; then
+ if test "$value" = yes; then
+ eval value=\"\$LIB$uppername\"
+ test -z "$value" || LIBINTL="${LIBINTL}${LIBINTL:+ }$value"
+ eval value=\"\$LTLIB$uppername\"
+ test -z "$value" || LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$value"
+ else
+ :
+ fi
+ else
+ found_dir=
+ found_la=
+ found_so=
+ found_a=
+ if test $use_additional = yes; then
+ if test -n "$shlibext" \
+ && { test -f "$additional_libdir/lib$name.$shlibext" \
+ || { test "$shlibext" = dll \
+ && test -f "$additional_libdir/lib$name.dll.a"; }; }; then
+ found_dir="$additional_libdir"
+ if test -f "$additional_libdir/lib$name.$shlibext"; then
+ found_so="$additional_libdir/lib$name.$shlibext"
+ else
+ found_so="$additional_libdir/lib$name.dll.a"
+ fi
+ if test -f "$additional_libdir/lib$name.la"; then
+ found_la="$additional_libdir/lib$name.la"
+ fi
+ else
+ if test -f "$additional_libdir/lib$name.$libext"; then
+ found_dir="$additional_libdir"
+ found_a="$additional_libdir/lib$name.$libext"
+ if test -f "$additional_libdir/lib$name.la"; then
+ found_la="$additional_libdir/lib$name.la"
+ fi
+ fi
+ fi
+ fi
+ if test "X$found_dir" = "X"; then
+ for x in $LDFLAGS $LTLIBINTL; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ case "$x" in
+ -L*)
+ dir=`echo "X$x" | sed -e 's/^X-L//'`
+ if test -n "$shlibext" \
+ && { test -f "$dir/lib$name.$shlibext" \
+ || { test "$shlibext" = dll \
+ && test -f "$dir/lib$name.dll.a"; }; }; then
+ found_dir="$dir"
+ if test -f "$dir/lib$name.$shlibext"; then
+ found_so="$dir/lib$name.$shlibext"
+ else
+ found_so="$dir/lib$name.dll.a"
+ fi
+ if test -f "$dir/lib$name.la"; then
+ found_la="$dir/lib$name.la"
+ fi
+ else
+ if test -f "$dir/lib$name.$libext"; then
+ found_dir="$dir"
+ found_a="$dir/lib$name.$libext"
+ if test -f "$dir/lib$name.la"; then
+ found_la="$dir/lib$name.la"
+ fi
+ fi
+ fi
+ ;;
+ esac
+ if test "X$found_dir" != "X"; then
+ break
+ fi
+ done
+ fi
+ if test "X$found_dir" != "X"; then
+ LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$found_dir -l$name"
+ if test "X$found_so" != "X"; then
+ if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then
+ LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so"
+ else
+ haveit=
+ for x in $ltrpathdirs; do
+ if test "X$x" = "X$found_dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ ltrpathdirs="$ltrpathdirs $found_dir"
+ fi
+ if test "$hardcode_direct" = yes; then
+ LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so"
+ else
+ if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then
+ LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so"
+ haveit=
+ for x in $rpathdirs; do
+ if test "X$x" = "X$found_dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ rpathdirs="$rpathdirs $found_dir"
+ fi
+ else
+ haveit=
+ for x in $LDFLAGS $LIBINTL; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ if test "X$x" = "X-L$found_dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir"
+ fi
+ if test "$hardcode_minus_L" != no; then
+ LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so"
+ else
+ LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name"
+ fi
+ fi
+ fi
+ fi
+ else
+ if test "X$found_a" != "X"; then
+ LIBINTL="${LIBINTL}${LIBINTL:+ }$found_a"
+ else
+ LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir -l$name"
+ fi
+ fi
+ additional_includedir=
+ case "$found_dir" in
+ */$acl_libdirstem | */$acl_libdirstem/)
+ basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'`
+ additional_includedir="$basedir/include"
+ ;;
+ esac
+ if test "X$additional_includedir" != "X"; then
+ if test "X$additional_includedir" != "X/usr/include"; then
+ haveit=
+ if test "X$additional_includedir" = "X/usr/local/include"; then
+ if test -n "$GCC"; then
+ case $host_os in
+ linux* | gnu* | k*bsd*-gnu) haveit=yes;;
+ esac
+ fi
+ fi
+ if test -z "$haveit"; then
+ for x in $CPPFLAGS $INCINTL; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ if test "X$x" = "X-I$additional_includedir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test -d "$additional_includedir"; then
+ INCINTL="${INCINTL}${INCINTL:+ }-I$additional_includedir"
+ fi
+ fi
+ fi
+ fi
+ fi
+ if test -n "$found_la"; then
+ save_libdir="$libdir"
+ case "$found_la" in
+ */* | *\\*) . "$found_la" ;;
+ *) . "./$found_la" ;;
+ esac
+ libdir="$save_libdir"
+ for dep in $dependency_libs; do
+ case "$dep" in
+ -L*)
+ additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'`
+ if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then
+ haveit=
+ if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then
+ if test -n "$GCC"; then
+ case $host_os in
+ linux* | gnu* | k*bsd*-gnu) haveit=yes;;
+ esac
+ fi
+ fi
+ if test -z "$haveit"; then
+ haveit=
+ for x in $LDFLAGS $LIBINTL; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ if test "X$x" = "X-L$additional_libdir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test -d "$additional_libdir"; then
+ LIBINTL="${LIBINTL}${LIBINTL:+ }-L$additional_libdir"
+ fi
+ fi
+ haveit=
+ for x in $LDFLAGS $LTLIBINTL; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ if test "X$x" = "X-L$additional_libdir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test -d "$additional_libdir"; then
+ LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$additional_libdir"
+ fi
+ fi
+ fi
+ fi
+ ;;
+ -R*)
+ dir=`echo "X$dep" | sed -e 's/^X-R//'`
+ if test "$enable_rpath" != no; then
+ haveit=
+ for x in $rpathdirs; do
+ if test "X$x" = "X$dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ rpathdirs="$rpathdirs $dir"
+ fi
+ haveit=
+ for x in $ltrpathdirs; do
+ if test "X$x" = "X$dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ ltrpathdirs="$ltrpathdirs $dir"
+ fi
+ fi
+ ;;
+ -l*)
+ names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'`
+ ;;
+ *.la)
+ names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'`
+ ;;
+ *)
+ LIBINTL="${LIBINTL}${LIBINTL:+ }$dep"
+ LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$dep"
+ ;;
+ esac
+ done
+ fi
+ else
+ LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name"
+ LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-l$name"
+ fi
+ fi
+ fi
+ done
+ done
+ if test "X$rpathdirs" != "X"; then
+ if test -n "$hardcode_libdir_separator"; then
+ alldirs=
+ for found_dir in $rpathdirs; do
+ alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir"
+ done
+ acl_save_libdir="$libdir"
+ libdir="$alldirs"
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ libdir="$acl_save_libdir"
+ LIBINTL="${LIBINTL}${LIBINTL:+ }$flag"
+ else
+ for found_dir in $rpathdirs; do
+ acl_save_libdir="$libdir"
+ libdir="$found_dir"
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ libdir="$acl_save_libdir"
+ LIBINTL="${LIBINTL}${LIBINTL:+ }$flag"
+ done
+ fi
+ fi
+ if test "X$ltrpathdirs" != "X"; then
+ for found_dir in $ltrpathdirs; do
+ LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-R$found_dir"
+ done
+ fi
+
+ { $as_echo "$as_me:$LINENO: checking for GNU gettext in libintl" >&5
+$as_echo_n "checking for GNU gettext in libintl... " >&6; }
+if { as_var=$gt_func_gnugettext_libintl; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ gt_save_CPPFLAGS="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $INCINTL"
+ gt_save_LIBS="$LIBS"
+ LIBS="$LIBS $LIBINTL"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <libintl.h>
+$gt_revision_test_code
+extern int _nl_msg_cat_cntr;
+extern
+#ifdef __cplusplus
+"C"
+#endif
+const char *_nl_expand_alias (const char *);
+int
+main ()
+{
+bindtextdomain ("", "");
+return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_expand_alias ("")
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ eval "$gt_func_gnugettext_libintl=yes"
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$gt_func_gnugettext_libintl=no"
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" != yes; } && test -n "$LIBICONV"; then
+ LIBS="$LIBS $LIBICONV"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <libintl.h>
+$gt_revision_test_code
+extern int _nl_msg_cat_cntr;
+extern
+#ifdef __cplusplus
+"C"
+#endif
+const char *_nl_expand_alias (const char *);
+int
+main ()
+{
+bindtextdomain ("", "");
+return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_expand_alias ("")
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ LIBINTL="$LIBINTL $LIBICONV"
+ LTLIBINTL="$LTLIBINTL $LTLIBICONV"
+ eval "$gt_func_gnugettext_libintl=yes"
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+ fi
+ CPPFLAGS="$gt_save_CPPFLAGS"
+ LIBS="$gt_save_LIBS"
+fi
+ac_res=`eval 'as_val=${'$gt_func_gnugettext_libintl'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ fi
+
+ if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" = "yes"; } \
+ || { { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; } \
+ && test "$PACKAGE" != gettext-runtime \
+ && test "$PACKAGE" != gettext-tools; }; then
+ gt_use_preinstalled_gnugettext=yes
+ else
+ LIBINTL=
+ LTLIBINTL=
+ INCINTL=
+ fi
+
+
+
+ if test -n "$INTL_MACOSX_LIBS"; then
+ if test "$gt_use_preinstalled_gnugettext" = "yes" \
+ || test "$nls_cv_use_gnu_gettext" = "yes"; then
+ LIBINTL="$LIBINTL $INTL_MACOSX_LIBS"
+ LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS"
+ fi
+ fi
+
+ if test "$gt_use_preinstalled_gnugettext" = "yes" \
+ || test "$nls_cv_use_gnu_gettext" = "yes"; then
+
+cat >>confdefs.h <<\_ACEOF
+#define ENABLE_NLS 1
+_ACEOF
+
+ else
+ USE_NLS=no
+ fi
+ fi
+
+ { $as_echo "$as_me:$LINENO: checking whether to use NLS" >&5
+$as_echo_n "checking whether to use NLS... " >&6; }
+ { $as_echo "$as_me:$LINENO: result: $USE_NLS" >&5
+$as_echo "$USE_NLS" >&6; }
+ if test "$USE_NLS" = "yes"; then
+ { $as_echo "$as_me:$LINENO: checking where the gettext function comes from" >&5
+$as_echo_n "checking where the gettext function comes from... " >&6; }
+ if test "$gt_use_preinstalled_gnugettext" = "yes"; then
+ if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then
+ gt_source="external libintl"
+ else
+ gt_source="libc"
+ fi
+ else
+ gt_source="included intl directory"
+ fi
+ { $as_echo "$as_me:$LINENO: result: $gt_source" >&5
+$as_echo "$gt_source" >&6; }
+ fi
+
+ if test "$USE_NLS" = "yes"; then
+
+ if test "$gt_use_preinstalled_gnugettext" = "yes"; then
+ if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then
+ { $as_echo "$as_me:$LINENO: checking how to link with libintl" >&5
+$as_echo_n "checking how to link with libintl... " >&6; }
+ { $as_echo "$as_me:$LINENO: result: $LIBINTL" >&5
+$as_echo "$LIBINTL" >&6; }
+
+ for element in $INCINTL; do
+ haveit=
+ for x in $CPPFLAGS; do
+
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ eval x=\"$x\"
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+
+ if test "X$x" = "X$element"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element"
+ fi
+ done
+
+ fi
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_GETTEXT 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_DCGETTEXT 1
+_ACEOF
+
+ fi
+
+ POSUB=po
+ fi
+
+
+
+ INTLLIBS="$LIBINTL"
+
+
+
+
+
+
+
+###############################################################################
+# Checks for header files.
+###############################################################################
+
+echo
+echo "System headers and functions:"
+
+# There is currently no workarounds in this package if some of
+# these headers are missing.
+
+
+
+for ac_header in fcntl.h limits.h sys/time.h
+do
+as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5
+$as_echo_n "checking $ac_header usability... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_header_compiler=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_compiler=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5
+$as_echo_n "checking $ac_header presence... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ ac_header_preproc=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
+ yes:no: )
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
+ ac_header_preproc=yes
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+$as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
+$as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
+ ( cat <<\_ASBOX
+## --------------------------------------- ##
+## Report this to lasse.collin@tukaani.org ##
+## --------------------------------------- ##
+_ASBOX
+ ) | sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ eval "$as_ac_Header=\$ac_header_preproc"
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+
+fi
+as_val=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ if test "x$as_val" = x""yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+else
+ { { $as_echo "$as_me:$LINENO: error: Required header file(s) are missing." >&5
+$as_echo "$as_me: error: Required header file(s) are missing." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+done
+
+
+# If any of these headers are missing, things should still work correctly:
+
+
+
+for ac_header in sys/param.h sys/sysctl.h byteswap.h
+do
+as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+#endif
+
+
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ eval "$as_ac_Header=yes"
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$as_ac_Header=no"
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+as_val=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ if test "x$as_val" = x""yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+# Even if we have byteswap.h, we may lack the specific macros/functions.
+if test x$ac_cv_header_byteswap_h = xyes ; then
+
+ { $as_echo "$as_me:$LINENO: checking if bswap_16 is available" >&5
+$as_echo_n "checking if bswap_16 is available... " >&6; }
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <byteswap.h>
+int
+main(void)
+{
+ bswap_16(42);
+ return 0;
+}
+
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BSWAP_16 1
+_ACEOF
+
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+
+
+ { $as_echo "$as_me:$LINENO: checking if bswap_32 is available" >&5
+$as_echo_n "checking if bswap_32 is available... " >&6; }
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <byteswap.h>
+int
+main(void)
+{
+ bswap_32(42);
+ return 0;
+}
+
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BSWAP_32 1
+_ACEOF
+
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+
+
+ { $as_echo "$as_me:$LINENO: checking if bswap_64 is available" >&5
+$as_echo_n "checking if bswap_64 is available... " >&6; }
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <byteswap.h>
+int
+main(void)
+{
+ bswap_64(42);
+ return 0;
+}
+
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_BSWAP_64 1
+_ACEOF
+
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+
+ fi
+
+
+###############################################################################
+# Checks for typedefs, structures, and compiler characteristics.
+###############################################################################
+
+
+{ $as_echo "$as_me:$LINENO: checking for stdbool.h that conforms to C99" >&5
+$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; }
+if test "${ac_cv_header_stdbool_h+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <stdbool.h>
+#ifndef bool
+ "error: bool is not defined"
+#endif
+#ifndef false
+ "error: false is not defined"
+#endif
+#if false
+ "error: false is not 0"
+#endif
+#ifndef true
+ "error: true is not defined"
+#endif
+#if true != 1
+ "error: true is not 1"
+#endif
+#ifndef __bool_true_false_are_defined
+ "error: __bool_true_false_are_defined is not defined"
+#endif
+
+ struct s { _Bool s: 1; _Bool t; } s;
+
+ char a[true == 1 ? 1 : -1];
+ char b[false == 0 ? 1 : -1];
+ char c[__bool_true_false_are_defined == 1 ? 1 : -1];
+ char d[(bool) 0.5 == true ? 1 : -1];
+ bool e = &s;
+ char f[(_Bool) 0.0 == false ? 1 : -1];
+ char g[true];
+ char h[sizeof (_Bool)];
+ char i[sizeof s.t];
+ enum { j = false, k = true, l = false * true, m = true * 256 };
+ /* The following fails for
+ HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */
+ _Bool n[m];
+ char o[sizeof n == m * sizeof n[0] ? 1 : -1];
+ char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1];
+# if defined __xlc__ || defined __GNUC__
+ /* Catch a bug in IBM AIX xlc compiler version 6.0.0.0
+ reported by James Lemley on 2005-10-05; see
+ http://lists.gnu.org/archive/html/bug-coreutils/2005-10/msg00086.html
+ This test is not quite right, since xlc is allowed to
+ reject this program, as the initializer for xlcbug is
+ not one of the forms that C requires support for.
+ However, doing the test right would require a runtime
+ test, and that would make cross-compilation harder.
+ Let us hope that IBM fixes the xlc bug, and also adds
+ support for this kind of constant expression. In the
+ meantime, this test will reject xlc, which is OK, since
+ our stdbool.h substitute should suffice. We also test
+ this with GCC, where it should work, to detect more
+ quickly whether someone messes up the test in the
+ future. */
+ char digs[] = "0123456789";
+ int xlcbug = 1 / (&(digs + 5)[-2 + (bool) 1] == &digs[4] ? 1 : -1);
+# endif
+ /* Catch a bug in an HP-UX C compiler. See
+ http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html
+ http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html
+ */
+ _Bool q = true;
+ _Bool *pq = &q;
+
+int
+main ()
+{
+
+ *pq |= q;
+ *pq |= ! q;
+ /* Refer to every declared value, to avoid compiler optimizations. */
+ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l
+ + !m + !n + !o + !p + !q + !pq);
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_header_stdbool_h=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_header_stdbool_h=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_stdbool_h" >&5
+$as_echo "$ac_cv_header_stdbool_h" >&6; }
+{ $as_echo "$as_me:$LINENO: checking for _Bool" >&5
+$as_echo_n "checking for _Bool... " >&6; }
+if test "${ac_cv_type__Bool+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_type__Bool=no
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+if (sizeof (_Bool))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+if (sizeof ((_Bool)))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_type__Bool=yes
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_type__Bool" >&5
+$as_echo "$ac_cv_type__Bool" >&6; }
+if test "x$ac_cv_type__Bool" = x""yes; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE__BOOL 1
+_ACEOF
+
+
+fi
+
+if test $ac_cv_header_stdbool_h = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_STDBOOL_H 1
+_ACEOF
+
+fi
+
+
+
+ { $as_echo "$as_me:$LINENO: checking for uint8_t" >&5
+$as_echo_n "checking for uint8_t... " >&6; }
+if test "${ac_cv_c_uint8_t+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_uint8_t=no
+ for ac_type in 'uint8_t' 'unsigned int' 'unsigned long int' \
+ 'unsigned long long int' 'unsigned short int' 'unsigned char'; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(($ac_type) -1 >> (8 - 1) == 1)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ case $ac_type in
+ uint8_t) ac_cv_c_uint8_t=yes ;;
+ *) ac_cv_c_uint8_t=$ac_type ;;
+esac
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ test "$ac_cv_c_uint8_t" != no && break
+ done
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_uint8_t" >&5
+$as_echo "$ac_cv_c_uint8_t" >&6; }
+ case $ac_cv_c_uint8_t in #(
+ no|yes) ;; #(
+ *)
+
+cat >>confdefs.h <<\_ACEOF
+#define _UINT8_T 1
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define uint8_t $ac_cv_c_uint8_t
+_ACEOF
+;;
+ esac
+
+
+ { $as_echo "$as_me:$LINENO: checking for uint16_t" >&5
+$as_echo_n "checking for uint16_t... " >&6; }
+if test "${ac_cv_c_uint16_t+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_uint16_t=no
+ for ac_type in 'uint16_t' 'unsigned int' 'unsigned long int' \
+ 'unsigned long long int' 'unsigned short int' 'unsigned char'; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(($ac_type) -1 >> (16 - 1) == 1)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ case $ac_type in
+ uint16_t) ac_cv_c_uint16_t=yes ;;
+ *) ac_cv_c_uint16_t=$ac_type ;;
+esac
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ test "$ac_cv_c_uint16_t" != no && break
+ done
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_uint16_t" >&5
+$as_echo "$ac_cv_c_uint16_t" >&6; }
+ case $ac_cv_c_uint16_t in #(
+ no|yes) ;; #(
+ *)
+
+
+cat >>confdefs.h <<_ACEOF
+#define uint16_t $ac_cv_c_uint16_t
+_ACEOF
+;;
+ esac
+
+
+ { $as_echo "$as_me:$LINENO: checking for int32_t" >&5
+$as_echo_n "checking for int32_t... " >&6; }
+if test "${ac_cv_c_int32_t+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_int32_t=no
+ for ac_type in 'int32_t' 'int' 'long int' \
+ 'long long int' 'short int' 'signed char'; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(0 < ($ac_type) (((($ac_type) 1 << (32 - 2)) - 1) * 2 + 1))];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(($ac_type) (((($ac_type) 1 << (32 - 2)) - 1) * 2 + 1)
+ < ($ac_type) (((($ac_type) 1 << (32 - 2)) - 1) * 2 + 2))];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ case $ac_type in
+ int32_t) ac_cv_c_int32_t=yes ;;
+ *) ac_cv_c_int32_t=$ac_type ;;
+esac
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ test "$ac_cv_c_int32_t" != no && break
+ done
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_int32_t" >&5
+$as_echo "$ac_cv_c_int32_t" >&6; }
+ case $ac_cv_c_int32_t in #(
+ no|yes) ;; #(
+ *)
+
+cat >>confdefs.h <<_ACEOF
+#define int32_t $ac_cv_c_int32_t
+_ACEOF
+;;
+ esac
+
+
+ { $as_echo "$as_me:$LINENO: checking for uint32_t" >&5
+$as_echo_n "checking for uint32_t... " >&6; }
+if test "${ac_cv_c_uint32_t+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_uint32_t=no
+ for ac_type in 'uint32_t' 'unsigned int' 'unsigned long int' \
+ 'unsigned long long int' 'unsigned short int' 'unsigned char'; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(($ac_type) -1 >> (32 - 1) == 1)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ case $ac_type in
+ uint32_t) ac_cv_c_uint32_t=yes ;;
+ *) ac_cv_c_uint32_t=$ac_type ;;
+esac
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ test "$ac_cv_c_uint32_t" != no && break
+ done
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_uint32_t" >&5
+$as_echo "$ac_cv_c_uint32_t" >&6; }
+ case $ac_cv_c_uint32_t in #(
+ no|yes) ;; #(
+ *)
+
+cat >>confdefs.h <<\_ACEOF
+#define _UINT32_T 1
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define uint32_t $ac_cv_c_uint32_t
+_ACEOF
+;;
+ esac
+
+
+ { $as_echo "$as_me:$LINENO: checking for int64_t" >&5
+$as_echo_n "checking for int64_t... " >&6; }
+if test "${ac_cv_c_int64_t+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_int64_t=no
+ for ac_type in 'int64_t' 'int' 'long int' \
+ 'long long int' 'short int' 'signed char'; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(0 < ($ac_type) (((($ac_type) 1 << (64 - 2)) - 1) * 2 + 1))];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(($ac_type) (((($ac_type) 1 << (64 - 2)) - 1) * 2 + 1)
+ < ($ac_type) (((($ac_type) 1 << (64 - 2)) - 1) * 2 + 2))];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ case $ac_type in
+ int64_t) ac_cv_c_int64_t=yes ;;
+ *) ac_cv_c_int64_t=$ac_type ;;
+esac
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ test "$ac_cv_c_int64_t" != no && break
+ done
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_int64_t" >&5
+$as_echo "$ac_cv_c_int64_t" >&6; }
+ case $ac_cv_c_int64_t in #(
+ no|yes) ;; #(
+ *)
+
+cat >>confdefs.h <<_ACEOF
+#define int64_t $ac_cv_c_int64_t
+_ACEOF
+;;
+ esac
+
+
+ { $as_echo "$as_me:$LINENO: checking for uint64_t" >&5
+$as_echo_n "checking for uint64_t... " >&6; }
+if test "${ac_cv_c_uint64_t+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_uint64_t=no
+ for ac_type in 'uint64_t' 'unsigned int' 'unsigned long int' \
+ 'unsigned long long int' 'unsigned short int' 'unsigned char'; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(($ac_type) -1 >> (64 - 1) == 1)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ case $ac_type in
+ uint64_t) ac_cv_c_uint64_t=yes ;;
+ *) ac_cv_c_uint64_t=$ac_type ;;
+esac
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ test "$ac_cv_c_uint64_t" != no && break
+ done
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_uint64_t" >&5
+$as_echo "$ac_cv_c_uint64_t" >&6; }
+ case $ac_cv_c_uint64_t in #(
+ no|yes) ;; #(
+ *)
+
+cat >>confdefs.h <<\_ACEOF
+#define _UINT64_T 1
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define uint64_t $ac_cv_c_uint64_t
+_ACEOF
+;;
+ esac
+
+
+ { $as_echo "$as_me:$LINENO: checking for uintptr_t" >&5
+$as_echo_n "checking for uintptr_t... " >&6; }
+if test "${ac_cv_type_uintptr_t+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_type_uintptr_t=no
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+if (sizeof (uintptr_t))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+if (sizeof ((uintptr_t)))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ :
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_type_uintptr_t=yes
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_uintptr_t" >&5
+$as_echo "$ac_cv_type_uintptr_t" >&6; }
+if test "x$ac_cv_type_uintptr_t" = x""yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_UINTPTR_T 1
+_ACEOF
+
+else
+ for ac_type in 'unsigned int' 'unsigned long int' \
+ 'unsigned long long int'; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(sizeof (void *) <= sizeof ($ac_type))];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+
+cat >>confdefs.h <<_ACEOF
+#define uintptr_t $ac_type
+_ACEOF
+
+ ac_type=
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ test -z "$ac_type" && break
+ done
+fi
+
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:$LINENO: checking size of size_t" >&5
+$as_echo_n "checking size of size_t... " >&6; }
+if test "${ac_cv_sizeof_size_t+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (size_t))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (size_t))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_hi=$ac_mid; break
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (size_t))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (size_t))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_lo=$ac_mid; break
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo= ac_hi=
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (size_t))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_hi=$ac_mid
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_size_t=$ac_lo;;
+'') if test "$ac_cv_type_size_t" = yes; then
+ { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: cannot compute sizeof (size_t)
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: cannot compute sizeof (size_t)
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }; }
+ else
+ ac_cv_sizeof_size_t=0
+ fi ;;
+esac
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+static long int longval () { return (long int) (sizeof (size_t)); }
+static unsigned long int ulongval () { return (long int) (sizeof (size_t)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ return 1;
+ if (((long int) (sizeof (size_t))) < 0)
+ {
+ long int i = longval ();
+ if (i != ((long int) (sizeof (size_t))))
+ return 1;
+ fprintf (f, "%ld", i);
+ }
+ else
+ {
+ unsigned long int i = ulongval ();
+ if (i != ((long int) (sizeof (size_t))))
+ return 1;
+ fprintf (f, "%lu", i);
+ }
+ /* Do not output a trailing newline, as this causes \r\n confusion
+ on some platforms. */
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_size_t=`cat conftest.val`
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+if test "$ac_cv_type_size_t" = yes; then
+ { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ { $as_echo "$as_me:$LINENO: error: cannot compute sizeof (size_t)
+See \`config.log' for more details." >&5
+$as_echo "$as_me: error: cannot compute sizeof (size_t)
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }; }
+ else
+ ac_cv_sizeof_size_t=0
+ fi
+fi
+rm -rf conftest.dSYM
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.val
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_sizeof_size_t" >&5
+$as_echo "$ac_cv_sizeof_size_t" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_SIZE_T $ac_cv_sizeof_size_t
+_ACEOF
+
+
+
+# The command line tool can copy high resolution timestamps if such
+# information is available in struct stat. Otherwise one second accuracy
+# is used.
+{ $as_echo "$as_me:$LINENO: checking for struct stat.st_atim.tv_nsec" >&5
+$as_echo_n "checking for struct stat.st_atim.tv_nsec... " >&6; }
+if test "${ac_cv_member_struct_stat_st_atim_tv_nsec+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (ac_aggr.st_atim.tv_nsec)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_member_struct_stat_st_atim_tv_nsec=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (sizeof ac_aggr.st_atim.tv_nsec)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_member_struct_stat_st_atim_tv_nsec=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_member_struct_stat_st_atim_tv_nsec=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_member_struct_stat_st_atim_tv_nsec" >&5
+$as_echo "$ac_cv_member_struct_stat_st_atim_tv_nsec" >&6; }
+if test "x$ac_cv_member_struct_stat_st_atim_tv_nsec" = x""yes; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_STRUCT_STAT_ST_ATIM_TV_NSEC 1
+_ACEOF
+
+
+fi
+{ $as_echo "$as_me:$LINENO: checking for struct stat.st_atimespec.tv_nsec" >&5
+$as_echo_n "checking for struct stat.st_atimespec.tv_nsec... " >&6; }
+if test "${ac_cv_member_struct_stat_st_atimespec_tv_nsec+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (ac_aggr.st_atimespec.tv_nsec)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_member_struct_stat_st_atimespec_tv_nsec=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (sizeof ac_aggr.st_atimespec.tv_nsec)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_member_struct_stat_st_atimespec_tv_nsec=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_member_struct_stat_st_atimespec_tv_nsec=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_member_struct_stat_st_atimespec_tv_nsec" >&5
+$as_echo "$ac_cv_member_struct_stat_st_atimespec_tv_nsec" >&6; }
+if test "x$ac_cv_member_struct_stat_st_atimespec_tv_nsec" = x""yes; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_STRUCT_STAT_ST_ATIMESPEC_TV_NSEC 1
+_ACEOF
+
+
+fi
+{ $as_echo "$as_me:$LINENO: checking for struct stat.st_atimensec" >&5
+$as_echo_n "checking for struct stat.st_atimensec... " >&6; }
+if test "${ac_cv_member_struct_stat_st_atimensec+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (ac_aggr.st_atimensec)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_member_struct_stat_st_atimensec=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (sizeof ac_aggr.st_atimensec)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_member_struct_stat_st_atimensec=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_member_struct_stat_st_atimensec=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_member_struct_stat_st_atimensec" >&5
+$as_echo "$ac_cv_member_struct_stat_st_atimensec" >&6; }
+if test "x$ac_cv_member_struct_stat_st_atimensec" = x""yes; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_STRUCT_STAT_ST_ATIMENSEC 1
+_ACEOF
+
+
+fi
+{ $as_echo "$as_me:$LINENO: checking for struct stat.st_uatime" >&5
+$as_echo_n "checking for struct stat.st_uatime... " >&6; }
+if test "${ac_cv_member_struct_stat_st_uatime+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (ac_aggr.st_uatime)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_member_struct_stat_st_uatime=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (sizeof ac_aggr.st_uatime)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_member_struct_stat_st_uatime=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_member_struct_stat_st_uatime=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_member_struct_stat_st_uatime" >&5
+$as_echo "$ac_cv_member_struct_stat_st_uatime" >&6; }
+if test "x$ac_cv_member_struct_stat_st_uatime" = x""yes; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_STRUCT_STAT_ST_UATIME 1
+_ACEOF
+
+
+fi
+{ $as_echo "$as_me:$LINENO: checking for struct stat.st_atim.st__tim.tv_nsec" >&5
+$as_echo_n "checking for struct stat.st_atim.st__tim.tv_nsec... " >&6; }
+if test "${ac_cv_member_struct_stat_st_atim_st__tim_tv_nsec+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (ac_aggr.st_atim.st__tim.tv_nsec)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_member_struct_stat_st_atim_st__tim_tv_nsec=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+static struct stat ac_aggr;
+if (sizeof ac_aggr.st_atim.st__tim.tv_nsec)
+return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_member_struct_stat_st_atim_st__tim_tv_nsec=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_member_struct_stat_st_atim_st__tim_tv_nsec=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_member_struct_stat_st_atim_st__tim_tv_nsec" >&5
+$as_echo "$ac_cv_member_struct_stat_st_atim_st__tim_tv_nsec" >&6; }
+if test "x$ac_cv_member_struct_stat_st_atim_st__tim_tv_nsec" = x""yes; then
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_STRUCT_STAT_ST_ATIM_ST__TIM_TV_NSEC 1
+_ACEOF
+
+
+fi
+
+
+# Check whether --enable-largefile was given.
+if test "${enable_largefile+set}" = set; then
+ enableval=$enable_largefile;
+fi
+
+if test "$enable_largefile" != no; then
+
+ { $as_echo "$as_me:$LINENO: checking for special C compiler options needed for large files" >&5
+$as_echo_n "checking for special C compiler options needed for large files... " >&6; }
+if test "${ac_cv_sys_largefile_CC+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_sys_largefile_CC=no
+ if test "$GCC" != yes; then
+ ac_save_CC=$CC
+ while :; do
+ # IRIX 6.2 and later do not support large files by default,
+ # so use the C compiler's -n32 option if that helps.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ break
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext
+ CC="$CC -n32"
+ rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_sys_largefile_CC=' -n32'; break
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext
+ break
+ done
+ CC=$ac_save_CC
+ rm -f conftest.$ac_ext
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_sys_largefile_CC" >&5
+$as_echo "$ac_cv_sys_largefile_CC" >&6; }
+ if test "$ac_cv_sys_largefile_CC" != no; then
+ CC=$CC$ac_cv_sys_largefile_CC
+ fi
+
+ { $as_echo "$as_me:$LINENO: checking for _FILE_OFFSET_BITS value needed for large files" >&5
+$as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; }
+if test "${ac_cv_sys_file_offset_bits+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_sys_file_offset_bits=no; break
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#define _FILE_OFFSET_BITS 64
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_sys_file_offset_bits=64; break
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cv_sys_file_offset_bits=unknown
+ break
+done
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_sys_file_offset_bits" >&5
+$as_echo "$ac_cv_sys_file_offset_bits" >&6; }
+case $ac_cv_sys_file_offset_bits in #(
+ no | unknown) ;;
+ *)
+cat >>confdefs.h <<_ACEOF
+#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits
+_ACEOF
+;;
+esac
+rm -rf conftest*
+ if test $ac_cv_sys_file_offset_bits = unknown; then
+ { $as_echo "$as_me:$LINENO: checking for _LARGE_FILES value needed for large files" >&5
+$as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; }
+if test "${ac_cv_sys_large_files+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_sys_large_files=no; break
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#define _LARGE_FILES 1
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_sys_large_files=1; break
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cv_sys_large_files=unknown
+ break
+done
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_sys_large_files" >&5
+$as_echo "$ac_cv_sys_large_files" >&6; }
+case $ac_cv_sys_large_files in #(
+ no | unknown) ;;
+ *)
+cat >>confdefs.h <<_ACEOF
+#define _LARGE_FILES $ac_cv_sys_large_files
+_ACEOF
+;;
+esac
+rm -rf conftest*
+ fi
+fi
+
+
+ { $as_echo "$as_me:$LINENO: checking whether byte ordering is bigendian" >&5
+$as_echo_n "checking whether byte ordering is bigendian... " >&6; }
+if test "${ac_cv_c_bigendian+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_bigendian=unknown
+ # See if we're dealing with a universal compiler.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifndef __APPLE_CC__
+ not a universal capable compiler
+ #endif
+ typedef int dummy;
+
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+
+ # Check for potential -arch flags. It is not universal unless
+ # there are some -arch flags. Note that *ppc* also matches
+ # ppc64. This check is also rather less than ideal.
+ case "${CC} ${CFLAGS} ${CPPFLAGS} ${LDFLAGS}" in #(
+ *-arch*ppc*|*-arch*i386*|*-arch*x86_64*) ac_cv_c_bigendian=universal;;
+ esac
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ if test $ac_cv_c_bigendian = unknown; then
+ # See if sys/param.h defines the BYTE_ORDER macro.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ #include <sys/param.h>
+
+int
+main ()
+{
+#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \
+ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \
+ && LITTLE_ENDIAN)
+ bogus endian macros
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ # It does; now see whether it defined to BIG_ENDIAN or not.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ #include <sys/param.h>
+
+int
+main ()
+{
+#if BYTE_ORDER != BIG_ENDIAN
+ not big endian
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_c_bigendian=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_c_bigendian=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ if test $ac_cv_c_bigendian = unknown; then
+ # See if <limits.h> defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris).
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <limits.h>
+
+int
+main ()
+{
+#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN)
+ bogus endian macros
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ # It does; now see whether it defined to _BIG_ENDIAN or not.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <limits.h>
+
+int
+main ()
+{
+#ifndef _BIG_ENDIAN
+ not big endian
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_c_bigendian=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_c_bigendian=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ if test $ac_cv_c_bigendian = unknown; then
+ # Compile a test program.
+ if test "$cross_compiling" = yes; then
+ # Try to guess by grepping values from an object file.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+short int ascii_mm[] =
+ { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
+ short int ascii_ii[] =
+ { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
+ int use_ascii (int i) {
+ return ascii_mm[i] + ascii_ii[i];
+ }
+ short int ebcdic_ii[] =
+ { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
+ short int ebcdic_mm[] =
+ { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
+ int use_ebcdic (int i) {
+ return ebcdic_mm[i] + ebcdic_ii[i];
+ }
+ extern int foo;
+
+int
+main ()
+{
+return use_ascii (foo) == use_ebcdic (foo);
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then
+ ac_cv_c_bigendian=yes
+ fi
+ if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then
+ if test "$ac_cv_c_bigendian" = unknown; then
+ ac_cv_c_bigendian=no
+ else
+ # finding both strings is unlikely to happen, but who knows?
+ ac_cv_c_bigendian=unknown
+ fi
+ fi
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+
+ /* Are we little or big endian? From Harbison&Steele. */
+ union
+ {
+ long int l;
+ char c[sizeof (long int)];
+ } u;
+ u.l = 1;
+ return u.c[sizeof (long int) - 1] == 1;
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_c_bigendian=no
+else
+ $as_echo "$as_me: program exited with status $ac_status" >&5
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+ac_cv_c_bigendian=yes
+fi
+rm -rf conftest.dSYM
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+
+
+ fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_bigendian" >&5
+$as_echo "$ac_cv_c_bigendian" >&6; }
+ case $ac_cv_c_bigendian in #(
+ yes)
+ cat >>confdefs.h <<\_ACEOF
+#define WORDS_BIGENDIAN 1
+_ACEOF
+;; #(
+ no)
+ ;; #(
+ universal)
+
+cat >>confdefs.h <<\_ACEOF
+#define AC_APPLE_UNIVERSAL_BUILD 1
+_ACEOF
+
+ ;; #(
+ *)
+ { { $as_echo "$as_me:$LINENO: error: unknown endianness
+ presetting ac_cv_c_bigendian=no (or yes) will help" >&5
+$as_echo "$as_me: error: unknown endianness
+ presetting ac_cv_c_bigendian=no (or yes) will help" >&2;}
+ { (exit 1); exit 1; }; } ;;
+ esac
+
+
+
+###############################################################################
+# Checks for library functions.
+###############################################################################
+
+# Gnulib replacements as needed
+
+ if test -z "$GETOPT_H"; then
+
+for ac_header in getopt.h
+do
+as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5
+$as_echo_n "checking $ac_header usability... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_header_compiler=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_compiler=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5
+$as_echo_n "checking $ac_header presence... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null && {
+ test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ }; then
+ ac_header_preproc=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
+ yes:no: )
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
+ ac_header_preproc=yes
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+$as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
+$as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
+ ( cat <<\_ASBOX
+## --------------------------------------- ##
+## Report this to lasse.collin@tukaani.org ##
+## --------------------------------------- ##
+_ASBOX
+ ) | sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5
+$as_echo_n "checking for $ac_header... " >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ eval "$as_ac_Header=\$ac_header_preproc"
+fi
+ac_res=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+
+fi
+as_val=`eval 'as_val=${'$as_ac_Header'}
+ $as_echo "$as_val"'`
+ if test "x$as_val" = x""yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+else
+ GETOPT_H=getopt.h
+fi
+
+done
+
+ fi
+
+ if test -z "$GETOPT_H"; then
+
+for ac_func in getopt_long
+do
+as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
+{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5
+$as_echo_n "checking for $ac_func... " >&6; }
+if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $ac_func innocuous_$ac_func
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $ac_func
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_$ac_func || defined __stub___$ac_func
+choke me
+#endif
+
+int
+main ()
+{
+return $ac_func ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ eval "$as_ac_var=yes"
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$as_ac_var=no"
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+ac_res=`eval 'as_val=${'$as_ac_var'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+as_val=`eval 'as_val=${'$as_ac_var'}
+ $as_echo "$as_val"'`
+ if test "x$as_val" = x""yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+else
+ GETOPT_H=getopt.h
+fi
+done
+
+ fi
+
+ if test -z "$GETOPT_H"; then
+ { $as_echo "$as_me:$LINENO: checking whether optreset is declared" >&5
+$as_echo_n "checking whether optreset is declared... " >&6; }
+if test "${ac_cv_have_decl_optreset+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <getopt.h>
+
+int
+main ()
+{
+#ifndef optreset
+ (void) optreset;
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ ac_cv_have_decl_optreset=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_have_decl_optreset=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:$LINENO: result: $ac_cv_have_decl_optreset" >&5
+$as_echo "$ac_cv_have_decl_optreset" >&6; }
+if test "x$ac_cv_have_decl_optreset" = x""yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_OPTRESET 1
+_ACEOF
+
+fi
+
+ fi
+
+
+
+
+ if test -n "$GETOPT_H"; then
+
+ case " $LIBOBJS " in
+ *" getopt.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS getopt.$ac_objext"
+ ;;
+esac
+
+ case " $LIBOBJS " in
+ *" getopt1.$ac_objext "* ) ;;
+ *) LIBOBJS="$LIBOBJS getopt1.$ac_objext"
+ ;;
+esac
+
+
+ GETOPT_H=getopt.h
+
+cat >>confdefs.h <<\_ACEOF
+#define __GETOPT_PREFIX rpl_
+_ACEOF
+
+
+
+
+fi
+
+
+
+# Find the best function to set timestamps.
+
+
+
+
+
+for ac_func in futimens futimes futimesat utimes utime
+do
+as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
+{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5
+$as_echo_n "checking for $ac_func... " >&6; }
+if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then
+ $as_echo_n "(cached) " >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $ac_func innocuous_$ac_func
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $ac_func
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_$ac_func || defined __stub___$ac_func
+choke me
+#endif
+
+int
+main ()
+{
+return $ac_func ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then
+ eval "$as_ac_var=yes"
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$as_ac_var=no"
+fi
+
+rm -rf conftest.dSYM
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+ac_res=`eval 'as_val=${'$as_ac_var'}
+ $as_echo "$as_val"'`
+ { $as_echo "$as_me:$LINENO: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+as_val=`eval 'as_val=${'$as_ac_var'}
+ $as_echo "$as_val"'`
+ if test "x$as_val" = x""yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+ break
+fi
+done
+
+
+
+{ $as_echo "$as_me:$LINENO: checking how to detect the amount of physical memory" >&5
+$as_echo_n "checking how to detect the amount of physical memory... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <unistd.h>
+int
+main(void)
+{
+ long i;
+ i = sysconf(_SC_PAGESIZE);
+ i = sysconf(_SC_PHYS_PAGES);
+ return 0;
+}
+
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_PHYSMEM_SYSCONF 1
+_ACEOF
+
+ { $as_echo "$as_me:$LINENO: result: sysconf" >&5
+$as_echo "sysconf" >&6; }
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+#endif
+#include <sys/sysctl.h>
+int
+main(void)
+{
+ int name[2] = { CTL_HW, HW_PHYSMEM };
+ unsigned long mem;
+ size_t mem_ptr_size = sizeof(mem);
+ sysctl(name, 2, &mem, &mem_ptr_size, NULL, NULL);
+ return 0;
+}
+
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_PHYSMEM_SYSCTL 1
+_ACEOF
+
+ { $as_echo "$as_me:$LINENO: result: sysctl" >&5
+$as_echo "sysctl" >&6; }
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+case $host_os in
+ linux*)
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/sysinfo.h>
+int
+main(void)
+{
+ struct sysinfo si;
+ sysinfo(&si);
+ return 0;
+}
+
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_PHYSMEM_SYSINFO 1
+_ACEOF
+
+ { $as_echo "$as_me:$LINENO: result: sysinfo" >&5
+$as_echo "sysinfo" >&6; }
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+ { $as_echo "$as_me:$LINENO: result: unknown" >&5
+$as_echo "unknown" >&6; }
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ;;
+ *)
+ { $as_echo "$as_me:$LINENO: result: unknown" >&5
+$as_echo "unknown" >&6; }
+ ;;
+esac
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:$LINENO: checking how to detect the number of available CPU cores" >&5
+$as_echo_n "checking how to detect the number of available CPU cores... " >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <unistd.h>
+int
+main(void)
+{
+ long i;
+ i = sysconf(_SC_NPROCESSORS_ONLN);
+ return 0;
+}
+
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_CPUCORES_SYSCONF 1
+_ACEOF
+
+ { $as_echo "$as_me:$LINENO: result: sysconf" >&5
+$as_echo "sysconf" >&6; }
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+#include <sys/types.h>
+#ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+#endif
+#include <sys/sysctl.h>
+int
+main(void)
+{
+ int name[2] = { CTL_HW, HW_NCPU };
+ int cpus;
+ size_t cpus_size = sizeof(cpus);
+ sysctl(name, 2, &cpus, &cpus_size, NULL, NULL);
+ return 0;
+}
+
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_CPUCORES_SYSCTL 1
+_ACEOF
+
+ { $as_echo "$as_me:$LINENO: result: sysctl" >&5
+$as_echo "sysctl" >&6; }
+
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+ { $as_echo "$as_me:$LINENO: result: unknown" >&5
+$as_echo "unknown" >&6; }
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+
+###############################################################################
+# If using GCC, set some additional AM_CFLAGS:
+###############################################################################
+
+if test "$GCC" = yes ; then
+ echo
+ echo "GCC extensions:"
+fi
+
+# Always do the visibility check but don't set AM_CFLAGS on Windows.
+# This way things get set properly even on Windows.
+
+
+ CFLAG_VISIBILITY=
+ HAVE_VISIBILITY=0
+ if test -n "$GCC"; then
+ { $as_echo "$as_me:$LINENO: checking for simple visibility declarations" >&5
+$as_echo_n "checking for simple visibility declarations... " >&6; }
+ if test "${gl_cv_cc_visibility+set}" = set; then
+ $as_echo_n "(cached) " >&6
+else
+
+ gl_save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -fvisibility=hidden"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+extern __attribute__((__visibility__("hidden"))) int hiddenvar;
+ extern __attribute__((__visibility__("default"))) int exportedvar;
+ extern __attribute__((__visibility__("hidden"))) int hiddenfunc (void);
+ extern __attribute__((__visibility__("default"))) int exportedfunc (void);
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
+$as_echo "$ac_try_echo") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && {
+ test -z "$ac_c_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then
+ gl_cv_cc_visibility=yes
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ gl_cv_cc_visibility=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ CFLAGS="$gl_save_CFLAGS"
+fi
+
+ { $as_echo "$as_me:$LINENO: result: $gl_cv_cc_visibility" >&5
+$as_echo "$gl_cv_cc_visibility" >&6; }
+ if test $gl_cv_cc_visibility = yes; then
+ CFLAG_VISIBILITY="-fvisibility=hidden"
+ HAVE_VISIBILITY=1
+ fi
+ fi
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_VISIBILITY $HAVE_VISIBILITY
+_ACEOF
+
+
+if test -n "$CFLAG_VISIBILITY" && test "$is_w32" = no; then
+ AM_CFLAGS="$AM_CFLAGS $CFLAG_VISIBILITY"
+fi
+
+###############################################################################
+# Create the makefiles and config.h
+###############################################################################
+
+echo
+
+# Don't build the lib directory at all if we don't need any replacement
+# functions.
+ if test -n "$LIBOBJS"; then
+ COND_GNULIB_TRUE=
+ COND_GNULIB_FALSE='#'
+else
+ COND_GNULIB_TRUE='#'
+ COND_GNULIB_FALSE=
+fi
+
+
+# Add default AM_CFLAGS.
+
+
+# Set additional flags for static/dynamic linking. The idea is that every
+# program (not library) being built will use either STATIC_{CPPFLAGS,LDFLAGS}
+# or DYNAMIC_{CPPFLAGS,LDFLAGS} depending on which type of linkage is
+# preferred. These preferences get overridden by use of --disable-static,
+# --disable-shared, or --enable-dynamic.
+#
+# This is quite messy, because we want to use LZMA_API_STATIC when linking
+# against static liblzma. It's needed on Windows.
+if test "x$enable_static" = xno; then
+ enable_dynamic=yes
+fi
+if test "x$enable_shared" = xno; then
+ enable_dynamic=no
+fi
+case $enable_dynamic in
+ yes)
+ STATIC_CPPFLAGS=
+ STATIC_LDFLAGS=
+ DYNAMIC_CPPFLAGS=
+ DYNAMIC_LDFLAGS=
+ ;;
+ mixed)
+ STATIC_CPPFLAGS="-DLZMA_API_STATIC"
+ STATIC_LDFLAGS="-static"
+ DYNAMIC_CPPFLAGS=
+ DYNAMIC_LDFLAGS=
+ ;;
+ no)
+ STATIC_CPPFLAGS="-DLZMA_API_STATIC"
+ STATIC_LDFLAGS="-static"
+ DYNAMIC_CPPFLAGS="-DLZMA_API_STATIC"
+ DYNAMIC_LDFLAGS="-static"
+ ;;
+esac
+
+
+
+
+
+# This is needed for src/scripts.
+xz=`echo xz | sed "$program_transform_name"`
+
+
+ac_config_files="$ac_config_files Doxyfile Makefile po/Makefile.in lib/Makefile src/Makefile src/liblzma/liblzma.pc src/liblzma/Makefile src/liblzma/api/Makefile src/xz/Makefile src/xzdec/Makefile src/lzmainfo/Makefile src/scripts/Makefile src/scripts/xzdiff src/scripts/xzgrep src/scripts/xzmore src/scripts/xzless tests/Makefile debug/Makefile"
+
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+ *) $as_unset $ac_var ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ test "x$cache_file" != "x/dev/null" &&
+ { $as_echo "$as_me:$LINENO: updating cache $cache_file" >&5
+$as_echo "$as_me: updating cache $cache_file" >&6;}
+ cat confcache >$cache_file
+ else
+ { $as_echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5
+$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+ ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
+ # will be set to the directory where LIBOBJS objects are built.
+ ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+if test -z "${COND_W32_TRUE}" && test -z "${COND_W32_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_W32\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_W32\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_MAIN_ENCODER_TRUE}" && test -z "${COND_MAIN_ENCODER_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_MAIN_ENCODER\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_MAIN_ENCODER\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_MAIN_DECODER_TRUE}" && test -z "${COND_MAIN_DECODER_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_MAIN_DECODER\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_MAIN_DECODER\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_LZMA1_TRUE}" && test -z "${COND_FILTER_LZMA1_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_LZMA1\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_LZMA1\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_LZMA1_TRUE}" && test -z "${COND_ENCODER_LZMA1_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_LZMA1\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_LZMA1\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_LZMA1_TRUE}" && test -z "${COND_DECODER_LZMA1_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_LZMA1\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_LZMA1\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_LZMA2_TRUE}" && test -z "${COND_FILTER_LZMA2_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_LZMA2\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_LZMA2\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_LZMA2_TRUE}" && test -z "${COND_ENCODER_LZMA2_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_LZMA2\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_LZMA2\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_LZMA2_TRUE}" && test -z "${COND_DECODER_LZMA2_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_LZMA2\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_LZMA2\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_SUBBLOCK_TRUE}" && test -z "${COND_FILTER_SUBBLOCK_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_SUBBLOCK\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_SUBBLOCK\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_SUBBLOCK_TRUE}" && test -z "${COND_ENCODER_SUBBLOCK_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_SUBBLOCK\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_SUBBLOCK\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_SUBBLOCK_TRUE}" && test -z "${COND_DECODER_SUBBLOCK_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_SUBBLOCK\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_SUBBLOCK\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_DELTA_TRUE}" && test -z "${COND_FILTER_DELTA_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_DELTA\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_DELTA\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_DELTA_TRUE}" && test -z "${COND_ENCODER_DELTA_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_DELTA\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_DELTA\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_DELTA_TRUE}" && test -z "${COND_DECODER_DELTA_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_DELTA\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_DELTA\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_X86_TRUE}" && test -z "${COND_FILTER_X86_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_X86\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_X86\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_X86_TRUE}" && test -z "${COND_ENCODER_X86_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_X86\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_X86\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_X86_TRUE}" && test -z "${COND_DECODER_X86_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_X86\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_X86\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_POWERPC_TRUE}" && test -z "${COND_FILTER_POWERPC_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_POWERPC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_POWERPC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_POWERPC_TRUE}" && test -z "${COND_ENCODER_POWERPC_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_POWERPC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_POWERPC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_POWERPC_TRUE}" && test -z "${COND_DECODER_POWERPC_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_POWERPC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_POWERPC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_IA64_TRUE}" && test -z "${COND_FILTER_IA64_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_IA64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_IA64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_IA64_TRUE}" && test -z "${COND_ENCODER_IA64_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_IA64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_IA64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_IA64_TRUE}" && test -z "${COND_DECODER_IA64_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_IA64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_IA64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_ARM_TRUE}" && test -z "${COND_FILTER_ARM_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_ARM\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_ARM\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_ARM_TRUE}" && test -z "${COND_ENCODER_ARM_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_ARM\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_ARM\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_ARM_TRUE}" && test -z "${COND_DECODER_ARM_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_ARM\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_ARM\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_ARMTHUMB_TRUE}" && test -z "${COND_FILTER_ARMTHUMB_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_ARMTHUMB\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_ARMTHUMB\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_ARMTHUMB_TRUE}" && test -z "${COND_ENCODER_ARMTHUMB_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_ARMTHUMB\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_ARMTHUMB\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_ARMTHUMB_TRUE}" && test -z "${COND_DECODER_ARMTHUMB_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_ARMTHUMB\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_ARMTHUMB\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_SPARC_TRUE}" && test -z "${COND_FILTER_SPARC_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_SPARC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_SPARC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_SPARC_TRUE}" && test -z "${COND_ENCODER_SPARC_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_SPARC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_SPARC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_SPARC_TRUE}" && test -z "${COND_DECODER_SPARC_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_SPARC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_SPARC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_SIMPLE_TRUE}" && test -z "${COND_FILTER_SIMPLE_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_SIMPLE\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_SIMPLE\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_SIMPLE_TRUE}" && test -z "${COND_ENCODER_SIMPLE_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_SIMPLE\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_SIMPLE\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_SIMPLE_TRUE}" && test -z "${COND_DECODER_SIMPLE_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_SIMPLE\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_SIMPLE\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_FILTER_LZ_TRUE}" && test -z "${COND_FILTER_LZ_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_FILTER_LZ\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_FILTER_LZ\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ENCODER_LZ_TRUE}" && test -z "${COND_ENCODER_LZ_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ENCODER_LZ\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ENCODER_LZ\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_DECODER_LZ_TRUE}" && test -z "${COND_DECODER_LZ_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_DECODER_LZ\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_DECODER_LZ\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_CHECK_CRC32_TRUE}" && test -z "${COND_CHECK_CRC32_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_CHECK_CRC32\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_CHECK_CRC32\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_CHECK_CRC64_TRUE}" && test -z "${COND_CHECK_CRC64_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_CHECK_CRC64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_CHECK_CRC64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_CHECK_SHA256_TRUE}" && test -z "${COND_CHECK_SHA256_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_CHECK_SHA256\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_CHECK_SHA256\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ASM_X86_TRUE}" && test -z "${COND_ASM_X86_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ASM_X86\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ASM_X86\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_ASM_X86_64_TRUE}" && test -z "${COND_ASM_X86_64_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_ASM_X86_64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_ASM_X86_64\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_SMALL_TRUE}" && test -z "${COND_SMALL_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_SMALL\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_SMALL\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+ if test -n "$EXEEXT"; then
+ am__EXEEXT_TRUE=
+ am__EXEEXT_FALSE='#'
+else
+ am__EXEEXT_TRUE='#'
+ am__EXEEXT_FALSE=
+fi
+
+if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"AMDEP\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"AMDEP\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"am__fastdepCC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"am__fastdepCC\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${am__fastdepCCAS_TRUE}" && test -z "${am__fastdepCCAS_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"am__fastdepCCAS\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"am__fastdepCCAS\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+if test -z "${COND_SHARED_TRUE}" && test -z "${COND_SHARED_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_SHARED\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_SHARED\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+if test -z "${COND_GNULIB_TRUE}" && test -z "${COND_GNULIB_FALSE}"; then
+ { { $as_echo "$as_me:$LINENO: error: conditional \"COND_GNULIB\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+$as_echo "$as_me: error: conditional \"COND_GNULIB\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+: ${CONFIG_STATUS=./config.status}
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ $as_echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+cat >$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+SHELL=\${CONFIG_SHELL-$SHELL}
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in
+ *posix*) set -o posix ;;
+esac
+
+fi
+
+
+
+
+# PATH needs CR
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+ as_echo='printf %s\n'
+ as_echo_n='printf %s'
+else
+ if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+ as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+ as_echo_n='/usr/ucb/echo -n'
+ else
+ as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+ as_echo_n_body='eval
+ arg=$1;
+ case $arg in
+ *"$as_nl"*)
+ expr "X$arg" : "X\\(.*\\)$as_nl";
+ arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+ esac;
+ expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+ '
+ export as_echo_n_body
+ as_echo_n='sh -c $as_echo_n_body as_echo'
+ fi
+ export as_echo_body
+ as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ PATH_SEPARATOR=:
+ (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+ (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+ PATH_SEPARATOR=';'
+ }
+fi
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ { (exit 1); exit 1; }
+fi
+
+# Work around bugs in pre-3.0 UWIN ksh.
+for as_var in ENV MAIL MAILPATH
+do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# CDPATH.
+$as_unset CDPATH
+
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line after each line using $LINENO; the second 'sed'
+ # does the real work. The second script uses 'N' to pair each
+ # line-number line with the line containing $LINENO, and appends
+ # trailing '-' during substitution so that $LINENO is not a special
+ # case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # scripts with optimization help from Paolo Bonzini. Blame Lee
+ # E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+ # Don't try to exec as it changes $[0], causing all sort of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in
+-n*)
+ case `echo 'x\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ *) ECHO_C='\c';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+ if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+ elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+ else
+ as_ln_s='cp -p'
+ fi
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+ as_test_x='test -x'
+else
+ if ls -dL / >/dev/null 2>&1; then
+ as_ls_L_option=L
+ else
+ as_ls_L_option=
+ fi
+ as_test_x='
+ eval sh -c '\''
+ if test -d "$1"; then
+ test -d "$1/.";
+ else
+ case $1 in
+ -*)set "./$1";;
+ esac;
+ case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
+ ???[sx]*):;;*)false;;esac;fi
+ '\'' sh
+ '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+
+# Save the log message, to keep $[0] and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by XZ Utils $as_me 4.999.9beta, which was
+generated by GNU Autoconf 2.63. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+config_commands="$ac_config_commands"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files from templates according to the
+current configuration.
+
+Usage: $0 [OPTION]... [FILE]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number and configuration settings, then exit
+ -q, --quiet, --silent
+ do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Configuration commands:
+$config_commands
+
+Report bugs to <bug-autoconf@gnu.org>."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_version="\\
+XZ Utils config.status 4.999.9beta
+configured by $0, generated by GNU Autoconf 2.63,
+ with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\"
+
+Copyright (C) 2008 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+MKDIR_P='$MKDIR_P'
+AWK='$AWK'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ $as_echo "$ac_cs_version"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ CONFIG_FILES="$CONFIG_FILES '$ac_optarg'"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ case $ac_optarg in
+ *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ CONFIG_HEADERS="$CONFIG_HEADERS '$ac_optarg'"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ { $as_echo "$as_me: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; };;
+ --help | --hel | -h )
+ $as_echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { $as_echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+ set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ shift
+ \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+ CONFIG_SHELL='$SHELL'
+ export CONFIG_SHELL
+ exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ $as_echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#
+# INIT-COMMANDS
+#
+AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
+
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+sed_quote_subst='$sed_quote_subst'
+double_quote_subst='$double_quote_subst'
+delay_variable_subst='$delay_variable_subst'
+macro_version='`$ECHO "X$macro_version" | $Xsed -e "$delay_single_quote_subst"`'
+macro_revision='`$ECHO "X$macro_revision" | $Xsed -e "$delay_single_quote_subst"`'
+AS='`$ECHO "X$AS" | $Xsed -e "$delay_single_quote_subst"`'
+DLLTOOL='`$ECHO "X$DLLTOOL" | $Xsed -e "$delay_single_quote_subst"`'
+OBJDUMP='`$ECHO "X$OBJDUMP" | $Xsed -e "$delay_single_quote_subst"`'
+enable_shared='`$ECHO "X$enable_shared" | $Xsed -e "$delay_single_quote_subst"`'
+enable_static='`$ECHO "X$enable_static" | $Xsed -e "$delay_single_quote_subst"`'
+pic_mode='`$ECHO "X$pic_mode" | $Xsed -e "$delay_single_quote_subst"`'
+enable_fast_install='`$ECHO "X$enable_fast_install" | $Xsed -e "$delay_single_quote_subst"`'
+host_alias='`$ECHO "X$host_alias" | $Xsed -e "$delay_single_quote_subst"`'
+host='`$ECHO "X$host" | $Xsed -e "$delay_single_quote_subst"`'
+host_os='`$ECHO "X$host_os" | $Xsed -e "$delay_single_quote_subst"`'
+build_alias='`$ECHO "X$build_alias" | $Xsed -e "$delay_single_quote_subst"`'
+build='`$ECHO "X$build" | $Xsed -e "$delay_single_quote_subst"`'
+build_os='`$ECHO "X$build_os" | $Xsed -e "$delay_single_quote_subst"`'
+SED='`$ECHO "X$SED" | $Xsed -e "$delay_single_quote_subst"`'
+Xsed='`$ECHO "X$Xsed" | $Xsed -e "$delay_single_quote_subst"`'
+GREP='`$ECHO "X$GREP" | $Xsed -e "$delay_single_quote_subst"`'
+EGREP='`$ECHO "X$EGREP" | $Xsed -e "$delay_single_quote_subst"`'
+FGREP='`$ECHO "X$FGREP" | $Xsed -e "$delay_single_quote_subst"`'
+LD='`$ECHO "X$LD" | $Xsed -e "$delay_single_quote_subst"`'
+NM='`$ECHO "X$NM" | $Xsed -e "$delay_single_quote_subst"`'
+LN_S='`$ECHO "X$LN_S" | $Xsed -e "$delay_single_quote_subst"`'
+max_cmd_len='`$ECHO "X$max_cmd_len" | $Xsed -e "$delay_single_quote_subst"`'
+ac_objext='`$ECHO "X$ac_objext" | $Xsed -e "$delay_single_quote_subst"`'
+exeext='`$ECHO "X$exeext" | $Xsed -e "$delay_single_quote_subst"`'
+lt_unset='`$ECHO "X$lt_unset" | $Xsed -e "$delay_single_quote_subst"`'
+lt_SP2NL='`$ECHO "X$lt_SP2NL" | $Xsed -e "$delay_single_quote_subst"`'
+lt_NL2SP='`$ECHO "X$lt_NL2SP" | $Xsed -e "$delay_single_quote_subst"`'
+reload_flag='`$ECHO "X$reload_flag" | $Xsed -e "$delay_single_quote_subst"`'
+reload_cmds='`$ECHO "X$reload_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+deplibs_check_method='`$ECHO "X$deplibs_check_method" | $Xsed -e "$delay_single_quote_subst"`'
+file_magic_cmd='`$ECHO "X$file_magic_cmd" | $Xsed -e "$delay_single_quote_subst"`'
+AR='`$ECHO "X$AR" | $Xsed -e "$delay_single_quote_subst"`'
+AR_FLAGS='`$ECHO "X$AR_FLAGS" | $Xsed -e "$delay_single_quote_subst"`'
+STRIP='`$ECHO "X$STRIP" | $Xsed -e "$delay_single_quote_subst"`'
+RANLIB='`$ECHO "X$RANLIB" | $Xsed -e "$delay_single_quote_subst"`'
+old_postinstall_cmds='`$ECHO "X$old_postinstall_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+old_postuninstall_cmds='`$ECHO "X$old_postuninstall_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+old_archive_cmds='`$ECHO "X$old_archive_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+CC='`$ECHO "X$CC" | $Xsed -e "$delay_single_quote_subst"`'
+CFLAGS='`$ECHO "X$CFLAGS" | $Xsed -e "$delay_single_quote_subst"`'
+compiler='`$ECHO "X$compiler" | $Xsed -e "$delay_single_quote_subst"`'
+GCC='`$ECHO "X$GCC" | $Xsed -e "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_pipe='`$ECHO "X$lt_cv_sys_global_symbol_pipe" | $Xsed -e "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_cdecl='`$ECHO "X$lt_cv_sys_global_symbol_to_cdecl" | $Xsed -e "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "X$lt_cv_sys_global_symbol_to_c_name_address" | $Xsed -e "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "X$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $Xsed -e "$delay_single_quote_subst"`'
+objdir='`$ECHO "X$objdir" | $Xsed -e "$delay_single_quote_subst"`'
+SHELL='`$ECHO "X$SHELL" | $Xsed -e "$delay_single_quote_subst"`'
+ECHO='`$ECHO "X$ECHO" | $Xsed -e "$delay_single_quote_subst"`'
+MAGIC_CMD='`$ECHO "X$MAGIC_CMD" | $Xsed -e "$delay_single_quote_subst"`'
+lt_prog_compiler_no_builtin_flag='`$ECHO "X$lt_prog_compiler_no_builtin_flag" | $Xsed -e "$delay_single_quote_subst"`'
+lt_prog_compiler_wl='`$ECHO "X$lt_prog_compiler_wl" | $Xsed -e "$delay_single_quote_subst"`'
+lt_prog_compiler_pic='`$ECHO "X$lt_prog_compiler_pic" | $Xsed -e "$delay_single_quote_subst"`'
+lt_prog_compiler_static='`$ECHO "X$lt_prog_compiler_static" | $Xsed -e "$delay_single_quote_subst"`'
+lt_cv_prog_compiler_c_o='`$ECHO "X$lt_cv_prog_compiler_c_o" | $Xsed -e "$delay_single_quote_subst"`'
+need_locks='`$ECHO "X$need_locks" | $Xsed -e "$delay_single_quote_subst"`'
+DSYMUTIL='`$ECHO "X$DSYMUTIL" | $Xsed -e "$delay_single_quote_subst"`'
+NMEDIT='`$ECHO "X$NMEDIT" | $Xsed -e "$delay_single_quote_subst"`'
+LIPO='`$ECHO "X$LIPO" | $Xsed -e "$delay_single_quote_subst"`'
+OTOOL='`$ECHO "X$OTOOL" | $Xsed -e "$delay_single_quote_subst"`'
+OTOOL64='`$ECHO "X$OTOOL64" | $Xsed -e "$delay_single_quote_subst"`'
+libext='`$ECHO "X$libext" | $Xsed -e "$delay_single_quote_subst"`'
+shrext_cmds='`$ECHO "X$shrext_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+extract_expsyms_cmds='`$ECHO "X$extract_expsyms_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+archive_cmds_need_lc='`$ECHO "X$archive_cmds_need_lc" | $Xsed -e "$delay_single_quote_subst"`'
+enable_shared_with_static_runtimes='`$ECHO "X$enable_shared_with_static_runtimes" | $Xsed -e "$delay_single_quote_subst"`'
+export_dynamic_flag_spec='`$ECHO "X$export_dynamic_flag_spec" | $Xsed -e "$delay_single_quote_subst"`'
+whole_archive_flag_spec='`$ECHO "X$whole_archive_flag_spec" | $Xsed -e "$delay_single_quote_subst"`'
+compiler_needs_object='`$ECHO "X$compiler_needs_object" | $Xsed -e "$delay_single_quote_subst"`'
+old_archive_from_new_cmds='`$ECHO "X$old_archive_from_new_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+old_archive_from_expsyms_cmds='`$ECHO "X$old_archive_from_expsyms_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+archive_cmds='`$ECHO "X$archive_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+archive_expsym_cmds='`$ECHO "X$archive_expsym_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+module_cmds='`$ECHO "X$module_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+module_expsym_cmds='`$ECHO "X$module_expsym_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+with_gnu_ld='`$ECHO "X$with_gnu_ld" | $Xsed -e "$delay_single_quote_subst"`'
+allow_undefined_flag='`$ECHO "X$allow_undefined_flag" | $Xsed -e "$delay_single_quote_subst"`'
+no_undefined_flag='`$ECHO "X$no_undefined_flag" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec='`$ECHO "X$hardcode_libdir_flag_spec" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec_ld='`$ECHO "X$hardcode_libdir_flag_spec_ld" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_libdir_separator='`$ECHO "X$hardcode_libdir_separator" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_direct='`$ECHO "X$hardcode_direct" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_direct_absolute='`$ECHO "X$hardcode_direct_absolute" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_minus_L='`$ECHO "X$hardcode_minus_L" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_shlibpath_var='`$ECHO "X$hardcode_shlibpath_var" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_automatic='`$ECHO "X$hardcode_automatic" | $Xsed -e "$delay_single_quote_subst"`'
+inherit_rpath='`$ECHO "X$inherit_rpath" | $Xsed -e "$delay_single_quote_subst"`'
+link_all_deplibs='`$ECHO "X$link_all_deplibs" | $Xsed -e "$delay_single_quote_subst"`'
+fix_srcfile_path='`$ECHO "X$fix_srcfile_path" | $Xsed -e "$delay_single_quote_subst"`'
+always_export_symbols='`$ECHO "X$always_export_symbols" | $Xsed -e "$delay_single_quote_subst"`'
+export_symbols_cmds='`$ECHO "X$export_symbols_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+exclude_expsyms='`$ECHO "X$exclude_expsyms" | $Xsed -e "$delay_single_quote_subst"`'
+include_expsyms='`$ECHO "X$include_expsyms" | $Xsed -e "$delay_single_quote_subst"`'
+prelink_cmds='`$ECHO "X$prelink_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+file_list_spec='`$ECHO "X$file_list_spec" | $Xsed -e "$delay_single_quote_subst"`'
+variables_saved_for_relink='`$ECHO "X$variables_saved_for_relink" | $Xsed -e "$delay_single_quote_subst"`'
+need_lib_prefix='`$ECHO "X$need_lib_prefix" | $Xsed -e "$delay_single_quote_subst"`'
+need_version='`$ECHO "X$need_version" | $Xsed -e "$delay_single_quote_subst"`'
+version_type='`$ECHO "X$version_type" | $Xsed -e "$delay_single_quote_subst"`'
+runpath_var='`$ECHO "X$runpath_var" | $Xsed -e "$delay_single_quote_subst"`'
+shlibpath_var='`$ECHO "X$shlibpath_var" | $Xsed -e "$delay_single_quote_subst"`'
+shlibpath_overrides_runpath='`$ECHO "X$shlibpath_overrides_runpath" | $Xsed -e "$delay_single_quote_subst"`'
+libname_spec='`$ECHO "X$libname_spec" | $Xsed -e "$delay_single_quote_subst"`'
+library_names_spec='`$ECHO "X$library_names_spec" | $Xsed -e "$delay_single_quote_subst"`'
+soname_spec='`$ECHO "X$soname_spec" | $Xsed -e "$delay_single_quote_subst"`'
+postinstall_cmds='`$ECHO "X$postinstall_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+postuninstall_cmds='`$ECHO "X$postuninstall_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+finish_cmds='`$ECHO "X$finish_cmds" | $Xsed -e "$delay_single_quote_subst"`'
+finish_eval='`$ECHO "X$finish_eval" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_into_libs='`$ECHO "X$hardcode_into_libs" | $Xsed -e "$delay_single_quote_subst"`'
+sys_lib_search_path_spec='`$ECHO "X$sys_lib_search_path_spec" | $Xsed -e "$delay_single_quote_subst"`'
+sys_lib_dlsearch_path_spec='`$ECHO "X$sys_lib_dlsearch_path_spec" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_action='`$ECHO "X$hardcode_action" | $Xsed -e "$delay_single_quote_subst"`'
+enable_dlopen='`$ECHO "X$enable_dlopen" | $Xsed -e "$delay_single_quote_subst"`'
+enable_dlopen_self='`$ECHO "X$enable_dlopen_self" | $Xsed -e "$delay_single_quote_subst"`'
+enable_dlopen_self_static='`$ECHO "X$enable_dlopen_self_static" | $Xsed -e "$delay_single_quote_subst"`'
+old_striplib='`$ECHO "X$old_striplib" | $Xsed -e "$delay_single_quote_subst"`'
+striplib='`$ECHO "X$striplib" | $Xsed -e "$delay_single_quote_subst"`'
+LD_RC='`$ECHO "X$LD_RC" | $Xsed -e "$delay_single_quote_subst"`'
+old_archive_cmds_RC='`$ECHO "X$old_archive_cmds_RC" | $Xsed -e "$delay_single_quote_subst"`'
+compiler_RC='`$ECHO "X$compiler_RC" | $Xsed -e "$delay_single_quote_subst"`'
+GCC_RC='`$ECHO "X$GCC_RC" | $Xsed -e "$delay_single_quote_subst"`'
+lt_prog_compiler_no_builtin_flag_RC='`$ECHO "X$lt_prog_compiler_no_builtin_flag_RC" | $Xsed -e "$delay_single_quote_subst"`'
+lt_prog_compiler_wl_RC='`$ECHO "X$lt_prog_compiler_wl_RC" | $Xsed -e "$delay_single_quote_subst"`'
+lt_prog_compiler_pic_RC='`$ECHO "X$lt_prog_compiler_pic_RC" | $Xsed -e "$delay_single_quote_subst"`'
+lt_prog_compiler_static_RC='`$ECHO "X$lt_prog_compiler_static_RC" | $Xsed -e "$delay_single_quote_subst"`'
+lt_cv_prog_compiler_c_o_RC='`$ECHO "X$lt_cv_prog_compiler_c_o_RC" | $Xsed -e "$delay_single_quote_subst"`'
+archive_cmds_need_lc_RC='`$ECHO "X$archive_cmds_need_lc_RC" | $Xsed -e "$delay_single_quote_subst"`'
+enable_shared_with_static_runtimes_RC='`$ECHO "X$enable_shared_with_static_runtimes_RC" | $Xsed -e "$delay_single_quote_subst"`'
+export_dynamic_flag_spec_RC='`$ECHO "X$export_dynamic_flag_spec_RC" | $Xsed -e "$delay_single_quote_subst"`'
+whole_archive_flag_spec_RC='`$ECHO "X$whole_archive_flag_spec_RC" | $Xsed -e "$delay_single_quote_subst"`'
+compiler_needs_object_RC='`$ECHO "X$compiler_needs_object_RC" | $Xsed -e "$delay_single_quote_subst"`'
+old_archive_from_new_cmds_RC='`$ECHO "X$old_archive_from_new_cmds_RC" | $Xsed -e "$delay_single_quote_subst"`'
+old_archive_from_expsyms_cmds_RC='`$ECHO "X$old_archive_from_expsyms_cmds_RC" | $Xsed -e "$delay_single_quote_subst"`'
+archive_cmds_RC='`$ECHO "X$archive_cmds_RC" | $Xsed -e "$delay_single_quote_subst"`'
+archive_expsym_cmds_RC='`$ECHO "X$archive_expsym_cmds_RC" | $Xsed -e "$delay_single_quote_subst"`'
+module_cmds_RC='`$ECHO "X$module_cmds_RC" | $Xsed -e "$delay_single_quote_subst"`'
+module_expsym_cmds_RC='`$ECHO "X$module_expsym_cmds_RC" | $Xsed -e "$delay_single_quote_subst"`'
+with_gnu_ld_RC='`$ECHO "X$with_gnu_ld_RC" | $Xsed -e "$delay_single_quote_subst"`'
+allow_undefined_flag_RC='`$ECHO "X$allow_undefined_flag_RC" | $Xsed -e "$delay_single_quote_subst"`'
+no_undefined_flag_RC='`$ECHO "X$no_undefined_flag_RC" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec_RC='`$ECHO "X$hardcode_libdir_flag_spec_RC" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec_ld_RC='`$ECHO "X$hardcode_libdir_flag_spec_ld_RC" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_libdir_separator_RC='`$ECHO "X$hardcode_libdir_separator_RC" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_direct_RC='`$ECHO "X$hardcode_direct_RC" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_direct_absolute_RC='`$ECHO "X$hardcode_direct_absolute_RC" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_minus_L_RC='`$ECHO "X$hardcode_minus_L_RC" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_shlibpath_var_RC='`$ECHO "X$hardcode_shlibpath_var_RC" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_automatic_RC='`$ECHO "X$hardcode_automatic_RC" | $Xsed -e "$delay_single_quote_subst"`'
+inherit_rpath_RC='`$ECHO "X$inherit_rpath_RC" | $Xsed -e "$delay_single_quote_subst"`'
+link_all_deplibs_RC='`$ECHO "X$link_all_deplibs_RC" | $Xsed -e "$delay_single_quote_subst"`'
+fix_srcfile_path_RC='`$ECHO "X$fix_srcfile_path_RC" | $Xsed -e "$delay_single_quote_subst"`'
+always_export_symbols_RC='`$ECHO "X$always_export_symbols_RC" | $Xsed -e "$delay_single_quote_subst"`'
+export_symbols_cmds_RC='`$ECHO "X$export_symbols_cmds_RC" | $Xsed -e "$delay_single_quote_subst"`'
+exclude_expsyms_RC='`$ECHO "X$exclude_expsyms_RC" | $Xsed -e "$delay_single_quote_subst"`'
+include_expsyms_RC='`$ECHO "X$include_expsyms_RC" | $Xsed -e "$delay_single_quote_subst"`'
+prelink_cmds_RC='`$ECHO "X$prelink_cmds_RC" | $Xsed -e "$delay_single_quote_subst"`'
+file_list_spec_RC='`$ECHO "X$file_list_spec_RC" | $Xsed -e "$delay_single_quote_subst"`'
+hardcode_action_RC='`$ECHO "X$hardcode_action_RC" | $Xsed -e "$delay_single_quote_subst"`'
+
+LTCC='$LTCC'
+LTCFLAGS='$LTCFLAGS'
+compiler='$compiler_DEFAULT'
+
+# Quote evaled strings.
+for var in SED \
+GREP \
+EGREP \
+FGREP \
+LD \
+NM \
+LN_S \
+lt_SP2NL \
+lt_NL2SP \
+reload_flag \
+deplibs_check_method \
+file_magic_cmd \
+AR \
+AR_FLAGS \
+STRIP \
+RANLIB \
+CC \
+CFLAGS \
+compiler \
+lt_cv_sys_global_symbol_pipe \
+lt_cv_sys_global_symbol_to_cdecl \
+lt_cv_sys_global_symbol_to_c_name_address \
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \
+SHELL \
+ECHO \
+lt_prog_compiler_no_builtin_flag \
+lt_prog_compiler_wl \
+lt_prog_compiler_pic \
+lt_prog_compiler_static \
+lt_cv_prog_compiler_c_o \
+need_locks \
+DSYMUTIL \
+NMEDIT \
+LIPO \
+OTOOL \
+OTOOL64 \
+shrext_cmds \
+export_dynamic_flag_spec \
+whole_archive_flag_spec \
+compiler_needs_object \
+with_gnu_ld \
+allow_undefined_flag \
+no_undefined_flag \
+hardcode_libdir_flag_spec \
+hardcode_libdir_flag_spec_ld \
+hardcode_libdir_separator \
+fix_srcfile_path \
+exclude_expsyms \
+include_expsyms \
+file_list_spec \
+variables_saved_for_relink \
+libname_spec \
+library_names_spec \
+soname_spec \
+finish_eval \
+old_striplib \
+striplib \
+LD_RC \
+compiler_RC \
+lt_prog_compiler_no_builtin_flag_RC \
+lt_prog_compiler_wl_RC \
+lt_prog_compiler_pic_RC \
+lt_prog_compiler_static_RC \
+lt_cv_prog_compiler_c_o_RC \
+export_dynamic_flag_spec_RC \
+whole_archive_flag_spec_RC \
+compiler_needs_object_RC \
+with_gnu_ld_RC \
+allow_undefined_flag_RC \
+no_undefined_flag_RC \
+hardcode_libdir_flag_spec_RC \
+hardcode_libdir_flag_spec_ld_RC \
+hardcode_libdir_separator_RC \
+fix_srcfile_path_RC \
+exclude_expsyms_RC \
+include_expsyms_RC \
+file_list_spec_RC; do
+ case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in
+ *[\\\\\\\`\\"\\\$]*)
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
+ ;;
+ *)
+ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+ ;;
+ esac
+done
+
+# Double-quote double-evaled strings.
+for var in reload_cmds \
+old_postinstall_cmds \
+old_postuninstall_cmds \
+old_archive_cmds \
+extract_expsyms_cmds \
+old_archive_from_new_cmds \
+old_archive_from_expsyms_cmds \
+archive_cmds \
+archive_expsym_cmds \
+module_cmds \
+module_expsym_cmds \
+export_symbols_cmds \
+prelink_cmds \
+postinstall_cmds \
+postuninstall_cmds \
+finish_cmds \
+sys_lib_search_path_spec \
+sys_lib_dlsearch_path_spec \
+old_archive_cmds_RC \
+old_archive_from_new_cmds_RC \
+old_archive_from_expsyms_cmds_RC \
+archive_cmds_RC \
+archive_expsym_cmds_RC \
+module_cmds_RC \
+module_expsym_cmds_RC \
+export_symbols_cmds_RC \
+prelink_cmds_RC; do
+ case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in
+ *[\\\\\\\`\\"\\\$]*)
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
+ ;;
+ *)
+ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+ ;;
+ esac
+done
+
+# Fix-up fallback echo if it was mangled by the above quoting rules.
+case \$lt_ECHO in
+*'\\\$0 --fallback-echo"') lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\$0 --fallback-echo"\$/\$0 --fallback-echo"/'\`
+ ;;
+esac
+
+ac_aux_dir='$ac_aux_dir'
+xsi_shell='$xsi_shell'
+lt_shell_append='$lt_shell_append'
+
+# See if we are running on zsh, and set the options which allow our
+# commands through without removal of \ escapes INIT.
+if test -n "\${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+fi
+
+
+ PACKAGE='$PACKAGE'
+ VERSION='$VERSION'
+ TIMESTAMP='$TIMESTAMP'
+ RM='$RM'
+ ofile='$ofile'
+
+
+
+
+
+# Capture the value of obsolete ALL_LINGUAS because we need it to compute
+ # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. But hide it
+ # from automake < 1.5.
+ eval 'OBSOLETE_ALL_LINGUAS''="$ALL_LINGUAS"'
+ # Capture the value of LINGUAS because we need it to compute CATALOGS.
+ LINGUAS="${LINGUAS-%UNSET%}"
+
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
+ "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
+ "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;;
+ "po-directories") CONFIG_COMMANDS="$CONFIG_COMMANDS po-directories" ;;
+ "Doxyfile") CONFIG_FILES="$CONFIG_FILES Doxyfile" ;;
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+ "po/Makefile.in") CONFIG_FILES="$CONFIG_FILES po/Makefile.in" ;;
+ "lib/Makefile") CONFIG_FILES="$CONFIG_FILES lib/Makefile" ;;
+ "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;;
+ "src/liblzma/liblzma.pc") CONFIG_FILES="$CONFIG_FILES src/liblzma/liblzma.pc" ;;
+ "src/liblzma/Makefile") CONFIG_FILES="$CONFIG_FILES src/liblzma/Makefile" ;;
+ "src/liblzma/api/Makefile") CONFIG_FILES="$CONFIG_FILES src/liblzma/api/Makefile" ;;
+ "src/xz/Makefile") CONFIG_FILES="$CONFIG_FILES src/xz/Makefile" ;;
+ "src/xzdec/Makefile") CONFIG_FILES="$CONFIG_FILES src/xzdec/Makefile" ;;
+ "src/lzmainfo/Makefile") CONFIG_FILES="$CONFIG_FILES src/lzmainfo/Makefile" ;;
+ "src/scripts/Makefile") CONFIG_FILES="$CONFIG_FILES src/scripts/Makefile" ;;
+ "src/scripts/xzdiff") CONFIG_FILES="$CONFIG_FILES src/scripts/xzdiff" ;;
+ "src/scripts/xzgrep") CONFIG_FILES="$CONFIG_FILES src/scripts/xzgrep" ;;
+ "src/scripts/xzmore") CONFIG_FILES="$CONFIG_FILES src/scripts/xzmore" ;;
+ "src/scripts/xzless") CONFIG_FILES="$CONFIG_FILES src/scripts/xzless" ;;
+ "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;;
+ "debug/Makefile") CONFIG_FILES="$CONFIG_FILES debug/Makefile" ;;
+
+ *) { { $as_echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
+$as_echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+ test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp=
+ trap 'exit_status=$?
+ { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+' 0
+ trap '{ (exit 1); exit 1; }' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} ||
+{
+ $as_echo "$as_me: cannot create a temporary directory in ." >&2
+ { (exit 1); exit 1; }
+}
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=' '
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+ ac_cs_awk_cr='\\r'
+else
+ ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+ echo "cat >conf$$subs.awk <<_ACEOF" &&
+ echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+ echo "_ACEOF"
+} >conf$$subs.sh ||
+ { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
+$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
+ { (exit 1); exit 1; }; }
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+ . ./conf$$subs.sh ||
+ { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
+$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
+ { (exit 1); exit 1; }; }
+
+ ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+ if test $ac_delim_n = $ac_delim_num; then
+ break
+ elif $ac_last_try; then
+ { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
+$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
+ { (exit 1); exit 1; }; }
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+rm -f conf$$subs.sh
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\).*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\).*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+ N
+ s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$tmp/subs1.awk" <<_ACAWK &&
+ for (key in S) S_is_set[key] = 1
+ FS = ""
+
+}
+{
+ line = $ 0
+ nfields = split(line, field, "@")
+ substed = 0
+ len = length(field[1])
+ for (i = 2; i < nfields; i++) {
+ key = field[i]
+ keylen = length(key)
+ if (S_is_set[key]) {
+ value = S[key]
+ line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+ len += length(value) + length(field[++i])
+ substed = 1
+ } else
+ len += 1 + keylen
+ }
+
+ print line
+}
+
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+ cat
+fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
+ || { { $as_echo "$as_me:$LINENO: error: could not setup config files machinery" >&5
+$as_echo "$as_me: error: could not setup config files machinery" >&2;}
+ { (exit 1); exit 1; }; }
+_ACEOF
+
+# VPATH may cause trouble with some makes, so we remove $(srcdir),
+# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=/{
+s/:*\$(srcdir):*/:/
+s/:*\${srcdir}:*/:/
+s/:*@srcdir@:*/:/
+s/^\([^=]*=[ ]*\):*/\1/
+s/:*$//
+s/^[^=]*=[ ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+ ac_t=`sed -n "/$ac_delim/p" confdefs.h`
+ if test -z "$ac_t"; then
+ break
+ elif $ac_last_try; then
+ { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_HEADERS" >&5
+$as_echo "$as_me: error: could not make $CONFIG_HEADERS" >&2;}
+ { (exit 1); exit 1; }; }
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any. Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[ ]*#[ ]*define[ ][ ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ for (key in D) D_is_set[key] = 1
+ FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+ line = \$ 0
+ split(line, arg, " ")
+ if (arg[1] == "#") {
+ defundef = arg[2]
+ mac1 = arg[3]
+ } else {
+ defundef = substr(arg[1], 2)
+ mac1 = arg[2]
+ }
+ split(mac1, mac2, "(") #)
+ macro = mac2[1]
+ prefix = substr(line, 1, index(line, defundef) - 1)
+ if (D_is_set[macro]) {
+ # Preserve the white space surrounding the "#".
+ print prefix "define", macro P[macro] D[macro]
+ next
+ } else {
+ # Replace #undef with comments. This is necessary, for example,
+ # in the case of _POSIX_SOURCE, which is predefined and required
+ # on some systems where configure will not decide to define it.
+ if (defundef == "undef") {
+ print "/*", prefix defundef, macro, "*/"
+ next
+ }
+ }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ { { $as_echo "$as_me:$LINENO: error: could not setup config headers machinery" >&5
+$as_echo "$as_me: error: could not setup config headers machinery" >&2;}
+ { (exit 1); exit 1; }; }
+fi # test -n "$CONFIG_HEADERS"
+
+
+eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS"
+shift
+for ac_tag
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) { { $as_echo "$as_me:$LINENO: error: invalid tag $ac_tag" >&5
+$as_echo "$as_me: error: invalid tag $ac_tag" >&2;}
+ { (exit 1); exit 1; }; };;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ { { $as_echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5
+$as_echo "$as_me: error: cannot find input file: $ac_f" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+ case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+ ac_file_inputs="$ac_file_inputs '$ac_f'"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input='Generated from '`
+ $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ `' by configure.'
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { $as_echo "$as_me:$LINENO: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
+ fi
+ # Neutralize special characters interpreted by sed in replacement strings.
+ case $configure_input in #(
+ *\&* | *\|* | *\\* )
+ ac_sed_conf_input=`$as_echo "$configure_input" |
+ sed 's/[\\\\&|]/\\\\&/g'`;; #(
+ *) ac_sed_conf_input=$configure_input;;
+ esac
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$tmp/stdin" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; } ;;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ { as_dir="$ac_dir"
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
+$as_echo "$as_me: error: cannot create directory $as_dir" >&2;}
+ { (exit 1); exit 1; }; }; }
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+ ac_MKDIR_P=$MKDIR_P
+ case $MKDIR_P in
+ [\\/$]* | ?:[\\/]* ) ;;
+ */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
+ esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+
+ac_sed_dataroot='
+/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p
+'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ ac_datarootdir_hack='
+ s&@datadir@&$datadir&g
+ s&@docdir@&$docdir&g
+ s&@infodir@&$infodir&g
+ s&@localedir@&$localedir&g
+ s&@mandir@&$mandir&g
+ s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+s&@MKDIR_P@&$ac_MKDIR_P&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
+ { $as_echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&2;}
+
+ rm -f "$tmp/stdin"
+ case $ac_file in
+ -) cat "$tmp/out" && rm -f "$tmp/out";;
+ *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
+ esac \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+ if test x"$ac_file" != x-; then
+ {
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
+ } >"$tmp/config.h" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+ if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
+ { $as_echo "$as_me:$LINENO: $ac_file is unchanged" >&5
+$as_echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f "$ac_file"
+ mv "$tmp/config.h" "$ac_file" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5
+$as_echo "$as_me: error: could not create $ac_file" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ else
+ $as_echo "/* $configure_input */" \
+ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
+ || { { $as_echo "$as_me:$LINENO: error: could not create -" >&5
+$as_echo "$as_me: error: could not create -" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+# Compute "$ac_file"'s index in $config_headers.
+_am_arg="$ac_file"
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+ case $_am_header in
+ $_am_arg | $_am_arg:* )
+ break ;;
+ * )
+ _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+ esac
+done
+echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" ||
+$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$_am_arg" : 'X\(//\)[^/]' \| \
+ X"$_am_arg" : 'X\(//\)$' \| \
+ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$_am_arg" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`/stamp-h$_am_stamp_count
+ ;;
+
+ :C) { $as_echo "$as_me:$LINENO: executing $ac_file commands" >&5
+$as_echo "$as_me: executing $ac_file commands" >&6;}
+ ;;
+ esac
+
+
+ case $ac_file$ac_mode in
+ "depfiles":C) test x"$AMDEP_TRUE" != x"" || {
+ # Autoconf 2.62 quotes --file arguments for eval, but not when files
+ # are listed without --file. Let's play safe and only enable the eval
+ # if we detect the quoting.
+ case $CONFIG_FILES in
+ *\'*) eval set x "$CONFIG_FILES" ;;
+ *) set x $CONFIG_FILES ;;
+ esac
+ shift
+ for mf
+ do
+ # Strip MF so we end up with the name of the file.
+ mf=`echo "$mf" | sed -e 's/:.*$//'`
+ # Check whether this is an Automake generated Makefile or not.
+ # We used to match only the files named `Makefile.in', but
+ # some people rename them; so instead we look at the file content.
+ # Grep'ing the first line is not enough: some people post-process
+ # each Makefile.in and add a new line on top of each file to say so.
+ # Grep'ing the whole file is not good either: AIX grep has a line
+ # limit of 2048, but all sed's we know understand at least 4000.
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+ dirpart=`$as_dirname -- "$mf" ||
+$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$mf" : 'X\(//\)[^/]' \| \
+ X"$mf" : 'X\(//\)$' \| \
+ X"$mf" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$mf" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ else
+ continue
+ fi
+ # Extract the definition of DEPDIR, am__include, and am__quote
+ # from the Makefile without running `make'.
+ DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+ test -z "$DEPDIR" && continue
+ am__include=`sed -n 's/^am__include = //p' < "$mf"`
+ test -z "am__include" && continue
+ am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+ # When using ansi2knr, U may be empty or an underscore; expand it
+ U=`sed -n 's/^U = //p' < "$mf"`
+ # Find all dependency output files, they are included files with
+ # $(DEPDIR) in their names. We invoke sed twice because it is the
+ # simplest approach to changing $(DEPDIR) to its actual value in the
+ # expansion.
+ for file in `sed -n "
+ s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+ # Make sure the directory exists.
+ test -f "$dirpart/$file" && continue
+ fdir=`$as_dirname -- "$file" ||
+$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$file" : 'X\(//\)[^/]' \| \
+ X"$file" : 'X\(//\)$' \| \
+ X"$file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ { as_dir=$dirpart/$fdir
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
+$as_echo "$as_me: error: cannot create directory $as_dir" >&2;}
+ { (exit 1); exit 1; }; }; }
+ # echo "creating $dirpart/$file"
+ echo '# dummy' > "$dirpart/$file"
+ done
+ done
+}
+ ;;
+ "libtool":C)
+
+ # See if we are running on zsh, and set the options which allow our
+ # commands through without removal of \ escapes.
+ if test -n "${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+ fi
+
+ cfgfile="${ofile}T"
+ trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+ $RM "$cfgfile"
+
+ cat <<_LT_EOF >> "$cfgfile"
+#! $SHELL
+
+# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+#
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
+# 2006, 2007, 2008 Free Software Foundation, Inc.
+# Written by Gordon Matzigkeit, 1996
+#
+# This file is part of GNU Libtool.
+#
+# GNU Libtool is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Libtool; see the file COPYING. If not, a copy
+# can be downloaded from http://www.gnu.org/licenses/gpl.html, or
+# obtained by writing to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA.
+
+
+# The names of the tagged configurations supported by this script.
+available_tags="RC "
+
+# ### BEGIN LIBTOOL CONFIG
+
+# Which release of libtool.m4 was used?
+macro_version=$macro_version
+macro_revision=$macro_revision
+
+# Assembler program.
+AS=$AS
+
+# DLL creation program.
+DLLTOOL=$DLLTOOL
+
+# Object dumper program.
+OBJDUMP=$OBJDUMP
+
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
+
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
+
+# What type of objects to build.
+pic_mode=$pic_mode
+
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
+
+# The host system.
+host_alias=$host_alias
+host=$host
+host_os=$host_os
+
+# The build system.
+build_alias=$build_alias
+build=$build
+build_os=$build_os
+
+# A sed program that does not truncate output.
+SED=$lt_SED
+
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="\$SED -e 1s/^X//"
+
+# A grep program that handles long lines.
+GREP=$lt_GREP
+
+# An ERE matcher.
+EGREP=$lt_EGREP
+
+# A literal string matcher.
+FGREP=$lt_FGREP
+
+# A BSD- or MS-compatible name lister.
+NM=$lt_NM
+
+# Whether we need soft or hard links.
+LN_S=$lt_LN_S
+
+# What is the maximum length of a command?
+max_cmd_len=$max_cmd_len
+
+# Object file suffix (normally "o").
+objext=$ac_objext
+
+# Executable file suffix (normally "").
+exeext=$exeext
+
+# whether the shell understands "unset".
+lt_unset=$lt_unset
+
+# turn spaces into newlines.
+SP2NL=$lt_lt_SP2NL
+
+# turn newlines into spaces.
+NL2SP=$lt_lt_NL2SP
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag
+reload_cmds=$lt_reload_cmds
+
+# Method to check whether dependent libraries are shared objects.
+deplibs_check_method=$lt_deplibs_check_method
+
+# Command to use when deplibs_check_method == "file_magic".
+file_magic_cmd=$lt_file_magic_cmd
+
+# The archiver.
+AR=$lt_AR
+AR_FLAGS=$lt_AR_FLAGS
+
+# A symbol stripping program.
+STRIP=$lt_STRIP
+
+# Commands used to install an old-style archive.
+RANLIB=$lt_RANLIB
+old_postinstall_cmds=$lt_old_postinstall_cmds
+old_postuninstall_cmds=$lt_old_postuninstall_cmds
+
+# A C compiler.
+LTCC=$lt_CC
+
+# LTCC compiler flags.
+LTCFLAGS=$lt_CFLAGS
+
+# Take the output of nm and produce a listing of raw symbols and C names.
+global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe
+
+# Transform the output of nm in a proper C declaration.
+global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl
+
+# Transform the output of nm in a C name address pair.
+global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address
+
+# Transform the output of nm in a C name address pair when lib prefix is needed.
+global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix
+
+# The name of the directory that contains temporary libtool files.
+objdir=$objdir
+
+# Shell to use when invoking shell scripts.
+SHELL=$lt_SHELL
+
+# An echo program that does not interpret backslashes.
+ECHO=$lt_ECHO
+
+# Used to examine libraries when file_magic_cmd begins with "file".
+MAGIC_CMD=$MAGIC_CMD
+
+# Must we lock files when doing compilation?
+need_locks=$lt_need_locks
+
+# Tool to manipulate archived DWARF debug symbol files on Mac OS X.
+DSYMUTIL=$lt_DSYMUTIL
+
+# Tool to change global to local symbols on Mac OS X.
+NMEDIT=$lt_NMEDIT
+
+# Tool to manipulate fat objects and archives on Mac OS X.
+LIPO=$lt_LIPO
+
+# ldd/readelf like tool for Mach-O binaries on Mac OS X.
+OTOOL=$lt_OTOOL
+
+# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4.
+OTOOL64=$lt_OTOOL64
+
+# Old archive suffix (normally "a").
+libext=$libext
+
+# Shared library suffix (normally ".so").
+shrext_cmds=$lt_shrext_cmds
+
+# The commands to extract the exported symbol list from a shared archive.
+extract_expsyms_cmds=$lt_extract_expsyms_cmds
+
+# Variables whose values should be saved in libtool wrapper scripts and
+# restored at link time.
+variables_saved_for_relink=$lt_variables_saved_for_relink
+
+# Do we need the "lib" prefix for modules?
+need_lib_prefix=$need_lib_prefix
+
+# Do we need a version for libraries?
+need_version=$need_version
+
+# Library versioning type.
+version_type=$version_type
+
+# Shared library runtime path variable.
+runpath_var=$runpath_var
+
+# Shared library path variable.
+shlibpath_var=$shlibpath_var
+
+# Is shlibpath searched before the hard-coded library search path?
+shlibpath_overrides_runpath=$shlibpath_overrides_runpath
+
+# Format of library name prefix.
+libname_spec=$lt_libname_spec
+
+# List of archive names. First name is the real one, the rest are links.
+# The last name is the one that the linker finds with -lNAME
+library_names_spec=$lt_library_names_spec
+
+# The coded name of the library, if different from the real name.
+soname_spec=$lt_soname_spec
+
+# Command to use after installation of a shared archive.
+postinstall_cmds=$lt_postinstall_cmds
+
+# Command to use after uninstallation of a shared archive.
+postuninstall_cmds=$lt_postuninstall_cmds
+
+# Commands used to finish a libtool library installation in a directory.
+finish_cmds=$lt_finish_cmds
+
+# As "finish_cmds", except a single script fragment to be evaled but
+# not shown.
+finish_eval=$lt_finish_eval
+
+# Whether we should hardcode library paths into libraries.
+hardcode_into_libs=$hardcode_into_libs
+
+# Compile-time system search path for libraries.
+sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
+
+# Run-time system search path for libraries.
+sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec
+
+# Whether dlopen is supported.
+dlopen_support=$enable_dlopen
+
+# Whether dlopen of programs is supported.
+dlopen_self=$enable_dlopen_self
+
+# Whether dlopen of statically linked programs is supported.
+dlopen_self_static=$enable_dlopen_self_static
+
+# Commands to strip libraries.
+old_striplib=$lt_old_striplib
+striplib=$lt_striplib
+
+
+# The linker used to build libraries.
+LD=$lt_LD
+
+# Commands used to build an old-style archive.
+old_archive_cmds=$lt_old_archive_cmds
+
+# A language specific compiler.
+CC=$lt_compiler
+
+# Is the compiler the GNU compiler?
+with_gcc=$GCC
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag
+
+# How to pass a linker flag through the compiler.
+wl=$lt_lt_prog_compiler_wl
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_lt_prog_compiler_pic
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_lt_prog_compiler_static
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$archive_cmds_need_lc
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object=$lt_compiler_needs_object
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds
+
+# Commands used to build a shared archive.
+archive_cmds=$lt_archive_cmds
+archive_expsym_cmds=$lt_archive_expsym_cmds
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds=$lt_module_cmds
+module_expsym_cmds=$lt_module_expsym_cmds
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld=$lt_with_gnu_ld
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
+
+# If ld is used when linking, flag to hardcode \$libdir into a binary
+# during linking. This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=$hardcode_direct
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute",i.e impossible to change by setting \${shlibpath_var} if the
+# library is relocated.
+hardcode_direct_absolute=$hardcode_direct_absolute
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=$hardcode_minus_L
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=$hardcode_automatic
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=$inherit_rpath
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs
+
+# Fix the shell variable \$srcfile for the compiler.
+fix_srcfile_path=$lt_fix_srcfile_path
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=$always_export_symbols
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=$lt_prelink_cmds
+
+# Specify filename containing input files.
+file_list_spec=$lt_file_list_spec
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action
+
+# ### END LIBTOOL CONFIG
+
+_LT_EOF
+
+ case $host_os in
+ aix3*)
+ cat <<\_LT_EOF >> "$cfgfile"
+# AIX sometimes has problems with the GCC collect2 program. For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+fi
+_LT_EOF
+ ;;
+ esac
+
+
+ltmain="$ac_aux_dir/ltmain.sh"
+
+
+ # We use sed instead of cat because bash on DJGPP gets confused if
+ # it finds mixed CR/LF and LF-only lines. Since sed operates in
+ # text mode, it properly converts lines to CR/LF. This bash problem
+ # is reportedly fixed, but why not run on old versions too?
+ sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \
+ || (rm -f "$cfgfile"; exit 1)
+
+ case $xsi_shell in
+ yes)
+ cat << \_LT_EOF >> "$cfgfile"
+
+# func_dirname file append nondir_replacement
+# Compute the dirname of FILE. If nonempty, add APPEND to the result,
+# otherwise set result to NONDIR_REPLACEMENT.
+func_dirname ()
+{
+ case ${1} in
+ */*) func_dirname_result="${1%/*}${2}" ;;
+ * ) func_dirname_result="${3}" ;;
+ esac
+}
+
+# func_basename file
+func_basename ()
+{
+ func_basename_result="${1##*/}"
+}
+
+# func_dirname_and_basename file append nondir_replacement
+# perform func_basename and func_dirname in a single function
+# call:
+# dirname: Compute the dirname of FILE. If nonempty,
+# add APPEND to the result, otherwise set result
+# to NONDIR_REPLACEMENT.
+# value returned in "$func_dirname_result"
+# basename: Compute filename of FILE.
+# value returned in "$func_basename_result"
+# Implementation must be kept synchronized with func_dirname
+# and func_basename. For efficiency, we do not delegate to
+# those functions but instead duplicate the functionality here.
+func_dirname_and_basename ()
+{
+ case ${1} in
+ */*) func_dirname_result="${1%/*}${2}" ;;
+ * ) func_dirname_result="${3}" ;;
+ esac
+ func_basename_result="${1##*/}"
+}
+
+# func_stripname prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+func_stripname ()
+{
+ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are
+ # positional parameters, so assign one to ordinary parameter first.
+ func_stripname_result=${3}
+ func_stripname_result=${func_stripname_result#"${1}"}
+ func_stripname_result=${func_stripname_result%"${2}"}
+}
+
+# func_opt_split
+func_opt_split ()
+{
+ func_opt_split_opt=${1%%=*}
+ func_opt_split_arg=${1#*=}
+}
+
+# func_lo2o object
+func_lo2o ()
+{
+ case ${1} in
+ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;
+ *) func_lo2o_result=${1} ;;
+ esac
+}
+
+# func_xform libobj-or-source
+func_xform ()
+{
+ func_xform_result=${1%.*}.lo
+}
+
+# func_arith arithmetic-term...
+func_arith ()
+{
+ func_arith_result=$(( $* ))
+}
+
+# func_len string
+# STRING may not start with a hyphen.
+func_len ()
+{
+ func_len_result=${#1}
+}
+
+_LT_EOF
+ ;;
+ *) # Bourne compatible functions.
+ cat << \_LT_EOF >> "$cfgfile"
+
+# func_dirname file append nondir_replacement
+# Compute the dirname of FILE. If nonempty, add APPEND to the result,
+# otherwise set result to NONDIR_REPLACEMENT.
+func_dirname ()
+{
+ # Extract subdirectory from the argument.
+ func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"`
+ if test "X$func_dirname_result" = "X${1}"; then
+ func_dirname_result="${3}"
+ else
+ func_dirname_result="$func_dirname_result${2}"
+ fi
+}
+
+# func_basename file
+func_basename ()
+{
+ func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"`
+}
+
+
+# func_stripname prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+# func_strip_suffix prefix name
+func_stripname ()
+{
+ case ${2} in
+ .*) func_stripname_result=`$ECHO "X${3}" \
+ | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;;
+ *) func_stripname_result=`$ECHO "X${3}" \
+ | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;;
+ esac
+}
+
+# sed scripts:
+my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q'
+my_sed_long_arg='1s/^-[^=]*=//'
+
+# func_opt_split
+func_opt_split ()
+{
+ func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"`
+ func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"`
+}
+
+# func_lo2o object
+func_lo2o ()
+{
+ func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"`
+}
+
+# func_xform libobj-or-source
+func_xform ()
+{
+ func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[^.]*$/.lo/'`
+}
+
+# func_arith arithmetic-term...
+func_arith ()
+{
+ func_arith_result=`expr "$@"`
+}
+
+# func_len string
+# STRING may not start with a hyphen.
+func_len ()
+{
+ func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len`
+}
+
+_LT_EOF
+esac
+
+case $lt_shell_append in
+ yes)
+ cat << \_LT_EOF >> "$cfgfile"
+
+# func_append var value
+# Append VALUE to the end of shell variable VAR.
+func_append ()
+{
+ eval "$1+=\$2"
+}
+_LT_EOF
+ ;;
+ *)
+ cat << \_LT_EOF >> "$cfgfile"
+
+# func_append var value
+# Append VALUE to the end of shell variable VAR.
+func_append ()
+{
+ eval "$1=\$$1\$2"
+}
+
+_LT_EOF
+ ;;
+ esac
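+ # For illustration, both func_append implementations above behave the same;
+ # e.g. with deplibs="-lz", `func_append deplibs " -lm"' leaves deplibs
+ # set to "-lz -lm".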
+
+
+ sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \
+ || (rm -f "$cfgfile"; exit 1)
+
+ mv -f "$cfgfile" "$ofile" ||
+ (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
+ chmod +x "$ofile"
+
+
+ cat <<_LT_EOF >> "$ofile"
+
+# ### BEGIN LIBTOOL TAG CONFIG: RC
+
+# The linker used to build libraries.
+LD=$lt_LD_RC
+
+# Commands used to build an old-style archive.
+old_archive_cmds=$lt_old_archive_cmds_RC
+
+# A language specific compiler.
+CC=$lt_compiler_RC
+
+# Is the compiler the GNU compiler?
+with_gcc=$GCC_RC
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_RC
+
+# How to pass a linker flag through the compiler.
+wl=$lt_lt_prog_compiler_wl_RC
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_lt_prog_compiler_pic_RC
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_lt_prog_compiler_static_RC
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o_RC
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$archive_cmds_need_lc_RC
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_RC
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_RC
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec_RC
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object=$lt_compiler_needs_object_RC
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_RC
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_RC
+
+# Commands used to build a shared archive.
+archive_cmds=$lt_archive_cmds_RC
+archive_expsym_cmds=$lt_archive_expsym_cmds_RC
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds=$lt_module_cmds_RC
+module_expsym_cmds=$lt_module_expsym_cmds_RC
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld=$lt_with_gnu_ld_RC
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag_RC
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag_RC
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_RC
+
+# If ld is used when linking, flag to hardcode \$libdir into a binary
+# during linking. This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_RC
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator_RC
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=$hardcode_direct_RC
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute",i.e impossible to change by setting \${shlibpath_var} if the
+# library is relocated.
+hardcode_direct_absolute=$hardcode_direct_absolute_RC
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=$hardcode_minus_L_RC
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var_RC
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=$hardcode_automatic_RC
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=$inherit_rpath_RC
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs_RC
+
+# Fix the shell variable \$srcfile for the compiler.
+fix_srcfile_path=$lt_fix_srcfile_path_RC
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=$always_export_symbols_RC
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds_RC
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms_RC
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms_RC
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=$lt_prelink_cmds_RC
+
+# Specify filename containing input files.
+file_list_spec=$lt_file_list_spec_RC
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action_RC
+
+# ### END LIBTOOL TAG CONFIG: RC
+_LT_EOF
+
+ ;;
+ "po-directories":C)
+ for ac_file in $CONFIG_FILES; do
+ # Support "outfile[:infile[:infile...]]"
+ case "$ac_file" in
+ *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;;
+ esac
+ # PO directories have a Makefile.in generated from Makefile.in.in.
+ case "$ac_file" in */Makefile.in)
+ # Adjust a relative srcdir.
+ ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'`
+ ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`"
+ ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'`
+ # In autoconf-2.13 it is called $ac_given_srcdir.
+ # In autoconf-2.50 it is called $srcdir.
+ test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir"
+ case "$ac_given_srcdir" in
+ .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;;
+ /*) top_srcdir="$ac_given_srcdir" ;;
+ *) top_srcdir="$ac_dots$ac_given_srcdir" ;;
+ esac
+ # Treat a directory as a PO directory if and only if it has a
+ # POTFILES.in file. This allows packages to have multiple PO
+ # directories under different names or in different locations.
+ if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then
+ rm -f "$ac_dir/POTFILES"
+ test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES"
+ cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES"
+ POMAKEFILEDEPS="POTFILES.in"
+ # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend
+ # on $ac_dir but don't depend on user-specified configuration
+ # parameters.
+ if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then
+ # The LINGUAS file contains the set of available languages.
+ if test -n "$OBSOLETE_ALL_LINGUAS"; then
+ test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete"
+ fi
+ ALL_LINGUAS_=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"`
+ # Hide the ALL_LINGUAS assignment from automake < 1.5.
+ eval 'ALL_LINGUAS''=$ALL_LINGUAS_'
+ POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS"
+ else
+ # The set of available languages was given in configure.in.
+ # Hide the ALL_LINGUAS assignment from automake < 1.5.
+ eval 'ALL_LINGUAS''=$OBSOLETE_ALL_LINGUAS'
+ fi
+ # Compute POFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po)
+ # Compute UPDATEPOFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update)
+ # Compute DUMMYPOFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop)
+ # Compute GMOFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo)
+ case "$ac_given_srcdir" in
+ .) srcdirpre= ;;
+ *) srcdirpre='$(srcdir)/' ;;
+ esac
+ POFILES=
+ UPDATEPOFILES=
+ DUMMYPOFILES=
+ GMOFILES=
+ for lang in $ALL_LINGUAS; do
+ POFILES="$POFILES $srcdirpre$lang.po"
+ UPDATEPOFILES="$UPDATEPOFILES $lang.po-update"
+ DUMMYPOFILES="$DUMMYPOFILES $lang.nop"
+ GMOFILES="$GMOFILES $srcdirpre$lang.gmo"
+ done
+ # CATALOGS depends on both $ac_dir and the user's LINGUAS
+ # environment variable.
+ INST_LINGUAS=
+ if test -n "$ALL_LINGUAS"; then
+ for presentlang in $ALL_LINGUAS; do
+ useit=no
+ if test "%UNSET%" != "$LINGUAS"; then
+ desiredlanguages="$LINGUAS"
+ else
+ desiredlanguages="$ALL_LINGUAS"
+ fi
+ for desiredlang in $desiredlanguages; do
+ # Use the presentlang catalog if desiredlang is
+ # a. equal to presentlang, or
+ # b. a variant of presentlang (because in this case,
+ # presentlang can be used as a fallback for messages
+ # which are not translated in the desiredlang catalog).
+ case "$desiredlang" in
+ "$presentlang"*) useit=yes;;
+ esac
+ done
+ if test $useit = yes; then
+ INST_LINGUAS="$INST_LINGUAS $presentlang"
+ fi
+ done
+ fi
+ CATALOGS=
+ if test -n "$INST_LINGUAS"; then
+ for lang in $INST_LINGUAS; do
+ CATALOGS="$CATALOGS $lang.gmo"
+ done
+ fi
+ test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile"
+ sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile"
+ for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do
+ if test -f "$f"; then
+ case "$f" in
+ *.orig | *.bak | *~) ;;
+ *) cat "$f" >> "$ac_dir/Makefile" ;;
+ esac
+ fi
+ done
+ fi
+ ;;
+ esac
+ done ;;
+
+ esac
+done # for ac_tag
+
+
+{ (exit 0); exit 0; }
+_ACEOF
+chmod +x $CONFIG_STATUS
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+ { { $as_echo "$as_me:$LINENO: error: write failure creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: error: write failure creating $CONFIG_STATUS" >&2;}
+ { (exit 1); exit 1; }; }
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || { (exit 1); exit 1; }
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+ { $as_echo "$as_me:$LINENO: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/configure.ac b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/configure.ac
new file mode 100644
index 00000000..e2bb8778
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/configure.ac
@@ -0,0 +1,649 @@
+# -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+###############################################################################
+#
+# Author: Lasse Collin
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+###############################################################################
+
+# NOTE: Don't add useless checks. autoscan detects this and that, but don't
+# let it confuse you. For example, we don't care about checking for behavior
+# of malloc(), stat(), or lstat(), since we don't use those functions in
+# a way that would cause the problems the autoconf macros check.
+
+AC_PREREQ([2.61])
+
+AC_INIT([XZ Utils], m4_esyscmd([/bin/sh version.sh]),
+ [lasse.collin@tukaani.org], [xz])
+AC_CONFIG_SRCDIR([src/liblzma/common/common.h])
+AC_CONFIG_AUX_DIR([build-aux])
+AC_CONFIG_MACRO_DIR([m4])
+AC_CONFIG_HEADER([config.h])
+
+PACKAGE_HOMEPAGE=http://tukaani.org/xz/
+AC_DEFINE_UNQUOTED([PACKAGE_HOMEPAGE], ["$PACKAGE_HOMEPAGE"],
+ [Define to the URL of the home page of this package.])
+AC_SUBST([PACKAGE_HOMEPAGE])
+
+echo
+echo "$PACKAGE_STRING"
+
+echo
+echo "System type:"
+# This is needed to know if assembler optimizations can be used.
+AC_CANONICAL_HOST
+
+# We do some special things on Windows (32-bit or 64-bit) builds.
+case $host_os in
+ mingw* | cygwin*) is_w32=yes ;;
+ *) is_w32=no ;;
+esac
+AM_CONDITIONAL([COND_W32], [test "$is_w32" = yes])
+
+
+echo
+echo "Configure options:"
+AM_CFLAGS=
+
+
+#############
+# Debugging #
+#############
+
+AC_MSG_CHECKING([if debugging code should be compiled])
+AC_ARG_ENABLE([debug], AC_HELP_STRING([--enable-debug], [Enable debugging code.]),
+ [], enable_debug=no)
+if test "x$enable_debug" = xyes; then
+ AC_MSG_RESULT([yes])
+else
+ AC_DEFINE([NDEBUG], [1], [Define to 1 to disable debugging code.])
+ AC_MSG_RESULT([no])
+fi
+
+
+###########
+# Filters #
+###########
+
+m4_define([SUPPORTED_FILTERS], [lzma1,lzma2,subblock,delta,x86,powerpc,ia64,arm,armthumb,sparc])dnl
+m4_define([SIMPLE_FILTERS], [x86,powerpc,ia64,arm,armthumb,sparc])
+m4_define([LZ_FILTERS], [lzma1,lzma2])
+
+m4_foreach([NAME], [SUPPORTED_FILTERS],
+[enable_filter_[]NAME=no
+enable_encoder_[]NAME=no
+enable_decoder_[]NAME=no
+])dnl
+
+AC_MSG_CHECKING([which encoders to build])
+AC_ARG_ENABLE([encoders], AC_HELP_STRING([--enable-encoders=LIST],
+ [Comma-separated list of encoders to build. Default=all.
+ Available encoders:]
+ m4_translit(m4_defn([SUPPORTED_FILTERS]), [,], [ ])),
+ [], [enable_encoders=SUPPORTED_FILTERS])
+enable_encoders=`echo "$enable_encoders" | sed 's/,subblock//; s/,/ /g'`
+if test "x$enable_encoders" = xno || test "x$enable_encoders" = x; then
+ AC_MSG_RESULT([(none)])
+else
+ AC_DEFINE([HAVE_ENCODER], [1],
+ [Define to 1 if encoder components are enabled.])
+ for arg in $enable_encoders
+ do
+ case $arg in m4_foreach([NAME], [SUPPORTED_FILTERS], [
+ NAME)
+ enable_filter_[]NAME=yes
+ enable_encoder_[]NAME=yes
+ AC_DEFINE(HAVE_ENCODER_[]m4_toupper(NAME), [1],
+ [Define to 1 if] NAME [encoder is enabled.])
+ ;;])
+ *)
+ AC_MSG_RESULT([])
+ AC_MSG_ERROR([unknown filter: $arg])
+ ;;
+ esac
+ done
+ AC_MSG_RESULT([$enable_encoders])
+fi
+
+AC_MSG_CHECKING([which decoders to build])
+AC_ARG_ENABLE([decoders], AC_HELP_STRING([--enable-decoders=LIST],
+ [Comma-separated list of decoders to build. Default=all.
+ Available decoders are the same as available encoders.]),
+ [], [enable_decoders=SUPPORTED_FILTERS])
+enable_decoders=`echo "$enable_decoders" | sed 's/,subblock//; s/,/ /g'`
+if test "x$enable_decoders" = xno || test "x$enable_decoders" = x; then
+ AC_MSG_RESULT([(none)])
+else
+ AC_DEFINE([HAVE_DECODER], [1],
+ [Define to 1 if decoder components are enabled.])
+ for arg in $enable_decoders
+ do
+ case $arg in m4_foreach([NAME], [SUPPORTED_FILTERS], [
+ NAME)
+ enable_filter_[]NAME=yes
+ enable_decoder_[]NAME=yes
+ AC_DEFINE(HAVE_DECODER_[]m4_toupper(NAME), [1],
+ [Define to 1 if] NAME [decoder is enabled.])
+ ;;])
+ *)
+ AC_MSG_RESULT([])
+ AC_MSG_ERROR([unknown filter: $arg])
+ ;;
+ esac
+ done
+
+ # LZMA2 requires that LZMA1 is enabled.
+ test "x$enable_encoder_lzma2" = xyes && enable_encoder_lzma1=yes
+ test "x$enable_decoder_lzma2" = xyes && enable_decoder_lzma1=yes
+
+ AC_MSG_RESULT([$enable_decoders])
+fi
+
+if test "x$enable_encoder_lzma2$enable_encoder_lzma1" = xyesno \
+ || test "x$enable_decoder_lzma2$enable_decoder_lzma1" = xyesno; then
+ AC_MSG_ERROR([LZMA2 requires that LZMA1 is also enabled.])
+fi
+
+AM_CONDITIONAL(COND_MAIN_ENCODER, test "x$enable_encoders" != xno && test "x$enable_encoders" != x)
+AM_CONDITIONAL(COND_MAIN_DECODER, test "x$enable_decoders" != xno && test "x$enable_decoders" != x)
+
+m4_foreach([NAME], [SUPPORTED_FILTERS],
+[AM_CONDITIONAL(COND_FILTER_[]m4_toupper(NAME), test "x$enable_filter_[]NAME" = xyes)
+AM_CONDITIONAL(COND_ENCODER_[]m4_toupper(NAME), test "x$enable_encoder_[]NAME" = xyes)
+AM_CONDITIONAL(COND_DECODER_[]m4_toupper(NAME), test "x$enable_decoder_[]NAME" = xyes)
+])dnl
+
+# The so-called "simple filters" share common code.
+enable_filter_simple=no
+enable_encoder_simple=no
+enable_decoder_simple=no
+m4_foreach([NAME], [SIMPLE_FILTERS],
+[test "x$enable_filter_[]NAME" = xyes && enable_filter_simple=yes
+test "x$enable_encoder_[]NAME" = xyes && enable_encoder_simple=yes
+test "x$enable_decoder_[]NAME" = xyes && enable_decoder_simple=yes
+])dnl
+AM_CONDITIONAL(COND_FILTER_SIMPLE, test "x$enable_filter_simple" = xyes)
+AM_CONDITIONAL(COND_ENCODER_SIMPLE, test "x$enable_encoder_simple" = xyes)
+AM_CONDITIONAL(COND_DECODER_SIMPLE, test "x$enable_decoder_simple" = xyes)
+
+# LZ-based filters share common code.
+enable_filter_lz=no
+enable_encoder_lz=no
+enable_decoder_lz=no
+m4_foreach([NAME], [LZ_FILTERS],
+[test "x$enable_filter_[]NAME" = xyes && enable_filter_lz=yes
+test "x$enable_encoder_[]NAME" = xyes && enable_encoder_lz=yes
+test "x$enable_decoder_[]NAME" = xyes && enable_decoder_lz=yes
+])dnl
+AM_CONDITIONAL(COND_FILTER_LZ, test "x$enable_filter_lz" = xyes)
+AM_CONDITIONAL(COND_ENCODER_LZ, test "x$enable_encoder_lz" = xyes)
+AM_CONDITIONAL(COND_DECODER_LZ, test "x$enable_decoder_lz" = xyes)
+
+
+#################
+# Match finders #
+#################
+
+m4_define([SUPPORTED_MATCH_FINDERS], [hc3,hc4,bt2,bt3,bt4])
+
+m4_foreach([NAME], [SUPPORTED_MATCH_FINDERS],
+[enable_match_finder_[]NAME=no
+])
+
+AC_MSG_CHECKING([which match finders to build])
+AC_ARG_ENABLE([match-finders], AC_HELP_STRING([--enable-match-finders=LIST],
+ [Comma-separated list of match finders to build. Default=all.
+ At least one match finder is required for encoding with
+ the LZMA1 and LZMA2 filters. Available match finders:]
+ m4_translit(m4_defn([SUPPORTED_MATCH_FINDERS]), [,], [ ])), [],
+ [enable_match_finders=SUPPORTED_MATCH_FINDERS])
+enable_match_finders=`echo "$enable_match_finders" | sed 's/,/ /g'`
+if test "x$enable_encoder_lz" = xyes ; then
+ for arg in $enable_match_finders
+ do
+ case $arg in m4_foreach([NAME], [SUPPORTED_MATCH_FINDERS], [
+ NAME)
+ enable_match_finder_[]NAME=yes
+ AC_DEFINE(HAVE_MF_[]m4_toupper(NAME), [1],
+ [Define to 1 to enable] NAME [match finder.])
+ ;;])
+ *)
+ AC_MSG_RESULT([])
+ AC_MSG_ERROR([unknown match finder: $arg])
+ ;;
+ esac
+ done
+ AC_MSG_RESULT([$enable_match_finders])
+else
+ AC_MSG_RESULT([(none because not building any LZ-based encoder)])
+fi
+
+
+####################
+# Integrity checks #
+####################
+
+m4_define([SUPPORTED_CHECKS], [crc32,crc64,sha256])
+
+m4_foreach([NAME], [SUPPORTED_CHECKS],
+[enable_check_[]NAME=no
+])dnl
+
+AC_MSG_CHECKING([which integrity checks to build])
+AC_ARG_ENABLE([checks], AC_HELP_STRING([--enable-checks=LIST],
+ [Comma-separated list of integrity checks to build.
+ Default=all. Available integrity checks:]
+ m4_translit(m4_defn([SUPPORTED_CHECKS]), [,], [ ])),
+ [], [enable_checks=SUPPORTED_CHECKS])
+enable_checks=`echo "$enable_checks" | sed 's/,/ /g'`
+if test "x$enable_checks" = xno || test "x$enable_checks" = x; then
+ AC_MSG_RESULT([(none)])
+else
+ for arg in $enable_checks
+ do
+ case $arg in m4_foreach([NAME], [SUPPORTED_CHECKS], [
+ NAME)
+ enable_check_[]NAME=yes
+ AC_DEFINE(HAVE_CHECK_[]m4_toupper(NAME), [1],
+ [Define to 1 if] NAME
+ [integrity check is enabled.])
+ ;;])
+ *)
+ AC_MSG_RESULT([])
+ AC_MSG_ERROR([unknown integrity check: $arg])
+ ;;
+ esac
+ done
+ AC_MSG_RESULT([$enable_checks])
+fi
+if test "x$enable_checks_crc32" = xno ; then
+ AC_MSG_ERROR([For now, the CRC32 check must always be enabled.])
+fi
+
+m4_foreach([NAME], [SUPPORTED_CHECKS],
+[AM_CONDITIONAL(COND_CHECK_[]m4_toupper(NAME), test "x$enable_check_[]NAME" = xyes)
+])dnl
+
+
+###########################
+# Assembler optimizations #
+###########################
+
+AC_MSG_CHECKING([if assembler optimizations should be used])
+AC_ARG_ENABLE([assembler], AC_HELP_STRING([--disable-assembler],
+ [Do not use assembler optimizations even if such exist
+ for the architecture.]),
+ [], [enable_assembler=yes])
+if test "x$enable_assembler" = xyes; then
+ case $host_cpu in
+ i?86) enable_assembler=x86 ;;
+ x86_64) enable_assembler=x86_64 ;;
+ *) enable_assembler=no ;;
+ esac
+fi
+case $enable_assembler in
+ x86)
+ AC_DEFINE([HAVE_ASM_X86], [1],
+ [Define to 1 if using x86 assembler optimizations.])
+ ;;
+ x86_64)
+ AC_DEFINE([HAVE_ASM_X86_64], [1],
+ [Define to 1 if using x86_64 assembler optimizations.])
+ ;;
+ no)
+ ;;
+ *)
+ AC_MSG_RESULT([])
+ AC_MSG_ERROR([--enable-assembler accepts only \`yes', \`no', \`x86', or \`x86_64'.])
+ ;;
+esac
+AC_MSG_RESULT([$enable_assembler])
+AM_CONDITIONAL(COND_ASM_X86, test "x$enable_assembler" = xx86)
+AM_CONDITIONAL(COND_ASM_X86_64, test "x$enable_assembler" = xx86_64)
+
+
+################################
+# Fast unaligned memory access #
+################################
+
+AC_MSG_CHECKING([if unaligned memory access should be used])
+AC_ARG_ENABLE([unaligned-access], AC_HELP_STRING([--enable-unaligned-access],
+ [Enable if the system supports *fast* unaligned memory access
+ with 16-bit and 32-bit integers. By default, this is enabled
+ only on x86, x86_64, and big endian PowerPC.]),
+ [], [enable_unaligned_access=auto])
+if test "x$enable_unaligned_access" = xauto ; then
+ case $host_cpu in
+ i?86|x86_64|powerpc|powerpc64)
+ enable_unaligned_access=yes
+ ;;
+ *)
+ enable_unaligned_access=no
+ ;;
+ esac
+fi
+if test "x$enable_unaligned_access" = xyes ; then
+ AC_DEFINE([HAVE_FAST_UNALIGNED_ACCESS], [1], [Define to 1 if
+ the system supports fast unaligned memory access.])
+ AC_MSG_RESULT([yes])
+else
+ AC_MSG_RESULT([no])
+fi
+
+
+#####################
+# Size optimization #
+#####################
+
+AC_MSG_CHECKING([if small size is preferred over speed])
+AC_ARG_ENABLE([small], AC_HELP_STRING([--enable-small],
+ [Make liblzma smaller and a little slower.
+ This is disabled by default to optimize for speed.]),
+ [], [enable_small=no])
+if test "x$enable_small" = xyes; then
+ AC_DEFINE([HAVE_SMALL], [1], [Define to 1 if optimizing for size.])
+elif test "x$enable_small" != xno; then
+ AC_MSG_RESULT([])
+ AC_MSG_ERROR([--enable-small accepts only \`yes' or \`no'])
+fi
+AC_MSG_RESULT([$enable_small])
+AM_CONDITIONAL(COND_SMALL, test "x$enable_small" = xyes)
+
+
+#############
+# Threading #
+#############
+
+AC_MSG_CHECKING([if threading support is wanted])
+AC_ARG_ENABLE([threads], AC_HELP_STRING([--disable-threads],
+ [Disable threading support.
+ This makes some things thread-unsafe.]),
+ [], [enable_threads=yes])
+if test "x$enable_threads" != xyes && test "x$enable_threads" != xno; then
+ AC_MSG_RESULT([])
+ AC_MSG_ERROR([--enable-threads accepts only \`yes' or \`no'])
+fi
+AC_MSG_RESULT([$enable_threads])
+# We use the actual result a little later.
+
+
+############################################
+# xz/xzdec/lzmadec linkage against liblzma #
+############################################
+
+# Link the xz, xzdec, and lzmadec command line tools against static liblzma
+# unless using --enable-dynamic. Using static liblzma gives a little bit
+# faster executable on x86, because no register is wasted for PIC. We also
+# have one dependency less, which allows users to more freely copy the xz
+# binary to other boxes. However, I wouldn't be surprised if distro
+# maintainers still prefer dynamic linking, so let's make it easy for them.
+
+AC_MSG_CHECKING([how programs should be linked against liblzma])
+AC_ARG_ENABLE([dynamic], [AC_HELP_STRING([--enable-dynamic=TYPE],
+ [Set how command line tools are linked against liblzma.
+ TYPE can be mixed, yes, or no. The default is mixed.])],
+ [], [enable_dynamic=mixed])
+case $enable_dynamic in
+ mixed)
+ AC_MSG_RESULT([mixed (some dynamically, some statically)])
+ ;;
+ yes)
+ AC_MSG_RESULT([dynamically])
+ ;;
+ no)
+ AC_MSG_RESULT([statically])
+ ;;
+ *)
+ AC_MSG_RESULT([])
+ AC_MSG_ERROR([--enable-dynamic accepts only \`mixed', \`yes', or \`no'])
+ ;;
+esac
+# We use the actual results later, because we don't know yet
+# if --disable-shared or --disable-static was used.
+
+
+###############################################################################
+# Checks for programs.
+###############################################################################
+
+echo
+gl_POSIX_SHELL
+if test -z "$POSIX_SHELL" ; then
+ AC_MSG_ERROR([No POSIX conforming shell (sh) was found.])
+fi
+
+echo
+echo "Initializing Automake:"
+
+AM_INIT_AUTOMAKE([1.10 foreign tar-v7 filename-length-max=99])
+AC_PROG_LN_S
+
+AC_PROG_CC_C99
+if test x$ac_cv_prog_cc_c99 = xno ; then
+ AC_MSG_ERROR([No C99 compiler was found.])
+fi
+
+AM_PROG_CC_C_O
+AM_PROG_AS
+AC_USE_SYSTEM_EXTENSIONS
+
+if test "x$enable_threads" = xyes; then
+ echo
+ echo "Threading support:"
+ ACX_PTHREAD
+ LIBS="$LIBS $PTHREAD_LIBS"
+ AM_CFLAGS="$AM_CFLAGS $PTHREAD_CFLAGS"
+ CC="$PTHREAD_CC"
+fi
+
+echo
+echo "Initializing Libtool:"
+LT_PREREQ([2.2])
+LT_INIT([win32-dll])
+LT_LANG([Windows Resource])
+
+# This is a bit wrong since it is possible to request that only some libs
+# are built as shared. Using that feature isn't so common though, and this
+# breaks only on Windows (at least for now) if the user enables only some
+# libs as shared.
+AM_CONDITIONAL([COND_SHARED], [test "x$enable_shared" != xno])
+
+
+###############################################################################
+# Checks for libraries.
+###############################################################################
+
+echo
+echo "Initializing gettext:"
+AM_GNU_GETTEXT_VERSION([0.16.1])
+AM_GNU_GETTEXT([external])
+
+###############################################################################
+# Checks for header files.
+###############################################################################
+
+echo
+echo "System headers and functions:"
+
+# There are currently no workarounds in this package if some of
+# these headers are missing.
+AC_CHECK_HEADERS([fcntl.h limits.h sys/time.h],
+ [],
+ [AC_MSG_ERROR([Required header file(s) are missing.])])
+
+# If any of these headers are missing, things should still work correctly:
+AC_CHECK_HEADERS([sys/param.h sys/sysctl.h byteswap.h],
+ [], [], [
+#ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+#endif
+])
+
+# Even if we have byteswap.h, we may lack the specific macros/functions.
+if test x$ac_cv_header_byteswap_h = xyes ; then
+ m4_foreach([FUNC], [bswap_16,bswap_32,bswap_64], [
+ AC_MSG_CHECKING([if FUNC is available])
+ AC_LINK_IFELSE([AC_LANG_SOURCE([
+#include <byteswap.h>
+int
+main(void)
+{
+ FUNC[](42);
+ return 0;
+}
+ ])], [
+ AC_DEFINE(HAVE_[]m4_toupper(FUNC), [1],
+ [Define to 1 if] FUNC [is available.])
+ AC_MSG_RESULT([yes])
+ ], [AC_MSG_RESULT([no])])
+
+ ])dnl
+fi
+
+
+###############################################################################
+# Checks for typedefs, structures, and compiler characteristics.
+###############################################################################
+
+dnl We don't need these as long as we need a C99 compiler anyway.
+dnl AC_C_INLINE
+dnl AC_C_RESTRICT
+
+AC_HEADER_STDBOOL
+
+AC_TYPE_UINT8_T
+AC_TYPE_UINT16_T
+AC_TYPE_INT32_T
+AC_TYPE_UINT32_T
+AC_TYPE_INT64_T
+AC_TYPE_UINT64_T
+AC_TYPE_UINTPTR_T
+
+AC_CHECK_SIZEOF([size_t])
+
+# The command line tool can copy high resolution timestamps if such
+# information is available in struct stat. Otherwise one second accuracy
+# is used.
+AC_CHECK_MEMBERS([
+ struct stat.st_atim.tv_nsec,
+ struct stat.st_atimespec.tv_nsec,
+ struct stat.st_atimensec,
+ struct stat.st_uatime,
+ struct stat.st_atim.st__tim.tv_nsec])
+
+AC_SYS_LARGEFILE
+AC_C_BIGENDIAN
+
+
+###############################################################################
+# Checks for library functions.
+###############################################################################
+
+# Gnulib replacements as needed
+gl_GETOPT
+
+# Find the best function to set timestamps.
+AC_CHECK_FUNCS([futimens futimes futimesat utimes utime], [break])
+
+lc_PHYSMEM
+lc_CPUCORES
+
+
+###############################################################################
+# If using GCC, set some additional AM_CFLAGS:
+###############################################################################
+
+if test "$GCC" = yes ; then
+ echo
+ echo "GCC extensions:"
+fi
+
+# Always do the visibility check but don't set AM_CFLAGS on Windows.
+# This way things get set properly even on Windows.
+gl_VISIBILITY
+if test -n "$CFLAG_VISIBILITY" && test "$is_w32" = no; then
+ AM_CFLAGS="$AM_CFLAGS $CFLAG_VISIBILITY"
+fi
+
+###############################################################################
+# Create the makefiles and config.h
+###############################################################################
+
+echo
+
+# Don't build the lib directory at all if we don't need any replacement
+# functions.
+AM_CONDITIONAL([COND_GNULIB], test -n "$LIBOBJS")
+
+# Add default AM_CFLAGS.
+AC_SUBST([AM_CFLAGS])
+
+# Set additional flags for static/dynamic linking. The idea is that every
+# program (not library) being built will use either STATIC_{CPPFLAGS,LDFLAGS}
+# or DYNAMIC_{CPPFLAGS,LDFLAGS} depending on which type of linkage is
+# preferred. These preferences get overridden by use of --disable-static,
+# --disable-shared, or --enable-dynamic.
+#
+# This is quite messy, because we want to use LZMA_API_STATIC when linking
+# against static liblzma. It's needed on Windows.
+if test "x$enable_static" = xno; then
+ enable_dynamic=yes
+fi
+if test "x$enable_shared" = xno; then
+ enable_dynamic=no
+fi
+case $enable_dynamic in
+ yes)
+ STATIC_CPPFLAGS=
+ STATIC_LDFLAGS=
+ DYNAMIC_CPPFLAGS=
+ DYNAMIC_LDFLAGS=
+ ;;
+ mixed)
+ STATIC_CPPFLAGS="-DLZMA_API_STATIC"
+ STATIC_LDFLAGS="-static"
+ DYNAMIC_CPPFLAGS=
+ DYNAMIC_LDFLAGS=
+ ;;
+ no)
+ STATIC_CPPFLAGS="-DLZMA_API_STATIC"
+ STATIC_LDFLAGS="-static"
+ DYNAMIC_CPPFLAGS="-DLZMA_API_STATIC"
+ DYNAMIC_LDFLAGS="-static"
+ ;;
+esac
+AC_SUBST([STATIC_CPPFLAGS])
+AC_SUBST([STATIC_LDFLAGS])
+AC_SUBST([DYNAMIC_CPPFLAGS])
+AC_SUBST([DYNAMIC_LDFLAGS])
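+# As a rough illustration (with a hypothetical program name "foo"; the real
+# per-program flags live in the individual Makefile.am files), a tool that
+# prefers static linking would consume these substitutions like:
+#   foo_CPPFLAGS = $(AM_CPPFLAGS) $(STATIC_CPPFLAGS)
+#   foo_LDFLAGS = $(STATIC_LDFLAGS)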
+
+# This is needed for src/scripts.
+xz=`echo xz | sed "$program_transform_name"`
+AC_SUBST([xz])
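+# Illustration: configuring with `--program-prefix=g' makes the line above
+# yield "gxz", so the scripts in src/scripts can refer to the binary by its
+# installed name.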
+
+AC_CONFIG_FILES([
+ Doxyfile
+ Makefile
+ po/Makefile.in
+ lib/Makefile
+ src/Makefile
+ src/liblzma/liblzma.pc
+ src/liblzma/Makefile
+ src/liblzma/api/Makefile
+ src/xz/Makefile
+ src/xzdec/Makefile
+ src/lzmainfo/Makefile
+ src/scripts/Makefile
+ src/scripts/xzdiff
+ src/scripts/xzgrep
+ src/scripts/xzmore
+ src/scripts/xzless
+ tests/Makefile
+ debug/Makefile
+])
+
+AC_OUTPUT
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/Makefile.am
new file mode 100644
index 00000000..23834f4f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/Makefile.am
@@ -0,0 +1,30 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+noinst_PROGRAMS = \
+ repeat \
+ sync_flush \
+ full_flush \
+ memusage \
+ crc32 \
+ known_sizes \
+ hex2bin
+
+AM_CPPFLAGS = \
+ -I$(top_srcdir)/src/common \
+ -I$(top_srcdir)/src/liblzma/api \
+ $(STATIC_CPPFLAGS)
+
+AM_LDFLAGS = $(STATIC_LDFLAGS)
+
+LDADD = $(top_builddir)/src/liblzma/liblzma.la
+
+if COND_GNULIB
+LDADD += $(top_builddir)/lib/libgnu.a
+endif
+
+LDADD += $(LTLIBINTL)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/Makefile.in
new file mode 100644
index 00000000..e8d6418c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/Makefile.in
@@ -0,0 +1,580 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+noinst_PROGRAMS = repeat$(EXEEXT) sync_flush$(EXEEXT) \
+ full_flush$(EXEEXT) memusage$(EXEEXT) crc32$(EXEEXT) \
+ known_sizes$(EXEEXT) hex2bin$(EXEEXT)
+@COND_GNULIB_TRUE@am__append_1 = $(top_builddir)/lib/libgnu.a
+subdir = debug
+DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+PROGRAMS = $(noinst_PROGRAMS)
+crc32_SOURCES = crc32.c
+crc32_OBJECTS = crc32.$(OBJEXT)
+crc32_LDADD = $(LDADD)
+am__DEPENDENCIES_1 =
+crc32_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+full_flush_SOURCES = full_flush.c
+full_flush_OBJECTS = full_flush.$(OBJEXT)
+full_flush_LDADD = $(LDADD)
+full_flush_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+hex2bin_SOURCES = hex2bin.c
+hex2bin_OBJECTS = hex2bin.$(OBJEXT)
+hex2bin_LDADD = $(LDADD)
+hex2bin_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+known_sizes_SOURCES = known_sizes.c
+known_sizes_OBJECTS = known_sizes.$(OBJEXT)
+known_sizes_LDADD = $(LDADD)
+known_sizes_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+memusage_SOURCES = memusage.c
+memusage_OBJECTS = memusage.$(OBJEXT)
+memusage_LDADD = $(LDADD)
+memusage_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+repeat_SOURCES = repeat.c
+repeat_OBJECTS = repeat.$(OBJEXT)
+repeat_LDADD = $(LDADD)
+repeat_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+sync_flush_SOURCES = sync_flush.c
+sync_flush_OBJECTS = sync_flush.$(OBJEXT)
+sync_flush_LDADD = $(LDADD)
+sync_flush_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = crc32.c full_flush.c hex2bin.c known_sizes.c memusage.c \
+ repeat.c sync_flush.c
+DIST_SOURCES = crc32.c full_flush.c hex2bin.c known_sizes.c memusage.c \
+ repeat.c sync_flush.c
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+AM_CPPFLAGS = \
+ -I$(top_srcdir)/src/common \
+ -I$(top_srcdir)/src/liblzma/api \
+ $(STATIC_CPPFLAGS)
+
+AM_LDFLAGS = $(STATIC_LDFLAGS)
+LDADD = $(top_builddir)/src/liblzma/liblzma.la $(am__append_1) \
+ $(LTLIBINTL)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign debug/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign debug/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstPROGRAMS:
+ @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \
+ echo " rm -f" $$list; \
+ rm -f $$list || exit $$?; \
+ test -n "$(EXEEXT)" || exit 0; \
+ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+ echo " rm -f" $$list; \
+ rm -f $$list
+crc32$(EXEEXT): $(crc32_OBJECTS) $(crc32_DEPENDENCIES)
+ @rm -f crc32$(EXEEXT)
+ $(LINK) $(crc32_OBJECTS) $(crc32_LDADD) $(LIBS)
+full_flush$(EXEEXT): $(full_flush_OBJECTS) $(full_flush_DEPENDENCIES)
+ @rm -f full_flush$(EXEEXT)
+ $(LINK) $(full_flush_OBJECTS) $(full_flush_LDADD) $(LIBS)
+hex2bin$(EXEEXT): $(hex2bin_OBJECTS) $(hex2bin_DEPENDENCIES)
+ @rm -f hex2bin$(EXEEXT)
+ $(LINK) $(hex2bin_OBJECTS) $(hex2bin_LDADD) $(LIBS)
+known_sizes$(EXEEXT): $(known_sizes_OBJECTS) $(known_sizes_DEPENDENCIES)
+ @rm -f known_sizes$(EXEEXT)
+ $(LINK) $(known_sizes_OBJECTS) $(known_sizes_LDADD) $(LIBS)
+memusage$(EXEEXT): $(memusage_OBJECTS) $(memusage_DEPENDENCIES)
+ @rm -f memusage$(EXEEXT)
+ $(LINK) $(memusage_OBJECTS) $(memusage_LDADD) $(LIBS)
+repeat$(EXEEXT): $(repeat_OBJECTS) $(repeat_DEPENDENCIES)
+ @rm -f repeat$(EXEEXT)
+ $(LINK) $(repeat_OBJECTS) $(repeat_LDADD) $(LIBS)
+sync_flush$(EXEEXT): $(sync_flush_OBJECTS) $(sync_flush_DEPENDENCIES)
+ @rm -f sync_flush$(EXEEXT)
+ $(LINK) $(sync_flush_OBJECTS) $(sync_flush_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/crc32.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/full_flush.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hex2bin.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/known_sizes.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/memusage.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/repeat.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sync_flush.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \
+ mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+ clean-libtool clean-noinstPROGRAMS ctags distclean \
+ distclean-compile distclean-generic distclean-libtool \
+ distclean-tags distdir dvi dvi-am html html-am info info-am \
+ install install-am install-data install-data-am install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+ pdf pdf-am ps ps-am tags uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/README b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/README
new file mode 100644
index 00000000..749610d7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/README
@@ -0,0 +1,17 @@
+
+Debug tools
+-----------
+
+ This directory contains a few tiny programs that may be helpful when
+ debugging LZMA Utils.
+
+ These tools are not meant to be installed. Often one needs to edit
+    the source code a little to make the programs do what is wanted.
+ If you don't know how these programs could help you, it is likely
+ that they really are useless to you.
+
+ These aren't intended to be used as example programs. They take some
+ shortcuts here and there, which correct programs should not do. Many
+ possible errors (especially I/O errors) are ignored. Don't report
+    bugs or send patches to fix this kind of bug.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/crc32.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/crc32.c
new file mode 100644
index 00000000..3291c73f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/crc32.c
@@ -0,0 +1,40 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file crc32.c
+/// \brief Primitive CRC32 calculation tool
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include <stdio.h>
+
+
+int
+main(void)
+{
+ uint32_t crc = 0;
+
+ do {
+ uint8_t buf[BUFSIZ];
+ const size_t size = fread(buf, 1, sizeof(buf), stdin);
+ crc = lzma_crc32(buf, size, crc);
+ } while (!ferror(stdin) && !feof(stdin));
+
+ //printf("%08" PRIX32 "\n", crc);
+
+    // I want it little endian so it's easy to work with a hex editor.
+ printf("%02" PRIX32 " ", crc & 0xFF);
+ printf("%02" PRIX32 " ", (crc >> 8) & 0xFF);
+ printf("%02" PRIX32 " ", (crc >> 16) & 0xFF);
+ printf("%02" PRIX32 " ", crc >> 24);
+ printf("\n");
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/full_flush.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/full_flush.c
new file mode 100644
index 00000000..e9ab95e8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/full_flush.c
@@ -0,0 +1,104 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file full_flush.c
+/// \brief Encode files using LZMA_FULL_FLUSH
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include "lzma.h"
+#include <stdio.h>
+
+
+static lzma_stream strm = LZMA_STREAM_INIT;
+static FILE *file_in;
+
+
+static void
+encode(size_t size, lzma_action action)
+{
+ static const size_t CHUNK = 64;
+ uint8_t in[CHUNK];
+ uint8_t out[CHUNK];
+ lzma_ret ret;
+
+ do {
+ if (strm.avail_in == 0 && size > 0) {
+ const size_t amount = MIN(size, CHUNK);
+ strm.avail_in = fread(in, 1, amount, file_in);
+ strm.next_in = in;
+ size -= amount; // Intentionally not using avail_in.
+ }
+
+ strm.next_out = out;
+ strm.avail_out = CHUNK;
+
+ ret = lzma_code(&strm, size == 0 ? action : LZMA_RUN);
+
+ if (ret != LZMA_OK && ret != LZMA_STREAM_END) {
+ fprintf(stderr, "%s:%u: %s: ret == %d\n",
+ __FILE__, __LINE__, __func__, ret);
+ exit(1);
+ }
+
+ fwrite(out, 1, CHUNK - strm.avail_out, stdout);
+
+ } while (size > 0 || strm.avail_out == 0);
+
+ if ((action == LZMA_RUN && ret != LZMA_OK)
+ || (action != LZMA_RUN && ret != LZMA_STREAM_END)) {
+ fprintf(stderr, "%s:%u: %s: ret == %d\n",
+ __FILE__, __LINE__, __func__, ret);
+ exit(1);
+ }
+}
+
+
+int
+main(int argc, char **argv)
+{
+ file_in = argc > 1 ? fopen(argv[1], "rb") : stdin;
+
+
+ // Config
+ lzma_options_lzma opt_lzma;
+ if (lzma_lzma_preset(&opt_lzma, 1)) {
+ fprintf(stderr, "preset failed\n");
+ exit(1);
+ }
+ lzma_filter filters[LZMA_FILTERS_MAX + 1];
+ filters[0].id = LZMA_FILTER_LZMA2;
+ filters[0].options = &opt_lzma;
+ filters[1].id = LZMA_VLI_UNKNOWN;
+
+ // Init
+ if (lzma_stream_encoder(&strm, filters, LZMA_CHECK_CRC32) != LZMA_OK) {
+ fprintf(stderr, "init failed\n");
+ exit(1);
+ }
+
+// if (lzma_easy_encoder(&strm, 1)) {
+// fprintf(stderr, "init failed\n");
+// exit(1);
+// }
+
+ // Encoding
+ encode(0, LZMA_FULL_FLUSH);
+ encode(6, LZMA_FULL_FLUSH);
+ encode(0, LZMA_FULL_FLUSH);
+ encode(7, LZMA_FULL_FLUSH);
+ encode(0, LZMA_FULL_FLUSH);
+ encode(0, LZMA_FINISH);
+
+ // Clean up
+ lzma_end(&strm);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/hex2bin.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/hex2bin.c
new file mode 100644
index 00000000..4d8cfcd5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/hex2bin.c
@@ -0,0 +1,55 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file hex2bin.c
+/// \brief Converts hexadecimal input strings to binary
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include <stdio.h>
+#include <ctype.h>
+
+
+static int
+getbin(int x)
+{
+ if (x >= '0' && x <= '9')
+ return x - '0';
+
+ if (x >= 'A' && x <= 'F')
+ return x - 'A' + 10;
+
+ return x - 'a' + 10;
+}
+
+
+int
+main(void)
+{
+ while (true) {
+ int byte = getchar();
+ if (byte == EOF)
+ return 0;
+ if (!isxdigit(byte))
+ continue;
+
+ const int digit = getchar();
+ if (digit == EOF || !isxdigit(digit)) {
+ fprintf(stderr, "Invalid input\n");
+ return 1;
+ }
+
+ byte = (getbin(byte) << 4) | getbin(digit);
+ if (putchar(byte) == EOF) {
+ perror(NULL);
+ return 1;
+ }
+ }
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/known_sizes.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/known_sizes.c
new file mode 100644
index 00000000..6ed1c367
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/known_sizes.c
@@ -0,0 +1,131 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file known_sizes.c
+/// \brief Encodes .lzma Stream with sizes known in Block Header
+///
+/// The input file is encoded in RAM, and the known Compressed Size
+/// and/or Uncompressed Size values are stored in the Block Header.
+/// As of writing there's no such Stream encoder in liblzma.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include "lzma.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/unistd.h>
+#include <stdio.h>
+
+
+// Support file sizes up to 1 MiB. We use this for output space too, so files
+// close to 1 MiB had better compress at least a little or we have a buffer
+// overflow.
+#define BUFFER_SIZE (1U << 20)
+
+
+int
+main(void)
+{
+ // Allocate the buffers.
+ uint8_t *in = malloc(BUFFER_SIZE);
+ uint8_t *out = malloc(BUFFER_SIZE);
+ if (in == NULL || out == NULL)
+ return 1;
+
+ // Fill the input buffer.
+ const size_t in_size = fread(in, 1, BUFFER_SIZE, stdin);
+
+ // Filter setup
+ lzma_options_lzma opt_lzma;
+ if (lzma_lzma_preset(&opt_lzma, 1))
+ return 1;
+
+ lzma_filter filters[] = {
+ {
+ .id = LZMA_FILTER_LZMA2,
+ .options = &opt_lzma
+ },
+ {
+ .id = LZMA_VLI_UNKNOWN
+ }
+ };
+
+ lzma_block block = {
+ .check = LZMA_CHECK_CRC32,
+ .compressed_size = BUFFER_SIZE, // Worst case reserve
+ .uncompressed_size = in_size,
+ .filters = filters,
+ };
+
+ lzma_stream strm = LZMA_STREAM_INIT;
+ if (lzma_block_encoder(&strm, &block) != LZMA_OK)
+ return 1;
+
+ // Reserve space for Stream Header and Block Header. We need to
+ // calculate the size of the Block Header first.
+ if (lzma_block_header_size(&block) != LZMA_OK)
+ return 1;
+
+ size_t out_size = LZMA_STREAM_HEADER_SIZE + block.header_size;
+
+ strm.next_in = in;
+ strm.avail_in = in_size;
+ strm.next_out = out + out_size;
+ strm.avail_out = BUFFER_SIZE - out_size;
+
+ if (lzma_code(&strm, LZMA_FINISH) != LZMA_STREAM_END)
+ return 1;
+
+ out_size += strm.total_out;
+
+ if (lzma_block_header_encode(&block, out + LZMA_STREAM_HEADER_SIZE)
+ != LZMA_OK)
+ return 1;
+
+ lzma_index *idx = lzma_index_init(NULL, NULL);
+ if (idx == NULL)
+ return 1;
+
+ if (lzma_index_append(idx, NULL, block.header_size + strm.total_out,
+ strm.total_in) != LZMA_OK)
+ return 1;
+
+ if (lzma_index_encoder(&strm, idx) != LZMA_OK)
+ return 1;
+
+ if (lzma_code(&strm, LZMA_RUN) != LZMA_STREAM_END)
+ return 1;
+
+ out_size += strm.total_out;
+
+ lzma_end(&strm);
+
+ lzma_index_end(idx, NULL);
+
+ // Encode the Stream Header and Stream Footer. backwards_size is
+ // needed only for the Stream Footer.
+ lzma_stream_flags sf = {
+ .backward_size = strm.total_out,
+ .check = block.check,
+ };
+
+ if (lzma_stream_header_encode(&sf, out) != LZMA_OK)
+ return 1;
+
+ if (lzma_stream_footer_encode(&sf, out + out_size) != LZMA_OK)
+ return 1;
+
+ out_size += LZMA_STREAM_HEADER_SIZE;
+
+ // Write out the file.
+ fwrite(out, 1, out_size, stdout);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/memusage.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/memusage.c
new file mode 100644
index 00000000..381357d3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/memusage.c
@@ -0,0 +1,51 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file memusage.c
+/// \brief Calculates memory usage using lzma_memory_usage()
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include "lzma.h"
+#include <stdio.h>
+
+int
+main(void)
+{
+ lzma_options_lzma lzma = {
+ .dict_size = (1U << 30) + (1U << 29),
+ .lc = 3,
+ .lp = 0,
+ .pb = 2,
+ .preset_dict = NULL,
+ .preset_dict_size = 0,
+ .mode = LZMA_MODE_NORMAL,
+ .nice_len = 48,
+ .mf = LZMA_MF_BT4,
+ .depth = 0,
+ };
+
+/*
+ lzma_options_filter filters[] = {
+ { LZMA_FILTER_LZMA1,
+ (lzma_options_lzma *)&lzma_preset_lzma[6 - 1] },
+ { UINT64_MAX, NULL }
+ };
+*/
+ lzma_filter filters[] = {
+ { LZMA_FILTER_LZMA1, &lzma },
+ { UINT64_MAX, NULL }
+ };
+
+ printf("Encoder: %10" PRIu64 " B\n", lzma_memusage_encoder(filters));
+ printf("Decoder: %10" PRIu64 " B\n", lzma_memusage_decoder(filters));
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/repeat.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/repeat.c
new file mode 100644
index 00000000..408cc1fe
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/repeat.c
@@ -0,0 +1,38 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file repeat.c
+/// \brief Repeats the given string the given number of times
+///
+/// This program can be useful when debugging run-length encoder in
+/// the Subblock filter, especially the condition when repeat count
+/// doesn't fit into 28-bit integer.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include <stdio.h>
+
+
+int
+main(int argc, char **argv)
+{
+ if (argc != 3) {
+ fprintf(stderr, "Usage: %s COUNT STRING\n", argv[0]);
+ exit(1);
+ }
+
+ unsigned long long count = strtoull(argv[1], NULL, 10);
+ const size_t size = strlen(argv[2]);
+
+ while (count-- != 0)
+ fwrite(argv[2], 1, size, stdout);
+
+ return !!(ferror(stdout) || fclose(stdout));
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/sync_flush.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/sync_flush.c
new file mode 100644
index 00000000..9ba1f699
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/debug/sync_flush.c
@@ -0,0 +1,135 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file sync_flush.c
+/// \brief Encode files using LZMA_SYNC_FLUSH
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include "lzma.h"
+#include <stdio.h>
+
+
+static lzma_stream strm = LZMA_STREAM_INIT;
+static FILE *file_in;
+
+
+static void
+encode(size_t size, lzma_action action)
+{
+ static const size_t CHUNK = 64;
+ uint8_t in[CHUNK];
+ uint8_t out[CHUNK];
+ lzma_ret ret;
+
+ do {
+ if (strm.avail_in == 0 && size > 0) {
+ const size_t amount = MIN(size, CHUNK);
+ strm.avail_in = fread(in, 1, amount, file_in);
+ strm.next_in = in;
+ size -= amount; // Intentionally not using avail_in.
+ }
+
+ strm.next_out = out;
+ strm.avail_out = CHUNK;
+
+ ret = lzma_code(&strm, size == 0 ? action : LZMA_RUN);
+
+ if (ret != LZMA_OK && ret != LZMA_STREAM_END) {
+ fprintf(stderr, "%s:%u: %s: ret == %d\n",
+ __FILE__, __LINE__, __func__, ret);
+ exit(1);
+ }
+
+ fwrite(out, 1, CHUNK - strm.avail_out, stdout);
+
+ } while (size > 0 || strm.avail_out == 0);
+
+ if ((action == LZMA_RUN && ret != LZMA_OK)
+ || (action != LZMA_RUN && ret != LZMA_STREAM_END)) {
+ fprintf(stderr, "%s:%u: %s: ret == %d\n",
+ __FILE__, __LINE__, __func__, ret);
+ exit(1);
+ }
+}
+
+
+int
+main(int argc, char **argv)
+{
+ file_in = argc > 1 ? fopen(argv[1], "rb") : stdin;
+
+ // Config
+ lzma_options_lzma opt_lzma = {
+ .dict_size = 1U << 16,
+ .lc = LZMA_LC_DEFAULT,
+ .lp = LZMA_LP_DEFAULT,
+ .pb = LZMA_PB_DEFAULT,
+ .preset_dict = NULL,
+ .persistent = true,
+ .mode = LZMA_MODE_NORMAL,
+ .nice_len = 32,
+ .mf = LZMA_MF_HC3,
+ .depth = 0,
+ };
+
+ lzma_options_delta opt_delta = {
+ .dist = 16
+ };
+
+ lzma_options_subblock opt_subblock = {
+ .allow_subfilters = true,
+ .alignment = 8, // LZMA_SUBBLOCK_ALIGNMENT_DEFAULT,
+ .subblock_data_size = LZMA_SUBBLOCK_DATA_SIZE_DEFAULT,
+ .rle = 1, // LZMA_SUBBLOCK_RLE_OFF,
+ .subfilter_mode = LZMA_SUBFILTER_SET,
+ };
+ opt_subblock.subfilter_options.id = LZMA_FILTER_LZMA1;
+ opt_subblock.subfilter_options.options = &opt_lzma;
+ opt_subblock.subfilter_options.id = LZMA_FILTER_DELTA;
+ opt_subblock.subfilter_options.options = &opt_delta;
+
+ lzma_filter filters[LZMA_FILTERS_MAX + 1];
+ filters[0].id = LZMA_FILTER_LZMA2;
+ filters[0].options = &opt_lzma;
+ filters[1].id = LZMA_VLI_UNKNOWN;
+
+ // Init
+ if (lzma_stream_encoder(&strm, filters, LZMA_CHECK_CRC32) != LZMA_OK) {
+ fprintf(stderr, "init failed\n");
+ exit(1);
+ }
+
+ // Encoding
+
+ encode(0, LZMA_SYNC_FLUSH);
+ encode(6, LZMA_SYNC_FLUSH);
+ encode(0, LZMA_SYNC_FLUSH);
+ encode(7, LZMA_SYNC_FLUSH);
+ encode(0, LZMA_SYNC_FLUSH);
+ encode(0, LZMA_FINISH);
+/*
+ encode(53, LZMA_SYNC_FLUSH);
+// opt_lzma.literal_context_bits = 2;
+// opt_lzma.literal_pos_bits = 1;
+// opt_lzma.pos_bits = 0;
+ encode(404, LZMA_FINISH);
+*/
+ // Clean up
+ lzma_end(&strm);
+
+ return 0;
+
+ // Prevent useless warnings so we don't need to have special CFLAGS
+ // to disable -Werror.
+ (void)opt_lzma;
+ (void)opt_subblock;
+ (void)opt_delta;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/faq.txt b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/faq.txt
new file mode 100644
index 00000000..48c4ec71
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/faq.txt
@@ -0,0 +1,122 @@
+
+XZ Utils FAQ
+============
+
+Q: What do the letters XZ mean?
+
+A: Nothing. They are just two letters, which come from the file format
+   suffix .xz. The .xz suffix was selected because it seemed to be
+   pretty much unused. There is no deeper meaning.
+
+
+Q: What are LZMA and LZMA2?
+
+A: LZMA stands for Lempel-Ziv-Markov chain-Algorithm. It is the name
+ of the compression algorithm designed by Igor Pavlov for 7-Zip.
+ LZMA is based on LZ77 and range encoding.
+
+ LZMA2 is an updated version of the original LZMA to fix a couple of
+   practical issues. In the context of XZ Utils, LZMA is called LZMA1 to
+ emphasize that LZMA is not the same thing as LZMA2. LZMA2 is the
+ primary compression algorithm in the .xz file format.
+
+
+Q: There are many LZMA related projects. How does XZ Utils relate to them?
+
+A: 7-Zip and LZMA SDK are the original projects. LZMA SDK is roughly
+ a subset of the 7-Zip source tree.
+
+ p7zip is 7-Zip's command line tools ported to POSIX-like systems.
+
+ LZMA Utils provide a gzip-like lzma tool for POSIX-like systems.
+ LZMA Utils are based on LZMA SDK. XZ Utils are the successor to
+ LZMA Utils.
+
+ There are several other projects using LZMA. Most are more or less
+ based on LZMA SDK.
+
+
+Q: Do XZ Utils support the .7z format?
+
+A: No. Use 7-Zip (Windows) or p7zip (POSIX-like systems) to handle .7z
+ files.
+
+
+Q: I have many .tar.7z files. Can I convert them to .tar.xz without
+ spending hours recompressing the data?
+
+A: In the "extra" directory, there is a script named 7z2lzma.bash which
+ is able to convert some .7z files to the .lzma format (not .xz). It
+ needs the 7za (or 7z) command from p7zip. The script may silently
+ produce corrupt output if certain assumptions are not met, so
+ decompress the resulting .lzma file and compare it against the
+ original before deleting the original file!
+
+
+Q: I have many .lzma files. Can I quickly convert them to the .xz format?
+
+A: For now, no. Since XZ Utils supports the .lzma format, it's usually
+ not too bad to keep the old files in the old format. If you want to
+ do the conversion anyway, you need to decompress the .lzma files and
+ then recompress to the .xz format.
+
+ Technically, there is a way to make the conversion relatively fast
+ (roughly twice the time that normal decompression takes). Writing
+   such a tool would take quite a bit of time though, and would probably
+ be useful to only a few people. If you really want such a conversion
+ tool, contact Lasse Collin and offer some money.
+
+
+Q: Can I recover parts of a broken .xz file (e.g. corrupted CD-R)?
+
+A: It may be possible if the file consists of multiple blocks, which
+ typically is not the case if the file was created in single-threaded
+ mode. There is no recovery program yet.
+
+
+Q: Is (some part of) XZ Utils patented?
+
+A: Lasse Collin is not aware of any patents that could affect XZ Utils.
+   However, due to the nature of software patents, it's not possible to
+ guarantee that XZ Utils isn't affected by any third party patent(s).
+
+
+Q: Where can I find documentation about the file format and algorithms?
+
+A: The .xz format is documented in xz-file-format.txt. It is a container
+ format only, and doesn't include descriptions of any non-trivial
+ filters.
+
+ Documenting LZMA and LZMA2 is planned, but for now, there is no other
+   documentation than the source code. Before you begin, you should know
+ the basics of LZ77 and range coding algorithms. LZMA is based on LZ77,
+ but LZMA is *a lot* more complex. Range coding is used to compress
+ the final bitstream like Huffman coding is used in Deflate.
+
+
+Q: I cannot find BCJ and BCJ2 filters. Don't they exist in liblzma?
+
+A: The BCJ filter is called "x86" in liblzma. BCJ2 is not included,
+ because it requires using more than one encoded output stream.
+
+
+Q: How do I build a program that needs liblzmadec (lzmadec.h)?
+
+A: liblzmadec is part of LZMA Utils. XZ Utils has liblzma, but no
+ liblzmadec. The code using liblzmadec should be ported to use
+ liblzma instead. If you cannot or don't want to do that, download
+ LZMA Utils from <http://tukaani.org/lzma/>.
+
+
+Q: The default build of liblzma is too big. How can I make it smaller?
+
+A: Give --enable-small to the configure script. Use also appropriate
+ --enable or --disable options to include only those filter encoders
+ and decoders and integrity checks that you actually need. Use
+ CFLAGS=-Os (with GCC) or equivalent to tell your compiler to optimize
+ for size. See INSTALL for information about configure options.
+
+ If the result is still too big, take a look at XZ Embedded. It is
+   a separate project, which provides a limited but significantly
+ smaller XZ decoder implementation than XZ Utils.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/history.txt b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/history.txt
new file mode 100644
index 00000000..c97492e8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/history.txt
@@ -0,0 +1,149 @@
+
+History of LZMA Utils and XZ Utils
+==================================
+
+Tukaani distribution
+
+    In 2005, there was a small group working on the Tukaani distribution, which
+ was a Slackware fork. One of the project goals was to fit the distro on
+ a single 700 MiB ISO-9660 image. Using LZMA instead of gzip helped a
+ lot. Roughly speaking, one could fit data that took 1000 MiB in gzipped
+ form into 700 MiB with LZMA. Naturally compression ratio varied across
+ packages, but this was what we got on average.
+
+ Slackware packages have traditionally had .tgz as the filename suffix,
+ which is an abbreviation of .tar.gz. A logical naming for LZMA
+ compressed packages was .tlz, being an abbreviation of .tar.lzma.
+
+ At the end of the year 2007, there was no distribution under the
+ Tukaani project anymore, but development of LZMA Utils was kept going.
+ Still, there were .tlz packages around, because at least Vector Linux
+ (a Slackware based distribution) used LZMA for its packages.
+
+ First versions of the modified pkgtools used the LZMA_Alone tool from
+ Igor Pavlov's LZMA SDK as is. It was fine, because users wouldn't need
+ to interact with LZMA_Alone directly. But people soon wanted to use
+ LZMA for other files too, and the interface of LZMA_Alone wasn't
+ comfortable for those used to gzip and bzip2.
+
+
+First steps of LZMA Utils
+
+ The first version of LZMA Utils (4.22.0) included a shell script called
+    lzmash. It was a wrapper that had a gzip-like command line interface. It
+ used the LZMA_Alone tool from LZMA SDK to do all the real work. zgrep,
+    zdiff, and related scripts from gzip were adapted to work with LZMA and
+ were part of the first LZMA Utils release too.
+
+    LZMA Utils 4.22.0 also included lzmadec, which was a small (less than
+    10 KiB) decoder-only command line tool. It was written on top of the
+    decoder-only C code found in the LZMA SDK. lzmadec was convenient in
+ situations where LZMA_Alone (a few hundred KiB) would be too big.
+
+ lzmash and lzmadec were written by Lasse Collin.
+
+
+Second generation
+
+ The lzmash script was an ugly and not very secure hack. The last
+ version of LZMA Utils to use lzmash was 4.27.1.
+
+ LZMA Utils 4.32.0beta1 introduced a new lzma command line tool written
+ by Ville Koskinen. It was written in C++, and used the encoder and
+    decoder from the C++ LZMA SDK with minor modifications. This tool replaced
+ both the lzmash script and the LZMA_Alone command line tool in LZMA
+ Utils.
+
+ Introducing this new tool caused some temporary incompatibilities,
+    because the LZMA_Alone executable was simply named lzma like the new
+    command line tool, but the two had completely different command line
+    interfaces. The file format was still the same.
+
+ Lasse wrote liblzmadec, which was a small decoder-only library based
+    on the C code found in the LZMA SDK. liblzmadec had an API similar to zlib,
+ although there were some significant differences, which made it
+ non-trivial to use it in some applications designed for zlib and
+ libbzip2.
+
+ The lzmadec command line tool was converted to use liblzmadec.
+
+    Alexandre Sauvé helped convert the build system to use GNU Autotools.
+    This made it easier to test for certain less portable features needed
+ by the new command line tool.
+
+ Since the new command line tool never got completely finished (for
+    example, it didn't support the LZMA_OPT environment variable), the intent
+ was to not call 4.32.x stable. Similarly, liblzmadec wasn't polished,
+ but appeared to work well enough, so some people started using it too.
+
+ Because the development of the third generation of LZMA Utils was
+ delayed considerably (3-4 years), the 4.32.x branch had to be kept
+ maintained. It got some bug fixes now and then, and finally it was
+ decided to call it stable, although most of the missing features were
+ never added.
+
+
+File format problems
+
+    The file format used by LZMA_Alone was primitive. It was designed with
+    embedded systems in mind, and thus provided only a minimal set of
+    features. The two biggest problems for non-embedded use were the lack
+    of magic bytes and of an integrity check.
+
+ Igor and Lasse started developing a new file format with some help
+ from Ville Koskinen. Also Mark Adler, Mikko Pouru, H. Peter Anvin,
+ and Lars Wirzenius helped with some minor things at some point of the
+    development. Designing the new format took quite a long time (actually,
+    far too long would be a more appropriate expression). It was mostly
+ because Lasse was quite slow at getting things done due to personal
+ reasons.
+
+ Originally the new format was supposed to use the same .lzma suffix
+ that was already used by the old file format. Switching to the new
+ format wouldn't have caused much trouble when the old format wasn't
+ used by many people. But since the development of the new format took
+    so long, the old format got quite popular, and it was decided
+ that the new file format must use a different suffix.
+
+ It was decided to use .xz as the suffix of the new file format. The
+ first stable .xz file format specification was finally released in
+ December 2008. In addition to fixing the most obvious problems of
+ the old .lzma format, the .xz format added some new features like
+ support for multiple filters (compression algorithms), filter chaining
+ (like piping on the command line), and limited random-access reading.
+
+ Currently the primary compression algorithm used in .xz is LZMA2.
+ It is an extension on top of the original LZMA to fix some practical
+    problems: LZMA2 adds support for flushing the encoder and for
+    uncompressed chunks, eases stateful decoder implementations, and
+    improves support for multithreading. Since LZMA2 is better than the
+    original LZMA, the original LZMA is not supported in .xz.
+
+
+Transition to XZ Utils
+
+ The early versions of XZ Utils were called LZMA Utils. The first
+ releases were 4.42.0alphas. They dropped the rest of the C++ LZMA SDK.
+ The code was still directly based on LZMA SDK but ported to C and
+    converted from a callback API to a stateful API. Later, Igor Pavlov made
+    a C version of the LZMA encoder too; these ports from C++ to C were
+ independent in LZMA SDK and LZMA Utils.
+
+ The core of the new LZMA Utils was liblzma, a compression library with
+    a zlib-like API. liblzma supported both the old and new file formats. The
+ gzip-like lzma command line tool was rewritten to use liblzma.
+
+ The new LZMA Utils code base was renamed to XZ Utils when the name
+ of the new file format had been decided. The liblzma compression
+ library retained its name though, because changing it would have
+ caused unnecessary breakage in applications already using the early
+ liblzma snapshots.
+
+ The xz command line tool can emulate the gzip-like lzma tool by
+ creating appropriate symlinks (e.g. lzma -> xz). Thus, practically
+ all scripts using the lzma tool from LZMA Utils will work as is with
+ XZ Utils (and will keep using the old .lzma format). Still, the .lzma
+ format is more or less deprecated. XZ Utils will keep supporting it,
+ but new applications should use the .xz format, and migrating old
+ applications to .xz is often a good idea too.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/lzma-file-format.txt b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/lzma-file-format.txt
new file mode 100644
index 00000000..21fcb19f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/lzma-file-format.txt
@@ -0,0 +1,166 @@
+
+The .lzma File Format
+=====================
+
+ 0. Preface
+ 0.1. Notices and Acknowledgements
+ 0.2. Changes
+ 1. File Format
+ 1.1. Header
+ 1.1.1. Properties
+ 1.1.2. Dictionary Size
+ 1.1.3. Uncompressed Size
+ 1.2. LZMA Compressed Data
+ 2. References
+
+
+0. Preface
+
+ This document describes the .lzma file format, which is
+ sometimes also called LZMA_Alone format. It is a legacy file
+ format, which is being or has been replaced by the .xz format.
+ The MIME type of the .lzma format is `application/x-lzma'.
+
+ The most commonly used software to handle .lzma files are
+ LZMA SDK, LZMA Utils, 7-Zip, and XZ Utils. This document
+ describes some of the differences between these implementations
+ and gives hints what subset of the .lzma format is the most
+ portable.
+
+
+0.1. Notices and Acknowledgements
+
+ This file format was designed by Igor Pavlov for use in
+ LZMA SDK. This document was written by Lasse Collin
+ <lasse.collin@tukaani.org> using the documentation found
+ from the LZMA SDK.
+
+ This document has been put into the public domain.
+
+
+0.2. Changes
+
+ Last modified: 2009-05-01 11:15+0300
+
+
+1. File Format
+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+==========================+
+ | Header | LZMA Compressed Data |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+==========================+
+
+        The .lzma format file consists of a 13-byte Header followed by
+ the LZMA Compressed Data.
+
+ Unlike the .gz, .bz2, and .xz formats, it is not possible to
+ concatenate multiple .lzma files as is and expect the
+ decompression tool to decode the resulting file as if it were
+ a single .lzma file.
+
+ For example, the command line tools from LZMA Utils and
+ LZMA SDK silently ignore all the data after the first .lzma
+ stream. In contrast, the command line tool from XZ Utils
+ considers the .lzma file to be corrupt if there is data after
+ the first .lzma stream.
+
+
+1.1. Header
+
+ +------------+----+----+----+----+--+--+--+--+--+--+--+--+
+ | Properties | Dictionary Size | Uncompressed Size |
+ +------------+----+----+----+----+--+--+--+--+--+--+--+--+
+
+
+1.1.1. Properties
+
+ The Properties field contains three properties. An abbreviation
+ is given in parentheses, followed by the value range of the
+ property. The field consists of
+
+ 1) the number of literal context bits (lc, [0, 8]);
+ 2) the number of literal position bits (lp, [0, 4]); and
+ 3) the number of position bits (pb, [0, 4]).
+
+ The properties are encoded using the following formula:
+
+ Properties = (pb * 5 + lp) * 9 + lc
+
+ The following C code illustrates a straightforward way to
+ decode the Properties field:
+
+ uint8_t lc, lp, pb;
+ uint8_t prop = get_lzma_properties();
+ if (prop > (4 * 5 + 4) * 9 + 8)
+ return LZMA_PROPERTIES_ERROR;
+
+ pb = prop / (9 * 5);
+ prop -= pb * 9 * 5;
+ lp = prop / 9;
+ lc = prop - lp * 9;
+
+ XZ Utils has an additional requirement: lc + lp <= 4. Files
+ which don't follow this requirement cannot be decompressed
+ with XZ Utils. Usually this isn't a problem since the most
+ common lc/lp/pb values are 3/0/2. It is the only lc/lp/pb
+ combination that the files created by LZMA Utils can have,
+ but LZMA Utils can decompress files with any lc/lp/pb.
+
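+        The same formula can be used in the other direction. The
+        following C sketch is only an illustration (it is not part of
+        the normative description above) and uses the common default
+        values 3/0/2:
+
+                uint8_t lc = 3, lp = 0, pb = 2;
+                uint8_t prop = (uint8_t)((pb * 5 + lp) * 9 + lc);
+                /* For 3/0/2 this yields prop == 0x5D (93). */
+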
+
+1.1.2. Dictionary Size
+
+ Dictionary Size is stored as an unsigned 32-bit little endian
+ integer. Any 32-bit value is possible, but for maximum
+ portability, only sizes of 2^n and 2^n + 2^(n-1) should be
+ used.
+
+ LZMA Utils creates only files with dictionary size 2^n,
+ 16 <= n <= 25. LZMA Utils can decompress files with any
+ dictionary size.
+
+ XZ Utils creates and decompresses .lzma files only with
+ dictionary sizes 2^n and 2^n + 2^(n-1). If some other
+ dictionary size is specified when compressing, the value
+        stored in the Dictionary Size field is rounded up, but the
+ specified value is still used in the actual compression code.
+
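+        As an illustrative, non-normative sketch, the rounding described
+        above could be done like this in C. Here requested_size is a
+        hypothetical variable holding the size requested by the user:
+
+                uint32_t d = requested_size;
+                uint32_t rounded = UINT32_C(1) << 16;  /* start from 2^16 */
+
+                while (rounded < d && rounded + (rounded >> 1) < d)
+                        rounded <<= 1;
+
+                if (rounded < d)
+                        rounded += rounded >> 1;
+
+                /* rounded is now the smallest 2^n or 2^n + 2^(n-1) that
+                   is >= d; sizes above 3 GiB are not handled here. */
+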
+
+1.1.3. Uncompressed Size
+
+        Uncompressed Size is stored as an unsigned 64-bit little endian
+ integer. A special value of 0xFFFF_FFFF_FFFF_FFFF indicates
+ that Uncompressed Size is unknown. End of Payload Marker (*)
+ is used if and only if Uncompressed Size is unknown.
+
+ XZ Utils rejects files whose Uncompressed Size field specifies
+ a known size that is 256 GiB or more. This is to reject false
+ positives when trying to guess if the input file is in the
+ .lzma format. When Uncompressed Size is unknown, there is no
+ limit for the uncompressed size of the file.
+
+ (*) Some tools use the term End of Stream (EOS) marker
+ instead of End of Payload Marker.
+
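+        To tie the header fields together, here is an illustrative
+        (non-normative) C sketch that reads the fields from a buffer;
+        buf is assumed to hold the 13 Header bytes:
+
+                uint8_t properties = buf[0];
+                uint32_t dict_size = 0;
+                uint64_t uncompressed_size = 0;
+
+                for (int i = 0; i < 4; ++i)
+                        dict_size |= (uint32_t)buf[1 + i] << (8 * i);
+
+                for (int i = 0; i < 8; ++i)
+                        uncompressed_size |= (uint64_t)buf[5 + i] << (8 * i);
+
+                /* UINT64_MAX in uncompressed_size means the size is
+                   unknown and the End of Payload Marker is used. */
+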
+
+1.2. LZMA Compressed Data
+
+        A detailed description of the format of this field is outside
+        the scope of this document.
+
+
+2. References
+
+ LZMA SDK - The original LZMA implementation
+ http://7-zip.org/sdk.html
+
+ 7-Zip
+ http://7-zip.org/
+
+ LZMA Utils - LZMA adapted to POSIX-like systems
+ http://tukaani.org/lzma/
+
+ XZ Utils - The next generation of LZMA Utils
+ http://tukaani.org/xz/
+
+        The .xz file format - The successor of the .lzma format
+ http://tukaani.org/xz/xz-file-format.txt
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xz-a4.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xz-a4.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xz-a4.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzdec-a4.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzdec-a4.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzdec-a4.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzdiff-a4.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzdiff-a4.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzdiff-a4.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzgrep-a4.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzgrep-a4.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzgrep-a4.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzless-a4.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzless-a4.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzless-a4.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzmore-a4.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzmore-a4.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-a4/xzmore-a4.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xz-letter.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xz-letter.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xz-letter.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzdec-letter.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzdec-letter.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzdec-letter.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzdiff-letter.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzdiff-letter.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzdiff-letter.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzgrep-letter.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzgrep-letter.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzgrep-letter.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzless-letter.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzless-letter.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzless-letter.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzmore-letter.pdf b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzmore-letter.pdf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/pdf-letter/xzmore-letter.pdf
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xz.txt b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xz.txt
new file mode 100644
index 00000000..e9b0ee5e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xz.txt
@@ -0,0 +1,786 @@
+XZ(1) XZ Utils XZ(1)
+
+
+
+NAME
+ xz, unxz, xzcat, lzma, unlzma, lzcat - Compress or decompress .xz and
+ .lzma files
+
+SYNOPSIS
+ xz [option]... [file]...
+
+ unxz is equivalent to xz --decompress.
+ xzcat is equivalent to xz --decompress --stdout.
+ lzma is equivalent to xz --format=lzma.
+ unlzma is equivalent to xz --format=lzma --decompress.
+ lzcat is equivalent to xz --format=lzma --decompress --stdout.
+
+ When writing scripts that need to decompress files, it is recommended
+ to always use the name xz with appropriate arguments (xz -d or xz -dc)
+ instead of the names unxz and xzcat.
+
+DESCRIPTION
+ xz is a general-purpose data compression tool with command line syntax
+ similar to gzip(1) and bzip2(1). The native file format is the .xz
+ format, but also the legacy .lzma format and raw compressed streams
+ with no container format headers are supported.
+
+ xz compresses or decompresses each file according to the selected oper-
+ ation mode. If no files are given or file is -, xz reads from standard
+ input and writes the processed data to standard output. xz will refuse
+ (display an error and skip the file) to write compressed data to stan-
+ dard output if it is a terminal. Similarly, xz will refuse to read com-
+ pressed data from standard input if it is a terminal.
+
+ Unless --stdout is specified, files other than - are written to a new
+ file whose name is derived from the source file name:
+
+ o When compressing, the suffix of the target file format (.xz or
+ .lzma) is appended to the source filename to get the target file-
+ name.
+
+ o When decompressing, the .xz or .lzma suffix is removed from the
+ filename to get the target filename. xz also recognizes the suf-
+ fixes .txz and .tlz, and replaces them with the .tar suffix.
+
+ If the target file already exists, an error is displayed and the file
+ is skipped.
+
+ Unless writing to standard output, xz will display a warning and skip
+ the file if any of the following applies:
+
+ o File is not a regular file. Symbolic links are not followed, thus
+ they are never considered to be regular files.
+
+ o File has more than one hardlink.
+
+ o File has setuid, setgid, or sticky bit set.
+
+ o The operation mode is set to compress, and the file already has a
+ suffix of the target file format (.xz or .txz when compressing to
+ the .xz format, and .lzma or .tlz when compressing to the .lzma for-
+ mat).
+
+ o The operation mode is set to decompress, and the file doesn't have a
+ suffix of any of the supported file formats (.xz, .txz, .lzma, or
+ .tlz).
+
+ After successfully compressing or decompressing the file, xz copies the
+ owner, group, permissions, access time, and modification time from the
+ source file to the target file. If copying the group fails, the permis-
+ sions are modified so that the target file doesn't become accessible to
+ users who didn't have permission to access the source file. xz doesn't
+ support copying other metadata like access control lists or extended
+ attributes yet.
+
+ Once the target file has been successfully closed, the source file is
+ removed unless --keep was specified. The source file is never removed
+ if the output is written to standard output.
+
+ Sending SIGINFO or SIGUSR1 to the xz process makes it print progress
+ information to standard error. This has only limited use since when
+ standard error is a terminal, using --verbose will display an automati-
+ cally updating progress indicator.
+
+ Memory usage
+ The memory usage of xz varies from a few hundred kilobytes to several
+ gigabytes depending on the compression settings. The settings used when
+    compressing a file also affect the memory usage of the decompressor.
+ Typically the decompressor needs only 5 % to 20 % of the amount of RAM
+ that the compressor needed when creating the file. Still, the worst-
+ case memory usage of the decompressor is several gigabytes.
+
+ To prevent uncomfortable surprises caused by huge memory usage, xz has
+ a built-in memory usage limiter. The default limit is 40 % of total
+ physical RAM. While operating systems provide ways to limit the memory
+    usage of processes, relying on them wasn't deemed to be flexible enough.
+
+ When compressing, if the selected compression settings exceed the mem-
+ ory usage limit, the settings are automatically adjusted downwards and
+ a notice about this is displayed. As an exception, if the memory usage
+ limit is exceeded when compressing with --format=raw, an error is dis-
+ played and xz will exit with exit status 1.
+
+    If the source file cannot be decompressed without exceeding the memory
+ usage limit, an error message is displayed and the file is skipped.
+ Note that compressed files may contain many blocks, which may have been
+ compressed with different settings. Typically all blocks will have
+ roughly the same memory requirements, but it is possible that a block
+ later in the file will exceed the memory usage limit, and an error
+    about a too-low memory usage limit is displayed after some data has
+ already been decompressed.
+
+ The absolute value of the active memory usage limit can be seen near
+ the bottom of the output of --long-help. The default limit can be
+ overridden with --memory=limit.
+
+OPTIONS
+ Integer suffixes and special values
+ In most places where an integer argument is expected, an optional suf-
+ fix is supported to easily indicate large integers. There must be no
+ space between the integer and the suffix.
+
+ k or kB
+ The integer is multiplied by 1,000 (10^3). For example, 5k or
+ 5kB equals 5000.
+
+ Ki or KiB
+ The integer is multiplied by 1,024 (2^10).
+
+ M or MB
+ The integer is multiplied by 1,000,000 (10^6).
+
+ Mi or MiB
+ The integer is multiplied by 1,048,576 (2^20).
+
+ G or GB
+ The integer is multiplied by 1,000,000,000 (10^9).
+
+ Gi or GiB
+ The integer is multiplied by 1,073,741,824 (2^30).
+
+ A special value max can be used to indicate the maximum integer value
+ supported by the option.
+
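+    The mapping is simple arithmetic. The following C sketch (an
+    illustration only, not the parser that xz itself uses) converts a
+    suffix string to its multiplier:
+
+        #include <stdint.h>
+        #include <string.h>
+
+        /* Illustrative only: return the multiplier for an integer
+           suffix, or 0 for an unknown suffix. */
+        static uint64_t
+        suffix_multiplier(const char *s)
+        {
+                if (strcmp(s, "k") == 0 || strcmp(s, "kB") == 0)
+                        return 1000;               /* 10^3 */
+                if (strcmp(s, "Ki") == 0 || strcmp(s, "KiB") == 0)
+                        return 1024;               /* 2^10 */
+                if (strcmp(s, "M") == 0 || strcmp(s, "MB") == 0)
+                        return 1000000;            /* 10^6 */
+                if (strcmp(s, "Mi") == 0 || strcmp(s, "MiB") == 0)
+                        return 1048576;            /* 2^20 */
+                if (strcmp(s, "G") == 0 || strcmp(s, "GB") == 0)
+                        return 1000000000;         /* 10^9 */
+                if (strcmp(s, "Gi") == 0 || strcmp(s, "GiB") == 0)
+                        return 1073741824;         /* 2^30 */
+                return 0;
+        }
+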
+ Operation mode
+ If multiple operation mode options are given, the last one takes
+ effect.
+
+ -z, --compress
+ Compress. This is the default operation mode when no operation
+ mode option is specified, and no other operation mode is implied
+ from the command name (for example, unxz implies --decompress).
+
+ -d, --decompress, --uncompress
+ Decompress.
+
+ -t, --test
+ Test the integrity of compressed files. No files are created or
+ removed. This option is equivalent to --decompress --stdout
+ except that the decompressed data is discarded instead of being
+ written to standard output.
+
+ -l, --list
+ View information about the compressed files. No uncompressed
+ output is produced, and no files are created or removed. In list
+ mode, the program cannot read the compressed data from standard
+ input or from other unseekable sources.
+
+ This feature has not been implemented yet.
+
+ Operation modifiers
+ -k, --keep
+ Keep (don't delete) the input files.
+
+ -f, --force
+ This option has several effects:
+
+ o If the target file already exists, delete it before compress-
+ ing or decompressing.
+
+ o Compress or decompress even if the input is not a regular
+ file, has more than one hardlink, or has setuid, setgid, or
+ sticky bit set. The setuid, setgid, and sticky bits are not
+ copied to the target file.
+
+ o If combined with --decompress --stdout and xz doesn't recog-
+ nize the type of the source file, xz will copy the source
+ file as is to standard output. This allows using xzcat
+ --force like cat(1) for files that have not been compressed
+ with xz. Note that in future, xz might support new com-
+ pressed file formats, which may make xz decompress more types
+ of files instead of copying them as is to standard output.
+ --format=format can be used to restrict xz to decompress only
+ a single file format.
+
+ o Allow writing compressed data to a terminal, and reading com-
+ pressed data from a terminal.
+
+ -c, --stdout, --to-stdout
+ Write the compressed or decompressed data to standard output
+ instead of a file. This implies --keep.
+
+ -S .suf, --suffix=.suf
+ When compressing, use .suf as the suffix for the target file
+ instead of .xz or .lzma. If not writing to standard output and
+ the source file already has the suffix .suf, a warning is dis-
+ played and the file is skipped.
+
+ When decompressing, recognize also files with the suffix .suf in
+ addition to files with the .xz, .txz, .lzma, or .tlz suffix. If
+ the source file has the suffix .suf, the suffix is removed to
+ get the target filename.
+
+ When compressing or decompressing raw streams (--format=raw),
+ the suffix must always be specified unless writing to standard
+ output, because there is no default suffix for raw streams.
+
+ --files[=file]
+ Read the filenames to process from file; if file is omitted,
+ filenames are read from standard input. Filenames must be termi-
+ nated with the newline character. If filenames are given also as
+ command line arguments, they are processed before the filenames
+ read from file.
+
+ --files0[=file]
+ This is identical to --files[=file] except that the filenames
+ must be terminated with the null character.
+
+ Basic file format and compression options
+ -F format, --format=format
+ Specify the file format to compress or decompress:
+
+ o auto: This is the default. When compressing, auto is equiva-
+ lent to xz. When decompressing, the format of the input file
+ is autodetected. Note that raw streams (created with --for-
+ mat=raw) cannot be autodetected.
+
+ o xz: Compress to the .xz file format, or accept only .xz files
+ when decompressing.
+
+ o lzma or alone: Compress to the legacy .lzma file format, or
+ accept only .lzma files when decompressing. The alternative
+ name alone is provided for backwards compatibility with LZMA
+ Utils.
+
+ o raw: Compress or uncompress a raw stream (no headers). This
+ is meant for advanced users only. To decode raw streams, you
+ need to set not only --format=raw but also specify the filter
+ chain, which would normally be stored in the container format
+ headers.
+
+ -C check, --check=check
+ Specify the type of the integrity check, which is calculated
+ from the uncompressed data. This option has an effect only when
+ compressing into the .xz format; the .lzma format doesn't sup-
+ port integrity checks. The integrity check (if any) is verified
+ when the .xz file is decompressed.
+
+ Supported check types:
+
+ o none: Don't calculate an integrity check at all. This is usu-
+ ally a bad idea. This can be useful when integrity of the
+ data is verified by other means anyway.
+
+ o crc32: Calculate CRC32 using the polynomial from IEEE-802.3
+ (Ethernet).
+
+ o crc64: Calculate CRC64 using the polynomial from ECMA-182.
+ This is the default, since it is slightly better than CRC32
+ at detecting damaged files and the speed difference is negli-
+ gible.
+
+ o sha256: Calculate SHA-256. This is somewhat slower than CRC32
+ and CRC64.
+
+ Integrity of the .xz headers is always verified with CRC32. It
+ is not possible to change or disable it.
+
+ -0 ... -9
+ Select compression preset. If a preset level is specified multi-
+ ple times, the last one takes effect.
+
+ The compression preset levels can be categorised roughly into
+ three categories:
+
+ -0 ... -2
+ Fast presets with relatively low memory usage. -1 and -2
+ should give compression speed and ratios comparable to
+ bzip2 -1 and bzip2 -9, respectively. Currently -0 is not
+ very good (not much faster than -1 but much worse com-
+                  pression). In the future, -0 may indicate some fast algo-
+ rithm instead of LZMA2.
+
+ -3 ... -5
+ Good compression ratio with low to medium memory usage.
+ These are significantly slower than levels 0-2.
+
+ -6 ... -9
+ Excellent compression with medium to high memory usage.
+ These are also slower than the lower preset levels. The
+ default is -6. Unless you want to maximize the compres-
+ sion ratio, you probably don't want a higher preset level
+ than -7 due to speed and memory usage.
+
+ The exact compression settings (filter chain) used by each pre-
+ set may vary between xz versions. The settings may also vary
+ between files being compressed, if xz determines that modified
+ settings will probably give better compression ratio without
+ significantly affecting compression time or memory usage.
+
+ Because the settings may vary, the memory usage may vary too.
+ The following table lists the maximum memory usage of each pre-
+ set level, which won't be exceeded even in future versions of
+ xz.
+
+ FIXME: The table below is just a rough idea.
+
+ Preset Compression Decompression
+ -0 6 MiB 1 MiB
+ -1 6 MiB 1 MiB
+ -2 10 MiB 1 MiB
+ -3 20 MiB 2 MiB
+ -4 30 MiB 3 MiB
+ -5 60 MiB 6 MiB
+ -6 100 MiB 10 MiB
+ -7 200 MiB 20 MiB
+ -8 400 MiB 40 MiB
+ -9 800 MiB 80 MiB
+
+ When compressing, xz automatically adjusts the compression set-
+ tings downwards if the memory usage limit would be exceeded, so
+ it is safe to specify a high preset level even on systems that
+ don't have lots of RAM.
+
+ --fast and --best
+ These are somewhat misleading aliases for -0 and -9, respec-
+ tively. These are provided only for backwards compatibility
+ with LZMA Utils. Avoid using these options.
+
+          The name --best is especially misleading, because the defini-
+          tion of best depends on the input data, and usually people
+          don't want the very best compression ratio anyway, because it
+ would be very slow.
+
+ -e, --extreme
+ Modify the compression preset (-0 ... -9) so that a little bit
+ better compression ratio can be achieved without increasing mem-
+ ory usage of the compressor or decompressor (exception: compres-
+ sor memory usage may increase a little with presets -0 ... -2).
+ The downside is that the compression time will increase dramati-
+ cally (it can easily double).
+
+ -M limit, --memory=limit
+          Set the memory usage limit. If this option is specified multiple
+ times, the last one takes effect. The limit can be specified in
+ multiple ways:
+
+ o The limit can be an absolute value in bytes. Using an integer
+ suffix like MiB can be useful. Example: --memory=80MiB
+
+ o The limit can be specified as a percentage of physical RAM.
+ Example: --memory=70%
+
+ o The limit can be reset back to its default value (currently
+ 40 % of physical RAM) by setting it to 0.
+
+ o The memory usage limiting can be effectively disabled by set-
+ ting limit to max. This isn't recommended. It's usually bet-
+ ter to use, for example, --memory=90%.
+
+ The current limit can be seen near the bottom of the output of
+ the --long-help option.
+
+ -T threads, --threads=threads
+ Specify the maximum number of worker threads to use. The default
+ is the number of available CPU cores. You can see the current
+ value of threads near the end of the output of the --long-help
+ option.
+
+ The actual number of worker threads can be less than threads if
+ using more threads would exceed the memory usage limit. In
+ addition to CPU-intensive worker threads, xz may use a few aux-
+ iliary threads, which don't use a lot of CPU time.
+
+ Multithreaded compression and decompression are not implemented
+ yet, so this option has no effect for now.
+
+ Custom compressor filter chains
+ A custom filter chain allows specifying the compression settings in
+ detail instead of relying on the settings associated to the preset lev-
+ els. When a custom filter chain is specified, the compression preset
+ level options (-0 ... -9 and --extreme) are silently ignored.
+
+ A filter chain is comparable to piping on the UN*X command line. When
+ compressing, the uncompressed input goes to the first filter, whose
+ output goes to the next filter (if any). The output of the last filter
+ gets written to the compressed file. The maximum number of filters in
+ the chain is four, but typically a filter chain has only one or two
+ filters.
+
+ Many filters have limitations where they can be in the filter chain:
+ some filters can work only as the last filter in the chain, some only
+ as a non-last filter, and some work in any position in the chain.
+ Depending on the filter, this limitation is either inherent to the fil-
+ ter design or exists to prevent security issues.
+
+ A custom filter chain is specified by using one or more filter options
+ in the order they are wanted in the filter chain. That is, the order of
+ filter options is significant! When decoding raw streams (--for-
+ mat=raw), the filter chain is specified in the same order as it was
+ specified when compressing.
+
+ Filters take filter-specific options as a comma-separated list. Extra
+ commas in options are ignored. Every option has a default value, so you
+ need to specify only those you want to change.
+
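+    For example, a chain consisting of the x86 BCJ filter followed by
+    LZMA2 with preset 6 settings could be specified as
+
+        xz --x86 --lzma2=preset=6 foo
+
+    (an illustrative command line; the filter-specific options are
+    described below).
+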
+ --lzma1[=options], --lzma2[=options]
+          Add an LZMA1 or LZMA2 filter to the filter chain. These filters
+          can be used only as the last filter in the chain.
+
+ LZMA1 is a legacy filter, which is supported almost solely due
+ to the legacy .lzma file format, which supports only LZMA1.
+ LZMA2 is an updated version of LZMA1 to fix some practical
+ issues of LZMA1. The .xz format uses LZMA2, and doesn't support
+ LZMA1 at all. Compression speed and ratios of LZMA1 and LZMA2
+ are practically the same.
+
+ LZMA1 and LZMA2 share the same set of options:
+
+ preset=preset
+                  Reset all LZMA1 or LZMA2 options to preset. A preset con-
+                  sists of an integer, which may be followed by single-let-
+ ter preset modifiers. The integer can be from 0 to 9,
+ matching the command line options -0 ... -9. The only
+ supported modifier is currently e, which matches
+ --extreme.
+
+ The default preset is 6, from which the default values
+ for the rest of the LZMA1 or LZMA2 options are taken.
+
+ dict=size
+                  The dictionary (history buffer) size indicates how many
+                  bytes of the recently processed uncompressed data are
+                  kept in memory. One method to reduce the size of the
+                  uncompressed data is to store distance-length pairs,
+                  which indicate what data to repeat from the dictionary
+                  buffer. The bigger the dictionary, the better the com-
+                  pression ratio usually is, but dictionaries bigger than
+                  the uncompressed data are a waste of RAM.
+
+ Typical dictionary size is from 64 KiB to 64 MiB. The
+ minimum is 4 KiB. The maximum for compression is cur-
+ rently 1.5 GiB. The decompressor already supports dictio-
+ naries up to one byte less than 4 GiB, which is the maxi-
+ mum for LZMA1 and LZMA2 stream formats.
+
+ Dictionary size has the biggest effect on compression
+ ratio. Dictionary size and match finder together deter-
+                  mine the memory usage of the LZMA1 or LZMA2 encoder.
+                  Decompression requires the same dictionary size that was
+                  used when compressing, thus the memory usage of the
+                  decoder is determined by the dictionary size used when
+                  compressing.
+
+ lc=lc Specify the number of literal context bits. The minimum
+ is 0 and the maximum is 4; the default is 3. In addi-
+ tion, the sum of lc and lp must not exceed 4.
+
+ lp=lp Specify the number of literal position bits. The minimum
+ is 0 and the maximum is 4; the default is 0.
+
+ pb=pb Specify the number of position bits. The minimum is 0 and
+ the maximum is 4; the default is 2.
+
+ mode=mode
+ Compression mode specifies the function used to analyze
+ the data produced by the match finder. Supported modes
+ are fast and normal. The default is fast for presets 0-2
+ and normal for presets 3-9.
+
+          mf=mf  The match finder has a major effect on encoder speed,
+                  memory usage, and compression ratio. Usually Hash Chain
+                  match finders are faster than Binary Tree match finders.
+                  Hash Chains are usually used together with mode=fast and
+                  Binary Trees with mode=normal. The memory usage formulas
+                  below are only rough estimates, which are closest to
+                  reality when dict is a power of two; a sketch applying
+                  these formulas follows this option list.
+
+ hc3 Hash Chain with 2- and 3-byte hashing
+ Minimum value for nice: 3
+ Memory usage: dict * 7.5 (if dict <= 16 MiB);
+ dict * 5.5 + 64 MiB (if dict > 16 MiB)
+
+ hc4 Hash Chain with 2-, 3-, and 4-byte hashing
+ Minimum value for nice: 4
+ Memory usage: dict * 7.5
+
+ bt2 Binary Tree with 2-byte hashing
+ Minimum value for nice: 2
+ Memory usage: dict * 9.5
+
+ bt3 Binary Tree with 2- and 3-byte hashing
+ Minimum value for nice: 3
+ Memory usage: dict * 11.5 (if dict <= 16 MiB);
+ dict * 9.5 + 64 MiB (if dict > 16 MiB)
+
+ bt4 Binary Tree with 2-, 3-, and 4-byte hashing
+ Minimum value for nice: 4
+ Memory usage: dict * 11.5
+
+ nice=nice
+ Specify what is considered to be a nice length for a
+ match. Once a match of at least nice bytes is found, the
+ algorithm stops looking for possibly better matches.
+
+ nice can be 2-273 bytes. Higher values tend to give bet-
+ ter compression ratio at expense of speed. The default
+ depends on the preset level.
+
+ depth=depth
+ Specify the maximum search depth in the match finder. The
+ default is the special value 0, which makes the compres-
+ sor determine a reasonable depth from mf and nice.
+
+ Using very high values for depth can make the encoder
+ extremely slow with carefully crafted files. Avoid set-
+ ting the depth over 1000 unless you are prepared to
+ interrupt the compression in case it is taking too long.
+
+ When decoding raw streams (--format=raw), LZMA2 needs only the
+ value of dict. LZMA1 needs also lc, lp, and pb.
+
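+          The match finder memory formulas given above can be applied
+          as in the following C sketch (a rough estimate only; the
+          exact memory usage depends on the xz version):
+
+              #include <stdint.h>
+
+              /* Rough LZMA1/LZMA2 encoder memory estimate in bytes,
+                 from the formulas above (dict is the dictionary size
+                 in bytes, mf is "hc3", "hc4", "bt2", "bt3" or "bt4").
+                 Illustrative only. */
+              static uint64_t
+              encoder_mem_estimate(uint64_t dict, const char *mf)
+              {
+                      const uint64_t mib = 1024 * 1024;
+
+                      if (mf[0] == 'h') {
+                              /* hc3 above 16 MiB: dict * 5.5 + 64 MiB */
+                              if (mf[2] == '3' && dict > 16 * mib)
+                                      return dict * 11 / 2 + 64 * mib;
+                              /* hc3 (small dict) and hc4: dict * 7.5 */
+                              return dict * 15 / 2;
+                      }
+
+                      if (mf[2] == '2')       /* bt2: dict * 9.5 */
+                              return dict * 19 / 2;
+                      if (mf[2] == '3' && dict > 16 * mib)
+                              /* bt3 above 16 MiB: dict * 9.5 + 64 MiB */
+                              return dict * 19 / 2 + 64 * mib;
+                      return dict * 23 / 2;   /* bt3/bt4: dict * 11.5 */
+              }
+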
+ --x86[=options]
+
+ --powerpc[=options]
+
+ --ia64[=options]
+
+ --arm[=options]
+
+ --armthumb[=options]
+
+ --sparc[=options]
+ Add a branch/call/jump (BCJ) filter to the filter chain. These
+ filters can be used only as non-last filter in the filter chain.
+
+ A BCJ filter converts relative addresses in the machine code to
+ their absolute counterparts. This doesn't change the size of the
+ data, but it increases redundancy, which allows e.g. LZMA2 to
+ get better compression ratio.
+
+ The BCJ filters are always reversible, so using a BCJ filter for
+ wrong type of data doesn't cause any data loss. However, apply-
+ ing a BCJ filter for wrong type of data is a bad idea, because
+ it tends to make the compression ratio worse.
+
+          Different instruction sets have different alignment:
+
+ Filter Alignment Notes
+ x86 1 32-bit and 64-bit x86
+ PowerPC 4 Big endian only
+ ARM 4 Little endian only
+ ARM-Thumb 2 Little endian only
+ IA-64 16 Big or little endian
+ SPARC 4 Big or little endian
+
+ Since the BCJ-filtered data is usually compressed with LZMA2,
+ the compression ratio may be improved slightly if the LZMA2
+ options are set to match the alignment of the selected BCJ fil-
+ ter. For example, with the IA-64 filter, it's good to set pb=4
+ with LZMA2 (2^4=16). The x86 filter is an exception; it's usu-
+ ally good to stick to LZMA2's default four-byte alignment when
+ compressing x86 executables.
+
+ All BCJ filters support the same options:
+
+ start=offset
+ Specify the start offset that is used when converting
+ between relative and absolute addresses. The offset must
+ be a multiple of the alignment of the filter (see the ta-
+ ble above). The default is zero. In practice, the
+ default is good; specifying a custom offset is almost
+ never useful.
+
+ Specifying a non-zero start offset is probably useful
+ only if the executable has multiple sections, and there
+ are many cross-section jumps or calls. Applying a BCJ
+ filter separately for each section with proper start off-
+ set and then compressing the result as a single chunk may
+ give some improvement in compression ratio compared to
+ applying the BCJ filter with the default offset for the
+ whole executable.
+
+ --delta[=options]
+ Add Delta filter to the filter chain. The Delta filter can be
+ used only as non-last filter in the filter chain.
+
+ Currently only simple byte-wise delta calculation is supported.
+ It can be useful when compressing e.g. uncompressed bitmap
+ images or uncompressed PCM audio. However, special purpose algo-
+ rithms may give significantly better results than Delta + LZMA2.
+ This is true especially with audio, which compresses faster and
+ better e.g. with FLAC.
+
+ Supported options:
+
+ dist=distance
+ Specify the distance of the delta calculation as bytes.
+ distance must be 1-256. The default is 1.
+
+ For example, with dist=2 and eight-byte input A1 B1 A2 B3
+ A3 B5 A4 B7, the output will be A1 B1 01 02 01 02 01 02.
+
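+          The example above can be reproduced with the following C
+          sketch of the byte-wise delta encoding (assuming the
+          straightforward definition that each output byte is the
+          input byte minus the byte dist positions earlier, with the
+          first dist bytes copied as is):
+
+              #include <stdint.h>
+              #include <stdio.h>
+
+              /* In-place byte-wise delta encoding with the given
+                 distance; illustrative only. */
+              static void
+              delta_encode(uint8_t *buf, size_t size, size_t dist)
+              {
+                      for (size_t i = size; i-- > dist; )
+                              buf[i] -= buf[i - dist];
+              }
+
+              int
+              main(void)
+              {
+                      uint8_t buf[8] = { 0xA1, 0xB1, 0xA2, 0xB3,
+                                         0xA3, 0xB5, 0xA4, 0xB7 };
+
+                      delta_encode(buf, 8, 2);    /* dist=2 */
+
+                      /* Prints: A1 B1 01 02 01 02 01 02 */
+                      for (size_t i = 0; i < 8; ++i)
+                              printf("%02X ", buf[i]);
+                      printf("\n");
+                      return 0;
+              }
+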
+ Other options
+ -q, --quiet
+ Suppress warnings and notices. Specify this twice to suppress
+ errors too. This option has no effect on the exit status. That
+ is, even if a warning was suppressed, the exit status to indi-
+ cate a warning is still used.
+
+ -v, --verbose
+ Be verbose. If standard error is connected to a terminal, xz
+ will display a progress indicator. Specifying --verbose twice
+ will give even more verbose output (useful mostly for debug-
+ ging).
+
+ -Q, --no-warn
+ Don't set the exit status to 2 even if a condition worth a warn-
+ ing was detected. This option doesn't affect the verbosity
+ level, thus both --quiet and --no-warn have to be used to not
+ display warnings and to not alter the exit status.
+
+ -h, --help
+ Display a help message describing the most commonly used
+ options, and exit successfully.
+
+ -H, --long-help
+ Display a help message describing all features of xz, and exit
+          successfully.
+
+ -V, --version
+ Display the version number of xz and liblzma.
+
+EXIT STATUS
+ 0 All is good.
+
+ 1 An error occurred.
+
+ 2 Something worth a warning occurred, but no actual errors
+ occurred.
+
+ Notices (not warnings or errors) printed on standard error don't affect
+ the exit status.
+
+ENVIRONMENT
+ XZ_OPT A space-separated list of options is parsed from XZ_OPT before
+ parsing the options given on the command line. Note that only
+ options are parsed from XZ_OPT; all non-options are silently
+ ignored. Parsing is done with getopt_long(3) which is used also
+ for the command line arguments.
+
+LZMA UTILS COMPATIBILITY
+ The command line syntax of xz is practically a superset of lzma,
+ unlzma, and lzcat as found from LZMA Utils 4.32.x. In most cases, it is
+ possible to replace LZMA Utils with XZ Utils without breaking existing
+ scripts. There are some incompatibilities though, which may sometimes
+ cause problems.
+
+ Compression preset levels
+ The numbering of the compression level presets is not identical in xz
+ and LZMA Utils. The most important difference is how dictionary sizes
+ are mapped to different presets. Dictionary size is roughly equal to
+ the decompressor memory usage.
+
+ Level xz LZMA Utils
+ -1 64 KiB 64 KiB
+ -2 512 KiB 1 MiB
+ -3 1 MiB 512 KiB
+ -4 2 MiB 1 MiB
+ -5 4 MiB 2 MiB
+ -6 8 MiB 4 MiB
+ -7 16 MiB 8 MiB
+ -8 32 MiB 16 MiB
+ -9 64 MiB 32 MiB
+
+ The dictionary size differences affect the compressor memory usage too,
+ but there are some other differences between LZMA Utils and XZ Utils,
+ which make the difference even bigger:
+
+ Level xz LZMA Utils 4.32.x
+ -1 2 MiB 2 MiB
+ -2 5 MiB 12 MiB
+ -3 13 MiB 12 MiB
+ -4 25 MiB 16 MiB
+ -5 48 MiB 26 MiB
+ -6 94 MiB 45 MiB
+ -7 186 MiB 83 MiB
+ -8 370 MiB 159 MiB
+ -9 674 MiB 311 MiB
+
+ The default preset level in LZMA Utils is -7 while in XZ Utils it is
+    -6, so both use an 8 MiB dictionary by default.
+
+ Streamed vs. non-streamed .lzma files
+ Uncompressed size of the file can be stored in the .lzma header. LZMA
+ Utils does that when compressing regular files. The alternative is to
+ mark that uncompressed size is unknown and use end of payload marker to
+ indicate where the decompressor should stop. LZMA Utils uses this
+ method when uncompressed size isn't known, which is the case for exam-
+ ple in pipes.
+
+ xz supports decompressing .lzma files with or without end of payload
+ marker, but all .lzma files created by xz will use end of payload
+ marker and have uncompressed size marked as unknown in the .lzma
+ header. This may be a problem in some (uncommon) situations. For exam-
+ ple, a .lzma decompressor in an embedded device might work only with
+ files that have known uncompressed size. If you hit this problem, you
+ need to use LZMA Utils or LZMA SDK to create .lzma files with known
+ uncompressed size.
+
+ Unsupported .lzma files
+ The .lzma format allows lc values up to 8, and lp values up to 4. LZMA
+ Utils can decompress files with any lc and lp, but always creates files
+ with lc=3 and lp=0. Creating files with other lc and lp is possible
+ with xz and with LZMA SDK.
+
+ The implementation of the LZMA1 filter in liblzma requires that the sum
+ of lc and lp must not exceed 4. Thus, .lzma files which exceed this
+    limitation cannot be decompressed with xz.
+
+ LZMA Utils creates only .lzma files which have dictionary size of 2^n
+ (a power of 2), but accepts files with any dictionary size. liblzma
+ accepts only .lzma files which have dictionary size of 2^n or 2^n +
+ 2^(n-1). This is to decrease false positives when autodetecting .lzma
+ files.
+
+ These limitations shouldn't be a problem in practice, since practically
+ all .lzma files have been compressed with settings that liblzma will
+ accept.
+
+ Trailing garbage
+ When decompressing, LZMA Utils silently ignore everything after the
+ first .lzma stream. In most situations, this is a bug. This also means
+ that LZMA Utils don't support decompressing concatenated .lzma files.
+
+ If there is data left after the first .lzma stream, xz considers the
+ file to be corrupt. This may break obscure scripts which have assumed
+ that trailing garbage is ignored.
+
+NOTES
+ Compressed output may vary
+ The exact compressed output produced from the same uncompressed input
+ file may vary between XZ Utils versions even if compression options are
+ identical. This is because the encoder can be improved (faster or bet-
+ ter compression) without affecting the file format. The output can vary
+ even between different builds of the same XZ Utils version, if differ-
+ ent build options are used or if the endianness of the hardware is dif-
+ ferent for different builds.
+
+ The above means that implementing --rsyncable to create rsyncable .xz
+ files is not going to happen without freezing a part of the encoder
+ implementation, which can then be used with --rsyncable.
+
+ Embedded .xz decompressors
+ Embedded .xz decompressor implementations like XZ Embedded don't neces-
+ sarily support files created with check types other than none and
+ crc32. Since the default is --check=crc64, you must use --check=none
+ or --check=crc32 when creating files for embedded systems.
+
+ Outside embedded systems, all .xz format decompressors support all the
+ check types, or at least are able to decompress the file without veri-
+ fying the integrity check if the particular check is not supported.
+
+ XZ Embedded supports BCJ filters, but only with the default start off-
+ set.
+
+SEE ALSO
+ xzdec(1), gzip(1), bzip2(1)
+
+ XZ Utils: <http://tukaani.org/xz/>
+ XZ Embedded: <http://tukaani.org/xz/embedded.html>
+ LZMA SDK: <http://7-zip.org/sdk.html>
+
+
+
+Tukaani 2009-08-27 XZ(1)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzdec.txt b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzdec.txt
new file mode 100644
index 00000000..ee2b820a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzdec.txt
@@ -0,0 +1,95 @@
+XZDEC(1) XZ Utils XZDEC(1)
+
+
+
+NAME
+ xzdec, lzmadec - Small .xz and .lzma decompressors
+
+SYNOPSIS
+ xzdec [option]... [file]...
+ lzmadec [option]... [file]...
+
+DESCRIPTION
+ xzdec is a liblzma-based decompression-only tool for .xz (and only .xz)
+ files. xzdec is intended to work as a drop-in replacement for xz(1) in
+ the most common situations where a script has been written to use xz
+ --decompress --stdout (and possibly a few other commonly used options)
+ to decompress .xz files. lzmadec is identical to xzdec except that
+ lzmadec supports .lzma files instead of .xz files.
+
+ To reduce the size of the executable, xzdec doesn't support multi-
+    threading or localization, and doesn't read options from the XZ_OPT
+    environment variable. xzdec doesn't support displaying intermediate
+ progress information: sending SIGINFO to xzdec does nothing, but send-
+ ing SIGUSR1 terminates the process instead of displaying progress
+ information.
+
+OPTIONS
+ -d, --decompress, --uncompress
+ Ignored for xz(1) compatibility. xzdec supports only decompres-
+ sion.
+
+ -k, --keep
+ Ignored for xz(1) compatibility. xzdec never creates or removes
+ any files.
+
+ -c, --stdout, --to-stdout
+ Ignored for xz(1) compatibility. xzdec always writes the decom-
+ pressed data to standard output.
+
+ -M limit, --memory=limit
+ Set the memory usage limit. If this option is specified multi-
+ ple times, the last one takes effect. The limit can be specified
+ in multiple ways:
+
+ o The limit can be an absolute value in bytes. Using an integer
+ suffix like MiB can be useful. Example: --memory=80MiB
+
+ o The limit can be specified as a percentage of physical RAM.
+ Example: --memory=70%
+
+ o The limit can be reset back to its default value (currently
+ 40 % of physical RAM) by setting it to 0.
+
+ o The memory usage limiting can be effectively disabled by set-
+ ting limit to max. This isn't recommended. It's usually bet-
+ ter to use, for example, --memory=90%.
+
+ The current limit can be seen near the bottom of the output of
+ the --help option.
+
+ -q, --quiet
+ Specifying this once does nothing since xzdec never displays any
+ warnings or notices. Specify this twice to suppress errors.
+
+ -Q, --no-warn
+ Ignored for xz(1) compatibility. xzdec never uses the exit sta-
+ tus 2.
+
+ -h, --help
+ Display a help message and exit successfully.
+
+ -V, --version
+ Display the version number of xzdec and liblzma.
+
+EXIT STATUS
+ 0 All was good.
+
+ 1 An error occurred.
+
+ xzdec doesn't have any warning messages like xz(1) has, thus the exit
+ status 2 is not used by xzdec.
+
+NOTES
+ xzdec and lzmadec are not really that small. The size can be reduced
+ further by dropping features from liblzma at compile time, but that
+ shouldn't usually be done for executables distributed in typical non-
+ embedded operating system distributions. If you need a truly small .xz
+ decompressor, consider using XZ Embedded.
+
+SEE ALSO
+ xz(1)
+
+
+
+Tukaani 2009-06-04 XZDEC(1)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzdiff.txt b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzdiff.txt
new file mode 100644
index 00000000..f64568f2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzdiff.txt
@@ -0,0 +1,36 @@
+XZDIFF(1) XZ Utils XZDIFF(1)
+
+
+
+NAME
+ xzcmp, xzdiff, lzcmp, lzdiff - compare compressed files
+
+SYNOPSIS
+ xzcmp [cmp_options] file1 [file2]
+ xzdiff [diff_options] file1 [file2]
+ lzcmp [cmp_options] file1 [file2]
+ lzdiff [diff_options] file1 [file2]
+
+DESCRIPTION
+    xzcmp and xzdiff invoke cmp(1) or diff(1) on files compressed with
+ xz(1), lzma(1), gzip(1), or bzip2(1). All options specified are passed
+ directly to cmp or diff. If only one file is specified, then the files
+ compared are file1 (which must have a suffix of a supported compression
+ format) and file1 from which the compression format suffix has been
+ stripped. If two files are specified, then they are uncompressed if
+ necessary and fed to cmp(1) or diff(1). The exit status from cmp or
+ diff is preserved.
+
+ The names lzcmp and lzdiff are provided for backward compatibility with
+ LZMA Utils.
+
+SEE ALSO
+ cmp(1), diff(1), xz(1), gzip(1), bzip2(1), zdiff(1)
+
+BUGS
+ Messages from the cmp(1) or diff(1) programs refer to temporary file-
+ names instead of those specified.
+
+
+
+Tukaani 2009-07-05 XZDIFF(1)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzgrep.txt b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzgrep.txt
new file mode 100644
index 00000000..7f665bce
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzgrep.txt
@@ -0,0 +1,39 @@
+XZGREP(1) XZ Utils XZGREP(1)
+
+
+
+NAME
+ xzgrep - search compressed files for a regular expression
+
+SYNOPSIS
+ xzgrep [grep_options] [-e] pattern file...
+ xzegrep ...
+ xzfgrep ...
+ lzgrep ...
+ lzegrep ...
+ lzfgrep ...
+
+DESCRIPTION
+ xzgrep invokes grep(1) on files which may be either uncompressed or
+ compressed with xz(1), lzma(1), gzip(1), or bzip2(1). All options
+ specified are passed directly to grep(1).
+
+ If no file is specified, then the standard input is decompressed if
+ necessary and fed to grep(1). When reading from standard input,
+ gzip(1) and bzip2(1) compressed files are not supported.
+
+ If xzgrep is invoked as xzegrep or xzfgrep then egrep(1) or fgrep(1) is
+ used instead of grep(1). The same applies to names lzgrep, lzegrep,
+ and lzfgrep, which are provided for backward compatibility with LZMA
+ Utils.
+
+ENVIRONMENT
+ GREP If the GREP environment variable is set, xzgrep uses it instead
+ of grep(1), egrep(1), or fgrep(1).
+
+SEE ALSO
+ grep(1), xz(1), gzip(1), bzip2(1), zgrep(1)
+
+
+
+Tukaani 2009-07-05 XZGREP(1)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzless.txt b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzless.txt
new file mode 100644
index 00000000..2f3dc9fe
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzless.txt
@@ -0,0 +1,40 @@
+XZLESS(1) XZ Utils XZLESS(1)
+
+
+
+NAME
+ xzless, lzless - view xz or lzma compressed (text) files
+
+SYNOPSIS
+ xzless [file...]
+ lzless [file...]
+
+DESCRIPTION
+    xzless is a filter that displays pagefuls of uncompressed text from
+ compressed file(s) to a terminal. It works on files compressed with
+ xz(1) or lzma(1). If no files are given, xzless reads from standard
+ input.
+
+ xzless uses less(1) as its only pager. Unlike xzmore, the choice of
+ pagers is not alterable by an environment variable. Commands are based
+ on both more(1) and vi(1), and allow back and forth movement and
+ searching. See the less(1) manual for more information.
+
+ The command named lzless is provided for backward compatibility with
+ LZMA Utils.
+
+ENVIRONMENT
+ LESSMETACHARS
+ A list of characters special to the shell. Set by xzless unless
+ it is already set in the environment.
+
+ LESSOPEN
+ Set to a command line to invoke the xz(1) decompressor for pre-
+ processing the input files to less(1).
+
+SEE ALSO
+ less(1), xz(1), xzmore(1), zless(1)
+
+
+
+Tukaani 2009-07-05 XZLESS(1)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzmore.txt b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzmore.txt
new file mode 100644
index 00000000..6f6cfe93
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/man/txt/xzmore.txt
@@ -0,0 +1,34 @@
+XZMORE(1) XZ Utils XZMORE(1)
+
+
+
+NAME
+ xzmore, lzmore - view xz or lzma compressed (text) files
+
+SYNOPSIS
+ xzmore [filename ...]
+ lzmore [filename ...]
+
+DESCRIPTION
+ xzmore is a filter which allows examination of xz(1) or lzma(1) com-
+ pressed text files one screenful at a time on a soft-copy terminal.
+
+ To use a pager other than the default more, set environment variable
+ PAGER to the name of the desired program. The name lzmore is provided
+ for backward compatibility with LZMA Utils.
+
+ e or q When the prompt --More--(Next file: file) is printed, this com-
+ mand causes xzmore to exit.
+
+ s When the prompt --More--(Next file: file) is printed, this com-
+ mand causes xzmore to skip the next file and continue.
+
+    For a list of keyboard commands supported while actually viewing the
+    content of a file, refer to the manual of the pager you use, usually
+    more(1).
+
+SEE ALSO
+ more(1), xz(1), xzless(1), zmore(1)
+
+
+
+Tukaani 2009-07-05 XZMORE(1)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/xz-file-format.txt b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/xz-file-format.txt
new file mode 100644
index 00000000..4ed66506
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/doc/xz-file-format.txt
@@ -0,0 +1,1150 @@
+
+The .xz File Format
+===================
+
+Version 1.0.4 (2009-08-27)
+
+
+ 0. Preface
+ 0.1. Notices and Acknowledgements
+ 0.2. Getting the Latest Version
+ 0.3. Version History
+ 1. Conventions
+ 1.1. Byte and Its Representation
+ 1.2. Multibyte Integers
+ 2. Overall Structure of .xz File
+ 2.1. Stream
+ 2.1.1. Stream Header
+ 2.1.1.1. Header Magic Bytes
+ 2.1.1.2. Stream Flags
+ 2.1.1.3. CRC32
+ 2.1.2. Stream Footer
+ 2.1.2.1. CRC32
+ 2.1.2.2. Backward Size
+ 2.1.2.3. Stream Flags
+ 2.1.2.4. Footer Magic Bytes
+ 2.2. Stream Padding
+ 3. Block
+ 3.1. Block Header
+ 3.1.1. Block Header Size
+ 3.1.2. Block Flags
+ 3.1.3. Compressed Size
+ 3.1.4. Uncompressed Size
+ 3.1.5. List of Filter Flags
+ 3.1.6. Header Padding
+ 3.1.7. CRC32
+ 3.2. Compressed Data
+ 3.3. Block Padding
+ 3.4. Check
+ 4. Index
+ 4.1. Index Indicator
+ 4.2. Number of Records
+ 4.3. List of Records
+ 4.3.1. Unpadded Size
+ 4.3.2. Uncompressed Size
+ 4.4. Index Padding
+ 4.5. CRC32
+ 5. Filter Chains
+ 5.1. Alignment
+ 5.2. Security
+ 5.3. Filters
+ 5.3.1. LZMA2
+ 5.3.2. Branch/Call/Jump Filters for Executables
+ 5.3.3. Delta
+ 5.3.3.1. Format of the Encoded Output
+ 5.4. Custom Filter IDs
+ 5.4.1. Reserved Custom Filter ID Ranges
+ 6. Cyclic Redundancy Checks
+ 7. References
+
+
+0. Preface
+
+ This document describes the .xz file format (filename suffix
+ ".xz", MIME type "application/x-xz"). It is intended that this
+    format replace the old .lzma format used by LZMA SDK and
+ LZMA Utils.
+
+
+0.1. Notices and Acknowledgements
+
+ This file format was designed by Lasse Collin
+ <lasse.collin@tukaani.org> and Igor Pavlov.
+
+ Special thanks for helping with this document goes to
+ Ville Koskinen. Thanks for helping with this document goes to
+ Mark Adler, H. Peter Anvin, Mikko Pouru, and Lars Wirzenius.
+
+ This document has been put into the public domain.
+
+
+0.2. Getting the Latest Version
+
+ The latest official version of this document can be downloaded
+ from <http://tukaani.org/xz/xz-file-format.txt>.
+
+ Specific versions of this document have a filename
+ xz-file-format-X.Y.Z.txt where X.Y.Z is the version number.
+ For example, the version 1.0.0 of this document is available
+ at <http://tukaani.org/xz/xz-file-format-1.0.0.txt>.
+
+
+0.3. Version History
+
+ Version Date Description
+
+ 1.0.4 2009-08-27 Language improvements in Sections 1.2,
+ 2.1.1.2, 3.1.1, 3.1.2, and 5.3.1
+
+ 1.0.3 2009-06-05 Spelling fixes in Sections 5.1 and 5.4
+
+ 1.0.2 2009-06-04 Typo fixes in Sections 4 and 5.3.1
+
+ 1.0.1 2009-06-01 Typo fix in Section 0.3 and minor
+ clarifications to Sections 2, 2.2,
+ 3.3, 4.4, and 5.3.2
+
+ 1.0.0 2009-01-14 The first official version
+
+
+1. Conventions
+
+ The key words "MUST", "MUST NOT", "REQUIRED", "SHOULD",
+ "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
+ document are to be interpreted as described in [RFC-2119].
+
+ Indicating a warning means displaying a message, returning
+ appropriate exit status, or doing something else to let the
+ user know that something worth warning occurred. The operation
+ SHOULD still finish if a warning is indicated.
+
+ Indicating an error means displaying a message, returning
+ appropriate exit status, or doing something else to let the
+ user know that something prevented successfully finishing the
+ operation. The operation MUST be aborted once an error has
+ been indicated.
+
+
+1.1. Byte and Its Representation
+
+ In this document, byte is always 8 bits.
+
+ A "null byte" has all bits unset. That is, the value of a null
+ byte is 0x00.
+
+ To represent byte blocks, this document uses notation that
+ is similar to the notation used in [RFC-1952]:
+
+ +-------+
+ | Foo | One byte.
+ +-------+
+
+ +---+---+
+ | Foo | Two bytes; that is, some of the vertical bars
+ +---+---+ can be missing.
+
+ +=======+
+ | Foo | Zero or more bytes.
+ +=======+
+
+ In this document, a boxed byte or a byte sequence declared
+ using this notation is called "a field". The example field
+ above would be called "the Foo field" or plain "Foo".
+
+ If there are many fields, they may be split to multiple lines.
+ This is indicated with an arrow ("--->"):
+
+ +=====+
+ | Foo |
+ +=====+
+
+ +=====+
+ ---> | Bar |
+ +=====+
+
+ The above is equivalent to this:
+
+ +=====+=====+
+ | Foo | Bar |
+ +=====+=====+
+
+
+1.2. Multibyte Integers
+
+ Multibyte integers of static length, such as CRC values,
+ are stored in little endian byte order (least significant
+ byte first).
+
+ When smaller values are more likely than bigger values (for
+ example file sizes), multibyte integers are encoded in a
+ variable-length representation:
+ - Numbers in the range [0, 127] are copied as is, and take
+ one byte of space.
+ - Bigger numbers will occupy two or more bytes. All but the
+ last byte of the multibyte representation have the highest
+ (eighth) bit set.
+
+ For now, the value of the variable-length integers is limited
+ to 63 bits, which limits the encoded size of the integer to
+ nine bytes. These limits may be increased in the future if
+ needed.
+
+ The following C code illustrates encoding and decoding of
+ variable-length integers. The functions return the number of
+ bytes occupied by the integer (1-9), or zero on error.
+
+ #include <stddef.h>
+ #include <inttypes.h>
+
+ size_t
+ encode(uint8_t buf[static 9], uint64_t num)
+ {
+ if (num > UINT64_MAX / 2)
+ return 0;
+
+ size_t i = 0;
+
+ while (num >= 0x80) {
+ buf[i++] = (uint8_t)(num) | 0x80;
+ num >>= 7;
+ }
+
+ buf[i++] = (uint8_t)(num);
+
+ return i;
+ }
+
+ size_t
+ decode(const uint8_t buf[], size_t size_max, uint64_t *num)
+ {
+ if (size_max == 0)
+ return 0;
+
+ if (size_max > 9)
+ size_max = 9;
+
+ *num = buf[0] & 0x7F;
+ size_t i = 0;
+
+ while (buf[i++] & 0x80) {
+ if (i >= size_max || buf[i] == 0x00)
+ return 0;
+
+ *num |= (uint64_t)(buf[i] & 0x7F) << (i * 7);
+ }
+
+ return i;
+ }
+
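+    For example, the value 300 (0x12C) does not fit in seven bits, so
+    it is encoded in two bytes: first 0xAC (the low seven bits 0x2C
+    with the highest bit set), then 0x02 (the remaining bits).
+    Decoding reads 0x2C, then adds 0x02 shifted left by seven bits,
+    giving 300 again.
+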
+
+2. Overall Structure of .xz File
+
+    A standalone .xz file consists of one or more Streams which may
+ have Stream Padding between or after them:
+
+ +========+================+========+================+
+ | Stream | Stream Padding | Stream | Stream Padding | ...
+ +========+================+========+================+
+
+ The sizes of Stream and Stream Padding are always multiples
+ of four bytes, thus the size of every valid .xz file MUST be
+ a multiple of four bytes.
+
+ While a typical file contains only one Stream and no Stream
+ Padding, a decoder handling standalone .xz files SHOULD support
+ files that have more than one Stream or Stream Padding.
+
+ In contrast to standalone .xz files, when the .xz file format
+ is used as an internal part of some other file format or
+ communication protocol, it usually is expected that the decoder
+ stops after the first Stream, and doesn't look for Stream
+ Padding or possibly other Streams.
+
+
+2.1. Stream
+
+ +-+-+-+-+-+-+-+-+-+-+-+-+=======+=======+ +=======+
+ | Stream Header | Block | Block | ... | Block |
+ +-+-+-+-+-+-+-+-+-+-+-+-+=======+=======+ +=======+
+
+ +=======+-+-+-+-+-+-+-+-+-+-+-+-+
+ ---> | Index | Stream Footer |
+ +=======+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ All the above fields have a size that is a multiple of four. If
+ Stream is used as an internal part of another file format, it
+ is RECOMMENDED to make the Stream start at an offset that is
+ a multiple of four bytes.
+
+ Stream Header, Index, and Stream Footer are always present in
+ a Stream. The maximum size of the Index field is 16 GiB (2^34).
+
+ There are zero or more Blocks. The maximum number of Blocks is
+ limited only by the maximum size of the Index field.
+
+ Total size of a Stream MUST be less than 8 EiB (2^63 bytes).
+ The same limit applies to the total amount of uncompressed
+ data stored in a Stream.
+
+ If an implementation supports handling .xz files with multiple
+ concatenated Streams, it MAY apply the above limits to the file
+    as a whole instead of on a per-Stream basis.
+
+
+2.1.1. Stream Header
+
+ +---+---+---+---+---+---+-------+------+--+--+--+--+
+ | Header Magic Bytes | Stream Flags | CRC32 |
+ +---+---+---+---+---+---+-------+------+--+--+--+--+
+
+
+2.1.1.1. Header Magic Bytes
+
+ The first six (6) bytes of the Stream are so called Header
+ Magic Bytes. They can be used to identify the file type.
+
+ Using a C array and ASCII:
+ const uint8_t HEADER_MAGIC[6]
+ = { 0xFD, '7', 'z', 'X', 'Z', 0x00 };
+
+ In plain hexadecimal:
+ FD 37 7A 58 5A 00
+
+ Notes:
+ - The first byte (0xFD) was chosen so that the files cannot
+ be erroneously detected as being in .lzma format, in which
+ the first byte is in the range [0x00, 0xE0].
+ - The sixth byte (0x00) was chosen to prevent applications
+ from misdetecting the file as a text file.
+
+ If the Header Magic Bytes don't match, the decoder MUST
+ indicate an error.
+
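+    A minimal sketch of this check (illustrative only; it reuses the
+    array shown above):
+
+        #include <stdbool.h>
+        #include <stdint.h>
+        #include <string.h>
+
+        static const uint8_t HEADER_MAGIC[6]
+                = { 0xFD, '7', 'z', 'X', 'Z', 0x00 };
+
+        /* True if buf begins with the Header Magic Bytes. */
+        static bool
+        is_xz_header(const uint8_t *buf, size_t size)
+        {
+                return size >= 6 && memcmp(buf, HEADER_MAGIC, 6) == 0;
+        }
+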
+
+2.1.1.2. Stream Flags
+
+ The first byte of Stream Flags is always a null byte. In the
+ future, this byte may be used to indicate a new Stream version
+ or other Stream properties.
+
+ The second byte of Stream Flags is a bit field:
+
+ Bit(s) Mask Description
+ 0-3 0x0F Type of Check (see Section 3.4):
+ ID Size Check name
+ 0x00 0 bytes None
+ 0x01 4 bytes CRC32
+ 0x02 4 bytes (Reserved)
+ 0x03 4 bytes (Reserved)
+ 0x04 8 bytes CRC64
+ 0x05 8 bytes (Reserved)
+ 0x06 8 bytes (Reserved)
+ 0x07 16 bytes (Reserved)
+ 0x08 16 bytes (Reserved)
+ 0x09 16 bytes (Reserved)
+ 0x0A 32 bytes SHA-256
+ 0x0B 32 bytes (Reserved)
+ 0x0C 32 bytes (Reserved)
+ 0x0D 64 bytes (Reserved)
+ 0x0E 64 bytes (Reserved)
+ 0x0F 64 bytes (Reserved)
+ 4-7 0xF0 Reserved for future use; MUST be zero for now.
+
+ Implementations SHOULD support at least the Check IDs 0x00
+ (None) and 0x01 (CRC32). Supporting other Check IDs is
+ OPTIONAL. If an unsupported Check is used, the decoder SHOULD
+ indicate a warning or error.
+
+ If any reserved bit is set, the decoder MUST indicate an error.
+ It is possible that there is a new field present which the
+ decoder is not aware of, and can thus parse the Stream Header
+ incorrectly.
+
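+    The following sketch (illustrative only) validates the Stream
+    Flags field and maps the Check ID to the size of the Check field
+    according to the table above:
+
+        #include <stdint.h>
+
+        /* Size of the Check field in bytes, indexed by Check ID. */
+        static const uint8_t check_sizes[16] = {
+                0,  4,  4,  4,  8,  8,  8, 16,
+                16, 16, 32, 32, 32, 64, 64, 64
+        };
+
+        /* Returns the Check size, or -1 if the first byte is not null
+           or a reserved bit is set (the decoder MUST then indicate an
+           error). */
+        static int
+        parse_stream_flags(const uint8_t flags[2])
+        {
+                if (flags[0] != 0x00 || (flags[1] & 0xF0) != 0)
+                        return -1;
+
+                return check_sizes[flags[1] & 0x0F];
+        }
+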
+
+2.1.1.3. CRC32
+
+ The CRC32 is calculated from the Stream Flags field. It is
+ stored as an unsigned 32-bit little endian integer. If the
+ calculated value does not match the stored one, the decoder
+ MUST indicate an error.
+
+ The idea is that Stream Flags would always be two bytes, even
+ if new features are needed. This way old decoders will be able
+ to verify the CRC32 calculated from Stream Flags, and thus
+ distinguish between corrupt files (CRC32 doesn't match) and
+ files that the decoder doesn't support (CRC32 matches but
+ Stream Flags has reserved bits set).
+
+
+2.1.2. Stream Footer
+
+ +-+-+-+-+---+---+---+---+-------+------+----------+---------+
+ | CRC32 | Backward Size | Stream Flags | Footer Magic Bytes |
+ +-+-+-+-+---+---+---+---+-------+------+----------+---------+
+
+
+2.1.2.1. CRC32
+
+ The CRC32 is calculated from the Backward Size and Stream Flags
+ fields. It is stored as an unsigned 32-bit little endian
+ integer. If the calculated value does not match the stored one,
+ the decoder MUST indicate an error.
+
+ The reason to have the CRC32 field before the Backward Size and
+ Stream Flags fields is to keep the four-byte fields aligned to
+ a multiple of four bytes.
+
+
+2.1.2.2. Backward Size
+
+ Backward Size is stored as a 32-bit little endian integer,
+ which indicates the size of the Index field as multiple of
+ four bytes, minimum value being four bytes:
+
+ real_backward_size = (stored_backward_size + 1) * 4;
+
+ If the stored value does not match the real size of the Index
+ field, the decoder MUST indicate an error.
+
+ Using a fixed-size integer to store Backward Size makes
+ it slightly simpler to parse the Stream Footer when the
+ application needs to parse the Stream backwards.
+
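+    A sketch of decoding Backward Size from the four stored bytes
+    (illustrative only):
+
+        #include <stdint.h>
+
+        /* Decode the unsigned 32-bit little endian value and apply
+           real_backward_size = (stored_backward_size + 1) * 4.
+           The result is at most 2^34 bytes (16 GiB), matching the
+           maximum size of the Index field. */
+        static uint64_t
+        decode_backward_size(const uint8_t b[4])
+        {
+                uint32_t stored = (uint32_t)b[0]
+                        | ((uint32_t)b[1] << 8)
+                        | ((uint32_t)b[2] << 16)
+                        | ((uint32_t)b[3] << 24);
+
+                return ((uint64_t)stored + 1) * 4;
+        }
+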
+
+2.1.2.3. Stream Flags
+
+ This is a copy of the Stream Flags field from the Stream
+ Header. The information stored to Stream Flags is needed
+ when parsing the Stream backwards. The decoder MUST compare
+ the Stream Flags fields in both Stream Header and Stream
+ Footer, and indicate an error if they are not identical.
+
+
+2.1.2.4. Footer Magic Bytes
+
+ As the last step of the decoding process, the decoder MUST
+ verify the existence of Footer Magic Bytes. If they don't
+ match, an error MUST be indicated.
+
+ Using a C array and ASCII:
+ const uint8_t FOOTER_MAGIC[2] = { 'Y', 'Z' };
+
+ In hexadecimal:
+ 59 5A
+
+ The primary reason to have Footer Magic Bytes is to make
+ it easier to detect incomplete files quickly, without
+ uncompressing. If the file does not end with Footer Magic Bytes
+ (excluding Stream Padding described in Section 2.2), it cannot
+ be undamaged, unless someone has intentionally appended garbage
+ after the end of the Stream.
+
+
+2.2. Stream Padding
+
+ Only the decoders that support decoding of concatenated Streams
+ MUST support Stream Padding.
+
+ Stream Padding MUST contain only null bytes. To preserve the
+ four-byte alignment of consecutive Streams, the size of Stream
+ Padding MUST be a multiple of four bytes. Empty Stream Padding
+ is allowed. If these requirements are not met, the decoder MUST
+ indicate an error.
+
+ Note that non-empty Stream Padding is allowed at the end of the
+ file; there doesn't need to be a new Stream after non-empty
+ Stream Padding. This can be convenient in certain situations
+ [GNU-tar].
+
+ The possibility of Stream Padding MUST be taken into account
+ when designing an application that parses Streams backwards,
+ and the application supports concatenated Streams.
+
+
+3. Block
+
+ +==============+=================+===============+=======+
+ | Block Header | Compressed Data | Block Padding | Check |
+ +==============+=================+===============+=======+
+
+
+3.1. Block Header
+
+ +-------------------+-------------+=================+
+ | Block Header Size | Block Flags | Compressed Size |
+ +-------------------+-------------+=================+
+
+ +===================+======================+
+ ---> | Uncompressed Size | List of Filter Flags |
+ +===================+======================+
+
+ +================+--+--+--+--+
+ ---> | Header Padding | CRC32 |
+ +================+--+--+--+--+
+
+
+3.1.1. Block Header Size
+
+ This field overlaps with the Index Indicator field (see
+ Section 4.1).
+
+ This field contains the size of the Block Header field,
+ including the Block Header Size field itself. Valid values are
+ in the range [0x01, 0xFF], which indicate the size of the Block
+ Header as multiples of four bytes, minimum size being eight
+ bytes:
+
+ real_header_size = (encoded_header_size + 1) * 4;
+
+ If a Block Header bigger than 1024 bytes is needed in the
+ future, a new field can be added between the Block Header and
+ Compressed Data fields. The presence of this new field would
+ be indicated in the Block Header field.
+
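+    A sketch of interpreting this byte (illustrative only), including
+    the overlap with the Index Indicator:
+
+        #include <stdint.h>
+
+        /* Returns the real Block Header size in bytes, or 0 if the
+           byte is 0x00, which means that the Index field starts here
+           instead of a Block Header (see Section 4.1). */
+        static unsigned
+        block_header_size(uint8_t encoded_header_size)
+        {
+                if (encoded_header_size == 0x00)
+                        return 0;
+
+                /* Valid range is 8 to 1024 bytes. */
+                return ((unsigned)encoded_header_size + 1) * 4;
+        }
+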
+
+3.1.2. Block Flags
+
+ The Block Flags field is a bit field:
+
+ Bit(s) Mask Description
+ 0-1 0x03 Number of filters (1-4)
+ 2-5 0x3C Reserved for future use; MUST be zero for now.
+ 6 0x40 The Compressed Size field is present.
+ 7 0x80 The Uncompressed Size field is present.
+
+ If any reserved bit is set, the decoder MUST indicate an error.
+ It is possible that there is a new field present which the
+ decoder is not aware of, and can thus parse the Block Header
+ incorrectly.
+
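+    A sketch of decoding the Block Flags bit field (illustrative
+    only):
+
+        #include <stdbool.h>
+        #include <stdint.h>
+
+        struct block_flags {
+                unsigned num_filters;           /* 1-4 */
+                bool has_compressed_size;
+                bool has_uncompressed_size;
+        };
+
+        /* Returns false if any reserved bit is set, in which case the
+           decoder MUST indicate an error. */
+        static bool
+        parse_block_flags(uint8_t b, struct block_flags *bf)
+        {
+                if (b & 0x3C)
+                        return false;
+
+                bf->num_filters = (unsigned)(b & 0x03) + 1;
+                bf->has_compressed_size = (b & 0x40) != 0;
+                bf->has_uncompressed_size = (b & 0x80) != 0;
+                return true;
+        }
+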
+
+3.1.3. Compressed Size
+
+ This field is present only if the appropriate bit is set in
+ the Block Flags field (see Section 3.1.2).
+
+ The Compressed Size field contains the size of the Compressed
+ Data field, which MUST be non-zero. Compressed Size is stored
+ using the encoding described in Section 1.2. If the Compressed
+ Size doesn't match the size of the Compressed Data field, the
+ decoder MUST indicate an error.
+
+
+3.1.4. Uncompressed Size
+
+ This field is present only if the appropriate bit is set in
+ the Block Flags field (see Section 3.1.2).
+
+ The Uncompressed Size field contains the size of the Block
+ after uncompressing. Uncompressed Size is stored using the
+ encoding described in Section 1.2. If the Uncompressed Size
+ does not match the real uncompressed size, the decoder MUST
+ indicate an error.
+
+ Storing the Compressed Size and Uncompressed Size fields serves
+ several purposes:
+ - The decoder knows how much memory it needs to allocate
+ for a temporary buffer in multithreaded mode.
+ - Simple error detection: wrong size indicates a broken file.
+ - Seeking forwards to a specific location in streamed mode.
+
+ It should be noted that the only reliable way to determine
+ the real uncompressed size is to uncompress the Block,
+ because the Block Header and Index fields may contain
+ (intentionally or unintentionally) invalid information.
+
+
+3.1.5. List of Filter Flags
+
+ +================+================+ +================+
+ | Filter 0 Flags | Filter 1 Flags | ... | Filter n Flags |
+ +================+================+ +================+
+
+ The number of Filter Flags fields is stored in the Block Flags
+ field (see Section 3.1.2).
+
+ The format of each Filter Flags field is as follows:
+
+ +===========+====================+===================+
+ | Filter ID | Size of Properties | Filter Properties |
+ +===========+====================+===================+
+
+ Both Filter ID and Size of Properties are stored using the
+ encoding described in Section 1.2. Size of Properties indicates
+   the size of the Filter Properties field in bytes. The list of
+ officially defined Filter IDs and the formats of their Filter
+ Properties are described in Section 5.3.
+
+ Filter IDs greater than or equal to 0x4000_0000_0000_0000
+ (2^62) are reserved for implementation-specific internal use.
+ These Filter IDs MUST never be used in List of Filter Flags.
+
+
+3.1.6. Header Padding
+
+   This field contains as many null bytes as are needed to make
+ the Block Header have the size specified in Block Header Size.
+   If any of the bytes are not null bytes, the decoder MUST
+   indicate an error. It is possible that there is a new field
+   present which the decoder is not aware of, and it could thus
+   parse the Block Header incorrectly.
+
+
+3.1.7. CRC32
+
+ The CRC32 is calculated over everything in the Block Header
+ field except the CRC32 field itself. It is stored as an
+ unsigned 32-bit little endian integer. If the calculated
+ value does not match the stored one, the decoder MUST indicate
+ an error.
+
+   Verifying the CRC32 of the Block Header before parsing the
+   actual contents allows the decoder to distinguish between
+   corrupt and unsupported files.
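+
+   Implementation sketch (informative only): using the crc32()
+   function shown in Section 6, the Block Header could be
+   verified like this; ERROR_CORRUPT is a hypothetical error
+   value:
+
+       // header_buf holds the whole Block Header (header_size
+       // bytes); the CRC32 occupies the last four bytes in
+       // little endian order.
+       const uint32_t stored_crc
+               = (uint32_t)header_buf[header_size - 4]
+               | ((uint32_t)header_buf[header_size - 3] << 8)
+               | ((uint32_t)header_buf[header_size - 2] << 16)
+               | ((uint32_t)header_buf[header_size - 1] << 24);
+
+       if (crc32(header_buf, header_size - 4, 0) != stored_crc)
+           return ERROR_CORRUPT;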
+
+
+3.2. Compressed Data
+
+ The format of Compressed Data depends on Block Flags and List
+ of Filter Flags. Excluding the descriptions of the simplest
+ filters in Section 5.3, the format of the filter-specific
+ encoded data is out of scope of this document.
+
+
+3.3. Block Padding
+
+ Block Padding MUST contain 0-3 null bytes to make the size of
+ the Block a multiple of four bytes. This can be needed when
+ the size of Compressed Data is not a multiple of four. If any
+ of the bytes in Block Padding are not null bytes, the decoder
+ MUST indicate an error.
+
+
+3.4. Check
+
+   The type and size of the Check field depend on which bits
+ are set in the Stream Flags field (see Section 2.1.1.2).
+
+ The Check, when used, is calculated from the original
+ uncompressed data. If the calculated Check does not match the
+ stored one, the decoder MUST indicate an error. If the selected
+ type of Check is not supported by the decoder, it SHOULD
+ indicate a warning or error.
+
+
+4. Index
+
+ +-----------------+===================+
+ | Index Indicator | Number of Records |
+ +-----------------+===================+
+
+ +=================+===============+-+-+-+-+
+ ---> | List of Records | Index Padding | CRC32 |
+ +=================+===============+-+-+-+-+
+
+ Index serves several purposes. Using it, one can
+ - verify that all Blocks in a Stream have been processed;
+ - find out the uncompressed size of a Stream; and
+ - quickly access the beginning of any Block (random access).
+
+
+4.1. Index Indicator
+
+ This field overlaps with the Block Header Size field (see
+ Section 3.1.1). The value of Index Indicator is always 0x00.
+
+
+4.2. Number of Records
+
+ This field indicates how many Records there are in the List
+ of Records field, and thus how many Blocks there are in the
+ Stream. The value is stored using the encoding described in
+ Section 1.2. If the decoder has decoded all the Blocks of the
+ Stream, and then notices that the Number of Records doesn't
+ match the real number of Blocks, the decoder MUST indicate an
+ error.
+
+
+4.3. List of Records
+
+ List of Records consists of as many Records as indicated by the
+ Number of Records field:
+
+ +========+========+
+ | Record | Record | ...
+ +========+========+
+
+ Each Record contains information about one Block:
+
+ +===============+===================+
+ | Unpadded Size | Uncompressed Size |
+ +===============+===================+
+
+ If the decoder has decoded all the Blocks of the Stream, it
+ MUST verify that the contents of the Records match the real
+ Unpadded Size and Uncompressed Size of the respective Blocks.
+
+   Implementation hint: It is possible to verify the Index with
+   constant memory usage by calculating, for example, SHA-256 of
+   both the real size values and the List of Records, and then
+   comparing the hash values. Implementing this using a
+   non-cryptographic hash like CRC32 SHOULD be avoided unless
+   small code size is important.
+
+ If the decoder supports random-access reading, it MUST verify
+ that Unpadded Size and Uncompressed Size of every completely
+   decoded Block match the sizes stored in the Index. If only a
+   partial Block is decoded, the decoder MUST verify that the
+   processed sizes don't exceed the sizes stored in the Index.
+
+
+4.3.1. Unpadded Size
+
+ This field indicates the size of the Block excluding the Block
+ Padding field. That is, Unpadded Size is the size of the Block
+ Header, Compressed Data, and Check fields. Unpadded Size is
+ stored using the encoding described in Section 1.2. The value
+ MUST never be zero; with the current structure of Blocks, the
+ actual minimum value for Unpadded Size is five.
+
+ Implementation note: Because the size of the Block Padding
+ field is not included in Unpadded Size, calculating the total
+ size of a Stream or doing random-access reading requires
+ calculating the actual size of the Blocks by rounding Unpadded
+ Sizes up to the next multiple of four.
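+
+   For example, the total size of a Block can be computed from
+   Unpadded Size with a simple rounding step:
+
+       // Round Unpadded Size up to the next multiple of four to
+       // get the true size of the Block including Block Padding.
+       const uint64_t total_block_size
+               = (unpadded_size + 3) & ~UINT64_C(3);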
+
+ The reason to exclude Block Padding from Unpadded Size is to
+ ease making a raw copy of Compressed Data without Block
+ Padding. This can be useful, for example, if someone wants
+ to convert Streams to some other file format quickly.
+
+
+4.3.2. Uncompressed Size
+
+ This field indicates the Uncompressed Size of the respective
+   Block in bytes. The value is stored using the encoding
+ described in Section 1.2.
+
+
+4.4. Index Padding
+
+ This field MUST contain 0-3 null bytes to pad the Index to
+ a multiple of four bytes. If any of the bytes are not null
+ bytes, the decoder MUST indicate an error.
+
+
+4.5. CRC32
+
+ The CRC32 is calculated over everything in the Index field
+ except the CRC32 field itself. The CRC32 is stored as an
+ unsigned 32-bit little endian integer. If the calculated
+ value does not match the stored one, the decoder MUST indicate
+ an error.
+
+
+5. Filter Chains
+
+ The Block Flags field defines how many filters are used. When
+ more than one filter is used, the filters are chained; that is,
+ the output of one filter is the input of another filter. The
+ following figure illustrates the direction of data flow.
+
+ v Uncompressed Data ^
+ | Filter 0 |
+ Encoder | Filter 1 | Decoder
+ | Filter n |
+ v Compressed Data ^
+
+
+5.1. Alignment
+
+ Alignment of uncompressed input data is usually the job of
+ the application producing the data. For example, to get the
+ best results, an archiver tool should make sure that all
+ PowerPC executable files in the archive stream start at
+ offsets that are multiples of four bytes.
+
+ Some filters, for example LZMA2, can be configured to take
+ advantage of specified alignment of input data. Note that
+   taking advantage of aligned input can also be beneficial when
+ a filter is not the first filter in the chain. For example,
+ if you compress PowerPC executables, you may want to use the
+ PowerPC filter and chain that with the LZMA2 filter. Because
+ not only the input but also the output alignment of the PowerPC
+ filter is four bytes, it is now beneficial to set LZMA2
+ settings so that the LZMA2 encoder can take advantage of its
+ four-byte-aligned input data.
+
+ The output of the last filter in the chain is stored to the
+   Compressed Data field, which is guaranteed to be aligned
+ to a multiple of four bytes relative to the beginning of the
+ Stream. This can increase
+ - speed, if the filtered data is handled multiple bytes at
+ a time by the filter-specific encoder and decoder,
+ because accessing aligned data in computer memory is
+ usually faster; and
+ - compression ratio, if the output data is later compressed
+ with an external compression tool.
+
+
+5.2. Security
+
+   If filters were allowed to be chained freely, it would be
+   possible to create malicious files that would be very slow to
+   decode. Such files could be used for denial of service
+   attacks.
+
+ Slow files could occur when multiple filters are chained:
+
+ v Compressed input data
+ | Filter 1 decoder (last filter)
+ | Filter 0 decoder (non-last filter)
+ v Uncompressed output data
+
+ The decoder of the last filter in the chain produces a lot of
+ output from little input. Another filter in the chain takes the
+ output of the last filter, and produces very little output
+ while consuming a lot of input. As a result, a lot of data is
+ moved inside the filter chain, but the filter chain as a whole
+ gets very little work done.
+
+ To prevent this kind of slow files, there are restrictions on
+ how the filters can be chained. These restrictions MUST be
+ taken into account when designing new filters.
+
+   The maximum number of filters in the chain has been limited to
+   four; thus there can be at most three non-last filters. Of
+   these three non-last filters, only two are allowed to change
+   the size of the data.
+
+   The non-last filters that change the size of the data MUST
+   have a limit on how much the decoder can compress the data:
+   the decoder SHOULD produce at least n bytes of output when the
+   filter is given 2n bytes of input. This limit is not
+   absolute, but significant deviations MUST be avoided.
+
+ The above limitations guarantee that if the last filter in the
+ chain produces 4n bytes of output, the chain as a whole will
+ produce at least n bytes of output.
+
+
+5.3. Filters
+
+5.3.1. LZMA2
+
+ LZMA (Lempel-Ziv-Markov chain-Algorithm) is a general-purpose
+ compression algorithm with high compression ratio and fast
+ decompression. LZMA is based on LZ77 and range coding
+ algorithms.
+
+   LZMA2 is an extension on top of the original LZMA. LZMA2 uses
+   LZMA internally, but adds support for flushing the encoder and
+   for uncompressed chunks, eases stateful decoder
+   implementations, and improves support for multithreading.
+   Thus, plain LZMA is not supported in this file format.
+
+ Filter ID: 0x21
+ Size of Filter Properties: 1 byte
+ Changes size of data: Yes
+ Allow as a non-last filter: No
+ Allow as the last filter: Yes
+
+ Preferred alignment:
+ Input data: Adjustable to 1/2/4/8/16 byte(s)
+ Output data: 1 byte
+
+ The format of the one-byte Filter Properties field is as
+ follows:
+
+ Bits Mask Description
+ 0-5 0x3F Dictionary Size
+ 6-7 0xC0 Reserved for future use; MUST be zero for now.
+
+ Dictionary Size is encoded with one-bit mantissa and five-bit
+ exponent. The smallest dictionary size is 4 KiB and the biggest
+ is 4 GiB.
+
+ Raw value Mantissa Exponent Dictionary size
+ 0 2 11 4 KiB
+ 1 3 11 6 KiB
+ 2 2 12 8 KiB
+ 3 3 12 12 KiB
+ 4 2 13 16 KiB
+ 5 3 13 24 KiB
+ 6 2 14 32 KiB
+ ... ... ... ...
+ 35 3 27 768 MiB
+ 36 2 28 1024 MiB
+ 37 3 29 1536 MiB
+ 38 2 30 2048 MiB
+ 39 3 30 3072 MiB
+ 40 2 31 4096 MiB - 1 B
+
+ Instead of having a table in the decoder, the dictionary size
+ can be decoded using the following C code:
+
+ const uint8_t bits = get_dictionary_flags() & 0x3F;
+ if (bits > 40)
+ return DICTIONARY_TOO_BIG; // Bigger than 4 GiB
+
+ uint32_t dictionary_size;
+ if (bits == 40) {
+ dictionary_size = UINT32_MAX;
+ } else {
+ dictionary_size = 2 | (bits & 1);
+ dictionary_size <<= bits / 2 + 11;
+ }
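+
+   Conversely, an encoder needs the smallest raw value whose
+   dictionary size is at least the wanted size. An informative
+   sketch:
+
+       uint8_t encode_dictionary_size(uint32_t size)
+       {
+           // Return the smallest raw value (0-40) whose dictionary
+           // size is at least the requested size in bytes.
+           for (uint8_t bits = 0; bits < 40; ++bits) {
+               uint32_t d = 2 | (bits & 1);
+               d <<= bits / 2 + 11;
+               if (d >= size)
+                   return bits;
+           }
+
+           return 40; // only 4096 MiB - 1 B is big enough
+       }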
+
+
+5.3.2. Branch/Call/Jump Filters for Executables
+
+ These filters convert relative branch, call, and jump
+ instructions to their absolute counterparts in executable
+ files. This conversion increases redundancy and thus
+ compression ratio.
+
+ Size of Filter Properties: 0 or 4 bytes
+ Changes size of data: No
+ Allow as a non-last filter: Yes
+ Allow as the last filter: No
+
+ Below is the list of filters in this category. The alignment
+ is the same for both input and output data.
+
+ Filter ID Alignment Description
+ 0x04 1 byte x86 filter (BCJ)
+ 0x05 4 bytes PowerPC (big endian) filter
+ 0x06 16 bytes IA64 filter
+ 0x07 4 bytes ARM (little endian) filter
+ 0x08 2 bytes ARM Thumb (little endian) filter
+ 0x09 4 bytes SPARC filter
+
+ If the size of Filter Properties is four bytes, the Filter
+ Properties field contains the start offset used for address
+ conversions. It is stored as an unsigned 32-bit little endian
+ integer. The start offset MUST be a multiple of the alignment
+ of the filter as listed in the table above; if it isn't, the
+ decoder MUST indicate an error. If the size of Filter
+ Properties is zero, the start offset is zero.
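+
+   Implementation sketch (informative only): the Filter
+   Properties of these filters could be validated like this;
+   read_le32(), alignment, and ERROR_INVALID_PROPS are
+   hypothetical names:
+
+       if (size_of_properties != 0 && size_of_properties != 4)
+           return ERROR_INVALID_PROPS;
+
+       uint32_t start_offset = 0;
+       if (size_of_properties == 4)
+           start_offset = read_le32(properties); // 32-bit LE
+
+       // "alignment" is the per-filter value from the table above.
+       if (start_offset % alignment != 0)
+           return ERROR_INVALID_PROPS;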
+
+ Setting the start offset may be useful if an executable has
+ multiple sections, and there are many cross-section calls.
+   Taking advantage of this feature usually requires use of
+ the Subblock filter, whose design is not complete yet.
+
+
+5.3.3. Delta
+
+ The Delta filter may increase compression ratio when the value
+ of the next byte correlates with the value of an earlier byte
+ at specified distance.
+
+ Filter ID: 0x03
+ Size of Filter Properties: 1 byte
+ Changes size of data: No
+ Allow as a non-last filter: Yes
+ Allow as the last filter: No
+
+ Preferred alignment:
+ Input data: 1 byte
+ Output data: Same as the original input data
+
+ The Properties byte indicates the delta distance, which can be
+ 1-256 bytes backwards from the current byte: 0x00 indicates
+ distance of 1 byte and 0xFF distance of 256 bytes.
+
+
+5.3.3.1. Format of the Encoded Output
+
+ The code below illustrates both encoding and decoding with
+ the Delta filter.
+
+ // Distance is in the range [1, 256].
+ const unsigned int distance = get_properties_byte() + 1;
+ uint8_t pos = 0;
+ uint8_t delta[256];
+
+ memset(delta, 0, sizeof(delta));
+
+ while (1) {
+ const int byte = read_byte();
+ if (byte == EOF)
+ break;
+
+ uint8_t tmp = delta[(uint8_t)(distance + pos)];
+ if (is_encoder) {
+ tmp = (uint8_t)(byte) - tmp;
+ delta[pos] = (uint8_t)(byte);
+ } else {
+ tmp = (uint8_t)(byte) + tmp;
+ delta[pos] = tmp;
+ }
+
+ write_byte(tmp);
+ --pos;
+ }
+
+
+5.4. Custom Filter IDs
+
+ If a developer wants to use custom Filter IDs, he has two
+ choices. The first choice is to contact Lasse Collin and ask
+ him to allocate a range of IDs for the developer.
+
+ The second choice is to generate a 40-bit random integer,
+ which the developer can use as his personal Developer ID.
+   To minimize the risk of collisions, the Developer ID has to be
+   a randomly generated integer, not a manually selected
+   "hex word".
+ The following command, which works on many free operating
+ systems, can be used to generate Developer ID:
+
+ dd if=/dev/urandom bs=5 count=1 | hexdump
+
+ The developer can then use his Developer ID to create unique
+ (well, hopefully unique) Filter IDs.
+
+ Bits Mask Description
+ 0-15 0x0000_0000_0000_FFFF Filter ID
+ 16-55 0x00FF_FFFF_FFFF_0000 Developer ID
+ 56-62 0x3F00_0000_0000_0000 Static prefix: 0x3F
+
+ The resulting 63-bit integer will use 9 bytes of space when
+ stored using the encoding described in Section 1.2. To get
+   a shorter ID, see the beginning of this Section for how to
+ request a custom ID range.
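+
+   For example, a custom Filter ID could be composed like this;
+   developer_id and filter_id are hypothetical inputs:
+
+       // Combine a 40-bit Developer ID and a 16-bit per-developer
+       // Filter ID with the static 0x3F prefix into a 63-bit value.
+       const uint64_t custom_filter_id
+               = (UINT64_C(0x3F) << 56)
+               | ((developer_id & UINT64_C(0xFFFFFFFFFF)) << 16)
+               | (filter_id & UINT64_C(0xFFFF));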
+
+
+5.4.1. Reserved Custom Filter ID Ranges
+
+ Range Description
+ 0x0000_0300 - 0x0000_04FF Reserved to ease .7z compatibility
+ 0x0002_0000 - 0x0007_FFFF Reserved to ease .7z compatibility
+ 0x0200_0000 - 0x07FF_FFFF Reserved to ease .7z compatibility
+
+
+6. Cyclic Redundancy Checks
+
+ There are several incompatible variations to calculate CRC32
+ and CRC64. For simplicity and clarity, complete examples are
+ provided to calculate the checks as they are used in this file
+ format. Implementations MAY use different code as long as it
+ gives identical results.
+
+ The program below reads data from standard input, calculates
+ the CRC32 and CRC64 values, and prints the calculated values
+ as big endian hexadecimal strings to standard output.
+
+ #include <stddef.h>
+ #include <inttypes.h>
+ #include <stdio.h>
+
+ uint32_t crc32_table[256];
+ uint64_t crc64_table[256];
+
+ void
+ init(void)
+ {
+ static const uint32_t poly32 = UINT32_C(0xEDB88320);
+ static const uint64_t poly64
+ = UINT64_C(0xC96C5795D7870F42);
+
+ for (size_t i = 0; i < 256; ++i) {
+ uint32_t crc32 = i;
+ uint64_t crc64 = i;
+
+ for (size_t j = 0; j < 8; ++j) {
+ if (crc32 & 1)
+ crc32 = (crc32 >> 1) ^ poly32;
+ else
+ crc32 >>= 1;
+
+ if (crc64 & 1)
+ crc64 = (crc64 >> 1) ^ poly64;
+ else
+ crc64 >>= 1;
+ }
+
+ crc32_table[i] = crc32;
+ crc64_table[i] = crc64;
+ }
+ }
+
+ uint32_t
+ crc32(const uint8_t *buf, size_t size, uint32_t crc)
+ {
+ crc = ~crc;
+ for (size_t i = 0; i < size; ++i)
+ crc = crc32_table[buf[i] ^ (crc & 0xFF)]
+ ^ (crc >> 8);
+ return ~crc;
+ }
+
+ uint64_t
+ crc64(const uint8_t *buf, size_t size, uint64_t crc)
+ {
+ crc = ~crc;
+ for (size_t i = 0; i < size; ++i)
+ crc = crc64_table[buf[i] ^ (crc & 0xFF)]
+ ^ (crc >> 8);
+ return ~crc;
+ }
+
+ int
+ main()
+ {
+ init();
+
+ uint32_t value32 = 0;
+ uint64_t value64 = 0;
+ uint64_t total_size = 0;
+ uint8_t buf[8192];
+
+ while (1) {
+ const size_t buf_size
+ = fread(buf, 1, sizeof(buf), stdin);
+ if (buf_size == 0)
+ break;
+
+ total_size += buf_size;
+ value32 = crc32(buf, buf_size, value32);
+ value64 = crc64(buf, buf_size, value64);
+ }
+
+ printf("Bytes: %" PRIu64 "\n", total_size);
+ printf("CRC-32: 0x%08" PRIX32 "\n", value32);
+ printf("CRC-64: 0x%016" PRIX64 "\n", value64);
+
+ return 0;
+ }
+
+
+7. References
+
+ LZMA SDK - The original LZMA implementation
+ http://7-zip.org/sdk.html
+
+ LZMA Utils - LZMA adapted to POSIX-like systems
+ http://tukaani.org/lzma/
+
+ XZ Utils - The next generation of LZMA Utils
+ http://tukaani.org/xz/
+
+ [RFC-1952]
+ GZIP file format specification version 4.3
+ http://www.ietf.org/rfc/rfc1952.txt
+ - Notation of byte boxes in section "2.1. Overall conventions"
+
+ [RFC-2119]
+ Key words for use in RFCs to Indicate Requirement Levels
+ http://www.ietf.org/rfc/rfc2119.txt
+
+ [GNU-tar]
+ GNU tar 1.21 manual
+ http://www.gnu.org/software/tar/manual/html_node/Blocking-Factor.html
+ - Node 9.4.2 "Blocking Factor", paragraph that begins
+ "gzip will complain about trailing garbage"
+ - Note that this URL points to the latest version of the
+ manual, and may some day not contain the note which is in
+ 1.21. For the exact version of the manual, download GNU
+ tar 1.21: ftp://ftp.gnu.org/pub/gnu/tar/tar-1.21.tar.gz
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/Makefile b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/Makefile
new file mode 100644
index 00000000..0b8a088c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/Makefile
@@ -0,0 +1,255 @@
+###############################################################################
+#
+# Makefile to build XZ Utils using DJGPP
+#
+# Make flags to alter compilation:
+#
+# DEBUG=1 Enable assertions. Don't use this for production builds!
+# You may also want to set CFLAGS="-g -O0" to disable
+# optimizations.
+#
+# The usual CPPFLAGS and CFLAGS are supported too.
+#
+###############################################################################
+#
+# Author: Lasse Collin
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+###############################################################################
+
+CC = gcc
+AR = ar
+STRIP = strip
+SED = sed
+RM = rm -f
+
+CFLAGS = -g -Wextra -Wfatal-errors -Wall -march=i386 -mtune=i686 -O2
+
+# NOTE: -fgnu89-inline is needed on DJGPP 2.04 beta and GCC 4.3.2
+# because time.h uses GNU-style "extern inline".
+ALL_CFLAGS = -std=gnu99 -fgnu89-inline
+
+ALL_CPPFLAGS = \
+ -I. \
+ -I../lib \
+ -I../src/common \
+ -I../src/liblzma/api \
+ -I../src/liblzma/common \
+ -I../src/liblzma/check \
+ -I../src/liblzma/rangecoder \
+ -I../src/liblzma/lz \
+ -I../src/liblzma/lzma \
+ -I../src/liblzma/delta \
+ -I../src/liblzma/simple \
+ -I../src/liblzma/subblock
+
+ALL_CPPFLAGS += -DHAVE_CONFIG_H
+
+ifdef DEBUG
+STRIP := rem Skipping strip
+else
+ALL_CPPFLAGS += -DNDEBUG
+endif
+
+ALL_CPPFLAGS += $(CPPFLAGS)
+ALL_CFLAGS += $(CFLAGS)
+
+
+################
+# Common rules #
+################
+
+.PHONY: all clean
+all: liblzma.a getopt.a xzdec.exe lzmadec.exe xz.exe
+clean: liblzma-clean getopt-clean xzdec-clean xz-clean
+
+
+#############
+# liblzma.a #
+#############
+
+LIBLZMA_SRCS_C = \
+ ../src/liblzma/common/alone_decoder.c \
+ ../src/liblzma/common/alone_encoder.c \
+ ../src/liblzma/common/auto_decoder.c \
+ ../src/liblzma/common/block_buffer_decoder.c \
+ ../src/liblzma/common/block_buffer_encoder.c \
+ ../src/liblzma/common/block_decoder.c \
+ ../src/liblzma/common/block_encoder.c \
+ ../src/liblzma/common/block_header_decoder.c \
+ ../src/liblzma/common/block_header_encoder.c \
+ ../src/liblzma/common/block_util.c \
+ ../src/liblzma/common/common.c \
+ ../src/liblzma/common/easy_buffer_encoder.c \
+ ../src/liblzma/common/easy_decoder_memusage.c \
+ ../src/liblzma/common/easy_encoder.c \
+ ../src/liblzma/common/easy_encoder_memusage.c \
+ ../src/liblzma/common/easy_preset.c \
+ ../src/liblzma/common/filter_common.c \
+ ../src/liblzma/common/filter_decoder.c \
+ ../src/liblzma/common/filter_encoder.c \
+ ../src/liblzma/common/filter_flags_decoder.c \
+ ../src/liblzma/common/filter_flags_encoder.c \
+ ../src/liblzma/common/index.c \
+ ../src/liblzma/common/index_decoder.c \
+ ../src/liblzma/common/index_encoder.c \
+ ../src/liblzma/common/index_hash.c \
+ ../src/liblzma/common/stream_buffer_decoder.c \
+ ../src/liblzma/common/stream_buffer_encoder.c \
+ ../src/liblzma/common/stream_decoder.c \
+ ../src/liblzma/common/stream_encoder.c \
+ ../src/liblzma/common/stream_flags_common.c \
+ ../src/liblzma/common/stream_flags_decoder.c \
+ ../src/liblzma/common/stream_flags_encoder.c \
+ ../src/liblzma/common/vli_decoder.c \
+ ../src/liblzma/common/vli_encoder.c \
+ ../src/liblzma/common/vli_size.c \
+ ../src/liblzma/check/check.c \
+ ../src/liblzma/check/crc32_table.c \
+ ../src/liblzma/check/crc64_table.c \
+ ../src/liblzma/check/sha256.c \
+ ../src/liblzma/rangecoder/price_table.c \
+ ../src/liblzma/lz/lz_decoder.c \
+ ../src/liblzma/lz/lz_encoder.c \
+ ../src/liblzma/lz/lz_encoder_mf.c \
+ ../src/liblzma/lzma/fastpos_table.c \
+ ../src/liblzma/lzma/lzma2_decoder.c \
+ ../src/liblzma/lzma/lzma2_encoder.c \
+ ../src/liblzma/lzma/lzma_decoder.c \
+ ../src/liblzma/lzma/lzma_encoder.c \
+ ../src/liblzma/lzma/lzma_encoder_optimum_fast.c \
+ ../src/liblzma/lzma/lzma_encoder_optimum_normal.c \
+ ../src/liblzma/lzma/lzma_encoder_presets.c \
+ ../src/liblzma/delta/delta_common.c \
+ ../src/liblzma/delta/delta_decoder.c \
+ ../src/liblzma/delta/delta_encoder.c \
+ ../src/liblzma/simple/arm.c \
+ ../src/liblzma/simple/armthumb.c \
+ ../src/liblzma/simple/ia64.c \
+ ../src/liblzma/simple/powerpc.c \
+ ../src/liblzma/simple/simple_coder.c \
+ ../src/liblzma/simple/simple_decoder.c \
+ ../src/liblzma/simple/simple_encoder.c \
+ ../src/liblzma/simple/sparc.c \
+ ../src/liblzma/simple/x86.c
+
+LIBLZMA_SRCS_ASM = \
+ ../src/liblzma/check/crc32_x86.S \
+ ../src/liblzma/check/crc64_x86.S
+
+LIBLZMA_OBJS_C = $(LIBLZMA_SRCS_C:.c=.o)
+LIBLZMA_OBJS_ASM = $(LIBLZMA_SRCS_ASM:.S=.o)
+LIBLZMA_OBJS = $(LIBLZMA_OBJS_C) $(LIBLZMA_OBJS_ASM)
+
+$(LIBLZMA_OBJS_C): %.o: %.c
+ $(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) -c -o $@ $<
+
+$(LIBLZMA_OBJS_ASM): %.o: %.S
+ $(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) -c -o $@ $<
+
+liblzma.a: $(LIBLZMA_OBJS)
+ $(RM) $@
+ $(AR) rcs $@ $(LIBLZMA_OBJS)
+ $(STRIP) --strip-unneeded $@
+
+# Avoid too long command lines.
+.PHONY: liblzma-clean $(LIBLZMA_OBJS:.o=-clean)
+liblzma-clean: $(LIBLZMA_OBJS:.o=-clean)
+ -$(RM) liblzma.a
+
+$(LIBLZMA_OBJS:.o=-clean):
+ -$(RM) $(@:-clean=.o)
+
+
+###############
+# getopt_long #
+###############
+
+GETOPT_SRCS = \
+ ../lib/getopt.c \
+ ../lib/getopt1.c
+
+GETOPT_OBJS = $(GETOPT_SRCS:.c=.o)
+
+GETOPT_H = ../lib/getopt.h
+
+$(GETOPT_H): %.h: %.in.h
+ $(SED) "" $< > $@
+
+$(GETOPT_OBJS): %.o: %.c $(GETOPT_H)
+ $(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) -c -o $@ $<
+
+getopt.a: $(GETOPT_OBJS)
+ $(RM) $@
+ $(AR) rcs $@ $(GETOPT_OBJS)
+ $(STRIP) --strip-unneeded $@
+
+getopt-clean:
+ $(RM) $(GETOPT_H) $(GETOPT_OBJS) getopt.a
+
+
+###########################
+# xzdec.exe & lzmadec.exe #
+###########################
+
+XZDEC_SRCS = ../src/xzdec/xzdec.c
+
+xzdec.exe: getopt.a liblzma.a $(XZDEC_SRCS)
+ $(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) $(XZDEC_SRCS) -o $@ getopt.a liblzma.a
+ $(STRIP) --strip-all $@
+ exe2coff $@
+ $(RM) $@
+ copy /b $(DJGPP:DJGPP.ENV=BIN\CWSDSTUB.EXE) + $(@:.exe=) $@
+ $(RM) $(@:.exe=)
+
+lzmadec.exe: getopt.a liblzma.a $(XZDEC_SRCS)
+ $(CC) $(ALL_CPPFLAGS) -DLZMADEC $(ALL_CFLAGS) $(XZDEC_SRCS) -o $@ getopt.a liblzma.a
+ $(STRIP) --strip-all $@
+ exe2coff $@
+ $(RM) $@
+ copy /b $(DJGPP:DJGPP.ENV=BIN\CWSDSTUB.EXE) + $(@:.exe=) $@
+ $(RM) $(@:.exe=)
+
+.PHONY: xzdec-clean
+xzdec-clean:
+ -$(RM) xzdec.exe lzmadec.exe xzdec lzmadec
+
+
+##########
+# xz.exe #
+##########
+
+XZ_SRCS = \
+ ../src/xz/args.c \
+ ../src/xz/coder.c \
+ ../src/xz/file_io.c \
+ ../src/xz/hardware.c \
+ ../src/xz/main.c \
+ ../src/xz/message.c \
+ ../src/xz/options.c \
+ ../src/xz/signals.c \
+ ../src/xz/suffix.c \
+ ../src/xz/util.c
+
+XZ_OBJS = $(XZ_SRCS:.c=.o)
+
+$(XZ_OBJS): %.o: %.c
+ $(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) -c -o $@ $<
+
+xz.exe: getopt.a liblzma.a $(XZ_OBJS)
+ $(CC) $(ALL_CFLAGS) $(XZ_OBJS) -o $@ getopt.a liblzma.a
+ $(STRIP) --strip-all $@
+ exe2coff $@
+ $(RM) $@
+ copy /b $(DJGPP:DJGPP.ENV=BIN\CWSDSTUB.EXE) + $(@:.exe=) $@
+ $(RM) $(@:.exe=)
+
+# Avoid too long command lines.
+.PHONY: xz-clean $(XZ_OBJS:.o=-clean)
+xz-clean: $(XZ_OBJS:.o=-clean)
+ -$(RM) xz.exe xz
+
+$(XZ_OBJS:.o=-clean):
+ -$(RM) $(@:-clean=.o)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/README b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/README
new file mode 100644
index 00000000..649c58c4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/README
@@ -0,0 +1,113 @@
+
+XZ Utils on DOS
+===============
+
+Introduction
+
+ This document explains how to build XZ Utils for DOS using DJGPP.
+ The resulting binaries should run at least on various DOS versions
+ and under Windows 95/98/98SE/ME, which cannot run the Windows version
+ of XZ Utils.
+
+    This is currently experimental and has had very little testing.
+
+
+Getting and Installing DJGPP
+
+    You may use <http://www.delorie.com/djgpp/zip-picker.html> to help
+    decide what to download, but as of this writing (2009-02-13) that
+    may not be the most convenient way, considering which components
+    are actually required to build XZ Utils. However, using the
+    zip-picker can still be worth doing to get a nice short summary of
+    the installation instructions (they can be found in readme.1st too).
+
+    For a more manual method, first select a mirror from
+ <http://www.delorie.com/djgpp/getting.html>. You need
+ the following files:
+
+ unzip32.exe
+ beta/v2/djdev204.zip
+ v2gnu/bnu219b.zip
+ v2gnu/gcc432b.zip
+ v2gnu/mak3791b.zip
+ v2gnu/sed415b.zip
+ v2misc/csdpmi5b.zip
+
+    If newer versions are available, you should probably try them first.
+    Note that djdev203.zip is too old to build XZ Utils; you need at
+    least djdev204.zip. Also note that you want csdpmi5b.zip even if you
+    run under Windows or DOSEMU, because the XZ Utils Makefile will embed
+    cwsdstub.exe into the resulting binaries.
+
+    See the instructions in readme.1st found in djdev204.zip. Here's
+ a short summary, but you should still read readme.1st.
+
+ C:\> mkdir DJGPP
+ C:\> cd DJGPP
+ C:\DJGPP> c:\download\unzip32 c:\download\djdev204.zip
+ C:\DJGPP> c:\download\unzip32 c:\download\bnu219b.zip
+ C:\DJGPP> c:\download\unzip32 c:\download\gcc432b.zip
+ C:\DJGPP> c:\download\unzip32 c:\download\mak3791b.zip
+ C:\DJGPP> c:\download\unzip32 c:\download\sed415b.zip
+ C:\DJGPP> c:\download\unzip32 c:\download\csdpmi5b.zip
+
+ C:\DJGPP> set PATH=C:\DJGPP\BIN;%PATH%
+ C:\DJGPP> set DJGPP=C:\DJGPP\DJGPP.ENV
+
+ You may want to add the last two lines into AUTOEXEC.BAT or have,
+ for example, DJGPP.BAT which you can run before using DJGPP.
+
+    Make sure you use a completely upper-case path in the DJGPP
+    environment variable. This is not required by DJGPP, but the XZ Utils
+    Makefile is a bit stupid and expects everything in the DJGPP
+    environment variable to be uppercase.
+
+
+Building
+
+ Just run "make" in this directory (the directory containing this
+ README). You should get liblzma.a, xz.exe, xzdec.exe, and
+    lzmadec.exe. Of these, xz.exe is probably the only interesting one.
+
+ Note: You need to have an environment that supports long filenames.
+ Once you have built XZ Utils, the resulting binaries can be run
+ without long filename support.
+
+
+Additional Make Flags and Targets
+
+ You may want to try some additional optimizations, which may or
+ may not make the code faster (and may or may not hit possible
+ compiler bugs more easily):
+
+ make CFLAGS="-O3 -fomit-frame-pointer -funroll-loops"
+
+ If you want to enable assertions (the assert() macro), use DEBUG=1.
+ You may want to disable optimizations too if you plan to actually
+ debug the code. Never use DEBUG=1 for production builds!
+
+ make DEBUG=1 CFLAGS="-g -O0"
+
+
+Bugs
+
+ "make clean" may remove src/xz/hardware.c when it tries to remove
+ src/xz/hardware-fixed.c. This is probably a bug somewhere in the
+    DOS environment I use. Maybe it tries the truncated 8.3 name first,
+    and since that gives the name of an existing file, it doesn't look
+    for the long filename.
+
+ "xz -fc /dev/tty" hangs at least in DOSEMU and cannot be interrupted
+ by pressing C-c. Maybe xz should never accept non-regular files on
+ DOS even when --force is used.
+
+    Using a different memory usage limit for encoding and decoding doesn't
+ make sense under pure DOS. Maybe it is still OK when running under
+ Windows.
+
+ The progress indicator of "xz -v" doesn't get updated when running
+ under Dosbox, but it works in DOSEMU. I currently (2009-02-13) don't
+ know if it works in other environments.
+
+ Report bugs to <lasse.collin@tukaani.org> (in English or Finnish).
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/config.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/config.h
new file mode 100644
index 00000000..2eb89b10
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/dos/config.h
@@ -0,0 +1,152 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* Define to 1 if using x86 assembler optimizations. */
+#define HAVE_ASM_X86 1
+
+/* Define to 1 if crc32 integrity check is enabled. */
+#define HAVE_CHECK_CRC32 1
+
+/* Define to 1 if crc64 integrity check is enabled. */
+#define HAVE_CHECK_CRC64 1
+
+/* Define to 1 if sha256 integrity check is enabled. */
+#define HAVE_CHECK_SHA256 1
+
+/* Define to 1 if decoder components are enabled. */
+#define HAVE_DECODER 1
+
+/* Define to 1 if arm decoder is enabled. */
+#define HAVE_DECODER_ARM 1
+
+/* Define to 1 if armthumb decoder is enabled. */
+#define HAVE_DECODER_ARMTHUMB 1
+
+/* Define to 1 if delta decoder is enabled. */
+#define HAVE_DECODER_DELTA 1
+
+/* Define to 1 if ia64 decoder is enabled. */
+#define HAVE_DECODER_IA64 1
+
+/* Define to 1 if lzma1 decoder is enabled. */
+#define HAVE_DECODER_LZMA1 1
+
+/* Define to 1 if lzma2 decoder is enabled. */
+#define HAVE_DECODER_LZMA2 1
+
+/* Define to 1 if powerpc decoder is enabled. */
+#define HAVE_DECODER_POWERPC 1
+
+/* Define to 1 if sparc decoder is enabled. */
+#define HAVE_DECODER_SPARC 1
+
+/* Define to 1 if subblock decoder is enabled. */
+/* #undef HAVE_DECODER_SUBBLOCK */
+
+/* Define to 1 if x86 decoder is enabled. */
+#define HAVE_DECODER_X86 1
+
+/* Define to 1 if encoder components are enabled. */
+#define HAVE_ENCODER 1
+
+/* Define to 1 if arm encoder is enabled. */
+#define HAVE_ENCODER_ARM 1
+
+/* Define to 1 if armthumb encoder is enabled. */
+#define HAVE_ENCODER_ARMTHUMB 1
+
+/* Define to 1 if delta encoder is enabled. */
+#define HAVE_ENCODER_DELTA 1
+
+/* Define to 1 if ia64 encoder is enabled. */
+#define HAVE_ENCODER_IA64 1
+
+/* Define to 1 if lzma1 encoder is enabled. */
+#define HAVE_ENCODER_LZMA1 1
+
+/* Define to 1 if lzma2 encoder is enabled. */
+#define HAVE_ENCODER_LZMA2 1
+
+/* Define to 1 if powerpc encoder is enabled. */
+#define HAVE_ENCODER_POWERPC 1
+
+/* Define to 1 if sparc encoder is enabled. */
+#define HAVE_ENCODER_SPARC 1
+
+/* Define to 1 if subblock encoder is enabled. */
+/* #undef HAVE_ENCODER_SUBBLOCK */
+
+/* Define to 1 if x86 encoder is enabled. */
+#define HAVE_ENCODER_X86 1
+
+/* Define to 1 if the system supports fast unaligned memory access. */
+#define HAVE_FAST_UNALIGNED_ACCESS 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <limits.h> header file. */
+#define HAVE_LIMITS_H 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 to enable bt2 match finder. */
+#define HAVE_MF_BT2 1
+
+/* Define to 1 to enable bt3 match finder. */
+#define HAVE_MF_BT3 1
+
+/* Define to 1 to enable bt4 match finder. */
+#define HAVE_MF_BT4 1
+
+/* Define to 1 to enable hc3 match finder. */
+#define HAVE_MF_HC3 1
+
+/* Define to 1 to enable hc4 match finder. */
+#define HAVE_MF_HC4 1
+
+/* Define to 1 if optimizing for size. */
+/* #undef HAVE_SMALL */
+
+/* Define to 1 if stdbool.h conforms to C99. */
+#define HAVE_STDBOOL_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if the system has the type `uintptr_t'. */
+#define HAVE_UINTPTR_T 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the `utimes' function. */
+#define HAVE_UTIMES 1
+
+/* Define to 1 or 0, depending whether the compiler supports simple visibility
+ declarations. */
+#define HAVE_VISIBILITY 0
+
+/* Define to 1 if the system has the type `_Bool'. */
+#define HAVE__BOOL 1
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "lasse.collin@tukaani.org"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "XZ Utils"
+
+/* The size of `size_t', as computed by sizeof. */
+#define SIZEOF_SIZE_T 4
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash
new file mode 100755
index 00000000..35ea4dae
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash
@@ -0,0 +1,115 @@
+#!/bin/bash
+#
+#############################################################################
+#
+# 7z2lzma.bash is a very primitive .7z to .lzma converter. The input file must
+# have exactly one LZMA compressed stream, which has been created with the
+# default lc, lp, and pb values. The CRC32 in the .7z archive is not checked,
+# and the script may seem to succeed while it actually created a corrupt .lzma
+# file. You should always try uncompressing both the original .7z and the
+# created .lzma and compare that the output is identical.
+#
+# This script requires basic GNU tools and 7z or 7za tool from p7zip.
+#
+# Last modified: 2009-01-15 14:25+0200
+#
+#############################################################################
+#
+# Author: Lasse Collin <lasse.collin@tukaani.org>
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+#############################################################################
+
+# You can use 7z or 7za, both will work.
+SEVENZIP=7za
+
+if [ $# != 2 -o -z "$1" -o -z "$2" ]; then
+ echo "Usage: $0 input.7z output.lzma"
+ exit 1
+fi
+
+# Converts an integer variable to a little endian binary integer.
+int2bin()
+{
+ local LEN=$1
+ local NUM=$2
+ local HEX=(0 1 2 3 4 5 6 7 8 9 A B C D E F)
+ local I
+ for ((I=0; I < "$LEN"; ++I)); do
+ printf "\\x${HEX[(NUM >> 4) & 0x0F]}${HEX[NUM & 0x0F]}"
+ NUM=$((NUM >> 8))
+ done
+}
+
+# Make sure we get possible errors from pipes.
+set -o pipefail
+
+# Get information about the input file. At least older 7z and 7za versions
+# may return with zero exit status even when an error occurred, so check
+# if the output has any lines beginning with "Error".
+INFO=$("$SEVENZIP" l -slt "$1")
+if [ $? != 0 ] || printf '%s\n' "$INFO" | grep -q ^Error; then
+ printf '%s\n' "$INFO"
+ exit 1
+fi
+
+# Check if the input file has more than one compressed block.
+if printf '%s\n' "$INFO" | grep -q '^Block = 1'; then
+ echo "Cannot convert, because the input file has more than"
+ echo "one compressed block."
+ exit 1
+fi
+
+# Get compressed, uncompressed, and dictionary size.
+CSIZE=$(printf '%s\n' "$INFO" | sed -rn 's|^Packed Size = ([0-9]+$)|\1|p')
+USIZE=$(printf '%s\n' "$INFO" | sed -rn 's|^Size = ([0-9]+$)|\1|p')
+DICT=$(printf '%s\n' "$INFO" | sed -rn 's|^Method = LZMA:([0-9]+[bkm]?)$|\1|p')
+
+if [ -z "$CSIZE" -o -z "$USIZE" -o -z "$DICT" ]; then
+ echo "Parsing output of $SEVENZIP failed. Maybe the file uses some"
+ echo "other compression method than plain LZMA."
+ exit 1
+fi
+
+# The following assumes that the default lc, lp, and pb settings were used.
+# Otherwise the output will be corrupt.
+printf '\x5D' > "$2"
+
+# Dictionary size can be given either as a power of two, or in bytes,
+# kibibytes, or mebibytes. We need to convert it to bytes.
+case $DICT in
+ *b)
+ DICT=${DICT%b}
+ ;;
+ *k)
+ DICT=${DICT%k}
+ DICT=$((DICT << 10))
+ ;;
+ *m)
+ DICT=${DICT%m}
+ DICT=$((DICT << 20))
+ ;;
+ *)
+ DICT=$((1 << DICT))
+ ;;
+esac
+int2bin 4 "$DICT" >> "$2"
+
+# Uncompressed size
+int2bin 8 "$USIZE" >> "$2"
+
+# Copy the actual compressed data. Using multiple dd commands to avoid
+# copying a large amount of data with a one-byte block size, which would be
+# annoyingly slow.
+BS=8192
+BIGSIZE=$((CSIZE / BS))
+CSIZE=$((CSIZE % BS))
+{
+ dd of=/dev/null bs=32 count=1 \
+ && dd bs="$BS" count="$BIGSIZE" \
+ && dd bs=1 count="$CSIZE"
+} < "$1" >> "$2"
+
+exit $?
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/scanlzma/scanlzma.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/scanlzma/scanlzma.c
new file mode 100644
index 00000000..f25f1a10
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/scanlzma/scanlzma.c
@@ -0,0 +1,86 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/*
+ scanlzma, scan for lzma compressed data in stdin and echo it to stdout.
+ Copyright (C) 2006 Timo Lindfors
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+/* Usage example:
+
+ $ wget http://www.wifi-shop.cz/Files/produkty/wa2204/wa2204av1.4.1.zip
+ $ unzip wa2204av1.4.1.zip
+ $ gcc scanlzma.c -o scanlzma -Wall
+ $ ./scanlzma 0 < WA2204-FW1.4.1/linux-1.4.bin | lzma -c -d | strings | grep -i "copyright"
+ UpdateDD version 2.5, Copyright (C) 2005 Philipp Benner.
+ Copyright (C) 2005 Philipp Benner.
+ Copyright (C) 2005 Philipp Benner.
+ mawk 1.3%s%s %s, Copyright (C) Michael D. Brennan
+ # Copyright (C) 1998, 1999, 2001 Henry Spencer.
+ ...
+
+*/
+
+
+/* LZMA compressed file format */
+/* --------------------------- */
+/* Offset Size Description */
+/* 0 1 Special LZMA properties for compressed data */
+/* 1 4 Dictionary size (little endian) */
+/* 5 8 Uncompressed size (little endian). -1 means unknown size */
+/* 13 Compressed data */
+
+/* Headers needed for memcmp(), printf(), fread(), exit(), and atoi(). */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define BUFSIZE 4096
+
+int find_lzma_header(unsigned char *buf) {
+ return (buf[0] < 0xE1
+ && buf[0] == 0x5d
+ && buf[4] < 0x20
+ && (memcmp (buf + 10 , "\x00\x00\x00", 3) == 0
+ || (memcmp (buf + 5, "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", 8) == 0)));
+}
+
+int main(int argc, char *argv[]) {
+ char buf[BUFSIZE];
+ int ret, i, numlzma, blocks=0;
+
+ if (argc != 2) {
+ printf("usage: %s numlzma < infile | lzma -c -d > outfile\n"
+ "where numlzma is index of lzma file to extract, starting from zero.\n",
+ argv[0]);
+ exit(1);
+ }
+ numlzma = atoi(argv[1]);
+
+ for (;;) {
+ /* Read data. */
+ ret = fread(buf, BUFSIZE, 1, stdin);
+ if (ret != 1)
+ break;
+
+ /* Scan for signature. */
+ for (i = 0; i<BUFSIZE-23; i++) {
+ if (find_lzma_header(buf+i) && numlzma-- <= 0) {
+ fwrite(buf+i, (BUFSIZE-i), 1, stdout);
+ for (;;) {
+ int ch;
+ ch = getchar();
+ if (ch == EOF)
+ exit(0);
+ putchar(ch);
+ }
+ }
+ }
+ blocks++;
+ }
+ return 1;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/Makefile.am
new file mode 100644
index 00000000..0dbd9c42
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/Makefile.am
@@ -0,0 +1,32 @@
+##
+## Copyright (C) 2004-2007 Free Software Foundation, Inc.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+
+## Not using gnulib-tool, at least for now. It is likely that we won't
+## need anything else from Gnulib than getopt_long().
+
+noinst_LIBRARIES = libgnu.a
+
+libgnu_a_SOURCES =
+libgnu_a_DEPENDENCIES = $(LIBOBJS)
+libgnu_a_LIBADD = $(LIBOBJS)
+
+EXTRA_DIST = getopt.in.h getopt.c getopt1.c getopt_int.h
+BUILT_SOURCES = $(GETOPT_H)
+MOSTLYCLEANFILES = getopt.h getopt.h-t
+
+getopt.h: getopt.in.h
+ { echo '/* DO NOT EDIT! GENERATED AUTOMATICALLY! */'; \
+ cat $(srcdir)/getopt.in.h; \
+ } > $@-t
+ mv -f $@-t $@
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/Makefile.in
new file mode 100644
index 00000000..6434e286
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/Makefile.in
@@ -0,0 +1,525 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = lib
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in getopt.c \
+ getopt1.c
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LIBRARIES = $(noinst_LIBRARIES)
+ARFLAGS = cru
+libgnu_a_AR = $(AR) $(ARFLAGS)
+am_libgnu_a_OBJECTS =
+libgnu_a_OBJECTS = $(am_libgnu_a_OBJECTS)
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = $(libgnu_a_SOURCES)
+DIST_SOURCES = $(libgnu_a_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+noinst_LIBRARIES = libgnu.a
+libgnu_a_SOURCES =
+libgnu_a_DEPENDENCIES = $(LIBOBJS)
+libgnu_a_LIBADD = $(LIBOBJS)
+EXTRA_DIST = getopt.in.h getopt.c getopt1.c getopt_int.h
+BUILT_SOURCES = $(GETOPT_H)
+MOSTLYCLEANFILES = getopt.h getopt.h-t
+all: $(BUILT_SOURCES)
+ $(MAKE) $(AM_MAKEFLAGS) all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign lib/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign lib/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLIBRARIES:
+ -test -z "$(noinst_LIBRARIES)" || rm -f $(noinst_LIBRARIES)
+libgnu.a: $(libgnu_a_OBJECTS) $(libgnu_a_DEPENDENCIES)
+ -rm -f libgnu.a
+ $(libgnu_a_AR) libgnu.a $(libgnu_a_OBJECTS) $(libgnu_a_LIBADD)
+ $(RANLIB) libgnu.a
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@$(DEPDIR)/getopt.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@$(DEPDIR)/getopt1.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: $(BUILT_SOURCES)
+ $(MAKE) $(AM_MAKEFLAGS) check-am
+all-am: Makefile $(LIBRARIES)
+installdirs:
+install: $(BUILT_SOURCES)
+ $(MAKE) $(AM_MAKEFLAGS) install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+ -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES)
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+ -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES)
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLIBRARIES \
+ mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf $(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf $(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: all check install install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+ clean-libtool clean-noinstLIBRARIES ctags distclean \
+ distclean-compile distclean-generic distclean-libtool \
+ distclean-tags distdir dvi dvi-am html html-am info info-am \
+ install install-am install-data install-data-am install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+ pdf pdf-am ps ps-am tags uninstall uninstall-am
+
+
+getopt.h: getopt.in.h
+ { echo '/* DO NOT EDIT! GENERATED AUTOMATICALLY! */'; \
+ cat $(srcdir)/getopt.in.h; \
+ } > $@-t
+ mv -f $@-t $@
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt.c
new file mode 100644
index 00000000..e5a2e45c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt.c
@@ -0,0 +1,1199 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* Getopt for GNU.
+ NOTE: getopt is now part of the C library, so if you don't know what
+ "Keep this file name-space clean" means, talk to drepper@gnu.org
+ before changing it!
+ Copyright (C) 1987,88,89,90,91,92,93,94,95,96,98,99,2000,2001,2002,2003,2004,2006
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License along
+ with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA. */
+
+#ifndef _LIBC
+# include <config.h>
+#endif
+
+#include "getopt.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#ifdef __VMS
+# include <unixlib.h>
+#endif
+
+/* Completely disable NLS for getopt. We won't include translations for it
+ anyway. If the system lacks getopt_long, missing translations probably
+ aren't a problem. */
+/*
+#ifdef _LIBC
+# include <libintl.h>
+#else
+# include "gettext.h"
+# define _(msgid) gettext (msgid)
+#endif
+*/
+#define _(msgid) (msgid)
+
+#if defined _LIBC && defined USE_IN_LIBIO
+# include <wchar.h>
+#endif
+
+#ifndef attribute_hidden
+# define attribute_hidden
+#endif
+
+/* Unlike standard Unix `getopt', functions like `getopt_long'
+ let the user intersperse the options with the other arguments.
+
+ As `getopt_long' works, it permutes the elements of ARGV so that,
+ when it is done, all the options precede everything else. Thus
+ all application programs are extended to handle flexible argument order.
+
+ Using `getopt' or setting the environment variable POSIXLY_CORRECT
+ disables permutation.
+ Then the application's behavior is completely standard.
+
+ GNU application programs can use a third alternative mode in which
+ they can distinguish the relative order of options and other arguments. */
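+
+/* Illustrative sketch (added commentary, not part of the upstream source):
+   with the default PERMUTE ordering, a hypothetical invocation such as
+
+       prog src.txt -v -o out dest.txt
+
+   (assuming -o takes an argument and -v does not) is rearranged while
+   scanning, so that by the time getopt_long returns -1, ARGV reads
+   "prog -v -o out src.txt dest.txt" and `optind' indexes the first
+   non-option ("src.txt").  Starting OPTSTRING with `+' or setting
+   POSIXLY_CORRECT instead stops the scan at "src.txt" without permuting. */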
+
+#include "getopt_int.h"
+
+/* For communication from `getopt' to the caller.
+ When `getopt' finds an option that takes an argument,
+ the argument value is returned here.
+ Also, when `ordering' is RETURN_IN_ORDER,
+ each non-option ARGV-element is returned here. */
+
+char *optarg;
+
+/* Index in ARGV of the next element to be scanned.
+ This is used for communication to and from the caller
+ and for communication between successive calls to `getopt'.
+
+ On entry to `getopt', zero means this is the first call; initialize.
+
+ When `getopt' returns -1, this is the index of the first of the
+ non-option elements that the caller should itself scan.
+
+ Otherwise, `optind' communicates from one call to the next
+ how much of ARGV has been scanned so far. */
+
+/* 1003.2 says this must be 1 before any call. */
+int optind = 1;
+
+/* Callers store zero here to inhibit the error message
+ for unrecognized options. */
+
+int opterr = 1;
+
+/* Set to an option character which was unrecognized.
+ This must be initialized on some systems to avoid linking in the
+ system's own getopt implementation. */
+
+int optopt = '?';
+
+/* Keep a global copy of all internal members of getopt_data. */
+
+static struct _getopt_data getopt_data;
+
+
+#if defined HAVE_DECL_GETENV && !HAVE_DECL_GETENV
+extern char *getenv ();
+#endif
+
+#ifdef _LIBC
+/* Stored original parameters.
+ XXX This is no good solution. We should rather copy the args so
+ that we can compare them later. But we must not use malloc(3). */
+extern int __libc_argc;
+extern char **__libc_argv;
+
+/* Bash 2.0 gives us an environment variable containing flags
+ indicating ARGV elements that should not be considered arguments. */
+
+# ifdef USE_NONOPTION_FLAGS
+/* Defined in getopt_init.c */
+extern char *__getopt_nonoption_flags;
+# endif
+
+# ifdef USE_NONOPTION_FLAGS
+# define SWAP_FLAGS(ch1, ch2) \
+ if (d->__nonoption_flags_len > 0) \
+ { \
+ char __tmp = __getopt_nonoption_flags[ch1]; \
+ __getopt_nonoption_flags[ch1] = __getopt_nonoption_flags[ch2]; \
+ __getopt_nonoption_flags[ch2] = __tmp; \
+ }
+# else
+# define SWAP_FLAGS(ch1, ch2)
+# endif
+#else /* !_LIBC */
+# define SWAP_FLAGS(ch1, ch2)
+#endif /* _LIBC */
+
+/* Exchange two adjacent subsequences of ARGV.
+ One subsequence is elements [first_nonopt,last_nonopt)
+ which contains all the non-options that have been skipped so far.
+ The other is elements [last_nonopt,optind), which contains all
+ the options processed since those non-options were skipped.
+
+ `first_nonopt' and `last_nonopt' are relocated so that they describe
+ the new indices of the non-options in ARGV after they are moved. */
+
+static void
+exchange (char **argv, struct _getopt_data *d)
+{
+ int bottom = d->__first_nonopt;
+ int middle = d->__last_nonopt;
+ int top = d->optind;
+ char *tem;
+
+ /* Exchange the shorter segment with the far end of the longer segment.
+ That puts the shorter segment into the right place.
+ It leaves the longer segment in the right place overall,
+ but it consists of two parts that need to be swapped next. */
+
+#if defined _LIBC && defined USE_NONOPTION_FLAGS
+ /* First make sure the handling of the `__getopt_nonoption_flags'
+ string can work normally. Our top argument must be in the range
+ of the string. */
+ if (d->__nonoption_flags_len > 0 && top >= d->__nonoption_flags_max_len)
+ {
+ /* We must extend the array. The user plays games with us and
+ presents new arguments. */
+ char *new_str = malloc (top + 1);
+ if (new_str == NULL)
+ d->__nonoption_flags_len = d->__nonoption_flags_max_len = 0;
+ else
+ {
+ memset (__mempcpy (new_str, __getopt_nonoption_flags,
+ d->__nonoption_flags_max_len),
+ '\0', top + 1 - d->__nonoption_flags_max_len);
+ d->__nonoption_flags_max_len = top + 1;
+ __getopt_nonoption_flags = new_str;
+ }
+ }
+#endif
+
+ while (top > middle && middle > bottom)
+ {
+ if (top - middle > middle - bottom)
+ {
+ /* Bottom segment is the short one. */
+ int len = middle - bottom;
+ register int i;
+
+ /* Swap it with the top part of the top segment. */
+ for (i = 0; i < len; i++)
+ {
+ tem = argv[bottom + i];
+ argv[bottom + i] = argv[top - (middle - bottom) + i];
+ argv[top - (middle - bottom) + i] = tem;
+ SWAP_FLAGS (bottom + i, top - (middle - bottom) + i);
+ }
+ /* Exclude the moved bottom segment from further swapping. */
+ top -= len;
+ }
+ else
+ {
+ /* Top segment is the short one. */
+ int len = top - middle;
+ register int i;
+
+ /* Swap it with the bottom part of the bottom segment. */
+ for (i = 0; i < len; i++)
+ {
+ tem = argv[bottom + i];
+ argv[bottom + i] = argv[middle + i];
+ argv[middle + i] = tem;
+ SWAP_FLAGS (bottom + i, middle + i);
+ }
+ /* Exclude the moved top segment from further swapping. */
+ bottom += len;
+ }
+ }
+
+ /* Update records for the slots the non-options now occupy. */
+
+ d->__first_nonopt += (d->optind - d->__last_nonopt);
+ d->__last_nonopt = d->optind;
+}
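+
+/* Illustrative sketch (added commentary, not part of the upstream source):
+   suppose ARGV is "prog file1 file2 -a -b" with first_nonopt = 1,
+   last_nonopt = 3 and optind = 5.  The skipped non-options occupy [1,3)
+   and the options just processed occupy [3,5); exchange() swaps the two
+   blocks so ARGV becomes "prog -a -b file1 file2", then records
+   first_nonopt = 3 and last_nonopt = 5 to describe the moved non-options. */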
+
+/* Initialize the internal data when the first call is made. */
+
+static const char *
+_getopt_initialize (int argc, char **argv, const char *optstring,
+ int posixly_correct, struct _getopt_data *d)
+{
+ /* Start processing options with ARGV-element 1 (since ARGV-element 0
+ is the program name); the sequence of previously skipped
+ non-option ARGV-elements is empty. */
+
+ d->__first_nonopt = d->__last_nonopt = d->optind;
+
+ d->__nextchar = NULL;
+
+ d->__posixly_correct = posixly_correct || !!getenv ("POSIXLY_CORRECT");
+
+ /* Determine how to handle the ordering of options and nonoptions. */
+
+ if (optstring[0] == '-')
+ {
+ d->__ordering = RETURN_IN_ORDER;
+ ++optstring;
+ }
+ else if (optstring[0] == '+')
+ {
+ d->__ordering = REQUIRE_ORDER;
+ ++optstring;
+ }
+ else if (d->__posixly_correct)
+ d->__ordering = REQUIRE_ORDER;
+ else
+ d->__ordering = PERMUTE;
+
+#if defined _LIBC && defined USE_NONOPTION_FLAGS
+ if (!d->__posixly_correct
+ && argc == __libc_argc && argv == __libc_argv)
+ {
+ if (d->__nonoption_flags_max_len == 0)
+ {
+ if (__getopt_nonoption_flags == NULL
+ || __getopt_nonoption_flags[0] == '\0')
+ d->__nonoption_flags_max_len = -1;
+ else
+ {
+ const char *orig_str = __getopt_nonoption_flags;
+ int len = d->__nonoption_flags_max_len = strlen (orig_str);
+ if (d->__nonoption_flags_max_len < argc)
+ d->__nonoption_flags_max_len = argc;
+ __getopt_nonoption_flags =
+ (char *) malloc (d->__nonoption_flags_max_len);
+ if (__getopt_nonoption_flags == NULL)
+ d->__nonoption_flags_max_len = -1;
+ else
+ memset (__mempcpy (__getopt_nonoption_flags, orig_str, len),
+ '\0', d->__nonoption_flags_max_len - len);
+ }
+ }
+ d->__nonoption_flags_len = d->__nonoption_flags_max_len;
+ }
+ else
+ d->__nonoption_flags_len = 0;
+#endif
+
+ return optstring;
+}
+
+/* Scan elements of ARGV (whose length is ARGC) for option characters
+ given in OPTSTRING.
+
+ If an element of ARGV starts with '-', and is not exactly "-" or "--",
+ then it is an option element. The characters of this element
+ (aside from the initial '-') are option characters. If `getopt'
+ is called repeatedly, it returns successively each of the option characters
+ from each of the option elements.
+
+ If `getopt' finds another option character, it returns that character,
+ updating `optind' and `nextchar' so that the next call to `getopt' can
+ resume the scan with the following option character or ARGV-element.
+
+ If there are no more option characters, `getopt' returns -1.
+ Then `optind' is the index in ARGV of the first ARGV-element
+ that is not an option. (The ARGV-elements have been permuted
+ so that those that are not options now come last.)
+
+ OPTSTRING is a string containing the legitimate option characters.
+ If an option character is seen that is not listed in OPTSTRING,
+ return '?' after printing an error message. If you set `opterr' to
+ zero, the error message is suppressed but we still return '?'.
+
+ If a char in OPTSTRING is followed by a colon, that means it wants an arg,
+ so the following text in the same ARGV-element, or the text of the following
+ ARGV-element, is returned in `optarg'. Two colons mean an option that
+ wants an optional arg; if there is text in the current ARGV-element,
+ it is returned in `optarg', otherwise `optarg' is set to zero.
+
+ If OPTSTRING starts with `-' or `+', it requests different methods of
+ handling the non-option ARGV-elements.
+ See the comments about RETURN_IN_ORDER and REQUIRE_ORDER, above.
+
+ Long-named options begin with `--' instead of `-'.
+ Their names may be abbreviated as long as the abbreviation is unique
+ or is an exact match for some defined option. If they have an
+ argument, it follows the option name in the same ARGV-element, separated
+   from the option name by a `=', or else in the next ARGV-element.
+ When `getopt' finds a long-named option, it returns 0 if that option's
+ `flag' field is nonzero, the value of the option's `val' field
+ if the `flag' field is zero.
+
+ LONGOPTS is a vector of `struct option' terminated by an
+ element containing a name which is zero.
+
+ LONGIND returns the index in LONGOPT of the long-named option found.
+ It is only valid when a long-named option has been found by the most
+ recent call.
+
+ If LONG_ONLY is nonzero, '-' as well as '--' can introduce
+ long-named options.
+
+ If POSIXLY_CORRECT is nonzero, behave as if the POSIXLY_CORRECT
+ environment variable were set. */
+
+int
+_getopt_internal_r (int argc, char **argv, const char *optstring,
+ const struct option *longopts, int *longind,
+ int long_only, int posixly_correct, struct _getopt_data *d)
+{
+ int print_errors = d->opterr;
+ if (optstring[0] == ':')
+ print_errors = 0;
+
+ if (argc < 1)
+ return -1;
+
+ d->optarg = NULL;
+
+ if (d->optind == 0 || !d->__initialized)
+ {
+ if (d->optind == 0)
+ d->optind = 1; /* Don't scan ARGV[0], the program name. */
+ optstring = _getopt_initialize (argc, argv, optstring,
+ posixly_correct, d);
+ d->__initialized = 1;
+ }
+
+ /* Test whether ARGV[optind] points to a non-option argument.
+ Either it does not have option syntax, or there is an environment flag
+     from the shell indicating it is not an option.  The latter information
+     is only used when used in the GNU libc.  */
+#if defined _LIBC && defined USE_NONOPTION_FLAGS
+# define NONOPTION_P (argv[d->optind][0] != '-' || argv[d->optind][1] == '\0' \
+ || (d->optind < d->__nonoption_flags_len \
+ && __getopt_nonoption_flags[d->optind] == '1'))
+#else
+# define NONOPTION_P (argv[d->optind][0] != '-' || argv[d->optind][1] == '\0')
+#endif
+
+ if (d->__nextchar == NULL || *d->__nextchar == '\0')
+ {
+ /* Advance to the next ARGV-element. */
+
+ /* Give FIRST_NONOPT & LAST_NONOPT rational values if OPTIND has been
+ moved back by the user (who may also have changed the arguments). */
+ if (d->__last_nonopt > d->optind)
+ d->__last_nonopt = d->optind;
+ if (d->__first_nonopt > d->optind)
+ d->__first_nonopt = d->optind;
+
+ if (d->__ordering == PERMUTE)
+ {
+ /* If we have just processed some options following some non-options,
+ exchange them so that the options come first. */
+
+ if (d->__first_nonopt != d->__last_nonopt
+ && d->__last_nonopt != d->optind)
+ exchange ((char **) argv, d);
+ else if (d->__last_nonopt != d->optind)
+ d->__first_nonopt = d->optind;
+
+ /* Skip any additional non-options
+ and extend the range of non-options previously skipped. */
+
+ while (d->optind < argc && NONOPTION_P)
+ d->optind++;
+ d->__last_nonopt = d->optind;
+ }
+
+ /* The special ARGV-element `--' means premature end of options.
+ Skip it like a null option,
+ then exchange with previous non-options as if it were an option,
+ then skip everything else like a non-option. */
+
+ if (d->optind != argc && !strcmp (argv[d->optind], "--"))
+ {
+ d->optind++;
+
+ if (d->__first_nonopt != d->__last_nonopt
+ && d->__last_nonopt != d->optind)
+ exchange ((char **) argv, d);
+ else if (d->__first_nonopt == d->__last_nonopt)
+ d->__first_nonopt = d->optind;
+ d->__last_nonopt = argc;
+
+ d->optind = argc;
+ }
+
+ /* If we have done all the ARGV-elements, stop the scan
+ and back over any non-options that we skipped and permuted. */
+
+ if (d->optind == argc)
+ {
+ /* Set the next-arg-index to point at the non-options
+ that we previously skipped, so the caller will digest them. */
+ if (d->__first_nonopt != d->__last_nonopt)
+ d->optind = d->__first_nonopt;
+ return -1;
+ }
+
+ /* If we have come to a non-option and did not permute it,
+ either stop the scan or describe it to the caller and pass it by. */
+
+ if (NONOPTION_P)
+ {
+ if (d->__ordering == REQUIRE_ORDER)
+ return -1;
+ d->optarg = argv[d->optind++];
+ return 1;
+ }
+
+ /* We have found another option-ARGV-element.
+ Skip the initial punctuation. */
+
+ d->__nextchar = (argv[d->optind] + 1
+ + (longopts != NULL && argv[d->optind][1] == '-'));
+ }
+
+ /* Decode the current option-ARGV-element. */
+
+ /* Check whether the ARGV-element is a long option.
+
+ If long_only and the ARGV-element has the form "-f", where f is
+ a valid short option, don't consider it an abbreviated form of
+ a long option that starts with f. Otherwise there would be no
+ way to give the -f short option.
+
+ On the other hand, if there's a long option "fubar" and
+ the ARGV-element is "-fu", do consider that an abbreviation of
+ the long option, just like "--fu", and not "-f" with arg "u".
+
+ This distinction seems to be the most useful approach. */
+
+ if (longopts != NULL
+ && (argv[d->optind][1] == '-'
+ || (long_only && (argv[d->optind][2]
+ || !strchr (optstring, argv[d->optind][1])))))
+ {
+ char *nameend;
+ const struct option *p;
+ const struct option *pfound = NULL;
+ int exact = 0;
+ int ambig = 0;
+ int indfound = -1;
+ int option_index;
+
+ for (nameend = d->__nextchar; *nameend && *nameend != '='; nameend++)
+ /* Do nothing. */ ;
+
+ /* Test all long options for either exact match
+ or abbreviated matches. */
+ for (p = longopts, option_index = 0; p->name; p++, option_index++)
+ if (!strncmp (p->name, d->__nextchar, nameend - d->__nextchar))
+ {
+ if ((unsigned int) (nameend - d->__nextchar)
+ == (unsigned int) strlen (p->name))
+ {
+ /* Exact match found. */
+ pfound = p;
+ indfound = option_index;
+ exact = 1;
+ break;
+ }
+ else if (pfound == NULL)
+ {
+ /* First nonexact match found. */
+ pfound = p;
+ indfound = option_index;
+ }
+ else if (long_only
+ || pfound->has_arg != p->has_arg
+ || pfound->flag != p->flag
+ || pfound->val != p->val)
+ /* Second or later nonexact match found. */
+ ambig = 1;
+ }
+
+ if (ambig && !exact)
+ {
+ if (print_errors)
+ {
+#if defined _LIBC && defined USE_IN_LIBIO
+ char *buf;
+
+ if (__asprintf (&buf, _("%s: option `%s' is ambiguous\n"),
+ argv[0], argv[d->optind]) >= 0)
+ {
+ _IO_flockfile (stderr);
+
+ int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+ ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
+
+ __fxprintf (NULL, "%s", buf);
+
+ ((_IO_FILE *) stderr)->_flags2 = old_flags2;
+ _IO_funlockfile (stderr);
+
+ free (buf);
+ }
+#else
+ fprintf (stderr, _("%s: option `%s' is ambiguous\n"),
+ argv[0], argv[d->optind]);
+#endif
+ }
+ d->__nextchar += strlen (d->__nextchar);
+ d->optind++;
+ d->optopt = 0;
+ return '?';
+ }
+
+ if (pfound != NULL)
+ {
+ option_index = indfound;
+ d->optind++;
+ if (*nameend)
+ {
+ /* Don't test has_arg with >, because some C compilers don't
+ allow it to be used on enums. */
+ if (pfound->has_arg)
+ d->optarg = nameend + 1;
+ else
+ {
+ if (print_errors)
+ {
+#if defined _LIBC && defined USE_IN_LIBIO
+ char *buf;
+ int n;
+#endif
+
+ if (argv[d->optind - 1][1] == '-')
+ {
+ /* --option */
+#if defined _LIBC && defined USE_IN_LIBIO
+ n = __asprintf (&buf, _("\
+%s: option `--%s' doesn't allow an argument\n"),
+ argv[0], pfound->name);
+#else
+ fprintf (stderr, _("\
+%s: option `--%s' doesn't allow an argument\n"),
+ argv[0], pfound->name);
+#endif
+ }
+ else
+ {
+ /* +option or -option */
+#if defined _LIBC && defined USE_IN_LIBIO
+ n = __asprintf (&buf, _("\
+%s: option `%c%s' doesn't allow an argument\n"),
+ argv[0], argv[d->optind - 1][0],
+ pfound->name);
+#else
+ fprintf (stderr, _("\
+%s: option `%c%s' doesn't allow an argument\n"),
+ argv[0], argv[d->optind - 1][0],
+ pfound->name);
+#endif
+ }
+
+#if defined _LIBC && defined USE_IN_LIBIO
+ if (n >= 0)
+ {
+ _IO_flockfile (stderr);
+
+ int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+ ((_IO_FILE *) stderr)->_flags2
+ |= _IO_FLAGS2_NOTCANCEL;
+
+ __fxprintf (NULL, "%s", buf);
+
+ ((_IO_FILE *) stderr)->_flags2 = old_flags2;
+ _IO_funlockfile (stderr);
+
+ free (buf);
+ }
+#endif
+ }
+
+ d->__nextchar += strlen (d->__nextchar);
+
+ d->optopt = pfound->val;
+ return '?';
+ }
+ }
+ else if (pfound->has_arg == 1)
+ {
+ if (d->optind < argc)
+ d->optarg = argv[d->optind++];
+ else
+ {
+ if (print_errors)
+ {
+#if defined _LIBC && defined USE_IN_LIBIO
+ char *buf;
+
+ if (__asprintf (&buf, _("\
+%s: option `%s' requires an argument\n"),
+ argv[0], argv[d->optind - 1]) >= 0)
+ {
+ _IO_flockfile (stderr);
+
+ int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+ ((_IO_FILE *) stderr)->_flags2
+ |= _IO_FLAGS2_NOTCANCEL;
+
+ __fxprintf (NULL, "%s", buf);
+
+ ((_IO_FILE *) stderr)->_flags2 = old_flags2;
+ _IO_funlockfile (stderr);
+
+ free (buf);
+ }
+#else
+ fprintf (stderr,
+ _("%s: option `%s' requires an argument\n"),
+ argv[0], argv[d->optind - 1]);
+#endif
+ }
+ d->__nextchar += strlen (d->__nextchar);
+ d->optopt = pfound->val;
+ return optstring[0] == ':' ? ':' : '?';
+ }
+ }
+ d->__nextchar += strlen (d->__nextchar);
+ if (longind != NULL)
+ *longind = option_index;
+ if (pfound->flag)
+ {
+ *(pfound->flag) = pfound->val;
+ return 0;
+ }
+ return pfound->val;
+ }
+
+ /* Can't find it as a long option. If this is not getopt_long_only,
+ or the option starts with '--' or is not a valid short
+ option, then it's an error.
+ Otherwise interpret it as a short option. */
+ if (!long_only || argv[d->optind][1] == '-'
+ || strchr (optstring, *d->__nextchar) == NULL)
+ {
+ if (print_errors)
+ {
+#if defined _LIBC && defined USE_IN_LIBIO
+ char *buf;
+ int n;
+#endif
+
+ if (argv[d->optind][1] == '-')
+ {
+ /* --option */
+#if defined _LIBC && defined USE_IN_LIBIO
+ n = __asprintf (&buf, _("%s: unrecognized option `--%s'\n"),
+ argv[0], d->__nextchar);
+#else
+ fprintf (stderr, _("%s: unrecognized option `--%s'\n"),
+ argv[0], d->__nextchar);
+#endif
+ }
+ else
+ {
+ /* +option or -option */
+#if defined _LIBC && defined USE_IN_LIBIO
+ n = __asprintf (&buf, _("%s: unrecognized option `%c%s'\n"),
+ argv[0], argv[d->optind][0], d->__nextchar);
+#else
+ fprintf (stderr, _("%s: unrecognized option `%c%s'\n"),
+ argv[0], argv[d->optind][0], d->__nextchar);
+#endif
+ }
+
+#if defined _LIBC && defined USE_IN_LIBIO
+ if (n >= 0)
+ {
+ _IO_flockfile (stderr);
+
+ int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+ ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
+
+ __fxprintf (NULL, "%s", buf);
+
+ ((_IO_FILE *) stderr)->_flags2 = old_flags2;
+ _IO_funlockfile (stderr);
+
+ free (buf);
+ }
+#endif
+ }
+ d->__nextchar = (char *) "";
+ d->optind++;
+ d->optopt = 0;
+ return '?';
+ }
+ }
+
+ /* Look at and handle the next short option-character. */
+
+ {
+ char c = *d->__nextchar++;
+ char *temp = strchr (optstring, c);
+
+ /* Increment `optind' when we start to process its last character. */
+ if (*d->__nextchar == '\0')
+ ++d->optind;
+
+ if (temp == NULL || c == ':')
+ {
+ if (print_errors)
+ {
+#if defined _LIBC && defined USE_IN_LIBIO
+ char *buf;
+ int n;
+#endif
+
+ if (d->__posixly_correct)
+ {
+ /* 1003.2 specifies the format of this message. */
+#if defined _LIBC && defined USE_IN_LIBIO
+ n = __asprintf (&buf, _("%s: illegal option -- %c\n"),
+ argv[0], c);
+#else
+ fprintf (stderr, _("%s: illegal option -- %c\n"), argv[0], c);
+#endif
+ }
+ else
+ {
+#if defined _LIBC && defined USE_IN_LIBIO
+ n = __asprintf (&buf, _("%s: invalid option -- %c\n"),
+ argv[0], c);
+#else
+ fprintf (stderr, _("%s: invalid option -- %c\n"), argv[0], c);
+#endif
+ }
+
+#if defined _LIBC && defined USE_IN_LIBIO
+ if (n >= 0)
+ {
+ _IO_flockfile (stderr);
+
+ int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+ ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
+
+ __fxprintf (NULL, "%s", buf);
+
+ ((_IO_FILE *) stderr)->_flags2 = old_flags2;
+ _IO_funlockfile (stderr);
+
+ free (buf);
+ }
+#endif
+ }
+ d->optopt = c;
+ return '?';
+ }
+ /* Convenience. Treat POSIX -W foo same as long option --foo */
+ if (temp[0] == 'W' && temp[1] == ';')
+ {
+ char *nameend;
+ const struct option *p;
+ const struct option *pfound = NULL;
+ int exact = 0;
+ int ambig = 0;
+ int indfound = 0;
+ int option_index;
+
+ /* This is an option that requires an argument. */
+ if (*d->__nextchar != '\0')
+ {
+ d->optarg = d->__nextchar;
+ /* If we end this ARGV-element by taking the rest as an arg,
+ we must advance to the next element now. */
+ d->optind++;
+ }
+ else if (d->optind == argc)
+ {
+ if (print_errors)
+ {
+ /* 1003.2 specifies the format of this message. */
+#if defined _LIBC && defined USE_IN_LIBIO
+ char *buf;
+
+ if (__asprintf (&buf,
+ _("%s: option requires an argument -- %c\n"),
+ argv[0], c) >= 0)
+ {
+ _IO_flockfile (stderr);
+
+ int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+ ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
+
+ __fxprintf (NULL, "%s", buf);
+
+ ((_IO_FILE *) stderr)->_flags2 = old_flags2;
+ _IO_funlockfile (stderr);
+
+ free (buf);
+ }
+#else
+ fprintf (stderr, _("%s: option requires an argument -- %c\n"),
+ argv[0], c);
+#endif
+ }
+ d->optopt = c;
+ if (optstring[0] == ':')
+ c = ':';
+ else
+ c = '?';
+ return c;
+ }
+ else
+ /* We already incremented `d->optind' once;
+ increment it again when taking next ARGV-elt as argument. */
+ d->optarg = argv[d->optind++];
+
+ /* optarg is now the argument, see if it's in the
+ table of longopts. */
+
+ for (d->__nextchar = nameend = d->optarg; *nameend && *nameend != '=';
+ nameend++)
+ /* Do nothing. */ ;
+
+ /* Test all long options for either exact match
+ or abbreviated matches. */
+ for (p = longopts, option_index = 0; p->name; p++, option_index++)
+ if (!strncmp (p->name, d->__nextchar, nameend - d->__nextchar))
+ {
+ if ((unsigned int) (nameend - d->__nextchar) == strlen (p->name))
+ {
+ /* Exact match found. */
+ pfound = p;
+ indfound = option_index;
+ exact = 1;
+ break;
+ }
+ else if (pfound == NULL)
+ {
+ /* First nonexact match found. */
+ pfound = p;
+ indfound = option_index;
+ }
+ else
+ /* Second or later nonexact match found. */
+ ambig = 1;
+ }
+ if (ambig && !exact)
+ {
+ if (print_errors)
+ {
+#if defined _LIBC && defined USE_IN_LIBIO
+ char *buf;
+
+ if (__asprintf (&buf, _("%s: option `-W %s' is ambiguous\n"),
+ argv[0], argv[d->optind]) >= 0)
+ {
+ _IO_flockfile (stderr);
+
+ int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+ ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
+
+ __fxprintf (NULL, "%s", buf);
+
+ ((_IO_FILE *) stderr)->_flags2 = old_flags2;
+ _IO_funlockfile (stderr);
+
+ free (buf);
+ }
+#else
+ fprintf (stderr, _("%s: option `-W %s' is ambiguous\n"),
+ argv[0], argv[d->optind]);
+#endif
+ }
+ d->__nextchar += strlen (d->__nextchar);
+ d->optind++;
+ return '?';
+ }
+ if (pfound != NULL)
+ {
+ option_index = indfound;
+ if (*nameend)
+ {
+ /* Don't test has_arg with >, because some C compilers don't
+ allow it to be used on enums. */
+ if (pfound->has_arg)
+ d->optarg = nameend + 1;
+ else
+ {
+ if (print_errors)
+ {
+#if defined _LIBC && defined USE_IN_LIBIO
+ char *buf;
+
+ if (__asprintf (&buf, _("\
+%s: option `-W %s' doesn't allow an argument\n"),
+ argv[0], pfound->name) >= 0)
+ {
+ _IO_flockfile (stderr);
+
+ int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+ ((_IO_FILE *) stderr)->_flags2
+ |= _IO_FLAGS2_NOTCANCEL;
+
+ __fxprintf (NULL, "%s", buf);
+
+ ((_IO_FILE *) stderr)->_flags2 = old_flags2;
+ _IO_funlockfile (stderr);
+
+ free (buf);
+ }
+#else
+ fprintf (stderr, _("\
+%s: option `-W %s' doesn't allow an argument\n"),
+ argv[0], pfound->name);
+#endif
+ }
+
+ d->__nextchar += strlen (d->__nextchar);
+ return '?';
+ }
+ }
+ else if (pfound->has_arg == 1)
+ {
+ if (d->optind < argc)
+ d->optarg = argv[d->optind++];
+ else
+ {
+ if (print_errors)
+ {
+#if defined _LIBC && defined USE_IN_LIBIO
+ char *buf;
+
+ if (__asprintf (&buf, _("\
+%s: option `%s' requires an argument\n"),
+ argv[0], argv[d->optind - 1]) >= 0)
+ {
+ _IO_flockfile (stderr);
+
+ int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+ ((_IO_FILE *) stderr)->_flags2
+ |= _IO_FLAGS2_NOTCANCEL;
+
+ __fxprintf (NULL, "%s", buf);
+
+ ((_IO_FILE *) stderr)->_flags2 = old_flags2;
+ _IO_funlockfile (stderr);
+
+ free (buf);
+ }
+#else
+ fprintf (stderr,
+ _("%s: option `%s' requires an argument\n"),
+ argv[0], argv[d->optind - 1]);
+#endif
+ }
+ d->__nextchar += strlen (d->__nextchar);
+ return optstring[0] == ':' ? ':' : '?';
+ }
+ }
+ d->__nextchar += strlen (d->__nextchar);
+ if (longind != NULL)
+ *longind = option_index;
+ if (pfound->flag)
+ {
+ *(pfound->flag) = pfound->val;
+ return 0;
+ }
+ return pfound->val;
+ }
+ d->__nextchar = NULL;
+ return 'W'; /* Let the application handle it. */
+ }
+ if (temp[1] == ':')
+ {
+ if (temp[2] == ':')
+ {
+ /* This is an option that accepts an argument optionally. */
+ if (*d->__nextchar != '\0')
+ {
+ d->optarg = d->__nextchar;
+ d->optind++;
+ }
+ else
+ d->optarg = NULL;
+ d->__nextchar = NULL;
+ }
+ else
+ {
+ /* This is an option that requires an argument. */
+ if (*d->__nextchar != '\0')
+ {
+ d->optarg = d->__nextchar;
+ /* If we end this ARGV-element by taking the rest as an arg,
+ we must advance to the next element now. */
+ d->optind++;
+ }
+ else if (d->optind == argc)
+ {
+ if (print_errors)
+ {
+ /* 1003.2 specifies the format of this message. */
+#if defined _LIBC && defined USE_IN_LIBIO
+ char *buf;
+
+ if (__asprintf (&buf, _("\
+%s: option requires an argument -- %c\n"),
+ argv[0], c) >= 0)
+ {
+ _IO_flockfile (stderr);
+
+ int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+ ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
+
+ __fxprintf (NULL, "%s", buf);
+
+ ((_IO_FILE *) stderr)->_flags2 = old_flags2;
+ _IO_funlockfile (stderr);
+
+ free (buf);
+ }
+#else
+ fprintf (stderr,
+ _("%s: option requires an argument -- %c\n"),
+ argv[0], c);
+#endif
+ }
+ d->optopt = c;
+ if (optstring[0] == ':')
+ c = ':';
+ else
+ c = '?';
+ }
+ else
+ /* We already incremented `optind' once;
+ increment it again when taking next ARGV-elt as argument. */
+ d->optarg = argv[d->optind++];
+ d->__nextchar = NULL;
+ }
+ }
+ return c;
+ }
+}
+
+int
+_getopt_internal (int argc, char **argv, const char *optstring,
+ const struct option *longopts, int *longind,
+ int long_only, int posixly_correct)
+{
+ int result;
+
+ getopt_data.optind = optind;
+ getopt_data.opterr = opterr;
+
+ result = _getopt_internal_r (argc, argv, optstring, longopts, longind,
+ long_only, posixly_correct, &getopt_data);
+
+ optind = getopt_data.optind;
+ optarg = getopt_data.optarg;
+ optopt = getopt_data.optopt;
+
+ return result;
+}
+
+/* glibc gets a LSB-compliant getopt.
+ Standalone applications get a POSIX-compliant getopt. */
+#if _LIBC
+enum { POSIXLY_CORRECT = 0 };
+#else
+enum { POSIXLY_CORRECT = 1 };
+#endif
+
+int
+getopt (int argc, char *const *argv, const char *optstring)
+{
+ return _getopt_internal (argc, (char **) argv, optstring, NULL, NULL, 0,
+ POSIXLY_CORRECT);
+}
+
+
+#ifdef TEST
+
+/* Compile with -DTEST to make an executable for use in testing
+ the above definition of `getopt'. */
+
+int
+main (int argc, char **argv)
+{
+ int c;
+ int digit_optind = 0;
+
+ while (1)
+ {
+ int this_option_optind = optind ? optind : 1;
+
+ c = getopt (argc, argv, "abc:d:0123456789");
+ if (c == -1)
+ break;
+
+ switch (c)
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ if (digit_optind != 0 && digit_optind != this_option_optind)
+ printf ("digits occur in two different argv-elements.\n");
+ digit_optind = this_option_optind;
+ printf ("option %c\n", c);
+ break;
+
+ case 'a':
+ printf ("option a\n");
+ break;
+
+ case 'b':
+ printf ("option b\n");
+ break;
+
+ case 'c':
+ printf ("option c with value `%s'\n", optarg);
+ break;
+
+ case '?':
+ break;
+
+ default:
+ printf ("?? getopt returned character code 0%o ??\n", c);
+ }
+ }
+
+ if (optind < argc)
+ {
+ printf ("non-option ARGV-elements: ");
+ while (optind < argc)
+ printf ("%s ", argv[optind++]);
+ printf ("\n");
+ }
+
+ exit (0);
+}
+
+#endif /* TEST */
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt.in.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt.in.h
new file mode 100644
index 00000000..9b79b63d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt.in.h
@@ -0,0 +1,228 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* Declarations for getopt.
+ Copyright (C) 1989-1994,1996-1999,2001,2003,2004,2005,2006,2007
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License along
+ with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA. */
+
+#ifndef _GETOPT_H
+
+#ifndef __need_getopt
+# define _GETOPT_H 1
+#endif
+
+/* Standalone applications should #define __GETOPT_PREFIX to an
+ identifier that prefixes the external functions and variables
+ defined in this header. When this happens, include the
+ headers that might declare getopt so that they will not cause
+ confusion if included after this file. Then systematically rename
+ identifiers so that they do not collide with the system functions
+ and variables. Renaming avoids problems with some compilers and
+ linkers. */
+#if defined __GETOPT_PREFIX && !defined __need_getopt
+# include <stdlib.h>
+# include <stdio.h>
+# include <unistd.h>
+# undef __need_getopt
+# undef getopt
+# undef getopt_long
+# undef getopt_long_only
+# undef optarg
+# undef opterr
+# undef optind
+# undef optopt
+# define __GETOPT_CONCAT(x, y) x ## y
+# define __GETOPT_XCONCAT(x, y) __GETOPT_CONCAT (x, y)
+# define __GETOPT_ID(y) __GETOPT_XCONCAT (__GETOPT_PREFIX, y)
+# define getopt __GETOPT_ID (getopt)
+# define getopt_long __GETOPT_ID (getopt_long)
+# define getopt_long_only __GETOPT_ID (getopt_long_only)
+# define optarg __GETOPT_ID (optarg)
+# define opterr __GETOPT_ID (opterr)
+# define optind __GETOPT_ID (optind)
+# define optopt __GETOPT_ID (optopt)
+#endif
+
+/* Standalone applications get correct prototypes for getopt_long and
+ getopt_long_only; they declare "char **argv". libc uses prototypes
+ with "char *const *argv" that are incorrect because getopt_long and
+ getopt_long_only can permute argv; this is required for backward
+ compatibility (e.g., for LSB 2.0.1).
+
+ This used to be `#if defined __GETOPT_PREFIX && !defined __need_getopt',
+ but it caused redefinition warnings if both unistd.h and getopt.h were
+ included, since unistd.h includes getopt.h having previously defined
+ __need_getopt.
+
+ The only place where __getopt_argv_const is used is in definitions
+ of getopt_long and getopt_long_only below, but these are visible
+ only if __need_getopt is not defined, so it is quite safe to rewrite
+ the conditional as follows:
+*/
+#if !defined __need_getopt
+# if defined __GETOPT_PREFIX
+# define __getopt_argv_const /* empty */
+# else
+# define __getopt_argv_const const
+# endif
+#endif
+
+/* If __GNU_LIBRARY__ is not already defined, either we are being used
+ standalone, or this is the first header included in the source file.
+ If we are being used with glibc, we need to include <features.h>, but
+ that does not exist if we are standalone. So: if __GNU_LIBRARY__ is
+ not defined, include <ctype.h>, which will pull in <features.h> for us
+ if it's from glibc. (Why ctype.h? It's guaranteed to exist and it
+ doesn't flood the namespace with stuff the way some other headers do.) */
+#if !defined __GNU_LIBRARY__
+# include <ctype.h>
+#endif
+
+#ifndef __THROW
+# ifndef __GNUC_PREREQ
+# define __GNUC_PREREQ(maj, min) (0)
+# endif
+# if defined __cplusplus && __GNUC_PREREQ (2,8)
+# define __THROW throw ()
+# else
+# define __THROW
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* For communication from `getopt' to the caller.
+ When `getopt' finds an option that takes an argument,
+ the argument value is returned here.
+ Also, when `ordering' is RETURN_IN_ORDER,
+ each non-option ARGV-element is returned here. */
+
+extern char *optarg;
+
+/* Index in ARGV of the next element to be scanned.
+ This is used for communication to and from the caller
+ and for communication between successive calls to `getopt'.
+
+ On entry to `getopt', zero means this is the first call; initialize.
+
+ When `getopt' returns -1, this is the index of the first of the
+ non-option elements that the caller should itself scan.
+
+ Otherwise, `optind' communicates from one call to the next
+ how much of ARGV has been scanned so far. */
+
+extern int optind;
+
+/* Callers store zero here to inhibit the error message `getopt' prints
+ for unrecognized options. */
+
+extern int opterr;
+
+/* Set to an option character which was unrecognized. */
+
+extern int optopt;
+
+#ifndef __need_getopt
+/* Describe the long-named options requested by the application.
+ The LONG_OPTIONS argument to getopt_long or getopt_long_only is a vector
+ of `struct option' terminated by an element containing a name which is
+ zero.
+
+ The field `has_arg' is:
+ no_argument (or 0) if the option does not take an argument,
+ required_argument (or 1) if the option requires an argument,
+ optional_argument (or 2) if the option takes an optional argument.
+
+ If the field `flag' is not NULL, it points to a variable that is set
+ to the value given in the field `val' when the option is found, but
+ left unchanged if the option is not found.
+
+ To have a long-named option do something other than set an `int' to
+ a compiled-in constant, such as set a value from `optarg', set the
+ option's `flag' field to zero and its `val' field to a nonzero
+ value (the equivalent single-letter option character, if there is
+ one). For long options that have a zero `flag' field, `getopt'
+ returns the contents of the `val' field. */
+
+struct option
+{
+ const char *name;
+ /* has_arg can't be an enum because some compilers complain about
+ type mismatches in all the code that assumes it is an int. */
+ int has_arg;
+ int *flag;
+ int val;
+};
+
+/* Names for the values of the `has_arg' field of `struct option'. */
+
+# define no_argument 0
+# define required_argument 1
+# define optional_argument 2
+#endif /* need getopt */
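+
+/* Illustrative sketch (added commentary, not part of the upstream header):
+   a typical table passed to getopt_long, using hypothetical --verbose and
+   --output options:
+
+       static int verbose_flag;
+       static const struct option long_options[] = {
+           {"verbose", no_argument,       &verbose_flag, 1},
+           {"output",  required_argument, 0,             'o'},
+           {0, 0, 0, 0}
+       };
+
+   The first entry stores 1 in verbose_flag and makes getopt_long return 0;
+   the second makes it return 'o' with the argument available in optarg. */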
+
+
+/* Get definitions and prototypes for functions to process the
+ arguments in ARGV (ARGC of them, minus the program name) for
+ options given in OPTS.
+
+ Return the option character from OPTS just read. Return -1 when
+ there are no more options. For unrecognized options, or options
+ missing arguments, `optopt' is set to the option letter, and '?' is
+ returned.
+
+ The OPTS string is a list of characters which are recognized option
+ letters, optionally followed by colons, specifying that that letter
+ takes an argument, to be placed in `optarg'.
+
+ If a letter in OPTS is followed by two colons, its argument is
+ optional. This behavior is specific to the GNU `getopt'.
+
+ The argument `--' causes premature termination of argument
+ scanning, explicitly telling `getopt' that there are no more
+ options.
+
+ If OPTS begins with `-', then non-option arguments are treated as
+ arguments to the option '\1'. This behavior is specific to the GNU
+ `getopt'. If OPTS begins with `+', or POSIXLY_CORRECT is set in
+ the environment, then do not permute arguments. */
+
+extern int getopt (int ___argc, char *const *___argv, const char *__shortopts)
+ __THROW;
+
+#ifndef __need_getopt
+extern int getopt_long (int ___argc, char *__getopt_argv_const *___argv,
+ const char *__shortopts,
+ const struct option *__longopts, int *__longind)
+ __THROW;
+extern int getopt_long_only (int ___argc, char *__getopt_argv_const *___argv,
+ const char *__shortopts,
+ const struct option *__longopts, int *__longind)
+ __THROW;
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Make sure we later can get all the definitions and declarations. */
+#undef __need_getopt
+
+#endif /* getopt.h */
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt1.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt1.c
new file mode 100644
index 00000000..53e588d5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt1.c
@@ -0,0 +1,173 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* getopt_long and getopt_long_only entry points for GNU getopt.
+ Copyright (C) 1987,88,89,90,91,92,93,94,96,97,98,2004,2006
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License along
+ with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA. */
+
+#ifdef _LIBC
+# include <getopt.h>
+#else
+# include <config.h>
+# include "getopt.h"
+#endif
+#include "getopt_int.h"
+
+#include <stdio.h>
+
+/* This needs to come after some library #include
+ to get __GNU_LIBRARY__ defined. */
+#ifdef __GNU_LIBRARY__
+#include <stdlib.h>
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+int
+getopt_long (int argc, char *__getopt_argv_const *argv, const char *options,
+ const struct option *long_options, int *opt_index)
+{
+ return _getopt_internal (argc, (char **) argv, options, long_options,
+ opt_index, 0, 0);
+}
+
+int
+_getopt_long_r (int argc, char **argv, const char *options,
+ const struct option *long_options, int *opt_index,
+ struct _getopt_data *d)
+{
+ return _getopt_internal_r (argc, argv, options, long_options, opt_index,
+ 0, 0, d);
+}
+
+/* Like getopt_long, but '-' as well as '--' can indicate a long option.
+ If an option that starts with '-' (not '--') doesn't match a long option,
+ but does match a short option, it is parsed as a short option
+ instead. */
+
+int
+getopt_long_only (int argc, char *__getopt_argv_const *argv,
+ const char *options,
+ const struct option *long_options, int *opt_index)
+{
+ return _getopt_internal (argc, (char **) argv, options, long_options,
+ opt_index, 1, 0);
+}
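+
+/* Illustrative sketch (added commentary, not part of the upstream source):
+   given a hypothetical entry {"verbose", no_argument, 0, 'v'} and 'v' in
+   the short-option string, getopt_long_only accepts "--verbose", "-verbose"
+   and any unambiguous abbreviation such as "-verb", while a plain "-v"
+   still resolves to the short option. */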
+
+int
+_getopt_long_only_r (int argc, char **argv, const char *options,
+ const struct option *long_options, int *opt_index,
+ struct _getopt_data *d)
+{
+ return _getopt_internal_r (argc, argv, options, long_options, opt_index,
+ 1, 0, d);
+}
+
+
+#ifdef TEST
+
+#include <stdio.h>
+
+int
+main (int argc, char **argv)
+{
+ int c;
+ int digit_optind = 0;
+
+ while (1)
+ {
+ int this_option_optind = optind ? optind : 1;
+ int option_index = 0;
+ static struct option long_options[] =
+ {
+ {"add", 1, 0, 0},
+ {"append", 0, 0, 0},
+ {"delete", 1, 0, 0},
+ {"verbose", 0, 0, 0},
+ {"create", 0, 0, 0},
+ {"file", 1, 0, 0},
+ {0, 0, 0, 0}
+ };
+
+ c = getopt_long (argc, argv, "abc:d:0123456789",
+ long_options, &option_index);
+ if (c == -1)
+ break;
+
+ switch (c)
+ {
+ case 0:
+ printf ("option %s", long_options[option_index].name);
+ if (optarg)
+ printf (" with arg %s", optarg);
+ printf ("\n");
+ break;
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ if (digit_optind != 0 && digit_optind != this_option_optind)
+ printf ("digits occur in two different argv-elements.\n");
+ digit_optind = this_option_optind;
+ printf ("option %c\n", c);
+ break;
+
+ case 'a':
+ printf ("option a\n");
+ break;
+
+ case 'b':
+ printf ("option b\n");
+ break;
+
+ case 'c':
+ printf ("option c with value `%s'\n", optarg);
+ break;
+
+ case 'd':
+ printf ("option d with value `%s'\n", optarg);
+ break;
+
+ case '?':
+ break;
+
+ default:
+ printf ("?? getopt returned character code 0%o ??\n", c);
+ }
+ }
+
+ if (optind < argc)
+ {
+ printf ("non-option ARGV-elements: ");
+ while (optind < argc)
+ printf ("%s ", argv[optind++]);
+ printf ("\n");
+ }
+
+ exit (0);
+}
+
+#endif /* TEST */
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt_int.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt_int.h
new file mode 100644
index 00000000..1585d410
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/lib/getopt_int.h
@@ -0,0 +1,133 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* Internal declarations for getopt.
+ Copyright (C) 1989-1994,1996-1999,2001,2003,2004
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA. */
+
+#ifndef _GETOPT_INT_H
+#define _GETOPT_INT_H 1
+
+extern int _getopt_internal (int ___argc, char **___argv,
+ const char *__shortopts,
+ const struct option *__longopts, int *__longind,
+ int __long_only, int __posixly_correct);
+
+
+/* Reentrant versions which can handle parsing multiple argument
+ vectors at the same time. */
+
+/* Data type for reentrant functions. */
+struct _getopt_data
+{
+ /* These have exactly the same meaning as the corresponding global
+ variables, except that they are used for the reentrant
+ versions of getopt. */
+ int optind;
+ int opterr;
+ int optopt;
+ char *optarg;
+
+ /* Internal members. */
+
+ /* True if the internal members have been initialized. */
+ int __initialized;
+
+ /* The next char to be scanned in the option-element
+ in which the last option character we returned was found.
+ This allows us to pick up the scan where we left off.
+
+ If this is zero, or a null string, it means resume the scan
+ by advancing to the next ARGV-element. */
+ char *__nextchar;
+
+ /* Describe how to deal with options that follow non-option ARGV-elements.
+
+ If the caller did not specify anything,
+ the default is REQUIRE_ORDER if the environment variable
+ POSIXLY_CORRECT is defined, PERMUTE otherwise.
+
+ REQUIRE_ORDER means don't recognize them as options;
+ stop option processing when the first non-option is seen.
+ This is what Unix does.
+ This mode of operation is selected by either setting the environment
+ variable POSIXLY_CORRECT, or using `+' as the first character
+ of the list of option characters, or by calling getopt.
+
+ PERMUTE is the default. We permute the contents of ARGV as we
+ scan, so that eventually all the non-options are at the end.
+ This allows options to be given in any order, even with programs
+ that were not written to expect this.
+
+ RETURN_IN_ORDER is an option available to programs that were
+ written to expect options and other ARGV-elements in any order
+ and that care about the ordering of the two. We describe each
+ non-option ARGV-element as if it were the argument of an option
+ with character code 1. Using `-' as the first character of the
+ list of option characters selects this mode of operation.
+
+ The special argument `--' forces an end of option-scanning regardless
+ of the value of `ordering'. In the case of RETURN_IN_ORDER, only
+ `--' can cause `getopt' to return -1 with `optind' != ARGC. */
+
+ enum
+ {
+ REQUIRE_ORDER, PERMUTE, RETURN_IN_ORDER
+ } __ordering;
+
+ /* If the POSIXLY_CORRECT environment variable is set
+ or getopt was called. */
+ int __posixly_correct;
+
+
+ /* Handle permutation of arguments. */
+
+ /* Describe the part of ARGV that contains non-options that have
+ been skipped. `first_nonopt' is the index in ARGV of the first
+ of them; `last_nonopt' is the index after the last of them. */
+
+ int __first_nonopt;
+ int __last_nonopt;
+
+#if defined _LIBC && defined USE_NONOPTION_FLAGS
+ int __nonoption_flags_max_len;
+ int __nonoption_flags_len;
+# endif
+};
+
+/* The initializer is necessary to set OPTIND and OPTERR to their
+ default values and to clear the initialization flag. */
+#define _GETOPT_DATA_INITIALIZER { 1, 1 }
+
+extern int _getopt_internal_r (int ___argc, char **___argv,
+ const char *__shortopts,
+ const struct option *__longopts, int *__longind,
+ int __long_only, int __posixly_correct,
+ struct _getopt_data *__data);
+
+extern int _getopt_long_r (int ___argc, char **___argv,
+ const char *__shortopts,
+ const struct option *__longopts, int *__longind,
+ struct _getopt_data *__data);
+
+extern int _getopt_long_only_r (int ___argc, char **___argv,
+ const char *__shortopts,
+ const struct option *__longopts,
+ int *__longind,
+ struct _getopt_data *__data);
+
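+/* Illustrative sketch (added commentary, not part of the upstream header):
+   the reentrant entry points keep all parser state in a caller-owned
+   _getopt_data object, e.g.
+
+       struct _getopt_data d = _GETOPT_DATA_INITIALIZER;
+       int c;
+       while ((c = _getopt_long_r (argc, argv, "ab:", longopts, NULL, &d)) != -1)
+           handle_option (c, d.optarg);
+
+   where longopts and handle_option are hypothetical.  Several argument
+   vectors can thus be parsed at the same time without touching the global
+   optind/optarg/opterr/optopt. */
+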
+#endif /* getopt_int.h */
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/acx_pthread.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/acx_pthread.m4
new file mode 100644
index 00000000..d2b11694
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/acx_pthread.m4
@@ -0,0 +1,279 @@
+##### http://autoconf-archive.cryp.to/acx_pthread.html
+#
+# SYNOPSIS
+#
+# ACX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]])
+#
+# DESCRIPTION
+#
+# This macro figures out how to build C programs using POSIX threads.
+# It sets the PTHREAD_LIBS output variable to the threads library and
+# linker flags, and the PTHREAD_CFLAGS output variable to any special
+# C compiler flags that are needed. (The user can also force certain
+# compiler flags/libs to be tested by setting these environment
+# variables.)
+#
+# Also sets PTHREAD_CC to any special C compiler that is needed for
+# multi-threaded programs (defaults to the value of CC otherwise).
+# (This is necessary on AIX to use the special cc_r compiler alias.)
+#
+# NOTE: You are assumed to not only compile your program with these
+# flags, but also to link it with them; e.g. you should link
+# with $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS
+# $LIBS
+#
+# If you are only building threads programs, you may wish to use
+# these variables in your default LIBS, CFLAGS, and CC:
+#
+# LIBS="$PTHREAD_LIBS $LIBS"
+# CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+# CC="$PTHREAD_CC"
+#
+# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute
+# constant has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to
+# that name (e.g. PTHREAD_CREATE_UNDETACHED on AIX).
+#
+# ACTION-IF-FOUND is a list of shell commands to run if a threads
+# library is found, and ACTION-IF-NOT-FOUND is a list of commands to
+# run if it is not found. If ACTION-IF-FOUND is not specified, the
+# default action will define HAVE_PTHREAD.
+#
+# Please let the authors know if this macro fails on any platform, or
+# if you have any other suggestions or comments. This macro was based
+# on work by SGJ on autoconf scripts for FFTW (http://www.fftw.org/)
+# (with help from M. Frigo), as well as ac_pthread and hb_pthread
+# macros posted by Alejandro Forero Cuervo to the autoconf macro
+# repository. We are also grateful for the helpful feedback of
+# numerous users.
+#
+# LAST MODIFICATION
+#
+# 2007-07-29
+#
+# COPYLEFT
+#
+# Copyright (c) 2007 Steven G. Johnson <stevenj@alum.mit.edu>
+#
+# This program is free software: you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see
+# <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright
+# owner gives unlimited permission to copy, distribute and modify the
+# configure scripts that are the output of Autoconf when processing
+# the Macro. You need not follow the terms of the GNU General Public
+# License when using or distributing such scripts, even though
+# portions of the text of the Macro appear in them. The GNU General
+# Public License (GPL) does govern all other use of the material that
+# constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the
+# Autoconf Macro released by the Autoconf Macro Archive. When you
+# make and distribute a modified version of the Autoconf Macro, you
+# may extend this special exception to the GPL to apply to your
+# modified version as well.
+
+AC_DEFUN([ACX_PTHREAD], [
+AC_REQUIRE([AC_CANONICAL_HOST])
+AC_LANG_SAVE
+AC_LANG_C
+acx_pthread_ok=no
+
+# We used to check for pthread.h first, but this fails if pthread.h
+# requires special compiler flags (e.g. on Tru64 or Sequent).
+# It gets checked for in the link test anyway.
+
+# First of all, check if the user has set any of the PTHREAD_LIBS,
+# etcetera environment variables, and if threads linking works using
+# them:
+if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ save_LIBS="$LIBS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS])
+ AC_TRY_LINK_FUNC(pthread_join, acx_pthread_ok=yes)
+ AC_MSG_RESULT($acx_pthread_ok)
+ if test x"$acx_pthread_ok" = xno; then
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+ fi
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+fi
+
+# We must check for the threads library under a number of different
+# names; the ordering is very important because some systems
+# (e.g. DEC) have both -lpthread and -lpthreads, where one of the
+# libraries is broken (non-POSIX).
+
+# Create a list of thread flags to try. Items starting with a "-" are
+# C compiler flags, and other items are library names, except for "none"
+# which indicates that we try without any flags at all, and "pthread-config"
+# which is a program returning the flags for the Pth emulation library.
+
+acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
+
+# The ordering *is* (sometimes) important. Some notes on the
+# individual items follow:
+
+# pthreads: AIX (must check this before -lpthread)
+# none: in case threads are in libc; should be tried before -Kthread and
+# other compiler flags to prevent continual compiler warnings
+# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
+# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
+# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
+# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads)
+# -pthreads: Solaris/gcc
+# -mthreads: Mingw32/gcc, Lynx/gcc
+# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
+# doesn't hurt to check since this sometimes defines pthreads too;
+# also defines -D_REENTRANT)
+# ... -mt is also the pthreads flag for HP/aCC
+# pthread: Linux, etcetera
+# --thread-safe: KAI C++
+# pthread-config: use pthread-config program (for GNU Pth library)
+
+case "${host_cpu}-${host_os}" in
+ *solaris*)
+
+ # On Solaris (at least, for some versions), libc contains stubbed
+ # (non-functional) versions of the pthreads routines, so link-based
+ # tests will erroneously succeed. (We need to link with -pthreads/-mt/
+ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather
+ # a function called by this macro, so we could check for that, but
+ # who knows whether they'll stub that too in a future libc.) So,
+ # we'll just look for -pthreads and -lpthread first:
+
+ acx_pthread_flags="-pthreads pthread -mt -pthread $acx_pthread_flags"
+ ;;
+esac
+
+if test x"$acx_pthread_ok" = xno; then
+for flag in $acx_pthread_flags; do
+
+ case $flag in
+ none)
+ AC_MSG_CHECKING([whether pthreads work without any flags])
+ ;;
+
+ -*)
+ AC_MSG_CHECKING([whether pthreads work with $flag])
+ PTHREAD_CFLAGS="$flag"
+ ;;
+
+ pthread-config)
+ AC_CHECK_PROG(acx_pthread_config, pthread-config, yes, no)
+ if test x"$acx_pthread_config" = xno; then continue; fi
+ PTHREAD_CFLAGS="`pthread-config --cflags`"
+ PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
+ ;;
+
+ *)
+ AC_MSG_CHECKING([for the pthreads library -l$flag])
+ PTHREAD_LIBS="-l$flag"
+ ;;
+ esac
+
+ save_LIBS="$LIBS"
+ save_CFLAGS="$CFLAGS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+
+ # Check for various functions. We must include pthread.h,
+ # since some functions may be macros. (On the Sequent, we
+ # need a special flag -Kthread to make this header compile.)
+ # We check for pthread_join because it is in -lpthread on IRIX
+ # while pthread_create is in libc. We check for pthread_attr_init
+ # due to DEC craziness with -lpthreads. We check for
+ # pthread_cleanup_push because it is one of the few pthread
+ # functions on Solaris that doesn't have a non-functional libc stub.
+ # We try pthread_create on general principles.
+ AC_TRY_LINK([#include <pthread.h>],
+ [pthread_t th; pthread_join(th, 0);
+ pthread_attr_init(0); pthread_cleanup_push(0, 0);
+ pthread_create(0,0,0,0); pthread_cleanup_pop(0); ],
+ [acx_pthread_ok=yes])
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ AC_MSG_RESULT($acx_pthread_ok)
+ if test "x$acx_pthread_ok" = xyes; then
+ break;
+ fi
+
+ PTHREAD_LIBS=""
+ PTHREAD_CFLAGS=""
+done
+fi
+
+# Various other checks:
+if test "x$acx_pthread_ok" = xyes; then
+ save_LIBS="$LIBS"
+ LIBS="$PTHREAD_LIBS $LIBS"
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+
+ # Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
+ AC_MSG_CHECKING([for joinable pthread attribute])
+ attr_name=unknown
+ for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
+ AC_TRY_LINK([#include <pthread.h>], [int attr=$attr; return attr;],
+ [attr_name=$attr; break])
+ done
+ AC_MSG_RESULT($attr_name)
+ if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then
+ AC_DEFINE_UNQUOTED(PTHREAD_CREATE_JOINABLE, $attr_name,
+ [Define to necessary symbol if this constant
+ uses a non-standard name on your system.])
+ fi
+
+ AC_MSG_CHECKING([if more special flags are required for pthreads])
+ flag=no
+ case "${host_cpu}-${host_os}" in
+ *-aix* | *-freebsd* | *-darwin*) flag="-D_THREAD_SAFE";;
+ *solaris* | *-osf* | *-hpux*) flag="-D_REENTRANT";;
+ esac
+ AC_MSG_RESULT(${flag})
+ if test "x$flag" != xno; then
+ PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS"
+ fi
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ # More AIX lossage: must compile with xlc_r or cc_r
+ if test x"$GCC" != xyes; then
+ AC_CHECK_PROGS(PTHREAD_CC, xlc_r cc_r, ${CC})
+ else
+ PTHREAD_CC=$CC
+ fi
+else
+ PTHREAD_CC="$CC"
+fi
+
+AC_SUBST(PTHREAD_LIBS)
+AC_SUBST(PTHREAD_CFLAGS)
+AC_SUBST(PTHREAD_CC)
+
+# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
+if test x"$acx_pthread_ok" = xyes; then
+ ifelse([$1],,AC_DEFINE(HAVE_PTHREAD,1,[Define if you have POSIX threads libraries and header files.]),[$1])
+ :
+else
+ acx_pthread_ok=no
+ $2
+fi
+AC_LANG_RESTORE
+])dnl ACX_PTHREAD
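+
+# Usage sketch (an illustration added here, not part of the original macro):
+# a configure.ac could invoke ACX_PTHREAD and adopt the discovered settings,
+# for example
+#
+#   ACX_PTHREAD([CC="$PTHREAD_CC"
+#                CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+#                LIBS="$PTHREAD_LIBS $LIBS"],
+#               [AC_MSG_ERROR([POSIX threads are required])])
+#
+# Overriding CC/CFLAGS/LIBS and the error message are illustrative choices,
+# not requirements of the macro; PTHREAD_CC, PTHREAD_CFLAGS and PTHREAD_LIBS
+# are the variables AC_SUBSTed above.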
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/getopt.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/getopt.m4
new file mode 100644
index 00000000..cfbe40f2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/getopt.m4
@@ -0,0 +1,71 @@
+# getopt.m4 serial 14 (modified version)
+dnl Copyright (C) 2002-2006, 2008 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+
+# The getopt module assumes you want GNU getopt, with getopt_long etc.,
+# rather than vanilla POSIX getopt. This means your code should
+# always include <getopt.h> for the getopt prototypes.
+
+AC_DEFUN([gl_GETOPT_SUBSTITUTE],
+[
+ AC_LIBOBJ([getopt])
+ AC_LIBOBJ([getopt1])
+ gl_GETOPT_SUBSTITUTE_HEADER
+])
+
+AC_DEFUN([gl_GETOPT_SUBSTITUTE_HEADER],
+[
+ GETOPT_H=getopt.h
+ AC_DEFINE([__GETOPT_PREFIX], [[rpl_]],
+ [Define to rpl_ if the getopt replacement functions and variables
+ should be used.])
+ AC_SUBST([GETOPT_H])
+])
+
+AC_DEFUN([gl_GETOPT_CHECK_HEADERS],
+[
+ if test -z "$GETOPT_H"; then
+ AC_CHECK_HEADERS([getopt.h], [], [GETOPT_H=getopt.h])
+ fi
+
+ if test -z "$GETOPT_H"; then
+ AC_CHECK_FUNCS([getopt_long], [], [GETOPT_H=getopt.h])
+ fi
+
+  dnl BSD getopt_long uses a way to reset option processing that is different
+  dnl from GNU and Solaris (which copied the GNU behavior). We support both
+  dnl GNU and BSD style resetting of getopt_long(), so there's no need to use
+  dnl GNU getopt_long() on BSD just because of the different resetting style.
+ dnl
+ dnl With getopt_long(), some BSD versions have a bug in handling optional
+ dnl arguments. This bug appears only if the environment variable
+ dnl POSIXLY_CORRECT has been set, so it shouldn't be too bad in most
+ dnl cases; probably most don't have that variable set. But if we actually
+ dnl hit this bug, it is a real problem due to our heavy use of optional
+ dnl arguments.
+ dnl
+ dnl According to CVS logs, the bug was introduced in OpenBSD in 2003-09-22
+ dnl and copied to FreeBSD in 2004-02-24. It was fixed in both in 2006-09-22,
+ dnl so the affected versions shouldn't be popular anymore anyway. NetBSD
+ dnl never had this bug. TODO: What about Darwin and others?
+ if test -z "$GETOPT_H"; then
+ AC_CHECK_DECL([optreset],
+ [AC_DEFINE([HAVE_OPTRESET], 1,
+ [Define to 1 if getopt.h declares extern int optreset.])],
+ [], [#include <getopt.h>])
+ fi
+
+ dnl Solaris 10 getopt doesn't handle `+' as a leading character in an
+ dnl option string (as of 2005-05-05). We don't use that feature, so this
+ dnl is not a problem for us. Thus, the respective test was removed here.
+])
+
+AC_DEFUN([gl_GETOPT_IFELSE],
+[
+ AC_REQUIRE([gl_GETOPT_CHECK_HEADERS])
+ AS_IF([test -n "$GETOPT_H"], [$1], [$2])
+])
+
+AC_DEFUN([gl_GETOPT], [gl_GETOPT_IFELSE([gl_GETOPT_SUBSTITUTE])])
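+
+dnl Usage sketch (illustrative, not part of the original file): a configure.ac
+dnl that wants GNU getopt_long() semantics can simply call
+dnl
+dnl   gl_GETOPT
+dnl
+dnl When the system getopt is unusable, GETOPT_H is set to "getopt.h" and the
+dnl replacement getopt/getopt1 objects are pulled in via AC_LIBOBJ as defined
+dnl above; the package is assumed to ship those replacement sources.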
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/gettext.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/gettext.m4
new file mode 100644
index 00000000..91c345e9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/gettext.m4
@@ -0,0 +1,419 @@
+# gettext.m4 serial 59 (gettext-0.16.1)
+dnl Copyright (C) 1995-2006 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+dnl
+dnl This file can be used in projects which are not available under
+dnl the GNU General Public License or the GNU Library General Public
+dnl License but which still want to provide support for the GNU gettext
+dnl functionality.
+dnl Please note that the actual code of the GNU gettext library is covered
+dnl by the GNU Library General Public License, and the rest of the GNU
+dnl gettext package is covered by the GNU General Public License.
+dnl They are *not* in the public domain.
+
+dnl Authors:
+dnl Ulrich Drepper <drepper@cygnus.com>, 1995-2000.
+dnl Bruno Haible <haible@clisp.cons.org>, 2000-2006.
+
+dnl Macro to add for using GNU gettext.
+
+dnl Usage: AM_GNU_GETTEXT([INTLSYMBOL], [NEEDSYMBOL], [INTLDIR]).
+dnl INTLSYMBOL can be one of 'external', 'no-libtool', 'use-libtool'. The
+dnl default (if it is not specified or empty) is 'no-libtool'.
+dnl INTLSYMBOL should be 'external' for packages with no intl directory,
+dnl and 'no-libtool' or 'use-libtool' for packages with an intl directory.
+dnl If INTLSYMBOL is 'use-libtool', then a libtool library
+dnl $(top_builddir)/intl/libintl.la will be created (shared and/or static,
+dnl depending on --{enable,disable}-{shared,static} and on the presence of
+dnl AM-DISABLE-SHARED). If INTLSYMBOL is 'no-libtool', a static library
+dnl $(top_builddir)/intl/libintl.a will be created.
+dnl If NEEDSYMBOL is specified and is 'need-ngettext', then GNU gettext
+dnl implementations (in libc or libintl) without the ngettext() function
+dnl will be ignored. If NEEDSYMBOL is specified and is
+dnl 'need-formatstring-macros', then GNU gettext implementations that don't
+dnl support the ISO C 99 <inttypes.h> formatstring macros will be ignored.
+dnl INTLDIR is used to find the intl libraries. If empty,
+dnl the value `$(top_builddir)/intl/' is used.
+dnl
+dnl The result of the configuration is one of three cases:
+dnl 1) GNU gettext, as included in the intl subdirectory, will be compiled
+dnl and used.
+dnl Catalog format: GNU --> install in $(datadir)
+dnl Catalog extension: .mo after installation, .gmo in source tree
+dnl 2) GNU gettext has been found in the system's C library.
+dnl Catalog format: GNU --> install in $(datadir)
+dnl Catalog extension: .mo after installation, .gmo in source tree
+dnl 3) No internationalization, always use English msgid.
+dnl Catalog format: none
+dnl Catalog extension: none
+dnl If INTLSYMBOL is 'external', only cases 2 and 3 can occur.
+dnl The use of .gmo is historical (it was needed to avoid overwriting the
+dnl GNU format catalogs when building on a platform with an X/Open gettext),
+dnl but we keep it in order not to force irrelevant filename changes on the
+dnl maintainers.
+dnl
+AC_DEFUN([AM_GNU_GETTEXT],
+[
+ dnl Argument checking.
+ ifelse([$1], [], , [ifelse([$1], [external], , [ifelse([$1], [no-libtool], , [ifelse([$1], [use-libtool], ,
+ [errprint([ERROR: invalid first argument to AM_GNU_GETTEXT
+])])])])])
+ ifelse([$2], [], , [ifelse([$2], [need-ngettext], , [ifelse([$2], [need-formatstring-macros], ,
+ [errprint([ERROR: invalid second argument to AM_GNU_GETTEXT
+])])])])
+ define([gt_included_intl],
+ ifelse([$1], [external],
+ ifdef([AM_GNU_GETTEXT_][INTL_SUBDIR], [yes], [no]),
+ [yes]))
+ define([gt_libtool_suffix_prefix], ifelse([$1], [use-libtool], [l], []))
+ gt_NEEDS_INIT
+ AM_GNU_GETTEXT_NEED([$2])
+
+ AC_REQUIRE([AM_PO_SUBDIRS])dnl
+ ifelse(gt_included_intl, yes, [
+ AC_REQUIRE([AM_INTL_SUBDIR])dnl
+ ])
+
+ dnl Prerequisites of AC_LIB_LINKFLAGS_BODY.
+ AC_REQUIRE([AC_LIB_PREPARE_PREFIX])
+ AC_REQUIRE([AC_LIB_RPATH])
+
+ dnl Sometimes libintl requires libiconv, so first search for libiconv.
+ dnl Ideally we would do this search only after the
+ dnl if test "$USE_NLS" = "yes"; then
+ dnl if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then
+ dnl tests. But if configure.in invokes AM_ICONV after AM_GNU_GETTEXT
+ dnl the configure script would need to contain the same shell code
+ dnl again, outside any 'if'. There are two solutions:
+ dnl - Invoke AM_ICONV_LINKFLAGS_BODY here, outside any 'if'.
+ dnl - Control the expansions in more detail using AC_PROVIDE_IFELSE.
+ dnl Since AC_PROVIDE_IFELSE is only in autoconf >= 2.52 and not
+ dnl documented, we avoid it.
+ ifelse(gt_included_intl, yes, , [
+ AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY])
+ ])
+
+ dnl Sometimes, on MacOS X, libintl requires linking with CoreFoundation.
+ gt_INTL_MACOSX
+
+ dnl Set USE_NLS.
+ AC_REQUIRE([AM_NLS])
+
+ ifelse(gt_included_intl, yes, [
+ BUILD_INCLUDED_LIBINTL=no
+ USE_INCLUDED_LIBINTL=no
+ ])
+ LIBINTL=
+ LTLIBINTL=
+ POSUB=
+
+ dnl Add a version number to the cache macros.
+ case " $gt_needs " in
+ *" need-formatstring-macros "*) gt_api_version=3 ;;
+ *" need-ngettext "*) gt_api_version=2 ;;
+ *) gt_api_version=1 ;;
+ esac
+ gt_func_gnugettext_libc="gt_cv_func_gnugettext${gt_api_version}_libc"
+ gt_func_gnugettext_libintl="gt_cv_func_gnugettext${gt_api_version}_libintl"
+
+  dnl If we use NLS, figure out which method to use.
+ if test "$USE_NLS" = "yes"; then
+ gt_use_preinstalled_gnugettext=no
+ ifelse(gt_included_intl, yes, [
+ AC_MSG_CHECKING([whether included gettext is requested])
+ AC_ARG_WITH(included-gettext,
+ [ --with-included-gettext use the GNU gettext library included here],
+ nls_cv_force_use_gnu_gettext=$withval,
+ nls_cv_force_use_gnu_gettext=no)
+ AC_MSG_RESULT($nls_cv_force_use_gnu_gettext)
+
+ nls_cv_use_gnu_gettext="$nls_cv_force_use_gnu_gettext"
+ if test "$nls_cv_force_use_gnu_gettext" != "yes"; then
+ ])
+      dnl The user does not insist on using the GNU NLS library. Figure out
+      dnl what to use. If GNU gettext is available we use it. Otherwise we
+      dnl have to fall back to the included GNU NLS library.
+
+ if test $gt_api_version -ge 3; then
+ gt_revision_test_code='
+#ifndef __GNU_GETTEXT_SUPPORTED_REVISION
+#define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1)
+#endif
+changequote(,)dnl
+typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1];
+changequote([,])dnl
+'
+ else
+ gt_revision_test_code=
+ fi
+ if test $gt_api_version -ge 2; then
+ gt_expression_test_code=' + * ngettext ("", "", 0)'
+ else
+ gt_expression_test_code=
+ fi
+
+ AC_CACHE_CHECK([for GNU gettext in libc], [$gt_func_gnugettext_libc],
+ [AC_TRY_LINK([#include <libintl.h>
+$gt_revision_test_code
+extern int _nl_msg_cat_cntr;
+extern int *_nl_domain_bindings;],
+ [bindtextdomain ("", "");
+return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_domain_bindings],
+ [eval "$gt_func_gnugettext_libc=yes"],
+ [eval "$gt_func_gnugettext_libc=no"])])
+
+ if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then
+ dnl Sometimes libintl requires libiconv, so first search for libiconv.
+ ifelse(gt_included_intl, yes, , [
+ AM_ICONV_LINK
+ ])
+ dnl Search for libintl and define LIBINTL, LTLIBINTL and INCINTL
+ dnl accordingly. Don't use AC_LIB_LINKFLAGS_BODY([intl],[iconv])
+ dnl because that would add "-liconv" to LIBINTL and LTLIBINTL
+ dnl even if libiconv doesn't exist.
+ AC_LIB_LINKFLAGS_BODY([intl])
+ AC_CACHE_CHECK([for GNU gettext in libintl],
+ [$gt_func_gnugettext_libintl],
+ [gt_save_CPPFLAGS="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $INCINTL"
+ gt_save_LIBS="$LIBS"
+ LIBS="$LIBS $LIBINTL"
+ dnl Now see whether libintl exists and does not depend on libiconv.
+ AC_TRY_LINK([#include <libintl.h>
+$gt_revision_test_code
+extern int _nl_msg_cat_cntr;
+extern
+#ifdef __cplusplus
+"C"
+#endif
+const char *_nl_expand_alias (const char *);],
+ [bindtextdomain ("", "");
+return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_expand_alias ("")],
+ [eval "$gt_func_gnugettext_libintl=yes"],
+ [eval "$gt_func_gnugettext_libintl=no"])
+ dnl Now see whether libintl exists and depends on libiconv.
+ if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" != yes; } && test -n "$LIBICONV"; then
+ LIBS="$LIBS $LIBICONV"
+ AC_TRY_LINK([#include <libintl.h>
+$gt_revision_test_code
+extern int _nl_msg_cat_cntr;
+extern
+#ifdef __cplusplus
+"C"
+#endif
+const char *_nl_expand_alias (const char *);],
+ [bindtextdomain ("", "");
+return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_expand_alias ("")],
+ [LIBINTL="$LIBINTL $LIBICONV"
+ LTLIBINTL="$LTLIBINTL $LTLIBICONV"
+ eval "$gt_func_gnugettext_libintl=yes"
+ ])
+ fi
+ CPPFLAGS="$gt_save_CPPFLAGS"
+ LIBS="$gt_save_LIBS"])
+ fi
+
+ dnl If an already present or preinstalled GNU gettext() is found,
+ dnl use it. But if this macro is used in GNU gettext, and GNU
+ dnl gettext is already preinstalled in libintl, we update this
+ dnl libintl. (Cf. the install rule in intl/Makefile.in.)
+ if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" = "yes"; } \
+ || { { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; } \
+ && test "$PACKAGE" != gettext-runtime \
+ && test "$PACKAGE" != gettext-tools; }; then
+ gt_use_preinstalled_gnugettext=yes
+ else
+ dnl Reset the values set by searching for libintl.
+ LIBINTL=
+ LTLIBINTL=
+ INCINTL=
+ fi
+
+ ifelse(gt_included_intl, yes, [
+ if test "$gt_use_preinstalled_gnugettext" != "yes"; then
+ dnl GNU gettext is not found in the C library.
+ dnl Fall back on included GNU gettext library.
+ nls_cv_use_gnu_gettext=yes
+ fi
+ fi
+
+ if test "$nls_cv_use_gnu_gettext" = "yes"; then
+ dnl Mark actions used to generate GNU NLS library.
+ BUILD_INCLUDED_LIBINTL=yes
+ USE_INCLUDED_LIBINTL=yes
+ LIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.[]gt_libtool_suffix_prefix[]a $LIBICONV $LIBTHREAD"
+ LTLIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.[]gt_libtool_suffix_prefix[]a $LTLIBICONV $LTLIBTHREAD"
+ LIBS=`echo " $LIBS " | sed -e 's/ -lintl / /' -e 's/^ //' -e 's/ $//'`
+ fi
+
+ CATOBJEXT=
+ if test "$gt_use_preinstalled_gnugettext" = "yes" \
+ || test "$nls_cv_use_gnu_gettext" = "yes"; then
+ dnl Mark actions to use GNU gettext tools.
+ CATOBJEXT=.gmo
+ fi
+ ])
+
+ if test -n "$INTL_MACOSX_LIBS"; then
+ if test "$gt_use_preinstalled_gnugettext" = "yes" \
+ || test "$nls_cv_use_gnu_gettext" = "yes"; then
+ dnl Some extra flags are needed during linking.
+ LIBINTL="$LIBINTL $INTL_MACOSX_LIBS"
+ LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS"
+ fi
+ fi
+
+ if test "$gt_use_preinstalled_gnugettext" = "yes" \
+ || test "$nls_cv_use_gnu_gettext" = "yes"; then
+ AC_DEFINE(ENABLE_NLS, 1,
+ [Define to 1 if translation of program messages to the user's native language
+ is requested.])
+ else
+ USE_NLS=no
+ fi
+ fi
+
+ AC_MSG_CHECKING([whether to use NLS])
+ AC_MSG_RESULT([$USE_NLS])
+ if test "$USE_NLS" = "yes"; then
+ AC_MSG_CHECKING([where the gettext function comes from])
+ if test "$gt_use_preinstalled_gnugettext" = "yes"; then
+ if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then
+ gt_source="external libintl"
+ else
+ gt_source="libc"
+ fi
+ else
+ gt_source="included intl directory"
+ fi
+ AC_MSG_RESULT([$gt_source])
+ fi
+
+ if test "$USE_NLS" = "yes"; then
+
+ if test "$gt_use_preinstalled_gnugettext" = "yes"; then
+ if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then
+ AC_MSG_CHECKING([how to link with libintl])
+ AC_MSG_RESULT([$LIBINTL])
+ AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCINTL])
+ fi
+
+ dnl For backward compatibility. Some packages may be using this.
+ AC_DEFINE(HAVE_GETTEXT, 1,
+ [Define if the GNU gettext() function is already present or preinstalled.])
+ AC_DEFINE(HAVE_DCGETTEXT, 1,
+ [Define if the GNU dcgettext() function is already present or preinstalled.])
+ fi
+
+ dnl We need to process the po/ directory.
+ POSUB=po
+ fi
+
+ ifelse(gt_included_intl, yes, [
+ dnl If this is used in GNU gettext we have to set BUILD_INCLUDED_LIBINTL
+ dnl to 'yes' because some of the testsuite requires it.
+ if test "$PACKAGE" = gettext-runtime || test "$PACKAGE" = gettext-tools; then
+ BUILD_INCLUDED_LIBINTL=yes
+ fi
+
+ dnl Make all variables we use known to autoconf.
+ AC_SUBST(BUILD_INCLUDED_LIBINTL)
+ AC_SUBST(USE_INCLUDED_LIBINTL)
+ AC_SUBST(CATOBJEXT)
+
+ dnl For backward compatibility. Some configure.ins may be using this.
+ nls_cv_header_intl=
+ nls_cv_header_libgt=
+
+ dnl For backward compatibility. Some Makefiles may be using this.
+ DATADIRNAME=share
+ AC_SUBST(DATADIRNAME)
+
+ dnl For backward compatibility. Some Makefiles may be using this.
+ INSTOBJEXT=.mo
+ AC_SUBST(INSTOBJEXT)
+
+ dnl For backward compatibility. Some Makefiles may be using this.
+ GENCAT=gencat
+ AC_SUBST(GENCAT)
+
+ dnl For backward compatibility. Some Makefiles may be using this.
+ INTLOBJS=
+ if test "$USE_INCLUDED_LIBINTL" = yes; then
+ INTLOBJS="\$(GETTOBJS)"
+ fi
+ AC_SUBST(INTLOBJS)
+
+ dnl Enable libtool support if the surrounding package wishes it.
+ INTL_LIBTOOL_SUFFIX_PREFIX=gt_libtool_suffix_prefix
+ AC_SUBST(INTL_LIBTOOL_SUFFIX_PREFIX)
+ ])
+
+ dnl For backward compatibility. Some Makefiles may be using this.
+ INTLLIBS="$LIBINTL"
+ AC_SUBST(INTLLIBS)
+
+ dnl Make all documented variables known to autoconf.
+ AC_SUBST(LIBINTL)
+ AC_SUBST(LTLIBINTL)
+ AC_SUBST(POSUB)
+])
+
+
+dnl Checks for special options needed on MacOS X.
+dnl Defines INTL_MACOSX_LIBS.
+AC_DEFUN([gt_INTL_MACOSX],
+[
+ dnl Check for API introduced in MacOS X 10.2.
+ AC_CACHE_CHECK([for CFPreferencesCopyAppValue],
+ gt_cv_func_CFPreferencesCopyAppValue,
+ [gt_save_LIBS="$LIBS"
+ LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation"
+ AC_TRY_LINK([#include <CoreFoundation/CFPreferences.h>],
+ [CFPreferencesCopyAppValue(NULL, NULL)],
+ [gt_cv_func_CFPreferencesCopyAppValue=yes],
+ [gt_cv_func_CFPreferencesCopyAppValue=no])
+ LIBS="$gt_save_LIBS"])
+ if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then
+ AC_DEFINE([HAVE_CFPREFERENCESCOPYAPPVALUE], 1,
+ [Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in the CoreFoundation framework.])
+ fi
+ dnl Check for API introduced in MacOS X 10.3.
+ AC_CACHE_CHECK([for CFLocaleCopyCurrent], gt_cv_func_CFLocaleCopyCurrent,
+ [gt_save_LIBS="$LIBS"
+ LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation"
+ AC_TRY_LINK([#include <CoreFoundation/CFLocale.h>], [CFLocaleCopyCurrent();],
+ [gt_cv_func_CFLocaleCopyCurrent=yes],
+ [gt_cv_func_CFLocaleCopyCurrent=no])
+ LIBS="$gt_save_LIBS"])
+ if test $gt_cv_func_CFLocaleCopyCurrent = yes; then
+ AC_DEFINE([HAVE_CFLOCALECOPYCURRENT], 1,
+ [Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the CoreFoundation framework.])
+ fi
+ INTL_MACOSX_LIBS=
+ if test $gt_cv_func_CFPreferencesCopyAppValue = yes || test $gt_cv_func_CFLocaleCopyCurrent = yes; then
+ INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation"
+ fi
+ AC_SUBST([INTL_MACOSX_LIBS])
+])
+
+
+dnl gt_NEEDS_INIT ensures that the gt_needs variable is initialized.
+m4_define([gt_NEEDS_INIT],
+[
+ m4_divert_text([DEFAULTS], [gt_needs=])
+ m4_define([gt_NEEDS_INIT], [])
+])
+
+
+dnl Usage: AM_GNU_GETTEXT_NEED([NEEDSYMBOL])
+AC_DEFUN([AM_GNU_GETTEXT_NEED],
+[
+ m4_divert_text([INIT_PREPARE], [gt_needs="$gt_needs $1"])
+])
+
+
+dnl Usage: AM_GNU_GETTEXT_VERSION([gettext-version])
+AC_DEFUN([AM_GNU_GETTEXT_VERSION], [])
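+
+dnl Usage sketch (illustrative, not part of the original file): a package
+dnl without an intl/ directory that only wants a preinstalled gettext would
+dnl typically call, from configure.ac,
+dnl
+dnl   AM_GNU_GETTEXT_VERSION([0.16.1])
+dnl   AM_GNU_GETTEXT([external], [need-ngettext])
+dnl
+dnl and then link with $(LIBINTL) (or $(LTLIBINTL) under libtool), which are
+dnl AC_SUBSTed above.  The version number shown is only an example.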
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/iconv.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/iconv.m4
new file mode 100644
index 00000000..654c4158
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/iconv.m4
@@ -0,0 +1,101 @@
+# iconv.m4 serial AM4 (gettext-0.11.3)
+dnl Copyright (C) 2000-2002 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+
+dnl From Bruno Haible.
+
+AC_DEFUN([AM_ICONV_LINKFLAGS_BODY],
+[
+ dnl Prerequisites of AC_LIB_LINKFLAGS_BODY.
+ AC_REQUIRE([AC_LIB_PREPARE_PREFIX])
+ AC_REQUIRE([AC_LIB_RPATH])
+
+ dnl Search for libiconv and define LIBICONV, LTLIBICONV and INCICONV
+ dnl accordingly.
+ AC_LIB_LINKFLAGS_BODY([iconv])
+])
+
+AC_DEFUN([AM_ICONV_LINK],
+[
+ dnl Some systems have iconv in libc, some have it in libiconv (OSF/1 and
+ dnl those with the standalone portable GNU libiconv installed).
+
+ dnl Search for libiconv and define LIBICONV, LTLIBICONV and INCICONV
+ dnl accordingly.
+ AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY])
+
+ dnl Add $INCICONV to CPPFLAGS before performing the following checks,
+ dnl because if the user has installed libiconv and not disabled its use
+ dnl via --without-libiconv-prefix, he wants to use it. The first
+ dnl AC_TRY_LINK will then fail, the second AC_TRY_LINK will succeed.
+ am_save_CPPFLAGS="$CPPFLAGS"
+ AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCICONV])
+
+ AC_CACHE_CHECK(for iconv, am_cv_func_iconv, [
+ am_cv_func_iconv="no, consider installing GNU libiconv"
+ am_cv_lib_iconv=no
+ AC_TRY_LINK([#include <stdlib.h>
+#include <iconv.h>],
+ [iconv_t cd = iconv_open("","");
+ iconv(cd,NULL,NULL,NULL,NULL);
+ iconv_close(cd);],
+ am_cv_func_iconv=yes)
+ if test "$am_cv_func_iconv" != yes; then
+ am_save_LIBS="$LIBS"
+ LIBS="$LIBS $LIBICONV"
+ AC_TRY_LINK([#include <stdlib.h>
+#include <iconv.h>],
+ [iconv_t cd = iconv_open("","");
+ iconv(cd,NULL,NULL,NULL,NULL);
+ iconv_close(cd);],
+ am_cv_lib_iconv=yes
+ am_cv_func_iconv=yes)
+ LIBS="$am_save_LIBS"
+ fi
+ ])
+ if test "$am_cv_func_iconv" = yes; then
+ AC_DEFINE(HAVE_ICONV, 1, [Define if you have the iconv() function.])
+ fi
+ if test "$am_cv_lib_iconv" = yes; then
+ AC_MSG_CHECKING([how to link with libiconv])
+ AC_MSG_RESULT([$LIBICONV])
+ else
+ dnl If $LIBICONV didn't lead to a usable library, we don't need $INCICONV
+ dnl either.
+ CPPFLAGS="$am_save_CPPFLAGS"
+ LIBICONV=
+ LTLIBICONV=
+ fi
+ AC_SUBST(LIBICONV)
+ AC_SUBST(LTLIBICONV)
+])
+
+AC_DEFUN([AM_ICONV],
+[
+ AM_ICONV_LINK
+ if test "$am_cv_func_iconv" = yes; then
+ AC_MSG_CHECKING([for iconv declaration])
+ AC_CACHE_VAL(am_cv_proto_iconv, [
+ AC_TRY_COMPILE([
+#include <stdlib.h>
+#include <iconv.h>
+extern
+#ifdef __cplusplus
+"C"
+#endif
+#if defined(__STDC__) || defined(__cplusplus)
+size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);
+#else
+size_t iconv();
+#endif
+], [], am_cv_proto_iconv_arg1="", am_cv_proto_iconv_arg1="const")
+ am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);"])
+ am_cv_proto_iconv=`echo "[$]am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'`
+ AC_MSG_RESULT([$]{ac_t:-
+ }[$]am_cv_proto_iconv)
+ AC_DEFINE_UNQUOTED(ICONV_CONST, $am_cv_proto_iconv_arg1,
+ [Define as const if the declaration of iconv() needs const.])
+ fi
+])
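+
+dnl Usage sketch (illustrative, not part of the original file): configure.ac
+dnl can simply call
+dnl
+dnl   AM_ICONV
+dnl
+dnl which defines HAVE_ICONV and ICONV_CONST when a usable iconv() is found,
+dnl and AC_SUBSTs LIBICONV/LTLIBICONV for the link step.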
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lc_cpucores.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lc_cpucores.m4
new file mode 100644
index 00000000..2fae953a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lc_cpucores.m4
@@ -0,0 +1,57 @@
+dnl ###########################################################################
+dnl
+dnl lc_CPUCORES - Check how to find out the number of online CPU cores
+dnl
+dnl Check how to find out the number of available CPU cores in the system.
+dnl sysconf(_SC_NPROCESSORS_ONLN) works on most systems, except that BSDs
+dnl use sysctl().
+dnl
+dnl ###########################################################################
+dnl
+dnl Author: Lasse Collin
+dnl
+dnl This file has been put into the public domain.
+dnl You can do whatever you want with this file.
+dnl
+dnl ###########################################################################
+AC_DEFUN([lc_CPUCORES], [
+AC_MSG_CHECKING([how to detect the number of available CPU cores])
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+#include <unistd.h>
+int
+main(void)
+{
+ long i;
+ i = sysconf(_SC_NPROCESSORS_ONLN);
+ return 0;
+}
+]])], [
+ AC_DEFINE([HAVE_CPUCORES_SYSCONF], [1],
+ [Define to 1 if the number of available CPU cores can be
+ detected with sysconf(_SC_NPROCESSORS_ONLN).])
+ AC_MSG_RESULT([sysconf])
+], [
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+#include <sys/types.h>
+#ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+#endif
+#include <sys/sysctl.h>
+int
+main(void)
+{
+ int name[2] = { CTL_HW, HW_NCPU };
+ int cpus;
+ size_t cpus_size = sizeof(cpus);
+ sysctl(name, 2, &cpus, &cpus_size, NULL, NULL);
+ return 0;
+}
+]])], [
+ AC_DEFINE([HAVE_CPUCORES_SYSCTL], [1],
+ [Define to 1 if the number of available CPU cores can be
+ detected with sysctl().])
+ AC_MSG_RESULT([sysctl])
+], [
+ AC_MSG_RESULT([unknown])
+])])
+])dnl lc_CPUCORES
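+
+dnl Usage sketch (illustrative, not part of the original file): configure.ac
+dnl calls
+dnl
+dnl   lc_CPUCORES
+dnl
+dnl and the C code then chooses sysconf(_SC_NPROCESSORS_ONLN) or sysctl() at
+dnl compile time based on HAVE_CPUCORES_SYSCONF / HAVE_CPUCORES_SYSCTL.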
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lc_physmem.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lc_physmem.m4
new file mode 100644
index 00000000..78be1362
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lc_physmem.m4
@@ -0,0 +1,84 @@
+dnl ###########################################################################
+dnl
+dnl lc_PHYSMEM - Check how to find out the amount of physical memory
+dnl
+dnl - sysconf() gives all the needed info on GNU+Linux and Solaris.
+dnl - BSDs use sysctl().
+dnl - sysinfo() works on Linux/dietlibc and probably on other Linux systems
+dnl whose libc may lack sysconf().
+dnl
+dnl ###########################################################################
+dnl
+dnl Author: Lasse Collin
+dnl
+dnl This file has been put into the public domain.
+dnl You can do whatever you want with this file.
+dnl
+dnl ###########################################################################
+AC_DEFUN([lc_PHYSMEM], [
+AC_MSG_CHECKING([how to detect the amount of physical memory])
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+#include <unistd.h>
+int
+main(void)
+{
+ long i;
+ i = sysconf(_SC_PAGESIZE);
+ i = sysconf(_SC_PHYS_PAGES);
+ return 0;
+}
+]])], [
+ AC_DEFINE([HAVE_PHYSMEM_SYSCONF], [1],
+ [Define to 1 if the amount of physical memory can be detected
+ with sysconf(_SC_PAGESIZE) and sysconf(_SC_PHYS_PAGES).])
+ AC_MSG_RESULT([sysconf])
+], [
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+#include <sys/types.h>
+#ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+#endif
+#include <sys/sysctl.h>
+int
+main(void)
+{
+ int name[2] = { CTL_HW, HW_PHYSMEM };
+ unsigned long mem;
+ size_t mem_ptr_size = sizeof(mem);
+ sysctl(name, 2, &mem, &mem_ptr_size, NULL, NULL);
+ return 0;
+}
+]])], [
+ AC_DEFINE([HAVE_PHYSMEM_SYSCTL], [1],
+ [Define to 1 if the amount of physical memory can be detected
+ with sysctl().])
+ AC_MSG_RESULT([sysctl])
+], [
+dnl sysinfo() is Linux-specific. Some non-Linux systems have
+dnl incompatible sysinfo() so we must check $host_os.
+case $host_os in
+ linux*)
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+#include <sys/sysinfo.h>
+int
+main(void)
+{
+ struct sysinfo si;
+ sysinfo(&si);
+ return 0;
+}
+ ]])], [
+ AC_DEFINE([HAVE_PHYSMEM_SYSINFO], [1],
+ [Define to 1 if the amount of physical memory
+ can be detected with Linux sysinfo().])
+ AC_MSG_RESULT([sysinfo])
+ ], [
+ AC_MSG_RESULT([unknown])
+ ])
+ ;;
+ *)
+ AC_MSG_RESULT([unknown])
+ ;;
+esac
+])])
+])dnl lc_PHYSMEM
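+
+dnl Usage sketch (illustrative, not part of the original file): configure.ac
+dnl calls
+dnl
+dnl   lc_PHYSMEM
+dnl
+dnl and the C code selects among sysconf(), sysctl() and Linux sysinfo()
+dnl based on HAVE_PHYSMEM_SYSCONF, HAVE_PHYSMEM_SYSCTL and
+dnl HAVE_PHYSMEM_SYSINFO.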
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-ld.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-ld.m4
new file mode 100644
index 00000000..96c4e2c3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-ld.m4
@@ -0,0 +1,110 @@
+# lib-ld.m4 serial 3 (gettext-0.13)
+dnl Copyright (C) 1996-2003 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+
+dnl Subroutines of libtool.m4,
+dnl with replacements s/AC_/AC_LIB/ and s/lt_cv/acl_cv/ to avoid collision
+dnl with libtool.m4.
+
+dnl From libtool-1.4. Sets the variable with_gnu_ld to yes or no.
+AC_DEFUN([AC_LIB_PROG_LD_GNU],
+[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], acl_cv_prog_gnu_ld,
+[# I'd rather use --version here, but apparently some GNU ld's only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ acl_cv_prog_gnu_ld=yes ;;
+*)
+ acl_cv_prog_gnu_ld=no ;;
+esac])
+with_gnu_ld=$acl_cv_prog_gnu_ld
+])
+
+dnl From libtool-1.4. Sets the variable LD.
+AC_DEFUN([AC_LIB_PROG_LD],
+[AC_ARG_WITH(gnu-ld,
+[ --with-gnu-ld assume the C compiler uses GNU ld [default=no]],
+test "$withval" = no || with_gnu_ld=yes, with_gnu_ld=no)
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+# Prepare PATH_SEPARATOR.
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ AC_MSG_CHECKING([for ld used by GCC])
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [[\\/]* | [A-Za-z]:[\\/]*)]
+ [re_direlt='/[^/][^/]*/\.\./']
+ # Canonicalize the path of ld
+ ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
+ while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ AC_MSG_CHECKING([for GNU ld])
+else
+ AC_MSG_CHECKING([for non-GNU ld])
+fi
+AC_CACHE_VAL(acl_cv_path_LD,
+[if test -z "$LD"; then
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}"
+ for ac_dir in $PATH; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ acl_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some GNU ld's only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$acl_cv_path_LD" -v 2>&1 < /dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break ;;
+ *)
+ test "$with_gnu_ld" != yes && break ;;
+ esac
+ fi
+ done
+ IFS="$ac_save_ifs"
+else
+ acl_cv_path_LD="$LD" # Let the user override the test with a path.
+fi])
+LD="$acl_cv_path_LD"
+if test -n "$LD"; then
+ AC_MSG_RESULT($LD)
+else
+ AC_MSG_RESULT(no)
+fi
+test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
+AC_LIB_PROG_LD_GNU
+])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-link.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-link.m4
new file mode 100644
index 00000000..f95b7ba8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-link.m4
@@ -0,0 +1,644 @@
+# lib-link.m4 serial 9 (gettext-0.16)
+dnl Copyright (C) 2001-2006 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+
+dnl From Bruno Haible.
+
+AC_PREREQ(2.50)
+
+dnl AC_LIB_LINKFLAGS(name [, dependencies]) searches for libname and
+dnl the libraries corresponding to explicit and implicit dependencies.
+dnl Sets and AC_SUBSTs the LIB${NAME} and LTLIB${NAME} variables and
+dnl augments the CPPFLAGS variable.
+AC_DEFUN([AC_LIB_LINKFLAGS],
+[
+ AC_REQUIRE([AC_LIB_PREPARE_PREFIX])
+ AC_REQUIRE([AC_LIB_RPATH])
+ define([Name],[translit([$1],[./-], [___])])
+ define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-],
+ [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])])
+ AC_CACHE_CHECK([how to link with lib[]$1], [ac_cv_lib[]Name[]_libs], [
+ AC_LIB_LINKFLAGS_BODY([$1], [$2])
+ ac_cv_lib[]Name[]_libs="$LIB[]NAME"
+ ac_cv_lib[]Name[]_ltlibs="$LTLIB[]NAME"
+ ac_cv_lib[]Name[]_cppflags="$INC[]NAME"
+ ])
+ LIB[]NAME="$ac_cv_lib[]Name[]_libs"
+ LTLIB[]NAME="$ac_cv_lib[]Name[]_ltlibs"
+ INC[]NAME="$ac_cv_lib[]Name[]_cppflags"
+ AC_LIB_APPENDTOVAR([CPPFLAGS], [$INC]NAME)
+ AC_SUBST([LIB]NAME)
+ AC_SUBST([LTLIB]NAME)
+ dnl Also set HAVE_LIB[]NAME so that AC_LIB_HAVE_LINKFLAGS can reuse the
+ dnl results of this search when this library appears as a dependency.
+ HAVE_LIB[]NAME=yes
+ undefine([Name])
+ undefine([NAME])
+])
+
+dnl AC_LIB_HAVE_LINKFLAGS(name, dependencies, includes, testcode)
+dnl searches for libname and the libraries corresponding to explicit and
+dnl implicit dependencies, together with the specified include files and
+dnl the ability to compile and link the specified testcode. If found, it
+dnl sets and AC_SUBSTs HAVE_LIB${NAME}=yes and the LIB${NAME} and
+dnl LTLIB${NAME} variables and augments the CPPFLAGS variable, and
+dnl #defines HAVE_LIB${NAME} to 1. Otherwise, it sets and AC_SUBSTs
+dnl HAVE_LIB${NAME}=no and LIB${NAME} and LTLIB${NAME} to empty.
+AC_DEFUN([AC_LIB_HAVE_LINKFLAGS],
+[
+ AC_REQUIRE([AC_LIB_PREPARE_PREFIX])
+ AC_REQUIRE([AC_LIB_RPATH])
+ define([Name],[translit([$1],[./-], [___])])
+ define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-],
+ [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])])
+
+ dnl Search for lib[]Name and define LIB[]NAME, LTLIB[]NAME and INC[]NAME
+ dnl accordingly.
+ AC_LIB_LINKFLAGS_BODY([$1], [$2])
+
+ dnl Add $INC[]NAME to CPPFLAGS before performing the following checks,
+ dnl because if the user has installed lib[]Name and not disabled its use
+ dnl via --without-lib[]Name-prefix, he wants to use it.
+ ac_save_CPPFLAGS="$CPPFLAGS"
+ AC_LIB_APPENDTOVAR([CPPFLAGS], [$INC]NAME)
+
+ AC_CACHE_CHECK([for lib[]$1], [ac_cv_lib[]Name], [
+ ac_save_LIBS="$LIBS"
+ LIBS="$LIBS $LIB[]NAME"
+ AC_TRY_LINK([$3], [$4], [ac_cv_lib[]Name=yes], [ac_cv_lib[]Name=no])
+ LIBS="$ac_save_LIBS"
+ ])
+ if test "$ac_cv_lib[]Name" = yes; then
+ HAVE_LIB[]NAME=yes
+ AC_DEFINE([HAVE_LIB]NAME, 1, [Define if you have the $1 library.])
+ AC_MSG_CHECKING([how to link with lib[]$1])
+ AC_MSG_RESULT([$LIB[]NAME])
+ else
+ HAVE_LIB[]NAME=no
+ dnl If $LIB[]NAME didn't lead to a usable library, we don't need
+ dnl $INC[]NAME either.
+ CPPFLAGS="$ac_save_CPPFLAGS"
+ LIB[]NAME=
+ LTLIB[]NAME=
+ fi
+ AC_SUBST([HAVE_LIB]NAME)
+ AC_SUBST([LIB]NAME)
+ AC_SUBST([LTLIB]NAME)
+ undefine([Name])
+ undefine([NAME])
+])
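+
+dnl Usage sketch (illustrative; "foo" below is a hypothetical library name):
+dnl
+dnl   AC_LIB_HAVE_LINKFLAGS([foo], [], [#include <foo.h>], [foo_init();])
+dnl
+dnl If the test program links, this sets and AC_SUBSTs HAVE_LIBFOO=yes,
+dnl #defines HAVE_LIBFOO and leaves the needed flags in LIBFOO/LTLIBFOO;
+dnl otherwise HAVE_LIBFOO=no and the variables are left empty.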
+
+dnl Determine the platform dependent parameters needed to use rpath:
+dnl libext, shlibext, hardcode_libdir_flag_spec, hardcode_libdir_separator,
+dnl hardcode_direct, hardcode_minus_L.
+AC_DEFUN([AC_LIB_RPATH],
+[
+ dnl Tell automake >= 1.10 to complain if config.rpath is missing.
+ m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([config.rpath])])
+ AC_REQUIRE([AC_PROG_CC]) dnl we use $CC, $GCC, $LDFLAGS
+ AC_REQUIRE([AC_LIB_PROG_LD]) dnl we use $LD, $with_gnu_ld
+ AC_REQUIRE([AC_CANONICAL_HOST]) dnl we use $host
+ AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT]) dnl we use $ac_aux_dir
+ AC_CACHE_CHECK([for shared library run path origin], acl_cv_rpath, [
+ CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \
+ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh
+ . ./conftest.sh
+ rm -f ./conftest.sh
+ acl_cv_rpath=done
+ ])
+ wl="$acl_cv_wl"
+ libext="$acl_cv_libext"
+ shlibext="$acl_cv_shlibext"
+ hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec"
+ hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator"
+ hardcode_direct="$acl_cv_hardcode_direct"
+ hardcode_minus_L="$acl_cv_hardcode_minus_L"
+ dnl Determine whether the user wants rpath handling at all.
+ AC_ARG_ENABLE(rpath,
+ [ --disable-rpath do not hardcode runtime library paths],
+ :, enable_rpath=yes)
+])
+
+dnl AC_LIB_LINKFLAGS_BODY(name [, dependencies]) searches for libname and
+dnl the libraries corresponding to explicit and implicit dependencies.
+dnl Sets the LIB${NAME}, LTLIB${NAME} and INC${NAME} variables.
+AC_DEFUN([AC_LIB_LINKFLAGS_BODY],
+[
+ AC_REQUIRE([AC_LIB_PREPARE_MULTILIB])
+ define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-],
+ [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])])
+ dnl By default, look in $includedir and $libdir.
+ use_additional=yes
+ AC_LIB_WITH_FINAL_PREFIX([
+ eval additional_includedir=\"$includedir\"
+ eval additional_libdir=\"$libdir\"
+ ])
+ AC_LIB_ARG_WITH([lib$1-prefix],
+[ --with-lib$1-prefix[=DIR] search for lib$1 in DIR/include and DIR/lib
+ --without-lib$1-prefix don't search for lib$1 in includedir and libdir],
+[
+ if test "X$withval" = "Xno"; then
+ use_additional=no
+ else
+ if test "X$withval" = "X"; then
+ AC_LIB_WITH_FINAL_PREFIX([
+ eval additional_includedir=\"$includedir\"
+ eval additional_libdir=\"$libdir\"
+ ])
+ else
+ additional_includedir="$withval/include"
+ additional_libdir="$withval/$acl_libdirstem"
+ fi
+ fi
+])
+ dnl Search the library and its dependencies in $additional_libdir and
+  dnl $LDFLAGS. Using breadth-first search.
+ LIB[]NAME=
+ LTLIB[]NAME=
+ INC[]NAME=
+ rpathdirs=
+ ltrpathdirs=
+ names_already_handled=
+ names_next_round='$1 $2'
+ while test -n "$names_next_round"; do
+ names_this_round="$names_next_round"
+ names_next_round=
+ for name in $names_this_round; do
+ already_handled=
+ for n in $names_already_handled; do
+ if test "$n" = "$name"; then
+ already_handled=yes
+ break
+ fi
+ done
+ if test -z "$already_handled"; then
+ names_already_handled="$names_already_handled $name"
+ dnl See if it was already located by an earlier AC_LIB_LINKFLAGS
+ dnl or AC_LIB_HAVE_LINKFLAGS call.
+ uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'`
+ eval value=\"\$HAVE_LIB$uppername\"
+ if test -n "$value"; then
+ if test "$value" = yes; then
+ eval value=\"\$LIB$uppername\"
+ test -z "$value" || LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$value"
+ eval value=\"\$LTLIB$uppername\"
+ test -z "$value" || LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$value"
+ else
+ dnl An earlier call to AC_LIB_HAVE_LINKFLAGS has determined
+ dnl that this library doesn't exist. So just drop it.
+ :
+ fi
+ else
+ dnl Search the library lib$name in $additional_libdir and $LDFLAGS
+ dnl and the already constructed $LIBNAME/$LTLIBNAME.
+ found_dir=
+ found_la=
+ found_so=
+ found_a=
+ if test $use_additional = yes; then
+ if test -n "$shlibext" \
+ && { test -f "$additional_libdir/lib$name.$shlibext" \
+ || { test "$shlibext" = dll \
+ && test -f "$additional_libdir/lib$name.dll.a"; }; }; then
+ found_dir="$additional_libdir"
+ if test -f "$additional_libdir/lib$name.$shlibext"; then
+ found_so="$additional_libdir/lib$name.$shlibext"
+ else
+ found_so="$additional_libdir/lib$name.dll.a"
+ fi
+ if test -f "$additional_libdir/lib$name.la"; then
+ found_la="$additional_libdir/lib$name.la"
+ fi
+ else
+ if test -f "$additional_libdir/lib$name.$libext"; then
+ found_dir="$additional_libdir"
+ found_a="$additional_libdir/lib$name.$libext"
+ if test -f "$additional_libdir/lib$name.la"; then
+ found_la="$additional_libdir/lib$name.la"
+ fi
+ fi
+ fi
+ fi
+ if test "X$found_dir" = "X"; then
+ for x in $LDFLAGS $LTLIB[]NAME; do
+ AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"])
+ case "$x" in
+ -L*)
+ dir=`echo "X$x" | sed -e 's/^X-L//'`
+ if test -n "$shlibext" \
+ && { test -f "$dir/lib$name.$shlibext" \
+ || { test "$shlibext" = dll \
+ && test -f "$dir/lib$name.dll.a"; }; }; then
+ found_dir="$dir"
+ if test -f "$dir/lib$name.$shlibext"; then
+ found_so="$dir/lib$name.$shlibext"
+ else
+ found_so="$dir/lib$name.dll.a"
+ fi
+ if test -f "$dir/lib$name.la"; then
+ found_la="$dir/lib$name.la"
+ fi
+ else
+ if test -f "$dir/lib$name.$libext"; then
+ found_dir="$dir"
+ found_a="$dir/lib$name.$libext"
+ if test -f "$dir/lib$name.la"; then
+ found_la="$dir/lib$name.la"
+ fi
+ fi
+ fi
+ ;;
+ esac
+ if test "X$found_dir" != "X"; then
+ break
+ fi
+ done
+ fi
+ if test "X$found_dir" != "X"; then
+ dnl Found the library.
+ LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$found_dir -l$name"
+ if test "X$found_so" != "X"; then
+ dnl Linking with a shared library. We attempt to hardcode its
+ dnl directory into the executable's runpath, unless it's the
+ dnl standard /usr/lib.
+ if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then
+ dnl No hardcoding is needed.
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so"
+ else
+ dnl Use an explicit option to hardcode DIR into the resulting
+ dnl binary.
+ dnl Potentially add DIR to ltrpathdirs.
+ dnl The ltrpathdirs will be appended to $LTLIBNAME at the end.
+ haveit=
+ for x in $ltrpathdirs; do
+ if test "X$x" = "X$found_dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ ltrpathdirs="$ltrpathdirs $found_dir"
+ fi
+ dnl The hardcoding into $LIBNAME is system dependent.
+ if test "$hardcode_direct" = yes; then
+ dnl Using DIR/libNAME.so during linking hardcodes DIR into the
+ dnl resulting binary.
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so"
+ else
+ if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then
+ dnl Use an explicit option to hardcode DIR into the resulting
+ dnl binary.
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so"
+ dnl Potentially add DIR to rpathdirs.
+ dnl The rpathdirs will be appended to $LIBNAME at the end.
+ haveit=
+ for x in $rpathdirs; do
+ if test "X$x" = "X$found_dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ rpathdirs="$rpathdirs $found_dir"
+ fi
+ else
+ dnl Rely on "-L$found_dir".
+ dnl But don't add it if it's already contained in the LDFLAGS
+ dnl or the already constructed $LIBNAME
+ haveit=
+ for x in $LDFLAGS $LIB[]NAME; do
+ AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"])
+ if test "X$x" = "X-L$found_dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir"
+ fi
+ if test "$hardcode_minus_L" != no; then
+ dnl FIXME: Not sure whether we should use
+ dnl "-L$found_dir -l$name" or "-L$found_dir $found_so"
+ dnl here.
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so"
+ else
+ dnl We cannot use $hardcode_runpath_var and LD_RUN_PATH
+ dnl here, because this doesn't fit in flags passed to the
+ dnl compiler. So give up. No hardcoding. This affects only
+ dnl very old systems.
+ dnl FIXME: Not sure whether we should use
+ dnl "-L$found_dir -l$name" or "-L$found_dir $found_so"
+ dnl here.
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name"
+ fi
+ fi
+ fi
+ fi
+ else
+ if test "X$found_a" != "X"; then
+ dnl Linking with a static library.
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_a"
+ else
+ dnl We shouldn't come here, but anyway it's good to have a
+ dnl fallback.
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir -l$name"
+ fi
+ fi
+ dnl Assume the include files are nearby.
+ additional_includedir=
+ case "$found_dir" in
+ */$acl_libdirstem | */$acl_libdirstem/)
+ basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'`
+ additional_includedir="$basedir/include"
+ ;;
+ esac
+ if test "X$additional_includedir" != "X"; then
+ dnl Potentially add $additional_includedir to $INCNAME.
+ dnl But don't add it
+ dnl 1. if it's the standard /usr/include,
+ dnl 2. if it's /usr/local/include and we are using GCC on Linux,
+ dnl 3. if it's already present in $CPPFLAGS or the already
+ dnl constructed $INCNAME,
+ dnl 4. if it doesn't exist as a directory.
+ if test "X$additional_includedir" != "X/usr/include"; then
+ haveit=
+ if test "X$additional_includedir" = "X/usr/local/include"; then
+ if test -n "$GCC"; then
+ case $host_os in
+ linux* | gnu* | k*bsd*-gnu) haveit=yes;;
+ esac
+ fi
+ fi
+ if test -z "$haveit"; then
+ for x in $CPPFLAGS $INC[]NAME; do
+ AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"])
+ if test "X$x" = "X-I$additional_includedir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test -d "$additional_includedir"; then
+ dnl Really add $additional_includedir to $INCNAME.
+ INC[]NAME="${INC[]NAME}${INC[]NAME:+ }-I$additional_includedir"
+ fi
+ fi
+ fi
+ fi
+ fi
+ dnl Look for dependencies.
+ if test -n "$found_la"; then
+ dnl Read the .la file. It defines the variables
+ dnl dlname, library_names, old_library, dependency_libs, current,
+ dnl age, revision, installed, dlopen, dlpreopen, libdir.
+ save_libdir="$libdir"
+ case "$found_la" in
+ */* | *\\*) . "$found_la" ;;
+ *) . "./$found_la" ;;
+ esac
+ libdir="$save_libdir"
+ dnl We use only dependency_libs.
+ for dep in $dependency_libs; do
+ case "$dep" in
+ -L*)
+ additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'`
+ dnl Potentially add $additional_libdir to $LIBNAME and $LTLIBNAME.
+ dnl But don't add it
+ dnl 1. if it's the standard /usr/lib,
+ dnl 2. if it's /usr/local/lib and we are using GCC on Linux,
+ dnl 3. if it's already present in $LDFLAGS or the already
+ dnl constructed $LIBNAME,
+ dnl 4. if it doesn't exist as a directory.
+ if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then
+ haveit=
+ if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then
+ if test -n "$GCC"; then
+ case $host_os in
+ linux* | gnu* | k*bsd*-gnu) haveit=yes;;
+ esac
+ fi
+ fi
+ if test -z "$haveit"; then
+ haveit=
+ for x in $LDFLAGS $LIB[]NAME; do
+ AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"])
+ if test "X$x" = "X-L$additional_libdir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test -d "$additional_libdir"; then
+ dnl Really add $additional_libdir to $LIBNAME.
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$additional_libdir"
+ fi
+ fi
+ haveit=
+ for x in $LDFLAGS $LTLIB[]NAME; do
+ AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"])
+ if test "X$x" = "X-L$additional_libdir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test -d "$additional_libdir"; then
+ dnl Really add $additional_libdir to $LTLIBNAME.
+ LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$additional_libdir"
+ fi
+ fi
+ fi
+ fi
+ ;;
+ -R*)
+ dir=`echo "X$dep" | sed -e 's/^X-R//'`
+ if test "$enable_rpath" != no; then
+ dnl Potentially add DIR to rpathdirs.
+ dnl The rpathdirs will be appended to $LIBNAME at the end.
+ haveit=
+ for x in $rpathdirs; do
+ if test "X$x" = "X$dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ rpathdirs="$rpathdirs $dir"
+ fi
+ dnl Potentially add DIR to ltrpathdirs.
+ dnl The ltrpathdirs will be appended to $LTLIBNAME at the end.
+ haveit=
+ for x in $ltrpathdirs; do
+ if test "X$x" = "X$dir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ ltrpathdirs="$ltrpathdirs $dir"
+ fi
+ fi
+ ;;
+ -l*)
+ dnl Handle this in the next round.
+ names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'`
+ ;;
+ *.la)
+ dnl Handle this in the next round. Throw away the .la's
+ dnl directory; it is already contained in a preceding -L
+ dnl option.
+ names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'`
+ ;;
+ *)
+ dnl Most likely an immediate library name.
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$dep"
+ LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$dep"
+ ;;
+ esac
+ done
+ fi
+ else
+ dnl Didn't find the library; assume it is in the system directories
+ dnl known to the linker and runtime loader. (All the system
+ dnl directories known to the linker should also be known to the
+ dnl runtime loader, otherwise the system is severely misconfigured.)
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name"
+ LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-l$name"
+ fi
+ fi
+ fi
+ done
+ done
+ if test "X$rpathdirs" != "X"; then
+ if test -n "$hardcode_libdir_separator"; then
+ dnl Weird platform: only the last -rpath option counts, the user must
+ dnl pass all path elements in one option. We can arrange that for a
+ dnl single library, but not when more than one $LIBNAMEs are used.
+ alldirs=
+ for found_dir in $rpathdirs; do
+ alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$found_dir"
+ done
+ dnl Note: hardcode_libdir_flag_spec uses $libdir and $wl.
+ acl_save_libdir="$libdir"
+ libdir="$alldirs"
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ libdir="$acl_save_libdir"
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag"
+ else
+ dnl The -rpath options are cumulative.
+ for found_dir in $rpathdirs; do
+ acl_save_libdir="$libdir"
+ libdir="$found_dir"
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ libdir="$acl_save_libdir"
+ LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag"
+ done
+ fi
+ fi
+ if test "X$ltrpathdirs" != "X"; then
+ dnl When using libtool, the option that works for both libraries and
+ dnl executables is -R. The -R options are cumulative.
+ for found_dir in $ltrpathdirs; do
+ LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-R$found_dir"
+ done
+ fi
+])
+
+dnl AC_LIB_APPENDTOVAR(VAR, CONTENTS) appends the elements of CONTENTS to VAR,
+dnl unless already present in VAR.
+dnl Works only for CPPFLAGS, not for LIB* variables because that sometimes
+dnl contains two or three consecutive elements that belong together.
+AC_DEFUN([AC_LIB_APPENDTOVAR],
+[
+ for element in [$2]; do
+ haveit=
+ for x in $[$1]; do
+ AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"])
+ if test "X$x" = "X$element"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ [$1]="${[$1]}${[$1]:+ }$element"
+ fi
+ done
+])
+
+dnl For those cases where a variable contains several -L and -l options
+dnl referring to unknown libraries and directories, this macro determines the
+dnl necessary additional linker options for the runtime path.
+dnl AC_LIB_LINKFLAGS_FROM_LIBS([LDADDVAR], [LIBSVALUE], [USE-LIBTOOL])
+dnl sets LDADDVAR to linker options needed together with LIBSVALUE.
+dnl If USE-LIBTOOL evaluates to non-empty, linking with libtool is assumed,
+dnl otherwise linking without libtool is assumed.
+AC_DEFUN([AC_LIB_LINKFLAGS_FROM_LIBS],
+[
+ AC_REQUIRE([AC_LIB_RPATH])
+ AC_REQUIRE([AC_LIB_PREPARE_MULTILIB])
+ $1=
+ if test "$enable_rpath" != no; then
+ if test -n "$hardcode_libdir_flag_spec" && test "$hardcode_minus_L" = no; then
+ dnl Use an explicit option to hardcode directories into the resulting
+ dnl binary.
+ rpathdirs=
+ next=
+ for opt in $2; do
+ if test -n "$next"; then
+ dir="$next"
+ dnl No need to hardcode the standard /usr/lib.
+ if test "X$dir" != "X/usr/$acl_libdirstem"; then
+ rpathdirs="$rpathdirs $dir"
+ fi
+ next=
+ else
+ case $opt in
+ -L) next=yes ;;
+ -L*) dir=`echo "X$opt" | sed -e 's,^X-L,,'`
+ dnl No need to hardcode the standard /usr/lib.
+ if test "X$dir" != "X/usr/$acl_libdirstem"; then
+ rpathdirs="$rpathdirs $dir"
+ fi
+ next= ;;
+ *) next= ;;
+ esac
+ fi
+ done
+ if test "X$rpathdirs" != "X"; then
+ if test -n ""$3""; then
+ dnl libtool is used for linking. Use -R options.
+ for dir in $rpathdirs; do
+ $1="${$1}${$1:+ }-R$dir"
+ done
+ else
+ dnl The linker is used for linking directly.
+ if test -n "$hardcode_libdir_separator"; then
+ dnl Weird platform: only the last -rpath option counts, the user
+ dnl must pass all path elements in one option.
+ alldirs=
+ for dir in $rpathdirs; do
+ alldirs="${alldirs}${alldirs:+$hardcode_libdir_separator}$dir"
+ done
+ acl_save_libdir="$libdir"
+ libdir="$alldirs"
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ libdir="$acl_save_libdir"
+ $1="$flag"
+ else
+ dnl The -rpath options are cumulative.
+ for dir in $rpathdirs; do
+ acl_save_libdir="$libdir"
+ libdir="$dir"
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ libdir="$acl_save_libdir"
+ $1="${$1}${$1:+ }$flag"
+ done
+ fi
+ fi
+ fi
+ fi
+ fi
+ AC_SUBST([$1])
+])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-prefix.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-prefix.m4
new file mode 100644
index 00000000..a8684e17
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lib-prefix.m4
@@ -0,0 +1,185 @@
+# lib-prefix.m4 serial 5 (gettext-0.15)
+dnl Copyright (C) 2001-2005 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+
+dnl From Bruno Haible.
+
+dnl AC_LIB_ARG_WITH is synonymous to AC_ARG_WITH in autoconf-2.13, and
+dnl similar to AC_ARG_WITH in autoconf 2.52...2.57 except that it doesn't
+dnl require excessive bracketing.
+ifdef([AC_HELP_STRING],
+[AC_DEFUN([AC_LIB_ARG_WITH], [AC_ARG_WITH([$1],[[$2]],[$3],[$4])])],
+[AC_DEFUN([AC_][LIB_ARG_WITH], [AC_ARG_WITH([$1],[$2],[$3],[$4])])])
+
+dnl AC_LIB_PREFIX adds to the CPPFLAGS and LDFLAGS the flags that are needed
+dnl to access previously installed libraries. The basic assumption is that
+dnl a user will want packages to use other packages he previously installed
+dnl with the same --prefix option.
+dnl This macro is not needed if only AC_LIB_LINKFLAGS is used to locate
+dnl libraries, but is otherwise very convenient.
+AC_DEFUN([AC_LIB_PREFIX],
+[
+ AC_BEFORE([$0], [AC_LIB_LINKFLAGS])
+ AC_REQUIRE([AC_PROG_CC])
+ AC_REQUIRE([AC_CANONICAL_HOST])
+ AC_REQUIRE([AC_LIB_PREPARE_MULTILIB])
+ AC_REQUIRE([AC_LIB_PREPARE_PREFIX])
+ dnl By default, look in $includedir and $libdir.
+ use_additional=yes
+ AC_LIB_WITH_FINAL_PREFIX([
+ eval additional_includedir=\"$includedir\"
+ eval additional_libdir=\"$libdir\"
+ ])
+ AC_LIB_ARG_WITH([lib-prefix],
+[ --with-lib-prefix[=DIR] search for libraries in DIR/include and DIR/lib
+ --without-lib-prefix don't search for libraries in includedir and libdir],
+[
+ if test "X$withval" = "Xno"; then
+ use_additional=no
+ else
+ if test "X$withval" = "X"; then
+ AC_LIB_WITH_FINAL_PREFIX([
+ eval additional_includedir=\"$includedir\"
+ eval additional_libdir=\"$libdir\"
+ ])
+ else
+ additional_includedir="$withval/include"
+ additional_libdir="$withval/$acl_libdirstem"
+ fi
+ fi
+])
+ if test $use_additional = yes; then
+ dnl Potentially add $additional_includedir to $CPPFLAGS.
+ dnl But don't add it
+ dnl 1. if it's the standard /usr/include,
+ dnl 2. if it's already present in $CPPFLAGS,
+ dnl 3. if it's /usr/local/include and we are using GCC on Linux,
+ dnl 4. if it doesn't exist as a directory.
+ if test "X$additional_includedir" != "X/usr/include"; then
+ haveit=
+ for x in $CPPFLAGS; do
+ AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"])
+ if test "X$x" = "X-I$additional_includedir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test "X$additional_includedir" = "X/usr/local/include"; then
+ if test -n "$GCC"; then
+ case $host_os in
+ linux* | gnu* | k*bsd*-gnu) haveit=yes;;
+ esac
+ fi
+ fi
+ if test -z "$haveit"; then
+ if test -d "$additional_includedir"; then
+ dnl Really add $additional_includedir to $CPPFLAGS.
+ CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }-I$additional_includedir"
+ fi
+ fi
+ fi
+ fi
+ dnl Potentially add $additional_libdir to $LDFLAGS.
+ dnl But don't add it
+ dnl 1. if it's the standard /usr/lib,
+ dnl 2. if it's already present in $LDFLAGS,
+ dnl 3. if it's /usr/local/lib and we are using GCC on Linux,
+ dnl 4. if it doesn't exist as a directory.
+ if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then
+ haveit=
+ for x in $LDFLAGS; do
+ AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"])
+ if test "X$x" = "X-L$additional_libdir"; then
+ haveit=yes
+ break
+ fi
+ done
+ if test -z "$haveit"; then
+ if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then
+ if test -n "$GCC"; then
+ case $host_os in
+ linux*) haveit=yes;;
+ esac
+ fi
+ fi
+ if test -z "$haveit"; then
+ if test -d "$additional_libdir"; then
+ dnl Really add $additional_libdir to $LDFLAGS.
+ LDFLAGS="${LDFLAGS}${LDFLAGS:+ }-L$additional_libdir"
+ fi
+ fi
+ fi
+ fi
+ fi
+])
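A configure.ac sketch, assuming only the generic behaviour described above is wanted (the package name is a placeholder); a single call early in the script lets users pass --with-lib-prefix=/opt/sw to pick up headers and libraries installed under that prefix.

    AC_INIT([hypothetical-package], [1.0])
    AC_PROG_CC
    dnl Adds -I.../include to CPPFLAGS and -L.../lib to LDFLAGS,
    dnl subject to the exclusions listed in the comments above.
    AC_LIB_PREFIX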
+
+dnl AC_LIB_PREPARE_PREFIX creates variables acl_final_prefix,
+dnl acl_final_exec_prefix, containing the values to which $prefix and
+dnl $exec_prefix will expand at the end of the configure script.
+AC_DEFUN([AC_LIB_PREPARE_PREFIX],
+[
+ dnl Unfortunately, prefix and exec_prefix get only finally determined
+ dnl at the end of configure.
+ if test "X$prefix" = "XNONE"; then
+ acl_final_prefix="$ac_default_prefix"
+ else
+ acl_final_prefix="$prefix"
+ fi
+ if test "X$exec_prefix" = "XNONE"; then
+ acl_final_exec_prefix='${prefix}'
+ else
+ acl_final_exec_prefix="$exec_prefix"
+ fi
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ eval acl_final_exec_prefix=\"$acl_final_exec_prefix\"
+ prefix="$acl_save_prefix"
+])
+
+dnl AC_LIB_WITH_FINAL_PREFIX([statement]) evaluates statement, with the
+dnl variables prefix and exec_prefix bound to the values they will have
+dnl at the end of the configure script.
+AC_DEFUN([AC_LIB_WITH_FINAL_PREFIX],
+[
+ acl_save_prefix="$prefix"
+ prefix="$acl_final_prefix"
+ acl_save_exec_prefix="$exec_prefix"
+ exec_prefix="$acl_final_exec_prefix"
+ $1
+ exec_prefix="$acl_save_exec_prefix"
+ prefix="$acl_save_prefix"
+])
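A small sketch of the intended use, assuming a directory variable such as $includedir that may still contain an unexpanded '${prefix}' reference; the statement runs with prefix and exec_prefix temporarily bound to their final values, so the eval yields a concrete path while the real variables stay untouched.

    AC_LIB_PREPARE_PREFIX
    AC_LIB_WITH_FINAL_PREFIX([
      eval my_final_includedir=\"$includedir\"
    ])
    dnl my_final_includedir now holds e.g. /usr/local/include instead of
    dnl ${prefix}/include; prefix and exec_prefix are restored afterwards.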
+
+dnl AC_LIB_PREPARE_MULTILIB creates a variable acl_libdirstem, containing
+dnl the basename of the libdir, either "lib" or "lib64".
+AC_DEFUN([AC_LIB_PREPARE_MULTILIB],
+[
+ dnl There is no formal standard regarding lib and lib64. The current
+ dnl practice is that on a system supporting 32-bit and 64-bit instruction
+ dnl sets or ABIs, 64-bit libraries go under $prefix/lib64 and 32-bit
+ dnl libraries go under $prefix/lib. We determine the compiler's default
+ dnl mode by looking at the compiler's library search path. If at least
+ dnl one of its elements ends in /lib64 or points to a directory whose absolute
+ dnl pathname ends in /lib64, we assume a 64-bit ABI. Otherwise we use the
+ dnl default, namely "lib".
+ acl_libdirstem=lib
+ searchpath=`(LC_ALL=C $CC -print-search-dirs) 2>/dev/null | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'`
+ if test -n "$searchpath"; then
+ acl_save_IFS="${IFS= }"; IFS=":"
+ for searchdir in $searchpath; do
+ if test -d "$searchdir"; then
+ case "$searchdir" in
+ */lib64/ | */lib64 ) acl_libdirstem=lib64 ;;
+ *) searchdir=`cd "$searchdir" && pwd`
+ case "$searchdir" in
+ */lib64 ) acl_libdirstem=lib64 ;;
+ esac ;;
+ esac
+ fi
+ done
+ IFS="$acl_save_IFS"
+ fi
+])
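For context, a sketch of how the resulting variable is typically consumed (the directory shown is illustrative only).

    AC_LIB_PREPARE_MULTILIB
    dnl acl_libdirstem is now either "lib" or "lib64", so this expands to
    dnl /usr/local/lib64 on a 64-bit multilib system and /usr/local/lib otherwise.
    foo_libdir="/usr/local/$acl_libdirstem"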
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/libtool.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/libtool.m4
new file mode 100644
index 00000000..1317892b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/libtool.m4
@@ -0,0 +1,7357 @@
+# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
+#
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
+# 2006, 2007, 2008 Free Software Foundation, Inc.
+# Written by Gordon Matzigkeit, 1996
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+m4_define([_LT_COPYING], [dnl
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
+# 2006, 2007, 2008 Free Software Foundation, Inc.
+# Written by Gordon Matzigkeit, 1996
+#
+# This file is part of GNU Libtool.
+#
+# GNU Libtool is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Libtool; see the file COPYING. If not, a copy
+# can be downloaded from http://www.gnu.org/licenses/gpl.html, or
+# obtained by writing to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA.
+])
+
+# serial 56 LT_INIT
+
+
+# LT_PREREQ(VERSION)
+# ------------------
+# Complain and exit if this libtool version is less than VERSION.
+m4_defun([LT_PREREQ],
+[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1,
+ [m4_default([$3],
+ [m4_fatal([Libtool version $1 or higher is required],
+ 63)])],
+ [$2])])
+
+
+# _LT_CHECK_BUILDDIR
+# ------------------
+# Complain if the absolute build directory name contains unusual characters
+m4_defun([_LT_CHECK_BUILDDIR],
+[case `pwd` in
+ *\ * | *\ *)
+ AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;;
+esac
+])
+
+
+# LT_INIT([OPTIONS])
+# ------------------
+AC_DEFUN([LT_INIT],
+[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT
+AC_BEFORE([$0], [LT_LANG])dnl
+AC_BEFORE([$0], [LT_OUTPUT])dnl
+AC_BEFORE([$0], [LTDL_INIT])dnl
+m4_require([_LT_CHECK_BUILDDIR])dnl
+
+dnl Autoconf doesn't catch unexpanded LT_ macros by default:
+m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl
+m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl
+dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4
+dnl unless we require an AC_DEFUNed macro:
+AC_REQUIRE([LTOPTIONS_VERSION])dnl
+AC_REQUIRE([LTSUGAR_VERSION])dnl
+AC_REQUIRE([LTVERSION_VERSION])dnl
+AC_REQUIRE([LTOBSOLETE_VERSION])dnl
+m4_require([_LT_PROG_LTMAIN])dnl
+
+dnl Parse OPTIONS
+_LT_SET_OPTIONS([$0], [$1])
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ltmain"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+AC_SUBST(LIBTOOL)dnl
+
+_LT_SETUP
+
+# Only expand once:
+m4_define([LT_INIT])
+])# LT_INIT
+
+# Old names:
+AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT])
+AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_PROG_LIBTOOL], [])
+dnl AC_DEFUN([AM_PROG_LIBTOOL], [])
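A hedged configure.ac sketch of the modern entry point (package name, version, and the option list are placeholders); the AU_ALIAS lines above keep the older AC_PROG_LIBTOOL/AM_PROG_LIBTOOL spellings working.

    AC_INIT([hypothetical-package], [1.0])
    AM_INIT_AUTOMAKE([foreign])
    AC_PROG_CC
    LT_PREREQ([2.2])
    LT_INIT([disable-static])
    AC_CONFIG_FILES([Makefile])
    AC_OUTPUT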
+
+
+# _LT_CC_BASENAME(CC)
+# -------------------
+# Calculate cc_basename. Skip known compiler wrappers and cross-prefix.
+m4_defun([_LT_CC_BASENAME],
+[for cc_temp in $1""; do
+ case $cc_temp in
+ compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;;
+ distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;;
+ \-*) ;;
+ *) break;;
+ esac
+done
+cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"`
+])
+
+
+# _LT_FILEUTILS_DEFAULTS
+# ----------------------
+# It is okay to use these file commands and assume they have been set
+# sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'.
+m4_defun([_LT_FILEUTILS_DEFAULTS],
+[: ${CP="cp -f"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+])# _LT_FILEUTILS_DEFAULTS
+
+
+# _LT_SETUP
+# ---------
+m4_defun([_LT_SETUP],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+_LT_DECL([], [host_alias], [0], [The host system])dnl
+_LT_DECL([], [host], [0])dnl
+_LT_DECL([], [host_os], [0])dnl
+dnl
+_LT_DECL([], [build_alias], [0], [The build system])dnl
+_LT_DECL([], [build], [0])dnl
+_LT_DECL([], [build_os], [0])dnl
+dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([LT_PATH_LD])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+dnl
+AC_REQUIRE([AC_PROG_LN_S])dnl
+test -z "$LN_S" && LN_S="ln -s"
+_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl
+dnl
+AC_REQUIRE([LT_CMD_MAX_LEN])dnl
+_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl
+_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl
+dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+m4_require([_LT_CMD_RELOAD])dnl
+m4_require([_LT_CHECK_MAGIC_METHOD])dnl
+m4_require([_LT_CMD_OLD_ARCHIVE])dnl
+m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+
+_LT_CONFIG_LIBTOOL_INIT([
+# See if we are running on zsh, and set the options which allow our
+# commands through without removal of \ escapes INIT.
+if test -n "\${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+fi
+])
+if test -n "${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+fi
+
+_LT_CHECK_OBJDIR
+
+m4_require([_LT_TAG_COMPILER])dnl
+_LT_PROG_ECHO_BACKSLASH
+
+case $host_os in
+aix3*)
+ # AIX sometimes has problems with the GCC collect2 program. For some
+ # reason, if we set the COLLECT_NAMES environment variable, the problems
+ # vanish in a puff of smoke.
+ if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+ fi
+ ;;
+esac
+
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+sed_quote_subst='s/\([["`$\\]]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\([["`\\]]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Sed substitution to delay expansion of an escaped single quote.
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+
+# Sed substitution to avoid accidental globbing in evaled expressions
+no_glob_subst='s/\*/\\\*/g'
+
+# Global variables:
+ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a `.a' archive for static linking (except MSVC,
+# which needs '.lib').
+libext=a
+
+with_gnu_ld="$lt_cv_prog_gnu_ld"
+
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+
+# Set sane defaults for various variables
+test -z "$CC" && CC=cc
+test -z "$LTCC" && LTCC=$CC
+test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
+test -z "$LD" && LD=ld
+test -z "$ac_objext" && ac_objext=o
+
+_LT_CC_BASENAME([$compiler])
+
+# Only perform the check for file, if the check method requires it
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+case $deplibs_check_method in
+file_magic*)
+ if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+ _LT_PATH_MAGIC
+ fi
+ ;;
+esac
+
+# Use C for the default configuration in the libtool script
+LT_SUPPORTED_TAG([CC])
+_LT_LANG_C_CONFIG
+_LT_LANG_DEFAULT_CONFIG
+_LT_CONFIG_COMMANDS
+])# _LT_SETUP
+
+
+# _LT_PROG_LTMAIN
+# ---------------
+# Note that this code is called both from `configure', and `config.status'
+# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably,
+# `config.status' has no value for ac_aux_dir unless we are using Automake,
+# so we pass a copy along to make sure it has a sensible value anyway.
+m4_defun([_LT_PROG_LTMAIN],
+[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl
+_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir'])
+ltmain="$ac_aux_dir/ltmain.sh"
+])# _LT_PROG_LTMAIN
+
+
+## ------------------------------------- ##
+## Accumulate code for creating libtool. ##
+## ------------------------------------- ##
+
+# So that we can recreate a full libtool script including additional
+# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS
+# in macros and then make a single call at the end using the `libtool'
+# label.
+
+
+# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS])
+# ----------------------------------------
+# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later.
+m4_define([_LT_CONFIG_LIBTOOL_INIT],
+[m4_ifval([$1],
+ [m4_append([_LT_OUTPUT_LIBTOOL_INIT],
+ [$1
+])])])
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_INIT])
+
+
+# _LT_CONFIG_LIBTOOL([COMMANDS])
+# ------------------------------
+# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later.
+m4_define([_LT_CONFIG_LIBTOOL],
+[m4_ifval([$1],
+ [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS],
+ [$1
+])])])
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS])
+
+
+# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS])
+# -----------------------------------------------------
+m4_defun([_LT_CONFIG_SAVE_COMMANDS],
+[_LT_CONFIG_LIBTOOL([$1])
+_LT_CONFIG_LIBTOOL_INIT([$2])
+])
+
+
+# _LT_FORMAT_COMMENT([COMMENT])
+# -----------------------------
+# Add leading comment marks to the start of each line, and a trailing
+# full-stop to the whole comment if one is not present already.
+m4_define([_LT_FORMAT_COMMENT],
+[m4_ifval([$1], [
+m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])],
+ [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.])
+)])
+
+
+
+## ------------------------ ##
+## FIXME: Eliminate VARNAME ##
+## ------------------------ ##
+
+
+# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?])
+# -------------------------------------------------------------------
+# CONFIGNAME is the name given to the value in the libtool script.
+# VARNAME is the (base) name used in the configure script.
+# VALUE may be 0, 1 or 2 for a computed quote escaped value based on
+# VARNAME. Any other value will be used directly.
+m4_define([_LT_DECL],
+[lt_if_append_uniq([lt_decl_varnames], [$2], [, ],
+ [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name],
+ [m4_ifval([$1], [$1], [$2])])
+ lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3])
+ m4_ifval([$4],
+ [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])])
+ lt_dict_add_subkey([lt_decl_dict], [$2],
+ [tagged?], [m4_ifval([$5], [yes], [no])])])
+])
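A brief sketch of the calling convention; the two calls below mirror real call sites elsewhere in this file and are shown here only to illustrate the argument meanings.

    dnl Expose $ac_objext to the generated script under the name "objext";
    dnl VALUE 0 means the configure-time value is written out as-is.
    _LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])
    dnl Expose $CC under the name "LTCC"; VALUE 1 means the value is run
    dnl through the single-quote escaping machinery before being written out.
    _LT_DECL([LTCC], [CC], [1], [A C compiler])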
+
+
+# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION])
+# --------------------------------------------------------
+m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])])
+
+
+# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...])
+# ------------------------------------------------
+m4_define([lt_decl_tag_varnames],
+[_lt_decl_filter([tagged?], [yes], $@)])
+
+
+# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..])
+# ---------------------------------------------------------
+m4_define([_lt_decl_filter],
+[m4_case([$#],
+ [0], [m4_fatal([$0: too few arguments: $#])],
+ [1], [m4_fatal([$0: too few arguments: $#: $1])],
+ [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)],
+ [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)],
+ [lt_dict_filter([lt_decl_dict], $@)])[]dnl
+])
+
+
+# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...])
+# --------------------------------------------------
+m4_define([lt_decl_quote_varnames],
+[_lt_decl_filter([value], [1], $@)])
+
+
+# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...])
+# ---------------------------------------------------
+m4_define([lt_decl_dquote_varnames],
+[_lt_decl_filter([value], [2], $@)])
+
+
+# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...])
+# ---------------------------------------------------
+m4_define([lt_decl_varnames_tagged],
+[m4_assert([$# <= 2])dnl
+_$0(m4_quote(m4_default([$1], [[, ]])),
+ m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]),
+ m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))])
+m4_define([_lt_decl_varnames_tagged],
+[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])])
+
+
+# lt_decl_all_varnames([SEPARATOR], [VARNAME1...])
+# ------------------------------------------------
+m4_define([lt_decl_all_varnames],
+[_$0(m4_quote(m4_default([$1], [[, ]])),
+ m4_if([$2], [],
+ m4_quote(lt_decl_varnames),
+ m4_quote(m4_shift($@))))[]dnl
+])
+m4_define([_lt_decl_all_varnames],
+[lt_join($@, lt_decl_varnames_tagged([$1],
+ lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl
+])
+
+
+# _LT_CONFIG_STATUS_DECLARE([VARNAME])
+# ------------------------------------
+# Quote a variable value, and forward it to `config.status' so that its
+# declaration there will have the same value as in `configure'. VARNAME
+# must have a single quote delimited value for this to work.
+m4_define([_LT_CONFIG_STATUS_DECLARE],
+[$1='`$ECHO "X$][$1" | $Xsed -e "$delay_single_quote_subst"`'])
+
+
+# _LT_CONFIG_STATUS_DECLARATIONS
+# ------------------------------
+# We delimit libtool config variables with single quotes, so when
+# we write them to config.status, we have to be sure to quote all
+# embedded single quotes properly. In configure, this macro expands
+# each variable declared with _LT_DECL (and _LT_TAGDECL) into:
+#
+# <var>='`$ECHO "X$<var>" | $Xsed -e "$delay_single_quote_subst"`'
+m4_defun([_LT_CONFIG_STATUS_DECLARATIONS],
+[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames),
+ [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])])
+
+
+# _LT_LIBTOOL_TAGS
+# ----------------
+# Output comment and list of tags supported by the script
+m4_defun([_LT_LIBTOOL_TAGS],
+[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl
+available_tags="_LT_TAGS"dnl
+])
+
+
+# _LT_LIBTOOL_DECLARE(VARNAME, [TAG])
+# -----------------------------------
+# Extract the dictionary values for VARNAME (optionally with TAG) and
+# expand to a commented shell variable setting:
+#
+# # Some comment about what VAR is for.
+# visible_name=$lt_internal_name
+m4_define([_LT_LIBTOOL_DECLARE],
+[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1],
+ [description])))[]dnl
+m4_pushdef([_libtool_name],
+ m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl
+m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])),
+ [0], [_libtool_name=[$]$1],
+ [1], [_libtool_name=$lt_[]$1],
+ [2], [_libtool_name=$lt_[]$1],
+ [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl
+m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl
+])
+
+
+# _LT_LIBTOOL_CONFIG_VARS
+# -----------------------
+# Produce commented declarations of non-tagged libtool config variables
+# suitable for insertion in the LIBTOOL CONFIG section of the `libtool'
+# script. Tagged libtool config variables (even for the LIBTOOL CONFIG
+# section) are produced by _LT_LIBTOOL_TAG_VARS.
+m4_defun([_LT_LIBTOOL_CONFIG_VARS],
+[m4_foreach([_lt_var],
+ m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)),
+ [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])])
+
+
+# _LT_LIBTOOL_TAG_VARS(TAG)
+# -------------------------
+m4_define([_LT_LIBTOOL_TAG_VARS],
+[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames),
+ [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])])
+
+
+# _LT_TAGVAR(VARNAME, [TAGNAME])
+# ------------------------------
+m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])])
+
+
+# _LT_CONFIG_COMMANDS
+# -------------------
+# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of
+# variables for single and double quote escaping we saved from calls
+# to _LT_DECL, we can put quote escaped variable declarations
+# into `config.status', and then the shell code to quote escape them in
+# for loops in `config.status'. Finally, any additional code accumulated
+# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded.
+m4_defun([_LT_CONFIG_COMMANDS],
+[AC_PROVIDE_IFELSE([LT_OUTPUT],
+ dnl If the libtool generation code has been placed in $CONFIG_LT,
+ dnl instead of duplicating it all over again into config.status,
+ dnl then we will have config.status run $CONFIG_LT later, so it
+ dnl needs to know what name is stored there:
+ [AC_CONFIG_COMMANDS([libtool],
+ [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])],
+ dnl If the libtool generation code is destined for config.status,
+ dnl expand the accumulated commands and init code now:
+ [AC_CONFIG_COMMANDS([libtool],
+ [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])])
+])#_LT_CONFIG_COMMANDS
+
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT],
+[
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+sed_quote_subst='$sed_quote_subst'
+double_quote_subst='$double_quote_subst'
+delay_variable_subst='$delay_variable_subst'
+_LT_CONFIG_STATUS_DECLARATIONS
+LTCC='$LTCC'
+LTCFLAGS='$LTCFLAGS'
+compiler='$compiler_DEFAULT'
+
+# Quote evaled strings.
+for var in lt_decl_all_varnames([[ \
+]], lt_decl_quote_varnames); do
+ case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in
+ *[[\\\\\\\`\\"\\\$]]*)
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
+ ;;
+ *)
+ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+ ;;
+ esac
+done
+
+# Double-quote double-evaled strings.
+for var in lt_decl_all_varnames([[ \
+]], lt_decl_dquote_varnames); do
+ case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in
+ *[[\\\\\\\`\\"\\\$]]*)
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
+ ;;
+ *)
+ eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+ ;;
+ esac
+done
+
+# Fix-up fallback echo if it was mangled by the above quoting rules.
+case \$lt_ECHO in
+*'\\\[$]0 --fallback-echo"')dnl "
+ lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\[$]0 --fallback-echo"\[$]/\[$]0 --fallback-echo"/'\`
+ ;;
+esac
+
+_LT_OUTPUT_LIBTOOL_INIT
+])
+
+
+# LT_OUTPUT
+# ---------
+# This macro allows early generation of the libtool script (before
+# AC_OUTPUT is called), in case it is used in configure for compilation
+# tests.
+AC_DEFUN([LT_OUTPUT],
+[: ${CONFIG_LT=./config.lt}
+AC_MSG_NOTICE([creating $CONFIG_LT])
+cat >"$CONFIG_LT" <<_LTEOF
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate a libtool stub with the current configuration.
+
+lt_cl_silent=false
+SHELL=\${CONFIG_SHELL-$SHELL}
+_LTEOF
+
+cat >>"$CONFIG_LT" <<\_LTEOF
+AS_SHELL_SANITIZE
+_AS_PREPARE
+
+exec AS_MESSAGE_FD>&1
+exec AS_MESSAGE_LOG_FD>>config.log
+{
+ echo
+ AS_BOX([Running $as_me.])
+} >&AS_MESSAGE_LOG_FD
+
+lt_cl_help="\
+\`$as_me' creates a local libtool stub from the current configuration,
+for use in further configure time tests before the real libtool is
+generated.
+
+Usage: $[0] [[OPTIONS]]
+
+ -h, --help print this help, then exit
+ -V, --version print version number, then exit
+ -q, --quiet do not print progress messages
+ -d, --debug don't remove temporary files
+
+Report bugs to <bug-libtool@gnu.org>."
+
+lt_cl_version="\
+m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl
+m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION])
+configured by $[0], generated by m4_PACKAGE_STRING.
+
+Copyright (C) 2008 Free Software Foundation, Inc.
+This config.lt script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+while test $[#] != 0
+do
+ case $[1] in
+ --version | --v* | -V )
+ echo "$lt_cl_version"; exit 0 ;;
+ --help | --h* | -h )
+ echo "$lt_cl_help"; exit 0 ;;
+ --debug | --d* | -d )
+ debug=: ;;
+ --quiet | --q* | --silent | --s* | -q )
+ lt_cl_silent=: ;;
+
+ -*) AC_MSG_ERROR([unrecognized option: $[1]
+Try \`$[0] --help' for more information.]) ;;
+
+ *) AC_MSG_ERROR([unrecognized argument: $[1]
+Try \`$[0] --help' for more information.]) ;;
+ esac
+ shift
+done
+
+if $lt_cl_silent; then
+ exec AS_MESSAGE_FD>/dev/null
+fi
+_LTEOF
+
+cat >>"$CONFIG_LT" <<_LTEOF
+_LT_OUTPUT_LIBTOOL_COMMANDS_INIT
+_LTEOF
+
+cat >>"$CONFIG_LT" <<\_LTEOF
+AC_MSG_NOTICE([creating $ofile])
+_LT_OUTPUT_LIBTOOL_COMMANDS
+AS_EXIT(0)
+_LTEOF
+chmod +x "$CONFIG_LT"
+
+# configure is writing to config.log, but config.lt does its own redirection,
+# appending to config.log, which fails on DOS, as config.log is still kept
+# open by configure. Here we exec the FD to /dev/null, effectively closing
+# config.log, so it can be properly (re)opened and appended to by config.lt.
+if test "$no_create" != yes; then
+ lt_cl_success=:
+ test "$silent" = yes &&
+ lt_config_lt_args="$lt_config_lt_args --quiet"
+ exec AS_MESSAGE_LOG_FD>/dev/null
+ $SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false
+ exec AS_MESSAGE_LOG_FD>>config.log
+ $lt_cl_success || AS_EXIT(1)
+fi
+])# LT_OUTPUT
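A sketch of the intended pattern, assuming a configure.ac that needs a working libtool before AC_OUTPUT (for example to drive link tests through it); LT_OUTPUT writes and runs config.lt, so ./libtool exists early.

    LT_INIT
    LT_OUTPUT
    dnl ./libtool is now available for subsequent configure-time tests,
    dnl e.g. ./libtool --mode=link $CC -o conftest conftest.$ac_objext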
+
+
+# _LT_CONFIG(TAG)
+# ---------------
+# If TAG is the built-in tag, create an initial libtool script with a
+# default configuration from the untagged config vars. Otherwise add code
+# to config.status for appending the configuration named by TAG from the
+# matching tagged config vars.
+m4_defun([_LT_CONFIG],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+_LT_CONFIG_SAVE_COMMANDS([
+ m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl
+ m4_if(_LT_TAG, [C], [
+ # See if we are running on zsh, and set the options which allow our
+ # commands through without removal of \ escapes.
+ if test -n "${ZSH_VERSION+set}" ; then
+ setopt NO_GLOB_SUBST
+ fi
+
+ cfgfile="${ofile}T"
+ trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+ $RM "$cfgfile"
+
+ cat <<_LT_EOF >> "$cfgfile"
+#! $SHELL
+
+# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+#
+_LT_COPYING
+_LT_LIBTOOL_TAGS
+
+# ### BEGIN LIBTOOL CONFIG
+_LT_LIBTOOL_CONFIG_VARS
+_LT_LIBTOOL_TAG_VARS
+# ### END LIBTOOL CONFIG
+
+_LT_EOF
+
+ case $host_os in
+ aix3*)
+ cat <<\_LT_EOF >> "$cfgfile"
+# AIX sometimes has problems with the GCC collect2 program. For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+fi
+_LT_EOF
+ ;;
+ esac
+
+ _LT_PROG_LTMAIN
+
+ # We use sed instead of cat because bash on DJGPP gets confused if
+ # it finds mixed CR/LF and LF-only lines. Since sed operates in
+ # text mode, it properly converts lines to CR/LF. This bash problem
+ # is reportedly fixed, but why not run on old versions too?
+ sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \
+ || (rm -f "$cfgfile"; exit 1)
+
+ _LT_PROG_XSI_SHELLFNS
+
+ sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \
+ || (rm -f "$cfgfile"; exit 1)
+
+ mv -f "$cfgfile" "$ofile" ||
+ (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
+ chmod +x "$ofile"
+],
+[cat <<_LT_EOF >> "$ofile"
+
+dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded
+dnl in a comment (ie after a #).
+# ### BEGIN LIBTOOL TAG CONFIG: $1
+_LT_LIBTOOL_TAG_VARS(_LT_TAG)
+# ### END LIBTOOL TAG CONFIG: $1
+_LT_EOF
+])dnl /m4_if
+],
+[m4_if([$1], [], [
+ PACKAGE='$PACKAGE'
+ VERSION='$VERSION'
+ TIMESTAMP='$TIMESTAMP'
+ RM='$RM'
+ ofile='$ofile'], [])
+])dnl /_LT_CONFIG_SAVE_COMMANDS
+])# _LT_CONFIG
+
+
+# LT_SUPPORTED_TAG(TAG)
+# ---------------------
+# Trace this macro to discover what tags are supported by the libtool
+# --tag option, using:
+# autoconf --trace 'LT_SUPPORTED_TAG:$1'
+AC_DEFUN([LT_SUPPORTED_TAG], [])
+
+
+# C support is built-in for now
+m4_define([_LT_LANG_C_enabled], [])
+m4_define([_LT_TAGS], [])
+
+
+# LT_LANG(LANG)
+# -------------
+# Enable libtool support for the given language if not already enabled.
+AC_DEFUN([LT_LANG],
+[AC_BEFORE([$0], [LT_OUTPUT])dnl
+m4_case([$1],
+ [C], [_LT_LANG(C)],
+ [C++], [_LT_LANG(CXX)],
+ [Java], [_LT_LANG(GCJ)],
+ [Fortran 77], [_LT_LANG(F77)],
+ [Fortran], [_LT_LANG(FC)],
+ [Windows Resource], [_LT_LANG(RC)],
+ [m4_ifdef([_LT_LANG_]$1[_CONFIG],
+ [_LT_LANG($1)],
+ [m4_fatal([$0: unsupported language: "$1"])])])dnl
+])# LT_LANG
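A short sketch: after LT_INIT, extra language tags can be enabled explicitly (the languages chosen here are just examples); a name that matches none of the cases above ends in the m4_fatal branch.

    LT_INIT
    LT_LANG([C++])
    LT_LANG([Fortran 77])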
+
+
+# _LT_LANG(LANGNAME)
+# ------------------
+m4_defun([_LT_LANG],
+[m4_ifdef([_LT_LANG_]$1[_enabled], [],
+ [LT_SUPPORTED_TAG([$1])dnl
+ m4_append([_LT_TAGS], [$1 ])dnl
+ m4_define([_LT_LANG_]$1[_enabled], [])dnl
+ _LT_LANG_$1_CONFIG($1)])dnl
+])# _LT_LANG
+
+
+# _LT_LANG_DEFAULT_CONFIG
+# -----------------------
+m4_defun([_LT_LANG_DEFAULT_CONFIG],
+[AC_PROVIDE_IFELSE([AC_PROG_CXX],
+ [LT_LANG(CXX)],
+ [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])])
+
+AC_PROVIDE_IFELSE([AC_PROG_F77],
+ [LT_LANG(F77)],
+ [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])])
+
+AC_PROVIDE_IFELSE([AC_PROG_FC],
+ [LT_LANG(FC)],
+ [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])])
+
+dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal
+dnl pulling things in needlessly.
+AC_PROVIDE_IFELSE([AC_PROG_GCJ],
+ [LT_LANG(GCJ)],
+ [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],
+ [LT_LANG(GCJ)],
+ [AC_PROVIDE_IFELSE([LT_PROG_GCJ],
+ [LT_LANG(GCJ)],
+ [m4_ifdef([AC_PROG_GCJ],
+ [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])])
+ m4_ifdef([A][M_PROG_GCJ],
+ [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])])
+ m4_ifdef([LT_PROG_GCJ],
+ [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])])
+
+AC_PROVIDE_IFELSE([LT_PROG_RC],
+ [LT_LANG(RC)],
+ [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])])
+])# _LT_LANG_DEFAULT_CONFIG
+
+# Obsolete macros:
+AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)])
+AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)])
+AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)])
+AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_CXX], [])
+dnl AC_DEFUN([AC_LIBTOOL_F77], [])
+dnl AC_DEFUN([AC_LIBTOOL_FC], [])
+dnl AC_DEFUN([AC_LIBTOOL_GCJ], [])
+
+
+# _LT_TAG_COMPILER
+# ----------------
+m4_defun([_LT_TAG_COMPILER],
+[AC_REQUIRE([AC_PROG_CC])dnl
+
+_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl
+_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl
+_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl
+_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+])# _LT_TAG_COMPILER
+
+
+# _LT_COMPILER_BOILERPLATE
+# ------------------------
+# Check for compiler boilerplate output or warnings with
+# the simple compiler test code.
+m4_defun([_LT_COMPILER_BOILERPLATE],
+[m4_require([_LT_DECL_SED])dnl
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+])# _LT_COMPILER_BOILERPLATE
+
+
+# _LT_LINKER_BOILERPLATE
+# ----------------------
+# Check for linker boilerplate output or warnings with
+# the simple link test code.
+m4_defun([_LT_LINKER_BOILERPLATE],
+[m4_require([_LT_DECL_SED])dnl
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+])# _LT_LINKER_BOILERPLATE
+
+# _LT_REQUIRED_DARWIN_CHECKS
+# -------------------------
+m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
+ case $host_os in
+ rhapsody* | darwin*)
+ AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:])
+ AC_CHECK_TOOL([NMEDIT], [nmedit], [:])
+ AC_CHECK_TOOL([LIPO], [lipo], [:])
+ AC_CHECK_TOOL([OTOOL], [otool], [:])
+ AC_CHECK_TOOL([OTOOL64], [otool64], [:])
+ _LT_DECL([], [DSYMUTIL], [1],
+ [Tool to manipulate archived DWARF debug symbol files on Mac OS X])
+ _LT_DECL([], [NMEDIT], [1],
+ [Tool to change global to local symbols on Mac OS X])
+ _LT_DECL([], [LIPO], [1],
+ [Tool to manipulate fat objects and archives on Mac OS X])
+ _LT_DECL([], [OTOOL], [1],
+ [ldd/readelf like tool for Mach-O binaries on Mac OS X])
+ _LT_DECL([], [OTOOL64], [1],
+ [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4])
+
+ AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod],
+ [lt_cv_apple_cc_single_mod=no
+ if test -z "${LT_MULTI_MODULE}"; then
+ # By default we will add the -single_module flag. You can override
+ # by either setting the environment variable LT_MULTI_MODULE
+ # non-empty at configure time, or by adding -multi_module to the
+ # link flags.
+ rm -rf libconftest.dylib*
+ echo "int foo(void){return 1;}" > conftest.c
+ echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD
+ $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
+ _lt_result=$?
+ if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then
+ lt_cv_apple_cc_single_mod=yes
+ else
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ fi
+ rm -rf libconftest.dylib*
+ rm -f conftest.*
+ fi])
+ AC_CACHE_CHECK([for -exported_symbols_list linker flag],
+ [lt_cv_ld_exported_symbols_list],
+ [lt_cv_ld_exported_symbols_list=no
+ save_LDFLAGS=$LDFLAGS
+ echo "_main" > conftest.sym
+ LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+ [lt_cv_ld_exported_symbols_list=yes],
+ [lt_cv_ld_exported_symbols_list=no])
+ LDFLAGS="$save_LDFLAGS"
+ ])
+ case $host_os in
+ rhapsody* | darwin1.[[012]])
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;;
+ darwin1.*)
+ _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+ darwin*) # darwin 5.x on
+ # If running on 10.5 or later, the deployment target defaults
+ # to the OS version; if on x86 and 10.4, the deployment
+ # target defaults to 10.4. Don't you love it?
+ case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in
+ 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*)
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+ 10.[[012]]*)
+ _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+ 10.*)
+ _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+ esac
+ ;;
+ esac
+ if test "$lt_cv_apple_cc_single_mod" = "yes"; then
+ _lt_dar_single_mod='$single_module'
+ fi
+ if test "$lt_cv_ld_exported_symbols_list" = "yes"; then
+ _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym'
+ else
+ _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}'
+ fi
+ if test "$DSYMUTIL" != ":"; then
+ _lt_dsymutil='~$DSYMUTIL $lib || :'
+ else
+ _lt_dsymutil=
+ fi
+ ;;
+ esac
+])
+
+
+# _LT_DARWIN_LINKER_FEATURES
+# --------------------------
+# Checks for linker and compiler features on darwin
+m4_defun([_LT_DARWIN_LINKER_FEATURES],
+[
+ m4_require([_LT_REQUIRED_DARWIN_CHECKS])
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_automatic, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=''
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined"
+ case $cc_basename in
+ ifort*) _lt_dar_can_shared=yes ;;
+ *) _lt_dar_can_shared=$GCC ;;
+ esac
+ if test "$_lt_dar_can_shared" = "yes"; then
+ output_verbose_link_cmd=echo
+ _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
+ _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
+ _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
+ _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
+ m4_if([$1], [CXX],
+[ if test "$lt_cv_apple_cc_single_mod" != "yes"; then
+ _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}"
+ _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}"
+ fi
+],[])
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+])
+
+# _LT_SYS_MODULE_PATH_AIX
+# -----------------------
+# Links a minimal program and checks the executable
+# for the system default hardcoded library path. In most cases,
+# this is /usr/lib:/lib, but when the MPI compilers are used
+# the location of the communication and MPI libs is included too.
+# If we don't find anything, use the default library path according
+# to the aix ld manual.
+m4_defun([_LT_SYS_MODULE_PATH_AIX],
+[m4_require([_LT_DECL_SED])dnl
+AC_LINK_IFELSE([AC_LANG_PROGRAM],[
+lt_aix_libpath_sed='
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\(.*\)$/\1/
+ p
+ }
+ }'
+aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+# Check for a 64-bit object if we didn't find anything.
+if test -z "$aix_libpath"; then
+ aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+fi],[])
+if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
+])# _LT_SYS_MODULE_PATH_AIX
+
+
+# _LT_SHELL_INIT(ARG)
+# -------------------
+m4_define([_LT_SHELL_INIT],
+[ifdef([AC_DIVERSION_NOTICE],
+ [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)],
+ [AC_DIVERT_PUSH(NOTICE)])
+$1
+AC_DIVERT_POP
+])# _LT_SHELL_INIT
+
+
+# _LT_PROG_ECHO_BACKSLASH
+# -----------------------
+# Add some code to the start of the generated configure script which
+# will find an echo command which doesn't interpret backslashes.
+m4_defun([_LT_PROG_ECHO_BACKSLASH],
+[_LT_SHELL_INIT([
+# Check that we are running under the correct shell.
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+case X$lt_ECHO in
+X*--fallback-echo)
+ # Remove one level of quotation (which was required for Make).
+ ECHO=`echo "$lt_ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','`
+ ;;
+esac
+
+ECHO=${lt_ECHO-echo}
+if test "X[$]1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X[$]1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then
+ # Yippee, $ECHO works!
+ :
+else
+ # Restart under the correct shell.
+ exec $SHELL "[$]0" --no-reexec ${1+"[$]@"}
+fi
+
+if test "X[$]1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<_LT_EOF
+[$]*
+_LT_EOF
+ exit 0
+fi
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+if test -z "$lt_ECHO"; then
+ if test "X${echo_test_string+set}" != Xset; then
+ # find a string as large as possible, as long as the shell can cope with it
+ for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do
+ # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
+ if { echo_test_string=`eval $cmd`; } 2>/dev/null &&
+ { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null
+ then
+ break
+ fi
+ done
+ fi
+
+ if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ :
+ else
+ # The Solaris, AIX, and Digital Unix default echo programs unquote
+ # backslashes. This makes it impossible to quote backslashes using
+ # echo "$something" | sed 's/\\/\\\\/g'
+ #
+ # So, first we look for a working echo in the user's PATH.
+
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for dir in $PATH /usr/ucb; do
+ IFS="$lt_save_ifs"
+ if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
+ test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ ECHO="$dir/echo"
+ break
+ fi
+ done
+ IFS="$lt_save_ifs"
+
+ if test "X$ECHO" = Xecho; then
+ # We didn't find a better echo, so look for alternatives.
+ if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # This shell has a builtin print -r that does the trick.
+ ECHO='print -r'
+ elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } &&
+ test "X$CONFIG_SHELL" != X/bin/ksh; then
+ # If we have ksh, try running configure again with it.
+ ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh}
+ export ORIGINAL_CONFIG_SHELL
+ CONFIG_SHELL=/bin/ksh
+ export CONFIG_SHELL
+ exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"}
+ else
+ # Try using printf.
+ ECHO='printf %s\n'
+ if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # Cool, printf works
+ :
+ elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL
+ export CONFIG_SHELL
+ SHELL="$CONFIG_SHELL"
+ export SHELL
+ ECHO="$CONFIG_SHELL [$]0 --fallback-echo"
+ elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ ECHO="$CONFIG_SHELL [$]0 --fallback-echo"
+ else
+ # maybe with a smaller string...
+ prev=:
+
+ for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do
+ if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null
+ then
+ break
+ fi
+ prev="$cmd"
+ done
+
+ if test "$prev" != 'sed 50q "[$]0"'; then
+ echo_test_string=`eval $prev`
+ export echo_test_string
+ exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"}
+ else
+ # Oops. We lost completely, so just stick with echo.
+ ECHO=echo
+ fi
+ fi
+ fi
+ fi
+ fi
+fi
+
+# Copy echo and quote the copy suitably for passing to libtool from
+# the Makefile, instead of quoting the original, which is used later.
+lt_ECHO=$ECHO
+if test "X$lt_ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then
+ lt_ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo"
+fi
+
+AC_SUBST(lt_ECHO)
+])
+_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts])
+_LT_DECL([], [ECHO], [1],
+ [An echo program that does not interpret backslashes])
+])# _LT_PROG_ECHO_BACKSLASH
+
+
+# _LT_ENABLE_LOCK
+# ---------------
+m4_defun([_LT_ENABLE_LOCK],
+[AC_ARG_ENABLE([libtool-lock],
+ [AS_HELP_STRING([--disable-libtool-lock],
+ [avoid locking (might break parallel builds)])])
+test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+ia64-*-hpux*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *ELF-32*)
+ HPUX_IA64_MODE="32"
+ ;;
+ *ELF-64*)
+ HPUX_IA64_MODE="64"
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+*-*-irix6*)
+ # Find out which ABI we are using.
+ echo '[#]line __oline__ "configure"' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ if test "$lt_cv_prog_gnu_ld" = yes; then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -melf32bsmip"
+ ;;
+ *N32*)
+ LD="${LD-ld} -melf32bmipn32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -melf64bmip"
+ ;;
+ esac
+ else
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -32"
+ ;;
+ *N32*)
+ LD="${LD-ld} -n32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -64"
+ ;;
+ esac
+ fi
+ fi
+ rm -rf conftest*
+ ;;
+
+x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \
+s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ case `/usr/bin/file conftest.o` in
+ *32-bit*)
+ case $host in
+ x86_64-*kfreebsd*-gnu)
+ LD="${LD-ld} -m elf_i386_fbsd"
+ ;;
+ x86_64-*linux*)
+ LD="${LD-ld} -m elf_i386"
+ ;;
+ ppc64-*linux*|powerpc64-*linux*)
+ LD="${LD-ld} -m elf32ppclinux"
+ ;;
+ s390x-*linux*)
+ LD="${LD-ld} -m elf_s390"
+ ;;
+ sparc64-*linux*)
+ LD="${LD-ld} -m elf32_sparc"
+ ;;
+ esac
+ ;;
+ *64-bit*)
+ case $host in
+ x86_64-*kfreebsd*-gnu)
+ LD="${LD-ld} -m elf_x86_64_fbsd"
+ ;;
+ x86_64-*linux*)
+ LD="${LD-ld} -m elf_x86_64"
+ ;;
+ ppc*-*linux*|powerpc*-*linux*)
+ LD="${LD-ld} -m elf64ppc"
+ ;;
+ s390*-*linux*|s390*-*tpf*)
+ LD="${LD-ld} -m elf64_s390"
+ ;;
+ sparc*-*linux*)
+ LD="${LD-ld} -m elf64_sparc"
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+
+*-*-sco3.2v5*)
+ # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -belf"
+ AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf,
+ [AC_LANG_PUSH(C)
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no])
+ AC_LANG_POP])
+ if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+ CFLAGS="$SAVE_CFLAGS"
+ fi
+ ;;
+sparc*-*solaris*)
+ # Find out which ABI we are using.
+ echo 'int i;' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ case `/usr/bin/file conftest.o` in
+ *64-bit*)
+ case $lt_cv_prog_gnu_ld in
+ yes*) LD="${LD-ld} -m elf64_sparc" ;;
+ *)
+ if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
+ LD="${LD-ld} -64"
+ fi
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+esac
+
+need_locks="$enable_libtool_lock"
+])# _LT_ENABLE_LOCK
+
+
+# _LT_CMD_OLD_ARCHIVE
+# -------------------
+m4_defun([_LT_CMD_OLD_ARCHIVE],
+[AC_CHECK_TOOL(AR, ar, false)
+test -z "$AR" && AR=ar
+test -z "$AR_FLAGS" && AR_FLAGS=cru
+_LT_DECL([], [AR], [1], [The archiver])
+_LT_DECL([], [AR_FLAGS], [1])
+
+AC_CHECK_TOOL(STRIP, strip, :)
+test -z "$STRIP" && STRIP=:
+_LT_DECL([], [STRIP], [1], [A symbol stripping program])
+
+AC_CHECK_TOOL(RANLIB, ranlib, :)
+test -z "$RANLIB" && RANLIB=:
+_LT_DECL([], [RANLIB], [1],
+ [Commands used to install an old-style archive])
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+ case $host_os in
+ openbsd*)
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib"
+ ;;
+ *)
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib"
+ ;;
+ esac
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
+fi
+_LT_DECL([], [old_postinstall_cmds], [2])
+_LT_DECL([], [old_postuninstall_cmds], [2])
+_LT_TAGDECL([], [old_archive_cmds], [2],
+ [Commands used to build an old-style archive])
+])# _LT_CMD_OLD_ARCHIVE
+
+
+# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
+# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE])
+# ----------------------------------------------------------------
+# Check whether the given compiler option works
+AC_DEFUN([_LT_COMPILER_OPTION],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_SED])dnl
+AC_CACHE_CHECK([$1], [$2],
+ [$2=no
+ m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4])
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+ lt_compiler_flag="$3"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ # The option is referenced via a variable to avoid confusing sed.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+ (eval "$lt_compile" 2>conftest.err)
+ ac_status=$?
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+ if (exit $ac_status) && test -s "$ac_outfile"; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings other than the usual output.
+ $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+ $2=yes
+ fi
+ fi
+ $RM conftest*
+])
+
+if test x"[$]$2" = xyes; then
+ m4_if([$5], , :, [$5])
+else
+ m4_if([$6], , :, [$6])
+fi
+])# _LT_COMPILER_OPTION
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], [])
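An illustrative call (the flag, cache variable, and result variable are hypothetical, and the simple-compile test string is assumed to have been set up by _LT_COMPILER_BOILERPLATE); real call sites in this file follow the same shape.

    _LT_COMPILER_OPTION([whether $compiler accepts -fvisibility=hidden],
      [lt_cv_prog_compiler_visibility], [-fvisibility=hidden], [],
      [VISIBILITY_CFLAGS="-fvisibility=hidden"], [VISIBILITY_CFLAGS=""])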
+
+
+# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
+# [ACTION-SUCCESS], [ACTION-FAILURE])
+# ----------------------------------------------------
+# Check whether the given linker option works
+AC_DEFUN([_LT_LINKER_OPTION],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_SED])dnl
+AC_CACHE_CHECK([$1], [$2],
+ [$2=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $3"
+ echo "$lt_simple_link_test_code" > conftest.$ac_ext
+ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+ # The linker can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ # Append any errors to the config.log.
+ cat conftest.err 1>&AS_MESSAGE_LOG_FD
+ $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if diff conftest.exp conftest.er2 >/dev/null; then
+ $2=yes
+ fi
+ else
+ $2=yes
+ fi
+ fi
+ $RM -r conftest*
+ LDFLAGS="$save_LDFLAGS"
+])
+
+if test x"[$]$2" = xyes; then
+ m4_if([$4], , :, [$4])
+else
+ m4_if([$5], , :, [$5])
+fi
+])# _LT_LINKER_OPTION
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], [])
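A matching illustrative call for the linker-side check (again, the flag and variable names are hypothetical, and $lt_simple_link_test_code is assumed to be prepared).

    _LT_LINKER_OPTION([whether the linker accepts -Wl,--as-needed],
      [lt_cv_ld_as_needed], [-Wl,--as-needed],
      [LDFLAGS="$LDFLAGS -Wl,--as-needed"], [])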
+
+
+# LT_CMD_MAX_LEN
+#---------------
+AC_DEFUN([LT_CMD_MAX_LEN],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+# find the maximum length of command line arguments
+AC_MSG_CHECKING([the maximum length of command line arguments])
+AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
+ i=0
+ teststring="ABCD"
+
+ case $build_os in
+ msdosdjgpp*)
+ # On DJGPP, this test can blow up pretty badly due to problems in libc
+ # (any single argument exceeding 2000 bytes causes a buffer overrun
+ # during glob expansion). Even if it were fixed, the result of this
+ # check would be larger than it should be.
+ lt_cv_sys_max_cmd_len=12288; # 12K is about right
+ ;;
+
+ gnu*)
+ # Under GNU Hurd, this test is not required because there is
+ # no limit to the length of command line arguments.
+ # Libtool will interpret -1 as no limit whatsoever
+ lt_cv_sys_max_cmd_len=-1;
+ ;;
+
+ cygwin* | mingw* | cegcc*)
+ # On Win9x/ME, this test blows up -- it succeeds, but takes
+ # about 5 minutes as the teststring grows exponentially.
+ # Worse, since 9x/ME are not pre-emptively multitasking,
+ # you end up with a "frozen" computer, even though with patience
+ # the test eventually succeeds (with a max line length of 256k).
+ # Instead, let's just punt: use the minimum linelength reported by
+ # all of the supported platforms: 8192 (on NT/2K/XP).
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ amigaos*)
+ # On AmigaOS with pdksh, this test takes hours, literally.
+ # So we just punt and use a minimum line length of 8192.
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ netbsd* | freebsd* | openbsd* | darwin* | dragonfly*)
+ # This has been around since 386BSD, at least. Likely further.
+ if test -x /sbin/sysctl; then
+ lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+ elif test -x /usr/sbin/sysctl; then
+ lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
+ else
+ lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs
+ fi
+ # And add a safety zone
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ ;;
+
+ interix*)
+ # We know the value 262144 and hardcode it with a safety zone (like BSD)
+ lt_cv_sys_max_cmd_len=196608
+ ;;
+
+ osf*)
+ # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+ # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+ # nice to cause kernel panics so let's avoid the loop below.
+ # First set a reasonable default.
+ lt_cv_sys_max_cmd_len=16384
+ #
+ if test -x /sbin/sysconfig; then
+ case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+ *1*) lt_cv_sys_max_cmd_len=-1 ;;
+ esac
+ fi
+ ;;
+ sco3.2v5*)
+ lt_cv_sys_max_cmd_len=102400
+ ;;
+ sysv5* | sco5v6* | sysv4.2uw2*)
+ kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+ if test -n "$kargmax"; then
+ lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'`
+ else
+ lt_cv_sys_max_cmd_len=32768
+ fi
+ ;;
+ *)
+ lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+ if test -n "$lt_cv_sys_max_cmd_len"; then
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ else
+ # Make teststring a little bigger before we do anything with it.
+ # a 1K string should be a reasonable start.
+ for i in 1 2 3 4 5 6 7 8 ; do
+ teststring=$teststring$teststring
+ done
+ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+ # If test is not a shell built-in, we'll probably end up computing a
+ # maximum length that is only half of the actual maximum length, but
+ # we can't tell.
+ while { test "X"`$SHELL [$]0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \
+ = "XX$teststring$teststring"; } >/dev/null 2>&1 &&
+ test $i != 17 # 1/2 MB should be enough
+ do
+ i=`expr $i + 1`
+ teststring=$teststring$teststring
+ done
+ # Only check the string length outside the loop.
+ lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+ teststring=
+ # Add a significant safety factor because C++ compilers can tack on
+ # massive amounts of additional arguments before passing them to the
+ # linker. It appears as though 1/2 is a usable value.
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+ fi
+ ;;
+ esac
+])
+if test -n "$lt_cv_sys_max_cmd_len" ; then
+ AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
+else
+ AC_MSG_RESULT(none)
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+_LT_DECL([], [max_cmd_len], [0],
+ [What is the maximum length of a command?])
+])# LT_CMD_MAX_LEN
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])
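+
+# Illustrative use from configure.ac (example only, not part of the macro
+# definitions above): after invoking LT_CMD_MAX_LEN the detected limit is
+# available in $max_cmd_len, with -1 meaning "no limit", e.g.:
+#   LT_CMD_MAX_LEN
+#   AC_MSG_NOTICE([maximum command line length: $max_cmd_len])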
+
+
+# _LT_HEADER_DLFCN
+# ----------------
+m4_defun([_LT_HEADER_DLFCN],
+[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
+])# _LT_HEADER_DLFCN
+
+
+# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ----------------------------------------------------------------
+m4_defun([_LT_TRY_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test "$cross_compiling" = yes; then :
+ [$4]
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+[#line __oline__ "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW on the command line if we
+   find out it does not work on some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+void fnord() { int i=42;}
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ /* dlclose (self); */
+ }
+ else
+ puts (dlerror ());
+
+ return status;
+}]
+_LT_EOF
+ if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) $1 ;;
+ x$lt_dlneed_uscore) $2 ;;
+ x$lt_dlunknown|x*) $3 ;;
+ esac
+ else :
+ # compilation failed
+ $3
+ fi
+fi
+rm -fr conftest*
+])# _LT_TRY_DLOPEN_SELF
+
+
+# LT_SYS_DLOPEN_SELF
+# ------------------
+AC_DEFUN([LT_SYS_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test "x$enable_dlopen" != xyes; then
+ enable_dlopen=unknown
+ enable_dlopen_self=unknown
+ enable_dlopen_self_static=unknown
+else
+ lt_cv_dlopen=no
+ lt_cv_dlopen_libs=
+
+ case $host_os in
+ beos*)
+ lt_cv_dlopen="load_add_on"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ;;
+
+ mingw* | pw32* | cegcc*)
+ lt_cv_dlopen="LoadLibrary"
+ lt_cv_dlopen_libs=
+ ;;
+
+ cygwin*)
+ lt_cv_dlopen="dlopen"
+ lt_cv_dlopen_libs=
+ ;;
+
+ darwin*)
+ # if libdl is installed we need to link against it
+ AC_CHECK_LIB([dl], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[
+ lt_cv_dlopen="dyld"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ])
+ ;;
+
+ *)
+ AC_CHECK_FUNC([shl_load],
+ [lt_cv_dlopen="shl_load"],
+ [AC_CHECK_LIB([dld], [shl_load],
+ [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"],
+ [AC_CHECK_FUNC([dlopen],
+ [lt_cv_dlopen="dlopen"],
+ [AC_CHECK_LIB([dl], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],
+ [AC_CHECK_LIB([svld], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"],
+ [AC_CHECK_LIB([dld], [dld_link],
+ [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"])
+ ])
+ ])
+ ])
+ ])
+ ])
+ ;;
+ esac
+
+ if test "x$lt_cv_dlopen" != xno; then
+ enable_dlopen=yes
+ else
+ enable_dlopen=no
+ fi
+
+ case $lt_cv_dlopen in
+ dlopen)
+ save_CPPFLAGS="$CPPFLAGS"
+ test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+ save_LDFLAGS="$LDFLAGS"
+ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+ save_LIBS="$LIBS"
+ LIBS="$lt_cv_dlopen_libs $LIBS"
+
+ AC_CACHE_CHECK([whether a program can dlopen itself],
+ lt_cv_dlopen_self, [dnl
+ _LT_TRY_DLOPEN_SELF(
+ lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
+ lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)
+ ])
+
+ if test "x$lt_cv_dlopen_self" = xyes; then
+ wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+ AC_CACHE_CHECK([whether a statically linked program can dlopen itself],
+ lt_cv_dlopen_self_static, [dnl
+ _LT_TRY_DLOPEN_SELF(
+ lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes,
+ lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross)
+ ])
+ fi
+
+ CPPFLAGS="$save_CPPFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+ LIBS="$save_LIBS"
+ ;;
+ esac
+
+ case $lt_cv_dlopen_self in
+ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+ *) enable_dlopen_self=unknown ;;
+ esac
+
+ case $lt_cv_dlopen_self_static in
+ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+ *) enable_dlopen_self_static=unknown ;;
+ esac
+fi
+_LT_DECL([dlopen_support], [enable_dlopen], [0],
+ [Whether dlopen is supported])
+_LT_DECL([dlopen_self], [enable_dlopen_self], [0],
+ [Whether dlopen of programs is supported])
+_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0],
+ [Whether dlopen of statically linked programs is supported])
+])# LT_SYS_DLOPEN_SELF
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], [])
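+
+# Illustrative use (example only): the dlopen checks above only run when
+# dlopen support was requested, typically via configure.ac:
+#   LT_INIT([dlopen])
+# afterwards $enable_dlopen, $enable_dlopen_self and
+# $enable_dlopen_self_static each report yes, no or unknown.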
+
+
+# _LT_COMPILER_C_O([TAGNAME])
+# ---------------------------
+# Check to see if options -c and -o are simultaneously supported by compiler.
+# This macro does not hard code the compiler like AC_PROG_CC_C_O.
+m4_defun([_LT_COMPILER_C_O],
+[m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
+ [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)],
+ [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no
+ $RM -r conftest 2>/dev/null
+ mkdir conftest
+ cd conftest
+ mkdir out
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ lt_compiler_flag="-o out/conftest2.$ac_objext"
+ # Insert the option either (1) after the last *FLAGS variable, or
+ # (2) before a word containing "conftest.", or (3) at the end.
+ # Note that $ac_compile itself does not contain backslashes and begins
+ # with a dollar sign (not a hyphen), so the echo should work correctly.
+ lt_compile=`echo "$ac_compile" | $SED \
+ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
+ -e 's:$: $lt_compiler_flag:'`
+ (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+ (eval "$lt_compile" 2>out/conftest.err)
+ ac_status=$?
+ cat out/conftest.err >&AS_MESSAGE_LOG_FD
+ echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+ if (exit $ac_status) && test -s out/conftest2.$ac_objext
+ then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp
+ $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+ if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+ _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
+ fi
+ fi
+ chmod u+w . 2>&AS_MESSAGE_LOG_FD
+ $RM conftest*
+ # SGI C++ compiler will create directory out/ii_files/ for
+ # template instantiation
+ test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+ $RM out/* && rmdir out
+ cd ..
+ $RM -r conftest
+ $RM conftest*
+])
+_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1],
+ [Does compiler simultaneously support -c and -o options?])
+])# _LT_COMPILER_C_O
+
+
+# _LT_COMPILER_FILE_LOCKS([TAGNAME])
+# ----------------------------------
+# Check to see if we can do hard links to lock some files if needed
+m4_defun([_LT_COMPILER_FILE_LOCKS],
+[m4_require([_LT_ENABLE_LOCK])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+_LT_COMPILER_C_O([$1])
+
+hard_links="nottested"
+if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then
+ # do not overwrite the value of need_locks provided by the user
+ AC_MSG_CHECKING([if we can lock with hard links])
+ hard_links=yes
+ $RM conftest*
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ touch conftest.a
+ ln conftest.a conftest.b 2>&5 || hard_links=no
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ AC_MSG_RESULT([$hard_links])
+ if test "$hard_links" = no; then
+ AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe])
+ need_locks=warn
+ fi
+else
+ need_locks=no
+fi
+_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?])
+])# _LT_COMPILER_FILE_LOCKS
+
+
+# _LT_CHECK_OBJDIR
+# ----------------
+m4_defun([_LT_CHECK_OBJDIR],
+[AC_CACHE_CHECK([for objdir], [lt_cv_objdir],
+[rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+ lt_cv_objdir=.libs
+else
+ # MS-DOS does not allow filenames that begin with a dot.
+ lt_cv_objdir=_libs
+fi
+rmdir .libs 2>/dev/null])
+objdir=$lt_cv_objdir
+_LT_DECL([], [objdir], [0],
+ [The name of the directory that contains temporary libtool files])dnl
+m4_pattern_allow([LT_OBJDIR])dnl
+AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/",
+ [Define to the sub-directory in which libtool stores uninstalled libraries.])
+])# _LT_CHECK_OBJDIR
+
+
+# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME])
+# --------------------------------------
+# Check hardcoding attributes.
+m4_defun([_LT_LINKER_HARDCODE_LIBPATH],
+[AC_MSG_CHECKING([how to hardcode library paths into programs])
+_LT_TAGVAR(hardcode_action, $1)=
+if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" ||
+ test -n "$_LT_TAGVAR(runpath_var, $1)" ||
+ test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then
+
+ # We can hardcode non-existent directories.
+ if test "$_LT_TAGVAR(hardcode_direct, $1)" != no &&
+ # If the only mechanism to avoid hardcoding is shlibpath_var, we
+ # have to relink, otherwise we might link with an installed library
+ # when we should be linking with a yet-to-be-installed one
+ ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no &&
+ test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then
+ # Linking always hardcodes the temporary library directory.
+ _LT_TAGVAR(hardcode_action, $1)=relink
+ else
+    # We can link without hardcoding, and we can hardcode nonexistent dirs.
+ _LT_TAGVAR(hardcode_action, $1)=immediate
+ fi
+else
+ # We cannot hardcode anything, or else we can only hardcode existing
+ # directories.
+ _LT_TAGVAR(hardcode_action, $1)=unsupported
+fi
+AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)])
+
+if test "$_LT_TAGVAR(hardcode_action, $1)" = relink ||
+ test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
+_LT_TAGDECL([], [hardcode_action], [0],
+ [How to hardcode a shared library path into an executable])
+])# _LT_LINKER_HARDCODE_LIBPATH
+
+
+# _LT_CMD_STRIPLIB
+# ----------------
+m4_defun([_LT_CMD_STRIPLIB],
+[m4_require([_LT_DECL_EGREP])
+striplib=
+old_striplib=
+AC_MSG_CHECKING([whether stripping libraries is possible])
+if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+ test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+ test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+ AC_MSG_RESULT([yes])
+else
+# FIXME - insert some real tests, host_os isn't really good enough
+ case $host_os in
+ darwin*)
+ if test -n "$STRIP" ; then
+ striplib="$STRIP -x"
+ old_striplib="$STRIP -S"
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ fi
+ ;;
+ *)
+ AC_MSG_RESULT([no])
+ ;;
+ esac
+fi
+_LT_DECL([], [old_striplib], [1], [Commands to strip libraries])
+_LT_DECL([], [striplib], [1])
+])# _LT_CMD_STRIPLIB
+
+
+# _LT_SYS_DYNAMIC_LINKER([TAG])
+# -----------------------------
+# PORTME Fill in your ld.so characteristics
+m4_defun([_LT_SYS_DYNAMIC_LINKER],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_OBJDUMP])dnl
+m4_require([_LT_DECL_SED])dnl
+AC_MSG_CHECKING([dynamic linker characteristics])
+m4_if([$1],
+ [], [
+if test "$GCC" = yes; then
+ case $host_os in
+ darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
+ *) lt_awk_arg="/^libraries:/" ;;
+ esac
+ lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"`
+ if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then
+ # if the path contains ";" then we assume it to be the separator
+ # otherwise default to the standard path separator (i.e. ":") - it is
+    # assumed that no part of a normal pathname contains ";" but that should
+    # be okay in the real world where ";" in dirpaths is itself problematic.
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'`
+ else
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ fi
+ # Ok, now we have the path, separated by spaces, we can step through it
+ # and add multilib dir if necessary.
+ lt_tmp_lt_search_path_spec=
+ lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+ for lt_sys_path in $lt_search_path_spec; do
+ if test -d "$lt_sys_path/$lt_multi_os_dir"; then
+ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir"
+ else
+ test -d "$lt_sys_path" && \
+ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
+ fi
+ done
+ lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk '
+BEGIN {RS=" "; FS="/|\n";} {
+ lt_foo="";
+ lt_count=0;
+ for (lt_i = NF; lt_i > 0; lt_i--) {
+ if ($lt_i != "" && $lt_i != ".") {
+ if ($lt_i == "..") {
+ lt_count++;
+ } else {
+ if (lt_count == 0) {
+ lt_foo="/" $lt_i lt_foo;
+ } else {
+ lt_count--;
+ }
+ }
+ }
+ }
+ if (lt_foo != "") { lt_freq[[lt_foo]]++; }
+ if (lt_freq[[lt_foo]] == 1) { print lt_foo; }
+}'`
+ sys_lib_search_path_spec=`$ECHO $lt_search_path_spec`
+else
+ sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+fi])
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=".so"
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+case $host_os in
+aix3*)
+ version_type=linux
+ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+ shlibpath_var=LIBPATH
+
+ # AIX 3 has no versioning support, so we append a major version to the name.
+ soname_spec='${libname}${release}${shared_ext}$major'
+ ;;
+
+aix[[4-9]]*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ hardcode_into_libs=yes
+ if test "$host_cpu" = ia64; then
+ # AIX 5 supports IA64
+ library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ else
+ # With GCC up to 2.95.x, collect2 would create an import file
+ # for dependence libraries. The import file would start with
+ # the line `#! .'. This would cause the generated library to
+ # depend on `.', always an invalid library. This was fixed in
+ # development snapshots of GCC prior to 3.0.
+ case $host_os in
+ aix4 | aix4.[[01]] | aix4.[[01]].*)
+ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ echo ' yes '
+ echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+ :
+ else
+ can_build_shared=no
+ fi
+ ;;
+ esac
+    # AIX (on Power*) has no versioning support, so currently we cannot hardcode the
+    # correct soname into the executable. Probably we can add versioning support to
+    # collect2, so additional links can be useful in the future.
+ if test "$aix_use_runtimelinking" = yes; then
+ # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+ # instead of lib<name>.a to let people know that these are not
+ # typical AIX shared libraries.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ else
+ # We preserve .a as extension for shared libraries through AIX4.2
+ # and later when we are not doing run time linking.
+ library_names_spec='${libname}${release}.a $libname.a'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ fi
+ shlibpath_var=LIBPATH
+ fi
+ ;;
+
+amigaos*)
+ case $host_cpu in
+ powerpc)
+ # Since July 2007 AmigaOS4 officially supports .so libraries.
+    # When compiling the executable, add -use-dynld -Lsobjs: to the compile line.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ ;;
+ m68k)
+ library_names_spec='$libname.ixlibrary $libname.a'
+ # Create ${libname}_ixlibrary.a entries in /sys/libs.
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+ ;;
+ esac
+ ;;
+
+beos*)
+ library_names_spec='${libname}${shared_ext}'
+ dynamic_linker="$host_os ld.so"
+ shlibpath_var=LIBRARY_PATH
+ ;;
+
+bsdi[[45]]*)
+ version_type=linux
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+ # the default ld.so.conf also contains /usr/contrib/lib and
+ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+ # libtool to hard-code these into programs
+ ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+ version_type=windows
+ shrext_cmds=".dll"
+ need_version=no
+ need_lib_prefix=no
+
+ case $GCC,$host_os in
+ yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*)
+ library_names_spec='$libname.dll.a'
+ # DLL is installed to $(libdir)/../bin by postinstall_cmds
+ postinstall_cmds='base_file=`basename \${file}`~
+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog $dir/$dlname \$dldir/$dlname~
+ chmod a+x \$dldir/$dlname~
+ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+ eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+ fi'
+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $RM \$dlpath'
+ shlibpath_overrides_runpath=yes
+
+ case $host_os in
+ cygwin*)
+ # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+ sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib"
+ ;;
+ mingw* | cegcc*)
+ # MinGW DLLs use traditional 'lib' prefix
+ soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+ sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"`
+ if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then
+ # It is most probably a Windows format PATH printed by
+ # mingw gcc, but we are running on Cygwin. Gcc prints its search
+ # path with ; separators, and with drive letters. We can handle the
+ # drive letters (cygwin fileutils understands them), so leave them,
+ # especially as we might pass files found there to a mingw objdump,
+ # which wouldn't understand a cygwinified path. Ahh.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+ else
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ fi
+ ;;
+ pw32*)
+ # pw32 DLLs use 'pw' prefix rather than 'lib'
+ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+ ;;
+ esac
+ ;;
+
+ *)
+ library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib'
+ ;;
+ esac
+ dynamic_linker='Win32 ld.exe'
+ # FIXME: first we should search . and the directory the executable is in
+ shlibpath_var=PATH
+ ;;
+
+darwin* | rhapsody*)
+ dynamic_linker="$host_os dyld"
+ version_type=darwin
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
+ soname_spec='${libname}${release}${major}$shared_ext'
+ shlibpath_overrides_runpath=yes
+ shlibpath_var=DYLD_LIBRARY_PATH
+ shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+m4_if([$1], [],[
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"])
+ sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+ ;;
+
+dgux*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+freebsd1*)
+ dynamic_linker=no
+ ;;
+
+freebsd* | dragonfly*)
+ # DragonFly does not have aout. When/if they implement a new
+ # versioning mechanism, adjust this.
+ if test -x /usr/bin/objformat; then
+ objformat=`/usr/bin/objformat`
+ else
+ case $host_os in
+ freebsd[[123]]*) objformat=aout ;;
+ *) objformat=elf ;;
+ esac
+ fi
+ version_type=freebsd-$objformat
+ case $version_type in
+ freebsd-elf*)
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+ need_version=no
+ need_lib_prefix=no
+ ;;
+ freebsd-*)
+ library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+ need_version=yes
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_os in
+ freebsd2*)
+ shlibpath_overrides_runpath=yes
+ ;;
+ freebsd3.[[01]]* | freebsdelf3.[[01]]*)
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+ freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \
+ freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1)
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+ *) # from 4.6 on, and DragonFly
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+ esac
+ ;;
+
+gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ hardcode_into_libs=yes
+ ;;
+
+hpux9* | hpux10* | hpux11*)
+ # Give a soname corresponding to the major version so that dld.sl refuses to
+ # link against other versions.
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ case $host_cpu in
+ ia64*)
+ shrext_cmds='.so'
+ hardcode_into_libs=yes
+ dynamic_linker="$host_os dld.so"
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ if test "X$HPUX_IA64_MODE" = X32; then
+ sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+ else
+ sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+ fi
+ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+ ;;
+ hppa*64*)
+ shrext_cmds='.sl'
+ hardcode_into_libs=yes
+ dynamic_linker="$host_os dld.sl"
+ shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+ shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+ sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+ ;;
+ *)
+ shrext_cmds='.sl'
+ dynamic_linker="$host_os dld.sl"
+ shlibpath_var=SHLIB_PATH
+ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ ;;
+ esac
+ # HP-UX runs *really* slowly unless shared libraries are mode 555.
+ postinstall_cmds='chmod 555 $lib'
+ ;;
+
+interix[[3-9]]*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+irix5* | irix6* | nonstopux*)
+ case $host_os in
+ nonstopux*) version_type=nonstopux ;;
+ *)
+ if test "$lt_cv_prog_gnu_ld" = yes; then
+ version_type=linux
+ else
+ version_type=irix
+ fi ;;
+ esac
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}${shared_ext}$major'
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+ case $host_os in
+ irix5* | nonstopux*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in # libtool.m4 will add one of these switches to LD
+ *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+ libsuff= shlibsuff= libmagic=32-bit;;
+ *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+ libsuff=32 shlibsuff=N32 libmagic=N32;;
+ *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+ libsuff=64 shlibsuff=64 libmagic=64-bit;;
+ *) libsuff= shlibsuff= libmagic=never-match;;
+ esac
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+ hardcode_into_libs=yes
+ ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+ dynamic_linker=no
+ ;;
+
+# This must be Linux ELF.
+linux* | k*bsd*-gnu)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ # Some binutils ld are patched to set DT_RUNPATH
+ save_LDFLAGS=$LDFLAGS
+ save_libdir=$libdir
+ eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \
+ LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\""
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+ [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null],
+ [shlibpath_overrides_runpath=yes])])
+ LDFLAGS=$save_LDFLAGS
+ libdir=$save_libdir
+
+ # This implies no fast_install, which is unacceptable.
+ # Some rework will be needed to allow for fast_install
+ # before this can be enabled.
+ hardcode_into_libs=yes
+
+ # Append ld.so.conf contents to the search path
+ if test -f /etc/ld.so.conf; then
+ lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '`
+ sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+ fi
+
+ # We used to test for /lib/ld.so.1 and disable shared libraries on
+ # powerpc, because MkLinux only supported shared libraries with the
+ # GNU dynamic linker. Since this was broken with cross compilers,
+ # most powerpc-linux boxes support dynamic linking these days and
+ # people can always --disable-shared, the test was removed, and we
+ # assume the GNU/Linux dynamic linker is in use.
+ dynamic_linker='GNU/Linux ld.so'
+ ;;
+
+netbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ dynamic_linker='NetBSD (a.out) ld.so'
+ else
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ dynamic_linker='NetBSD ld.elf_so'
+ fi
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+
+newsos6)
+ version_type=linux
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+*nto* | *qnx*)
+ version_type=qnx
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ dynamic_linker='ldqnx.so'
+ ;;
+
+openbsd*)
+ version_type=sunos
+ sys_lib_dlsearch_path_spec="/usr/lib"
+ need_lib_prefix=no
+ # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
+ case $host_os in
+ openbsd3.3 | openbsd3.3.*) need_version=yes ;;
+ *) need_version=no ;;
+ esac
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ case $host_os in
+ openbsd2.[[89]] | openbsd2.[[89]].*)
+ shlibpath_overrides_runpath=no
+ ;;
+ *)
+ shlibpath_overrides_runpath=yes
+ ;;
+ esac
+ else
+ shlibpath_overrides_runpath=yes
+ fi
+ ;;
+
+os2*)
+ libname_spec='$name'
+ shrext_cmds=".dll"
+ need_lib_prefix=no
+ library_names_spec='$libname${shared_ext} $libname.a'
+ dynamic_linker='OS/2 ld.exe'
+ shlibpath_var=LIBPATH
+ ;;
+
+osf3* | osf4* | osf5*)
+ version_type=osf
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}${shared_ext}$major'
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+ ;;
+
+rdos*)
+ dynamic_linker=no
+ ;;
+
+solaris*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ # ldd complains unless libraries are executable
+ postinstall_cmds='chmod +x $lib'
+ ;;
+
+sunos4*)
+ version_type=sunos
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ fi
+ need_version=yes
+ ;;
+
+sysv4 | sysv4.3*)
+ version_type=linux
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_vendor in
+ sni)
+ shlibpath_overrides_runpath=no
+ need_lib_prefix=no
+ runpath_var=LD_RUN_PATH
+ ;;
+ siemens)
+ need_lib_prefix=no
+ ;;
+ motorola)
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+ ;;
+ esac
+ ;;
+
+sysv4*MP*)
+ if test -d /usr/nec ;then
+ version_type=linux
+ library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
+ soname_spec='$libname${shared_ext}.$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ fi
+ ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ version_type=freebsd-elf
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ if test "$with_gnu_ld" = yes; then
+ sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+ else
+ sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+ case $host_os in
+ sco3.2v5*)
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+ ;;
+ esac
+ fi
+ sys_lib_dlsearch_path_spec='/usr/lib'
+ ;;
+
+tpf*)
+ # TPF is a cross-target only. Preferred cross-host = GNU/Linux.
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+uts4*)
+ version_type=linux
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+*)
+ dynamic_linker=no
+ ;;
+esac
+AC_MSG_RESULT([$dynamic_linker])
+test "$dynamic_linker" = no && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
+ sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+fi
+if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
+ sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+fi
+
+_LT_DECL([], [variables_saved_for_relink], [1],
+ [Variables whose values should be saved in libtool wrapper scripts and
+ restored at link time])
+_LT_DECL([], [need_lib_prefix], [0],
+ [Do we need the "lib" prefix for modules?])
+_LT_DECL([], [need_version], [0], [Do we need a version for libraries?])
+_LT_DECL([], [version_type], [0], [Library versioning type])
+_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable])
+_LT_DECL([], [shlibpath_var], [0],[Shared library path variable])
+_LT_DECL([], [shlibpath_overrides_runpath], [0],
+ [Is shlibpath searched before the hard-coded library search path?])
+_LT_DECL([], [libname_spec], [1], [Format of library name prefix])
+_LT_DECL([], [library_names_spec], [1],
+ [[List of archive names. First name is the real one, the rest are links.
+ The last name is the one that the linker finds with -lNAME]])
+_LT_DECL([], [soname_spec], [1],
+ [[The coded name of the library, if different from the real name]])
+_LT_DECL([], [postinstall_cmds], [2],
+ [Command to use after installation of a shared archive])
+_LT_DECL([], [postuninstall_cmds], [2],
+ [Command to use after uninstallation of a shared archive])
+_LT_DECL([], [finish_cmds], [2],
+ [Commands used to finish a libtool library installation in a directory])
+_LT_DECL([], [finish_eval], [1],
+ [[As "finish_cmds", except a single script fragment to be evaled but
+ not shown]])
+_LT_DECL([], [hardcode_into_libs], [0],
+ [Whether we should hardcode library paths into libraries])
+_LT_DECL([], [sys_lib_search_path_spec], [2],
+ [Compile-time system search path for libraries])
+_LT_DECL([], [sys_lib_dlsearch_path_spec], [2],
+ [Run-time system search path for libraries])
+])# _LT_SYS_DYNAMIC_LINKER
+
+
+# _LT_PATH_TOOL_PREFIX(TOOL)
+# --------------------------
+# find a file program which can recognize a shared library
+AC_DEFUN([_LT_PATH_TOOL_PREFIX],
+[m4_require([_LT_DECL_EGREP])dnl
+AC_MSG_CHECKING([for $1])
+AC_CACHE_VAL(lt_cv_path_MAGIC_CMD,
+[case $MAGIC_CMD in
+[[\\/*] | ?:[\\/]*])
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+*)
+ lt_save_MAGIC_CMD="$MAGIC_CMD"
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+dnl $ac_dummy forces splitting on constant user-supplied paths.
+dnl POSIX.2 word splitting is done only on the output of word expansions,
+dnl not every word. This closes a longstanding sh security hole.
+ ac_dummy="m4_if([$2], , $PATH, [$2])"
+ for ac_dir in $ac_dummy; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$1; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/$1"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ $EGREP "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$lt_save_ifs"
+ MAGIC_CMD="$lt_save_MAGIC_CMD"
+ ;;
+esac])
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ AC_MSG_RESULT($MAGIC_CMD)
+else
+ AC_MSG_RESULT(no)
+fi
+_LT_DECL([], [MAGIC_CMD], [0],
+ [Used to examine libraries when file_magic_cmd begins with "file"])dnl
+])# _LT_PATH_TOOL_PREFIX
+
+# Old name:
+AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], [])
+
+
+# _LT_PATH_MAGIC
+# --------------
+# find a file program which can recognize a shared library
+m4_defun([_LT_PATH_MAGIC],
+[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH)
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+ if test -n "$ac_tool_prefix"; then
+ _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH)
+ else
+ MAGIC_CMD=:
+ fi
+fi
+])# _LT_PATH_MAGIC
+
+
+# LT_PATH_LD
+# ----------
+# find the pathname to the GNU or non-GNU linker
+AC_DEFUN([LT_PATH_LD],
+[AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_DECL_EGREP])dnl
+
+AC_ARG_WITH([gnu-ld],
+ [AS_HELP_STRING([--with-gnu-ld],
+ [assume the C compiler uses GNU ld @<:@default=no@:>@])],
+ [test "$withval" = no || with_gnu_ld=yes],
+ [with_gnu_ld=no])dnl
+
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ AC_MSG_CHECKING([for ld used by $CC])
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [[\\/]]* | ?:[[\\/]]*)
+ re_direlt='/[[^/]][[^/]]*/\.\./'
+ # Canonicalize the pathname of ld
+ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ AC_MSG_CHECKING([for GNU ld])
+else
+ AC_MSG_CHECKING([for non-GNU ld])
+fi
+AC_CACHE_VAL(lt_cv_path_LD,
+[if test -z "$LD"; then
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some variants of GNU ld only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break
+ ;;
+ *)
+ test "$with_gnu_ld" != yes && break
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi])
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ AC_MSG_RESULT($LD)
+else
+ AC_MSG_RESULT(no)
+fi
+test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
+_LT_PATH_LD_GNU
+AC_SUBST([LD])
+
+_LT_TAGDECL([], [LD], [1], [The linker used to build libraries])
+])# LT_PATH_LD
+
+# Old names:
+AU_ALIAS([AM_PROG_LD], [LT_PATH_LD])
+AU_ALIAS([AC_PROG_LD], [LT_PATH_LD])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_PROG_LD], [])
+dnl AC_DEFUN([AC_PROG_LD], [])
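+
+# Illustrative note (example only): configure scripts normally pull this in
+# through LT_INIT; after it runs, the selected linker is available as $LD and
+# the "is it GNU ld" result as $with_gnu_ld (set by _LT_PATH_LD_GNU below).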
+
+
+# _LT_PATH_LD_GNU
+# ---------------
+m4_defun([_LT_PATH_LD_GNU],
+[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld,
+[# I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ lt_cv_prog_gnu_ld=yes
+ ;;
+*)
+ lt_cv_prog_gnu_ld=no
+ ;;
+esac])
+with_gnu_ld=$lt_cv_prog_gnu_ld
+])# _LT_PATH_LD_GNU
+
+
+# _LT_CMD_RELOAD
+# --------------
+# find reload flag for linker
+# -- PORTME Some linkers may need a different reload flag.
+m4_defun([_LT_CMD_RELOAD],
+[AC_CACHE_CHECK([for $LD option to reload object files],
+ lt_cv_ld_reload_flag,
+ [lt_cv_ld_reload_flag='-r'])
+reload_flag=$lt_cv_ld_reload_flag
+case $reload_flag in
+"" | " "*) ;;
+*) reload_flag=" $reload_flag" ;;
+esac
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+case $host_os in
+ darwin*)
+ if test "$GCC" = yes; then
+ reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
+ else
+ reload_cmds='$LD$reload_flag -o $output$reload_objs'
+ fi
+ ;;
+esac
+_LT_DECL([], [reload_flag], [1], [How to create reloadable object files])dnl
+_LT_DECL([], [reload_cmds], [2])dnl
+])# _LT_CMD_RELOAD
+
+
+# _LT_CHECK_MAGIC_METHOD
+# ----------------------
+# how to check for library dependencies
+# -- PORTME fill in with the dynamic library characteristics
+m4_defun([_LT_CHECK_MAGIC_METHOD],
+[m4_require([_LT_DECL_EGREP])
+m4_require([_LT_DECL_OBJDUMP])
+AC_CACHE_CHECK([how to recognize dependent libraries],
+lt_cv_deplibs_check_method,
+[lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# `unknown' -- same as none, but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by making test program.
+# 'file_magic [[regex]]' -- check by looking for files in library path
+# which responds to the $file_magic_cmd with a given extended regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
+
+case $host_os in
+aix[[4-9]]*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+beos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+bsdi[[45]]*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ ;;
+
+cygwin*)
+ # func_win32_libid is a shell function defined in ltmain.sh
+ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+ lt_cv_file_magic_cmd='func_win32_libid'
+ ;;
+
+mingw* | pw32*)
+  # Base MSYS/MinGW do not provide the 'file' command needed by the
+  # func_win32_libid shell function, so use a weaker test based on 'objdump',
+  # unless we find 'file', for example because we are cross-compiling.
+ if ( file / ) >/dev/null 2>&1; then
+ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+ lt_cv_file_magic_cmd='func_win32_libid'
+ else
+ lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ fi
+ ;;
+
+cegcc)
+ # use the weaker test based on 'objdump'. See mingw*.
+ lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ ;;
+
+darwin* | rhapsody*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+freebsd* | dragonfly*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+ case $host_cpu in
+ i*86 )
+ # Not sure whether the presence of OpenBSD here was a mistake.
+ # Let's accept both of them until this is cleared up.
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ ;;
+ esac
+ else
+ lt_cv_deplibs_check_method=pass_all
+ fi
+ ;;
+
+gnu*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+hpux10.20* | hpux11*)
+ lt_cv_file_magic_cmd=/usr/bin/file
+ case $host_cpu in
+ ia64*)
+ lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64'
+ lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
+ ;;
+ hppa*64*)
+ [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]']
+ lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
+ ;;
+ *)
+ lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library'
+ lt_cv_file_magic_test_file=/usr/lib/libc.sl
+ ;;
+ esac
+ ;;
+
+interix[[3-9]]*)
+ # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$'
+ ;;
+
+irix5* | irix6* | nonstopux*)
+ case $LD in
+ *-32|*"-32 ") libmagic=32-bit;;
+ *-n32|*"-n32 ") libmagic=N32;;
+ *-64|*"-64 ") libmagic=64-bit;;
+ *) libmagic=never-match;;
+ esac
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+# This must be Linux ELF.
+linux* | k*bsd*-gnu)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+netbsd*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$'
+ fi
+ ;;
+
+newos6*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libnls.so
+ ;;
+
+*nto* | *qnx*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+openbsd*)
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
+ fi
+ ;;
+
+osf3* | osf4* | osf5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+rdos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+solaris*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv4 | sysv4.3*)
+ case $host_vendor in
+ motorola)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]'
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+ ;;
+ ncr)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ sequent)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )'
+ ;;
+ sni)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib"
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+ siemens)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ pc)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ esac
+ ;;
+
+tpf*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+esac
+])
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+_LT_DECL([], [deplibs_check_method], [1],
+ [Method to check whether dependent libraries are shared objects])
+_LT_DECL([], [file_magic_cmd], [1],
+ [Command to use when deplibs_check_method == "file_magic"])
+])# _LT_CHECK_MAGIC_METHOD
+
+
+# LT_PATH_NM
+# ----------
+# find the pathname to a BSD- or MS-compatible name lister
+AC_DEFUN([LT_PATH_NM],
+[AC_REQUIRE([AC_PROG_CC])dnl
+AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM,
+[if test -n "$NM"; then
+ # Let the user override the test.
+ lt_cv_path_NM="$NM"
+else
+ lt_nm_to_check="${ac_tool_prefix}nm"
+ if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+ lt_nm_to_check="$lt_nm_to_check nm"
+ fi
+ for lt_tmp_nm in $lt_nm_to_check; do
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ tmp_nm="$ac_dir/$lt_tmp_nm"
+ if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ # nm: unknown option "B" ignored
+ # Tru64's nm complains that /dev/null is an invalid object file
+ case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in
+ */dev/null* | *'Invalid file or object type'*)
+ lt_cv_path_NM="$tmp_nm -B"
+ break
+ ;;
+ *)
+ case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in
+ */dev/null*)
+ lt_cv_path_NM="$tmp_nm -p"
+ break
+ ;;
+ *)
+ lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+ continue # so that we can try to find one that supports BSD flags
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+ done
+ : ${lt_cv_path_NM=no}
+fi])
+if test "$lt_cv_path_NM" != "no"; then
+ NM="$lt_cv_path_NM"
+else
+ # Didn't find any BSD compatible name lister, look for dumpbin.
+ AC_CHECK_TOOLS(DUMPBIN, ["dumpbin -symbols" "link -dump -symbols"], :)
+ AC_SUBST([DUMPBIN])
+ if test "$DUMPBIN" != ":"; then
+ NM="$DUMPBIN"
+ fi
+fi
+test -z "$NM" && NM=nm
+AC_SUBST([NM])
+_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl
+
+AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface],
+ [lt_cv_nm_interface="BSD nm"
+ echo "int some_variable = 0;" > conftest.$ac_ext
+ (eval echo "\"\$as_me:__oline__: $ac_compile\"" >&AS_MESSAGE_LOG_FD)
+ (eval "$ac_compile" 2>conftest.err)
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ (eval echo "\"\$as_me:__oline__: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD)
+ (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ (eval echo "\"\$as_me:__oline__: output\"" >&AS_MESSAGE_LOG_FD)
+ cat conftest.out >&AS_MESSAGE_LOG_FD
+ if $GREP 'External.*some_variable' conftest.out > /dev/null; then
+ lt_cv_nm_interface="MS dumpbin"
+ fi
+ rm -f conftest*])
+])# LT_PATH_NM
+
+# Old names:
+AU_ALIAS([AM_PROG_NM], [LT_PATH_NM])
+AU_ALIAS([AC_PROG_NM], [LT_PATH_NM])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_PROG_NM], [])
+dnl AC_DEFUN([AC_PROG_NM], [])
+
+
+# LT_LIB_M
+# --------
+# check for math library
+AC_DEFUN([LT_LIB_M],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+LIBM=
+case $host in
+*-*-beos* | *-*-cygwin* | *-*-pw32* | *-*-darwin*)
+  # These systems don't have libm, or don't need it
+ ;;
+*-ncr-sysv4.3*)
+ AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw")
+ AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm")
+ ;;
+*)
+ AC_CHECK_LIB(m, cos, LIBM="-lm")
+ ;;
+esac
+AC_SUBST([LIBM])
+])# LT_LIB_M
+
+# Old name:
+AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_CHECK_LIBM], [])
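+
+# Illustrative use (example only): LT_LIB_M AC_SUBSTs LIBM, so a program that
+# needs the math library can add it to its link line in Makefile.am, e.g.
+#   foo_LDADD = $(LIBM)
+# where "foo" is a placeholder program name.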
+
+
+# _LT_COMPILER_NO_RTTI([TAGNAME])
+# -------------------------------
+m4_defun([_LT_COMPILER_NO_RTTI],
+[m4_require([_LT_TAG_COMPILER])dnl
+
+_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+
+if test "$GCC" = yes; then
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin'
+
+ _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions],
+ lt_cv_prog_compiler_rtti_exceptions,
+ [-fno-rtti -fno-exceptions], [],
+ [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"])
+fi
+_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1],
+ [Compiler flag to turn off builtin functions])
+])# _LT_COMPILER_NO_RTTI
+
+
+# _LT_CMD_GLOBAL_SYMBOLS
+# ----------------------
+m4_defun([_LT_CMD_GLOBAL_SYMBOLS],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+AC_REQUIRE([LT_PATH_LD])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+AC_MSG_CHECKING([command to parse $NM output from $compiler object])
+AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe],
+[
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix. What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[[BCDEGRST]]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)'
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+ symcode='[[BCDT]]'
+ ;;
+cygwin* | mingw* | pw32* | cegcc*)
+ symcode='[[ABCDGISTW]]'
+ ;;
+hpux*)
+ if test "$host_cpu" = ia64; then
+ symcode='[[ABCDEGRST]]'
+ fi
+ ;;
+irix* | nonstopux*)
+ symcode='[[BCDEGRST]]'
+ ;;
+osf*)
+ symcode='[[BCDEGQRST]]'
+ ;;
+solaris*)
+ symcode='[[BDRT]]'
+ ;;
+sco3.2v5*)
+ symcode='[[DT]]'
+ ;;
+sysv4.2uw2*)
+ symcode='[[DT]]'
+ ;;
+sysv5* | sco5v6* | unixware* | OpenUNIX*)
+ symcode='[[ABDT]]'
+ ;;
+sysv4)
+ symcode='[[DFNSTU]]'
+ ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+case `$NM -V 2>&1` in
+*GNU* | *'with BFD'*)
+ symcode='[[ABCDGIRSTW]]' ;;
+esac
+
+# Transform an extracted symbol line into a proper C declaration.
+# Some systems (esp. on ia64) link data and code symbols differently,
+# so use this general approach.
+lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'"
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'"
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $build_os in
+mingw*)
+ opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+ ;;
+esac
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+ # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+ symxfrm="\\1 $ac_symprfx\\2 \\2"
+
+ # Write the raw and C identifiers.
+ if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+ # Fake it for dumpbin and say T for any non-static function
+ # and D for any global variable.
+ # Also find C++ and __fastcall symbols from MSVC++,
+ # which start with @ or ?.
+ lt_cv_sys_global_symbol_pipe="$AWK ['"\
+" {last_section=section; section=\$ 3};"\
+" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
+" \$ 0!~/External *\|/{next};"\
+" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
+" {if(hide[section]) next};"\
+" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\
+" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\
+" s[1]~/^[@?]/{print s[1], s[1]; next};"\
+" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\
+" ' prfx=^$ac_symprfx]"
+ else
+ lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+ fi
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+
+ rm -f conftest*
+ cat > conftest.$ac_ext <<_LT_EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(void);
+void nm_test_func(void){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+_LT_EOF
+
+ if AC_TRY_EVAL(ac_compile); then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) && test -s "$nlist"; then
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+ if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<_LT_EOF > conftest.$ac_ext
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_LT_EOF
+ # Now generate the symbol file.
+ eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
+
+ cat <<_LT_EOF >> conftest.$ac_ext
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ void *address;
+}
+lt__PROGRAM__LTX_preloaded_symbols[[]] =
+{
+ { "@PROGRAM@", (void *) 0 },
+_LT_EOF
+ $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+ cat <<\_LT_EOF >> conftest.$ac_ext
+ {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt__PROGRAM__LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+_LT_EOF
+ # Now try linking the two files.
+ mv conftest.$ac_objext conftstm.$ac_objext
+ lt_save_LIBS="$LIBS"
+ lt_save_CFLAGS="$CFLAGS"
+ LIBS="conftstm.$ac_objext"
+ CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)"
+ if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then
+ pipe_works=yes
+ fi
+ LIBS="$lt_save_LIBS"
+ CFLAGS="$lt_save_CFLAGS"
+ else
+ echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD
+ fi
+ else
+ echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD
+ fi
+ else
+ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD
+ fi
+ else
+ echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD
+ cat conftest.$ac_ext >&5
+ fi
+ rm -rf conftest* conftst*
+
+ # Do not use the global_symbol_pipe unless it works.
+ if test "$pipe_works" = yes; then
+ break
+ else
+ lt_cv_sys_global_symbol_pipe=
+ fi
+done
+])
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+ lt_cv_sys_global_symbol_to_cdecl=
+fi
+if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
+ AC_MSG_RESULT(failed)
+else
+ AC_MSG_RESULT(ok)
+fi
+
+_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1],
+ [Take the output of nm and produce a listing of raw symbols and C names])
+_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1],
+ [Transform the output of nm in a proper C declaration])
+_LT_DECL([global_symbol_to_c_name_address],
+ [lt_cv_sys_global_symbol_to_c_name_address], [1],
+ [Transform the output of nm in a C name address pair])
+_LT_DECL([global_symbol_to_c_name_address_lib_prefix],
+ [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1],
+ [Transform the output of nm in a C name address pair when lib prefix is needed])
+]) # _LT_CMD_GLOBAL_SYMBOLS
+
+
+# _LT_COMPILER_PIC([TAGNAME])
+# ---------------------------
+m4_defun([_LT_COMPILER_PIC],
+[m4_require([_LT_TAG_COMPILER])dnl
+_LT_TAGVAR(lt_prog_compiler_wl, $1)=
+_LT_TAGVAR(lt_prog_compiler_pic, $1)=
+_LT_TAGVAR(lt_prog_compiler_static, $1)=
+
+AC_MSG_CHECKING([for $compiler option to produce PIC])
+m4_if([$1], [CXX], [
+ # C++ specific cases for pic, static, wl, etc.
+ if test "$GXX" = yes; then
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+
+ case $host_os in
+ aix*)
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ m68k)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ esac
+ ;;
+
+ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+ mingw* | cygwin* | os2* | pw32* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ # Although the cygwin gcc ignores -fPIC, still need this for old-style
+ # (--disable-auto-import) libraries
+ m4_if([$1], [GCJ], [],
+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+ ;;
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+ ;;
+ *djgpp*)
+ # DJGPP does not support shared libraries at all
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+ ;;
+ interix[[3-9]]*)
+ # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+ # Instead, we relocate shared libraries at runtime.
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
+ fi
+ ;;
+ hpux*)
+ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+ # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag
+ # sets the default TLS model and affects inlining.
+ case $host_cpu in
+ hppa*64*)
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ esac
+ ;;
+ *qnx* | *nto*)
+ # QNX uses GNU C++, but we need to pass the -shared option too; otherwise
+ # it will core dump.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ esac
+ else
+ case $host_os in
+ aix[[4-9]]*)
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ else
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+ chorus*)
+ case $cc_basename in
+ cxch68*)
+ # Green Hills C++ Compiler
+ # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
+ ;;
+ esac
+ ;;
+ dgux*)
+ case $cc_basename in
+ ec++*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ ;;
+ ghcx*)
+ # Green Hills C++ Compiler
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ freebsd* | dragonfly*)
+ # FreeBSD uses GNU C++
+ ;;
+ hpux9* | hpux10* | hpux11*)
+ case $cc_basename in
+ CC*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive'
+ if test "$host_cpu" != ia64; then
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+ fi
+ ;;
+ aCC*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive'
+ case $host_cpu in
+ hppa*64*|ia64*)
+ # +Z the default
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+ ;;
+ esac
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ interix*)
+ # This is c89, which is MS Visual C++ (no shared libs).
+ # Does anyone want to do a port?
+ ;;
+ irix5* | irix6* | nonstopux*)
+ case $cc_basename in
+ CC*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ # CC pic flag -KPIC is the default.
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ linux* | k*bsd*-gnu)
+ case $cc_basename in
+ KCC*)
+ # KAI C++ Compiler
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ ecpc* )
+ # old Intel C++ for x86_64 which still supported -KPIC.
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+ ;;
+ icpc* )
+ # Intel C++ used to be incompatible with GCC.
+ # ICC 10 doesn't accept -KPIC any more.
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+ ;;
+ pgCC* | pgcpp*)
+ # Portland Group C++ compiler
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ cxx*)
+ # Compaq C++
+ # Make sure the PIC flag is empty. It appears that all Alpha
+ # Linux and Compaq Tru64 Unix objects are PIC.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+ xlc* | xlC*)
+ # IBM XL 8.0 on PPC
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C++ 5.9
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+ lynxos*)
+ ;;
+ m88k*)
+ ;;
+ mvs*)
+ case $cc_basename in
+ cxx*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ netbsd*)
+ ;;
+ *qnx* | *nto*)
+ # QNX uses GNU C++, but we need to pass the -shared option too; otherwise
+ # it will core dump.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+ ;;
+ osf3* | osf4* | osf5*)
+ case $cc_basename in
+ KCC*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
+ ;;
+ RCC*)
+ # Rational C++ 2.4.1
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+ ;;
+ cxx*)
+ # Digital/Compaq C++
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ # Make sure the PIC flag is empty. It appears that all Alpha
+ # Linux and Compaq Tru64 Unix objects are PIC.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ psos*)
+ ;;
+ solaris*)
+ case $cc_basename in
+ CC*)
+ # Sun C++ 4.2, 5.x and Centerline C++
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+ ;;
+ gcx*)
+ # Green Hills C++ Compiler
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ sunos4*)
+ case $cc_basename in
+ CC*)
+ # Sun C++ 4.x
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ lcc*)
+ # Lucid
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+ case $cc_basename in
+ CC*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ esac
+ ;;
+ tandem*)
+ case $cc_basename in
+ NCC*)
+ # NonStop-UX NCC 3.20
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ ;;
+ *)
+ ;;
+ esac
+ ;;
+ vxworks*)
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+ ;;
+ esac
+ fi
+],
+[
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+
+ case $host_os in
+ aix*)
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ m68k)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ esac
+ ;;
+
+ beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+
+ mingw* | cygwin* | pw32* | os2* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ # Although the cygwin gcc ignores -fPIC, still need this for old-style
+ # (--disable-auto-import) libraries
+ m4_if([$1], [GCJ], [],
+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+ ;;
+
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+ ;;
+
+ hpux*)
+ # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+ # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag
+ # sets the default TLS model and affects inlining.
+ case $host_cpu in
+ hppa*64*)
+ # +Z the default
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ esac
+ ;;
+
+ interix[[3-9]]*)
+ # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+ # Instead, we relocate shared libraries at runtime.
+ ;;
+
+ msdosdjgpp*)
+ # Just because we use GCC doesn't mean we suddenly get shared libraries
+ # on systems that don't support them.
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+ enable_shared=no
+ ;;
+
+ *nto* | *qnx*)
+ # QNX uses GNU C++, but we need to pass the -shared option too; otherwise
+ # it will core dump.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
+ fi
+ ;;
+
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ ;;
+ esac
+ else
+ # PORTME Check for flag to pass linker flags through the system compiler.
+ case $host_os in
+ aix*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ else
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+
+ mingw* | cygwin* | pw32* | os2* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ m4_if([$1], [GCJ], [],
+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
+ # not for PA HP-UX.
+ case $host_cpu in
+ hppa*64*|ia64*)
+ # +Z the default
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+ ;;
+ esac
+ # Is there a better lt_prog_compiler_static that works with the bundled CC?
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive'
+ ;;
+
+ irix5* | irix6* | nonstopux*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ # PIC (with -KPIC) is the default.
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+
+ linux* | k*bsd*-gnu)
+ case $cc_basename in
+ # old Intel for x86_64 which still supported -KPIC.
+ ecc*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+ ;;
+ # icc used to be incompatible with GCC.
+ # ICC 10 doesn't accept -KPIC any more.
+ icc* | ifort*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+ ;;
+ # Lahey Fortran 8.1.
+ lf95*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='--static'
+ ;;
+ pgcc* | pgf77* | pgf90* | pgf95*)
+ # Portland Group compilers (*not* the Pentium gcc compiler,
+ # which looks to be a dead project)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ ccc*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ # All Alpha code is PIC.
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+ xl*)
+ # IBM XL C 8.0/Fortran 10.1 on PPC
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C 5.9
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ ;;
+ *Sun\ F*)
+ # Sun Fortran 8.3 passes all unrecognized flags to the linker
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)=''
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+
+ newsos6)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+
+ *nto* | *qnx*)
+ # QNX uses GNU C++, but we need to pass the -shared option too; otherwise
+ # it will core dump.
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+ ;;
+
+ osf3* | osf4* | osf5*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ # All OSF/1 code is PIC.
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+
+ rdos*)
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+ ;;
+
+ solaris*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ case $cc_basename in
+ f77* | f90* | f95*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';;
+ esac
+ ;;
+
+ sunos4*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+
+ sysv4 | sysv4.2uw2* | sysv4.3*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec ;then
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ fi
+ ;;
+
+ sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+
+ unicos*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+ ;;
+
+ uts4*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+
+ *)
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+ ;;
+ esac
+ fi
+])
+case $host_os in
+ # For platforms which do not support PIC, -DPIC is meaningless:
+ *djgpp*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+ ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])"
+ ;;
+esac
+AC_MSG_RESULT([$_LT_TAGVAR(lt_prog_compiler_pic, $1)])
+_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1],
+ [How to pass a linker flag through the compiler])
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
+ _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works],
+ [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)],
+ [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [],
+ [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in
+ "" | " "*) ;;
+ *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;;
+ esac],
+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)=
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no])
+fi
+_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1],
+ [Additional compiler flags for building library objects])
+
+#
+# Check to make sure the static flag actually works.
+#
+wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\"
+_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works],
+ _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1),
+ $lt_tmp_static_flag,
+ [],
+ [_LT_TAGVAR(lt_prog_compiler_static, $1)=])
+_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1],
+ [Compiler flag to prevent dynamic linking])
+])# _LT_COMPILER_PIC
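+
+# For illustration only (a typical outcome, not guaranteed on every host): with
+# GCC on a GNU/Linux system the checks above commonly leave the tag variables
+# roughly as follows in the generated libtool script:
+#   lt_prog_compiler_wl="-Wl,"
+#   lt_prog_compiler_pic="-fPIC -DPIC"
+#   lt_prog_compiler_static="-static"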
+
+
+# _LT_LINKER_SHLIBS([TAGNAME])
+# ----------------------------
+# See if the linker supports building shared libraries.
+m4_defun([_LT_LINKER_SHLIBS],
+[AC_REQUIRE([LT_PATH_LD])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
+m4_if([$1], [CXX], [
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+ case $host_os in
+ aix[[4-9]]*)
+ # If we're using GNU nm, then we don't want the "-C" option.
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ else
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ fi
+ ;;
+ pw32*)
+ _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds"
+ ;;
+ cygwin* | mingw* | cegcc*)
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;/^.*[[ ]]__nm__/s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+ ;;
+ *)
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+ ;;
+ esac
+ _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
+], [
+ runpath_var=
+ _LT_TAGVAR(allow_undefined_flag, $1)=
+ _LT_TAGVAR(always_export_symbols, $1)=no
+ _LT_TAGVAR(archive_cmds, $1)=
+ _LT_TAGVAR(archive_expsym_cmds, $1)=
+ _LT_TAGVAR(compiler_needs_object, $1)=no
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)=
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+ _LT_TAGVAR(hardcode_automatic, $1)=no
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+ _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)=
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=
+ _LT_TAGVAR(hardcode_minus_L, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+ _LT_TAGVAR(inherit_rpath, $1)=no
+ _LT_TAGVAR(link_all_deplibs, $1)=unknown
+ _LT_TAGVAR(module_cmds, $1)=
+ _LT_TAGVAR(module_expsym_cmds, $1)=
+ _LT_TAGVAR(old_archive_from_new_cmds, $1)=
+ _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)=
+ _LT_TAGVAR(thread_safe_flag_spec, $1)=
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ # include_expsyms should be a list of space-separated symbols to be *always*
+ # included in the symbol list
+ _LT_TAGVAR(include_expsyms, $1)=
+ # exclude_expsyms can be an extended regexp of symbols to exclude
+ # it will be wrapped by ` (' and `)$', so one must not match beginning or
+ # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+ # as well as any symbol that contains `d'.
+ _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
+ # Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+ # platforms (ab)use it in PIC code, but their linkers get confused if
+ # the symbol is explicitly referenced. Since portable code cannot
+ # rely on this symbol name, it's probably fine to never include it in
+ # preloaded symbol tables.
+ # Exclude shared library initialization/finalization symbols.
+dnl Note also adjust exclude_expsyms for C++ above.
+ extract_expsyms_cmds=
+
+ case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+ # FIXME: the MSVC++ port hasn't been tested in a loooong time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$GCC" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+ interix*)
+ # we just hope/assume this is gcc and not c89 (= MSVC++)
+ with_gnu_ld=yes
+ ;;
+ openbsd*)
+ with_gnu_ld=no
+ ;;
+ esac
+
+ _LT_TAGVAR(ld_shlibs, $1)=yes
+ if test "$with_gnu_ld" = yes; then
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ wlarc='${wl}'
+
+ # Set some defaults for GNU ld with shared library support. These
+ # are reset later if shared libraries are not supported. Putting them
+ # here allows them to be overridden if necessary.
+ runpath_var=LD_RUN_PATH
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ # ancient GNU ld didn't support --whole-archive et al.
+ if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+ _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ fi
+ supports_anon_versioning=no
+ case `$LD -v 2>&1` in
+ *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11
+ *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+ *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+ *\ 2.11.*) ;; # other 2.11 versions
+ *) supports_anon_versioning=yes ;;
+ esac
+
+ # See if GNU ld supports shared libraries.
+ case $host_os in
+ aix[[3-9]]*)
+ # On AIX/PPC, the GNU linker is very broken
+ if test "$host_cpu" != ia64; then
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.9.1, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared library support. If you
+*** really care about shared libraries, you may want to modify your PATH
+*** so that a non-GNU linker is found, and then restart.
+
+_LT_EOF
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)=''
+ ;;
+ m68k)
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ ;;
+ esac
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+ # as there is no search path for DLLs.
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=no
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols'
+
+ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is; otherwise, prepend...
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname.def;
+ else
+ echo EXPORTS > $output_objdir/$soname.def;
+ cat $export_symbols >> $output_objdir/$soname.def;
+ fi~
+ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ interix[[3-9]]*)
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+ # Instead, shared libraries are loaded at an image base (0x10000000 by
+ # default) and relocated if they conflict, which is a slow, very
+ # memory-consuming and fragmenting process. To avoid this, we pick a random,
+ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+ # time. Moving up from 0x10000000 also allows more sbrk(2) space.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ ;;
+
+ gnu* | linux* | tpf* | k*bsd*-gnu)
+ tmp_diet=no
+ if test "$host_os" = linux-dietlibc; then
+ case $cc_basename in
+ diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn)
+ esac
+ fi
+ if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+ && test "$tmp_diet" = no
+ then
+ tmp_addflag=
+ tmp_sharedflag='-shared'
+ case $cc_basename,$host_cpu in
+ pgcc*) # Portland Group C compiler
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ tmp_addflag=' $pic_flag'
+ ;;
+ pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ tmp_addflag=' $pic_flag -Mnomain' ;;
+ ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64
+ tmp_addflag=' -i_dynamic' ;;
+ efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64
+ tmp_addflag=' -i_dynamic -nofor_main' ;;
+ ifc* | ifort*) # Intel Fortran compiler
+ tmp_addflag=' -nofor_main' ;;
+ lf95*) # Lahey Fortran 8.1
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ tmp_sharedflag='--shared' ;;
+ xl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+ tmp_sharedflag='-qmkshrobj'
+ tmp_addflag= ;;
+ esac
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*) # Sun C 5.9
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(compiler_needs_object, $1)=yes
+ tmp_sharedflag='-G' ;;
+ *Sun\ F*) # Sun Fortran 8.3
+ tmp_sharedflag='-G' ;;
+ esac
+ _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+
+ if test "x$supports_anon_versioning" = xyes; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+ fi
+
+ case $cc_basename in
+ xlf*)
+ # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+ _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir'
+ _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib'
+ if test "x$supports_anon_versioning" = xyes; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+ fi
+ ;;
+ esac
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+ wlarc=
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ fi
+ ;;
+
+ solaris*)
+ if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems. Therefore, libtool
+*** is disabling shared library support. We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+ elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+ case `$LD -v 2>&1` in
+ *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*)
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ cat <<_LT_EOF 1>&2
+
+*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot
+*** reliably create shared libraries on SCO systems. Therefore, libtool
+*** is disabling shared library support. We urge you to upgrade GNU
+*** binutils to release 2.16.91.0.3 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+ ;;
+ *)
+ # For security reasons, it is highly recommended that you always
+ # use absolute paths for naming shared libraries, and exclude the
+ # DT_RUNPATH tag from executables and libraries. But doing so
+ # requires that you compile everything twice, which is a pain.
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+ ;;
+
+ sunos4*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ wlarc=
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ *)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+
+ if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then
+ runpath_var=
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)=
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ fi
+ else
+ # PORTME fill in a description of your system's linker (not GNU ld)
+ case $host_os in
+ aix3*)
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=yes
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ _LT_TAGVAR(hardcode_direct, $1)=unsupported
+ fi
+ ;;
+
+ aix[[4-9]]*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ # If we're using GNU nm, then we don't want the "-C" option.
+ # -C means demangle to AIX nm, but means don't demangle with GNU nm
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ else
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ fi
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
+ for ld_flag in $LDFLAGS; do
+ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ aix_use_runtimelinking=yes
+ break
+ fi
+ done
+ ;;
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ _LT_TAGVAR(archive_cmds, $1)=''
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ _LT_TAGVAR(file_list_spec, $1)='${wl}-f,'
+
+ if test "$GCC" = yes; then
+ case $host_os in aix4.[[012]]|aix4.[[012]].*)
+ # We only want to do this on AIX 4.2 and lower; the check
+ # below for broken collect2 doesn't work under 4.3+.
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" &&
+ strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ :
+ else
+ # We have old collect2
+ _LT_TAGVAR(hardcode_direct, $1)=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_minus_L
+ # to unsupported forces relinking
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=
+ fi
+ ;;
+ esac
+ shared_flag='-shared'
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag="$shared_flag "'${wl}-G'
+ fi
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+ # chokes on -Wl,-G. The following line is correct:
+ shared_flag='-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+ fi
+
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall'
+ # It seems that -bexpall does not export symbols beginning with
+ # underscore (_), so it is better to generate a list of symbols to export.
+ _LT_TAGVAR(always_export_symbols, $1)=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ _LT_TAGVAR(allow_undefined_flag, $1)='-berok'
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ _LT_SYS_MODULE_PATH_AIX
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib'
+ _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+ else
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ _LT_SYS_MODULE_PATH_AIX
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok'
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok'
+ # Exported symbols can be pulled into shared objects from archives
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+ # This is similar to how AIX traditionally builds its shared libraries.
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+ fi
+ fi
+ ;;
+
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ # see comment about AmigaOS4 .so support
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)=''
+ ;;
+ m68k)
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ ;;
+ esac
+ ;;
+
+ bsdi[[45]]*)
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+ # FIXME: Should let the user specify the lib program.
+ _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs'
+ _LT_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`'
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+ ;;
+
+ darwin* | rhapsody*)
+ _LT_DARWIN_LINKER_FEATURES($1)
+ ;;
+
+ dgux*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ freebsd1*)
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+ # support. Future versions do this automatically, but an explicit c++rt0.o
+ # does not break anything, and helps significantly (at the cost of a little
+ # extra space).
+ freebsd2.2*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+ freebsd2*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+ freebsd* | dragonfly*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ hpux9*)
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ fi
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ ;;
+
+ hpux10*)
+ if test "$GCC" = yes -a "$with_gnu_ld" = no; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ if test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ fi
+ ;;
+
+ hpux11*)
+ if test "$GCC" = yes -a "$with_gnu_ld" = no; then
+ case $host_cpu in
+ hppa*64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ ia64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ else
+ case $host_cpu in
+ hppa*64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ ia64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ fi
+ if test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ case $host_cpu in
+ hppa*64*|ia64*)
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+ *)
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ ;;
+ esac
+ fi
+ ;;
+
+ irix5* | irix6* | nonstopux*)
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ # Try to use the -exported_symbol ld option; if it does not
+ # work, assume that -exports_file does not work either and
+ # implicitly export all symbols.
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([int foo(void) {}]),
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
+ ])
+ LDFLAGS="$save_LDFLAGS"
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
+ fi
+ _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(inherit_rpath, $1)=yes
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF
+ fi
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ newsos6)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ *nto* | *qnx*)
+ ;;
+
+ openbsd*)
+ if test -f /usr/libexec/ld.so; then
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ else
+ case $host_os in
+ openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ ;;
+ esac
+ fi
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ os2*)
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+ _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+ ;;
+
+ osf3*)
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ ;;
+
+ osf4* | osf5*) # as osf3* with the addition of -msym flag
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ else
+ _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
+
+ # Both the C and C++ compilers support -rpath directly
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+ fi
+ _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ ;;
+
+ solaris*)
+ _LT_TAGVAR(no_undefined_flag, $1)=' -z defs'
+ if test "$GCC" = yes; then
+ wlarc='${wl}'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ else
+ case `$CC -V 2>&1` in
+ *"Compilers 5.0"*)
+ wlarc=''
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+ ;;
+ *)
+ wlarc='${wl}'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ ;;
+ esac
+ fi
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ case $host_os in
+ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+ *)
+ # The compiler driver will combine and reorder linker options,
+ # but understands `-z linker_flag'. GCC discards it without `$wl',
+ # but is careful enough not to reorder.
+ # Supported since Solaris 2.6 (maybe 2.5.1?)
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+ else
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
+ fi
+ ;;
+ esac
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ ;;
+
+ sunos4*)
+ if test "x$host_vendor" = xsequent; then
+ # Use $CC to link under sequent, because it throws in some extra .o
+ # files that make .init and .fini sections work.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ sysv4)
+ case $host_vendor in
+ sni)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true???
+ ;;
+ siemens)
+ ## LD is ld; it makes a PLAMLIB.
+ ## CC just makes a GrossModule.
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs'
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ ;;
+ motorola)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_direct, $1)=no # Motorola manual says yes, but my tests say they lie
+ ;;
+ esac
+ runpath_var='LD_RUN_PATH'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ sysv4.3*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ _LT_TAGVAR(ld_shlibs, $1)=yes
+ fi
+ ;;
+
+ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ runpath_var='LD_RUN_PATH'
+
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6*)
+ # Note: We can NOT use -z defs as we might desire, because we do not
+ # link with -lc, and that would cause any symbols used from libc to
+ # always be unresolved, which means just about no library would
+ # ever link correctly. If we're not using GNU ld we use -z text
+ # though, which does catch some bad symbols but isn't as heavy-handed
+ # as -z defs.
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+ _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs'
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport'
+ runpath_var='LD_RUN_PATH'
+
+ if test "$GCC" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ ;;
+
+ uts4*)
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+
+ *)
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+
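+ # Siemens Nixdorf (SNI) SVR4 hosts use -Blargedynsym (a larger dynamic symbol table) when exporting dynamic symbols.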
+ if test x$host_vendor = xsni; then
+ case $host in
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym'
+ ;;
+ esac
+ fi
+ fi
+])
+AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
+test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no
+
+_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld
+
+_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl
+_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl
+_LT_DECL([], [extract_expsyms_cmds], [2],
+ [The commands to extract the exported symbol list from a shared archive])
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in
+x|xyes)
+ # Assume -lc should be added
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+
+ if test "$enable_shared" = yes && test "$GCC" = yes; then
+ case $_LT_TAGVAR(archive_cmds, $1) in
+ *'~'*)
+ # FIXME: we may have to deal with multi-command sequences.
+ ;;
+ '$CC '*)
+ # Test whether the compiler implicitly links with -lc since on some
+ # systems, -lgcc has to come before -lc. If gcc already passes -lc
+ # to ld, don't add -lc before -lgcc.
+ AC_MSG_CHECKING([whether -lc should be explicitly linked in])
+ $RM conftest*
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ if AC_TRY_EVAL(ac_compile) 2>conftest.err; then
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1)
+ pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1)
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1)
+ _LT_TAGVAR(allow_undefined_flag, $1)=
+ if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1)
+ then
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ else
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+ fi
+ _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi
+ $RM conftest*
+ AC_MSG_RESULT([$_LT_TAGVAR(archive_cmds_need_lc, $1)])
+ ;;
+ esac
+ fi
+ ;;
+esac
+
+_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0],
+ [Whether or not to add -lc for building shared libraries])
+_LT_TAGDECL([allow_libtool_libs_with_static_runtimes],
+ [enable_shared_with_static_runtimes], [0],
+ [Whether or not to disallow shared libs when runtime libs are static])
+_LT_TAGDECL([], [export_dynamic_flag_spec], [1],
+ [Compiler flag to allow reflexive dlopens])
+_LT_TAGDECL([], [whole_archive_flag_spec], [1],
+ [Compiler flag to generate shared objects directly from archives])
+_LT_TAGDECL([], [compiler_needs_object], [1],
+ [Whether the compiler copes with passing no objects directly])
+_LT_TAGDECL([], [old_archive_from_new_cmds], [2],
+ [Create an old-style archive from a shared archive])
+_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2],
+ [Create a temporary old-style archive to link instead of a shared archive])
+_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive])
+_LT_TAGDECL([], [archive_expsym_cmds], [2])
+_LT_TAGDECL([], [module_cmds], [2],
+ [Commands used to build a loadable module if different from building
+ a shared archive.])
+_LT_TAGDECL([], [module_expsym_cmds], [2])
+_LT_TAGDECL([], [with_gnu_ld], [1],
+ [Whether we are building with GNU ld or not])
+_LT_TAGDECL([], [allow_undefined_flag], [1],
+ [Flag that allows shared libraries with undefined symbols to be built])
+_LT_TAGDECL([], [no_undefined_flag], [1],
+ [Flag that enforces no undefined symbols])
+_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1],
+ [Flag to hardcode $libdir into a binary during linking.
+ This must work even if $libdir does not exist])
+_LT_TAGDECL([], [hardcode_libdir_flag_spec_ld], [1],
+ [[If ld is used when linking, flag to hardcode $libdir into a binary
+ during linking. This must work even if $libdir does not exist]])
+_LT_TAGDECL([], [hardcode_libdir_separator], [1],
+ [Whether we need a single "-rpath" flag with a separated argument])
+_LT_TAGDECL([], [hardcode_direct], [0],
+ [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes
+ DIR into the resulting binary])
+_LT_TAGDECL([], [hardcode_direct_absolute], [0],
+ [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes
+ DIR into the resulting binary and the resulting library dependency is
+ "absolute", i.e. impossible to change by setting ${shlibpath_var} if the
+ library is relocated])
+_LT_TAGDECL([], [hardcode_minus_L], [0],
+ [Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+ into the resulting binary])
+_LT_TAGDECL([], [hardcode_shlibpath_var], [0],
+ [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+ into the resulting binary])
+_LT_TAGDECL([], [hardcode_automatic], [0],
+ [Set to "yes" if building a shared library automatically hardcodes DIR
+ into the library and all subsequent libraries and executables linked
+ against it])
+_LT_TAGDECL([], [inherit_rpath], [0],
+ [Set to yes if linker adds runtime paths of dependent libraries
+ to runtime path list])
+_LT_TAGDECL([], [link_all_deplibs], [0],
+ [Whether libtool must link a program against all its dependency libraries])
+_LT_TAGDECL([], [fix_srcfile_path], [1],
+ [Fix the shell variable $srcfile for the compiler])
+_LT_TAGDECL([], [always_export_symbols], [0],
+ [Set to "yes" if exported symbols are required])
+_LT_TAGDECL([], [export_symbols_cmds], [2],
+ [The commands to list exported symbols])
+_LT_TAGDECL([], [exclude_expsyms], [1],
+ [Symbols that should not be listed in the preloaded symbols])
+_LT_TAGDECL([], [include_expsyms], [1],
+ [Symbols that must always be exported])
+_LT_TAGDECL([], [prelink_cmds], [2],
+ [Commands necessary for linking programs (against libraries) with templates])
+_LT_TAGDECL([], [file_list_spec], [1],
+ [Specify filename containing input files])
+dnl FIXME: Not yet implemented
+dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1],
+dnl [Compiler flag to generate thread safe objects])
+])# _LT_LINKER_SHLIBS
+
+
+# _LT_LANG_C_CONFIG([TAG])
+# ------------------------
+# Ensure that the configuration variables for a C compiler are suitably
+# defined. These variables are subsequently used by _LT_CONFIG to write
+# the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_C_CONFIG],
+[m4_require([_LT_DECL_EGREP])dnl
+lt_save_CC="$CC"
+AC_LANG_PUSH(C)
+
+# Source file extension for C test sources.
+ac_ext=c
+
+# Object file extension for compiled C test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="int some_variable = 0;"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='int main(){return(0);}'
+
+_LT_TAG_COMPILER
+# Save the default compiler, since it gets overwritten when the other
+# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
+compiler_DEFAULT=$CC
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+## CAVEAT EMPTOR:
+## There is no encapsulation within the following macros, do not change
+## the running order or otherwise move them around unless you know exactly
+## what you are doing...
+if test -n "$compiler"; then
+ _LT_COMPILER_NO_RTTI($1)
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_SYS_DYNAMIC_LINKER($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+ LT_SYS_DLOPEN_SELF
+ _LT_CMD_STRIPLIB
+
+ # Report which library types will actually be built
+ AC_MSG_CHECKING([if libtool supports shared libraries])
+ AC_MSG_RESULT([$can_build_shared])
+
+ AC_MSG_CHECKING([whether to build shared libraries])
+ test "$can_build_shared" = "no" && enable_shared=no
+
+ # On AIX, shared libraries and static libraries use the same namespace, and
+ # are all built from PIC.
+ case $host_os in
+ aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+
+ aix[[4-9]]*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+ esac
+ AC_MSG_RESULT([$enable_shared])
+
+ AC_MSG_CHECKING([whether to build static libraries])
+ # Make sure either enable_shared or enable_static is yes.
+ test "$enable_shared" = yes || enable_static=yes
+ AC_MSG_RESULT([$enable_static])
+
+ _LT_CONFIG($1)
+fi
+AC_LANG_POP
+CC="$lt_save_CC"
+])# _LT_LANG_C_CONFIG
+
+
+# _LT_PROG_CXX
+# ------------
+# Since AC_PROG_CXX is broken, in that it returns g++ if there is no c++
+# compiler, we have our own version here.
+m4_defun([_LT_PROG_CXX],
+[
+pushdef([AC_MSG_ERROR], [_lt_caught_CXX_error=yes])
+AC_PROG_CXX
+if test -n "$CXX" && ( test "X$CXX" != "Xno" &&
+ ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) ||
+ (test "X$CXX" != "Xg++"))) ; then
+ AC_PROG_CXXCPP
+else
+ _lt_caught_CXX_error=yes
+fi
+popdef([AC_MSG_ERROR])
+])# _LT_PROG_CXX
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([_LT_PROG_CXX], [])
+
+
+# _LT_LANG_CXX_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for a C++ compiler are suitably
+# defined. These variables are subsequently used by _LT_CONFIG to write
+# the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_CXX_CONFIG],
+[AC_REQUIRE([_LT_PROG_CXX])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_EGREP])dnl
+
+AC_LANG_PUSH(C++)
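+ # Reset the per-tag link characteristics to conservative defaults before probing the C++ toolchain below.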
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(compiler_needs_object, $1)=no
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for C++ test sources.
+ac_ext=cpp
+
+# Object file extension for compiled C++ test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the CXX compiler isn't working. Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_caught_CXX_error" != yes; then
+ # Code to be used in simple compile tests
+ lt_simple_compile_test_code="int some_variable = 0;"
+
+ # Code to be used in simple link tests
+ lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }'
+
+ # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+ _LT_TAG_COMPILER
+
+ # save warnings/boilerplate of simple test code
+ _LT_COMPILER_BOILERPLATE
+ _LT_LINKER_BOILERPLATE
+
+ # Allow CC to be a program name with arguments.
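+ # Save the C-language settings; the probes below run against the C++ toolchain, and the saved values are restored at the end of this macro.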
+ lt_save_CC=$CC
+ lt_save_LD=$LD
+ lt_save_GCC=$GCC
+ GCC=$GXX
+ lt_save_with_gnu_ld=$with_gnu_ld
+ lt_save_path_LD=$lt_cv_path_LD
+ if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+ lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+ else
+ $as_unset lt_cv_prog_gnu_ld
+ fi
+ if test -n "${lt_cv_path_LDCXX+set}"; then
+ lt_cv_path_LD=$lt_cv_path_LDCXX
+ else
+ $as_unset lt_cv_path_LD
+ fi
+ test -z "${LDCXX+set}" || LD=$LDCXX
+ CC=${CXX-"c++"}
+ compiler=$CC
+ _LT_TAGVAR(compiler, $1)=$CC
+ _LT_CC_BASENAME([$compiler])
+
+ if test -n "$compiler"; then
+ # We don't want -fno-exception when compiling C++ code, so set the
+ # no_builtin_flag separately
+ if test "$GXX" = yes; then
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin'
+ else
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+ fi
+
+ if test "$GXX" = yes; then
+ # Set up default GNU C++ configuration
+
+ LT_PATH_LD
+
+ # Check if GNU C++ uses GNU ld as the underlying linker, since the
+ # archiving commands below assume that GNU ld is being used.
+ if test "$with_gnu_ld" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+ # investigate it a little bit more. (MM)
+ wlarc='${wl}'
+
+ # ancient GNU ld didn't support --whole-archive et al.
+ if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+ $GREP 'no-whole-archive' > /dev/null; then
+ _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ fi
+ else
+ with_gnu_ld=no
+ wlarc=
+
+ # A generic and very simple default shared library creation
+ # command for GNU C++ for the case where it uses the native
+ # linker, instead of GNU ld. If possible, this setting should be
+ # overridden to take advantage of the native linker features on
+ # the platform it is being used on.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+ fi
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"'
+
+ else
+ GXX=no
+ with_gnu_ld=no
+ wlarc=
+ fi
+
+ # PORTME: fill in a description of your system's C++ link characteristics
+ AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
+ _LT_TAGVAR(ld_shlibs, $1)=yes
+ case $host_os in
+ aix3*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ aix[[4-9]]*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
+ for ld_flag in $LDFLAGS; do
+ case $ld_flag in
+ *-brtl*)
+ aix_use_runtimelinking=yes
+ break
+ ;;
+ esac
+ done
+ ;;
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ _LT_TAGVAR(archive_cmds, $1)=''
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ _LT_TAGVAR(file_list_spec, $1)='${wl}-f,'
+
+ if test "$GXX" = yes; then
+ case $host_os in aix4.[[012]]|aix4.[[012]].*)
+ # We only want to do this on AIX 4.2 and lower, the check
+ # below for broken collect2 doesn't work under 4.3+
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" &&
+ strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ :
+ else
+ # We have old collect2
+ _LT_TAGVAR(hardcode_direct, $1)=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_minus_L
+ # to unsupported forces relinking
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=
+ fi
+ esac
+ shared_flag='-shared'
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag="$shared_flag "'${wl}-G'
+ fi
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+ # chokes on -Wl,-G. The following line is correct:
+ shared_flag='-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+ fi
+
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall'
+ # It seems that -bexpall does not export symbols beginning with
+ # underscore (_), so it is better to generate a list of symbols to
+ # export.
+ _LT_TAGVAR(always_export_symbols, $1)=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ _LT_TAGVAR(allow_undefined_flag, $1)='-berok'
+ # Determine the default libpath from the value encoded in an empty
+ # executable.
+ _LT_SYS_MODULE_PATH_AIX
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib'
+ _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+ else
+ # Determine the default libpath from the value encoded in an
+ # empty executable.
+ _LT_SYS_MODULE_PATH_AIX
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok'
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok'
+ # Exported symbols can be pulled into shared objects from archives
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+ # This is similar to how AIX traditionally builds its shared
+ # libraries.
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+ fi
+ fi
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ chorus*)
+ case $cc_basename in
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+ ;;
+
+ cygwin* | mingw* | pw32* | cegcc*)
+ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+ # as there is no search path for DLLs.
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=no
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+
+ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is; otherwise, prepend...
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname.def;
+ else
+ echo EXPORTS > $output_objdir/$soname.def;
+ cat $export_symbols >> $output_objdir/$soname.def;
+ fi~
+ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ darwin* | rhapsody*)
+ _LT_DARWIN_LINKER_FEATURES($1)
+ ;;
+
+ dgux*)
+ case $cc_basename in
+ ec++*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ ghcx*)
+ # Green Hills C++ Compiler
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+ ;;
+
+ freebsd[[12]]*)
+ # C++ shared libraries reported to be fairly broken before
+ # switch to ELF
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ freebsd-elf*)
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ ;;
+
+ freebsd* | dragonfly*)
+ # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
+ # conventions
+ _LT_TAGVAR(ld_shlibs, $1)=yes
+ ;;
+
+ gnu*)
+ ;;
+
+ hpux9*)
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
+ # but as the default
+ # location of the library.
+
+ case $cc_basename in
+ CC*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ aCC*)
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed'
+ ;;
+ *)
+ if test "$GXX" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ else
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+ ;;
+
+ hpux10*|hpux11*)
+ if test $with_gnu_ld = no; then
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ case $host_cpu in
+ hppa*64*|ia64*)
+ ;;
+ *)
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ ;;
+ esac
+ fi
+ case $host_cpu in
+ hppa*64*|ia64*)
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ ;;
+ *)
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
+ # but as the default
+ # location of the library.
+ ;;
+ esac
+
+ case $cc_basename in
+ CC*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ aCC*)
+ case $host_cpu in
+ hppa*64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ ia64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ esac
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed'
+ ;;
+ *)
+ if test "$GXX" = yes; then
+ if test $with_gnu_ld = no; then
+ case $host_cpu in
+ hppa*64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ ia64*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ ;;
+ esac
+ fi
+ else
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+ ;;
+
+ interix[[3-9]]*)
+ _LT_TAGVAR(hardcode_direct, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+ # Instead, shared libraries are loaded at an image base (0x10000000 by
+ # default) and relocated if they conflict, which is a slow, very memory-
+ # consuming and fragmenting process. To avoid this, we pick a random,
+ # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+ # time. Moving up from 0x10000000 also allows more sbrk(2) space.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ ;;
+ irix5* | irix6*)
+ case $cc_basename in
+ CC*)
+ # SGI C++
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+
+ # Archives containing C++ object files must be created using
+ # "CC -ar", where "CC" is the IRIX C++ compiler. This is
+ # necessary to make sure instantiated templates are included
+ # in the archive.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs'
+ ;;
+ *)
+ if test "$GXX" = yes; then
+ if test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` -o $lib'
+ fi
+ fi
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ ;;
+ esac
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+ _LT_TAGVAR(inherit_rpath, $1)=yes
+ ;;
+
+ linux* | k*bsd*-gnu)
+ case $cc_basename in
+ KCC*)
+ # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+ # KCC will only create a shared library if the output file
+ # ends with ".so" (or ".sl" for HP-UX), so rename the library
+ # to its proper name (with version) after linking.
+ _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib'
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed'
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+
+ # Archives containing C++ object files must be created using
+ # "CC -Bstatic", where "CC" is the KAI C++ compiler.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs'
+ ;;
+ icpc* | ecpc* )
+ # Intel C++
+ with_gnu_ld=yes
+ # version 8.0 and above of icpc choke on multiply defined symbols
+ # if we add $predep_objects and $postdep_objects; however, 7.1 and
+ # earlier do not add the objects themselves.
+ case `$CC -V 2>&1` in
+ *"Version 7."*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ ;;
+ *) # Version 8.0 or newer
+ tmp_idyn=
+ case $host_cpu in
+ ia64*) tmp_idyn=' -i_dynamic';;
+ esac
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ ;;
+ esac
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ ;;
+ pgCC* | pgcpp*)
+ # Portland Group C++ compiler
+ case `$CC -V` in
+ *pgCC\ [[1-5]]* | *pgcpp\ [[1-5]]*)
+ _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
+ compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"'
+ _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
+ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~
+ $RANLIB $oldlib'
+ _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ ;;
+ *) # Version 6 will use weak symbols
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ ;;
+ esac
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ ;;
+ cxx*)
+ # Compaq C++
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols'
+
+ runpath_var=LD_RUN_PATH
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed'
+ ;;
+ xl*)
+ # IBM XL 8.0 on PPC, with GNU ld
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ if test "x$supports_anon_versioning" = xyes; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+ fi
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C++ 5.9
+ _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(compiler_needs_object, $1)=yes
+
+ # Not sure whether something based on
+ # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
+ # would be better.
+ output_verbose_link_cmd='echo'
+
+ # Archives containing C++ object files must be created using
+ # "CC -xar", where "CC" is the Sun C++ compiler. This is
+ # necessary to make sure instantiated templates are included
+ # in the archive.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+
+ lynxos*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ m88k*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ mvs*)
+ case $cc_basename in
+ cxx*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
+ wlarc=
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ fi
+ # Workaround some broken pre-1.5 toolchains
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"'
+ ;;
+
+ *nto* | *qnx*)
+ _LT_TAGVAR(ld_shlibs, $1)=yes
+ ;;
+
+ openbsd2*)
+ # C++ shared libraries are fairly broken
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ openbsd*)
+ if test -f /usr/libexec/ld.so; then
+ _LT_TAGVAR(hardcode_direct, $1)=yes
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ fi
+ output_verbose_link_cmd=echo
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+
+ osf3* | osf4* | osf5*)
+ case $cc_basename in
+ KCC*)
+ # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+ # KCC will only create a shared library if the output file
+ # ends with ".so" (or ".sl" for HP-UX), so rename the library
+ # to its proper name (with version) after linking.
+ _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ # Archives containing C++ object files must be created using
+ # the KAI C++ compiler.
+ case $host in
+ osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;;
+ *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;;
+ esac
+ ;;
+ RCC*)
+ # Rational C++ 2.4.1
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ cxx*)
+ case $host in
+ osf3*)
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && $ECHO "X${wl}-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ ;;
+ *)
+ _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
+ echo "-hidden">> $lib.exp~
+ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~
+ $RM $lib.exp'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+ ;;
+ esac
+
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed'
+ ;;
+ *)
+ if test "$GXX" = yes && test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+ case $host in
+ osf3*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ ;;
+ esac
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"'
+
+ else
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+ ;;
+
+ psos*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ sunos4*)
+ case $cc_basename in
+ CC*)
+ # Sun C++ 4.x
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ lcc*)
+ # Lucid
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+ ;;
+
+ solaris*)
+ case $cc_basename in
+ CC*)
+ # Sun C++ 4.2, 5.x and Centerline C++
+ _LT_TAGVAR(archive_cmds_need_lc,$1)=yes
+ _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ case $host_os in
+ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+ *)
+ # The compiler driver will combine and reorder linker options,
+ # but understands `-z linker_flag'.
+ # Supported since Solaris 2.6 (maybe 2.5.1?)
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
+ ;;
+ esac
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+
+ output_verbose_link_cmd='echo'
+
+ # Archives containing C++ object files must be created using
+ # "CC -xar", where "CC" is the Sun C++ compiler. This is
+ # necessary to make sure instantiated templates are included
+ # in the archive.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
+ ;;
+ gcx*)
+ # Green Hills C++ Compiler
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+
+ # The C++ compiler must be used to create the archive.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
+ ;;
+ *)
+ # GNU C++ compiler with Solaris linker
+ if test "$GXX" = yes && test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs'
+ if $CC --version | $GREP -v '^2\.7' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"'
+ else
+ # g++ 2.7 appears to require `-G' NOT `-shared' on this
+ # platform.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"'
+ fi
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir'
+ case $host_os in
+ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+ *)
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+ ;;
+ esac
+ fi
+ ;;
+ esac
+ ;;
+
+ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ runpath_var='LD_RUN_PATH'
+
+ case $cc_basename in
+ CC*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ ;;
+
+ sysv5* | sco3.2v5* | sco5v6*)
+ # Note: We can NOT use -z defs as we might desire, because we do not
+ # link with -lc, and that would cause any symbols used from libc to
+ # always be unresolved, which means just about no library would
+ # ever link correctly. If we're not using GNU ld we use -z text
+ # though, which does catch some bad symbols but isn't as heavy-handed
+ # as -z defs.
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+ _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs'
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport'
+ runpath_var='LD_RUN_PATH'
+
+ case $cc_basename in
+ CC*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ *)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ ;;
+ esac
+ ;;
+
+ tandem*)
+ case $cc_basename in
+ NCC*)
+ # NonStop-UX NCC 3.20
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+ ;;
+
+ vxworks*)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+
+ *)
+ # FIXME: insert proper C++ library support
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ ;;
+ esac
+
+ AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
+ test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no
+
+ _LT_TAGVAR(GCC, $1)="$GXX"
+ _LT_TAGVAR(LD, $1)="$LD"
+
+ ## CAVEAT EMPTOR:
+ ## There is no encapsulation within the following macros, do not change
+ ## the running order or otherwise move them around unless you know exactly
+ ## what you are doing...
+ _LT_SYS_HIDDEN_LIBDEPS($1)
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_SYS_DYNAMIC_LINKER($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+
+ _LT_CONFIG($1)
+ fi # test -n "$compiler"
+
+ CC=$lt_save_CC
+ LDCXX=$LD
+ LD=$lt_save_LD
+ GCC=$lt_save_GCC
+ with_gnu_ld=$lt_save_with_gnu_ld
+ lt_cv_path_LDCXX=$lt_cv_path_LD
+ lt_cv_path_LD=$lt_save_path_LD
+ lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
+ lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
+fi # test "$_lt_caught_CXX_error" != yes
+
+AC_LANG_POP
+])# _LT_LANG_CXX_CONFIG
+
+
+# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME])
+# ---------------------------------
+# Figure out "hidden" library dependencies from verbose
+# compiler output when linking a shared library.
+# Parse the compiler output and extract the necessary
+# objects, libraries and library flags.
+m4_defun([_LT_SYS_HIDDEN_LIBDEPS],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+# Dependencies to place before and after the object being linked:
+_LT_TAGVAR(predep_objects, $1)=
+_LT_TAGVAR(postdep_objects, $1)=
+_LT_TAGVAR(predeps, $1)=
+_LT_TAGVAR(postdeps, $1)=
+_LT_TAGVAR(compiler_lib_search_path, $1)=
+
+dnl we can't use the lt_simple_compile_test_code here,
+dnl because it contains code intended for an executable,
+dnl not a library. It's possible we should let each
+dnl tag define a new lt_????_link_test_code variable,
+dnl but it's only used here...
+m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF
+int a;
+void foo (void) { a = 0; }
+_LT_EOF
+], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF
+class Foo
+{
+public:
+ Foo (void) { a = 0; }
+private:
+ int a;
+};
+_LT_EOF
+], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF
+ subroutine foo
+ implicit none
+ integer*4 a
+ a=0
+ return
+ end
+_LT_EOF
+], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF
+ subroutine foo
+ implicit none
+ integer a
+ a=0
+ return
+ end
+_LT_EOF
+], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF
+public class foo {
+ private int a;
+ public void bar (void) {
+ a = 0;
+ }
+};
+_LT_EOF
+])
+dnl Parse the compiler output and extract the necessary
+dnl objects, libraries and library flags.
+if AC_TRY_EVAL(ac_compile); then
+ # Parse the compiler output and extract the necessary
+ # objects, libraries and library flags.
+
+ # Sentinel used to keep track of whether or not we are before
+ # the conftest object file.
+ pre_test_object_deps_done=no
+
+ for p in `eval "$output_verbose_link_cmd"`; do
+ case $p in
+
+ -L* | -R* | -l*)
+ # Some compilers place space between "-{L,R}" and the path.
+ # Remove the space.
+ if test $p = "-L" ||
+ test $p = "-R"; then
+ prev=$p
+ continue
+ else
+ prev=
+ fi
+
+ if test "$pre_test_object_deps_done" = no; then
+ case $p in
+ -L* | -R*)
+ # Internal compiler library paths should come after those
+              # provided by the user.  The postdeps already come after the
+              # user-supplied libs so there is no need to process them.
+ if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then
+ _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}"
+ else
+ _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}"
+ fi
+ ;;
+ # The "-l" case would never come before the object being
+ # linked, so don't bother handling this case.
+ esac
+ else
+ if test -z "$_LT_TAGVAR(postdeps, $1)"; then
+ _LT_TAGVAR(postdeps, $1)="${prev}${p}"
+ else
+ _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}"
+ fi
+ fi
+ ;;
+
+ *.$objext)
+ # This assumes that the test object file only shows up
+ # once in the compiler output.
+ if test "$p" = "conftest.$objext"; then
+ pre_test_object_deps_done=yes
+ continue
+ fi
+
+ if test "$pre_test_object_deps_done" = no; then
+ if test -z "$_LT_TAGVAR(predep_objects, $1)"; then
+ _LT_TAGVAR(predep_objects, $1)="$p"
+ else
+ _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p"
+ fi
+ else
+ if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then
+ _LT_TAGVAR(postdep_objects, $1)="$p"
+ else
+ _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p"
+ fi
+ fi
+ ;;
+
+ *) ;; # Ignore the rest.
+
+ esac
+ done
+
+ # Clean up.
+ rm -f a.out a.exe
+else
+ echo "libtool.m4: error: problem compiling $1 test program"
+fi
+
+$RM -f conftest.$objext
+
+# PORTME: override above test on systems where it is broken
+m4_if([$1], [CXX],
+[case $host_os in
+interix[[3-9]]*)
+ # Interix 3.5 installs completely hosed .la files for C++, so rather than
+ # hack all around it, let's just trust "g++" to DTRT.
+ _LT_TAGVAR(predep_objects,$1)=
+ _LT_TAGVAR(postdep_objects,$1)=
+ _LT_TAGVAR(postdeps,$1)=
+ ;;
+
+linux*)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C++ 5.9
+
+ # The more standards-conforming stlport4 library is
+ # incompatible with the Cstd library. Avoid specifying
+ # it if it's in CXXFLAGS. Ignore libCrun as
+ # -library=stlport4 depends on it.
+ case " $CXX $CXXFLAGS " in
+ *" -library=stlport4 "*)
+ solaris_use_stlport4=yes
+ ;;
+ esac
+
+ if test "$solaris_use_stlport4" != yes; then
+ _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun'
+ fi
+ ;;
+ esac
+ ;;
+
+solaris*)
+ case $cc_basename in
+ CC*)
+ # The more standards-conforming stlport4 library is
+ # incompatible with the Cstd library. Avoid specifying
+ # it if it's in CXXFLAGS. Ignore libCrun as
+ # -library=stlport4 depends on it.
+ case " $CXX $CXXFLAGS " in
+ *" -library=stlport4 "*)
+ solaris_use_stlport4=yes
+ ;;
+ esac
+
+ # Adding this requires a known-good setup of shared libraries for
+ # Sun compiler versions before 5.6, else PIC objects from an old
+ # archive will be linked into the output, leading to subtle bugs.
+ if test "$solaris_use_stlport4" != yes; then
+ _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun'
+ fi
+ ;;
+ esac
+ ;;
+esac
+])
+
+case " $_LT_TAGVAR(postdeps, $1) " in
+*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;;
+esac
+ _LT_TAGVAR(compiler_lib_search_dirs, $1)=
+if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then
+ _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'`
+fi
+_LT_TAGDECL([], [compiler_lib_search_dirs], [1],
+ [The directories searched by this compiler when creating a shared library])
+_LT_TAGDECL([], [predep_objects], [1],
+ [Dependencies to place before and after the objects being linked to
+ create a shared library])
+_LT_TAGDECL([], [postdep_objects], [1])
+_LT_TAGDECL([], [predeps], [1])
+_LT_TAGDECL([], [postdeps], [1])
+_LT_TAGDECL([], [compiler_lib_search_path], [1],
+ [The library search path used internally by the compiler when linking
+ a shared library])
+])# _LT_SYS_HIDDEN_LIBDEPS
+
+
+# _LT_PROG_F77
+# ------------
+# Since AC_PROG_F77 is broken, in that it returns the empty string
+# if there is no fortran compiler, we have our own version here.
+m4_defun([_LT_PROG_F77],
+[
+pushdef([AC_MSG_ERROR], [_lt_disable_F77=yes])
+AC_PROG_F77
+if test -z "$F77" || test "X$F77" = "Xno"; then
+ _lt_disable_F77=yes
+fi
+popdef([AC_MSG_ERROR])
+])# _LT_PROG_F77
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([_LT_PROG_F77], [])
+
+
+# _LT_LANG_F77_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for a Fortran 77 compiler are
+# suitably defined. These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_F77_CONFIG],
+[AC_REQUIRE([_LT_PROG_F77])dnl
+AC_LANG_PUSH(Fortran 77)
+
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for f77 test sources.
+ac_ext=f
+
+# Object file extension for compiled f77 test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the F77 compiler isn't working. Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_disable_F77" != yes; then
+ # Code to be used in simple compile tests
+ lt_simple_compile_test_code="\
+ subroutine t
+ return
+ end
+"
+
+ # Code to be used in simple link tests
+ lt_simple_link_test_code="\
+ program t
+ end
+"
+
+ # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+ _LT_TAG_COMPILER
+
+ # save warnings/boilerplate of simple test code
+ _LT_COMPILER_BOILERPLATE
+ _LT_LINKER_BOILERPLATE
+
+ # Allow CC to be a program name with arguments.
+ lt_save_CC="$CC"
+ lt_save_GCC=$GCC
+ CC=${F77-"f77"}
+ compiler=$CC
+ _LT_TAGVAR(compiler, $1)=$CC
+ _LT_CC_BASENAME([$compiler])
+ GCC=$G77
+ if test -n "$compiler"; then
+ AC_MSG_CHECKING([if libtool supports shared libraries])
+ AC_MSG_RESULT([$can_build_shared])
+
+ AC_MSG_CHECKING([whether to build shared libraries])
+ test "$can_build_shared" = "no" && enable_shared=no
+
+ # On AIX, shared libraries and static libraries use the same namespace, and
+ # are all built from PIC.
+ case $host_os in
+ aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+ aix[[4-9]]*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+ esac
+ AC_MSG_RESULT([$enable_shared])
+
+ AC_MSG_CHECKING([whether to build static libraries])
+ # Make sure either enable_shared or enable_static is yes.
+ test "$enable_shared" = yes || enable_static=yes
+ AC_MSG_RESULT([$enable_static])
+
+ _LT_TAGVAR(GCC, $1)="$G77"
+ _LT_TAGVAR(LD, $1)="$LD"
+
+ ## CAVEAT EMPTOR:
+ ## There is no encapsulation within the following macros, do not change
+ ## the running order or otherwise move them around unless you know exactly
+ ## what you are doing...
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_SYS_DYNAMIC_LINKER($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+
+ _LT_CONFIG($1)
+ fi # test -n "$compiler"
+
+ GCC=$lt_save_GCC
+ CC="$lt_save_CC"
+fi # test "$_lt_disable_F77" != yes
+
+AC_LANG_POP
+])# _LT_LANG_F77_CONFIG
+
+
+# _LT_PROG_FC
+# -----------
+# Since AC_PROG_FC is broken, in that it returns the empty string
+# if there is no fortran compiler, we have our own version here.
+m4_defun([_LT_PROG_FC],
+[
+pushdef([AC_MSG_ERROR], [_lt_disable_FC=yes])
+AC_PROG_FC
+if test -z "$FC" || test "X$FC" = "Xno"; then
+ _lt_disable_FC=yes
+fi
+popdef([AC_MSG_ERROR])
+])# _LT_PROG_FC
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([_LT_PROG_FC], [])
+
+
+# _LT_LANG_FC_CONFIG([TAG])
+# -------------------------
+# Ensure that the configuration variables for a Fortran compiler are
+# suitably defined. These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_FC_CONFIG],
+[AC_REQUIRE([_LT_PROG_FC])dnl
+AC_LANG_PUSH(Fortran)
+
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for fc test sources.
+ac_ext=${ac_fc_srcext-f}
+
+# Object file extension for compiled fc test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the FC compiler isn't working. Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_disable_FC" != yes; then
+ # Code to be used in simple compile tests
+ lt_simple_compile_test_code="\
+ subroutine t
+ return
+ end
+"
+
+ # Code to be used in simple link tests
+ lt_simple_link_test_code="\
+ program t
+ end
+"
+
+ # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+ _LT_TAG_COMPILER
+
+ # save warnings/boilerplate of simple test code
+ _LT_COMPILER_BOILERPLATE
+ _LT_LINKER_BOILERPLATE
+
+ # Allow CC to be a program name with arguments.
+ lt_save_CC="$CC"
+ lt_save_GCC=$GCC
+ CC=${FC-"f95"}
+ compiler=$CC
+ GCC=$ac_cv_fc_compiler_gnu
+
+ _LT_TAGVAR(compiler, $1)=$CC
+ _LT_CC_BASENAME([$compiler])
+
+ if test -n "$compiler"; then
+ AC_MSG_CHECKING([if libtool supports shared libraries])
+ AC_MSG_RESULT([$can_build_shared])
+
+ AC_MSG_CHECKING([whether to build shared libraries])
+ test "$can_build_shared" = "no" && enable_shared=no
+
+ # On AIX, shared libraries and static libraries use the same namespace, and
+ # are all built from PIC.
+ case $host_os in
+ aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+ aix[[4-9]]*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+ esac
+ AC_MSG_RESULT([$enable_shared])
+
+ AC_MSG_CHECKING([whether to build static libraries])
+ # Make sure either enable_shared or enable_static is yes.
+ test "$enable_shared" = yes || enable_static=yes
+ AC_MSG_RESULT([$enable_static])
+
+ _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu"
+ _LT_TAGVAR(LD, $1)="$LD"
+
+ ## CAVEAT EMPTOR:
+ ## There is no encapsulation within the following macros, do not change
+ ## the running order or otherwise move them around unless you know exactly
+ ## what you are doing...
+ _LT_SYS_HIDDEN_LIBDEPS($1)
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_SYS_DYNAMIC_LINKER($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+
+ _LT_CONFIG($1)
+ fi # test -n "$compiler"
+
+ GCC=$lt_save_GCC
+ CC="$lt_save_CC"
+fi # test "$_lt_disable_FC" != yes
+
+AC_LANG_POP
+])# _LT_LANG_FC_CONFIG
+
+
+# _LT_LANG_GCJ_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for the GNU Java Compiler
+# are suitably defined. These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_GCJ_CONFIG],
+[AC_REQUIRE([LT_PROG_GCJ])dnl
+AC_LANG_SAVE
+
+# Source file extension for Java test sources.
+ac_ext=java
+
+# Object file extension for compiled Java test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="class foo {}"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }'
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC="$CC"
+lt_save_GCC=$GCC
+GCC=yes
+CC=${GCJ-"gcj"}
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_TAGVAR(LD, $1)="$LD"
+_LT_CC_BASENAME([$compiler])
+
+# GCJ did not exist back when GCC did not implicitly link libc in, so the
+# archive commands never need an explicit -lc.
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+
+## CAVEAT EMPTOR:
+## There is no encapsulation within the following macros, do not change
+## the running order or otherwise move them around unless you know exactly
+## what you are doing...
+if test -n "$compiler"; then
+ _LT_COMPILER_NO_RTTI($1)
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+
+ _LT_CONFIG($1)
+fi
+
+AC_LANG_RESTORE
+
+GCC=$lt_save_GCC
+CC="$lt_save_CC"
+])# _LT_LANG_GCJ_CONFIG
+
+
+# _LT_LANG_RC_CONFIG([TAG])
+# -------------------------
+# Ensure that the configuration variables for the Windows resource compiler
+# are suitably defined. These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_RC_CONFIG],
+[AC_REQUIRE([LT_PROG_RC])dnl
+AC_LANG_SAVE
+
+# Source file extension for RC test sources.
+ac_ext=rc
+
+# Object file extension for compiled RC test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }'
+
+# Code to be used in simple link tests
+lt_simple_link_test_code="$lt_simple_compile_test_code"
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC="$CC"
+lt_save_GCC=$GCC
+GCC=
+CC=${RC-"windres"}
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_CC_BASENAME([$compiler])
+_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
+
+if test -n "$compiler"; then
+ :
+ _LT_CONFIG($1)
+fi
+
+GCC=$lt_save_GCC
+AC_LANG_RESTORE
+CC="$lt_save_CC"
+])# _LT_LANG_RC_CONFIG
+
+
+# LT_PROG_GCJ
+# -----------
+AC_DEFUN([LT_PROG_GCJ],
+[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ],
+ [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ],
+ [AC_CHECK_TOOL(GCJ, gcj,)
+ test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2"
+ AC_SUBST(GCJFLAGS)])])[]dnl
+])
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_GCJ], [])
+
+
+# LT_PROG_RC
+# ----------
+AC_DEFUN([LT_PROG_RC],
+[AC_CHECK_TOOL(RC, windres,)
+])
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_RC], [])
+
+
+# _LT_DECL_EGREP
+# --------------
+# If we don't have a new enough Autoconf to choose the best grep
+# available, choose the first one in the user's PATH.
+m4_defun([_LT_DECL_EGREP],
+[AC_REQUIRE([AC_PROG_EGREP])dnl
+AC_REQUIRE([AC_PROG_FGREP])dnl
+test -z "$GREP" && GREP=grep
+_LT_DECL([], [GREP], [1], [A grep program that handles long lines])
+_LT_DECL([], [EGREP], [1], [An ERE matcher])
+_LT_DECL([], [FGREP], [1], [A literal string matcher])
+dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too
+AC_SUBST([GREP])
+])
+
+
+# _LT_DECL_OBJDUMP
+# ----------------
+# If we don't have a new enough Autoconf to choose the best objdump
+# available, choose the first one in the user's PATH.
+m4_defun([_LT_DECL_OBJDUMP],
+[AC_CHECK_TOOL(OBJDUMP, objdump, false)
+test -z "$OBJDUMP" && OBJDUMP=objdump
+_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper])
+AC_SUBST([OBJDUMP])
+])
+
+
+# _LT_DECL_SED
+# ------------
+# Check for a fully-functional sed program that truncates
+# as few characters as possible. Prefer GNU sed if found.
+m4_defun([_LT_DECL_SED],
+[AC_PROG_SED
+test -z "$SED" && SED=sed
+Xsed="$SED -e 1s/^X//"
+_LT_DECL([], [SED], [1], [A sed program that does not truncate output])
+_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"],
+ [Sed that helps us avoid accidentally triggering echo(1) options like -n])
+])# _LT_DECL_SED
+
+m4_ifndef([AC_PROG_SED], [
+############################################################
+# NOTE: This macro has been submitted for inclusion into #
+# GNU Autoconf as AC_PROG_SED. When it is available in #
+# a released version of Autoconf we should remove this #
+# macro and use it instead. #
+############################################################
+
+m4_defun([AC_PROG_SED],
+[AC_MSG_CHECKING([for a sed that does not truncate output])
+AC_CACHE_VAL(lt_cv_path_SED,
+[# Loop through the user's path and test for sed and gsed.
+# Then use that list of sed's as ones to test for truncation.
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for lt_ac_prog in sed gsed; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then
+ lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext"
+ fi
+ done
+ done
+done
+IFS=$as_save_IFS
+lt_ac_max=0
+lt_ac_count=0
+# Add /usr/xpg4/bin/sed as it is typically found on Solaris
+# along with /bin/sed that truncates output.
+for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do
+ test ! -f $lt_ac_sed && continue
+ cat /dev/null > conftest.in
+ lt_ac_count=0
+ echo $ECHO_N "0123456789$ECHO_C" >conftest.in
+ # Check for GNU sed and select it if it is found.
+ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then
+ lt_cv_path_SED=$lt_ac_sed
+ break
+ fi
+ while true; do
+ cat conftest.in conftest.in >conftest.tmp
+ mv conftest.tmp conftest.in
+ cp conftest.in conftest.nl
+ echo >>conftest.nl
+ $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break
+ cmp -s conftest.out conftest.nl || break
+ # 10000 chars as input seems more than enough
+ test $lt_ac_count -gt 10 && break
+ lt_ac_count=`expr $lt_ac_count + 1`
+ if test $lt_ac_count -gt $lt_ac_max; then
+ lt_ac_max=$lt_ac_count
+ lt_cv_path_SED=$lt_ac_sed
+ fi
+ done
+done
+])
+SED=$lt_cv_path_SED
+AC_SUBST([SED])
+AC_MSG_RESULT([$SED])
+])#AC_PROG_SED
+])#m4_ifndef
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_SED], [])
+
+
+# _LT_CHECK_SHELL_FEATURES
+# ------------------------
+# Find out whether the shell is Bourne or XSI compatible,
+# or has some other useful features.
+m4_defun([_LT_CHECK_SHELL_FEATURES],
+[AC_MSG_CHECKING([whether the shell understands some XSI constructs])
+# Try some XSI features
+xsi_shell=no
+( _lt_dummy="a/b/c"
+ test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \
+ = c,a/b,, \
+ && eval 'test $(( 1 + 1 )) -eq 2 \
+ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \
+ && xsi_shell=yes
+AC_MSG_RESULT([$xsi_shell])
+_LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell'])
+
+AC_MSG_CHECKING([whether the shell understands "+="])
+lt_shell_append=no
+( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \
+ >/dev/null 2>&1 \
+ && lt_shell_append=yes
+AC_MSG_RESULT([$lt_shell_append])
+_LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append'])
+
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ lt_unset=unset
+else
+ lt_unset=false
+fi
+_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl
+
+# test EBCDIC or ASCII
+case `echo X|tr X '\101'` in
+ A) # ASCII based system
+ # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
+ lt_SP2NL='tr \040 \012'
+ lt_NL2SP='tr \015\012 \040\040'
+ ;;
+ *) # EBCDIC based system
+ lt_SP2NL='tr \100 \n'
+ lt_NL2SP='tr \r\n \100\100'
+ ;;
+esac
+_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl
+_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl
+])# _LT_CHECK_SHELL_FEATURES
+
+
+# _LT_PROG_XSI_SHELLFNS
+# ---------------------
+# Bourne and XSI compatible variants of some useful shell functions.
+m4_defun([_LT_PROG_XSI_SHELLFNS],
+[case $xsi_shell in
+ yes)
+ cat << \_LT_EOF >> "$cfgfile"
+
+# func_dirname file append nondir_replacement
+# Compute the dirname of FILE. If nonempty, add APPEND to the result,
+# otherwise set result to NONDIR_REPLACEMENT.
+func_dirname ()
+{
+ case ${1} in
+ */*) func_dirname_result="${1%/*}${2}" ;;
+ * ) func_dirname_result="${3}" ;;
+ esac
+}
+
+# func_basename file
+func_basename ()
+{
+ func_basename_result="${1##*/}"
+}
+
+# func_dirname_and_basename file append nondir_replacement
+# perform func_basename and func_dirname in a single function
+# call:
+# dirname: Compute the dirname of FILE. If nonempty,
+# add APPEND to the result, otherwise set result
+# to NONDIR_REPLACEMENT.
+# value returned in "$func_dirname_result"
+# basename: Compute filename of FILE.
+#             value returned in "$func_basename_result"
+# Implementation must be kept synchronized with func_dirname
+# and func_basename. For efficiency, we do not delegate to
+# those functions but instead duplicate the functionality here.
+func_dirname_and_basename ()
+{
+ case ${1} in
+ */*) func_dirname_result="${1%/*}${2}" ;;
+ * ) func_dirname_result="${3}" ;;
+ esac
+ func_basename_result="${1##*/}"
+}
+
+# func_stripname prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+func_stripname ()
+{
+ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are
+ # positional parameters, so assign one to ordinary parameter first.
+ func_stripname_result=${3}
+ func_stripname_result=${func_stripname_result#"${1}"}
+ func_stripname_result=${func_stripname_result%"${2}"}
+}
+
+# func_opt_split
+func_opt_split ()
+{
+ func_opt_split_opt=${1%%=*}
+ func_opt_split_arg=${1#*=}
+}
+
+# func_lo2o object
+func_lo2o ()
+{
+ case ${1} in
+ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;
+ *) func_lo2o_result=${1} ;;
+ esac
+}
+
+# func_xform libobj-or-source
+func_xform ()
+{
+ func_xform_result=${1%.*}.lo
+}
+
+# func_arith arithmetic-term...
+func_arith ()
+{
+ func_arith_result=$(( $[*] ))
+}
+
+# func_len string
+# STRING may not start with a hyphen.
+func_len ()
+{
+ func_len_result=${#1}
+}
+
+_LT_EOF
+ ;;
+ *) # Bourne compatible functions.
+ cat << \_LT_EOF >> "$cfgfile"
+
+# func_dirname file append nondir_replacement
+# Compute the dirname of FILE. If nonempty, add APPEND to the result,
+# otherwise set result to NONDIR_REPLACEMENT.
+func_dirname ()
+{
+ # Extract subdirectory from the argument.
+ func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"`
+ if test "X$func_dirname_result" = "X${1}"; then
+ func_dirname_result="${3}"
+ else
+ func_dirname_result="$func_dirname_result${2}"
+ fi
+}
+
+# func_basename file
+func_basename ()
+{
+ func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"`
+}
+
+dnl func_dirname_and_basename
+dnl A portable version of this function is already defined in general.m4sh
+dnl so there is no need for it here.
+
+# func_stripname prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+# func_strip_suffix prefix name
+func_stripname ()
+{
+ case ${2} in
+ .*) func_stripname_result=`$ECHO "X${3}" \
+ | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;;
+ *) func_stripname_result=`$ECHO "X${3}" \
+ | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;;
+ esac
+}
+
+# sed scripts:
+my_sed_long_opt='1s/^\(-[[^=]]*\)=.*/\1/;q'
+my_sed_long_arg='1s/^-[[^=]]*=//'
+
+# func_opt_split
+func_opt_split ()
+{
+ func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"`
+ func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"`
+}
+
+# func_lo2o object
+func_lo2o ()
+{
+ func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"`
+}
+
+# func_xform libobj-or-source
+func_xform ()
+{
+ func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[[^.]]*$/.lo/'`
+}
+
+# func_arith arithmetic-term...
+func_arith ()
+{
+ func_arith_result=`expr "$[@]"`
+}
+
+# func_len string
+# STRING may not start with a hyphen.
+func_len ()
+{
+ func_len_result=`expr "$[1]" : ".*" 2>/dev/null || echo $max_cmd_len`
+}
+
+_LT_EOF
+esac
+
+case $lt_shell_append in
+ yes)
+ cat << \_LT_EOF >> "$cfgfile"
+
+# func_append var value
+# Append VALUE to the end of shell variable VAR.
+func_append ()
+{
+ eval "$[1]+=\$[2]"
+}
+_LT_EOF
+ ;;
+ *)
+ cat << \_LT_EOF >> "$cfgfile"
+
+# func_append var value
+# Append VALUE to the end of shell variable VAR.
+func_append ()
+{
+ eval "$[1]=\$$[1]\$[2]"
+}
+
+_LT_EOF
+ ;;
+ esac
+])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltoptions.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltoptions.m4
new file mode 100644
index 00000000..34151a3b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltoptions.m4
@@ -0,0 +1,368 @@
+# Helper functions for option handling. -*- Autoconf -*-
+#
+# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
+# Written by Gary V. Vaughan, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 6 ltoptions.m4
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])])
+
+
+# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME)
+# ------------------------------------------
+m4_define([_LT_MANGLE_OPTION],
+[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])])
+
+
+# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME)
+# ---------------------------------------
+# Set option OPTION-NAME for macro MACRO-NAME, and if there is a
+# matching handler defined, dispatch to it. Other OPTION-NAMEs are
+# saved as flags.
+m4_define([_LT_SET_OPTION],
+[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl
+m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]),
+ _LT_MANGLE_DEFUN([$1], [$2]),
+ [m4_warning([Unknown $1 option `$2'])])[]dnl
+])
+
+
+# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET])
+# ------------------------------------------------------------
+# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+m4_define([_LT_IF_OPTION],
+[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])])
+
+
+# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET)
+# -------------------------------------------------------
+# Execute IF-NOT-SET unless at least one of the options in OPTION-LIST
+# for MACRO-NAME is set.
+m4_define([_LT_UNLESS_OPTIONS],
+[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
+ [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option),
+ [m4_define([$0_found])])])[]dnl
+m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3
+])[]dnl
+])
+
+
+# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST)
+# ----------------------------------------
+# OPTION-LIST is a space-separated list of Libtool options associated
+# with MACRO-NAME. If any OPTION has a matching handler declared with
+# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about
+# the unknown option and exit.
+m4_defun([_LT_SET_OPTIONS],
+[# Set options
+m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
+ [_LT_SET_OPTION([$1], _LT_Option)])
+
+m4_if([$1],[LT_INIT],[
+ dnl
+ dnl Simply set some default values (i.e off) if boolean options were not
+ dnl specified:
+ _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no
+ ])
+ _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no
+ ])
+ dnl
+ dnl If no reference was made to various pairs of opposing options, then
+ dnl we run the default mode handler for the pair. For example, if neither
+ dnl `shared' nor `disable-shared' was passed, we enable building of shared
+ dnl archives by default:
+ _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED])
+ _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC])
+ _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC])
+ _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install],
+ [_LT_ENABLE_FAST_INSTALL])
+ ])
+])# _LT_SET_OPTIONS
+
+
+## --------------------------------- ##
+## Macros to handle LT_INIT options. ##
+## --------------------------------- ##
+
+# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME)
+# -----------------------------------------
+m4_define([_LT_MANGLE_DEFUN],
+[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])])
+
+
+# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE)
+# -----------------------------------------------
+m4_define([LT_OPTION_DEFINE],
+[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl
+])# LT_OPTION_DEFINE
+
+
+# dlopen
+# ------
+LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes
+])
+
+AU_DEFUN([AC_LIBTOOL_DLOPEN],
+[_LT_SET_OPTION([LT_INIT], [dlopen])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the `dlopen' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], [])
+
+
+# win32-dll
+# ---------
+# Declare package support for building win32 dll's.
+LT_OPTION_DEFINE([LT_INIT], [win32-dll],
+[enable_win32_dll=yes
+
+case $host in
+*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-cegcc*)
+ AC_CHECK_TOOL(AS, as, false)
+ AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+ AC_CHECK_TOOL(OBJDUMP, objdump, false)
+ ;;
+esac
+
+test -z "$AS" && AS=as
+_LT_DECL([], [AS], [0], [Assembler program])dnl
+
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+_LT_DECL([], [DLLTOOL], [0], [DLL creation program])dnl
+
+test -z "$OBJDUMP" && OBJDUMP=objdump
+_LT_DECL([], [OBJDUMP], [0], [Object dumper program])dnl
+])# win32-dll
+
+AU_DEFUN([AC_LIBTOOL_WIN32_DLL],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+_LT_SET_OPTION([LT_INIT], [win32-dll])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the `win32-dll' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], [])
+
+
+# _LT_ENABLE_SHARED([DEFAULT])
+# ----------------------------
+# implement the --enable-shared flag, and support the `shared' and
+# `disable-shared' LT_INIT options.
+# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'.
+m4_define([_LT_ENABLE_SHARED],
+[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([shared],
+ [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@],
+ [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])],
+ [p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_shared=yes ;;
+ no) enable_shared=no ;;
+ *)
+ enable_shared=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_shared=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac],
+ [enable_shared=]_LT_ENABLE_SHARED_DEFAULT)
+
+ _LT_DECL([build_libtool_libs], [enable_shared], [0],
+ [Whether or not to build shared libraries])
+])# _LT_ENABLE_SHARED
+
+LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])])
+
+# Old names:
+AC_DEFUN([AC_ENABLE_SHARED],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared])
+])
+
+AC_DEFUN([AC_DISABLE_SHARED],
+[_LT_SET_OPTION([LT_INIT], [disable-shared])
+])
+
+AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)])
+AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_ENABLE_SHARED], [])
+dnl AC_DEFUN([AM_DISABLE_SHARED], [])
+
+
+
+# _LT_ENABLE_STATIC([DEFAULT])
+# ----------------------------
+# implement the --enable-static flag, and support the `static' and
+# `disable-static' LT_INIT options.
+# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'.
+m4_define([_LT_ENABLE_STATIC],
+[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([static],
+ [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@],
+ [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])],
+ [p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_static=yes ;;
+ no) enable_static=no ;;
+ *)
+ enable_static=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_static=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac],
+ [enable_static=]_LT_ENABLE_STATIC_DEFAULT)
+
+ _LT_DECL([build_old_libs], [enable_static], [0],
+ [Whether or not to build static libraries])
+])# _LT_ENABLE_STATIC
+
+LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])])
+
+# Old names:
+AC_DEFUN([AC_ENABLE_STATIC],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static])
+])
+
+AC_DEFUN([AC_DISABLE_STATIC],
+[_LT_SET_OPTION([LT_INIT], [disable-static])
+])
+
+AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)])
+AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_ENABLE_STATIC], [])
+dnl AC_DEFUN([AM_DISABLE_STATIC], [])
+
+
+
+# _LT_ENABLE_FAST_INSTALL([DEFAULT])
+# ----------------------------------
+# implement the --enable-fast-install flag, and support the `fast-install'
+# and `disable-fast-install' LT_INIT options.
+# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'.
+m4_define([_LT_ENABLE_FAST_INSTALL],
+[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([fast-install],
+ [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@],
+ [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])],
+ [p=${PACKAGE-default}
+ case $enableval in
+ yes) enable_fast_install=yes ;;
+ no) enable_fast_install=no ;;
+ *)
+ enable_fast_install=no
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for pkg in $enableval; do
+ IFS="$lt_save_ifs"
+ if test "X$pkg" = "X$p"; then
+ enable_fast_install=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac],
+ [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT)
+
+_LT_DECL([fast_install], [enable_fast_install], [0],
+ [Whether or not to optimize for fast installation])dnl
+])# _LT_ENABLE_FAST_INSTALL
+
+LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])])
+
+# Old names:
+AU_DEFUN([AC_ENABLE_FAST_INSTALL],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you put
+the `fast-install' option into LT_INIT's first parameter.])
+])
+
+AU_DEFUN([AC_DISABLE_FAST_INSTALL],
+[_LT_SET_OPTION([LT_INIT], [disable-fast-install])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you put
+the `disable-fast-install' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], [])
+dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], [])
+
+
+# _LT_WITH_PIC([MODE])
+# --------------------
+# implement the --with-pic flag, and support the `pic-only' and `no-pic'
+# LT_INIT options.
+# MODE is either `yes' or `no'. If omitted, it defaults to `both'.
+m4_define([_LT_WITH_PIC],
+[AC_ARG_WITH([pic],
+ [AS_HELP_STRING([--with-pic],
+ [try to use only PIC/non-PIC objects @<:@default=use both@:>@])],
+ [pic_mode="$withval"],
+ [pic_mode=default])
+
+test -z "$pic_mode" && pic_mode=m4_default([$1], [default])
+
+_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl
+])# _LT_WITH_PIC
+
+LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])])
+LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])])
+
+# Old name:
+AU_DEFUN([AC_LIBTOOL_PICMODE],
+[_LT_SET_OPTION([LT_INIT], [pic-only])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the `pic-only' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_PICMODE], [])
+
+## ----------------- ##
+## LTDL_INIT Options ##
+## ----------------- ##
+
+m4_define([_LTDL_MODE], [])
+LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive],
+ [m4_define([_LTDL_MODE], [nonrecursive])])
+LT_OPTION_DEFINE([LTDL_INIT], [recursive],
+ [m4_define([_LTDL_MODE], [recursive])])
+LT_OPTION_DEFINE([LTDL_INIT], [subproject],
+ [m4_define([_LTDL_MODE], [subproject])])
+
+m4_define([_LTDL_TYPE], [])
+LT_OPTION_DEFINE([LTDL_INIT], [installable],
+ [m4_define([_LTDL_TYPE], [installable])])
+LT_OPTION_DEFINE([LTDL_INIT], [convenience],
+ [m4_define([_LTDL_TYPE], [convenience])])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltsugar.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltsugar.m4
new file mode 100644
index 00000000..9000a057
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltsugar.m4
@@ -0,0 +1,123 @@
+# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*-
+#
+# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
+# Written by Gary V. Vaughan, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 6 ltsugar.m4
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])])
+
+
+# lt_join(SEP, ARG1, [ARG2...])
+# -----------------------------
+# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their
+# associated separator.
+# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier
+# versions in m4sugar had bugs.
+m4_define([lt_join],
+[m4_if([$#], [1], [],
+ [$#], [2], [[$2]],
+ [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])])
+m4_define([_lt_join],
+[m4_if([$#$2], [2], [],
+ [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])])
+
+
+# lt_car(LIST)
+# lt_cdr(LIST)
+# ------------
+# Manipulate m4 lists.
+# These macros are necessary as long as we still need to support
+# Autoconf-2.59, which quotes differently.
+m4_define([lt_car], [[$1]])
+m4_define([lt_cdr],
+[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])],
+ [$#], 1, [],
+ [m4_dquote(m4_shift($@))])])
+m4_define([lt_unquote], $1)
+
+
+# lt_append(MACRO-NAME, STRING, [SEPARATOR])
+# ------------------------------------------
+# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'.
+# Note that neither SEPARATOR nor STRING are expanded; they are appended
+# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked).
+# No SEPARATOR is output if MACRO-NAME was previously undefined (as opposed
+# to defined but empty).
+#
+# This macro is needed until we can rely on Autoconf 2.62, since earlier
+# versions of m4sugar mistakenly expanded SEPARATOR but not STRING.
+m4_define([lt_append],
+[m4_define([$1],
+ m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])])
+
+
+
+# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...])
+# ----------------------------------------------------------
+# Produce a SEP delimited list of all paired combinations of elements of
+# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list
+# has the form PREFIXmINFIXSUFFIXn.
+# Needed until we can rely on m4_combine added in Autoconf 2.62.
+m4_define([lt_combine],
+[m4_if(m4_eval([$# > 3]), [1],
+ [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl
+[[m4_foreach([_Lt_prefix], [$2],
+ [m4_foreach([_Lt_suffix],
+ ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[,
+ [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])])
+
+
+# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ])
+# -----------------------------------------------------------------------
+# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited
+# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ.
+m4_define([lt_if_append_uniq],
+[m4_ifdef([$1],
+ [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1],
+ [lt_append([$1], [$2], [$3])$4],
+ [$5])],
+ [lt_append([$1], [$2], [$3])$4])])
+
+
+# lt_dict_add(DICT, KEY, VALUE)
+# -----------------------------
+m4_define([lt_dict_add],
+[m4_define([$1($2)], [$3])])
+
+
+# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE)
+# --------------------------------------------
+m4_define([lt_dict_add_subkey],
+[m4_define([$1($2:$3)], [$4])])
+
+
+# lt_dict_fetch(DICT, KEY, [SUBKEY])
+# ----------------------------------
+m4_define([lt_dict_fetch],
+[m4_ifval([$3],
+ m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]),
+ m4_ifdef([$1($2)], [m4_defn([$1($2)])]))])
+
+
+# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE])
+# -----------------------------------------------------------------
+m4_define([lt_if_dict_fetch],
+[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4],
+ [$5],
+ [$6])])
+
+
+# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...])
+# --------------------------------------------------------------
+m4_define([lt_dict_filter],
+[m4_if([$5], [], [],
+ [lt_join(m4_quote(m4_default([$4], [[, ]])),
+ lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]),
+ [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl
+])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltversion.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltversion.m4
new file mode 100644
index 00000000..b8e154fe
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/ltversion.m4
@@ -0,0 +1,23 @@
+# ltversion.m4 -- version numbers -*- Autoconf -*-
+#
+# Copyright (C) 2004 Free Software Foundation, Inc.
+# Written by Scott James Remnant, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# Generated from ltversion.in.
+
+# serial 3012 ltversion.m4
+# This file is part of GNU Libtool
+
+m4_define([LT_PACKAGE_VERSION], [2.2.6])
+m4_define([LT_PACKAGE_REVISION], [1.3012])
+
+AC_DEFUN([LTVERSION_VERSION],
+[macro_version='2.2.6'
+macro_revision='1.3012'
+_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?])
+_LT_DECL(, macro_revision, 0)
+])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lt~obsolete.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lt~obsolete.m4
new file mode 100644
index 00000000..637bb206
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/lt~obsolete.m4
@@ -0,0 +1,92 @@
+# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*-
+#
+# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc.
+# Written by Scott James Remnant, 2004.
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 4 lt~obsolete.m4
+
+# These exist entirely to fool aclocal when bootstrapping libtool.
+#
+# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN)
+# which have later been changed to m4_define as they aren't part of the
+# exported API, or moved to Autoconf or Automake where they belong.
+#
+# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN
+# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us
+# using a macro with the same name in our local m4/libtool.m4 it'll
+# pull the old libtool.m4 in (it doesn't see our shiny new m4_define
+# and doesn't know about Autoconf macros at all.)
+#
+# So we provide this file, which has a silly filename so it's always
+# included after everything else. This provides aclocal with the
+# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything
+# because those macros already exist, or will be overwritten later.
+# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6.
+#
+# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here.
+# Yes, that means every name once taken will need to remain here until
+# we give up compatibility with versions before 1.7, at which point
+# we need to keep only those names which we still refer to.
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])])
+
+m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])])
+m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])])
+m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])])
+m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])])
+m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])])
+m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])])
+m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])])
+m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])])
+m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])])
+m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])])
+m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])])
+m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])])
+m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])])
+m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])])
+m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])])
+m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])])
+m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])])
+m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])])
+m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])])
+m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])])
+m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])])
+m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])])
+m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])])
+m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])])
+m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])])
+m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])])
+m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])])
+m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])])
+m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])])
+m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])])
+m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])])
+m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])])
+m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])])
+m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])])
+m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])])
+m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])])
+m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])])
+m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])])
+m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])])
+m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])])
+m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])])
+m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])])
+m4_ifndef([AC_LIBTOOL_RC], [AC_DEFUN([AC_LIBTOOL_RC])])
+m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])])
+m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])])
+m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])])
+m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])])
+m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])])
+m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])])
+m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])])
+m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/nls.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/nls.m4
new file mode 100644
index 00000000..7967cc2f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/nls.m4
@@ -0,0 +1,31 @@
+# nls.m4 serial 3 (gettext-0.15)
+dnl Copyright (C) 1995-2003, 2005-2006 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+dnl
+dnl This file can be used in projects which are not available under
+dnl the GNU General Public License or the GNU Library General Public
+dnl License but which still want to provide support for the GNU gettext
+dnl functionality.
+dnl Please note that the actual code of the GNU gettext library is covered
+dnl by the GNU Library General Public License, and the rest of the GNU
+dnl gettext package is covered by the GNU General Public License.
+dnl They are *not* in the public domain.
+
+dnl Authors:
+dnl Ulrich Drepper <drepper@cygnus.com>, 1995-2000.
+dnl Bruno Haible <haible@clisp.cons.org>, 2000-2003.
+
+AC_PREREQ(2.50)
+
+AC_DEFUN([AM_NLS],
+[
+ AC_MSG_CHECKING([whether NLS is requested])
+ dnl Default is enabled NLS
+ AC_ARG_ENABLE(nls,
+ [ --disable-nls do not use Native Language Support],
+ USE_NLS=$enableval, USE_NLS=yes)
+ AC_MSG_RESULT($USE_NLS)
+ AC_SUBST(USE_NLS)
+])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/po.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/po.m4
new file mode 100644
index 00000000..00133ef3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/po.m4
@@ -0,0 +1,428 @@
+# po.m4 serial 13 (gettext-0.15)
+dnl Copyright (C) 1995-2006 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+dnl
+dnl This file can be used in projects which are not available under
+dnl the GNU General Public License or the GNU Library General Public
+dnl License but which still want to provide support for the GNU gettext
+dnl functionality.
+dnl Please note that the actual code of the GNU gettext library is covered
+dnl by the GNU Library General Public License, and the rest of the GNU
+dnl gettext package is covered by the GNU General Public License.
+dnl They are *not* in the public domain.
+
+dnl Authors:
+dnl Ulrich Drepper <drepper@cygnus.com>, 1995-2000.
+dnl Bruno Haible <haible@clisp.cons.org>, 2000-2003.
+
+AC_PREREQ(2.50)
+
+dnl Checks for all prerequisites of the po subdirectory.
+AC_DEFUN([AM_PO_SUBDIRS],
+[
+ AC_REQUIRE([AC_PROG_MAKE_SET])dnl
+ AC_REQUIRE([AC_PROG_INSTALL])dnl
+ AC_REQUIRE([AM_PROG_MKDIR_P])dnl defined by automake
+ AC_REQUIRE([AM_NLS])dnl
+
+ dnl Perform the following tests also if --disable-nls has been given,
+ dnl because they are needed for "make dist" to work.
+
+ dnl Search for GNU msgfmt in the PATH.
+ dnl The first test excludes Solaris msgfmt and early GNU msgfmt versions.
+ dnl The second test excludes FreeBSD msgfmt.
+ AM_PATH_PROG_WITH_TEST(MSGFMT, msgfmt,
+ [$ac_dir/$ac_word --statistics /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 &&
+ (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)],
+ :)
+ AC_PATH_PROG(GMSGFMT, gmsgfmt, $MSGFMT)
+
+ dnl Test whether it is GNU msgfmt >= 0.15.
+changequote(,)dnl
+ case `$MSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in
+ '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) MSGFMT_015=: ;;
+ *) MSGFMT_015=$MSGFMT ;;
+ esac
+changequote([,])dnl
+ AC_SUBST([MSGFMT_015])
+changequote(,)dnl
+ case `$GMSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in
+ '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) GMSGFMT_015=: ;;
+ *) GMSGFMT_015=$GMSGFMT ;;
+ esac
+changequote([,])dnl
+ AC_SUBST([GMSGFMT_015])
+
+ dnl Search for GNU xgettext 0.12 or newer in the PATH.
+ dnl The first test excludes Solaris xgettext and early GNU xgettext versions.
+ dnl The second test excludes FreeBSD xgettext.
+ AM_PATH_PROG_WITH_TEST(XGETTEXT, xgettext,
+ [$ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 &&
+ (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)],
+ :)
+ dnl Remove leftover from FreeBSD xgettext call.
+ rm -f messages.po
+
+ dnl Test whether it is GNU xgettext >= 0.15.
+changequote(,)dnl
+ case `$XGETTEXT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in
+ '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) XGETTEXT_015=: ;;
+ *) XGETTEXT_015=$XGETTEXT ;;
+ esac
+changequote([,])dnl
+ AC_SUBST([XGETTEXT_015])
+
+ dnl Search for GNU msgmerge 0.11 or newer in the PATH.
+ AM_PATH_PROG_WITH_TEST(MSGMERGE, msgmerge,
+ [$ac_dir/$ac_word --update -q /dev/null /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1], :)
+
+ dnl Installation directories.
+ dnl Autoconf >= 2.60 defines localedir. For older versions of autoconf, we
+ dnl have to define it here, so that it can be used in po/Makefile.
+ test -n "$localedir" || localedir='${datadir}/locale'
+ AC_SUBST([localedir])
+
+ AC_CONFIG_COMMANDS([po-directories], [[
+ for ac_file in $CONFIG_FILES; do
+ # Support "outfile[:infile[:infile...]]"
+ case "$ac_file" in
+ *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;;
+ esac
+ # PO directories have a Makefile.in generated from Makefile.in.in.
+ case "$ac_file" in */Makefile.in)
+ # Adjust a relative srcdir.
+ ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'`
+ ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`"
+ ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'`
+ # In autoconf-2.13 it is called $ac_given_srcdir.
+ # In autoconf-2.50 it is called $srcdir.
+ test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir"
+ case "$ac_given_srcdir" in
+ .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;;
+ /*) top_srcdir="$ac_given_srcdir" ;;
+ *) top_srcdir="$ac_dots$ac_given_srcdir" ;;
+ esac
+ # Treat a directory as a PO directory if and only if it has a
+ # POTFILES.in file. This allows packages to have multiple PO
+ # directories under different names or in different locations.
+ if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then
+ rm -f "$ac_dir/POTFILES"
+ test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES"
+ cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES"
+ POMAKEFILEDEPS="POTFILES.in"
+ # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend
+ # on $ac_dir but don't depend on user-specified configuration
+ # parameters.
+ if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then
+ # The LINGUAS file contains the set of available languages.
+ if test -n "$OBSOLETE_ALL_LINGUAS"; then
+ test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete"
+ fi
+ ALL_LINGUAS_=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"`
+            # Hide the ALL_LINGUAS assignment from automake < 1.5.
+ eval 'ALL_LINGUAS''=$ALL_LINGUAS_'
+ POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS"
+ else
+ # The set of available languages was given in configure.in.
+            # Hide the ALL_LINGUAS assignment from automake < 1.5.
+ eval 'ALL_LINGUAS''=$OBSOLETE_ALL_LINGUAS'
+ fi
+ # Compute POFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po)
+ # Compute UPDATEPOFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update)
+ # Compute DUMMYPOFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop)
+ # Compute GMOFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo)
+ case "$ac_given_srcdir" in
+ .) srcdirpre= ;;
+ *) srcdirpre='$(srcdir)/' ;;
+ esac
+ POFILES=
+ UPDATEPOFILES=
+ DUMMYPOFILES=
+ GMOFILES=
+ for lang in $ALL_LINGUAS; do
+ POFILES="$POFILES $srcdirpre$lang.po"
+ UPDATEPOFILES="$UPDATEPOFILES $lang.po-update"
+ DUMMYPOFILES="$DUMMYPOFILES $lang.nop"
+ GMOFILES="$GMOFILES $srcdirpre$lang.gmo"
+ done
+ # CATALOGS depends on both $ac_dir and the user's LINGUAS
+ # environment variable.
+ INST_LINGUAS=
+ if test -n "$ALL_LINGUAS"; then
+ for presentlang in $ALL_LINGUAS; do
+ useit=no
+ if test "%UNSET%" != "$LINGUAS"; then
+ desiredlanguages="$LINGUAS"
+ else
+ desiredlanguages="$ALL_LINGUAS"
+ fi
+ for desiredlang in $desiredlanguages; do
+ # Use the presentlang catalog if desiredlang is
+ # a. equal to presentlang, or
+ # b. a variant of presentlang (because in this case,
+ # presentlang can be used as a fallback for messages
+ # which are not translated in the desiredlang catalog).
+ case "$desiredlang" in
+ "$presentlang"*) useit=yes;;
+ esac
+ done
+ if test $useit = yes; then
+ INST_LINGUAS="$INST_LINGUAS $presentlang"
+ fi
+ done
+ fi
+ CATALOGS=
+ if test -n "$INST_LINGUAS"; then
+ for lang in $INST_LINGUAS; do
+ CATALOGS="$CATALOGS $lang.gmo"
+ done
+ fi
+ test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile"
+ sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile"
+ for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do
+ if test -f "$f"; then
+ case "$f" in
+ *.orig | *.bak | *~) ;;
+ *) cat "$f" >> "$ac_dir/Makefile" ;;
+ esac
+ fi
+ done
+ fi
+ ;;
+ esac
+ done]],
+ [# Capture the value of obsolete ALL_LINGUAS because we need it to compute
+ # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. But hide it
+ # from automake < 1.5.
+ eval 'OBSOLETE_ALL_LINGUAS''="$ALL_LINGUAS"'
+ # Capture the value of LINGUAS because we need it to compute CATALOGS.
+ LINGUAS="${LINGUAS-%UNSET%}"
+ ])
+])
+
+dnl Postprocesses a Makefile in a directory containing PO files.
+AC_DEFUN([AM_POSTPROCESS_PO_MAKEFILE],
+[
+ # When this code is run, in config.status, two variables have already been
+ # set:
+ # - OBSOLETE_ALL_LINGUAS is the value of LINGUAS set in configure.in,
+ # - LINGUAS is the value of the environment variable LINGUAS at configure
+ # time.
+
+changequote(,)dnl
+ # Adjust a relative srcdir.
+ ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'`
+ ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`"
+ ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'`
+ # In autoconf-2.13 it is called $ac_given_srcdir.
+ # In autoconf-2.50 it is called $srcdir.
+ test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir"
+ case "$ac_given_srcdir" in
+ .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;;
+ /*) top_srcdir="$ac_given_srcdir" ;;
+ *) top_srcdir="$ac_dots$ac_given_srcdir" ;;
+ esac
+
+ # Find a way to echo strings without interpreting backslash.
+ if test "X`(echo '\t') 2>/dev/null`" = 'X\t'; then
+ gt_echo='echo'
+ else
+ if test "X`(printf '%s\n' '\t') 2>/dev/null`" = 'X\t'; then
+ gt_echo='printf %s\n'
+ else
+ echo_func () {
+ cat <<EOT
+$*
+EOT
+ }
+ gt_echo='echo_func'
+ fi
+ fi
+
+ # A sed script that extracts the value of VARIABLE from a Makefile.
+ sed_x_variable='
+# Test if the hold space is empty.
+x
+s/P/P/
+x
+ta
+# Yes it was empty. Look if we have the expected variable definition.
+/^[ ]*VARIABLE[ ]*=/{
+ # Seen the first line of the variable definition.
+ s/^[ ]*VARIABLE[ ]*=//
+ ba
+}
+bd
+:a
+# Here we are processing a line from the variable definition.
+# Remove comment, more precisely replace it with a space.
+s/#.*$/ /
+# See if the line ends in a backslash.
+tb
+:b
+s/\\$//
+# Print the line, without the trailing backslash.
+p
+tc
+# There was no trailing backslash. The end of the variable definition is
+# reached. Clear the hold space.
+s/^.*$//
+x
+bd
+:c
+# A trailing backslash means that the variable definition continues in the
+# next line. Put a nonempty string into the hold space to indicate this.
+s/^.*$/P/
+x
+:d
+'
+changequote([,])dnl
+
+ # Set POTFILES to the value of the Makefile variable POTFILES.
+ sed_x_POTFILES=`$gt_echo "$sed_x_variable" | sed -e '/^ *#/d' -e 's/VARIABLE/POTFILES/g'`
+ POTFILES=`sed -n -e "$sed_x_POTFILES" < "$ac_file"`
+ # Compute POTFILES_DEPS as
+ # $(foreach file, $(POTFILES), $(top_srcdir)/$(file))
+ POTFILES_DEPS=
+ for file in $POTFILES; do
+ POTFILES_DEPS="$POTFILES_DEPS "'$(top_srcdir)/'"$file"
+ done
+ POMAKEFILEDEPS=""
+
+ if test -n "$OBSOLETE_ALL_LINGUAS"; then
+ test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete"
+ fi
+ if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then
+ # The LINGUAS file contains the set of available languages.
+ ALL_LINGUAS_=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"`
+ POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS"
+ else
+ # Set ALL_LINGUAS to the value of the Makefile variable LINGUAS.
+ sed_x_LINGUAS=`$gt_echo "$sed_x_variable" | sed -e '/^ *#/d' -e 's/VARIABLE/LINGUAS/g'`
+ ALL_LINGUAS_=`sed -n -e "$sed_x_LINGUAS" < "$ac_file"`
+ fi
+  # Hide the ALL_LINGUAS assignment from automake < 1.5.
+ eval 'ALL_LINGUAS''=$ALL_LINGUAS_'
+ # Compute POFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po)
+ # Compute UPDATEPOFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update)
+ # Compute DUMMYPOFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop)
+ # Compute GMOFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo)
+ # Compute PROPERTIESFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(top_srcdir)/$(DOMAIN)_$(lang).properties)
+ # Compute CLASSFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(top_srcdir)/$(DOMAIN)_$(lang).class)
+ # Compute QMFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).qm)
+ # Compute MSGFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(frob $(lang)).msg)
+ # Compute RESOURCESDLLFILES
+ # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(frob $(lang))/$(DOMAIN).resources.dll)
+ case "$ac_given_srcdir" in
+ .) srcdirpre= ;;
+ *) srcdirpre='$(srcdir)/' ;;
+ esac
+ POFILES=
+ UPDATEPOFILES=
+ DUMMYPOFILES=
+ GMOFILES=
+ PROPERTIESFILES=
+ CLASSFILES=
+ QMFILES=
+ MSGFILES=
+ RESOURCESDLLFILES=
+ for lang in $ALL_LINGUAS; do
+ POFILES="$POFILES $srcdirpre$lang.po"
+ UPDATEPOFILES="$UPDATEPOFILES $lang.po-update"
+ DUMMYPOFILES="$DUMMYPOFILES $lang.nop"
+ GMOFILES="$GMOFILES $srcdirpre$lang.gmo"
+ PROPERTIESFILES="$PROPERTIESFILES \$(top_srcdir)/\$(DOMAIN)_$lang.properties"
+ CLASSFILES="$CLASSFILES \$(top_srcdir)/\$(DOMAIN)_$lang.class"
+ QMFILES="$QMFILES $srcdirpre$lang.qm"
+ frobbedlang=`echo $lang | sed -e 's/\..*$//' -e 'y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/'`
+ MSGFILES="$MSGFILES $srcdirpre$frobbedlang.msg"
+ frobbedlang=`echo $lang | sed -e 's/_/-/g' -e 's/^sr-CS/sr-SP/' -e 's/@latin$/-Latn/' -e 's/@cyrillic$/-Cyrl/' -e 's/^sr-SP$/sr-SP-Latn/' -e 's/^uz-UZ$/uz-UZ-Latn/'`
+ RESOURCESDLLFILES="$RESOURCESDLLFILES $srcdirpre$frobbedlang/\$(DOMAIN).resources.dll"
+ done
+ # CATALOGS depends on both $ac_dir and the user's LINGUAS
+ # environment variable.
+ INST_LINGUAS=
+ if test -n "$ALL_LINGUAS"; then
+ for presentlang in $ALL_LINGUAS; do
+ useit=no
+ if test "%UNSET%" != "$LINGUAS"; then
+ desiredlanguages="$LINGUAS"
+ else
+ desiredlanguages="$ALL_LINGUAS"
+ fi
+ for desiredlang in $desiredlanguages; do
+ # Use the presentlang catalog if desiredlang is
+ # a. equal to presentlang, or
+ # b. a variant of presentlang (because in this case,
+ # presentlang can be used as a fallback for messages
+ # which are not translated in the desiredlang catalog).
+ case "$desiredlang" in
+ "$presentlang"*) useit=yes;;
+ esac
+ done
+ if test $useit = yes; then
+ INST_LINGUAS="$INST_LINGUAS $presentlang"
+ fi
+ done
+ fi
+ CATALOGS=
+ JAVACATALOGS=
+ QTCATALOGS=
+ TCLCATALOGS=
+ CSHARPCATALOGS=
+ if test -n "$INST_LINGUAS"; then
+ for lang in $INST_LINGUAS; do
+ CATALOGS="$CATALOGS $lang.gmo"
+ JAVACATALOGS="$JAVACATALOGS \$(DOMAIN)_$lang.properties"
+ QTCATALOGS="$QTCATALOGS $lang.qm"
+ frobbedlang=`echo $lang | sed -e 's/\..*$//' -e 'y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/'`
+ TCLCATALOGS="$TCLCATALOGS $frobbedlang.msg"
+ frobbedlang=`echo $lang | sed -e 's/_/-/g' -e 's/^sr-CS/sr-SP/' -e 's/@latin$/-Latn/' -e 's/@cyrillic$/-Cyrl/' -e 's/^sr-SP$/sr-SP-Latn/' -e 's/^uz-UZ$/uz-UZ-Latn/'`
+ CSHARPCATALOGS="$CSHARPCATALOGS $frobbedlang/\$(DOMAIN).resources.dll"
+ done
+ fi
+
+ sed -e "s|@POTFILES_DEPS@|$POTFILES_DEPS|g" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@PROPERTIESFILES@|$PROPERTIESFILES|g" -e "s|@CLASSFILES@|$CLASSFILES|g" -e "s|@QMFILES@|$QMFILES|g" -e "s|@MSGFILES@|$MSGFILES|g" -e "s|@RESOURCESDLLFILES@|$RESOURCESDLLFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@JAVACATALOGS@|$JAVACATALOGS|g" -e "s|@QTCATALOGS@|$QTCATALOGS|g" -e "s|@TCLCATALOGS@|$TCLCATALOGS|g" -e "s|@CSHARPCATALOGS@|$CSHARPCATALOGS|g" -e 's,^#distdir:,distdir:,' < "$ac_file" > "$ac_file.tmp"
+ if grep -l '@TCLCATALOGS@' "$ac_file" > /dev/null; then
+ # Add dependencies that cannot be formulated as a simple suffix rule.
+ for lang in $ALL_LINGUAS; do
+ frobbedlang=`echo $lang | sed -e 's/\..*$//' -e 'y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/'`
+ cat >> "$ac_file.tmp" <<EOF
+$frobbedlang.msg: $lang.po
+ @echo "\$(MSGFMT) -c --tcl -d \$(srcdir) -l $lang $srcdirpre$lang.po"; \
+ \$(MSGFMT) -c --tcl -d "\$(srcdir)" -l $lang $srcdirpre$lang.po || { rm -f "\$(srcdir)/$frobbedlang.msg"; exit 1; }
+EOF
+ done
+ fi
+ if grep -l '@CSHARPCATALOGS@' "$ac_file" > /dev/null; then
+ # Add dependencies that cannot be formulated as a simple suffix rule.
+ for lang in $ALL_LINGUAS; do
+ frobbedlang=`echo $lang | sed -e 's/_/-/g' -e 's/^sr-CS/sr-SP/' -e 's/@latin$/-Latn/' -e 's/@cyrillic$/-Cyrl/' -e 's/^sr-SP$/sr-SP-Latn/' -e 's/^uz-UZ$/uz-UZ-Latn/'`
+ cat >> "$ac_file.tmp" <<EOF
+$frobbedlang/\$(DOMAIN).resources.dll: $lang.po
+ @echo "\$(MSGFMT) -c --csharp -d \$(srcdir) -l $lang $srcdirpre$lang.po -r \$(DOMAIN)"; \
+ \$(MSGFMT) -c --csharp -d "\$(srcdir)" -l $lang $srcdirpre$lang.po -r "\$(DOMAIN)" || { rm -f "\$(srcdir)/$frobbedlang.msg"; exit 1; }
+EOF
+ done
+ fi
+ if test -n "$POMAKEFILEDEPS"; then
+ cat >> "$ac_file.tmp" <<EOF
+Makefile: $POMAKEFILEDEPS
+EOF
+ fi
+ mv "$ac_file.tmp" "$ac_file"
+])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/posix-shell.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/posix-shell.m4
new file mode 100644
index 00000000..4c56193c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/posix-shell.m4
@@ -0,0 +1,63 @@
+# Find a POSIX-conforming shell.
+
+# Copyright (C) 2007-2008 Free Software Foundation, Inc.
+
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# Written by Paul Eggert.
+
+# If a POSIX-conforming shell can be found, set POSIX_SHELL and
+# PREFERABLY_POSIX_SHELL to it. If not, set POSIX_SHELL to the
+# empty string and PREFERABLY_POSIX_SHELL to '/bin/sh'.
+
+AC_DEFUN([gl_POSIX_SHELL],
+[
+ AC_CACHE_CHECK([for a shell that conforms to POSIX], [gl_cv_posix_shell],
+ [gl_test_posix_shell_script='
+ func_return () {
+ (exit [$]1)
+ }
+ func_success () {
+ func_return 0
+ }
+ func_failure () {
+ func_return 1
+ }
+ func_ret_success () {
+ return 0
+ }
+ func_ret_failure () {
+ return 1
+ }
+ subshell_umask_sanity () {
+ (umask 22; (umask 0); test $(umask) -eq 22)
+ }
+ test "[$](echo foo)" = foo &&
+ func_success &&
+ ! func_failure &&
+ func_ret_success &&
+ ! func_ret_failure &&
+ (set x && func_ret_success y && test x = "[$]1") &&
+ subshell_umask_sanity
+ '
+ for gl_cv_posix_shell in \
+ "$CONFIG_SHELL" "$SHELL" /bin/sh /bin/bash /bin/ksh /bin/sh5 no; do
+ case $gl_cv_posix_shell in
+ /*)
+ "$gl_cv_posix_shell" -c "$gl_test_posix_shell_script" 2>/dev/null \
+ && break;;
+ esac
+ done])
+
+ if test "$gl_cv_posix_shell" != no; then
+ POSIX_SHELL=$gl_cv_posix_shell
+ PREFERABLY_POSIX_SHELL=$POSIX_SHELL
+ else
+ POSIX_SHELL=
+ PREFERABLY_POSIX_SHELL=/bin/sh
+ fi
+ AC_SUBST([POSIX_SHELL])
+ AC_SUBST([PREFERABLY_POSIX_SHELL])
+])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/progtest.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/progtest.m4
new file mode 100644
index 00000000..a56365cd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/progtest.m4
@@ -0,0 +1,92 @@
+# progtest.m4 serial 4 (gettext-0.14.2)
+dnl Copyright (C) 1996-2003, 2005 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+dnl
+dnl This file can be used in projects which are not available under
+dnl the GNU General Public License or the GNU Library General Public
+dnl License but which still want to provide support for the GNU gettext
+dnl functionality.
+dnl Please note that the actual code of the GNU gettext library is covered
+dnl by the GNU Library General Public License, and the rest of the GNU
+dnl gettext package is covered by the GNU General Public License.
+dnl They are *not* in the public domain.
+
+dnl Authors:
+dnl Ulrich Drepper <drepper@cygnus.com>, 1996.
+
+AC_PREREQ(2.50)
+
+# Search path for a program which passes the given test.
+
+dnl AM_PATH_PROG_WITH_TEST(VARIABLE, PROG-TO-CHECK-FOR,
+dnl TEST-PERFORMED-ON-FOUND_PROGRAM [, VALUE-IF-NOT-FOUND [, PATH]])
+AC_DEFUN([AM_PATH_PROG_WITH_TEST],
+[
+# Prepare PATH_SEPARATOR.
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+# Find out how to test for executable files. Don't use a zero-byte file,
+# as systems may use methods other than mode bits to determine executability.
+cat >conf$$.file <<_ASEOF
+#! /bin/sh
+exit 0
+_ASEOF
+chmod +x conf$$.file
+if test -x conf$$.file >/dev/null 2>&1; then
+ ac_executable_p="test -x"
+else
+ ac_executable_p="test -f"
+fi
+rm -f conf$$.file
+
+# Extract the first word of "$2", so it can be a program name with args.
+set dummy $2; ac_word=[$]2
+AC_MSG_CHECKING([for $ac_word])
+AC_CACHE_VAL(ac_cv_path_$1,
+[case "[$]$1" in
+ [[\\/]]* | ?:[[\\/]]*)
+ ac_cv_path_$1="[$]$1" # Let the user override the test with a path.
+ ;;
+ *)
+ ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in ifelse([$5], , $PATH, [$5]); do
+ IFS="$ac_save_IFS"
+ test -z "$ac_dir" && ac_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then
+ echo "$as_me: trying $ac_dir/$ac_word..." >&AS_MESSAGE_LOG_FD
+ if [$3]; then
+ ac_cv_path_$1="$ac_dir/$ac_word$ac_exec_ext"
+ break 2
+ fi
+ fi
+ done
+ done
+ IFS="$ac_save_IFS"
+dnl If no 4th arg is given, leave the cache variable unset,
+dnl so AC_PATH_PROGS will keep looking.
+ifelse([$4], , , [ test -z "[$]ac_cv_path_$1" && ac_cv_path_$1="$4"
+])dnl
+ ;;
+esac])dnl
+$1="$ac_cv_path_$1"
+if test ifelse([$4], , [-n "[$]$1"], ["[$]$1" != "$4"]); then
+ AC_MSG_RESULT([$]$1)
+else
+ AC_MSG_RESULT(no)
+fi
+AC_SUBST($1)dnl
+])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/visibility.m4 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/visibility.m4
new file mode 100644
index 00000000..2ff6330a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/visibility.m4
@@ -0,0 +1,52 @@
+# visibility.m4 serial 1 (gettext-0.15)
+dnl Copyright (C) 2005 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+
+dnl From Bruno Haible.
+
+dnl Tests whether the compiler supports the command-line option
+dnl -fvisibility=hidden and the function and variable attributes
+dnl __attribute__((__visibility__("hidden"))) and
+dnl __attribute__((__visibility__("default"))).
+dnl Does *not* test for __visibility__("protected") - which has tricky
+dnl semantics (see the 'vismain' test in glibc) and does not exist e.g. on
+dnl MacOS X.
+dnl Does *not* test for __visibility__("internal") - which has processor
+dnl dependent semantics.
+dnl Does *not* test for #pragma GCC visibility push(hidden) - which is
+dnl "really only recommended for legacy code".
+dnl Set the variable CFLAG_VISIBILITY.
+dnl Defines and sets the variable HAVE_VISIBILITY.
+
+AC_DEFUN([gl_VISIBILITY],
+[
+ AC_REQUIRE([AC_PROG_CC])
+ CFLAG_VISIBILITY=
+ HAVE_VISIBILITY=0
+ if test -n "$GCC"; then
+ AC_MSG_CHECKING([for simple visibility declarations])
+ AC_CACHE_VAL(gl_cv_cc_visibility, [
+ gl_save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -fvisibility=hidden"
+ AC_TRY_COMPILE(
+ [extern __attribute__((__visibility__("hidden"))) int hiddenvar;
+ extern __attribute__((__visibility__("default"))) int exportedvar;
+ extern __attribute__((__visibility__("hidden"))) int hiddenfunc (void);
+ extern __attribute__((__visibility__("default"))) int exportedfunc (void);],
+ [],
+ gl_cv_cc_visibility=yes,
+ gl_cv_cc_visibility=no)
+ CFLAGS="$gl_save_CFLAGS"])
+ AC_MSG_RESULT([$gl_cv_cc_visibility])
+ if test $gl_cv_cc_visibility = yes; then
+ CFLAG_VISIBILITY="-fvisibility=hidden"
+ HAVE_VISIBILITY=1
+ fi
+ fi
+ AC_SUBST([CFLAG_VISIBILITY])
+ AC_SUBST([HAVE_VISIBILITY])
+ AC_DEFINE_UNQUOTED([HAVE_VISIBILITY], [$HAVE_VISIBILITY],
+ [Define to 1 or 0, depending whether the compiler supports simple visibility declarations.])
+])
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/LINGUAS b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/LINGUAS
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/LINGUAS
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Makefile.in.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Makefile.in.in
new file mode 100644
index 00000000..5022b8b1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Makefile.in.in
@@ -0,0 +1,403 @@
+# Makefile for PO directory in any package using GNU gettext.
+# Copyright (C) 1995-1997, 2000-2006 by Ulrich Drepper <drepper@gnu.ai.mit.edu>
+#
+# This file can be copied and used freely without restrictions. It can
+# be used in projects which are not available under the GNU General Public
+# License but which still want to provide support for the GNU gettext
+# functionality.
+# Please note that the actual code of GNU gettext is covered by the GNU
+# General Public License and is *not* in the public domain.
+#
+# Origin: gettext-0.16
+
+PACKAGE = @PACKAGE@
+VERSION = @VERSION@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+
+SHELL = /bin/sh
+@SET_MAKE@
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+datarootdir = @datarootdir@
+datadir = @datadir@
+localedir = @localedir@
+gettextsrcdir = $(datadir)/gettext/po
+
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+
+# We use $(mkdir_p).
+# In automake <= 1.9.x, $(mkdir_p) is defined either as "mkdir -p --" or as
+# "$(mkinstalldirs)" or as "$(install_sh) -d". For these automake versions,
+# @install_sh@ does not start with $(SHELL), so we add it.
+# In automake >= 1.10, @mkdir_p@ is derived from ${MKDIR_P}, which is defined
+# either as "/path/to/mkdir -p" or ".../install-sh -c -d". For these automake
+# versions, $(mkinstalldirs) and $(install_sh) are unused.
+mkinstalldirs = $(SHELL) @install_sh@ -d
+install_sh = $(SHELL) @install_sh@
+MKDIR_P = @MKDIR_P@
+mkdir_p = @mkdir_p@
+
+GMSGFMT_ = @GMSGFMT@
+GMSGFMT_no = @GMSGFMT@
+GMSGFMT_yes = @GMSGFMT_015@
+GMSGFMT = $(GMSGFMT_$(USE_MSGCTXT))
+MSGFMT_ = @MSGFMT@
+MSGFMT_no = @MSGFMT@
+MSGFMT_yes = @MSGFMT_015@
+MSGFMT = $(MSGFMT_$(USE_MSGCTXT))
+XGETTEXT_ = @XGETTEXT@
+XGETTEXT_no = @XGETTEXT@
+XGETTEXT_yes = @XGETTEXT_015@
+XGETTEXT = $(XGETTEXT_$(USE_MSGCTXT))
+MSGMERGE = msgmerge
+MSGMERGE_UPDATE = @MSGMERGE@ --update
+MSGINIT = msginit
+MSGCONV = msgconv
+MSGFILTER = msgfilter
+
+POFILES = @POFILES@
+GMOFILES = @GMOFILES@
+UPDATEPOFILES = @UPDATEPOFILES@
+DUMMYPOFILES = @DUMMYPOFILES@
+DISTFILES.common = Makefile.in.in remove-potcdate.sin \
+$(DISTFILES.common.extra1) $(DISTFILES.common.extra2) $(DISTFILES.common.extra3)
+DISTFILES = $(DISTFILES.common) Makevars POTFILES.in \
+$(POFILES) $(GMOFILES) \
+$(DISTFILES.extra1) $(DISTFILES.extra2) $(DISTFILES.extra3)
+
+POTFILES = \
+
+CATALOGS = @CATALOGS@
+
+# Makevars gets inserted here. (Don't remove this line!)
+
+.SUFFIXES:
+.SUFFIXES: .po .gmo .mo .sed .sin .nop .po-create .po-update
+
+.po.mo:
+ @echo "$(MSGFMT) -c -o $@ $<"; \
+ $(MSGFMT) -c -o t-$@ $< && mv t-$@ $@
+
+.po.gmo:
+ @lang=`echo $* | sed -e 's,.*/,,'`; \
+ test "$(srcdir)" = . && cdcmd="" || cdcmd="cd $(srcdir) && "; \
+ echo "$${cdcmd}rm -f $${lang}.gmo && $(GMSGFMT) -c --statistics -o $${lang}.gmo $${lang}.po"; \
+ cd $(srcdir) && rm -f $${lang}.gmo && $(GMSGFMT) -c --statistics -o t-$${lang}.gmo $${lang}.po && mv t-$${lang}.gmo $${lang}.gmo
+
+.sin.sed:
+ sed -e '/^#/d' $< > t-$@
+ mv t-$@ $@
+
+
+all: all-@USE_NLS@
+
+all-yes: stamp-po
+all-no:
+
+# $(srcdir)/$(DOMAIN).pot is only created when needed. When xgettext finds no
+# internationalized messages, no $(srcdir)/$(DOMAIN).pot is created (because
+# we don't want to bother translators with empty POT files). We assume that
+# LINGUAS is empty in this case, i.e. $(POFILES) and $(GMOFILES) are empty.
+# In this case, stamp-po is a nop (i.e. a phony target).
+
+# stamp-po is a timestamp denoting the last time at which the CATALOGS have
+# been loosely updated. Its purpose is that when a developer or translator
+# checks out the package via CVS, and the $(DOMAIN).pot file is not in CVS,
+# "make" will update the $(DOMAIN).pot and the $(CATALOGS), but subsequent
+# invocations of "make" will do nothing. This timestamp would not be necessary
+# if updating the $(CATALOGS) would always touch them; however, the rule for
+# $(POFILES) has been designed to not touch files that don't need to be
+# changed.
+stamp-po: $(srcdir)/$(DOMAIN).pot
+ test ! -f $(srcdir)/$(DOMAIN).pot || \
+ test -z "$(GMOFILES)" || $(MAKE) $(GMOFILES)
+ @test ! -f $(srcdir)/$(DOMAIN).pot || { \
+ echo "touch stamp-po" && \
+ echo timestamp > stamp-poT && \
+ mv stamp-poT stamp-po; \
+ }
+
+# Note: Target 'all' must not depend on target '$(DOMAIN).pot-update',
+# otherwise packages like GCC can not be built if only parts of the source
+# have been downloaded.
+
+# This target rebuilds $(DOMAIN).pot; it is an expensive operation.
+# Note that $(DOMAIN).pot is not touched if it doesn't need to be changed.
+$(DOMAIN).pot-update: $(POTFILES) $(srcdir)/POTFILES.in remove-potcdate.sed
+ if test -n '$(MSGID_BUGS_ADDRESS)' || test '$(PACKAGE_BUGREPORT)' = '@'PACKAGE_BUGREPORT'@'; then \
+ msgid_bugs_address='$(MSGID_BUGS_ADDRESS)'; \
+ else \
+ msgid_bugs_address='$(PACKAGE_BUGREPORT)'; \
+ fi; \
+ $(XGETTEXT) --default-domain=$(DOMAIN) --directory=$(top_srcdir) \
+ --add-comments=TRANSLATORS: $(XGETTEXT_OPTIONS) \
+ --files-from=$(srcdir)/POTFILES.in \
+ --copyright-holder='$(COPYRIGHT_HOLDER)' \
+ --msgid-bugs-address="$$msgid_bugs_address"
+ test ! -f $(DOMAIN).po || { \
+ if test -f $(srcdir)/$(DOMAIN).pot; then \
+ sed -f remove-potcdate.sed < $(srcdir)/$(DOMAIN).pot > $(DOMAIN).1po && \
+ sed -f remove-potcdate.sed < $(DOMAIN).po > $(DOMAIN).2po && \
+ if cmp $(DOMAIN).1po $(DOMAIN).2po >/dev/null 2>&1; then \
+ rm -f $(DOMAIN).1po $(DOMAIN).2po $(DOMAIN).po; \
+ else \
+ rm -f $(DOMAIN).1po $(DOMAIN).2po $(srcdir)/$(DOMAIN).pot && \
+ mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \
+ fi; \
+ else \
+ mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \
+ fi; \
+ }
+
+# This rule has no dependencies: we don't need to update $(DOMAIN).pot at
+# every "make" invocation, only create it when it is missing.
+# Only "make $(DOMAIN).pot-update" or "make dist" will force an update.
+$(srcdir)/$(DOMAIN).pot:
+ $(MAKE) $(DOMAIN).pot-update
+
+# This target rebuilds a PO file if $(DOMAIN).pot has changed.
+# Note that a PO file is not touched if it doesn't need to be changed.
+$(POFILES): $(srcdir)/$(DOMAIN).pot
+ @lang=`echo $@ | sed -e 's,.*/,,' -e 's/\.po$$//'`; \
+ if test -f "$(srcdir)/$${lang}.po"; then \
+ test "$(srcdir)" = . && cdcmd="" || cdcmd="cd $(srcdir) && "; \
+ echo "$${cdcmd}$(MSGMERGE_UPDATE) $${lang}.po $(DOMAIN).pot"; \
+ cd $(srcdir) && $(MSGMERGE_UPDATE) $${lang}.po $(DOMAIN).pot; \
+ else \
+ $(MAKE) $${lang}.po-create; \
+ fi
+
+
+install: install-exec install-data
+install-exec:
+install-data: install-data-@USE_NLS@
+ if test "$(PACKAGE)" = "gettext-tools"; then \
+ $(mkdir_p) $(DESTDIR)$(gettextsrcdir); \
+ for file in $(DISTFILES.common) Makevars.template; do \
+ $(INSTALL_DATA) $(srcdir)/$$file \
+ $(DESTDIR)$(gettextsrcdir)/$$file; \
+ done; \
+ for file in Makevars; do \
+ rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \
+ done; \
+ else \
+ : ; \
+ fi
+install-data-no: all
+install-data-yes: all
+ $(mkdir_p) $(DESTDIR)$(datadir)
+ @catalogs='$(CATALOGS)'; \
+ for cat in $$catalogs; do \
+ cat=`basename $$cat`; \
+ lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \
+ dir=$(localedir)/$$lang/LC_MESSAGES; \
+ $(mkdir_p) $(DESTDIR)$$dir; \
+ if test -r $$cat; then realcat=$$cat; else realcat=$(srcdir)/$$cat; fi; \
+ $(INSTALL_DATA) $$realcat $(DESTDIR)$$dir/$(DOMAIN).mo; \
+ echo "installing $$realcat as $(DESTDIR)$$dir/$(DOMAIN).mo"; \
+ for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \
+ if test -n "$$lc"; then \
+ if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | grep ' -> ' >/dev/null; then \
+ link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \
+ mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \
+ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \
+ (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \
+ for file in *; do \
+ if test -f $$file; then \
+ ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \
+ fi; \
+ done); \
+ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \
+ else \
+ if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \
+ :; \
+ else \
+ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \
+ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \
+ fi; \
+ fi; \
+ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \
+ ln -s ../LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \
+ ln $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \
+ cp -p $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \
+ echo "installing $$realcat link as $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo"; \
+ fi; \
+ done; \
+ done
+
+install-strip: install
+
+installdirs: installdirs-exec installdirs-data
+installdirs-exec:
+installdirs-data: installdirs-data-@USE_NLS@
+ if test "$(PACKAGE)" = "gettext-tools"; then \
+ $(mkdir_p) $(DESTDIR)$(gettextsrcdir); \
+ else \
+ : ; \
+ fi
+installdirs-data-no:
+installdirs-data-yes:
+ $(mkdir_p) $(DESTDIR)$(datadir)
+ @catalogs='$(CATALOGS)'; \
+ for cat in $$catalogs; do \
+ cat=`basename $$cat`; \
+ lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \
+ dir=$(localedir)/$$lang/LC_MESSAGES; \
+ $(mkdir_p) $(DESTDIR)$$dir; \
+ for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \
+ if test -n "$$lc"; then \
+ if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | grep ' -> ' >/dev/null; then \
+ link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \
+ mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \
+ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \
+ (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \
+ for file in *; do \
+ if test -f $$file; then \
+ ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \
+ fi; \
+ done); \
+ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \
+ else \
+ if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \
+ :; \
+ else \
+ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \
+ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \
+ fi; \
+ fi; \
+ fi; \
+ done; \
+ done
+
+# Define this as empty until I find a useful application.
+installcheck:
+
+uninstall: uninstall-exec uninstall-data
+uninstall-exec:
+uninstall-data: uninstall-data-@USE_NLS@
+ if test "$(PACKAGE)" = "gettext-tools"; then \
+ for file in $(DISTFILES.common) Makevars.template; do \
+ rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \
+ done; \
+ else \
+ : ; \
+ fi
+uninstall-data-no:
+uninstall-data-yes:
+ catalogs='$(CATALOGS)'; \
+ for cat in $$catalogs; do \
+ cat=`basename $$cat`; \
+ lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \
+ for lc in LC_MESSAGES $(EXTRA_LOCALE_CATEGORIES); do \
+ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \
+ done; \
+ done
+
+check: all
+
+info dvi ps pdf html tags TAGS ctags CTAGS ID:
+
+mostlyclean:
+ rm -f remove-potcdate.sed
+ rm -f stamp-poT
+ rm -f core core.* $(DOMAIN).po $(DOMAIN).1po $(DOMAIN).2po *.new.po
+ rm -fr *.o
+
+clean: mostlyclean
+
+distclean: clean
+ rm -f Makefile Makefile.in POTFILES *.mo
+
+maintainer-clean: distclean
+ @echo "This command is intended for maintainers to use;"
+ @echo "it deletes files that may require special tools to rebuild."
+ rm -f stamp-po $(GMOFILES)
+
+distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)/$(subdir)
+dist distdir:
+ $(MAKE) update-po
+ @$(MAKE) dist2
+# This is a separate target because 'update-po' must be executed before.
+dist2: stamp-po $(DISTFILES)
+ dists="$(DISTFILES)"; \
+ if test "$(PACKAGE)" = "gettext-tools"; then \
+ dists="$$dists Makevars.template"; \
+ fi; \
+ if test -f $(srcdir)/$(DOMAIN).pot; then \
+ dists="$$dists $(DOMAIN).pot stamp-po"; \
+ fi; \
+ if test -f $(srcdir)/ChangeLog; then \
+ dists="$$dists ChangeLog"; \
+ fi; \
+ for i in 0 1 2 3 4 5 6 7 8 9; do \
+ if test -f $(srcdir)/ChangeLog.$$i; then \
+ dists="$$dists ChangeLog.$$i"; \
+ fi; \
+ done; \
+ if test -f $(srcdir)/LINGUAS; then dists="$$dists LINGUAS"; fi; \
+ for file in $$dists; do \
+ if test -f $$file; then \
+ cp -p $$file $(distdir) || exit 1; \
+ else \
+ cp -p $(srcdir)/$$file $(distdir) || exit 1; \
+ fi; \
+ done
+
+update-po: Makefile
+ $(MAKE) $(DOMAIN).pot-update
+ test -z "$(UPDATEPOFILES)" || $(MAKE) $(UPDATEPOFILES)
+ $(MAKE) update-gmo
+
+# General rule for creating PO files.
+
+.nop.po-create:
+ @lang=`echo $@ | sed -e 's/\.po-create$$//'`; \
+ echo "File $$lang.po does not exist. If you are a translator, you can create it through 'msginit'." 1>&2; \
+ exit 1
+
+# General rule for updating PO files.
+
+.nop.po-update:
+ @lang=`echo $@ | sed -e 's/\.po-update$$//'`; \
+ if test "$(PACKAGE)" = "gettext-tools"; then PATH=`pwd`/../src:$$PATH; fi; \
+ tmpdir=`pwd`; \
+ echo "$$lang:"; \
+ test "$(srcdir)" = . && cdcmd="" || cdcmd="cd $(srcdir) && "; \
+ echo "$${cdcmd}$(MSGMERGE) $$lang.po $(DOMAIN).pot -o $$lang.new.po"; \
+ cd $(srcdir); \
+ if $(MSGMERGE) $$lang.po $(DOMAIN).pot -o $$tmpdir/$$lang.new.po; then \
+ if cmp $$lang.po $$tmpdir/$$lang.new.po >/dev/null 2>&1; then \
+ rm -f $$tmpdir/$$lang.new.po; \
+ else \
+ if mv -f $$tmpdir/$$lang.new.po $$lang.po; then \
+ :; \
+ else \
+ echo "msgmerge for $$lang.po failed: cannot move $$tmpdir/$$lang.new.po to $$lang.po" 1>&2; \
+ exit 1; \
+ fi; \
+ fi; \
+ else \
+ echo "msgmerge for $$lang.po failed!" 1>&2; \
+ rm -f $$tmpdir/$$lang.new.po; \
+ fi
+
+$(DUMMYPOFILES):
+
+update-gmo: Makefile $(GMOFILES)
+ @:
+
+Makefile: Makefile.in.in Makevars $(top_builddir)/config.status @POMAKEFILEDEPS@
+ cd $(top_builddir) \
+ && $(SHELL) ./config.status $(subdir)/$@.in po-directories
+
+force:
+
+# Tell versions [3.59,3.63) of GNU make not to export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Makevars b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Makevars
new file mode 100644
index 00000000..dc19bc96
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Makevars
@@ -0,0 +1,46 @@
+# Makefile variables for PO directory in any package using GNU gettext.
+
+# Usually the message domain is the same as the package name.
+DOMAIN = $(PACKAGE)
+
+# These two variables depend on the location of this directory.
+subdir = po
+top_builddir = ..
+
+# These options get passed to xgettext.
+XGETTEXT_OPTIONS = --keyword=_ --keyword=N_
+
+# This is the copyright holder that gets inserted into the header of the
+# $(DOMAIN).pot file. Set this to the copyright holder of the surrounding
+# package. (Note that the msgstr strings, extracted from the package's
+# sources, belong to the copyright holder of the package.) Translators are
+# expected to transfer the copyright for their translations to this person
+# or entity, or to disclaim their copyright. The empty string stands for
+# the public domain; in this case the translators are expected to disclaim
+# their copyright.
+COPYRIGHT_HOLDER =
+
+# This is the email address or URL to which the translators shall report
+# bugs in the untranslated strings:
+# - Strings which are not entire sentences, see the maintainer guidelines
+# in the GNU gettext documentation, section 'Preparing Strings'.
+# - Strings which use unclear terms or require additional context to be
+# understood.
+# - Strings which make invalid assumptions about notation of date, time or
+# money.
+# - Pluralisation problems.
+# - Incorrect English spelling.
+# - Incorrect formatting.
+# It can be your email address, or a mailing list address where translators
+# can write to without being subscribed, or the URL of a web page through
+# which the translators can contact you.
+MSGID_BUGS_ADDRESS =
+
+# This is the list of locale categories, beyond LC_MESSAGES, for which the
+# message catalogs shall be used. It is usually empty.
+EXTRA_LOCALE_CATEGORIES =
+
+# Although you may need a slightly wider terminal than 80 chars, it is
+# much nicer to edit the output of --help when this is set.
+XGETTEXT_OPTIONS += --no-wrap
+MSGMERGE += --no-wrap
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/POTFILES.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/POTFILES.in
new file mode 100644
index 00000000..ee430c5c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/POTFILES.in
@@ -0,0 +1,10 @@
+# List of source files which contain translatable strings.
+src/xz/args.c
+src/xz/coder.c
+src/xz/file_io.c
+src/xz/hardware.c
+src/xz/main.c
+src/xz/message.c
+src/xz/options.c
+src/xz/suffix.c
+src/xz/util.c
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Rules-quot b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Rules-quot
new file mode 100644
index 00000000..9c2a995e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/Rules-quot
@@ -0,0 +1,47 @@
+# Special Makefile rules for English message catalogs with quotation marks.
+
+DISTFILES.common.extra1 = quot.sed boldquot.sed en@quot.header en@boldquot.header insert-header.sin Rules-quot
+
+.SUFFIXES: .insert-header .po-update-en
+
+en@quot.po-create:
+ $(MAKE) en@quot.po-update
+en@boldquot.po-create:
+ $(MAKE) en@boldquot.po-update
+
+en@quot.po-update: en@quot.po-update-en
+en@boldquot.po-update: en@boldquot.po-update-en
+
+.insert-header.po-update-en:
+ @lang=`echo $@ | sed -e 's/\.po-update-en$$//'`; \
+ if test "$(PACKAGE)" = "gettext"; then PATH=`pwd`/../src:$$PATH; GETTEXTLIBDIR=`cd $(top_srcdir)/src && pwd`; export GETTEXTLIBDIR; fi; \
+ tmpdir=`pwd`; \
+ echo "$$lang:"; \
+ ll=`echo $$lang | sed -e 's/@.*//'`; \
+ LC_ALL=C; export LC_ALL; \
+ cd $(srcdir); \
+ if $(MSGINIT) -i $(DOMAIN).pot --no-translator -l $$ll -o - 2>/dev/null | sed -f $$tmpdir/$$lang.insert-header | $(MSGCONV) -t UTF-8 | $(MSGFILTER) sed -f `echo $$lang | sed -e 's/.*@//'`.sed 2>/dev/null > $$tmpdir/$$lang.new.po; then \
+ if cmp $$lang.po $$tmpdir/$$lang.new.po >/dev/null 2>&1; then \
+ rm -f $$tmpdir/$$lang.new.po; \
+ else \
+ if mv -f $$tmpdir/$$lang.new.po $$lang.po; then \
+ :; \
+ else \
+ echo "creation of $$lang.po failed: cannot move $$tmpdir/$$lang.new.po to $$lang.po" 1>&2; \
+ exit 1; \
+ fi; \
+ fi; \
+ else \
+ echo "creation of $$lang.po failed!" 1>&2; \
+ rm -f $$tmpdir/$$lang.new.po; \
+ fi
+
+en@quot.insert-header: insert-header.sin
+ sed -e '/^#/d' -e 's/HEADER/en@quot.header/g' $(srcdir)/insert-header.sin > en@quot.insert-header
+
+en@boldquot.insert-header: insert-header.sin
+ sed -e '/^#/d' -e 's/HEADER/en@boldquot.header/g' $(srcdir)/insert-header.sin > en@boldquot.insert-header
+
+mostlyclean: mostlyclean-quot
+mostlyclean-quot:
+ rm -f *.insert-header
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/boldquot.sed b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/boldquot.sed
new file mode 100644
index 00000000..4b937aa5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/boldquot.sed
@@ -0,0 +1,10 @@
+s/"\([^"]*\)"/“\1”/g
+s/`\([^`']*\)'/‘\1’/g
+s/ '\([^`']*\)' / ‘\1’ /g
+s/ '\([^`']*\)'$/ ‘\1’/g
+s/^'\([^`']*\)' /‘\1’ /g
+s/“”/""/g
+s/“/“/g
+s/”/”/g
+s/‘/‘/g
+s/’/’/g
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/en@boldquot.header b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/en@boldquot.header
new file mode 100644
index 00000000..fedb6a06
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/en@boldquot.header
@@ -0,0 +1,25 @@
+# All this catalog "translates" are quotation characters.
+# The msgids must be ASCII and therefore cannot contain real quotation
+# characters, only substitutes like grave accent (0x60), apostrophe (0x27)
+# and double quote (0x22). These substitutes look strange; see
+# http://www.cl.cam.ac.uk/~mgk25/ucs/quotes.html
+#
+# This catalog translates grave accent (0x60) and apostrophe (0x27) to
+# left single quotation mark (U+2018) and right single quotation mark (U+2019).
+# It also translates pairs of apostrophe (0x27) to
+# left single quotation mark (U+2018) and right single quotation mark (U+2019)
+# and pairs of quotation mark (0x22) to
+# left double quotation mark (U+201C) and right double quotation mark (U+201D).
+#
+# When output to a UTF-8 terminal, the quotation characters appear perfectly.
+# When output to an ISO-8859-1 terminal, the single quotation marks are
+# transliterated to apostrophes (by iconv in glibc 2.2 or newer) or to
+# grave/acute accent (by libiconv), and the double quotation marks are
+# transliterated to 0x22.
+# When output to an ASCII terminal, the single quotation marks are
+# transliterated to apostrophes, and the double quotation marks are
+# transliterated to 0x22.
+#
+# This catalog furthermore displays the text between the quotation marks in
+# bold face, assuming the VT100/XTerm escape sequences.
+#
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/en@quot.header b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/en@quot.header
new file mode 100644
index 00000000..a9647fc3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/en@quot.header
@@ -0,0 +1,22 @@
+# All this catalog "translates" are quotation characters.
+# The msgids must be ASCII and therefore cannot contain real quotation
+# characters, only substitutes like grave accent (0x60), apostrophe (0x27)
+# and double quote (0x22). These substitutes look strange; see
+# http://www.cl.cam.ac.uk/~mgk25/ucs/quotes.html
+#
+# This catalog translates grave accent (0x60) and apostrophe (0x27) to
+# left single quotation mark (U+2018) and right single quotation mark (U+2019).
+# It also translates pairs of apostrophe (0x27) to
+# left single quotation mark (U+2018) and right single quotation mark (U+2019)
+# and pairs of quotation mark (0x22) to
+# left double quotation mark (U+201C) and right double quotation mark (U+201D).
+#
+# When output to a UTF-8 terminal, the quotation characters appear perfectly.
+# When output to an ISO-8859-1 terminal, the single quotation marks are
+# transliterated to apostrophes (by iconv in glibc 2.2 or newer) or to
+# grave/acute accent (by libiconv), and the double quotation marks are
+# transliterated to 0x22.
+# When output to an ASCII terminal, the single quotation marks are
+# transliterated to apostrophes, and the double quotation marks are
+# transliterated to 0x22.
+#
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/insert-header.sin b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/insert-header.sin
new file mode 100644
index 00000000..b26de01f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/insert-header.sin
@@ -0,0 +1,23 @@
+# Sed script that inserts the file called HEADER before the header entry.
+#
+# At each occurrence of a line starting with "msgid ", we execute the following
+# commands. At the first occurrence, insert the file. At the following
+# occurrences, do nothing. The distinction between the first and the following
+# occurrences is achieved by looking at the hold space.
+/^msgid /{
+x
+# Test if the hold space is empty.
+s/m/m/
+ta
+# Yes it was empty. First occurrence. Read the file.
+r HEADER
+# Output the file's contents by reading the next line. But don't lose the
+# current line while doing this.
+g
+N
+bb
+:a
+# The hold space was nonempty. Following occurrences. Do nothing.
+x
+:b
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/quot.sed b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/quot.sed
new file mode 100644
index 00000000..0122c463
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/quot.sed
@@ -0,0 +1,6 @@
+s/"\([^"]*\)"/“\1”/g
+s/`\([^`']*\)'/‘\1’/g
+s/ '\([^`']*\)' / ‘\1’ /g
+s/ '\([^`']*\)'$/ ‘\1’/g
+s/^'\([^`']*\)' /‘\1’ /g
+s/“”/""/g
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/remove-potcdate.sin b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/remove-potcdate.sin
new file mode 100644
index 00000000..2436c49e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/remove-potcdate.sin
@@ -0,0 +1,19 @@
+# Sed script that removes the POT-Creation-Date line in the header entry
+# from a POT file.
+#
+# The distinction between the first and the following occurrences of the
+# pattern is achieved by looking at the hold space.
+/^"POT-Creation-Date: .*"$/{
+x
+# Test if the hold space is empty.
+s/P/P/
+ta
+# Yes it was empty. First occurrence. Remove the line.
+g
+d
+bb
+:a
+# The hold space was nonempty. Following occurrences. Do nothing.
+x
+:b
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/stamp-po b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/stamp-po
new file mode 100644
index 00000000..9788f702
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/stamp-po
@@ -0,0 +1 @@
+timestamp
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/xz.pot b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/xz.pot
new file mode 100644
index 00000000..dc6e3201
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/po/xz.pot
@@ -0,0 +1,481 @@
+# SOME DESCRIPTIVE TITLE.
+# This file is put in the public domain.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: lasse.collin@tukaani.org\n"
+"POT-Creation-Date: 2009-08-27 18:37+0300\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=CHARSET\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: src/xz/args.c:289
+#, c-format
+msgid "%s: Unknown file format type"
+msgstr ""
+
+#: src/xz/args.c:312 src/xz/args.c:320
+#, c-format
+msgid "%s: Unsupported integrity check type"
+msgstr ""
+
+#: src/xz/args.c:334
+msgid "Only one file can be specified with `--files'or `--files0'."
+msgstr ""
+
+#: src/xz/args.c:392
+msgid "The environment variable XZ_OPT contains too many arguments"
+msgstr ""
+
+#: src/xz/coder.c:95
+msgid "Maximum number of filters is four"
+msgstr ""
+
+#: src/xz/coder.c:108
+#, c-format
+msgid "Memory usage limit (%<PRIu64> MiB) is too small for the given filter setup (%<PRIu64> MiB)"
+msgstr ""
+
+#: src/xz/coder.c:128
+msgid "Using a preset in raw mode is discouraged."
+msgstr ""
+
+#: src/xz/coder.c:130
+msgid "The exact options of the presets may vary between software versions."
+msgstr ""
+
+#: src/xz/coder.c:158
+msgid "The .lzma format supports only the LZMA1 filter"
+msgstr ""
+
+#: src/xz/coder.c:166
+msgid "LZMA1 cannot be used with the .xz format"
+msgstr ""
+
+#: src/xz/coder.c:186
+#, c-format
+msgid "%s MiB (%s B) of memory is required per thread, limit is %s MiB (%s B)"
+msgstr ""
+
+#: src/xz/coder.c:553
+#, c-format
+msgid "Limit was %s MiB, but %s MiB would have been needed"
+msgstr ""
+
+#: src/xz/file_io.c:86
+#, c-format
+msgid "%s: File seems to be moved, not removing"
+msgstr ""
+
+#: src/xz/file_io.c:93
+#, c-format
+msgid "%s: Cannot remove: %s"
+msgstr ""
+
+#: src/xz/file_io.c:118
+#, c-format
+msgid "%s: Cannot set the file owner: %s"
+msgstr ""
+
+#: src/xz/file_io.c:124
+#, c-format
+msgid "%s: Cannot set the file group: %s"
+msgstr ""
+
+#: src/xz/file_io.c:143
+#, c-format
+msgid "%s: Cannot set the file permissions: %s"
+msgstr ""
+
+#: src/xz/file_io.c:281 src/xz/file_io.c:360
+#, c-format
+msgid "%s: Is a symbolic link, skipping"
+msgstr ""
+
+#: src/xz/file_io.c:395
+#, c-format
+msgid "%s: Is a directory, skipping"
+msgstr ""
+
+#: src/xz/file_io.c:402
+#, c-format
+msgid "%s: Not a regular file, skipping"
+msgstr ""
+
+#: src/xz/file_io.c:419
+#, c-format
+msgid "%s: File has setuid or setgid bit set, skipping"
+msgstr ""
+
+#: src/xz/file_io.c:426
+#, c-format
+msgid "%s: File has sticky bit set, skipping"
+msgstr ""
+
+#: src/xz/file_io.c:433
+#, c-format
+msgid "%s: Input file has more than one hard link, skipping"
+msgstr ""
+
+#: src/xz/file_io.c:555
+#, c-format
+msgid "%s: Closing the file failed: %s"
+msgstr ""
+
+#: src/xz/file_io.c:663
+#, c-format
+msgid "%s: Read error: %s"
+msgstr ""
+
+#: src/xz/file_io.c:708
+#, c-format
+msgid "%s: Write error: %s"
+msgstr ""
+
+#: src/xz/main.c:57
+msgid "Writing to standard output failed"
+msgstr ""
+
+#: src/xz/main.c:60
+msgid "Unknown error"
+msgstr ""
+
+#: src/xz/main.c:117
+#, c-format
+msgid "%s: Error reading filenames: %s"
+msgstr ""
+
+#: src/xz/main.c:124
+#, c-format
+msgid "%s: Unexpected end of input when reading filenames"
+msgstr ""
+
+#: src/xz/main.c:148
+#, c-format
+msgid "%s: Null character found when reading filenames; maybe you meant to use `--files0' instead of `--files'?"
+msgstr ""
+
+#: src/xz/main.c:272
+msgid "Cannot read data from standard input when reading filenames from standard input"
+msgstr ""
+
+#: src/xz/message.c:840 src/xz/message.c:884
+msgid "Internal error (bug)"
+msgstr ""
+
+#: src/xz/message.c:847
+msgid "Cannot establish signal handlers"
+msgstr ""
+
+#: src/xz/message.c:856
+msgid "No integrity check; not verifying file integrity"
+msgstr ""
+
+#: src/xz/message.c:859
+msgid "Unsupported type of integrity check; not verifying file integrity"
+msgstr ""
+
+#: src/xz/message.c:866
+msgid "Memory usage limit reached"
+msgstr ""
+
+#: src/xz/message.c:869
+msgid "File format not recognized"
+msgstr ""
+
+#: src/xz/message.c:872
+msgid "Unsupported options"
+msgstr ""
+
+#: src/xz/message.c:875
+msgid "Compressed data is corrupt"
+msgstr ""
+
+#: src/xz/message.c:878
+msgid "Unexpected end of input"
+msgstr ""
+
+#: src/xz/message.c:897
+#, c-format
+msgid "%s: Filter chain:"
+msgstr ""
+
+#: src/xz/message.c:1008
+#, c-format
+msgid "Try `%s --help' for more information."
+msgstr ""
+
+#: src/xz/message.c:1027
+#, c-format
+msgid ""
+"Usage: %s [OPTION]... [FILE]...\n"
+"Compress or decompress FILEs in the .xz format.\n"
+"\n"
+msgstr ""
+
+#: src/xz/message.c:1031
+msgid "Mandatory arguments to long options are mandatory for short options too.\n"
+msgstr ""
+
+#: src/xz/message.c:1035
+msgid " Operation mode:\n"
+msgstr ""
+
+#: src/xz/message.c:1038
+msgid ""
+" -z, --compress force compression\n"
+" -d, --decompress force decompression\n"
+" -t, --test test compressed file integrity\n"
+" -l, --list list information about files"
+msgstr ""
+
+#: src/xz/message.c:1044
+msgid ""
+"\n"
+" Operation modifiers:\n"
+msgstr ""
+
+#: src/xz/message.c:1047
+msgid ""
+" -k, --keep keep (don't delete) input files\n"
+" -f, --force force overwrite of output file and (de)compress links\n"
+" -c, --stdout write to standard output and don't delete input files"
+msgstr ""
+
+#: src/xz/message.c:1053
+msgid ""
+" -S, --suffix=.SUF use the suffix `.SUF' on compressed files\n"
+" --files=[FILE] read filenames to process from FILE; if FILE is\n"
+" omitted, filenames are read from the standard input;\n"
+" filenames must be terminated with the newline character\n"
+" --files0=[FILE] like --files but use the null character as terminator"
+msgstr ""
+
+#: src/xz/message.c:1060
+msgid ""
+"\n"
+" Basic file format and compression options:\n"
+msgstr ""
+
+#: src/xz/message.c:1062
+msgid ""
+" -F, --format=FMT file format to encode or decode; possible values are\n"
+" `auto' (default), `xz', `lzma', and `raw'\n"
+" -C, --check=CHECK integrity check type: `crc32', `crc64' (default),\n"
+" or `sha256'"
+msgstr ""
+
+#: src/xz/message.c:1069
+msgid ""
+" -0 .. -9 compression preset; 0-2 fast compression, 3-5 good\n"
+" compression, 6-9 excellent compression; default is 6"
+msgstr ""
+
+#: src/xz/message.c:1073
+msgid ""
+" -e, --extreme use more CPU time when encoding to increase compression\n"
+" ratio without increasing memory usage of the decoder"
+msgstr ""
+
+#: src/xz/message.c:1078
+msgid ""
+" -M, --memory=NUM use roughly NUM bytes of memory at maximum; 0 indicates\n"
+" the default setting, which depends on the operation mode\n"
+" and the amount of physical memory (RAM)"
+msgstr ""
+
+#: src/xz/message.c:1084
+msgid ""
+"\n"
+" Custom filter chain for compression (alternative for using presets):"
+msgstr ""
+
+#: src/xz/message.c:1089
+msgid ""
+"\n"
+" --lzma1[=OPTS] LZMA1 or LZMA2; OPTS is a comma-separated list of zero or\n"
+" --lzma2[=OPTS] more of the following options (valid values; default):\n"
+" preset=NUM reset options to preset number NUM (0-9)\n"
+" dict=NUM dictionary size (4KiB - 1536MiB; 8MiB)\n"
+" lc=NUM number of literal context bits (0-4; 3)\n"
+" lp=NUM number of literal position bits (0-4; 0)\n"
+" pb=NUM number of position bits (0-4; 2)\n"
+" mode=MODE compression mode (fast, normal; normal)\n"
+" nice=NUM nice length of a match (2-273; 64)\n"
+" mf=NAME match finder (hc3, hc4, bt2, bt3, bt4; bt4)\n"
+" depth=NUM maximum search depth; 0=automatic (default)"
+msgstr ""
+
+#: src/xz/message.c:1104
+msgid ""
+"\n"
+" --x86[=OPTS] x86 BCJ filter\n"
+" --powerpc[=OPTS] PowerPC BCJ filter (big endian only)\n"
+" --ia64[=OPTS] IA64 (Itanium) BCJ filter\n"
+" --arm[=OPTS] ARM BCJ filter (little endian only)\n"
+" --armthumb[=OPTS] ARM-Thumb BCJ filter (little endian only)\n"
+" --sparc[=OPTS] SPARC BCJ filter\n"
+" Valid OPTS for all BCJ filters:\n"
+" start=NUM start offset for conversions (default=0)"
+msgstr ""
+
+#: src/xz/message.c:1116
+msgid ""
+"\n"
+" --delta[=OPTS] Delta filter; valid OPTS (valid values; default):\n"
+" dist=NUM distance between bytes being subtracted\n"
+" from each other (1-256; 1)"
+msgstr ""
+
+#: src/xz/message.c:1124
+msgid ""
+"\n"
+" --subblock[=OPTS] Subblock filter; valid OPTS (valid values; default):\n"
+" size=NUM number of bytes of data per subblock\n"
+" (1 - 256Mi; 4Ki)\n"
+" rle=NUM run-length encoder chunk size (0-256; 0)"
+msgstr ""
+
+#: src/xz/message.c:1133
+msgid ""
+"\n"
+" Other options:\n"
+msgstr ""
+
+#: src/xz/message.c:1136
+msgid ""
+" -q, --quiet suppress warnings; specify twice to suppress errors too\n"
+" -v, --verbose be verbose; specify twice for even more verbose"
+msgstr ""
+
+#: src/xz/message.c:1141
+msgid " -Q, --no-warn make warnings not affect the exit status"
+msgstr ""
+
+#: src/xz/message.c:1145
+msgid ""
+"\n"
+" -h, --help display the short help (lists only the basic options)\n"
+" -H, --long-help display this long help"
+msgstr ""
+
+#: src/xz/message.c:1150
+msgid ""
+" -h, --help display this short help\n"
+" -H, --long-help display the long help (lists also the advanced options)"
+msgstr ""
+
+#: src/xz/message.c:1154
+msgid " -V, --version display the version number"
+msgstr ""
+
+#: src/xz/message.c:1156
+msgid ""
+"\n"
+"With no FILE, or when FILE is -, read standard input.\n"
+msgstr ""
+
+#: src/xz/message.c:1160
+#, c-format
+msgid ""
+"On this system and configuration, this program will use at maximum of roughly\n"
+"%s MiB RAM and "
+msgstr ""
+
+#: src/xz/message.c:1162
+msgid ""
+"one thread.\n"
+"\n"
+msgstr ""
+
+#: src/xz/message.c:1167
+#, c-format
+msgid "Report bugs to <%s> (in English or Finnish).\n"
+msgstr ""
+
+#: src/xz/message.c:1169
+#, c-format
+msgid "%s home page: <%s>\n"
+msgstr ""
+
+#: src/xz/options.c:88
+#, c-format
+msgid "%s: Options must be `name=value' pairs separated with commas"
+msgstr ""
+
+#: src/xz/options.c:108
+#, c-format
+msgid "%s: Invalid option value"
+msgstr ""
+
+#: src/xz/options.c:131
+#, c-format
+msgid "%s: Invalid option name"
+msgstr ""
+
+#: src/xz/options.c:310
+#, c-format
+msgid "Unsupported LZMA1/LZMA2 preset: %s"
+msgstr ""
+
+#: src/xz/options.c:429
+msgid "The sum of lc and lp must be at maximum of 4"
+msgstr ""
+
+#: src/xz/options.c:434
+#, c-format
+msgid "The selected match finder requires at least nice=%<PRIu32>"
+msgstr ""
+
+#: src/xz/suffix.c:79 src/xz/suffix.c:164
+#, c-format
+msgid "%s: With --format=raw, --suffix=.SUF is required unless writing to stdout"
+msgstr ""
+
+#: src/xz/suffix.c:99
+#, c-format
+msgid "%s: Filename has an unknown suffix, skipping"
+msgstr ""
+
+#: src/xz/suffix.c:154
+#, c-format
+msgid "%s: File already has `%s' suffix, skipping"
+msgstr ""
+
+#: src/xz/suffix.c:205
+#, c-format
+msgid "%s: Invalid filename suffix"
+msgstr ""
+
+#: src/xz/util.c:62
+#, c-format
+msgid "%s: Value is not a non-negative decimal integer"
+msgstr ""
+
+#: src/xz/util.c:104
+#, c-format
+msgid "%s: Invalid multiplier suffix. Valid suffixes:"
+msgstr ""
+
+#: src/xz/util.c:124
+#, c-format
+msgid "Value of the option `%s' must be in the range [%<PRIu64>, %<PRIu64>]"
+msgstr ""
+
+#: src/xz/util.c:198
+msgid "Empty filename, skipping"
+msgstr ""
+
+#: src/xz/util.c:212
+msgid "Compressed data not read from a terminal unless `--force' is used."
+msgstr ""
+
+#: src/xz/util.c:225
+msgid "Compressed data not written to a terminal unless `--force' is used."
+msgstr ""
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/Makefile.am
new file mode 100644
index 00000000..f03f5a3d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/Makefile.am
@@ -0,0 +1,9 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+SUBDIRS = liblzma xz xzdec lzmainfo scripts
+EXTRA_DIST = common
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/Makefile.in
new file mode 100644
index 00000000..6d7c827e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/Makefile.in
@@ -0,0 +1,598 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = src
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
+ html-recursive info-recursive install-data-recursive \
+ install-dvi-recursive install-exec-recursive \
+ install-html-recursive install-info-recursive \
+ install-pdf-recursive install-ps-recursive install-recursive \
+ installcheck-recursive installdirs-recursive pdf-recursive \
+ ps-recursive uninstall-recursive
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
+ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \
+ distdir
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+SUBDIRS = liblzma xz xzdec lzmainfo scripts
+EXTRA_DIST = common
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+# (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+$(RECURSIVE_TARGETS):
+ @failcom='exit 1'; \
+ for f in x $$MAKEFLAGS; do \
+ case $$f in \
+ *=* | --[!k]*);; \
+ *k*) failcom='fail=yes';; \
+ esac; \
+ done; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+$(RECURSIVE_CLEAN_TARGETS):
+ @failcom='exit 1'; \
+ for f in x $$MAKEFLAGS; do \
+ case $$f in \
+ *=* | --[!k]*);; \
+ *k*) failcom='fail=yes';; \
+ esac; \
+ done; \
+ dot_seen=no; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ rev=''; for subdir in $$list; do \
+ if test "$$subdir" = "."; then :; else \
+ rev="$$subdir $$rev"; \
+ fi; \
+ done; \
+ rev="$$rev ."; \
+ target=`echo $@ | sed s/-recursive//`; \
+ for subdir in $$rev; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done && test -z "$$fail"
+tags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
+ done
+ctags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
+ done
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \
+ install-am install-strip tags-recursive
+
+.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
+ all all-am check check-am clean clean-generic clean-libtool \
+ ctags ctags-recursive distclean distclean-generic \
+ distclean-libtool distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \
+ uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/bswap.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/bswap.h
new file mode 100644
index 00000000..2ed2c297
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/bswap.h
@@ -0,0 +1,54 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file bswap.h
+/// \brief Byte swapping
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_BSWAP_H
+#define LZMA_BSWAP_H
+
+// NOTE: We assume that config.h is already #included.
+
+// At least glibc has byteswap.h which contains inline assembly code for
+// byteswapping. Some systems have byteswap.h but lack one or more of the
+// bswap_xx macros/functions, which is why we check them separately even
+// if byteswap.h is available.
+
+#ifdef HAVE_BYTESWAP_H
+# include <byteswap.h>
+#endif
+
+#ifndef HAVE_BSWAP_16
+# define bswap_16(num) \
+ (((num) << 8) | ((num) >> 8))
+#endif
+
+#ifndef HAVE_BSWAP_32
+# define bswap_32(num) \
+ ( (((num) << 24) ) \
+ | (((num) << 8) & UINT32_C(0x00FF0000)) \
+ | (((num) >> 8) & UINT32_C(0x0000FF00)) \
+ | (((num) >> 24) ) )
+#endif
+
+#ifndef HAVE_BSWAP_64
+# define bswap_64(num) \
+ ( (((num) << 56) ) \
+ | (((num) << 40) & UINT64_C(0x00FF000000000000)) \
+ | (((num) << 24) & UINT64_C(0x0000FF0000000000)) \
+ | (((num) << 8) & UINT64_C(0x000000FF00000000)) \
+ | (((num) >> 8) & UINT64_C(0x00000000FF000000)) \
+ | (((num) >> 24) & UINT64_C(0x0000000000FF0000)) \
+ | (((num) >> 40) & UINT64_C(0x000000000000FF00)) \
+ | (((num) >> 56) ) )
+#endif
+
+#endif
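
For reference, a minimal standalone sketch of the generic 32-bit fallback defined above (it is only used when <byteswap.h> does not already provide bswap_32); the macro body is copied from the header, while the small test harness around it is illustrative and not part of the patched sources:

    #include <stdint.h>
    #include <stdio.h>

    /* Generic 32-bit byte-swap fallback, as in bswap.h above. */
    #define bswap_32(num) \
        ( (((num) << 24)                       ) \
        | (((num) <<  8) & UINT32_C(0x00FF0000)) \
        | (((num) >>  8) & UINT32_C(0x0000FF00)) \
        | (((num) >> 24)                       ) )

    int main(void)
    {
        const uint32_t x = UINT32_C(0x11223344);
        printf("%08X\n", (unsigned)bswap_32(x));   /* prints 44332211 */
        return 0;
    }
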
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/common_w32res.rc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/common_w32res.rc
new file mode 100644
index 00000000..ad9e1653
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/common_w32res.rc
@@ -0,0 +1,53 @@
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include <winresrc.h>
+#include "config.h"
+#define LZMA_H_INTERNAL
+#define LZMA_H_INTERNAL_RC
+#include "lzma/version.h"
+
+#ifndef MY_BUILD
+# define MY_BUILD 0
+#endif
+#define MY_VERSION LZMA_VERSION_MAJOR,LZMA_VERSION_MINOR,LZMA_VERSION_PATCH,MY_BUILD
+
+#define MY_FILENAME MY_NAME MY_SUFFIX
+#define MY_COMPANY "Lasse Collin and Igor Pavlov"
+#define MY_COPYRIGHT "Public Domain by " MY_COMPANY
+#define MY_COMMENTS "http://tukaani.org/xz/"
+
+LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
+VS_VERSION_INFO VERSIONINFO
+ FILEVERSION MY_VERSION
+ PRODUCTVERSION MY_VERSION
+ FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
+ FILEFLAGS 0
+ FILEOS VOS_NT_WINDOWS32
+ FILETYPE MY_TYPE
+ FILESUBTYPE 0x0L
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904b0"
+ BEGIN
+ VALUE "Comments", MY_COMMENTS
+ VALUE "CompanyName", MY_COMPANY
+ VALUE "FileDescription", MY_DESC
+ VALUE "FileVersion", LZMA_VERSION_STRING
+ VALUE "InternalName", MY_NAME
+ VALUE "LegalCopyright", MY_COPYRIGHT
+ VALUE "OriginalFilename", MY_FILENAME
+ VALUE "ProductName", PACKAGE_NAME
+ VALUE "ProductVersion", LZMA_VERSION_STRING
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1200
+ END
+END
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/cpucores.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/cpucores.h
new file mode 100644
index 00000000..954d8b56
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/cpucores.h
@@ -0,0 +1,53 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file cpucores.h
+/// \brief Get the number of online CPU cores
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef CPUCORES_H
+#define CPUCORES_H
+
+#if defined(HAVE_CPUCORES_SYSCONF)
+# include <unistd.h>
+
+#elif defined(HAVE_CPUCORES_SYSCTL)
+# ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+# endif
+# ifdef HAVE_SYS_SYSCTL_H
+# include <sys/sysctl.h>
+# endif
+#endif
+
+
+static inline uint32_t
+cpucores(void)
+{
+ uint32_t ret = 0;
+
+#if defined(HAVE_CPUCORES_SYSCONF)
+ const long cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ if (cpus > 0)
+ ret = (uint32_t)(cpus);
+
+#elif defined(HAVE_CPUCORES_SYSCTL)
+ int name[2] = { CTL_HW, HW_NCPU };
+ int cpus;
+ size_t cpus_size = sizeof(cpus);
+ if (!sysctl(name, 2, &cpus, &cpus_size, NULL, 0)
+ && cpus_size == sizeof(cpus) && cpus > 0)
+ ret = (uint32_t)(cpus);
+#endif
+
+ return ret;
+}
+
+#endif
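
A minimal sketch of how the sysconf() branch of cpucores() behaves and how a caller would typically use the result (falling back to one thread when detection fails, matching the "one thread" wording in the xz help text). This assumes a system that defines _SC_NPROCESSORS_ONLN, which is common but not guaranteed by POSIX; it is an illustration, not part of the upstream file:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* sysconf() path of cpucores(): returns 0 when detection fails. */
    static uint32_t cpucores_sysconf(void)
    {
        uint32_t ret = 0;
        const long cpus = sysconf(_SC_NPROCESSORS_ONLN);
        if (cpus > 0)
            ret = (uint32_t)cpus;
        return ret;
    }

    int main(void)
    {
        uint32_t cores = cpucores_sysconf();
        if (cores == 0)
            cores = 1; /* detection failed; assume a single core */
        printf("using %u thread(s)\n", (unsigned)cores);
        return 0;
    }
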
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/integer.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/integer.h
new file mode 100644
index 00000000..65249365
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/integer.h
@@ -0,0 +1,172 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file integer.h
+/// \brief Reading and writing integers from and to buffers
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_INTEGER_H
+#define LZMA_INTEGER_H
+
+// On big endian, we need byte swapping. These macros may be used outside
+// this file, so don't put these inside HAVE_FAST_UNALIGNED_ACCESS.
+#ifdef WORDS_BIGENDIAN
+# include "bswap.h"
+# define integer_le_16(n) bswap_16(n)
+# define integer_le_32(n) bswap_32(n)
+# define integer_le_64(n) bswap_64(n)
+#else
+# define integer_le_16(n) (n)
+# define integer_le_32(n) (n)
+# define integer_le_64(n) (n)
+#endif
+
+
+// I'm aware of AC_CHECK_ALIGNED_ACCESS_REQUIRED from Autoconf archive, but
+// it's not useful here. We don't care if unaligned access is supported,
+// we care if it is fast. Some systems can emulate unaligned access in
+// software, which is horribly slow; we want to use byte-by-byte access on
+// such systems but the Autoconf test would detect such a system as
+// supporting unaligned access.
+//
+// NOTE: HAVE_FAST_UNALIGNED_ACCESS indicates only support for 16-bit and
+// 32-bit integer loads and stores. 64-bit integers may or may not work.
+// That's why 64-bit functions are commented out.
+//
+// TODO: Big endian PowerPC supports byte swapping load and store instructions
+// that also allow unaligned access. Inline assembler could be OK for that.
+//
+// Performance of these functions isn't that important until LZMA3, but it
+// doesn't hurt to have these ready already.
+#ifdef HAVE_FAST_UNALIGNED_ACCESS
+
+static inline uint16_t
+integer_read_16(const uint8_t buf[static 2])
+{
+ uint16_t ret = *(const uint16_t *)(buf);
+ return integer_le_16(ret);
+}
+
+
+static inline uint32_t
+integer_read_32(const uint8_t buf[static 4])
+{
+ uint32_t ret = *(const uint32_t *)(buf);
+ return integer_le_32(ret);
+}
+
+
+/*
+static inline uint64_t
+integer_read_64(const uint8_t buf[static 8])
+{
+ uint64_t ret = *(const uint64_t *)(buf);
+ return integer_le_64(ret);
+}
+*/
+
+
+static inline void
+integer_write_16(uint8_t buf[static 2], uint16_t num)
+{
+ *(uint16_t *)(buf) = integer_le_16(num);
+}
+
+
+static inline void
+integer_write_32(uint8_t buf[static 4], uint32_t num)
+{
+ *(uint32_t *)(buf) = integer_le_32(num);
+}
+
+
+/*
+static inline void
+integer_write_64(uint8_t buf[static 8], uint64_t num)
+{
+ *(uint64_t *)(buf) = integer_le_64(num);
+}
+*/
+
+
+#else
+
+static inline uint16_t
+integer_read_16(const uint8_t buf[static 2])
+{
+ uint16_t ret = buf[0] | (buf[1] << 8);
+ return ret;
+}
+
+
+static inline uint32_t
+integer_read_32(const uint8_t buf[static 4])
+{
+ uint32_t ret = buf[0];
+ ret |= (uint32_t)(buf[1]) << 8;
+ ret |= (uint32_t)(buf[2]) << 16;
+ ret |= (uint32_t)(buf[3]) << 24;
+ return ret;
+}
+
+
+/*
+static inline uint64_t
+integer_read_64(const uint8_t buf[static 8])
+{
+ uint64_t ret = buf[0];
+ ret |= (uint64_t)(buf[1]) << 8;
+ ret |= (uint64_t)(buf[2]) << 16;
+ ret |= (uint64_t)(buf[3]) << 24;
+ ret |= (uint64_t)(buf[4]) << 32;
+ ret |= (uint64_t)(buf[5]) << 40;
+ ret |= (uint64_t)(buf[6]) << 48;
+ ret |= (uint64_t)(buf[7]) << 56;
+ return ret;
+}
+*/
+
+
+static inline void
+integer_write_16(uint8_t buf[static 2], uint16_t num)
+{
+ buf[0] = (uint8_t)(num);
+ buf[1] = (uint8_t)(num >> 8);
+}
+
+
+static inline void
+integer_write_32(uint8_t buf[static 4], uint32_t num)
+{
+ buf[0] = (uint8_t)(num);
+ buf[1] = (uint8_t)(num >> 8);
+ buf[2] = (uint8_t)(num >> 16);
+ buf[3] = (uint8_t)(num >> 24);
+}
+
+
+/*
+static inline void
+integer_write_64(uint8_t buf[static 8], uint64_t num)
+{
+ buf[0] = (uint8_t)(num);
+ buf[1] = (uint8_t)(num >> 8);
+ buf[2] = (uint8_t)(num >> 16);
+ buf[3] = (uint8_t)(num >> 24);
+ buf[4] = (uint8_t)(num >> 32);
+ buf[5] = (uint8_t)(num >> 40);
+ buf[6] = (uint8_t)(num >> 48);
+ buf[7] = (uint8_t)(num >> 56);
+}
+*/
+
+#endif
+
+#endif
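
The byte-by-byte branch above works on any host regardless of endianness or alignment support; a small standalone round-trip check (the accessor bodies mirror the fallback branch, the harness itself is illustrative and not from the upstream sources):

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian accessors, as in the fallback branch of integer.h. */
    static uint32_t read32le(const uint8_t *buf)
    {
        uint32_t ret = buf[0];
        ret |= (uint32_t)buf[1] << 8;
        ret |= (uint32_t)buf[2] << 16;
        ret |= (uint32_t)buf[3] << 24;
        return ret;
    }

    static void write32le(uint8_t *buf, uint32_t num)
    {
        buf[0] = (uint8_t)num;
        buf[1] = (uint8_t)(num >> 8);
        buf[2] = (uint8_t)(num >> 16);
        buf[3] = (uint8_t)(num >> 24);
    }

    int main(void)
    {
        uint8_t buf[4];
        write32le(buf, UINT32_C(0xCAFEBABE));
        /* Round-trips on any host endianness: prints CAFEBABE */
        printf("%08X\n", (unsigned)read32le(buf));
        return 0;
    }
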
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/mythread.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/mythread.h
new file mode 100644
index 00000000..c1cc659c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/mythread.h
@@ -0,0 +1,44 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file mythread.h
+/// \brief Wrappers for threads
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+
+
+#ifdef HAVE_PTHREAD
+# include <pthread.h>
+
+# define mythread_once(func) \
+ do { \
+ static pthread_once_t once_ = PTHREAD_ONCE_INIT; \
+ pthread_once(&once_, &func); \
+ } while (0)
+
+# define mythread_sigmask(how, set, oset) \
+ pthread_sigmask(how, set, oset)
+
+#else
+
+# define mythread_once(func) \
+ do { \
+ static bool once_ = false; \
+ if (!once_) { \
+ func(); \
+ once_ = true; \
+ } \
+ } while (0)
+
+# define mythread_sigmask(how, set, oset) \
+ sigprocmask(how, set, oset)
+
+#endif
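
A sketch of how a once-wrapper like mythread_once() is typically used for lazy one-time initialization. It assumes the pthread branch (HAVE_PTHREAD) and a hypothetical init_table() helper; it is an illustration of the idiom, not code from the patch:

    #include <pthread.h>
    #include <stdio.h>

    /* pthread variant of the wrapper above. */
    #define mythread_once(func) \
        do { \
            static pthread_once_t once_ = PTHREAD_ONCE_INIT; \
            pthread_once(&once_, &func); \
        } while (0)

    static int table_ready;

    static void init_table(void)
    {
        /* Runs exactly once, no matter how many times or from how many
         * threads mythread_once(init_table) is reached. */
        table_ready = 1;
    }

    int main(void)
    {
        mythread_once(init_table);
        mythread_once(init_table); /* second call is a no-op */
        printf("table_ready = %d\n", table_ready);
        return 0;
    }
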
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/open_stdxxx.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/open_stdxxx.h
new file mode 100644
index 00000000..d46962f2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/open_stdxxx.h
@@ -0,0 +1,51 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file open_stdxxx.h
+/// \brief Make sure that file descriptors 0, 1, and 2 are open
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef OPEN_STDXXX_H
+#define OPEN_STDXXX_H
+
+#include <stdlib.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+
+static void
+open_stdxxx(int status)
+{
+ for (int i = 0; i <= 2; ++i) {
+ // We use fcntl() to check if the file descriptor is open.
+ if (fcntl(i, F_GETFD) == -1 && errno == EBADF) {
+ // With stdin, we could use /dev/full so that
+ // writing to stdin would fail. However, /dev/full
+ // is Linux specific, and if the program tries to
+ // write to stdin, there's already a problem anyway.
+ const int fd = open("/dev/null", O_NOCTTY
+ | (i == 0 ? O_WRONLY : O_RDONLY));
+
+ if (fd != i) {
+ // Something went wrong. Exit with the
+ // exit status we were given. Don't try
+ // to print an error message, since stderr
+ // may very well be non-existent. This
+ // error should be extremely rare.
+ (void)close(fd);
+ exit(status);
+ }
+ }
+ }
+
+ return;
+}
+
+#endif
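
The key trick in open_stdxxx() is the probe it uses to decide whether a descriptor is open: fcntl(fd, F_GETFD) fails with EBADF exactly when nothing is attached to fd. A tiny standalone demonstration of that probe (illustrative only, not part of the upstream file):

    #include <fcntl.h>
    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
        for (int i = 0; i <= 2; ++i) {
            /* Same check open_stdxxx() performs for fds 0, 1, and 2. */
            if (fcntl(i, F_GETFD) == -1 && errno == EBADF)
                printf("fd %d is not open\n", i);
            else
                printf("fd %d is open\n", i);
        }
        return 0;
    }
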
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/physmem.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/physmem.h
new file mode 100644
index 00000000..0217227d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/physmem.h
@@ -0,0 +1,136 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file physmem.h
+/// \brief Get the amount of physical memory
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef PHYSMEM_H
+#define PHYSMEM_H
+
+// Test for Windows first, because we want to use Windows-specific code
+// on Cygwin, which also has memory information available via sysconf(), but
+// on Cygwin 1.5 and older it gives wrong results (from our point of view).
+#if defined(_WIN32) || defined(__CYGWIN__)
+# ifndef _WIN32_WINNT
+# define _WIN32_WINNT 0x0500
+# endif
+# include <windows.h>
+
+#elif defined(HAVE_PHYSMEM_SYSCONF)
+# include <unistd.h>
+
+#elif defined(HAVE_PHYSMEM_SYSCTL)
+# ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+# endif
+# ifdef HAVE_SYS_SYSCTL_H
+# include <sys/sysctl.h>
+# endif
+
+#elif defined(HAVE_PHYSMEM_SYSINFO)
+# include <sys/sysinfo.h>
+
+#elif defined(__DJGPP__)
+# include <dpmi.h>
+#endif
+
+
+/// \brief Get the amount of physical memory in bytes
+///
+/// \return Amount of physical memory in bytes. On error, zero is
+/// returned.
+static inline uint64_t
+physmem(void)
+{
+ uint64_t ret = 0;
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+ if ((GetVersion() & 0xFF) >= 5) {
+ // Windows 2000 and later have GlobalMemoryStatusEx() which
+ // supports reporting values greater than 4 GiB. To keep the
+ // code working also on older Windows versions, use
+ // GlobalMemoryStatusEx() conditionally.
+ HMODULE kernel32 = GetModuleHandle("kernel32.dll");
+ if (kernel32 != NULL) {
+ BOOL (WINAPI *gmse)(LPMEMORYSTATUSEX) = GetProcAddress(
+ kernel32, "GlobalMemoryStatusEx");
+ if (gmse != NULL) {
+ MEMORYSTATUSEX meminfo;
+ meminfo.dwLength = sizeof(meminfo);
+ if (gmse(&meminfo))
+ ret = meminfo.ullTotalPhys;
+ }
+ }
+ }
+
+ if (ret == 0) {
+ // GlobalMemoryStatus() is supported by Windows 95 and later,
+ // so it is fine to link against it unconditionally. Note that
+ // GlobalMemoryStatus() has no return value.
+ MEMORYSTATUS meminfo;
+ meminfo.dwLength = sizeof(meminfo);
+ GlobalMemoryStatus(&meminfo);
+ ret = meminfo.dwTotalPhys;
+ }
+
+#elif defined(HAVE_PHYSMEM_SYSCONF)
+ const long pagesize = sysconf(_SC_PAGESIZE);
+ const long pages = sysconf(_SC_PHYS_PAGES);
+ if (pagesize != -1 && pages != -1)
+ // According to docs, pagesize * pages can overflow.
+ // Simple case is 32-bit box with 4 GiB or more RAM,
+ // which may report exactly 4 GiB of RAM, and "long"
+ // being 32-bit will overflow. Casting to uint64_t
+ // hopefully avoids overflows in the near future.
+ ret = (uint64_t)(pagesize) * (uint64_t)(pages);
+
+#elif defined(HAVE_PHYSMEM_SYSCTL)
+ int name[2] = {
+ CTL_HW,
+#ifdef HW_PHYSMEM64
+ HW_PHYSMEM64
+#else
+ HW_PHYSMEM
+#endif
+ };
+ union {
+ uint32_t u32;
+ uint64_t u64;
+ } mem;
+ size_t mem_ptr_size = sizeof(mem.u64);
+ if (!sysctl(name, 2, &mem.u64, &mem_ptr_size, NULL, NULL)) {
+ // IIRC, 64-bit "return value" is possible on some 64-bit
+ // BSD systems even with HW_PHYSMEM (instead of HW_PHYSMEM64),
+ // so support both.
+ if (mem_ptr_size == sizeof(mem.u64))
+ ret = mem.u64;
+ else if (mem_ptr_size == sizeof(mem.u32))
+ ret = mem.u32;
+ }
+
+#elif defined(HAVE_PHYSMEM_SYSINFO)
+ struct sysinfo si;
+ if (sysinfo(&si) == 0)
+ ret = (uint64_t)(si.totalram) * si.mem_unit;
+
+#elif defined(__DJGPP__)
+ __dpmi_free_mem_info meminfo;
+ if (__dpmi_get_free_memory_information(&meminfo) == 0
+ && meminfo.total_number_of_physical_pages
+ != (unsigned long)(-1))
+ ret = (uint64_t)(meminfo.total_number_of_physical_pages)
+ * 4096;
+#endif
+
+ return ret;
+}
+
+#endif
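
A minimal sketch of the sysconf() branch of physmem() (the usual glibc/Linux path): page size times page count, computed in 64 bits so the product cannot overflow a 32-bit "long". This assumes _SC_PHYS_PAGES is available, which is a common extension rather than a POSIX requirement, and the surrounding harness is illustrative rather than upstream code:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <unistd.h>

    /* sysconf() branch of physmem(): returns 0 when detection fails. */
    static uint64_t physmem_sysconf(void)
    {
        uint64_t ret = 0;
        const long pagesize = sysconf(_SC_PAGESIZE);
        const long pages = sysconf(_SC_PHYS_PAGES);
        if (pagesize != -1 && pages != -1)
            ret = (uint64_t)pagesize * (uint64_t)pages;
        return ret;
    }

    int main(void)
    {
        printf("physical memory: %" PRIu64 " MiB\n",
                physmem_sysconf() / (1024 * 1024));
        return 0;
    }
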
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/sysdefs.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/sysdefs.h
new file mode 100644
index 00000000..37896377
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/common/sysdefs.h
@@ -0,0 +1,171 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file sysdefs.h
+/// \brief Common includes, definitions, system-specific things etc.
+///
+/// This file is also used by the lzma command line tool, which is why this
+/// file is separate from common.h.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_SYSDEFS_H
+#define LZMA_SYSDEFS_H
+
+//////////////
+// Includes //
+//////////////
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+// size_t and NULL
+#include <stddef.h>
+
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+
+// C99 says that inttypes.h always includes stdint.h, but some systems
+// don't do that, and require including stdint.h separately.
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+
+// Some pre-C99 systems have SIZE_MAX in limits.h instead of stdint.h. The
+// limits are also used to figure out some macros missing from pre-C99 systems.
+#ifdef HAVE_LIMITS_H
+# include <limits.h>
+#endif
+
+// Be more compatible with systems that have non-conforming inttypes.h.
+// We assume that int is 32-bit and that long is either 32-bit or 64-bit.
+// Full Autoconf test could be more correct, but this should work well enough.
+// Note that this duplicates some code from lzma.h, but this is better since
+// we can work without inttypes.h thanks to Autoconf tests.
+#ifndef UINT32_C
+# if UINT_MAX != 4294967295U
+# error UINT32_C is not defined and unsigned int is not 32-bit.
+# endif
+# define UINT32_C(n) n ## U
+#endif
+#ifndef UINT32_MAX
+# define UINT32_MAX UINT32_C(4294967295)
+#endif
+#ifndef PRIu32
+# define PRIu32 "u"
+#endif
+#ifndef PRIX32
+# define PRIX32 "X"
+#endif
+
+#if ULONG_MAX == 4294967295UL
+# ifndef UINT64_C
+# define UINT64_C(n) n ## ULL
+# endif
+# ifndef PRIu64
+# define PRIu64 "llu"
+# endif
+# ifndef PRIX64
+# define PRIX64 "llX"
+# endif
+#else
+# ifndef UINT64_C
+# define UINT64_C(n) n ## UL
+# endif
+# ifndef PRIu64
+# define PRIu64 "lu"
+# endif
+# ifndef PRIX64
+# define PRIX64 "lX"
+# endif
+#endif
+#ifndef UINT64_MAX
+# define UINT64_MAX UINT64_C(18446744073709551615)
+#endif
+
+// The code currently assumes that size_t is either 32-bit or 64-bit.
+#ifndef SIZE_MAX
+# if SIZEOF_SIZE_T == 4
+# define SIZE_MAX UINT32_MAX
+# elif SIZEOF_SIZE_T == 8
+# define SIZE_MAX UINT64_MAX
+# else
+# error sizeof(size_t) is not 32-bit or 64-bit
+# endif
+#endif
+#if SIZE_MAX != UINT32_MAX && SIZE_MAX != UINT64_MAX
+# error sizeof(size_t) is not 32-bit or 64-bit
+#endif
+
+#include <stdlib.h>
+#include <assert.h>
+
+// Pre-C99 systems lack stdbool.h. All the code in LZMA Utils must be written
+// so that it works with fake bool type, for example:
+//
+// bool foo = (flags & 0x100) != 0;
+// bool bar = !!(flags & 0x100);
+//
+// This works with the real C99 bool but breaks with fake bool:
+//
+// bool baz = (flags & 0x100);
+//
+#ifdef HAVE_STDBOOL_H
+# include <stdbool.h>
+#else
+# if ! HAVE__BOOL
+typedef unsigned char _Bool;
+# endif
+# define bool _Bool
+# define false 0
+# define true 1
+# define __bool_true_false_are_defined 1
+#endif
+
+// string.h should be enough but let's include strings.h and memory.h too if
+// they exist, since that shouldn't do any harm but may improve portability.
+#ifdef HAVE_STRING_H
+# include <string.h>
+#endif
+
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+
+#ifdef HAVE_MEMORY_H
+# include <memory.h>
+#endif
+
+
+////////////
+// Macros //
+////////////
+
+#if defined(_WIN32) || defined(__MSDOS__) || defined(__OS2__)
+# define DOSLIKE 1
+#endif
+
+#undef memzero
+#define memzero(s, n) memset(s, 0, n)
+
+#ifndef MIN
+# define MIN(x, y) ((x) < (y) ? (x) : (y))
+#endif
+
+#ifndef MAX
+# define MAX(x, y) ((x) > (y) ? (x) : (y))
+#endif
+
+#ifndef ARRAY_SIZE
+# define ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0]))
+#endif
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/Makefile.am
new file mode 100644
index 00000000..6d5753b1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/Makefile.am
@@ -0,0 +1,94 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+SUBDIRS = api
+
+EXTRA_DIST =
+CLEANFILES =
+doc_DATA =
+
+lib_LTLIBRARIES = liblzma.la
+liblzma_la_SOURCES =
+liblzma_la_CPPFLAGS = \
+ -I$(top_srcdir)/src/liblzma/api \
+ -I$(top_srcdir)/src/liblzma/common \
+ -I$(top_srcdir)/src/liblzma/check \
+ -I$(top_srcdir)/src/liblzma/lz \
+ -I$(top_srcdir)/src/liblzma/rangecoder \
+ -I$(top_srcdir)/src/liblzma/lzma \
+ -I$(top_srcdir)/src/liblzma/subblock \
+ -I$(top_srcdir)/src/liblzma/delta \
+ -I$(top_srcdir)/src/liblzma/simple \
+ -I$(top_srcdir)/src/common
+liblzma_la_LDFLAGS = -no-undefined -version-info 0:0:0
+
+include $(srcdir)/common/Makefile.inc
+include $(srcdir)/check/Makefile.inc
+
+if COND_FILTER_LZ
+include $(srcdir)/lz/Makefile.inc
+endif
+
+if COND_FILTER_LZMA1
+include $(srcdir)/lzma/Makefile.inc
+include $(srcdir)/rangecoder/Makefile.inc
+endif
+
+if COND_FILTER_SUBBLOCK
+include $(srcdir)/subblock/Makefile.inc
+endif
+
+if COND_FILTER_DELTA
+include $(srcdir)/delta/Makefile.inc
+endif
+
+if COND_FILTER_SIMPLE
+include $(srcdir)/simple/Makefile.inc
+endif
+
+
+## Windows-specific stuff
+
+# Windows resource compiler support. libtool knows what to do with .rc
+# files, but Automake (<= 1.11 at least) doesn't know.
+#
+# We want the resource file only in shared liblzma. To avoid linking it into
+# static liblzma, we overwrite the static object file with an object file
+# compiled from empty input. Note that GNU-specific features are OK here,
+# because on Windows we are compiled with the GNU toolchain.
+.rc.lo:
+ $(LIBTOOL) --mode=compile $(RC) $(DEFS) $(DEFAULT_INCLUDES) \
+ $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(RCFLAGS) \
+ -i $< -o $@
+ echo > empty.c
+ $(COMPILE) -c empty.c -o $(*D)/$(*F).o
+
+# Remove ordinals from the generated .def file. People must link by name,
+# not by ordinal, because no one is going to track the ordinal numbers.
+liblzma.def: liblzma.la liblzma.def.in
+ sed 's/ \+@ *[0-9]\+//' liblzma.def.in > liblzma.def
+
+# Creating liblzma.def.in is a side effect of linking the library.
+liblzma.def.in: liblzma.la
+
+if COND_W32
+CLEANFILES += liblzma.def liblzma.def.in empty.c
+liblzma_la_SOURCES += liblzma_w32res.rc
+liblzma_la_LDFLAGS += -Xlinker --output-def -Xlinker liblzma.def.in
+
+## liblzma.def.in is created only when building shared liblzma, so don't
+## try to create liblzma.def when not building shared liblzma.
+if COND_SHARED
+doc_DATA += liblzma.def
+endif
+endif
+
+
+## pkg-config
+pkgconfigdir = $(libdir)/pkgconfig
+pkgconfig_DATA = liblzma.pc
+EXTRA_DIST += liblzma.pc.in
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/Makefile.in
new file mode 100644
index 00000000..7d401e58
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/Makefile.in
@@ -0,0 +1,1728 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
+ $(srcdir)/check/Makefile.inc $(srcdir)/common/Makefile.inc \
+ $(srcdir)/delta/Makefile.inc $(srcdir)/liblzma.pc.in \
+ $(srcdir)/lz/Makefile.inc $(srcdir)/lzma/Makefile.inc \
+ $(srcdir)/rangecoder/Makefile.inc \
+ $(srcdir)/simple/Makefile.inc $(srcdir)/subblock/Makefile.inc
+@COND_MAIN_ENCODER_TRUE@am__append_1 = \
+@COND_MAIN_ENCODER_TRUE@ common/alone_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/block_buffer_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/block_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/block_encoder.h \
+@COND_MAIN_ENCODER_TRUE@ common/block_header_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/easy_buffer_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/easy_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/easy_encoder_memusage.c \
+@COND_MAIN_ENCODER_TRUE@ common/filter_buffer_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/filter_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/filter_encoder.h \
+@COND_MAIN_ENCODER_TRUE@ common/filter_flags_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/index_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/index_encoder.h \
+@COND_MAIN_ENCODER_TRUE@ common/stream_buffer_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/stream_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/stream_encoder.h \
+@COND_MAIN_ENCODER_TRUE@ common/stream_flags_encoder.c \
+@COND_MAIN_ENCODER_TRUE@ common/vli_encoder.c
+
+@COND_MAIN_DECODER_TRUE@am__append_2 = \
+@COND_MAIN_DECODER_TRUE@ common/alone_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/alone_decoder.h \
+@COND_MAIN_DECODER_TRUE@ common/auto_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/block_buffer_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/block_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/block_decoder.h \
+@COND_MAIN_DECODER_TRUE@ common/block_header_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/easy_decoder_memusage.c \
+@COND_MAIN_DECODER_TRUE@ common/filter_buffer_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/filter_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/filter_decoder.h \
+@COND_MAIN_DECODER_TRUE@ common/filter_flags_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/index_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/index_hash.c \
+@COND_MAIN_DECODER_TRUE@ common/stream_buffer_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/stream_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/stream_decoder.h \
+@COND_MAIN_DECODER_TRUE@ common/stream_flags_decoder.c \
+@COND_MAIN_DECODER_TRUE@ common/vli_decoder.c
+
+@COND_CHECK_CRC32_TRUE@@COND_SMALL_TRUE@am__append_3 = check/crc32_small.c
+@COND_CHECK_CRC32_TRUE@@COND_SMALL_FALSE@am__append_4 = \
+@COND_CHECK_CRC32_TRUE@@COND_SMALL_FALSE@ check/crc32_table.c \
+@COND_CHECK_CRC32_TRUE@@COND_SMALL_FALSE@ check/crc32_table_le.h \
+@COND_CHECK_CRC32_TRUE@@COND_SMALL_FALSE@ check/crc32_table_be.h
+
+@COND_ASM_X86_TRUE@@COND_CHECK_CRC32_TRUE@@COND_SMALL_FALSE@am__append_5 = check/crc32_x86.S
+@COND_ASM_X86_FALSE@@COND_CHECK_CRC32_TRUE@@COND_SMALL_FALSE@am__append_6 = check/crc32_fast.c
+@COND_CHECK_CRC64_TRUE@@COND_SMALL_TRUE@am__append_7 = check/crc64_small.c
+@COND_CHECK_CRC64_TRUE@@COND_SMALL_FALSE@am__append_8 = \
+@COND_CHECK_CRC64_TRUE@@COND_SMALL_FALSE@ check/crc64_table.c \
+@COND_CHECK_CRC64_TRUE@@COND_SMALL_FALSE@ check/crc64_table_le.h \
+@COND_CHECK_CRC64_TRUE@@COND_SMALL_FALSE@ check/crc64_table_be.h
+
+@COND_ASM_X86_TRUE@@COND_CHECK_CRC64_TRUE@@COND_SMALL_FALSE@am__append_9 = check/crc64_x86.S
+@COND_ASM_X86_FALSE@@COND_CHECK_CRC64_TRUE@@COND_SMALL_FALSE@am__append_10 = check/crc64_fast.c
+@COND_CHECK_SHA256_TRUE@am__append_11 = check/sha256.c
+@COND_ENCODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@am__append_12 = \
+@COND_ENCODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@ lz/lz_encoder.c \
+@COND_ENCODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@ lz/lz_encoder.h \
+@COND_ENCODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@ lz/lz_encoder_hash.h \
+@COND_ENCODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@ lz/lz_encoder_mf.c
+
+@COND_DECODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@am__append_13 = \
+@COND_DECODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@ lz/lz_decoder.c \
+@COND_DECODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@ lz/lz_decoder.h
+
+@COND_FILTER_LZMA1_TRUE@am__append_14 = lzma/fastpos_tablegen.c \
+@COND_FILTER_LZMA1_TRUE@ rangecoder/price_tablegen.c
+@COND_FILTER_LZMA1_TRUE@am__append_15 = lzma/lzma_common.h
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@am__append_16 = \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/fastpos.h \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma_encoder.h \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma_encoder.c \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma_encoder_presets.c \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma_encoder_private.h \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma_encoder_optimum_fast.c \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma_encoder_optimum_normal.c
+
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@@COND_SMALL_FALSE@am__append_17 = lzma/fastpos_table.c
+@COND_DECODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@am__append_18 = \
+@COND_DECODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma_decoder.c \
+@COND_DECODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma_decoder.h
+
+@COND_ENCODER_LZMA2_TRUE@@COND_FILTER_LZMA1_TRUE@am__append_19 = \
+@COND_ENCODER_LZMA2_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma2_encoder.c \
+@COND_ENCODER_LZMA2_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma2_encoder.h
+
+@COND_DECODER_LZMA2_TRUE@@COND_FILTER_LZMA1_TRUE@am__append_20 = \
+@COND_DECODER_LZMA2_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma2_decoder.c \
+@COND_DECODER_LZMA2_TRUE@@COND_FILTER_LZMA1_TRUE@ lzma/lzma2_decoder.h
+
+@COND_FILTER_LZMA1_TRUE@am__append_21 = rangecoder/range_common.h
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@am__append_22 = \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ rangecoder/range_encoder.h \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ rangecoder/price.h \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ rangecoder/price_table.c
+
+@COND_DECODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@am__append_23 = rangecoder/range_decoder.h
+@COND_ENCODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@am__append_24 = \
+@COND_ENCODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@ subblock/subblock_encoder.c \
+@COND_ENCODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@ subblock/subblock_encoder.h
+
+@COND_DECODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@am__append_25 = \
+@COND_DECODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@ subblock/subblock_decoder.c \
+@COND_DECODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@ subblock/subblock_decoder.h \
+@COND_DECODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@ subblock/subblock_decoder_helper.c \
+@COND_DECODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@ subblock/subblock_decoder_helper.h
+
+@COND_FILTER_DELTA_TRUE@am__append_26 = \
+@COND_FILTER_DELTA_TRUE@ delta/delta_common.c \
+@COND_FILTER_DELTA_TRUE@ delta/delta_common.h \
+@COND_FILTER_DELTA_TRUE@ delta/delta_private.h
+
+@COND_ENCODER_DELTA_TRUE@@COND_FILTER_DELTA_TRUE@am__append_27 = \
+@COND_ENCODER_DELTA_TRUE@@COND_FILTER_DELTA_TRUE@ delta/delta_encoder.c \
+@COND_ENCODER_DELTA_TRUE@@COND_FILTER_DELTA_TRUE@ delta/delta_encoder.h
+
+@COND_DECODER_DELTA_TRUE@@COND_FILTER_DELTA_TRUE@am__append_28 = \
+@COND_DECODER_DELTA_TRUE@@COND_FILTER_DELTA_TRUE@ delta/delta_decoder.c \
+@COND_DECODER_DELTA_TRUE@@COND_FILTER_DELTA_TRUE@ delta/delta_decoder.h
+
+@COND_FILTER_SIMPLE_TRUE@am__append_29 = \
+@COND_FILTER_SIMPLE_TRUE@ simple/simple_coder.c \
+@COND_FILTER_SIMPLE_TRUE@ simple/simple_coder.h \
+@COND_FILTER_SIMPLE_TRUE@ simple/simple_private.h
+
+@COND_ENCODER_SIMPLE_TRUE@@COND_FILTER_SIMPLE_TRUE@am__append_30 = \
+@COND_ENCODER_SIMPLE_TRUE@@COND_FILTER_SIMPLE_TRUE@ simple/simple_encoder.c \
+@COND_ENCODER_SIMPLE_TRUE@@COND_FILTER_SIMPLE_TRUE@ simple/simple_encoder.h
+
+@COND_DECODER_SIMPLE_TRUE@@COND_FILTER_SIMPLE_TRUE@am__append_31 = \
+@COND_DECODER_SIMPLE_TRUE@@COND_FILTER_SIMPLE_TRUE@ simple/simple_decoder.c \
+@COND_DECODER_SIMPLE_TRUE@@COND_FILTER_SIMPLE_TRUE@ simple/simple_decoder.h
+
+@COND_FILTER_SIMPLE_TRUE@@COND_FILTER_X86_TRUE@am__append_32 = simple/x86.c
+@COND_FILTER_POWERPC_TRUE@@COND_FILTER_SIMPLE_TRUE@am__append_33 = simple/powerpc.c
+@COND_FILTER_IA64_TRUE@@COND_FILTER_SIMPLE_TRUE@am__append_34 = simple/ia64.c
+@COND_FILTER_ARM_TRUE@@COND_FILTER_SIMPLE_TRUE@am__append_35 = simple/arm.c
+@COND_FILTER_ARMTHUMB_TRUE@@COND_FILTER_SIMPLE_TRUE@am__append_36 = simple/armthumb.c
+@COND_FILTER_SIMPLE_TRUE@@COND_FILTER_SPARC_TRUE@am__append_37 = simple/sparc.c
+@COND_W32_TRUE@am__append_38 = liblzma.def liblzma.def.in empty.c
+@COND_W32_TRUE@am__append_39 = liblzma_w32res.rc
+@COND_W32_TRUE@am__append_40 = -Xlinker --output-def -Xlinker liblzma.def.in
+@COND_SHARED_TRUE@@COND_W32_TRUE@am__append_41 = liblzma.def
+subdir = src/liblzma
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES = liblzma.pc
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(docdir)" \
+ "$(DESTDIR)$(pkgconfigdir)"
+LTLIBRARIES = $(lib_LTLIBRARIES)
+liblzma_la_LIBADD =
+am__liblzma_la_SOURCES_DIST = common/common.c common/common.h \
+ common/bsr.h common/block_util.c common/easy_preset.c \
+ common/easy_preset.h common/filter_common.c \
+ common/filter_common.h common/index.c common/index.h \
+ common/stream_flags_common.c common/stream_flags_common.h \
+ common/vli_size.c common/alone_encoder.c \
+ common/block_buffer_encoder.c common/block_encoder.c \
+ common/block_encoder.h common/block_header_encoder.c \
+ common/easy_buffer_encoder.c common/easy_encoder.c \
+ common/easy_encoder_memusage.c common/filter_buffer_encoder.c \
+ common/filter_encoder.c common/filter_encoder.h \
+ common/filter_flags_encoder.c common/index_encoder.c \
+ common/index_encoder.h common/stream_buffer_encoder.c \
+ common/stream_encoder.c common/stream_encoder.h \
+ common/stream_flags_encoder.c common/vli_encoder.c \
+ common/alone_decoder.c common/alone_decoder.h \
+ common/auto_decoder.c common/block_buffer_decoder.c \
+ common/block_decoder.c common/block_decoder.h \
+ common/block_header_decoder.c common/easy_decoder_memusage.c \
+ common/filter_buffer_decoder.c common/filter_decoder.c \
+ common/filter_decoder.h common/filter_flags_decoder.c \
+ common/index_decoder.c common/index_hash.c \
+ common/stream_buffer_decoder.c common/stream_decoder.c \
+ common/stream_decoder.h common/stream_flags_decoder.c \
+ common/vli_decoder.c check/check.c check/check.h \
+ check/crc_macros.h check/crc32_small.c check/crc32_table.c \
+ check/crc32_table_le.h check/crc32_table_be.h \
+ check/crc32_x86.S check/crc32_fast.c check/crc64_small.c \
+ check/crc64_table.c check/crc64_table_le.h \
+ check/crc64_table_be.h check/crc64_x86.S check/crc64_fast.c \
+ check/sha256.c lz/lz_encoder.c lz/lz_encoder.h \
+ lz/lz_encoder_hash.h lz/lz_encoder_mf.c lz/lz_decoder.c \
+ lz/lz_decoder.h lzma/lzma_common.h lzma/fastpos.h \
+ lzma/lzma_encoder.h lzma/lzma_encoder.c \
+ lzma/lzma_encoder_presets.c lzma/lzma_encoder_private.h \
+ lzma/lzma_encoder_optimum_fast.c \
+ lzma/lzma_encoder_optimum_normal.c lzma/fastpos_table.c \
+ lzma/lzma_decoder.c lzma/lzma_decoder.h lzma/lzma2_encoder.c \
+ lzma/lzma2_encoder.h lzma/lzma2_decoder.c lzma/lzma2_decoder.h \
+ rangecoder/range_common.h rangecoder/range_encoder.h \
+ rangecoder/price.h rangecoder/price_table.c \
+ rangecoder/range_decoder.h subblock/subblock_encoder.c \
+ subblock/subblock_encoder.h subblock/subblock_decoder.c \
+ subblock/subblock_decoder.h subblock/subblock_decoder_helper.c \
+ subblock/subblock_decoder_helper.h delta/delta_common.c \
+ delta/delta_common.h delta/delta_private.h \
+ delta/delta_encoder.c delta/delta_encoder.h \
+ delta/delta_decoder.c delta/delta_decoder.h \
+ simple/simple_coder.c simple/simple_coder.h \
+ simple/simple_private.h simple/simple_encoder.c \
+ simple/simple_encoder.h simple/simple_decoder.c \
+ simple/simple_decoder.h simple/x86.c simple/powerpc.c \
+ simple/ia64.c simple/arm.c simple/armthumb.c simple/sparc.c \
+ liblzma_w32res.rc
+@COND_MAIN_ENCODER_TRUE@am__objects_1 = liblzma_la-alone_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-block_buffer_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-block_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-block_header_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-easy_buffer_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-easy_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-easy_encoder_memusage.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-filter_buffer_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-filter_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-filter_flags_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-index_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-stream_buffer_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-stream_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-stream_flags_encoder.lo \
+@COND_MAIN_ENCODER_TRUE@ liblzma_la-vli_encoder.lo
+@COND_MAIN_DECODER_TRUE@am__objects_2 = liblzma_la-alone_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-auto_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-block_buffer_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-block_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-block_header_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-easy_decoder_memusage.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-filter_buffer_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-filter_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-filter_flags_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-index_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-index_hash.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-stream_buffer_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-stream_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-stream_flags_decoder.lo \
+@COND_MAIN_DECODER_TRUE@ liblzma_la-vli_decoder.lo
+@COND_CHECK_CRC32_TRUE@@COND_SMALL_TRUE@am__objects_3 = liblzma_la-crc32_small.lo
+@COND_CHECK_CRC32_TRUE@@COND_SMALL_FALSE@am__objects_4 = liblzma_la-crc32_table.lo
+@COND_ASM_X86_TRUE@@COND_CHECK_CRC32_TRUE@@COND_SMALL_FALSE@am__objects_5 = liblzma_la-crc32_x86.lo
+@COND_ASM_X86_FALSE@@COND_CHECK_CRC32_TRUE@@COND_SMALL_FALSE@am__objects_6 = liblzma_la-crc32_fast.lo
+@COND_CHECK_CRC64_TRUE@@COND_SMALL_TRUE@am__objects_7 = liblzma_la-crc64_small.lo
+@COND_CHECK_CRC64_TRUE@@COND_SMALL_FALSE@am__objects_8 = liblzma_la-crc64_table.lo
+@COND_ASM_X86_TRUE@@COND_CHECK_CRC64_TRUE@@COND_SMALL_FALSE@am__objects_9 = liblzma_la-crc64_x86.lo
+@COND_ASM_X86_FALSE@@COND_CHECK_CRC64_TRUE@@COND_SMALL_FALSE@am__objects_10 = liblzma_la-crc64_fast.lo
+@COND_CHECK_SHA256_TRUE@am__objects_11 = liblzma_la-sha256.lo
+@COND_ENCODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@am__objects_12 = liblzma_la-lz_encoder.lo \
+@COND_ENCODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@ liblzma_la-lz_encoder_mf.lo
+@COND_DECODER_LZ_TRUE@@COND_FILTER_LZ_TRUE@am__objects_13 = liblzma_la-lz_decoder.lo
+am__objects_14 =
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@am__objects_15 = liblzma_la-lzma_encoder.lo \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ liblzma_la-lzma_encoder_presets.lo \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ liblzma_la-lzma_encoder_optimum_fast.lo \
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@ liblzma_la-lzma_encoder_optimum_normal.lo
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@@COND_SMALL_FALSE@am__objects_16 = liblzma_la-fastpos_table.lo
+@COND_DECODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@am__objects_17 = liblzma_la-lzma_decoder.lo
+@COND_ENCODER_LZMA2_TRUE@@COND_FILTER_LZMA1_TRUE@am__objects_18 = liblzma_la-lzma2_encoder.lo
+@COND_DECODER_LZMA2_TRUE@@COND_FILTER_LZMA1_TRUE@am__objects_19 = liblzma_la-lzma2_decoder.lo
+@COND_ENCODER_LZMA1_TRUE@@COND_FILTER_LZMA1_TRUE@am__objects_20 = liblzma_la-price_table.lo
+@COND_ENCODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@am__objects_21 = liblzma_la-subblock_encoder.lo
+@COND_DECODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@am__objects_22 = liblzma_la-subblock_decoder.lo \
+@COND_DECODER_SUBBLOCK_TRUE@@COND_FILTER_SUBBLOCK_TRUE@ liblzma_la-subblock_decoder_helper.lo
+@COND_FILTER_DELTA_TRUE@am__objects_23 = liblzma_la-delta_common.lo
+@COND_ENCODER_DELTA_TRUE@@COND_FILTER_DELTA_TRUE@am__objects_24 = liblzma_la-delta_encoder.lo
+@COND_DECODER_DELTA_TRUE@@COND_FILTER_DELTA_TRUE@am__objects_25 = liblzma_la-delta_decoder.lo
+@COND_FILTER_SIMPLE_TRUE@am__objects_26 = liblzma_la-simple_coder.lo
+@COND_ENCODER_SIMPLE_TRUE@@COND_FILTER_SIMPLE_TRUE@am__objects_27 = liblzma_la-simple_encoder.lo
+@COND_DECODER_SIMPLE_TRUE@@COND_FILTER_SIMPLE_TRUE@am__objects_28 = liblzma_la-simple_decoder.lo
+@COND_FILTER_SIMPLE_TRUE@@COND_FILTER_X86_TRUE@am__objects_29 = liblzma_la-x86.lo
+@COND_FILTER_POWERPC_TRUE@@COND_FILTER_SIMPLE_TRUE@am__objects_30 = liblzma_la-powerpc.lo
+@COND_FILTER_IA64_TRUE@@COND_FILTER_SIMPLE_TRUE@am__objects_31 = liblzma_la-ia64.lo
+@COND_FILTER_ARM_TRUE@@COND_FILTER_SIMPLE_TRUE@am__objects_32 = liblzma_la-arm.lo
+@COND_FILTER_ARMTHUMB_TRUE@@COND_FILTER_SIMPLE_TRUE@am__objects_33 = liblzma_la-armthumb.lo
+@COND_FILTER_SIMPLE_TRUE@@COND_FILTER_SPARC_TRUE@am__objects_34 = liblzma_la-sparc.lo
+@COND_W32_TRUE@am__objects_35 = liblzma_w32res.lo
+am_liblzma_la_OBJECTS = liblzma_la-common.lo liblzma_la-block_util.lo \
+ liblzma_la-easy_preset.lo liblzma_la-filter_common.lo \
+ liblzma_la-index.lo liblzma_la-stream_flags_common.lo \
+ liblzma_la-vli_size.lo $(am__objects_1) $(am__objects_2) \
+ liblzma_la-check.lo $(am__objects_3) $(am__objects_4) \
+ $(am__objects_5) $(am__objects_6) $(am__objects_7) \
+ $(am__objects_8) $(am__objects_9) $(am__objects_10) \
+ $(am__objects_11) $(am__objects_12) $(am__objects_13) \
+ $(am__objects_14) $(am__objects_15) $(am__objects_16) \
+ $(am__objects_17) $(am__objects_18) $(am__objects_19) \
+ $(am__objects_14) $(am__objects_20) $(am__objects_14) \
+ $(am__objects_21) $(am__objects_22) $(am__objects_23) \
+ $(am__objects_24) $(am__objects_25) $(am__objects_26) \
+ $(am__objects_27) $(am__objects_28) $(am__objects_29) \
+ $(am__objects_30) $(am__objects_31) $(am__objects_32) \
+ $(am__objects_33) $(am__objects_34) $(am__objects_35)
+liblzma_la_OBJECTS = $(am_liblzma_la_OBJECTS)
+liblzma_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+ $(liblzma_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
+LTCPPASCOMPILE = $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = $(liblzma_la_SOURCES)
+DIST_SOURCES = $(am__liblzma_la_SOURCES_DIST)
+RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
+ html-recursive info-recursive install-data-recursive \
+ install-dvi-recursive install-exec-recursive \
+ install-html-recursive install-info-recursive \
+ install-pdf-recursive install-ps-recursive install-recursive \
+ installcheck-recursive installdirs-recursive pdf-recursive \
+ ps-recursive uninstall-recursive
+DATA = $(doc_DATA) $(pkgconfig_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
+ distclean-recursive maintainer-clean-recursive
+AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
+ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \
+ distdir
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+ dir0=`pwd`; \
+ sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+ sed_rest='s,^[^/]*/*,,'; \
+ sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+ sed_butlast='s,/*[^/]*$$,,'; \
+ while test -n "$$dir1"; do \
+ first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+ if test "$$first" != "."; then \
+ if test "$$first" = ".."; then \
+ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+ else \
+ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+ if test "$$first2" = "$$first"; then \
+ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+ else \
+ dir2="../$$dir2"; \
+ fi; \
+ dir0="$$dir0"/"$$first"; \
+ fi; \
+ fi; \
+ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+ done; \
+ reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+SUBDIRS = api
+EXTRA_DIST = check/crc32_tablegen.c check/crc64_tablegen.c \
+ $(am__append_14) liblzma.pc.in
+CLEANFILES = $(am__append_38)
+doc_DATA = $(am__append_41)
+lib_LTLIBRARIES = liblzma.la
+liblzma_la_SOURCES = common/common.c common/common.h common/bsr.h \
+ common/block_util.c common/easy_preset.c common/easy_preset.h \
+ common/filter_common.c common/filter_common.h common/index.c \
+ common/index.h common/stream_flags_common.c \
+ common/stream_flags_common.h common/vli_size.c $(am__append_1) \
+ $(am__append_2) check/check.c check/check.h check/crc_macros.h \
+ $(am__append_3) $(am__append_4) $(am__append_5) \
+ $(am__append_6) $(am__append_7) $(am__append_8) \
+ $(am__append_9) $(am__append_10) $(am__append_11) \
+ $(am__append_12) $(am__append_13) $(am__append_15) \
+ $(am__append_16) $(am__append_17) $(am__append_18) \
+ $(am__append_19) $(am__append_20) $(am__append_21) \
+ $(am__append_22) $(am__append_23) $(am__append_24) \
+ $(am__append_25) $(am__append_26) $(am__append_27) \
+ $(am__append_28) $(am__append_29) $(am__append_30) \
+ $(am__append_31) $(am__append_32) $(am__append_33) \
+ $(am__append_34) $(am__append_35) $(am__append_36) \
+ $(am__append_37) $(am__append_39)
+liblzma_la_CPPFLAGS = \
+ -I$(top_srcdir)/src/liblzma/api \
+ -I$(top_srcdir)/src/liblzma/common \
+ -I$(top_srcdir)/src/liblzma/check \
+ -I$(top_srcdir)/src/liblzma/lz \
+ -I$(top_srcdir)/src/liblzma/rangecoder \
+ -I$(top_srcdir)/src/liblzma/lzma \
+ -I$(top_srcdir)/src/liblzma/subblock \
+ -I$(top_srcdir)/src/liblzma/delta \
+ -I$(top_srcdir)/src/liblzma/simple \
+ -I$(top_srcdir)/src/common
+
+liblzma_la_LDFLAGS = -no-undefined -version-info 0:0:0 \
+ $(am__append_40)
+pkgconfigdir = $(libdir)/pkgconfig
+pkgconfig_DATA = liblzma.pc
+all: all-recursive
+
+.SUFFIXES:
+.SUFFIXES: .S .c .lo .o .obj .rc
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/common/Makefile.inc $(srcdir)/check/Makefile.inc $(srcdir)/lz/Makefile.inc $(srcdir)/lzma/Makefile.inc $(srcdir)/rangecoder/Makefile.inc $(srcdir)/subblock/Makefile.inc $(srcdir)/delta/Makefile.inc $(srcdir)/simple/Makefile.inc $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/liblzma/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/liblzma/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+liblzma.pc: $(top_builddir)/config.status $(srcdir)/liblzma.pc.in
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+install-libLTLIBRARIES: $(lib_LTLIBRARIES)
+ @$(NORMAL_INSTALL)
+ test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)"
+ @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
+ list2=; for p in $$list; do \
+ if test -f $$p; then \
+ list2="$$list2 $$p"; \
+ else :; fi; \
+ done; \
+ test -z "$$list2" || { \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
+ }
+
+uninstall-libLTLIBRARIES:
+ @$(NORMAL_UNINSTALL)
+ @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
+ for p in $$list; do \
+ $(am__strip_dir) \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \
+ done
+
+clean-libLTLIBRARIES:
+ -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
+ @list='$(lib_LTLIBRARIES)'; for p in $$list; do \
+ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+ test "$$dir" != "$$p" || dir=.; \
+ echo "rm -f \"$${dir}/so_locations\""; \
+ rm -f "$${dir}/so_locations"; \
+ done
+liblzma.la: $(liblzma_la_OBJECTS) $(liblzma_la_DEPENDENCIES)
+ $(liblzma_la_LINK) -rpath $(libdir) $(liblzma_la_OBJECTS) $(liblzma_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-alone_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-alone_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-arm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-armthumb.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-auto_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-block_buffer_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-block_buffer_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-block_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-block_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-block_header_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-block_header_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-block_util.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-check.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-common.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-crc32_fast.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-crc32_small.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-crc32_table.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-crc32_x86.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-crc64_fast.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-crc64_small.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-crc64_table.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-crc64_x86.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-delta_common.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-delta_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-delta_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-easy_buffer_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-easy_decoder_memusage.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-easy_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-easy_encoder_memusage.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-easy_preset.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-fastpos_table.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-filter_buffer_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-filter_buffer_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-filter_common.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-filter_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-filter_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-filter_flags_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-filter_flags_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-ia64.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-index.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-index_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-index_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-index_hash.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-lz_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-lz_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-lz_encoder_mf.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-lzma2_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-lzma2_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-lzma_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-lzma_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-lzma_encoder_optimum_fast.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-lzma_encoder_optimum_normal.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-lzma_encoder_presets.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-powerpc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-price_table.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-sha256.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-simple_coder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-simple_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-simple_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-sparc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-stream_buffer_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-stream_buffer_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-stream_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-stream_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-stream_flags_common.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-stream_flags_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-stream_flags_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-subblock_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-subblock_decoder_helper.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-subblock_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-vli_decoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-vli_encoder.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-vli_size.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblzma_la-x86.Plo@am__quote@
+
+.S.o:
+@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ $<
+
+.S.obj:
+@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.S.lo:
+@am__fastdepCCAS_TRUE@ $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(LTCPPASCOMPILE) -c -o $@ $<
+
+liblzma_la-crc32_x86.lo: check/crc32_x86.S
+@am__fastdepCCAS_TRUE@ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -MT liblzma_la-crc32_x86.lo -MD -MP -MF $(DEPDIR)/liblzma_la-crc32_x86.Tpo -c -o liblzma_la-crc32_x86.lo `test -f 'check/crc32_x86.S' || echo '$(srcdir)/'`check/crc32_x86.S
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-crc32_x86.Tpo $(DEPDIR)/liblzma_la-crc32_x86.Plo
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='check/crc32_x86.S' object='liblzma_la-crc32_x86.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o liblzma_la-crc32_x86.lo `test -f 'check/crc32_x86.S' || echo '$(srcdir)/'`check/crc32_x86.S
+
+liblzma_la-crc64_x86.lo: check/crc64_x86.S
+@am__fastdepCCAS_TRUE@ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -MT liblzma_la-crc64_x86.lo -MD -MP -MF $(DEPDIR)/liblzma_la-crc64_x86.Tpo -c -o liblzma_la-crc64_x86.lo `test -f 'check/crc64_x86.S' || echo '$(srcdir)/'`check/crc64_x86.S
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-crc64_x86.Tpo $(DEPDIR)/liblzma_la-crc64_x86.Plo
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='check/crc64_x86.S' object='liblzma_la-crc64_x86.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o liblzma_la-crc64_x86.lo `test -f 'check/crc64_x86.S' || echo '$(srcdir)/'`check/crc64_x86.S
+
+.c.o:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
+
+liblzma_la-common.lo: common/common.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-common.lo -MD -MP -MF $(DEPDIR)/liblzma_la-common.Tpo -c -o liblzma_la-common.lo `test -f 'common/common.c' || echo '$(srcdir)/'`common/common.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-common.Tpo $(DEPDIR)/liblzma_la-common.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/common.c' object='liblzma_la-common.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-common.lo `test -f 'common/common.c' || echo '$(srcdir)/'`common/common.c
+
+liblzma_la-block_util.lo: common/block_util.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-block_util.lo -MD -MP -MF $(DEPDIR)/liblzma_la-block_util.Tpo -c -o liblzma_la-block_util.lo `test -f 'common/block_util.c' || echo '$(srcdir)/'`common/block_util.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-block_util.Tpo $(DEPDIR)/liblzma_la-block_util.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/block_util.c' object='liblzma_la-block_util.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-block_util.lo `test -f 'common/block_util.c' || echo '$(srcdir)/'`common/block_util.c
+
+liblzma_la-easy_preset.lo: common/easy_preset.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-easy_preset.lo -MD -MP -MF $(DEPDIR)/liblzma_la-easy_preset.Tpo -c -o liblzma_la-easy_preset.lo `test -f 'common/easy_preset.c' || echo '$(srcdir)/'`common/easy_preset.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-easy_preset.Tpo $(DEPDIR)/liblzma_la-easy_preset.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/easy_preset.c' object='liblzma_la-easy_preset.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-easy_preset.lo `test -f 'common/easy_preset.c' || echo '$(srcdir)/'`common/easy_preset.c
+
+liblzma_la-filter_common.lo: common/filter_common.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-filter_common.lo -MD -MP -MF $(DEPDIR)/liblzma_la-filter_common.Tpo -c -o liblzma_la-filter_common.lo `test -f 'common/filter_common.c' || echo '$(srcdir)/'`common/filter_common.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-filter_common.Tpo $(DEPDIR)/liblzma_la-filter_common.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/filter_common.c' object='liblzma_la-filter_common.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-filter_common.lo `test -f 'common/filter_common.c' || echo '$(srcdir)/'`common/filter_common.c
+
+liblzma_la-index.lo: common/index.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-index.lo -MD -MP -MF $(DEPDIR)/liblzma_la-index.Tpo -c -o liblzma_la-index.lo `test -f 'common/index.c' || echo '$(srcdir)/'`common/index.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-index.Tpo $(DEPDIR)/liblzma_la-index.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/index.c' object='liblzma_la-index.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-index.lo `test -f 'common/index.c' || echo '$(srcdir)/'`common/index.c
+
+liblzma_la-stream_flags_common.lo: common/stream_flags_common.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-stream_flags_common.lo -MD -MP -MF $(DEPDIR)/liblzma_la-stream_flags_common.Tpo -c -o liblzma_la-stream_flags_common.lo `test -f 'common/stream_flags_common.c' || echo '$(srcdir)/'`common/stream_flags_common.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-stream_flags_common.Tpo $(DEPDIR)/liblzma_la-stream_flags_common.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/stream_flags_common.c' object='liblzma_la-stream_flags_common.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-stream_flags_common.lo `test -f 'common/stream_flags_common.c' || echo '$(srcdir)/'`common/stream_flags_common.c
+
+liblzma_la-vli_size.lo: common/vli_size.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-vli_size.lo -MD -MP -MF $(DEPDIR)/liblzma_la-vli_size.Tpo -c -o liblzma_la-vli_size.lo `test -f 'common/vli_size.c' || echo '$(srcdir)/'`common/vli_size.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-vli_size.Tpo $(DEPDIR)/liblzma_la-vli_size.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/vli_size.c' object='liblzma_la-vli_size.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-vli_size.lo `test -f 'common/vli_size.c' || echo '$(srcdir)/'`common/vli_size.c
+
+liblzma_la-alone_encoder.lo: common/alone_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-alone_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-alone_encoder.Tpo -c -o liblzma_la-alone_encoder.lo `test -f 'common/alone_encoder.c' || echo '$(srcdir)/'`common/alone_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-alone_encoder.Tpo $(DEPDIR)/liblzma_la-alone_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/alone_encoder.c' object='liblzma_la-alone_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-alone_encoder.lo `test -f 'common/alone_encoder.c' || echo '$(srcdir)/'`common/alone_encoder.c
+
+liblzma_la-block_buffer_encoder.lo: common/block_buffer_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-block_buffer_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-block_buffer_encoder.Tpo -c -o liblzma_la-block_buffer_encoder.lo `test -f 'common/block_buffer_encoder.c' || echo '$(srcdir)/'`common/block_buffer_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-block_buffer_encoder.Tpo $(DEPDIR)/liblzma_la-block_buffer_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/block_buffer_encoder.c' object='liblzma_la-block_buffer_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-block_buffer_encoder.lo `test -f 'common/block_buffer_encoder.c' || echo '$(srcdir)/'`common/block_buffer_encoder.c
+
+liblzma_la-block_encoder.lo: common/block_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-block_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-block_encoder.Tpo -c -o liblzma_la-block_encoder.lo `test -f 'common/block_encoder.c' || echo '$(srcdir)/'`common/block_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-block_encoder.Tpo $(DEPDIR)/liblzma_la-block_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/block_encoder.c' object='liblzma_la-block_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-block_encoder.lo `test -f 'common/block_encoder.c' || echo '$(srcdir)/'`common/block_encoder.c
+
+liblzma_la-block_header_encoder.lo: common/block_header_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-block_header_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-block_header_encoder.Tpo -c -o liblzma_la-block_header_encoder.lo `test -f 'common/block_header_encoder.c' || echo '$(srcdir)/'`common/block_header_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-block_header_encoder.Tpo $(DEPDIR)/liblzma_la-block_header_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/block_header_encoder.c' object='liblzma_la-block_header_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-block_header_encoder.lo `test -f 'common/block_header_encoder.c' || echo '$(srcdir)/'`common/block_header_encoder.c
+
+liblzma_la-easy_buffer_encoder.lo: common/easy_buffer_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-easy_buffer_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-easy_buffer_encoder.Tpo -c -o liblzma_la-easy_buffer_encoder.lo `test -f 'common/easy_buffer_encoder.c' || echo '$(srcdir)/'`common/easy_buffer_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-easy_buffer_encoder.Tpo $(DEPDIR)/liblzma_la-easy_buffer_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/easy_buffer_encoder.c' object='liblzma_la-easy_buffer_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-easy_buffer_encoder.lo `test -f 'common/easy_buffer_encoder.c' || echo '$(srcdir)/'`common/easy_buffer_encoder.c
+
+liblzma_la-easy_encoder.lo: common/easy_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-easy_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-easy_encoder.Tpo -c -o liblzma_la-easy_encoder.lo `test -f 'common/easy_encoder.c' || echo '$(srcdir)/'`common/easy_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-easy_encoder.Tpo $(DEPDIR)/liblzma_la-easy_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/easy_encoder.c' object='liblzma_la-easy_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-easy_encoder.lo `test -f 'common/easy_encoder.c' || echo '$(srcdir)/'`common/easy_encoder.c
+
+liblzma_la-easy_encoder_memusage.lo: common/easy_encoder_memusage.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-easy_encoder_memusage.lo -MD -MP -MF $(DEPDIR)/liblzma_la-easy_encoder_memusage.Tpo -c -o liblzma_la-easy_encoder_memusage.lo `test -f 'common/easy_encoder_memusage.c' || echo '$(srcdir)/'`common/easy_encoder_memusage.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-easy_encoder_memusage.Tpo $(DEPDIR)/liblzma_la-easy_encoder_memusage.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/easy_encoder_memusage.c' object='liblzma_la-easy_encoder_memusage.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-easy_encoder_memusage.lo `test -f 'common/easy_encoder_memusage.c' || echo '$(srcdir)/'`common/easy_encoder_memusage.c
+
+liblzma_la-filter_buffer_encoder.lo: common/filter_buffer_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-filter_buffer_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-filter_buffer_encoder.Tpo -c -o liblzma_la-filter_buffer_encoder.lo `test -f 'common/filter_buffer_encoder.c' || echo '$(srcdir)/'`common/filter_buffer_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-filter_buffer_encoder.Tpo $(DEPDIR)/liblzma_la-filter_buffer_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/filter_buffer_encoder.c' object='liblzma_la-filter_buffer_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-filter_buffer_encoder.lo `test -f 'common/filter_buffer_encoder.c' || echo '$(srcdir)/'`common/filter_buffer_encoder.c
+
+liblzma_la-filter_encoder.lo: common/filter_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-filter_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-filter_encoder.Tpo -c -o liblzma_la-filter_encoder.lo `test -f 'common/filter_encoder.c' || echo '$(srcdir)/'`common/filter_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-filter_encoder.Tpo $(DEPDIR)/liblzma_la-filter_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/filter_encoder.c' object='liblzma_la-filter_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-filter_encoder.lo `test -f 'common/filter_encoder.c' || echo '$(srcdir)/'`common/filter_encoder.c
+
+liblzma_la-filter_flags_encoder.lo: common/filter_flags_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-filter_flags_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-filter_flags_encoder.Tpo -c -o liblzma_la-filter_flags_encoder.lo `test -f 'common/filter_flags_encoder.c' || echo '$(srcdir)/'`common/filter_flags_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-filter_flags_encoder.Tpo $(DEPDIR)/liblzma_la-filter_flags_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/filter_flags_encoder.c' object='liblzma_la-filter_flags_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-filter_flags_encoder.lo `test -f 'common/filter_flags_encoder.c' || echo '$(srcdir)/'`common/filter_flags_encoder.c
+
+liblzma_la-index_encoder.lo: common/index_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-index_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-index_encoder.Tpo -c -o liblzma_la-index_encoder.lo `test -f 'common/index_encoder.c' || echo '$(srcdir)/'`common/index_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-index_encoder.Tpo $(DEPDIR)/liblzma_la-index_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/index_encoder.c' object='liblzma_la-index_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-index_encoder.lo `test -f 'common/index_encoder.c' || echo '$(srcdir)/'`common/index_encoder.c
+
+liblzma_la-stream_buffer_encoder.lo: common/stream_buffer_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-stream_buffer_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-stream_buffer_encoder.Tpo -c -o liblzma_la-stream_buffer_encoder.lo `test -f 'common/stream_buffer_encoder.c' || echo '$(srcdir)/'`common/stream_buffer_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-stream_buffer_encoder.Tpo $(DEPDIR)/liblzma_la-stream_buffer_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/stream_buffer_encoder.c' object='liblzma_la-stream_buffer_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-stream_buffer_encoder.lo `test -f 'common/stream_buffer_encoder.c' || echo '$(srcdir)/'`common/stream_buffer_encoder.c
+
+liblzma_la-stream_encoder.lo: common/stream_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-stream_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-stream_encoder.Tpo -c -o liblzma_la-stream_encoder.lo `test -f 'common/stream_encoder.c' || echo '$(srcdir)/'`common/stream_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-stream_encoder.Tpo $(DEPDIR)/liblzma_la-stream_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/stream_encoder.c' object='liblzma_la-stream_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-stream_encoder.lo `test -f 'common/stream_encoder.c' || echo '$(srcdir)/'`common/stream_encoder.c
+
+liblzma_la-stream_flags_encoder.lo: common/stream_flags_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-stream_flags_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-stream_flags_encoder.Tpo -c -o liblzma_la-stream_flags_encoder.lo `test -f 'common/stream_flags_encoder.c' || echo '$(srcdir)/'`common/stream_flags_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-stream_flags_encoder.Tpo $(DEPDIR)/liblzma_la-stream_flags_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/stream_flags_encoder.c' object='liblzma_la-stream_flags_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-stream_flags_encoder.lo `test -f 'common/stream_flags_encoder.c' || echo '$(srcdir)/'`common/stream_flags_encoder.c
+
+liblzma_la-vli_encoder.lo: common/vli_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-vli_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-vli_encoder.Tpo -c -o liblzma_la-vli_encoder.lo `test -f 'common/vli_encoder.c' || echo '$(srcdir)/'`common/vli_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-vli_encoder.Tpo $(DEPDIR)/liblzma_la-vli_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/vli_encoder.c' object='liblzma_la-vli_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-vli_encoder.lo `test -f 'common/vli_encoder.c' || echo '$(srcdir)/'`common/vli_encoder.c
+
+liblzma_la-alone_decoder.lo: common/alone_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-alone_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-alone_decoder.Tpo -c -o liblzma_la-alone_decoder.lo `test -f 'common/alone_decoder.c' || echo '$(srcdir)/'`common/alone_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-alone_decoder.Tpo $(DEPDIR)/liblzma_la-alone_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/alone_decoder.c' object='liblzma_la-alone_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-alone_decoder.lo `test -f 'common/alone_decoder.c' || echo '$(srcdir)/'`common/alone_decoder.c
+
+liblzma_la-auto_decoder.lo: common/auto_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-auto_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-auto_decoder.Tpo -c -o liblzma_la-auto_decoder.lo `test -f 'common/auto_decoder.c' || echo '$(srcdir)/'`common/auto_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-auto_decoder.Tpo $(DEPDIR)/liblzma_la-auto_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/auto_decoder.c' object='liblzma_la-auto_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-auto_decoder.lo `test -f 'common/auto_decoder.c' || echo '$(srcdir)/'`common/auto_decoder.c
+
+liblzma_la-block_buffer_decoder.lo: common/block_buffer_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-block_buffer_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-block_buffer_decoder.Tpo -c -o liblzma_la-block_buffer_decoder.lo `test -f 'common/block_buffer_decoder.c' || echo '$(srcdir)/'`common/block_buffer_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-block_buffer_decoder.Tpo $(DEPDIR)/liblzma_la-block_buffer_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/block_buffer_decoder.c' object='liblzma_la-block_buffer_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-block_buffer_decoder.lo `test -f 'common/block_buffer_decoder.c' || echo '$(srcdir)/'`common/block_buffer_decoder.c
+
+liblzma_la-block_decoder.lo: common/block_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-block_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-block_decoder.Tpo -c -o liblzma_la-block_decoder.lo `test -f 'common/block_decoder.c' || echo '$(srcdir)/'`common/block_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-block_decoder.Tpo $(DEPDIR)/liblzma_la-block_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/block_decoder.c' object='liblzma_la-block_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-block_decoder.lo `test -f 'common/block_decoder.c' || echo '$(srcdir)/'`common/block_decoder.c
+
+liblzma_la-block_header_decoder.lo: common/block_header_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-block_header_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-block_header_decoder.Tpo -c -o liblzma_la-block_header_decoder.lo `test -f 'common/block_header_decoder.c' || echo '$(srcdir)/'`common/block_header_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-block_header_decoder.Tpo $(DEPDIR)/liblzma_la-block_header_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/block_header_decoder.c' object='liblzma_la-block_header_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-block_header_decoder.lo `test -f 'common/block_header_decoder.c' || echo '$(srcdir)/'`common/block_header_decoder.c
+
+liblzma_la-easy_decoder_memusage.lo: common/easy_decoder_memusage.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-easy_decoder_memusage.lo -MD -MP -MF $(DEPDIR)/liblzma_la-easy_decoder_memusage.Tpo -c -o liblzma_la-easy_decoder_memusage.lo `test -f 'common/easy_decoder_memusage.c' || echo '$(srcdir)/'`common/easy_decoder_memusage.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-easy_decoder_memusage.Tpo $(DEPDIR)/liblzma_la-easy_decoder_memusage.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/easy_decoder_memusage.c' object='liblzma_la-easy_decoder_memusage.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-easy_decoder_memusage.lo `test -f 'common/easy_decoder_memusage.c' || echo '$(srcdir)/'`common/easy_decoder_memusage.c
+
+liblzma_la-filter_buffer_decoder.lo: common/filter_buffer_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-filter_buffer_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-filter_buffer_decoder.Tpo -c -o liblzma_la-filter_buffer_decoder.lo `test -f 'common/filter_buffer_decoder.c' || echo '$(srcdir)/'`common/filter_buffer_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-filter_buffer_decoder.Tpo $(DEPDIR)/liblzma_la-filter_buffer_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/filter_buffer_decoder.c' object='liblzma_la-filter_buffer_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-filter_buffer_decoder.lo `test -f 'common/filter_buffer_decoder.c' || echo '$(srcdir)/'`common/filter_buffer_decoder.c
+
+liblzma_la-filter_decoder.lo: common/filter_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-filter_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-filter_decoder.Tpo -c -o liblzma_la-filter_decoder.lo `test -f 'common/filter_decoder.c' || echo '$(srcdir)/'`common/filter_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-filter_decoder.Tpo $(DEPDIR)/liblzma_la-filter_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/filter_decoder.c' object='liblzma_la-filter_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-filter_decoder.lo `test -f 'common/filter_decoder.c' || echo '$(srcdir)/'`common/filter_decoder.c
+
+liblzma_la-filter_flags_decoder.lo: common/filter_flags_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-filter_flags_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-filter_flags_decoder.Tpo -c -o liblzma_la-filter_flags_decoder.lo `test -f 'common/filter_flags_decoder.c' || echo '$(srcdir)/'`common/filter_flags_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-filter_flags_decoder.Tpo $(DEPDIR)/liblzma_la-filter_flags_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/filter_flags_decoder.c' object='liblzma_la-filter_flags_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-filter_flags_decoder.lo `test -f 'common/filter_flags_decoder.c' || echo '$(srcdir)/'`common/filter_flags_decoder.c
+
+liblzma_la-index_decoder.lo: common/index_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-index_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-index_decoder.Tpo -c -o liblzma_la-index_decoder.lo `test -f 'common/index_decoder.c' || echo '$(srcdir)/'`common/index_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-index_decoder.Tpo $(DEPDIR)/liblzma_la-index_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/index_decoder.c' object='liblzma_la-index_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-index_decoder.lo `test -f 'common/index_decoder.c' || echo '$(srcdir)/'`common/index_decoder.c
+
+liblzma_la-index_hash.lo: common/index_hash.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-index_hash.lo -MD -MP -MF $(DEPDIR)/liblzma_la-index_hash.Tpo -c -o liblzma_la-index_hash.lo `test -f 'common/index_hash.c' || echo '$(srcdir)/'`common/index_hash.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-index_hash.Tpo $(DEPDIR)/liblzma_la-index_hash.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/index_hash.c' object='liblzma_la-index_hash.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-index_hash.lo `test -f 'common/index_hash.c' || echo '$(srcdir)/'`common/index_hash.c
+
+liblzma_la-stream_buffer_decoder.lo: common/stream_buffer_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-stream_buffer_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-stream_buffer_decoder.Tpo -c -o liblzma_la-stream_buffer_decoder.lo `test -f 'common/stream_buffer_decoder.c' || echo '$(srcdir)/'`common/stream_buffer_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-stream_buffer_decoder.Tpo $(DEPDIR)/liblzma_la-stream_buffer_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/stream_buffer_decoder.c' object='liblzma_la-stream_buffer_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-stream_buffer_decoder.lo `test -f 'common/stream_buffer_decoder.c' || echo '$(srcdir)/'`common/stream_buffer_decoder.c
+
+liblzma_la-stream_decoder.lo: common/stream_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-stream_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-stream_decoder.Tpo -c -o liblzma_la-stream_decoder.lo `test -f 'common/stream_decoder.c' || echo '$(srcdir)/'`common/stream_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-stream_decoder.Tpo $(DEPDIR)/liblzma_la-stream_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/stream_decoder.c' object='liblzma_la-stream_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-stream_decoder.lo `test -f 'common/stream_decoder.c' || echo '$(srcdir)/'`common/stream_decoder.c
+
+liblzma_la-stream_flags_decoder.lo: common/stream_flags_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-stream_flags_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-stream_flags_decoder.Tpo -c -o liblzma_la-stream_flags_decoder.lo `test -f 'common/stream_flags_decoder.c' || echo '$(srcdir)/'`common/stream_flags_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-stream_flags_decoder.Tpo $(DEPDIR)/liblzma_la-stream_flags_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/stream_flags_decoder.c' object='liblzma_la-stream_flags_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-stream_flags_decoder.lo `test -f 'common/stream_flags_decoder.c' || echo '$(srcdir)/'`common/stream_flags_decoder.c
+
+liblzma_la-vli_decoder.lo: common/vli_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-vli_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-vli_decoder.Tpo -c -o liblzma_la-vli_decoder.lo `test -f 'common/vli_decoder.c' || echo '$(srcdir)/'`common/vli_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-vli_decoder.Tpo $(DEPDIR)/liblzma_la-vli_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='common/vli_decoder.c' object='liblzma_la-vli_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-vli_decoder.lo `test -f 'common/vli_decoder.c' || echo '$(srcdir)/'`common/vli_decoder.c
+
+liblzma_la-check.lo: check/check.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-check.lo -MD -MP -MF $(DEPDIR)/liblzma_la-check.Tpo -c -o liblzma_la-check.lo `test -f 'check/check.c' || echo '$(srcdir)/'`check/check.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-check.Tpo $(DEPDIR)/liblzma_la-check.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='check/check.c' object='liblzma_la-check.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-check.lo `test -f 'check/check.c' || echo '$(srcdir)/'`check/check.c
+
+liblzma_la-crc32_small.lo: check/crc32_small.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-crc32_small.lo -MD -MP -MF $(DEPDIR)/liblzma_la-crc32_small.Tpo -c -o liblzma_la-crc32_small.lo `test -f 'check/crc32_small.c' || echo '$(srcdir)/'`check/crc32_small.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-crc32_small.Tpo $(DEPDIR)/liblzma_la-crc32_small.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='check/crc32_small.c' object='liblzma_la-crc32_small.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-crc32_small.lo `test -f 'check/crc32_small.c' || echo '$(srcdir)/'`check/crc32_small.c
+
+liblzma_la-crc32_table.lo: check/crc32_table.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-crc32_table.lo -MD -MP -MF $(DEPDIR)/liblzma_la-crc32_table.Tpo -c -o liblzma_la-crc32_table.lo `test -f 'check/crc32_table.c' || echo '$(srcdir)/'`check/crc32_table.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-crc32_table.Tpo $(DEPDIR)/liblzma_la-crc32_table.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='check/crc32_table.c' object='liblzma_la-crc32_table.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-crc32_table.lo `test -f 'check/crc32_table.c' || echo '$(srcdir)/'`check/crc32_table.c
+
+liblzma_la-crc32_fast.lo: check/crc32_fast.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-crc32_fast.lo -MD -MP -MF $(DEPDIR)/liblzma_la-crc32_fast.Tpo -c -o liblzma_la-crc32_fast.lo `test -f 'check/crc32_fast.c' || echo '$(srcdir)/'`check/crc32_fast.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-crc32_fast.Tpo $(DEPDIR)/liblzma_la-crc32_fast.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='check/crc32_fast.c' object='liblzma_la-crc32_fast.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-crc32_fast.lo `test -f 'check/crc32_fast.c' || echo '$(srcdir)/'`check/crc32_fast.c
+
+liblzma_la-crc64_small.lo: check/crc64_small.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-crc64_small.lo -MD -MP -MF $(DEPDIR)/liblzma_la-crc64_small.Tpo -c -o liblzma_la-crc64_small.lo `test -f 'check/crc64_small.c' || echo '$(srcdir)/'`check/crc64_small.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-crc64_small.Tpo $(DEPDIR)/liblzma_la-crc64_small.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='check/crc64_small.c' object='liblzma_la-crc64_small.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-crc64_small.lo `test -f 'check/crc64_small.c' || echo '$(srcdir)/'`check/crc64_small.c
+
+liblzma_la-crc64_table.lo: check/crc64_table.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-crc64_table.lo -MD -MP -MF $(DEPDIR)/liblzma_la-crc64_table.Tpo -c -o liblzma_la-crc64_table.lo `test -f 'check/crc64_table.c' || echo '$(srcdir)/'`check/crc64_table.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-crc64_table.Tpo $(DEPDIR)/liblzma_la-crc64_table.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='check/crc64_table.c' object='liblzma_la-crc64_table.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-crc64_table.lo `test -f 'check/crc64_table.c' || echo '$(srcdir)/'`check/crc64_table.c
+
+liblzma_la-crc64_fast.lo: check/crc64_fast.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-crc64_fast.lo -MD -MP -MF $(DEPDIR)/liblzma_la-crc64_fast.Tpo -c -o liblzma_la-crc64_fast.lo `test -f 'check/crc64_fast.c' || echo '$(srcdir)/'`check/crc64_fast.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-crc64_fast.Tpo $(DEPDIR)/liblzma_la-crc64_fast.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='check/crc64_fast.c' object='liblzma_la-crc64_fast.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-crc64_fast.lo `test -f 'check/crc64_fast.c' || echo '$(srcdir)/'`check/crc64_fast.c
+
+liblzma_la-sha256.lo: check/sha256.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-sha256.lo -MD -MP -MF $(DEPDIR)/liblzma_la-sha256.Tpo -c -o liblzma_la-sha256.lo `test -f 'check/sha256.c' || echo '$(srcdir)/'`check/sha256.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-sha256.Tpo $(DEPDIR)/liblzma_la-sha256.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='check/sha256.c' object='liblzma_la-sha256.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-sha256.lo `test -f 'check/sha256.c' || echo '$(srcdir)/'`check/sha256.c
+
+liblzma_la-lz_encoder.lo: lz/lz_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-lz_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-lz_encoder.Tpo -c -o liblzma_la-lz_encoder.lo `test -f 'lz/lz_encoder.c' || echo '$(srcdir)/'`lz/lz_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-lz_encoder.Tpo $(DEPDIR)/liblzma_la-lz_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lz/lz_encoder.c' object='liblzma_la-lz_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-lz_encoder.lo `test -f 'lz/lz_encoder.c' || echo '$(srcdir)/'`lz/lz_encoder.c
+
+liblzma_la-lz_encoder_mf.lo: lz/lz_encoder_mf.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-lz_encoder_mf.lo -MD -MP -MF $(DEPDIR)/liblzma_la-lz_encoder_mf.Tpo -c -o liblzma_la-lz_encoder_mf.lo `test -f 'lz/lz_encoder_mf.c' || echo '$(srcdir)/'`lz/lz_encoder_mf.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-lz_encoder_mf.Tpo $(DEPDIR)/liblzma_la-lz_encoder_mf.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lz/lz_encoder_mf.c' object='liblzma_la-lz_encoder_mf.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-lz_encoder_mf.lo `test -f 'lz/lz_encoder_mf.c' || echo '$(srcdir)/'`lz/lz_encoder_mf.c
+
+liblzma_la-lz_decoder.lo: lz/lz_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-lz_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-lz_decoder.Tpo -c -o liblzma_la-lz_decoder.lo `test -f 'lz/lz_decoder.c' || echo '$(srcdir)/'`lz/lz_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-lz_decoder.Tpo $(DEPDIR)/liblzma_la-lz_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lz/lz_decoder.c' object='liblzma_la-lz_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-lz_decoder.lo `test -f 'lz/lz_decoder.c' || echo '$(srcdir)/'`lz/lz_decoder.c
+
+liblzma_la-lzma_encoder.lo: lzma/lzma_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-lzma_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-lzma_encoder.Tpo -c -o liblzma_la-lzma_encoder.lo `test -f 'lzma/lzma_encoder.c' || echo '$(srcdir)/'`lzma/lzma_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-lzma_encoder.Tpo $(DEPDIR)/liblzma_la-lzma_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lzma/lzma_encoder.c' object='liblzma_la-lzma_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-lzma_encoder.lo `test -f 'lzma/lzma_encoder.c' || echo '$(srcdir)/'`lzma/lzma_encoder.c
+
+liblzma_la-lzma_encoder_presets.lo: lzma/lzma_encoder_presets.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-lzma_encoder_presets.lo -MD -MP -MF $(DEPDIR)/liblzma_la-lzma_encoder_presets.Tpo -c -o liblzma_la-lzma_encoder_presets.lo `test -f 'lzma/lzma_encoder_presets.c' || echo '$(srcdir)/'`lzma/lzma_encoder_presets.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-lzma_encoder_presets.Tpo $(DEPDIR)/liblzma_la-lzma_encoder_presets.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lzma/lzma_encoder_presets.c' object='liblzma_la-lzma_encoder_presets.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-lzma_encoder_presets.lo `test -f 'lzma/lzma_encoder_presets.c' || echo '$(srcdir)/'`lzma/lzma_encoder_presets.c
+
+liblzma_la-lzma_encoder_optimum_fast.lo: lzma/lzma_encoder_optimum_fast.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-lzma_encoder_optimum_fast.lo -MD -MP -MF $(DEPDIR)/liblzma_la-lzma_encoder_optimum_fast.Tpo -c -o liblzma_la-lzma_encoder_optimum_fast.lo `test -f 'lzma/lzma_encoder_optimum_fast.c' || echo '$(srcdir)/'`lzma/lzma_encoder_optimum_fast.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-lzma_encoder_optimum_fast.Tpo $(DEPDIR)/liblzma_la-lzma_encoder_optimum_fast.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lzma/lzma_encoder_optimum_fast.c' object='liblzma_la-lzma_encoder_optimum_fast.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-lzma_encoder_optimum_fast.lo `test -f 'lzma/lzma_encoder_optimum_fast.c' || echo '$(srcdir)/'`lzma/lzma_encoder_optimum_fast.c
+
+liblzma_la-lzma_encoder_optimum_normal.lo: lzma/lzma_encoder_optimum_normal.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-lzma_encoder_optimum_normal.lo -MD -MP -MF $(DEPDIR)/liblzma_la-lzma_encoder_optimum_normal.Tpo -c -o liblzma_la-lzma_encoder_optimum_normal.lo `test -f 'lzma/lzma_encoder_optimum_normal.c' || echo '$(srcdir)/'`lzma/lzma_encoder_optimum_normal.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-lzma_encoder_optimum_normal.Tpo $(DEPDIR)/liblzma_la-lzma_encoder_optimum_normal.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lzma/lzma_encoder_optimum_normal.c' object='liblzma_la-lzma_encoder_optimum_normal.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-lzma_encoder_optimum_normal.lo `test -f 'lzma/lzma_encoder_optimum_normal.c' || echo '$(srcdir)/'`lzma/lzma_encoder_optimum_normal.c
+
+liblzma_la-fastpos_table.lo: lzma/fastpos_table.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-fastpos_table.lo -MD -MP -MF $(DEPDIR)/liblzma_la-fastpos_table.Tpo -c -o liblzma_la-fastpos_table.lo `test -f 'lzma/fastpos_table.c' || echo '$(srcdir)/'`lzma/fastpos_table.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-fastpos_table.Tpo $(DEPDIR)/liblzma_la-fastpos_table.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lzma/fastpos_table.c' object='liblzma_la-fastpos_table.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-fastpos_table.lo `test -f 'lzma/fastpos_table.c' || echo '$(srcdir)/'`lzma/fastpos_table.c
+
+liblzma_la-lzma_decoder.lo: lzma/lzma_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-lzma_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-lzma_decoder.Tpo -c -o liblzma_la-lzma_decoder.lo `test -f 'lzma/lzma_decoder.c' || echo '$(srcdir)/'`lzma/lzma_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-lzma_decoder.Tpo $(DEPDIR)/liblzma_la-lzma_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lzma/lzma_decoder.c' object='liblzma_la-lzma_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-lzma_decoder.lo `test -f 'lzma/lzma_decoder.c' || echo '$(srcdir)/'`lzma/lzma_decoder.c
+
+liblzma_la-lzma2_encoder.lo: lzma/lzma2_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-lzma2_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-lzma2_encoder.Tpo -c -o liblzma_la-lzma2_encoder.lo `test -f 'lzma/lzma2_encoder.c' || echo '$(srcdir)/'`lzma/lzma2_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-lzma2_encoder.Tpo $(DEPDIR)/liblzma_la-lzma2_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lzma/lzma2_encoder.c' object='liblzma_la-lzma2_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-lzma2_encoder.lo `test -f 'lzma/lzma2_encoder.c' || echo '$(srcdir)/'`lzma/lzma2_encoder.c
+
+liblzma_la-lzma2_decoder.lo: lzma/lzma2_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-lzma2_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-lzma2_decoder.Tpo -c -o liblzma_la-lzma2_decoder.lo `test -f 'lzma/lzma2_decoder.c' || echo '$(srcdir)/'`lzma/lzma2_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-lzma2_decoder.Tpo $(DEPDIR)/liblzma_la-lzma2_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lzma/lzma2_decoder.c' object='liblzma_la-lzma2_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-lzma2_decoder.lo `test -f 'lzma/lzma2_decoder.c' || echo '$(srcdir)/'`lzma/lzma2_decoder.c
+
+liblzma_la-price_table.lo: rangecoder/price_table.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-price_table.lo -MD -MP -MF $(DEPDIR)/liblzma_la-price_table.Tpo -c -o liblzma_la-price_table.lo `test -f 'rangecoder/price_table.c' || echo '$(srcdir)/'`rangecoder/price_table.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-price_table.Tpo $(DEPDIR)/liblzma_la-price_table.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='rangecoder/price_table.c' object='liblzma_la-price_table.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-price_table.lo `test -f 'rangecoder/price_table.c' || echo '$(srcdir)/'`rangecoder/price_table.c
+
+liblzma_la-subblock_encoder.lo: subblock/subblock_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-subblock_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-subblock_encoder.Tpo -c -o liblzma_la-subblock_encoder.lo `test -f 'subblock/subblock_encoder.c' || echo '$(srcdir)/'`subblock/subblock_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-subblock_encoder.Tpo $(DEPDIR)/liblzma_la-subblock_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='subblock/subblock_encoder.c' object='liblzma_la-subblock_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-subblock_encoder.lo `test -f 'subblock/subblock_encoder.c' || echo '$(srcdir)/'`subblock/subblock_encoder.c
+
+liblzma_la-subblock_decoder.lo: subblock/subblock_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-subblock_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-subblock_decoder.Tpo -c -o liblzma_la-subblock_decoder.lo `test -f 'subblock/subblock_decoder.c' || echo '$(srcdir)/'`subblock/subblock_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-subblock_decoder.Tpo $(DEPDIR)/liblzma_la-subblock_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='subblock/subblock_decoder.c' object='liblzma_la-subblock_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-subblock_decoder.lo `test -f 'subblock/subblock_decoder.c' || echo '$(srcdir)/'`subblock/subblock_decoder.c
+
+liblzma_la-subblock_decoder_helper.lo: subblock/subblock_decoder_helper.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-subblock_decoder_helper.lo -MD -MP -MF $(DEPDIR)/liblzma_la-subblock_decoder_helper.Tpo -c -o liblzma_la-subblock_decoder_helper.lo `test -f 'subblock/subblock_decoder_helper.c' || echo '$(srcdir)/'`subblock/subblock_decoder_helper.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-subblock_decoder_helper.Tpo $(DEPDIR)/liblzma_la-subblock_decoder_helper.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='subblock/subblock_decoder_helper.c' object='liblzma_la-subblock_decoder_helper.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-subblock_decoder_helper.lo `test -f 'subblock/subblock_decoder_helper.c' || echo '$(srcdir)/'`subblock/subblock_decoder_helper.c
+
+liblzma_la-delta_common.lo: delta/delta_common.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-delta_common.lo -MD -MP -MF $(DEPDIR)/liblzma_la-delta_common.Tpo -c -o liblzma_la-delta_common.lo `test -f 'delta/delta_common.c' || echo '$(srcdir)/'`delta/delta_common.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-delta_common.Tpo $(DEPDIR)/liblzma_la-delta_common.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='delta/delta_common.c' object='liblzma_la-delta_common.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-delta_common.lo `test -f 'delta/delta_common.c' || echo '$(srcdir)/'`delta/delta_common.c
+
+liblzma_la-delta_encoder.lo: delta/delta_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-delta_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-delta_encoder.Tpo -c -o liblzma_la-delta_encoder.lo `test -f 'delta/delta_encoder.c' || echo '$(srcdir)/'`delta/delta_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-delta_encoder.Tpo $(DEPDIR)/liblzma_la-delta_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='delta/delta_encoder.c' object='liblzma_la-delta_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-delta_encoder.lo `test -f 'delta/delta_encoder.c' || echo '$(srcdir)/'`delta/delta_encoder.c
+
+liblzma_la-delta_decoder.lo: delta/delta_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-delta_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-delta_decoder.Tpo -c -o liblzma_la-delta_decoder.lo `test -f 'delta/delta_decoder.c' || echo '$(srcdir)/'`delta/delta_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-delta_decoder.Tpo $(DEPDIR)/liblzma_la-delta_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='delta/delta_decoder.c' object='liblzma_la-delta_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-delta_decoder.lo `test -f 'delta/delta_decoder.c' || echo '$(srcdir)/'`delta/delta_decoder.c
+
+liblzma_la-simple_coder.lo: simple/simple_coder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-simple_coder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-simple_coder.Tpo -c -o liblzma_la-simple_coder.lo `test -f 'simple/simple_coder.c' || echo '$(srcdir)/'`simple/simple_coder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-simple_coder.Tpo $(DEPDIR)/liblzma_la-simple_coder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='simple/simple_coder.c' object='liblzma_la-simple_coder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-simple_coder.lo `test -f 'simple/simple_coder.c' || echo '$(srcdir)/'`simple/simple_coder.c
+
+liblzma_la-simple_encoder.lo: simple/simple_encoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-simple_encoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-simple_encoder.Tpo -c -o liblzma_la-simple_encoder.lo `test -f 'simple/simple_encoder.c' || echo '$(srcdir)/'`simple/simple_encoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-simple_encoder.Tpo $(DEPDIR)/liblzma_la-simple_encoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='simple/simple_encoder.c' object='liblzma_la-simple_encoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-simple_encoder.lo `test -f 'simple/simple_encoder.c' || echo '$(srcdir)/'`simple/simple_encoder.c
+
+liblzma_la-simple_decoder.lo: simple/simple_decoder.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-simple_decoder.lo -MD -MP -MF $(DEPDIR)/liblzma_la-simple_decoder.Tpo -c -o liblzma_la-simple_decoder.lo `test -f 'simple/simple_decoder.c' || echo '$(srcdir)/'`simple/simple_decoder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-simple_decoder.Tpo $(DEPDIR)/liblzma_la-simple_decoder.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='simple/simple_decoder.c' object='liblzma_la-simple_decoder.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-simple_decoder.lo `test -f 'simple/simple_decoder.c' || echo '$(srcdir)/'`simple/simple_decoder.c
+
+liblzma_la-x86.lo: simple/x86.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-x86.lo -MD -MP -MF $(DEPDIR)/liblzma_la-x86.Tpo -c -o liblzma_la-x86.lo `test -f 'simple/x86.c' || echo '$(srcdir)/'`simple/x86.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-x86.Tpo $(DEPDIR)/liblzma_la-x86.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='simple/x86.c' object='liblzma_la-x86.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-x86.lo `test -f 'simple/x86.c' || echo '$(srcdir)/'`simple/x86.c
+
+liblzma_la-powerpc.lo: simple/powerpc.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-powerpc.lo -MD -MP -MF $(DEPDIR)/liblzma_la-powerpc.Tpo -c -o liblzma_la-powerpc.lo `test -f 'simple/powerpc.c' || echo '$(srcdir)/'`simple/powerpc.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-powerpc.Tpo $(DEPDIR)/liblzma_la-powerpc.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='simple/powerpc.c' object='liblzma_la-powerpc.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-powerpc.lo `test -f 'simple/powerpc.c' || echo '$(srcdir)/'`simple/powerpc.c
+
+liblzma_la-ia64.lo: simple/ia64.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-ia64.lo -MD -MP -MF $(DEPDIR)/liblzma_la-ia64.Tpo -c -o liblzma_la-ia64.lo `test -f 'simple/ia64.c' || echo '$(srcdir)/'`simple/ia64.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-ia64.Tpo $(DEPDIR)/liblzma_la-ia64.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='simple/ia64.c' object='liblzma_la-ia64.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-ia64.lo `test -f 'simple/ia64.c' || echo '$(srcdir)/'`simple/ia64.c
+
+liblzma_la-arm.lo: simple/arm.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-arm.lo -MD -MP -MF $(DEPDIR)/liblzma_la-arm.Tpo -c -o liblzma_la-arm.lo `test -f 'simple/arm.c' || echo '$(srcdir)/'`simple/arm.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-arm.Tpo $(DEPDIR)/liblzma_la-arm.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='simple/arm.c' object='liblzma_la-arm.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-arm.lo `test -f 'simple/arm.c' || echo '$(srcdir)/'`simple/arm.c
+
+liblzma_la-armthumb.lo: simple/armthumb.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-armthumb.lo -MD -MP -MF $(DEPDIR)/liblzma_la-armthumb.Tpo -c -o liblzma_la-armthumb.lo `test -f 'simple/armthumb.c' || echo '$(srcdir)/'`simple/armthumb.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-armthumb.Tpo $(DEPDIR)/liblzma_la-armthumb.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='simple/armthumb.c' object='liblzma_la-armthumb.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-armthumb.lo `test -f 'simple/armthumb.c' || echo '$(srcdir)/'`simple/armthumb.c
+
+liblzma_la-sparc.lo: simple/sparc.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT liblzma_la-sparc.lo -MD -MP -MF $(DEPDIR)/liblzma_la-sparc.Tpo -c -o liblzma_la-sparc.lo `test -f 'simple/sparc.c' || echo '$(srcdir)/'`simple/sparc.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/liblzma_la-sparc.Tpo $(DEPDIR)/liblzma_la-sparc.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='simple/sparc.c' object='liblzma_la-sparc.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o liblzma_la-sparc.lo `test -f 'simple/sparc.c' || echo '$(srcdir)/'`simple/sparc.c
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+install-docDATA: $(doc_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(docdir)" || $(MKDIR_P) "$(DESTDIR)$(docdir)"
+ @list='$(doc_DATA)'; test -n "$(docdir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(docdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(docdir)" || exit $$?; \
+ done
+
+uninstall-docDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(doc_DATA)'; test -n "$(docdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(docdir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(docdir)" && rm -f $$files
+install-pkgconfigDATA: $(pkgconfig_DATA)
+ @$(NORMAL_INSTALL)
+ test -z "$(pkgconfigdir)" || $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)"
+ @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfigdir)" || exit $$?; \
+ done
+
+uninstall-pkgconfigDATA:
+ @$(NORMAL_UNINSTALL)
+ @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(pkgconfigdir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(pkgconfigdir)" && rm -f $$files
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+# (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+$(RECURSIVE_TARGETS):
+ @failcom='exit 1'; \
+ for f in x $$MAKEFLAGS; do \
+ case $$f in \
+ *=* | --[!k]*);; \
+ *k*) failcom='fail=yes';; \
+ esac; \
+ done; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+$(RECURSIVE_CLEAN_TARGETS):
+ @failcom='exit 1'; \
+ for f in x $$MAKEFLAGS; do \
+ case $$f in \
+ *=* | --[!k]*);; \
+ *k*) failcom='fail=yes';; \
+ esac; \
+ done; \
+ dot_seen=no; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ rev=''; for subdir in $$list; do \
+ if test "$$subdir" = "."; then :; else \
+ rev="$$subdir $$rev"; \
+ fi; \
+ done; \
+ rev="$$rev ."; \
+ target=`echo $@ | sed s/-recursive//`; \
+ for subdir in $$rev; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done && test -z "$$fail"
+tags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
+ done
+ctags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
+ done
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
+ fi; \
+ done
+ @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+ $(am__relativize); \
+ new_distdir=$$reldir; \
+ dir1=$$subdir; dir2="$(top_distdir)"; \
+ $(am__relativize); \
+ new_top_distdir=$$reldir; \
+ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+ ($(am__cd) $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$new_top_distdir" \
+ distdir="$$new_distdir" \
+ am__remove_distdir=: \
+ am__skip_length_check=: \
+ am__skip_mode_fix=: \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(LTLIBRARIES) $(DATA)
+installdirs: installdirs-recursive
+installdirs-am:
+ for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(docdir)" "$(DESTDIR)$(pkgconfigdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \
+ mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am: install-docDATA install-pkgconfigDATA
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am: install-libLTLIBRARIES
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am: uninstall-docDATA uninstall-libLTLIBRARIES \
+ uninstall-pkgconfigDATA
+
+.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \
+ install-am install-strip tags-recursive
+
+.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
+ all all-am check check-am clean clean-generic \
+ clean-libLTLIBRARIES clean-libtool ctags ctags-recursive \
+ distclean distclean-compile distclean-generic \
+ distclean-libtool distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-docDATA install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-libLTLIBRARIES \
+ install-man install-pdf install-pdf-am install-pkgconfigDATA \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ tags tags-recursive uninstall uninstall-am uninstall-docDATA \
+ uninstall-libLTLIBRARIES uninstall-pkgconfigDATA
+
+
+# Windows resource compiler support. libtool knows what to do with .rc
+# files, but Automake (<= 1.11 at least) doesn't know.
+#
+# We want the resource file only in shared liblzma. To avoid linking it into
+# static liblzma, we overwrite the static object file with an object file
+# compiled from empty input. Note that GNU-specific features are OK here,
+# because on Windows we are compiled with the GNU toolchain.
+.rc.lo:
+ $(LIBTOOL) --mode=compile $(RC) $(DEFS) $(DEFAULT_INCLUDES) \
+ $(INCLUDES) $(liblzma_la_CPPFLAGS) $(CPPFLAGS) $(RCFLAGS) \
+ -i $< -o $@
+ echo > empty.c
+ $(COMPILE) -c empty.c -o $(*D)/$(*F).o
+
+# Remove ordinals from the generated .def file. People must link by name,
+# not by ordinal, because no one is going to track the ordinal numbers.
+liblzma.def: liblzma.la liblzma.def.in
+ sed 's/ \+@ *[0-9]\+//' liblzma.def.in > liblzma.def
+
+# Creating liblzma.def.in is a side effect of linking the library.
+liblzma.def.in: liblzma.la
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/Makefile.am
new file mode 100644
index 00000000..0992d221
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/Makefile.am
@@ -0,0 +1,23 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+nobase_include_HEADERS = \
+ lzma.h \
+ lzma/base.h \
+ lzma/bcj.h \
+ lzma/block.h \
+ lzma/check.h \
+ lzma/container.h \
+ lzma/delta.h \
+ lzma/filter.h \
+ lzma/index.h \
+ lzma/index_hash.h \
+ lzma/lzma.h \
+ lzma/stream_flags.h \
+ lzma/subblock.h \
+ lzma/version.h \
+ lzma/vli.h
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/Makefile.in
new file mode 100644
index 00000000..f4f9a3e7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/Makefile.in
@@ -0,0 +1,512 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = src/liblzma/api
+DIST_COMMON = $(nobase_include_HEADERS) $(srcdir)/Makefile.am \
+ $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__installdirs = "$(DESTDIR)$(includedir)"
+HEADERS = $(nobase_include_HEADERS)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+nobase_include_HEADERS = \
+ lzma.h \
+ lzma/base.h \
+ lzma/bcj.h \
+ lzma/block.h \
+ lzma/check.h \
+ lzma/container.h \
+ lzma/delta.h \
+ lzma/filter.h \
+ lzma/index.h \
+ lzma/index_hash.h \
+ lzma/lzma.h \
+ lzma/stream_flags.h \
+ lzma/subblock.h \
+ lzma/version.h \
+ lzma/vli.h
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/liblzma/api/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/liblzma/api/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+install-nobase_includeHEADERS: $(nobase_include_HEADERS)
+ @$(NORMAL_INSTALL)
+ test -z "$(includedir)" || $(MKDIR_P) "$(DESTDIR)$(includedir)"
+ @list='$(nobase_include_HEADERS)'; test -n "$(includedir)" || list=; \
+ $(am__nobase_list) | while read dir files; do \
+ xfiles=; for file in $$files; do \
+ if test -f "$$file"; then xfiles="$$xfiles $$file"; \
+ else xfiles="$$xfiles $(srcdir)/$$file"; fi; done; \
+ test -z "$$xfiles" || { \
+ test "x$$dir" = x. || { \
+ echo "$(MKDIR_P) '$(DESTDIR)$(includedir)/$$dir'"; \
+ $(MKDIR_P) "$(DESTDIR)$(includedir)/$$dir"; }; \
+ echo " $(INSTALL_HEADER) $$xfiles '$(DESTDIR)$(includedir)/$$dir'"; \
+ $(INSTALL_HEADER) $$xfiles "$(DESTDIR)$(includedir)/$$dir" || exit $$?; }; \
+ done
+
+uninstall-nobase_includeHEADERS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(nobase_include_HEADERS)'; test -n "$(includedir)" || list=; \
+ $(am__nobase_strip_setup); files=`$(am__nobase_strip)`; \
+ test -n "$$files" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(includedir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(includedir)" && rm -f $$files
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(HEADERS)
+installdirs:
+ for dir in "$(DESTDIR)$(includedir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-nobase_includeHEADERS
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-nobase_includeHEADERS
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+ clean-libtool ctags distclean distclean-generic \
+ distclean-libtool distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-nobase_includeHEADERS \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ tags uninstall uninstall-am uninstall-nobase_includeHEADERS
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma.h
new file mode 100644
index 00000000..c62342da
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma.h
@@ -0,0 +1,323 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file api/lzma.h
+ * \brief The public API of liblzma data compression library
+ *
+ * liblzma is a public domain general-purpose data compression library with
+ * a zlib-like API. The native file format is .xz, but also the old .lzma
+ * format and raw (no headers) streams are supported. Multiple compression
+ * algorithms (filters) are supported. Currently LZMA2 is the primary filter.
+ *
+ * liblzma is part of XZ Utils <http://tukaani.org/xz/>. XZ Utils includes
+ * a gzip-like command line tool named xz and some other tools. XZ Utils
+ * is developed and maintained by Lasse Collin.
+ *
+ * Major parts of liblzma are based on Igor Pavlov's public domain LZMA SDK
+ * <http://7-zip.org/sdk.html>.
+ *
+ * The SHA-256 implementation is based on the public domain code found from
+ * 7-Zip <http://7-zip.org/>, which has a modified version of the public
+ * domain SHA-256 code found from Crypto++ <http://www.cryptopp.com/>.
+ * The SHA-256 code in Crypto++ was written by Kevin Springle and Wei Dai.
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef LZMA_H
+#define LZMA_H
+
+/*****************************
+ * Required standard headers *
+ *****************************/
+
+/*
+ * liblzma API headers need some standard types and macros. To allow
+ * including lzma.h without requiring the application to include other
+ * headers first, lzma.h includes the required standard headers unless
+ * they already seem to be included or if LZMA_MANUAL_HEADERS
+ * has been defined.
+ *
+ * Here's what types and macros are needed and from which headers:
+ * - stddef.h: size_t, NULL
+ * - stdint.h: uint8_t, uint32_t, uint64_t, UINT32_C(n), UINT64_C(n),
+ * UINT32_MAX, UINT64_MAX
+ *
+ * However, inttypes.h is a little more portable than stdint.h, although
+ * inttypes.h declares some unneeded things compared to plain stdint.h.
+ *
+ * The hacks below aren't perfect; specifically, they assume that inttypes.h
+ * exists and that it typedefs at least uint8_t, uint32_t, and uint64_t,
+ * and that, in case of incomplete inttypes.h, unsigned int is 32-bit.
+ * If the application already takes care of setting up all the types and
+ * macros properly (for example by using gnulib's stdint.h or inttypes.h),
+ * we try to detect that the macros are already defined and don't include
+ * inttypes.h here again. However, you may define LZMA_MANUAL_HEADERS to
+ * force this file to never include any system headers.
+ *
+ * Some could argue that liblzma API should provide all the required types,
+ * for example lzma_uint64, LZMA_UINT64_C(n), and LZMA_UINT64_MAX. This was
+ * seen as an unnecessary mess, since most systems already provide all the necessary
+ * types and macros in the standard headers.
+ *
+ * Note that liblzma API still has lzma_bool, because using stdbool.h would
+ * break C89 and C++ programs on many systems. sizeof(bool) in C99 isn't
+ * necessarily the same as sizeof(bool) in C++.
+ */
+
+#ifndef LZMA_MANUAL_HEADERS
+ /*
+ * I suppose this works portably also in C++. Note that in C++,
+ * we need to get size_t into the global namespace.
+ */
+# include <stddef.h>
+
+ /*
+ * Skip inttypes.h if we already have all the required macros. If we
+ * have the macros, we assume that we have the matching typedefs too.
+ */
+# if !defined(UINT32_C) || !defined(UINT64_C) \
+ || !defined(UINT32_MAX) || !defined(UINT64_MAX)
+ /*
+ * MSVC has no C99 support, and thus it cannot be used to
+ * compile liblzma. The liblzma API has to still be usable
+ * from MSVC, so we need to define the required standard
+ * integer types here.
+ */
+# if defined(_WIN32) && defined(_MSC_VER)
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int32 uint32_t;
+ typedef unsigned __int64 uint64_t;
+# else
+ /* Use the standard inttypes.h. */
+# ifdef __cplusplus
+ /*
+ * C99 sections 7.18.2 and 7.18.4 specify that
+ * in C++, implementations define the limit
+ * and constant macros only if specifically
+ * requested. Note that if you want the
+ * format macros (PRIu64 etc.) too, you need
+ * to define __STDC_FORMAT_MACROS before
+ * including lzma.h, since re-including
+ * inttypes.h with __STDC_FORMAT_MACROS
+ * defined doesn't necessarily work.
+ */
+# ifndef __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS 1
+# endif
+# ifndef __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS 1
+# endif
+# endif
+
+# include <inttypes.h>
+# endif
+
+ /*
+ * Some old systems have only the typedefs in inttypes.h, and
+ * lack all the macros. For those systems, we need a few more
+ * hacks. We assume that unsigned int is 32-bit and unsigned
+ * long is either 32-bit or 64-bit. If these hacks aren't
+ * enough, the application has to setup the types manually
+ * before including lzma.h.
+ */
+# ifndef UINT32_C
+# if defined(_WIN32) && defined(_MSC_VER)
+# define UINT32_C(n) n ## UI32
+# else
+# define UINT32_C(n) n ## U
+# endif
+# endif
+
+# ifndef UINT64_C
+# if defined(_WIN32) && defined(_MSC_VER)
+# define UINT64_C(n) n ## UI64
+# else
+ /* Get ULONG_MAX. */
+# include <limits.h>
+# if ULONG_MAX == 4294967295UL
+# define UINT64_C(n) n ## ULL
+# else
+# define UINT64_C(n) n ## UL
+# endif
+# endif
+# endif
+
+# ifndef UINT32_MAX
+# define UINT32_MAX (UINT32_C(4294967295))
+# endif
+
+# ifndef UINT64_MAX
+# define UINT64_MAX (UINT64_C(18446744073709551615))
+# endif
+# endif
+#endif /* ifdef LZMA_MANUAL_HEADERS */
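For illustration, a minimal sketch of the manual-headers route described above (a hypothetical application-side usage, not taken from these sources): the application provides the integer types itself and then defines LZMA_MANUAL_HEADERS so that lzma.h includes nothing on its own.

    /* The application sets up the standard types first... */
    #include <stddef.h>     /* size_t, NULL */
    #include <inttypes.h>   /* uint8_t, uint32_t, uint64_t, UINT32_C(), ... */

    /* ...then tells lzma.h not to include any system headers. */
    #define LZMA_MANUAL_HEADERS 1
    #include <lzma.h>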
+
+
+/******************
+ * LZMA_API macro *
+ ******************/
+
+/*
+ * Some systems require (or at least recommend) that the functions and
+ * function pointers are declared specially in the headers. LZMA_API_IMPORT
+ * is for importing symbols and LZMA_API_CALL is to specify calling
+ * convention.
+ *
+ * By default it is assumed that the application will link dynamically
+ * against liblzma. #define LZMA_API_STATIC in your application if you
+ * want to link against static liblzma. If you don't care about portability
+ * to operating systems like Windows, or at least don't care about linking
+ * against static liblzma on them, don't worry about LZMA_API_STATIC. That
+ * is, most developers will never need to use LZMA_API_STATIC.
+ *
+ * Cygwin is a special case on Windows. We rely on GCC doing the right thing
+ * and thus don't use dllimport and don't specify the calling convention.
+ */
+#ifndef LZMA_API_IMPORT
+# if !defined(LZMA_API_STATIC) && defined(_WIN32) && !defined(__CYGWIN__)
+# define LZMA_API_IMPORT __declspec(dllimport)
+# else
+# define LZMA_API_IMPORT
+# endif
+#endif
+
+#ifndef LZMA_API_CALL
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# define LZMA_API_CALL __cdecl
+# else
+# define LZMA_API_CALL
+# endif
+#endif
+
+#ifndef LZMA_API
+# define LZMA_API(type) LZMA_API_IMPORT type LZMA_API_CALL
+#endif
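As a hedged example of the linkage knob described above, an application that links against static liblzma on Windows would define LZMA_API_STATIC before including the header (on other platforms the define is harmless):

    #define LZMA_API_STATIC
    #include <lzma.h>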
+
+
+/***********
+ * nothrow *
+ ***********/
+
+/*
+ * None of the functions in liblzma may throw an exception. Even
+ * the functions that use callback functions won't throw exceptions,
+ * because liblzma would break if a callback function threw an exception.
+ */
+#ifndef lzma_nothrow
+# if defined(__cplusplus)
+# define lzma_nothrow throw()
+# elif __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# define lzma_nothrow __attribute__((__nothrow__))
+# else
+# define lzma_nothrow
+# endif
+#endif
+
+
+/********************
+ * GNU C extensions *
+ ********************/
+
+/*
+ * GNU C extensions are used conditionally in the public API. It doesn't
+ * break anything if these are sometimes enabled and sometimes not, only
+ * affects warnings and optimizations.
+ */
+#if __GNUC__ >= 3
+# ifndef lzma_attribute
+# define lzma_attribute(attr) __attribute__(attr)
+# endif
+
+# ifndef lzma_restrict
+# define lzma_restrict __restrict__
+# endif
+
+ /* warn_unused_result was added in GCC 3.4. */
+# ifndef lzma_attr_warn_unused_result
+# if __GNUC__ == 3 && __GNUC_MINOR__ < 4
+# define lzma_attr_warn_unused_result
+# endif
+# endif
+
+#else
+# ifndef lzma_attribute
+# define lzma_attribute(attr)
+# endif
+
+# ifndef lzma_restrict
+# if __STDC_VERSION__ >= 199901L
+# define lzma_restrict restrict
+# else
+# define lzma_restrict
+# endif
+# endif
+#endif
+
+
+#ifndef lzma_attr_pure
+# define lzma_attr_pure lzma_attribute((__pure__))
+#endif
+
+#ifndef lzma_attr_const
+# define lzma_attr_const lzma_attribute((__const__))
+#endif
+
+#ifndef lzma_attr_warn_unused_result
+# define lzma_attr_warn_unused_result \
+ lzma_attribute((__warn_unused_result__))
+#endif
+
+
+/**************
+ * Subheaders *
+ **************/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Subheaders check that this is defined. It is to prevent including
+ * them directly from applications.
+ */
+#define LZMA_H_INTERNAL 1
+
+/* Basic features */
+#include "lzma/version.h"
+#include "lzma/base.h"
+#include "lzma/vli.h"
+#include "lzma/check.h"
+
+/* Filters */
+#include "lzma/filter.h"
+#include "lzma/subblock.h"
+#include "lzma/bcj.h"
+#include "lzma/delta.h"
+#include "lzma/lzma.h"
+
+/* Container formats */
+#include "lzma/container.h"
+
+/* Advanced features */
+#include "lzma/stream_flags.h"
+#include "lzma/block.h"
+#include "lzma/index.h"
+#include "lzma/index_hash.h"
+
+/*
+ * All subheaders included. Undefine LZMA_H_INTERNAL to prevent applications
+ * re-including the subheaders.
+ */
+#undef LZMA_H_INTERNAL
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ifndef LZMA_H */
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/base.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/base.h
new file mode 100644
index 00000000..9cf0a8c8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/base.h
@@ -0,0 +1,598 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/base.h
+ * \brief Data types and functions used in many places in liblzma API
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/**
+ * \brief Boolean
+ *
+ * This is here because C89 doesn't have stdbool.h. To set a value for
+ * variables having type lzma_bool, you can use
+ * - C99's `true' and `false' from stdbool.h;
+ * - C++'s internal `true' and `false'; or
+ * - integers one (true) and zero (false).
+ */
+typedef unsigned char lzma_bool;
+
+
+/**
+ * \brief Type of reserved enumeration variable in structures
+ *
+ * To avoid breaking library ABI when new features are added, several
+ * structures contain extra variables that may be used in future. Since
+ * sizeof(enum) can be different than sizeof(int), and sizeof(enum) may
+ * even vary depending on the range of enumeration constants, we specify
+ * a separate type to be used for reserved enumeration variables. All
+ * enumeration constants in liblzma API will be non-negative and less
+ * than 128, which should guarantee that the ABI won't break even when
+ * new constants are added to existing enumerations.
+ */
+typedef enum {
+ LZMA_RESERVED_ENUM = 0
+} lzma_reserved_enum;
+
+
+/**
+ * \brief Return values used by several functions in liblzma
+ *
+ * Check the descriptions of specific functions to find out which return
+ * values they can return. With some functions the return values may have
+ * more specific meanings than described here; those differences are
+ * described per-function basis.
+ */
+typedef enum {
+ LZMA_OK = 0,
+ /**<
+ * \brief Operation completed successfully
+ */
+
+ LZMA_STREAM_END = 1,
+ /**<
+ * \brief End of stream was reached
+ *
+ * In encoder, LZMA_SYNC_FLUSH, LZMA_FULL_FLUSH, or
+ * LZMA_FINISH was finished. In decoder, this indicates
+ * that all the data was successfully decoded.
+ *
+ * In all cases, when LZMA_STREAM_END is returned, the last
+ * output bytes should be picked from strm->next_out.
+ */
+
+ LZMA_NO_CHECK = 2,
+ /**<
+ * \brief Input stream has no integrity check
+ *
+ * This return value can be returned only if the
+ * LZMA_TELL_NO_CHECK flag was used when initializing
+ * the decoder. LZMA_NO_CHECK is just a warning, and
+ * the decoding can be continued normally.
+ *
+ * It is possible to call lzma_get_check() immediately after
+ * lzma_code has returned LZMA_NO_CHECK. The result will
+ * naturally be LZMA_CHECK_NONE, but the possibility to call
+ * lzma_get_check() may be convenient in some applications.
+ */
+
+ LZMA_UNSUPPORTED_CHECK = 3,
+ /**<
+ * \brief Cannot calculate the integrity check
+ *
+ * The usage of this return value is different in encoders
+ * and decoders.
+ *
+ * Encoders can return this value only from the initialization
+ * function. If initialization fails with this value, the
+ * encoding cannot be done, because there's no way to produce
+ * output with the correct integrity check.
+ *
+ * Decoders can return this value only from lzma_code() and
+ * only if the LZMA_TELL_UNSUPPORTED_CHECK flag was used when
+ * initializing the decoder. The decoding can still be
+ * continued normally even if the check type is unsupported,
+ * but naturally the check will not be validated, and possible
+ * errors may go undetected.
+ *
+ * With decoder, it is possible to call lzma_get_check()
+ * immediately after lzma_code() has returned
+ * LZMA_UNSUPPORTED_CHECK. This way it is possible to find
+ * out what the unsupported Check ID was.
+ */
+
+ LZMA_GET_CHECK = 4,
+ /**<
+ * \brief Integrity check type is now available
+ *
+ * This value can be returned only by the lzma_code() function
+ * and only if the decoder was initialized with the
+ * LZMA_TELL_ANY_CHECK flag. LZMA_GET_CHECK tells the
+ * application that it may now call lzma_get_check() to find
+ * out the Check ID. This can be used, for example, to
+ * implement a decoder that accepts only files that have
+ * strong enough integrity check.
+ */
+
+ LZMA_MEM_ERROR = 5,
+ /**<
+ * \brief Cannot allocate memory
+ *
+ * Memory allocation failed, or the size of the allocation
+ * would be greater than SIZE_MAX.
+ *
+ * Due to internal implementation reasons, the coding cannot
+ * be continued even if more memory were made available after
+ * LZMA_MEM_ERROR.
+ */
+
+ LZMA_MEMLIMIT_ERROR = 6,
+ /**<
+ * \brief Memory usage limit was reached
+ *
+ * Decoder would need more memory than allowed by the
+ * specified memory usage limit. To continue decoding,
+ * the memory usage limit has to be increased with
+ * lzma_memlimit_set().
+ */
+
+ LZMA_FORMAT_ERROR = 7,
+ /**<
+ * \brief File format not recognized
+ *
+ * The decoder did not recognize the input as supported file
+ * format. This error can occur, for example, when trying to
+ * decode .lzma format file with lzma_stream_decoder,
+ * because lzma_stream_decoder accepts only the .xz format.
+ */
+
+ LZMA_OPTIONS_ERROR = 8,
+ /**<
+ * \brief Invalid or unsupported options
+ *
+ * Invalid or unsupported options, for example
+ * - unsupported filter(s) or filter options; or
+ * - reserved bits set in headers (decoder only).
+ *
+ * Rebuilding liblzma with more features enabled, or
+ * upgrading to a newer version of liblzma may help.
+ */
+
+ LZMA_DATA_ERROR = 9,
+ /**<
+ * \brief Data is corrupt
+ *
+ * The usage of this return value is different in encoders
+ * and decoders. In both encoder and decoder, the coding
+ * cannot continue after this error.
+ *
+ * Encoders return this if size limits of the target file
+ * format would be exceeded. These limits are huge, thus
+ * getting this error from an encoder is mostly theoretical.
+ * For example, the maximum compressed and uncompressed
+ * size of a .xz Stream is roughly 8 EiB (2^63 bytes).
+ *
+ * Decoders return this error if the input data is corrupt.
+ * This can mean, for example, invalid CRC32 in headers
+ * or invalid check of uncompressed data.
+ */
+
+ LZMA_BUF_ERROR = 10,
+ /**<
+ * \brief No progress is possible
+ *
+ * This error code is returned when the coder cannot consume
+ * any new input and produce any new output. The most common
+ * reason for this error is that the input stream being
+ * decoded is truncated or corrupt.
+ *
+ * This error is not fatal. Coding can be continued normally
+ * by providing more input and/or more output space, if
+ * possible.
+ *
+ * Typically the first call to lzma_code() that can do no
+ * progress returns LZMA_OK instead of LZMA_BUF_ERROR. Only
+ * the second consecutive call doing no progress will return
+ * LZMA_BUF_ERROR. This is intentional.
+ *
+ * With zlib, Z_BUF_ERROR may be returned even if the
+ * application is doing nothing wrong, so apps will need
+ * to handle Z_BUF_ERROR specially. The above hack
+ * guarantees that liblzma never returns LZMA_BUF_ERROR
+ * to properly written applications unless the input file
+ * is truncated or corrupt. This should simplify the
+ * applications a little.
+ */
+
+ LZMA_PROG_ERROR = 11,
+ /**<
+ * \brief Programming error
+ *
+ * This indicates that the arguments given to the function are
+ * invalid or the internal state of the decoder is corrupt.
+ * - Function arguments are invalid or the structures
+ * pointed by the argument pointers are invalid
+ * e.g. if strm->next_out has been set to NULL and
+ * strm->avail_out > 0 when calling lzma_code().
+ * - lzma_* functions have been called in wrong order
+ * e.g. lzma_code() was called right after lzma_end().
+ * - If errors occur randomly, the reason might be flaky
+ * hardware.
+ *
+ * If you think that your code is correct, this error code
+ * can be a sign of a bug in liblzma. See the documentation
+ * how to report bugs.
+ */
+} lzma_ret;
+
+
+/**
+ * \brief The `action' argument for lzma_code()
+ *
+ * After the first use of LZMA_SYNC_FLUSH, LZMA_FULL_FLUSH, or LZMA_FINISH,
+ * the same `action' must be used until lzma_code() returns LZMA_STREAM_END.
+ * Also, the amount of input (that is, strm->avail_in) must not be modified
+ * by the application until lzma_code() returns LZMA_STREAM_END. Changing the
+ * `action' or modifying the amount of input will make lzma_code() return
+ * LZMA_PROG_ERROR.
+ */
+typedef enum {
+ LZMA_RUN = 0,
+ /**<
+ * \brief Continue coding
+ *
+ * Encoder: Encode as much input as possible. Some internal
+ * buffering will probably be done (depends on the filter
+ * chain in use), which causes latency: the input used won't
+ * usually be decodeable from the output of the same
+ * lzma_code() call.
+ *
+ * Decoder: Decode as much input as possible and produce as
+ * much output as possible.
+ */
+
+ LZMA_SYNC_FLUSH = 1,
+ /**<
+ * \brief Make all the input available at output
+ *
+ * Normally the encoder introduces some latency.
+ * LZMA_SYNC_FLUSH forces all the buffered data to be
+ * available at output without resetting the internal
+ * state of the encoder. This way it is possible to use
+ * a compressed stream, for example, for communication over
+ * a network.
+ *
+ * Only some filters support LZMA_SYNC_FLUSH. Trying to use
+ * LZMA_SYNC_FLUSH with filters that don't support it will
+ * make lzma_code() return LZMA_OPTIONS_ERROR. For example,
+ * LZMA1 doesn't support LZMA_SYNC_FLUSH but LZMA2 does.
+ *
+ * Using LZMA_SYNC_FLUSH very often can dramatically reduce
+ * the compression ratio. With some filters (for example,
+ * LZMA2), finetuning the compression options may help
+ * mitigate this problem significantly.
+ *
+ * Decoders don't support LZMA_SYNC_FLUSH.
+ */
+
+ LZMA_FULL_FLUSH = 2,
+ /**<
+ * \brief Finish encoding of the current Block
+ *
+ * Finish encoding of the current Block. All the input
+ * data going to the current Block must have been given
+ * to the encoder (the last bytes can still be pending in
+ * next_in). Call lzma_code() with LZMA_FULL_FLUSH until
+ * it returns LZMA_STREAM_END. Then continue normally with
+ * LZMA_RUN or finish the Stream with LZMA_FINISH.
+ *
+ * This action is currently supported only by Stream encoder
+ * and easy encoder (which uses Stream encoder). If there is
+ * no unfinished Block, no empty Block is created.
+ */
+
+ LZMA_FINISH = 3
+ /**<
+ * \brief Finish the coding operation
+ *
+ * Finishes the coding operation. All the input data must
+ * have been given to the encoder (the last bytes can still
+ * be pending in next_in). Call lzma_code() with LZMA_FINISH
+ * until it returns LZMA_STREAM_END. Once LZMA_FINISH has
+ * been used, the amount of input must no longer be changed
+ * by the application.
+ *
+ * When decoding, using LZMA_FINISH is optional unless the
+ * LZMA_CONCATENATED flag was used when the decoder was
+ * initialized. When LZMA_CONCATENATED was not used, the only
+ * effect of LZMA_FINISH is that the amount of input must not
+ * be changed just like in the encoder.
+ */
+} lzma_action;
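A sketch of the rule above: once a flushing action has been chosen, the same action is repeated until lzma_code() reports LZMA_STREAM_END. The helper name is hypothetical, and real code would also drain strm->next_out whenever strm->avail_out reaches zero.

    /* Assumes <lzma.h> is included and strm holds an encoder whose filter
     * chain supports LZMA_SYNC_FLUSH (for example, an LZMA2-based chain). */
    static lzma_ret flush_pending(lzma_stream *strm)
    {
        lzma_ret ret;
        do {
            ret = lzma_code(strm, LZMA_SYNC_FLUSH);
            /* A real caller would write out the produced data here. */
        } while (ret == LZMA_OK);
        return ret; /* LZMA_STREAM_END on success, an error code otherwise */
    }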
+
+
+/**
+ * \brief Custom functions for memory handling
+ *
+ * A pointer to lzma_allocator may be passed via lzma_stream structure
+ * to liblzma, and some advanced functions take a pointer to lzma_allocator
+ * as a separate function argument. The library will use the functions
+ * specified in lzma_allocator for memory handling instead of the default
+ * malloc() and free(). C++ users should note that the custom memory
+ * handling functions must not throw exceptions.
+ *
+ * liblzma doesn't make an internal copy of lzma_allocator. Thus, it is
+ * OK to change these function pointers in the middle of the coding
+ * process, but obviously it must be done carefully to make sure that the
+ * replacement `free' can deallocate memory allocated by the earlier
+ * `alloc' function(s).
+ */
+typedef struct {
+ /**
+ * \brief Pointer to a custom memory allocation function
+ *
+ * If you don't want a custom allocator, but still want
+ * custom free(), set this to NULL and liblzma will use
+ * the standard malloc().
+ *
+ * \param opaque lzma_allocator.opaque (see below)
+ * \param nmemb Number of elements like in calloc(). liblzma
+ * will always set nmemb to 1, so it is safe to
+ * ignore nmemb in a custom allocator if you like.
+ * The nmemb argument exists only for
+ * compatibility with zlib and libbzip2.
+ * \param size Size of an element in bytes.
+ * liblzma never sets this to zero.
+ *
+ * \return Pointer to the beginning of a memory block of
+ * `size' bytes, or NULL if allocation fails
+ * for some reason. When allocation fails, functions
+ * of liblzma return LZMA_MEM_ERROR.
+ *
+ * The allocator should not waste time zeroing the allocated buffers.
+ * This is not only about speed, but also memory usage, since the
+ * operating system kernel doesn't necessarily allocate the requested
+ * memory in physical memory until it is actually used. With small
+ * input files, liblzma may actually need only a fraction of the
+ * memory that it requested for allocation.
+ *
+ * \note LZMA_MEM_ERROR is also used when the size of the
+ * allocation would be greater than SIZE_MAX. Thus,
+ * don't assume that the custom allocator must have
+ * returned NULL if some function from liblzma
+ * returns LZMA_MEM_ERROR.
+ */
+ void *(LZMA_API_CALL *alloc)(void *opaque, size_t nmemb, size_t size);
+
+ /**
+ * \brief Pointer to a custom memory freeing function
+ *
+ * If you don't want a custom freeing function, but still
+ * want a custom allocator, set this to NULL and liblzma
+ * will use the standard free().
+ *
+ * \param opaque lzma_allocator.opaque (see below)
+ * \param ptr Pointer returned by lzma_allocator.alloc(),
+ * or when it is set to NULL, a pointer returned
+ * by the standard malloc().
+ */
+ void (LZMA_API_CALL *free)(void *opaque, void *ptr);
+
+ /**
+ * \brief Pointer passed to .alloc() and .free()
+ *
+ * opaque is passed as the first argument to lzma_allocator.alloc()
+ * and lzma_allocator.free(). This is intended to ease implementing
+ * custom memory allocation functions for use with liblzma.
+ *
+ * If you don't need this, you should set this to NULL.
+ */
+ void *opaque;
+
+} lzma_allocator;
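A minimal sketch of plugging in the custom functions described above; my_alloc, my_free, and my_allocator are hypothetical names, and the functions simply forward to malloc() and free():

    #include <stdlib.h>
    #include <lzma.h>

    static void *my_alloc(void *opaque, size_t nmemb, size_t size)
    {
        (void)opaque;
        /* liblzma always passes nmemb == 1, so plain malloc() is enough. */
        return malloc(nmemb * size);
    }

    static void my_free(void *opaque, void *ptr)
    {
        (void)opaque;
        free(ptr);
    }

    /* Member order matches lzma_allocator: alloc, free, opaque. */
    static lzma_allocator my_allocator = { &my_alloc, &my_free, NULL };

    /* Usage sketch: set strm.allocator = &my_allocator; before calling the
     * coder initialization function. */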
+
+
+/**
+ * \brief Internal data structure
+ *
+ * The contents of this structure is not visible outside the library.
+ */
+typedef struct lzma_internal_s lzma_internal;
+
+
+/**
+ * \brief Passing data to and from liblzma
+ *
+ * The lzma_stream structure is used for
+ * - passing pointers to input and output buffers to liblzma;
+ * - defining custom memory handler functions; and
+ * - holding a pointer to coder-specific internal data structures.
+ *
+ * Typical usage:
+ *
+ * - After allocating lzma_stream (on stack or with malloc()), it must be
+ * initialized to LZMA_STREAM_INIT (see LZMA_STREAM_INIT for details).
+ *
+ * - Initialize a coder to the lzma_stream, for example by using
+ * lzma_easy_encoder() or lzma_auto_decoder(). Some notes:
+ * - In contrast to zlib, strm->next_in and strm->next_out are
+ * ignored by all initialization functions, thus it is safe
+ * to not initialize them yet.
+ * - The initialization functions always set strm->total_in and
+ * strm->total_out to zero.
+ * - If the initialization function fails, no memory is left allocated
+ * that would require freeing with lzma_end() even if some memory was
+ * associated with the lzma_stream structure when the initialization
+ * function was called.
+ *
+ * - Use lzma_code() to do the actual work.
+ *
+ * - Once the coding has been finished, the existing lzma_stream can be
+ * reused. It is OK to reuse lzma_stream with different initialization
+ * function without calling lzma_end() first. Old allocations are
+ * automatically freed.
+ *
+ * - Finally, use lzma_end() to free the allocated memory. lzma_end() never
+ * frees the lzma_stream structure itself.
+ *
+ * The application may modify the values of total_in and total_out as it wants.
+ * They are updated by liblzma to match the amount of data read and
+ * written, but aren't used for anything else.
+ */
+typedef struct {
+ const uint8_t *next_in; /**< Pointer to the next input byte. */
+ size_t avail_in; /**< Number of available input bytes in next_in. */
+ uint64_t total_in; /**< Total number of bytes read by liblzma. */
+
+ uint8_t *next_out; /**< Pointer to the next output position. */
+ size_t avail_out; /**< Amount of free space in next_out. */
+ uint64_t total_out; /**< Total number of bytes written by liblzma. */
+
+ /**
+ * \brief Custom memory allocation functions
+ *
+ * In most cases this is NULL which makes liblzma use
+ * the standard malloc() and free().
+ */
+ lzma_allocator *allocator;
+
+ /** Internal state is not visible to applications. */
+ lzma_internal *internal;
+
+ /*
+ * Reserved space to allow possible future extensions without
+ * breaking the ABI. Excluding the initialization of this structure,
+ * you should not touch these, because the names of these variables
+ * may change.
+ */
+ void *reserved_ptr1;
+ void *reserved_ptr2;
+ uint64_t reserved_int1;
+ uint64_t reserved_int2;
+ lzma_reserved_enum reserved_enum1;
+ lzma_reserved_enum reserved_enum2;
+
+} lzma_stream;
+
+
+/**
+ * \brief Initialization for lzma_stream
+ *
+ * When you declare an instance of lzma_stream, you can immediately
+ * initialize it so that initialization functions know that no memory
+ * has been allocated yet:
+ *
+ * lzma_stream strm = LZMA_STREAM_INIT;
+ *
+ * If you need to initialize a dynamically allocated lzma_stream, you can use
+ * memset(strm_pointer, 0, sizeof(lzma_stream)). Strictly speaking, this
+ * violates the C standard since NULL may have a different internal
+ * representation than zero, but it should be portable enough in practice.
+ * Anyway, for maximum portability, you can use something like this:
+ *
+ * lzma_stream tmp = LZMA_STREAM_INIT;
+ * *strm = tmp;
+ */
+#define LZMA_STREAM_INIT \
+ { NULL, 0, 0, NULL, 0, 0, NULL, NULL, \
+ NULL, NULL, 0, 0, LZMA_RESERVED_ENUM, LZMA_RESERVED_ENUM }
+
+
+/**
+ * \brief Encode or decode data
+ *
+ * Once the lzma_stream has been successfully initialized (e.g. with
+ * lzma_stream_encoder()), the actual encoding or decoding is done
+ * using this function. The application has to update strm->next_in,
+ * strm->avail_in, strm->next_out, and strm->avail_out to pass input
+ * to and get output from liblzma.
+ *
+ * See the description of the coder-specific initialization function to find
+ * out what `action' values are supported by the coder.
+ */
+extern LZMA_API(lzma_ret) lzma_code(lzma_stream *strm, lzma_action action)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Free memory allocated for the coder data structures
+ *
+ * \param strm Pointer to lzma_stream that is at least initialized
+ * with LZMA_STREAM_INIT.
+ *
+ * After lzma_end(strm), strm->internal is guaranteed to be NULL. No other
+ * members of the lzma_stream structure are touched.
+ *
+ * \note      zlib indicates an error if the application end()s an
+ *            unfinished stream structure. liblzma doesn't do this, and
+ *            assumes that the application knows what it is doing.
+ */
+extern LZMA_API(void) lzma_end(lzma_stream *strm) lzma_nothrow;
+
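+/*
+ * Illustrative sketch of the multi-call lifecycle described above, reduced
+ * to a skeleton. It assumes an encoder initializer such as
+ * lzma_easy_encoder() from container.h; in_buf/out_buf and their sizes are
+ * hypothetical application buffers, and error handling is minimal.
+ *
+ *     lzma_stream strm = LZMA_STREAM_INIT;
+ *
+ *     if (lzma_easy_encoder(&strm, LZMA_PRESET_DEFAULT, LZMA_CHECK_CRC32)
+ *             != LZMA_OK)
+ *         return false;
+ *
+ *     strm.next_in = in_buf;
+ *     strm.avail_in = in_buf_size;
+ *     strm.next_out = out_buf;
+ *     strm.avail_out = out_buf_size;
+ *
+ *     lzma_ret ret;
+ *     do {
+ *         // LZMA_FINISH tells liblzma that no more input is coming.
+ *         ret = lzma_code(&strm, LZMA_FINISH);
+ *         // A real application would write out the produced data and
+ *         // reset next_out/avail_out here whenever avail_out reaches zero.
+ *     } while (ret == LZMA_OK);
+ *
+ *     lzma_bool ok = (ret == LZMA_STREAM_END);
+ *     lzma_end(&strm);   // frees internal allocations, not strm itself
+ */
+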
+
+/**
+ * \brief Get the memory usage of decoder filter chain
+ *
+ * This function is currently supported only when *strm has been initialized
+ * with a function that takes a memlimit argument. With other functions, you
+ * should use e.g. lzma_raw_encoder_memusage() or lzma_raw_decoder_memusage()
+ * to estimate the memory requirements.
+ *
+ * This function is useful e.g. after LZMA_MEMLIMIT_ERROR to find out how big
+ * the memory usage limit should have been to decode the input. Note that
+ * this may give misleading information if decoding .xz Streams that have
+ * multiple Blocks, because each Block can have different memory requirements.
+ *
+ * \return Rough estimate of how much memory is currently allocated
+ * for the filter decoders. If no filter chain is currently
+ * allocated, some non-zero value is still returned, which is
+ * less than or equal to what any filter chain would indicate
+ * as its memory requirement.
+ *
+ * If this function isn't supported by *strm or some other error
+ * occurs, zero is returned.
+ */
+extern LZMA_API(uint64_t) lzma_memusage(const lzma_stream *strm)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Get the current memory usage limit
+ *
+ * This function is supported only when *strm has been initialized with
+ * a function that takes a memlimit argument.
+ *
+ * \return On success, the current memory usage limit is returned
+ * (always non-zero). On error, zero is returned.
+ */
+extern LZMA_API(uint64_t) lzma_memlimit_get(const lzma_stream *strm)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Set the memory usage limit
+ *
+ * This function is supported only when *strm has been initialized with
+ * a function that takes a memlimit argument.
+ *
+ * \return - LZMA_OK: New memory usage limit successfully set.
+ * - LZMA_MEMLIMIT_ERROR: The new limit is too small.
+ * The limit was not changed.
+ * - LZMA_PROG_ERROR: Invalid arguments, e.g. *strm doesn't
+ * support memory usage limit or memlimit was zero.
+ */
+extern LZMA_API(lzma_ret) lzma_memlimit_set(
+ lzma_stream *strm, uint64_t memlimit) lzma_nothrow;
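+
+
+/*
+ * Illustrative sketch of one way to react to LZMA_MEMLIMIT_ERROR during
+ * decoding. It assumes *strm was initialized with a memlimit-taking
+ * function such as lzma_stream_decoder() from container.h;
+ * app_hard_limit is a hypothetical application-defined ceiling.
+ *
+ *     lzma_ret ret = lzma_code(&strm, LZMA_RUN);
+ *     if (ret == LZMA_MEMLIMIT_ERROR) {
+ *         // lzma_memusage() tells roughly how big the limit should have
+ *         // been; raise it and retry if the application allows that.
+ *         uint64_t needed = lzma_memusage(&strm);
+ *         if (needed != 0 && needed <= app_hard_limit
+ *                 && lzma_memlimit_set(&strm, needed) == LZMA_OK)
+ *             ret = lzma_code(&strm, LZMA_RUN);
+ *     }
+ */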
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/bcj.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/bcj.h
new file mode 100644
index 00000000..d67e70a6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/bcj.h
@@ -0,0 +1,92 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/bcj.h
+ * \brief Branch/Call/Jump conversion filters
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/* Filter IDs for lzma_filter.id */
+
+#define LZMA_FILTER_X86 LZMA_VLI_C(0x04)
+ /**<
+ * Filter for x86 binaries
+ */
+
+#define LZMA_FILTER_POWERPC LZMA_VLI_C(0x05)
+ /**<
+ * Filter for Big endian PowerPC binaries
+ */
+
+#define LZMA_FILTER_IA64 LZMA_VLI_C(0x06)
+ /**<
+ * Filter for IA64 (Itanium) binaries.
+ */
+
+#define LZMA_FILTER_ARM LZMA_VLI_C(0x07)
+ /**<
+ * Filter for ARM binaries.
+ */
+
+#define LZMA_FILTER_ARMTHUMB LZMA_VLI_C(0x08)
+ /**<
+ * Filter for ARMThumb binaries.
+ */
+
+#define LZMA_FILTER_SPARC LZMA_VLI_C(0x09)
+ /**<
+ * Filter for SPARC binaries.
+ */
+
+
+/**
+ * \brief Options for BCJ filters
+ *
+ * The BCJ filters never change the size of the data. Specifying options
+ * for them is optional: if the pointer to options is NULL, the default
+ * value is used. You probably never need to specify options to BCJ
+ * filters, so just set the options pointer to NULL and be happy.
+ *
+ * If options with non-default values have been specified when encoding,
+ * the same options must also be specified when decoding.
+ *
+ * \note At the moment, none of the BCJ filters support
+ * LZMA_SYNC_FLUSH. If LZMA_SYNC_FLUSH is specified,
+ * LZMA_OPTIONS_ERROR will be returned. If there is need,
+ * partial support for LZMA_SYNC_FLUSH can be added in future.
+ * Partial means that flushing would be possible only at
+ * offsets that are multiple of 2, 4, or 16 depending on
+ * the filter, except x86 which cannot be made to support
+ * LZMA_SYNC_FLUSH predictably.
+ */
+typedef struct {
+ /**
+ * \brief Start offset for conversions
+ *
+ * This setting is useful only when the same filter is used
+ * _separately_ for multiple sections of the same executable file,
+ * and the sections contain cross-section branch/call/jump
+ * instructions. In that case it is beneficial to set the start
+ * offset of the non-first sections so that the relative addresses
+ * of the cross-section branch/call/jump instructions will use the
+ * same absolute addresses as in the first section.
+ *
+ * When the pointer to options is NULL, the default value (zero)
+ * is used.
+ */
+ uint32_t start_offset;
+
+} lzma_options_bcj;
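+
+
+/*
+ * Illustrative sketch: a BCJ filter is normally placed in front of the
+ * actual compression filter in the chain. LZMA_FILTER_LZMA2,
+ * lzma_options_lzma, and lzma_lzma_preset() are assumed to be available
+ * from lzma/lzma.h; LZMA_PRESET_DEFAULT is defined in container.h.
+ *
+ *     lzma_options_lzma opt_lzma2;
+ *     if (lzma_lzma_preset(&opt_lzma2, LZMA_PRESET_DEFAULT))
+ *         return false;   // preset not supported by this liblzma build
+ *
+ *     lzma_filter filters[] = {
+ *         { .id = LZMA_FILTER_X86,   .options = NULL },       // defaults
+ *         { .id = LZMA_FILTER_LZMA2, .options = &opt_lzma2 },
+ *         { .id = LZMA_VLI_UNKNOWN,  .options = NULL },       // terminator
+ *     };
+ *
+ *     // The chain can then be passed to e.g. lzma_stream_encoder().
+ */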
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/block.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/block.h
new file mode 100644
index 00000000..9e3518e6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/block.h
@@ -0,0 +1,536 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/block.h
+ * \brief .xz Block handling
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/**
+ * \brief Options for the Block and Block Header encoders and decoders
+ *
+ * Different Block handling functions use different parts of this structure.
+ * Some functions read some members, others write, and some do both. Only the
+ * members listed for reading need to be initialized when the specified
+ * functions are called. The members marked for writing will be assigned
+ * new values at some point either by calling the given function or by
+ * later calls to lzma_code().
+ */
+typedef struct {
+ /**
+ * \brief Block format version
+ *
+ * To prevent API and ABI breakages if new features are needed in
+ * Block, a version number is used to indicate which fields in this
+ * structure are in use. For now, version must always be zero.
+ * With non-zero version, most Block related functions will return
+ * LZMA_OPTIONS_ERROR.
+ *
+ * The decoding functions will always set this to the lowest value
+ * that supports all the features indicated by the Block Header field.
+ * The application must check that the version number set by the
+ * decoding functions is supported by the application. Otherwise it
+ * is possible that the application will decode the Block incorrectly.
+ *
+ * Read by:
+ * - lzma_block_header_size()
+ * - lzma_block_header_encode()
+ * - lzma_block_compressed_size()
+ * - lzma_block_unpadded_size()
+ * - lzma_block_total_size()
+ * - lzma_block_encoder()
+ * - lzma_block_decoder()
+ * - lzma_block_buffer_encode()
+ * - lzma_block_buffer_decode()
+ *
+ * Written by:
+ * - lzma_block_header_decode()
+ */
+ uint32_t version;
+
+ /**
+ * \brief Size of the Block Header field
+ *
+ * This is always a multiple of four.
+ *
+ * Read by:
+ * - lzma_block_header_encode()
+ * - lzma_block_header_decode()
+ * - lzma_block_compressed_size()
+ * - lzma_block_unpadded_size()
+ * - lzma_block_total_size()
+ * - lzma_block_decoder()
+ * - lzma_block_buffer_decode()
+ *
+ * Written by:
+ * - lzma_block_header_size()
+ * - lzma_block_buffer_encode()
+ */
+ uint32_t header_size;
+# define LZMA_BLOCK_HEADER_SIZE_MIN 8
+# define LZMA_BLOCK_HEADER_SIZE_MAX 1024
+
+ /**
+ * \brief Type of integrity Check
+ *
+ * The Check ID is not stored into the Block Header, thus its value
+ * must be provided also when decoding.
+ *
+ * Read by:
+ * - lzma_block_header_encode()
+ * - lzma_block_header_decode()
+ * - lzma_block_compressed_size()
+ * - lzma_block_unpadded_size()
+ * - lzma_block_total_size()
+ * - lzma_block_encoder()
+ * - lzma_block_decoder()
+ * - lzma_block_buffer_encode()
+ * - lzma_block_buffer_decode()
+ */
+ lzma_check check;
+
+ /**
+ * \brief Size of the Compressed Data in bytes
+ *
+ * Encoding: If this is not LZMA_VLI_UNKNOWN, Block Header encoder
+ * will store this value to the Block Header. Block encoder doesn't
+ * care about this value, but will set it once the encoding has been
+ * finished.
+ *
+ * Decoding: If this is not LZMA_VLI_UNKNOWN, Block decoder will
+ * verify that the size of the Compressed Data field matches
+ * compressed_size.
+ *
+ * Usually you don't know this value when encoding in streamed mode,
+ * and thus cannot write this field into the Block Header.
+ *
+ * In non-streamed mode you can reserve space for this field before
+ * encoding the actual Block. After encoding the data, finish the
+ * Block by encoding the Block Header. Steps in detail:
+ *
+ * - Set compressed_size to some big enough value. If you don't know
+ * better, use LZMA_VLI_MAX, but remember that bigger values take
+ * more space in Block Header.
+ *
+ * - Call lzma_block_header_size() to see how much space you need to
+ * reserve for the Block Header.
+ *
+ * - Encode the Block using lzma_block_encoder() and lzma_code().
+ * It sets compressed_size to the correct value.
+ *
+ * - Use lzma_block_header_encode() to encode the Block Header.
+ * Because space was reserved in the first step, you don't need
+ * to call lzma_block_header_size() anymore, because due to
+ * reserving, header_size has to be big enough. If it is "too big",
+ * lzma_block_header_encode() will add enough Header Padding to
+ * make Block Header to match the size specified by header_size.
+ *
+ * Read by:
+ * - lzma_block_header_size()
+ * - lzma_block_header_encode()
+ * - lzma_block_compressed_size()
+ * - lzma_block_unpadded_size()
+ * - lzma_block_total_size()
+ * - lzma_block_decoder()
+ * - lzma_block_buffer_decode()
+ *
+ * Written by:
+ * - lzma_block_header_decode()
+ * - lzma_block_compressed_size()
+ * - lzma_block_encoder()
+ * - lzma_block_decoder()
+ * - lzma_block_buffer_encode()
+ * - lzma_block_buffer_decode()
+ */
+ lzma_vli compressed_size;
+
+ /**
+ * \brief Uncompressed Size in bytes
+ *
+ * This is handled very similarly to compressed_size above.
+ *
+ * uncompressed_size is needed by fewer functions than
+ * compressed_size. This is because uncompressed_size isn't
+ * needed to validate that Block stays within proper limits.
+ *
+ * Read by:
+ * - lzma_block_header_size()
+ * - lzma_block_header_encode()
+ * - lzma_block_decoder()
+ * - lzma_block_buffer_decode()
+ *
+ * Written by:
+ * - lzma_block_header_decode()
+ * - lzma_block_encoder()
+ * - lzma_block_decoder()
+ * - lzma_block_buffer_encode()
+ * - lzma_block_buffer_decode()
+ */
+ lzma_vli uncompressed_size;
+
+ /**
+ * \brief Array of filters
+ *
+ * There can be 1-4 filters. The end of the array is marked with
+ * .id = LZMA_VLI_UNKNOWN.
+ *
+ * Read by:
+ * - lzma_block_header_size()
+ * - lzma_block_header_encode()
+ * - lzma_block_encoder()
+ * - lzma_block_decoder()
+ * - lzma_block_buffer_encode()
+ * - lzma_block_buffer_decode()
+ *
+ * Written by:
+ * - lzma_block_header_decode(): Note that this does NOT free()
+ * the old filter options structures. All unused filters[] will
+ * have .id == LZMA_VLI_UNKNOWN and .options == NULL. If
+ * decoding fails, all filters[] are guaranteed to be
+ * LZMA_VLI_UNKNOWN and NULL.
+ *
+ * \note      Because the array is terminated with
+ * .id = LZMA_VLI_UNKNOWN, the actual array must
+ * have LZMA_FILTERS_MAX + 1 members or the Block
+ * Header decoder will overflow the buffer.
+ */
+ lzma_filter *filters;
+
+ /**
+ * \brief Raw value stored in the Check field
+ *
+ * After successful coding, the first lzma_check_size(check) bytes
+ * of this array contain the raw value stored in the Check field.
+ *
+ * Note that CRC32 and CRC64 are stored in little endian byte order.
+ * Take it into account if you display the Check values to the user.
+ *
+ * Written by:
+ * - lzma_block_encoder()
+ * - lzma_block_decoder()
+ * - lzma_block_buffer_encode()
+ * - lzma_block_buffer_decode()
+ */
+ uint8_t raw_check[LZMA_CHECK_SIZE_MAX];
+
+ /*
+ * Reserved space to allow possible future extensions without
+ * breaking the ABI. You should not touch these, because the names
+ * of these variables may change. These are and will never be used
+ * with the currently supported options, so it is safe to leave these
+ * uninitialized.
+ */
+ void *reserved_ptr1;
+ void *reserved_ptr2;
+ void *reserved_ptr3;
+ uint32_t reserved_int1;
+ uint32_t reserved_int2;
+ lzma_vli reserved_int3;
+ lzma_vli reserved_int4;
+ lzma_vli reserved_int5;
+ lzma_vli reserved_int6;
+ lzma_vli reserved_int7;
+ lzma_vli reserved_int8;
+ lzma_reserved_enum reserved_enum1;
+ lzma_reserved_enum reserved_enum2;
+ lzma_reserved_enum reserved_enum3;
+ lzma_reserved_enum reserved_enum4;
+ lzma_bool reserved_bool1;
+ lzma_bool reserved_bool2;
+ lzma_bool reserved_bool3;
+ lzma_bool reserved_bool4;
+ lzma_bool reserved_bool5;
+ lzma_bool reserved_bool6;
+ lzma_bool reserved_bool7;
+ lzma_bool reserved_bool8;
+
+} lzma_block;
+
+
+/**
+ * \brief Decode the Block Header Size field
+ *
+ * To decode Block Header using lzma_block_header_decode(), the size of the
+ * Block Header has to be known and stored into lzma_block.header_size.
+ * The size can be calculated from the first byte of a Block using this macro.
+ * Note that if the first byte is 0x00, it indicates beginning of Index; use
+ * this macro only when the byte is not 0x00.
+ *
+ * There is no encoding macro, because Block Header encoder is enough for that.
+ */
+#define lzma_block_header_size_decode(b) (((uint32_t)(b) + 1) * 4)
+
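+
+/*
+ * Illustrative sketch of using the macro above on the first byte of a
+ * Block Header. header_buf and block are hypothetical names.
+ *
+ *     if (header_buf[0] == 0x00) {
+ *         // Not a Block Header: this byte starts the Index field instead.
+ *     } else {
+ *         block.header_size = lzma_block_header_size_decode(header_buf[0]);
+ *         // header_size is now a multiple of four in the range [8, 1024].
+ *     }
+ */
+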
+
+/**
+ * \brief Calculate Block Header Size
+ *
+ * Calculate the minimum size needed for the Block Header field using the
+ * settings specified in the lzma_block structure. Note that it is OK to
+ * increase the calculated header_size value as long as it is a multiple of
+ * four and doesn't exceed LZMA_BLOCK_HEADER_SIZE_MAX. Increasing header_size
+ * just means that lzma_block_header_encode() will add Header Padding.
+ *
+ * \return - LZMA_OK: Size calculated successfully and stored to
+ * block->header_size.
+ * - LZMA_OPTIONS_ERROR: Unsupported version, filters or
+ * filter options.
+ * - LZMA_PROG_ERROR: Invalid values like compressed_size == 0.
+ *
+ * \note This doesn't check that all the options are valid i.e. this
+ * may return LZMA_OK even if lzma_block_header_encode() or
+ * lzma_block_encoder() would fail. If you want to validate the
+ * filter chain, consider using lzma_memlimit_encoder() which as
+ * a side-effect validates the filter chain.
+ */
+extern LZMA_API(lzma_ret) lzma_block_header_size(lzma_block *block)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Encode Block Header
+ *
+ * The caller must have calculated the size of the Block Header already with
+ * lzma_block_header_size(). If a value larger than the one calculated by
+ * lzma_block_header_size() is used, the Block Header will be padded to the
+ * specified size.
+ *
+ * \param out Beginning of the output buffer. This must be
+ * at least block->header_size bytes.
+ * \param block Block options to be encoded.
+ *
+ * \return - LZMA_OK: Encoding was successful. block->header_size
+ * bytes were written to output buffer.
+ * - LZMA_OPTIONS_ERROR: Invalid or unsupported options.
+ * - LZMA_PROG_ERROR: Invalid arguments, for example
+ * block->header_size is invalid or block->filters is NULL.
+ */
+extern LZMA_API(lzma_ret) lzma_block_header_encode(
+ const lzma_block *block, uint8_t *out)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Decode Block Header
+ *
+ * The size of the Block Header must have already been decoded with
+ * lzma_block_header_size_decode() macro and stored to block->header_size.
+ * block->filters must have been allocated, but not necessarily initialized.
+ * Possible existing filter options are _not_ freed.
+ *
+ * \param block Destination for block options with header_size
+ * properly initialized.
+ * \param allocator lzma_allocator for custom allocator functions.
+ * Set to NULL to use malloc() (and also free()
+ * if an error occurs).
+ * \param in Beginning of the input buffer. This must be
+ * at least block->header_size bytes.
+ *
+ * \return - LZMA_OK: Decoding was successful. block->header_size
+ * bytes were read from the input buffer.
+ * - LZMA_OPTIONS_ERROR: The Block Header specifies some
+ * unsupported options such as unsupported filters.
+ * - LZMA_DATA_ERROR: Block Header is corrupt, for example,
+ * the CRC32 doesn't match.
+ * - LZMA_PROG_ERROR: Invalid arguments, for example
+ * block->header_size is invalid or block->filters is NULL.
+ */
+extern LZMA_API(lzma_ret) lzma_block_header_decode(lzma_block *block,
+ lzma_allocator *allocator, const uint8_t *in)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
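+
+/*
+ * Illustrative sketch of decoding a Block Header once block.header_size
+ * has been filled in with lzma_block_header_size_decode(). As noted in
+ * the lzma_block description, the filters array needs
+ * LZMA_FILTERS_MAX + 1 members. stream_check and header_buf are
+ * hypothetical names.
+ *
+ *     lzma_filter filters[LZMA_FILTERS_MAX + 1];
+ *     lzma_block block;
+ *
+ *     block.version = 0;
+ *     block.check = stream_check;   // Check ID from the Stream Header
+ *     block.header_size = lzma_block_header_size_decode(header_buf[0]);
+ *     block.filters = filters;
+ *
+ *     if (lzma_block_header_decode(&block, NULL, header_buf) != LZMA_OK)
+ *         return false;             // corrupt or unsupported Block Header
+ */
+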
+
+/**
+ * \brief Validate and set Compressed Size according to Unpadded Size
+ *
+ * Block Header stores Compressed Size, but Index has Unpadded Size. If the
+ * application has already parsed the Index and is now decoding Blocks,
+ * it can calculate Compressed Size from Unpadded Size. This function does
+ * exactly that with error checking:
+ *
+ * - Compressed Size calculated from Unpadded Size must be a positive integer,
+ * that is, Unpadded Size must be big enough that after Block Header and
+ * Check fields there's still at least one byte for Compressed Size.
+ *
+ * - If Compressed Size was present in Block Header, the new value
+ * calculated from Unpadded Size is compared against the value
+ * from Block Header.
+ *
+ * \note This function must be called _after_ decoding the Block Header
+ * field so that it can properly validate Compressed Size if it
+ * was present in Block Header.
+ *
+ * \return - LZMA_OK: block->compressed_size was set successfully.
+ * - LZMA_DATA_ERROR: unpadded_size is too small compared to
+ * block->header_size and lzma_check_size(block->check).
+ * - LZMA_PROG_ERROR: Some values are invalid. For example,
+ * block->header_size must be a multiple of four and
+ * between 8 and 1024 inclusive.
+ */
+extern LZMA_API(lzma_ret) lzma_block_compressed_size(
+ lzma_block *block, lzma_vli unpadded_size)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Calculate Unpadded Size
+ *
+ * The Index field stores Unpadded Size and Uncompressed Size. The latter
+ * can be taken directly from the lzma_block structure after coding a Block,
+ * but Unpadded Size needs to be calculated from Block Header Size,
+ * Compressed Size, and size of the Check field. This is where this function
+ * is needed.
+ *
+ * \return Unpadded Size on success, or zero on error.
+ */
+extern LZMA_API(lzma_vli) lzma_block_unpadded_size(const lzma_block *block)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Calculate the total encoded size of a Block
+ *
+ * This is equivalent to lzma_block_unpadded_size() except that the returned
+ * value includes the size of the Block Padding field.
+ *
+ * \return On success, total encoded size of the Block. On error,
+ * zero is returned.
+ */
+extern LZMA_API(lzma_vli) lzma_block_total_size(const lzma_block *block)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Initialize .xz Block encoder
+ *
+ * Valid actions for lzma_code() are LZMA_RUN, LZMA_SYNC_FLUSH (only if the
+ * filter chain supports it), and LZMA_FINISH.
+ *
+ * \return - LZMA_OK: All good, continue with lzma_code().
+ * - LZMA_MEM_ERROR
+ * - LZMA_OPTIONS_ERROR
+ *              - LZMA_UNSUPPORTED_CHECK: block->check specifies a Check ID
+ *                that is not supported by this build of liblzma. Initializing
+ * the encoder failed.
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_block_encoder(
+ lzma_stream *strm, lzma_block *block)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Initialize .xz Block decoder
+ *
+ * Valid actions for lzma_code() are LZMA_RUN and LZMA_FINISH. Using
+ * LZMA_FINISH is not required. It is supported only for convenience.
+ *
+ * \return - LZMA_OK: All good, continue with lzma_code().
+ * - LZMA_UNSUPPORTED_CHECK: Initialization was successful, but
+ * the given Check ID is not supported, thus Check will be
+ * ignored.
+ * - LZMA_PROG_ERROR
+ * - LZMA_MEM_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_block_decoder(
+ lzma_stream *strm, lzma_block *block)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Calculate maximum output size for single-call Block encoding
+ *
+ * This is equivalent to lzma_stream_buffer_bound() but for .xz Blocks.
+ * See the documentation of lzma_stream_buffer_bound().
+ */
+extern LZMA_API(size_t) lzma_block_buffer_bound(size_t uncompressed_size)
+ lzma_nothrow;
+
+
+/**
+ * \brief Single-call .xz Block encoder
+ *
+ * In contrast to the multi-call encoder initialized with
+ * lzma_block_encoder(), this function also encodes the Block Header. This
+ * is required so that an appropriate Block Header can be written even when
+ * the data isn't compressible and a different filter chain has to be used
+ * to encode the data in uncompressed form using uncompressed chunks of
+ * the LZMA2 filter.
+ *
+ * When the data isn't compressible, header_size, compressed_size, and
+ * uncompressed_size are set just like when the data was compressible, but
+ * it is possible that header_size is too small to hold the filter chain
+ * specified in block->filters, because that isn't necessarily the filter
+ * chain that was actually used to encode the data. lzma_block_unpadded_size()
+ * still works normally, because it doesn't read the filters array.
+ *
+ * \param block Block options: block->version, block->check,
+ * and block->filters must have been initialized.
+ * \param allocator lzma_allocator for custom allocator functions.
+ * Set to NULL to use malloc() and free().
+ * \param in Beginning of the input buffer
+ * \param in_size Size of the input buffer
+ * \param out Beginning of the output buffer
+ * \param out_pos The next byte will be written to out[*out_pos].
+ * *out_pos is updated only if encoding succeeds.
+ * \param out_size Size of the out buffer; the first byte into
+ * which no data is written to is out[out_size].
+ *
+ * \return - LZMA_OK: Encoding was successful.
+ * - LZMA_BUF_ERROR: Not enough output buffer space.
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_MEM_ERROR
+ * - LZMA_DATA_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_block_buffer_encode(
+ lzma_block *block, lzma_allocator *allocator,
+ const uint8_t *in, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Single-call .xz Block decoder
+ *
+ * This is single-call equivalent of lzma_block_decoder(), and requires that
+ * the caller has already decoded Block Header and checked its memory usage.
+ *
+ * \param block Block options just like with lzma_block_decoder().
+ * \param allocator lzma_allocator for custom allocator functions.
+ * Set to NULL to use malloc() and free().
+ * \param in Beginning of the input buffer
+ * \param in_pos The next byte will be read from in[*in_pos].
+ * *in_pos is updated only if decoding succeeds.
+ * \param in_size Size of the input buffer; the first byte that
+ * won't be read is in[in_size].
+ * \param out Beginning of the output buffer
+ * \param out_pos The next byte will be written to out[*out_pos].
+ *                     *out_pos is updated only if decoding succeeds.
+ * \param out_size Size of the out buffer; the first byte into
+ * which no data is written to is out[out_size].
+ *
+ * \return - LZMA_OK: Decoding was successful.
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_DATA_ERROR
+ * - LZMA_MEM_ERROR
+ * - LZMA_BUF_ERROR: Output buffer was too small.
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_block_buffer_decode(
+ lzma_block *block, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+ lzma_nothrow;
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/check.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/check.h
new file mode 100644
index 00000000..fe9a08ce
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/check.h
@@ -0,0 +1,152 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/check.h
+ * \brief Integrity checks
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/**
+ * \brief Type of the integrity check (Check ID)
+ *
+ * The .xz format supports multiple types of checks that are calculated
+ * from the uncompressed data. They vary in both speed and ability to
+ * detect errors.
+ */
+typedef enum {
+ LZMA_CHECK_NONE = 0,
+ /**<
+ * No Check is calculated.
+ *
+ * Size of the Check field: 0 bytes
+ */
+
+ LZMA_CHECK_CRC32 = 1,
+ /**<
+ * CRC32 using the polynomial from the IEEE 802.3 standard
+ *
+ * Size of the Check field: 4 bytes
+ */
+
+ LZMA_CHECK_CRC64 = 4,
+ /**<
+ * CRC64 using the polynomial from the ECMA-182 standard
+ *
+ * Size of the Check field: 8 bytes
+ */
+
+ LZMA_CHECK_SHA256 = 10
+ /**<
+ * SHA-256
+ *
+ * Size of the Check field: 32 bytes
+ */
+} lzma_check;
+
+
+/**
+ * \brief Maximum valid Check ID
+ *
+ * The .xz file format specification specifies 16 Check IDs (0-15). Some
+ * of them are only reserved, that is, no actual Check algorithm has been
+ * assigned. When decoding, liblzma still accepts unknown Check IDs for
+ * future compatibility. If a valid but unsupported Check ID is detected,
+ * liblzma can indicate a warning; see the flags LZMA_TELL_NO_CHECK,
+ * LZMA_TELL_UNSUPPORTED_CHECK, and LZMA_TELL_ANY_CHECK in container.h.
+ */
+#define LZMA_CHECK_ID_MAX 15
+
+
+/**
+ * \brief Test if the given Check ID is supported
+ *
+ * Return true if the given Check ID is supported by this liblzma build.
+ * Otherwise false is returned. It is safe to call this with a value that
+ * is not in the range [0, 15]; in that case the return value is always false.
+ *
+ * You can assume that LZMA_CHECK_NONE and LZMA_CHECK_CRC32 are always
+ * supported (even if liblzma is built with limited features).
+ */
+extern LZMA_API(lzma_bool) lzma_check_is_supported(lzma_check check)
+ lzma_nothrow lzma_attr_const;
+
+
+/**
+ * \brief Get the size of the Check field with the given Check ID
+ *
+ * Although not all Check IDs have a check algorithm associated, the size of
+ * every Check is already frozen. This function returns the size (in bytes) of
+ * the Check field with the specified Check ID. The values are:
+ * { 0, 4, 4, 4, 8, 8, 8, 16, 16, 16, 32, 32, 32, 64, 64, 64 }
+ *
+ * If the argument is not in the range [0, 15], UINT32_MAX is returned.
+ */
+extern LZMA_API(uint32_t) lzma_check_size(lzma_check check)
+ lzma_nothrow lzma_attr_const;
+
+
+/**
+ * \brief Maximum size of a Check field
+ */
+#define LZMA_CHECK_SIZE_MAX 64
+
+
+/**
+ * \brief Calculate CRC32
+ *
+ * Calculate CRC32 using the polynomial from the IEEE 802.3 standard.
+ *
+ * \param buf Pointer to the input buffer
+ * \param size Size of the input buffer
+ * \param crc Previously returned CRC value. This is used to
+ * calculate the CRC of a big buffer in smaller chunks.
+ * Set to zero when starting a new calculation.
+ *
+ * \return Updated CRC value, which can be passed to this function
+ * again to continue CRC calculation.
+ */
+extern LZMA_API(uint32_t) lzma_crc32(
+ const uint8_t *buf, size_t size, uint32_t crc)
+ lzma_nothrow lzma_attr_pure;
+
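+
+/*
+ * Illustrative sketch of calculating the CRC32 of a big buffer in smaller
+ * chunks, as described above. data and data_size are hypothetical names.
+ *
+ *     uint32_t crc = 0;   // start value for a new calculation
+ *     size_t pos = 0;
+ *
+ *     while (pos < data_size) {
+ *         size_t chunk = data_size - pos < 4096 ? data_size - pos : 4096;
+ *         crc = lzma_crc32(data + pos, chunk, crc);
+ *         pos += chunk;
+ *     }
+ */
+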
+
+/**
+ * \brief Calculate CRC64
+ *
+ * Calculate CRC64 using the polynomial from the ECMA-182 standard.
+ *
+ * This function is used similarly to lzma_crc32(). See its documentation.
+ */
+extern LZMA_API(uint64_t) lzma_crc64(
+ const uint8_t *buf, size_t size, uint64_t crc)
+ lzma_nothrow lzma_attr_pure;
+
+
+/*
+ * SHA-256 functions are currently not exported to public API.
+ * Contact Lasse Collin if you think it should be.
+ */
+
+
+/**
+ * \brief Get the type of the integrity check
+ *
+ * This function can be called only immediately after lzma_code() has
+ * returned LZMA_NO_CHECK, LZMA_UNSUPPORTED_CHECK, or LZMA_GET_CHECK.
+ * Calling this function in any other situation has undefined behavior.
+ */
+extern LZMA_API(lzma_check) lzma_get_check(const lzma_stream *strm)
+ lzma_nothrow;
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/container.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/container.h
new file mode 100644
index 00000000..15ddf7c0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/container.h
@@ -0,0 +1,406 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/container.h
+ * \brief File formats
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/************
+ * Encoding *
+ ************/
+
+/**
+ * \brief Default compression preset
+ *
+ * It's not straightforward to recommend a default preset, because in some
+ * cases keeping the resource usage relatively low is more important than
+ * getting the maximum compression ratio.
+ */
+#define LZMA_PRESET_DEFAULT UINT32_C(6)
+
+
+/**
+ * \brief Mask for preset level
+ *
+ * This is useful only if you need to extract the level from the preset
+ * variable. That should be rare.
+ */
+#define LZMA_PRESET_LEVEL_MASK UINT32_C(0x1F)
+
+
+/*
+ * Preset flags
+ *
+ * Currently only one flag is defined.
+ */
+
+/**
+ * \brief Extreme compression preset
+ *
+ * This flag modifies the preset to make the encoding significantly slower
+ * while improving the compression ratio only marginally. This is useful
+ * when you don't mind wasting time to get as small a result as possible.
+ *
+ * This flag doesn't affect the memory usage requirements of the decoder (at
+ * least not significantly). The memory usage of the encoder may be increased
+ * a little but only at the lowest preset levels (0-2).
+ */
+#define LZMA_PRESET_EXTREME (UINT32_C(1) << 31)
+
+
+/**
+ * \brief Calculate rough memory usage of easy encoder
+ *
+ * This function is a wrapper for lzma_raw_encoder_memusage().
+ *
+ * \param preset Compression preset (level and possible flags)
+ */
+extern LZMA_API(uint64_t) lzma_easy_encoder_memusage(uint32_t preset)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Calculate rough decoder memory usage of a preset
+ *
+ * This function is a wrapper for lzma_raw_decoder_memusage().
+ *
+ * \param preset Compression preset (level and possible flags)
+ */
+extern LZMA_API(uint64_t) lzma_easy_decoder_memusage(uint32_t preset)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Initialize .xz Stream encoder using a preset number
+ *
+ * This function is intended for those who just want to use the basic features
+ * of liblzma (that is, most developers out there).
+ *
+ * \param strm Pointer to lzma_stream that is at least initialized
+ * with LZMA_STREAM_INIT.
+ * \param preset   Compression preset to use. A preset consists of a level
+ *                  number and zero or more flags. Usually flags aren't
+ *                  used, so preset is simply a number [0, 9] which matches
+ *                  the options -0 .. -9 of the xz command line tool.
+ *                  Additional flags can be set using bitwise-or with
+ * the preset level number, e.g. 6 | LZMA_PRESET_EXTREME.
+ * \param check Integrity check type to use. See check.h for available
+ * checks. If you are unsure, use LZMA_CHECK_CRC32.
+ *
+ * \return - LZMA_OK: Initialization succeeded. Use lzma_code() to
+ * encode your data.
+ * - LZMA_MEM_ERROR: Memory allocation failed.
+ * - LZMA_OPTIONS_ERROR: The given compression level is not
+ * supported by this build of liblzma.
+ * - LZMA_UNSUPPORTED_CHECK: The given check type is not
+ * supported by this liblzma build.
+ * - LZMA_PROG_ERROR: One or more of the parameters have values
+ * that will never be valid. For example, strm == NULL.
+ *
+ * If initialization fails (return value is not LZMA_OK), all the memory
+ * allocated for *strm by liblzma is always freed. Thus, there is no need
+ * to call lzma_end() after failed initialization.
+ *
+ * If initialization succeeds, use lzma_code() to do the actual encoding.
+ * Valid values for `action' (the second argument of lzma_code()) are
+ * LZMA_RUN, LZMA_SYNC_FLUSH, LZMA_FULL_FLUSH, and LZMA_FINISH. In future,
+ * there may be compression levels or flags that don't support LZMA_SYNC_FLUSH.
+ */
+extern LZMA_API(lzma_ret) lzma_easy_encoder(
+ lzma_stream *strm, uint32_t preset, lzma_check check)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Single-call .xz Stream encoding using a preset number
+ *
+ * The maximum required output buffer size can be calculated with
+ * lzma_stream_buffer_bound().
+ *
+ * \param preset Compression preset to use. See the description
+ * in lzma_easy_encoder().
+ * \param check Type of the integrity check to calculate from
+ * uncompressed data.
+ * \param allocator lzma_allocator for custom allocator functions.
+ * Set to NULL to use malloc() and free().
+ * \param in Beginning of the input buffer
+ * \param in_size Size of the input buffer
+ * \param out Beginning of the output buffer
+ * \param out_pos The next byte will be written to out[*out_pos].
+ * *out_pos is updated only if encoding succeeds.
+ * \param out_size Size of the out buffer; the first byte into
+ * which no data is written to is out[out_size].
+ *
+ * \return - LZMA_OK: Encoding was successful.
+ * - LZMA_BUF_ERROR: Not enough output buffer space.
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_MEM_ERROR
+ * - LZMA_DATA_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_easy_buffer_encode(
+ uint32_t preset, lzma_check check,
+ lzma_allocator *allocator, const uint8_t *in, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size) lzma_nothrow;
+
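+
+/*
+ * Illustrative sketch of single-call encoding of an in-memory buffer.
+ * lzma_stream_buffer_bound() (declared below) gives an output size that
+ * is guaranteed to be large enough. in, in_size, and the malloc()ed out
+ * buffer are hypothetical names.
+ *
+ *     size_t out_size = lzma_stream_buffer_bound(in_size);
+ *     uint8_t *out = malloc(out_size);   // check for NULL in real code
+ *     size_t out_pos = 0;
+ *
+ *     lzma_ret ret = lzma_easy_buffer_encode(
+ *             LZMA_PRESET_DEFAULT, LZMA_CHECK_CRC32, NULL,
+ *             in, in_size, out, &out_pos, out_size);
+ *
+ *     // On LZMA_OK, out[0] .. out[out_pos - 1] hold the whole .xz Stream.
+ */
+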
+
+/**
+ * \brief Initialize .xz Stream encoder using a custom filter chain
+ *
+ * \param strm Pointer to properly prepared lzma_stream
+ * \param filters Array of filters. This must be terminated with
+ * filters[n].id = LZMA_VLI_UNKNOWN. See filter.h for
+ * more information.
+ * \param check Type of the integrity check to calculate from
+ * uncompressed data.
+ *
+ * \return - LZMA_OK: Initialization was successful.
+ * - LZMA_MEM_ERROR
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_stream_encoder(lzma_stream *strm,
+ const lzma_filter *filters, lzma_check check)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Initialize .lzma encoder (legacy file format)
+ *
+ * The .lzma format is sometimes called the LZMA_Alone format, which is the
+ * reason for the name of this function. The .lzma format supports only the
+ * LZMA1 filter. There is no support for integrity checks like CRC32.
+ *
+ * Use this function if and only if you need to create files readable by
+ * legacy LZMA tools such as LZMA Utils 4.32.x. Moving to the .xz format
+ * is strongly recommended.
+ *
+ * The valid action values for lzma_code() are LZMA_RUN and LZMA_FINISH.
+ * No kind of flushing is supported, because the file format doesn't make
+ * it possible.
+ *
+ * \return - LZMA_OK
+ * - LZMA_MEM_ERROR
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_alone_encoder(
+ lzma_stream *strm, const lzma_options_lzma *options)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Calculate output buffer size for single-call Stream encoder
+ *
+ * When trying to compress uncompressible data, the encoded size will be
+ * slightly bigger than the input data. This function calculates how much
+ * output buffer space is required to be sure that lzma_stream_buffer_encode()
+ * doesn't return LZMA_BUF_ERROR.
+ *
+ * The calculated value is not exact, but it is guaranteed to be big enough.
+ * The actual maximum output space required may be slightly smaller (up to
+ * about 100 bytes). This should not be a problem in practice.
+ *
+ * If the calculated maximum size doesn't fit into size_t or would make the
+ * Stream grow past LZMA_VLI_MAX (which should never happen in practice),
+ * zero is returned to indicate the error.
+ *
+ * \note The limit calculated by this function applies only to
+ * single-call encoding. Multi-call encoding may (and probably
+ * will) have larger maximum expansion when encoding
+ * uncompressible data. Currently there is no function to
+ * calculate the maximum expansion of multi-call encoding.
+ */
+extern LZMA_API(size_t) lzma_stream_buffer_bound(size_t uncompressed_size)
+ lzma_nothrow;
+
+
+/**
+ * \brief Single-call .xz Stream encoder
+ *
+ * \param filters Array of filters. This must be terminated with
+ * filters[n].id = LZMA_VLI_UNKNOWN. See filter.h
+ * for more information.
+ * \param check Type of the integrity check to calculate from
+ * uncompressed data.
+ * \param allocator lzma_allocator for custom allocator functions.
+ * Set to NULL to use malloc() and free().
+ * \param in Beginning of the input buffer
+ * \param in_size Size of the input buffer
+ * \param out Beginning of the output buffer
+ * \param out_pos The next byte will be written to out[*out_pos].
+ * *out_pos is updated only if encoding succeeds.
+ * \param out_size Size of the out buffer; the first byte into
+ * which no data is written to is out[out_size].
+ *
+ * \return - LZMA_OK: Encoding was successful.
+ * - LZMA_BUF_ERROR: Not enough output buffer space.
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_MEM_ERROR
+ * - LZMA_DATA_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_stream_buffer_encode(
+ lzma_filter *filters, lzma_check check,
+ lzma_allocator *allocator, const uint8_t *in, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/************
+ * Decoding *
+ ************/
+
+/**
+ * This flag makes lzma_code() return LZMA_NO_CHECK if the input stream
+ * being decoded has no integrity check. Note that when used with
+ * lzma_auto_decoder(), all .lzma files will trigger LZMA_NO_CHECK
+ * if LZMA_TELL_NO_CHECK is used.
+ */
+#define LZMA_TELL_NO_CHECK UINT32_C(0x01)
+
+
+/**
+ * This flag makes lzma_code() return LZMA_UNSUPPORTED_CHECK if the input
+ * stream has an integrity check, but the type of the integrity check is not
+ * supported by this liblzma version or build. Such files can still be
+ * decoded, but the integrity check cannot be verified.
+ */
+#define LZMA_TELL_UNSUPPORTED_CHECK UINT32_C(0x02)
+
+
+/**
+ * This flag makes lzma_code() return LZMA_GET_CHECK as soon as the type
+ * of the integrity check is known. The type can then be got with
+ * lzma_get_check().
+ */
+#define LZMA_TELL_ANY_CHECK UINT32_C(0x04)
+
+
+/**
+ * This flag enables decoding of concatenated files with file formats that
+ * allow concatenating compressed files as is. From the formats currently
+ * supported by liblzma, only the .xz format allows concatenated files.
+ * Concatenated files are not allowed with the legacy .lzma format.
+ *
+ * This flag also affects the usage of the `action' argument for lzma_code().
+ * When LZMA_CONCATENATED is used, lzma_code() won't return LZMA_STREAM_END
+ * unless LZMA_FINISH is used as `action'. Thus, the application has to set
+ * LZMA_FINISH in the same way as it does when encoding.
+ *
+ * If LZMA_CONCATENATED is not used, the decoders still accept LZMA_FINISH
+ * as `action' for lzma_code(), but the usage of LZMA_FINISH isn't required.
+ */
+#define LZMA_CONCATENATED UINT32_C(0x08)
+
+
+/**
+ * \brief Initialize .xz Stream decoder
+ *
+ * \param strm Pointer to properly prepared lzma_stream
+ * \param memlimit Rough memory usage limit as bytes
+ * \param flags Bitwise-or of zero or more of the decoder flags:
+ * LZMA_TELL_NO_CHECK, LZMA_TELL_UNSUPPORTED_CHECK,
+ * LZMA_TELL_ANY_CHECK, LZMA_CONCATENATED
+ *
+ * \return - LZMA_OK: Initialization was successful.
+ * - LZMA_MEM_ERROR: Cannot allocate memory.
+ * - LZMA_OPTIONS_ERROR: Unsupported flags
+ */
+extern LZMA_API(lzma_ret) lzma_stream_decoder(
+ lzma_stream *strm, uint64_t memlimit, uint32_t flags)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
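+
+/*
+ * Illustrative sketch of initializing the multi-call .xz decoder with no
+ * practical memory limit and with support for concatenated Streams.
+ *
+ *     lzma_stream strm = LZMA_STREAM_INIT;
+ *
+ *     if (lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED)
+ *             != LZMA_OK)
+ *         return false;
+ *
+ *     // Feed input and drain output with lzma_code(&strm, LZMA_RUN).
+ *     // Because LZMA_CONCATENATED is used, pass LZMA_FINISH after the
+ *     // last input so that lzma_code() can return LZMA_STREAM_END.
+ */
+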
+
+/**
+ * \brief Decode .xz Streams and .lzma files with autodetection
+ *
+ * This decoder autodetects between the .xz and .lzma file formats, and
+ * calls lzma_stream_decoder() or lzma_alone_decoder() once the type
+ * of the input file has been detected.
+ *
+ * \param strm Pointer to properly prepared lzma_stream
+ * \param memlimit Rough memory usage limit as bytes
+ * \param flags Bitwise-or of flags, or zero for no flags.
+ *
+ * \return - LZMA_OK: Initialization was successful.
+ * - LZMA_MEM_ERROR: Cannot allocate memory.
+ * - LZMA_OPTIONS_ERROR: Unsupported flags
+ */
+extern LZMA_API(lzma_ret) lzma_auto_decoder(
+ lzma_stream *strm, uint64_t memlimit, uint32_t flags)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Initialize .lzma decoder (legacy file format)
+ *
+ * Valid `action' arguments to lzma_code() are LZMA_RUN and LZMA_FINISH.
+ * There is no need to use LZMA_FINISH, but allowing it may simplify
+ * certain types of applications.
+ *
+ * \return - LZMA_OK
+ * - LZMA_MEM_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_alone_decoder(
+ lzma_stream *strm, uint64_t memlimit)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Single-call .xz Stream decoder
+ *
+ * \param memlimit Pointer to how much memory the decoder is allowed
+ * to allocate. The value pointed by this pointer is
+ * modified if and only if LZMA_MEMLIMIT_ERROR is
+ * returned.
+ * \param flags Bitwise-or of zero or more of the decoder flags:
+ * LZMA_TELL_NO_CHECK, LZMA_TELL_UNSUPPORTED_CHECK,
+ * LZMA_CONCATENATED. Note that LZMA_TELL_ANY_CHECK
+ * is not allowed and will return LZMA_PROG_ERROR.
+ * \param allocator lzma_allocator for custom allocator functions.
+ * Set to NULL to use malloc() and free().
+ * \param in Beginning of the input buffer
+ * \param in_pos The next byte will be read from in[*in_pos].
+ * *in_pos is updated only if decoding succeeds.
+ * \param in_size Size of the input buffer; the first byte that
+ * won't be read is in[in_size].
+ * \param out Beginning of the output buffer
+ * \param out_pos The next byte will be written to out[*out_pos].
+ *                     *out_pos is updated only if decoding succeeds.
+ * \param out_size Size of the out buffer; the first byte into
+ * which no data is written to is out[out_size].
+ *
+ * \return - LZMA_OK: Decoding was successful.
+ * - LZMA_FORMAT_ERROR
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_DATA_ERROR
+ * - LZMA_NO_CHECK: This can be returned only if using
+ * the LZMA_TELL_NO_CHECK flag.
+ * - LZMA_UNSUPPORTED_CHECK: This can be returned only if using
+ * the LZMA_TELL_UNSUPPORTED_CHECK flag.
+ * - LZMA_MEM_ERROR
+ * - LZMA_MEMLIMIT_ERROR: Memory usage limit was reached.
+ * The minimum required memlimit value was stored to *memlimit.
+ * - LZMA_BUF_ERROR: Output buffer was too small.
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_stream_buffer_decode(
+ uint64_t *memlimit, uint32_t flags, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+ lzma_nothrow lzma_attr_warn_unused_result;
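+
+
+/*
+ * Illustrative sketch of single-call decoding of an in-memory .xz Stream.
+ * The memory limit is passed by address; in, in_size, out, and out_size
+ * are hypothetical application buffers.
+ *
+ *     uint64_t memlimit = UINT64_MAX;   // no practical limit
+ *     size_t in_pos = 0, out_pos = 0;
+ *
+ *     lzma_ret ret = lzma_stream_buffer_decode(
+ *             &memlimit, 0, NULL,
+ *             in, &in_pos, in_size, out, &out_pos, out_size);
+ *
+ *     // On LZMA_OK, out[0] .. out[out_pos - 1] hold the uncompressed data.
+ */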
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/delta.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/delta.h
new file mode 100644
index 00000000..91c88d0a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/delta.h
@@ -0,0 +1,79 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/delta.h
+ * \brief Delta filter
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/**
+ * \brief Filter ID
+ *
+ * Filter ID of the Delta filter. This is used as lzma_filter.id.
+ */
+#define LZMA_FILTER_DELTA LZMA_VLI_C(0x03)
+
+
+/**
+ * \brief Type of the delta calculation
+ *
+ * Currently only byte-wise delta is supported. Other possible types could
+ * be, for example, delta of 16/32/64-bit little/big endian integers, but
+ * these are not currently planned since byte-wise delta is almost as good.
+ */
+typedef enum {
+ LZMA_DELTA_TYPE_BYTE
+} lzma_delta_type;
+
+
+/**
+ * \brief Options for the Delta filter
+ *
+ * These options are needed by both encoder and decoder.
+ */
+typedef struct {
+ /** For now, this must always be LZMA_DELTA_TYPE_BYTE. */
+ lzma_delta_type type;
+
+ /**
+ * \brief Delta distance
+ *
+ * With the only currently supported type, LZMA_DELTA_TYPE_BYTE,
+ * the distance is as bytes.
+ *
+ * Examples:
+ * - 16-bit stereo audio: distance = 4 bytes
+ * - 24-bit RGB image data: distance = 3 bytes
+ */
+ uint32_t dist;
+# define LZMA_DELTA_DIST_MIN 1
+# define LZMA_DELTA_DIST_MAX 256
+
+ /*
+ * Reserved space to allow possible future extensions without
+ * breaking the ABI. You should not touch these, because the names
+ * of these variables may change. These are and will never be used
+ * when type is LZMA_DELTA_TYPE_BYTE, so it is safe to leave these
+ * uninitialized.
+ */
+ uint32_t reserved_int1;
+ uint32_t reserved_int2;
+ uint32_t reserved_int3;
+ uint32_t reserved_int4;
+ void *reserved_ptr1;
+ void *reserved_ptr2;
+
+} lzma_options_delta;
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/filter.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/filter.h
new file mode 100644
index 00000000..4c27f171
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/filter.h
@@ -0,0 +1,362 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/filter.h
+ * \brief Common filter related types
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/**
+ * \brief Maximum number of filters in a chain
+ *
+ * A filter chain can have 1-4 filters, of which three are allowed to change
+ * the size of the data. Usually only one or two filters are needed.
+ */
+#define LZMA_FILTERS_MAX 4
+
+
+/**
+ * \brief Filter options
+ *
+ * This structure is used to pass a Filter ID and a pointer to the filter's
+ * options to liblzma. A few functions work with a single lzma_filter
+ * structure, while most functions expect a filter chain.
+ *
+ * A filter chain is indicated with an array of lzma_filter structures.
+ * The array is terminated with .id = LZMA_VLI_UNKNOWN. Thus, the filter
+ * array must have LZMA_FILTERS_MAX + 1 elements (that is, five) to
+ * be able to hold any arbitrary filter chain. This is important when
+ * using lzma_block_header_decode() from block.h, because too small an
+ * array would make liblzma write past the end of the filters array.
+ */
+typedef struct {
+ /**
+ * \brief Filter ID
+ *
+ * Use constants whose name begin with `LZMA_FILTER_' to specify
+ * different filters. In an array of lzma_filter structures, use
+ * LZMA_VLI_UNKNOWN to indicate end of filters.
+ *
+ * \note This is not an enum, because on some systems enums
+ * cannot be 64-bit.
+ */
+ lzma_vli id;
+
+ /**
+ * \brief Pointer to filter-specific options structure
+ *
+ * If the filter doesn't need options, set this to NULL. If id is
+ * set to LZMA_VLI_UNKNOWN, options is ignored, and thus
+ * doesn't need to be initialized.
+ *
+ * Some filters support changing the options in the middle of
+ * the encoding process. These filters store the pointer of the
+ * options structure and communicate with the application via
+ * modifications of the options structure.
+ */
+ void *options;
+
+} lzma_filter;
+
+
+/**
+ * \brief Test if the given Filter ID is supported for encoding
+ *
+ * Return true if the given Filter ID is supported for encoding by this
+ * liblzma build. Otherwise false is returned.
+ *
+ * There is no way to list which filters are available in this particular
+ * liblzma version and build. It would be useless, because the application
+ * couldn't know what kind of options the filter would need.
+ */
+extern LZMA_API(lzma_bool) lzma_filter_encoder_is_supported(lzma_vli id)
+ lzma_nothrow lzma_attr_const;
+
+
+/**
+ * \brief Test if the given Filter ID is supported for decoding
+ *
+ * Return true if the given Filter ID is supported for decoding by this
+ * liblzma build. Otherwise false is returned.
+ */
+extern LZMA_API(lzma_bool) lzma_filter_decoder_is_supported(lzma_vli id)
+ lzma_nothrow lzma_attr_const;
+
+
+/**
+ * \brief Calculate rough memory requirements for raw encoder
+ *
+ * Because the calculation is rough, this function can be used to calculate
+ * the memory requirements for Block and Stream encoders too.
+ *
+ * \param filters Array of filters terminated with
+ * .id == LZMA_VLI_UNKNOWN.
+ *
+ * \return Rough number of bytes of memory required for the given
+ * filter chain when encoding.
+ */
+extern LZMA_API(uint64_t) lzma_raw_encoder_memusage(const lzma_filter *filters)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Calculate rough memory requirements for raw decoder
+ *
+ * Because the calculation is rough, this function can be used to calculate
+ * the memory requirements for Block and Stream decoders too.
+ *
+ * \param filters Array of filters terminated with
+ * .id == LZMA_VLI_UNKNOWN.
+ *
+ * \return Rough number of bytes of memory required for the given
+ * filter chain when decoding.
+ */
+extern LZMA_API(uint64_t) lzma_raw_decoder_memusage(const lzma_filter *filters)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Initialize raw encoder
+ *
+ * This function may be useful when implementing custom file formats.
+ *
+ * \param strm Pointer to properly prepared lzma_stream
+ * \param filters Array of lzma_filter structures. The end of the
+ * array must be marked with .id = LZMA_VLI_UNKNOWN.
+ *
+ * The `action' with lzma_code() can be LZMA_RUN, LZMA_SYNC_FLUSH (if the
+ * filter chain supports it), or LZMA_FINISH.
+ *
+ * \return - LZMA_OK
+ * - LZMA_MEM_ERROR
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_raw_encoder(
+ lzma_stream *strm, const lzma_filter *filters)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Initialize raw decoder
+ *
+ * The initialization of raw decoder goes similarly to raw encoder.
+ *
+ * The `action' with lzma_code() can be LZMA_RUN or LZMA_FINISH. Using
+ * LZMA_FINISH is not required, it is supported just for convenience.
+ *
+ * \return - LZMA_OK
+ * - LZMA_MEM_ERROR
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_raw_decoder(
+ lzma_stream *strm, const lzma_filter *filters)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Single-call raw encoder
+ *
+ * \param filters Array of lzma_filter structures. The end of the
+ * array must be marked with .id = LZMA_VLI_UNKNOWN.
+ * \param allocator lzma_allocator for custom allocator functions.
+ * Set to NULL to use malloc() and free().
+ * \param in Beginning of the input buffer
+ * \param in_size Size of the input buffer
+ * \param out Beginning of the output buffer
+ * \param out_pos The next byte will be written to out[*out_pos].
+ * *out_pos is updated only if encoding succeeds.
+ * \param out_size Size of the out buffer; the first byte into
+ * which no data is written to is out[out_size].
+ *
+ * \return - LZMA_OK: Encoding was successful.
+ * - LZMA_BUF_ERROR: Not enough output buffer space.
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_MEM_ERROR
+ * - LZMA_DATA_ERROR
+ * - LZMA_PROG_ERROR
+ *
+ * \note      There is no function to calculate an output buffer size that
+ *            would surely be big enough. (lzma_stream_buffer_bound()
+ * works only for lzma_stream_buffer_encode().)
+ */
+extern LZMA_API(lzma_ret) lzma_raw_buffer_encode(
+ const lzma_filter *filters, lzma_allocator *allocator,
+ const uint8_t *in, size_t in_size, uint8_t *out,
+ size_t *out_pos, size_t out_size) lzma_nothrow;
+
+
+/**
+ * \brief Single-call raw decoder
+ *
+ * \param filters Array of lzma_filter structures. The end of the
+ * array must be marked with .id = LZMA_VLI_UNKNOWN.
+ * \param allocator lzma_allocator for custom allocator functions.
+ * Set to NULL to use malloc() and free().
+ * \param in Beginning of the input buffer
+ * \param in_pos The next byte will be read from in[*in_pos].
+ * *in_pos is updated only if decoding succeeds.
+ * \param in_size Size of the input buffer; the first byte that
+ * won't be read is in[in_size].
+ * \param out Beginning of the output buffer
+ * \param out_pos The next byte will be written to out[*out_pos].
+ *                     *out_pos is updated only if decoding succeeds.
+ * \param out_size Size of the out buffer; the first byte into
+ * which no data is written to is out[out_size].
+ */
+extern LZMA_API(lzma_ret) lzma_raw_buffer_decode(
+ const lzma_filter *filters, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size) lzma_nothrow;
+
+
+/**
+ * \brief Get the size of the Filter Properties field
+ *
+ * This function may be useful when implementing custom file formats
+ * using the raw encoder and decoder.
+ *
+ * \param size Pointer to uint32_t to hold the size of the properties
+ * \param filter    Filter ID and options (the size of the properties may
+ * vary depending on the options)
+ *
+ * \return - LZMA_OK
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_PROG_ERROR
+ *
+ * \note This function validates the Filter ID, but does not
+ * necessarily validate the options. Thus, it is possible
+ * that this returns LZMA_OK while the following call to
+ * lzma_properties_encode() returns LZMA_OPTIONS_ERROR.
+ */
+extern LZMA_API(lzma_ret) lzma_properties_size(
+ uint32_t *size, const lzma_filter *filter) lzma_nothrow;
+
+
+/**
+ * \brief Encode the Filter Properties field
+ *
+ * \param filter Filter ID and options
+ * \param props Buffer to hold the encoded options. The size of
+ * buffer must have been already determined with
+ * lzma_properties_size().
+ *
+ * \return - LZMA_OK
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_PROG_ERROR
+ *
+ * \note Even this function won't validate more options than actually
+ * necessary. Thus, it is possible that encoding the properties
+ * succeeds but using the same options to initialize the encoder
+ * will fail.
+ *
+ * \note It is OK to skip calling this function if
+ * lzma_properties_size() indicated that the size
+ * of the Filter Properties field is zero.
+ */
+extern LZMA_API(lzma_ret) lzma_properties_encode(
+ const lzma_filter *filter, uint8_t *props) lzma_nothrow;
+
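+
+/*
+ * Illustrative sketch of the usual two-step pattern of sizing and then
+ * encoding the Filter Properties field of a single filter. The filter
+ * variable is a hypothetical, fully initialized lzma_filter.
+ *
+ *     uint32_t props_size;
+ *     if (lzma_properties_size(&props_size, &filter) != LZMA_OK)
+ *         return false;
+ *
+ *     uint8_t *props = props_size > 0 ? malloc(props_size) : NULL;
+ *     // check the malloc() result in real code
+ *     if (props_size > 0
+ *             && lzma_properties_encode(&filter, props) != LZMA_OK)
+ *         return false;
+ */
+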
+
+/**
+ * \brief Decode the Filter Properties field
+ *
+ * \param filter filter->id must have been set to the correct
+ * Filter ID. filter->options doesn't need to be
+ * initialized (it's not freed by this function). The
+ * decoded options will be stored to filter->options.
+ * filter->options is set to NULL if there are no
+ * properties or if an error occurs.
+ * \param allocator Custom memory allocator used to allocate the
+ * options. Set to NULL to use the default malloc(),
+ * and in case of an error, also free().
+ * \param props Input buffer containing the properties.
+ * \param props_size Size of the properties. This must be the exact
+ * size; giving too much or too little input will
+ * return LZMA_OPTIONS_ERROR.
+ *
+ * \return - LZMA_OK
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_MEM_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_properties_decode(
+ lzma_filter *filter, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size) lzma_nothrow;
+
+
+/**
+ * \brief Calculate encoded size of a Filter Flags field
+ *
+ * Knowing the size of the Filter Flags field is useful when allocating
+ * memory to hold the encoded Filter Flags.
+ *
+ * \param size Pointer to integer to hold the calculated size
+ * \param filters Filter ID and associated options whose encoded
+ * size is to be calculated
+ *
+ * \return - LZMA_OK: *size set successfully. Note that this doesn't
+ * guarantee that filters->options is valid, thus
+ * lzma_filter_flags_encode() may still fail.
+ * - LZMA_OPTIONS_ERROR: Unknown Filter ID or unsupported options.
+ * - LZMA_PROG_ERROR: Invalid options
+ *
+ * \note If you need to calculate the size of a List of Filter Flags,
+ * you need to loop over every lzma_filter entry.
+ */
+extern LZMA_API(lzma_ret) lzma_filter_flags_size(
+ uint32_t *size, const lzma_filter *filters)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Encode Filter Flags into given buffer
+ *
+ * In contrast to some functions, this doesn't allocate the needed buffer.
+ * This is due to how this function is used internally by liblzma.
+ *
+ * \param filters Filter ID and options to be encoded
+ * \param out Beginning of the output buffer
+ * \param out_pos out[*out_pos] is the next write position. This
+ * is updated by the encoder.
+ * \param out_size out[out_size] is the first byte to not write.
+ *
+ * \return - LZMA_OK: Encoding was successful.
+ * - LZMA_OPTIONS_ERROR: Invalid or unsupported options.
+ * - LZMA_PROG_ERROR: Invalid options or not enough output
+ * buffer space (you should have checked it with
+ * lzma_filter_flags_size()).
+ */
+extern LZMA_API(lzma_ret) lzma_filter_flags_encode(const lzma_filter *filters,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+ lzma_nothrow lzma_attr_warn_unused_result;
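+
+/*
+ * Example (a minimal sketch): encode the Filter Flags of one filter into a
+ * freshly allocated buffer sized with lzma_filter_flags_size(). The helper
+ * name encode_filter_flags is a placeholder; the caller free()s *out on
+ * success.
+ *
+ * \code
+ * #include <stdlib.h>
+ * #include <lzma.h>
+ *
+ * static lzma_ret encode_filter_flags(const lzma_filter *filter,
+ *         uint8_t **out, uint32_t *out_size)
+ * {
+ *     lzma_ret ret = lzma_filter_flags_size(out_size, filter);
+ *     if (ret != LZMA_OK)
+ *         return ret;
+ *
+ *     *out = malloc(*out_size);
+ *     if (*out == NULL)
+ *         return LZMA_MEM_ERROR;
+ *
+ *     size_t out_pos = 0;
+ *     ret = lzma_filter_flags_encode(filter, *out, &out_pos, *out_size);
+ *     if (ret != LZMA_OK)
+ *         free(*out);
+ *
+ *     return ret;
+ * }
+ * \endcode
+ */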
+
+
+/**
+ * \brief Decode Filter Flags from given buffer
+ *
+ * The decoded result is stored into *filters. filters->options is
+ * initialized but the old value is NOT free()d.
+ *
+ * \return - LZMA_OK
+ * - LZMA_OPTIONS_ERROR
+ * - LZMA_MEM_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_filter_flags_decode(
+ lzma_filter *filters, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos, size_t in_size)
+ lzma_nothrow lzma_attr_warn_unused_result;
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/index.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/index.h
new file mode 100644
index 00000000..c2733d52
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/index.h
@@ -0,0 +1,405 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/index.h
+ * \brief Handling of .xz Index lists
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/**
+ * \brief Opaque data type to hold the Index
+ */
+typedef struct lzma_index_s lzma_index;
+
+
+/**
+ * \brief Index Record and its location
+ */
+typedef struct {
+ /**
+ * \brief Total encoded size of a Block including Block Padding
+ *
+ * This value is useful if you need to know the actual size of the
+ * Block that the Block decoder will read.
+ */
+ lzma_vli total_size;
+
+ /**
+ * \brief Encoded size of a Block excluding Block Padding
+ *
+ * This value is stored in the Index. When doing random-access
+ * reading, you should give this value to the Block decoder along
+ * with uncompressed_size.
+ */
+ lzma_vli unpadded_size;
+
+ /**
+ * \brief Uncompressed Size of a Block
+ */
+ lzma_vli uncompressed_size;
+
+ /**
+ * \brief Compressed offset in the Stream(s)
+ *
+ * This is the offset of the first byte of the Block, that is,
+ * where you need to seek to decode the Block. The offset
+ * is relative to the beginning of the Stream, or if there are
+ * multiple Indexes combined, relative to the beginning of the
+ * first Stream.
+ */
+ lzma_vli stream_offset;
+
+ /**
+ * \brief Uncompressed offset
+ *
+ * When doing random-access reading, it is possible that the target
+ * offset is not exactly at Block boundary. One will need to compare
+ * the target offset against uncompressed_offset, and possibly decode
+ * and throw away some amount of data before reaching the target
+ * offset.
+ */
+ lzma_vli uncompressed_offset;
+
+} lzma_index_record;
+
+
+/**
+ * \brief Calculate memory usage for Index with given number of Records
+ *
+ * On disk, the size of the Index field depends on both the number of Records
+ * stored and how big values the Records store (due to variable-length integer
+ * encoding). When the Index is kept in lzma_index structure, the memory usage
+ * depends only on the number of Records stored in the Index. The size in RAM
+ * is almost always a lot bigger than in encoded form on disk.
+ *
+ * This function calculates an approximate amount of memory needed to hold the
+ * given number of Records in lzma_index structure. This value may vary
+ * between liblzma versions if the internal implementation is modified.
+ *
+ * If you want to know how much memory an existing lzma_index structure is
+ * using, use lzma_index_memusage(lzma_index_count(i)).
+ */
+extern LZMA_API(uint64_t) lzma_index_memusage(lzma_vli record_count)
+ lzma_nothrow;
+
+
+/**
+ * \brief Allocate and initialize a new lzma_index structure
+ *
+ * If i is NULL, a new lzma_index structure is allocated, initialized,
+ * and a pointer to it returned. If allocation fails, NULL is returned.
+ *
+ * If i is non-NULL, it is reinitialized and the same pointer returned.
+ * In this case, return value cannot be NULL or a different pointer than
+ * the i that was given as an argument.
+ */
+extern LZMA_API(lzma_index *) lzma_index_init(
+ lzma_index *i, lzma_allocator *allocator) lzma_nothrow;
+
+
+/**
+ * \brief Deallocate the Index
+ *
+ * If i is NULL, this does nothing.
+ */
+extern LZMA_API(void) lzma_index_end(lzma_index *i, lzma_allocator *allocator)
+ lzma_nothrow;
+
+
+/**
+ * \brief Add a new Record to an Index
+ *
+ * \param i Pointer to a lzma_index structure
+ * \param allocator Pointer to lzma_allocator, or NULL to
+ * use malloc()
+ * \param unpadded_size Unpadded Size of a Block. This can be
+ * calculated with lzma_block_unpadded_size()
+ * after encoding or decoding the Block.
+ * \param uncompressed_size Uncompressed Size of a Block. This can be
+ * taken directly from lzma_block structure
+ * after encoding or decoding the Block.
+ *
+ * Appending a new Record does not affect the read position.
+ *
+ * \return - LZMA_OK
+ * - LZMA_MEM_ERROR
+ * - LZMA_DATA_ERROR: Compressed or uncompressed size of the
+ * Stream or size of the Index field would grow too big.
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_index_append(
+ lzma_index *i, lzma_allocator *allocator,
+ lzma_vli unpadded_size, lzma_vli uncompressed_size)
+ lzma_nothrow lzma_attr_warn_unused_result;
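+
+/*
+ * Example (a minimal sketch): build an Index from per-Block sizes collected
+ * while encoding. The block_sizes type and build_index name are
+ * placeholders; the default allocator (NULL) is used throughout.
+ *
+ * \code
+ * #include <lzma.h>
+ *
+ * typedef struct {
+ *     lzma_vli unpadded_size;
+ *     lzma_vli uncompressed_size;
+ * } block_sizes;
+ *
+ * static lzma_index *build_index(const block_sizes *blocks, size_t count)
+ * {
+ *     lzma_index *idx = lzma_index_init(NULL, NULL);
+ *     if (idx == NULL)
+ *         return NULL;
+ *
+ *     for (size_t n = 0; n < count; ++n) {
+ *         if (lzma_index_append(idx, NULL, blocks[n].unpadded_size,
+ *                 blocks[n].uncompressed_size) != LZMA_OK) {
+ *             lzma_index_end(idx, NULL);
+ *             return NULL;
+ *         }
+ *     }
+ *
+ *     // lzma_index_count(idx) == count; lzma_index_size(idx) now gives
+ *     // the encoded size needed for the Backward Size field.
+ *     return idx;
+ * }
+ * \endcode
+ */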
+
+
+/**
+ * \brief Get the number of Records
+ */
+extern LZMA_API(lzma_vli) lzma_index_count(const lzma_index *i)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Get the size of the Index field as bytes
+ *
+ * This is needed to verify the Backward Size field in the Stream Footer.
+ */
+extern LZMA_API(lzma_vli) lzma_index_size(const lzma_index *i)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Get the total size of the Blocks
+ *
+ * This doesn't include the Stream Header, Stream Footer, Stream Padding,
+ * or Index fields.
+ */
+extern LZMA_API(lzma_vli) lzma_index_total_size(const lzma_index *i)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Get the total size of the Stream
+ *
+ * If multiple Indexes have been combined, this works as if the Blocks
+ * were in a single Stream.
+ */
+extern LZMA_API(lzma_vli) lzma_index_stream_size(const lzma_index *i)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Get the total size of the file
+ *
+ * When no Indexes have been combined with lzma_index_cat(), this function is
+ * identical to lzma_index_stream_size(). If multiple Indexes have been
+ * combined, this includes also the headers of each separate Stream and the
+ * possible Stream Padding fields.
+ */
+extern LZMA_API(lzma_vli) lzma_index_file_size(const lzma_index *i)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Get the uncompressed size of the Stream
+ */
+extern LZMA_API(lzma_vli) lzma_index_uncompressed_size(const lzma_index *i)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Get the next Record from the Index
+ */
+extern LZMA_API(lzma_bool) lzma_index_read(
+ lzma_index *i, lzma_index_record *record)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Rewind the Index
+ *
+ * Rewind the Index so that next call to lzma_index_read() will return the
+ * first Record.
+ */
+extern LZMA_API(void) lzma_index_rewind(lzma_index *i) lzma_nothrow;
+
+
+/**
+ * \brief Locate a Record
+ *
+ * When the Index is available, it is possible to do random-access reading
+ * with granularity of Block size.
+ *
+ * \param i Pointer to lzma_index structure
+ * \param record Pointer to a structure to hold the search results
+ * \param target Uncompressed target offset which the caller would
+ * like to locate from the Stream
+ *
+ * If the target is smaller than the uncompressed size of the Stream (can be
+ * checked with lzma_index_uncompressed_size()):
+ * - Information about the Record containing the requested uncompressed
+ * offset is stored into *record.
+ * - Read offset will be adjusted so that calling lzma_index_read() can be
+ * used to read subsequent Records.
+ * - This function returns false.
+ *
+ * If the target is at or beyond the uncompressed size of the Stream,
+ * *record and the read position are not modified, and this function
+ * returns true.
+ */
+extern LZMA_API(lzma_bool) lzma_index_locate(
+ lzma_index *i, lzma_index_record *record, lzma_vli target)
+ lzma_nothrow;
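+
+/*
+ * Example (a minimal sketch): random-access lookup. find_block and its
+ * parameters are placeholder names; the caller is expected to seek to
+ * *seek_to, decode the Block, and discard *skip uncompressed bytes.
+ *
+ * \code
+ * #include <lzma.h>
+ *
+ * static int find_block(lzma_index *idx, lzma_vli target,
+ *         lzma_vli *seek_to, lzma_vli *skip)
+ * {
+ *     lzma_index_record rec;
+ *     if (lzma_index_locate(idx, &rec, target))
+ *         return -1; // target is past the end of the Stream
+ *
+ *     *seek_to = rec.stream_offset;              // first byte of the Block
+ *     *skip = target - rec.uncompressed_offset;  // bytes to throw away
+ *     return 0;
+ * }
+ * \endcode
+ */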
+
+
+/**
+ * \brief Concatenate Indexes of two Streams
+ *
+ * Concatenating Indexes is useful when doing random-access reading in
+ * a multi-Stream .xz file, or when combining multiple Streams into a single
+ * Stream.
+ *
+ * \param dest Destination Index after which src is appended
+ * \param src Source Index. If this function succeeds, the
+ * memory allocated for src is freed or moved to
+ * be part of dest.
+ * \param allocator Custom memory allocator; can be NULL to use
+ * malloc() and free().
+ * \param padding Size of the Stream Padding field between Streams.
+ * This must be a multiple of four.
+ *
+ * \return - LZMA_OK: Indexes concatenated successfully. src is now
+ * a dangling pointer.
+ * - LZMA_DATA_ERROR: *dest would grow too big.
+ * - LZMA_MEM_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_index_cat(lzma_index *lzma_restrict dest,
+ lzma_index *lzma_restrict src,
+ lzma_allocator *allocator, lzma_vli padding)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Duplicate an Index list
+ *
+ * Makes an identical copy of the Index. Also the read position is copied.
+ *
+ * \return A copy of the Index, or NULL if memory allocation failed.
+ */
+extern LZMA_API(lzma_index *) lzma_index_dup(
+ const lzma_index *i, lzma_allocator *allocator)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Compare if two Index lists are identical
+ *
+ * Read positions are not compared.
+ *
+ * \return True if *a and *b are equal, false otherwise.
+ */
+extern LZMA_API(lzma_bool) lzma_index_equal(
+ const lzma_index *a, const lzma_index *b)
+ lzma_nothrow lzma_attr_pure;
+
+
+/**
+ * \brief Initialize .xz Index encoder
+ *
+ * \param strm Pointer to properly prepared lzma_stream
+ * \param i Pointer to lzma_index which should be encoded.
+ * The read position will be at the end of the Index
+ * after lzma_code() has returned LZMA_STREAM_END.
+ *
+ * The only valid action value for lzma_code() is LZMA_RUN.
+ *
+ * \return - LZMA_OK: Initialization succeeded, continue with lzma_code().
+ * - LZMA_MEM_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_index_encoder(lzma_stream *strm, lzma_index *i)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Initialize .xz Index decoder
+ *
+ * \param strm Pointer to properly prepared lzma_stream
+ * \param i Pointer to a pointer that will be made to point
+ * to the final decoded Index once lzma_code() has
+ * returned LZMA_STREAM_END. That is,
+ * lzma_index_decoder() always takes care of
+ * allocating a new lzma_index structure, and *i
+ * doesn't need to be initialized by the caller.
+ * \param memlimit How much memory the resulting Index is allowed
+ * to require.
+ *
+ * The only valid action value for lzma_code() is LZMA_RUN.
+ *
+ * \return - LZMA_OK: Initialization succeeded, continue with lzma_code().
+ * - LZMA_MEM_ERROR
+ * - LZMA_MEMLIMIT_ERROR
+ * - LZMA_PROG_ERROR
+ *
+ * \note The memory usage limit is checked early in the decoding
+ * (within the first dozen input bytes or so). The actual memory
+ * is allocated later in smaller pieces. If the memory usage
+ * limit is modified with lzma_memlimit_set() after a part
+ * of the Index has already been decoded, the new limit may
+ * get ignored.
+ */
+extern LZMA_API(lzma_ret) lzma_index_decoder(
+ lzma_stream *strm, lzma_index **i, uint64_t memlimit)
+ lzma_nothrow lzma_attr_warn_unused_result;
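+
+/*
+ * Example (a minimal sketch): decode an Index field that has already been
+ * read into memory. lzma_stream, LZMA_STREAM_INIT, lzma_code(), and
+ * lzma_end() are assumed from lzma/base.h; decode_index is a placeholder
+ * name and the buffer is assumed to hold the complete Index field.
+ *
+ * \code
+ * #include <lzma.h>
+ *
+ * static lzma_index *decode_index(const uint8_t *buf, size_t size,
+ *         uint64_t memlimit)
+ * {
+ *     lzma_stream strm = LZMA_STREAM_INIT;
+ *     lzma_index *idx = NULL;
+ *
+ *     if (lzma_index_decoder(&strm, &idx, memlimit) != LZMA_OK)
+ *         return NULL;
+ *
+ *     strm.next_in = buf;
+ *     strm.avail_in = size;
+ *
+ *     lzma_ret ret = lzma_code(&strm, LZMA_RUN);
+ *     lzma_end(&strm);
+ *
+ *     // *idx is set only once the decoder has returned LZMA_STREAM_END.
+ *     return ret == LZMA_STREAM_END ? idx : NULL;
+ * }
+ * \endcode
+ */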
+
+
+/**
+ * \brief Single-call .xz Index encoder
+ *
+ * \param i Index to be encoded. The read position will be at
+ * the end of the Index if encoding succeeds, or at
+ * unspecified position in case an error occurs.
+ * \param out Beginning of the output buffer
+ * \param out_pos The next byte will be written to out[*out_pos].
+ * *out_pos is updated only if encoding succeeds.
+ * \param out_size Size of the out buffer; the first byte into
+ * which no data is written is out[out_size].
+ *
+ * \return - LZMA_OK: Encoding was successful.
+ * - LZMA_BUF_ERROR: Output buffer is too small. Use
+ * lzma_index_size() to find out how much output
+ * space is needed.
+ * - LZMA_PROG_ERROR
+ *
+ * \note This function doesn't take allocator argument since all
+ * the internal data is allocated on stack.
+ */
+extern LZMA_API(lzma_ret) lzma_index_buffer_encode(lzma_index *i,
+ uint8_t *out, size_t *out_pos, size_t out_size) lzma_nothrow;
+
+
+/**
+ * \brief Single-call .xz Index decoder
+ *
+ * \param i Pointer to a pointer that will be made to point
+ * to the final decoded Index if decoding is
+ * successful. That is, lzma_index_buffer_decode()
+ * always takes care of allocating a new
+ * lzma_index structure, and *i doesn't need to be
+ * initialized by the caller.
+ * \param memlimit Pointer to how much memory the resulting Index
+ * is allowed to require. The value pointed by
+ * this pointer is modified if and only if
+ * LZMA_MEMLIMIT_ERROR is returned.
+ * \param allocator Pointer to lzma_allocator, or NULL to use malloc()
+ * \param in Beginning of the input buffer
+ * \param in_pos The next byte will be read from in[*in_pos].
+ * *in_pos is updated only if decoding succeeds.
+ * \param in_size Size of the input buffer; the first byte that
+ * won't be read is in[in_size].
+ *
+ * \return - LZMA_OK: Decoding was successful.
+ * - LZMA_MEM_ERROR
+ * - LZMA_MEMLIMIT_ERROR: Memory usage limit was reached.
+ * The minimum required memlimit value was stored to *memlimit.
+ * - LZMA_DATA_ERROR
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_index_buffer_decode(lzma_index **i,
+ uint64_t *memlimit, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos, size_t in_size)
+ lzma_nothrow;
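+
+/*
+ * Example (a minimal sketch): single-call Index encoding into a buffer
+ * sized with lzma_index_size(). encode_index_buffer is a placeholder name;
+ * the lzma_vli size is assumed to fit in size_t, and the caller free()s
+ * *out on success.
+ *
+ * \code
+ * #include <stdlib.h>
+ * #include <lzma.h>
+ *
+ * static int encode_index_buffer(lzma_index *idx,
+ *         uint8_t **out, size_t *out_len)
+ * {
+ *     size_t size = (size_t)lzma_index_size(idx);
+ *
+ *     *out = malloc(size);
+ *     if (*out == NULL)
+ *         return -1;
+ *
+ *     size_t out_pos = 0;
+ *     if (lzma_index_buffer_encode(idx, *out, &out_pos, size) != LZMA_OK) {
+ *         free(*out);
+ *         return -1;
+ *     }
+ *
+ *     *out_len = out_pos;
+ *     return 0;
+ * }
+ * \endcode
+ */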
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/index_hash.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/index_hash.h
new file mode 100644
index 00000000..dd428f79
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/index_hash.h
@@ -0,0 +1,109 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/index_hash.h
+ * \brief Validates Index by using a hash function
+ *
+ * Hashing makes it possible to use constant amount of memory to validate
+ * Index of arbitrary size.
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+/**
+ * \brief Opaque data type to hold the Index hash
+ */
+typedef struct lzma_index_hash_s lzma_index_hash;
+
+
+/**
+ * \brief Allocate and initialize a new lzma_index_hash structure
+ *
+ * If index_hash is NULL, a new lzma_index_hash structure is allocated,
+ * initialized, and a pointer to it returned. If allocation fails, NULL
+ * is returned.
+ *
+ * If index_hash is non-NULL, it is reinitialized and the same pointer
+ * returned. In this case, return value cannot be NULL or a different
+ * pointer than the index_hash that was given as an argument.
+ */
+extern LZMA_API(lzma_index_hash *) lzma_index_hash_init(
+ lzma_index_hash *index_hash, lzma_allocator *allocator)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Deallocate lzma_index_hash structure
+ */
+extern LZMA_API(void) lzma_index_hash_end(
+ lzma_index_hash *index_hash, lzma_allocator *allocator)
+ lzma_nothrow;
+
+
+/**
+ * \brief Add a new Record to an Index hash
+ *
+ * \param index Pointer to a lzma_index_hash structure
+ * \param unpadded_size Unpadded Size of a Block
+ * \param uncompressed_size Uncompressed Size of a Block
+ *
+ * \return - LZMA_OK
+ * - LZMA_DATA_ERROR: Compressed or uncompressed size of the
+ * Stream or size of the Index field would grow too big.
+ * - LZMA_PROG_ERROR: Invalid arguments or this function is being
+ * used when lzma_index_hash_decode() has already been used.
+ */
+extern LZMA_API(lzma_ret) lzma_index_hash_append(lzma_index_hash *index_hash,
+ lzma_vli unpadded_size, lzma_vli uncompressed_size)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Decode and validate the Index field
+ *
+ * After telling the sizes of all Blocks with lzma_index_hash_append(),
+ * the actual Index field is decoded with this function. Specifically,
+ * once decoding of the Index field has been started, no more Records
+ * can be added using lzma_index_hash_append().
+ *
+ * This function doesn't use lzma_stream structure to pass the input data.
+ * Instead, the input buffer is specified using three arguments. This is
+ * because it better matches the internal APIs of liblzma.
+ *
+ * \param index_hash Pointer to a lzma_index_hash structure
+ * \param in Pointer to the beginning of the input buffer
+ * \param in_pos in[*in_pos] is the next byte to process
+ * \param in_size in[in_size] is the first byte not to process
+ *
+ * \return - LZMA_OK: So far good, but more input is needed.
+ * - LZMA_STREAM_END: Index decoded successfully and it matches
+ * the Records given with lzma_index_hash_append().
+ * - LZMA_DATA_ERROR: Index is corrupt or doesn't match the
+ * information given with lzma_index_hash_append().
+ * - LZMA_BUF_ERROR: Cannot progress because *in_pos >= in_size.
+ * - LZMA_PROG_ERROR
+ */
+extern LZMA_API(lzma_ret) lzma_index_hash_decode(lzma_index_hash *index_hash,
+ const uint8_t *in, size_t *in_pos, size_t in_size)
+ lzma_nothrow lzma_attr_warn_unused_result;
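+
+/*
+ * Example (a minimal sketch): verify a raw Index field against the Block
+ * sizes seen while decoding the Blocks. verify_index and its parameters
+ * are placeholder names; LZMA_STREAM_END from the final call means the
+ * Index matches the appended Records.
+ *
+ * \code
+ * #include <lzma.h>
+ *
+ * static lzma_ret verify_index(const lzma_vli *unpadded,
+ *         const lzma_vli *uncompressed, size_t block_count,
+ *         const uint8_t *index_field, size_t index_size)
+ * {
+ *     lzma_index_hash *hash = lzma_index_hash_init(NULL, NULL);
+ *     if (hash == NULL)
+ *         return LZMA_MEM_ERROR;
+ *
+ *     lzma_ret ret = LZMA_OK;
+ *     for (size_t n = 0; n < block_count && ret == LZMA_OK; ++n)
+ *         ret = lzma_index_hash_append(hash, unpadded[n], uncompressed[n]);
+ *
+ *     if (ret == LZMA_OK) {
+ *         size_t in_pos = 0;
+ *         ret = lzma_index_hash_decode(hash, index_field, &in_pos,
+ *                 index_size);
+ *     }
+ *
+ *     lzma_index_hash_end(hash, NULL);
+ *     return ret;
+ * }
+ * \endcode
+ */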
+
+
+/**
+ * \brief Get the size of the Index field as bytes
+ *
+ * This is needed to verify the Backward Size field in the Stream Footer.
+ */
+extern LZMA_API(lzma_vli) lzma_index_hash_size(
+ const lzma_index_hash *index_hash)
+ lzma_nothrow lzma_attr_pure;
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/lzma.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/lzma.h
new file mode 100644
index 00000000..5f7be0c8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/lzma.h
@@ -0,0 +1,412 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/lzma.h
+ * \brief LZMA1 and LZMA2 filters
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/**
+ * \brief LZMA1 Filter ID
+ *
+ * LZMA1 is the very same thing as what was called just LZMA in LZMA Utils,
+ * 7-Zip, and LZMA SDK. It's called LZMA1 here to prevent developers from
+ * accidentally using LZMA when they actually want LZMA2.
+ *
+ * LZMA1 shouldn't be used for new applications unless you _really_ know
+ * what you are doing. LZMA2 is almost always a better choice.
+ */
+#define LZMA_FILTER_LZMA1 LZMA_VLI_C(0x4000000000000001)
+
+/**
+ * \brief LZMA2 Filter ID
+ *
+ * Usually you want this instead of LZMA1. Compared to LZMA1, LZMA2 adds
+ * support for LZMA_SYNC_FLUSH, uncompressed chunks (smaller expansion
+ * when trying to compress uncompressible data), possibility to change
+ * lc/lp/pb in the middle of encoding, and some other internal improvements.
+ */
+#define LZMA_FILTER_LZMA2 LZMA_VLI_C(0x21)
+
+
+/**
+ * \brief Match finders
+ *
+ * Match finder has major effect on both speed and compression ratio.
+ * Usually hash chains are faster than binary trees.
+ *
+ * The memory usage formulas are only rough estimates, which are closest to
+ * reality when dict_size is a power of two. The formulas are more complex
+ * in reality, and can also change a little between liblzma versions. Use
+ * lzma_memusage_encoder() to get a more accurate estimate of memory usage.
+ */
+typedef enum {
+ LZMA_MF_HC3 = 0x03,
+ /**<
+ * \brief Hash Chain with 2- and 3-byte hashing
+ *
+ * Minimum nice_len: 3
+ *
+ * Memory usage:
+ * - dict_size <= 16 MiB: dict_size * 7.5
+ * - dict_size > 16 MiB: dict_size * 5.5 + 64 MiB
+ */
+
+ LZMA_MF_HC4 = 0x04,
+ /**<
+ * \brief Hash Chain with 2-, 3-, and 4-byte hashing
+ *
+ * Minimum nice_len: 4
+ *
+ * Memory usage: dict_size * 7.5
+ */
+
+ LZMA_MF_BT2 = 0x12,
+ /**<
+ * \brief Binary Tree with 2-byte hashing
+ *
+ * Minimum nice_len: 2
+ *
+ * Memory usage: dict_size * 9.5
+ */
+
+ LZMA_MF_BT3 = 0x13,
+ /**<
+ * \brief Binary Tree with 2- and 3-byte hashing
+ *
+ * Minimum nice_len: 3
+ *
+ * Memory usage:
+ * - dict_size <= 16 MiB: dict_size * 11.5
+ * - dict_size > 16 MiB: dict_size * 9.5 + 64 MiB
+ */
+
+ LZMA_MF_BT4 = 0x14
+ /**<
+ * \brief Binary Tree with 2-, 3-, and 4-byte hashing
+ *
+ * Minimum nice_len: 4
+ *
+ * Memory usage: dict_size * 11.5
+ */
+} lzma_match_finder;
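+
+/*
+ * As a rough worked example of the formulas above (assuming dict_size is
+ * a power of two): with a 64 MiB dictionary, LZMA_MF_HC4 needs roughly
+ * 64 * 7.5 = 480 MiB and LZMA_MF_BT4 roughly 64 * 11.5 = 736 MiB of
+ * encoder memory, before the other, smaller encoder structures are counted.
+ */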
+
+
+/**
+ * \brief Test if given match finder is supported
+ *
+ * Return true if the given match finder is supported by this liblzma build.
+ * Otherwise false is returned. It is safe to call this with a value that
+ * isn't listed in lzma_match_finder enumeration; the return value will be
+ * false.
+ *
+ * There is no way to list which match finders are available in this
+ * particular liblzma version and build. It would be useless, because
+ * a new match finder, which the application developer wasn't aware,
+ * could require giving additional options to the encoder that the older
+ * match finders don't need.
+ */
+extern LZMA_API(lzma_bool) lzma_mf_is_supported(lzma_match_finder match_finder)
+ lzma_nothrow lzma_attr_const;
+
+
+/**
+ * \brief Compression modes
+ *
+ * This selects the function used to analyze the data produced by the match
+ * finder.
+ */
+typedef enum {
+ LZMA_MODE_FAST = 1,
+ /**<
+ * \brief Fast compression
+ *
+ * Fast mode is usually at its best when combined with
+ * a hash chain match finder.
+ */
+
+ LZMA_MODE_NORMAL = 2
+ /**<
+ * \brief Normal compression
+ *
+ * This is usually notably slower than fast mode. Use this
+ * together with binary tree match finders to expose the
+ * full potential of the LZMA1 or LZMA2 encoder.
+ */
+} lzma_mode;
+
+
+/**
+ * \brief Test if given compression mode is supported
+ *
+ * Return true if the given compression mode is supported by this liblzma
+ * build. Otherwise false is returned. It is safe to call this with a value
+ * that isn't listed in lzma_mode enumeration; the return value will be false.
+ *
+ * There is no way to list which modes are available in this particular
+ * liblzma version and build. It would be useless, because a new compression
+ * mode, of which the application developer wasn't aware, could require giving
+ * additional options to the encoder that the older modes don't need.
+ */
+extern LZMA_API(lzma_bool) lzma_mode_is_supported(lzma_mode mode)
+ lzma_nothrow lzma_attr_const;
+
+
+/**
+ * \brief Options specific to the LZMA1 and LZMA2 filters
+ *
+ * Since LZMA1 and LZMA2 share most of the code, it's simplest to share
+ * the options structure too. For encoding, all but the reserved variables
+ * need to be initialized unless specifically mentioned otherwise.
+ *
+ * For raw decoding, both LZMA1 and LZMA2 need dict_size, preset_dict, and
+ * preset_dict_size (if preset_dict != NULL). LZMA1 needs also lc, lp, and pb.
+ */
+typedef struct {
+ /**
+ * \brief Dictionary size in bytes
+ *
+ * Dictionary size indicates how many bytes of the recently processed
+ * uncompressed data are kept in memory. One method to reduce the size of
+ * the uncompressed data is to store distance-length pairs, which
+ * indicate what data to repeat from the dictionary buffer. Thus,
+ * the bigger the dictionary, the better the compression ratio
+ * usually is.
+ *
+ * Maximum size of the dictionary depends on multiple things:
+ * - Memory usage limit
+ * - Available address space (not a problem on 64-bit systems)
+ * - Selected match finder (encoder only)
+ *
+ * Currently the maximum dictionary size for encoding is 1.5 GiB
+ * (i.e. (UINT32_C(1) << 30) + (UINT32_C(1) << 29)) even on 64-bit
+ * systems for certain match finder implementation reasons. In the
+ * future, there may be match finders that support bigger
+ * dictionaries.
+ *
+ * Decoder already supports dictionaries up to 4 GiB - 1 B (i.e.
+ * UINT32_MAX), so increasing the maximum dictionary size of the
+ * encoder won't cause problems for old decoders.
+ *
+ * Because extremely small dictionary sizes would have unneeded
+ * overhead in the decoder, the minimum dictionary size is 4096 bytes.
+ *
+ * \note When decoding, a dictionary that is too big does no harm
+ * other than wasting memory.
+ */
+ uint32_t dict_size;
+# define LZMA_DICT_SIZE_MIN UINT32_C(4096)
+# define LZMA_DICT_SIZE_DEFAULT (UINT32_C(1) << 23)
+
+ /**
+ * \brief Pointer to an initial dictionary
+ *
+ * It is possible to initialize the LZ77 history window using
+ * a preset dictionary. It is useful when compressing many
+ * similar, relatively small chunks of data independently from
+ * each other. The preset dictionary should contain typical
+ * strings that occur in the files being compressed. The most
+ * probable strings should be near the end of the preset dictionary.
+ *
+ * This feature should be used only in special situations. For
+ * now, it works correctly only with raw encoding and decoding.
+ * Currently none of the container formats supported by
+ * liblzma allow preset dictionary when decoding, thus if
+ * you create a .xz or .lzma file with preset dictionary, it
+ * cannot be decoded with the regular decoder functions. In the
+ * future, the .xz format will likely get support for preset
+ * dictionary though.
+ */
+ const uint8_t *preset_dict;
+
+ /**
+ * \brief Size of the preset dictionary
+ *
+ * Specifies the size of the preset dictionary. If the size is
+ * bigger than dict_size, only the last dict_size bytes are
+ * processed.
+ *
+ * This variable is read only when preset_dict is not NULL.
+ * If preset_dict is not NULL but preset_dict_size is zero,
+ * no preset dictionary is used (identical to only setting
+ * preset_dict to NULL).
+ */
+ uint32_t preset_dict_size;
+
+ /**
+ * \brief Number of literal context bits
+ *
+ * How many of the highest bits of the previous uncompressed
+ * eight-bit byte (also known as `literal') are taken into
+ * account when predicting the bits of the next literal.
+ *
+ * \todo Example
+ *
+ * There is a limit that applies to literal context bits and literal
+ * position bits together: lc + lp <= 4. Without this limit the
+ * decoding could become very slow, which could have security related
+ * results in some cases like email servers doing virus scanning.
+ * This limit also simplifies the internal implementation in liblzma.
+ *
+ * There may be LZMA1 streams that have lc + lp > 4 (maximum possible
+ * lc would be 8). It is not possible to decode such streams with
+ * liblzma.
+ */
+ uint32_t lc;
+# define LZMA_LCLP_MIN 0
+# define LZMA_LCLP_MAX 4
+# define LZMA_LC_DEFAULT 3
+
+ /**
+ * \brief Number of literal position bits
+ *
+ * How many of the lowest bits of the current position (number
+ * of bytes from the beginning of the uncompressed data) in the
+ * uncompressed data are taken into account when predicting the
+ * bits of the next literal (a single eight-bit byte).
+ *
+ * \todo Example
+ */
+ uint32_t lp;
+# define LZMA_LP_DEFAULT 0
+
+ /**
+ * \brief Number of position bits
+ *
+ * How many of the lowest bits of the current position in the
+ * uncompressed data are taken into account when estimating
+ * probabilities of matches. A match is a sequence of bytes for
+ * which a matching sequence is found from the dictionary and
+ * thus can be stored as distance-length pair.
+ *
+ * Example: If most of the matches occur at byte positions of
+ * 8 * n + 3, that is, 3, 11, 19, ... set pb to 3, because 2**3 == 8.
+ */
+ uint32_t pb;
+# define LZMA_PB_MIN 0
+# define LZMA_PB_MAX 4
+# define LZMA_PB_DEFAULT 2
+
+ /**
+ * \brief Indicate if the options structure is persistent
+ *
+ * If this is true, the application must keep this options structure
+ * available after the LZMA2 encoder has been initialized. With
+ * persistent structure it is possible to change some encoder options
+ * in the middle of the encoding process without resetting the encoder.
+ *
+ * This option is used only by LZMA2. LZMA1 ignores this and it is
+ * safe to not initialize this when encoding with LZMA1.
+ */
+ lzma_bool persistent;
+
+ /** Compression mode */
+ lzma_mode mode;
+
+ /**
+ * \brief Nice length of a match
+ *
+ * This determines how many bytes the encoder compares from the match
+ * candidates when looking for the best match. Once a match of at
+ * least nice_len bytes long is found, the encoder stops looking for
+ * better candidates and encodes the match. (Naturally, if the found
+ * match is actually longer than nice_len, the actual length is
+ * encoded; it's not truncated to nice_len.)
+ *
+ * Bigger values usually increase the compression ratio and
+ * compression time. For most files, 32 to 128 is a good value,
+ * which gives very good compression ratio at good speed.
+ *
+ * The exact minimum value depends on the match finder. The maximum
+ * is 273, which is the maximum length of a match that LZMA1 and
+ * LZMA2 can encode.
+ */
+ uint32_t nice_len;
+
+ /** Match finder ID */
+ lzma_match_finder mf;
+
+ /**
+ * \brief Maximum search depth in the match finder
+ *
+ * For every input byte, match finder searches through the hash chain
+ * or binary tree in a loop, each iteration going one step deeper in
+ * the chain or tree. The searching stops if
+ * - a match of at least nice_len bytes long is found;
+ * - all match candidates from the hash chain or binary tree have
+ * been checked; or
+ * - maximum search depth is reached.
+ *
+ * Maximum search depth is needed to prevent the match finder from
+ * wasting too much time in case there are lots of short match
+ * candidates. On the other hand, stopping the search before all
+ * candidates have been checked can reduce compression ratio.
+ *
+ * Setting depth to zero tells liblzma to use an automatic default
+ * value, that depends on the selected match finder and nice_len.
+ * The default is in the range [10, 200] or so (it may vary between
+ * liblzma versions).
+ *
+ * Using a bigger depth value than the default can increase
+ * compression ratio in some cases. There is no strict maximum value,
+ * but high values (thousands or millions) should be used with care:
+ * the encoder could remain fast enough with typical input, but
+ * malicious input could cause the match finder to slow down
+ * dramatically, possibly creating a denial of service attack.
+ */
+ uint32_t depth;
+
+ /*
+ * Reserved space to allow possible future extensions without
+ * breaking the ABI. You should not touch these, because the names
+ * of these variables may change. These are not and will never be used
+ * with the currently supported options, so it is safe to leave these
+ * uninitialized.
+ */
+ void *reserved_ptr1;
+ void *reserved_ptr2;
+ uint32_t reserved_int1;
+ uint32_t reserved_int2;
+ uint32_t reserved_int3;
+ uint32_t reserved_int4;
+ uint32_t reserved_int5;
+ uint32_t reserved_int6;
+ uint32_t reserved_int7;
+ uint32_t reserved_int8;
+ lzma_reserved_enum reserved_enum1;
+ lzma_reserved_enum reserved_enum2;
+ lzma_reserved_enum reserved_enum3;
+ lzma_reserved_enum reserved_enum4;
+
+} lzma_options_lzma;
+
+
+/**
+ * \brief Set a compression preset to lzma_options_lzma structure
+ *
+ * 0 is the fastest and 9 is the slowest. These match the switches -0 .. -9
+ * of the xz command line tool. In addition, it is possible to bitwise-or
+ * flags to the preset. Currently only LZMA_PRESET_EXTREME is supported.
+ * The flags are defined in container.h, because the flags are used also
+ * with lzma_easy_encoder().
+ *
+ * The preset values are subject to changes between liblzma versions.
+ *
+ * This function is available only if LZMA1 or LZMA2 encoder has been enabled
+ * when building liblzma.
+ */
+extern LZMA_API(lzma_bool) lzma_lzma_preset(
+ lzma_options_lzma *options, uint32_t preset) lzma_nothrow;
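+
+/*
+ * Example (a minimal sketch): fill the options from preset 6 and then
+ * override a couple of fields before attaching them to an LZMA2 filter
+ * entry. The overridden values are arbitrary illustrations, not
+ * recommendations; the lzma_filter layout and the non-zero-on-failure
+ * return convention of lzma_lzma_preset() are assumed, and
+ * init_lzma2_filter is a placeholder name.
+ *
+ * \code
+ * #include <lzma.h>
+ *
+ * static int init_lzma2_filter(lzma_options_lzma *opt, lzma_filter *entry)
+ * {
+ *     if (lzma_lzma_preset(opt, 6)) // true indicates an unsupported preset
+ *         return -1;
+ *
+ *     opt->dict_size = UINT32_C(1) << 24; // 16 MiB
+ *     opt->nice_len = 64;
+ *
+ *     entry->id = LZMA_FILTER_LZMA2;
+ *     entry->options = opt;
+ *     return 0;
+ * }
+ * \endcode
+ */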
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/stream_flags.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/stream_flags.h
new file mode 100644
index 00000000..beff77ef
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/stream_flags.h
@@ -0,0 +1,229 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/stream_flags.h
+ * \brief .xz Stream Header and Stream Footer encoder and decoder
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/**
+ * \brief Size of Stream Header and Stream Footer
+ *
+ * Stream Header and Stream Footer have the same size and they are not
+ * going to change even if a newer version of the .xz file format is
+ * developed in the future.
+ */
+#define LZMA_STREAM_HEADER_SIZE 12
+
+
+/**
+ * \brief Options for encoding/decoding Stream Header and Stream Footer
+ */
+typedef struct {
+ /**
+ * \brief Stream Flags format version
+ *
+ * To prevent API and ABI breakages if new features are needed in
+ * Stream Header or Stream Footer, a version number is used to
+ * indicate which fields in this structure are in use. For now,
+ * version must always be zero. With non-zero version, the
+ * lzma_stream_header_encode() and lzma_stream_footer_encode()
+ * will return LZMA_OPTIONS_ERROR.
+ *
+ * lzma_stream_header_decode() and lzma_stream_footer_decode()
+ * will always set this to the lowest value that supports all the
+ * features indicated by the Stream Flags field. The application
+ * must check that the version number set by the decoding functions
+ * is supported by the application. Otherwise it is possible that
+ * the application will decode the Stream incorrectly.
+ */
+ uint32_t version;
+
+ /**
+ * \brief Backward Size
+ *
+ * Backward Size must be a multiple of four bytes. In this Stream
+ * format version, Backward Size is the size of the Index field.
+ *
+ * Backward Size isn't actually part of the Stream Flags field, but
+ * it is convenient to include in this structure anyway. Backward
+ * Size is present only in the Stream Footer. There is no need to
+ * initialize backward_size when encoding Stream Header.
+ *
+ * lzma_stream_header_decode() always sets backward_size to
+ * LZMA_VLI_UNKNOWN so that it is convenient to use
+ * lzma_stream_flags_compare() when both Stream Header and Stream
+ * Footer have been decoded.
+ */
+ lzma_vli backward_size;
+# define LZMA_BACKWARD_SIZE_MIN 4
+# define LZMA_BACKWARD_SIZE_MAX (LZMA_VLI_C(1) << 34)
+
+ /**
+ * \brief Check ID
+ *
+ * This indicates the type of the integrity check calculated from
+ * uncompressed data.
+ */
+ lzma_check check;
+
+ /*
+ * Reserved space to allow possible future extensions without
+ * breaking the ABI. You should not touch these, because the
+ * names of these variables may change.
+ *
+ * (We will never be able to use all of these since Stream Flags
+ * is just two bytes plus Backward Size of four bytes. But it's
+ * nice to have the proper types when they are needed.)
+ */
+ lzma_reserved_enum reserved_enum1;
+ lzma_reserved_enum reserved_enum2;
+ lzma_reserved_enum reserved_enum3;
+ lzma_reserved_enum reserved_enum4;
+ lzma_reserved_enum reserved_enum5;
+ lzma_reserved_enum reserved_enum6;
+ lzma_bool reserved_bool1;
+ lzma_bool reserved_bool2;
+ lzma_bool reserved_bool3;
+ lzma_bool reserved_bool4;
+ lzma_bool reserved_bool5;
+ lzma_bool reserved_bool6;
+ lzma_bool reserved_bool7;
+ lzma_bool reserved_bool8;
+ uint32_t reserved_int1;
+ uint32_t reserved_int2;
+ uint32_t reserved_int3;
+ uint32_t reserved_int4;
+
+} lzma_stream_flags;
+
+
+/**
+ * \brief Encode Stream Header
+ *
+ * \param options Stream Header options to be encoded.
+ * options->backward_size is ignored and doesn't
+ * need to be initialized.
+ * \param out Beginning of the output buffer of
+ * LZMA_STREAM_HEADER_SIZE bytes.
+ *
+ * \return - LZMA_OK: Encoding was successful.
+ * - LZMA_OPTIONS_ERROR: options->version is not supported by
+ * this liblzma version.
+ * - LZMA_PROG_ERROR: Invalid options.
+ */
+extern LZMA_API(lzma_ret) lzma_stream_header_encode(
+ const lzma_stream_flags *options, uint8_t *out)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Encode Stream Footer
+ *
+ * \param options Stream Footer options to be encoded.
+ * \param out Beginning of the output buffer of
+ * LZMA_STREAM_HEADER_SIZE bytes.
+ *
+ * \return - LZMA_OK: Encoding was successful.
+ * - LZMA_OPTIONS_ERROR: options->version is not supported by
+ * this liblzma version.
+ * - LZMA_PROG_ERROR: Invalid options.
+ */
+extern LZMA_API(lzma_ret) lzma_stream_footer_encode(
+ const lzma_stream_flags *options, uint8_t *out)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Decode Stream Header
+ *
+ * \param options Target for the decoded Stream Header options.
+ * \param in Beginning of the input buffer of
+ * LZMA_STREAM_HEADER_SIZE bytes.
+ *
+ * options->backward_size is always set to LZMA_VLI_UNKNOWN. This is to
+ * help comparing Stream Flags from Stream Header and Stream Footer with
+ * lzma_stream_flags_compare().
+ *
+ * \return - LZMA_OK: Decoding was successful.
+ * - LZMA_FORMAT_ERROR: Magic bytes don't match, thus the given
+ * buffer cannot be Stream Header.
+ * - LZMA_DATA_ERROR: CRC32 doesn't match, thus the header
+ * is corrupt.
+ * - LZMA_OPTIONS_ERROR: Unsupported options are present
+ * in the header.
+ *
+ * \note When decoding .xz files that contain multiple Streams, it may
+ * make sense to print "file format not recognized" only if
+ * decoding of the Stream Header of the _first_ Stream gives
+ * LZMA_FORMAT_ERROR. If non-first Stream Header gives
+ * LZMA_FORMAT_ERROR, the message used for LZMA_DATA_ERROR is
+ * probably more appropriate.
+ *
+ * For example, Stream decoder in liblzma uses LZMA_DATA_ERROR if
+ * LZMA_FORMAT_ERROR is returned by lzma_stream_header_decode()
+ * when decoding non-first Stream.
+ */
+extern LZMA_API(lzma_ret) lzma_stream_header_decode(
+ lzma_stream_flags *options, const uint8_t *in)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Decode Stream Footer
+ *
+ * \param options Target for the decoded Stream Footer options.
+ * \param in Beginning of the input buffer of
+ * LZMA_STREAM_HEADER_SIZE bytes.
+ *
+ * \return - LZMA_OK: Decoding was successful.
+ * - LZMA_FORMAT_ERROR: Magic bytes don't match, thus the given
+ * buffer cannot be Stream Footer.
+ * - LZMA_DATA_ERROR: CRC32 doesn't match, thus the Stream Footer
+ * is corrupt.
+ * - LZMA_OPTIONS_ERROR: Unsupported options are present
+ * in Stream Footer.
+ *
+ * \note If Stream Header was already decoded successfully, but
+ * decoding Stream Footer returns LZMA_FORMAT_ERROR, the
+ * application should probably report some other error message
+ * than "file format not recognized", since the file more likely
+ * is corrupt (possibly truncated). Stream decoder in liblzma
+ * uses LZMA_DATA_ERROR in this situation.
+ */
+extern LZMA_API(lzma_ret) lzma_stream_footer_decode(
+ lzma_stream_flags *options, const uint8_t *in)
+ lzma_nothrow lzma_attr_warn_unused_result;
+
+
+/**
+ * \brief Compare two lzma_stream_flags structures
+ *
+ * backward_size values are compared only if both are not
+ * LZMA_VLI_UNKNOWN.
+ *
+ * \return - LZMA_OK: Both are equal. If either had backward_size set
+ * to LZMA_VLI_UNKNOWN, backward_size values were not
+ * compared or validated.
+ * - LZMA_DATA_ERROR: The structures differ.
+ * - LZMA_OPTIONS_ERROR: version in either structure is greater
+ * than the maximum supported version (currently zero).
+ * - LZMA_PROG_ERROR: Invalid value, e.g. invalid check or
+ * backward_size.
+ */
+extern LZMA_API(lzma_ret) lzma_stream_flags_compare(
+ const lzma_stream_flags *a, const lzma_stream_flags *b)
+ lzma_nothrow lzma_attr_pure;
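+
+/*
+ * Example (a minimal sketch): decode the Stream Header and Stream Footer
+ * read from the two ends of a Stream, compare their Stream Flags, and pick
+ * up the size of the Index field from backward_size. check_stream_flags
+ * and its parameters are placeholder names.
+ *
+ * \code
+ * #include <lzma.h>
+ *
+ * static lzma_ret check_stream_flags(const uint8_t *header_buf,
+ *         const uint8_t *footer_buf, lzma_vli *index_size)
+ * {
+ *     // Both buffers hold LZMA_STREAM_HEADER_SIZE bytes.
+ *     lzma_stream_flags header, footer;
+ *
+ *     lzma_ret ret = lzma_stream_header_decode(&header, header_buf);
+ *     if (ret != LZMA_OK)
+ *         return ret;
+ *
+ *     ret = lzma_stream_footer_decode(&footer, footer_buf);
+ *     if (ret != LZMA_OK)
+ *         return ret;
+ *
+ *     ret = lzma_stream_flags_compare(&header, &footer);
+ *     if (ret == LZMA_OK)
+ *         *index_size = footer.backward_size; // size of the Index field
+ *
+ *     return ret;
+ * }
+ * \endcode
+ */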
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/subblock.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/subblock.h
new file mode 100644
index 00000000..2c4eed90
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/subblock.h
@@ -0,0 +1,202 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/subblock.h
+ * \brief Subblock filter
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/**
+ * \brief Filter ID
+ *
+ * Filter ID of the Subblock filter. This is used as lzma_filter.id.
+ */
+#define LZMA_FILTER_SUBBLOCK LZMA_VLI_C(0x01)
+
+
+/**
+ * \brief Subfilter mode
+ *
+ * See lzma_options_subblock.subfilter_mode for details.
+ */
+typedef enum {
+ LZMA_SUBFILTER_NONE,
+ /**<
+ * No Subfilter is in use.
+ */
+
+ LZMA_SUBFILTER_SET,
+ /**<
+ * New Subfilter has been requested to be initialized.
+ */
+
+ LZMA_SUBFILTER_RUN,
+ /**<
+ * Subfilter is active.
+ */
+
+ LZMA_SUBFILTER_FINISH
+ /**<
+ * Subfilter has been requested to be finished.
+ */
+} lzma_subfilter_mode;
+
+
+/**
+ * \brief Options for the Subblock filter
+ *
+ * Specifying options for the Subblock filter is optional: if the pointer
+ * options is NULL, no subfilters are allowed and the default value is used
+ * for subblock_data_size.
+ */
+typedef struct {
+ /* Options for encoder and decoder */
+
+ /**
+ * \brief Allowing subfilters
+ *
+ * If this is true, subfilters are allowed.
+ *
+ * In the encoder, if this is set to false, subfilter_mode and
+ * subfilter_options are completely ignored.
+ */
+ lzma_bool allow_subfilters;
+
+ /* Options for encoder only */
+
+ /**
+ * \brief Alignment
+ *
+ * The Subblock filter encapsulates the input data into Subblocks.
+ * Each Subblock has a header which takes a few bytes of space.
+ * When the output of the Subblock encoder is fed to another filter
+ * that takes advantage of the alignment of the input data (e.g. LZMA),
+ * the Subblock filter can add padding to keep the actual data parts
+ * in the Subblocks aligned correctly.
+ *
+ * The alignment should be a positive integer. Subblock filter will
+ * add enough padding between Subblocks so that this is true for
+ * every payload byte:
+ * input_offset % alignment == output_offset % alignment
+ *
+ * The Subblock filter assumes that the first output byte will be
+ * written to a position in the output stream that is properly
+ * aligned. This requirement is automatically met when the start
+ * offset of the Stream or Block is correctly told to Block or
+ * Stream encoder.
+ */
+ uint32_t alignment;
+# define LZMA_SUBBLOCK_ALIGNMENT_MIN 1
+# define LZMA_SUBBLOCK_ALIGNMENT_MAX 32
+# define LZMA_SUBBLOCK_ALIGNMENT_DEFAULT 4
+
+ /**
+ * \brief Size of the Subblock Data part of each Subblock
+ *
+ * This value is re-read every time a new Subblock is started.
+ *
+ * Bigger values
+ * - save a few bytes of space;
+ * - increase latency in the encoder (but no effect for decoding);
+ * - decrease memory locality (increased cache pollution) in the
+ * encoder (no effect in decoding).
+ */
+ uint32_t subblock_data_size;
+# define LZMA_SUBBLOCK_DATA_SIZE_MIN 1
+# define LZMA_SUBBLOCK_DATA_SIZE_MAX (UINT32_C(1) << 28)
+# define LZMA_SUBBLOCK_DATA_SIZE_DEFAULT 4096
+
+ /**
+ * \brief Run-length encoder remote control
+ *
+ * The Subblock filter has an internal run-length encoder (RLE). It
+ * can be useful when the data includes byte sequences that repeat
+ * very many times. The RLE can be used also when a Subfilter is
+ * in use; the RLE will be applied to the output of the Subfilter.
+ *
+ * Note that in contrast to traditional RLE, this RLE is intended to
+ * be used only when there's a lot of data to be repeated. If the
+ * input data has e.g. 500 bytes of NULs now and then, this RLE
+ * is probably useless, because plain LZMA should provide better
+ * results.
+ *
+ * Due to above reasons, it was decided to keep the implementation
+ * of the RLE very simple. When the rle variable is non-zero, it
+ * subblock_data_size must be a multiple of rle. Once the Subblock
+ * encoder has got subblock_data_size bytes of input, it will check
+ * if the whole buffer of the last subblock_data_size can be
+ * represented with repeats of chunks having size of rle bytes.
+ *
+ * If there are consecutive identical buffers of subblock_data_size
+ * bytes, they will be encoded using a single repeat entry if
+ * possible.
+ *
+ * If need arises, more advanced RLE can be implemented later
+ * without breaking API or ABI.
+ */
+ uint32_t rle;
+# define LZMA_SUBBLOCK_RLE_OFF 0
+# define LZMA_SUBBLOCK_RLE_MIN 1
+# define LZMA_SUBBLOCK_RLE_MAX 256
+
+ /**
+ * \brief Subfilter remote control
+ *
+ * When the Subblock filter is initialized, this variable must be
+ * LZMA_SUBFILTER_NONE or LZMA_SUBFILTER_SET.
+ *
+ * When subfilter_mode is LZMA_SUBFILTER_NONE, the application may
+ * put Subfilter options to subfilter_options structure, and then
+ * set subfilter_mode to LZMA_SUBFILTER_SET. No new input data will
+ * be read until the Subfilter has been enabled. Once the Subfilter
+ * has been enabled, liblzma will set subfilter_mode to
+ * LZMA_SUBFILTER_RUN.
+ *
+ * When subfilter_mode is LZMA_SUBFILTER_RUN, the application may
+ * set subfilter_mode to LZMA_SUBFILTER_FINISH. All the input
+ * currently available will be encoded before unsetting the
+ * Subfilter. Application must not change the amount of available
+ * input until the Subfilter has finished. Once the Subfilter has
+ * finished, liblzma will set subfilter_mode to LZMA_SUBFILTER_NONE.
+ *
+ * If the intent is to have the Subfilter enabled to the very end of
+ * the data, there is no need to separately disable the Subfilter with
+ * LZMA_SUBFILTER_FINISH. Using LZMA_FINISH as the second argument
+ * of lzma_code() will make the Subblock encoder disable the
+ * Subfilter once all the data has been run through the Subfilter.
+ *
+ * After the first call with LZMA_SYNC_FLUSH or LZMA_FINISH, the
+ * application must not change subfilter_mode until LZMA_STREAM_END.
+ * Setting LZMA_SUBFILTER_SET/LZMA_SUBFILTER_FINISH and
+ * LZMA_SYNC_FLUSH/LZMA_FINISH _at the same time_ is fine.
+ *
+ * \note This variable is ignored if allow_subfilters is false.
+ */
+ lzma_subfilter_mode subfilter_mode;
+
+ /**
+ * \brief Subfilter and its options
+ *
+ * When no Subfilter is used, the data is copied as is into Subblocks.
+ * Setting a Subfilter allows encoding some parts of the data with
+ * an additional filter. It is possible to use many different Subfilters
+ * in the same Block, although only one can be used at once.
+ *
+ * \note This variable is ignored if allow_subfilters is false.
+ */
+ lzma_filter subfilter_options;
+
+} lzma_options_subblock;
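+
+/*
+ * Example (a minimal sketch): fill lzma_options_subblock for plain Subblock
+ * encoding without any Subfilter. The values simply echo the defaults and
+ * limits defined above; subblock_defaults is a placeholder name.
+ *
+ * \code
+ * #include <lzma.h>
+ *
+ * static void subblock_defaults(lzma_options_subblock *opt)
+ * {
+ *     opt->allow_subfilters = 0; // subfilter_* fields are then ignored
+ *     opt->alignment = LZMA_SUBBLOCK_ALIGNMENT_DEFAULT;
+ *     opt->subblock_data_size = LZMA_SUBBLOCK_DATA_SIZE_DEFAULT;
+ *     opt->rle = LZMA_SUBBLOCK_RLE_OFF;
+ *     opt->subfilter_mode = LZMA_SUBFILTER_NONE;
+ * }
+ * \endcode
+ */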
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/version.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/version.h
new file mode 100644
index 00000000..7d732596
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/version.h
@@ -0,0 +1,123 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/version.h
+ * \brief Version number
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/*
+ * Version number split into components
+ */
+#define LZMA_VERSION_MAJOR 4
+#define LZMA_VERSION_MINOR 999
+#define LZMA_VERSION_PATCH 9
+#define LZMA_VERSION_STABILITY LZMA_VERSION_STABILITY_BETA
+
+#ifndef LZMA_VERSION_COMMIT
+# define LZMA_VERSION_COMMIT ""
+#endif
+
+
+/*
+ * Map symbolic stability levels to integers.
+ */
+#define LZMA_VERSION_STABILITY_ALPHA 0
+#define LZMA_VERSION_STABILITY_BETA 1
+#define LZMA_VERSION_STABILITY_STABLE 2
+
+
+/**
+ * \brief Compile-time version number
+ *
+ * The version number is of format xyyyzzzs where
+ * - x = major
+ * - yyy = minor
+ * - zzz = revision
+ * - s indicates stability: 0 = alpha, 1 = beta, 2 = stable
+ *
+ * The same xyyyzzz triplet is never reused with different stability levels.
+ * For example, if 5.1.0alpha has been released, there will never be 5.1.0beta
+ * or 5.1.0 stable.
+ *
+ * \note The version number of liblzma has nothing to do with
+ * the version number of Igor Pavlov's LZMA SDK.
+ */
+#define LZMA_VERSION (LZMA_VERSION_MAJOR * UINT32_C(10000000) \
+ + LZMA_VERSION_MINOR * UINT32_C(10000) \
+ + LZMA_VERSION_PATCH * UINT32_C(10) \
+ + LZMA_VERSION_STABILITY)
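+
+/*
+ * For example, the values above for this 4.999.9beta release give
+ * LZMA_VERSION == 4 * 10000000 + 999 * 10000 + 9 * 10 + 1 == 49990091.
+ */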
+
+
+/*
+ * Macros to construct the compile-time version string
+ */
+#if LZMA_VERSION_STABILITY == LZMA_VERSION_STABILITY_ALPHA
+# define LZMA_VERSION_STABILITY_STRING "alpha"
+#elif LZMA_VERSION_STABILITY == LZMA_VERSION_STABILITY_BETA
+# define LZMA_VERSION_STABILITY_STRING "beta"
+#elif LZMA_VERSION_STABILITY == LZMA_VERSION_STABILITY_STABLE
+# define LZMA_VERSION_STABILITY_STRING ""
+#else
+# error Incorrect LZMA_VERSION_STABILITY
+#endif
+
+#define LZMA_VERSION_STRING_C_(major, minor, patch, stability, commit) \
+ #major "." #minor "." #patch stability commit
+
+#define LZMA_VERSION_STRING_C(major, minor, patch, stability, commit) \
+ LZMA_VERSION_STRING_C_(major, minor, patch, stability, commit)
+
+
+/**
+ * \brief Compile-time version as a string
+ *
+ * This can be for example "4.999.5alpha", "4.999.8beta", or "5.0.0" (stable
+ * versions don't have any "stable" suffix). In the future, a snapshot built
+ * from source code repository may include an additional suffix, for example
+ * "4.999.8beta-21-g1d92". The commit ID won't be available in numeric form
+ * in LZMA_VERSION macro.
+ */
+#define LZMA_VERSION_STRING LZMA_VERSION_STRING_C( \
+ LZMA_VERSION_MAJOR, LZMA_VERSION_MINOR, \
+ LZMA_VERSION_PATCH, LZMA_VERSION_STABILITY_STRING, \
+ LZMA_VERSION_COMMIT)
+
+
+/* #ifndef is needed for use with windres (MinGW or Cygwin). */
+#ifndef LZMA_H_INTERNAL_RC
+
+/**
+ * \brief Run-time version number as an integer
+ *
+ * Return the value of LZMA_VERSION macro at the compile time of liblzma.
+ * This allows the application to compare if it was built against the same,
+ * older, or newer version of liblzma that is currently running.
+ */
+extern LZMA_API(uint32_t) lzma_version_number(void)
+ lzma_nothrow lzma_attr_const;
+
+
+/**
+ * \brief Run-time version as a string
+ *
+ * This function may be useful if you want to display which version of
+ * liblzma your application is currently using.
+ */
+extern LZMA_API(const char *) lzma_version_string(void)
+ lzma_nothrow lzma_attr_const;
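+
+/*
+ * Example (a minimal sketch): warn when the liblzma loaded at run time is
+ * older than the one the application was compiled against.
+ * check_liblzma_version is a placeholder name.
+ *
+ * \code
+ * #include <stdio.h>
+ * #include <lzma.h>
+ *
+ * static void check_liblzma_version(void)
+ * {
+ *     if (lzma_version_number() < LZMA_VERSION)
+ *         fprintf(stderr, "warning: running liblzma %s, built against %s\n",
+ *                 lzma_version_string(), LZMA_VERSION_STRING);
+ * }
+ * \endcode
+ */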
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/vli.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/vli.h
new file mode 100644
index 00000000..a24e9e55
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/api/lzma/vli.h
@@ -0,0 +1,170 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/**
+ * \file lzma/vli.h
+ * \brief Variable-length integer handling
+ *
+ * In the .xz format, most integers are encoded in a variable-length
+ * representation, which is sometimes called little endian base-128 encoding.
+ * This saves space when smaller values are more likely than bigger values.
+ *
+ * The encoding scheme encodes seven bits to every byte, using minimum
+ * number of bytes required to represent the given value. Encodings that use
+ * non-minimum number of bytes are invalid, thus every integer has exactly
+ * one encoded representation. The maximum number of bits in a VLI is 63,
+ * thus the vli argument must be at maximum of UINT64_MAX / 2. You should
+ * use LZMA_VLI_MAX for clarity.
+ */
+
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * See ../lzma.h for information about liblzma as a whole.
+ */
+
+#ifndef LZMA_H_INTERNAL
+# error Never include this file directly. Use <lzma.h> instead.
+#endif
+
+
+/**
+ * \brief Maximum supported value of variable-length integer
+ */
+#define LZMA_VLI_MAX (UINT64_MAX / 2)
+
+/**
+ * \brief VLI value to denote that the value is unknown
+ */
+#define LZMA_VLI_UNKNOWN UINT64_MAX
+
+/**
+ * \brief Maximum supported length of variable length integers
+ */
+#define LZMA_VLI_BYTES_MAX 9
+
+
+/**
+ * \brief VLI constant suffix
+ */
+#define LZMA_VLI_C(n) UINT64_C(n)
+
+
+/**
+ * \brief Variable-length integer type
+ *
+ * This will always be unsigned integer. Valid VLI values are in the range
+ * [0, LZMA_VLI_MAX]. Unknown value is indicated with LZMA_VLI_UNKNOWN,
+ * which is the maximum value of the underlying integer type.
+ *
+ * In the future, even if lzma_vli is typedef'd to something other than uint64_t,
+ * it is guaranteed that 2 * LZMA_VLI_MAX will not overflow lzma_vli.
+ * This simplifies integer overflow detection.
+ */
+typedef uint64_t lzma_vli;
+
+
+/**
+ * \brief Simple macro to validate variable-length integer
+ *
+ * This is useful for testing that the application has given acceptable
+ * values, for example in the uncompressed_size and compressed_size variables.
+ *
+ * \return True if the integer is representable as VLI or if it
+ * indicates unknown value.
+ */
+#define lzma_vli_is_valid(vli) \
+ ((vli) <= LZMA_VLI_MAX || (vli) == LZMA_VLI_UNKNOWN)
+
+
+/**
+ * \brief Encode a variable-length integer
+ *
+ * This function has two modes: single-call and multi-call. Single-call mode
+ * encodes the whole integer at once; it is an error if the output buffer is
+ * too small. Multi-call mode saves the position in *vli_pos, and thus it is
+ * possible to continue encoding if the buffer becomes full before the whole
+ * integer has been encoded.
+ *
+ * \param vli Integer to be encoded
+ * \param vli_pos How many VLI-encoded bytes have already been written
+ * out. When starting to encode a new integer, *vli_pos
+ * must be set to zero. To use single-call encoding,
+ * set vli_pos to NULL.
+ * \param out Beginning of the output buffer
+ * \param out_pos The next byte will be written to out[*out_pos].
+ * \param out_size Size of the out buffer; the first byte into
+ * which no data is written is out[out_size].
+ *
+ * \return Slightly different return values are used in multi-call and
+ * single-call modes.
+ *
+ * Single-call (vli_pos == NULL):
+ * - LZMA_OK: Integer successfully encoded.
+ * - LZMA_PROG_ERROR: Arguments are not sane. This can be due
+ * to too little output space; single-call mode doesn't use
+ * LZMA_BUF_ERROR, since the application should have checked
+ * the encoded size with lzma_vli_size().
+ *
+ * Multi-call (vli_pos != NULL):
+ * - LZMA_OK: So far all OK, but the integer is not
+ * completely written out yet.
+ * - LZMA_STREAM_END: Integer successfully encoded.
+ * - LZMA_BUF_ERROR: No output space was provided.
+ * - LZMA_PROG_ERROR: Arguments are not sane.
+ */
+extern LZMA_API(lzma_ret) lzma_vli_encode(lzma_vli vli,
+ size_t *vli_pos, uint8_t *lzma_restrict out,
+ size_t *lzma_restrict out_pos, size_t out_size) lzma_nothrow;
+
+
+/**
+ * \brief Decode a variable-length integer
+ *
+ * Like lzma_vli_encode(), this function has single-call and multi-call modes.
+ *
+ * \param vli Pointer to decoded integer. The decoder will
+ * initialize it to zero when *vli_pos == 0, so
+ * the application isn't required to initialize *vli.
+ * \param vli_pos How many bytes have already been decoded. When
+ * starting to decode a new integer, *vli_pos must
+ * be initialized to zero. To use single-call decoding,
+ * set this to NULL.
+ * \param in Beginning of the input buffer
+ * \param in_pos The next byte will be read from in[*in_pos].
+ * \param in_size Size of the input buffer; the first byte that
+ * won't be read is in[in_size].
+ *
+ * \return Slightly different return values are used in multi-call and
+ * single-call modes.
+ *
+ * Single-call (vli_pos == NULL):
+ * - LZMA_OK: Integer successfully decoded.
+ * - LZMA_DATA_ERROR: Integer is corrupt. This includes hitting
+ * the end of the input buffer before the whole integer was
+ * decoded; providing no input at all will also result in LZMA_DATA_ERROR.
+ * - LZMA_PROG_ERROR: Arguments are not sane.
+ *
+ * Multi-call (vli_pos != NULL):
+ * - LZMA_OK: So far all OK, but the integer is not
+ * completely decoded yet.
+ * - LZMA_STREAM_END: Integer successfully decoded.
+ * - LZMA_DATA_ERROR: Integer is corrupt.
+ * - LZMA_BUF_ERROR: No input was provided.
+ * - LZMA_PROG_ERROR: Arguments are not sane.
+ */
+extern LZMA_API(lzma_ret) lzma_vli_decode(lzma_vli *lzma_restrict vli,
+ size_t *vli_pos, const uint8_t *lzma_restrict in,
+ size_t *lzma_restrict in_pos, size_t in_size) lzma_nothrow;
+
+
+/**
+ * \brief Get the number of bytes required to encode a VLI
+ *
+ * \return Number of bytes on success (1-9). If vli isn't valid,
+ * zero is returned.
+ */
+extern LZMA_API(uint32_t) lzma_vli_size(lzma_vli vli)
+ lzma_nothrow lzma_attr_pure;
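
    A minimal single-call sketch of the encode/decode pair declared above,
    assuming <lzma.h> is available (the value and buffer names are
    illustrative only):

    #include <stdio.h>
    #include <lzma.h>

    int main(void)
    {
        const lzma_vli value = 1234567;        /* any value <= LZMA_VLI_MAX */
        uint8_t buf[LZMA_VLI_BYTES_MAX];
        size_t out_pos = 0;

        /* Single-call mode: vli_pos == NULL, the whole integer at once. */
        if (lzma_vli_encode(value, NULL, buf, &out_pos, sizeof(buf)) != LZMA_OK)
            return 1;

        printf("encoded into %zu byte(s); lzma_vli_size() says %u\n",
               out_pos, (unsigned)lzma_vli_size(value));

        /* Decode it back, again in single-call mode. */
        lzma_vli decoded;
        size_t in_pos = 0;
        if (lzma_vli_decode(&decoded, NULL, buf, &in_pos, out_pos) != LZMA_OK)
            return 1;

        return decoded == value ? 0 : 1;
    }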
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/Makefile.inc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/Makefile.inc
new file mode 100644
index 00000000..e4067a9c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/Makefile.inc
@@ -0,0 +1,51 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+EXTRA_DIST += \
+ check/crc32_tablegen.c \
+ check/crc64_tablegen.c
+
+liblzma_la_SOURCES += \
+ check/check.c \
+ check/check.h \
+ check/crc_macros.h
+
+if COND_CHECK_CRC32
+if COND_SMALL
+liblzma_la_SOURCES += check/crc32_small.c
+else
+liblzma_la_SOURCES += \
+ check/crc32_table.c \
+ check/crc32_table_le.h \
+ check/crc32_table_be.h
+if COND_ASM_X86
+liblzma_la_SOURCES += check/crc32_x86.S
+else
+liblzma_la_SOURCES += check/crc32_fast.c
+endif
+endif
+endif
+
+if COND_CHECK_CRC64
+if COND_SMALL
+liblzma_la_SOURCES += check/crc64_small.c
+else
+liblzma_la_SOURCES += \
+ check/crc64_table.c \
+ check/crc64_table_le.h \
+ check/crc64_table_be.h
+if COND_ASM_X86
+liblzma_la_SOURCES += check/crc64_x86.S
+else
+liblzma_la_SOURCES += check/crc64_fast.c
+endif
+endif
+endif
+
+if COND_CHECK_SHA256
+liblzma_la_SOURCES += check/sha256.c
+endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/check.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/check.c
new file mode 100644
index 00000000..ef2e6ed7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/check.c
@@ -0,0 +1,176 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file check.c
+/// \brief Single API to access different integrity checks
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "check.h"
+
+
+extern LZMA_API(lzma_bool)
+lzma_check_is_supported(lzma_check type)
+{
+ if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
+ return false;
+
+ static const lzma_bool available_checks[LZMA_CHECK_ID_MAX + 1] = {
+ true, // LZMA_CHECK_NONE
+
+#ifdef HAVE_CHECK_CRC32
+ true,
+#else
+ false,
+#endif
+
+ false, // Reserved
+ false, // Reserved
+
+#ifdef HAVE_CHECK_CRC64
+ true,
+#else
+ false,
+#endif
+
+ false, // Reserved
+ false, // Reserved
+ false, // Reserved
+ false, // Reserved
+ false, // Reserved
+
+#ifdef HAVE_CHECK_SHA256
+ true,
+#else
+ false,
+#endif
+
+ false, // Reserved
+ false, // Reserved
+ false, // Reserved
+ false, // Reserved
+ false, // Reserved
+ };
+
+ return available_checks[(unsigned int)(type)];
+}
+
+
+extern LZMA_API(uint32_t)
+lzma_check_size(lzma_check type)
+{
+ if ((unsigned int)(type) > LZMA_CHECK_ID_MAX)
+ return UINT32_MAX;
+
+ // See file-format.txt section 2.1.1.2.
+ static const uint8_t check_sizes[LZMA_CHECK_ID_MAX + 1] = {
+ 0,
+ 4, 4, 4,
+ 8, 8, 8,
+ 16, 16, 16,
+ 32, 32, 32,
+ 64, 64, 64
+ };
+
+ return check_sizes[(unsigned int)(type)];
+}
+
+
+extern void
+lzma_check_init(lzma_check_state *check, lzma_check type)
+{
+ switch (type) {
+ case LZMA_CHECK_NONE:
+ break;
+
+#ifdef HAVE_CHECK_CRC32
+ case LZMA_CHECK_CRC32:
+ check->state.crc32 = 0;
+ break;
+#endif
+
+#ifdef HAVE_CHECK_CRC64
+ case LZMA_CHECK_CRC64:
+ check->state.crc64 = 0;
+ break;
+#endif
+
+#ifdef HAVE_CHECK_SHA256
+ case LZMA_CHECK_SHA256:
+ lzma_sha256_init(check);
+ break;
+#endif
+
+ default:
+ break;
+ }
+
+ return;
+}
+
+
+extern void
+lzma_check_update(lzma_check_state *check, lzma_check type,
+ const uint8_t *buf, size_t size)
+{
+ switch (type) {
+#ifdef HAVE_CHECK_CRC32
+ case LZMA_CHECK_CRC32:
+ check->state.crc32 = lzma_crc32(buf, size, check->state.crc32);
+ break;
+#endif
+
+#ifdef HAVE_CHECK_CRC64
+ case LZMA_CHECK_CRC64:
+ check->state.crc64 = lzma_crc64(buf, size, check->state.crc64);
+ break;
+#endif
+
+#ifdef HAVE_CHECK_SHA256
+ case LZMA_CHECK_SHA256:
+ lzma_sha256_update(buf, size, check);
+ break;
+#endif
+
+ default:
+ break;
+ }
+
+ return;
+}
+
+
+extern void
+lzma_check_finish(lzma_check_state *check, lzma_check type)
+{
+ switch (type) {
+#ifdef HAVE_CHECK_CRC32
+ case LZMA_CHECK_CRC32:
+ check->buffer.u32[0] = integer_le_32(check->state.crc32);
+ break;
+#endif
+
+#ifdef HAVE_CHECK_CRC64
+ case LZMA_CHECK_CRC64:
+ check->buffer.u64[0] = integer_le_64(check->state.crc64);
+ break;
+#endif
+
+#ifdef HAVE_CHECK_SHA256
+ case LZMA_CHECK_SHA256:
+ lzma_sha256_finish(check);
+ break;
+#endif
+
+ default:
+ break;
+ }
+
+ return;
+}
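
    lzma_check_is_supported() and lzma_check_size() are part of liblzma's
    public API, so their behavior can be observed from application code. A
    small sketch, assuming <lzma.h> is installed, that lists which Check IDs
    this build supports and how many bytes each check occupies in the .xz
    format:

    #include <stdio.h>
    #include <lzma.h>

    int main(void)
    {
        /* Walk through all possible Check IDs and report what this build
           of liblzma supports. */
        for (unsigned i = 0; i <= LZMA_CHECK_ID_MAX; ++i) {
            const lzma_check type = (lzma_check)i;
            if (lzma_check_is_supported(type))
                printf("check %u: supported, %u byte(s)\n",
                       i, (unsigned)lzma_check_size(type));
        }

        return 0;
    }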
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/check.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/check.h
new file mode 100644
index 00000000..2628fdca
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/check.h
@@ -0,0 +1,97 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file check.h
+/// \brief Internal API to different integrity check functions
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_CHECK_H
+#define LZMA_CHECK_H
+
+#include "common.h"
+
+
+// Index hashing needs the best possible hash function (preferably
+// a cryptographic hash) for maximum reliability.
+#if defined(HAVE_CHECK_SHA256)
+# define LZMA_CHECK_BEST LZMA_CHECK_SHA256
+#elif defined(HAVE_CHECK_CRC64)
+# define LZMA_CHECK_BEST LZMA_CHECK_CRC64
+#else
+# define LZMA_CHECK_BEST LZMA_CHECK_CRC32
+#endif
+
+
+/// \brief Structure to hold internal state of the check being calculated
+///
+/// \note This is not in the public API because this structure may
+/// change in the future if new integrity check algorithms are added.
+typedef struct {
+ /// Buffer to hold the final result and a temporary buffer for SHA256.
+ union {
+ uint8_t u8[64];
+ uint32_t u32[16];
+ uint64_t u64[8];
+ } buffer;
+
+ /// Check-specific data
+ union {
+ uint32_t crc32;
+ uint64_t crc64;
+
+ struct {
+ /// Internal state
+ uint32_t state[8];
+
+ /// Size of the message excluding padding
+ uint64_t size;
+ } sha256;
+ } state;
+
+} lzma_check_state;
+
+
+/// lzma_crc32_table[0] is needed by the LZ encoder, so we need to keep
+/// the array two-dimensional.
+#ifdef HAVE_SMALL
+extern uint32_t lzma_crc32_table[1][256];
+extern void lzma_crc32_init(void);
+#else
+extern const uint32_t lzma_crc32_table[8][256];
+extern const uint64_t lzma_crc64_table[4][256];
+#endif
+
+
+/// \brief Initialize *check depending on type
+///
+/// \note The declaration below returns void: unsupported or reserved
+/// check types are silently ignored, so the caller should verify the
+/// type with lzma_check_is_supported() beforehand.
+extern void lzma_check_init(lzma_check_state *check, lzma_check type);
+
+/// Update the check state
+extern void lzma_check_update(lzma_check_state *check, lzma_check type,
+ const uint8_t *buf, size_t size);
+
+/// Finish the check calculation and store the result to check->buffer.u8.
+extern void lzma_check_finish(lzma_check_state *check, lzma_check type);
+
+
+/// Prepare SHA-256 state for new input.
+extern void lzma_sha256_init(lzma_check_state *check);
+
+/// Update the SHA-256 hash state
+extern void lzma_sha256_update(
+ const uint8_t *buf, size_t size, lzma_check_state *check);
+
+/// Finish the SHA-256 calculation and store the result to check->buffer.u8.
+extern void lzma_sha256_finish(lzma_check_state *check);
+
+#endif
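
    The functions declared above are internal to liblzma, but their intended
    call sequence (init, update, finish) can be sketched. This assumes
    HAVE_CHECK_CRC32 is defined and that "check.h" is reachable on the
    include path, as it is for liblzma's own sources; the helper name is
    illustrative only.

    #include <string.h>
    #include "check.h"

    static void crc32_le_bytes(const uint8_t *data, size_t len, uint8_t out[4])
    {
        lzma_check_state state;

        lzma_check_init(&state, LZMA_CHECK_CRC32);
        lzma_check_update(&state, LZMA_CHECK_CRC32, data, len);
        lzma_check_finish(&state, LZMA_CHECK_CRC32);

        /* lzma_check_finish() stores the result into state.buffer.u8 in the
           byte order used by the .xz format (little endian). */
        memcpy(out, state.buffer.u8, 4);
    }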
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_fast.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_fast.c
new file mode 100644
index 00000000..0056a807
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_fast.c
@@ -0,0 +1,84 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file crc32.c
+/// \brief CRC32 calculation
+///
+/// Calculate the CRC32 using the slice-by-eight algorithm.
+/// It is explained in this document:
+/// http://www.intel.com/technology/comms/perfnet/download/CRC_generators.pdf
+/// The code in this file is not the same as in Intel's paper, but
+/// the basic principle is identical.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "check.h"
+#include "crc_macros.h"
+
+
+// If you make any changes, do some benchmarking! Seemingly unrelated
+// changes can very easily ruin the performance (and the effect is very
+// probably compiler dependent).
+extern LZMA_API(uint32_t)
+lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
+{
+ crc = ~crc;
+
+#ifdef WORDS_BIGENDIAN
+ crc = bswap_32(crc);
+#endif
+
+ if (size > 8) {
+ // Fix the alignment, if needed. The if statement above
+ // ensures that this won't read past the end of buf[].
+ while ((uintptr_t)(buf) & 7) {
+ crc = lzma_crc32_table[0][*buf++ ^ A(crc)] ^ S8(crc);
+ --size;
+ }
+
+ // Calculate the position where to stop.
+ const uint8_t *const limit = buf + (size & ~(size_t)(7));
+
+ // Calculate how many bytes must be calculated separately
+ // before returning the result.
+ size &= (size_t)(7);
+
+ // Calculate the CRC32 using the slice-by-eight algorithm.
+ while (buf < limit) {
+ crc ^= *(uint32_t *)(buf);
+ buf += 4;
+
+ crc = lzma_crc32_table[7][A(crc)]
+ ^ lzma_crc32_table[6][B(crc)]
+ ^ lzma_crc32_table[5][C(crc)]
+ ^ lzma_crc32_table[4][D(crc)];
+
+ const uint32_t tmp = *(uint32_t *)(buf);
+ buf += 4;
+
+ // At least with some compilers, it is critical for
+ // performance, that the crc variable is XORed
+ // between the two table-lookup pairs.
+ crc = lzma_crc32_table[3][A(tmp)]
+ ^ lzma_crc32_table[2][B(tmp)]
+ ^ crc
+ ^ lzma_crc32_table[1][C(tmp)]
+ ^ lzma_crc32_table[0][D(tmp)];
+ }
+ }
+
+ while (size-- != 0)
+ crc = lzma_crc32_table[0][*buf++ ^ A(crc)] ^ S8(crc);
+
+#ifdef WORDS_BIGENDIAN
+ crc = bswap_32(crc);
+#endif
+
+ return ~crc;
+}
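
    lzma_crc32() is also exported in the public API, and the crc argument
    lets the running value be carried across calls. A short usage sketch,
    assuming <lzma.h> is available:

    #include <stdio.h>
    #include <string.h>
    #include <lzma.h>

    int main(void)
    {
        const char *msg = "hello, xz";

        /* Start from 0 and chain the running CRC between calls, which is how
           data arriving in pieces can be checksummed. */
        uint32_t crc = lzma_crc32((const uint8_t *)msg, 5, 0);
        crc = lzma_crc32((const uint8_t *)msg + 5, strlen(msg) - 5, crc);

        printf("CRC32 = 0x%08X\n", (unsigned)crc);
        return 0;
    }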
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_small.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_small.c
new file mode 100644
index 00000000..a6036a64
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_small.c
@@ -0,0 +1,63 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file crc32_small.c
+/// \brief CRC32 calculation (size-optimized)
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "check.h"
+
+
+uint32_t lzma_crc32_table[1][256];
+
+
+static void
+crc32_init(void)
+{
+ static const uint32_t poly32 = UINT32_C(0xEDB88320);
+
+ for (size_t b = 0; b < 256; ++b) {
+ uint32_t r = b;
+ for (size_t i = 0; i < 8; ++i) {
+ if (r & 1)
+ r = (r >> 1) ^ poly32;
+ else
+ r >>= 1;
+ }
+
+ lzma_crc32_table[0][b] = r;
+ }
+
+ return;
+}
+
+
+extern void
+lzma_crc32_init(void)
+{
+ mythread_once(crc32_init);
+ return;
+}
+
+
+extern LZMA_API(uint32_t)
+lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
+{
+ lzma_crc32_init();
+
+ crc = ~crc;
+
+ while (size != 0) {
+ crc = lzma_crc32_table[0][*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
+ --size;
+ }
+
+ return ~crc;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table.c
new file mode 100644
index 00000000..11b3d2bb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table.c
@@ -0,0 +1,21 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file crc32_table.c
+/// \brief Precalculated CRC32 table with correct endianness
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+
+#ifdef WORDS_BIGENDIAN
+# include "crc32_table_be.h"
+#else
+# include "crc32_table_le.h"
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table_be.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table_be.h
new file mode 100644
index 00000000..701f4e79
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table_be.h
@@ -0,0 +1,527 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* This file has been automatically generated by crc32_tablegen.c. */
+
+const uint32_t lzma_crc32_table[8][256] = {
+ {
+ 0x00000000, 0x96300777, 0x2C610EEE, 0xBA510999,
+ 0x19C46D07, 0x8FF46A70, 0x35A563E9, 0xA395649E,
+ 0x3288DB0E, 0xA4B8DC79, 0x1EE9D5E0, 0x88D9D297,
+ 0x2B4CB609, 0xBD7CB17E, 0x072DB8E7, 0x911DBF90,
+ 0x6410B71D, 0xF220B06A, 0x4871B9F3, 0xDE41BE84,
+ 0x7DD4DA1A, 0xEBE4DD6D, 0x51B5D4F4, 0xC785D383,
+ 0x56986C13, 0xC0A86B64, 0x7AF962FD, 0xECC9658A,
+ 0x4F5C0114, 0xD96C0663, 0x633D0FFA, 0xF50D088D,
+ 0xC8206E3B, 0x5E10694C, 0xE44160D5, 0x727167A2,
+ 0xD1E4033C, 0x47D4044B, 0xFD850DD2, 0x6BB50AA5,
+ 0xFAA8B535, 0x6C98B242, 0xD6C9BBDB, 0x40F9BCAC,
+ 0xE36CD832, 0x755CDF45, 0xCF0DD6DC, 0x593DD1AB,
+ 0xAC30D926, 0x3A00DE51, 0x8051D7C8, 0x1661D0BF,
+ 0xB5F4B421, 0x23C4B356, 0x9995BACF, 0x0FA5BDB8,
+ 0x9EB80228, 0x0888055F, 0xB2D90CC6, 0x24E90BB1,
+ 0x877C6F2F, 0x114C6858, 0xAB1D61C1, 0x3D2D66B6,
+ 0x9041DC76, 0x0671DB01, 0xBC20D298, 0x2A10D5EF,
+ 0x8985B171, 0x1FB5B606, 0xA5E4BF9F, 0x33D4B8E8,
+ 0xA2C90778, 0x34F9000F, 0x8EA80996, 0x18980EE1,
+ 0xBB0D6A7F, 0x2D3D6D08, 0x976C6491, 0x015C63E6,
+ 0xF4516B6B, 0x62616C1C, 0xD8306585, 0x4E0062F2,
+ 0xED95066C, 0x7BA5011B, 0xC1F40882, 0x57C40FF5,
+ 0xC6D9B065, 0x50E9B712, 0xEAB8BE8B, 0x7C88B9FC,
+ 0xDF1DDD62, 0x492DDA15, 0xF37CD38C, 0x654CD4FB,
+ 0x5861B24D, 0xCE51B53A, 0x7400BCA3, 0xE230BBD4,
+ 0x41A5DF4A, 0xD795D83D, 0x6DC4D1A4, 0xFBF4D6D3,
+ 0x6AE96943, 0xFCD96E34, 0x468867AD, 0xD0B860DA,
+ 0x732D0444, 0xE51D0333, 0x5F4C0AAA, 0xC97C0DDD,
+ 0x3C710550, 0xAA410227, 0x10100BBE, 0x86200CC9,
+ 0x25B56857, 0xB3856F20, 0x09D466B9, 0x9FE461CE,
+ 0x0EF9DE5E, 0x98C9D929, 0x2298D0B0, 0xB4A8D7C7,
+ 0x173DB359, 0x810DB42E, 0x3B5CBDB7, 0xAD6CBAC0,
+ 0x2083B8ED, 0xB6B3BF9A, 0x0CE2B603, 0x9AD2B174,
+ 0x3947D5EA, 0xAF77D29D, 0x1526DB04, 0x8316DC73,
+ 0x120B63E3, 0x843B6494, 0x3E6A6D0D, 0xA85A6A7A,
+ 0x0BCF0EE4, 0x9DFF0993, 0x27AE000A, 0xB19E077D,
+ 0x44930FF0, 0xD2A30887, 0x68F2011E, 0xFEC20669,
+ 0x5D5762F7, 0xCB676580, 0x71366C19, 0xE7066B6E,
+ 0x761BD4FE, 0xE02BD389, 0x5A7ADA10, 0xCC4ADD67,
+ 0x6FDFB9F9, 0xF9EFBE8E, 0x43BEB717, 0xD58EB060,
+ 0xE8A3D6D6, 0x7E93D1A1, 0xC4C2D838, 0x52F2DF4F,
+ 0xF167BBD1, 0x6757BCA6, 0xDD06B53F, 0x4B36B248,
+ 0xDA2B0DD8, 0x4C1B0AAF, 0xF64A0336, 0x607A0441,
+ 0xC3EF60DF, 0x55DF67A8, 0xEF8E6E31, 0x79BE6946,
+ 0x8CB361CB, 0x1A8366BC, 0xA0D26F25, 0x36E26852,
+ 0x95770CCC, 0x03470BBB, 0xB9160222, 0x2F260555,
+ 0xBE3BBAC5, 0x280BBDB2, 0x925AB42B, 0x046AB35C,
+ 0xA7FFD7C2, 0x31CFD0B5, 0x8B9ED92C, 0x1DAEDE5B,
+ 0xB0C2649B, 0x26F263EC, 0x9CA36A75, 0x0A936D02,
+ 0xA906099C, 0x3F360EEB, 0x85670772, 0x13570005,
+ 0x824ABF95, 0x147AB8E2, 0xAE2BB17B, 0x381BB60C,
+ 0x9B8ED292, 0x0DBED5E5, 0xB7EFDC7C, 0x21DFDB0B,
+ 0xD4D2D386, 0x42E2D4F1, 0xF8B3DD68, 0x6E83DA1F,
+ 0xCD16BE81, 0x5B26B9F6, 0xE177B06F, 0x7747B718,
+ 0xE65A0888, 0x706A0FFF, 0xCA3B0666, 0x5C0B0111,
+ 0xFF9E658F, 0x69AE62F8, 0xD3FF6B61, 0x45CF6C16,
+ 0x78E20AA0, 0xEED20DD7, 0x5483044E, 0xC2B30339,
+ 0x612667A7, 0xF71660D0, 0x4D476949, 0xDB776E3E,
+ 0x4A6AD1AE, 0xDC5AD6D9, 0x660BDF40, 0xF03BD837,
+ 0x53AEBCA9, 0xC59EBBDE, 0x7FCFB247, 0xE9FFB530,
+ 0x1CF2BDBD, 0x8AC2BACA, 0x3093B353, 0xA6A3B424,
+ 0x0536D0BA, 0x9306D7CD, 0x2957DE54, 0xBF67D923,
+ 0x2E7A66B3, 0xB84A61C4, 0x021B685D, 0x942B6F2A,
+ 0x37BE0BB4, 0xA18E0CC3, 0x1BDF055A, 0x8DEF022D
+ }, {
+ 0x00000000, 0x41311B19, 0x82623632, 0xC3532D2B,
+ 0x04C56C64, 0x45F4777D, 0x86A75A56, 0xC796414F,
+ 0x088AD9C8, 0x49BBC2D1, 0x8AE8EFFA, 0xCBD9F4E3,
+ 0x0C4FB5AC, 0x4D7EAEB5, 0x8E2D839E, 0xCF1C9887,
+ 0x5112C24A, 0x1023D953, 0xD370F478, 0x9241EF61,
+ 0x55D7AE2E, 0x14E6B537, 0xD7B5981C, 0x96848305,
+ 0x59981B82, 0x18A9009B, 0xDBFA2DB0, 0x9ACB36A9,
+ 0x5D5D77E6, 0x1C6C6CFF, 0xDF3F41D4, 0x9E0E5ACD,
+ 0xA2248495, 0xE3159F8C, 0x2046B2A7, 0x6177A9BE,
+ 0xA6E1E8F1, 0xE7D0F3E8, 0x2483DEC3, 0x65B2C5DA,
+ 0xAAAE5D5D, 0xEB9F4644, 0x28CC6B6F, 0x69FD7076,
+ 0xAE6B3139, 0xEF5A2A20, 0x2C09070B, 0x6D381C12,
+ 0xF33646DF, 0xB2075DC6, 0x715470ED, 0x30656BF4,
+ 0xF7F32ABB, 0xB6C231A2, 0x75911C89, 0x34A00790,
+ 0xFBBC9F17, 0xBA8D840E, 0x79DEA925, 0x38EFB23C,
+ 0xFF79F373, 0xBE48E86A, 0x7D1BC541, 0x3C2ADE58,
+ 0x054F79F0, 0x447E62E9, 0x872D4FC2, 0xC61C54DB,
+ 0x018A1594, 0x40BB0E8D, 0x83E823A6, 0xC2D938BF,
+ 0x0DC5A038, 0x4CF4BB21, 0x8FA7960A, 0xCE968D13,
+ 0x0900CC5C, 0x4831D745, 0x8B62FA6E, 0xCA53E177,
+ 0x545DBBBA, 0x156CA0A3, 0xD63F8D88, 0x970E9691,
+ 0x5098D7DE, 0x11A9CCC7, 0xD2FAE1EC, 0x93CBFAF5,
+ 0x5CD76272, 0x1DE6796B, 0xDEB55440, 0x9F844F59,
+ 0x58120E16, 0x1923150F, 0xDA703824, 0x9B41233D,
+ 0xA76BFD65, 0xE65AE67C, 0x2509CB57, 0x6438D04E,
+ 0xA3AE9101, 0xE29F8A18, 0x21CCA733, 0x60FDBC2A,
+ 0xAFE124AD, 0xEED03FB4, 0x2D83129F, 0x6CB20986,
+ 0xAB2448C9, 0xEA1553D0, 0x29467EFB, 0x687765E2,
+ 0xF6793F2F, 0xB7482436, 0x741B091D, 0x352A1204,
+ 0xF2BC534B, 0xB38D4852, 0x70DE6579, 0x31EF7E60,
+ 0xFEF3E6E7, 0xBFC2FDFE, 0x7C91D0D5, 0x3DA0CBCC,
+ 0xFA368A83, 0xBB07919A, 0x7854BCB1, 0x3965A7A8,
+ 0x4B98833B, 0x0AA99822, 0xC9FAB509, 0x88CBAE10,
+ 0x4F5DEF5F, 0x0E6CF446, 0xCD3FD96D, 0x8C0EC274,
+ 0x43125AF3, 0x022341EA, 0xC1706CC1, 0x804177D8,
+ 0x47D73697, 0x06E62D8E, 0xC5B500A5, 0x84841BBC,
+ 0x1A8A4171, 0x5BBB5A68, 0x98E87743, 0xD9D96C5A,
+ 0x1E4F2D15, 0x5F7E360C, 0x9C2D1B27, 0xDD1C003E,
+ 0x120098B9, 0x533183A0, 0x9062AE8B, 0xD153B592,
+ 0x16C5F4DD, 0x57F4EFC4, 0x94A7C2EF, 0xD596D9F6,
+ 0xE9BC07AE, 0xA88D1CB7, 0x6BDE319C, 0x2AEF2A85,
+ 0xED796BCA, 0xAC4870D3, 0x6F1B5DF8, 0x2E2A46E1,
+ 0xE136DE66, 0xA007C57F, 0x6354E854, 0x2265F34D,
+ 0xE5F3B202, 0xA4C2A91B, 0x67918430, 0x26A09F29,
+ 0xB8AEC5E4, 0xF99FDEFD, 0x3ACCF3D6, 0x7BFDE8CF,
+ 0xBC6BA980, 0xFD5AB299, 0x3E099FB2, 0x7F3884AB,
+ 0xB0241C2C, 0xF1150735, 0x32462A1E, 0x73773107,
+ 0xB4E17048, 0xF5D06B51, 0x3683467A, 0x77B25D63,
+ 0x4ED7FACB, 0x0FE6E1D2, 0xCCB5CCF9, 0x8D84D7E0,
+ 0x4A1296AF, 0x0B238DB6, 0xC870A09D, 0x8941BB84,
+ 0x465D2303, 0x076C381A, 0xC43F1531, 0x850E0E28,
+ 0x42984F67, 0x03A9547E, 0xC0FA7955, 0x81CB624C,
+ 0x1FC53881, 0x5EF42398, 0x9DA70EB3, 0xDC9615AA,
+ 0x1B0054E5, 0x5A314FFC, 0x996262D7, 0xD85379CE,
+ 0x174FE149, 0x567EFA50, 0x952DD77B, 0xD41CCC62,
+ 0x138A8D2D, 0x52BB9634, 0x91E8BB1F, 0xD0D9A006,
+ 0xECF37E5E, 0xADC26547, 0x6E91486C, 0x2FA05375,
+ 0xE836123A, 0xA9070923, 0x6A542408, 0x2B653F11,
+ 0xE479A796, 0xA548BC8F, 0x661B91A4, 0x272A8ABD,
+ 0xE0BCCBF2, 0xA18DD0EB, 0x62DEFDC0, 0x23EFE6D9,
+ 0xBDE1BC14, 0xFCD0A70D, 0x3F838A26, 0x7EB2913F,
+ 0xB924D070, 0xF815CB69, 0x3B46E642, 0x7A77FD5B,
+ 0xB56B65DC, 0xF45A7EC5, 0x370953EE, 0x763848F7,
+ 0xB1AE09B8, 0xF09F12A1, 0x33CC3F8A, 0x72FD2493
+ }, {
+ 0x00000000, 0x376AC201, 0x6ED48403, 0x59BE4602,
+ 0xDCA80907, 0xEBC2CB06, 0xB27C8D04, 0x85164F05,
+ 0xB851130E, 0x8F3BD10F, 0xD685970D, 0xE1EF550C,
+ 0x64F91A09, 0x5393D808, 0x0A2D9E0A, 0x3D475C0B,
+ 0x70A3261C, 0x47C9E41D, 0x1E77A21F, 0x291D601E,
+ 0xAC0B2F1B, 0x9B61ED1A, 0xC2DFAB18, 0xF5B56919,
+ 0xC8F23512, 0xFF98F713, 0xA626B111, 0x914C7310,
+ 0x145A3C15, 0x2330FE14, 0x7A8EB816, 0x4DE47A17,
+ 0xE0464D38, 0xD72C8F39, 0x8E92C93B, 0xB9F80B3A,
+ 0x3CEE443F, 0x0B84863E, 0x523AC03C, 0x6550023D,
+ 0x58175E36, 0x6F7D9C37, 0x36C3DA35, 0x01A91834,
+ 0x84BF5731, 0xB3D59530, 0xEA6BD332, 0xDD011133,
+ 0x90E56B24, 0xA78FA925, 0xFE31EF27, 0xC95B2D26,
+ 0x4C4D6223, 0x7B27A022, 0x2299E620, 0x15F32421,
+ 0x28B4782A, 0x1FDEBA2B, 0x4660FC29, 0x710A3E28,
+ 0xF41C712D, 0xC376B32C, 0x9AC8F52E, 0xADA2372F,
+ 0xC08D9A70, 0xF7E75871, 0xAE591E73, 0x9933DC72,
+ 0x1C259377, 0x2B4F5176, 0x72F11774, 0x459BD575,
+ 0x78DC897E, 0x4FB64B7F, 0x16080D7D, 0x2162CF7C,
+ 0xA4748079, 0x931E4278, 0xCAA0047A, 0xFDCAC67B,
+ 0xB02EBC6C, 0x87447E6D, 0xDEFA386F, 0xE990FA6E,
+ 0x6C86B56B, 0x5BEC776A, 0x02523168, 0x3538F369,
+ 0x087FAF62, 0x3F156D63, 0x66AB2B61, 0x51C1E960,
+ 0xD4D7A665, 0xE3BD6464, 0xBA032266, 0x8D69E067,
+ 0x20CBD748, 0x17A11549, 0x4E1F534B, 0x7975914A,
+ 0xFC63DE4F, 0xCB091C4E, 0x92B75A4C, 0xA5DD984D,
+ 0x989AC446, 0xAFF00647, 0xF64E4045, 0xC1248244,
+ 0x4432CD41, 0x73580F40, 0x2AE64942, 0x1D8C8B43,
+ 0x5068F154, 0x67023355, 0x3EBC7557, 0x09D6B756,
+ 0x8CC0F853, 0xBBAA3A52, 0xE2147C50, 0xD57EBE51,
+ 0xE839E25A, 0xDF53205B, 0x86ED6659, 0xB187A458,
+ 0x3491EB5D, 0x03FB295C, 0x5A456F5E, 0x6D2FAD5F,
+ 0x801B35E1, 0xB771F7E0, 0xEECFB1E2, 0xD9A573E3,
+ 0x5CB33CE6, 0x6BD9FEE7, 0x3267B8E5, 0x050D7AE4,
+ 0x384A26EF, 0x0F20E4EE, 0x569EA2EC, 0x61F460ED,
+ 0xE4E22FE8, 0xD388EDE9, 0x8A36ABEB, 0xBD5C69EA,
+ 0xF0B813FD, 0xC7D2D1FC, 0x9E6C97FE, 0xA90655FF,
+ 0x2C101AFA, 0x1B7AD8FB, 0x42C49EF9, 0x75AE5CF8,
+ 0x48E900F3, 0x7F83C2F2, 0x263D84F0, 0x115746F1,
+ 0x944109F4, 0xA32BCBF5, 0xFA958DF7, 0xCDFF4FF6,
+ 0x605D78D9, 0x5737BAD8, 0x0E89FCDA, 0x39E33EDB,
+ 0xBCF571DE, 0x8B9FB3DF, 0xD221F5DD, 0xE54B37DC,
+ 0xD80C6BD7, 0xEF66A9D6, 0xB6D8EFD4, 0x81B22DD5,
+ 0x04A462D0, 0x33CEA0D1, 0x6A70E6D3, 0x5D1A24D2,
+ 0x10FE5EC5, 0x27949CC4, 0x7E2ADAC6, 0x494018C7,
+ 0xCC5657C2, 0xFB3C95C3, 0xA282D3C1, 0x95E811C0,
+ 0xA8AF4DCB, 0x9FC58FCA, 0xC67BC9C8, 0xF1110BC9,
+ 0x740744CC, 0x436D86CD, 0x1AD3C0CF, 0x2DB902CE,
+ 0x4096AF91, 0x77FC6D90, 0x2E422B92, 0x1928E993,
+ 0x9C3EA696, 0xAB546497, 0xF2EA2295, 0xC580E094,
+ 0xF8C7BC9F, 0xCFAD7E9E, 0x9613389C, 0xA179FA9D,
+ 0x246FB598, 0x13057799, 0x4ABB319B, 0x7DD1F39A,
+ 0x3035898D, 0x075F4B8C, 0x5EE10D8E, 0x698BCF8F,
+ 0xEC9D808A, 0xDBF7428B, 0x82490489, 0xB523C688,
+ 0x88649A83, 0xBF0E5882, 0xE6B01E80, 0xD1DADC81,
+ 0x54CC9384, 0x63A65185, 0x3A181787, 0x0D72D586,
+ 0xA0D0E2A9, 0x97BA20A8, 0xCE0466AA, 0xF96EA4AB,
+ 0x7C78EBAE, 0x4B1229AF, 0x12AC6FAD, 0x25C6ADAC,
+ 0x1881F1A7, 0x2FEB33A6, 0x765575A4, 0x413FB7A5,
+ 0xC429F8A0, 0xF3433AA1, 0xAAFD7CA3, 0x9D97BEA2,
+ 0xD073C4B5, 0xE71906B4, 0xBEA740B6, 0x89CD82B7,
+ 0x0CDBCDB2, 0x3BB10FB3, 0x620F49B1, 0x55658BB0,
+ 0x6822D7BB, 0x5F4815BA, 0x06F653B8, 0x319C91B9,
+ 0xB48ADEBC, 0x83E01CBD, 0xDA5E5ABF, 0xED3498BE
+ }, {
+ 0x00000000, 0x6567BCB8, 0x8BC809AA, 0xEEAFB512,
+ 0x5797628F, 0x32F0DE37, 0xDC5F6B25, 0xB938D79D,
+ 0xEF28B4C5, 0x8A4F087D, 0x64E0BD6F, 0x018701D7,
+ 0xB8BFD64A, 0xDDD86AF2, 0x3377DFE0, 0x56106358,
+ 0x9F571950, 0xFA30A5E8, 0x149F10FA, 0x71F8AC42,
+ 0xC8C07BDF, 0xADA7C767, 0x43087275, 0x266FCECD,
+ 0x707FAD95, 0x1518112D, 0xFBB7A43F, 0x9ED01887,
+ 0x27E8CF1A, 0x428F73A2, 0xAC20C6B0, 0xC9477A08,
+ 0x3EAF32A0, 0x5BC88E18, 0xB5673B0A, 0xD00087B2,
+ 0x6938502F, 0x0C5FEC97, 0xE2F05985, 0x8797E53D,
+ 0xD1878665, 0xB4E03ADD, 0x5A4F8FCF, 0x3F283377,
+ 0x8610E4EA, 0xE3775852, 0x0DD8ED40, 0x68BF51F8,
+ 0xA1F82BF0, 0xC49F9748, 0x2A30225A, 0x4F579EE2,
+ 0xF66F497F, 0x9308F5C7, 0x7DA740D5, 0x18C0FC6D,
+ 0x4ED09F35, 0x2BB7238D, 0xC518969F, 0xA07F2A27,
+ 0x1947FDBA, 0x7C204102, 0x928FF410, 0xF7E848A8,
+ 0x3D58149B, 0x583FA823, 0xB6901D31, 0xD3F7A189,
+ 0x6ACF7614, 0x0FA8CAAC, 0xE1077FBE, 0x8460C306,
+ 0xD270A05E, 0xB7171CE6, 0x59B8A9F4, 0x3CDF154C,
+ 0x85E7C2D1, 0xE0807E69, 0x0E2FCB7B, 0x6B4877C3,
+ 0xA20F0DCB, 0xC768B173, 0x29C70461, 0x4CA0B8D9,
+ 0xF5986F44, 0x90FFD3FC, 0x7E5066EE, 0x1B37DA56,
+ 0x4D27B90E, 0x284005B6, 0xC6EFB0A4, 0xA3880C1C,
+ 0x1AB0DB81, 0x7FD76739, 0x9178D22B, 0xF41F6E93,
+ 0x03F7263B, 0x66909A83, 0x883F2F91, 0xED589329,
+ 0x546044B4, 0x3107F80C, 0xDFA84D1E, 0xBACFF1A6,
+ 0xECDF92FE, 0x89B82E46, 0x67179B54, 0x027027EC,
+ 0xBB48F071, 0xDE2F4CC9, 0x3080F9DB, 0x55E74563,
+ 0x9CA03F6B, 0xF9C783D3, 0x176836C1, 0x720F8A79,
+ 0xCB375DE4, 0xAE50E15C, 0x40FF544E, 0x2598E8F6,
+ 0x73888BAE, 0x16EF3716, 0xF8408204, 0x9D273EBC,
+ 0x241FE921, 0x41785599, 0xAFD7E08B, 0xCAB05C33,
+ 0x3BB659ED, 0x5ED1E555, 0xB07E5047, 0xD519ECFF,
+ 0x6C213B62, 0x094687DA, 0xE7E932C8, 0x828E8E70,
+ 0xD49EED28, 0xB1F95190, 0x5F56E482, 0x3A31583A,
+ 0x83098FA7, 0xE66E331F, 0x08C1860D, 0x6DA63AB5,
+ 0xA4E140BD, 0xC186FC05, 0x2F294917, 0x4A4EF5AF,
+ 0xF3762232, 0x96119E8A, 0x78BE2B98, 0x1DD99720,
+ 0x4BC9F478, 0x2EAE48C0, 0xC001FDD2, 0xA566416A,
+ 0x1C5E96F7, 0x79392A4F, 0x97969F5D, 0xF2F123E5,
+ 0x05196B4D, 0x607ED7F5, 0x8ED162E7, 0xEBB6DE5F,
+ 0x528E09C2, 0x37E9B57A, 0xD9460068, 0xBC21BCD0,
+ 0xEA31DF88, 0x8F566330, 0x61F9D622, 0x049E6A9A,
+ 0xBDA6BD07, 0xD8C101BF, 0x366EB4AD, 0x53090815,
+ 0x9A4E721D, 0xFF29CEA5, 0x11867BB7, 0x74E1C70F,
+ 0xCDD91092, 0xA8BEAC2A, 0x46111938, 0x2376A580,
+ 0x7566C6D8, 0x10017A60, 0xFEAECF72, 0x9BC973CA,
+ 0x22F1A457, 0x479618EF, 0xA939ADFD, 0xCC5E1145,
+ 0x06EE4D76, 0x6389F1CE, 0x8D2644DC, 0xE841F864,
+ 0x51792FF9, 0x341E9341, 0xDAB12653, 0xBFD69AEB,
+ 0xE9C6F9B3, 0x8CA1450B, 0x620EF019, 0x07694CA1,
+ 0xBE519B3C, 0xDB362784, 0x35999296, 0x50FE2E2E,
+ 0x99B95426, 0xFCDEE89E, 0x12715D8C, 0x7716E134,
+ 0xCE2E36A9, 0xAB498A11, 0x45E63F03, 0x208183BB,
+ 0x7691E0E3, 0x13F65C5B, 0xFD59E949, 0x983E55F1,
+ 0x2106826C, 0x44613ED4, 0xAACE8BC6, 0xCFA9377E,
+ 0x38417FD6, 0x5D26C36E, 0xB389767C, 0xD6EECAC4,
+ 0x6FD61D59, 0x0AB1A1E1, 0xE41E14F3, 0x8179A84B,
+ 0xD769CB13, 0xB20E77AB, 0x5CA1C2B9, 0x39C67E01,
+ 0x80FEA99C, 0xE5991524, 0x0B36A036, 0x6E511C8E,
+ 0xA7166686, 0xC271DA3E, 0x2CDE6F2C, 0x49B9D394,
+ 0xF0810409, 0x95E6B8B1, 0x7B490DA3, 0x1E2EB11B,
+ 0x483ED243, 0x2D596EFB, 0xC3F6DBE9, 0xA6916751,
+ 0x1FA9B0CC, 0x7ACE0C74, 0x9461B966, 0xF10605DE
+ }, {
+ 0x00000000, 0xB029603D, 0x6053C07A, 0xD07AA047,
+ 0xC0A680F5, 0x708FE0C8, 0xA0F5408F, 0x10DC20B2,
+ 0xC14B7030, 0x7162100D, 0xA118B04A, 0x1131D077,
+ 0x01EDF0C5, 0xB1C490F8, 0x61BE30BF, 0xD1975082,
+ 0x8297E060, 0x32BE805D, 0xE2C4201A, 0x52ED4027,
+ 0x42316095, 0xF21800A8, 0x2262A0EF, 0x924BC0D2,
+ 0x43DC9050, 0xF3F5F06D, 0x238F502A, 0x93A63017,
+ 0x837A10A5, 0x33537098, 0xE329D0DF, 0x5300B0E2,
+ 0x042FC1C1, 0xB406A1FC, 0x647C01BB, 0xD4556186,
+ 0xC4894134, 0x74A02109, 0xA4DA814E, 0x14F3E173,
+ 0xC564B1F1, 0x754DD1CC, 0xA537718B, 0x151E11B6,
+ 0x05C23104, 0xB5EB5139, 0x6591F17E, 0xD5B89143,
+ 0x86B821A1, 0x3691419C, 0xE6EBE1DB, 0x56C281E6,
+ 0x461EA154, 0xF637C169, 0x264D612E, 0x96640113,
+ 0x47F35191, 0xF7DA31AC, 0x27A091EB, 0x9789F1D6,
+ 0x8755D164, 0x377CB159, 0xE706111E, 0x572F7123,
+ 0x4958F358, 0xF9719365, 0x290B3322, 0x9922531F,
+ 0x89FE73AD, 0x39D71390, 0xE9ADB3D7, 0x5984D3EA,
+ 0x88138368, 0x383AE355, 0xE8404312, 0x5869232F,
+ 0x48B5039D, 0xF89C63A0, 0x28E6C3E7, 0x98CFA3DA,
+ 0xCBCF1338, 0x7BE67305, 0xAB9CD342, 0x1BB5B37F,
+ 0x0B6993CD, 0xBB40F3F0, 0x6B3A53B7, 0xDB13338A,
+ 0x0A846308, 0xBAAD0335, 0x6AD7A372, 0xDAFEC34F,
+ 0xCA22E3FD, 0x7A0B83C0, 0xAA712387, 0x1A5843BA,
+ 0x4D773299, 0xFD5E52A4, 0x2D24F2E3, 0x9D0D92DE,
+ 0x8DD1B26C, 0x3DF8D251, 0xED827216, 0x5DAB122B,
+ 0x8C3C42A9, 0x3C152294, 0xEC6F82D3, 0x5C46E2EE,
+ 0x4C9AC25C, 0xFCB3A261, 0x2CC90226, 0x9CE0621B,
+ 0xCFE0D2F9, 0x7FC9B2C4, 0xAFB31283, 0x1F9A72BE,
+ 0x0F46520C, 0xBF6F3231, 0x6F159276, 0xDF3CF24B,
+ 0x0EABA2C9, 0xBE82C2F4, 0x6EF862B3, 0xDED1028E,
+ 0xCE0D223C, 0x7E244201, 0xAE5EE246, 0x1E77827B,
+ 0x92B0E6B1, 0x2299868C, 0xF2E326CB, 0x42CA46F6,
+ 0x52166644, 0xE23F0679, 0x3245A63E, 0x826CC603,
+ 0x53FB9681, 0xE3D2F6BC, 0x33A856FB, 0x838136C6,
+ 0x935D1674, 0x23747649, 0xF30ED60E, 0x4327B633,
+ 0x102706D1, 0xA00E66EC, 0x7074C6AB, 0xC05DA696,
+ 0xD0818624, 0x60A8E619, 0xB0D2465E, 0x00FB2663,
+ 0xD16C76E1, 0x614516DC, 0xB13FB69B, 0x0116D6A6,
+ 0x11CAF614, 0xA1E39629, 0x7199366E, 0xC1B05653,
+ 0x969F2770, 0x26B6474D, 0xF6CCE70A, 0x46E58737,
+ 0x5639A785, 0xE610C7B8, 0x366A67FF, 0x864307C2,
+ 0x57D45740, 0xE7FD377D, 0x3787973A, 0x87AEF707,
+ 0x9772D7B5, 0x275BB788, 0xF72117CF, 0x470877F2,
+ 0x1408C710, 0xA421A72D, 0x745B076A, 0xC4726757,
+ 0xD4AE47E5, 0x648727D8, 0xB4FD879F, 0x04D4E7A2,
+ 0xD543B720, 0x656AD71D, 0xB510775A, 0x05391767,
+ 0x15E537D5, 0xA5CC57E8, 0x75B6F7AF, 0xC59F9792,
+ 0xDBE815E9, 0x6BC175D4, 0xBBBBD593, 0x0B92B5AE,
+ 0x1B4E951C, 0xAB67F521, 0x7B1D5566, 0xCB34355B,
+ 0x1AA365D9, 0xAA8A05E4, 0x7AF0A5A3, 0xCAD9C59E,
+ 0xDA05E52C, 0x6A2C8511, 0xBA562556, 0x0A7F456B,
+ 0x597FF589, 0xE95695B4, 0x392C35F3, 0x890555CE,
+ 0x99D9757C, 0x29F01541, 0xF98AB506, 0x49A3D53B,
+ 0x983485B9, 0x281DE584, 0xF86745C3, 0x484E25FE,
+ 0x5892054C, 0xE8BB6571, 0x38C1C536, 0x88E8A50B,
+ 0xDFC7D428, 0x6FEEB415, 0xBF941452, 0x0FBD746F,
+ 0x1F6154DD, 0xAF4834E0, 0x7F3294A7, 0xCF1BF49A,
+ 0x1E8CA418, 0xAEA5C425, 0x7EDF6462, 0xCEF6045F,
+ 0xDE2A24ED, 0x6E0344D0, 0xBE79E497, 0x0E5084AA,
+ 0x5D503448, 0xED795475, 0x3D03F432, 0x8D2A940F,
+ 0x9DF6B4BD, 0x2DDFD480, 0xFDA574C7, 0x4D8C14FA,
+ 0x9C1B4478, 0x2C322445, 0xFC488402, 0x4C61E43F,
+ 0x5CBDC48D, 0xEC94A4B0, 0x3CEE04F7, 0x8CC764CA
+ }, {
+ 0x00000000, 0xA5D35CCB, 0x0BA1C84D, 0xAE729486,
+ 0x1642919B, 0xB391CD50, 0x1DE359D6, 0xB830051D,
+ 0x6D8253EC, 0xC8510F27, 0x66239BA1, 0xC3F0C76A,
+ 0x7BC0C277, 0xDE139EBC, 0x70610A3A, 0xD5B256F1,
+ 0x9B02D603, 0x3ED18AC8, 0x90A31E4E, 0x35704285,
+ 0x8D404798, 0x28931B53, 0x86E18FD5, 0x2332D31E,
+ 0xF68085EF, 0x5353D924, 0xFD214DA2, 0x58F21169,
+ 0xE0C21474, 0x451148BF, 0xEB63DC39, 0x4EB080F2,
+ 0x3605AC07, 0x93D6F0CC, 0x3DA4644A, 0x98773881,
+ 0x20473D9C, 0x85946157, 0x2BE6F5D1, 0x8E35A91A,
+ 0x5B87FFEB, 0xFE54A320, 0x502637A6, 0xF5F56B6D,
+ 0x4DC56E70, 0xE81632BB, 0x4664A63D, 0xE3B7FAF6,
+ 0xAD077A04, 0x08D426CF, 0xA6A6B249, 0x0375EE82,
+ 0xBB45EB9F, 0x1E96B754, 0xB0E423D2, 0x15377F19,
+ 0xC08529E8, 0x65567523, 0xCB24E1A5, 0x6EF7BD6E,
+ 0xD6C7B873, 0x7314E4B8, 0xDD66703E, 0x78B52CF5,
+ 0x6C0A580F, 0xC9D904C4, 0x67AB9042, 0xC278CC89,
+ 0x7A48C994, 0xDF9B955F, 0x71E901D9, 0xD43A5D12,
+ 0x01880BE3, 0xA45B5728, 0x0A29C3AE, 0xAFFA9F65,
+ 0x17CA9A78, 0xB219C6B3, 0x1C6B5235, 0xB9B80EFE,
+ 0xF7088E0C, 0x52DBD2C7, 0xFCA94641, 0x597A1A8A,
+ 0xE14A1F97, 0x4499435C, 0xEAEBD7DA, 0x4F388B11,
+ 0x9A8ADDE0, 0x3F59812B, 0x912B15AD, 0x34F84966,
+ 0x8CC84C7B, 0x291B10B0, 0x87698436, 0x22BAD8FD,
+ 0x5A0FF408, 0xFFDCA8C3, 0x51AE3C45, 0xF47D608E,
+ 0x4C4D6593, 0xE99E3958, 0x47ECADDE, 0xE23FF115,
+ 0x378DA7E4, 0x925EFB2F, 0x3C2C6FA9, 0x99FF3362,
+ 0x21CF367F, 0x841C6AB4, 0x2A6EFE32, 0x8FBDA2F9,
+ 0xC10D220B, 0x64DE7EC0, 0xCAACEA46, 0x6F7FB68D,
+ 0xD74FB390, 0x729CEF5B, 0xDCEE7BDD, 0x793D2716,
+ 0xAC8F71E7, 0x095C2D2C, 0xA72EB9AA, 0x02FDE561,
+ 0xBACDE07C, 0x1F1EBCB7, 0xB16C2831, 0x14BF74FA,
+ 0xD814B01E, 0x7DC7ECD5, 0xD3B57853, 0x76662498,
+ 0xCE562185, 0x6B857D4E, 0xC5F7E9C8, 0x6024B503,
+ 0xB596E3F2, 0x1045BF39, 0xBE372BBF, 0x1BE47774,
+ 0xA3D47269, 0x06072EA2, 0xA875BA24, 0x0DA6E6EF,
+ 0x4316661D, 0xE6C53AD6, 0x48B7AE50, 0xED64F29B,
+ 0x5554F786, 0xF087AB4D, 0x5EF53FCB, 0xFB266300,
+ 0x2E9435F1, 0x8B47693A, 0x2535FDBC, 0x80E6A177,
+ 0x38D6A46A, 0x9D05F8A1, 0x33776C27, 0x96A430EC,
+ 0xEE111C19, 0x4BC240D2, 0xE5B0D454, 0x4063889F,
+ 0xF8538D82, 0x5D80D149, 0xF3F245CF, 0x56211904,
+ 0x83934FF5, 0x2640133E, 0x883287B8, 0x2DE1DB73,
+ 0x95D1DE6E, 0x300282A5, 0x9E701623, 0x3BA34AE8,
+ 0x7513CA1A, 0xD0C096D1, 0x7EB20257, 0xDB615E9C,
+ 0x63515B81, 0xC682074A, 0x68F093CC, 0xCD23CF07,
+ 0x189199F6, 0xBD42C53D, 0x133051BB, 0xB6E30D70,
+ 0x0ED3086D, 0xAB0054A6, 0x0572C020, 0xA0A19CEB,
+ 0xB41EE811, 0x11CDB4DA, 0xBFBF205C, 0x1A6C7C97,
+ 0xA25C798A, 0x078F2541, 0xA9FDB1C7, 0x0C2EED0C,
+ 0xD99CBBFD, 0x7C4FE736, 0xD23D73B0, 0x77EE2F7B,
+ 0xCFDE2A66, 0x6A0D76AD, 0xC47FE22B, 0x61ACBEE0,
+ 0x2F1C3E12, 0x8ACF62D9, 0x24BDF65F, 0x816EAA94,
+ 0x395EAF89, 0x9C8DF342, 0x32FF67C4, 0x972C3B0F,
+ 0x429E6DFE, 0xE74D3135, 0x493FA5B3, 0xECECF978,
+ 0x54DCFC65, 0xF10FA0AE, 0x5F7D3428, 0xFAAE68E3,
+ 0x821B4416, 0x27C818DD, 0x89BA8C5B, 0x2C69D090,
+ 0x9459D58D, 0x318A8946, 0x9FF81DC0, 0x3A2B410B,
+ 0xEF9917FA, 0x4A4A4B31, 0xE438DFB7, 0x41EB837C,
+ 0xF9DB8661, 0x5C08DAAA, 0xF27A4E2C, 0x57A912E7,
+ 0x19199215, 0xBCCACEDE, 0x12B85A58, 0xB76B0693,
+ 0x0F5B038E, 0xAA885F45, 0x04FACBC3, 0xA1299708,
+ 0x749BC1F9, 0xD1489D32, 0x7F3A09B4, 0xDAE9557F,
+ 0x62D95062, 0xC70A0CA9, 0x6978982F, 0xCCABC4E4
+ }, {
+ 0x00000000, 0xB40B77A6, 0x29119F97, 0x9D1AE831,
+ 0x13244FF4, 0xA72F3852, 0x3A35D063, 0x8E3EA7C5,
+ 0x674EEF33, 0xD3459895, 0x4E5F70A4, 0xFA540702,
+ 0x746AA0C7, 0xC061D761, 0x5D7B3F50, 0xE97048F6,
+ 0xCE9CDE67, 0x7A97A9C1, 0xE78D41F0, 0x53863656,
+ 0xDDB89193, 0x69B3E635, 0xF4A90E04, 0x40A279A2,
+ 0xA9D23154, 0x1DD946F2, 0x80C3AEC3, 0x34C8D965,
+ 0xBAF67EA0, 0x0EFD0906, 0x93E7E137, 0x27EC9691,
+ 0x9C39BDCF, 0x2832CA69, 0xB5282258, 0x012355FE,
+ 0x8F1DF23B, 0x3B16859D, 0xA60C6DAC, 0x12071A0A,
+ 0xFB7752FC, 0x4F7C255A, 0xD266CD6B, 0x666DBACD,
+ 0xE8531D08, 0x5C586AAE, 0xC142829F, 0x7549F539,
+ 0x52A563A8, 0xE6AE140E, 0x7BB4FC3F, 0xCFBF8B99,
+ 0x41812C5C, 0xF58A5BFA, 0x6890B3CB, 0xDC9BC46D,
+ 0x35EB8C9B, 0x81E0FB3D, 0x1CFA130C, 0xA8F164AA,
+ 0x26CFC36F, 0x92C4B4C9, 0x0FDE5CF8, 0xBBD52B5E,
+ 0x79750B44, 0xCD7E7CE2, 0x506494D3, 0xE46FE375,
+ 0x6A5144B0, 0xDE5A3316, 0x4340DB27, 0xF74BAC81,
+ 0x1E3BE477, 0xAA3093D1, 0x372A7BE0, 0x83210C46,
+ 0x0D1FAB83, 0xB914DC25, 0x240E3414, 0x900543B2,
+ 0xB7E9D523, 0x03E2A285, 0x9EF84AB4, 0x2AF33D12,
+ 0xA4CD9AD7, 0x10C6ED71, 0x8DDC0540, 0x39D772E6,
+ 0xD0A73A10, 0x64AC4DB6, 0xF9B6A587, 0x4DBDD221,
+ 0xC38375E4, 0x77880242, 0xEA92EA73, 0x5E999DD5,
+ 0xE54CB68B, 0x5147C12D, 0xCC5D291C, 0x78565EBA,
+ 0xF668F97F, 0x42638ED9, 0xDF7966E8, 0x6B72114E,
+ 0x820259B8, 0x36092E1E, 0xAB13C62F, 0x1F18B189,
+ 0x9126164C, 0x252D61EA, 0xB83789DB, 0x0C3CFE7D,
+ 0x2BD068EC, 0x9FDB1F4A, 0x02C1F77B, 0xB6CA80DD,
+ 0x38F42718, 0x8CFF50BE, 0x11E5B88F, 0xA5EECF29,
+ 0x4C9E87DF, 0xF895F079, 0x658F1848, 0xD1846FEE,
+ 0x5FBAC82B, 0xEBB1BF8D, 0x76AB57BC, 0xC2A0201A,
+ 0xF2EA1688, 0x46E1612E, 0xDBFB891F, 0x6FF0FEB9,
+ 0xE1CE597C, 0x55C52EDA, 0xC8DFC6EB, 0x7CD4B14D,
+ 0x95A4F9BB, 0x21AF8E1D, 0xBCB5662C, 0x08BE118A,
+ 0x8680B64F, 0x328BC1E9, 0xAF9129D8, 0x1B9A5E7E,
+ 0x3C76C8EF, 0x887DBF49, 0x15675778, 0xA16C20DE,
+ 0x2F52871B, 0x9B59F0BD, 0x0643188C, 0xB2486F2A,
+ 0x5B3827DC, 0xEF33507A, 0x7229B84B, 0xC622CFED,
+ 0x481C6828, 0xFC171F8E, 0x610DF7BF, 0xD5068019,
+ 0x6ED3AB47, 0xDAD8DCE1, 0x47C234D0, 0xF3C94376,
+ 0x7DF7E4B3, 0xC9FC9315, 0x54E67B24, 0xE0ED0C82,
+ 0x099D4474, 0xBD9633D2, 0x208CDBE3, 0x9487AC45,
+ 0x1AB90B80, 0xAEB27C26, 0x33A89417, 0x87A3E3B1,
+ 0xA04F7520, 0x14440286, 0x895EEAB7, 0x3D559D11,
+ 0xB36B3AD4, 0x07604D72, 0x9A7AA543, 0x2E71D2E5,
+ 0xC7019A13, 0x730AEDB5, 0xEE100584, 0x5A1B7222,
+ 0xD425D5E7, 0x602EA241, 0xFD344A70, 0x493F3DD6,
+ 0x8B9F1DCC, 0x3F946A6A, 0xA28E825B, 0x1685F5FD,
+ 0x98BB5238, 0x2CB0259E, 0xB1AACDAF, 0x05A1BA09,
+ 0xECD1F2FF, 0x58DA8559, 0xC5C06D68, 0x71CB1ACE,
+ 0xFFF5BD0B, 0x4BFECAAD, 0xD6E4229C, 0x62EF553A,
+ 0x4503C3AB, 0xF108B40D, 0x6C125C3C, 0xD8192B9A,
+ 0x56278C5F, 0xE22CFBF9, 0x7F3613C8, 0xCB3D646E,
+ 0x224D2C98, 0x96465B3E, 0x0B5CB30F, 0xBF57C4A9,
+ 0x3169636C, 0x856214CA, 0x1878FCFB, 0xAC738B5D,
+ 0x17A6A003, 0xA3ADD7A5, 0x3EB73F94, 0x8ABC4832,
+ 0x0482EFF7, 0xB0899851, 0x2D937060, 0x999807C6,
+ 0x70E84F30, 0xC4E33896, 0x59F9D0A7, 0xEDF2A701,
+ 0x63CC00C4, 0xD7C77762, 0x4ADD9F53, 0xFED6E8F5,
+ 0xD93A7E64, 0x6D3109C2, 0xF02BE1F3, 0x44209655,
+ 0xCA1E3190, 0x7E154636, 0xE30FAE07, 0x5704D9A1,
+ 0xBE749157, 0x0A7FE6F1, 0x97650EC0, 0x236E7966,
+ 0xAD50DEA3, 0x195BA905, 0x84414134, 0x304A3692
+ }, {
+ 0x00000000, 0x9E00AACC, 0x7D072542, 0xE3078F8E,
+ 0xFA0E4A84, 0x640EE048, 0x87096FC6, 0x1909C50A,
+ 0xB51BE5D3, 0x2B1B4F1F, 0xC81CC091, 0x561C6A5D,
+ 0x4F15AF57, 0xD115059B, 0x32128A15, 0xAC1220D9,
+ 0x2B31BB7C, 0xB53111B0, 0x56369E3E, 0xC83634F2,
+ 0xD13FF1F8, 0x4F3F5B34, 0xAC38D4BA, 0x32387E76,
+ 0x9E2A5EAF, 0x002AF463, 0xE32D7BED, 0x7D2DD121,
+ 0x6424142B, 0xFA24BEE7, 0x19233169, 0x87239BA5,
+ 0x566276F9, 0xC862DC35, 0x2B6553BB, 0xB565F977,
+ 0xAC6C3C7D, 0x326C96B1, 0xD16B193F, 0x4F6BB3F3,
+ 0xE379932A, 0x7D7939E6, 0x9E7EB668, 0x007E1CA4,
+ 0x1977D9AE, 0x87777362, 0x6470FCEC, 0xFA705620,
+ 0x7D53CD85, 0xE3536749, 0x0054E8C7, 0x9E54420B,
+ 0x875D8701, 0x195D2DCD, 0xFA5AA243, 0x645A088F,
+ 0xC8482856, 0x5648829A, 0xB54F0D14, 0x2B4FA7D8,
+ 0x324662D2, 0xAC46C81E, 0x4F414790, 0xD141ED5C,
+ 0xEDC29D29, 0x73C237E5, 0x90C5B86B, 0x0EC512A7,
+ 0x17CCD7AD, 0x89CC7D61, 0x6ACBF2EF, 0xF4CB5823,
+ 0x58D978FA, 0xC6D9D236, 0x25DE5DB8, 0xBBDEF774,
+ 0xA2D7327E, 0x3CD798B2, 0xDFD0173C, 0x41D0BDF0,
+ 0xC6F32655, 0x58F38C99, 0xBBF40317, 0x25F4A9DB,
+ 0x3CFD6CD1, 0xA2FDC61D, 0x41FA4993, 0xDFFAE35F,
+ 0x73E8C386, 0xEDE8694A, 0x0EEFE6C4, 0x90EF4C08,
+ 0x89E68902, 0x17E623CE, 0xF4E1AC40, 0x6AE1068C,
+ 0xBBA0EBD0, 0x25A0411C, 0xC6A7CE92, 0x58A7645E,
+ 0x41AEA154, 0xDFAE0B98, 0x3CA98416, 0xA2A92EDA,
+ 0x0EBB0E03, 0x90BBA4CF, 0x73BC2B41, 0xEDBC818D,
+ 0xF4B54487, 0x6AB5EE4B, 0x89B261C5, 0x17B2CB09,
+ 0x909150AC, 0x0E91FA60, 0xED9675EE, 0x7396DF22,
+ 0x6A9F1A28, 0xF49FB0E4, 0x17983F6A, 0x899895A6,
+ 0x258AB57F, 0xBB8A1FB3, 0x588D903D, 0xC68D3AF1,
+ 0xDF84FFFB, 0x41845537, 0xA283DAB9, 0x3C837075,
+ 0xDA853B53, 0x4485919F, 0xA7821E11, 0x3982B4DD,
+ 0x208B71D7, 0xBE8BDB1B, 0x5D8C5495, 0xC38CFE59,
+ 0x6F9EDE80, 0xF19E744C, 0x1299FBC2, 0x8C99510E,
+ 0x95909404, 0x0B903EC8, 0xE897B146, 0x76971B8A,
+ 0xF1B4802F, 0x6FB42AE3, 0x8CB3A56D, 0x12B30FA1,
+ 0x0BBACAAB, 0x95BA6067, 0x76BDEFE9, 0xE8BD4525,
+ 0x44AF65FC, 0xDAAFCF30, 0x39A840BE, 0xA7A8EA72,
+ 0xBEA12F78, 0x20A185B4, 0xC3A60A3A, 0x5DA6A0F6,
+ 0x8CE74DAA, 0x12E7E766, 0xF1E068E8, 0x6FE0C224,
+ 0x76E9072E, 0xE8E9ADE2, 0x0BEE226C, 0x95EE88A0,
+ 0x39FCA879, 0xA7FC02B5, 0x44FB8D3B, 0xDAFB27F7,
+ 0xC3F2E2FD, 0x5DF24831, 0xBEF5C7BF, 0x20F56D73,
+ 0xA7D6F6D6, 0x39D65C1A, 0xDAD1D394, 0x44D17958,
+ 0x5DD8BC52, 0xC3D8169E, 0x20DF9910, 0xBEDF33DC,
+ 0x12CD1305, 0x8CCDB9C9, 0x6FCA3647, 0xF1CA9C8B,
+ 0xE8C35981, 0x76C3F34D, 0x95C47CC3, 0x0BC4D60F,
+ 0x3747A67A, 0xA9470CB6, 0x4A408338, 0xD44029F4,
+ 0xCD49ECFE, 0x53494632, 0xB04EC9BC, 0x2E4E6370,
+ 0x825C43A9, 0x1C5CE965, 0xFF5B66EB, 0x615BCC27,
+ 0x7852092D, 0xE652A3E1, 0x05552C6F, 0x9B5586A3,
+ 0x1C761D06, 0x8276B7CA, 0x61713844, 0xFF719288,
+ 0xE6785782, 0x7878FD4E, 0x9B7F72C0, 0x057FD80C,
+ 0xA96DF8D5, 0x376D5219, 0xD46ADD97, 0x4A6A775B,
+ 0x5363B251, 0xCD63189D, 0x2E649713, 0xB0643DDF,
+ 0x6125D083, 0xFF257A4F, 0x1C22F5C1, 0x82225F0D,
+ 0x9B2B9A07, 0x052B30CB, 0xE62CBF45, 0x782C1589,
+ 0xD43E3550, 0x4A3E9F9C, 0xA9391012, 0x3739BADE,
+ 0x2E307FD4, 0xB030D518, 0x53375A96, 0xCD37F05A,
+ 0x4A146BFF, 0xD414C133, 0x37134EBD, 0xA913E471,
+ 0xB01A217B, 0x2E1A8BB7, 0xCD1D0439, 0x531DAEF5,
+ 0xFF0F8E2C, 0x610F24E0, 0x8208AB6E, 0x1C0801A2,
+ 0x0501C4A8, 0x9B016E64, 0x7806E1EA, 0xE6064B26
+ }
+};
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table_le.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table_le.h
new file mode 100644
index 00000000..c69174e2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_table_le.h
@@ -0,0 +1,527 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* This file has been automatically generated by crc32_tablegen.c. */
+
+const uint32_t lzma_crc32_table[8][256] = {
+ {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+ 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+ 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+ 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+ 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+ 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+ 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+ 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+ 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+ 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+ 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+ 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+ 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+ 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+ 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+ 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+ 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+ 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+ 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+ }, {
+ 0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3,
+ 0x646CC504, 0x7D77F445, 0x565AA786, 0x4F4196C7,
+ 0xC8D98A08, 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB,
+ 0xACB54F0C, 0xB5AE7E4D, 0x9E832D8E, 0x87981CCF,
+ 0x4AC21251, 0x53D92310, 0x78F470D3, 0x61EF4192,
+ 0x2EAED755, 0x37B5E614, 0x1C98B5D7, 0x05838496,
+ 0x821B9859, 0x9B00A918, 0xB02DFADB, 0xA936CB9A,
+ 0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E,
+ 0x958424A2, 0x8C9F15E3, 0xA7B24620, 0xBEA97761,
+ 0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265,
+ 0x5D5DAEAA, 0x44469FEB, 0x6F6BCC28, 0x7670FD69,
+ 0x39316BAE, 0x202A5AEF, 0x0B07092C, 0x121C386D,
+ 0xDF4636F3, 0xC65D07B2, 0xED705471, 0xF46B6530,
+ 0xBB2AF3F7, 0xA231C2B6, 0x891C9175, 0x9007A034,
+ 0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38,
+ 0x73F379FF, 0x6AE848BE, 0x41C51B7D, 0x58DE2A3C,
+ 0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6,
+ 0x94158A01, 0x8D0EBB40, 0xA623E883, 0xBF38D9C2,
+ 0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE,
+ 0x5CCC0009, 0x45D73148, 0x6EFA628B, 0x77E153CA,
+ 0xBABB5D54, 0xA3A06C15, 0x888D3FD6, 0x91960E97,
+ 0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93,
+ 0x7262D75C, 0x6B79E61D, 0x4054B5DE, 0x594F849F,
+ 0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B,
+ 0x65FD6BA7, 0x7CE65AE6, 0x57CB0925, 0x4ED03864,
+ 0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60,
+ 0xAD24E1AF, 0xB43FD0EE, 0x9F12832D, 0x8609B26C,
+ 0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768,
+ 0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35,
+ 0x4B53BCF2, 0x52488DB3, 0x7965DE70, 0x607EEF31,
+ 0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D,
+ 0x838A36FA, 0x9A9107BB, 0xB1BC5478, 0xA8A76539,
+ 0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88,
+ 0x5FEF5D4F, 0x46F46C0E, 0x6DD93FCD, 0x74C20E8C,
+ 0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180,
+ 0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484,
+ 0x71418A1A, 0x685ABB5B, 0x4377E898, 0x5A6CD9D9,
+ 0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD,
+ 0xB9980012, 0xA0833153, 0x8BAE6290, 0x92B553D1,
+ 0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5,
+ 0xAE07BCE9, 0xB71C8DA8, 0x9C31DE6B, 0x852AEF2A,
+ 0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E,
+ 0x66DE36E1, 0x7FC507A0, 0x54E85463, 0x4DF36522,
+ 0x02B2F3E5, 0x1BA9C2A4, 0x30849167, 0x299FA026,
+ 0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B,
+ 0x80A96BBC, 0x99B25AFD, 0xB29F093E, 0xAB84387F,
+ 0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773,
+ 0x4870E1B4, 0x516BD0F5, 0x7A468336, 0x635DB277,
+ 0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D,
+ 0xAF96124A, 0xB68D230B, 0x9DA070C8, 0x84BB4189,
+ 0x03235D46, 0x1A386C07, 0x31153FC4, 0x280E0E85,
+ 0x674F9842, 0x7E54A903, 0x5579FAC0, 0x4C62CB81,
+ 0x8138C51F, 0x9823F45E, 0xB30EA79D, 0xAA1596DC,
+ 0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8,
+ 0x49E14F17, 0x50FA7E56, 0x7BD72D95, 0x62CC1CD4,
+ 0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0,
+ 0x5E7EF3EC, 0x4765C2AD, 0x6C48916E, 0x7553A02F,
+ 0x3A1236E8, 0x230907A9, 0x0824546A, 0x113F652B,
+ 0x96A779E4, 0x8FBC48A5, 0xA4911B66, 0xBD8A2A27,
+ 0xF2CBBCE0, 0xEBD08DA1, 0xC0FDDE62, 0xD9E6EF23,
+ 0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, 0x3F91B27E,
+ 0x70D024B9, 0x69CB15F8, 0x42E6463B, 0x5BFD777A,
+ 0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876,
+ 0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72
+ }, {
+ 0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59,
+ 0x0709A8DC, 0x06CBC2EB, 0x048D7CB2, 0x054F1685,
+ 0x0E1351B8, 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1,
+ 0x091AF964, 0x08D89353, 0x0A9E2D0A, 0x0B5C473D,
+ 0x1C26A370, 0x1DE4C947, 0x1FA2771E, 0x1E601D29,
+ 0x1B2F0BAC, 0x1AED619B, 0x18ABDFC2, 0x1969B5F5,
+ 0x1235F2C8, 0x13F798FF, 0x11B126A6, 0x10734C91,
+ 0x153C5A14, 0x14FE3023, 0x16B88E7A, 0x177AE44D,
+ 0x384D46E0, 0x398F2CD7, 0x3BC9928E, 0x3A0BF8B9,
+ 0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065,
+ 0x365E1758, 0x379C7D6F, 0x35DAC336, 0x3418A901,
+ 0x3157BF84, 0x3095D5B3, 0x32D36BEA, 0x331101DD,
+ 0x246BE590, 0x25A98FA7, 0x27EF31FE, 0x262D5BC9,
+ 0x23624D4C, 0x22A0277B, 0x20E69922, 0x2124F315,
+ 0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71,
+ 0x2D711CF4, 0x2CB376C3, 0x2EF5C89A, 0x2F37A2AD,
+ 0x709A8DC0, 0x7158E7F7, 0x731E59AE, 0x72DC3399,
+ 0x7793251C, 0x76514F2B, 0x7417F172, 0x75D59B45,
+ 0x7E89DC78, 0x7F4BB64F, 0x7D0D0816, 0x7CCF6221,
+ 0x798074A4, 0x78421E93, 0x7A04A0CA, 0x7BC6CAFD,
+ 0x6CBC2EB0, 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9,
+ 0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835,
+ 0x62AF7F08, 0x636D153F, 0x612BAB66, 0x60E9C151,
+ 0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D,
+ 0x48D7CB20, 0x4915A117, 0x4B531F4E, 0x4A917579,
+ 0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5,
+ 0x46C49A98, 0x4706F0AF, 0x45404EF6, 0x448224C1,
+ 0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D,
+ 0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609,
+ 0x53F8C08C, 0x523AAABB, 0x507C14E2, 0x51BE7ED5,
+ 0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1,
+ 0x5DEB9134, 0x5C29FB03, 0x5E6F455A, 0x5FAD2F6D,
+ 0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9,
+ 0xE63CB35C, 0xE7FED96B, 0xE5B86732, 0xE47A0D05,
+ 0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461,
+ 0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD,
+ 0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E, 0xFF5506A9,
+ 0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75,
+ 0xF300E948, 0xF2C2837F, 0xF0843D26, 0xF1465711,
+ 0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD,
+ 0xD9785D60, 0xD8BA3757, 0xDAFC890E, 0xDB3EE339,
+ 0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5,
+ 0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, 0xD52DB281,
+ 0xD062A404, 0xD1A0CE33, 0xD3E6706A, 0xD2241A5D,
+ 0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049,
+ 0xC25756CC, 0xC3953CFB, 0xC1D382A2, 0xC011E895,
+ 0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1,
+ 0xCC440774, 0xCD866D43, 0xCFC0D31A, 0xCE02B92D,
+ 0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819,
+ 0x96A63E9C, 0x976454AB, 0x9522EAF2, 0x94E080C5,
+ 0x9FBCC7F8, 0x9E7EADCF, 0x9C381396, 0x9DFA79A1,
+ 0x98B56F24, 0x99770513, 0x9B31BB4A, 0x9AF3D17D,
+ 0x8D893530, 0x8C4B5F07, 0x8E0DE15E, 0x8FCF8B69,
+ 0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5,
+ 0x839A6488, 0x82580EBF, 0x801EB0E6, 0x81DCDAD1,
+ 0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D,
+ 0xA9E2D0A0, 0xA820BA97, 0xAA6604CE, 0xABA46EF9,
+ 0xAEEB787C, 0xAF29124B, 0xAD6FAC12, 0xACADC625,
+ 0xA7F18118, 0xA633EB2F, 0xA4755576, 0xA5B73F41,
+ 0xA0F829C4, 0xA13A43F3, 0xA37CFDAA, 0xA2BE979D,
+ 0xB5C473D0, 0xB40619E7, 0xB640A7BE, 0xB782CD89,
+ 0xB2CDDB0C, 0xB30FB13B, 0xB1490F62, 0xB08B6555,
+ 0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31,
+ 0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED
+ }, {
+ 0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE,
+ 0x8F629757, 0x37DEF032, 0x256B5FDC, 0x9DD738B9,
+ 0xC5B428EF, 0x7D084F8A, 0x6FBDE064, 0xD7018701,
+ 0x4AD6BFB8, 0xF26AD8DD, 0xE0DF7733, 0x58631056,
+ 0x5019579F, 0xE8A530FA, 0xFA109F14, 0x42ACF871,
+ 0xDF7BC0C8, 0x67C7A7AD, 0x75720843, 0xCDCE6F26,
+ 0x95AD7F70, 0x2D111815, 0x3FA4B7FB, 0x8718D09E,
+ 0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9,
+ 0xA032AF3E, 0x188EC85B, 0x0A3B67B5, 0xB28700D0,
+ 0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787,
+ 0x658687D1, 0xDD3AE0B4, 0xCF8F4F5A, 0x7733283F,
+ 0xEAE41086, 0x525877E3, 0x40EDD80D, 0xF851BF68,
+ 0xF02BF8A1, 0x48979FC4, 0x5A22302A, 0xE29E574F,
+ 0x7F496FF6, 0xC7F50893, 0xD540A77D, 0x6DFCC018,
+ 0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0,
+ 0xBAFD4719, 0x0241207C, 0x10F48F92, 0xA848E8F7,
+ 0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3,
+ 0x1476CF6A, 0xACCAA80F, 0xBE7F07E1, 0x06C36084,
+ 0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C,
+ 0xD1C2E785, 0x697E80E0, 0x7BCB2F0E, 0xC377486B,
+ 0xCB0D0FA2, 0x73B168C7, 0x6104C729, 0xD9B8A04C,
+ 0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B,
+ 0x0EB9274D, 0xB6054028, 0xA4B0EFC6, 0x1C0C88A3,
+ 0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4,
+ 0x3B26F703, 0x839A9066, 0x912F3F88, 0x299358ED,
+ 0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA,
+ 0xFE92DFEC, 0x462EB889, 0x549B1767, 0xEC277002,
+ 0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755,
+ 0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72,
+ 0xE45D37CB, 0x5CE150AE, 0x4E54FF40, 0xF6E89825,
+ 0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D,
+ 0x21E91F24, 0x99557841, 0x8BE0D7AF, 0x335CB0CA,
+ 0xED59B63B, 0x55E5D15E, 0x47507EB0, 0xFFEC19D5,
+ 0x623B216C, 0xDA874609, 0xC832E9E7, 0x708E8E82,
+ 0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A,
+ 0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D,
+ 0xBD40E1A4, 0x05FC86C1, 0x1749292F, 0xAFF54E4A,
+ 0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D,
+ 0x78F4C94B, 0xC048AE2E, 0xD2FD01C0, 0x6A4166A5,
+ 0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2,
+ 0x4D6B1905, 0xF5D77E60, 0xE762D18E, 0x5FDEB6EB,
+ 0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC,
+ 0x88DF31EA, 0x3063568F, 0x22D6F961, 0x9A6A9E04,
+ 0x07BDA6BD, 0xBF01C1D8, 0xADB46E36, 0x15080953,
+ 0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174,
+ 0x9210D9CD, 0x2AACBEA8, 0x38191146, 0x80A57623,
+ 0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B,
+ 0x57A4F122, 0xEF189647, 0xFDAD39A9, 0x45115ECC,
+ 0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8,
+ 0xF92F7951, 0x41931E34, 0x5326B1DA, 0xEB9AD6BF,
+ 0xB3F9C6E9, 0x0B45A18C, 0x19F00E62, 0xA14C6907,
+ 0x3C9B51BE, 0x842736DB, 0x96929935, 0x2E2EFE50,
+ 0x2654B999, 0x9EE8DEFC, 0x8C5D7112, 0x34E11677,
+ 0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120,
+ 0xE3E09176, 0x5B5CF613, 0x49E959FD, 0xF1553E98,
+ 0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF,
+ 0xD67F4138, 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6,
+ 0x591DD66F, 0xE1A1B10A, 0xF3141EE4, 0x4BA87981,
+ 0x13CB69D7, 0xAB770EB2, 0xB9C2A15C, 0x017EC639,
+ 0x9CA9FE80, 0x241599E5, 0x36A0360B, 0x8E1C516E,
+ 0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, 0x94D3B949,
+ 0x090481F0, 0xB1B8E695, 0xA30D497B, 0x1BB12E1E,
+ 0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6,
+ 0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1
+ }, {
+ 0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0,
+ 0xF580A6C0, 0xC8E08F70, 0x8F40F5A0, 0xB220DC10,
+ 0x30704BC1, 0x0D106271, 0x4AB018A1, 0x77D03111,
+ 0xC5F0ED01, 0xF890C4B1, 0xBF30BE61, 0x825097D1,
+ 0x60E09782, 0x5D80BE32, 0x1A20C4E2, 0x2740ED52,
+ 0x95603142, 0xA80018F2, 0xEFA06222, 0xD2C04B92,
+ 0x5090DC43, 0x6DF0F5F3, 0x2A508F23, 0x1730A693,
+ 0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053,
+ 0xC1C12F04, 0xFCA106B4, 0xBB017C64, 0x866155D4,
+ 0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314,
+ 0xF1B164C5, 0xCCD14D75, 0x8B7137A5, 0xB6111E15,
+ 0x0431C205, 0x3951EBB5, 0x7EF19165, 0x4391B8D5,
+ 0xA121B886, 0x9C419136, 0xDBE1EBE6, 0xE681C256,
+ 0x54A11E46, 0x69C137F6, 0x2E614D26, 0x13016496,
+ 0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997,
+ 0x64D15587, 0x59B17C37, 0x1E1106E7, 0x23712F57,
+ 0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299,
+ 0xAD73FE89, 0x9013D739, 0xD7B3ADE9, 0xEAD38459,
+ 0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958,
+ 0x9D03B548, 0xA0639CF8, 0xE7C3E628, 0xDAA3CF98,
+ 0x3813CFCB, 0x0573E67B, 0x42D39CAB, 0x7FB3B51B,
+ 0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB,
+ 0x0863840A, 0x3503ADBA, 0x72A3D76A, 0x4FC3FEDA,
+ 0xFDE322CA, 0xC0830B7A, 0x872371AA, 0xBA43581A,
+ 0x9932774D, 0xA4525EFD, 0xE3F2242D, 0xDE920D9D,
+ 0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D,
+ 0xA9423C8C, 0x9422153C, 0xD3826FEC, 0xEEE2465C,
+ 0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C,
+ 0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F,
+ 0x0C52460F, 0x31326FBF, 0x7692156F, 0x4BF23CDF,
+ 0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE,
+ 0x3C220DCE, 0x0142247E, 0x46E25EAE, 0x7B82771E,
+ 0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42,
+ 0x44661652, 0x79063FE2, 0x3EA64532, 0x03C66C82,
+ 0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183,
+ 0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743,
+ 0xD1062710, 0xEC660EA0, 0xABC67470, 0x96A65DC0,
+ 0x248681D0, 0x19E6A860, 0x5E46D2B0, 0x6326FB00,
+ 0xE1766CD1, 0xDC164561, 0x9BB63FB1, 0xA6D61601,
+ 0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1,
+ 0x70279F96, 0x4D47B626, 0x0AE7CCF6, 0x3787E546,
+ 0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386,
+ 0x4057D457, 0x7D37FDE7, 0x3A978737, 0x07F7AE87,
+ 0xB5D77297, 0x88B75B27, 0xCF1721F7, 0xF2770847,
+ 0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4,
+ 0xE547AED4, 0xD8278764, 0x9F87FDB4, 0xA2E7D404,
+ 0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905,
+ 0xD537E515, 0xE857CCA5, 0xAFF7B675, 0x92979FC5,
+ 0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B,
+ 0x1C954E1B, 0x21F567AB, 0x66551D7B, 0x5B3534CB,
+ 0xD965A31A, 0xE4058AAA, 0xA3A5F07A, 0x9EC5D9CA,
+ 0x2CE505DA, 0x11852C6A, 0x562556BA, 0x6B457F0A,
+ 0x89F57F59, 0xB49556E9, 0xF3352C39, 0xCE550589,
+ 0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349,
+ 0xB9853498, 0x84E51D28, 0xC34567F8, 0xFE254E48,
+ 0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888,
+ 0x28D4C7DF, 0x15B4EE6F, 0x521494BF, 0x6F74BD0F,
+ 0xDD54611F, 0xE03448AF, 0xA794327F, 0x9AF41BCF,
+ 0x18A48C1E, 0x25C4A5AE, 0x6264DF7E, 0x5F04F6CE,
+ 0xED242ADE, 0xD044036E, 0x97E479BE, 0xAA84500E,
+ 0x4834505D, 0x755479ED, 0x32F4033D, 0x0F942A8D,
+ 0xBDB4F69D, 0x80D4DF2D, 0xC774A5FD, 0xFA148C4D,
+ 0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C,
+ 0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C
+ }, {
+ 0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE,
+ 0x9B914216, 0x50CD91B3, 0xD659E31D, 0x1D0530B8,
+ 0xEC53826D, 0x270F51C8, 0xA19B2366, 0x6AC7F0C3,
+ 0x77C2C07B, 0xBC9E13DE, 0x3A0A6170, 0xF156B2D5,
+ 0x03D6029B, 0xC88AD13E, 0x4E1EA390, 0x85427035,
+ 0x9847408D, 0x531B9328, 0xD58FE186, 0x1ED33223,
+ 0xEF8580F6, 0x24D95353, 0xA24D21FD, 0x6911F258,
+ 0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E,
+ 0x07AC0536, 0xCCF0D693, 0x4A64A43D, 0x81387798,
+ 0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E,
+ 0xEBFF875B, 0x20A354FE, 0xA6372650, 0x6D6BF5F5,
+ 0x706EC54D, 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3,
+ 0x047A07AD, 0xCF26D408, 0x49B2A6A6, 0x82EE7503,
+ 0x9FEB45BB, 0x54B7961E, 0xD223E4B0, 0x197F3715,
+ 0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E,
+ 0x73B8C7D6, 0xB8E41473, 0x3E7066DD, 0xF52CB578,
+ 0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2,
+ 0x94C9487A, 0x5F959BDF, 0xD901E971, 0x125D3AD4,
+ 0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF,
+ 0x789ACA17, 0xB3C619B2, 0x35526B1C, 0xFE0EB8B9,
+ 0x0C8E08F7, 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59,
+ 0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F,
+ 0xE0DD8A9A, 0x2B81593F, 0xAD152B91, 0x6649F834,
+ 0x7B4CC88C, 0xB0101B29, 0x36846987, 0xFDD8BA22,
+ 0x08F40F5A, 0xC3A8DCFF, 0x453CAE51, 0x8E607DF4,
+ 0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2,
+ 0xE4A78D37, 0x2FFB5E92, 0xA96F2C3C, 0x6233FF99,
+ 0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F,
+ 0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F,
+ 0x90B34FD7, 0x5BEF9C72, 0xDD7BEEDC, 0x16273D79,
+ 0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02,
+ 0x7CE0CDBA, 0xB7BC1E1F, 0x31286CB1, 0xFA74BF14,
+ 0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 0x98246676,
+ 0x852156CE, 0x4E7D856B, 0xC8E9F7C5, 0x03B52460,
+ 0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B,
+ 0x6972D4A3, 0xA22E0706, 0x24BA75A8, 0xEFE6A60D,
+ 0x1D661643, 0xD63AC5E6, 0x50AEB748, 0x9BF264ED,
+ 0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB,
+ 0xF135942E, 0x3A69478B, 0xBCFD3525, 0x77A1E680,
+ 0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496,
+ 0x191C11EE, 0xD240C24B, 0x54D4B0E5, 0x9F886340,
+ 0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156,
+ 0xF54F9383, 0x3E134026, 0xB8873288, 0x73DBE12D,
+ 0x6EDED195, 0xA5820230, 0x2316709E, 0xE84AA33B,
+ 0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB,
+ 0x815B5163, 0x4A0782C6, 0xCC93F068, 0x07CF23CD,
+ 0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6,
+ 0x6D08D30E, 0xA65400AB, 0x20C07205, 0xEB9CA1A0,
+ 0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A,
+ 0x8A795CA2, 0x41258F07, 0xC7B1FDA9, 0x0CED2E0C,
+ 0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2, 0x7B2FEE77,
+ 0x662ADECF, 0xAD760D6A, 0x2BE27FC4, 0xE0BEAC61,
+ 0x123E1C2F, 0xD962CF8A, 0x5FF6BD24, 0x94AA6E81,
+ 0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97,
+ 0xFE6D9E42, 0x35314DE7, 0xB3A53F49, 0x78F9ECEC,
+ 0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA,
+ 0x16441B82, 0xDD18C827, 0x5B8CBA89, 0x90D0692C,
+ 0x8DD55994, 0x46898A31, 0xC01DF89F, 0x0B412B3A,
+ 0xFA1799EF, 0x314B4A4A, 0xB7DF38E4, 0x7C83EB41,
+ 0x6186DBF9, 0xAADA085C, 0x2C4E7AF2, 0xE712A957,
+ 0x15921919, 0xDECECABC, 0x585AB812, 0x93066BB7,
+ 0x8E035B0F, 0x455F88AA, 0xC3CBFA04, 0x089729A1,
+ 0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA,
+ 0x6250D962, 0xA90C0AC7, 0x2F987869, 0xE4C4ABCC
+ }, {
+ 0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D,
+ 0xF44F2413, 0x52382FA7, 0x63D0353A, 0xC5A73E8E,
+ 0x33EF4E67, 0x959845D3, 0xA4705F4E, 0x020754FA,
+ 0xC7A06A74, 0x61D761C0, 0x503F7B5D, 0xF64870E9,
+ 0x67DE9CCE, 0xC1A9977A, 0xF0418DE7, 0x56368653,
+ 0x9391B8DD, 0x35E6B369, 0x040EA9F4, 0xA279A240,
+ 0x5431D2A9, 0xF246D91D, 0xC3AEC380, 0x65D9C834,
+ 0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27,
+ 0xCFBD399C, 0x69CA3228, 0x582228B5, 0xFE552301,
+ 0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712,
+ 0xFC5277FB, 0x5A257C4F, 0x6BCD66D2, 0xCDBA6D66,
+ 0x081D53E8, 0xAE6A585C, 0x9F8242C1, 0x39F54975,
+ 0xA863A552, 0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF,
+ 0x5C2C8141, 0xFA5B8AF5, 0xCBB39068, 0x6DC49BDC,
+ 0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8,
+ 0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F, 0x5E2BD5BB,
+ 0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4,
+ 0xB044516A, 0x16335ADE, 0x27DB4043, 0x81AC4BF7,
+ 0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183,
+ 0x83AB1F0D, 0x25DC14B9, 0x14340E24, 0xB2430590,
+ 0x23D5E9B7, 0x85A2E203, 0xB44AF89E, 0x123DF32A,
+ 0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739,
+ 0x103AA7D0, 0xB64DAC64, 0x87A5B6F9, 0x21D2BD4D,
+ 0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E,
+ 0x8BB64CE5, 0x2DC14751, 0x1C295DCC, 0xBA5E5678,
+ 0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B,
+ 0xB8590282, 0x1E2E0936, 0x2FC613AB, 0x89B1181F,
+ 0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C,
+ 0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6,
+ 0x1827F438, 0xBE50FF8C, 0x8FB8E511, 0x29CFEEA5,
+ 0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1,
+ 0x2BC8BA5F, 0x8DBFB1EB, 0xBC57AB76, 0x1A20A0C2,
+ 0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F,
+ 0x7C59CEE1, 0xDA2EC555, 0xEBC6DFC8, 0x4DB1D47C,
+ 0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08,
+ 0x4FB68086, 0xE9C18B32, 0xD82991AF, 0x7E5E9A1B,
+ 0xEFC8763C, 0x49BF7D88, 0x78576715, 0xDE206CA1,
+ 0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2,
+ 0xDC27385B, 0x7A5033EF, 0x4BB82972, 0xEDCF22C6,
+ 0x28681C48, 0x8E1F17FC, 0xBFF70D61, 0x198006D5,
+ 0x47ABD36E, 0xE1DCD8DA, 0xD034C247, 0x7643C9F3,
+ 0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0,
+ 0x74449D09, 0xD23396BD, 0xE3DB8C20, 0x45AC8794,
+ 0x800BB91A, 0x267CB2AE, 0x1794A833, 0xB1E3A387,
+ 0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D,
+ 0xD43A6BB3, 0x724D6007, 0x43A57A9A, 0xE5D2712E,
+ 0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A,
+ 0xE7D525D4, 0x41A22E60, 0x704A34FD, 0xD63D3F49,
+ 0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516,
+ 0x3852BB98, 0x9E25B02C, 0xAFCDAAB1, 0x09BAA105,
+ 0xFFF2D1EC, 0x5985DA58, 0x686DC0C5, 0xCE1ACB71,
+ 0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, 0x3A55EF62,
+ 0xABC30345, 0x0DB408F1, 0x3C5C126C, 0x9A2B19D8,
+ 0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB,
+ 0x982C4D22, 0x3E5B4696, 0x0FB35C0B, 0xA9C457BF,
+ 0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC,
+ 0x03A0A617, 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A,
+ 0xF7EF8204, 0x519889B0, 0x6070932D, 0xC6079899,
+ 0x304FE870, 0x9638E3C4, 0xA7D0F959, 0x01A7F2ED,
+ 0xC400CC63, 0x6277C7D7, 0x539FDD4A, 0xF5E8D6FE,
+ 0x647E3AD9, 0xC209316D, 0xF3E12BF0, 0x55962044,
+ 0x90311ECA, 0x3646157E, 0x07AE0FE3, 0xA1D90457,
+ 0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23,
+ 0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30
+ }, {
+ 0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3,
+ 0x844A0EFA, 0x48E00E64, 0xC66F0987, 0x0AC50919,
+ 0xD3E51BB5, 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56,
+ 0x57AF154F, 0x9B0515D1, 0x158A1232, 0xD92012AC,
+ 0x7CBB312B, 0xB01131B5, 0x3E9E3656, 0xF23436C8,
+ 0xF8F13FD1, 0x345B3F4F, 0xBAD438AC, 0x767E3832,
+ 0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, 0x21D12D7D,
+ 0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387,
+ 0xF9766256, 0x35DC62C8, 0xBB53652B, 0x77F965B5,
+ 0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F,
+ 0x2A9379E3, 0xE639797D, 0x68B67E9E, 0xA41C7E00,
+ 0xAED97719, 0x62737787, 0xECFC7064, 0x205670FA,
+ 0x85CD537D, 0x496753E3, 0xC7E85400, 0x0B42549E,
+ 0x01875D87, 0xCD2D5D19, 0x43A25AFA, 0x8F085A64,
+ 0x562848C8, 0x9A824856, 0x140D4FB5, 0xD8A74F2B,
+ 0xD2624632, 0x1EC846AC, 0x9047414F, 0x5CED41D1,
+ 0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E,
+ 0xADD7CC17, 0x617DCC89, 0xEFF2CB6A, 0x2358CBF4,
+ 0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB,
+ 0x7E32D7A2, 0xB298D73C, 0x3C17D0DF, 0xF0BDD041,
+ 0x5526F3C6, 0x998CF358, 0x1703F4BB, 0xDBA9F425,
+ 0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF,
+ 0x86C3E873, 0x4A69E8ED, 0xC4E6EF0E, 0x084CEF90,
+ 0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A,
+ 0xD0EBA0BB, 0x1C41A025, 0x92CEA7C6, 0x5E64A758,
+ 0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2,
+ 0x030EBB0E, 0xCFA4BB90, 0x412BBC73, 0x8D81BCED,
+ 0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217,
+ 0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673,
+ 0x281A9F6A, 0xE4B09FF4, 0x6A3F9817, 0xA6959889,
+ 0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6,
+ 0xFBFF84DF, 0x37558441, 0xB9DA83A2, 0x7570833C,
+ 0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239,
+ 0xD7718B20, 0x1BDB8BBE, 0x95548C5D, 0x59FE8CC3,
+ 0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C,
+ 0x04949095, 0xC83E900B, 0x46B197E8, 0x8A1B9776,
+ 0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C, 0xA10FB312,
+ 0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8,
+ 0xFC65AF44, 0x30CFAFDA, 0xBE40A839, 0x72EAA8A7,
+ 0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D,
+ 0xAA4DE78C, 0x66E7E712, 0xE868E0F1, 0x24C2E06F,
+ 0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95,
+ 0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, 0xF727FBDA,
+ 0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE, 0x736DF520,
+ 0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144,
+ 0x52BCD85D, 0x9E16D8C3, 0x1099DF20, 0xDC33DFBE,
+ 0x0513CD12, 0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1,
+ 0x8159C3E8, 0x4DF3C376, 0xC37CC495, 0x0FD6C40B,
+ 0x7AA64737, 0xB60C47A9, 0x3883404A, 0xF42940D4,
+ 0xFEEC49CD, 0x32464953, 0xBCC94EB0, 0x70634E2E,
+ 0xA9435C82, 0x65E95C1C, 0xEB665BFF, 0x27CC5B61,
+ 0x2D095278, 0xE1A352E6, 0x6F2C5505, 0xA386559B,
+ 0x061D761C, 0xCAB77682, 0x44387161, 0x889271FF,
+ 0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05,
+ 0xD5F86DA9, 0x19526D37, 0x97DD6AD4, 0x5B776A4A,
+ 0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0,
+ 0x83D02561, 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282,
+ 0x079A2B9B, 0xCB302B05, 0x45BF2CE6, 0x89152C78,
+ 0x50353ED4, 0x9C9F3E4A, 0x121039A9, 0xDEBA3937,
+ 0xD47F302E, 0x18D530B0, 0x965A3753, 0x5AF037CD,
+ 0xFF6B144A, 0x33C114D4, 0xBD4E1337, 0x71E413A9,
+ 0x7B211AB0, 0xB78B1A2E, 0x39041DCD, 0xF5AE1D53,
+ 0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C,
+ 0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6
+ }
+};
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_tablegen.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_tablegen.c
new file mode 100644
index 00000000..272890b1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_tablegen.c
@@ -0,0 +1,93 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file crc32_tablegen.c
+/// \brief Generate crc32_table_le.h and crc32_table_be.h
+///
+/// Compiling: gcc -std=c99 -o crc32_tablegen crc32_tablegen.c
+/// Add -DWORDS_BIGENDIAN to generate a big endian table.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#ifdef WORDS_BIGENDIAN
+# include "../../common/bswap.h"
+#endif
+
+
+static uint32_t crc32_table[8][256];
+
+
+static void
+init_crc32_table(void)
+{
+ static const uint32_t poly32 = UINT32_C(0xEDB88320);
+
+ for (size_t s = 0; s < 8; ++s) {
+ for (size_t b = 0; b < 256; ++b) {
+ uint32_t r = s == 0 ? b : crc32_table[s - 1][b];
+
+ for (size_t i = 0; i < 8; ++i) {
+ if (r & 1)
+ r = (r >> 1) ^ poly32;
+ else
+ r >>= 1;
+ }
+
+ crc32_table[s][b] = r;
+ }
+ }
+
+#ifdef WORDS_BIGENDIAN
+ for (size_t s = 0; s < 8; ++s)
+ for (size_t b = 0; b < 256; ++b)
+ crc32_table[s][b] = bswap_32(crc32_table[s][b]);
+#endif
+
+ return;
+}
+
+
+static void
+print_crc32_table(void)
+{
+ printf("/* This file has been automatically generated by "
+ "crc32_tablegen.c. */\n\n"
+ "const uint32_t lzma_crc32_table[8][256] = {\n\t{");
+
+ for (size_t s = 0; s < 8; ++s) {
+ for (size_t b = 0; b < 256; ++b) {
+ if ((b % 4) == 0)
+ printf("\n\t\t");
+
+ printf("0x%08" PRIX32, crc32_table[s][b]);
+
+ if (b != 255)
+ printf(",%s", (b+1) % 4 == 0 ? "" : " ");
+ }
+
+ if (s == 7)
+ printf("\n\t}\n};\n");
+ else
+ printf("\n\t}, {");
+ }
+
+ return;
+}
+
+
+int
+main(void)
+{
+ init_crc32_table();
+ print_crc32_table();
+ return 0;
+}
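
[Editorial note] The generator above builds eight "slices": slice 0 is the classic byte-at-a-time CRC-32 table for the reflected IEEE polynomial 0xEDB88320, and each further slice is the previous one advanced by one zero input byte. That relationship is the invariant the slicing code later relies on. A minimal, self-contained check of it is sketched below; it rebuilds the tables locally rather than using the generated header, so the names here are illustrative only.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t t[8][256];

    int main(void)
    {
        /* Same construction as init_crc32_table() above. */
        for (int s = 0; s < 8; ++s)
            for (uint32_t b = 0; b < 256; ++b) {
                uint32_t r = s == 0 ? b : t[s - 1][b];
                for (int i = 0; i < 8; ++i)
                    r = (r & 1) ? (r >> 1) ^ UINT32_C(0xEDB88320) : r >> 1;
                t[s][b] = r;
            }

        /* Invariant: slice s equals slice s-1 advanced by one zero input byte. */
        for (int s = 1; s < 8; ++s)
            for (uint32_t b = 0; b < 256; ++b)
                assert(t[s][b] == ((t[s - 1][b] >> 8) ^ t[0][t[s - 1][b] & 0xFF]));

        puts("table invariant holds");
        return 0;
    }
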
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_x86.S b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_x86.S
new file mode 100644
index 00000000..cca29891
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc32_x86.S
@@ -0,0 +1,304 @@
+/*
+ * Speed-optimized CRC32 using slicing-by-eight algorithm
+ *
+ * This uses only i386 instructions, but it is optimized for i686 and later
+ * (including e.g. Pentium II/III/IV, Athlon XP, and Core 2). For i586
+ * (e.g. Pentium), slicing-by-four would be better, and even the C version
+ * of slicing-by-eight built with gcc -march=i586 tends to be a little bit
+ * better than this. Very few people probably run this code on i586 or
+ * older x86, so this shouldn't be a problem in practice.
+ *
+ * Authors: Igor Pavlov (original version)
+ * Lasse Collin (AT&T syntax, PIC support, better portability)
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * This code needs lzma_crc32_table, which can be created using the
+ * following C code:
+
+uint32_t lzma_crc32_table[8][256];
+
+void
+init_table(void)
+{
+ // IEEE-802.3
+ static const uint32_t poly32 = UINT32_C(0xEDB88320);
+
+ // Castagnoli
+ // static const uint32_t poly32 = UINT32_C(0x82F63B78);
+
+ // Koopman
+ // static const uint32_t poly32 = UINT32_C(0xEB31D82E);
+
+ for (size_t s = 0; s < 8; ++s) {
+ for (size_t b = 0; b < 256; ++b) {
+ uint32_t r = s == 0 ? b : lzma_crc32_table[s - 1][b];
+
+ for (size_t i = 0; i < 8; ++i) {
+ if (r & 1)
+ r = (r >> 1) ^ poly32;
+ else
+ r >>= 1;
+ }
+
+ lzma_crc32_table[s][b] = r;
+ }
+ }
+}
+
+ * The prototype of the CRC32 function:
+ * extern uint32_t lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+ */
+
+/*
+ * On some systems, the functions need to be prefixed. The prefix is
+ * usually an underscore.
+ */
+#ifndef __USER_LABEL_PREFIX__
+# define __USER_LABEL_PREFIX__
+#endif
+#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
+#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
+#define LZMA_CRC32 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32)
+#define LZMA_CRC32_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32_table)
+
+/*
+ * Solaris assembler doesn't have .p2align, and Darwin uses .align
+ * differently than GNU/Linux and Solaris.
+ */
+#if defined(__MACH__) || defined(__MSDOS__)
+# define ALIGN(pow2, abs) .align pow2
+#else
+# define ALIGN(pow2, abs) .align abs
+#endif
+
+ .text
+ .globl LZMA_CRC32
+
+#if !defined(__MACH__) && !defined(_WIN32) && !defined(__CYGWIN__) \
+ && !defined(__MSDOS__)
+ .type LZMA_CRC32, @function
+#endif
+
+ ALIGN(4, 16)
+LZMA_CRC32:
+ /*
+ * Register usage:
+ * %eax crc
+ * %esi buf
+ * %edi size or buf + size
+ * %ebx lzma_crc32_table
+ * %ebp Table index
+ * %ecx Temporary
+ * %edx Temporary
+ */
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+ pushl %ebp
+ movl 0x14(%esp), %esi /* buf */
+ movl 0x18(%esp), %edi /* size */
+ movl 0x1C(%esp), %eax /* crc */
+
+ /*
+ * Store the address of lzma_crc32_table to %ebx. This is needed to
+ * get position-independent code (PIC).
+ *
+ * The PIC macro is defined by libtool, while __PIC__ is defined
+ * by GCC but only on some systems. Testing for both makes it simpler
+ * to test this code without libtool, and keeps the code working also
+ * when built with libtool but using something else than GCC.
+ *
+ * I understood that libtool may define PIC on Windows even though
+ * the code in Windows DLLs is not PIC in the sense that it is in ELF
+ * binaries, so we need a separate check to always use the non-PIC
+ * code on Windows.
+ */
+#if (!defined(PIC) && !defined(__PIC__)) \
+ || (defined(_WIN32) || defined(__CYGWIN__))
+ /* Not PIC */
+ movl $LZMA_CRC32_TABLE, %ebx
+#elif defined(__MACH__)
+ /* Mach-O */
+ call .L_get_pc
+.L_pic:
+ leal .L_lzma_crc32_table$non_lazy_ptr-.L_pic(%ebx), %ebx
+ movl (%ebx), %ebx
+#else
+ /* ELF */
+ call .L_get_pc
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+ movl LZMA_CRC32_TABLE@GOT(%ebx), %ebx
+#endif
+
+ /* Complement the initial value. */
+ notl %eax
+
+ ALIGN(4, 16)
+.L_align:
+ /*
+ * Check if there is enough input to use slicing-by-eight.
+ * We need 16 bytes, because the loop pre-reads eight bytes.
+ */
+ cmpl $16, %edi
+ jl .L_rest
+
+ /* Check if we have reached alignment of eight bytes. */
+ testl $7, %esi
+ jz .L_slice
+
+ /* Calculate CRC of the next input byte. */
+ movzbl (%esi), %ebp
+ incl %esi
+ movzbl %al, %ecx
+ xorl %ecx, %ebp
+ shrl $8, %eax
+ xorl (%ebx, %ebp, 4), %eax
+ decl %edi
+ jmp .L_align
+
+ ALIGN(2, 4)
+.L_slice:
+ /*
+ * If we get here, there's at least 16 bytes of aligned input
+ * available. Make %edi multiple of eight bytes. Store the possible
+ * remainder over the "size" variable in the argument stack.
+ */
+ movl %edi, 0x18(%esp)
+ andl $-8, %edi
+ subl %edi, 0x18(%esp)
+
+ /*
+ * Let %edi be buf + size - 8 while running the main loop. This way
+ * we can compare for equality to determine when to exit the loop.
+ */
+ addl %esi, %edi
+ subl $8, %edi
+
+ /* Read in the first eight aligned bytes. */
+ xorl (%esi), %eax
+ movl 4(%esi), %ecx
+ movzbl %cl, %ebp
+
+.L_loop:
+ movl 0x0C00(%ebx, %ebp, 4), %edx
+ movzbl %ch, %ebp
+ xorl 0x0800(%ebx, %ebp, 4), %edx
+ shrl $16, %ecx
+ xorl 8(%esi), %edx
+ movzbl %cl, %ebp
+ xorl 0x0400(%ebx, %ebp, 4), %edx
+ movzbl %ch, %ebp
+ xorl (%ebx, %ebp, 4), %edx
+ movzbl %al, %ebp
+
+ /*
+ * Read the next four bytes, for which the CRC is calculated
+ * on the next iteration of the loop.
+ */
+ movl 12(%esi), %ecx
+
+ xorl 0x1C00(%ebx, %ebp, 4), %edx
+ movzbl %ah, %ebp
+ shrl $16, %eax
+ xorl 0x1800(%ebx, %ebp, 4), %edx
+ movzbl %ah, %ebp
+ movzbl %al, %eax
+ movl 0x1400(%ebx, %eax, 4), %eax
+ addl $8, %esi
+ xorl %edx, %eax
+ xorl 0x1000(%ebx, %ebp, 4), %eax
+
+ /* Check for end of aligned input. */
+ cmpl %edi, %esi
+ movzbl %cl, %ebp
+ jne .L_loop
+
+ /*
+ * Process the remaining eight bytes, which we have already
+ * copied to %ecx and %edx.
+ */
+ movl 0x0C00(%ebx, %ebp, 4), %edx
+ movzbl %ch, %ebp
+ xorl 0x0800(%ebx, %ebp, 4), %edx
+ shrl $16, %ecx
+ movzbl %cl, %ebp
+ xorl 0x0400(%ebx, %ebp, 4), %edx
+ movzbl %ch, %ebp
+ xorl (%ebx, %ebp, 4), %edx
+ movzbl %al, %ebp
+
+ xorl 0x1C00(%ebx, %ebp, 4), %edx
+ movzbl %ah, %ebp
+ shrl $16, %eax
+ xorl 0x1800(%ebx, %ebp, 4), %edx
+ movzbl %ah, %ebp
+ movzbl %al, %eax
+ movl 0x1400(%ebx, %eax, 4), %eax
+ addl $8, %esi
+ xorl %edx, %eax
+ xorl 0x1000(%ebx, %ebp, 4), %eax
+
+ /* Copy the number of remaining bytes to %edi. */
+ movl 0x18(%esp), %edi
+
+.L_rest:
+ /* Check for end of input. */
+ testl %edi, %edi
+ jz .L_return
+
+ /* Calculate CRC of the next input byte. */
+ movzbl (%esi), %ebp
+ incl %esi
+ movzbl %al, %ecx
+ xorl %ecx, %ebp
+ shrl $8, %eax
+ xorl (%ebx, %ebp, 4), %eax
+ decl %edi
+ jmp .L_rest
+
+.L_return:
+ /* Complement the final value. */
+ notl %eax
+
+ popl %ebp
+ popl %edi
+ popl %esi
+ popl %ebx
+ ret
+
+#if defined(PIC) || defined(__PIC__)
+ ALIGN(4, 16)
+.L_get_pc:
+ movl (%esp), %ebx
+ ret
+#endif
+
+#if defined(__MACH__) && (defined(PIC) || defined(__PIC__))
+ /* Mach-O PIC */
+ .section __IMPORT,__pointers,non_lazy_symbol_pointers
+.L_lzma_crc32_table$non_lazy_ptr:
+ .indirect_symbol LZMA_CRC32_TABLE
+ .long 0
+
+#elif defined(_WIN32) || defined(__CYGWIN__)
+# ifdef DLL_EXPORT
+ /* This is equivalent of __declspec(dllexport). */
+ .section .drectve
+ .ascii " -export:lzma_crc32"
+# endif
+
+#elif !defined(__MSDOS__)
+ /* ELF */
+ .size LZMA_CRC32, .-LZMA_CRC32
+#endif
+
+/*
+ * This is needed to support non-executable stack. It's ugly to
+ * use __linux__ here, but I don't know a way to detect when
+ * we are using GNU assembler.
+ */
+#if defined(__ELF__) && defined(__linux__)
+ .section .note.GNU-stack,"",@progbits
+#endif
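
[Editorial note] The assembly above is hand-scheduled, but the slicing-by-eight idea it implements fits in a few lines of C. The sketch below is an illustration of the technique only, not the code liblzma actually compiles (its C fallback lives in crc32_fast.c); it assumes a little-endian host and tables built exactly like the init code quoted in the file's header comment, with all names local to the example.

    #include <inttypes.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t t[8][256];

    static void init_tables(void)
    {
        for (int s = 0; s < 8; ++s)
            for (uint32_t b = 0; b < 256; ++b) {
                uint32_t r = s == 0 ? b : t[s - 1][b];
                for (int i = 0; i < 8; ++i)
                    r = (r & 1) ? (r >> 1) ^ UINT32_C(0xEDB88320) : r >> 1;
                t[s][b] = r;
            }
    }

    static uint32_t crc32_slice8(const uint8_t *buf, size_t size, uint32_t crc)
    {
        crc = ~crc;

        /* Eight input bytes per iteration, one table lookup per byte. */
        while (size >= 8) {
            uint32_t lo, hi;
            memcpy(&lo, buf, 4);      /* bytes 0..3 (little-endian host assumed) */
            memcpy(&hi, buf + 4, 4);  /* bytes 4..7 */
            lo ^= crc;                /* the old CRC fully expires within these 4 bytes */
            crc = t[7][lo & 0xFF] ^ t[6][(lo >> 8) & 0xFF]
                ^ t[5][(lo >> 16) & 0xFF] ^ t[4][lo >> 24]
                ^ t[3][hi & 0xFF] ^ t[2][(hi >> 8) & 0xFF]
                ^ t[1][(hi >> 16) & 0xFF] ^ t[0][hi >> 24];
            buf += 8;
            size -= 8;
        }

        /* Leftover bytes: plain byte-at-a-time step. */
        while (size-- != 0)
            crc = t[0][(crc ^ *buf++) & 0xFF] ^ (crc >> 8);

        return ~crc;
    }

    int main(void)
    {
        init_tables();
        /* Standard CRC-32 check value: "123456789" -> 0xCBF43926. */
        printf("0x%08" PRIX32 "\n",
               crc32_slice8((const uint8_t *)"123456789", 9, 0));
        return 0;
    }
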
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_fast.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_fast.c
new file mode 100644
index 00000000..01ae80cd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_fast.c
@@ -0,0 +1,73 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file crc64.c
+/// \brief CRC64 calculation
+///
+/// Calculate the CRC64 using the slice-by-four algorithm. This is the same
+/// idea that is used in crc32_fast.c, but for CRC64 we use only four tables
+/// instead of eight to avoid increasing CPU cache usage.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "check.h"
+#include "crc_macros.h"
+
+
+#ifdef WORDS_BIGENDIAN
+# define A1(x) ((x) >> 56)
+#else
+# define A1 A
+#endif
+
+
+// See the comments in crc32_fast.c. They aren't duplicated here.
+extern LZMA_API(uint64_t)
+lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc)
+{
+ crc = ~crc;
+
+#ifdef WORDS_BIGENDIAN
+ crc = bswap_64(crc);
+#endif
+
+ if (size > 4) {
+ while ((uintptr_t)(buf) & 3) {
+ crc = lzma_crc64_table[0][*buf++ ^ A1(crc)] ^ S8(crc);
+ --size;
+ }
+
+ const uint8_t *const limit = buf + (size & ~(size_t)(3));
+ size &= (size_t)(3);
+
+ while (buf < limit) {
+#ifdef WORDS_BIGENDIAN
+ const uint32_t tmp = (crc >> 32) ^ *(uint32_t *)(buf);
+#else
+ const uint32_t tmp = crc ^ *(uint32_t *)(buf);
+#endif
+ buf += 4;
+
+ crc = lzma_crc64_table[3][A(tmp)]
+ ^ lzma_crc64_table[2][B(tmp)]
+ ^ S32(crc)
+ ^ lzma_crc64_table[1][C(tmp)]
+ ^ lzma_crc64_table[0][D(tmp)];
+ }
+ }
+
+ while (size-- != 0)
+ crc = lzma_crc64_table[0][*buf++ ^ A1(crc)] ^ S8(crc);
+
+#ifdef WORDS_BIGENDIAN
+ crc = bswap_64(crc);
+#endif
+
+ return ~crc;
+}
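
[Editorial note] crc_macros.h is not part of this diff, so A/B/C/D, A1, S8 and S32 are visible only by name here. Read on a little-endian host, the inner loop above is equivalent to the plain-C step sketched below: the 64-bit CRC needs only four slices because each 4-byte chunk consumes just the low 32 bits of the running value, while the upper half is carried across with a 32-bit shift. This is a hedged reading under that assumption, not the library's actual macro definitions.

    #include <stdint.h>
    #include <string.h>

    /* One slice-by-four step for a reflected 64-bit CRC on a little-endian host.
     * table[0..3] are built like the CRC-32 slices above, just 64 bits wide. */
    static inline uint64_t crc64_step4(const uint64_t table[4][256],
                                       const uint8_t *buf, uint64_t crc)
    {
        uint32_t chunk;
        memcpy(&chunk, buf, 4);                 /* next four input bytes */
        const uint32_t tmp = (uint32_t)crc ^ chunk;

        return table[3][tmp & 0xFF]
             ^ table[2][(tmp >> 8) & 0xFF]
             ^ (crc >> 32)                      /* the S32() term in the code above */
             ^ table[1][(tmp >> 16) & 0xFF]
             ^ table[0][tmp >> 24];
    }
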
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_small.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_small.c
new file mode 100644
index 00000000..2ce72e2c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_small.c
@@ -0,0 +1,55 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file crc64_small.c
+/// \brief CRC64 calculation (size-optimized)
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "check.h"
+
+
+static uint64_t crc64_table[256];
+
+
+static void
+crc64_init(void)
+{
+ static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42);
+
+ for (size_t b = 0; b < 256; ++b) {
+ uint64_t r = b;
+ for (size_t i = 0; i < 8; ++i) {
+ if (r & 1)
+ r = (r >> 1) ^ poly64;
+ else
+ r >>= 1;
+ }
+
+ crc64_table[b] = r;
+ }
+
+ return;
+}
+
+
+extern LZMA_API(uint64_t)
+lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc)
+{
+ mythread_once(crc64_init);
+
+ crc = ~crc;
+
+ while (size != 0) {
+ crc = crc64_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
+ --size;
+ }
+
+ return ~crc;
+}
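
[Editorial note] Because both CRC64 implementations complement the value on entry and again on return, the crc argument makes streaming use straightforward: pass 0 for the first block and feed each call's return value into the next. A minimal usage sketch follows; the prototype matches the one above (released liblzma declares it in <lzma.h>, and LZMA_API may add calling-convention decorations), while the buffer contents and split point are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Same prototype as above; link against liblzma's check code. */
    extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);

    int main(void)
    {
        const uint8_t data[] = "123456789";

        /* One-shot... */
        uint64_t whole = lzma_crc64(data, 9, 0);

        /* ...equals the same data fed in two pieces. */
        uint64_t part = lzma_crc64(data, 4, 0);
        part = lzma_crc64(data + 4, 5, part);

        printf("%s\n", whole == part ? "match" : "mismatch");
        return 0;
    }
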
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table.c
new file mode 100644
index 00000000..3ffe86fa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table.c
@@ -0,0 +1,21 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file crc64_table.c
+/// \brief Precalculated CRC64 table with correct endianness
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+
+#ifdef WORDS_BIGENDIAN
+# include "crc64_table_be.h"
+#else
+# include "crc64_table_le.h"
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table_be.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table_be.h
new file mode 100644
index 00000000..4e5c8e94
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table_be.h
@@ -0,0 +1,523 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* This file has been automatically generated by crc64_tablegen.c. */
+
+const uint64_t lzma_crc64_table[4][256] = {
+ {
+ UINT64_C(0x0000000000000000), UINT64_C(0x6F5FA703BE4C2EB3),
+ UINT64_C(0x5BA040A8573684F4), UINT64_C(0x34FFE7ABE97AAA47),
+ UINT64_C(0x335E8FFF84C3D07B), UINT64_C(0x5C0128FC3A8FFEC8),
+ UINT64_C(0x68FECF57D3F5548F), UINT64_C(0x07A168546DB97A3C),
+ UINT64_C(0x66BC1EFF0987A1F7), UINT64_C(0x09E3B9FCB7CB8F44),
+ UINT64_C(0x3D1C5E575EB12503), UINT64_C(0x5243F954E0FD0BB0),
+ UINT64_C(0x55E291008D44718C), UINT64_C(0x3ABD360333085F3F),
+ UINT64_C(0x0E42D1A8DA72F578), UINT64_C(0x611D76AB643EDBCB),
+ UINT64_C(0x4966335138A19B7D), UINT64_C(0x2639945286EDB5CE),
+ UINT64_C(0x12C673F96F971F89), UINT64_C(0x7D99D4FAD1DB313A),
+ UINT64_C(0x7A38BCAEBC624B06), UINT64_C(0x15671BAD022E65B5),
+ UINT64_C(0x2198FC06EB54CFF2), UINT64_C(0x4EC75B055518E141),
+ UINT64_C(0x2FDA2DAE31263A8A), UINT64_C(0x40858AAD8F6A1439),
+ UINT64_C(0x747A6D066610BE7E), UINT64_C(0x1B25CA05D85C90CD),
+ UINT64_C(0x1C84A251B5E5EAF1), UINT64_C(0x73DB05520BA9C442),
+ UINT64_C(0x4724E2F9E2D36E05), UINT64_C(0x287B45FA5C9F40B6),
+ UINT64_C(0x92CC66A2704237FB), UINT64_C(0xFD93C1A1CE0E1948),
+ UINT64_C(0xC96C260A2774B30F), UINT64_C(0xA633810999389DBC),
+ UINT64_C(0xA192E95DF481E780), UINT64_C(0xCECD4E5E4ACDC933),
+ UINT64_C(0xFA32A9F5A3B76374), UINT64_C(0x956D0EF61DFB4DC7),
+ UINT64_C(0xF470785D79C5960C), UINT64_C(0x9B2FDF5EC789B8BF),
+ UINT64_C(0xAFD038F52EF312F8), UINT64_C(0xC08F9FF690BF3C4B),
+ UINT64_C(0xC72EF7A2FD064677), UINT64_C(0xA87150A1434A68C4),
+ UINT64_C(0x9C8EB70AAA30C283), UINT64_C(0xF3D11009147CEC30),
+ UINT64_C(0xDBAA55F348E3AC86), UINT64_C(0xB4F5F2F0F6AF8235),
+ UINT64_C(0x800A155B1FD52872), UINT64_C(0xEF55B258A19906C1),
+ UINT64_C(0xE8F4DA0CCC207CFD), UINT64_C(0x87AB7D0F726C524E),
+ UINT64_C(0xB3549AA49B16F809), UINT64_C(0xDC0B3DA7255AD6BA),
+ UINT64_C(0xBD164B0C41640D71), UINT64_C(0xD249EC0FFF2823C2),
+ UINT64_C(0xE6B60BA416528985), UINT64_C(0x89E9ACA7A81EA736),
+ UINT64_C(0x8E48C4F3C5A7DD0A), UINT64_C(0xE11763F07BEBF3B9),
+ UINT64_C(0xD5E8845B929159FE), UINT64_C(0xBAB723582CDD774D),
+ UINT64_C(0xA187C3EBCA2BB664), UINT64_C(0xCED864E8746798D7),
+ UINT64_C(0xFA2783439D1D3290), UINT64_C(0x9578244023511C23),
+ UINT64_C(0x92D94C144EE8661F), UINT64_C(0xFD86EB17F0A448AC),
+ UINT64_C(0xC9790CBC19DEE2EB), UINT64_C(0xA626ABBFA792CC58),
+ UINT64_C(0xC73BDD14C3AC1793), UINT64_C(0xA8647A177DE03920),
+ UINT64_C(0x9C9B9DBC949A9367), UINT64_C(0xF3C43ABF2AD6BDD4),
+ UINT64_C(0xF46552EB476FC7E8), UINT64_C(0x9B3AF5E8F923E95B),
+ UINT64_C(0xAFC512431059431C), UINT64_C(0xC09AB540AE156DAF),
+ UINT64_C(0xE8E1F0BAF28A2D19), UINT64_C(0x87BE57B94CC603AA),
+ UINT64_C(0xB341B012A5BCA9ED), UINT64_C(0xDC1E17111BF0875E),
+ UINT64_C(0xDBBF7F457649FD62), UINT64_C(0xB4E0D846C805D3D1),
+ UINT64_C(0x801F3FED217F7996), UINT64_C(0xEF4098EE9F335725),
+ UINT64_C(0x8E5DEE45FB0D8CEE), UINT64_C(0xE10249464541A25D),
+ UINT64_C(0xD5FDAEEDAC3B081A), UINT64_C(0xBAA209EE127726A9),
+ UINT64_C(0xBD0361BA7FCE5C95), UINT64_C(0xD25CC6B9C1827226),
+ UINT64_C(0xE6A3211228F8D861), UINT64_C(0x89FC861196B4F6D2),
+ UINT64_C(0x334BA549BA69819F), UINT64_C(0x5C14024A0425AF2C),
+ UINT64_C(0x68EBE5E1ED5F056B), UINT64_C(0x07B442E253132BD8),
+ UINT64_C(0x00152AB63EAA51E4), UINT64_C(0x6F4A8DB580E67F57),
+ UINT64_C(0x5BB56A1E699CD510), UINT64_C(0x34EACD1DD7D0FBA3),
+ UINT64_C(0x55F7BBB6B3EE2068), UINT64_C(0x3AA81CB50DA20EDB),
+ UINT64_C(0x0E57FB1EE4D8A49C), UINT64_C(0x61085C1D5A948A2F),
+ UINT64_C(0x66A93449372DF013), UINT64_C(0x09F6934A8961DEA0),
+ UINT64_C(0x3D0974E1601B74E7), UINT64_C(0x5256D3E2DE575A54),
+ UINT64_C(0x7A2D961882C81AE2), UINT64_C(0x1572311B3C843451),
+ UINT64_C(0x218DD6B0D5FE9E16), UINT64_C(0x4ED271B36BB2B0A5),
+ UINT64_C(0x497319E7060BCA99), UINT64_C(0x262CBEE4B847E42A),
+ UINT64_C(0x12D3594F513D4E6D), UINT64_C(0x7D8CFE4CEF7160DE),
+ UINT64_C(0x1C9188E78B4FBB15), UINT64_C(0x73CE2FE4350395A6),
+ UINT64_C(0x4731C84FDC793FE1), UINT64_C(0x286E6F4C62351152),
+ UINT64_C(0x2FCF07180F8C6B6E), UINT64_C(0x4090A01BB1C045DD),
+ UINT64_C(0x746F47B058BAEF9A), UINT64_C(0x1B30E0B3E6F6C129),
+ UINT64_C(0x420F87D795576CC9), UINT64_C(0x2D5020D42B1B427A),
+ UINT64_C(0x19AFC77FC261E83D), UINT64_C(0x76F0607C7C2DC68E),
+ UINT64_C(0x715108281194BCB2), UINT64_C(0x1E0EAF2BAFD89201),
+ UINT64_C(0x2AF1488046A23846), UINT64_C(0x45AEEF83F8EE16F5),
+ UINT64_C(0x24B399289CD0CD3E), UINT64_C(0x4BEC3E2B229CE38D),
+ UINT64_C(0x7F13D980CBE649CA), UINT64_C(0x104C7E8375AA6779),
+ UINT64_C(0x17ED16D718131D45), UINT64_C(0x78B2B1D4A65F33F6),
+ UINT64_C(0x4C4D567F4F2599B1), UINT64_C(0x2312F17CF169B702),
+ UINT64_C(0x0B69B486ADF6F7B4), UINT64_C(0x6436138513BAD907),
+ UINT64_C(0x50C9F42EFAC07340), UINT64_C(0x3F96532D448C5DF3),
+ UINT64_C(0x38373B79293527CF), UINT64_C(0x57689C7A9779097C),
+ UINT64_C(0x63977BD17E03A33B), UINT64_C(0x0CC8DCD2C04F8D88),
+ UINT64_C(0x6DD5AA79A4715643), UINT64_C(0x028A0D7A1A3D78F0),
+ UINT64_C(0x3675EAD1F347D2B7), UINT64_C(0x592A4DD24D0BFC04),
+ UINT64_C(0x5E8B258620B28638), UINT64_C(0x31D482859EFEA88B),
+ UINT64_C(0x052B652E778402CC), UINT64_C(0x6A74C22DC9C82C7F),
+ UINT64_C(0xD0C3E175E5155B32), UINT64_C(0xBF9C46765B597581),
+ UINT64_C(0x8B63A1DDB223DFC6), UINT64_C(0xE43C06DE0C6FF175),
+ UINT64_C(0xE39D6E8A61D68B49), UINT64_C(0x8CC2C989DF9AA5FA),
+ UINT64_C(0xB83D2E2236E00FBD), UINT64_C(0xD762892188AC210E),
+ UINT64_C(0xB67FFF8AEC92FAC5), UINT64_C(0xD920588952DED476),
+ UINT64_C(0xEDDFBF22BBA47E31), UINT64_C(0x8280182105E85082),
+ UINT64_C(0x8521707568512ABE), UINT64_C(0xEA7ED776D61D040D),
+ UINT64_C(0xDE8130DD3F67AE4A), UINT64_C(0xB1DE97DE812B80F9),
+ UINT64_C(0x99A5D224DDB4C04F), UINT64_C(0xF6FA752763F8EEFC),
+ UINT64_C(0xC205928C8A8244BB), UINT64_C(0xAD5A358F34CE6A08),
+ UINT64_C(0xAAFB5DDB59771034), UINT64_C(0xC5A4FAD8E73B3E87),
+ UINT64_C(0xF15B1D730E4194C0), UINT64_C(0x9E04BA70B00DBA73),
+ UINT64_C(0xFF19CCDBD43361B8), UINT64_C(0x90466BD86A7F4F0B),
+ UINT64_C(0xA4B98C738305E54C), UINT64_C(0xCBE62B703D49CBFF),
+ UINT64_C(0xCC47432450F0B1C3), UINT64_C(0xA318E427EEBC9F70),
+ UINT64_C(0x97E7038C07C63537), UINT64_C(0xF8B8A48FB98A1B84),
+ UINT64_C(0xE388443C5F7CDAAD), UINT64_C(0x8CD7E33FE130F41E),
+ UINT64_C(0xB8280494084A5E59), UINT64_C(0xD777A397B60670EA),
+ UINT64_C(0xD0D6CBC3DBBF0AD6), UINT64_C(0xBF896CC065F32465),
+ UINT64_C(0x8B768B6B8C898E22), UINT64_C(0xE4292C6832C5A091),
+ UINT64_C(0x85345AC356FB7B5A), UINT64_C(0xEA6BFDC0E8B755E9),
+ UINT64_C(0xDE941A6B01CDFFAE), UINT64_C(0xB1CBBD68BF81D11D),
+ UINT64_C(0xB66AD53CD238AB21), UINT64_C(0xD935723F6C748592),
+ UINT64_C(0xEDCA9594850E2FD5), UINT64_C(0x829532973B420166),
+ UINT64_C(0xAAEE776D67DD41D0), UINT64_C(0xC5B1D06ED9916F63),
+ UINT64_C(0xF14E37C530EBC524), UINT64_C(0x9E1190C68EA7EB97),
+ UINT64_C(0x99B0F892E31E91AB), UINT64_C(0xF6EF5F915D52BF18),
+ UINT64_C(0xC210B83AB428155F), UINT64_C(0xAD4F1F390A643BEC),
+ UINT64_C(0xCC5269926E5AE027), UINT64_C(0xA30DCE91D016CE94),
+ UINT64_C(0x97F2293A396C64D3), UINT64_C(0xF8AD8E3987204A60),
+ UINT64_C(0xFF0CE66DEA99305C), UINT64_C(0x9053416E54D51EEF),
+ UINT64_C(0xA4ACA6C5BDAFB4A8), UINT64_C(0xCBF301C603E39A1B),
+ UINT64_C(0x7144229E2F3EED56), UINT64_C(0x1E1B859D9172C3E5),
+ UINT64_C(0x2AE46236780869A2), UINT64_C(0x45BBC535C6444711),
+ UINT64_C(0x421AAD61ABFD3D2D), UINT64_C(0x2D450A6215B1139E),
+ UINT64_C(0x19BAEDC9FCCBB9D9), UINT64_C(0x76E54ACA4287976A),
+ UINT64_C(0x17F83C6126B94CA1), UINT64_C(0x78A79B6298F56212),
+ UINT64_C(0x4C587CC9718FC855), UINT64_C(0x2307DBCACFC3E6E6),
+ UINT64_C(0x24A6B39EA27A9CDA), UINT64_C(0x4BF9149D1C36B269),
+ UINT64_C(0x7F06F336F54C182E), UINT64_C(0x105954354B00369D),
+ UINT64_C(0x382211CF179F762B), UINT64_C(0x577DB6CCA9D35898),
+ UINT64_C(0x6382516740A9F2DF), UINT64_C(0x0CDDF664FEE5DC6C),
+ UINT64_C(0x0B7C9E30935CA650), UINT64_C(0x642339332D1088E3),
+ UINT64_C(0x50DCDE98C46A22A4), UINT64_C(0x3F83799B7A260C17),
+ UINT64_C(0x5E9E0F301E18D7DC), UINT64_C(0x31C1A833A054F96F),
+ UINT64_C(0x053E4F98492E5328), UINT64_C(0x6A61E89BF7627D9B),
+ UINT64_C(0x6DC080CF9ADB07A7), UINT64_C(0x029F27CC24972914),
+ UINT64_C(0x3660C067CDED8353), UINT64_C(0x593F676473A1ADE0)
+ }, {
+ UINT64_C(0x0000000000000000), UINT64_C(0x0DF1D05C9279E954),
+ UINT64_C(0x1AE2A1B924F3D2A9), UINT64_C(0x171371E5B68A3BFD),
+ UINT64_C(0xB1DA4DDC62497DC1), UINT64_C(0xBC2B9D80F0309495),
+ UINT64_C(0xAB38EC6546BAAF68), UINT64_C(0xA6C93C39D4C3463C),
+ UINT64_C(0xE7AB9517EE3D2210), UINT64_C(0xEA5A454B7C44CB44),
+ UINT64_C(0xFD4934AECACEF0B9), UINT64_C(0xF0B8E4F258B719ED),
+ UINT64_C(0x5671D8CB8C745FD1), UINT64_C(0x5B8008971E0DB685),
+ UINT64_C(0x4C937972A8878D78), UINT64_C(0x4162A92E3AFE642C),
+ UINT64_C(0xCE572B2FDC7B4420), UINT64_C(0xC3A6FB734E02AD74),
+ UINT64_C(0xD4B58A96F8889689), UINT64_C(0xD9445ACA6AF17FDD),
+ UINT64_C(0x7F8D66F3BE3239E1), UINT64_C(0x727CB6AF2C4BD0B5),
+ UINT64_C(0x656FC74A9AC1EB48), UINT64_C(0x689E171608B8021C),
+ UINT64_C(0x29FCBE3832466630), UINT64_C(0x240D6E64A03F8F64),
+ UINT64_C(0x331E1F8116B5B499), UINT64_C(0x3EEFCFDD84CC5DCD),
+ UINT64_C(0x9826F3E4500F1BF1), UINT64_C(0x95D723B8C276F2A5),
+ UINT64_C(0x82C4525D74FCC958), UINT64_C(0x8F358201E685200C),
+ UINT64_C(0x9CAF565EB8F78840), UINT64_C(0x915E86022A8E6114),
+ UINT64_C(0x864DF7E79C045AE9), UINT64_C(0x8BBC27BB0E7DB3BD),
+ UINT64_C(0x2D751B82DABEF581), UINT64_C(0x2084CBDE48C71CD5),
+ UINT64_C(0x3797BA3BFE4D2728), UINT64_C(0x3A666A676C34CE7C),
+ UINT64_C(0x7B04C34956CAAA50), UINT64_C(0x76F51315C4B34304),
+ UINT64_C(0x61E662F0723978F9), UINT64_C(0x6C17B2ACE04091AD),
+ UINT64_C(0xCADE8E953483D791), UINT64_C(0xC72F5EC9A6FA3EC5),
+ UINT64_C(0xD03C2F2C10700538), UINT64_C(0xDDCDFF708209EC6C),
+ UINT64_C(0x52F87D71648CCC60), UINT64_C(0x5F09AD2DF6F52534),
+ UINT64_C(0x481ADCC8407F1EC9), UINT64_C(0x45EB0C94D206F79D),
+ UINT64_C(0xE32230AD06C5B1A1), UINT64_C(0xEED3E0F194BC58F5),
+ UINT64_C(0xF9C0911422366308), UINT64_C(0xF4314148B04F8A5C),
+ UINT64_C(0xB553E8668AB1EE70), UINT64_C(0xB8A2383A18C80724),
+ UINT64_C(0xAFB149DFAE423CD9), UINT64_C(0xA24099833C3BD58D),
+ UINT64_C(0x0489A5BAE8F893B1), UINT64_C(0x097875E67A817AE5),
+ UINT64_C(0x1E6B0403CC0B4118), UINT64_C(0x139AD45F5E72A84C),
+ UINT64_C(0x385FADBC70EF1181), UINT64_C(0x35AE7DE0E296F8D5),
+ UINT64_C(0x22BD0C05541CC328), UINT64_C(0x2F4CDC59C6652A7C),
+ UINT64_C(0x8985E06012A66C40), UINT64_C(0x8474303C80DF8514),
+ UINT64_C(0x936741D93655BEE9), UINT64_C(0x9E969185A42C57BD),
+ UINT64_C(0xDFF438AB9ED23391), UINT64_C(0xD205E8F70CABDAC5),
+ UINT64_C(0xC5169912BA21E138), UINT64_C(0xC8E7494E2858086C),
+ UINT64_C(0x6E2E7577FC9B4E50), UINT64_C(0x63DFA52B6EE2A704),
+ UINT64_C(0x74CCD4CED8689CF9), UINT64_C(0x793D04924A1175AD),
+ UINT64_C(0xF6088693AC9455A1), UINT64_C(0xFBF956CF3EEDBCF5),
+ UINT64_C(0xECEA272A88678708), UINT64_C(0xE11BF7761A1E6E5C),
+ UINT64_C(0x47D2CB4FCEDD2860), UINT64_C(0x4A231B135CA4C134),
+ UINT64_C(0x5D306AF6EA2EFAC9), UINT64_C(0x50C1BAAA7857139D),
+ UINT64_C(0x11A3138442A977B1), UINT64_C(0x1C52C3D8D0D09EE5),
+ UINT64_C(0x0B41B23D665AA518), UINT64_C(0x06B06261F4234C4C),
+ UINT64_C(0xA0795E5820E00A70), UINT64_C(0xAD888E04B299E324),
+ UINT64_C(0xBA9BFFE10413D8D9), UINT64_C(0xB76A2FBD966A318D),
+ UINT64_C(0xA4F0FBE2C81899C1), UINT64_C(0xA9012BBE5A617095),
+ UINT64_C(0xBE125A5BECEB4B68), UINT64_C(0xB3E38A077E92A23C),
+ UINT64_C(0x152AB63EAA51E400), UINT64_C(0x18DB666238280D54),
+ UINT64_C(0x0FC817878EA236A9), UINT64_C(0x0239C7DB1CDBDFFD),
+ UINT64_C(0x435B6EF52625BBD1), UINT64_C(0x4EAABEA9B45C5285),
+ UINT64_C(0x59B9CF4C02D66978), UINT64_C(0x54481F1090AF802C),
+ UINT64_C(0xF2812329446CC610), UINT64_C(0xFF70F375D6152F44),
+ UINT64_C(0xE8638290609F14B9), UINT64_C(0xE59252CCF2E6FDED),
+ UINT64_C(0x6AA7D0CD1463DDE1), UINT64_C(0x67560091861A34B5),
+ UINT64_C(0x7045717430900F48), UINT64_C(0x7DB4A128A2E9E61C),
+ UINT64_C(0xDB7D9D11762AA020), UINT64_C(0xD68C4D4DE4534974),
+ UINT64_C(0xC19F3CA852D97289), UINT64_C(0xCC6EECF4C0A09BDD),
+ UINT64_C(0x8D0C45DAFA5EFFF1), UINT64_C(0x80FD9586682716A5),
+ UINT64_C(0x97EEE463DEAD2D58), UINT64_C(0x9A1F343F4CD4C40C),
+ UINT64_C(0x3CD6080698178230), UINT64_C(0x3127D85A0A6E6B64),
+ UINT64_C(0x2634A9BFBCE45099), UINT64_C(0x2BC579E32E9DB9CD),
+ UINT64_C(0xF5A054D6CA71FB90), UINT64_C(0xF851848A580812C4),
+ UINT64_C(0xEF42F56FEE822939), UINT64_C(0xE2B325337CFBC06D),
+ UINT64_C(0x447A190AA8388651), UINT64_C(0x498BC9563A416F05),
+ UINT64_C(0x5E98B8B38CCB54F8), UINT64_C(0x536968EF1EB2BDAC),
+ UINT64_C(0x120BC1C1244CD980), UINT64_C(0x1FFA119DB63530D4),
+ UINT64_C(0x08E9607800BF0B29), UINT64_C(0x0518B02492C6E27D),
+ UINT64_C(0xA3D18C1D4605A441), UINT64_C(0xAE205C41D47C4D15),
+ UINT64_C(0xB9332DA462F676E8), UINT64_C(0xB4C2FDF8F08F9FBC),
+ UINT64_C(0x3BF77FF9160ABFB0), UINT64_C(0x3606AFA5847356E4),
+ UINT64_C(0x2115DE4032F96D19), UINT64_C(0x2CE40E1CA080844D),
+ UINT64_C(0x8A2D32257443C271), UINT64_C(0x87DCE279E63A2B25),
+ UINT64_C(0x90CF939C50B010D8), UINT64_C(0x9D3E43C0C2C9F98C),
+ UINT64_C(0xDC5CEAEEF8379DA0), UINT64_C(0xD1AD3AB26A4E74F4),
+ UINT64_C(0xC6BE4B57DCC44F09), UINT64_C(0xCB4F9B0B4EBDA65D),
+ UINT64_C(0x6D86A7329A7EE061), UINT64_C(0x6077776E08070935),
+ UINT64_C(0x7764068BBE8D32C8), UINT64_C(0x7A95D6D72CF4DB9C),
+ UINT64_C(0x690F0288728673D0), UINT64_C(0x64FED2D4E0FF9A84),
+ UINT64_C(0x73EDA3315675A179), UINT64_C(0x7E1C736DC40C482D),
+ UINT64_C(0xD8D54F5410CF0E11), UINT64_C(0xD5249F0882B6E745),
+ UINT64_C(0xC237EEED343CDCB8), UINT64_C(0xCFC63EB1A64535EC),
+ UINT64_C(0x8EA4979F9CBB51C0), UINT64_C(0x835547C30EC2B894),
+ UINT64_C(0x94463626B8488369), UINT64_C(0x99B7E67A2A316A3D),
+ UINT64_C(0x3F7EDA43FEF22C01), UINT64_C(0x328F0A1F6C8BC555),
+ UINT64_C(0x259C7BFADA01FEA8), UINT64_C(0x286DABA6487817FC),
+ UINT64_C(0xA75829A7AEFD37F0), UINT64_C(0xAAA9F9FB3C84DEA4),
+ UINT64_C(0xBDBA881E8A0EE559), UINT64_C(0xB04B584218770C0D),
+ UINT64_C(0x1682647BCCB44A31), UINT64_C(0x1B73B4275ECDA365),
+ UINT64_C(0x0C60C5C2E8479898), UINT64_C(0x0191159E7A3E71CC),
+ UINT64_C(0x40F3BCB040C015E0), UINT64_C(0x4D026CECD2B9FCB4),
+ UINT64_C(0x5A111D096433C749), UINT64_C(0x57E0CD55F64A2E1D),
+ UINT64_C(0xF129F16C22896821), UINT64_C(0xFCD82130B0F08175),
+ UINT64_C(0xEBCB50D5067ABA88), UINT64_C(0xE63A8089940353DC),
+ UINT64_C(0xCDFFF96ABA9EEA11), UINT64_C(0xC00E293628E70345),
+ UINT64_C(0xD71D58D39E6D38B8), UINT64_C(0xDAEC888F0C14D1EC),
+ UINT64_C(0x7C25B4B6D8D797D0), UINT64_C(0x71D464EA4AAE7E84),
+ UINT64_C(0x66C7150FFC244579), UINT64_C(0x6B36C5536E5DAC2D),
+ UINT64_C(0x2A546C7D54A3C801), UINT64_C(0x27A5BC21C6DA2155),
+ UINT64_C(0x30B6CDC470501AA8), UINT64_C(0x3D471D98E229F3FC),
+ UINT64_C(0x9B8E21A136EAB5C0), UINT64_C(0x967FF1FDA4935C94),
+ UINT64_C(0x816C801812196769), UINT64_C(0x8C9D504480608E3D),
+ UINT64_C(0x03A8D24566E5AE31), UINT64_C(0x0E590219F49C4765),
+ UINT64_C(0x194A73FC42167C98), UINT64_C(0x14BBA3A0D06F95CC),
+ UINT64_C(0xB2729F9904ACD3F0), UINT64_C(0xBF834FC596D53AA4),
+ UINT64_C(0xA8903E20205F0159), UINT64_C(0xA561EE7CB226E80D),
+ UINT64_C(0xE403475288D88C21), UINT64_C(0xE9F2970E1AA16575),
+ UINT64_C(0xFEE1E6EBAC2B5E88), UINT64_C(0xF31036B73E52B7DC),
+ UINT64_C(0x55D90A8EEA91F1E0), UINT64_C(0x5828DAD278E818B4),
+ UINT64_C(0x4F3BAB37CE622349), UINT64_C(0x42CA7B6B5C1BCA1D),
+ UINT64_C(0x5150AF3402696251), UINT64_C(0x5CA17F6890108B05),
+ UINT64_C(0x4BB20E8D269AB0F8), UINT64_C(0x4643DED1B4E359AC),
+ UINT64_C(0xE08AE2E860201F90), UINT64_C(0xED7B32B4F259F6C4),
+ UINT64_C(0xFA68435144D3CD39), UINT64_C(0xF799930DD6AA246D),
+ UINT64_C(0xB6FB3A23EC544041), UINT64_C(0xBB0AEA7F7E2DA915),
+ UINT64_C(0xAC199B9AC8A792E8), UINT64_C(0xA1E84BC65ADE7BBC),
+ UINT64_C(0x072177FF8E1D3D80), UINT64_C(0x0AD0A7A31C64D4D4),
+ UINT64_C(0x1DC3D646AAEEEF29), UINT64_C(0x1032061A3897067D),
+ UINT64_C(0x9F07841BDE122671), UINT64_C(0x92F654474C6BCF25),
+ UINT64_C(0x85E525A2FAE1F4D8), UINT64_C(0x8814F5FE68981D8C),
+ UINT64_C(0x2EDDC9C7BC5B5BB0), UINT64_C(0x232C199B2E22B2E4),
+ UINT64_C(0x343F687E98A88919), UINT64_C(0x39CEB8220AD1604D),
+ UINT64_C(0x78AC110C302F0461), UINT64_C(0x755DC150A256ED35),
+ UINT64_C(0x624EB0B514DCD6C8), UINT64_C(0x6FBF60E986A53F9C),
+ UINT64_C(0xC9765CD0526679A0), UINT64_C(0xC4878C8CC01F90F4),
+ UINT64_C(0xD394FD697695AB09), UINT64_C(0xDE652D35E4EC425D)
+ }, {
+ UINT64_C(0x0000000000000000), UINT64_C(0xCB6D6A914AE10B3F),
+ UINT64_C(0x96DBD42295C2177E), UINT64_C(0x5DB6BEB3DF231C41),
+ UINT64_C(0x2CB7A9452A852FFC), UINT64_C(0xE7DAC3D4606424C3),
+ UINT64_C(0xBA6C7D67BF473882), UINT64_C(0x710117F6F5A633BD),
+ UINT64_C(0xDD705D247FA5876A), UINT64_C(0x161D37B535448C55),
+ UINT64_C(0x4BAB8906EA679014), UINT64_C(0x80C6E397A0869B2B),
+ UINT64_C(0xF1C7F4615520A896), UINT64_C(0x3AAA9EF01FC1A3A9),
+ UINT64_C(0x671C2043C0E2BFE8), UINT64_C(0xAC714AD28A03B4D7),
+ UINT64_C(0xBAE1BA48FE4A0FD5), UINT64_C(0x718CD0D9B4AB04EA),
+ UINT64_C(0x2C3A6E6A6B8818AB), UINT64_C(0xE75704FB21691394),
+ UINT64_C(0x9656130DD4CF2029), UINT64_C(0x5D3B799C9E2E2B16),
+ UINT64_C(0x008DC72F410D3757), UINT64_C(0xCBE0ADBE0BEC3C68),
+ UINT64_C(0x6791E76C81EF88BF), UINT64_C(0xACFC8DFDCB0E8380),
+ UINT64_C(0xF14A334E142D9FC1), UINT64_C(0x3A2759DF5ECC94FE),
+ UINT64_C(0x4B264E29AB6AA743), UINT64_C(0x804B24B8E18BAC7C),
+ UINT64_C(0xDDFD9A0B3EA8B03D), UINT64_C(0x1690F09A7449BB02),
+ UINT64_C(0xF1DD7B3ED73AC638), UINT64_C(0x3AB011AF9DDBCD07),
+ UINT64_C(0x6706AF1C42F8D146), UINT64_C(0xAC6BC58D0819DA79),
+ UINT64_C(0xDD6AD27BFDBFE9C4), UINT64_C(0x1607B8EAB75EE2FB),
+ UINT64_C(0x4BB10659687DFEBA), UINT64_C(0x80DC6CC8229CF585),
+ UINT64_C(0x2CAD261AA89F4152), UINT64_C(0xE7C04C8BE27E4A6D),
+ UINT64_C(0xBA76F2383D5D562C), UINT64_C(0x711B98A977BC5D13),
+ UINT64_C(0x001A8F5F821A6EAE), UINT64_C(0xCB77E5CEC8FB6591),
+ UINT64_C(0x96C15B7D17D879D0), UINT64_C(0x5DAC31EC5D3972EF),
+ UINT64_C(0x4B3CC1762970C9ED), UINT64_C(0x8051ABE76391C2D2),
+ UINT64_C(0xDDE71554BCB2DE93), UINT64_C(0x168A7FC5F653D5AC),
+ UINT64_C(0x678B683303F5E611), UINT64_C(0xACE602A24914ED2E),
+ UINT64_C(0xF150BC119637F16F), UINT64_C(0x3A3DD680DCD6FA50),
+ UINT64_C(0x964C9C5256D54E87), UINT64_C(0x5D21F6C31C3445B8),
+ UINT64_C(0x00974870C31759F9), UINT64_C(0xCBFA22E189F652C6),
+ UINT64_C(0xBAFB35177C50617B), UINT64_C(0x71965F8636B16A44),
+ UINT64_C(0x2C20E135E9927605), UINT64_C(0xE74D8BA4A3737D3A),
+ UINT64_C(0xE2BBF77CAE758C71), UINT64_C(0x29D69DEDE494874E),
+ UINT64_C(0x7460235E3BB79B0F), UINT64_C(0xBF0D49CF71569030),
+ UINT64_C(0xCE0C5E3984F0A38D), UINT64_C(0x056134A8CE11A8B2),
+ UINT64_C(0x58D78A1B1132B4F3), UINT64_C(0x93BAE08A5BD3BFCC),
+ UINT64_C(0x3FCBAA58D1D00B1B), UINT64_C(0xF4A6C0C99B310024),
+ UINT64_C(0xA9107E7A44121C65), UINT64_C(0x627D14EB0EF3175A),
+ UINT64_C(0x137C031DFB5524E7), UINT64_C(0xD811698CB1B42FD8),
+ UINT64_C(0x85A7D73F6E973399), UINT64_C(0x4ECABDAE247638A6),
+ UINT64_C(0x585A4D34503F83A4), UINT64_C(0x933727A51ADE889B),
+ UINT64_C(0xCE819916C5FD94DA), UINT64_C(0x05ECF3878F1C9FE5),
+ UINT64_C(0x74EDE4717ABAAC58), UINT64_C(0xBF808EE0305BA767),
+ UINT64_C(0xE2363053EF78BB26), UINT64_C(0x295B5AC2A599B019),
+ UINT64_C(0x852A10102F9A04CE), UINT64_C(0x4E477A81657B0FF1),
+ UINT64_C(0x13F1C432BA5813B0), UINT64_C(0xD89CAEA3F0B9188F),
+ UINT64_C(0xA99DB955051F2B32), UINT64_C(0x62F0D3C44FFE200D),
+ UINT64_C(0x3F466D7790DD3C4C), UINT64_C(0xF42B07E6DA3C3773),
+ UINT64_C(0x13668C42794F4A49), UINT64_C(0xD80BE6D333AE4176),
+ UINT64_C(0x85BD5860EC8D5D37), UINT64_C(0x4ED032F1A66C5608),
+ UINT64_C(0x3FD1250753CA65B5), UINT64_C(0xF4BC4F96192B6E8A),
+ UINT64_C(0xA90AF125C60872CB), UINT64_C(0x62679BB48CE979F4),
+ UINT64_C(0xCE16D16606EACD23), UINT64_C(0x057BBBF74C0BC61C),
+ UINT64_C(0x58CD05449328DA5D), UINT64_C(0x93A06FD5D9C9D162),
+ UINT64_C(0xE2A178232C6FE2DF), UINT64_C(0x29CC12B2668EE9E0),
+ UINT64_C(0x747AAC01B9ADF5A1), UINT64_C(0xBF17C690F34CFE9E),
+ UINT64_C(0xA987360A8705459C), UINT64_C(0x62EA5C9BCDE44EA3),
+ UINT64_C(0x3F5CE22812C752E2), UINT64_C(0xF43188B9582659DD),
+ UINT64_C(0x85309F4FAD806A60), UINT64_C(0x4E5DF5DEE761615F),
+ UINT64_C(0x13EB4B6D38427D1E), UINT64_C(0xD88621FC72A37621),
+ UINT64_C(0x74F76B2EF8A0C2F6), UINT64_C(0xBF9A01BFB241C9C9),
+ UINT64_C(0xE22CBF0C6D62D588), UINT64_C(0x2941D59D2783DEB7),
+ UINT64_C(0x5840C26BD225ED0A), UINT64_C(0x932DA8FA98C4E635),
+ UINT64_C(0xCE9B164947E7FA74), UINT64_C(0x05F67CD80D06F14B),
+ UINT64_C(0xC477EFF95CEB18E3), UINT64_C(0x0F1A8568160A13DC),
+ UINT64_C(0x52AC3BDBC9290F9D), UINT64_C(0x99C1514A83C804A2),
+ UINT64_C(0xE8C046BC766E371F), UINT64_C(0x23AD2C2D3C8F3C20),
+ UINT64_C(0x7E1B929EE3AC2061), UINT64_C(0xB576F80FA94D2B5E),
+ UINT64_C(0x1907B2DD234E9F89), UINT64_C(0xD26AD84C69AF94B6),
+ UINT64_C(0x8FDC66FFB68C88F7), UINT64_C(0x44B10C6EFC6D83C8),
+ UINT64_C(0x35B01B9809CBB075), UINT64_C(0xFEDD7109432ABB4A),
+ UINT64_C(0xA36BCFBA9C09A70B), UINT64_C(0x6806A52BD6E8AC34),
+ UINT64_C(0x7E9655B1A2A11736), UINT64_C(0xB5FB3F20E8401C09),
+ UINT64_C(0xE84D819337630048), UINT64_C(0x2320EB027D820B77),
+ UINT64_C(0x5221FCF4882438CA), UINT64_C(0x994C9665C2C533F5),
+ UINT64_C(0xC4FA28D61DE62FB4), UINT64_C(0x0F9742475707248B),
+ UINT64_C(0xA3E60895DD04905C), UINT64_C(0x688B620497E59B63),
+ UINT64_C(0x353DDCB748C68722), UINT64_C(0xFE50B62602278C1D),
+ UINT64_C(0x8F51A1D0F781BFA0), UINT64_C(0x443CCB41BD60B49F),
+ UINT64_C(0x198A75F26243A8DE), UINT64_C(0xD2E71F6328A2A3E1),
+ UINT64_C(0x35AA94C78BD1DEDB), UINT64_C(0xFEC7FE56C130D5E4),
+ UINT64_C(0xA37140E51E13C9A5), UINT64_C(0x681C2A7454F2C29A),
+ UINT64_C(0x191D3D82A154F127), UINT64_C(0xD2705713EBB5FA18),
+ UINT64_C(0x8FC6E9A03496E659), UINT64_C(0x44AB83317E77ED66),
+ UINT64_C(0xE8DAC9E3F47459B1), UINT64_C(0x23B7A372BE95528E),
+ UINT64_C(0x7E011DC161B64ECF), UINT64_C(0xB56C77502B5745F0),
+ UINT64_C(0xC46D60A6DEF1764D), UINT64_C(0x0F000A3794107D72),
+ UINT64_C(0x52B6B4844B336133), UINT64_C(0x99DBDE1501D26A0C),
+ UINT64_C(0x8F4B2E8F759BD10E), UINT64_C(0x4426441E3F7ADA31),
+ UINT64_C(0x1990FAADE059C670), UINT64_C(0xD2FD903CAAB8CD4F),
+ UINT64_C(0xA3FC87CA5F1EFEF2), UINT64_C(0x6891ED5B15FFF5CD),
+ UINT64_C(0x352753E8CADCE98C), UINT64_C(0xFE4A3979803DE2B3),
+ UINT64_C(0x523B73AB0A3E5664), UINT64_C(0x9956193A40DF5D5B),
+ UINT64_C(0xC4E0A7899FFC411A), UINT64_C(0x0F8DCD18D51D4A25),
+ UINT64_C(0x7E8CDAEE20BB7998), UINT64_C(0xB5E1B07F6A5A72A7),
+ UINT64_C(0xE8570ECCB5796EE6), UINT64_C(0x233A645DFF9865D9),
+ UINT64_C(0x26CC1885F29E9492), UINT64_C(0xEDA17214B87F9FAD),
+ UINT64_C(0xB017CCA7675C83EC), UINT64_C(0x7B7AA6362DBD88D3),
+ UINT64_C(0x0A7BB1C0D81BBB6E), UINT64_C(0xC116DB5192FAB051),
+ UINT64_C(0x9CA065E24DD9AC10), UINT64_C(0x57CD0F730738A72F),
+ UINT64_C(0xFBBC45A18D3B13F8), UINT64_C(0x30D12F30C7DA18C7),
+ UINT64_C(0x6D67918318F90486), UINT64_C(0xA60AFB1252180FB9),
+ UINT64_C(0xD70BECE4A7BE3C04), UINT64_C(0x1C668675ED5F373B),
+ UINT64_C(0x41D038C6327C2B7A), UINT64_C(0x8ABD5257789D2045),
+ UINT64_C(0x9C2DA2CD0CD49B47), UINT64_C(0x5740C85C46359078),
+ UINT64_C(0x0AF676EF99168C39), UINT64_C(0xC19B1C7ED3F78706),
+ UINT64_C(0xB09A0B882651B4BB), UINT64_C(0x7BF761196CB0BF84),
+ UINT64_C(0x2641DFAAB393A3C5), UINT64_C(0xED2CB53BF972A8FA),
+ UINT64_C(0x415DFFE973711C2D), UINT64_C(0x8A30957839901712),
+ UINT64_C(0xD7862BCBE6B30B53), UINT64_C(0x1CEB415AAC52006C),
+ UINT64_C(0x6DEA56AC59F433D1), UINT64_C(0xA6873C3D131538EE),
+ UINT64_C(0xFB31828ECC3624AF), UINT64_C(0x305CE81F86D72F90),
+ UINT64_C(0xD71163BB25A452AA), UINT64_C(0x1C7C092A6F455995),
+ UINT64_C(0x41CAB799B06645D4), UINT64_C(0x8AA7DD08FA874EEB),
+ UINT64_C(0xFBA6CAFE0F217D56), UINT64_C(0x30CBA06F45C07669),
+ UINT64_C(0x6D7D1EDC9AE36A28), UINT64_C(0xA610744DD0026117),
+ UINT64_C(0x0A613E9F5A01D5C0), UINT64_C(0xC10C540E10E0DEFF),
+ UINT64_C(0x9CBAEABDCFC3C2BE), UINT64_C(0x57D7802C8522C981),
+ UINT64_C(0x26D697DA7084FA3C), UINT64_C(0xEDBBFD4B3A65F103),
+ UINT64_C(0xB00D43F8E546ED42), UINT64_C(0x7B602969AFA7E67D),
+ UINT64_C(0x6DF0D9F3DBEE5D7F), UINT64_C(0xA69DB362910F5640),
+ UINT64_C(0xFB2B0DD14E2C4A01), UINT64_C(0x3046674004CD413E),
+ UINT64_C(0x414770B6F16B7283), UINT64_C(0x8A2A1A27BB8A79BC),
+ UINT64_C(0xD79CA49464A965FD), UINT64_C(0x1CF1CE052E486EC2),
+ UINT64_C(0xB08084D7A44BDA15), UINT64_C(0x7BEDEE46EEAAD12A),
+ UINT64_C(0x265B50F53189CD6B), UINT64_C(0xED363A647B68C654),
+ UINT64_C(0x9C372D928ECEF5E9), UINT64_C(0x575A4703C42FFED6),
+ UINT64_C(0x0AECF9B01B0CE297), UINT64_C(0xC181932151EDE9A8)
+ }, {
+ UINT64_C(0x0000000000000000), UINT64_C(0xDCA12C225E8AEE1D),
+ UINT64_C(0xB8435944BC14DD3B), UINT64_C(0x64E27566E29E3326),
+ UINT64_C(0x7087B2887829BA77), UINT64_C(0xAC269EAA26A3546A),
+ UINT64_C(0xC8C4EBCCC43D674C), UINT64_C(0x1465C7EE9AB78951),
+ UINT64_C(0xE00E6511F15274EF), UINT64_C(0x3CAF4933AFD89AF2),
+ UINT64_C(0x584D3C554D46A9D4), UINT64_C(0x84EC107713CC47C9),
+ UINT64_C(0x9089D799897BCE98), UINT64_C(0x4C28FBBBD7F12085),
+ UINT64_C(0x28CA8EDD356F13A3), UINT64_C(0xF46BA2FF6BE5FDBE),
+ UINT64_C(0x4503C48DC90A304C), UINT64_C(0x99A2E8AF9780DE51),
+ UINT64_C(0xFD409DC9751EED77), UINT64_C(0x21E1B1EB2B94036A),
+ UINT64_C(0x35847605B1238A3B), UINT64_C(0xE9255A27EFA96426),
+ UINT64_C(0x8DC72F410D375700), UINT64_C(0x5166036353BDB91D),
+ UINT64_C(0xA50DA19C385844A3), UINT64_C(0x79AC8DBE66D2AABE),
+ UINT64_C(0x1D4EF8D8844C9998), UINT64_C(0xC1EFD4FADAC67785),
+ UINT64_C(0xD58A13144071FED4), UINT64_C(0x092B3F361EFB10C9),
+ UINT64_C(0x6DC94A50FC6523EF), UINT64_C(0xB1686672A2EFCDF2),
+ UINT64_C(0x8A06881B93156098), UINT64_C(0x56A7A439CD9F8E85),
+ UINT64_C(0x3245D15F2F01BDA3), UINT64_C(0xEEE4FD7D718B53BE),
+ UINT64_C(0xFA813A93EB3CDAEF), UINT64_C(0x262016B1B5B634F2),
+ UINT64_C(0x42C263D7572807D4), UINT64_C(0x9E634FF509A2E9C9),
+ UINT64_C(0x6A08ED0A62471477), UINT64_C(0xB6A9C1283CCDFA6A),
+ UINT64_C(0xD24BB44EDE53C94C), UINT64_C(0x0EEA986C80D92751),
+ UINT64_C(0x1A8F5F821A6EAE00), UINT64_C(0xC62E73A044E4401D),
+ UINT64_C(0xA2CC06C6A67A733B), UINT64_C(0x7E6D2AE4F8F09D26),
+ UINT64_C(0xCF054C965A1F50D4), UINT64_C(0x13A460B40495BEC9),
+ UINT64_C(0x774615D2E60B8DEF), UINT64_C(0xABE739F0B88163F2),
+ UINT64_C(0xBF82FE1E2236EAA3), UINT64_C(0x6323D23C7CBC04BE),
+ UINT64_C(0x07C1A75A9E223798), UINT64_C(0xDB608B78C0A8D985),
+ UINT64_C(0x2F0B2987AB4D243B), UINT64_C(0xF3AA05A5F5C7CA26),
+ UINT64_C(0x974870C31759F900), UINT64_C(0x4BE95CE149D3171D),
+ UINT64_C(0x5F8C9B0FD3649E4C), UINT64_C(0x832DB72D8DEE7051),
+ UINT64_C(0xE7CFC24B6F704377), UINT64_C(0x3B6EEE6931FAAD6A),
+ UINT64_C(0x91131E980D8418A2), UINT64_C(0x4DB232BA530EF6BF),
+ UINT64_C(0x295047DCB190C599), UINT64_C(0xF5F16BFEEF1A2B84),
+ UINT64_C(0xE194AC1075ADA2D5), UINT64_C(0x3D3580322B274CC8),
+ UINT64_C(0x59D7F554C9B97FEE), UINT64_C(0x8576D976973391F3),
+ UINT64_C(0x711D7B89FCD66C4D), UINT64_C(0xADBC57ABA25C8250),
+ UINT64_C(0xC95E22CD40C2B176), UINT64_C(0x15FF0EEF1E485F6B),
+ UINT64_C(0x019AC90184FFD63A), UINT64_C(0xDD3BE523DA753827),
+ UINT64_C(0xB9D9904538EB0B01), UINT64_C(0x6578BC676661E51C),
+ UINT64_C(0xD410DA15C48E28EE), UINT64_C(0x08B1F6379A04C6F3),
+ UINT64_C(0x6C538351789AF5D5), UINT64_C(0xB0F2AF7326101BC8),
+ UINT64_C(0xA497689DBCA79299), UINT64_C(0x783644BFE22D7C84),
+ UINT64_C(0x1CD431D900B34FA2), UINT64_C(0xC0751DFB5E39A1BF),
+ UINT64_C(0x341EBF0435DC5C01), UINT64_C(0xE8BF93266B56B21C),
+ UINT64_C(0x8C5DE64089C8813A), UINT64_C(0x50FCCA62D7426F27),
+ UINT64_C(0x44990D8C4DF5E676), UINT64_C(0x983821AE137F086B),
+ UINT64_C(0xFCDA54C8F1E13B4D), UINT64_C(0x207B78EAAF6BD550),
+ UINT64_C(0x1B1596839E91783A), UINT64_C(0xC7B4BAA1C01B9627),
+ UINT64_C(0xA356CFC72285A501), UINT64_C(0x7FF7E3E57C0F4B1C),
+ UINT64_C(0x6B92240BE6B8C24D), UINT64_C(0xB7330829B8322C50),
+ UINT64_C(0xD3D17D4F5AAC1F76), UINT64_C(0x0F70516D0426F16B),
+ UINT64_C(0xFB1BF3926FC30CD5), UINT64_C(0x27BADFB03149E2C8),
+ UINT64_C(0x4358AAD6D3D7D1EE), UINT64_C(0x9FF986F48D5D3FF3),
+ UINT64_C(0x8B9C411A17EAB6A2), UINT64_C(0x573D6D38496058BF),
+ UINT64_C(0x33DF185EABFE6B99), UINT64_C(0xEF7E347CF5748584),
+ UINT64_C(0x5E16520E579B4876), UINT64_C(0x82B77E2C0911A66B),
+ UINT64_C(0xE6550B4AEB8F954D), UINT64_C(0x3AF42768B5057B50),
+ UINT64_C(0x2E91E0862FB2F201), UINT64_C(0xF230CCA471381C1C),
+ UINT64_C(0x96D2B9C293A62F3A), UINT64_C(0x4A7395E0CD2CC127),
+ UINT64_C(0xBE18371FA6C93C99), UINT64_C(0x62B91B3DF843D284),
+ UINT64_C(0x065B6E5B1ADDE1A2), UINT64_C(0xDAFA427944570FBF),
+ UINT64_C(0xCE9F8597DEE086EE), UINT64_C(0x123EA9B5806A68F3),
+ UINT64_C(0x76DCDCD362F45BD5), UINT64_C(0xAA7DF0F13C7EB5C8),
+ UINT64_C(0xA739329F30A7E9D6), UINT64_C(0x7B981EBD6E2D07CB),
+ UINT64_C(0x1F7A6BDB8CB334ED), UINT64_C(0xC3DB47F9D239DAF0),
+ UINT64_C(0xD7BE8017488E53A1), UINT64_C(0x0B1FAC351604BDBC),
+ UINT64_C(0x6FFDD953F49A8E9A), UINT64_C(0xB35CF571AA106087),
+ UINT64_C(0x4737578EC1F59D39), UINT64_C(0x9B967BAC9F7F7324),
+ UINT64_C(0xFF740ECA7DE14002), UINT64_C(0x23D522E8236BAE1F),
+ UINT64_C(0x37B0E506B9DC274E), UINT64_C(0xEB11C924E756C953),
+ UINT64_C(0x8FF3BC4205C8FA75), UINT64_C(0x535290605B421468),
+ UINT64_C(0xE23AF612F9ADD99A), UINT64_C(0x3E9BDA30A7273787),
+ UINT64_C(0x5A79AF5645B904A1), UINT64_C(0x86D883741B33EABC),
+ UINT64_C(0x92BD449A818463ED), UINT64_C(0x4E1C68B8DF0E8DF0),
+ UINT64_C(0x2AFE1DDE3D90BED6), UINT64_C(0xF65F31FC631A50CB),
+ UINT64_C(0x0234930308FFAD75), UINT64_C(0xDE95BF2156754368),
+ UINT64_C(0xBA77CA47B4EB704E), UINT64_C(0x66D6E665EA619E53),
+ UINT64_C(0x72B3218B70D61702), UINT64_C(0xAE120DA92E5CF91F),
+ UINT64_C(0xCAF078CFCCC2CA39), UINT64_C(0x165154ED92482424),
+ UINT64_C(0x2D3FBA84A3B2894E), UINT64_C(0xF19E96A6FD386753),
+ UINT64_C(0x957CE3C01FA65475), UINT64_C(0x49DDCFE2412CBA68),
+ UINT64_C(0x5DB8080CDB9B3339), UINT64_C(0x8119242E8511DD24),
+ UINT64_C(0xE5FB5148678FEE02), UINT64_C(0x395A7D6A3905001F),
+ UINT64_C(0xCD31DF9552E0FDA1), UINT64_C(0x1190F3B70C6A13BC),
+ UINT64_C(0x757286D1EEF4209A), UINT64_C(0xA9D3AAF3B07ECE87),
+ UINT64_C(0xBDB66D1D2AC947D6), UINT64_C(0x6117413F7443A9CB),
+ UINT64_C(0x05F5345996DD9AED), UINT64_C(0xD954187BC85774F0),
+ UINT64_C(0x683C7E096AB8B902), UINT64_C(0xB49D522B3432571F),
+ UINT64_C(0xD07F274DD6AC6439), UINT64_C(0x0CDE0B6F88268A24),
+ UINT64_C(0x18BBCC8112910375), UINT64_C(0xC41AE0A34C1BED68),
+ UINT64_C(0xA0F895C5AE85DE4E), UINT64_C(0x7C59B9E7F00F3053),
+ UINT64_C(0x88321B189BEACDED), UINT64_C(0x5493373AC56023F0),
+ UINT64_C(0x3071425C27FE10D6), UINT64_C(0xECD06E7E7974FECB),
+ UINT64_C(0xF8B5A990E3C3779A), UINT64_C(0x241485B2BD499987),
+ UINT64_C(0x40F6F0D45FD7AAA1), UINT64_C(0x9C57DCF6015D44BC),
+ UINT64_C(0x362A2C073D23F174), UINT64_C(0xEA8B002563A91F69),
+ UINT64_C(0x8E69754381372C4F), UINT64_C(0x52C85961DFBDC252),
+ UINT64_C(0x46AD9E8F450A4B03), UINT64_C(0x9A0CB2AD1B80A51E),
+ UINT64_C(0xFEEEC7CBF91E9638), UINT64_C(0x224FEBE9A7947825),
+ UINT64_C(0xD6244916CC71859B), UINT64_C(0x0A85653492FB6B86),
+ UINT64_C(0x6E671052706558A0), UINT64_C(0xB2C63C702EEFB6BD),
+ UINT64_C(0xA6A3FB9EB4583FEC), UINT64_C(0x7A02D7BCEAD2D1F1),
+ UINT64_C(0x1EE0A2DA084CE2D7), UINT64_C(0xC2418EF856C60CCA),
+ UINT64_C(0x7329E88AF429C138), UINT64_C(0xAF88C4A8AAA32F25),
+ UINT64_C(0xCB6AB1CE483D1C03), UINT64_C(0x17CB9DEC16B7F21E),
+ UINT64_C(0x03AE5A028C007B4F), UINT64_C(0xDF0F7620D28A9552),
+ UINT64_C(0xBBED03463014A674), UINT64_C(0x674C2F646E9E4869),
+ UINT64_C(0x93278D9B057BB5D7), UINT64_C(0x4F86A1B95BF15BCA),
+ UINT64_C(0x2B64D4DFB96F68EC), UINT64_C(0xF7C5F8FDE7E586F1),
+ UINT64_C(0xE3A03F137D520FA0), UINT64_C(0x3F01133123D8E1BD),
+ UINT64_C(0x5BE36657C146D29B), UINT64_C(0x87424A759FCC3C86),
+ UINT64_C(0xBC2CA41CAE3691EC), UINT64_C(0x608D883EF0BC7FF1),
+ UINT64_C(0x046FFD5812224CD7), UINT64_C(0xD8CED17A4CA8A2CA),
+ UINT64_C(0xCCAB1694D61F2B9B), UINT64_C(0x100A3AB68895C586),
+ UINT64_C(0x74E84FD06A0BF6A0), UINT64_C(0xA84963F2348118BD),
+ UINT64_C(0x5C22C10D5F64E503), UINT64_C(0x8083ED2F01EE0B1E),
+ UINT64_C(0xE4619849E3703838), UINT64_C(0x38C0B46BBDFAD625),
+ UINT64_C(0x2CA57385274D5F74), UINT64_C(0xF0045FA779C7B169),
+ UINT64_C(0x94E62AC19B59824F), UINT64_C(0x484706E3C5D36C52),
+ UINT64_C(0xF92F6091673CA1A0), UINT64_C(0x258E4CB339B64FBD),
+ UINT64_C(0x416C39D5DB287C9B), UINT64_C(0x9DCD15F785A29286),
+ UINT64_C(0x89A8D2191F151BD7), UINT64_C(0x5509FE3B419FF5CA),
+ UINT64_C(0x31EB8B5DA301C6EC), UINT64_C(0xED4AA77FFD8B28F1),
+ UINT64_C(0x19210580966ED54F), UINT64_C(0xC58029A2C8E43B52),
+ UINT64_C(0xA1625CC42A7A0874), UINT64_C(0x7DC370E674F0E669),
+ UINT64_C(0x69A6B708EE476F38), UINT64_C(0xB5079B2AB0CD8125),
+ UINT64_C(0xD1E5EE4C5253B203), UINT64_C(0x0D44C26E0CD95C1E)
+ }
+};
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table_le.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table_le.h
new file mode 100644
index 00000000..38678836
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_table_le.h
@@ -0,0 +1,523 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* This file has been automatically generated by crc64_tablegen.c. */
+
+const uint64_t lzma_crc64_table[4][256] = {
+ {
+ UINT64_C(0x0000000000000000), UINT64_C(0xB32E4CBE03A75F6F),
+ UINT64_C(0xF4843657A840A05B), UINT64_C(0x47AA7AE9ABE7FF34),
+ UINT64_C(0x7BD0C384FF8F5E33), UINT64_C(0xC8FE8F3AFC28015C),
+ UINT64_C(0x8F54F5D357CFFE68), UINT64_C(0x3C7AB96D5468A107),
+ UINT64_C(0xF7A18709FF1EBC66), UINT64_C(0x448FCBB7FCB9E309),
+ UINT64_C(0x0325B15E575E1C3D), UINT64_C(0xB00BFDE054F94352),
+ UINT64_C(0x8C71448D0091E255), UINT64_C(0x3F5F08330336BD3A),
+ UINT64_C(0x78F572DAA8D1420E), UINT64_C(0xCBDB3E64AB761D61),
+ UINT64_C(0x7D9BA13851336649), UINT64_C(0xCEB5ED8652943926),
+ UINT64_C(0x891F976FF973C612), UINT64_C(0x3A31DBD1FAD4997D),
+ UINT64_C(0x064B62BCAEBC387A), UINT64_C(0xB5652E02AD1B6715),
+ UINT64_C(0xF2CF54EB06FC9821), UINT64_C(0x41E11855055BC74E),
+ UINT64_C(0x8A3A2631AE2DDA2F), UINT64_C(0x39146A8FAD8A8540),
+ UINT64_C(0x7EBE1066066D7A74), UINT64_C(0xCD905CD805CA251B),
+ UINT64_C(0xF1EAE5B551A2841C), UINT64_C(0x42C4A90B5205DB73),
+ UINT64_C(0x056ED3E2F9E22447), UINT64_C(0xB6409F5CFA457B28),
+ UINT64_C(0xFB374270A266CC92), UINT64_C(0x48190ECEA1C193FD),
+ UINT64_C(0x0FB374270A266CC9), UINT64_C(0xBC9D3899098133A6),
+ UINT64_C(0x80E781F45DE992A1), UINT64_C(0x33C9CD4A5E4ECDCE),
+ UINT64_C(0x7463B7A3F5A932FA), UINT64_C(0xC74DFB1DF60E6D95),
+ UINT64_C(0x0C96C5795D7870F4), UINT64_C(0xBFB889C75EDF2F9B),
+ UINT64_C(0xF812F32EF538D0AF), UINT64_C(0x4B3CBF90F69F8FC0),
+ UINT64_C(0x774606FDA2F72EC7), UINT64_C(0xC4684A43A15071A8),
+ UINT64_C(0x83C230AA0AB78E9C), UINT64_C(0x30EC7C140910D1F3),
+ UINT64_C(0x86ACE348F355AADB), UINT64_C(0x3582AFF6F0F2F5B4),
+ UINT64_C(0x7228D51F5B150A80), UINT64_C(0xC10699A158B255EF),
+ UINT64_C(0xFD7C20CC0CDAF4E8), UINT64_C(0x4E526C720F7DAB87),
+ UINT64_C(0x09F8169BA49A54B3), UINT64_C(0xBAD65A25A73D0BDC),
+ UINT64_C(0x710D64410C4B16BD), UINT64_C(0xC22328FF0FEC49D2),
+ UINT64_C(0x85895216A40BB6E6), UINT64_C(0x36A71EA8A7ACE989),
+ UINT64_C(0x0ADDA7C5F3C4488E), UINT64_C(0xB9F3EB7BF06317E1),
+ UINT64_C(0xFE5991925B84E8D5), UINT64_C(0x4D77DD2C5823B7BA),
+ UINT64_C(0x64B62BCAEBC387A1), UINT64_C(0xD7986774E864D8CE),
+ UINT64_C(0x90321D9D438327FA), UINT64_C(0x231C512340247895),
+ UINT64_C(0x1F66E84E144CD992), UINT64_C(0xAC48A4F017EB86FD),
+ UINT64_C(0xEBE2DE19BC0C79C9), UINT64_C(0x58CC92A7BFAB26A6),
+ UINT64_C(0x9317ACC314DD3BC7), UINT64_C(0x2039E07D177A64A8),
+ UINT64_C(0x67939A94BC9D9B9C), UINT64_C(0xD4BDD62ABF3AC4F3),
+ UINT64_C(0xE8C76F47EB5265F4), UINT64_C(0x5BE923F9E8F53A9B),
+ UINT64_C(0x1C4359104312C5AF), UINT64_C(0xAF6D15AE40B59AC0),
+ UINT64_C(0x192D8AF2BAF0E1E8), UINT64_C(0xAA03C64CB957BE87),
+ UINT64_C(0xEDA9BCA512B041B3), UINT64_C(0x5E87F01B11171EDC),
+ UINT64_C(0x62FD4976457FBFDB), UINT64_C(0xD1D305C846D8E0B4),
+ UINT64_C(0x96797F21ED3F1F80), UINT64_C(0x2557339FEE9840EF),
+ UINT64_C(0xEE8C0DFB45EE5D8E), UINT64_C(0x5DA24145464902E1),
+ UINT64_C(0x1A083BACEDAEFDD5), UINT64_C(0xA9267712EE09A2BA),
+ UINT64_C(0x955CCE7FBA6103BD), UINT64_C(0x267282C1B9C65CD2),
+ UINT64_C(0x61D8F8281221A3E6), UINT64_C(0xD2F6B4961186FC89),
+ UINT64_C(0x9F8169BA49A54B33), UINT64_C(0x2CAF25044A02145C),
+ UINT64_C(0x6B055FEDE1E5EB68), UINT64_C(0xD82B1353E242B407),
+ UINT64_C(0xE451AA3EB62A1500), UINT64_C(0x577FE680B58D4A6F),
+ UINT64_C(0x10D59C691E6AB55B), UINT64_C(0xA3FBD0D71DCDEA34),
+ UINT64_C(0x6820EEB3B6BBF755), UINT64_C(0xDB0EA20DB51CA83A),
+ UINT64_C(0x9CA4D8E41EFB570E), UINT64_C(0x2F8A945A1D5C0861),
+ UINT64_C(0x13F02D374934A966), UINT64_C(0xA0DE61894A93F609),
+ UINT64_C(0xE7741B60E174093D), UINT64_C(0x545A57DEE2D35652),
+ UINT64_C(0xE21AC88218962D7A), UINT64_C(0x5134843C1B317215),
+ UINT64_C(0x169EFED5B0D68D21), UINT64_C(0xA5B0B26BB371D24E),
+ UINT64_C(0x99CA0B06E7197349), UINT64_C(0x2AE447B8E4BE2C26),
+ UINT64_C(0x6D4E3D514F59D312), UINT64_C(0xDE6071EF4CFE8C7D),
+ UINT64_C(0x15BB4F8BE788911C), UINT64_C(0xA6950335E42FCE73),
+ UINT64_C(0xE13F79DC4FC83147), UINT64_C(0x521135624C6F6E28),
+ UINT64_C(0x6E6B8C0F1807CF2F), UINT64_C(0xDD45C0B11BA09040),
+ UINT64_C(0x9AEFBA58B0476F74), UINT64_C(0x29C1F6E6B3E0301B),
+ UINT64_C(0xC96C5795D7870F42), UINT64_C(0x7A421B2BD420502D),
+ UINT64_C(0x3DE861C27FC7AF19), UINT64_C(0x8EC62D7C7C60F076),
+ UINT64_C(0xB2BC941128085171), UINT64_C(0x0192D8AF2BAF0E1E),
+ UINT64_C(0x4638A2468048F12A), UINT64_C(0xF516EEF883EFAE45),
+ UINT64_C(0x3ECDD09C2899B324), UINT64_C(0x8DE39C222B3EEC4B),
+ UINT64_C(0xCA49E6CB80D9137F), UINT64_C(0x7967AA75837E4C10),
+ UINT64_C(0x451D1318D716ED17), UINT64_C(0xF6335FA6D4B1B278),
+ UINT64_C(0xB199254F7F564D4C), UINT64_C(0x02B769F17CF11223),
+ UINT64_C(0xB4F7F6AD86B4690B), UINT64_C(0x07D9BA1385133664),
+ UINT64_C(0x4073C0FA2EF4C950), UINT64_C(0xF35D8C442D53963F),
+ UINT64_C(0xCF273529793B3738), UINT64_C(0x7C0979977A9C6857),
+ UINT64_C(0x3BA3037ED17B9763), UINT64_C(0x888D4FC0D2DCC80C),
+ UINT64_C(0x435671A479AAD56D), UINT64_C(0xF0783D1A7A0D8A02),
+ UINT64_C(0xB7D247F3D1EA7536), UINT64_C(0x04FC0B4DD24D2A59),
+ UINT64_C(0x3886B22086258B5E), UINT64_C(0x8BA8FE9E8582D431),
+ UINT64_C(0xCC0284772E652B05), UINT64_C(0x7F2CC8C92DC2746A),
+ UINT64_C(0x325B15E575E1C3D0), UINT64_C(0x8175595B76469CBF),
+ UINT64_C(0xC6DF23B2DDA1638B), UINT64_C(0x75F16F0CDE063CE4),
+ UINT64_C(0x498BD6618A6E9DE3), UINT64_C(0xFAA59ADF89C9C28C),
+ UINT64_C(0xBD0FE036222E3DB8), UINT64_C(0x0E21AC88218962D7),
+ UINT64_C(0xC5FA92EC8AFF7FB6), UINT64_C(0x76D4DE52895820D9),
+ UINT64_C(0x317EA4BB22BFDFED), UINT64_C(0x8250E80521188082),
+ UINT64_C(0xBE2A516875702185), UINT64_C(0x0D041DD676D77EEA),
+ UINT64_C(0x4AAE673FDD3081DE), UINT64_C(0xF9802B81DE97DEB1),
+ UINT64_C(0x4FC0B4DD24D2A599), UINT64_C(0xFCEEF8632775FAF6),
+ UINT64_C(0xBB44828A8C9205C2), UINT64_C(0x086ACE348F355AAD),
+ UINT64_C(0x34107759DB5DFBAA), UINT64_C(0x873E3BE7D8FAA4C5),
+ UINT64_C(0xC094410E731D5BF1), UINT64_C(0x73BA0DB070BA049E),
+ UINT64_C(0xB86133D4DBCC19FF), UINT64_C(0x0B4F7F6AD86B4690),
+ UINT64_C(0x4CE50583738CB9A4), UINT64_C(0xFFCB493D702BE6CB),
+ UINT64_C(0xC3B1F050244347CC), UINT64_C(0x709FBCEE27E418A3),
+ UINT64_C(0x3735C6078C03E797), UINT64_C(0x841B8AB98FA4B8F8),
+ UINT64_C(0xADDA7C5F3C4488E3), UINT64_C(0x1EF430E13FE3D78C),
+ UINT64_C(0x595E4A08940428B8), UINT64_C(0xEA7006B697A377D7),
+ UINT64_C(0xD60ABFDBC3CBD6D0), UINT64_C(0x6524F365C06C89BF),
+ UINT64_C(0x228E898C6B8B768B), UINT64_C(0x91A0C532682C29E4),
+ UINT64_C(0x5A7BFB56C35A3485), UINT64_C(0xE955B7E8C0FD6BEA),
+ UINT64_C(0xAEFFCD016B1A94DE), UINT64_C(0x1DD181BF68BDCBB1),
+ UINT64_C(0x21AB38D23CD56AB6), UINT64_C(0x9285746C3F7235D9),
+ UINT64_C(0xD52F0E859495CAED), UINT64_C(0x6601423B97329582),
+ UINT64_C(0xD041DD676D77EEAA), UINT64_C(0x636F91D96ED0B1C5),
+ UINT64_C(0x24C5EB30C5374EF1), UINT64_C(0x97EBA78EC690119E),
+ UINT64_C(0xAB911EE392F8B099), UINT64_C(0x18BF525D915FEFF6),
+ UINT64_C(0x5F1528B43AB810C2), UINT64_C(0xEC3B640A391F4FAD),
+ UINT64_C(0x27E05A6E926952CC), UINT64_C(0x94CE16D091CE0DA3),
+ UINT64_C(0xD3646C393A29F297), UINT64_C(0x604A2087398EADF8),
+ UINT64_C(0x5C3099EA6DE60CFF), UINT64_C(0xEF1ED5546E415390),
+ UINT64_C(0xA8B4AFBDC5A6ACA4), UINT64_C(0x1B9AE303C601F3CB),
+ UINT64_C(0x56ED3E2F9E224471), UINT64_C(0xE5C372919D851B1E),
+ UINT64_C(0xA26908783662E42A), UINT64_C(0x114744C635C5BB45),
+ UINT64_C(0x2D3DFDAB61AD1A42), UINT64_C(0x9E13B115620A452D),
+ UINT64_C(0xD9B9CBFCC9EDBA19), UINT64_C(0x6A978742CA4AE576),
+ UINT64_C(0xA14CB926613CF817), UINT64_C(0x1262F598629BA778),
+ UINT64_C(0x55C88F71C97C584C), UINT64_C(0xE6E6C3CFCADB0723),
+ UINT64_C(0xDA9C7AA29EB3A624), UINT64_C(0x69B2361C9D14F94B),
+ UINT64_C(0x2E184CF536F3067F), UINT64_C(0x9D36004B35545910),
+ UINT64_C(0x2B769F17CF112238), UINT64_C(0x9858D3A9CCB67D57),
+ UINT64_C(0xDFF2A94067518263), UINT64_C(0x6CDCE5FE64F6DD0C),
+ UINT64_C(0x50A65C93309E7C0B), UINT64_C(0xE388102D33392364),
+ UINT64_C(0xA4226AC498DEDC50), UINT64_C(0x170C267A9B79833F),
+ UINT64_C(0xDCD7181E300F9E5E), UINT64_C(0x6FF954A033A8C131),
+ UINT64_C(0x28532E49984F3E05), UINT64_C(0x9B7D62F79BE8616A),
+ UINT64_C(0xA707DB9ACF80C06D), UINT64_C(0x14299724CC279F02),
+ UINT64_C(0x5383EDCD67C06036), UINT64_C(0xE0ADA17364673F59)
+ }, {
+ UINT64_C(0x0000000000000000), UINT64_C(0x54E979925CD0F10D),
+ UINT64_C(0xA9D2F324B9A1E21A), UINT64_C(0xFD3B8AB6E5711317),
+ UINT64_C(0xC17D4962DC4DDAB1), UINT64_C(0x959430F0809D2BBC),
+ UINT64_C(0x68AFBA4665EC38AB), UINT64_C(0x3C46C3D4393CC9A6),
+ UINT64_C(0x10223DEE1795ABE7), UINT64_C(0x44CB447C4B455AEA),
+ UINT64_C(0xB9F0CECAAE3449FD), UINT64_C(0xED19B758F2E4B8F0),
+ UINT64_C(0xD15F748CCBD87156), UINT64_C(0x85B60D1E9708805B),
+ UINT64_C(0x788D87A87279934C), UINT64_C(0x2C64FE3A2EA96241),
+ UINT64_C(0x20447BDC2F2B57CE), UINT64_C(0x74AD024E73FBA6C3),
+ UINT64_C(0x899688F8968AB5D4), UINT64_C(0xDD7FF16ACA5A44D9),
+ UINT64_C(0xE13932BEF3668D7F), UINT64_C(0xB5D04B2CAFB67C72),
+ UINT64_C(0x48EBC19A4AC76F65), UINT64_C(0x1C02B80816179E68),
+ UINT64_C(0x3066463238BEFC29), UINT64_C(0x648F3FA0646E0D24),
+ UINT64_C(0x99B4B516811F1E33), UINT64_C(0xCD5DCC84DDCFEF3E),
+ UINT64_C(0xF11B0F50E4F32698), UINT64_C(0xA5F276C2B823D795),
+ UINT64_C(0x58C9FC745D52C482), UINT64_C(0x0C2085E60182358F),
+ UINT64_C(0x4088F7B85E56AF9C), UINT64_C(0x14618E2A02865E91),
+ UINT64_C(0xE95A049CE7F74D86), UINT64_C(0xBDB37D0EBB27BC8B),
+ UINT64_C(0x81F5BEDA821B752D), UINT64_C(0xD51CC748DECB8420),
+ UINT64_C(0x28274DFE3BBA9737), UINT64_C(0x7CCE346C676A663A),
+ UINT64_C(0x50AACA5649C3047B), UINT64_C(0x0443B3C41513F576),
+ UINT64_C(0xF9783972F062E661), UINT64_C(0xAD9140E0ACB2176C),
+ UINT64_C(0x91D78334958EDECA), UINT64_C(0xC53EFAA6C95E2FC7),
+ UINT64_C(0x380570102C2F3CD0), UINT64_C(0x6CEC098270FFCDDD),
+ UINT64_C(0x60CC8C64717DF852), UINT64_C(0x3425F5F62DAD095F),
+ UINT64_C(0xC91E7F40C8DC1A48), UINT64_C(0x9DF706D2940CEB45),
+ UINT64_C(0xA1B1C506AD3022E3), UINT64_C(0xF558BC94F1E0D3EE),
+ UINT64_C(0x086336221491C0F9), UINT64_C(0x5C8A4FB0484131F4),
+ UINT64_C(0x70EEB18A66E853B5), UINT64_C(0x2407C8183A38A2B8),
+ UINT64_C(0xD93C42AEDF49B1AF), UINT64_C(0x8DD53B3C839940A2),
+ UINT64_C(0xB193F8E8BAA58904), UINT64_C(0xE57A817AE6757809),
+ UINT64_C(0x18410BCC03046B1E), UINT64_C(0x4CA8725E5FD49A13),
+ UINT64_C(0x8111EF70BCAD5F38), UINT64_C(0xD5F896E2E07DAE35),
+ UINT64_C(0x28C31C54050CBD22), UINT64_C(0x7C2A65C659DC4C2F),
+ UINT64_C(0x406CA61260E08589), UINT64_C(0x1485DF803C307484),
+ UINT64_C(0xE9BE5536D9416793), UINT64_C(0xBD572CA48591969E),
+ UINT64_C(0x9133D29EAB38F4DF), UINT64_C(0xC5DAAB0CF7E805D2),
+ UINT64_C(0x38E121BA129916C5), UINT64_C(0x6C0858284E49E7C8),
+ UINT64_C(0x504E9BFC77752E6E), UINT64_C(0x04A7E26E2BA5DF63),
+ UINT64_C(0xF99C68D8CED4CC74), UINT64_C(0xAD75114A92043D79),
+ UINT64_C(0xA15594AC938608F6), UINT64_C(0xF5BCED3ECF56F9FB),
+ UINT64_C(0x088767882A27EAEC), UINT64_C(0x5C6E1E1A76F71BE1),
+ UINT64_C(0x6028DDCE4FCBD247), UINT64_C(0x34C1A45C131B234A),
+ UINT64_C(0xC9FA2EEAF66A305D), UINT64_C(0x9D135778AABAC150),
+ UINT64_C(0xB177A9428413A311), UINT64_C(0xE59ED0D0D8C3521C),
+ UINT64_C(0x18A55A663DB2410B), UINT64_C(0x4C4C23F46162B006),
+ UINT64_C(0x700AE020585E79A0), UINT64_C(0x24E399B2048E88AD),
+ UINT64_C(0xD9D81304E1FF9BBA), UINT64_C(0x8D316A96BD2F6AB7),
+ UINT64_C(0xC19918C8E2FBF0A4), UINT64_C(0x9570615ABE2B01A9),
+ UINT64_C(0x684BEBEC5B5A12BE), UINT64_C(0x3CA2927E078AE3B3),
+ UINT64_C(0x00E451AA3EB62A15), UINT64_C(0x540D28386266DB18),
+ UINT64_C(0xA936A28E8717C80F), UINT64_C(0xFDDFDB1CDBC73902),
+ UINT64_C(0xD1BB2526F56E5B43), UINT64_C(0x85525CB4A9BEAA4E),
+ UINT64_C(0x7869D6024CCFB959), UINT64_C(0x2C80AF90101F4854),
+ UINT64_C(0x10C66C44292381F2), UINT64_C(0x442F15D675F370FF),
+ UINT64_C(0xB9149F60908263E8), UINT64_C(0xEDFDE6F2CC5292E5),
+ UINT64_C(0xE1DD6314CDD0A76A), UINT64_C(0xB5341A8691005667),
+ UINT64_C(0x480F903074714570), UINT64_C(0x1CE6E9A228A1B47D),
+ UINT64_C(0x20A02A76119D7DDB), UINT64_C(0x744953E44D4D8CD6),
+ UINT64_C(0x8972D952A83C9FC1), UINT64_C(0xDD9BA0C0F4EC6ECC),
+ UINT64_C(0xF1FF5EFADA450C8D), UINT64_C(0xA51627688695FD80),
+ UINT64_C(0x582DADDE63E4EE97), UINT64_C(0x0CC4D44C3F341F9A),
+ UINT64_C(0x308217980608D63C), UINT64_C(0x646B6E0A5AD82731),
+ UINT64_C(0x9950E4BCBFA93426), UINT64_C(0xCDB99D2EE379C52B),
+ UINT64_C(0x90FB71CAD654A0F5), UINT64_C(0xC41208588A8451F8),
+ UINT64_C(0x392982EE6FF542EF), UINT64_C(0x6DC0FB7C3325B3E2),
+ UINT64_C(0x518638A80A197A44), UINT64_C(0x056F413A56C98B49),
+ UINT64_C(0xF854CB8CB3B8985E), UINT64_C(0xACBDB21EEF686953),
+ UINT64_C(0x80D94C24C1C10B12), UINT64_C(0xD43035B69D11FA1F),
+ UINT64_C(0x290BBF007860E908), UINT64_C(0x7DE2C69224B01805),
+ UINT64_C(0x41A405461D8CD1A3), UINT64_C(0x154D7CD4415C20AE),
+ UINT64_C(0xE876F662A42D33B9), UINT64_C(0xBC9F8FF0F8FDC2B4),
+ UINT64_C(0xB0BF0A16F97FF73B), UINT64_C(0xE4567384A5AF0636),
+ UINT64_C(0x196DF93240DE1521), UINT64_C(0x4D8480A01C0EE42C),
+ UINT64_C(0x71C2437425322D8A), UINT64_C(0x252B3AE679E2DC87),
+ UINT64_C(0xD810B0509C93CF90), UINT64_C(0x8CF9C9C2C0433E9D),
+ UINT64_C(0xA09D37F8EEEA5CDC), UINT64_C(0xF4744E6AB23AADD1),
+ UINT64_C(0x094FC4DC574BBEC6), UINT64_C(0x5DA6BD4E0B9B4FCB),
+ UINT64_C(0x61E07E9A32A7866D), UINT64_C(0x350907086E777760),
+ UINT64_C(0xC8328DBE8B066477), UINT64_C(0x9CDBF42CD7D6957A),
+ UINT64_C(0xD073867288020F69), UINT64_C(0x849AFFE0D4D2FE64),
+ UINT64_C(0x79A1755631A3ED73), UINT64_C(0x2D480CC46D731C7E),
+ UINT64_C(0x110ECF10544FD5D8), UINT64_C(0x45E7B682089F24D5),
+ UINT64_C(0xB8DC3C34EDEE37C2), UINT64_C(0xEC3545A6B13EC6CF),
+ UINT64_C(0xC051BB9C9F97A48E), UINT64_C(0x94B8C20EC3475583),
+ UINT64_C(0x698348B826364694), UINT64_C(0x3D6A312A7AE6B799),
+ UINT64_C(0x012CF2FE43DA7E3F), UINT64_C(0x55C58B6C1F0A8F32),
+ UINT64_C(0xA8FE01DAFA7B9C25), UINT64_C(0xFC177848A6AB6D28),
+ UINT64_C(0xF037FDAEA72958A7), UINT64_C(0xA4DE843CFBF9A9AA),
+ UINT64_C(0x59E50E8A1E88BABD), UINT64_C(0x0D0C771842584BB0),
+ UINT64_C(0x314AB4CC7B648216), UINT64_C(0x65A3CD5E27B4731B),
+ UINT64_C(0x989847E8C2C5600C), UINT64_C(0xCC713E7A9E159101),
+ UINT64_C(0xE015C040B0BCF340), UINT64_C(0xB4FCB9D2EC6C024D),
+ UINT64_C(0x49C73364091D115A), UINT64_C(0x1D2E4AF655CDE057),
+ UINT64_C(0x216889226CF129F1), UINT64_C(0x7581F0B03021D8FC),
+ UINT64_C(0x88BA7A06D550CBEB), UINT64_C(0xDC53039489803AE6),
+ UINT64_C(0x11EA9EBA6AF9FFCD), UINT64_C(0x4503E72836290EC0),
+ UINT64_C(0xB8386D9ED3581DD7), UINT64_C(0xECD1140C8F88ECDA),
+ UINT64_C(0xD097D7D8B6B4257C), UINT64_C(0x847EAE4AEA64D471),
+ UINT64_C(0x794524FC0F15C766), UINT64_C(0x2DAC5D6E53C5366B),
+ UINT64_C(0x01C8A3547D6C542A), UINT64_C(0x5521DAC621BCA527),
+ UINT64_C(0xA81A5070C4CDB630), UINT64_C(0xFCF329E2981D473D),
+ UINT64_C(0xC0B5EA36A1218E9B), UINT64_C(0x945C93A4FDF17F96),
+ UINT64_C(0x6967191218806C81), UINT64_C(0x3D8E608044509D8C),
+ UINT64_C(0x31AEE56645D2A803), UINT64_C(0x65479CF41902590E),
+ UINT64_C(0x987C1642FC734A19), UINT64_C(0xCC956FD0A0A3BB14),
+ UINT64_C(0xF0D3AC04999F72B2), UINT64_C(0xA43AD596C54F83BF),
+ UINT64_C(0x59015F20203E90A8), UINT64_C(0x0DE826B27CEE61A5),
+ UINT64_C(0x218CD888524703E4), UINT64_C(0x7565A11A0E97F2E9),
+ UINT64_C(0x885E2BACEBE6E1FE), UINT64_C(0xDCB7523EB73610F3),
+ UINT64_C(0xE0F191EA8E0AD955), UINT64_C(0xB418E878D2DA2858),
+ UINT64_C(0x492362CE37AB3B4F), UINT64_C(0x1DCA1B5C6B7BCA42),
+ UINT64_C(0x5162690234AF5051), UINT64_C(0x058B1090687FA15C),
+ UINT64_C(0xF8B09A268D0EB24B), UINT64_C(0xAC59E3B4D1DE4346),
+ UINT64_C(0x901F2060E8E28AE0), UINT64_C(0xC4F659F2B4327BED),
+ UINT64_C(0x39CDD344514368FA), UINT64_C(0x6D24AAD60D9399F7),
+ UINT64_C(0x414054EC233AFBB6), UINT64_C(0x15A92D7E7FEA0ABB),
+ UINT64_C(0xE892A7C89A9B19AC), UINT64_C(0xBC7BDE5AC64BE8A1),
+ UINT64_C(0x803D1D8EFF772107), UINT64_C(0xD4D4641CA3A7D00A),
+ UINT64_C(0x29EFEEAA46D6C31D), UINT64_C(0x7D0697381A063210),
+ UINT64_C(0x712612DE1B84079F), UINT64_C(0x25CF6B4C4754F692),
+ UINT64_C(0xD8F4E1FAA225E585), UINT64_C(0x8C1D9868FEF51488),
+ UINT64_C(0xB05B5BBCC7C9DD2E), UINT64_C(0xE4B2222E9B192C23),
+ UINT64_C(0x1989A8987E683F34), UINT64_C(0x4D60D10A22B8CE39),
+ UINT64_C(0x61042F300C11AC78), UINT64_C(0x35ED56A250C15D75),
+ UINT64_C(0xC8D6DC14B5B04E62), UINT64_C(0x9C3FA586E960BF6F),
+ UINT64_C(0xA0796652D05C76C9), UINT64_C(0xF4901FC08C8C87C4),
+ UINT64_C(0x09AB957669FD94D3), UINT64_C(0x5D42ECE4352D65DE)
+ }, {
+ UINT64_C(0x0000000000000000), UINT64_C(0x3F0BE14A916A6DCB),
+ UINT64_C(0x7E17C29522D4DB96), UINT64_C(0x411C23DFB3BEB65D),
+ UINT64_C(0xFC2F852A45A9B72C), UINT64_C(0xC3246460D4C3DAE7),
+ UINT64_C(0x823847BF677D6CBA), UINT64_C(0xBD33A6F5F6170171),
+ UINT64_C(0x6A87A57F245D70DD), UINT64_C(0x558C4435B5371D16),
+ UINT64_C(0x149067EA0689AB4B), UINT64_C(0x2B9B86A097E3C680),
+ UINT64_C(0x96A8205561F4C7F1), UINT64_C(0xA9A3C11FF09EAA3A),
+ UINT64_C(0xE8BFE2C043201C67), UINT64_C(0xD7B4038AD24A71AC),
+ UINT64_C(0xD50F4AFE48BAE1BA), UINT64_C(0xEA04ABB4D9D08C71),
+ UINT64_C(0xAB18886B6A6E3A2C), UINT64_C(0x94136921FB0457E7),
+ UINT64_C(0x2920CFD40D135696), UINT64_C(0x162B2E9E9C793B5D),
+ UINT64_C(0x57370D412FC78D00), UINT64_C(0x683CEC0BBEADE0CB),
+ UINT64_C(0xBF88EF816CE79167), UINT64_C(0x80830ECBFD8DFCAC),
+ UINT64_C(0xC19F2D144E334AF1), UINT64_C(0xFE94CC5EDF59273A),
+ UINT64_C(0x43A76AAB294E264B), UINT64_C(0x7CAC8BE1B8244B80),
+ UINT64_C(0x3DB0A83E0B9AFDDD), UINT64_C(0x02BB49749AF09016),
+ UINT64_C(0x38C63AD73E7BDDF1), UINT64_C(0x07CDDB9DAF11B03A),
+ UINT64_C(0x46D1F8421CAF0667), UINT64_C(0x79DA19088DC56BAC),
+ UINT64_C(0xC4E9BFFD7BD26ADD), UINT64_C(0xFBE25EB7EAB80716),
+ UINT64_C(0xBAFE7D685906B14B), UINT64_C(0x85F59C22C86CDC80),
+ UINT64_C(0x52419FA81A26AD2C), UINT64_C(0x6D4A7EE28B4CC0E7),
+ UINT64_C(0x2C565D3D38F276BA), UINT64_C(0x135DBC77A9981B71),
+ UINT64_C(0xAE6E1A825F8F1A00), UINT64_C(0x9165FBC8CEE577CB),
+ UINT64_C(0xD079D8177D5BC196), UINT64_C(0xEF72395DEC31AC5D),
+ UINT64_C(0xEDC9702976C13C4B), UINT64_C(0xD2C29163E7AB5180),
+ UINT64_C(0x93DEB2BC5415E7DD), UINT64_C(0xACD553F6C57F8A16),
+ UINT64_C(0x11E6F50333688B67), UINT64_C(0x2EED1449A202E6AC),
+ UINT64_C(0x6FF1379611BC50F1), UINT64_C(0x50FAD6DC80D63D3A),
+ UINT64_C(0x874ED556529C4C96), UINT64_C(0xB845341CC3F6215D),
+ UINT64_C(0xF95917C370489700), UINT64_C(0xC652F689E122FACB),
+ UINT64_C(0x7B61507C1735FBBA), UINT64_C(0x446AB136865F9671),
+ UINT64_C(0x057692E935E1202C), UINT64_C(0x3A7D73A3A48B4DE7),
+ UINT64_C(0x718C75AE7CF7BBE2), UINT64_C(0x4E8794E4ED9DD629),
+ UINT64_C(0x0F9BB73B5E236074), UINT64_C(0x30905671CF490DBF),
+ UINT64_C(0x8DA3F084395E0CCE), UINT64_C(0xB2A811CEA8346105),
+ UINT64_C(0xF3B432111B8AD758), UINT64_C(0xCCBFD35B8AE0BA93),
+ UINT64_C(0x1B0BD0D158AACB3F), UINT64_C(0x2400319BC9C0A6F4),
+ UINT64_C(0x651C12447A7E10A9), UINT64_C(0x5A17F30EEB147D62),
+ UINT64_C(0xE72455FB1D037C13), UINT64_C(0xD82FB4B18C6911D8),
+ UINT64_C(0x9933976E3FD7A785), UINT64_C(0xA6387624AEBDCA4E),
+ UINT64_C(0xA4833F50344D5A58), UINT64_C(0x9B88DE1AA5273793),
+ UINT64_C(0xDA94FDC5169981CE), UINT64_C(0xE59F1C8F87F3EC05),
+ UINT64_C(0x58ACBA7A71E4ED74), UINT64_C(0x67A75B30E08E80BF),
+ UINT64_C(0x26BB78EF533036E2), UINT64_C(0x19B099A5C25A5B29),
+ UINT64_C(0xCE049A2F10102A85), UINT64_C(0xF10F7B65817A474E),
+ UINT64_C(0xB01358BA32C4F113), UINT64_C(0x8F18B9F0A3AE9CD8),
+ UINT64_C(0x322B1F0555B99DA9), UINT64_C(0x0D20FE4FC4D3F062),
+ UINT64_C(0x4C3CDD90776D463F), UINT64_C(0x73373CDAE6072BF4),
+ UINT64_C(0x494A4F79428C6613), UINT64_C(0x7641AE33D3E60BD8),
+ UINT64_C(0x375D8DEC6058BD85), UINT64_C(0x08566CA6F132D04E),
+ UINT64_C(0xB565CA530725D13F), UINT64_C(0x8A6E2B19964FBCF4),
+ UINT64_C(0xCB7208C625F10AA9), UINT64_C(0xF479E98CB49B6762),
+ UINT64_C(0x23CDEA0666D116CE), UINT64_C(0x1CC60B4CF7BB7B05),
+ UINT64_C(0x5DDA28934405CD58), UINT64_C(0x62D1C9D9D56FA093),
+ UINT64_C(0xDFE26F2C2378A1E2), UINT64_C(0xE0E98E66B212CC29),
+ UINT64_C(0xA1F5ADB901AC7A74), UINT64_C(0x9EFE4CF390C617BF),
+ UINT64_C(0x9C4505870A3687A9), UINT64_C(0xA34EE4CD9B5CEA62),
+ UINT64_C(0xE252C71228E25C3F), UINT64_C(0xDD592658B98831F4),
+ UINT64_C(0x606A80AD4F9F3085), UINT64_C(0x5F6161E7DEF55D4E),
+ UINT64_C(0x1E7D42386D4BEB13), UINT64_C(0x2176A372FC2186D8),
+ UINT64_C(0xF6C2A0F82E6BF774), UINT64_C(0xC9C941B2BF019ABF),
+ UINT64_C(0x88D5626D0CBF2CE2), UINT64_C(0xB7DE83279DD54129),
+ UINT64_C(0x0AED25D26BC24058), UINT64_C(0x35E6C498FAA82D93),
+ UINT64_C(0x74FAE74749169BCE), UINT64_C(0x4BF1060DD87CF605),
+ UINT64_C(0xE318EB5CF9EF77C4), UINT64_C(0xDC130A1668851A0F),
+ UINT64_C(0x9D0F29C9DB3BAC52), UINT64_C(0xA204C8834A51C199),
+ UINT64_C(0x1F376E76BC46C0E8), UINT64_C(0x203C8F3C2D2CAD23),
+ UINT64_C(0x6120ACE39E921B7E), UINT64_C(0x5E2B4DA90FF876B5),
+ UINT64_C(0x899F4E23DDB20719), UINT64_C(0xB694AF694CD86AD2),
+ UINT64_C(0xF7888CB6FF66DC8F), UINT64_C(0xC8836DFC6E0CB144),
+ UINT64_C(0x75B0CB09981BB035), UINT64_C(0x4ABB2A430971DDFE),
+ UINT64_C(0x0BA7099CBACF6BA3), UINT64_C(0x34ACE8D62BA50668),
+ UINT64_C(0x3617A1A2B155967E), UINT64_C(0x091C40E8203FFBB5),
+ UINT64_C(0x4800633793814DE8), UINT64_C(0x770B827D02EB2023),
+ UINT64_C(0xCA382488F4FC2152), UINT64_C(0xF533C5C265964C99),
+ UINT64_C(0xB42FE61DD628FAC4), UINT64_C(0x8B2407574742970F),
+ UINT64_C(0x5C9004DD9508E6A3), UINT64_C(0x639BE59704628B68),
+ UINT64_C(0x2287C648B7DC3D35), UINT64_C(0x1D8C270226B650FE),
+ UINT64_C(0xA0BF81F7D0A1518F), UINT64_C(0x9FB460BD41CB3C44),
+ UINT64_C(0xDEA84362F2758A19), UINT64_C(0xE1A3A228631FE7D2),
+ UINT64_C(0xDBDED18BC794AA35), UINT64_C(0xE4D530C156FEC7FE),
+ UINT64_C(0xA5C9131EE54071A3), UINT64_C(0x9AC2F254742A1C68),
+ UINT64_C(0x27F154A1823D1D19), UINT64_C(0x18FAB5EB135770D2),
+ UINT64_C(0x59E69634A0E9C68F), UINT64_C(0x66ED777E3183AB44),
+ UINT64_C(0xB15974F4E3C9DAE8), UINT64_C(0x8E5295BE72A3B723),
+ UINT64_C(0xCF4EB661C11D017E), UINT64_C(0xF045572B50776CB5),
+ UINT64_C(0x4D76F1DEA6606DC4), UINT64_C(0x727D1094370A000F),
+ UINT64_C(0x3361334B84B4B652), UINT64_C(0x0C6AD20115DEDB99),
+ UINT64_C(0x0ED19B758F2E4B8F), UINT64_C(0x31DA7A3F1E442644),
+ UINT64_C(0x70C659E0ADFA9019), UINT64_C(0x4FCDB8AA3C90FDD2),
+ UINT64_C(0xF2FE1E5FCA87FCA3), UINT64_C(0xCDF5FF155BED9168),
+ UINT64_C(0x8CE9DCCAE8532735), UINT64_C(0xB3E23D8079394AFE),
+ UINT64_C(0x64563E0AAB733B52), UINT64_C(0x5B5DDF403A195699),
+ UINT64_C(0x1A41FC9F89A7E0C4), UINT64_C(0x254A1DD518CD8D0F),
+ UINT64_C(0x9879BB20EEDA8C7E), UINT64_C(0xA7725A6A7FB0E1B5),
+ UINT64_C(0xE66E79B5CC0E57E8), UINT64_C(0xD96598FF5D643A23),
+ UINT64_C(0x92949EF28518CC26), UINT64_C(0xAD9F7FB81472A1ED),
+ UINT64_C(0xEC835C67A7CC17B0), UINT64_C(0xD388BD2D36A67A7B),
+ UINT64_C(0x6EBB1BD8C0B17B0A), UINT64_C(0x51B0FA9251DB16C1),
+ UINT64_C(0x10ACD94DE265A09C), UINT64_C(0x2FA73807730FCD57),
+ UINT64_C(0xF8133B8DA145BCFB), UINT64_C(0xC718DAC7302FD130),
+ UINT64_C(0x8604F9188391676D), UINT64_C(0xB90F185212FB0AA6),
+ UINT64_C(0x043CBEA7E4EC0BD7), UINT64_C(0x3B375FED7586661C),
+ UINT64_C(0x7A2B7C32C638D041), UINT64_C(0x45209D785752BD8A),
+ UINT64_C(0x479BD40CCDA22D9C), UINT64_C(0x789035465CC84057),
+ UINT64_C(0x398C1699EF76F60A), UINT64_C(0x0687F7D37E1C9BC1),
+ UINT64_C(0xBBB45126880B9AB0), UINT64_C(0x84BFB06C1961F77B),
+ UINT64_C(0xC5A393B3AADF4126), UINT64_C(0xFAA872F93BB52CED),
+ UINT64_C(0x2D1C7173E9FF5D41), UINT64_C(0x121790397895308A),
+ UINT64_C(0x530BB3E6CB2B86D7), UINT64_C(0x6C0052AC5A41EB1C),
+ UINT64_C(0xD133F459AC56EA6D), UINT64_C(0xEE3815133D3C87A6),
+ UINT64_C(0xAF2436CC8E8231FB), UINT64_C(0x902FD7861FE85C30),
+ UINT64_C(0xAA52A425BB6311D7), UINT64_C(0x9559456F2A097C1C),
+ UINT64_C(0xD44566B099B7CA41), UINT64_C(0xEB4E87FA08DDA78A),
+ UINT64_C(0x567D210FFECAA6FB), UINT64_C(0x6976C0456FA0CB30),
+ UINT64_C(0x286AE39ADC1E7D6D), UINT64_C(0x176102D04D7410A6),
+ UINT64_C(0xC0D5015A9F3E610A), UINT64_C(0xFFDEE0100E540CC1),
+ UINT64_C(0xBEC2C3CFBDEABA9C), UINT64_C(0x81C922852C80D757),
+ UINT64_C(0x3CFA8470DA97D626), UINT64_C(0x03F1653A4BFDBBED),
+ UINT64_C(0x42ED46E5F8430DB0), UINT64_C(0x7DE6A7AF6929607B),
+ UINT64_C(0x7F5DEEDBF3D9F06D), UINT64_C(0x40560F9162B39DA6),
+ UINT64_C(0x014A2C4ED10D2BFB), UINT64_C(0x3E41CD0440674630),
+ UINT64_C(0x83726BF1B6704741), UINT64_C(0xBC798ABB271A2A8A),
+ UINT64_C(0xFD65A96494A49CD7), UINT64_C(0xC26E482E05CEF11C),
+ UINT64_C(0x15DA4BA4D78480B0), UINT64_C(0x2AD1AAEE46EEED7B),
+ UINT64_C(0x6BCD8931F5505B26), UINT64_C(0x54C6687B643A36ED),
+ UINT64_C(0xE9F5CE8E922D379C), UINT64_C(0xD6FE2FC403475A57),
+ UINT64_C(0x97E20C1BB0F9EC0A), UINT64_C(0xA8E9ED51219381C1)
+ }, {
+ UINT64_C(0x0000000000000000), UINT64_C(0x1DEE8A5E222CA1DC),
+ UINT64_C(0x3BDD14BC445943B8), UINT64_C(0x26339EE26675E264),
+ UINT64_C(0x77BA297888B28770), UINT64_C(0x6A54A326AA9E26AC),
+ UINT64_C(0x4C673DC4CCEBC4C8), UINT64_C(0x5189B79AEEC76514),
+ UINT64_C(0xEF7452F111650EE0), UINT64_C(0xF29AD8AF3349AF3C),
+ UINT64_C(0xD4A9464D553C4D58), UINT64_C(0xC947CC137710EC84),
+ UINT64_C(0x98CE7B8999D78990), UINT64_C(0x8520F1D7BBFB284C),
+ UINT64_C(0xA3136F35DD8ECA28), UINT64_C(0xBEFDE56BFFA26BF4),
+ UINT64_C(0x4C300AC98DC40345), UINT64_C(0x51DE8097AFE8A299),
+ UINT64_C(0x77ED1E75C99D40FD), UINT64_C(0x6A03942BEBB1E121),
+ UINT64_C(0x3B8A23B105768435), UINT64_C(0x2664A9EF275A25E9),
+ UINT64_C(0x0057370D412FC78D), UINT64_C(0x1DB9BD5363036651),
+ UINT64_C(0xA34458389CA10DA5), UINT64_C(0xBEAAD266BE8DAC79),
+ UINT64_C(0x98994C84D8F84E1D), UINT64_C(0x8577C6DAFAD4EFC1),
+ UINT64_C(0xD4FE714014138AD5), UINT64_C(0xC910FB1E363F2B09),
+ UINT64_C(0xEF2365FC504AC96D), UINT64_C(0xF2CDEFA2726668B1),
+ UINT64_C(0x986015931B88068A), UINT64_C(0x858E9FCD39A4A756),
+ UINT64_C(0xA3BD012F5FD14532), UINT64_C(0xBE538B717DFDE4EE),
+ UINT64_C(0xEFDA3CEB933A81FA), UINT64_C(0xF234B6B5B1162026),
+ UINT64_C(0xD4072857D763C242), UINT64_C(0xC9E9A209F54F639E),
+ UINT64_C(0x771447620AED086A), UINT64_C(0x6AFACD3C28C1A9B6),
+ UINT64_C(0x4CC953DE4EB44BD2), UINT64_C(0x5127D9806C98EA0E),
+ UINT64_C(0x00AE6E1A825F8F1A), UINT64_C(0x1D40E444A0732EC6),
+ UINT64_C(0x3B737AA6C606CCA2), UINT64_C(0x269DF0F8E42A6D7E),
+ UINT64_C(0xD4501F5A964C05CF), UINT64_C(0xC9BE9504B460A413),
+ UINT64_C(0xEF8D0BE6D2154677), UINT64_C(0xF26381B8F039E7AB),
+ UINT64_C(0xA3EA36221EFE82BF), UINT64_C(0xBE04BC7C3CD22363),
+ UINT64_C(0x9837229E5AA7C107), UINT64_C(0x85D9A8C0788B60DB),
+ UINT64_C(0x3B244DAB87290B2F), UINT64_C(0x26CAC7F5A505AAF3),
+ UINT64_C(0x00F95917C3704897), UINT64_C(0x1D17D349E15CE94B),
+ UINT64_C(0x4C9E64D30F9B8C5F), UINT64_C(0x5170EE8D2DB72D83),
+ UINT64_C(0x7743706F4BC2CFE7), UINT64_C(0x6AADFA3169EE6E3B),
+ UINT64_C(0xA218840D981E1391), UINT64_C(0xBFF60E53BA32B24D),
+ UINT64_C(0x99C590B1DC475029), UINT64_C(0x842B1AEFFE6BF1F5),
+ UINT64_C(0xD5A2AD7510AC94E1), UINT64_C(0xC84C272B3280353D),
+ UINT64_C(0xEE7FB9C954F5D759), UINT64_C(0xF391339776D97685),
+ UINT64_C(0x4D6CD6FC897B1D71), UINT64_C(0x50825CA2AB57BCAD),
+ UINT64_C(0x76B1C240CD225EC9), UINT64_C(0x6B5F481EEF0EFF15),
+ UINT64_C(0x3AD6FF8401C99A01), UINT64_C(0x273875DA23E53BDD),
+ UINT64_C(0x010BEB384590D9B9), UINT64_C(0x1CE5616667BC7865),
+ UINT64_C(0xEE288EC415DA10D4), UINT64_C(0xF3C6049A37F6B108),
+ UINT64_C(0xD5F59A785183536C), UINT64_C(0xC81B102673AFF2B0),
+ UINT64_C(0x9992A7BC9D6897A4), UINT64_C(0x847C2DE2BF443678),
+ UINT64_C(0xA24FB300D931D41C), UINT64_C(0xBFA1395EFB1D75C0),
+ UINT64_C(0x015CDC3504BF1E34), UINT64_C(0x1CB2566B2693BFE8),
+ UINT64_C(0x3A81C88940E65D8C), UINT64_C(0x276F42D762CAFC50),
+ UINT64_C(0x76E6F54D8C0D9944), UINT64_C(0x6B087F13AE213898),
+ UINT64_C(0x4D3BE1F1C854DAFC), UINT64_C(0x50D56BAFEA787B20),
+ UINT64_C(0x3A78919E8396151B), UINT64_C(0x27961BC0A1BAB4C7),
+ UINT64_C(0x01A58522C7CF56A3), UINT64_C(0x1C4B0F7CE5E3F77F),
+ UINT64_C(0x4DC2B8E60B24926B), UINT64_C(0x502C32B8290833B7),
+ UINT64_C(0x761FAC5A4F7DD1D3), UINT64_C(0x6BF126046D51700F),
+ UINT64_C(0xD50CC36F92F31BFB), UINT64_C(0xC8E24931B0DFBA27),
+ UINT64_C(0xEED1D7D3D6AA5843), UINT64_C(0xF33F5D8DF486F99F),
+ UINT64_C(0xA2B6EA171A419C8B), UINT64_C(0xBF586049386D3D57),
+ UINT64_C(0x996BFEAB5E18DF33), UINT64_C(0x848574F57C347EEF),
+ UINT64_C(0x76489B570E52165E), UINT64_C(0x6BA611092C7EB782),
+ UINT64_C(0x4D958FEB4A0B55E6), UINT64_C(0x507B05B56827F43A),
+ UINT64_C(0x01F2B22F86E0912E), UINT64_C(0x1C1C3871A4CC30F2),
+ UINT64_C(0x3A2FA693C2B9D296), UINT64_C(0x27C12CCDE095734A),
+ UINT64_C(0x993CC9A61F3718BE), UINT64_C(0x84D243F83D1BB962),
+ UINT64_C(0xA2E1DD1A5B6E5B06), UINT64_C(0xBF0F57447942FADA),
+ UINT64_C(0xEE86E0DE97859FCE), UINT64_C(0xF3686A80B5A93E12),
+ UINT64_C(0xD55BF462D3DCDC76), UINT64_C(0xC8B57E3CF1F07DAA),
+ UINT64_C(0xD6E9A7309F3239A7), UINT64_C(0xCB072D6EBD1E987B),
+ UINT64_C(0xED34B38CDB6B7A1F), UINT64_C(0xF0DA39D2F947DBC3),
+ UINT64_C(0xA1538E481780BED7), UINT64_C(0xBCBD041635AC1F0B),
+ UINT64_C(0x9A8E9AF453D9FD6F), UINT64_C(0x876010AA71F55CB3),
+ UINT64_C(0x399DF5C18E573747), UINT64_C(0x24737F9FAC7B969B),
+ UINT64_C(0x0240E17DCA0E74FF), UINT64_C(0x1FAE6B23E822D523),
+ UINT64_C(0x4E27DCB906E5B037), UINT64_C(0x53C956E724C911EB),
+ UINT64_C(0x75FAC80542BCF38F), UINT64_C(0x6814425B60905253),
+ UINT64_C(0x9AD9ADF912F63AE2), UINT64_C(0x873727A730DA9B3E),
+ UINT64_C(0xA104B94556AF795A), UINT64_C(0xBCEA331B7483D886),
+ UINT64_C(0xED6384819A44BD92), UINT64_C(0xF08D0EDFB8681C4E),
+ UINT64_C(0xD6BE903DDE1DFE2A), UINT64_C(0xCB501A63FC315FF6),
+ UINT64_C(0x75ADFF0803933402), UINT64_C(0x6843755621BF95DE),
+ UINT64_C(0x4E70EBB447CA77BA), UINT64_C(0x539E61EA65E6D666),
+ UINT64_C(0x0217D6708B21B372), UINT64_C(0x1FF95C2EA90D12AE),
+ UINT64_C(0x39CAC2CCCF78F0CA), UINT64_C(0x24244892ED545116),
+ UINT64_C(0x4E89B2A384BA3F2D), UINT64_C(0x536738FDA6969EF1),
+ UINT64_C(0x7554A61FC0E37C95), UINT64_C(0x68BA2C41E2CFDD49),
+ UINT64_C(0x39339BDB0C08B85D), UINT64_C(0x24DD11852E241981),
+ UINT64_C(0x02EE8F674851FBE5), UINT64_C(0x1F0005396A7D5A39),
+ UINT64_C(0xA1FDE05295DF31CD), UINT64_C(0xBC136A0CB7F39011),
+ UINT64_C(0x9A20F4EED1867275), UINT64_C(0x87CE7EB0F3AAD3A9),
+ UINT64_C(0xD647C92A1D6DB6BD), UINT64_C(0xCBA943743F411761),
+ UINT64_C(0xED9ADD965934F505), UINT64_C(0xF07457C87B1854D9),
+ UINT64_C(0x02B9B86A097E3C68), UINT64_C(0x1F5732342B529DB4),
+ UINT64_C(0x3964ACD64D277FD0), UINT64_C(0x248A26886F0BDE0C),
+ UINT64_C(0x7503911281CCBB18), UINT64_C(0x68ED1B4CA3E01AC4),
+ UINT64_C(0x4EDE85AEC595F8A0), UINT64_C(0x53300FF0E7B9597C),
+ UINT64_C(0xEDCDEA9B181B3288), UINT64_C(0xF02360C53A379354),
+ UINT64_C(0xD610FE275C427130), UINT64_C(0xCBFE74797E6ED0EC),
+ UINT64_C(0x9A77C3E390A9B5F8), UINT64_C(0x879949BDB2851424),
+ UINT64_C(0xA1AAD75FD4F0F640), UINT64_C(0xBC445D01F6DC579C),
+ UINT64_C(0x74F1233D072C2A36), UINT64_C(0x691FA96325008BEA),
+ UINT64_C(0x4F2C37814375698E), UINT64_C(0x52C2BDDF6159C852),
+ UINT64_C(0x034B0A458F9EAD46), UINT64_C(0x1EA5801BADB20C9A),
+ UINT64_C(0x38961EF9CBC7EEFE), UINT64_C(0x257894A7E9EB4F22),
+ UINT64_C(0x9B8571CC164924D6), UINT64_C(0x866BFB923465850A),
+ UINT64_C(0xA05865705210676E), UINT64_C(0xBDB6EF2E703CC6B2),
+ UINT64_C(0xEC3F58B49EFBA3A6), UINT64_C(0xF1D1D2EABCD7027A),
+ UINT64_C(0xD7E24C08DAA2E01E), UINT64_C(0xCA0CC656F88E41C2),
+ UINT64_C(0x38C129F48AE82973), UINT64_C(0x252FA3AAA8C488AF),
+ UINT64_C(0x031C3D48CEB16ACB), UINT64_C(0x1EF2B716EC9DCB17),
+ UINT64_C(0x4F7B008C025AAE03), UINT64_C(0x52958AD220760FDF),
+ UINT64_C(0x74A614304603EDBB), UINT64_C(0x69489E6E642F4C67),
+ UINT64_C(0xD7B57B059B8D2793), UINT64_C(0xCA5BF15BB9A1864F),
+ UINT64_C(0xEC686FB9DFD4642B), UINT64_C(0xF186E5E7FDF8C5F7),
+ UINT64_C(0xA00F527D133FA0E3), UINT64_C(0xBDE1D8233113013F),
+ UINT64_C(0x9BD246C15766E35B), UINT64_C(0x863CCC9F754A4287),
+ UINT64_C(0xEC9136AE1CA42CBC), UINT64_C(0xF17FBCF03E888D60),
+ UINT64_C(0xD74C221258FD6F04), UINT64_C(0xCAA2A84C7AD1CED8),
+ UINT64_C(0x9B2B1FD69416ABCC), UINT64_C(0x86C59588B63A0A10),
+ UINT64_C(0xA0F60B6AD04FE874), UINT64_C(0xBD188134F26349A8),
+ UINT64_C(0x03E5645F0DC1225C), UINT64_C(0x1E0BEE012FED8380),
+ UINT64_C(0x383870E3499861E4), UINT64_C(0x25D6FABD6BB4C038),
+ UINT64_C(0x745F4D278573A52C), UINT64_C(0x69B1C779A75F04F0),
+ UINT64_C(0x4F82599BC12AE694), UINT64_C(0x526CD3C5E3064748),
+ UINT64_C(0xA0A13C6791602FF9), UINT64_C(0xBD4FB639B34C8E25),
+ UINT64_C(0x9B7C28DBD5396C41), UINT64_C(0x8692A285F715CD9D),
+ UINT64_C(0xD71B151F19D2A889), UINT64_C(0xCAF59F413BFE0955),
+ UINT64_C(0xECC601A35D8BEB31), UINT64_C(0xF1288BFD7FA74AED),
+ UINT64_C(0x4FD56E9680052119), UINT64_C(0x523BE4C8A22980C5),
+ UINT64_C(0x74087A2AC45C62A1), UINT64_C(0x69E6F074E670C37D),
+ UINT64_C(0x386F47EE08B7A669), UINT64_C(0x2581CDB02A9B07B5),
+ UINT64_C(0x03B253524CEEE5D1), UINT64_C(0x1E5CD90C6EC2440D)
+ }
+};
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_tablegen.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_tablegen.c
new file mode 100644
index 00000000..c0e0950b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_tablegen.c
@@ -0,0 +1,94 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file crc64_tablegen.c
+/// \brief Generate crc64_table_le.h and crc64_table_be.h
+///
+/// Compiling: gcc -std=c99 -o crc64_tablegen crc64_tablegen.c
+/// Add -DWORDS_BIGENDIAN to generate big endian table.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#ifdef WORDS_BIGENDIAN
+# include "../../common/bswap.h"
+#endif
+
+
+static uint64_t crc64_table[4][256];
+
+
+extern void
+init_crc64_table(void)
+{
+ static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42);
+
+ for (size_t s = 0; s < 4; ++s) {
+ for (size_t b = 0; b < 256; ++b) {
+ uint64_t r = s == 0 ? b : crc64_table[s - 1][b];
+
+ for (size_t i = 0; i < 8; ++i) {
+ if (r & 1)
+ r = (r >> 1) ^ poly64;
+ else
+ r >>= 1;
+ }
+
+ crc64_table[s][b] = r;
+ }
+ }
+
+#ifdef WORDS_BIGENDIAN
+ for (size_t s = 0; s < 4; ++s)
+ for (size_t b = 0; b < 256; ++b)
+ crc64_table[s][b] = bswap_64(crc64_table[s][b]);
+#endif
+
+ return;
+}
+
+
+static void
+print_crc64_table(void)
+{
+ printf("/* This file has been automatically generated by "
+ "crc64_tablegen.c. */\n\n"
+ "const uint64_t lzma_crc64_table[4][256] = {\n\t{");
+
+ for (size_t s = 0; s < 4; ++s) {
+ for (size_t b = 0; b < 256; ++b) {
+ if ((b % 2) == 0)
+ printf("\n\t\t");
+
+ printf("UINT64_C(0x%016" PRIX64 ")",
+ crc64_table[s][b]);
+
+ if (b != 255)
+ printf(",%s", (b+1) % 2 == 0 ? "" : " ");
+ }
+
+ if (s == 3)
+ printf("\n\t}\n};\n");
+ else
+ printf("\n\t}, {");
+ }
+
+ return;
+}
+
+
+int
+main(void)
+{
+ init_crc64_table();
+ print_crc64_table();
+ return 0;
+}
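
For reference, the generated lzma_crc64_table is consumed like any reflected CRC lookup table: the first slice alone is enough for a byte-at-a-time update. The sketch below is not part of the patch; it assumes the little-endian table variant (whose entries are not byteswapped) and the usual complemented initial and final values, and the crc64_simple name is purely illustrative.

    #include <stddef.h>
    #include <stdint.h>

    extern const uint64_t lzma_crc64_table[4][256];

    /* Byte-at-a-time CRC64 (ECMA-182, reflected) using only slice 0 of the
     * generated table. The assembly and slicing-by-four code below process
     * four bytes per step but reduce to this same per-byte recurrence. */
    static uint64_t
    crc64_simple(const uint8_t *buf, size_t size, uint64_t crc)
    {
        crc = ~crc;                       /* complement the initial value */
        while (size-- != 0)
            crc = lzma_crc64_table[0][(crc ^ *buf++) & 0xFF] ^ (crc >> 8);
        return ~crc;                      /* complement the final value */
    }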
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_x86.S b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_x86.S
new file mode 100644
index 00000000..17de076a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc64_x86.S
@@ -0,0 +1,287 @@
+/*
+ * Speed-optimized CRC64 using slicing-by-four algorithm
+ *
+ * This uses only i386 instructions, but it is optimized for i686 and later
+ * (including e.g. Pentium II/III/IV, Athlon XP, and Core 2).
+ *
+ * Authors: Igor Pavlov (original CRC32 assembly code)
+ * Lasse Collin (CRC64 adaptation of the modified CRC32 code)
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ *
+ * This code needs lzma_crc64_table, which can be created using the
+ * following C code:
+
+uint64_t lzma_crc64_table[4][256];
+
+void
+init_table(void)
+{
+ // ECMA-182
+ static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42);
+
+ for (size_t s = 0; s < 4; ++s) {
+ for (size_t b = 0; b < 256; ++b) {
+ uint64_t r = s == 0 ? b : lzma_crc64_table[s - 1][b];
+
+ for (size_t i = 0; i < 8; ++i) {
+ if (r & 1)
+ r = (r >> 1) ^ poly64;
+ else
+ r >>= 1;
+ }
+
+ lzma_crc64_table[s][b] = r;
+ }
+ }
+}
+
+ * The prototype of the CRC64 function:
+ * extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);
+ */
+
+/*
+ * On some systems, the functions need to be prefixed. The prefix is
+ * usually an underscore.
+ */
+#ifndef __USER_LABEL_PREFIX__
+# define __USER_LABEL_PREFIX__
+#endif
+#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
+#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
+#define LZMA_CRC64 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64)
+#define LZMA_CRC64_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64_table)
+
+/*
+ * Solaris assembler doesn't have .p2align, and Darwin uses .align
+ * differently than GNU/Linux and Solaris.
+ */
+#if defined(__MACH__) || defined(__MSDOS__)
+# define ALIGN(pow2, abs) .align pow2
+#else
+# define ALIGN(pow2, abs) .align abs
+#endif
+
+ .text
+ .globl LZMA_CRC64
+
+#if !defined(__MACH__) && !defined(_WIN32) && !defined(__CYGWIN__) \
+ && !defined(__MSDOS__)
+ .type LZMA_CRC64, @function
+#endif
+
+ ALIGN(4, 16)
+LZMA_CRC64:
+ /*
+ * Register usage:
+ * %eax crc LSB
+ * %edx crc MSB
+ * %esi buf
+ * %edi size or buf + size
+ * %ebx lzma_crc64_table
+ * %ebp Table index
+ * %ecx Temporary
+ */
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+ pushl %ebp
+ movl 0x14(%esp), %esi /* buf */
+ movl 0x18(%esp), %edi /* size */
+ movl 0x1C(%esp), %eax /* crc LSB */
+ movl 0x20(%esp), %edx /* crc MSB */
+
+ /*
+ * Store the address of lzma_crc64_table to %ebx. This is needed to
+ * get position-independent code (PIC).
+ *
+ * The PIC macro is defined by libtool, while __PIC__ is defined
+ * by GCC but only on some systems. Testing for both makes it simpler
+ * to test this code without libtool, and also keeps the code working
+ * when built with libtool but with a compiler other than GCC.
+ *
+ * I understood that libtool may define PIC on Windows even though
+ * the code in Windows DLLs is not PIC in the same sense that it is in ELF
+ * binaries, so we need a separate check to always use the non-PIC
+ * code on Windows.
+ */
+#if (!defined(PIC) && !defined(__PIC__)) \
+ || (defined(_WIN32) || defined(__CYGWIN__))
+ /* Not PIC */
+ movl $LZMA_CRC64_TABLE, %ebx
+#elif defined(__MACH__)
+ /* Mach-O */
+ call .L_get_pc
+.L_pic:
+ leal .L_lzma_crc64_table$non_lazy_ptr-.L_pic(%ebx), %ebx
+ movl (%ebx), %ebx
+#else
+ /* ELF */
+ call .L_get_pc
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+ movl LZMA_CRC64_TABLE@GOT(%ebx), %ebx
+#endif
+
+ /* Complement the initial value. */
+ notl %eax
+ notl %edx
+
+.L_align:
+ /*
+ * Check if there is enough input to use slicing-by-four.
+ * We need eight bytes, because the loop pre-reads four bytes.
+ */
+ cmpl $8, %edi
+ jl .L_rest
+
+ /* Check if we have reached alignment of four bytes. */
+ testl $3, %esi
+ jz .L_slice
+
+ /* Calculate CRC of the next input byte. */
+ movzbl (%esi), %ebp
+ incl %esi
+ movzbl %al, %ecx
+ xorl %ecx, %ebp
+ shrdl $8, %edx, %eax
+ xorl (%ebx, %ebp, 8), %eax
+ shrl $8, %edx
+ xorl 4(%ebx, %ebp, 8), %edx
+ decl %edi
+ jmp .L_align
+
+.L_slice:
+ /*
+ * If we get here, there's at least eight bytes of aligned input
+ * available. Make %edi a multiple of four bytes. Store the possible
+ * remainder over the "size" variable in the argument stack.
+ */
+ movl %edi, 0x18(%esp)
+ andl $-4, %edi
+ subl %edi, 0x18(%esp)
+
+ /*
+ * Let %edi be buf + size - 4 while running the main loop. This way
+ * we can compare for equality to determine when to exit the loop.
+ */
+ addl %esi, %edi
+ subl $4, %edi
+
+ /* Read in the first four aligned bytes. */
+ movl (%esi), %ecx
+
+.L_loop:
+ xorl %eax, %ecx
+ movzbl %cl, %ebp
+ movl 0x1800(%ebx, %ebp, 8), %eax
+ xorl %edx, %eax
+ movl 0x1804(%ebx, %ebp, 8), %edx
+ movzbl %ch, %ebp
+ xorl 0x1000(%ebx, %ebp, 8), %eax
+ xorl 0x1004(%ebx, %ebp, 8), %edx
+ shrl $16, %ecx
+ movzbl %cl, %ebp
+ xorl 0x0800(%ebx, %ebp, 8), %eax
+ xorl 0x0804(%ebx, %ebp, 8), %edx
+ movzbl %ch, %ebp
+ addl $4, %esi
+ xorl (%ebx, %ebp, 8), %eax
+ xorl 4(%ebx, %ebp, 8), %edx
+
+ /* Check for end of aligned input. */
+ cmpl %edi, %esi
+
+ /*
+ * Copy the next input byte to %ecx. It is slightly faster to
+ * read it here than at the top of the loop.
+ */
+ movl (%esi), %ecx
+ jl .L_loop
+
+ /*
+ * Process the remaining four bytes, which we have already
+ * copied to %ecx.
+ */
+ xorl %eax, %ecx
+ movzbl %cl, %ebp
+ movl 0x1800(%ebx, %ebp, 8), %eax
+ xorl %edx, %eax
+ movl 0x1804(%ebx, %ebp, 8), %edx
+ movzbl %ch, %ebp
+ xorl 0x1000(%ebx, %ebp, 8), %eax
+ xorl 0x1004(%ebx, %ebp, 8), %edx
+ shrl $16, %ecx
+ movzbl %cl, %ebp
+ xorl 0x0800(%ebx, %ebp, 8), %eax
+ xorl 0x0804(%ebx, %ebp, 8), %edx
+ movzbl %ch, %ebp
+ addl $4, %esi
+ xorl (%ebx, %ebp, 8), %eax
+ xorl 4(%ebx, %ebp, 8), %edx
+
+ /* Copy the number of remaining bytes to %edi. */
+ movl 0x18(%esp), %edi
+
+.L_rest:
+ /* Check for end of input. */
+ testl %edi, %edi
+ jz .L_return
+
+ /* Calculate CRC of the next input byte. */
+ movzbl (%esi), %ebp
+ incl %esi
+ movzbl %al, %ecx
+ xorl %ecx, %ebp
+ shrdl $8, %edx, %eax
+ xorl (%ebx, %ebp, 8), %eax
+ shrl $8, %edx
+ xorl 4(%ebx, %ebp, 8), %edx
+ decl %edi
+ jmp .L_rest
+
+.L_return:
+ /* Complement the final value. */
+ notl %eax
+ notl %edx
+
+ popl %ebp
+ popl %edi
+ popl %esi
+ popl %ebx
+ ret
+
+#if defined(PIC) || defined(__PIC__)
+ ALIGN(4, 16)
+.L_get_pc:
+ movl (%esp), %ebx
+ ret
+#endif
+
+#if defined(__MACH__) && (defined(PIC) || defined(__PIC__))
+ /* Mach-O PIC */
+ .section __IMPORT,__pointers,non_lazy_symbol_pointers
+.L_lzma_crc64_table$non_lazy_ptr:
+ .indirect_symbol LZMA_CRC64_TABLE
+ .long 0
+
+#elif defined(_WIN32) || defined(__CYGWIN__)
+# ifdef DLL_EXPORT
+ /* This is the equivalent of __declspec(dllexport). */
+ .section .drectve
+ .ascii " -export:lzma_crc64"
+# endif
+
+#elif !defined(__MSDOS__)
+ /* ELF */
+ .size LZMA_CRC64, .-LZMA_CRC64
+#endif
+
+/*
+ * This is needed to support non-executable stack. It's ugly to
+ * use __linux__ here, but I don't know a way to detect when
+ * we are using GNU assembler.
+ */
+#if defined(__ELF__) && defined(__linux__)
+ .section .note.GNU-stack,"",@progbits
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc_macros.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc_macros.h
new file mode 100644
index 00000000..b21b3af7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/crc_macros.h
@@ -0,0 +1,34 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file crc_macros.h
+/// \brief Some endian-dependent macros for CRC32 and CRC64
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef WORDS_BIGENDIAN
+# include "../../common/bswap.h"
+
+# define A(x) ((x) >> 24)
+# define B(x) (((x) >> 16) & 0xFF)
+# define C(x) (((x) >> 8) & 0xFF)
+# define D(x) ((x) & 0xFF)
+
+# define S8(x) ((x) << 8)
+# define S32(x) ((x) << 32)
+
+#else
+# define A(x) ((x) & 0xFF)
+# define B(x) (((x) >> 8) & 0xFF)
+# define C(x) (((x) >> 16) & 0xFF)
+# define D(x) ((x) >> 24)
+
+# define S8(x) ((x) >> 8)
+# define S32(x) ((x) >> 32)
+#endif
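
The A()..D() and S8()/S32() macros above let one slicing-by-four expression cover both byte orders. As a rough little-endian sketch only (assuming the buffer is 4-byte aligned and its length is a multiple of four; the real implementations also handle the unaligned head and the trailing bytes), the main CRC64 loop combines the four table slices like this:

    #include <stddef.h>
    #include <stdint.h>

    extern const uint64_t lzma_crc64_table[4][256];

    /* Little-endian slicing-by-four step, mirroring the .L_loop body of the
     * assembly above: fold four input bytes into the CRC with one table
     * lookup per byte instead of one per bit. */
    static uint64_t
    crc64_slice4_le(const uint8_t *buf, size_t size, uint64_t crc)
    {
        crc = ~crc;
        while (size != 0) {
            const uint32_t tmp = (uint32_t)crc ^ *(const uint32_t *)buf;
            buf += 4;
            size -= 4;
            crc = lzma_crc64_table[3][tmp & 0xFF]
                    ^ lzma_crc64_table[2][(tmp >> 8) & 0xFF]
                    ^ (crc >> 32)
                    ^ lzma_crc64_table[1][(tmp >> 16) & 0xFF]
                    ^ lzma_crc64_table[0][tmp >> 24];
        }
        return ~crc;
    }

With the big-endian macro definitions and the byteswapped table, the same expression structure is reused, which is why both variants share the macro names.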
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/sha256.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/sha256.c
new file mode 100644
index 00000000..55f14c0e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/check/sha256.c
@@ -0,0 +1,215 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file sha256.c
+/// \brief SHA-256
+///
+/// \todo Crypto++ has x86 ASM optimizations. They use SSE so if they
+/// are imported to liblzma, SSE instructions need to be used
+/// conditionally to keep the code working on older boxes.
+/// We could also support using some external library for SHA-256.
+//
+// This code is based on the code found in 7-Zip, which has a modified
+// version of the SHA-256 found in Crypto++ <http://www.cryptopp.com/>.
+// The code was modified a little to fit into liblzma.
+//
+// Authors: Kevin Springle
+// Wei Dai
+// Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// Avoid bogus warnings in transform().
+#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) || __GNUC__ > 4
+# pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
+
+#include "check.h"
+
+#ifndef WORDS_BIGENDIAN
+# include "../../common/bswap.h"
+#endif
+
+// At least on x86, GCC is able to optimize this to a rotate instruction.
+#define rotr_32(num, amount) ((num) >> (amount) | (num) << (32 - (amount)))
+
+#define blk0(i) (W[i] = data[i])
+#define blk2(i) (W[i & 15] += s1(W[(i - 2) & 15]) + W[(i - 7) & 15] \
+ + s0(W[(i - 15) & 15]))
+
+#define Ch(x, y, z) (z ^ (x & (y ^ z)))
+#define Maj(x, y, z) ((x & y) | (z & (x | y)))
+
+#define a(i) T[(0 - i) & 7]
+#define b(i) T[(1 - i) & 7]
+#define c(i) T[(2 - i) & 7]
+#define d(i) T[(3 - i) & 7]
+#define e(i) T[(4 - i) & 7]
+#define f(i) T[(5 - i) & 7]
+#define g(i) T[(6 - i) & 7]
+#define h(i) T[(7 - i) & 7]
+
+#define R(i) \
+ h(i) += S1(e(i)) + Ch(e(i), f(i), g(i)) + SHA256_K[i + j] \
+ + (j ? blk2(i) : blk0(i)); \
+ d(i) += h(i); \
+ h(i) += S0(a(i)) + Maj(a(i), b(i), c(i))
+
+#define S0(x) (rotr_32(x, 2) ^ rotr_32(x, 13) ^ rotr_32(x, 22))
+#define S1(x) (rotr_32(x, 6) ^ rotr_32(x, 11) ^ rotr_32(x, 25))
+#define s0(x) (rotr_32(x, 7) ^ rotr_32(x, 18) ^ (x >> 3))
+#define s1(x) (rotr_32(x, 17) ^ rotr_32(x, 19) ^ (x >> 10))
+
+
+static const uint32_t SHA256_K[64] = {
+ 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
+ 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
+ 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
+ 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
+ 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
+ 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
+ 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
+ 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
+ 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
+ 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
+ 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
+ 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
+ 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
+ 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
+ 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
+ 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
+};
+
+
+static void
+transform(uint32_t state[static 8], const uint32_t data[static 16])
+{
+ uint32_t W[16];
+ uint32_t T[8];
+
+ // Copy state[] to working vars.
+ memcpy(T, state, sizeof(T));
+
+ // 64 operations, partially loop unrolled
+ for (unsigned int j = 0; j < 64; j += 16) {
+ R( 0); R( 1); R( 2); R( 3);
+ R( 4); R( 5); R( 6); R( 7);
+ R( 8); R( 9); R(10); R(11);
+ R(12); R(13); R(14); R(15);
+ }
+
+ // Add the working vars back into state[].
+ state[0] += a(0);
+ state[1] += b(0);
+ state[2] += c(0);
+ state[3] += d(0);
+ state[4] += e(0);
+ state[5] += f(0);
+ state[6] += g(0);
+ state[7] += h(0);
+}
+
+
+static void
+process(lzma_check_state *check)
+{
+#ifdef WORDS_BIGENDIAN
+ transform(check->state.sha256.state, check->buffer.u32);
+
+#else
+ uint32_t data[16];
+
+ for (size_t i = 0; i < 16; ++i)
+ data[i] = bswap_32(check->buffer.u32[i]);
+
+ transform(check->state.sha256.state, data);
+#endif
+
+ return;
+}
+
+
+extern void
+lzma_sha256_init(lzma_check_state *check)
+{
+ static const uint32_t s[8] = {
+ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+ 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
+ };
+
+ memcpy(check->state.sha256.state, s, sizeof(s));
+ check->state.sha256.size = 0;
+
+ return;
+}
+
+
+extern void
+lzma_sha256_update(const uint8_t *buf, size_t size, lzma_check_state *check)
+{
+ // Copy the input data into a properly aligned temporary buffer.
+ // This way we can be called with arbitrarily sized buffers
+ // (no need to be a multiple of 64 bytes), and the code also works
+ // on architectures that don't allow unaligned memory access.
+ while (size > 0) {
+ const size_t copy_start = check->state.sha256.size & 0x3F;
+ size_t copy_size = 64 - copy_start;
+ if (copy_size > size)
+ copy_size = size;
+
+ memcpy(check->buffer.u8 + copy_start, buf, copy_size);
+
+ buf += copy_size;
+ size -= copy_size;
+ check->state.sha256.size += copy_size;
+
+ if ((check->state.sha256.size & 0x3F) == 0)
+ process(check);
+ }
+
+ return;
+}
+
+
+extern void
+lzma_sha256_finish(lzma_check_state *check)
+{
+ // Add padding as described in RFC 3174 (it describes SHA-1 but
+ // the same padding style is used for SHA-256 too).
+ size_t pos = check->state.sha256.size & 0x3F;
+ check->buffer.u8[pos++] = 0x80;
+
+ while (pos != 64 - 8) {
+ if (pos == 64) {
+ process(check);
+ pos = 0;
+ }
+
+ check->buffer.u8[pos++] = 0x00;
+ }
+
+ // Convert the message size from bytes to bits.
+ check->state.sha256.size *= 8;
+
+#ifdef WORDS_BIGENDIAN
+ check->buffer.u64[(64 - 8) / 8] = check->state.sha256.size;
+#else
+ check->buffer.u64[(64 - 8) / 8] = bswap_64(check->state.sha256.size);
+#endif
+
+ process(check);
+
+ for (size_t i = 0; i < 8; ++i)
+#ifdef WORDS_BIGENDIAN
+ check->buffer.u32[i] = check->state.sha256.state[i];
+#else
+ check->buffer.u32[i] = bswap_32(check->state.sha256.state[i]);
+#endif
+
+ return;
+}
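
The three entry points above form a streaming API: one init, any number of update calls, then finish, after which the 32-byte digest (in the standard big-endian byte order) is left in the check buffer. A hypothetical one-shot wrapper, assuming only the lzma_check_state layout from check.h that this file already uses, could look like:

    #include <string.h>

    #include "check.h"

    /* Illustrative helper, not part of the patch: hash a single buffer. */
    static void
    sha256_oneshot(const uint8_t *buf, size_t size, uint8_t digest[32])
    {
        lzma_check_state check;

        lzma_sha256_init(&check);
        lzma_sha256_update(buf, size, &check);
        lzma_sha256_finish(&check);

        /* lzma_sha256_finish() leaves the digest in the 64-byte buffer. */
        memcpy(digest, check.buffer.u8, 32);
    }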
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/Makefile.inc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/Makefile.inc
new file mode 100644
index 00000000..aaaeee93
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/Makefile.inc
@@ -0,0 +1,67 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+liblzma_la_SOURCES += \
+ common/common.c \
+ common/common.h \
+ common/bsr.h \
+ common/block_util.c \
+ common/easy_preset.c \
+ common/easy_preset.h \
+ common/filter_common.c \
+ common/filter_common.h \
+ common/index.c \
+ common/index.h \
+ common/stream_flags_common.c \
+ common/stream_flags_common.h \
+ common/vli_size.c
+
+if COND_MAIN_ENCODER
+liblzma_la_SOURCES += \
+ common/alone_encoder.c \
+ common/block_buffer_encoder.c \
+ common/block_encoder.c \
+ common/block_encoder.h \
+ common/block_header_encoder.c \
+ common/easy_buffer_encoder.c \
+ common/easy_encoder.c \
+ common/easy_encoder_memusage.c \
+ common/filter_buffer_encoder.c \
+ common/filter_encoder.c \
+ common/filter_encoder.h \
+ common/filter_flags_encoder.c \
+ common/index_encoder.c \
+ common/index_encoder.h \
+ common/stream_buffer_encoder.c \
+ common/stream_encoder.c \
+ common/stream_encoder.h \
+ common/stream_flags_encoder.c \
+ common/vli_encoder.c
+endif
+
+if COND_MAIN_DECODER
+liblzma_la_SOURCES += \
+ common/alone_decoder.c \
+ common/alone_decoder.h \
+ common/auto_decoder.c \
+ common/block_buffer_decoder.c \
+ common/block_decoder.c \
+ common/block_decoder.h \
+ common/block_header_decoder.c \
+ common/easy_decoder_memusage.c \
+ common/filter_buffer_decoder.c \
+ common/filter_decoder.c \
+ common/filter_decoder.h \
+ common/filter_flags_decoder.c \
+ common/index_decoder.c \
+ common/index_hash.c \
+ common/stream_buffer_decoder.c \
+ common/stream_decoder.c \
+ common/stream_decoder.h \
+ common/stream_flags_decoder.c \
+ common/vli_decoder.c
+endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_decoder.c
new file mode 100644
index 00000000..827116a7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_decoder.c
@@ -0,0 +1,231 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file alone_decoder.c
+/// \brief Decoder for LZMA_Alone files
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "alone_decoder.h"
+#include "lzma_decoder.h"
+#include "lz_decoder.h"
+
+
+struct lzma_coder_s {
+ lzma_next_coder next;
+
+ enum {
+ SEQ_PROPERTIES,
+ SEQ_DICTIONARY_SIZE,
+ SEQ_UNCOMPRESSED_SIZE,
+ SEQ_CODER_INIT,
+ SEQ_CODE,
+ } sequence;
+
+ /// Position in the header fields
+ size_t pos;
+
+ /// Uncompressed size decoded from the header
+ lzma_vli uncompressed_size;
+
+ /// Memory usage limit
+ uint64_t memlimit;
+
+ /// Amount of memory actually needed (only an estimate)
+ uint64_t memusage;
+
+ /// Options decoded from the header needed to initialize
+ /// the LZMA decoder
+ lzma_options_lzma options;
+};
+
+
+static lzma_ret
+alone_decode(lzma_coder *coder,
+ lzma_allocator *allocator lzma_attribute((unused)),
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size,
+ lzma_action action)
+{
+ while (*out_pos < out_size
+ && (coder->sequence == SEQ_CODE || *in_pos < in_size))
+ switch (coder->sequence) {
+ case SEQ_PROPERTIES:
+ if (lzma_lzma_lclppb_decode(&coder->options, in[*in_pos]))
+ return LZMA_FORMAT_ERROR;
+
+ coder->sequence = SEQ_DICTIONARY_SIZE;
+ ++*in_pos;
+ break;
+
+ case SEQ_DICTIONARY_SIZE:
+ coder->options.dict_size
+ |= (size_t)(in[*in_pos]) << (coder->pos * 8);
+
+ if (++coder->pos == 4) {
+ if (coder->options.dict_size != UINT32_MAX) {
+ // A hack to ditch tons of false positives:
+ // We allow only dictionary sizes that are
+ // 2^n or 2^n + 2^(n-1). LZMA_Alone created
+ // only files with 2^n, but accepts any
+ // dictionary size. If someone complains, this
+ // will be reconsidered.
+ uint32_t d = coder->options.dict_size - 1;
+ d |= d >> 2;
+ d |= d >> 3;
+ d |= d >> 4;
+ d |= d >> 8;
+ d |= d >> 16;
+ ++d;
+
+ if (d != coder->options.dict_size)
+ return LZMA_FORMAT_ERROR;
+ }
+
+ coder->pos = 0;
+ coder->sequence = SEQ_UNCOMPRESSED_SIZE;
+ }
+
+ ++*in_pos;
+ break;
+
+ case SEQ_UNCOMPRESSED_SIZE:
+ coder->uncompressed_size
+ |= (lzma_vli)(in[*in_pos]) << (coder->pos * 8);
+ ++*in_pos;
+ if (++coder->pos < 8)
+ break;
+
+ // Another hack to ditch false positives: Assume that
+ // if the uncompressed size is known, it must be less
+ // than 256 GiB. Again, if someone complains, this
+ // will be reconsidered.
+ if (coder->uncompressed_size != LZMA_VLI_UNKNOWN
+ && coder->uncompressed_size
+ >= (LZMA_VLI_C(1) << 38))
+ return LZMA_FORMAT_ERROR;
+
+ // Calculate the memory usage so that it is ready
+ // for SEQ_CODER_INIT.
+ coder->memusage = lzma_lzma_decoder_memusage(&coder->options)
+ + LZMA_MEMUSAGE_BASE;
+
+ coder->pos = 0;
+ coder->sequence = SEQ_CODER_INIT;
+
+ // Fall through
+
+ case SEQ_CODER_INIT: {
+ if (coder->memusage > coder->memlimit)
+ return LZMA_MEMLIMIT_ERROR;
+
+ lzma_filter_info filters[2] = {
+ {
+ .init = &lzma_lzma_decoder_init,
+ .options = &coder->options,
+ }, {
+ .init = NULL,
+ }
+ };
+
+ const lzma_ret ret = lzma_next_filter_init(&coder->next,
+ allocator, filters);
+ if (ret != LZMA_OK)
+ return ret;
+
+ // Use a hack to set the uncompressed size.
+ lzma_lz_decoder_uncompressed(coder->next.coder,
+ coder->uncompressed_size);
+
+ coder->sequence = SEQ_CODE;
+ break;
+ }
+
+ case SEQ_CODE: {
+ return coder->next.code(coder->next.coder,
+ allocator, in, in_pos, in_size,
+ out, out_pos, out_size, action);
+ }
+
+ default:
+ return LZMA_PROG_ERROR;
+ }
+
+ return LZMA_OK;
+}
+
+
+static void
+alone_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+static lzma_ret
+alone_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
+ uint64_t *old_memlimit, uint64_t new_memlimit)
+{
+ if (new_memlimit != 0 && new_memlimit < coder->memusage)
+ return LZMA_MEMLIMIT_ERROR;
+
+ *memusage = coder->memusage;
+ *old_memlimit = coder->memlimit;
+ coder->memlimit = new_memlimit;
+
+ return LZMA_OK;
+}
+
+
+extern lzma_ret
+lzma_alone_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ uint64_t memlimit)
+{
+ lzma_next_coder_init(&lzma_alone_decoder_init, next, allocator);
+
+ if (memlimit == 0)
+ return LZMA_PROG_ERROR;
+
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &alone_decode;
+ next->end = &alone_decoder_end;
+ next->memconfig = &alone_decoder_memconfig;
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ }
+
+ next->coder->sequence = SEQ_PROPERTIES;
+ next->coder->pos = 0;
+ next->coder->options.dict_size = 0;
+ next->coder->options.preset_dict = NULL;
+ next->coder->options.preset_dict_size = 0;
+ next->coder->uncompressed_size = 0;
+ next->coder->memlimit = memlimit;
+ next->coder->memusage = LZMA_MEMUSAGE_BASE;
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_alone_decoder(lzma_stream *strm, uint64_t memlimit)
+{
+ lzma_next_strm_init(lzma_alone_decoder_init, strm, memlimit);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+ strm->internal->supported_actions[LZMA_FINISH] = true;
+
+ return LZMA_OK;
+}
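
A minimal sketch of driving the decoder above through the public lzma_stream interface; LZMA_STREAM_INIT, lzma_end(), and the 64 MiB memory limit are assumptions taken from the public headers, not part of this file:

    #include <stdint.h>
    #include <lzma.h>

    /* Decode a .lzma (LZMA_Alone) buffer in one shot; returns 0 on success. */
    static int decode_alone(const uint8_t *in, size_t in_size,
                            uint8_t *out, size_t out_size, size_t *out_used)
    {
        lzma_stream strm = LZMA_STREAM_INIT;
        if (lzma_alone_decoder(&strm, UINT64_C(64) << 20) != LZMA_OK)
            return -1;

        strm.next_in = in;
        strm.avail_in = in_size;
        strm.next_out = out;
        strm.avail_out = out_size;

        /* Only LZMA_RUN and LZMA_FINISH are supported, per the init above. */
        const lzma_ret ret = lzma_code(&strm, LZMA_FINISH);
        *out_used = out_size - strm.avail_out;
        lzma_end(&strm);

        return ret == LZMA_STREAM_END ? 0 : -1;
    }
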
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_decoder.h
new file mode 100644
index 00000000..e2362a7e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_decoder.h
@@ -0,0 +1,24 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file alone_decoder.h
+/// \brief Decoder for LZMA_Alone files
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_ALONE_DECODER_H
+#define LZMA_ALONE_DECODER_H
+
+#include "common.h"
+
+
+extern lzma_ret lzma_alone_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, uint64_t memlimit);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_encoder.c
new file mode 100644
index 00000000..8536bdd2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/alone_encoder.c
@@ -0,0 +1,159 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file alone_encoder.c
+/// \brief Encoder for LZMA_Alone files
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+#include "lzma_encoder.h"
+
+
+#define ALONE_HEADER_SIZE (1 + 4 + 8)
+
+
+struct lzma_coder_s {
+ lzma_next_coder next;
+
+ enum {
+ SEQ_HEADER,
+ SEQ_CODE,
+ } sequence;
+
+ size_t header_pos;
+ uint8_t header[ALONE_HEADER_SIZE];
+};
+
+
+static lzma_ret
+alone_encode(lzma_coder *coder,
+ lzma_allocator *allocator lzma_attribute((unused)),
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size,
+ lzma_action action)
+{
+ while (*out_pos < out_size)
+ switch (coder->sequence) {
+ case SEQ_HEADER:
+ lzma_bufcpy(coder->header, &coder->header_pos,
+ ALONE_HEADER_SIZE,
+ out, out_pos, out_size);
+ if (coder->header_pos < ALONE_HEADER_SIZE)
+ return LZMA_OK;
+
+ coder->sequence = SEQ_CODE;
+ break;
+
+ case SEQ_CODE:
+ return coder->next.code(coder->next.coder,
+ allocator, in, in_pos, in_size,
+ out, out_pos, out_size, action);
+
+ default:
+ assert(0);
+ return LZMA_PROG_ERROR;
+ }
+
+ return LZMA_OK;
+}
+
+
+static void
+alone_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+// At least for now, this is not used by any internal function.
+static lzma_ret
+alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_options_lzma *options)
+{
+ lzma_next_coder_init(&alone_encoder_init, next, allocator);
+
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &alone_encode;
+ next->end = &alone_encoder_end;
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ }
+
+ // Basic initializations
+ next->coder->sequence = SEQ_HEADER;
+ next->coder->header_pos = 0;
+
+ // Encode the header:
+ // - Properties (1 byte)
+ if (lzma_lzma_lclppb_encode(options, next->coder->header))
+ return LZMA_OPTIONS_ERROR;
+
+ // - Dictionary size (4 bytes)
+ if (options->dict_size < LZMA_DICT_SIZE_MIN)
+ return LZMA_OPTIONS_ERROR;
+
+ // Round up to the next 2^n or 2^n + 2^(n - 1), whichever comes
+ // first, unless the value is already UINT32_MAX. While the header would
+ // allow any 32-bit integer, we do this to keep the decoder of liblzma
+ // accepting the resulting files.
+ uint32_t d = options->dict_size - 1;
+ d |= d >> 2;
+ d |= d >> 3;
+ d |= d >> 4;
+ d |= d >> 8;
+ d |= d >> 16;
+ if (d != UINT32_MAX)
+ ++d;
+
+ integer_write_32(next->coder->header + 1, d);
+
+ // - Uncompressed size (always unknown and using EOPM)
+ memset(next->coder->header + 1 + 4, 0xFF, 8);
+
+ // Initialize the LZMA encoder.
+ const lzma_filter_info filters[2] = {
+ {
+ .init = &lzma_lzma_encoder_init,
+ .options = (void *)(options),
+ }, {
+ .init = NULL,
+ }
+ };
+
+ return lzma_next_filter_init(&next->coder->next, allocator, filters);
+}
+
+
+/*
+extern lzma_ret
+lzma_alone_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_options_alone *options)
+{
+ lzma_next_coder_init(&alone_encoder_init, next, allocator, options);
+}
+*/
+
+
+extern LZMA_API(lzma_ret)
+lzma_alone_encoder(lzma_stream *strm, const lzma_options_lzma *options)
+{
+ lzma_next_strm_init(alone_encoder_init, strm, options);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+ strm->internal->supported_actions[LZMA_FINISH] = true;
+
+ return LZMA_OK;
+}
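
The encoder side mirrors this. A sketch using lzma_alone_encoder() as declared above; lzma_lzma_preset() and LZMA_PRESET_DEFAULT are assumptions from the public headers, and the header always records the uncompressed size as unknown (EOPM), as encoded above:

    #include <stdint.h>
    #include <lzma.h>

    /* Compress in[] into a .lzma (LZMA_Alone) buffer; returns 0 on success. */
    static int encode_alone(const uint8_t *in, size_t in_size,
                            uint8_t *out, size_t out_size, size_t *out_used)
    {
        lzma_options_lzma opt;
        if (lzma_lzma_preset(&opt, LZMA_PRESET_DEFAULT))   /* nonzero on error */
            return -1;

        lzma_stream strm = LZMA_STREAM_INIT;
        if (lzma_alone_encoder(&strm, &opt) != LZMA_OK)
            return -1;

        strm.next_in = in;
        strm.avail_in = in_size;
        strm.next_out = out;
        strm.avail_out = out_size;

        const lzma_ret ret = lzma_code(&strm, LZMA_FINISH);
        *out_used = out_size - strm.avail_out;
        lzma_end(&strm);

        return ret == LZMA_STREAM_END ? 0 : -1;
    }
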
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/auto_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/auto_decoder.c
new file mode 100644
index 00000000..c9d85a0f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/auto_decoder.c
@@ -0,0 +1,188 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file auto_decoder.c
+/// \brief Autodetect between .xz Stream and .lzma (LZMA_Alone) formats
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "stream_decoder.h"
+#include "alone_decoder.h"
+
+
+struct lzma_coder_s {
+ /// Stream decoder or LZMA_Alone decoder
+ lzma_next_coder next;
+
+ uint64_t memlimit;
+ uint32_t flags;
+
+ enum {
+ SEQ_INIT,
+ SEQ_CODE,
+ SEQ_FINISH,
+ } sequence;
+};
+
+
+static lzma_ret
+auto_decode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ switch (coder->sequence) {
+ case SEQ_INIT:
+ if (*in_pos >= in_size)
+ return LZMA_OK;
+
+ // Update the sequence now, because we want to continue from
+ // SEQ_CODE even if we return some LZMA_*_CHECK.
+ coder->sequence = SEQ_CODE;
+
+ // Detect the file format. For now this is simple, since if
+ // it doesn't start with 0xFD (the first magic byte of the
+ // new format), it has to be LZMA_Alone, or something that
+ // we don't support at all.
+ if (in[*in_pos] == 0xFD) {
+ return_if_error(lzma_stream_decoder_init(
+ &coder->next, allocator,
+ coder->memlimit, coder->flags));
+ } else {
+ return_if_error(lzma_alone_decoder_init(&coder->next,
+ allocator, coder->memlimit));
+
+ // If the application wants to know about missing
+ // integrity check or about the check in general, we
+ // need to handle it here, because LZMA_Alone decoder
+ // doesn't accept any flags.
+ if (coder->flags & LZMA_TELL_NO_CHECK)
+ return LZMA_NO_CHECK;
+
+ if (coder->flags & LZMA_TELL_ANY_CHECK)
+ return LZMA_GET_CHECK;
+ }
+
+ // Fall through
+
+ case SEQ_CODE: {
+ const lzma_ret ret = coder->next.code(
+ coder->next.coder, allocator,
+ in, in_pos, in_size,
+ out, out_pos, out_size, action);
+ if (ret != LZMA_STREAM_END
+ || (coder->flags & LZMA_CONCATENATED) == 0)
+ return ret;
+
+ coder->sequence = SEQ_FINISH;
+ }
+
+ // Fall through
+
+ case SEQ_FINISH:
+ // When LZMA_DECODE_CONCATENATED was used and we were decoding
+ // LZMA_Alone file, we need to check that there is no
+ // trailing garbage and wait for LZMA_FINISH.
+ if (*in_pos < in_size)
+ return LZMA_DATA_ERROR;
+
+ return action == LZMA_FINISH ? LZMA_STREAM_END : LZMA_OK;
+
+ default:
+ assert(0);
+ return LZMA_PROG_ERROR;
+ }
+}
+
+
+static void
+auto_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+static lzma_check
+auto_decoder_get_check(const lzma_coder *coder)
+{
+ // It is LZMA_Alone if get_check is NULL.
+ return coder->next.get_check == NULL ? LZMA_CHECK_NONE
+ : coder->next.get_check(coder->next.coder);
+}
+
+
+static lzma_ret
+auto_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
+ uint64_t *old_memlimit, uint64_t new_memlimit)
+{
+ lzma_ret ret;
+
+ if (coder->next.memconfig != NULL) {
+ ret = coder->next.memconfig(coder->next.coder,
+ memusage, old_memlimit, new_memlimit);
+ assert(*old_memlimit == coder->memlimit);
+ } else {
+ // No coder is configured yet. Use the base value as
+ // the current memory usage.
+ *memusage = LZMA_MEMUSAGE_BASE;
+ *old_memlimit = coder->memlimit;
+ ret = LZMA_OK;
+ }
+
+ if (ret == LZMA_OK && new_memlimit != 0)
+ coder->memlimit = new_memlimit;
+
+ return ret;
+}
+
+
+static lzma_ret
+auto_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ uint64_t memlimit, uint32_t flags)
+{
+ lzma_next_coder_init(&auto_decoder_init, next, allocator);
+
+ if (memlimit == 0)
+ return LZMA_PROG_ERROR;
+
+ if (flags & ~LZMA_SUPPORTED_FLAGS)
+ return LZMA_OPTIONS_ERROR;
+
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &auto_decode;
+ next->end = &auto_decoder_end;
+ next->get_check = &auto_decoder_get_check;
+ next->memconfig = &auto_decoder_memconfig;
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ }
+
+ next->coder->memlimit = memlimit;
+ next->coder->flags = flags;
+ next->coder->sequence = SEQ_INIT;
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_auto_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags)
+{
+ lzma_next_strm_init(auto_decoder_init, strm, memlimit, flags);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+ strm->internal->supported_actions[LZMA_FINISH] = true;
+
+ return LZMA_OK;
+}
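
Because LZMA_Alone files carry no integrity check, the flags are the interesting part of the public entry point. A small sketch; the flag combination is only an example:

    #include <lzma.h>

    /* Set up a decoder that accepts both .xz and .lzma input. */
    static int init_auto(lzma_stream *strm, uint64_t memlimit)
    {
        /* With LZMA_TELL_NO_CHECK, lzma_code() returns LZMA_NO_CHECK once
         * an LZMA_Alone file is detected (handled in SEQ_INIT above).
         * LZMA_CONCATENATED adds the trailing-garbage check of SEQ_FINISH. */
        const lzma_ret ret = lzma_auto_decoder(strm, memlimit,
                LZMA_TELL_NO_CHECK | LZMA_CONCATENATED);

        return ret == LZMA_OK ? 0 : -1;
    }
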
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_buffer_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_buffer_decoder.c
new file mode 100644
index 00000000..75ecc804
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_buffer_decoder.c
@@ -0,0 +1,82 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file block_buffer_decoder.c
+/// \brief Single-call .xz Block decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "block_decoder.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_block_buffer_decode(lzma_block *block, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ if (in_pos == NULL || (in == NULL && *in_pos != in_size)
+ || *in_pos > in_size || out_pos == NULL
+ || (out == NULL && *out_pos != out_size)
+ || *out_pos > out_size)
+ return LZMA_PROG_ERROR;
+
+ // Initialize the Block decoder.
+ lzma_next_coder block_decoder = LZMA_NEXT_CODER_INIT;
+ lzma_ret ret = lzma_block_decoder_init(
+ &block_decoder, allocator, block);
+
+ if (ret == LZMA_OK) {
+ // Save the positions so that we can restore them in case
+ // an error occurs.
+ const size_t in_start = *in_pos;
+ const size_t out_start = *out_pos;
+
+ // Do the actual decoding.
+ ret = block_decoder.code(block_decoder.coder, allocator,
+ in, in_pos, in_size, out, out_pos, out_size,
+ LZMA_FINISH);
+
+ if (ret == LZMA_STREAM_END) {
+ ret = LZMA_OK;
+ } else {
+ if (ret == LZMA_OK) {
+ // Either the input was truncated or the
+ // output buffer was too small.
+ assert(*in_pos == in_size
+ || *out_pos == out_size);
+
+ // If all the input was consumed, then the
+ // input is truncated, even if the output
+ // buffer is also full. This is because
+ // processing the last byte of the Block
+ // never produces output.
+ //
+ // NOTE: This assumption may break when new
+ // filters are added, if the end marker of
+ // the filter doesn't consume at least one
+ // complete byte.
+ if (*in_pos == in_size)
+ ret = LZMA_DATA_ERROR;
+ else
+ ret = LZMA_BUF_ERROR;
+ }
+
+ // Restore the positions.
+ *in_pos = in_start;
+ *out_pos = out_start;
+ }
+ }
+
+ // Free the decoder memory. This needs to be done even if
+ // initialization fails, because the internal API doesn't
+ // require the initialization function to free its memory on error.
+ lzma_next_end(&block_decoder, allocator);
+
+ return ret;
+}
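
A sketch of the single-call path, assuming blk has already been filled in by a Block Header decode (header_size, check and filters set, sizes known or LZMA_VLI_UNKNOWN):

    #include <lzma.h>

    /* Decode one Block whose header was parsed into *blk beforehand. */
    static int decode_block_buffer(lzma_block *blk,
            const uint8_t *in, size_t in_size,
            uint8_t *out, size_t out_size, size_t *out_used)
    {
        size_t in_pos = 0;
        size_t out_pos = 0;

        /* On failure the positions are restored, as documented above. */
        const lzma_ret ret = lzma_block_buffer_decode(blk, NULL,
                in, &in_pos, in_size, out, &out_pos, out_size);

        *out_used = out_pos;
        return ret == LZMA_OK ? 0 : -1;
    }
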
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_buffer_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_buffer_encoder.c
new file mode 100644
index 00000000..6163a102
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_buffer_encoder.c
@@ -0,0 +1,301 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file block_buffer_encoder.c
+/// \brief Single-call .xz Block encoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "block_encoder.h"
+#include "filter_encoder.h"
+#include "lzma2_encoder.h"
+#include "check.h"
+
+
+/// Estimate the maximum size of the Block Header and Check fields for
+/// a Block that uses LZMA2 uncompressed chunks. We could use
+/// lzma_block_header_size() but this is simpler.
+///
+/// Block Header Size + Block Flags + Compressed Size
+/// + Uncompressed Size + Filter Flags for LZMA2 + CRC32 + Check
+/// and round up to the next multiple of four to take Header Padding
+/// into account.
+#define HEADERS_BOUND ((1 + 1 + 2 * LZMA_VLI_BYTES_MAX + 3 + 4 \
+ + LZMA_CHECK_SIZE_MAX + 3) & ~3)
+
+
+static lzma_vli
+lzma2_bound(lzma_vli uncompressed_size)
+{
+ // Prevent integer overflow in overhead calculation.
+ if (uncompressed_size > COMPRESSED_SIZE_MAX)
+ return 0;
+
+ // Calculate the exact overhead of the LZMA2 headers: Round
+ // uncompressed_size up to the next multiple of LZMA2_CHUNK_MAX,
+ // multiply by the size of per-chunk header, and add one byte for
+ // the end marker.
+ const lzma_vli overhead = ((uncompressed_size + LZMA2_CHUNK_MAX - 1)
+ / LZMA2_CHUNK_MAX)
+ * LZMA2_HEADER_UNCOMPRESSED + 1;
+
+ // Catch the possible integer overflow.
+ if (COMPRESSED_SIZE_MAX - overhead < uncompressed_size)
+ return 0;
+
+ return uncompressed_size + overhead;
+}
+
+
+extern LZMA_API(size_t)
+lzma_block_buffer_bound(size_t uncompressed_size)
+{
+ // For now, if the data doesn't compress, we always use uncompressed
+ // chunks of LZMA2. In the future we may use the Subblock filter too,
+ // but for simplicity we will probably still use the same bound
+ // calculation even though Subblock filter would have slightly less
+ // overhead.
+ lzma_vli lzma2_size = lzma2_bound(uncompressed_size);
+ if (lzma2_size == 0)
+ return 0;
+
+ // Take Block Padding into account.
+ lzma2_size = (lzma2_size + 3) & ~LZMA_VLI_C(3);
+
+#if SIZE_MAX < LZMA_VLI_MAX
+ // Catch the possible integer overflow on 32-bit systems. There's no
+ // overflow on 64-bit systems, because lzma2_bound() already takes
+ // into account the size of the headers in the Block.
+ if (SIZE_MAX - HEADERS_BOUND < lzma2_size)
+ return 0;
+#endif
+
+ return HEADERS_BOUND + lzma2_size;
+}
+
+
+static lzma_ret
+block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ // TODO: Figure out if the last filter is LZMA2 or Subblock and use
+ // that filter to encode the uncompressed chunks.
+
+ // Use LZMA2 uncompressed chunks. We wouldn't need a dictionary at
+ // all, but LZMA2 always requires a dictionary, so use the minimum
+ // value to minimize memory usage of the decoder.
+ lzma_options_lzma lzma2 = {
+ .dict_size = LZMA_DICT_SIZE_MIN,
+ };
+
+ lzma_filter filters[2];
+ filters[0].id = LZMA_FILTER_LZMA2;
+ filters[0].options = &lzma2;
+ filters[1].id = LZMA_VLI_UNKNOWN;
+
+ // Set the above filter options to *block temporarily so that we can
+ // encode the Block Header.
+ lzma_filter *filters_orig = block->filters;
+ block->filters = filters;
+
+ if (lzma_block_header_size(block) != LZMA_OK) {
+ block->filters = filters_orig;
+ return LZMA_PROG_ERROR;
+ }
+
+ // Check that there's enough output space. The caller has already
+ // set block->compressed_size to what lzma2_bound() has returned,
+ // so we can reuse that value. We know that compressed_size is a
+ // known valid VLI and header_size is a small value so their sum
+ // will never overflow.
+ assert(block->compressed_size == lzma2_bound(in_size));
+ if (out_size - *out_pos
+ < block->header_size + block->compressed_size) {
+ block->filters = filters_orig;
+ return LZMA_BUF_ERROR;
+ }
+
+ if (lzma_block_header_encode(block, out + *out_pos) != LZMA_OK) {
+ block->filters = filters_orig;
+ return LZMA_PROG_ERROR;
+ }
+
+ block->filters = filters_orig;
+ *out_pos += block->header_size;
+
+ // Encode the data using LZMA2 uncompressed chunks.
+ size_t in_pos = 0;
+ uint8_t control = 0x01; // Dictionary reset
+
+ while (in_pos < in_size) {
+ // Control byte: Indicate uncompressed chunk, of which
+ // the first resets the dictionary.
+ out[(*out_pos)++] = control;
+ control = 0x02; // No dictionary reset
+
+ // Size of the uncompressed chunk
+ const size_t copy_size
+ = MIN(in_size - in_pos, LZMA2_CHUNK_MAX);
+ out[(*out_pos)++] = (copy_size - 1) >> 8;
+ out[(*out_pos)++] = (copy_size - 1) & 0xFF;
+
+ // The actual data
+ assert(*out_pos + copy_size <= out_size);
+ memcpy(out + *out_pos, in + in_pos, copy_size);
+
+ in_pos += copy_size;
+ *out_pos += copy_size;
+ }
+
+ // End marker
+ out[(*out_pos)++] = 0x00;
+ assert(*out_pos <= out_size);
+
+ return LZMA_OK;
+}
+
+
+static lzma_ret
+block_encode_normal(lzma_block *block, lzma_allocator *allocator,
+ const uint8_t *in, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ // Find out the size of the Block Header.
+ block->compressed_size = lzma2_bound(in_size);
+ if (block->compressed_size == 0)
+ return LZMA_DATA_ERROR;
+
+ block->uncompressed_size = in_size;
+ return_if_error(lzma_block_header_size(block));
+
+ // Reserve space for the Block Header and skip it for now.
+ if (out_size - *out_pos <= block->header_size)
+ return LZMA_BUF_ERROR;
+
+ const size_t out_start = *out_pos;
+ *out_pos += block->header_size;
+
+ // Limit out_size so that we stop encoding if the output would grow
+ // bigger than what uncompressed Block would be.
+ if (out_size - *out_pos > block->compressed_size)
+ out_size = *out_pos + block->compressed_size;
+
+ // TODO: In many common cases this could be optimized to use
+ // significantly less memory.
+ lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT;
+ lzma_ret ret = lzma_raw_encoder_init(
+ &raw_encoder, allocator, block->filters);
+
+ if (ret == LZMA_OK) {
+ size_t in_pos = 0;
+ ret = raw_encoder.code(raw_encoder.coder, allocator,
+ in, &in_pos, in_size, out, out_pos, out_size,
+ LZMA_FINISH);
+ }
+
+ // NOTE: This needs to be run even if lzma_raw_encoder_init() failed.
+ lzma_next_end(&raw_encoder, allocator);
+
+ if (ret == LZMA_STREAM_END) {
+ // Compression was successful. Write the Block Header.
+ block->compressed_size
+ = *out_pos - (out_start + block->header_size);
+ ret = lzma_block_header_encode(block, out + out_start);
+ if (ret != LZMA_OK)
+ ret = LZMA_PROG_ERROR;
+
+ } else if (ret == LZMA_OK) {
+ // Output buffer became full.
+ ret = LZMA_BUF_ERROR;
+ }
+
+ // Reset *out_pos if something went wrong.
+ if (ret != LZMA_OK)
+ *out_pos = out_start;
+
+ return ret;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
+ const uint8_t *in, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ // Sanity checks
+ if (block == NULL || block->filters == NULL
+ || (in == NULL && in_size != 0) || out == NULL
+ || out_pos == NULL || *out_pos > out_size)
+ return LZMA_PROG_ERROR;
+
+ // Check the version field.
+ if (block->version != 0)
+ return LZMA_OPTIONS_ERROR;
+
+ // Size of a Block has to be a multiple of four, so limit the size
+ // here already. This way we don't need to check it again when adding
+ // Block Padding.
+ out_size -= (out_size - *out_pos) & 3;
+
+ // Get the size of the Check field.
+ const size_t check_size = lzma_check_size(block->check);
+ if (check_size == UINT32_MAX)
+ return LZMA_PROG_ERROR;
+
+ // Reserve space for the Check field.
+ if (out_size - *out_pos <= check_size)
+ return LZMA_BUF_ERROR;
+
+ out_size -= check_size;
+
+ // Do the actual compression.
+ const lzma_ret ret = block_encode_normal(block, allocator,
+ in, in_size, out, out_pos, out_size);
+ if (ret != LZMA_OK) {
+ // If the error was something else than output buffer
+ // becoming full, return the error now.
+ if (ret != LZMA_BUF_ERROR)
+ return ret;
+
+ // The data was uncompressible (at least with the options
+ // given to us) or the output buffer was too small. Use the
+ // uncompressed chunks of LZMA2 to wrap the data into a valid
+ // Block. If we haven't been given enough output space, even
+ // this may fail.
+ return_if_error(block_encode_uncompressed(block, in, in_size,
+ out, out_pos, out_size));
+ }
+
+ assert(*out_pos <= out_size);
+
+ // Block Padding. No buffer overflow here, because we already adjusted
+ // out_size so that (out_size - out_start) is a multiple of four.
+ // Thus, if the buffer is full, the loop body can never run.
+ for (size_t i = (size_t)(block->compressed_size); i & 3; ++i) {
+ assert(*out_pos < out_size);
+ out[(*out_pos)++] = 0x00;
+ }
+
+ // If the Block has a Check field, calculate and append it now.
+ if (check_size > 0) {
+ // Calculate the integrity check. We reserved space for
+ // the Check field earlier so we don't need to check for
+ // available output space here.
+ lzma_check_state check;
+ lzma_check_init(&check, block->check);
+ lzma_check_update(&check, block->check, in, in_size);
+ lzma_check_finish(&check, block->check);
+
+ memcpy(block->raw_check, check.buffer.u8, check_size);
+ memcpy(out + *out_pos, check.buffer.u8, check_size);
+ *out_pos += check_size;
+ }
+
+ return LZMA_OK;
+}
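
The single-call encoder pairs with lzma_block_buffer_bound(). A sketch of encoding one Block into a caller-supplied buffer; the LZMA2 preset setup and the CRC32 check are example choices, not requirements:

    #include <stdint.h>
    #include <string.h>
    #include <lzma.h>

    /* Encode in[] as one .xz Block; returns bytes written, 0 on error. */
    static size_t encode_block(const uint8_t *in, size_t in_size,
                               uint8_t *out, size_t out_capacity)
    {
        lzma_options_lzma opt;
        if (lzma_lzma_preset(&opt, LZMA_PRESET_DEFAULT))
            return 0;

        lzma_filter filters[] = {
            { .id = LZMA_FILTER_LZMA2, .options = &opt },
            { .id = LZMA_VLI_UNKNOWN },
        };

        lzma_block block;
        memset(&block, 0, sizeof(block));
        block.version = 0;
        block.check = LZMA_CHECK_CRC32;
        block.filters = filters;

        /* The bound is large enough even for incompressible input, which
         * falls back to uncompressed LZMA2 chunks as implemented above. */
        if (out_capacity < lzma_block_buffer_bound(in_size))
            return 0;

        size_t out_pos = 0;
        if (lzma_block_buffer_encode(&block, NULL, in, in_size,
                out, &out_pos, out_capacity) != LZMA_OK)
            return 0;

        return out_pos;
    }
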
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_decoder.c
new file mode 100644
index 00000000..f5a5792d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_decoder.c
@@ -0,0 +1,244 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file block_decoder.c
+/// \brief Decodes .xz Blocks
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "block_decoder.h"
+#include "filter_decoder.h"
+#include "check.h"
+
+
+struct lzma_coder_s {
+ enum {
+ SEQ_CODE,
+ SEQ_PADDING,
+ SEQ_CHECK,
+ } sequence;
+
+ /// The filters in the chain; initialized with lzma_raw_decoder_init().
+ lzma_next_coder next;
+
+ /// Decoding options; we also write Compressed Size and Uncompressed
+ /// Size back to this structure when the decoding has been finished.
+ lzma_block *block;
+
+ /// Compressed Size calculated while decoding
+ lzma_vli compressed_size;
+
+ /// Uncompressed Size calculated while decoding
+ lzma_vli uncompressed_size;
+
+ /// Maximum allowed Compressed Size; this takes into account the
+ /// size of the Block Header and Check fields when Compressed Size
+ /// is unknown.
+ lzma_vli compressed_limit;
+
+ /// Position when reading the Check field
+ size_t check_pos;
+
+ /// Check of the uncompressed data
+ lzma_check_state check;
+};
+
+
+static inline bool
+update_size(lzma_vli *size, lzma_vli add, lzma_vli limit)
+{
+ if (limit > LZMA_VLI_MAX)
+ limit = LZMA_VLI_MAX;
+
+ if (limit < *size || limit - *size < add)
+ return true;
+
+ *size += add;
+
+ return false;
+}
+
+
+static inline bool
+is_size_valid(lzma_vli size, lzma_vli reference)
+{
+ return reference == LZMA_VLI_UNKNOWN || reference == size;
+}
+
+
+static lzma_ret
+block_decode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ switch (coder->sequence) {
+ case SEQ_CODE: {
+ const size_t in_start = *in_pos;
+ const size_t out_start = *out_pos;
+
+ const lzma_ret ret = coder->next.code(coder->next.coder,
+ allocator, in, in_pos, in_size,
+ out, out_pos, out_size, action);
+
+ const size_t in_used = *in_pos - in_start;
+ const size_t out_used = *out_pos - out_start;
+
+ // NOTE: We compare to compressed_limit here, which prevents
+ // the total size of the Block growing past LZMA_VLI_MAX.
+ if (update_size(&coder->compressed_size, in_used,
+ coder->compressed_limit)
+ || update_size(&coder->uncompressed_size,
+ out_used,
+ coder->block->uncompressed_size))
+ return LZMA_DATA_ERROR;
+
+ lzma_check_update(&coder->check, coder->block->check,
+ out + out_start, out_used);
+
+ if (ret != LZMA_STREAM_END)
+ return ret;
+
+ // Compressed and Uncompressed Sizes are now at their final
+ // values. Verify that they match the values given to us.
+ if (!is_size_valid(coder->compressed_size,
+ coder->block->compressed_size)
+ || !is_size_valid(coder->uncompressed_size,
+ coder->block->uncompressed_size))
+ return LZMA_DATA_ERROR;
+
+ // Copy the values into coder->block. The caller
+ // may use this information to construct Index.
+ coder->block->compressed_size = coder->compressed_size;
+ coder->block->uncompressed_size = coder->uncompressed_size;
+
+ coder->sequence = SEQ_PADDING;
+ }
+
+ // Fall through
+
+ case SEQ_PADDING:
+ // Compressed Data is padded to a multiple of four bytes.
+ while (coder->compressed_size & 3) {
+ if (*in_pos >= in_size)
+ return LZMA_OK;
+
+ // We use compressed_size here just to get the Padding
+ // right. The actual Compressed Size was stored to
+ // coder->block already, and won't be modified by
+ // us anymore.
+ ++coder->compressed_size;
+
+ if (in[(*in_pos)++] != 0x00)
+ return LZMA_DATA_ERROR;
+ }
+
+ if (coder->block->check == LZMA_CHECK_NONE)
+ return LZMA_STREAM_END;
+
+ lzma_check_finish(&coder->check, coder->block->check);
+ coder->sequence = SEQ_CHECK;
+
+ // Fall through
+
+ case SEQ_CHECK: {
+ const size_t check_size = lzma_check_size(coder->block->check);
+ lzma_bufcpy(in, in_pos, in_size, coder->block->raw_check,
+ &coder->check_pos, check_size);
+ if (coder->check_pos < check_size)
+ return LZMA_OK;
+
+ // Validate the Check only if we support it.
+ // coder->check.buffer may be uninitialized
+ // when the Check ID is not supported.
+ if (lzma_check_is_supported(coder->block->check)
+ && memcmp(coder->block->raw_check,
+ coder->check.buffer.u8,
+ check_size) != 0)
+ return LZMA_DATA_ERROR;
+
+ return LZMA_STREAM_END;
+ }
+ }
+
+ return LZMA_PROG_ERROR;
+}
+
+
+static void
+block_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+extern lzma_ret
+lzma_block_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ lzma_block *block)
+{
+ lzma_next_coder_init(&lzma_block_decoder_init, next, allocator);
+
+ // Validate the options. lzma_block_unpadded_size() does that for us
+ // except for Uncompressed Size and filters. Filters are validated
+ // by the raw decoder.
+ if (lzma_block_unpadded_size(block) == 0
+ || !lzma_vli_is_valid(block->uncompressed_size))
+ return LZMA_PROG_ERROR;
+
+ // Allocate and initialize *next->coder if needed.
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &block_decode;
+ next->end = &block_decoder_end;
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ }
+
+ // Basic initializations
+ next->coder->sequence = SEQ_CODE;
+ next->coder->block = block;
+ next->coder->compressed_size = 0;
+ next->coder->uncompressed_size = 0;
+
+ // If Compressed Size is not known, we calculate the maximum allowed
+ // value so that encoded size of the Block (including Block Padding)
+ // is still a valid VLI and a multiple of four.
+ next->coder->compressed_limit
+ = block->compressed_size == LZMA_VLI_UNKNOWN
+ ? (LZMA_VLI_MAX & ~LZMA_VLI_C(3))
+ - block->header_size
+ - lzma_check_size(block->check)
+ : block->compressed_size;
+
+ // Initialize the check. It's caller's problem if the Check ID is not
+ // supported, and the Block decoder cannot verify the Check field.
+ // Caller can test lzma_check_is_supported(block->check).
+ next->coder->check_pos = 0;
+ lzma_check_init(&next->coder->check, block->check);
+
+ // Initialize the filter chain.
+ return lzma_raw_decoder_init(&next->coder->next, allocator,
+ block->filters);
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_block_decoder(lzma_stream *strm, lzma_block *block)
+{
+ lzma_next_strm_init(lzma_block_decoder_init, strm, block);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+ strm->internal->supported_actions[LZMA_FINISH] = true;
+
+ return LZMA_OK;
+}
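
The multi-call Block decoder wraps the same state machine in a lzma_stream. A sketch, again assuming blk was filled in by lzma_block_header_decode() beforehand:

    #include <lzma.h>

    static int decode_block_stream(lzma_block *blk,
            const uint8_t *in, size_t in_size,
            uint8_t *out, size_t out_size)
    {
        lzma_stream strm = LZMA_STREAM_INIT;
        if (lzma_block_decoder(&strm, blk) != LZMA_OK)
            return -1;

        strm.next_in = in;
        strm.avail_in = in_size;
        strm.next_out = out;
        strm.avail_out = out_size;

        /* On LZMA_STREAM_END, blk->compressed_size and
         * blk->uncompressed_size hold the decoded sizes (see above). */
        const lzma_ret ret = lzma_code(&strm, LZMA_FINISH);
        lzma_end(&strm);

        return ret == LZMA_STREAM_END ? 0 : -1;
    }
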
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_decoder.h
new file mode 100644
index 00000000..f2a14042
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_decoder.h
@@ -0,0 +1,24 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file block_decoder.h
+/// \brief Decodes .xz Blocks
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_BLOCK_DECODER_H
+#define LZMA_BLOCK_DECODER_H
+
+#include "common.h"
+
+
+extern lzma_ret lzma_block_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, lzma_block *block);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_encoder.c
new file mode 100644
index 00000000..a6ee62b0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_encoder.c
@@ -0,0 +1,200 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file block_encoder.c
+/// \brief Encodes .xz Blocks
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "block_encoder.h"
+#include "filter_encoder.h"
+#include "check.h"
+
+
+struct lzma_coder_s {
+ /// The filters in the chain; initialized with lzma_raw_encoder_init().
+ lzma_next_coder next;
+
+ /// Encoding options; we also write Unpadded Size, Compressed Size,
+ /// and Uncompressed Size back to this structure when the encoding
+ /// has been finished.
+ lzma_block *block;
+
+ enum {
+ SEQ_CODE,
+ SEQ_PADDING,
+ SEQ_CHECK,
+ } sequence;
+
+ /// Compressed Size calculated while encoding
+ lzma_vli compressed_size;
+
+ /// Uncompressed Size calculated while encoding
+ lzma_vli uncompressed_size;
+
+ /// Position in the Check field
+ size_t pos;
+
+ /// Check of the uncompressed data
+ lzma_check_state check;
+};
+
+
+static lzma_ret
+block_encode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ // Check that our amount of input stays in proper limits.
+ if (LZMA_VLI_MAX - coder->uncompressed_size < in_size - *in_pos)
+ return LZMA_DATA_ERROR;
+
+ switch (coder->sequence) {
+ case SEQ_CODE: {
+ const size_t in_start = *in_pos;
+ const size_t out_start = *out_pos;
+
+ const lzma_ret ret = coder->next.code(coder->next.coder,
+ allocator, in, in_pos, in_size,
+ out, out_pos, out_size, action);
+
+ const size_t in_used = *in_pos - in_start;
+ const size_t out_used = *out_pos - out_start;
+
+ if (COMPRESSED_SIZE_MAX - coder->compressed_size < out_used)
+ return LZMA_DATA_ERROR;
+
+ coder->compressed_size += out_used;
+
+ // No need to check for overflow because we have already
+ // checked it at the beginning of this function.
+ coder->uncompressed_size += in_used;
+
+ lzma_check_update(&coder->check, coder->block->check,
+ in + in_start, in_used);
+
+ if (ret != LZMA_STREAM_END || action == LZMA_SYNC_FLUSH)
+ return ret;
+
+ assert(*in_pos == in_size);
+ assert(action == LZMA_FINISH);
+
+ // Copy the values into coder->block. The caller
+ // may use this information to construct Index.
+ coder->block->compressed_size = coder->compressed_size;
+ coder->block->uncompressed_size = coder->uncompressed_size;
+
+ coder->sequence = SEQ_PADDING;
+ }
+
+ // Fall through
+
+ case SEQ_PADDING:
+ // Pad Compressed Data to a multiple of four bytes. We can
+ // use coder->compressed_size for this since we don't need
+ // it for anything else anymore.
+ while (coder->compressed_size & 3) {
+ if (*out_pos >= out_size)
+ return LZMA_OK;
+
+ out[*out_pos] = 0x00;
+ ++*out_pos;
+ ++coder->compressed_size;
+ }
+
+ if (coder->block->check == LZMA_CHECK_NONE)
+ return LZMA_STREAM_END;
+
+ lzma_check_finish(&coder->check, coder->block->check);
+
+ coder->sequence = SEQ_CHECK;
+
+ // Fall through
+
+ case SEQ_CHECK: {
+ const size_t check_size = lzma_check_size(coder->block->check);
+ lzma_bufcpy(coder->check.buffer.u8, &coder->pos, check_size,
+ out, out_pos, out_size);
+ if (coder->pos < check_size)
+ return LZMA_OK;
+
+ memcpy(coder->block->raw_check, coder->check.buffer.u8,
+ check_size);
+ return LZMA_STREAM_END;
+ }
+ }
+
+ return LZMA_PROG_ERROR;
+}
+
+
+static void
+block_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+extern lzma_ret
+lzma_block_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ lzma_block *block)
+{
+ lzma_next_coder_init(&lzma_block_encoder_init, next, allocator);
+
+ if (block->version != 0)
+ return LZMA_OPTIONS_ERROR;
+
+ // If the Check ID is not supported, we cannot calculate the check and
+ // thus not create a proper Block.
+ if ((unsigned int)(block->check) > LZMA_CHECK_ID_MAX)
+ return LZMA_PROG_ERROR;
+
+ if (!lzma_check_is_supported(block->check))
+ return LZMA_UNSUPPORTED_CHECK;
+
+ // Allocate and initialize *next->coder if needed.
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &block_encode;
+ next->end = &block_encoder_end;
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ }
+
+ // Basic initializations
+ next->coder->sequence = SEQ_CODE;
+ next->coder->block = block;
+ next->coder->compressed_size = 0;
+ next->coder->uncompressed_size = 0;
+ next->coder->pos = 0;
+
+ // Initialize the check
+ lzma_check_init(&next->coder->check, block->check);
+
+ // Initialize the requested filters.
+ return lzma_raw_encoder_init(&next->coder->next, allocator,
+ block->filters);
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_block_encoder(lzma_stream *strm, lzma_block *block)
+{
+ lzma_next_strm_init(lzma_block_encoder_init, strm, block);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+ strm->internal->supported_actions[LZMA_FINISH] = true;
+
+ return LZMA_OK;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_encoder.h
new file mode 100644
index 00000000..36cd0fe7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_encoder.h
@@ -0,0 +1,49 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file block_encoder.h
+/// \brief Encodes .xz Blocks
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_BLOCK_ENCODER_H
+#define LZMA_BLOCK_ENCODER_H
+
+#include "common.h"
+
+
+/// \brief Biggest Compressed Size value that the Block encoder supports
+///
+/// The maximum size of a single Block is limited by the maximum size of
+/// a Stream, which in theory is 2^63 - 3 bytes (i.e. LZMA_VLI_MAX - 3).
+/// While the size is really big and no one should hit it in practice, we
+/// take it into account in some places anyway to catch some errors e.g. if
+/// application passes insanely big value to some function.
+///
+/// We could take into account the headers etc. to determine the exact
+/// maximum size of the Compressed Data field, but the complexity would give
+/// us nothing useful. Instead, limit the size of Compressed Data so that
+/// even with biggest possible Block Header and Check fields the total
+/// encoded size of the Block stays as a valid VLI. This doesn't guarantee
+/// that the size of the Stream doesn't grow too big, but that problem is
+/// taken care outside the Block handling code.
+///
+/// ~LZMA_VLI_C(3) is to guarantee that if we need padding at the end of
+/// the Compressed Data field, it will still stay in the proper limit.
+///
+/// This constant is in this file because it is needed in both
+/// block_encoder.c and block_buffer_encoder.c.
+#define COMPRESSED_SIZE_MAX ((LZMA_VLI_MAX - LZMA_BLOCK_HEADER_SIZE_MAX \
+ - LZMA_CHECK_SIZE_MAX) & ~LZMA_VLI_C(3))
+
+
+extern lzma_ret lzma_block_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, lzma_block *block);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_header_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_header_decoder.c
new file mode 100644
index 00000000..6b0829cf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_header_decoder.c
@@ -0,0 +1,118 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file block_header_decoder.c
+/// \brief Decodes Block Header from .xz files
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+#include "check.h"
+
+
+static void
+free_properties(lzma_block *block, lzma_allocator *allocator)
+{
+ // Free allocated filter options. The last array member is not
+ // touched after the initialization in the beginning of
+ // lzma_block_header_decode(), so we don't need to touch that here.
+ for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i) {
+ lzma_free(block->filters[i].options, allocator);
+ block->filters[i].id = LZMA_VLI_UNKNOWN;
+ block->filters[i].options = NULL;
+ }
+
+ return;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_block_header_decode(lzma_block *block,
+ lzma_allocator *allocator, const uint8_t *in)
+{
+ // NOTE: We consider the header to be corrupt not only when the
+ // CRC32 doesn't match, but also when variable-length integers
+ // are invalid or over 63 bits, or if the header is too small
+ // to contain the claimed information.
+
+ // Initialize the filter options array. This way the caller can
+ // safely free() the options even if an error occurs in this function.
+ for (size_t i = 0; i <= LZMA_FILTERS_MAX; ++i) {
+ block->filters[i].id = LZMA_VLI_UNKNOWN;
+ block->filters[i].options = NULL;
+ }
+
+ // Always zero for now.
+ block->version = 0;
+
+ // Validate Block Header Size and Check type. The caller must have
+ // already set these, so it is a programming error if this test fails.
+ if (lzma_block_header_size_decode(in[0]) != block->header_size
+ || (unsigned int)(block->check) > LZMA_CHECK_ID_MAX)
+ return LZMA_PROG_ERROR;
+
+ // Exclude the CRC32 field.
+ const size_t in_size = block->header_size - 4;
+
+ // Verify CRC32
+ if (lzma_crc32(in, in_size, 0) != integer_read_32(in + in_size))
+ return LZMA_DATA_ERROR;
+
+ // Check for unsupported flags.
+ if (in[1] & 0x3C)
+ return LZMA_OPTIONS_ERROR;
+
+ // Start after the Block Header Size and Block Flags fields.
+ size_t in_pos = 2;
+
+ // Compressed Size
+ if (in[1] & 0x40) {
+ return_if_error(lzma_vli_decode(&block->compressed_size,
+ NULL, in, &in_pos, in_size));
+
+ // Validate Compressed Size. This checks that it isn't zero
+ // and that the total size of the Block is a valid VLI.
+ if (lzma_block_unpadded_size(block) == 0)
+ return LZMA_DATA_ERROR;
+ } else {
+ block->compressed_size = LZMA_VLI_UNKNOWN;
+ }
+
+ // Uncompressed Size
+ if (in[1] & 0x80)
+ return_if_error(lzma_vli_decode(&block->uncompressed_size,
+ NULL, in, &in_pos, in_size));
+ else
+ block->uncompressed_size = LZMA_VLI_UNKNOWN;
+
+ // Filter Flags
+ const size_t filter_count = (in[1] & 3) + 1;
+ for (size_t i = 0; i < filter_count; ++i) {
+ const lzma_ret ret = lzma_filter_flags_decode(
+ &block->filters[i], allocator,
+ in, &in_pos, in_size);
+ if (ret != LZMA_OK) {
+ free_properties(block, allocator);
+ return ret;
+ }
+ }
+
+ // Padding
+ while (in_pos < in_size) {
+ if (in[in_pos++] != 0x00) {
+ free_properties(block, allocator);
+
+ // Possibly some new field present so use
+ // LZMA_OPTIONS_ERROR instead of LZMA_DATA_ERROR.
+ return LZMA_OPTIONS_ERROR;
+ }
+ }
+
+ return LZMA_OK;
+}
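
Using this is a two-step operation: the first header byte encodes the header size (via the public lzma_block_header_size_decode() macro referenced above), the caller reads that many bytes, then decodes. A sketch; the filters array must have room for LZMA_FILTERS_MAX + 1 entries, and any decoded filter options belong to the caller afterwards:

    #include <string.h>
    #include <lzma.h>

    /* Parse a Block Header starting at in[]; returns 0 on success. */
    static int parse_block_header(lzma_block *blk, lzma_filter *filters,
            const uint8_t *in, size_t in_avail, lzma_check check)
    {
        memset(blk, 0, sizeof(*blk));
        blk->version = 0;
        blk->check = check;       /* known from the Stream Flags */
        blk->filters = filters;   /* LZMA_FILTERS_MAX + 1 elements */

        blk->header_size = lzma_block_header_size_decode(in[0]);
        if (in_avail < blk->header_size)
            return -1;

        /* On success, filters[i].options may point to allocated memory
         * that the caller must eventually free. */
        return lzma_block_header_decode(blk, NULL, in) == LZMA_OK ? 0 : -1;
    }
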
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_header_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_header_encoder.c
new file mode 100644
index 00000000..b427bfa4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_header_encoder.c
@@ -0,0 +1,134 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file block_header_encoder.c
+/// \brief Encodes Block Header for .xz files
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+#include "check.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_block_header_size(lzma_block *block)
+{
+ if (block->version != 0)
+ return LZMA_OPTIONS_ERROR;
+
+ // Block Header Size + Block Flags + CRC32.
+ uint32_t size = 1 + 1 + 4;
+
+ // Compressed Size
+ if (block->compressed_size != LZMA_VLI_UNKNOWN) {
+ const uint32_t add = lzma_vli_size(block->compressed_size);
+ if (add == 0 || block->compressed_size == 0)
+ return LZMA_PROG_ERROR;
+
+ size += add;
+ }
+
+ // Uncompressed Size
+ if (block->uncompressed_size != LZMA_VLI_UNKNOWN) {
+ const uint32_t add = lzma_vli_size(block->uncompressed_size);
+ if (add == 0)
+ return LZMA_PROG_ERROR;
+
+ size += add;
+ }
+
+ // List of Filter Flags
+ if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
+ return LZMA_PROG_ERROR;
+
+ for (size_t i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
+ // Don't allow too many filters.
+ if (i == LZMA_FILTERS_MAX)
+ return LZMA_PROG_ERROR;
+
+ uint32_t add;
+ return_if_error(lzma_filter_flags_size(&add,
+ block->filters + i));
+
+ size += add;
+ }
+
+ // Pad to a multiple of four bytes.
+ block->header_size = (size + 3) & ~UINT32_C(3);
+
+ // NOTE: We don't verify that the encoded size of the Block stays
+ // within limits. This is because it is possible that we are called
+ // with exaggerated Compressed Size (e.g. LZMA_VLI_MAX) to reserve
+ // space for Block Header, and later called again with lower,
+ // real values.
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_block_header_encode(const lzma_block *block, uint8_t *out)
+{
+ // Validate everything but the filters.
+ if (lzma_block_unpadded_size(block) == 0
+ || !lzma_vli_is_valid(block->uncompressed_size))
+ return LZMA_PROG_ERROR;
+
+ // Indicate the size of the buffer _excluding_ the CRC32 field.
+ const size_t out_size = block->header_size - 4;
+
+ // Store the Block Header Size.
+ out[0] = out_size / 4;
+
+ // We write Block Flags in pieces.
+ out[1] = 0x00;
+ size_t out_pos = 2;
+
+ // Compressed Size
+ if (block->compressed_size != LZMA_VLI_UNKNOWN) {
+ return_if_error(lzma_vli_encode(block->compressed_size, NULL,
+ out, &out_pos, out_size));
+
+ out[1] |= 0x40;
+ }
+
+ // Uncompressed Size
+ if (block->uncompressed_size != LZMA_VLI_UNKNOWN) {
+ return_if_error(lzma_vli_encode(block->uncompressed_size, NULL,
+ out, &out_pos, out_size));
+
+ out[1] |= 0x80;
+ }
+
+ // Filter Flags
+ if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
+ return LZMA_PROG_ERROR;
+
+ size_t filter_count = 0;
+ do {
+ // There can be a maximum of four filters.
+ if (filter_count == LZMA_FILTERS_MAX)
+ return LZMA_PROG_ERROR;
+
+ return_if_error(lzma_filter_flags_encode(
+ block->filters + filter_count,
+ out, &out_pos, out_size));
+
+ } while (block->filters[++filter_count].id != LZMA_VLI_UNKNOWN);
+
+ out[1] |= filter_count - 1;
+
+ // Padding
+ memzero(out + out_pos, out_size - out_pos);
+
+ // CRC32
+ integer_write_32(out + out_size, lzma_crc32(out, out_size, 0));
+
+ return LZMA_OK;
+}
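
Encoding is the mirror image: lzma_block_header_size() fills in block->header_size (padded to a multiple of four), then lzma_block_header_encode() writes exactly that many bytes, CRC32 included. A minimal sketch:

    #include <stdint.h>
    #include <lzma.h>

    /* Returns the number of header bytes written to out[], or 0 on error. */
    static size_t write_block_header(lzma_block *blk,
            uint8_t *out, size_t out_capacity)
    {
        if (lzma_block_header_size(blk) != LZMA_OK)
            return 0;

        if (out_capacity < blk->header_size)
            return 0;

        if (lzma_block_header_encode(blk, out) != LZMA_OK)
            return 0;

        return blk->header_size;
    }
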
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_util.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_util.c
new file mode 100644
index 00000000..b35e1268
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/block_util.c
@@ -0,0 +1,92 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file block_util.c
+/// \brief Utility functions to handle lzma_block
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+#include "index.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_block_compressed_size(lzma_block *block, lzma_vli total_size)
+{
+ // Validate everything but Uncompressed Size and filters.
+ if (lzma_block_unpadded_size(block) == 0)
+ return LZMA_PROG_ERROR;
+
+ const uint32_t container_size = block->header_size
+ + lzma_check_size(block->check);
+
+ // Validate that Compressed Size will be greater than zero.
+ if (total_size <= container_size)
+ return LZMA_DATA_ERROR;
+
+ // Calculate what Compressed Size is supposed to be.
+ // If Compressed Size was present in Block Header,
+ // compare that the new value matches it.
+ const lzma_vli compressed_size = total_size - container_size;
+ if (block->compressed_size != LZMA_VLI_UNKNOWN
+ && block->compressed_size != compressed_size)
+ return LZMA_DATA_ERROR;
+
+ block->compressed_size = compressed_size;
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_vli)
+lzma_block_unpadded_size(const lzma_block *block)
+{
+ // Validate the values that we are interested in i.e. all but
+ // Uncompressed Size and the filters.
+ //
+ // NOTE: This function is used for validation too, so it is
+ // essential that these checks are always done even if
+ // Compressed Size is unknown.
+ if (block == NULL || block->version != 0
+ || block->header_size < LZMA_BLOCK_HEADER_SIZE_MIN
+ || block->header_size > LZMA_BLOCK_HEADER_SIZE_MAX
+ || (block->header_size & 3)
+ || !lzma_vli_is_valid(block->compressed_size)
+ || block->compressed_size == 0
+ || (unsigned int)(block->check) > LZMA_CHECK_ID_MAX)
+ return 0;
+
+ // If Compressed Size is unknown, return that we cannot know
+ // size of the Block either.
+ if (block->compressed_size == LZMA_VLI_UNKNOWN)
+ return LZMA_VLI_UNKNOWN;
+
+ // Calculate Unpadded Size and validate it.
+ const lzma_vli unpadded_size = block->compressed_size
+ + block->header_size
+ + lzma_check_size(block->check);
+
+ assert(unpadded_size >= UNPADDED_SIZE_MIN);
+ if (unpadded_size > UNPADDED_SIZE_MAX)
+ return 0;
+
+ return unpadded_size;
+}
+
+
+extern LZMA_API(lzma_vli)
+lzma_block_total_size(const lzma_block *block)
+{
+ lzma_vli unpadded_size = lzma_block_unpadded_size(block);
+
+ if (unpadded_size != LZMA_VLI_UNKNOWN)
+ unpadded_size = vli_ceil4(unpadded_size);
+
+ return unpadded_size;
+}
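
The size helpers relate as: Unpadded Size = Block Header + Compressed Data + Check, and Total Size rounds that up to the next multiple of four for the Block Padding. A small consistency sketch, under the assumption that blk carries a known, valid Compressed Size:

    #include <assert.h>
    #include <lzma.h>

    static void check_block_sizes(const lzma_block *blk)
    {
        const lzma_vli unpadded = lzma_block_unpadded_size(blk);
        const lzma_vli total = lzma_block_total_size(blk);

        /* 0 means the fields are invalid; LZMA_VLI_UNKNOWN means
         * Compressed Size was not set. Otherwise Total Size is the
         * Unpadded Size rounded up to a multiple of four. */
        if (unpadded != 0 && unpadded != LZMA_VLI_UNKNOWN)
            assert(total == ((unpadded + 3) & ~(lzma_vli)3));
    }
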
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/bsr.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/bsr.h
new file mode 100644
index 00000000..749f659a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/bsr.h
@@ -0,0 +1,62 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file bsr.h
+/// \brief Bit scan reverse
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_BSR_H
+#define LZMA_BSR_H
+
+// NOTE: Both input and output variables for lzma_bsr must be uint32_t.
+
+#if defined(__GNUC__) && (defined (HAVE_ASM_X86) || defined(HAVE_ASM_X86_64))
+# define lzma_bsr(dest, n) \
+ __asm__("bsrl %1, %0" : "=r" (dest) : "rm" (n))
+
+#else
+# define lzma_bsr(dest, n) dest = lzma_bsr_helper(n)
+
+static inline uint32_t
+lzma_bsr_helper(uint32_t n)
+{
+ assert(n != 0);
+
+ uint32_t i = 31;
+
+ if ((n & UINT32_C(0xFFFF0000)) == 0) {
+ n <<= 16;
+ i = 15;
+ }
+
+ if ((n & UINT32_C(0xFF000000)) == 0) {
+ n <<= 8;
+ i -= 8;
+ }
+
+ if ((n & UINT32_C(0xF0000000)) == 0) {
+ n <<= 4;
+ i -= 4;
+ }
+
+ if ((n & UINT32_C(0xC0000000)) == 0) {
+ n <<= 2;
+ i -= 2;
+ }
+
+ if ((n & UINT32_C(0x80000000)) == 0)
+ --i;
+
+ return i;
+}
+
+#endif
+
+#endif
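
lzma_bsr() yields the index of the highest set bit (0..31), via the x86 BSR instruction when available and the shift cascade above otherwise. A loop-based reference, useful only as a self-check of the expected results:

    #include <assert.h>
    #include <stdint.h>

    /* Index of the highest set bit of a nonzero 32-bit value. */
    static uint32_t bsr_ref(uint32_t n)
    {
        uint32_t i = 0;
        while (n >>= 1)
            ++i;
        return i;
    }

    int main(void)
    {
        /* The values lzma_bsr(dest, n) is expected to produce. */
        assert(bsr_ref(1) == 0);
        assert(bsr_ref(0x8000) == 15);
        assert(bsr_ref(UINT32_C(0x80000000)) == 31);
        return 0;
    }
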
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/common.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/common.c
new file mode 100644
index 00000000..4a427068
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/common.c
@@ -0,0 +1,357 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file common.c
+/// \brief Common functions needed in many places in liblzma
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+
+
+/////////////
+// Version //
+/////////////
+
+extern LZMA_API(uint32_t)
+lzma_version_number(void)
+{
+ return LZMA_VERSION;
+}
+
+
+extern LZMA_API(const char *)
+lzma_version_string(void)
+{
+ return LZMA_VERSION_STRING;
+}
+
+
+///////////////////////
+// Memory allocation //
+///////////////////////
+
+extern void * lzma_attribute((malloc))
+lzma_alloc(size_t size, lzma_allocator *allocator)
+{
+ // Some malloc() variants return NULL if called with size == 0.
+ if (size == 0)
+ size = 1;
+
+ void *ptr;
+
+ if (allocator != NULL && allocator->alloc != NULL)
+ ptr = allocator->alloc(allocator->opaque, 1, size);
+ else
+ ptr = malloc(size);
+
+ return ptr;
+}
+
+
+extern void
+lzma_free(void *ptr, lzma_allocator *allocator)
+{
+ if (allocator != NULL && allocator->free != NULL)
+ allocator->free(allocator->opaque, ptr);
+ else
+ free(ptr);
+
+ return;
+}
+
+
+//////////
+// Misc //
+//////////
+
+extern size_t
+lzma_bufcpy(const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size)
+{
+ const size_t in_avail = in_size - *in_pos;
+ const size_t out_avail = out_size - *out_pos;
+ const size_t copy_size = MIN(in_avail, out_avail);
+
+ memcpy(out + *out_pos, in + *in_pos, copy_size);
+
+ *in_pos += copy_size;
+ *out_pos += copy_size;
+
+ return copy_size;
+}
+
+
+extern lzma_ret
+lzma_next_filter_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ lzma_next_coder_init(filters[0].init, next, allocator);
+
+ return filters[0].init == NULL
+ ? LZMA_OK : filters[0].init(next, allocator, filters);
+}
+
+
+extern void
+lzma_next_end(lzma_next_coder *next, lzma_allocator *allocator)
+{
+ if (next->init != (uintptr_t)(NULL)) {
+ // To avoid tiny end functions that simply call
+ // lzma_free(coder, allocator), we allow leaving next->end
+ // NULL and call lzma_free() here.
+ if (next->end != NULL)
+ next->end(next->coder, allocator);
+ else
+ lzma_free(next->coder, allocator);
+
+ // Reset the variables so that we don't accidentally think
+ // that it is an already initialized coder.
+ *next = LZMA_NEXT_CODER_INIT;
+ }
+
+ return;
+}
+
+
+//////////////////////////////////////
+// External to internal API wrapper //
+//////////////////////////////////////
+
+extern lzma_ret
+lzma_strm_init(lzma_stream *strm)
+{
+ if (strm == NULL)
+ return LZMA_PROG_ERROR;
+
+ if (strm->internal == NULL) {
+ strm->internal = lzma_alloc(sizeof(lzma_internal),
+ strm->allocator);
+ if (strm->internal == NULL)
+ return LZMA_MEM_ERROR;
+
+ strm->internal->next = LZMA_NEXT_CODER_INIT;
+ }
+
+ strm->internal->supported_actions[LZMA_RUN] = false;
+ strm->internal->supported_actions[LZMA_SYNC_FLUSH] = false;
+ strm->internal->supported_actions[LZMA_FULL_FLUSH] = false;
+ strm->internal->supported_actions[LZMA_FINISH] = false;
+ strm->internal->sequence = ISEQ_RUN;
+
+ strm->total_in = 0;
+ strm->total_out = 0;
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_code(lzma_stream *strm, lzma_action action)
+{
+ // Sanity checks
+ if ((strm->next_in == NULL && strm->avail_in != 0)
+ || (strm->next_out == NULL && strm->avail_out != 0)
+ || strm->internal == NULL
+ || strm->internal->next.code == NULL
+ || (unsigned int)(action) > LZMA_FINISH
+ || !strm->internal->supported_actions[action])
+ return LZMA_PROG_ERROR;
+
+ switch (strm->internal->sequence) {
+ case ISEQ_RUN:
+ switch (action) {
+ case LZMA_RUN:
+ break;
+
+ case LZMA_SYNC_FLUSH:
+ strm->internal->sequence = ISEQ_SYNC_FLUSH;
+ break;
+
+ case LZMA_FULL_FLUSH:
+ strm->internal->sequence = ISEQ_FULL_FLUSH;
+ break;
+
+ case LZMA_FINISH:
+ strm->internal->sequence = ISEQ_FINISH;
+ break;
+ }
+
+ break;
+
+ case ISEQ_SYNC_FLUSH:
+ // The same action must be used until we return
+ // LZMA_STREAM_END, and the amount of input must not change.
+ if (action != LZMA_SYNC_FLUSH
+ || strm->internal->avail_in != strm->avail_in)
+ return LZMA_PROG_ERROR;
+
+ break;
+
+ case ISEQ_FULL_FLUSH:
+ if (action != LZMA_FULL_FLUSH
+ || strm->internal->avail_in != strm->avail_in)
+ return LZMA_PROG_ERROR;
+
+ break;
+
+ case ISEQ_FINISH:
+ if (action != LZMA_FINISH
+ || strm->internal->avail_in != strm->avail_in)
+ return LZMA_PROG_ERROR;
+
+ break;
+
+ case ISEQ_END:
+ return LZMA_STREAM_END;
+
+ case ISEQ_ERROR:
+ default:
+ return LZMA_PROG_ERROR;
+ }
+
+ size_t in_pos = 0;
+ size_t out_pos = 0;
+ lzma_ret ret = strm->internal->next.code(
+ strm->internal->next.coder, strm->allocator,
+ strm->next_in, &in_pos, strm->avail_in,
+ strm->next_out, &out_pos, strm->avail_out, action);
+
+ strm->next_in += in_pos;
+ strm->avail_in -= in_pos;
+ strm->total_in += in_pos;
+
+ strm->next_out += out_pos;
+ strm->avail_out -= out_pos;
+ strm->total_out += out_pos;
+
+ strm->internal->avail_in = strm->avail_in;
+
+ switch (ret) {
+ case LZMA_OK:
+ // Don't return LZMA_BUF_ERROR when it happens the first time.
+ // This is to avoid returning LZMA_BUF_ERROR when avail_out
+ // was zero but there was no more data left to be written
+ // to next_out.
+ if (out_pos == 0 && in_pos == 0) {
+ if (strm->internal->allow_buf_error)
+ ret = LZMA_BUF_ERROR;
+ else
+ strm->internal->allow_buf_error = true;
+ } else {
+ strm->internal->allow_buf_error = false;
+ }
+ break;
+
+ case LZMA_STREAM_END:
+ if (strm->internal->sequence == ISEQ_SYNC_FLUSH
+ || strm->internal->sequence == ISEQ_FULL_FLUSH)
+ strm->internal->sequence = ISEQ_RUN;
+ else
+ strm->internal->sequence = ISEQ_END;
+
+ // Fall through
+
+ case LZMA_NO_CHECK:
+ case LZMA_UNSUPPORTED_CHECK:
+ case LZMA_GET_CHECK:
+ case LZMA_MEMLIMIT_ERROR:
+ // Something other than LZMA_OK, but not a fatal error,
+ // that is, coding may be continued (except if ISEQ_END).
+ strm->internal->allow_buf_error = false;
+ break;
+
+ default:
+ // All the other errors are fatal; coding cannot be continued.
+ assert(ret != LZMA_BUF_ERROR);
+ strm->internal->sequence = ISEQ_ERROR;
+ break;
+ }
+
+ return ret;
+}
+
+
+extern LZMA_API(void)
+lzma_end(lzma_stream *strm)
+{
+ if (strm != NULL && strm->internal != NULL) {
+ lzma_next_end(&strm->internal->next, strm->allocator);
+ lzma_free(strm->internal, strm->allocator);
+ strm->internal = NULL;
+ }
+
+ return;
+}
+
+
+extern LZMA_API(lzma_check)
+lzma_get_check(const lzma_stream *strm)
+{
+ // Return LZMA_CHECK_NONE if we cannot know the check type.
+ // It's a bug in the application if this happens.
+ if (strm->internal->next.get_check == NULL)
+ return LZMA_CHECK_NONE;
+
+ return strm->internal->next.get_check(strm->internal->next.coder);
+}
+
+
+extern LZMA_API(uint64_t)
+lzma_memusage(const lzma_stream *strm)
+{
+ uint64_t memusage;
+ uint64_t old_memlimit;
+
+ if (strm == NULL || strm->internal == NULL
+ || strm->internal->next.memconfig == NULL
+ || strm->internal->next.memconfig(
+ strm->internal->next.coder,
+ &memusage, &old_memlimit, 0) != LZMA_OK)
+ return 0;
+
+ return memusage;
+}
+
+
+extern LZMA_API(uint64_t)
+lzma_memlimit_get(const lzma_stream *strm)
+{
+ uint64_t old_memlimit;
+ uint64_t memusage;
+
+ if (strm == NULL || strm->internal == NULL
+ || strm->internal->next.memconfig == NULL
+ || strm->internal->next.memconfig(
+ strm->internal->next.coder,
+ &memusage, &old_memlimit, 0) != LZMA_OK)
+ return 0;
+
+ return old_memlimit;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_memlimit_set(lzma_stream *strm, uint64_t new_memlimit)
+{
+ // Dummy variables to simplify memconfig functions
+ uint64_t old_memlimit;
+ uint64_t memusage;
+
+ if (strm == NULL || strm->internal == NULL
+ || strm->internal->next.memconfig == NULL)
+ return LZMA_PROG_ERROR;
+
+ if (new_memlimit != 0 && new_memlimit < LZMA_MEMUSAGE_BASE)
+ return LZMA_MEMLIMIT_ERROR;
+
+ return strm->internal->next.memconfig(strm->internal->next.coder,
+ &memusage, &old_memlimit, new_memlimit);
+}
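
Illustration (not part of the imported sources): the sequence/action checks in lzma_code() above correspond to the usual caller-side loop. A minimal, hypothetical driver (I/O and error handling simplified) for a stream initialized elsewhere, e.g. with lzma_easy_encoder():

    #include <stdio.h>
    #include "lzma.h"

    static int run_coder(lzma_stream *strm, FILE *in, FILE *out)
    {
        uint8_t inbuf[4096], outbuf[4096];
        lzma_action action = LZMA_RUN;

        strm->next_in = NULL;
        strm->avail_in = 0;
        strm->next_out = outbuf;
        strm->avail_out = sizeof(outbuf);

        for (;;) {
            // Refill the input buffer; switch to LZMA_FINISH at EOF.
            if (strm->avail_in == 0 && action == LZMA_RUN) {
                strm->next_in = inbuf;
                strm->avail_in = fread(inbuf, 1, sizeof(inbuf), in);
                if (strm->avail_in == 0)
                    action = LZMA_FINISH;
            }

            const lzma_ret ret = lzma_code(strm, action);

            // Drain the output buffer when it is full or coding has ended.
            if (strm->avail_out == 0 || ret == LZMA_STREAM_END) {
                fwrite(outbuf, 1, sizeof(outbuf) - strm->avail_out, out);
                strm->next_out = outbuf;
                strm->avail_out = sizeof(outbuf);
            }

            if (ret == LZMA_STREAM_END)
                return 0;       // ISEQ_END reached
            if (ret != LZMA_OK)
                return -1;      // fatal error per the switch above
        }
    }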
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/common.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/common.h
new file mode 100644
index 00000000..3dad93aa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/common.h
@@ -0,0 +1,270 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file common.h
+/// \brief Definitions common to the whole liblzma library
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_COMMON_H
+#define LZMA_COMMON_H
+
+#include "sysdefs.h"
+#include "mythread.h"
+#include "integer.h"
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+# ifdef DLL_EXPORT
+# define LZMA_API_EXPORT __declspec(dllexport)
+# else
+# define LZMA_API_EXPORT
+# endif
+// Don't use ifdef or defined() below.
+#elif HAVE_VISIBILITY
+# define LZMA_API_EXPORT __attribute__((__visibility__("default")))
+#else
+# define LZMA_API_EXPORT
+#endif
+
+#define LZMA_API(type) LZMA_API_EXPORT type LZMA_API_CALL
+
+#include "lzma.h"
+
+// These allow hinting the compiler about often-executed branches whose
+// result is almost always the same.
+#ifdef __GNUC__
+# define likely(expr) __builtin_expect(expr, true)
+# define unlikely(expr) __builtin_expect(expr, false)
+#else
+# define likely(expr) (expr)
+# define unlikely(expr) (expr)
+#endif
+
+
+/// Size of temporary buffers needed in some filters
+#define LZMA_BUFFER_SIZE 4096
+
+
+/// Starting value for memory usage estimates. Instead of calculating size
+/// of _every_ structure and taking into account malloc() overhead etc., we
+/// add a base size to all memory usage estimates. It's not very accurate
+/// but should be easily good enough.
+#define LZMA_MEMUSAGE_BASE (UINT64_C(1) << 15)
+
+/// Start of internal Filter ID space. These IDs must never be used
+/// in Streams.
+#define LZMA_FILTER_RESERVED_START (LZMA_VLI_C(1) << 62)
+
+
+/// Internal helper filter used by Subblock decoder. It is mapped to an
+/// otherwise invalid Filter ID, which is impossible to get from any input
+/// file (even if malicious file).
+#define LZMA_FILTER_SUBBLOCK_HELPER LZMA_VLI_C(0x7000000000000001)
+
+
+/// Supported flags that can be passed to lzma_stream_decoder()
+/// or lzma_auto_decoder().
+#define LZMA_SUPPORTED_FLAGS \
+ ( LZMA_TELL_NO_CHECK \
+ | LZMA_TELL_UNSUPPORTED_CHECK \
+ | LZMA_TELL_ANY_CHECK \
+ | LZMA_CONCATENATED )
+
+
+/// Type of encoder/decoder specific data; the actual structure is defined
+/// differently in different coders.
+typedef struct lzma_coder_s lzma_coder;
+
+typedef struct lzma_next_coder_s lzma_next_coder;
+
+typedef struct lzma_filter_info_s lzma_filter_info;
+
+
+/// Type of a function used to initialize a filter encoder or decoder
+typedef lzma_ret (*lzma_init_function)(
+ lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters);
+
+/// Type of a function to do some kind of coding work (filters, Stream,
+/// Block encoders/decoders etc.). Some special coders don't use both
+/// input and output buffers, but for simplicity they still use this same
+/// function prototype.
+typedef lzma_ret (*lzma_code_function)(
+ lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size,
+ lzma_action action);
+
+/// Type of a function to free the memory allocated for the coder
+typedef void (*lzma_end_function)(
+ lzma_coder *coder, lzma_allocator *allocator);
+
+
+/// Raw coder validates and converts an array of lzma_filter structures to
+/// an array of lzma_filter_info structures. This array is used with
+/// lzma_next_filter_init to initialize the filter chain.
+struct lzma_filter_info_s {
+ /// Pointer to function used to initialize the filter.
+ /// This is NULL to indicate end of array.
+ lzma_init_function init;
+
+ /// Pointer to filter's options structure
+ void *options;
+};
+
+
+/// Hold data and function pointers of the next filter in the chain.
+struct lzma_next_coder_s {
+ /// Pointer to coder-specific data
+ lzma_coder *coder;
+
+ /// "Pointer" to init function. This is never called here.
+ /// We only need it to detect if we are initializing a coder
+ /// that was allocated earlier. See lzma_next_coder_init and
+ /// lzma_next_strm_init macros in this file.
+ uintptr_t init;
+
+ /// Pointer to function to do the actual coding
+ lzma_code_function code;
+
+ /// Pointer to function to free lzma_next_coder.coder. This can
+ /// be NULL; in that case, lzma_free is called to free
+ /// lzma_next_coder.coder.
+ lzma_end_function end;
+
+ /// Pointer to function to return the type of the integrity check.
+ /// Most coders won't support this.
+ lzma_check (*get_check)(const lzma_coder *coder);
+
+ /// Pointer to function to get and/or change the memory usage limit.
+ /// If new_memlimit == 0, the limit is not changed.
+ lzma_ret (*memconfig)(lzma_coder *coder, uint64_t *memusage,
+ uint64_t *old_memlimit, uint64_t new_memlimit);
+};
+
+
+/// Macro to initialize lzma_next_coder structure
+#define LZMA_NEXT_CODER_INIT \
+ (lzma_next_coder){ \
+ .coder = NULL, \
+ .init = (uintptr_t)(NULL), \
+ .code = NULL, \
+ .end = NULL, \
+ .get_check = NULL, \
+ .memconfig = NULL, \
+ }
+
+
+/// Internal data for lzma_strm_init, lzma_code, and lzma_end. A pointer to
+/// this is stored in lzma_stream.
+struct lzma_internal_s {
+ /// The actual coder that should do something useful
+ lzma_next_coder next;
+
+ /// Track the state of the coder. This is used to validate arguments
+ /// so that the actual coders can rely on e.g. that LZMA_SYNC_FLUSH
+ /// is used on every call to lzma_code until next.code has returned
+ /// LZMA_STREAM_END.
+ enum {
+ ISEQ_RUN,
+ ISEQ_SYNC_FLUSH,
+ ISEQ_FULL_FLUSH,
+ ISEQ_FINISH,
+ ISEQ_END,
+ ISEQ_ERROR,
+ } sequence;
+
+ /// A copy of lzma_stream avail_in. This is used to verify that the
+ /// amount of input doesn't change once e.g. LZMA_FINISH has been
+ /// used.
+ size_t avail_in;
+
+ /// Indicates which lzma_action values are allowed by next.code.
+ bool supported_actions[4];
+
+ /// If true, lzma_code will return LZMA_BUF_ERROR if no progress was
+ /// made (no input consumed and no output produced by next.code).
+ bool allow_buf_error;
+};
+
+
+/// Allocates memory
+extern void *lzma_alloc(size_t size, lzma_allocator *allocator)
+ lzma_attribute((malloc));
+
+/// Frees memory
+extern void lzma_free(void *ptr, lzma_allocator *allocator);
+
+
+/// Allocates strm->internal if it is NULL, and initializes *strm and
+/// strm->internal. This function is only called via lzma_next_strm_init macro.
+extern lzma_ret lzma_strm_init(lzma_stream *strm);
+
+/// Initializes the next filter in the chain, if any. This takes care of
+/// freeing the memory of the previously initialized filter if it is different
+/// from the filter being initialized now. This way the actual filter
+/// initialization functions don't need to use lzma_next_coder_init macro.
+extern lzma_ret lzma_next_filter_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+/// Frees the memory allocated for next->coder either using next->end or,
+/// if next->end is NULL, using lzma_free.
+extern void lzma_next_end(lzma_next_coder *next, lzma_allocator *allocator);
+
+
+/// Copy as much data as possible from in[] to out[] and update *in_pos
+/// and *out_pos accordingly. Returns the number of bytes copied.
+extern size_t lzma_bufcpy(const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size);
+
+
+/// \brief Return if expression doesn't evaluate to LZMA_OK
+///
+/// There are several situations where we want to return immediately
+/// with the value of expr if it isn't LZMA_OK. This macro shortens
+/// the code a little.
+#define return_if_error(expr) \
+do { \
+ const lzma_ret ret_ = (expr); \
+ if (ret_ != LZMA_OK) \
+ return ret_; \
+} while (0)
+
+
+/// If next isn't already initialized, free the previous coder. Then mark
+/// that next is _possibly_ initialized for the coder using this macro.
+/// "Possibly" means that if e.g. allocation of next->coder fails, the
+/// structure isn't actually initialized for this coder, but leaving
+/// next->init to func is still OK.
+#define lzma_next_coder_init(func, next, allocator) \
+do { \
+ if ((uintptr_t)(func) != (next)->init) \
+ lzma_next_end(next, allocator); \
+ (next)->init = (uintptr_t)(func); \
+} while (0)
+
+
+/// Initializes lzma_strm and calls func() to initialize strm->internal->next.
+/// (The function being called will use lzma_next_coder_init()). If
+/// initialization fails, memory that wasn't freed by func() is freed
+/// along with strm->internal.
+#define lzma_next_strm_init(func, strm, ...) \
+do { \
+ return_if_error(lzma_strm_init(strm)); \
+ const lzma_ret ret_ = func(&(strm)->internal->next, \
+ (strm)->allocator, __VA_ARGS__); \
+ if (ret_ != LZMA_OK) { \
+ lzma_end(strm); \
+ return ret_; \
+ } \
+} while (0)
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_buffer_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_buffer_encoder.c
new file mode 100644
index 00000000..e79065c7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_buffer_encoder.c
@@ -0,0 +1,29 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file easy_buffer_encoder.c
+/// \brief Easy single-call .xz Stream encoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "easy_preset.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_easy_buffer_encode(uint32_t preset, lzma_check check,
+ lzma_allocator *allocator, const uint8_t *in, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ lzma_options_easy opt_easy;
+ if (lzma_easy_preset(&opt_easy, preset))
+ return LZMA_OPTIONS_ERROR;
+
+ return lzma_stream_buffer_encode(opt_easy.filters, check,
+ allocator, in, in_size, out, out_pos, out_size);
+}
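
Illustration (not part of the imported sources): a single-call usage sketch for lzma_easy_buffer_encode(). compress_blob and the fixed output capacity are hypothetical; a real caller would size the buffer with an upper-bound helper if one is available in this tree.

    #include "lzma.h"

    /* Sketch: compress in[0..in_size) into out[0..out_capacity). */
    static lzma_ret compress_blob(const uint8_t *in, size_t in_size,
            uint8_t *out, size_t out_capacity, size_t *out_used)
    {
        *out_used = 0;
        // Preset 6, CRC64 integrity check, default malloc()/free() allocator.
        return lzma_easy_buffer_encode(6, LZMA_CHECK_CRC64, NULL,
                in, in_size, out, out_used, out_capacity);
    }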
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_decoder_memusage.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_decoder_memusage.c
new file mode 100644
index 00000000..1a8f6e9d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_decoder_memusage.c
@@ -0,0 +1,26 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file easy_decoder_memusage.c
+/// \brief Decoder memory usage calculation to match easy encoder presets
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "easy_preset.h"
+
+
+extern LZMA_API(uint64_t)
+lzma_easy_decoder_memusage(uint32_t preset)
+{
+ lzma_options_easy opt_easy;
+ if (lzma_easy_preset(&opt_easy, preset))
+ return UINT32_MAX;
+
+ return lzma_raw_decoder_memusage(opt_easy.filters);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_encoder.c
new file mode 100644
index 00000000..afe9c49c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_encoder.c
@@ -0,0 +1,82 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file easy_encoder.c
+/// \brief Easy .xz Stream encoder initialization
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "easy_preset.h"
+#include "stream_encoder.h"
+
+
+struct lzma_coder_s {
+ lzma_next_coder stream_encoder;
+ lzma_options_easy opt_easy;
+};
+
+
+static lzma_ret
+easy_encode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ return coder->stream_encoder.code(
+ coder->stream_encoder.coder, allocator,
+ in, in_pos, in_size, out, out_pos, out_size, action);
+}
+
+
+static void
+easy_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->stream_encoder, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+static lzma_ret
+easy_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ uint32_t preset, lzma_check check)
+{
+ lzma_next_coder_init(&easy_encoder_init, next, allocator);
+
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &easy_encode;
+ next->end = &easy_encoder_end;
+
+ next->coder->stream_encoder = LZMA_NEXT_CODER_INIT;
+ }
+
+ if (lzma_easy_preset(&next->coder->opt_easy, preset))
+ return LZMA_OPTIONS_ERROR;
+
+ return lzma_stream_encoder_init(&next->coder->stream_encoder,
+ allocator, next->coder->opt_easy.filters, check);
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_easy_encoder(lzma_stream *strm, uint32_t preset, lzma_check check)
+{
+ lzma_next_strm_init(easy_encoder_init, strm, preset, check);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+ strm->internal->supported_actions[LZMA_SYNC_FLUSH] = true;
+ strm->internal->supported_actions[LZMA_FULL_FLUSH] = true;
+ strm->internal->supported_actions[LZMA_FINISH] = true;
+
+ return LZMA_OK;
+}
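
Illustration (not part of the imported sources): from the caller's side the initializer above is used roughly like this (sketch, error paths trimmed):

    lzma_stream strm = LZMA_STREAM_INIT;

    if (lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64) != LZMA_OK)
        return 1;
    // All four actions enabled above are now accepted by lzma_code().
    // ... feed data with the lzma_code() loop sketched after common.c ...
    lzma_end(&strm);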
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_encoder_memusage.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_encoder_memusage.c
new file mode 100644
index 00000000..e97b4ef1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_encoder_memusage.c
@@ -0,0 +1,26 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file easy_encoder_memusage.c
+/// \brief Easy .xz Stream encoder memory usage calculation
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "easy_preset.h"
+
+
+extern LZMA_API(uint64_t)
+lzma_easy_encoder_memusage(uint32_t preset)
+{
+ lzma_options_easy opt_easy;
+ if (lzma_easy_preset(&opt_easy, preset))
+ return UINT32_MAX;
+
+ return lzma_raw_encoder_memusage(opt_easy.filters);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_preset.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_preset.c
new file mode 100644
index 00000000..4f6d203d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_preset.c
@@ -0,0 +1,29 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file easy_preset.c
+/// \brief Preset handling for easy encoder and decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "easy_preset.h"
+
+
+extern bool
+lzma_easy_preset(lzma_options_easy *opt_easy, uint32_t preset)
+{
+ if (lzma_lzma_preset(&opt_easy->opt_lzma, preset))
+ return true;
+
+ opt_easy->filters[0].id = LZMA_FILTER_LZMA2;
+ opt_easy->filters[0].options = &opt_easy->opt_lzma;
+ opt_easy->filters[1].id = LZMA_VLI_UNKNOWN;
+
+ return false;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_preset.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_preset.h
new file mode 100644
index 00000000..92e09690
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/easy_preset.h
@@ -0,0 +1,34 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file easy_preset.h
+/// \brief Preset handling for easy encoder and decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+
+
+typedef struct {
+ /// We need to keep the filters array available in case
+ /// LZMA_FULL_FLUSH is used.
+ lzma_filter filters[LZMA_FILTERS_MAX + 1];
+
+ /// Options for LZMA2
+ lzma_options_lzma opt_lzma;
+
+ // Options for more filters can be added later, so this struct
+ // is not ready to be put into the public API.
+
+} lzma_options_easy;
+
+
+/// Set *easy to the settings given by the preset. Returns true on error,
+/// false on success.
+extern bool lzma_easy_preset(lzma_options_easy *easy, uint32_t preset);
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_buffer_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_buffer_decoder.c
new file mode 100644
index 00000000..91cfe6d2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_buffer_decoder.c
@@ -0,0 +1,89 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file filter_buffer_decoder.c
+/// \brief Single-call raw decoding
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "filter_decoder.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_raw_buffer_decode(const lzma_filter *filters, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ // Validate what isn't validated later in filter_common.c.
+ if (in == NULL || in_pos == NULL || *in_pos > in_size || out == NULL
+ || out_pos == NULL || *out_pos > out_size)
+ return LZMA_PROG_ERROR;
+
+ // Initialize the decoder.
+ lzma_next_coder next = LZMA_NEXT_CODER_INIT;
+ return_if_error(lzma_raw_decoder_init(&next, allocator, filters));
+
+ // Store the positions so that we can restore them if something
+ // goes wrong.
+ const size_t in_start = *in_pos;
+ const size_t out_start = *out_pos;
+
+ // Do the actual decoding and free decoder's memory.
+ lzma_ret ret = next.code(next.coder, allocator, in, in_pos, in_size,
+ out, out_pos, out_size, LZMA_FINISH);
+
+ if (ret == LZMA_STREAM_END) {
+ ret = LZMA_OK;
+ } else {
+ if (ret == LZMA_OK) {
+ // Either the input was truncated or the
+ // output buffer was too small.
+ assert(*in_pos == in_size || *out_pos == out_size);
+
+ if (*in_pos != in_size) {
+ // Since input wasn't consumed completely,
+ // the output buffer became full and is
+ // too small.
+ ret = LZMA_BUF_ERROR;
+
+ } else if (*out_pos != out_size) {
+ // Since the output didn't become full, the input
+ // has to be truncated.
+ ret = LZMA_DATA_ERROR;
+
+ } else {
+ // All the input was consumed and output
+ // buffer is full. Now we don't immediately
+ // know the reason for the error. Try
+ // decoding one more byte. If it succeeds,
+ // then the output buffer was too small. If
+ // we cannot get a new output byte, the input
+ // is truncated.
+ uint8_t tmp[1];
+ size_t tmp_pos = 0;
+ (void)next.code(next.coder, allocator,
+ in, in_pos, in_size,
+ tmp, &tmp_pos, 1, LZMA_FINISH);
+
+ if (tmp_pos == 1)
+ ret = LZMA_BUF_ERROR;
+ else
+ ret = LZMA_DATA_ERROR;
+ }
+ }
+
+ // Restore the positions.
+ *in_pos = in_start;
+ *out_pos = out_start;
+ }
+
+ lzma_next_end(&next, allocator);
+
+ return ret;
+}
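
Illustration (not part of the imported sources): a hypothetical raw-decode call with a one-filter LZMA2 chain. The options must match what the encoder used; in, in_size, out, and out_size are assumed to exist.

    lzma_options_lzma opt;
    if (lzma_lzma_preset(&opt, 6))       // fills in dict_size etc.
        return LZMA_OPTIONS_ERROR;

    lzma_filter filters[] = {
        { .id = LZMA_FILTER_LZMA2, .options = &opt },
        { .id = LZMA_VLI_UNKNOWN,  .options = NULL },
    };

    size_t in_pos = 0, out_pos = 0;
    lzma_ret ret = lzma_raw_buffer_decode(filters, NULL,
            in, &in_pos, in_size, out, &out_pos, out_size);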
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_buffer_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_buffer_encoder.c
new file mode 100644
index 00000000..4b72e57e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_buffer_encoder.c
@@ -0,0 +1,56 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file filter_buffer_encoder.c
+/// \brief Single-call raw encoding
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "filter_encoder.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_raw_buffer_encode(const lzma_filter *filters, lzma_allocator *allocator,
+ const uint8_t *in, size_t in_size, uint8_t *out,
+ size_t *out_pos, size_t out_size)
+{
+ // Validate what isn't validated later in filter_common.c.
+ if ((in == NULL && in_size != 0) || out == NULL
+ || out_pos == NULL || *out_pos > out_size)
+ return LZMA_PROG_ERROR;
+
+ // Initialize the encoder
+ lzma_next_coder next = LZMA_NEXT_CODER_INIT;
+ return_if_error(lzma_raw_encoder_init(&next, allocator, filters));
+
+ // Store the output position so that we can restore it if
+ // something goes wrong.
+ const size_t out_start = *out_pos;
+
+ // Do the actual encoding and free coder's memory.
+ size_t in_pos = 0;
+ lzma_ret ret = next.code(next.coder, allocator, in, &in_pos, in_size,
+ out, out_pos, out_size, LZMA_FINISH);
+ lzma_next_end(&next, allocator);
+
+ if (ret == LZMA_STREAM_END) {
+ ret = LZMA_OK;
+ } else {
+ if (ret == LZMA_OK) {
+ // Output buffer was too small.
+ assert(*out_pos == out_size);
+ ret = LZMA_BUF_ERROR;
+ }
+
+ // Restore the output position.
+ *out_pos = out_start;
+ }
+
+ return ret;
+}
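
Illustration (not part of the imported sources): the encoding counterpart is symmetric; with the same hypothetical filters[] chain as in the decode sketch above:

    size_t out_pos = 0;
    lzma_ret ret = lzma_raw_buffer_encode(filters, NULL,
            in, in_size, out, &out_pos, out_size);
    // On LZMA_OK, out_pos tells how many bytes of out[] were produced.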
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_common.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_common.c
new file mode 100644
index 00000000..52c6e737
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_common.c
@@ -0,0 +1,263 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file filter_common.c
+/// \brief Filter-specific stuff common for both encoder and decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "filter_common.h"
+
+
+static const struct {
+ /// Filter ID
+ lzma_vli id;
+
+ /// True if it is OK to use this filter as non-last filter in
+ /// the chain.
+ bool non_last_ok;
+
+ /// True if it is OK to use this filter as the last filter in
+ /// the chain.
+ bool last_ok;
+
+ /// True if the filter may change the size of the data (that is, the
+ /// amount of encoded output can be different than the amount of
+ /// uncompressed input).
+ bool changes_size;
+
+} features[] = {
+#if defined (HAVE_ENCODER_LZMA1) || defined(HAVE_DECODER_LZMA1)
+ {
+ .id = LZMA_FILTER_LZMA1,
+ .non_last_ok = false,
+ .last_ok = true,
+ .changes_size = true,
+ },
+#endif
+#ifdef HAVE_DECODER_LZMA2
+ {
+ .id = LZMA_FILTER_LZMA2,
+ .non_last_ok = false,
+ .last_ok = true,
+ .changes_size = true,
+ },
+#endif
+#if defined(HAVE_ENCODER_SUBBLOCK) || defined(HAVE_DECODER_SUBBLOCK)
+ {
+ .id = LZMA_FILTER_SUBBLOCK,
+ .non_last_ok = true,
+ .last_ok = true,
+ .changes_size = true,
+ },
+#endif
+#ifdef HAVE_DECODER_X86
+ {
+ .id = LZMA_FILTER_X86,
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
+ },
+#endif
+#if defined(HAVE_ENCODER_POWERPC) || defined(HAVE_DECODER_POWERPC)
+ {
+ .id = LZMA_FILTER_POWERPC,
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
+ },
+#endif
+#ifdef HAVE_DECODER_IA64
+ {
+ .id = LZMA_FILTER_IA64,
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
+ },
+#endif
+#if defined(HAVE_ENCODER_ARM) || defined(HAVE_DECODER_ARM)
+ {
+ .id = LZMA_FILTER_ARM,
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
+ },
+#endif
+#if defined(HAVE_ENCODER_ARMTHUMB) || defined(HAVE_DECODER_ARMTHUMB)
+ {
+ .id = LZMA_FILTER_ARMTHUMB,
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
+ },
+#endif
+#if defined(HAVE_ENCODER_SPARC) || defined(HAVE_DECODER_SPARC)
+ {
+ .id = LZMA_FILTER_SPARC,
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
+ },
+#endif
+#if defined(HAVE_ENCODER_DELTA) || defined(HAVE_DECODER_DELTA)
+ {
+ .id = LZMA_FILTER_DELTA,
+ .non_last_ok = true,
+ .last_ok = false,
+ .changes_size = false,
+ },
+#endif
+ {
+ .id = LZMA_VLI_UNKNOWN
+ }
+};
+
+
+static lzma_ret
+validate_chain(const lzma_filter *filters, size_t *count)
+{
+ // There must be at least one filter.
+ if (filters == NULL || filters[0].id == LZMA_VLI_UNKNOWN)
+ return LZMA_PROG_ERROR;
+
+ // Number of non-last filters that may change the size of the data
+ // significantly (that is, more than 1-2 % or so).
+ size_t changes_size_count = 0;
+
+ // True if it is OK to add a new filter after the current filter.
+ bool non_last_ok = true;
+
+ // True if the last filter in the given chain is actually usable as
+ // the last filter. Only filters that support embedding End of Payload
+ // Marker can be used as the last filter in the chain.
+ bool last_ok = false;
+
+ size_t i = 0;
+ do {
+ size_t j;
+ for (j = 0; filters[i].id != features[j].id; ++j)
+ if (features[j].id == LZMA_VLI_UNKNOWN)
+ return LZMA_OPTIONS_ERROR;
+
+ // If the previous filter in the chain cannot be a non-last
+ // filter, the chain is invalid.
+ if (!non_last_ok)
+ return LZMA_OPTIONS_ERROR;
+
+ non_last_ok = features[j].non_last_ok;
+ last_ok = features[j].last_ok;
+ changes_size_count += features[j].changes_size;
+
+ } while (filters[++i].id != LZMA_VLI_UNKNOWN);
+
+ // There must be 1-4 filters. The last filter must be usable as
+ // the last filter in the chain. At most three filters are
+ // allowed to change the size of the data.
+ if (i > LZMA_FILTERS_MAX || !last_ok || changes_size_count > 3)
+ return LZMA_OPTIONS_ERROR;
+
+ *count = i;
+ return LZMA_OK;
+}
+
+
+extern lzma_ret
+lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter *options,
+ lzma_filter_find coder_find, bool is_encoder)
+{
+ // Do some basic validation and get the number of filters.
+ size_t count;
+ return_if_error(validate_chain(options, &count));
+
+ // Set the filter functions and copy the options pointer.
+ lzma_filter_info filters[LZMA_FILTERS_MAX + 1];
+ if (is_encoder) {
+ for (size_t i = 0; i < count; ++i) {
+ // The order of the filters is reversed in the
+ // encoder. It allows more efficient handling
+ // of the uncompressed data.
+ const size_t j = count - i - 1;
+
+ const lzma_filter_coder *const fc
+ = coder_find(options[i].id);
+ if (fc == NULL || fc->init == NULL)
+ return LZMA_OPTIONS_ERROR;
+
+ filters[j].init = fc->init;
+ filters[j].options = options[i].options;
+ }
+ } else {
+ for (size_t i = 0; i < count; ++i) {
+ const lzma_filter_coder *const fc
+ = coder_find(options[i].id);
+ if (fc == NULL || fc->init == NULL)
+ return LZMA_OPTIONS_ERROR;
+
+ filters[i].init = fc->init;
+ filters[i].options = options[i].options;
+ }
+ }
+
+ // Terminate the array.
+ filters[count].init = NULL;
+
+ // Initialize the filters.
+ const lzma_ret ret = lzma_next_filter_init(next, allocator, filters);
+ if (ret != LZMA_OK)
+ lzma_next_end(next, allocator);
+
+ return ret;
+}
+
+
+extern uint64_t
+lzma_raw_coder_memusage(lzma_filter_find coder_find,
+ const lzma_filter *filters)
+{
+ // The chain has to have at least one filter.
+ {
+ size_t tmp;
+ if (validate_chain(filters, &tmp) != LZMA_OK)
+ return UINT64_MAX;
+ }
+
+ uint64_t total = 0;
+ size_t i = 0;
+
+ do {
+ const lzma_filter_coder *const fc
+ = coder_find(filters[i].id);
+ if (fc == NULL)
+ return UINT64_MAX; // Unsupported Filter ID
+
+ if (fc->memusage == NULL) {
+ // This filter doesn't have a function to calculate
+ // the memory usage and validate the options. Such
+ // filters need only little memory, so we use 1 KiB
+ // as a good estimate. They also accept all possible
+ // options, so there's no need to worry about lack
+ // of validation.
+ total += 1024;
+ } else {
+ // Call the filter-specific memory usage calculation
+ // function.
+ const uint64_t usage
+ = fc->memusage(filters[i].options);
+ if (usage == UINT64_MAX)
+ return UINT64_MAX; // Invalid options
+
+ total += usage;
+ }
+ } while (filters[++i].id != LZMA_VLI_UNKNOWN);
+
+ // Add some fixed amount of extra. It's to compensate memory usage
+ // of Stream, Block etc. coders, malloc() overhead, stack etc.
+ return total + LZMA_MEMUSAGE_BASE;
+}
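
Illustration (not part of the imported sources): a concrete chain that passes validate_chain() above, assuming the Delta and LZMA2 coders are compiled in (preset error check trimmed):

    lzma_options_delta opt_delta = {
        .type = LZMA_DELTA_TYPE_BYTE,
        .dist = 4,                        // e.g. 32-bit samples
    };
    lzma_options_lzma opt_lzma2;
    lzma_lzma_preset(&opt_lzma2, 6);

    lzma_filter chain[] = {
        { .id = LZMA_FILTER_DELTA, .options = &opt_delta },   // non_last_ok
        { .id = LZMA_FILTER_LZMA2, .options = &opt_lzma2 },   // last_ok
        { .id = LZMA_VLI_UNKNOWN,  .options = NULL },
    };
    // Swapping the two filters would make validate_chain() fail:
    // LZMA2 has non_last_ok == false and Delta has last_ok == false.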
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_common.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_common.h
new file mode 100644
index 00000000..86ec8a0c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_common.h
@@ -0,0 +1,50 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file filter_common.h
+/// \brief Filter-specific stuff common for both encoder and decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_FILTER_COMMON_H
+#define LZMA_FILTER_COMMON_H
+
+#include "common.h"
+
+
+/// Both lzma_filter_encoder and lzma_filter_decoder begin with these members.
+typedef struct {
+ /// Filter ID
+ lzma_vli id;
+
+ /// Initializes the filter encoder and calls lzma_next_filter_init()
+ /// for filters + 1.
+ lzma_init_function init;
+
+ /// Calculates memory usage of the encoder. If the options are
+ /// invalid, UINT64_MAX is returned.
+ uint64_t (*memusage)(const void *options);
+
+} lzma_filter_coder;
+
+
+typedef const lzma_filter_coder *(*lzma_filter_find)(lzma_vli id);
+
+
+extern lzma_ret lzma_raw_coder_init(
+ lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter *filters,
+ lzma_filter_find coder_find, bool is_encoder);
+
+
+extern uint64_t lzma_raw_coder_memusage(lzma_filter_find coder_find,
+ const lzma_filter *filters);
+
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_decoder.c
new file mode 100644
index 00000000..c7d813b5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_decoder.c
@@ -0,0 +1,201 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file filter_decoder.c
+/// \brief Filter ID mapping to filter-specific functions
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "filter_decoder.h"
+#include "filter_common.h"
+#include "lzma_decoder.h"
+#include "lzma2_decoder.h"
+#include "subblock_decoder.h"
+#include "subblock_decoder_helper.h"
+#include "simple_decoder.h"
+#include "delta_decoder.h"
+
+
+typedef struct {
+ /// Filter ID
+ lzma_vli id;
+
+ /// Initializes the filter decoder and calls lzma_next_filter_init()
+ /// for filters + 1.
+ lzma_init_function init;
+
+ /// Calculates memory usage of the decoder. If the options are
+ /// invalid, UINT64_MAX is returned.
+ uint64_t (*memusage)(const void *options);
+
+ /// Decodes Filter Properties.
+ ///
+ /// \return - LZMA_OK: Properties decoded successfully.
+ /// - LZMA_OPTIONS_ERROR: Unsupported properties
+ /// - LZMA_MEM_ERROR: Memory allocation failed.
+ lzma_ret (*props_decode)(void **options, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size);
+
+} lzma_filter_decoder;
+
+
+static const lzma_filter_decoder decoders[] = {
+#ifdef HAVE_DECODER_LZMA1
+ {
+ .id = LZMA_FILTER_LZMA1,
+ .init = &lzma_lzma_decoder_init,
+ .memusage = &lzma_lzma_decoder_memusage,
+ .props_decode = &lzma_lzma_props_decode,
+ },
+#endif
+#ifdef HAVE_DECODER_LZMA2
+ {
+ .id = LZMA_FILTER_LZMA2,
+ .init = &lzma_lzma2_decoder_init,
+ .memusage = &lzma_lzma2_decoder_memusage,
+ .props_decode = &lzma_lzma2_props_decode,
+ },
+#endif
+#ifdef HAVE_DECODER_SUBBLOCK
+ {
+ .id = LZMA_FILTER_SUBBLOCK,
+ .init = &lzma_subblock_decoder_init,
+// .memusage = &lzma_subblock_decoder_memusage,
+ .props_decode = NULL,
+ },
+ {
+ .id = LZMA_FILTER_SUBBLOCK_HELPER,
+ .init = &lzma_subblock_decoder_helper_init,
+ .memusage = NULL,
+ .props_decode = NULL,
+ },
+#endif
+#ifdef HAVE_DECODER_X86
+ {
+ .id = LZMA_FILTER_X86,
+ .init = &lzma_simple_x86_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
+ },
+#endif
+#ifdef HAVE_DECODER_POWERPC
+ {
+ .id = LZMA_FILTER_POWERPC,
+ .init = &lzma_simple_powerpc_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
+ },
+#endif
+#ifdef HAVE_DECODER_IA64
+ {
+ .id = LZMA_FILTER_IA64,
+ .init = &lzma_simple_ia64_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
+ },
+#endif
+#ifdef HAVE_DECODER_ARM
+ {
+ .id = LZMA_FILTER_ARM,
+ .init = &lzma_simple_arm_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
+ },
+#endif
+#ifdef HAVE_DECODER_ARMTHUMB
+ {
+ .id = LZMA_FILTER_ARMTHUMB,
+ .init = &lzma_simple_armthumb_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
+ },
+#endif
+#ifdef HAVE_DECODER_SPARC
+ {
+ .id = LZMA_FILTER_SPARC,
+ .init = &lzma_simple_sparc_decoder_init,
+ .memusage = NULL,
+ .props_decode = &lzma_simple_props_decode,
+ },
+#endif
+#ifdef HAVE_DECODER_DELTA
+ {
+ .id = LZMA_FILTER_DELTA,
+ .init = &lzma_delta_decoder_init,
+ .memusage = &lzma_delta_coder_memusage,
+ .props_decode = &lzma_delta_props_decode,
+ },
+#endif
+};
+
+
+static const lzma_filter_decoder *
+decoder_find(lzma_vli id)
+{
+ for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i)
+ if (decoders[i].id == id)
+ return decoders + i;
+
+ return NULL;
+}
+
+
+extern LZMA_API(lzma_bool)
+lzma_filter_decoder_is_supported(lzma_vli id)
+{
+ return decoder_find(id) != NULL;
+}
+
+
+extern lzma_ret
+lzma_raw_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter *options)
+{
+ return lzma_raw_coder_init(next, allocator,
+ options, (lzma_filter_find)(&decoder_find), false);
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_raw_decoder(lzma_stream *strm, const lzma_filter *options)
+{
+ lzma_next_strm_init(lzma_raw_decoder_init, strm, options);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+ strm->internal->supported_actions[LZMA_FINISH] = true;
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(uint64_t)
+lzma_raw_decoder_memusage(const lzma_filter *filters)
+{
+ return lzma_raw_coder_memusage(
+ (lzma_filter_find)(&decoder_find), filters);
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_properties_decode(lzma_filter *filter, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size)
+{
+ // Make it always NULL so that the caller can always safely free() it.
+ filter->options = NULL;
+
+ const lzma_filter_decoder *const fd = decoder_find(filter->id);
+ if (fd == NULL)
+ return LZMA_OPTIONS_ERROR;
+
+ if (fd->props_decode == NULL)
+ return props_size == 0 ? LZMA_OK : LZMA_OPTIONS_ERROR;
+
+ return fd->props_decode(
+ &filter->options, allocator, props, props_size);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_decoder.h
new file mode 100644
index 00000000..e31754ff
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_decoder.h
@@ -0,0 +1,25 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file filter_decoder.h
+/// \brief Filter ID mapping to filter-specific functions
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_FILTER_DECODER_H
+#define LZMA_FILTER_DECODER_H
+
+#include "common.h"
+
+
+extern lzma_ret lzma_raw_decoder_init(
+ lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter *options);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_encoder.c
new file mode 100644
index 00000000..b98b2085
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_encoder.c
@@ -0,0 +1,273 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file filter_encoder.c
+/// \brief Filter ID mapping to filter-specific functions
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "filter_encoder.h"
+#include "filter_common.h"
+#include "lzma_encoder.h"
+#include "lzma2_encoder.h"
+#include "subblock_encoder.h"
+#include "simple_encoder.h"
+#include "delta_encoder.h"
+
+
+typedef struct {
+ /// Filter ID
+ lzma_vli id;
+
+ /// Initializes the filter encoder and calls lzma_next_filter_init()
+ /// for filters + 1.
+ lzma_init_function init;
+
+ /// Calculates memory usage of the encoder. If the options are
+ /// invalid, UINT64_MAX is returned.
+ uint64_t (*memusage)(const void *options);
+
+ /// Calculates the minimum sane size for Blocks (or other types of
+ /// chunks) to which the input data can be split to make
+ /// multithreaded encoding possible. If this is NULL, it is assumed
+ /// that the encoder is fast enough with single thread.
+ lzma_vli (*chunk_size)(const void *options);
+
+ /// Tells the size of the Filter Properties field. If options are
+ /// invalid, UINT32_MAX is returned. If this is NULL, props_size_fixed
+ /// is used.
+ lzma_ret (*props_size_get)(uint32_t *size, const void *options);
+ uint32_t props_size_fixed;
+
+ /// Encodes Filter Properties.
+ ///
+ /// \return - LZMA_OK: Properties encoded successfully.
+ /// - LZMA_OPTIONS_ERROR: Unsupported options
+ /// - LZMA_PROG_ERROR: Invalid options or not enough
+ /// output space
+ lzma_ret (*props_encode)(const void *options, uint8_t *out);
+
+} lzma_filter_encoder;
+
+
+static const lzma_filter_encoder encoders[] = {
+#ifdef HAVE_ENCODER_LZMA1
+ {
+ .id = LZMA_FILTER_LZMA1,
+ .init = &lzma_lzma_encoder_init,
+ .memusage = &lzma_lzma_encoder_memusage,
+ .chunk_size = NULL, // FIXME
+ .props_size_get = NULL,
+ .props_size_fixed = 5,
+ .props_encode = &lzma_lzma_props_encode,
+ },
+#endif
+#ifdef HAVE_ENCODER_LZMA2
+ {
+ .id = LZMA_FILTER_LZMA2,
+ .init = &lzma_lzma2_encoder_init,
+ .memusage = &lzma_lzma2_encoder_memusage,
+ .chunk_size = NULL, // FIXME
+ .props_size_get = NULL,
+ .props_size_fixed = 1,
+ .props_encode = &lzma_lzma2_props_encode,
+ },
+#endif
+#ifdef HAVE_ENCODER_SUBBLOCK
+ {
+ .id = LZMA_FILTER_SUBBLOCK,
+ .init = &lzma_subblock_encoder_init,
+// .memusage = &lzma_subblock_encoder_memusage,
+ .chunk_size = NULL,
+ .props_size_get = NULL,
+ .props_size_fixed = 0,
+ .props_encode = NULL,
+ },
+#endif
+#ifdef HAVE_ENCODER_X86
+ {
+ .id = LZMA_FILTER_X86,
+ .init = &lzma_simple_x86_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
+ },
+#endif
+#ifdef HAVE_ENCODER_POWERPC
+ {
+ .id = LZMA_FILTER_POWERPC,
+ .init = &lzma_simple_powerpc_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
+ },
+#endif
+#ifdef HAVE_ENCODER_IA64
+ {
+ .id = LZMA_FILTER_IA64,
+ .init = &lzma_simple_ia64_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
+ },
+#endif
+#ifdef HAVE_ENCODER_ARM
+ {
+ .id = LZMA_FILTER_ARM,
+ .init = &lzma_simple_arm_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
+ },
+#endif
+#ifdef HAVE_ENCODER_ARMTHUMB
+ {
+ .id = LZMA_FILTER_ARMTHUMB,
+ .init = &lzma_simple_armthumb_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
+ },
+#endif
+#ifdef HAVE_ENCODER_SPARC
+ {
+ .id = LZMA_FILTER_SPARC,
+ .init = &lzma_simple_sparc_encoder_init,
+ .memusage = NULL,
+ .chunk_size = NULL,
+ .props_size_get = &lzma_simple_props_size,
+ .props_encode = &lzma_simple_props_encode,
+ },
+#endif
+#ifdef HAVE_ENCODER_DELTA
+ {
+ .id = LZMA_FILTER_DELTA,
+ .init = &lzma_delta_encoder_init,
+ .memusage = &lzma_delta_coder_memusage,
+ .chunk_size = NULL,
+ .props_size_get = NULL,
+ .props_size_fixed = 1,
+ .props_encode = &lzma_delta_props_encode,
+ },
+#endif
+};
+
+
+static const lzma_filter_encoder *
+encoder_find(lzma_vli id)
+{
+ for (size_t i = 0; i < ARRAY_SIZE(encoders); ++i)
+ if (encoders[i].id == id)
+ return encoders + i;
+
+ return NULL;
+}
+
+
+extern LZMA_API(lzma_bool)
+lzma_filter_encoder_is_supported(lzma_vli id)
+{
+ return encoder_find(id) != NULL;
+}
+
+
+extern lzma_ret
+lzma_raw_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter *options)
+{
+ return lzma_raw_coder_init(next, allocator,
+ options, (lzma_filter_find)(&encoder_find), true);
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_raw_encoder(lzma_stream *strm, const lzma_filter *options)
+{
+ lzma_next_strm_init(lzma_raw_coder_init, strm, options,
+ (lzma_filter_find)(&encoder_find), true);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+ strm->internal->supported_actions[LZMA_SYNC_FLUSH] = true;
+ strm->internal->supported_actions[LZMA_FINISH] = true;
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(uint64_t)
+lzma_raw_encoder_memusage(const lzma_filter *filters)
+{
+ return lzma_raw_coder_memusage(
+ (lzma_filter_find)(&encoder_find), filters);
+}
+
+
+extern LZMA_API(lzma_vli)
+lzma_chunk_size(const lzma_filter *filters)
+{
+ lzma_vli max = 0;
+
+ for (size_t i = 0; filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
+ const lzma_filter_encoder *const fe
+ = encoder_find(filters[i].id);
+ if (fe->chunk_size != NULL) {
+ const lzma_vli size
+ = fe->chunk_size(filters[i].options);
+ if (size == LZMA_VLI_UNKNOWN)
+ return LZMA_VLI_UNKNOWN;
+
+ if (size > max)
+ max = size;
+ }
+ }
+
+ return max;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_properties_size(uint32_t *size, const lzma_filter *filter)
+{
+ const lzma_filter_encoder *const fe = encoder_find(filter->id);
+ if (fe == NULL) {
+ // Unknown filter - if the Filter ID is a proper VLI,
+ // return LZMA_OPTIONS_ERROR instead of LZMA_PROG_ERROR,
+ // because it's possible that we just don't have support
+ // compiled in for the requested filter.
+ return filter->id <= LZMA_VLI_MAX
+ ? LZMA_OPTIONS_ERROR : LZMA_PROG_ERROR;
+ }
+
+ if (fe->props_size_get == NULL) {
+ // No props_size_get() function, use props_size_fixed.
+ *size = fe->props_size_fixed;
+ return LZMA_OK;
+ }
+
+ return fe->props_size_get(size, filter->options);
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_properties_encode(const lzma_filter *filter, uint8_t *props)
+{
+ const lzma_filter_encoder *const fe = encoder_find(filter->id);
+ if (fe == NULL)
+ return LZMA_PROG_ERROR;
+
+ if (fe->props_encode == NULL)
+ return LZMA_OK;
+
+ return fe->props_encode(filter->options, props);
+}
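
Illustration (not part of the imported sources): serializing the Filter Properties of one configured lzma_filter entry; "filter" is hypothetical and error handling is abbreviated.

    uint32_t props_size;
    if (lzma_properties_size(&props_size, &filter) != LZMA_OK)
        return 1;

    uint8_t props[16];   // LZMA1 needs 5 bytes, LZMA2 and Delta 1 byte
    if (props_size > sizeof(props)
            || lzma_properties_encode(&filter, props) != LZMA_OK)
        return 1;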
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_encoder.h
new file mode 100644
index 00000000..207b9ad4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_encoder.h
@@ -0,0 +1,29 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file filter_encoder.h
+/// \brief Filter ID mapping to filter-specific functions
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_FILTER_ENCODER_H
+#define LZMA_FILTER_ENCODER_H
+
+#include "common.h"
+
+
+// FIXME !!! Public API
+extern lzma_vli lzma_chunk_size(const lzma_filter *filters);
+
+
+extern lzma_ret lzma_raw_encoder_init(
+ lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter *options);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_flags_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_flags_decoder.c
new file mode 100644
index 00000000..5b29ea5a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_flags_decoder.c
@@ -0,0 +1,48 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file filter_flags_decoder.c
+/// \brief Decodes a Filter Flags field
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "filter_decoder.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_filter_flags_decode(
+ lzma_filter *filter, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos, size_t in_size)
+{
+ // Set the pointer to NULL so the caller can always safely free it.
+ filter->options = NULL;
+
+ // Filter ID
+ return_if_error(lzma_vli_decode(&filter->id, NULL,
+ in, in_pos, in_size));
+
+ if (filter->id >= LZMA_FILTER_RESERVED_START)
+ return LZMA_DATA_ERROR;
+
+ // Size of Properties
+ lzma_vli props_size;
+ return_if_error(lzma_vli_decode(&props_size, NULL,
+ in, in_pos, in_size));
+
+ // Filter Properties
+ if (in_size - *in_pos < props_size)
+ return LZMA_DATA_ERROR;
+
+ const lzma_ret ret = lzma_properties_decode(
+ filter, allocator, in + *in_pos, props_size);
+
+ *in_pos += props_size;
+
+ return ret;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_flags_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_flags_encoder.c
new file mode 100644
index 00000000..471e9794
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/filter_flags_encoder.c
@@ -0,0 +1,58 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file filter_flags_encoder.c
+/// \brief Encodes a Filter Flags field
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "filter_encoder.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_filter_flags_size(uint32_t *size, const lzma_filter *filter)
+{
+ if (filter->id >= LZMA_FILTER_RESERVED_START)
+ return LZMA_PROG_ERROR;
+
+ return_if_error(lzma_properties_size(size, filter));
+
+ *size += lzma_vli_size(filter->id) + lzma_vli_size(*size);
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_filter_flags_encode(const lzma_filter *filter,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ // Filter ID
+ if (filter->id >= LZMA_FILTER_RESERVED_START)
+ return LZMA_PROG_ERROR;
+
+ return_if_error(lzma_vli_encode(filter->id, NULL,
+ out, out_pos, out_size));
+
+ // Size of Properties
+ uint32_t props_size;
+ return_if_error(lzma_properties_size(&props_size, filter));
+ return_if_error(lzma_vli_encode(props_size, NULL,
+ out, out_pos, out_size));
+
+ // Filter Properties
+ if (out_size - *out_pos < props_size)
+ return LZMA_PROG_ERROR;
+
+ return_if_error(lzma_properties_encode(filter, out + *out_pos));
+
+ *out_pos += props_size;
+
+ return LZMA_OK;
+}
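+
+
+// Illustrative sketch (not part of the upstream source): how a caller is
+// expected to pair the two functions above. The filter variable and the
+// output buffer are assumptions made only for this example.
+//
+//    uint32_t size;
+//    if (lzma_filter_flags_size(&size, &filter) != LZMA_OK)
+//        return; // unsupported Filter ID or bad options
+//
+//    uint8_t buf[64]; // assumed large enough for this example
+//    size_t pos = 0;
+//    if (size <= sizeof(buf) && lzma_filter_flags_encode(
+//            &filter, buf, &pos, size) == LZMA_OK) {
+//        // buf[0..pos) now holds the encoded Filter Flags; pos == size.
+//        // lzma_filter_flags_decode() reverses this step.
+//    }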
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index.c
new file mode 100644
index 00000000..88976380
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index.c
@@ -0,0 +1,778 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file index.c
+/// \brief Handling of Index
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "index.h"
+
+
+/// Number of Records to allocate at once in the unrolled list.
+#define INDEX_GROUP_SIZE 256
+
+
+typedef struct lzma_index_group_s lzma_index_group;
+struct lzma_index_group_s {
+ /// Previous group
+ lzma_index_group *prev;
+
+ /// Next group
+ lzma_index_group *next;
+
+ /// Index of the last Record in this group
+ size_t last;
+
+ /// Unpadded Size fields as special cumulative sum relative to the
+ /// beginning of the group. It's special in the sense that the
+ /// previous value is rounded up to the next multiple of four before
+ /// calculating the new value. The total encoded size of the Blocks
+ /// in the group is unpadded_sums[last] rounded up to the next
+ /// multiple of four.
+ ///
+ /// For example, if the Unpadded Sizes are 39, 57, and 81, the stored
+ /// values are 39, 97 (40 + 57), and 181 (100 + 81). The total
+ /// encoded size of these Blocks is 184.
+ ///
+ /// This encoding is nice from the point of view of lzma_index_locate().
+ lzma_vli unpadded_sums[INDEX_GROUP_SIZE];
+
+ /// Uncompressed Size fields as cumulative sum relative to the
+ /// beginning of the group. The uncompressed size of the group is
+ /// uncompressed_sums[last].
+ lzma_vli uncompressed_sums[INDEX_GROUP_SIZE];
+
+ /// True if the Record is padding
+ bool paddings[INDEX_GROUP_SIZE];
+};
+
+
+struct lzma_index_s {
+ /// Total size of the Blocks and padding
+ lzma_vli total_size;
+
+ /// Uncompressed size of the Stream
+ lzma_vli uncompressed_size;
+
+ /// Number of non-padding Records. This is needed by the Index encoder.
+ lzma_vli count;
+
+ /// Size of the List of Records field; this is updated every time
+ /// a new non-padding Record is added.
+ lzma_vli index_list_size;
+
+ /// First group of Records
+ lzma_index_group *head;
+
+ /// Last group of Records
+ lzma_index_group *tail;
+
+ /// Tracking the read position
+ struct {
+ /// Group where the current read position is.
+ lzma_index_group *group;
+
+ /// The most recently read Record in *group
+ size_t record;
+
+ /// Uncompressed offset of the beginning of *group relative
+ /// to the beginning of the Stream
+ lzma_vli uncompressed_offset;
+
+ /// Compressed offset of the beginning of *group relative
+ /// to the beginning of the Stream
+ lzma_vli stream_offset;
+ } current;
+
+ /// Information about earlier Indexes when multiple Indexes have
+ /// been combined.
+ struct {
+ /// Sum of the Record counts of the all but the last Stream.
+ lzma_vli count;
+
+ /// Sum of the List of Records fields of all but the last
+ /// Stream. This is needed when a new Index is concatenated
+ /// to this lzma_index structure.
+ lzma_vli index_list_size;
+
+ /// Total size of all but the last Stream and all Stream
+ /// Padding fields.
+ lzma_vli streams_size;
+ } old;
+};
+
+
+extern LZMA_API(lzma_vli)
+lzma_index_memusage(lzma_vli count)
+{
+ if (count > LZMA_VLI_MAX)
+ return UINT64_MAX;
+
+ return sizeof(lzma_index) + (count + INDEX_GROUP_SIZE - 1)
+ / INDEX_GROUP_SIZE * sizeof(lzma_index_group);
+}
+
+
+static void
+free_index_list(lzma_index *i, lzma_allocator *allocator)
+{
+ lzma_index_group *g = i->head;
+
+ while (g != NULL) {
+ lzma_index_group *tmp = g->next;
+ lzma_free(g, allocator);
+ g = tmp;
+ }
+
+ return;
+}
+
+
+extern LZMA_API(lzma_index *)
+lzma_index_init(lzma_index *i, lzma_allocator *allocator)
+{
+ if (i == NULL) {
+ i = lzma_alloc(sizeof(lzma_index), allocator);
+ if (i == NULL)
+ return NULL;
+ } else {
+ free_index_list(i, allocator);
+ }
+
+ i->total_size = 0;
+ i->uncompressed_size = 0;
+ i->count = 0;
+ i->index_list_size = 0;
+ i->head = NULL;
+ i->tail = NULL;
+ i->current.group = NULL;
+ i->old.count = 0;
+ i->old.index_list_size = 0;
+ i->old.streams_size = 0;
+
+ return i;
+}
+
+
+extern LZMA_API(void)
+lzma_index_end(lzma_index *i, lzma_allocator *allocator)
+{
+ if (i != NULL) {
+ free_index_list(i, allocator);
+ lzma_free(i, allocator);
+ }
+
+ return;
+}
+
+
+extern LZMA_API(lzma_vli)
+lzma_index_count(const lzma_index *i)
+{
+ return i->count;
+}
+
+
+extern LZMA_API(lzma_vli)
+lzma_index_size(const lzma_index *i)
+{
+ return index_size(i->count, i->index_list_size);
+}
+
+
+extern LZMA_API(lzma_vli)
+lzma_index_total_size(const lzma_index *i)
+{
+ return i->total_size;
+}
+
+
+extern LZMA_API(lzma_vli)
+lzma_index_stream_size(const lzma_index *i)
+{
+ // Stream Header + Blocks + Index + Stream Footer
+ return LZMA_STREAM_HEADER_SIZE + i->total_size
+ + index_size(i->count, i->index_list_size)
+ + LZMA_STREAM_HEADER_SIZE;
+}
+
+
+extern LZMA_API(lzma_vli)
+lzma_index_file_size(const lzma_index *i)
+{
+ // If multiple Streams are concatenated, the Stream Header, Index,
+ // and Stream Footer fields of all but the last Stream are already
+ // included in old.streams_size. Thus, we need to calculate only the
+ // size of the last Index, not all Indexes.
+ return i->old.streams_size + LZMA_STREAM_HEADER_SIZE + i->total_size
+ + index_size(i->count - i->old.count,
+ i->index_list_size - i->old.index_list_size)
+ + LZMA_STREAM_HEADER_SIZE;
+}
+
+
+extern LZMA_API(lzma_vli)
+lzma_index_uncompressed_size(const lzma_index *i)
+{
+ return i->uncompressed_size;
+}
+
+
+extern uint32_t
+lzma_index_padding_size(const lzma_index *i)
+{
+ return (LZMA_VLI_C(4)
+ - index_size_unpadded(i->count, i->index_list_size)) & 3;
+}
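+
+
+// Worked example (added for illustration): if index_size_unpadded() is 13,
+// (4 - 13) & 3 == 3, i.e. three padding bytes are needed to reach the next
+// multiple of four; if it is already a multiple of four, the result is 0.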
+
+
+/// Appends a new Record to the Index. If needed, this allocates a new
+/// Record group.
+static lzma_ret
+index_append_real(lzma_index *i, lzma_allocator *allocator,
+ lzma_vli unpadded_size, lzma_vli uncompressed_size,
+ bool is_padding)
+{
+ // Add the new record.
+ if (i->tail == NULL || i->tail->last == INDEX_GROUP_SIZE - 1) {
+ // Allocate a new group.
+ lzma_index_group *g = lzma_alloc(sizeof(lzma_index_group),
+ allocator);
+ if (g == NULL)
+ return LZMA_MEM_ERROR;
+
+ // Initialize the group and set its first record.
+ g->prev = i->tail;
+ g->next = NULL;
+ g->last = 0;
+ g->unpadded_sums[0] = unpadded_size;
+ g->uncompressed_sums[0] = uncompressed_size;
+ g->paddings[0] = is_padding;
+
+ // If this is the first group, make it the head.
+ if (i->head == NULL)
+ i->head = g;
+ else
+ i->tail->next = g;
+
+ // Make it the new tail.
+ i->tail = g;
+
+ } else {
+ // i->tail has space left for at least one record.
+ i->tail->unpadded_sums[i->tail->last + 1]
+ = unpadded_size + vli_ceil4(
+ i->tail->unpadded_sums[i->tail->last]);
+ i->tail->uncompressed_sums[i->tail->last + 1]
+ = i->tail->uncompressed_sums[i->tail->last]
+ + uncompressed_size;
+ i->tail->paddings[i->tail->last + 1] = is_padding;
+ ++i->tail->last;
+ }
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_index_append(lzma_index *i, lzma_allocator *allocator,
+ lzma_vli unpadded_size, lzma_vli uncompressed_size)
+{
+ if (unpadded_size < UNPADDED_SIZE_MIN
+ || unpadded_size > UNPADDED_SIZE_MAX
+ || uncompressed_size > LZMA_VLI_MAX)
+ return LZMA_PROG_ERROR;
+
+ // This looks a bit ugly. We want to first validate that the Index
+ // and Stream stay within valid limits after adding this Record. After
+ // validating, we may need to allocate a new lzma_index_group (it's
+ // slightly more correct to validate before allocating, YMMV).
+ lzma_ret ret;
+
+ // First update the overall info so we can validate it.
+ const lzma_vli index_list_size_add = lzma_vli_size(unpadded_size)
+ + lzma_vli_size(uncompressed_size);
+
+ const lzma_vli total_size = vli_ceil4(unpadded_size);
+
+ i->total_size += total_size;
+ i->uncompressed_size += uncompressed_size;
+ ++i->count;
+ i->index_list_size += index_list_size_add;
+
+ if (i->total_size > LZMA_VLI_MAX
+ || i->uncompressed_size > LZMA_VLI_MAX
+ || lzma_index_size(i) > LZMA_BACKWARD_SIZE_MAX
+ || lzma_index_file_size(i) > LZMA_VLI_MAX)
+ ret = LZMA_DATA_ERROR; // Would grow past the limits.
+ else
+ ret = index_append_real(i, allocator, unpadded_size,
+ uncompressed_size, false);
+
+ if (ret != LZMA_OK) {
+ // Something went wrong. Undo the updates.
+ i->total_size -= total_size;
+ i->uncompressed_size -= uncompressed_size;
+ --i->count;
+ i->index_list_size -= index_list_size_add;
+ }
+
+ return ret;
+}
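+
+
+// Illustrative sketch (not part of the upstream source): building an Index
+// for two Blocks. The Unpadded Sizes (39, 57) and Uncompressed Sizes
+// (100, 200) are made-up values for the example.
+//
+//    lzma_index *idx = lzma_index_init(NULL, NULL);
+//    if (idx != NULL
+//            && lzma_index_append(idx, NULL, 39, 100) == LZMA_OK
+//            && lzma_index_append(idx, NULL, 57, 200) == LZMA_OK) {
+//        // lzma_index_total_size(idx) == 40 + 60 == 100 and
+//        // lzma_index_uncompressed_size(idx) == 300.
+//    }
+//    lzma_index_end(idx, NULL);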
+
+
+/// Initialize i->current to point to the first Record.
+static bool
+init_current(lzma_index *i)
+{
+ if (i->head == NULL) {
+ assert(i->count == 0);
+ return true;
+ }
+
+ assert(i->count > 0);
+
+ i->current.group = i->head;
+ i->current.record = 0;
+ i->current.stream_offset = LZMA_STREAM_HEADER_SIZE;
+ i->current.uncompressed_offset = 0;
+
+ return false;
+}
+
+
+/// Go backward to the previous group.
+static void
+previous_group(lzma_index *i)
+{
+ assert(i->current.group->prev != NULL);
+
+ // Go to the previous group first.
+ i->current.group = i->current.group->prev;
+ i->current.record = i->current.group->last;
+
+ // Then update the offsets.
+ i->current.stream_offset -= vli_ceil4(i->current.group->unpadded_sums[
+ i->current.group->last]);
+ i->current.uncompressed_offset -= i->current.group->uncompressed_sums[
+ i->current.group->last];
+
+ return;
+}
+
+
+/// Go forward to the next group.
+static void
+next_group(lzma_index *i)
+{
+ assert(i->current.group->next != NULL);
+
+ // Update the offsets first.
+ i->current.stream_offset += vli_ceil4(i->current.group->unpadded_sums[
+ i->current.group->last]);
+ i->current.uncompressed_offset += i->current.group
+ ->uncompressed_sums[i->current.group->last];
+
+ // Then go to the next group.
+ i->current.record = 0;
+ i->current.group = i->current.group->next;
+
+ return;
+}
+
+
+/// Set *info from i->current.
+static void
+set_info(const lzma_index *i, lzma_index_record *info)
+{
+ // First copy the cumulative sizes from the current Record of the
+ // current group.
+ info->unpadded_size
+ = i->current.group->unpadded_sums[i->current.record];
+ info->total_size = vli_ceil4(info->unpadded_size);
+ info->uncompressed_size = i->current.group->uncompressed_sums[
+ i->current.record];
+
+ // Copy the start offsets of this group.
+ info->stream_offset = i->current.stream_offset;
+ info->uncompressed_offset = i->current.uncompressed_offset;
+
+ // If it's not the first Record in this group, we need to do some
+ // adjustments.
+ if (i->current.record > 0) {
+ // Since the _sums[] are cumulative, we subtract the sums of
+ // the previous Record to get the sizes of the current Record,
+ // and add the sums of the previous Record to the offsets.
+ // With unpadded_sums[] we need to take into account that it
+ // uses a slightly unusual way to do the cumulative summing.
+ const lzma_vli total_sum
+ = vli_ceil4(i->current.group->unpadded_sums[
+ i->current.record - 1]);
+
+ const lzma_vli uncompressed_sum = i->current.group
+ ->uncompressed_sums[i->current.record - 1];
+
+ info->total_size -= total_sum;
+ info->unpadded_size -= total_sum;
+ info->uncompressed_size -= uncompressed_sum;
+
+ info->stream_offset += total_sum;
+ info->uncompressed_offset += uncompressed_sum;
+ }
+
+ return;
+}
+
+
+extern LZMA_API(lzma_bool)
+lzma_index_read(lzma_index *i, lzma_index_record *info)
+{
+ if (i->current.group == NULL) {
+ // We are at the beginning of the Record list. Set up
+ // i->current to point at the first Record. Return if there
+ // are no Records.
+ if (init_current(i))
+ return true;
+ } else do {
+ // Try to go to the next Record.
+ if (i->current.record < i->current.group->last)
+ ++i->current.record;
+ else if (i->current.group->next == NULL)
+ return true;
+ else
+ next_group(i);
+ } while (i->current.group->paddings[i->current.record]);
+
+ // We found a new Record. Set the information to *info.
+ set_info(i, info);
+
+ return false;
+}
+
+
+extern LZMA_API(void)
+lzma_index_rewind(lzma_index *i)
+{
+ i->current.group = NULL;
+ return;
+}
+
+
+extern LZMA_API(lzma_bool)
+lzma_index_locate(lzma_index *i, lzma_index_record *info, lzma_vli target)
+{
+ // Check if it is possible to fulfill the request.
+ if (target >= i->uncompressed_size)
+ return true;
+
+ // Now we know that we will have an answer. Initialize the current
+ // read position if needed.
+ if (i->current.group == NULL && init_current(i))
+ return true;
+
+ // Locate the group where the wanted Block is. First search forward.
+ while (i->current.uncompressed_offset <= target) {
+ // If the first uncompressed byte of the next group is past
+ // the target offset, it has to be this or an earlier group.
+ if (i->current.uncompressed_offset + i->current.group
+ ->uncompressed_sums[i->current.group->last]
+ > target)
+ break;
+
+ // Go forward to the next group.
+ next_group(i);
+ }
+
+ // Then search backward.
+ while (i->current.uncompressed_offset > target)
+ previous_group(i);
+
+ // Now the target Block is somewhere in i->current.group. Offsets
+ // in groups are relative to the beginning of the group, thus
+ // we must adjust the target before starting the search loop.
+ assert(target >= i->current.uncompressed_offset);
+ target -= i->current.uncompressed_offset;
+
+ // Use binary search to locate the exact Record. It is the first
+ // Record whose uncompressed_sums[] value is greater than target.
+ // This is because we want the rightmost Record that fulfills the
+ // search criterion. It is possible that there are empty Blocks or
+ // padding; we don't want to return them.
+ size_t left = 0;
+ size_t right = i->current.group->last;
+
+ while (left < right) {
+ const size_t pos = left + (right - left) / 2;
+ if (i->current.group->uncompressed_sums[pos] <= target)
+ left = pos + 1;
+ else
+ right = pos;
+ }
+
+ i->current.record = left;
+
+#ifndef NDEBUG
+ // The found Record must not be padding or have zero uncompressed size.
+ assert(!i->current.group->paddings[i->current.record]);
+
+ if (i->current.record == 0)
+ assert(i->current.group->uncompressed_sums[0] > 0);
+ else
+ assert(i->current.group->uncompressed_sums[i->current.record]
+ - i->current.group->uncompressed_sums[
+ i->current.record - 1] > 0);
+#endif
+
+ set_info(i, info);
+
+ return false;
+}
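+
+
+// Illustrative sketch (not part of the upstream source): finding the Block
+// that contains a given uncompressed offset. The target variable is an
+// assumption for the example.
+//
+//    lzma_index_record rec;
+//    if (!lzma_index_locate(idx, &rec, target)) {
+//        // rec.stream_offset is where that Block starts in the Stream
+//        // and rec.uncompressed_offset is where its uncompressed data
+//        // starts.
+//    }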
+
+
+extern LZMA_API(lzma_ret)
+lzma_index_cat(lzma_index *restrict dest, lzma_index *restrict src,
+ lzma_allocator *allocator, lzma_vli padding)
+{
+ if (dest == NULL || src == NULL || dest == src
+ || padding > LZMA_VLI_MAX)
+ return LZMA_PROG_ERROR;
+
+ // Check that the combined size of the Indexes stays within limits.
+ {
+ const lzma_vli dest_size = index_size_unpadded(
+ dest->count, dest->index_list_size);
+ const lzma_vli src_size = index_size_unpadded(
+ src->count, src->index_list_size);
+ if (vli_ceil4(dest_size + src_size) > LZMA_BACKWARD_SIZE_MAX)
+ return LZMA_DATA_ERROR;
+ }
+
+ // Check that the combined size of the "files" (combined total
+ // encoded sizes) stays within limits.
+ {
+ const lzma_vli dest_size = lzma_index_file_size(dest);
+ const lzma_vli src_size = lzma_index_file_size(src);
+ if (dest_size + src_size > LZMA_VLI_MAX
+ || dest_size + src_size + padding
+ > LZMA_VLI_MAX)
+ return LZMA_DATA_ERROR;
+ }
+
+ // Add a padding Record to take into account the size of
+ // Index + Stream Footer + Stream Padding + Stream Header.
+ //
+ // NOTE: This cannot overflow, because Index Size is always
+ // far smaller than LZMA_VLI_MAX, and adding two VLIs
+ // (Index Size and padding) doesn't overflow.
+ padding += index_size(dest->count - dest->old.count,
+ dest->index_list_size
+ - dest->old.index_list_size)
+ + LZMA_STREAM_HEADER_SIZE * 2;
+
+ // While the above cannot overflow, it may still become an invalid VLI.
+ if (padding > LZMA_VLI_MAX)
+ return LZMA_DATA_ERROR;
+
+ // Add the padding Record.
+ {
+ lzma_ret ret;
+
+ // First update the info so we can validate it.
+ dest->old.streams_size += padding;
+
+ if (dest->old.streams_size > LZMA_VLI_MAX
+ || lzma_index_file_size(dest) > LZMA_VLI_MAX)
+ ret = LZMA_DATA_ERROR; // Would grow past the limits.
+ else
+ ret = index_append_real(dest, allocator,
+ padding, 0, true);
+
+ // If something went wrong, undo the updated value and return
+ // the error.
+ if (ret != LZMA_OK) {
+ dest->old.streams_size -= padding;
+ return ret;
+ }
+ }
+
+ // Avoid wasting lots of memory if src->head has only a few records
+ // that fit into dest->tail. That is, combine two groups if possible.
+ //
+ // NOTE: We know that dest->tail != NULL since we just appended
+ // a padding Record. But we don't know about src->head.
+ if (src->head != NULL && src->head->last + 1
+ <= INDEX_GROUP_SIZE - dest->tail->last - 1) {
+ // Copy the first Record.
+ dest->tail->unpadded_sums[dest->tail->last + 1]
+ = vli_ceil4(dest->tail->unpadded_sums[
+ dest->tail->last])
+ + src->head->unpadded_sums[0];
+
+ dest->tail->uncompressed_sums[dest->tail->last + 1]
+ = dest->tail->uncompressed_sums[dest->tail->last]
+ + src->head->uncompressed_sums[0];
+
+ dest->tail->paddings[dest->tail->last + 1]
+ = src->head->paddings[0];
+
+ ++dest->tail->last;
+
+ // Copy the rest.
+ for (size_t i = 1; i < src->head->last; ++i) {
+ dest->tail->unpadded_sums[dest->tail->last + 1]
+ = vli_ceil4(dest->tail->unpadded_sums[
+ dest->tail->last])
+ + src->head->unpadded_sums[i + 1]
+ - src->head->unpadded_sums[i];
+
+ dest->tail->uncompressed_sums[dest->tail->last + 1]
+ = dest->tail->uncompressed_sums[
+ dest->tail->last]
+ + src->head->uncompressed_sums[i + 1]
+ - src->head->uncompressed_sums[i];
+
+ dest->tail->paddings[dest->tail->last + 1]
+ = src->head->paddings[i + 1];
+
+ ++dest->tail->last;
+ }
+
+ // Free the head group of *src. Don't bother updating prev
+ // pointers since those won't be used for anything before
+ // we deallocate the whole *src structure.
+ lzma_index_group *tmp = src->head;
+ src->head = src->head->next;
+ lzma_free(tmp, allocator);
+ }
+
+ // If there are groups left in *src, join them as is. Note that if we
+ // are combining already combined Indexes, src->head can be non-NULL
+ // even if we just combined the old src->head to dest->tail.
+ if (src->head != NULL) {
+ src->head->prev = dest->tail;
+ dest->tail->next = src->head;
+ dest->tail = src->tail;
+ }
+
+ // Update information about earlier Indexes. Only the last Index
+ // from *src won't be counted in dest->old. The last Index is left
+ // open and can be even appended with lzma_index_append().
+ dest->old.count = dest->count + src->old.count;
+ dest->old.index_list_size
+ = dest->index_list_size + src->old.index_list_size;
+ dest->old.streams_size += src->old.streams_size;
+
+ // Update overall information.
+ dest->total_size += src->total_size;
+ dest->uncompressed_size += src->uncompressed_size;
+ dest->count += src->count;
+ dest->index_list_size += src->index_list_size;
+
+ // *src has nothing left but the base structure.
+ lzma_free(src, allocator);
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_index *)
+lzma_index_dup(const lzma_index *src, lzma_allocator *allocator)
+{
+ lzma_index *dest = lzma_alloc(sizeof(lzma_index), allocator);
+ if (dest == NULL)
+ return NULL;
+
+ // Copy the base structure except the pointers.
+ *dest = *src;
+ dest->head = NULL;
+ dest->tail = NULL;
+ dest->current.group = NULL;
+
+ // Copy the Records.
+ const lzma_index_group *src_group = src->head;
+ while (src_group != NULL) {
+ // Allocate a new group.
+ lzma_index_group *dest_group = lzma_alloc(
+ sizeof(lzma_index_group), allocator);
+ if (dest_group == NULL) {
+ lzma_index_end(dest, allocator);
+ return NULL;
+ }
+
+ // Set the pointers.
+ dest_group->prev = dest->tail;
+ dest_group->next = NULL;
+
+ if (dest->head == NULL)
+ dest->head = dest_group;
+ else
+ dest->tail->next = dest_group;
+
+ dest->tail = dest_group;
+
+ dest_group->last = src_group->last;
+
+ // Copy the arrays so that we don't read uninitialized memory.
+ const size_t count = src_group->last + 1;
+ memcpy(dest_group->unpadded_sums, src_group->unpadded_sums,
+ sizeof(lzma_vli) * count);
+ memcpy(dest_group->uncompressed_sums,
+ src_group->uncompressed_sums,
+ sizeof(lzma_vli) * count);
+ memcpy(dest_group->paddings, src_group->paddings,
+ sizeof(bool) * count);
+
+ // Copy also the read position.
+ if (src_group == src->current.group)
+ dest->current.group = dest->tail;
+
+ src_group = src_group->next;
+ }
+
+ return dest;
+}
+
+
+extern LZMA_API(lzma_bool)
+lzma_index_equal(const lzma_index *a, const lzma_index *b)
+{
+ // There is no point in comparing further if the pointers are the same.
+ if (a == b)
+ return true;
+
+ // Compare the basic properties.
+ if (a->total_size != b->total_size
+ || a->uncompressed_size != b->uncompressed_size
+ || a->index_list_size != b->index_list_size
+ || a->count != b->count)
+ return false;
+
+ // Compare the Records.
+ const lzma_index_group *ag = a->head;
+ const lzma_index_group *bg = b->head;
+ while (ag != NULL && bg != NULL) {
+ const size_t count = ag->last + 1;
+ if (ag->last != bg->last
+ || memcmp(ag->unpadded_sums,
+ bg->unpadded_sums,
+ sizeof(lzma_vli) * count) != 0
+ || memcmp(ag->uncompressed_sums,
+ bg->uncompressed_sums,
+ sizeof(lzma_vli) * count) != 0
+ || memcmp(ag->paddings, bg->paddings,
+ sizeof(bool) * count) != 0)
+ return false;
+
+ ag = ag->next;
+ bg = bg->next;
+ }
+
+ return ag == NULL && bg == NULL;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index.h
new file mode 100644
index 00000000..8b21dd78
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index.h
@@ -0,0 +1,69 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file index.h
+/// \brief Handling of Index
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_INDEX_H
+#define LZMA_INDEX_H
+
+#include "common.h"
+
+
+/// Minimum Unpadded Size
+#define UNPADDED_SIZE_MIN LZMA_VLI_C(5)
+
+/// Maximum Unpadded Size
+#define UNPADDED_SIZE_MAX (LZMA_VLI_MAX & ~LZMA_VLI_C(3))
+
+
+/// Get the size of the Index Padding field. This is needed by the Index
+/// encoder and decoder, but applications should have no use for it.
+extern uint32_t lzma_index_padding_size(const lzma_index *i);
+
+
+/// Round the variable-length integer to the next multiple of four.
+static inline lzma_vli
+vli_ceil4(lzma_vli vli)
+{
+ assert(vli <= LZMA_VLI_MAX);
+ return (vli + 3) & ~LZMA_VLI_C(3);
+}
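+
+
+// Worked example (added for illustration): vli_ceil4(39) == 40,
+// vli_ceil4(40) == 40, and vli_ceil4(181) == 184.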
+
+
+/// Calculate the size of the Index field excluding Index Padding
+static inline lzma_vli
+index_size_unpadded(lzma_vli count, lzma_vli index_list_size)
+{
+ // Index Indicator + Number of Records + List of Records + CRC32
+ return 1 + lzma_vli_size(count) + index_list_size + 4;
+}
+
+
+/// Calculate the size of the Index field including Index Padding
+static inline lzma_vli
+index_size(lzma_vli count, lzma_vli index_list_size)
+{
+ return vli_ceil4(index_size_unpadded(count, index_list_size));
+}
+
+
+/// Calculate the total size of the Stream
+static inline lzma_vli
+index_stream_size(lzma_vli blocks_size,
+ lzma_vli count, lzma_vli index_list_size)
+{
+ return LZMA_STREAM_HEADER_SIZE + blocks_size
+ + index_size(count, index_list_size)
+ + LZMA_STREAM_HEADER_SIZE;
+}
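+
+
+// Worked example (added for illustration), assuming LZMA_STREAM_HEADER_SIZE
+// is 12: for a single Record with Unpadded Size 39 and Uncompressed Size 100,
+// both VLIs encode in one byte, so index_list_size is 2,
+// index_size_unpadded(1, 2) == 1 + 1 + 2 + 4 == 8, index_size(1, 2) == 8,
+// and index_stream_size(40, 1, 2) == 12 + 40 + 8 + 12 == 72.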
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_decoder.c
new file mode 100644
index 00000000..09898acc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_decoder.c
@@ -0,0 +1,325 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file index_decoder.c
+/// \brief Decodes the Index field
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "index.h"
+#include "check.h"
+
+
+struct lzma_coder_s {
+ enum {
+ SEQ_INDICATOR,
+ SEQ_COUNT,
+ SEQ_MEMUSAGE,
+ SEQ_UNPADDED,
+ SEQ_UNCOMPRESSED,
+ SEQ_PADDING_INIT,
+ SEQ_PADDING,
+ SEQ_CRC32,
+ } sequence;
+
+ /// Memory usage limit
+ uint64_t memlimit;
+
+ /// Target Index
+ lzma_index *index;
+
+ /// Number of Records left to decode.
+ lzma_vli count;
+
+ /// The most recent Unpadded Size field
+ lzma_vli unpadded_size;
+
+ /// The most recent Uncompressed Size field
+ lzma_vli uncompressed_size;
+
+ /// Position in integers
+ size_t pos;
+
+ /// CRC32 of the List of Records field
+ uint32_t crc32;
+};
+
+
+static lzma_ret
+index_decode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out lzma_attribute((unused)),
+ size_t *restrict out_pos lzma_attribute((unused)),
+ size_t out_size lzma_attribute((unused)),
+ lzma_action action lzma_attribute((unused)))
+{
+ // Similar optimization to the one in index_encoder.c
+ const size_t in_start = *in_pos;
+ lzma_ret ret = LZMA_OK;
+
+ while (*in_pos < in_size)
+ switch (coder->sequence) {
+ case SEQ_INDICATOR:
+ // Return LZMA_DATA_ERROR instead of e.g. LZMA_PROG_ERROR or
+ // LZMA_FORMAT_ERROR, because a typical use case for the Index
+ // decoder is parsing the Stream backwards. If seeking
+ // backward from the Stream Footer gives us something that
+ // doesn't begin with Index Indicator, the file is considered
+ // corrupt, not "programming error" or "unrecognized file
+ // format". One could argue that the application should
+ // verify the Index Indicator before trying to decode the
+ // Index, but well, I suppose it is simpler this way.
+ if (in[(*in_pos)++] != 0x00)
+ return LZMA_DATA_ERROR;
+
+ coder->sequence = SEQ_COUNT;
+ break;
+
+ case SEQ_COUNT:
+ ret = lzma_vli_decode(&coder->count, &coder->pos,
+ in, in_pos, in_size);
+ if (ret != LZMA_STREAM_END)
+ goto out;
+
+ coder->pos = 0;
+ coder->sequence = SEQ_MEMUSAGE;
+
+ // Fall through
+
+ case SEQ_MEMUSAGE:
+ if (lzma_index_memusage(coder->count) > coder->memlimit) {
+ ret = LZMA_MEMLIMIT_ERROR;
+ goto out;
+ }
+
+ ret = LZMA_OK;
+ coder->sequence = coder->count == 0
+ ? SEQ_PADDING_INIT : SEQ_UNPADDED;
+ break;
+
+ case SEQ_UNPADDED:
+ case SEQ_UNCOMPRESSED: {
+ lzma_vli *size = coder->sequence == SEQ_UNPADDED
+ ? &coder->unpadded_size
+ : &coder->uncompressed_size;
+
+ ret = lzma_vli_decode(size, &coder->pos,
+ in, in_pos, in_size);
+ if (ret != LZMA_STREAM_END)
+ goto out;
+
+ ret = LZMA_OK;
+ coder->pos = 0;
+
+ if (coder->sequence == SEQ_UNPADDED) {
+ // Validate that encoded Unpadded Size isn't too small
+ // or too big.
+ if (coder->unpadded_size < UNPADDED_SIZE_MIN
+ || coder->unpadded_size
+ > UNPADDED_SIZE_MAX)
+ return LZMA_DATA_ERROR;
+
+ coder->sequence = SEQ_UNCOMPRESSED;
+ } else {
+ // Add the decoded Record to the Index.
+ return_if_error(lzma_index_append(
+ coder->index, allocator,
+ coder->unpadded_size,
+ coder->uncompressed_size));
+
+ // Check if this was the last Record.
+ coder->sequence = --coder->count == 0
+ ? SEQ_PADDING_INIT
+ : SEQ_UNPADDED;
+ }
+
+ break;
+ }
+
+ case SEQ_PADDING_INIT:
+ coder->pos = lzma_index_padding_size(coder->index);
+ coder->sequence = SEQ_PADDING;
+
+ // Fall through
+
+ case SEQ_PADDING:
+ if (coder->pos > 0) {
+ --coder->pos;
+ if (in[(*in_pos)++] != 0x00)
+ return LZMA_DATA_ERROR;
+
+ break;
+ }
+
+ // Finish the CRC32 calculation.
+ coder->crc32 = lzma_crc32(in + in_start,
+ *in_pos - in_start, coder->crc32);
+
+ coder->sequence = SEQ_CRC32;
+
+ // Fall through
+
+ case SEQ_CRC32:
+ do {
+ if (*in_pos == in_size)
+ return LZMA_OK;
+
+ if (((coder->crc32 >> (coder->pos * 8)) & 0xFF)
+ != in[(*in_pos)++])
+ return LZMA_DATA_ERROR;
+
+ } while (++coder->pos < 4);
+
+ // Make index NULL so we don't free it unintentionally.
+ coder->index = NULL;
+
+ return LZMA_STREAM_END;
+
+ default:
+ assert(0);
+ return LZMA_PROG_ERROR;
+ }
+
+out:
+ // Update the CRC32.
+ coder->crc32 = lzma_crc32(in + in_start,
+ *in_pos - in_start, coder->crc32);
+
+ return ret;
+}
+
+
+static void
+index_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_index_end(coder->index, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+static lzma_ret
+index_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
+ uint64_t *old_memlimit, uint64_t new_memlimit)
+{
+ *memusage = lzma_index_memusage(coder->count);
+
+ if (new_memlimit != 0 && new_memlimit < *memusage)
+ return LZMA_MEMLIMIT_ERROR;
+
+ *old_memlimit = coder->memlimit;
+ coder->memlimit = new_memlimit;
+
+ return LZMA_OK;
+}
+
+
+static lzma_ret
+index_decoder_reset(lzma_coder *coder, lzma_allocator *allocator,
+ lzma_index **i, uint64_t memlimit)
+{
+ // We always allocate a new lzma_index.
+ *i = lzma_index_init(NULL, allocator);
+ if (*i == NULL)
+ return LZMA_MEM_ERROR;
+
+ // Initialize the rest.
+ coder->sequence = SEQ_INDICATOR;
+ coder->memlimit = memlimit;
+ coder->index = *i;
+ coder->count = 0; // Needs to be initialized due to _memconfig().
+ coder->pos = 0;
+ coder->crc32 = 0;
+
+ return LZMA_OK;
+}
+
+
+static lzma_ret
+index_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ lzma_index **i, uint64_t memlimit)
+{
+ lzma_next_coder_init(&index_decoder_init, next, allocator);
+
+ if (i == NULL || memlimit == 0)
+ return LZMA_PROG_ERROR;
+
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &index_decode;
+ next->end = &index_decoder_end;
+ next->memconfig = &index_decoder_memconfig;
+ next->coder->index = NULL;
+ } else {
+ lzma_index_end(next->coder->index, allocator);
+ }
+
+ return index_decoder_reset(next->coder, allocator, i, memlimit);
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_index_decoder(lzma_stream *strm, lzma_index **i, uint64_t memlimit)
+{
+ lzma_next_strm_init(index_decoder_init, strm, i, memlimit);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_index_buffer_decode(
+ lzma_index **i, uint64_t *memlimit, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos, size_t in_size)
+{
+ // Sanity checks
+ if (i == NULL || in == NULL || in_pos == NULL || *in_pos > in_size)
+ return LZMA_PROG_ERROR;
+
+ // Initialize the decoder.
+ lzma_coder coder;
+ return_if_error(index_decoder_reset(&coder, allocator, i, *memlimit));
+
+ // Store the input start position so that we can restore it in case
+ // of an error.
+ const size_t in_start = *in_pos;
+
+ // Do the actual decoding.
+ lzma_ret ret = index_decode(&coder, allocator, in, in_pos, in_size,
+ NULL, NULL, 0, LZMA_RUN);
+
+ if (ret == LZMA_STREAM_END) {
+ ret = LZMA_OK;
+ } else {
+ // Something went wrong, free the Index structure and restore
+ // the input position.
+ lzma_index_end(*i, allocator);
+ *i = NULL;
+ *in_pos = in_start;
+
+ if (ret == LZMA_OK) {
+ // The input is truncated or otherwise corrupt.
+ // Use LZMA_DATA_ERROR instead of LZMA_BUF_ERROR,
+ // just like lzma_vli_decode() does in single-call mode.
+ ret = LZMA_DATA_ERROR;
+
+ } else if (ret == LZMA_MEMLIMIT_ERROR) {
+ // Tell the caller how much memory would have
+ // been needed.
+ *memlimit = lzma_index_memusage(coder.count);
+ }
+ }
+
+ return ret;
+}
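+
+
+// Illustrative sketch (not part of the upstream source): single-call decoding
+// of an Index that is already in memory. The buffer names are assumptions.
+//
+//    uint64_t memlimit = UINT64_MAX;
+//    lzma_index *idx = NULL;
+//    size_t pos = 0;
+//    if (lzma_index_buffer_decode(&idx, &memlimit, NULL,
+//            index_buf, &pos, index_buf_size) == LZMA_OK) {
+//        // idx now describes the decoded Records; free it with
+//        // lzma_index_end(idx, NULL) when done.
+//    }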
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_encoder.c
new file mode 100644
index 00000000..b346c19a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_encoder.c
@@ -0,0 +1,260 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file index_encoder.c
+/// \brief Encodes the Index field
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "index_encoder.h"
+#include "index.h"
+#include "check.h"
+
+
+struct lzma_coder_s {
+ enum {
+ SEQ_INDICATOR,
+ SEQ_COUNT,
+ SEQ_UNPADDED,
+ SEQ_UNCOMPRESSED,
+ SEQ_NEXT,
+ SEQ_PADDING,
+ SEQ_CRC32,
+ } sequence;
+
+ /// Index given to us to encode. Note that we modify it in the sense
+ /// that we read it, and the read position is tracked in the
+ /// lzma_index structure.
+ lzma_index *index;
+
+ /// The current Index Record being encoded
+ lzma_index_record record;
+
+ /// Position in integers
+ size_t pos;
+
+ /// CRC32 of the List of Records field
+ uint32_t crc32;
+};
+
+
+static lzma_ret
+index_encode(lzma_coder *coder,
+ lzma_allocator *allocator lzma_attribute((unused)),
+ const uint8_t *restrict in lzma_attribute((unused)),
+ size_t *restrict in_pos lzma_attribute((unused)),
+ size_t in_size lzma_attribute((unused)),
+ uint8_t *restrict out, size_t *restrict out_pos,
+ size_t out_size, lzma_action action lzma_attribute((unused)))
+{
+ // Position from which to start calculating the CRC32. The idea is
+ // that we need to call lzma_crc32() only once per call to
+ // index_encode().
+ const size_t out_start = *out_pos;
+
+ // Return value to use if we return at the end of this function.
+ // We use "goto out" to jump out of the while-switch construct
+ // instead of returning directly, because that way we don't need
+ // to copy and paste the lzma_crc32() call into many places.
+ lzma_ret ret = LZMA_OK;
+
+ while (*out_pos < out_size)
+ switch (coder->sequence) {
+ case SEQ_INDICATOR:
+ out[*out_pos] = 0x00;
+ ++*out_pos;
+ coder->sequence = SEQ_COUNT;
+ break;
+
+ case SEQ_COUNT: {
+ const lzma_vli index_count = lzma_index_count(coder->index);
+ ret = lzma_vli_encode(index_count, &coder->pos,
+ out, out_pos, out_size);
+ if (ret != LZMA_STREAM_END)
+ goto out;
+
+ ret = LZMA_OK;
+ coder->pos = 0;
+ coder->sequence = SEQ_NEXT;
+ break;
+ }
+
+ case SEQ_NEXT:
+ if (lzma_index_read(coder->index, &coder->record)) {
+ // Get the size of the Index Padding field.
+ coder->pos = lzma_index_padding_size(coder->index);
+ assert(coder->pos <= 3);
+ coder->sequence = SEQ_PADDING;
+ break;
+ }
+
+ // Unpadded Size must be within valid limits.
+ if (coder->record.unpadded_size < UNPADDED_SIZE_MIN
+ || coder->record.unpadded_size
+ > UNPADDED_SIZE_MAX)
+ return LZMA_PROG_ERROR;
+
+ coder->sequence = SEQ_UNPADDED;
+
+ // Fall through
+
+ case SEQ_UNPADDED:
+ case SEQ_UNCOMPRESSED: {
+ const lzma_vli size = coder->sequence == SEQ_UNPADDED
+ ? coder->record.unpadded_size
+ : coder->record.uncompressed_size;
+
+ ret = lzma_vli_encode(size, &coder->pos,
+ out, out_pos, out_size);
+ if (ret != LZMA_STREAM_END)
+ goto out;
+
+ ret = LZMA_OK;
+ coder->pos = 0;
+
+ // Advance to SEQ_UNCOMPRESSED or SEQ_NEXT.
+ ++coder->sequence;
+ break;
+ }
+
+ case SEQ_PADDING:
+ if (coder->pos > 0) {
+ --coder->pos;
+ out[(*out_pos)++] = 0x00;
+ break;
+ }
+
+ // Finish the CRC32 calculation.
+ coder->crc32 = lzma_crc32(out + out_start,
+ *out_pos - out_start, coder->crc32);
+
+ coder->sequence = SEQ_CRC32;
+
+ // Fall through
+
+ case SEQ_CRC32:
+ // We don't use the main loop, because we don't want
+ // coder->crc32 to be touched anymore.
+ do {
+ if (*out_pos == out_size)
+ return LZMA_OK;
+
+ out[*out_pos] = (coder->crc32 >> (coder->pos * 8))
+ & 0xFF;
+ ++*out_pos;
+
+ } while (++coder->pos < 4);
+
+ return LZMA_STREAM_END;
+
+ default:
+ assert(0);
+ return LZMA_PROG_ERROR;
+ }
+
+out:
+ // Update the CRC32.
+ coder->crc32 = lzma_crc32(out + out_start,
+ *out_pos - out_start, coder->crc32);
+
+ return ret;
+}
+
+
+static void
+index_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+static void
+index_encoder_reset(lzma_coder *coder, lzma_index *i)
+{
+ lzma_index_rewind(i);
+
+ coder->sequence = SEQ_INDICATOR;
+ coder->index = i;
+ coder->pos = 0;
+ coder->crc32 = 0;
+
+ return;
+}
+
+
+extern lzma_ret
+lzma_index_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ lzma_index *i)
+{
+ lzma_next_coder_init(&lzma_index_encoder_init, next, allocator);
+
+ if (i == NULL)
+ return LZMA_PROG_ERROR;
+
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &index_encode;
+ next->end = &index_encoder_end;
+ }
+
+ index_encoder_reset(next->coder, i);
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_index_encoder(lzma_stream *strm, lzma_index *i)
+{
+ lzma_next_strm_init(lzma_index_encoder_init, strm, i);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_index_buffer_encode(lzma_index *i,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ // Validate the arguments.
+ if (i == NULL || out == NULL || out_pos == NULL || *out_pos > out_size)
+ return LZMA_PROG_ERROR;
+
+ // Don't try to encode if there's not enough output space.
+ if (out_size - *out_pos < lzma_index_size(i))
+ return LZMA_BUF_ERROR;
+
+ // The Index encoder needs just one small data structure so we can
+ // allocate it on the stack.
+ lzma_coder coder;
+ index_encoder_reset(&coder, i);
+
+ // Do the actual encoding. This should never fail, but store
+ // the original *out_pos just in case.
+ const size_t out_start = *out_pos;
+ lzma_ret ret = index_encode(&coder, NULL, NULL, NULL, 0,
+ out, out_pos, out_size, LZMA_RUN);
+
+ if (ret == LZMA_STREAM_END) {
+ ret = LZMA_OK;
+ } else {
+ // We should never get here, but just in case, restore the
+ // output position and set the error accordingly if something
+ // goes wrong and debugging isn't enabled.
+ assert(0);
+ *out_pos = out_start;
+ ret = LZMA_PROG_ERROR;
+ }
+
+ return ret;
+}
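+
+
+// Illustrative sketch (not part of the upstream source): single-call encoding
+// of an in-memory Index. The output buffer is an assumption for the example.
+//
+//    const lzma_vli needed = lzma_index_size(idx);
+//    uint8_t out[256]; // assumed to be at least `needed` bytes
+//    size_t pos = 0;
+//    if (needed <= sizeof(out)
+//            && lzma_index_buffer_encode(idx, out, &pos, sizeof(out))
+//                == LZMA_OK) {
+//        // out[0..pos) holds the encoded Index; pos == needed.
+//    }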
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_encoder.h
new file mode 100644
index 00000000..5e1ce4ea
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_encoder.h
@@ -0,0 +1,25 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file index_encoder.h
+/// \brief Encodes the Index field
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_INDEX_ENCODER_H
+#define LZMA_INDEX_ENCODER_H
+
+#include "common.h"
+
+
+extern lzma_ret lzma_index_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, lzma_index *i);
+
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_hash.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_hash.c
new file mode 100644
index 00000000..f55ea909
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/index_hash.c
@@ -0,0 +1,334 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file index_hash.c
+/// \brief Validates Index by using a hash function
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+#include "index.h"
+#include "check.h"
+
+
+typedef struct {
+ /// Sum of the Block sizes (including Block Padding)
+ lzma_vli blocks_size;
+
+ /// Sum of the Uncompressed Size fields
+ lzma_vli uncompressed_size;
+
+ /// Number of Records
+ lzma_vli count;
+
+ /// Size of the List of Index Records as bytes
+ lzma_vli index_list_size;
+
+ /// Check calculated from Unpadded Sizes and Uncompressed Sizes.
+ lzma_check_state check;
+
+} lzma_index_hash_info;
+
+
+struct lzma_index_hash_s {
+ enum {
+ SEQ_BLOCK,
+ SEQ_COUNT,
+ SEQ_UNPADDED,
+ SEQ_UNCOMPRESSED,
+ SEQ_PADDING_INIT,
+ SEQ_PADDING,
+ SEQ_CRC32,
+ } sequence;
+
+ /// Information collected while decoding the actual Blocks.
+ lzma_index_hash_info blocks;
+
+ /// Information collected from the Index field.
+ lzma_index_hash_info records;
+
+ /// Number of Records not fully decoded
+ lzma_vli remaining;
+
+ /// Unpadded Size currently being read from an Index Record.
+ lzma_vli unpadded_size;
+
+ /// Uncompressed Size currently being read from an Index Record.
+ lzma_vli uncompressed_size;
+
+ /// Position in variable-length integers when decoding them from
+ /// the List of Records.
+ size_t pos;
+
+ /// CRC32 of the Index
+ uint32_t crc32;
+};
+
+
+extern LZMA_API(lzma_index_hash *)
+lzma_index_hash_init(lzma_index_hash *index_hash, lzma_allocator *allocator)
+{
+ if (index_hash == NULL) {
+ index_hash = lzma_alloc(sizeof(lzma_index_hash), allocator);
+ if (index_hash == NULL)
+ return NULL;
+ }
+
+ index_hash->sequence = SEQ_BLOCK;
+ index_hash->blocks.blocks_size = 0;
+ index_hash->blocks.uncompressed_size = 0;
+ index_hash->blocks.count = 0;
+ index_hash->blocks.index_list_size = 0;
+ index_hash->records.blocks_size = 0;
+ index_hash->records.uncompressed_size = 0;
+ index_hash->records.count = 0;
+ index_hash->records.index_list_size = 0;
+ index_hash->unpadded_size = 0;
+ index_hash->uncompressed_size = 0;
+ index_hash->pos = 0;
+ index_hash->crc32 = 0;
+
+ // These cannot fail because LZMA_CHECK_BEST is known to be supported.
+ (void)lzma_check_init(&index_hash->blocks.check, LZMA_CHECK_BEST);
+ (void)lzma_check_init(&index_hash->records.check, LZMA_CHECK_BEST);
+
+ return index_hash;
+}
+
+
+extern LZMA_API(void)
+lzma_index_hash_end(lzma_index_hash *index_hash, lzma_allocator *allocator)
+{
+ lzma_free(index_hash, allocator);
+ return;
+}
+
+
+extern LZMA_API(lzma_vli)
+lzma_index_hash_size(const lzma_index_hash *index_hash)
+{
+ // Get the size of the Index from ->blocks instead of ->records for
+ // cases where the application wants to know the Index Size before
+ // decoding the Index.
+ return index_size(index_hash->blocks.count,
+ index_hash->blocks.index_list_size);
+}
+
+
+/// Updates the sizes and the hash without any validation.
+static lzma_ret
+hash_append(lzma_index_hash_info *info, lzma_vli unpadded_size,
+ lzma_vli uncompressed_size)
+{
+ info->blocks_size += vli_ceil4(unpadded_size);
+ info->uncompressed_size += uncompressed_size;
+ info->index_list_size += lzma_vli_size(unpadded_size)
+ + lzma_vli_size(uncompressed_size);
+ ++info->count;
+
+ const lzma_vli sizes[2] = { unpadded_size, uncompressed_size };
+ lzma_check_update(&info->check, LZMA_CHECK_BEST,
+ (const uint8_t *)(sizes), sizeof(sizes));
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_index_hash_append(lzma_index_hash *index_hash, lzma_vli unpadded_size,
+ lzma_vli uncompressed_size)
+{
+ // Validate the arguments.
+ if (index_hash->sequence != SEQ_BLOCK
+ || unpadded_size < UNPADDED_SIZE_MIN
+ || unpadded_size > UNPADDED_SIZE_MAX
+ || uncompressed_size > LZMA_VLI_MAX)
+ return LZMA_PROG_ERROR;
+
+ // Update the hash.
+ return_if_error(hash_append(&index_hash->blocks,
+ unpadded_size, uncompressed_size));
+
+ // Validate the properties of *info are still in allowed limits.
+ if (index_hash->blocks.blocks_size > LZMA_VLI_MAX
+ || index_hash->blocks.uncompressed_size > LZMA_VLI_MAX
+ || index_size(index_hash->blocks.count,
+ index_hash->blocks.index_list_size)
+ > LZMA_BACKWARD_SIZE_MAX
+ || index_stream_size(index_hash->blocks.blocks_size,
+ index_hash->blocks.count,
+ index_hash->blocks.index_list_size)
+ > LZMA_VLI_MAX)
+ return LZMA_DATA_ERROR;
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_index_hash_decode(lzma_index_hash *index_hash, const uint8_t *in,
+ size_t *in_pos, size_t in_size)
+{
+ // Catch zero input buffer here, because in contrast to Index encoder
+ // and decoder functions, applications call this function directly
+ // instead of via lzma_code(), which does the buffer checking.
+ if (*in_pos >= in_size)
+ return LZMA_BUF_ERROR;
+
+ // NOTE: This function has many similarities to index_encode() and
+ // index_decode() functions found from index_encoder.c and
+ // index_decoder.c. See the comments especially in index_encoder.c.
+ const size_t in_start = *in_pos;
+ lzma_ret ret = LZMA_OK;
+
+ while (*in_pos < in_size)
+ switch (index_hash->sequence) {
+ case SEQ_BLOCK:
+ // Check that the Index Indicator is present.
+ if (in[(*in_pos)++] != 0x00)
+ return LZMA_DATA_ERROR;
+
+ index_hash->sequence = SEQ_COUNT;
+ break;
+
+ case SEQ_COUNT: {
+ ret = lzma_vli_decode(&index_hash->remaining,
+ &index_hash->pos, in, in_pos, in_size);
+ if (ret != LZMA_STREAM_END)
+ goto out;
+
+ // The count must match the count of the Blocks decoded.
+ if (index_hash->remaining != index_hash->blocks.count)
+ return LZMA_DATA_ERROR;
+
+ ret = LZMA_OK;
+ index_hash->pos = 0;
+
+ // Handle the special case when there are no Blocks.
+ index_hash->sequence = index_hash->remaining == 0
+ ? SEQ_PADDING_INIT : SEQ_UNPADDED;
+ break;
+ }
+
+ case SEQ_UNPADDED:
+ case SEQ_UNCOMPRESSED: {
+ lzma_vli *size = index_hash->sequence == SEQ_UNPADDED
+ ? &index_hash->unpadded_size
+ : &index_hash->uncompressed_size;
+
+ ret = lzma_vli_decode(size, &index_hash->pos,
+ in, in_pos, in_size);
+ if (ret != LZMA_STREAM_END)
+ goto out;
+
+ ret = LZMA_OK;
+ index_hash->pos = 0;
+
+ if (index_hash->sequence == SEQ_UNPADDED) {
+ if (index_hash->unpadded_size < UNPADDED_SIZE_MIN
+ || index_hash->unpadded_size
+ > UNPADDED_SIZE_MAX)
+ return LZMA_DATA_ERROR;
+
+ index_hash->sequence = SEQ_UNCOMPRESSED;
+ } else {
+ // Update the hash.
+ return_if_error(hash_append(&index_hash->records,
+ index_hash->unpadded_size,
+ index_hash->uncompressed_size));
+
+ // Verify that we don't go over the known sizes. Note
+ // that this validation is simpler than the one used
+ // in lzma_index_hash_append(), because here we know
+ // that values in index_hash->blocks are already
+ // validated and we are fine as long as we don't
+ // exceed them in index_hash->records.
+ if (index_hash->blocks.blocks_size
+ < index_hash->records.blocks_size
+ || index_hash->blocks.uncompressed_size
+ < index_hash->records.uncompressed_size
+ || index_hash->blocks.index_list_size
+ < index_hash->records.index_list_size)
+ return LZMA_DATA_ERROR;
+
+ // Check if this was the last Record.
+ index_hash->sequence = --index_hash->remaining == 0
+ ? SEQ_PADDING_INIT : SEQ_UNPADDED;
+ }
+
+ break;
+ }
+
+ case SEQ_PADDING_INIT:
+ index_hash->pos = (LZMA_VLI_C(4) - index_size_unpadded(
+ index_hash->records.count,
+ index_hash->records.index_list_size)) & 3;
+ index_hash->sequence = SEQ_PADDING;
+
+ // Fall through
+
+ case SEQ_PADDING:
+ if (index_hash->pos > 0) {
+ --index_hash->pos;
+ if (in[(*in_pos)++] != 0x00)
+ return LZMA_DATA_ERROR;
+
+ break;
+ }
+
+ // Compare the sizes.
+ if (index_hash->blocks.blocks_size
+ != index_hash->records.blocks_size
+ || index_hash->blocks.uncompressed_size
+ != index_hash->records.uncompressed_size
+ || index_hash->blocks.index_list_size
+ != index_hash->records.index_list_size)
+ return LZMA_DATA_ERROR;
+
+ // Finish the hashes and compare them.
+ lzma_check_finish(&index_hash->blocks.check, LZMA_CHECK_BEST);
+ lzma_check_finish(&index_hash->records.check, LZMA_CHECK_BEST);
+ if (memcmp(index_hash->blocks.check.buffer.u8,
+ index_hash->records.check.buffer.u8,
+ lzma_check_size(LZMA_CHECK_BEST)) != 0)
+ return LZMA_DATA_ERROR;
+
+ // Finish the CRC32 calculation.
+ index_hash->crc32 = lzma_crc32(in + in_start,
+ *in_pos - in_start, index_hash->crc32);
+
+ index_hash->sequence = SEQ_CRC32;
+
+ // Fall through
+
+ case SEQ_CRC32:
+ do {
+ if (*in_pos == in_size)
+ return LZMA_OK;
+
+ if (((index_hash->crc32 >> (index_hash->pos * 8))
+ & 0xFF) != in[(*in_pos)++])
+ return LZMA_DATA_ERROR;
+
+ } while (++index_hash->pos < 4);
+
+ return LZMA_STREAM_END;
+
+ default:
+ assert(0);
+ return LZMA_PROG_ERROR;
+ }
+
+out:
+ // Update the CRC32.
+ index_hash->crc32 = lzma_crc32(in + in_start,
+ *in_pos - in_start, index_hash->crc32);
+
+ return ret;
+}
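+
+
+// Illustrative sketch (not part of the upstream source): the intended flow of
+// the hash-based Index validation. Names other than the lzma_* calls are
+// assumptions for the example.
+//
+//    lzma_index_hash *h = lzma_index_hash_init(NULL, NULL);
+//    // While decoding each Block, record what was actually seen:
+//    //     lzma_index_hash_append(h, unpadded_size, uncompressed_size);
+//    // Then feed the raw Index field bytes to the hash:
+//    //     lzma_ret r = lzma_index_hash_decode(h, in, &in_pos, in_size);
+//    // LZMA_STREAM_END means the Index matched the decoded Blocks.
+//    lzma_index_hash_end(h, NULL);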
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_buffer_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_buffer_decoder.c
new file mode 100644
index 00000000..daab1ff7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_buffer_decoder.c
@@ -0,0 +1,93 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file stream_buffer_decoder.c
+/// \brief Single-call .xz Stream decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "stream_decoder.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_stream_buffer_decode(uint64_t *memlimit, uint32_t flags,
+ lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos, size_t in_size,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ // Sanity checks
+ if (in_pos == NULL || (in == NULL && *in_pos != in_size)
+ || *in_pos > in_size || out_pos == NULL
+ || (out == NULL && *out_pos != out_size)
+ || *out_pos > out_size)
+ return LZMA_PROG_ERROR;
+
+ // Catch flags that are not allowed in buffer-to-buffer decoding.
+ if (flags & LZMA_TELL_ANY_CHECK)
+ return LZMA_PROG_ERROR;
+
+ // Initialize the Stream decoder.
+ // TODO: We need something to tell the decoder that it can use the
+ // output buffer as workspace, and thus save a significant amount of RAM.
+ lzma_next_coder stream_decoder = LZMA_NEXT_CODER_INIT;
+ lzma_ret ret = lzma_stream_decoder_init(
+ &stream_decoder, allocator, *memlimit, flags);
+
+ if (ret == LZMA_OK) {
+ // Save the positions so that we can restore them in case
+ // an error occurs.
+ const size_t in_start = *in_pos;
+ const size_t out_start = *out_pos;
+
+ // Do the actual decoding.
+ ret = stream_decoder.code(stream_decoder.coder, allocator,
+ in, in_pos, in_size, out, out_pos, out_size,
+ LZMA_FINISH);
+
+ if (ret == LZMA_STREAM_END) {
+ ret = LZMA_OK;
+ } else {
+ // Something went wrong, restore the positions.
+ *in_pos = in_start;
+ *out_pos = out_start;
+
+ if (ret == LZMA_OK) {
+ // Either the input was truncated or the
+ // output buffer was too small.
+ assert(*in_pos == in_size
+ || *out_pos == out_size);
+
+ // If all the input was consumed, then the
+ // input is truncated, even if the output
+ // buffer is also full. This is because
+ // processing the last byte of the Stream
+ // never produces output.
+ if (*in_pos == in_size)
+ ret = LZMA_DATA_ERROR;
+ else
+ ret = LZMA_BUF_ERROR;
+
+ } else if (ret == LZMA_MEMLIMIT_ERROR) {
+ // Let the caller know how much memory would
+ // have been needed.
+ uint64_t memusage;
+ (void)stream_decoder.memconfig(
+ stream_decoder.coder,
+ memlimit, &memusage, 0);
+ }
+ }
+ }
+
+ // Free the decoder memory. This needs to be done even if
+ // initialization fails, because the internal API doesn't
+ // require the initialization function to free its memory on error.
+ lzma_next_end(&stream_decoder, allocator);
+
+ return ret;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_buffer_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_buffer_encoder.c
new file mode 100644
index 00000000..ae00f133
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_buffer_encoder.c
@@ -0,0 +1,133 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file stream_buffer_encoder.c
+/// \brief Single-call .xz Stream encoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "index.h"
+
+
+/// Maximum size of Index that has exactly one Record.
+/// Index Indicator + Number of Records + Record + CRC32 rounded up to
+/// the next multiple of four.
+#define INDEX_BOUND ((1 + 1 + 2 * LZMA_VLI_BYTES_MAX + 4 + 3) & ~3)
+
+/// Stream Header, Stream Footer, and Index
+#define HEADERS_BOUND (2 * LZMA_STREAM_HEADER_SIZE + INDEX_BOUND)
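+
+// Worked example (added for illustration), assuming LZMA_VLI_BYTES_MAX is 9
+// and LZMA_STREAM_HEADER_SIZE is 12: INDEX_BOUND is (1 + 1 + 18 + 4 + 3) & ~3
+// == 24 bytes and HEADERS_BOUND is 2 * 12 + 24 == 48 bytes.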
+
+
+extern LZMA_API(size_t)
+lzma_stream_buffer_bound(size_t uncompressed_size)
+{
+ // Get the maximum possible size of a Block.
+ const size_t block_bound = lzma_block_buffer_bound(uncompressed_size);
+ if (block_bound == 0)
+ return 0;
+
+ // Catch the possible integer overflow and also prevent the size of
+ // the Stream from exceeding LZMA_VLI_MAX (theoretically possible on
+ // 64-bit systems).
+ if (MIN(SIZE_MAX, LZMA_VLI_MAX) - block_bound < HEADERS_BOUND)
+ return 0;
+
+ return block_bound + HEADERS_BOUND;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
+ lzma_allocator *allocator, const uint8_t *in, size_t in_size,
+ uint8_t *out, size_t *out_pos_ptr, size_t out_size)
+{
+ // Sanity checks
+ if (filters == NULL || (unsigned int)(check) > LZMA_CHECK_ID_MAX
+ || (in == NULL && in_size != 0) || out == NULL
+ || out_pos_ptr == NULL || *out_pos_ptr > out_size)
+ return LZMA_PROG_ERROR;
+
+ // Note for the paranoids: Index encoder prevents the Stream from
+ // getting too big and still being accepted with LZMA_OK, and Block
+ // encoder catches if the input is too big. So we don't need to
+ // separately check if the buffers are too big.
+
+ // Use a local copy. We update *out_pos_ptr only if everything
+ // succeeds.
+ size_t out_pos = *out_pos_ptr;
+
+ // Check that there's enough space for both Stream Header and
+ // Stream Footer.
+ if (out_size - out_pos <= 2 * LZMA_STREAM_HEADER_SIZE)
+ return LZMA_BUF_ERROR;
+
+ // Reserve space for Stream Footer so we don't need to check for
+ // available space again before encoding Stream Footer.
+ out_size -= LZMA_STREAM_HEADER_SIZE;
+
+ // Encode the Stream Header.
+ lzma_stream_flags stream_flags = {
+ .version = 0,
+ .check = check,
+ };
+
+ if (lzma_stream_header_encode(&stream_flags, out + out_pos)
+ != LZMA_OK)
+ return LZMA_PROG_ERROR;
+
+ out_pos += LZMA_STREAM_HEADER_SIZE;
+
+ // Block
+ lzma_block block = {
+ .version = 0,
+ .check = check,
+ .filters = filters,
+ };
+
+ return_if_error(lzma_block_buffer_encode(&block, allocator,
+ in, in_size, out, &out_pos, out_size));
+
+ // Index
+ {
+ // Create an Index with one Record.
+ lzma_index *i = lzma_index_init(NULL, NULL);
+ if (i == NULL)
+ return LZMA_MEM_ERROR;
+
+ lzma_ret ret = lzma_index_append(i, NULL,
+ lzma_block_unpadded_size(&block),
+ block.uncompressed_size);
+
+ // If adding the Record was successful, encode the Index
+ // and get its size which will be stored into Stream Footer.
+ if (ret == LZMA_OK) {
+ ret = lzma_index_buffer_encode(
+ i, out, &out_pos, out_size);
+
+ stream_flags.backward_size = lzma_index_size(i);
+ }
+
+ lzma_index_end(i, NULL);
+
+ if (ret != LZMA_OK)
+ return ret;
+ }
+
+ // Stream Footer. We have already reserved space for this.
+ if (lzma_stream_footer_encode(&stream_flags, out + out_pos)
+ != LZMA_OK)
+ return LZMA_PROG_ERROR;
+
+ out_pos += LZMA_STREAM_HEADER_SIZE;
+
+ // Everything went fine, make the new output position available
+ // to the application.
+ *out_pos_ptr = out_pos;
+ return LZMA_OK;
+}
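
Editor's note (not part of the imported file): a minimal single-call usage sketch for lzma_stream_buffer_bound() and lzma_stream_buffer_encode() as declared above. It assumes the public preset helper lzma_lzma_preset() and the LZMA_FILTER_LZMA2 ID from other parts of this import; error handling is reduced to returning 0.

#include <lzma.h>
#include <stdint.h>
#include <stdlib.h>

/* Compress in[0..in_size) into a freshly malloc()ed buffer using the
 * single-call Stream encoder.  Returns the compressed size, or 0 on
 * error (in which case *out is left as NULL). */
static size_t
compress_buffer(const uint8_t *in, size_t in_size, uint8_t **out)
{
	*out = NULL;

	/* Worst-case size of a single-Block .xz Stream for in_size bytes. */
	const size_t out_max = lzma_stream_buffer_bound(in_size);
	if (out_max == 0)
		return 0;

	/* One-filter chain: LZMA2 with the default preset (assumed API). */
	lzma_options_lzma opt;
	if (lzma_lzma_preset(&opt, LZMA_PRESET_DEFAULT))
		return 0;

	lzma_filter filters[] = {
		{ .id = LZMA_FILTER_LZMA2, .options = &opt },
		{ .id = LZMA_VLI_UNKNOWN,  .options = NULL },
	};

	*out = malloc(out_max);
	if (*out == NULL)
		return 0;

	/* NULL allocator selects liblzma's default malloc()/free(). */
	size_t out_pos = 0;
	if (lzma_stream_buffer_encode(filters, LZMA_CHECK_CRC32, NULL,
			in, in_size, *out, &out_pos, out_max) != LZMA_OK) {
		free(*out);
		*out = NULL;
		return 0;
	}

	return out_pos;
}
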
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_decoder.c
new file mode 100644
index 00000000..dcec5d3c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_decoder.c
@@ -0,0 +1,447 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file stream_decoder.c
+/// \brief Decodes .xz Streams
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "stream_decoder.h"
+#include "block_decoder.h"
+
+
+struct lzma_coder_s {
+ enum {
+ SEQ_STREAM_HEADER,
+ SEQ_BLOCK_HEADER,
+ SEQ_BLOCK,
+ SEQ_INDEX,
+ SEQ_STREAM_FOOTER,
+ SEQ_STREAM_PADDING,
+ } sequence;
+
+ /// Block or Metadata decoder. This takes little memory and the same
+ /// data structure can be used to decode every Block Header, so it's
+ /// a good idea to have a separate lzma_next_coder structure for it.
+ lzma_next_coder block_decoder;
+
+ /// Block options decoded by the Block Header decoder and used by
+ /// the Block decoder.
+ lzma_block block_options;
+
+ /// Stream Flags from Stream Header
+ lzma_stream_flags stream_flags;
+
+ /// Index is hashed so that it can be compared to the sizes of Blocks
+ /// with O(1) memory usage.
+ lzma_index_hash *index_hash;
+
+ /// Memory usage limit
+ uint64_t memlimit;
+
+ /// Amount of memory actually needed (only an estimate)
+ uint64_t memusage;
+
+ /// If true, LZMA_NO_CHECK is returned if the Stream has
+ /// no integrity check.
+ bool tell_no_check;
+
+ /// If true, LZMA_UNSUPPORTED_CHECK is returned if the Stream has
+ /// an integrity check that isn't supported by this liblzma build.
+ bool tell_unsupported_check;
+
+ /// If true, LZMA_GET_CHECK is returned after decoding Stream Header.
+ bool tell_any_check;
+
+ /// If true, we will decode concatenated Streams that possibly have
+ /// Stream Padding between or after them. LZMA_STREAM_END is returned
+ /// once the application isn't giving us any new input, and we aren't
+ /// in the middle of a Stream, and possible Stream Padding is a
+ /// multiple of four bytes.
+ bool concatenated;
+
+ /// When decoding concatenated Streams, this is true as long as we
+ /// are decoding the first Stream. This is needed to avoid misleading
+ /// LZMA_FORMAT_ERROR in case the later Streams don't have valid magic
+ /// bytes.
+ bool first_stream;
+
+ /// Write position in buffer[] and position in Stream Padding
+ size_t pos;
+
+ /// Buffer to hold Stream Header, Block Header, and Stream Footer.
+ /// Block Header has biggest maximum size.
+ uint8_t buffer[LZMA_BLOCK_HEADER_SIZE_MAX];
+};
+
+
+static lzma_ret
+stream_decoder_reset(lzma_coder *coder, lzma_allocator *allocator)
+{
+ // Initialize the Index hash used to verify the Index.
+ coder->index_hash = lzma_index_hash_init(coder->index_hash, allocator);
+ if (coder->index_hash == NULL)
+ return LZMA_MEM_ERROR;
+
+ // Reset the rest of the variables.
+ coder->sequence = SEQ_STREAM_HEADER;
+ coder->pos = 0;
+
+ return LZMA_OK;
+}
+
+
+static lzma_ret
+stream_decode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ // When decoding the actual Block, it may be able to produce more
+ // output even if we don't give it any new input.
+ while (true)
+ switch (coder->sequence) {
+ case SEQ_STREAM_HEADER: {
+ // Copy the Stream Header to the internal buffer.
+ lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
+ LZMA_STREAM_HEADER_SIZE);
+
+ // Return if we didn't get the whole Stream Header yet.
+ if (coder->pos < LZMA_STREAM_HEADER_SIZE)
+ return LZMA_OK;
+
+ coder->pos = 0;
+
+ // Decode the Stream Header.
+ const lzma_ret ret = lzma_stream_header_decode(
+ &coder->stream_flags, coder->buffer);
+ if (ret != LZMA_OK)
+ return ret == LZMA_FORMAT_ERROR && !coder->first_stream
+ ? LZMA_DATA_ERROR : ret;
+
+ // If we are decoding concatenated Streams, and the later
+ // Streams have invalid Header Magic Bytes, we give
+ // LZMA_DATA_ERROR instead of LZMA_FORMAT_ERROR.
+ coder->first_stream = false;
+
+ // Copy the type of the Check so that Block Header and Block
+ // decoders see it.
+ coder->block_options.check = coder->stream_flags.check;
+
+ // Even if we return LZMA_*_CHECK below, we want
+ // to continue from Block Header decoding.
+ coder->sequence = SEQ_BLOCK_HEADER;
+
+ // Detect if there's no integrity check or if it is
+ // unsupported if those were requested by the application.
+ if (coder->tell_no_check && coder->stream_flags.check
+ == LZMA_CHECK_NONE)
+ return LZMA_NO_CHECK;
+
+ if (coder->tell_unsupported_check
+ && !lzma_check_is_supported(
+ coder->stream_flags.check))
+ return LZMA_UNSUPPORTED_CHECK;
+
+ if (coder->tell_any_check)
+ return LZMA_GET_CHECK;
+ }
+
+ // Fall through
+
+ case SEQ_BLOCK_HEADER: {
+ if (*in_pos >= in_size)
+ return LZMA_OK;
+
+ if (coder->pos == 0) {
+ // Detect if it's Index.
+ if (in[*in_pos] == 0x00) {
+ coder->sequence = SEQ_INDEX;
+ break;
+ }
+
+ // Calculate the size of the Block Header. Note that
+ // Block Header decoder wants to see this byte too
+ // so don't advance *in_pos.
+ coder->block_options.header_size
+ = lzma_block_header_size_decode(
+ in[*in_pos]);
+ }
+
+ // Copy the Block Header to the internal buffer.
+ lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
+ coder->block_options.header_size);
+
+ // Return if we didn't get the whole Block Header yet.
+ if (coder->pos < coder->block_options.header_size)
+ return LZMA_OK;
+
+ coder->pos = 0;
+
+ // Set up a buffer to hold the filter chain. Block Header
+ // decoder will initialize all members of this array so
+ // we don't need to do it here.
+ lzma_filter filters[LZMA_FILTERS_MAX + 1];
+ coder->block_options.filters = filters;
+
+ // Decode the Block Header.
+ return_if_error(lzma_block_header_decode(&coder->block_options,
+ allocator, coder->buffer));
+
+ // Check the memory usage limit.
+ const uint64_t memusage = lzma_raw_decoder_memusage(filters);
+ lzma_ret ret;
+
+ if (memusage == UINT64_MAX) {
+ // One or more unknown Filter IDs.
+ ret = LZMA_OPTIONS_ERROR;
+ } else {
+ // Now we can set coder->memusage since we know that
+ // the filter chain is valid. We don't want
+ // lzma_memusage() to return UINT64_MAX in case of
+ // invalid filter chain.
+ coder->memusage = memusage;
+
+ if (memusage > coder->memlimit) {
+ // The chain would need too much memory.
+ ret = LZMA_MEMLIMIT_ERROR;
+ } else {
+ // Memory usage is OK.
+ // Initialize the Block decoder.
+ ret = lzma_block_decoder_init(
+ &coder->block_decoder,
+ allocator,
+ &coder->block_options);
+ }
+ }
+
+ // Free the allocated filter options since they are needed
+ // only to initialize the Block decoder.
+ for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i)
+ lzma_free(filters[i].options, allocator);
+
+ coder->block_options.filters = NULL;
+
+		// Check if memory usage calculation and Block encoder
+ // initialization succeeded.
+ if (ret != LZMA_OK)
+ return ret;
+
+ coder->sequence = SEQ_BLOCK;
+ }
+
+ // Fall through
+
+ case SEQ_BLOCK: {
+ const lzma_ret ret = coder->block_decoder.code(
+ coder->block_decoder.coder, allocator,
+ in, in_pos, in_size, out, out_pos, out_size,
+ action);
+
+ if (ret != LZMA_STREAM_END)
+ return ret;
+
+ // Block decoded successfully. Add the new size pair to
+ // the Index hash.
+ return_if_error(lzma_index_hash_append(coder->index_hash,
+ lzma_block_unpadded_size(
+ &coder->block_options),
+ coder->block_options.uncompressed_size));
+
+ coder->sequence = SEQ_BLOCK_HEADER;
+ break;
+ }
+
+ case SEQ_INDEX: {
+ // If we don't have any input, don't call
+ // lzma_index_hash_decode() since it would return
+ // LZMA_BUF_ERROR, which we must not do here.
+ if (*in_pos >= in_size)
+ return LZMA_OK;
+
+ // Decode the Index and compare it to the hash calculated
+ // from the sizes of the Blocks (if any).
+ const lzma_ret ret = lzma_index_hash_decode(coder->index_hash,
+ in, in_pos, in_size);
+ if (ret != LZMA_STREAM_END)
+ return ret;
+
+ coder->sequence = SEQ_STREAM_FOOTER;
+ }
+
+ // Fall through
+
+ case SEQ_STREAM_FOOTER: {
+ // Copy the Stream Footer to the internal buffer.
+ lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
+ LZMA_STREAM_HEADER_SIZE);
+
+ // Return if we didn't get the whole Stream Footer yet.
+ if (coder->pos < LZMA_STREAM_HEADER_SIZE)
+ return LZMA_OK;
+
+ coder->pos = 0;
+
+ // Decode the Stream Footer. The decoder gives
+ // LZMA_FORMAT_ERROR if the magic bytes don't match,
+ // so convert that return code to LZMA_DATA_ERROR.
+ lzma_stream_flags footer_flags;
+ const lzma_ret ret = lzma_stream_footer_decode(
+ &footer_flags, coder->buffer);
+ if (ret != LZMA_OK)
+ return ret == LZMA_FORMAT_ERROR
+ ? LZMA_DATA_ERROR : ret;
+
+ // Check that Index Size stored in the Stream Footer matches
+ // the real size of the Index field.
+ if (lzma_index_hash_size(coder->index_hash)
+ != footer_flags.backward_size)
+ return LZMA_DATA_ERROR;
+
+ // Compare that the Stream Flags fields are identical in
+ // both Stream Header and Stream Footer.
+ return_if_error(lzma_stream_flags_compare(
+ &coder->stream_flags, &footer_flags));
+
+ if (!coder->concatenated)
+ return LZMA_STREAM_END;
+
+ coder->sequence = SEQ_STREAM_PADDING;
+ }
+
+ // Fall through
+
+ case SEQ_STREAM_PADDING:
+ assert(coder->concatenated);
+
+ // Skip over possible Stream Padding.
+ while (true) {
+ if (*in_pos >= in_size) {
+ // Unless LZMA_FINISH was used, we cannot
+ // know if there's more input coming later.
+ if (action != LZMA_FINISH)
+ return LZMA_OK;
+
+ // Stream Padding must be a multiple of
+ // four bytes.
+ return coder->pos == 0
+ ? LZMA_STREAM_END
+ : LZMA_DATA_ERROR;
+ }
+
+ // If the byte is not zero, it probably indicates
+ // beginning of a new Stream (or the file is corrupt).
+ if (in[*in_pos] != 0x00)
+ break;
+
+ ++*in_pos;
+ coder->pos = (coder->pos + 1) & 3;
+ }
+
+ // Stream Padding must be a multiple of four bytes (empty
+ // Stream Padding is OK).
+ if (coder->pos != 0) {
+ ++*in_pos;
+ return LZMA_DATA_ERROR;
+ }
+
+ // Prepare to decode the next Stream.
+ return_if_error(stream_decoder_reset(coder, allocator));
+ break;
+
+ default:
+ assert(0);
+ return LZMA_PROG_ERROR;
+ }
+
+ return LZMA_OK;
+}
+
+
+static void
+stream_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->block_decoder, allocator);
+ lzma_index_hash_end(coder->index_hash, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+static lzma_check
+stream_decoder_get_check(const lzma_coder *coder)
+{
+ return coder->stream_flags.check;
+}
+
+
+static lzma_ret
+stream_decoder_memconfig(lzma_coder *coder, uint64_t *memusage,
+ uint64_t *old_memlimit, uint64_t new_memlimit)
+{
+ if (new_memlimit != 0 && new_memlimit < coder->memusage)
+ return LZMA_MEMLIMIT_ERROR;
+
+ *memusage = coder->memusage;
+ *old_memlimit = coder->memlimit;
+ coder->memlimit = new_memlimit;
+
+ return LZMA_OK;
+}
+
+
+extern lzma_ret
+lzma_stream_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ uint64_t memlimit, uint32_t flags)
+{
+ lzma_next_coder_init(&lzma_stream_decoder_init, next, allocator);
+
+ if (memlimit == 0)
+ return LZMA_PROG_ERROR;
+
+ if (flags & ~LZMA_SUPPORTED_FLAGS)
+ return LZMA_OPTIONS_ERROR;
+
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &stream_decode;
+ next->end = &stream_decoder_end;
+ next->get_check = &stream_decoder_get_check;
+ next->memconfig = &stream_decoder_memconfig;
+
+ next->coder->block_decoder = LZMA_NEXT_CODER_INIT;
+ next->coder->index_hash = NULL;
+ }
+
+ next->coder->memlimit = memlimit;
+ next->coder->memusage = LZMA_MEMUSAGE_BASE;
+ next->coder->tell_no_check = (flags & LZMA_TELL_NO_CHECK) != 0;
+ next->coder->tell_unsupported_check
+ = (flags & LZMA_TELL_UNSUPPORTED_CHECK) != 0;
+ next->coder->tell_any_check = (flags & LZMA_TELL_ANY_CHECK) != 0;
+ next->coder->concatenated = (flags & LZMA_CONCATENATED) != 0;
+ next->coder->first_stream = true;
+
+ return stream_decoder_reset(next->coder, allocator);
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_stream_decoder(lzma_stream *strm, uint64_t memlimit, uint32_t flags)
+{
+ lzma_next_strm_init(lzma_stream_decoder_init, strm, memlimit, flags);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+ strm->internal->supported_actions[LZMA_FINISH] = true;
+
+ return LZMA_OK;
+}
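
Editor's note (not part of the imported file): a sketch of driving the decoder registered above from an application, using liblzma's public multi-call API (LZMA_STREAM_INIT, lzma_code(), lzma_end()) from elsewhere in this import. The 64 MiB memory limit and BUFSIZ buffers are arbitrary choices, and file I/O errors are not diagnosed.

#include <lzma.h>
#include <stdio.h>

/* Decode one .xz file (possibly several concatenated Streams) to stdout.
 * Returns 0 on success, nonzero otherwise. */
static int
decompress_file(FILE *src)
{
	lzma_stream strm = LZMA_STREAM_INIT;

	/* LZMA_CONCATENATED also makes the decoder consume Stream Padding. */
	if (lzma_stream_decoder(&strm, 64U << 20, LZMA_CONCATENATED) != LZMA_OK)
		return 1;

	uint8_t inbuf[BUFSIZ], outbuf[BUFSIZ];
	lzma_action action = LZMA_RUN;
	lzma_ret ret = LZMA_OK;

	strm.next_in = NULL;
	strm.avail_in = 0;
	strm.next_out = outbuf;
	strm.avail_out = sizeof(outbuf);

	while (ret == LZMA_OK) {
		/* Refill the input buffer; switch to LZMA_FINISH at EOF. */
		if (strm.avail_in == 0 && !feof(src)) {
			strm.next_in = inbuf;
			strm.avail_in = fread(inbuf, 1, sizeof(inbuf), src);
			if (feof(src))
				action = LZMA_FINISH;
		}

		ret = lzma_code(&strm, action);

		/* Flush the output buffer when it is full or at Stream end. */
		if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
			fwrite(outbuf, 1, sizeof(outbuf) - strm.avail_out, stdout);
			strm.next_out = outbuf;
			strm.avail_out = sizeof(outbuf);
		}
	}

	lzma_end(&strm);
	return ret == LZMA_STREAM_END ? 0 : 1;
}
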
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_decoder.h
new file mode 100644
index 00000000..fb09574d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_decoder.h
@@ -0,0 +1,23 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file stream_decoder.h
+/// \brief Decodes .xz Streams
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_STREAM_DECODER_H
+#define LZMA_STREAM_DECODER_H
+
+#include "common.h"
+
+extern lzma_ret lzma_stream_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, uint64_t memlimit, uint32_t flags);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_encoder.c
new file mode 100644
index 00000000..431ab27e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_encoder.c
@@ -0,0 +1,276 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file stream_encoder.c
+/// \brief Encodes .xz Streams
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "stream_encoder.h"
+#include "block_encoder.h"
+#include "index_encoder.h"
+
+
+struct lzma_coder_s {
+ enum {
+ SEQ_STREAM_HEADER,
+ SEQ_BLOCK_INIT,
+ SEQ_BLOCK_HEADER,
+ SEQ_BLOCK_ENCODE,
+ SEQ_INDEX_ENCODE,
+ SEQ_STREAM_FOOTER,
+ } sequence;
+
+ /// Block
+ lzma_next_coder block_encoder;
+
+ /// Options for the Block encoder
+ lzma_block block_options;
+
+ /// Index encoder. This is separate from Block encoder, because this
+ /// doesn't take much memory, and when encoding multiple Streams
+ /// with the same encoding options we avoid reallocating memory.
+ lzma_next_coder index_encoder;
+
+ /// Index to hold sizes of the Blocks
+ lzma_index *index;
+
+ /// Read position in buffer[]
+ size_t buffer_pos;
+
+ /// Total number of bytes in buffer[]
+ size_t buffer_size;
+
+ /// Buffer to hold Stream Header, Block Header, and Stream Footer.
+ /// Block Header has biggest maximum size.
+ uint8_t buffer[LZMA_BLOCK_HEADER_SIZE_MAX];
+};
+
+
+static lzma_ret
+block_encoder_init(lzma_coder *coder, lzma_allocator *allocator)
+{
+ // Prepare the Block options. Even though Block encoder doesn't need
+ // compressed_size, uncompressed_size, and header_size to be
+ // initialized, it is a good idea to do it here, because this way
+	// we catch if someone gave us a Filter ID that cannot be used in
+ // Blocks/Streams.
+ coder->block_options.compressed_size = LZMA_VLI_UNKNOWN;
+ coder->block_options.uncompressed_size = LZMA_VLI_UNKNOWN;
+
+ return_if_error(lzma_block_header_size(&coder->block_options));
+
+ // Initialize the actual Block encoder.
+ return lzma_block_encoder_init(&coder->block_encoder, allocator,
+ &coder->block_options);
+}
+
+
+static lzma_ret
+stream_encode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ // Main loop
+ while (*out_pos < out_size)
+ switch (coder->sequence) {
+ case SEQ_STREAM_HEADER:
+ case SEQ_BLOCK_HEADER:
+ case SEQ_STREAM_FOOTER:
+ lzma_bufcpy(coder->buffer, &coder->buffer_pos,
+ coder->buffer_size, out, out_pos, out_size);
+ if (coder->buffer_pos < coder->buffer_size)
+ return LZMA_OK;
+
+ if (coder->sequence == SEQ_STREAM_FOOTER)
+ return LZMA_STREAM_END;
+
+ coder->buffer_pos = 0;
+ ++coder->sequence;
+ break;
+
+ case SEQ_BLOCK_INIT: {
+ if (*in_pos == in_size) {
+ // If we are requested to flush or finish the current
+			// Block, return LZMA_STREAM_END immediately since
+ // there's nothing to do.
+ if (action != LZMA_FINISH)
+ return action == LZMA_RUN
+ ? LZMA_OK : LZMA_STREAM_END;
+
+ // The application had used LZMA_FULL_FLUSH to finish
+ // the previous Block, but now wants to finish without
+ // encoding new data, or it is simply creating an
+ // empty Stream with no Blocks.
+ //
+ // Initialize the Index encoder, and continue to
+ // actually encoding the Index.
+ return_if_error(lzma_index_encoder_init(
+ &coder->index_encoder, allocator,
+ coder->index));
+ coder->sequence = SEQ_INDEX_ENCODE;
+ break;
+ }
+
+ // Initialize the Block encoder except if this is the first
+ // Block, because stream_encoder_init() has already
+ // initialized it.
+ if (lzma_index_count(coder->index) != 0)
+ return_if_error(block_encoder_init(coder, allocator));
+
+ // Encode the Block Header. This shouldn't fail since we have
+ // already initialized the Block encoder.
+ if (lzma_block_header_encode(&coder->block_options,
+ coder->buffer) != LZMA_OK)
+ return LZMA_PROG_ERROR;
+
+ coder->buffer_size = coder->block_options.header_size;
+ coder->sequence = SEQ_BLOCK_HEADER;
+ break;
+ }
+
+ case SEQ_BLOCK_ENCODE: {
+ static const lzma_action convert[4] = {
+ LZMA_RUN,
+ LZMA_SYNC_FLUSH,
+ LZMA_FINISH,
+ LZMA_FINISH,
+ };
+
+ const lzma_ret ret = coder->block_encoder.code(
+ coder->block_encoder.coder, allocator,
+ in, in_pos, in_size,
+ out, out_pos, out_size, convert[action]);
+ if (ret != LZMA_STREAM_END || action == LZMA_SYNC_FLUSH)
+ return ret;
+
+ // Add a new Index Record.
+ const lzma_vli unpadded_size = lzma_block_unpadded_size(
+ &coder->block_options);
+ assert(unpadded_size != 0);
+ return_if_error(lzma_index_append(coder->index, allocator,
+ unpadded_size,
+ coder->block_options.uncompressed_size));
+
+ coder->sequence = SEQ_BLOCK_INIT;
+ break;
+ }
+
+ case SEQ_INDEX_ENCODE: {
+ // Call the Index encoder. It doesn't take any input, so
+ // those pointers can be NULL.
+ const lzma_ret ret = coder->index_encoder.code(
+ coder->index_encoder.coder, allocator,
+ NULL, NULL, 0,
+ out, out_pos, out_size, LZMA_RUN);
+ if (ret != LZMA_STREAM_END)
+ return ret;
+
+ // Encode the Stream Footer into coder->buffer.
+ const lzma_stream_flags stream_flags = {
+ .version = 0,
+ .backward_size = lzma_index_size(coder->index),
+ .check = coder->block_options.check,
+ };
+
+ if (lzma_stream_footer_encode(&stream_flags, coder->buffer)
+ != LZMA_OK)
+ return LZMA_PROG_ERROR;
+
+ coder->buffer_size = LZMA_STREAM_HEADER_SIZE;
+ coder->sequence = SEQ_STREAM_FOOTER;
+ break;
+ }
+
+ default:
+ assert(0);
+ return LZMA_PROG_ERROR;
+ }
+
+ return LZMA_OK;
+}
+
+
+static void
+stream_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->block_encoder, allocator);
+ lzma_next_end(&coder->index_encoder, allocator);
+ lzma_index_end(coder->index, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+extern lzma_ret
+lzma_stream_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter *filters, lzma_check check)
+{
+ lzma_next_coder_init(&lzma_stream_encoder_init, next, allocator);
+
+ if (filters == NULL)
+ return LZMA_PROG_ERROR;
+
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &stream_encode;
+ next->end = &stream_encoder_end;
+
+ next->coder->block_encoder = LZMA_NEXT_CODER_INIT;
+ next->coder->index_encoder = LZMA_NEXT_CODER_INIT;
+ next->coder->index = NULL;
+ }
+
+ // Basic initializations
+ next->coder->sequence = SEQ_STREAM_HEADER;
+ next->coder->block_options.version = 0;
+ next->coder->block_options.check = check;
+ next->coder->block_options.filters = (lzma_filter *)(filters);
+
+ // Initialize the Index
+ next->coder->index = lzma_index_init(next->coder->index, allocator);
+ if (next->coder->index == NULL)
+ return LZMA_MEM_ERROR;
+
+ // Encode the Stream Header
+ lzma_stream_flags stream_flags = {
+ .version = 0,
+ .check = check,
+ };
+ return_if_error(lzma_stream_header_encode(
+ &stream_flags, next->coder->buffer));
+
+ next->coder->buffer_pos = 0;
+ next->coder->buffer_size = LZMA_STREAM_HEADER_SIZE;
+
+ // Initialize the Block encoder. This way we detect if the given
+ // filters are supported by the current liblzma build, and the
+ // application doesn't need to keep the filters structure available
+ // unless it is going to use LZMA_FULL_FLUSH.
+ return block_encoder_init(next->coder, allocator);
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_stream_encoder(lzma_stream *strm,
+ const lzma_filter *filters, lzma_check check)
+{
+ lzma_next_strm_init(lzma_stream_encoder_init, strm, filters, check);
+
+ strm->internal->supported_actions[LZMA_RUN] = true;
+ strm->internal->supported_actions[LZMA_SYNC_FLUSH] = true;
+ strm->internal->supported_actions[LZMA_FULL_FLUSH] = true;
+ strm->internal->supported_actions[LZMA_FINISH] = true;
+
+ return LZMA_OK;
+}
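
Editor's note (not part of the imported file): a sketch of initializing the multi-call Stream encoder above. lzma_lzma_preset() and LZMA_FILTER_LZMA2 are assumed from elsewhere in this import; as the comment in lzma_stream_encoder_init() notes, the filter array and its options must stay available if LZMA_FULL_FLUSH will be used, so this sketch keeps them in static storage.

#include <lzma.h>

/* Sketch: set up a multi-call .xz Stream encoder on *strm.  The caller
 * then drives it with lzma_code(), using LZMA_FULL_FLUSH to finish the
 * current Block early and LZMA_FINISH to finish the whole Stream. */
static lzma_ret
init_xz_encoder(lzma_stream *strm, uint32_t preset)
{
	/* Static storage so the chain survives later LZMA_FULL_FLUSH calls,
	 * which re-initialize the Block encoder from these options. */
	static lzma_options_lzma opt;
	static lzma_filter filters[2];

	if (lzma_lzma_preset(&opt, preset))
		return LZMA_OPTIONS_ERROR;

	filters[0].id = LZMA_FILTER_LZMA2;
	filters[0].options = &opt;
	filters[1].id = LZMA_VLI_UNKNOWN;
	filters[1].options = NULL;

	return lzma_stream_encoder(strm, filters, LZMA_CHECK_CRC64);
}
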
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_encoder.h
new file mode 100644
index 00000000..8e6599a2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_encoder.h
@@ -0,0 +1,25 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file stream_encoder.h
+/// \brief Encodes .xz Streams
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_STREAM_ENCODER_H
+#define LZMA_STREAM_ENCODER_H
+
+#include "common.h"
+
+
+extern lzma_ret lzma_stream_encoder_init(
+ lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter *filters, lzma_check check);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_common.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_common.c
new file mode 100644
index 00000000..81781a87
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_common.c
@@ -0,0 +1,49 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file stream_flags_common.c
+/// \brief Common stuff for Stream flags coders
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "stream_flags_common.h"
+
+
+const uint8_t lzma_header_magic[6] = { 0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00 };
+const uint8_t lzma_footer_magic[2] = { 0x59, 0x5A };
+
+
+extern LZMA_API(lzma_ret)
+lzma_stream_flags_compare(
+ const lzma_stream_flags *a, const lzma_stream_flags *b)
+{
+ // We can compare only version 0 structures.
+ if (a->version != 0 || b->version != 0)
+ return LZMA_OPTIONS_ERROR;
+
+ // Check type
+ if ((unsigned int)(a->check) > LZMA_CHECK_ID_MAX
+ || (unsigned int)(b->check) > LZMA_CHECK_ID_MAX)
+ return LZMA_PROG_ERROR;
+
+ if (a->check != b->check)
+ return LZMA_DATA_ERROR;
+
+ // Backward Sizes are compared only if they are known in both.
+ if (a->backward_size != LZMA_VLI_UNKNOWN
+ && b->backward_size != LZMA_VLI_UNKNOWN) {
+ if (!is_backward_size_valid(a) || !is_backward_size_valid(b))
+ return LZMA_PROG_ERROR;
+
+ if (a->backward_size != b->backward_size)
+ return LZMA_DATA_ERROR;
+ }
+
+ return LZMA_OK;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_common.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_common.h
new file mode 100644
index 00000000..e9fc9834
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_common.h
@@ -0,0 +1,35 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file stream_flags_common.h
+/// \brief Common stuff for Stream flags coders
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_STREAM_FLAGS_COMMON_H
+#define LZMA_STREAM_FLAGS_COMMON_H
+
+#include "common.h"
+
+/// Size of the Stream Flags field
+#define LZMA_STREAM_FLAGS_SIZE 2
+
+extern const uint8_t lzma_header_magic[6];
+extern const uint8_t lzma_footer_magic[2];
+
+
+static inline bool
+is_backward_size_valid(const lzma_stream_flags *options)
+{
+ return options->backward_size >= LZMA_BACKWARD_SIZE_MIN
+ && options->backward_size <= LZMA_BACKWARD_SIZE_MAX
+ && (options->backward_size & 3) == 0;
+}
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_decoder.c
new file mode 100644
index 00000000..59cacece
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_decoder.c
@@ -0,0 +1,84 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file stream_flags_decoder.c
+/// \brief Decodes Stream Header and Stream Footer from .xz files
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "stream_flags_common.h"
+
+
+static bool
+stream_flags_decode(lzma_stream_flags *options, const uint8_t *in)
+{
+ // Reserved bits must be unset.
+ if (in[0] != 0x00 || (in[1] & 0xF0))
+ return true;
+
+ options->version = 0;
+ options->check = in[1] & 0x0F;
+
+ return false;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_stream_header_decode(lzma_stream_flags *options, const uint8_t *in)
+{
+ // Magic
+ if (memcmp(in, lzma_header_magic, sizeof(lzma_header_magic)) != 0)
+ return LZMA_FORMAT_ERROR;
+
+ // Verify the CRC32 so we can distinguish between corrupt
+ // and unsupported files.
+ const uint32_t crc = lzma_crc32(in + sizeof(lzma_header_magic),
+ LZMA_STREAM_FLAGS_SIZE, 0);
+ if (crc != integer_read_32(in + sizeof(lzma_header_magic)
+ + LZMA_STREAM_FLAGS_SIZE))
+ return LZMA_DATA_ERROR;
+
+ // Stream Flags
+ if (stream_flags_decode(options, in + sizeof(lzma_header_magic)))
+ return LZMA_OPTIONS_ERROR;
+
+ // Set Backward Size to indicate unknown value. That way
+ // lzma_stream_flags_compare() can be used to compare Stream Header
+ // and Stream Footer while keeping it useful also for comparing
+ // two Stream Footers.
+ options->backward_size = LZMA_VLI_UNKNOWN;
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_stream_footer_decode(lzma_stream_flags *options, const uint8_t *in)
+{
+ // Magic
+ if (memcmp(in + sizeof(uint32_t) * 2 + LZMA_STREAM_FLAGS_SIZE,
+ lzma_footer_magic, sizeof(lzma_footer_magic)) != 0)
+ return LZMA_FORMAT_ERROR;
+
+ // CRC32
+ const uint32_t crc = lzma_crc32(in + sizeof(uint32_t),
+ sizeof(uint32_t) + LZMA_STREAM_FLAGS_SIZE, 0);
+ if (crc != integer_read_32(in))
+ return LZMA_DATA_ERROR;
+
+ // Stream Flags
+ if (stream_flags_decode(options, in + sizeof(uint32_t) * 2))
+ return LZMA_OPTIONS_ERROR;
+
+ // Backward Size
+ options->backward_size = integer_read_32(in + sizeof(uint32_t));
+ options->backward_size = (options->backward_size + 1) * 4;
+
+ return LZMA_OK;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_encoder.c
new file mode 100644
index 00000000..8ba2f3d3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/stream_flags_encoder.c
@@ -0,0 +1,88 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file stream_flags_encoder.c
+/// \brief Encodes Stream Header and Stream Footer for .xz files
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "stream_flags_common.h"
+
+
+static bool
+stream_flags_encode(const lzma_stream_flags *options, uint8_t *out)
+{
+ if ((unsigned int)(options->check) > LZMA_CHECK_ID_MAX)
+ return true;
+
+ out[0] = 0x00;
+ out[1] = options->check;
+
+ return false;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_stream_header_encode(const lzma_stream_flags *options, uint8_t *out)
+{
+ assert(sizeof(lzma_header_magic) + LZMA_STREAM_FLAGS_SIZE
+ + 4 == LZMA_STREAM_HEADER_SIZE);
+
+ if (options->version != 0)
+ return LZMA_OPTIONS_ERROR;
+
+ // Magic
+ memcpy(out, lzma_header_magic, sizeof(lzma_header_magic));
+
+ // Stream Flags
+ if (stream_flags_encode(options, out + sizeof(lzma_header_magic)))
+ return LZMA_PROG_ERROR;
+
+ // CRC32 of the Stream Header
+ const uint32_t crc = lzma_crc32(out + sizeof(lzma_header_magic),
+ LZMA_STREAM_FLAGS_SIZE, 0);
+
+ integer_write_32(out + sizeof(lzma_header_magic)
+ + LZMA_STREAM_FLAGS_SIZE, crc);
+
+ return LZMA_OK;
+}
+
+
+extern LZMA_API(lzma_ret)
+lzma_stream_footer_encode(const lzma_stream_flags *options, uint8_t *out)
+{
+ assert(2 * 4 + LZMA_STREAM_FLAGS_SIZE + sizeof(lzma_footer_magic)
+ == LZMA_STREAM_HEADER_SIZE);
+
+ if (options->version != 0)
+ return LZMA_OPTIONS_ERROR;
+
+ // Backward Size
+ if (!is_backward_size_valid(options))
+ return LZMA_PROG_ERROR;
+
+ integer_write_32(out + 4, options->backward_size / 4 - 1);
+
+ // Stream Flags
+ if (stream_flags_encode(options, out + 2 * 4))
+ return LZMA_PROG_ERROR;
+
+ // CRC32
+ const uint32_t crc = lzma_crc32(
+ out + 4, 4 + LZMA_STREAM_FLAGS_SIZE, 0);
+
+ integer_write_32(out, crc);
+
+ // Magic
+ memcpy(out + 2 * 4 + LZMA_STREAM_FLAGS_SIZE,
+ lzma_footer_magic, sizeof(lzma_footer_magic));
+
+ return LZMA_OK;
+}
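
Editor's note (not part of the imported file): a round-trip sketch for the Stream Flags coders in the two files above. The constants it uses (LZMA_STREAM_HEADER_SIZE, LZMA_CHECK_CRC32, LZMA_VLI_UNKNOWN) come from liblzma's public headers elsewhere in this import.

#include <lzma.h>
#include <stdbool.h>

/* Encode a Stream Header and Stream Footer, decode them back, and check
 * that the decoded flags are consistent.  Returns true on success. */
static bool
stream_flags_roundtrip(void)
{
	uint8_t header[LZMA_STREAM_HEADER_SIZE];
	uint8_t footer[LZMA_STREAM_HEADER_SIZE];

	lzma_stream_flags flags = {
		.version = 0,
		.check = LZMA_CHECK_CRC32,
		.backward_size = 8,	/* any multiple of four in the valid range */
	};

	if (lzma_stream_header_encode(&flags, header) != LZMA_OK
			|| lzma_stream_footer_encode(&flags, footer) != LZMA_OK)
		return false;

	lzma_stream_flags from_header, from_footer;
	if (lzma_stream_header_decode(&from_header, header) != LZMA_OK
			|| lzma_stream_footer_decode(&from_footer, footer) != LZMA_OK)
		return false;

	/* The header decoder sets backward_size to LZMA_VLI_UNKNOWN, so this
	 * comparison effectively checks only the Check type. */
	return lzma_stream_flags_compare(&from_header, &from_footer) == LZMA_OK;
}
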
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_decoder.c
new file mode 100644
index 00000000..e78d7a8e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_decoder.c
@@ -0,0 +1,88 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file vli_decoder.c
+/// \brief Decodes variable-length integers
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_vli_decode(lzma_vli *restrict vli, size_t *vli_pos,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size)
+{
+ // If we haven't been given vli_pos, work in single-call mode.
+ size_t vli_pos_internal = 0;
+ if (vli_pos == NULL) {
+ vli_pos = &vli_pos_internal;
+ *vli = 0;
+
+ // If there's no input, use LZMA_DATA_ERROR. This way it is
+ // easy to decode VLIs from buffers that have known size,
+ // and get the correct error code in case the buffer is
+ // too short.
+ if (*in_pos >= in_size)
+ return LZMA_DATA_ERROR;
+
+ } else {
+ // Initialize *vli when starting to decode a new integer.
+ if (*vli_pos == 0)
+ *vli = 0;
+
+ // Validate the arguments.
+ if (*vli_pos >= LZMA_VLI_BYTES_MAX
+ || (*vli >> (*vli_pos * 7)) != 0)
+			return LZMA_PROG_ERROR;
+
+ if (*in_pos >= in_size)
+ return LZMA_BUF_ERROR;
+ }
+
+ do {
+ // Read the next byte. Use a temporary variable so that we
+		// can update *in_pos immediately.
+ const uint8_t byte = in[*in_pos];
+ ++*in_pos;
+
+ // Add the newly read byte to *vli.
+ *vli += (lzma_vli)(byte & 0x7F) << (*vli_pos * 7);
+ ++*vli_pos;
+
+ // Check if this is the last byte of a multibyte integer.
+ if ((byte & 0x80) == 0) {
+ // We don't allow using variable-length integers as
+			// padding, i.e. the encoding must use the most
+			// compact form.
+ if (byte == 0x00 && *vli_pos > 1)
+ return LZMA_DATA_ERROR;
+
+ return vli_pos == &vli_pos_internal
+ ? LZMA_OK : LZMA_STREAM_END;
+ }
+
+ // There is at least one more byte coming. If we have already
+ // read maximum number of bytes, the integer is considered
+ // corrupt.
+ //
+		// If we need bigger integers in future, old versions of liblzma
+		// will confusingly indicate the file being corrupt instead of
+ // unsupported. I suppose it's still better this way, because
+ // in the foreseeable future (writing this in 2008) the only
+ // reason why files would appear having over 63-bit integers
+ // is that the files are simply corrupt.
+ if (*vli_pos == LZMA_VLI_BYTES_MAX)
+ return LZMA_DATA_ERROR;
+
+ } while (*in_pos < in_size);
+
+ return vli_pos == &vli_pos_internal ? LZMA_DATA_ERROR : LZMA_OK;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_encoder.c
new file mode 100644
index 00000000..0f5cf6c7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_encoder.c
@@ -0,0 +1,71 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file vli_encoder.c
+/// \brief Encodes variable-length integers
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+
+
+extern LZMA_API(lzma_ret)
+lzma_vli_encode(lzma_vli vli, size_t *vli_pos,
+ uint8_t *restrict out, size_t *restrict out_pos,
+ size_t out_size)
+{
+ // If we haven't been given vli_pos, work in single-call mode.
+ size_t vli_pos_internal = 0;
+ if (vli_pos == NULL) {
+ vli_pos = &vli_pos_internal;
+
+ // In single-call mode, we expect that the caller has
+ // reserved enough output space.
+ if (*out_pos >= out_size)
+ return LZMA_PROG_ERROR;
+ } else {
+ // This never happens when we are called by liblzma, but
+ // may happen if called directly from an application.
+ if (*out_pos >= out_size)
+ return LZMA_BUF_ERROR;
+ }
+
+ // Validate the arguments.
+ if (*vli_pos >= LZMA_VLI_BYTES_MAX || vli > LZMA_VLI_MAX)
+ return LZMA_PROG_ERROR;
+
+ // Shift vli so that the next bits to encode are the lowest. In
+ // single-call mode this never changes vli since *vli_pos is zero.
+ vli >>= *vli_pos * 7;
+
+ // Write the non-last bytes in a loop.
+ while (vli >= 0x80) {
+ // We don't need *vli_pos during this function call anymore,
+ // but update it here so that it is ready if we need to
+		// return before the whole integer has been encoded.
+ ++*vli_pos;
+ assert(*vli_pos < LZMA_VLI_BYTES_MAX);
+
+ // Write the next byte.
+ out[*out_pos] = (uint8_t)(vli) | 0x80;
+ vli >>= 7;
+
+ if (++*out_pos == out_size)
+ return vli_pos == &vli_pos_internal
+ ? LZMA_PROG_ERROR : LZMA_OK;
+ }
+
+ // Write the last byte.
+ out[*out_pos] = (uint8_t)(vli);
+ ++*out_pos;
+ ++*vli_pos;
+
+ return vli_pos == &vli_pos_internal ? LZMA_OK : LZMA_STREAM_END;
+
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_size.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_size.c
new file mode 100644
index 00000000..1491ebc5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/common/vli_size.c
@@ -0,0 +1,32 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file vli_size.c
+/// \brief Calculates the encoded size of a variable-length integer
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+
+
+extern LZMA_API(uint32_t)
+lzma_vli_size(lzma_vli vli)
+{
+ if (vli > LZMA_VLI_MAX)
+ return 0;
+
+ uint32_t i = 0;
+ do {
+ vli >>= 7;
+ ++i;
+ } while (vli != 0);
+
+ assert(i <= LZMA_VLI_BYTES_MAX);
+ return i;
+}
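
Editor's note (not part of the imported file): a single-call round trip for the VLI coders in the three files above, relying only on declarations from this import (lzma_vli_encode(), lzma_vli_decode(), lzma_vli_size(), LZMA_VLI_BYTES_MAX). Passing NULL as vli_pos selects the single-call mode described in the comments.

#include <lzma.h>
#include <stdbool.h>

/* Encode `value` into a small buffer, decode it back, and check that the
 * encoded length matches lzma_vli_size().  Returns true on success. */
static bool
vli_roundtrip(lzma_vli value)
{
	uint8_t buf[LZMA_VLI_BYTES_MAX];
	size_t out_pos = 0;

	if (lzma_vli_encode(value, NULL, buf, &out_pos, sizeof(buf)) != LZMA_OK)
		return false;

	if (out_pos != lzma_vli_size(value))
		return false;

	lzma_vli decoded;
	size_t in_pos = 0;
	if (lzma_vli_decode(&decoded, NULL, buf, &in_pos, out_pos) != LZMA_OK)
		return false;

	return decoded == value && in_pos == out_pos;
}
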
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/Makefile.inc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/Makefile.inc
new file mode 100644
index 00000000..c7739b44
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/Makefile.inc
@@ -0,0 +1,23 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+liblzma_la_SOURCES += \
+ delta/delta_common.c \
+ delta/delta_common.h \
+ delta/delta_private.h
+
+if COND_ENCODER_DELTA
+liblzma_la_SOURCES += \
+ delta/delta_encoder.c \
+ delta/delta_encoder.h
+endif
+
+if COND_DECODER_DELTA
+liblzma_la_SOURCES += \
+ delta/delta_decoder.c \
+ delta/delta_decoder.h
+endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_common.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_common.c
new file mode 100644
index 00000000..02d3d3de
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_common.c
@@ -0,0 +1,75 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file delta_common.c
+/// \brief Common stuff for Delta encoder and decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "delta_common.h"
+#include "delta_private.h"
+
+
+static void
+delta_coder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+extern lzma_ret
+lzma_delta_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters, lzma_code_function code)
+{
+ // Allocate memory for the decoder if needed.
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ // End function is the same for encoder and decoder.
+ next->end = &delta_coder_end;
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ }
+
+ // Coding function is different for encoder and decoder.
+ next->code = code;
+
+ // Validate the options.
+ if (lzma_delta_coder_memusage(filters[0].options) == UINT64_MAX)
+ return LZMA_OPTIONS_ERROR;
+
+ // Set the delta distance.
+ const lzma_options_delta *opt = filters[0].options;
+ next->coder->distance = opt->dist;
+
+ // Initialize the rest of the variables.
+ next->coder->pos = 0;
+ memzero(next->coder->history, LZMA_DELTA_DIST_MAX);
+
+ // Initialize the next decoder in the chain, if any.
+ return lzma_next_filter_init(&next->coder->next,
+ allocator, filters + 1);
+}
+
+
+extern uint64_t
+lzma_delta_coder_memusage(const void *options)
+{
+ const lzma_options_delta *opt = options;
+
+ if (opt == NULL || opt->type != LZMA_DELTA_TYPE_BYTE
+ || opt->dist < LZMA_DELTA_DIST_MIN
+ || opt->dist > LZMA_DELTA_DIST_MAX)
+ return UINT64_MAX;
+
+ return sizeof(lzma_coder);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_common.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_common.h
new file mode 100644
index 00000000..41ca4da8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_common.h
@@ -0,0 +1,22 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file delta_common.h
+/// \brief Common stuff for Delta encoder and decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_DELTA_COMMON_H
+#define LZMA_DELTA_COMMON_H
+
+#include "common.h"
+
+extern uint64_t lzma_delta_coder_memusage(const void *options);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_decoder.c
new file mode 100644
index 00000000..74ecca79
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_decoder.c
@@ -0,0 +1,77 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file delta_decoder.c
+/// \brief Delta filter decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "delta_decoder.h"
+#include "delta_private.h"
+
+
+static void
+decode_buffer(lzma_coder *coder, uint8_t *buffer, size_t size)
+{
+ const size_t distance = coder->distance;
+
+ for (size_t i = 0; i < size; ++i) {
+ buffer[i] += coder->history[(distance + coder->pos) & 0xFF];
+ coder->history[coder->pos-- & 0xFF] = buffer[i];
+ }
+}
+
+
+static lzma_ret
+delta_decode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ assert(coder->next.code != NULL);
+
+ const size_t out_start = *out_pos;
+
+ const lzma_ret ret = coder->next.code(coder->next.coder, allocator,
+ in, in_pos, in_size, out, out_pos, out_size,
+ action);
+
+ decode_buffer(coder, out + out_start, *out_pos - out_start);
+
+ return ret;
+}
+
+
+extern lzma_ret
+lzma_delta_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ return lzma_delta_coder_init(next, allocator, filters, &delta_decode);
+}
+
+
+extern lzma_ret
+lzma_delta_props_decode(void **options, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size)
+{
+ if (props_size != 1)
+ return LZMA_OPTIONS_ERROR;
+
+ lzma_options_delta *opt
+ = lzma_alloc(sizeof(lzma_options_delta), allocator);
+ if (opt == NULL)
+ return LZMA_MEM_ERROR;
+
+ opt->type = LZMA_DELTA_TYPE_BYTE;
+ opt->dist = props[0] + 1;
+
+ *options = opt;
+
+ return LZMA_OK;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_decoder.h
new file mode 100644
index 00000000..5624a2d4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_decoder.h
@@ -0,0 +1,27 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file delta_decoder.h
+/// \brief Delta filter decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_DELTA_DECODER_H
+#define LZMA_DELTA_DECODER_H
+
+#include "delta_common.h"
+
+extern lzma_ret lzma_delta_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+extern lzma_ret lzma_delta_props_decode(
+ void **options, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_encoder.c
new file mode 100644
index 00000000..1a4983cb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_encoder.c
@@ -0,0 +1,108 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file delta_encoder.c
+/// \brief Delta filter encoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "delta_encoder.h"
+#include "delta_private.h"
+
+
+/// Copies and encodes the data at the same time. This is used when Delta
+/// is the first filter in the chain (and thus the last filter in the
+/// encoder's filter stack).
+static void
+copy_and_encode(lzma_coder *coder,
+ const uint8_t *restrict in, uint8_t *restrict out, size_t size)
+{
+ const size_t distance = coder->distance;
+
+ for (size_t i = 0; i < size; ++i) {
+ const uint8_t tmp = coder->history[
+ (distance + coder->pos) & 0xFF];
+ coder->history[coder->pos-- & 0xFF] = in[i];
+ out[i] = in[i] - tmp;
+ }
+}
+
+
+/// Encodes the data in place. This is used when we are the last filter
+/// in the chain (and thus not the last in the encoder's filter stack).
+static void
+encode_in_place(lzma_coder *coder, uint8_t *buffer, size_t size)
+{
+ const size_t distance = coder->distance;
+
+ for (size_t i = 0; i < size; ++i) {
+ const uint8_t tmp = coder->history[
+ (distance + coder->pos) & 0xFF];
+ coder->history[coder->pos-- & 0xFF] = buffer[i];
+ buffer[i] -= tmp;
+ }
+}
+
+
+static lzma_ret
+delta_encode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ lzma_ret ret;
+
+ if (coder->next.code == NULL) {
+ const size_t in_avail = in_size - *in_pos;
+ const size_t out_avail = out_size - *out_pos;
+ const size_t size = MIN(in_avail, out_avail);
+
+ copy_and_encode(coder, in + *in_pos, out + *out_pos, size);
+
+ *in_pos += size;
+ *out_pos += size;
+
+ ret = action != LZMA_RUN && *in_pos == in_size
+ ? LZMA_STREAM_END : LZMA_OK;
+
+ } else {
+ const size_t out_start = *out_pos;
+
+ ret = coder->next.code(coder->next.coder, allocator,
+ in, in_pos, in_size, out, out_pos, out_size,
+ action);
+
+ encode_in_place(coder, out + out_start, *out_pos - out_start);
+ }
+
+ return ret;
+}
+
+
+extern lzma_ret
+lzma_delta_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ return lzma_delta_coder_init(next, allocator, filters, &delta_encode);
+}
+
+
+extern lzma_ret
+lzma_delta_props_encode(const void *options, uint8_t *out)
+{
+ // The caller must have already validated the options, so it's
+ // LZMA_PROG_ERROR if they are invalid.
+ if (lzma_delta_coder_memusage(options) == UINT64_MAX)
+ return LZMA_PROG_ERROR;
+
+ const lzma_options_delta *opt = options;
+ out[0] = opt->dist - LZMA_DELTA_DIST_MIN;
+
+ return LZMA_OK;
+}
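
Editor's note (not part of the imported file): a standalone illustration of the byte-wise delta transform implemented by copy_and_encode()/encode_in_place() above and by decode_buffer() in delta_decoder.c. It mirrors the arithmetic with a plain 256-byte history ring and is not liblzma API. With dist = 1, encoding {1, 3, 5, 7} gives {1, 2, 2, 2}, and the decoder restores the original bytes.

#include <stddef.h>
#include <stdint.h>

/* Forward delta transform: replace each byte with its difference from the
 * byte `dist` positions earlier (zero history before the start). */
static void
delta_encode_bytes(uint8_t *buf, size_t size, size_t dist)
{
	uint8_t history[256] = { 0 };
	uint8_t pos = 0;

	for (size_t i = 0; i < size; ++i) {
		const uint8_t tmp = history[(dist + pos) & 0xFF];
		history[pos-- & 0xFF] = buf[i];
		buf[i] -= tmp;		/* store the difference */
	}
}

/* Inverse transform: add back the reconstructed byte `dist` positions
 * earlier, mirroring the decoder's loop. */
static void
delta_decode_bytes(uint8_t *buf, size_t size, size_t dist)
{
	uint8_t history[256] = { 0 };
	uint8_t pos = 0;

	for (size_t i = 0; i < size; ++i) {
		buf[i] += history[(dist + pos) & 0xFF];
		history[pos-- & 0xFF] = buf[i];
	}
}
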
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_encoder.h
new file mode 100644
index 00000000..32b0b0ee
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_encoder.h
@@ -0,0 +1,25 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file delta_encoder.h
+/// \brief Delta filter encoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_DELTA_ENCODER_H
+#define LZMA_DELTA_ENCODER_H
+
+#include "delta_common.h"
+
+extern lzma_ret lzma_delta_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+extern lzma_ret lzma_delta_props_encode(const void *options, uint8_t *out);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_private.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_private.h
new file mode 100644
index 00000000..8823c5f3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/delta/delta_private.h
@@ -0,0 +1,39 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file delta_private.h
+/// \brief Private common stuff for Delta encoder and decoder
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_DELTA_PRIVATE_H
+#define LZMA_DELTA_PRIVATE_H
+
+#include "delta_common.h"
+
+struct lzma_coder_s {
+ /// Next coder in the chain
+ lzma_next_coder next;
+
+ /// Delta distance
+ size_t distance;
+
+ /// Position in history[]
+ uint8_t pos;
+
+ /// Buffer to hold history of the original data
+ uint8_t history[LZMA_DELTA_DIST_MAX];
+};
+
+
+extern lzma_ret lzma_delta_coder_init(
+ lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters, lzma_code_function code);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/liblzma.pc.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/liblzma.pc.in
new file mode 100644
index 00000000..eb3f6c75
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/liblzma.pc.in
@@ -0,0 +1,19 @@
+#
+# Author: Lasse Collin
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: liblzma
+Description: General purpose data compression library
+URL: @PACKAGE_HOMEPAGE@
+Version: @PACKAGE_VERSION@
+Cflags: -I${includedir}
+Libs: -L${libdir} -llzma
+Libs.private: @PTHREAD_CFLAGS@ @PTHREAD_LIBS@
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/liblzma_w32res.rc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/liblzma_w32res.rc
new file mode 100644
index 00000000..d4d8159c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/liblzma_w32res.rc
@@ -0,0 +1,12 @@
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#define MY_TYPE VFT_DLL
+#define MY_NAME "liblzma"
+#define MY_SUFFIX ".dll"
+#define MY_DESC "liblzma data compression library"
+#include "common_w32res.rc"
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/Makefile.inc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/Makefile.inc
new file mode 100644
index 00000000..470d59c0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/Makefile.inc
@@ -0,0 +1,21 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+if COND_ENCODER_LZ
+liblzma_la_SOURCES += \
+ lz/lz_encoder.c \
+ lz/lz_encoder.h \
+ lz/lz_encoder_hash.h \
+ lz/lz_encoder_mf.c
+endif
+
+
+if COND_DECODER_LZ
+liblzma_la_SOURCES += \
+ lz/lz_decoder.c \
+ lz/lz_decoder.h
+endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_decoder.c
new file mode 100644
index 00000000..f69bbc61
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_decoder.c
@@ -0,0 +1,301 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lz_decoder.c
+/// \brief LZ out window
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// liblzma supports multiple LZ77-based filters. The LZ part is shared
+// between these filters. The LZ code takes care of dictionary handling
+// and passing the data between filters in the chain. The filter-specific
+// part decodes from the input buffer to the dictionary.
+
+
+#include "lz_decoder.h"
+
+
+struct lzma_coder_s {
+ /// Dictionary (history buffer)
+ lzma_dict dict;
+
+ /// The actual LZ-based decoder e.g. LZMA
+ lzma_lz_decoder lz;
+
+ /// Next filter in the chain, if any. Note that LZMA and LZMA2 are
+ /// only allowed as the last filter, but the long-range filter in
+ /// future can be in the middle of the chain.
+ lzma_next_coder next;
+
+ /// True if the next filter in the chain has returned LZMA_STREAM_END.
+ bool next_finished;
+
+ /// True if the LZ decoder (e.g. LZMA) has detected end of payload
+ /// marker. This may become true before next_finished becomes true.
+ bool this_finished;
+
+ /// Temporary buffer needed when the LZ-based filter is not the last
+ /// filter in the chain. The output of the next filter is first
+ /// decoded into buffer[], which is then used as input for the actual
+ /// LZ-based decoder.
+ struct {
+ size_t pos;
+ size_t size;
+ uint8_t buffer[LZMA_BUFFER_SIZE];
+ } temp;
+};
+
+
+static void
+lz_decoder_reset(lzma_coder *coder)
+{
+ coder->dict.pos = 0;
+ coder->dict.full = 0;
+ coder->dict.buf[coder->dict.size - 1] = '\0';
+ coder->dict.need_reset = false;
+ return;
+}
+
+
+static lzma_ret
+decode_buffer(lzma_coder *coder,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size)
+{
+ while (true) {
+ // Wrap the dictionary if needed.
+ if (coder->dict.pos == coder->dict.size)
+ coder->dict.pos = 0;
+
+ // Store the current dictionary position. It is needed to know
+ // where to start copying to the out[] buffer.
+ const size_t dict_start = coder->dict.pos;
+
+ // Calculate how much we allow coder->lz.code() to decode.
+ // It must not decode past the end of the dictionary
+ // buffer, and we don't want it to decode more than is
+ // actually needed to fill the out[] buffer.
+ coder->dict.limit = coder->dict.pos + MIN(out_size - *out_pos,
+ coder->dict.size - coder->dict.pos);
+
+ // Call the coder->lz.code() to do the actual decoding.
+ const lzma_ret ret = coder->lz.code(
+ coder->lz.coder, &coder->dict,
+ in, in_pos, in_size);
+
+ // Copy the decoded data from the dictionary to the out[]
+ // buffer.
+ const size_t copy_size = coder->dict.pos - dict_start;
+ assert(copy_size <= out_size - *out_pos);
+ memcpy(out + *out_pos, coder->dict.buf + dict_start,
+ copy_size);
+ *out_pos += copy_size;
+
+ // Reset the dictionary if so requested by coder->lz.code().
+ if (coder->dict.need_reset) {
+ lz_decoder_reset(coder);
+
+            // Since we reset the dictionary, we don't check if the
+            // dictionary became full.
+ if (ret != LZMA_OK || *out_pos == out_size)
+ return ret;
+ } else {
+ // Return if everything got decoded or an error
+ // occurred, or if there's no more data to decode.
+ //
+            // Note that detecting if there's something to decode
+            // is done by checking whether the dictionary became full
+            // instead of checking whether *in_pos == in_size. This
+            // is because it is possible that all the input was
+            // consumed already but some data is still pending to be
+            // written to the dictionary.
+ if (ret != LZMA_OK || *out_pos == out_size
+ || coder->dict.pos < coder->dict.size)
+ return ret;
+ }
+ }
+}
+
+
+static lzma_ret
+lz_decode(lzma_coder *coder,
+ lzma_allocator *allocator lzma_attribute((unused)),
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size,
+ lzma_action action)
+{
+ if (coder->next.code == NULL)
+ return decode_buffer(coder, in, in_pos, in_size,
+ out, out_pos, out_size);
+
+    // We aren't the last coder in the chain, so we need to decode
+    // our input into a temporary buffer.
+ while (*out_pos < out_size) {
+ // Fill the temporary buffer if it is empty.
+ if (!coder->next_finished
+ && coder->temp.pos == coder->temp.size) {
+ coder->temp.pos = 0;
+ coder->temp.size = 0;
+
+ const lzma_ret ret = coder->next.code(
+ coder->next.coder,
+ allocator, in, in_pos, in_size,
+ coder->temp.buffer, &coder->temp.size,
+ LZMA_BUFFER_SIZE, action);
+
+ if (ret == LZMA_STREAM_END)
+ coder->next_finished = true;
+ else if (ret != LZMA_OK || coder->temp.size == 0)
+ return ret;
+ }
+
+ if (coder->this_finished) {
+ if (coder->temp.size != 0)
+ return LZMA_DATA_ERROR;
+
+ if (coder->next_finished)
+ return LZMA_STREAM_END;
+
+ return LZMA_OK;
+ }
+
+ const lzma_ret ret = decode_buffer(coder, coder->temp.buffer,
+ &coder->temp.pos, coder->temp.size,
+ out, out_pos, out_size);
+
+ if (ret == LZMA_STREAM_END)
+ coder->this_finished = true;
+ else if (ret != LZMA_OK)
+ return ret;
+ else if (coder->next_finished && *out_pos < out_size)
+ return LZMA_DATA_ERROR;
+ }
+
+ return LZMA_OK;
+}
+
+
+static void
+lz_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+ lzma_free(coder->dict.buf, allocator);
+
+ if (coder->lz.end != NULL)
+ coder->lz.end(coder->lz.coder, allocator);
+ else
+ lzma_free(coder->lz.coder, allocator);
+
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+extern lzma_ret
+lzma_lz_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters,
+ lzma_ret (*lz_init)(lzma_lz_decoder *lz,
+ lzma_allocator *allocator, const void *options,
+ lzma_lz_options *lz_options))
+{
+ // Allocate the base structure if it isn't already allocated.
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &lz_decode;
+ next->end = &lz_decoder_end;
+
+ next->coder->dict.buf = NULL;
+ next->coder->dict.size = 0;
+ next->coder->lz = LZMA_LZ_DECODER_INIT;
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ }
+
+ // Allocate and initialize the LZ-based decoder. It will also give
+ // us the dictionary size.
+ lzma_lz_options lz_options;
+ return_if_error(lz_init(&next->coder->lz, allocator,
+ filters[0].options, &lz_options));
+
+ // If the dictionary size is very small, increase it to 4096 bytes.
+ // This is to prevent constant wrapping of the dictionary, which
+ // would slow things down. The downside is that since we don't check
+ // separately for the real dictionary size, we may happily accept
+ // corrupt files.
+ if (lz_options.dict_size < 4096)
+ lz_options.dict_size = 4096;
+
+    // Make the dictionary size a multiple of 16. Some LZ-based decoders like
+    // LZMA use the lowest bits of lzma_dict.pos to know the alignment of the
+    // data. An aligned buffer is also good when memcpying from the
+    // dictionary to the output buffer, since applications are
+    // recommended to give aligned buffers to liblzma.
+ //
+ // Avoid integer overflow.
+ if (lz_options.dict_size > SIZE_MAX - 15)
+ return LZMA_MEM_ERROR;
+
+ lz_options.dict_size = (lz_options.dict_size + 15) & ~((size_t)(15));
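+    // (For example, a requested dict_size of 4097 becomes 4112 here.)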
+
+ // Allocate and initialize the dictionary.
+ if (next->coder->dict.size != lz_options.dict_size) {
+ lzma_free(next->coder->dict.buf, allocator);
+ next->coder->dict.buf
+ = lzma_alloc(lz_options.dict_size, allocator);
+ if (next->coder->dict.buf == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->coder->dict.size = lz_options.dict_size;
+ }
+
+ lz_decoder_reset(next->coder);
+
+ // Use the preset dictionary if it was given to us.
+ if (lz_options.preset_dict != NULL
+ && lz_options.preset_dict_size > 0) {
+ // If the preset dictionary is bigger than the actual
+ // dictionary, copy only the tail.
+ const size_t copy_size = MIN(lz_options.preset_dict_size,
+ lz_options.dict_size);
+ const size_t offset = lz_options.preset_dict_size - copy_size;
+ memcpy(next->coder->dict.buf, lz_options.preset_dict + offset,
+ copy_size);
+ next->coder->dict.pos = copy_size;
+ next->coder->dict.full = copy_size;
+ }
+
+ // Miscellaneous initializations
+ next->coder->next_finished = false;
+ next->coder->this_finished = false;
+ next->coder->temp.pos = 0;
+ next->coder->temp.size = 0;
+
+ // Initialize the next filter in the chain, if any.
+ return lzma_next_filter_init(&next->coder->next, allocator,
+ filters + 1);
+}
+
+
+extern uint64_t
+lzma_lz_decoder_memusage(size_t dictionary_size)
+{
+ return sizeof(lzma_coder) + (uint64_t)(dictionary_size);
+}
+
+
+extern void
+lzma_lz_decoder_uncompressed(lzma_coder *coder, lzma_vli uncompressed_size)
+{
+ coder->lz.set_uncompressed(coder->lz.coder, uncompressed_size);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_decoder.h
new file mode 100644
index 00000000..aab77ae9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_decoder.h
@@ -0,0 +1,236 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lz_decoder.h
+/// \brief LZ out window
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_LZ_DECODER_H
+#define LZMA_LZ_DECODER_H
+
+#include "common.h"
+
+
+typedef struct {
+ /// Pointer to the dictionary buffer. It can be an allocated buffer
+    /// internal to liblzma, or it can be a buffer given by the
+ /// application when in single-call mode (not implemented yet).
+ uint8_t *buf;
+
+ /// Write position in dictionary. The next byte will be written to
+ /// buf[pos].
+ size_t pos;
+
+ /// Indicates how full the dictionary is. This is used by
+ /// dict_is_distance_valid() to detect corrupt files that would
+ /// read beyond the beginning of the dictionary.
+ size_t full;
+
+ /// Write limit
+ size_t limit;
+
+ /// Size of the dictionary
+ size_t size;
+
+ /// True when dictionary should be reset before decoding more data.
+ bool need_reset;
+
+} lzma_dict;
+
+
+typedef struct {
+ size_t dict_size;
+ const uint8_t *preset_dict;
+ size_t preset_dict_size;
+} lzma_lz_options;
+
+
+typedef struct {
+ /// Data specific to the LZ-based decoder
+ lzma_coder *coder;
+
+ /// Function to decode from in[] to *dict
+ lzma_ret (*code)(lzma_coder *restrict coder,
+ lzma_dict *restrict dict, const uint8_t *restrict in,
+ size_t *restrict in_pos, size_t in_size);
+
+ void (*reset)(lzma_coder *coder, const void *options);
+
+ /// Set the uncompressed size
+ void (*set_uncompressed)(lzma_coder *coder,
+ lzma_vli uncompressed_size);
+
+ /// Free allocated resources
+ void (*end)(lzma_coder *coder, lzma_allocator *allocator);
+
+} lzma_lz_decoder;
+
+
+#define LZMA_LZ_DECODER_INIT \
+ (lzma_lz_decoder){ \
+ .coder = NULL, \
+ .code = NULL, \
+ .reset = NULL, \
+ .set_uncompressed = NULL, \
+ .end = NULL, \
+ }
+
+
+extern lzma_ret lzma_lz_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters,
+ lzma_ret (*lz_init)(lzma_lz_decoder *lz,
+ lzma_allocator *allocator, const void *options,
+ lzma_lz_options *lz_options));
+
+extern uint64_t lzma_lz_decoder_memusage(size_t dictionary_size);
+
+extern void lzma_lz_decoder_uncompressed(
+ lzma_coder *coder, lzma_vli uncompressed_size);
+
+
+//////////////////////
+// Inline functions //
+//////////////////////
+
+/// Get a byte from the history buffer.
+static inline uint8_t
+dict_get(const lzma_dict *const dict, const uint32_t distance)
+{
+ return dict->buf[dict->pos - distance - 1
+ + (distance < dict->pos ? 0 : dict->size)];
+}
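+
+// As an illustration of the wrap-around indexing above (numbers made up):
+// with size == 8 and pos == 5, distance 0 reads buf[4] (the newest byte),
+// distance 4 reads buf[0], and distance 5 wraps around to buf[7].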
+
+
+/// Test if dictionary is empty.
+static inline bool
+dict_is_empty(const lzma_dict *const dict)
+{
+ return dict->full == 0;
+}
+
+
+/// Validate the match distance
+static inline bool
+dict_is_distance_valid(const lzma_dict *const dict, const size_t distance)
+{
+ return dict->full > distance;
+}
+
+
+/// Repeat *len bytes at distance.
+static inline bool
+dict_repeat(lzma_dict *dict, uint32_t distance, uint32_t *len)
+{
+ // Don't write past the end of the dictionary.
+ const size_t dict_avail = dict->limit - dict->pos;
+ uint32_t left = MIN(dict_avail, *len);
+ *len -= left;
+
+ // Repeat a block of data from the history. Because memcpy() is faster
+ // than copying byte by byte in a loop, the copying process gets split
+ // into three cases.
+ if (distance < left) {
+ // Source and target areas overlap, thus we can't use
+ // memcpy() nor even memmove() safely.
+ do {
+ dict->buf[dict->pos] = dict_get(dict, distance);
+ ++dict->pos;
+ } while (--left > 0);
+
+ } else if (distance < dict->pos) {
+ // The easiest and fastest case
+ memcpy(dict->buf + dict->pos,
+ dict->buf + dict->pos - distance - 1,
+ left);
+ dict->pos += left;
+
+ } else {
+        // The bigger the dictionary, the rarer this case is.
+        // We need to "wrap" the dict, so we might need two
+        // memcpy() calls to copy all the data.
+ assert(dict->full == dict->size);
+ const uint32_t copy_pos
+ = dict->pos - distance - 1 + dict->size;
+ uint32_t copy_size = dict->size - copy_pos;
+
+ if (copy_size < left) {
+ memmove(dict->buf + dict->pos, dict->buf + copy_pos,
+ copy_size);
+ dict->pos += copy_size;
+ copy_size = left - copy_size;
+ memcpy(dict->buf + dict->pos, dict->buf, copy_size);
+ dict->pos += copy_size;
+ } else {
+ memmove(dict->buf + dict->pos, dict->buf + copy_pos,
+ left);
+ dict->pos += left;
+ }
+ }
+
+ // Update how full the dictionary is.
+ if (dict->full < dict->pos)
+ dict->full = dict->pos;
+
+ return unlikely(*len != 0);
+}
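+
+// A couple of worked cases for dict_repeat() (illustrative numbers only):
+// with pos == 10 and limit >= 14, a call with distance == 0 and *len == 4
+// takes the byte-by-byte branch and repeats buf[9] four times (a run),
+// while distance == 5 with *len == 3 takes the memcpy() branch and copies
+// buf[4..6] to buf[10..12].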
+
+
+/// Puts one byte into the dictionary. Returns true if the dictionary was
+/// already full and the byte couldn't be added.
+static inline bool
+dict_put(lzma_dict *dict, uint8_t byte)
+{
+ if (unlikely(dict->pos == dict->limit))
+ return true;
+
+ dict->buf[dict->pos++] = byte;
+
+ if (dict->pos > dict->full)
+ dict->full = dict->pos;
+
+ return false;
+}
+
+
+/// Copies arbitrary amount of data into the dictionary.
+static inline void
+dict_write(lzma_dict *restrict dict, const uint8_t *restrict in,
+ size_t *restrict in_pos, size_t in_size,
+ size_t *restrict left)
+{
+ // NOTE: If we are being given more data than the size of the
+ // dictionary, it could be possible to optimize the LZ decoder
+ // so that not everything needs to go through the dictionary.
+    // This shouldn't be a very common thing in practice, though, and
+ // the slowdown of one extra memcpy() isn't bad compared to how
+ // much time it would have taken if the data were compressed.
+
+ if (in_size - *in_pos > *left)
+ in_size = *in_pos + *left;
+
+ *left -= lzma_bufcpy(in, in_pos, in_size,
+ dict->buf, &dict->pos, dict->limit);
+
+ if (dict->pos > dict->full)
+ dict->full = dict->pos;
+
+ return;
+}
+
+
+static inline void
+dict_reset(lzma_dict *dict)
+{
+ dict->need_reset = true;
+ return;
+}
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder.c
new file mode 100644
index 00000000..c4154f59
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder.c
@@ -0,0 +1,561 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lz_encoder.c
+/// \brief LZ in window
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "lz_encoder.h"
+#include "lz_encoder_hash.h"
+#include "check.h"
+
+
+struct lzma_coder_s {
+ /// LZ-based encoder e.g. LZMA
+ lzma_lz_encoder lz;
+
+ /// History buffer and match finder
+ lzma_mf mf;
+
+ /// Next coder in the chain
+ lzma_next_coder next;
+};
+
+
+/// \brief Moves the data in the input window to free space for new data
+///
+/// mf->buffer is a sliding input window, which keeps mf->keep_size_before
+/// bytes of input history available all the time. Now and then we need to
+/// "slide" the buffer to make space for the new data to the end of the
+/// buffer. At the same time, data older than keep_size_before is dropped.
+///
+static void
+move_window(lzma_mf *mf)
+{
+ // Align the move to a multiple of 16 bytes. Some LZ-based encoders
+ // like LZMA use the lowest bits of mf->read_pos to know the
+ // alignment of the uncompressed data. We also get better speed
+ // for memmove() with aligned buffers.
+ assert(mf->read_pos > mf->keep_size_before);
+ const uint32_t move_offset
+ = (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);
+
+ assert(mf->write_pos > move_offset);
+ const size_t move_size = mf->write_pos - move_offset;
+
+ assert(move_offset + move_size <= mf->size);
+
+ memmove(mf->buffer, mf->buffer + move_offset, move_size);
+
+ mf->offset += move_offset;
+ mf->read_pos -= move_offset;
+ mf->read_limit -= move_offset;
+ mf->write_pos -= move_offset;
+
+ return;
+}
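+
+// For example (illustrative numbers): with keep_size_before == 65536 and
+// read_pos == 70005, move_offset is (70005 - 65536) & ~15 == 4464, so
+// everything from buffer[4464] up to write_pos slides to the start of the
+// buffer and read_pos, read_limit and write_pos drop by 4464.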
+
+
+/// \brief Tries to fill the input window (mf->buffer)
+///
+/// If we are the last encoder in the chain, our input data is in in[].
+/// Otherwise we call the next filter in the chain to process in[] and
+/// write its output to mf->buffer.
+///
+/// This function must not be called once it has returned LZMA_STREAM_END.
+///
+static lzma_ret
+fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
+ size_t *in_pos, size_t in_size, lzma_action action)
+{
+ assert(coder->mf.read_pos <= coder->mf.write_pos);
+
+ // Move the sliding window if needed.
+ if (coder->mf.read_pos >= coder->mf.size - coder->mf.keep_size_after)
+ move_window(&coder->mf);
+
+ // Maybe this is ugly, but lzma_mf uses uint32_t for most things
+    // (which I find cleanest), while we need size_t here when filling
+ // the history window.
+ size_t write_pos = coder->mf.write_pos;
+ size_t in_used;
+ lzma_ret ret;
+ if (coder->next.code == NULL) {
+ // Not using a filter, simply memcpy() as much as possible.
+ in_used = lzma_bufcpy(in, in_pos, in_size, coder->mf.buffer,
+ &write_pos, coder->mf.size);
+
+ ret = action != LZMA_RUN && *in_pos == in_size
+ ? LZMA_STREAM_END : LZMA_OK;
+
+ } else {
+ const size_t in_start = *in_pos;
+ ret = coder->next.code(coder->next.coder, allocator,
+ in, in_pos, in_size,
+ coder->mf.buffer, &write_pos,
+ coder->mf.size, action);
+ in_used = *in_pos - in_start;
+ }
+
+ coder->mf.write_pos = write_pos;
+
+ // If end of stream has been reached or flushing completed, we allow
+ // the encoder to process all the input (that is, read_pos is allowed
+ // to reach write_pos). Otherwise we keep keep_size_after bytes
+ // available as prebuffer.
+ if (ret == LZMA_STREAM_END) {
+ assert(*in_pos == in_size);
+ ret = LZMA_OK;
+ coder->mf.action = action;
+ coder->mf.read_limit = coder->mf.write_pos;
+
+ } else if (coder->mf.write_pos > coder->mf.keep_size_after) {
+ // This needs to be done conditionally, because if we got
+        // only a little new input, there may be too little input
+ // to do any encoding yet.
+ coder->mf.read_limit = coder->mf.write_pos
+ - coder->mf.keep_size_after;
+ }
+
+ // Restart the match finder after finished LZMA_SYNC_FLUSH.
+ if (coder->mf.pending > 0
+ && coder->mf.read_pos < coder->mf.read_limit) {
+        // The match finder may update coder->pending and expects it to
+ // start from zero, so use a temporary variable.
+ const size_t pending = coder->mf.pending;
+ coder->mf.pending = 0;
+
+ // Rewind read_pos so that the match finder can hash
+ // the pending bytes.
+ assert(coder->mf.read_pos >= pending);
+ coder->mf.read_pos -= pending;
+
+ // Call the skip function directly instead of using
+ // mf_skip(), since we don't want to touch mf->read_ahead.
+ coder->mf.skip(&coder->mf, pending);
+ }
+
+ return ret;
+}
+
+
+static lzma_ret
+lz_encode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size,
+ uint8_t *restrict out, size_t *restrict out_pos,
+ size_t out_size, lzma_action action)
+{
+ while (*out_pos < out_size
+ && (*in_pos < in_size || action != LZMA_RUN)) {
+ // Read more data to coder->mf.buffer if needed.
+ if (coder->mf.action == LZMA_RUN && coder->mf.read_pos
+ >= coder->mf.read_limit)
+ return_if_error(fill_window(coder, allocator,
+ in, in_pos, in_size, action));
+
+ // Encode
+ const lzma_ret ret = coder->lz.code(coder->lz.coder,
+ &coder->mf, out, out_pos, out_size);
+ if (ret != LZMA_OK) {
+ // Setting this to LZMA_RUN for cases when we are
+ // flushing. It doesn't matter when finishing or if
+ // an error occurred.
+ coder->mf.action = LZMA_RUN;
+ return ret;
+ }
+ }
+
+ return LZMA_OK;
+}
+
+
+static bool
+lz_encoder_prepare(lzma_mf *mf, lzma_allocator *allocator,
+ const lzma_lz_options *lz_options)
+{
+ // For now, the dictionary size is limited to 1.5 GiB. This may grow
+ // in the future if needed, but it needs a little more work than just
+ // changing this check.
+ if (lz_options->dict_size < LZMA_DICT_SIZE_MIN
+ || lz_options->dict_size
+ > (UINT32_C(1) << 30) + (UINT32_C(1) << 29)
+ || lz_options->nice_len > lz_options->match_len_max)
+ return true;
+
+ mf->keep_size_before = lz_options->before_size + lz_options->dict_size;
+
+ mf->keep_size_after = lz_options->after_size
+ + lz_options->match_len_max;
+
+ // To avoid constant memmove()s, allocate some extra space. Since
+ // memmove()s become more expensive when the size of the buffer
+ // increases, we reserve more space when a large dictionary is
+ // used to make the memmove() calls rarer.
+ //
+ // This works with dictionaries up to about 3 GiB. If bigger
+ // dictionary is wanted, some extra work is needed:
+ // - Several variables in lzma_mf have to be changed from uint32_t
+ // to size_t.
+ // - Memory usage calculation needs something too, e.g. use uint64_t
+ // for mf->size.
+ uint32_t reserve = lz_options->dict_size / 2;
+ if (reserve > (UINT32_C(1) << 30))
+ reserve /= 2;
+
+ reserve += (lz_options->before_size + lz_options->match_len_max
+ + lz_options->after_size) / 2 + (UINT32_C(1) << 19);
+
+ const uint32_t old_size = mf->size;
+ mf->size = mf->keep_size_before + reserve + mf->keep_size_after;
+
+ // Deallocate the old history buffer if it exists but has different
+ // size than what is needed now.
+ if (mf->buffer != NULL && old_size != mf->size) {
+ lzma_free(mf->buffer, allocator);
+ mf->buffer = NULL;
+ }
+
+ // Match finder options
+ mf->match_len_max = lz_options->match_len_max;
+ mf->nice_len = lz_options->nice_len;
+
+ // cyclic_size has to stay smaller than 2 Gi. Note that this doesn't
+    // mean limiting the dictionary size to less than 2 GiB. With a match
+ // finder that uses multibyte resolution (hashes start at e.g. every
+ // fourth byte), cyclic_size would stay below 2 Gi even when
+ // dictionary size is greater than 2 GiB.
+ //
+ // It would be possible to allow cyclic_size >= 2 Gi, but then we
+ // would need to be careful to use 64-bit types in various places
+ // (size_t could do since we would need bigger than 32-bit address
+ // space anyway). It would also require either zeroing a multigigabyte
+ // buffer at initialization (waste of time and RAM) or allow
+ // normalization in lz_encoder_mf.c to access uninitialized
+ // memory to keep the code simpler. The current way is simple and
+ // still allows pretty big dictionaries, so I don't expect these
+ // limits to change.
+ mf->cyclic_size = lz_options->dict_size + 1;
+
+ // Validate the match finder ID and setup the function pointers.
+ switch (lz_options->match_finder) {
+#ifdef HAVE_MF_HC3
+ case LZMA_MF_HC3:
+ mf->find = &lzma_mf_hc3_find;
+ mf->skip = &lzma_mf_hc3_skip;
+ break;
+#endif
+#ifdef HAVE_MF_HC4
+ case LZMA_MF_HC4:
+ mf->find = &lzma_mf_hc4_find;
+ mf->skip = &lzma_mf_hc4_skip;
+ break;
+#endif
+#ifdef HAVE_MF_BT2
+ case LZMA_MF_BT2:
+ mf->find = &lzma_mf_bt2_find;
+ mf->skip = &lzma_mf_bt2_skip;
+ break;
+#endif
+#ifdef HAVE_MF_BT3
+ case LZMA_MF_BT3:
+ mf->find = &lzma_mf_bt3_find;
+ mf->skip = &lzma_mf_bt3_skip;
+ break;
+#endif
+#ifdef HAVE_MF_BT4
+ case LZMA_MF_BT4:
+ mf->find = &lzma_mf_bt4_find;
+ mf->skip = &lzma_mf_bt4_skip;
+ break;
+#endif
+
+ default:
+ return true;
+ }
+
+ // Calculate the sizes of mf->hash and mf->son and check that
+ // nice_len is big enough for the selected match finder.
+ const uint32_t hash_bytes = lz_options->match_finder & 0x0F;
+ if (hash_bytes > mf->nice_len)
+ return true;
+
+ const bool is_bt = (lz_options->match_finder & 0x10) != 0;
+ uint32_t hs;
+
+ if (hash_bytes == 2) {
+ hs = 0xFFFF;
+ } else {
+ // Round dictionary size up to the next 2^n - 1 so it can
+ // be used as a hash mask.
+ hs = lz_options->dict_size - 1;
+ hs |= hs >> 1;
+ hs |= hs >> 2;
+ hs |= hs >> 4;
+ hs |= hs >> 8;
+ hs >>= 1;
+ hs |= 0xFFFF;
+
+ if (hs > (UINT32_C(1) << 24)) {
+ if (hash_bytes == 3)
+ hs = (UINT32_C(1) << 24) - 1;
+ else
+ hs >>= 1;
+ }
+ }
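+
+    // As a worked example (illustration only): with hash_bytes == 4 and
+    // dict_size == 1 MiB, hs starts as 0xFFFFF, the bit-smearing keeps it
+    // at 0xFFFFF, the shift gives 0x7FFFF, and OR-ing 0xFFFF changes
+    // nothing, so hash_mask becomes 0x7FFFF and the main hash gets
+    // 0x80000 slots (plus HASH_2_SIZE and HASH_3_SIZE below).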
+
+ mf->hash_mask = hs;
+
+ ++hs;
+ if (hash_bytes > 2)
+ hs += HASH_2_SIZE;
+ if (hash_bytes > 3)
+ hs += HASH_3_SIZE;
+/*
+ No match finder uses this at the moment.
+ if (mf->hash_bytes > 4)
+ hs += HASH_4_SIZE;
+*/
+
+ // If the above code calculating hs is modified, make sure that
+ // this assertion stays valid (UINT32_MAX / 5 is not strictly the
+ // exact limit). If it doesn't, you need to calculate that
+ // hash_size_sum + sons_count cannot overflow.
+ assert(hs < UINT32_MAX / 5);
+
+ const uint32_t old_count = mf->hash_size_sum + mf->sons_count;
+ mf->hash_size_sum = hs;
+ mf->sons_count = mf->cyclic_size;
+ if (is_bt)
+ mf->sons_count *= 2;
+
+ const uint32_t new_count = mf->hash_size_sum + mf->sons_count;
+
+ // Deallocate the old hash array if it exists and has different size
+ // than what is needed now.
+ if (mf->hash != NULL && old_count != new_count) {
+ lzma_free(mf->hash, allocator);
+ mf->hash = NULL;
+ }
+
+ // Maximum number of match finder cycles
+ mf->depth = lz_options->depth;
+ if (mf->depth == 0) {
+ mf->depth = 16 + (mf->nice_len / 2);
+ if (!is_bt)
+ mf->depth /= 2;
+ }
+
+ return false;
+}
+
+
+static bool
+lz_encoder_init(lzma_mf *mf, lzma_allocator *allocator,
+ const lzma_lz_options *lz_options)
+{
+ // Allocate the history buffer.
+ if (mf->buffer == NULL) {
+ mf->buffer = lzma_alloc(mf->size, allocator);
+ if (mf->buffer == NULL)
+ return true;
+ }
+
+ // Use cyclic_size as initial mf->offset. This allows
+ // avoiding a few branches in the match finders. The downside is
+    // that the match finder needs to be normalized more often, which may
+ // hurt performance with huge dictionaries.
+ mf->offset = mf->cyclic_size;
+ mf->read_pos = 0;
+ mf->read_ahead = 0;
+ mf->read_limit = 0;
+ mf->write_pos = 0;
+ mf->pending = 0;
+
+ // Allocate match finder's hash array.
+ const size_t alloc_count = mf->hash_size_sum + mf->sons_count;
+
+#if UINT32_MAX >= SIZE_MAX / 4
+ // Check for integer overflow. (Huge dictionaries are not
+ // possible on 32-bit CPU.)
+ if (alloc_count > SIZE_MAX / sizeof(uint32_t))
+ return true;
+#endif
+
+ if (mf->hash == NULL) {
+ mf->hash = lzma_alloc(alloc_count * sizeof(uint32_t),
+ allocator);
+ if (mf->hash == NULL)
+ return true;
+ }
+
+ mf->son = mf->hash + mf->hash_size_sum;
+ mf->cyclic_pos = 0;
+
+ // Initialize the hash table. Since EMPTY_HASH_VALUE is zero, we
+ // can use memset().
+/*
+ for (uint32_t i = 0; i < hash_size_sum; ++i)
+ mf->hash[i] = EMPTY_HASH_VALUE;
+*/
+ memzero(mf->hash, (size_t)(mf->hash_size_sum) * sizeof(uint32_t));
+
+ // We don't need to initialize mf->son, but not doing that will
+ // make Valgrind complain in normalization (see normalize() in
+ // lz_encoder_mf.c).
+ //
+    // Skipping this initialization is *very* good when a big dictionary is
+    // used but only a small amount of data actually gets compressed: most
+    // of mf->hash won't actually get allocated by the kernel, so
+ // we avoid wasting RAM and improve initialization speed a lot.
+ //memzero(mf->son, (size_t)(mf->sons_count) * sizeof(uint32_t));
+
+ // Handle preset dictionary.
+ if (lz_options->preset_dict != NULL
+ && lz_options->preset_dict_size > 0) {
+ // If the preset dictionary is bigger than the actual
+ // dictionary, use only the tail.
+ mf->write_pos = MIN(lz_options->preset_dict_size, mf->size);
+ memcpy(mf->buffer, lz_options->preset_dict
+ + lz_options->preset_dict_size - mf->write_pos,
+ mf->write_pos);
+ mf->action = LZMA_SYNC_FLUSH;
+ mf->skip(mf, mf->write_pos);
+ }
+
+ mf->action = LZMA_RUN;
+
+ return false;
+}
+
+
+extern uint64_t
+lzma_lz_encoder_memusage(const lzma_lz_options *lz_options)
+{
+ // Old buffers must not exist when calling lz_encoder_prepare().
+ lzma_mf mf = {
+ .buffer = NULL,
+ .hash = NULL,
+ };
+
+ // Setup the size information into mf.
+ if (lz_encoder_prepare(&mf, NULL, lz_options))
+ return UINT64_MAX;
+
+ // Calculate the memory usage.
+ return (uint64_t)(mf.hash_size_sum + mf.sons_count)
+ * sizeof(uint32_t)
+ + (uint64_t)(mf.size) + sizeof(lzma_coder);
+}
+
+
+static void
+lz_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+
+ lzma_free(coder->mf.hash, allocator);
+ lzma_free(coder->mf.buffer, allocator);
+
+ if (coder->lz.end != NULL)
+ coder->lz.end(coder->lz.coder, allocator);
+ else
+ lzma_free(coder->lz.coder, allocator);
+
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+extern lzma_ret
+lzma_lz_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters,
+ lzma_ret (*lz_init)(lzma_lz_encoder *lz,
+ lzma_allocator *allocator, const void *options,
+ lzma_lz_options *lz_options))
+{
+#ifdef HAVE_SMALL
+    // We need the CRC32 table to be initialized.
+ lzma_crc32_init();
+#endif
+
+ // Allocate and initialize the base data structure.
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &lz_encode;
+ next->end = &lz_encoder_end;
+
+ next->coder->lz.coder = NULL;
+ next->coder->lz.code = NULL;
+ next->coder->lz.end = NULL;
+
+ next->coder->mf.buffer = NULL;
+ next->coder->mf.hash = NULL;
+
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ }
+
+ // Initialize the LZ-based encoder.
+ lzma_lz_options lz_options;
+ return_if_error(lz_init(&next->coder->lz, allocator,
+ filters[0].options, &lz_options));
+
+ // Setup the size information into next->coder->mf and deallocate
+ // old buffers if they have wrong size.
+ if (lz_encoder_prepare(&next->coder->mf, allocator, &lz_options))
+ return LZMA_OPTIONS_ERROR;
+
+ // Allocate new buffers if needed, and do the rest of
+ // the initialization.
+ if (lz_encoder_init(&next->coder->mf, allocator, &lz_options))
+ return LZMA_MEM_ERROR;
+
+ // Initialize the next filter in the chain, if any.
+ return lzma_next_filter_init(&next->coder->next, allocator,
+ filters + 1);
+}
+
+
+extern LZMA_API(lzma_bool)
+lzma_mf_is_supported(lzma_match_finder mf)
+{
+ bool ret = false;
+
+#ifdef HAVE_MF_HC3
+ if (mf == LZMA_MF_HC3)
+ ret = true;
+#endif
+
+#ifdef HAVE_MF_HC4
+ if (mf == LZMA_MF_HC4)
+ ret = true;
+#endif
+
+#ifdef HAVE_MF_BT2
+ if (mf == LZMA_MF_BT2)
+ ret = true;
+#endif
+
+#ifdef HAVE_MF_BT3
+ if (mf == LZMA_MF_BT3)
+ ret = true;
+#endif
+
+#ifdef HAVE_MF_BT4
+ if (mf == LZMA_MF_BT4)
+ ret = true;
+#endif
+
+ return ret;
+}
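+
+// A minimal usage sketch (hypothetical caller code, not part of this file's
+// API surface): an options validator could reject unsupported match finders
+// early with something like
+//
+//     if (!lzma_mf_is_supported(opts->mf))
+//         return LZMA_OPTIONS_ERROR;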
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder.h
new file mode 100644
index 00000000..9301abe5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder.h
@@ -0,0 +1,326 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lz_encoder.h
+/// \brief LZ in window and match finder API
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_LZ_ENCODER_H
+#define LZMA_LZ_ENCODER_H
+
+#include "common.h"
+
+
+/// A table of these is used by the LZ-based encoder to hold
+/// the length-distance pairs found by the match finder.
+typedef struct {
+ uint32_t len;
+ uint32_t dist;
+} lzma_match;
+
+
+typedef struct lzma_mf_s lzma_mf;
+struct lzma_mf_s {
+ ///////////////
+ // In Window //
+ ///////////////
+
+ /// Pointer to buffer with data to be compressed
+ uint8_t *buffer;
+
+ /// Total size of the allocated buffer (that is, including all
+ /// the extra space)
+ uint32_t size;
+
+ /// Number of bytes that must be kept available in our input history.
+ /// That is, once keep_size_before bytes have been processed,
+ /// buffer[read_pos - keep_size_before] is the oldest byte that
+ /// must be available for reading.
+ uint32_t keep_size_before;
+
+ /// Number of bytes that must be kept in buffer after read_pos.
+ /// That is, read_pos <= write_pos - keep_size_after as long as
+ /// action is LZMA_RUN; when action != LZMA_RUN, read_pos is allowed
+ /// to reach write_pos so that the last bytes get encoded too.
+ uint32_t keep_size_after;
+
+ /// Match finders store locations of matches using 32-bit integers.
+ /// To avoid adjusting several megabytes of integers every time the
+ /// input window is moved with move_window, we only adjust the
+ /// offset of the buffer. Thus, buffer[value_in_hash_table - offset]
+    /// is the byte pointed to by value_in_hash_table.
+ uint32_t offset;
+
+ /// buffer[read_pos] is the next byte to run through the match
+ /// finder. This is incremented in the match finder once the byte
+ /// has been processed.
+ uint32_t read_pos;
+
+    /// Number of bytes that have been run through the match finder, but
+ /// which haven't been encoded by the LZ-based encoder yet.
+ uint32_t read_ahead;
+
+ /// As long as read_pos is less than read_limit, there is enough
+ /// input available in buffer for at least one encoding loop.
+ ///
+ /// Because of the stateful API, read_limit may and will get greater
+ /// than read_pos quite often. This is taken into account when
+ /// calculating the value for keep_size_after.
+ uint32_t read_limit;
+
+ /// buffer[write_pos] is the first byte that doesn't contain valid
+ /// uncompressed data; that is, the next input byte will be copied
+ /// to buffer[write_pos].
+ uint32_t write_pos;
+
+ /// Number of bytes not hashed before read_pos. This is needed to
+ /// restart the match finder after LZMA_SYNC_FLUSH.
+ uint32_t pending;
+
+ //////////////////
+ // Match Finder //
+ //////////////////
+
+ /// Find matches. Returns the number of distance-length pairs written
+ /// to the matches array. This is called only via lzma_mf_find().
+ uint32_t (*find)(lzma_mf *mf, lzma_match *matches);
+
+ /// Skips num bytes. This is like find() but doesn't make the
+ /// distance-length pairs available, thus being a little faster.
+ /// This is called only via mf_skip().
+ void (*skip)(lzma_mf *mf, uint32_t num);
+
+ uint32_t *hash;
+ uint32_t *son;
+ uint32_t cyclic_pos;
+ uint32_t cyclic_size; // Must be dictionary size + 1.
+ uint32_t hash_mask;
+
+ /// Maximum number of loops in the match finder
+ uint32_t depth;
+
+ /// Maximum length of a match that the match finder will try to find.
+ uint32_t nice_len;
+
+ /// Maximum length of a match supported by the LZ-based encoder.
+ /// If the longest match found by the match finder is nice_len,
+ /// mf_find() tries to expand it up to match_len_max bytes.
+ uint32_t match_len_max;
+
+ /// When running out of input, binary tree match finders need to know
+ /// if it is due to flushing or finishing. The action is used also
+ /// by the LZ-based encoders themselves.
+ lzma_action action;
+
+ /// Number of elements in hash[]
+ uint32_t hash_size_sum;
+
+ /// Number of elements in son[]
+ uint32_t sons_count;
+};
+
+
+typedef struct {
+ /// Extra amount of data to keep available before the "actual"
+ /// dictionary.
+ size_t before_size;
+
+ /// Size of the history buffer
+ size_t dict_size;
+
+ /// Extra amount of data to keep available after the "actual"
+ /// dictionary.
+ size_t after_size;
+
+ /// Maximum length of a match that the LZ-based encoder can accept.
+ /// This is used to extend matches of length nice_len to the
+ /// maximum possible length.
+ size_t match_len_max;
+
+    /// The match finder will search for matches of at most this length.
+ /// This must be less than or equal to match_len_max.
+ size_t nice_len;
+
+ /// Type of the match finder to use
+ lzma_match_finder match_finder;
+
+ /// Maximum search depth
+ uint32_t depth;
+
+ /// TODO: Comment
+ const uint8_t *preset_dict;
+
+ uint32_t preset_dict_size;
+
+} lzma_lz_options;
+
+
+// The total usable buffer space at any moment outside the match finder:
+// before_size + dict_size + after_size + match_len_max
+//
+// In reality, there's some extra space allocated to keep the number of
+// memmove() calls reasonable. The bigger dict_size is, the bigger
+// this extra buffer will be, since with bigger dictionaries memmove() would
+// also take longer.
+//
+// A single encoder loop in the LZ-based encoder may call the match finder
+// (mf_find() or mf_skip()) at most after_size times. In other words,
+// a single encoder loop may advance lzma_mf.read_pos at most
+// after_size times. Since matches are looked for up to
+// lzma_mf.buffer[lzma_mf.read_pos + match_len_max - 1], the total
+// amount of extra buffer needed after dict_size becomes
+// after_size + match_len_max.
+//
+// before_size has two uses. The first one is to keep literals available
+// in cases when the LZ-based encoder has made some read ahead.
+// TODO: Maybe this could be changed by making the LZ-based encoders
+// store the actual literals as they do with length-distance pairs.
+//
+// Algorithms such as LZMA2 first try to compress a chunk, and then check
+// if the encoded result is smaller than the uncompressed one. If the chunk
+// was incompressible, it is better to store it in uncompressed form in
+// the output stream. To do this, the whole uncompressed chunk still has to
+// be available in the history buffer. before_size achieves that.
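+//
+// Putting the pieces together (see lz_encoder_prepare() in lz_encoder.c),
+// the allocation ends up being roughly
+//
+//     mf->size = (before_size + dict_size)            // keep_size_before
+//              + (after_size + match_len_max)         // keep_size_after
+//              + dict_size / 2 + (before_size + match_len_max
+//                      + after_size) / 2 + 512 KiB    // reserve
+//
+// with the dict_size / 2 term halved once more for dictionaries larger
+// than 1 GiB.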
+
+
+typedef struct {
+ /// Data specific to the LZ-based encoder
+ lzma_coder *coder;
+
+ /// Function to encode from *dict to out[]
+ lzma_ret (*code)(lzma_coder *restrict coder,
+ lzma_mf *restrict mf, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size);
+
+ /// Free allocated resources
+ void (*end)(lzma_coder *coder, lzma_allocator *allocator);
+
+} lzma_lz_encoder;
+
+
+// Basic steps:
+// 1. Input gets copied into the dictionary.
+// 2. Data in dictionary gets run through the match finder byte by byte.
+// 3. The literals and matches are encoded using e.g. LZMA.
+//
+// The bytes that have been run through the match finder, but not encoded yet,
+// are called `read ahead'.
+
+
+/// Get a pointer to the first byte not yet run through the match finder
+static inline const uint8_t *
+mf_ptr(const lzma_mf *mf)
+{
+ return mf->buffer + mf->read_pos;
+}
+
+
+/// Get the number of bytes that haven't been run through the match finder yet.
+static inline uint32_t
+mf_avail(const lzma_mf *mf)
+{
+ return mf->write_pos - mf->read_pos;
+}
+
+
+/// Get the number of bytes that haven't been encoded yet (some of these
+/// bytes may have been run through the match finder though).
+static inline uint32_t
+mf_unencoded(const lzma_mf *mf)
+{
+ return mf->write_pos - mf->read_pos + mf->read_ahead;
+}
+
+
+/// Calculate the absolute offset from the beginning of the most recent
+/// dictionary reset. Only the lowest four bits are important, so there's no
+/// problem that we don't know the 64-bit size of the data encoded so far.
+///
+/// NOTE: When moving the input window, we need to do it so that the lowest
+/// bits of mf->read_pos are not modified to keep this function working
+/// as intended.
+static inline uint32_t
+mf_position(const lzma_mf *mf)
+{
+ return mf->read_pos - mf->read_ahead;
+}
+
+
+/// Since everything else begins with mf_, use it also for lzma_mf_find().
+#define mf_find lzma_mf_find
+
+
+/// Skip the given number of bytes. This is used when a good match was found.
+/// For example, if mf_find() finds a 200-byte match, the first byte
+/// of that match was already consumed by mf_find(), and the remaining 199 bytes
+/// have to be skipped with mf_skip(mf, 199).
+static inline void
+mf_skip(lzma_mf *mf, uint32_t amount)
+{
+ if (amount != 0) {
+ mf->skip(mf, amount);
+ mf->read_ahead += amount;
+ }
+}
+
+
+/// Copies at most *left bytes from the history buffer
+/// to out[]. This is needed by LZMA2 to encode uncompressed chunks.
+static inline void
+mf_read(lzma_mf *mf, uint8_t *out, size_t *out_pos, size_t out_size,
+ size_t *left)
+{
+ const size_t out_avail = out_size - *out_pos;
+ const size_t copy_size = MIN(out_avail, *left);
+
+ assert(mf->read_ahead == 0);
+ assert(mf->read_pos >= *left);
+
+ memcpy(out + *out_pos, mf->buffer + mf->read_pos - *left,
+ copy_size);
+
+ *out_pos += copy_size;
+ *left -= copy_size;
+ return;
+}
+
+
+extern lzma_ret lzma_lz_encoder_init(
+ lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters,
+ lzma_ret (*lz_init)(lzma_lz_encoder *lz,
+ lzma_allocator *allocator, const void *options,
+ lzma_lz_options *lz_options));
+
+
+extern uint64_t lzma_lz_encoder_memusage(const lzma_lz_options *lz_options);
+
+
+// These are only for LZ encoder's internal use.
+extern uint32_t lzma_mf_find(
+ lzma_mf *mf, uint32_t *count, lzma_match *matches);
+
+extern uint32_t lzma_mf_hc3_find(lzma_mf *dict, lzma_match *matches);
+extern void lzma_mf_hc3_skip(lzma_mf *dict, uint32_t amount);
+
+extern uint32_t lzma_mf_hc4_find(lzma_mf *dict, lzma_match *matches);
+extern void lzma_mf_hc4_skip(lzma_mf *dict, uint32_t amount);
+
+extern uint32_t lzma_mf_bt2_find(lzma_mf *dict, lzma_match *matches);
+extern void lzma_mf_bt2_skip(lzma_mf *dict, uint32_t amount);
+
+extern uint32_t lzma_mf_bt3_find(lzma_mf *dict, lzma_match *matches);
+extern void lzma_mf_bt3_skip(lzma_mf *dict, uint32_t amount);
+
+extern uint32_t lzma_mf_bt4_find(lzma_mf *dict, lzma_match *matches);
+extern void lzma_mf_bt4_skip(lzma_mf *dict, uint32_t amount);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder_hash.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder_hash.h
new file mode 100644
index 00000000..3196e6e5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder_hash.h
@@ -0,0 +1,99 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lz_encoder_hash.h
+/// \brief Hash macros for match finders
+//
+// Author: Igor Pavlov
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_LZ_ENCODER_HASH_H
+#define LZMA_LZ_ENCODER_HASH_H
+
+#define HASH_2_SIZE (UINT32_C(1) << 10)
+#define HASH_3_SIZE (UINT32_C(1) << 16)
+#define HASH_4_SIZE (UINT32_C(1) << 20)
+
+#define HASH_2_MASK (HASH_2_SIZE - 1)
+#define HASH_3_MASK (HASH_3_SIZE - 1)
+#define HASH_4_MASK (HASH_4_SIZE - 1)
+
+#define FIX_3_HASH_SIZE (HASH_2_SIZE)
+#define FIX_4_HASH_SIZE (HASH_2_SIZE + HASH_3_SIZE)
+#define FIX_5_HASH_SIZE (HASH_2_SIZE + HASH_3_SIZE + HASH_4_SIZE)
+
+// TODO Benchmark, and probably doesn't need to be endian dependent.
+#if !defined(WORDS_BIGENDIAN) && defined(HAVE_FAST_UNALIGNED_ACCESS)
+# define hash_2_calc() \
+ const uint32_t hash_value = *(const uint16_t *)(cur);
+#else
+# define hash_2_calc() \
+ const uint32_t hash_value \
+ = (uint32_t)(cur[0]) | ((uint32_t)(cur[1]) << 8)
+#endif
+
+#define hash_3_calc() \
+ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; \
+ const uint32_t hash_2_value = temp & HASH_2_MASK; \
+ const uint32_t hash_value \
+ = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask
+
+#define hash_4_calc() \
+ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; \
+ const uint32_t hash_2_value = temp & HASH_2_MASK; \
+ const uint32_t hash_3_value \
+ = (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \
+ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8) \
+ ^ (lzma_crc32_table[0][cur[3]] << 5)) & mf->hash_mask
+
+
+// The following are not currently used.
+
+#define hash_5_calc() \
+ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; \
+ const uint32_t hash_2_value = temp & HASH_2_MASK; \
+ const uint32_t hash_3_value \
+ = (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \
+    uint32_t hash_4_value = (temp ^ ((uint32_t)(cur[2]) << 8) \
+            ^ (lzma_crc32_table[0][cur[3]] << 5)); \
+ const uint32_t hash_value \
+ = (hash_4_value ^ (lzma_crc32_table[0][cur[4]] << 3)) \
+ & mf->hash_mask; \
+ hash_4_value &= HASH_4_MASK
+
+/*
+#define hash_zip_calc() \
+ const uint32_t hash_value \
+ = (((uint32_t)(cur[0]) | ((uint32_t)(cur[1]) << 8)) \
+ ^ lzma_crc32_table[0][cur[2]]) & 0xFFFF
+*/
+
+#define hash_zip_calc() \
+ const uint32_t hash_value \
+ = (((uint32_t)(cur[2]) | ((uint32_t)(cur[0]) << 8)) \
+ ^ lzma_crc32_table[0][cur[1]]) & 0xFFFF
+
+#define mt_hash_2_calc() \
+ const uint32_t hash_2_value \
+ = (lzma_crc32_table[0][cur[0]] ^ cur[1]) & HASH_2_MASK
+
+#define mt_hash_3_calc() \
+ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; \
+ const uint32_t hash_2_value = temp & HASH_2_MASK; \
+ const uint32_t hash_3_value \
+ = (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK
+
+#define mt_hash_4_calc() \
+ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1]; \
+ const uint32_t hash_2_value = temp & HASH_2_MASK; \
+ const uint32_t hash_3_value \
+ = (temp ^ ((uint32_t)(cur[2]) << 8)) & HASH_3_MASK; \
+ const uint32_t hash_4_value = (temp ^ ((uint32_t)(cur[2]) << 8) ^ \
+ (lzma_crc32_table[0][cur[3]] << 5)) & HASH_4_MASK
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder_mf.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder_mf.c
new file mode 100644
index 00000000..c3fd8c13
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lz/lz_encoder_mf.c
@@ -0,0 +1,756 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lz_encoder_mf.c
+/// \brief Match finders
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "lz_encoder.h"
+#include "lz_encoder_hash.h"
+#include "check.h"
+
+
+/// \brief Find matches starting from the current byte
+///
+/// \return The length of the longest match found
+extern uint32_t
+lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
+{
+ // Call the match finder. It returns the number of length-distance
+ // pairs found.
+ // FIXME: Minimum count is zero, what _exactly_ is the maximum?
+ const uint32_t count = mf->find(mf, matches);
+
+ // Length of the longest match; assume that no matches were found
+ // and thus the maximum length is zero.
+ uint32_t len_best = 0;
+
+ if (count > 0) {
+#ifndef NDEBUG
+ // Validate the matches.
+ for (uint32_t i = 0; i < count; ++i) {
+ assert(matches[i].len <= mf->nice_len);
+ assert(matches[i].dist < mf->read_pos);
+ assert(memcmp(mf_ptr(mf) - 1,
+ mf_ptr(mf) - matches[i].dist - 2,
+ matches[i].len) == 0);
+ }
+#endif
+
+ // The last used element in the array contains
+ // the longest match.
+ len_best = matches[count - 1].len;
+
+ // If a match of maximum search length was found, try to
+ // extend the match to maximum possible length.
+ if (len_best == mf->nice_len) {
+ // The limit for the match length is either the
+ // maximum match length supported by the LZ-based
+ // encoder or the number of bytes left in the
+ // dictionary, whichever is smaller.
+ uint32_t limit = mf_avail(mf) + 1;
+ if (limit > mf->match_len_max)
+ limit = mf->match_len_max;
+
+ // Pointer to the byte we just ran through
+ // the match finder.
+ const uint8_t *p1 = mf_ptr(mf) - 1;
+
+ // Pointer to the beginning of the match. We need -1
+ // here because the match distances are zero based.
+ const uint8_t *p2 = p1 - matches[count - 1].dist - 1;
+
+ while (len_best < limit
+ && p1[len_best] == p2[len_best])
+ ++len_best;
+ }
+ }
+
+ *count_ptr = count;
+
+ // Finally update the read position to indicate that match finder was
+ // run for this dictionary offset.
+ ++mf->read_ahead;
+
+ return len_best;
+}
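+
+// For example (typical but illustrative numbers): with nice_len == 64 and
+// match_len_max == 273, a match reported at exactly 64 bytes is extended
+// above byte by byte, up to 273 bytes or the end of the available data,
+// whichever comes first.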
+
+
+/// Hash value to indicate an unused element in the hash. Since we start the
+/// positions from dict_size + 1, zero is always too far to qualify
+/// as a usable match position.
+#define EMPTY_HASH_VALUE 0
+
+
+/// Normalization must be done when lzma_mf.offset + lzma_mf.read_pos
+/// reaches MUST_NORMALIZE_POS.
+#define MUST_NORMALIZE_POS UINT32_MAX
+
+
+/// \brief Normalizes hash values
+///
+/// The hash arrays store positions of match candidates. The positions are
+/// relative to an arbitrary offset that is not the same as the absolute
+/// offset in the input stream. The relative position of the current byte
+/// is lzma_mf.offset + lzma_mf.read_pos. The distances of the matches are
+/// the differences of the current read position and the position found from
+/// the hash.
+///
+/// To prevent integer overflows of the offsets stored in the hash arrays,
+/// we need to "normalize" the stored values now and then. During the
+/// normalization, we drop values that indicate distance greater than the
+/// dictionary size, thus making space for new values.
+static void
+normalize(lzma_mf *mf)
+{
+ assert(mf->read_pos + mf->offset == MUST_NORMALIZE_POS);
+
+    // In the future we may not want to touch the lowest bits, because there
+    // may be match finders that use a larger resolution than one byte.
+ const uint32_t subvalue
+ = (MUST_NORMALIZE_POS - mf->cyclic_size);
+ // & (~(UINT32_C(1) << 10) - 1);
+
+ const uint32_t count = mf->hash_size_sum + mf->sons_count;
+ uint32_t *hash = mf->hash;
+
+ for (uint32_t i = 0; i < count; ++i) {
+ // If the distance is greater than the dictionary size,
+ // we can simply mark the hash element as empty.
+ //
+ // NOTE: Only the first mf->hash_size_sum elements are
+ // initialized for sure. There may be uninitialized elements
+ // in mf->son. Since we go through both mf->hash and
+ // mf->son here in normalization, Valgrind may complain
+ // that the "if" below depends on uninitialized value. In
+ // this case it is safe to ignore the warning. See also the
+ // comments in lz_encoder_init() in lz_encoder.c.
+ if (hash[i] <= subvalue)
+ hash[i] = EMPTY_HASH_VALUE;
+ else
+ hash[i] -= subvalue;
+ }
+
+ // Update offset to match the new locations.
+ mf->offset -= subvalue;
+
+ return;
+}
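+
+// Concretely: normalize() runs only when read_pos + offset has hit
+// UINT32_MAX, so subvalue == UINT32_MAX - cyclic_size. After the loop the
+// current position maps to cyclic_size, any stored position whose distance
+// from it was cyclic_size or more has been cleared to EMPTY_HASH_VALUE,
+// and offset is reduced so that read_pos + offset == cyclic_size again.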
+
+
+/// Mark the current byte as processed from point of view of the match finder.
+static void
+move_pos(lzma_mf *mf)
+{
+ if (++mf->cyclic_pos == mf->cyclic_size)
+ mf->cyclic_pos = 0;
+
+ ++mf->read_pos;
+ assert(mf->read_pos <= mf->write_pos);
+
+ if (unlikely(mf->read_pos + mf->offset == UINT32_MAX))
+ normalize(mf);
+}
+
+
+/// When flushing, we cannot run the match finder unless there are nice_len
+/// bytes available in the dictionary. Instead, we skip running the match
+/// finder (indicating that no match was found), and count how many bytes we
+/// have ignored this way.
+///
+/// When new data is given after the flushing was completed, the match finder
+/// is restarted by rewinding mf->read_pos backwards by mf->pending. Then
+/// the missed bytes are added to the hash using the match finder's skip
+/// function (with a small amount of input, it may start using mf->pending
+/// again if flushing).
+///
+/// Due to this rewinding, we don't touch cyclic_pos or test for
+/// normalization. It will be done when the match finder's skip function
+/// catches up after a flush.
+static void
+move_pending(lzma_mf *mf)
+{
+ ++mf->read_pos;
+ assert(mf->read_pos <= mf->write_pos);
+ ++mf->pending;
+}
+
+
+/// Calculate len_limit and determine if there is enough input to run
+/// the actual match finder code. Sets up "cur" and "pos". This macro
+/// is used by all find functions and binary tree skip functions. The hash
+/// chain skip functions don't need len_limit, so simpler code is used
+/// in them.
+#define header(is_bt, len_min, ret_op) \
+ uint32_t len_limit = mf_avail(mf); \
+ if (mf->nice_len <= len_limit) { \
+ len_limit = mf->nice_len; \
+ } else if (len_limit < (len_min) \
+ || (is_bt && mf->action == LZMA_SYNC_FLUSH)) { \
+ assert(mf->action != LZMA_RUN); \
+ move_pending(mf); \
+ ret_op; \
+ } \
+ const uint8_t *cur = mf_ptr(mf); \
+ const uint32_t pos = mf->read_pos + mf->offset
+
+
+/// Header for find functions. "return 0" indicates that zero matches
+/// were found.
+#define header_find(is_bt, len_min) \
+ header(is_bt, len_min, return 0); \
+ uint32_t matches_count = 0
+
+
+/// Header for a loop in a skip function. "continue" tells to skip the rest
+/// of the code in the loop.
+#define header_skip(is_bt, len_min) \
+ header(is_bt, len_min, continue)
+
+
+/// Calls hc_find_func() or bt_find_func() and calculates the total number
+/// of matches found. Updates the dictionary position and returns the number
+/// of matches found.
+#define call_find(func, len_best) \
+do { \
+ matches_count = func(len_limit, pos, cur, cur_match, mf->depth, \
+ mf->son, mf->cyclic_pos, mf->cyclic_size, \
+ matches + matches_count, len_best) \
+ - matches; \
+ move_pos(mf); \
+ return matches_count; \
+} while (0)
+
+
+////////////////
+// Hash Chain //
+////////////////
+
+#if defined(HAVE_MF_HC3) || defined(HAVE_MF_HC4)
+///
+///
+/// \param len_limit Don't look for matches longer than len_limit.
+/// \param pos lzma_mf.read_pos + lzma_mf.offset
+/// \param cur Pointer to current byte (mf_ptr(mf))
+/// \param cur_match Start position of the current match candidate
+/// \param depth Maximum length of the hash chain
+/// \param son lzma_mf.son (contains the hash chain)
+/// \param cyclic_pos
+/// \param cyclic_size
+/// \param matches Array to hold the matches.
+/// \param len_best The length of the longest match found so far.
+static lzma_match *
+hc_find_func(
+ const uint32_t len_limit,
+ const uint32_t pos,
+ const uint8_t *const cur,
+ uint32_t cur_match,
+ uint32_t depth,
+ uint32_t *const son,
+ const uint32_t cyclic_pos,
+ const uint32_t cyclic_size,
+ lzma_match *matches,
+ uint32_t len_best)
+{
+ son[cyclic_pos] = cur_match;
+
+ while (true) {
+ const uint32_t delta = pos - cur_match;
+ if (depth-- == 0 || delta >= cyclic_size)
+ return matches;
+
+ const uint8_t *const pb = cur - delta;
+ cur_match = son[cyclic_pos - delta
+ + (delta > cyclic_pos ? cyclic_size : 0)];
+
+ if (pb[len_best] == cur[len_best] && pb[0] == cur[0]) {
+ uint32_t len = 0;
+ while (++len != len_limit)
+ if (pb[len] != cur[len])
+ break;
+
+ if (len_best < len) {
+ len_best = len;
+ matches->len = len;
+ matches->dist = delta - 1;
+ ++matches;
+
+ if (len == len_limit)
+ return matches;
+ }
+ }
+ }
+}
+
+
+#define hc_find(len_best) \
+ call_find(hc_find_func, len_best)
+
+
+#define hc_skip() \
+do { \
+ mf->son[mf->cyclic_pos] = cur_match; \
+ move_pos(mf); \
+} while (0)
+
+#endif
+
+
+#ifdef HAVE_MF_HC3
+extern uint32_t
+lzma_mf_hc3_find(lzma_mf *mf, lzma_match *matches)
+{
+ header_find(false, 3);
+
+ hash_3_calc();
+
+ const uint32_t delta2 = pos - mf->hash[hash_2_value];
+ const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
+
+ mf->hash[hash_2_value] = pos;
+ mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
+
+ uint32_t len_best = 2;
+
+ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
+ for ( ; len_best != len_limit; ++len_best)
+ if (*(cur + len_best - delta2) != cur[len_best])
+ break;
+
+ matches[0].len = len_best;
+ matches[0].dist = delta2 - 1;
+ matches_count = 1;
+
+ if (len_best == len_limit) {
+ hc_skip();
+ return 1; // matches_count
+ }
+ }
+
+ hc_find(len_best);
+}
+
+
+extern void
+lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount)
+{
+ do {
+ if (mf_avail(mf) < 3) {
+ move_pending(mf);
+ continue;
+ }
+
+ const uint8_t *cur = mf_ptr(mf);
+ const uint32_t pos = mf->read_pos + mf->offset;
+
+ hash_3_calc();
+
+ const uint32_t cur_match
+ = mf->hash[FIX_3_HASH_SIZE + hash_value];
+
+ mf->hash[hash_2_value] = pos;
+ mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
+
+ hc_skip();
+
+ } while (--amount != 0);
+}
+#endif
+
+
+#ifdef HAVE_MF_HC4
+extern uint32_t
+lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches)
+{
+ header_find(false, 4);
+
+ hash_4_calc();
+
+ uint32_t delta2 = pos - mf->hash[hash_2_value];
+ const uint32_t delta3
+ = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
+ const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
+
+ mf->hash[hash_2_value ] = pos;
+ mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
+ mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
+
+ uint32_t len_best = 1;
+
+ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
+ len_best = 2;
+ matches[0].len = 2;
+ matches[0].dist = delta2 - 1;
+ matches_count = 1;
+ }
+
+ if (delta2 != delta3 && delta3 < mf->cyclic_size
+ && *(cur - delta3) == *cur) {
+ len_best = 3;
+ matches[matches_count++].dist = delta3 - 1;
+ delta2 = delta3;
+ }
+
+ if (matches_count != 0) {
+ for ( ; len_best != len_limit; ++len_best)
+ if (*(cur + len_best - delta2) != cur[len_best])
+ break;
+
+ matches[matches_count - 1].len = len_best;
+
+ if (len_best == len_limit) {
+ hc_skip();
+ return matches_count;
+ }
+ }
+
+ if (len_best < 3)
+ len_best = 3;
+
+ hc_find(len_best);
+}
+
+
+extern void
+lzma_mf_hc4_skip(lzma_mf *mf, uint32_t amount)
+{
+ do {
+ if (mf_avail(mf) < 4) {
+ move_pending(mf);
+ continue;
+ }
+
+ const uint8_t *cur = mf_ptr(mf);
+ const uint32_t pos = mf->read_pos + mf->offset;
+
+ hash_4_calc();
+
+ const uint32_t cur_match
+ = mf->hash[FIX_4_HASH_SIZE + hash_value];
+
+ mf->hash[hash_2_value] = pos;
+ mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
+ mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
+
+ hc_skip();
+
+ } while (--amount != 0);
+}
+#endif
+
+
+/////////////////
+// Binary Tree //
+/////////////////
+
+#if defined(HAVE_MF_BT2) || defined(HAVE_MF_BT3) || defined(HAVE_MF_BT4)
+static lzma_match *
+bt_find_func(
+ const uint32_t len_limit,
+ const uint32_t pos,
+ const uint8_t *const cur,
+ uint32_t cur_match,
+ uint32_t depth,
+ uint32_t *const son,
+ const uint32_t cyclic_pos,
+ const uint32_t cyclic_size,
+ lzma_match *matches,
+ uint32_t len_best)
+{
+ uint32_t *ptr0 = son + (cyclic_pos << 1) + 1;
+ uint32_t *ptr1 = son + (cyclic_pos << 1);
+
+ uint32_t len0 = 0;
+ uint32_t len1 = 0;
+
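+    // son[] holds a binary search tree of earlier positions, ordered by the
+    // data they point at. ptr0 and ptr1 are the two child links of the node
+    // being inserted for the current position, and len0/len1 track how many
+    // leading bytes are already known to match along each branch.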
+ while (true) {
+ const uint32_t delta = pos - cur_match;
+ if (depth-- == 0 || delta >= cyclic_size) {
+ *ptr0 = EMPTY_HASH_VALUE;
+ *ptr1 = EMPTY_HASH_VALUE;
+ return matches;
+ }
+
+ uint32_t *const pair = son + ((cyclic_pos - delta
+ + (delta > cyclic_pos ? cyclic_size : 0))
+ << 1);
+
+ const uint8_t *const pb = cur - delta;
+ uint32_t len = MIN(len0, len1);
+
+ if (pb[len] == cur[len]) {
+ while (++len != len_limit)
+ if (pb[len] != cur[len])
+ break;
+
+ if (len_best < len) {
+ len_best = len;
+ matches->len = len;
+ matches->dist = delta - 1;
+ ++matches;
+
+ if (len == len_limit) {
+ *ptr1 = pair[0];
+ *ptr0 = pair[1];
+ return matches;
+ }
+ }
+ }
+
+ if (pb[len] < cur[len]) {
+ *ptr1 = cur_match;
+ ptr1 = pair + 1;
+ cur_match = *ptr1;
+ len1 = len;
+ } else {
+ *ptr0 = cur_match;
+ ptr0 = pair;
+ cur_match = *ptr0;
+ len0 = len;
+ }
+ }
+}
+
+
+static void
+bt_skip_func(
+ const uint32_t len_limit,
+ const uint32_t pos,
+ const uint8_t *const cur,
+ uint32_t cur_match,
+ uint32_t depth,
+ uint32_t *const son,
+ const uint32_t cyclic_pos,
+ const uint32_t cyclic_size)
+{
+ uint32_t *ptr0 = son + (cyclic_pos << 1) + 1;
+ uint32_t *ptr1 = son + (cyclic_pos << 1);
+
+ uint32_t len0 = 0;
+ uint32_t len1 = 0;
+
+ while (true) {
+ const uint32_t delta = pos - cur_match;
+ if (depth-- == 0 || delta >= cyclic_size) {
+ *ptr0 = EMPTY_HASH_VALUE;
+ *ptr1 = EMPTY_HASH_VALUE;
+ return;
+ }
+
+ uint32_t *pair = son + ((cyclic_pos - delta
+ + (delta > cyclic_pos ? cyclic_size : 0))
+ << 1);
+ const uint8_t *pb = cur - delta;
+ uint32_t len = MIN(len0, len1);
+
+ if (pb[len] == cur[len]) {
+ while (++len != len_limit)
+ if (pb[len] != cur[len])
+ break;
+
+ if (len == len_limit) {
+ *ptr1 = pair[0];
+ *ptr0 = pair[1];
+ return;
+ }
+ }
+
+ if (pb[len] < cur[len]) {
+ *ptr1 = cur_match;
+ ptr1 = pair + 1;
+ cur_match = *ptr1;
+ len1 = len;
+ } else {
+ *ptr0 = cur_match;
+ ptr0 = pair;
+ cur_match = *ptr0;
+ len0 = len;
+ }
+ }
+}
+
+
+#define bt_find(len_best) \
+ call_find(bt_find_func, len_best)
+
+#define bt_skip() \
+do { \
+ bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, \
+ mf->son, mf->cyclic_pos, \
+ mf->cyclic_size); \
+ move_pos(mf); \
+} while (0)
+
+#endif
+
+
+#ifdef HAVE_MF_BT2
+extern uint32_t
+lzma_mf_bt2_find(lzma_mf *mf, lzma_match *matches)
+{
+ header_find(true, 2);
+
+ hash_2_calc();
+
+ const uint32_t cur_match = mf->hash[hash_value];
+ mf->hash[hash_value] = pos;
+
+ bt_find(1);
+}
+
+
+extern void
+lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount)
+{
+ do {
+ header_skip(true, 2);
+
+ hash_2_calc();
+
+ const uint32_t cur_match = mf->hash[hash_value];
+ mf->hash[hash_value] = pos;
+
+ bt_skip();
+
+ } while (--amount != 0);
+}
+#endif
+
+
+#ifdef HAVE_MF_BT3
+extern uint32_t
+lzma_mf_bt3_find(lzma_mf *mf, lzma_match *matches)
+{
+ header_find(true, 3);
+
+ hash_3_calc();
+
+ const uint32_t delta2 = pos - mf->hash[hash_2_value];
+ const uint32_t cur_match = mf->hash[FIX_3_HASH_SIZE + hash_value];
+
+ mf->hash[hash_2_value] = pos;
+ mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
+
+ uint32_t len_best = 2;
+
+ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
+ for ( ; len_best != len_limit; ++len_best)
+ if (*(cur + len_best - delta2) != cur[len_best])
+ break;
+
+ matches[0].len = len_best;
+ matches[0].dist = delta2 - 1;
+ matches_count = 1;
+
+ if (len_best == len_limit) {
+ bt_skip();
+ return 1; // matches_count
+ }
+ }
+
+ bt_find(len_best);
+}
+
+
+extern void
+lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount)
+{
+ do {
+ header_skip(true, 3);
+
+ hash_3_calc();
+
+ const uint32_t cur_match
+ = mf->hash[FIX_3_HASH_SIZE + hash_value];
+
+ mf->hash[hash_2_value] = pos;
+ mf->hash[FIX_3_HASH_SIZE + hash_value] = pos;
+
+ bt_skip();
+
+ } while (--amount != 0);
+}
+#endif
+
+
+#ifdef HAVE_MF_BT4
+extern uint32_t
+lzma_mf_bt4_find(lzma_mf *mf, lzma_match *matches)
+{
+ header_find(true, 4);
+
+ hash_4_calc();
+
+ uint32_t delta2 = pos - mf->hash[hash_2_value];
+ const uint32_t delta3
+ = pos - mf->hash[FIX_3_HASH_SIZE + hash_3_value];
+ const uint32_t cur_match = mf->hash[FIX_4_HASH_SIZE + hash_value];
+
+ mf->hash[hash_2_value] = pos;
+ mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
+ mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
+
+ uint32_t len_best = 1;
+
+ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
+ len_best = 2;
+ matches[0].len = 2;
+ matches[0].dist = delta2 - 1;
+ matches_count = 1;
+ }
+
+ if (delta2 != delta3 && delta3 < mf->cyclic_size
+ && *(cur - delta3) == *cur) {
+ len_best = 3;
+ matches[matches_count++].dist = delta3 - 1;
+ delta2 = delta3;
+ }
+
+ if (matches_count != 0) {
+ for ( ; len_best != len_limit; ++len_best)
+ if (*(cur + len_best - delta2) != cur[len_best])
+ break;
+
+ matches[matches_count - 1].len = len_best;
+
+ if (len_best == len_limit) {
+ bt_skip();
+ return matches_count;
+ }
+ }
+
+ if (len_best < 3)
+ len_best = 3;
+
+ bt_find(len_best);
+}
+
+
+extern void
+lzma_mf_bt4_skip(lzma_mf *mf, uint32_t amount)
+{
+ do {
+ header_skip(true, 4);
+
+ hash_4_calc();
+
+ const uint32_t cur_match
+ = mf->hash[FIX_4_HASH_SIZE + hash_value];
+
+ mf->hash[hash_2_value] = pos;
+ mf->hash[FIX_3_HASH_SIZE + hash_3_value] = pos;
+ mf->hash[FIX_4_HASH_SIZE + hash_value] = pos;
+
+ bt_skip();
+
+ } while (--amount != 0);
+}
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/Makefile.inc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/Makefile.inc
new file mode 100644
index 00000000..7fc4d172
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/Makefile.inc
@@ -0,0 +1,43 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+EXTRA_DIST += lzma/fastpos_tablegen.c
+
+liblzma_la_SOURCES += lzma/lzma_common.h
+
+if COND_ENCODER_LZMA1
+liblzma_la_SOURCES += \
+ lzma/fastpos.h \
+ lzma/lzma_encoder.h \
+ lzma/lzma_encoder.c \
+ lzma/lzma_encoder_presets.c \
+ lzma/lzma_encoder_private.h \
+ lzma/lzma_encoder_optimum_fast.c \
+ lzma/lzma_encoder_optimum_normal.c
+
+if !COND_SMALL
+liblzma_la_SOURCES += lzma/fastpos_table.c
+endif
+endif
+
+if COND_DECODER_LZMA1
+liblzma_la_SOURCES += \
+ lzma/lzma_decoder.c \
+ lzma/lzma_decoder.h
+endif
+
+if COND_ENCODER_LZMA2
+liblzma_la_SOURCES += \
+ lzma/lzma2_encoder.c \
+ lzma/lzma2_encoder.h
+endif
+
+if COND_DECODER_LZMA2
+liblzma_la_SOURCES += \
+ lzma/lzma2_decoder.c \
+ lzma/lzma2_decoder.h
+endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos.h
new file mode 100644
index 00000000..0ad4b119
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos.h
@@ -0,0 +1,145 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file fastpos.h
+/// \brief Kind of two-bit version of bit scan reverse
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_FASTPOS_H
+#define LZMA_FASTPOS_H
+
+// LZMA encodes match distances (positions) by storing the highest two
+// bits using a six-bit value [0, 63], and then the missing lower bits.
+// Dictionary size is also stored using this encoding in the new .lzma
+// file format header.
+//
+// fastpos.h provides a way to quickly find out the correct six-bit
+// values. The following table gives some examples of this encoding:
+//
+// pos return
+// 0 0
+// 1 1
+// 2 2
+// 3 3
+// 4 4
+// 5 4
+// 6 5
+// 7 5
+// 8 6
+// 11 6
+// 12 7
+// ... ...
+// 15 7
+// 16 8
+// 17 8
+// ... ...
+// 23 8
+// 24 9
+// 25 9
+// ... ...
+//
+//
+// Provided functions or macros
+// ----------------------------
+//
+// get_pos_slot(pos) is the basic version. get_pos_slot_2(pos)
+// assumes that pos >= FULL_DISTANCES, thus the result is at least
+// FULL_DISTANCES_BITS * 2. Using get_pos_slot(pos) instead of
+// get_pos_slot_2(pos) would give the same result, but get_pos_slot_2(pos)
+// should be a tiny bit faster due to the assumption being made.
+//
+//
+// Size vs. speed
+// --------------
+//
+// With some CPUs that have fast BSR (bit scan reverse) instruction, the
+// size optimized version is slightly faster than the bigger table based
+// approach. Such CPUs include Intel Pentium Pro, Pentium II, Pentium III
+// and Core 2 (possibly others). AMD K7 seems to have slower BSR, but that
+// would still have speed roughly comparable to the table version. Older
+// x86 CPUs like the original Pentium have very slow BSR; on those systems
+// the table version is a lot faster.
+//
+// On some CPUs, the table version is a lot faster when using position
+// dependent code, but with position independent code the size optimized
+// version is slightly faster. This occurs at least on 32-bit SPARC (no
+// ASM optimizations).
+//
+// I'm making the table version the default, because that has good speed
+// on all systems I have tried. The size optimized version is sometimes
+// slightly faster, but sometimes it is a lot slower.
+
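+// A minimal reference sketch of the mapping described above: the same
+// six-bit slot computed with a plain loop instead of BSR or the lookup
+// table. The function name ref_get_pos_slot is hypothetical, used only for
+// illustration, and is not referenced anywhere in liblzma.
+static inline uint32_t
+ref_get_pos_slot(uint32_t pos)
+{
+    // Positions 0 and 1 encode as themselves.
+    if (pos < 2)
+        return pos;
+
+    // Find the index of the highest set bit (what a BSR instruction gives).
+    uint32_t i = 0;
+    while ((pos >> (i + 1)) != 0)
+        ++i;
+
+    // Two slots per bit length; the bit just below the highest one picks
+    // between them, matching the example table in the comment above.
+    return (i * 2) + ((pos >> (i - 1)) & 1);
+}
+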
+#ifdef HAVE_SMALL
+# include "bsr.h"
+
+# define get_pos_slot(pos) ((pos) <= 4 ? (pos) : get_pos_slot_2(pos))
+
+static inline uint32_t
+get_pos_slot_2(uint32_t pos)
+{
+ uint32_t i;
+ lzma_bsr(i, pos);
+ return (i + i) + ((pos >> (i - 1)) & 1);
+}
+
+
+#else
+
+#define FASTPOS_BITS 13
+
+extern const uint8_t lzma_fastpos[1 << FASTPOS_BITS];
+
+
+#define fastpos_shift(extra, n) \
+ ((extra) + (n) * (FASTPOS_BITS - 1))
+
+#define fastpos_limit(extra, n) \
+ (UINT32_C(1) << (FASTPOS_BITS + fastpos_shift(extra, n)))
+
+#define fastpos_result(pos, extra, n) \
+ lzma_fastpos[(pos) >> fastpos_shift(extra, n)] \
+ + 2 * fastpos_shift(extra, n)
+
+
+static inline uint32_t
+get_pos_slot(uint32_t pos)
+{
+ // If it is small enough, we can pick the result directly from
+ // the precalculated table.
+ if (pos < fastpos_limit(0, 0))
+ return lzma_fastpos[pos];
+
+ if (pos < fastpos_limit(0, 1))
+ return fastpos_result(pos, 0, 1);
+
+ return fastpos_result(pos, 0, 2);
+}
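+
+// For example, a distance of 1 << 20 is outside the direct table range
+// [0, 1 << 13), so get_pos_slot() uses fastpos_result(pos, 0, 1):
+// lzma_fastpos[(1 << 20) >> 12] + 2 * 12 == 16 + 24 == 40, the same slot
+// that the HAVE_SMALL bit-scan variant would compute for a position whose
+// highest set bit is bit 20.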
+
+
+#ifdef FULL_DISTANCES_BITS
+static inline uint32_t
+get_pos_slot_2(uint32_t pos)
+{
+ assert(pos >= FULL_DISTANCES);
+
+ if (pos < fastpos_limit(FULL_DISTANCES_BITS - 1, 0))
+ return fastpos_result(pos, FULL_DISTANCES_BITS - 1, 0);
+
+ if (pos < fastpos_limit(FULL_DISTANCES_BITS - 1, 1))
+ return fastpos_result(pos, FULL_DISTANCES_BITS - 1, 1);
+
+ return fastpos_result(pos, FULL_DISTANCES_BITS - 1, 2);
+}
+#endif
+
+#endif
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos_table.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos_table.c
new file mode 100644
index 00000000..25b51932
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos_table.c
@@ -0,0 +1,521 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* This file has been automatically generated by fastpos_tablegen.c. */
+
+#include "common.h"
+#include "fastpos.h"
+
+const uint8_t lzma_fastpos[1 << FASTPOS_BITS] = {
+ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25
+};
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos_tablegen.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos_tablegen.c
new file mode 100644
index 00000000..bde9a1dc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/fastpos_tablegen.c
@@ -0,0 +1,58 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file fastpos_tablegen.c
+/// \brief Generates the lzma_fastpos[] lookup table
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include <sys/types.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include "fastpos.h"
+
+
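+// Note: this generator is shipped but not compiled into liblzma (it is
+// listed under EXTRA_DIST in lzma/Makefile.inc); running it as a standalone
+// program and saving its standard output produces fastpos_table.c.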
+int
+main(void)
+{
+ uint8_t fastpos[1 << FASTPOS_BITS];
+
+ const uint8_t fast_slots = 2 * FASTPOS_BITS;
+ uint32_t c = 2;
+
+ fastpos[0] = 0;
+ fastpos[1] = 1;
+
+ for (uint8_t slot_fast = 2; slot_fast < fast_slots; ++slot_fast) {
+ const uint32_t k = 1 << ((slot_fast >> 1) - 1);
+ for (uint32_t j = 0; j < k; ++j, ++c)
+ fastpos[c] = slot_fast;
+ }
+
+ printf("/* This file has been automatically generated "
+ "by fastpos_tablegen.c. */\n\n"
+ "#include \"common.h\"\n"
+ "#include \"fastpos.h\"\n\n"
+ "const uint8_t lzma_fastpos[1 << FASTPOS_BITS] = {");
+
+ for (size_t i = 0; i < (1 << FASTPOS_BITS); ++i) {
+ if (i % 16 == 0)
+ printf("\n\t");
+
+ printf("%3u", (unsigned int)(fastpos[i]));
+
+ if (i != (1 << FASTPOS_BITS) - 1)
+ printf(",");
+ }
+
+ printf("\n};\n");
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_decoder.c
new file mode 100644
index 00000000..f6f744b0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_decoder.c
@@ -0,0 +1,307 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma2_decoder.c
+/// \brief LZMA2 decoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "lzma2_decoder.h"
+#include "lz_decoder.h"
+#include "lzma_decoder.h"
+
+
+struct lzma_coder_s {
+ enum sequence {
+ SEQ_CONTROL,
+ SEQ_UNCOMPRESSED_1,
+ SEQ_UNCOMPRESSED_2,
+ SEQ_COMPRESSED_0,
+ SEQ_COMPRESSED_1,
+ SEQ_PROPERTIES,
+ SEQ_LZMA,
+ SEQ_COPY,
+ } sequence;
+
+ /// Sequence after the size fields have been decoded.
+ enum sequence next_sequence;
+
+ /// LZMA decoder
+ lzma_lz_decoder lzma;
+
+ /// Uncompressed size of LZMA chunk
+ size_t uncompressed_size;
+
+    /// Compressed size of the chunk (for an uncompressed chunk this
+    /// naturally equals its uncompressed size)
+ size_t compressed_size;
+
+ /// True if properties are needed. This is false before the
+ /// first LZMA chunk.
+ bool need_properties;
+
+ /// True if dictionary reset is needed. This is false before the
+ /// first chunk (LZMA or uncompressed).
+ bool need_dictionary_reset;
+
+ lzma_options_lzma options;
+};
+
+
+static lzma_ret
+lzma2_decode(lzma_coder *restrict coder, lzma_dict *restrict dict,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size)
+{
+    // With SEQ_LZMA it is possible that no new input is needed to make
+    // some progress. The rest of the sequences assume that there is
+ // at least one byte of input.
+ while (*in_pos < in_size || coder->sequence == SEQ_LZMA)
+ switch (coder->sequence) {
+ case SEQ_CONTROL: {
+ const uint32_t control = in[*in_pos];
+ ++*in_pos;
+
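+        // Control byte summary, as handled below: 0x00 ends the LZMA2
+        // stream, 0x01 and 0x02 begin an uncompressed chunk (with and
+        // without a dictionary reset, respectively), and 0x80-0xFF begin
+        // an LZMA chunk whose upper bits select dictionary/properties/state
+        // resets and whose low five bits hold the top bits of the
+        // uncompressed size.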
+ if (control >= 0xE0 || control == 1) {
+            // Dictionary reset implies that the next LZMA chunk has
+ // to set new properties.
+ coder->need_properties = true;
+ coder->need_dictionary_reset = true;
+ } else if (coder->need_dictionary_reset) {
+ return LZMA_DATA_ERROR;
+ }
+
+ if (control >= 0x80) {
+ // LZMA chunk. The highest five bits of the
+ // uncompressed size are taken from the control byte.
+ coder->uncompressed_size = (control & 0x1F) << 16;
+ coder->sequence = SEQ_UNCOMPRESSED_1;
+
+ // See if there are new properties or if we need to
+ // reset the state.
+ if (control >= 0xC0) {
+ // When there are new properties, state reset
+ // is done at SEQ_PROPERTIES.
+ coder->need_properties = false;
+ coder->next_sequence = SEQ_PROPERTIES;
+
+ } else if (coder->need_properties) {
+ return LZMA_DATA_ERROR;
+
+ } else {
+ coder->next_sequence = SEQ_LZMA;
+
+ // If only state reset is wanted with old
+ // properties, do the resetting here for
+ // simplicity.
+ if (control >= 0xA0)
+ coder->lzma.reset(coder->lzma.coder,
+ &coder->options);
+ }
+ } else {
+ // End marker
+ if (control == 0x00)
+ return LZMA_STREAM_END;
+
+ // Invalid control values
+ if (control > 2)
+ return LZMA_DATA_ERROR;
+
+            // It's an uncompressed chunk
+ coder->sequence = SEQ_COMPRESSED_0;
+ coder->next_sequence = SEQ_COPY;
+ }
+
+ if (coder->need_dictionary_reset) {
+ // Finish the dictionary reset and let the caller
+ // flush the dictionary to the actual output buffer.
+ coder->need_dictionary_reset = false;
+ dict_reset(dict);
+ return LZMA_OK;
+ }
+
+ break;
+ }
+
+ case SEQ_UNCOMPRESSED_1:
+ coder->uncompressed_size += (uint32_t)(in[(*in_pos)++]) << 8;
+ coder->sequence = SEQ_UNCOMPRESSED_2;
+ break;
+
+ case SEQ_UNCOMPRESSED_2:
+ coder->uncompressed_size += in[(*in_pos)++] + 1;
+ coder->sequence = SEQ_COMPRESSED_0;
+ coder->lzma.set_uncompressed(coder->lzma.coder,
+ coder->uncompressed_size);
+ break;
+
+ case SEQ_COMPRESSED_0:
+ coder->compressed_size = (uint32_t)(in[(*in_pos)++]) << 8;
+ coder->sequence = SEQ_COMPRESSED_1;
+ break;
+
+ case SEQ_COMPRESSED_1:
+ coder->compressed_size += in[(*in_pos)++] + 1;
+ coder->sequence = coder->next_sequence;
+ break;
+
+ case SEQ_PROPERTIES:
+ if (lzma_lzma_lclppb_decode(&coder->options, in[(*in_pos)++]))
+ return LZMA_DATA_ERROR;
+
+ coder->lzma.reset(coder->lzma.coder, &coder->options);
+
+ coder->sequence = SEQ_LZMA;
+ break;
+
+ case SEQ_LZMA: {
+ // Store the start offset so that we can update
+ // coder->compressed_size later.
+ const size_t in_start = *in_pos;
+
+ // Decode from in[] to *dict.
+ const lzma_ret ret = coder->lzma.code(coder->lzma.coder,
+ dict, in, in_pos, in_size);
+
+ // Validate and update coder->compressed_size.
+ const size_t in_used = *in_pos - in_start;
+ if (in_used > coder->compressed_size)
+ return LZMA_DATA_ERROR;
+
+ coder->compressed_size -= in_used;
+
+ // Return if we didn't finish the chunk, or an error occurred.
+ if (ret != LZMA_STREAM_END)
+ return ret;
+
+ // The LZMA decoder must have consumed the whole chunk now.
+ // We don't need to worry about uncompressed size since it
+ // is checked by the LZMA decoder.
+ if (coder->compressed_size != 0)
+ return LZMA_DATA_ERROR;
+
+ coder->sequence = SEQ_CONTROL;
+ break;
+ }
+
+ case SEQ_COPY: {
+ // Copy from input to the dictionary as is.
+ // FIXME Can copy too much?
+ dict_write(dict, in, in_pos, in_size, &coder->compressed_size);
+ if (coder->compressed_size != 0)
+ return LZMA_OK;
+
+ coder->sequence = SEQ_CONTROL;
+ break;
+ }
+
+ default:
+ assert(0);
+ return LZMA_PROG_ERROR;
+ }
+
+ return LZMA_OK;
+}
+
+
+static void
+lzma2_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ assert(coder->lzma.end == NULL);
+ lzma_free(coder->lzma.coder, allocator);
+
+ lzma_free(coder, allocator);
+
+ return;
+}
+
+
+static lzma_ret
+lzma2_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
+ const void *opt, lzma_lz_options *lz_options)
+{
+ if (lz->coder == NULL) {
+ lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (lz->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ lz->code = &lzma2_decode;
+ lz->end = &lzma2_decoder_end;
+
+ lz->coder->lzma = LZMA_LZ_DECODER_INIT;
+ }
+
+ const lzma_options_lzma *options = opt;
+
+ lz->coder->sequence = SEQ_CONTROL;
+ lz->coder->need_properties = true;
+ lz->coder->need_dictionary_reset = options->preset_dict == NULL
+ || options->preset_dict_size == 0;
+
+ return lzma_lzma_decoder_create(&lz->coder->lzma,
+ allocator, options, lz_options);
+}
+
+
+extern lzma_ret
+lzma_lzma2_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ // LZMA2 can only be the last filter in the chain. This is enforced
+ // by the raw_decoder initialization.
+ assert(filters[1].init == NULL);
+
+ return lzma_lz_decoder_init(next, allocator, filters,
+ &lzma2_decoder_init);
+}
+
+
+extern uint64_t
+lzma_lzma2_decoder_memusage(const void *options)
+{
+ return sizeof(lzma_coder)
+ + lzma_lzma_decoder_memusage_nocheck(options);
+}
+
+
+extern lzma_ret
+lzma_lzma2_props_decode(void **options, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size)
+{
+ if (props_size != 1)
+ return LZMA_OPTIONS_ERROR;
+
+ // Check that reserved bits are unset.
+ if (props[0] & 0xC0)
+ return LZMA_OPTIONS_ERROR;
+
+ // Decode the dictionary size.
+ if (props[0] > 40)
+ return LZMA_OPTIONS_ERROR;
+
+ lzma_options_lzma *opt = lzma_alloc(
+ sizeof(lzma_options_lzma), allocator);
+ if (opt == NULL)
+ return LZMA_MEM_ERROR;
+
+ if (props[0] == 40) {
+ opt->dict_size = UINT32_MAX;
+ } else {
+ opt->dict_size = 2 | (props[0] & 1);
+ opt->dict_size <<= props[0] / 2 + 11;
+ }
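+
+    // For instance, props[0] == 0 decodes to a dictionary of 2 << 11 = 4 KiB,
+    // props[0] == 1 to 3 << 11 = 6 KiB and props[0] == 2 to 2 << 12 = 8 KiB;
+    // the size doubles every second step up to the UINT32_MAX special case
+    // for props[0] == 40.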
+
+ opt->preset_dict = NULL;
+ opt->preset_dict_size = 0;
+
+ *options = opt;
+
+ return LZMA_OK;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_decoder.h
new file mode 100644
index 00000000..fe7d2749
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_decoder.h
@@ -0,0 +1,30 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma2_decoder.h
+/// \brief LZMA2 decoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_LZMA2_DECODER_H
+#define LZMA_LZMA2_DECODER_H
+
+#include "common.h"
+
+extern lzma_ret lzma_lzma2_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+extern uint64_t lzma_lzma2_decoder_memusage(const void *options);
+
+extern lzma_ret lzma_lzma2_props_decode(
+ void **options, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_encoder.c
new file mode 100644
index 00000000..a91e0191
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_encoder.c
@@ -0,0 +1,386 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma2_encoder.c
+/// \brief LZMA2 encoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "lz_encoder.h"
+#include "lzma_encoder.h"
+#include "fastpos.h"
+#include "lzma2_encoder.h"
+
+
+struct lzma_coder_s {
+ enum {
+ SEQ_INIT,
+ SEQ_LZMA_ENCODE,
+ SEQ_LZMA_COPY,
+ SEQ_UNCOMPRESSED_HEADER,
+ SEQ_UNCOMPRESSED_COPY,
+ } sequence;
+
+ /// LZMA encoder
+ lzma_coder *lzma;
+
+ /// If this is not NULL, we will check new options from this
+ /// structure when starting a new chunk.
+ const lzma_options_lzma *opt_new;
+
+ /// LZMA options currently in use.
+ lzma_options_lzma opt_cur;
+
+ bool need_properties;
+ bool need_state_reset;
+ bool need_dictionary_reset;
+
+ /// Uncompressed size of a chunk
+ size_t uncompressed_size;
+
+ /// Compressed size of a chunk (excluding headers); this is also used
+ /// to indicate the end of buf[] in SEQ_LZMA_COPY.
+ size_t compressed_size;
+
+ /// Read position in buf[]
+ size_t buf_pos;
+
+ /// Buffer to hold the chunk header and LZMA compressed data
+ uint8_t buf[LZMA2_HEADER_MAX + LZMA2_CHUNK_MAX];
+};
+
+
+static void
+lzma2_header_lzma(lzma_coder *coder)
+{
+ assert(coder->uncompressed_size > 0);
+ assert(coder->uncompressed_size <= LZMA2_UNCOMPRESSED_MAX);
+ assert(coder->compressed_size > 0);
+ assert(coder->compressed_size <= LZMA2_CHUNK_MAX);
+
+ size_t pos;
+
+ if (coder->need_properties) {
+ pos = 0;
+
+ if (coder->need_dictionary_reset)
+ coder->buf[pos] = 0x80 + (3 << 5);
+ else
+ coder->buf[pos] = 0x80 + (2 << 5);
+ } else {
+ pos = 1;
+
+ if (coder->need_state_reset)
+ coder->buf[pos] = 0x80 + (1 << 5);
+ else
+ coder->buf[pos] = 0x80;
+ }
+
+ // Set the start position for copying.
+ coder->buf_pos = pos;
+
+ // Uncompressed size
+ size_t size = coder->uncompressed_size - 1;
+ coder->buf[pos++] += size >> 16;
+ coder->buf[pos++] = (size >> 8) & 0xFF;
+ coder->buf[pos++] = size & 0xFF;
+
+ // Compressed size
+ size = coder->compressed_size - 1;
+ coder->buf[pos++] = size >> 8;
+ coder->buf[pos++] = size & 0xFF;
+
+ // Properties, if needed
+ if (coder->need_properties)
+ lzma_lzma_lclppb_encode(&coder->opt_cur, coder->buf + pos);
+
+ coder->need_properties = false;
+ coder->need_state_reset = false;
+ coder->need_dictionary_reset = false;
+
+ // The copying code uses coder->compressed_size to indicate the end
+ // of coder->buf[], so we need to add the maximum size of the header here.
+ coder->compressed_size += LZMA2_HEADER_MAX;
+
+ return;
+}
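+
+// Editor's note (illustrative sketch, not part of the upstream xz sources):
+// for the first LZMA chunk after a dictionary reset, with an uncompressed
+// size of 4096, a compressed size of 1000 and lc=3/lp=0/pb=2, the header
+// built above would be 0xE0 0x0F 0xFF 0x03 0xE7 0x5D: the control byte
+// 0x80 + (3 << 5), then 4095 and 999 as big-endian "size minus one"
+// fields, then the properties byte.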
+
+
+static void
+lzma2_header_uncompressed(lzma_coder *coder)
+{
+ assert(coder->uncompressed_size > 0);
+ assert(coder->uncompressed_size <= LZMA2_CHUNK_MAX);
+
+ // If this is the first chunk, we need to include a dictionary
+ // reset indicator.
+ if (coder->need_dictionary_reset)
+ coder->buf[0] = 1;
+ else
+ coder->buf[0] = 2;
+
+ coder->need_dictionary_reset = false;
+
+ // "Compressed" size
+ coder->buf[1] = (coder->uncompressed_size - 1) >> 8;
+ coder->buf[2] = (coder->uncompressed_size - 1) & 0xFF;
+
+ // Set the start position for copying.
+ coder->buf_pos = 0;
+ return;
+}
+
+
+static lzma_ret
+lzma2_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
+ uint8_t *restrict out, size_t *restrict out_pos,
+ size_t out_size)
+{
+ while (*out_pos < out_size)
+ switch (coder->sequence) {
+ case SEQ_INIT:
+ // If there's no input left and we are flushing or finishing,
+ // don't start a new chunk.
+ if (mf_unencoded(mf) == 0) {
+ // Write end of payload marker if finishing.
+ if (mf->action == LZMA_FINISH)
+ out[(*out_pos)++] = 0;
+
+ return mf->action == LZMA_RUN
+ ? LZMA_OK : LZMA_STREAM_END;
+ }
+
+ // Check whether there are new options. At least for now,
+ // only lc/lp/pb can be changed.
+ if (coder->opt_new != NULL
+ && (coder->opt_cur.lc != coder->opt_new->lc
+ || coder->opt_cur.lp != coder->opt_new->lp
+ || coder->opt_cur.pb != coder->opt_new->pb)) {
+ // Options have been changed, copy them to opt_cur.
+ // These get validated as part of
+ // lzma_lzma_encoder_reset() below.
+ coder->opt_cur.lc = coder->opt_new->lc;
+ coder->opt_cur.lp = coder->opt_new->lp;
+ coder->opt_cur.pb = coder->opt_new->pb;
+
+ // We need to write the new options and reset
+ // the encoder state.
+ coder->need_properties = true;
+ coder->need_state_reset = true;
+ }
+
+ if (coder->need_state_reset)
+ return_if_error(lzma_lzma_encoder_reset(
+ coder->lzma, &coder->opt_cur));
+
+ coder->uncompressed_size = 0;
+ coder->compressed_size = 0;
+ coder->sequence = SEQ_LZMA_ENCODE;
+
+ // Fall through
+
+ case SEQ_LZMA_ENCODE: {
+ // Calculate how much more uncompressed data this chunk
+ // could accept.
+ const uint32_t left = LZMA2_UNCOMPRESSED_MAX
+ - coder->uncompressed_size;
+ uint32_t limit;
+
+ if (left < mf->match_len_max) {
+ // Must flush immediately since the next LZMA symbol
+ // could make the uncompressed size of the chunk too
+ // big.
+ limit = 0;
+ } else {
+ // Calculate the maximum read_limit that is OK from the
+ // point of view of the LZMA2 chunk size.
+ limit = mf->read_pos - mf->read_ahead
+ + left - mf->match_len_max;
+ }
+
+ // Save the start position so that we can update
+ // coder->uncompressed_size.
+ const uint32_t read_start = mf->read_pos - mf->read_ahead;
+
+ // Call the LZMA encoder until the chunk is finished.
+ const lzma_ret ret = lzma_lzma_encode(coder->lzma, mf,
+ coder->buf + LZMA2_HEADER_MAX,
+ &coder->compressed_size,
+ LZMA2_CHUNK_MAX, limit);
+
+ coder->uncompressed_size += mf->read_pos - mf->read_ahead
+ - read_start;
+
+ assert(coder->compressed_size <= LZMA2_CHUNK_MAX);
+ assert(coder->uncompressed_size <= LZMA2_UNCOMPRESSED_MAX);
+
+ if (ret != LZMA_STREAM_END)
+ return LZMA_OK;
+
+ // See if the chunk compressed. If it didn't, we encode it
+ // as an uncompressed chunk. This saves a few bytes of space
+ // and makes decoding faster.
+ if (coder->compressed_size >= coder->uncompressed_size) {
+ coder->uncompressed_size += mf->read_ahead;
+ assert(coder->uncompressed_size
+ <= LZMA2_UNCOMPRESSED_MAX);
+ mf->read_ahead = 0;
+ lzma2_header_uncompressed(coder);
+ coder->need_state_reset = true;
+ coder->sequence = SEQ_UNCOMPRESSED_HEADER;
+ break;
+ }
+
+ // The chunk did compress at least by one byte, so we store
+ // the chunk as LZMA.
+ lzma2_header_lzma(coder);
+
+ coder->sequence = SEQ_LZMA_COPY;
+ }
+
+ // Fall through
+
+ case SEQ_LZMA_COPY:
+ // Copy the compressed chunk along with its headers to the
+ // output buffer.
+ lzma_bufcpy(coder->buf, &coder->buf_pos,
+ coder->compressed_size,
+ out, out_pos, out_size);
+ if (coder->buf_pos != coder->compressed_size)
+ return LZMA_OK;
+
+ coder->sequence = SEQ_INIT;
+ break;
+
+ case SEQ_UNCOMPRESSED_HEADER:
+ // Copy the three-byte header to indicate uncompressed chunk.
+ lzma_bufcpy(coder->buf, &coder->buf_pos,
+ LZMA2_HEADER_UNCOMPRESSED,
+ out, out_pos, out_size);
+ if (coder->buf_pos != LZMA2_HEADER_UNCOMPRESSED)
+ return LZMA_OK;
+
+ coder->sequence = SEQ_UNCOMPRESSED_COPY;
+
+ // Fall through
+
+ case SEQ_UNCOMPRESSED_COPY:
+ // Copy the uncompressed data as is from the dictionary
+ // to the output buffer.
+ mf_read(mf, out, out_pos, out_size, &coder->uncompressed_size);
+ if (coder->uncompressed_size != 0)
+ return LZMA_OK;
+
+ coder->sequence = SEQ_INIT;
+ break;
+ }
+
+ return LZMA_OK;
+}
+
+
+static void
+lzma2_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_free(coder->lzma, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+static lzma_ret
+lzma2_encoder_init(lzma_lz_encoder *lz, lzma_allocator *allocator,
+ const void *options, lzma_lz_options *lz_options)
+{
+ if (options == NULL)
+ return LZMA_PROG_ERROR;
+
+ if (lz->coder == NULL) {
+ lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (lz->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ lz->code = &lzma2_encode;
+ lz->end = &lzma2_encoder_end;
+
+ lz->coder->lzma = NULL;
+ }
+
+ lz->coder->opt_cur = *(const lzma_options_lzma *)(options);
+ lz->coder->opt_new = lz->coder->opt_cur.persistent
+ ? options : NULL;
+
+ lz->coder->sequence = SEQ_INIT;
+ lz->coder->need_properties = true;
+ lz->coder->need_state_reset = false;
+ lz->coder->need_dictionary_reset
+ = lz->coder->opt_cur.preset_dict == NULL
+ || lz->coder->opt_cur.preset_dict_size == 0;
+
+ // Initialize LZMA encoder
+ return_if_error(lzma_lzma_encoder_create(&lz->coder->lzma, allocator,
+ &lz->coder->opt_cur, lz_options));
+
+ // Make sure that we will always have enough history available in
+ // case we need to use uncompressed chunks. They are used when the
+ // compressed size of a chunk is not smaller than the uncompressed
+ // size, so we need to have at least LZMA2_CHUNK_MAX bytes of
+ // history available.
+ if (lz_options->before_size + lz_options->dict_size < LZMA2_CHUNK_MAX)
+ lz_options->before_size
+ = LZMA2_CHUNK_MAX - lz_options->dict_size;
+
+ return LZMA_OK;
+}
+
+
+extern lzma_ret
+lzma_lzma2_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ return lzma_lz_encoder_init(
+ next, allocator, filters, &lzma2_encoder_init);
+}
+
+
+extern uint64_t
+lzma_lzma2_encoder_memusage(const void *options)
+{
+ const uint64_t lzma_mem = lzma_lzma_encoder_memusage(options);
+ if (lzma_mem == UINT64_MAX)
+ return UINT64_MAX;
+
+ return sizeof(lzma_coder) + lzma_mem;
+}
+
+
+extern lzma_ret
+lzma_lzma2_props_encode(const void *options, uint8_t *out)
+{
+ const lzma_options_lzma *const opt = options;
+ uint32_t d = MAX(opt->dict_size, LZMA_DICT_SIZE_MIN);
+
+ // Round up to the next 2^n - 1 or 2^n + 2^(n - 1) - 1 depending
+ // on which one is the next:
+ --d;
+ d |= d >> 2;
+ d |= d >> 3;
+ d |= d >> 4;
+ d |= d >> 8;
+ d |= d >> 16;
+
+ // Get the highest two bits using the proper encoding:
+ if (d == UINT32_MAX)
+ out[0] = 40;
+ else
+ out[0] = get_pos_slot(d + 1) - 24;
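+
+ // Editor's note (illustrative only, not part of the upstream xz
+ // sources): a dict_size of 1 MiB rounds to d + 1 == 1 << 20, whose
+ // pos slot is 40, so out[0] == 16; decoding 16 in lzma2_decoder.c
+ // gives (2 | 0) << (16 / 2 + 11) == 1 MiB again, so the value
+ // round-trips.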
+
+ return LZMA_OK;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_encoder.h
new file mode 100644
index 00000000..9fb90ebe
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma2_encoder.h
@@ -0,0 +1,43 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma2_encoder.h
+/// \brief LZMA2 encoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_LZMA2_ENCODER_H
+#define LZMA_LZMA2_ENCODER_H
+
+#include "common.h"
+
+
+/// Maximum number of bytes of actual data per chunk (no headers)
+#define LZMA2_CHUNK_MAX (UINT32_C(1) << 16)
+
+/// Maximum uncompressed size of LZMA chunk (no headers)
+#define LZMA2_UNCOMPRESSED_MAX (UINT32_C(1) << 21)
+
+/// Maximum size of LZMA2 headers
+#define LZMA2_HEADER_MAX 6
+
+/// Size of a header for uncompressed chunk
+#define LZMA2_HEADER_UNCOMPRESSED 3
+
+
+extern lzma_ret lzma_lzma2_encoder_init(
+ lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters);
+
+extern uint64_t lzma_lzma2_encoder_memusage(const void *options);
+
+extern lzma_ret lzma_lzma2_props_encode(const void *options, uint8_t *out);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_common.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_common.h
new file mode 100644
index 00000000..f648d501
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_common.h
@@ -0,0 +1,225 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma_common.h
+/// \brief Private definitions common to LZMA encoder and decoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_LZMA_COMMON_H
+#define LZMA_LZMA_COMMON_H
+
+#include "common.h"
+#include "range_common.h"
+
+
+///////////////////
+// Miscellaneous //
+///////////////////
+
+/// Maximum number of position states. A position state is the lowest pb
+/// bits of the current uncompressed offset. In some places there are
+/// different sets of probabilities for different position states.
+#define POS_STATES_MAX (1 << LZMA_PB_MAX)
+
+
+/// Validates lc, lp, and pb.
+static inline bool
+is_lclppb_valid(const lzma_options_lzma *options)
+{
+ return options->lc <= LZMA_LCLP_MAX && options->lp <= LZMA_LCLP_MAX
+ && options->lc + options->lp <= LZMA_LCLP_MAX
+ && options->pb <= LZMA_PB_MAX;
+}
+
+
+///////////
+// State //
+///////////
+
+/// This enum is used to track which events have occurred most recently and
+/// in which order. This information is used to predict the next event.
+///
+/// Events:
+/// - Literal: One 8-bit byte
+/// - Match: Repeat a chunk of data at some distance
+/// - Long repeat: Multi-byte match at a recently seen distance
+/// - Short repeat: One-byte repeat at a recently seen distance
+///
+/// The event names are of the form STATE_oldest_older_previous. REP means
+/// either short or long repeated match, and NONLIT means any non-literal.
+typedef enum {
+ STATE_LIT_LIT,
+ STATE_MATCH_LIT_LIT,
+ STATE_REP_LIT_LIT,
+ STATE_SHORTREP_LIT_LIT,
+ STATE_MATCH_LIT,
+ STATE_REP_LIT,
+ STATE_SHORTREP_LIT,
+ STATE_LIT_MATCH,
+ STATE_LIT_LONGREP,
+ STATE_LIT_SHORTREP,
+ STATE_NONLIT_MATCH,
+ STATE_NONLIT_REP,
+} lzma_lzma_state;
+
+
+/// Total number of states
+#define STATES 12
+
+/// The lowest 7 states indicate that the previous state was a literal.
+#define LIT_STATES 7
+
+
+/// Indicate that the latest state was a literal.
+#define update_literal(state) \
+ state = ((state) <= STATE_SHORTREP_LIT_LIT \
+ ? STATE_LIT_LIT \
+ : ((state) <= STATE_LIT_SHORTREP \
+ ? (state) - 3 \
+ : (state) - 6))
+
+/// Indicate that the latest state was a match.
+#define update_match(state) \
+ state = ((state) < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH)
+
+/// Indicate that the latest state was a long repeated match.
+#define update_long_rep(state) \
+ state = ((state) < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP)
+
+/// Indicate that the latest state was a short match.
+#define update_short_rep(state) \
+ state = ((state) < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP)
+
+/// Test if the previous state was a literal.
+#define is_literal_state(state) \
+ ((state) < LIT_STATES)
+
+
+/////////////
+// Literal //
+/////////////
+
+/// Each literal coder is divided into three sections:
+/// - 0x001-0x0FF: Without match byte
+/// - 0x101-0x1FF: With match byte; match bit is 0
+/// - 0x201-0x2FF: With match byte; match bit is 1
+///
+/// The match byte is used when the previous LZMA symbol was something other
+/// than a literal (that is, it was some kind of match).
+#define LITERAL_CODER_SIZE 0x300
+
+/// Maximum number of literal coders
+#define LITERAL_CODERS_MAX (1 << LZMA_LCLP_MAX)
+
+/// Locate the literal coder for the next literal byte. The choice depends on
+/// - the lowest literal_pos_bits bits of the position of the current
+/// byte; and
+/// - the highest literal_context_bits bits of the previous byte.
+#define literal_subcoder(probs, lc, lp_mask, pos, prev_byte) \
+ ((probs)[(((pos) & lp_mask) << lc) + ((prev_byte) >> (8 - lc))])
+
+
+static inline void
+literal_init(probability (*probs)[LITERAL_CODER_SIZE],
+ uint32_t lc, uint32_t lp)
+{
+ assert(lc + lp <= LZMA_LCLP_MAX);
+
+ const uint32_t coders = 1U << (lc + lp);
+
+ for (uint32_t i = 0; i < coders; ++i)
+ for (uint32_t j = 0; j < LITERAL_CODER_SIZE; ++j)
+ bit_reset(probs[i][j]);
+
+ return;
+}
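+
+// Editor's note (illustrative only, not part of the upstream xz sources):
+// with the common defaults lc=3 and lp=0, lp_mask is 0, so
+// literal_subcoder() picks one of 1 << 3 == 8 subcoders based only on the
+// top three bits of the previous byte; literal_init() then resets
+// 8 * 0x300 probabilities.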
+
+
+//////////////////
+// Match length //
+//////////////////
+
+// Minimum length of a match is two bytes.
+#define MATCH_LEN_MIN 2
+
+// Match length is encoded with 4, 5, or 10 bits.
+//
+// Length Bits
+// 2-9 4 = Choice=0 + 3 bits
+// 10-17 5 = Choice=1 + Choice2=0 + 3 bits
+// 18-273 10 = Choice=1 + Choice2=1 + 8 bits
+#define LEN_LOW_BITS 3
+#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
+#define LEN_MID_BITS 3
+#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
+#define LEN_HIGH_BITS 8
+#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
+#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)
+
+// Maximum length of a match is 273 which is a result of the encoding
+// described above.
+#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)
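+
+// Editor's note (not part of the upstream xz sources): the table above
+// gives LEN_SYMBOLS = 8 + 8 + 256 = 272, so
+// MATCH_LEN_MAX = 2 + 272 - 1 = 273, matching the 2-273 length range in
+// the encoding table.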
+
+
+////////////////////
+// Match distance //
+////////////////////
+
+// A different set of probabilities is used for match distances that have very
+// short match length: Lengths of 2, 3, and 4 bytes have a separate set of
+// probabilities for each length. The matches with longer length use a shared
+// set of probabilities.
+#define LEN_TO_POS_STATES 4
+
+// Macro to get the index of the appropriate probability array.
+#define get_len_to_pos_state(len) \
+ ((len) < LEN_TO_POS_STATES + MATCH_LEN_MIN \
+ ? (len) - MATCH_LEN_MIN \
+ : LEN_TO_POS_STATES - 1)
+
+// The highest two bits of a match distance (pos slot) are encoded using six
+// bits. See fastpos.h for more explanation.
+#define POS_SLOT_BITS 6
+#define POS_SLOTS (1 << POS_SLOT_BITS)
+
+// Match distances up to 127 are fully encoded using probabilities. Since
+// the highest two bits (pos slot) are always encoded using six bits, the
+// distances 0-3 don't need any additional bits to encode, since the pos
+// slot itself is the same as the actual distance. START_POS_MODEL_INDEX
+// indicates the first pos slot where at least one additional bit is needed.
+#define START_POS_MODEL_INDEX 4
+
+// Match distances greater than 127 are encoded in three pieces:
+// - pos slot: the highest two bits
+// - direct bits: 2-26 bits below the highest two bits
+// - alignment bits: four lowest bits
+//
+// Direct bits don't use any probabilities.
+//
+// The pos slot value of 14 is for distances 128-191 (see the table in
+// fastpos.h to understand why).
+#define END_POS_MODEL_INDEX 14
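+
+// Editor's note (illustrative only, not part of the upstream xz sources):
+// for pos slot 14 the base distance is (2 | (14 & 1)) << ((14 >> 1) - 1)
+// = 2 << 6 = 128, and the remaining six low bits are split into two
+// direct bits and four alignment bits, covering distances 128-191.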
+
+// Pos slots that indicate a distance <= 127.
+#define FULL_DISTANCES_BITS (END_POS_MODEL_INDEX / 2)
+#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)
+
+// For match distances greater than 127, only the highest two bits and the
+// lowest four bits (alignment) are encoded using probabilities.
+#define ALIGN_BITS 4
+#define ALIGN_TABLE_SIZE (1 << ALIGN_BITS)
+#define ALIGN_MASK (ALIGN_TABLE_SIZE - 1)
+
+// LZMA remembers the four most recent match distances. Reusing these distances
+// tends to take less space than re-encoding the actual distance value.
+#define REP_DISTANCES 4
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_decoder.c
new file mode 100644
index 00000000..94bd4ddf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_decoder.c
@@ -0,0 +1,1059 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma_decoder.c
+/// \brief LZMA decoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "lz_decoder.h"
+#include "lzma_common.h"
+#include "lzma_decoder.h"
+#include "range_decoder.h"
+
+
+#ifdef HAVE_SMALL
+
+// Macros for (somewhat) size-optimized code.
+#define seq_4(seq) seq
+
+#define seq_6(seq) seq
+
+#define seq_8(seq) seq
+
+#define seq_len(seq) \
+ seq ## _CHOICE, \
+ seq ## _CHOICE2, \
+ seq ## _BITTREE
+
+#define len_decode(target, ld, pos_state, seq) \
+do { \
+case seq ## _CHOICE: \
+ rc_if_0(ld.choice, seq ## _CHOICE) { \
+ rc_update_0(ld.choice); \
+ probs = ld.low[pos_state];\
+ limit = LEN_LOW_SYMBOLS; \
+ target = MATCH_LEN_MIN; \
+ } else { \
+ rc_update_1(ld.choice); \
+case seq ## _CHOICE2: \
+ rc_if_0(ld.choice2, seq ## _CHOICE2) { \
+ rc_update_0(ld.choice2); \
+ probs = ld.mid[pos_state]; \
+ limit = LEN_MID_SYMBOLS; \
+ target = MATCH_LEN_MIN + LEN_LOW_SYMBOLS; \
+ } else { \
+ rc_update_1(ld.choice2); \
+ probs = ld.high; \
+ limit = LEN_HIGH_SYMBOLS; \
+ target = MATCH_LEN_MIN + LEN_LOW_SYMBOLS \
+ + LEN_MID_SYMBOLS; \
+ } \
+ } \
+ symbol = 1; \
+case seq ## _BITTREE: \
+ do { \
+ rc_bit(probs[symbol], , , seq ## _BITTREE); \
+ } while (symbol < limit); \
+ target += symbol - limit; \
+} while (0)
+
+#else // HAVE_SMALL
+
+// Unrolled versions
+#define seq_4(seq) \
+ seq ## 0, \
+ seq ## 1, \
+ seq ## 2, \
+ seq ## 3
+
+#define seq_6(seq) \
+ seq ## 0, \
+ seq ## 1, \
+ seq ## 2, \
+ seq ## 3, \
+ seq ## 4, \
+ seq ## 5
+
+#define seq_8(seq) \
+ seq ## 0, \
+ seq ## 1, \
+ seq ## 2, \
+ seq ## 3, \
+ seq ## 4, \
+ seq ## 5, \
+ seq ## 6, \
+ seq ## 7
+
+#define seq_len(seq) \
+ seq ## _CHOICE, \
+ seq ## _LOW0, \
+ seq ## _LOW1, \
+ seq ## _LOW2, \
+ seq ## _CHOICE2, \
+ seq ## _MID0, \
+ seq ## _MID1, \
+ seq ## _MID2, \
+ seq ## _HIGH0, \
+ seq ## _HIGH1, \
+ seq ## _HIGH2, \
+ seq ## _HIGH3, \
+ seq ## _HIGH4, \
+ seq ## _HIGH5, \
+ seq ## _HIGH6, \
+ seq ## _HIGH7
+
+#define len_decode(target, ld, pos_state, seq) \
+do { \
+ symbol = 1; \
+case seq ## _CHOICE: \
+ rc_if_0(ld.choice, seq ## _CHOICE) { \
+ rc_update_0(ld.choice); \
+ rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW0); \
+ rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW1); \
+ rc_bit_case(ld.low[pos_state][symbol], , , seq ## _LOW2); \
+ target = symbol - LEN_LOW_SYMBOLS + MATCH_LEN_MIN; \
+ } else { \
+ rc_update_1(ld.choice); \
+case seq ## _CHOICE2: \
+ rc_if_0(ld.choice2, seq ## _CHOICE2) { \
+ rc_update_0(ld.choice2); \
+ rc_bit_case(ld.mid[pos_state][symbol], , , \
+ seq ## _MID0); \
+ rc_bit_case(ld.mid[pos_state][symbol], , , \
+ seq ## _MID1); \
+ rc_bit_case(ld.mid[pos_state][symbol], , , \
+ seq ## _MID2); \
+ target = symbol - LEN_MID_SYMBOLS \
+ + MATCH_LEN_MIN + LEN_LOW_SYMBOLS; \
+ } else { \
+ rc_update_1(ld.choice2); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH0); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH1); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH2); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH3); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH4); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH5); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH6); \
+ rc_bit_case(ld.high[symbol], , , seq ## _HIGH7); \
+ target = symbol - LEN_HIGH_SYMBOLS \
+ + MATCH_LEN_MIN \
+ + LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; \
+ } \
+ } \
+} while (0)
+
+#endif // HAVE_SMALL
+
+
+/// Length decoder probabilities; see comments in lzma_common.h.
+typedef struct {
+ probability choice;
+ probability choice2;
+ probability low[POS_STATES_MAX][LEN_LOW_SYMBOLS];
+ probability mid[POS_STATES_MAX][LEN_MID_SYMBOLS];
+ probability high[LEN_HIGH_SYMBOLS];
+} lzma_length_decoder;
+
+
+struct lzma_coder_s {
+ ///////////////////
+ // Probabilities //
+ ///////////////////
+
+ /// Literals; see comments in lzma_common.h.
+ probability literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
+
+ /// If 1, it's a match. Otherwise it's a single 8-bit literal.
+ probability is_match[STATES][POS_STATES_MAX];
+
+ /// If 1, it's a repeated match. The distance is one of rep0 .. rep3.
+ probability is_rep[STATES];
+
+ /// If 0, distance of a repeated match is rep0.
+ /// Otherwise check is_rep1.
+ probability is_rep0[STATES];
+
+ /// If 0, distance of a repeated match is rep1.
+ /// Otherwise check is_rep2.
+ probability is_rep1[STATES];
+
+ /// If 0, distance of a repeated match is rep2. Otherwise it is rep3.
+ probability is_rep2[STATES];
+
+ /// If 1, the repeated match has length of one byte. Otherwise
+ /// the length is decoded from rep_len_decoder.
+ probability is_rep0_long[STATES][POS_STATES_MAX];
+
+ /// Probability tree for the highest two bits of the match distance.
+ /// There is a separate probability tree for match lengths of
+ /// 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
+ probability pos_slot[LEN_TO_POS_STATES][POS_SLOTS];
+
+ /// Probability trees for additional bits for match distance when the
+ /// distance is in the range [4, 127].
+ probability pos_special[FULL_DISTANCES - END_POS_MODEL_INDEX];
+
+ /// Probability tree for the lowest four bits of a match distance
+ /// that is equal to or greater than 128.
+ probability pos_align[ALIGN_TABLE_SIZE];
+
+ /// Length of a normal match
+ lzma_length_decoder match_len_decoder;
+
+ /// Length of a repeated match
+ lzma_length_decoder rep_len_decoder;
+
+ ///////////////////
+ // Decoder state //
+ ///////////////////
+
+ // Range coder
+ lzma_range_decoder rc;
+
+ // Types of the most recently seen LZMA symbols
+ lzma_lzma_state state;
+
+ uint32_t rep0; ///< Distance of the latest match
+ uint32_t rep1; ///< Distance of second latest match
+ uint32_t rep2; ///< Distance of third latest match
+ uint32_t rep3; ///< Distance of fourth latest match
+
+ uint32_t pos_mask; // (1U << pb) - 1
+ uint32_t literal_context_bits;
+ uint32_t literal_pos_mask;
+
+ /// Uncompressed size in bytes, or LZMA_VLI_UNKNOWN if end of
+ /// payload marker is expected.
+ lzma_vli uncompressed_size;
+
+ ////////////////////////////////
+ // State of incomplete symbol //
+ ////////////////////////////////
+
+ /// Position where to continue the decoder loop
+ enum {
+ SEQ_NORMALIZE,
+ SEQ_IS_MATCH,
+ seq_8(SEQ_LITERAL),
+ seq_8(SEQ_LITERAL_MATCHED),
+ SEQ_LITERAL_WRITE,
+ SEQ_IS_REP,
+ seq_len(SEQ_MATCH_LEN),
+ seq_6(SEQ_POS_SLOT),
+ SEQ_POS_MODEL,
+ SEQ_DIRECT,
+ seq_4(SEQ_ALIGN),
+ SEQ_EOPM,
+ SEQ_IS_REP0,
+ SEQ_SHORTREP,
+ SEQ_IS_REP0_LONG,
+ SEQ_IS_REP1,
+ SEQ_IS_REP2,
+ seq_len(SEQ_REP_LEN),
+ SEQ_COPY,
+ } sequence;
+
+ /// Base of the current probability tree
+ probability *probs;
+
+ /// Symbol being decoded. This is also used as an index variable in
+ /// bittree decoders: probs[symbol]
+ uint32_t symbol;
+
+ /// Used as a loop termination condition on bittree decoders and
+ /// direct bits decoder.
+ uint32_t limit;
+
+ /// Matched literal decoder: 0x100 or 0 to help avoid branches.
+ /// Bittree reverse decoders: Offset of the next bit: 1 << offset
+ uint32_t offset;
+
+ /// If decoding a literal: match byte.
+ /// If decoding a match: length of the match.
+ uint32_t len;
+};
+
+
+static lzma_ret
+lzma_decode(lzma_coder *restrict coder, lzma_dict *restrict dictptr,
+ const uint8_t *restrict in,
+ size_t *restrict in_pos, size_t in_size)
+{
+ ////////////////////
+ // Initialization //
+ ////////////////////
+
+ if (!rc_read_init(&coder->rc, in, in_pos, in_size))
+ return LZMA_OK;
+
+ ///////////////
+ // Variables //
+ ///////////////
+
+ // Making local copies of often-used variables improves both
+ // speed and readability.
+
+ lzma_dict dict = *dictptr;
+
+ const size_t dict_start = dict.pos;
+
+ // Range decoder
+ rc_to_local(coder->rc, *in_pos);
+
+ // State
+ uint32_t state = coder->state;
+ uint32_t rep0 = coder->rep0;
+ uint32_t rep1 = coder->rep1;
+ uint32_t rep2 = coder->rep2;
+ uint32_t rep3 = coder->rep3;
+
+ const uint32_t pos_mask = coder->pos_mask;
+
+ // These variables are actually needed only if we ran out of
+ // input last time in the middle of the decoder loop.
+ probability *probs = coder->probs;
+ uint32_t symbol = coder->symbol;
+ uint32_t limit = coder->limit;
+ uint32_t offset = coder->offset;
+ uint32_t len = coder->len;
+
+ const uint32_t literal_pos_mask = coder->literal_pos_mask;
+ const uint32_t literal_context_bits = coder->literal_context_bits;
+
+ // Temporary variables
+ uint32_t pos_state = dict.pos & pos_mask;
+
+ lzma_ret ret = LZMA_OK;
+
+ // If uncompressed size is known, there must be no end of payload
+ // marker.
+ const bool no_eopm = coder->uncompressed_size
+ != LZMA_VLI_UNKNOWN;
+ if (no_eopm && coder->uncompressed_size < dict.limit - dict.pos)
+ dict.limit = dict.pos + (size_t)(coder->uncompressed_size);
+
+ // The main decoder loop. The "switch" is used to restart the decoder at
+ // the correct location. Once restarted, the "switch" is no longer used.
+ switch (coder->sequence)
+ while (true) {
+ // Calculate new pos_state. This is skipped on the first loop
+ // since we already calculated it when setting up the local
+ // variables.
+ pos_state = dict.pos & pos_mask;
+
+ case SEQ_NORMALIZE:
+ case SEQ_IS_MATCH:
+ if (unlikely(no_eopm && dict.pos == dict.limit))
+ break;
+
+ rc_if_0(coder->is_match[state][pos_state], SEQ_IS_MATCH) {
+ rc_update_0(coder->is_match[state][pos_state]);
+
+ // It's a literal i.e. a single 8-bit byte.
+
+ probs = literal_subcoder(coder->literal,
+ literal_context_bits, literal_pos_mask,
+ dict.pos, dict_get(&dict, 0));
+ symbol = 1;
+
+ if (is_literal_state(state)) {
+ // Decode literal without match byte.
+#ifdef HAVE_SMALL
+ case SEQ_LITERAL:
+ do {
+ rc_bit(probs[symbol], , , SEQ_LITERAL);
+ } while (symbol < (1 << 8));
+#else
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL0);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL1);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL2);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL3);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL4);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL5);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL6);
+ rc_bit_case(probs[symbol], , , SEQ_LITERAL7);
+#endif
+ } else {
+ // Decode literal with match byte.
+ //
+ // We store the byte we compare against
+ // ("match byte") to "len" to minimize the
+ // number of variables we need to store
+ // between decoder calls.
+ len = dict_get(&dict, rep0) << 1;
+
+ // The usage of "offset" allows omitting some
+ // branches, which should give a tiny speed
+ // improvement on some CPUs. "offset" gets
+ // set to zero if match_bit didn't match.
+ offset = 0x100;
+
+#ifdef HAVE_SMALL
+ case SEQ_LITERAL_MATCHED:
+ do {
+ const uint32_t match_bit
+ = len & offset;
+ const uint32_t subcoder_index
+ = offset + match_bit
+ + symbol;
+
+ rc_bit(probs[subcoder_index],
+ offset &= ~match_bit,
+ offset &= match_bit,
+ SEQ_LITERAL_MATCHED);
+
+ // It seems to be faster to do this
+ // here instead of putting it at the
+ // beginning of the loop and then
+ // putting the "case" in the middle
+ // of the loop.
+ len <<= 1;
+
+ } while (symbol < (1 << 8));
+#else
+ // Unroll the loop.
+ uint32_t match_bit;
+ uint32_t subcoder_index;
+
+# define d(seq) \
+ case seq: \
+ match_bit = len & offset; \
+ subcoder_index = offset + match_bit + symbol; \
+ rc_bit(probs[subcoder_index], \
+ offset &= ~match_bit, \
+ offset &= match_bit, \
+ seq)
+
+ d(SEQ_LITERAL_MATCHED0);
+ len <<= 1;
+ d(SEQ_LITERAL_MATCHED1);
+ len <<= 1;
+ d(SEQ_LITERAL_MATCHED2);
+ len <<= 1;
+ d(SEQ_LITERAL_MATCHED3);
+ len <<= 1;
+ d(SEQ_LITERAL_MATCHED4);
+ len <<= 1;
+ d(SEQ_LITERAL_MATCHED5);
+ len <<= 1;
+ d(SEQ_LITERAL_MATCHED6);
+ len <<= 1;
+ d(SEQ_LITERAL_MATCHED7);
+# undef d
+#endif
+ }
+
+ //update_literal(state);
+ // Use a lookup table to update to literal state,
+ // since compared to other state updates, this would
+ // need two branches.
+ static const lzma_lzma_state next_state[] = {
+ STATE_LIT_LIT,
+ STATE_LIT_LIT,
+ STATE_LIT_LIT,
+ STATE_LIT_LIT,
+ STATE_MATCH_LIT_LIT,
+ STATE_REP_LIT_LIT,
+ STATE_SHORTREP_LIT_LIT,
+ STATE_MATCH_LIT,
+ STATE_REP_LIT,
+ STATE_SHORTREP_LIT,
+ STATE_MATCH_LIT,
+ STATE_REP_LIT
+ };
+ state = next_state[state];
+
+ case SEQ_LITERAL_WRITE:
+ if (unlikely(dict_put(&dict, symbol))) {
+ coder->sequence = SEQ_LITERAL_WRITE;
+ goto out;
+ }
+
+ continue;
+ }
+
+ // Instead of a new byte we are going to get a byte range
+ // (distance and length) which will be repeated from our
+ // output history.
+
+ rc_update_1(coder->is_match[state][pos_state]);
+
+ case SEQ_IS_REP:
+ rc_if_0(coder->is_rep[state], SEQ_IS_REP) {
+ // Not a repeated match
+ rc_update_0(coder->is_rep[state]);
+ update_match(state);
+
+ // The latest three match distances are kept in
+ // memory in case there are repeated matches.
+ rep3 = rep2;
+ rep2 = rep1;
+ rep1 = rep0;
+
+ // Decode the length of the match.
+ len_decode(len, coder->match_len_decoder,
+ pos_state, SEQ_MATCH_LEN);
+
+ // Prepare to decode the highest two bits of the
+ // match distance.
+ probs = coder->pos_slot[get_len_to_pos_state(len)];
+ symbol = 1;
+
+#ifdef HAVE_SMALL
+ case SEQ_POS_SLOT:
+ do {
+ rc_bit(probs[symbol], , , SEQ_POS_SLOT);
+ } while (symbol < POS_SLOTS);
+#else
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT0);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT1);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT2);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT3);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT4);
+ rc_bit_case(probs[symbol], , , SEQ_POS_SLOT5);
+#endif
+ // Get rid of the highest bit that was needed for
+ // indexing of the probability array.
+ symbol -= POS_SLOTS;
+ assert(symbol <= 63);
+
+ if (symbol < START_POS_MODEL_INDEX) {
+ // Match distances [0, 3] have only two bits.
+ rep0 = symbol;
+ } else {
+ // Decode the lowest [1, 29] bits of
+ // the match distance.
+ limit = (symbol >> 1) - 1;
+ assert(limit >= 1 && limit <= 30);
+ rep0 = 2 + (symbol & 1);
+
+ if (symbol < END_POS_MODEL_INDEX) {
+ // Prepare to decode the low bits for
+ // a distance of [4, 127].
+ assert(limit <= 5);
+ rep0 <<= limit;
+ assert(rep0 <= 96);
+ // -1 is fine, because we start
+ // decoding at probs[1], not probs[0].
+ // NOTE: This violates the C standard,
+ // since we are doing pointer
+ // arithmetic past the beginning of
+ // the array.
+ assert((int32_t)(rep0 - symbol - 1)
+ >= -1);
+ assert((int32_t)(rep0 - symbol - 1)
+ <= 82);
+ probs = coder->pos_special + rep0
+ - symbol - 1;
+ symbol = 1;
+ offset = 0;
+ case SEQ_POS_MODEL:
+#ifdef HAVE_SMALL
+ do {
+ rc_bit(probs[symbol], ,
+ rep0 += 1 << offset,
+ SEQ_POS_MODEL);
+ } while (++offset < limit);
+#else
+ switch (limit) {
+ case 5:
+ assert(offset == 0);
+ rc_bit(probs[symbol], ,
+ rep0 += 1,
+ SEQ_POS_MODEL);
+ ++offset;
+ --limit;
+ case 4:
+ rc_bit(probs[symbol], ,
+ rep0 += 1 << offset,
+ SEQ_POS_MODEL);
+ ++offset;
+ --limit;
+ case 3:
+ rc_bit(probs[symbol], ,
+ rep0 += 1 << offset,
+ SEQ_POS_MODEL);
+ ++offset;
+ --limit;
+ case 2:
+ rc_bit(probs[symbol], ,
+ rep0 += 1 << offset,
+ SEQ_POS_MODEL);
+ ++offset;
+ --limit;
+ case 1:
+ // We need "symbol" only for
+ // indexing the probability
+ // array, thus we can use
+ // rc_bit_last() here to omit
+ // the unneeded updating of
+ // "symbol".
+ rc_bit_last(probs[symbol], ,
+ rep0 += 1 << offset,
+ SEQ_POS_MODEL);
+ }
+#endif
+ } else {
+ // The distance is >= 128. Decode the
+ // lower bits without probabilities
+ // except the lowest four bits.
+ assert(symbol >= 14);
+ assert(limit >= 6);
+ limit -= ALIGN_BITS;
+ assert(limit >= 2);
+ case SEQ_DIRECT:
+ // Not worth manual unrolling
+ do {
+ rc_direct(rep0, SEQ_DIRECT);
+ } while (--limit > 0);
+
+ // Decode the lowest four bits using
+ // probabilities.
+ rep0 <<= ALIGN_BITS;
+ symbol = 1;
+#ifdef HAVE_SMALL
+ offset = 0;
+ case SEQ_ALIGN:
+ do {
+ rc_bit(coder->pos_align[
+ symbol], ,
+ rep0 += 1 << offset,
+ SEQ_ALIGN);
+ } while (++offset < ALIGN_BITS);
+#else
+ case SEQ_ALIGN0:
+ rc_bit(coder->pos_align[symbol], ,
+ rep0 += 1, SEQ_ALIGN0);
+ case SEQ_ALIGN1:
+ rc_bit(coder->pos_align[symbol], ,
+ rep0 += 2, SEQ_ALIGN1);
+ case SEQ_ALIGN2:
+ rc_bit(coder->pos_align[symbol], ,
+ rep0 += 4, SEQ_ALIGN2);
+ case SEQ_ALIGN3:
+ // Like in SEQ_POS_MODEL, we don't
+ // need "symbol" for anything else
+ // than indexing the probability array.
+ rc_bit_last(coder->pos_align[symbol], ,
+ rep0 += 8, SEQ_ALIGN3);
+#endif
+
+ if (rep0 == UINT32_MAX) {
+ // End of payload marker was
+ // found. It must not be
+ // present if uncompressed
+ // size is known.
+ if (coder->uncompressed_size
+ != LZMA_VLI_UNKNOWN) {
+ ret = LZMA_DATA_ERROR;
+ goto out;
+ }
+
+ case SEQ_EOPM:
+ // TODO Comment
+ rc_normalize(SEQ_EOPM);
+ ret = LZMA_STREAM_END;
+ goto out;
+ }
+ }
+ }
+
+ // Validate the distance we just decoded.
+ if (unlikely(!dict_is_distance_valid(&dict, rep0))) {
+ ret = LZMA_DATA_ERROR;
+ goto out;
+ }
+
+ } else {
+ rc_update_1(coder->is_rep[state]);
+
+ // Repeated match
+ //
+ // The match distance is a value that we have had
+ // earlier. The latest four match distances are
+ // available as rep0, rep1, rep2 and rep3. We will
+ // now decode which of them is the new distance.
+ //
+ // There cannot be a match if we haven't produced
+ // any output, so check that first.
+ if (unlikely(!dict_is_distance_valid(&dict, 0))) {
+ ret = LZMA_DATA_ERROR;
+ goto out;
+ }
+
+ case SEQ_IS_REP0:
+ rc_if_0(coder->is_rep0[state], SEQ_IS_REP0) {
+ rc_update_0(coder->is_rep0[state]);
+ // The distance is rep0.
+
+ case SEQ_IS_REP0_LONG:
+ rc_if_0(coder->is_rep0_long[state][pos_state],
+ SEQ_IS_REP0_LONG) {
+ rc_update_0(coder->is_rep0_long[
+ state][pos_state]);
+
+ update_short_rep(state);
+
+ case SEQ_SHORTREP:
+ if (unlikely(dict_put(&dict, dict_get(
+ &dict, rep0)))) {
+ coder->sequence = SEQ_SHORTREP;
+ goto out;
+ }
+
+ continue;
+ }
+
+ // Repeating more than one byte at
+ // distance of rep0.
+ rc_update_1(coder->is_rep0_long[
+ state][pos_state]);
+
+ } else {
+ rc_update_1(coder->is_rep0[state]);
+
+ case SEQ_IS_REP1:
+ // The distance is rep1, rep2 or rep3. Once
+ // we find out which one of these three, it
+ // is stored to rep0 and rep1, rep2 and rep3
+ // are updated accordingly.
+ rc_if_0(coder->is_rep1[state], SEQ_IS_REP1) {
+ rc_update_0(coder->is_rep1[state]);
+
+ const uint32_t distance = rep1;
+ rep1 = rep0;
+ rep0 = distance;
+
+ } else {
+ rc_update_1(coder->is_rep1[state]);
+ case SEQ_IS_REP2:
+ rc_if_0(coder->is_rep2[state],
+ SEQ_IS_REP2) {
+ rc_update_0(coder->is_rep2[
+ state]);
+
+ const uint32_t distance = rep2;
+ rep2 = rep1;
+ rep1 = rep0;
+ rep0 = distance;
+
+ } else {
+ rc_update_1(coder->is_rep2[
+ state]);
+
+ const uint32_t distance = rep3;
+ rep3 = rep2;
+ rep2 = rep1;
+ rep1 = rep0;
+ rep0 = distance;
+ }
+ }
+ }
+
+ update_long_rep(state);
+
+ // Decode the length of the repeated match.
+ len_decode(len, coder->rep_len_decoder,
+ pos_state, SEQ_REP_LEN);
+ }
+
+ /////////////////////////////////
+ // Repeat from history buffer. //
+ /////////////////////////////////
+
+ // The length is always between these limits. There is no way
+ // to trigger the algorithm to set len outside this range.
+ assert(len >= MATCH_LEN_MIN);
+ assert(len <= MATCH_LEN_MAX);
+
+ case SEQ_COPY:
+ // Repeat len bytes from distance of rep0.
+ if (unlikely(dict_repeat(&dict, rep0, &len))) {
+ coder->sequence = SEQ_COPY;
+ goto out;
+ }
+ }
+
+ rc_normalize(SEQ_NORMALIZE);
+ coder->sequence = SEQ_IS_MATCH;
+
+out:
+ // Save state
+
+ // NOTE: Must not copy dict.limit.
+ dictptr->pos = dict.pos;
+ dictptr->full = dict.full;
+
+ rc_from_local(coder->rc, *in_pos);
+
+ coder->state = state;
+ coder->rep0 = rep0;
+ coder->rep1 = rep1;
+ coder->rep2 = rep2;
+ coder->rep3 = rep3;
+
+ coder->probs = probs;
+ coder->symbol = symbol;
+ coder->limit = limit;
+ coder->offset = offset;
+ coder->len = len;
+
+ // Update the remaining amount of uncompressed data if uncompressed
+ // size was known.
+ if (coder->uncompressed_size != LZMA_VLI_UNKNOWN) {
+ coder->uncompressed_size -= dict.pos - dict_start;
+
+ // Since there cannot be an end of payload marker if the
+ // uncompressed size was known, we check here if we
+ // finished decoding.
+ if (coder->uncompressed_size == 0 && ret == LZMA_OK
+ && coder->sequence != SEQ_NORMALIZE)
+ ret = coder->sequence == SEQ_IS_MATCH
+ ? LZMA_STREAM_END : LZMA_DATA_ERROR;
+ }
+
+ // We can do an additional check in the range decoder to catch some
+ // corrupted files.
+ if (ret == LZMA_STREAM_END) {
+ if (!rc_is_finished(coder->rc))
+ ret = LZMA_DATA_ERROR;
+
+ // Reset the range decoder so that it is ready to reinitialize
+ // for a new LZMA2 chunk.
+ rc_reset(coder->rc);
+ }
+
+ return ret;
+}
+
+
+
+static void
+lzma_decoder_uncompressed(lzma_coder *coder, lzma_vli uncompressed_size)
+{
+ coder->uncompressed_size = uncompressed_size;
+}
+
+/*
+extern void
+lzma_lzma_decoder_uncompressed(void *coder_ptr, lzma_vli uncompressed_size)
+{
+ // This is a hack.
+ (*(lzma_coder **)(coder_ptr))->uncompressed_size = uncompressed_size;
+}
+*/
+
+static void
+lzma_decoder_reset(lzma_coder *coder, const void *opt)
+{
+ const lzma_options_lzma *options = opt;
+
+ // NOTE: We assume that lc/lp/pb are valid since they were
+ // successfully decoded with lzma_lzma_decode_properties().
+ // FIXME?
+
+ // Calculate pos_mask. We don't need pos_bits as is for anything.
+ coder->pos_mask = (1U << options->pb) - 1;
+
+ // Initialize the literal decoder.
+ literal_init(coder->literal, options->lc, options->lp);
+
+ coder->literal_context_bits = options->lc;
+ coder->literal_pos_mask = (1U << options->lp) - 1;
+
+ // State
+ coder->state = STATE_LIT_LIT;
+ coder->rep0 = 0;
+ coder->rep1 = 0;
+ coder->rep2 = 0;
+ coder->rep3 = 0;
+ coder->pos_mask = (1U << options->pb) - 1;
+
+ // Range decoder
+ rc_reset(coder->rc);
+
+ // Bit and bittree decoders
+ for (uint32_t i = 0; i < STATES; ++i) {
+ for (uint32_t j = 0; j <= coder->pos_mask; ++j) {
+ bit_reset(coder->is_match[i][j]);
+ bit_reset(coder->is_rep0_long[i][j]);
+ }
+
+ bit_reset(coder->is_rep[i]);
+ bit_reset(coder->is_rep0[i]);
+ bit_reset(coder->is_rep1[i]);
+ bit_reset(coder->is_rep2[i]);
+ }
+
+ for (uint32_t i = 0; i < LEN_TO_POS_STATES; ++i)
+ bittree_reset(coder->pos_slot[i], POS_SLOT_BITS);
+
+ for (uint32_t i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
+ bit_reset(coder->pos_special[i]);
+
+ bittree_reset(coder->pos_align, ALIGN_BITS);
+
+ // Len decoders (also bit/bittree)
+ const uint32_t num_pos_states = 1U << options->pb;
+ bit_reset(coder->match_len_decoder.choice);
+ bit_reset(coder->match_len_decoder.choice2);
+ bit_reset(coder->rep_len_decoder.choice);
+ bit_reset(coder->rep_len_decoder.choice2);
+
+ for (uint32_t pos_state = 0; pos_state < num_pos_states; ++pos_state) {
+ bittree_reset(coder->match_len_decoder.low[pos_state],
+ LEN_LOW_BITS);
+ bittree_reset(coder->match_len_decoder.mid[pos_state],
+ LEN_MID_BITS);
+
+ bittree_reset(coder->rep_len_decoder.low[pos_state],
+ LEN_LOW_BITS);
+ bittree_reset(coder->rep_len_decoder.mid[pos_state],
+ LEN_MID_BITS);
+ }
+
+ bittree_reset(coder->match_len_decoder.high, LEN_HIGH_BITS);
+ bittree_reset(coder->rep_len_decoder.high, LEN_HIGH_BITS);
+
+ coder->sequence = SEQ_IS_MATCH;
+ coder->probs = NULL;
+ coder->symbol = 0;
+ coder->limit = 0;
+ coder->offset = 0;
+ coder->len = 0;
+
+ return;
+}
+
+
+extern lzma_ret
+lzma_lzma_decoder_create(lzma_lz_decoder *lz, lzma_allocator *allocator,
+ const void *opt, lzma_lz_options *lz_options)
+{
+ if (lz->coder == NULL) {
+ lz->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (lz->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ lz->code = &lzma_decode;
+ lz->reset = &lzma_decoder_reset;
+ lz->set_uncompressed = &lzma_decoder_uncompressed;
+ }
+
+ // All dictionary sizes are OK here. LZ decoder will take care of
+ // the special cases.
+ const lzma_options_lzma *options = opt;
+ lz_options->dict_size = options->dict_size;
+ lz_options->preset_dict = options->preset_dict;
+ lz_options->preset_dict_size = options->preset_dict_size;
+
+ return LZMA_OK;
+}
+
+
+/// Allocate and initialize LZMA decoder. This is used only via LZ
+/// initialization (lzma_lzma_decoder_init() passes a function pointer to
+/// the LZ initialization).
+static lzma_ret
+lzma_decoder_init(lzma_lz_decoder *lz, lzma_allocator *allocator,
+ const void *options, lzma_lz_options *lz_options)
+{
+ if (!is_lclppb_valid(options))
+ return LZMA_PROG_ERROR;
+
+ return_if_error(lzma_lzma_decoder_create(
+ lz, allocator, options, lz_options));
+
+ lzma_decoder_reset(lz->coder, options);
+ lzma_decoder_uncompressed(lz->coder, LZMA_VLI_UNKNOWN);
+
+ return LZMA_OK;
+}
+
+
+extern lzma_ret
+lzma_lzma_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ // LZMA can only be the last filter in the chain. This is enforced
+ // by the raw_decoder initialization.
+ assert(filters[1].init == NULL);
+
+ return lzma_lz_decoder_init(next, allocator, filters,
+ &lzma_decoder_init);
+}
+
+
+extern bool
+lzma_lzma_lclppb_decode(lzma_options_lzma *options, uint8_t byte)
+{
+ if (byte > (4 * 5 + 4) * 9 + 8)
+ return true;
+
+ // See the file format specification to understand this.
+ options->pb = byte / (9 * 5);
+ byte -= options->pb * 9 * 5;
+ options->lp = byte / 9;
+ options->lc = byte - options->lp * 9;
+
+ return options->lc + options->lp > LZMA_LCLP_MAX;
+}
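+
+// Editor's note (illustrative only, not part of the upstream xz sources):
+// the classic LZMA properties byte 0x5D (93) decodes above as
+// pb = 93 / 45 = 2, lp = (93 - 90) / 9 = 0 and lc = 3, and passes the
+// lc + lp <= LZMA_LCLP_MAX check.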
+
+
+extern uint64_t
+lzma_lzma_decoder_memusage_nocheck(const void *options)
+{
+ const lzma_options_lzma *const opt = options;
+ return sizeof(lzma_coder) + lzma_lz_decoder_memusage(opt->dict_size);
+}
+
+
+extern uint64_t
+lzma_lzma_decoder_memusage(const void *options)
+{
+ if (!is_lclppb_valid(options))
+ return UINT64_MAX;
+
+ return lzma_lzma_decoder_memusage_nocheck(options);
+}
+
+
+extern lzma_ret
+lzma_lzma_props_decode(void **options, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size)
+{
+ if (props_size != 5)
+ return LZMA_OPTIONS_ERROR;
+
+ lzma_options_lzma *opt
+ = lzma_alloc(sizeof(lzma_options_lzma), allocator);
+ if (opt == NULL)
+ return LZMA_MEM_ERROR;
+
+ if (lzma_lzma_lclppb_decode(opt, props[0]))
+ goto error;
+
+ // All dictionary sizes are accepted, including zero. LZ decoder
+ // will automatically use a dictionary of at least a few KiB even if
+ // a smaller dictionary is requested.
+ opt->dict_size = integer_read_32(props + 1);
+
+ opt->preset_dict = NULL;
+ opt->preset_dict_size = 0;
+
+ *options = opt;
+
+ return LZMA_OK;
+
+error:
+ lzma_free(opt, allocator);
+ return LZMA_OPTIONS_ERROR;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_decoder.h
new file mode 100644
index 00000000..590a4b2d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_decoder.h
@@ -0,0 +1,54 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma_decoder.h
+/// \brief LZMA decoder API
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_LZMA_DECODER_H
+#define LZMA_LZMA_DECODER_H
+
+#include "common.h"
+
+
+/// Allocates and initializes LZMA decoder
+extern lzma_ret lzma_lzma_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+extern uint64_t lzma_lzma_decoder_memusage(const void *options);
+
+extern lzma_ret lzma_lzma_props_decode(
+ void **options, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size);
+
+
+/// \brief Decodes the LZMA Properties byte (lc/lp/pb)
+///
+/// \return true if error occurred, false on success
+///
+extern bool lzma_lzma_lclppb_decode(
+ lzma_options_lzma *options, uint8_t byte);
+
+
+#ifdef LZMA_LZ_DECODER_H
+/// Allocate and setup function pointers only. This is used by LZMA1 and
+/// LZMA2 decoders.
+extern lzma_ret lzma_lzma_decoder_create(
+ lzma_lz_decoder *lz, lzma_allocator *allocator,
+ const void *opt, lzma_lz_options *lz_options);
+
+/// Gets memory usage without validating lc/lp/pb. This is used by LZMA2
+/// decoder, because raw LZMA2 decoding doesn't need lc/lp/pb.
+extern uint64_t lzma_lzma_decoder_memusage_nocheck(const void *options);
+
+#endif
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder.c
new file mode 100644
index 00000000..2a9c14c9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder.c
@@ -0,0 +1,677 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma_encoder.c
+/// \brief LZMA encoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "lzma2_encoder.h"
+#include "lzma_encoder_private.h"
+#include "fastpos.h"
+
+
+/////////////
+// Literal //
+/////////////
+
+static inline void
+literal_matched(lzma_range_encoder *rc, probability *subcoder,
+ uint32_t match_byte, uint32_t symbol)
+{
+ uint32_t offset = 0x100;
+ symbol += UINT32_C(1) << 8;
+
+ do {
+ match_byte <<= 1;
+ const uint32_t match_bit = match_byte & offset;
+ const uint32_t subcoder_index
+ = offset + match_bit + (symbol >> 8);
+ const uint32_t bit = (symbol >> 7) & 1;
+ rc_bit(rc, &subcoder[subcoder_index], bit);
+
+ symbol <<= 1;
+ offset &= ~(match_byte ^ symbol);
+
+ } while (symbol < (UINT32_C(1) << 16));
+}
+
+
+static inline void
+literal(lzma_coder *coder, lzma_mf *mf, uint32_t position)
+{
+ // Locate the literal byte to be encoded and the subcoder.
+ const uint8_t cur_byte = mf->buffer[
+ mf->read_pos - mf->read_ahead];
+ probability *subcoder = literal_subcoder(coder->literal,
+ coder->literal_context_bits, coder->literal_pos_mask,
+ position, mf->buffer[mf->read_pos - mf->read_ahead - 1]);
+
+ if (is_literal_state(coder->state)) {
+ // Previous LZMA-symbol was a literal. Encode a normal
+ // literal without a match byte.
+ rc_bittree(&coder->rc, subcoder, 8, cur_byte);
+ } else {
+ // Previous LZMA-symbol was a match. Use the last byte of
+ // the match as a "match byte". That is, compare the bits
+ // of the current literal and the match byte.
+ const uint8_t match_byte = mf->buffer[
+ mf->read_pos - coder->reps[0] - 1
+ - mf->read_ahead];
+ literal_matched(&coder->rc, subcoder, match_byte, cur_byte);
+ }
+
+ update_literal(coder->state);
+}
+
+
+//////////////////
+// Match length //
+//////////////////
+
+static void
+length_update_prices(lzma_length_encoder *lc, const uint32_t pos_state)
+{
+ const uint32_t table_size = lc->table_size;
+ lc->counters[pos_state] = table_size;
+
+ const uint32_t a0 = rc_bit_0_price(lc->choice);
+ const uint32_t a1 = rc_bit_1_price(lc->choice);
+ const uint32_t b0 = a1 + rc_bit_0_price(lc->choice2);
+ const uint32_t b1 = a1 + rc_bit_1_price(lc->choice2);
+ uint32_t *const prices = lc->prices[pos_state];
+
+ uint32_t i;
+ for (i = 0; i < table_size && i < LEN_LOW_SYMBOLS; ++i)
+ prices[i] = a0 + rc_bittree_price(lc->low[pos_state],
+ LEN_LOW_BITS, i);
+
+ for (; i < table_size && i < LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS; ++i)
+ prices[i] = b0 + rc_bittree_price(lc->mid[pos_state],
+ LEN_MID_BITS, i - LEN_LOW_SYMBOLS);
+
+ for (; i < table_size; ++i)
+ prices[i] = b1 + rc_bittree_price(lc->high, LEN_HIGH_BITS,
+ i - LEN_LOW_SYMBOLS - LEN_MID_SYMBOLS);
+
+ return;
+}
+
+
+static inline void
+length(lzma_range_encoder *rc, lzma_length_encoder *lc,
+ const uint32_t pos_state, uint32_t len, const bool fast_mode)
+{
+ assert(len <= MATCH_LEN_MAX);
+ len -= MATCH_LEN_MIN;
+
+ if (len < LEN_LOW_SYMBOLS) {
+ rc_bit(rc, &lc->choice, 0);
+ rc_bittree(rc, lc->low[pos_state], LEN_LOW_BITS, len);
+ } else {
+ rc_bit(rc, &lc->choice, 1);
+ len -= LEN_LOW_SYMBOLS;
+
+ if (len < LEN_MID_SYMBOLS) {
+ rc_bit(rc, &lc->choice2, 0);
+ rc_bittree(rc, lc->mid[pos_state], LEN_MID_BITS, len);
+ } else {
+ rc_bit(rc, &lc->choice2, 1);
+ len -= LEN_MID_SYMBOLS;
+ rc_bittree(rc, lc->high, LEN_HIGH_BITS, len);
+ }
+ }
+
+ // Only getoptimum uses the prices, so don't update the table when
+ // in fast mode.
+ if (!fast_mode)
+ if (--lc->counters[pos_state] == 0)
+ length_update_prices(lc, pos_state);
+}
+
+
+///////////
+// Match //
+///////////
+
+static inline void
+match(lzma_coder *coder, const uint32_t pos_state,
+ const uint32_t distance, const uint32_t len)
+{
+ update_match(coder->state);
+
+ length(&coder->rc, &coder->match_len_encoder, pos_state, len,
+ coder->fast_mode);
+
+ const uint32_t pos_slot = get_pos_slot(distance);
+ const uint32_t len_to_pos_state = get_len_to_pos_state(len);
+ rc_bittree(&coder->rc, coder->pos_slot[len_to_pos_state],
+ POS_SLOT_BITS, pos_slot);
+
+ if (pos_slot >= START_POS_MODEL_INDEX) {
+ const uint32_t footer_bits = (pos_slot >> 1) - 1;
+ const uint32_t base = (2 | (pos_slot & 1)) << footer_bits;
+ const uint32_t pos_reduced = distance - base;
+
+ if (pos_slot < END_POS_MODEL_INDEX) {
+ // Careful here: base - pos_slot - 1 can be -1, but
+ // rc_bittree_reverse starts at probs[1], not probs[0].
+ rc_bittree_reverse(&coder->rc,
+ coder->pos_special + base - pos_slot - 1,
+ footer_bits, pos_reduced);
+ } else {
+ rc_direct(&coder->rc, pos_reduced >> ALIGN_BITS,
+ footer_bits - ALIGN_BITS);
+ rc_bittree_reverse(
+ &coder->rc, coder->pos_align,
+ ALIGN_BITS, pos_reduced & ALIGN_MASK);
+ ++coder->align_price_count;
+ }
+ }
+
+ coder->reps[3] = coder->reps[2];
+ coder->reps[2] = coder->reps[1];
+ coder->reps[1] = coder->reps[0];
+ coder->reps[0] = distance;
+ ++coder->match_price_count;
+}
+
+
+////////////////////
+// Repeated match //
+////////////////////
+
+static inline void
+rep_match(lzma_coder *coder, const uint32_t pos_state,
+ const uint32_t rep, const uint32_t len)
+{
+ if (rep == 0) {
+ rc_bit(&coder->rc, &coder->is_rep0[coder->state], 0);
+ rc_bit(&coder->rc,
+ &coder->is_rep0_long[coder->state][pos_state],
+ len != 1);
+ } else {
+ const uint32_t distance = coder->reps[rep];
+ rc_bit(&coder->rc, &coder->is_rep0[coder->state], 1);
+
+ if (rep == 1) {
+ rc_bit(&coder->rc, &coder->is_rep1[coder->state], 0);
+ } else {
+ rc_bit(&coder->rc, &coder->is_rep1[coder->state], 1);
+ rc_bit(&coder->rc, &coder->is_rep2[coder->state],
+ rep - 2);
+
+ if (rep == 3)
+ coder->reps[3] = coder->reps[2];
+
+ coder->reps[2] = coder->reps[1];
+ }
+
+ coder->reps[1] = coder->reps[0];
+ coder->reps[0] = distance;
+ }
+
+ if (len == 1) {
+ update_short_rep(coder->state);
+ } else {
+ length(&coder->rc, &coder->rep_len_encoder, pos_state, len,
+ coder->fast_mode);
+ update_long_rep(coder->state);
+ }
+}
+
+
+//////////
+// Main //
+//////////
+
+static void
+encode_symbol(lzma_coder *coder, lzma_mf *mf,
+ uint32_t back, uint32_t len, uint32_t position)
+{
+ const uint32_t pos_state = position & coder->pos_mask;
+
+ if (back == UINT32_MAX) {
+ // Literal i.e. eight-bit byte
+ assert(len == 1);
+ rc_bit(&coder->rc,
+ &coder->is_match[coder->state][pos_state], 0);
+ literal(coder, mf, position);
+ } else {
+ // Some type of match
+ rc_bit(&coder->rc,
+ &coder->is_match[coder->state][pos_state], 1);
+
+ if (back < REP_DISTANCES) {
+ // It's a repeated match i.e. the same distance
+ // has been used earlier.
+ rc_bit(&coder->rc, &coder->is_rep[coder->state], 1);
+ rep_match(coder, pos_state, back, len);
+ } else {
+ // Normal match
+ rc_bit(&coder->rc, &coder->is_rep[coder->state], 0);
+ match(coder, pos_state, back - REP_DISTANCES, len);
+ }
+ }
+
+ assert(mf->read_ahead >= len);
+ mf->read_ahead -= len;
+}
+
+
+static bool
+encode_init(lzma_coder *coder, lzma_mf *mf)
+{
+ assert(mf_position(mf) == 0);
+
+ if (mf->read_pos == mf->read_limit) {
+ if (mf->action == LZMA_RUN)
+ return false; // We cannot do anything.
+
+ // We are finishing (we cannot get here when flushing).
+ assert(mf->write_pos == mf->read_pos);
+ assert(mf->action == LZMA_FINISH);
+ } else {
+ // Do the actual initialization. The first LZMA symbol must
+ // always be a literal.
+ mf_skip(mf, 1);
+ mf->read_ahead = 0;
+ rc_bit(&coder->rc, &coder->is_match[0][0], 0);
+ rc_bittree(&coder->rc, coder->literal[0], 8, mf->buffer[0]);
+ }
+
+ // Initialization is done (except if empty file).
+ coder->is_initialized = true;
+
+ return true;
+}
+
+
+static void
+encode_eopm(lzma_coder *coder, uint32_t position)
+{
+ const uint32_t pos_state = position & coder->pos_mask;
+ rc_bit(&coder->rc, &coder->is_match[coder->state][pos_state], 1);
+ rc_bit(&coder->rc, &coder->is_rep[coder->state], 0);
+ match(coder, pos_state, UINT32_MAX, MATCH_LEN_MIN);
+}
+
+
+/// Number of bytes that a single encoding loop in lzma_lzma_encode() can
+/// consume from the dictionary. This limit comes from lzma_lzma_optimum()
+/// and may need to be updated if that function is significantly modified.
+#define LOOP_INPUT_MAX (OPTS + 1)
+
+
+extern lzma_ret
+lzma_lzma_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
+ uint8_t *restrict out, size_t *restrict out_pos,
+ size_t out_size, uint32_t limit)
+{
+ // Initialize the stream if no data has been encoded yet.
+ if (!coder->is_initialized && !encode_init(coder, mf))
+ return LZMA_OK;
+
+ // Get the lowest bits of the uncompressed offset from the LZ layer.
+ uint32_t position = mf_position(mf);
+
+ while (true) {
+		// Encode pending bits, if any. Calling this before encoding
+		// the next symbol is needed only with plain LZMA, since
+		// LZMA2 always provides a big enough buffer to flush
+		// everything out from the range encoder. For the same reason,
+		// rc_encode() never returns true when this function is used
+		// as part of the LZMA2 encoder.
+ if (rc_encode(&coder->rc, out, out_pos, out_size)) {
+ assert(limit == UINT32_MAX);
+ return LZMA_OK;
+ }
+
+		// With LZMA2 we need to take care that the compressed size
+		// of a chunk doesn't get too big.
+ // TODO
+ if (limit != UINT32_MAX
+ && (mf->read_pos - mf->read_ahead >= limit
+ || *out_pos + rc_pending(&coder->rc)
+ >= LZMA2_CHUNK_MAX
+ - LOOP_INPUT_MAX))
+ break;
+
+ // Check that there is some input to process.
+ if (mf->read_pos >= mf->read_limit) {
+ if (mf->action == LZMA_RUN)
+ return LZMA_OK;
+
+ if (mf->read_ahead == 0)
+ break;
+ }
+
+ // Get optimal match (repeat position and length).
+ // Value ranges for pos:
+ // - [0, REP_DISTANCES): repeated match
+ // - [REP_DISTANCES, UINT32_MAX):
+ // match at (pos - REP_DISTANCES)
+ // - UINT32_MAX: not a match but a literal
+ // Value ranges for len:
+ // - [MATCH_LEN_MIN, MATCH_LEN_MAX]
+ uint32_t len;
+ uint32_t back;
+
+ if (coder->fast_mode)
+ lzma_lzma_optimum_fast(coder, mf, &back, &len);
+ else
+ lzma_lzma_optimum_normal(
+ coder, mf, &back, &len, position);
+
+ encode_symbol(coder, mf, back, len, position);
+
+ position += len;
+ }
+
+ if (!coder->is_flushed) {
+ coder->is_flushed = true;
+
+ // We don't support encoding plain LZMA streams without EOPM,
+ // and LZMA2 doesn't use EOPM at LZMA level.
+ if (limit == UINT32_MAX)
+ encode_eopm(coder, position);
+
+ // Flush the remaining bytes from the range encoder.
+ rc_flush(&coder->rc);
+
+ // Copy the remaining bytes to the output buffer. If there
+ // isn't enough output space, we will copy out the remaining
+ // bytes on the next call to this function by using
+ // the rc_encode() call in the encoding loop above.
+ if (rc_encode(&coder->rc, out, out_pos, out_size)) {
+ assert(limit == UINT32_MAX);
+ return LZMA_OK;
+ }
+ }
+
+ // Make it ready for the next LZMA2 chunk.
+ coder->is_flushed = false;
+
+ return LZMA_STREAM_END;
+}
+
+
+static lzma_ret
+lzma_encode(lzma_coder *restrict coder, lzma_mf *restrict mf,
+ uint8_t *restrict out, size_t *restrict out_pos,
+ size_t out_size)
+{
+ // Plain LZMA has no support for sync-flushing.
+ if (unlikely(mf->action == LZMA_SYNC_FLUSH))
+ return LZMA_OPTIONS_ERROR;
+
+ return lzma_lzma_encode(coder, mf, out, out_pos, out_size, UINT32_MAX);
+}
+
+
+////////////////////
+// Initialization //
+////////////////////
+
+static bool
+is_options_valid(const lzma_options_lzma *options)
+{
+	// Validate some of the options. The LZ encoder validates nice_len
+	// too, but we need a valid value already at this point.
+ return is_lclppb_valid(options)
+ && options->nice_len >= MATCH_LEN_MIN
+ && options->nice_len <= MATCH_LEN_MAX
+ && (options->mode == LZMA_MODE_FAST
+ || options->mode == LZMA_MODE_NORMAL);
+}
+
+
+static void
+set_lz_options(lzma_lz_options *lz_options, const lzma_options_lzma *options)
+{
+	// The LZ encoder initialization does the validation for these, so
+	// we don't need to validate here.
+ lz_options->before_size = OPTS;
+ lz_options->dict_size = options->dict_size;
+ lz_options->after_size = LOOP_INPUT_MAX;
+ lz_options->match_len_max = MATCH_LEN_MAX;
+ lz_options->nice_len = options->nice_len;
+ lz_options->match_finder = options->mf;
+ lz_options->depth = options->depth;
+ lz_options->preset_dict = options->preset_dict;
+ lz_options->preset_dict_size = options->preset_dict_size;
+ return;
+}
+
+
+static void
+length_encoder_reset(lzma_length_encoder *lencoder,
+ const uint32_t num_pos_states, const bool fast_mode)
+{
+ bit_reset(lencoder->choice);
+ bit_reset(lencoder->choice2);
+
+ for (size_t pos_state = 0; pos_state < num_pos_states; ++pos_state) {
+ bittree_reset(lencoder->low[pos_state], LEN_LOW_BITS);
+ bittree_reset(lencoder->mid[pos_state], LEN_MID_BITS);
+ }
+
+ bittree_reset(lencoder->high, LEN_HIGH_BITS);
+
+ if (!fast_mode)
+ for (size_t pos_state = 0; pos_state < num_pos_states;
+ ++pos_state)
+ length_update_prices(lencoder, pos_state);
+
+ return;
+}
+
+
+extern lzma_ret
+lzma_lzma_encoder_reset(lzma_coder *coder, const lzma_options_lzma *options)
+{
+ if (!is_options_valid(options))
+ return LZMA_OPTIONS_ERROR;
+
+ coder->pos_mask = (1U << options->pb) - 1;
+ coder->literal_context_bits = options->lc;
+ coder->literal_pos_mask = (1U << options->lp) - 1;
+
+ // Range coder
+ rc_reset(&coder->rc);
+
+ // State
+ coder->state = 0;
+ for (size_t i = 0; i < REP_DISTANCES; ++i)
+ coder->reps[i] = 0;
+
+ literal_init(coder->literal, options->lc, options->lp);
+
+ // Bit encoders
+ for (size_t i = 0; i < STATES; ++i) {
+ for (size_t j = 0; j <= coder->pos_mask; ++j) {
+ bit_reset(coder->is_match[i][j]);
+ bit_reset(coder->is_rep0_long[i][j]);
+ }
+
+ bit_reset(coder->is_rep[i]);
+ bit_reset(coder->is_rep0[i]);
+ bit_reset(coder->is_rep1[i]);
+ bit_reset(coder->is_rep2[i]);
+ }
+
+ for (size_t i = 0; i < FULL_DISTANCES - END_POS_MODEL_INDEX; ++i)
+ bit_reset(coder->pos_special[i]);
+
+ // Bit tree encoders
+ for (size_t i = 0; i < LEN_TO_POS_STATES; ++i)
+ bittree_reset(coder->pos_slot[i], POS_SLOT_BITS);
+
+ bittree_reset(coder->pos_align, ALIGN_BITS);
+
+ // Length encoders
+ length_encoder_reset(&coder->match_len_encoder,
+ 1U << options->pb, coder->fast_mode);
+
+ length_encoder_reset(&coder->rep_len_encoder,
+ 1U << options->pb, coder->fast_mode);
+
+	// Price counts are incremented every time appropriate probabilities
+	// are changed. Price counts are set to zero when the price tables
+	// are updated, which is done when the appropriate price counts have
+	// a big enough value and lzma_mf.read_ahead == 0, which happens at
+	// least every OPTS (a few thousand) possible price count increments.
+ //
+	// By resetting the price counts to UINT32_MAX / 2, we make sure that
+	// the price tables will be initialized before they are used (since
+	// the value is definitely big enough), and that it is OK to increment
+	// the price counts without risk of integer overflow (since
+	// UINT32_MAX / 2 is small enough). The current code doesn't increment
+	// the price counts before initializing the price tables, but that may
+	// be done in the future if we add support for saving the state
+	// between LZMA2 chunks.
+ coder->match_price_count = UINT32_MAX / 2;
+ coder->align_price_count = UINT32_MAX / 2;
+
+ coder->opts_end_index = 0;
+ coder->opts_current_index = 0;
+
+ return LZMA_OK;
+}
+
+
+extern lzma_ret
+lzma_lzma_encoder_create(lzma_coder **coder_ptr, lzma_allocator *allocator,
+ const lzma_options_lzma *options, lzma_lz_options *lz_options)
+{
+ // Allocate lzma_coder if it wasn't already allocated.
+ if (*coder_ptr == NULL) {
+ *coder_ptr = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (*coder_ptr == NULL)
+ return LZMA_MEM_ERROR;
+ }
+
+ lzma_coder *coder = *coder_ptr;
+
+	// Set the compression mode. We haven't validated the options yet,
+	// but that's OK here, since nothing bad happens with invalid
+	// options in the code below, and they will get rejected by the
+	// lzma_lzma_encoder_reset() call at the end of this function.
+ switch (options->mode) {
+ case LZMA_MODE_FAST:
+ coder->fast_mode = true;
+ break;
+
+ case LZMA_MODE_NORMAL: {
+ coder->fast_mode = false;
+
+ // Set dist_table_size.
+		// Round the dictionary size up to the next 2^n.
+ uint32_t log_size = 0;
+ while ((UINT32_C(1) << log_size) < options->dict_size)
+ ++log_size;
+
+ coder->dist_table_size = log_size * 2;
+
+ // Length encoders' price table size
+ coder->match_len_encoder.table_size
+ = options->nice_len + 1 - MATCH_LEN_MIN;
+ coder->rep_len_encoder.table_size
+ = options->nice_len + 1 - MATCH_LEN_MIN;
+ break;
+ }
+
+ default:
+ return LZMA_OPTIONS_ERROR;
+ }
+
+	// We don't need to write the first byte as a literal if there is
+	// a non-empty preset dictionary. encode_init() wouldn't even work
+	// if there is a non-empty preset dictionary, because encode_init()
+	// assumes that the position is zero and the previous byte is also
+	// zero.
+ coder->is_initialized = options->preset_dict != NULL
+ && options->preset_dict_size > 0;
+ coder->is_flushed = false;
+
+ set_lz_options(lz_options, options);
+
+ return lzma_lzma_encoder_reset(coder, options);
+}
+
+
+static lzma_ret
+lzma_encoder_init(lzma_lz_encoder *lz, lzma_allocator *allocator,
+ const void *options, lzma_lz_options *lz_options)
+{
+ lz->code = &lzma_encode;
+ return lzma_lzma_encoder_create(
+ &lz->coder, allocator, options, lz_options);
+}
+
+
+extern lzma_ret
+lzma_lzma_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ return lzma_lz_encoder_init(
+ next, allocator, filters, &lzma_encoder_init);
+}
+
+
+extern uint64_t
+lzma_lzma_encoder_memusage(const void *options)
+{
+ if (!is_options_valid(options))
+ return UINT64_MAX;
+
+ lzma_lz_options lz_options;
+ set_lz_options(&lz_options, options);
+
+ const uint64_t lz_memusage = lzma_lz_encoder_memusage(&lz_options);
+ if (lz_memusage == UINT64_MAX)
+ return UINT64_MAX;
+
+ return (uint64_t)(sizeof(lzma_coder)) + lz_memusage;
+}
+
+
+extern bool
+lzma_lzma_lclppb_encode(const lzma_options_lzma *options, uint8_t *byte)
+{
+ if (!is_lclppb_valid(options))
+ return true;
+
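+	// For example, with the defaults lc = 3, lp = 0, pb = 2 this packs
+	// to (2 * 5 + 0) * 9 + 3 = 93 (0x5D).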
+ *byte = (options->pb * 5 + options->lp) * 9 + options->lc;
+ assert(*byte <= (4 * 5 + 4) * 9 + 8);
+
+ return false;
+}
+
+
+#ifdef HAVE_ENCODER_LZMA1
+extern lzma_ret
+lzma_lzma_props_encode(const void *options, uint8_t *out)
+{
+ const lzma_options_lzma *const opt = options;
+
+ if (lzma_lzma_lclppb_encode(opt, out))
+ return LZMA_PROG_ERROR;
+
+ integer_write_32(out + 1, opt->dict_size);
+
+ return LZMA_OK;
+}
+#endif
+
+
+extern LZMA_API(lzma_bool)
+lzma_mode_is_supported(lzma_mode mode)
+{
+ return mode == LZMA_MODE_FAST || mode == LZMA_MODE_NORMAL;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder.h
new file mode 100644
index 00000000..4d061b3d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder.h
@@ -0,0 +1,56 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma_encoder.h
+/// \brief LZMA encoder API
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_LZMA_ENCODER_H
+#define LZMA_LZMA_ENCODER_H
+
+#include "common.h"
+
+
+extern lzma_ret lzma_lzma_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+
+extern uint64_t lzma_lzma_encoder_memusage(const void *options);
+
+extern lzma_ret lzma_lzma_props_encode(const void *options, uint8_t *out);
+
+
+/// Encodes lc/lp/pb into one byte. Returns false on success and true on error.
+extern bool lzma_lzma_lclppb_encode(
+ const lzma_options_lzma *options, uint8_t *byte);
+
+
+#ifdef LZMA_LZ_ENCODER_H
+
+/// Initializes raw LZMA encoder; this is used by LZMA2.
+extern lzma_ret lzma_lzma_encoder_create(
+ lzma_coder **coder_ptr, lzma_allocator *allocator,
+ const lzma_options_lzma *options, lzma_lz_options *lz_options);
+
+
+/// Resets an already initialized LZMA encoder; this is used by LZMA2.
+extern lzma_ret lzma_lzma_encoder_reset(
+ lzma_coder *coder, const lzma_options_lzma *options);
+
+
+extern lzma_ret lzma_lzma_encode(lzma_coder *restrict coder,
+ lzma_mf *restrict mf, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size,
+ uint32_t read_limit);
+
+#endif
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_optimum_fast.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_optimum_fast.c
new file mode 100644
index 00000000..d1e59ee4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_optimum_fast.c
@@ -0,0 +1,181 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma_encoder_optimum_fast.c
+//
+// Author: Igor Pavlov
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "lzma_encoder_private.h"
+
+
+#define change_pair(small_dist, big_dist) \
+ (((big_dist) >> 7) > (small_dist))
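+// In other words, change_pair() is true when big_dist is more than roughly
+// 128 times small_dist.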
+
+
+extern void
+lzma_lzma_optimum_fast(lzma_coder *restrict coder, lzma_mf *restrict mf,
+ uint32_t *restrict back_res, uint32_t *restrict len_res)
+{
+ const uint32_t nice_len = mf->nice_len;
+
+ uint32_t len_main;
+ uint32_t matches_count;
+ if (mf->read_ahead == 0) {
+ len_main = mf_find(mf, &matches_count, coder->matches);
+ } else {
+ assert(mf->read_ahead == 1);
+ len_main = coder->longest_match_length;
+ matches_count = coder->matches_count;
+ }
+
+ const uint8_t *buf = mf_ptr(mf) - 1;
+ const uint32_t buf_avail = MIN(mf_avail(mf) + 1, MATCH_LEN_MAX);
+
+ if (buf_avail < 2) {
+ // There's not enough input left to encode a match.
+ *back_res = UINT32_MAX;
+ *len_res = 1;
+ return;
+ }
+
+ // Look for repeated matches; scan the previous four match distances
+ uint32_t rep_len = 0;
+ uint32_t rep_index = 0;
+
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
+ // Pointer to the beginning of the match candidate
+ const uint8_t *const buf_back = buf - coder->reps[i] - 1;
+
+ // If the first two bytes (2 == MATCH_LEN_MIN) do not match,
+ // this rep is not useful.
+ if (not_equal_16(buf, buf_back))
+ continue;
+
+ // The first two bytes matched.
+ // Calculate the length of the match.
+ uint32_t len;
+ for (len = 2; len < buf_avail
+ && buf[len] == buf_back[len]; ++len) ;
+
+ // If we have found a repeated match that is at least
+		// nice_len long, return it immediately.
+ if (len >= nice_len) {
+ *back_res = i;
+ *len_res = len;
+ mf_skip(mf, len - 1);
+ return;
+ }
+
+ if (len > rep_len) {
+ rep_index = i;
+ rep_len = len;
+ }
+ }
+
+ // We didn't find a long enough repeated match. Encode it as a normal
+ // match if the match length is at least nice_len.
+ if (len_main >= nice_len) {
+ *back_res = coder->matches[matches_count - 1].dist
+ + REP_DISTANCES;
+ *len_res = len_main;
+ mf_skip(mf, len_main - 1);
+ return;
+ }
+
+ uint32_t back_main = 0;
+ if (len_main >= 2) {
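+	// Levels 0 and 1 thus get a 64 KiB dictionary; levels 2-9 get
+	// 1 << (level + 17) bytes, i.e. 512 KiB at level 2 up to 64 MiB
+	// at level 9.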
+ back_main = coder->matches[matches_count - 1].dist;
+
+ while (matches_count > 1 && len_main ==
+ coder->matches[matches_count - 2].len + 1) {
+ if (!change_pair(coder->matches[
+ matches_count - 2].dist,
+ back_main))
+ break;
+
+ --matches_count;
+ len_main = coder->matches[matches_count - 1].len;
+ back_main = coder->matches[matches_count - 1].dist;
+ }
+
+ if (len_main == 2 && back_main >= 0x80)
+ len_main = 1;
+ }
+
+ if (rep_len >= 2) {
+ if (rep_len + 1 >= len_main
+ || (rep_len + 2 >= len_main
+ && back_main > (UINT32_C(1) << 9))
+ || (rep_len + 3 >= len_main
+ && back_main > (UINT32_C(1) << 15))) {
+ *back_res = rep_index;
+ *len_res = rep_len;
+ mf_skip(mf, rep_len - 1);
+ return;
+ }
+ }
+
+ if (len_main < 2 || buf_avail <= 2) {
+ *back_res = UINT32_MAX;
+ *len_res = 1;
+ return;
+ }
+
+ // Get the matches for the next byte. If we find a better match,
+ // the current byte is encoded as a literal.
+ coder->longest_match_length = mf_find(mf,
+ &coder->matches_count, coder->matches);
+
+ if (coder->longest_match_length >= 2) {
+ const uint32_t new_dist = coder->matches[
+ coder->matches_count - 1].dist;
+
+ if ((coder->longest_match_length >= len_main
+ && new_dist < back_main)
+ || (coder->longest_match_length == len_main + 1
+ && !change_pair(back_main, new_dist))
+ || (coder->longest_match_length > len_main + 1)
+ || (coder->longest_match_length + 1 >= len_main
+ && len_main >= 3
+ && change_pair(new_dist, back_main))) {
+ *back_res = UINT32_MAX;
+ *len_res = 1;
+ return;
+ }
+ }
+
+	// In contrast to the LZMA SDK, the dictionary cannot have been moved
+	// between mf_find() calls, so it is safe to just increment the old
+	// buf pointer instead of recalculating it with mf_ptr().
+ ++buf;
+
+ const uint32_t limit = len_main - 1;
+
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
+ const uint8_t *const buf_back = buf - coder->reps[i] - 1;
+
+ if (not_equal_16(buf, buf_back))
+ continue;
+
+ uint32_t len;
+ for (len = 2; len < limit
+ && buf[len] == buf_back[len]; ++len) ;
+
+ if (len >= limit) {
+ *back_res = UINT32_MAX;
+ *len_res = 1;
+ return;
+ }
+ }
+
+ *back_res = back_main + REP_DISTANCES;
+ *len_res = len_main;
+ mf_skip(mf, len_main - 2);
+ return;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_optimum_normal.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_optimum_normal.c
new file mode 100644
index 00000000..08bc26f5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_optimum_normal.c
@@ -0,0 +1,870 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma_encoder_optimum_normal.c
+//
+// Author: Igor Pavlov
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "lzma_encoder_private.h"
+#include "fastpos.h"
+
+
+////////////
+// Prices //
+////////////
+
+static uint32_t
+get_literal_price(const lzma_coder *const coder, const uint32_t pos,
+ const uint32_t prev_byte, const bool match_mode,
+ uint32_t match_byte, uint32_t symbol)
+{
+ const probability *const subcoder = literal_subcoder(coder->literal,
+ coder->literal_context_bits, coder->literal_pos_mask,
+ pos, prev_byte);
+
+ uint32_t price = 0;
+
+ if (!match_mode) {
+ price = rc_bittree_price(subcoder, 8, symbol);
+ } else {
+ uint32_t offset = 0x100;
+ symbol += UINT32_C(1) << 8;
+
+ do {
+ match_byte <<= 1;
+
+ const uint32_t match_bit = match_byte & offset;
+ const uint32_t subcoder_index
+ = offset + match_bit + (symbol >> 8);
+ const uint32_t bit = (symbol >> 7) & 1;
+ price += rc_bit_price(subcoder[subcoder_index], bit);
+
+ symbol <<= 1;
+ offset &= ~(match_byte ^ symbol);
+
+ } while (symbol < (UINT32_C(1) << 16));
+ }
+
+ return price;
+}
+
+
+static inline uint32_t
+get_len_price(const lzma_length_encoder *const lencoder,
+ const uint32_t len, const uint32_t pos_state)
+{
+ // NOTE: Unlike the other price tables, length prices are updated
+ // in lzma_encoder.c
+ return lencoder->prices[pos_state][len - MATCH_LEN_MIN];
+}
+
+
+static inline uint32_t
+get_short_rep_price(const lzma_coder *const coder,
+ const lzma_lzma_state state, const uint32_t pos_state)
+{
+ return rc_bit_0_price(coder->is_rep0[state])
+ + rc_bit_0_price(coder->is_rep0_long[state][pos_state]);
+}
+
+
+static inline uint32_t
+get_pure_rep_price(const lzma_coder *const coder, const uint32_t rep_index,
+ const lzma_lzma_state state, uint32_t pos_state)
+{
+ uint32_t price;
+
+ if (rep_index == 0) {
+ price = rc_bit_0_price(coder->is_rep0[state]);
+ price += rc_bit_1_price(coder->is_rep0_long[state][pos_state]);
+ } else {
+ price = rc_bit_1_price(coder->is_rep0[state]);
+
+ if (rep_index == 1) {
+ price += rc_bit_0_price(coder->is_rep1[state]);
+ } else {
+ price += rc_bit_1_price(coder->is_rep1[state]);
+ price += rc_bit_price(coder->is_rep2[state],
+ rep_index - 2);
+ }
+ }
+
+ return price;
+}
+
+
+static inline uint32_t
+get_rep_price(const lzma_coder *const coder, const uint32_t rep_index,
+ const uint32_t len, const lzma_lzma_state state,
+ const uint32_t pos_state)
+{
+ return get_len_price(&coder->rep_len_encoder, len, pos_state)
+ + get_pure_rep_price(coder, rep_index, state, pos_state);
+}
+
+
+static inline uint32_t
+get_pos_len_price(const lzma_coder *const coder, const uint32_t pos,
+ const uint32_t len, const uint32_t pos_state)
+{
+ const uint32_t len_to_pos_state = get_len_to_pos_state(len);
+ uint32_t price;
+
+ if (pos < FULL_DISTANCES) {
+ price = coder->distances_prices[len_to_pos_state][pos];
+ } else {
+ const uint32_t pos_slot = get_pos_slot_2(pos);
+ price = coder->pos_slot_prices[len_to_pos_state][pos_slot]
+ + coder->align_prices[pos & ALIGN_MASK];
+ }
+
+ price += get_len_price(&coder->match_len_encoder, len, pos_state);
+
+ return price;
+}
+
+
+static void
+fill_distances_prices(lzma_coder *coder)
+{
+ for (uint32_t len_to_pos_state = 0;
+ len_to_pos_state < LEN_TO_POS_STATES;
+ ++len_to_pos_state) {
+
+ uint32_t *const pos_slot_prices
+ = coder->pos_slot_prices[len_to_pos_state];
+
+ // Price to encode the pos_slot.
+ for (uint32_t pos_slot = 0;
+ pos_slot < coder->dist_table_size; ++pos_slot)
+ pos_slot_prices[pos_slot] = rc_bittree_price(
+ coder->pos_slot[len_to_pos_state],
+ POS_SLOT_BITS, pos_slot);
+
+ // For matches with distance >= FULL_DISTANCES, add the price
+ // of the direct bits part of the match distance. (Align bits
+ // are handled by fill_align_prices()).
+ for (uint32_t pos_slot = END_POS_MODEL_INDEX;
+ pos_slot < coder->dist_table_size; ++pos_slot)
+ pos_slot_prices[pos_slot] += rc_direct_price(
+ ((pos_slot >> 1) - 1) - ALIGN_BITS);
+
+ // Distances in the range [0, 3] are fully encoded with
+ // pos_slot, so they are used for coder->distances_prices
+ // as is.
+ for (uint32_t i = 0; i < START_POS_MODEL_INDEX; ++i)
+ coder->distances_prices[len_to_pos_state][i]
+ = pos_slot_prices[i];
+ }
+
+ // Distances in the range [4, 127] depend on pos_slot and pos_special.
+ // We do this in a loop separate from the above loop to avoid
+ // redundant calls to get_pos_slot().
+ for (uint32_t i = START_POS_MODEL_INDEX; i < FULL_DISTANCES; ++i) {
+ const uint32_t pos_slot = get_pos_slot(i);
+ const uint32_t footer_bits = ((pos_slot >> 1) - 1);
+ const uint32_t base = (2 | (pos_slot & 1)) << footer_bits;
+ const uint32_t price = rc_bittree_reverse_price(
+ coder->pos_special + base - pos_slot - 1,
+ footer_bits, i - base);
+
+ for (uint32_t len_to_pos_state = 0;
+ len_to_pos_state < LEN_TO_POS_STATES;
+ ++len_to_pos_state)
+ coder->distances_prices[len_to_pos_state][i]
+ = price + coder->pos_slot_prices[
+ len_to_pos_state][pos_slot];
+ }
+
+ coder->match_price_count = 0;
+ return;
+}
+
+
+static void
+fill_align_prices(lzma_coder *coder)
+{
+ for (uint32_t i = 0; i < ALIGN_TABLE_SIZE; ++i)
+ coder->align_prices[i] = rc_bittree_reverse_price(
+ coder->pos_align, ALIGN_BITS, i);
+
+ coder->align_price_count = 0;
+ return;
+}
+
+
+/////////////
+// Optimal //
+/////////////
+
+static inline void
+make_literal(lzma_optimal *optimal)
+{
+ optimal->back_prev = UINT32_MAX;
+ optimal->prev_1_is_literal = false;
+}
+
+
+static inline void
+make_short_rep(lzma_optimal *optimal)
+{
+ optimal->back_prev = 0;
+ optimal->prev_1_is_literal = false;
+}
+
+
+#define is_short_rep(optimal) \
+ ((optimal).back_prev == 0)
+
+
+static void
+backward(lzma_coder *restrict coder, uint32_t *restrict len_res,
+ uint32_t *restrict back_res, uint32_t cur)
+{
+ coder->opts_end_index = cur;
+
+ uint32_t pos_mem = coder->opts[cur].pos_prev;
+ uint32_t back_mem = coder->opts[cur].back_prev;
+
+ do {
+ if (coder->opts[cur].prev_1_is_literal) {
+ make_literal(&coder->opts[pos_mem]);
+ coder->opts[pos_mem].pos_prev = pos_mem - 1;
+
+ if (coder->opts[cur].prev_2) {
+ coder->opts[pos_mem - 1].prev_1_is_literal
+ = false;
+ coder->opts[pos_mem - 1].pos_prev
+ = coder->opts[cur].pos_prev_2;
+ coder->opts[pos_mem - 1].back_prev
+ = coder->opts[cur].back_prev_2;
+ }
+ }
+
+ const uint32_t pos_prev = pos_mem;
+ const uint32_t back_cur = back_mem;
+
+ back_mem = coder->opts[pos_prev].back_prev;
+ pos_mem = coder->opts[pos_prev].pos_prev;
+
+ coder->opts[pos_prev].back_prev = back_cur;
+ coder->opts[pos_prev].pos_prev = cur;
+ cur = pos_prev;
+
+ } while (cur != 0);
+
+ coder->opts_current_index = coder->opts[0].pos_prev;
+ *len_res = coder->opts[0].pos_prev;
+ *back_res = coder->opts[0].back_prev;
+
+ return;
+}
+
+
+//////////
+// Main //
+//////////
+
+static inline uint32_t
+helper1(lzma_coder *restrict coder, lzma_mf *restrict mf,
+ uint32_t *restrict back_res, uint32_t *restrict len_res,
+ uint32_t position)
+{
+ const uint32_t nice_len = mf->nice_len;
+
+ uint32_t len_main;
+ uint32_t matches_count;
+
+ if (mf->read_ahead == 0) {
+ len_main = mf_find(mf, &matches_count, coder->matches);
+ } else {
+ assert(mf->read_ahead == 1);
+ len_main = coder->longest_match_length;
+ matches_count = coder->matches_count;
+ }
+
+ const uint32_t buf_avail = MIN(mf_avail(mf) + 1, MATCH_LEN_MAX);
+ if (buf_avail < 2) {
+ *back_res = UINT32_MAX;
+ *len_res = 1;
+ return UINT32_MAX;
+ }
+
+ const uint8_t *const buf = mf_ptr(mf) - 1;
+
+ uint32_t rep_lens[REP_DISTANCES];
+ uint32_t rep_max_index = 0;
+
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
+ const uint8_t *const buf_back = buf - coder->reps[i] - 1;
+
+ if (not_equal_16(buf, buf_back)) {
+ rep_lens[i] = 0;
+ continue;
+ }
+
+ uint32_t len_test;
+ for (len_test = 2; len_test < buf_avail
+ && buf[len_test] == buf_back[len_test];
+ ++len_test) ;
+
+ rep_lens[i] = len_test;
+ if (len_test > rep_lens[rep_max_index])
+ rep_max_index = i;
+ }
+
+ if (rep_lens[rep_max_index] >= nice_len) {
+ *back_res = rep_max_index;
+ *len_res = rep_lens[rep_max_index];
+ mf_skip(mf, *len_res - 1);
+ return UINT32_MAX;
+ }
+
+
+ if (len_main >= nice_len) {
+ *back_res = coder->matches[matches_count - 1].dist
+ + REP_DISTANCES;
+ *len_res = len_main;
+ mf_skip(mf, len_main - 1);
+ return UINT32_MAX;
+ }
+
+ const uint8_t current_byte = *buf;
+ const uint8_t match_byte = *(buf - coder->reps[0] - 1);
+
+ if (len_main < 2 && current_byte != match_byte
+ && rep_lens[rep_max_index] < 2) {
+ *back_res = UINT32_MAX;
+ *len_res = 1;
+ return UINT32_MAX;
+ }
+
+ coder->opts[0].state = coder->state;
+
+ const uint32_t pos_state = position & coder->pos_mask;
+
+ coder->opts[1].price = rc_bit_0_price(
+ coder->is_match[coder->state][pos_state])
+ + get_literal_price(coder, position, buf[-1],
+ !is_literal_state(coder->state),
+ match_byte, current_byte);
+
+ make_literal(&coder->opts[1]);
+
+ const uint32_t match_price = rc_bit_1_price(
+ coder->is_match[coder->state][pos_state]);
+ const uint32_t rep_match_price = match_price
+ + rc_bit_1_price(coder->is_rep[coder->state]);
+
+ if (match_byte == current_byte) {
+ const uint32_t short_rep_price = rep_match_price
+ + get_short_rep_price(
+ coder, coder->state, pos_state);
+
+ if (short_rep_price < coder->opts[1].price) {
+ coder->opts[1].price = short_rep_price;
+ make_short_rep(&coder->opts[1]);
+ }
+ }
+
+ const uint32_t len_end = MAX(len_main, rep_lens[rep_max_index]);
+
+ if (len_end < 2) {
+ *back_res = coder->opts[1].back_prev;
+ *len_res = 1;
+ return UINT32_MAX;
+ }
+
+ coder->opts[1].pos_prev = 0;
+
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i)
+ coder->opts[0].backs[i] = coder->reps[i];
+
+ uint32_t len = len_end;
+ do {
+ coder->opts[len].price = RC_INFINITY_PRICE;
+ } while (--len >= 2);
+
+
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i) {
+ uint32_t rep_len = rep_lens[i];
+ if (rep_len < 2)
+ continue;
+
+ const uint32_t price = rep_match_price + get_pure_rep_price(
+ coder, i, coder->state, pos_state);
+
+ do {
+ const uint32_t cur_and_len_price = price
+ + get_len_price(
+ &coder->rep_len_encoder,
+ rep_len, pos_state);
+
+ if (cur_and_len_price < coder->opts[rep_len].price) {
+ coder->opts[rep_len].price = cur_and_len_price;
+ coder->opts[rep_len].pos_prev = 0;
+ coder->opts[rep_len].back_prev = i;
+ coder->opts[rep_len].prev_1_is_literal = false;
+ }
+ } while (--rep_len >= 2);
+ }
+
+
+ const uint32_t normal_match_price = match_price
+ + rc_bit_0_price(coder->is_rep[coder->state]);
+
+ len = rep_lens[0] >= 2 ? rep_lens[0] + 1 : 2;
+ if (len <= len_main) {
+ uint32_t i = 0;
+ while (len > coder->matches[i].len)
+ ++i;
+
+ for(; ; ++len) {
+ const uint32_t dist = coder->matches[i].dist;
+ const uint32_t cur_and_len_price = normal_match_price
+ + get_pos_len_price(coder,
+ dist, len, pos_state);
+
+ if (cur_and_len_price < coder->opts[len].price) {
+ coder->opts[len].price = cur_and_len_price;
+ coder->opts[len].pos_prev = 0;
+ coder->opts[len].back_prev
+ = dist + REP_DISTANCES;
+ coder->opts[len].prev_1_is_literal = false;
+ }
+
+ if (len == coder->matches[i].len)
+ if (++i == matches_count)
+ break;
+ }
+ }
+
+ return len_end;
+}
+
+
+static inline uint32_t
+helper2(lzma_coder *coder, uint32_t *reps, const uint8_t *buf,
+ uint32_t len_end, uint32_t position, const uint32_t cur,
+ const uint32_t nice_len, const uint32_t buf_avail_full)
+{
+ uint32_t matches_count = coder->matches_count;
+ uint32_t new_len = coder->longest_match_length;
+ uint32_t pos_prev = coder->opts[cur].pos_prev;
+ uint32_t state;
+
+ if (coder->opts[cur].prev_1_is_literal) {
+ --pos_prev;
+
+ if (coder->opts[cur].prev_2) {
+ state = coder->opts[coder->opts[cur].pos_prev_2].state;
+
+ if (coder->opts[cur].back_prev_2 < REP_DISTANCES)
+ update_long_rep(state);
+ else
+ update_match(state);
+
+ } else {
+ state = coder->opts[pos_prev].state;
+ }
+
+ update_literal(state);
+
+ } else {
+ state = coder->opts[pos_prev].state;
+ }
+
+ if (pos_prev == cur - 1) {
+ if (is_short_rep(coder->opts[cur]))
+ update_short_rep(state);
+ else
+ update_literal(state);
+ } else {
+ uint32_t pos;
+ if (coder->opts[cur].prev_1_is_literal
+ && coder->opts[cur].prev_2) {
+ pos_prev = coder->opts[cur].pos_prev_2;
+ pos = coder->opts[cur].back_prev_2;
+ update_long_rep(state);
+ } else {
+ pos = coder->opts[cur].back_prev;
+ if (pos < REP_DISTANCES)
+ update_long_rep(state);
+ else
+ update_match(state);
+ }
+
+ if (pos < REP_DISTANCES) {
+ reps[0] = coder->opts[pos_prev].backs[pos];
+
+ uint32_t i;
+ for (i = 1; i <= pos; ++i)
+ reps[i] = coder->opts[pos_prev].backs[i - 1];
+
+ for (; i < REP_DISTANCES; ++i)
+ reps[i] = coder->opts[pos_prev].backs[i];
+
+ } else {
+ reps[0] = pos - REP_DISTANCES;
+
+ for (uint32_t i = 1; i < REP_DISTANCES; ++i)
+ reps[i] = coder->opts[pos_prev].backs[i - 1];
+ }
+ }
+
+ coder->opts[cur].state = state;
+
+ for (uint32_t i = 0; i < REP_DISTANCES; ++i)
+ coder->opts[cur].backs[i] = reps[i];
+
+ const uint32_t cur_price = coder->opts[cur].price;
+
+ const uint8_t current_byte = *buf;
+ const uint8_t match_byte = *(buf - reps[0] - 1);
+
+ const uint32_t pos_state = position & coder->pos_mask;
+
+ const uint32_t cur_and_1_price = cur_price
+ + rc_bit_0_price(coder->is_match[state][pos_state])
+ + get_literal_price(coder, position, buf[-1],
+ !is_literal_state(state), match_byte, current_byte);
+
+ bool next_is_literal = false;
+
+ if (cur_and_1_price < coder->opts[cur + 1].price) {
+ coder->opts[cur + 1].price = cur_and_1_price;
+ coder->opts[cur + 1].pos_prev = cur;
+ make_literal(&coder->opts[cur + 1]);
+ next_is_literal = true;
+ }
+
+ const uint32_t match_price = cur_price
+ + rc_bit_1_price(coder->is_match[state][pos_state]);
+ const uint32_t rep_match_price = match_price
+ + rc_bit_1_price(coder->is_rep[state]);
+
+ if (match_byte == current_byte
+ && !(coder->opts[cur + 1].pos_prev < cur
+ && coder->opts[cur + 1].back_prev == 0)) {
+
+ const uint32_t short_rep_price = rep_match_price
+ + get_short_rep_price(coder, state, pos_state);
+
+ if (short_rep_price <= coder->opts[cur + 1].price) {
+ coder->opts[cur + 1].price = short_rep_price;
+ coder->opts[cur + 1].pos_prev = cur;
+ make_short_rep(&coder->opts[cur + 1]);
+ next_is_literal = true;
+ }
+ }
+
+ if (buf_avail_full < 2)
+ return len_end;
+
+ const uint32_t buf_avail = MIN(buf_avail_full, nice_len);
+
+ if (!next_is_literal && match_byte != current_byte) { // speed optimization
+ // try literal + rep0
+ const uint8_t *const buf_back = buf - reps[0] - 1;
+ const uint32_t limit = MIN(buf_avail_full, nice_len + 1);
+
+ uint32_t len_test = 1;
+ while (len_test < limit && buf[len_test] == buf_back[len_test])
+ ++len_test;
+
+ --len_test;
+
+ if (len_test >= 2) {
+ uint32_t state_2 = state;
+ update_literal(state_2);
+
+ const uint32_t pos_state_next = (position + 1) & coder->pos_mask;
+ const uint32_t next_rep_match_price = cur_and_1_price
+ + rc_bit_1_price(coder->is_match[state_2][pos_state_next])
+ + rc_bit_1_price(coder->is_rep[state_2]);
+
+ //for (; len_test >= 2; --len_test) {
+ const uint32_t offset = cur + 1 + len_test;
+
+ while (len_end < offset)
+ coder->opts[++len_end].price = RC_INFINITY_PRICE;
+
+ const uint32_t cur_and_len_price = next_rep_match_price
+ + get_rep_price(coder, 0, len_test,
+ state_2, pos_state_next);
+
+ if (cur_and_len_price < coder->opts[offset].price) {
+ coder->opts[offset].price = cur_and_len_price;
+ coder->opts[offset].pos_prev = cur + 1;
+ coder->opts[offset].back_prev = 0;
+ coder->opts[offset].prev_1_is_literal = true;
+ coder->opts[offset].prev_2 = false;
+ }
+ //}
+ }
+ }
+
+
+ uint32_t start_len = 2; // speed optimization
+
+ for (uint32_t rep_index = 0; rep_index < REP_DISTANCES; ++rep_index) {
+ const uint8_t *const buf_back = buf - reps[rep_index] - 1;
+ if (not_equal_16(buf, buf_back))
+ continue;
+
+ uint32_t len_test;
+ for (len_test = 2; len_test < buf_avail
+ && buf[len_test] == buf_back[len_test];
+ ++len_test) ;
+
+ while (len_end < cur + len_test)
+ coder->opts[++len_end].price = RC_INFINITY_PRICE;
+
+ const uint32_t len_test_temp = len_test;
+ const uint32_t price = rep_match_price + get_pure_rep_price(
+ coder, rep_index, state, pos_state);
+
+ do {
+ const uint32_t cur_and_len_price = price
+ + get_len_price(&coder->rep_len_encoder,
+ len_test, pos_state);
+
+ if (cur_and_len_price < coder->opts[cur + len_test].price) {
+ coder->opts[cur + len_test].price = cur_and_len_price;
+ coder->opts[cur + len_test].pos_prev = cur;
+ coder->opts[cur + len_test].back_prev = rep_index;
+ coder->opts[cur + len_test].prev_1_is_literal = false;
+ }
+ } while (--len_test >= 2);
+
+ len_test = len_test_temp;
+
+ if (rep_index == 0)
+ start_len = len_test + 1;
+
+
+ uint32_t len_test_2 = len_test + 1;
+ const uint32_t limit = MIN(buf_avail_full,
+ len_test_2 + nice_len);
+ for (; len_test_2 < limit
+ && buf[len_test_2] == buf_back[len_test_2];
+ ++len_test_2) ;
+
+ len_test_2 -= len_test + 1;
+
+ if (len_test_2 >= 2) {
+ uint32_t state_2 = state;
+ update_long_rep(state_2);
+
+ uint32_t pos_state_next = (position + len_test) & coder->pos_mask;
+
+ const uint32_t cur_and_len_literal_price = price
+ + get_len_price(&coder->rep_len_encoder,
+ len_test, pos_state)
+ + rc_bit_0_price(coder->is_match[state_2][pos_state_next])
+ + get_literal_price(coder, position + len_test,
+ buf[len_test - 1], true,
+ buf_back[len_test], buf[len_test]);
+
+ update_literal(state_2);
+
+ pos_state_next = (position + len_test + 1) & coder->pos_mask;
+
+ const uint32_t next_rep_match_price = cur_and_len_literal_price
+ + rc_bit_1_price(coder->is_match[state_2][pos_state_next])
+ + rc_bit_1_price(coder->is_rep[state_2]);
+
+ //for(; len_test_2 >= 2; len_test_2--) {
+ const uint32_t offset = cur + len_test + 1 + len_test_2;
+
+ while (len_end < offset)
+ coder->opts[++len_end].price = RC_INFINITY_PRICE;
+
+ const uint32_t cur_and_len_price = next_rep_match_price
+ + get_rep_price(coder, 0, len_test_2,
+ state_2, pos_state_next);
+
+ if (cur_and_len_price < coder->opts[offset].price) {
+ coder->opts[offset].price = cur_and_len_price;
+ coder->opts[offset].pos_prev = cur + len_test + 1;
+ coder->opts[offset].back_prev = 0;
+ coder->opts[offset].prev_1_is_literal = true;
+ coder->opts[offset].prev_2 = true;
+ coder->opts[offset].pos_prev_2 = cur;
+ coder->opts[offset].back_prev_2 = rep_index;
+ }
+ //}
+ }
+ }
+
+
+ //for (uint32_t len_test = 2; len_test <= new_len; ++len_test)
+ if (new_len > buf_avail) {
+ new_len = buf_avail;
+
+ matches_count = 0;
+ while (new_len > coder->matches[matches_count].len)
+ ++matches_count;
+
+ coder->matches[matches_count++].len = new_len;
+ }
+
+
+ if (new_len >= start_len) {
+ const uint32_t normal_match_price = match_price
+ + rc_bit_0_price(coder->is_rep[state]);
+
+ while (len_end < cur + new_len)
+ coder->opts[++len_end].price = RC_INFINITY_PRICE;
+
+ uint32_t i = 0;
+ while (start_len > coder->matches[i].len)
+ ++i;
+
+ for (uint32_t len_test = start_len; ; ++len_test) {
+ const uint32_t cur_back = coder->matches[i].dist;
+ uint32_t cur_and_len_price = normal_match_price
+ + get_pos_len_price(coder,
+ cur_back, len_test, pos_state);
+
+ if (cur_and_len_price < coder->opts[cur + len_test].price) {
+ coder->opts[cur + len_test].price = cur_and_len_price;
+ coder->opts[cur + len_test].pos_prev = cur;
+ coder->opts[cur + len_test].back_prev
+ = cur_back + REP_DISTANCES;
+ coder->opts[cur + len_test].prev_1_is_literal = false;
+ }
+
+ if (len_test == coder->matches[i].len) {
+ // Try Match + Literal + Rep0
+ const uint8_t *const buf_back = buf - cur_back - 1;
+ uint32_t len_test_2 = len_test + 1;
+ const uint32_t limit = MIN(buf_avail_full,
+ len_test_2 + nice_len);
+
+ for (; len_test_2 < limit &&
+ buf[len_test_2] == buf_back[len_test_2];
+ ++len_test_2) ;
+
+ len_test_2 -= len_test + 1;
+
+ if (len_test_2 >= 2) {
+ uint32_t state_2 = state;
+ update_match(state_2);
+ uint32_t pos_state_next
+ = (position + len_test) & coder->pos_mask;
+
+ const uint32_t cur_and_len_literal_price = cur_and_len_price
+ + rc_bit_0_price(
+ coder->is_match[state_2][pos_state_next])
+ + get_literal_price(coder,
+ position + len_test,
+ buf[len_test - 1],
+ true,
+ buf_back[len_test],
+ buf[len_test]);
+
+ update_literal(state_2);
+ pos_state_next = (pos_state_next + 1) & coder->pos_mask;
+
+ const uint32_t next_rep_match_price
+ = cur_and_len_literal_price
+ + rc_bit_1_price(
+ coder->is_match[state_2][pos_state_next])
+ + rc_bit_1_price(coder->is_rep[state_2]);
+
+ // for(; len_test_2 >= 2; --len_test_2) {
+ const uint32_t offset = cur + len_test + 1 + len_test_2;
+
+ while (len_end < offset)
+ coder->opts[++len_end].price = RC_INFINITY_PRICE;
+
+ cur_and_len_price = next_rep_match_price
+ + get_rep_price(coder, 0, len_test_2,
+ state_2, pos_state_next);
+
+ if (cur_and_len_price < coder->opts[offset].price) {
+ coder->opts[offset].price = cur_and_len_price;
+ coder->opts[offset].pos_prev = cur + len_test + 1;
+ coder->opts[offset].back_prev = 0;
+ coder->opts[offset].prev_1_is_literal = true;
+ coder->opts[offset].prev_2 = true;
+ coder->opts[offset].pos_prev_2 = cur;
+ coder->opts[offset].back_prev_2
+ = cur_back + REP_DISTANCES;
+ }
+ //}
+ }
+
+ if (++i == matches_count)
+ break;
+ }
+ }
+ }
+
+ return len_end;
+}
+
+
+extern void
+lzma_lzma_optimum_normal(lzma_coder *restrict coder, lzma_mf *restrict mf,
+ uint32_t *restrict back_res, uint32_t *restrict len_res,
+ uint32_t position)
+{
+ // If we have symbols pending, return the next pending symbol.
+ if (coder->opts_end_index != coder->opts_current_index) {
+ assert(mf->read_ahead > 0);
+ *len_res = coder->opts[coder->opts_current_index].pos_prev
+ - coder->opts_current_index;
+ *back_res = coder->opts[coder->opts_current_index].back_prev;
+ coder->opts_current_index = coder->opts[
+ coder->opts_current_index].pos_prev;
+ return;
+ }
+
+	// Update the price tables. In LZMA SDK <= 4.60 (and possibly later)
+	// this was done both in the initialization function and in the main
+	// loop. In liblzma it was moved into this single place.
+ if (mf->read_ahead == 0) {
+ if (coder->match_price_count >= (1 << 7))
+ fill_distances_prices(coder);
+
+ if (coder->align_price_count >= ALIGN_TABLE_SIZE)
+ fill_align_prices(coder);
+ }
+
+ // TODO: This needs quite a bit of cleaning still. But splitting
+	// the original function into two pieces makes it at least a little
+ // more readable, since those two parts don't share many variables.
+
+ uint32_t len_end = helper1(coder, mf, back_res, len_res, position);
+ if (len_end == UINT32_MAX)
+ return;
+
+ uint32_t reps[REP_DISTANCES];
+ memcpy(reps, coder->reps, sizeof(reps));
+
+ uint32_t cur;
+ for (cur = 1; cur < len_end; ++cur) {
+ assert(cur < OPTS);
+
+ coder->longest_match_length = mf_find(
+ mf, &coder->matches_count, coder->matches);
+
+ if (coder->longest_match_length >= mf->nice_len)
+ break;
+
+ len_end = helper2(coder, reps, mf_ptr(mf) - 1, len_end,
+ position + cur, cur, mf->nice_len,
+ MIN(mf_avail(mf) + 1, OPTS - 1 - cur));
+ }
+
+ backward(coder, len_res, back_res, cur);
+ return;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_presets.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_presets.c
new file mode 100644
index 00000000..cbfaed89
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_presets.c
@@ -0,0 +1,55 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma_encoder_presets.c
+/// \brief Encoder presets
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "common.h"
+
+
+extern LZMA_API(lzma_bool)
+lzma_lzma_preset(lzma_options_lzma *options, uint32_t preset)
+{
+ const uint32_t level = preset & LZMA_PRESET_LEVEL_MASK;
+ const uint32_t flags = preset & ~LZMA_PRESET_LEVEL_MASK;
+ const uint32_t supported_flags = LZMA_PRESET_EXTREME;
+
+ if (level > 9 || (flags & ~supported_flags))
+ return true;
+
+ const uint32_t dict_shift = level <= 1 ? 16 : level + 17;
+ options->dict_size = UINT32_C(1) << dict_shift;
+
+ options->preset_dict = NULL;
+ options->preset_dict_size = 0;
+
+ options->lc = LZMA_LC_DEFAULT;
+ options->lp = LZMA_LP_DEFAULT;
+ options->pb = LZMA_PB_DEFAULT;
+
+ options->persistent = false;
+ options->mode = level <= 2 ? LZMA_MODE_FAST : LZMA_MODE_NORMAL;
+
+ options->nice_len = level == 0 ? 8 : level <= 5 ? 32 : 64;
+ options->mf = level <= 1 ? LZMA_MF_HC3 : level <= 2 ? LZMA_MF_HC4
+ : LZMA_MF_BT4;
+ options->depth = 0;
+
+ if (flags & LZMA_PRESET_EXTREME) {
+ options->lc = 4; // FIXME?
+ options->mode = LZMA_MODE_NORMAL;
+ options->mf = LZMA_MF_BT4;
+ options->nice_len = 273;
+ options->depth = 512;
+ }
+
+ return false;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_private.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_private.h
new file mode 100644
index 00000000..52b1086f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/lzma/lzma_encoder_private.h
@@ -0,0 +1,150 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzma_encoder_private.h
+/// \brief Private definitions for LZMA encoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_LZMA_ENCODER_PRIVATE_H
+#define LZMA_LZMA_ENCODER_PRIVATE_H
+
+#include "lz_encoder.h"
+#include "range_encoder.h"
+#include "lzma_common.h"
+#include "lzma_encoder.h"
+
+
+// Macro to compare if the first two bytes in two buffers differ. This is
+// needed in lzma_lzma_optimum_*() to test if the match is at least
+// MATCH_LEN_MIN bytes. Unaligned access gives a tiny gain, so there's no
+// reason not to use it when it is supported.
+#ifdef HAVE_FAST_UNALIGNED_ACCESS
+# define not_equal_16(a, b) \
+ (*(const uint16_t *)(a) != *(const uint16_t *)(b))
+#else
+# define not_equal_16(a, b) \
+ ((a)[0] != (b)[0] || (a)[1] != (b)[1])
+#endif
+
+
+// Optimal - Number of entries in the optimum array.
+#define OPTS (1 << 12)
+
+
+typedef struct {
+ probability choice;
+ probability choice2;
+ probability low[POS_STATES_MAX][LEN_LOW_SYMBOLS];
+ probability mid[POS_STATES_MAX][LEN_MID_SYMBOLS];
+ probability high[LEN_HIGH_SYMBOLS];
+
+ uint32_t prices[POS_STATES_MAX][LEN_SYMBOLS];
+ uint32_t table_size;
+ uint32_t counters[POS_STATES_MAX];
+
+} lzma_length_encoder;
+
+
+typedef struct {
+ lzma_lzma_state state;
+
+ bool prev_1_is_literal;
+ bool prev_2;
+
+ uint32_t pos_prev_2;
+ uint32_t back_prev_2;
+
+ uint32_t price;
+ uint32_t pos_prev; // pos_next;
+ uint32_t back_prev;
+
+ uint32_t backs[REP_DISTANCES];
+
+} lzma_optimal;
+
+
+struct lzma_coder_s {
+ /// Range encoder
+ lzma_range_encoder rc;
+
+ /// State
+ lzma_lzma_state state;
+
+ /// The four most recent match distances
+ uint32_t reps[REP_DISTANCES];
+
+ /// Array of match candidates
+ lzma_match matches[MATCH_LEN_MAX + 1];
+
+ /// Number of match candidates in matches[]
+ uint32_t matches_count;
+
+	/// Variable to hold the length of the longest match between calls
+ /// to lzma_lzma_optimum_*().
+ uint32_t longest_match_length;
+
+ /// True if using getoptimumfast
+ bool fast_mode;
+
+ /// True if the encoder has been initialized by encoding the first
+ /// byte as a literal.
+ bool is_initialized;
+
+ /// True if the range encoder has been flushed, but not all bytes
+ /// have been written to the output buffer yet.
+ bool is_flushed;
+
+ uint32_t pos_mask; ///< (1 << pos_bits) - 1
+ uint32_t literal_context_bits;
+ uint32_t literal_pos_mask;
+
+ // These are the same as in lzma_decoder.c. See comments there.
+ probability literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
+ probability is_match[STATES][POS_STATES_MAX];
+ probability is_rep[STATES];
+ probability is_rep0[STATES];
+ probability is_rep1[STATES];
+ probability is_rep2[STATES];
+ probability is_rep0_long[STATES][POS_STATES_MAX];
+ probability pos_slot[LEN_TO_POS_STATES][POS_SLOTS];
+ probability pos_special[FULL_DISTANCES - END_POS_MODEL_INDEX];
+ probability pos_align[ALIGN_TABLE_SIZE];
+
+ // These are the same as in lzma_decoder.c except that the encoders
+ // include also price tables.
+ lzma_length_encoder match_len_encoder;
+ lzma_length_encoder rep_len_encoder;
+
+ // Price tables
+ uint32_t pos_slot_prices[LEN_TO_POS_STATES][POS_SLOTS];
+ uint32_t distances_prices[LEN_TO_POS_STATES][FULL_DISTANCES];
+ uint32_t dist_table_size;
+ uint32_t match_price_count;
+
+ uint32_t align_prices[ALIGN_TABLE_SIZE];
+ uint32_t align_price_count;
+
+ // Optimal
+ uint32_t opts_end_index;
+ uint32_t opts_current_index;
+ lzma_optimal opts[OPTS];
+};
+
+
+extern void lzma_lzma_optimum_fast(
+ lzma_coder *restrict coder, lzma_mf *restrict mf,
+ uint32_t *restrict back_res, uint32_t *restrict len_res);
+
+extern void lzma_lzma_optimum_normal(lzma_coder *restrict coder,
+ lzma_mf *restrict mf, uint32_t *restrict back_res,
+ uint32_t *restrict len_res, uint32_t position);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/Makefile.inc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/Makefile.inc
new file mode 100644
index 00000000..d8a597a2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/Makefile.inc
@@ -0,0 +1,21 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+EXTRA_DIST += rangecoder/price_tablegen.c
+
+liblzma_la_SOURCES += rangecoder/range_common.h
+
+if COND_ENCODER_LZMA1
+liblzma_la_SOURCES += \
+ rangecoder/range_encoder.h \
+ rangecoder/price.h \
+ rangecoder/price_table.c
+endif
+
+if COND_DECODER_LZMA1
+liblzma_la_SOURCES += rangecoder/range_decoder.h
+endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price.h
new file mode 100644
index 00000000..60df27ef
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price.h
@@ -0,0 +1,94 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file price.h
+/// \brief Probability price calculation
+//
+// Author: Igor Pavlov
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_PRICE_H
+#define LZMA_PRICE_H
+
+
+#define RC_MOVE_REDUCING_BITS 4
+#define RC_BIT_PRICE_SHIFT_BITS 4
+#define RC_PRICE_TABLE_SIZE (RC_BIT_MODEL_TOTAL >> RC_MOVE_REDUCING_BITS)
+
+#define RC_INFINITY_PRICE (UINT32_C(1) << 30)
+
+
+/// Lookup table for the inline functions defined in this file.
+extern const uint8_t lzma_rc_prices[RC_PRICE_TABLE_SIZE];
+
+
+static inline uint32_t
+rc_bit_price(const probability prob, const uint32_t bit)
+{
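+	// (UINT32_C(0) - bit) is zero when bit == 0 and all ones when
+	// bit == 1, so the XOR keeps prob as is for a 0-bit and complements
+	// it within RC_BIT_MODEL_TOTAL for a 1-bit.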
+ return lzma_rc_prices[(prob ^ ((UINT32_C(0) - bit)
+ & (RC_BIT_MODEL_TOTAL - 1))) >> RC_MOVE_REDUCING_BITS];
+}
+
+
+static inline uint32_t
+rc_bit_0_price(const probability prob)
+{
+ return lzma_rc_prices[prob >> RC_MOVE_REDUCING_BITS];
+}
+
+
+static inline uint32_t
+rc_bit_1_price(const probability prob)
+{
+ return lzma_rc_prices[(prob ^ (RC_BIT_MODEL_TOTAL - 1))
+ >> RC_MOVE_REDUCING_BITS];
+}
+
+
+static inline uint32_t
+rc_bittree_price(const probability *const probs,
+ const uint32_t bit_levels, uint32_t symbol)
+{
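+	// Walk the bit tree from the leaf towards the root, summing the
+	// price of each bit on the way.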
+ uint32_t price = 0;
+ symbol += UINT32_C(1) << bit_levels;
+
+ do {
+ const uint32_t bit = symbol & 1;
+ symbol >>= 1;
+ price += rc_bit_price(probs[symbol], bit);
+ } while (symbol != 1);
+
+ return price;
+}
+
+
+static inline uint32_t
+rc_bittree_reverse_price(const probability *const probs,
+ uint32_t bit_levels, uint32_t symbol)
+{
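+	// Same idea as rc_bittree_price(), but the bits are coded starting
+	// from the lowest bit, so the tree is walked from the root downwards.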
+ uint32_t price = 0;
+ uint32_t model_index = 1;
+
+ do {
+ const uint32_t bit = symbol & 1;
+ symbol >>= 1;
+ price += rc_bit_price(probs[model_index], bit);
+ model_index = (model_index << 1) + bit;
+ } while (--bit_levels != 0);
+
+ return price;
+}
+
+
+static inline uint32_t
+rc_direct_price(const uint32_t bits)
+{
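+	// Direct bits are encoded with probability 1/2, so each one costs
+	// exactly one bit, i.e. 1 << RC_BIT_PRICE_SHIFT_BITS price units.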
+ return bits << RC_BIT_PRICE_SHIFT_BITS;
+}
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price_table.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price_table.c
new file mode 100644
index 00000000..f847f68a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price_table.c
@@ -0,0 +1,24 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* This file has been automatically generated by price_tablegen.c. */
+
+#include "range_encoder.h"
+
+const uint8_t lzma_rc_prices[RC_PRICE_TABLE_SIZE] = {
+ 128, 103, 91, 84, 78, 73, 69, 66,
+ 63, 61, 58, 56, 54, 52, 51, 49,
+ 48, 46, 45, 44, 43, 42, 41, 40,
+ 39, 38, 37, 36, 35, 34, 34, 33,
+ 32, 31, 31, 30, 29, 29, 28, 28,
+ 27, 26, 26, 25, 25, 24, 24, 23,
+ 23, 22, 22, 22, 21, 21, 20, 20,
+ 19, 19, 19, 18, 18, 17, 17, 17,
+ 16, 16, 16, 15, 15, 15, 14, 14,
+ 14, 13, 13, 13, 12, 12, 12, 11,
+ 11, 11, 11, 10, 10, 10, 10, 9,
+ 9, 9, 9, 8, 8, 8, 8, 7,
+ 7, 7, 7, 6, 6, 6, 6, 5,
+ 5, 5, 5, 5, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 2, 2, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1
+};
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price_tablegen.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price_tablegen.c
new file mode 100644
index 00000000..ee6a05a9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/price_tablegen.c
@@ -0,0 +1,89 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file price_tablegen.c
+/// \brief Probability price table generator
+///
+/// Compiling: gcc -std=c99 -o price_tablegen price_tablegen.c
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include <inttypes.h>
+#include <stdio.h>
+#include "range_common.h"
+#include "price.h"
+
+
+static uint32_t rc_prices[RC_PRICE_TABLE_SIZE];
+
+
+static void
+init_price_table(void)
+{
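+	// For each probability bucket, approximate -log2(probability) in
+	// fixed point (RC_BIT_PRICE_SHIFT_BITS fractional bits) by repeatedly
+	// squaring the scaled value and counting the bits that overflow
+	// 16 bits.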
+ for (uint32_t i = (UINT32_C(1) << RC_MOVE_REDUCING_BITS) / 2;
+ i < RC_BIT_MODEL_TOTAL;
+ i += (UINT32_C(1) << RC_MOVE_REDUCING_BITS)) {
+ const uint32_t cycles_bits = RC_BIT_PRICE_SHIFT_BITS;
+ uint32_t w = i;
+ uint32_t bit_count = 0;
+
+ for (uint32_t j = 0; j < cycles_bits; ++j) {
+ w *= w;
+ bit_count <<= 1;
+
+ while (w >= (UINT32_C(1) << 16)) {
+ w >>= 1;
+ ++bit_count;
+ }
+ }
+
+ rc_prices[i >> RC_MOVE_REDUCING_BITS]
+ = (RC_BIT_MODEL_TOTAL_BITS << cycles_bits)
+ - 15 - bit_count;
+ }
+
+ return;
+}
+
+
+static void
+print_price_table(void)
+{
+ printf("/* This file has been automatically generated by "
+ "price_tablegen.c. */\n\n"
+ "#include \"range_encoder.h\"\n\n"
+ "const uint8_t lzma_rc_prices["
+ "RC_PRICE_TABLE_SIZE] = {");
+
+ const size_t array_size = sizeof(lzma_rc_prices)
+ / sizeof(lzma_rc_prices[0]);
+ for (size_t i = 0; i < array_size; ++i) {
+ if (i % 8 == 0)
+ printf("\n\t");
+
+ printf("%4" PRIu32, rc_prices[i]);
+
+ if (i != array_size - 1)
+ printf(",");
+ }
+
+ printf("\n};\n");
+
+ return;
+}
+
+
+int
+main(void)
+{
+ init_price_table();
+ print_price_table();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_common.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_common.h
new file mode 100644
index 00000000..bd9f73fb
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_common.h
@@ -0,0 +1,75 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file range_common.h
+/// \brief Common things for range encoder and decoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_RANGE_COMMON_H
+#define LZMA_RANGE_COMMON_H
+
+#ifdef HAVE_CONFIG_H
+# include "common.h"
+#endif
+
+
+///////////////
+// Constants //
+///////////////
+
+#define RC_SHIFT_BITS 8
+#define RC_TOP_BITS 24
+#define RC_TOP_VALUE (UINT32_C(1) << RC_TOP_BITS)
+#define RC_BIT_MODEL_TOTAL_BITS 11
+#define RC_BIT_MODEL_TOTAL (UINT32_C(1) << RC_BIT_MODEL_TOTAL_BITS)
+#define RC_MOVE_BITS 5
+
+
+////////////
+// Macros //
+////////////
+
+// Resets the probability so that both 0 and 1 have probability of 50 %
+#define bit_reset(prob) \
+ prob = RC_BIT_MODEL_TOTAL >> 1
+
+// This does the same for a complete bit tree.
+// (A tree represented as an array.)
+#define bittree_reset(probs, bit_levels) \
+ for (uint32_t bt_i = 0; bt_i < (1 << (bit_levels)); ++bt_i) \
+ bit_reset((probs)[bt_i])
+
+
+//////////////////////
+// Type definitions //
+//////////////////////
+
+/// \brief Type of probabilities used with range coder
+///
+/// This needs to be at least 12-bit integer, so uint16_t is a logical choice.
+/// However, on some architecture and compiler combinations, a bigger type
+/// may give better speed, because the probability variables are accessed
+/// a lot. On the other hand, bigger probability type increases cache
+/// footprint, since there are 2 to 14 thousand probability variables in
+/// LZMA (assuming the limit of lc + lp <= 4; with lc + lp <= 12 there
+/// would be about 1.5 million variables).
+///
+/// With malicious files, the initialization speed of the LZMA decoder can
+/// become important. In that case, smaller probability variables mean that
+/// there are fewer bytes to write to RAM, which makes initialization faster.
+/// With big probability type, the initialization can become so slow that it
+/// can be a problem e.g. for email servers doing virus scanning.
+///
+/// I will be sticking to uint16_t unless some specific architectures
+/// are *much* faster (20-50 %) with uint32_t.
+typedef uint16_t probability;
+
+#endif
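
A minimal standalone check of the reset macros above: after bittree_reset(), every slot of a bit tree holds RC_BIT_MODEL_TOTAL / 2, i.e. both bit values are equally likely. The array name and tree depth below are made up for the illustration.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch; the macros are copied from range_common.h above. */
#define RC_BIT_MODEL_TOTAL_BITS 11
#define RC_BIT_MODEL_TOTAL (UINT32_C(1) << RC_BIT_MODEL_TOTAL_BITS)

typedef uint16_t probability;

#define bit_reset(prob) \
    prob = RC_BIT_MODEL_TOTAL >> 1

#define bittree_reset(probs, bit_levels) \
    for (uint32_t bt_i = 0; bt_i < (1 << (bit_levels)); ++bt_i) \
        bit_reset((probs)[bt_i])

int main(void)
{
    probability probs[1 << 3];      /* a three-level tree: eight slots */

    bittree_reset(probs, 3);

    /* Every slot is now 1024, i.e. P(0) == P(1) == 50 %. */
    printf("probs[0] = %u\n", (unsigned)probs[0]);
    return 0;
}
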
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_decoder.h
new file mode 100644
index 00000000..6f79cb30
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_decoder.h
@@ -0,0 +1,181 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file range_decoder.h
+/// \brief Range Decoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_RANGE_DECODER_H
+#define LZMA_RANGE_DECODER_H
+
+#include "range_common.h"
+
+
+typedef struct {
+ uint32_t range;
+ uint32_t code;
+ uint32_t init_bytes_left;
+} lzma_range_decoder;
+
+
+/// Reads the first five bytes to initialize the range decoder.
+static inline bool
+rc_read_init(lzma_range_decoder *rc, const uint8_t *restrict in,
+ size_t *restrict in_pos, size_t in_size)
+{
+ while (rc->init_bytes_left > 0) {
+ if (*in_pos == in_size)
+ return false;
+
+ rc->code = (rc->code << 8) | in[*in_pos];
+ ++*in_pos;
+ --rc->init_bytes_left;
+ }
+
+ return true;
+}
+
+
+/// Makes local copies of the range decoder and *in_pos variables. Doing this
+/// improves speed significantly. The range decoder macros also expect the
+/// variables `in' and `in_size' to be defined.
+#define rc_to_local(range_decoder, in_pos) \
+ lzma_range_decoder rc = range_decoder; \
+ size_t rc_in_pos = (in_pos); \
+ uint32_t rc_bound
+
+
+/// Stores the local copies back to the range decoder structure.
+#define rc_from_local(range_decoder, in_pos) \
+do { \
+ range_decoder = rc; \
+ in_pos = rc_in_pos; \
+} while (0)
+
+
+/// Resets the range decoder structure.
+#define rc_reset(range_decoder) \
+do { \
+ (range_decoder).range = UINT32_MAX; \
+ (range_decoder).code = 0; \
+ (range_decoder).init_bytes_left = 5; \
+} while (0)
+
+
+/// When decoding has been properly finished, rc.code is always zero unless
+/// the input stream is corrupt. So checking this can catch some corrupt
+/// files, especially if they don't have any other integrity check.
+#define rc_is_finished(range_decoder) \
+ ((range_decoder).code == 0)
+
+
+/// Read the next input byte if needed. If more input is needed but there is
+/// no more input available, "goto out" is used to jump out of the main
+/// decoder loop.
+#define rc_normalize(seq) \
+do { \
+ if (rc.range < RC_TOP_VALUE) { \
+ if (unlikely(rc_in_pos == in_size)) { \
+ coder->sequence = seq; \
+ goto out; \
+ } \
+ rc.range <<= RC_SHIFT_BITS; \
+ rc.code = (rc.code << RC_SHIFT_BITS) | in[rc_in_pos++]; \
+ } \
+} while (0)
+
+
+/// Start decoding a bit. This must be used together with rc_update_0()
+/// and rc_update_1():
+///
+/// rc_if_0(prob, seq) {
+/// rc_update_0(prob);
+/// // Do something
+/// } else {
+/// rc_update_1(prob);
+/// // Do something else
+/// }
+///
+#define rc_if_0(prob, seq) \
+ rc_normalize(seq); \
+ rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \
+ if (rc.code < rc_bound)
+
+
+/// Update the range decoder state and the used probability variable to
+/// match a decoded bit of 0.
+#define rc_update_0(prob) \
+do { \
+ rc.range = rc_bound; \
+ prob += (RC_BIT_MODEL_TOTAL - (prob)) >> RC_MOVE_BITS; \
+} while (0)
+
+
+/// Update the range decoder state and the used probability variable to
+/// match a decoded bit of 1.
+#define rc_update_1(prob) \
+do { \
+ rc.range -= rc_bound; \
+ rc.code -= rc_bound; \
+ prob -= (prob) >> RC_MOVE_BITS; \
+} while (0)
+
+
+/// Decodes one bit and runs action0 or action1 depending on the decoded bit.
+/// This macro is used as the last step in bittree reverse decoders since
+/// those don't use "symbol" for anything other than indexing the probability
+/// arrays.
+#define rc_bit_last(prob, action0, action1, seq) \
+do { \
+ rc_if_0(prob, seq) { \
+ rc_update_0(prob); \
+ action0; \
+ } else { \
+ rc_update_1(prob); \
+ action1; \
+ } \
+} while (0)
+
+
+/// Decodes one bit, updates "symbol", and runs action0 or action1 depending
+/// on the decoded bit.
+#define rc_bit(prob, action0, action1, seq) \
+ rc_bit_last(prob, \
+ symbol <<= 1; action0, \
+ symbol = (symbol << 1) + 1; action1, \
+ seq);
+
+
+/// Like rc_bit() but add "case seq:" as a prefix. This makes the unrolled
+/// loops more readable because the code isn't littered with "case"
+/// statements. On the other hand this also makes it less readable, since
+/// spotting the places where the decoder loop may be restarted is less
+/// obvious.
+#define rc_bit_case(prob, action0, action1, seq) \
+ case seq: rc_bit(prob, action0, action1, seq)
+
+
+/// Decode a bit without using a probability.
+#define rc_direct(dest, seq) \
+do { \
+ rc_normalize(seq); \
+ rc.range >>= 1; \
+ rc.code -= rc.range; \
+ rc_bound = UINT32_C(0) - (rc.code >> 31); \
+ rc.code += rc.range & rc_bound; \
+ dest = (dest << 1) + (rc_bound + 1); \
+} while (0)
+
+
+// NOTE: No macros are provided for bittree decoding. It seems to be simpler
+// to just write them open in the code.
+
+#endif
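
The macros above are written for liblzma's restartable decoder loops, which is why they lean on coder->sequence, in, in_size and a goto label. Purely as an illustration of the arithmetic they expand to (not the liblzma API), the same decode-one-bit step can be written as a plain function. The sketch assumes enough input is available, so normalization never runs out of bytes.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define RC_SHIFT_BITS 8
#define RC_TOP_VALUE (UINT32_C(1) << 24)
#define RC_BIT_MODEL_TOTAL_BITS 11
#define RC_BIT_MODEL_TOTAL (UINT32_C(1) << RC_BIT_MODEL_TOTAL_BITS)
#define RC_MOVE_BITS 5

/* Illustrative only: the arithmetic of rc_normalize(), rc_if_0(),
 * rc_update_0() and rc_update_1() in plain function form. */
static unsigned
decode_bit(uint32_t *range, uint32_t *code, uint16_t *prob,
        const uint8_t *in, size_t *in_pos)
{
    /* rc_normalize(): keep the range large enough. */
    if (*range < RC_TOP_VALUE) {
        *range <<= RC_SHIFT_BITS;
        *code = (*code << RC_SHIFT_BITS) | in[(*in_pos)++];
    }

    /* rc_if_0(): split the range according to the probability. */
    const uint32_t bound = (*range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;

    if (*code < bound) {
        /* rc_update_0() */
        *range = bound;
        *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
        return 0;
    }

    /* rc_update_1() */
    *range -= bound;
    *code -= bound;
    *prob -= *prob >> RC_MOVE_BITS;
    return 1;
}

int main(void)
{
    const uint8_t in[8] = { 0 };        /* pretend input for normalization */
    size_t in_pos = 0;
    uint32_t range = UINT32_MAX;
    uint32_t code = 0;                  /* as if only 0-bits were encoded */
    uint16_t prob = RC_BIT_MODEL_TOTAL >> 1;

    /* Each step decodes a 0-bit and the probability of 0 keeps growing. */
    for (int i = 0; i < 3; ++i) {
        const unsigned bit = decode_bit(&range, &code, &prob, in, &in_pos);
        printf("bit %u, prob now %u\n", bit, (unsigned)prob);
    }

    return 0;
}
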
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_encoder.h
new file mode 100644
index 00000000..874f7092
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/rangecoder/range_encoder.h
@@ -0,0 +1,233 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file range_encoder.h
+/// \brief Range Encoder
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_RANGE_ENCODER_H
+#define LZMA_RANGE_ENCODER_H
+
+#include "range_common.h"
+#include "price.h"
+
+
+/// Maximum number of symbols that can be left pending in the lzma_range_encoder
+/// structure between calls to rc_encode(). For LZMA, 52+5 is enough
+/// (a match with a big distance and length followed by a range encoder flush).
+#define RC_SYMBOLS_MAX 58
+
+
+typedef struct {
+ uint64_t low;
+ uint64_t cache_size;
+ uint32_t range;
+ uint8_t cache;
+
+ /// Number of symbols in the tables
+ size_t count;
+
+ /// rc_encode()'s position in the tables
+ size_t pos;
+
+ /// Symbols to encode
+ enum {
+ RC_BIT_0,
+ RC_BIT_1,
+ RC_DIRECT_0,
+ RC_DIRECT_1,
+ RC_FLUSH,
+ } symbols[RC_SYMBOLS_MAX];
+
+ /// Probabilities associated with RC_BIT_0 or RC_BIT_1
+ probability *probs[RC_SYMBOLS_MAX];
+
+} lzma_range_encoder;
+
+
+static inline void
+rc_reset(lzma_range_encoder *rc)
+{
+ rc->low = 0;
+ rc->cache_size = 1;
+ rc->range = UINT32_MAX;
+ rc->cache = 0;
+ rc->count = 0;
+ rc->pos = 0;
+}
+
+
+static inline void
+rc_bit(lzma_range_encoder *rc, probability *prob, uint32_t bit)
+{
+ rc->symbols[rc->count] = bit;
+ rc->probs[rc->count] = prob;
+ ++rc->count;
+}
+
+
+static inline void
+rc_bittree(lzma_range_encoder *rc, probability *probs,
+ uint32_t bit_count, uint32_t symbol)
+{
+ uint32_t model_index = 1;
+
+ do {
+ const uint32_t bit = (symbol >> --bit_count) & 1;
+ rc_bit(rc, &probs[model_index], bit);
+ model_index = (model_index << 1) + bit;
+ } while (bit_count != 0);
+}
+
+
+static inline void
+rc_bittree_reverse(lzma_range_encoder *rc, probability *probs,
+ uint32_t bit_count, uint32_t symbol)
+{
+ uint32_t model_index = 1;
+
+ do {
+ const uint32_t bit = symbol & 1;
+ symbol >>= 1;
+ rc_bit(rc, &probs[model_index], bit);
+ model_index = (model_index << 1) + bit;
+ } while (--bit_count != 0);
+}
+
+
+static inline void
+rc_direct(lzma_range_encoder *rc,
+ uint32_t value, uint32_t bit_count)
+{
+ do {
+ rc->symbols[rc->count++]
+ = RC_DIRECT_0 + ((value >> --bit_count) & 1);
+ } while (bit_count != 0);
+}
+
+
+static inline void
+rc_flush(lzma_range_encoder *rc)
+{
+ for (size_t i = 0; i < 5; ++i)
+ rc->symbols[rc->count++] = RC_FLUSH;
+}
+
+
+static inline bool
+rc_shift_low(lzma_range_encoder *rc,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ if ((uint32_t)(rc->low) < (uint32_t)(0xFF000000)
+ || (uint32_t)(rc->low >> 32) != 0) {
+ do {
+ if (*out_pos == out_size)
+ return true;
+
+ out[*out_pos] = rc->cache + (uint8_t)(rc->low >> 32);
+ ++*out_pos;
+ rc->cache = 0xFF;
+
+ } while (--rc->cache_size != 0);
+
+ rc->cache = (rc->low >> 24) & 0xFF;
+ }
+
+ ++rc->cache_size;
+ rc->low = (rc->low & 0x00FFFFFF) << RC_SHIFT_BITS;
+
+ return false;
+}
+
+
+static inline bool
+rc_encode(lzma_range_encoder *rc,
+ uint8_t *out, size_t *out_pos, size_t out_size)
+{
+ assert(rc->count <= RC_SYMBOLS_MAX);
+
+ while (rc->pos < rc->count) {
+ // Normalize
+ if (rc->range < RC_TOP_VALUE) {
+ if (rc_shift_low(rc, out, out_pos, out_size))
+ return true;
+
+ rc->range <<= RC_SHIFT_BITS;
+ }
+
+ // Encode a bit
+ switch (rc->symbols[rc->pos]) {
+ case RC_BIT_0: {
+ probability prob = *rc->probs[rc->pos];
+ rc->range = (rc->range >> RC_BIT_MODEL_TOTAL_BITS)
+ * prob;
+ prob += (RC_BIT_MODEL_TOTAL - prob) >> RC_MOVE_BITS;
+ *rc->probs[rc->pos] = prob;
+ break;
+ }
+
+ case RC_BIT_1: {
+ probability prob = *rc->probs[rc->pos];
+ const uint32_t bound = prob * (rc->range
+ >> RC_BIT_MODEL_TOTAL_BITS);
+ rc->low += bound;
+ rc->range -= bound;
+ prob -= prob >> RC_MOVE_BITS;
+ *rc->probs[rc->pos] = prob;
+ break;
+ }
+
+ case RC_DIRECT_0:
+ rc->range >>= 1;
+ break;
+
+ case RC_DIRECT_1:
+ rc->range >>= 1;
+ rc->low += rc->range;
+ break;
+
+ case RC_FLUSH:
+ // Prevent further normalizations.
+ rc->range = UINT32_MAX;
+
+ // Flush the last five bytes (see rc_flush()).
+ do {
+ if (rc_shift_low(rc, out, out_pos, out_size))
+ return true;
+ } while (++rc->pos < rc->count);
+
+ // Reset the range encoder so we are ready to continue
+ // encoding if we weren't finishing the stream.
+ rc_reset(rc);
+ return false;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ ++rc->pos;
+ }
+
+ rc->count = 0;
+ rc->pos = 0;
+
+ return false;
+}
+
+
+static inline uint64_t
+rc_pending(const lzma_range_encoder *rc)
+{
+ return rc->cache_size + 5 - 1;
+}
+
+#endif
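
rc_bittree() above queues one probability slot per bit, and the slot used for each bit depends on the bits queued so far. The standalone trace below walks the same index arithmetic for one made-up 3-bit symbol; probability handling is deliberately left out.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Same traversal as rc_bittree(): most significant bit first,
     * model_index doubles and absorbs each emitted bit. */
    const uint32_t symbol = 5;          /* 0b101, illustrative value */
    uint32_t bit_count = 3;
    uint32_t model_index = 1;

    do {
        const uint32_t bit = (symbol >> --bit_count) & 1;
        printf("slot %u gets bit %u\n",
                (unsigned)model_index, (unsigned)bit);
        model_index = (model_index << 1) + bit;
    } while (bit_count != 0);

    /* Prints: slot 1 gets bit 1, slot 3 gets bit 0, slot 6 gets bit 1. */
    return 0;
}
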
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/Makefile.inc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/Makefile.inc
new file mode 100644
index 00000000..8a5e2d7f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/Makefile.inc
@@ -0,0 +1,47 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+liblzma_la_SOURCES += \
+ simple/simple_coder.c \
+ simple/simple_coder.h \
+ simple/simple_private.h
+
+if COND_ENCODER_SIMPLE
+liblzma_la_SOURCES += \
+ simple/simple_encoder.c \
+ simple/simple_encoder.h
+endif
+
+if COND_DECODER_SIMPLE
+liblzma_la_SOURCES += \
+ simple/simple_decoder.c \
+ simple/simple_decoder.h
+endif
+
+if COND_FILTER_X86
+liblzma_la_SOURCES += simple/x86.c
+endif
+
+if COND_FILTER_POWERPC
+liblzma_la_SOURCES += simple/powerpc.c
+endif
+
+if COND_FILTER_IA64
+liblzma_la_SOURCES += simple/ia64.c
+endif
+
+if COND_FILTER_ARM
+liblzma_la_SOURCES += simple/arm.c
+endif
+
+if COND_FILTER_ARMTHUMB
+liblzma_la_SOURCES += simple/armthumb.c
+endif
+
+if COND_FILTER_SPARC
+liblzma_la_SOURCES += simple/sparc.c
+endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/arm.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/arm.c
new file mode 100644
index 00000000..5f4cee99
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/arm.c
@@ -0,0 +1,71 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file arm.c
+/// \brief Filter for ARM binaries
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "simple_private.h"
+
+
+static size_t
+arm_code(lzma_simple *simple lzma_attribute((unused)),
+ uint32_t now_pos, bool is_encoder,
+ uint8_t *buffer, size_t size)
+{
+ size_t i;
+ for (i = 0; i + 4 <= size; i += 4) {
+ if (buffer[i + 3] == 0xEB) {
+ uint32_t src = (buffer[i + 2] << 16)
+ | (buffer[i + 1] << 8)
+ | (buffer[i + 0]);
+ src <<= 2;
+
+ uint32_t dest;
+ if (is_encoder)
+ dest = now_pos + (uint32_t)(i) + 8 + src;
+ else
+ dest = src - (now_pos + (uint32_t)(i) + 8);
+
+ dest >>= 2;
+ buffer[i + 2] = (dest >> 16);
+ buffer[i + 1] = (dest >> 8);
+ buffer[i + 0] = dest;
+ }
+ }
+
+ return i;
+}
+
+
+static lzma_ret
+arm_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters, bool is_encoder)
+{
+ return lzma_simple_coder_init(next, allocator, filters,
+ &arm_code, 0, 4, 4, is_encoder);
+}
+
+
+extern lzma_ret
+lzma_simple_arm_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ return arm_coder_init(next, allocator, filters, true);
+}
+
+
+extern lzma_ret
+lzma_simple_arm_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ return arm_coder_init(next, allocator, filters, false);
+}
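
The conversion in arm_code() is a plain relative/absolute rewrite: the encoder replaces a BL offset with the absolute target (instruction position, plus the ARM PC bias of 8, plus the offset), and the decoder reverses it, so identical call targets produce identical bytes and compress better. The other simple filters in this directory (armthumb, ia64, powerpc, sparc, x86) apply the same idea to their own instruction formats and biases. A toy round trip with made-up numbers, heuristics omitted:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t now_pos = 0x1000;    /* position of the BL instruction */
    const uint32_t field = 0x40;        /* 24-bit offset field, made up */

    const uint32_t src = field << 2;                     /* byte offset */
    const uint32_t absolute = now_pos + 8 + src;         /* encoder side */
    const uint32_t restored = absolute - (now_pos + 8);  /* decoder side */

    printf("stored field 0x%X, restored field 0x%X\n",
            (unsigned)(absolute >> 2), (unsigned)(restored >> 2));

    return (restored >> 2) == field ? 0 : 1;
}
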
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/armthumb.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/armthumb.c
new file mode 100644
index 00000000..a87e85d9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/armthumb.c
@@ -0,0 +1,76 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file armthumb.c
+/// \brief Filter for ARM-Thumb binaries
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "simple_private.h"
+
+
+static size_t
+armthumb_code(lzma_simple *simple lzma_attribute((unused)),
+ uint32_t now_pos, bool is_encoder,
+ uint8_t *buffer, size_t size)
+{
+ size_t i;
+ for (i = 0; i + 4 <= size; i += 2) {
+ if ((buffer[i + 1] & 0xF8) == 0xF0
+ && (buffer[i + 3] & 0xF8) == 0xF8) {
+ uint32_t src = ((buffer[i + 1] & 0x7) << 19)
+ | (buffer[i + 0] << 11)
+ | ((buffer[i + 3] & 0x7) << 8)
+ | (buffer[i + 2]);
+
+ src <<= 1;
+
+ uint32_t dest;
+ if (is_encoder)
+ dest = now_pos + (uint32_t)(i) + 4 + src;
+ else
+ dest = src - (now_pos + (uint32_t)(i) + 4);
+
+ dest >>= 1;
+ buffer[i + 1] = 0xF0 | ((dest >> 19) & 0x7);
+ buffer[i + 0] = (dest >> 11);
+ buffer[i + 3] = 0xF8 | ((dest >> 8) & 0x7);
+ buffer[i + 2] = (dest);
+ i += 2;
+ }
+ }
+
+ return i;
+}
+
+
+static lzma_ret
+armthumb_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters, bool is_encoder)
+{
+ return lzma_simple_coder_init(next, allocator, filters,
+ &armthumb_code, 0, 4, 2, is_encoder);
+}
+
+
+extern lzma_ret
+lzma_simple_armthumb_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters)
+{
+ return armthumb_coder_init(next, allocator, filters, true);
+}
+
+
+extern lzma_ret
+lzma_simple_armthumb_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters)
+{
+ return armthumb_coder_init(next, allocator, filters, false);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/ia64.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/ia64.c
new file mode 100644
index 00000000..06e22b62
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/ia64.c
@@ -0,0 +1,112 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file ia64.c
+/// \brief Filter for IA64 (Itanium) binaries
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "simple_private.h"
+
+
+static size_t
+ia64_code(lzma_simple *simple lzma_attribute((unused)),
+ uint32_t now_pos, bool is_encoder,
+ uint8_t *buffer, size_t size)
+{
+ static const uint32_t BRANCH_TABLE[32] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 4, 4, 6, 6, 0, 0, 7, 7,
+ 4, 4, 0, 0, 4, 4, 0, 0
+ };
+
+ size_t i;
+ for (i = 0; i + 16 <= size; i += 16) {
+ const uint32_t instr_template = buffer[i] & 0x1F;
+ const uint32_t mask = BRANCH_TABLE[instr_template];
+ uint32_t bit_pos = 5;
+
+ for (size_t slot = 0; slot < 3; ++slot, bit_pos += 41) {
+ if (((mask >> slot) & 1) == 0)
+ continue;
+
+ const size_t byte_pos = (bit_pos >> 3);
+ const uint32_t bit_res = bit_pos & 0x7;
+ uint64_t instruction = 0;
+
+ for (size_t j = 0; j < 6; ++j)
+ instruction += (uint64_t)(
+ buffer[i + j + byte_pos])
+ << (8 * j);
+
+ uint64_t inst_norm = instruction >> bit_res;
+
+ if (((inst_norm >> 37) & 0xF) == 0x5
+ && ((inst_norm >> 9) & 0x7) == 0
+ /* && (inst_norm & 0x3F)== 0 */
+ ) {
+ uint32_t src = (uint32_t)(
+ (inst_norm >> 13) & 0xFFFFF);
+ src |= ((inst_norm >> 36) & 1) << 20;
+
+ src <<= 4;
+
+ uint32_t dest;
+ if (is_encoder)
+ dest = now_pos + (uint32_t)(i) + src;
+ else
+ dest = src - (now_pos + (uint32_t)(i));
+
+ dest >>= 4;
+
+ inst_norm &= ~((uint64_t)(0x8FFFFF) << 13);
+ inst_norm |= (uint64_t)(dest & 0xFFFFF) << 13;
+ inst_norm |= (uint64_t)(dest & 0x100000)
+ << (36 - 20);
+
+ instruction &= (1 << bit_res) - 1;
+ instruction |= (inst_norm << bit_res);
+
+ for (size_t j = 0; j < 6; j++)
+ buffer[i + j + byte_pos] = (uint8_t)(
+ instruction
+ >> (8 * j));
+ }
+ }
+ }
+
+ return i;
+}
+
+
+static lzma_ret
+ia64_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters, bool is_encoder)
+{
+ return lzma_simple_coder_init(next, allocator, filters,
+ &ia64_code, 0, 16, 16, is_encoder);
+}
+
+
+extern lzma_ret
+lzma_simple_ia64_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters)
+{
+ return ia64_coder_init(next, allocator, filters, true);
+}
+
+
+extern lzma_ret
+lzma_simple_ia64_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters)
+{
+ return ia64_coder_init(next, allocator, filters, false);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/powerpc.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/powerpc.c
new file mode 100644
index 00000000..df99a813
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/powerpc.c
@@ -0,0 +1,75 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file powerpc.c
+/// \brief Filter for PowerPC (big endian) binaries
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "simple_private.h"
+
+
+static size_t
+powerpc_code(lzma_simple *simple lzma_attribute((unused)),
+ uint32_t now_pos, bool is_encoder,
+ uint8_t *buffer, size_t size)
+{
+ size_t i;
+ for (i = 0; i + 4 <= size; i += 4) {
+ // PowerPC branch 6(48) 24(Offset) 1(Abs) 1(Link)
+ if ((buffer[i] >> 2) == 0x12
+ && ((buffer[i + 3] & 3) == 1)) {
+
+ const uint32_t src = ((buffer[i + 0] & 3) << 24)
+ | (buffer[i + 1] << 16)
+ | (buffer[i + 2] << 8)
+ | (buffer[i + 3] & (~3));
+
+ uint32_t dest;
+ if (is_encoder)
+ dest = now_pos + (uint32_t)(i) + src;
+ else
+ dest = src - (now_pos + (uint32_t)(i));
+
+ buffer[i + 0] = 0x48 | ((dest >> 24) & 0x03);
+ buffer[i + 1] = (dest >> 16);
+ buffer[i + 2] = (dest >> 8);
+ buffer[i + 3] &= 0x03;
+ buffer[i + 3] |= dest;
+ }
+ }
+
+ return i;
+}
+
+
+static lzma_ret
+powerpc_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters, bool is_encoder)
+{
+ return lzma_simple_coder_init(next, allocator, filters,
+ &powerpc_code, 0, 4, 4, is_encoder);
+}
+
+
+extern lzma_ret
+lzma_simple_powerpc_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters)
+{
+ return powerpc_coder_init(next, allocator, filters, true);
+}
+
+
+extern lzma_ret
+lzma_simple_powerpc_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters)
+{
+ return powerpc_coder_init(next, allocator, filters, false);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_coder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_coder.c
new file mode 100644
index 00000000..165a08ae
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_coder.c
@@ -0,0 +1,270 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file simple_coder.c
+/// \brief Wrapper for simple filters
+///
+/// Simple filters don't change the size of the data, i.e. the number of bytes
+/// in equals the number of bytes out.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "simple_private.h"
+
+
+/// Copies or encodes/decodes more data to out[].
+static lzma_ret
+copy_or_code(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ assert(!coder->end_was_reached);
+
+ if (coder->next.code == NULL) {
+ lzma_bufcpy(in, in_pos, in_size, out, out_pos, out_size);
+
+ // Check if end of stream was reached.
+ if (coder->is_encoder && action == LZMA_FINISH
+ && *in_pos == in_size)
+ coder->end_was_reached = true;
+
+ } else {
+ // Call the next coder in the chain to provide us some data.
+ // We don't care about uncompressed_size here, because
+ // the next filter in the chain will do it for us (since
+ // we don't change the size of the data).
+ const lzma_ret ret = coder->next.code(
+ coder->next.coder, allocator,
+ in, in_pos, in_size,
+ out, out_pos, out_size, action);
+
+ if (ret == LZMA_STREAM_END) {
+ assert(!coder->is_encoder
+ || action == LZMA_FINISH);
+ coder->end_was_reached = true;
+
+ } else if (ret != LZMA_OK) {
+ return ret;
+ }
+ }
+
+ return LZMA_OK;
+}
+
+
+static size_t
+call_filter(lzma_coder *coder, uint8_t *buffer, size_t size)
+{
+ const size_t filtered = coder->filter(coder->simple,
+ coder->now_pos, coder->is_encoder,
+ buffer, size);
+ coder->now_pos += filtered;
+ return filtered;
+}
+
+
+static lzma_ret
+simple_code(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ // TODO: Add partial support for LZMA_SYNC_FLUSH. We can support it
+ // in cases when the filter is able to filter everything. With most
+ // simple filters it can be done at offset that is a multiple of 2,
+ // 4, or 16. With x86 filter, it needs good luck, and thus cannot
+ // be made to work predictably.
+ if (action == LZMA_SYNC_FLUSH)
+ return LZMA_OPTIONS_ERROR;
+
+ // Flush already filtered data from coder->buffer[] to out[].
+ if (coder->pos < coder->filtered) {
+ lzma_bufcpy(coder->buffer, &coder->pos, coder->filtered,
+ out, out_pos, out_size);
+
+ // If we couldn't flush all the filtered data, return to the
+ // application immediately.
+ if (coder->pos < coder->filtered)
+ return LZMA_OK;
+
+ if (coder->end_was_reached) {
+ assert(coder->filtered == coder->size);
+ return LZMA_STREAM_END;
+ }
+ }
+
+ // If we get here, there is no filtered data left in the buffer.
+ coder->filtered = 0;
+
+ assert(!coder->end_was_reached);
+
+ // If there is more output space left than there is unfiltered data
+ // in coder->buffer[], flush coder->buffer[] to out[], and copy/code
+ // more data to out[] hopefully filling it completely. Then filter
+ // the data in out[]. This step is where most of the data gets
+ // filtered if the buffer sizes used by the application are reasonable.
+ const size_t out_avail = out_size - *out_pos;
+ const size_t buf_avail = coder->size - coder->pos;
+ if (out_avail > buf_avail) {
+ // Store the old position so that we know from which byte
+ // to start filtering.
+ const size_t out_start = *out_pos;
+
+ // Flush data from coder->buffer[] to out[], but don't reset
+ // coder->pos and coder->size yet. This way the coder can be
+ // restarted if the next filter in the chain returns e.g.
+ // LZMA_MEM_ERROR.
+ memcpy(out + *out_pos, coder->buffer + coder->pos, buf_avail);
+ *out_pos += buf_avail;
+
+ // Copy/Encode/Decode more data to out[].
+ {
+ const lzma_ret ret = copy_or_code(coder, allocator,
+ in, in_pos, in_size,
+ out, out_pos, out_size, action);
+ assert(ret != LZMA_STREAM_END);
+ if (ret != LZMA_OK)
+ return ret;
+ }
+
+ // Filter out[].
+ const size_t size = *out_pos - out_start;
+ const size_t filtered = call_filter(
+ coder, out + out_start, size);
+
+ const size_t unfiltered = size - filtered;
+ assert(unfiltered <= coder->allocated / 2);
+
+ // Now we can update coder->pos and coder->size, because
+ // the next coder in the chain (if any) was successful.
+ coder->pos = 0;
+ coder->size = unfiltered;
+
+ if (coder->end_was_reached) {
+ // The last bytes have been copied to out[] already.
+ // They are left as is.
+ coder->size = 0;
+
+ } else if (unfiltered > 0) {
+ // There is unfiltered data left in out[]. Copy it to
+ // coder->buffer[] and rewind *out_pos appropriately.
+ *out_pos -= unfiltered;
+ memcpy(coder->buffer, out + *out_pos, unfiltered);
+ }
+ } else if (coder->pos > 0) {
+ memmove(coder->buffer, coder->buffer + coder->pos, buf_avail);
+ coder->size -= coder->pos;
+ coder->pos = 0;
+ }
+
+ assert(coder->pos == 0);
+
+ // If coder->buffer[] isn't empty, try to fill it by copying/decoding
+ // more data. Then filter coder->buffer[] and copy the successfully
+ // filtered data to out[]. It is probable that some filtered and
+ // unfiltered data will be left in coder->buffer[].
+ if (coder->size > 0) {
+ {
+ const lzma_ret ret = copy_or_code(coder, allocator,
+ in, in_pos, in_size,
+ coder->buffer, &coder->size,
+ coder->allocated, action);
+ assert(ret != LZMA_STREAM_END);
+ if (ret != LZMA_OK)
+ return ret;
+ }
+
+ coder->filtered = call_filter(
+ coder, coder->buffer, coder->size);
+
+ // Everything is considered to be filtered if coder->buffer[]
+ // contains the last bytes of the data.
+ if (coder->end_was_reached)
+ coder->filtered = coder->size;
+
+ // Flush as much as possible.
+ lzma_bufcpy(coder->buffer, &coder->pos, coder->filtered,
+ out, out_pos, out_size);
+ }
+
+ // Check if we got everything done.
+ if (coder->end_was_reached && coder->pos == coder->size)
+ return LZMA_STREAM_END;
+
+ return LZMA_OK;
+}
+
+
+static void
+simple_coder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+ lzma_free(coder->simple, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+extern lzma_ret
+lzma_simple_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters,
+ size_t (*filter)(lzma_simple *simple, uint32_t now_pos,
+ bool is_encoder, uint8_t *buffer, size_t size),
+ size_t simple_size, size_t unfiltered_max,
+ uint32_t alignment, bool is_encoder)
+{
+ // Allocate memory for the lzma_coder structure if needed.
+ if (next->coder == NULL) {
+ // Here we also allocate space for the temporary buffer. We
+ // need twice the size of unfiltered_max, because then it
+ // is always possible to filter at least unfiltered_max bytes
+ // more data in coder->buffer[] if it can be filled completely.
+ next->coder = lzma_alloc(sizeof(lzma_coder)
+ + 2 * unfiltered_max, allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &simple_code;
+ next->end = &simple_coder_end;
+
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ next->coder->filter = filter;
+ next->coder->allocated = 2 * unfiltered_max;
+
+ // Allocate memory for filter-specific data structure.
+ if (simple_size > 0) {
+ next->coder->simple = lzma_alloc(
+ simple_size, allocator);
+ if (next->coder->simple == NULL)
+ return LZMA_MEM_ERROR;
+ } else {
+ next->coder->simple = NULL;
+ }
+ }
+
+ if (filters[0].options != NULL) {
+ const lzma_options_bcj *simple = filters[0].options;
+ next->coder->now_pos = simple->start_offset;
+ if (next->coder->now_pos & (alignment - 1))
+ return LZMA_OPTIONS_ERROR;
+ } else {
+ next->coder->now_pos = 0;
+ }
+
+ // Reset variables.
+ next->coder->is_encoder = is_encoder;
+ next->coder->end_was_reached = false;
+ next->coder->pos = 0;
+ next->coder->filtered = 0;
+ next->coder->size = 0;
+
+ return lzma_next_filter_init(
+ &next->coder->next, allocator, filters + 1);
+}
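
All the buffer juggling in simple_code() serves a single contract: the filter callback converts only complete instruction-sized units and returns how many leading bytes it handled, so the caller must carry the unhandled tail into the next call. The standalone toy below shows that contract in isolation; the xor4 "filter" is invented for this sketch and is not part of liblzma.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy filter: like the real simple filters it only touches complete
 * 4-byte units and returns how many leading bytes it processed. */
static size_t
xor4(uint8_t *buffer, size_t size)
{
    size_t i;
    for (i = 0; i + 4 <= size; i += 4)
        for (size_t j = 0; j < 4; ++j)
            buffer[i + j] ^= 0xFF;
    return i;
}

int main(void)
{
    uint8_t chunk1[6] = { 1, 2, 3, 4, 5, 6 };
    uint8_t chunk2[4] = { 7, 8, 9, 10 };
    uint8_t buffer[16];

    /* First call: only 4 of the 6 bytes form a complete unit. */
    const size_t filtered1 = xor4(chunk1, sizeof(chunk1));
    const size_t carry = sizeof(chunk1) - filtered1;

    /* Carry the unfiltered tail over, append the next chunk, retry. */
    memcpy(buffer, chunk1 + filtered1, carry);
    memcpy(buffer + carry, chunk2, sizeof(chunk2));
    const size_t filtered2 = xor4(buffer, carry + sizeof(chunk2));

    printf("pass 1: filtered %zu, carried %zu; pass 2: filtered %zu\n",
            filtered1, carry, filtered2);

    return 0;
}
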
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_coder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_coder.h
new file mode 100644
index 00000000..0fcecfab
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_coder.h
@@ -0,0 +1,62 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file simple_coder.h
+/// \brief Wrapper for simple filters
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_SIMPLE_CODER_H
+#define LZMA_SIMPLE_CODER_H
+
+#include "common.h"
+
+
+extern lzma_ret lzma_simple_x86_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+extern lzma_ret lzma_simple_x86_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+
+extern lzma_ret lzma_simple_powerpc_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+extern lzma_ret lzma_simple_powerpc_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+
+extern lzma_ret lzma_simple_ia64_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+extern lzma_ret lzma_simple_ia64_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+
+extern lzma_ret lzma_simple_arm_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+extern lzma_ret lzma_simple_arm_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+
+extern lzma_ret lzma_simple_armthumb_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+extern lzma_ret lzma_simple_armthumb_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+
+extern lzma_ret lzma_simple_sparc_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+extern lzma_ret lzma_simple_sparc_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_decoder.c
new file mode 100644
index 00000000..48610af9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_decoder.c
@@ -0,0 +1,42 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file simple_decoder.c
+/// \brief Properties decoder for simple filters
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "simple_decoder.h"
+
+
+extern lzma_ret
+lzma_simple_props_decode(void **options, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size)
+{
+ if (props_size == 0)
+ return LZMA_OK;
+
+ if (props_size != 4)
+ return LZMA_OPTIONS_ERROR;
+
+ lzma_options_bcj *opt = lzma_alloc(
+ sizeof(lzma_options_bcj), allocator);
+ if (opt == NULL)
+ return LZMA_MEM_ERROR;
+
+ opt->start_offset = integer_read_32(props);
+
+ // Don't leave an options structure allocated if start_offset is zero.
+ if (opt->start_offset == 0)
+ lzma_free(opt, allocator);
+ else
+ *options = opt;
+
+ return LZMA_OK;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_decoder.h
new file mode 100644
index 00000000..018c93be
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_decoder.h
@@ -0,0 +1,24 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file simple_decoder.h
+/// \brief Properties decoder for simple filters
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_SIMPLE_DECODER_H
+#define LZMA_SIMPLE_DECODER_H
+
+#include "simple_coder.h"
+
+extern lzma_ret lzma_simple_props_decode(
+ void **options, lzma_allocator *allocator,
+ const uint8_t *props, size_t props_size);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_encoder.c
new file mode 100644
index 00000000..8b767139
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_encoder.c
@@ -0,0 +1,40 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file simple_encoder.c
+/// \brief Properties encoder for simple filters
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "simple_encoder.h"
+
+
+extern lzma_ret
+lzma_simple_props_size(uint32_t *size, const void *options)
+{
+ const lzma_options_bcj *const opt = options;
+ *size = (opt == NULL || opt->start_offset == 0) ? 0 : 4;
+ return LZMA_OK;
+}
+
+
+extern lzma_ret
+lzma_simple_props_encode(const void *options, uint8_t *out)
+{
+ const lzma_options_bcj *const opt = options;
+
+ // The default start offset is zero, so we don't need to store any
+ // options unless the start offset is non-zero.
+ if (opt == NULL || opt->start_offset == 0)
+ return LZMA_OK;
+
+ integer_write_32(out, opt->start_offset);
+
+ return LZMA_OK;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_encoder.h
new file mode 100644
index 00000000..3c21efe0
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_encoder.h
@@ -0,0 +1,25 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file simple_encoder.h
+/// \brief Properties encoder for simple filters
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_SIMPLE_ENCODER_H
+#define LZMA_SIMPLE_ENCODER_H
+
+#include "simple_coder.h"
+
+
+extern lzma_ret lzma_simple_props_size(uint32_t *size, const void *options);
+
+extern lzma_ret lzma_simple_props_encode(const void *options, uint8_t *out);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_private.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_private.h
new file mode 100644
index 00000000..d5d10e96
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/simple_private.h
@@ -0,0 +1,78 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file simple_private.h
+/// \brief Private definitions for so-called simple filters
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_SIMPLE_PRIVATE_H
+#define LZMA_SIMPLE_PRIVATE_H
+
+#include "simple_coder.h"
+
+
+typedef struct lzma_simple_s lzma_simple;
+
+struct lzma_coder_s {
+ /// Next filter in the chain
+ lzma_next_coder next;
+
+ /// True if the next coder in the chain has returned LZMA_STREAM_END
+ /// or if we have processed uncompressed_size bytes.
+ bool end_was_reached;
+
+ /// True if filter() should encode the data; false to decode.
+ /// Currently all simple filters use the same function for encoding
+ /// and decoding, because the difference between encoders and decoders
+ /// is very small.
+ bool is_encoder;
+
+ /// Pointer to filter-specific function, which does
+ /// the actual filtering.
+ size_t (*filter)(lzma_simple *simple, uint32_t now_pos,
+ bool is_encoder, uint8_t *buffer, size_t size);
+
+ /// Pointer to filter-specific data, or NULL if filter doesn't need
+ /// any extra data.
+ lzma_simple *simple;
+
+ /// The lowest 32 bits of the current position in the data. Most
+ /// filters need this to do conversions between absolute and relative
+ /// addresses.
+ uint32_t now_pos;
+
+ /// Size of the memory allocated for the buffer.
+ size_t allocated;
+
+ /// Flushing position in the temporary buffer. buffer[pos] is the
+ /// next byte to be copied to out[].
+ size_t pos;
+
+ /// buffer[filtered] is the first unfiltered byte. When pos is smaller
+ /// than filtered, there is unflushed filtered data in the buffer.
+ size_t filtered;
+
+ /// Total number of bytes (both filtered and unfiltered) currently
+ /// in the temporary buffer.
+ size_t size;
+
+ /// Temporary buffer
+ uint8_t buffer[];
+};
+
+
+extern lzma_ret lzma_simple_coder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters,
+ size_t (*filter)(lzma_simple *simple, uint32_t now_pos,
+ bool is_encoder, uint8_t *buffer, size_t size),
+ size_t simple_size, size_t unfiltered_max,
+ uint32_t alignment, bool is_encoder);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/sparc.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/sparc.c
new file mode 100644
index 00000000..c17f8287
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/sparc.c
@@ -0,0 +1,83 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file sparc.c
+/// \brief Filter for SPARC binaries
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "simple_private.h"
+
+
+static size_t
+sparc_code(lzma_simple *simple lzma_attribute((unused)),
+ uint32_t now_pos, bool is_encoder,
+ uint8_t *buffer, size_t size)
+{
+ size_t i;
+ for (i = 0; i + 4 <= size; i += 4) {
+
+ if ((buffer[i] == 0x40 && (buffer[i + 1] & 0xC0) == 0x00)
+ || (buffer[i] == 0x7F
+ && (buffer[i + 1] & 0xC0) == 0xC0)) {
+
+ uint32_t src = ((uint32_t)buffer[i + 0] << 24)
+ | ((uint32_t)buffer[i + 1] << 16)
+ | ((uint32_t)buffer[i + 2] << 8)
+ | ((uint32_t)buffer[i + 3]);
+
+ src <<= 2;
+
+ uint32_t dest;
+ if (is_encoder)
+ dest = now_pos + (uint32_t)(i) + src;
+ else
+ dest = src - (now_pos + (uint32_t)(i));
+
+ dest >>= 2;
+
+ dest = (((0 - ((dest >> 22) & 1)) << 22) & 0x3FFFFFFF)
+ | (dest & 0x3FFFFF)
+ | 0x40000000;
+
+ buffer[i + 0] = (uint8_t)(dest >> 24);
+ buffer[i + 1] = (uint8_t)(dest >> 16);
+ buffer[i + 2] = (uint8_t)(dest >> 8);
+ buffer[i + 3] = (uint8_t)(dest);
+ }
+ }
+
+ return i;
+}
+
+
+static lzma_ret
+sparc_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters, bool is_encoder)
+{
+ return lzma_simple_coder_init(next, allocator, filters,
+ &sparc_code, 0, 4, 4, is_encoder);
+}
+
+
+extern lzma_ret
+lzma_simple_sparc_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters)
+{
+ return sparc_coder_init(next, allocator, filters, true);
+}
+
+
+extern lzma_ret
+lzma_simple_sparc_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters)
+{
+ return sparc_coder_init(next, allocator, filters, false);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/x86.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/x86.c
new file mode 100644
index 00000000..d01beafc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/simple/x86.c
@@ -0,0 +1,156 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file x86.c
+/// \brief Filter for x86 binaries (BCJ filter)
+///
+// Authors: Igor Pavlov
+// Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "simple_private.h"
+
+
+#define Test86MSByte(b) ((b) == 0 || (b) == 0xFF)
+
+
+struct lzma_simple_s {
+ uint32_t prev_mask;
+ uint32_t prev_pos;
+};
+
+
+static size_t
+x86_code(lzma_simple *simple, uint32_t now_pos, bool is_encoder,
+ uint8_t *buffer, size_t size)
+{
+ static const bool MASK_TO_ALLOWED_STATUS[8]
+ = { true, true, true, false, true, false, false, false };
+
+ static const uint32_t MASK_TO_BIT_NUMBER[8]
+ = { 0, 1, 2, 2, 3, 3, 3, 3 };
+
+ uint32_t prev_mask = simple->prev_mask;
+ uint32_t prev_pos = simple->prev_pos;
+
+ if (size < 5)
+ return 0;
+
+ if (now_pos - prev_pos > 5)
+ prev_pos = now_pos - 5;
+
+ const size_t limit = size - 5;
+ size_t buffer_pos = 0;
+
+ while (buffer_pos <= limit) {
+ uint8_t b = buffer[buffer_pos];
+ if (b != 0xE8 && b != 0xE9) {
+ ++buffer_pos;
+ continue;
+ }
+
+ const uint32_t offset = now_pos + (uint32_t)(buffer_pos)
+ - prev_pos;
+ prev_pos = now_pos + (uint32_t)(buffer_pos);
+
+ if (offset > 5) {
+ prev_mask = 0;
+ } else {
+ for (uint32_t i = 0; i < offset; ++i) {
+ prev_mask &= 0x77;
+ prev_mask <<= 1;
+ }
+ }
+
+ b = buffer[buffer_pos + 4];
+
+ if (Test86MSByte(b)
+ && MASK_TO_ALLOWED_STATUS[(prev_mask >> 1) & 0x7]
+ && (prev_mask >> 1) < 0x10) {
+
+ uint32_t src = ((uint32_t)(b) << 24)
+ | ((uint32_t)(buffer[buffer_pos + 3]) << 16)
+ | ((uint32_t)(buffer[buffer_pos + 2]) << 8)
+ | (buffer[buffer_pos + 1]);
+
+ uint32_t dest;
+ while (true) {
+ if (is_encoder)
+ dest = src + (now_pos + (uint32_t)(
+ buffer_pos) + 5);
+ else
+ dest = src - (now_pos + (uint32_t)(
+ buffer_pos) + 5);
+
+ if (prev_mask == 0)
+ break;
+
+ const uint32_t i = MASK_TO_BIT_NUMBER[
+ prev_mask >> 1];
+
+ b = (uint8_t)(dest >> (24 - i * 8));
+
+ if (!Test86MSByte(b))
+ break;
+
+ src = dest ^ ((1 << (32 - i * 8)) - 1);
+ }
+
+ buffer[buffer_pos + 4]
+ = (uint8_t)(~(((dest >> 24) & 1) - 1));
+ buffer[buffer_pos + 3] = (uint8_t)(dest >> 16);
+ buffer[buffer_pos + 2] = (uint8_t)(dest >> 8);
+ buffer[buffer_pos + 1] = (uint8_t)(dest);
+ buffer_pos += 5;
+ prev_mask = 0;
+
+ } else {
+ ++buffer_pos;
+ prev_mask |= 1;
+ if (Test86MSByte(b))
+ prev_mask |= 0x10;
+ }
+ }
+
+ simple->prev_mask = prev_mask;
+ simple->prev_pos = prev_pos;
+
+ return buffer_pos;
+}
+
+
+static lzma_ret
+x86_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters, bool is_encoder)
+{
+ const lzma_ret ret = lzma_simple_coder_init(next, allocator, filters,
+ &x86_code, sizeof(lzma_simple), 5, 1, is_encoder);
+
+ if (ret == LZMA_OK) {
+ next->coder->simple->prev_mask = 0;
+ next->coder->simple->prev_pos = (uint32_t)(-5);
+ }
+
+ return ret;
+}
+
+
+extern lzma_ret
+lzma_simple_x86_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ return x86_coder_init(next, allocator, filters, true);
+}
+
+
+extern lzma_ret
+lzma_simple_x86_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ return x86_coder_init(next, allocator, filters, false);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/Makefile.inc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/Makefile.inc
new file mode 100644
index 00000000..a4710cc5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/Makefile.inc
@@ -0,0 +1,20 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+if COND_ENCODER_SUBBLOCK
+liblzma_la_SOURCES += \
+ subblock/subblock_encoder.c \
+ subblock/subblock_encoder.h
+endif
+
+if COND_DECODER_SUBBLOCK
+liblzma_la_SOURCES += \
+ subblock/subblock_decoder.c \
+ subblock/subblock_decoder.h \
+ subblock/subblock_decoder_helper.c \
+ subblock/subblock_decoder_helper.h
+endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder.c
new file mode 100644
index 00000000..3a4daee9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder.c
@@ -0,0 +1,632 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file subblock_decoder.c
+/// \brief Decoder of the Subblock filter
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "subblock_decoder.h"
+#include "subblock_decoder_helper.h"
+#include "filter_decoder.h"
+
+
+/// Maximum number of consecutive Subblocks with Subblock Type Padding
+#define PADDING_MAX 31
+
+
+struct lzma_coder_s {
+ lzma_next_coder next;
+
+ enum {
+ // These require that there is at least one input
+ // byte available.
+ SEQ_FLAGS,
+ SEQ_FILTER_FLAGS,
+ SEQ_FILTER_END,
+ SEQ_REPEAT_COUNT_1,
+ SEQ_REPEAT_COUNT_2,
+ SEQ_REPEAT_COUNT_3,
+ SEQ_REPEAT_SIZE,
+ SEQ_REPEAT_READ_DATA,
+ SEQ_SIZE_1,
+ SEQ_SIZE_2,
+ SEQ_SIZE_3, // This must be right before SEQ_DATA.
+
+ // These don't require any input to be available.
+ SEQ_DATA,
+ SEQ_REPEAT_FAST,
+ SEQ_REPEAT_NORMAL,
+ } sequence;
+
+ /// Number of bytes left in the current Subblock Data field.
+ size_t size;
+
+ /// Number of consecutive Subblocks with Subblock Type Padding
+ uint32_t padding;
+
+ /// True when .next.code() has returned LZMA_STREAM_END.
+ bool next_finished;
+
+ /// True when the Subblock decoder has detected End of Payload Marker.
+ /// This may become true before next_finished becomes true.
+ bool this_finished;
+
+ /// True if Subfilters are allowed.
+ bool allow_subfilters;
+
+ /// Indicates if at least one byte of decoded output has been
+ /// produced after enabling Subfilter.
+ bool got_output_with_subfilter;
+
+ /// Possible subfilter
+ lzma_next_coder subfilter;
+
+ /// Filter Flags decoder is needed to parse the ID and Properties
+ /// of the subfilter.
+ lzma_next_coder filter_flags_decoder;
+
+ /// The filter_flags_decoder stores its results here.
+ lzma_filter filter_flags;
+
+ /// Options for the Subblock decoder helper. This is used to tell
+ /// the helper when it should return LZMA_STREAM_END to the subfilter.
+ lzma_options_subblock_helper helper;
+
+ struct {
+ /// How many times buffer should be repeated
+ size_t count;
+
+ /// Size of the buffer
+ size_t size;
+
+ /// Position in the buffer
+ size_t pos;
+
+ /// Buffer to hold the data to be repeated
+ uint8_t buffer[LZMA_SUBBLOCK_RLE_MAX];
+ } repeat;
+
+ /// Temporary buffer needed when the Subblock filter is not the last
+ /// filter in the chain. The output of the next filter is first
+ /// decoded into buffer[], which is then used as input for the actual
+ /// Subblock decoder.
+ struct {
+ size_t pos;
+ size_t size;
+ uint8_t buffer[LZMA_BUFFER_SIZE];
+ } temp;
+};
+
+
+/// Values of valid Subblock Flags
+enum {
+ FLAG_PADDING,
+ FLAG_EOPM,
+ FLAG_DATA,
+ FLAG_REPEAT,
+ FLAG_SET_SUBFILTER,
+ FLAG_END_SUBFILTER,
+};
+
+
+/// Calls the subfilter and returns its result.
+static lzma_ret
+subfilter_decode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ assert(coder->subfilter.code != NULL);
+
+ // Call the subfilter.
+ const lzma_ret ret = coder->subfilter.code(
+ coder->subfilter.coder, allocator,
+ in, in_pos, in_size, out, out_pos, out_size, action);
+
+ return ret;
+}
+
+
+static lzma_ret
+decode_buffer(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *in, size_t *in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ while (*out_pos < out_size && (*in_pos < in_size
+ || coder->sequence >= SEQ_DATA))
+ switch (coder->sequence) {
+ case SEQ_FLAGS: {
+ // Do the correct action depending on the Subblock Type.
+ switch (in[*in_pos] >> 4) {
+ case FLAG_PADDING:
+ // Only check that reserved bits are zero.
+ if (++coder->padding > PADDING_MAX
+ || in[*in_pos] & 0x0F)
+ return LZMA_DATA_ERROR;
+ ++*in_pos;
+ break;
+
+ case FLAG_EOPM:
+ // There must be no Padding before EOPM.
+ if (coder->padding != 0)
+ return LZMA_DATA_ERROR;
+
+ // Check that reserved bits are zero.
+ if (in[*in_pos] & 0x0F)
+ return LZMA_DATA_ERROR;
+
+ // There must be no Subfilter enabled.
+ if (coder->subfilter.code != NULL)
+ return LZMA_DATA_ERROR;
+
+ ++*in_pos;
+ return LZMA_STREAM_END;
+
+ case FLAG_DATA:
+ // First four bits of the Subblock Data size.
+ coder->size = in[*in_pos] & 0x0F;
+ ++*in_pos;
+ coder->got_output_with_subfilter = true;
+ coder->sequence = SEQ_SIZE_1;
+ break;
+
+ case FLAG_REPEAT:
+ // First four bits of the Repeat Count. We use
+ // coder->size as a temporary place for it.
+ coder->size = in[*in_pos] & 0x0F;
+ ++*in_pos;
+ coder->got_output_with_subfilter = true;
+ coder->sequence = SEQ_REPEAT_COUNT_1;
+ break;
+
+ case FLAG_SET_SUBFILTER: {
+ if (coder->padding != 0 || (in[*in_pos] & 0x0F)
+ || coder->subfilter.code != NULL
+ || !coder->allow_subfilters)
+ return LZMA_DATA_ERROR;
+
+ assert(coder->filter_flags.options == NULL);
+ abort();
+// return_if_error(lzma_filter_flags_decoder_init(
+// &coder->filter_flags_decoder,
+// allocator, &coder->filter_flags));
+
+ coder->got_output_with_subfilter = false;
+
+ ++*in_pos;
+ coder->sequence = SEQ_FILTER_FLAGS;
+ break;
+ }
+
+ case FLAG_END_SUBFILTER: {
+ if (coder->padding != 0 || (in[*in_pos] & 0x0F)
+ || coder->subfilter.code == NULL
+ || !coder->got_output_with_subfilter)
+ return LZMA_DATA_ERROR;
+
+ // Tell the helper filter to indicate End of Input
+ // to our subfilter.
+ coder->helper.end_was_reached = true;
+
+ size_t dummy = 0;
+ const lzma_ret ret = subfilter_decode(coder, allocator,
+ NULL, &dummy, 0, out, out_pos,out_size,
+ action);
+
+ // If we didn't reach the end of the subfilter's output
+ // yet, return to the application. On the next call we
+ // will get to this same switch-case again, because we
+ // haven't updated *in_pos yet.
+ if (ret != LZMA_STREAM_END)
+ return ret;
+
+ // Free Subfilter's memory. This is a bit debatable,
+ // since we could avoid some malloc()/free() calls
+ // if the same Subfilter gets used soon again. But
+ // if Subfilter isn't used again, we could leave
+ // a memory-hogging filter dangling until someone
+ // frees Subblock filter itself.
+ lzma_next_end(&coder->subfilter, allocator);
+
+ // Free memory used for subfilter options. This is
+ // safe, because we don't support any Subfilter that
+ // would allow pointers in the options structure.
+ lzma_free(coder->filter_flags.options, allocator);
+ coder->filter_flags.options = NULL;
+
+ ++*in_pos;
+
+ break;
+ }
+
+ default:
+ return LZMA_DATA_ERROR;
+ }
+
+ break;
+ }
+
+ case SEQ_FILTER_FLAGS: {
+ const lzma_ret ret = coder->filter_flags_decoder.code(
+ coder->filter_flags_decoder.coder, allocator,
+ in, in_pos, in_size, NULL, NULL, 0, LZMA_RUN);
+ if (ret != LZMA_STREAM_END)
+ return ret == LZMA_OPTIONS_ERROR
+ ? LZMA_DATA_ERROR : ret;
+
+ // Don't free the filter_flags_decoder. It doesn't take much
+ // memory and we may need it again.
+
+ // Initialize the Subfilter. Subblock and Copy filters are
+ // not allowed.
+ if (coder->filter_flags.id == LZMA_FILTER_SUBBLOCK)
+ return LZMA_DATA_ERROR;
+
+ coder->helper.end_was_reached = false;
+
+ lzma_filter filters[3] = {
+ {
+ .id = coder->filter_flags.id,
+ .options = coder->filter_flags.options,
+ }, {
+ .id = LZMA_FILTER_SUBBLOCK_HELPER,
+ .options = &coder->helper,
+ }, {
+ .id = LZMA_VLI_UNKNOWN,
+ .options = NULL,
+ }
+ };
+
+ // Optimization: We know that LZMA uses End of Payload Marker
+ // (not End of Input), so we can omit the helper filter.
+ if (filters[0].id == LZMA_FILTER_LZMA1)
+ filters[1].id = LZMA_VLI_UNKNOWN;
+
+ return_if_error(lzma_raw_decoder_init(
+ &coder->subfilter, allocator, filters));
+
+ coder->sequence = SEQ_FLAGS;
+ break;
+ }
+
+ case SEQ_FILTER_END:
+ // We are at the beginning of a Subblock. The next Subblock
+ // whose type is not Padding must indicate the end of the Subfilter.
+ if (in[*in_pos] == (FLAG_PADDING << 4)) {
+ ++*in_pos;
+ break;
+ }
+
+ if (in[*in_pos] != (FLAG_END_SUBFILTER << 4))
+ return LZMA_DATA_ERROR;
+
+ coder->sequence = SEQ_FLAGS;
+ break;
+
+ case SEQ_REPEAT_COUNT_1:
+ case SEQ_SIZE_1:
+ // We use the same code to parse
+ // - the Size (28 bits) in Subblocks of type Data; and
+ // - the Repeat count (28 bits) in Subblocks of type
+ // Repeating Data.
+ coder->size |= (size_t)(in[*in_pos]) << 4;
+ ++*in_pos;
+ ++coder->sequence;
+ break;
+
+ case SEQ_REPEAT_COUNT_2:
+ case SEQ_SIZE_2:
+ coder->size |= (size_t)(in[*in_pos]) << 12;
+ ++*in_pos;
+ ++coder->sequence;
+ break;
+
+ case SEQ_REPEAT_COUNT_3:
+ case SEQ_SIZE_3:
+ coder->size |= (size_t)(in[*in_pos]) << 20;
+ ++*in_pos;
+
+ // The real value is the stored value plus one.
+ ++coder->size;
+
+ // This moves to SEQ_REPEAT_SIZE or SEQ_DATA. That's why
+ // SEQ_DATA must be right after SEQ_SIZE_3 in coder->sequence.
+ ++coder->sequence;
+ break;
+
+ case SEQ_REPEAT_SIZE:
+ // Move the Repeat Count to the correct variable and parse
+ // the Size of the Data to be repeated.
+ coder->repeat.count = coder->size;
+ coder->repeat.size = (size_t)(in[*in_pos]) + 1;
+ coder->repeat.pos = 0;
+
+ // The size of the Data field must be bigger than the number
+ // of Padding bytes before this Subblock.
+ if (coder->repeat.size <= coder->padding)
+ return LZMA_DATA_ERROR;
+
+ ++*in_pos;
+ coder->padding = 0;
+ coder->sequence = SEQ_REPEAT_READ_DATA;
+ break;
+
+ case SEQ_REPEAT_READ_DATA: {
+ // Fill coder->repeat.buffer[].
+ const size_t in_avail = in_size - *in_pos;
+ const size_t out_avail
+ = coder->repeat.size - coder->repeat.pos;
+ const size_t copy_size = MIN(in_avail, out_avail);
+
+ memcpy(coder->repeat.buffer + coder->repeat.pos,
+ in + *in_pos, copy_size);
+ *in_pos += copy_size;
+ coder->repeat.pos += copy_size;
+
+ if (coder->repeat.pos == coder->repeat.size) {
+ coder->repeat.pos = 0;
+
+ if (coder->repeat.size == 1
+ && coder->subfilter.code == NULL)
+ coder->sequence = SEQ_REPEAT_FAST;
+ else
+ coder->sequence = SEQ_REPEAT_NORMAL;
+ }
+
+ break;
+ }
+
+ case SEQ_DATA: {
+ // The size of the Data field must be bigger than the number
+ // of Padding bytes before this Subblock.
+ assert(coder->size > 0);
+ if (coder->size <= coder->padding)
+ return LZMA_DATA_ERROR;
+
+ coder->padding = 0;
+
+ // Limit the amount of input to match the available
+ // Subblock Data size.
+ size_t in_limit;
+ if (in_size - *in_pos > coder->size)
+ in_limit = *in_pos + coder->size;
+ else
+ in_limit = in_size;
+
+ if (coder->subfilter.code == NULL) {
+ const size_t copy_size = lzma_bufcpy(
+ in, in_pos, in_limit,
+ out, out_pos, out_size);
+
+ coder->size -= copy_size;
+ } else {
+ const size_t in_start = *in_pos;
+ const lzma_ret ret = subfilter_decode(
+ coder, allocator,
+ in, in_pos, in_limit,
+ out, out_pos, out_size,
+ action);
+
+ // Update the number of unprocessed bytes left in
+ // this Subblock. This assert() is true because
+ // in_limit prevents *in_pos getting too big.
+ assert(*in_pos - in_start <= coder->size);
+ coder->size -= *in_pos - in_start;
+
+ if (ret == LZMA_STREAM_END) {
+ // End of Subfilter can occur only at
+ // a Subblock boundary.
+ if (coder->size != 0)
+ return LZMA_DATA_ERROR;
+
+ // We need a Subblock with Unset
+ // Subfilter before more data.
+ coder->sequence = SEQ_FILTER_END;
+ break;
+ }
+
+ if (ret != LZMA_OK)
+ return ret;
+ }
+
+ // If we couldn't process the whole Subblock Data yet, return.
+ if (coder->size > 0)
+ return LZMA_OK;
+
+ coder->sequence = SEQ_FLAGS;
+ break;
+ }
+
+ case SEQ_REPEAT_FAST: {
+ // Optimization for cases when there is only one byte to
+ // repeat and no Subfilter.
+ const size_t out_avail = out_size - *out_pos;
+ const size_t copy_size = MIN(coder->repeat.count, out_avail);
+
+ memset(out + *out_pos, coder->repeat.buffer[0], copy_size);
+
+ *out_pos += copy_size;
+ coder->repeat.count -= copy_size;
+
+ if (coder->repeat.count != 0)
+ return LZMA_OK;
+
+ coder->sequence = SEQ_FLAGS;
+ break;
+ }
+
+ case SEQ_REPEAT_NORMAL:
+ do {
+ // Cycle the repeat buffer if needed.
+ if (coder->repeat.pos == coder->repeat.size) {
+ if (--coder->repeat.count == 0) {
+ coder->sequence = SEQ_FLAGS;
+ break;
+ }
+
+ coder->repeat.pos = 0;
+ }
+
+ if (coder->subfilter.code == NULL) {
+ lzma_bufcpy(coder->repeat.buffer,
+ &coder->repeat.pos,
+ coder->repeat.size,
+ out, out_pos, out_size);
+ } else {
+ const lzma_ret ret = subfilter_decode(
+ coder, allocator,
+ coder->repeat.buffer,
+ &coder->repeat.pos,
+ coder->repeat.size,
+ out, out_pos, out_size,
+ action);
+
+ if (ret == LZMA_STREAM_END) {
+ // End of Subfilter can occur only at
+ // a Subblock boundary.
+ if (coder->repeat.pos
+ != coder->repeat.size
+ || --coder->repeat
+ .count != 0)
+ return LZMA_DATA_ERROR;
+
+ // We need a Subblock with Unset
+ // Subfilter before more data.
+ coder->sequence = SEQ_FILTER_END;
+ break;
+
+ } else if (ret != LZMA_OK) {
+ return ret;
+ }
+ }
+ } while (*out_pos < out_size);
+
+ break;
+
+ default:
+ return LZMA_PROG_ERROR;
+ }
+
+ return LZMA_OK;
+}
+
+
+static lzma_ret
+subblock_decode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ if (coder->next.code == NULL)
+ return decode_buffer(coder, allocator, in, in_pos, in_size,
+ out, out_pos, out_size, action);
+
+ while (*out_pos < out_size) {
+ if (!coder->next_finished
+ && coder->temp.pos == coder->temp.size) {
+ coder->temp.pos = 0;
+ coder->temp.size = 0;
+
+ const lzma_ret ret = coder->next.code(
+ coder->next.coder,
+ allocator, in, in_pos, in_size,
+ coder->temp.buffer, &coder->temp.size,
+ LZMA_BUFFER_SIZE, action);
+
+ if (ret == LZMA_STREAM_END)
+ coder->next_finished = true;
+ else if (coder->temp.size == 0 || ret != LZMA_OK)
+ return ret;
+ }
+
+ if (coder->this_finished) {
+ if (coder->temp.pos != coder->temp.size)
+ return LZMA_DATA_ERROR;
+
+ if (coder->next_finished)
+ return LZMA_STREAM_END;
+
+ return LZMA_OK;
+ }
+
+ const lzma_ret ret = decode_buffer(coder, allocator,
+ coder->temp.buffer, &coder->temp.pos,
+ coder->temp.size,
+ out, out_pos, out_size, action);
+
+ if (ret == LZMA_STREAM_END)
+ // The next coder in the chain hasn't finished
+ // yet. If the input data is valid, there
+ // must be no more output coming, but the
+ // next coder may still need a little more
+ // input to detect End of Payload Marker.
+ coder->this_finished = true;
+ else if (ret != LZMA_OK)
+ return ret;
+ else if (coder->next_finished && *out_pos < out_size)
+ return LZMA_DATA_ERROR;
+ }
+
+ return LZMA_OK;
+}
+
+
+static void
+subblock_decoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+ lzma_next_end(&coder->subfilter, allocator);
+ lzma_next_end(&coder->filter_flags_decoder, allocator);
+ lzma_free(coder->filter_flags.options, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+extern lzma_ret
+lzma_subblock_decoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &subblock_decode;
+ next->end = &subblock_decoder_end;
+
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ next->coder->subfilter = LZMA_NEXT_CODER_INIT;
+ next->coder->filter_flags_decoder = LZMA_NEXT_CODER_INIT;
+
+ } else {
+ lzma_next_end(&next->coder->subfilter, allocator);
+ lzma_free(next->coder->filter_flags.options, allocator);
+ }
+
+ next->coder->filter_flags.options = NULL;
+
+ next->coder->sequence = SEQ_FLAGS;
+ next->coder->padding = 0;
+ next->coder->next_finished = false;
+ next->coder->this_finished = false;
+ next->coder->temp.pos = 0;
+ next->coder->temp.size = 0;
+
+ if (filters[0].options != NULL)
+ next->coder->allow_subfilters = ((lzma_options_subblock *)(
+ filters[0].options))->allow_subfilters;
+ else
+ next->coder->allow_subfilters = false;
+
+ return lzma_next_filter_init(
+ &next->coder->next, allocator, filters + 1);
+}
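
As an aside, the 28-bit Size / Repeat Count that decode_buffer() assembles across SEQ_FLAGS and SEQ_SIZE_1..SEQ_SIZE_3 (and that the encoder below emits nibble-first as the stored value minus one) can be shown with a minimal standalone sketch. The helper name decode_subblock_size is hypothetical and not part of liblzma; only the bit layout is taken from the code above.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical helper, not part of liblzma: reassemble the 28-bit
     * Size field of a Data Subblock the same way decode_buffer() does:
     * four bits from the flag byte, then three whole bytes (least
     * significant part first), and finally "stored value plus one". */
    static size_t
    decode_subblock_size(const uint8_t header[4])
    {
        size_t size = header[0] & 0x0F;      /* SEQ_FLAGS, FLAG_DATA   */
        size |= (size_t)(header[1]) << 4;    /* SEQ_SIZE_1             */
        size |= (size_t)(header[2]) << 12;   /* SEQ_SIZE_2             */
        size |= (size_t)(header[3]) << 20;   /* SEQ_SIZE_3             */
        return size + 1;                     /* real value = stored + 1 */
    }

    int
    main(void)
    {
        /* 0x2F = Data Subblock indicator (0x20) with low size nibble 0xF;
         * the all-ones stored value decodes to the maximum of 1 << 28. */
        const uint8_t header[4] = { 0x2F, 0xFF, 0xFF, 0xFF };
        assert(decode_subblock_size(header) == (1U << 28));
        return 0;
    }
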
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder.h
new file mode 100644
index 00000000..28a0b319
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder.h
@@ -0,0 +1,24 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file subblock_decoder.h
+/// \brief Decoder of the Subblock filter
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_SUBBLOCK_DECODER_H
+#define LZMA_SUBBLOCK_DECODER_H
+
+#include "common.h"
+
+
+extern lzma_ret lzma_subblock_decoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder_helper.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder_helper.c
new file mode 100644
index 00000000..c797c274
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder_helper.c
@@ -0,0 +1,72 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file subblock_decoder_helper.c
+/// \brief Helper filter for the Subblock decoder
+///
+/// This filter is used to indicate End of Input for subfilters needing it.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "subblock_decoder_helper.h"
+
+
+struct lzma_coder_s {
+ const lzma_options_subblock_helper *options;
+};
+
+
+static lzma_ret
+helper_decode(lzma_coder *coder,
+ lzma_allocator *allocator lzma_attribute((unused)),
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size,
+ lzma_action action lzma_attribute((unused)))
+{
+ // If end_was_reached is true, we cannot have any input.
+ assert(!coder->options->end_was_reached || *in_pos == in_size);
+
+ // We can safely copy as much as possible, because we are never
+ // given more data than a single Subblock Data field.
+ lzma_bufcpy(in, in_pos, in_size, out, out_pos, out_size);
+
+ // Return LZMA_STREAM_END when instructed so by the Subblock decoder.
+ return coder->options->end_was_reached ? LZMA_STREAM_END : LZMA_OK;
+}
+
+
+static void
+helper_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+extern lzma_ret
+lzma_subblock_decoder_helper_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters)
+{
+ // This is always the last filter in the chain.
+ assert(filters[1].init == NULL);
+
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &helper_decode;
+ next->end = &helper_end;
+ }
+
+ next->coder->options = filters[0].options;
+
+ return LZMA_OK;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder_helper.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder_helper.h
new file mode 100644
index 00000000..23c17238
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_decoder_helper.h
@@ -0,0 +1,31 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file subblock_decoder_helper.h
+/// \brief Helper filter for the Subblock decoder
+///
+/// This filter is used to indicate End of Input for subfilters needing it.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_SUBBLOCK_DECODER_HELPER_H
+#define LZMA_SUBBLOCK_DECODER_HELPER_H
+
+#include "common.h"
+
+
+typedef struct {
+ bool end_was_reached;
+} lzma_options_subblock_helper;
+
+
+extern lzma_ret lzma_subblock_decoder_helper_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+#endif
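
The helper above exists purely for two-way signalling: the Subblock decoder embeds an lzma_options_subblock_helper in its own coder, passes its address as the helper filter's options (see the filters[] array in subblock_decoder.c), and later raises end_was_reached so that helper_decode() starts returning LZMA_STREAM_END to the Subfilter. A minimal standalone sketch of that shared-flag pattern follows; helper_options and helper_step are hypothetical names that only mimic the pass-through behaviour shown above.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Mirrors lzma_options_subblock_helper from the header above. */
    typedef struct {
        bool end_was_reached;
    } helper_options;

    /* Hypothetical stand-in for helper_decode(): copy what fits and report
     * "stream end" (1) only once the owner of the shared struct says so. */
    static int
    helper_step(const helper_options *opt,
            const uint8_t *in, size_t *in_pos, size_t in_size,
            uint8_t *out, size_t *out_pos, size_t out_size)
    {
        const size_t in_avail = in_size - *in_pos;
        const size_t out_avail = out_size - *out_pos;
        const size_t copy_size = in_avail < out_avail ? in_avail : out_avail;

        memcpy(out + *out_pos, in + *in_pos, copy_size);
        *in_pos += copy_size;
        *out_pos += copy_size;

        return opt->end_was_reached ? 1 /* LZMA_STREAM_END */ : 0 /* LZMA_OK */;
    }

    int
    main(void)
    {
        helper_options shared = { .end_was_reached = false };
        const uint8_t in[4] = { 1, 2, 3, 4 };
        uint8_t out[4];
        size_t in_pos = 0, out_pos = 0;

        /* While the Subblock decoder keeps feeding Subblock Data... */
        assert(helper_step(&shared, in, &in_pos, sizeof(in),
                out, &out_pos, sizeof(out)) == 0);

        /* ...until it sees FLAG_END_SUBFILTER and flips the shared flag. */
        shared.end_was_reached = true;
        assert(helper_step(&shared, in, &in_pos, sizeof(in),
                out, &out_pos, sizeof(out)) == 1);
        return 0;
    }
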
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_encoder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_encoder.c
new file mode 100644
index 00000000..da598e28
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_encoder.c
@@ -0,0 +1,986 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file subblock_encoder.c
+/// \brief Encoder of the Subblock filter
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "subblock_encoder.h"
+#include "filter_encoder.h"
+
+
+/// Maximum number of repeats that a single Repeating Data can indicate.
+/// This is directly from the file format specification.
+#define REPEAT_COUNT_MAX (1U << 28)
+
+/// Number of bytes the data chunk (not including the header part) must be
+/// before we care about alignment. This is somewhat arbitrary. It just
+/// doesn't make sense to waste bytes for alignment when the data chunk
+/// is very small.
+#define MIN_CHUNK_SIZE_FOR_ALIGN 4
+
+/// Number of bytes of the header part of Subblock Type `Data'. This is
+/// used as the `skew' argument for subblock_align().
+#define ALIGN_SKEW_DATA 4
+
+/// Like above but for Repeating Data.
+#define ALIGN_SKEW_REPEATING_DATA 5
+
+/// Writes one byte to output buffer and updates the alignment counter.
+#define write_byte(b) \
+do { \
+ assert(*out_pos < out_size); \
+ out[*out_pos] = b; \
+ ++*out_pos; \
+ ++coder->alignment.out_pos; \
+} while (0)
+
+
+struct lzma_coder_s {
+ lzma_next_coder next;
+ bool next_finished;
+
+ enum {
+ SEQ_FILL,
+ SEQ_FLUSH,
+ SEQ_RLE_COUNT_0,
+ SEQ_RLE_COUNT_1,
+ SEQ_RLE_COUNT_2,
+ SEQ_RLE_COUNT_3,
+ SEQ_RLE_SIZE,
+ SEQ_RLE_DATA,
+ SEQ_DATA_SIZE_0,
+ SEQ_DATA_SIZE_1,
+ SEQ_DATA_SIZE_2,
+ SEQ_DATA_SIZE_3,
+ SEQ_DATA,
+ SEQ_SUBFILTER_INIT,
+ SEQ_SUBFILTER_FLAGS,
+ } sequence;
+
+ /// Pointer to the options given by the application. This is used
+ /// for two-way communication with the application.
+ lzma_options_subblock *options;
+
+ /// Position in various arrays.
+ size_t pos;
+
+ /// Holds subblock.size - 1 or rle.size - 1 when encoding size
+ /// of Data or Repeat Count.
+ uint32_t tmp;
+
+ struct {
+ /// This is a copy of options->alignment, or
+ /// LZMA_SUBBLOCK_ALIGNMENT_DEFAULT if options is NULL.
+ uint32_t multiple;
+
+ /// Number of input bytes which we have processed and started
+ /// writing out. 32-bit integer is enough since we care only
+ /// about the lowest bits when fixing alignment.
+ uint32_t in_pos;
+
+ /// Number of bytes written out.
+ uint32_t out_pos;
+ } alignment;
+
+ struct {
+ /// Pointer to allocated buffer holding the Data field
+ /// of Subblock Type "Data".
+ uint8_t *data;
+
+ /// Number of bytes in the buffer.
+ size_t size;
+
+ /// Allocated size of the buffer.
+ size_t limit;
+
+ /// Number of input bytes that we have already read but
+ /// not yet started writing out. This can be different
+ /// to `size' when using Subfilter. That's why we track
+ /// in_pending separately for RLE (see below).
+ uint32_t in_pending;
+ } subblock;
+
+ struct {
+ /// Buffer to hold the data that may be coded with
+ /// Subblock Type `Repeating Data'.
+ uint8_t buffer[LZMA_SUBBLOCK_RLE_MAX];
+
+ /// Number of bytes in buffer[].
+ size_t size;
+
+ /// Number of times the first `size' bytes of buffer[]
+ /// will be repeated.
+ uint64_t count;
+
+ /// Like subblock.in_pending above, but for RLE.
+ uint32_t in_pending;
+ } rle;
+
+ struct {
+ enum {
+ SUB_NONE,
+ SUB_SET,
+ SUB_RUN,
+ SUB_FLUSH,
+ SUB_FINISH,
+ SUB_END_MARKER,
+ } mode;
+
+ /// This is a copy of options->allow_subfilters. We use
+ /// this to verify that the application doesn't change
+ /// the value of allow_subfilters.
+ bool allow;
+
+ /// When this is true, application is not allowed to modify
+ /// options->subblock_mode. We may still modify it here.
+ bool mode_locked;
+
+ /// True if we have encoded at least one byte of data with
+ /// the Subfilter.
+ bool got_input;
+
+ /// Track the amount of input available once
+ /// LZMA_SUBFILTER_FINISH has been enabled.
+ /// This is needed for sanity checking (kind
+ /// of duplicating what common/code.c does).
+ size_t in_avail;
+
+ /// Buffer for the Filter Flags field written after
+ /// the `Set Subfilter' indicator.
+ uint8_t *flags;
+
+ /// Size of Filter Flags field.
+ uint32_t flags_size;
+
+ /// Pointers to Subfilter.
+ lzma_next_coder subcoder;
+
+ } subfilter;
+
+ /// Temporary buffer used when we are not the last filter in the chain.
+ struct {
+ size_t pos;
+ size_t size;
+ uint8_t buffer[LZMA_BUFFER_SIZE];
+ } temp;
+};
+
+
+/// \brief Aligns the output buffer
+///
+/// Aligns the output buffer so that after skew bytes the output position is
+/// a multiple of coder->alignment.multiple.
+static bool
+subblock_align(lzma_coder *coder, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size,
+ size_t chunk_size, uint32_t skew)
+{
+ assert(*out_pos < out_size);
+
+ // Fix the alignment only if it makes sense at least a little.
+ if (chunk_size >= MIN_CHUNK_SIZE_FOR_ALIGN) {
+ const uint32_t target = coder->alignment.in_pos
+ % coder->alignment.multiple;
+
+ while ((coder->alignment.out_pos + skew)
+ % coder->alignment.multiple != target) {
+ // Zero indicates padding.
+ write_byte(0x00);
+
+ // Check if output buffer got full and indicate it to
+ // the caller.
+ if (*out_pos == out_size)
+ return true;
+ }
+ }
+
+ // Output buffer is not full.
+ return false;
+}
+
+
+/// \brief Checks if buffer contains repeated data
+///
+/// \param needle Buffer containing a single repeat chunk
+/// \param needle_size Size of needle in bytes
+/// \param buf Buffer to search for repeated needles
+/// \param buf_chunks Buffer size is buf_chunks * needle_size.
+///
+/// \return True if the whole buf is filled with repeated needles.
+///
+static bool
+is_repeating(const uint8_t *restrict needle, size_t needle_size,
+ const uint8_t *restrict buf, size_t buf_chunks)
+{
+ while (buf_chunks-- != 0) {
+ if (memcmp(buf, needle, needle_size) != 0)
+ return false;
+
+ buf += needle_size;
+ }
+
+ return true;
+}
+
+
+/// \brief Optimizes the repeating style and updates coder->sequence
+static void
+subblock_rle_flush(lzma_coder *coder)
+{
+ // The Subblock decoder can use memset() when the size of the data
+ // being repeated is one byte, so we check if the RLE buffer is
+ // filled with a single repeating byte.
+ if (coder->rle.size > 1) {
+ const uint8_t b = coder->rle.buffer[0];
+ size_t i = 0;
+ while (true) {
+ if (coder->rle.buffer[i] != b)
+ break;
+
+ if (++i == coder->rle.size) {
+ // TODO Integer overflow check maybe,
+ // although this needs at least 2**63 bytes
+ // of input until it gets triggered...
+ coder->rle.count *= coder->rle.size;
+ coder->rle.size = 1;
+ break;
+ }
+ }
+ }
+
+ if (coder->rle.count == 1) {
+ // The buffer should be repeated only once. It is
+ // a waste of space to use Repeating Data. Instead,
+ // write a regular Data Subblock. See SEQ_RLE_COUNT_0
+ // in subblock_buffer() for more info.
+ coder->tmp = coder->rle.size - 1;
+ } else if (coder->rle.count > REPEAT_COUNT_MAX) {
+ // There's so much to repeat that it doesn't fit into
+ // a 28-bit integer. We will write two or more Subblocks
+ // of type Repeating Data.
+ coder->tmp = REPEAT_COUNT_MAX - 1;
+ } else {
+ coder->tmp = coder->rle.count - 1;
+ }
+
+ coder->sequence = SEQ_RLE_COUNT_0;
+
+ return;
+}
+
+
+/// \brief Resizes coder->subblock.data for a new size limit
+static lzma_ret
+subblock_data_size(lzma_coder *coder, lzma_allocator *allocator,
+ size_t new_limit)
+{
+ // Verify that the new limit is valid.
+ if (new_limit < LZMA_SUBBLOCK_DATA_SIZE_MIN
+ || new_limit > LZMA_SUBBLOCK_DATA_SIZE_MAX)
+ return LZMA_OPTIONS_ERROR;
+
+ // If the new limit is different from the previous one, we need
+ // to reallocate the data buffer.
+ if (new_limit != coder->subblock.limit) {
+ lzma_free(coder->subblock.data, allocator);
+ coder->subblock.data = lzma_alloc(new_limit, allocator);
+ if (coder->subblock.data == NULL)
+ return LZMA_MEM_ERROR;
+ }
+
+ coder->subblock.limit = new_limit;
+
+ return LZMA_OK;
+}
+
+
+static lzma_ret
+subblock_buffer(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ // Changing allow_subfilters is not allowed.
+ if (coder->options != NULL && coder->subfilter.allow
+ != coder->options->allow_subfilters)
+ return LZMA_PROG_ERROR;
+
+ // Check if we need to do something special with the Subfilter.
+ if (coder->subfilter.allow) {
+ assert(coder->options != NULL);
+
+ // See if subfilter_mode has been changed.
+ switch (coder->options->subfilter_mode) {
+ case LZMA_SUBFILTER_NONE:
+ if (coder->subfilter.mode != SUB_NONE)
+ return LZMA_PROG_ERROR;
+ break;
+
+ case LZMA_SUBFILTER_SET:
+ if (coder->subfilter.mode_locked
+ || coder->subfilter.mode != SUB_NONE)
+ return LZMA_PROG_ERROR;
+
+ coder->subfilter.mode = SUB_SET;
+ coder->subfilter.got_input = false;
+
+ if (coder->sequence == SEQ_FILL)
+ coder->sequence = SEQ_FLUSH;
+
+ break;
+
+ case LZMA_SUBFILTER_RUN:
+ if (coder->subfilter.mode != SUB_RUN)
+ return LZMA_PROG_ERROR;
+
+ break;
+
+ case LZMA_SUBFILTER_FINISH: {
+ const size_t in_avail = in_size - *in_pos;
+
+ if (coder->subfilter.mode == SUB_RUN) {
+ if (coder->subfilter.mode_locked)
+ return LZMA_PROG_ERROR;
+
+ coder->subfilter.mode = SUB_FINISH;
+ coder->subfilter.in_avail = in_avail;
+
+ } else if (coder->subfilter.mode != SUB_FINISH
+ || coder->subfilter.in_avail
+ != in_avail) {
+ return LZMA_PROG_ERROR;
+ }
+
+ break;
+ }
+
+ default:
+ return LZMA_OPTIONS_ERROR;
+ }
+
+ // If we are sync-flushing or finishing, the application may
+ // no longer change subfilter_mode. Note that this check is
+ // done after checking the new subfilter_mode above; this
+ // way the application may e.g. set LZMA_SUBFILTER_SET and
+ // LZMA_SYNC_FLUSH at the same time, but it cannot modify
+ // subfilter_mode on the later lzma_code() calls before
+ // we have returned LZMA_STREAM_END.
+ if (action != LZMA_RUN)
+ coder->subfilter.mode_locked = true;
+ }
+
+ // Main loop
+ while (*out_pos < out_size)
+ switch (coder->sequence) {
+ case SEQ_FILL:
+ // Grab the new Subblock Data Size and reallocate the buffer.
+ if (coder->subblock.size == 0 && coder->options != NULL
+ && coder->options->subblock_data_size
+ != coder->subblock.limit)
+ return_if_error(subblock_data_size(coder,
+ allocator, coder->options
+ ->subblock_data_size));
+
+ if (coder->subfilter.mode == SUB_NONE) {
+ assert(coder->subfilter.subcoder.code == NULL);
+
+ // No Subfilter is enabled, just copy the data as is.
+ coder->subblock.in_pending += lzma_bufcpy(
+ in, in_pos, in_size,
+ coder->subblock.data,
+ &coder->subblock.size,
+ coder->subblock.limit);
+
+ // If we ran out of input before the whole buffer
+ // was filled, return to application.
+ if (coder->subblock.size < coder->subblock.limit
+ && action == LZMA_RUN)
+ return LZMA_OK;
+
+ } else {
+ assert(coder->options->subfilter_mode
+ != LZMA_SUBFILTER_SET);
+
+ // Using LZMA_FINISH automatically toggles
+ // LZMA_SUBFILTER_FINISH.
+ //
+ // NOTE: It is possible that application had set
+ // LZMA_SUBFILTER_SET and LZMA_FINISH at the same
+ // time. In that case it is possible that we will
+ // cycle to LZMA_SUBFILTER_RUN, LZMA_SUBFILTER_FINISH,
+ // and back to LZMA_SUBFILTER_NONE in a single
+ // Subblock encoder function call.
+ if (action == LZMA_FINISH) {
+ coder->options->subfilter_mode
+ = LZMA_SUBFILTER_FINISH;
+ coder->subfilter.mode = SUB_FINISH;
+ }
+
+ const size_t in_start = *in_pos;
+
+ const lzma_ret ret = coder->subfilter.subcoder.code(
+ coder->subfilter.subcoder.coder,
+ allocator, in, in_pos, in_size,
+ coder->subblock.data,
+ &coder->subblock.size,
+ coder->subblock.limit,
+ coder->subfilter.mode == SUB_FINISH
+ ? LZMA_FINISH : action);
+
+ const size_t in_used = *in_pos - in_start;
+ coder->subblock.in_pending += in_used;
+ if (in_used > 0)
+ coder->subfilter.got_input = true;
+
+ coder->subfilter.in_avail = in_size - *in_pos;
+
+ if (ret == LZMA_STREAM_END) {
+ // All currently available input must have
+ // been processed.
+ assert(*in_pos == in_size);
+
+ // Flush now. Even if coder->subblock.size
+ // happened to be zero, we still need to go
+ // to SEQ_FLUSH to possibly finish RLE or
+ // write the Subfilter Unset indicator.
+ coder->sequence = SEQ_FLUSH;
+
+ if (coder->subfilter.mode == SUB_RUN) {
+ // Flushing with Subfilter enabled.
+ assert(action == LZMA_SYNC_FLUSH);
+ coder->subfilter.mode = SUB_FLUSH;
+ break;
+ }
+
+ // Subfilter finished its job.
+ assert(coder->subfilter.mode == SUB_FINISH
+ || action == LZMA_FINISH);
+
+ // At least one byte of input must have been
+ // encoded with the Subfilter. This is
+ // required by the file format specification.
+ if (!coder->subfilter.got_input)
+ return LZMA_PROG_ERROR;
+
+ // We don't strictly need to do this, but
+ // doing it sounds like a good idea, because
+ // otherwise the Subfilter's memory could be
+ // left allocated for a long time, and would
+ // just waste memory.
+ lzma_next_end(&coder->subfilter.subcoder,
+ allocator);
+
+ // We need to flush the currently buffered
+ // data and write Unset Subfilter marker.
+ // Note that we cannot set
+ // coder->options->subfilter_mode to
+ // LZMA_SUBFILTER_NONE yet, because we
+ // haven't written the Unset Subfilter
+ // marker yet.
+ coder->subfilter.mode = SUB_END_MARKER;
+ coder->sequence = SEQ_FLUSH;
+ break;
+ }
+
+ // Return if we couldn't fill the buffer or
+ // if an error occurred.
+ if (coder->subblock.size < coder->subblock.limit
+ || ret != LZMA_OK)
+ return ret;
+ }
+
+ coder->sequence = SEQ_FLUSH;
+
+ // SEQ_FILL doesn't produce any output so falling through
+ // to SEQ_FLUSH is safe.
+ assert(*out_pos < out_size);
+
+ // Fall through
+
+ case SEQ_FLUSH:
+ if (coder->options != NULL) {
+ // Update the alignment variable.
+ coder->alignment.multiple = coder->options->alignment;
+ if (coder->alignment.multiple
+ < LZMA_SUBBLOCK_ALIGNMENT_MIN
+ || coder->alignment.multiple
+ > LZMA_SUBBLOCK_ALIGNMENT_MAX)
+ return LZMA_OPTIONS_ERROR;
+
+ // Run-length encoder
+ //
+ // First check if there is some data pending and we
+ // have an obvious need to flush it immediately.
+ if (coder->rle.count > 0
+ && (coder->rle.size
+ != coder->options->rle
+ || coder->subblock.size
+ % coder->rle.size)) {
+ subblock_rle_flush(coder);
+ break;
+ }
+
+ // Grab the (possibly new) RLE chunk size and
+ // validate it.
+ coder->rle.size = coder->options->rle;
+ if (coder->rle.size > LZMA_SUBBLOCK_RLE_MAX)
+ return LZMA_OPTIONS_ERROR;
+
+ if (coder->subblock.size != 0
+ && coder->rle.size
+ != LZMA_SUBBLOCK_RLE_OFF
+ && coder->subblock.size
+ % coder->rle.size == 0) {
+
+ // Initialize coder->rle.buffer if we don't
+ // have RLE already running.
+ if (coder->rle.count == 0)
+ memcpy(coder->rle.buffer,
+ coder->subblock.data,
+ coder->rle.size);
+
+ // Test if coder->subblock.data is repeating.
+ // If coder->rle.count would overflow, we
+ // force flushing. Forced flushing shouldn't
+ // really happen in real-world situations.
+ const size_t count = coder->subblock.size
+ / coder->rle.size;
+ if (UINT64_MAX - count > coder->rle.count
+ && is_repeating(
+ coder->rle.buffer,
+ coder->rle.size,
+ coder->subblock.data,
+ count)) {
+ coder->rle.count += count;
+ coder->rle.in_pending += coder
+ ->subblock.in_pending;
+ coder->subblock.in_pending = 0;
+ coder->subblock.size = 0;
+
+ } else if (coder->rle.count > 0) {
+ // It's not repeating or at least not
+ // with the same byte sequence as the
+ // earlier Subblock Data buffers. We
+ // have some data pending in the RLE
+ // buffer already, so do a flush.
+ // Once flushed, we will check again
+ // if the Subblock Data happens to
+ // contain a different repeating
+ // sequence.
+ subblock_rle_flush(coder);
+ break;
+ }
+ }
+ }
+
+ // If we now have some data left in coder->subblock, the RLE
+ // buffer is empty and we must write a regular Subblock Data.
+ if (coder->subblock.size > 0) {
+ assert(coder->rle.count == 0);
+ coder->tmp = coder->subblock.size - 1;
+ coder->sequence = SEQ_DATA_SIZE_0;
+ break;
+ }
+
+ // Check if we should enable Subfilter.
+ if (coder->subfilter.mode == SUB_SET) {
+ if (coder->rle.count > 0)
+ subblock_rle_flush(coder);
+ else
+ coder->sequence = SEQ_SUBFILTER_INIT;
+ break;
+ }
+
+ // Check if we have just finished Subfiltering.
+ if (coder->subfilter.mode == SUB_END_MARKER) {
+ if (coder->rle.count > 0) {
+ subblock_rle_flush(coder);
+ break;
+ }
+
+ coder->options->subfilter_mode = LZMA_SUBFILTER_NONE;
+ coder->subfilter.mode = SUB_NONE;
+
+ write_byte(0x50);
+ if (*out_pos == out_size)
+ return LZMA_OK;
+ }
+
+ // Check if we have already written everything.
+ if (action != LZMA_RUN && *in_pos == in_size
+ && (coder->subfilter.mode == SUB_NONE
+ || coder->subfilter.mode == SUB_FLUSH)) {
+ if (coder->rle.count > 0) {
+ subblock_rle_flush(coder);
+ break;
+ }
+
+ if (action == LZMA_SYNC_FLUSH) {
+ if (coder->subfilter.mode == SUB_FLUSH)
+ coder->subfilter.mode = SUB_RUN;
+
+ coder->subfilter.mode_locked = false;
+ coder->sequence = SEQ_FILL;
+
+ } else {
+ assert(action == LZMA_FINISH);
+
+ // Write EOPM.
+ // NOTE: No need to use write_byte() here
+ // since we are finishing.
+ out[*out_pos] = 0x10;
+ ++*out_pos;
+ }
+
+ return LZMA_STREAM_END;
+ }
+
+ // Otherwise we have more work to do.
+ coder->sequence = SEQ_FILL;
+ break;
+
+ case SEQ_RLE_COUNT_0:
+ assert(coder->rle.count > 0);
+
+ if (coder->rle.count == 1) {
+ // The buffer should be repeated only once. Fix
+ // the alignment and write the first byte of
+ // Subblock Type `Data'.
+ if (subblock_align(coder, out, out_pos, out_size,
+ coder->rle.size, ALIGN_SKEW_DATA))
+ return LZMA_OK;
+
+ write_byte(0x20 | (coder->tmp & 0x0F));
+
+ } else {
+ // We have something to actually repeat, which should
+ // mean that it takes less space with run-length
+ // encoding.
+ if (subblock_align(coder, out, out_pos, out_size,
+ coder->rle.size,
+ ALIGN_SKEW_REPEATING_DATA))
+ return LZMA_OK;
+
+ write_byte(0x30 | (coder->tmp & 0x0F));
+ }
+
+ // NOTE: If we have to write more than one Repeating Data
+ // due to rle.count > REPEAT_COUNT_MAX, the subsequent
+ // Repeating Data Subblocks may get wrong alignment, because
+ // we add rle.in_pending to alignment.in_pos at once instead
+ // of adding only as much as this particular Repeating Data
+ // consumed input data. Correct alignment is always restored
+ // after all the required Repeating Data Subblocks have been
+ // written. This problem occurs in such weird cases that
+ // it's not worth fixing.
+ coder->alignment.out_pos += coder->rle.size;
+ coder->alignment.in_pos += coder->rle.in_pending;
+ coder->rle.in_pending = 0;
+
+ coder->sequence = SEQ_RLE_COUNT_1;
+ break;
+
+ case SEQ_RLE_COUNT_1:
+ write_byte(coder->tmp >> 4);
+ coder->sequence = SEQ_RLE_COUNT_2;
+ break;
+
+ case SEQ_RLE_COUNT_2:
+ write_byte(coder->tmp >> 12);
+ coder->sequence = SEQ_RLE_COUNT_3;
+ break;
+
+ case SEQ_RLE_COUNT_3:
+ write_byte(coder->tmp >> 20);
+
+ // Again, see if we are writing regular Data or Repeating Data.
+ // In the former case, we skip SEQ_RLE_SIZE.
+ if (coder->rle.count == 1)
+ coder->sequence = SEQ_RLE_DATA;
+ else
+ coder->sequence = SEQ_RLE_SIZE;
+
+ if (coder->rle.count > REPEAT_COUNT_MAX)
+ coder->rle.count -= REPEAT_COUNT_MAX;
+ else
+ coder->rle.count = 0;
+
+ break;
+
+ case SEQ_RLE_SIZE:
+ assert(coder->rle.size >= LZMA_SUBBLOCK_RLE_MIN);
+ assert(coder->rle.size <= LZMA_SUBBLOCK_RLE_MAX);
+ write_byte(coder->rle.size - 1);
+ coder->sequence = SEQ_RLE_DATA;
+ break;
+
+ case SEQ_RLE_DATA:
+ lzma_bufcpy(coder->rle.buffer, &coder->pos, coder->rle.size,
+ out, out_pos, out_size);
+ if (coder->pos < coder->rle.size)
+ return LZMA_OK;
+
+ coder->pos = 0;
+ coder->sequence = SEQ_FLUSH;
+ break;
+
+ case SEQ_DATA_SIZE_0:
+ // We need four bytes for the Size field.
+ if (subblock_align(coder, out, out_pos, out_size,
+ coder->subblock.size, ALIGN_SKEW_DATA))
+ return LZMA_OK;
+
+ coder->alignment.out_pos += coder->subblock.size;
+ coder->alignment.in_pos += coder->subblock.in_pending;
+ coder->subblock.in_pending = 0;
+
+ write_byte(0x20 | (coder->tmp & 0x0F));
+ coder->sequence = SEQ_DATA_SIZE_1;
+ break;
+
+ case SEQ_DATA_SIZE_1:
+ write_byte(coder->tmp >> 4);
+ coder->sequence = SEQ_DATA_SIZE_2;
+ break;
+
+ case SEQ_DATA_SIZE_2:
+ write_byte(coder->tmp >> 12);
+ coder->sequence = SEQ_DATA_SIZE_3;
+ break;
+
+ case SEQ_DATA_SIZE_3:
+ write_byte(coder->tmp >> 20);
+ coder->sequence = SEQ_DATA;
+ break;
+
+ case SEQ_DATA:
+ lzma_bufcpy(coder->subblock.data, &coder->pos,
+ coder->subblock.size, out, out_pos, out_size);
+ if (coder->pos < coder->subblock.size)
+ return LZMA_OK;
+
+ coder->subblock.size = 0;
+ coder->pos = 0;
+ coder->sequence = SEQ_FLUSH;
+ break;
+
+ case SEQ_SUBFILTER_INIT: {
+ assert(coder->subblock.size == 0);
+ assert(coder->subblock.in_pending == 0);
+ assert(coder->rle.count == 0);
+ assert(coder->rle.in_pending == 0);
+ assert(coder->subfilter.mode == SUB_SET);
+ assert(coder->options != NULL);
+
+ // There must be a filter specified.
+ if (coder->options->subfilter_options.id == LZMA_VLI_UNKNOWN)
+ return LZMA_OPTIONS_ERROR;
+
+ // Initialize a raw encoder to work as a Subfilter.
+ lzma_filter options[2];
+ options[0] = coder->options->subfilter_options;
+ options[1].id = LZMA_VLI_UNKNOWN;
+
+ return_if_error(lzma_raw_encoder_init(
+ &coder->subfilter.subcoder, allocator,
+ options));
+
+ // Encode the Filter Flags field into a buffer. This should
+ // never fail since we have already successfully initialized
+ // the Subfilter itself. Check it anyway, and return
+ // LZMA_PROG_ERROR instead of whatever the ret would say.
+ lzma_ret ret = lzma_filter_flags_size(
+ &coder->subfilter.flags_size, options);
+ assert(ret == LZMA_OK);
+ if (ret != LZMA_OK)
+ return LZMA_PROG_ERROR;
+
+ coder->subfilter.flags = lzma_alloc(
+ coder->subfilter.flags_size, allocator);
+ if (coder->subfilter.flags == NULL)
+ return LZMA_MEM_ERROR;
+
+ // Now we have a big-enough buffer. Encode the Filter Flags.
+ // Like above, this should never fail.
+ size_t dummy = 0;
+ ret = lzma_filter_flags_encode(options, coder->subfilter.flags,
+ &dummy, coder->subfilter.flags_size);
+ assert(ret == LZMA_OK);
+ assert(dummy == coder->subfilter.flags_size);
+ if (ret != LZMA_OK || dummy != coder->subfilter.flags_size)
+ return LZMA_PROG_ERROR;
+
+ // Write a Subblock indicating a new Subfilter.
+ write_byte(0x40);
+
+ coder->options->subfilter_mode = LZMA_SUBFILTER_RUN;
+ coder->subfilter.mode = SUB_RUN;
+ coder->alignment.out_pos += coder->subfilter.flags_size;
+ coder->sequence = SEQ_SUBFILTER_FLAGS;
+
+ // It is safe to fall through because SEQ_SUBFILTER_FLAGS
+ // uses lzma_bufcpy() which doesn't write unless there is
+ // output space.
+ }
+
+ // Fall through
+
+ case SEQ_SUBFILTER_FLAGS:
+ // Copy the Filter Flags to the output stream.
+ lzma_bufcpy(coder->subfilter.flags, &coder->pos,
+ coder->subfilter.flags_size,
+ out, out_pos, out_size);
+ if (coder->pos < coder->subfilter.flags_size)
+ return LZMA_OK;
+
+ lzma_free(coder->subfilter.flags, allocator);
+ coder->subfilter.flags = NULL;
+
+ coder->pos = 0;
+ coder->sequence = SEQ_FILL;
+ break;
+
+ default:
+ return LZMA_PROG_ERROR;
+ }
+
+ return LZMA_OK;
+}
+
+
+static lzma_ret
+subblock_encode(lzma_coder *coder, lzma_allocator *allocator,
+ const uint8_t *restrict in, size_t *restrict in_pos,
+ size_t in_size, uint8_t *restrict out,
+ size_t *restrict out_pos, size_t out_size, lzma_action action)
+{
+ if (coder->next.code == NULL)
+ return subblock_buffer(coder, allocator, in, in_pos, in_size,
+ out, out_pos, out_size, action);
+
+ while (*out_pos < out_size
+ && (*in_pos < in_size || action != LZMA_RUN)) {
+ if (!coder->next_finished
+ && coder->temp.pos == coder->temp.size) {
+ coder->temp.pos = 0;
+ coder->temp.size = 0;
+
+ const lzma_ret ret = coder->next.code(coder->next.coder,
+ allocator, in, in_pos, in_size,
+ coder->temp.buffer, &coder->temp.size,
+ LZMA_BUFFER_SIZE, action);
+ if (ret == LZMA_STREAM_END) {
+ assert(action != LZMA_RUN);
+ coder->next_finished = true;
+ } else if (coder->temp.size == 0 || ret != LZMA_OK) {
+ return ret;
+ }
+ }
+
+ const lzma_ret ret = subblock_buffer(coder, allocator,
+ coder->temp.buffer, &coder->temp.pos,
+ coder->temp.size, out, out_pos, out_size,
+ coder->next_finished ? LZMA_FINISH : LZMA_RUN);
+ if (ret == LZMA_STREAM_END) {
+ assert(action != LZMA_RUN);
+ assert(coder->next_finished);
+ return LZMA_STREAM_END;
+ }
+
+ if (ret != LZMA_OK)
+ return ret;
+ }
+
+ return LZMA_OK;
+}
+
+
+static void
+subblock_encoder_end(lzma_coder *coder, lzma_allocator *allocator)
+{
+ lzma_next_end(&coder->next, allocator);
+ lzma_next_end(&coder->subfilter.subcoder, allocator);
+ lzma_free(coder->subblock.data, allocator);
+ lzma_free(coder->subfilter.flags, allocator);
+ lzma_free(coder, allocator);
+ return;
+}
+
+
+extern lzma_ret
+lzma_subblock_encoder_init(lzma_next_coder *next, lzma_allocator *allocator,
+ const lzma_filter_info *filters)
+{
+ if (next->coder == NULL) {
+ next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
+ if (next->coder == NULL)
+ return LZMA_MEM_ERROR;
+
+ next->code = &subblock_encode;
+ next->end = &subblock_encoder_end;
+
+ next->coder->next = LZMA_NEXT_CODER_INIT;
+ next->coder->subblock.data = NULL;
+ next->coder->subblock.limit = 0;
+ next->coder->subfilter.subcoder = LZMA_NEXT_CODER_INIT;
+ } else {
+ lzma_next_end(&next->coder->subfilter.subcoder,
+ allocator);
+ lzma_free(next->coder->subfilter.flags, allocator);
+ }
+
+ next->coder->subfilter.flags = NULL;
+
+ next->coder->next_finished = false;
+ next->coder->sequence = SEQ_FILL;
+ next->coder->options = filters[0].options;
+ next->coder->pos = 0;
+
+ next->coder->alignment.in_pos = 0;
+ next->coder->alignment.out_pos = 0;
+ next->coder->subblock.size = 0;
+ next->coder->subblock.in_pending = 0;
+ next->coder->rle.count = 0;
+ next->coder->rle.in_pending = 0;
+ next->coder->subfilter.mode = SUB_NONE;
+ next->coder->subfilter.mode_locked = false;
+
+ next->coder->temp.pos = 0;
+ next->coder->temp.size = 0;
+
+ // Grab some values from the options structure if it is available.
+ size_t subblock_size_limit;
+ if (next->coder->options != NULL) {
+ if (next->coder->options->alignment
+ < LZMA_SUBBLOCK_ALIGNMENT_MIN
+ || next->coder->options->alignment
+ > LZMA_SUBBLOCK_ALIGNMENT_MAX) {
+ subblock_encoder_end(next->coder, allocator);
+ return LZMA_OPTIONS_ERROR;
+ }
+ next->coder->alignment.multiple
+ = next->coder->options->alignment;
+ next->coder->subfilter.allow
+ = next->coder->options->allow_subfilters;
+ subblock_size_limit = next->coder->options->subblock_data_size;
+ } else {
+ next->coder->alignment.multiple
+ = LZMA_SUBBLOCK_ALIGNMENT_DEFAULT;
+ next->coder->subfilter.allow = false;
+ subblock_size_limit = LZMA_SUBBLOCK_DATA_SIZE_DEFAULT;
+ }
+
+ return_if_error(subblock_data_size(next->coder, allocator,
+ subblock_size_limit));
+
+ return lzma_next_filter_init(
+ &next->coder->next, allocator, filters + 1);
+}
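
Two details of subblock_rle_flush() above are easy to miss: a buffer consisting of one repeated byte is collapsed to size 1 (so the decoder's SEQ_REPEAT_FAST memset() path applies), and the on-disk Repeat Count field always stores the real count minus one, clamped to REPEAT_COUNT_MAX. Below is a minimal standalone sketch of that bookkeeping; rle_stored_count is a hypothetical name, and the count == 1 case (which falls back to a plain Data Subblock) is deliberately left out.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define REPEAT_COUNT_MAX (1U << 28)   /* as in subblock_encoder.c above */

    /* Hypothetical helper mirroring subblock_rle_flush(): collapse a
     * single-byte pattern and return the value that would be written
     * into the 28-bit Repeat Count field (real count - 1, clamped). */
    static uint32_t
    rle_stored_count(const uint8_t *buf, size_t *size, uint64_t *count)
    {
        if (*size > 1) {
            size_t i = 1;
            while (i < *size && buf[i] == buf[0])
                ++i;

            if (i == *size) {
                /* Whole buffer is one repeated byte: memset() territory. */
                *count *= *size;
                *size = 1;
            }
        }

        if (*count > REPEAT_COUNT_MAX)
            return REPEAT_COUNT_MAX - 1;   /* more Subblocks will follow */

        return (uint32_t)(*count - 1);
    }

    int
    main(void)
    {
        uint8_t buf[4] = { 0xAA, 0xAA, 0xAA, 0xAA };
        size_t size = sizeof(buf);
        uint64_t count = 10;               /* four bytes repeated ten times */

        const uint32_t stored = rle_stored_count(buf, &size, &count);
        assert(size == 1 && count == 40);  /* collapsed to one byte x 40 */
        assert(stored == 39);              /* field holds count - 1 */
        return 0;
    }
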
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_encoder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_encoder.h
new file mode 100644
index 00000000..23eea87a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/liblzma/subblock/subblock_encoder.h
@@ -0,0 +1,23 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file subblock_encoder.h
+/// \brief Encoder of the Subblock filter
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_SUBBLOCK_ENCODER_H
+#define LZMA_SUBBLOCK_ENCODER_H
+
+#include "common.h"
+
+extern lzma_ret lzma_subblock_encoder_init(lzma_next_coder *next,
+ lzma_allocator *allocator, const lzma_filter_info *filters);
+
+#endif
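
For orientation, the option fields referenced above (lzma_options_subblock with allow_subfilters, alignment, subblock_data_size and rle) are filled in by the application and handed to liblzma as the options of LZMA_FILTER_SUBBLOCK. The sketch below shows how a caller might have driven this filter through the public raw-encoder API of this beta; it assumes a single-filter Subblock chain is accepted by lzma_raw_encoder(), which is an assumption on my part, not something shown in this patch.

    #include <lzma.h>
    #include <string.h>

    /* Illustrative only: push one buffer through the Subblock filter with
     * the defaults referenced by subblock_encoder.c (assumption: this
     * single-filter chain passes lzma_raw_encoder()'s validation). */
    static int
    subblock_encode_sketch(const uint8_t *in, size_t in_size,
            uint8_t *out, size_t out_capacity, size_t *out_size)
    {
        lzma_options_subblock opt;
        memset(&opt, 0, sizeof(opt));
        opt.allow_subfilters = false;
        opt.alignment = LZMA_SUBBLOCK_ALIGNMENT_DEFAULT;
        opt.subblock_data_size = LZMA_SUBBLOCK_DATA_SIZE_DEFAULT;
        opt.rle = LZMA_SUBBLOCK_RLE_OFF;          /* no run-length encoding */

        lzma_filter chain[2] = {
            { .id = LZMA_FILTER_SUBBLOCK, .options = &opt },
            { .id = LZMA_VLI_UNKNOWN, .options = NULL },
        };

        lzma_stream strm = LZMA_STREAM_INIT;
        if (lzma_raw_encoder(&strm, chain) != LZMA_OK)
            return -1;

        strm.next_in = in;
        strm.avail_in = in_size;
        strm.next_out = out;
        strm.avail_out = out_capacity;

        const lzma_ret ret = lzma_code(&strm, LZMA_FINISH);
        *out_size = out_capacity - strm.avail_out;
        lzma_end(&strm);

        return ret == LZMA_STREAM_END ? 0 : -1;
    }
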
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/Makefile.am
new file mode 100644
index 00000000..22295eea
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/Makefile.am
@@ -0,0 +1,29 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+bin_PROGRAMS = lzmainfo
+
+lzmainfo_SOURCES = lzmainfo.c
+
+lzmainfo_CPPFLAGS = \
+ -DLOCALEDIR=\"$(localedir)\" \
+ -I$(top_srcdir)/src/common \
+ -I$(top_srcdir)/src/liblzma/api \
+ -I$(top_builddir)/lib \
+ $(DYNAMIC_CPPFLAGS)
+
+lzmainfo_LDFLAGS = $(DYNAMIC_LDFLAGS)
+lzmainfo_LDADD = $(top_builddir)/src/liblzma/liblzma.la
+
+if COND_GNULIB
+lzmainfo_LDADD += $(top_builddir)/lib/libgnu.a
+endif
+
+lzmainfo_LDADD += $(LTLIBINTL)
+
+
+dist_man_MANS = lzmainfo.1
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/Makefile.in
new file mode 100644
index 00000000..81c698fd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/Makefile.in
@@ -0,0 +1,659 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+bin_PROGRAMS = lzmainfo$(EXEEXT)
+@COND_GNULIB_TRUE@am__append_1 = $(top_builddir)/lib/libgnu.a
+subdir = src/lzmainfo
+DIST_COMMON = $(dist_man_MANS) $(srcdir)/Makefile.am \
+ $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"
+PROGRAMS = $(bin_PROGRAMS)
+am_lzmainfo_OBJECTS = lzmainfo-lzmainfo.$(OBJEXT)
+lzmainfo_OBJECTS = $(am_lzmainfo_OBJECTS)
+am__DEPENDENCIES_1 =
+lzmainfo_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+lzmainfo_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(lzmainfo_LDFLAGS) \
+ $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = $(lzmainfo_SOURCES)
+DIST_SOURCES = $(lzmainfo_SOURCES)
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+man1dir = $(mandir)/man1
+NROFF = nroff
+MANS = $(dist_man_MANS)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+lzmainfo_SOURCES = lzmainfo.c
+lzmainfo_CPPFLAGS = \
+ -DLOCALEDIR=\"$(localedir)\" \
+ -I$(top_srcdir)/src/common \
+ -I$(top_srcdir)/src/liblzma/api \
+ -I$(top_builddir)/lib \
+ $(DYNAMIC_CPPFLAGS)
+
+lzmainfo_LDFLAGS = $(DYNAMIC_LDFLAGS)
+lzmainfo_LDADD = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(LTLIBINTL)
+dist_man_MANS = lzmainfo.1
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/lzmainfo/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/lzmainfo/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-binPROGRAMS: $(bin_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+ @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed 's/$(EXEEXT)$$//' | \
+ while read p p1; do if test -f $$p || test -f $$p1; \
+ then echo "$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \
+ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+ sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) files[d] = files[d] " " $$1; \
+ else { print "f", $$3 "/" $$4, $$1; } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-binPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+ -e 's/$$/$(EXEEXT)/' `; \
+ test -n "$$list" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(bindir)" && rm -f $$files
+
+clean-binPROGRAMS:
+ @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \
+ echo " rm -f" $$list; \
+ rm -f $$list || exit $$?; \
+ test -n "$(EXEEXT)" || exit 0; \
+ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+ echo " rm -f" $$list; \
+ rm -f $$list
+lzmainfo$(EXEEXT): $(lzmainfo_OBJECTS) $(lzmainfo_DEPENDENCIES)
+ @rm -f lzmainfo$(EXEEXT)
+ $(lzmainfo_LINK) $(lzmainfo_OBJECTS) $(lzmainfo_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lzmainfo-lzmainfo.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
+
+lzmainfo-lzmainfo.o: lzmainfo.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(lzmainfo_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lzmainfo-lzmainfo.o -MD -MP -MF $(DEPDIR)/lzmainfo-lzmainfo.Tpo -c -o lzmainfo-lzmainfo.o `test -f 'lzmainfo.c' || echo '$(srcdir)/'`lzmainfo.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/lzmainfo-lzmainfo.Tpo $(DEPDIR)/lzmainfo-lzmainfo.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lzmainfo.c' object='lzmainfo-lzmainfo.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(lzmainfo_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lzmainfo-lzmainfo.o `test -f 'lzmainfo.c' || echo '$(srcdir)/'`lzmainfo.c
+
+lzmainfo-lzmainfo.obj: lzmainfo.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(lzmainfo_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lzmainfo-lzmainfo.obj -MD -MP -MF $(DEPDIR)/lzmainfo-lzmainfo.Tpo -c -o lzmainfo-lzmainfo.obj `if test -f 'lzmainfo.c'; then $(CYGPATH_W) 'lzmainfo.c'; else $(CYGPATH_W) '$(srcdir)/lzmainfo.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/lzmainfo-lzmainfo.Tpo $(DEPDIR)/lzmainfo-lzmainfo.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='lzmainfo.c' object='lzmainfo-lzmainfo.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(lzmainfo_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lzmainfo-lzmainfo.obj `if test -f 'lzmainfo.c'; then $(CYGPATH_W) 'lzmainfo.c'; else $(CYGPATH_W) '$(srcdir)/lzmainfo.c'; fi`
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+install-man1: $(dist_man_MANS)
+ @$(NORMAL_INSTALL)
+ test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)"
+ @list=''; test -n "$(man1dir)" || exit 0; \
+ { for i in $$list; do echo "$$i"; done; \
+ l2='$(dist_man_MANS)'; for i in $$l2; do echo "$$i"; done | \
+ sed -n '/\.1[a-z]*$$/p'; \
+ } | while read p; do \
+ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; echo "$$p"; \
+ done | \
+ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \
+ sed 'N;N;s,\n, ,g' | { \
+ list=; while read file base inst; do \
+ if test "$$base" = "$$inst"; then list="$$list $$file"; else \
+ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \
+ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \
+ fi; \
+ done; \
+ for i in $$list; do echo "$$i"; done | $(am__base_list) | \
+ while read files; do \
+ test -z "$$files" || { \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \
+ done; }
+
+uninstall-man1:
+ @$(NORMAL_UNINSTALL)
+ @list=''; test -n "$(man1dir)" || exit 0; \
+ files=`{ for i in $$list; do echo "$$i"; done; \
+ l2='$(dist_man_MANS)'; for i in $$l2; do echo "$$i"; done | \
+ sed -n '/\.1[a-z]*$$/p'; \
+ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \
+ test -z "$$files" || { \
+ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; }
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @list='$(MANS)'; if test -n "$$list"; then \
+ list=`for p in $$list; do \
+ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \
+ if test -n "$$list" && \
+ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \
+ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \
+ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \
+ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \
+ echo " typically \`make maintainer-clean' will remove them" >&2; \
+ exit 1; \
+ else :; fi; \
+ else :; fi
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS) $(MANS)
+installdirs:
+ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-man
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binPROGRAMS
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man: install-man1
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS uninstall-man
+
+uninstall-man: uninstall-man1
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \
+ clean-generic clean-libtool ctags distclean distclean-compile \
+ distclean-generic distclean-libtool distclean-tags distdir dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-binPROGRAMS install-data install-data-am install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-man1 install-pdf install-pdf-am install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \
+ uninstall-am uninstall-binPROGRAMS uninstall-man \
+ uninstall-man1
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/lzmainfo.1 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/lzmainfo.1
new file mode 100644
index 00000000..ef736a6c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/lzmainfo.1
@@ -0,0 +1,55 @@
+.\"
+.\" Author: Lasse Collin
+.\"
+.\" This file has been put into the public domain.
+.\" You can do whatever you want with this file.
+.\"
+.TH LZMAINFO 1 "2009-08-13" "Tukaani" "XZ Utils"
+.SH NAME
+lzmainfo \- show information stored in the .lzma file header
+.SH SYNOPSIS
+.B lzmainfo
+.RB [ \-\-help ]
+.RB [ \-\-version ]
+.RI [ file ]...
+.SH DESCRIPTION
+.B lzmainfo
+shows information stored in the
+.B .lzma
+file header. It reads the first 13 bytes from the specified
+.IR file ,
+decodes the header, and prints it to standard output in human
+readable format. If no
+.I files
+are given or
+.I file
+is
+.BR \- ,
+standard input is read.
+.PP
+Usually the most interesting information is the uncompressed size and
+the dictionary size. Uncompressed size can be shown only if the file is
+in the non-streamed
+.B .lzma
+format variant. The amount of memory required to decompress the file is
+a few dozen kilobytes plus the dictionary size.
+.PP
+.B lzmainfo
+is included in XZ Utils primarily for backward compatibility with LZMA Utils.
+.SH EXIT STATUS
+.TP
+.B 0
+All is good.
+.TP
+.B 1
+An error occurred.
+.SH BUGS
+.B lzmainfo
+uses
+.B MB
+while the correct suffix would be
+.B MiB
+(2^20 bytes).
+This is to keep the output compatible with LZMA Utils.
+.SH SEE ALSO
+.BR xz (1)
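The 13-byte header that lzmainfo reports on can also be inspected by hand. A hedged sketch follows; archive.lzma is a placeholder for a non-streamed file produced with `xz --format=lzma`, and the byte meanings follow the classic .lzma layout that lzmainfo.c below decodes through lzma_properties_decode():

    # Show what lzmainfo parses, then dump the raw 13-byte header it reads.
    lzmainfo archive.lzma
    od -A d -t u1 -N 13 archive.lzma
    # byte 0     : properties byte, encoded as (pb * 5 + lp) * 9 + lc
    # bytes 1-4  : dictionary size, 32-bit little endian
    # bytes 5-12 : uncompressed size, 64-bit little endian (all 0xFF = unknown)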
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/lzmainfo.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/lzmainfo.c
new file mode 100644
index 00000000..f1e607ae
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/lzmainfo/lzmainfo.c
@@ -0,0 +1,244 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file lzmainfo.c
+/// \brief lzmainfo tool for compatibility with LZMA Utils
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include <stdio.h>
+#include <errno.h>
+
+#ifdef ENABLE_NLS
+# include <libintl.h>
+# define _(msgid) gettext(msgid)
+#else
+# define _(msgid) msgid
+#endif
+
+#include "lzma.h"
+#include "getopt.h"
+
+
+/// Name of the program from argv[0]
+static const char *argv0;
+
+
+/// Close stdout unless we are already going to exit with EXIT_FAILURE.
+/// If closing stdout fails, set exit status to EXIT_FAILURE and print
+/// an error message to stderr. We don't care about closing stderr,
+/// because we don't print anything to stderr unless we are going to
+/// use EXIT_FAILURE anyway.
+static void lzma_attribute((noreturn))
+my_exit(int status)
+{
+ if (status != EXIT_FAILURE) {
+ const int ferror_err = ferror(stdout);
+ const int fclose_err = fclose(stdout);
+
+ if (ferror_err || fclose_err) {
+ // If it was fclose() that failed, we have the reason
+ // in errno. If only ferror() indicated an error,
+ // we have no idea what the reason was.
+ fprintf(stderr, "%s: %s: %s\n", argv0,
+ _("Writing to standard output "
+ "failed"),
+ fclose_err ? strerror(errno)
+ : _("Unknown error"));
+ status = EXIT_FAILURE;
+ }
+ }
+
+ exit(status);
+}
+
+
+static void lzma_attribute((noreturn))
+help(void)
+{
+ printf(
+_("Usage: %s [--help] [--version] [FILE]...\n"
+"Show information stored in the .lzma file header"), argv0);
+
+ printf(_(
+"\nWith no FILE, or when FILE is -, read standard input.\n"));
+ printf("\n");
+
+ printf(_("Report bugs to <%s> (in English or Finnish).\n"),
+ PACKAGE_BUGREPORT);
+ printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_HOMEPAGE);
+
+ my_exit(EXIT_SUCCESS);
+}
+
+
+static void lzma_attribute((noreturn))
+version(void)
+{
+ puts("lzmainfo (" PACKAGE_NAME ") " PACKAGE_VERSION);
+ my_exit(EXIT_SUCCESS);
+}
+
+
+/// Parse command line options.
+static void
+parse_args(int argc, char **argv)
+{
+ enum {
+ OPT_HELP,
+ OPT_VERSION,
+ };
+
+ static const struct option long_opts[] = {
+ { "help", no_argument, NULL, OPT_HELP },
+ { "version", no_argument, NULL, OPT_VERSION },
+ { NULL, 0, NULL, 0 }
+ };
+
+ int c;
+ while ((c = getopt_long(argc, argv, "", long_opts, NULL)) != -1) {
+ switch (c) {
+ case OPT_HELP:
+ help();
+
+ case OPT_VERSION:
+ version();
+
+ default:
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ return;
+}
+
+
+/// Primitive base-2 logarithm for integers
+static uint32_t
+my_log2(uint32_t n)
+{
+ uint32_t e;
+ for (e = 0; n > 1; ++e, n /= 2) ;
+ return e;
+}
+
+
+/// Parse the .lzma header and display information about it.
+static bool
+lzmainfo(const char *name, FILE *f)
+{
+ uint8_t buf[13];
+ const size_t size = fread(buf, 1, sizeof(buf), f);
+ if (size != 13) {
+ fprintf(stderr, "%s: %s: %s\n", argv0, name,
+ ferror(f) ? strerror(errno)
+ : _("File is too small to be a .lzma file"));
+ return true;
+ }
+
+ lzma_filter filter = { .id = LZMA_FILTER_LZMA1 };
+
+ // Parse the first five bytes.
+ switch (lzma_properties_decode(&filter, NULL, buf, 5)) {
+ case LZMA_OK:
+ break;
+
+ case LZMA_OPTIONS_ERROR:
+ fprintf(stderr, "%s: %s: %s\n", argv0, name,
+ _("Not a .lzma file"));
+ return true;
+
+ case LZMA_MEM_ERROR:
+ fprintf(stderr, "%s: %s\n", argv0, strerror(ENOMEM));
+ exit(EXIT_FAILURE);
+
+ default:
+ fprintf(stderr, "%s: %s\n", argv0, _("Internal error (bug)"));
+ exit(EXIT_FAILURE);
+ }
+
+ // Uncompressed size
+ uint64_t uncompressed_size = 0;
+ for (size_t i = 0; i < 8; ++i)
+ uncompressed_size |= (uint64_t)(buf[5 + i]) << (i * 8);
+
+ // Display the results. We don't want to translate these and also
+ // will use MB instead of MiB, because someone could be parsing
+ // this output and we don't want to break that when people move
+ // from LZMA Utils to XZ Utils.
+ if (f != stdin)
+ printf("%s\n", name);
+
+ printf("Uncompressed size: ");
+ if (uncompressed_size == UINT64_MAX)
+ printf("Unknown");
+ else
+ printf("%" PRIu64 " MB (%" PRIu64 " bytes)",
+ (uncompressed_size + 512 * 1024)
+ / (1024 * 1024),
+ uncompressed_size);
+
+ lzma_options_lzma *opt = filter.options;
+
+ printf("\nDictionary size: "
+ "%u MB (2^%u bytes)\n"
+ "Literal context bits (lc): %" PRIu32 "\n"
+ "Literal pos bits (lp): %" PRIu32 "\n"
+ "Number of pos bits (pb): %" PRIu32 "\n",
+ (opt->dict_size + 512 * 1024) / (1024 * 1024),
+ my_log2(opt->dict_size), opt->lc, opt->lp, opt->pb);
+
+ free(opt);
+
+ return false;
+}
+
+
+extern int
+main(int argc, char **argv)
+{
+ int ret = EXIT_SUCCESS;
+ argv0 = argv[0];
+
+ parse_args(argc, argv);
+
+ // We print empty lines around the output only when reading from
+ // files specified on the command line. This is due to how
+ // LZMA Utils did it.
+ if (optind == argc) {
+ lzmainfo("(stdin)", stdin);
+ } else {
+ printf("\n");
+
+ do {
+ if (strcmp(argv[optind], "-") == 0) {
+ if (lzmainfo("(stdin)", stdin))
+ ret = EXIT_FAILURE;
+ } else {
+ FILE *f = fopen(argv[optind], "r");
+ if (f == NULL) {
+ ret = EXIT_FAILURE;
+ fprintf(stderr, "%s: %s: %s\n",
+ argv0, argv[optind],
+ strerror(errno));
+ continue;
+ }
+
+ if (lzmainfo(argv[optind], f))
+ ret = EXIT_FAILURE;
+
+ printf("\n");
+ fclose(f);
+ }
+ } while (++optind < argc);
+ }
+
+ my_exit(ret);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/Makefile.am
new file mode 100644
index 00000000..86f27388
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/Makefile.am
@@ -0,0 +1,65 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+nodist_bin_SCRIPTS = xzdiff xzgrep xzmore xzless
+dist_man_MANS = xzdiff.1 xzgrep.1 xzmore.1 xzless.1
+
+install-exec-hook:
+ cd $(DESTDIR)$(bindir) && \
+ target=`echo xzdiff | sed '$(transform)'` && \
+ for name in xzcmp lzdiff lzcmp; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link && \
+ $(LN_S) $$target $$link; \
+ done && \
+ target=`echo xzgrep | sed '$(transform)'` && \
+ for name in xzegrep xzfgrep lzgrep lzegrep lzfgrep; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link && \
+ $(LN_S) $$target $$link; \
+ done && \
+ for name in xzmore xzless; do \
+ target=`echo $$name | sed '$(transform)'` && \
+ link=`echo $$name | sed 's/xz/lz/;$(transform)'` && \
+ rm -f $$link && \
+ $(LN_S) $$target $$link; \
+ done
+
+install-data-hook:
+ cd $(DESTDIR)$(mandir)/man1 && \
+ target=`echo xzdiff | sed '$(transform)'` && \
+ for name in xzcmp lzdiff lzcmp; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link.1 && \
+ $(LN_S) $$target.1 $$link.1; \
+ done && \
+ target=`echo xzgrep | sed '$(transform)'` && \
+ for name in xzegrep xzfgrep lzgrep lzegrep lzfgrep; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link.1 && \
+ $(LN_S) $$target.1 $$link.1; \
+ done && \
+ for name in xzmore xzless; do \
+ target=`echo $$name | sed '$(transform)'` && \
+ link=`echo $$name | sed 's/xz/lz/;$(transform)'` && \
+ rm -f $$link.1 && \
+ $(LN_S) $$target.1 $$link.1; \
+ done
+
+uninstall-hook:
+ cd $(DESTDIR)$(bindir) && \
+ for name in xzcmp lzdiff lzcmp xzegrep xzfgrep \
+ lzgrep lzegrep lzfgrep lzmore lzless; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link; \
+ done
+ cd $(DESTDIR)$(mandir)/man1 && \
+ for name in xzcmp lzdiff lzcmp xzegrep xzfgrep \
+ lzgrep lzegrep lzfgrep lzmore lzless; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link.1; \
+ done
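Taken together, the hooks above amount to the following link map. This is only an approximation: it assumes an identity $(transform), uses plain ln -sf in place of rm -f plus $(LN_S), and treats $DESTDIR and $bindir as exported shell variables standing in for the make variables (the matching .1 links are created the same way by install-data-hook):

    # Approximate effect of install-exec-hook with no program name transform.
    cd "$DESTDIR$bindir" &&
    for link in xzcmp lzdiff lzcmp; do ln -sf xzdiff "$link"; done &&
    for link in xzegrep xzfgrep lzgrep lzegrep lzfgrep; do ln -sf xzgrep "$link"; done &&
    ln -sf xzmore lzmore &&
    ln -sf xzless lzless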
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/Makefile.in
new file mode 100644
index 00000000..66e23b38
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/Makefile.in
@@ -0,0 +1,586 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = src/scripts
+DIST_COMMON = $(dist_man_MANS) $(srcdir)/Makefile.am \
+ $(srcdir)/Makefile.in $(srcdir)/xzdiff.in $(srcdir)/xzgrep.in \
+ $(srcdir)/xzless.in $(srcdir)/xzmore.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES = xzdiff xzgrep xzmore xzless
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"
+SCRIPTS = $(nodist_bin_SCRIPTS)
+SOURCES =
+DIST_SOURCES =
+man1dir = $(mandir)/man1
+NROFF = nroff
+MANS = $(dist_man_MANS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+nodist_bin_SCRIPTS = xzdiff xzgrep xzmore xzless
+dist_man_MANS = xzdiff.1 xzgrep.1 xzmore.1 xzless.1
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/scripts/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/scripts/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+xzdiff: $(top_builddir)/config.status $(srcdir)/xzdiff.in
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+xzgrep: $(top_builddir)/config.status $(srcdir)/xzgrep.in
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+xzmore: $(top_builddir)/config.status $(srcdir)/xzmore.in
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+xzless: $(top_builddir)/config.status $(srcdir)/xzless.in
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+install-nodist_binSCRIPTS: $(nodist_bin_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+ @list='$(nodist_bin_SCRIPTS)'; test -n "$(bindir)" || list=; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n' \
+ -e 'h;s|.*|.|' \
+ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+ if (++n[d] == $(am__install_max)) { \
+ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+ else { print "f", d "/" $$4, $$1 } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-nodist_binSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(nodist_bin_SCRIPTS)'; test -n "$(bindir)" || exit 0; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 's,.*/,,;$(transform)'`; \
+ test -n "$$list" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(bindir)" && rm -f $$files
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+install-man1: $(dist_man_MANS)
+ @$(NORMAL_INSTALL)
+ test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)"
+ @list=''; test -n "$(man1dir)" || exit 0; \
+ { for i in $$list; do echo "$$i"; done; \
+ l2='$(dist_man_MANS)'; for i in $$l2; do echo "$$i"; done | \
+ sed -n '/\.1[a-z]*$$/p'; \
+ } | while read p; do \
+ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; echo "$$p"; \
+ done | \
+ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \
+ sed 'N;N;s,\n, ,g' | { \
+ list=; while read file base inst; do \
+ if test "$$base" = "$$inst"; then list="$$list $$file"; else \
+ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \
+ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \
+ fi; \
+ done; \
+ for i in $$list; do echo "$$i"; done | $(am__base_list) | \
+ while read files; do \
+ test -z "$$files" || { \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \
+ done; }
+
+uninstall-man1:
+ @$(NORMAL_UNINSTALL)
+ @list=''; test -n "$(man1dir)" || exit 0; \
+ files=`{ for i in $$list; do echo "$$i"; done; \
+ l2='$(dist_man_MANS)'; for i in $$l2; do echo "$$i"; done | \
+ sed -n '/\.1[a-z]*$$/p'; \
+ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \
+ test -z "$$files" || { \
+ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; }
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+ @list='$(MANS)'; if test -n "$$list"; then \
+ list=`for p in $$list; do \
+ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \
+ if test -n "$$list" && \
+ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \
+ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \
+ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \
+ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \
+ echo " typically \`make maintainer-clean' will remove them" >&2; \
+ exit 1; \
+ else :; fi; \
+ else :; fi
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS) $(MANS)
+installdirs:
+ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-man
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-data-hook
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-nodist_binSCRIPTS
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-exec-hook
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man: install-man1
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-man uninstall-nodist_binSCRIPTS
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) uninstall-hook
+uninstall-man: uninstall-man1
+
+.MAKE: install-am install-data-am install-exec-am install-strip \
+ uninstall-am
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+ distclean distclean-generic distclean-libtool distdir dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-data install-data-am install-data-hook install-dvi \
+ install-dvi-am install-exec install-exec-am install-exec-hook \
+ install-html install-html-am install-info install-info-am \
+ install-man install-man1 install-nodist_binSCRIPTS install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \
+ uninstall-hook uninstall-man uninstall-man1 \
+ uninstall-nodist_binSCRIPTS
+
+
+install-exec-hook:
+ cd $(DESTDIR)$(bindir) && \
+ target=`echo xzdiff | sed '$(transform)'` && \
+ for name in xzcmp lzdiff lzcmp; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link && \
+ $(LN_S) $$target $$link; \
+ done && \
+ target=`echo xzgrep | sed '$(transform)'` && \
+ for name in xzegrep xzfgrep lzgrep lzegrep lzfgrep; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link && \
+ $(LN_S) $$target $$link; \
+ done && \
+ for name in xzmore xzless; do \
+ target=`echo $$name | sed '$(transform)'` && \
+ link=`echo $$name | sed 's/xz/lz/;$(transform)'` && \
+ rm -f $$link && \
+ $(LN_S) $$target $$link; \
+ done
+
+install-data-hook:
+ cd $(DESTDIR)$(mandir)/man1 && \
+ target=`echo xzdiff | sed '$(transform)'` && \
+ for name in xzcmp lzdiff lzcmp; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link.1 && \
+ $(LN_S) $$target.1 $$link.1; \
+ done && \
+ target=`echo xzgrep | sed '$(transform)'` && \
+ for name in xzegrep xzfgrep lzgrep lzegrep lzfgrep; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link.1 && \
+ $(LN_S) $$target.1 $$link.1; \
+ done && \
+ for name in xzmore xzless; do \
+ target=`echo $$name | sed '$(transform)'` && \
+ link=`echo $$name | sed 's/xz/lz/;$(transform)'` && \
+ rm -f $$link.1 && \
+ $(LN_S) $$target.1 $$link.1; \
+ done
+
+uninstall-hook:
+ cd $(DESTDIR)$(bindir) && \
+ for name in xzcmp lzdiff lzcmp xzegrep xzfgrep \
+ lzgrep lzegrep lzfgrep lzmore lzless; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link; \
+ done
+ cd $(DESTDIR)$(mandir)/man1 && \
+ for name in xzcmp lzdiff lzcmp xzegrep xzfgrep \
+ lzgrep lzegrep lzfgrep lzmore lzless; do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link.1; \
+ done
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzdiff.1 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzdiff.1
new file mode 100644
index 00000000..252d1cd3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzdiff.1
@@ -0,0 +1,74 @@
+.\"
+.\" Original zdiff.1 for gzip: Jean-loup Gailly
+.\"
+.\" Modifications for XZ Utils: Lasse Collin
+.\" Andrew Dudman
+.\"
+.\" License: GNU GPLv2+
+.\"
+.TH XZDIFF 1 "2009-07-05" "Tukaani" "XZ Utils"
+.SH NAME
+xzcmp, xzdiff, lzcmp, lzdiff \- compare compressed files
+.SH SYNOPSIS
+.B xzcmp
+.RI [ cmp_options "] " file1 " [" file2 ]
+.br
+.B xzdiff
+.RI [ diff_options "] " file1 " [" file2 ]
+.br
+.B lzcmp
+.RI [ cmp_options "] " file1 " [" file2 ]
+.br
+.B lzdiff
+.RI [ diff_options "] " file1 " [" file2 ]
+.SH DESCRIPTION
+.B xzcmp
+and
+.B xzdiff
+invoke
+.BR cmp (1)
+or
+.BR diff (1)
+on files compressed with
+.BR xz (1),
+.BR lzma (1),
+.BR gzip (1),
+or
+.BR bzip2 (1).
+All options specified are passed directly to
+.B cmp
+or
+.BR diff .
+If only one file is specified, then the files compared are
+.I file1
+(which must have a suffix of a supported compression format) and
+.I file1
+from which the compression format suffix has been stripped.
+If two files are specified, then they are uncompressed if necessary and fed to
+.BR cmp (1)
+or
+.BR diff (1).
+The exit status from
+.B cmp
+or
+.B diff
+is preserved.
+.PP
+The names
+.B lzcmp
+and
+.B lzdiff
+are provided for backward compatibility with LZMA Utils.
+.SH "SEE ALSO"
+.BR cmp (1),
+.BR diff (1),
+.BR xz (1),
+.BR gzip (1),
+.BR bzip2 (1),
+.BR zdiff (1)
+.SH BUGS
+Messages from the
+.BR cmp (1)
+or
+.BR diff (1)
+programs refer to temporary filenames instead of those specified.
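A few hypothetical invocations illustrating the DESCRIPTION above (file names are placeholders):

    # One operand: notes.txt.xz is compared against notes.txt (suffix stripped).
    xzdiff notes.txt.xz

    # Two operands: each is decompressed as needed; -u is passed through to diff(1).
    xzdiff -u old.log.xz new.log.gz

    # xzcmp works the same way but runs cmp(1) instead of diff(1).
    xzcmp build-a.tar.xz build-b.tar.xz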
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzdiff.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzdiff.in
new file mode 100644
index 00000000..2d6e5da4
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzdiff.in
@@ -0,0 +1,172 @@
+#!@POSIX_SHELL@
+
+# Copyright (C) 1998, 2002, 2006, 2007 Free Software Foundation
+# Copyright (C) 1993 Jean-loup Gailly
+
+# Modified for XZ Utils by Andrew Dudman and Lasse Collin.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+#SET_PATH - This line is a placeholder to ease patching this script.
+
+# Instead of unsetting XZ_OPT, just make sure that xz will use file format
+# autodetection. This way memory usage limit and thread limit can be
+# specified via XZ_OPT. With gzip and bzip2 it's OK to just unset the
+# environment variables.
+xz='@xz@ --format=auto'
+# gzip and bzip2 are invoked directly for the other supported suffixes.
+gzip=gzip
+bzip2=bzip2
+unset GZIP BZIP BZIP2
+
+case ${0##*/} in
+ *cmp*) prog=xzcmp; cmp=${CMP:-cmp};;
+ *) prog=xzdiff; cmp=${DIFF:-diff};;
+esac
+
+version="$prog (@PACKAGE_NAME@) @VERSION@"
+
+usage="Usage: ${0##*/} [OPTION]... FILE1 [FILE2]
+Compare FILE1 to FILE2, using their uncompressed contents if they are
+compressed. If FILE2 is omitted, then the files compared are FILE1 and
+FILE1 from which the compression format suffix has been stripped.
+
+Do comparisons like '$cmp' does. OPTIONs are the same as for '$cmp'.
+
+Report bugs to <@PACKAGE_BUGREPORT@>."
+
+# sed script to escape all ' for the shell, and then (to handle trailing
+# newlines correctly) turn trailing X on last line into '.
+escape='
+ s/'\''/'\''\\'\'''\''/g
+ $s/X$/'\''/
+'
+
+while :; do
+ case $1 in
+ --h*) printf '%s\n' "$usage" || exit 2; exit;;
+ --v*) echo "$version" || exit 2; exit;;
+ --) shift; break;;
+ -*\'*) cmp="$cmp '"`printf '%sX\n' "$1" | sed "$escape"`;;
+ -?*) cmp="$cmp '$1'";;
+ *) break;;
+ esac
+ shift
+done
+cmp="$cmp --"
+
+for file; do
+ test "X$file" = X- || <"$file" || exit 2
+done
+
+xz1=$xz
+xz2=$xz
+xz_status=0
+exec 3>&1
+
+if test $# -eq 1; then
+ case $1 in
+ *[-.]xz | *[-.]lzma | *.t[lx]z)
+ ;;
+ *[-.]bz2 | *.tbz | *.tbz2)
+ xz1=$bzip2;;
+ *[-.][zZ] | *_z | *[-.]gz | *.t[ag]z)
+ xz1=$gzip;;
+ *)
+ echo >&2 "$0: $1: Unknown compressed file name suffix"
+ exit 2;;
+ esac
+ case $1 in
+ *[-.][zZ] | *_z | *[-.][gx]z | *[-.]bz2 | *[-.]lzma)
+ FILE=`expr "X$1" : 'X\(.*\)[-.][abglmxzZ2]*$'`;;
+ *.t[abglx]z)
+ FILE=`expr "X$1" : 'X\(.*[-.]t\)[abglx]z$'`ar;;
+ *.tbz2)
+ FILE=`expr "X$1" : 'X\(.*[-.]t\)bz2$'`ar;;
+ esac
+ xz_status=$(
+ exec 4>&1
+ ($xz1 -cd -- "$1" 4>&-; echo $? >&4) 3>&- | eval "$cmp" - '"$FILE"' >&3
+ )
+elif test $# -eq 2; then
+ case $1 in
+ *[-.]bz2 | *.tbz | *.tbz2) xz1=$bzip2;;
+ *[-.][zZ] | *_z | *[-.]gz | *.t[ag]z) xz1=$gzip;;
+ esac
+ case $2 in
+ *[-.]bz2 | *.tbz | *.tbz2) xz2=$bzip2;;
+ *[-.][zZ] | *_z | *[-.]gz | *.t[ag]z) xz2=$gzip;;
+ esac
+ case $1 in
+ *[-.][zZ] | *_z | *[-.][gx]z | *[-.]bz2 | *[-.]lzma | *.t[abglx]z | *.tbz2 | -)
+ case "$2" in
+ *[-.][zZ] | *_z | *[-.][gx]z | *[-.]bz2 | *[-.]lzma | *.t[abglx]z | *.tbz2 | -)
+ if test "$1$2" = --; then
+ xz_status=$(
+ exec 4>&1
+ ($xz1 -cdfq - 4>&-; echo $? >&4) 3>&- |
+ eval "$cmp" - - >&3
+ )
+ elif # Reject Solaris 8's buggy /bin/bash 2.03.
+ echo X | (echo X | eval "$cmp" /dev/fd/5 - >/dev/null 2>&1) 5<&0; then
+ xz_status=$(
+ exec 4>&1
+ ($xz1 -cdfq -- "$1" 4>&-; echo $? >&4) 3>&- |
+ ( ($xz2 -cdfq -- "$2" 4>&-; echo $? >&4) 3>&- 5<&- </dev/null |
+ eval "$cmp" /dev/fd/5 - >&3) 5<&0
+ )
+ case $xz_status in
+ *[1-9]*) xz_status=1;;
+ *) xz_status=0;;
+ esac
+ else
+ F=`expr "/$2" : '.*/\(.*\)[-.][ablmtxz2]*$'` || F=$prog
+ tmp=
+ trap '
+ test -n "$tmp" && rm -f "$tmp"
+ (exit 2); exit 2
+ ' HUP INT PIPE TERM 0
+ tmp=`mktemp -t -- "$F.XXXXXX"` || exit 2
+ $xz2 -cdfq -- "$2" > "$tmp" || exit 2
+ xz_status=$(
+ exec 4>&1
+ ($xz1 -cdfq -- "$1" 4>&-; echo $? >&4) 3>&- |
+ eval "$cmp" - '"$tmp"' >&3
+ )
+ cmp_status=$?
+ rm -f "$tmp" || xz_status=$?
+ trap - HUP INT PIPE TERM 0
+ (exit $cmp_status)
+ fi;;
+ *)
+ xz_status=$(
+ exec 4>&1
+ ($xz1 -cdfq -- "$1" 4>&-; echo $? >&4) 3>&- |
+ eval "$cmp" - '"$2"' >&3
+ );;
+ esac;;
+ *)
+ case "$2" in
+ *[-.][zZ] | *_z | *[-.][gx]z | *[-.]bz2 | *[-.]lzma | *.t[abglx]z | *.tbz2 | -)
+ xz_status=$(
+ exec 4>&1
+ ($xz2 -cdfq -- "$2" 4>&-; echo $? >&4) 3>&- |
+ eval "$cmp" '"$1"' - >&3
+ );;
+ *)
+ eval "$cmp" '"$1"' '"$2"';;
+ esac;;
+ esac
+else
+ echo >&2 "$0: Invalid number of operands; try \`${0##*/} --help' for help"
+ exit 2
+fi
+
+cmp_status=$?
+test "$xz_status" -eq 0 || exit 2
+exit $cmp_status
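The descriptor juggling used throughout the script is worth isolating: the decompressor runs inside a pipeline, yet its exit status is captured through an extra file descriptor so that a failing xz (not only a failing cmp or diff) still yields exit status 2. A minimal stand-alone sketch of the idiom, with gzip and wc as placeholder producer and consumer and file.gz as a placeholder input:

    exec 3>&1                      # fd 3 keeps the real stdout for the consumer
    status=$(
      exec 4>&1                    # fd 4 feeds the command substitution
      (gzip -cd -- file.gz 4>&-; echo $? >&4) | wc -c >&3
    )
    echo "decompressor exit status: $status"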
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzgrep.1 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzgrep.1
new file mode 100644
index 00000000..996d64ae
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzgrep.1
@@ -0,0 +1,94 @@
+.\"
+.\" Original zgrep.1 for gzip: Jean-loup Gailly
+.\" Charles Levert <charles@comm.polymtl.ca>
+.\"
+.\" Modifications for XZ Utils: Lasse Collin
+.\"
+.\" License: GNU GPLv2+
+.\"
+.TH XZGREP 1 "2009-07-05" "Tukaani" "XZ Utils"
+.SH NAME
+xzgrep \- search compressed files for a regular expression
+.SH SYNOPSIS
+.B xzgrep
+.RI [ grep_options ]
+.RB [ \-e ]
+.I pattern
+.IR file ".\|.\|."
+.br
+.B xzegrep
+.RB ...
+.br
+.B xzfgrep
+.RB ...
+.br
+.B lzgrep
+.RB ...
+.br
+.B lzegrep
+.RB ...
+.br
+.B lzfgrep
+.RB ...
+.SH DESCRIPTION
+.B xzgrep
+invokes
+.BR grep (1)
+on
+.I files
+which may be either uncompressed or compressed with
+.BR xz (1),
+.BR lzma (1),
+.BR gzip (1),
+or
+.BR bzip2 (1).
+All options specified are passed directly to
+.BR grep (1).
+.PP
+If no
+.I file
+is specified, then the standard input is decompressed if necessary and fed to
+.BR grep (1).
+When reading from standard input,
+.BR gzip (1)
+and
+.BR bzip2 (1)
+compressed files are not supported.
+.PP
+If
+.B xzgrep
+is invoked as
+.B xzegrep
+or
+.B xzfgrep
+then
+.BR egrep (1)
+or
+.BR fgrep (1)
+is used instead of
+.BR grep (1).
+The same applies to names
+.BR lzgrep ,
+.BR lzegrep ,
+and
+.BR lzfgrep ,
+which are provided for backward compatibility with LZMA Utils.
+.PP
+.SH ENVIRONMENT
+.TP
+.B GREP
+If the
+.B GREP
+environment variable is set,
+.B xzgrep
+uses it instead of
+.BR grep (1),
+.BR egrep (1),
+or
+.BR fgrep (1).
+.SH "SEE ALSO"
+.BR grep (1),
+.BR xz (1),
+.BR gzip (1),
+.BR bzip2 (1),
+.BR zgrep (1)
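Hypothetical invocations matching the DESCRIPTION above (paths are placeholders):

    # Options are passed straight to grep(1); the file is decompressed on the fly.
    xzgrep -i 'connection reset' logs/app.log.xz

    # With no FILE operand the standard input is decompressed if necessary
    # (xz/lzma only; gzip and bzip2 are not supported on standard input).
    xzgrep 'needle' < data.txt.xz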
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzgrep.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzgrep.in
new file mode 100644
index 00000000..9a9b393f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzgrep.in
@@ -0,0 +1,196 @@
+#!@POSIX_SHELL@
+
+# xzgrep -- a wrapper around a grep program that decompresses files as needed
+# Adapted from a version sent by Charles Levert <charles@comm.polymtl.ca>
+
+# Copyright (C) 1998, 2001, 2002, 2006, 2007 Free Software Foundation
+# Copyright (C) 1993 Jean-loup Gailly
+
+# Modified for XZ Utils by Andrew Dudman and Lasse Collin.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+#SET_PATH - This line is a placeholder to ease patching this script.
+
+# Instead of unsetting XZ_OPT, just make sure that xz will use file format
+# autodetection. This way memory usage limit and thread limit can be
+# specified via XZ_OPT. With gzip and bzip2 it's OK to just unset the
+# environment variables.
+xz='@xz@ --format=auto'
+unset GZIP BZIP BZIP2
+
+case ${0##*/} in
+ *egrep*) prog=xzegrep; grep=${GREP:-egrep};;
+ *fgrep*) prog=xzfgrep; grep=${GREP:-fgrep};;
+ *) prog=xzgrep; grep=${GREP:-grep};;
+esac
+
+version="$prog (@PACKAGE_NAME@) @VERSION@"
+
+usage="Usage: ${0##/*} [OPTION]... [-e] PATTERN [FILE]...
+Look for instances of PATTERN in the input FILEs, using their
+uncompressed contents if they are compressed.
+
+OPTIONs are the same as for '$grep'.
+
+Report bugs to <@PACKAGE_BUGREPORT@>."
+
+# sed script to escape all ' for the shell, and then (to handle trailing
+# newlines correctly) turn trailing X on last line into '.
+escape='
+ s/'\''/'\''\\'\'''\''/g
+ $s/X$/'\''/
+'
+operands=
+have_pat=0
+files_with_matches=0
+files_without_matches=0
+no_filename=0
+with_filename=0
+
+while test $# -ne 0; do
+ option=$1
+ shift
+ optarg=
+
+ case $option in
+ (-[0123456789abcdhHiIKLlnoqrRsTuUvVwxyzZ]?*)
+ arg2=-\'$(expr "X${option}X" : 'X-.[0-9]*\(.*\)' | sed "$escape")
+ eval "set -- $arg2 "'${1+"$@"}'
+ option=$(expr "X$option" : 'X\(-.[0-9]*\)');;
+ (--binary-*=* | --[lm]a*=* | --reg*=*)
+ ;;
+ (-[ABCDefm] | --binary-* | --file | --[lm]a* | --reg*)
+ case ${1?"$option option requires an argument"} in
+ (*\'*)
+ optarg=" '"$(printf '%sX\n' "$1" | sed "$escape");;
+ (*)
+ optarg=" '$1'";;
+ esac
+ shift;;
+ (--)
+ break;;
+ (-?*)
+ ;;
+ (*)
+ case $option in
+ (*\'*)
+ operands="$operands '"$(printf '%sX\n' "$option" | sed "$escape");;
+ (*)
+ operands="$operands '$option'";;
+ esac
+ ${POSIXLY_CORRECT+break}
+ continue;;
+ esac
+
+ case $option in
+ (-[drRzZ] | --di* | --exc* | --inc* | --rec* | --nu*)
+ printf >&2 '%s: %s: Option not supported\n' "$0" "$option"
+ exit 2;;
+ (-[ef]* | --file | --file=* | --reg*)
+ have_pat=1;;
+ (--h | --he | --hel | --help)
+ echo "$usage" || exit 2
+ exit;;
+ (-H | --wi | --wit | --with | --with- | --with-f | --with-fi \
+ | --with-fil | --with-file | --with-filen | --with-filena | --with-filenam \
+ | --with-filename)
+ with_filename=1
+ continue;;
+ (-l | --files-with-*)
+ files_with_matches=1;;
+ (-L | --files-witho*)
+ files_without_matches=1;;
+ (--no-f*)
+ no_filename=1;;
+ (-V | --v | --ve | --ver | --vers | --versi | --versio | --version)
+ echo "$version" || exit 2
+ exit;;
+ esac
+
+ case $option in
+ (*\'?*)
+ option=\'$(expr "X${option}X" : 'X\(.*\)' | sed "$escape");;
+ (*)
+ option="'$option'";;
+ esac
+
+ grep="$grep $option$optarg"
+done
+
+eval "set -- $operands "'${1+"$@"}'
+
+if test $have_pat -eq 0; then
+ case ${1?"Missing pattern; try \`${0##*/} --help' for help"} in
+ (*\'*)
+ grep="$grep -- '"$(printf '%sX\n' "$1" | sed "$escape");;
+ (*)
+ grep="$grep -- '$1'";;
+ esac
+ shift
+fi
+
+if test $# -eq 0; then
+ set -- -
+fi
+
+exec 3>&1
+res=0
+
+for i; do
+ case $i in
+ *[-.][zZ] | *_z | *[-.]gz | *.t[ag]z) uncompress="gzip -cdfq";;
+ *[-.]bz2 | *[-.]tbz | *.tbz2) uncompress="bzip2 -cdfq";;
+ *) uncompress="$xz -cdfq";;
+ esac
+ # Fail if xz or grep (or sed) fails.
+ xz_status=$(
+ exec 5>&1
+ (eval "$uncompress" -- "$i" 5>&-; echo $? >&5) 3>&- |
+ if test $files_with_matches -eq 1; then
+ eval "$grep" -q && { printf '%s\n' "$i" || exit 2; }
+ elif test $files_without_matches -eq 1; then
+ eval "$grep" -q || {
+ r=$?
+ if test $r -eq 1; then
+ printf '%s\n' "$i" || r=2
+ fi
+ exit $r
+ }
+ elif test $with_filename -eq 0 &&
+ { test $# -eq 1 || test $no_filename -eq 1; }; then
+ eval "$grep"
+ else
+ case $i in
+ (*'
+'* | *'&'* | *'\'* | *'|'*)
+ i=$(printf '%s\n' "$i" |
+ sed '
+ $!N
+ $s/[&\|]/\\&/g
+ $s/\n/\\n/g
+ ');;
+ esac
+ sed_script="s|^|$i:|"
+
+ # Fail if grep or sed fails.
+ r=$(
+ exec 4>&1
+ (eval "$grep" 4>&-; echo $? >&4) 3>&- | sed "$sed_script" >&3 4>&-
+ ) || r=2
+ exit $r
+ fi >&3 5>&-
+ )
+ r=$?
+ test "$xz_status" -eq 0 || test "$xz_status" -eq 2 || r=2
+ test $res -lt $r && res=$r
+done
+exit $res
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzless.1 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzless.1
new file mode 100644
index 00000000..4fe4bcba
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzless.1
@@ -0,0 +1,66 @@
+.\"
+.\" Authors: Andrew Dudman
+.\" Lasse Collin
+.\"
+.\" This file has been put into the public domain.
+.\" You can do whatever you want with this file.
+.\"
+.\" (Note that this file is not based on gzip's zless.1.)
+.\"
+.TH XZLESS 1 "2009-07-05" "Tukaani" "XZ Utils"
+.SH NAME
+xzless, lzless \- view xz or lzma compressed (text) files
+.SH SYNOPSIS
+.B xzless
+.RI [ file ...]
+.br
+.B lzless
+.RI [ file ...]
+.SH DESCRIPTION
+.B xzless
+is a filter that displays pagefuls of uncompressed text from compressed
+file(s) to a terminal. It works on files compressed with
+.BR xz (1)
+or
+.BR lzma (1).
+If no
+.I files
+are given,
+.B xzless
+reads from standard input.
+.PP
+.B xzless
+uses
+.BR less (1)
+as its only pager. Unlike
+.BR xzmore ,
+the choice of pagers is not alterable by an environment variable.
+Commands are based on both
+.BR more (1)
+and
+.BR vi (1),
+and allow back and forth movement and searching.
+See the
+.BR less (1)
+manual for more information.
+.PP
+The command named
+.B lzless
+is provided for backward compatibility with LZMA Utils.
+.SH ENVIRONMENT
+.TP
+.B LESSMETACHARS
+A list of characters special to the shell. Set by
+.B xzless
+unless it is already set in the environment.
+.TP
+.B LESSOPEN
+Set to a command line to invoke the
+.BR xz (1)
+decompressor for preprocessing the input files to
+.BR less (1).
+.SH "SEE ALSO"
+.BR less (1),
+.BR xz (1),
+.BR xzmore (1),
+.BR zless (1)
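The LESSOPEN arrangement described above can be reproduced by hand. A hedged approximation follows, assuming the installed decompressor is plain xz and using a placeholder file name; whether the filter needs the | or |- prefix depends on the installed less version, which is exactly the check xzless.in below performs:

    # Roughly what xzless sets up before exec'ing less.
    LESSOPEN='|xz --format=auto -cdfq -- %s'
    export LESSOPEN
    less notes.txt.xz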
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzless.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzless.in
new file mode 100644
index 00000000..a3da697c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzless.in
@@ -0,0 +1,58 @@
+#!@POSIX_SHELL@
+
+# Copyright (C) 1998, 2002, 2006, 2007 Free Software Foundation
+
+# The original version for gzip was written by Paul Eggert.
+# Modified for XZ Utils by Andrew Dudman and Lasse Collin.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+#SET_PATH - This line is a placeholder to ease patching this script.
+
+# Instead of unsetting XZ_OPT, just make sure that xz will use file format
+# autodetection. This way memory usage limit and thread limit can be
+# specified via XZ_OPT.
+xz='@xz@ --format=auto'
+
+version='xzless (@PACKAGE_NAME@) @VERSION@'
+
+usage="Usage: ${0##*/} [OPTION]... [FILE]...
+Like 'less', but operate on the uncompressed contents of xz compressed FILEs.
+
+Options are the same as for 'less'.
+
+Report bugs to <@PACKAGE_BUGREPORT@>."
+
+case $1 in
+ --help) echo "$usage" || exit 2; exit;;
+ --version) echo "$version" || exit 2; exit;;
+esac
+
+if test "${LESSMETACHARS+set}" != set; then
+ # Work around a bug in less 394 and earlier;
+ # it mishandles the metacharacters '$%=~'.
+ space=' '
+ tab=' '
+ nl='
+'
+ LESSMETACHARS="$space$tab$nl'"';*?"()<>[|&^`#\$%=~'
+fi
+
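+# Find the version of the installed less: the first line of 'less -V'
+# output looks like "less 429", so strip the leading "less " and compare.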
+if test "$(less -V | { read ver && echo ${ver#less }; })" -ge 429; then
+ # less 429 or later: LESSOPEN pipe will be used on
+ # standard input if $LESSOPEN begins with |-.
+ LESSOPEN="|-$xz -cdfq -- %s"
+else
+ LESSOPEN="|$xz -cdfq -- %s"
+fi
+export LESSMETACHARS LESSOPEN
+
+exec less "$@"
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzmore.1 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzmore.1
new file mode 100644
index 00000000..a94e8326
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzmore.1
@@ -0,0 +1,54 @@
+.\"
+.\" Original zdiff.1 for gzip: Jean-loup Gailly
+.\" Modifications for XZ Utils: Lasse Collin
+.\"
+.\" License: GNU GPLv2+
+.\"
+.TH XZMORE 1 "2009-07-05" "Tukaani" "XZ Utils"
+.SH NAME
+xzmore, lzmore \- view xz or lzma compressed (text) files
+.SH SYNOPSIS
+.B xzmore
+.RI [ "filename ..." ]
+.br
+.B lzmore
+.RI [ "filename ..." ]
+.SH DESCRIPTION
+.B xzmore
+is a filter which allows examination of
+.BR xz (1)
+or
+.BR lzma (1)
+compressed text files one screenful at a time on a soft-copy terminal.
+.PP
+To use a pager other than the default
+.BR more ,
+set the environment variable
+.B PAGER
+to the name of the desired program.
+.PP
+The name
+.B lzmore
+is provided for backward compatibility with LZMA Utils.
+.TP
+.BR e " or " q
+When the prompt --More--(Next file:
+.IR file )
+is printed, this command causes
+.B xzmore
+to exit.
+.TP
+.B s
+When the prompt --More--(Next file:
+.IR file )
+is printed, this command causes
+.B xzmore
+to skip the next file and continue.
+.PP
+For a list of keyboard commands supported while actually viewing the
+content of a file, refer to the manual of the pager you use, usually
+.BR more (1).
+.SH "SEE ALSO"
+.BR more (1),
+.BR xz (1),
+.BR xzless (1),
+.BR zmore (1)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzmore.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzmore.in
new file mode 100644
index 00000000..940d6614
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/scripts/xzmore.in
@@ -0,0 +1,78 @@
+#!@POSIX_SHELL@
+
+# Copyright (C) 2001, 2002, 2007 Free Software Foundation
+# Copyright (C) 1992, 1993 Jean-loup Gailly
+
+# Modified for XZ Utils by Andrew Dudman and Lasse Collin.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+#SET_PATH - This line is a placeholder to ease patching this script.
+
+# Instead of unsetting XZ_OPT, just make sure that xz will use file format
+# autodetection. This way memory usage limit and thread limit can be
+# specified via XZ_OPT.
+xz='@xz@ --format=auto'
+
+version='xzmore (@PACKAGE_NAME@) @VERSION@'
+
+usage="Usage: ${0##*/} [OPTION]... [FILE]...
+Like 'more', but operate on the uncompressed contents of xz compressed FILEs.
+
+Report bugs to <@PACKAGE_BUGREPORT@>."
+
+case $1 in
+ --help) echo "$usage" || exit 2; exit;;
+ --version) echo "$version" || exit 2; exit;;
+esac
+
+oldtty=`stty -g 2>/dev/null`
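+# Work out how to put the terminal into single-keystroke, no-echo mode:
+# BSD-style stty understands 'cbreak', SysV-style needs 'min 1 -icanon'.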
+if stty -cbreak 2>/dev/null; then
+ cb='cbreak'; ncb='-cbreak'
+else
+ # 'stty min 1' resets eof to ^a on both SunOS and SysV!
+ cb='min 1 -icanon'; ncb='icanon eof ^d'
+fi
+if test $? -eq 0 && test -n "$oldtty"; then
+ trap 'stty $oldtty 2>/dev/null; exit' 0 2 3 5 10 13 15
+else
+ trap 'stty $ncb echo 2>/dev/null; exit' 0 2 3 5 10 13 15
+fi
+
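+# With no file operands, page the decompressed standard input; refuse to
+# read compressed data from a terminal.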
+if test $# = 0; then
+ if test -t 0; then
+ echo "$usage"; exit 1
+ else
+ $xz -cdfq | eval "${PAGER:-more}"
+ fi
+else
+ FIRST=1
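+  # Show each file in turn; between files print the --More-- prompt and
+  # read a single keystroke: 'e' or 'q' exits, 's' skips the next file.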
+ for FILE; do
+ < "$FILE" || continue
+ if test $FIRST -eq 0; then
+ printf "%s--More--(Next file: %s)" "" "$FILE"
+ stty $cb -echo 2>/dev/null
+ ANS=`dd bs=1 count=1 2>/dev/null`
+ stty $ncb echo 2>/dev/null
+ echo " "
+ case "$ANS" in
+ [eq]) exit;;
+ esac
+ fi
+ if test "$ANS" != 's'; then
+ echo "------> $FILE <------"
+ $xz -cdfq -- "$FILE" | eval "${PAGER:-more}"
+ fi
+ if test -t 1; then
+ FIRST=0
+ fi
+ done
+fi
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/Makefile.am
new file mode 100644
index 00000000..ccdc66ca
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/Makefile.am
@@ -0,0 +1,96 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+bin_PROGRAMS = xz
+
+xz_SOURCES = \
+ args.c \
+ args.h \
+ coder.c \
+ coder.h \
+ file_io.c \
+ file_io.h \
+ hardware.c \
+ hardware.h \
+ main.c \
+ main.h \
+ message.c \
+ message.h \
+ options.c \
+ options.h \
+ private.h \
+ signals.c \
+ signals.h \
+ suffix.c \
+ suffix.h \
+ util.c \
+ util.h
+
+if COND_W32
+xz_SOURCES += xz_w32res.rc
+endif
+
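+# Locale directory for gettext plus include paths for the shared headers,
+# the liblzma API, and the gnulib replacement headers in lib/.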
+xz_CPPFLAGS = \
+ -DLOCALEDIR=\"$(localedir)\" \
+ -I$(top_srcdir)/src/common \
+ -I$(top_srcdir)/src/liblzma/api \
+ -I$(top_builddir)/lib \
+ $(STATIC_CPPFLAGS)
+
+xz_LDFLAGS = $(STATIC_LDFLAGS)
+xz_LDADD = $(top_builddir)/src/liblzma/liblzma.la
+
+if COND_GNULIB
+xz_LDADD += $(top_builddir)/lib/libgnu.a
+endif
+
+# libgnu.a may need these libs, so this must be after libgnu.a.
+xz_LDADD += $(LTLIBINTL)
+
+
+# Windows resource compiler support
+.rc.o:
+ $(RC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(xz_CPPFLAGS) $(CPPFLAGS) $(RCFLAGS) -i $< -o $@
+
+
+dist_man_MANS = xz.1
+
+
+## Create symlinks for unxz and xzcat for convenience. Create symlinks also
+## for lzma, unlzma, and lzcat for compatibility with LZMA Utils 4.32.x.
+xzlinks = unxz xzcat lzma unlzma lzcat
+
+install-exec-hook:
+ cd $(DESTDIR)$(bindir) && \
+ target=`echo xz | sed '$(transform)'`$(EXEEXT) && \
+ for name in $(xzlinks); do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link && \
+ $(LN_S) $$target $$link; \
+ done
+
+install-data-hook:
+ cd $(DESTDIR)$(mandir)/man1 && \
+ target=`echo xz | sed '$(transform)'` && \
+ for name in $(xzlinks); do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link.1 && \
+ $(LN_S) $$target.1 $$link.1; \
+ done
+
+uninstall-hook:
+ cd $(DESTDIR)$(bindir) && \
+ for name in $(xzlinks); do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link; \
+ done
+ cd $(DESTDIR)$(mandir)/man1 && \
+ for name in $(xzlinks); do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link.1; \
+ done
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/Makefile.in
new file mode 100644
index 00000000..cb127ade
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/Makefile.in
@@ -0,0 +1,849 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+bin_PROGRAMS = xz$(EXEEXT)
+@COND_W32_TRUE@am__append_1 = xz_w32res.rc
+@COND_GNULIB_TRUE@am__append_2 = $(top_builddir)/lib/libgnu.a
+subdir = src/xz
+DIST_COMMON = $(dist_man_MANS) $(srcdir)/Makefile.am \
+ $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"
+PROGRAMS = $(bin_PROGRAMS)
+am__xz_SOURCES_DIST = args.c args.h coder.c coder.h file_io.c \
+ file_io.h hardware.c hardware.h main.c main.h message.c \
+ message.h options.c options.h private.h signals.c signals.h \
+ suffix.c suffix.h util.c util.h xz_w32res.rc
+@COND_W32_TRUE@am__objects_1 = xz_w32res.$(OBJEXT)
+am_xz_OBJECTS = xz-args.$(OBJEXT) xz-coder.$(OBJEXT) \
+ xz-file_io.$(OBJEXT) xz-hardware.$(OBJEXT) xz-main.$(OBJEXT) \
+ xz-message.$(OBJEXT) xz-options.$(OBJEXT) xz-signals.$(OBJEXT) \
+ xz-suffix.$(OBJEXT) xz-util.$(OBJEXT) $(am__objects_1)
+xz_OBJECTS = $(am_xz_OBJECTS)
+am__DEPENDENCIES_1 =
+xz_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_2) $(am__DEPENDENCIES_1)
+xz_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(xz_LDFLAGS) \
+ $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = $(xz_SOURCES)
+DIST_SOURCES = $(am__xz_SOURCES_DIST)
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+man1dir = $(mandir)/man1
+NROFF = nroff
+MANS = $(dist_man_MANS)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+xz_SOURCES = args.c args.h coder.c coder.h file_io.c file_io.h \
+ hardware.c hardware.h main.c main.h message.c message.h \
+ options.c options.h private.h signals.c signals.h suffix.c \
+ suffix.h util.c util.h $(am__append_1)
+xz_CPPFLAGS = \
+ -DLOCALEDIR=\"$(localedir)\" \
+ -I$(top_srcdir)/src/common \
+ -I$(top_srcdir)/src/liblzma/api \
+ -I$(top_builddir)/lib \
+ $(STATIC_CPPFLAGS)
+
+xz_LDFLAGS = $(STATIC_LDFLAGS)
+
+# libgnu.a may need these libs, so this must be after libgnu.a.
+xz_LDADD = $(top_builddir)/src/liblzma/liblzma.la $(am__append_2) \
+ $(LTLIBINTL)
+dist_man_MANS = xz.1
+xzlinks = unxz xzcat lzma unlzma lzcat
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj .rc
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/xz/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/xz/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-binPROGRAMS: $(bin_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+ @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed 's/$(EXEEXT)$$//' | \
+ while read p p1; do if test -f $$p || test -f $$p1; \
+ then echo "$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \
+ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+ sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) files[d] = files[d] " " $$1; \
+ else { print "f", $$3 "/" $$4, $$1; } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-binPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+ -e 's/$$/$(EXEEXT)/' `; \
+ test -n "$$list" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(bindir)" && rm -f $$files
+
+clean-binPROGRAMS:
+ @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \
+ echo " rm -f" $$list; \
+ rm -f $$list || exit $$?; \
+ test -n "$(EXEEXT)" || exit 0; \
+ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+ echo " rm -f" $$list; \
+ rm -f $$list
+xz$(EXEEXT): $(xz_OBJECTS) $(xz_DEPENDENCIES)
+ @rm -f xz$(EXEEXT)
+ $(xz_LINK) $(xz_OBJECTS) $(xz_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xz-args.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xz-coder.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xz-file_io.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xz-hardware.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xz-main.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xz-message.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xz-options.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xz-signals.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xz-suffix.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xz-util.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
+
+xz-args.o: args.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-args.o -MD -MP -MF $(DEPDIR)/xz-args.Tpo -c -o xz-args.o `test -f 'args.c' || echo '$(srcdir)/'`args.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-args.Tpo $(DEPDIR)/xz-args.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='args.c' object='xz-args.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-args.o `test -f 'args.c' || echo '$(srcdir)/'`args.c
+
+xz-args.obj: args.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-args.obj -MD -MP -MF $(DEPDIR)/xz-args.Tpo -c -o xz-args.obj `if test -f 'args.c'; then $(CYGPATH_W) 'args.c'; else $(CYGPATH_W) '$(srcdir)/args.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-args.Tpo $(DEPDIR)/xz-args.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='args.c' object='xz-args.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-args.obj `if test -f 'args.c'; then $(CYGPATH_W) 'args.c'; else $(CYGPATH_W) '$(srcdir)/args.c'; fi`
+
+xz-coder.o: coder.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-coder.o -MD -MP -MF $(DEPDIR)/xz-coder.Tpo -c -o xz-coder.o `test -f 'coder.c' || echo '$(srcdir)/'`coder.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-coder.Tpo $(DEPDIR)/xz-coder.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='coder.c' object='xz-coder.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-coder.o `test -f 'coder.c' || echo '$(srcdir)/'`coder.c
+
+xz-coder.obj: coder.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-coder.obj -MD -MP -MF $(DEPDIR)/xz-coder.Tpo -c -o xz-coder.obj `if test -f 'coder.c'; then $(CYGPATH_W) 'coder.c'; else $(CYGPATH_W) '$(srcdir)/coder.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-coder.Tpo $(DEPDIR)/xz-coder.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='coder.c' object='xz-coder.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-coder.obj `if test -f 'coder.c'; then $(CYGPATH_W) 'coder.c'; else $(CYGPATH_W) '$(srcdir)/coder.c'; fi`
+
+xz-file_io.o: file_io.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-file_io.o -MD -MP -MF $(DEPDIR)/xz-file_io.Tpo -c -o xz-file_io.o `test -f 'file_io.c' || echo '$(srcdir)/'`file_io.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-file_io.Tpo $(DEPDIR)/xz-file_io.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='file_io.c' object='xz-file_io.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-file_io.o `test -f 'file_io.c' || echo '$(srcdir)/'`file_io.c
+
+xz-file_io.obj: file_io.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-file_io.obj -MD -MP -MF $(DEPDIR)/xz-file_io.Tpo -c -o xz-file_io.obj `if test -f 'file_io.c'; then $(CYGPATH_W) 'file_io.c'; else $(CYGPATH_W) '$(srcdir)/file_io.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-file_io.Tpo $(DEPDIR)/xz-file_io.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='file_io.c' object='xz-file_io.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-file_io.obj `if test -f 'file_io.c'; then $(CYGPATH_W) 'file_io.c'; else $(CYGPATH_W) '$(srcdir)/file_io.c'; fi`
+
+xz-hardware.o: hardware.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-hardware.o -MD -MP -MF $(DEPDIR)/xz-hardware.Tpo -c -o xz-hardware.o `test -f 'hardware.c' || echo '$(srcdir)/'`hardware.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-hardware.Tpo $(DEPDIR)/xz-hardware.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='hardware.c' object='xz-hardware.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-hardware.o `test -f 'hardware.c' || echo '$(srcdir)/'`hardware.c
+
+xz-hardware.obj: hardware.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-hardware.obj -MD -MP -MF $(DEPDIR)/xz-hardware.Tpo -c -o xz-hardware.obj `if test -f 'hardware.c'; then $(CYGPATH_W) 'hardware.c'; else $(CYGPATH_W) '$(srcdir)/hardware.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-hardware.Tpo $(DEPDIR)/xz-hardware.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='hardware.c' object='xz-hardware.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-hardware.obj `if test -f 'hardware.c'; then $(CYGPATH_W) 'hardware.c'; else $(CYGPATH_W) '$(srcdir)/hardware.c'; fi`
+
+xz-main.o: main.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-main.o -MD -MP -MF $(DEPDIR)/xz-main.Tpo -c -o xz-main.o `test -f 'main.c' || echo '$(srcdir)/'`main.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-main.Tpo $(DEPDIR)/xz-main.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='main.c' object='xz-main.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-main.o `test -f 'main.c' || echo '$(srcdir)/'`main.c
+
+xz-main.obj: main.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-main.obj -MD -MP -MF $(DEPDIR)/xz-main.Tpo -c -o xz-main.obj `if test -f 'main.c'; then $(CYGPATH_W) 'main.c'; else $(CYGPATH_W) '$(srcdir)/main.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-main.Tpo $(DEPDIR)/xz-main.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='main.c' object='xz-main.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-main.obj `if test -f 'main.c'; then $(CYGPATH_W) 'main.c'; else $(CYGPATH_W) '$(srcdir)/main.c'; fi`
+
+xz-message.o: message.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-message.o -MD -MP -MF $(DEPDIR)/xz-message.Tpo -c -o xz-message.o `test -f 'message.c' || echo '$(srcdir)/'`message.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-message.Tpo $(DEPDIR)/xz-message.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='message.c' object='xz-message.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-message.o `test -f 'message.c' || echo '$(srcdir)/'`message.c
+
+xz-message.obj: message.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-message.obj -MD -MP -MF $(DEPDIR)/xz-message.Tpo -c -o xz-message.obj `if test -f 'message.c'; then $(CYGPATH_W) 'message.c'; else $(CYGPATH_W) '$(srcdir)/message.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-message.Tpo $(DEPDIR)/xz-message.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='message.c' object='xz-message.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-message.obj `if test -f 'message.c'; then $(CYGPATH_W) 'message.c'; else $(CYGPATH_W) '$(srcdir)/message.c'; fi`
+
+xz-options.o: options.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-options.o -MD -MP -MF $(DEPDIR)/xz-options.Tpo -c -o xz-options.o `test -f 'options.c' || echo '$(srcdir)/'`options.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-options.Tpo $(DEPDIR)/xz-options.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='options.c' object='xz-options.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-options.o `test -f 'options.c' || echo '$(srcdir)/'`options.c
+
+xz-options.obj: options.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-options.obj -MD -MP -MF $(DEPDIR)/xz-options.Tpo -c -o xz-options.obj `if test -f 'options.c'; then $(CYGPATH_W) 'options.c'; else $(CYGPATH_W) '$(srcdir)/options.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-options.Tpo $(DEPDIR)/xz-options.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='options.c' object='xz-options.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-options.obj `if test -f 'options.c'; then $(CYGPATH_W) 'options.c'; else $(CYGPATH_W) '$(srcdir)/options.c'; fi`
+
+xz-signals.o: signals.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-signals.o -MD -MP -MF $(DEPDIR)/xz-signals.Tpo -c -o xz-signals.o `test -f 'signals.c' || echo '$(srcdir)/'`signals.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-signals.Tpo $(DEPDIR)/xz-signals.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='signals.c' object='xz-signals.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-signals.o `test -f 'signals.c' || echo '$(srcdir)/'`signals.c
+
+xz-signals.obj: signals.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-signals.obj -MD -MP -MF $(DEPDIR)/xz-signals.Tpo -c -o xz-signals.obj `if test -f 'signals.c'; then $(CYGPATH_W) 'signals.c'; else $(CYGPATH_W) '$(srcdir)/signals.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-signals.Tpo $(DEPDIR)/xz-signals.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='signals.c' object='xz-signals.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-signals.obj `if test -f 'signals.c'; then $(CYGPATH_W) 'signals.c'; else $(CYGPATH_W) '$(srcdir)/signals.c'; fi`
+
+xz-suffix.o: suffix.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-suffix.o -MD -MP -MF $(DEPDIR)/xz-suffix.Tpo -c -o xz-suffix.o `test -f 'suffix.c' || echo '$(srcdir)/'`suffix.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-suffix.Tpo $(DEPDIR)/xz-suffix.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='suffix.c' object='xz-suffix.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-suffix.o `test -f 'suffix.c' || echo '$(srcdir)/'`suffix.c
+
+xz-suffix.obj: suffix.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-suffix.obj -MD -MP -MF $(DEPDIR)/xz-suffix.Tpo -c -o xz-suffix.obj `if test -f 'suffix.c'; then $(CYGPATH_W) 'suffix.c'; else $(CYGPATH_W) '$(srcdir)/suffix.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-suffix.Tpo $(DEPDIR)/xz-suffix.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='suffix.c' object='xz-suffix.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-suffix.obj `if test -f 'suffix.c'; then $(CYGPATH_W) 'suffix.c'; else $(CYGPATH_W) '$(srcdir)/suffix.c'; fi`
+
+xz-util.o: util.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-util.o -MD -MP -MF $(DEPDIR)/xz-util.Tpo -c -o xz-util.o `test -f 'util.c' || echo '$(srcdir)/'`util.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-util.Tpo $(DEPDIR)/xz-util.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='util.c' object='xz-util.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-util.o `test -f 'util.c' || echo '$(srcdir)/'`util.c
+
+xz-util.obj: util.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xz-util.obj -MD -MP -MF $(DEPDIR)/xz-util.Tpo -c -o xz-util.obj `if test -f 'util.c'; then $(CYGPATH_W) 'util.c'; else $(CYGPATH_W) '$(srcdir)/util.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xz-util.Tpo $(DEPDIR)/xz-util.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='util.c' object='xz-util.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xz_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xz-util.obj `if test -f 'util.c'; then $(CYGPATH_W) 'util.c'; else $(CYGPATH_W) '$(srcdir)/util.c'; fi`
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+install-man1: $(dist_man_MANS)
+ @$(NORMAL_INSTALL)
+ test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)"
+ @list=''; test -n "$(man1dir)" || exit 0; \
+ { for i in $$list; do echo "$$i"; done; \
+ l2='$(dist_man_MANS)'; for i in $$l2; do echo "$$i"; done | \
+ sed -n '/\.1[a-z]*$$/p'; \
+ } | while read p; do \
+ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; echo "$$p"; \
+ done | \
+ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \
+ sed 'N;N;s,\n, ,g' | { \
+ list=; while read file base inst; do \
+ if test "$$base" = "$$inst"; then list="$$list $$file"; else \
+ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \
+ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \
+ fi; \
+ done; \
+ for i in $$list; do echo "$$i"; done | $(am__base_list) | \
+ while read files; do \
+ test -z "$$files" || { \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \
+ done; }
+
+uninstall-man1:
+ @$(NORMAL_UNINSTALL)
+ @list=''; test -n "$(man1dir)" || exit 0; \
+ files=`{ for i in $$list; do echo "$$i"; done; \
+ l2='$(dist_man_MANS)'; for i in $$l2; do echo "$$i"; done | \
+ sed -n '/\.1[a-z]*$$/p'; \
+ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \
+ test -z "$$files" || { \
+ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; }
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @list='$(MANS)'; if test -n "$$list"; then \
+ list=`for p in $$list; do \
+ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \
+ if test -n "$$list" && \
+ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \
+ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \
+ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \
+ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \
+ echo " typically \`make maintainer-clean' will remove them" >&2; \
+ exit 1; \
+ else :; fi; \
+ else :; fi
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS) $(MANS)
+installdirs:
+ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-man
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-data-hook
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binPROGRAMS
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-exec-hook
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man: install-man1
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS uninstall-man
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) uninstall-hook
+uninstall-man: uninstall-man1
+
+.MAKE: install-am install-data-am install-exec-am install-strip \
+ uninstall-am
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \
+ clean-generic clean-libtool ctags distclean distclean-compile \
+ distclean-generic distclean-libtool distclean-tags distdir dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-binPROGRAMS install-data install-data-am \
+ install-data-hook install-dvi install-dvi-am install-exec \
+ install-exec-am install-exec-hook install-html install-html-am \
+ install-info install-info-am install-man install-man1 \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+ pdf pdf-am ps ps-am tags uninstall uninstall-am \
+ uninstall-binPROGRAMS uninstall-hook uninstall-man \
+ uninstall-man1
+
+
+# Windows resource compiler support
+.rc.o:
+ $(RC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(xz_CPPFLAGS) $(CPPFLAGS) $(RCFLAGS) -i $< -o $@
+
+install-exec-hook:
+ cd $(DESTDIR)$(bindir) && \
+ target=`echo xz | sed '$(transform)'`$(EXEEXT) && \
+ for name in $(xzlinks); do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link && \
+ $(LN_S) $$target $$link; \
+ done
+
+install-data-hook:
+ cd $(DESTDIR)$(mandir)/man1 && \
+ target=`echo xz | sed '$(transform)'` && \
+ for name in $(xzlinks); do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link.1 && \
+ $(LN_S) $$target.1 $$link.1; \
+ done
+
+uninstall-hook:
+ cd $(DESTDIR)$(bindir) && \
+ for name in $(xzlinks); do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link; \
+ done
+ cd $(DESTDIR)$(mandir)/man1 && \
+ for name in $(xzlinks); do \
+ link=`echo $$name | sed '$(transform)'` && \
+ rm -f $$link.1; \
+ done
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/args.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/args.c
new file mode 100644
index 00000000..c443ddb5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/args.c
@@ -0,0 +1,532 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file args.c
+/// \brief Argument parsing
+///
+/// \note Filter-specific options parsing is in options.c.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "private.h"
+
+#include "getopt.h"
+#include <ctype.h>
+
+
+bool opt_stdout = false;
+bool opt_force = false;
+bool opt_keep_original = false;
+
+// We don't modify or free() this, but we need to assign it in some
+// non-const pointers.
+const char *stdin_filename = "(stdin)";
+
+
+static void
+parse_real(args_info *args, int argc, char **argv)
+{
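+	// Identifiers for long options that have no short option equivalent.
+	// Starting from INT_MIN keeps them clear of the character values
+	// that getopt_long() returns for short options.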
+ enum {
+ OPT_SUBBLOCK = INT_MIN,
+ OPT_X86,
+ OPT_POWERPC,
+ OPT_IA64,
+ OPT_ARM,
+ OPT_ARMTHUMB,
+ OPT_SPARC,
+ OPT_DELTA,
+ OPT_LZMA1,
+ OPT_LZMA2,
+
+ OPT_FILES,
+ OPT_FILES0,
+ };
+
+ static const char short_opts[]
+ = "cC:defF:hHlkM:qQrS:tT:vVz0123456789";
+
+ static const struct option long_opts[] = {
+ // Operation mode
+ { "compress", no_argument, NULL, 'z' },
+ { "decompress", no_argument, NULL, 'd' },
+ { "uncompress", no_argument, NULL, 'd' },
+ { "test", no_argument, NULL, 't' },
+ { "list", no_argument, NULL, 'l' },
+
+ // Operation modifiers
+ { "keep", no_argument, NULL, 'k' },
+ { "force", no_argument, NULL, 'f' },
+ { "stdout", no_argument, NULL, 'c' },
+ { "to-stdout", no_argument, NULL, 'c' },
+ { "suffix", required_argument, NULL, 'S' },
+ // { "recursive", no_argument, NULL, 'r' }, // TODO
+ { "files", optional_argument, NULL, OPT_FILES },
+ { "files0", optional_argument, NULL, OPT_FILES0 },
+
+ // Basic compression settings
+ { "format", required_argument, NULL, 'F' },
+ { "check", required_argument, NULL, 'C' },
+ { "memory", required_argument, NULL, 'M' },
+ { "threads", required_argument, NULL, 'T' },
+
+ { "extreme", no_argument, NULL, 'e' },
+ { "fast", no_argument, NULL, '0' },
+ { "best", no_argument, NULL, '9' },
+
+ // Filters
+ { "lzma1", optional_argument, NULL, OPT_LZMA1 },
+ { "lzma2", optional_argument, NULL, OPT_LZMA2 },
+ { "x86", optional_argument, NULL, OPT_X86 },
+ { "powerpc", optional_argument, NULL, OPT_POWERPC },
+ { "ia64", optional_argument, NULL, OPT_IA64 },
+ { "arm", optional_argument, NULL, OPT_ARM },
+ { "armthumb", optional_argument, NULL, OPT_ARMTHUMB },
+ { "sparc", optional_argument, NULL, OPT_SPARC },
+ { "delta", optional_argument, NULL, OPT_DELTA },
+ { "subblock", optional_argument, NULL, OPT_SUBBLOCK },
+
+ // Other options
+ { "quiet", no_argument, NULL, 'q' },
+ { "verbose", no_argument, NULL, 'v' },
+ { "no-warn", no_argument, NULL, 'Q' },
+ { "help", no_argument, NULL, 'h' },
+ { "long-help", no_argument, NULL, 'H' },
+ { "version", no_argument, NULL, 'V' },
+
+ { NULL, 0, NULL, 0 }
+ };
+
+ int c;
+
+ while ((c = getopt_long(argc, argv, short_opts, long_opts, NULL))
+ != -1) {
+ switch (c) {
+ // Compression preset (also for decompression if --format=raw)
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ coder_set_preset(c - '0');
+ break;
+
+ // --memory
+ case 'M': {
+ // Support specifying the limit as a percentage of
+ // installed physical RAM.
+ size_t len = strlen(optarg);
+ if (len > 0 && optarg[len - 1] == '%') {
+ optarg[len - 1] = '\0';
+ hardware_memlimit_set_percentage(
+ str_to_uint64(
+ "memory%", optarg, 1, 100));
+ } else {
+ // On 32-bit systems, SIZE_MAX would make more
+				// sense than UINT64_MAX. But still use
+				// UINT64_MAX so that scripts that assume
+				// > 4 GiB values don't break.
+ hardware_memlimit_set(str_to_uint64(
+ "memory", optarg,
+ 0, UINT64_MAX));
+ }
+
+ break;
+ }
+
+ // --suffix
+ case 'S':
+ suffix_set(optarg);
+ break;
+
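+		// --threads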
+ case 'T':
+ hardware_threadlimit_set(str_to_uint64(
+ "threads", optarg, 0, UINT32_MAX));
+ break;
+
+ // --version
+ case 'V':
+ // This doesn't return.
+ message_version();
+
+ // --stdout
+ case 'c':
+ opt_stdout = true;
+ break;
+
+ // --decompress
+ case 'd':
+ opt_mode = MODE_DECOMPRESS;
+ break;
+
+ // --extreme
+ case 'e':
+ coder_set_extreme();
+ break;
+
+ // --force
+ case 'f':
+ opt_force = true;
+ break;
+
+ // --help
+ case 'h':
+ // This doesn't return.
+ message_help(false);
+
+ // --long-help
+ case 'H':
+ // This doesn't return.
+ message_help(true);
+
+ // --list
+ case 'l':
+ opt_mode = MODE_LIST;
+ break;
+
+ // --keep
+ case 'k':
+ opt_keep_original = true;
+ break;
+
+ // --quiet
+ case 'q':
+ message_verbosity_decrease();
+ break;
+
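+		// --no-warn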
+ case 'Q':
+ set_exit_no_warn();
+ break;
+
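+		// --test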
+ case 't':
+ opt_mode = MODE_TEST;
+ break;
+
+ // --verbose
+ case 'v':
+ message_verbosity_increase();
+ break;
+
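+		// --compress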
+ case 'z':
+ opt_mode = MODE_COMPRESS;
+ break;
+
+ // Filter setup
+
+ case OPT_SUBBLOCK:
+ coder_add_filter(LZMA_FILTER_SUBBLOCK,
+ options_subblock(optarg));
+ break;
+
+ case OPT_X86:
+ coder_add_filter(LZMA_FILTER_X86,
+ options_bcj(optarg));
+ break;
+
+ case OPT_POWERPC:
+ coder_add_filter(LZMA_FILTER_POWERPC,
+ options_bcj(optarg));
+ break;
+
+ case OPT_IA64:
+ coder_add_filter(LZMA_FILTER_IA64,
+ options_bcj(optarg));
+ break;
+
+ case OPT_ARM:
+ coder_add_filter(LZMA_FILTER_ARM,
+ options_bcj(optarg));
+ break;
+
+ case OPT_ARMTHUMB:
+ coder_add_filter(LZMA_FILTER_ARMTHUMB,
+ options_bcj(optarg));
+ break;
+
+ case OPT_SPARC:
+ coder_add_filter(LZMA_FILTER_SPARC,
+ options_bcj(optarg));
+ break;
+
+ case OPT_DELTA:
+ coder_add_filter(LZMA_FILTER_DELTA,
+ options_delta(optarg));
+ break;
+
+ case OPT_LZMA1:
+ coder_add_filter(LZMA_FILTER_LZMA1,
+ options_lzma(optarg));
+ break;
+
+ case OPT_LZMA2:
+ coder_add_filter(LZMA_FILTER_LZMA2,
+ options_lzma(optarg));
+ break;
+
+ // Other
+
+ // --format
+ case 'F': {
+ // Just in case, support both "lzma" and "alone" since
+ // the latter was used for forward compatibility in
+ // LZMA Utils 4.32.x.
+ static const struct {
+ char str[8];
+ enum format_type format;
+ } types[] = {
+ { "auto", FORMAT_AUTO },
+ { "xz", FORMAT_XZ },
+ { "lzma", FORMAT_LZMA },
+ { "alone", FORMAT_LZMA },
+ // { "gzip", FORMAT_GZIP },
+ // { "gz", FORMAT_GZIP },
+ { "raw", FORMAT_RAW },
+ };
+
+ size_t i = 0;
+ while (strcmp(types[i].str, optarg) != 0)
+ if (++i == ARRAY_SIZE(types))
+ message_fatal(_("%s: Unknown file "
+ "format type"),
+ optarg);
+
+ opt_format = types[i].format;
+ break;
+ }
+
+ // --check
+ case 'C': {
+ static const struct {
+ char str[8];
+ lzma_check check;
+ } types[] = {
+ { "none", LZMA_CHECK_NONE },
+ { "crc32", LZMA_CHECK_CRC32 },
+ { "crc64", LZMA_CHECK_CRC64 },
+ { "sha256", LZMA_CHECK_SHA256 },
+ };
+
+ size_t i = 0;
+ while (strcmp(types[i].str, optarg) != 0) {
+ if (++i == ARRAY_SIZE(types))
+ message_fatal(_("%s: Unsupported "
+ "integrity "
+ "check type"), optarg);
+ }
+
+ // Use a separate check in case we are using different
+ // liblzma than what was used to compile us.
+ if (!lzma_check_is_supported(types[i].check))
+ message_fatal(_("%s: Unsupported integrity "
+ "check type"), optarg);
+
+ coder_set_check(types[i].check);
+ break;
+ }
+
+ case OPT_FILES:
+ args->files_delim = '\n';
+
+ // Fall through
+
+ case OPT_FILES0:
+ if (args->files_name != NULL)
+ message_fatal(_("Only one file can be "
+						"specified with `--files' "
+ "or `--files0'."));
+
+ if (optarg == NULL) {
+ args->files_name = (char *)stdin_filename;
+ args->files_file = stdin;
+ } else {
+ args->files_name = optarg;
+ args->files_file = fopen(optarg,
+ c == OPT_FILES ? "r" : "rb");
+ if (args->files_file == NULL)
+ message_fatal("%s: %s", optarg,
+ strerror(errno));
+ }
+
+ break;
+
+ default:
+ message_try_help();
+ my_exit(E_ERROR);
+ }
+ }
+
+ return;
+}
+
+
+static void
+parse_environment(args_info *args, char *argv0)
+{
+ char *env = getenv("XZ_OPT");
+ if (env == NULL)
+ return;
+
+ // We modify the string, so make a copy of it.
+ env = xstrdup(env);
+
+	// Calculate the number of arguments in env. argc starts at one
+	// to include space for the program name.
+ int argc = 1;
+ bool prev_was_space = true;
+ for (size_t i = 0; env[i] != '\0'; ++i) {
+ // NOTE: Cast to unsigned char is needed so that correct
+ // value gets passed to isspace(), which expects
+ // unsigned char cast to int. Casting to int is done
+ // automatically due to integer promotion, but we need to
+ // force char to unsigned char manually. Otherwise 8-bit
+ // characters would get promoted to wrong value if
+ // char is signed.
+ if (isspace((unsigned char)env[i])) {
+ prev_was_space = true;
+ } else if (prev_was_space) {
+ prev_was_space = false;
+
+			// Keep argc small enough to fit into a signed int
+ // and to keep it usable for memory allocation.
+ if (++argc == MIN(INT_MAX, SIZE_MAX / sizeof(char *)))
+ message_fatal(_("The environment variable "
+ "XZ_OPT contains too many "
+ "arguments"));
+ }
+ }
+
+ // Allocate memory to hold pointers to the arguments. Add one to get
+ // space for the terminating NULL (if some systems happen to need it).
+ char **argv = xmalloc(((size_t)(argc) + 1) * sizeof(char *));
+ argv[0] = argv0;
+ argv[argc] = NULL;
+
+ // Go through the string again. Split the arguments using '\0'
+ // characters and add pointers to the resulting strings to argv.
+ argc = 1;
+ prev_was_space = true;
+ for (size_t i = 0; env[i] != '\0'; ++i) {
+ if (isspace((unsigned char)env[i])) {
+ prev_was_space = true;
+ env[i] = '\0';
+ } else if (prev_was_space) {
+ prev_was_space = false;
+ argv[argc++] = env + i;
+ }
+ }
+
+ // Parse the argument list we got from the environment. All non-option
+ // arguments i.e. filenames are ignored.
+ parse_real(args, argc, argv);
+
+ // Reset the state of the getopt_long() so that we can parse the
+ // command line options too. There are two incompatible ways to
+ // do it.
+#ifdef HAVE_OPTRESET
+ // BSD
+ optind = 1;
+ optreset = 1;
+#else
+ // GNU, Solaris
+ optind = 0;
+#endif
+
+ // We don't need the argument list from environment anymore.
+ free(argv);
+ free(env);
+
+ return;
+}
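+
+
+/*
+ * A small sketch (kept out of the build) of what the two passes above do:
+ * the same whitespace split applied to a fixed string, so the resulting
+ * argv[] contents are easy to see. The example string is arbitrary.
+ */
+#if 0
+static void
+xz_opt_split_example(char *argv0)
+{
+ char buf[] = "  -9e   --verbose ";
+ char *argv[4] = { argv0, NULL, NULL, NULL };
+ int argc = 1;
+ bool prev_was_space = true;
+
+ for (size_t i = 0; buf[i] != '\0'; ++i) {
+ if (isspace((unsigned char)buf[i])) {
+ prev_was_space = true;
+ buf[i] = '\0';
+ } else if (prev_was_space) {
+ prev_was_space = false;
+ argv[argc++] = buf + i;
+ }
+ }
+
+ // Now argc == 3, argv[1] is "-9e", and argv[2] is "--verbose".
+}
+#endif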
+
+
+extern void
+args_parse(args_info *args, int argc, char **argv)
+{
+ // Initialize those parts of *args that we need later.
+ args->files_name = NULL;
+ args->files_file = NULL;
+ args->files_delim = '\0';
+
+ // Check how we were called.
+ {
+#ifdef DOSLIKE
+ // We adjusted argv[0] in the beginning of main() so we don't
+ // need to do anything here.
+ const char *name = argv[0];
+#else
+ // Remove the leading path name, if any.
+ const char *name = strrchr(argv[0], '/');
+ if (name == NULL)
+ name = argv[0];
+ else
+ ++name;
+#endif
+
+ // NOTE: It's possible that name[0] is now '\0' if argv[0]
+ // is weird, but it doesn't matter here.
+
+ // Look for full command names instead of substrings like
+ // "un", "cat", and "lz" to reduce possibility of false
+ // positives when the programs have been renamed.
+ if (strstr(name, "xzcat") != NULL) {
+ opt_mode = MODE_DECOMPRESS;
+ opt_stdout = true;
+ } else if (strstr(name, "unxz") != NULL) {
+ opt_mode = MODE_DECOMPRESS;
+ } else if (strstr(name, "lzcat") != NULL) {
+ opt_format = FORMAT_LZMA;
+ opt_mode = MODE_DECOMPRESS;
+ opt_stdout = true;
+ } else if (strstr(name, "unlzma") != NULL) {
+ opt_format = FORMAT_LZMA;
+ opt_mode = MODE_DECOMPRESS;
+ } else if (strstr(name, "lzma") != NULL) {
+ opt_format = FORMAT_LZMA;
+ }
+ }
+
+ // First the flags from environment
+ parse_environment(args, argv[0]);
+
+ // Then from the command line
+ parse_real(args, argc, argv);
+
+ // Never remove the source file when the destination is not on disk.
+ // In test mode the data is written nowhere, but setting opt_stdout
+ // will make the rest of the code behave well.
+ if (opt_stdout || opt_mode == MODE_TEST) {
+ opt_keep_original = true;
+ opt_stdout = true;
+ }
+
+ // When compressing, if no --format flag was used, or it
+ // was --format=auto, we compress to the .xz format.
+ if (opt_mode == MODE_COMPRESS && opt_format == FORMAT_AUTO)
+ opt_format = FORMAT_XZ;
+
+ // Compression settings need to be validated (options themselves and
+ // their memory usage) when compressing to any file format. It has to
+ // be done also when uncompressing raw data, since for raw decoding
+ // the options given on the command line are used to know what kind
+ // of raw data we are supposed to decode.
+ if (opt_mode == MODE_COMPRESS || opt_format == FORMAT_RAW)
+ coder_set_compression_settings();
+
+ // If no filenames are given, use stdin.
+ if (argv[optind] == NULL && args->files_name == NULL) {
+ // We don't modify or free() the "-" constant. The caller
+ // modifies this so don't make the struct itself const.
+ static char *names_stdin[2] = { (char *)"-", NULL };
+ args->arg_names = names_stdin;
+ args->arg_count = 1;
+ } else {
+ // We got at least one filename from the command line, or
+ // --files or --files0 was specified.
+ args->arg_names = argv + optind;
+ args->arg_count = argc - optind;
+ }
+
+ return;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/args.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/args.h
new file mode 100644
index 00000000..0c993811
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/args.h
@@ -0,0 +1,43 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file args.h
+/// \brief Argument parsing
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+typedef struct {
+ /// Filenames from command line
+ char **arg_names;
+
+ /// Number of filenames from command line
+ size_t arg_count;
+
+ /// Name of the file from which to read filenames. This is NULL
+ /// if --files or --files0 was not used.
+ char *files_name;
+
+ /// File opened for reading from which filenames are read. This is
+ /// non-NULL only if files_name is non-NULL.
+ FILE *files_file;
+
+ /// Delimiter for filenames read from files_file
+ char files_delim;
+
+} args_info;
+
+
+extern bool opt_stdout;
+extern bool opt_force;
+extern bool opt_keep_original;
+// extern bool opt_recursive;
+
+extern const char *stdin_filename;
+
+extern void args_parse(args_info *args, int argc, char **argv);
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/coder.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/coder.c
new file mode 100644
index 00000000..c0f621ab
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/coder.c
@@ -0,0 +1,641 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file coder.c
+/// \brief Compresses or uncompresses a file
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "private.h"
+
+
+/// Return value type for coder_init().
+enum coder_init_ret {
+ CODER_INIT_NORMAL,
+ CODER_INIT_PASSTHRU,
+ CODER_INIT_ERROR,
+};
+
+
+enum operation_mode opt_mode = MODE_COMPRESS;
+
+enum format_type opt_format = FORMAT_AUTO;
+
+
+/// Stream used to communicate with liblzma
+static lzma_stream strm = LZMA_STREAM_INIT;
+
+/// Filters needed for encoding in all formats, and also for decoding raw data
+static lzma_filter filters[LZMA_FILTERS_MAX + 1];
+
+/// Input and output buffers
+static uint8_t in_buf[IO_BUFFER_SIZE];
+static uint8_t out_buf[IO_BUFFER_SIZE];
+
+/// Number of filters. Zero indicates that we are using a preset.
+static size_t filters_count = 0;
+
+/// Number of the preset (0-9)
+static size_t preset_number = 6;
+
+/// True if we should auto-adjust the compression settings to use less memory
+/// if memory usage limit is too low for the original settings.
+static bool auto_adjust = true;
+
+/// Indicate if no preset has been explicitly given. In that case, if we need
+/// to auto-adjust for lower memory usage, we won't print a warning.
+static bool preset_default = true;
+
+/// If a preset is used (no custom filter chain) and preset_extreme is true,
+/// a significantly slower compression is used to achieve slightly better
+/// compression ratio.
+static bool preset_extreme = false;
+
+/// Integrity check type
+#ifdef HAVE_CHECK_CRC64
+static lzma_check check = LZMA_CHECK_CRC64;
+#else
+static lzma_check check = LZMA_CHECK_CRC32;
+#endif
+
+
+extern void
+coder_set_check(lzma_check new_check)
+{
+ check = new_check;
+ return;
+}
+
+
+extern void
+coder_set_preset(size_t new_preset)
+{
+ preset_number = new_preset;
+ preset_default = false;
+ return;
+}
+
+
+extern void
+coder_set_extreme(void)
+{
+ preset_extreme = true;
+ return;
+}
+
+
+extern void
+coder_add_filter(lzma_vli id, void *options)
+{
+ if (filters_count == LZMA_FILTERS_MAX)
+ message_fatal(_("Maximum number of filters is four"));
+
+ filters[filters_count].id = id;
+ filters[filters_count].options = options;
+ ++filters_count;
+
+ return;
+}
+
+
+static void lzma_attribute((noreturn))
+memlimit_too_small(uint64_t memory_usage, uint64_t memory_limit)
+{
+ message_fatal(_("Memory usage limit (%" PRIu64 " MiB) is too small "
+ "for the given filter setup (%" PRIu64 " MiB)"),
+ memory_limit >> 20, memory_usage >> 20);
+}
+
+
+extern void
+coder_set_compression_settings(void)
+{
+ // Options for LZMA1 or LZMA2 in case we are using a preset.
+ static lzma_options_lzma opt_lzma;
+
+ if (filters_count == 0) {
+ // We are using a preset. This is not a good idea in raw mode
+ // except when playing around with things. Different versions
+ // of this software may use different options in presets, and
+ // thus make uncompressing the raw data difficult.
+ if (opt_format == FORMAT_RAW) {
+ // The message is shown only if warnings are allowed
+ // but the exit status isn't changed.
+ message(V_WARNING, _("Using a preset in raw mode "
+ "is discouraged."));
+ message(V_WARNING, _("The exact options of the "
+ "presets may vary between software "
+ "versions."));
+ }
+
+ // Get the preset for LZMA1 or LZMA2.
+ if (preset_extreme)
+ preset_number |= LZMA_PRESET_EXTREME;
+
+ if (lzma_lzma_preset(&opt_lzma, preset_number))
+ message_bug();
+
+ // Use LZMA2, except with --format=lzma we use LZMA1.
+ filters[0].id = opt_format == FORMAT_LZMA
+ ? LZMA_FILTER_LZMA1 : LZMA_FILTER_LZMA2;
+ filters[0].options = &opt_lzma;
+ filters_count = 1;
+ } else {
+ preset_default = false;
+ }
+
+ // Terminate the filter options array.
+ filters[filters_count].id = LZMA_VLI_UNKNOWN;
+
+ // If we are using the .lzma format, allow exactly one filter
+ // which has to be LZMA1.
+ if (opt_format == FORMAT_LZMA && (filters_count != 1
+ || filters[0].id != LZMA_FILTER_LZMA1))
+ message_fatal(_("The .lzma format supports only "
+ "the LZMA1 filter"));
+
+ // If we are using the .xz format, make sure that there is no LZMA1
+ // filter to prevent LZMA_PROG_ERROR.
+ if (opt_format == FORMAT_XZ)
+ for (size_t i = 0; i < filters_count; ++i)
+ if (filters[i].id == LZMA_FILTER_LZMA1)
+ message_fatal(_("LZMA1 cannot be used "
+ "with the .xz format"));
+
+ // Print the selected filter chain.
+ message_filters(V_DEBUG, filters);
+
+ // If using --format=raw, we can be decoding. The memusage function
+ // also validates the filter chain and the options used for the
+ // filters.
+ const uint64_t memory_limit = hardware_memlimit_get();
+ uint64_t memory_usage;
+ if (opt_mode == MODE_COMPRESS)
+ memory_usage = lzma_raw_encoder_memusage(filters);
+ else
+ memory_usage = lzma_raw_decoder_memusage(filters);
+
+ if (memory_usage == UINT64_MAX)
+ message_fatal("Unsupported filter chain or filter options");
+
+ // Print memory usage info.
+ message(V_DEBUG, _("%s MiB (%s B) of memory is required per thread, "
+ "limit is %s MiB (%s B)"),
+ uint64_to_str(memory_usage >> 20, 0),
+ uint64_to_str(memory_usage, 1),
+ uint64_to_str(memory_limit >> 20, 2),
+ uint64_to_str(memory_limit, 3));
+
+ if (memory_usage > memory_limit) {
+ // If --no-auto-adjust was used or we didn't find LZMA1 or
+ // LZMA2 as the last filter, give an error immediately.
+ // --format=raw implies --no-auto-adjust.
+ if (!auto_adjust || opt_format == FORMAT_RAW)
+ memlimit_too_small(memory_usage, memory_limit);
+
+ assert(opt_mode == MODE_COMPRESS);
+
+ // Look for an LZMA2 or LZMA1 filter in the chain so that
+ // we can make it use less RAM. With other filters we don't
+ // know what to do.
+ size_t i = 0;
+ while (filters[i].id != LZMA_FILTER_LZMA2
+ && filters[i].id != LZMA_FILTER_LZMA1) {
+ if (filters[i].id == LZMA_VLI_UNKNOWN)
+ memlimit_too_small(memory_usage, memory_limit);
+
+ ++i;
+ }
+
+ // Decrease the dictionary size until we meet the memory
+ // usage limit. First round down to full mebibytes.
+ lzma_options_lzma *opt = filters[i].options;
+ const uint32_t orig_dict_size = opt->dict_size;
+ opt->dict_size &= ~((UINT32_C(1) << 20) - 1);
+ while (true) {
+ // If it is below 1 MiB, auto-adjusting failed. We
+ // could be more sophisticated and scale it down even
+ // more, but let's see if many complain about this
+ // version.
+ //
+ // FIXME: Displays the scaled memory usage instead
+ // of the original.
+ if (opt->dict_size < (UINT32_C(1) << 20))
+ memlimit_too_small(memory_usage, memory_limit);
+
+ memory_usage = lzma_raw_encoder_memusage(filters);
+ if (memory_usage == UINT64_MAX)
+ message_bug();
+
+ // Accept it if it is low enough.
+ if (memory_usage <= memory_limit)
+ break;
+
+ // Otherwise drop 1 MiB and try again. Hopefully this
+ // isn't too slow a method for cases where the original
+ // dict_size is very big.
+ opt->dict_size -= UINT32_C(1) << 20;
+ }
+
+ // Tell the user that we decreased the dictionary size.
+ // However, omit the message if no preset or custom chain
+ // was given. FIXME: Always warn?
+ if (!preset_default)
+ message(V_WARNING, "Adjusted LZMA%c dictionary size "
+ "from %s MiB to %s MiB to not exceed "
+ "the memory usage limit of %s MiB",
+ filters[i].id == LZMA_FILTER_LZMA2
+ ? '2' : '1',
+ uint64_to_str(orig_dict_size >> 20, 0),
+ uint64_to_str(opt->dict_size >> 20, 1),
+ uint64_to_str(memory_limit >> 20, 2));
+ }
+
+/*
+ // Limit the number of worker threads so that memory usage
+ // limit isn't exceeded.
+ assert(memory_usage > 0);
+ size_t thread_limit = memory_limit / memory_usage;
+ if (thread_limit == 0)
+ thread_limit = 1;
+
+ if (opt_threads > thread_limit)
+ opt_threads = thread_limit;
+*/
+
+ return;
+}
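+
+
+/*
+ * A minimal sketch of the dictionary auto-adjust loop above, kept out of
+ * the build on purpose: the memory-usage estimate here is a made-up
+ * stand-in, not what liblzma reports, so only the round-down-and-step
+ * logic carries over.
+ */
+#if 0
+static uint32_t
+adjust_dict_size_example(uint32_t dict_size, uint64_t memory_limit)
+{
+ // First round down to a full mebibyte.
+ dict_size &= ~((UINT32_C(1) << 20) - 1);
+
+ // Then drop one mebibyte at a time until the (made-up, linear)
+ // usage estimate fits the limit. The real code asks liblzma
+ // for the estimate instead.
+ while (dict_size >= (UINT32_C(1) << 20)
+ && (uint64_t)(dict_size) * 10 > memory_limit)
+ dict_size -= UINT32_C(1) << 20;
+
+ // Below 1 MiB we give up, like the real code does.
+ return dict_size < (UINT32_C(1) << 20) ? 0 : dict_size;
+}
+#endif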
+
+
+/// Return true if the data in in_buf seems to be in the .xz format.
+static bool
+is_format_xz(void)
+{
+ return strm.avail_in >= 6 && memcmp(in_buf, "\3757zXZ", 6) == 0;
+}
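+
+
+/*
+ * The "\3757zXZ" literal above is a compact spelling of the six-byte .xz
+ * header magic (0xFD is octal 375, and the string's terminating NUL is the
+ * sixth byte that memcmp() compares). The same check written out, kept out
+ * of the build:
+ */
+#if 0
+static bool
+is_format_xz_spelled_out(void)
+{
+ static const uint8_t xz_magic[6] = { 0xFD, '7', 'z', 'X', 'Z', 0x00 };
+ return strm.avail_in >= sizeof(xz_magic)
+ && memcmp(in_buf, xz_magic, sizeof(xz_magic)) == 0;
+}
+#endif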
+
+
+/// Return true if the data in in_buf seems to be in the .lzma format.
+static bool
+is_format_lzma(void)
+{
+ // The .lzma header is 13 bytes.
+ if (strm.avail_in < 13)
+ return false;
+
+ // Decode the LZMA1 properties.
+ lzma_filter filter = { .id = LZMA_FILTER_LZMA1 };
+ if (lzma_properties_decode(&filter, NULL, in_buf, 5) != LZMA_OK)
+ return false;
+
+ // A hack to ditch tons of false positives: We allow only dictionary
+ // sizes that are 2^n or 2^n + 2^(n-1) or UINT32_MAX. LZMA_Alone
+ // created only files with 2^n, but accepts any dictionary size.
+ // If someone complains, this will be reconsidered.
+ lzma_options_lzma *opt = filter.options;
+ const uint32_t dict_size = opt->dict_size;
+ free(opt);
+
+ if (dict_size != UINT32_MAX) {
+ uint32_t d = dict_size - 1;
+ d |= d >> 2;
+ d |= d >> 3;
+ d |= d >> 4;
+ d |= d >> 8;
+ d |= d >> 16;
+ ++d;
+ if (d != dict_size || dict_size == 0)
+ return false;
+ }
+
+ // Another hack to ditch false positives: Assume that if the
+ // uncompressed size is known, it must be less than 256 GiB.
+ // Again, if someone complains, this will be reconsidered.
+ uint64_t uncompressed_size = 0;
+ for (size_t i = 0; i < 8; ++i)
+ uncompressed_size |= (uint64_t)(in_buf[5 + i]) << (i * 8);
+
+ if (uncompressed_size != UINT64_MAX
+ && uncompressed_size > (UINT64_C(1) << 38))
+ return false;
+
+ return true;
+}
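+
+
+/*
+ * The bit tricks above, separated out for clarity (and kept out of the
+ * build): they accept exactly the sizes 2^n and 2^n + 2^(n-1); UINT32_MAX
+ * is special-cased by the caller. For example, 1 MiB and 1.5 MiB pass,
+ * while 1.25 MiB gets rounded up to 1.5 MiB and is rejected.
+ */
+#if 0
+static bool
+is_accepted_dict_size(uint32_t dict_size)
+{
+ if (dict_size == 0)
+ return false;
+
+ uint32_t d = dict_size - 1;
+ d |= d >> 2;
+ d |= d >> 3;
+ d |= d >> 4;
+ d |= d >> 8;
+ d |= d >> 16;
+ ++d;
+
+ return d == dict_size;
+}
+#endif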
+
+
+/// Detect the input file type (for now, this is done only when decompressing),
+/// and initialize an appropriate coder. Return value indicates if a normal
+/// liblzma-based coder was initialized (CODER_INIT_NORMAL), if passthru
+/// mode should be used (CODER_INIT_PASSTHRU), or if an error occurred
+/// (CODER_INIT_ERROR).
+static enum coder_init_ret
+coder_init(file_pair *pair)
+{
+ lzma_ret ret = LZMA_PROG_ERROR;
+
+ if (opt_mode == MODE_COMPRESS) {
+ switch (opt_format) {
+ case FORMAT_AUTO:
+ // args.c ensures this.
+ assert(0);
+ break;
+
+ case FORMAT_XZ:
+ ret = lzma_stream_encoder(&strm, filters, check);
+ break;
+
+ case FORMAT_LZMA:
+ ret = lzma_alone_encoder(&strm, filters[0].options);
+ break;
+
+ case FORMAT_RAW:
+ ret = lzma_raw_encoder(&strm, filters);
+ break;
+ }
+ } else {
+ const uint32_t flags = LZMA_TELL_UNSUPPORTED_CHECK
+ | LZMA_CONCATENATED;
+
+ // We abuse FORMAT_AUTO to indicate unknown file format,
+ // for which we may consider passthru mode.
+ enum format_type init_format = FORMAT_AUTO;
+
+ switch (opt_format) {
+ case FORMAT_AUTO:
+ if (is_format_xz())
+ init_format = FORMAT_XZ;
+ else if (is_format_lzma())
+ init_format = FORMAT_LZMA;
+ break;
+
+ case FORMAT_XZ:
+ if (is_format_xz())
+ init_format = FORMAT_XZ;
+ break;
+
+ case FORMAT_LZMA:
+ if (is_format_lzma())
+ init_format = FORMAT_LZMA;
+ break;
+
+ case FORMAT_RAW:
+ init_format = FORMAT_RAW;
+ break;
+ }
+
+ switch (init_format) {
+ case FORMAT_AUTO:
+ // Unknown file format. If --decompress --stdout
+ // --force have been given, then we copy the input
+ // as is to stdout. Checking for MODE_DECOMPRESS
+ // is needed, because we don't want to use
+ // passthru mode with --test.
+ if (opt_mode == MODE_DECOMPRESS
+ && opt_stdout && opt_force)
+ return CODER_INIT_PASSTHRU;
+
+ ret = LZMA_FORMAT_ERROR;
+ break;
+
+ case FORMAT_XZ:
+ ret = lzma_stream_decoder(&strm,
+ hardware_memlimit_get(), flags);
+ break;
+
+ case FORMAT_LZMA:
+ ret = lzma_alone_decoder(&strm,
+ hardware_memlimit_get());
+ break;
+
+ case FORMAT_RAW:
+ // Memory usage has already been checked in
+ // coder_set_compression_settings().
+ ret = lzma_raw_decoder(&strm, filters);
+ break;
+ }
+ }
+
+ if (ret != LZMA_OK) {
+ message_error("%s: %s", pair->src_name, message_strm(ret));
+ return CODER_INIT_ERROR;
+ }
+
+ return CODER_INIT_NORMAL;
+}
+
+
+/// Compress or decompress using liblzma.
+static bool
+coder_normal(file_pair *pair)
+{
+ // Encoder needs to know when we have given all the input to it.
+ // The decoders need to know it too when we are using
+ // LZMA_CONCATENATED. We need to check for src_eof here, because
+ // the first input chunk has been already read, and that may
+ // have been the only chunk we will read.
+ lzma_action action = pair->src_eof ? LZMA_FINISH : LZMA_RUN;
+
+ lzma_ret ret;
+
+ // Assume that something goes wrong.
+ bool success = false;
+
+ strm.next_out = out_buf;
+ strm.avail_out = IO_BUFFER_SIZE;
+
+ while (!user_abort) {
+ // Fill the input buffer if it is empty and we haven't reached
+ // end of file yet.
+ if (strm.avail_in == 0 && !pair->src_eof) {
+ strm.next_in = in_buf;
+ strm.avail_in = io_read(pair, in_buf, IO_BUFFER_SIZE);
+
+ if (strm.avail_in == SIZE_MAX)
+ break;
+
+ if (pair->src_eof)
+ action = LZMA_FINISH;
+ }
+
+ // Let liblzma do the actual work.
+ ret = lzma_code(&strm, action);
+
+ // Write out if the output buffer became full.
+ if (strm.avail_out == 0) {
+ if (opt_mode != MODE_TEST && io_write(pair, out_buf,
+ IO_BUFFER_SIZE - strm.avail_out))
+ break;
+
+ strm.next_out = out_buf;
+ strm.avail_out = IO_BUFFER_SIZE;
+ }
+
+ if (ret != LZMA_OK) {
+ // Determine if the return value indicates that we
+ // won't continue coding.
+ const bool stop = ret != LZMA_NO_CHECK
+ && ret != LZMA_UNSUPPORTED_CHECK;
+
+ if (stop) {
+ // Write the remaining bytes even if something
+ // went wrong, because that way the user gets
+ // as much data as possible, which can be good
+ // when trying to get at least some useful
+ // data out of damaged files.
+ if (opt_mode != MODE_TEST && io_write(pair,
+ out_buf, IO_BUFFER_SIZE
+ - strm.avail_out))
+ break;
+ }
+
+ if (ret == LZMA_STREAM_END) {
+ // Check that there is no trailing garbage.
+ // This is needed for LZMA_Alone and raw
+ // streams.
+ if (strm.avail_in == 0 && !pair->src_eof) {
+ // Try reading one more byte.
+ // Hopefully we don't get any more
+ // input, and thus pair->src_eof
+ // becomes true.
+ strm.avail_in = io_read(
+ pair, in_buf, 1);
+ if (strm.avail_in == SIZE_MAX)
+ break;
+
+ assert(strm.avail_in == 0
+ || strm.avail_in == 1);
+ }
+
+ if (strm.avail_in == 0) {
+ assert(pair->src_eof);
+ success = true;
+ break;
+ }
+
+ // We hadn't reached the end of the file.
+ ret = LZMA_DATA_ERROR;
+ assert(stop);
+ }
+
+ // If we get here and stop is true, something went
+ // wrong and we print an error. Otherwise it's just
+ // a warning and coding can continue.
+ if (stop) {
+ message_error("%s: %s", pair->src_name,
+ message_strm(ret));
+ } else {
+ message_warning("%s: %s", pair->src_name,
+ message_strm(ret));
+
+ // When compressing, all possible errors set
+ // stop to true.
+ assert(opt_mode != MODE_COMPRESS);
+ }
+
+ if (ret == LZMA_MEMLIMIT_ERROR) {
+ // Figure out how much memory it would have
+ // actually needed.
+ uint64_t memusage = lzma_memusage(&strm);
+ uint64_t memlimit = hardware_memlimit_get();
+
+ // Round the memory limit down and usage up.
+ // This way we don't display a ridiculous
+ // message like "Limit was 9 MiB, but 9 MiB
+ // would have been needed".
+ memusage = (memusage + 1024 * 1024 - 1)
+ / (1024 * 1024);
+ memlimit /= 1024 * 1024;
+
+ message_error(_("Limit was %s MiB, "
+ "but %s MiB would "
+ "have been needed"),
+ uint64_to_str(memlimit, 0),
+ uint64_to_str(memusage, 1));
+ }
+
+ if (stop)
+ break;
+ }
+
+ // Show progress information under certain conditions.
+ message_progress_update();
+ }
+
+ return success;
+}
+
+
+/// Copy from input file to output file without processing the data in any
+/// way. This is used only when trying to decompress unrecognized files
+/// with --decompress --stdout --force, so the output is always stdout.
+static bool
+coder_passthru(file_pair *pair)
+{
+ while (strm.avail_in != 0) {
+ if (user_abort)
+ return false;
+
+ if (io_write(pair, in_buf, strm.avail_in))
+ return false;
+
+ strm.total_in += strm.avail_in;
+ strm.total_out = strm.total_in;
+ message_progress_update();
+
+ strm.avail_in = io_read(pair, in_buf, IO_BUFFER_SIZE);
+ if (strm.avail_in == SIZE_MAX)
+ return false;
+ }
+
+ return true;
+}
+
+
+extern void
+coder_run(const char *filename)
+{
+ // Try to open the input and output files.
+ file_pair *pair = io_open(filename);
+ if (pair == NULL)
+ return;
+
+ // Initialize the progress indicator.
+ const uint64_t in_size = pair->src_st.st_size <= (off_t)(0)
+ ? 0 : (uint64_t)(pair->src_st.st_size);
+ message_progress_start(&strm, pair->src_name, in_size);
+
+ // Assume that something goes wrong.
+ bool success = false;
+
+ // Read the first chunk of input data. This is needed to detect
+ // the input file type (for now, only for decompression).
+ strm.next_in = in_buf;
+ strm.avail_in = io_read(pair, in_buf, IO_BUFFER_SIZE);
+
+ switch (coder_init(pair)) {
+ case CODER_INIT_NORMAL:
+ success = coder_normal(pair);
+ break;
+
+ case CODER_INIT_PASSTHRU:
+ success = coder_passthru(pair);
+ break;
+
+ case CODER_INIT_ERROR:
+ break;
+ }
+
+ message_progress_end(success);
+
+ // Close the file pair. It needs to know if coding was successful to
+ // know if the source or target file should be unlinked.
+ io_close(pair, success);
+
+ return;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/coder.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/coder.h
new file mode 100644
index 00000000..69d2729a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/coder.h
@@ -0,0 +1,59 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file coder.h
+/// \brief Compresses or uncompresses a file
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+enum operation_mode {
+ MODE_COMPRESS,
+ MODE_DECOMPRESS,
+ MODE_TEST,
+ MODE_LIST,
+};
+
+
+// NOTE: The order of these is significant in suffix.c.
+enum format_type {
+ FORMAT_AUTO,
+ FORMAT_XZ,
+ FORMAT_LZMA,
+ // HEADER_GZIP,
+ FORMAT_RAW,
+};
+
+
+/// Operation mode of the command line tool. This is set in args.c and read
+/// in several files.
+extern enum operation_mode opt_mode;
+
+/// File format to use when encoding or what format(s) to accept when
+/// decoding. This is a global because it's needed also in suffix.c.
+/// This is set in args.c.
+extern enum format_type opt_format;
+
+
+/// Set the integrity check type used when compressing
+extern void coder_set_check(lzma_check check);
+
+/// Set preset number
+extern void coder_set_preset(size_t new_preset);
+
+/// Enable extreme mode
+extern void coder_set_extreme(void);
+
+/// Add a filter to the custom filter chain
+extern void coder_add_filter(lzma_vli id, void *options);
+
+/// Set and validate the compression settings; also needed for raw decoding
+extern void coder_set_compression_settings(void);
+
+/// Compress or decompress the given file
+extern void coder_run(const char *filename);
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/file_io.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/file_io.c
new file mode 100644
index 00000000..43d8b55d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/file_io.c
@@ -0,0 +1,721 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file file_io.c
+/// \brief File opening, unlinking, and closing
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "private.h"
+
+#include <fcntl.h>
+
+#ifdef DOSLIKE
+# include <io.h>
+#endif
+
+#if defined(HAVE_FUTIMES) || defined(HAVE_FUTIMESAT) || defined(HAVE_UTIMES)
+# include <sys/time.h>
+#elif defined(HAVE_UTIME)
+# include <utime.h>
+#endif
+
+#ifndef O_BINARY
+# define O_BINARY 0
+#endif
+
+#ifndef O_NOCTTY
+# define O_NOCTTY 0
+#endif
+
+#ifndef DOSLIKE
+# include "open_stdxxx.h"
+static bool warn_fchown;
+#endif
+
+
+extern void
+io_init(void)
+{
+#ifndef DOSLIKE
+ // Make sure that stdin, stdout, and stderr are connected to
+ // a valid file descriptor. Exit immediately with exit code ERROR
+ // if we cannot make the file descriptors valid. Maybe we should
+ // print an error message, but our stderr could be screwed anyway.
+ open_stdxxx(E_ERROR);
+
+ // If fchown() fails setting the owner, we warn about it only if
+ // we are root.
+ warn_fchown = geteuid() == 0;
+#endif
+
+#ifdef __DJGPP__
+ // Avoid doing useless things when statting files.
+ // This isn't important but doesn't hurt.
+ _djstat_flags = _STAT_INODE | _STAT_EXEC_EXT
+ | _STAT_EXEC_MAGIC | _STAT_DIRSIZE;
+#endif
+
+ return;
+}
+
+
+/// \brief Unlinks a file
+///
+/// This tries to verify that the file being unlinked really is the file that
+/// we want to unlink by verifying device and inode numbers. There's still
+/// a small unavoidable race, but this is much better than nothing (the file
+/// could have been moved/replaced even hours earlier).
+static void
+io_unlink(const char *name, const struct stat *known_st)
+{
+#ifdef DOSLIKE
+ // On Windows, st_ino is meaningless, so don't bother testing it.
+ // Just silence a compiler warning.
+ (void)known_st;
+#else
+ struct stat new_st;
+
+ if (lstat(name, &new_st)
+ || new_st.st_dev != known_st->st_dev
+ || new_st.st_ino != known_st->st_ino)
+ message_error(_("%s: File seems to be moved, not removing"),
+ name);
+ else
+#endif
+ // There's a race condition between lstat() and unlink()
+ // but at least we have tried to avoid removing wrong file.
+ if (unlink(name))
+ message_error(_("%s: Cannot remove: %s"),
+ name, strerror(errno));
+
+ return;
+}
+
+
+/// \brief Copies owner/group and permissions
+///
+/// \todo ACL and EA support
+///
+static void
+io_copy_attrs(const file_pair *pair)
+{
+ // Skip chown and chmod on Windows.
+#ifndef DOSLIKE
+ // This function is trickier than it may look at first.
+ // Blindly copying permissions may let users who had no access
+ // to the source file access the destination file.
+
+ // Try changing the owner of the file. If we aren't root or the owner
+ // isn't already us, fchown() probably doesn't succeed. We warn
+ // about failing fchown() only if we are root.
+ if (fchown(pair->dest_fd, pair->src_st.st_uid, -1) && warn_fchown)
+ message_warning(_("%s: Cannot set the file owner: %s"),
+ pair->dest_name, strerror(errno));
+
+ mode_t mode;
+
+ if (fchown(pair->dest_fd, -1, pair->src_st.st_gid)) {
+ message_warning(_("%s: Cannot set the file group: %s"),
+ pair->dest_name, strerror(errno));
+ // We can still safely copy some additional permissions:
+ // `group' must be at least as strict as `other' and
+ // also vice versa.
+ //
+ // NOTE: After this, the owner of the source file may
+ // get additional permissions. This shouldn't be too bad,
+ // because the owner would have had permission to chmod
+ // the original file anyway.
+ mode = ((pair->src_st.st_mode & 0070) >> 3)
+ & (pair->src_st.st_mode & 0007);
+ mode = (pair->src_st.st_mode & 0700) | (mode << 3) | mode;
+ } else {
+ // Drop the setuid, setgid, and sticky bits.
+ mode = pair->src_st.st_mode & 0777;
+ }
+
+ if (fchmod(pair->dest_fd, mode))
+ message_warning(_("%s: Cannot set the file permissions: %s"),
+ pair->dest_name, strerror(errno));
+#endif
+
+ // Copy the timestamps. We have several possible ways to do this, of
+ // which some are better in both security and precision.
+ //
+ // First, get the nanosecond part of the timestamps. As of writing,
+ // it's not standardized by POSIX, and there are several names for
+ // the same thing in struct stat.
+ long atime_nsec;
+ long mtime_nsec;
+
+# if defined(HAVE_STRUCT_STAT_ST_ATIM_TV_NSEC)
+ // GNU and Solaris
+ atime_nsec = pair->src_st.st_atim.tv_nsec;
+ mtime_nsec = pair->src_st.st_mtim.tv_nsec;
+
+# elif defined(HAVE_STRUCT_STAT_ST_ATIMESPEC_TV_NSEC)
+ // BSD
+ atime_nsec = pair->src_st.st_atimespec.tv_nsec;
+ mtime_nsec = pair->src_st.st_mtimespec.tv_nsec;
+
+# elif defined(HAVE_STRUCT_STAT_ST_ATIMENSEC)
+ // GNU and BSD without extensions
+ atime_nsec = pair->src_st.st_atimensec;
+ mtime_nsec = pair->src_st.st_mtimensec;
+
+# elif defined(HAVE_STRUCT_STAT_ST_UATIME)
+ // Tru64
+ atime_nsec = pair->src_st.st_uatime * 1000;
+ mtime_nsec = pair->src_st.st_umtime * 1000;
+
+# elif defined(HAVE_STRUCT_STAT_ST_ATIM_ST__TIM_TV_NSEC)
+ // UnixWare
+ atime_nsec = pair->src_st.st_atim.st__tim.tv_nsec;
+ mtime_nsec = pair->src_st.st_mtim.st__tim.tv_nsec;
+
+# else
+ // Safe fallback
+ atime_nsec = 0;
+ mtime_nsec = 0;
+# endif
+
+ // Construct a structure to hold the timestamps and call appropriate
+ // function to set the timestamps.
+#if defined(HAVE_FUTIMENS)
+ // Use nanosecond precision.
+ struct timespec tv[2];
+ tv[0].tv_sec = pair->src_st.st_atime;
+ tv[0].tv_nsec = atime_nsec;
+ tv[1].tv_sec = pair->src_st.st_mtime;
+ tv[1].tv_nsec = mtime_nsec;
+
+ (void)futimens(pair->dest_fd, tv);
+
+#elif defined(HAVE_FUTIMES) || defined(HAVE_FUTIMESAT) || defined(HAVE_UTIMES)
+ // Use microsecond precision.
+ struct timeval tv[2];
+ tv[0].tv_sec = pair->src_st.st_atime;
+ tv[0].tv_usec = atime_nsec / 1000;
+ tv[1].tv_sec = pair->src_st.st_mtime;
+ tv[1].tv_usec = mtime_nsec / 1000;
+
+# if defined(HAVE_FUTIMES)
+ (void)futimes(pair->dest_fd, tv);
+# elif defined(HAVE_FUTIMESAT)
+ (void)futimesat(pair->dest_fd, NULL, tv);
+# else
+ // Argh, no function to use a file descriptor to set the timestamp.
+ (void)utimes(pair->dest_name, tv);
+# endif
+
+#elif defined(HAVE_UTIME)
+ // Use one-second precision. utime() doesn't support using a file
+ // descriptor either. Some systems have a broken utime() prototype
+ // so don't make this const.
+ struct utimbuf buf = {
+ .actime = pair->src_st.st_atime,
+ .modtime = pair->src_st.st_mtime,
+ };
+
+ // Avoid warnings.
+ (void)atime_nsec;
+ (void)mtime_nsec;
+
+ (void)utime(pair->dest_name, &buf);
+#endif
+
+ return;
+}
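+
+
+/*
+ * A standalone sketch (not compiled) of the permission-narrowing rule used
+ * above when the group cannot be copied: group and other both get the
+ * intersection of the source's group and other bits, so neither class gains
+ * access. For example, 0664 becomes 0644 and 0640 becomes 0600.
+ */
+#if 0
+static mode_t
+narrowed_mode_example(mode_t src_mode)
+{
+ const mode_t shared = ((src_mode & 0070) >> 3) & (src_mode & 0007);
+ return (src_mode & 0700) | (shared << 3) | shared;
+}
+#endif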
+
+
+/// Opens the source file. Returns false on success, true on error.
+static bool
+io_open_src(file_pair *pair)
+{
+ // There's nothing to open when reading from stdin.
+ if (pair->src_name == stdin_filename) {
+ pair->src_fd = STDIN_FILENO;
+#ifdef DOSLIKE
+ setmode(STDIN_FILENO, O_BINARY);
+#endif
+ return false;
+ }
+
+ // We accept only regular files if we are writing the output
+ // to disk too, and if --force was not given.
+ const bool reg_files_only = !opt_stdout && !opt_force;
+
+ // Flags for open()
+ int flags = O_RDONLY | O_BINARY | O_NOCTTY;
+
+#ifndef DOSLIKE
+ // If we accept only regular files, we need to be careful to avoid
+ // problems with special files like devices and FIFOs. O_NONBLOCK
+ // prevents blocking when opening such files. When we want to accept
+ // special files, we must not use O_NONBLOCK, because otherwise we
+ // won't block waiting e.g. for FIFOs to become readable.
+ if (reg_files_only)
+ flags |= O_NONBLOCK;
+#endif
+
+#if defined(O_NOFOLLOW)
+ if (reg_files_only)
+ flags |= O_NOFOLLOW;
+#elif !defined(DOSLIKE)
+ // Some POSIX-like systems lack O_NOFOLLOW (it's not required
+ // by POSIX). Check for symlinks with a separate lstat() on
+ // these systems.
+ if (reg_files_only) {
+ struct stat st;
+ if (lstat(pair->src_name, &st)) {
+ message_error("%s: %s", pair->src_name,
+ strerror(errno));
+ return true;
+
+ } else if (S_ISLNK(st.st_mode)) {
+ message_warning(_("%s: Is a symbolic link, "
+ "skipping"), pair->src_name);
+ return true;
+ }
+ }
+#endif
+
+ // Try to open the file. If we are accepting non-regular files,
+ // unblock the caught signals so that open() can be interrupted
+ // if it blocks e.g. due to a FIFO file.
+ if (!reg_files_only)
+ signals_unblock();
+
+ // Maybe this wouldn't need a loop, since all the signal handlers for
+ // which we don't use SA_RESTART set user_abort to true. But it
+ // doesn't hurt to have it just in case.
+ do {
+ pair->src_fd = open(pair->src_name, flags);
+ } while (pair->src_fd == -1 && errno == EINTR && !user_abort);
+
+ if (!reg_files_only)
+ signals_block();
+
+ if (pair->src_fd == -1) {
+ // If we were interrupted, don't display any error message.
+ if (errno == EINTR) {
+ // All the signals that don't have SA_RESTART
+ // set user_abort.
+ assert(user_abort);
+ return true;
+ }
+
+#ifdef O_NOFOLLOW
+ // Give an understandable error message if the reason
+ // for failing was that the file was a symbolic link.
+ //
+ // Note that at least Linux, OpenBSD, Solaris, and Darwin
+ // use ELOOP to indicate if O_NOFOLLOW was the reason
+ // that open() failed. Because there may be
+ // directories in the pathname, ELOOP may occur also
+ // because of a symlink loop in the directory part.
+ // So ELOOP doesn't tell us what actually went wrong.
+ //
+ // FreeBSD associates EMLINK with O_NOFOLLOW and
+ // Tru64 uses ENOTSUP. We use these directly here
+ // and skip the lstat() call and the associated race.
+ // I want to hear if there are other kernels that
+ // fail with something else than ELOOP with O_NOFOLLOW.
+ bool was_symlink = false;
+
+# if defined(__FreeBSD__) || defined(__DragonFly__)
+ if (errno == EMLINK)
+ was_symlink = true;
+
+# elif defined(__digital__) && defined(__unix__)
+ if (errno == ENOTSUP)
+ was_symlink = true;
+
+# elif defined(__NetBSD__)
+ // FIXME? As of 2008-11-20, NetBSD doesn't document what
+ // errno is used with O_NOFOLLOW. It seems to be EFTYPE,
+ // but since it isn't documented, it may be wrong to rely
+ // on it here.
+ if (errno == EFTYPE)
+ was_symlink = true;
+
+# else
+ if (errno == ELOOP && reg_files_only) {
+ const int saved_errno = errno;
+ struct stat st;
+ if (lstat(pair->src_name, &st) == 0
+ && S_ISLNK(st.st_mode))
+ was_symlink = true;
+
+ errno = saved_errno;
+ }
+# endif
+
+ if (was_symlink)
+ message_warning(_("%s: Is a symbolic link, "
+ "skipping"), pair->src_name);
+ else
+#endif
+ // Something else than O_NOFOLLOW failing
+ // (assuming that the race conditions didn't
+ // confuse us).
+ message_error("%s: %s", pair->src_name,
+ strerror(errno));
+
+ return true;
+ }
+
+#ifndef DOSLIKE
+ // Drop O_NONBLOCK, which is used only when we are accepting only
+ // regular files. After the open() call, we want things to block
+ // instead of giving EAGAIN.
+ if (reg_files_only) {
+ flags = fcntl(pair->src_fd, F_GETFL);
+ if (flags == -1)
+ goto error_msg;
+
+ flags &= ~O_NONBLOCK;
+
+ if (fcntl(pair->src_fd, F_SETFL, flags))
+ goto error_msg;
+ }
+#endif
+
+ // Stat the source file. We need the result also when we copy
+ // the permissions, and when unlinking.
+ if (fstat(pair->src_fd, &pair->src_st))
+ goto error_msg;
+
+ if (S_ISDIR(pair->src_st.st_mode)) {
+ message_warning(_("%s: Is a directory, skipping"),
+ pair->src_name);
+ goto error;
+ }
+
+ if (reg_files_only) {
+ if (!S_ISREG(pair->src_st.st_mode)) {
+ message_warning(_("%s: Not a regular file, "
+ "skipping"), pair->src_name);
+ goto error;
+ }
+
+ // These are meaningless on Windows.
+#ifndef DOSLIKE
+ if (pair->src_st.st_mode & (S_ISUID | S_ISGID)) {
+ // gzip rejects setuid and setgid files even
+ // when --force was used. bzip2 doesn't check
+ // for them, but calls fchown() after fchmod(),
+ // and many systems automatically drop setuid
+ // and setgid bits there.
+ //
+ // We accept setuid and setgid files if
+ // --force was used. We drop these bits
+ // explicitly in io_copy_attr().
+ message_warning(_("%s: File has setuid or "
+ "setgid bit set, skipping"),
+ pair->src_name);
+ goto error;
+ }
+
+ if (pair->src_st.st_mode & S_ISVTX) {
+ message_warning(_("%s: File has sticky bit "
+ "set, skipping"),
+ pair->src_name);
+ goto error;
+ }
+
+ if (pair->src_st.st_nlink > 1) {
+ message_warning(_("%s: Input file has more "
+ "than one hard link, "
+ "skipping"), pair->src_name);
+ goto error;
+ }
+#endif
+ }
+
+ return false;
+
+error_msg:
+ message_error("%s: %s", pair->src_name, strerror(errno));
+error:
+ (void)close(pair->src_fd);
+ return true;
+}
+
+
+/// \brief Closes source file of the file_pair structure
+///
+/// \param pair File whose src_fd should be closed
+/// \param success If true, the file will be removed from the disk if
+/// closing succeeds and --keep hasn't been used.
+static void
+io_close_src(file_pair *pair, bool success)
+{
+ if (pair->src_fd != STDIN_FILENO && pair->src_fd != -1) {
+#ifdef DOSLIKE
+ (void)close(pair->src_fd);
+#endif
+
+ // If we are going to unlink(), do it before closing the file.
+ // This way there's no risk that someone replaces the file and
+ // happens to get the same inode number, which would make us
+ // unlink() the wrong file.
+ //
+ // NOTE: DOS-like systems are an exception to this, because
+ // they don't allow unlinking files that are open. *sigh*
+ if (success && !opt_keep_original)
+ io_unlink(pair->src_name, &pair->src_st);
+
+#ifndef DOSLIKE
+ (void)close(pair->src_fd);
+#endif
+ }
+
+ return;
+}
+
+
+static bool
+io_open_dest(file_pair *pair)
+{
+ if (opt_stdout || pair->src_fd == STDIN_FILENO) {
+ // We don't modify or free() this.
+ pair->dest_name = (char *)"(stdout)";
+ pair->dest_fd = STDOUT_FILENO;
+#ifdef DOSLIKE
+ setmode(STDOUT_FILENO, O_BINARY);
+#endif
+ return false;
+ }
+
+ pair->dest_name = suffix_get_dest_name(pair->src_name);
+ if (pair->dest_name == NULL)
+ return true;
+
+ // If --force was used, unlink the target file first.
+ if (opt_force && unlink(pair->dest_name) && errno != ENOENT) {
+ message_error("%s: Cannot unlink: %s",
+ pair->dest_name, strerror(errno));
+ free(pair->dest_name);
+ return true;
+ }
+
+ // Open the file.
+ const int flags = O_WRONLY | O_BINARY | O_NOCTTY | O_CREAT | O_EXCL;
+ const mode_t mode = S_IRUSR | S_IWUSR;
+ pair->dest_fd = open(pair->dest_name, flags, mode);
+
+ if (pair->dest_fd == -1) {
+ // Don't bother with error message if user requested
+ // us to exit anyway.
+ if (!user_abort)
+ message_error("%s: %s", pair->dest_name,
+ strerror(errno));
+
+ free(pair->dest_name);
+ return true;
+ }
+
+ // If this really fails... well, we have a safe fallback.
+ if (fstat(pair->dest_fd, &pair->dest_st)) {
+ pair->dest_st.st_dev = 0;
+ pair->dest_st.st_ino = 0;
+ }
+
+ return false;
+}
+
+
+/// \brief Closes destination file of the file_pair structure
+///
+/// \param pair File whose dest_fd should be closed
+/// \param success If false, the file will be removed from the disk.
+///
+/// \return Zero if closing succeeds. On error, -1 is returned and
+/// error message printed.
+static int
+io_close_dest(file_pair *pair, bool success)
+{
+ if (pair->dest_fd == -1 || pair->dest_fd == STDOUT_FILENO)
+ return 0;
+
+ if (close(pair->dest_fd)) {
+ message_error(_("%s: Closing the file failed: %s"),
+ pair->dest_name, strerror(errno));
+
+ // Closing destination file failed, so we cannot trust its
+ // contents. Get rid of junk:
+ io_unlink(pair->dest_name, &pair->dest_st);
+ free(pair->dest_name);
+ return -1;
+ }
+
+ // If the operation using this file wasn't successful, we get rid
+ // of the junk file.
+ if (!success)
+ io_unlink(pair->dest_name, &pair->dest_st);
+
+ free(pair->dest_name);
+
+ return 0;
+}
+
+
+extern file_pair *
+io_open(const char *src_name)
+{
+ if (is_empty_filename(src_name))
+ return NULL;
+
+ // Since we have only one file open at a time, we can use
+ // a statically allocated structure.
+ static file_pair pair;
+
+ pair = (file_pair){
+ .src_name = src_name,
+ .dest_name = NULL,
+ .src_fd = -1,
+ .dest_fd = -1,
+ .src_eof = false,
+ };
+
+ // Block the signals, for which we have a custom signal handler, so
+ // that we don't need to worry about EINTR.
+ signals_block();
+
+ file_pair *ret = NULL;
+ if (!io_open_src(&pair)) {
+ // io_open_src() may have unblocked the signals temporarily,
+ // and thus user_abort may have got set even if open()
+ // succeeded.
+ if (user_abort || io_open_dest(&pair))
+ io_close_src(&pair, false);
+ else
+ ret = &pair;
+ }
+
+ signals_unblock();
+
+ return ret;
+}
+
+
+extern void
+io_close(file_pair *pair, bool success)
+{
+ signals_block();
+
+ if (success && pair->dest_fd != STDOUT_FILENO)
+ io_copy_attrs(pair);
+
+ // Close the destination first. If it fails, we must not remove
+ // the source file!
+ if (io_close_dest(pair, success))
+ success = false;
+
+ // Close the source file, and unlink it if the operation using this
+ // file pair was successful and we haven't requested to keep the
+ // source file.
+ io_close_src(pair, success);
+
+ signals_unblock();
+
+ return;
+}
+
+
+extern size_t
+io_read(file_pair *pair, uint8_t *buf, size_t size)
+{
+ // We use small buffers here.
+ assert(size < SSIZE_MAX);
+
+ size_t left = size;
+
+ while (left > 0) {
+ const ssize_t amount = read(pair->src_fd, buf, left);
+
+ if (amount == 0) {
+ pair->src_eof = true;
+ break;
+ }
+
+ if (amount == -1) {
+ if (errno == EINTR) {
+ if (user_abort)
+ return SIZE_MAX;
+
+ continue;
+ }
+
+ message_error(_("%s: Read error: %s"),
+ pair->src_name, strerror(errno));
+
+ // FIXME Is this needed?
+ pair->src_eof = true;
+
+ return SIZE_MAX;
+ }
+
+ buf += (size_t)(amount);
+ left -= (size_t)(amount);
+ }
+
+ return size - left;
+}
+
+
+extern bool
+io_write(const file_pair *pair, const uint8_t *buf, size_t size)
+{
+ assert(size < SSIZE_MAX);
+
+ while (size > 0) {
+ const ssize_t amount = write(pair->dest_fd, buf, size);
+ if (amount == -1) {
+ if (errno == EINTR) {
+ if (user_abort)
+ return true;
+
+ continue;
+ }
+
+ // Handle broken pipe specially. gzip and bzip2
+ // don't print anything on SIGPIPE. In addition,
+ // gzip --quiet uses exit status 2 (warning) on
+ // broken pipe instead of whatever raise(SIGPIPE)
+ // would make it return. It is there to hide "Broken
+ // pipe" message on some old shells (probably old
+ // GNU bash).
+ //
+ // We don't do anything special with --quiet, which
+ // is what bzip2 does too. If we get SIGPIPE, we
+ // will handle it like other signals by setting
+ // user_abort, and get EPIPE here.
+ if (errno != EPIPE)
+ message_error(_("%s: Write error: %s"),
+ pair->dest_name, strerror(errno));
+
+ return true;
+ }
+
+ buf += (size_t)(amount);
+ size -= (size_t)(amount);
+ }
+
+ return false;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/file_io.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/file_io.h
new file mode 100644
index 00000000..b3f24046
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/file_io.h
@@ -0,0 +1,88 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file file_io.h
+/// \brief I/O types and functions
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// Some systems have suboptimal BUFSIZ. Use a bit bigger value on them.
+#if BUFSIZ <= 1024
+# define IO_BUFFER_SIZE 8192
+#else
+# define IO_BUFFER_SIZE BUFSIZ
+#endif
+
+
+typedef struct {
+ /// Name of the source filename (as given on the command line) or
+ /// pointer to static "(stdin)" when reading from standard input.
+ const char *src_name;
+
+ /// Destination filename converted from src_name or pointer to static
+ /// "(stdout)" when writing to standard output.
+ char *dest_name;
+
+ /// File descriptor of the source file
+ int src_fd;
+
+ /// File descriptor of the target file
+ int dest_fd;
+
+ /// Stat of the source file.
+ struct stat src_st;
+
+ /// Stat of the destination file.
+ struct stat dest_st;
+
+ /// True once end of the source file has been detected.
+ bool src_eof;
+
+} file_pair;
+
+
+/// \brief Initialize the I/O module
+extern void io_init(void);
+
+
+/// \brief Opens a file pair
+extern file_pair *io_open(const char *src_name);
+
+
+/// \brief Closes the file descriptors and frees possible allocated memory
+///
+/// The success argument determines if source or destination file gets
+/// unlinked:
+/// - false: The destination file is unlinked.
+/// - true: The source file is unlinked unless writing to stdout or --keep
+/// was used.
+extern void io_close(file_pair *pair, bool success);
+
+
+/// \brief Reads from the source file to a buffer
+///
+/// \param pair File pair having the source file open for reading
+/// \param buf Destination buffer to hold the read data
+/// \param size Size of the buffer; assumed to be smaller than SSIZE_MAX
+///
+/// \return On success, number of bytes read is returned. On end of
+/// file zero is returned and pair->src_eof set to true.
+/// On error, SIZE_MAX is returned and error message printed.
+extern size_t io_read(file_pair *pair, uint8_t *buf, size_t size);
+
+
+/// \brief Writes a buffer to the destination file
+///
+/// \param pair File pair having the destination file open for writing
+/// \param buf Buffer containing the data to be written
+/// \param size Size of the buffer; assumed to be smaller than SSIZE_MAX
+///
+/// \return On success, false is returned. On error, true is returned
+/// and an error message printed.
+extern bool io_write(const file_pair *pair, const uint8_t *buf, size_t size);
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/hardware.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/hardware.c
new file mode 100644
index 00000000..a56ef2f1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/hardware.c
@@ -0,0 +1,97 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file hardware.c
+/// \brief Detection of available hardware resources
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "private.h"
+#include "physmem.h"
+#include "cpucores.h"
+
+
+/// Maximum number of free *coder* threads. This can be set with
+/// the --threads=NUM command line option.
+static uint32_t threadlimit;
+
+/// Memory usage limit
+static uint64_t memlimit;
+
+
+extern void
+hardware_threadlimit_set(uint32_t new_threadlimit)
+{
+ if (new_threadlimit == 0) {
+ // The default is the number of available CPU cores.
+ threadlimit = cpucores();
+ if (threadlimit == 0)
+ threadlimit = 1;
+ } else {
+ threadlimit = new_threadlimit;
+ }
+
+ return;
+}
+
+
+extern uint32_t
+hardware_threadlimit_get(void)
+{
+ return threadlimit;
+}
+
+
+extern void
+hardware_memlimit_set(uint64_t new_memlimit)
+{
+ if (new_memlimit == 0) {
+ // The default is 40 % of total installed physical RAM.
+ hardware_memlimit_set_percentage(40);
+ } else {
+ memlimit = new_memlimit;
+ }
+
+ return;
+}
+
+
+extern void
+hardware_memlimit_set_percentage(uint32_t percentage)
+{
+ assert(percentage > 0);
+ assert(percentage <= 100);
+
+ uint64_t mem = physmem();
+
+ // If we cannot determine the amount of RAM, assume 32 MiB. Maybe
+ // even that is too much on some systems. But on most systems it's
+ // far too little, and can be annoying.
+ if (mem == 0)
+ mem = UINT64_C(32) * 1024 * 1024;
+
+ memlimit = percentage * mem / 100;
+ return;
+}
+
+
+extern uint64_t
+hardware_memlimit_get(void)
+{
+ return memlimit;
+}
+
+
+extern void
+hardware_init(void)
+{
+ hardware_memlimit_set(0);
+ hardware_threadlimit_set(0);
+ return;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/hardware.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/hardware.h
new file mode 100644
index 00000000..3b041c0f
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/hardware.h
@@ -0,0 +1,37 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file hardware.h
+/// \brief Detection of available hardware resources
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+/// Initialize some hardware-specific variables, which are needed by other
+/// hardware_* functions.
+extern void hardware_init(void);
+
+
+/// Set custom value for maximum number of coder threads.
+extern void hardware_threadlimit_set(uint32_t threadlimit);
+
+/// Get the maximum number of coder threads. Some additional helper threads
+/// are allowed on top of this.
+extern uint32_t hardware_threadlimit_get(void);
+
+
+/// Set custom memory usage limit. This is used for both encoding and
+/// decoding. Zero indicates resetting the limit back to defaults.
+extern void hardware_memlimit_set(uint64_t memlimit);
+
+/// Set custom memory usage limit as a percentage of installed RAM.
+/// The percentage must be in the range [1, 100].
+extern void hardware_memlimit_set_percentage(uint32_t percentage);
+
+/// Get the current memory usage limit.
+extern uint64_t hardware_memlimit_get(void);
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/main.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/main.c
new file mode 100644
index 00000000..40f48645
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/main.c
@@ -0,0 +1,314 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file main.c
+/// \brief main()
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "private.h"
+#include <ctype.h>
+
+
+/// Exit status to use. This can be changed with set_exit_status().
+static enum exit_status_type exit_status = E_SUCCESS;
+
+/// True if --no-warn is specified. When this is true, we don't set
+/// the exit status to E_WARNING when something worth a warning happens.
+static bool no_warn = false;
+
+
+extern void
+set_exit_status(enum exit_status_type new_status)
+{
+ assert(new_status == E_WARNING || new_status == E_ERROR);
+
+ if (exit_status != E_ERROR)
+ exit_status = new_status;
+
+ return;
+}
+
+
+extern void
+set_exit_no_warn(void)
+{
+ no_warn = true;
+ return;
+}
+
+
+extern void
+my_exit(enum exit_status_type status)
+{
+ // Close stdout. If something goes wrong, print an error message
+ // to stderr.
+ {
+ const int ferror_err = ferror(stdout);
+ const int fclose_err = fclose(stdout);
+ if (ferror_err || fclose_err) {
+ // If it was fclose() that failed, we have the reason
+ // in errno. If only ferror() indicated an error,
+ // we have no idea what the reason was.
+ message(V_ERROR, "%s: %s", _("Writing to standard "
+ "output failed"),
+ fclose_err ? strerror(errno)
+ : _("Unknown error"));
+ status = E_ERROR;
+ }
+ }
+
+ // Close stderr. If something goes wrong, there's nothing where we
+ // could print an error message. Just set the exit status.
+ {
+ const int ferror_err = ferror(stderr);
+ const int fclose_err = fclose(stderr);
+ if (fclose_err || ferror_err)
+ status = E_ERROR;
+ }
+
+ // Suppress the exit status indicating a warning if --no-warn
+ // was specified.
+ if (status == E_WARNING && no_warn)
+ status = E_SUCCESS;
+
+ // If we have got a signal, raise it to kill the program.
+ // Otherwise we just call exit().
+ signals_exit();
+ exit(status);
+}
+
+
+static const char *
+read_name(const args_info *args)
+{
+ // FIXME: Maybe we should have some kind of memory usage limit here
+ // like the tool has for the actual compression and uncompression.
+ // Giving some huge text file with --files0 makes us read the
+ // whole file into RAM.
+ static char *name = NULL;
+ static size_t size = 256;
+
+ // Allocate the initial buffer. This is never freed, since after it
+ // is no longer needed, the program exits very soon. It is safe to
+ // use xmalloc() and xrealloc() in this function, because while
+ // executing this function, no files are open for writing, and thus
+ // there's no need to clean up anything before exiting.
+ if (name == NULL)
+ name = xmalloc(size);
+
+ // Write position in name
+ size_t pos = 0;
+
+ // Read one character at a time into name.
+ while (!user_abort) {
+ const int c = fgetc(args->files_file);
+
+ if (ferror(args->files_file)) {
+ // Take care of EINTR since we have established
+ // the signal handlers already.
+ if (errno == EINTR)
+ continue;
+
+ message_error(_("%s: Error reading filenames: %s"),
+ args->files_name, strerror(errno));
+ return NULL;
+ }
+
+ if (feof(args->files_file)) {
+ if (pos != 0)
+ message_error(_("%s: Unexpected end of input "
+ "when reading filenames"),
+ args->files_name);
+
+ return NULL;
+ }
+
+ if (c == args->files_delim) {
+ // We allow consecutive newline (--files) or '\0'
+ // characters (--files0), and ignore such empty
+ // filenames.
+ if (pos == 0)
+ continue;
+
+ // A non-empty name was read. Terminate it with '\0'
+ // and return it.
+ name[pos] = '\0';
+ return name;
+ }
+
+ if (c == '\0') {
+ // A null character was found when using --files,
+ // which expects plain text input separated with
+ // newlines.
+ message_error(_("%s: Null character found when "
+ "reading filenames; maybe you meant "
+ "to use `--files0' instead "
+ "of `--files'?"), args->files_name);
+ return NULL;
+ }
+
+ name[pos++] = c;
+
+ // Allocate more memory if needed. There must always be space
+ // at least for one character to allow terminating the string
+ // with '\0'.
+ if (pos == size) {
+ size *= 2;
+ name = xrealloc(name, size);
+ }
+ }
+
+ return NULL;
+}
+
+
+int
+main(int argc, char **argv)
+{
+ // Initialize the file I/O as the very first step. This makes sure
+ // that stdin, stdout, and stderr are something valid.
+ io_init();
+
+#ifdef DOSLIKE
+ // Adjust argv[0] to make it look nicer in messages, and also to
+ // help the code in args.c.
+ {
+ // Strip the leading path.
+ char *p = argv[0] + strlen(argv[0]);
+ while (argv[0] < p && p[-1] != '/' && p[-1] != '\\')
+ --p;
+
+ argv[0] = p;
+
+ // Strip the .exe suffix.
+ p = strrchr(p, '.');
+ if (p != NULL)
+ *p = '\0';
+
+ // Make it lowercase.
+ for (p = argv[0]; *p != '\0'; ++p)
+ if (*p >= 'A' && *p <= 'Z')
+ *p = *p - 'A' + 'a';
+ }
+#endif
+
+ // Set up the locale.
+ setlocale(LC_ALL, "");
+
+#ifdef ENABLE_NLS
+ // Set up the message translations too.
+ bindtextdomain(PACKAGE, LOCALEDIR);
+ textdomain(PACKAGE);
+#endif
+
+ // Set the program invocation name used in various messages, and
+ // do other message handling related initializations.
+ message_init(argv[0]);
+
+ // Set hardware-dependent default values. These can be overridden
+ // on the command line, thus this must be done before parse_args().
+ hardware_init();
+
+ // Parse the command line arguments and get an array of filenames.
+ // This doesn't return if something is wrong with the command line
+ // arguments. If there are no arguments, one filename ("-") is still
+ // returned to indicate stdin.
+ args_info args;
+ args_parse(&args, argc, argv);
+
+ // Tell the message handling code how many input files there are if
+ // we know it. This way the progress indicator can show it.
+ if (args.files_name != NULL)
+ message_set_files(0);
+ else
+ message_set_files(args.arg_count);
+
+ // Refuse to write compressed data to standard output if it is
+ // a terminal and --force wasn't used.
+ if (opt_mode == MODE_COMPRESS && !opt_force) {
+ if (opt_stdout || (args.arg_count == 1
+ && strcmp(args.arg_names[0], "-") == 0)) {
+ if (is_tty_stdout()) {
+ message_try_help();
+ my_exit(E_ERROR);
+ }
+ }
+ }
+
+ if (opt_mode == MODE_LIST) {
+ message_fatal("--list is not implemented yet.");
+ }
+
+ // Hook the signal handlers. We don't need these before we start
+ // the actual action, so this is done after parsing the command
+ // line arguments.
+ signals_init();
+
+ // Process the files given on the command line. Note that if no names
+	// were given, args_parse() gave us a fake "-" filename.
+ for (size_t i = 0; i < args.arg_count && !user_abort; ++i) {
+ if (strcmp("-", args.arg_names[i]) == 0) {
+ // Processing from stdin to stdout. Unless --force
+ // was used, check that we aren't writing compressed
+ // data to a terminal or reading it from terminal.
+ if (!opt_force) {
+ if (opt_mode == MODE_COMPRESS) {
+ if (is_tty_stdout())
+ continue;
+ } else if (is_tty_stdin()) {
+ continue;
+ }
+ }
+
+ // It doesn't make sense to compress data from stdin
+ // if we are supposed to read filenames from stdin
+ // too (enabled with --files or --files0).
+ if (args.files_name == stdin_filename) {
+ message_error(_("Cannot read data from "
+ "standard input when "
+ "reading filenames "
+ "from standard input"));
+ continue;
+ }
+
+ // Replace the "-" with a special pointer, which is
+ // recognized by coder_run() and other things.
+ // This way error messages get a proper filename
+ // string and the code still knows that it is
+ // handling the special case of stdin.
+ args.arg_names[i] = (char *)stdin_filename;
+ }
+
+ // Do the actual compression or uncompression.
+ coder_run(args.arg_names[i]);
+ }
+
+ // If --files or --files0 was used, process the filenames from the
+ // given file or stdin. Note that here we don't consider "-" to
+ // indicate stdin like we do with the command line arguments.
+ if (args.files_name != NULL) {
+ // read_name() checks for user_abort so we don't need to
+ // check it as loop termination condition.
+ while (true) {
+ const char *name = read_name(&args);
+ if (name == NULL)
+ break;
+
+ // read_name() doesn't return empty names.
+ assert(name[0] != '\0');
+ coder_run(name);
+ }
+
+ if (args.files_name != stdin_filename)
+ (void)fclose(args.files_file);
+ }
+
+ my_exit(exit_status);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/main.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/main.h
new file mode 100644
index 00000000..5253991b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/main.h
@@ -0,0 +1,39 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file main.h
+/// \brief Miscellaneous declarations
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+/// Possible exit status values. These are the same as used by gzip and bzip2.
+enum exit_status_type {
+ E_SUCCESS = 0,
+ E_ERROR = 1,
+ E_WARNING = 2,
+};
+
+
+/// Sets the exit status after a warning or error has occurred. If new_status
+/// is E_WARNING and the old exit status was already E_ERROR, the exit
+/// status is not changed.
+extern void set_exit_status(enum exit_status_type new_status);
+
+
+/// Use E_SUCCESS instead of E_WARNING if something worth a warning occurs
+/// but nothing worth an error has occurred. This is called when --no-warn
+/// is specified.
+extern void set_exit_no_warn(void);
+
+
+/// Exits the program using the given status. This takes care of closing
+/// stdin, stdout, and stderr and catches possible errors. If we received
+/// a signal, this function re-raises it so that it appears to the parent
+/// process that we were killed by the signal sent by the user.
+extern void my_exit(enum exit_status_type status) lzma_attribute((noreturn));
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/message.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/message.c
new file mode 100644
index 00000000..7d3c7cfa
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/message.c
@@ -0,0 +1,1174 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file message.c
+/// \brief Printing messages to stderr
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "private.h"
+
+#ifdef HAVE_SYS_TIME_H
+# include <sys/time.h>
+#endif
+
+#include <stdarg.h>
+
+
+/// Name of the program which is prefixed to the error messages.
+static const char *argv0;
+
+/// Number of the current file
+static unsigned int files_pos = 0;
+
+/// Total number of input files; zero if unknown.
+static unsigned int files_total;
+
+/// Verbosity level
+static enum message_verbosity verbosity = V_WARNING;
+
+/// Filename which we will print with the verbose messages
+static const char *filename;
+
+/// True once a filename has been printed to stderr as part of a progress
+/// message. If automatic progress updating isn't enabled, this becomes true
+/// after the first progress message has been printed due to the user sending
+/// SIGINFO, SIGUSR1, or SIGALRM. Once this variable is true, we will print
+/// an empty line before the next filename to make the output more readable.
+static bool first_filename_printed = false;
+
+/// This is set to true when we have printed the current filename to stderr
+/// as part of a progress message. This variable is useful only if not
+/// updating progress automatically: if user sends many SIGINFO, SIGUSR1, or
+/// SIGALRM signals, we won't print the name of the same file multiple times.
+static bool current_filename_printed = false;
+
+/// True if we should print progress indicator and update it automatically
+/// if also verbose >= V_VERBOSE.
+static bool progress_automatic;
+
+/// True if message_progress_start() has been called but
+/// message_progress_end() hasn't been called yet.
+static bool progress_started = false;
+
+/// This is true when a progress message was printed and the cursor is still
+/// on the same line with the progress message. In that case, a newline has
+/// to be printed before any error messages.
+static bool progress_active = false;
+
+/// Pointer to lzma_stream used to do the encoding or decoding.
+static lzma_stream *progress_strm;
+
+/// Expected size of the input stream is needed to show completion percentage
+/// and estimate remaining time.
+static uint64_t expected_in_size;
+
+/// Time when we started processing the file
+static uint64_t start_time;
+
+
+// Use alarm() and SIGALRM when they are supported. This has two minor
+// advantages over the alternative of polling gettimeofday():
+// - It is possible for the user to send SIGINFO, SIGUSR1, or SIGALRM to
+// get intermediate progress information even when --verbose wasn't used
+// or stderr is not a terminal.
+// - alarm() + SIGALRM seems to have slightly less overhead than polling
+// gettimeofday().
+#ifdef SIGALRM
+
+/// The signal handler for SIGALRM sets this to true. It is set back to false
+/// once the progress message has been updated.
+static volatile sig_atomic_t progress_needs_updating = false;
+
+/// Signal handler for SIGALRM
+static void
+progress_signal_handler(int sig lzma_attribute((unused)))
+{
+ progress_needs_updating = true;
+ return;
+}
+
+#else
+
+/// This is true when progress message printing is wanted. Using the same
+/// variable name as above to avoid some ifdefs.
+static bool progress_needs_updating = false;
+
+/// Elapsed time when the next progress message update should be done.
+static uint64_t progress_next_update;
+
+#endif
+
+
+/// Get the current time as microseconds since epoch
+static uint64_t
+my_time(void)
+{
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return (uint64_t)(tv.tv_sec) * UINT64_C(1000000) + tv.tv_usec;
+}
+
+
+/// Wrapper for snprintf() to help constructing a string in pieces.
+static void lzma_attribute((format(printf, 3, 4)))
+my_snprintf(char **pos, size_t *left, const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ const int len = vsnprintf(*pos, *left, fmt, ap);
+ va_end(ap);
+
+ // If an error occurred, we want the caller to think that the whole
+ // buffer was used. This way no more data will be written to the
+ // buffer. We don't need better error handling here.
+ if (len < 0 || (size_t)(len) >= *left) {
+ *left = 0;
+ } else {
+ *pos += len;
+ *left -= len;
+ }
+
+ return;
+}
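+// Illustrative sketch of the intended usage pattern (made-up buffer and
+// values; the real callers are progress_sizes_helper() and progress_sizes()
+// below):
+//
+//	char buf[32];
+//	char *pos = buf;
+//	size_t left = sizeof(buf);
+//	my_snprintf(&pos, &left, "%s KiB", "12");
+//	my_snprintf(&pos, &left, " / %s KiB", "345");
+//	// buf now holds "12 KiB / 345 KiB". On truncation, left becomes
+//	// zero and further calls write nothing.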
+
+
+extern void
+message_init(const char *given_argv0)
+{
+ // Name of the program
+ argv0 = given_argv0;
+
+ // If --verbose is used, we use a progress indicator if and only
+ // if stderr is a terminal. If stderr is not a terminal, we print
+ // verbose information only after finishing the file. As a special
+ // exception, even if --verbose was not used, user can send SIGALRM
+ // to make us print progress information once without automatic
+ // updating.
+ progress_automatic = isatty(STDERR_FILENO);
+
+ // Commented out because COLUMNS is rarely exported to environment.
+	// Most users have at least 80 columns anyway; we can think of
+	// something fancier here if enough people complain.
+/*
+ if (progress_automatic) {
+ // stderr is a terminal. Check the COLUMNS environment
+ // variable to see if the terminal is wide enough. If COLUMNS
+ // doesn't exist or it has some unparseable value, we assume
+ // that the terminal is wide enough.
+ const char *columns_str = getenv("COLUMNS");
+ if (columns_str != NULL) {
+ char *endptr;
+ const long columns = strtol(columns_str, &endptr, 10);
+ if (*endptr != '\0' || columns < 80)
+ progress_automatic = false;
+ }
+ }
+*/
+
+#ifdef SIGALRM
+ // At least DJGPP lacks SA_RESTART. It's not essential for us (the
+ // rest of the code can handle interrupted system calls), so just
+ // define it zero.
+# ifndef SA_RESTART
+# define SA_RESTART 0
+# endif
+ // Establish the signal handlers which set a flag to tell us that
+ // progress info should be updated. Since these signals don't
+ // require any quick action, we set SA_RESTART.
+ static const int sigs[] = {
+#ifdef SIGALRM
+ SIGALRM,
+#endif
+#ifdef SIGINFO
+ SIGINFO,
+#endif
+#ifdef SIGUSR1
+ SIGUSR1,
+#endif
+ };
+
+ struct sigaction sa;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART;
+ sa.sa_handler = &progress_signal_handler;
+
+ for (size_t i = 0; i < ARRAY_SIZE(sigs); ++i)
+ if (sigaction(sigs[i], &sa, NULL))
+ message_signal_handler();
+#endif
+
+ return;
+}
+
+
+extern void
+message_verbosity_increase(void)
+{
+ if (verbosity < V_DEBUG)
+ ++verbosity;
+
+ return;
+}
+
+
+extern void
+message_verbosity_decrease(void)
+{
+ if (verbosity > V_SILENT)
+ --verbosity;
+
+ return;
+}
+
+
+extern void
+message_set_files(unsigned int files)
+{
+ files_total = files;
+ return;
+}
+
+
+/// Prints the name of the current file if it hasn't been printed already,
+/// except if we are processing exactly one stream from stdin to stdout.
+/// I think it looks nicer to not print "(stdin)" when --verbose is used
+/// in a pipe and no other files are processed.
+static void
+print_filename(void)
+{
+ if (!current_filename_printed
+ && (files_total != 1 || filename != stdin_filename)) {
+ signals_block();
+
+ // If a file was already processed, put an empty line
+ // before the next filename to improve readability.
+ if (first_filename_printed)
+ fputc('\n', stderr);
+
+ first_filename_printed = true;
+ current_filename_printed = true;
+
+		// files_total is zero when we don't know how many files
+		// there will be, e.g. due to --files or --files0.
+ if (files_total == 0)
+ fprintf(stderr, "%s (%u)\n", filename,
+ files_pos);
+ else
+ fprintf(stderr, "%s (%u/%u)\n", filename,
+ files_pos, files_total);
+
+ signals_unblock();
+ }
+
+ return;
+}
+
+
+extern void
+message_progress_start(
+ lzma_stream *strm, const char *src_name, uint64_t in_size)
+{
+ // Store the pointer to the lzma_stream used to do the coding.
+ // It is needed to find out the position in the stream.
+ progress_strm = strm;
+
+ // Store the processing start time of the file and its expected size.
+ // If we aren't printing any statistics, then these are unused. But
+ // since it is possible that the user sends us a signal to show
+ // statistics, we need to have these available anyway.
+ start_time = my_time();
+ filename = src_name;
+ expected_in_size = in_size;
+
+ // Indicate that progress info may need to be printed before
+ // printing error messages.
+ progress_started = true;
+
+ // Indicate the name of this file hasn't been printed to
+ // stderr yet.
+ current_filename_printed = false;
+
+ // Start numbering the files starting from one.
+ ++files_pos;
+
+ // If progress indicator is wanted, print the filename and possibly
+ // the file count now.
+ if (verbosity >= V_VERBOSE && progress_automatic) {
+ // Print the filename to stderr if that is appropriate with
+ // the current settings.
+ print_filename();
+
+ // Start the timer to display the first progress message
+ // after one second. An alternative would be to show the
+		// first message almost immediately, but delaying by one
+ // second looks better to me, since extremely early
+ // progress info is pretty much useless.
+#ifdef SIGALRM
+ // First disable a possibly existing alarm.
+ alarm(0);
+ progress_needs_updating = false;
+ alarm(1);
+#else
+ progress_needs_updating = true;
+ progress_next_update = 1000000;
+#endif
+ }
+
+ return;
+}
+
+
+/// Make the string indicating completion percentage.
+static const char *
+progress_percentage(uint64_t in_pos, bool final)
+{
+ static char buf[sizeof("100.0 %")];
+
+ double percentage;
+
+ if (final) {
+ // Use floating point conversion of snprintf() also for
+ // 100.0 % instead of fixed string, because the decimal
+ // separator isn't a dot in all locales.
+ percentage = 100.0;
+ } else {
+ // If the size of the input file is unknown or the size told us is
+ // clearly wrong since we have processed more data than the alleged
+ // size of the file, show a static string indicating that we have
+ // no idea of the completion percentage.
+ if (expected_in_size == 0 || in_pos > expected_in_size)
+ return "--- %";
+
+ // Never show 100.0 % before we actually are finished.
+ percentage = (double)(in_pos) / (double)(expected_in_size)
+ * 99.9;
+ }
+
+ snprintf(buf, sizeof(buf), "%.1f %%", percentage);
+
+ return buf;
+}
+
+
+static void
+progress_sizes_helper(char **pos, size_t *left, uint64_t value, bool final)
+{
+ // Allow high precision only for the final message, since it looks
+ // stupid for in-progress information.
+ if (final) {
+		// At most four digits are allowed for the exact byte count.
+ if (value < 10000) {
+ my_snprintf(pos, left, "%s B",
+ uint64_to_str(value, 0));
+ return;
+ }
+
+		// At most five significant digits are allowed for KiB.
+ if (value < UINT64_C(10239900)) {
+ my_snprintf(pos, left, "%s KiB", double_to_str(
+ (double)(value) / 1024.0));
+ return;
+ }
+ }
+
+ // Otherwise we use MiB.
+ my_snprintf(pos, left, "%s MiB",
+ double_to_str((double)(value) / (1024.0 * 1024.0)));
+
+ return;
+}
+
+
+/// Make the string containing the amount of input processed, amount of
+/// output produced, and the compression ratio.
+static const char *
+progress_sizes(uint64_t compressed_pos, uint64_t uncompressed_pos, bool final)
+{
+ // This is enough to hold sizes up to about 99 TiB if thousand
+ // separator is used, or about 1 PiB without thousand separator.
+ // After that the progress indicator will look a bit silly, since
+ // the compression ratio no longer fits with three decimal places.
+ static char buf[44];
+
+ char *pos = buf;
+ size_t left = sizeof(buf);
+
+	// Print the sizes. If this is the final message, use more reasonable
+ // units than MiB if the file was small.
+ progress_sizes_helper(&pos, &left, compressed_pos, final);
+ my_snprintf(&pos, &left, " / ");
+ progress_sizes_helper(&pos, &left, uncompressed_pos, final);
+
+ // Avoid division by zero. If we cannot calculate the ratio, set
+ // it to some nice number greater than 10.0 so that it gets caught
+ // in the next if-clause.
+ const double ratio = uncompressed_pos > 0
+ ? (double)(compressed_pos) / (double)(uncompressed_pos)
+ : 16.0;
+
+ // If the ratio is very bad, just indicate that it is greater than
+ // 9.999. This way the length of the ratio field stays fixed.
+ if (ratio > 9.999)
+ snprintf(pos, left, " > %.3f", 9.999);
+ else
+ snprintf(pos, left, " = %.3f", ratio);
+
+ return buf;
+}
+
+
+/// Make the string containing the processing speed of uncompressed data.
+static const char *
+progress_speed(uint64_t uncompressed_pos, uint64_t elapsed)
+{
+	// Don't print the speed immediately, since the early values look
+	// somewhat random.
+ if (elapsed < 3000000)
+ return "";
+
+ static const char unit[][8] = {
+ "KiB/s",
+ "MiB/s",
+ "GiB/s",
+ };
+
+ size_t unit_index = 0;
+
+ // Calculate the speed as KiB/s.
+ double speed = (double)(uncompressed_pos)
+ / ((double)(elapsed) * (1024.0 / 1e6));
+
+ // Adjust the unit of the speed if needed.
+ while (speed > 999.0) {
+ speed /= 1024.0;
+ if (++unit_index == ARRAY_SIZE(unit))
+ return ""; // Way too fast ;-)
+ }
+
+ // Use decimal point only if the number is small. Examples:
+ // - 0.1 KiB/s
+ // - 9.9 KiB/s
+ // - 99 KiB/s
+ // - 999 KiB/s
+ static char buf[sizeof("999 GiB/s")];
+ snprintf(buf, sizeof(buf), "%.*f %s",
+ speed > 9.9 ? 0 : 1, speed, unit[unit_index]);
+ return buf;
+}
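+// Worked example of the arithmetic above (made-up numbers): with
+// uncompressed_pos = 52428800 (50 MiB) and elapsed = 5000000 (5 s),
+// speed = 52428800 / (5000000 * 1024 / 1e6) = 10240 KiB/s, which gets
+// scaled down once to 10 MiB/s and is printed as "10 MiB/s" (no decimal,
+// because the value is above 9.9).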
+
+
+/// Make a string indicating elapsed or remaining time. The format is either
+/// M:SS or H:MM:SS depending on whether the time is an hour or more.
+static const char *
+progress_time(uint64_t useconds)
+{
+ // 9999 hours = 416 days
+ static char buf[sizeof("9999:59:59")];
+
+ uint32_t seconds = useconds / 1000000;
+
+ // Don't show anything if the time is zero or ridiculously big.
+ if (seconds == 0 || seconds > ((9999 * 60) + 59) * 60 + 59)
+ return "";
+
+ uint32_t minutes = seconds / 60;
+ seconds %= 60;
+
+ if (minutes >= 60) {
+ const uint32_t hours = minutes / 60;
+ minutes %= 60;
+ snprintf(buf, sizeof(buf),
+ "%" PRIu32 ":%02" PRIu32 ":%02" PRIu32,
+ hours, minutes, seconds);
+ } else {
+ snprintf(buf, sizeof(buf), "%" PRIu32 ":%02" PRIu32,
+ minutes, seconds);
+ }
+
+ return buf;
+}
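+// For example (made-up values): 3750 s of elapsed time is 62 min 30 s,
+// which is at least an hour and is therefore printed in the H:MM:SS form
+// as "1:02:30"; 140 s would be printed as "2:20".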
+
+
+/// Make the string containing the estimated remaining time, or, if the
+/// amount of input isn't known, how much time has elapsed.
+static const char *
+progress_remaining(uint64_t in_pos, uint64_t elapsed)
+{
+	// Show the amount of time spent so far when an estimate of the
+	// remaining time wouldn't be reasonable:
+	//  - Input size is unknown.
+	//  - Input has grown bigger since we started (de)compressing.
+	//  - We haven't processed much data yet, so the estimate would be
+	//    too inaccurate.
+	//  - Only a few seconds have passed since we started
+	//    (de)compressing, so the estimate would be too inaccurate.
+ if (expected_in_size == 0 || in_pos > expected_in_size
+ || in_pos < (UINT64_C(1) << 19) || elapsed < 8000000)
+ return progress_time(elapsed);
+
+ // Calculate the estimate. Don't give an estimate of zero seconds,
+	// since it is possible that all the input has already been passed
+ // to the library, but there is still quite a bit of output pending.
+ uint32_t remaining = (double)(expected_in_size - in_pos)
+ * ((double)(elapsed) / 1e6) / (double)(in_pos);
+ if (remaining < 1)
+ remaining = 1;
+
+ static char buf[sizeof("9 h 55 min")];
+
+ // Select appropriate precision for the estimated remaining time.
+ if (remaining <= 10) {
+ // At maximum of 10 seconds remaining.
+ // Show the number of seconds as is.
+ snprintf(buf, sizeof(buf), "%" PRIu32 " s", remaining);
+
+ } else if (remaining <= 50) {
+ // At maximum of 50 seconds remaining.
+ // Round up to the next multiple of five seconds.
+ remaining = (remaining + 4) / 5 * 5;
+ snprintf(buf, sizeof(buf), "%" PRIu32 " s", remaining);
+
+ } else if (remaining <= 590) {
+ // At maximum of 9 minutes and 50 seconds remaining.
+ // Round up to the next multiple of ten seconds.
+ remaining = (remaining + 9) / 10 * 10;
+ snprintf(buf, sizeof(buf), "%" PRIu32 " min %" PRIu32 " s",
+ remaining / 60, remaining % 60);
+
+ } else if (remaining <= 59 * 60) {
+ // At maximum of 59 minutes remaining.
+ // Round up to the next multiple of a minute.
+ remaining = (remaining + 59) / 60;
+ snprintf(buf, sizeof(buf), "%" PRIu32 " min", remaining);
+
+ } else if (remaining <= 9 * 3600 + 50 * 60) {
+ // At maximum of 9 hours and 50 minutes left.
+ // Round up to the next multiple of ten minutes.
+ remaining = (remaining + 599) / 600 * 10;
+ snprintf(buf, sizeof(buf), "%" PRIu32 " h %" PRIu32 " min",
+ remaining / 60, remaining % 60);
+
+ } else if (remaining <= 23 * 3600) {
+ // At maximum of 23 hours remaining.
+ // Round up to the next multiple of an hour.
+ remaining = (remaining + 3599) / 3600;
+ snprintf(buf, sizeof(buf), "%" PRIu32 " h", remaining);
+
+ } else if (remaining <= 9 * 24 * 3600 + 23 * 3600) {
+ // At maximum of 9 days and 23 hours remaining.
+ // Round up to the next multiple of an hour.
+ remaining = (remaining + 3599) / 3600;
+ snprintf(buf, sizeof(buf), "%" PRIu32 " d %" PRIu32 " h",
+ remaining / 24, remaining % 24);
+
+ } else if (remaining <= 999 * 24 * 3600) {
+ // At maximum of 999 days remaining. ;-)
+ // Round up to the next multiple of a day.
+ remaining = (remaining + 24 * 3600 - 1) / (24 * 3600);
+ snprintf(buf, sizeof(buf), "%" PRIu32 " d", remaining);
+
+ } else {
+ // The estimated remaining time is so big that it's better
+ // that we just show the elapsed time.
+ return progress_time(elapsed);
+ }
+
+ return buf;
+}
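+// Worked example of the rounding above (made-up numbers): with
+// expected_in_size = 100 MiB, in_pos = 20 MiB, and elapsed = 34 s, the raw
+// estimate is 80 MiB * 34 s / 20 MiB = 136 s; that falls into the 51-590 s
+// bucket, gets rounded up to the next multiple of ten seconds (140 s), and
+// is printed as "2 min 20 s".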
+
+
+/// Calculate the elapsed time as microseconds.
+static uint64_t
+progress_elapsed(void)
+{
+ return my_time() - start_time;
+}
+
+
+/// Get information about position in the stream. This is currently simple,
+/// but it will become more complicated once we have multithreading support.
+static void
+progress_pos(uint64_t *in_pos,
+ uint64_t *compressed_pos, uint64_t *uncompressed_pos)
+{
+ *in_pos = progress_strm->total_in;
+
+ if (opt_mode == MODE_COMPRESS) {
+ *compressed_pos = progress_strm->total_out;
+ *uncompressed_pos = progress_strm->total_in;
+ } else {
+ *compressed_pos = progress_strm->total_in;
+ *uncompressed_pos = progress_strm->total_out;
+ }
+
+ return;
+}
+
+
+extern void
+message_progress_update(void)
+{
+ if (!progress_needs_updating)
+ return;
+
+ // Calculate how long we have been processing this file.
+ const uint64_t elapsed = progress_elapsed();
+
+#ifndef SIGALRM
+ if (progress_next_update > elapsed)
+ return;
+
+ progress_next_update = elapsed + 1000000;
+#endif
+
+ // Get our current position in the stream.
+ uint64_t in_pos;
+ uint64_t compressed_pos;
+ uint64_t uncompressed_pos;
+ progress_pos(&in_pos, &compressed_pos, &uncompressed_pos);
+
+ // Block signals so that fprintf() doesn't get interrupted.
+ signals_block();
+
+ // Print the filename if it hasn't been printed yet.
+ print_filename();
+
+	// Print the actual progress message. The idea is that there are at
+ // least three spaces between the fields in typical situations, but
+ // even in rare situations there is at least one space.
+ fprintf(stderr, " %7s %43s %9s %10s\r",
+ progress_percentage(in_pos, false),
+ progress_sizes(compressed_pos, uncompressed_pos, false),
+ progress_speed(uncompressed_pos, elapsed),
+ progress_remaining(in_pos, elapsed));
+
+#ifdef SIGALRM
+ // Updating the progress info was finished. Reset
+ // progress_needs_updating to wait for the next SIGALRM.
+ //
+ // NOTE: This has to be done before alarm(1) or with (very) bad
+ // luck we could be setting this to false after the alarm has already
+ // been triggered.
+ progress_needs_updating = false;
+
+ if (verbosity >= V_VERBOSE && progress_automatic) {
+ // Mark that the progress indicator is active, so if an error
+ // occurs, the error message gets printed cleanly.
+ progress_active = true;
+
+ // Restart the timer so that progress_needs_updating gets
+ // set to true after about one second.
+ alarm(1);
+ } else {
+ // The progress message was printed because user had sent us
+ // SIGALRM. In this case, each progress message is printed
+ // on its own line.
+ fputc('\n', stderr);
+ }
+#else
+ // When SIGALRM isn't supported and we get here, it's always due to
+	// automatic progress update. We set progress_active here too, as
+ // described above.
+ assert(verbosity >= V_VERBOSE);
+ assert(progress_automatic);
+ progress_active = true;
+#endif
+
+ signals_unblock();
+
+ return;
+}
+
+
+static void
+progress_flush(bool finished)
+{
+ if (!progress_started || verbosity < V_VERBOSE)
+ return;
+
+ uint64_t in_pos;
+ uint64_t compressed_pos;
+ uint64_t uncompressed_pos;
+ progress_pos(&in_pos, &compressed_pos, &uncompressed_pos);
+
+ // Avoid printing intermediate progress info if some error occurs
+ // in the beginning of the stream. (If something goes wrong later in
+ // the stream, it is sometimes useful to tell the user where the
+ // error approximately occurred, especially if the error occurs
+ // after a time-consuming operation.)
+ if (!finished && !progress_active
+ && (compressed_pos == 0 || uncompressed_pos == 0))
+ return;
+
+ progress_active = false;
+
+ const uint64_t elapsed = progress_elapsed();
+ const char *elapsed_str = progress_time(elapsed);
+
+ signals_block();
+
+ // When using the auto-updating progress indicator, the final
+ // statistics are printed in the same format as the progress
+ // indicator itself.
+ if (progress_automatic) {
+ // Using floating point conversion for the percentage instead
+ // of static "100.0 %" string, because the decimal separator
+ // isn't a dot in all locales.
+ fprintf(stderr, " %7s %43s %9s %10s\n",
+ progress_percentage(in_pos, finished),
+ progress_sizes(compressed_pos, uncompressed_pos, true),
+ progress_speed(uncompressed_pos, elapsed),
+ elapsed_str);
+ } else {
+ // The filename is always printed.
+ fprintf(stderr, "%s: ", filename);
+
+ // Percentage is printed only if we didn't finish yet.
+ // FIXME: This may look weird when size of the input
+ // isn't known.
+ if (!finished)
+ fprintf(stderr, "%s, ",
+ progress_percentage(in_pos, false));
+
+ // Size information is always printed.
+ fprintf(stderr, "%s", progress_sizes(
+ compressed_pos, uncompressed_pos, true));
+
+ // The speed and elapsed time aren't always shown.
+ const char *speed = progress_speed(uncompressed_pos, elapsed);
+ if (speed[0] != '\0')
+ fprintf(stderr, ", %s", speed);
+
+ if (elapsed_str[0] != '\0')
+ fprintf(stderr, ", %s", elapsed_str);
+
+ fputc('\n', stderr);
+ }
+
+ signals_unblock();
+
+ return;
+}
+
+
+extern void
+message_progress_end(bool success)
+{
+ assert(progress_started);
+ progress_flush(success);
+ progress_started = false;
+ return;
+}
+
+
+static void
+vmessage(enum message_verbosity v, const char *fmt, va_list ap)
+{
+ if (v <= verbosity) {
+ signals_block();
+
+ progress_flush(false);
+
+ fprintf(stderr, "%s: ", argv0);
+ vfprintf(stderr, fmt, ap);
+ fputc('\n', stderr);
+
+ signals_unblock();
+ }
+
+ return;
+}
+
+
+extern void
+message(enum message_verbosity v, const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vmessage(v, fmt, ap);
+ va_end(ap);
+ return;
+}
+
+
+extern void
+message_warning(const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vmessage(V_WARNING, fmt, ap);
+ va_end(ap);
+
+ set_exit_status(E_WARNING);
+ return;
+}
+
+
+extern void
+message_error(const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vmessage(V_ERROR, fmt, ap);
+ va_end(ap);
+
+ set_exit_status(E_ERROR);
+ return;
+}
+
+
+extern void
+message_fatal(const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vmessage(V_ERROR, fmt, ap);
+ va_end(ap);
+
+ my_exit(E_ERROR);
+}
+
+
+extern void
+message_bug(void)
+{
+ message_fatal(_("Internal error (bug)"));
+}
+
+
+extern void
+message_signal_handler(void)
+{
+ message_fatal(_("Cannot establish signal handlers"));
+}
+
+
+extern const char *
+message_strm(lzma_ret code)
+{
+ switch (code) {
+ case LZMA_NO_CHECK:
+ return _("No integrity check; not verifying file integrity");
+
+ case LZMA_UNSUPPORTED_CHECK:
+ return _("Unsupported type of integrity check; "
+ "not verifying file integrity");
+
+ case LZMA_MEM_ERROR:
+ return strerror(ENOMEM);
+
+ case LZMA_MEMLIMIT_ERROR:
+ return _("Memory usage limit reached");
+
+ case LZMA_FORMAT_ERROR:
+ return _("File format not recognized");
+
+ case LZMA_OPTIONS_ERROR:
+ return _("Unsupported options");
+
+ case LZMA_DATA_ERROR:
+ return _("Compressed data is corrupt");
+
+ case LZMA_BUF_ERROR:
+ return _("Unexpected end of input");
+
+ case LZMA_OK:
+ case LZMA_STREAM_END:
+ case LZMA_GET_CHECK:
+ case LZMA_PROG_ERROR:
+ return _("Internal error (bug)");
+ }
+
+ return NULL;
+}
+
+
+extern void
+message_filters(enum message_verbosity v, const lzma_filter *filters)
+{
+ if (v > verbosity)
+ return;
+
+ fprintf(stderr, _("%s: Filter chain:"), argv0);
+
+ for (size_t i = 0; filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
+ fprintf(stderr, " --");
+
+ switch (filters[i].id) {
+ case LZMA_FILTER_LZMA1:
+ case LZMA_FILTER_LZMA2: {
+ const lzma_options_lzma *opt = filters[i].options;
+ const char *mode;
+ const char *mf;
+
+ switch (opt->mode) {
+ case LZMA_MODE_FAST:
+ mode = "fast";
+ break;
+
+ case LZMA_MODE_NORMAL:
+ mode = "normal";
+ break;
+
+ default:
+ mode = "UNKNOWN";
+ break;
+ }
+
+ switch (opt->mf) {
+ case LZMA_MF_HC3:
+ mf = "hc3";
+ break;
+
+ case LZMA_MF_HC4:
+ mf = "hc4";
+ break;
+
+ case LZMA_MF_BT2:
+ mf = "bt2";
+ break;
+
+ case LZMA_MF_BT3:
+ mf = "bt3";
+ break;
+
+ case LZMA_MF_BT4:
+ mf = "bt4";
+ break;
+
+ default:
+ mf = "UNKNOWN";
+ break;
+ }
+
+ fprintf(stderr, "lzma%c=dict=%" PRIu32
+ ",lc=%" PRIu32 ",lp=%" PRIu32
+ ",pb=%" PRIu32
+ ",mode=%s,nice=%" PRIu32 ",mf=%s"
+ ",depth=%" PRIu32,
+ filters[i].id == LZMA_FILTER_LZMA2
+ ? '2' : '1',
+ opt->dict_size,
+ opt->lc, opt->lp, opt->pb,
+ mode, opt->nice_len, mf, opt->depth);
+ break;
+ }
+
+ case LZMA_FILTER_X86:
+ fprintf(stderr, "x86");
+ break;
+
+ case LZMA_FILTER_POWERPC:
+ fprintf(stderr, "powerpc");
+ break;
+
+ case LZMA_FILTER_IA64:
+ fprintf(stderr, "ia64");
+ break;
+
+ case LZMA_FILTER_ARM:
+ fprintf(stderr, "arm");
+ break;
+
+ case LZMA_FILTER_ARMTHUMB:
+ fprintf(stderr, "armthumb");
+ break;
+
+ case LZMA_FILTER_SPARC:
+ fprintf(stderr, "sparc");
+ break;
+
+ case LZMA_FILTER_DELTA: {
+ const lzma_options_delta *opt = filters[i].options;
+ fprintf(stderr, "delta=dist=%" PRIu32, opt->dist);
+ break;
+ }
+
+ default:
+ fprintf(stderr, "UNKNOWN");
+ break;
+ }
+ }
+
+ fputc('\n', stderr);
+ return;
+}
+
+
+extern void
+message_try_help(void)
+{
+ // Print this with V_WARNING instead of V_ERROR to prevent it from
+ // showing up when --quiet has been specified.
+ message(V_WARNING, _("Try `%s --help' for more information."), argv0);
+ return;
+}
+
+
+extern void
+message_version(void)
+{
+ // It is possible that liblzma version is different than the command
+ // line tool version, so print both.
+ printf("xz (" PACKAGE_NAME ") " LZMA_VERSION_STRING "\n");
+ printf("liblzma %s\n", lzma_version_string());
+ my_exit(E_SUCCESS);
+}
+
+
+extern void
+message_help(bool long_help)
+{
+ printf(_("Usage: %s [OPTION]... [FILE]...\n"
+ "Compress or decompress FILEs in the .xz format.\n\n"),
+ argv0);
+
+ puts(_("Mandatory arguments to long options are mandatory for "
+ "short options too.\n"));
+
+ if (long_help)
+ puts(_(" Operation mode:\n"));
+
+ puts(_(
+" -z, --compress force compression\n"
+" -d, --decompress force decompression\n"
+" -t, --test test compressed file integrity\n"
+" -l, --list list information about files"));
+
+ if (long_help)
+ puts(_("\n Operation modifiers:\n"));
+
+ puts(_(
+" -k, --keep keep (don't delete) input files\n"
+" -f, --force force overwrite of output file and (de)compress links\n"
+" -c, --stdout write to standard output and don't delete input files"));
+
+ if (long_help)
+ puts(_(
+" -S, --suffix=.SUF use the suffix `.SUF' on compressed files\n"
+" --files=[FILE] read filenames to process from FILE; if FILE is\n"
+" omitted, filenames are read from the standard input;\n"
+" filenames must be terminated with the newline character\n"
+" --files0=[FILE] like --files but use the null character as terminator"));
+
+ if (long_help) {
+ puts(_("\n Basic file format and compression options:\n"));
+ puts(_(
+" -F, --format=FMT file format to encode or decode; possible values are\n"
+" `auto' (default), `xz', `lzma', and `raw'\n"
+" -C, --check=CHECK integrity check type: `crc32', `crc64' (default),\n"
+" or `sha256'"));
+ }
+
+ puts(_(
+" -0 .. -9 compression preset; 0-2 fast compression, 3-5 good\n"
+" compression, 6-9 excellent compression; default is 6"));
+
+ puts(_(
+" -e, --extreme use more CPU time when encoding to increase compression\n"
+" ratio without increasing memory usage of the decoder"));
+
+ if (long_help)
+ puts(_(
+" -M, --memory=NUM use roughly NUM bytes of memory at maximum; 0 indicates\n"
+" the default setting, which depends on the operation mode\n"
+" and the amount of physical memory (RAM)"));
+
+ if (long_help) {
+ puts(_(
+"\n Custom filter chain for compression (alternative for using presets):"));
+
+#if defined(HAVE_ENCODER_LZMA1) || defined(HAVE_DECODER_LZMA1) \
+ || defined(HAVE_ENCODER_LZMA2) || defined(HAVE_DECODER_LZMA2)
+ puts(_(
+"\n"
+" --lzma1[=OPTS] LZMA1 or LZMA2; OPTS is a comma-separated list of zero or\n"
+" --lzma2[=OPTS] more of the following options (valid values; default):\n"
+" preset=NUM reset options to preset number NUM (0-9)\n"
+" dict=NUM dictionary size (4KiB - 1536MiB; 8MiB)\n"
+" lc=NUM number of literal context bits (0-4; 3)\n"
+" lp=NUM number of literal position bits (0-4; 0)\n"
+" pb=NUM number of position bits (0-4; 2)\n"
+" mode=MODE compression mode (fast, normal; normal)\n"
+" nice=NUM nice length of a match (2-273; 64)\n"
+" mf=NAME match finder (hc3, hc4, bt2, bt3, bt4; bt4)\n"
+" depth=NUM maximum search depth; 0=automatic (default)"));
+#endif
+
+ puts(_(
+"\n"
+" --x86[=OPTS] x86 BCJ filter\n"
+" --powerpc[=OPTS] PowerPC BCJ filter (big endian only)\n"
+" --ia64[=OPTS] IA64 (Itanium) BCJ filter\n"
+" --arm[=OPTS] ARM BCJ filter (little endian only)\n"
+" --armthumb[=OPTS] ARM-Thumb BCJ filter (little endian only)\n"
+" --sparc[=OPTS] SPARC BCJ filter\n"
+" Valid OPTS for all BCJ filters:\n"
+" start=NUM start offset for conversions (default=0)"));
+
+#if defined(HAVE_ENCODER_DELTA) || defined(HAVE_DECODER_DELTA)
+ puts(_(
+"\n"
+" --delta[=OPTS] Delta filter; valid OPTS (valid values; default):\n"
+" dist=NUM distance between bytes being subtracted\n"
+" from each other (1-256; 1)"));
+#endif
+
+#if defined(HAVE_ENCODER_SUBBLOCK) || defined(HAVE_DECODER_SUBBLOCK)
+ puts(_(
+"\n"
+" --subblock[=OPTS] Subblock filter; valid OPTS (valid values; default):\n"
+" size=NUM number of bytes of data per subblock\n"
+" (1 - 256Mi; 4Ki)\n"
+" rle=NUM run-length encoder chunk size (0-256; 0)"));
+#endif
+ }
+
+ if (long_help)
+ puts(_("\n Other options:\n"));
+
+ puts(_(
+" -q, --quiet suppress warnings; specify twice to suppress errors too\n"
+" -v, --verbose be verbose; specify twice for even more verbose"));
+
+ if (long_help)
+ puts(_(
+" -Q, --no-warn make warnings not affect the exit status"));
+
+ if (long_help)
+ puts(_(
+"\n"
+" -h, --help display the short help (lists only the basic options)\n"
+" -H, --long-help display this long help"));
+ else
+ puts(_(
+" -h, --help display this short help\n"
+" -H, --long-help display the long help (lists also the advanced options)"));
+
+ puts(_(
+" -V, --version display the version number"));
+
+ puts(_("\nWith no FILE, or when FILE is -, read standard input.\n"));
+
+ if (long_help) {
+ printf(_(
+"On this system and configuration, this program will use at maximum of roughly\n"
+"%s MiB RAM and "), uint64_to_str(hardware_memlimit_get() / (1024 * 1024), 0));
+ printf(N_("one thread.\n\n", "%s threads.\n\n",
+ hardware_threadlimit_get()),
+ uint64_to_str(hardware_threadlimit_get(), 0));
+ }
+
+ printf(_("Report bugs to <%s> (in English or Finnish).\n"),
+ PACKAGE_BUGREPORT);
+ printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_HOMEPAGE);
+
+ my_exit(E_SUCCESS);
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/message.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/message.h
new file mode 100644
index 00000000..11a44f2d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/message.h
@@ -0,0 +1,134 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file message.h
+/// \brief Printing messages to stderr
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+/// Verbosity levels
+enum message_verbosity {
+ V_SILENT, ///< No messages
+ V_ERROR, ///< Only error messages
+ V_WARNING, ///< Errors and warnings
+ V_VERBOSE, ///< Errors, warnings, and verbose statistics
+ V_DEBUG, ///< Debugging, FIXME remove?
+};
+
+
+/// \brief Initializes the message functions
+///
+/// \param	argv0		Name of the program, i.e. argv[0] from main()
+///
+/// If an error occurs, this function doesn't return.
+///
+extern void message_init(const char *argv0);
+
+
+/// Increase verbosity level by one step unless it was at maximum.
+extern void message_verbosity_increase(void);
+
+/// Decrease verbosity level by one step unless it was at minimum.
+extern void message_verbosity_decrease(void);
+
+
+/// Set the total number of files to be processed (stdin is counted as a file
+/// here). The default is one.
+extern void message_set_files(unsigned int files);
+
+
+/// \brief Print a message if verbosity level is at least "verbosity"
+///
+/// This doesn't touch the exit status.
+extern void message(enum message_verbosity verbosity, const char *fmt, ...)
+ lzma_attribute((format(printf, 2, 3)));
+
+
+/// \brief Prints a warning and possibly sets exit status
+///
+/// The message is printed only if verbosity level is at least V_WARNING.
+/// The exit status is set to E_WARNING unless it was already E_ERROR.
+extern void message_warning(const char *fmt, ...)
+ lzma_attribute((format(printf, 1, 2)));
+
+
+/// \brief Prints an error message and sets exit status
+///
+/// The message is printed only if verbosity level is at least V_ERROR.
+/// The exit status is set to E_ERROR.
+extern void message_error(const char *fmt, ...)
+ lzma_attribute((format(printf, 1, 2)));
+
+
+/// \brief Prints an error message and exits with E_ERROR
+///
+/// The message is printed only if verbosity level is at least V_ERROR.
+extern void message_fatal(const char *fmt, ...)
+ lzma_attribute((format(printf, 1, 2)))
+ lzma_attribute((noreturn));
+
+
+/// Print an error message that an internal error occurred and exit with
+/// E_ERROR.
+extern void message_bug(void) lzma_attribute((noreturn));
+
+
+/// Print a message that establishing signal handlers failed, and exit with
+/// exit status E_ERROR.
+extern void message_signal_handler(void) lzma_attribute((noreturn));
+
+
+/// Convert lzma_ret to a string.
+extern const char *message_strm(lzma_ret code);
+
+
+/// Print the filter chain.
+extern void message_filters(
+ enum message_verbosity v, const lzma_filter *filters);
+
+
+/// Print a message that user should try --help.
+extern void message_try_help(void);
+
+
+/// Prints the version number to stdout and exits with exit status E_SUCCESS.
+extern void message_version(void) lzma_attribute((noreturn));
+
+
+/// Print the help message.
+extern void message_help(bool long_help) lzma_attribute((noreturn));
+
+
+/// \brief Start progress info handling
+///
+/// This must be paired with a call to message_progress_end() before the
+/// given *strm becomes invalid.
+///
+/// \param strm Pointer to lzma_stream used for the coding.
+/// \param filename Name of the input file. stdin_filename is
+/// handled specially.
+/// \param in_size Size of the input file, or zero if unknown.
+///
+extern void message_progress_start(
+ lzma_stream *strm, const char *filename, uint64_t in_size);
+
+
+/// Update the progress info if in verbose mode and enough time has passed
+/// since the previous update. This can be called only when
+/// message_progress_start() has already been used.
+extern void message_progress_update(void);
+
+
+/// \brief Finishes the progress message if we were in verbose mode
+///
+/// \param finished True if the whole stream was successfully coded
+/// and output written to the output stream.
+///
+extern void message_progress_end(bool finished);
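+// A rough sketch of how the progress functions above fit together
+// (illustrative only; the real call sites are in the coder code, which is
+// not part of this header):
+//
+//	message_progress_start(&strm, filename, in_size);
+//	while (coding) {
+//		// ... run the encoder or decoder and write the output ...
+//		message_progress_update();
+//	}
+//	message_progress_end(success);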
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/options.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/options.c
new file mode 100644
index 00000000..c60f0c12
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/options.c
@@ -0,0 +1,440 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file options.c
+/// \brief Parser for filter-specific options
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "private.h"
+
+
+///////////////////
+// Generic stuff //
+///////////////////
+
+typedef struct {
+ const char *name;
+ uint64_t id;
+} name_id_map;
+
+
+typedef struct {
+ const char *name;
+ const name_id_map *map;
+ uint64_t min;
+ uint64_t max;
+} option_map;
+
+
+/// Parses option=value pairs that are separated with commas:
+/// opt=val,opt=val,opt=val
+///
+/// Each option is a string that is converted to an integer using the
+/// index where the option string is in the array.
+///
+/// Value can be
+/// - a string-id map mapping a list of possible string values to integers
+/// (opts[i].map != NULL, opts[i].min and opts[i].max are ignored);
+/// - a number with minimum and maximum value limit
+/// (opts[i].map == NULL && opts[i].min != UINT64_MAX);
+/// - a string that will be parsed by the filter-specific code
+/// (opts[i].map == NULL && opts[i].min == UINT64_MAX, opts[i].max ignored)
+///
+/// When parsing of both the option and the value succeeds, a
+/// filter-specific function is called, which should store the given
+/// value into the filter-specific options structure.
+///
+/// \param str String containing the options from the command line
+/// \param opts Filter-specific option map
+/// \param set Filter-specific function to update filter_options
+/// \param filter_options Pointer to filter-specific options structure
+///
+/// \return Returns only if no errors occur.
+///
+static void
+parse_options(const char *str, const option_map *opts,
+ void (*set)(void *filter_options,
+ uint32_t key, uint64_t value, const char *valuestr),
+ void *filter_options)
+{
+ if (str == NULL || str[0] == '\0')
+ return;
+
+ char *s = xstrdup(str);
+ char *name = s;
+
+ while (true) {
+ if (*name == ',') {
+ if (*++name == '\0')
+ break;
+
+ continue;
+ }
+
+ char *split = strchr(name, ',');
+ if (split != NULL)
+ *split = '\0';
+
+ char *value = strchr(name, '=');
+ if (value != NULL)
+ *value++ = '\0';
+
+ if (value == NULL || value[0] == '\0')
+ message_fatal(_("%s: Options must be `name=value' "
+ "pairs separated with commas"), str);
+
+ // Look for the option name from the option map.
+ bool found = false;
+ for (size_t i = 0; opts[i].name != NULL; ++i) {
+ if (strcmp(name, opts[i].name) != 0)
+ continue;
+
+ if (opts[i].map != NULL) {
+ // value is a string which we should map
+ // to an integer.
+ size_t j;
+ for (j = 0; opts[i].map[j].name != NULL; ++j) {
+ if (strcmp(opts[i].map[j].name, value)
+ == 0)
+ break;
+ }
+
+ if (opts[i].map[j].name == NULL)
+ message_fatal(_("%s: Invalid option "
+ "value"), value);
+
+ set(filter_options, i, opts[i].map[j].id,
+ value);
+
+ } else if (opts[i].min == UINT64_MAX) {
+ // value is a special string that will be
+ // parsed by set().
+ set(filter_options, i, 0, value);
+
+ } else {
+ // value is an integer.
+ const uint64_t v = str_to_uint64(name, value,
+ opts[i].min, opts[i].max);
+ set(filter_options, i, v, value);
+ }
+
+ found = true;
+ break;
+ }
+
+ if (!found)
+ message_fatal(_("%s: Invalid option name"), name);
+
+ if (split == NULL)
+ break;
+
+ name = split + 1;
+ }
+
+ free(s);
+ return;
+}
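+// Illustrative trace with a made-up option string: a call to
+// options_lzma("nice=128,mf=bt4") (defined later in this file) ends up
+// here. The string is split at the comma; "nice" matches an entry with an
+// integer range of 2-273, so set() is called with key OPT_NICE and value
+// 128, while "mf" uses the mfs name-id map, so set() is called with key
+// OPT_MF and value LZMA_MF_BT4.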
+
+
+//////////////
+// Subblock //
+//////////////
+
+enum {
+ OPT_SIZE,
+ OPT_RLE,
+ OPT_ALIGN,
+};
+
+
+static void
+set_subblock(void *options, uint32_t key, uint64_t value,
+ const char *valuestr lzma_attribute((unused)))
+{
+ lzma_options_subblock *opt = options;
+
+ switch (key) {
+ case OPT_SIZE:
+ opt->subblock_data_size = value;
+ break;
+
+ case OPT_RLE:
+ opt->rle = value;
+ break;
+
+ case OPT_ALIGN:
+ opt->alignment = value;
+ break;
+ }
+}
+
+
+extern lzma_options_subblock *
+options_subblock(const char *str)
+{
+ static const option_map opts[] = {
+ { "size", NULL, LZMA_SUBBLOCK_DATA_SIZE_MIN,
+ LZMA_SUBBLOCK_DATA_SIZE_MAX },
+ { "rle", NULL, LZMA_SUBBLOCK_RLE_OFF,
+ LZMA_SUBBLOCK_RLE_MAX },
+ { "align",NULL, LZMA_SUBBLOCK_ALIGNMENT_MIN,
+ LZMA_SUBBLOCK_ALIGNMENT_MAX },
+ { NULL, NULL, 0, 0 }
+ };
+
+ lzma_options_subblock *options
+ = xmalloc(sizeof(lzma_options_subblock));
+ *options = (lzma_options_subblock){
+ .allow_subfilters = false,
+ .alignment = LZMA_SUBBLOCK_ALIGNMENT_DEFAULT,
+ .subblock_data_size = LZMA_SUBBLOCK_DATA_SIZE_DEFAULT,
+ .rle = LZMA_SUBBLOCK_RLE_OFF,
+ };
+
+ parse_options(str, opts, &set_subblock, options);
+
+ return options;
+}
+
+
+///////////
+// Delta //
+///////////
+
+enum {
+ OPT_DIST,
+};
+
+
+static void
+set_delta(void *options, uint32_t key, uint64_t value,
+ const char *valuestr lzma_attribute((unused)))
+{
+ lzma_options_delta *opt = options;
+ switch (key) {
+ case OPT_DIST:
+ opt->dist = value;
+ break;
+ }
+}
+
+
+extern lzma_options_delta *
+options_delta(const char *str)
+{
+ static const option_map opts[] = {
+ { "dist", NULL, LZMA_DELTA_DIST_MIN,
+ LZMA_DELTA_DIST_MAX },
+ { NULL, NULL, 0, 0 }
+ };
+
+ lzma_options_delta *options = xmalloc(sizeof(lzma_options_delta));
+ *options = (lzma_options_delta){
+ // It's hard to give a useful default for this.
+ .type = LZMA_DELTA_TYPE_BYTE,
+ .dist = LZMA_DELTA_DIST_MIN,
+ };
+
+ parse_options(str, opts, &set_delta, options);
+
+ return options;
+}
+
+
+/////////
+// BCJ //
+/////////
+
+enum {
+ OPT_START_OFFSET,
+};
+
+
+static void
+set_bcj(void *options, uint32_t key, uint64_t value,
+ const char *valuestr lzma_attribute((unused)))
+{
+ lzma_options_bcj *opt = options;
+ switch (key) {
+ case OPT_START_OFFSET:
+ opt->start_offset = value;
+ break;
+ }
+}
+
+
+extern lzma_options_bcj *
+options_bcj(const char *str)
+{
+ static const option_map opts[] = {
+ { "start", NULL, 0, UINT32_MAX },
+ { NULL, NULL, 0, 0 }
+ };
+
+ lzma_options_bcj *options = xmalloc(sizeof(lzma_options_bcj));
+ *options = (lzma_options_bcj){
+ .start_offset = 0,
+ };
+
+ parse_options(str, opts, &set_bcj, options);
+
+ return options;
+}
+
+
+//////////
+// LZMA //
+//////////
+
+enum {
+ OPT_PRESET,
+ OPT_DICT,
+ OPT_LC,
+ OPT_LP,
+ OPT_PB,
+ OPT_MODE,
+ OPT_NICE,
+ OPT_MF,
+ OPT_DEPTH,
+};
+
+
+static void lzma_attribute((noreturn))
+error_lzma_preset(const char *valuestr)
+{
+ message_fatal(_("Unsupported LZMA1/LZMA2 preset: %s"), valuestr);
+}
+
+
+static void
+set_lzma(void *options, uint32_t key, uint64_t value, const char *valuestr)
+{
+ lzma_options_lzma *opt = options;
+
+ switch (key) {
+ case OPT_PRESET: {
+ if (valuestr[0] < '0' || valuestr[0] > '9')
+ error_lzma_preset(valuestr);
+
+ uint32_t preset = valuestr[0] - '0';
+
+ // Currently only "e" is supported as a modifier,
+ // so keep this simple for now.
+ if (valuestr[1] != '\0') {
+ if (valuestr[1] == 'e')
+ preset |= LZMA_PRESET_EXTREME;
+ else
+ error_lzma_preset(valuestr);
+
+ if (valuestr[2] != '\0')
+ error_lzma_preset(valuestr);
+ }
+
+ if (lzma_lzma_preset(options, preset))
+ error_lzma_preset(valuestr);
+
+ break;
+ }
+
+ case OPT_DICT:
+ opt->dict_size = value;
+ break;
+
+ case OPT_LC:
+ opt->lc = value;
+ break;
+
+ case OPT_LP:
+ opt->lp = value;
+ break;
+
+ case OPT_PB:
+ opt->pb = value;
+ break;
+
+ case OPT_MODE:
+ opt->mode = value;
+ break;
+
+ case OPT_NICE:
+ opt->nice_len = value;
+ break;
+
+ case OPT_MF:
+ opt->mf = value;
+ break;
+
+ case OPT_DEPTH:
+ opt->depth = value;
+ break;
+ }
+}
+
+
+extern lzma_options_lzma *
+options_lzma(const char *str)
+{
+ static const name_id_map modes[] = {
+ { "fast", LZMA_MODE_FAST },
+ { "normal", LZMA_MODE_NORMAL },
+ { NULL, 0 }
+ };
+
+ static const name_id_map mfs[] = {
+ { "hc3", LZMA_MF_HC3 },
+ { "hc4", LZMA_MF_HC4 },
+ { "bt2", LZMA_MF_BT2 },
+ { "bt3", LZMA_MF_BT3 },
+ { "bt4", LZMA_MF_BT4 },
+ { NULL, 0 }
+ };
+
+ static const option_map opts[] = {
+ { "preset", NULL, UINT64_MAX, 0 },
+ { "dict", NULL, LZMA_DICT_SIZE_MIN,
+ (UINT32_C(1) << 30) + (UINT32_C(1) << 29) },
+ { "lc", NULL, LZMA_LCLP_MIN, LZMA_LCLP_MAX },
+ { "lp", NULL, LZMA_LCLP_MIN, LZMA_LCLP_MAX },
+ { "pb", NULL, LZMA_PB_MIN, LZMA_PB_MAX },
+ { "mode", modes, 0, 0 },
+ { "nice", NULL, 2, 273 },
+ { "mf", mfs, 0, 0 },
+ { "depth", NULL, 0, UINT32_MAX },
+ { NULL, NULL, 0, 0 }
+ };
+
+ lzma_options_lzma *options = xmalloc(sizeof(lzma_options_lzma));
+ *options = (lzma_options_lzma){
+ .dict_size = LZMA_DICT_SIZE_DEFAULT,
+ .preset_dict = NULL,
+ .preset_dict_size = 0,
+ .lc = LZMA_LC_DEFAULT,
+ .lp = LZMA_LP_DEFAULT,
+ .pb = LZMA_PB_DEFAULT,
+ .persistent = false,
+ .mode = LZMA_MODE_NORMAL,
+ .nice_len = 64,
+ .mf = LZMA_MF_BT4,
+ .depth = 0,
+ };
+
+ parse_options(str, opts, &set_lzma, options);
+
+ if (options->lc + options->lp > LZMA_LCLP_MAX)
+ message_fatal(_("The sum of lc and lp must be at "
+ "maximum of 4"));
+
+ const uint32_t nice_len_min = options->mf & 0x0F;
+ if (options->nice_len < nice_len_min)
+ message_fatal(_("The selected match finder requires at "
+ "least nice=%" PRIu32), nice_len_min);
+
+ return options;
+}
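+// Note on the nice_len check above: liblzma's match finder constants carry
+// the finder's minimum usable match length in their low nibble (e.g.
+// LZMA_MF_HC3 is 0x03 and LZMA_MF_BT4 is 0x14 in lzma.h), which is what
+// options->mf & 0x0F extracts. For example, "mf=bt4,nice=2" would be
+// rejected with a request for at least nice=4.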
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/options.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/options.h
new file mode 100644
index 00000000..6daa5aa9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/options.h
@@ -0,0 +1,40 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file options.h
+/// \brief Parser for filter-specific options
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+/// \brief Parser for Subblock options
+///
+/// \return Pointer to allocated options structure.
+/// Doesn't return on error.
+extern lzma_options_subblock *options_subblock(const char *str);
+
+
+/// \brief Parser for Delta options
+///
+/// \return Pointer to allocated options structure.
+/// Doesn't return on error.
+extern lzma_options_delta *options_delta(const char *str);
+
+
+/// \brief Parser for BCJ options
+///
+/// \return Pointer to allocated options structure.
+/// Doesn't return on error.
+extern lzma_options_bcj *options_bcj(const char *str);
+
+
+/// \brief Parser for LZMA options
+///
+/// \return Pointer to allocated options structure.
+/// Doesn't return on error.
+extern lzma_options_lzma *options_lzma(const char *str);
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/private.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/private.h
new file mode 100644
index 00000000..50883ac6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/private.h
@@ -0,0 +1,57 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file private.h
+/// \brief Common includes, definitions, and prototypes
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include "mythread.h"
+#include "lzma.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <signal.h>
+#include <locale.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#ifdef ENABLE_NLS
+# include <libintl.h>
+# define _(msgid) gettext(msgid)
+# define N_(msgid1, msgid2, n) ngettext(msgid1, msgid2, n)
+#else
+# define _(msgid) (msgid)
+# define N_(msgid1, msgid2, n) ((n) == 1 ? (msgid1) : (msgid2))
+#endif
+
+#ifndef STDIN_FILENO
+# define STDIN_FILENO (fileno(stdin))
+#endif
+
+#ifndef STDOUT_FILENO
+# define STDOUT_FILENO (fileno(stdout))
+#endif
+
+#ifndef STDERR_FILENO
+# define STDERR_FILENO (fileno(stderr))
+#endif
+
+#include "main.h"
+#include "coder.h"
+#include "message.h"
+#include "args.h"
+#include "hardware.h"
+#include "file_io.h"
+#include "options.h"
+#include "signals.h"
+#include "suffix.h"
+#include "util.h"
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/signals.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/signals.c
new file mode 100644
index 00000000..b6dd8cf3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/signals.c
@@ -0,0 +1,175 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file signals.c
+/// \brief Handling signals to abort operation
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "private.h"
+
+
+volatile sig_atomic_t user_abort = false;
+
+
+#ifndef _WIN32
+
+/// If we were interrupted by a signal, we store the signal number so that
+/// we can raise that signal to kill the program when all cleanups have
+/// been done.
+static volatile sig_atomic_t exit_signal = 0;
+
+/// Mask of signals for which we have established a signal handler to set
+/// user_abort to true.
+static sigset_t hooked_signals;
+
+/// signals_block() and signals_unblock() can be called recursively.
+static size_t signals_block_count = 0;
+
+
+static void
+signal_handler(int sig)
+{
+ exit_signal = sig;
+ user_abort = true;
+ return;
+}
+
+
+extern void
+signals_init(void)
+{
+ // List of signals for which we establish the signal handler.
+ static const int sigs[] = {
+ SIGINT,
+ SIGTERM,
+#ifdef SIGHUP
+ SIGHUP,
+#endif
+#ifdef SIGPIPE
+ SIGPIPE,
+#endif
+#ifdef SIGXCPU
+ SIGXCPU,
+#endif
+#ifdef SIGXFSZ
+ SIGXFSZ,
+#endif
+ };
+
+ // Mask of the signals for which we have established a signal handler.
+ sigemptyset(&hooked_signals);
+ for (size_t i = 0; i < ARRAY_SIZE(sigs); ++i)
+ sigaddset(&hooked_signals, sigs[i]);
+
+ struct sigaction sa;
+
+	// All the signals that we handle are also blocked while the signal
+	// handler runs.
+ sa.sa_mask = hooked_signals;
+
+ // Don't set SA_RESTART, because we want EINTR so that we can check
+	// for user_abort and clean up before exiting. We block the signals
+ // for which we have established a handler when we don't want EINTR.
+ sa.sa_flags = 0;
+ sa.sa_handler = &signal_handler;
+
+ for (size_t i = 0; i < ARRAY_SIZE(sigs); ++i) {
+ // If the parent process has left some signals ignored,
+ // we don't unignore them.
+ struct sigaction old;
+ if (sigaction(sigs[i], NULL, &old) == 0
+ && old.sa_handler == SIG_IGN)
+ continue;
+
+ // Establish the signal handler.
+ if (sigaction(sigs[i], &sa, NULL))
+ message_signal_handler();
+ }
+
+ return;
+}
+
+
+extern void
+signals_block(void)
+{
+ if (signals_block_count++ == 0) {
+ const int saved_errno = errno;
+ mythread_sigmask(SIG_BLOCK, &hooked_signals, NULL);
+ errno = saved_errno;
+ }
+
+ return;
+}
+
+
+extern void
+signals_unblock(void)
+{
+ assert(signals_block_count > 0);
+
+ if (--signals_block_count == 0) {
+ const int saved_errno = errno;
+ mythread_sigmask(SIG_UNBLOCK, &hooked_signals, NULL);
+ errno = saved_errno;
+ }
+
+ return;
+}
+
+
+extern void
+signals_exit(void)
+{
+ const int sig = exit_signal;
+
+ if (sig != 0) {
+ struct sigaction sa;
+ sa.sa_handler = SIG_DFL;
+ sigfillset(&sa.sa_mask);
+ sa.sa_flags = 0;
+ sigaction(sig, &sa, NULL);
+ raise(exit_signal);
+ }
+
+ return;
+}
+
+#else
+
+// While Windows has some very basic signal handling functions as required
+// by C89, they are not really used, or so I understood. Instead, we use
+// SetConsoleCtrlHandler() to catch user pressing C-c.
+
+#include <windows.h>
+
+
+static BOOL WINAPI
+signal_handler(DWORD type lzma_attribute((unused)))
+{
+ // Since we don't get a signal number which we could raise() at
+ // signals_exit() like on POSIX, just set the exit status to
+ // indicate an error, so that we cannot return with zero exit status.
+ set_exit_status(E_ERROR);
+ user_abort = true;
+ return TRUE;
+}
+
+
+extern void
+signals_init(void)
+{
+ if (!SetConsoleCtrlHandler(&signal_handler, TRUE))
+ message_signal_handler();
+
+ return;
+}
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/signals.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/signals.h
new file mode 100644
index 00000000..7573810b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/signals.h
@@ -0,0 +1,46 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file signals.h
+/// \brief Handling signals to abort operation
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+/// If this is true, we will clean up the possibly incomplete output file
+/// and return to main() as soon as practical. That is, the code needs to poll
+/// this variable in various places.
+extern volatile sig_atomic_t user_abort;
+
+
+/// Initialize the signal handler, which will set user_abort to true when
+/// user e.g. presses C-c.
+extern void signals_init(void);
+
+
+#ifndef _WIN32
+
+/// Block the signals which don't have SA_RESTART and which would just set
+/// user_abort to true. This is handy when we don't want to handle EINTR
+/// and don't want SA_RESTART either.
+extern void signals_block(void);
+
+/// Unblock the signals blocked by signals_block().
+extern void signals_unblock(void);
+
+/// If user has sent us a signal earlier to terminate the process,
+/// re-raise that signal to actually terminate the process.
+extern void signals_exit(void);
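+
+/// Illustrative usage sketch (not part of the original sources): a caller
+/// typically wraps a non-restartable critical section as
+///
+///     signals_block();
+///     // ... work that must not be interrupted by EINTR ...
+///     signals_unblock();
+///
+/// and calls signals_exit() once cleanup is finished, so that a pending
+/// SIGINT/SIGTERM terminates the process with the correct signal.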
+
+#else
+
+#define signals_block() do { } while (0)
+#define signals_unblock() do { } while (0)
+#define signals_exit() do { } while (0)
+
+#endif
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/suffix.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/suffix.c
new file mode 100644
index 00000000..42a9ec62
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/suffix.c
@@ -0,0 +1,213 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file suffix.c
+/// \brief Checks filename suffix and creates the destination filename
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "private.h"
+
+// For case-insensitive filename suffix on case-insensitive systems
+#ifdef DOSLIKE
+# define strcmp strcasecmp
+#endif
+
+
+static char *custom_suffix = NULL;
+
+
+struct suffix_pair {
+ const char *compressed;
+ const char *uncompressed;
+};
+
+
+/// \brief Checks if src_name has given compressed_suffix
+///
+/// \param suffix Filename suffix to look for
+/// \param src_name Input filename
+/// \param src_len strlen(src_name)
+///
+/// \return If src_name has the suffix, src_len - strlen(suffix) is
+/// returned. It's always a positive integer. Otherwise zero
+/// is returned.
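+///
+/// Illustrative example (not from the original sources):
+/// test_suffix(".xz", "dir/foo.xz", 10) returns 7, because the name ends
+/// with ".xz" and strlen("dir/foo") == 7.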
+static size_t
+test_suffix(const char *suffix, const char *src_name, size_t src_len)
+{
+ const size_t suffix_len = strlen(suffix);
+
+ // The filename must have at least one character in addition to
+ // the suffix. src_name may contain path to the filename, so we
+ // need to check for directory separator too.
+ if (src_len <= suffix_len || src_name[src_len - suffix_len - 1] == '/')
+ return 0;
+
+ if (strcmp(suffix, src_name + src_len - suffix_len) == 0)
+ return src_len - suffix_len;
+
+ return 0;
+}
+
+
+/// \brief Removes the filename suffix of the compressed file
+///
+/// \return Name of the uncompressed file, or NULL if file has unknown
+/// suffix.
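+///
+/// Illustrative example (not from the original sources):
+/// uncompressed_name("foo.txz", 7) returns a newly allocated "foo.tar",
+/// since the ".txz" suffix maps to ".tar" in the table below.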
+static char *
+uncompressed_name(const char *src_name, const size_t src_len)
+{
+ static const struct suffix_pair suffixes[] = {
+ { ".xz", "" },
+ { ".txz", ".tar" }, // .txz abbreviation for .txt.gz is rare.
+ { ".lzma", "" },
+ { ".tlz", ".tar" },
+ // { ".gz", "" },
+ // { ".tgz", ".tar" },
+ };
+
+ const char *new_suffix = "";
+ size_t new_len = 0;
+
+ if (opt_format == FORMAT_RAW) {
+ // Don't check for known suffixes when --format=raw was used.
+ if (custom_suffix == NULL) {
+ message_error(_("%s: With --format=raw, "
+ "--suffix=.SUF is required unless "
+ "writing to stdout"), src_name);
+ return NULL;
+ }
+ } else {
+ for (size_t i = 0; i < ARRAY_SIZE(suffixes); ++i) {
+ new_len = test_suffix(suffixes[i].compressed,
+ src_name, src_len);
+ if (new_len != 0) {
+ new_suffix = suffixes[i].uncompressed;
+ break;
+ }
+ }
+ }
+
+ if (new_len == 0 && custom_suffix != NULL)
+ new_len = test_suffix(custom_suffix, src_name, src_len);
+
+ if (new_len == 0) {
+ message_warning(_("%s: Filename has an unknown suffix, "
+ "skipping"), src_name);
+ return NULL;
+ }
+
+ const size_t new_suffix_len = strlen(new_suffix);
+ char *dest_name = xmalloc(new_len + new_suffix_len + 1);
+
+ memcpy(dest_name, src_name, new_len);
+ memcpy(dest_name + new_len, new_suffix, new_suffix_len);
+ dest_name[new_len + new_suffix_len] = '\0';
+
+ return dest_name;
+}
+
+
+/// \brief Appends suffix to src_name
+///
+/// In contrast to uncompressed_name(), we check only suffixes that are valid
+/// for the specified file format.
+static char *
+compressed_name(const char *src_name, const size_t src_len)
+{
+ // The order of these must match the order in args.h.
+ static const struct suffix_pair all_suffixes[][3] = {
+ {
+ { ".xz", "" },
+ { ".txz", ".tar" },
+ { NULL, NULL }
+ }, {
+ { ".lzma", "" },
+ { ".tlz", ".tar" },
+ { NULL, NULL }
+/*
+ }, {
+ { ".gz", "" },
+ { ".tgz", ".tar" },
+ { NULL, NULL }
+*/
+ }, {
+ // --format=raw requires specifying the suffix
+ // manually or using stdout.
+ { NULL, NULL }
+ }
+ };
+
+ // args.c ensures this.
+ assert(opt_format != FORMAT_AUTO);
+
+ const size_t format = opt_format - 1;
+ const struct suffix_pair *const suffixes = all_suffixes[format];
+
+ for (size_t i = 0; suffixes[i].compressed != NULL; ++i) {
+ if (test_suffix(suffixes[i].compressed, src_name, src_len)
+ != 0) {
+ message_warning(_("%s: File already has `%s' "
+ "suffix, skipping"), src_name,
+ suffixes[i].compressed);
+ return NULL;
+ }
+ }
+
+ // TODO: Hmm, maybe it would be better to validate this in args.c,
+ // since the suffix handling when decoding is weird now.
+ if (opt_format == FORMAT_RAW && custom_suffix == NULL) {
+ message_error(_("%s: With --format=raw, "
+ "--suffix=.SUF is required unless "
+ "writing to stdout"), src_name);
+ return NULL;
+ }
+
+ const char *suffix = custom_suffix != NULL
+ ? custom_suffix : suffixes[0].compressed;
+ const size_t suffix_len = strlen(suffix);
+
+ char *dest_name = xmalloc(src_len + suffix_len + 1);
+
+ memcpy(dest_name, src_name, src_len);
+ memcpy(dest_name + src_len, suffix, suffix_len);
+ dest_name[src_len + suffix_len] = '\0';
+
+ return dest_name;
+}
+
+
+extern char *
+suffix_get_dest_name(const char *src_name)
+{
+ assert(src_name != NULL);
+
+ // Length of the name is needed in all cases to locate the end of
+ // the string to compare the suffix, so calculate the length here.
+ const size_t src_len = strlen(src_name);
+
+ return opt_mode == MODE_COMPRESS
+ ? compressed_name(src_name, src_len)
+ : uncompressed_name(src_name, src_len);
+}
+
+
+extern void
+suffix_set(const char *suffix)
+{
+ // Empty suffix and suffixes having a slash are rejected. Such
+ // suffixes would break things later.
+ if (suffix[0] == '\0' || strchr(suffix, '/') != NULL)
+ message_fatal(_("%s: Invalid filename suffix"), optarg);
+
+ // Replace the old custom_suffix (if any) with the new suffix.
+ free(custom_suffix);
+ custom_suffix = xstrdup(suffix);
+ return;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/suffix.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/suffix.h
new file mode 100644
index 00000000..ca455038
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/suffix.h
@@ -0,0 +1,30 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file suffix.h
+/// \brief Checks filename suffix and creates the destination filename
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+/// \brief Get the name of the destination file
+///
+/// Depending on the global variable opt_mode, this tries to find a matching
+/// counterpart for src_name. If the name can be constructed, it is allocated
+/// and returned (caller must free it). On error, a message is printed and
+/// NULL is returned.
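+///
+/// Illustrative example (not from the original sources): when compressing
+/// to the default .xz format with no custom suffix, "foo.tar" becomes
+/// "foo.tar.xz"; when decompressing, "foo.tar.xz" becomes "foo.tar" again.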
+extern char *suffix_get_dest_name(const char *src_name);
+
+
+/// \brief Set a custom filename suffix
+///
+/// This function calls xstrdup() for the given suffix, thus the caller
+/// doesn't need to keep the memory allocated. There can be only one custom
+/// suffix, thus if this is called multiple times, the old suffixes are freed
+/// and forgotten.
+extern void suffix_set(const char *suffix);
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/util.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/util.c
new file mode 100644
index 00000000..a767ea0b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/util.c
@@ -0,0 +1,231 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file util.c
+/// \brief Miscellaneous utility functions
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "private.h"
+
+
+// Thousand separator for format strings is not supported outside POSIX.
+// This is used in uint64_to_str() and double_to_str().
+#ifdef DOSLIKE
+# define THOUSAND ""
+#else
+# define THOUSAND "'"
+#endif
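+
+// Illustrative note (not part of the original sources): on POSIX systems
+// the resulting formats are "%'" PRIu64 (in uint64_to_str()) and "%'.1f"
+// (in double_to_str()); the apostrophe is the POSIX printf() flag that
+// inserts locale-specific thousand separators.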
+
+
+extern void *
+xrealloc(void *ptr, size_t size)
+{
+ assert(size > 0);
+
+ ptr = realloc(ptr, size);
+ if (ptr == NULL)
+ message_fatal("%s", strerror(errno));
+
+ return ptr;
+}
+
+
+extern char *
+xstrdup(const char *src)
+{
+ assert(src != NULL);
+ const size_t size = strlen(src) + 1;
+ char *dest = xmalloc(size);
+ return memcpy(dest, src, size);
+}
+
+
+extern uint64_t
+str_to_uint64(const char *name, const char *value, uint64_t min, uint64_t max)
+{
+ uint64_t result = 0;
+
+ // Skip blanks.
+ while (*value == ' ' || *value == '\t')
+ ++value;
+
+ // Accept special value "max". Supporting "min" doesn't seem useful.
+ if (strcmp(value, "max") == 0)
+ return max;
+
+ if (*value < '0' || *value > '9')
+ message_fatal(_("%s: Value is not a non-negative "
+ "decimal integer"), value);
+
+ do {
+ // Don't overflow.
+ if (result > (UINT64_MAX - 9) / 10)
+ goto error;
+
+ result *= 10;
+ result += *value - '0';
+ ++value;
+ } while (*value >= '0' && *value <= '9');
+
+ if (*value != '\0') {
+ // Look for suffix.
+ static const struct {
+ const char name[4];
+ uint64_t multiplier;
+ } suffixes[] = {
+ { "k", UINT64_C(1000) },
+ { "kB", UINT64_C(1000) },
+ { "M", UINT64_C(1000000) },
+ { "MB", UINT64_C(1000000) },
+ { "G", UINT64_C(1000000000) },
+ { "GB", UINT64_C(1000000000) },
+ { "Ki", UINT64_C(1024) },
+ { "KiB", UINT64_C(1024) },
+ { "Mi", UINT64_C(1048576) },
+ { "MiB", UINT64_C(1048576) },
+ { "Gi", UINT64_C(1073741824) },
+ { "GiB", UINT64_C(1073741824) }
+ };
+
+ uint64_t multiplier = 0;
+ for (size_t i = 0; i < ARRAY_SIZE(suffixes); ++i) {
+ if (strcmp(value, suffixes[i].name) == 0) {
+ multiplier = suffixes[i].multiplier;
+ break;
+ }
+ }
+
+ if (multiplier == 0) {
+ message(V_ERROR, _("%s: Invalid multiplier suffix. "
+ "Valid suffixes:"), value);
+ message_fatal("`k' (10^3), `M' (10^6), `G' (10^9) "
+ "`Ki' (2^10), `Mi' (2^20), "
+ "`Gi' (2^30)");
+ }
+
+ // Don't overflow here either.
+ if (result > UINT64_MAX / multiplier)
+ goto error;
+
+ result *= multiplier;
+ }
+
+ if (result < min || result > max)
+ goto error;
+
+ return result;
+
+error:
+ message_fatal(_("Value of the option `%s' must be in the range "
+ "[%" PRIu64 ", %" PRIu64 "]"),
+ name, min, max);
+}
+
+
+extern const char *
+uint64_to_str(uint64_t value, uint32_t slot)
+{
+ // 2^64 with thousand separators is 26 bytes plus trailing '\0'.
+ static char bufs[4][32];
+
+ assert(slot < ARRAY_SIZE(bufs));
+
+ snprintf(bufs[slot], sizeof(bufs[slot]), "%" THOUSAND PRIu64, value);
+ return bufs[slot];
+}
+
+
+extern const char *
+double_to_str(double value)
+{
+ // 64 bytes is surely enough, since it won't fit in some other
+ // fields anyway.
+ static char buf[64];
+
+ snprintf(buf, sizeof(buf), "%" THOUSAND ".1f", value);
+ return buf;
+}
+
+
+/*
+/// \brief Simple quoting to get rid of ASCII control characters
+///
+/// This is not so cool and locale-dependent, but should be good enough.
+/// At least we don't print any control characters on the terminal.
+///
+extern char *
+str_quote(const char *str)
+{
+ size_t dest_len = 0;
+ bool has_ctrl = false;
+
+ while (str[dest_len] != '\0')
+ if (*(unsigned char *)(str + dest_len++) < 0x20)
+ has_ctrl = true;
+
+ char *dest = malloc(dest_len + 1);
+ if (dest != NULL) {
+ if (has_ctrl) {
+ for (size_t i = 0; i < dest_len; ++i)
+ if (*(unsigned char *)(str + i) < 0x20)
+ dest[i] = '?';
+ else
+ dest[i] = str[i];
+
+ dest[dest_len] = '\0';
+
+ } else {
+ // Usually there are no control characters,
+ // so we can optimize.
+ memcpy(dest, str, dest_len + 1);
+ }
+ }
+
+ return dest;
+}
+*/
+
+
+extern bool
+is_empty_filename(const char *filename)
+{
+ if (filename[0] == '\0') {
+ message_error(_("Empty filename, skipping"));
+ return true;
+ }
+
+ return false;
+}
+
+
+extern bool
+is_tty_stdin(void)
+{
+ const bool ret = isatty(STDIN_FILENO);
+
+ if (ret)
+ message_error(_("Compressed data not read from a terminal "
+ "unless `--force' is used."));
+
+ return ret;
+}
+
+
+extern bool
+is_tty_stdout(void)
+{
+ const bool ret = isatty(STDOUT_FILENO);
+
+ if (ret)
+ message_error(_("Compressed data not written to a terminal "
+ "unless `--force' is used."));
+
+ return ret;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/util.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/util.h
new file mode 100644
index 00000000..9ea7fed5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/util.h
@@ -0,0 +1,81 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file util.h
+/// \brief Miscellaneous utility functions
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+/// \brief Safe malloc() that never returns NULL
+///
+/// \note xmalloc(), xrealloc(), and xstrdup() must not be used when
+/// there are files open for writing, that should be cleaned up
+/// before exiting.
+#define xmalloc(size) xrealloc(NULL, size)
+
+
+/// \brief Safe realloc() that never returns NULL
+extern void *xrealloc(void *ptr, size_t size);
+
+
+/// \brief Safe strdup() that never returns NULL
+extern char *xstrdup(const char *src);
+
+
+/// \brief Fancy version of strtoull()
+///
+/// \param name Name of the option to show in case of an error
+/// \param value String containing the number to be parsed; may
+/// contain suffixes "k", "M", "G", "Ki", "Mi", or "Gi"
+/// \param min Minimum valid value
+/// \param max Maximum valid value
+///
+/// \return Parsed value that is in the range [min, max]. Does not return
+/// if an error occurs.
+///
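+/// Illustrative example (not from the original sources):
+/// str_to_uint64("memory", "64MiB", 1, UINT64_MAX) returns 67108864.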
+extern uint64_t str_to_uint64(const char *name, const char *value,
+ uint64_t min, uint64_t max);
+
+
+/// \brief Convert uint64_t to a string
+///
+/// Convert the given value to a string with locale-specific thousand
+/// separators, if supported by the snprintf() implementation. The string
+/// is stored into an internal static buffer indicated by the slot argument.
+/// A pointer to the selected buffer is returned.
+///
+/// This function exists, because non-POSIX systems don't support thousand
+/// separator in format strings. Solving the problem in a simple way doesn't
+/// work, because it breaks gettext (specifically, the xgettext tool).
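+///
+/// Illustrative example (not from the original sources): the slot argument
+/// selects one of a few internal static buffers, so two values can be
+/// formatted in one printf-style call, e.g. uint64_to_str(in_size, 0)
+/// and uint64_to_str(out_size, 1).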
+extern const char *uint64_to_str(uint64_t value, uint32_t slot);
+
+
+/// \brief Convert double to a string with one decimal place
+///
+/// This is like uint64_to_str() except that this converts a double and
+/// uses exactly one decimal place.
+extern const char *double_to_str(double value);
+
+
+/// \brief Check if filename is empty and print an error message
+extern bool is_empty_filename(const char *filename);
+
+
+/// \brief Test if stdin is a terminal
+///
+/// If stdin is a terminal, an error message is printed and exit status set
+/// to EXIT_ERROR.
+extern bool is_tty_stdin(void);
+
+
+/// \brief Test if stdout is a terminal
+///
+/// If stdout is a terminal, an error message is printed and exit status set
+/// to EXIT_ERROR.
+extern bool is_tty_stdout(void);
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/xz.1 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/xz.1
new file mode 100644
index 00000000..ad3d8463
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/xz.1
@@ -0,0 +1,1250 @@
+'\" t
+.\"
+.\" Author: Lasse Collin
+.\"
+.\" This file has been put into the public domain.
+.\" You can do whatever you want with this file.
+.\"
+.TH XZ 1 "2009-08-27" "Tukaani" "XZ Utils"
+.SH NAME
+xz, unxz, xzcat, lzma, unlzma, lzcat \- Compress or decompress .xz and .lzma files
+.SH SYNOPSIS
+.B xz
+.RI [ option ]...
+.RI [ file ]...
+.PP
+.B unxz
+is equivalent to
+.BR "xz \-\-decompress" .
+.br
+.B xzcat
+is equivalent to
+.BR "xz \-\-decompress \-\-stdout" .
+.br
+.B lzma
+is equivalent to
+.BR "xz \-\-format=lzma" .
+.br
+.B unlzma
+is equivalent to
+.BR "xz \-\-format=lzma \-\-decompress" .
+.br
+.B lzcat
+is equivalent to
+.BR "xz \-\-format=lzma \-\-decompress \-\-stdout" .
+.PP
+When writing scripts that need to decompress files, it is recommended to
+always use the name
+.B xz
+with appropriate arguments
+.RB ( "xz \-d"
+or
+.BR "xz \-dc" )
+instead of the names
+.B unxz
+and
+.BR xzcat .
+.SH DESCRIPTION
+.B xz
+is a general-purpose data compression tool with command line syntax similar to
+.BR gzip (1)
+and
+.BR bzip2 (1).
+The native file format is the
+.B .xz
+format, but also the legacy
+.B .lzma
+format and raw compressed streams with no container format headers
+are supported.
+.PP
+.B xz
+compresses or decompresses each
+.I file
+according to the selected operation mode.
+If no
+.I files
+are given or
+.I file
+is
+.BR \- ,
+.B xz
+reads from standard input and writes the processed data to standard output.
+.B xz
+will refuse (display an error and skip the
+.IR file )
+to write compressed data to standard output if it is a terminal. Similarly,
+.B xz
+will refuse to read compressed data from standard input if it is a terminal.
+.PP
+Unless
+.B \-\-stdout
+is specified,
+.I files
+other than
+.B \-
+are written to a new file whose name is derived from the source
+.I file
+name:
+.IP \(bu 3
+When compressing, the suffix of the target file format
+.RB ( .xz
+or
+.BR .lzma )
+is appended to the source filename to get the target filename.
+.IP \(bu 3
+When decompressing, the
+.B .xz
+or
+.B .lzma
+suffix is removed from the filename to get the target filename.
+.B xz
+also recognizes the suffixes
+.B .txz
+and
+.BR .tlz ,
+and replaces them with the
+.B .tar
+suffix.
+.PP
+If the target file already exists, an error is displayed and the
+.I file
+is skipped.
+.PP
+Unless writing to standard output,
+.B xz
+will display a warning and skip the
+.I file
+if any of the following applies:
+.IP \(bu 3
+.I File
+is not a regular file. Symbolic links are not followed, thus they
+are never considered to be regular files.
+.IP \(bu 3
+.I File
+has more than one hardlink.
+.IP \(bu 3
+.I File
+has setuid, setgid, or sticky bit set.
+.IP \(bu 3
+The operation mode is set to compress, and the
+.I file
+already has a suffix of the target file format
+.RB ( .xz
+or
+.B .txz
+when compressing to the
+.B .xz
+format, and
+.B .lzma
+or
+.B .tlz
+when compressing to the
+.B .lzma
+format).
+.IP \(bu 3
+The operation mode is set to decompress, and the
+.I file
+doesn't have a suffix of any of the supported file formats
+.RB ( .xz ,
+.BR .txz ,
+.BR .lzma ,
+or
+.BR .tlz ).
+.PP
+After successfully compressing or decompressing the
+.IR file ,
+.B xz
+copies the owner, group, permissions, access time, and modification time
+from the source
+.I file
+to the target file. If copying the group fails, the permissions are modified
+so that the target file doesn't become accessible to users who didn't have
+permission to access the source
+.IR file .
+.B xz
+doesn't support copying other metadata like access control lists
+or extended attributes yet.
+.PP
+Once the target file has been successfully closed, the source
+.I file
+is removed unless
+.B \-\-keep
+was specified. The source
+.I file
+is never removed if the output is written to standard output.
+.PP
+Sending
+.B SIGINFO
+or
+.B SIGUSR1
+to the
+.B xz
+process makes it print progress information to standard error.
+This has only limited use, since when standard error is a terminal, using
+.B \-\-verbose
+will display an automatically updating progress indicator.
+.SS "Memory usage"
+The memory usage of
+.B xz
+varies from a few hundred kilobytes to several gigabytes depending on
+the compression settings. The settings used when compressing a file
+also affect the memory usage of the decompressor. Typically the decompressor
+needs only 5\ % to 20\ % of the amount of RAM that the compressor needed when
+creating the file. Still, the worst-case memory usage of the decompressor
+is several gigabytes.
+.PP
+To prevent uncomfortable surprises caused by huge memory usage,
+.B xz
+has a built-in memory usage limiter. The default limit is 40 % of total
+physical RAM. While operating systems provide ways to limit the memory usage
+of processes, relying on them wasn't deemed to be flexible enough.
+.PP
+When compressing, if the selected compression settings exceed the memory
+usage limit, the settings are automatically adjusted downwards and a notice
+about this is displayed. As an exception, if the memory usage limit is
+exceeded when compressing with
+.BR \-\-format=raw ,
+an error is displayed and
+.B xz
+will exit with exit status
+.BR 1 .
+.PP
+If source
+.I file
+cannot be decompressed without exceeding the memory usage limit, an error
+message is displayed and the file is skipped. Note that compressed files
+may contain many blocks, which may have been compressed with different
+settings. Typically all blocks will have roughly the same memory requirements,
+but it is possible that a block later in the file will exceed the memory usage
+limit, and an error about a too low memory usage limit gets displayed after some
+data has already been decompressed.
+.PP
+The absolute value of the active memory usage limit can be seen near
+the bottom of the output of
+.BR \-\-long\-help .
+The default limit can be overridden with
+\fB\-\-memory=\fIlimit\fR.
+.SH OPTIONS
+.SS "Integer suffixes and special values"
+In most places where an integer argument is expected, an optional suffix
+is supported to easily indicate large integers. There must be no space
+between the integer and the suffix.
+.TP
+.BR k " or " kB
+The integer is multiplied by 1,000 (10^3). For example,
+.B "5k"
+or
+.B "5kB"
+equals
+.BR "5000" .
+.TP
+.BR Ki " or " KiB
+The integer is multiplied by 1,024 (2^10).
+.TP
+.BR M " or " MB
+The integer is multiplied by 1,000,000 (10^6).
+.TP
+.BR Mi " or " MiB
+The integer is multiplied by 1,048,576 (2^20).
+.TP
+.BR G " or " GB
+The integer is multiplied by 1,000,000,000 (10^9).
+.TP
+.BR Gi " or " GiB
+The integer is multiplied by 1,073,741,824 (2^30).
+.PP
+A special value
+.B max
+can be used to indicate the maximum integer value supported by the option.
+.SS "Operation mode"
+If multiple operation mode options are given, the last one takes effect.
+.TP
+.BR \-z ", " \-\-compress
+Compress. This is the default operation mode when no operation mode option
+is specified, and no other operation mode is implied from the command name
+(for example,
+.B unxz
+implies
+.BR \-\-decompress ).
+.TP
+.BR \-d ", " \-\-decompress ", " \-\-uncompress
+Decompress.
+.TP
+.BR \-t ", " \-\-test
+Test the integrity of compressed
+.IR files .
+No files are created or removed. This option is equivalent to
+.B "\-\-decompress \-\-stdout"
+except that the decompressed data is discarded instead of being
+written to standard output.
+.TP
+.BR \-l ", " \-\-list
+View information about the compressed files. No uncompressed output is
+produced, and no files are created or removed. In list mode, the program
+cannot read the compressed data from standard input or from other
+unseekable sources.
+.IP
+.B "This feature has not been implemented yet."
+.SS "Operation modifiers"
+.TP
+.BR \-k ", " \-\-keep
+Keep (don't delete) the input files.
+.TP
+.BR \-f ", " \-\-force
+This option has several effects:
+.RS
+.IP \(bu 3
+If the target file already exists, delete it before compressing or
+decompressing.
+.IP \(bu 3
+Compress or decompress even if the input is not a regular file,
+has more than one hardlink, or has setuid, setgid, or sticky bit set.
+The setuid, setgid, and sticky bits are not copied to the target file.
+.IP \(bu 3
+If combined with
+.B \-\-decompress
+.BR \-\-stdout
+and
+.B xz
+doesn't recognize the type of the source file,
+.B xz
+will copy the source file as is to standard output. This allows using
+.B xzcat
+.B \-\-force
+like
+.BR cat (1)
+for files that have not been compressed with
+.BR xz .
+Note that in future,
+.B xz
+might support new compressed file formats, which may make
+.B xz
+decompress more types of files instead of copying them as is to
+standard output.
+.BI \-\-format= format
+can be used to restrict
+.B xz
+to decompress only a single file format.
+.IP \(bu 3
+Allow writing compressed data to a terminal, and reading compressed data
+from a terminal.
+.RE
+.TP
+.BR \-c ", " \-\-stdout ", " \-\-to-stdout
+Write the compressed or decompressed data to standard output instead of
+a file. This implies
+.BR \-\-keep .
+.TP
+\fB\-S\fR \fI.suf\fR, \fB\-\-suffix=\fI.suf
+When compressing, use
+.I .suf
+as the suffix for the target file instead of
+.B .xz
+or
+.BR .lzma .
+If not writing to standard output and the source file already has the suffix
+.IR .suf ,
+a warning is displayed and the file is skipped.
+.IP
+When decompressing, recognize also files with the suffix
+.I .suf
+in addition to files with the
+.BR .xz ,
+.BR .txz ,
+.BR .lzma ,
+or
+.B .tlz
+suffix. If the source file has the suffix
+.IR .suf ,
+the suffix is removed to get the target filename.
+.IP
+When compressing or decompressing raw streams
+.RB ( \-\-format=raw ),
+the suffix must always be specified unless writing to standard output,
+because there is no default suffix for raw streams.
+.TP
+\fB\-\-files\fR[\fB=\fIfile\fR]
+Read the filenames to process from
+.IR file ;
+if
+.I file
+is omitted, filenames are read from standard input. Filenames must be
+terminated with the newline character. If filenames are given also as
+command line arguments, they are processed before the filenames read from
+.IR file .
+.TP
+\fB\-\-files0\fR[\fB=\fIfile\fR]
+This is identical to \fB\-\-files\fR[\fB=\fIfile\fR] except that the
+filenames must be terminated with the null character.
+.SS "Basic file format and compression options"
+.TP
+\fB\-F\fR \fIformat\fR, \fB\-\-format=\fIformat
+Specify the file format to compress or decompress:
+.RS
+.IP \(bu 3
+.BR auto :
+This is the default. When compressing,
+.B auto
+is equivalent to
+.BR xz .
+When decompressing, the format of the input file is autodetected. Note that
+raw streams (created with
+.BR \-\-format=raw )
+cannot be autodetected.
+.IP \(bu 3
+.BR xz :
+Compress to the
+.B .xz
+file format, or accept only
+.B .xz
+files when decompressing.
+.IP \(bu 3
+.B lzma
+or
+.BR alone :
+Compress to the legacy
+.B .lzma
+file format, or accept only
+.B .lzma
+files when decompressing. The alternative name
+.B alone
+is provided for backwards compatibility with LZMA Utils.
+.IP \(bu 3
+.BR raw :
+Compress or uncompress a raw stream (no headers). This is meant for advanced
+users only. To decode raw streams, you need to set not only
+.B \-\-format=raw
+but also specify the filter chain, which would normally be stored in the
+container format headers.
+.RE
+.TP
+\fB\-C\fR \fIcheck\fR, \fB\-\-check=\fIcheck
+Specify the type of the integrity check, which is calculated from the
+uncompressed data. This option has an effect only when compressing into the
+.B .xz
+format; the
+.B .lzma
+format doesn't support integrity checks.
+The integrity check (if any) is verified when the
+.B .xz
+file is decompressed.
+.IP
+Supported
+.I check
+types:
+.RS
+.IP \(bu 3
+.BR none :
+Don't calculate an integrity check at all. This is usually a bad idea. This
+can be useful when integrity of the data is verified by other means anyway.
+.IP \(bu 3
+.BR crc32 :
+Calculate CRC32 using the polynomial from IEEE-802.3 (Ethernet).
+.IP \(bu 3
+.BR crc64 :
+Calculate CRC64 using the polynomial from ECMA-182. This is the default, since
+it is slightly better than CRC32 at detecting damaged files and the speed
+difference is negligible.
+.IP \(bu 3
+.BR sha256 :
+Calculate SHA-256. This is somewhat slower than CRC32 and CRC64.
+.RE
+.IP
+Integrity of the
+.B .xz
+headers is always verified with CRC32. It is not possible to change or
+disable it.
+.TP
+.BR \-0 " ... " \-9
+Select compression preset. If a preset level is specified multiple times,
+the last one takes effect.
+.IP
+The compression preset levels can be categorised roughly into three
+categories:
+.RS
+.IP "\fB\-0\fR ... \fB\-2"
+Fast presets with relatively low memory usage.
+.B \-1
+and
+.B \-2
+should give compression speed and ratios comparable to
+.B "bzip2 \-1"
+and
+.BR "bzip2 \-9" ,
+respectively.
+Currently
+.B \-0
+is not very good (not much faster than
+.B \-1
+but much worse compression). In future,
+.B \-0
+may indicate some fast algorithm instead of LZMA2.
+.IP "\fB\-3\fR ... \fB\-5"
+Good compression ratio with low to medium memory usage.
+These are significantly slower than levels 0\-2.
+.IP "\fB\-6\fR ... \fB\-9"
+Excellent compression with medium to high memory usage. These are also
+slower than the lower preset levels. The default is
+.BR \-6 .
+Unless you want to maximize the compression ratio, you probably don't want
+a higher preset level than
+.B \-7
+due to speed and memory usage.
+.RE
+.IP
+The exact compression settings (filter chain) used by each preset may
+vary between
+.B xz
+versions. The settings may also vary between files being compressed, if
+.B xz
+determines that modified settings will probably give better compression
+ratio without significantly affecting compression time or memory usage.
+.IP
+Because the settings may vary, the memory usage may vary too. The following
+table lists the maximum memory usage of each preset level, which won't be
+exceeded even in future versions of
+.BR xz .
+.IP
+.B "FIXME: The table below is just a rough idea."
+.RS
+.RS
+.TS
+tab(;);
+c c c
+n n n.
+Preset;Compression;Decompression
+\-0;6 MiB;1 MiB
+\-1;6 MiB;1 MiB
+\-2;10 MiB;1 MiB
+\-3;20 MiB;2 MiB
+\-4;30 MiB;3 MiB
+\-5;60 MiB;6 MiB
+\-6;100 MiB;10 MiB
+\-7;200 MiB;20 MiB
+\-8;400 MiB;40 MiB
+\-9;800 MiB;80 MiB
+.TE
+.RE
+.RE
+.IP
+When compressing,
+.B xz
+automatically adjusts the compression settings downwards if
+the memory usage limit would be exceeded, so it is safe to specify
+a high preset level even on systems that don't have lots of RAM.
+.TP
+.BR \-\-fast " and " \-\-best
+These are somewhat misleading aliases for
+.B \-0
+and
+.BR \-9 ,
+respectively.
+These are provided only for backwards compatibility with LZMA Utils.
+Avoid using these options.
+.IP
+Especially the name of
+.B \-\-best
+is misleading, because the definition of best depends on the input data,
+and usually people don't want the very best compression ratio anyway,
+because it would be very slow.
+.TP
+.BR \-e ", " \-\-extreme
+Modify the compression preset (\fB\-0\fR ... \fB\-9\fR) so that a little bit
+better compression ratio can be achieved without increasing memory usage
+of the compressor or decompressor (exception: compressor memory usage may
+increase a little with presets \fB\-0\fR ... \fB\-2\fR). The downside is that
+the compression time will increase dramatically (it can easily double).
+.TP
+\fB\-M\fR \fIlimit\fR, \fB\-\-memory=\fIlimit
+Set the memory usage limit. If this option is specified multiple times,
+the last one takes effect. The
+.I limit
+can be specified in multiple ways:
+.RS
+.IP \(bu 3
+The
+.I limit
+can be an absolute value in bytes. Using an integer suffix like
+.B MiB
+can be useful. Example:
+.B "\-\-memory=80MiB"
+.IP \(bu 3
+The
+.I limit
+can be specified as a percentage of physical RAM. Example:
+.B "\-\-memory=70%"
+.IP \(bu 3
+The
+.I limit
+can be reset back to its default value (currently 40 % of physical RAM)
+by setting it to
+.BR 0 .
+.IP \(bu 3
+The memory usage limiting can be effectively disabled by setting
+.I limit
+to
+.BR max .
+This isn't recommended. It's usually better to use, for example,
+.BR \-\-memory=90% .
+.RE
+.IP
+The current
+.I limit
+can be seen near the bottom of the output of the
+.B \-\-long-help
+option.
+.TP
+\fB\-T\fR \fIthreads\fR, \fB\-\-threads=\fIthreads
+Specify the maximum number of worker threads to use. The default is
+the number of available CPU cores. You can see the current value of
+.I threads
+near the end of the output of the
+.B \-\-long\-help
+option.
+.IP
+The actual number of worker threads can be less than
+.I threads
+if using more threads would exceed the memory usage limit.
+In addition to CPU-intensive worker threads,
+.B xz
+may use a few auxiliary threads, which don't use a lot of CPU time.
+.IP
+.B "Multithreaded compression and decompression are not implemented yet,"
+.B "so this option has no effect for now."
+.SS Custom compressor filter chains
+A custom filter chain allows specifying the compression settings in detail
+instead of relying on the settings associated to the preset levels.
+When a custom filter chain is specified, the compression preset level options
+(\fB\-0\fR ... \fB\-9\fR and \fB\-\-extreme\fR) are silently ignored.
+.PP
+A filter chain is comparable to piping on the UN*X command line.
+When compressing, the uncompressed input goes to the first filter, whose
+output goes to the next filter (if any). The output of the last filter
+gets written to the compressed file. The maximum number of filters in
+the chain is four, but typically a filter chain has only one or two filters.
+.PP
+Many filters have limitations where they can be in the filter chain:
+some filters can work only as the last filter in the chain, some only
+as a non-last filter, and some work in any position in the chain. Depending
+on the filter, this limitation is either inherent to the filter design or
+exists to prevent security issues.
+.PP
+A custom filter chain is specified by using one or more filter options in
+the order they are wanted in the filter chain. That is, the order of filter
+options is significant! When decoding raw streams
+.RB ( \-\-format=raw ),
+the filter chain is specified in the same order as it was specified when
+compressing.
+.PP
+Filters take filter-specific
+.I options
+as a comma-separated list. Extra commas in
+.I options
+are ignored. Every option has a default value, so you need to
+specify only those you want to change.
+.TP
+\fB\-\-lzma1\fR[\fB=\fIoptions\fR], \fB\-\-lzma2\fR[\fB=\fIoptions\fR]
+Add an LZMA1 or LZMA2 filter to the filter chain. These filters can be used
+only as the last filter in the chain.
+.IP
+LZMA1 is a legacy filter, which is supported almost solely due to the legacy
+.B .lzma
+file format, which supports only LZMA1. LZMA2 is an updated
+version of LZMA1 to fix some practical issues of LZMA1. The
+.B .xz
+format uses LZMA2, and doesn't support LZMA1 at all. Compression speed and
+ratios of LZMA1 and LZMA2 are practically the same.
+.IP
+LZMA1 and LZMA2 share the same set of
+.IR options :
+.RS
+.TP
+.BI preset= preset
+Reset all LZMA1 or LZMA2
+.I options
+to
+.IR preset .
+.I Preset
+consists of an integer, which may be followed by single-letter preset
+modifiers. The integer can be from
+.B 0
+to
+.BR 9 ,
+matching the command line options \fB\-0\fR ... \fB\-9\fR.
+The only supported modifier is currently
+.BR e ,
+which matches
+.BR \-\-extreme .
+.IP
+The default
+.I preset
+is
+.BR 6 ,
+from which the default values for the rest of the LZMA1 or LZMA2
+.I options
+are taken.
+.TP
+.BI dict= size
+Dictionary (history buffer) size indicates how many bytes of the recently
+processed uncompressed data is kept in memory. One method to reduce size of
+the uncompressed data is to store distance-length pairs, which
+indicate what data to repeat from the dictionary buffer. The bigger
+the dictionary, the better the compression ratio usually is,
+but dictionaries bigger than the uncompressed data are a waste of RAM.
+.IP
+Typical dictionary size is from 64 KiB to 64 MiB. The minimum is 4 KiB.
+The maximum for compression is currently 1.5 GiB. The decompressor already
+supports dictionaries up to one byte less than 4 GiB, which is the
+maximum for LZMA1 and LZMA2 stream formats.
+.IP
+Dictionary size has the biggest effect on compression ratio.
+Dictionary size and match finder together determine the memory usage of
+the LZMA1 or LZMA2 encoder. The same dictionary size is required
+for decompressing that was used when compressing, thus the memory usage of
+the decoder is determined by the dictionary size used when compressing.
+.TP
+.BI lc= lc
+Specify the number of literal context bits. The minimum is
+.B 0
+and the maximum is
+.BR 4 ;
+the default is
+.BR 3 .
+In addition, the sum of
+.I lc
+and
+.I lp
+must not exceed
+.BR 4 .
+.TP
+.BI lp= lp
+Specify the number of literal position bits. The minimum is
+.B 0
+and the maximum is
+.BR 4 ;
+the default is
+.BR 0 .
+.TP
+.BI pb= pb
+Specify the number of position bits. The minimum is
+.B 0
+and the maximum is
+.BR 4 ;
+the default is
+.BR 2 .
+.TP
+.BI mode= mode
+Compression
+.I mode
+specifies the function used to analyze the data produced by the match finder.
+Supported
+.I modes
+are
+.B fast
+and
+.BR normal .
+The default is
+.B fast
+for
+.I presets
+.BR 0 \- 2
+and
+.B normal
+for
+.I presets
+.BR 3 \- 9 .
+.TP
+.BI mf= mf
+Match finder has a major effect on encoder speed, memory usage, and
+compression ratio. Usually Hash Chain match finders are faster than
+Binary Tree match finders. Hash Chains are usually used together with
+.B mode=fast
+and Binary Trees with
+.BR mode=normal .
+The memory usage formulas are only rough estimates,
+which are closest to reality when
+.I dict
+is a power of two.
+.RS
+.TP
+.B hc3
+Hash Chain with 2- and 3-byte hashing
+.br
+Minimum value for
+.IR nice :
+3
+.br
+Memory usage:
+.I dict
+* 7.5 (if
+.I dict
+<= 16 MiB);
+.br
+.I dict
+* 5.5 + 64 MiB (if
+.I dict
+> 16 MiB)
+.TP
+.B hc4
+Hash Chain with 2-, 3-, and 4-byte hashing
+.br
+Minimum value for
+.IR nice :
+4
+.br
+Memory usage:
+.I dict
+* 7.5
+.TP
+.B bt2
+Binary Tree with 2-byte hashing
+.br
+Minimum value for
+.IR nice :
+2
+.br
+Memory usage:
+.I dict
+* 9.5
+.TP
+.B bt3
+Binary Tree with 2- and 3-byte hashing
+.br
+Minimum value for
+.IR nice :
+3
+.br
+Memory usage:
+.I dict
+* 11.5 (if
+.I dict
+<= 16 MiB);
+.br
+.I dict
+* 9.5 + 64 MiB (if
+.I dict
+> 16 MiB)
+.TP
+.B bt4
+Binary Tree with 2-, 3-, and 4-byte hashing
+.br
+Minimum value for
+.IR nice :
+4
+.br
+Memory usage:
+.I dict
+* 11.5
+.RE
+.TP
+.BI nice= nice
+Specify what is considered to be a nice length for a match. Once a match
+of at least
+.I nice
+bytes is found, the algorithm stops looking for possibly better matches.
+.IP
+.I nice
+can be 2\-273 bytes. Higher values tend to give better compression ratio
+at expense of speed. The default depends on the
+.I preset
+level.
+.TP
+.BI depth= depth
+Specify the maximum search depth in the match finder. The default is the
+special value
+.BR 0 ,
+which makes the compressor determine a reasonable
+.I depth
+from
+.I mf
+and
+.IR nice .
+.IP
+Using very high values for
+.I depth
+can make the encoder extremely slow with carefully crafted files.
+Avoid setting the
+.I depth
+over 1000 unless you are prepared to interrupt the compression in case it
+is taking too long.
+.RE
+.IP
+When decoding raw streams
+.RB ( \-\-format=raw ),
+LZMA2 needs only the value of
+.BR dict .
+LZMA1 needs also
+.BR lc ,
+.BR lp ,
+and
+.BR pb .
+.TP
+\fB\-\-x86\fR[\fB=\fIoptions\fR]
+.TP
+\fB\-\-powerpc\fR[\fB=\fIoptions\fR]
+.TP
+\fB\-\-ia64\fR[\fB=\fIoptions\fR]
+.TP
+\fB\-\-arm\fR[\fB=\fIoptions\fR]
+.TP
+\fB\-\-armthumb\fR[\fB=\fIoptions\fR]
+.TP
+\fB\-\-sparc\fR[\fB=\fIoptions\fR]
+Add a branch/call/jump (BCJ) filter to the filter chain. These filters
+can be used only as non-last filter in the filter chain.
+.IP
+A BCJ filter converts relative addresses in the machine code to their
+absolute counterparts. This doesn't change the size of the data, but
+it increases redundancy, which allows e.g. LZMA2 to get better
+compression ratio.
+.IP
+The BCJ filters are always reversible, so using a BCJ filter for wrong
+type of data doesn't cause any data loss. However, applying a BCJ filter
+for wrong type of data is a bad idea, because it tends to make the
+compression ratio worse.
+.IP
+Different instruction sets have different alignment:
+.RS
+.RS
+.TS
+tab(;);
+l n l
+l n l.
+Filter;Alignment;Notes
+x86;1;32-bit and 64-bit x86
+PowerPC;4;Big endian only
+ARM;4;Little endian only
+ARM-Thumb;2;Little endian only
+IA-64;16;Big or little endian
+SPARC;4;Big or little endian
+.TE
+.RE
+.RE
+.IP
+Since the BCJ-filtered data is usually compressed with LZMA2, the compression
+ratio may be improved slightly if the LZMA2 options are set to match the
+alignment of the selected BCJ filter. For example, with the IA-64 filter,
+it's good to set
+.B pb=4
+with LZMA2 (2^4=16). The x86 filter is an exception; it's usually good to
+stick to LZMA2's default four-byte alignment when compressing x86 executables.
+.IP
+All BCJ filters support the same
+.IR options :
+.RS
+.TP
+.BI start= offset
+Specify the start
+.I offset
+that is used when converting between relative and absolute addresses.
+The
+.I offset
+must be a multiple of the alignment of the filter (see the table above).
+The default is zero. In practice, the default is good; specifying
+a custom
+.I offset
+is almost never useful.
+.IP
+Specifying a non-zero start
+.I offset
+is probably useful only if the executable has multiple sections, and there
+are many cross-section jumps or calls. Applying a BCJ filter separately for
+each section with proper start offset and then compressing the result as
+a single chunk may give some improvement in compression ratio compared
+to applying the BCJ filter with the default
+.I offset
+for the whole executable.
+.RE
+.TP
+\fB\-\-delta\fR[\fB=\fIoptions\fR]
+Add Delta filter to the filter chain. The Delta filter
+can be used only as non-last filter in the filter chain.
+.IP
+Currently only simple byte-wise delta calculation is supported. It can
+be useful when compressing e.g. uncompressed bitmap images or uncompressed
+PCM audio. However, special purpose algorithms may give significantly better
+results than Delta + LZMA2. This is true especially with audio, which
+compresses faster and better e.g. with FLAC.
+.IP
+Supported
+.IR options :
+.RS
+.TP
+.BI dist= distance
+Specify the
+.I distance
+of the delta calculation as bytes.
+.I distance
+must be 1\-256. The default is 1.
+.IP
+For example, with
+.B dist=2
+and eight-byte input A1 B1 A2 B3 A3 B5 A4 B7, the output will be
+A1 B1 01 02 01 02 01 02.
+.RE
+.SS "Other options"
+.TP
+.BR \-q ", " \-\-quiet
+Suppress warnings and notices. Specify this twice to suppress errors too.
+This option has no effect on the exit status. That is, even if a warning
+was suppressed, the exit status to indicate a warning is still used.
+.TP
+.BR \-v ", " \-\-verbose
+Be verbose. If standard error is connected to a terminal,
+.B xz
+will display a progress indicator.
+Specifying
+.B \-\-verbose
+twice will give even more verbose output (useful mostly for debugging).
+.TP
+.BR \-Q ", " \-\-no\-warn
+Don't set the exit status to
+.B 2
+even if a condition worth a warning was detected. This option doesn't affect
+the verbosity level, thus both
+.B \-\-quiet
+and
+.B \-\-no\-warn
+have to be used to not display warnings and to not alter the exit status.
+.TP
+.BR \-h ", " \-\-help
+Display a help message describing the most commonly used options,
+and exit successfully.
+.TP
+.BR \-H ", " \-\-long\-help
+Display a help message describing all features of
+.BR xz ,
+and exit successfully.
+.TP
+.BR \-V ", " \-\-version
+Display the version number of
+.B xz
+and liblzma.
+.SH "EXIT STATUS"
+.TP
+.B 0
+All is good.
+.TP
+.B 1
+An error occurred.
+.TP
+.B 2
+Something worth a warning occurred, but no actual errors occurred.
+.PP
+Notices (not warnings or errors) printed on standard error don't affect
+the exit status.
+.SH ENVIRONMENT
+.TP
+.B XZ_OPT
+A space-separated list of options is parsed from
+.B XZ_OPT
+before parsing the options given on the command line. Note that only
+options are parsed from
+.BR XZ_OPT ;
+all non-options are silently ignored. Parsing is done with
+.BR getopt_long (3)
+which is used also for the command line arguments.
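+.IP
+For example (an illustrative invocation, not part of the original manual),
+.B "XZ_OPT=\-2v xz foo.tar"
+behaves as if
+.B "\-2 \-v"
+had been given on the
+.B xz
+command line.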
+.SH "LZMA UTILS COMPATIBILITY"
+The command line syntax of
+.B xz
+is practically a superset of
+.BR lzma ,
+.BR unlzma ,
+and
+.BR lzcat
+as found from LZMA Utils 4.32.x. In most cases, it is possible to replace
+LZMA Utils with XZ Utils without breaking existing scripts. There are some
+incompatibilities though, which may sometimes cause problems.
+.SS "Compression preset levels"
+The numbering of the compression level presets is not identical in
+.B xz
+and LZMA Utils.
+The most important difference is how dictionary sizes are mapped to different
+presets. Dictionary size is roughly equal to the decompressor memory usage.
+.RS
+.TS
+tab(;);
+c c c
+c n n.
+Level;xz;LZMA Utils
+\-1;64 KiB;64 KiB
+\-2;512 KiB;1 MiB
+\-3;1 MiB;512 KiB
+\-4;2 MiB;1 MiB
+\-5;4 MiB;2 MiB
+\-6;8 MiB;4 MiB
+\-7;16 MiB;8 MiB
+\-8;32 MiB;16 MiB
+\-9;64 MiB;32 MiB
+.TE
+.RE
+.PP
+The dictionary size differences affect the compressor memory usage too,
+but there are some other differences between LZMA Utils and XZ Utils, which
+make the difference even bigger:
+.RS
+.TS
+tab(;);
+c c c
+c n n.
+Level;xz;LZMA Utils 4.32.x
+\-1;2 MiB;2 MiB
+\-2;5 MiB;12 MiB
+\-3;13 MiB;12 MiB
+\-4;25 MiB;16 MiB
+\-5;48 MiB;26 MiB
+\-6;94 MiB;45 MiB
+\-7;186 MiB;83 MiB
+\-8;370 MiB;159 MiB
+\-9;674 MiB;311 MiB
+.TE
+.RE
+.PP
+The default preset level in LZMA Utils is
+.B \-7
+while in XZ Utils it is
+.BR \-6 ,
+so both use 8 MiB dictionary by default.
+.SS "Streamed vs. non-streamed .lzma files"
+Uncompressed size of the file can be stored in the
+.B .lzma
+header. LZMA Utils does that when compressing regular files.
+The alternative is to mark that uncompressed size is unknown and
+use end of payload marker to indicate where the decompressor should stop.
+LZMA Utils uses this method when uncompressed size isn't known, which is
+the case for example in pipes.
+.PP
+.B xz
+supports decompressing
+.B .lzma
+files with or without end of payload marker, but all
+.B .lzma
+files created by
+.B xz
+will use end of payload marker and have uncompressed size marked as unknown
+in the
+.B .lzma
+header. This may be a problem in some (uncommon) situations. For example, a
+.B .lzma
+decompressor in an embedded device might work only with files that have known
+uncompressed size. If you hit this problem, you need to use LZMA Utils or
+LZMA SDK to create
+.B .lzma
+files with known uncompressed size.
+.SS "Unsupported .lzma files"
+The
+.B .lzma
+format allows
+.I lc
+values up to 8, and
+.I lp
+values up to 4. LZMA Utils can decompress files with any
+.I lc
+and
+.IR lp ,
+but always creates files with
+.B lc=3
+and
+.BR lp=0 .
+Creating files with other
+.I lc
+and
+.I lp
+is possible with
+.B xz
+and with LZMA SDK.
+.PP
+The implementation of the LZMA1 filter in liblzma requires
+that the sum of
+.I lc
+and
+.I lp
+must not exceed 4. Thus,
+.B .lzma
+files which exceed this limitation cannot be decompressed with
+.BR xz .
+.PP
+LZMA Utils creates only
+.B .lzma
+files which have dictionary size of
+.RI "2^" n
+(a power of 2), but accepts files with any dictionary size.
+liblzma accepts only
+.B .lzma
+files which have dictionary size of
+.RI "2^" n
+or
+.RI "2^" n " + 2^(" n "\-1)."
+This is to decrease false positives when autodetecting
+.B .lzma
+files.
+.PP
+These limitations shouldn't be a problem in practice, since practically all
+.B .lzma
+files have been compressed with settings that liblzma will accept.
+.SS "Trailing garbage"
+When decompressing, LZMA Utils silently ignore everything after the first
+.B .lzma
+stream. In most situations, this is a bug. This also means that LZMA Utils
+don't support decompressing concatenated
+.B .lzma
+files.
+.PP
+If there is data left after the first
+.B .lzma
+stream,
+.B xz
+considers the file to be corrupt. This may break obscure scripts which have
+assumed that trailing garbage is ignored.
+.SH NOTES
+.SS Compressed output may vary
+The exact compressed output produced from the same uncompressed input file
+may vary between XZ Utils versions even if compression options are identical.
+This is because the encoder can be improved (faster or better compression)
+without affecting the file format. The output can vary even between different
+builds of the same XZ Utils version, if different build options are used or
+if the endianness of the hardware is different for different builds.
+.PP
+The above means that implementing
+.B \-\-rsyncable
+to create rsyncable
+.B .xz
+files is not going to happen without freezing a part of the encoder
+implementation, which can then be used with
+.BR \-\-rsyncable .
+.SS Embedded .xz decompressors
+Embedded
+.B .xz
+decompressor implementations like XZ Embedded don't necessarily support files
+created with
+.I check
+types other than
+.B none
+and
+.BR crc32 .
+Since the default is \fB\-\-check=\fIcrc64\fR, you must use
+.B \-\-check=none
+or
+.B \-\-check=crc32
+when creating files for embedded systems.
+.PP
+Outside embedded systems, all
+.B .xz
+format decompressors support all the
+.I check
+types, or at least are able to decompress the file without verifying the
+integrity check if the particular
+.I check
+is not supported.
+.PP
+XZ Embedded supports BCJ filters, but only with the default start offset.
+.SH "SEE ALSO"
+.BR xzdec (1),
+.BR gzip (1),
+.BR bzip2 (1)
+.PP
+XZ Utils: <http://tukaani.org/xz/>
+.br
+XZ Embedded: <http://tukaani.org/xz/embedded.html>
+.br
+LZMA SDK: <http://7-zip.org/sdk.html>
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/xz_w32res.rc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/xz_w32res.rc
new file mode 100644
index 00000000..bad30202
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xz/xz_w32res.rc
@@ -0,0 +1,12 @@
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#define MY_TYPE VFT_APP
+#define MY_NAME "xz"
+#define MY_SUFFIX ".exe"
+#define MY_DESC "xz data compression tool for .xz and .lzma files"
+#include "common_w32res.rc"
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/Makefile.am
new file mode 100644
index 00000000..aaa5b5f6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/Makefile.am
@@ -0,0 +1,61 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+# Windows resource compiler support. It's fine to use xz_CPPFLAGS
+# also for lzmadec.
+.rc.o:
+ $(RC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(xzdec_CPPFLAGS) $(CPPFLAGS) $(RCFLAGS) -i $< -o $@
+
+
+bin_PROGRAMS = xzdec lzmadec
+
+xzdec_SOURCES = xzdec.c
+
+if COND_W32
+xzdec_SOURCES += xzdec_w32res.rc
+endif
+
+xzdec_CPPFLAGS = \
+ -I$(top_srcdir)/src/common \
+ -I$(top_srcdir)/src/liblzma/api \
+ -I$(top_builddir)/lib \
+ $(STATIC_CPPFLAGS)
+xzdec_LDFLAGS = $(STATIC_LDFLAGS)
+xzdec_LDADD = $(top_builddir)/src/liblzma/liblzma.la
+
+if COND_GNULIB
+xzdec_LDADD += $(top_builddir)/lib/libgnu.a
+endif
+
+xzdec_LDADD += $(LTLIBINTL)
+
+
+lzmadec_SOURCES = xzdec.c
+
+if COND_W32
+lzmadec_SOURCES += lzmadec_w32res.rc
+endif
+
+lzmadec_CPPFLAGS = $(xzdec_CPPFLAGS) -DLZMADEC
+lzmadec_LDFLAGS = $(xzdec_LDFLAGS)
+lzmadec_LDADD = $(xzdec_LDADD)
+
+
+dist_man_MANS = xzdec.1
+
+install-data-hook:
+ cd $(DESTDIR)$(mandir)/man1 && \
+ target=`echo xzdec | sed '$(transform)'` && \
+ link=`echo lzmadec | sed '$(transform)'` && \
+ rm -f $$link.1 && \
+ $(LN_S) $$target.1 $$link.1
+
+uninstall-hook:
+ cd $(DESTDIR)$(mandir)/man1 && \
+ link=`echo lzmadec | sed '$(transform)'` && \
+ rm -f $$link.1
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/Makefile.in
new file mode 100644
index 00000000..4fda1f5c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/Makefile.in
@@ -0,0 +1,714 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+bin_PROGRAMS = xzdec$(EXEEXT) lzmadec$(EXEEXT)
+@COND_W32_TRUE@am__append_1 = xzdec_w32res.rc
+@COND_GNULIB_TRUE@am__append_2 = $(top_builddir)/lib/libgnu.a
+@COND_W32_TRUE@am__append_3 = lzmadec_w32res.rc
+subdir = src/xzdec
+DIST_COMMON = $(dist_man_MANS) $(srcdir)/Makefile.am \
+ $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"
+PROGRAMS = $(bin_PROGRAMS)
+am__lzmadec_SOURCES_DIST = xzdec.c lzmadec_w32res.rc
+@COND_W32_TRUE@am__objects_1 = lzmadec_w32res.$(OBJEXT)
+am_lzmadec_OBJECTS = lzmadec-xzdec.$(OBJEXT) $(am__objects_1)
+lzmadec_OBJECTS = $(am_lzmadec_OBJECTS)
+am__DEPENDENCIES_1 =
+am__DEPENDENCIES_2 = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_2) $(am__DEPENDENCIES_1)
+lzmadec_DEPENDENCIES = $(am__DEPENDENCIES_2)
+lzmadec_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(lzmadec_LDFLAGS) \
+ $(LDFLAGS) -o $@
+am__xzdec_SOURCES_DIST = xzdec.c xzdec_w32res.rc
+@COND_W32_TRUE@am__objects_2 = xzdec_w32res.$(OBJEXT)
+am_xzdec_OBJECTS = xzdec-xzdec.$(OBJEXT) $(am__objects_2)
+xzdec_OBJECTS = $(am_xzdec_OBJECTS)
+xzdec_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_2) $(am__DEPENDENCIES_1)
+xzdec_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(xzdec_LDFLAGS) \
+ $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = $(lzmadec_SOURCES) $(xzdec_SOURCES)
+DIST_SOURCES = $(am__lzmadec_SOURCES_DIST) $(am__xzdec_SOURCES_DIST)
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+man1dir = $(mandir)/man1
+NROFF = nroff
+MANS = $(dist_man_MANS)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+xzdec_SOURCES = xzdec.c $(am__append_1)
+xzdec_CPPFLAGS = \
+ -I$(top_srcdir)/src/common \
+ -I$(top_srcdir)/src/liblzma/api \
+ -I$(top_builddir)/lib \
+ $(STATIC_CPPFLAGS)
+
+xzdec_LDFLAGS = $(STATIC_LDFLAGS)
+xzdec_LDADD = $(top_builddir)/src/liblzma/liblzma.la $(am__append_2) \
+ $(LTLIBINTL)
+lzmadec_SOURCES = xzdec.c $(am__append_3)
+lzmadec_CPPFLAGS = $(xzdec_CPPFLAGS) -DLZMADEC
+lzmadec_LDFLAGS = $(xzdec_LDFLAGS)
+lzmadec_LDADD = $(xzdec_LDADD)
+dist_man_MANS = xzdec.1
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj .rc
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/xzdec/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign src/xzdec/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-binPROGRAMS: $(bin_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+ @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed 's/$(EXEEXT)$$//' | \
+ while read p p1; do if test -f $$p || test -f $$p1; \
+ then echo "$$p"; echo "$$p"; else :; fi; \
+ done | \
+ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \
+ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+ sed 'N;N;N;s,\n, ,g' | \
+ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+ if ($$2 == $$4) files[d] = files[d] " " $$1; \
+ else { print "f", $$3 "/" $$4, $$1; } } \
+ END { for (d in files) print "f", d, files[d] }' | \
+ while read type dir files; do \
+ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+ test -z "$$files" || { \
+ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+ } \
+ ; done
+
+uninstall-binPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+ files=`for p in $$list; do echo "$$p"; done | \
+ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+ -e 's/$$/$(EXEEXT)/' `; \
+ test -n "$$list" || exit 0; \
+ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(bindir)" && rm -f $$files
+
+clean-binPROGRAMS:
+ @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \
+ echo " rm -f" $$list; \
+ rm -f $$list || exit $$?; \
+ test -n "$(EXEEXT)" || exit 0; \
+ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+ echo " rm -f" $$list; \
+ rm -f $$list
+lzmadec$(EXEEXT): $(lzmadec_OBJECTS) $(lzmadec_DEPENDENCIES)
+ @rm -f lzmadec$(EXEEXT)
+ $(lzmadec_LINK) $(lzmadec_OBJECTS) $(lzmadec_LDADD) $(LIBS)
+xzdec$(EXEEXT): $(xzdec_OBJECTS) $(xzdec_DEPENDENCIES)
+ @rm -f xzdec$(EXEEXT)
+ $(xzdec_LINK) $(xzdec_OBJECTS) $(xzdec_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lzmadec-xzdec.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xzdec-xzdec.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
+
+lzmadec-xzdec.o: xzdec.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(lzmadec_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lzmadec-xzdec.o -MD -MP -MF $(DEPDIR)/lzmadec-xzdec.Tpo -c -o lzmadec-xzdec.o `test -f 'xzdec.c' || echo '$(srcdir)/'`xzdec.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/lzmadec-xzdec.Tpo $(DEPDIR)/lzmadec-xzdec.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='xzdec.c' object='lzmadec-xzdec.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(lzmadec_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lzmadec-xzdec.o `test -f 'xzdec.c' || echo '$(srcdir)/'`xzdec.c
+
+lzmadec-xzdec.obj: xzdec.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(lzmadec_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT lzmadec-xzdec.obj -MD -MP -MF $(DEPDIR)/lzmadec-xzdec.Tpo -c -o lzmadec-xzdec.obj `if test -f 'xzdec.c'; then $(CYGPATH_W) 'xzdec.c'; else $(CYGPATH_W) '$(srcdir)/xzdec.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/lzmadec-xzdec.Tpo $(DEPDIR)/lzmadec-xzdec.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='xzdec.c' object='lzmadec-xzdec.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(lzmadec_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lzmadec-xzdec.obj `if test -f 'xzdec.c'; then $(CYGPATH_W) 'xzdec.c'; else $(CYGPATH_W) '$(srcdir)/xzdec.c'; fi`
+
+xzdec-xzdec.o: xzdec.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xzdec_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xzdec-xzdec.o -MD -MP -MF $(DEPDIR)/xzdec-xzdec.Tpo -c -o xzdec-xzdec.o `test -f 'xzdec.c' || echo '$(srcdir)/'`xzdec.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xzdec-xzdec.Tpo $(DEPDIR)/xzdec-xzdec.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='xzdec.c' object='xzdec-xzdec.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xzdec_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xzdec-xzdec.o `test -f 'xzdec.c' || echo '$(srcdir)/'`xzdec.c
+
+xzdec-xzdec.obj: xzdec.c
+@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xzdec_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT xzdec-xzdec.obj -MD -MP -MF $(DEPDIR)/xzdec-xzdec.Tpo -c -o xzdec-xzdec.obj `if test -f 'xzdec.c'; then $(CYGPATH_W) 'xzdec.c'; else $(CYGPATH_W) '$(srcdir)/xzdec.c'; fi`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/xzdec-xzdec.Tpo $(DEPDIR)/xzdec-xzdec.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='xzdec.c' object='xzdec-xzdec.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(xzdec_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o xzdec-xzdec.obj `if test -f 'xzdec.c'; then $(CYGPATH_W) 'xzdec.c'; else $(CYGPATH_W) '$(srcdir)/xzdec.c'; fi`
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+install-man1: $(dist_man_MANS)
+ @$(NORMAL_INSTALL)
+ test -z "$(man1dir)" || $(MKDIR_P) "$(DESTDIR)$(man1dir)"
+ @list=''; test -n "$(man1dir)" || exit 0; \
+ { for i in $$list; do echo "$$i"; done; \
+ l2='$(dist_man_MANS)'; for i in $$l2; do echo "$$i"; done | \
+ sed -n '/\.1[a-z]*$$/p'; \
+ } | while read p; do \
+ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; echo "$$p"; \
+ done | \
+ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \
+ sed 'N;N;s,\n, ,g' | { \
+ list=; while read file base inst; do \
+ if test "$$base" = "$$inst"; then list="$$list $$file"; else \
+ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \
+ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \
+ fi; \
+ done; \
+ for i in $$list; do echo "$$i"; done | $(am__base_list) | \
+ while read files; do \
+ test -z "$$files" || { \
+ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \
+ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \
+ done; }
+
+uninstall-man1:
+ @$(NORMAL_UNINSTALL)
+ @list=''; test -n "$(man1dir)" || exit 0; \
+ files=`{ for i in $$list; do echo "$$i"; done; \
+ l2='$(dist_man_MANS)'; for i in $$l2; do echo "$$i"; done | \
+ sed -n '/\.1[a-z]*$$/p'; \
+ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
+ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \
+ test -z "$$files" || { \
+ echo " ( cd '$(DESTDIR)$(man1dir)' && rm -f" $$files ")"; \
+ cd "$(DESTDIR)$(man1dir)" && rm -f $$files; }
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @list='$(MANS)'; if test -n "$$list"; then \
+ list=`for p in $$list; do \
+ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \
+ if test -n "$$list" && \
+ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \
+ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \
+ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \
+ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \
+ echo " typically \`make maintainer-clean' will remove them" >&2; \
+ exit 1; \
+ else :; fi; \
+ else :; fi
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS) $(MANS)
+installdirs:
+ for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-man
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-data-hook
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binPROGRAMS
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man: install-man1
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS uninstall-man
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) uninstall-hook
+uninstall-man: uninstall-man1
+
+.MAKE: install-am install-data-am install-strip uninstall-am
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \
+ clean-generic clean-libtool ctags distclean distclean-compile \
+ distclean-generic distclean-libtool distclean-tags distdir dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-binPROGRAMS install-data install-data-am \
+ install-data-hook install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-man1 install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ tags uninstall uninstall-am uninstall-binPROGRAMS \
+ uninstall-hook uninstall-man uninstall-man1
+
+
+# Windows resource compiler support. It's fine to use xz_CPPFLAGS
+# also for lzmadec.
+.rc.o:
+ $(RC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(xzdec_CPPFLAGS) $(CPPFLAGS) $(RCFLAGS) -i $< -o $@
+
+install-data-hook:
+ cd $(DESTDIR)$(mandir)/man1 && \
+ target=`echo xzdec | sed '$(transform)'` && \
+ link=`echo lzmadec | sed '$(transform)'` && \
+ rm -f $$link.1 && \
+ $(LN_S) $$target.1 $$link.1
+
+uninstall-hook:
+ cd $(DESTDIR)$(mandir)/man1 && \
+ link=`echo lzmadec | sed '$(transform)'` && \
+ rm -f $$link.1
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/lzmadec_w32res.rc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/lzmadec_w32res.rc
new file mode 100644
index 00000000..7d90e420
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/lzmadec_w32res.rc
@@ -0,0 +1,5 @@
+#define MY_TYPE VFT_APP
+#define MY_NAME "lzmadec"
+#define MY_SUFFIX ".exe"
+#define MY_DESC "lzmadec uncompression tool for .lzma files"
+#include "common_w32res.rc"
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec.1 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec.1
new file mode 100644
index 00000000..442a19ec
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec.1
@@ -0,0 +1,168 @@
+.\"
+.\" Author: Lasse Collin
+.\"
+.\" This file has been put into the public domain.
+.\" You can do whatever you want with this file.
+.\"
+.TH XZDEC 1 "2009-06-04" "Tukaani" "XZ Utils"
+.SH NAME
+xzdec, lzmadec \- Small .xz and .lzma decompressors
+.SH SYNOPSIS
+.B xzdec
+.RI [ option ]...
+.RI [ file ]...
+.br
+.B lzmadec
+.RI [ option ]...
+.RI [ file ]...
+.SH DESCRIPTION
+.B xzdec
+is a liblzma-based decompression-only tool for
+.B .xz
+(and only
+.BR .xz )
+files.
+.B xzdec
+is intended to work as a drop-in replacement for
+.BR xz (1)
+in the most common situations where a script has been written to use
+.B "xz \-\-decompress \-\-stdout"
+(and possibly a few other commonly used options) to decompress
+.B .xz
+files.
+.B lzmadec
+is identical to
+.B xzdec
+except that
+.B lzmadec
+supports
+.B .lzma
+files instead of
+.B .xz
+files.
+.PP
+To reduce the size of the executable,
+.B xzdec
+doesn't support multithreading or localization, and doesn't read options from the
+.B XZ_OPT
+environment variable.
+.B xzdec
+doesn't support displaying intermediate progress information: sending
+.B SIGINFO
+to
+.B xzdec
+does nothing, but sending
+.B SIGUSR1
+terminates the process instead of displaying progress information.
+.SH OPTIONS
+.TP
+.BR \-d ", " \-\-decompress ", " \-\-uncompress
+Ignored for
+.BR xz (1)
+compatibility.
+.B xzdec
+supports only decompression.
+.TP
+.BR \-k ", " \-\-keep
+Ignored for
+.BR xz (1)
+compatibility.
+.B xzdec
+never creates or removes any files.
+.TP
+.BR \-c ", " \-\-stdout ", " \-\-to-stdout
+Ignored for
+.BR xz (1)
+compatibility.
+.B xzdec
+always writes the decompressed data to standard output.
+.TP
+\fB\-M\fR \fIlimit\fR, \fB\-\-memory=\fIlimit
+Set the memory usage
+.IR limit .
+If this option is specified multiple times, the last one takes effect. The
+.I limit
+can be specified in multiple ways:
+.RS
+.IP \(bu 3
+The
+.I limit
+can be an absolute value in bytes. Using an integer suffix like
+.B MiB
+can be useful. Example:
+.B "\-\-memory=80MiB"
+.IP \(bu 3
+The
+.I limit
+can be specified as a percentage of physical RAM. Example:
+.B "\-\-memory=70%"
+.IP \(bu 3
+The
+.I limit
+can be reset back to its default value (currently 40 % of physical RAM)
+by setting it to
+.BR 0 .
+.IP \(bu 3
+The memory usage limiting can be effectively disabled by setting
+.I limit
+to
+.BR max .
+This isn't recommended. It's usually better to use, for example,
+.BR \-\-memory=90% .
+.RE
+.IP
+The current
+.I limit
+can be seen near the bottom of the output of the
+.B \-\-help
+option.
+.TP
+.BR \-q ", " \-\-quiet
+Specifying this once does nothing since
+.B xzdec
+never displays any warnings or notices.
+Specify this twice to suppress errors.
+.TP
+.BR \-Q ", " \-\-no-warn
+Ignored for
+.BR xz (1)
+compatibility.
+.B xzdec
+never uses the exit status
+.BR "2" .
+.TP
+.BR \-h ", " \-\-help
+Display a help message and exit successfully.
+.TP
+.BR \-V ", " \-\-version
+Display the version number of
+.B xzdec
+and liblzma.
+.SH "EXIT STATUS"
+.TP
+.B 0
+All was good.
+.TP
+.B 1
+An error occurred.
+.PP
+.B xzdec
+doesn't have any warning messages like
+.BR xz (1)
+has, thus the exit status
+.B 2
+is not used by
+.BR xzdec .
+.SH NOTES
+.B xzdec
+and
+.B lzmadec
+are not really that small. The size can be reduced further by dropping
+features from liblzma at compile time, but that shouldn't usually be done
+for executables distributed in typical non-embedded operating system
+distributions. If you need a truly small
+.B .xz
+decompressor, consider using XZ Embedded.
+.\" TODO: Provide URL to XZ Embedded.
+.SH "SEE ALSO"
+.BR xz (1)
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec.c
new file mode 100644
index 00000000..78f70984
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec.c
@@ -0,0 +1,498 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file xzdec.c
+/// \brief Simple single-threaded tool to uncompress .xz or .lzma files
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include "lzma.h"
+
+#include <stdarg.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#ifdef DOSLIKE
+# include <fcntl.h>
+# include <io.h>
+#endif
+
+#include "getopt.h"
+#include "physmem.h"
+
+
+#ifdef LZMADEC
+# define TOOL_FORMAT "lzma"
+#else
+# define TOOL_FORMAT "xz"
+#endif
+
+
+/// Maximum number of bytes of memory to use
+static uint64_t memlimit;
+
+/// Error messages are suppressed if this is zero, which is the case when
+/// --quiet has been given at least twice.
+static unsigned int display_errors = 2;
+
+/// Program name to be shown in error messages
+static const char *argv0;
+
+
+static void lzma_attribute((format(printf, 1, 2)))
+my_errorf(const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+
+ if (display_errors) {
+ fprintf(stderr, "%s: ", argv0);
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ }
+
+ va_end(ap);
+ return;
+}
+
+
+static void lzma_attribute((noreturn))
+my_exit(void)
+{
+ int status = EXIT_SUCCESS;
+
+ // Close stdout. We don't care about stderr, because we write to it
+ // only when an error has already occurred.
+ const int ferror_err = ferror(stdout);
+ const int fclose_err = fclose(stdout);
+
+ if (ferror_err || fclose_err) {
+ // If it was fclose() that failed, we have the reason
+ // in errno. If only ferror() indicated an error,
+ // we have no idea what the reason was.
+ my_errorf("Writing to standard output failed: %s", fclose_err
+ ? strerror(errno) : "Unknown error");
+ status = EXIT_FAILURE;
+ }
+
+ exit(status);
+}
+
+
+static void lzma_attribute((noreturn))
+help(void)
+{
+ printf(
+"Usage: %s [OPTION]... [FILE]...\n"
+"Uncompress files in the ." TOOL_FORMAT " format to the standard output.\n"
+"\n"
+" -c, --stdout (ignored)\n"
+" -d, --decompress (ignored)\n"
+" -k, --keep (ignored)\n"
+" -M, --memory=NUM use NUM bytes of memory at maximum (0 means default)\n"
+" -q, --quiet specify *twice* to suppress errors\n"
+" -Q, --no-warn (ignored)\n"
+" -h, --help display this help and exit\n"
+" -V, --version display the version number and exit\n"
+"\n"
+"With no FILE, or when FILE is -, read standard input.\n"
+"\n"
+"On this system and configuration, this program will use at maximum of roughly\n"
+"%" PRIu64 " MiB RAM.\n"
+"\n"
+"Report bugs to <" PACKAGE_BUGREPORT "> (in English or Finnish).\n"
+PACKAGE_NAME " home page: <" PACKAGE_HOMEPAGE ">\n",
+ argv0, memlimit / (1024 * 1024));
+ my_exit();
+}
+
+
+static void lzma_attribute((noreturn))
+version(void)
+{
+ printf(TOOL_FORMAT "dec (" PACKAGE_NAME ") " LZMA_VERSION_STRING "\n"
+ "liblzma %s\n", lzma_version_string());
+
+ my_exit();
+}
+
+
+/// Find out the amount of physical memory (RAM) in the system, and set
+/// the memory usage limit to the given percentage of RAM.
+static void
+memlimit_set_percentage(uint32_t percentage)
+{
+ uint64_t mem = physmem();
+
+ // If we cannot determine the amount of RAM, assume 32 MiB.
+ if (mem == 0)
+ mem = UINT64_C(32) * 1024 * 1024;
+
+ memlimit = percentage * mem / 100;
+ return;
+}
+
+
+/// Set the memory usage limit to the given number of bytes. Zero is a special
+/// value to indicate the default limit.
+static void
+memlimit_set(uint64_t new_memlimit)
+{
+ if (new_memlimit == 0)
+ memlimit_set_percentage(40);
+ else
+ memlimit = new_memlimit;
+
+ return;
+}
+
+
+/// \brief Convert a string to uint64_t
+///
+/// This is rudely copied from src/xz/util.c and modified a little. :-(
+///
+/// \param max Return value when the string "max" was specified.
+///
+static uint64_t
+str_to_uint64(const char *value, uint64_t max)
+{
+ uint64_t result = 0;
+
+ // Accept special value "max".
+ if (strcmp(value, "max") == 0)
+ return max;
+
+ if (*value < '0' || *value > '9') {
+ my_errorf("%s: Value is not a non-negative decimal integer",
+ value);
+ exit(EXIT_FAILURE);
+ }
+
+ do {
+ // Don't overflow.
+ if (result > (UINT64_MAX - 9) / 10)
+ return UINT64_MAX;
+
+ result *= 10;
+ result += *value - '0';
+ ++value;
+ } while (*value >= '0' && *value <= '9');
+
+ if (*value != '\0') {
+ // Look for suffix.
+ static const struct {
+ const char name[4];
+ uint32_t multiplier;
+ } suffixes[] = {
+ { "k", 1000 },
+ { "kB", 1000 },
+ { "M", 1000000 },
+ { "MB", 1000000 },
+ { "G", 1000000000 },
+ { "GB", 1000000000 },
+ { "Ki", 1024 },
+ { "KiB", 1024 },
+ { "Mi", 1048576 },
+ { "MiB", 1048576 },
+ { "Gi", 1073741824 },
+ { "GiB", 1073741824 }
+ };
+
+ uint32_t multiplier = 0;
+ for (size_t i = 0; i < ARRAY_SIZE(suffixes); ++i) {
+ if (strcmp(value, suffixes[i].name) == 0) {
+ multiplier = suffixes[i].multiplier;
+ break;
+ }
+ }
+
+ if (multiplier == 0) {
+ my_errorf("%s: Invalid suffix", value);
+ exit(EXIT_FAILURE);
+ }
+
+ // Don't overflow here either.
+ if (result > UINT64_MAX / multiplier)
+ result = UINT64_MAX;
+ else
+ result *= multiplier;
+ }
+
+ return result;
+}
+
+
+/// Parses command line options.
+static void
+parse_options(int argc, char **argv)
+{
+ static const char short_opts[] = "cdkM:hqQV";
+ static const struct option long_opts[] = {
+ { "stdout", no_argument, NULL, 'c' },
+ { "to-stdout", no_argument, NULL, 'c' },
+ { "decompress", no_argument, NULL, 'd' },
+ { "uncompress", no_argument, NULL, 'd' },
+ { "keep", no_argument, NULL, 'k' },
+ { "memory", required_argument, NULL, 'M' },
+ { "quiet", no_argument, NULL, 'q' },
+ { "no-warn", no_argument, NULL, 'Q' },
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, 'V' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ int c;
+
+ while ((c = getopt_long(argc, argv, short_opts, long_opts, NULL))
+ != -1) {
+ switch (c) {
+ case 'c':
+ case 'd':
+ case 'k':
+ case 'Q':
+ break;
+
+ case 'M': {
+ // Support specifying the limit as a percentage of
+ // installed physical RAM.
+ const size_t len = strlen(optarg);
+ if (len > 0 && optarg[len - 1] == '%') {
+ // Memory limit is a percentage of total
+ // installed RAM.
+ optarg[len - 1] = '\0';
+ const uint64_t percentage
+ = str_to_uint64(optarg, 100);
+ if (percentage < 1 || percentage > 100) {
+ my_errorf("Percentage must be in "
+ "the range [1, 100]");
+ exit(EXIT_FAILURE);
+ }
+
+ memlimit_set_percentage(percentage);
+ } else {
+ memlimit_set(str_to_uint64(
+ optarg, UINT64_MAX));
+ }
+
+ break;
+ }
+
+ case 'q':
+ if (display_errors > 0)
+ --display_errors;
+
+ break;
+
+ case 'h':
+ help();
+
+ case 'V':
+ version();
+
+ default:
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ return;
+}
+
+
+static void
+uncompress(lzma_stream *strm, FILE *file, const char *filename)
+{
+ lzma_ret ret;
+
+ // Initialize the decoder
+#ifdef LZMADEC
+ ret = lzma_alone_decoder(strm, memlimit);
+#else
+ ret = lzma_stream_decoder(strm, memlimit, LZMA_CONCATENATED);
+#endif
+
+ // The only reasonable error here is LZMA_MEM_ERROR.
+ // FIXME: Maybe also LZMA_MEMLIMIT_ERROR in future?
+ if (ret != LZMA_OK) {
+ my_errorf("%s", ret == LZMA_MEM_ERROR ? strerror(ENOMEM)
+ : "Internal error (bug)");
+ exit(EXIT_FAILURE);
+ }
+
+ // Input and output buffers
+ uint8_t in_buf[BUFSIZ];
+ uint8_t out_buf[BUFSIZ];
+
+ strm->avail_in = 0;
+ strm->next_out = out_buf;
+ strm->avail_out = BUFSIZ;
+
+ lzma_action action = LZMA_RUN;
+
+ while (true) {
+ if (strm->avail_in == 0) {
+ strm->next_in = in_buf;
+ strm->avail_in = fread(in_buf, 1, BUFSIZ, file);
+
+ if (ferror(file)) {
+ // POSIX says that fread() sets errno if
+ // an error occurred. ferror() doesn't
+ // touch errno.
+ my_errorf("%s: Error reading input file: %s",
+ filename, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+#ifndef LZMADEC
+ // When using LZMA_CONCATENATED, we need to tell
+ // liblzma when it has got all the input.
+ if (feof(file))
+ action = LZMA_FINISH;
+#endif
+ }
+
+ ret = lzma_code(strm, action);
+
+ // Write and check write error before checking decoder error.
+ // This way as much data as possible gets written to output
+ // even if decoder detected an error.
+ if (strm->avail_out == 0 || ret != LZMA_OK) {
+ const size_t write_size = BUFSIZ - strm->avail_out;
+
+ if (fwrite(out_buf, 1, write_size, stdout)
+ != write_size) {
+ // Wouldn't be a surprise if writing to stderr
+ // would fail too but at least try to show an
+ // error message.
+ my_errorf("Cannot write to standard output: "
+ "%s", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ strm->next_out = out_buf;
+ strm->avail_out = BUFSIZ;
+ }
+
+ if (ret != LZMA_OK) {
+ if (ret == LZMA_STREAM_END) {
+#ifdef LZMADEC
+ // Check that there's no trailing garbage.
+ if (strm->avail_in != 0
+ || fread(in_buf, 1, 1, file)
+ != 0
+ || !feof(file))
+ ret = LZMA_DATA_ERROR;
+ else
+ return;
+#else
+ // lzma_stream_decoder() already guarantees
+ // that there's no trailing garbage.
+ assert(strm->avail_in == 0);
+ assert(action == LZMA_FINISH);
+ assert(feof(file));
+ return;
+#endif
+ }
+
+ const char *msg;
+ switch (ret) {
+ case LZMA_MEM_ERROR:
+ msg = strerror(ENOMEM);
+ break;
+
+ case LZMA_MEMLIMIT_ERROR:
+ msg = "Memory usage limit reached";
+ break;
+
+ case LZMA_FORMAT_ERROR:
+ msg = "File format not recognized";
+ break;
+
+ case LZMA_OPTIONS_ERROR:
+ // FIXME: Better message?
+ msg = "Unsupported compression options";
+ break;
+
+ case LZMA_DATA_ERROR:
+ msg = "File is corrupt";
+ break;
+
+ case LZMA_BUF_ERROR:
+ msg = "Unexpected end of input";
+ break;
+
+ default:
+ msg = "Internal error (bug)";
+ break;
+ }
+
+ my_errorf("%s: %s", filename, msg);
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
+
+int
+main(int argc, char **argv)
+{
+ // Set the argv0 global so that we can print the command name in
+ // error and help messages.
+ argv0 = argv[0];
+
+ // Set the default memory usage limit. This is needed before parsing
+ // the command line arguments.
+ memlimit_set(0);
+
+ // Parse the command line options.
+ parse_options(argc, argv);
+
+ // The same lzma_stream is used for all files that we decode. This way
+ // we don't need to reallocate memory for every file if they use same
+ // compression settings.
+ lzma_stream strm = LZMA_STREAM_INIT;
+
+ // Some systems require setting stdin and stdout to binary mode.
+#ifdef DOSLIKE
+ setmode(fileno(stdin), O_BINARY);
+ setmode(fileno(stdout), O_BINARY);
+#endif
+
+ if (optind == argc) {
+ // No filenames given, decode from stdin.
+ uncompress(&strm, stdin, "(stdin)");
+ } else {
+ // Loop through the filenames given on the command line.
+ do {
+ // "-" indicates stdin.
+ if (strcmp(argv[optind], "-") == 0) {
+ uncompress(&strm, stdin, "(stdin)");
+ } else {
+ FILE *file = fopen(argv[optind], "rb");
+ if (file == NULL) {
+ my_errorf("%s: %s", argv[optind],
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ uncompress(&strm, file, argv[optind]);
+ fclose(file);
+ }
+ } while (++optind < argc);
+ }
+
+#ifndef NDEBUG
+ // Free the memory only when debugging. Freeing wastes some time,
+ // but allows detecting possible memory leaks with Valgrind.
+ lzma_end(&strm);
+#endif
+
+ my_exit();
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec_w32res.rc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec_w32res.rc
new file mode 100644
index 00000000..626f26ce
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/src/xzdec/xzdec_w32res.rc
@@ -0,0 +1,12 @@
+/*
+ * Author: Lasse Collin
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#define MY_TYPE VFT_APP
+#define MY_NAME "xzdec"
+#define MY_SUFFIX ".exe"
+#define MY_DESC "xzdec uncompression tool for .xz files"
+#include "common_w32res.rc"
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/Makefile.am b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/Makefile.am
new file mode 100644
index 00000000..d01787a3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/Makefile.am
@@ -0,0 +1,51 @@
+##
+## Author: Lasse Collin
+##
+## This file has been put into the public domain.
+## You can do whatever you want with this file.
+##
+
+EXTRA_DIST = \
+ files \
+ tests.h \
+ test_files.sh \
+ test_compress.sh \
+ bcj_test.c \
+ compress_prepared_bcj_sparc \
+ compress_prepared_bcj_x86
+
+AM_CPPFLAGS = \
+ -I$(top_srcdir)/src/common \
+ -I$(top_srcdir)/src/liblzma/api \
+ -I$(top_builddir)/lib \
+ $(STATIC_CPPFLAGS)
+
+AM_LDFLAGS = $(STATIC_LDFLAGS)
+
+LDADD = $(top_builddir)/src/liblzma/liblzma.la
+
+if COND_GNULIB
+LDADD += $(top_builddir)/lib/libgnu.a
+endif
+
+LDADD += $(LTLIBINTL)
+
+check_PROGRAMS = \
+ create_compress_files \
+ test_check \
+ test_stream_flags \
+ test_filter_flags \
+ test_block_header \
+ test_index
+
+TESTS = \
+ test_check \
+ test_stream_flags \
+ test_filter_flags \
+ test_block_header \
+ test_index \
+ test_files.sh \
+ test_compress.sh
+
+clean-local:
+ -rm -f compress_generated_*
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/Makefile.in b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/Makefile.in
new file mode 100644
index 00000000..d4a018ec
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/Makefile.in
@@ -0,0 +1,687 @@
+# Makefile.in generated by automake 1.11 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+@COND_GNULIB_TRUE@am__append_1 = $(top_builddir)/lib/libgnu.a
+check_PROGRAMS = create_compress_files$(EXEEXT) test_check$(EXEEXT) \
+ test_stream_flags$(EXEEXT) test_filter_flags$(EXEEXT) \
+ test_block_header$(EXEEXT) test_index$(EXEEXT)
+TESTS = test_check$(EXEEXT) test_stream_flags$(EXEEXT) \
+ test_filter_flags$(EXEEXT) test_block_header$(EXEEXT) \
+ test_index$(EXEEXT) test_files.sh test_compress.sh
+subdir = tests
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \
+ $(top_srcdir)/m4/getopt.m4 $(top_srcdir)/m4/gettext.m4 \
+ $(top_srcdir)/m4/iconv.m4 $(top_srcdir)/m4/lc_cpucores.m4 \
+ $(top_srcdir)/m4/lc_physmem.m4 $(top_srcdir)/m4/lib-ld.m4 \
+ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/posix-shell.m4 \
+ $(top_srcdir)/m4/progtest.m4 $(top_srcdir)/m4/visibility.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+create_compress_files_SOURCES = create_compress_files.c
+create_compress_files_OBJECTS = create_compress_files.$(OBJEXT)
+create_compress_files_LDADD = $(LDADD)
+am__DEPENDENCIES_1 =
+create_compress_files_DEPENDENCIES = \
+ $(top_builddir)/src/liblzma/liblzma.la $(am__append_1) \
+ $(am__DEPENDENCIES_1)
+test_block_header_SOURCES = test_block_header.c
+test_block_header_OBJECTS = test_block_header.$(OBJEXT)
+test_block_header_LDADD = $(LDADD)
+test_block_header_DEPENDENCIES = \
+ $(top_builddir)/src/liblzma/liblzma.la $(am__append_1) \
+ $(am__DEPENDENCIES_1)
+test_check_SOURCES = test_check.c
+test_check_OBJECTS = test_check.$(OBJEXT)
+test_check_LDADD = $(LDADD)
+test_check_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+test_filter_flags_SOURCES = test_filter_flags.c
+test_filter_flags_OBJECTS = test_filter_flags.$(OBJEXT)
+test_filter_flags_LDADD = $(LDADD)
+test_filter_flags_DEPENDENCIES = \
+ $(top_builddir)/src/liblzma/liblzma.la $(am__append_1) \
+ $(am__DEPENDENCIES_1)
+test_index_SOURCES = test_index.c
+test_index_OBJECTS = test_index.$(OBJEXT)
+test_index_LDADD = $(LDADD)
+test_index_DEPENDENCIES = $(top_builddir)/src/liblzma/liblzma.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+test_stream_flags_SOURCES = test_stream_flags.c
+test_stream_flags_OBJECTS = test_stream_flags.$(OBJEXT)
+test_stream_flags_LDADD = $(LDADD)
+test_stream_flags_DEPENDENCIES = \
+ $(top_builddir)/src/liblzma/liblzma.la $(am__append_1) \
+ $(am__DEPENDENCIES_1)
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = create_compress_files.c test_block_header.c test_check.c \
+ test_filter_flags.c test_index.c test_stream_flags.c
+DIST_SOURCES = create_compress_files.c test_block_header.c \
+ test_check.c test_filter_flags.c test_index.c \
+ test_stream_flags.c
+ETAGS = etags
+CTAGS = ctags
+am__tty_colors = \
+red=; grn=; lgn=; blu=; std=
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_CFLAGS = @AM_CFLAGS@
+AR = @AR@
+AS = @AS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CFLAG_VISIBILITY = @CFLAG_VISIBILITY@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+DYNAMIC_CPPFLAGS = @DYNAMIC_CPPFLAGS@
+DYNAMIC_LDFLAGS = @DYNAMIC_LDFLAGS@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GETOPT_H = @GETOPT_H@
+GMSGFMT = @GMSGFMT@
+GMSGFMT_015 = @GMSGFMT_015@
+GREP = @GREP@
+HAVE_VISIBILITY = @HAVE_VISIBILITY@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MSGFMT = @MSGFMT@
+MSGFMT_015 = @MSGFMT_015@
+MSGMERGE = @MSGMERGE@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_HOMEPAGE = @PACKAGE_HOMEPAGE@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+POSIX_SHELL = @POSIX_SHELL@
+POSUB = @POSUB@
+PREFERABLY_POSIX_SHELL = @PREFERABLY_POSIX_SHELL@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+RC = @RC@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STATIC_CPPFLAGS = @STATIC_CPPFLAGS@
+STATIC_LDFLAGS = @STATIC_LDFLAGS@
+STRIP = @STRIP@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+XGETTEXT = @XGETTEXT@
+XGETTEXT_015 = @XGETTEXT_015@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+acx_pthread_config = @acx_pthread_config@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+xz = @xz@
+EXTRA_DIST = \
+ files \
+ tests.h \
+ test_files.sh \
+ test_compress.sh \
+ bcj_test.c \
+ compress_prepared_bcj_sparc \
+ compress_prepared_bcj_x86
+
+AM_CPPFLAGS = \
+ -I$(top_srcdir)/src/common \
+ -I$(top_srcdir)/src/liblzma/api \
+ -I$(top_builddir)/lib \
+ $(STATIC_CPPFLAGS)
+
+AM_LDFLAGS = $(STATIC_LDFLAGS)
+LDADD = $(top_builddir)/src/liblzma/liblzma.la $(am__append_1) \
+ $(LTLIBINTL)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tests/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign tests/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-checkPROGRAMS:
+ @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \
+ echo " rm -f" $$list; \
+ rm -f $$list || exit $$?; \
+ test -n "$(EXEEXT)" || exit 0; \
+ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+ echo " rm -f" $$list; \
+ rm -f $$list
+create_compress_files$(EXEEXT): $(create_compress_files_OBJECTS) $(create_compress_files_DEPENDENCIES)
+ @rm -f create_compress_files$(EXEEXT)
+ $(LINK) $(create_compress_files_OBJECTS) $(create_compress_files_LDADD) $(LIBS)
+test_block_header$(EXEEXT): $(test_block_header_OBJECTS) $(test_block_header_DEPENDENCIES)
+ @rm -f test_block_header$(EXEEXT)
+ $(LINK) $(test_block_header_OBJECTS) $(test_block_header_LDADD) $(LIBS)
+test_check$(EXEEXT): $(test_check_OBJECTS) $(test_check_DEPENDENCIES)
+ @rm -f test_check$(EXEEXT)
+ $(LINK) $(test_check_OBJECTS) $(test_check_LDADD) $(LIBS)
+test_filter_flags$(EXEEXT): $(test_filter_flags_OBJECTS) $(test_filter_flags_DEPENDENCIES)
+ @rm -f test_filter_flags$(EXEEXT)
+ $(LINK) $(test_filter_flags_OBJECTS) $(test_filter_flags_LDADD) $(LIBS)
+test_index$(EXEEXT): $(test_index_OBJECTS) $(test_index_DEPENDENCIES)
+ @rm -f test_index$(EXEEXT)
+ $(LINK) $(test_index_OBJECTS) $(test_index_LDADD) $(LIBS)
+test_stream_flags$(EXEEXT): $(test_stream_flags_OBJECTS) $(test_stream_flags_DEPENDENCIES)
+ @rm -f test_stream_flags$(EXEEXT)
+ $(LINK) $(test_stream_flags_OBJECTS) $(test_stream_flags_LDADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/create_compress_files.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_block_header.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_check.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_filter_flags.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_index.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_stream_flags.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+check-TESTS: $(TESTS)
+ @failed=0; all=0; xfail=0; xpass=0; skip=0; \
+ srcdir=$(srcdir); export srcdir; \
+ list=' $(TESTS) '; \
+ $(am__tty_colors); \
+ if test -n "$$list"; then \
+ for tst in $$list; do \
+ if test -f ./$$tst; then dir=./; \
+ elif test -f $$tst; then dir=; \
+ else dir="$(srcdir)/"; fi; \
+ if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \
+ all=`expr $$all + 1`; \
+ case " $(XFAIL_TESTS) " in \
+ *[\ \ ]$$tst[\ \ ]*) \
+ xpass=`expr $$xpass + 1`; \
+ failed=`expr $$failed + 1`; \
+ col=$$red; res=XPASS; \
+ ;; \
+ *) \
+ col=$$grn; res=PASS; \
+ ;; \
+ esac; \
+ elif test $$? -ne 77; then \
+ all=`expr $$all + 1`; \
+ case " $(XFAIL_TESTS) " in \
+ *[\ \ ]$$tst[\ \ ]*) \
+ xfail=`expr $$xfail + 1`; \
+ col=$$lgn; res=XFAIL; \
+ ;; \
+ *) \
+ failed=`expr $$failed + 1`; \
+ col=$$red; res=FAIL; \
+ ;; \
+ esac; \
+ else \
+ skip=`expr $$skip + 1`; \
+ col=$$blu; res=SKIP; \
+ fi; \
+ echo "$${col}$$res$${std}: $$tst"; \
+ done; \
+ if test "$$all" -eq 1; then \
+ tests="test"; \
+ All=""; \
+ else \
+ tests="tests"; \
+ All="All "; \
+ fi; \
+ if test "$$failed" -eq 0; then \
+ if test "$$xfail" -eq 0; then \
+ banner="$$All$$all $$tests passed"; \
+ else \
+ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \
+ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \
+ fi; \
+ else \
+ if test "$$xpass" -eq 0; then \
+ banner="$$failed of $$all $$tests failed"; \
+ else \
+ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \
+ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \
+ fi; \
+ fi; \
+ dashes="$$banner"; \
+ skipped=""; \
+ if test "$$skip" -ne 0; then \
+ if test "$$skip" -eq 1; then \
+ skipped="($$skip test was not run)"; \
+ else \
+ skipped="($$skip tests were not run)"; \
+ fi; \
+ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \
+ dashes="$$skipped"; \
+ fi; \
+ report=""; \
+ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \
+ report="Please report to $(PACKAGE_BUGREPORT)"; \
+ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \
+ dashes="$$report"; \
+ fi; \
+ dashes=`echo "$$dashes" | sed s/./=/g`; \
+ if test "$$failed" -eq 0; then \
+ echo "$$grn$$dashes"; \
+ else \
+ echo "$$red$$dashes"; \
+ fi; \
+ echo "$$banner"; \
+ test -z "$$skipped" || echo "$$skipped"; \
+ test -z "$$report" || echo "$$report"; \
+ echo "$$dashes$$std"; \
+ test "$$failed" -eq 0; \
+ else :; fi
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+ $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS)
+ $(MAKE) $(AM_MAKEFLAGS) check-TESTS
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-checkPROGRAMS clean-generic clean-libtool clean-local \
+ mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: check-am install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-TESTS check-am clean \
+ clean-checkPROGRAMS clean-generic clean-libtool clean-local \
+ ctags distclean distclean-compile distclean-generic \
+ distclean-libtool distclean-tags distdir dvi dvi-am html \
+ html-am info info-am install install-am install-data \
+ install-data-am install-dvi install-dvi-am install-exec \
+ install-exec-am install-html install-html-am install-info \
+ install-info-am install-man install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ tags uninstall uninstall-am
+
+
+clean-local:
+ -rm -f compress_generated_*
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/bcj_test.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/bcj_test.c
new file mode 100644
index 00000000..8b6a5662
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/bcj_test.c
@@ -0,0 +1,67 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file bcj_test.c
+/// \brief Source code of compress_prepared_bcj_*
+///
+/// This is a simple program that should make the compiler generate
+/// PC-relative branches, jumps, and calls. The compiled files can then
+/// be used to test the branch conversion filters. Note that this program
+/// itself does nothing useful.
+///
+/// Compiling: gcc -std=c99 -fPIC -c bcj_test.c
+/// Don't optimize or strip.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+extern int jump(int a, int b);
+
+
+extern int
+call(int a, int b)
+{
+ if (a < b)
+ a = jump(a, b);
+
+ return a;
+}
+
+
+extern int
+jump(int a, int b)
+{
+ // The loop generates a conditional jump backwards.
+ while (1) {
+ if (a < b) {
+ a *= 2;
+ a += 3 * b;
+ break;
+ } else {
+ // Put enough code here to prevent JMP SHORT on x86.
+ a += b;
+ a /= 2;
+ b += b % 5;
+ a -= b / 3;
+ b = 2 * b + a - 1;
+ a *= b + a + 1;
+ b += a - 1;
+ a += b * 2 - a / 5;
+ }
+ }
+
+ return a;
+}
+
+
+int
+main(int argc, char **argv)
+{
+ int a = call(argc, argc + 1);
+ return a == 0;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/compress_prepared_bcj_sparc b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/compress_prepared_bcj_sparc
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/compress_prepared_bcj_sparc
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/compress_prepared_bcj_x86 b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/compress_prepared_bcj_x86
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/compress_prepared_bcj_x86
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/create_compress_files.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/create_compress_files.c
new file mode 100644
index 00000000..395fefd1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/create_compress_files.c
@@ -0,0 +1,159 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file create_compress_files.c
+/// \brief Creates a bunch of test files to be compressed
+///
+/// Using a test file generator program saves space in the source code
+/// package considerably.
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "sysdefs.h"
+#include <stdio.h>
+
+
+// Avoid re-creating the test files every time the tests are run.
+#define create_test(name) \
+do { \
+ if (!file_exists("compress_generated_" #name)) { \
+ FILE *file = file_create("compress_generated_" #name); \
+ write_ ## name(file); \
+ file_finish(file, "compress_generated_" #name); \
+ } \
+} while (0)
+
+
+static bool
+file_exists(const char *filename)
+{
+ // Trying to be somewhat portable by avoiding stat().
+ FILE *file = fopen(filename, "rb");
+ bool ret;
+
+ if (file != NULL) {
+ fclose(file);
+ ret = true;
+ } else {
+ ret = false;
+ }
+
+ return ret;
+}
+
+
+static FILE *
+file_create(const char *filename)
+{
+ FILE *file = fopen(filename, "wb");
+
+ if (file == NULL) {
+ perror(filename);
+ exit(1);
+ }
+
+ return file;
+}
+
+
+static void
+file_finish(FILE *file, const char *filename)
+{
+ const bool ferror_fail = ferror(file);
+ const bool fclose_fail = fclose(file);
+
+ if (ferror_fail || fclose_fail) {
+ perror(filename);
+ exit(1);
+ }
+}
+
+
+// File that repeats "abc\n" a few thousand times. This is targeted
+// especially at the Subblock filter's run-length encoder.
+static void
+write_abc(FILE *file)
+{
+ for (size_t i = 0; i < 12345; ++i)
+ fwrite("abc\n", 4, 1, file);
+}
+
+
+// File that doesn't compress. We always use the same random seed to
+// generate identical files on all systems.
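+// The generator below is a plain 32-bit linear congruential recurrence;
+// each iteration writes the four bytes of n, least-significant byte first,
+// so the resulting file is roughly 482 KiB (123456 iterations * 4 bytes).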
+static void
+write_random(FILE *file)
+{
+ uint32_t n = 5;
+
+ for (size_t i = 0; i < 123456; ++i) {
+ n = 101771 * n + 71777;
+
+ putc(n & 0xFF, file);
+ putc((n >> 8) & 0xFF, file);
+ putc((n >> 16) & 0xFF, file);
+ putc(n >> 24, file);
+ }
+}
+
+
+// Text file
+static void
+write_text(FILE *file)
+{
+ static const char *lorem[] = {
+ "Lorem", "ipsum", "dolor", "sit", "amet,", "consectetur",
+ "adipisicing", "elit,", "sed", "do", "eiusmod", "tempor",
+ "incididunt", "ut", "labore", "et", "dolore", "magna",
+ "aliqua.", "Ut", "enim", "ad", "minim", "veniam,", "quis",
+ "nostrud", "exercitation", "ullamco", "laboris", "nisi",
+ "ut", "aliquip", "ex", "ea", "commodo", "consequat.",
+ "Duis", "aute", "irure", "dolor", "in", "reprehenderit",
+ "in", "voluptate", "velit", "esse", "cillum", "dolore",
+ "eu", "fugiat", "nulla", "pariatur.", "Excepteur", "sint",
+ "occaecat", "cupidatat", "non", "proident,", "sunt", "in",
+ "culpa", "qui", "officia", "deserunt", "mollit", "anim",
+ "id", "est", "laborum."
+ };
+
+ // Let the first paragraph be the original text.
+ for (size_t w = 0; w < ARRAY_SIZE(lorem); ++w) {
+ fprintf(file, "%s ", lorem[w]);
+
+ if (w % 7 == 6)
+ fprintf(file, "\n");
+ }
+
+ // The rest shall be (hopefully) meaningless combinations of
+ // the same words.
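+ // The same linear congruential recurrence as in write_random(), seeded
+ // differently, picks the words, so the output is identical on all systems.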
+ uint32_t n = 29;
+
+ for (size_t p = 0; p < 500; ++p) {
+ fprintf(file, "\n\n");
+
+ for (size_t w = 0; w < ARRAY_SIZE(lorem); ++w) {
+ n = 101771 * n + 71777;
+
+ fprintf(file, "%s ", lorem[n % ARRAY_SIZE(lorem)]);
+
+ if (w % 7 == 6)
+ fprintf(file, "\n");
+ }
+ }
+}
+
+
+int
+main(void)
+{
+ create_test(abc);
+ create_test(random);
+ create_test(text);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/README b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/README
new file mode 100644
index 00000000..392ff768
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/README
@@ -0,0 +1,232 @@
+
+.xz Test Files
+----------------
+
+0. Introduction
+
+ This directory contains a bunch of files to test handling of .xz files
+ in .xz decoder implementations. Many of the files have been created
+ by hand with a hex editor, thus there is no better "source code" than
+ the files themselves. All the test files (*.xz) and this README have
+ been put into the public domain.
+
+
+1. File Types
+
+ Good files (good-*.xz) must decode successfully without requiring
+ a lot of CPU time or RAM.
+
+ Unsupported files (unsupported-*.xz) are good files, but headers
+ indicate features not supported by the current file format
+ specification.
+
+ Bad files (bad-*.xz) must cause the decoder to give an error. Like
+ with the good files, these files must not require a lot of CPU time
+ or RAM before they are detected as broken.
+
+
+2. Descriptions of Individual Files
+
+2.1. Good Files
+
+ good-0-empty.xz has one Stream with no Blocks.
+
+ good-0pad-empty.xz has one Stream with no Blocks followed by
+ four-byte Stream Padding.
+
+ good-0cat-empty.xz has two zero-Block Streams concatenated without
+ Stream Padding.
+
+ good-0catpad-empty.xz has two zero-Block Streams concatenated with
+ four-byte Stream Padding between the Streams.
+
+ good-1-check-none.xz has one Stream with one Block with two
+ uncompressed LZMA2 chunks and no integrity check.
+
+ good-1-check-crc32.xz has one Stream with one Block with two
+ uncompressed LZMA2 chunks and CRC32 check.
+
+ good-1-check-crc64.xz is like good-1-check-crc32.xz but with CRC64.
+
+ good-1-check-sha256.xz is like good-1-check-crc32.xz but with
+ SHA256.
+
+ good-2-lzma2.xz has one Stream with two Blocks with one uncompressed
+ LZMA2 chunk in each Block.
+
+ good-1-block_header-1.xz has both Compressed Size and Uncompressed
+ Size in the Block Header. It also has four extra bytes of Header
+ Padding.
+
+ good-1-block_header-2.xz has known Compressed Size.
+
+ good-1-block_header-3.xz has known Uncompressed Size.
+
+ good-1-delta-lzma2.tiff.xz is an image file that compresses
+ better with Delta+LZMA2 than with plain LZMA2.
+
+ good-1-x86-lzma2.xz uses the x86 filter (BCJ) and LZMA2. The
+ uncompressed file is compress_prepared_bcj_x86 found in the tests
+ directory.
+
+ good-1-sparc-lzma2.xz uses the SPARC filter and LZMA2. The
+ uncompressed file is compress_prepared_bcj_sparc found in the tests
+ directory.
+
+ good-1-lzma2-1.xz has two LZMA2 chunks, of which the second sets
+ new properties.
+
+ good-1-lzma2-2.xz has two LZMA2 chunks, of which the second resets
+ the state without specifying new properties.
+
+ good-1-lzma2-3.xz has two LZMA2 chunks, of which the first is
+ uncompressed and the second is LZMA. The first chunk resets dictionary
+ and the second sets new properties.
+
+ good-1-lzma2-4.xz has three LZMA2 chunks: First is LZMA, second is
+ uncompressed with dictionary reset, and third is LZMA with new
+ properties but without dictionary reset.
+
+ good-1-3delta-lzma2.xz has three Delta filters and LZMA2.
+
+
+2.2. Unsupported Files
+
+ unsupported-check.xz uses Check ID 0x02 which isn't supported by
+ the current version of the file format. It is implementation-defined
+ how this file handled (it may reject it, or decode it possibly with
+ a warning).
+
+ unsupported-block_header.xz has a non-null byte in Header Padding,
+ which may indicate presence of a new unsupported field.
+
+ unsupported-filter_flags-1.xz has unsupported Filter ID 0x7F.
+
+ unsupported-filter_flags-2.xz specifies only Delta filter in the
+ List of Filter Flags, but Delta isn't allowed as the last filter in
+ the chain. It could be a little more correct to detect this file as
+ corrupt instead of unsupported, but saying it is unsupported is
+ simpler in case of liblzma.
+
+ unsupported-filter_flags-3.xz specifies two LZMA2 filters in the
+ List of Filter Flags. LZMA2 is allowed only as the last filter in the
+ chain. It could be a little more correct to detect this file as
+ corrupt instead of unsupported, but saying it is unsupported is
+ simpler in case of liblzma.
+
+
+2.3. Bad Files
+
+ bad-0pad-empty.xz has one Stream with no Blocks followed by
+ five-byte Stream Padding. Stream Padding must be a multiple of four
+ bytes, thus this file is corrupt.
+
+ bad-0catpad-empty.xz has two zero-Block Streams concatenated with
+ five-byte Stream Padding between the Streams.
+
+ bad-0cat-alone.xz is good-0-empty.xz concatenated with an empty
+ LZMA_Alone file.
+
+ bad-0cat-header_magic.xz is good-0cat-empty.xz but with one byte
+ wrong in the Header Magic Bytes field of the second Stream. liblzma
+ gives LZMA_DATA_ERROR for this. (LZMA_FORMAT_ERROR is used only if
+ the first Stream of a file has invalid Header Magic Bytes.)
+
+ bad-0-header_magic.xz is good-0-empty.xz but with one byte wrong
+ in the Header Magic Bytes field. liblzma gives LZMA_FORMAT_ERROR for
+ this.
+
+ bad-0-footer_magic.xz is good-0-empty.xz but with one byte wrong
+ in the Footer Magic Bytes field. liblzma gives LZMA_DATA_ERROR for
+ this.
+
+ bad-0-empty-truncated.xz is good-0-empty.xz without the last byte
+ of the file.
+
+ bad-0-nonempty_index.xz has no Blocks but Index claims that there is
+ one Block.
+
+ bad-0-backward_size.xz has wrong Backward Size in Stream Footer.
+
+ bad-1-stream_flags-1.xz has different Stream Flags in Stream Header
+ and Stream Footer.
+
+ bad-1-stream_flags-2.xz has wrong CRC32 in Stream Header.
+
+ bad-1-stream_flags-3.xz has wrong CRC32 in Stream Footer.
+
+ bad-1-vli-1.xz has a two-byte variable-length integer in the
+ Uncompressed Size field in Block Header while one byte would be enough
+ for that value. It's important that the file gets rejected due to too
+ big integer encoding instead of due to Uncompressed Size not matching
+ the value stored in the Block Header. That is, the decoder must not
+ try to decode the Compressed Data field.
+
+ bad-1-vli-2.xz has a ten-byte variable-length integer as Uncompressed
+ Size in Block Header. It's important that the file gets rejected due
+ to too big integer encoding instead of due to Uncompressed Size not
+ matching the value stored in the Block Header. That is, the decoder
+ must not try to decode the Compressed Data field.
+
+ bad-1-block_header-1.xz has Block Header that ends in the middle of
+ the Filter Flags field.
+
+ bad-1-block_header-2.xz has Block Header that has Compressed Size and
+ Uncompressed Size but no List of Filter Flags field.
+
+ bad-1-block_header-3.xz has wrong CRC32 in Block Header.
+
+ bad-1-block_header-4.xz has too big Compressed Size in Block Header
+ (2^63 - 1 bytes while maximum is a little less, because the whole
+ Block must stay smaller than 2^63). It's important that the file
+ gets rejected due to invalid Compressed Size value; the decoder
+ must not try decoding the Compressed Data field.
+
+ bad-1-block_header-5.xz has zero as Compressed Size in Block Header.
+
+ bad-2-index-1.xz has wrong Unpadded Sizes in Index.
+
+ bad-2-index-2.xz has wrong Uncompressed Sizes in Index.
+
+ bad-2-index-3.xz has non-null byte in Index Padding.
+
+ bad-2-index-4.xz has wrong CRC32 in Index.
+
+ bad-2-index-5.xz has zero as Unpadded Size. It is important that the
+ file gets rejected specifically due to Unpadded Size having an invalid
+ value.
+
+ bad-2-compressed_data_padding.xz has non-null byte in the padding of
+ the Compressed Data field of the first Block.
+
+ bad-1-check-crc32.xz has wrong Check (CRC32).
+
+ bad-1-check-crc64.xz has wrong Check (CRC64).
+
+ bad-1-check-sha256.xz has wrong Check (SHA-256).
+
+ bad-1-lzma2-1.xz has LZMA2 stream whose first chunk (uncompressed)
+ doesn't reset the dictionary.
+
+ bad-1-lzma2-2.xz has two LZMA2 chunks, of which the second chunk
+ indicates dictionary reset, but the LZMA compressed data tries to
+ repeat data from the previous chunk.
+
+ bad-1-lzma2-3.xz sets new invalid properties (lc=8, lp=0, pb=0) in
+ the middle of Block.
+
+ bad-1-lzma2-4.xz has two LZMA2 chunks, of which the first is
+ uncompressed and the second is LZMA. The first chunk resets dictionary
+ as it should, but the second chunk tries to reset state without
+ specifying properties for LZMA.
+
+ bad-1-lzma2-5.xz is like bad-1-lzma2-4.xz but doesn't try to reset
+ anything in the header of the second chunk.
+
+ bad-1-lzma2-6.xz has reserved LZMA2 control byte value (0x03).
+
+ bad-1-lzma2-7.xz has EOPM at LZMA level.
+
+ bad-1-lzma2-8.xz is like good-1-lzma2-4.xz but doesn't set new
+ properties in the third LZMA2 chunk.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-backward_size.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-backward_size.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-backward_size.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-empty-truncated.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-empty-truncated.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-empty-truncated.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-footer_magic.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-footer_magic.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-footer_magic.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-header_magic.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-header_magic.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-header_magic.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-nonempty_index.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-nonempty_index.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0-nonempty_index.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0cat-alone.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0cat-alone.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0cat-alone.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0cat-header_magic.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0cat-header_magic.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0cat-header_magic.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0catpad-empty.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0catpad-empty.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0catpad-empty.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0pad-empty.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0pad-empty.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-0pad-empty.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-1.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-1.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-1.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-3.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-3.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-3.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-4.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-4.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-4.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-5.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-5.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-block_header-5.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-crc32.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-crc32.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-crc32.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-crc64.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-crc64.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-crc64.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-sha256.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-sha256.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-check-sha256.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-1.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-1.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-1.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-3.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-3.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-3.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-4.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-4.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-4.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-5.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-5.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-5.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-6.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-6.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-6.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-7.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-7.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-7.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-8.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-8.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-lzma2-8.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-1.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-1.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-1.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-3.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-3.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-stream_flags-3.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-vli-1.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-vli-1.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-vli-1.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-vli-2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-vli-2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-1-vli-2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-compressed_data_padding.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-compressed_data_padding.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-compressed_data_padding.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-1.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-1.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-1.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-3.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-3.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-3.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-4.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-4.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-4.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-5.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-5.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/bad-2-index-5.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0-empty.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0-empty.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0-empty.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0cat-empty.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0cat-empty.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0cat-empty.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0catpad-empty.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0catpad-empty.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0catpad-empty.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0pad-empty.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0pad-empty.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-0pad-empty.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-3delta-lzma2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-3delta-lzma2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-3delta-lzma2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-1.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-1.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-1.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-3.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-3.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-block_header-3.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-crc32.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-crc32.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-crc32.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-crc64.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-crc64.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-crc64.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-none.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-none.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-none.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-sha256.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-sha256.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-check-sha256.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-delta-lzma2.tiff.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-delta-lzma2.tiff.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-delta-lzma2.tiff.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-1.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-1.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-1.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-3.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-3.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-3.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-4.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-4.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-lzma2-4.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-sparc-lzma2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-sparc-lzma2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-sparc-lzma2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-x86-lzma2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-x86-lzma2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-1-x86-lzma2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-2-lzma2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-2-lzma2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/good-2-lzma2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-block_header.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-block_header.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-block_header.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-check.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-check.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-check.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-1.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-1.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-1.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-2.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-2.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-2.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-3.xz b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-3.xz
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/files/unsupported-filter_flags-3.xz
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_block_header.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_block_header.c
new file mode 100644
index 00000000..d77d99a5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_block_header.c
@@ -0,0 +1,242 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file test_block_header.c
+/// \brief Tests Block Header coders
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "tests.h"
+
+
+static uint8_t buf[LZMA_BLOCK_HEADER_SIZE_MAX];
+static lzma_block known_options;
+static lzma_block decoded_options;
+
+static lzma_options_lzma opt_lzma;
+
+static lzma_filter filters_none[1] = {
+ {
+ .id = LZMA_VLI_UNKNOWN,
+ },
+};
+
+
+static lzma_filter filters_one[2] = {
+ {
+ .id = LZMA_FILTER_LZMA2,
+ .options = &opt_lzma,
+ }, {
+ .id = LZMA_VLI_UNKNOWN,
+ }
+};
+
+
+static lzma_filter filters_four[5] = {
+ {
+ .id = LZMA_FILTER_X86,
+ .options = NULL,
+ }, {
+ .id = LZMA_FILTER_X86,
+ .options = NULL,
+ }, {
+ .id = LZMA_FILTER_X86,
+ .options = NULL,
+ }, {
+ .id = LZMA_FILTER_LZMA2,
+ .options = &opt_lzma,
+ }, {
+ .id = LZMA_VLI_UNKNOWN,
+ }
+};
+
+
+static lzma_filter filters_five[6] = {
+ {
+ .id = LZMA_FILTER_X86,
+ .options = NULL,
+ }, {
+ .id = LZMA_FILTER_X86,
+ .options = NULL,
+ }, {
+ .id = LZMA_FILTER_X86,
+ .options = NULL,
+ }, {
+ .id = LZMA_FILTER_X86,
+ .options = NULL,
+ }, {
+ .id = LZMA_FILTER_LZMA2,
+ .options = &opt_lzma,
+ }, {
+ .id = LZMA_VLI_UNKNOWN,
+ }
+};
+
+
+static void
+code(void)
+{
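+ // Round-trip helper: encode known_options into buf, decode buf back into
+ // decoded_options, and verify that the sizes and filter IDs are preserved.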
+ expect(lzma_block_header_encode(&known_options, buf) == LZMA_OK);
+
+ lzma_filter filters[LZMA_FILTERS_MAX + 1];
+ memcrap(filters, sizeof(filters));
+ memcrap(&decoded_options, sizeof(decoded_options));
+
+ decoded_options.header_size = known_options.header_size;
+ decoded_options.check = known_options.check;
+ decoded_options.filters = filters;
+ expect(lzma_block_header_decode(&decoded_options, NULL, buf)
+ == LZMA_OK);
+
+ expect(known_options.compressed_size
+ == decoded_options.compressed_size);
+ expect(known_options.uncompressed_size
+ == decoded_options.uncompressed_size);
+
+ for (size_t i = 0; known_options.filters[i].id
+ != LZMA_VLI_UNKNOWN; ++i)
+ expect(known_options.filters[i].id == filters[i].id);
+
+ for (size_t i = 0; i < LZMA_FILTERS_MAX; ++i)
+ free(decoded_options.filters[i].options);
+}
+
+
+static void
+test1(void)
+{
+ known_options = (lzma_block){
+ .check = LZMA_CHECK_NONE,
+ .compressed_size = LZMA_VLI_UNKNOWN,
+ .uncompressed_size = LZMA_VLI_UNKNOWN,
+ .filters = NULL,
+ };
+
+ expect(lzma_block_header_size(&known_options) == LZMA_PROG_ERROR);
+
+ known_options.filters = filters_none;
+ expect(lzma_block_header_size(&known_options) == LZMA_PROG_ERROR);
+
+ known_options.filters = filters_five;
+ expect(lzma_block_header_size(&known_options) == LZMA_PROG_ERROR);
+
+ known_options.filters = filters_one;
+ expect(lzma_block_header_size(&known_options) == LZMA_OK);
+
+ known_options.check = 999; // Some invalid value, which gets ignored.
+ expect(lzma_block_header_size(&known_options) == LZMA_OK);
+
+ known_options.compressed_size = 5;
+ expect(lzma_block_header_size(&known_options) == LZMA_OK);
+
+ known_options.compressed_size = 0; // Cannot be zero.
+ expect(lzma_block_header_size(&known_options) == LZMA_PROG_ERROR);
+
+ // LZMA_VLI_MAX is too big to keep the total size of the Block
+ // a valid VLI, but lzma_block_header_size() is not meant
+ // to validate it. (lzma_block_header_encode() must validate it.)
+ known_options.compressed_size = LZMA_VLI_MAX;
+ expect(lzma_block_header_size(&known_options) == LZMA_OK);
+
+ known_options.compressed_size = LZMA_VLI_UNKNOWN;
+ known_options.uncompressed_size = 0;
+ expect(lzma_block_header_size(&known_options) == LZMA_OK);
+
+ known_options.uncompressed_size = LZMA_VLI_MAX + 1;
+ expect(lzma_block_header_size(&known_options) == LZMA_PROG_ERROR);
+}
+
+
+static void
+test2(void)
+{
+ known_options = (lzma_block){
+ .check = LZMA_CHECK_CRC32,
+ .compressed_size = LZMA_VLI_UNKNOWN,
+ .uncompressed_size = LZMA_VLI_UNKNOWN,
+ .filters = filters_four,
+ };
+
+ expect(lzma_block_header_size(&known_options) == LZMA_OK);
+ code();
+
+ known_options.compressed_size = 123456;
+ known_options.uncompressed_size = 234567;
+ expect(lzma_block_header_size(&known_options) == LZMA_OK);
+ code();
+
+ // We can make the sizes smaller while keeping the header size
+ // the same.
+ known_options.compressed_size = 12;
+ known_options.uncompressed_size = 23;
+ code();
+}
+
+
+static void
+test3(void)
+{
+ known_options = (lzma_block){
+ .check = LZMA_CHECK_CRC32,
+ .compressed_size = LZMA_VLI_UNKNOWN,
+ .uncompressed_size = LZMA_VLI_UNKNOWN,
+ .filters = filters_one,
+ };
+
+ expect(lzma_block_header_size(&known_options) == LZMA_OK);
+ known_options.header_size += 4;
+ expect(lzma_block_header_encode(&known_options, buf) == LZMA_OK);
+
+ lzma_filter filters[LZMA_FILTERS_MAX + 1];
+ decoded_options.header_size = known_options.header_size;
+ decoded_options.check = known_options.check;
+ decoded_options.filters = filters;
+
+ // Wrong size
+ ++buf[0];
+ expect(lzma_block_header_decode(&decoded_options, NULL, buf)
+ == LZMA_PROG_ERROR);
+ --buf[0];
+
+ // Wrong CRC32
+ buf[known_options.header_size - 1] ^= 1;
+ expect(lzma_block_header_decode(&decoded_options, NULL, buf)
+ == LZMA_DATA_ERROR);
+ buf[known_options.header_size - 1] ^= 1;
+
+ // Unsupported filter
+ // NOTE: This may need updating when new IDs become supported.
+ buf[2] ^= 0x1F;
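+ // Recompute the header CRC32 so the decoder reaches the filter ID check
+ // instead of failing on a checksum mismatch.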
+ integer_write_32(buf + known_options.header_size - 4,
+ lzma_crc32(buf, known_options.header_size - 4, 0));
+ expect(lzma_block_header_decode(&decoded_options, NULL, buf)
+ == LZMA_OPTIONS_ERROR);
+ buf[2] ^= 0x1F;
+
+ // Non-nul Padding
+ buf[known_options.header_size - 4 - 1] ^= 1;
+ integer_write_32(buf + known_options.header_size - 4,
+ lzma_crc32(buf, known_options.header_size - 4, 0));
+ expect(lzma_block_header_decode(&decoded_options, NULL, buf)
+ == LZMA_OPTIONS_ERROR);
+ buf[known_options.header_size - 4 - 1] ^= 1;
+}
+
+
+int
+main(void)
+{
+ succeed(lzma_lzma_preset(&opt_lzma, 1));
+
+ test1();
+ test2();
+ test3();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_check.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_check.c
new file mode 100644
index 00000000..08affa80
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_check.c
@@ -0,0 +1,85 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file test_check.c
+/// \brief Tests integrity checks
+///
+/// \todo Add SHA256
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "tests.h"
+
+
+static const uint8_t test_string[9] = "123456789";
+static const uint8_t test_unaligned[12] = "xxx123456789";
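+// The "xxx" prefix is skipped at run time (the tests pass test_unaligned + 3),
+// so the CRC routines are also exercised with an unaligned input pointer.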
+
+
+static bool
+test_crc32(void)
+{
+ static const uint32_t test_vector = 0xCBF43926;
+
+ // Test 1
+ uint32_t crc = lzma_crc32(test_string, sizeof(test_string), 0);
+ if (crc != test_vector)
+ return true;
+
+ // Test 2
+ crc = lzma_crc32(test_unaligned + 3, sizeof(test_string), 0);
+ if (crc != test_vector)
+ return true;
+
+ // Test 3
+ crc = 0;
+ for (size_t i = 0; i < sizeof(test_string); ++i)
+ crc = lzma_crc32(test_string + i, 1, crc);
+ if (crc != test_vector)
+ return true;
+
+ return false;
+}
+
+
+static bool
+test_crc64(void)
+{
+ static const uint64_t test_vector = 0x995DC9BBDF1939FA;
+
+ // Test 1
+ uint64_t crc = lzma_crc64(test_string, sizeof(test_string), 0);
+ if (crc != test_vector)
+ return true;
+
+ // Test 2
+ crc = lzma_crc64(test_unaligned + 3, sizeof(test_string), 0);
+ if (crc != test_vector)
+ return true;
+
+ // Test 3
+ crc = 0;
+ for (size_t i = 0; i < sizeof(test_string); ++i)
+ crc = lzma_crc64(test_string + i, 1, crc);
+ if (crc != test_vector)
+ return true;
+
+ return false;
+}
+
+
+int
+main(void)
+{
+ bool error = false;
+
+ error |= test_crc32();
+ error |= test_crc64();
+
+ return error ? 1 : 0;
+}
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh
new file mode 100755
index 00000000..ff0cb304
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh
@@ -0,0 +1,129 @@
+#!/bin/sh
+
+###############################################################################
+#
+# Author: Lasse Collin
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+###############################################################################
+
+# Find out if our shell supports functions.
+eval 'unset foo ; foo() { return 42; } ; foo'
+if test $? != 42 ; then
+ echo "/bin/sh doesn't support functions, skipping this test."
+ (exit 77)
+ exit 77
+fi
+
+test_xz() {
+ if $XZ -c "$@" "$FILE" > tmp_compressed; then
+ :
+ else
+ echo "Compressing failed: $* $FILE"
+ (exit 1)
+ exit 1
+ fi
+
+ if $XZ -cd tmp_compressed > tmp_uncompressed ; then
+ :
+ else
+ echo "Decoding failed: $* $FILE"
+ (exit 1)
+ exit 1
+ fi
+
+ if cmp tmp_uncompressed "$FILE" ; then
+ :
+ else
+ echo "Decoded file does not match the original: $* $FILE"
+ (exit 1)
+ exit 1
+ fi
+
+ if $XZDEC tmp_compressed > tmp_uncompressed ; then
+ :
+ else
+ echo "Decoding failed: $* $FILE"
+ (exit 1)
+ exit 1
+ fi
+
+ if cmp tmp_uncompressed "$FILE" ; then
+ :
+ else
+ echo "Decoded file does not match the original: $* $FILE"
+ (exit 1)
+ exit 1
+ fi
+
+ # Show progress:
+ echo . | tr -d '\n\r'
+}
+
+XZ="../src/xz/xz --memory=28MiB --threads=1"
+XZDEC="../src/xzdec/xzdec --memory=4MiB"
+unset XZ_OPT
+
+# Create the required input files.
+if ./create_compress_files ; then
+ :
+else
+ rm -f compress_*
+ echo "Failed to create files to test compression."
+ (exit 1)
+ exit 1
+fi
+
+# Remove temporary files now (in case they are something weird), and on exit.
+rm -f tmp_compressed tmp_uncompressed
+trap 'rm -f tmp_compressed tmp_uncompressed' 0
+
+# Encode and decode each file with various filter configurations.
+# This takes quite a bit of time.
+echo "test_compress.sh:"
+for FILE in compress_generated_* "$srcdir"/compress_prepared_*
+do
+ MSG=`echo "x$FILE" | sed 's,^x,,; s,^.*/,,; s,^compress_,,'`
+ echo " $MSG" | tr -d '\n\r'
+
+ # Don't test with empty arguments; it breaks some ancient
+ # proprietary /bin/sh versions due to $@ used in test_xz().
+ test_xz -1
+ test_xz -2
+ test_xz -3
+ test_xz -4
+
+ # Disabled until Subblock format is stable.
+# --subblock \
+# --subblock=size=1 \
+# --subblock=size=1,rle=1 \
+# --subblock=size=1,rle=4 \
+# --subblock=size=4,rle=4 \
+# --subblock=size=8,rle=4 \
+# --subblock=size=8,rle=8 \
+# --subblock=size=4096,rle=12 \
+#
+ for ARGS in \
+ --delta=dist=1 \
+ --delta=dist=4 \
+ --delta=dist=256 \
+ --x86 \
+ --powerpc \
+ --ia64 \
+ --arm \
+ --armthumb \
+ --sparc
+ do
+ test_xz $ARGS --lzma2=dict=64KiB,nice=32,mode=fast
+
+ # Disabled until Subblock format is stable.
+ # test_xz --subblock $ARGS --lzma2=dict=64KiB,nice=32,mode=fast
+ done
+
+ echo
+done
+
+(exit 0)
+exit 0
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh
new file mode 100755
index 00000000..7dd9a390
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+###############################################################################
+#
+# Author: Lasse Collin
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+###############################################################################
+
+for I in "$srcdir"/files/good-*.xz
+do
+ if ../src/xzdec/xzdec "$I" > /dev/null 2> /dev/null ; then
+ :
+ else
+ echo "Good file failed: $I"
+ (exit 1)
+ exit 1
+ fi
+done
+
+for I in "$srcdir"/files/bad-*.xz
+do
+ if ../src/xzdec/xzdec "$I" > /dev/null 2> /dev/null ; then
+ echo "Bad file succeeded: $I"
+ (exit 1)
+ exit 1
+ fi
+done
+
+(exit 0)
+exit 0
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_filter_flags.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_filter_flags.c
new file mode 100644
index 00000000..3b20cbfc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_filter_flags.c
@@ -0,0 +1,283 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file test_filter_flags.c
+/// \brief Tests Filter Flags coders
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "tests.h"
+
+
+static uint8_t buffer[4096];
+static lzma_filter known_flags;
+static lzma_filter decoded_flags;
+static lzma_stream strm = LZMA_STREAM_INIT;
+
+
+static bool
+encode(uint32_t known_size)
+{
+ memcrap(buffer, sizeof(buffer));
+
+ uint32_t tmp;
+ if (lzma_filter_flags_size(&tmp, &known_flags) != LZMA_OK)
+ return true;
+
+ if (tmp != known_size)
+ return true;
+
+ size_t out_pos = 0;
+ if (lzma_filter_flags_encode(&known_flags,
+ buffer, &out_pos, known_size) != LZMA_OK)
+ return true;
+
+ if (out_pos != known_size)
+ return true;
+
+ return false;
+}
+
+
+static bool
+decode_ret(uint32_t known_size, lzma_ret expected_ret)
+{
+ memcrap(&decoded_flags, sizeof(decoded_flags));
+
+ size_t pos = 0;
+ if (lzma_filter_flags_decode(&decoded_flags, NULL,
+ buffer, &pos, known_size) != expected_ret
+ || pos != known_size)
+ return true;
+
+ return false;
+}
+
+
+static bool
+decode(uint32_t known_size)
+{
+ if (decode_ret(known_size, LZMA_OK))
+ return true;
+
+ if (known_flags.id != decoded_flags.id)
+ return true;
+
+ return false;
+}
+
+
+#if defined(HAVE_ENCODER_SUBBLOCK) && defined(HAVE_DECODER_SUBBLOCK)
+static void
+test_subblock(void)
+{
+ // Test 1
+ known_flags.id = LZMA_FILTER_SUBBLOCK;
+ known_flags.options = NULL;
+ expect(!encode(2));
+ expect(!decode(2));
+ expect(decoded_flags.options == NULL);
+
+ // Test 2
+ buffer[0] = LZMA_FILTER_SUBBLOCK;
+ buffer[1] = 1;
+ buffer[2] = 0;
+ expect(!decode_ret(3, LZMA_OPTIONS_ERROR));
+}
+#endif
+
+
+#if defined(HAVE_ENCODER_X86) && defined(HAVE_DECODER_X86)
+static void
+test_bcj(void)
+{
+ // Test 1
+ known_flags.id = LZMA_FILTER_X86;
+ known_flags.options = NULL;
+
+ expect(!encode(2));
+ expect(!decode(2));
+ expect(decoded_flags.options == NULL);
+
+ // Test 2
+ lzma_options_bcj options;
+ options.start_offset = 0;
+ known_flags.options = &options;
+ expect(!encode(2));
+ expect(!decode(2));
+ expect(decoded_flags.options == NULL);
+
+ // Test 3
+ options.start_offset = 123456;
+ known_flags.options = &options;
+ expect(!encode(6));
+ expect(!decode(6));
+ expect(decoded_flags.options != NULL);
+
+ lzma_options_bcj *decoded = decoded_flags.options;
+ expect(decoded->start_offset == options.start_offset);
+
+ free(decoded);
+}
+#endif
+
+
+#if defined(HAVE_ENCODER_DELTA) && defined(HAVE_DECODER_DELTA)
+static void
+test_delta(void)
+{
+ // Test 1
+ known_flags.id = LZMA_FILTER_DELTA;
+ known_flags.options = NULL;
+ expect(encode(99));
+
+ // Test 2
+ lzma_options_delta options = {
+ .type = LZMA_DELTA_TYPE_BYTE,
+ .dist = 0
+ };
+ known_flags.options = &options;
+ expect(encode(99));
+
+ // Test 3
+ options.dist = LZMA_DELTA_DIST_MIN;
+ expect(!encode(3));
+ expect(!decode(3));
+ expect(((lzma_options_delta *)(decoded_flags.options))->dist
+ == options.dist);
+
+ free(decoded_flags.options);
+
+ // Test 4
+ options.dist = LZMA_DELTA_DIST_MAX;
+ expect(!encode(3));
+ expect(!decode(3));
+ expect(((lzma_options_delta *)(decoded_flags.options))->dist
+ == options.dist);
+
+ free(decoded_flags.options);
+
+ // Test 5
+ options.dist = LZMA_DELTA_DIST_MAX + 1;
+ expect(encode(99));
+}
+#endif
+
+/*
+#ifdef HAVE_FILTER_LZMA
+static void
+validate_lzma(void)
+{
+ const lzma_options_lzma *known = known_flags.options;
+ const lzma_options_lzma *decoded = decoded_flags.options;
+
+ expect(known->dictionary_size <= decoded->dictionary_size);
+
+ if (known->dictionary_size == 1)
+ expect(decoded->dictionary_size == 1);
+ else
+ expect(known->dictionary_size + known->dictionary_size / 2
+ > decoded->dictionary_size);
+
+ expect(known->literal_context_bits == decoded->literal_context_bits);
+ expect(known->literal_pos_bits == decoded->literal_pos_bits);
+ expect(known->pos_bits == decoded->pos_bits);
+}
+
+
+static void
+test_lzma(void)
+{
+ // Test 1
+ known_flags.id = LZMA_FILTER_LZMA1;
+ known_flags.options = NULL;
+ expect(encode(99));
+
+ // Test 2
+ lzma_options_lzma options = {
+ .dictionary_size = 0,
+ .literal_context_bits = 0,
+ .literal_pos_bits = 0,
+ .pos_bits = 0,
+ .preset_dictionary = NULL,
+ .preset_dictionary_size = 0,
+ .mode = LZMA_MODE_INVALID,
+ .fast_bytes = 0,
+ .match_finder = LZMA_MF_INVALID,
+ .match_finder_cycles = 0,
+ };
+
+ // Test 3 (empty dictionary not allowed)
+ known_flags.options = &options;
+ expect(encode(99));
+
+ // Test 4 (brute-force test some valid dictionary sizes)
+ options.dictionary_size = LZMA_DICTIONARY_SIZE_MIN;
+ while (options.dictionary_size != LZMA_DICTIONARY_SIZE_MAX) {
+ if (++options.dictionary_size == 5000)
+ options.dictionary_size = LZMA_DICTIONARY_SIZE_MAX - 5;
+
+ expect(!encode(4));
+ expect(!decode(4));
+ validate_lzma();
+
+ free(decoded_flags.options);
+ }
+
+ // Test 5 (too big dictionary size)
+ options.dictionary_size = LZMA_DICTIONARY_SIZE_MAX + 1;
+ expect(encode(99));
+
+ // Test 6 (brute-force test lc/lp/pb)
+ options.dictionary_size = LZMA_DICTIONARY_SIZE_MIN;
+ for (uint32_t lc = LZMA_LITERAL_CONTEXT_BITS_MIN;
+ lc <= LZMA_LITERAL_CONTEXT_BITS_MAX; ++lc) {
+ for (uint32_t lp = LZMA_LITERAL_POS_BITS_MIN;
+ lp <= LZMA_LITERAL_POS_BITS_MAX; ++lp) {
+ for (uint32_t pb = LZMA_POS_BITS_MIN;
+ pb <= LZMA_POS_BITS_MAX; ++pb) {
+ if (lc + lp > LZMA_LITERAL_BITS_MAX)
+ continue;
+
+ options.literal_context_bits = lc;
+ options.literal_pos_bits = lp;
+ options.pos_bits = pb;
+
+ expect(!encode(4));
+ expect(!decode(4));
+ validate_lzma();
+
+ free(decoded_flags.options);
+ }
+ }
+ }
+}
+#endif
+*/
+
+int
+main(void)
+{
+#if defined(HAVE_ENCODER_SUBBLOCK) && defined(HAVE_DECODER_SUBBLOCK)
+ test_subblock();
+#endif
+#if defined(HAVE_ENCODER_X86) && defined(HAVE_DECODER_X86)
+ test_bcj();
+#endif
+#if defined(HAVE_ENCODER_DELTA) && defined(HAVE_DECODER_DELTA)
+ test_delta();
+#endif
+// #ifdef HAVE_FILTER_LZMA
+// test_lzma();
+// #endif
+
+ lzma_end(&strm);
+
+ return 0;
+}
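Not part of the patch: outside the test harness, the three calls exercised above compose into a compact round trip. A minimal sketch for the Delta filter, using only the functions and types already referenced in this test file:

    /* Illustrative only: size, encode, decode, and compare Filter Flags. */
    #include <lzma.h>
    #include <stdlib.h>

    int delta_flags_roundtrip(void)
    {
        lzma_options_delta opts = { .type = LZMA_DELTA_TYPE_BYTE, .dist = 4 };
        lzma_filter in = { .id = LZMA_FILTER_DELTA, .options = &opts };
        lzma_filter out;
        uint8_t buf[64];
        uint32_t size;
        size_t pos = 0;

        if (lzma_filter_flags_size(&size, &in) != LZMA_OK)
            return 1;

        if (lzma_filter_flags_encode(&in, buf, &pos, size) != LZMA_OK
                || pos != size)
            return 1;

        pos = 0;
        if (lzma_filter_flags_decode(&out, NULL, buf, &pos, size) != LZMA_OK)
            return 1;

        /* The decoder allocates the options; free them when done. */
        int ok = out.id == LZMA_FILTER_DELTA
                && ((lzma_options_delta *)(out.options))->dist == opts.dist;
        free(out.options);
        return ok ? 0 : 1;
    }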
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_index.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_index.c
new file mode 100644
index 00000000..48d48759
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_index.c
@@ -0,0 +1,534 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file test_index.c
+/// \brief Tests functions handling the lzma_index structure
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "tests.h"
+
+#define MEMLIMIT (LZMA_VLI_C(1) << 20)
+
+
+static lzma_index *
+create_empty(void)
+{
+ lzma_index *i = lzma_index_init(NULL, NULL);
+ expect(i != NULL);
+ return i;
+}
+
+
+static lzma_index *
+create_small(void)
+{
+ lzma_index *i = lzma_index_init(NULL, NULL);
+ expect(i != NULL);
+ expect(lzma_index_append(i, NULL, 101, 555) == LZMA_OK);
+ expect(lzma_index_append(i, NULL, 602, 777) == LZMA_OK);
+ expect(lzma_index_append(i, NULL, 804, 999) == LZMA_OK);
+ return i;
+}
+
+
+static lzma_index *
+create_big(void)
+{
+ lzma_index *i = lzma_index_init(NULL, NULL);
+ expect(i != NULL);
+
+ lzma_vli total_size = 0;
+ lzma_vli uncompressed_size = 0;
+
+ // Add pseudo-random sizes (but always the same size values).
+ const size_t count = 5555;
+ uint32_t n = 11;
+ for (size_t j = 0; j < count; ++j) {
+ n = 7019 * n + 7607;
+ const uint32_t t = n * 3011;
+ expect(lzma_index_append(i, NULL, t, n) == LZMA_OK);
+ total_size += (t + 3) & ~LZMA_VLI_C(3);
+ uncompressed_size += n;
+ }
+
+ expect(lzma_index_count(i) == count);
+ expect(lzma_index_total_size(i) == total_size);
+ expect(lzma_index_uncompressed_size(i) == uncompressed_size);
+ expect(lzma_index_total_size(i) + lzma_index_size(i)
+ + 2 * LZMA_STREAM_HEADER_SIZE
+ == lzma_index_stream_size(i));
+
+ return i;
+}
+
+
+static void
+test_equal(void)
+{
+ lzma_index *a = create_empty();
+ lzma_index *b = create_small();
+ lzma_index *c = create_big();
+ expect(a && b && c);
+
+ expect(lzma_index_equal(a, a));
+ expect(lzma_index_equal(b, b));
+ expect(lzma_index_equal(c, c));
+
+ expect(!lzma_index_equal(a, b));
+ expect(!lzma_index_equal(a, c));
+ expect(!lzma_index_equal(b, c));
+
+ lzma_index_end(a, NULL);
+ lzma_index_end(b, NULL);
+ lzma_index_end(c, NULL);
+}
+
+
+static void
+test_overflow(void)
+{
+ // Integer overflow tests
+ lzma_index *i = create_empty();
+
+ expect(lzma_index_append(i, NULL, LZMA_VLI_MAX - 5, 1234)
+ == LZMA_DATA_ERROR);
+
+ // TODO
+
+ lzma_index_end(i, NULL);
+}
+
+
+static void
+test_copy(const lzma_index *i)
+{
+ lzma_index *d = lzma_index_dup(i, NULL);
+ expect(d != NULL);
+ lzma_index_end(d, NULL);
+}
+
+
+static void
+test_read(lzma_index *i)
+{
+ lzma_index_record record;
+
+ // Try twice so we see that rewinding works.
+ for (size_t j = 0; j < 2; ++j) {
+ lzma_vli total_size = 0;
+ lzma_vli uncompressed_size = 0;
+ lzma_vli stream_offset = LZMA_STREAM_HEADER_SIZE;
+ lzma_vli uncompressed_offset = 0;
+ uint32_t count = 0;
+
+ while (!lzma_index_read(i, &record)) {
+ ++count;
+
+ total_size += record.total_size;
+ uncompressed_size += record.uncompressed_size;
+
+ expect(record.stream_offset == stream_offset);
+ expect(record.uncompressed_offset
+ == uncompressed_offset);
+
+ stream_offset += record.total_size;
+ uncompressed_offset += record.uncompressed_size;
+ }
+
+ expect(lzma_index_total_size(i) == total_size);
+ expect(lzma_index_uncompressed_size(i) == uncompressed_size);
+ expect(lzma_index_count(i) == count);
+
+ lzma_index_rewind(i);
+ }
+}
+
+
+static void
+test_code(lzma_index *i)
+{
+ const size_t alloc_size = 128 * 1024;
+ uint8_t *buf = malloc(alloc_size);
+ expect(buf != NULL);
+
+ // Encode
+ lzma_stream strm = LZMA_STREAM_INIT;
+ expect(lzma_index_encoder(&strm, i) == LZMA_OK);
+ const lzma_vli index_size = lzma_index_size(i);
+ succeed(coder_loop(&strm, NULL, 0, buf, index_size,
+ LZMA_STREAM_END, LZMA_RUN));
+
+ // Decode
+ lzma_index *d;
+ expect(lzma_index_decoder(&strm, &d, MEMLIMIT) == LZMA_OK);
+ succeed(decoder_loop(&strm, buf, index_size));
+
+ expect(lzma_index_equal(i, d));
+
+ lzma_index_end(d, NULL);
+ lzma_end(&strm);
+
+ // Decode with hashing
+ lzma_index_hash *h = lzma_index_hash_init(NULL, NULL);
+ expect(h != NULL);
+ lzma_index_rewind(i);
+ lzma_index_record r;
+ while (!lzma_index_read(i, &r))
+ expect(lzma_index_hash_append(h, r.unpadded_size,
+ r.uncompressed_size) == LZMA_OK);
+ size_t pos = 0;
+ while (pos < index_size - 1)
+ expect(lzma_index_hash_decode(h, buf, &pos, pos + 1)
+ == LZMA_OK);
+ expect(lzma_index_hash_decode(h, buf, &pos, pos + 1)
+ == LZMA_STREAM_END);
+
+ lzma_index_hash_end(h, NULL);
+
+ // Encode buffer
+ size_t buf_pos = 1;
+ expect(lzma_index_buffer_encode(i, buf, &buf_pos, index_size)
+ == LZMA_BUF_ERROR);
+ expect(buf_pos == 1);
+
+ succeed(lzma_index_buffer_encode(i, buf, &buf_pos, index_size + 1));
+ expect(buf_pos == index_size + 1);
+
+ // Decode buffer
+ buf_pos = 1;
+ uint64_t memlimit = MEMLIMIT;
+ expect(lzma_index_buffer_decode(&d, &memlimit, NULL, buf, &buf_pos,
+ index_size) == LZMA_DATA_ERROR);
+ expect(buf_pos == 1);
+ expect(d == NULL);
+
+ succeed(lzma_index_buffer_decode(&d, &memlimit, NULL, buf, &buf_pos,
+ index_size + 1));
+ expect(buf_pos == index_size + 1);
+ expect(lzma_index_equal(i, d));
+
+ lzma_index_end(d, NULL);
+
+ free(buf);
+}
+
+
+static void
+test_many(lzma_index *i)
+{
+ test_copy(i);
+ test_read(i);
+ test_code(i);
+}
+
+
+static void
+test_cat(void)
+{
+ lzma_index *a, *b, *c;
+
+ // Empty Indexes
+ a = create_empty();
+ b = create_empty();
+ expect(lzma_index_cat(a, b, NULL, 0) == LZMA_OK);
+ expect(lzma_index_count(a) == 0);
+ expect(lzma_index_stream_size(a) == 2 * LZMA_STREAM_HEADER_SIZE + 8);
+ expect(lzma_index_file_size(a)
+ == 2 * (2 * LZMA_STREAM_HEADER_SIZE + 8));
+
+ b = create_empty();
+ expect(lzma_index_cat(a, b, NULL, 0) == LZMA_OK);
+ expect(lzma_index_count(a) == 0);
+ expect(lzma_index_stream_size(a) == 2 * LZMA_STREAM_HEADER_SIZE + 8);
+ expect(lzma_index_file_size(a)
+ == 3 * (2 * LZMA_STREAM_HEADER_SIZE + 8));
+
+ b = create_empty();
+ c = create_empty();
+ expect(lzma_index_cat(b, c, NULL, 4) == LZMA_OK);
+ expect(lzma_index_count(b) == 0);
+ expect(lzma_index_stream_size(b) == 2 * LZMA_STREAM_HEADER_SIZE + 8);
+ expect(lzma_index_file_size(b)
+ == 2 * (2 * LZMA_STREAM_HEADER_SIZE + 8) + 4);
+
+ expect(lzma_index_cat(a, b, NULL, 8) == LZMA_OK);
+ expect(lzma_index_count(a) == 0);
+ expect(lzma_index_stream_size(a) == 2 * LZMA_STREAM_HEADER_SIZE + 8);
+ expect(lzma_index_file_size(a)
+ == 5 * (2 * LZMA_STREAM_HEADER_SIZE + 8) + 4 + 8);
+
+ lzma_index_end(a, NULL);
+
+ // Small Indexes
+ a = create_small();
+ lzma_vli stream_size = lzma_index_stream_size(a);
+ b = create_small();
+ expect(lzma_index_cat(a, b, NULL, 4) == LZMA_OK);
+ expect(lzma_index_file_size(a) == stream_size * 2 + 4);
+ expect(lzma_index_stream_size(a) > stream_size);
+ expect(lzma_index_stream_size(a) < stream_size * 2);
+
+ b = create_small();
+ c = create_small();
+ expect(lzma_index_cat(b, c, NULL, 8) == LZMA_OK);
+ expect(lzma_index_cat(a, b, NULL, 12) == LZMA_OK);
+ expect(lzma_index_file_size(a) == stream_size * 4 + 4 + 8 + 12);
+
+ lzma_index_end(a, NULL);
+
+ // Big Indexes
+ a = create_big();
+ stream_size = lzma_index_stream_size(a);
+ b = create_big();
+ expect(lzma_index_cat(a, b, NULL, 4) == LZMA_OK);
+ expect(lzma_index_file_size(a) == stream_size * 2 + 4);
+ expect(lzma_index_stream_size(a) > stream_size);
+ expect(lzma_index_stream_size(a) < stream_size * 2);
+
+ b = create_big();
+ c = create_big();
+ expect(lzma_index_cat(b, c, NULL, 8) == LZMA_OK);
+ expect(lzma_index_cat(a, b, NULL, 12) == LZMA_OK);
+ expect(lzma_index_file_size(a) == stream_size * 4 + 4 + 8 + 12);
+
+ lzma_index_end(a, NULL);
+}
+
+
+static void
+test_locate(void)
+{
+ lzma_index_record r;
+ lzma_index *i = lzma_index_init(NULL, NULL);
+ expect(i != NULL);
+
+ // Cannot locate anything from an empty Index.
+ expect(lzma_index_locate(i, &r, 0));
+ expect(lzma_index_locate(i, &r, 555));
+
+ // One empty Record: nothing is found since there's no uncompressed
+ // data.
+ expect(lzma_index_append(i, NULL, 16, 0) == LZMA_OK);
+ expect(lzma_index_locate(i, &r, 0));
+
+ // Non-empty Record and we can find something.
+ expect(lzma_index_append(i, NULL, 32, 5) == LZMA_OK);
+ expect(!lzma_index_locate(i, &r, 0));
+ expect(r.total_size == 32);
+ expect(r.uncompressed_size == 5);
+ expect(r.stream_offset == LZMA_STREAM_HEADER_SIZE + 16);
+ expect(r.uncompressed_offset == 0);
+
+ // Still cannot find anything past the end.
+ expect(lzma_index_locate(i, &r, 5));
+
+ // Add the third Record.
+ expect(lzma_index_append(i, NULL, 40, 11) == LZMA_OK);
+
+ expect(!lzma_index_locate(i, &r, 0));
+ expect(r.total_size == 32);
+ expect(r.uncompressed_size == 5);
+ expect(r.stream_offset == LZMA_STREAM_HEADER_SIZE + 16);
+ expect(r.uncompressed_offset == 0);
+
+ expect(!lzma_index_read(i, &r));
+ expect(r.total_size == 40);
+ expect(r.uncompressed_size == 11);
+ expect(r.stream_offset == LZMA_STREAM_HEADER_SIZE + 16 + 32);
+ expect(r.uncompressed_offset == 5);
+
+ expect(!lzma_index_locate(i, &r, 2));
+ expect(r.total_size == 32);
+ expect(r.uncompressed_size == 5);
+ expect(r.stream_offset == LZMA_STREAM_HEADER_SIZE + 16);
+ expect(r.uncompressed_offset == 0);
+
+ expect(!lzma_index_locate(i, &r, 5));
+ expect(r.total_size == 40);
+ expect(r.uncompressed_size == 11);
+ expect(r.stream_offset == LZMA_STREAM_HEADER_SIZE + 16 + 32);
+ expect(r.uncompressed_offset == 5);
+
+ expect(!lzma_index_locate(i, &r, 5 + 11 - 1));
+ expect(r.total_size == 40);
+ expect(r.uncompressed_size == 11);
+ expect(r.stream_offset == LZMA_STREAM_HEADER_SIZE + 16 + 32);
+ expect(r.uncompressed_offset == 5);
+
+ expect(lzma_index_locate(i, &r, 5 + 11));
+ expect(lzma_index_locate(i, &r, 5 + 15));
+
+ // Large Index
+ i = lzma_index_init(i, NULL);
+ expect(i != NULL);
+
+ for (size_t n = 4; n <= 4 * 5555; n += 4)
+ expect(lzma_index_append(i, NULL, n + 8, n) == LZMA_OK);
+
+ expect(lzma_index_count(i) == 5555);
+
+ // First Record
+ expect(!lzma_index_locate(i, &r, 0));
+ expect(r.total_size == 4 + 8);
+ expect(r.uncompressed_size == 4);
+ expect(r.stream_offset == LZMA_STREAM_HEADER_SIZE);
+ expect(r.uncompressed_offset == 0);
+
+ expect(!lzma_index_locate(i, &r, 3));
+ expect(r.total_size == 4 + 8);
+ expect(r.uncompressed_size == 4);
+ expect(r.stream_offset == LZMA_STREAM_HEADER_SIZE);
+ expect(r.uncompressed_offset == 0);
+
+ // Second Record
+ expect(!lzma_index_locate(i, &r, 4));
+ expect(r.total_size == 2 * 4 + 8);
+ expect(r.uncompressed_size == 2 * 4);
+ expect(r.stream_offset == LZMA_STREAM_HEADER_SIZE + 4 + 8);
+ expect(r.uncompressed_offset == 4);
+
+ // Last Record
+ expect(!lzma_index_locate(i, &r, lzma_index_uncompressed_size(i) - 1));
+ expect(r.total_size == 4 * 5555 + 8);
+ expect(r.uncompressed_size == 4 * 5555);
+ expect(r.stream_offset == lzma_index_total_size(i)
+ + LZMA_STREAM_HEADER_SIZE - 4 * 5555 - 8);
+ expect(r.uncompressed_offset
+ == lzma_index_uncompressed_size(i) - 4 * 5555);
+
+ // Allocation chunk boundaries. See INDEX_GROUP_SIZE in
+ // liblzma/common/index.c.
+ const size_t group_multiple = 256 * 4;
+ const size_t radius = 8;
+ const size_t start = group_multiple - radius;
+ lzma_vli ubase = 0;
+ lzma_vli tbase = 0;
+ size_t n;
+ for (n = 1; n < start; ++n) {
+ ubase += n * 4;
+ tbase += n * 4 + 8;
+ }
+
+ while (n < start + 2 * radius) {
+ expect(!lzma_index_locate(i, &r, ubase + n * 4));
+
+ expect(r.stream_offset == tbase + n * 4 + 8
+ + LZMA_STREAM_HEADER_SIZE);
+ expect(r.uncompressed_offset == ubase + n * 4);
+
+ tbase += n * 4 + 8;
+ ubase += n * 4;
+ ++n;
+
+ expect(r.total_size == n * 4 + 8);
+ expect(r.uncompressed_size == n * 4);
+ }
+
+ // Do it also backwards since lzma_index_locate() uses relative search.
+ while (n > start) {
+ expect(!lzma_index_locate(i, &r, ubase + (n - 1) * 4));
+
+ expect(r.total_size == n * 4 + 8);
+ expect(r.uncompressed_size == n * 4);
+
+ --n;
+ tbase -= n * 4 + 8;
+ ubase -= n * 4;
+
+ expect(r.stream_offset == tbase + n * 4 + 8
+ + LZMA_STREAM_HEADER_SIZE);
+ expect(r.uncompressed_offset == ubase + n * 4);
+ }
+
+	// Test locating in a concatenated Index.
+ i = lzma_index_init(i, NULL);
+ expect(i != NULL);
+ for (n = 0; n < group_multiple; ++n)
+ expect(lzma_index_append(i, NULL, 8, 0) == LZMA_OK);
+ expect(lzma_index_append(i, NULL, 16, 1) == LZMA_OK);
+ expect(!lzma_index_locate(i, &r, 0));
+ expect(r.total_size == 16);
+ expect(r.uncompressed_size == 1);
+ expect(r.stream_offset
+ == LZMA_STREAM_HEADER_SIZE + group_multiple * 8);
+ expect(r.uncompressed_offset == 0);
+
+ lzma_index_end(i, NULL);
+}
+
+
+static void
+test_corrupt(void)
+{
+ const size_t alloc_size = 128 * 1024;
+ uint8_t *buf = malloc(alloc_size);
+ expect(buf != NULL);
+ lzma_stream strm = LZMA_STREAM_INIT;
+
+ lzma_index *i = create_empty();
+ expect(lzma_index_append(i, NULL, 0, 1) == LZMA_PROG_ERROR);
+ lzma_index_end(i, NULL);
+
+ // Create a valid Index and corrupt it in different ways.
+ i = create_small();
+ expect(lzma_index_encoder(&strm, i) == LZMA_OK);
+ succeed(coder_loop(&strm, NULL, 0, buf, 20,
+ LZMA_STREAM_END, LZMA_RUN));
+ lzma_index_end(i, NULL);
+
+ // Wrong Index Indicator
+ buf[0] ^= 1;
+ expect(lzma_index_decoder(&strm, &i, MEMLIMIT) == LZMA_OK);
+ succeed(decoder_loop_ret(&strm, buf, 1, LZMA_DATA_ERROR));
+ buf[0] ^= 1;
+
+ // Wrong Number of Records and thus CRC32 fails.
+ --buf[1];
+ expect(lzma_index_decoder(&strm, &i, MEMLIMIT) == LZMA_OK);
+ succeed(decoder_loop_ret(&strm, buf, 10, LZMA_DATA_ERROR));
+ ++buf[1];
+
+ // Padding not NULs
+ buf[15] ^= 1;
+ expect(lzma_index_decoder(&strm, &i, MEMLIMIT) == LZMA_OK);
+ succeed(decoder_loop_ret(&strm, buf, 16, LZMA_DATA_ERROR));
+
+ lzma_end(&strm);
+ free(buf);
+}
+
+
+int
+main(void)
+{
+ test_equal();
+
+ test_overflow();
+
+ lzma_index *i = create_empty();
+ test_many(i);
+ lzma_index_end(i, NULL);
+
+ i = create_small();
+ test_many(i);
+ lzma_index_end(i, NULL);
+
+ i = create_big();
+ test_many(i);
+ lzma_index_end(i, NULL);
+
+ test_cat();
+
+ test_locate();
+
+ test_corrupt();
+
+ return 0;
+}
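Not part of the patch: a condensed walk of the lzma_index API as used by the tests above. Note this is the 4.999.9beta API bundled here (lzma_index_init/append/read/end and lzma_index_record); later liblzma releases changed these interfaces, so this sketch is only meant to mirror the calls shown in this file.

    /* Illustrative only: build a tiny Index and iterate its Records. */
    #include <lzma.h>
    #include <stdio.h>

    void index_demo(void)
    {
        lzma_index *idx = lzma_index_init(NULL, NULL);
        if (idx == NULL)
            return;

        /* Append (total size, uncompressed size) pairs as create_small() does. */
        lzma_index_append(idx, NULL, 101, 555);
        lzma_index_append(idx, NULL, 602, 777);

        /* lzma_index_read() returns non-zero once the Records are exhausted. */
        lzma_index_record rec;
        while (!lzma_index_read(idx, &rec))
            printf("stream offset %llu, uncompressed size %llu\n",
                    (unsigned long long)rec.stream_offset,
                    (unsigned long long)rec.uncompressed_size);

        lzma_index_end(idx, NULL);
    }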
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_stream_flags.c b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_stream_flags.c
new file mode 100644
index 00000000..59c04f04
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_stream_flags.c
@@ -0,0 +1,182 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file test_stream_flags.c
+/// \brief Tests Stream Header and Stream Footer coders
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#include "tests.h"
+
+
+static lzma_stream_flags known_flags;
+static lzma_stream_flags decoded_flags;
+static uint8_t buffer[LZMA_STREAM_HEADER_SIZE];
+
+
+static bool
+validate(void)
+{
+ // TODO: This could require the specific error type as an argument.
+ // We could also test that lzma_stream_flags_compare() gives
+ // the correct return values in different situations.
+ return lzma_stream_flags_compare(&known_flags, &decoded_flags)
+ != LZMA_OK;
+}
+
+
+static bool
+test_header_decoder(lzma_ret expected_ret)
+{
+ memcrap(&decoded_flags, sizeof(decoded_flags));
+
+ if (lzma_stream_header_decode(&decoded_flags, buffer) != expected_ret)
+ return true;
+
+ if (expected_ret != LZMA_OK)
+ return false;
+
+ // Header doesn't have Backward Size, so make
+ // lzma_stream_flags_compare() ignore it.
+ decoded_flags.backward_size = LZMA_VLI_UNKNOWN;
+ return validate();
+}
+
+
+static void
+test_header(void)
+{
+ memcrap(buffer, sizeof(buffer));
+ expect(lzma_stream_header_encode(&known_flags, buffer) == LZMA_OK);
+ succeed(test_header_decoder(LZMA_OK));
+}
+
+
+static bool
+test_footer_decoder(lzma_ret expected_ret)
+{
+ memcrap(&decoded_flags, sizeof(decoded_flags));
+
+ if (lzma_stream_footer_decode(&decoded_flags, buffer) != expected_ret)
+ return true;
+
+ if (expected_ret != LZMA_OK)
+ return false;
+
+ return validate();
+}
+
+
+static void
+test_footer(void)
+{
+ memcrap(buffer, sizeof(buffer));
+ expect(lzma_stream_footer_encode(&known_flags, buffer) == LZMA_OK);
+ succeed(test_footer_decoder(LZMA_OK));
+}
+
+
+static void
+test_encode_invalid(void)
+{
+ known_flags.check = LZMA_CHECK_ID_MAX + 1;
+ known_flags.backward_size = 1024;
+
+ expect(lzma_stream_header_encode(&known_flags, buffer)
+ == LZMA_PROG_ERROR);
+
+ expect(lzma_stream_footer_encode(&known_flags, buffer)
+ == LZMA_PROG_ERROR);
+
+ known_flags.check = (lzma_check)(-1);
+
+ expect(lzma_stream_header_encode(&known_flags, buffer)
+ == LZMA_PROG_ERROR);
+
+ expect(lzma_stream_footer_encode(&known_flags, buffer)
+ == LZMA_PROG_ERROR);
+
+ known_flags.check = LZMA_CHECK_NONE;
+ known_flags.backward_size = 0;
+
+ // Header encoder ignores backward_size.
+ expect(lzma_stream_header_encode(&known_flags, buffer) == LZMA_OK);
+
+ expect(lzma_stream_footer_encode(&known_flags, buffer)
+ == LZMA_PROG_ERROR);
+
+ known_flags.backward_size = LZMA_VLI_MAX;
+
+ expect(lzma_stream_header_encode(&known_flags, buffer) == LZMA_OK);
+
+ expect(lzma_stream_footer_encode(&known_flags, buffer)
+ == LZMA_PROG_ERROR);
+}
+
+
+static void
+test_decode_invalid(void)
+{
+ known_flags.check = LZMA_CHECK_NONE;
+ known_flags.backward_size = 1024;
+
+ expect(lzma_stream_header_encode(&known_flags, buffer) == LZMA_OK);
+
+ // Test 1 (invalid Magic Bytes)
+ buffer[5] ^= 1;
+ succeed(test_header_decoder(LZMA_FORMAT_ERROR));
+ buffer[5] ^= 1;
+
+ // Test 2a (valid CRC32)
+ uint32_t crc = lzma_crc32(buffer + 6, 2, 0);
+ integer_write_32(buffer + 8, crc);
+ succeed(test_header_decoder(LZMA_OK));
+
+ // Test 2b (invalid Stream Flags with valid CRC32)
+ buffer[6] ^= 0x20;
+ crc = lzma_crc32(buffer + 6, 2, 0);
+ integer_write_32(buffer + 8, crc);
+ succeed(test_header_decoder(LZMA_OPTIONS_ERROR));
+
+ // Test 3 (invalid CRC32)
+ expect(lzma_stream_header_encode(&known_flags, buffer) == LZMA_OK);
+ buffer[9] ^= 1;
+ succeed(test_header_decoder(LZMA_DATA_ERROR));
+
+ // Test 4 (invalid Stream Flags with valid CRC32)
+ expect(lzma_stream_footer_encode(&known_flags, buffer) == LZMA_OK);
+ buffer[9] ^= 0x40;
+ crc = lzma_crc32(buffer + 4, 6, 0);
+ integer_write_32(buffer, crc);
+ succeed(test_footer_decoder(LZMA_OPTIONS_ERROR));
+
+ // Test 5 (invalid Magic Bytes)
+ expect(lzma_stream_footer_encode(&known_flags, buffer) == LZMA_OK);
+ buffer[11] ^= 1;
+ succeed(test_footer_decoder(LZMA_FORMAT_ERROR));
+}
+
+
+int
+main(void)
+{
+ // Valid headers
+ known_flags.backward_size = 1024;
+ for (lzma_check check = LZMA_CHECK_NONE;
+ check <= LZMA_CHECK_ID_MAX; ++check) {
+ test_header();
+ test_footer();
+ }
+
+ // Invalid headers
+ test_encode_invalid();
+ test_decode_invalid();
+
+ return 0;
+}
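Not part of the patch: the Stream Header round trip the tests above perform, reduced to a minimal sketch using only calls and macros that appear in this file (plus LZMA_CHECK_CRC32 as an example check):

    /* Illustrative only: encode a Stream Header, decode it, compare flags. */
    #include <lzma.h>

    int header_roundtrip(void)
    {
        uint8_t buf[LZMA_STREAM_HEADER_SIZE];
        lzma_stream_flags out, in = { .check = LZMA_CHECK_CRC32 };

        if (lzma_stream_header_encode(&in, buf) != LZMA_OK)
            return 1;

        if (lzma_stream_header_decode(&out, buf) != LZMA_OK)
            return 1;

        /* The header has no Backward Size, so ignore it in the comparison,
           just like test_header_decoder() does above. */
        out.backward_size = LZMA_VLI_UNKNOWN;
        return lzma_stream_flags_compare(&in, &out) != LZMA_OK;
    }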
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/tests.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/tests.h
new file mode 100644
index 00000000..3aebb7e5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/tests.h
@@ -0,0 +1,126 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+///////////////////////////////////////////////////////////////////////////////
+//
+/// \file tests.h
+/// \brief Common definitions for test applications
+//
+// Author: Lasse Collin
+//
+// This file has been put into the public domain.
+// You can do whatever you want with this file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef LZMA_TESTS_H
+#define LZMA_TESTS_H
+
+#include "sysdefs.h"
+#include "integer.h"
+#include "lzma.h"
+
+#include <stdio.h>
+
+#define memcrap(buf, size) memset(buf, 0xFD, size)
+
+#define expect(test) ((test) ? 0 : (fprintf(stderr, "%s:%u: %s\n", \
+ __FILE__, __LINE__, #test), abort(), 0))
+
+#define succeed(test) expect(!(test))
+
+#define fail(test) expect(test)
+
+
+static inline const char *
+lzma_ret_sym(lzma_ret ret)
+{
+ if ((unsigned int)(ret) > LZMA_PROG_ERROR)
+ return "UNKNOWN_ERROR";
+
+ static const char *msgs[] = {
+ "LZMA_OK",
+ "LZMA_STREAM_END",
+ "LZMA_NO_CHECK",
+ "LZMA_UNSUPPORTED_CHECK",
+ "LZMA_GET_CHECK",
+ "LZMA_MEM_ERROR",
+ "LZMA_MEMLIMIT_ERROR",
+ "LZMA_FORMAT_ERROR",
+ "LZMA_OPTIONS_ERROR",
+ "LZMA_DATA_ERROR",
+ "LZMA_BUF_ERROR",
+ "LZMA_PROG_ERROR"
+ };
+
+ return msgs[ret];
+}
+
+
+static inline bool
+coder_loop(lzma_stream *strm, uint8_t *in, size_t in_size,
+ uint8_t *out, size_t out_size,
+ lzma_ret expected_ret, lzma_action finishing_action)
+{
+ size_t in_left = in_size;
+ size_t out_left = out_size > 0 ? out_size + 1 : 0;
+ lzma_action action = LZMA_RUN;
+ lzma_ret ret;
+
+ strm->next_in = NULL;
+ strm->avail_in = 0;
+ strm->next_out = NULL;
+ strm->avail_out = 0;
+
+ while (true) {
+ if (in_left > 0) {
+ if (--in_left == 0)
+ action = finishing_action;
+
+ strm->next_in = in++;
+ strm->avail_in = 1;
+ }
+
+ if (out_left > 0) {
+ --out_left;
+ strm->next_out = out++;
+ strm->avail_out = 1;
+ }
+
+ ret = lzma_code(strm, action);
+ if (ret != LZMA_OK)
+ break;
+ }
+
+ bool error = false;
+
+ if (ret != expected_ret)
+ error = true;
+
+ if (expected_ret == LZMA_STREAM_END) {
+ if (strm->total_in != in_size || strm->total_out != out_size)
+ error = true;
+ } else {
+ if (strm->total_in != in_size || strm->total_out != out_size)
+ error = true;
+ }
+
+ return error;
+}
+
+
+static inline bool
+decoder_loop_ret(lzma_stream *strm, uint8_t *in, size_t in_size,
+ lzma_ret expected_ret)
+{
+ return coder_loop(strm, in, in_size, NULL, 0, expected_ret, LZMA_RUN);
+}
+
+
+static inline bool
+decoder_loop(lzma_stream *strm, uint8_t *in, size_t in_size)
+{
+ return coder_loop(strm, in, in_size, NULL, 0,
+ LZMA_STREAM_END, LZMA_RUN);
+}
+
+#endif
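Not part of the patch: a hypothetical test showing how the helpers above are meant to be used. expect() aborts with file and line when its condition is false, succeed() asserts that a helper reported no error (returned false), and fail() asserts that it did report one; helper_reports_error() is a made-up stand-in.

    /* Illustrative only: exercising the tests.h macros. */
    #include "tests.h"

    static int helper_reports_error(void) { return 1; }

    int main(void)
    {
        expect(1 + 1 == 2);                     /* condition must hold      */
        fail(helper_reports_error());           /* expects a "true" error   */
        succeed(!helper_reports_error());       /* expects "false", no error */
        expect(lzma_ret_sym(LZMA_OK) != NULL);  /* symbolic name lookup     */
        return 0;
    }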
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/version.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/version.sh
new file mode 100644
index 00000000..40d04936
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/version.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+#
+#############################################################################
+#
+# Get the version string from version.h and print it out without
+# trailing newline. This makes it suitable for use in configure.ac.
+#
+#############################################################################
+#
+# Author: Lasse Collin
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+#############################################################################
+
+sed -n 's/LZMA_VERSION_STABILITY_ALPHA/alpha/
+ s/LZMA_VERSION_STABILITY_BETA/beta/
+ s/LZMA_VERSION_STABILITY_STABLE//
+ s/^#define LZMA_VERSION_[MPS][AIT][AJNT][A-Z]* //p' \
+ src/liblzma/api/lzma/version.h \
+ | tr '\n' '|' \
+ | sed 's/|/./; s/|/./; s/|//g' \
+ | tr -d '\n'
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/Makefile b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/Makefile
new file mode 100644
index 00000000..db362ef2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/Makefile
@@ -0,0 +1,308 @@
+###############################################################################
+#
+# Makefile to build XZ Utils using MinGW
+#
+# Make flags to alter compilation:
+#
+# DEBUG=1 Enable assertions. Don't use this for production builds!
+# You may also want to set CFLAGS="-g -O0" to disable
+# optimizations.
+#
+# W64=1 Build for 64-bit Windows. Make sure that you have 64-bit
+# MinGW in PATH.
+#
+# WINE=1 Shortcut to set CC, AR, and STRIP to use Wine to run Windows
+# versions of MinGW binaries.
+#
+# The usual CPPFLAGS and CFLAGS are supported too.
+#
+###############################################################################
+#
+# Author: Lasse Collin
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+###############################################################################
+
+ifdef W64
+CC = x86_64-pc-mingw32-gcc
+WINDRES = x86_64-pc-mingw32-windres
+AR = x86_64-pc-mingw32-ar
+STRIP = x86_64-pc-mingw32-strip
+else
+CC = mingw32-gcc
+WINDRES = windres
+AR = ar
+STRIP = strip
+endif
+
+SED = sed
+MKDIR = mkdir
+CP = cp
+RM = rm -f
+
+CFLAGS = -g -Wall -Wextra -O2
+# CFLAGS = -Wall -Wextra -O3 -fomit-frame-pointer -funroll-loops
+
+ALL_CFLAGS = -std=gnu99 -mms-bitfields
+
+ALL_CPPFLAGS = \
+ -I. \
+ -I../src/common \
+ -I../src/liblzma/api \
+ -I../src/liblzma/common \
+ -I../src/liblzma/check \
+ -I../src/liblzma/rangecoder \
+ -I../src/liblzma/lz \
+ -I../src/liblzma/lzma \
+ -I../src/liblzma/delta \
+ -I../src/liblzma/simple \
+ -I../src/liblzma/subblock
+
+ALL_CPPFLAGS += -DHAVE_CONFIG_H
+
+# This works with Wine too while using native GNU make, sed, and rm.
+ifdef WINE
+ifdef W64
+CC := wine c:/MinGW64/bin/x86_64-pc-mingw32-gcc
+WINDRES := wine c:/MinGW64/bin/x86_64-pc-mingw32-windres
+AR := wine c:/MinGW64/bin/x86_64-pc-mingw32-ar
+STRIP := wine c:/MinGW64/bin/x86_64-pc-mingw32-strip
+else
+CC := wine c:/MinGW/bin/gcc
+WINDRES := wine c:/MinGW/bin/windres
+AR := wine c:/MinGW/bin/ar
+STRIP := wine c:/MinGW/bin/strip
+endif
+endif
+
+ifdef DEBUG
+# Use echo since it works for this purpose on both Windows and POSIX.
+STRIP := echo Skipping strip
+else
+ALL_CPPFLAGS += -DNDEBUG
+endif
+
+ALL_CPPFLAGS += $(CPPFLAGS)
+ALL_CFLAGS += $(CFLAGS)
+
+
+################
+# Common rules #
+################
+
+.PHONY: all clean pkg
+all: liblzma xzdec xz
+clean: liblzma-clean xzdec-clean xz-clean
+
+pkg: all
+ $(RM) -r pkg
+ $(MKDIR) -p pkg/lib pkg/include/lzma
+ $(CP) liblzma.dll xz-dynamic.exe xz.exe xzdec-dynamic.exe xzdec.exe lzmadec-dynamic.exe lzmadec.exe pkg
+ $(CP) liblzma.a liblzma.def liblzma_static.lib pkg/lib
+ $(CP) ../src/liblzma/api/lzma.h pkg/include
+ $(CP) ../src/liblzma/api/lzma/*.h pkg/include/lzma
+
+%.o: %.rc
+ $(WINDRES) $(ALL_CPPFLAGS) $< $@
+
+
+###############
+# liblzma.dll #
+###############
+
+.PHONY: liblzma
+liblzma: liblzma.dll liblzma_static.lib
+
+LIBLZMA_SRCS_C = \
+ ../src/liblzma/common/alone_decoder.c \
+ ../src/liblzma/common/alone_encoder.c \
+ ../src/liblzma/common/auto_decoder.c \
+ ../src/liblzma/common/block_buffer_decoder.c \
+ ../src/liblzma/common/block_buffer_encoder.c \
+ ../src/liblzma/common/block_decoder.c \
+ ../src/liblzma/common/block_encoder.c \
+ ../src/liblzma/common/block_header_decoder.c \
+ ../src/liblzma/common/block_header_encoder.c \
+ ../src/liblzma/common/block_util.c \
+ ../src/liblzma/common/common.c \
+ ../src/liblzma/common/easy_buffer_encoder.c \
+ ../src/liblzma/common/easy_decoder_memusage.c \
+ ../src/liblzma/common/easy_encoder.c \
+ ../src/liblzma/common/easy_encoder_memusage.c \
+ ../src/liblzma/common/easy_preset.c \
+ ../src/liblzma/common/filter_buffer_decoder.c \
+ ../src/liblzma/common/filter_buffer_encoder.c \
+ ../src/liblzma/common/filter_common.c \
+ ../src/liblzma/common/filter_decoder.c \
+ ../src/liblzma/common/filter_encoder.c \
+ ../src/liblzma/common/filter_flags_decoder.c \
+ ../src/liblzma/common/filter_flags_encoder.c \
+ ../src/liblzma/common/index.c \
+ ../src/liblzma/common/index_decoder.c \
+ ../src/liblzma/common/index_encoder.c \
+ ../src/liblzma/common/index_hash.c \
+ ../src/liblzma/common/stream_buffer_decoder.c \
+ ../src/liblzma/common/stream_buffer_encoder.c \
+ ../src/liblzma/common/stream_decoder.c \
+ ../src/liblzma/common/stream_encoder.c \
+ ../src/liblzma/common/stream_flags_common.c \
+ ../src/liblzma/common/stream_flags_decoder.c \
+ ../src/liblzma/common/stream_flags_encoder.c \
+ ../src/liblzma/common/vli_decoder.c \
+ ../src/liblzma/common/vli_encoder.c \
+ ../src/liblzma/common/vli_size.c \
+ ../src/liblzma/check/check.c \
+ ../src/liblzma/check/crc32_table.c \
+ ../src/liblzma/check/crc64_table.c \
+ ../src/liblzma/check/sha256.c \
+ ../src/liblzma/rangecoder/price_table.c \
+ ../src/liblzma/lz/lz_decoder.c \
+ ../src/liblzma/lz/lz_encoder.c \
+ ../src/liblzma/lz/lz_encoder_mf.c \
+ ../src/liblzma/lzma/fastpos_table.c \
+ ../src/liblzma/lzma/lzma2_decoder.c \
+ ../src/liblzma/lzma/lzma2_encoder.c \
+ ../src/liblzma/lzma/lzma_decoder.c \
+ ../src/liblzma/lzma/lzma_encoder.c \
+ ../src/liblzma/lzma/lzma_encoder_optimum_fast.c \
+ ../src/liblzma/lzma/lzma_encoder_optimum_normal.c \
+ ../src/liblzma/lzma/lzma_encoder_presets.c \
+ ../src/liblzma/delta/delta_common.c \
+ ../src/liblzma/delta/delta_decoder.c \
+ ../src/liblzma/delta/delta_encoder.c \
+ ../src/liblzma/simple/arm.c \
+ ../src/liblzma/simple/armthumb.c \
+ ../src/liblzma/simple/ia64.c \
+ ../src/liblzma/simple/powerpc.c \
+ ../src/liblzma/simple/simple_coder.c \
+ ../src/liblzma/simple/simple_decoder.c \
+ ../src/liblzma/simple/simple_encoder.c \
+ ../src/liblzma/simple/sparc.c \
+ ../src/liblzma/simple/x86.c
+
+LIBLZMA_SRCS_ASM =
+
+ifdef W64
+LIBLZMA_SRCS_C += \
+ ../src/liblzma/check/crc32_fast.c \
+ ../src/liblzma/check/crc64_fast.c
+else
+LIBLZMA_SRCS_ASM += \
+ ../src/liblzma/check/crc32_x86.S \
+ ../src/liblzma/check/crc64_x86.S
+endif
+
+LIBLZMA_OBJS_C = $(LIBLZMA_SRCS_C:.c=.o)
+LIBLZMA_OBJS_ASM = $(LIBLZMA_SRCS_ASM:.S=.o)
+LIBLZMA_OBJS = \
+ $(LIBLZMA_OBJS_C) \
+ $(LIBLZMA_OBJS_ASM) \
+ ../src/liblzma/liblzma_w32res.o
+
+LIBLZMA_OBJS_STATIC_C = $(LIBLZMA_SRCS_C:.c=-static.o)
+LIBLZMA_OBJS_STATIC_ASM = $(LIBLZMA_SRCS_ASM:.S=-static.o)
+LIBLZMA_OBJS_STATIC = $(LIBLZMA_OBJS_STATIC_C) $(LIBLZMA_OBJS_STATIC_ASM)
+
+# The sed is needed to remove ordinals from the .def file. I'm not going
+# to track the ordinal numbers, so people should link against liblzma.dll
+# only by using symbol names.
+liblzma.dll: $(LIBLZMA_OBJS)
+ $(CC) $(ALL_CFLAGS) -shared -o liblzma.dll $(LIBLZMA_OBJS) -Wl,--out-implib,liblzma.a,--output-def,liblzma.def.in
+ $(SED) 's/ \+@ *[0-9]\+//' liblzma.def.in > liblzma.def
+ $(RM) liblzma.def.in
+ $(STRIP) --strip-unneeded liblzma.a
+ $(STRIP) --strip-all liblzma.dll
+
+$(LIBLZMA_OBJS_C): %.o: %.c
+ $(CC) -DDLL_EXPORT $(ALL_CPPFLAGS) $(ALL_CFLAGS) -c -o $@ $<
+
+$(LIBLZMA_OBJS_ASM): %.o: %.S
+ $(CC) -DDLL_EXPORT $(ALL_CPPFLAGS) $(ALL_CFLAGS) -c -o $@ $<
+
+liblzma_static.lib: $(LIBLZMA_OBJS_STATIC)
+ $(RM) $@
+ $(AR) rcs $@ $(LIBLZMA_OBJS_STATIC)
+ $(STRIP) --strip-unneeded $@
+
+$(LIBLZMA_OBJS_STATIC_C): %-static.o: %.c
+ $(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) -c -o $@ $<
+
+$(LIBLZMA_OBJS_STATIC_ASM): %-static.o: %.S
+ $(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) -c -o $@ $<
+
+.PHONY: liblzma-clean
+liblzma-clean:
+ -$(RM) $(LIBLZMA_OBJS) $(LIBLZMA_OBJS_STATIC) liblzma.def.in liblzma.def liblzma.a liblzma.dll liblzma_static.lib
+
+
+###########################
+# xzdec.exe & lzmadec.exe #
+###########################
+
+.PHONY: xzdec
+xzdec: xzdec-dynamic.exe lzmadec-dynamic.exe xzdec.exe lzmadec.exe
+
+XZDEC_SRCS = ../src/xzdec/xzdec.c
+
+xzdec-dynamic.exe: liblzma.dll $(XZDEC_SRCS) ../src/xzdec/xzdec_w32res.o
+ $(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) $(XZDEC_SRCS) ../src/xzdec/xzdec_w32res.o -o $@ liblzma.a
+ $(STRIP) --strip-all $@
+
+lzmadec-dynamic.exe: liblzma.dll $(XZDEC_SRCS) ../src/xzdec/lzmadec_w32res.o
+ $(CC) $(ALL_CPPFLAGS) -DLZMADEC $(ALL_CFLAGS) $(XZDEC_SRCS) ../src/xzdec/lzmadec_w32res.o -o $@ liblzma.a
+ $(STRIP) --strip-all $@
+
+xzdec.exe: liblzma_static.lib $(XZDEC_SRCS) ../src/xzdec/xzdec_w32res.o
+ $(CC) -DLZMA_API_STATIC $(ALL_CPPFLAGS) $(ALL_CFLAGS) $(XZDEC_SRCS) ../src/xzdec/xzdec_w32res.o -o $@ liblzma_static.lib
+ $(STRIP) --strip-all $@
+
+lzmadec.exe: liblzma_static.lib $(XZDEC_SRCS) ../src/xzdec/lzmadec_w32res.o
+ $(CC) -DLZMA_API_STATIC $(ALL_CPPFLAGS) -DLZMADEC $(ALL_CFLAGS) $(XZDEC_SRCS) ../src/xzdec/lzmadec_w32res.o -o $@ liblzma_static.lib
+ $(STRIP) --strip-all $@
+
+.PHONY: xzdec-clean
+xzdec-clean:
+ -$(RM) xzdec-dynamic.exe lzmadec-dynamic.exe xzdec.exe lzmadec.exe ../src/xzdec/xzdec_w32res.o ../src/xzdec/lzmadec_w32res.o
+
+
+##########
+# xz.exe #
+##########
+
+.PHONY: xz
+xz: xz-dynamic.exe xz.exe
+
+XZ_SRCS = \
+ ../src/xz/args.c \
+ ../src/xz/coder.c \
+ ../src/xz/file_io.c \
+ ../src/xz/hardware.c \
+ ../src/xz/main.c \
+ ../src/xz/message.c \
+ ../src/xz/options.c \
+ ../src/xz/signals.c \
+ ../src/xz/suffix.c \
+ ../src/xz/util.c
+
+XZ_OBJS = $(XZ_SRCS:.c=.o)
+XZ_OBJS_STATIC = $(XZ_SRCS:.c=-static.o)
+
+$(XZ_OBJS): %.o: %.c
+ $(CC) $(ALL_CPPFLAGS) $(ALL_CFLAGS) -c -o $@ $<
+
+xz-dynamic.exe: liblzma.dll $(XZ_OBJS) ../src/xz/xz_w32res.o
+ $(CC) $(ALL_CFLAGS) $(XZ_OBJS) ../src/xz/xz_w32res.o -o $@ liblzma.a
+ $(STRIP) --strip-all $@
+
+$(XZ_OBJS_STATIC): %-static.o: %.c
+ $(CC) -DLZMA_API_STATIC $(ALL_CPPFLAGS) $(ALL_CFLAGS) -c -o $@ $<
+
+xz.exe: liblzma_static.lib $(XZ_OBJS_STATIC) ../src/xz/xz_w32res.o
+ $(CC) $(ALL_CFLAGS) $(XZ_OBJS_STATIC) ../src/xz/xz_w32res.o -o $@ liblzma_static.lib
+ $(STRIP) --strip-all $@
+
+.PHONY: xz-clean
+xz-clean:
+ -$(RM) $(XZ_OBJS) $(XZ_OBJS_STATIC) ../src/xz/xz_w32res.o xz-dynamic.exe xz.exe
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/README b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/README
new file mode 100644
index 00000000..0e529de9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/README
@@ -0,0 +1,155 @@
+
+XZ Utils on Windows
+===================
+
+Introduction
+
+ This document explains how to build XZ Utils for Microsoft Windows
+ using MinGW (Minimalist GNU for Windows).
+
+    This is currently experimental and has had very little testing.
+ No ABI stability is promised for liblzma.dll.
+
+
+Why MinGW
+
+ XZ Utils code is C99. It should be possible to compile at least
+ liblzma using any C99 compiler. Compiling the command line tools may
+ need a little extra work to get them built on new systems, because
+ they use some features that aren't standardized in POSIX.
+
+ MinGW is free software. MinGW runtime provides some functions that
+ made porting the command line tools easier. Most(?) of the MinGW
+ runtime, which gets linked into the resulting binaries, is in the
+ public domain.
+
+ While most C compilers nowadays support C99 well enough (including
+ most compilers for Windows), MSVC doesn't. It seems that Microsoft
+ has no plans to ever support C99. Thus, it is not possible to build
+ XZ Utils using MSVC without doing a lot of work to convert the code.
+    Using a prebuilt liblzma from MSVC is possible though, since the
+ liblzma API headers are in C89 and contain some non-standard extra
+ hacks required by MSVC.
+
+
+Getting and Installing MinGW
+
+ You can download MinGW for 32-bit Windows from Sourceforge:
+
+ http://sourceforge.net/project/showfiles.php?group_id=2435
+
+ It is enough to pick Automated MinGW Installer and MSYS Base System.
+ Using the automated installer, select at least runtime, w32api,
+ core compiler, and MinGW make. From MSYS you actually need only
+ certain tools, but it is easiest to just install the whole MSYS.
+
+    To build for the x86-64 version of Windows, you can download a
+    snapshot of MinGW targeting 64-bit Windows:
+
+ http://sourceforge.net/project/showfiles.php?group_id=202880
+
+    You can also use the 32-bit MSYS for the 64-bit build, since we
+    don't link against anything in MSYS; we only use its tools. You may
+    use the make tool from 32-bit MinGW (mingw32-make.exe), although the
+    make.exe from MSYS probably works too.
+
+ Naturally you can pick the components manually, for example to try
+ the latest available GCC. It is also possible to use a cross-compiler
+ to build Windows binaries for example on GNU/Linux, or use Wine to
+ run the Windows binaries. However, these instructions focus on
+ building on Windows.
+
+
+Building for 32-bit Windows
+
+    Add MinGW and MSYS to PATH (adjust if you installed to a
+    non-default location):
+
+ set PATH=C:\MinGW\bin;C:\MSYS\1.0\bin;%PATH%
+
+ Then it should be enough to just run mingw32-make in this directory
+ (the directory containing this README):
+
+ mingw32-make
+
+
+Building for 64-bit Windows
+
+    For a 64-bit build, the PATH has to point to the 64-bit MinGW:
+
+ set PATH=C:\MinGW64\bin;C:\MSYS\1.0\bin;%PATH%
+
+ You need to pass W64=1 to mingw32-make (or make if you don't have
+ mingw32-make):
+
+ mingw32-make W64=1
+
+
+Additional Make Flags and Targets
+
+ You may want to try some additional optimizations, which may or
+ may not make the code faster (and may or may not hit possible
+ compiler bugs more easily):
+
+ mingw32-make CFLAGS="-O3 -fomit-frame-pointer -funroll-loops"
+
+ If you want to enable assertions (the assert() macro), use DEBUG=1.
+ You may want to disable optimizations too if you plan to actually
+ debug the code. Never use DEBUG=1 for production builds!
+
+ mingw32-make DEBUG=1 CFLAGS="-g -O0"
+
+ To copy the built binaries and required headers into a clean
+ directory, use the pkg target:
+
+ mingw32-make pkg
+
+ It first removes a possibly existing pkg directory, and then
+ recreates it with the required files.
+
+ TODO: The pkg target doesn't copy any license or other copyright
+ related information into the pkg directory.
+
+
+Creating an Import Library for MSVC
+
+    The included Makefile creates the import library liblzma.a, which
+    works only(?) with MinGW. To use liblzma.dll with MSVC, you need to
+    create liblzma.lib using the lib command from MSVC:
+
+ lib /def:liblzma.def /out:liblzma.lib /machine:ix86
+
+    On x86-64, the /machine argument naturally has to be changed:
+
+ lib /def:liblzma.def /out:liblzma.lib /machine:x64
+
+
+To Do
+
+ - Test Win64 support and add instructions about getting x86-64
+ version of MinGW.
+
+ - Creating the import library for other compilers/linkers
+
+ - Building with other compilers for Windows
+
+ - liblzma currently uses cdecl. Would stdcall be more compatible?
+
+ - Support building more size-optimized liblzma (the HAVE_SMALL
+ define and other things that are needed)
+
+ - Support selecting which parts of liblzma to build to make the
+ library even smaller.
+
+ - Use the configure script on Windows just like it is used on all
+ the other systems?
+
+
+Bugs
+
+ Report bugs to <lasse.collin@tukaani.org> (in English or Finnish).
+
+ Take into account that I don't have MSVC and I cannot very easily
+ test anything on Windows. As of writing, I have tried MinGW and the
+ resulting binaries only under 32-bit Wine.
+
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/config.h b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/config.h
new file mode 100644
index 00000000..4214da86
--- /dev/null
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/windows/config.h
@@ -0,0 +1,167 @@
+/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: expandtab:ts=8:sw=4:softtabstop=4:
+/* Define to 1 if using x86 assembler optimizations. */
+/* #undef HAVE_ASM_X86 */
+
+/* Define to 1 if using x86_64 assembler optimizations. */
+/* #undef HAVE_ASM_X86_64 */
+
+/* Define to 1 if crc32 integrity check is enabled. */
+#define HAVE_CHECK_CRC32 1
+
+/* Define to 1 if crc64 integrity check is enabled. */
+#define HAVE_CHECK_CRC64 1
+
+/* Define to 1 if sha256 integrity check is enabled. */
+#define HAVE_CHECK_SHA256 1
+
+/* Define to 1 if decoder components are enabled. */
+#define HAVE_DECODER 1
+
+/* Define to 1 if arm decoder is enabled. */
+#define HAVE_DECODER_ARM 1
+
+/* Define to 1 if armthumb decoder is enabled. */
+#define HAVE_DECODER_ARMTHUMB 1
+
+/* Define to 1 if delta decoder is enabled. */
+#define HAVE_DECODER_DELTA 1
+
+/* Define to 1 if ia64 decoder is enabled. */
+#define HAVE_DECODER_IA64 1
+
+/* Define to 1 if lzma1 decoder is enabled. */
+#define HAVE_DECODER_LZMA1 1
+
+/* Define to 1 if lzma2 decoder is enabled. */
+#define HAVE_DECODER_LZMA2 1
+
+/* Define to 1 if powerpc decoder is enabled. */
+#define HAVE_DECODER_POWERPC 1
+
+/* Define to 1 if sparc decoder is enabled. */
+#define HAVE_DECODER_SPARC 1
+
+/* Define to 1 if subblock decoder is enabled. */
+/* #undef HAVE_DECODER_SUBBLOCK */
+
+/* Define to 1 if x86 decoder is enabled. */
+#define HAVE_DECODER_X86 1
+
+/* Define to 1 if encoder components are enabled. */
+#define HAVE_ENCODER 1
+
+/* Define to 1 if arm encoder is enabled. */
+#define HAVE_ENCODER_ARM 1
+
+/* Define to 1 if armthumb encoder is enabled. */
+#define HAVE_ENCODER_ARMTHUMB 1
+
+/* Define to 1 if delta encoder is enabled. */
+#define HAVE_ENCODER_DELTA 1
+
+/* Define to 1 if ia64 encoder is enabled. */
+#define HAVE_ENCODER_IA64 1
+
+/* Define to 1 if lzma1 encoder is enabled. */
+#define HAVE_ENCODER_LZMA1 1
+
+/* Define to 1 if lzma2 encoder is enabled. */
+#define HAVE_ENCODER_LZMA2 1
+
+/* Define to 1 if powerpc encoder is enabled. */
+#define HAVE_ENCODER_POWERPC 1
+
+/* Define to 1 if sparc encoder is enabled. */
+#define HAVE_ENCODER_SPARC 1
+
+/* Define to 1 if subblock encoder is enabled. */
+/* #undef HAVE_ENCODER_SUBBLOCK */
+
+/* Define to 1 if x86 encoder is enabled. */
+#define HAVE_ENCODER_X86 1
+
+/* Define to 1 if the system supports fast unaligned memory access. */
+#define HAVE_FAST_UNALIGNED_ACCESS 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <limits.h> header file. */
+#define HAVE_LIMITS_H 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 to enable bt2 match finder. */
+#define HAVE_MF_BT2 1
+
+/* Define to 1 to enable bt3 match finder. */
+#define HAVE_MF_BT3 1
+
+/* Define to 1 to enable bt4 match finder. */
+#define HAVE_MF_BT4 1
+
+/* Define to 1 to enable hc3 match finder. */
+#define HAVE_MF_HC3 1
+
+/* Define to 1 to enable hc4 match finder. */
+#define HAVE_MF_HC4 1
+
+/* Define to 1 if optimizing for size. */
+/* #undef HAVE_SMALL */
+
+/* Define to 1 if stdbool.h conforms to C99. */
+#define HAVE_STDBOOL_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if the system has the type `uintptr_t'. */
+#define HAVE_UINTPTR_T 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the `utime' function. */
+#define HAVE_UTIME 1
+
+/* Define to 1 or 0, depending whether the compiler supports simple visibility
+ declarations. */
+#define HAVE_VISIBILITY 0
+
+/* Define to 1 if the system has the type `_Bool'. */
+#define HAVE__BOOL 1
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "lasse.collin@tukaani.org"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "XZ Utils"
+
+/* The size of `size_t', as computed by sizeof. */
+#ifdef _WIN64
+# define SIZEOF_SIZE_T 8
+#else
+# define SIZEOF_SIZE_T 4
+#endif
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel and VAX). */
+#if defined __BIG_ENDIAN__
+# define WORDS_BIGENDIAN 1
+#elif ! defined __LITTLE_ENDIAN__
+/* # undef WORDS_BIGENDIAN */
+#endif
diff --git a/storage/tokudb/PerconaFT/tools/CMakeLists.txt b/storage/tokudb/PerconaFT/tools/CMakeLists.txt
new file mode 100644
index 00000000..dd54249a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/tools/CMakeLists.txt
@@ -0,0 +1,25 @@
+set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE DONT_DEPRECATE_ERRNO)
+
+set(tools tokudb_dump tokuftdump tokuft_logprint tdb-recover ftverify)
+foreach(tool ${tools})
+ add_executable(${tool} ${tool}.cc)
+ add_dependencies(${tool} install_tdb_h)
+ target_link_libraries(${tool} ${LIBTOKUDB}_static ft_static z lzma snappy ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
+
+ # detect when we are being built as a subproject
+ if (DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
+ if ((CMAKE_BUILD_TYPE MATCHES "Debug") AND
+ (CMAKE_CXX_FLAGS_DEBUG MATCHES " -DENABLED_DEBUG_SYNC"))
+ target_link_libraries(${tool} sql)
+ endif()
+ target_link_libraries(${tool} mysys)
+ endif ()
+
+ add_space_separated_property(TARGET ${tool} COMPILE_FLAGS -fvisibility=hidden)
+endforeach(tool)
+
+# link in the math library (libm) just for this tool.
+target_link_libraries(ftverify m)
+
+install(TARGETS tokuftdump DESTINATION ${INSTALL_BINDIR} COMPONENT tokudb-engine)
+install(TARGETS tokuft_logprint DESTINATION ${INSTALL_BINDIR} COMPONENT tokudb-engine)
diff --git a/storage/tokudb/PerconaFT/tools/ftverify.cc b/storage/tokudb/PerconaFT/tools/ftverify.cc
new file mode 100644
index 00000000..ee40b991
--- /dev/null
+++ b/storage/tokudb/PerconaFT/tools/ftverify.cc
@@ -0,0 +1,452 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+////////////////////////////////////////////////////////////////////
+// ftverify - Command line tool that checks the validity of a given
+// fractal tree file, one block at a time.
+////////////////////////////////////////////////////////////////////
+
+#include "portability/toku_assert.h"
+#include "portability/toku_list.h"
+#include "portability/toku_portability.h"
+
+#include "ft/serialize/block_allocator.h"
+#include "ft/ft-internal.h"
+#include "ft/serialize/ft-serialize.h"
+#include "ft/serialize/ft_layout_version.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "ft/node.h"
+#include "ft/serialize/rbuf.h"
+#include "ft/serialize/sub_block.h"
+#include "util/threadpool.h"
+
+#include <fcntl.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sysexits.h>
+#include <unistd.h>
+
+static int num_cores = 0; // cache the number of cores for the parallelization
+static struct toku_thread_pool *ft_pool = NULL;
+static FILE *outf;
+static double pct = 0.5;
+
+// Struct for reporting sub block stats.
+struct verify_block_extra {
+ BLOCKNUM b;
+ int n_sub_blocks;
+ uint32_t header_length;
+ uint32_t calc_xsum;
+ uint32_t stored_xsum;
+ bool header_valid;
+ bool sub_blocks_valid;
+ struct sub_block_info *sub_block_results;
+};
+
+// Initialization function for the sub block stats.
+static void
+init_verify_block_extra(BLOCKNUM b, struct verify_block_extra *e)
+{
+ static const struct verify_block_extra default_vbe =
+ {
+ .b = { 0 },
+ .n_sub_blocks = 0,
+ .header_length = 0,
+ .calc_xsum = 0,
+ .stored_xsum = 0,
+ .header_valid = true,
+ .sub_blocks_valid = true,
+ .sub_block_results = NULL
+ };
+ *e = default_vbe;
+ e->b = b;
+}
+
+// Reports percentage of completed blocks.
+static void
+report(int64_t blocks_done, int64_t blocks_failed, int64_t total_blocks)
+{
+ int64_t blocks_per_report = llrint(pct * total_blocks / 100.0);
+ if (blocks_per_report < 1) {
+ blocks_per_report = 1;
+ }
+ if (blocks_done % blocks_per_report == 0) {
+ double pct_actually_done = (100.0 * blocks_done) / total_blocks;
+ printf("% 3.3lf%% | %" PRId64 " blocks checked, %" PRId64 " bad block(s) detected\n",
+ pct_actually_done, blocks_done, blocks_failed);
+ fflush(stdout);
+ }
+}
+
+// Helper function to deserialize both headers of the ft we are
+// checking.
+static void
+deserialize_headers(int fd, struct ft **h1p, struct ft **h2p)
+{
+ struct rbuf rb_0;
+ struct rbuf rb_1;
+ uint64_t checkpoint_count_0;
+ uint64_t checkpoint_count_1;
+ LSN checkpoint_lsn_0;
+ LSN checkpoint_lsn_1;
+ uint32_t version_0, version_1;
+ bool h0_acceptable = false;
+ bool h1_acceptable = false;
+ int r0, r1;
+ int r;
+
+ {
+ toku_off_t header_0_off = 0;
+ r0 = deserialize_ft_from_fd_into_rbuf(
+ fd,
+ header_0_off,
+ &rb_0,
+ &checkpoint_count_0,
+ &checkpoint_lsn_0,
+ &version_0
+ );
+ if ((r0==0) && (checkpoint_lsn_0.lsn <= MAX_LSN.lsn)) {
+ h0_acceptable = true;
+ }
+ }
+ {
+ toku_off_t header_1_off = BlockAllocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
+ r1 = deserialize_ft_from_fd_into_rbuf(
+ fd,
+ header_1_off,
+ &rb_1,
+ &checkpoint_count_1,
+ &checkpoint_lsn_1,
+ &version_1
+ );
+ if ((r1==0) && (checkpoint_lsn_1.lsn <= MAX_LSN.lsn)) {
+ h1_acceptable = true;
+ }
+ }
+
+ // If either header is too new, the dictionary is unreadable
+ if (r0 == TOKUDB_DICTIONARY_TOO_NEW || r1 == TOKUDB_DICTIONARY_TOO_NEW) {
+ fprintf(stderr, "This dictionary was created with a version of PerconaFT that is too new. Aborting.\n");
+ abort();
+ }
+ if (h0_acceptable) {
+ printf("Found dictionary header 1 with LSN %" PRIu64 "\n", checkpoint_lsn_0.lsn);
+ r = deserialize_ft_versioned(fd, &rb_0, h1p, version_0);
+
+ if (r != 0) {
+ printf("---Header Error----\n");
+ }
+
+ } else {
+ *h1p = NULL;
+ }
+ if (h1_acceptable) {
+ printf("Found dictionary header 2 with LSN %" PRIu64 "\n", checkpoint_lsn_1.lsn);
+ r = deserialize_ft_versioned(fd, &rb_1, h2p, version_1);
+ if (r != 0) {
+ printf("---Header Error----\n");
+ }
+ } else {
+ *h2p = NULL;
+ }
+
+ if (rb_0.buf) toku_free(rb_0.buf);
+ if (rb_1.buf) toku_free(rb_1.buf);
+}
+
+// Helper struct for tracking block checking progress.
+struct check_block_table_extra {
+ int fd;
+ int64_t blocks_done, blocks_failed, total_blocks;
+ struct ft *h;
+};
+
+// Check non-upgraded (legacy) node.
+// NOTE: These nodes have less checksumming than more
+// recent nodes. This effectively means that we are
+// skipping over these nodes.
+static int
+check_old_node(FTNODE node, struct rbuf *rb, int version)
+{
+ int r = 0;
+ read_legacy_node_info(node, rb, version);
+ // For version 14 nodes, advance the buffer to the end
+ // and verify the checksum.
+ if (version == FT_FIRST_LAYOUT_VERSION_WITH_END_TO_END_CHECKSUM) {
+ // Advance the buffer to the end.
+ rb->ndone = rb->size - 4;
+ r = check_legacy_end_checksum(rb);
+ }
+
+ return r;
+}
+
+// Read, decompress, and check the given block.
+static int
+check_block(BLOCKNUM blocknum, int64_t UU(blocksize), int64_t UU(address), void *extra)
+{
+ int r = 0;
+ int failure = 0;
+ struct check_block_table_extra *CAST_FROM_VOIDP(cbte, extra);
+ int fd = cbte->fd;
+ FT ft = cbte->h;
+
+ struct verify_block_extra be;
+ init_verify_block_extra(blocknum, &be);
+
+ // Let's read the block off of disk and fill a buffer with that
+ // block.
+ struct rbuf rb = RBUF_INITIALIZER;
+ read_block_from_fd_into_rbuf(fd, blocknum, ft, &rb);
+
+ // Allocate the node.
+ FTNODE XMALLOC(node);
+
+ initialize_ftnode(node, blocknum);
+
+ r = read_and_check_magic(&rb);
+ if (r == DB_BADFORMAT) {
+ printf(" Magic failed.\n");
+ failure++;
+ }
+
+ r = read_and_check_version(node, &rb);
+ if (r != 0) {
+ printf(" Version check failed.\n");
+ failure++;
+ }
+
+ int version = node->layout_version_read_from_disk;
+
+ ////////////////////////////
+ // UPGRADE FORK GOES HERE //
+ ////////////////////////////
+
+ // Check nodes before major layout changes in version 15.
+ // All newer versions should follow the same layout, for now.
+ // This predicate would need to be changed if the layout
+ // of the nodes on disk does indeed change in the future.
+ if (version < FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES)
+ {
+ struct rbuf nrb;
+ // Use old decompression method for legacy nodes.
+ r = decompress_from_raw_block_into_rbuf(rb.buf, rb.size, &nrb, blocknum);
+ if (r != 0) {
+ failure++;
+ goto cleanup;
+ }
+
+ // Check the end-to-end checksum.
+ r = check_old_node(node, &nrb, version);
+ if (r != 0) {
+ failure++;
+ }
+ goto cleanup;
+ }
+
+ read_node_info(node, &rb, version);
+
+ FTNODE_DISK_DATA ndd;
+ allocate_and_read_partition_offsets(node, &rb, &ndd);
+
+ r = check_node_info_checksum(&rb);
+ if (r == TOKUDB_BAD_CHECKSUM) {
+ printf(" Node info checksum failed.\n");
+ failure++;
+ }
+
+ // Get the partition info sub block.
+ struct sub_block sb;
+ sub_block_init(&sb);
+ r = read_compressed_sub_block(&rb, &sb);
+ if (r != 0) {
+ printf(" Partition info checksum failed.\n");
+ failure++;
+ }
+
+ just_decompress_sub_block(&sb);
+
+ // If we want to inspect the data inside the partitions, we need
+ // to call setup_ftnode_partitions(node, bfe, true)
+
+ // <CER> TODO: Create function for this.
+ // Using the node info, decompress all the keys and pivots to
+ // detect any corruptions.
+ for (int i = 0; i < node->n_children; ++i) {
+ uint32_t curr_offset = BP_START(ndd,i);
+ uint32_t curr_size = BP_SIZE(ndd,i);
+ struct rbuf curr_rbuf = {.buf = NULL, .size = 0, .ndone = 0};
+ rbuf_init(&curr_rbuf, rb.buf + curr_offset, curr_size);
+ struct sub_block curr_sb;
+ sub_block_init(&curr_sb);
+
+ r = read_compressed_sub_block(&curr_rbuf, &curr_sb);
+ if (r != 0) {
+ printf(" Compressed child partition %d checksum failed.\n", i);
+ failure++;
+ }
+ just_decompress_sub_block(&curr_sb);
+
+ r = verify_ftnode_sub_block(&curr_sb, nullptr, blocknum);
+ if (r != 0) {
+ printf(" Uncompressed child partition %d checksum failed.\n", i);
+ failure++;
+ }
+
+ // <CER> If needed, we can print row and/or pivot info at this
+ // point.
+ }
+
+cleanup:
+ // Cleanup and error incrementing.
+ if (failure) {
+ cbte->blocks_failed++;
+ }
+
+ cbte->blocks_done++;
+
+ if (node) {
+ toku_free(node);
+ }
+
+ // Print the status of this block to the console.
+ report(cbte->blocks_done, cbte->blocks_failed, cbte->total_blocks);
+ // We need to ALWAYS return 0 if we want to continue iterating
+ // through the nodes in the file.
+ r = 0;
+ return r;
+}
+
+// This calls toku_blocktable_iterate on the given block table.
+// Passes our check_block() function to be called as we iterate over
+// the block table. This will print any interesting failures and
+// update us on our progress.
+static void check_block_table(int fd, block_table *bt, struct ft *h) {
+ int64_t num_blocks = bt->get_blocks_in_use_unlocked();
+ printf("Starting verification of checkpoint containing");
+ printf(" %" PRId64 " blocks.\n", num_blocks);
+ fflush(stdout);
+
+ struct check_block_table_extra extra = { .fd = fd,
+ .blocks_done = 0,
+ .blocks_failed = 0,
+ .total_blocks = num_blocks,
+ .h = h };
+ int r = bt->iterate(block_table::TRANSLATION_CURRENT,
+ check_block,
+ &extra,
+ true,
+ true);
+ if (r != 0) {
+ // We can print more information here if necessary.
+ }
+
+ assert(extra.blocks_done == extra.total_blocks);
+ printf("Finished verification. ");
+ printf(" %" PRId64 " blocks checked,", extra.blocks_done);
+ printf(" %" PRId64 " bad block(s) detected\n", extra.blocks_failed);
+ fflush(stdout);
+}
+
+int
+main(int argc, char const * const argv[])
+{
+ // open the file
+ int r = 0;
+ int dictfd;
+ const char *dictfname, *outfname;
+ if (argc < 3 || argc > 4) {
+ fprintf(stderr, "%s: Invalid arguments.\n", argv[0]);
+ fprintf(stderr, "Usage: %s <dictionary> <logfile> [report%%]\n", argv[0]);
+ r = EX_USAGE;
+ goto exit;
+ }
+
+ assert(argc == 3 || argc == 4);
+ dictfname = argv[1];
+ outfname = argv[2];
+ if (argc == 4) {
+ set_errno(0);
+ pct = strtod(argv[3], NULL);
+ assert_zero(get_maybe_error_errno());
+ assert(pct > 0.0 && pct <= 100.0);
+ }
+
+ // Open the file as read-only.
+ dictfd = open(dictfname, O_RDONLY | O_BINARY, S_IRWXU | S_IRWXG | S_IRWXO);
+ if (dictfd < 0) {
+ perror(dictfname);
+ fflush(stderr);
+ abort();
+ }
+ outf = fopen(outfname, "w");
+ if (!outf) {
+ perror(outfname);
+ fflush(stderr);
+ abort();
+ }
+
+ // body of toku_ft_serialize_init();
+ num_cores = toku_os_get_number_active_processors();
+ r = toku_thread_pool_create(&ft_pool, num_cores); lazy_assert_zero(r);
+ assert_zero(r);
+
+ // deserialize the header(s)
+ struct ft *h1, *h2;
+ deserialize_headers(dictfd, &h1, &h2);
+
+ // walk over the block table and check blocks
+ if (h1) {
+ printf("Checking dictionary from header 1.\n");
+ check_block_table(dictfd, &h1->blocktable, h1);
+ }
+ if (h2) {
+ printf("Checking dictionary from header 2.\n");
+ check_block_table(dictfd, &h2->blocktable, h2);
+ }
+ if (h1 == NULL && h2 == NULL) {
+ printf("Both headers have a corruption and could not be used.\n");
+ }
+
+ toku_thread_pool_destroy(&ft_pool);
+exit:
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/tools/pmprof b/storage/tokudb/PerconaFT/tools/pmprof
new file mode 100644
index 00000000..de0a7e3c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/tools/pmprof
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# a poor man's profiler
+# http://webcache.googleusercontent.com/search?q=cache:http://mituzas.lt/2009/02/15/poor-mans-contention-profiling/
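+#
+# Example (the PID and counts here are only illustrative):
+#   ./pmprof --nsamples=10 --sleeptime=2 1234
+# Each sample grabs every thread's backtrace with gdb; the awk stage below
+# collapses each backtrace into a comma-joined list of frames, and the final
+# sort | uniq -c | sort ranks the most frequent stacks.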
+
+nsamples=1
+sleeptime=1
+
+while [ $# -gt 0 ] ; do
+ arg=$1;
+ if [[ $arg =~ --(.*)=(.*) ]] ; then
+ eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]}
+ else
+ break
+ fi
+ shift
+done
+
+pid=$1
+
+for x in $(seq 1 $nsamples)
+ do
+ gdb -ex "set pagination 0" -ex "thread apply all bt" -batch -p $pid
+ sleep $sleeptime
+ done | \
+awk '
+ BEGIN { s = ""; }
+ /^Thread/ { if (s != "") print s; s = ""; }
+ /^\#/ { if ($3 == "in") { v = $4; } else { v = $2 } if (s != "" ) { s = s "," v} else { s = v } }
+ END { print s }' | \
+sort | uniq -c | sort -r -n -k 1,1
diff --git a/storage/tokudb/PerconaFT/tools/tdb-recover.cc b/storage/tokudb/PerconaFT/tools/tdb-recover.cc
new file mode 100644
index 00000000..f01d0109
--- /dev/null
+++ b/storage/tokudb/PerconaFT/tools/tdb-recover.cc
@@ -0,0 +1,80 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Recover an env. The logs are in argv[1]. The new database is created in the cwd. */
+
+// Test:
+// cd ../src/tests/tmpdir
+// ../../../ft/recover ../dir.test_log2.c.tdb
+
+#include "ft/ft-ops.h"
+#include "ft/logger/recover.h"
+
+static int recovery_main(int argc, const char *const argv[]);
+
+int main(int argc, const char *const argv[]) {
+ int r = toku_ft_layer_init();
+ assert(r == 0);
+ r = recovery_main(argc, argv);
+ toku_ft_layer_destroy();
+ return r;
+}
+
+int recovery_main (int argc, const char *const argv[]) {
+ const char *data_dir, *log_dir;
+ if (argc==3) {
+ data_dir = argv[1];
+ log_dir = argv[2];
+ } else if (argc==2) {
+ data_dir = log_dir = argv[1];
+ } else {
+ printf("Usage: %s <datadir> [ <logdir> ]\n", argv[0]);
+ return(1);
+ }
+
+ int r = tokuft_recover(nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ data_dir, log_dir, nullptr, nullptr, nullptr, nullptr, 0);
+ if (r!=0) {
+ fprintf(stderr, "Recovery failed\n");
+ return(1);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/tools/tokudb_dump.cc b/storage/tokudb/PerconaFT/tools/tokudb_dump.cc
new file mode 100644
index 00000000..d7362fc6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/tools/tokudb_dump.cc
@@ -0,0 +1,685 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
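+// tokudb_dump - dump a dictionary to a flat db_dump-style text stream:
+// a VERSION/HEADER preamble, hex-encoded key/value pairs (one per line),
+// and a DATA=END footer. See usage() below for the supported options.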
+#include <db.h>
+
+#include <toku_stdlib.h>
+#include <toku_stdint.h>
+#include <toku_portability.h>
+#include <toku_assert.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <memory.h>
+
+typedef struct {
+ bool leadingspace;
+ bool plaintext;
+ bool header;
+ bool footer;
+ bool is_private;
+ bool recovery_and_txn;
+ char* progname;
+ char* homedir;
+ char* database;
+ char* subdatabase;
+ int exitcode;
+ int recover_flags;
+ DBTYPE dbtype;
+ DBTYPE opened_dbtype;
+ DB* db;
+ DB_ENV* dbenv;
+} dump_globals;
+
+dump_globals g;
+
+#define SET_BITS(bitvector, bits) ((bitvector) |= (bits))
+#define REMOVE_BITS(bitvector, bits) ((bitvector) &= ~(bits))
+#define IS_SET_ANY(bitvector, bits) ((bitvector) & (bits))
+#define IS_SET_ALL(bitvector, bits) (((bitvector) & (bits)) == (bits))
+
+#define IS_POWER_OF_2(num) ((num) > 0 && ((num) & ((num) - 1)) == 0)
+
+//DB_ENV->err disabled since it does not use db_strerror
+#define PRINT_ERROR(retval, ...) \
+do { \
+if (0) g.dbenv->err(g.dbenv, retval, __VA_ARGS__); \
+else { \
+ fprintf(stderr, "\tIn %s:%d %s()\n", __FILE__, __LINE__, __FUNCTION__); \
+ fprintf(stderr, "%s: %s:", g.progname, db_strerror(retval)); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ fflush(stderr); \
+} \
+} while (0)
+
+//DB_ENV->err disabled since it does not use db_strerror, errx does not exist.
+#define PRINT_ERRORX(...) \
+do { \
+if (0) g.dbenv->err(g.dbenv, 0, __VA_ARGS__); \
+else { \
+ fprintf(stderr, "\tIn %s:%d %s()\n", __FILE__, __LINE__, __FUNCTION__); \
+ fprintf(stderr, "%s: ", g.progname); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ fflush(stderr); \
+} \
+} while (0)
+
+int strtoint32 (char* str, int32_t* num, int32_t min, int32_t max, int base);
+int strtouint32 (char* str, uint32_t* num, uint32_t min, uint32_t max, int base);
+int strtoint64 (char* str, int64_t* num, int64_t min, int64_t max, int base);
+int strtouint64 (char* str, uint64_t* num, uint64_t min, uint64_t max, int base);
+
+/*
+ * Convert a string to an integer of type "type".
+ *
+ * Sets errno and returns:
+ * EINVAL: str == NULL, num == NULL, or string not of the form [ \t]*[+-]?[0-9]+
+ * ERANGE: value outside the specified range [min, max]
+ *
+ * *num is unchanged on error.
+ * Returns: EXIT_SUCCESS on success, errno otherwise (see the error paths above).
+ */
+#define DEF_STR_TO(name, type, bigtype, strtofunc, frmt) \
+int name(char* str, type* num, type min, type max, int base) \
+{ \
+ char* test; \
+ bigtype value; \
+ \
+ assert(str); \
+ assert(num); \
+ assert(min <= max); \
+ assert(g.dbenv || g.progname); \
+ assert(base == 0 || (base >= 2 && base <= 36)); \
+ \
+ errno = 0; \
+ while (isspace(*str)) str++; \
+ value = strtofunc(str, &test, base); \
+ if ((*test != '\0' && *test != '\n') || test == str) { \
+ PRINT_ERRORX("%s: Invalid numeric argument\n", str); \
+ errno = EINVAL; \
+ goto error; \
+ } \
+ if (errno != 0) { \
+ PRINT_ERROR(errno, "%s\n", str); \
+ } \
+ if (value < min) { \
+ PRINT_ERRORX("%s: Less than minimum value (%" frmt ")\n", str, min); \
+ goto error; \
+ } \
+ if (value > max) { \
+ PRINT_ERRORX("%s: Greater than maximum value (%" frmt ")\n", str, max); \
+ goto error; \
+ } \
+ *num = value; \
+ return EXIT_SUCCESS; \
+error: \
+ return errno; \
+}
+
+DEF_STR_TO(strtoint32, int32_t, int64_t, strtoll, PRId32)
+DEF_STR_TO(strtouint32, uint32_t, uint64_t, strtoull, PRIu32)
+DEF_STR_TO(strtoint64, int64_t, int64_t, strtoll, PRId64)
+DEF_STR_TO(strtouint64, uint64_t, uint64_t, strtoull, PRIu64)
+
+static inline void
+outputbyte(uint8_t ch)
+{
+ if (g.plaintext) {
+ if (ch == '\\') printf("\\\\");
+ else if (isprint(ch)) printf("%c", ch);
+ else printf("\\%02x", ch);
+ }
+ else printf("%02x", ch);
+}
+
+static inline void
+outputstring(char* str)
+{
+ char* p;
+
+ for (p = str; *p != '\0'; p++) {
+ outputbyte((uint8_t)*p);
+ }
+}
+
+static inline void
+outputplaintextstring(char* str)
+{
+ bool old_plaintext = g.plaintext;
+ g.plaintext = true;
+ outputstring(str);
+ g.plaintext = old_plaintext;
+}
+
+static inline int
+verify_library_version(void)
+{
+ int major;
+ int minor;
+
+ db_version(&major, &minor, NULL);
+ if (major != DB_VERSION_MAJOR || minor != DB_VERSION_MINOR) {
+ PRINT_ERRORX("version %d.%d doesn't match library version %d.%d\n",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, major, minor);
+ return EXIT_FAILURE;
+ }
+ return EXIT_SUCCESS;
+}
+
+static int last_caught = 0;
+
+static void catch_signal(int which_signal) {
+ last_caught = which_signal;
+ if (last_caught == 0) last_caught = SIGINT;
+}
+
+static inline void
+init_catch_signals(void) {
+ signal(SIGINT, catch_signal);
+ signal(SIGTERM, catch_signal);
+#ifdef SIGHUP
+ signal(SIGHUP, catch_signal);
+#endif
+#ifdef SIGPIPE
+ signal(SIGPIPE, catch_signal);
+#endif
+}
+
+static inline int
+caught_any_signals(void) {
+ return last_caught != 0;
+}
+
+static inline void
+resend_signals(void) {
+ if (last_caught) {
+ signal(last_caught, SIG_DFL);
+ raise(last_caught);
+ }
+}
+
+static int usage (void);
+static int create_init_env(void);
+static int dump_database (void);
+static int open_database (void);
+static int dump_pairs (void);
+static int dump_footer (void);
+static int dump_header (void);
+static int close_database (void);
+
+int main(int argc, char *const argv[]) {
+ int ch;
+ int retval;
+
+ /* Set up the globals. */
+ memset(&g, 0, sizeof(g));
+ g.leadingspace = true;
+ //TODO: Uncomment when DB_UNKNOWN + db->get_type are implemented.
+ g.dbtype = DB_UNKNOWN;
+ //g.dbtype = DB_BTREE;
+ g.progname = argv[0];
+ g.header = true;
+ g.footer = true;
+ g.recovery_and_txn = true;
+
+ if (verify_library_version() != 0) goto error;
+
+ while ((ch = getopt(argc, argv, "d:f:h:klNP:ps:RrVTx")) != EOF) {
+ switch (ch) {
+ case ('d'): {
+ PRINT_ERRORX("-%c option not supported.\n", ch);
+ goto error;
+ }
+ case ('f'): {
+ if (freopen(optarg, "w", stdout) == NULL) {
+ fprintf(stderr,
+ "%s: %s: reopen: %s\n",
+ g.progname, optarg, strerror(errno));
+ goto error;
+ }
+ break;
+ }
+ case ('h'): {
+ g.homedir = optarg;
+ break;
+ }
+ case ('k'): {
+ PRINT_ERRORX("-%c option not supported.\n", ch);
+ goto error;
+ }
+ case ('l'): {
+ //TODO: Implement (Requires master database support)
+ PRINT_ERRORX("-%c option not supported.\n", ch); //YET!
+ goto error;
+ }
+ case ('N'): {
+ PRINT_ERRORX("-%c option not supported.\n", ch);
+ goto error;
+ }
+ case ('P'): {
+ /* Clear password. */
+ memset(optarg, 0, strlen(optarg));
+ PRINT_ERRORX("-%c option not supported.\n", ch);
+ goto error;
+ }
+ case ('p'): {
+ g.plaintext = true;
+ break;
+ }
+ case ('R'): {
+ //TODO: Uncomment when DB_SALVAGE,DB_AGGRESSIVE are implemented.
+ /*g.recover_flags |= DB_SALVAGE | DB_AGGRESSIVE;*/
+
+ //TODO: Implement aggressive recovery (requires db->verify())
+ PRINT_ERRORX("-%c option not supported.\n", ch);
+ goto error;
+ }
+ case ('r'): {
+ //TODO: Uncomment when DB_SALVAGE,DB_AGGRESSIVE are implemented.
+ /*g.recover_flags |= DB_SALVAGE;*/
+
+ //TODO: Implement recovery (requires db->verify())
+ PRINT_ERRORX("-%c option not supported.\n", ch);
+ goto error;
+ }
+ case ('s'): {
+ g.subdatabase = optarg;
+ break;
+ }
+ case ('V'): {
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ goto cleanup;
+ }
+ case ('T'): {
+ g.plaintext = true;
+ g.leadingspace = false;
+ g.header = false;
+ g.footer = false;
+ break;
+ }
+ case ('x'): {
+ g.recovery_and_txn = false;
+ break;
+ }
+ case ('?'):
+ default: {
+ g.exitcode = usage();
+ goto cleanup;
+ }
+ }
+ }
+ argc -= optind;
+ argv += optind;
+
+ //TODO: Uncomment when DB_SALVAGE,DB_AGGRESSIVE,DB_PRINTABLE,db->verify are implemented.
+ /*
+ if (g.plaintext) g.recover_flags |= DB_PRINTABLE;
+
+ if (g.subdatabase != NULL && IS_SET_ALL(g.recover_flags, DB_SALVAGE)) {
+ if (IS_SET_ALL(g.recover_flags, DB_AGGRESSIVE)) {
+ PRINT_ERRORX("The -s and -R options may not both be specified.\n");
+ goto error;
+ }
+ PRINT_ERRORX("The -s and -r options may not both be specified.\n");
+ goto error;
+
+ }
+ */
+
+ if (argc != 1) {
+ g.exitcode = usage();
+ goto cleanup;
+ }
+
+ init_catch_signals();
+
+ g.database = argv[0];
+ if (caught_any_signals()) goto cleanup;
+ if (create_init_env() != 0) goto error;
+ if (caught_any_signals()) goto cleanup;
+ if (dump_database() != 0) goto error;
+ if (false) {
+error:
+ g.exitcode = EXIT_FAILURE;
+ fprintf(stderr, "%s: Quitting out due to errors.\n", g.progname);
+ }
+cleanup:
+ if (g.dbenv && (retval = g.dbenv->close(g.dbenv, 0)) != 0) {
+ g.exitcode = EXIT_FAILURE;
+ fprintf(stderr, "%s: %s: dbenv->close\n", g.progname, db_strerror(retval));
+ }
+ // if (g.subdatabase) free(g.subdatabase);
+ resend_signals();
+
+ return g.exitcode;
+}
+
+int dump_database()
+{
+ int retval;
+
+ /* Create a database handle. */
+ retval = db_create(&g.db, g.dbenv, 0);
+ if (retval != 0) {
+ PRINT_ERROR(retval, "db_create");
+ return EXIT_FAILURE;
+ }
+
+ /*
+ TODO: If/when supporting encryption
+ if (g.password && (retval = db->set_flags(db, DB_ENCRYPT))) {
+ PRINT_ERROR(ret, "DB->set_flags: DB_ENCRYPT");
+ goto error;
+ }
+ */
+ if (open_database() != 0) goto error;
+ if (caught_any_signals()) goto cleanup;
+ if (g.header && dump_header() != 0) goto error;
+ if (caught_any_signals()) goto cleanup;
+ if (dump_pairs() != 0) goto error;
+ if (caught_any_signals()) goto cleanup;
+ if (g.footer && dump_footer() != 0) goto error;
+
+ if (false) {
+error:
+ g.exitcode = EXIT_FAILURE;
+ }
+cleanup:
+
+ if (close_database() != 0) g.exitcode = EXIT_FAILURE;
+
+ return g.exitcode;
+}
+
+int usage()
+{
+ fprintf(stderr,
+ "usage: %s [-pVT] [-x] [-f output] [-h home] [-s database] db_file\n",
+ g.progname);
+ return EXIT_FAILURE;
+}
+
+int create_init_env()
+{
+ int retval;
+ DB_ENV* dbenv;
+ int flags;
+ //TODO: Experiments to determine right cache size for tokudb, or maybe command line argument.
+
+ retval = db_env_create(&dbenv, 0);
+ if (retval) {
+ fprintf(stderr, "%s: db_dbenv_create: %s\n", g.progname, db_strerror(retval));
+ goto error;
+ }
+ ///TODO: UNCOMMENT/IMPLEMENT dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, g.progname);
+ /*
+ TODO: Anything for encryption?
+ */
+
+ /* Open the dbenvironment. */
+ g.is_private = false;
+ //flags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_USE_ENVIRON;
+ flags = DB_INIT_LOCK | DB_INIT_MPOOL; ///TODO: UNCOMMENT/IMPLEMENT | DB_USE_ENVIRON;
+ if (g.recovery_and_txn) {
+ SET_BITS(flags, DB_INIT_LOG | DB_INIT_TXN | DB_RECOVER);
+ }
+
+ /*
+ ///TODO: UNCOMMENT/IMPLEMENT Notes: We require DB_PRIVATE
+ if (!dbenv->open(dbenv, g.homedir, flags, 0)) goto success;
+ */
+
+ /*
+ ///TODO: UNCOMMENT/IMPLEMENT
+ retval = dbenv->set_cachesize(dbenv, 0, cache, 1);
+ if (retval) {
+ PRINT_ERROR(retval, "DB_ENV->set_cachesize");
+ goto error;
+ }
+ */
+ g.is_private = true;
+ //TODO: Do we want to support transactions even in single-process mode?
+ //Logging is not necessary.. this is read-only.
+ //However, do we need to use DB_INIT_LOG to join a logging environment?
+ //REMOVE_BITS(flags, DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN);
+ SET_BITS(flags, DB_CREATE | DB_PRIVATE);
+
+ retval = dbenv->open(dbenv, g.homedir, flags, 0);
+ if (retval) {
+ PRINT_ERROR(retval, "DB_ENV->open");
+ goto error;
+ }
+ g.dbenv = dbenv;
+ return EXIT_SUCCESS;
+
+error:
+ return EXIT_FAILURE;
+}
+
+#define DUMP_FLAG(bit, dump) if (IS_SET_ALL(flags, bit)) printf(dump);
+
+#define DUMP_IGNORED_FLAG(bit, dump)
+
+
+int dump_header()
+{
+ uint32_t flags;
+ int retval;
+ DB* db = g.db;
+
+ assert(g.header);
+ printf("VERSION=3\n");
+ printf("format=%s\n", g.plaintext ? "print" : "bytevalue");
+ //TODO: Uncomment when DB_UNKNOWN + db->get_type are implemented.
+ /*assert(g.dbtype == DB_BTREE || (g.dbtype == DB_UNKNOWN && g.opened_dbtype == DB_BTREE));*/
+ printf("type=btree\n");
+ //TODO: Get page size from db. Currently tokudb does not support db->get_pagesize.
+ //Don't print this out //printf("db_pagesize=4096\n");
+ if (g.subdatabase) {
+ printf("subdatabase=");
+ outputplaintextstring(g.subdatabase);
+ printf("\n");
+ }
+ //TODO: Uncomment when db->get_flags is implemented
+ if ((retval = db->get_flags(db, &flags)) != 0) {
+ PRINT_ERROR(retval, "DB->get_flags");
+ goto error;
+ }
+ DUMP_IGNORED_FLAG(DB_CHKSUM, "chksum=1\n");
+ DUMP_IGNORED_FLAG(DB_RECNUM, "recnum=1\n");
+ printf("HEADER=END\n");
+
+ if (ferror(stdout)) goto error;
+ return EXIT_SUCCESS;
+
+error:
+ return EXIT_FAILURE;
+}
+
+int dump_footer()
+{
+ printf("DATA=END\n");
+ if (ferror(stdout)) goto error;
+
+ return EXIT_SUCCESS;
+error:
+ return EXIT_FAILURE;
+}
+
+int open_database()
+{
+ DB* db = g.db;
+ int retval;
+
+ int open_flags = 0;//|DB_RDONLY;
+ //TODO: Transaction auto commit stuff
+ SET_BITS(open_flags, DB_AUTO_COMMIT);
+
+ retval = db->open(db, NULL, g.database, g.subdatabase, g.dbtype, open_flags, 0666);
+ if (retval != 0) {
+ PRINT_ERROR(retval, "DB->open: %s", g.database);
+ goto error;
+ }
+ //TODO: Uncomment when DB_UNKNOWN + db->get_type are implemented.
+ /*
+ retval = db->get_type(db, &g.opened_dbtype);
+ if (retval != 0) {
+ PRINT_ERROR(retval, "DB->get_type");
+ goto error;
+ }
+ if (g.opened_dbtype != DB_BTREE) {
+ PRINT_ERRORX("Unsupported db type %d\n", g.opened_dbtype);
+ goto error;
+ }
+ if (g.dbtype != DB_UNKNOWN && g.opened_dbtype != g.dbtype) {
+ PRINT_ERRORX("DBTYPE %d does not match opened DBTYPE %d.\n", g.dbtype, g.opened_dbtype);
+ goto error;
+ }*/
+ return EXIT_SUCCESS;
+error:
+ fprintf(stderr, "Quitting out due to errors.\n");
+ return EXIT_FAILURE;
+}
+
+static int dump_dbt(DBT* dbt)
+{
+ char* str;
+ uint32_t idx;
+
+ assert(dbt);
+ str = (char*)dbt->data;
+ if (g.leadingspace) printf(" ");
+ if (dbt->size > 0) {
+ assert(dbt->data);
+ for (idx = 0; idx < dbt->size; idx++) {
+ outputbyte(str[idx]);
+ if (ferror(stdout)) {
+ perror("stdout");
+ goto error;
+ }
+ }
+ }
+ printf("\n");
+ if (false) {
+error:
+ g.exitcode = EXIT_FAILURE;
+ }
+ return g.exitcode;
+}
+
+int dump_pairs()
+{
+ int retval;
+ DBT key;
+ DBT data;
+ DB* db = g.db;
+ DBC* dbc = NULL;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ DB_TXN* txn = NULL;
+ if (g.recovery_and_txn) {
+ retval = g.dbenv->txn_begin(g.dbenv, NULL, &txn, 0);
+ if (retval) {
+ PRINT_ERROR(retval, "DB_ENV->txn_begin");
+ goto error;
+ }
+ }
+
+ if ((retval = db->cursor(db, txn, &dbc, 0)) != 0) {
+ PRINT_ERROR(retval, "DB->cursor");
+ goto error;
+ }
+ while ((retval = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
+ if (caught_any_signals()) goto cleanup;
+ if (dump_dbt(&key) != 0) goto error;
+ if (dump_dbt(&data) != 0) goto error;
+ }
+ if (retval != DB_NOTFOUND) {
+ PRINT_ERROR(retval, "DBC->c_get");
+ goto error;
+ }
+
+
+ if (false) {
+error:
+ g.exitcode = EXIT_FAILURE;
+ }
+cleanup:
+ if (dbc && (retval = dbc->c_close(dbc)) != 0) {
+ PRINT_ERROR(retval, "DBC->c_close");
+ g.exitcode = EXIT_FAILURE;
+ }
+ if (txn) {
+ if (retval) {
+ int r2 = txn->abort(txn);
+ if (r2) PRINT_ERROR(r2, "DB_TXN->abort");
+ }
+ else {
+ retval = txn->commit(txn, 0);
+ if (retval) PRINT_ERROR(retval, "DB_TXN->commit");
+ }
+ }
+ return g.exitcode;
+}
+
+int close_database()
+{
+ DB* db = g.db;
+ int retval;
+
+ assert(db);
+ if ((retval = db->close(db, 0)) != 0) {
+ PRINT_ERROR(retval, "DB->close");
+ goto error;
+ }
+ return EXIT_SUCCESS;
+error:
+ return EXIT_FAILURE;
+}
diff --git a/storage/tokudb/PerconaFT/tools/tokuft_logprint.cc b/storage/tokudb/PerconaFT/tools/tokuft_logprint.cc
new file mode 100644
index 00000000..924eee2d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/tools/tokuft_logprint.cc
@@ -0,0 +1,74 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Dump the log from stdin to stdout. */
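+// Example (the log file name is hypothetical):
+//   tokuft_logprint 100 < log000000000000.tokulog
+// The optional numeric argument limits how many records are printed.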
+#include "ft/ft.h"
+#include "ft/log_header.h"
+#include "ft/logger/logger.h"
+
+using namespace std;
+
+int main (int argc, const char *const argv[]) {
+ int r = toku_ft_layer_init();
+ assert_zero(r);
+
+ int count=-1;
+ while (argc>1) {
+ if (strcmp(argv[1], "--oldcode")==0) {
+ fprintf(stderr,"Old code no longer works.\n");
+ exit(1);
+ } else {
+ count = atoi(argv[1]);
+ }
+ argc--; argv++;
+ }
+ int i;
+ uint32_t version;
+ r = toku_read_and_print_logmagic(stdin, &version);
+ for (i=0; i!=count; i++) {
+ r = toku_logprint_one_record(stdout, stdin);
+ if (r==EOF) break;
+ if (r!=0) {
+ fflush(stdout);
+ fprintf(stderr, "Problem in log err=%d\n", r);
+ exit(1);
+ }
+ }
+ toku_ft_layer_destroy();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/tools/tokuftdump.cc b/storage/tokudb/PerconaFT/tools/tokuftdump.cc
new file mode 100644
index 00000000..44edb151
--- /dev/null
+++ b/storage/tokudb/PerconaFT/tools/tokuftdump.cc
@@ -0,0 +1,1246 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Dump a fractal tree file
+
+#include <ctype.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <string>
+#include <iostream>
+#include <fstream>
+#include <map>
+#include <string>
+#include <string.h>
+#include "ft/serialize/block_table.h"
+#include "ft/cachetable/cachetable.h"
+#include "ft/ft.h"
+#include "ft/ft-internal.h"
+#include "ft/serialize/ft-serialize.h"
+#include "ft/serialize/ft_node-serialize.h"
+#include "ft/node.h"
+
+using namespace std;
+
+static int do_dump_data = 1;
+static int do_interactive = 0;
+static int do_json = 0;
+static int do_header = 0;
+static int do_fragmentation = 0;
+static int do_garbage = 0;
+static int do_translation_table = 0;
+static int do_summary = 0;
+static int do_rootnode = 0;
+static int do_node = 0;
+static BLOCKNUM do_node_num;
+static int do_tsv = 0;
+static const char *arg0;
+static const char *fname;
+
+// it holds the message counts for each FT node
+typedef struct nodeMessage{
+ int id;
+ int clean;//0=clean >=1 dirty
+ int *count;//holds the messages
+ nodeMessage *nextNode;
+}NMC;
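+// count[] has one slot per ft_msg_type (16 entries are allocated in
+// countMessagesInFT() below); nextNode chains the nodes of one level.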
+enum { maxline = 128};
+
+static int printNodeMessagesToSTDout(NMC* ptr);
+
+static int printLevelSTDout(int *);
+
+static void treeToSTDout(NMC *msgs[], int height);
+
+static void format_time(const uint64_t time_int, char *buf) {
+ time_t timer = (time_t) time_int;
+ ctime_r(&timer, buf);
+ assert(buf[24] == '\n');
+ buf[24] = 0;
+}
+
+static void print_item(const void *val, uint32_t len) {
+ printf("\"");
+ uint32_t i;
+ for (i=0; i<len; i++) {
+ unsigned char ch = ((unsigned char*)val)[i];
+ if (isprint(ch) && ch!='\\' && ch!='"') {
+ printf("%c", ch);
+ } else {
+ printf("\\%03o", ch);
+ }
+ }
+ printf("\"");
+}
+
+static void simple_hex_dump(unsigned char *vp, uint64_t size) {
+ for (uint64_t i = 0; i < size; i++) {
+ unsigned char c = vp[i];
+ printf("%2.2X", c);
+ }
+}
+
+static void hex_dump(unsigned char *vp, uint64_t offset, uint64_t size) {
+ uint64_t n = size / 32;
+ for (uint64_t i = 0; i < n; i++) {
+ printf("%" PRIu64 ": ", offset);
+ for (uint64_t j = 0; j < 32; j++) {
+ unsigned char c = vp[j];
+ printf("%2.2X", c);
+ if (((j+1) % 4) == 0)
+ printf(" ");
+ }
+ for (uint64_t j = 0; j < 32; j++) {
+ unsigned char c = vp[j];
+ printf("%c", isprint(c) ? c : ' ');
+ }
+ printf("\n");
+ vp += 32;
+ offset += 32;
+ }
+ size = size % 32;
+ for (uint64_t i=0; i<size; i++) {
+ if ((i % 32) == 0)
+ printf("%" PRIu64 ": ", offset+i);
+ printf("%2.2X", vp[i]);
+ if (((i+1) % 4) == 0)
+ printf(" ");
+ if (((i+1) % 32) == 0)
+ printf("\n");
+ }
+ printf("\n");
+}
+
+static void dump_descriptor(DESCRIPTOR d) {
+ printf(" descriptor size %u ", d->dbt.size);
+ simple_hex_dump((unsigned char*) d->dbt.data, d->dbt.size);
+ printf("\n");
+}
+
+static void open_header(int fd, FT *header, CACHEFILE cf) {
+ FT ft = NULL;
+ int r;
+ const char *fn = toku_cachefile_fname_in_env(cf);
+ r = toku_deserialize_ft_from (fd, fn, MAX_LSN, &ft);
+ if (r != 0) {
+ fprintf(stderr, "%s: can not deserialize from %s error %d\n", arg0, fname, r);
+ exit(1);
+ }
+ assert_zero(r);
+ ft->cf = cf;
+ *header = ft;
+}
+
+static void dump_header(FT ft) {
+ char timestr[26];
+ printf("ft:\n");
+ printf(" layout_version=%d\n", ft->h->layout_version);
+ printf(" layout_version_original=%d\n", ft->h->layout_version_original);
+ printf(" layout_version_read_from_disk=%d\n", ft->layout_version_read_from_disk);
+ printf(" build_id=%d\n", ft->h->build_id);
+ printf(" build_id_original=%d\n", ft->h->build_id_original);
+ format_time(ft->h->time_of_creation, timestr);
+ printf(" time_of_creation= %" PRIu64 " %s\n", ft->h->time_of_creation, timestr);
+ format_time(ft->h->time_of_last_modification, timestr);
+ printf(" time_of_last_modification=%" PRIu64 " %s\n", ft->h->time_of_last_modification, timestr);
+ printf(" dirty=%d\n", ft->h->dirty());
+ printf(" checkpoint_count=%" PRId64 "\n", ft->h->checkpoint_count);
+ printf(" checkpoint_lsn=%" PRId64 "\n", ft->h->checkpoint_lsn.lsn);
+ printf(" nodesize=%u\n", ft->h->nodesize);
+ printf(" fanout=%u\n", ft->h->fanout);
+ printf(" basementnodesize=%u\n", ft->h->basementnodesize);
+ printf(" compression_method=%u\n", (unsigned) ft->h->compression_method);
+ printf(" unnamed_root=%" PRId64 "\n", ft->h->root_blocknum.b);
+ printf(" flags=%u\n", ft->h->flags);
+ dump_descriptor(&ft->descriptor);
+ printf(" estimated numrows=%" PRId64 "\n", ft->in_memory_stats.numrows);
+ printf(" estimated numbytes=%" PRId64 "\n", ft->in_memory_stats.numbytes);
+ printf(" logical row count=%" PRId64 "\n", ft->in_memory_logical_rows);
+}
+
+static int64_t getRootNode(FT ft) {
+ return ft->h->root_blocknum.b;
+}
+
+static int print_le(const void* key, const uint32_t keylen, const LEAFENTRY &le, const uint32_t idx UU(), void *const ai UU()) {
+ unsigned int *le_index = (unsigned int *) ai;
+ printf("%u: ", *le_index); *le_index += 1;
+ print_klpair(stdout, key, keylen, le);
+ printf("\n");
+ return 0;
+}
+
+static int getHeight(int fd, BLOCKNUM blocknum, FT ft){
+ FTNODE n;
+ FTNODE_DISK_DATA ndd = nullptr;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ int r = toku_deserialize_ftnode_from (fd, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
+ assert_zero(r);
+ assert(n!=0);
+ return n->height;
+}
+
+static FTNODE getNode(int fd, BLOCKNUM blocknum, FT ft) {
+ FTNODE n;
+ FTNODE_DISK_DATA ndd = nullptr;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ int r = toku_deserialize_ftnode_from (fd, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
+ assert_zero(r);;
+ return n;
+}
+
+static int countNodes(NMC *level){
+ int count=0;
+ NMC *ptr=level;
+ while(ptr!=NULL){
+ count++;
+ ptr=ptr->nextNode;
+ }
+ return count;
+}
+
+static int * countMessages(NMC *level){
+ int *counts=new int[16];
+ for(int i=0;i<16;i++){
+ counts[i]=0;
+ }
+ NMC *ptr=level;
+ while(ptr!=NULL){
+ for(int i=0;i<16;i++){
+ counts[i]+=ptr->count[i];
+ }
+ ptr=ptr->nextNode;
+ }
+ return counts;
+}
+
+static NMC * getLast(NMC *level){
+ if (level==NULL) return NULL;
+ NMC *ptr=level;
+ while(ptr->nextNode!=NULL){
+ ptr=ptr->nextNode;
+ }
+ return ptr;
+}
+
+/*
+ * Prints the total messages at each level to STDOUT
+ */
+static int printLevelSTDout(int *count){
+ int isEmpty=0;
+ for(int j=0;j<16;j++){
+ if(count[j]>0){
+ cout <<count[j]<<" ";
+ isEmpty++;
+ switch (j) {
+ case FT_INSERT: cout <<"INSERT(s) "; break;
+ case FT_INSERT_NO_OVERWRITE: cout <<"INSERT_NO_OVERWRITE(s) "; break;
+ case FT_DELETE_ANY: cout <<"DELETE_ANY(s) "; break;
+ case FT_ABORT_ANY: cout <<"ABORT_ANY(s) "; break;
+ case FT_COMMIT_ANY: cout <<"COMMIT_ANY(s) "; break;
+ case FT_COMMIT_BROADCAST_ALL: cout <<"COMMIT_BROADCAST_ALL(s) "; break;
+ case FT_COMMIT_BROADCAST_TXN: cout <<"COMMIT_BROADCAST_TXN(s) "; break;
+ case FT_ABORT_BROADCAST_TXN: cout <<"ABORT_BROADCAST_TXN(s) "; break;
+ case FT_OPTIMIZE: cout <<"OPTIMIZE(s) "; break;
+ case FT_OPTIMIZE_FOR_UPGRADE: cout <<"OPTIMIZE_FOR_UPGRADE(s) "; break;
+ case FT_UPDATE: cout <<"UPDATE(s) "; break;
+ case FT_UPDATE_BROADCAST_ALL: cout <<"UPDATE_BROADCAST_ALL(s) "; break;
+ }
+
+ }
+ }
+ return isEmpty;
+}
+
+/*
+ * Prints the total # of messages in a node to STDOUT
+ */
+static int printNodeMessagesToSTDout(NMC *ptr){
+ cout <<"\nNode :"<<ptr->id<<" has :";
+ for(int j=0;j<16;j++){
+ if(ptr->count[j]>0){
+ cout <<ptr->count[j]<<" ";
+ switch (j) {
+ case FT_INSERT: cout <<"INSERT(s) "; break;
+ case FT_INSERT_NO_OVERWRITE: cout <<"INSERT_NO_OVERWRITE(s) "; break;
+ case FT_DELETE_ANY: cout <<"DELETE_ANY(s) "; break;
+ case FT_ABORT_ANY: cout <<"ABORT_ANY(s) "; break;
+ case FT_COMMIT_ANY: cout <<"COMMIT_ANY(s) "; break;
+ case FT_COMMIT_BROADCAST_ALL: cout <<"COMMIT_BROADCAST_ALL(s) "; break;
+ case FT_COMMIT_BROADCAST_TXN: cout <<"COMMIT_BROADCAST_TXN(s) "; break;
+ case FT_ABORT_BROADCAST_TXN: cout <<"ABORT_BROADCAST_TXN(s) "; break;
+ case FT_OPTIMIZE: cout <<"OPTIMIZE(s) "; break;
+ case FT_OPTIMIZE_FOR_UPGRADE: cout <<"OPTIMIZE_FOR_UPGRADE(s) "; break;
+ case FT_UPDATE: cout <<"UPDATE(s) "; break;
+ case FT_UPDATE_BROADCAST_ALL: cout <<"UPDATE_BROADCAST_ALL(s) "; break;
+ }
+ }
+ }
+ return 1;
+}
+
+static void levelToSTDout(NMC *list, int level){
+ NMC *ptr=list;
+ cout <<endl<<"Height : "<<level<<endl;
+ while(ptr!=NULL){
+ if(ptr->clean!=0){
+ printNodeMessagesToSTDout(ptr);
+ }
+ else{
+ cout << "\nNode : "<<ptr->id<<" has no messages";
+ }
+ ptr=ptr->nextNode;
+ }
+ cout <<endl;
+}
+
+/*
+ * Prints the tree's total # of nodes and total # of messages at each height
+ * to STDOUT in human-readable format
+ */
+static void treeToSTDout(NMC *msgs[], int height){
+ for(int i=height; i>=0 ; i--){
+ cout <<"At height "<<i;
+ int *counts=countMessages(msgs[i]);
+ cout <<"\n Node Count: "<< countNodes(msgs[i])<<endl;
+ cout <<" Messages: ";
+ if(printLevelSTDout(counts)==0) cout <<"0\n";
+ else cout <<endl;
+ }
+}
+
+//traverse through the FT and report back the count of messages in every node
+static void countMessagesInFT(int fd, BLOCKNUM blocknum, FT ft,NMC *msgs[]){
+ FTNODE n=getNode(fd,blocknum,ft);
+
+ NMC *last=NULL;
+ if(msgs[n->height]==NULL){
+ last = msgs[n->height]=new NMC;
+ }else {
+ last=getLast(msgs[n->height]);
+ last->nextNode=new NMC;
+ last=last->nextNode;
+ }
+ last->id=blocknum.b;
+ last->count=new int[16];
+ for(int i=0;i<16;i++){
+ last->count[i]=0;
+ }
+ last->clean=0;
+ last->nextNode=NULL;
+
+ if (n->height==0){
+ toku_ftnode_free(&n);
+ return;
+ }
+ for(int i=0;i<n->n_children;i++){
+ NONLEAF_CHILDINFO bnc = BNC(n, i);
+ if (n->height==1 && n->bp[i].ptr.tag==BCT_NULL){
+ cout <<n->bp[i].ptr.tag;
+ }
+ auto dump_fn=[&](const ft_msg &msg, bool UU(is_fresh)) {
+ enum ft_msg_type type = (enum ft_msg_type) msg.type();
+ last->count[type]++;
+ last->clean=1;
+ return 0;
+ };
+
+ bnc->msg_buffer.iterate(dump_fn);
+
+ blocknum=make_blocknum(BP_BLOCKNUM(n, i).b);
+ countMessagesInFT(fd,blocknum,ft, msgs);
+ }
+
+ toku_ftnode_free(&n);
+}
+
+static void dump_node(int fd, BLOCKNUM blocknum, FT ft) {
+ FTNODE n;
+ FTNODE_DISK_DATA ndd = nullptr;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
+ int r = toku_deserialize_ftnode_from (fd, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
+ assert_zero(r);
+ assert(n!=0);
+ printf("ftnode\n");
+ DISKOFF disksize, diskoffset;
+ ft->blocktable.translate_blocknum_to_offset_size(blocknum, &diskoffset, &disksize);
+ printf(" diskoffset =%" PRId64 "\n", diskoffset);
+ printf(" disksize =%" PRId64 "\n", disksize);
+ printf(" serialize_size =%u\n", toku_serialize_ftnode_size(n));
+ printf(" flags =%u\n", n->flags);
+ printf(" blocknum=%" PRId64 "\n", n->blocknum.b);
+ //printf(" log_lsn =%lld\n", n->log_lsn.lsn); // The log_lsn is a memory-only value.
+ printf(" height =%d\n", n->height);
+ printf(" layout_version=%d\n", n->layout_version);
+ printf(" layout_version_original=%d\n", n->layout_version_original);
+ printf(" layout_version_read_from_disk=%d\n", n->layout_version_read_from_disk);
+ printf(" build_id=%d\n", n->build_id);
+ printf(" max_msn_applied_to_node_on_disk=%" PRId64 " (0x%" PRIx64 ")\n", n->max_msn_applied_to_node_on_disk.msn, n->max_msn_applied_to_node_on_disk.msn);
+ printf(" io time %lf decompress time %lf deserialize time %lf\n",
+ tokutime_to_seconds(bfe.io_time),
+ tokutime_to_seconds(bfe.decompress_time),
+ tokutime_to_seconds(bfe.deserialize_time));
+
+ printf(" n_children=%d\n", n->n_children);
+ printf(" pivotkeys.total_size()=%u\n", (unsigned) n->pivotkeys.total_size());
+
+ if (n->height > 0) {
+ printf(" pivots:\n");
+ } else {
+ printf("LEAF keys:\n");
+ }
+
+ for (int i=0; i<n->n_children-1; i++) {
+ const DBT piv = n->pivotkeys.get_pivot(i);
+ printf(" pivot %2d:", i);
+ if (n->flags)
+ printf(" flags=%x ", n->flags);
+ print_item(piv.data, piv.size);
+ printf("\n");
+ }
+
+ if (n->height > 0) {
+ printf(" children:\n");
+ } else {
+ printf("LEAF data:\n");
+ }
+
+ for (int i=0; i<n->n_children; i++) {
+ printf(" child %d: ", i);
+ if (n->height > 0) {
+ printf("%" PRId64 "\n", BP_BLOCKNUM(n, i).b);
+ NONLEAF_CHILDINFO bnc = BNC(n, i);
+ unsigned int n_bytes = toku_bnc_nbytesinbuf(bnc);
+ int n_entries = toku_bnc_n_entries(bnc);
+ if (n_bytes > 0 || n_entries > 0) {
+ printf(" buffer contains %u bytes (%d items)\n", n_bytes, n_entries);
+ }
+ if (do_dump_data) {
+ struct dump_data_fn {
+ int operator()(const ft_msg &msg, bool UU(is_fresh)) {
+ enum ft_msg_type type = (enum ft_msg_type) msg.type();
+ MSN msn = msg.msn();
+ XIDS xids = msg.xids();
+ const void *key = msg.kdbt()->data;
+ const void *data = msg.vdbt()->data;
+ uint32_t keylen = msg.kdbt()->size;
+ uint32_t datalen = msg.vdbt()->size;
+ printf(" msn=%" PRIu64 " (0x%" PRIx64 ") ", msn.msn, msn.msn);
+ printf(" TYPE=");
+ switch (type) {
+ case FT_NONE: printf("NONE"); goto ok;
+ case FT_INSERT: printf("INSERT"); goto ok;
+ case FT_INSERT_NO_OVERWRITE: printf("INSERT_NO_OVERWRITE"); goto ok;
+ case FT_DELETE_ANY: printf("DELETE_ANY"); goto ok;
+ case FT_ABORT_ANY: printf("ABORT_ANY"); goto ok;
+ case FT_COMMIT_ANY: printf("COMMIT_ANY"); goto ok;
+ case FT_COMMIT_BROADCAST_ALL: printf("COMMIT_BROADCAST_ALL"); goto ok;
+ case FT_COMMIT_BROADCAST_TXN: printf("COMMIT_BROADCAST_TXN"); goto ok;
+ case FT_ABORT_BROADCAST_TXN: printf("ABORT_BROADCAST_TXN"); goto ok;
+ case FT_OPTIMIZE: printf("OPTIMIZE"); goto ok;
+ case FT_OPTIMIZE_FOR_UPGRADE: printf("OPTIMIZE_FOR_UPGRADE"); goto ok;
+ case FT_UPDATE: printf("UPDATE"); goto ok;
+ case FT_UPDATE_BROADCAST_ALL: printf("UPDATE_BROADCAST_ALL"); goto ok;
+ }
+ printf("HUH?");
+ok:
+ printf(" xid=");
+ toku_xids_fprintf(stdout, xids);
+ printf(" ");
+ print_item(key, keylen);
+ if (datalen>0) {
+ printf(" ");
+ print_item(data, datalen);
+ }
+ printf("\n");
+ return 0;
+ }
+ } dump_fn;
+ bnc->msg_buffer.iterate(dump_fn);
+ }
+ } else {
+ printf(" n_bytes_in_buffer= %" PRIu64 "", BLB_DATA(n, i)->get_disk_size());
+ printf(" items_in_buffer=%u\n", BLB_DATA(n, i)->num_klpairs());
+ if (do_dump_data) {
+ unsigned int le_index = 0;
+ BLB_DATA(n, i)->iterate<void, print_le>(&le_index);
+ }
+ }
+ }
+ toku_ftnode_free(&n);
+ toku_free(ndd);
+}
+
+static void dump_block_translation(FT ft, uint64_t offset) {
+ ft->blocktable.blocknum_dump_translation(make_blocknum(offset));
+}
+
+static void dump_fragmentation(int UU(f), FT ft, int tsv) {
+ int64_t used_space;
+ int64_t total_space;
+ ft->blocktable.internal_fragmentation(&total_space, &used_space);
+ int64_t fragsizes = total_space - used_space;
+
+ if (tsv) {
+ printf("%" PRId64 "\t%" PRId64 "\t%" PRId64 "\t%.1f\n", used_space, total_space, fragsizes,
+ 100. * ((double)fragsizes / (double)(total_space)));
+ } else {
+ printf("used_size\t%" PRId64 "\n", used_space);
+ printf("total_size\t%" PRId64 "\n", total_space);
+ printf("fragsizes\t%" PRId64 "\n", fragsizes);
+ printf("fragmentation\t%.1f\n", 100. * ((double)fragsizes / (double)(total_space)));
+ }
+}
+
+typedef struct {
+ int fd;
+ FT ft;
+ uint64_t blocksizes;
+ uint64_t leafsizes;
+ uint64_t leafblocks;
+} frag_help_extra;
+
+static int nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void *extra) {
+ frag_help_extra *CAST_FROM_VOIDP(info, extra);
+ FTNODE n;
+ FTNODE_DISK_DATA ndd = NULL;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(info->ft);
+ int r = toku_deserialize_ftnode_from(info->fd, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
+ if (r==0) {
+ info->blocksizes += size;
+ if (n->height == 0) {
+ info->leafsizes += size;
+ info->leafblocks++;
+ }
+ toku_ftnode_free(&n);
+ toku_free(ndd);
+ }
+ return 0;
+}
+
+static void dump_nodesizes(int fd, FT ft) {
+ frag_help_extra info;
+ memset(&info, 0, sizeof(info));
+ info.fd = fd;
+ info.ft = ft;
+ ft->blocktable.iterate(block_table::TRANSLATION_CHECKPOINTED,
+ nodesizes_helper, &info, true, true);
+ printf("leafblocks\t%" PRIu64 "\n", info.leafblocks);
+ printf("blocksizes\t%" PRIu64 "\n", info.blocksizes);
+ printf("leafsizes\t%" PRIu64 "\n", info.leafsizes);
+}
+
+/* ===== struct and function to get a summary of atree ===== */
+
+typedef struct {
+ int fd;
+ FT ft;
+ uint64_t blocksizes;
+ uint64_t leafsizes;
+ uint64_t serialsize; // size of serialized data (assumed uncompressed)
+ uint64_t leafblocks; // count of leaf nodes
+ uint64_t nonleafnode_cnt; // count of non-leaf nodes
+ uint64_t maxheight; // height of the tree
+ uint64_t msg_cnt; // message count in non-leafs
+ uint64_t msg_size; // size (in bytes) of all messages in non-leafs
+ uint64_t pairs_cnt; // count of pairs in leaf nodes
+ std::map<int, int> height_cnt; // count of nodes per height
+ std::map<int, int> hmsg_cnt; // count of message per height
+ std::map<int, uint64_t> hmsg_size; // size of message per height
+ std::map<int, uint64_t> hdisk_size; // disk size per height
+ std::map<int, uint64_t> hserial_size; // serial size per height
+} summary_help_extra;
+
+static int summary_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void *extra) {
+ summary_help_extra *CAST_FROM_VOIDP(info, extra);
+ FTNODE n;
+ FTNODE_DISK_DATA ndd = NULL;
+ ftnode_fetch_extra bfe;
+
+ bfe.create_for_full_read(info->ft);
+ int r = toku_deserialize_ftnode_from(info->fd, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
+ if (r==0) {
+ info->blocksizes += size;
+
+ (info->height_cnt)[n->height]++;
+
+ if (n->height == 0) {
+ info->leafsizes += size;
+ info->leafblocks++;
+ } else {
+ info->nonleafnode_cnt++;
+ }
+
+ info->hdisk_size[n->height] += size;
+ auto serialsize = toku_serialize_ftnode_size(n);
+ info->serialsize += serialsize;
+ info->hserial_size[n->height] += serialsize;
+
+
+ if ((uint64_t)n->height > info->maxheight) {
+ info->maxheight = n->height;
+ }
+
+
+
+ for (int i=0; i<n->n_children; i++) {
+ //printf(" child %d: ", i);
+ if (n->height > 0) {
+ NONLEAF_CHILDINFO bnc = BNC(n, i);
+ unsigned int n_bytes = toku_bnc_nbytesinbuf(bnc);
+ int n_entries = toku_bnc_n_entries(bnc);
+ //if (n_bytes > 0 || n_entries > 0) {
+ // printf(" buffer contains %u bytes (%d items)\n", n_bytes, n_entries);
+ //}
+ info->msg_cnt += n_entries;
+ info->msg_size += n_bytes;
+ info->hmsg_cnt[n->height] += n_entries;
+ info->hmsg_size[n->height] += n_bytes;
+ } else {
+ info->pairs_cnt += BLB_DATA(n, i)->num_klpairs();
+ }
+ }
+ if (n->height ==0) {
+ info->hmsg_cnt[0] += n->n_children; // this way we count partitions per leaf node
+ }
+
+
+ toku_ftnode_free(&n);
+ toku_free(ndd);
+ }
+ return 0;
+}
+
+static std::string humanNumber(uint64_t value) {
+ std::string numWithCommas = to_string(value);
+ int insertPosition = numWithCommas.length() - 3;
+ while (insertPosition > 0) {
+ numWithCommas.insert(insertPosition, ",");
+ insertPosition-=3;
+ }
+ return numWithCommas;
+}
+
+static void dump_summary(int fd, FT ft) {
+ summary_help_extra info;
+ //memset(&info, 0, sizeof(info));
+ info.fd = fd;
+ info.ft = ft;
+ info.blocksizes = 0;
+ info.leafsizes = 0;
+ info.serialsize = 0;
+ info.leafblocks = 0;
+ info.nonleafnode_cnt = 0;
+ info.maxheight = 0;
+ info.msg_cnt = 0;
+ info.msg_size = 0;
+ info.pairs_cnt = 0;
+
+ ft->blocktable.iterate(block_table::TRANSLATION_CHECKPOINTED,
+ summary_helper, &info, true, true);
+ printf("leaf nodes:\t%" PRIu64 "\n", info.leafblocks);
+ printf("non-leaf nodes:\t%" PRIu64 "\n", info.nonleafnode_cnt);
+ printf("Leaf size:\t%s\n", humanNumber(info.leafsizes).c_str());
+ printf("Total size:\t%s\n", humanNumber(info.blocksizes).c_str());
+ printf("Total uncompressed size:\t%s\n", humanNumber(info.serialsize).c_str());
+ printf("Messages count:\t%" PRIu64 "\n", info.msg_cnt);
+ printf("Messages size:\t%s\n", humanNumber(info.msg_size).c_str());
+ printf("Records count:\t%" PRIu64 "\n", info.pairs_cnt);
+ printf("Tree height:\t%" PRIu64 "\n", info.maxheight);
+ for(auto elem : info.height_cnt) {
+ std::string hdr;
+ double children_per_node;
+ if (elem.first == 0) {
+ hdr = "basement nodes";
+ children_per_node = (double)info.hmsg_cnt[0]/elem.second;
+ } else {
+ hdr = "msg cnt";
+ children_per_node = (double)info.height_cnt[elem.first-1]/elem.second;
+ }
+
+ printf("height: %d, nodes count: %d; avg children/node: %f\n\t %s: %d; msg size: %s; disksize: %s; uncompressed size: %s; ratio: %f\n",
+ elem.first, elem.second, children_per_node,
+ hdr.c_str(),
+ info.hmsg_cnt[elem.first],
+ humanNumber(info.hmsg_size[elem.first]).c_str(),
+ humanNumber(info.hdisk_size[elem.first]).c_str(),
+ humanNumber(info.hserial_size[elem.first]).c_str(),
+ (double)info.hserial_size[elem.first]/info.hdisk_size[elem.first] );
+ }
+}
+
+/* ===== end of summary ===== */
+
+static void dump_garbage_stats(int fd, FT ft) {
+ assert(fd == toku_cachefile_get_fd(ft->cf));
+ uint64_t total_space = 0;
+ uint64_t used_space = 0;
+ toku_ft_get_garbage(ft, &total_space, &used_space);
+ printf("garbage total size :%20" PRIu64 "\n", total_space);
+ printf("garbage used size :%20" PRIu64 "\n", used_space);
+ float a=used_space,b=total_space;
+
+ float percentage=((1-a/b)*100);
+ printf("Total garbage : %2.3f%%\n", percentage);
+}
+
+typedef struct __dump_node_extra {
+ int fd;
+ FT ft;
+} dump_node_extra;
+
+static int dump_node_wrapper(BLOCKNUM b, int64_t UU(size), int64_t UU(address), void *extra) {
+ dump_node_extra *CAST_FROM_VOIDP(info, extra);
+ dump_node(info->fd, b, info->ft);
+ return 0;
+}
+
+static uint32_t get_unaligned_uint32(unsigned char *p) {
+ uint32_t n;
+ memcpy(&n, p, sizeof n);
+ return n;
+}
+
+struct dump_sub_block {
+ uint32_t compressed_size;
+ uint32_t uncompressed_size;
+ uint32_t xsum;
+};
+
+static void sub_block_deserialize(struct dump_sub_block *sb, unsigned char *sub_block_header) {
+ sb->compressed_size = toku_dtoh32(get_unaligned_uint32(sub_block_header+0));
+ sb->uncompressed_size = toku_dtoh32(get_unaligned_uint32(sub_block_header+4));
+ sb->xsum = toku_dtoh32(get_unaligned_uint32(sub_block_header+8));
+}
+
+static void verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size) {
+ // verify the header checksum
+ const size_t node_header = 8 + sizeof (uint32_t) + sizeof (uint32_t) + sizeof (uint32_t);
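+ // The node header read below is an 8-byte magic string followed by three
+ // 32-bit fields (layout versions and build id); the sub block count and
+ // per-sub-block sizes/checksums come right after it.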
+
+ printf("%.8s layout_version=%u %u build=%d\n", cp, get_unaligned_uint32(cp+8), get_unaligned_uint32(cp+12), get_unaligned_uint32(cp+16));
+
+ unsigned char *sub_block_header = &cp[node_header];
+ uint32_t n_sub_blocks = toku_dtoh32(get_unaligned_uint32(&sub_block_header[0]));
+ uint32_t header_length = node_header + n_sub_blocks * sizeof (struct dump_sub_block);
+ header_length += sizeof (uint32_t); // CRC
+ if (header_length > size) {
+ printf("header length too big: %u\n", header_length);
+ return;
+ }
+ uint32_t header_xsum = toku_x1764_memory(cp, header_length);
+ uint32_t expected_xsum = toku_dtoh32(get_unaligned_uint32(&cp[header_length]));
+ if (header_xsum != expected_xsum) {
+ printf("header checksum failed: %u %u\n", header_xsum, expected_xsum);
+ return;
+ }
+
+ // deserialize the sub block header
+ struct dump_sub_block sub_block[n_sub_blocks];
+ sub_block_header += sizeof (uint32_t);
+ for (uint32_t i = 0 ; i < n_sub_blocks; i++) {
+ sub_block_deserialize(&sub_block[i], sub_block_header);
+ sub_block_header += sizeof (struct dump_sub_block);
+ }
+
+ // verify the sub block header
+ uint32_t offset = header_length + 4;
+ for (uint32_t i = 0 ; i < n_sub_blocks; i++) {
+ uint32_t xsum = toku_x1764_memory(cp + offset, sub_block[i].compressed_size);
+ printf("%u: %u %u %u", i, sub_block[i].compressed_size, sub_block[i].uncompressed_size, sub_block[i].xsum);
+ if (xsum != sub_block[i].xsum)
+ printf(" fail %u offset %" PRIu64, xsum, file_offset + offset);
+ printf("\n");
+ offset += sub_block[i].compressed_size;
+ }
+ if (offset != size)
+ printf("offset %u expected %" PRIu64 "\n", offset, size);
+}
+
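+// Look up a block's offset and size in the block translation table, read its on-disk image, and verify it.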
+static void dump_block(int fd, BLOCKNUM blocknum, FT ft) {
+ DISKOFF offset, size;
+ ft->blocktable.translate_blocknum_to_offset_size(blocknum, &offset, &size);
+ printf("%" PRId64 " at %" PRId64 " size %" PRId64 "\n", blocknum.b, offset, size);
+
+ unsigned char *CAST_FROM_VOIDP(vp, toku_malloc(size));
+ uint64_t r = pread(fd, vp, size, offset);
+ if (r == (uint64_t)size) {
+ verify_block(vp, offset, size);
+ }
+ toku_free(vp);
+}
+
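+// Read an arbitrary byte range of the file and either hex dump it to stdout or copy it to outfp.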
+static void dump_file(int fd, uint64_t offset, uint64_t size, FILE *outfp) {
+ unsigned char *XMALLOC_N(size, vp);
+ uint64_t r = pread(fd, vp, size, offset);
+ if (r == size) {
+ if (outfp == stdout) {
+ hex_dump(vp, offset, size);
+ } else {
+ size_t wrote = fwrite(vp, size, 1, outfp);
+ assert(wrote == 1);
+ }
+ }
+ toku_free(vp);
+}
+
+static void set_file(int fd, uint64_t offset, unsigned char newc) {
+ toku_os_pwrite(fd, &newc, sizeof newc, offset);
+}
+
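+// Read one input line into 'line' (truncated at maxline characters); returns EOF at end of input, otherwise the stored length including the terminating NUL.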
+static int readline(char *line, int maxline) {
+ int i = 0;
+ int c;
+ while ((c = getchar()) != EOF && c != '\n' && i < maxline) {
+ line[i++] = (char)c;
+ }
+ line[i++] = 0;
+ return c == EOF ? EOF : i;
+}
+
+static int split_fields(char *line, char *fields[], int maxfields) {
+ int i;
+ for (i=0; i<maxfields; i++)
+ fields[i] = NULL;
+ for (i=0; i<maxfields; i++, line=NULL) {
+ fields[i] = strtok(line, " ");
+ if (fields[i] == NULL) {
+ break;
+ }
+ }
+ return i;
+}
+
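+// Parse a number from an interactive command: 0x... is hex, a leading 0 is octal, anything else is decimal.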
+static uint64_t getuint64(const char *f) {
+ if (strncmp(f, "0x", 2) == 0 || strncmp(f, "0X", 2) == 0)
+ return strtoull(f, 0, 16);
+ else if (strncmp(f, "0", 1) == 0)
+ return strtoull(f, 0, 8);
+ else
+ return strtoull(f, 0, 10);
+}
+
+static void interactive_help(void) {
+ fprintf(stderr, "help\n");
+ fprintf(stderr, "header\n");
+ fprintf(stderr, "mr/MessagesReport [NUMBER] \n Reports messages for the level of the tree you want to get more details about\n");
+ fprintf(stderr, "rf/readFile ft-file-name \n Switch to a different FT\n");
+ fprintf(stderr, "node NUMBER \n");
+ fprintf(stderr, "bx OFFSET | block_translation OFFSET\n");
+ fprintf(stderr, "dumpdata 0|1\n");
+ fprintf(stderr, "fragmentation\n");
+ fprintf(stderr, "nodesizes\n");
+ fprintf(stderr, "garbage\n");
+ fprintf(stderr, "file OFFSET SIZE [outfilename]\n");
+ fprintf(stderr, "quit\n");
+}
+
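+// Free the per-level NMC message-count lists built by countMessagesInFT.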
+static void freeNMC(NMC *msgs[], int height){
+ for(int i=0;i<=height;i++){
+ if(msgs[i]!=NULL){
+ delete(msgs[i]->count);
+ // advance to the next node before deleting, so a freed node is never dereferenced
+ while(msgs[i]!=NULL){
+ NMC* ptr=msgs[i];
+ msgs[i]=msgs[i]->nextNode;
+ delete ptr;
+ }
+ }
+ }
+}
+
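+// Write the per-level message counts as text to /tmp/tree.txt: one line per node with its id, clean flag, and the 16 message counters.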
+static void writeTree(NMC *msgs[],int height,char *name UU()){
+ ofstream mytree ("/tmp/tree.txt",fstream::out);
+ if (mytree.is_open()){
+ for(int i=height;i>=0;i--){
+ NMC * ptr=msgs[i];
+ mytree <<i<<endl;
+ while(ptr!=NULL){
+ mytree << ptr->id<<"\t";
+ if(ptr->clean!=0)mytree << "1"<<"\t";
+ else mytree << "0"<<"\t";
+ for(int j=0;j<15;j++)mytree << ptr->count[j]<<" ";
+ mytree << ptr->count[15]<<endl;
+ ptr=ptr->nextNode;
+ }
+ mytree <<endl;
+ }
+ }
+ else cout << "Unable to open file";
+ mytree.close();
+}
+
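+// Write the same per-level counts as a JSON document: one object per node mapping message type to count.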
+static void writeJson(NMC *msgs[],int height,const char *name){
+ ofstream mytree (name,fstream::out);
+ if (mytree.is_open()){
+ mytree <<"{\n \"FT\":[";
+ for(int i=height;i>=0;i--){
+ NMC * ptr=msgs[i];
+ mytree <<"{\n\"Level\": {\"Height\":\""<<i<<"\",\n \"Nodes\":[";
+ while(ptr!=NULL){
+ mytree <<"{\"ID\":\""<< ptr->id<<"\",";
+ if(ptr->clean!=0){
+ mytree <<"\"Messages\":[";
+ for(int j=0;j<16;j++)
+ {
+ mytree <<"{";
+ switch (j) {
+ case FT_INSERT: mytree <<"\"INSERT\":\""<<ptr->count[j]<<"\""; break;
+ case FT_INSERT_NO_OVERWRITE: mytree <<"\"INSERT_NO_OVERWRITE\":\""<<ptr->count[j]<<"\""; break;
+ case FT_DELETE_ANY: mytree <<"\"DELETE\":\""<<ptr->count[j]<<"\""; break;
+ case FT_ABORT_ANY: mytree <<"\"ABORT\":\""<<ptr->count[j]<<"\""; break;
+ case FT_COMMIT_ANY: mytree <<"\"COMMIT\":\""<<ptr->count[j]<<"\""; break;
+ case FT_COMMIT_BROADCAST_ALL: mytree <<"\"COMMIT_BROADCAST_ALL\":\""<<ptr->count[j]<<"\"" ; break;
+ case FT_COMMIT_BROADCAST_TXN: mytree <<"\"COMMIT_BROADCAST_TXN\":\""<<ptr->count[j]<<"\""; break;
+ case FT_ABORT_BROADCAST_TXN: mytree <<"\"ABORT_BROADCAST_TXN\":\""<<ptr->count[j]<<"\"";break;
+ case FT_OPTIMIZE: mytree <<"\"OPTIMIZE\":\""<<ptr->count[j]<<"\""; break;
+ case FT_OPTIMIZE_FOR_UPGRADE: mytree <<"\"OPTIMIZE_FOR_UPGRADE\":\""<<ptr->count[j]<<"\"";break;
+ case FT_UPDATE: mytree <<"\"UPDATE\":\""<<ptr->count[j]<<"\""; break;
+ case FT_UPDATE_BROADCAST_ALL: mytree <<"\"UPDATE_BROADCAST_ALL\":\""<<ptr->count[j]<<"\""; break;
+ }
+ mytree <<"}";
+ if(j<15)mytree<<",";
+ }
+
+ mytree <<"]}";
+
+ }
+ else {
+ mytree <<"\"Messages\":\""<< "0"<<"\"}";
+ }
+ if(ptr->nextNode!=NULL)mytree <<",\n";
+ else mytree <<"]}\n";
+ ptr=ptr->nextNode;
+ }
+ mytree <<"\n}\n";
+ if(i!=0)mytree <<",\n";
+ }
+ mytree <<"\n]}\n";
+
+ }
+ else cout << "Unable to open file";
+ mytree.close();
+}
+
+static void writeTree(NMC *msgs[],int height){
+ ofstream mytree ("/tmp/tree1.txt",fstream::out);
+ if (mytree.is_open()){
+ for(int i=height;i>=0;i--){
+ NMC * ptr=msgs[i];
+ mytree <<i<<endl;
+ while(ptr!=NULL){
+ mytree << ptr->id<<",";
+ if(ptr->clean!=0)mytree << "1"<<",";
+ else mytree << "0"<<",";
+ for(int j=0;j<15;j++)mytree << ptr->count[j]<<",";
+ mytree << ptr->count[15]<<endl;
+ ptr=ptr->nextNode;
+ }
+ mytree <<".\"";
+ }
+ }
+ else cout << "Unable to open file";
+ mytree.close();
+}
+
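+// Re-read the header, count the messages in every node of the tree, print the summary, and write it out as JSON.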
+static void FT_to_JSON(int fd, FT ft, CACHEFILE cf, const char * JsonFile){
+ toku_ft_free(ft);
+ open_header(fd, &ft, cf);
+ int root=getRootNode(ft);
+ BLOCKNUM off = make_blocknum(root);
+ int height=getHeight(fd,off, ft);
+ NMC *msgs[height+1]; // levels 0..height inclusive are populated below
+ for(int i=0;i<=height;i++){
+ msgs[i]=NULL;
+ }
+ open_header(fd, &ft, cf);
+ root=getRootNode(ft);
+ off = make_blocknum(root);
+ countMessagesInFT(fd,off, ft,msgs);
+ cout <<"to STD output: \n";
+ treeToSTDout(msgs,height);
+ writeTree(msgs,height);
+ cout<<"FT's json file was generated here:";
+ if(JsonFile!=NULL) {
+ cout <<JsonFile;
+ writeJson(msgs,height,JsonFile);
+ }
+ else {
+ cout <<"./FT.json";
+ writeJson(msgs,height,"./FT.json");
+ }
+ cout<<endl;
+ freeNMC(msgs,height);
+ exit(0);
+}
+
+static void run_iteractive_loop(int fd, FT ft, CACHEFILE cf) {
+ toku_ft_free(ft);
+ open_header(fd, &ft, cf);
+ int root=getRootNode(ft);
+ BLOCKNUM off = make_blocknum(root);
+ int height=getHeight(fd,off, ft);
+ NMC *msgs[height+1]; // levels 0..height inclusive are populated below
+ for(int i=0;i<=height;i++){
+ msgs[i]=NULL;
+ }
+ while (1) {
+ printf("ftdump>");
+ fflush(stdout);
+ char line[maxline+1];
+ int r = readline(line, maxline);
+ if (r == EOF)
+ break;
+ const int maxfields = 4;
+ char *fields[maxfields];
+ int nfields = split_fields(line, fields, maxfields);
+ if (nfields == 0)
+ continue;
+ if (strcmp(fields[0], "help") == 0) {
+ interactive_help();
+ } else if (strcmp(fields[0], "header") == 0) {
+ toku_ft_free(ft);
+ open_header(fd, &ft, cf);
+ dump_header(ft);
+ } else if (strcmp(fields[0], "rn") == 0||strcmp(fields[0], "rootNode")==0||strcmp(fields[0], "rootnode") == 0) {
+ printf("Root node :%d\n",root);
+ } else if (strcmp(fields[0], "block") == 0 && nfields == 2) {
+ BLOCKNUM blocknum = make_blocknum(getuint64(fields[1]));
+ dump_block(fd, blocknum, ft);
+ }else if ((strcmp(fields[0], "readFile") == 0 ||strcmp(fields[0], "readfile") == 0 ||strcmp(fields[0], "rf") == 0 )&& nfields == 2) {
+ // open the new file first and only switch over if the open succeeded
+ int new_fd = open(fields[1], O_RDWR + O_BINARY);
+ if (new_fd < 0) {
+ fprintf(stderr, "%s: can not open the FT dump %s errno %d\n", arg0, fields[1], errno);
+ continue;
+ }
+ fname=fields[1];
+ fd = new_fd;
+ toku_ft_free(ft);
+ open_header(fd, &ft, cf);
+ root=getRootNode(ft);
+ off = make_blocknum(root);
+ height=getHeight(fd,off, ft);
+ } else if (strcmp(fields[0], "node") == 0 && nfields == 2) {
+ off = make_blocknum(getuint64(fields[1]));
+ dump_node(fd, off, ft);
+ }else if ((strcmp(fields[0], "mr") == 0||(strcmp(fields[0], "nc")) == 0 ||strcmp(fields[0], "messagesReport") == 0 )) {
+ freeNMC(msgs,height);
+ toku_ft_free(ft);
+ open_header(fd, &ft, cf);
+ root=getRootNode(ft);
+ off = make_blocknum(root);
+ countMessagesInFT(fd,off, ft,msgs);
+ int level=-1;
+ if(nfields == 2)level=getuint64(fields[1]);
+ if(level>=0){
+ levelToSTDout(msgs[level], level);
+ }
+ else{
+ cout <<"to STD output: \n";
+ treeToSTDout(msgs,height);
+ }
+ writeTree(msgs,height);
+ writeTree(msgs,height, NULL);
+
+ }else if (strcmp(fields[0], "dumpdata") == 0 && nfields == 2) {
+
+ do_dump_data = strtol(fields[1], NULL, 10);
+ }
+ else if (strcmp(fields[0], "block_translation") == 0 || strcmp(fields[0], "bx") == 0) {
+ uint64_t offset = 0;
+ if (nfields == 2)
+ offset = getuint64(fields[1]);
+ dump_block_translation(ft, offset);
+ } else if (strcmp(fields[0], "fragmentation") == 0) {
+ dump_fragmentation(fd, ft, do_tsv);
+ } else if (strcmp(fields[0], "nodesizes") == 0) {
+ dump_nodesizes(fd, ft);
+ } else if (strcmp(fields[0], "garbage") == 0||strcmp(fields[0], "g") == 0) {
+ dump_garbage_stats(fd, ft);
+ } else if (strcmp(fields[0], "file") == 0 && nfields >= 3) {
+ uint64_t offset = getuint64(fields[1]);
+ uint64_t size = getuint64(fields[2]);
+ FILE *outfp = stdout;
+ if (nfields >= 4)
+ outfp = fopen(fields[3], "w");
+ dump_file(fd, offset, size, outfp);
+ } else if (strcmp(fields[0], "setfile") == 0 && nfields == 3) {
+ uint64_t offset = getuint64(fields[1]);
+ unsigned char newc = getuint64(fields[2]);
+ set_file(fd, offset, newc);
+ } else if (strcmp(fields[0], "quit") == 0 || strcmp(fields[0], "q") == 0) {
+ toku_ft_free(ft);
+ exit(0);
+ }
+ }
+ freeNMC(msgs,height);
+}
+
+static int usage(void) {
+ fprintf(stderr, "Usage: %s [options] filename\n", arg0);
+ fprintf(stderr, "--interactive \n\t an interactive way to see messages and/or switch between FTs\n");
+ fprintf(stderr, "--json /path/to/fractal-tree/file [output json file]\n\t if left empty an FT.json will be created automatically\n");
+ fprintf(stderr, "--nodata \n");
+ fprintf(stderr, "--dumpdata 0|1 \n");
+ fprintf(stderr, "--header \n");
+ fprintf(stderr, "--rootnode \n");
+ fprintf(stderr, "--node N \n");
+ fprintf(stderr, "--fragmentation \n");
+ fprintf(stderr, "--garbage \n");
+ fprintf(stderr, "--tsv \n");
+ fprintf(stderr, "--translation-table \n");
+ fprintf(stderr, "--summary \n");
+ return 1;
+}
+
+int main (int argc, const char *const argv[]) {
+ arg0 = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "--interactive") == 0 || strcmp(argv[0], "--i") == 0) {
+ do_interactive = 1;
+ }
+ else if ((strcmp(argv[0], "--json") == 0 || strcmp(argv[0], "--s")== 0)&& argc >= 2) {
+ do_json = 1;
+ fname=argv[1];
+ argc--; argv++;
+ break;
+ } else if (strcmp(argv[0], "--nodata") == 0) {
+ do_dump_data = 0;
+ } else if (strcmp(argv[0], "--dumpdata") == 0 && argc > 1) {
+ argc--; argv++;
+ do_dump_data = atoi(argv[0]);
+ } else if (strcmp(argv[0], "--header") == 0) {
+ do_header = 1;
+ } else if (strcmp(argv[0], "--rootnode") == 0) {
+ do_rootnode = 1;
+ } else if (strcmp(argv[0], "--node") == 0 && argc > 1) {
+ argc--; argv++;
+ do_node = 1;
+ do_node_num = make_blocknum(getuint64(argv[0]));
+ } else if (strcmp(argv[0], "--fragmentation") == 0) {
+ do_fragmentation = 1;
+ } else if (strcmp(argv[0], "--garbage") == 0) {
+ do_garbage = 1;
+ } else if (strcmp(argv[0], "--tsv") == 0) {
+ do_tsv = 1;
+ } else if (strcmp(argv[0], "--translation-table") == 0) {
+ do_translation_table = 1;
+ } else if (strcmp(argv[0], "--summary") == 0) {
+ do_summary = 1;
+ } else if (strcmp(argv[0], "--help") == 0 || strcmp(argv[0], "-?") == 0 || strcmp(argv[0], "-h") == 0) {
+ return usage();
+ } else {
+ break;
+ }
+ argc--; argv++;
+ }
+ if (argc != 1 && do_json==0)
+ return usage();
+
+ int r = toku_ft_layer_init();
+ assert_zero(r);
+ if(fname==NULL)fname = argv[0];
+ int fd = open(fname, O_RDWR + O_BINARY);
+ if (fd < 0) {
+ fprintf(stderr, "%s: can not open %s errno %d\n", arg0, fname, errno);
+ return 1;
+ }
+ // create a cachefile for the header
+ CACHETABLE ct = NULL;
+ toku_cachetable_create(&ct, 1<<25, (LSN){0}, 0);
+ CACHEFILE cf = NULL;
+ r = toku_cachetable_openfd (&cf, ct, fd, fname);
+ assert_zero(r);
+ FT ft = NULL;
+ open_header(fd, &ft, cf);
+ if (do_json ) {
+ const char *arg=argv[1];
+ FT_to_JSON(fd, ft, cf,arg);
+ }
+ if (do_interactive) {
+ run_iteractive_loop(fd, ft, cf);
+ }
+ else {
+ if (do_header) {
+ dump_header(ft);
+ }
+ if (do_rootnode) {
+ dump_node(fd, ft->h->root_blocknum, ft);
+ }
+ if (do_node) {
+ dump_node(fd, do_node_num, ft);
+ }
+ if (do_fragmentation) {
+ dump_fragmentation(fd, ft, do_tsv);
+ }
+ if (do_translation_table) {
+ ft->blocktable.dump_translation_table_pretty(stdout);
+ }
+ if (do_summary) {
+ dump_summary(fd, ft);
+ }
+ if (do_garbage) {
+ dump_garbage_stats(fd, ft);
+ }
+ if (!do_header && !do_rootnode && !do_fragmentation && !do_translation_table && !do_garbage && !do_summary) {
+ printf("Block translation:");
+ ft->blocktable.dump_translation_table(stdout);
+ dump_header(ft);
+ struct __dump_node_extra info;
+ info.fd = fd;
+ info.ft = ft;
+ ft->blocktable.iterate(block_table::TRANSLATION_CHECKPOINTED,
+ dump_node_wrapper, &info, true, true);
+ }
+ }
+ toku_cachefile_close(&cf, false, ZERO_LSN);
+ toku_cachetable_close(&ct);
+ toku_ft_free(ft);
+ toku_ft_layer_destroy();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/CMakeLists.txt b/storage/tokudb/PerconaFT/util/CMakeLists.txt
new file mode 100644
index 00000000..6f6b899e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/CMakeLists.txt
@@ -0,0 +1,34 @@
+set(util_srcs
+ context
+ dbt
+ frwlock
+ kibbutz
+ memarena
+ mempool
+ minicron
+ partitioned_counter
+ queue
+ threadpool
+ scoped_malloc
+ x1764
+ )
+
+add_library(util SHARED ${util_srcs})
+add_library(util_static STATIC ${util_srcs})
+maybe_add_gcov_to_libraries(util util_static)
+set_target_properties(util_static PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_link_libraries(util LINK_PRIVATE ${LIBTOKUPORTABILITY})
+target_link_libraries(util LINK_PUBLIC ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
+add_dependencies(util install_tdb_h)
+add_dependencies(util_static install_tdb_h)
+
+# detect when we are being built as a subproject
+if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
+ install(
+ FILES partitioned_counter.h
+ DESTINATION include
+ COMPONENT tokukv_headers
+ )
+endif ()
+
+add_subdirectory(tests)
diff --git a/storage/tokudb/PerconaFT/util/bytestring.h b/storage/tokudb/PerconaFT/util/bytestring.h
new file mode 100644
index 00000000..f946ad60
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/bytestring.h
@@ -0,0 +1,46 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "portability/toku_stdint.h"
+
+struct BYTESTRING {
+ uint32_t len;
+ char *data;
+};
diff --git a/storage/tokudb/PerconaFT/util/constexpr.h b/storage/tokudb/PerconaFT/util/constexpr.h
new file mode 100644
index 00000000..fce2cf3a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/constexpr.h
@@ -0,0 +1,52 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+constexpr char UU() static_tolower(const char a) {
+ return a >= 'A' && a <= 'Z' ? a - 'A' + 'a' : a;
+}
+
+constexpr int UU() static_strncasecmp(const char *a, const char *b, size_t len) {
+ return len == 0 ? 0 : (
+ static_tolower(*a) != static_tolower(*b) || *a == '\0' ?
+ static_tolower(*a) - static_tolower(*b) :
+ static_strncasecmp(a+1, b+1, len-1)
+ );
+}
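+// For example, static_strncasecmp("TokuDB", "tokudb", 6) evaluates to 0 at compile time.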
+
diff --git a/storage/tokudb/PerconaFT/util/context.cc b/storage/tokudb/PerconaFT/util/context.cc
new file mode 100644
index 00000000..dafe4e84
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/context.cc
@@ -0,0 +1,184 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <string.h>
+
+#include <util/context.h>
+
+namespace toku {
+
+ static const context default_context(CTX_DEFAULT);
+ static __thread const context *tl_current_context = &default_context;
+
+ // save the old context, set the current context
+ context::context(const context_id id) :
+ m_old_ctx(tl_current_context),
+ m_id(id) {
+ tl_current_context = this;
+ }
+
+ // restore the old context
+ context::~context() {
+ tl_current_context = m_old_ctx;
+ }
+
+} // namespace toku
+
+// thread local context
+
+const toku::context *toku_thread_get_context() {
+ return toku::tl_current_context;
+}
+
+// engine status
+
+static struct context_status context_status;
+#define CONTEXT_STATUS_INIT(key, legend) TOKUFT_STATUS_INIT(context_status, key, nullptr, PARCOUNT, "context: " legend, TOKU_ENGINE_STATUS)
+
+void toku_context_status_init(void) {
+ CONTEXT_STATUS_INIT(CTX_SEARCH_BLOCKED_BY_FULL_FETCH, "tree traversals blocked by a full fetch");
+ CONTEXT_STATUS_INIT(CTX_SEARCH_BLOCKED_BY_PARTIAL_FETCH, "tree traversals blocked by a partial fetch");
+ CONTEXT_STATUS_INIT(CTX_SEARCH_BLOCKED_BY_FULL_EVICTION, "tree traversals blocked by a full eviction");
+ CONTEXT_STATUS_INIT(CTX_SEARCH_BLOCKED_BY_PARTIAL_EVICTION, "tree traversals blocked by a partial eviction");
+ CONTEXT_STATUS_INIT(CTX_SEARCH_BLOCKED_BY_MESSAGE_INJECTION, "tree traversals blocked by a message injection");
+ CONTEXT_STATUS_INIT(CTX_SEARCH_BLOCKED_BY_MESSAGE_APPLICATION, "tree traversals blocked by a message application");
+ CONTEXT_STATUS_INIT(CTX_SEARCH_BLOCKED_BY_FLUSH, "tree traversals blocked by a flush");
+ CONTEXT_STATUS_INIT(CTX_SEARCH_BLOCKED_BY_CLEANER, "tree traversals blocked by the cleaner thread");
+ CONTEXT_STATUS_INIT(CTX_SEARCH_BLOCKED_OTHER, "tree traversals blocked by something uninstrumented");
+ CONTEXT_STATUS_INIT(CTX_PROMO_BLOCKED_BY_FULL_FETCH, "promotion blocked by a full fetch (should never happen)");
+ CONTEXT_STATUS_INIT(CTX_PROMO_BLOCKED_BY_PARTIAL_FETCH, "promotion blocked by a partial fetch (should never happen)");
+ CONTEXT_STATUS_INIT(CTX_PROMO_BLOCKED_BY_FULL_EVICTION, "promotion blocked by a full eviction (should never happen)");
+ CONTEXT_STATUS_INIT(CTX_PROMO_BLOCKED_BY_PARTIAL_EVICTION, "promotion blocked by a partial eviction (should never happen)");
+ CONTEXT_STATUS_INIT(CTX_PROMO_BLOCKED_BY_MESSAGE_INJECTION, "promotion blocked by a message injection");
+ CONTEXT_STATUS_INIT(CTX_PROMO_BLOCKED_BY_MESSAGE_APPLICATION, "promotion blocked by a message application");
+ CONTEXT_STATUS_INIT(CTX_PROMO_BLOCKED_BY_FLUSH, "promotion blocked by a flush");
+ CONTEXT_STATUS_INIT(CTX_PROMO_BLOCKED_BY_CLEANER, "promotion blocked by the cleaner thread");
+ CONTEXT_STATUS_INIT(CTX_PROMO_BLOCKED_OTHER, "promotion blocked by something uninstrumented");
+ CONTEXT_STATUS_INIT(CTX_BLOCKED_OTHER, "something uninstrumented blocked by something uninstrumented");
+ context_status.initialized = true;
+}
+#undef CONTEXT_STATUS_INIT
+
+void toku_context_get_status(struct context_status *status) {
+ assert(context_status.initialized);
+ *status = context_status;
+}
+
+#define STATUS_INC(x, d) increment_partitioned_counter(context_status.status[x].value.parcount, d);
+
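+// Attribute a read-write lock contention event to the (blocked, blocking) context pair in engine status.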
+void toku_context_note_frwlock_contention(const context_id blocked, const context_id blocking) {
+ assert(context_status.initialized);
+ if (blocked != CTX_SEARCH && blocked != CTX_PROMO) {
+ // Return early if this event is "unknown"
+ STATUS_INC(CTX_BLOCKED_OTHER, 1);
+ return;
+ }
+ switch (blocking) {
+ case CTX_FULL_FETCH:
+ if (blocked == CTX_SEARCH) {
+ STATUS_INC(CTX_SEARCH_BLOCKED_BY_FULL_FETCH, 1);
+ } else if (blocked == CTX_PROMO) {
+ STATUS_INC(CTX_PROMO_BLOCKED_BY_FULL_FETCH, 1);
+ }
+ break;
+ case CTX_PARTIAL_FETCH:
+ if (blocked == CTX_SEARCH) {
+ STATUS_INC(CTX_SEARCH_BLOCKED_BY_PARTIAL_FETCH, 1);
+ } else if (blocked == CTX_PROMO) {
+ STATUS_INC(CTX_PROMO_BLOCKED_BY_PARTIAL_FETCH, 1);
+ }
+ break;
+ case CTX_FULL_EVICTION:
+ if (blocked == CTX_SEARCH) {
+ STATUS_INC(CTX_SEARCH_BLOCKED_BY_FULL_EVICTION, 1);
+ } else if (blocked == CTX_PROMO) {
+ STATUS_INC(CTX_PROMO_BLOCKED_BY_FULL_EVICTION, 1);
+ }
+ break;
+ case CTX_PARTIAL_EVICTION:
+ if (blocked == CTX_SEARCH) {
+ STATUS_INC(CTX_SEARCH_BLOCKED_BY_PARTIAL_EVICTION, 1);
+ } else if (blocked == CTX_PROMO) {
+ STATUS_INC(CTX_PROMO_BLOCKED_BY_PARTIAL_EVICTION, 1);
+ }
+ break;
+ case CTX_MESSAGE_INJECTION:
+ if (blocked == CTX_SEARCH) {
+ STATUS_INC(CTX_SEARCH_BLOCKED_BY_MESSAGE_INJECTION, 1);
+ } else if (blocked == CTX_PROMO) {
+ STATUS_INC(CTX_PROMO_BLOCKED_BY_MESSAGE_INJECTION, 1);
+ }
+ break;
+ case CTX_MESSAGE_APPLICATION:
+ if (blocked == CTX_SEARCH) {
+ STATUS_INC(CTX_SEARCH_BLOCKED_BY_MESSAGE_APPLICATION, 1);
+ } else if (blocked == CTX_PROMO) {
+ STATUS_INC(CTX_PROMO_BLOCKED_BY_MESSAGE_APPLICATION, 1);
+ }
+ break;
+ case CTX_FLUSH:
+ if (blocked == CTX_SEARCH) {
+ STATUS_INC(CTX_SEARCH_BLOCKED_BY_FLUSH, 1);
+ } else if (blocked == CTX_PROMO) {
+ STATUS_INC(CTX_PROMO_BLOCKED_BY_FLUSH, 1);
+ }
+ break;
+ case CTX_CLEANER:
+ if (blocked == CTX_SEARCH) {
+ STATUS_INC(CTX_SEARCH_BLOCKED_BY_CLEANER, 1);
+ } else if (blocked == CTX_PROMO) {
+ STATUS_INC(CTX_PROMO_BLOCKED_BY_CLEANER, 1);
+ }
+ break;
+ default:
+ if (blocked == CTX_SEARCH) {
+ STATUS_INC(CTX_SEARCH_BLOCKED_OTHER, 1);
+ } else if (blocked == CTX_PROMO) {
+ STATUS_INC(CTX_PROMO_BLOCKED_OTHER, 1);
+ }
+ break;
+ }
+}
+
+void toku_context_status_destroy(void) {
+ for (int i = 0; i < CTX_STATUS_NUM_ROWS; ++i) {
+ if (context_status.status[i].type == PARCOUNT) {
+ destroy_partitioned_counter(context_status.status[i].value.parcount);
+ }
+ }
+}
diff --git a/storage/tokudb/PerconaFT/util/context.h b/storage/tokudb/PerconaFT/util/context.h
new file mode 100644
index 00000000..de4d2076
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/context.h
@@ -0,0 +1,152 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <portability/toku_portability.h>
+
+#include <db.h>
+
+#include <util/status.h>
+
+enum context_id {
+ CTX_INVALID = -1,
+ CTX_DEFAULT = 0, // default context for when no context is set
+ CTX_SEARCH, // searching for a key at the bottom of the tree
+ CTX_PROMO, // promoting a message down the tree
+ CTX_FULL_FETCH, // performing full fetch (pivots + some partial fetch)
+ CTX_PARTIAL_FETCH, // performing partial fetch
+ CTX_FULL_EVICTION, // running full eviction
+ CTX_PARTIAL_EVICTION, // running partial eviction
+ CTX_MESSAGE_INJECTION, // injecting a message into a buffer
+ CTX_MESSAGE_APPLICATION, // applying ancestor's messages to a basement node
+ CTX_FLUSH, // flushing a buffer
+ CTX_CLEANER // doing work as the cleaner thread
+};
+
+// Note a contention event in engine status
+void toku_context_note_frwlock_contention(const context_id blocked, const context_id blocking);
+
+namespace toku {
+
+ // class for tracking what a thread is doing
+ //
+ // usage:
+ //
+ // // automatically tag and document what you're doing
+ // void my_interesting_function(void) {
+ // toku::context ctx("doing something interesting", INTERESTING_FN_1);
+ // ...
+ // {
+ // toku::context inner_ctx("doing something expensive", EXPENSIVE_FN_1);
+ // my_rwlock.wrlock();
+ // expensive();
+ // my_rwlock.wrunlock();
+ // }
+ // ...
+ // }
+ //
+ // // ... so later you can write code like this.
+ // // here, we save some info to help determine why a lock could not be acquired
+ // void my_rwlock::wrlock() {
+ // r = try_acquire_write_lock();
+ // if (r == 0) {
+ // m_write_locked_context_id = get_thread_local_context()->get_id();
+ // ...
+ // } else {
+ // if (m_write_locked_context_id == EXPENSIVE_FN_1) {
+ // status.blocked_because_of_expensive_fn_1++;
+ // } else if (...) {
+ // ...
+ // }
+ // ...
+ // }
+ // }
+ class context {
+ public:
+ context(const context_id id);
+
+ ~context();
+
+ context_id get_id() const {
+ return m_id;
+ }
+
+ private:
+ // each thread has a stack of contexts, rooted at the trivial "root context"
+ const context *m_old_ctx;
+ const context_id m_id;
+ };
+
+} // namespace toku
+
+// Get the current context of this thread
+const toku::context *toku_thread_get_context();
+
+enum context_status_entry {
+ CTX_SEARCH_BLOCKED_BY_FULL_FETCH = 0,
+ CTX_SEARCH_BLOCKED_BY_PARTIAL_FETCH,
+ CTX_SEARCH_BLOCKED_BY_FULL_EVICTION,
+ CTX_SEARCH_BLOCKED_BY_PARTIAL_EVICTION,
+ CTX_SEARCH_BLOCKED_BY_MESSAGE_INJECTION,
+ CTX_SEARCH_BLOCKED_BY_MESSAGE_APPLICATION,
+ CTX_SEARCH_BLOCKED_BY_FLUSH,
+ CTX_SEARCH_BLOCKED_BY_CLEANER,
+ CTX_SEARCH_BLOCKED_OTHER,
+ CTX_PROMO_BLOCKED_BY_FULL_FETCH,
+ CTX_PROMO_BLOCKED_BY_PARTIAL_FETCH,
+ CTX_PROMO_BLOCKED_BY_FULL_EVICTION,
+ CTX_PROMO_BLOCKED_BY_PARTIAL_EVICTION,
+ CTX_PROMO_BLOCKED_BY_MESSAGE_INJECTION,
+ CTX_PROMO_BLOCKED_BY_MESSAGE_APPLICATION,
+ CTX_PROMO_BLOCKED_BY_FLUSH,
+ CTX_PROMO_BLOCKED_BY_CLEANER,
+ CTX_PROMO_BLOCKED_OTHER,
+ CTX_BLOCKED_OTHER,
+ CTX_STATUS_NUM_ROWS
+};
+
+struct context_status {
+ bool initialized;
+ TOKU_ENGINE_STATUS_ROW_S status[CTX_STATUS_NUM_ROWS];
+};
+
+void toku_context_get_status(struct context_status *status);
+
+void toku_context_status_init(void);
+void toku_context_status_destroy(void);
diff --git a/storage/tokudb/PerconaFT/util/dbt.cc b/storage/tokudb/PerconaFT/util/dbt.cc
new file mode 100644
index 00000000..b6d2a584
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/dbt.cc
@@ -0,0 +1,291 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+#include <string.h>
+
+#include "portability/memory.h"
+
+#include "util/dbt.h"
+
+DBT *toku_init_dbt(DBT *dbt) {
+ memset(dbt, 0, sizeof(*dbt));
+ return dbt;
+}
+
+DBT toku_empty_dbt(void) {
+ static const DBT empty_dbt = { .data = 0, .size = 0, .ulen = 0, .flags = 0 };
+ return empty_dbt;
+}
+
+DBT *toku_init_dbt_flags(DBT *dbt, uint32_t flags) {
+ toku_init_dbt(dbt);
+ dbt->flags = flags;
+ return dbt;
+}
+
+DBT_ARRAY *toku_dbt_array_init(DBT_ARRAY *dbts, uint32_t size) {
+ uint32_t capacity = 1;
+ while (capacity < size) { capacity *= 2; }
+
+ XMALLOC_N(capacity, dbts->dbts);
+ for (uint32_t i = 0; i < capacity; i++) {
+ toku_init_dbt_flags(&dbts->dbts[i], DB_DBT_REALLOC);
+ }
+ dbts->size = size;
+ dbts->capacity = capacity;
+ return dbts;
+}
+
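+// Grow or shrink the array to 'size' slots, doubling capacity when needed and halving it once usage drops below a quarter of capacity.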
+void toku_dbt_array_resize(DBT_ARRAY *dbts, uint32_t size) {
+ if (size != dbts->size) {
+ if (size > dbts->capacity) {
+ const uint32_t old_capacity = dbts->capacity;
+ uint32_t new_capacity = dbts->capacity;
+ while (new_capacity < size) {
+ new_capacity *= 2;
+ }
+ dbts->capacity = new_capacity;
+ XREALLOC_N(new_capacity, dbts->dbts);
+ for (uint32_t i = old_capacity; i < new_capacity; i++) {
+ toku_init_dbt_flags(&dbts->dbts[i], DB_DBT_REALLOC);
+ }
+ } else if (size < dbts->size) {
+ if (dbts->capacity >= 8 && size < dbts->capacity / 4) {
+ const int old_capacity = dbts->capacity;
+ const int new_capacity = dbts->capacity / 2;
+ for (int i = new_capacity; i < old_capacity; i++) {
+ toku_destroy_dbt(&dbts->dbts[i]);
+ }
+ XREALLOC_N(new_capacity, dbts->dbts);
+ dbts->capacity = new_capacity;
+ }
+ }
+ dbts->size = size;
+ }
+}
+
+void toku_dbt_array_destroy_shallow(DBT_ARRAY *dbts) {
+ toku_free(dbts->dbts);
+ ZERO_STRUCT(*dbts);
+}
+
+void toku_dbt_array_destroy(DBT_ARRAY *dbts) {
+ for (uint32_t i = 0; i < dbts->capacity; i++) {
+ toku_destroy_dbt(&dbts->dbts[i]);
+ }
+ toku_dbt_array_destroy_shallow(dbts);
+}
+
+
+
+void toku_destroy_dbt(DBT *dbt) {
+ switch (dbt->flags) {
+ case DB_DBT_MALLOC:
+ case DB_DBT_REALLOC:
+ toku_free(dbt->data);
+ toku_init_dbt(dbt);
+ break;
+ }
+}
+
+DBT *toku_fill_dbt(DBT *dbt, const void *k, uint32_t len) {
+ toku_init_dbt(dbt);
+ dbt->size=len;
+ dbt->data=(char*)k;
+ return dbt;
+}
+
+DBT *toku_memdup_dbt(DBT *dbt, const void *k, size_t len) {
+ toku_init_dbt_flags(dbt, DB_DBT_MALLOC);
+ dbt->size = len;
+ dbt->data = toku_xmemdup(k, len);
+ return dbt;
+}
+
+DBT *toku_copyref_dbt(DBT *dst, const DBT src) {
+ dst->flags = 0;
+ dst->ulen = 0;
+ dst->size = src.size;
+ dst->data = src.data;
+ return dst;
+}
+
+DBT *toku_clone_dbt(DBT *dst, const DBT &src) {
+ return toku_memdup_dbt(dst, src.data, src.size);
+}
+
+void
+toku_sdbt_cleanup(struct simple_dbt *sdbt) {
+ if (sdbt->data) toku_free(sdbt->data);
+ memset(sdbt, 0, sizeof(*sdbt));
+}
+
+static inline int sdbt_realloc(struct simple_dbt *sdbt) {
+ void *new_data = toku_realloc(sdbt->data, sdbt->len);
+ int r;
+ if (new_data == NULL) {
+ r = get_error_errno();
+ } else {
+ sdbt->data = new_data;
+ r = 0;
+ }
+ return r;
+}
+
+static inline int dbt_realloc(DBT *dbt) {
+ void *new_data = toku_realloc(dbt->data, dbt->ulen);
+ int r;
+ if (new_data == NULL) {
+ r = get_error_errno();
+ } else {
+ dbt->data = new_data;
+ r = 0;
+ }
+ return r;
+}
+
+// sdbt is the static value used when flags==0
+// Otherwise malloc or use the user-supplied memory, as according to the flags in d->flags.
+int toku_dbt_set(uint32_t len, const void *val, DBT *d, struct simple_dbt *sdbt) {
+ int r;
+ if (d == nullptr) {
+ r = 0;
+ } else {
+ switch (d->flags) {
+ case (DB_DBT_USERMEM):
+ d->size = len;
+ if (d->ulen<len) r = DB_BUFFER_SMALL;
+ else {
+ memcpy(d->data, val, len);
+ r = 0;
+ }
+ break;
+ case (DB_DBT_MALLOC):
+ d->data = NULL;
+ d->ulen = 0;
+ // fallthrough
+ // to DB_DBT_REALLOC
+ case (DB_DBT_REALLOC):
+ if (d->ulen < len) {
+ d->ulen = len*2;
+ r = dbt_realloc(d);
+ }
+ else if (d->ulen > 16 && d->ulen > len*4) {
+ d->ulen = len*2 < 16 ? 16 : len*2;
+ r = dbt_realloc(d);
+ }
+ else if (d->data==NULL) {
+ d->ulen = len;
+ r = dbt_realloc(d);
+ }
+ else r=0;
+
+ if (r==0) {
+ memcpy(d->data, val, len);
+ d->size = len;
+ }
+ break;
+ case (0):
+ if (sdbt->len < len) {
+ sdbt->len = len*2;
+ r = sdbt_realloc(sdbt);
+ }
+ else if (sdbt->len > 16 && sdbt->len > len*4) {
+ sdbt->len = len*2 < 16 ? 16 : len*2;
+ r = sdbt_realloc(sdbt);
+ }
+ else r=0;
+
+ if (r==0) {
+ memcpy(sdbt->data, val, len);
+ d->data = sdbt->data;
+ d->size = len;
+ }
+ break;
+ default:
+ r = EINVAL;
+ break;
+ }
+ }
+ return r;
+}
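+// Illustrative use (not part of this file): with DB_DBT_REALLOC the destination manages its own buffer,
+// so repeated calls reuse or grow d.data and toku_destroy_dbt() releases it:
+//   DBT d; toku_init_dbt_flags(&d, DB_DBT_REALLOC);
+//   int r = toku_dbt_set(len, val, &d, nullptr);  // copies val into d.data and sets d.size = len
+//   toku_destroy_dbt(&d);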
+
+const DBT *toku_dbt_positive_infinity(void) {
+ static DBT positive_infinity_dbt = {};
+ return &positive_infinity_dbt;
+}
+
+const DBT *toku_dbt_negative_infinity(void) {
+ static DBT negative_infinity_dbt = {};
+ return &negative_infinity_dbt;
+}
+
+bool toku_dbt_is_infinite(const DBT *dbt) {
+ return dbt == toku_dbt_positive_infinity() || dbt == toku_dbt_negative_infinity();
+}
+
+bool toku_dbt_is_empty(const DBT *dbt) {
+ // can't have a null data field with a non-zero size
+ paranoid_invariant(dbt->data != nullptr || dbt->size == 0);
+ return dbt->data == nullptr;
+}
+
+int toku_dbt_infinite_compare(const DBT *a, const DBT *b) {
+ if (a == b) {
+ return 0;
+ } else if (a == toku_dbt_positive_infinity()) {
+ return 1;
+ } else if (b == toku_dbt_positive_infinity()) {
+ return -1;
+ } else if (a == toku_dbt_negative_infinity()) {
+ return -1;
+ } else {
+ invariant(b == toku_dbt_negative_infinity());
+ return 1;
+ }
+}
+
+bool toku_dbt_equals(const DBT *a, const DBT *b) {
+ if (!toku_dbt_is_infinite(a) && !toku_dbt_is_infinite(b)) {
+ return a->data == b->data && a->size == b->size;
+ } else {
+ // a or b is infinite, so they're equal if they are the same infinite
+ return a == b ? true : false;
+ }
+}
diff --git a/storage/tokudb/PerconaFT/util/dbt.h b/storage/tokudb/PerconaFT/util/dbt.h
new file mode 100644
index 00000000..1b837567
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/dbt.h
@@ -0,0 +1,101 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <db.h>
+
+// TODO: John
+// Document this API a little better so that DBT
+// memory management can be more widely understood.
+
+DBT *toku_init_dbt(DBT *);
+
+// returns: an initialized but empty dbt (for which toku_dbt_is_empty() is true)
+DBT toku_empty_dbt(void);
+
+DBT *toku_init_dbt_flags(DBT *, uint32_t flags);
+
+void toku_destroy_dbt(DBT *);
+
+DBT *toku_fill_dbt(DBT *dbt, const void *k, uint32_t len);
+
+DBT *toku_memdup_dbt(DBT *dbt, const void *k, size_t len);
+
+DBT *toku_copyref_dbt(DBT *dst, const DBT src);
+
+DBT *toku_clone_dbt(DBT *dst, const DBT &src);
+
+int toku_dbt_set(uint32_t len, const void *val, DBT *d, struct simple_dbt *sdbt);
+
+int toku_dbt_set_value(DBT *, const void **val, uint32_t vallen, void **staticptrp, bool dbt1_disposable);
+
+void toku_sdbt_cleanup(struct simple_dbt *sdbt);
+
+// returns: special DBT pointer representing positive infinity
+const DBT *toku_dbt_positive_infinity(void);
+
+// returns: special DBT pointer representing negative infinity
+const DBT *toku_dbt_negative_infinity(void);
+
+// returns: true if the given dbt is either positive or negative infinity
+bool toku_dbt_is_infinite(const DBT *dbt);
+
+// returns: true if the given dbt has no data (ie: dbt->data == nullptr)
+bool toku_dbt_is_empty(const DBT *dbt);
+
+// effect: compares two potentially infinity-valued dbts
+// requires: at least one is infinite (assert otherwise)
+int toku_dbt_infinite_compare(const DBT *a, const DBT *b);
+
+// returns: true if the given dbts have the same data pointer and size
+bool toku_dbt_equals(const DBT *a, const DBT *b);
diff --git a/storage/tokudb/PerconaFT/util/dmt.cc b/storage/tokudb/PerconaFT/util/dmt.cc
new file mode 100644
index 00000000..a584bf2b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/dmt.cc
@@ -0,0 +1,1213 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <string.h>
+#include <db.h>
+
+#include <portability/memory.h>
+#include <limits.h>
+
+namespace toku {
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::create(void) {
+ toku_mempool_zero(&this->mp);
+ this->values_same_size = true;
+ this->value_length = 0;
+ this->is_array = true;
+ this->d.a.num_values = 0;
+ //TODO: maybe allocate enough space for something by default?
+ // We may be relying on not needing to allocate space the first time (due to limited time spent while a lock is held)
+}
+
+/**
+ * Note: create_from_sorted_memory_of_fixed_size_elements does not take ownership of 'mem'.
+ * Owner is still responsible for freeing it.
+ * While in the OMT a similar function would steal ownership, this doesn't make sense for the DMT because
+ * we (usually) have to add padding for alignment (mem has all of the elements PACKED).
+ * Also all current uses (as of Jan 12, 2014) of this function would require mallocing a new array
+ * in order to allow stealing.
+ */
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::create_from_sorted_memory_of_fixed_size_elements(
+ const void *mem,
+ const uint32_t numvalues,
+ const uint32_t mem_length,
+ const uint32_t fixed_value_length) {
+ this->values_same_size = true;
+ this->value_length = fixed_value_length;
+ this->is_array = true;
+ this->d.a.num_values = numvalues;
+ const uint8_t pad_bytes = get_fixed_length_alignment_overhead();
+ uint32_t aligned_memsize = mem_length + numvalues * pad_bytes;
+ toku_mempool_construct(&this->mp, aligned_memsize);
+ if (aligned_memsize > 0) {
+ paranoid_invariant(numvalues > 0);
+ void *ptr = toku_mempool_malloc(&this->mp, aligned_memsize);
+ paranoid_invariant_notnull(ptr);
+ uint8_t * const dest = static_cast<uint8_t *>(ptr);
+ const uint8_t * const src = static_cast<const uint8_t *>(mem);
+ if (pad_bytes == 0) {
+ paranoid_invariant(aligned_memsize == mem_length);
+ memcpy(dest, src, aligned_memsize);
+ } else {
+ // TODO(leif): check what vectorizes best: multiplying like this or adding to offsets
+ const uint32_t fixed_len = this->value_length;
+ const uint32_t fixed_aligned_len = align(this->value_length);
+ paranoid_invariant(this->d.a.num_values*fixed_len == mem_length);
+ for (uint32_t i = 0; i < this->d.a.num_values; i++) {
+ memcpy(&dest[i*fixed_aligned_len], &src[i*fixed_len], fixed_len);
+ }
+ }
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::clone(const dmt &src) {
+ *this = src;
+ toku_mempool_clone(&src.mp, &this->mp);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::clear(void) {
+ this->is_array = true;
+ this->d.a.num_values = 0;
+ this->values_same_size = true; // Reset state
+ this->value_length = 0;
+ //TODO(leif): Note that this can mess with our memory_footprint calculation (we may touch past what is marked as 'used' in the mempool)
+ // One 'fix' is for mempool to also track what was touched, and reset() shouldn't reset that, though realloc() might.
+ toku_mempool_reset(&this->mp);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::destroy(void) {
+ this->clear();
+ toku_mempool_destroy(&this->mp);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+uint32_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::size(void) const {
+ if (this->is_array) {
+ return this->d.a.num_values;
+ } else {
+ return this->nweight(this->d.t.root);
+ }
+}
+
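+// weight of a subtree == number of values stored under it; a null subtree has weight 0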
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+uint32_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::nweight(const subtree &subtree) const {
+ if (subtree.is_null()) {
+ return 0;
+ } else {
+ const dmt_node & node = get_node(subtree);
+ return node.weight;
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename dmtcmp_t, int (*h)(const uint32_t size, const dmtdata_t &, const dmtcmp_t &)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::insert(const dmtwriter_t &value, const dmtcmp_t &v, uint32_t *const idx) {
+ int r;
+ uint32_t insert_idx;
+
+ r = this->find_zero<dmtcmp_t, h>(v, nullptr, nullptr, &insert_idx);
+ if (r==0) {
+ if (idx) *idx = insert_idx;
+ return DB_KEYEXIST;
+ }
+ if (r != DB_NOTFOUND) return r;
+
+ if ((r = this->insert_at(value, insert_idx))) return r;
+ if (idx) *idx = insert_idx;
+
+ return 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::insert_at(const dmtwriter_t &value, const uint32_t idx) {
+ if (idx > this->size()) { return EINVAL; }
+
+ bool same_size = this->values_same_size && (this->size() == 0 || value.get_size() == this->value_length);
+ if (this->is_array) {
+ if (same_size && idx == this->d.a.num_values) {
+ return this->insert_at_array_end<true>(value);
+ }
+ this->convert_from_array_to_tree();
+ }
+ // Is a tree.
+ paranoid_invariant(!is_array);
+ if (!same_size) {
+ this->values_same_size = false;
+ this->value_length = 0;
+ }
+
+ this->maybe_resize_tree(&value);
+ subtree *rebalance_subtree = nullptr;
+ this->insert_internal(&this->d.t.root, value, idx, &rebalance_subtree);
+ if (rebalance_subtree != nullptr) {
+ this->rebalance(rebalance_subtree);
+ }
+ return 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<bool with_resize>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::insert_at_array_end(const dmtwriter_t& value_in) {
+ paranoid_invariant(this->is_array);
+ paranoid_invariant(this->values_same_size);
+ if (this->d.a.num_values == 0) {
+ this->value_length = value_in.get_size();
+ }
+ paranoid_invariant(this->value_length == value_in.get_size());
+
+ if (with_resize) {
+ this->maybe_resize_array_for_insert();
+ }
+ dmtdata_t *dest = this->alloc_array_value_end();
+ value_in.write_to(dest);
+ return 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+dmtdata_t * dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::alloc_array_value_end(void) {
+ paranoid_invariant(this->is_array);
+ paranoid_invariant(this->values_same_size);
+ this->d.a.num_values++;
+
+ void *ptr = toku_mempool_malloc(&this->mp, align(this->value_length));
+ paranoid_invariant_notnull(ptr);
+ paranoid_invariant(reinterpret_cast<size_t>(ptr) % ALIGNMENT == 0);
+ dmtdata_t *CAST_FROM_VOIDP(n, ptr);
+ paranoid_invariant(n == get_array_value(this->d.a.num_values - 1));
+ return n;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+dmtdata_t * dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_array_value(const uint32_t idx) const {
+ paranoid_invariant(this->is_array);
+ paranoid_invariant(this->values_same_size);
+
+ paranoid_invariant(idx < this->d.a.num_values);
+ return get_array_value_internal(&this->mp, idx);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+dmtdata_t * dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_array_value_internal(const struct mempool *mempool, const uint32_t idx) const {
+ void* ptr = toku_mempool_get_pointer_from_base_and_offset(mempool, idx * align(this->value_length));
+ dmtdata_t *CAST_FROM_VOIDP(value, ptr);
+ return value;
+}
+
+//TODO(leif) write microbenchmarks to compare growth factor. Note: growth factor here is actually 2.5 because of mempool_construct
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_array_for_insert(void) {
+ bool space_available = toku_mempool_get_free_size(&this->mp) >= align(this->value_length);
+
+ if (!space_available) {
+ const uint32_t n = this->d.a.num_values + 1;
+ const uint32_t new_n = n <=2 ? 4 : 2*n;
+ const uint32_t new_space = align(this->value_length) * new_n;
+
+ struct mempool new_kvspace;
+ toku_mempool_construct(&new_kvspace, new_space);
+ size_t copy_bytes = this->d.a.num_values * align(this->value_length);
+ invariant(copy_bytes + align(this->value_length) <= new_space);
+ paranoid_invariant(copy_bytes <= toku_mempool_get_used_size(&this->mp));
+ // Copy over to new mempool
+ if (this->d.a.num_values > 0) {
+ void* dest = toku_mempool_malloc(&new_kvspace, copy_bytes);
+ invariant(dest!=nullptr);
+ memcpy(dest, get_array_value(0), copy_bytes);
+ }
+ toku_mempool_destroy(&this->mp);
+ this->mp = new_kvspace;
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+uint32_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::align(const uint32_t x) const {
+ return roundup_to_multiple(ALIGNMENT, x);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::prepare_for_serialize(void) {
+ if (!this->is_array) {
+ this->convert_from_tree_to_array();
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::convert_from_tree_to_array(void) {
+ paranoid_invariant(!this->is_array);
+ paranoid_invariant(this->values_same_size);
+
+ const uint32_t num_values = this->size();
+
+ node_offset *tmp_array;
+ bool malloced = false;
+ tmp_array = alloc_temp_node_offsets(num_values);
+ if (!tmp_array) {
+ malloced = true;
+ XMALLOC_N(num_values, tmp_array);
+ }
+ this->fill_array_with_subtree_offsets(tmp_array, this->d.t.root);
+
+ struct mempool new_mp;
+ const uint32_t fixed_len = this->value_length;
+ const uint32_t fixed_aligned_len = align(this->value_length);
+ size_t mem_needed = num_values * fixed_aligned_len;
+ toku_mempool_construct(&new_mp, mem_needed);
+ uint8_t* CAST_FROM_VOIDP(dest, toku_mempool_malloc(&new_mp, mem_needed));
+ paranoid_invariant_notnull(dest);
+ for (uint32_t i = 0; i < num_values; i++) {
+ const dmt_node &n = get_node(tmp_array[i]);
+ memcpy(&dest[i*fixed_aligned_len], &n.value, fixed_len);
+ }
+ toku_mempool_destroy(&this->mp);
+ this->mp = new_mp;
+ this->is_array = true;
+ this->d.a.num_values = num_values;
+
+ if (malloced) toku_free(tmp_array);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::convert_from_array_to_tree(void) {
+ paranoid_invariant(this->is_array);
+ paranoid_invariant(this->values_same_size);
+
+ //save array-format information to locals
+ const uint32_t num_values = this->d.a.num_values;
+
+ node_offset *tmp_array;
+ bool malloced = false;
+ tmp_array = alloc_temp_node_offsets(num_values);
+ if (!tmp_array) {
+ malloced = true;
+ XMALLOC_N(num_values, tmp_array);
+ }
+
+ struct mempool old_mp = this->mp;
+ size_t mem_needed = num_values * align(this->value_length + __builtin_offsetof(dmt_node, value));
+ toku_mempool_construct(&this->mp, mem_needed);
+
+ for (uint32_t i = 0; i < num_values; i++) {
+ dmtwriter_t writer(this->value_length, get_array_value_internal(&old_mp, i));
+ tmp_array[i] = node_malloc_and_set_value(writer);
+ }
+ this->is_array = false;
+ this->rebuild_subtree_from_offsets(&this->d.t.root, tmp_array, num_values);
+
+ if (malloced) toku_free(tmp_array);
+ toku_mempool_destroy(&old_mp);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::delete_at(const uint32_t idx) {
+ uint32_t n = this->size();
+ if (idx >= n) { return EINVAL; }
+
+ if (n == 1) {
+ this->clear(); //Emptying out the entire dmt.
+ return 0;
+ }
+ if (this->is_array) {
+ this->convert_from_array_to_tree();
+ }
+ paranoid_invariant(!is_array);
+
+ subtree *rebalance_subtree = nullptr;
+ this->delete_internal(&this->d.t.root, idx, nullptr, &rebalance_subtree);
+ if (rebalance_subtree != nullptr) {
+ this->rebalance(rebalance_subtree);
+ }
+ this->maybe_resize_tree(nullptr);
+ return 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename iterate_extra_t,
+ int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate(iterate_extra_t *const iterate_extra) const {
+ return this->iterate_on_range<iterate_extra_t, f>(0, this->size(), iterate_extra);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename iterate_extra_t,
+ int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const {
+ if (right > this->size()) { return EINVAL; }
+ if (left == right) { return 0; }
+ if (this->is_array) {
+ return this->iterate_internal_array<iterate_extra_t, f>(left, right, iterate_extra);
+ }
+ return this->iterate_internal<iterate_extra_t, f>(left, right, this->d.t.root, 0, iterate_extra);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::verify(void) const {
+ uint32_t num_values = this->size();
+ invariant(num_values < UINT32_MAX);
+ size_t pool_used = toku_mempool_get_used_size(&this->mp);
+ size_t pool_size = toku_mempool_get_size(&this->mp);
+ size_t pool_frag = toku_mempool_get_frag_size(&this->mp);
+ invariant(pool_used <= pool_size);
+ if (this->is_array) {
+ invariant(this->values_same_size);
+ invariant(num_values == this->d.a.num_values);
+
+ // We know exactly how much memory should be used.
+ invariant(pool_used == num_values * align(this->value_length));
+
+ // Array form must have 0 fragmentation in mempool.
+ invariant(pool_frag == 0);
+ } else {
+ if (this->values_same_size) {
+ // We know exactly how much memory should be used.
+ invariant(pool_used == num_values * align(this->value_length + __builtin_offsetof(dmt_node, value)));
+ } else {
+ // We can only do a lower bound on memory usage.
+ invariant(pool_used >= num_values * __builtin_offsetof(dmt_node, value));
+ }
+ std::vector<bool> touched(pool_size, false);
+ verify_internal(this->d.t.root, &touched);
+ size_t bytes_used = 0;
+ for (size_t i = 0; i < pool_size; i++) {
+ if (touched.at(i)) {
+ ++bytes_used;
+ }
+ }
+ invariant(bytes_used == pool_used);
+ }
+}
+
+// Verifies all weights are internally consistent.
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::verify_internal(const subtree &subtree, std::vector<bool> *touched) const {
+ if (subtree.is_null()) {
+ return;
+ }
+ const dmt_node &node = get_node(subtree);
+
+ if (this->values_same_size) {
+ invariant(node.value_length == this->value_length);
+ }
+
+ size_t offset = toku_mempool_get_offset_from_pointer_and_base(&this->mp, &node);
+ size_t node_size = align(__builtin_offsetof(dmt_node, value) + node.value_length);
+ invariant(offset <= touched->size());
+ invariant(offset+node_size <= touched->size());
+ invariant(offset % ALIGNMENT == 0);
+ // Mark this node's memory as touched, verifying it was never allocated to multiple nodes.
+ for (size_t i = offset; i < offset+node_size; ++i) {
+ invariant(!touched->at(i));
+ touched->at(i) = true;
+ }
+
+ const uint32_t leftweight = this->nweight(node.left);
+ const uint32_t rightweight = this->nweight(node.right);
+
+ invariant(leftweight + rightweight + 1 == this->nweight(subtree));
+ verify_internal(node.left, touched);
+ verify_internal(node.right, touched);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename iterate_extra_t,
+ int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_ptr(iterate_extra_t *const iterate_extra) {
+ if (this->is_array) {
+ this->iterate_ptr_internal_array<iterate_extra_t, f>(0, this->size(), iterate_extra);
+ } else {
+ this->iterate_ptr_internal<iterate_extra_t, f>(0, this->size(), this->d.t.root, 0, iterate_extra);
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::fetch(const uint32_t idx, uint32_t *const value_len, dmtdataout_t *const value) const {
+ if (idx >= this->size()) { return EINVAL; }
+ if (this->is_array) {
+ this->fetch_internal_array(idx, value_len, value);
+ } else {
+ this->fetch_internal(this->d.t.root, idx, value_len, value);
+ }
+ return 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_zero(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
+ uint32_t tmp_index;
+ uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index;
+ int r;
+ if (this->is_array) {
+ r = this->find_internal_zero_array<dmtcmp_t, h>(extra, value_len, value, child_idxp);
+ }
+ else {
+ r = this->find_internal_zero<dmtcmp_t, h>(this->d.t.root, extra, value_len, value, child_idxp);
+ }
+ return r;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find(const dmtcmp_t &extra, int direction, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
+ uint32_t tmp_index;
+ uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index;
+ paranoid_invariant(direction != 0);
+ if (direction < 0) {
+ if (this->is_array) {
+ return this->find_internal_minus_array<dmtcmp_t, h>(extra, value_len, value, child_idxp);
+ } else {
+ return this->find_internal_minus<dmtcmp_t, h>(this->d.t.root, extra, value_len, value, child_idxp);
+ }
+ } else {
+ if (this->is_array) {
+ return this->find_internal_plus_array<dmtcmp_t, h>(extra, value_len, value, child_idxp);
+ } else {
+ return this->find_internal_plus<dmtcmp_t, h>(this->d.t.root, extra, value_len, value, child_idxp);
+ }
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+size_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::memory_size(void) {
+ return (sizeof *this) + toku_mempool_get_size(&this->mp);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+dmt_node_templated<dmtdata_t> & dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_node(const subtree &subtree) const {
+ paranoid_invariant(!subtree.is_null());
+ return get_node(subtree.get_offset());
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+dmt_node_templated<dmtdata_t> & dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_node(const node_offset offset) const {
+ void* ptr = toku_mempool_get_pointer_from_base_and_offset(&this->mp, offset);
+ dmt_node *CAST_FROM_VOIDP(node, ptr);
+ return *node;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::node_set_value(dmt_node * n, const dmtwriter_t &value) {
+ n->value_length = value.get_size();
+ value.write_to(&n->value);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+node_offset dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::node_malloc_and_set_value(const dmtwriter_t &value) {
+ size_t val_size = value.get_size();
+ size_t size_to_alloc = __builtin_offsetof(dmt_node, value) + val_size;
+ size_to_alloc = align(size_to_alloc);
+ void* np = toku_mempool_malloc(&this->mp, size_to_alloc);
+ paranoid_invariant_notnull(np);
+ dmt_node *CAST_FROM_VOIDP(n, np);
+ node_set_value(n, value);
+
+ return toku_mempool_get_offset_from_pointer_and_base(&this->mp, np);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::node_free(const subtree &st) {
+ dmt_node &n = get_node(st);
+ size_t size_to_free = __builtin_offsetof(dmt_node, value) + n.value_length;
+ size_to_free = align(size_to_free);
+ toku_mempool_mfree(&this->mp, &n, size_to_free);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_tree(const dmtwriter_t * value) {
+ const ssize_t curr_capacity = toku_mempool_get_size(&this->mp);
+ const ssize_t curr_free = toku_mempool_get_free_size(&this->mp);
+ const ssize_t curr_used = toku_mempool_get_used_size(&this->mp);
+ ssize_t add_size = 0;
+ if (value) {
+ add_size = __builtin_offsetof(dmt_node, value) + value->get_size();
+ add_size = align(add_size);
+ }
+
+ const ssize_t need_size = curr_used + add_size;
+ paranoid_invariant(need_size <= UINT32_MAX);
+ //TODO(leif) consider different growth rates
+ const ssize_t new_size = 2*need_size;
+ paranoid_invariant(new_size <= UINT32_MAX);
+
+ if ((curr_capacity / 2 >= new_size) || // Way too much allocated
+ (curr_free < add_size)) { // No room in mempool
+ // Copy all memory and reconstruct dmt in new mempool.
+ if (curr_free < add_size && toku_mempool_get_frag_size(&this->mp) == 0) {
+ // TODO(yoni) or TODO(leif) consider doing this not just when frag size is zero, but also when it is a small percentage of the total mempool size
+ // Offsets remain the same in the new mempool so we can just realloc.
+ toku_mempool_realloc_larger(&this->mp, new_size);
+ } else if (!this->d.t.root.is_null()) {
+ struct mempool new_kvspace;
+ toku_mempool_construct(&new_kvspace, new_size);
+
+ const dmt_node &n = get_node(this->d.t.root);
+ node_offset *tmp_array;
+ bool malloced = false;
+ tmp_array = alloc_temp_node_offsets(n.weight);
+ if (!tmp_array) {
+ malloced = true;
+ XMALLOC_N(n.weight, tmp_array);
+ }
+ this->fill_array_with_subtree_offsets(tmp_array, this->d.t.root);
+ for (node_offset i = 0; i < n.weight; i++) {
+ dmt_node &node = get_node(tmp_array[i]);
+ const size_t bytes_to_copy = __builtin_offsetof(dmt_node, value) + node.value_length;
+ const size_t bytes_to_alloc = align(bytes_to_copy);
+ void* newdata = toku_mempool_malloc(&new_kvspace, bytes_to_alloc);
+ memcpy(newdata, &node, bytes_to_copy);
+ tmp_array[i] = toku_mempool_get_offset_from_pointer_and_base(&new_kvspace, newdata);
+ }
+
+ struct mempool old_kvspace = this->mp;
+ this->mp = new_kvspace;
+ this->rebuild_subtree_from_offsets(&this->d.t.root, tmp_array, n.weight);
+ if (malloced) toku_free(tmp_array);
+ toku_mempool_destroy(&old_kvspace);
+ } else {
+ toku_mempool_destroy(&this->mp);
+ toku_mempool_construct(&this->mp, new_size);
+ }
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+bool dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::will_need_rebalance(const subtree &subtree, const int leftmod, const int rightmod) const {
+ if (subtree.is_null()) { return false; }
+ const dmt_node &n = get_node(subtree);
+ // one of the 1's is for the root.
+ // the other is to take ceil(n/2)
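+ // That is, rebalance when one side (counting this node) would hold fewer than
+ // half as many nodes, rounded up, as the other side (counting this node).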
+ const uint32_t weight_left = this->nweight(n.left) + leftmod;
+ const uint32_t weight_right = this->nweight(n.right) + rightmod;
+ return ((1+weight_left < (1+1+weight_right)/2)
+ ||
+ (1+weight_right < (1+1+weight_left)/2));
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::insert_internal(subtree *const subtreep, const dmtwriter_t &value, const uint32_t idx, subtree **const rebalance_subtree) {
+ if (subtreep->is_null()) {
+ paranoid_invariant_zero(idx);
+ const node_offset newoffset = this->node_malloc_and_set_value(value);
+ dmt_node &newnode = get_node(newoffset);
+ newnode.weight = 1;
+ newnode.left.set_to_null();
+ newnode.right.set_to_null();
+ subtreep->set_offset(newoffset);
+ } else {
+ dmt_node &n = get_node(*subtreep);
+ n.weight++;
+ if (idx <= this->nweight(n.left)) {
+ if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 1, 0)) {
+ *rebalance_subtree = subtreep;
+ }
+ this->insert_internal(&n.left, value, idx, rebalance_subtree);
+ } else {
+ if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 0, 1)) {
+ *rebalance_subtree = subtreep;
+ }
+ const uint32_t sub_index = idx - this->nweight(n.left) - 1;
+ this->insert_internal(&n.right, value, sub_index, rebalance_subtree);
+ }
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::delete_internal(subtree *const subtreep, const uint32_t idx, subtree *const subtree_replace, subtree **const rebalance_subtree) {
+ paranoid_invariant_notnull(subtreep);
+ paranoid_invariant_notnull(rebalance_subtree);
+ paranoid_invariant(!subtreep->is_null());
+ dmt_node &n = get_node(*subtreep);
+ const uint32_t leftweight = this->nweight(n.left);
+ if (idx < leftweight) {
+ n.weight--;
+ if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, -1, 0)) {
+ *rebalance_subtree = subtreep;
+ }
+ this->delete_internal(&n.left, idx, subtree_replace, rebalance_subtree);
+ } else if (idx == leftweight) {
+ // Found the correct index.
+ if (n.left.is_null()) {
+ paranoid_invariant_zero(idx);
+ // Delete n and let parent point to n.right
+ subtree ptr_this = *subtreep;
+ *subtreep = n.right;
+ subtree to_free;
+ if (subtree_replace != nullptr) {
+ // Swap self with the other node. Taking over all responsibility.
+ to_free = *subtree_replace;
+ dmt_node &ancestor = get_node(*subtree_replace);
+ if (*rebalance_subtree == &ancestor.right) {
+ // Take over rebalance responsibility.
+ *rebalance_subtree = &n.right;
+ }
+ n.weight = ancestor.weight;
+ n.left = ancestor.left;
+ n.right = ancestor.right;
+ *subtree_replace = ptr_this;
+ } else {
+ to_free = ptr_this;
+ }
+ this->node_free(to_free);
+ } else if (n.right.is_null()) {
+ // Delete n and let parent point to n.left
+ subtree to_free = *subtreep;
+ *subtreep = n.left;
+ paranoid_invariant(idx>0);
+ paranoid_invariant_null(subtree_replace); // To be recursive, we're looking for index 0. n is index > 0 here.
+ this->node_free(to_free);
+ } else {
+ if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 0, -1)) {
+ *rebalance_subtree = subtreep;
+ }
+ // don't need to copy up value, it's only used by this
+ // next call, and when that gets to the bottom there
+ // won't be any more recursion
+ n.weight--;
+ this->delete_internal(&n.right, 0, subtreep, rebalance_subtree);
+ }
+ } else {
+ n.weight--;
+ if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 0, -1)) {
+ *rebalance_subtree = subtreep;
+ }
+ this->delete_internal(&n.right, idx - leftweight - 1, subtree_replace, rebalance_subtree);
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename iterate_extra_t,
+ int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_internal_array(const uint32_t left, const uint32_t right,
+ iterate_extra_t *const iterate_extra) const {
+ int r;
+ for (uint32_t i = left; i < right; ++i) {
+ r = f(this->value_length, *get_array_value(i), i, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename iterate_extra_t,
+ int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_ptr_internal(const uint32_t left, const uint32_t right,
+ const subtree &subtree, const uint32_t idx,
+ iterate_extra_t *const iterate_extra) {
+ if (!subtree.is_null()) {
+ dmt_node &n = get_node(subtree);
+ const uint32_t idx_root = idx + this->nweight(n.left);
+ if (left < idx_root) {
+ this->iterate_ptr_internal<iterate_extra_t, f>(left, right, n.left, idx, iterate_extra);
+ }
+ if (left <= idx_root && idx_root < right) {
+ int r = f(n.value_length, &n.value, idx_root, iterate_extra);
+ lazy_assert_zero(r);
+ }
+ if (idx_root + 1 < right) {
+ this->iterate_ptr_internal<iterate_extra_t, f>(left, right, n.right, idx_root + 1, iterate_extra);
+ }
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename iterate_extra_t,
+ int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_ptr_internal_array(const uint32_t left, const uint32_t right,
+ iterate_extra_t *const iterate_extra) {
+ for (uint32_t i = left; i < right; ++i) {
+ int r = f(this->value_length, get_array_value(i), i, iterate_extra);
+ lazy_assert_zero(r);
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename iterate_extra_t,
+ int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_internal(const uint32_t left, const uint32_t right,
+ const subtree &subtree, const uint32_t idx,
+ iterate_extra_t *const iterate_extra) const {
+ if (subtree.is_null()) { return 0; }
+ int r;
+ const dmt_node &n = get_node(subtree);
+ const uint32_t idx_root = idx + this->nweight(n.left);
+ if (left < idx_root) {
+ r = this->iterate_internal<iterate_extra_t, f>(left, right, n.left, idx, iterate_extra);
+ if (r != 0) { return r; }
+ }
+ if (left <= idx_root && idx_root < right) {
+ r = f(n.value_length, n.value, idx_root, iterate_extra);
+ if (r != 0) { return r; }
+ }
+ if (idx_root + 1 < right) {
+ return this->iterate_internal<iterate_extra_t, f>(left, right, n.right, idx_root + 1, iterate_extra);
+ }
+ return 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::fetch_internal_array(const uint32_t i, uint32_t *const value_len, dmtdataout_t *const value) const {
+ copyout(value_len, value, this->value_length, get_array_value(i));
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::fetch_internal(const subtree &subtree, const uint32_t i, uint32_t *const value_len, dmtdataout_t *const value) const {
+ dmt_node &n = get_node(subtree);
+ const uint32_t leftweight = this->nweight(n.left);
+ if (i < leftweight) {
+ this->fetch_internal(n.left, i, value_len, value);
+ } else if (i == leftweight) {
+ copyout(value_len, value, &n);
+ } else {
+ this->fetch_internal(n.right, i - leftweight - 1, value_len, value);
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::fill_array_with_subtree_offsets(node_offset *const array, const subtree &subtree) const {
+ if (!subtree.is_null()) {
+ const dmt_node &tree = get_node(subtree);
+ this->fill_array_with_subtree_offsets(&array[0], tree.left);
+ array[this->nweight(tree.left)] = subtree.get_offset();
+ this->fill_array_with_subtree_offsets(&array[this->nweight(tree.left) + 1], tree.right);
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::rebuild_subtree_from_offsets(subtree *const subtree, const node_offset *const offsets, const uint32_t numvalues) {
+ if (numvalues==0) {
+ subtree->set_to_null();
+ } else {
+ uint32_t halfway = numvalues/2;
+ subtree->set_offset(offsets[halfway]);
+ dmt_node &newnode = get_node(offsets[halfway]);
+ newnode.weight = numvalues;
+ // value is already in there.
+ this->rebuild_subtree_from_offsets(&newnode.left, &offsets[0], halfway);
+ this->rebuild_subtree_from_offsets(&newnode.right, &offsets[halfway+1], numvalues-(halfway+1));
+ }
+}
+
+//TODO(leif): Note that this can mess with our memory_footprint calculation (we may touch past what is marked as 'used' in the mempool)
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+node_offset* dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::alloc_temp_node_offsets(uint32_t num_offsets) {
+ size_t mem_needed = num_offsets * sizeof(node_offset);
+ size_t mem_free;
+ mem_free = toku_mempool_get_free_size(&this->mp);
+ node_offset* CAST_FROM_VOIDP(tmp, toku_mempool_get_next_free_ptr(&this->mp));
+ if (mem_free >= mem_needed) {
+ return tmp;
+ }
+ return nullptr;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::rebalance(subtree *const subtree) {
+ paranoid_invariant(!subtree->is_null());
+
+ // There is a possible "optimization" here:
+ // if (this->values_same_size && subtree == &this->d.t.root) {
+ // this->convert_from_tree_to_array();
+ // return;
+ // }
+ // but we don't want to do it because it involves actually copying values around
+ // as opposed to stopping in the middle of rebalancing (like in the OMT)
+
+ node_offset offset = subtree->get_offset();
+ const dmt_node &n = get_node(offset);
+ node_offset *tmp_array;
+ bool malloced = false;
+ tmp_array = alloc_temp_node_offsets(n.weight);
+ if (!tmp_array) {
+ malloced = true;
+ XMALLOC_N(n.weight, tmp_array);
+ }
+ this->fill_array_with_subtree_offsets(tmp_array, *subtree);
+ this->rebuild_subtree_from_offsets(subtree, tmp_array, n.weight);
+ if (malloced) toku_free(tmp_array);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::copyout(uint32_t *const outlen, dmtdata_t *const out, const dmt_node *const n) {
+ if (outlen) {
+ *outlen = n->value_length;
+ }
+ if (out) {
+ *out = n->value;
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::copyout(uint32_t *const outlen, dmtdata_t **const out, dmt_node *const n) {
+ if (outlen) {
+ *outlen = n->value_length;
+ }
+ if (out) {
+ *out = &n->value;
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::copyout(uint32_t *const outlen, dmtdata_t *const out, const uint32_t len, const dmtdata_t *const stored_value_ptr) {
+ if (outlen) {
+ *outlen = len;
+ }
+ if (out) {
+ *out = *stored_value_ptr;
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::copyout(uint32_t *const outlen, dmtdata_t **const out, const uint32_t len, dmtdata_t *const stored_value_ptr) {
+ if (outlen) {
+ *outlen = len;
+ }
+ if (out) {
+ *out = stored_value_ptr;
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_zero_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ uint32_t min = 0;
+ uint32_t limit = this->d.a.num_values;
+ uint32_t best_pos = subtree::NODE_NULL;
+ uint32_t best_zero = subtree::NODE_NULL;
+
+ while (min!=limit) {
+ uint32_t mid = (min + limit) / 2;
+ int hv = h(this->value_length, *get_array_value(mid), extra);
+ if (hv<0) {
+ min = mid+1;
+ }
+ else if (hv>0) {
+ best_pos = mid;
+ limit = mid;
+ }
+ else {
+ best_zero = mid;
+ limit = mid;
+ }
+ }
+ if (best_zero!=subtree::NODE_NULL) {
+ //Found a zero
+ copyout(value_len, value, this->value_length, get_array_value(best_zero));
+ *idxp = best_zero;
+ return 0;
+ }
+ if (best_pos!=subtree::NODE_NULL) *idxp = best_pos;
+ else *idxp = this->d.a.num_values;
+ return DB_NOTFOUND;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_zero(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ if (subtree.is_null()) {
+ *idxp = 0;
+ return DB_NOTFOUND;
+ }
+ dmt_node &n = get_node(subtree);
+ int hv = h(n.value_length, n.value, extra);
+ if (hv<0) {
+ int r = this->find_internal_zero<dmtcmp_t, h>(n.right, extra, value_len, value, idxp);
+ *idxp += this->nweight(n.left)+1;
+ return r;
+ } else if (hv>0) {
+ return this->find_internal_zero<dmtcmp_t, h>(n.left, extra, value_len, value, idxp);
+ } else {
+ int r = this->find_internal_zero<dmtcmp_t, h>(n.left, extra, value_len, value, idxp);
+ if (r==DB_NOTFOUND) {
+ *idxp = this->nweight(n.left);
+ copyout(value_len, value, &n);
+ r = 0;
+ }
+ return r;
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_plus_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ uint32_t min = 0;
+ uint32_t limit = this->d.a.num_values;
+ uint32_t best = subtree::NODE_NULL;
+
+ while (min != limit) {
+ const uint32_t mid = (min + limit) / 2;
+ const int hv = h(this->value_length, *get_array_value(mid), extra);
+ if (hv > 0) {
+ best = mid;
+ limit = mid;
+ } else {
+ min = mid + 1;
+ }
+ }
+ if (best == subtree::NODE_NULL) { return DB_NOTFOUND; }
+ copyout(value_len, value, this->value_length, get_array_value(best));
+ *idxp = best;
+ return 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_plus(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ if (subtree.is_null()) {
+ return DB_NOTFOUND;
+ }
+ dmt_node & n = get_node(subtree);
+ int hv = h(n.value_length, n.value, extra);
+ int r;
+ if (hv > 0) {
+ r = this->find_internal_plus<dmtcmp_t, h>(n.left, extra, value_len, value, idxp);
+ if (r == DB_NOTFOUND) {
+ *idxp = this->nweight(n.left);
+ copyout(value_len, value, &n);
+ r = 0;
+ }
+ } else {
+ r = this->find_internal_plus<dmtcmp_t, h>(n.right, extra, value_len, value, idxp);
+ if (r == 0) {
+ *idxp += this->nweight(n.left) + 1;
+ }
+ }
+ return r;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_minus_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ uint32_t min = 0;
+ uint32_t limit = this->d.a.num_values;
+ uint32_t best = subtree::NODE_NULL;
+
+ while (min != limit) {
+ const uint32_t mid = (min + limit) / 2;
+ const int hv = h(this->value_length, *get_array_value(mid), extra);
+ if (hv < 0) {
+ best = mid;
+ min = mid + 1;
+ } else {
+ limit = mid;
+ }
+ }
+ if (best == subtree::NODE_NULL) { return DB_NOTFOUND; }
+ copyout(value_len, value, this->value_length, get_array_value(best));
+ *idxp = best;
+ return 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_minus(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ if (subtree.is_null()) {
+ return DB_NOTFOUND;
+ }
+ dmt_node & n = get_node(subtree);
+ int hv = h(n.value_length, n.value, extra);
+ if (hv < 0) {
+ int r = this->find_internal_minus<dmtcmp_t, h>(n.right, extra, value_len, value, idxp);
+ if (r == 0) {
+ *idxp += this->nweight(n.left) + 1;
+ } else if (r == DB_NOTFOUND) {
+ *idxp = this->nweight(n.left);
+ copyout(value_len, value, &n);
+ r = 0;
+ }
+ return r;
+ } else {
+ return this->find_internal_minus<dmtcmp_t, h>(n.left, extra, value_len, value, idxp);
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+uint32_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_fixed_length(void) const {
+ return this->values_same_size ? this->value_length : 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+uint32_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_fixed_length_alignment_overhead(void) const {
+ return this->values_same_size ? align(this->value_length) - this->value_length : 0;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+bool dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::value_length_is_fixed(void) const {
+ return this->values_same_size;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::serialize_values(uint32_t expected_unpadded_memory, struct wbuf *wb) const {
+ invariant(this->is_array);
+ invariant(this->values_same_size);
+ const uint8_t pad_bytes = get_fixed_length_alignment_overhead();
+ const uint32_t fixed_len = this->value_length;
+ const uint32_t fixed_aligned_len = align(this->value_length);
+ paranoid_invariant(expected_unpadded_memory == this->d.a.num_values * this->value_length);
+ paranoid_invariant(toku_mempool_get_used_size(&this->mp) >=
+ expected_unpadded_memory + pad_bytes * this->d.a.num_values);
+ if (this->d.a.num_values == 0) {
+ // Nothing to serialize
+ } else if (pad_bytes == 0) {
+ // Basically a memcpy
+ wbuf_nocrc_literal_bytes(wb, get_array_value(0), expected_unpadded_memory);
+ } else {
+ uint8_t* const dest = wbuf_nocrc_reserve_literal_bytes(wb, expected_unpadded_memory);
+ const uint8_t* const src = reinterpret_cast<uint8_t*>(get_array_value(0));
+ //TODO(leif) maybe look at vectorization here
+ for (uint32_t i = 0; i < this->d.a.num_values; i++) {
+ memcpy(&dest[i*fixed_len], &src[i*fixed_aligned_len], fixed_len);
+ }
+ }
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::create(uint32_t _max_values, uint32_t _max_value_bytes) {
+ this->max_values = _max_values;
+ this->max_value_bytes = _max_value_bytes;
+ this->temp.create();
+ paranoid_invariant_null(toku_mempool_get_base(&this->temp.mp));
+ this->temp_valid = true;
+ this->sorted_node_offsets = nullptr;
+ // Include enough space for alignment padding
+ size_t initial_space = (ALIGNMENT - 1) * _max_values + _max_value_bytes;
+
+ toku_mempool_construct(&this->temp.mp, initial_space); // Adds 25%
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::append(const dmtwriter_t &value) {
+ paranoid_invariant(this->temp_valid);
+ //NOTE: Always use d.a.num_values for size because we have not yet created root.
+ if (this->temp.values_same_size && (this->temp.d.a.num_values == 0 || value.get_size() == this->temp.value_length)) {
+ temp.insert_at_array_end<false>(value);
+ return;
+ }
+ if (this->temp.is_array) {
+ // Convert to tree format (without weights and linkage)
+ XMALLOC_N(this->max_values, this->sorted_node_offsets);
+
+ // Include enough space for alignment padding
+ size_t mem_needed = (ALIGNMENT - 1 + __builtin_offsetof(dmt_node, value)) * max_values + max_value_bytes;
+ struct mempool old_mp = this->temp.mp;
+
+ const uint32_t num_values = this->temp.d.a.num_values;
+ toku_mempool_construct(&this->temp.mp, mem_needed);
+
+ // Copy over and get node_offsets
+ for (uint32_t i = 0; i < num_values; i++) {
+ dmtwriter_t writer(this->temp.value_length, this->temp.get_array_value_internal(&old_mp, i));
+ this->sorted_node_offsets[i] = this->temp.node_malloc_and_set_value(writer);
+ }
+ this->temp.is_array = false;
+ this->temp.values_same_size = false;
+ this->temp.value_length = 0;
+ toku_mempool_destroy(&old_mp);
+ }
+ paranoid_invariant(!this->temp.is_array);
+ this->sorted_node_offsets[this->temp.d.a.num_values++] = this->temp.node_malloc_and_set_value(value);
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+bool dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::value_length_is_fixed(void) {
+ paranoid_invariant(this->temp_valid);
+ return this->temp.values_same_size;
+}
+
+template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
+void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::build(dmt<dmtdata_t, dmtdataout_t, dmtwriter_t> *dest) {
+ invariant(this->temp_valid);
+ //NOTE: Always use d.a.num_values for size because we have not yet created root.
+ invariant(this->temp.d.a.num_values <= this->max_values);
+ // Memory invariant is taken care of incrementally (during append())
+
+ if (!this->temp.is_array) {
+ invariant_notnull(this->sorted_node_offsets);
+ this->temp.rebuild_subtree_from_offsets(&this->temp.d.t.root, this->sorted_node_offsets, this->temp.d.a.num_values);
+ toku_free(this->sorted_node_offsets);
+ this->sorted_node_offsets = nullptr;
+ }
+ paranoid_invariant_null(this->sorted_node_offsets);
+
+ const size_t used = toku_mempool_get_used_size(&this->temp.mp);
+ const size_t allocated = toku_mempool_get_size(&this->temp.mp);
+ // We want to use no more than (about) the actual used space + 25% overhead for mempool growth.
+ // When we know the elements are fixed-length, we use the better dmt constructor.
+ // In practice, as of Jan 2014, we use the builder in two cases:
+ // - When we know the elements are not fixed-length.
+ // - During upgrade of a pre version 26 basement node.
+ // During upgrade, we will probably wildly overallocate because we don't account for the values that aren't stored in the dmt, so here we want to shrink the mempool.
+ // When we know the elements are not fixed-length, we still know how much memory they occupy in total, modulo alignment, so we want to allow for mempool overhead and worst-case alignment overhead, and not shrink the mempool.
+ const size_t max_allowed = used + (ALIGNMENT-1) * this->temp.size();
+ const size_t max_allowed_with_mempool_overhead = max_allowed + max_allowed / 4;
+ //TODO(leif): get footprint calculation correct (under jemalloc) and add some form of footprint constraint
+ if (allocated > max_allowed_with_mempool_overhead) {
+ // Reallocate smaller mempool to save memory
+ invariant_zero(toku_mempool_get_frag_size(&this->temp.mp));
+ struct mempool new_mp;
+ toku_mempool_construct(&new_mp, used);
+ void * newbase = toku_mempool_malloc(&new_mp, used);
+ invariant_notnull(newbase);
+ memcpy(newbase, toku_mempool_get_base(&this->temp.mp), used);
+ toku_mempool_destroy(&this->temp.mp);
+ this->temp.mp = new_mp;
+ }
+
+ *dest = this->temp;
+ this->temp_valid = false;
+
+}
+} // namespace toku
diff --git a/storage/tokudb/PerconaFT/util/dmt.h b/storage/tokudb/PerconaFT/util/dmt.h
new file mode 100644
index 00000000..99be296d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/dmt.h
@@ -0,0 +1,675 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <vector>
+
+#include "portability/memory.h"
+#include "portability/toku_portability.h"
+#include "portability/toku_race_tools.h"
+#include "portability/toku_stdint.h"
+
+#include "ft/serialize/wbuf.h"
+#include "util/growable_array.h"
+#include "util/mempool.h"
+
+namespace toku {
+typedef uint32_t node_offset;
+
+
+/**
+ * Dynamic Order Maintenance Tree (DMT)
+ *
+ * Maintains a collection of totally ordered values, where each value has weight 1.
+ * A DMT supports variable sized values.
+ * The DMT is a mutable datatype.
+ *
+ * The Abstraction:
+ *
+ * A DMT is a vector of values, $V$, where $|V|$ is the length of the vector.
+ * The vector is numbered from $0$ to $|V|-1$.
+ *
+ * We can create a new DMT, which is the empty vector.
+ *
+ * We can insert a new element $x$ into slot $i$, changing $V$ into $V'$ where
+ * $|V'|=1+|V|$ and
+ *
+ * V'_j = V_j if $j<i$
+ * x if $j=i$
+ * V_{j-1} if $j>i$.
+ *
+ * We can specify $i$ using a kind of function instead of as an integer.
+ * Let $b$ be a function mapping from values to nonzero integers, such that
+ * the signum of $b$ is monotonically increasing.
+ * We can specify $i$ as the minimum integer such that $b(V_i)>0$.
+ *
+ * We look up a value using its index, or using a Heaviside function.
+ * For lookups, we allow $b$ to be zero for some values, and again the signum of $b$ must be monotonically increasing.
+ * When looking up values, we can look up
+ * $V_i$ where $i$ is the minimum integer such that $b(V_i)=0$. (With a special return code if no such value exists.)
+ * (Rationale: Ordinarily we want $i$ to be unique. But for various reasons we want to allow multiple zeros, and we want the smallest $i$ in that case.)
+ * $V_i$ where $i$ is the minimum integer such that $b(V_i)>0$. (Or an indication that no such value exists.)
+ * $V_i$ where $i$ is the maximum integer such that $b(V_i)<0$. (Or an indication that no such value exists.)
+ *
+ * When looking up a value using a Heaviside function, we get the value and its index.
+ *
+ * Performance:
+ * Insertion and deletion should run with $O(\log |V|)$ time and $O(\log |V|)$ calls to the Heaviside function.
+ * The memory required is O(|V|).
+ *
+ * Usage:
+ * The dmt is templated by three parameters:
+ * - dmtdata_t is what will be stored within the dmt. These could be pointers or real data types (ints, structs).
+ * - dmtdataout_t is what will be returned by find and related functions. By default, it is the same as dmtdata_t, but you can set it to (dmtdata_t *).
+ * - dmtwriter_t is a class that effectively handles (de)serialization between the value stored in the dmt and outside the dmt.
+ * To create a dmt which will store "TXNID"s, for example, it is a good idea to typedef the template:
+ * typedef dmt<TXNID, TXNID, txnid_writer_t> txnid_dmt_t;
+ * If you are storing structs (or you want to edit what is stored), you may want to be able to get a pointer to the data actually stored in the dmt (see find_zero). To do this, use the second template parameter:
+ * typedef dmt<struct foo, struct foo *, foo_writer_t> foo_dmt_t;
+ */
+
+namespace dmt_internal {
+
+class subtree {
+private:
+ uint32_t m_index;
+public:
+ // The maximum mempool size for a dmt is 2**32-2
+ static const uint32_t NODE_NULL = UINT32_MAX;
+ inline void set_to_null(void) {
+ m_index = NODE_NULL;
+ }
+
+ inline bool is_null(void) const {
+ return NODE_NULL == this->get_offset();
+ }
+
+ inline node_offset get_offset(void) const {
+ return m_index;
+ }
+
+ inline void set_offset(node_offset index) {
+ paranoid_invariant(index != NODE_NULL);
+ m_index = index;
+ }
+} __attribute__((__packed__,__aligned__(4)));
+
+template<typename dmtdata_t>
+class dmt_node_templated {
+public:
+ uint32_t weight;
+ subtree left;
+ subtree right;
+ uint32_t value_length;
+ dmtdata_t value;
+} __attribute__((__aligned__(4))); //NOTE: we cannot use attribute packed or dmtdata_t will call copy constructors (dmtdata_t might not be packed by default)
+
+} // namespace dmt_internal
+
+using namespace toku::dmt_internal;
+
+// Each data type used in a dmt requires a dmt_writer class (allows you to insert/etc with dynamic sized types).
+// A dmt_writer can be thought of as a (de)serializer
+// There is no default implementation.
+// A dmtwriter instance handles reading/writing 'dmtdata_t's to/from the dmt.
+// The class must implement the following functions:
+// The size required in a dmt for the dmtdata_t represented:
+// size_t get_size(void) const;
+// Write the dmtdata_t to memory owned by a dmt:
+// void write_to(dmtdata_t *const dest) const;
+// Constructor (others are allowed, but this one is required)
+// dmtwriter(const uint32_t dmtdata_t_len, dmtdata_t *const src)
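+//
+// Illustrative sketch (not part of this header): a minimal dmtwriter for a
+// hypothetical fixed-size POD type my_val_t, following the interface above:
+//   class my_val_writer {
+//       const my_val_t *_src;
+//   public:
+//       size_t get_size(void) const { return sizeof(my_val_t); }
+//       void write_to(my_val_t *const dest) const { *dest = *_src; }
+//       my_val_writer(const uint32_t len, my_val_t *const src) : _src(src) {
+//           paranoid_invariant(len == sizeof(my_val_t));
+//       }
+//   };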
+
+template<typename dmtdata_t,
+ typename dmtdataout_t,
+ typename dmtwriter_t
+ >
+class dmt {
+private:
+ typedef dmt_node_templated<dmtdata_t> dmt_node;
+
+public:
+ static const uint8_t ALIGNMENT = 4;
+
+ class builder {
+ public:
+ void append(const dmtwriter_t &value);
+
+ // Create a dmt builder to build a dmt that will have at most n_values values and use
+ // at most n_value_bytes bytes in the mempool to store values (not counting node or alignment overhead).
+ void create(uint32_t n_values, uint32_t n_value_bytes);
+
+ bool value_length_is_fixed(void);
+
+ // Constructs a dmt that contains everything that was append()ed to this builder.
+ // Destroys this builder and frees associated memory.
+ void build(dmt<dmtdata_t, dmtdataout_t, dmtwriter_t> *dest);
+ private:
+ uint32_t max_values;
+ uint32_t max_value_bytes;
+ node_offset *sorted_node_offsets;
+ bool temp_valid;
+ dmt<dmtdata_t, dmtdataout_t, dmtwriter_t> temp;
+ };
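+
+ // Illustrative builder usage (a sketch; foo_dmt_t and foo_writer_t are the example
+ // typedefs from the header comment above, and the loop bounds are hypothetical):
+ //   foo_dmt_t::builder b;
+ //   b.create(n_values, total_value_bytes);
+ //   for (uint32_t i = 0; i < n_values; i++) {
+ //       b.append(foo_writer_t(lengths[i], &values[i]));  // append in final (sorted) order
+ //   }
+ //   foo_dmt_t d;
+ //   b.build(&d);  // the builder is no longer valid after build()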
+
+ /**
+ * Effect: Create an empty DMT.
+ * Performance: constant time.
+ */
+ void create(void);
+
+ /**
+ * Effect: Create a DMT containing values. The number of values is in numvalues.
+ * Each value is of a fixed (at runtime) length.
+ * mem contains the values in packed form (no alignment padding)
+ * Caller retains ownership of mem.
+ * Requires: this has not been created yet
+ * Rationale: Normally to insert N values takes O(N lg N) amortized time.
+ * If the N values are known in advance, are sorted, and
+ * the structure is empty, we can batch insert them much faster.
+ */
+ __attribute__((nonnull))
+ void create_from_sorted_memory_of_fixed_size_elements(
+ const void *mem,
+ const uint32_t numvalues,
+ const uint32_t mem_length,
+ const uint32_t fixed_value_length);
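+
+ // Illustrative sketch (assumes a hypothetical key_dmt_t storing fixed-size uint32_t keys,
+ // with the keys already sorted):
+ //   uint32_t keys[] = {1, 2, 3, 5, 8};
+ //   key_dmt_t d;   // hypothetical dmt<uint32_t, uint32_t, key_writer_t>; not yet created
+ //   d.create_from_sorted_memory_of_fixed_size_elements(keys, 5, sizeof(keys), sizeof(keys[0]));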
+
+ /**
+ * Effect: Creates a copy of a dmt.
+ * Creates this as the clone.
+ * Each element is copied directly. If they are pointers, the underlying data is not duplicated.
+ * Performance: O(memory) (essentially a memdup)
+ * The underlying structures are memcpy'd. Only the values themselves are copied (shallow copy)
+ */
+ void clone(const dmt &src);
+
+ /**
+ * Effect: Set the tree to be empty.
+ * Note: Will not reallocate or resize any memory.
+ * Note: If this dmt had variable sized elements, it will start tracking value sizes again (until it sees values of two different sizes)
+ * Performance: time=O(1)
+ */
+ void clear(void);
+
+ /**
+ * Effect: Destroy a DMT, freeing all its memory.
+ * If the values being stored are pointers, their underlying data is not freed.
+ * Those values may be freed before or after calling ::destroy()
+ * Rationale: Returns no values since free() cannot fail.
+ * Rationale: Does not free the underlying pointers to reduce complexity/maintain abstraction layer
+ * Performance: time=O(1)
+ */
+ void destroy(void);
+
+ /**
+ * Effect: return |this| (number of values stored in this dmt).
+ * Performance: time=O(1)
+ */
+ uint32_t size(void) const;
+
+ /**
+ * Effect: Serialize all values contained in this dmt into a packed form (no alignment padding).
+ * We serialize to wb. expected_unpadded_memory is the size of memory reserved in the wbuf
+ * for serialization. (We assert that serialization requires exactly the expected amount)
+ * Requires:
+ * ::prepare_for_serialize() has been called and no non-const functions have been called since.
+ * This dmt has fixed-length values and is in array form.
+ * Performance:
+ * O(memory)
+ */
+ void serialize_values(uint32_t expected_unpadded_memory, struct wbuf *wb) const;
+
+ /**
+ * Effect: Insert value into the DMT.
+ * If there is some i such that $h(V_i, v)=0$ then returns DB_KEYEXIST.
+ * Otherwise, let i be the minimum value such that $h(V_i, v)>0$.
+ * If no such i exists, then let i be |V|
+ * Then this has the same effect as
+ * insert_at(tree, value, i);
+ * If idx!=NULL then i is stored in *idx
+ * Requires: The signum of h must be monotonically increasing.
+ * Returns:
+ * 0 success
+ * DB_KEYEXIST the key is present (h was equal to zero for some value)
+ * On nonzero return, dmt is unchanged.
+ * Performance: time=O(\log N) amortized.
+ * Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
+ */
+ template<typename dmtcmp_t, int (*h)(const uint32_t size, const dmtdata_t &, const dmtcmp_t &)>
+ int insert(const dmtwriter_t &value, const dmtcmp_t &v, uint32_t *const idx);
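+
+ // Illustrative heaviside sketch for insert (assumes a hypothetical dmt of uint64_t keys
+ // kept in ascending order):
+ //   static int cmp_key(const uint32_t len, const uint64_t &stored, const uint64_t &key) {
+ //       (void) len;
+ //       return (stored < key) ? -1 : (stored > key) ? +1 : 0;
+ //   }
+ //   // d.insert<uint64_t, cmp_key>(writer, key, &idx) returns DB_KEYEXIST if key is present.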
+
+ /**
+ * Effect: Increases indexes of all items at slot >= idx by 1.
+ * Insert value into the position at idx.
+ * Returns:
+ * 0 success
+ * EINVAL if idx > this->size()
+ * On error, dmt is unchanged.
+ * Performance: time=O(\log N) amortized time.
+ * Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
+ */
+ int insert_at(const dmtwriter_t &value, const uint32_t idx);
+
+ /**
+ * Effect: Delete the item in slot idx.
+ * Decreases indexes of all items at slot > idx by 1.
+ * Returns
+ * 0 success
+ * EINVAL if idx>=this->size()
+ * On error, dmt is unchanged.
+ * Rationale: To delete an item, first find its index using find or find_zero, then delete it.
+ * Performance: time=O(\log N) amortized.
+ */
+ int delete_at(const uint32_t idx);
+
+ /**
+ * Effect: Iterate over the values of the dmt, from left to right, calling f on each value.
+ * The first argument passed to f is a ref-to-const of the value stored in the dmt.
+ * The second argument passed to f is the index of the value.
+ * The third argument passed to f is iterate_extra.
+ * The indices run from 0 (inclusive) to this->size() (exclusive).
+ * Requires: f != NULL
+ * Returns:
+ * If f ever returns nonzero, then the iteration stops, and the value returned by f is returned by iterate.
+ * If f always returns zero, then iterate returns 0.
+ * Requires: Don't modify the dmt while running. (E.g., f may not insert or delete values from the dmt.)
+ * Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in the dmt.
+ * Rationale: Although the functional iterator requires defining another function (as opposed to C++ style iterator), it is much easier to read.
+ * Rationale: We may at some point use functors, but for now this is a smaller change from the old DMT.
+ */
+ template<typename iterate_extra_t,
+ int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate(iterate_extra_t *const iterate_extra) const;
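+
+ // Illustrative iterate callback sketch (assumes a hypothetical dmt of uint64_t values and a
+ // caller-defined struct sum_extra { uint64_t total; }):
+ //   static int sum_cb(const uint32_t len, const uint64_t &v, const uint32_t idx, sum_extra *const extra) {
+ //       (void) len; (void) idx;
+ //       extra->total += v;
+ //       return 0;  // keep going; a nonzero return stops iteration and is returned by iterate()
+ //   }
+ //   // sum_extra e = {0}; d.iterate<sum_extra, sum_cb>(&e);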
+
+ /**
+ * Effect: Iterate over the values of the dmt, from left to right, calling f on each value.
+ * The first argument passed to f is a ref-to-const of the value stored in the dmt.
+ * The second argument passed to f is the index of the value.
+ * The third argument passed to f is iterate_extra.
+ * The indices run from 0 (inclusive) to this->size() (exclusive).
+ * We will iterate only over [left,right)
+ *
+ * Requires: left <= right
+ * Requires: f != NULL
+ * Returns:
+ * EINVAL if right > this->size()
+ * If f ever returns nonzero, then the iteration stops, and the value returned by f is returned by iterate_on_range.
+ * If f always returns zero, then iterate_on_range returns 0.
+ * Requires: Don't modify the dmt while running. (E.g., f may not insert or delete values from the dmt.)
+ * Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in the dmt.
+ * Rationale: Although the functional iterator requires defining another function (as opposed to C++ style iterator), it is much easier to read.
+ */
+ template<typename iterate_extra_t,
+ int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const;
+
+ // Attempt to verify this dmt is well formed. (Crashes/asserts/aborts if not well formed)
+ void verify(void) const;
+
+ /**
+ * Effect: Iterate over the values of the dmt, from left to right, calling f on each value.
+ * The first argument passed to f is a pointer to the value stored in the dmt.
+ * The second argument passed to f is the index of the value.
+ * The third argument passed to f is iterate_extra.
+ * The indices run from 0 (inclusive) to this->size() (exclusive).
+ * Requires: same as for iterate()
+ * Returns: same as for iterate()
+ * Performance: same as for iterate()
+ * Rationale: In general, most iterators should use iterate() since they should not modify the data stored in the dmt. This function is for iterators which need to modify values (for example, free_items).
+ * Rationale: We assume if you are transforming the data in place, you want to do it to everything at once, so there is not yet an iterate_on_range_ptr (but there could be).
+ */
+ template<typename iterate_extra_t,
+ int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void iterate_ptr(iterate_extra_t *const iterate_extra);
+
+ /**
+ * Effect: Set *value=V_idx
+ * Returns
+ * 0 success
+ * EINVAL if idx>=this->size()
+ * On nonzero return, *value is unchanged
+ * Performance: time=O(\log N)
+ */
+ int fetch(const uint32_t idx, uint32_t *const value_size, dmtdataout_t *const value) const;
+
+ /**
+ * Effect: Find the smallest i such that h(V_i, extra)>=0
+ * If there is such an i and h(V_i,extra)==0 then set *idxp=i, set *value = V_i, and return 0.
+ * If there is such an i and h(V_i,extra)>0 then set *idxp=i and return DB_NOTFOUND.
+ * If there is no such i then set *idxp=this->size() and return DB_NOTFOUND.
+ * Note: value is of type dmtdataout_t, which may be of type (dmtdata_t) or (dmtdata_t *) but is fixed by the instantiation.
+ * If it is the value type, then the value is copied out (even if the value type is a pointer to something else)
+ * If it is the pointer type, then *value is set to a pointer to the data within the dmt.
+ * This is determined by the type of the dmt as initially declared.
+ * If the dmt is declared as dmt<foo_t>, then foo_t's will be stored and foo_t's will be returned by find and related functions.
+ * If the dmt is declared as dmt<foo_t, foo_t *>, then foo_t's will be stored, and pointers to the stored items will be returned by find and related functions.
+ * Rationale:
+ * Structs too small for malloc should be stored directly in the dmt.
+ * These structs may need to be edited as they exist inside the dmt, so we need a way to get a pointer within the dmt.
+ * Using separate functions for returning pointers and values increases code duplication and reduces type-checking.
+ * That also reduces the ability of the creator of a data structure to give advice to its future users.
+ * Slight overloading in this case seemed to provide a better API and better type checking.
+ */
+ template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+ int find_zero(const dmtcmp_t &extra, uint32_t *const value_size, dmtdataout_t *const value, uint32_t *const idxp) const;
+
+ /**
+ * Effect:
+ * If direction >0 then find the smallest i such that h(V_i,extra)>0.
+ * If direction <0 then find the largest i such that h(V_i,extra)<0.
+ * (Direction may not be equal to zero.)
+ * If value!=NULL then store V_i in *value
+ * If idxp!=NULL then store i in *idxp.
+ * Requires: The signum of h is monotonically increasing.
+ * Returns
+ * 0 success
+ * DB_NOTFOUND no such value is found.
+ * On nonzero return, *value and *idxp are unchanged
+ * Performance: time=O(\log N)
+ * Rationale:
+ * Here's how to use the find function to find various things
+ * Cases for find:
+ * find first value: ( h(v)=+1, direction=+1 )
+ * find last value ( h(v)=-1, direction=-1 )
+ * find first X ( h(v)=(v< x) ? -1 : 1 direction=+1 )
+ * find last X ( h(v)=(v<=x) ? -1 : 1 direction=-1 )
+ * find X or successor to X ( same as find first X. )
+ *
+ * Rationale: To help understand heaviside functions and the behavior of find:
+ * There are 7 kinds of heaviside functions.
+ * The signum of h must be monotonically increasing.
+ * Given a function of the following form, A is the element
+ * returned for direction>0, B is the element returned
+ * for direction<0, C is the element returned for
+ * direction==0 (see find_zero) (with a return of 0), and D is the element
+ * returned for direction==0 (see find_zero) with a return of DB_NOTFOUND.
+ * If any of A, B, or C are not found, then asking for the
+ * associated direction will return DB_NOTFOUND.
+ * See find_zero for more information.
+ *
+ * Let the following represent the signum of the heaviside function.
+ *
+ * -...-
+ * A
+ * D
+ *
+ * +...+
+ * B
+ * D
+ *
+ * 0...0
+ * C
+ *
+ * -...-0...0
+ * AC
+ *
+ * 0...0+...+
+ * C B
+ *
+ * -...-+...+
+ * AB
+ * D
+ *
+ * -...-0...0+...+
+ * AC B
+ */
+ template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+ int find(const dmtcmp_t &extra, int direction, uint32_t *const value_size, dmtdataout_t *const value, uint32_t *const idxp) const;
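+
+ // Illustrative sketch (assumes a hypothetical dmt of uint64_t keys): "find first value >= x"
+ // uses h(v) = (v < x) ? -1 : +1 with direction = +1, per the table above:
+ //   static int h_ge(const uint32_t len, const uint64_t &stored, const uint64_t &x) {
+ //       (void) len;
+ //       return (stored < x) ? -1 : +1;
+ //   }
+ //   // int r = d.find<uint64_t, h_ge>(x, +1, &len, &val, &idx);  // DB_NOTFOUND if all values < x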
+
+ /**
+ * Effect: Return the size (in bytes) of the dmt, as it resides in main memory.
+ * If the data stored are pointers, don't include the size of what they all point to.
+ * //TODO(leif or yoni): (maybe rename and) return memory footprint instead of allocated size
+ */
+ size_t memory_size(void);
+
+ // Returns whether all values in the dmt are known to be the same size.
+ // Note:
+ // There are no false positives, but false negatives are allowed.
+ // A false negative can happen if this dmt had 2 (or more) different size values,
+ // and then enough were deleted so that all the remaining ones are the same size.
+ // Once that happens, this dmt will never again return true for this function unless/until
+ // ::clear() is called
+ bool value_length_is_fixed(void) const;
+
+
+ // If this dmt is empty, return value is undefined.
+ // else if value_length_is_fixed() then it returns the fixed length.
+ // else returns 0
+ uint32_t get_fixed_length(void) const;
+
+ // Preprocesses the dmt so that serialization can happen quickly.
+ // After this call, serialize_values() can be called but no other mutator function can be called in between.
+ void prepare_for_serialize(void);
+
+private:
+ // Do a bit of verification that subtree and nodes act like packed C structs and do not introduce unnecessary padding for alignment.
+ ENSURE_POD(subtree);
+ static_assert(ALIGNMENT > 0, "ALIGNMENT <= 0");
+ static_assert((ALIGNMENT & (ALIGNMENT - 1)) == 0, "ALIGNMENT not a power of 2");
+ static_assert(sizeof(dmt_node) - sizeof(dmtdata_t) == __builtin_offsetof(dmt_node, value), "value is not last field in node");
+ static_assert(4 * sizeof(uint32_t) == __builtin_offsetof(dmt_node, value), "dmt_node is padded");
+ static_assert(__builtin_offsetof(dmt_node, value) % ALIGNMENT == 0, "dmt_node requires padding for alignment");
+ ENSURE_POD(dmt_node);
+
+ struct dmt_array {
+ uint32_t num_values;
+ };
+
+ struct dmt_tree {
+ subtree root;
+ };
+
+ /*
+ Relationship between values_same_size, d.a.num_values, value_length, is_array:
+ In an empty dmt:
+ is_array is true
+ values_same_size is true
+ value_length is undefined
+ d.a.num_values is 0
+ In a non-empty array dmt:
+ is_array is true
+ values_same_size is true
+ value_length is defined
+ d.a.num_values > 0
+ In a non-empty tree dmt:
+ is_array is false
+ values_same_size is true iff all values have been the same size since the last time the dmt turned into a tree.
+ value_length is defined iff values_same_size is true
+ d.a.num_values is undefined (the memory is used for the tree)
+ Note that in tree form, the dmt keeps track of if all values are the same size until the first time they are not.
+ 'values_same_size' will not become true again (even if we change all values to be the same size)
+ until/unless the dmt becomes empty, at which point it becomes an array again.
+ */
+ bool values_same_size;
+ uint32_t value_length; // valid iff values_same_size is true.
+ struct mempool mp;
+ bool is_array;
+ union {
+ struct dmt_array a;
+ struct dmt_tree t;
+ } d;
+
+ // Returns pad bytes per element (for alignment) or 0 if not fixed length.
+ uint32_t get_fixed_length_alignment_overhead(void) const;
+
+ void verify_internal(const subtree &subtree, std::vector<bool> *touched) const;
+
+ // Retrieves the node for a given subtree.
+ // Requires: !subtree.is_null()
+ dmt_node & get_node(const subtree &subtree) const;
+
+ // Retrieves the node at a given offset in the mempool.
+ dmt_node & get_node(const node_offset offset) const;
+
+ // Returns the weight of a subtree rooted at st.
+ // if st.is_null(), returns 0
+ // Perf: O(1)
+ uint32_t nweight(const subtree &st) const;
+
+ // Allocates space for a node (in the mempool) and uses the dmtwriter to write the value into the node
+ node_offset node_malloc_and_set_value(const dmtwriter_t &value);
+
+ // Uses the dmtwriter to write a value into node n
+ void node_set_value(dmt_node *n, const dmtwriter_t &value);
+
+ // (mempool-)free the memory for a node
+ void node_free(const subtree &st);
+
+ // Effect: Resizes the mempool (holding the array) if necessary to hold one more item of length: this->value_length
+ // Requires:
+ // This dmt is in array form (and thus this->values_same_size holds)
+ void maybe_resize_array_for_insert(void);
+
+ // Effect: Converts a dmt from array form to tree form.
+ // Perf: O(n)
+ // Note: This does not clear the 'this->values_same_size' bit
+ void convert_to_tree(void);
+
+ // Effect: Resizes the mempool holding a tree if necessary. If value==nullptr then it may shrink if overallocated,
+ // otherwise resize only happens if there is not enough free space for an insert of value
+ void maybe_resize_tree(const dmtwriter_t * value);
+
+ // Returns true if the tree rooted at st would need rebalance after adding
+ // leftmod to the left subtree and rightmod to the right subtree
+ bool will_need_rebalance(const subtree &st, const int leftmod, const int rightmod) const;
+
+ __attribute__((nonnull))
+ void insert_internal(subtree *const subtreep, const dmtwriter_t &value, const uint32_t idx, subtree **const rebalance_subtree);
+
+ template<bool with_resize>
+ int insert_at_array_end(const dmtwriter_t& value_in);
+
+ dmtdata_t * alloc_array_value_end(void);
+
+ dmtdata_t * get_array_value(const uint32_t idx) const;
+
+ dmtdata_t * get_array_value_internal(const struct mempool *mempool, const uint32_t idx) const;
+
+ void convert_from_array_to_tree(void);
+
+ void convert_from_tree_to_array(void);
+
+ void delete_internal(subtree *const subtreep, const uint32_t idx, subtree *const subtree_replace, subtree **const rebalance_subtree);
+
+ template<typename iterate_extra_t,
+ int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate_internal_array(const uint32_t left, const uint32_t right,
+ iterate_extra_t *const iterate_extra) const;
+
+ template<typename iterate_extra_t,
+ int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void iterate_ptr_internal(const uint32_t left, const uint32_t right,
+ const subtree &subtree, const uint32_t idx,
+ iterate_extra_t *const iterate_extra);
+
+ template<typename iterate_extra_t,
+ int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void iterate_ptr_internal_array(const uint32_t left, const uint32_t right,
+ iterate_extra_t *const iterate_extra);
+
+ template<typename iterate_extra_t,
+ int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate_internal(const uint32_t left, const uint32_t right,
+ const subtree &subtree, const uint32_t idx,
+ iterate_extra_t *const iterate_extra) const;
+
+ void fetch_internal_array(const uint32_t i, uint32_t *const value_len, dmtdataout_t *const value) const;
+
+ void fetch_internal(const subtree &subtree, const uint32_t i, uint32_t *const value_len, dmtdataout_t *const value) const;
+
+ __attribute__((nonnull))
+ void fill_array_with_subtree_offsets(node_offset *const array, const subtree &subtree) const;
+
+ __attribute__((nonnull))
+ void rebuild_subtree_from_offsets(subtree *const subtree, const node_offset *const offsets, const uint32_t numvalues);
+
+ __attribute__((nonnull))
+ void rebalance(subtree *const subtree);
+
+ static void copyout(uint32_t *const outlen, dmtdata_t *const out, const dmt_node *const n);
+
+ static void copyout(uint32_t *const outlen, dmtdata_t **const out, dmt_node *const n);
+
+ static void copyout(uint32_t *const outlen, dmtdata_t *const out, const uint32_t len, const dmtdata_t *const stored_value_ptr);
+
+ static void copyout(uint32_t *const outlen, dmtdata_t **const out, const uint32_t len, dmtdata_t *const stored_value_ptr);
+
+ template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+ int find_internal_zero_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const;
+
+ template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+ int find_internal_zero(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const;
+
+ template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+ int find_internal_plus_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const;
+
+ template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+ int find_internal_plus(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const;
+
+ template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+ int find_internal_minus_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const;
+
+ template<typename dmtcmp_t,
+ int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
+ int find_internal_minus(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const;
+
+ // Allocate memory for an array: node_offset[num_idx] from pre-allocated contiguous free space in the mempool.
+ // If there is not enough space, returns nullptr.
+ node_offset* alloc_temp_node_offsets(uint32_t num_idxs);
+
+ // Returns the aligned size of x.
+ // If x % ALIGNMENT == 0, returns x
+ // Otherwise returns x + (ALIGNMENT - (x % ALIGNMENT)).
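+ // For example (illustrative; ALIGNMENT == 4 is only an assumption for the example):
+ // align(5) == 8, align(8) == 8, align(9) == 12.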
+ uint32_t align(const uint32_t x) const;
+};
+
+} // namespace toku
+
+// include the implementation here
+#include "dmt.cc"
+
diff --git a/storage/tokudb/PerconaFT/util/doubly_linked_list.h b/storage/tokudb/PerconaFT/util/doubly_linked_list.h
new file mode 100644
index 00000000..25ddaaa3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/doubly_linked_list.h
@@ -0,0 +1,174 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+//******************************************************************************
+//
+// Overview: A doubly linked list with elements of type T.
+// Each element that wants to be put into the list provides a
+// LinkedListElement<T> as well as a pointer to the object of type T.
+// Typically, the user embeds the linked list element into the object itself,
+// for example as
+// struct foo {
+// toku::LinkedListElement<struct foo *> linked_list_elt;
+// ... other elements of foo
+// };
+// then when inserting foo into a list defined as
+// toku::DoublyLinkedList<struct foo *> list_of_foos;
+// you write
+// struct foo f;
+// list_of_foos.insert(&f.linked_list_elt, &f);
+//
+// Operations: Constructors and destructors are provided (they don't
+// need to do anything but fill in a field) for the DoublyLinkedList.
+// Operations are provided to insert an element, to remove it, and to pop
+// an element off the list.
+// Also a LinkedListElement class is provided with a method to get a
+// pointer to the object of type T.
+//******************************************************************************
+
+#include <stdbool.h>
+#include <portability/toku_assert.h>
+
+namespace toku {
+
+template<typename T> class DoublyLinkedList;
+
+template<typename T> class LinkedListElement {
+ friend class DoublyLinkedList<T>;
+ private:
+ T container;
+ LinkedListElement<T> *prev, *next;
+ public:
+ T get_container(void) {
+ return container;
+ }
+};
+
+template<typename T> class DoublyLinkedList {
+ public:
+ void init (void);
+ // Effect: Initialize a doubly linked list (to be empty).
+
+ void insert(LinkedListElement<T> *ll_elt, T container);
+ // Effect: Add an item to a linked list.
+ // Implementation note: Push the item to the head of the list.
+
+ void remove(LinkedListElement<T> *ll_elt);
+ // Effect: Remove an item from a linked list.
+ // Requires: The item is in the list identified by head.
+
+ bool pop(LinkedListElement<T> **ll_eltp);
+ // Effect: if the list is empty, return false.
+ // Otherwise return true and set *ll_eltp to the first item, and remove that item from the list.
+
+ template<typename extra_t> int iterate(int (*fun)(T container, extra_t extra), extra_t extra);
+ // Effect: Call fun(e, extra) on every element of the linked list. If ever fun returns nonzero, then quit early and return that value.
+ // If fun always returns zero, then this function returns zero.
+
+ private:
+ LinkedListElement<T> *m_first;
+};
+
+//******************************************************************************
+// DoublyLinkedList implementation starts here.
+//******************************************************************************
+
+#include <stddef.h>
+
+
+
+template<typename T> void DoublyLinkedList<T>::init(void) {
+ m_first = NULL;
+}
+
+template<typename T> void DoublyLinkedList<T>::insert(LinkedListElement<T> *ll_elt, T container) {
+ LinkedListElement<T> *old_first = m_first;
+ ll_elt->container = container;
+ ll_elt->next = old_first;
+ ll_elt->prev = NULL;
+ if (old_first!=NULL) {
+ old_first->prev = ll_elt;
+ }
+ m_first = ll_elt;
+}
+
+template<typename T> void DoublyLinkedList<T>::remove(LinkedListElement<T> *ll_elt) {
+ LinkedListElement<T> *old_prev = ll_elt->prev;
+ LinkedListElement<T> *old_next = ll_elt->next;
+
+ if (old_prev==NULL) {
+ m_first = old_next;
+ } else {
+ old_prev->next = old_next;
+ }
+ if (old_next==NULL) {
+ /* nothing */
+ } else {
+ old_next->prev = old_prev;
+ }
+}
+
+template<typename T> bool DoublyLinkedList<T>::pop(LinkedListElement<T> **ll_eltp) {
+ LinkedListElement<T> *first = m_first;
+ if (first) {
+ invariant(first->prev==NULL);
+ m_first = first->next;
+ if (first->next) {
+ first->next->prev = NULL;
+ }
+ first->next=NULL;
+ *ll_eltp = first;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+template<typename T>
+template<typename extra_t>
+int DoublyLinkedList<T>::iterate(int (*fun)(T container, extra_t extra), extra_t extra) {
+ for (LinkedListElement<T> *le = m_first; le; le=le->next) {
+ int r = fun(le->container, extra);
+ if (r!=0) return r;
+ }
+ return 0;
+}
+
+}
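+
+// A minimal usage sketch (illustrative only; compiled out with #if 0). It shows
+// the embedded-element pattern from the overview above: the list element lives
+// inside the user's struct, insert() pushes onto the head, and pop() drains the
+// list.
+#if 0
+struct example_item {
+ toku::LinkedListElement<struct example_item *> elt;
+ int payload;
+};
+
+static void doubly_linked_list_example(void) {
+ toku::DoublyLinkedList<struct example_item *> list;
+ list.init();
+
+ struct example_item item;
+ item.payload = 42;
+ list.insert(&item.elt, &item); // pushed onto the head of the list
+
+ toku::LinkedListElement<struct example_item *> *e;
+ while (list.pop(&e)) {
+ struct example_item *p = e->get_container();
+ (void) p; // use the popped item here
+ }
+}
+#endif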
diff --git a/storage/tokudb/PerconaFT/util/fmutex.h b/storage/tokudb/PerconaFT/util/fmutex.h
new file mode 100644
index 00000000..fed1bc24
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/fmutex.h
@@ -0,0 +1,146 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <pthread.h>
+#include <assert.h>
+
+extern toku_instr_key *fmutex_cond_key;
+
+// fair mutex
+struct fmutex {
+ pthread_mutex_t mutex;
+ int mutex_held;
+ int num_want_mutex;
+ struct queue_item *wait_head;
+ struct queue_item *wait_tail;
+};
+
+// item on the queue
+struct queue_item {
+ pthread_cond_t *cond;
+ struct queue_item *next;
+};
+
+static void enq_item(struct fmutex *fm, struct queue_item *const item) {
+ assert(item->next == NULL);
+ if (fm->wait_tail != NULL) {
+ fm->wait_tail->next = item;
+ } else {
+ assert(fm->wait_head == NULL);
+ fm->wait_head = item;
+ }
+ fm->wait_tail = item;
+}
+
+static pthread_cond_t *deq_item(struct fmutex *fm) {
+ assert(fm->wait_head != NULL);
+ assert(fm->wait_tail != NULL);
+ struct queue_item *item = fm->wait_head;
+ fm->wait_head = fm->wait_head->next;
+ if (fm->wait_tail == item) {
+ fm->wait_tail = NULL;
+ }
+ return item->cond;
+}
+
+void fmutex_create(struct fmutex *fm) {
+ pthread_mutex_init(&fm->mutex, NULL);
+ fm->mutex_held = 0;
+ fm->num_want_mutex = 0;
+ fm->wait_head = NULL;
+ fm->wait_tail = NULL;
+}
+
+void fmutex_destroy(struct fmutex *fm) {
+ pthread_mutex_destroy(&fm->mutex);
+}
+
+// Note: unlike frwlock, the fair mutex manages its own internal pthread mutex;
+// callers do not hold any external lock around this call.
+void fmutex_lock(struct fmutex *fm) {
+ pthread_mutex_lock(&fm->mutex);
+
+ if (fm->mutex_held == 0 && fm->num_want_mutex == 0) {
+ // No one holds or wants the mutex. Grant it immediately.
+ fm->mutex_held = 1;
+ pthread_mutex_unlock(&fm->mutex);
+ return;
+ }
+
+ pthread_cond_t cond;
+ pthread_cond_init(&cond, NULL); // fmutex_cond_key instrumentation is not wired up here
+ struct queue_item item = {.cond = &cond, .next = NULL};
+ enq_item(fm, &item);
+
+ // Wait for our turn.
+ ++fm->num_want_mutex;
+ pthread_cond_wait(&cond, &fm->mutex);
+ pthread_cond_destroy(&cond);
+
+ // Now it's our turn.
+ assert(fm->num_want_mutex > 0);
+ assert(fm->mutex_held == 0);
+
+ // Not waiting anymore; grab the lock.
+ --fm->num_want_mutex;
+ fm->mutex_held = 1;
+
+ pthread_mutex_unlock(&fm->mutex);
+}
+
+void fmutex_mutex_unlock(struct fmutex *fm) {
+ pthread_mutex_lock(&fm->mutex);
+
+ fm->mutex_held = 0;
+ if (fm->wait_head == NULL) {
+ assert(fm->num_want_mutex == 0);
+ pthread_mutex_unlock(&fm->mutex);
+ return;
+ }
+ assert(fm->num_want_mutex > 0);
+
+ // Grant lock to the next waiter
+ pthread_cond_t *cond = deq_item(fm);
+ pthread_cond_signal(cond);
+
+ pthread_mutex_unlock(&fm->mutex);
+}
+
+int fmutex_users(const struct fmutex *fm) {
+ return fm->mutex_held + fm->num_want_mutex;
+}
+
+int fmutex_blocked_users(const struct fmutex *fm) {
+ return fm->num_want_mutex;
+}
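+
+// A minimal usage sketch (illustrative only; compiled out with #if 0): the fair
+// mutex queues waiters and hands the lock over in FIFO order.
+#if 0
+static void fmutex_example(void) {
+ struct fmutex fm;
+ fmutex_create(&fm);
+ fmutex_lock(&fm); // blocks until it is this caller's turn
+ // ... critical section ...
+ fmutex_mutex_unlock(&fm);
+ fmutex_destroy(&fm);
+}
+#endif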
diff --git a/storage/tokudb/PerconaFT/util/frwlock.cc b/storage/tokudb/PerconaFT/util/frwlock.cc
new file mode 100644
index 00000000..1f821fe5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/frwlock.cc
@@ -0,0 +1,351 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_assert.h>
+
+#include <util/context.h>
+#include <util/frwlock.h>
+
+toku_instr_key *frwlock_m_wait_read_key;
+
+namespace toku {
+
+ static __thread int thread_local_tid = -1;
+ static int get_local_tid() {
+ if (thread_local_tid == -1) {
+ thread_local_tid = toku_os_gettid();
+ }
+ return thread_local_tid;
+ }
+
+ void frwlock::init(toku_mutex_t *const mutex
+#if defined(TOKU_MYSQL_WITH_PFS)
+ ,
+ const toku_instr_key &rwlock_instr_key
+#endif
+ ) {
+ m_mutex = mutex;
+
+ m_num_readers = 0;
+ m_num_writers = 0;
+ m_num_want_write = 0;
+ m_num_want_read = 0;
+ m_num_signaled_readers = 0;
+ m_num_expensive_want_write = 0;
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_pthread_rwlock_init(rwlock_instr_key, &m_rwlock, nullptr);
+#endif
+ toku_cond_init(toku_uninstrumented, &m_wait_read, nullptr);
+ m_queue_item_read.cond = &m_wait_read;
+ m_queue_item_read.next = nullptr;
+ m_wait_read_is_in_queue = false;
+ m_current_writer_expensive = false;
+ m_read_wait_expensive = false;
+ m_current_writer_tid = -1;
+ m_blocking_writer_context_id = CTX_INVALID;
+
+ m_wait_head = nullptr;
+ m_wait_tail = nullptr;
+ }
+
+ void frwlock::deinit(void) {
+ toku_cond_destroy(&m_wait_read);
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_pthread_rwlock_destroy(&m_rwlock);
+#endif
+ }
+
+ bool frwlock::queue_is_empty(void) const { return m_wait_head == nullptr; }
+
+ void frwlock::enq_item(queue_item *const item) {
+ paranoid_invariant_null(item->next);
+ if (m_wait_tail != nullptr) {
+ m_wait_tail->next = item;
+ } else {
+ paranoid_invariant_null(m_wait_head);
+ m_wait_head = item;
+ }
+ m_wait_tail = item;
+ }
+
+ toku_cond_t *frwlock::deq_item(void) {
+ paranoid_invariant_notnull(m_wait_head);
+ paranoid_invariant_notnull(m_wait_tail);
+ queue_item *item = m_wait_head;
+ m_wait_head = m_wait_head->next;
+ if (m_wait_tail == item) {
+ m_wait_tail = nullptr;
+ }
+ return item->cond;
+ }
+
+ // Prerequisite: Holds m_mutex.
+ void frwlock::write_lock(bool expensive) {
+#if defined(TOKU_MYSQL_WITH_PFS)
+ /* Instrumentation start */
+ toku_rwlock_instrumentation rwlock_instr;
+ toku_instr_rwlock_wrlock_wait_start(
+ rwlock_instr, m_rwlock, __FILE__, __LINE__);
+#endif
+
+ toku_mutex_assert_locked(m_mutex);
+ if (this->try_write_lock(expensive)) {
+#if defined(TOKU_MYSQL_WITH_PFS)
+ /* Instrumentation end */
+ toku_instr_rwlock_wrlock_wait_end(rwlock_instr, 0);
+#endif
+ return;
+ }
+
+ toku_cond_t cond = TOKU_COND_INITIALIZER;
+ queue_item item = {.cond = &cond, .next = nullptr};
+ this->enq_item(&item);
+
+ // Wait for our turn.
+ ++m_num_want_write;
+ if (expensive) {
+ ++m_num_expensive_want_write;
+ }
+ if (m_num_writers == 0 && m_num_want_write == 1) {
+ // We are the first to want a write lock. No new readers can get the
+ // lock.
+ // Set our thread id and context for proper instrumentation.
+ // see: toku_context_note_frwlock_contention()
+ m_current_writer_tid = get_local_tid();
+ m_blocking_writer_context_id = toku_thread_get_context()->get_id();
+ }
+ toku_cond_wait(&cond, m_mutex);
+ toku_cond_destroy(&cond);
+
+ // Now it's our turn.
+ paranoid_invariant(m_num_want_write > 0);
+ paranoid_invariant_zero(m_num_readers);
+ paranoid_invariant_zero(m_num_writers);
+ paranoid_invariant_zero(m_num_signaled_readers);
+
+ // Not waiting anymore; grab the lock.
+ --m_num_want_write;
+ if (expensive) {
+ --m_num_expensive_want_write;
+ }
+ m_num_writers = 1;
+ m_current_writer_expensive = expensive;
+ m_current_writer_tid = get_local_tid();
+ m_blocking_writer_context_id = toku_thread_get_context()->get_id();
+
+#if defined(TOKU_MYSQL_WITH_PFS)
+ /* Instrumentation end */
+ toku_instr_rwlock_wrlock_wait_end(rwlock_instr, 0);
+#endif
+ }
+
+ bool frwlock::try_write_lock(bool expensive) {
+ toku_mutex_assert_locked(m_mutex);
+ if (m_num_readers > 0 || m_num_writers > 0 ||
+ m_num_signaled_readers > 0 || m_num_want_write > 0) {
+ return false;
+ }
+ // No one holds the lock. Grant the write lock.
+ paranoid_invariant_zero(m_num_want_write);
+ paranoid_invariant_zero(m_num_want_read);
+ m_num_writers = 1;
+ m_current_writer_expensive = expensive;
+ m_current_writer_tid = get_local_tid();
+ m_blocking_writer_context_id = toku_thread_get_context()->get_id();
+ return true;
+ }
+
+ void frwlock::read_lock(void) {
+#if defined(TOKU_MYSQL_WITH_PFS)
+ /* Instrumentation start */
+ toku_rwlock_instrumentation rwlock_instr;
+ toku_instr_rwlock_rdlock_wait_start(
+ rwlock_instr, m_rwlock, __FILE__, __LINE__);
+#endif
+ toku_mutex_assert_locked(m_mutex);
+ if (m_num_writers > 0 || m_num_want_write > 0) {
+ if (!m_wait_read_is_in_queue) {
+ // Throw the read cond_t onto the queue.
+ paranoid_invariant(m_num_signaled_readers == m_num_want_read);
+ m_queue_item_read.next = nullptr;
+ this->enq_item(&m_queue_item_read);
+ m_wait_read_is_in_queue = true;
+ paranoid_invariant(!m_read_wait_expensive);
+ m_read_wait_expensive = (m_current_writer_expensive ||
+ (m_num_expensive_want_write > 0));
+ }
+
+ // Note this contention event in engine status.
+ toku_context_note_frwlock_contention(
+ toku_thread_get_context()->get_id(),
+ m_blocking_writer_context_id);
+
+ // Wait for our turn.
+ ++m_num_want_read;
+ toku_cond_wait(&m_wait_read, m_mutex);
+
+ // Now it's our turn.
+ paranoid_invariant_zero(m_num_writers);
+ paranoid_invariant(m_num_want_read > 0);
+ paranoid_invariant(m_num_signaled_readers > 0);
+
+ // Not waiting anymore; grab the lock.
+ --m_num_want_read;
+ --m_num_signaled_readers;
+ }
+ ++m_num_readers;
+#if defined(TOKU_MYSQL_WITH_PFS)
+ /* Instrumentation end */
+ toku_instr_rwlock_rdlock_wait_end(rwlock_instr, 0);
+#endif
+ }
+
+ bool frwlock::try_read_lock(void) {
+ toku_mutex_assert_locked(m_mutex);
+ if (m_num_writers > 0 || m_num_want_write > 0) {
+ return false;
+ }
+ // No writer holds the lock.
+ // No writers are waiting.
+ // Grant the read lock.
+ ++m_num_readers;
+ return true;
+ }
+
+ void frwlock::maybe_signal_next_writer(void) {
+ if (m_num_want_write > 0 && m_num_signaled_readers == 0 &&
+ m_num_readers == 0) {
+ toku_cond_t *cond = this->deq_item();
+ paranoid_invariant(cond != &m_wait_read);
+ // Grant write lock to waiting writer.
+ paranoid_invariant(m_num_want_write > 0);
+ toku_cond_signal(cond);
+ }
+ }
+
+ void frwlock::read_unlock(void) {
+#ifdef TOKU_MYSQL_WITH_PFS
+ toku_instr_rwlock_unlock(m_rwlock);
+#endif
+ toku_mutex_assert_locked(m_mutex);
+ paranoid_invariant(m_num_writers == 0);
+ paranoid_invariant(m_num_readers > 0);
+ --m_num_readers;
+ this->maybe_signal_next_writer();
+ }
+
+ bool frwlock::read_lock_is_expensive(void) {
+ toku_mutex_assert_locked(m_mutex);
+ if (m_wait_read_is_in_queue) {
+ return m_read_wait_expensive;
+ } else {
+ return m_current_writer_expensive ||
+ (m_num_expensive_want_write > 0);
+ }
+ }
+
+ void frwlock::maybe_signal_or_broadcast_next(void) {
+ paranoid_invariant(m_num_signaled_readers == 0);
+
+ if (this->queue_is_empty()) {
+ paranoid_invariant(m_num_want_write == 0);
+ paranoid_invariant(m_num_want_read == 0);
+ return;
+ }
+ toku_cond_t *cond = this->deq_item();
+ if (cond == &m_wait_read) {
+ // Grant read locks to all waiting readers
+ paranoid_invariant(m_wait_read_is_in_queue);
+ paranoid_invariant(m_num_want_read > 0);
+ m_num_signaled_readers = m_num_want_read;
+ m_wait_read_is_in_queue = false;
+ m_read_wait_expensive = false;
+ toku_cond_broadcast(cond);
+ } else {
+ // Grant write lock to waiting writer.
+ paranoid_invariant(m_num_want_write > 0);
+ toku_cond_signal(cond);
+ }
+ }
+
+ void frwlock::write_unlock(void) {
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_instr_rwlock_unlock(m_rwlock);
+#endif
+ toku_mutex_assert_locked(m_mutex);
+ paranoid_invariant(m_num_writers == 1);
+ m_num_writers = 0;
+ m_current_writer_expensive = false;
+ m_current_writer_tid = -1;
+ m_blocking_writer_context_id = CTX_INVALID;
+ this->maybe_signal_or_broadcast_next();
+ }
+ bool frwlock::write_lock_is_expensive(void) {
+ toku_mutex_assert_locked(m_mutex);
+ return (m_num_expensive_want_write > 0) || (m_current_writer_expensive);
+ }
+
+ uint32_t frwlock::users(void) const {
+ toku_mutex_assert_locked(m_mutex);
+ return m_num_readers + m_num_writers + m_num_want_read +
+ m_num_want_write;
+ }
+ uint32_t frwlock::blocked_users(void) const {
+ toku_mutex_assert_locked(m_mutex);
+ return m_num_want_read + m_num_want_write;
+ }
+ uint32_t frwlock::writers(void) const {
+ // this is sometimes called as "assert(lock->writers())" when we
+ // assume we have the write lock. if that's the assumption, we may
+ // not own the mutex, so we don't assert_locked here
+ return m_num_writers;
+ }
+ uint32_t frwlock::blocked_writers(void) const {
+ toku_mutex_assert_locked(m_mutex);
+ return m_num_want_write;
+ }
+ uint32_t frwlock::readers(void) const {
+ toku_mutex_assert_locked(m_mutex);
+ return m_num_readers;
+ }
+ uint32_t frwlock::blocked_readers(void) const {
+ toku_mutex_assert_locked(m_mutex);
+ return m_num_want_read;
+ }
+
+} // namespace toku
diff --git a/storage/tokudb/PerconaFT/util/frwlock.h b/storage/tokudb/PerconaFT/util/frwlock.h
new file mode 100644
index 00000000..b02d95e5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/frwlock.h
@@ -0,0 +1,131 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_portability.h>
+#include <toku_pthread.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <util/context.h>
+
+//TODO: update comment, this is from rwlock.h
+
+namespace toku {
+
+ class frwlock {
+ public:
+ void init(toku_mutex_t *const mutex
+#if defined(TOKU_MYSQL_WITH_PFS)
+ ,
+ const toku_instr_key &rwlock_instr_key
+#endif
+ );
+ void deinit(void);
+
+ void write_lock(bool expensive);
+ bool try_write_lock(bool expensive);
+ void write_unlock(void);
+ // returns true if acquiring a write lock will be expensive
+ bool write_lock_is_expensive(void);
+
+ void read_lock(void);
+ bool try_read_lock(void);
+ void read_unlock(void);
+ // returns true if acquiring a read lock will be expensive
+ bool read_lock_is_expensive(void);
+
+ uint32_t users(void) const;
+ uint32_t blocked_users(void) const;
+ uint32_t writers(void) const;
+ uint32_t blocked_writers(void) const;
+ uint32_t readers(void) const;
+ uint32_t blocked_readers(void) const;
+
+ private:
+ struct queue_item {
+ toku_cond_t *cond;
+ struct queue_item *next;
+ };
+
+ bool queue_is_empty(void) const;
+ void enq_item(queue_item *const item);
+ toku_cond_t *deq_item(void);
+ void maybe_signal_or_broadcast_next(void);
+ void maybe_signal_next_writer(void);
+
+ toku_mutex_t *m_mutex;
+
+ uint32_t m_num_readers;
+ uint32_t m_num_writers;
+ uint32_t m_num_want_write;
+ uint32_t m_num_want_read;
+ uint32_t m_num_signaled_readers;
+ // number of writers waiting that are expensive
+ // MUST be <= m_num_want_write
+ uint32_t m_num_expensive_want_write;
+ // true if the current writer is expensive;
+ // if there is no current writer, this is false
+ bool m_current_writer_expensive;
+ // true if waiting for a read is expensive;
+ // if there are currently no waiting readers, this is false
+ bool m_read_wait_expensive;
+ // thread-id of the current writer
+ int m_current_writer_tid;
+ // context id describing the context of the current writer blocking
+ // new readers (either because this writer holds the write lock or
+ // is the first to want the write lock).
+ context_id m_blocking_writer_context_id;
+ queue_item m_queue_item_read;
+ bool m_wait_read_is_in_queue;
+
+ toku_cond_t m_wait_read;
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_pthread_rwlock_t m_rwlock;
+#endif
+ queue_item *m_wait_head;
+ queue_item *m_wait_tail;
+ };
+
+ ENSURE_POD(frwlock);
+
+} // namespace toku
+
+// include the implementation here
+// #include "frwlock.cc"
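+
+// A minimal usage sketch (illustrative only; compiled out with #if 0). The
+// frwlock does not own a mutex: the caller supplies one to init() and must hold
+// it around every lock/unlock call; waits inside the lock release and reacquire
+// that mutex via condition variables. The sketch assumes a build without
+// TOKU_MYSQL_WITH_PFS and assumes toku_mutex_init/toku_mutex_destroy follow the
+// same toku_uninstrumented pattern used for toku_cond_init in frwlock.cc.
+#if 0
+static void frwlock_example(void) {
+ toku_mutex_t mutex;
+ toku_mutex_init(toku_uninstrumented, &mutex, nullptr);
+
+ toku::frwlock lock;
+ lock.init(&mutex);
+
+ toku_mutex_lock(&mutex);
+ lock.read_lock(); // may wait if a writer holds or wants the lock
+ toku_mutex_unlock(&mutex);
+
+ // ... read the protected structure ...
+
+ toku_mutex_lock(&mutex);
+ lock.read_unlock();
+ toku_mutex_unlock(&mutex);
+
+ lock.deinit();
+ toku_mutex_destroy(&mutex);
+}
+#endif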
diff --git a/storage/tokudb/PerconaFT/util/growable_array.h b/storage/tokudb/PerconaFT/util/growable_array.h
new file mode 100644
index 00000000..bc9e67af
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/growable_array.h
@@ -0,0 +1,138 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <memory.h>
+
+//******************************************************************************
+//
+// Overview: A growable array is a little bit like std::vector except that
+// it doesn't have constructors (hence can be used in static constructs, since
+// the Google style guide says no constructors), and it's a little simpler.
+// Operations:
+// init and deinit (we don't have constructors and destructors).
+// fetch_unchecked to get values out.
+// store_unchecked to put values in.
+// push to add an element at the end
+// get_size to find out the size
+// get_memory_size to find out how much memory the data structure is using.
+//
+//******************************************************************************
+
+namespace toku {
+
+template<typename T> class GrowableArray {
+ public:
+ void init (void)
+ // Effect: Initialize the array to contain no elements.
+ {
+ m_array=NULL;
+ m_size=0;
+ m_size_limit=0;
+ }
+
+ void deinit (void)
+ // Effect: Deinitialize the array (freeing any memory it uses, for example).
+ {
+ toku_free(m_array);
+ m_array =NULL;
+ m_size =0;
+ m_size_limit=0;
+ }
+
+ T fetch_unchecked (size_t i) const
+ // Effect: Fetch the ith element. Requires: i is in range (bounds are not checked here).
+ {
+ return m_array[i];
+ }
+
+ void store_unchecked (size_t i, T v)
+ // Effect: Store v in the ith element. If i is out of range, the system asserts.
+ {
+ paranoid_invariant(i<m_size);
+ m_array[i]=v;
+ }
+
+ void push (T v)
+ // Effect: Add v to the end of the array (increasing the size). The amortized cost of this operation is constant.
+ // Implementation hint: Double the size of the array when it gets too big so that the amortized cost stays constant.
+ {
+ if (m_size>=m_size_limit) {
+ if (m_array==NULL) {
+ m_size_limit=1;
+ } else {
+ m_size_limit*=2;
+ }
+ XREALLOC_N(m_size_limit, m_array);
+ }
+ m_array[m_size++]=v;
+ }
+
+ size_t get_size (void) const
+ // Effect: Return the number of elements in the array.
+ {
+ return m_size;
+ }
+ size_t memory_size(void) const
+ // Effect: Return the size (in bytes) that the array occupies in memory. This is really only an estimate.
+ {
+ return sizeof(*this)+sizeof(T)*m_size_limit;
+ }
+
+ private:
+ T *m_array;
+ size_t m_size;
+ size_t m_size_limit; // How much space is allocated in array.
+};
+
+}
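+
+// A minimal usage sketch (illustrative only; compiled out with #if 0): init,
+// push a few values, read them back, then deinit to free the storage.
+#if 0
+static void growable_array_example(void) {
+ toku::GrowableArray<int> a;
+ a.init();
+ for (int i = 0; i < 10; i++) {
+ a.push(i * i); // amortized O(1) append; capacity doubles as needed
+ }
+ size_t total = 0;
+ for (size_t i = 0; i < a.get_size(); i++) {
+ total += a.fetch_unchecked(i);
+ }
+ (void) total;
+ a.deinit();
+}
+#endif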
diff --git a/storage/tokudb/PerconaFT/util/kibbutz.cc b/storage/tokudb/PerconaFT/util/kibbutz.cc
new file mode 100644
index 00000000..409bf6bd
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/kibbutz.cc
@@ -0,0 +1,242 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <memory.h>
+
+#include <portability/toku_config.h>
+#include <portability/toku_time.h>
+#include <toku_pthread.h>
+
+#include "kibbutz.h"
+
+// A Kibbutz is a collection of workers and some work to do.
+struct todo {
+ void (*f)(void *extra);
+ void *extra;
+ struct todo *next;
+ struct todo *prev;
+};
+
+struct kid {
+ struct kibbutz *k;
+};
+
+struct kibbutz {
+ toku_mutex_t mutex;
+ toku_cond_t cond;
+ bool please_shutdown;
+ struct todo *head, *tail; // head is the next thing to do.
+ int n_workers;
+ pthread_t *workers; // an array of n_workers
+ struct kid *ids; // pass this in when creating a worker so it knows who it is.
+
+ uint64_t threads_active;
+ uint64_t queue_size;
+ uint64_t max_queue_size;
+ uint64_t total_items_processed;
+ uint64_t total_execution_time;
+};
+
+static void *work_on_kibbutz(void *);
+
+toku_instr_key *kibbutz_mutex_key;
+toku_instr_key *kibbutz_k_cond_key;
+toku_instr_key *kibbutz_thread_key;
+
+int toku_kibbutz_create(int n_workers, KIBBUTZ *kb_ret) {
+ int r = 0;
+ *kb_ret = NULL;
+ KIBBUTZ XCALLOC(k);
+ toku_mutex_init(*kibbutz_mutex_key, &k->mutex, nullptr);
+ toku_cond_init(*kibbutz_k_cond_key, &k->cond, nullptr);
+ k->please_shutdown = false;
+ k->head = NULL;
+ k->tail = NULL;
+ k->n_workers = n_workers;
+ k->threads_active = 0;
+ k->queue_size = 0;
+ k->max_queue_size = 0;
+ k->total_items_processed = 0;
+ k->total_execution_time = 0;
+ XMALLOC_N(n_workers, k->workers);
+ XMALLOC_N(n_workers, k->ids);
+ for (int i = 0; i < n_workers; i++) {
+ k->ids[i].k = k;
+ r = toku_pthread_create(*kibbutz_thread_key,
+ &k->workers[i],
+ nullptr,
+ work_on_kibbutz,
+ &k->ids[i]);
+ if (r != 0) {
+ k->n_workers = i;
+ toku_kibbutz_destroy(k);
+ break;
+ }
+ }
+ if (r == 0) {
+ *kb_ret = k;
+ }
+ return r;
+}
+
+static void klock (KIBBUTZ k) {
+ toku_mutex_lock(&k->mutex);
+}
+static void kunlock (KIBBUTZ k) {
+ toku_mutex_unlock(&k->mutex);
+}
+static void kwait (KIBBUTZ k) {
+ toku_cond_wait(&k->cond, &k->mutex);
+}
+static void ksignal (KIBBUTZ k) {
+ toku_cond_signal(&k->cond);
+}
+
+//
+// pops the tail of the kibbutz off the list and works on it
+// Note that in toku_kibbutz_enq, items are enqueued at the head,
+// so the work is done in FIFO order. This is necessary
+// to avoid deadlocks in flusher threads.
+//
+static void *work_on_kibbutz (void *kidv) {
+ struct kid *CAST_FROM_VOIDP(kid, kidv);
+ KIBBUTZ k = kid->k;
+ klock(k);
+ while (1) {
+ while (k->tail) {
+ struct todo *item = k->tail;
+ k->tail = item->prev;
+ toku_sync_sub_and_fetch(&k->queue_size, 1);
+ if (k->tail==NULL) {
+ k->head=NULL;
+ } else {
+ // if there are other things to do, then wake up the next guy, if there is one.
+ ksignal(k);
+ }
+ kunlock(k);
+ toku_sync_add_and_fetch(&k->threads_active, 1);
+ uint64_t starttime = toku_current_time_microsec();
+ item->f(item->extra);
+ uint64_t duration = toku_current_time_microsec() - starttime;
+ toku_sync_add_and_fetch(&k->total_execution_time, duration);
+ toku_sync_add_and_fetch(&k->total_items_processed, 1);
+ toku_sync_sub_and_fetch(&k->threads_active, 1);
+ toku_free(item);
+ klock(k);
+ // if there's another item on k->head, then we'll just go grab it now, without waiting for a signal.
+ }
+ if (k->please_shutdown) {
+ // Don't follow this unless the work is all done, so that when we
+ // set please_shutdown, all the work finishes before any threads
+ // quit.
+ ksignal(k); // must wake up anyone else who is waiting, so they can
+ // shut down.
+ kunlock(k);
+ toku_instr_delete_current_thread();
+ return nullptr;
+ }
+ // There is no work to do and it's not time to shutdown, so wait.
+ kwait(k);
+ }
+}
+
+//
+// adds work to the head of the kibbutz
+// Note that in work_on_kibbutz, items are popped off the tail for work,
+// so the work is done in FIFO order. This is necessary
+// to avoid deadlocks in flusher threads.
+//
+void toku_kibbutz_enq (KIBBUTZ k, void (*f)(void*), void *extra) {
+ struct todo *XMALLOC(td);
+ td->f = f;
+ td->extra = extra;
+ klock(k);
+ assert(!k->please_shutdown);
+ td->next = k->head;
+ td->prev = NULL;
+ if (k->head) {
+ assert(k->head->prev == NULL);
+ k->head->prev = td;
+ }
+ k->head = td;
+ if (k->tail==NULL) k->tail = td;
+
+ uint64_t newsize = toku_sync_add_and_fetch(&k->queue_size, 1);
+ // not exactly precise but we'll live with it
+ if (newsize > k->max_queue_size) k->max_queue_size = k->queue_size;
+
+ ksignal(k);
+ kunlock(k);
+}
+
+void toku_kibbutz_get_status(KIBBUTZ k,
+ uint64_t *num_threads,
+ uint64_t *num_threads_active,
+ uint64_t *queue_size,
+ uint64_t *max_queue_size,
+ uint64_t *total_items_processed,
+ uint64_t *total_execution_time) {
+ *num_threads = k->n_workers;
+ *num_threads_active = k->threads_active;
+ *queue_size = k->queue_size;
+ *max_queue_size = k->max_queue_size;
+ *total_items_processed = k->total_items_processed;
+ *total_execution_time = k->total_execution_time / 1000; // return in ms.
+}
+
+void toku_kibbutz_destroy (KIBBUTZ k)
+// Effect: wait for all the enqueued work to finish, and then destroy the kibbutz.
+// Note: It is an error to perform kibbutz_enq operations after this is called.
+{
+ klock(k);
+ assert(!k->please_shutdown);
+ k->please_shutdown = true;
+ ksignal(k); // must wake everyone up to tell them to shutdown.
+ kunlock(k);
+ for (int i=0; i<k->n_workers; i++) {
+ void *result;
+ int r = toku_pthread_join(k->workers[i], &result);
+ assert(r==0);
+ assert(result==NULL);
+ }
+ toku_free(k->workers);
+ toku_free(k->ids);
+ toku_cond_destroy(&k->cond);
+ toku_mutex_destroy(&k->mutex);
+ toku_free(k);
+}
diff --git a/storage/tokudb/PerconaFT/util/kibbutz.h b/storage/tokudb/PerconaFT/util/kibbutz.h
new file mode 100644
index 00000000..74cd5a6d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/kibbutz.h
@@ -0,0 +1,74 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+//
+// The kibbutz is another threadpool meant to do arbitrary work.
+//
+
+typedef struct kibbutz *KIBBUTZ;
+//
+// create a kibbutz where n_workers is the number of threads in the threadpool
+//
+int toku_kibbutz_create (int n_workers, KIBBUTZ *kb);
+//
+// enqueue a workitem in the kibbutz. When the kibbutz is to work on this workitem,
+// it calls f(extra).
+// At any time, the kibbutz is operating on at most n_workers jobs.
+// Other enqueued workitems are on a queue. An invariant is
+// that no currently enqueued item was placed on the queue before
+// any item that is currently being operated on. Another way to state
+// this is that all items on the queue were placed there after any item
+// that is currently being worked on.
+//
+void toku_kibbutz_enq (KIBBUTZ k, void (*f)(void*), void *extra);
+//
+// get kibbutz status
+//
+void toku_kibbutz_get_status(KIBBUTZ k,
+ uint64_t *num_threads,
+ uint64_t *num_threads_active,
+ uint64_t *queue_size,
+ uint64_t *max_queue_size,
+ uint64_t *total_items_processed,
+ uint64_t *total_execution_time);
+//
+// destroys the kibbutz
+//
+void toku_kibbutz_destroy (KIBBUTZ k);
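+
+// A minimal usage sketch (illustrative only; compiled out with #if 0): create a
+// kibbutz, enqueue one work item, and destroy it. Destroy waits for all
+// enqueued work to finish, so the counter is safe to read afterwards.
+#if 0
+static void add_one(void *extra) {
+ int *counter = (int *) extra; // runs on a kibbutz worker thread
+ (*counter)++;
+}
+
+static void kibbutz_example(void) {
+ KIBBUTZ k;
+ int r = toku_kibbutz_create(4, &k); // 4 worker threads
+ if (r == 0) {
+ int counter = 0;
+ toku_kibbutz_enq(k, add_one, &counter);
+ toku_kibbutz_destroy(k); // joins the workers after the queue drains
+ }
+}
+#endif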
diff --git a/storage/tokudb/PerconaFT/util/memarena.cc b/storage/tokudb/PerconaFT/util/memarena.cc
new file mode 100644
index 00000000..8c054221
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/memarena.cc
@@ -0,0 +1,191 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <algorithm>
+#include <string.h>
+#include <memory.h>
+
+#include <util/memarena.h>
+
+void memarena::create(size_t initial_size) {
+ _current_chunk = arena_chunk();
+ _other_chunks = nullptr;
+ _size_of_other_chunks = 0;
+ _footprint_of_other_chunks = 0;
+ _n_other_chunks = 0;
+
+ _current_chunk.size = initial_size;
+ if (_current_chunk.size > 0) {
+ XMALLOC_N(_current_chunk.size, _current_chunk.buf);
+ }
+}
+
+void memarena::destroy(void) {
+ if (_current_chunk.buf) {
+ toku_free(_current_chunk.buf);
+ }
+ for (int i = 0; i < _n_other_chunks; i++) {
+ toku_free(_other_chunks[i].buf);
+ }
+ if (_other_chunks) {
+ toku_free(_other_chunks);
+ }
+ _current_chunk = arena_chunk();
+ _other_chunks = nullptr;
+ _n_other_chunks = 0;
+}
+
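+// Rounds a size up to the next 4096-byte page boundary. For example:
+// round_to_page(1) == 4096, round_to_page(4096) == 4096, round_to_page(4097) == 8192.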
+static size_t round_to_page(size_t size) {
+ const size_t page_size = 4096;
+ const size_t r = page_size + ((size - 1) & ~(page_size - 1));
+ assert((r & (page_size - 1)) == 0); // make sure it's aligned
+ assert(r >= size); // make sure it's not too small
+ assert(r < size + page_size); // make sure we didn't grow by more than a page.
+ return r;
+}
+
+static const size_t MEMARENA_MAX_CHUNK_SIZE = 64 * 1024 * 1024;
+
+void *memarena::malloc_from_arena(size_t size) {
+ if (_current_chunk.buf == nullptr || _current_chunk.size < _current_chunk.used + size) {
+ // The existing block isn't big enough.
+ // Add the block to the vector of blocks.
+ if (_current_chunk.buf) {
+ invariant(_current_chunk.size > 0);
+ int old_n = _n_other_chunks;
+ XREALLOC_N(old_n + 1, _other_chunks);
+ _other_chunks[old_n] = _current_chunk;
+ _n_other_chunks = old_n + 1;
+ _size_of_other_chunks += _current_chunk.size;
+ _footprint_of_other_chunks += toku_memory_footprint(_current_chunk.buf, _current_chunk.used);
+ }
+
+ // Make a new one. Grow the buffer size exponentially until we hit
+ // the max chunk size, but make it at least `size' bytes so the
+ // current allocation always fits.
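+ // (Illustrative: a 4 KiB chunk is followed by 8 KiB, then 16 KiB, and so on,
+ // capped at MEMARENA_MAX_CHUNK_SIZE unless a single allocation needs more.)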
+ size_t new_size = std::min(MEMARENA_MAX_CHUNK_SIZE, 2 * _current_chunk.size);
+ if (new_size < size) {
+ new_size = size;
+ }
+ new_size = round_to_page(new_size); // at least size, but round to the next page size
+ XMALLOC_N(new_size, _current_chunk.buf);
+ _current_chunk.used = 0;
+ _current_chunk.size = new_size;
+ }
+ invariant(_current_chunk.buf != nullptr);
+
+ // allocate in the existing block.
+ char *p = _current_chunk.buf + _current_chunk.used;
+ _current_chunk.used += size;
+ return p;
+}
+
+void memarena::move_memory(memarena *dest) {
+ // Move memory to dest
+ XREALLOC_N(dest->_n_other_chunks + _n_other_chunks + 1, dest->_other_chunks);
+ dest->_size_of_other_chunks += _size_of_other_chunks + _current_chunk.size;
+ dest->_footprint_of_other_chunks += _footprint_of_other_chunks + toku_memory_footprint(_current_chunk.buf, _current_chunk.used);
+ for (int i = 0; i < _n_other_chunks; i++) {
+ dest->_other_chunks[dest->_n_other_chunks++] = _other_chunks[i];
+ }
+ dest->_other_chunks[dest->_n_other_chunks++] = _current_chunk;
+
+ // Clear out this memarena's memory
+ toku_free(_other_chunks);
+ _current_chunk = arena_chunk();
+ _other_chunks = nullptr;
+ _size_of_other_chunks = 0;
+ _footprint_of_other_chunks = 0;
+ _n_other_chunks = 0;
+}
+
+size_t memarena::total_memory_size(void) const {
+ return sizeof(*this) +
+ total_size_in_use() +
+ _n_other_chunks * sizeof(*_other_chunks);
+}
+
+size_t memarena::total_size_in_use(void) const {
+ return _size_of_other_chunks + _current_chunk.used;
+}
+
+size_t memarena::total_footprint(void) const {
+ return sizeof(*this) +
+ _footprint_of_other_chunks +
+ toku_memory_footprint(_current_chunk.buf, _current_chunk.used) +
+ _n_other_chunks * sizeof(*_other_chunks);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+const void *memarena::chunk_iterator::current(size_t *used) const {
+ if (_chunk_idx < 0) {
+ *used = _ma->_current_chunk.used;
+ return _ma->_current_chunk.buf;
+ } else if (_chunk_idx < _ma->_n_other_chunks) {
+ *used = _ma->_other_chunks[_chunk_idx].used;
+ return _ma->_other_chunks[_chunk_idx].buf;
+ }
+ *used = 0;
+ return nullptr;
+}
+
+void memarena::chunk_iterator::next() {
+ _chunk_idx++;
+}
+
+bool memarena::chunk_iterator::more() const {
+ if (_chunk_idx < 0) {
+ return _ma->_current_chunk.buf != nullptr;
+ }
+ return _chunk_idx < _ma->_n_other_chunks;
+}
diff --git a/storage/tokudb/PerconaFT/util/memarena.h b/storage/tokudb/PerconaFT/util/memarena.h
new file mode 100644
index 00000000..c1de3c94
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/memarena.h
@@ -0,0 +1,136 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+/*
+ * A memarena is used to efficiently store a collection of objects that never move.
+ * The pattern is to allocate more and more objects and then free them all at once.
+ * The underlying memory will store 1 or more objects per chunk. Each chunk is
+ * contiguously laid out in memory but chunks are not necessarily contiguous with
+ * each other.
+ */
+class memarena {
+public:
+ memarena() :
+ _current_chunk(arena_chunk()),
+ _other_chunks(nullptr),
+ _n_other_chunks(0),
+ _size_of_other_chunks(0),
+ _footprint_of_other_chunks(0) {
+ }
+
+ // Effect: Create a memarena with the specified initial size
+ void create(size_t initial_size);
+
+ void destroy(void);
+
+ // Effect: Allocate some memory. The returned value remains valid until the memarena is cleared or closed.
+ // In case of ENOMEM, aborts.
+ void *malloc_from_arena(size_t size);
+
+ // Effect: Move all the memory from this memarena into DEST.
+ // When SOURCE is closed the memory won't be freed.
+ // When DEST is closed, the memory will be freed, unless DEST moves its memory to another memarena...
+ void move_memory(memarena *dest);
+
+ // Effect: Calculate the amount of memory used by a memory arena.
+ size_t total_memory_size(void) const;
+
+ // Effect: Calculate the used space of the memory arena (ie: excludes unused space)
+ size_t total_size_in_use(void) const;
+
+ // Effect: Calculate the amount of memory used, according to toku_memory_footprint(),
+ // which is a more expensive but more accurate count of memory used.
+ size_t total_footprint(void) const;
+
+ // iterator over the underlying chunks that store objects in the memarena.
+ // a chunk is represented by a pointer to const memory and a usable byte count.
+ class chunk_iterator {
+ public:
+ chunk_iterator(const memarena *ma) :
+ _ma(ma), _chunk_idx(-1) {
+ }
+
+ // returns: base pointer to the current chunk
+ // *used set to the number of usable bytes
+ // if more() is false, returns nullptr and *used = 0
+ const void *current(size_t *used) const;
+
+ // requires: more() is true
+ void next();
+
+ bool more() const;
+
+ private:
+ // -1 represents the 'initial' chunk in a memarena, ie: ma->_current_chunk
+ // >= 0 represents the i'th chunk in the ma->_other_chunks array
+ const memarena *_ma;
+ int _chunk_idx;
+ };
+
+private:
+ struct arena_chunk {
+ arena_chunk() : buf(nullptr), used(0), size(0) { }
+ char *buf;
+ size_t used;
+ size_t size;
+ };
+
+ struct arena_chunk _current_chunk;
+ struct arena_chunk *_other_chunks;
+ int _n_other_chunks;
+ size_t _size_of_other_chunks; // the buf_size of all the other chunks.
+ size_t _footprint_of_other_chunks; // the footprint of all the other chunks.
+
+ friend class memarena_unit_test;
+};
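+
+/*
+ * Example (a minimal usage sketch of the class above; sizes are arbitrary):
+ *
+ *     memarena ma;
+ *     ma.create(4096);                       // initial chunk of 4 KiB
+ *     void *a = ma.malloc_from_arena(100);   // remains valid until destroy()
+ *     void *b = ma.malloc_from_arena(200);
+ *     size_t used = ma.total_size_in_use();  // >= 300
+ *     ma.destroy();                          // frees a and b together
+ */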
diff --git a/storage/tokudb/PerconaFT/util/mempool.cc b/storage/tokudb/PerconaFT/util/mempool.cc
new file mode 100644
index 00000000..b27be71c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/mempool.cc
@@ -0,0 +1,197 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <string.h>
+#include <memory.h>
+#include <toku_assert.h>
+#include "mempool.h"
+
+/* Contract:
+ * Caller allocates mempool struct as convenient for caller, but memory used for data storage
+ * must be dynamically allocated via toku_malloc().
+ * Caller dynamically allocates memory for mempool and initializes mempool by calling toku_mempool_init().
+ * Once a buffer is assigned to a mempool (via toku_mempool_init()), the mempool owns it and
+ * is responsible for destroying it when the mempool is destroyed.
+ * Caller destroys mempool by calling toku_mempool_destroy().
+ *
+ * Note, toku_mempool_init() does not allocate the memory because sometimes the caller will already have
+ * the memory allocated and will assign the pre-allocated memory to the mempool.
+ */
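+
+/* Example (a minimal sketch of the contract above; sizes are arbitrary):
+ *
+ *     struct mempool mp;
+ *     toku_mempool_construct(&mp, 1024);        // allocates the data buffer (with 25% slack)
+ *     void *p = toku_mempool_malloc(&mp, 100);  // bump allocation; nullptr when the pool is full
+ *     toku_mempool_mfree(&mp, p, 100);          // only accounts the bytes as fragmentation
+ *     toku_mempool_destroy(&mp);                // frees the buffer owned by the pool
+ */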
+
+/* This is a constructor to be used when the memory for the mempool struct has been
+ * allocated by the caller, but no memory has yet been allocated for the data.
+ */
+void toku_mempool_zero(struct mempool *mp) {
+ // printf("mempool_zero %p\n", mp);
+ memset(mp, 0, sizeof(*mp));
+}
+
+// TODO 4050 this is dirty, try to replace all uses of this
+void toku_mempool_init(struct mempool *mp, void *base, size_t free_offset, size_t size) {
+ // printf("mempool_init %p %p %lu\n", mp, base, size);
+ paranoid_invariant(base != 0);
+ paranoid_invariant(size < (1U<<31)); // used to be assert(size >= 0), but changed to size_t so now let's make sure it's not more than 2GB...
+ paranoid_invariant(free_offset <= size);
+ mp->base = base;
+ mp->size = size;
+ mp->free_offset = free_offset; // address of first available memory
+ mp->frag_size = 0; // byte count of wasted space (formerly used, no longer used or available)
+}
+
+/* allocate memory and construct mempool
+ */
+void toku_mempool_construct(struct mempool *mp, size_t data_size) {
+ if (data_size) {
+ // add 25% slack
+ size_t mp_size = data_size + (data_size / 4);
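+        // e.g., data_size == 1000 yields a 1250-byte buffer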
+ mp->base = toku_xmalloc_aligned(64, mp_size);
+ mp->size = mp_size;
+ mp->free_offset = 0;
+ mp->frag_size = 0;
+ }
+ else {
+ toku_mempool_zero(mp);
+ }
+}
+
+void toku_mempool_reset(struct mempool *mp) {
+ mp->free_offset = 0;
+ mp->frag_size = 0;
+}
+
+void toku_mempool_realloc_larger(struct mempool *mp, size_t data_size) {
+ invariant(data_size >= mp->free_offset);
+
+ size_t mpsize = data_size + (data_size/4); // allow 1/4 room for expansion (would be wasted if read-only)
+ void* newmem = toku_xmalloc_aligned(64, mpsize); // allocate new buffer for mempool
+ memcpy(newmem, mp->base, mp->free_offset); // Copy old info
+ toku_free(mp->base);
+ mp->base = newmem;
+ mp->size = mpsize;
+}
+
+
+void toku_mempool_destroy(struct mempool *mp) {
+ // printf("mempool_destroy %p %p %lu %lu\n", mp, mp->base, mp->size, mp->frag_size);
+ if (mp->base)
+ toku_free(mp->base);
+ toku_mempool_zero(mp);
+}
+
+void *toku_mempool_get_base(const struct mempool *mp) {
+ return mp->base;
+}
+
+void *toku_mempool_get_pointer_from_base_and_offset(const struct mempool *mp, size_t offset) {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(mp->base) + offset);
+}
+
+size_t toku_mempool_get_offset_from_pointer_and_base(const struct mempool *mp, const void* p) {
+ paranoid_invariant(p >= mp->base);
+ return reinterpret_cast<const char*>(p) - reinterpret_cast<const char*>(mp->base);
+}
+
+size_t toku_mempool_get_size(const struct mempool *mp) {
+ return mp->size;
+}
+
+size_t toku_mempool_get_frag_size(const struct mempool *mp) {
+ return mp->frag_size;
+}
+
+size_t toku_mempool_get_used_size(const struct mempool *mp) {
+ return mp->free_offset - mp->frag_size;
+}
+
+void* toku_mempool_get_next_free_ptr(const struct mempool *mp) {
+ return toku_mempool_get_pointer_from_base_and_offset(mp, mp->free_offset);
+}
+
+size_t toku_mempool_get_offset_limit(const struct mempool *mp) {
+ return mp->free_offset;
+}
+
+size_t toku_mempool_get_free_size(const struct mempool *mp) {
+ return mp->size - mp->free_offset;
+}
+
+size_t toku_mempool_get_allocated_size(const struct mempool *mp) {
+ return mp->free_offset;
+}
+
+void *toku_mempool_malloc(struct mempool *mp, size_t size) {
+ paranoid_invariant(size < (1U<<31));
+ paranoid_invariant(mp->size < (1U<<31));
+ paranoid_invariant(mp->free_offset < (1U<<31));
+ paranoid_invariant(mp->free_offset <= mp->size);
+ void *vp;
+ if (mp->free_offset + size > mp->size) {
+ vp = nullptr;
+ } else {
+ vp = reinterpret_cast<char *>(mp->base) + mp->free_offset;
+ mp->free_offset += size;
+ }
+ paranoid_invariant(mp->free_offset <= mp->size);
+ paranoid_invariant(vp == 0 || toku_mempool_inrange(mp, vp, size));
+ return vp;
+}
+
+// if vp is null then we are freeing something, but not specifying what. The data won't be freed until compression is done.
+void toku_mempool_mfree(struct mempool *mp, void *vp, size_t size) {
+ if (vp) { paranoid_invariant(toku_mempool_inrange(mp, vp, size)); }
+ mp->frag_size += size;
+ invariant(mp->frag_size <= mp->free_offset);
+ invariant(mp->frag_size <= mp->size);
+}
+
+
+/* get memory footprint */
+size_t toku_mempool_footprint(struct mempool *mp) {
+ void * base = mp->base;
+ size_t touched = mp->free_offset;
+ size_t rval = toku_memory_footprint(base, touched);
+ return rval;
+}
+
+void toku_mempool_clone(const struct mempool* orig_mp, struct mempool* new_mp) {
+ new_mp->frag_size = orig_mp->frag_size;
+ new_mp->free_offset = orig_mp->free_offset;
+ new_mp->size = orig_mp->free_offset; // only make the cloned mempool store what is needed
+ new_mp->base = toku_xmalloc_aligned(64, new_mp->size);
+ memcpy(new_mp->base, orig_mp->base, new_mp->size);
+}
diff --git a/storage/tokudb/PerconaFT/util/mempool.h b/storage/tokudb/PerconaFT/util/mempool.h
new file mode 100644
index 00000000..feafdc17
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/mempool.h
@@ -0,0 +1,129 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+/* a memory pool is a contiguous region of memory that supports single
+ allocations from the pool. these allocated regions are never recycled.
+ when the memory pool no longer has free space, the allocated chunks
+ must be relocated by the application to a new memory pool. */
+
+#include <stddef.h>
+
+struct mempool;
+
+ // TODO 4050 Hide mempool struct internals from callers
+
+struct mempool {
+ void *base; /* the base address of the memory */
+ size_t free_offset; /* the offset of the memory pool free space */
+ size_t size; /* the size of the memory */
+ size_t frag_size; /* the size of the fragmented memory */
+};
+
+/* This is a constructor to be used when the memory for the mempool struct has been
+ * allocated by the caller, but no memory has yet been allocated for the data.
+ */
+void toku_mempool_zero(struct mempool *mp);
+
+/* initialize the memory pool with the base address and size of a
+ contiguous chunk of memory */
+void toku_mempool_init(struct mempool *mp, void *base, size_t free_offset, size_t size);
+
+/* allocate memory and construct mempool
+ */
+void toku_mempool_construct(struct mempool *mp, size_t data_size);
+
+/* treat mempool as if it has just been created; ignore any fragmentation and start allocating from the beginning again.
+ */
+void toku_mempool_reset(struct mempool *mp);
+
+/* reallocate the mempool into a larger buffer, preserving its existing contents
+ */
+void toku_mempool_realloc_larger(struct mempool *mp, size_t data_size);
+
+/* destroy the memory pool */
+void toku_mempool_destroy(struct mempool *mp);
+
+/* get the base address of the memory pool */
+void *toku_mempool_get_base(const struct mempool *mp);
+
+/* get a pointer that is offset bytes past the base of the memory pool */
+void *toku_mempool_get_pointer_from_base_and_offset(const struct mempool *mp, size_t offset);
+
+/* get the offset from base of a pointer */
+size_t toku_mempool_get_offset_from_pointer_and_base(const struct mempool *mp, const void* p);
+
+/* get a pointer to the first free byte (if any) */
+void* toku_mempool_get_next_free_ptr(const struct mempool *mp);
+
+/* get the limit of valid offsets. (anything later was not allocated) */
+size_t toku_mempool_get_offset_limit(const struct mempool *mp);
+
+/* get the size of the memory pool */
+size_t toku_mempool_get_size(const struct mempool *mp);
+
+/* get the amount of fragmented (wasted) space in the memory pool */
+size_t toku_mempool_get_frag_size(const struct mempool *mp);
+
+/* get the amount of space that is holding useful data */
+size_t toku_mempool_get_used_size(const struct mempool *mp);
+
+/* get the amount of space that is available for new data */
+size_t toku_mempool_get_free_size(const struct mempool *mp);
+
+/* get the amount of space that has been allocated for use (wasted or not) */
+size_t toku_mempool_get_allocated_size(const struct mempool *mp);
+
+/* allocate a chunk of memory from the memory pool */
+void *toku_mempool_malloc(struct mempool *mp, size_t size);
+
+/* free a previously allocated chunk of memory. the free only updates
+ a count of the amount of free space in the memory pool. the memory
+ pool does not keep track of the locations of the free chunks */
+void toku_mempool_mfree(struct mempool *mp, void *vp, size_t size);
+
+/* verify that a memory range is contained within a mempool */
+static inline int toku_mempool_inrange(struct mempool *mp, void *vp, size_t size) {
+ return (mp->base <= vp) && ((char *)vp + size <= (char *)mp->base + mp->size);
+}
+
+/* get memory footprint */
+size_t toku_mempool_footprint(struct mempool *mp);
+
+void toku_mempool_clone(const struct mempool* orig_mp, struct mempool* new_mp);
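+
+/* Example (a sketch of one way to grow a full pool with the API above;
+ * the design note at the top instead expects callers to relocate into a new pool):
+ *
+ *     void *p = toku_mempool_malloc(&mp, n);
+ *     if (p == nullptr) {
+ *         // grow, then retry; the argument must cover everything already allocated
+ *         toku_mempool_realloc_larger(&mp, toku_mempool_get_allocated_size(&mp) + n);
+ *         p = toku_mempool_malloc(&mp, n);
+ *     }
+ */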
diff --git a/storage/tokudb/PerconaFT/util/minicron.cc b/storage/tokudb/PerconaFT/util/minicron.cc
new file mode 100644
index 00000000..241e498c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/minicron.cc
@@ -0,0 +1,201 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include <errno.h>
+#include <string.h>
+
+#include "portability/toku_assert.h"
+#include "util/minicron.h"
+
+toku_instr_key *minicron_p_mutex_key;
+toku_instr_key *minicron_p_condvar_key;
+toku_instr_key *minicron_thread_key;
+
+static void toku_gettime(toku_timespec_t *a) {
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ a->tv_sec = tv.tv_sec;
+ a->tv_nsec = tv.tv_usec * 1000LL;
+}
+
+
+static int
+timespec_compare (toku_timespec_t *a, toku_timespec_t *b) {
+ if (a->tv_sec > b->tv_sec) return 1;
+ if (a->tv_sec < b->tv_sec) return -1;
+ if (a->tv_nsec > b->tv_nsec) return 1;
+ if (a->tv_nsec < b->tv_nsec) return -1;
+ return 0;
+}
+
+// Implementation notes:
+// When calling do_shutdown or change_period, the mutex is obtained, the variables in the minicron struct are modified, and
+// the condition variable is signalled. Possibly the minicron thread will miss the signal. To avoid this problem, whenever
+// the minicron thread acquires the mutex, it must check to see what the variables say to do (e.g., should it shut down?).
+
+static void*
+minicron_do (void *pv)
+{
+ struct minicron *CAST_FROM_VOIDP(p, pv);
+ toku_mutex_lock(&p->mutex);
+ while (1) {
+ if (p->do_shutdown) {
+ toku_mutex_unlock(&p->mutex);
+ toku_instr_delete_current_thread();
+ return toku_pthread_done(nullptr);
+ }
+ if (p->period_in_ms == 0) {
+ // if we aren't supposed to do it then just do an untimed wait.
+ toku_cond_wait(&p->condvar, &p->mutex);
+ }
+ else if (p->period_in_ms <= 1000) {
+ uint32_t period_in_ms = p->period_in_ms;
+ toku_mutex_unlock(&p->mutex);
+ usleep(period_in_ms * 1000);
+ toku_mutex_lock(&p->mutex);
+ }
+ else {
+            // Recompute the wakeup time every time (instead of once per call to f) in case the period changes.
+ toku_timespec_t wakeup_at = p->time_of_last_call_to_f;
+ wakeup_at.tv_sec += (p->period_in_ms/1000);
+ wakeup_at.tv_nsec += (p->period_in_ms % 1000) * 1000000;
+ toku_timespec_t now;
+ toku_gettime(&now);
+ int compare = timespec_compare(&wakeup_at, &now);
+ // if the time to wakeup has yet to come, then we sleep
+ // otherwise, we continue
+ if (compare > 0) {
+ int r = toku_cond_timedwait(&p->condvar, &p->mutex, &wakeup_at);
+ if (r!=0 && r!=ETIMEDOUT) fprintf(stderr, "%s:%d r=%d (%s)", __FILE__, __LINE__, r, strerror(r));
+ assert(r==0 || r==ETIMEDOUT);
+ }
+ }
+ // Now we woke up, and we should figure out what to do
+ if (p->do_shutdown) {
+ toku_mutex_unlock(&p->mutex);
+ toku_instr_delete_current_thread();
+ return toku_pthread_done(nullptr);
+ }
+ if (p->period_in_ms > 1000) {
+ toku_timespec_t now;
+ toku_gettime(&now);
+ toku_timespec_t time_to_call = p->time_of_last_call_to_f;
+ time_to_call.tv_sec += p->period_in_ms/1000;
+ time_to_call.tv_nsec += (p->period_in_ms % 1000) * 1000000;
+ int compare = timespec_compare(&time_to_call, &now);
+ if (compare <= 0) {
+ toku_gettime(&p->time_of_last_call_to_f); // the measured period includes the time to make the call.
+ toku_mutex_unlock(&p->mutex);
+ int r = p->f(p->arg);
+ assert(r==0);
+ toku_mutex_lock(&p->mutex);
+
+ }
+ }
+ else if (p->period_in_ms != 0) {
+ toku_mutex_unlock(&p->mutex);
+ int r = p->f(p->arg);
+ assert(r==0);
+ toku_mutex_lock(&p->mutex);
+ }
+ }
+}
+
+int
+toku_minicron_setup(struct minicron *p, uint32_t period_in_ms, int(*f)(void *), void *arg)
+{
+ p->f = f;
+ p->arg = arg;
+ toku_gettime(&p->time_of_last_call_to_f);
+ // printf("now=%.6f", p->time_of_last_call_to_f.tv_sec +
+ // p->time_of_last_call_to_f.tv_nsec*1e-9);
+ p->period_in_ms = period_in_ms;
+ p->do_shutdown = false;
+ toku_mutex_init(*minicron_p_mutex_key, &p->mutex, nullptr);
+ toku_cond_init(*minicron_p_condvar_key, &p->condvar, nullptr);
+ return toku_pthread_create(
+ *minicron_thread_key, &p->thread, nullptr, minicron_do, p);
+}
+
+void toku_minicron_change_period(struct minicron *p, uint32_t new_period) {
+ toku_mutex_lock(&p->mutex);
+ p->period_in_ms = new_period;
+ toku_cond_signal(&p->condvar);
+ toku_mutex_unlock(&p->mutex);
+}
+
+/* unlocked function for use by engine status which takes no locks */
+uint32_t
+toku_minicron_get_period_in_seconds_unlocked(struct minicron *p)
+{
+ uint32_t retval = p->period_in_ms/1000;
+ return retval;
+}
+
+/* unlocked function for use by engine status which takes no locks */
+uint32_t
+toku_minicron_get_period_in_ms_unlocked(struct minicron *p)
+{
+ uint32_t retval = p->period_in_ms;
+ return retval;
+}
+
+int
+toku_minicron_shutdown(struct minicron *p) {
+ toku_mutex_lock(&p->mutex);
+ assert(!p->do_shutdown);
+ p->do_shutdown = true;
+ //printf("%s:%d signalling\n", __FILE__, __LINE__);
+ toku_cond_signal(&p->condvar);
+ toku_mutex_unlock(&p->mutex);
+ void *returned_value;
+ //printf("%s:%d joining\n", __FILE__, __LINE__);
+ int r = toku_pthread_join(p->thread, &returned_value);
+ if (r!=0) fprintf(stderr, "%s:%d r=%d (%s)\n", __FILE__, __LINE__, r, strerror(r));
+ assert(r==0); assert(returned_value==0);
+ toku_cond_destroy(&p->condvar);
+ toku_mutex_destroy(&p->mutex);
+ //printf("%s:%d shutdowned\n", __FILE__, __LINE__);
+ return 0;
+}
+
+bool
+toku_minicron_has_been_shutdown(struct minicron *p) {
+ return p->do_shutdown;
+}
diff --git a/storage/tokudb/PerconaFT/util/minicron.h b/storage/tokudb/PerconaFT/util/minicron.h
new file mode 100644
index 00000000..b5b19bb1
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/minicron.h
@@ -0,0 +1,74 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_pthread.h>
+#include <toku_time.h>
+
+// Specification:
+// A minicron is a miniature cron job for executing a job periodically inside a pthread.
+// To create a minicron,
+// 1) allocate a "struct minicron" somewhere.
+// Rationale: This struct can be stored inside another struct (such as the cachetable), avoiding a malloc/free pair.
+// 2) call toku_minicron_setup, specifying a period (in milliseconds), a function, and some arguments.
+// If the period is positive then the function is called periodically (with the period specified)
+// Note: The period is measured from when the previous call to f finishes to when the new call starts.
+// Thus, if the period is 5 minutes, and it takes 8 minutes to run f, then the actual periodicity is 13 minutes.
+//    Rationale: If f always takes longer than the period to run, then it will get "behind". This module makes getting behind explicit.
+// 3) When finished, call toku_minicron_shutdown.
+// 4) If you want to change the period, then call toku_minicron_change_period. The time since f finished is applied to the new period
+// and the call is rescheduled. (If the time since f finished is more than the new period, then f is called immediately).
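+//
+// Example (a minimal sketch; 'do_checkpoint' and its argument are hypothetical):
+//
+//     static int do_checkpoint(void *arg) { /* ... */ return 0; }  // must return 0
+//
+//     struct minicron cron;
+//     int r = toku_minicron_setup(&cron, 60 * 1000, do_checkpoint, arg);  // every 60 seconds
+//     ...
+//     toku_minicron_change_period(&cron, 0);   // a period of 0 pauses periodic calls
+//     r = toku_minicron_shutdown(&cron);       // signals the thread and joins it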
+
+struct minicron {
+ toku_pthread_t thread;
+ toku_timespec_t time_of_last_call_to_f;
+ toku_mutex_t mutex;
+ toku_cond_t condvar;
+ int (*f)(void*);
+ void *arg;
+ uint32_t period_in_ms;
+ bool do_shutdown;
+};
+
+int toku_minicron_setup (struct minicron *s, uint32_t period_in_ms, int(*f)(void *), void *arg);
+void toku_minicron_change_period(struct minicron *p, uint32_t new_period);
+uint32_t toku_minicron_get_period_in_seconds_unlocked(struct minicron *p);
+uint32_t toku_minicron_get_period_in_ms_unlocked(struct minicron *p);
+int toku_minicron_shutdown(struct minicron *p);
+bool toku_minicron_has_been_shutdown(struct minicron *p);
diff --git a/storage/tokudb/PerconaFT/util/nb_mutex.h b/storage/tokudb/PerconaFT/util/nb_mutex.h
new file mode 100644
index 00000000..d777961a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/nb_mutex.h
@@ -0,0 +1,136 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include "rwlock.h"
+
+// Use case:
+// General purpose non-blocking mutex with properties:
+// 1. one writer at a time
+
+// An external mutex must be locked when using these functions. An alternate
+// design would bury a mutex into the nb_mutex itself. While this may
+// increase parallelism at the expense of single thread performance, we
+// are experimenting with a single higher level lock.
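+//
+// Example (a minimal sketch; 'm' is some external toku_mutex_t the caller owns,
+// and the instrumentation keys are placeholders):
+//
+//     struct nb_mutex nbm;
+//     nb_mutex_init(*some_mutex_key, *some_rwlock_key, &nbm);
+//     toku_mutex_lock(&m);
+//     nb_mutex_lock(&nbm, &m);        // blocks until no other writer holds nbm; m is held on return
+//     toku_mutex_unlock(&m);
+//     /* ... exclusive work ... */
+//     toku_mutex_lock(&m);
+//     nb_mutex_unlock(&nbm);
+//     toku_mutex_unlock(&m);
+//     nb_mutex_destroy(&nbm);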
+
+extern toku_instr_key *nb_mutex_key;
+
+typedef struct nb_mutex *NB_MUTEX;
+struct nb_mutex {
+ struct st_rwlock lock;
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_mutex_t toku_mutex;
+#endif
+};
+
+#if defined(TOKU_MYSQL_WITH_PFS)
+#define nb_mutex_init(MK, RK, M) \
+ inline_nb_mutex_init(MK, RK, M)
+#else
+#define nb_mutex_init(MK, RK, M) inline_nb_mutex_init(M)
+#endif
+
+// initialize an nb mutex
+inline void inline_nb_mutex_init(
+#if defined(TOKU_MYSQL_WITH_PFS)
+ const toku_instr_key &mutex_instr_key,
+ const toku_instr_key &rwlock_instr_key,
+#endif
+ NB_MUTEX nb_mutex) {
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_mutex_init(mutex_instr_key, &nb_mutex->toku_mutex, nullptr);
+#endif
+ rwlock_init(rwlock_instr_key, &nb_mutex->lock);
+}
+
+// destroy an nb mutex (and its underlying rwlock)
+inline void nb_mutex_destroy(NB_MUTEX nb_mutex) {
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_instr_mutex_destroy(nb_mutex->toku_mutex.psi_mutex);
+#endif
+ rwlock_destroy(&nb_mutex->lock);
+}
+
+// obtain a write lock
+// expects: mutex is locked
+inline void nb_mutex_lock(NB_MUTEX nb_mutex, toku_mutex_t *mutex) {
+#ifdef TOKU_MYSQL_WITH_PFS
+ toku_mutex_instrumentation mutex_instr;
+ toku_instr_mutex_lock_start(mutex_instr,
+ *mutex,
+ __FILE__,
+ __LINE__); // TODO: pull these to caller?
+#endif
+ rwlock_write_lock(&nb_mutex->lock, mutex);
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_instr_mutex_lock_end(mutex_instr, 0);
+#endif
+}
+
+// release a write lock
+// expects: mutex is locked
+
+inline void nb_mutex_unlock(NB_MUTEX nb_mutex) {
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_instr_mutex_unlock(nb_mutex->toku_mutex.psi_mutex);
+#endif
+ rwlock_write_unlock(&nb_mutex->lock);
+}
+
+static inline void nb_mutex_wait_for_users(NB_MUTEX nb_mutex, toku_mutex_t *mutex) {
+ rwlock_wait_for_users(&nb_mutex->lock, mutex);
+}
+
+// returns: the number of writers who are waiting for the lock
+
+static inline int nb_mutex_blocked_writers(NB_MUTEX nb_mutex) {
+ return rwlock_blocked_writers(&nb_mutex->lock);
+}
+
+// returns: the number of writers
+
+static inline int nb_mutex_writers(NB_MUTEX nb_mutex) {
+ return rwlock_writers(&nb_mutex->lock);
+}
+
+// returns: the sum of the number of readers, pending readers,
+// writers, and pending writers
+static inline int nb_mutex_users(NB_MUTEX nb_mutex) {
+ return rwlock_users(&nb_mutex->lock);
+}
diff --git a/storage/tokudb/PerconaFT/util/omt.cc b/storage/tokudb/PerconaFT/util/omt.cc
new file mode 100644
index 00000000..44da9847
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/omt.cc
@@ -0,0 +1,1388 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident \
+ "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <db.h>
+#include <string.h>
+
+#include <portability/memory.h>
+
+namespace toku {
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::create(void) {
+ this->create_internal(2);
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::create_no_array(void) {
+ if (!supports_marks) {
+ this->create_internal_no_array(0);
+ } else {
+ this->is_array = false;
+ this->capacity = 0;
+ this->d.t.nodes = nullptr;
+ this->d.t.root.set_to_null();
+ this->d.t.free_idx = 0;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::create_from_sorted_array(
+ const omtdata_t *const values,
+ const uint32_t numvalues) {
+ this->create_internal(numvalues);
+ memcpy(this->d.a.values, values, numvalues * (sizeof values[0]));
+ this->d.a.num_values = numvalues;
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void
+ omt<omtdata_t, omtdataout_t, supports_marks>::create_steal_sorted_array(
+ omtdata_t **const values,
+ const uint32_t numvalues,
+ const uint32_t new_capacity) {
+ paranoid_invariant_notnull(values);
+ this->create_internal_no_array(new_capacity);
+ this->d.a.num_values = numvalues;
+ this->d.a.values = *values;
+ *values = nullptr;
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::split_at(
+ omt *const newomt,
+ const uint32_t idx) {
+ barf_if_marked(*this);
+ paranoid_invariant_notnull(newomt);
+ if (idx > this->size()) {
+ return EINVAL;
+ }
+ this->convert_to_array();
+ const uint32_t newsize = this->size() - idx;
+ newomt->create_from_sorted_array(
+ &this->d.a.values[this->d.a.start_idx + idx], newsize);
+ this->d.a.num_values = idx;
+ this->maybe_resize_array(idx);
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::merge(
+ omt *const leftomt,
+ omt *const rightomt) {
+ barf_if_marked(*this);
+ paranoid_invariant_notnull(leftomt);
+ paranoid_invariant_notnull(rightomt);
+ const uint32_t leftsize = leftomt->size();
+ const uint32_t rightsize = rightomt->size();
+ const uint32_t newsize = leftsize + rightsize;
+
+ if (leftomt->is_array) {
+ if (leftomt->capacity -
+ (leftomt->d.a.start_idx + leftomt->d.a.num_values) >=
+ rightsize) {
+ this->create_steal_sorted_array(&leftomt->d.a.values,
+ leftomt->d.a.num_values,
+ leftomt->capacity);
+ this->d.a.start_idx = leftomt->d.a.start_idx;
+ } else {
+ this->create_internal(newsize);
+ memcpy(&this->d.a.values[0],
+ &leftomt->d.a.values[leftomt->d.a.start_idx],
+ leftomt->d.a.num_values * (sizeof this->d.a.values[0]));
+ }
+ } else {
+ this->create_internal(newsize);
+ leftomt->fill_array_with_subtree_values(&this->d.a.values[0],
+ leftomt->d.t.root);
+ }
+ leftomt->destroy();
+ this->d.a.num_values = leftsize;
+
+ if (rightomt->is_array) {
+ memcpy(
+ &this->d.a.values[this->d.a.start_idx + this->d.a.num_values],
+ &rightomt->d.a.values[rightomt->d.a.start_idx],
+ rightomt->d.a.num_values * (sizeof this->d.a.values[0]));
+ } else {
+ rightomt->fill_array_with_subtree_values(
+ &this->d.a.values[this->d.a.start_idx + this->d.a.num_values],
+ rightomt->d.t.root);
+ }
+ rightomt->destroy();
+ this->d.a.num_values += rightsize;
+ paranoid_invariant(this->size() == newsize);
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::clone(const omt &src) {
+ barf_if_marked(*this);
+ this->create_internal(src.size());
+ if (src.is_array) {
+ memcpy(&this->d.a.values[0],
+ &src.d.a.values[src.d.a.start_idx],
+ src.d.a.num_values * (sizeof this->d.a.values[0]));
+ } else {
+ src.fill_array_with_subtree_values(&this->d.a.values[0],
+ src.d.t.root);
+ }
+ this->d.a.num_values = src.size();
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::clear(void) {
+ if (this->is_array) {
+ this->d.a.start_idx = 0;
+ this->d.a.num_values = 0;
+ } else {
+ this->d.t.root.set_to_null();
+ this->d.t.free_idx = 0;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::destroy(void) {
+ this->clear();
+ this->capacity = 0;
+ if (this->is_array) {
+ if (this->d.a.values != nullptr) {
+ toku_free(this->d.a.values);
+ }
+ this->d.a.values = nullptr;
+ } else {
+ if (this->d.t.nodes != nullptr) {
+ toku_free(this->d.t.nodes);
+ }
+ this->d.t.nodes = nullptr;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ uint32_t omt<omtdata_t, omtdataout_t, supports_marks>::size(void) const {
+ if (this->is_array) {
+ return this->d.a.num_values;
+ } else {
+ return this->nweight(this->d.t.root);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::insert(
+ const omtdata_t &value,
+ const omtcmp_t &v,
+ uint32_t *const idx) {
+ int r;
+ uint32_t insert_idx;
+
+ r = this->find_zero<omtcmp_t, h>(v, nullptr, &insert_idx);
+ if (r == 0) {
+ if (idx)
+ *idx = insert_idx;
+ return DB_KEYEXIST;
+ }
+ if (r != DB_NOTFOUND)
+ return r;
+
+ if ((r = this->insert_at(value, insert_idx)))
+ return r;
+ if (idx)
+ *idx = insert_idx;
+
+ return 0;
+ }
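+
+    // Example (an illustrative sketch; 'cmp_int' is a hypothetical comparison
+    // callback matching the template parameter h):
+    //
+    //     static int cmp_int(const int &a, const int &b) {
+    //         return a < b ? -1 : (a > b ? +1 : 0);
+    //     }
+    //     toku::omt<int, int, false> t;
+    //     t.create();
+    //     uint32_t idx;
+    //     int r = t.insert<int, cmp_int>(5, 5, &idx);  // 0, or DB_KEYEXIST if already present
+    //     t.destroy();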
+
+ // The following 3 functions implement a static if for us.
+ template <typename omtdata_t, typename omtdataout_t>
+ static void barf_if_marked(
+ const omt<omtdata_t, omtdataout_t, false> &UU(omt)) {}
+
+ template <typename omtdata_t, typename omtdataout_t>
+ static void barf_if_marked(const omt<omtdata_t, omtdataout_t, true> &omt) {
+ invariant(!omt.has_marks());
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ bool omt<omtdata_t, omtdataout_t, supports_marks>::has_marks(void) const {
+ static_assert(supports_marks, "Does not support marks");
+ if (this->d.t.root.is_null()) {
+ return false;
+ }
+ const omt_node &node = this->d.t.nodes[this->d.t.root.get_index()];
+ return node.get_marks_below() || node.get_marked();
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::insert_at(
+ const omtdata_t &value,
+ const uint32_t idx) {
+ barf_if_marked(*this);
+ if (idx > this->size()) {
+ return EINVAL;
+ }
+
+ this->maybe_resize_or_convert(this->size() + 1);
+ if (this->is_array && idx != this->d.a.num_values &&
+ (idx != 0 || this->d.a.start_idx == 0)) {
+ this->convert_to_tree();
+ }
+ if (this->is_array) {
+ if (idx == this->d.a.num_values) {
+ this->d.a.values[this->d.a.start_idx + this->d.a.num_values] =
+ value;
+ } else {
+ this->d.a.values[--this->d.a.start_idx] = value;
+ }
+ this->d.a.num_values++;
+ } else {
+ subtree *rebalance_subtree = nullptr;
+ this->insert_internal(
+ &this->d.t.root, value, idx, &rebalance_subtree);
+ if (rebalance_subtree != nullptr) {
+ this->rebalance(rebalance_subtree);
+ }
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::set_at(
+ const omtdata_t &value,
+ const uint32_t idx) {
+ barf_if_marked(*this);
+ if (idx >= this->size()) {
+ return EINVAL;
+ }
+
+ if (this->is_array) {
+ this->set_at_internal_array(value, idx);
+ } else {
+ this->set_at_internal(this->d.t.root, value, idx);
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::delete_at(
+ const uint32_t idx) {
+ barf_if_marked(*this);
+ if (idx >= this->size()) {
+ return EINVAL;
+ }
+
+ this->maybe_resize_or_convert(this->size() - 1);
+ if (this->is_array && idx != 0 && idx != this->d.a.num_values - 1) {
+ this->convert_to_tree();
+ }
+ if (this->is_array) {
+ // Testing for 0 does not rule out it being the last entry.
+ // Test explicitly for num_values-1
+ if (idx != this->d.a.num_values - 1) {
+ this->d.a.start_idx++;
+ }
+ this->d.a.num_values--;
+ } else {
+ subtree *rebalance_subtree = nullptr;
+ this->delete_internal(
+ &this->d.t.root, idx, nullptr, &rebalance_subtree);
+ if (rebalance_subtree != nullptr) {
+ this->rebalance(rebalance_subtree);
+ }
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate(
+ iterate_extra_t *const iterate_extra) const {
+ return this->iterate_on_range<iterate_extra_t, f>(
+ 0, this->size(), iterate_extra);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_on_range(
+ const uint32_t left,
+ const uint32_t right,
+ iterate_extra_t *const iterate_extra) const {
+ if (right > this->size()) {
+ return EINVAL;
+ }
+ if (left == right) {
+ return 0;
+ }
+ if (this->is_array) {
+ return this->iterate_internal_array<iterate_extra_t, f>(
+ left, right, iterate_extra);
+ }
+ return this->iterate_internal<iterate_extra_t, f>(
+ left, right, this->d.t.root, 0, iterate_extra);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_and_mark_range(
+ const uint32_t left,
+ const uint32_t right,
+ iterate_extra_t *const iterate_extra) {
+ static_assert(supports_marks, "does not support marks");
+ if (right > this->size()) {
+ return EINVAL;
+ }
+ if (left == right) {
+ return 0;
+ }
+ paranoid_invariant(!this->is_array);
+ return this->iterate_and_mark_range_internal<iterate_extra_t, f>(
+ left, right, this->d.t.root, 0, iterate_extra);
+ }
+
+ // TODO: We can optimize this if we steal 3 bits. 1 bit: this node is
+ // marked. 1 bit: left subtree has marks. 1 bit: right subtree has marks.
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_over_marked(
+ iterate_extra_t *const iterate_extra) const {
+ static_assert(supports_marks, "does not support marks");
+ paranoid_invariant(!this->is_array);
+ return this->iterate_over_marked_internal<iterate_extra_t, f>(
+ this->d.t.root, 0, iterate_extra);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::unmark(
+ const subtree &st,
+ const uint32_t index,
+ GrowableArray<node_idx> *const indexes) {
+ if (st.is_null()) {
+ return;
+ }
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t index_root = index + this->nweight(n.left);
+
+ const bool below = n.get_marks_below();
+ if (below) {
+ this->unmark(n.left, index, indexes);
+ }
+ if (n.get_marked()) {
+ indexes->push(index_root);
+ }
+ n.clear_stolen_bits();
+ if (below) {
+ this->unmark(n.right, index_root + 1, indexes);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::delete_all_marked(void) {
+ static_assert(supports_marks, "does not support marks");
+ if (!this->has_marks()) {
+ return;
+ }
+ paranoid_invariant(!this->is_array);
+ GrowableArray<node_idx> marked_indexes;
+ marked_indexes.init();
+
+ // Remove all marks.
+ // We need to delete all the stolen bits before calling delete_at to
+ // prevent barfing.
+ this->unmark(this->d.t.root, 0, &marked_indexes);
+
+ for (uint32_t i = 0; i < marked_indexes.get_size(); i++) {
+ // Delete from left to right, shift by number already deleted.
+ // Alternative is delete from right to left.
+ int r = this->delete_at(marked_indexes.fetch_unchecked(i) - i);
+ lazy_assert_zero(r);
+ }
+ marked_indexes.deinit();
+ barf_if_marked(*this);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ uint32_t omt<omtdata_t, omtdataout_t, supports_marks>::
+ verify_marks_consistent_internal(const subtree &st,
+ const bool UU(allow_marks)) const {
+ if (st.is_null()) {
+ return 0;
+ }
+ const omt_node &node = this->d.t.nodes[st.get_index()];
+ uint32_t num_marks =
+ verify_marks_consistent_internal(node.left, node.get_marks_below());
+ num_marks += verify_marks_consistent_internal(node.right,
+ node.get_marks_below());
+ if (node.get_marks_below()) {
+ paranoid_invariant(allow_marks);
+ paranoid_invariant(num_marks > 0);
+ } else {
+ // redundant with invariant below, but nice to have explicitly
+ paranoid_invariant(num_marks == 0);
+ }
+ if (node.get_marked()) {
+ paranoid_invariant(allow_marks);
+ ++num_marks;
+ }
+ return num_marks;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::verify_marks_consistent(
+ void) const {
+ static_assert(supports_marks, "does not support marks");
+ paranoid_invariant(!this->is_array);
+ this->verify_marks_consistent_internal(this->d.t.root, true);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename iterate_extra_t,
+ int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::iterate_ptr(
+ iterate_extra_t *const iterate_extra) {
+ if (this->is_array) {
+ this->iterate_ptr_internal_array<iterate_extra_t, f>(
+ 0, this->size(), iterate_extra);
+ } else {
+ this->iterate_ptr_internal<iterate_extra_t, f>(
+ 0, this->size(), this->d.t.root, 0, iterate_extra);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::fetch(
+ const uint32_t idx,
+ omtdataout_t *const value) const {
+ if (idx >= this->size()) {
+ return EINVAL;
+ }
+ if (this->is_array) {
+ this->fetch_internal_array(idx, value);
+ } else {
+ this->fetch_internal(this->d.t.root, idx, value);
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_zero(
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ uint32_t tmp_index;
+ uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index;
+ int r;
+ if (this->is_array) {
+ r = this->find_internal_zero_array<omtcmp_t, h>(
+ extra, value, child_idxp);
+ } else {
+ r = this->find_internal_zero<omtcmp_t, h>(
+ this->d.t.root, extra, value, child_idxp);
+ }
+ return r;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find(
+ const omtcmp_t &extra,
+ int direction,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ uint32_t tmp_index;
+ uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index;
+ paranoid_invariant(direction != 0);
+ if (direction < 0) {
+ if (this->is_array) {
+ return this->find_internal_minus_array<omtcmp_t, h>(
+ extra, value, child_idxp);
+ } else {
+ return this->find_internal_minus<omtcmp_t, h>(
+ this->d.t.root, extra, value, child_idxp);
+ }
+ } else {
+ if (this->is_array) {
+ return this->find_internal_plus_array<omtcmp_t, h>(
+ extra, value, child_idxp);
+ } else {
+ return this->find_internal_plus<omtcmp_t, h>(
+ this->d.t.root, extra, value, child_idxp);
+ }
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ size_t omt<omtdata_t, omtdataout_t, supports_marks>::memory_size(void) {
+ if (this->is_array) {
+ return (sizeof *this) +
+ this->capacity * (sizeof this->d.a.values[0]);
+ }
+ return (sizeof *this) + this->capacity * (sizeof this->d.t.nodes[0]);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::create_internal_no_array(
+ const uint32_t new_capacity) {
+ this->is_array = true;
+ this->d.a.start_idx = 0;
+ this->d.a.num_values = 0;
+ this->d.a.values = nullptr;
+ this->capacity = new_capacity;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::create_internal(
+ const uint32_t new_capacity) {
+ this->create_internal_no_array(new_capacity);
+ XMALLOC_N(this->capacity, this->d.a.values);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ uint32_t omt<omtdata_t, omtdataout_t, supports_marks>::nweight(
+ const subtree &st) const {
+ if (st.is_null()) {
+ return 0;
+ } else {
+ return this->d.t.nodes[st.get_index()].weight;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ typename omt<omtdata_t, omtdataout_t, supports_marks>::node_idx
+ omt<omtdata_t, omtdataout_t, supports_marks>::node_malloc(void) {
+ paranoid_invariant(this->d.t.free_idx < this->capacity);
+ omt_node &n = this->d.t.nodes[this->d.t.free_idx];
+ n.clear_stolen_bits();
+ return this->d.t.free_idx++;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::node_free(
+ const node_idx UU(idx)) {
+ paranoid_invariant(idx < this->capacity);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::maybe_resize_array(
+ const uint32_t n) {
+ const uint32_t new_size = n <= 2 ? 4 : 2 * n;
+ const uint32_t room = this->capacity - this->d.a.start_idx;
+
+ if (room < n || this->capacity / 2 >= new_size) {
+ omtdata_t *XMALLOC_N(new_size, tmp_values);
+ memcpy(tmp_values,
+ &this->d.a.values[this->d.a.start_idx],
+ this->d.a.num_values * (sizeof tmp_values[0]));
+ this->d.a.start_idx = 0;
+ this->capacity = new_size;
+ toku_free(this->d.a.values);
+ this->d.a.values = tmp_values;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::
+ fill_array_with_subtree_values(omtdata_t *const array,
+ const subtree &st) const {
+ if (st.is_null())
+ return;
+ const omt_node &tree = this->d.t.nodes[st.get_index()];
+ this->fill_array_with_subtree_values(&array[0], tree.left);
+ array[this->nweight(tree.left)] = tree.value;
+ this->fill_array_with_subtree_values(
+ &array[this->nweight(tree.left) + 1], tree.right);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::convert_to_array(void) {
+ if (!this->is_array) {
+ const uint32_t num_values = this->size();
+ uint32_t new_size = 2 * num_values;
+ new_size = new_size < 4 ? 4 : new_size;
+
+ omtdata_t *XMALLOC_N(new_size, tmp_values);
+ this->fill_array_with_subtree_values(tmp_values, this->d.t.root);
+ toku_free(this->d.t.nodes);
+ this->is_array = true;
+ this->capacity = new_size;
+ this->d.a.num_values = num_values;
+ this->d.a.values = tmp_values;
+ this->d.a.start_idx = 0;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void
+ omt<omtdata_t, omtdataout_t, supports_marks>::rebuild_from_sorted_array(
+ subtree *const st,
+ const omtdata_t *const values,
+ const uint32_t numvalues) {
+ if (numvalues == 0) {
+ st->set_to_null();
+ } else {
+ const uint32_t halfway = numvalues / 2;
+ const node_idx newidx = this->node_malloc();
+ omt_node *const newnode = &this->d.t.nodes[newidx];
+ newnode->weight = numvalues;
+ newnode->value = values[halfway];
+ st->set_index(newidx);
+ // update everything before the recursive calls so the second call
+ // can be a tail call.
+ this->rebuild_from_sorted_array(
+ &newnode->left, &values[0], halfway);
+ this->rebuild_from_sorted_array(&newnode->right,
+ &values[halfway + 1],
+ numvalues - (halfway + 1));
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::convert_to_tree(void) {
+ if (this->is_array) {
+ const uint32_t num_nodes = this->size();
+ uint32_t new_size = num_nodes * 2;
+ new_size = new_size < 4 ? 4 : new_size;
+
+ omt_node *XMALLOC_N(new_size, new_nodes);
+ omtdata_t *const values = this->d.a.values;
+ omtdata_t *const tmp_values = &values[this->d.a.start_idx];
+ this->is_array = false;
+ this->d.t.nodes = new_nodes;
+ this->capacity = new_size;
+ this->d.t.free_idx = 0;
+ this->d.t.root.set_to_null();
+ this->rebuild_from_sorted_array(
+ &this->d.t.root, tmp_values, num_nodes);
+ toku_free(values);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::maybe_resize_or_convert(
+ const uint32_t n) {
+ if (this->is_array) {
+ this->maybe_resize_array(n);
+ } else {
+ const uint32_t new_size = n <= 2 ? 4 : 2 * n;
+ const uint32_t num_nodes = this->nweight(this->d.t.root);
+ if ((this->capacity / 2 >= new_size) ||
+ (this->d.t.free_idx >= this->capacity && num_nodes < n) ||
+ (this->capacity < n)) {
+ this->convert_to_array();
+                // if we had a free list, the "supports_marks" version could
+                // just resize; as it is now, we have to convert to an array
+                // and back.
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ }
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ bool omt<omtdata_t, omtdataout_t, supports_marks>::will_need_rebalance(
+ const subtree &st,
+ const int leftmod,
+ const int rightmod) const {
+ if (st.is_null()) {
+ return false;
+ }
+ const omt_node &n = this->d.t.nodes[st.get_index()];
+ // one of the 1's is for the root.
+ // the other is to take ceil(n/2)
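+        // e.g., with leftmod == rightmod == 0, weight_left == 0 and weight_right == 4:
+        // 1 < (1 + 1 + 4) / 2 == 3, so a rebalance is needed.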
+ const uint32_t weight_left = this->nweight(n.left) + leftmod;
+ const uint32_t weight_right = this->nweight(n.right) + rightmod;
+ return ((1 + weight_left < (1 + 1 + weight_right) / 2) ||
+ (1 + weight_right < (1 + 1 + weight_left) / 2));
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::insert_internal(
+ subtree *const subtreep,
+ const omtdata_t &value,
+ const uint32_t idx,
+ subtree **const rebalance_subtree) {
+ if (subtreep->is_null()) {
+ paranoid_invariant_zero(idx);
+ const node_idx newidx = this->node_malloc();
+ omt_node *const newnode = &this->d.t.nodes[newidx];
+ newnode->weight = 1;
+ newnode->left.set_to_null();
+ newnode->right.set_to_null();
+ newnode->value = value;
+ subtreep->set_index(newidx);
+ } else {
+ omt_node &n = this->d.t.nodes[subtreep->get_index()];
+ n.weight++;
+ if (idx <= this->nweight(n.left)) {
+ if (*rebalance_subtree == nullptr &&
+ this->will_need_rebalance(*subtreep, 1, 0)) {
+ *rebalance_subtree = subtreep;
+ }
+ this->insert_internal(&n.left, value, idx, rebalance_subtree);
+ } else {
+ if (*rebalance_subtree == nullptr &&
+ this->will_need_rebalance(*subtreep, 0, 1)) {
+ *rebalance_subtree = subtreep;
+ }
+ const uint32_t sub_index = idx - this->nweight(n.left) - 1;
+ this->insert_internal(
+ &n.right, value, sub_index, rebalance_subtree);
+ }
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::set_at_internal_array(
+ const omtdata_t &value,
+ const uint32_t idx) {
+ this->d.a.values[this->d.a.start_idx + idx] = value;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::set_at_internal(
+ const subtree &st,
+ const omtdata_t &value,
+ const uint32_t idx) {
+ paranoid_invariant(!st.is_null());
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t leftweight = this->nweight(n.left);
+ if (idx < leftweight) {
+ this->set_at_internal(n.left, value, idx);
+ } else if (idx == leftweight) {
+ n.value = value;
+ } else {
+ this->set_at_internal(n.right, value, idx - leftweight - 1);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::delete_internal(
+ subtree *const subtreep,
+ const uint32_t idx,
+ omt_node *const copyn,
+ subtree **const rebalance_subtree) {
+ paranoid_invariant_notnull(subtreep);
+ paranoid_invariant_notnull(rebalance_subtree);
+ paranoid_invariant(!subtreep->is_null());
+ omt_node &n = this->d.t.nodes[subtreep->get_index()];
+ const uint32_t leftweight = this->nweight(n.left);
+ if (idx < leftweight) {
+ n.weight--;
+ if (*rebalance_subtree == nullptr &&
+ this->will_need_rebalance(*subtreep, -1, 0)) {
+ *rebalance_subtree = subtreep;
+ }
+ this->delete_internal(&n.left, idx, copyn, rebalance_subtree);
+ } else if (idx == leftweight) {
+ if (n.left.is_null()) {
+ const uint32_t oldidx = subtreep->get_index();
+ *subtreep = n.right;
+ if (copyn != nullptr) {
+ copyn->value = n.value;
+ }
+ this->node_free(oldidx);
+ } else if (n.right.is_null()) {
+ const uint32_t oldidx = subtreep->get_index();
+ *subtreep = n.left;
+ if (copyn != nullptr) {
+ copyn->value = n.value;
+ }
+ this->node_free(oldidx);
+ } else {
+ if (*rebalance_subtree == nullptr &&
+ this->will_need_rebalance(*subtreep, 0, -1)) {
+ *rebalance_subtree = subtreep;
+ }
+ // don't need to copy up value, it's only used by this
+ // next call, and when that gets to the bottom there
+ // won't be any more recursion
+ n.weight--;
+ this->delete_internal(&n.right, 0, &n, rebalance_subtree);
+ }
+ } else {
+ n.weight--;
+ if (*rebalance_subtree == nullptr &&
+ this->will_need_rebalance(*subtreep, 0, -1)) {
+ *rebalance_subtree = subtreep;
+ }
+ this->delete_internal(
+ &n.right, idx - leftweight - 1, copyn, rebalance_subtree);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_internal_array(
+ const uint32_t left,
+ const uint32_t right,
+ iterate_extra_t *const iterate_extra) const {
+ int r;
+ for (uint32_t i = left; i < right; ++i) {
+ r = f(this->d.a.values[this->d.a.start_idx + i], i, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename iterate_extra_t,
+ int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::iterate_ptr_internal(
+ const uint32_t left,
+ const uint32_t right,
+ const subtree &st,
+ const uint32_t idx,
+ iterate_extra_t *const iterate_extra) {
+ if (!st.is_null()) {
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t idx_root = idx + this->nweight(n.left);
+ if (left < idx_root) {
+ this->iterate_ptr_internal<iterate_extra_t, f>(
+ left, right, n.left, idx, iterate_extra);
+ }
+ if (left <= idx_root && idx_root < right) {
+ int r = f(&n.value, idx_root, iterate_extra);
+ lazy_assert_zero(r);
+ }
+ if (idx_root + 1 < right) {
+ this->iterate_ptr_internal<iterate_extra_t, f>(
+ left, right, n.right, idx_root + 1, iterate_extra);
+ }
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename iterate_extra_t,
+ int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void
+ omt<omtdata_t, omtdataout_t, supports_marks>::iterate_ptr_internal_array(
+ const uint32_t left,
+ const uint32_t right,
+ iterate_extra_t *const iterate_extra) {
+ for (uint32_t i = left; i < right; ++i) {
+ int r =
+ f(&this->d.a.values[this->d.a.start_idx + i], i, iterate_extra);
+ lazy_assert_zero(r);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_internal(
+ const uint32_t left,
+ const uint32_t right,
+ const subtree &st,
+ const uint32_t idx,
+ iterate_extra_t *const iterate_extra) const {
+ if (st.is_null()) {
+ return 0;
+ }
+ int r;
+ const omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t idx_root = idx + this->nweight(n.left);
+ if (left < idx_root) {
+ r = this->iterate_internal<iterate_extra_t, f>(
+ left, right, n.left, idx, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ if (left <= idx_root && idx_root < right) {
+ r = f(n.value, idx_root, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ if (idx_root + 1 < right) {
+ return this->iterate_internal<iterate_extra_t, f>(
+ left, right, n.right, idx_root + 1, iterate_extra);
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::
+ iterate_and_mark_range_internal(const uint32_t left,
+ const uint32_t right,
+ const subtree &st,
+ const uint32_t idx,
+ iterate_extra_t *const iterate_extra) {
+ paranoid_invariant(!st.is_null());
+ int r;
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t idx_root = idx + this->nweight(n.left);
+ if (left < idx_root && !n.left.is_null()) {
+ n.set_marks_below_bit();
+ r = this->iterate_and_mark_range_internal<iterate_extra_t, f>(
+ left, right, n.left, idx, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ if (left <= idx_root && idx_root < right) {
+ n.set_marked_bit();
+ r = f(n.value, idx_root, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ if (idx_root + 1 < right && !n.right.is_null()) {
+ n.set_marks_below_bit();
+ return this->iterate_and_mark_range_internal<iterate_extra_t, f>(
+ left, right, n.right, idx_root + 1, iterate_extra);
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int
+ omt<omtdata_t, omtdataout_t, supports_marks>::iterate_over_marked_internal(
+ const subtree &st,
+ const uint32_t idx,
+ iterate_extra_t *const iterate_extra) const {
+ if (st.is_null()) {
+ return 0;
+ }
+ int r;
+ const omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t idx_root = idx + this->nweight(n.left);
+ if (n.get_marks_below()) {
+ r = this->iterate_over_marked_internal<iterate_extra_t, f>(
+ n.left, idx, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ if (n.get_marked()) {
+ r = f(n.value, idx_root, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ if (n.get_marks_below()) {
+ return this->iterate_over_marked_internal<iterate_extra_t, f>(
+ n.right, idx_root + 1, iterate_extra);
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::fetch_internal_array(
+ const uint32_t i,
+ omtdataout_t *const value) const {
+ if (value != nullptr) {
+ copyout(value, &this->d.a.values[this->d.a.start_idx + i]);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::fetch_internal(
+ const subtree &st,
+ const uint32_t i,
+ omtdataout_t *const value) const {
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t leftweight = this->nweight(n.left);
+ if (i < leftweight) {
+ this->fetch_internal(n.left, i, value);
+ } else if (i == leftweight) {
+ if (value != nullptr) {
+ copyout(value, &n);
+ }
+ } else {
+ this->fetch_internal(n.right, i - leftweight - 1, value);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void
+ omt<omtdata_t, omtdataout_t, supports_marks>::fill_array_with_subtree_idxs(
+ node_idx *const array,
+ const subtree &st) const {
+ if (!st.is_null()) {
+ const omt_node &tree = this->d.t.nodes[st.get_index()];
+ this->fill_array_with_subtree_idxs(&array[0], tree.left);
+ array[this->nweight(tree.left)] = st.get_index();
+ this->fill_array_with_subtree_idxs(
+ &array[this->nweight(tree.left) + 1], tree.right);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void
+ omt<omtdata_t, omtdataout_t, supports_marks>::rebuild_subtree_from_idxs(
+ subtree *const st,
+ const node_idx *const idxs,
+ const uint32_t numvalues) {
+ if (numvalues == 0) {
+ st->set_to_null();
+ } else {
+ uint32_t halfway = numvalues / 2;
+ st->set_index(idxs[halfway]);
+ // node_idx newidx = idxs[halfway];
+ omt_node &newnode = this->d.t.nodes[st->get_index()];
+ newnode.weight = numvalues;
+ // value is already in there.
+ this->rebuild_subtree_from_idxs(&newnode.left, &idxs[0], halfway);
+ this->rebuild_subtree_from_idxs(
+ &newnode.right, &idxs[halfway + 1], numvalues - (halfway + 1));
+ // n_idx = newidx;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::rebalance(
+ subtree *const st) {
+ node_idx idx = st->get_index();
+ if (idx == this->d.t.root.get_index()) {
+ // Try to convert to an array.
+            // If this fails (the malloc fails), nothing will have changed.
+ // In the failure case we continue on to the standard rebalance
+ // algorithm.
+ this->convert_to_array();
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ } else {
+ const omt_node &n = this->d.t.nodes[idx];
+ node_idx *tmp_array;
+ size_t mem_needed = n.weight * (sizeof tmp_array[0]);
+ size_t mem_free = (this->capacity - this->d.t.free_idx) *
+ (sizeof this->d.t.nodes[0]);
+ bool malloced;
+ if (mem_needed <= mem_free) {
+ // There is sufficient free space at the end of the nodes array
+ // to hold enough node indexes to rebalance.
+ malloced = false;
+ tmp_array = reinterpret_cast<node_idx *>(
+ &this->d.t.nodes[this->d.t.free_idx]);
+ } else {
+ malloced = true;
+ XMALLOC_N(n.weight, tmp_array);
+ }
+ this->fill_array_with_subtree_idxs(tmp_array, *st);
+ this->rebuild_subtree_from_idxs(st, tmp_array, n.weight);
+ if (malloced)
+ toku_free(tmp_array);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(
+ omtdata_t *const out,
+ const omt_node *const n) {
+ *out = n->value;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(
+ omtdata_t **const out,
+ omt_node *const n) {
+ *out = &n->value;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(
+ omtdata_t *const out,
+ const omtdata_t *const stored_value_ptr) {
+ *out = *stored_value_ptr;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(
+ omtdata_t **const out,
+ omtdata_t *const stored_value_ptr) {
+ *out = stored_value_ptr;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_zero_array(
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ uint32_t min = this->d.a.start_idx;
+ uint32_t limit = this->d.a.start_idx + this->d.a.num_values;
+ uint32_t best_pos = subtree::NODE_NULL;
+ uint32_t best_zero = subtree::NODE_NULL;
+
+ while (min != limit) {
+ uint32_t mid = (min + limit) / 2;
+ int hv = h(this->d.a.values[mid], extra);
+ if (hv < 0) {
+ min = mid + 1;
+ } else if (hv > 0) {
+ best_pos = mid;
+ limit = mid;
+ } else {
+ best_zero = mid;
+ limit = mid;
+ }
+ }
+ if (best_zero != subtree::NODE_NULL) {
+ // Found a zero
+ if (value != nullptr) {
+ copyout(value, &this->d.a.values[best_zero]);
+ }
+ *idxp = best_zero - this->d.a.start_idx;
+ return 0;
+ }
+ if (best_pos != subtree::NODE_NULL)
+ *idxp = best_pos - this->d.a.start_idx;
+ else
+ *idxp = this->d.a.num_values;
+ return DB_NOTFOUND;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_zero(
+ const subtree &st,
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ if (st.is_null()) {
+ *idxp = 0;
+ return DB_NOTFOUND;
+ }
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ int hv = h(n.value, extra);
+ if (hv < 0) {
+ int r = this->find_internal_zero<omtcmp_t, h>(
+ n.right, extra, value, idxp);
+ *idxp += this->nweight(n.left) + 1;
+ return r;
+ } else if (hv > 0) {
+ return this->find_internal_zero<omtcmp_t, h>(
+ n.left, extra, value, idxp);
+ } else {
+ int r = this->find_internal_zero<omtcmp_t, h>(
+ n.left, extra, value, idxp);
+ if (r == DB_NOTFOUND) {
+ *idxp = this->nweight(n.left);
+ if (value != nullptr) {
+ copyout(value, &n);
+ }
+ r = 0;
+ }
+ return r;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_plus_array(
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ uint32_t min = this->d.a.start_idx;
+ uint32_t limit = this->d.a.start_idx + this->d.a.num_values;
+ uint32_t best = subtree::NODE_NULL;
+
+ while (min != limit) {
+ const uint32_t mid = (min + limit) / 2;
+ const int hv = h(this->d.a.values[mid], extra);
+ if (hv > 0) {
+ best = mid;
+ limit = mid;
+ } else {
+ min = mid + 1;
+ }
+ }
+ if (best == subtree::NODE_NULL) {
+ return DB_NOTFOUND;
+ }
+ if (value != nullptr) {
+ copyout(value, &this->d.a.values[best]);
+ }
+ *idxp = best - this->d.a.start_idx;
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_plus(
+ const subtree &st,
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ if (st.is_null()) {
+ return DB_NOTFOUND;
+ }
+ omt_node *const n = &this->d.t.nodes[st.get_index()];
+ int hv = h(n->value, extra);
+ int r;
+ if (hv > 0) {
+ r = this->find_internal_plus<omtcmp_t, h>(
+ n->left, extra, value, idxp);
+ if (r == DB_NOTFOUND) {
+ *idxp = this->nweight(n->left);
+ if (value != nullptr) {
+ copyout(value, n);
+ }
+ r = 0;
+ }
+ } else {
+ r = this->find_internal_plus<omtcmp_t, h>(
+ n->right, extra, value, idxp);
+ if (r == 0) {
+ *idxp += this->nweight(n->left) + 1;
+ }
+ }
+ return r;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_minus_array(
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ uint32_t min = this->d.a.start_idx;
+ uint32_t limit = this->d.a.start_idx + this->d.a.num_values;
+ uint32_t best = subtree::NODE_NULL;
+
+ while (min != limit) {
+ const uint32_t mid = (min + limit) / 2;
+ const int hv = h(this->d.a.values[mid], extra);
+ if (hv < 0) {
+ best = mid;
+ min = mid + 1;
+ } else {
+ limit = mid;
+ }
+ }
+ if (best == subtree::NODE_NULL) {
+ return DB_NOTFOUND;
+ }
+ if (value != nullptr) {
+ copyout(value, &this->d.a.values[best]);
+ }
+ *idxp = best - this->d.a.start_idx;
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_minus(
+ const subtree &st,
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ if (st.is_null()) {
+ return DB_NOTFOUND;
+ }
+ omt_node *const n = &this->d.t.nodes[st.get_index()];
+ int hv = h(n->value, extra);
+ if (hv < 0) {
+ int r = this->find_internal_minus<omtcmp_t, h>(
+ n->right, extra, value, idxp);
+ if (r == 0) {
+ *idxp += this->nweight(n->left) + 1;
+ } else if (r == DB_NOTFOUND) {
+ *idxp = this->nweight(n->left);
+ if (value != nullptr) {
+ copyout(value, n);
+ }
+ r = 0;
+ }
+ return r;
+ } else {
+ return this->find_internal_minus<omtcmp_t, h>(
+ n->left, extra, value, idxp);
+ }
+ }
+} // namespace toku
diff --git a/storage/tokudb/PerconaFT/util/omt.h b/storage/tokudb/PerconaFT/util/omt.h
new file mode 100644
index 00000000..849389b9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/omt.h
@@ -0,0 +1,773 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <stdint.h>
+#include <memory.h>
+#include <toku_portability.h>
+#include <toku_race_tools.h>
+#include "growable_array.h"
+
+namespace toku {
+
+/**
+ * Order Maintenance Tree (OMT)
+ *
+ * Maintains a collection of totally ordered values, where each value has an integer weight.
+ * The OMT is a mutable datatype.
+ *
+ * The Abstraction:
+ *
+ * An OMT is a vector of values, $V$, where $|V|$ is the length of the vector.
+ * The vector is numbered from $0$ to $|V|-1$.
+ * Each value has a weight. The weight of the $i$th element is denoted $w(V_i)$.
+ *
+ * We can create a new OMT, which is the empty vector.
+ *
+ * We can insert a new element $x$ into slot $i$, changing $V$ into $V'$ where
+ * $|V'|=1+|V|$ and
+ *
+ * V'_j = V_j if $j<i$
+ * x if $j=i$
+ * V_{j-1} if $j>i$.
+ *
+ * We can specify $i$ using a kind of function instead of as an integer.
+ * Let $b$ be a function mapping from values to nonzero integers, such that
+ * the signum of $b$ is monotonically increasing.
+ * We can specify $i$ as the minimum integer such that $b(V_i)>0$.
+ *
+ * We look up a value using its index, or using a Heaviside function.
+ * For lookups, we allow $b$ to be zero for some values, and again the signum of $b$ must be monotonically increasing.
+ * When looking up values, we can look up
+ * $V_i$ where $i$ is the minimum integer such that $b(V_i)=0$. (With a special return code if no such value exists.)
+ * (Rationale: Ordinarily we want $i$ to be unique. But for various reasons we want to allow multiple zeros, and we want the smallest $i$ in that case.)
+ * $V_i$ where $i$ is the minimum integer such that $b(V_i)>0$. (Or an indication that no such value exists.)
+ * $V_i$ where $i$ is the maximum integer such that $b(V_i)<0$. (Or an indication that no such value exists.)
+ *
+ * When looking up a value using a Heaviside function, we get the value and its index.
+ *
+ * We can also split an OMT into two OMTs, splitting the weight of the values evenly.
+ * Find a value $j$ such that the values to the left of $j$ have about the same total weight as the values to the right of $j$.
+ * The resulting two OMTs contain the values to the left of $j$ and the values to the right of $j$ respectively.
+ * All of the values from the original OMT go into one of the new OMTs.
+ * If the weights of the values don't split exactly evenly, then the implementation has the freedom to choose whether
+ * the new left OMT or the new right OMT is larger.
+ *
+ * Performance:
+ * Insertion and deletion should run with $O(\log |V|)$ time and $O(\log |V|)$ calls to the Heaviside function.
+ * The memory required is O(|V|).
+ *
+ * Usage:
+ * The omt is templated by two parameters:
+ * - omtdata_t is what will be stored within the omt. These could be pointers or real data types (ints, structs).
+ * - omtdataout_t is what will be returned by find and related functions. By default, it is the same as omtdata_t, but you can set it to (omtdata_t *).
+ * To create an omt which will store "TXNID"s, for example, it is a good idea to typedef the template:
+ * typedef omt<TXNID> txnid_omt_t;
+ * If you are storing structs, you may want to be able to get a pointer to the data actually stored in the omt (see find_zero). To do this, use the second template parameter:
+ * typedef omt<struct foo, struct foo *> foo_omt_t;
+ */
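+
+// A minimal usage sketch, for illustration only (TXNID stands in for any
+// copyable value type; the values and indexes are arbitrary):
+//
+//     typedef omt<TXNID> txnid_omt_t;
+//     txnid_omt_t txns;
+//     txns.create();
+//     txns.insert_at(42, 0);      // V = [42]
+//     txns.insert_at(17, 0);      // V = [17, 42]
+//     TXNID v;
+//     int r = txns.fetch(1, &v);  // r == 0, v == 42
+//     txns.destroy();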
+
+namespace omt_internal {
+
+template<bool subtree_supports_marks>
+class subtree_templated {
+private:
+ uint32_t m_index;
+public:
+ static const uint32_t NODE_NULL = UINT32_MAX;
+ inline void set_to_null(void) {
+ m_index = NODE_NULL;
+ }
+
+ inline bool is_null(void) const {
+ return NODE_NULL == this->get_index();
+ }
+
+ inline uint32_t get_index(void) const {
+ return m_index;
+ }
+
+ inline void set_index(uint32_t index) {
+ paranoid_invariant(index != NODE_NULL);
+ m_index = index;
+ }
+} ;
+
+template<>
+class subtree_templated<true> {
+private:
+ uint32_t m_bitfield;
+ static const uint32_t MASK_INDEX = ~(((uint32_t)1) << 31);
+ static const uint32_t MASK_BIT = ((uint32_t)1) << 31;
+
+ inline void set_index_internal(uint32_t new_index) {
+ m_bitfield = (m_bitfield & MASK_BIT) | new_index;
+ }
+public:
+ static const uint32_t NODE_NULL = INT32_MAX;
+ inline void set_to_null(void) {
+ this->set_index_internal(NODE_NULL);
+ }
+
+ inline bool is_null(void) const {
+ return NODE_NULL == this->get_index();
+ }
+
+ inline uint32_t get_index(void) const {
+ TOKU_DRD_IGNORE_VAR(m_bitfield);
+ const uint32_t bits = m_bitfield;
+ TOKU_DRD_STOP_IGNORING_VAR(m_bitfield);
+ return bits & MASK_INDEX;
+ }
+
+ inline void set_index(uint32_t index) {
+ paranoid_invariant(index < NODE_NULL);
+ this->set_index_internal(index);
+ }
+
+ inline bool get_bit(void) const {
+ TOKU_DRD_IGNORE_VAR(m_bitfield);
+ const uint32_t bits = m_bitfield;
+ TOKU_DRD_STOP_IGNORING_VAR(m_bitfield);
+ return (bits & MASK_BIT) != 0;
+ }
+
+ inline void enable_bit(void) {
+ // These bits may be set by a thread with a write lock on some
+ // leaf, and the index can be read by another thread with a (read
+        // or write) lock on another leaf.  Also, the has_marks_below
+ // bit can be set by two threads simultaneously. Neither of these
+ // are real races, so if we are using DRD we should tell it to
+ // ignore these bits just while we set this bit. If there were a
+ // race in setting the index, that would be a real race.
+ TOKU_DRD_IGNORE_VAR(m_bitfield);
+ m_bitfield |= MASK_BIT;
+ TOKU_DRD_STOP_IGNORING_VAR(m_bitfield);
+ }
+
+ inline void disable_bit(void) {
+ m_bitfield &= MASK_INDEX;
+ }
+} ;
+
+template<typename omtdata_t, bool subtree_supports_marks>
+class omt_node_templated {
+public:
+ uint32_t weight;
+ subtree_templated<subtree_supports_marks> left;
+ subtree_templated<subtree_supports_marks> right;
+ omtdata_t value;
+
+ // this needs to be in both implementations because we don't have
+ // a "static if" the caller can use
+ inline void clear_stolen_bits(void) {}
+} ;
+
+template<typename omtdata_t>
+class omt_node_templated<omtdata_t, true> {
+public:
+ uint32_t weight;
+ subtree_templated<true> left;
+ subtree_templated<true> right;
+ omtdata_t value;
+ inline bool get_marked(void) const {
+ return left.get_bit();
+ }
+ inline void set_marked_bit(void) {
+ return left.enable_bit();
+ }
+ inline void unset_marked_bit(void) {
+ return left.disable_bit();
+ }
+
+ inline bool get_marks_below(void) const {
+ return right.get_bit();
+ }
+ inline void set_marks_below_bit(void) {
+ // This function can be called by multiple threads.
+ // Checking first reduces cache invalidation.
+ if (!this->get_marks_below()) {
+ right.enable_bit();
+ }
+ }
+ inline void unset_marks_below_bit(void) {
+ right.disable_bit();
+ }
+
+ inline void clear_stolen_bits(void) {
+ this->unset_marked_bit();
+ this->unset_marks_below_bit();
+ }
+} ;
+
+} // namespace omt_internal
+
+template<typename omtdata_t,
+ typename omtdataout_t=omtdata_t,
+ bool supports_marks=false>
+class omt {
+public:
+ /**
+ * Effect: Create an empty OMT.
+ * Performance: constant time.
+ */
+ void create(void);
+
+ /**
+ * Effect: Create an empty OMT with no internal allocated space.
+ * Performance: constant time.
+ * Rationale: In some cases we need a valid omt but don't want to malloc.
+ */
+ void create_no_array(void);
+
+ /**
+     * Effect: Create an OMT containing values.  The number of values is in numvalues.
+     *  The new OMT is constructed in this.
+ * Requires: this has not been created yet
+ * Requires: values != NULL
+ * Requires: values is sorted
+ * Performance: time=O(numvalues)
+ * Rationale: Normally to insert N values takes O(N lg N) amortized time.
+ * If the N values are known in advance, are sorted, and
+ * the structure is empty, we can batch insert them much faster.
+ */
+ __attribute__((nonnull))
+ void create_from_sorted_array(const omtdata_t *const values, const uint32_t numvalues);
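+
+    // Illustrative sketch of batch creation from an already-sorted C array
+    // (the values shown are placeholders):
+    //
+    //     const uint32_t sorted[] = { 1, 2, 3, 5, 8 };
+    //     omt<uint32_t> o;
+    //     o.create_from_sorted_array(sorted, 5);  // O(5) rather than O(5 lg 5)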
+
+ /**
+ * Effect: Create an OMT containing values. The number of values is in numvalues.
+ * On success the OMT takes ownership of *values array, and sets values=NULL.
+ * Requires: this has not been created yet
+ * Requires: values != NULL
+ * Requires: *values is sorted
+ * Requires: *values was allocated with toku_malloc
+ * Requires: Capacity of the *values array is <= new_capacity
+ * Requires: On success, *values may not be accessed again by the caller.
+ * Performance: time=O(1)
+     * Rationale: create_from_sorted_array takes O(numvalues) time.
+ * By taking ownership of the array, we save a malloc and memcpy,
+ * and possibly a free (if the caller is done with the array).
+ */
+ void create_steal_sorted_array(omtdata_t **const values, const uint32_t numvalues, const uint32_t new_capacity);
+
+ /**
+ * Effect: Create a new OMT, storing it in *newomt.
+ * The values to the right of index (starting at index) are moved to *newomt.
+ * Requires: newomt != NULL
+ * Returns
+ * 0 success,
+     *   EINVAL if idx > this->size()
+ * On nonzero return, omt and *newomt are unmodified.
+ * Performance: time=O(n)
+ * Rationale: We don't need a split-evenly operation. We need to split items so that their total sizes
+ * are even, and other similar splitting criteria. It's easy to split evenly by calling size(), and dividing by two.
+ */
+ __attribute__((nonnull))
+ int split_at(omt *const newomt, const uint32_t idx);
+
+ /**
+ * Effect: Appends leftomt and rightomt to produce a new omt.
+ * Creates this as the new omt.
+ * leftomt and rightomt are destroyed.
+ * Performance: time=O(n) is acceptable, but one can imagine implementations that are O(\log n) worst-case.
+ */
+ __attribute__((nonnull))
+ void merge(omt *const leftomt, omt *const rightomt);
+
+ /**
+ * Effect: Creates a copy of an omt.
+ * Creates this as the clone.
+ * Each element is copied directly. If they are pointers, the underlying data is not duplicated.
+ * Performance: O(n) or the running time of fill_array_with_subtree_values()
+ */
+ void clone(const omt &src);
+
+ /**
+ * Effect: Set the tree to be empty.
+ * Note: Will not reallocate or resize any memory.
+ * Performance: time=O(1)
+ */
+ void clear(void);
+
+ /**
+ * Effect: Destroy an OMT, freeing all its memory.
+     *  If the values being stored are pointers, their underlying data is not freed.  See free_items().
+     *  Those values may be freed before or after calling destroy().
+ * Rationale: Returns no values since free() cannot fail.
+ * Rationale: Does not free the underlying pointers to reduce complexity.
+ * Performance: time=O(1)
+ */
+ void destroy(void);
+
+ /**
+ * Effect: return |this|.
+ * Performance: time=O(1)
+ */
+ uint32_t size(void) const;
+
+
+ /**
+ * Effect: Insert value into the OMT.
+ * If there is some i such that $h(V_i, v)=0$ then returns DB_KEYEXIST.
+ * Otherwise, let i be the minimum value such that $h(V_i, v)>0$.
+ * If no such i exists, then let i be |V|
+ * Then this has the same effect as
+ * insert_at(tree, value, i);
+ * If idx!=NULL then i is stored in *idx
+ * Requires: The signum of h must be monotonically increasing.
+ * Returns:
+ * 0 success
+ * DB_KEYEXIST the key is present (h was equal to zero for some value)
+ * On nonzero return, omt is unchanged.
+ * Performance: time=O(\log N) amortized.
+ * Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
+ */
+ template<typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int insert(const omtdata_t &value, const omtcmp_t &v, uint32_t *const idx);
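+
+    // Hedged sketch of a sorted insert; o is an omt<uint32_t> and cmp_u32 is
+    // an illustrative heaviside, not part of this header:
+    //
+    //     static int cmp_u32(const uint32_t &stored, const uint32_t &key) {
+    //         return (stored < key) ? -1 : (stored > key) ? +1 : 0;
+    //     }
+    //     uint32_t idx;
+    //     int r = o.insert<uint32_t, cmp_u32>(7, 7, &idx);
+    //     // r == 0 and idx is 7's slot, or DB_KEYEXIST if 7 was already there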
+
+ /**
+ * Effect: Increases indexes of all items at slot >= idx by 1.
+ * Insert value into the position at idx.
+ * Returns:
+ * 0 success
+ * EINVAL if idx > this->size()
+ * On error, omt is unchanged.
+ * Performance: time=O(\log N) amortized time.
+ * Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
+ */
+ int insert_at(const omtdata_t &value, const uint32_t idx);
+
+ /**
+ * Effect: Replaces the item at idx with value.
+ * Returns:
+ * 0 success
+ * EINVAL if idx>=this->size()
+ * On error, omt is unchanged.
+ * Performance: time=O(\log N)
+ * Rationale: The FT needs to be able to replace a value with another copy of the same value (allocated in a different location)
+ *
+ */
+ int set_at(const omtdata_t &value, const uint32_t idx);
+
+ /**
+ * Effect: Delete the item in slot idx.
+ * Decreases indexes of all items at slot > idx by 1.
+ * Returns
+ * 0 success
+ * EINVAL if idx>=this->size()
+ * On error, omt is unchanged.
+ * Rationale: To delete an item, first find its index using find or find_zero, then delete it.
+ * Performance: time=O(\log N) amortized.
+ */
+ int delete_at(const uint32_t idx);
+
+ /**
+ * Effect: Iterate over the values of the omt, from left to right, calling f on each value.
+ * The first argument passed to f is a ref-to-const of the value stored in the omt.
+ * The second argument passed to f is the index of the value.
+ * The third argument passed to f is iterate_extra.
+ * The indices run from 0 (inclusive) to this->size() (exclusive).
+ * Requires: f != NULL
+ * Returns:
+ * If f ever returns nonzero, then the iteration stops, and the value returned by f is returned by iterate.
+ * If f always returns zero, then iterate returns 0.
+ * Requires: Don't modify the omt while running. (E.g., f may not insert or delete values from the omt.)
+ * Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in the omt.
+ * Rationale: Although the functional iterator requires defining another function (as opposed to C++ style iterator), it is much easier to read.
+ * Rationale: We may at some point use functors, but for now this is a smaller change from the old OMT.
+ */
+ template<typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate(iterate_extra_t *const iterate_extra) const;
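+
+    // Illustrative callback for iterate(); o is an omt<uint32_t>, and
+    // sum_extra/sum_cb are hypothetical names, not part of this header:
+    //
+    //     struct sum_extra { uint64_t sum; };
+    //     static int sum_cb(const uint32_t &v, const uint32_t idx,
+    //                       sum_extra *const e) {
+    //         (void) idx;
+    //         e->sum += v;
+    //         return 0;  // returning nonzero would stop the iteration early
+    //     }
+    //     sum_extra e = { 0 };
+    //     o.iterate<sum_extra, sum_cb>(&e);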
+
+ /**
+ * Effect: Iterate over the values of the omt, from left to right, calling f on each value.
+ * The first argument passed to f is a ref-to-const of the value stored in the omt.
+ * The second argument passed to f is the index of the value.
+ * The third argument passed to f is iterate_extra.
+ * The indices run from 0 (inclusive) to this->size() (exclusive).
+ * We will iterate only over [left,right)
+ *
+ * Requires: left <= right
+ * Requires: f != NULL
+ * Returns:
+ * EINVAL if right > this->size()
+ * If f ever returns nonzero, then the iteration stops, and the value returned by f is returned by iterate_on_range.
+ * If f always returns zero, then iterate_on_range returns 0.
+ * Requires: Don't modify the omt while running. (E.g., f may not insert or delete values from the omt.)
+ * Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in the omt.
+     * Rationale: Although the functional iterator requires defining another function (as opposed to C++ style iterator), it is much easier to read.
+ */
+ template<typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const;
+
+ /**
+ * Effect: Iterate over the values of the omt, and mark the nodes that are visited.
+ * Other than the marks, this behaves the same as iterate_on_range.
+ * Requires: supports_marks == true
+ * Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in the omt.
+ * Notes:
+ * This function MAY be called concurrently by multiple threads, but
+ * not concurrently with any other non-const function.
+ */
+ template<typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate_and_mark_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra);
+
+ /**
+ * Effect: Iterate over the values of the omt, from left to right, calling f on each value whose node has been marked.
+ * Other than the marks, this behaves the same as iterate.
+ * Requires: supports_marks == true
+ * Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in the omt.
+ */
+ template<typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate_over_marked(iterate_extra_t *const iterate_extra) const;
+
+ /**
+ * Effect: Delete all elements from the omt, whose nodes have been marked.
+ * Requires: supports_marks == true
+     * Performance: time=O(N + i\log N) where i is the number of marked elements; this could (and should) be faster
+ */
+ void delete_all_marked(void);
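+
+    // Hedged sketch of the mark-and-sweep pattern the marking functions above
+    // support (o is an omt built with supports_marks == true; extra_t,
+    // visit_cb, lo and hi are illustrative):
+    //
+    //     o.iterate_and_mark_range<extra_t, visit_cb>(lo, hi, &extra);
+    //     // ... possibly more marking passes, possibly from other threads ...
+    //     o.delete_all_marked();  // removes every element that was marked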
+
+ /**
+ * Effect: Verify that the internal state of the marks in the tree are self-consistent.
+ * Crashes the system if the marks are in a bad state.
+ * Requires: supports_marks == true
+ * Performance: time=O(N)
+ * Notes:
+ * Even though this is a const function, it requires exclusive access.
+ * Rationale:
+ * The current implementation of the marks relies on a sort of
+ * "cache" bit representing the state of bits below it in the tree.
+ * This allows glass-box testing that these bits are correct.
+ */
+ void verify_marks_consistent(void) const;
+
+ /**
+ * Effect: None
+ * Returns whether there are any marks in the tree.
+ */
+ bool has_marks(void) const;
+
+ /**
+ * Effect: Iterate over the values of the omt, from left to right, calling f on each value.
+ * The first argument passed to f is a pointer to the value stored in the omt.
+ * The second argument passed to f is the index of the value.
+ * The third argument passed to f is iterate_extra.
+ * The indices run from 0 (inclusive) to this->size() (exclusive).
+ * Requires: same as for iterate()
+ * Returns: same as for iterate()
+ * Performance: same as for iterate()
+ * Rationale: In general, most iterators should use iterate() since they should not modify the data stored in the omt. This function is for iterators which need to modify values (for example, free_items).
+ * Rationale: We assume if you are transforming the data in place, you want to do it to everything at once, so there is not yet an iterate_on_range_ptr (but there could be).
+ */
+ template<typename iterate_extra_t,
+ int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void iterate_ptr(iterate_extra_t *const iterate_extra);
+
+ /**
+ * Effect: Set *value=V_idx
+ * Returns
+ * 0 success
+     *    EINVAL if idx>=this->size()
+ * On nonzero return, *value is unchanged
+ * Performance: time=O(\log N)
+ */
+ int fetch(const uint32_t idx, omtdataout_t *const value) const;
+
+ /**
+ * Effect: Find the smallest i such that h(V_i, extra)>=0
+ * If there is such an i and h(V_i,extra)==0 then set *idxp=i, set *value = V_i, and return 0.
+ * If there is such an i and h(V_i,extra)>0 then set *idxp=i and return DB_NOTFOUND.
+     *  If there is no such i then set *idxp=this->size() and return DB_NOTFOUND.
+ * Note: value is of type omtdataout_t, which may be of type (omtdata_t) or (omtdata_t *) but is fixed by the instantiation.
+ * If it is the value type, then the value is copied out (even if the value type is a pointer to something else)
+ * If it is the pointer type, then *value is set to a pointer to the data within the omt.
+ * This is determined by the type of the omt as initially declared.
+ * If the omt is declared as omt<foo_t>, then foo_t's will be stored and foo_t's will be returned by find and related functions.
+ * If the omt is declared as omt<foo_t, foo_t *>, then foo_t's will be stored, and pointers to the stored items will be returned by find and related functions.
+ * Rationale:
+ * Structs too small for malloc should be stored directly in the omt.
+ * These structs may need to be edited as they exist inside the omt, so we need a way to get a pointer within the omt.
+ * Using separate functions for returning pointers and values increases code duplication and reduces type-checking.
+ * That also reduces the ability of the creator of a data structure to give advice to its future users.
+ * Slight overloading in this case seemed to provide a better API and better type checking.
+ */
+ template<typename omtcmp_t,
+ int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int find_zero(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;
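+
+    // Illustrative only: with the pointer out-type, find_zero hands back a
+    // pointer into the omt so the element can be edited in place.  Here foos
+    // is a foo_omt_t, key is a foo_key_t, and foo_t, foo_key_t, find_foo and
+    // the refcount field are all hypothetical:
+    //
+    //     typedef omt<foo_t, foo_t *> foo_omt_t;
+    //     foo_t *p;
+    //     uint32_t idx;
+    //     if (foos.find_zero<foo_key_t, find_foo>(key, &p, &idx) == 0) {
+    //         p->refcount++;  // mutate the stored element directly
+    //     }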
+
+ /**
+ * Effect:
+ * If direction >0 then find the smallest i such that h(V_i,extra)>0.
+ * If direction <0 then find the largest i such that h(V_i,extra)<0.
+ * (Direction may not be equal to zero.)
+ * If value!=NULL then store V_i in *value
+ * If idxp!=NULL then store i in *idxp.
+     * Requires: The signum of h is monotonically increasing.
+ * Returns
+ * 0 success
+ * DB_NOTFOUND no such value is found.
+ * On nonzero return, *value and *idxp are unchanged
+ * Performance: time=O(\log N)
+ * Rationale:
+ * Here's how to use the find function to find various things
+ * Cases for find:
+ * find first value: ( h(v)=+1, direction=+1 )
+ * find last value ( h(v)=-1, direction=-1 )
+ * find first X ( h(v)=(v< x) ? -1 : 1 direction=+1 )
+ * find last X ( h(v)=(v<=x) ? -1 : 1 direction=-1 )
+ * find X or successor to X ( same as find first X. )
+ *
+     * Rationale: To help understand heaviside functions and the behavior of find:
+     *    There are 7 kinds of heaviside functions.
+     *    The signum of h must be monotonically increasing.
+ * Given a function of the following form, A is the element
+ * returned for direction>0, B is the element returned
+ * for direction<0, C is the element returned for
+ * direction==0 (see find_zero) (with a return of 0), and D is the element
+ * returned for direction==0 (see find_zero) with a return of DB_NOTFOUND.
+ * If any of A, B, or C are not found, then asking for the
+ * associated direction will return DB_NOTFOUND.
+ * See find_zero for more information.
+ *
+     *    Let the following represent the signum of the heaviside function.
+ *
+ * -...-
+ * A
+ * D
+ *
+ * +...+
+ * B
+ * D
+ *
+ * 0...0
+ * C
+ *
+ * -...-0...0
+ * AC
+ *
+ * 0...0+...+
+ * C B
+ *
+ * -...-+...+
+ * AB
+ * D
+ *
+ * -...-0...0+...+
+ * AC B
+ */
+ template<typename omtcmp_t,
+ int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int find(const omtcmp_t &extra, int direction, omtdataout_t *const value, uint32_t *const idxp) const;
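+
+    // Hedged sketch of the "find first X" case from the table above (o is an
+    // omt<uint32_t>; h_ge is an illustrative heaviside, not part of this
+    // header):
+    //
+    //     static int h_ge(const uint32_t &stored, const uint32_t &x) {
+    //         return (stored < x) ? -1 : +1;
+    //     }
+    //     uint32_t x = 100;  // arbitrary probe value
+    //     uint32_t v, idx;
+    //     int r = o.find<uint32_t, h_ge>(x, +1, &v, &idx);
+    //     // r == 0: v is the smallest stored value >= x; else DB_NOTFOUND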
+
+ /**
+ * Effect: Return the size (in bytes) of the omt, as it resides in main memory. If the data stored are pointers, don't include the size of what they all point to.
+ */
+ size_t memory_size(void);
+
+private:
+ typedef uint32_t node_idx;
+ typedef omt_internal::subtree_templated<supports_marks> subtree;
+ typedef omt_internal::omt_node_templated<omtdata_t, supports_marks> omt_node;
+ ENSURE_POD(subtree);
+
+ struct omt_array {
+ uint32_t start_idx;
+ uint32_t num_values;
+ omtdata_t *values;
+ };
+
+ struct omt_tree {
+ subtree root;
+ uint32_t free_idx;
+ omt_node *nodes;
+ };
+
+ bool is_array;
+ uint32_t capacity;
+ union {
+ struct omt_array a;
+ struct omt_tree t;
+ } d;
+
+ __attribute__((nonnull))
+ void unmark(const subtree &subtree, const uint32_t index, GrowableArray<node_idx> *const indexes);
+
+ void create_internal_no_array(const uint32_t new_capacity);
+
+ void create_internal(const uint32_t new_capacity);
+
+ uint32_t nweight(const subtree &subtree) const;
+
+ node_idx node_malloc(void);
+
+ void node_free(const node_idx idx);
+
+ void maybe_resize_array(const uint32_t n);
+
+ __attribute__((nonnull))
+ void fill_array_with_subtree_values(omtdata_t *const array, const subtree &subtree) const;
+
+ void convert_to_array(void);
+
+ __attribute__((nonnull))
+ void rebuild_from_sorted_array(subtree *const subtree, const omtdata_t *const values, const uint32_t numvalues);
+
+ void convert_to_tree(void);
+
+ void maybe_resize_or_convert(const uint32_t n);
+
+ bool will_need_rebalance(const subtree &subtree, const int leftmod, const int rightmod) const;
+
+ __attribute__((nonnull))
+ void insert_internal(subtree *const subtreep, const omtdata_t &value, const uint32_t idx, subtree **const rebalance_subtree);
+
+ void set_at_internal_array(const omtdata_t &value, const uint32_t idx);
+
+ void set_at_internal(const subtree &subtree, const omtdata_t &value, const uint32_t idx);
+
+ void delete_internal(subtree *const subtreep, const uint32_t idx, omt_node *const copyn, subtree **const rebalance_subtree);
+
+ template<typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate_internal_array(const uint32_t left, const uint32_t right,
+ iterate_extra_t *const iterate_extra) const;
+
+ template<typename iterate_extra_t,
+ int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void iterate_ptr_internal(const uint32_t left, const uint32_t right,
+ const subtree &subtree, const uint32_t idx,
+ iterate_extra_t *const iterate_extra);
+
+ template<typename iterate_extra_t,
+ int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void iterate_ptr_internal_array(const uint32_t left, const uint32_t right,
+ iterate_extra_t *const iterate_extra);
+
+ template<typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate_internal(const uint32_t left, const uint32_t right,
+ const subtree &subtree, const uint32_t idx,
+ iterate_extra_t *const iterate_extra) const;
+
+ template<typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate_and_mark_range_internal(const uint32_t left, const uint32_t right,
+ const subtree &subtree, const uint32_t idx,
+ iterate_extra_t *const iterate_extra);
+
+ template<typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int iterate_over_marked_internal(const subtree &subtree, const uint32_t idx,
+ iterate_extra_t *const iterate_extra) const;
+
+ uint32_t verify_marks_consistent_internal(const subtree &subtree, const bool allow_marks) const;
+
+ void fetch_internal_array(const uint32_t i, omtdataout_t *const value) const;
+
+ void fetch_internal(const subtree &subtree, const uint32_t i, omtdataout_t *const value) const;
+
+ __attribute__((nonnull))
+ void fill_array_with_subtree_idxs(node_idx *const array, const subtree &subtree) const;
+
+ __attribute__((nonnull))
+ void rebuild_subtree_from_idxs(subtree *const subtree, const node_idx *const idxs, const uint32_t numvalues);
+
+ __attribute__((nonnull))
+ void rebalance(subtree *const subtree);
+
+ __attribute__((nonnull))
+ static void copyout(omtdata_t *const out, const omt_node *const n);
+
+ __attribute__((nonnull))
+ static void copyout(omtdata_t **const out, omt_node *const n);
+
+ __attribute__((nonnull))
+ static void copyout(omtdata_t *const out, const omtdata_t *const stored_value_ptr);
+
+ __attribute__((nonnull))
+ static void copyout(omtdata_t **const out, omtdata_t *const stored_value_ptr);
+
+ template<typename omtcmp_t,
+ int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int find_internal_zero_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;
+
+ template<typename omtcmp_t,
+ int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int find_internal_zero(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;
+
+ template<typename omtcmp_t,
+ int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int find_internal_plus_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;
+
+ template<typename omtcmp_t,
+ int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int find_internal_plus(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;
+
+ template<typename omtcmp_t,
+ int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int find_internal_minus_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;
+
+ template<typename omtcmp_t,
+ int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int find_internal_minus(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;
+};
+
+} // namespace toku
+
+// include the implementation here
+#include "omt.cc"
diff --git a/storage/tokudb/PerconaFT/util/partitioned_counter.cc b/storage/tokudb/PerconaFT/util/partitioned_counter.cc
new file mode 100644
index 00000000..7a6b8ab2
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/partitioned_counter.cc
@@ -0,0 +1,417 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_race_tools.h>
+#include <sys/types.h>
+#include <pthread.h>
+
+#include "memory.h"
+#include "partitioned_counter.h"
+#include "doubly_linked_list.h"
+#include "growable_array.h"
+#include <portability/toku_atomic.h>
+
+#ifdef __APPLE__
+// TODO(leif): The __thread declspec is broken in ways I don't understand
+// on Darwin. Partitioned counters use them and it would be prohibitive
+// to tease them apart before a week after 6.5.0, so instead, we're just
+// not going to use them in the most brutal way possible. This is a
+// terrible implementation of the API in partitioned_counter.h but it
+// should be correct enough to release a non-performant version on OSX for
+// development. Soon, we need to either make portable partitioned
+// counters, or we need to do this disabling in a portable way.
+
+struct partitioned_counter {
+ uint64_t v;
+};
+
+PARTITIONED_COUNTER create_partitioned_counter(void) {
+ PARTITIONED_COUNTER XCALLOC(counter);
+ return counter;
+}
+
+void destroy_partitioned_counter(PARTITIONED_COUNTER counter) {
+ toku_free(counter);
+}
+
+void increment_partitioned_counter(PARTITIONED_COUNTER counter, uint64_t delta) {
+ (void) toku_sync_fetch_and_add(&counter->v, delta);
+}
+
+uint64_t read_partitioned_counter(PARTITIONED_COUNTER counter) {
+ return counter->v;
+}
+
+void partitioned_counters_init(void) {}
+void partitioned_counters_destroy(void) {}
+
+#else // __APPLE__
+
+//******************************************************************************
+//
+// Representation: The representation of a partitioned counter comprises a
+//  sum, called sum_of_dead; an index, called the pc_key, which indexes into a
+// thread-local array to find a thread-local part of the counter; and a
+// linked list of thread-local parts.
+//
+// There is also a linked list, for each thread that has a thread-local part
+// of any counter, of all the thread-local parts of all the counters.
+//
+// There is a pthread_key which gives us a hook to clean up thread-local
+// state when a thread terminates. For each thread-local part of a counter
+// that the thread has, we add in the thread-local sum into the sum_of_dead.
+//
+// Finally there is a list of all the thread-local arrays so that when we
+// destroy the partitioned counter before the threads are done, we can find
+// and destroy the thread_local_arrays before destroying the pthread_key.
+//
+// Abstraction function: The sum is represented by the sum of sum_of_dead and
+//  the sums of the thread-local parts of the counter.
+//
+// Representation invariant: Every thread-local part is in the linked list of
+// the thread-local parts of its counter, as well as in the linked list of
+//  the counters of the thread.
+//
+//******************************************************************************
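+
+// A hedged sketch of how a client uses the public API declared in
+// partitioned_counter.h (usage only; not part of the implementation below):
+//
+//     PARTITIONED_COUNTER c = create_partitioned_counter();
+//     increment_partitioned_counter(c, 1);           // cheap thread-local add
+//     uint64_t total = read_partitioned_counter(c);  // sum of live and dead parts
+//     destroy_partitioned_counter(c);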
+
+//******************************************************************************
+// The mutex for the PARTITIONED_COUNTER
+// We have a single mutex for all the counters because
+// (a) the mutex is obtained infrequently, and
+// (b) it helps us avoid race conditions when destroying the counters.
+// The alternative that I couldn't make work is to have a mutex per counter.
+// But the problem is that the counter can be destroyed before threads
+// terminate, or maybe a thread terminates before the counter is destroyed.
+// If the counter is destroyed first, then the mutex is no longer available.
+//******************************************************************************
+
+using namespace toku;
+
+static pthread_mutex_t partitioned_counter_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void pc_lock (void)
+// Effect: Lock the mutex.
+{
+ int r = pthread_mutex_lock(&partitioned_counter_mutex);
+ assert(r==0);
+}
+
+static void pc_unlock (void)
+// Effect: Unlock the mutex.
+{
+ int r = pthread_mutex_unlock(&partitioned_counter_mutex);
+ assert(r==0);
+}
+
+//******************************************************************************
+// Key creation primitives
+//******************************************************************************
+static void pk_create (pthread_key_t *key, void (*destructor)(void*)) {
+ int r = pthread_key_create(key, destructor);
+ assert(r==0);
+}
+
+static void pk_delete (pthread_key_t key) {
+ int r = pthread_key_delete(key);
+ assert(r==0);
+}
+
+static void pk_setspecific (pthread_key_t key, const void *value) {
+ int r = pthread_setspecific(key, value);
+ assert(r==0);
+}
+
+//******************************************************************************
+// The counter itself.
+// The thread-local part of a counter, comprising the thread-local sum, a pointer
+// to the partitioned_counter, a pointer to the thread-local array, and two
+// linked lists. One of the lists is all the thread-local parts that belong to
+// the same counter, and the other is all the thread-local parts that belong to
+// the same thread.
+//******************************************************************************
+
+struct local_counter;
+
+struct partitioned_counter {
+ uint64_t sum_of_dead; // The sum of all thread-local counts from threads that have terminated.
+ uint64_t pc_key; // A unique integer among all counters that have been created but not yet destroyed.
+ DoublyLinkedList<struct local_counter *> ll_counter_head; // A linked list of all the thread-local information for this counter.
+};
+
+struct local_counter {
+ uint64_t sum; // The thread-local sum.
+ PARTITIONED_COUNTER owner_pc; // The partitioned counter that this is part of.
+ GrowableArray<struct local_counter *> *thread_local_array; // The thread local array for this thread holds this local_counter at offset owner_pc->pc_key.
+ LinkedListElement<struct local_counter *> ll_in_counter; // Element for the doubly-linked list of thread-local information for this PARTITIONED_COUNTER.
+};
+
+// Try to get it into one cache line by aligning it.
+static __thread GrowableArray<struct local_counter *> thread_local_array;
+static __thread bool thread_local_array_inited = false;
+
+static DoublyLinkedList<GrowableArray<struct local_counter *> *> all_thread_local_arrays;
+static __thread LinkedListElement<GrowableArray<struct local_counter *> *> thread_local_ll_elt;
+
+static void destroy_thread_local_part_of_partitioned_counters (void *ignore_me);
+static void destroy_thread_local_part_of_partitioned_counters (void *ignore_me __attribute__((__unused__)))
+// Effect: This function is called whenever a thread terminates using the
+// destructor of the thread_destructor_key (defined below). First grab the
+// lock, then go through all the partitioned counters and remove the part that
+// is local to this thread. We don't actually need the contents of the
+// thread_destructor_key except to cause this function to run. The content of
+// the key is a static string, so don't try to free it.
+{
+ pc_lock();
+ for (size_t i=0; i<thread_local_array.get_size(); i++) {
+ struct local_counter *lc = thread_local_array.fetch_unchecked(i);
+ if (lc==NULL) continue;
+ PARTITIONED_COUNTER owner = lc->owner_pc;
+ owner->sum_of_dead += lc->sum;
+ owner->ll_counter_head.remove(&lc->ll_in_counter);
+ toku_free(lc);
+ }
+ all_thread_local_arrays.remove(&thread_local_ll_elt);
+ thread_local_array_inited = false;
+ thread_local_array.deinit();
+ pc_unlock();
+}
+
+//******************************************************************************
+// We employ a system-wide pthread_key simply to get a notification when a
+// thread terminates. The key will simply contain a constant string (it's "dont
+// care", but it doesn't matter what it is, as long as it's not NULL). We need
+// a constructor function to set up the pthread_key. We used a constructor
+// function instead of a C++ constructor because that's what we are used to,
+// rather than because it's necessarily better. Whenever a thread tries to
+// increment a partitioned_counter for the first time, it sets the
+// pthread_setspecific for the thread_destructor_key. It's OK if the key gets
+// setspecific multiple times, it's always the same value. When a thread (that
+// has created a thread-local part of any partitioned counter) terminates, the
+// destroy_thread_local_part_of_partitioned_counters will run. It may run
+// before or after other pthread_key destructors, but the thread-local
+// thread_local_ll_elt variable is still present until the thread is completely done
+// running.
+//******************************************************************************
+
+static pthread_key_t thread_destructor_key;
+
+//******************************************************************************
+// We don't like using up pthread_keys (macos provides only 128 of them),
+// so we built our own. Also, looking at the source code for linux libc,
+// it looks like pthread_keys get slower if there are a lot of them.
+// So we use only one pthread_key.
+//******************************************************************************
+
+GrowableArray<bool> counters_in_use;
+
+static uint64_t allocate_counter (void)
+// Effect: Find an unused counter number, and allocate it, returning the counter number.
+// Grabs the pc_lock.
+{
+ uint64_t ret;
+ pc_lock();
+ size_t size = counters_in_use.get_size();
+ for (uint64_t i=0; i<size; i++) {
+ if (!counters_in_use.fetch_unchecked(i)) {
+ counters_in_use.store_unchecked(i, true);
+ ret = i;
+ goto unlock;
+ }
+ }
+ counters_in_use.push(true);
+ ret = size;
+unlock:
+ pc_unlock();
+ return ret;
+}
+
+
+static void free_counter(uint64_t counternum)
+// Effect: Free a counter.
+// Requires: The pc mutex is held before calling.
+{
+ assert(counternum < counters_in_use.get_size());
+ assert(counters_in_use.fetch_unchecked(counternum));
+ counters_in_use.store_unchecked(counternum, false);
+}
+
+static void destroy_counters (void) {
+ counters_in_use.deinit();
+}
+
+
+//******************************************************************************
+// Now for the code that actually creates a counter.
+//******************************************************************************
+
+PARTITIONED_COUNTER create_partitioned_counter(void)
+// Effect: Create a counter, initialized to zero.
+{
+ PARTITIONED_COUNTER XMALLOC(result);
+ result->sum_of_dead = 0;
+ result->pc_key = allocate_counter();
+ result->ll_counter_head.init();
+ return result;
+}
+
+void destroy_partitioned_counter(PARTITIONED_COUNTER pc)
+// Effect: Destroy the counter. No operations on this counter are permitted after.
+// Implementation note: Since we have a global lock, we can destroy all the thread-local
+// versions as well.
+{
+ pc_lock();
+ uint64_t pc_key = pc->pc_key;
+ LinkedListElement<struct local_counter *> *first;
+ while (pc->ll_counter_head.pop(&first)) {
+ // We just removed first from the counter list, now we must remove it from the thread-local array.
+ struct local_counter *lc = first->get_container();
+ assert(pc == lc->owner_pc);
+ GrowableArray<struct local_counter *> *tla = lc->thread_local_array;
+ tla->store_unchecked(pc_key, NULL);
+ toku_free(lc);
+ }
+ toku_free(pc);
+ free_counter(pc_key);
+ pc_unlock();
+}
+
+static inline struct local_counter *get_thread_local_counter(uint64_t pc_key, GrowableArray<struct local_counter *> *a)
+{
+ if (pc_key >= a->get_size()) {
+ return NULL;
+ } else {
+ return a->fetch_unchecked(pc_key);
+ }
+}
+
+static struct local_counter *get_or_alloc_thread_local_counter(PARTITIONED_COUNTER pc)
+{
+ // Only this thread is allowed to modify thread_local_array, except for setting tla->array[pc_key] to NULL
+ // when a counter is destroyed (and in that case there should be no race because no other thread should be
+    // trying to access the same local counter at the same time).
+ uint64_t pc_key = pc->pc_key;
+ struct local_counter *lc = get_thread_local_counter(pc->pc_key, &thread_local_array);
+ if (lc == NULL) {
+ XMALLOC(lc); // Might as well do the malloc without holding the pc lock. But most of the rest of this work needs the lock.
+ pc_lock();
+
+        // Set things up so that when this thread terminates, the thread-local parts of the counter will be destroyed and merged into their respective counters.
+ if (!thread_local_array_inited) {
+ pk_setspecific(thread_destructor_key, "dont care");
+ thread_local_array_inited=true;
+ thread_local_array.init();
+ all_thread_local_arrays.insert(&thread_local_ll_elt, &thread_local_array);
+ }
+
+ lc->sum = 0;
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&lc->sum, sizeof(lc->sum)); // the counter increment is kind of racy.
+ lc->owner_pc = pc;
+ lc->thread_local_array = &thread_local_array;
+
+ // Grow the array if needed, filling in NULLs
+ while (thread_local_array.get_size() <= pc_key) {
+ thread_local_array.push(NULL);
+ }
+ thread_local_array.store_unchecked(pc_key, lc);
+ pc->ll_counter_head.insert(&lc->ll_in_counter, lc);
+ pc_unlock();
+ }
+ return lc;
+}
+
+void increment_partitioned_counter(PARTITIONED_COUNTER pc, uint64_t amount)
+// Effect: Increment the counter by amount.
+// Requires: No overflows. This is a 64-bit unsigned counter.
+{
+ struct local_counter *lc = get_or_alloc_thread_local_counter(pc);
+ lc->sum += amount;
+}
+
+static int sumit(struct local_counter *lc, uint64_t *sum) {
+ (*sum)+=lc->sum;
+ return 0;
+}
+
+uint64_t read_partitioned_counter(PARTITIONED_COUNTER pc)
+// Effect: Return the current value of the counter.
+// Implementation note: Sum all the thread-local counts along with the sum_of_the_dead.
+{
+ pc_lock();
+ uint64_t sum = pc->sum_of_dead;
+ int r = pc->ll_counter_head.iterate<uint64_t *>(sumit, &sum);
+ assert(r==0);
+ pc_unlock();
+ return sum;
+}
+
+void partitioned_counters_init(void)
+// Effect: Initialize any partitioned counters data structures that must be set up before any partitioned counters run.
+{
+ pk_create(&thread_destructor_key, destroy_thread_local_part_of_partitioned_counters);
+ all_thread_local_arrays.init();
+}
+
+void partitioned_counters_destroy(void)
+// Effect: Destroy any partitioned counters data structures.
+{
+ pc_lock();
+ LinkedListElement<GrowableArray<struct local_counter *> *> *a_ll;
+ while (all_thread_local_arrays.pop(&a_ll)) {
+ a_ll->get_container()->deinit();
+ }
+
+ pk_delete(thread_destructor_key);
+ destroy_counters();
+ pc_unlock();
+}
+
+#endif // __APPLE__
diff --git a/storage/tokudb/PerconaFT/util/partitioned_counter.h b/storage/tokudb/PerconaFT/util/partitioned_counter.h
new file mode 100644
index 00000000..d5bf97cf
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/partitioned_counter.h
@@ -0,0 +1,149 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// Overview: A partitioned_counter provides a counter that can be incremented and the running sum can be read at any time.
+// We assume that increments are frequent, whereas reading is infrequent.
+// Implementation hint: Use thread-local storage so each thread increments its own data. The increment does not require a lock or atomic operation.
+// Reading the data can be performed by iterating over the thread-local versions, summing them up.
+// The data structure also includes a sum for all the threads that have died.
+// Use a pthread_key to create the thread-local versions. When a thread finishes, the system calls the pthread_key destructor, which can add that thread's copy
+// into the sum_of_dead counter.
+// Rationale: For statistics such as are found in engine status, we need a counter that requires no cache misses to increment. We've seen significant
+// performance speedups by removing certain counters. Rather than removing those statistics, we would like to just make the counter fast.
+// We generally increment the counters frequently, and want to fetch the values infrequently.
+// The counters are monotonic.
+// The counters can be split into many counters, which can be summed up at the end.
+// We don't care if we get slightly out-of-date counter sums when we read the counter. We don't care if there is a race on reading a counter
+// variable and incrementing.
+// See tests/test_partitioned_counter.c for some performance measurements.
+// Operations:
+// create_partitioned_counter Create a counter initialized to zero.
+// destroy_partitioned_counter Destroy it.
+// increment_partitioned_counter Increment it. This is the frequent operation.
+// read_partitioned_counter Get the current value. This is infrequent.
+// See partitioned_counter.cc for the abstraction function and representation invariant.
+//
+// The google style guide says to avoid using constructors, and it appears that
+// constructors may have broken all the tests, because they called
+// pthread_key_create before the key was actually created. So the google style
+// guide may have some wisdom there...
+//
+// This version does not use constructors, essentially reverting to the google C++ style guide.
+//
+
+// The old C interface. This required a bunch of explicit ___attribute__((__destructor__)) functions to remember to destroy counters at the end.
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+typedef struct partitioned_counter *PARTITIONED_COUNTER;
+PARTITIONED_COUNTER create_partitioned_counter(void);
+// Effect: Create a counter, initialized to zero.
+
+void destroy_partitioned_counter(PARTITIONED_COUNTER);
+// Effect: Destroy the counter. No operations on that counter are permitted after this.
+
+void increment_partitioned_counter(PARTITIONED_COUNTER, uint64_t amount);
+// Effect: Increment the counter by amount.
+// Requires: No overflows. This is a 64-bit unsigned counter.
+
+uint64_t read_partitioned_counter(PARTITIONED_COUNTER) __attribute__((__visibility__("default")));
+// Effect: Return the current value of the counter.
+
+void partitioned_counters_init(void);
+// Effect: Initialize any partitioned counters data structures that must be set up before any partitioned counters run.
+
+void partitioned_counters_destroy(void);
+// Effect: Destroy any partitioned counters data structures.
+
+#if defined(__cplusplus)
+};
+#endif
+
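+// Example usage (editor's sketch, not part of the original source): each thread
+// that calls increment gets its own thread-local count, so the hot path takes no
+// lock and touches no shared cache line, while the read path takes the global lock
+// and sums everything up. Assumes partitioned_counters_init() already ran at startup.
+#if 0
+static PARTITIONED_COUNTER doc_inserts;   // hypothetical counter
+
+static void example_insert_one_document(void) {
+    increment_partitioned_counter(doc_inserts, 1);       // frequent, cheap
+}
+
+static uint64_t example_report_inserts(void) {
+    return read_partitioned_counter(doc_inserts);        // infrequent, takes the lock
+}
+
+static void example_setup_and_teardown(void) {
+    doc_inserts = create_partitioned_counter();
+    // ... run the workload ...
+    destroy_partitioned_counter(doc_inserts);
+}
+#endif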
+#if 0
+#include <pthread.h>
+#include "fttypes.h"
+
+// Used inside the PARTITIONED_COUNTER.
+struct linked_list_head {
+ struct linked_list_element *first;
+};
+
+
+class PARTITIONED_COUNTER {
+public:
+ PARTITIONED_COUNTER(void);
+ // Effect: Construct a counter, initialized to zero.
+
+ ~PARTITIONED_COUNTER(void);
+ // Effect: Destruct the counter.
+
+ void increment(uint64_t amount);
+ // Effect: Increment the counter by amount. This is a 64-bit unsigned counter, and if you overflow it, you will get overflowed results (that is mod 2^64).
+ // Requires: Don't use this from a static constructor or destructor.
+
+ uint64_t read(void);
+ // Effect: Read the sum.
+ // Requires: Don't use this from a static constructor or destructor.
+
+private:
+ uint64_t _sum_of_dead; // The sum of all thread-local counts from threads that have terminated.
+ pthread_key_t _key; // The pthread_key which gives us the hook to construct and destruct thread-local storage.
+ struct linked_list_head _ll_counter_head; // A linked list of all the thread-local information for this counter.
+
+ // This function is used to destroy the thread-local part of the state when a thread terminates.
+ // But it's not the destructor for the local part of the counter, it's a destructor on a "dummy" key just so that we get a notification when a thread ends.
+ friend void destroy_thread_local_part_of_partitioned_counters (void *);
+};
+#endif
diff --git a/storage/tokudb/PerconaFT/util/queue.cc b/storage/tokudb/PerconaFT/util/queue.cc
new file mode 100644
index 00000000..39dfbbc6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/queue.cc
@@ -0,0 +1,182 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include "toku_os.h"
+#include <errno.h>
+#include <toku_assert.h>
+#include "queue.h"
+#include "memory.h"
+#include <toku_pthread.h>
+
+toku_instr_key *queue_result_mutex_key;
+toku_instr_key *queue_result_cond_key;
+
+struct qitem;
+
+struct qitem {
+ void *item;
+ struct qitem *next;
+ uint64_t weight;
+};
+
+struct queue {
+ uint64_t contents_weight; // how much stuff is in there?
+ uint64_t weight_limit; // Block enqueueing when the contents gets to be bigger than the weight.
+ struct qitem *head, *tail;
+
+ bool eof;
+
+ toku_mutex_t mutex;
+ toku_cond_t cond;
+};
+
+// Representation invariant:
+// q->contents_weight is the sum of the weights of everything in the queue.
+// q->weight_limit is the limit on the weight before we block.
+// q->head is the oldest thing in the queue. q->tail is the newest. (If nothing is in the queue then both are NULL)
+// If q->head is not null:
+// q->head->item is the oldest item.
+// q->head->weight is the weight of that item.
+// q->head->next is the next youngest thing.
+// q->eof indicates that the producer has said "that's all".
+// q->mutex protects the queue, and q->cond is used to wait for a state change (data arriving, or the total weight dropping back under the limit).
+
+
+int toku_queue_create (QUEUE *q, uint64_t weight_limit)
+{
+ QUEUE CALLOC(result);
+ if (result==NULL) return get_error_errno();
+ result->contents_weight = 0;
+ result->weight_limit = weight_limit;
+ result->head = NULL;
+ result->tail = NULL;
+ result->eof = false;
+ toku_mutex_init(*queue_result_mutex_key, &result->mutex, nullptr);
+ toku_cond_init(*queue_result_cond_key, &result->cond, nullptr);
+ *q = result;
+ return 0;
+}
+
+int toku_queue_destroy (QUEUE q)
+{
+ if (q->head) return EINVAL;
+ assert(q->contents_weight==0);
+ toku_mutex_destroy(&q->mutex);
+ toku_cond_destroy(&q->cond);
+ toku_free(q);
+ return 0;
+}
+
+int toku_queue_enq (QUEUE q, void *item, uint64_t weight, uint64_t *total_weight_after_enq)
+{
+ toku_mutex_lock(&q->mutex);
+ assert(!q->eof);
+ // Go ahead and put it in, even if it's too much.
+ struct qitem *MALLOC(qi);
+ if (qi==NULL) {
+ int r = get_error_errno();
+ toku_mutex_unlock(&q->mutex);
+ return r;
+ }
+ q->contents_weight += weight;
+ qi->item = item;
+ qi->weight = weight;
+ qi->next = NULL;
+ if (q->tail) {
+ q->tail->next = qi;
+ } else {
+ assert(q->head==NULL);
+ q->head = qi;
+ }
+ q->tail = qi;
+ // Wake up the consumer.
+ toku_cond_signal(&q->cond);
+ // Now block if there's too much stuff in there.
+ while (q->weight_limit < q->contents_weight) {
+ toku_cond_wait(&q->cond, &q->mutex);
+ }
+ // we are allowed to return.
+ if (total_weight_after_enq) {
+ *total_weight_after_enq = q->contents_weight;
+ }
+ toku_mutex_unlock(&q->mutex);
+ return 0;
+}
+
+int toku_queue_eof (QUEUE q)
+{
+ toku_mutex_lock(&q->mutex);
+ assert(!q->eof);
+ q->eof = true;
+ toku_cond_signal(&q->cond);
+ toku_mutex_unlock(&q->mutex);
+ return 0;
+}
+
+int toku_queue_deq (QUEUE q, void **item, uint64_t *weight, uint64_t *total_weight_after_deq)
+{
+ toku_mutex_lock(&q->mutex);
+ int result;
+ while (q->head==NULL && !q->eof) {
+ toku_cond_wait(&q->cond, &q->mutex);
+ }
+ if (q->head==NULL) {
+ assert(q->eof);
+ result = EOF;
+ } else {
+ struct qitem *head = q->head;
+ q->contents_weight -= head->weight;
+ *item = head->item;
+ if (weight)
+ *weight = head->weight;
+ if (total_weight_after_deq)
+ *total_weight_after_deq = q->contents_weight;
+ q->head = head->next;
+ toku_free(head);
+ if (q->head==NULL) {
+ q->tail = NULL;
+ }
+ // wake up the producer, since we decreased the contents_weight.
+ toku_cond_signal(&q->cond);
+ // Successful result.
+ result = 0;
+ }
+ toku_mutex_unlock(&q->mutex);
+ return result;
+}
diff --git a/storage/tokudb/PerconaFT/util/queue.h b/storage/tokudb/PerconaFT/util/queue.h
new file mode 100644
index 00000000..c6f1f740
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/queue.h
@@ -0,0 +1,83 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+// The abstraction:
+//
+// queue.h implements a queue suitable for a producer-consumer relationship between two pthreads.
+// The enqueue/dequeue operation is fairly heavyweight (involving pthread condition variables) so it may be useful
+// to enqueue large chunks rather than small chunks.
+// It probably won't work right to have two consumer threads.
+//
+// Every item inserted into the queue has a weight. If the weight
+// gets too big, then the queue blocks on trying to insert more items.
+// The weight can be used to limit the total number of items in the
+// queue (weight of each item=1) or the total memory consumed by queue
+// items (weight of each item is its size). Or the weights could all be
+// zero for an unlimited queue.
+
+typedef struct queue *QUEUE;
+
+int toku_queue_create (QUEUE *q, uint64_t weight_limit);
+// Effect: Create a queue with a given weight limit. The queue is initially empty.
+
+int toku_queue_enq (QUEUE q, void *item, uint64_t weight, uint64_t *total_weight_after_enq);
+// Effect: Insert ITEM of weight WEIGHT into queue. If the resulting contents weigh too much then block (don't return) until the total weight is low enough.
+// If total_weight_after_enq!=NULL then return the current weight of the items in the queue (after finishing blocking on overweight, and after enqueueing the item).
+// If successful return 0.
+// If an error occurs, return the error number, and the state of the queue is undefined. The item may have been enqueued or not, and in fact the queue may be badly corrupted if the condition variables go awry. If it's just a matter of out-of-memory, then the queue is probably OK.
+// Requires: There is only a single consumer. (We wake up the consumer using pthread_cond_signal, which is suitable only for single consumers.)
+
+int toku_queue_eof (QUEUE q);
+// Effect: Inform the queue that no more values will be inserted. After all the values that have been inserted are dequeued, further dequeue operations will return EOF.
+// Returns 0 on success. On failure, things are pretty bad (likely to be some sort of mutex failure).
+
+int toku_queue_deq (QUEUE q, void **item, uint64_t *weight, uint64_t *total_weight_after_deq);
+// Effect: Wait until the queue becomes nonempty. Then dequeue and return the oldest item; the item itself is returned in *item.
+// If weight!=NULL then return the item's weight in *weight.
+// If total_weight_after_deq!=NULL then return the current weight of the items in the queue (after dequeuing the item).
+// Return 0 if an item is returned.
+// Return EOF if no more items will be returned.
+// Usage note: The queue should be destroyed only after any consumers will no longer look at it (for example, they saw EOF).
+
+int toku_queue_destroy (QUEUE q);
+// Effect: Destroy the queue.
+// Requires: The queue must be empty and no consumer should try to dequeue after this (one way to do this is to make sure the consumer saw EOF).
+// Returns 0 on success. If the queue is not empty, returns EINVAL. Other errors are likely to be bad (some sort of mutex or condvar failure).
+
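+// Example usage (editor's sketch, not part of the original source): one producer
+// thread feeds buffers to one consumer thread, bounding the queue at 16MB of
+// buffered data by using each buffer's size as its weight.
+#if 0
+static void example_producer(QUEUE q, void *bufs[], uint64_t sizes[], int n) {
+    for (int i = 0; i < n; i++) {
+        int r = toku_queue_enq(q, bufs[i], sizes[i], NULL);  // may block while the queue is over its weight limit
+        assert(r == 0);
+    }
+    int r = toku_queue_eof(q);   // tell the consumer that no more items are coming
+    assert(r == 0);
+}
+
+static void example_consumer(QUEUE q) {
+    void *buf;
+    uint64_t bufsize;
+    while (toku_queue_deq(q, &buf, &bufsize, NULL) == 0) {
+        // ... process buf, then free it ...
+    }
+    // toku_queue_deq returned EOF: the producer called toku_queue_eof and the queue drained.
+}
+
+static void example_setup(void) {
+    QUEUE q;
+    int r = toku_queue_create(&q, 16 << 20);   // 16MB weight limit
+    assert(r == 0);
+    // ... start the producer and consumer threads, join them, then ...
+    r = toku_queue_destroy(q);
+    assert(r == 0);
+}
+#endif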
diff --git a/storage/tokudb/PerconaFT/util/rwlock.h b/storage/tokudb/PerconaFT/util/rwlock.h
new file mode 100644
index 00000000..d9a13ba9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/rwlock.h
@@ -0,0 +1,348 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_assert.h>
+#include <toku_portability.h>
+#include <toku_instrumentation.h>
+
+/* Readers/writers locks implementation
+ *
+ *****************************************
+ * Overview
+ *****************************************
+ *
+ * PerconaFT employs readers/writers locks for the ephemeral locks (e.g.,
+ * on FT nodes). Why not just use the toku_pthread_rwlock API?
+ *
+ * 1) we need multiprocess rwlocks (not just multithreaded)
+ *
+ * 2) pthread rwlocks are very slow since they entail a system call
+ * (about 2000ns on a 2GHz T2500.)
+ *
+ * Related: We expect the common case to be that the lock is
+ * granted
+ *
+ * 3) We are willing to employ machine-specific instructions (such
+ * as atomic exchange, and mfence, each of which runs in about
+ * 10ns.)
+ *
+ * 4) We want to guarantee nonstarvation (many rwlock
+ * implementations can starve the writers because another reader
+ * comes along before all the other readers have unlocked.)
+ *
+ *****************************************
+ * How it works
+ *****************************************
+ *
+ * We arrange that the rwlock object is in the address space of both
+ * threads or processes. For processes we use mmap().
+ *
+ * The rwlock struct comprises the following fields
+ *
+ * a long mutex field (which is accessed using xchgl() or other
+ * machine-specific instructions). This is a spin lock.
+ *
+ * a read counter (how many readers currently have the lock?)
+ *
+ * a write boolean (does a writer have the lock?)
+ *
+ * a singly linked list of semaphores for waiting requesters. This
+ * list is sorted oldest requester first. Each list element
+ * contains a semaphore (which is provided by the requestor) and a
+ * boolean indicating whether it is a reader or a writer.
+ *
+ * To lock a read rwlock:
+ *
+ * 1) Acquire the mutex.
+ *
+ * 2) If the linked list is not empty or the writer boolean is true
+ * then
+ *
+ * a) initialize your semaphore (to 0),
+ * b) add your list element to the end of the list (with rw="read")
+ * c) release the mutex
+ * d) wait on the semaphore
+ * e) when the semaphore is released, return success.
+ *
+ * 3) Otherwise increment the reader count, release the mutex, and
+ * return success.
+ *
+ * To lock the write rwlock is almost the same.
+ * 1) Acquire the mutex
+ * 2) If the list is not empty or the reader count is nonzero
+ * a) initialize semaphore
+ * b) add to end of list (with rw="write")
+ * c) release mutex
+ * d) wait on the semaphore
+ * e) return success when the semaphore releases
+ * 3) Otherwise set writer=true, release mutex and return success.
+ *
+ * To unlock a read rwlock:
+ * 1) Acquire mutex
+ * 2) Decrement reader count
+ * 3) If the count is still positive or the list is empty then
+ * return success
+ * 4) Otherwise (count==zero and the list is nonempty):
+ * a) If the first element of the list is a reader:
+ * i) while the first element is a reader:
+ * x) pop the list
+ * y) increment the reader count
+ * z) increment the semaphore (releasing it for some waiter)
+ * ii) return success
+ * b) Else if the first element is a writer
+ * i) pop the list
+ * ii) set writer to true
+ * iii) increment the semaphore
+ * iv) return success
+ */
+
+//Use case:
+// A read lock is acquired by threads that get and pin an entry in the
+// cachetable. A write lock is acquired by the writer thread when an entry
+// is evicted from the cachetable and is being written to storage.
+
+//Use case:
+// General purpose reader writer lock with properties:
+// 1. multiple readers, no writers
+// 2. one writer at a time
+// 3. pending writers have priority over pending readers
+
+// An external mutex must be locked when using these functions. An alternate
+// design would bury a mutex into the rwlock itself. While this may
+// increase parallelism at the expense of single thread performance, we
+// are experimenting with a single higher level lock.
+
+extern toku_instr_key *rwlock_cond_key;
+extern toku_instr_key *rwlock_wait_read_key;
+extern toku_instr_key *rwlock_wait_write_key;
+
+typedef struct st_rwlock *RWLOCK;
+struct st_rwlock {
+ int reader; // the number of readers
+ int want_read; // the number of blocked readers
+ toku_cond_t wait_read;
+ int writer; // the number of writers
+ int want_write; // the number of blocked writers
+ toku_cond_t wait_write;
+ toku_cond_t *wait_users_go_to_zero;
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_pthread_rwlock_t prwlock;
+#endif
+};
+
+// returns: the sum of the number of readers, pending readers, writers, and
+// pending writers
+
+static inline int rwlock_users(RWLOCK rwlock) {
+ return rwlock->reader + rwlock->want_read + rwlock->writer +
+ rwlock->want_write;
+}
+
+#if defined(TOKU_MYSQL_WITH_PFS)
+#define rwlock_init(K, R) inline_rwlock_init(K, R)
+#else
+#define rwlock_init(K, R) inline_rwlock_init(R)
+#endif
+
+// initialize a read write lock
+static inline __attribute__((__unused__)) void inline_rwlock_init(
+#if defined(TOKU_MYSQL_WITH_PFS)
+ const toku_instr_key &rwlock_instr_key,
+#endif
+ RWLOCK rwlock) {
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_pthread_rwlock_init(rwlock_instr_key, &rwlock->prwlock, nullptr);
+#endif
+ rwlock->reader = rwlock->want_read = 0;
+ rwlock->writer = rwlock->want_write = 0;
+ toku_cond_init(toku_uninstrumented, &rwlock->wait_read, nullptr);
+ toku_cond_init(toku_uninstrumented, &rwlock->wait_write, nullptr);
+ rwlock->wait_users_go_to_zero = NULL;
+}
+
+// destroy a read write lock
+
+static inline __attribute__((__unused__)) void rwlock_destroy(RWLOCK rwlock) {
+ paranoid_invariant(rwlock->reader == 0);
+ paranoid_invariant(rwlock->want_read == 0);
+ paranoid_invariant(rwlock->writer == 0);
+ paranoid_invariant(rwlock->want_write == 0);
+ toku_cond_destroy(&rwlock->wait_read);
+ toku_cond_destroy(&rwlock->wait_write);
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_pthread_rwlock_destroy(&rwlock->prwlock);
+#endif
+}
+
+// obtain a read lock
+// expects: mutex is locked
+
+static inline void rwlock_read_lock(RWLOCK rwlock, toku_mutex_t *mutex) {
+#ifdef TOKU_MYSQL_WITH_PFS
+ /* Instrumentation start */
+ toku_rwlock_instrumentation rwlock_instr;
+ // TODO: pull location information up to caller
+ toku_instr_rwlock_rdlock_wait_start(
+ rwlock_instr, rwlock->prwlock, __FILE__, __LINE__);
+
+#endif
+
+ paranoid_invariant(!rwlock->wait_users_go_to_zero);
+ if (rwlock->writer || rwlock->want_write) {
+ rwlock->want_read++;
+ while (rwlock->writer || rwlock->want_write) {
+ toku_cond_wait(&rwlock->wait_read, mutex);
+ }
+ rwlock->want_read--;
+ }
+ rwlock->reader++;
+#ifdef TOKU_MYSQL_WITH_PFS
+ /* Instrumentation end */
+    toku_instr_rwlock_rdlock_wait_end(rwlock_instr, 0);
+#endif
+}
+
+// release a read lock
+// expects: mutex is locked
+
+static inline void rwlock_read_unlock(RWLOCK rwlock) {
+#ifdef TOKU_MYSQL_WITH_PFS
+ toku_instr_rwlock_unlock(rwlock->prwlock);
+#endif
+ paranoid_invariant(rwlock->reader > 0);
+ paranoid_invariant(rwlock->writer == 0);
+ rwlock->reader--;
+ if (rwlock->reader == 0 && rwlock->want_write) {
+ toku_cond_signal(&rwlock->wait_write);
+ }
+ if (rwlock->wait_users_go_to_zero && rwlock_users(rwlock) == 0) {
+ toku_cond_signal(rwlock->wait_users_go_to_zero);
+ }
+}
+
+// obtain a write lock
+// expects: mutex is locked
+
+static inline void rwlock_write_lock(RWLOCK rwlock, toku_mutex_t *mutex) {
+#ifdef TOKU_MYSQL_WITH_PFS
+ /* Instrumentation start */
+ toku_rwlock_instrumentation rwlock_instr;
+ toku_instr_rwlock_wrlock_wait_start(
+ rwlock_instr, rwlock->prwlock, __FILE__, __LINE__);
+#endif
+ paranoid_invariant(!rwlock->wait_users_go_to_zero);
+ if (rwlock->reader || rwlock->writer) {
+ rwlock->want_write++;
+ while (rwlock->reader || rwlock->writer) {
+ toku_cond_wait(&rwlock->wait_write, mutex);
+ }
+ rwlock->want_write--;
+ }
+ rwlock->writer++;
+#if defined(TOKU_MYSQL_WITH_PFS)
+ /* Instrumentation end */
+ toku_instr_rwlock_wrlock_wait_end(rwlock_instr, 0);
+#endif
+}
+
+// release a write lock
+// expects: mutex is locked
+
+static inline void rwlock_write_unlock(RWLOCK rwlock) {
+#if defined(TOKU_MYSQL_WITH_PFS)
+ toku_instr_rwlock_unlock(rwlock->prwlock);
+#endif
+ paranoid_invariant(rwlock->reader == 0);
+ paranoid_invariant(rwlock->writer == 1);
+ rwlock->writer--;
+ if (rwlock->want_write) {
+ toku_cond_signal(&rwlock->wait_write);
+ } else if (rwlock->want_read) {
+ toku_cond_broadcast(&rwlock->wait_read);
+ }
+ if (rwlock->wait_users_go_to_zero && rwlock_users(rwlock) == 0) {
+ toku_cond_signal(rwlock->wait_users_go_to_zero);
+ }
+}
+
+// returns: the number of readers
+
+static inline int rwlock_readers(RWLOCK rwlock) {
+ return rwlock->reader;
+}
+
+// returns: the number of readers who are waiting for the lock
+
+static inline int rwlock_blocked_readers(RWLOCK rwlock) {
+ return rwlock->want_read;
+}
+
+// returns: the number of writers who are waiting for the lock
+
+static inline int rwlock_blocked_writers(RWLOCK rwlock) {
+ return rwlock->want_write;
+}
+
+// returns: the number of writers
+
+static inline int rwlock_writers(RWLOCK rwlock) {
+ return rwlock->writer;
+}
+
+static inline bool rwlock_write_will_block(RWLOCK rwlock) {
+ return (rwlock->writer > 0 || rwlock->reader > 0);
+}
+
+static inline int rwlock_read_will_block(RWLOCK rwlock) {
+ return (rwlock->writer > 0 || rwlock->want_write > 0);
+}
+
+static inline void rwlock_wait_for_users(RWLOCK rwlock, toku_mutex_t *mutex) {
+ paranoid_invariant(!rwlock->wait_users_go_to_zero);
+ toku_cond_t cond;
+ toku_cond_init(toku_uninstrumented, &cond, nullptr);
+ while (rwlock_users(rwlock) > 0) {
+ rwlock->wait_users_go_to_zero = &cond;
+ toku_cond_wait(&cond, mutex);
+ }
+ rwlock->wait_users_go_to_zero = NULL;
+ toku_cond_destroy(&cond);
+}
+
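+// Example usage (editor's sketch, not part of the original source): the rwlock is
+// always manipulated while holding an external mutex, but the protected data can be
+// read without the mutex once the read lock is held.
+#if 0
+static toku_mutex_t example_mutex;
+static struct st_rwlock example_lock;
+
+static void example_init(void) {
+    toku_mutex_init(toku_uninstrumented, &example_mutex, nullptr);
+    rwlock_init(toku_uninstrumented, &example_lock);
+}
+
+static void example_reader(void) {
+    toku_mutex_lock(&example_mutex);
+    rwlock_read_lock(&example_lock, &example_mutex);   // may release/reacquire the mutex while waiting
+    toku_mutex_unlock(&example_mutex);
+
+    // ... read the shared structure; the mutex is not needed here ...
+
+    toku_mutex_lock(&example_mutex);
+    rwlock_read_unlock(&example_lock);
+    toku_mutex_unlock(&example_mutex);
+}
+#endif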
diff --git a/storage/tokudb/PerconaFT/util/scoped_malloc.cc b/storage/tokudb/PerconaFT/util/scoped_malloc.cc
new file mode 100644
index 00000000..6c4fb95a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/scoped_malloc.cc
@@ -0,0 +1,227 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <portability/memory.h>
+
+#include <util/scoped_malloc.h>
+
+// The __thread storage class modifier isn't well supported on osx, but we
+// aren't worried about the performance on osx, so we provide a
+// pass-through implementation of scoped mallocs.
+#ifdef __APPLE__
+
+namespace toku {
+
+ scoped_malloc::scoped_malloc(const size_t size)
+ : m_size(size),
+ m_local(false),
+ m_buf(toku_xmalloc(size)) {}
+
+ scoped_malloc::~scoped_malloc() {
+ toku_free(m_buf);
+ }
+
+} // namespace toku
+
+void toku_scoped_malloc_init(void) {}
+void toku_scoped_malloc_destroy(void) {}
+void toku_scoped_malloc_destroy_set(void) {}
+void toku_scoped_malloc_destroy_key(void) {}
+
+#else // __APPLE__
+
+#include <set>
+#include <pthread.h>
+
+#include <portability/toku_pthread.h>
+
+namespace toku {
+
+ // see pthread_key handling at the bottom
+ //
+ // when we use gcc 4.8, we can use the 'thread_local' keyword and proper c++
+    // constructors/destructors instead of this pthread / global set wizardry.
+ static pthread_key_t tl_stack_destroy_pthread_key;
+ class tl_stack;
+ std::set<tl_stack *> *global_stack_set;
+ toku_mutex_t global_stack_set_mutex = TOKU_MUTEX_INITIALIZER;
+
+ class tl_stack {
+ // 1MB
+ static const size_t STACK_SIZE = 1 * 1024 * 1024;
+
+ public:
+ void init() {
+ m_stack = reinterpret_cast<char *>(toku_xmalloc(STACK_SIZE));
+ m_current_offset = 0;
+ int r = pthread_setspecific(tl_stack_destroy_pthread_key, this);
+ invariant_zero(r);
+ }
+
+ void destroy() {
+#if defined(TOKU_SCOPED_MALLOC_DEBUG) && TOKU_SCOPED_MALLOC_DEBUG
+ printf("%s %p %p\n", __FUNCTION__, this, m_stack);
+#endif
+ if (m_stack != NULL) {
+ toku_free(m_stack);
+ m_stack = NULL;
+ }
+ }
+
+ // initialize a tl_stack and insert it into the global map
+ static void init_and_register(tl_stack *st) {
+ st->init();
+ invariant_notnull(global_stack_set);
+
+ toku_mutex_lock(&global_stack_set_mutex);
+ std::pair<std::set<tl_stack *>::iterator, bool> p = global_stack_set->insert(st);
+ invariant(p.second);
+ toku_mutex_unlock(&global_stack_set_mutex);
+ }
+
+ // destruct a tl_stack and remove it from the global map
+ // passed in as void * to match the generic pthread destructor API
+ static void destroy_and_deregister(void *key) {
+ invariant_notnull(key);
+ tl_stack *st = reinterpret_cast<tl_stack *>(key);
+
+ size_t n = 0;
+ toku_mutex_lock(&global_stack_set_mutex);
+ if (global_stack_set) {
+ n = global_stack_set->erase(st);
+ }
+ toku_mutex_unlock(&global_stack_set_mutex);
+
+ if (n == 1) {
+ st->destroy(); // destroy the stack if this function erased it from the set. otherwise, somebody else destroyed it.
+ }
+ }
+
+ // Allocate 'size' bytes and return a pointer to the first byte
+ void *alloc(const size_t size) {
+ if (m_stack == NULL) {
+ init_and_register(this);
+ }
+ invariant(m_current_offset + size <= STACK_SIZE);
+ void *mem = &m_stack[m_current_offset];
+ m_current_offset += size;
+ return mem;
+ }
+
+ // Give back a previously allocated region of 'size' bytes.
+ void dealloc(const size_t size) {
+ invariant(m_current_offset >= size);
+ m_current_offset -= size;
+ }
+
+ // Get the current size of free-space in bytes.
+ size_t get_free_space() const {
+ invariant(m_current_offset <= STACK_SIZE);
+ return STACK_SIZE - m_current_offset;
+ }
+
+ private:
+ // Offset of the free region in the stack
+ size_t m_current_offset;
+ char *m_stack;
+ };
+
+ // Each thread has its own local stack.
+ static __thread tl_stack local_stack;
+
+    // Memory is allocated from thread-local storage if available, otherwise from malloc(3).
+ scoped_malloc::scoped_malloc(const size_t size) :
+ m_size(size),
+ m_local(local_stack.get_free_space() >= m_size),
+ m_buf(m_local ? local_stack.alloc(m_size) : toku_xmalloc(m_size)) {
+ }
+
+ scoped_malloc::~scoped_malloc() {
+ if (m_local) {
+ local_stack.dealloc(m_size);
+ } else {
+ toku_free(m_buf);
+ }
+ }
+
+} // namespace toku
+
+// pthread key handling:
+// - there is a process-wide pthread key (tl_stack_destroy_pthread_key) whose destructor is tl_stack::destroy_and_deregister
+// - toku_scoped_malloc_init() creates the key; toku_scoped_malloc_destroy() cleans it up.
+// - when a thread first uses its tl_stack, init() calls pthread_setspecific(tl_stack_destroy_pthread_key, this),
+//   registering this thread's tl_stack as the value associated with the key
+// - when the thread terminates, pthread runs the key's destructor, tl_stack::destroy_and_deregister, on that value.
+
+void toku_scoped_malloc_init(void) {
+ toku_mutex_lock(&toku::global_stack_set_mutex);
+ invariant_null(toku::global_stack_set);
+ toku::global_stack_set = new std::set<toku::tl_stack *>();
+ toku_mutex_unlock(&toku::global_stack_set_mutex);
+
+ int r = pthread_key_create(&toku::tl_stack_destroy_pthread_key,
+ toku::tl_stack::destroy_and_deregister);
+ invariant_zero(r);
+}
+
+void toku_scoped_malloc_destroy(void) {
+ toku_scoped_malloc_destroy_key();
+ toku_scoped_malloc_destroy_set();
+}
+
+void toku_scoped_malloc_destroy_set(void) {
+ toku_mutex_lock(&toku::global_stack_set_mutex);
+ invariant_notnull(toku::global_stack_set);
+ // Destroy any tl_stacks that were registered as thread locals but did not
+ // get a chance to clean up using the pthread key destructor (because this code
+    // is now running before those threads fully shut down)
+ for (std::set<toku::tl_stack *>::iterator i = toku::global_stack_set->begin();
+ i != toku::global_stack_set->end(); i++) {
+ (*i)->destroy();
+ }
+ delete toku::global_stack_set;
+ toku::global_stack_set = nullptr;
+ toku_mutex_unlock(&toku::global_stack_set_mutex);
+}
+
+void toku_scoped_malloc_destroy_key(void) {
+ int r = pthread_key_delete(toku::tl_stack_destroy_pthread_key);
+ invariant_zero(r);
+}
+
+#endif // !__APPLE__
diff --git a/storage/tokudb/PerconaFT/util/scoped_malloc.h b/storage/tokudb/PerconaFT/util/scoped_malloc.h
new file mode 100644
index 00000000..b95b687a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/scoped_malloc.h
@@ -0,0 +1,103 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <string.h>
+
+namespace toku {
+
+ class scoped_malloc {
+ public:
+ // Memory is allocated from thread-local storage if available, otherwise from malloc(3).
+ scoped_malloc(const size_t size);
+
+ ~scoped_malloc();
+
+ void *get() const {
+ return m_buf;
+ }
+
+ private:
+ // Non-copyable
+ scoped_malloc();
+
+ const size_t m_size;
+ const bool m_local;
+ void *const m_buf;
+ };
+
+ class scoped_calloc : public scoped_malloc {
+ public:
+ // A scoped malloc whose bytes are initialized to zero, as in calloc(3)
+ scoped_calloc(const size_t size) :
+ scoped_malloc(size) {
+ memset(scoped_malloc::get(), 0, size);
+ }
+ };
+
+ class scoped_malloc_aligned : public scoped_malloc {
+ public:
+ scoped_malloc_aligned(const size_t size, const size_t alignment) :
+ scoped_malloc(size + alignment) {
+ invariant(size >= alignment);
+ invariant(alignment > 0);
+ const uintptr_t addr = reinterpret_cast<uintptr_t>(scoped_malloc::get());
+ const uintptr_t aligned_addr = (addr + alignment) - (addr % alignment);
+ invariant(aligned_addr < addr + size + alignment);
+ m_aligned_buf = reinterpret_cast<char *>(aligned_addr);
+ }
+
+ void *get() const {
+ return m_aligned_buf;
+ }
+
+ private:
+ void *m_aligned_buf;
+ };
+
+} // namespace toku
+
+void toku_scoped_malloc_init(void);
+
+void toku_scoped_malloc_destroy(void);
+
+void toku_scoped_malloc_destroy_set(void);
+
+void toku_scoped_malloc_destroy_key(void);
+
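+// Example usage (editor's sketch, not part of the original source): a short-lived
+// buffer that is carved out of the calling thread's local stack when there is room,
+// and falls back to toku_xmalloc otherwise; either way it is released on scope exit.
+#if 0
+static void example_copy_key(const void *key, size_t keylen) {
+    toku::scoped_malloc buf(keylen);
+    memcpy(buf.get(), key, keylen);
+    // ... use buf.get() while it is in scope ...
+}
+#endif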
diff --git a/storage/tokudb/PerconaFT/util/sort.h b/storage/tokudb/PerconaFT/util/sort.h
new file mode 100644
index 00000000..0f0bb7ee
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/sort.h
@@ -0,0 +1,208 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <string.h>
+#include <memory.h>
+
+namespace toku {
+
+ template<typename sortdata_t, typename sortextra_t, int (*cmp)(sortextra_t &, const sortdata_t &, const sortdata_t &)>
+ struct sort {
+
+ static const int single_threaded_threshold = 10000;
+
+ /**
+ * Effect: Sort n elements of type sortdata_t in the array a.
+ * Elements are compared by the template parameter cmp, using
+ * the context in extra.
+ */
+ static int
+ mergesort_r(sortdata_t *a, const int n, sortextra_t &extra)
+ {
+ sortdata_t *as[2] = { a, nullptr };
+ if (n >= single_threaded_threshold) {
+ XMALLOC_N(n, as[1]);
+ }
+ int which = mergesort_internal(as, 0, n, extra);
+ if (which == 1) {
+ memcpy(a, as[1], n * (sizeof a[0]));
+ }
+ if (n >= single_threaded_threshold) {
+ toku_free(as[1]);
+ }
+ return 0;
+ }
+
+ private:
+
+ // Sorts the data in as[which]. Returns dest such that as[dest]
+ // contains the sorted data (might be which or 1-which).
+ static int
+ mergesort_internal(sortdata_t *as[2], const int which, const int n, sortextra_t &extra)
+ {
+ if (n <= 1) { return which; }
+ if (n < single_threaded_threshold) {
+ quicksort_r(as[which], n, extra);
+ return which;
+ }
+ const int mid = n / 2;
+ sortdata_t *right_as[2] = { &(as[0])[mid], &(as[1])[mid] };
+ const int r1 = mergesort_internal(as, which, mid, extra);
+ const int r2 = mergesort_internal(right_as, which, n - mid, extra);
+ if (r1 != r2) {
+ // move everything to the same place (r2)
+ memcpy(as[r2], as[r1], mid * (sizeof as[r2][0]));
+ }
+ // now as[r2] has both sorted arrays
+ const int dest = 1 - r2;
+ merge(&(as[dest])[0], &(as[1-dest])[0], mid, &(as[1-dest])[mid], n - mid, extra);
+ return dest;
+ }
+
+ static void
+ merge_c(sortdata_t *dest, const sortdata_t *a, const int an, const sortdata_t *b, const int bn, sortextra_t &extra)
+ {
+ int ai, bi, i;
+ for (ai = 0, bi = 0, i = 0; ai < an && bi < bn; ++i) {
+ if (cmp(extra, a[ai], b[bi]) < 0) {
+ dest[i] = a[ai];
+ ai++;
+ } else {
+ dest[i] = b[bi];
+ bi++;
+ }
+ }
+ if (ai < an) {
+ memcpy(&dest[i], &a[ai], (an - ai) * (sizeof dest[0]));
+ } else if (bi < bn) {
+ memcpy(&dest[i], &b[bi], (bn - bi) * (sizeof dest[0]));
+ }
+ }
+
+ static int
+ binsearch(const sortdata_t &key, const sortdata_t *a, const int n, const int abefore, sortextra_t &extra)
+ {
+ if (n == 0) {
+ return abefore;
+ }
+ const int mid = n / 2;
+ const sortdata_t *akey = &a[mid];
+ int c = cmp(extra, key, *akey);
+ if (c < 0) {
+ if (n == 1) {
+ return abefore;
+ } else {
+ return binsearch(key, a, mid, abefore, extra);
+ }
+ } else if (c > 0) {
+ if (n == 1) {
+ return abefore + 1;
+ } else {
+ return binsearch(key, akey, n - mid, abefore + mid, extra);
+ }
+ } else {
+ return abefore + mid;
+ }
+ }
+
+ static void
+ merge(sortdata_t *dest, const sortdata_t *a_, const int an_, const sortdata_t *b_, const int bn_, sortextra_t &extra)
+ {
+ if (an_ + bn_ < single_threaded_threshold) {
+ merge_c(dest, a_, an_, b_, bn_, extra);
+ } else {
+ const bool swapargs = an_ < bn_;
+ const sortdata_t *a = swapargs ? b_ : a_;
+ const sortdata_t *b = swapargs ? a_ : b_;
+ const int an = swapargs ? bn_ : an_;
+ const int bn = swapargs ? an_ : bn_;
+
+ const int a2 = an / 2;
+ const sortdata_t *akey = &a[a2];
+ const int b2 = binsearch(*akey, b, bn, 0, extra);
+ merge(dest, a, a2, b, b2, extra);
+ merge(&dest[a2 + b2], akey, an - a2, &b[b2], bn - b2, extra);
+ }
+ }
+
+ static void
+ quicksort_r(sortdata_t *a, const int n, sortextra_t &extra)
+ {
+ if (n > 1) {
+ const int lo = 0;
+ int pivot = n / 2;
+ const int hi = n - 1;
+ if (cmp(extra, a[lo], a[pivot]) > 0) {
+ const sortdata_t tmp = a[lo]; a[lo] = a[pivot]; a[pivot] = tmp;
+ }
+ if (cmp(extra, a[pivot], a[hi]) > 0) {
+ const sortdata_t tmp = a[pivot]; a[pivot] = a[hi]; a[hi] = tmp;
+ if (cmp(extra, a[lo], a[pivot]) > 0) {
+ const sortdata_t tmp2 = a[lo]; a[lo] = a[pivot]; a[pivot] = tmp2;
+ }
+ }
+ int li = lo + 1, ri = hi - 1;
+ while (li <= ri) {
+ while (cmp(extra, a[li], a[pivot]) < 0) {
+ li++;
+ }
+ while (cmp(extra, a[pivot], a[ri]) < 0) {
+ ri--;
+ }
+ if (li < ri) {
+ sortdata_t tmp = a[li]; a[li] = a[ri]; a[ri] = tmp;
+ // fix up pivot if we moved it
+ if (pivot == li) { pivot = ri; }
+ else if (pivot == ri) { pivot = li; }
+ li++;
+ ri--;
+ } else if (li == ri) {
+ li++;
+ ri--;
+ }
+ }
+
+ quicksort_r(&a[lo], ri + 1, extra);
+ quicksort_r(&a[li], hi - li + 1, extra);
+ }
+ }
+ };
+
+};
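+// Example usage (editor's sketch, not part of the original source): instantiating
+// the sorter for plain ints with a comparison function that ignores its context.
+#if 0
+struct example_cmp_extra {};
+static int example_int_cmp(example_cmp_extra &, const int &a, const int &b) {
+    return (a < b) ? -1 : (a > b) ? +1 : 0;
+}
+static void example_sort_ints(int *a, int n) {
+    example_cmp_extra extra;
+    toku::sort<int, example_cmp_extra, example_int_cmp>::mergesort_r(a, n, extra);
+}
+#endif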
diff --git a/storage/tokudb/PerconaFT/util/status.h b/storage/tokudb/PerconaFT/util/status.h
new file mode 100644
index 00000000..2d03ef1e
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/status.h
@@ -0,0 +1,75 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <util/partitioned_counter.h>
+#include <util/constexpr.h>
+
+#define TOKUFT_STATUS_INIT(array,k,c,t,l,inc) do { \
+ array.status[k].keyname = #k; \
+ array.status[k].columnname = #c; \
+ array.status[k].type = t; \
+ array.status[k].legend = l; \
+ static_assert((inc) != 0, "Var must be included in at least one place"); \
+ constexpr_static_assert(strcmp(#c, "NULL") && strcmp(#c, "0"), \
+ "Use nullptr for no column name instead of NULL, 0, etc..."); \
+ constexpr_static_assert((inc) == TOKU_ENGINE_STATUS \
+ || strcmp(#c, "nullptr"), "Missing column name."); \
+ constexpr_static_assert(static_strncasecmp(#c, "TOKU", strlen("TOKU")), \
+ "Do not start column names with toku."); \
+ array.status[k].include = static_cast<toku_engine_status_include_type>(inc); \
+ if (t == PARCOUNT) { \
+ array.status[k].value.parcount = create_partitioned_counter(); \
+ } \
+} while (0)
+
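+// Example usage (editor's sketch, not part of the original source; the status array
+// and enum names here are hypothetical): initializing one PARCOUNT entry of a status
+// array inside its init() routine might look like
+//
+//   TOKUFT_STATUS_INIT(example_status, EXAMPLE_DOCS_INSERTED, DOCS_INSERTED, PARCOUNT,
+//                      "documents inserted", TOKU_ENGINE_STATUS);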
diff --git a/storage/tokudb/PerconaFT/util/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/util/tests/CMakeLists.txt
new file mode 100644
index 00000000..8d53dd89
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/CMakeLists.txt
@@ -0,0 +1,24 @@
+if(BUILD_TESTING)
+ file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc)
+ foreach(src ${srcs})
+ get_filename_component(base ${src} NAME_WE)
+ list(APPEND tests ${base})
+ endforeach(src)
+
+ foreach(test ${tests})
+ add_executable(${test} ${test}.cc)
+ target_link_libraries(${test} util ${LIBTOKUPORTABILITY})
+ endforeach(test)
+
+ add_helgrind_test(util helgrind_test_partitioned_counter $<TARGET_FILE:test_partitioned_counter>)
+ add_helgrind_test(util helgrind_test_partitioned_counter_5833 $<TARGET_FILE:test_partitioned_counter_5833>)
+
+ foreach(test ${tests})
+ add_test(util/${test} ${test})
+ endforeach(test)
+
+ set(long_tests
+ util/helgrind_test_partitioned_counter
+ )
+ set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600)
+endif(BUILD_TESTING)
diff --git a/storage/tokudb/PerconaFT/util/tests/marked-omt-test.cc b/storage/tokudb/PerconaFT/util/tests/marked-omt-test.cc
new file mode 100644
index 00000000..7e60c711
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/marked-omt-test.cc
@@ -0,0 +1,466 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <toku_portability.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <db.h>
+#include <string.h>
+
+#include <memory.h>
+
+#include <portability/toku_atomic.h>
+#include <portability/toku_pthread.h>
+#include <portability/toku_random.h>
+
+#include <util/omt.h>
+#include <util/rwlock.h>
+
+namespace toku {
+
+namespace test {
+
+static inline uint32_t fudge(const uint32_t x) { return x + 300; }
+static inline uint32_t defudge(const uint32_t fx) { return fx - 300; }
+
+int test_iterator(const uint32_t &v, const uint32_t idx, bool *const UU(unused));
+int test_iterator(const uint32_t &v, const uint32_t idx, bool *const UU(unused)) {
+ invariant(defudge(v) == idx);
+ return 0;
+}
+
+int check_iterator_before(const uint32_t &v, const uint32_t idx, bool *const called);
+int check_iterator_before(const uint32_t &v, const uint32_t idx, bool *const called) {
+ invariant(defudge(v) == idx);
+ invariant(idx % 10 < 5);
+ called[idx] = true;
+ return 0;
+}
+
+int check_iterator_after(const uint32_t &v, const uint32_t UU(idx), bool *const called);
+int check_iterator_after(const uint32_t &v, const uint32_t UU(idx), bool *const called) {
+ invariant(defudge(v) % 10 >= 5);
+ called[defudge(v)] = true;
+ return 0;
+}
+
+int die(const uint32_t &UU(v), const uint32_t UU(idx), void *const UU(unused));
+int die(const uint32_t &UU(v), const uint32_t UU(idx), void *const UU(unused)) {
+ abort();
+ return 0; // unreachable: abort() above never returns
+}
+
+static void run_test(uint32_t nelts) {
+ assert(nelts % 10 == 0); // run_test depends on nelts being a multiple of 10
+
+ omt<uint32_t, uint32_t, true> omt;
+ omt.create();
+ omt.verify_marks_consistent();
+ for (uint32_t i = 0; i < nelts; ++i) {
+ omt.insert_at(fudge(i), i);
+ }
+ omt.verify_marks_consistent();
+
+ int r;
+ for (uint32_t i = 0; i < nelts / 10; ++i) {
+ r = omt.iterate_and_mark_range<bool, test_iterator>(i * 10, i * 10 + 5, nullptr);
+ invariant_zero(r);
+ omt.verify_marks_consistent();
+ }
+
+ bool called[nelts];
+ ZERO_ARRAY(called);
+ r = omt.iterate_over_marked<bool, check_iterator_before>(called);
+ invariant_zero(r);
+ for (uint32_t i = 0; i < nelts; ++i) {
+ if (i % 10 < 5) {
+ invariant(called[i]);
+ } else {
+ invariant(!called[i]);
+ }
+ }
+ omt.verify_marks_consistent();
+
+ invariant(omt.size() == nelts);
+
+ omt.delete_all_marked();
+ omt.verify_marks_consistent();
+
+ invariant(omt.size() * 2 == nelts);
+
+ r = omt.iterate_over_marked<void, die>(nullptr);
+ invariant_zero(r);
+
+ ZERO_ARRAY(called);
+ r = omt.iterate<bool, check_iterator_after>(called);
+ invariant_zero(r);
+ omt.verify_marks_consistent();
+
+ for (uint32_t i = 0; i < nelts; ++i) {
+ if (i % 10 < 5) {
+ invariant(!called[i]);
+ } else {
+ invariant(called[i]);
+ }
+ }
+
+ omt.destroy();
+}
+
+typedef omt<uint32_t, uint32_t, true> stress_omt;
+
+int int_heaviside(const uint32_t &v, const uint32_t &target);
+int int_heaviside(const uint32_t &v, const uint32_t &target) {
+ return (v > target) - (v < target);
+}
+
+struct stress_shared {
+ stress_omt *omt;
+ volatile bool running;
+ struct st_rwlock lock;
+ toku_mutex_t mutex;
+ int num_marker_threads;
+};
+
+struct reader_extra {
+ int tid;
+ stress_shared *shared;
+ uint64_t iterations;
+ uint64_t last_iteration;
+ char buf_read[8];
+ char buf_write[8];
+ struct random_data rand_read;
+ struct random_data rand_write;
+};
+
+static void generate_range(struct random_data *rng, const struct stress_shared &shared, uint32_t *begin, uint32_t *limit) {
+ const uint32_t nelts = shared.omt->size();
+ double range_limit_d = nelts;
+ range_limit_d /= 1000;
+ range_limit_d /= shared.num_marker_threads;
+ range_limit_d += 1;
+ uint32_t range_limit = static_cast<uint32_t>(range_limit_d);
+ if (range_limit < 5) {
+ range_limit = 5;
+ }
+ if (range_limit > 1000) {
+ range_limit = 1000;
+ }
+ *begin = rand_choices(rng, nelts - 1);
+ if (*begin + range_limit > nelts) {
+ range_limit = nelts - *begin;
+ }
+ *limit = *begin + rand_choices(rng, range_limit);
+}
+
+struct pair {
+ uint32_t begin;
+ uint32_t limit;
+};
+
+int mark_read_iterator(const uint32_t &UU(v), const uint32_t idx, struct pair * const pair);
+int mark_read_iterator(const uint32_t &UU(v), const uint32_t idx, struct pair * const pair) {
+ invariant(defudge(v) == idx);
+ invariant(idx >= pair->begin);
+ invariant(idx < pair->limit);
+ return 0;
+}
+
+static void *stress_mark_worker(void *extrav) {
+ struct reader_extra *CAST_FROM_VOIDP(extra, extrav);
+ struct stress_shared &shared = *extra->shared;
+ toku_mutex_t &mutex = shared.mutex;
+
+ while (shared.running) {
+ toku_mutex_lock(&mutex);
+ rwlock_read_lock(&shared.lock, &mutex);
+ toku_mutex_unlock(&mutex);
+
+ struct pair range;
+ generate_range(&extra->rand_read, shared, &range.begin, &range.limit);
+
+ shared.omt->iterate_and_mark_range<pair, mark_read_iterator>(range.begin, range.limit, &range);
+
+ ++extra->iterations;
+
+ toku_mutex_lock(&mutex);
+ rwlock_read_unlock(&shared.lock);
+ toku_mutex_unlock(&mutex);
+
+ usleep(1);
+ }
+
+ return nullptr;
+}
+
+template<typename T>
+class array_ftor {
+ int m_count;
+ T *m_array;
+public:
+ array_ftor(int size) : m_count(0) {
+ XMALLOC_N(size, m_array);
+ }
+ ~array_ftor() {
+ toku_free(m_array);
+ }
+ void operator() (const T &x) { m_array[m_count++] = x; }
+ template<class callback_t>
+ void iterate(callback_t &cb) const {
+ for (int i = 0; i < m_count; ++i) {
+ cb(m_array[i]);
+ }
+ }
+};
+
+int use_array_ftor(const uint32_t &v, const uint32_t UU(idx), array_ftor<uint32_t> *const fp);
+int use_array_ftor(const uint32_t &v, const uint32_t UU(idx), array_ftor<uint32_t> *const fp) {
+ array_ftor<uint32_t> &f = *fp;
+ f(v);
+ return 0;
+}
+
+class inserter {
+ stress_omt *m_omt;
+public:
+ inserter(stress_omt *omt) : m_omt(omt) {}
+ void operator() (const uint32_t &x) {
+ m_omt->insert<uint32_t, int_heaviside>(x, x, nullptr);
+ }
+};
+
+/*
+ * Stress-test plan:
+ *
+ * N marker threads, each with a context holding a random seed and an
+ * iteration count.  Each thread builds an rng from its seed and loops:
+ * generate a random range, mark that range in the omt, bump its
+ * iteration count.
+ *
+ * The deleter replays the same work against a plain bool array: for each
+ * context it uses a second rng seeded identically, repeats the recorded
+ * number of iterations marking the same random ranges in the array, and
+ * then checks that the array agrees with the marks reported by the omt.
+ */
+
+static void simulate_reader_marks_on_array(struct reader_extra *const reader, const struct stress_shared &shared, bool *const should_be_marked) {
+ if (verbose) {
+ fprintf(stderr, "thread %d ran %" PRIu64 " iterations\n", reader->tid, reader->iterations - reader->last_iteration);
+ }
+ for (; reader->last_iteration < reader->iterations; ++reader->last_iteration) {
+ uint32_t begin;
+ uint32_t limit;
+
+ generate_range(&reader->rand_write, shared, &begin, &limit);
+
+ for (uint32_t i = begin; i < limit; i++) {
+ should_be_marked[i] = true;
+ }
+ }
+}
+
+int copy_marks(const uint32_t &v, const uint32_t idx, bool * const is_marked);
+int copy_marks(const uint32_t &v, const uint32_t idx, bool * const is_marked) {
+ invariant(defudge(v) == idx);
+ is_marked[idx] = true;
+ return 0;
+}
+
+static inline uint32_t count_true(const bool *const bools, uint32_t n) {
+ uint32_t count = 0;
+ for (uint32_t i = 0; i < n; ++i) {
+ if (bools[i]) {
+ ++count;
+ }
+ }
+ return count;
+}
+
+static void stress_deleter(struct reader_extra *const readers, int num_marker_threads, stress_omt *omt) {
+ // Verify (iterate_over_marked) agrees exactly with iterate_and_mark_range (multithreaded)
+ stress_shared &shared = *readers[0].shared;
+ bool should_be_marked[omt->size()];
+ ZERO_ARRAY(should_be_marked);
+
+ for (int i = 0; i < num_marker_threads; i++) {
+ simulate_reader_marks_on_array(&readers[i], shared, should_be_marked);
+ }
+
+ bool is_marked_according_to_iterate[omt->size()];
+ ZERO_ARRAY(is_marked_according_to_iterate);
+
+ omt->verify_marks_consistent();
+ omt->iterate_over_marked<bool, copy_marks>(&is_marked_according_to_iterate[0]);
+ omt->verify_marks_consistent();
+
+ invariant(!memcmp(should_be_marked, is_marked_according_to_iterate, sizeof(should_be_marked)));
+
+ if (verbose) {
+ double frac_marked = count_true(should_be_marked, omt->size());
+ frac_marked /= omt->size();
+
+ fprintf(stderr, "Marked: %0.4f\n", frac_marked);
+ omt->verify_marks_consistent();
+ }
+
+ array_ftor<uint32_t> aftor(omt->size());
+ omt->iterate_over_marked<array_ftor<uint32_t>, use_array_ftor>(&aftor);
+ omt->delete_all_marked();
+ omt->verify_marks_consistent();
+ omt->iterate_over_marked<void, die>(nullptr);
+ inserter ins(omt);
+ aftor.iterate(ins);
+ omt->verify_marks_consistent();
+}
+
+static void *stress_delete_worker(void *extrav) {
+ reader_extra *CAST_FROM_VOIDP(readers, extrav);
+ stress_shared &shared = *readers[0].shared;
+ int num_marker_threads = shared.num_marker_threads;
+ toku_mutex_t &mutex = shared.mutex;
+ const double repetitions = 20;
+ for (int i = 0; i < repetitions; ++i) {
+ // sleep 0 - 0.15s
+ // early iterations sleep for a short time
+ // later iterations sleep longer
+ int sleep_for = 1000 * 100 * (1.5 * (i+1) / repetitions);
+ usleep(sleep_for);
+
+ toku_mutex_lock(&mutex);
+ rwlock_write_lock(&shared.lock, &mutex);
+ toku_mutex_unlock(&mutex);
+
+ stress_deleter(readers, num_marker_threads, shared.omt);
+
+ toku_mutex_lock(&mutex);
+ rwlock_write_unlock(&shared.lock);
+ toku_mutex_unlock(&mutex);
+ }
+ toku_sync_bool_compare_and_swap(&shared.running, true, false);
+ return nullptr;
+}
+
+static void stress_test(int nelts) {
+ stress_omt omt;
+ omt.create();
+ for (int i = 0; i < nelts; ++i) {
+ omt.insert_at(fudge(i), i);
+ }
+
+ const int num_marker_threads = 5;
+ struct stress_shared extra;
+ ZERO_STRUCT(extra);
+ extra.omt = &omt;
+ toku_mutex_init(toku_uninstrumented, &extra.mutex, nullptr);
+ rwlock_init(toku_uninstrumented, &extra.lock);
+ extra.running = true;
+ extra.num_marker_threads = num_marker_threads;
+
+ struct reader_extra readers[num_marker_threads];
+ ZERO_ARRAY(readers);
+
+ srandom(time(NULL));
+ toku_pthread_t marker_threads[num_marker_threads];
+ for (int i = 0; i < num_marker_threads; ++i) {
+ struct reader_extra &reader = readers[i];
+ reader.tid = i;
+ reader.shared = &extra;
+
+ int r;
+ int seed = random();
+ r = myinitstate_r(seed, reader.buf_read, 8, &reader.rand_read);
+ invariant_zero(r);
+ r = myinitstate_r(seed, reader.buf_write, 8, &reader.rand_write);
+ invariant_zero(r);
+
+ toku_pthread_create(toku_uninstrumented,
+ &marker_threads[i],
+ nullptr,
+ stress_mark_worker,
+ &reader);
+ }
+
+ toku_pthread_t deleter_thread;
+ toku_pthread_create(toku_uninstrumented,
+ &deleter_thread,
+ nullptr,
+ stress_delete_worker,
+ &readers[0]);
+ toku_pthread_join(deleter_thread, NULL);
+
+ for (int i = 0; i < num_marker_threads; ++i) {
+ toku_pthread_join(marker_threads[i], NULL);
+ }
+
+ rwlock_destroy(&extra.lock);
+ toku_mutex_destroy(&extra.mutex);
+
+ omt.destroy();
+}
+
+} // end namespace test
+
+} // end namespace toku
+
+int test_main(int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ for (int i = 10; i <= 80; i*=2) {
+ toku::test::run_test(i);
+ }
+
+ toku::test::run_test(9000);
+
+ toku::test::stress_test(1000 * 100);
+
+ return 0;
+}
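The test above exercises the marked omt's mark-and-sweep pattern (the third template argument, true, enables marks): readers mark ranges under a shared lock, then a writer visits and deletes everything marked in one pass. A minimal single-threaded sketch of that pattern, assuming only the calls the test itself uses:

    static int visit(const uint32_t &v, const uint32_t idx, bool *const UU(extra)) {
        invariant(v == idx);  // values were inserted at their own index
        return 0;
    }

    static void mark_and_sweep_sketch(void) {
        toku::omt<uint32_t, uint32_t, true> m;                   // 'true' turns on mark support
        m.create();
        for (uint32_t i = 0; i < 100; ++i) {
            m.insert_at(i, i);
        }
        m.iterate_and_mark_range<bool, visit>(0, 50, nullptr);  // visit and mark [0, 50)
        m.iterate_over_marked<bool, visit>(nullptr);            // revisit only the marked rows
        m.delete_all_marked();                                  // drop every marked row at once
        m.verify_marks_consistent();
        invariant(m.size() == 50);
        m.destroy();
    }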
diff --git a/storage/tokudb/PerconaFT/util/tests/memarena-test.cc b/storage/tokudb/PerconaFT/util/tests/memarena-test.cc
new file mode 100644
index 00000000..94838506
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/memarena-test.cc
@@ -0,0 +1,184 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <string.h>
+
+#include "portability/toku_assert.h"
+
+#include "util/memarena.h"
+
+class memarena_unit_test {
+private:
+ static const int magic = 37;
+
+ template <typename F>
+ void iterate_chunks(memarena *ma, F &fn) {
+ for (memarena::chunk_iterator it(ma); it.more(); it.next()) {
+ size_t used = 0;
+ const void *buf = it.current(&used);
+ fn(buf, used);
+ }
+ }
+
+ void test_create(size_t size) {
+ memarena ma;
+ ma.create(size);
+ invariant(ma._current_chunk.size == size);
+ invariant(ma._current_chunk.used == 0);
+ if (size == 0) {
+ invariant_null(ma._current_chunk.buf);
+ } else {
+ invariant_notnull(ma._current_chunk.buf);
+ }
+
+ // make sure memory was allocated ok by
+ // writing to buf and reading it back
+ if (size > 0) {
+ memset(ma._current_chunk.buf, magic, size);
+ }
+ for (size_t i = 0; i < size; i++) {
+ const char *buf = reinterpret_cast<char *>(ma._current_chunk.buf);
+ invariant(buf[i] == magic);
+ }
+ ma.destroy();
+ }
+
+ void test_malloc(size_t size) {
+ memarena ma;
+ ma.create(14);
+ void *v = ma.malloc_from_arena(size);
+ invariant_notnull(v);
+
+ // make sure memory was allocated ok by
+ // writing to buf and reading it back
+ if (size > 0) {
+ memset(ma._current_chunk.buf, magic, size);
+ }
+ for (size_t i = 0; i < size; i++) {
+ const char *c = reinterpret_cast<char *>(ma._current_chunk.buf);
+ invariant(c[i] == magic);
+ }
+ ma.destroy();
+ }
+
+ static void test_iterate_fn(const void *buf, size_t used) {
+ for (size_t i = 0; i < used; i++) {
+ const char *c = reinterpret_cast<const char *>(buf);
+ invariant(c[i] == (char) ((intptr_t) &c[i]));
+ }
+ }
+
+ void test_iterate(size_t size) {
+ memarena ma;
+ ma.create(14);
+ for (size_t k = 0; k < size / 64; k += 64) {
+ void *v = ma.malloc_from_arena(64);
+ for (size_t i = 0; i < 64; i++) {
+ char *c = reinterpret_cast<char *>(v);
+ c[i] = (char) ((intptr_t) &c[i]);
+ }
+ }
+ size_t rest = size % 64;
+ if (rest != 0) {
+ void *v = ma.malloc_from_arena(64);
+ for (size_t i = 0; i < 64; i++) {
+ char *c = reinterpret_cast<char *>(v);
+ c[i] = (char) ((intptr_t) &c[i]);
+ }
+ }
+
+ iterate_chunks(&ma, test_iterate_fn);
+ ma.destroy();
+ }
+
+ void test_move_memory(size_t size) {
+ memarena ma;
+ ma.create(14);
+ for (size_t k = 0; k < size / 64; k += 64) {
+ void *v = ma.malloc_from_arena(64);
+ for (size_t i = 0; i < 64; i++) {
+ char *c = reinterpret_cast<char *>(v);
+ c[i] = (char) ((intptr_t) &c[i]);
+ }
+ }
+ size_t rest = size % 64;
+ if (rest != 0) {
+ void *v = ma.malloc_from_arena(64);
+ for (size_t i = 0; i < 64; i++) {
+ char *c = reinterpret_cast<char *>(v);
+ c[i] = (char) ((intptr_t) &c[i]);
+ }
+ }
+
+ memarena ma2;
+ ma.move_memory(&ma2);
+ iterate_chunks(&ma2, test_iterate_fn);
+
+ ma.destroy();
+ ma2.destroy();
+ }
+
+public:
+ void test() {
+ test_create(0);
+ test_create(64);
+ test_create(128 * 1024 * 1024);
+ test_malloc(0);
+ test_malloc(63);
+ test_malloc(64);
+ test_malloc(64 * 1024 * 1024);
+ test_malloc((64 * 1024 * 1024) + 1);
+ test_iterate(0);
+ test_iterate(63);
+ test_iterate(128 * 1024);
+ test_iterate(64 * 1024 * 1024);
+ test_iterate((64 * 1024 * 1024) + 1);
+ test_move_memory(0);
+ test_move_memory(1);
+ test_move_memory(63);
+ test_move_memory(65);
+ test_move_memory(65 * 1024 * 1024);
+ test_move_memory(101 * 1024 * 1024);
+ }
+};
+
+int main(void) {
+ memarena_unit_test test;
+ test.test();
+ return 0;
+}
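The memarena test reaches into internals (ma._current_chunk) to check chunk bookkeeping, but the public surface it relies on is small: create an arena, carve allocations out of it, optionally move every chunk to another arena, and destroy. A rough usage sketch under those assumptions:

    static void memarena_sketch(void) {
        memarena ma;
        ma.create(1024);                        // initial chunk size in bytes
        char *s = reinterpret_cast<char *>(ma.malloc_from_arena(16));
        memset(s, 'x', 16);                     // arena memory is ordinary writable memory

        memarena ma2;
        ma.move_memory(&ma2);                   // ma2 now owns every chunk, as in test_move_memory

        ma.destroy();
        ma2.destroy();                          // frees the moved chunks
    }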
diff --git a/storage/tokudb/PerconaFT/util/tests/minicron-change-period-data-race.cc b/storage/tokudb/PerconaFT/util/tests/minicron-change-period-data-race.cc
new file mode 100644
index 00000000..952cbf57
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/minicron-change-period-data-race.cc
@@ -0,0 +1,66 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2018, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2018, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include "test.h"
+#include "util/minicron.h"
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+
+// The thread sanitizer detected a data race in the minicron while running a test that was
+// otherwise unrelated to the minicron.  This test reproduces the race in a much smaller setting:
+// it runs minicron tasks while another thread repeatedly changes the minicron period.
+
+static int do_nothing(void *UU(v)) {
+ return 0;
+}
+
+int test_main (int argc, const char *argv[]) {
+ default_parse_args(argc,argv);
+
+ minicron m = {};
+ int r = toku_minicron_setup(&m, 1, do_nothing, nullptr);
+ assert(r == 0);
+ for (int i=0; i<1000; i++)
+ toku_minicron_change_period(&m, 1);
+ r = toku_minicron_shutdown(&m);
+ assert(r == 0);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/minicron-test.cc b/storage/tokudb/PerconaFT/util/tests/minicron-test.cc
new file mode 100644
index 00000000..026ab744
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/minicron-test.cc
@@ -0,0 +1,221 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include "test.h"
+#include "util/minicron.h"
+#include <unistd.h>
+
+#include <string.h>
+#include <stdlib.h>
+
+static double
+tdiff (struct timeval *a, struct timeval *b) {
+ return (a->tv_sec-b->tv_sec) + (a->tv_usec-b->tv_usec)*1e-6;
+}
+
+struct timeval starttime;
+static double elapsed (void) {
+ struct timeval now;
+ gettimeofday(&now, 0);
+ return tdiff(&now, &starttime);
+}
+
+static int
+#ifndef GCOV
+__attribute__((__noreturn__))
+#endif
+never_run (void *a) {
+ assert(a==0);
+ assert(0);
+#if defined(GCOV)
+ return 0;
+#endif
+}
+
+// Can we start something with period=0 (the function should never run) and shut it down.
+static void*
+test1 (void* v)
+{
+ struct minicron m;
+ memset(&m, 0, sizeof(struct minicron));
+ int r = toku_minicron_setup(&m, 0, never_run, 0); assert(r==0);
+ sleep(1);
+ r = toku_minicron_shutdown(&m); assert(r==0);
+ return v;
+}
+
+// Can we start something with a 10-second period (10000 ms) and shut it down after 2 seconds (the function should never run)?
+static void*
+test2 (void* v)
+{
+ struct minicron m;
+ memset(&m, 0, sizeof(struct minicron));
+ int r = toku_minicron_setup(&m, 10000, never_run, 0); assert(r==0);
+ sleep(2);
+ r = toku_minicron_shutdown(&m); assert(r==0);
+ return v;
+}
+
+struct tenx {
+ struct timeval tv;
+ int counter;
+};
+
+static int
+run_5x (void *v) {
+ struct tenx *CAST_FROM_VOIDP(tx, v);
+ struct timeval now;
+ gettimeofday(&now, 0);
+ double diff = tdiff(&now, &tx->tv);
+ if (verbose) printf("T=%f tx->counter=%d\n", diff, tx->counter);
+ // We only verify that the timer did not fire prematurely.
+ // It may fire late, but there is no reliable way to test for lateness and nothing we can do about it.
+ if (!(diff>0.5 + tx->counter)) {
+ printf("T=%f tx->counter=%d\n", diff, tx->counter);
+ assert(0);
+ }
+ tx->counter++;
+ return 0;
+}
+
+// Start something with a 1-second period (1000 ms) and let it run a few times
+static void*
+test3 (void* v)
+{
+ struct minicron m;
+ struct tenx tx;
+ gettimeofday(&tx.tv, 0);
+ tx.counter=0;
+ memset(&m, 0, sizeof(struct minicron));
+ int r = toku_minicron_setup(&m, 1000, run_5x, &tx); assert(r==0);
+ sleep(5);
+ r = toku_minicron_shutdown(&m); assert(r==0);
+ assert(tx.counter>=4 && tx.counter<=5); // after 5 seconds it could have run 4 or 5 times.
+ return v;
+}
+
+static int
+run_3sec (void *v) {
+ if (verbose) printf("start3sec at %.6f\n", elapsed());
+ int *CAST_FROM_VOIDP(counter, v);
+ (*counter)++;
+ sleep(3);
+ if (verbose) printf("end3sec at %.6f\n", elapsed());
+ return 0;
+}
+
+// make sure that if f is really slow, it doesn't run too many times
+static void*
+test4 (void *v) {
+ struct minicron m;
+ int counter = 0;
+ memset(&m, 0, sizeof(struct minicron));
+ int r = toku_minicron_setup(&m, 2000, run_3sec, &counter); assert(r==0);
+ sleep(10);
+ r = toku_minicron_shutdown(&m); assert(r==0);
+ assert(counter==3);
+ return v;
+}
+
+static void*
+test5 (void *v) {
+ struct minicron m;
+ int counter = 0;
+ memset(&m, 0, sizeof(struct minicron));
+ int r = toku_minicron_setup(&m, 10000, run_3sec, &counter); assert(r==0);
+ toku_minicron_change_period(&m, 2000);
+ sleep(10);
+ r = toku_minicron_shutdown(&m); assert(r==0);
+ assert(counter==3);
+ return v;
+}
+
+static void*
+test6 (void *v) {
+ struct minicron m;
+ memset(&m, 0, sizeof(struct minicron));
+ int r = toku_minicron_setup(&m, 5000, never_run, 0); assert(r==0);
+ toku_minicron_change_period(&m, 0);
+ sleep(7);
+ r = toku_minicron_shutdown(&m); assert(r==0);
+ return v;
+}
+
+// test that we actually run once per period, even if the execution is long
+static void*
+test7 (void *v) {
+ struct minicron m;
+ memset(&m, 0, sizeof(struct minicron));
+ int counter = 0;
+ int r = toku_minicron_setup(&m, 5000, run_3sec, &counter); assert(r==0);
+ sleep(17);
+ r = toku_minicron_shutdown(&m); assert(r==0);
+ assert(counter==3);
+ return v;
+}
+
+typedef void*(*ptf)(void*);
+int
+test_main (int argc, const char *argv[]) {
+ default_parse_args(argc,argv);
+ gettimeofday(&starttime, 0);
+
+ ptf testfuns[] = {test1, test2, test3,
+ test4,
+ test5,
+ test6,
+ test7
+ };
+#define N (sizeof(testfuns)/sizeof(testfuns[0]))
+ toku_pthread_t tests[N];
+
+ unsigned int i;
+ for (i = 0; i < N; i++) {
+ int r = toku_pthread_create(
+ toku_uninstrumented, tests + i, nullptr, testfuns[i], nullptr);
+ assert(r == 0);
+ }
+ for (i = 0; i < N; i++) {
+ void *v;
+ int r=toku_pthread_join(tests[i], &v);
+ assert(r==0);
+ assert(v==0);
+ }
+ return 0;
+}
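Taken together, these tests pin down the minicron contract: the period is given in milliseconds, a period of 0 means the callback never fires, the period can be changed while the cron is running, and shutdown stops and joins the background thread. A condensed sketch of that lifecycle, using the same calls as the tests:

    static int tick(void *counter_v) {
        int *counter = static_cast<int *>(counter_v);
        (*counter)++;
        return 0;
    }

    static void minicron_sketch(void) {
        struct minicron m;
        memset(&m, 0, sizeof(m));
        int counter = 0;
        int r = toku_minicron_setup(&m, 2000, tick, &counter);  // fire every 2000 ms
        assert(r == 0);
        toku_minicron_change_period(&m, 1000);                  // retarget to once per second
        sleep(3);
        r = toku_minicron_shutdown(&m);                         // stops the cron and joins its thread
        assert(r == 0);
    }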
diff --git a/storage/tokudb/PerconaFT/util/tests/omt-test.cc b/storage/tokudb/PerconaFT/util/tests/omt-test.cc
new file mode 100644
index 00000000..0d2c08f5
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/omt-test.cc
@@ -0,0 +1,898 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <util/omt.h>
+
+static void
+parse_args (int argc, const char *argv[]) {
+ const char *argv0=argv[0];
+ while (argc>1) {
+ int resultcode=0;
+ if (strcmp(argv[1], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[1], "-q")==0) {
+ verbose = 0;
+ } else if (strcmp(argv[1], "-h")==0) {
+ do_usage:
+ fprintf(stderr, "Usage:\n%s [-v|-h]\n", argv0);
+ exit(resultcode);
+ } else {
+ resultcode=1;
+ goto do_usage;
+ }
+ argc--;
+ argv++;
+ }
+}
+/* End ".h like" stuff. */
+
+struct value {
+ uint32_t number;
+};
+#define V(x) ((struct value *)(x))
+
+enum rand_type {
+ TEST_RANDOM,
+ TEST_SORTED,
+ TEST_IDENTITY
+};
+enum close_when_done {
+ CLOSE_WHEN_DONE,
+ KEEP_WHEN_DONE
+};
+enum create_type {
+ STEAL_ARRAY,
+ BATCH_INSERT,
+ INSERT_AT,
+ INSERT_AT_ALMOST_RANDOM,
+};
+
+/* Globals */
+typedef void *OMTVALUE;
+toku::omt<OMTVALUE> *global_omt;
+OMTVALUE* global_values = NULL;
+struct value* global_nums = NULL;
+uint32_t global_length;
+
+static void
+cleanup_globals (void) {
+ assert(global_values);
+ toku_free(global_values);
+ global_values = NULL;
+ assert(global_nums);
+ toku_free(global_nums);
+ global_nums = NULL;
+}
+
+/* Some test wrappers */
+struct functor {
+ int (*f)(OMTVALUE, uint32_t, void *);
+ void *v;
+};
+int call_functor(const OMTVALUE &v, uint32_t idx, functor *const ftor);
+int call_functor(const OMTVALUE &v, uint32_t idx, functor *const ftor) {
+ return ftor->f(const_cast<OMTVALUE>(v), idx, ftor->v);
+}
+static int omt_iterate(toku::omt<void *> *omt, int (*f)(OMTVALUE, uint32_t, void*), void*v) {
+ struct functor ftor = { .f = f, .v = v };
+ return omt->iterate<functor, call_functor>(&ftor);
+}
+
+struct heftor {
+ int (*h)(OMTVALUE, void *v);
+ void *v;
+};
+int call_heftor(const OMTVALUE &v, const heftor &htor);
+int call_heftor(const OMTVALUE &v, const heftor &htor) {
+ return htor.h(const_cast<OMTVALUE>(v), htor.v);
+}
+static int omt_insert(toku::omt<void *> *omt, OMTVALUE value, int(*h)(OMTVALUE, void*v), void *v, uint32_t *index) {
+ struct heftor htor = { .h = h, .v = v };
+ return omt->insert<heftor, call_heftor>(value, htor, index);
+}
+static int omt_find_zero(toku::omt<void *> *V, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, uint32_t *index) {
+ struct heftor htor = { .h = h, .v = extra };
+ return V->find_zero<heftor, call_heftor>(htor, value, index);
+}
+static int omt_find(toku::omt<void *> *V, int (*h)(OMTVALUE, void*extra), void*extra, int direction, OMTVALUE *value, uint32_t *index) {
+ struct heftor htor = { .h = h, .v = extra };
+ return V->find<heftor, call_heftor>(htor, direction, value, index);
+}
+static int omt_split_at(toku::omt<void *> *omt, toku::omt<void *> **newomtp, uint32_t index) {
+ toku::omt<void *> *XMALLOC(newomt);
+ int r = omt->split_at(newomt, index);
+ if (r != 0) {
+ toku_free(newomt);
+ } else {
+ *newomtp = newomt;
+ }
+ return r;
+}
+static int omt_merge(toku::omt<void *> *leftomt, toku::omt<void *> *rightomt, toku::omt<void *> **newomtp) {
+ toku::omt<void *> *XMALLOC(newomt);
+ newomt->merge(leftomt, rightomt);
+ toku_free(leftomt);
+ toku_free(rightomt);
+ *newomtp = newomt;
+ return 0;
+}
+
+const unsigned int random_seed = 0xFEADACBA;
+
+static void
+init_init_values (unsigned int seed, uint32_t num_elements) {
+ srandom(seed);
+
+ cleanup_globals();
+
+ XMALLOC_N(num_elements, global_values);
+ XMALLOC_N(num_elements, global_nums);
+ global_length = num_elements;
+}
+
+static void
+init_identity_values (unsigned int seed, uint32_t num_elements) {
+ uint32_t i;
+
+ init_init_values(seed, num_elements);
+
+ for (i = 0; i < global_length; i++) {
+ global_nums[i].number = i;
+ global_values[i] = (OMTVALUE)&global_nums[i];
+ }
+}
+
+static void
+init_distinct_sorted_values (unsigned int seed, uint32_t num_elements) {
+ uint32_t i;
+
+ init_init_values(seed, num_elements);
+
+ uint32_t number = 0;
+
+ for (i = 0; i < global_length; i++) {
+ number += (uint32_t)(random() % 32) + 1;
+ global_nums[i].number = number;
+ global_values[i] = (OMTVALUE)&global_nums[i];
+ }
+}
+
+static void
+init_distinct_random_values (unsigned int seed, uint32_t num_elements) {
+ init_distinct_sorted_values(seed, num_elements);
+
+ uint32_t i;
+ uint32_t choice;
+ uint32_t choices;
+ struct value temp;
+ for (i = 0; i < global_length - 1; i++) {
+ choices = global_length - i;
+ choice = random() % choices;
+ if (choice != i) {
+ temp = global_nums[i];
+ global_nums[i] = global_nums[choice];
+ global_nums[choice] = temp;
+ }
+ }
+}
+
+static void
+init_globals (void) {
+ XMALLOC_N(1, global_values);
+ XMALLOC_N(1, global_nums);
+ global_length = 1;
+}
+
+static void
+test_close (enum close_when_done do_close) {
+ if (do_close == KEEP_WHEN_DONE) {
+ return;
+ }
+ assert(do_close == CLOSE_WHEN_DONE);
+ global_omt->destroy();
+ toku_free(global_omt);
+}
+
+static void
+test_create (enum close_when_done do_close) {
+ XMALLOC(global_omt);
+ global_omt->create();
+ test_close(do_close);
+}
+
+static void
+test_create_size (enum close_when_done do_close) {
+ test_create(KEEP_WHEN_DONE);
+ assert(global_omt->size() == 0);
+ test_close(do_close);
+}
+
+static void
+test_create_insert_at_almost_random (enum close_when_done do_close) {
+ uint32_t i;
+ int r;
+ uint32_t size = 0;
+
+ test_create(KEEP_WHEN_DONE);
+ r = global_omt->insert_at(global_values[0], global_omt->size()+1);
+ CKERR2(r, EINVAL);
+ r = global_omt->insert_at(global_values[0], global_omt->size()+2);
+ CKERR2(r, EINVAL);
+ for (i = 0; i < global_length/2; i++) {
+ assert(size==global_omt->size());
+ r = global_omt->insert_at(global_values[i], i);
+ CKERR(r);
+ assert(++size==global_omt->size());
+ r = global_omt->insert_at(global_values[global_length-1-i], i+1);
+ CKERR(r);
+ assert(++size==global_omt->size());
+ }
+ r = global_omt->insert_at(global_values[0], global_omt->size()+1);
+ CKERR2(r, EINVAL);
+ r = global_omt->insert_at(global_values[0], global_omt->size()+2);
+ CKERR2(r, EINVAL);
+ assert(size==global_omt->size());
+ test_close(do_close);
+}
+
+static void
+test_create_insert_at_sequential (enum close_when_done do_close) {
+ uint32_t i;
+ int r;
+ uint32_t size = 0;
+
+ test_create(KEEP_WHEN_DONE);
+ r = global_omt->insert_at(global_values[0], global_omt->size()+1);
+ CKERR2(r, EINVAL);
+ r = global_omt->insert_at(global_values[0], global_omt->size()+2);
+ CKERR2(r, EINVAL);
+ for (i = 0; i < global_length; i++) {
+ assert(size==global_omt->size());
+ r = global_omt->insert_at(global_values[i], i);
+ CKERR(r);
+ assert(++size==global_omt->size());
+ }
+ r = global_omt->insert_at(global_values[0], global_omt->size()+1);
+ CKERR2(r, EINVAL);
+ r = global_omt->insert_at(global_values[0], global_omt->size()+2);
+ CKERR2(r, EINVAL);
+ assert(size==global_omt->size());
+ test_close(do_close);
+}
+
+static void
+test_create_from_sorted_array (enum create_type create_choice, enum close_when_done do_close) {
+ global_omt = NULL;
+
+ if (create_choice == BATCH_INSERT) {
+ XMALLOC(global_omt);
+ global_omt->create_from_sorted_array(global_values, global_length);
+ }
+ else if (create_choice == STEAL_ARRAY) {
+ XMALLOC(global_omt);
+ OMTVALUE* XMALLOC_N(global_length, values_copy);
+ memcpy(values_copy, global_values, global_length*sizeof(*global_values));
+ global_omt->create_steal_sorted_array(&values_copy, global_length, global_length);
+ assert(values_copy==NULL);
+ }
+ else if (create_choice == INSERT_AT) {
+ test_create_insert_at_sequential(KEEP_WHEN_DONE);
+ }
+ else if (create_choice == INSERT_AT_ALMOST_RANDOM) {
+ test_create_insert_at_almost_random(KEEP_WHEN_DONE);
+ }
+ else {
+ assert(false);
+ }
+
+ assert(global_omt!=NULL);
+ test_close(do_close);
+}
+
+static void
+test_create_from_sorted_array_size (enum create_type create_choice, enum close_when_done do_close) {
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+ assert(global_omt->size()==global_length);
+ test_close(do_close);
+}
+
+static void
+test_fetch_verify (toku::omt<void *> *omtree, OMTVALUE* val, uint32_t len ) {
+ uint32_t i;
+ int r;
+ OMTVALUE v = (OMTVALUE)&i;
+ OMTVALUE oldv = v;
+
+ assert(len == omtree->size());
+ for (i = 0; i < len; i++) {
+ assert(oldv!=val[i]);
+ v = NULL;
+ r = omtree->fetch(i, &v);
+ CKERR(r);
+ assert(v != NULL);
+ assert(v != oldv);
+ assert(v == val[i]);
+ assert(V(v)->number == V(val[i])->number);
+ v = oldv;
+ }
+
+ for (i = len; i < len*2; i++) {
+ v = oldv;
+ r = omtree->fetch(i, &v);
+ CKERR2(r, EINVAL);
+ assert(v == oldv);
+ }
+
+}
+
+static void
+test_create_fetch_verify (enum create_type create_choice, enum close_when_done do_close) {
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+ test_fetch_verify(global_omt, global_values, global_length);
+ test_close(do_close);
+}
+
+static int iterate_helper_error_return = 1;
+
+static int
+iterate_helper (OMTVALUE v, uint32_t idx, void* extra) {
+ if (extra == NULL) return iterate_helper_error_return;
+ OMTVALUE* vals = (OMTVALUE *)extra;
+ assert(v != NULL);
+ assert(v == vals[idx]);
+ assert(V(v)->number == V(vals[idx])->number);
+ return 0;
+}
+
+static void
+test_iterate_verify (toku::omt<void *> *omtree, OMTVALUE* vals, uint32_t len) {
+ int r;
+ iterate_helper_error_return = 0;
+ r = omt_iterate(omtree, iterate_helper, (void*)vals);
+ CKERR(r);
+ iterate_helper_error_return = 0xFEEDABBA;
+ r = omt_iterate(omtree, iterate_helper, NULL);
+ if (!len) {
+ CKERR2(r, 0);
+ }
+ else {
+ CKERR2(r, iterate_helper_error_return);
+ }
+}
+
+static void
+test_create_iterate_verify (enum create_type create_choice, enum close_when_done do_close) {
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+ test_iterate_verify(global_omt, global_values, global_length);
+ test_close(do_close);
+}
+
+
+static void
+permute_array (uint32_t* arr, uint32_t len) {
+ //
+ // create a permutation of 0...len-1
+ //
+ uint32_t i = 0;
+ for (i = 0; i < len; i++) {
+ arr[i] = i;
+ }
+ for (i = 0; i < len - 1; i++) {
+ uint32_t choices = len - i;
+ uint32_t choice = random() % choices;
+ if (choice != i) {
+ uint32_t temp = arr[i];
+ arr[i] = arr[choice];
+ arr[choice] = temp;
+ }
+ }
+}
+
+static void
+test_create_set_at (enum create_type create_choice, enum close_when_done do_close) {
+ uint32_t i = 0;
+
+ struct value* old_nums = NULL;
+ XMALLOC_N(global_length, old_nums);
+
+ uint32_t* perm = NULL;
+ XMALLOC_N(global_length, perm);
+
+ OMTVALUE* old_values = NULL;
+ XMALLOC_N(global_length, old_values);
+
+ permute_array(perm, global_length);
+
+ //
+ // These are going to be the new global_values
+ //
+ for (i = 0; i < global_length; i++) {
+ old_nums[i] = global_nums[i];
+ old_values[i] = &old_nums[i];
+ global_values[i] = &old_nums[i];
+ }
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+ int r;
+ r = global_omt->set_at(global_values[0], global_length);
+ CKERR2(r,EINVAL);
+ r = global_omt->set_at(global_values[0], global_length+1);
+ CKERR2(r,EINVAL);
+ for (i = 0; i < global_length; i++) {
+ uint32_t choice = perm[i];
+ global_values[choice] = &global_nums[choice];
+ global_nums[choice].number = (uint32_t)random();
+ r = global_omt->set_at(global_values[choice], choice);
+ CKERR(r);
+ test_iterate_verify(global_omt, global_values, global_length);
+ test_fetch_verify(global_omt, global_values, global_length);
+ }
+ r = global_omt->set_at(global_values[0], global_length);
+ CKERR2(r,EINVAL);
+ r = global_omt->set_at(global_values[0], global_length+1);
+ CKERR2(r,EINVAL);
+
+ toku_free(perm);
+ toku_free(old_values);
+ toku_free(old_nums);
+
+ test_close(do_close);
+}
+
+static int
+insert_helper (OMTVALUE value, void* extra_insert) {
+ OMTVALUE to_insert = (OMTVALUE)extra_insert;
+ assert(to_insert);
+
+ if (V(value)->number < V(to_insert)->number) return -1;
+ if (V(value)->number > V(to_insert)->number) return +1;
+ return 0;
+}
+
+static void
+test_create_insert (enum close_when_done do_close) {
+ uint32_t i = 0;
+
+ uint32_t* perm = NULL;
+ XMALLOC_N(global_length, perm);
+
+ permute_array(perm, global_length);
+
+ test_create(KEEP_WHEN_DONE);
+ int r;
+ uint32_t size = global_length;
+ global_length = 0;
+ while (global_length < size) {
+ uint32_t choice = perm[global_length];
+ OMTVALUE to_insert = &global_nums[choice];
+ uint32_t idx = UINT32_MAX;
+
+ assert(global_length==global_omt->size());
+ r = omt_insert(global_omt, to_insert, insert_helper, to_insert, &idx);
+ CKERR(r);
+ assert(idx <= global_length);
+ if (idx > 0) {
+ assert(V(to_insert)->number > V(global_values[idx-1])->number);
+ }
+ if (idx < global_length) {
+ assert(V(to_insert)->number < V(global_values[idx])->number);
+ }
+ global_length++;
+ assert(global_length==global_omt->size());
+ /* Make room */
+ for (i = global_length-1; i > idx; i--) {
+ global_values[i] = global_values[i-1];
+ }
+ global_values[idx] = to_insert;
+ test_fetch_verify(global_omt, global_values, global_length);
+ test_iterate_verify(global_omt, global_values, global_length);
+
+ idx = UINT32_MAX;
+ r = omt_insert(global_omt, to_insert, insert_helper, to_insert, &idx);
+ CKERR2(r, DB_KEYEXIST);
+ assert(idx < global_length);
+ assert(V(global_values[idx])->number == V(to_insert)->number);
+ assert(global_length==global_omt->size());
+
+ test_iterate_verify(global_omt, global_values, global_length);
+ test_fetch_verify(global_omt, global_values, global_length);
+ }
+
+ toku_free(perm);
+
+ test_close(do_close);
+}
+
+static void
+test_create_delete_at (enum create_type create_choice, enum close_when_done do_close) {
+ uint32_t i = 0;
+ int r = ENOSYS;
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+
+ assert(global_length == global_omt->size());
+ r = global_omt->delete_at(global_length);
+ CKERR2(r,EINVAL);
+ assert(global_length == global_omt->size());
+ r = global_omt->delete_at(global_length+1);
+ CKERR2(r,EINVAL);
+ while (global_length > 0) {
+ assert(global_length == global_omt->size());
+ uint32_t index_to_delete = random()%global_length;
+ r = global_omt->delete_at(index_to_delete);
+ CKERR(r);
+ for (i = index_to_delete+1; i < global_length; i++) {
+ global_values[i-1] = global_values[i];
+ }
+ global_length--;
+ test_fetch_verify(global_omt, global_values, global_length);
+ test_iterate_verify(global_omt, global_values, global_length);
+ }
+ assert(global_length == 0);
+ assert(global_length == global_omt->size());
+ r = global_omt->delete_at(global_length);
+ CKERR2(r, EINVAL);
+ assert(global_length == global_omt->size());
+ r = global_omt->delete_at(global_length+1);
+ CKERR2(r, EINVAL);
+ test_close(do_close);
+}
+
+static void
+test_split_merge (enum create_type create_choice, enum close_when_done do_close) {
+ int r = ENOSYS;
+ uint32_t i = 0;
+ toku::omt<void *> *left_split = NULL;
+ toku::omt<void *> *right_split = NULL;
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+
+ for (i = 0; i <= global_length; i++) {
+ r = omt_split_at(global_omt, &right_split, global_length+1);
+ CKERR2(r,EINVAL);
+ r = omt_split_at(global_omt, &right_split, global_length+2);
+ CKERR2(r,EINVAL);
+
+ //
+ // test successful split
+ //
+ r = omt_split_at(global_omt, &right_split, i);
+ CKERR(r);
+ left_split = global_omt;
+ global_omt = NULL;
+ assert(left_split->size() == i);
+ assert(right_split->size() == global_length - i);
+ test_fetch_verify(left_split, global_values, i);
+ test_iterate_verify(left_split, global_values, i);
+ test_fetch_verify(right_split, &global_values[i], global_length - i);
+ test_iterate_verify(right_split, &global_values[i], global_length - i);
+ //
+ // verify that new global_omt's cannot do bad splits
+ //
+ r = omt_split_at(left_split, &global_omt, i+1);
+ CKERR2(r,EINVAL);
+ assert(left_split->size() == i);
+ assert(right_split->size() == global_length - i);
+ r = omt_split_at(left_split, &global_omt, i+2);
+ CKERR2(r,EINVAL);
+ assert(left_split->size() == i);
+ assert(right_split->size() == global_length - i);
+ r = omt_split_at(right_split, &global_omt, global_length - i + 1);
+ CKERR2(r,EINVAL);
+ assert(left_split->size() == i);
+ assert(right_split->size() == global_length - i);
+ r = omt_split_at(right_split, &global_omt, global_length - i + 1);
+ CKERR2(r,EINVAL);
+ assert(left_split->size() == i);
+ assert(right_split->size() == global_length - i);
+
+ //
+ // test merge
+ //
+ r = omt_merge(left_split,right_split,&global_omt);
+ CKERR(r);
+ left_split = NULL;
+ right_split = NULL;
+ assert(global_omt->size() == global_length);
+ test_fetch_verify(global_omt, global_values, global_length);
+ test_iterate_verify(global_omt, global_values, global_length);
+ }
+ test_close(do_close);
+}
+
+
+static void
+init_values (enum rand_type rand_choice) {
+ const uint32_t test_size = 100;
+ if (rand_choice == TEST_RANDOM) {
+ init_distinct_random_values(random_seed, test_size);
+ }
+ else if (rand_choice == TEST_SORTED) {
+ init_distinct_sorted_values(random_seed, test_size);
+ }
+ else if (rand_choice == TEST_IDENTITY) {
+ init_identity_values( random_seed, test_size);
+ }
+ else assert(false);
+}
+
+static void
+test_create_array (enum create_type create_choice, enum rand_type rand_choice) {
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_from_sorted_array( create_choice, CLOSE_WHEN_DONE);
+ test_create_from_sorted_array_size(create_choice, CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_fetch_verify( create_choice, CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_iterate_verify( create_choice, CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_set_at( create_choice, CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_delete_at( create_choice, CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_create_insert( CLOSE_WHEN_DONE);
+ /* ********************************************************************** */
+ init_values(rand_choice);
+ test_split_merge( create_choice, CLOSE_WHEN_DONE);
+}
+
+typedef struct {
+ uint32_t first_zero;
+ uint32_t first_pos;
+} h_extra;
+
+
+static int
+test_heaviside (OMTVALUE v_omt, void* x) {
+ OMTVALUE v = (OMTVALUE) v_omt;
+ h_extra* extra = (h_extra*)x;
+ assert(v && x);
+ assert(extra->first_zero <= extra->first_pos);
+
+ uint32_t value = V(v)->number;
+ if (value < extra->first_zero) return -1;
+ if (value < extra->first_pos) return 0;
+ return 1;
+}
+
+static void
+heavy_extra (h_extra* extra, uint32_t first_zero, uint32_t first_pos) {
+ extra->first_zero = first_zero;
+ extra->first_pos = first_pos;
+}
+
+static void
+test_find_dir (int dir, void* extra, int (*h)(OMTVALUE, void*),
+ int r_expect, bool idx_will_change, uint32_t idx_expect,
+ uint32_t number_expect, bool UU(cursor_valid)) {
+ uint32_t idx = UINT32_MAX;
+ uint32_t old_idx = idx;
+ OMTVALUE omt_val;
+ int r;
+
+ omt_val = NULL;
+
+ /* Verify we can pass NULL value. */
+ omt_val = NULL;
+ idx = old_idx;
+ if (dir == 0) {
+ r = omt_find_zero(global_omt, h, extra, NULL, &idx);
+ }
+ else {
+ r = omt_find( global_omt, h, extra, dir, NULL, &idx);
+ }
+ CKERR2(r, r_expect);
+ if (idx_will_change) {
+ assert(idx == idx_expect);
+ }
+ else {
+ assert(idx == old_idx);
+ }
+ assert(omt_val == NULL);
+
+ /* Verify we can pass NULL idx. */
+ omt_val = NULL;
+ idx = old_idx;
+ if (dir == 0) {
+ r = omt_find_zero(global_omt, h, extra, &omt_val, 0);
+ }
+ else {
+ r = omt_find( global_omt, h, extra, dir, &omt_val, 0);
+ }
+ CKERR2(r, r_expect);
+ assert(idx == old_idx);
+ if (r == DB_NOTFOUND) {
+ assert(omt_val == NULL);
+ }
+ else {
+ assert(V(omt_val)->number == number_expect);
+ }
+
+ /* Verify we can pass NULL both. */
+ omt_val = NULL;
+ idx = old_idx;
+ if (dir == 0) {
+ r = omt_find_zero(global_omt, h, extra, NULL, 0);
+ }
+ else {
+ r = omt_find( global_omt, h, extra, dir, NULL, 0);
+ }
+ CKERR2(r, r_expect);
+ assert(idx == old_idx);
+ assert(omt_val == NULL);
+}
+
+static void
+test_find (enum create_type create_choice, enum close_when_done do_close) {
+ h_extra extra;
+ init_identity_values(random_seed, 100);
+ test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
+
+/*
+ -...-
+ A
+*/
+ heavy_extra(&extra, global_length, global_length);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, global_length-1, global_length-1, true);
+ test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, true, global_length, global_length, false);
+
+
+/*
+ +...+
+ B
+*/
+ heavy_extra(&extra, 0, 0);
+ test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, 0, 0, true);
+ test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, true, 0, 0, false);
+
+/*
+ 0...0
+ C
+*/
+ heavy_extra(&extra, 0, global_length);
+ test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(0, &extra, test_heaviside, 0, true, 0, 0, true);
+
+/*
+ -...-0...0
+ AC
+*/
+ heavy_extra(&extra, global_length/2, global_length);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, global_length/2-1, global_length/2-1, true);
+ test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(0, &extra, test_heaviside, 0, true, global_length/2, global_length/2, true);
+
+/*
+ 0...0+...+
+ C B
+*/
+ heavy_extra(&extra, 0, global_length/2);
+ test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, global_length/2, global_length/2, true);
+ test_find_dir(0, &extra, test_heaviside, 0, true, 0, 0, true);
+
+/*
+ -...-+...+
+ AB
+*/
+ heavy_extra(&extra, global_length/2, global_length/2);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, global_length/2-1, global_length/2-1, true);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, global_length/2, global_length/2, true);
+ test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, true, global_length/2, global_length/2, false);
+
+/*
+ -...-0...0+...+
+ AC B
+*/
+ heavy_extra(&extra, global_length/3, 2*global_length/3);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, global_length/3-1, global_length/3-1, true);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, 2*global_length/3, 2*global_length/3, true);
+ test_find_dir(0, &extra, test_heaviside, 0, true, global_length/3, global_length/3, true);
+
+ /* Cleanup */
+ test_close(do_close);
+}
+
+static void
+runtests_create_choice (enum create_type create_choice) {
+ test_create_array(create_choice, TEST_SORTED);
+ test_create_array(create_choice, TEST_RANDOM);
+ test_create_array(create_choice, TEST_IDENTITY);
+ test_find( create_choice, CLOSE_WHEN_DONE);
+}
+
+static void
+test_clone(uint32_t nelts)
+// Test that each clone operation gives the right data back. If nelts is
+// zero, also tests that you still get a valid omt back and that the way
+// to deallocate it still works.
+{
+ toku::omt<void *> *src = NULL, *dest = NULL;
+ int r;
+
+ XMALLOC(src);
+ src->create();
+ for (long i = 0; i < nelts; ++i) {
+ r = src->insert_at((OMTVALUE) i, i);
+ assert_zero(r);
+ }
+
+ XMALLOC(dest);
+ dest->clone(*src);
+ assert(dest != NULL);
+ assert(dest->size() == nelts);
+ for (long i = 0; i < nelts; ++i) {
+ OMTVALUE v;
+ long l;
+ r = dest->fetch(i, &v);
+ assert_zero(r);
+ l = (long) v;
+ assert(l == i);
+ }
+ dest->destroy();
+ toku_free(dest);
+ src->destroy();
+ toku_free(src);
+}
+
+int
+test_main(int argc, const char *argv[]) {
+ parse_args(argc, argv);
+ init_globals();
+ test_create( CLOSE_WHEN_DONE);
+ test_create_size( CLOSE_WHEN_DONE);
+ runtests_create_choice(BATCH_INSERT);
+ runtests_create_choice(STEAL_ARRAY);
+ runtests_create_choice(INSERT_AT);
+ runtests_create_choice(INSERT_AT_ALMOST_RANDOM);
+ test_clone(0);
+ test_clone(1);
+ test_clone(1000);
+ test_clone(10000);
+ cleanup_globals();
+ return 0;
+}
+
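The call_functor/call_heftor wrappers near the top of this test adapt old function-pointer callbacks to the templated omt interface, which takes the callback as a template parameter. A stripped-down sketch of the same adapter pattern:

    struct fn_adapter {
        int (*f)(void *value, uint32_t idx, void *extra);
        void *extra;
    };

    static int call_adapter(void *const &value, const uint32_t idx, fn_adapter *const a) {
        return a->f(value, idx, a->extra);  // forward to the plain function pointer
    }

    static int iterate_with_fn(toku::omt<void *> *o,
                               int (*f)(void *, uint32_t, void *), void *extra) {
        fn_adapter a = { f, extra };
        return o->iterate<fn_adapter, call_adapter>(&a);
    }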
diff --git a/storage/tokudb/PerconaFT/util/tests/omt-tmpl-test.cc b/storage/tokudb/PerconaFT/util/tests/omt-tmpl-test.cc
new file mode 100644
index 00000000..8cfc875c
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/omt-tmpl-test.cc
@@ -0,0 +1,162 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <type_traits>
+#include <memory.h>
+#include <toku_portability.h>
+#include <toku_assert.h>
+#include <toku_time.h>
+#include <util/omt.h>
+
+namespace toku {
+
+namespace test {
+
+ inline int intcmp(const int &a, const int &b);
+ inline int intcmp(const int &a, const int &b) {
+ if (a < b) {
+ return -1;
+ }
+ if (a > b) {
+ return +1;
+ }
+ return 0;
+ }
+
+ typedef omt<int> int_omt_t;
+
+ static int intiter_magic = 0xdeadbeef;
+ inline int intiter(const int &value __attribute__((__unused__)), const uint32_t idx __attribute__((__unused__)), int *const extra);
+ inline int intiter(const int &value __attribute__((__unused__)), const uint32_t idx __attribute__((__unused__)), int *const extra) {
+ invariant(*extra == intiter_magic);
+ return 0;
+ }
+
+ struct intiter2extra {
+ int count;
+ int last;
+ };
+ inline int intiter2(const int &value, const uint32_t idx __attribute__((__unused__)), struct intiter2extra *const extra);
+ inline int intiter2(const int &value, const uint32_t idx __attribute__((__unused__)), struct intiter2extra *const extra) {
+ extra->count++;
+ invariant(extra->last < value);
+ extra->last = value;
+ return 0;
+ }
+
+ static void unittest(void) {
+ int_omt_t o;
+ int r;
+ o.create();
+ invariant(o.size() == 0);
+
+ r = o.insert<int, intcmp>(1, 1, nullptr);
+ invariant_zero(r);
+ r = o.insert<int, intcmp>(3, 3, nullptr);
+ invariant_zero(r);
+
+ invariant(o.size() == 2);
+
+ r = o.insert<int, intcmp>(2, 2, nullptr);
+ invariant_zero(r);
+
+ invariant(o.size() == 3);
+
+ int x;
+ r = o.fetch(1, &x);
+ invariant_zero(r);
+
+ invariant(x == 2);
+
+ r = o.iterate<int, intiter>(&intiter_magic);
+ invariant_zero(r);
+
+ struct intiter2extra e = {0, 0};
+ r = o.iterate_on_range<struct intiter2extra, intiter2>(0, 2, &e);
+ invariant_zero(r);
+ invariant(e.count == 2);
+ invariant(e.last == 2);
+
+ r = o.set_at(5, 1);
+ invariant_zero(r);
+ r = o.delete_at(1);
+ invariant_zero(r);
+
+ invariant(o.size() == 2);
+
+ o.destroy();
+
+ int *XMALLOC_N(4, intarray);
+ for (int i = 0; i < 4; ++i) {
+ intarray[i] = i + 1;
+ }
+ int_omt_t left, right;
+ left.create_steal_sorted_array(&intarray, 4, 4);
+ invariant_null(intarray);
+ right.create();
+ r = right.insert<int, intcmp>(8, 8, nullptr);
+ invariant_zero(r);
+ r = right.insert<int, intcmp>(7, 7, nullptr);
+ invariant_zero(r);
+ r = right.insert<int, intcmp>(6, 6, nullptr);
+ invariant_zero(r);
+ r = right.insert<int, intcmp>(5, 5, nullptr);
+ invariant_zero(r);
+
+ int_omt_t combined;
+ combined.merge(&left, &right);
+ invariant(combined.size() == 8);
+ invariant(left.size() == 0);
+ invariant(right.size() == 0);
+ struct intiter2extra e2 = {0, 0};
+ r = combined.iterate<struct intiter2extra, intiter2>(&e2);
+ invariant_zero(r);
+ invariant(e2.count == 8);
+ invariant(e2.last == 8);
+
+ combined.destroy();
+ }
+
+} // end namespace test
+
+} // end namespace toku
+
+int main(void) {
+ toku::test::unittest();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/queue-test.cc b/storage/tokudb/PerconaFT/util/tests/queue-test.cc
new file mode 100644
index 00000000..f87e05bc
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/queue-test.cc
@@ -0,0 +1,136 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_portability.h>
+#include "toku_os.h"
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <toku_assert.h>
+#include <toku_pthread.h>
+#include "util/queue.h"
+
+static int verbose=1;
+
+static int count_0 = 0;
+static uint64_t e_max_weight=0, d_max_weight = 0; // max weight seen by enqueue thread and dequeue thread respectively.
+
+static void *start_0 (void *arg) {
+ QUEUE q = (QUEUE)arg;
+ void *item;
+ uint64_t weight;
+ long count = 0;
+ while (1) {
+ uint64_t this_max_weight;
+ int r=toku_queue_deq(q, &item, &weight, &this_max_weight);
+ if (r==EOF) break;
+ assert(r==0);
+ if (this_max_weight>d_max_weight) d_max_weight=this_max_weight;
+ long v = (long)item;
+ //printf("D(%ld)=%ld %ld\n", v, this_max_weight, d_max_weight);
+ assert(v==count);
+ count_0++;
+ count++;
+ }
+ return NULL;
+}
+
+static void enq (QUEUE q, long v, uint64_t weight) {
+ uint64_t this_max_weight;
+ int r = toku_queue_enq(q, (void*)v, (weight==0)?0:1, &this_max_weight);
+ assert(r==0);
+ if (this_max_weight>e_max_weight) e_max_weight=this_max_weight;
+ //printf("E(%ld)=%ld %ld\n", v, this_max_weight, e_max_weight);
+}
+
+static void queue_test_0 (uint64_t weight)
+// Test a queue that can hold WEIGHT items.
+{
+ //printf("\n");
+ count_0 = 0;
+ e_max_weight = 0;
+ d_max_weight = 0;
+ QUEUE q;
+ int r;
+ r = toku_queue_create(&q, weight);
+ assert(r == 0);
+ toku_pthread_t thread;
+ r = toku_pthread_create(toku_uninstrumented, &thread, nullptr, start_0, q);
+ assert(r == 0);
+ enq(q, 0L, weight);
+ enq(q, 1L, weight);
+ enq(q, 2L, weight);
+ enq(q, 3L, weight);
+ sleep(1);
+ enq(q, 4L, weight);
+ enq(q, 5L, weight);
+ r = toku_queue_eof(q); assert(r==0);
+ void *result;
+ r = toku_pthread_join(thread, &result); assert(r==0);
+ assert(result==NULL);
+ assert(count_0==6);
+    r = toku_queue_destroy(q);
+    assert(r == 0);
+ assert(d_max_weight <= weight);
+ assert(e_max_weight <= weight);
+}
+
+
+static void parse_args (int argc, const char *argv[]) {
+ const char *progname=argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose--;
+ } else {
+ fprintf(stderr, "Usage:\n %s [-v] [-q]\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+ if (verbose<0) verbose=0;
+}
+
+int main (int argc, const char *argv[]) {
+ parse_args(argc, argv);
+ queue_test_0(0LL);
+ queue_test_0(1LL);
+ queue_test_0(2LL);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/rwlock_condvar.h b/storage/tokudb/PerconaFT/util/tests/rwlock_condvar.h
new file mode 100644
index 00000000..b49c2780
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/rwlock_condvar.h
@@ -0,0 +1,149 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* Fair readers writer lock implemented using condition variables.
+ * This is maintained so that we can measure the performance of a relatively simple implementation (this one)
+ * compared to a fast one that uses compare-and-swap (the one in ../toku_rwlock.c).
+ * For now it's only for testing.
+ */
+
+
+// Fair readers/writer locks. These are fair, meaning first-come first-served: no reader starvation and no writer starvation. They are
+// probably faster than the Linux readers/writer locks (pthread_rwlock_t).
+struct toku_cv_fair_rwlock_waiter_state; // this structure is used internally.
+typedef struct toku_cv_fair_rwlock_s {
+ toku_mutex_t mutex;
+ int state; // 0 means no locks, + is number of readers locked, -1 is a writer
+ struct toku_cv_fair_rwlock_waiter_state *waiters_head, *waiters_tail;
+} toku_cv_fair_rwlock_t;
+
+void toku_cv_fair_rwlock_init (toku_cv_fair_rwlock_t *rwlock);
+void toku_cv_fair_rwlock_destroy (toku_cv_fair_rwlock_t *rwlock);
+int toku_cv_fair_rwlock_rdlock (toku_cv_fair_rwlock_t *rwlock);
+int toku_cv_fair_rwlock_wrlock (toku_cv_fair_rwlock_t *rwlock);
+int toku_cv_fair_rwlock_unlock (toku_cv_fair_rwlock_t *rwlock);
+
+struct toku_cv_fair_rwlock_waiter_state {
+ char is_read;
+ struct toku_cv_fair_rwlock_waiter_state *next;
+ toku_cond_t cond;
+};
+
+static __thread struct toku_cv_fair_rwlock_waiter_state waitstate = {0, NULL, {PTHREAD_COND_INITIALIZER} };
+
+void toku_cv_fair_rwlock_init (toku_cv_fair_rwlock_t *rwlock) {
+ rwlock->state = 0;
+ rwlock->waiters_head = NULL;
+ rwlock->waiters_tail = NULL;
+ toku_mutex_init(toku_uninstrumented, &rwlock->mutex, nullptr);
+}
+
+void toku_cv_fair_rwlock_destroy(toku_cv_fair_rwlock_t *rwlock) {
+ toku_mutex_destroy(&rwlock->mutex);
+}
+
+int toku_cv_fair_rwlock_rdlock (toku_cv_fair_rwlock_t *rwlock) {
+ toku_mutex_lock(&rwlock->mutex);
+ if (rwlock->waiters_head!=NULL || rwlock->state<0) {
+        // Someone is ahead of me in the queue, or a writer holds the lock.
+        // We use per-thread state for the condition variable: while this thread is blocked here, it cannot be reusing its waiter state for anything else.
+ if (rwlock->waiters_tail) {
+ rwlock->waiters_tail->next = &waitstate;
+ } else {
+ rwlock->waiters_head = &waitstate;
+ }
+ rwlock->waiters_tail = &waitstate;
+ waitstate.next = NULL;
+ waitstate.is_read = 1;
+ do {
+ toku_cond_wait(&waitstate.cond, &rwlock->mutex);
+ } while (rwlock->waiters_head!=&waitstate || rwlock->state<0);
+ rwlock->state++;
+ rwlock->waiters_head=waitstate.next;
+ if (waitstate.next==NULL) rwlock->waiters_tail=NULL;
+ if (rwlock->waiters_head && rwlock->waiters_head->is_read) {
+ toku_cond_signal(&rwlock->waiters_head->cond);
+ }
+ } else {
+ // No one is waiting, and any holders are readers.
+ rwlock->state++;
+ }
+ toku_mutex_unlock(&rwlock->mutex);
+ return 0;
+}
+
+int toku_cv_fair_rwlock_wrlock (toku_cv_fair_rwlock_t *rwlock) {
+ toku_mutex_lock(&rwlock->mutex);
+ if (rwlock->waiters_head!=NULL || rwlock->state!=0) {
+        // Someone else is ahead of me, or someone holds the lock, so we must wait our turn.
+ if (rwlock->waiters_tail) {
+ rwlock->waiters_tail->next = &waitstate;
+ } else {
+ rwlock->waiters_head = &waitstate;
+ }
+ rwlock->waiters_tail = &waitstate;
+ waitstate.next = NULL;
+ waitstate.is_read = 0;
+ do {
+ toku_cond_wait(&waitstate.cond, &rwlock->mutex);
+ } while (rwlock->waiters_head!=&waitstate || rwlock->state!=0);
+ rwlock->waiters_head = waitstate.next;
+ if (waitstate.next==NULL) rwlock->waiters_tail=NULL;
+ }
+ rwlock->state = -1;
+ toku_mutex_unlock(&rwlock->mutex);
+ return 0;
+}
+
+int toku_cv_fair_rwlock_unlock (toku_cv_fair_rwlock_t *rwlock) {
+ toku_mutex_lock(&rwlock->mutex);
+ assert(rwlock->state!=0);
+ if (rwlock->state>0) {
+ rwlock->state--;
+ } else {
+ rwlock->state=0;
+ }
+ if (rwlock->state==0 && rwlock->waiters_head) {
+ toku_cond_signal(&rwlock->waiters_head->cond);
+ } else {
+ // printf(" No one to wake\n");
+ }
+ toku_mutex_unlock(&rwlock->mutex);
+ return 0;
+}
+
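+// A minimal usage sketch (illustration only, kept inside #if 0 so it is never
+// compiled): how one reader and one writer might drive this API. The example_*
+// names are hypothetical; the calls are the init/rdlock/wrlock/unlock/destroy
+// functions declared above.
+#if 0
+static toku_cv_fair_rwlock_t example_lock;
+
+static void example_setup(void) {
+    toku_cv_fair_rwlock_init(&example_lock);
+}
+
+static void example_reader(void) {
+    toku_cv_fair_rwlock_rdlock(&example_lock);  // queues FIFO behind any earlier waiters
+    // ... read the shared state ...
+    toku_cv_fair_rwlock_unlock(&example_lock);
+}
+
+static void example_writer(void) {
+    toku_cv_fair_rwlock_wrlock(&example_lock);  // waits until no reader or writer holds the lock
+    // ... modify the shared state ...
+    toku_cv_fair_rwlock_unlock(&example_lock);
+}
+
+static void example_teardown(void) {
+    toku_cv_fair_rwlock_destroy(&example_lock);
+}
+#endif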
diff --git a/storage/tokudb/PerconaFT/util/tests/sm-basic.cc b/storage/tokudb/PerconaFT/util/tests/sm-basic.cc
new file mode 100644
index 00000000..0e5eb836
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/sm-basic.cc
@@ -0,0 +1,77 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// test that basic scoped malloc works with a thread
+
+#include <toku_portability.h>
+#include <toku_assert.h>
+#include <toku_pthread.h>
+#include <util/scoped_malloc.h>
+
+static void sm_test(void) {
+ toku::scoped_malloc a(1);
+ {
+ toku::scoped_malloc b(2);
+ {
+ toku::scoped_malloc c(3);
+ }
+ }
+}
+
+static void *sm_test_f(void *arg) {
+ sm_test();
+ return arg;
+}
+
+int main(void) {
+ toku_scoped_malloc_init();
+
+ // run the test
+ toku_pthread_t tid;
+ int r;
+ r = toku_pthread_create(
+ toku_uninstrumented, &tid, nullptr, sm_test_f, nullptr);
+ assert_zero(r);
+ void *ret;
+ r = toku_pthread_join(tid, &ret);
+ assert_zero(r);
+
+ toku_scoped_malloc_destroy();
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/sm-crash-double-free.cc b/storage/tokudb/PerconaFT/util/tests/sm-crash-double-free.cc
new file mode 100644
index 00000000..5aa35655
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/sm-crash-double-free.cc
@@ -0,0 +1,79 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// force a race between the scoped malloc global destructor and a thread variable destructor
+
+#define TOKU_SCOPED_MALLOC_DEBUG 1
+#include <toku_portability.h>
+#include <toku_assert.h>
+#include <toku_pthread.h>
+#include <toku_race_tools.h>
+#include <util/scoped_malloc.h>
+
+volatile int state = 0;
+
+static void sm_test(void) {
+ toku::scoped_malloc a(1);
+}
+
+static void *sm_test_f(void *arg) {
+ sm_test();
+ state = 1;
+ while (state != 2) sleep(1);
+ return arg;
+}
+
+int main(void) {
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&state, sizeof state);
+ state = 0;
+ toku_scoped_malloc_init();
+ toku_pthread_t tid;
+ int r;
+ r = toku_pthread_create(
+ toku_uninstrumented, &tid, nullptr, sm_test_f, nullptr);
+ assert_zero(r);
+ void *ret;
+ while (state != 1)
+ sleep(1);
+ toku_scoped_malloc_destroy_set();
+ state = 2;
+ r = toku_pthread_join(tid, &ret);
+ assert_zero(r);
+ toku_scoped_malloc_destroy_key();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/sort-tmpl-test.cc b/storage/tokudb/PerconaFT/util/tests/sort-tmpl-test.cc
new file mode 100644
index 00000000..4db3b93d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/sort-tmpl-test.cc
@@ -0,0 +1,179 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+
+#include <stdlib.h>
+
+#include <memory.h>
+#include <util/sort.h>
+
+const int MAX_NUM = 0x0fffffffL;
+int MAGIC_EXTRA = 0xd3adb00f;
+
+static int
+int_qsort_cmp(const void *va, const void *vb) {
+ const int *CAST_FROM_VOIDP(a, va);
+ const int *CAST_FROM_VOIDP(b, vb);
+ assert(*a < MAX_NUM);
+ assert(*b < MAX_NUM);
+ return (*a > *b) - (*a < *b);
+}
+
+int int_cmp(const int &e, const int &a, const int &b);
+int
+int_cmp(const int &e, const int &a, const int &b)
+{
+ assert(e == MAGIC_EXTRA);
+ return int_qsort_cmp(&a, &b);
+}
+
+static void
+check_int_array(int a[], int nelts)
+{
+ assert(a[0] < MAX_NUM);
+ for (int i = 1; i < nelts; ++i) {
+ assert(a[i] < MAX_NUM);
+ assert(a[i-1] <= a[i]);
+ }
+}
+
+static void
+zero_array_test(void)
+{
+ int unused = MAGIC_EXTRA - 1;
+ toku::sort<int, const int, int_cmp>::mergesort_r(NULL, 0, unused);
+}
+
+static void
+dup_array_test(int nelts)
+{
+ int *XMALLOC_N(nelts, a);
+ for (int i = 0; i < nelts; ++i) {
+ a[i] = 1;
+ }
+ toku::sort<int, const int, int_cmp>::mergesort_r(a, nelts, MAGIC_EXTRA);
+ check_int_array(a, nelts);
+ toku_free(a);
+}
+
+static void
+already_sorted_test(int nelts)
+{
+ int *XMALLOC_N(nelts, a);
+ for (int i = 0; i < nelts; ++i) {
+ a[i] = i;
+ }
+ toku::sort<int, const int, int_cmp>::mergesort_r(a, nelts, MAGIC_EXTRA);
+ check_int_array(a, nelts);
+ toku_free(a);
+}
+
+static void
+random_array_test(int nelts)
+{
+ int *XMALLOC_N(nelts, a);
+ int *XMALLOC_N(nelts, b);
+ for (int i = 0; i < nelts; ++i) {
+ a[i] = rand() % MAX_NUM;
+ b[i] = a[i];
+ }
+ toku::sort<int, const int, int_cmp>::mergesort_r(a, nelts, MAGIC_EXTRA);
+ check_int_array(a, nelts);
+ qsort(b, nelts, sizeof b[0], int_qsort_cmp);
+ for (int i = 0; i < nelts; ++i) {
+ assert(a[i] == b[i]);
+ }
+ toku_free(a);
+ toku_free(b);
+}
+
+static int
+uint64_qsort_cmp(const void *va, const void *vb) {
+ const uint64_t *CAST_FROM_VOIDP(a, va);
+ const uint64_t *CAST_FROM_VOIDP(b, vb);
+ return (*a > *b) - (*a < *b);
+}
+
+int uint64_cmp(const int &e, const uint64_t &a, const uint64_t &b);
+int
+uint64_cmp(const int &e, const uint64_t &a, const uint64_t &b)
+{
+ assert(e == MAGIC_EXTRA);
+ return uint64_qsort_cmp(&a, &b);
+}
+
+static void
+random_array_test_64(int nelts)
+{
+ uint64_t *XMALLOC_N(nelts, a);
+ uint64_t *XMALLOC_N(nelts, b);
+ for (int i = 0; i < nelts; ++i) {
+ a[i] = ((uint64_t)rand() << 32ULL) | rand();
+ b[i] = a[i];
+ }
+ toku::sort<uint64_t, const int, uint64_cmp>::mergesort_r(a, nelts, MAGIC_EXTRA);
+ qsort(b, nelts, sizeof b[0], uint64_qsort_cmp);
+ for (int i = 0; i < nelts; ++i) {
+ assert(a[i] == b[i]);
+ }
+ toku_free(a);
+ toku_free(b);
+}
+
+int
+test_main(int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__)))
+{
+ zero_array_test();
+ random_array_test(10);
+ random_array_test(1000);
+ random_array_test(10001);
+ random_array_test(19999);
+ random_array_test(39999);
+ random_array_test(10000000);
+ random_array_test_64(10000000);
+ dup_array_test(10);
+ dup_array_test(1000);
+ dup_array_test(10001);
+ dup_array_test(10000000);
+ already_sorted_test(10);
+ already_sorted_test(1000);
+ already_sorted_test(10001);
+ already_sorted_test(10000000);
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/test-frwlock-fair-writers.cc b/storage/tokudb/PerconaFT/util/tests/test-frwlock-fair-writers.cc
new file mode 100644
index 00000000..9a625c32
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/test-frwlock-fair-writers.cc
@@ -0,0 +1,90 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// check if write locks are fair
+
+#include <stdio.h>
+#include <toku_assert.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <util/frwlock.h>
+
+toku_mutex_t rwlock_mutex;
+toku::frwlock rwlock;
+volatile int killed = 0;
+
+static void *t1_func(void *arg) {
+ int i;
+ for (i = 0; !killed; i++) {
+ toku_mutex_lock(&rwlock_mutex);
+ rwlock.write_lock(false);
+ toku_mutex_unlock(&rwlock_mutex);
+ usleep(10000);
+ toku_mutex_lock(&rwlock_mutex);
+ rwlock.write_unlock();
+ toku_mutex_unlock(&rwlock_mutex);
+ }
+ printf("%lu %d\n", (unsigned long) pthread_self(), i);
+ return arg;
+}
+
+int main(void) {
+ int r;
+
+ toku_mutex_init(toku_uninstrumented, &rwlock_mutex, nullptr);
+ rwlock.init(&rwlock_mutex);
+
+ const int nthreads = 2;
+ pthread_t tids[nthreads];
+ for (int i = 0; i < nthreads; i++) {
+ r = pthread_create(&tids[i], NULL, t1_func, NULL);
+ assert(r == 0);
+ }
+ sleep(10);
+ killed = 1;
+ for (int i = 0; i < nthreads; i++) {
+ void *ret;
+ r = pthread_join(tids[i], &ret);
+ assert(r == 0);
+ }
+
+ rwlock.deinit();
+ toku_mutex_destroy(&rwlock_mutex);
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/test-kibbutz.cc b/storage/tokudb/PerconaFT/util/tests/test-kibbutz.cc
new file mode 100644
index 00000000..5672a853
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/test-kibbutz.cc
@@ -0,0 +1,91 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <util/kibbutz.h>
+
+#include <memory.h>
+#include <stdio.h>
+
+#define ND 10
+#define NT 4
+bool done[ND];
+
+static void dowork (void *idv) {
+ int *CAST_FROM_VOIDP(idp, idv);
+ int id = *idp;
+ if (verbose) printf("s%d\n", id);
+ assert(!done[id]);
+ sleep(1);
+ done[id] = true;
+ sleep(1);
+ if (verbose) printf("d%d\n", id);
+}
+
+static void kibbutz_test (bool parent_finishes_first) {
+ KIBBUTZ k = NULL;
+ int r = toku_kibbutz_create(NT, &k);
+ assert(r == 0);
+ if (verbose) printf("create\n");
+ int ids[ND];
+ for (int i=0; i<ND; i++) {
+ done[i]=false;
+ ids[i] =i;
+ }
+ for (int i=0; i<ND; i++) {
+ if (verbose) printf("e%d\n", i);
+ toku_kibbutz_enq(k, dowork, &ids[i]);
+ }
+ if (!parent_finishes_first) {
+ sleep((ND+2*NT)/NT);
+ }
+ toku_kibbutz_destroy(k);
+ for (int i=0; i<ND; i++) assert(done[i]);
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ kibbutz_test(false);
+ kibbutz_test(true);
+ if (verbose) printf("test ok\n");
+ return 0;
+}
+
+
diff --git a/storage/tokudb/PerconaFT/util/tests/test-kibbutz2.cc b/storage/tokudb/PerconaFT/util/tests/test-kibbutz2.cc
new file mode 100644
index 00000000..8ccd37c3
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/test-kibbutz2.cc
@@ -0,0 +1,89 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <util/kibbutz.h>
+
+#include <memory.h>
+#include <stdio.h>
+
+#define ND 10
+bool done[ND];
+
+static void dowork (void *idv) {
+ int *CAST_FROM_VOIDP(idp, idv);
+ int id = *idp;
+ if (verbose) printf("s%d\n", id);
+ for (int i = 0; i < id; i++) {
+ assert(done[i]);
+ }
+ assert(!done[id]);
+ sleep(1);
+ done[id] = true;
+ sleep(1);
+ if (verbose) printf("d%d\n", id);
+}
+
+static void kibbutz_test (void) {
+ KIBBUTZ k = NULL;
+ int r = toku_kibbutz_create(1, &k);
+ assert(r == 0);
+ if (verbose) printf("create\n");
+ int ids[ND];
+ for (int i=0; i<ND; i++) {
+ done[i]=false;
+ ids[i] =i;
+ }
+ for (int i=0; i<ND; i++) {
+ if (verbose) printf("e%d\n", i);
+ toku_kibbutz_enq(k, dowork, &ids[i]);
+ }
+ toku_kibbutz_destroy(k);
+ for (int i=0; i<ND; i++) assert(done[i]);
+}
+
+int
+test_main (int argc , const char *argv[]) {
+ default_parse_args(argc, argv);
+
+ kibbutz_test();
+ if (verbose) printf("test ok\n");
+ return 0;
+}
+
+
diff --git a/storage/tokudb/PerconaFT/util/tests/test-rwlock-cheapness.cc b/storage/tokudb/PerconaFT/util/tests/test-rwlock-cheapness.cc
new file mode 100644
index 00000000..c0b43c2d
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/test-rwlock-cheapness.cc
@@ -0,0 +1,254 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <toku_portability.h>
+#include <toku_assert.h>
+#include <portability/toku_pthread.h>
+#include <portability/toku_time.h>
+#include <util/frwlock.h>
+#include <util/rwlock.h>
+#include "rwlock_condvar.h"
+
+// We need to manually initialize partitioned counters so that the
+// ones automatically incremented by the frwlock get handled properly.
+#include <util/partitioned_counter.h>
+
+toku_mutex_t mutex;
+toku::frwlock w;
+
+static void grab_write_lock(bool expensive) {
+ toku_mutex_lock(&mutex);
+ w.write_lock(expensive);
+ toku_mutex_unlock(&mutex);
+}
+
+static void release_write_lock(void) {
+ toku_mutex_lock(&mutex);
+ w.write_unlock();
+ toku_mutex_unlock(&mutex);
+}
+
+static void grab_read_lock(void) {
+ toku_mutex_lock(&mutex);
+ w.read_lock();
+ toku_mutex_unlock(&mutex);
+}
+
+static void release_read_lock(void) {
+ toku_mutex_lock(&mutex);
+ w.read_unlock();
+ toku_mutex_unlock(&mutex);
+}
+
+static void *do_cheap_wait(void *arg) {
+ grab_write_lock(false);
+ release_write_lock();
+ return arg;
+}
+
+static void *do_expensive_wait(void *arg) {
+ grab_write_lock(true);
+ release_write_lock();
+ return arg;
+}
+
+static void *do_read_wait(void *arg) {
+ grab_read_lock();
+ release_read_lock();
+ return arg;
+}
+
+static void launch_cheap_waiter(void) {
+ toku_pthread_t tid;
+ int r = toku_pthread_create(
+ toku_uninstrumented, &tid, nullptr, do_cheap_wait, nullptr);
+ assert_zero(r);
+ toku_pthread_detach(tid);
+ sleep(1);
+}
+
+static void launch_expensive_waiter(void) {
+ toku_pthread_t tid;
+ int r = toku_pthread_create(
+ toku_uninstrumented, &tid, nullptr, do_expensive_wait, nullptr);
+ assert_zero(r);
+ toku_pthread_detach(tid);
+ sleep(1);
+}
+
+static void launch_reader(void) {
+ toku_pthread_t tid;
+ int r = toku_pthread_create(
+ toku_uninstrumented, &tid, nullptr, do_read_wait, nullptr);
+ assert_zero(r);
+ toku_pthread_detach(tid);
+ sleep(1);
+}
+
+static bool locks_are_expensive(void) {
+ toku_mutex_lock(&mutex);
+ assert(w.write_lock_is_expensive() == w.read_lock_is_expensive());
+ bool is_expensive = w.write_lock_is_expensive();
+ toku_mutex_unlock(&mutex);
+ return is_expensive;
+}
+
+static void test_write_cheapness(void) {
+ toku_mutex_init(toku_uninstrumented, &mutex, nullptr);
+ w.init(&mutex);
+
+ // single expensive write lock
+ grab_write_lock(true);
+ assert(locks_are_expensive());
+ release_write_lock();
+ assert(!locks_are_expensive());
+
+ // single cheap write lock
+ grab_write_lock(false);
+ assert(!locks_are_expensive());
+ release_write_lock();
+ assert(!locks_are_expensive());
+
+ // multiple read locks
+ grab_read_lock();
+ assert(!locks_are_expensive());
+ grab_read_lock();
+ grab_read_lock();
+ assert(!locks_are_expensive());
+ release_read_lock();
+ release_read_lock();
+ release_read_lock();
+ assert(!locks_are_expensive());
+
+ // expensive write lock and cheap writers waiting
+ grab_write_lock(true);
+ launch_cheap_waiter();
+ assert(locks_are_expensive());
+ launch_cheap_waiter();
+ launch_cheap_waiter();
+ assert(locks_are_expensive());
+ release_write_lock();
+ sleep(1);
+ assert(!locks_are_expensive());
+
+ // cheap write lock and expensive writer waiter
+ grab_write_lock(false);
+ launch_expensive_waiter();
+ assert(locks_are_expensive());
+ release_write_lock();
+ sleep(1);
+
+ // expensive write lock and expensive waiter
+ grab_write_lock(true);
+ launch_expensive_waiter();
+ assert(locks_are_expensive());
+ release_write_lock();
+ sleep(1);
+
+ // cheap write lock and cheap waiter
+ grab_write_lock(false);
+ launch_cheap_waiter();
+ assert(!locks_are_expensive());
+ release_write_lock();
+ sleep(1);
+
+ // read lock held and cheap waiter
+ grab_read_lock();
+ launch_cheap_waiter();
+ assert(!locks_are_expensive());
+ // add expensive waiter
+ launch_expensive_waiter();
+ assert(locks_are_expensive());
+ release_read_lock();
+ sleep(1);
+
+ // read lock held and expensive waiter
+ grab_read_lock();
+ launch_expensive_waiter();
+ assert(locks_are_expensive());
+    // add cheap waiter
+ launch_cheap_waiter();
+ assert(locks_are_expensive());
+ release_read_lock();
+ sleep(1);
+
+ // cheap write lock held and waiting read
+ grab_write_lock(false);
+ launch_reader();
+ assert(!locks_are_expensive());
+ launch_expensive_waiter();
+ toku_mutex_lock(&mutex);
+ assert(w.write_lock_is_expensive());
+    // Tricky case: we already launched a reader that should be in the queue,
+    // so a new read lock should piggyback on it and stay cheap.
+ assert(!w.read_lock_is_expensive());
+ toku_mutex_unlock(&mutex);
+ release_write_lock();
+ sleep(1);
+
+ // expensive write lock held and waiting read
+ grab_write_lock(true);
+ launch_reader();
+ assert(locks_are_expensive());
+ launch_cheap_waiter();
+ assert(locks_are_expensive());
+ release_write_lock();
+ sleep(1);
+
+ w.deinit();
+ toku_mutex_destroy(&mutex);
+}
+
+int main (int UU(argc), const char* UU(argv[])) {
+ // Ultra ugly. We manually init/destroy partitioned counters
+ // and context because normally toku_ft_layer_init() does that
+ // for us, but we don't want to initialize everything.
+ partitioned_counters_init();
+ toku_context_status_init();
+ test_write_cheapness();
+ toku_context_status_destroy();
+ partitioned_counters_destroy();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/test-rwlock-unfair-writers.cc b/storage/tokudb/PerconaFT/util/tests/test-rwlock-unfair-writers.cc
new file mode 100644
index 00000000..0d1fc855
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/test-rwlock-unfair-writers.cc
@@ -0,0 +1,98 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// check if write locks are fair
+
+#include <stdio.h>
+#include <assert.h>
+#include <unistd.h>
+#include <pthread.h>
+
+pthread_rwlock_t rwlock;
+volatile int killed = 0;
+
+static void *t1_func(void *arg) {
+ int i;
+ for (i = 0; !killed; i++) {
+ int r;
+ r = pthread_rwlock_wrlock(&rwlock);
+ assert(r == 0);
+ usleep(10000);
+ r = pthread_rwlock_unlock(&rwlock);
+ assert(r == 0);
+ }
+ printf("%lu %d\n", (unsigned long) pthread_self(), i);
+ return arg;
+}
+
+int main(void) {
+ int r;
+#if 0
+ rwlock = PTHREAD_RWLOCK_INITIALIZER;
+#endif
+#if 0
+ pthread_rwlockattr_t attr;
+ pthread_rwlockattr_init(&attr);
+ pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+ r = pthread_rwlock_init(&rwlock, &attr);
+#endif
+#if 0
+ pthread_rwlockattr_t attr;
+ pthread_rwlockattr_init(&attr);
+ r = pthread_rwlock_init(&rwlock, &attr);
+#endif
+#if 1
+ r = pthread_rwlock_init(&rwlock, NULL);
+ assert(r == 0);
+#endif
+
+ const int nthreads = 2;
+ pthread_t tids[nthreads];
+ for (int i = 0; i < nthreads; i++) {
+ r = pthread_create(&tids[i], NULL, t1_func, NULL);
+ assert(r == 0);
+ }
+ sleep(10);
+ killed = 1;
+ for (int i = 0; i < nthreads; i++) {
+ void *ret;
+ r = pthread_join(tids[i], &ret);
+ assert(r == 0);
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/test-rwlock.cc b/storage/tokudb/PerconaFT/util/tests/test-rwlock.cc
new file mode 100644
index 00000000..56dd3f6b
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/test-rwlock.cc
@@ -0,0 +1,403 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Here are some timing numbers:
+// (Note: The not-quite-working version with cas can be found in r22519 of https://svn.tokutek.com/tokudb/toku/tokudb.2825/. It's about as fast as "Best cas".)
+//
+// On ramie (2.53GHz E5540)
+// Best nop time= 1.074300ns
+// Best cas time= 8.595600ns
+// Best mutex time= 19.340201ns
+// Best rwlock time= 34.024799ns
+// Best util rwlock time= 38.680500ns
+// Best prelocked time= 2.148700ns
+// Best fair rwlock time= 45.127600ns
+// On laptop
+// Best nop time= 2.876000ns
+// Best cas time= 15.362500ns
+// Best mutex time= 51.951498ns
+// Best rwlock time= 97.721201ns
+// Best util rwlock time=110.456800ns
+// Best prelocked time= 4.240100ns
+// Best fair rwlock time=113.119102ns
+//
+// Analysis: If the mutex can be prelocked (as the cachetable does: it uses the same mutex for the cachetable and for the condition variable protecting the cachetable),
+//   then you can save quite a bit.  What does the cachetable do?
+//   During pin (in the common case): it grabs the mutex, grabs a read lock, and releases the mutex.
+//   During unpin: it grabs the mutex, releases the read lock on the pair's rwlock, and releases the mutex.
+//   Both actions must acquire the cachetable lock anyway, so it definitely saves time to do it that way.
+
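+// A minimal sketch of that prelocked pattern (hypothetical pin/unpin helpers,
+// kept inside #if 0 so they are never compiled). The point: the mutex must be
+// taken for the surrounding bookkeeping anyway, so the rwlock operation rides
+// along under it almost for free. The util_rwlock_lock()/util_rwlock_unlock()
+// helpers later in this file have the same shape.
+#if 0
+static void pin_sketch(RWLOCK rwlock, toku_mutex_t *mutex) {
+    toku_mutex_lock(mutex);           // needed for the structure's own bookkeeping
+    // ... bookkeeping that must hold the mutex anyway ...
+    rwlock_read_lock(rwlock, mutex);  // read lock taken under the same mutex
+    toku_mutex_unlock(mutex);
+}
+static void unpin_sketch(RWLOCK rwlock, toku_mutex_t *mutex) {
+    toku_mutex_lock(mutex);
+    rwlock_read_unlock(rwlock);       // read lock released under the same mutex
+    // ... bookkeeping that must hold the mutex anyway ...
+    toku_mutex_unlock(mutex);
+}
+#endif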
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <toku_portability.h>
+#include <toku_assert.h>
+#include <portability/toku_atomic.h>
+#include <portability/toku_pthread.h>
+#include <portability/toku_time.h>
+#include <util/frwlock.h>
+#include <util/rwlock.h>
+#include "rwlock_condvar.h"
+
+static int verbose=1;
+static int timing_only=0;
+
+static void parse_args (int argc, const char *argv[]) {
+ const char *progname = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) {
+ verbose++;
+ } else if (strcmp(argv[0], "-q")==0) {
+ verbose--;
+ } else if (strcmp(argv[0], "--timing-only")==0) {
+ timing_only=1;
+ } else {
+ fprintf(stderr, "Usage: %s {-q}* {-v}* {--timing-only}\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+}
+
+static const int T=6;
+static const int N=10000000;
+
+static double best_nop_time=1e12;
+static double best_fcall_time=1e12;
+static double best_cas_time=1e12;
+static double best_mutex_time=1e12;
+static double best_rwlock_time=1e12;
+static double best_util_time=1e12;
+static double best_prelocked_time=1e12;
+static double best_frwlock_time=1e12;
+static double best_frwlock_prelocked_time=1e12;
+static double mind(double a, double b) { if (a<b) return a; else return b; }
+
+#if 0
+// gcc 4.4.4 (fedora 12) doesn't introduce memory barriers on these writes, so I think that volatile is not enough for sequential consistency.
+// Intel guarantees that writes are seen in the same order as they were performed on one processor. But if there were two processors, funny things could happen.
+volatile int sc_a, sc_b;
+void sequential_consistency (void) {
+ sc_a = 1;
+ sc_b = 0;
+}
+#endif
+
+// Declaring val to be volatile produces essentially identical code as putting the asm volatile memory statements in.
+// gcc is not introducing memory barriers to force sequential consistency on volatile memory writes.
+// That's probably good enough for us, since we'll have a barrier instruction anywhere it matters.
+volatile int val = 0;
+
+static void time_nop (void) __attribute((__noinline__)); // don't want it inline, because it messes up timing.
+static void time_nop (void) {
+ struct timeval start,end;
+ for (int t=0; t<T; t++) {
+ gettimeofday(&start, NULL);
+ for (int i=0; i<N; i++) {
+ if (val!=0) abort();
+ val=1;
+ //__asm__ volatile ("" : : : "memory");
+ val=0;
+ //__asm__ volatile ("" : : : "memory");
+ }
+ gettimeofday(&end, NULL);
+ double diff = 1e9*toku_tdiff(&end, &start)/N;
+ if (verbose>1)
+ fprintf(stderr, "nop = %.6fns/(lock+unlock)\n", diff);
+ best_nop_time=mind(best_nop_time,diff);
+ }
+}
+
+// This function is defined so we can measure the cost of a function call.
+int fcall_nop (int i) __attribute__((__noinline__));
+int fcall_nop (int i) {
+ return i;
+}
+
+void time_fcall (void) __attribute((__noinline__));
+void time_fcall (void) {
+ struct timeval start,end;
+ for (int t=0; t<T; t++) {
+ gettimeofday(&start, NULL);
+ for (int i=0; i<N; i++) {
+ fcall_nop(i);
+ }
+ gettimeofday(&end, NULL);
+ double diff = 1e9*toku_tdiff(&end, &start)/N;
+ if (verbose>1)
+ fprintf(stderr, "fcall = %.6fns/(lock+unlock)\n", diff);
+ best_fcall_time=mind(best_fcall_time,diff);
+ }
+}
+
+void time_cas (void) __attribute__((__noinline__));
+void time_cas (void) {
+ volatile int64_t tval = 0;
+ struct timeval start,end;
+ for (int t=0; t<T; t++) {
+ gettimeofday(&start, NULL);
+ for (int i=0; i<N; i++) {
+ { int r = toku_sync_val_compare_and_swap(&tval, 0, 1); assert(r==0); }
+ { int r = toku_sync_val_compare_and_swap(&tval, 1, 0); assert(r==1); }
+ }
+ gettimeofday(&end, NULL);
+ double diff = 1e9*toku_tdiff(&end, &start)/N;
+ if (verbose>1)
+ fprintf(stderr, "cas = %.6fns/(lock+unlock)\n", diff);
+ best_cas_time=mind(best_cas_time,diff);
+ }
+}
+
+
+void time_pthread_mutex (void) __attribute__((__noinline__));
+void time_pthread_mutex (void) {
+ pthread_mutex_t mutex;
+ { int r = pthread_mutex_init(&mutex, NULL); assert(r==0); }
+ struct timeval start,end;
+ pthread_mutex_lock(&mutex);
+ pthread_mutex_unlock(&mutex);
+ for (int t=0; t<T; t++) {
+ gettimeofday(&start, NULL);
+ for (int i=0; i<N; i++) {
+ pthread_mutex_lock(&mutex);
+ pthread_mutex_unlock(&mutex);
+ }
+ gettimeofday(&end, NULL);
+ double diff = 1e9*toku_tdiff(&end, &start)/N;
+ if (verbose>1)
+ fprintf(stderr, "pthread_mutex = %.6fns/(lock+unlock)\n", diff);
+ best_mutex_time=mind(best_mutex_time,diff);
+ }
+ { int r = pthread_mutex_destroy(&mutex); assert(r==0); }
+}
+
+void time_pthread_rwlock (void) __attribute__((__noinline__));
+void time_pthread_rwlock (void) {
+ pthread_rwlock_t mutex;
+ { int r = pthread_rwlock_init(&mutex, NULL); assert(r==0); }
+ struct timeval start,end;
+ pthread_rwlock_rdlock(&mutex);
+ pthread_rwlock_unlock(&mutex);
+ for (int t=0; t<T; t++) {
+ gettimeofday(&start, NULL);
+ for (int i=0; i<N; i++) {
+ pthread_rwlock_rdlock(&mutex);
+ pthread_rwlock_unlock(&mutex);
+ }
+ gettimeofday(&end, NULL);
+ double diff = 1e9*toku_tdiff(&end, &start)/N;
+ if (verbose>1)
+ fprintf(stderr, "pthread_rwlock(r) = %.6fns/(lock+unlock)\n", diff);
+ best_rwlock_time=mind(best_rwlock_time,diff);
+ }
+ { int r = pthread_rwlock_destroy(&mutex); assert(r==0); }
+}
+
+static void util_rwlock_lock (RWLOCK rwlock, toku_mutex_t *mutex) {
+ toku_mutex_lock(mutex);
+ rwlock_read_lock(rwlock, mutex);
+ toku_mutex_unlock(mutex);
+}
+
+static void util_rwlock_unlock (RWLOCK rwlock, toku_mutex_t *mutex) {
+ toku_mutex_lock(mutex);
+ rwlock_read_unlock(rwlock);
+ toku_mutex_unlock(mutex);
+}
+
+// Time the read lock that's in util/rwlock.h
+void time_util_rwlock(void) __attribute((__noinline__));
+void time_util_rwlock(void) {
+ struct st_rwlock rwlock;
+ toku_mutex_t external_mutex;
+ toku_mutex_init(toku_uninstrumented, &external_mutex, nullptr);
+ rwlock_init(toku_uninstrumented, &rwlock);
+ struct timeval start, end;
+
+ util_rwlock_lock(&rwlock, &external_mutex);
+ util_rwlock_unlock(&rwlock, &external_mutex);
+ for (int t=0; t<T; t++) {
+ gettimeofday(&start, NULL);
+ for (int i=0; i<N; i++) {
+ util_rwlock_lock(&rwlock, &external_mutex);
+ util_rwlock_unlock(&rwlock, &external_mutex);
+ }
+ gettimeofday(&end, NULL);
+ double diff = 1e9*toku_tdiff(&end, &start)/N;
+ if (verbose>1)
+ fprintf(stderr, "util_rwlock(r) = %.6fns/(lock+unlock)\n", diff);
+ best_util_time=mind(best_util_time,diff);
+ }
+ rwlock_destroy(&rwlock);
+ toku_mutex_destroy(&external_mutex);
+}
+
+// Time the read lock that's in util/rwlock.h, assuming the mutex is already
+// held.
+void time_util_prelocked_rwlock(void) __attribute__((__noinline__));
+void time_util_prelocked_rwlock(void) {
+ struct st_rwlock rwlock;
+ toku_mutex_t external_mutex;
+ toku_mutex_init(toku_uninstrumented, &external_mutex, nullptr);
+ toku_mutex_lock(&external_mutex);
+ rwlock_init(toku_uninstrumented, &rwlock);
+ struct timeval start, end;
+
+ rwlock_read_lock(&rwlock, &external_mutex);
+ rwlock_read_unlock(&rwlock);
+ for (int t=0; t<T; t++) {
+ gettimeofday(&start, NULL);
+ for (int i=0; i<N; i++) {
+ rwlock_read_lock(&rwlock, &external_mutex);
+ rwlock_read_unlock(&rwlock);
+ }
+ gettimeofday(&end, NULL);
+ double diff = 1e9*toku_tdiff(&end, &start)/N;
+ if (verbose>1)
+ fprintf(stderr, "pre_util_rwlock(r) = %.6fns/(lock+unlock)\n", diff);
+ best_prelocked_time=mind(best_prelocked_time,diff);
+ }
+ rwlock_destroy(&rwlock);
+ toku_mutex_unlock(&external_mutex);
+ toku_mutex_destroy(&external_mutex);
+}
+
+void time_frwlock_prelocked(void) __attribute__((__noinline__));
+void time_frwlock_prelocked(void) {
+ toku_mutex_t external_mutex;
+ toku_mutex_init(toku_uninstrumented, &external_mutex, nullptr);
+ struct timeval start, end;
+ toku::frwlock x;
+ x.init(&external_mutex);
+ toku_mutex_lock(&external_mutex);
+ bool got_lock;
+ x.read_lock();
+ x.read_unlock();
+
+ got_lock = x.try_read_lock();
+ invariant(got_lock);
+ x.read_unlock();
+ x.write_lock(true);
+ x.write_unlock();
+ got_lock = x.try_write_lock(true);
+ invariant(got_lock);
+ x.write_unlock();
+ for (int t=0; t<T; t++) {
+ gettimeofday(&start, NULL);
+ for (int i=0; i<N; i++) {
+ x.read_lock();
+ x.read_unlock();
+ }
+ gettimeofday(&end, NULL);
+ double diff = 1e9*toku_tdiff(&end, &start)/N;
+ if (verbose>1)
+ fprintf(stderr, "frwlock_prelocked = %.6fns/(lock+unlock)\n", diff);
+ best_frwlock_prelocked_time=mind(best_frwlock_prelocked_time,diff);
+ }
+ x.deinit();
+ toku_mutex_unlock(&external_mutex);
+ toku_mutex_destroy(&external_mutex);
+}
+
+void time_frwlock(void) __attribute__((__noinline__));
+void time_frwlock(void) {
+ toku_mutex_t external_mutex;
+ toku_mutex_init(toku_uninstrumented, &external_mutex, nullptr);
+ struct timeval start, end;
+ toku::frwlock x;
+ x.init(&external_mutex);
+ toku_mutex_lock(&external_mutex);
+ x.read_lock();
+ x.read_unlock();
+ toku_mutex_unlock(&external_mutex);
+ for (int t=0; t<T; t++) {
+ gettimeofday(&start, NULL);
+ for (int i=0; i<N; i++) {
+ toku_mutex_lock(&external_mutex);
+ x.read_lock();
+ toku_mutex_unlock(&external_mutex);
+
+ toku_mutex_lock(&external_mutex);
+ x.read_unlock();
+ toku_mutex_unlock(&external_mutex);
+ }
+ gettimeofday(&end, NULL);
+ double diff = 1e9*toku_tdiff(&end, &start)/N;
+ if (verbose>1)
+ fprintf(stderr, "frwlock = %.6fns/(lock+unlock)\n", diff);
+ best_frwlock_time=mind(best_frwlock_time,diff);
+ }
+ x.deinit();
+ toku_mutex_destroy(&external_mutex);
+}
+
+int main (int argc, const char *argv[]) {
+ parse_args(argc, argv);
+ if (timing_only) {
+ if (1) { // to make it easy to only time the templated frwlock
+ time_nop();
+ time_fcall();
+ time_cas();
+ time_pthread_mutex();
+ time_pthread_rwlock();
+ time_util_rwlock();
+ time_util_prelocked_rwlock();
+ }
+ time_frwlock();
+ time_frwlock_prelocked();
+ if (verbose>0) {
+ if (1) { // to make it easy to only time the templated frwlock
+ printf("// Best nop time=%10.6fns\n", best_nop_time);
+ printf("// Best fcall time=%10.6fns\n", best_fcall_time);
+ printf("// Best cas time=%10.6fns\n", best_cas_time);
+ printf("// Best mutex time=%10.6fns\n", best_mutex_time);
+ printf("// Best rwlock time=%10.6fns\n", best_rwlock_time);
+ printf("// Best util rwlock time=%10.6fns\n", best_util_time);
+ printf("// Best prelocked time=%10.6fns\n", best_prelocked_time);
+ }
+ printf("// Best frwlock time=%10.6fns\n", best_frwlock_time);
+ printf("// Best frwlock_pre time=%10.6fns\n", best_frwlock_prelocked_time);
+ }
+ }
+ return 0;
+}
+
diff --git a/storage/tokudb/PerconaFT/util/tests/test.h b/storage/tokudb/PerconaFT/util/tests/test.h
new file mode 100644
index 00000000..fdd2d3f6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/test.h
@@ -0,0 +1,84 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <stdlib.h>
+#include <toku_portability.h>
+#include <toku_assert.h>
+#include <util/partitioned_counter.h>
+#include <string.h>
+
+#define CKERR(r) ({ int __r = r; if (__r!=0) fprintf(stderr, "%s:%d error %d %s\n", __FILE__, __LINE__, __r, strerror(r)); assert(__r==0); })
+#define CKERR2(r,r2) do { if (r!=r2) fprintf(stderr, "%s:%d error %d %s, expected %d\n", __FILE__, __LINE__, r, strerror(r), r2); assert(r==r2); } while (0)
+#define CKERR2s(r,r2,r3) do { if (r!=r2 && r!=r3) fprintf(stderr, "%s:%d error %d %s, expected %d or %d\n", __FILE__, __LINE__, r, strerror(r), r2,r3); assert(r==r2||r==r3); } while (0)
+
+#define DEBUG_LINE do { \
+ fprintf(stderr, "%s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+ fflush(stderr); \
+} while (0)
+
+static int verbose;
+
+static inline void
+default_parse_args (int argc, const char *argv[]) {
+ const char *progname=argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0],"-v")==0) {
+ ++verbose;
+ } else if (strcmp(argv[0],"-q")==0) {
+ verbose=0;
+ } else {
+ fprintf(stderr, "Usage:\n %s [-v] [-q]\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+}
+
+int test_main(int argc, const char *argv[]);
+
+int
+main(int argc, const char *argv[]) {
+ int ri = toku_portability_init();
+ assert(ri==0);
+ partitioned_counters_init();
+ int r = test_main(argc, argv);
+ partitioned_counters_destroy();
+ toku_portability_destroy();
+ return r;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/test_doubly_linked_list.cc b/storage/tokudb/PerconaFT/util/tests/test_doubly_linked_list.cc
new file mode 100644
index 00000000..cac17f1a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/test_doubly_linked_list.cc
@@ -0,0 +1,184 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <stdlib.h>
+#include <util/doubly_linked_list.h>
+
+using namespace toku;
+
+static void check_is_empty (DoublyLinkedList<int> *l) {
+ LinkedListElement<int> *re;
+ bool r = l->pop(&re);
+ assert(!r);
+}
+
+static void test_doubly_linked_list (void) {
+ DoublyLinkedList<int> l;
+ l.init();
+ LinkedListElement<int> e0, e1;
+
+ l.insert(&e0, 3);
+ {
+ LinkedListElement<int> *re;
+ bool r = l.pop(&re);
+ assert(r);
+ assert(re==&e0);
+ assert(re->get_container()==3);
+ }
+ check_is_empty(&l);
+
+ l.insert(&e0, 0);
+ l.insert(&e1, 1);
+ {
+ bool in[2]={true,true};
+ for (int i=0; i<2; i++) {
+ LinkedListElement<int> *re;
+ bool r = l.pop(&re);
+ assert(r);
+ int v = re->get_container();
+ assert(v==0 || v==1);
+ assert(in[v]);
+ in[v]=false;
+ }
+ }
+ check_is_empty(&l);
+}
+
+const int N=100;
+bool in[N];
+DoublyLinkedList<int> l;
+LinkedListElement<int> elts[N];
+
+static void maybe_insert_random(void) {
+ int x = random()%N;
+ if (!in[x]) {
+ if (verbose) printf("I%d ", x);
+ l.insert(&elts[x], x);
+ in[x]=true;
+ }
+}
+
+static bool checked[N];
+static int check_count;
+static int check_is_in(int v, int deadbeef) {
+ assert(deadbeef==0xdeadbeef);
+ assert(0<=v && v<N);
+ assert(!checked[v]);
+ assert(in[v]);
+ checked[v]=true;
+ check_count++;
+ return 0;
+}
+static int quit_count=0;
+static int quit_early(int v __attribute__((__unused__)), int beefbeef) {
+ assert(beefbeef==0xbeefbeef);
+ quit_count++;
+ if (quit_count==check_count) return check_count;
+ else return 0;
+}
+
+static void check_equal(void) {
+ check_count=0;
+ for (int i=0; i<N; i++) checked[i]=false;
+ {
+ int r = l.iterate<int>(check_is_in, 0xdeadbeef);
+ assert(r==0);
+ }
+ for (int i=0; i<N; i++) assert(checked[i]==in[i]);
+
+ if (check_count>0) {
+ check_count=1+random()%check_count; // quit after 1 or more iterations
+ quit_count=0;
+ int r = l.iterate<int>(quit_early, 0xbeefbeef);
+ assert(r==check_count);
+ }
+}
+
+static void test_doubly_linked_list_randomly(void) {
+ l.init();
+ for (int i=0; i<N; i++) in[i]=false;
+
+ for (int i=0; i<N/2; i++) maybe_insert_random();
+ if (verbose) printf("\n");
+
+ for (int i=0; i<N*N; i++) {
+ int x = random()%N;
+ if (in[x]) {
+ if (random()%2==0) {
+ if (verbose) printf("%dR%d ", i, x);
+ l.remove(&elts[x]);
+ in[x]=false;
+ } else {
+ LinkedListElement<int> *re;
+ bool r = l.pop(&re);
+ assert(r);
+ int v = re->get_container();
+ assert(in[v]);
+ in[v]=false;
+ if (verbose) printf("%dP%d ", i, v);
+ }
+ } else {
+ l.insert(&elts[x], x);
+ in[x]=true;
+ if (verbose) printf("%dI%d ", i, x);
+ }
+
+ check_equal();
+ }
+ if (verbose) printf("\n");
+
+ LinkedListElement<int> *re;
+ while (l.pop(&re)) {
+ int v = re->get_container();
+ assert(in[v]);
+ in[v]=false;
+ if (verbose) printf("P%d ", v);
+ }
+ for (int i=0; i<N; i++) assert(!in[i]);
+ if (verbose) printf("\n");
+}
+
+int test_main (int argc, const char *argv[]) {
+ default_parse_args(argc, argv);
+ test_doubly_linked_list();
+ for (int i=0; i<4; i++) {
+ test_doubly_linked_list_randomly();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/test_partitioned_counter.cc b/storage/tokudb/PerconaFT/util/tests/test_partitioned_counter.cc
new file mode 100644
index 00000000..a4e6f842
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/test_partitioned_counter.cc
@@ -0,0 +1,416 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+/* This code can either test the PARTITIONED_COUNTER abstraction or it can time various implementations. */
+
+/* Try to make a counter that requires no cache misses to increment; getting the value is allowed to be slow.
+ * I don't care much about races between the readers and writers on the counter.
+ *
+ * The problem: We observed that incrementing a counter with multiple threads is quite expensive.
+ * Here are some performance numbers:
+ * Machines: mork or mindy (Intel Xeon L5520 2.27GHz)
+ * bradley's 4-core laptop (Intel Core i7-2640M 2.80GHz) sandy bridge
+ * alf 16-core server (xeon E5-2665 2.4GHz) sandybridge
+ *
+ * mork mindy bradley alf
+ * 1.22ns 1.07ns 1.27ns 0.61ns to do a ++, but it's got a race in it.
+ * 27.11ns 20.47ns 18.75ns 34.15ns to do a sync_fetch_and_add().
+ * 0.26ns 0.29ns 0.71ns 0.19ns to do with a single version of a counter
+ * 0.35ns 0.33ns 0.69ns 0.18ns pure thread-local variable (no way to add things up)
+ * 0.76ns 1.50ns 0.35ns partitioned_counter.c (using link-time optimization, otherwise the function call overhead overwhelms everything)
+ * 2.21ns 3.32ns 0.70ns partitioned_counter.c (using gcc, the C version at r46097, not C++) This one is a little slower because it has an extra branch in it.
+ *
+ * Surprisingly, compiling this code without -fPIC doesn't make it any faster (even the pure thread-local variable is the same). -fPIC access to
+ * thread-local variables looks slower since it involves a function call, but it doesn't seem to be any slower in practice. In fact, even the puretl-ptr test
+ * which simply increments a thread-local pointer is basically the same speed as accessing a thread-local variable.
+ *
+ * How it works. Each thread has a thread-local counter structure with an integer in it. To increment, we increment the thread-local structure.
+ * The other operation is to query the counters to get the sum of all the thread-local variables.
+ * The first time a pthread increments the variable we add the variable to a linked list.
+ * When a pthread ends, we use the pthread_key destructor to remove the variable from the linked list. We also have to remember the sum of everything
+ * that has been removed from the list.
+ * To get the sum we add the sum of the destructed items, plus everything in the list.
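+ * For example (illustrative numbers only, not part of the test): if two live threads hold thread-local counts of 5 and 7,
+ * and threads that already exited contributed 12 to the destructed sum, then reading the counter yields 12 + 5 + 7 = 24.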
+ *
+ */
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <toku_race_tools.h>
+#include <toku_assert.h>
+#include <portability/toku_atomic.h>
+#include <memory.h>
+#include <util/partitioned_counter.h>
+#include "test.h"
+
+// The test code includes the fastest version I could figure out to make, implemented below.
+
+struct counter_s {
+ bool inited;
+ volatile int counter;
+ struct counter_s *prev, *next;
+ int myid;
+};
+static __thread struct counter_s counter = {false,0, NULL,NULL,0};
+
+static int finished_counter=0; // counter for all threads that are done.
+
+// We use a single mutex for anything complex. We'd like to use a mutex per partitioned counter, but we must cope with the possibility of a race between
+// a terminating pthread (which calls destroy_counter()), and a call to the counter destructor. So we use a global mutex.
+static pthread_mutex_t pc_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct counter_s *head=NULL;
+static pthread_key_t counter_key;
+
+static void pc_lock (void)
+// Effect: Lock the pc mutex.
+{
+ int r = pthread_mutex_lock(&pc_mutex);
+ assert(r==0);
+}
+
+static void pc_unlock (void)
+// Effect: Unlock the pc mutex.
+{
+ int r = pthread_mutex_unlock(&pc_mutex);
+ assert(r==0);
+}
+
+static void destroy_counter (void *counterp)
+// Effect: This is the function passed to pthread_key_create that is to run whenever a thread terminates.
+// The thread-local part of the counter must be copied into the shared state, and the thread-local part of the counter must be
+// removed from the linked list of all thread-local parts.
+{
+ assert((struct counter_s*)counterp==&counter);
+ pc_lock();
+ if (counter.prev==NULL) {
+ assert(head==&counter);
+ head = counter.next;
+ } else {
+ counter.prev->next = counter.next;
+ }
+ if (counter.next!=NULL) {
+ counter.next->prev = counter.prev;
+ }
+ finished_counter += counter.counter;
+ TOKU_VALGRIND_HG_ENABLE_CHECKING(&counter.counter, sizeof(counter.counter)); // stop ignoring races
+ //printf("finished counter now %d\n", finished_counter);
+ pc_unlock();
+}
+
+static int idcounter=0;
+
+static inline void increment (void) {
+ if (!counter.inited) {
+ pc_lock();
+ struct counter_s *cp = &counter;
+ { int r = pthread_setspecific(counter_key, cp); assert(r==0); }
+ cp->prev = NULL;
+ cp->next = head;
+ if (head!=NULL) {
+ head->prev = cp;
+ }
+ head = cp;
+ cp->counter = 0;
+ cp->inited = true;
+ cp->myid = idcounter++;
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&counter.counter, sizeof(counter.counter)); // the counter increment is kind of racy.
+ pc_unlock();
+ }
+ counter.counter++;
+}
+
+static int getvals (void) {
+ pc_lock();
+ int sum=finished_counter;
+ for (struct counter_s *p=head; p; p=p->next) {
+ sum+=p->counter;
+ }
+ pc_unlock();
+ return sum;
+}
+
+/**********************************************************************************/
+/* And now for some actual test code. */
+/**********************************************************************************/
+
+static const int N=10000000;
+static const int T=20;
+
+
+PARTITIONED_COUNTER pc;
+static void *pc_doit (void *v) {
+ for (int i=0; i<N; i++) {
+ increment_partitioned_counter(pc, 1);
+ }
+ //printf("val=%ld\n", read_partitioned_counter(pc));
+ return v;
+}
+
+static void* new_doit (void* v) {
+ for (int i=0; i<N; i++) {
+ increment();
+ //if (i%0x2000 == 0) sched_yield();
+ }
+ if (0) printf("done id=%d, getvals=%d\n", counter.myid, getvals());
+ return v;
+}
+
+static int oldcounter=0;
+
+static void* old_doit (void* v) {
+ for (int i=0; i<N; i++) {
+ (void)toku_sync_fetch_and_add(&oldcounter, 1);
+ //if (i%0x1000 == 0) sched_yield();
+ }
+ return v;
+}
+
+static volatile int oldcounter_nonatomic=0;
+
+static void* old_doit_nonatomic (void* v) {
+ for (int i=0; i<N; i++) {
+ oldcounter_nonatomic++;
+ //if (i%0x1000 == 0) sched_yield();
+ }
+ return v;
+}
+
+static __thread volatile int thread_local_counter=0;
+static void* tl_doit (void *v) {
+ for (int i=0; i<N; i++) {
+ thread_local_counter++;
+ }
+ return v;
+}
+
+static float tdiff (struct timeval *start, struct timeval *end) {
+ return (end->tv_sec-start->tv_sec) +1e-6*(end->tv_usec - start->tv_usec);
+}
+
+static void pt_create (pthread_t *thread, void *(*f)(void*), void *extra) {
+ int r = pthread_create(thread, NULL, f, extra);
+ assert(r==0);
+}
+
+static void pt_join (pthread_t thread, void *expect_extra) {
+ void *result;
+ int r = pthread_join(thread, &result);
+ assert(r==0);
+ assert(result==expect_extra);
+}
+
+static void timeit (const char *description, void* (*f)(void*)) {
+ struct timeval start, end;
+ pthread_t threads[T];
+ gettimeofday(&start, 0);
+ for (int i=0; i<T; i++) {
+ pt_create(&threads[i], f, NULL);
+ }
+ for (int i=0; i<T; i++) {
+ pt_join(threads[i], NULL);
+ }
+ gettimeofday(&end, 0);
+ printf("%-10s Time=%.6fs (%7.3fns per increment)\n", description, tdiff(&start, &end), (1e9*tdiff(&start, &end)/T)/N);
+}
+
+// Do a measurement where it really is only a pointer dereference to increment the variable, which is thread local.
+static void* tl_doit_ptr (void *v) {
+ volatile uint64_t *p = (uint64_t *)v;
+ for (int i=0; i<N; i++) {
+ (*p)++;
+ }
+ return v;
+}
+
+
+static void timeit_with_thread_local_pointer (const char *description, void* (*f)(void*)) {
+ struct timeval start, end;
+ pthread_t threads[T];
+ struct { uint64_t values[8] __attribute__((__aligned__(64))); } values[T]; // pad to different cache lines.
+ gettimeofday(&start, 0);
+ for (int i=0; i<T; i++) {
+ values[i].values[0]=0;
+ pt_create(&threads[i], f, &values[i].values[0]);
+ }
+ for (int i=0; i<T; i++) {
+ pt_join(threads[i], &values[i].values[0]);
+ }
+ gettimeofday(&end, 0);
+ printf("%-10s Time=%.6fs (%7.3fns per increment)\n", description, tdiff(&start, &end), (1e9*tdiff(&start, &end)/T)/N);
+}
+
+static int verboseness_cmdarg=0;
+static bool time_cmdarg=false;
+
+static void parse_args (int argc, const char *argv[]) {
+ const char *progname = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) verboseness_cmdarg++;
+ else if (strcmp(argv[0], "--time")==0) time_cmdarg=true;
+ else {
+ printf("Usage: %s [-v] [--time]\n Default is to run tests. --time produces timing output.\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+}
+
+static void do_timeit (void) {
+ { int r = pthread_key_create(&counter_key, destroy_counter); assert(r==0); }
+ printf("%d threads\n%d increments per thread\n", T, N);
+ timeit("++", old_doit_nonatomic);
+ timeit("atomic++", old_doit);
+ timeit("fast", new_doit);
+ timeit("puretl", tl_doit);
+ timeit_with_thread_local_pointer("puretl-ptr", tl_doit_ptr);
+ pc = create_partitioned_counter();
+ timeit("pc", pc_doit);
+ destroy_partitioned_counter(pc);
+}
+
+struct test_arguments {
+ PARTITIONED_COUNTER pc;
+ uint64_t limit;
+ uint64_t total_increment_per_writer;
+ volatile uint64_t unfinished_count;
+};
+
+static void *reader_test_fun (void *ta_v) {
+ struct test_arguments *ta = (struct test_arguments *)ta_v;
+ uint64_t lastval = 0;
+ while (ta->unfinished_count>0) {
+ uint64_t thisval = read_partitioned_counter(ta->pc);
+ assert(lastval <= thisval);
+ assert(thisval <= ta->limit+2);
+ lastval = thisval;
+ if (verboseness_cmdarg && (0==(thisval & (thisval-1)))) printf("ufc=%" PRIu64 " Thisval=%" PRIu64 "\n", ta->unfinished_count,thisval);
+ }
+ uint64_t thisval = read_partitioned_counter(ta->pc);
+ assert(thisval==ta->limit+2); // we incremented two extra times in the test
+ return ta_v;
+}
+
+static void *writer_test_fun (void *ta_v) {
+ struct test_arguments *ta = (struct test_arguments *)ta_v;
+ for (uint64_t i=0; i<ta->total_increment_per_writer; i++) {
+ if (i%1000 == 0) sched_yield();
+ increment_partitioned_counter(ta->pc, 1);
+ }
+ uint64_t c __attribute__((__unused__)) = toku_sync_fetch_and_sub(&ta->unfinished_count, 1);
+ return ta_v;
+}
+
+
+static void do_testit (void) {
+ const int NGROUPS = 2;
+ uint64_t limits[NGROUPS];
+ limits [0] = 2000000;
+ limits [1] = 1000000;
+ uint64_t n_writers[NGROUPS];
+ n_writers[0] = 20;
+ n_writers[1] = 40;
+ struct test_arguments tas[NGROUPS];
+ pthread_t reader_threads[NGROUPS];
+ pthread_t *writer_threads[NGROUPS];
+ for (int i=0; i<NGROUPS; i++) {
+ tas[i].pc = create_partitioned_counter();
+ tas[i].limit = limits[i];
+ tas[i].unfinished_count = n_writers[i];
+ tas[i].total_increment_per_writer = limits[i]/n_writers[i];
+ assert(tas[i].total_increment_per_writer * n_writers[i] == limits[i]);
+ pt_create(&reader_threads[i], reader_test_fun, &tas[i]);
+ increment_partitioned_counter(tas[i].pc, 1); // make sure that the long-lived thread also increments the partitioned counter, to test for #5321.
+ MALLOC_N(n_writers[i], writer_threads[i]);
+ for (uint64_t j=0; j<n_writers[i] ; j++) {
+ pt_create(&writer_threads[i][j], writer_test_fun, &tas[i]);
+ }
+ increment_partitioned_counter(tas[i].pc, 1); // make sure that the long-lived thread also increments the partitioned counter, to test for #5321.
+ }
+ for (int i=0; i<NGROUPS; i++) {
+ pt_join(reader_threads[i], &tas[i]);
+ for (uint64_t j=0; j<n_writers[i] ; j++) {
+ pt_join(writer_threads[i][j], &tas[i]);
+ }
+ toku_free(writer_threads[i]);
+ destroy_partitioned_counter(tas[i].pc);
+ }
+}
+
+volatile int spinwait=0;
+static void* test2_fun (void* mypc_v) {
+ PARTITIONED_COUNTER mypc = (PARTITIONED_COUNTER)mypc_v;
+ increment_partitioned_counter(mypc, 3);
+ spinwait=1;
+ while (spinwait==1);
+ // mypc no longer points at a valid data structure.
+ return NULL;
+}
+
+static void do_testit2 (void)
+// This test checks to see what happens if a thread is still live when we destruct a counter.
+// A thread increments the counter, then lets us know through a spin wait, then waits until we destroy the counter.
+{
+ pthread_t t;
+ TOKU_VALGRIND_HG_DISABLE_CHECKING(&spinwait, sizeof(spinwait)); // this is a racy volatile variable.
+ {
+ PARTITIONED_COUNTER mypc = create_partitioned_counter();
+ increment_partitioned_counter(mypc, 1); // make sure that the long-lived thread also increments the partitioned counter, to test for #5321.
+ pt_create(&t, test2_fun, mypc);
+ while(spinwait==0); // wait until the thread has incremented the counter.
+ increment_partitioned_counter(mypc, -1);
+ assert(read_partitioned_counter(mypc)==3);
+ destroy_partitioned_counter(mypc);
+ } // leave scope, so the counter goes away.
+ spinwait=2; // tell the other thread to finish up.
+ pt_join(t, NULL);
+}
+
+int test_main (int argc, const char *argv[]) {
+ parse_args(argc, argv);
+ if (time_cmdarg) {
+ do_timeit();
+ } else {
+ do_testit();
+ do_testit2();
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/test_partitioned_counter_5833.cc b/storage/tokudb/PerconaFT/util/tests/test_partitioned_counter_5833.cc
new file mode 100644
index 00000000..52060e6a
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/test_partitioned_counter_5833.cc
@@ -0,0 +1,102 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// Demonstrate a race if #5833 isn't fixed.
+
+#include <pthread.h>
+#include <toku_portability.h>
+#include <util/partitioned_counter.h>
+#include "test.h"
+
+
+static void pt_create (pthread_t *thread, void *(*f)(void*), void *extra) {
+ int r = pthread_create(thread, NULL, f, extra);
+ assert(r==0);
+}
+
+static void pt_join (pthread_t thread, void *expect_extra) {
+ void *result;
+ int r = pthread_join(thread, &result);
+ assert(r==0);
+ assert(result==expect_extra);
+}
+
+static int verboseness_cmdarg=0;
+
+static void parse_args (int argc, const char *argv[]) {
+ const char *progname = argv[0];
+ argc--; argv++;
+ while (argc>0) {
+ if (strcmp(argv[0], "-v")==0) verboseness_cmdarg++;
+ else {
+ printf("Usage: %s [-v]\n", progname);
+ exit(1);
+ }
+ argc--; argv++;
+ }
+}
+
+#define NCOUNTERS 2
+PARTITIONED_COUNTER array_of_counters[NCOUNTERS];
+
+static void *counter_init_fun(void *tnum_pv) {
+ int *tnum_p = (int*)tnum_pv;
+ int tnum = *tnum_p;
+ assert(0<=tnum && tnum<NCOUNTERS);
+ array_of_counters[tnum] = create_partitioned_counter();
+ return tnum_pv;
+}
+
+static void do_test_5833(void) {
+ pthread_t threads[NCOUNTERS];
+ int tids[NCOUNTERS];
+ for (int i=0; i<NCOUNTERS; i++) {
+ tids[i] = i;
+ pt_create(&threads[i], counter_init_fun, &tids[i]);
+ }
+ for (int i=0; i<NCOUNTERS; i++) {
+ pt_join(threads[i], &tids[i]);
+ destroy_partitioned_counter(array_of_counters[i]);
+ }
+}
+
+int test_main(int argc, const char *argv[]) {
+ parse_args(argc, argv);
+ do_test_5833();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/threadpool-nproc-limit.cc b/storage/tokudb/PerconaFT/util/tests/threadpool-nproc-limit.cc
new file mode 100644
index 00000000..d645a3a6
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/threadpool-nproc-limit.cc
@@ -0,0 +1,119 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+// this test verifies that the toku thread pool is resilient when hitting the nproc limit.
+
+#include <util/threadpool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/resource.h>
+
+int verbose = 0;
+
+static int usage(void) {
+ fprintf(stderr, "[-q] [-v] [--verbose] (%d)\n", verbose);
+ return 1;
+}
+
+static void *f(void *arg) {
+ return arg;
+}
+
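+// dotest (as written below) lowers RLIMIT_NPROC to the_limit, asks the pool for 20 threads, and accepts either a full
+// allocation or EWOULDBLOCK with fewer threads; it then restores the old limit and reports whether any thread was created.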
+static int dotest(int the_limit) {
+ if (verbose)
+ fprintf(stderr, "%s:%u %d\n", __FILE__, __LINE__, the_limit);
+ int r;
+ struct toku_thread_pool *pool = nullptr;
+ r = toku_thread_pool_create(&pool, 10);
+ assert(r == 0 && pool != nullptr);
+
+ struct rlimit current_nproc_limit;
+ r = getrlimit(RLIMIT_NPROC, &current_nproc_limit);
+ assert(r == 0);
+
+ struct rlimit new_nproc_limit = current_nproc_limit;
+ new_nproc_limit.rlim_cur = the_limit;
+ r = setrlimit(RLIMIT_NPROC, &new_nproc_limit);
+ assert(r == 0);
+
+ int want_n = 20;
+ int got_n = want_n;
+ r = toku_thread_pool_run(pool, 0, &got_n, f, nullptr);
+ if (r == 0)
+ assert(want_n == got_n);
+ else {
+ assert(r == EWOULDBLOCK);
+ assert(got_n <= want_n);
+ }
+
+ r = setrlimit(RLIMIT_NPROC, &current_nproc_limit);
+ assert(r == 0);
+
+ if (verbose)
+ toku_thread_pool_print(pool, stderr);
+ toku_thread_pool_destroy(&pool);
+ return got_n > 0;
+}
+
+int main(int argc, char *argv[]) {
+ // parse args
+ for (int i = 1; i < argc; i++) {
+ char *arg = argv[i];
+ if (arg[0] != '-')
+ break;
+ if (strcmp(arg, "-v") == 0 || strcmp(arg, "--verbose") == 0) {
+ verbose = verbose+1;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = verbose > 0 ? verbose-1 : 0;
+ continue;
+ }
+ return usage();
+ }
+ // set increasing nproc limits until the test succeeds in hitting the limit after > 0 threads are created
+ for (int i = 0; 1; i++) {
+ if (dotest(i))
+ break;
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/threadpool-test.cc b/storage/tokudb/PerconaFT/util/tests/threadpool-test.cc
new file mode 100644
index 00000000..83c142ed
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/threadpool-test.cc
@@ -0,0 +1,170 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <util/threadpool.h>
+
+#include <memory.h>
+#include <toku_os.h>
+#include <toku_portability.h>
+#include <portability/toku_pthread.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <string.h>
+#include <errno.h>
+#if defined(HAVE_MALLOC_H)
+# include <malloc.h>
+#elif defined(HAVE_SYS_MALLOC_H)
+# include <sys/malloc.h>
+#endif
+
+struct my_threadpool {
+ THREADPOOL threadpool;
+ toku_mutex_t mutex;
+ toku_cond_t wait;
+ int closed;
+ int counter;
+};
+
+static void
+my_threadpool_init (struct my_threadpool *my_threadpool, int max_threads) {
+ int r;
+ assert(my_threadpool != 0);
+ r = toku_thread_pool_create(&my_threadpool->threadpool, max_threads);
+ assert(r == 0);
+ toku_mutex_init(toku_uninstrumented, &my_threadpool->mutex, nullptr);
+ toku_cond_init(toku_uninstrumented, &my_threadpool->wait, nullptr);
+ my_threadpool->closed = 0;
+ my_threadpool->counter = 0;
+}
+
+static void
+my_threadpool_destroy (struct my_threadpool *my_threadpool, int max_threads) {
+ toku_mutex_lock(&my_threadpool->mutex);
+ my_threadpool->closed = 1;
+ toku_cond_broadcast(&my_threadpool->wait);
+ toku_mutex_unlock(&my_threadpool->mutex);
+
+ if (verbose) printf("current %d\n", toku_thread_pool_get_current_threads(my_threadpool->threadpool));
+ toku_thread_pool_destroy(&my_threadpool->threadpool); assert(my_threadpool->threadpool == 0);
+ assert(my_threadpool->counter == max_threads);
+ toku_mutex_destroy(&my_threadpool->mutex);
+ toku_cond_destroy(&my_threadpool->wait);
+}
+
+static void *
+my_thread_f (void *arg) {
+ struct my_threadpool *CAST_FROM_VOIDP(my_threadpool, arg);
+ toku_mutex_lock(&my_threadpool->mutex);
+ my_threadpool->counter++;
+ while (!my_threadpool->closed) {
+ toku_cond_wait(&my_threadpool->wait, &my_threadpool->mutex);
+ }
+ toku_mutex_unlock(&my_threadpool->mutex);
+ if (verbose) printf("%lu:%s:exit\n", (unsigned long)toku_os_gettid(), __FUNCTION__);
+ return arg;
+}
+
+static void *my_malloc_always_fails(size_t n UU()) {
+ errno = ENOMEM;
+ return NULL;
+}
+
+static int
+usage (void) {
+ printf("threadpool-test: [-v] [-malloc-fail] [N]\n");
+ printf("-malloc-fail simulate malloc failures\n");
+ printf("N max number of threads in the thread pool\n");
+ return 1;
+}
+
+int
+test_main (int argc, const char *argv[]) {
+ int max_threads = 1;
+ int do_malloc_fail = 0;
+
+ int i;
+ for (i=1; i<argc; i++) {
+ const char *arg = argv[i];
+ if (strcmp(arg, "-h") == 0 || strcmp(arg, "-help") == 0) {
+ return usage();
+ } else if (strcmp(arg, "-v") == 0) {
+ verbose++;
+ continue;
+ } else if (strcmp(arg, "-q") == 0) {
+ verbose = 0;
+ continue;
+ } else if (strcmp(arg, "-malloc-fail") == 0) {
+ do_malloc_fail = 1;
+ continue;
+ } else
+ max_threads = atoi(arg);
+ }
+
+ struct my_threadpool my_threadpool;
+ THREADPOOL threadpool;
+
+ ZERO_STRUCT(my_threadpool);
+ my_threadpool_init(&my_threadpool, max_threads);
+ threadpool = my_threadpool.threadpool;
+ if (verbose) printf("test threadpool_set_busy\n");
+ for (i=0; i<2*max_threads; i++) {
+ assert(toku_thread_pool_get_current_threads(threadpool) == (i >= max_threads ? max_threads : i));
+ int n = 1;
+ toku_thread_pool_run(threadpool, 0, &n, my_thread_f, &my_threadpool);
+ }
+ assert(toku_thread_pool_get_current_threads(threadpool) == max_threads);
+ my_threadpool_destroy(&my_threadpool, max_threads);
+
+ if (do_malloc_fail) {
+ if (verbose) printf("test threadpool_create with malloc failure\n");
+ // test that a threadpool malloc failure causes ENOMEM
+
+ toku_set_func_malloc(my_malloc_always_fails);
+ int r;
+ threadpool = NULL;
+ r = toku_thread_pool_create(&threadpool, 0); assert(r == ENOMEM);
+ r = toku_thread_pool_create(&threadpool, 1); assert(r == ENOMEM);
+ toku_set_func_malloc(NULL);
+ }
+
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/threadpool-testrunf.cc b/storage/tokudb/PerconaFT/util/tests/threadpool-testrunf.cc
new file mode 100644
index 00000000..4db38c73
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/threadpool-testrunf.cc
@@ -0,0 +1,114 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <util/threadpool.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+
+int verbose = 0;
+
+static int usage(int poolsize) {
+ fprintf(stderr, "[-q] [-v] [--verbose] (%d)\n", verbose);
+ fprintf(stderr, "[--poolsize %d]\n", poolsize);
+ return 1;
+}
+
+static void *f(void *arg) {
+ return arg;
+}
+
+static void dotest(int poolsize, int nloops) {
+ int r;
+ struct toku_thread_pool *pool = NULL;
+ r = toku_thread_pool_create(&pool, poolsize);
+ assert(r == 0 && pool != NULL);
+
+ int i;
+ for (i = 0; i < nloops; i++) {
+ int n = 1;
+ r = toku_thread_pool_run(pool, 1, &n, f, NULL);
+ assert(r == 0);
+ }
+
+ if (verbose)
+ toku_thread_pool_print(pool, stderr);
+ toku_thread_pool_destroy(&pool);
+}
+
+int main(int argc, char *argv[]) {
+ // defaults
+ int poolsize = 1;
+ int nloops = 100000;
+
+ // options
+ int i;
+ for (i = 1; i < argc; i++) {
+ char *arg = argv[i];
+ if (arg[0] != '-')
+ break;
+ if (strcmp(arg, "--poolsize") == 0 && i+1 < argc) {
+ poolsize = atoi(argv[++i]);
+ continue;
+ }
+ if (strcmp(arg, "-v") == 0 || strcmp(arg, "--verbose") == 0) {
+ verbose = verbose+1;
+ continue;
+ }
+ if (strcmp(arg, "-q") == 0) {
+ verbose = verbose > 0 ? verbose-1 : 0;
+ continue;
+ }
+
+ return usage(poolsize);
+ }
+ int starti = i;
+
+ if (starti == argc) {
+ dotest(poolsize, nloops);
+ } else {
+ for (i = starti; i < argc; i++) {
+ nloops = atoi(argv[i]);
+ dotest(poolsize, nloops);
+ }
+ }
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/tests/x1764-test.cc b/storage/tokudb/PerconaFT/util/tests/x1764-test.cc
new file mode 100644
index 00000000..76b1d9c7
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/tests/x1764-test.cc
@@ -0,0 +1,139 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include "test.h"
+#include <util/x1764.h>
+
+static void
+test0 (void) {
+ uint32_t c = toku_x1764_memory("", 0);
+ assert(c==~(0U));
+ struct x1764 cs;
+ toku_x1764_init(&cs);
+ toku_x1764_add(&cs, "", 0);
+ c = toku_x1764_finish(&cs);
+ assert(c==~(0U));
+}
+
+static void
+test1 (void) {
+ uint64_t v=0x123456789abcdef0ULL;
+ uint32_t c;
+ int i;
+ for (i=0; i<=8; i++) {
+ uint64_t expect64 = (i==8) ? v : v&((1LL<<(8*i))-1);
+ uint32_t expect = expect64 ^ (expect64>>32);
+ c = toku_x1764_memory(&v, i);
+ //printf("i=%d c=%08x expect=%08x\n", i, c, expect);
+ assert(c==~expect);
+ }
+}
+
+// Compute checksums incrementally, using various strides
+static void
+test2 (void) {
+ enum { N=200 };
+ char v[N];
+ int i;
+ for (i=0; i<N; i++) v[i]=(char)random();
+ for (i=0; i<N; i++) {
+ int j;
+ for (j=i; j<=N; j++) {
+ // checksum from i (inclusive) to j (exclusive)
+ uint32_t c = toku_x1764_memory(&v[i], j-i);
+ // Now compute the checksum incrementally with various strides.
+ int stride;
+ for (stride=1; stride<=j-i; stride++) {
+ int k;
+ struct x1764 s;
+ toku_x1764_init(&s);
+ for (k=i; k+stride<=j; k+=stride) {
+ toku_x1764_add(&s, &v[k], stride);
+ }
+ toku_x1764_add(&s, &v[k], j-k);
+ uint32_t c2 = toku_x1764_finish(&s);
+ assert(c2==c);
+ }
+ // Now use some random strides.
+ {
+ int k=i;
+ struct x1764 s;
+ toku_x1764_init(&s);
+ while (1) {
+ stride=random()%16;
+ if (k+stride>j) break;
+ toku_x1764_add(&s, &v[k], stride);
+ k+=stride;
+ }
+ toku_x1764_add(&s, &v[k], j-k);
+ uint32_t c2 = toku_x1764_finish(&s);
+ assert(c2==c);
+ }
+ }
+ }
+}
+
+static void
+test3 (void)
+// Compare the simple version to the highly optimized version.
+{
+ const int datalen = 1000;
+ char data[datalen];
+ for (int i=0; i<datalen; i++) data[i]=random();
+ for (int off=0; off<32; off++) {
+ if (verbose) {printf("."); fflush(stdout);}
+ for (int len=0; len+off<datalen; len++) {
+ uint32_t reference_sum = toku_x1764_memory_simple(data+off, len);
+ uint32_t fast_sum = toku_x1764_memory (data+off, len);
+ assert(reference_sum==fast_sum);
+ }
+ }
+}
+
+int
+test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
+ if (verbose) printf("0\n");
+ test0();
+ if (verbose) printf("1\n");
+ test1();
+ if (verbose) printf("2\n");
+ test2();
+ if (verbose) printf("3\n");
+ test3();
+ return 0;
+}
diff --git a/storage/tokudb/PerconaFT/util/threadpool.cc b/storage/tokudb/PerconaFT/util/threadpool.cc
new file mode 100644
index 00000000..6e0ccf05
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/threadpool.cc
@@ -0,0 +1,298 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <memory.h>
+#include <toku_portability.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+#include <toku_assert.h>
+#include <toku_list.h>
+#include <portability/toku_pthread.h>
+
+#include "threadpool.h"
+
+toku_instr_key *tpool_lock_mutex_key;
+toku_instr_key *tp_thread_wait_key;
+toku_instr_key *tp_pool_wait_free_key;
+toku_instr_key *tp_internal_thread_key;
+
+struct toku_thread {
+ struct toku_thread_pool *pool;
+ toku_pthread_t tid;
+ void *(*f)(void *arg);
+ void *arg;
+ int doexit;
+ struct toku_list free_link;
+ struct toku_list all_link;
+ toku_cond_t wait;
+};
+
+struct toku_thread_pool {
+ int max_threads;
+ int cur_threads;
+ struct toku_list free_threads;
+ struct toku_list all_threads;
+
+ toku_mutex_t lock;
+ toku_cond_t wait_free;
+
+ uint64_t gets, get_blocks;
+};
+
+static void *toku_thread_run_internal(void *arg);
+static void toku_thread_pool_lock(struct toku_thread_pool *pool);
+static void toku_thread_pool_unlock(struct toku_thread_pool *pool);
+
+static int
+toku_thread_create(struct toku_thread_pool *pool, struct toku_thread **toku_thread_return) {
+ int r;
+ struct toku_thread *MALLOC(thread);
+ if (thread == nullptr) {
+ r = get_error_errno();
+ } else {
+ memset(thread, 0, sizeof *thread);
+ thread->pool = pool;
+ toku_cond_init(*tp_thread_wait_key, &thread->wait, nullptr);
+ r = toku_pthread_create(*tp_internal_thread_key,
+ &thread->tid,
+ nullptr,
+ toku_thread_run_internal,
+ thread);
+ if (r) {
+ toku_cond_destroy(&thread->wait);
+ toku_free(thread);
+ thread = nullptr;
+ }
+ *toku_thread_return = thread;
+ }
+ return r;
+}
+
+void
+toku_thread_run(struct toku_thread *thread, void *(*f)(void *arg), void *arg) {
+ toku_thread_pool_lock(thread->pool);
+ thread->f = f;
+ thread->arg = arg;
+ toku_cond_signal(&thread->wait);
+ toku_thread_pool_unlock(thread->pool);
+}
+
+static void toku_thread_destroy(struct toku_thread *thread) {
+ int r;
+ void *ret;
+ r = toku_pthread_join(thread->tid, &ret);
+ invariant(r == 0 && ret == thread);
+ struct toku_thread_pool *pool = thread->pool;
+ toku_thread_pool_lock(pool);
+ toku_list_remove(&thread->free_link);
+ toku_thread_pool_unlock(pool);
+ toku_cond_destroy(&thread->wait);
+ toku_free(thread);
+}
+
+static void
+toku_thread_ask_exit(struct toku_thread *thread) {
+ thread->doexit = 1;
+ toku_cond_signal(&thread->wait);
+}
+
+static void *
+toku_thread_run_internal(void *arg) {
+ struct toku_thread *thread = (struct toku_thread *) arg;
+ struct toku_thread_pool *pool = thread->pool;
+ toku_thread_pool_lock(pool);
+ while (1) {
+ toku_cond_signal(&pool->wait_free);
+ void *(*thread_f)(void *); void *thread_arg; int doexit;
+ while (1) {
+ thread_f = thread->f; thread_arg = thread->arg; doexit = thread->doexit; // make copies of these variables to make helgrind happy
+ if (thread_f || doexit)
+ break;
+ toku_cond_wait(&thread->wait, &pool->lock);
+ }
+ toku_thread_pool_unlock(pool);
+ if (thread_f)
+ (void) thread_f(thread_arg);
+ if (doexit)
+ break;
+ toku_thread_pool_lock(pool);
+ thread->f = nullptr;
+ toku_list_push(&pool->free_threads, &thread->free_link);
+ }
+ return toku_pthread_done(arg);
+}
+
+int toku_thread_pool_create(struct toku_thread_pool **pool_return,
+ int max_threads) {
+ int r;
+ struct toku_thread_pool *CALLOC(pool);
+ if (pool == nullptr) {
+ r = get_error_errno();
+ } else {
+ toku_mutex_init(*tpool_lock_mutex_key, &pool->lock, nullptr);
+ toku_list_init(&pool->free_threads);
+ toku_list_init(&pool->all_threads);
+ toku_cond_init(*tp_pool_wait_free_key, &pool->wait_free, nullptr);
+ pool->cur_threads = 0;
+ pool->max_threads = max_threads;
+ *pool_return = pool;
+ r = 0;
+ }
+ return r;
+}
+
+static void
+toku_thread_pool_lock(struct toku_thread_pool *pool) {
+ toku_mutex_lock(&pool->lock);
+}
+
+static void
+toku_thread_pool_unlock(struct toku_thread_pool *pool) {
+ toku_mutex_unlock(&pool->lock);
+}
+
+void
+toku_thread_pool_destroy(struct toku_thread_pool **poolptr) {
+ struct toku_thread_pool *pool = *poolptr;
+ *poolptr = nullptr;
+
+ // ask the threads to exit
+ toku_thread_pool_lock(pool);
+ struct toku_list *list;
+ for (list = pool->all_threads.next; list != &pool->all_threads; list = list->next) {
+ struct toku_thread *thread = toku_list_struct(list, struct toku_thread, all_link);
+ toku_thread_ask_exit(thread);
+ }
+ toku_thread_pool_unlock(pool);
+
+ // wait for all of the threads to exit
+ while (!toku_list_empty(&pool->all_threads)) {
+ list = toku_list_pop_head(&pool->all_threads);
+ struct toku_thread *thread = toku_list_struct(list, struct toku_thread, all_link);
+ toku_thread_destroy(thread);
+ pool->cur_threads -= 1;
+ }
+
+ invariant(pool->cur_threads == 0);
+
+ // cleanup
+ toku_cond_destroy(&pool->wait_free);
+ toku_mutex_destroy(&pool->lock);
+
+ toku_free(pool);
+}
+
+static int
+toku_thread_pool_add(struct toku_thread_pool *pool) {
+ struct toku_thread *thread = nullptr;
+ int r = toku_thread_create(pool, &thread);
+ if (r == 0) {
+ pool->cur_threads += 1;
+ toku_list_push(&pool->all_threads, &thread->all_link);
+ toku_list_push(&pool->free_threads, &thread->free_link);
+ toku_cond_signal(&pool->wait_free);
+ }
+ return r;
+}
+
+// get one thread from the free pool.
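+// If no thread is free, the pool grows while it is under max_threads (or without bound when max_threads == 0);
+// if it still cannot supply a thread, the call either fails with EWOULDBLOCK (dowait == 0) or blocks on wait_free.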
+static int
+toku_thread_pool_get_one(struct toku_thread_pool *pool, int dowait, struct toku_thread **toku_thread_return) {
+ int r = 0;
+ toku_thread_pool_lock(pool);
+ pool->gets++;
+ while (1) {
+ if (!toku_list_empty(&pool->free_threads))
+ break;
+ if (pool->max_threads == 0 || pool->cur_threads < pool->max_threads)
+ (void) toku_thread_pool_add(pool);
+ if (toku_list_empty(&pool->free_threads) && !dowait) {
+ r = EWOULDBLOCK;
+ break;
+ }
+ pool->get_blocks++;
+ toku_cond_wait(&pool->wait_free, &pool->lock);
+ }
+ if (r == 0) {
+ struct toku_list *list = toku_list_pop_head(&pool->free_threads);
+ struct toku_thread *thread = toku_list_struct(list, struct toku_thread, free_link);
+ *toku_thread_return = thread;
+ } else
+ *toku_thread_return = nullptr;
+ toku_thread_pool_unlock(pool);
+ return r;
+}
+
+int
+toku_thread_pool_get(struct toku_thread_pool *pool, int dowait, int *nthreads, struct toku_thread **toku_thread_return) {
+ int r = 0;
+ int n = *nthreads;
+ int i;
+ for (i = 0; i < n; i++) {
+ r = toku_thread_pool_get_one(pool, dowait, &toku_thread_return[i]);
+ if (r != 0)
+ break;
+ }
+ *nthreads = i;
+ return r;
+}
+
+int
+toku_thread_pool_run(struct toku_thread_pool *pool, int dowait, int *nthreads, void *(*f)(void *arg), void *arg) {
+ int n = *nthreads;
+ struct toku_thread *tids[n];
+ int r = toku_thread_pool_get(pool, dowait, nthreads, tids);
+ if (r == 0 || r == EWOULDBLOCK) {
+ n = *nthreads;
+ for (int i = 0; i < n; i++)
+ toku_thread_run(tids[i], f, arg);
+ }
+ return r;
+}
+
+void
+toku_thread_pool_print(struct toku_thread_pool *pool, FILE *out) {
+ fprintf(out, "%s:%d %p %llu %llu\n", __FILE__, __LINE__, pool, (long long unsigned) pool->gets, (long long unsigned) pool->get_blocks);
+}
+
+int
+toku_thread_pool_get_current_threads(struct toku_thread_pool *pool) {
+ return pool->cur_threads;
+}
diff --git a/storage/tokudb/PerconaFT/util/threadpool.h b/storage/tokudb/PerconaFT/util/threadpool.h
new file mode 100644
index 00000000..eba239f8
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/threadpool.h
@@ -0,0 +1,85 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <stdio.h>
+
+// A toku_thread is a toku_pthread that can be cached.
+struct toku_thread;
+
+// Run a function f on a thread
+// This function sets up the thread to run function f with argument arg and then wakes up
+// the thread to run it.
+void toku_thread_run(struct toku_thread *thread, void *(*f)(void *arg), void *arg);
+
+// A toku_thread_pool is a pool of toku_threads. These threads can be allocated from the pool
+// and can run an arbitrary function.
+struct toku_thread_pool;
+
+typedef struct toku_thread_pool *THREADPOOL;
+
+// Create a new threadpool
+// Effects: a new threadpool is allocated and initialized. The number of threads in the threadpool is limited to max_threads.
+// If max_threads == 0 then there is no limit on the number of threads in the pool.
+// Initially, there are no threads in the pool. Threads are allocated by the _get or _run functions.
+// Returns: if there are no errors, the threadpool is set and zero is returned. Otherwise, an error number is returned.
+int toku_thread_pool_create(struct toku_thread_pool **threadpoolptr, int max_threads);
+
+// Destroy a threadpool
+// Effects: the calling thread joins with all of the threads in the threadpool.
+// Effects: the threadpool memory is freed.
+// Effects: the threadpool pointer is set to null.
+void toku_thread_pool_destroy(struct toku_thread_pool **threadpoolptr);
+
+// Get the current number of threads in the thread pool
+int toku_thread_pool_get_current_threads(struct toku_thread_pool *pool);
+
+// Get one or more threads from the thread pool
+// dowait indicates whether or not the caller blocks waiting for threads to free up
+// nthreads on input determines the number of threads that are wanted
+// nthreads on output indicates the number of threads that were allocated
+// toku_thread_return on input supplies an array of thread pointers (all NULL). This function returns the threads
+// that were allocated in the array.
+int toku_thread_pool_get(struct toku_thread_pool *pool, int dowait, int *nthreads, struct toku_thread **toku_thread_return);
+
+// Run a function f on one or more threads allocated from the thread pool
+int toku_thread_pool_run(struct toku_thread_pool *pool, int dowait, int *nthreads, void *(*f)(void *arg), void *arg);
+
+// Print the state of the thread pool
+void toku_thread_pool_print(struct toku_thread_pool *pool, FILE *out);
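+
+// A minimal usage sketch (illustrative only; my_work and my_arg are placeholder names, error handling elided):
+//
+//   struct toku_thread_pool *pool = nullptr;
+//   int r = toku_thread_pool_create(&pool, 4);                    // pool of at most 4 threads
+//   int n = 2;                                                    // ask for up to 2 threads
+//   if (r == 0) r = toku_thread_pool_run(pool, 1, &n, my_work, my_arg);
+//   toku_thread_pool_destroy(&pool);                              // joins the threads and frees the pool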
diff --git a/storage/tokudb/PerconaFT/util/x1764.cc b/storage/tokudb/PerconaFT/util/x1764.cc
new file mode 100644
index 00000000..22f02cf9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/x1764.cc
@@ -0,0 +1,244 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#include <toku_stdlib.h>
+#include <portability/toku_portability.h>
+
+#include "x1764.h"
+
+#define PRINT 0
+
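+// The x1764 checksum, as computed by the reference routine below: the input is consumed as little-endian 64-bit
+// words (a short tail is zero-padded), each word is folded in as c = c*17 + word, and the final 64-bit sum is
+// reduced to 32 bits by XORing its two halves and complementing the result.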
+uint32_t toku_x1764_memory_simple (const void *buf, int len)
+{
+ const uint64_t *CAST_FROM_VOIDP(lbuf, buf);
+ uint64_t c=0;
+ while (len>=8) {
+ c = c*17 + *lbuf;
+ if (PRINT) printf("%d: input=%016" PRIx64 " sum=%016" PRIx64 "\n", __LINE__, *lbuf, c);
+ lbuf++;
+ len-=8;
+ }
+ if (len>0) {
+ const uint8_t *cbuf=(uint8_t*)lbuf;
+ int i;
+ uint64_t input=0;
+ for (i=0; i<len; i++) {
+ input |= ((uint64_t)(cbuf[i]))<<(8*i);
+ }
+ c = c*17 + input;
+ }
+ return ~((c&0xFFFFFFFF) ^ (c>>32));
+}
+
+uint32_t toku_x1764_memory (const void *vbuf, int len)
+{
+ const uint8_t *CAST_FROM_VOIDP(buf, vbuf);
+ int len_4_words = 4*sizeof(uint64_t);
+ uint64_t suma=0, sumb=0, sumc=0, sumd=0;
+ while (len >= len_4_words) {
+ suma = suma*(17LL*17LL*17LL*17LL) + *(uint64_t*)(buf +0*sizeof(uint64_t));
+ sumb = sumb*(17LL*17LL*17LL*17LL) + *(uint64_t*)(buf +1*sizeof(uint64_t));
+ sumc = sumc*(17LL*17LL*17LL*17LL) + *(uint64_t*)(buf +2*sizeof(uint64_t));
+ sumd = sumd*(17LL*17LL*17LL*17LL) + *(uint64_t*)(buf +3*sizeof(uint64_t));
+ buf += len_4_words;
+ len -= len_4_words;
+ }
+ uint64_t sum = suma*17L*17L*17L + sumb*17L*17L + sumc*17L + sumd;
+ assert(len>=0);
+ while ((uint64_t)len>=sizeof(uint64_t)) {
+ sum = sum*17 + *(uint64_t*)buf;
+ buf+=sizeof(uint64_t);
+ len-=sizeof(uint64_t);
+ }
+ if (len>0) {
+ uint64_t tailsum = 0;
+ for (int i=0; i<len; i++) {
+ tailsum |= ((uint64_t)(buf[i]))<<(8*i);
+ }
+ sum = sum*17 + tailsum;
+ }
+ return ~((sum&0xFFFFFFFF) ^ (sum>>32));
+}
+
+
+void toku_x1764_init(struct x1764 *l) {
+ l->sum=0;
+ l->input=0;
+ l->n_input_bytes=0;
+}
+
+void toku_x1764_add (struct x1764 *l, const void *vbuf, int len) {
+ if (PRINT) printf("%d: n_input_bytes=%d len=%d\n", __LINE__, l->n_input_bytes, len);
+ int n_input_bytes = l->n_input_bytes;
+ const unsigned char *CAST_FROM_VOIDP(cbuf, vbuf);
+ // Special case short inputs
+ if (len==1) {
+ uint64_t input = l->input | ((uint64_t)(*cbuf))<<(8*n_input_bytes);
+ n_input_bytes++;
+ if (n_input_bytes==8) {
+ l->sum = l->sum*17 + input;
+ l->n_input_bytes = 0;
+ l->input = 0;
+ } else {
+ l->input = input;
+ l->n_input_bytes = n_input_bytes;
+ }
+ return;
+ } else if (len==2) {
+ uint64_t input = l->input;
+ uint64_t thisv = ((uint64_t)(*(uint16_t*)cbuf));
+ if (n_input_bytes==7) {
+ l->sum = l->sum*17 + (input | (thisv<<(8*7)));
+ l->input = thisv>>8;
+ l->n_input_bytes = 1;
+ } else if (n_input_bytes==6) {
+ l->sum = l->sum*17 + (input | (thisv<<(8*6)));
+ l->input = 0;
+ l->n_input_bytes = 0;
+ } else {
+ l->input = input | (thisv<<(8*n_input_bytes));
+ l->n_input_bytes += 2;
+ }
+ return;
+ }
+
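+ // General case: first flush any bytes pending in l->input by splicing incoming words across
+ // the boundary, then consume whole 64-bit words directly, and finally stash the trailing
+ // (fewer than 8) bytes back into l->input for a later toku_x1764_add or toku_x1764_finish.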
+ uint64_t sum;
+ //assert(len>=0);
+ if (n_input_bytes) {
+ uint64_t input = l->input;
+ if (len>=8) {
+ sum = l->sum;
+ while (len>=8) {
+ uint64_t thisv = *(uint64_t*)cbuf;
+ input |= thisv<<(8*n_input_bytes);
+ sum = sum*17 + input;
+ if (PRINT) printf("%d: input=%016" PRIx64 " sum=%016" PRIx64 "\n", __LINE__, input, sum);
+ input = thisv>>(8*(8-n_input_bytes));
+ if (PRINT) printf("%d: input=%016" PRIx64 "\n", __LINE__, input);
+ len-=8;
+ cbuf+=8;
+ // n_input_bytes remains unchanged
+ if (PRINT) printf("%d: n_input_bytes=%d len=%d\n", __LINE__, l->n_input_bytes, len);
+ }
+ l->sum = sum;
+ }
+ if (len>=4) {
+ uint64_t thisv = *(uint32_t*)cbuf;
+ if (n_input_bytes<4) {
+ input |= thisv<<(8*n_input_bytes);
+ if (PRINT) printf("%d: input=%016" PRIx64 "\n", __LINE__, input);
+ n_input_bytes+=4;
+ } else {
+ input |= thisv<<(8*n_input_bytes);
+ l->sum = l->sum*17 + input;
+ if (PRINT) printf("%d: input=%016" PRIx64 " sum=%016" PRIx64 "\n", __LINE__, input, l->sum);
+ input = thisv>>(8*(8-n_input_bytes));
+ n_input_bytes-=4;
+ if (PRINT) printf("%d: input=%016" PRIx64 " n_input_bytes=%d\n", __LINE__, input, n_input_bytes);
+ }
+ len-=4;
+ cbuf+=4;
+ if (PRINT) printf("%d: len=%d\n", __LINE__, len);
+ }
+ //assert(n_input_bytes<=8);
+ while (n_input_bytes<8 && len) {
+ input |= ((uint64_t)(*cbuf))<<(8*n_input_bytes);
+ n_input_bytes++;
+ cbuf++;
+ len--;
+ }
+ //assert(len>=0);
+ if (n_input_bytes<8) {
+ //assert(len==0);
+ l->input = input;
+ l->n_input_bytes = n_input_bytes;
+ if (PRINT) printf("%d: n_input_bytes=%d\n", __LINE__, l->n_input_bytes);
+ return;
+ }
+ sum = l->sum*17 + input;
+ } else {
+ //assert(len>=0);
+ sum = l->sum;
+ }
+ //assert(len>=0);
+ while (len>=8) {
+ sum = sum*17 + *(uint64_t*)cbuf;
+ cbuf+=8;
+ len -=8;
+ }
+ l->sum = sum;
+ n_input_bytes = 0;
+ uint64_t input;
+ l->n_input_bytes = len;
+ // Surprisingly, the plain byte-by-byte loop (the first of the three variants below) is the fastest on Bradley's laptop.
+ if (1) {
+ int i;
+ input=0;
+ for (i=0; i<len; i++) {
+ input |= ((uint64_t)(cbuf[i]))<<(8*i);
+ }
+ } else if (0) {
+ switch (len) {
+ case 7: input = ((uint64_t)(*(uint32_t*)(cbuf))) | (((uint64_t)(*(uint16_t*)(cbuf+4)))<<32) | (((uint64_t)(*(cbuf+4)))<<48); break;
+ case 6: input = ((uint64_t)(*(uint32_t*)(cbuf))) | (((uint64_t)(*(uint16_t*)(cbuf+4)))<<32); break;
+ case 5: input = ((uint64_t)(*(uint32_t*)(cbuf))) | (((uint64_t)(*(cbuf+4)))<<32); break;
+ case 4: input = ((uint64_t)(*(uint32_t*)(cbuf))); break;
+ case 3: input = ((uint64_t)(*(uint16_t*)(cbuf))) | (((uint64_t)(*(cbuf+2)))<<16); break;
+ case 2: input = ((uint64_t)(*(uint16_t*)(cbuf))); break;
+ case 1: input = ((uint64_t)(*cbuf)); break;
+ case 0: input = 0; break;
+ default: abort();
+ }
+ } else {
+ input=0;
+ int i=0;
+ if (len>=4) { input = ((uint64_t)(*(uint32_t*)(cbuf))); cbuf+=4; len-=4; i=4;}
+ if (len>=2) { input |= ((uint64_t)(*(uint16_t*)(cbuf)))<<(i*8); cbuf+=2; len-=2; i+=2; }
+ if (len>=1) { input |= ((uint64_t)(*(uint8_t *)(cbuf)))<<(i*8); /*cbuf+=1; len-=1; i++;*/ }
+ }
+ l->input = input;
+ if (PRINT) printf("%d: n_input_bytes=%d\n", __LINE__, l->n_input_bytes);
+}
+uint32_t toku_x1764_finish (struct x1764 *l) {
+ if (PRINT) printf("%d: n_input_bytes=%d\n", __LINE__, l->n_input_bytes);
+ int len = l->n_input_bytes;
+ if (len>0) {
+ l->sum = l->sum*17 + l->input;
+ }
+ return ~((l->sum &0xffffffff) ^ (l->sum>>32));
+}
diff --git a/storage/tokudb/PerconaFT/util/x1764.h b/storage/tokudb/PerconaFT/util/x1764.h
new file mode 100644
index 00000000..41302658
--- /dev/null
+++ b/storage/tokudb/PerconaFT/util/x1764.h
@@ -0,0 +1,70 @@
+/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
+#ident "$Id$"
+/*======
+This file is part of PerconaFT.
+
+
+Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ PerconaFT is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License, version 3,
+ as published by the Free Software Foundation.
+
+ PerconaFT is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+======= */
+
+#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+
+#pragma once
+
+#include <toku_stdint.h>
+
+// The x1764 hash of an input of $n$ 64-bit words is
+// $s = \sum_i a_i \cdot 17^{\,n-1-i}$, where $a_i$ is the $i$th 64-bit word of the input (interpreted little-endian);
+// equivalently, $s$ is accumulated Horner-style as $s \leftarrow 17s + a_i$ for each word in order.
+// The final 32-bit result is the bitwise complement of the xor of the high- and low-order 32-bit halves of $s$.
+// If fewer than 8 bytes remain at the end, they are packed into one last 64-bit word at the low end (zero-padded above).
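+//
+// Worked example (illustrative, derived from the definition above): for the 3-byte buffer
+// {0x01, 0x02, 0x03}, the bytes pack into the single little-endian word 0x0000000000030201,
+// so s = 0x30201 and the result is ~(0x00030201 ^ 0x00000000) = 0xFFFCFDFE.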
+
+
+uint32_t toku_x1764_memory (const void *buf, int len);
+// Effect: Compute x1764 on the bytes of buf. Return the 32 bit answer.
+
+uint32_t toku_x1764_memory_simple (const void *buf, int len);
+// Effect: Same as toku_x1764_memory, but not highly optimized (more likely to be correct). Useful for testing the optimized version.
+
+
+// For incrementally computing an x1764, use the following interfaces.
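+// The fields: sum is the Horner accumulator over the complete 64-bit words seen so far;
+// input packs up to 7 pending trailing bytes little-endian at its low end; n_input_bytes
+// counts how many pending bytes input currently holds.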
+struct x1764 {
+ uint64_t sum;
+ uint64_t input;
+ int n_input_bytes;
+};
+
+void toku_x1764_init(struct x1764 *l);
+// Effect: Initialize *l.
+
+void toku_x1764_add (struct x1764 *l, const void *vbuf, int len);
+// Effect: Add more bytes to *l.
+
+uint32_t toku_x1764_finish (struct x1764 *l);
+// Effect: Return the final 32-bit result.
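+
+// Usage sketch (illustrative only, not part of the original interface documentation): feeding
+// an arbitrary byte buffer buf of length len >= 10 to the incremental interface in pieces
+// should match toku_x1764_memory on the whole buffer:
+//
+//   struct x1764 ctx;
+//   toku_x1764_init(&ctx);
+//   toku_x1764_add(&ctx, buf, 10);                           // first 10 bytes
+//   toku_x1764_add(&ctx, (const char *)buf + 10, len - 10);  // the rest
+//   uint32_t c = toku_x1764_finish(&ctx);                    // c == toku_x1764_memory(buf, len)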